diff --git a/.clang-tidy.ignore b/.clang-tidy.ignore index 2b27ee02b..94d682437 100644 --- a/.clang-tidy.ignore +++ b/.clang-tidy.ignore @@ -1 +1 @@ -src/quantizer/gguf.hpp \ No newline at end of file +mllm/quantizer/gguf.hpp \ No newline at end of file diff --git a/.gitignore b/.gitignore index 97847b5ed..a65454c60 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,7 @@ .vscode/ .idea/ .cache/ +.DS_Store build*/ build/ bin/ @@ -26,15 +27,20 @@ models/* /.devcontainer/ /.vscode/ workflow.py -src/backends/qnn/qualcomm_ai_engine_direct_220/* -src/backends/qnn/HexagonSDK/* +mllm/backends/qnn/qualcomm_ai_engine_direct_220/* +mllm/backends/qnn/HexagonSDK/* tmp/ py-build-out/ mllm.egg-info/ -examples/demo_deepseek.cpp -src/models/deepseek/* -examples/demo.cpp +mllm/backends/qnn/sdk* -src/backends/qnn/sdk/* -*.mllm + + +.DS_Store +examples/test.cpp +examples/demo_bailing_moe2* +mllm/models/ling2 +scripts/tmp.sh +tools/convertor/gptq_converter.py +*.patch diff --git a/.gitmodules b/.gitmodules index b52617297..78351cb62 100644 --- a/.gitmodules +++ b/.gitmodules @@ -4,9 +4,12 @@ [submodule "third_party/pybind11"] path = third_party/pybind11 url = https://github.com/pybind/pybind11.git -[submodule "src/backends/xnnpack/third_party/XNNPACK"] - path = src/backends/xnnpack/third_party/XNNPACK +[submodule "mllm/backends/xnnpack/third_party/XNNPACK"] + path = mllm/backends/xnnpack/third_party/XNNPACK url = https://github.com/google/XNNPACK.git [submodule "third_party/googletest"] path = third_party/googletest url = https://github.com/google/googletest.git +[submodule "mllm/backends/cpu/third_party/kleidiai"] + path = mllm/backends/cpu/third_party/kleidiai + url = https://github.com/ARM-software/kleidiai diff --git a/CMakeLists.txt b/CMakeLists.txt index c842fefd4..ae0fd8f13 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,6 +1,6 @@ cmake_minimum_required(VERSION 3.10) -project(mllm) +project(mllm CXX C ASM) # 添加编译选项来禁用所有警告 # if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" OR 
CMAKE_CXX_COMPILER_ID STREQUAL "Clang") @@ -11,6 +11,29 @@ project(mllm) cmake_policy(SET CMP0074 NEW) set(CMAKE_CXX_STANDARD 17) + + +# 添加Address Sanitizer选项 +option(USE_ASAN "Enable AddressSanitizer for memory leak detection" OFF) + +if(USE_ASAN) + message(STATUS "Enabling AddressSanitizer") + # 确保包含调试符号 + if(NOT MSVC) + add_compile_options(-g) + endif() + + # 设置ASan编译选项 + if(MSVC) + add_compile_options(/fsanitize=address) + add_link_options(/fsanitize:address) + else() + add_compile_options(-fsanitize=address -fno-omit-frame-pointer) + add_link_options(-fsanitize=address) + endif() +endif() + + option(ARM "build on ARM" OFF) set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) @@ -31,17 +54,24 @@ add_compile_options(-Wno-gnu-string-literal-operator-template) if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64") message(STATUS "ARM detected") set(ARM ON) - set(ANDROID_PLATFORM android-28) + # set(ANDROID_PLATFORM android-28) endif () if (ARM) - set(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/../bin-arm) +if(QNN) +set(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/../bin-arm-qnn) +elseif(CMAKE_HOST_SYSTEM_NAME STREQUAL "Darwin" AND NOT CMAKE_CROSSCOMPILING) +set(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/../bin) +else() +set(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/../bin-arm) +endif() + add_compile_definitions(__ARM_FEATURE_DOTPROD) # 检查是否使用的是 GCC 或 Clang 编译器 if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" OR CMAKE_CXX_COMPILER_ID STREQUAL "Clang") # 默认使用 armv8.2-a+dotprod,除非用户自定义了 CMAKE_CXX_FLAGS if(NOT DEFINED CMAKE_CXX_FLAGS OR CMAKE_CXX_FLAGS STREQUAL "") - set(CMAKE_CXX_FLAGS "-march=armv8.6-a+dotprod+i8mm") + set(CMAKE_CXX_FLAGS "-march=armv8.2-a+fp16+fp16fml+dotprod+i8mm") endif() endif() else () @@ -88,6 +118,32 @@ if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.24.0") cmake_policy(SET CMP0135 NEW) endif () +option(OPENCL "Enable OpenCL Backend" ON) +if(OPENCL) + if(ANDROID) + # 对于现代 NDK (r23+),我们不再需要 find_library 或 
find_package。 + # NDK 工具链会在链接时自动找到像 OpenCL 这样的系统库。 + # 我们只需确保启用了相关宏定义,并在链接目标时指明即可。 + message(STATUS "OpenCL backend for Android enabled. Linking will be handled by the NDK toolchain.") + add_definitions(-DUSE_OPENCL) + add_definitions(-DMLLM_TARGET_ANDROID) + include_directories(${CMAKE_SOURCE_DIR}/mllm/backends/opencl/third_party/OpenCL-Headers) + + else() + # 对于非 Android 平台,保持原有的查找逻辑 + find_package(OpenCL) + if(NOT OpenCL_FOUND) + message(STATUS "OpenCl backend not found.") + set(OPENCL OFF CACHE BOOL "Enable OpenCL Backend" FORCE) + else() + message(STATUS "OpenCL backend enabled.") + add_definitions(-DUSE_OPENCL) + endif() + endif() +endif() + + + # for XNNPACK, avoid invovle googltest twice. set(GOOGLETEST_SOURCE_DIR ${CMAKE_CURRENT_LIST_DIR}/third_party/googletest) add_subdirectory(third_party/googletest EXCLUDE_FROM_ALL) @@ -107,11 +163,11 @@ if (ARM AND NOT APK) set(MLLM_OPENMP_STATIC ON) endif () # turn off openmp when build on mac or for mac -if (CMAKE_HOST_SYSTEM_NAME STREQUAL "Darwin" AND NOT CMAKE_CROSSCOMPILING) - message(STATUS "mac detected, turn off openmp") - set(MLLM_OPENMP OFF) - set(MLLM_OPENMP_STATIC OFF) -endif () +# if (CMAKE_HOST_SYSTEM_NAME STREQUAL "Darwin" AND NOT CMAKE_CROSSCOMPILING) +# message(STATUS "mac detected, turn off openmp") +# set(MLLM_OPENMP OFF) +# set(MLLM_OPENMP_STATIC OFF) +# endif () if(${CMAKE_SYSTEM_PROCESSOR} MATCHES "^(x86_64|i686|AMD64)$") message(STATUS "x86_64 detected") @@ -127,18 +183,19 @@ elseif(${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm" OR ${CMAKE_SYSTEM_PROCESSOR} MATC endif() endif() -aux_source_directory(${PROJECT_SOURCE_DIR}/src DIR_SRC) +aux_source_directory(${PROJECT_SOURCE_DIR}/mllm DIR_SRC) -aux_source_directory(${PROJECT_SOURCE_DIR}/src/express DIR_SRC_EXP) +aux_source_directory(${PROJECT_SOURCE_DIR}/mllm/express DIR_SRC_EXP) +# directory for legacy code, which is not used in the current version(Graph,Net,Executor) +aux_source_directory(${PROJECT_SOURCE_DIR}/mllm/legacy DIR_SRC_LEGACY) 
-aux_source_directory(${PROJECT_SOURCE_DIR}/src/processor DIR_SRC_PROCESSOE) -aux_source_directory(${PROJECT_SOURCE_DIR}/src/memory DIR_SRC_MEM_MANAGER) +aux_source_directory(${PROJECT_SOURCE_DIR}/mllm/processor DIR_SRC_PROCESSOE) +aux_source_directory(${PROJECT_SOURCE_DIR}/mllm/memory DIR_SRC_MEM_MANAGER) aux_source_directory(${PROJECT_SOURCE_DIR}/examples EMP_SRC) aux_source_directory(${PROJECT_SOURCE_DIR}/test TEST_SRC) aux_source_directory(${PROJECT_SOURCE_DIR}/third_party/wenet_audio DIR_THIRDPARTY_AUDIO) -include_directories(${PROJECT_SOURCE_DIR}/src) -include_directories(${PROJECT_SOURCE_DIR}/include) +include_directories(${PROJECT_SOURCE_DIR}/mllm) include_directories(${PROJECT_SOURCE_DIR}/third_party) include_directories(${PROJECT_SOURCE_DIR}/third_party/fmt/include) @@ -146,20 +203,21 @@ include_directories(${PROJECT_SOURCE_DIR}/third_party/fmt/include) # You can remove those lines if you just want to build mllm instead dev on it. include_directories(${PROJECT_SOURCE_DIR}/third_party/pybind11/include) -add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/src/backends/cpu) +add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/mllm/backends/cpu) + + +if(OPENCL) + add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/mllm/backends/opencl) +endif() +# ========= if(QNN) # QNN lib include_directories( # $ENV{QNN_SDK_ROOT}/include/QNN # QNN SDK include - ${PROJECT_SOURCE_DIR}/src/backends/qnn/sdk/include/QNN # QNN SDK include - ${CMAKE_CURRENT_LIST_DIR}/src/backends/qnn - ${CMAKE_CURRENT_LIST_DIR}/src/backends/qnn/Log - ${CMAKE_CURRENT_LIST_DIR}/src/backends/qnn/PAL/include - ${CMAKE_CURRENT_LIST_DIR}/src/backends/qnn/Model - ${CMAKE_CURRENT_LIST_DIR}/src/backends/qnn/Utils - ${CMAKE_CURRENT_LIST_DIR}/src/backends/qnn/WrapperUtils + ${PROJECT_SOURCE_DIR}/mllm/backends/qnn/sdk/include/QNN # QNN SDK include + ${CMAKE_CURRENT_LIST_DIR}/mllm/backends/qnn ) - add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/src/backends/qnn) + add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/mllm/backends/qnn) endif() 
option(MLLM_BUILD_XNNPACK_BACKEND "Build mllm's XNNPACK backend" OFF) @@ -175,41 +233,88 @@ if(MLLM_BUILD_XNNPACK_BACKEND) set(XNNPACK_BUILD_TESTS OFF) set(XNNPACK_BUILD_BENCHMARKS OFF) add_definitions(-DMLLM_BUILD_XNNPACK_BACKEND=1) - add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/src/backends/xnnpack) + add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/mllm/backends/xnnpack) endif() # add tokenizers file(GLOB_RECURSE SRC_TOKENIZERS - ${PROJECT_SOURCE_DIR}/src/tokenizers/*.cpp - ${PROJECT_SOURCE_DIR}/src/tokenizers/*.hpp + ${PROJECT_SOURCE_DIR}/mllm/tokenizers/*.cpp + ${PROJECT_SOURCE_DIR}/mllm/tokenizers/*.hpp ) # if compile to x86_64 if(QUANT) - include_directories(${PROJECT_SOURCE_DIR}/src/quantizer) + include_directories(${PROJECT_SOURCE_DIR}/tools/quantizer) file(GLOB_RECURSE MLLM_QUANT - ${PROJECT_SOURCE_DIR}/src/backends/cpu/compute/GEMM_AArch64.hpp - ${PROJECT_SOURCE_DIR}/src/backends/cpu/compute/GEMM_AArch64.cpp - ${PROJECT_SOURCE_DIR}/src/backends/cpu/quantize/*.hpp - ${PROJECT_SOURCE_DIR}/src/backends/cpu/quantize/*.cpp + ${PROJECT_SOURCE_DIR}/mllm/backends/cpu/third_party/ggml/GemmPack.cpp + ${PROJECT_SOURCE_DIR}/mllm/backends/cpu/compute/GemmKleidiai.cpp + ${PROJECT_SOURCE_DIR}/mllm/backends/cpu/third_party/ggml/QuantizeQ8.cpp + ${PROJECT_SOURCE_DIR}/mllm/backends/cpu/third_party/ggml/QuantizeQ4.cpp + ${PROJECT_SOURCE_DIR}/mllm/backends/cpu/third_party/ggml/QuantizeQ6.cpp + ${PROJECT_SOURCE_DIR}/mllm/backends/cpu/third_party/ggml/QuantizeQ3.cpp + ${PROJECT_SOURCE_DIR}/mllm/backends/cpu/third_party/ggml/QuantizeQ2.cpp ) - file(GLOB_RECURSE MLLM_QUANTIZER - ${CMAKE_CURRENT_LIST_DIR}/src/quantizer/*.cpp - ${CMAKE_CURRENT_LIST_DIR}/src/quantizer/*.hpp) - list(REMOVE_ITEM MLLM_QUANTIZER ${CMAKE_CURRENT_LIST_DIR}/src/quantizer/main.cpp) + if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64" OR CMAKE_SYSTEM_PROCESSOR MATCHES "arm64") + # 配置 kleidiai 库路径 + set(KLEIDIAI_SOURCE_DIR ${PROJECT_SOURCE_DIR}/mllm/backends/cpu/third_party/kleidiai) + if(NOT EXISTS 
${KLEIDIAI_SOURCE_DIR}) + message(FATAL_ERROR "kleidiai library not found! Please place it in 'third_party/kleidiai'.") + endif() + # 添加所有源文件路径到 MLLM_QUANT + list(APPEND MLLM_QUANT + # QSI4_C32P (to FP32) 模块源文件 + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi4c32p/kai_matmul_clamp_f32_qai8dxp1x8_qsi4c32p4x8_1x4x32_neon_dotprod.c + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi4c32p/kai_matmul_clamp_f32_qai8dxp1x8_qsi4c32p4x8_1x4x32_neon_dotprod_asm.S + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi4c32p/kai_matmul_clamp_f32_qai8dxp4x8_qsi4c32p4x8_8x4x32_neon_i8mm.c + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi4c32p/kai_matmul_clamp_f32_qai8dxp4x8_qsi4c32p4x8_8x4x32_neon_i8mm_asm.S + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/pack/kai_lhs_quant_pack_qai8dxp_f32.c + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/pack/kai_rhs_pack_kxn_qsi4c32p_qsu4c32s1s0.c + + # [新增] QSI4_CXP (to FP16) 模块源文件 + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/matmul_clamp_f16_qai8dxp_qsi4cxp/kai_matmul_clamp_f16_qai8dxp1x8_qsi4cxp4x8_1x4_neon_dotprod.c + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/matmul_clamp_f16_qai8dxp_qsi4cxp/kai_matmul_clamp_f16_qai8dxp1x8_qsi4cxp4x8_1x4_neon_dotprod_asm.S + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/matmul_clamp_f16_qai8dxp_qsi4cxp/kai_matmul_clamp_f16_qai8dxp4x8_qsi4cxp4x8_16x4_neon_i8mm.c + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/matmul_clamp_f16_qai8dxp_qsi4cxp/kai_matmul_clamp_f16_qai8dxp4x8_qsi4cxp4x8_16x4_neon_i8mm_asm.S + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/pack/kai_lhs_quant_pack_qai8dxp_f16_neon.c + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/pack/kai_rhs_pack_kxn_qsi4cxp_qs4cxs1s0.c + + # FP16 (f16*f16) 模块源文件 + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/matmul_clamp_f16_f16_f16p/kai_matmul_clamp_f16_f16_f16p16x1biasf16_6x16x8_neon_mla.c + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/pack/kai_rhs_pack_kxn_f16p16x1biasf16_f16_f16_neon.c + + 
# FP32 (f32*f32) 模块源文件 + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/matmul_clamp_f32_f32_f32p/kai_matmul_clamp_f32_f32_f32p8x1biasf32_6x8x4_neon_mla.c + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/matmul_clamp_f32_f32_f32p/kai_matmul_clamp_f32_f32_f32p8x1biasf32_6x8x4_neon_mla_asm.S + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/pack/kai_rhs_pack_kxn_f32p8x1biasf32_f32_f32_neon.c + ) + include_directories( + ${KLEIDIAI_SOURCE_DIR} + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi4c32p + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/matmul_clamp_f16_qai8dxp_qsi4cxp + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/pack + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/matmul_clamp_f16_f16_f16p + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/matmul_clamp_f32_f32_f32p + ) + endif() + file(GLOB_RECURSE MLLM_QUANTIZER + ${PROJECT_SOURCE_DIR}/tools/quantizer/*.cpp + ${PROJECT_SOURCE_DIR}/tools/quantizer/*.hpp) + list(REMOVE_ITEM MLLM_QUANTIZER ${PROJECT_SOURCE_DIR}/tools/quantizer/main_quantize.cpp) add_executable( quantize - ${PROJECT_SOURCE_DIR}/src/quantizer/main.cpp + ${PROJECT_SOURCE_DIR}/tools/quantizer/main_quantize.cpp ${MLLM_QUANT} ${MLLM_QUANTIZER} - - # ${DIR_SRC} - ${PROJECT_SOURCE_DIR}/src/ParamLoader.cpp + ${PROJECT_SOURCE_DIR}/mllm/ParamLoader.cpp ) - target_link_libraries(quantize fmt::fmt-header-only) + if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64" OR CMAKE_SYSTEM_PROCESSOR MATCHES "arm64") + message(STATUS "AArch64/arm64 architecture detected. 
Applying FP16 compile options to the target.") + target_compile_options(quantize PRIVATE "-march=armv8.2-a+fp16") + endif() + target_link_libraries(quantize fmt::fmt-header-only) if(FROM_GGUF) add_executable( from_gguf @@ -219,7 +324,7 @@ if(QUANT) ${MLLM_QUANTIZER} # ${DIR_SRC} - ${PROJECT_SOURCE_DIR}/src/ParamLoader.cpp + ${PROJECT_SOURCE_DIR}/mllm/ParamLoader.cpp ) target_link_libraries(from_gguf fmt::fmt-header-only) endif() @@ -229,61 +334,41 @@ if(TEST) add_subdirectory(test) endif() -# add_executable(demo examples/demo.cpp) -# target_link_libraries(demo fmt::fmt-header-only) add_subdirectory(examples) if(APK) - add_library(mllm_lib STATIC ${DIR_SRC_CPU} ${DIR_SRC_EXP} ${DIR_SRC} ${DIR_SRC_MEM_MANAGER} ${DIR_SRC_PROCESSOE} + add_library(mllm_lib STATIC ${DIR_SRC_EXP} ${DIR_SRC} ${DIR_SRC_MEM_MANAGER} ${DIR_SRC_PROCESSOE} ${DIR_SRC_LEGACY} ${DIR_THIRDPARTY_AUDIO} - src/tokenizers/Tokenizer.cpp + mllm/tokenizers/Tokenizer.cpp tools/jni/LibHelper.cpp - - # src/tokenizers/Tokenizer.hpp - # src/tokenizers/Unigram/Unigram.hpp - src/tokenizers/Unigram/Unigram.cpp - - # src/tokenizers/Unigram/trie.hpp - src/tokenizers/BPE/Bpe.cpp - - # src/tokenizers/BPE/Bpe.hpp - src/tokenizers/Unicode.cpp - src/tokenizers/UnicodeData.cpp - src/tokenizers/BPE/Bpe.cpp - src/tokenizers/WordPiece/WordPiece.cpp - - # models/bert/configuration_bert.hpp - # models/bert/modeling_bert.hpp - # models/bert/tokenization_bert.hpp - # models/fuyu/configuration_fuyu.hpp - # models/fuyu/modeling_fuyu.hpp - # models/fuyu/processing_fuyu.hpp - # models/phonelm/configuration_phonelm.hpp - # models/phonelm/modeling_phonelm.hpp - # models/qwen/configuration_qwen.hpp - # models/qwen/modeling_qwen.hpp - # models/qwen/tokenization_qwen.hpp - # models/smollm/tokenization_smollm.hpp - # tokenizers/Unigram/Unigram.hpp + mllm/tokenizers/Unigram/Unigram.cpp + mllm/tokenizers/BPE/Bpe.cpp + mllm/tokenizers/Unicode.cpp + mllm/tokenizers/UnicodeData.cpp + mllm/tokenizers/BPE/Bpe.cpp + 
mllm/tokenizers/WordPiece/WordPiece.cpp ) - target_link_libraries(mllm_lib MLLM_CPU) + target_link_libraries(mllm_lib mllm_cpu) + if (OPENCL) + target_link_libraries(mllm_lib mllm_opencl) + endif() if(QNN) - target_link_libraries(mllm_lib MLLM_QNN) + target_link_libraries(mllm_lib mllm_qnn) endif() endif() if(MLLM_ENABLE_PYTHON) - target_compile_options(MLLM_CPU PRIVATE -fPIC) + target_compile_options(mllm_cpu PRIVATE -fPIC) find_package(Python3 COMPONENTS Interpreter Development) include_directories(${Python3_INCLUDE_DIRS}) add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/pybind11) set(_py_dep_libs - MLLM_CPU + mllm_cpu - # MLLM_QNN + # mllm_qnn # ${CMAKE_DL_LIBS} ) diff --git a/README.md b/README.md index 88c44bc70..544387659 100644 --- a/README.md +++ b/README.md @@ -28,16 +28,15 @@ fast and lightweight multimodal LLM inference engine for mobile and e mllm is a lightweight, fast, and easy-to-use (multimodal) on-device LLM inference engine for mobile devices (mainly supporting CPU/NPU), initiated by the research groups led by [Mengwei Xu](https://xumengwei.github.io/) (BUPT) and [Xuanzhe Liu](http://www.liuxuanzhe.com/) (PKU). ## Recent update + +- [2025 July 30] Add Rotation Quantization method for QNN backend models and support Qwen-2-VL 2B - [2025 August 28] 🔥🔥🔥 Support for MLLM V1 is ending soon. Before its retirement, V1 will integrate the following features: GPT-OSS and NPU QWEN2-VL. MLLM will then transition to V2, which can be viewed on the V2 branch. 
V2 will include brand-new capabilities: - A more Pythonic model authoring approach with eager execution - Compilation support and MLLM IR for easier NPU integration - Support for parallel execution of multiple models - A more refined engineering implementation -- [2024 November 21] Support new model: Phi 3 Vision https://github.com/UbiquitousLearning/mllm/pull/186 -- [2024 August 30] Support new model: MiniCPM 2B https://github.com/UbiquitousLearning/mllm/pull/132 -- [2024 August 15] Support new model: Phi 3 mini https://github.com/UbiquitousLearning/mllm/pull/119 -- [2024 Aug 10] Supporting Qualcomm NPU: https://github.com/UbiquitousLearning/mllm/pull/112 | [try it out](https://github.com/UbiquitousLearning/mllm/tree/main/src/backends/qnn) | [paper](https://arxiv.org/pdf/2407.05858v1) + ### Contents @@ -97,9 +96,9 @@ V2 will include brand-new capabilities: | [LLaVA 7B](https://github.com/haotian-liu/LLaVA) | [✔️](https://huggingface.co/mllmTeam/llava-1.5-7b-mllm/tree/main) | [✔️](https://huggingface.co/mllmTeam/llava-1.5-7b-mllm/tree/main) | | | [Gemma 2B](https://github.com/google/gemma_pytorch) | [✔️](https://huggingface.co/mllmTeam/gemma-2b-mllm/tree/main) | [✔️](https://huggingface.co/mllmTeam/gemma-2b-mllm/tree/main) | | | [Gemma 2 2B](https://github.com/google/gemma_pytorch) | [✔️](https://huggingface.co/mllmTeam/gemma-2-2b-mllm/tree/main) | [✔️](https://huggingface.co/mllmTeam/gemma-2-2b-mllm/tree/main) | | -| [Qwen 1.5 0.5B](https://github.com/QwenLM/Qwen) | [✔️](https://huggingface.co/mllmTeam/qwen-1.5-0.5b-mllm/tree/main) | [✔️](https://huggingface.co/mllmTeam/qwen-1.5-0.5b-mllm/tree/main) | | +| [Qwen 1.5 0.5B](https://github.com/QwenLM/Qwen) | [✔️](https://huggingface.co/mllmTeam/qwen-1.5-0.5b-mllm/tree/main) | [✔️](https://huggingface.co/mllmTeam/qwen-1.5-0.5b-mllm/tree/main) | ✔️ | | [Qwen 1.5 1.8B](https://github.com/QwenLM/Qwen) | [✔️](https://huggingface.co/mllmTeam/qwen-1.5-1.8b-chat-mllm) | 
[✔️](https://huggingface.co/mllmTeam/qwen-1.5-1.8b-chat-mllm) | [✔️](https://huggingface.co/mllmTeam/qwen-1.5-1.8b-chat-mllm) | -| [Qwen 2.5 1.5B](https://github.com/QwenLM/Qwen2.5) | [✔️](https://huggingface.co/mllmTeam/qwen-2.5-1.5b-mllm/tree/main) | [✔️](https://huggingface.co/mllmTeam/qwen-2.5-1.5b-mllm/tree/main) | | +| [Qwen 2.5 1.5B](https://github.com/QwenLM/Qwen2.5) | [✔️](https://huggingface.co/mllmTeam/qwen-2.5-1.5b-mllm/tree/main) | [✔️](https://huggingface.co/mllmTeam/qwen-2.5-1.5b-mllm/tree/main) | ✔️ | | [Qwen 3 0.6B](https://github.com/QwenLM/Qwen3) | [✔️](https://huggingface.co/mllmTeam/qwen-3-0.6b-mllm/tree/main) | [✔️](https://huggingface.co/mllmTeam/qwen-3-0.6b-mllm/tree/main) | | | [Mistral 7B](https://github.com/mistralai/mistral-src) | [✔️](https://huggingface.co/mllmTeam/mistral-7b-instruct-v0.2-mllm/tree/main) | [✔️](https://huggingface.co/mllmTeam/mistral-7b-instruct-v0.2-mllm/tree/main) | | | [Yi 6B](https://huggingface.co/01-ai/Yi-1.5-6B) | [✔️](https://huggingface.co/mllmTeam/yi-1.5-6b-chat-mllm/tree/main) | [✔️](https://huggingface.co/mllmTeam/yi-1.5-6b-chat-mllm/tree/main) | | @@ -116,15 +115,15 @@ V2 will include brand-new capabilities: ### Multimodal models -| Model | CPU
FP32 | CPU
INT4 | -|-----------------------------------------------------------------------------|------|-----| +| Model | CPU
FP32 | CPU
INT4 | Hexagon NPU
INT8 | +|-----------------------------------------------------------------------------|------|-----|----------------------------| | [Fuyu 8B](https://www.adept.ai/blog/fuyu-8b) | [✔️](https://huggingface.co/mllmTeam/fuyu-8b-mllm/tree/main) | [✔️](https://huggingface.co/mllmTeam/fuyu-8b-mllm/tree/main) | | [Vision Transformer](https://github.com/google-research/vision_transformer) | [✔️](https://huggingface.co/mllmTeam/vit-base-patch16-224-mllm/tree/main) | [✔️](https://huggingface.co/mllmTeam/vit-base-patch16-224-mllm/tree/main) | | [CLIP](https://github.com/openai/CLIP) | [✔️](https://huggingface.co/mllmTeam/clip-vit-base-patch32-mllm/tree/main) | [✔️](https://huggingface.co/mllmTeam/clip-vit-base-patch32-mllm/tree/main) | | [ImageBind](https://github.com/facebookresearch/ImageBind) (3 modalities) | [✔️](https://huggingface.co/mllmTeam/imagebind_huge-mllm/tree/main) | [✔️](https://huggingface.co/mllmTeam/imagebind_huge-mllm/tree/main) | | [LLaVA 7B](https://github.com/haotian-liu/LLaVA) | [✔️](https://huggingface.co/mllmTeam/llava-1.5-7b-mllm/tree/main) | [✔️](https://huggingface.co/mllmTeam/llava-1.5-7b-mllm/tree/main) | | [Phi-3-Vision](https://huggingface.co/microsoft/Phi-3-vision-128k-instruct) | [✔️](https://huggingface.co/mllmTeam/phi-3-vision-instruct-mllm/tree/main) | [✔️](https://huggingface.co/mllmTeam/phi-3-vision-instruct-mllm/tree/main) | -| [Qwen2-VL 2B](https://github.com/QwenLM/Qwen2-VL) | [✔️](https://huggingface.co/mllmTeam/qwen-2-vl-2b-instruct--mllm/tree/main) | [✔️](https://huggingface.co/mllmTeam/qwen-2-vl-2b-instruct--mllm/tree/main) | +| [Qwen2-VL 2B](https://github.com/QwenLM/Qwen2-VL) | [✔️](https://huggingface.co/mllmTeam/qwen-2-vl-2b-instruct--mllm/tree/main) | [✔️](https://huggingface.co/mllmTeam/qwen-2-vl-2b-instruct--mllm/tree/main) | ✔️ | ## Quick Start @@ -134,6 +133,9 @@ V2 will include brand-new capabilities: ```bash git clone https://github.com/UbiquitousLearning/mllm cd mllm +git submodule update --init --recursive \ + 
third_party/googletest \ + mllm/backends/cpu/third_party/kleidiai ``` ### Check prerequisites @@ -151,7 +153,12 @@ Building mllm requires following tools: *`NOTE:` The QNN backend is preliminary version which can do end-to-end inference. It is still under active development for better performance and more supported models.* -We support running Qwen-1.5-1.8B-Chat using [Qualcomm QNN](https://www.qualcomm.com/developer/software/qualcomm-ai-engine-direct-sdk) to get Hexagon NPU acceleration on devices with Snapdragon 8 Gen3. The details of QNN environment set up and design is [here](./src/backends/qnn/README.md). The prefilling stage is performered by QNN & CPU, and the inference stage is performed by CPU. +We support running several Qwen family models including Qwen-2-vl using [Qualcomm QNN](https://www.qualcomm.com/developer/software/qualcomm-ai-engine-direct-sdk) to get Hexagon NPU acceleration on devices with Snapdragon 8 Gen3. The details of QNN environment set up and design are [here](./mllm/backends/qnn/README.md). The prefilling stage is performed by QNN & CPU, and the inference stage is performed by CPU. + +Specifically, we support the following models (similar architecture models are also supported): +- Qwen 1.5 1.8B (demo_qwen_npu, demo_qwen_pipeline) +- Qwen 2.5 1.5B (demo_qwen_npu, demo_qwen_pipeline) +- Qwen 2 VL (demo_qwen2_vl_npu and demo_qwen2_vl_npuvit) Build the target with QNN backend. @@ -160,7 +167,7 @@ cd ../script ./build_qnn_android.sh ``` -Download the model from [here](https://huggingface.co/mllmTeam/qwen-1.5-1.8b-chat-mllm/blob/main/), or using the following instructions +Download the model from [here](https://huggingface.co/mllmTeam/qwen-1.5-1.8b-chat-mllm/blob/main/), or using the following instructions to download the model. You can also export PyTorch models for QNN backend with int8 weight quantization and apply rotation quantization. Details can be found in backend specific [README](./mllm/backends/qnn/README.md). 
```bash mkdir ../models && cd ../models @@ -169,19 +176,19 @@ wget https://huggingface.co/mllmTeam/qwen-1.5-1.8b-chat-mllm/resolve/main/qwen-1 wget https://huggingface.co/mllmTeam/qwen-1.5-1.8b-chat-mllm/resolve/main/qwen-1.5-1.8b-chat-q4k.mllm?download=true -O qwen-1.5-1.8b-chat-q4k.mllm ``` -Run on an android phone with at least 16GB of memory. +Currently, QNN backend uses models with W8A8 or W8A16 quantization. (It is determined by Quantize & Dequantize ops in modeling class; you can refer to `mllm/models/qwen/modeling_qwen_npu_v2.hpp` for more details.) + +Run on an Android phone with at least 16GB of memory as building the QNN graphs on device will consume a lot of memory. After building and saving QNN graphs to qnn_context.bin, the runtime memory usage will meet the expectation. The `demo_qwen_pipeline.cpp` will show the pipeline parallel execution for QNN models, which has nearly 1.5x speedup compared with the original execution. ```bash cd ../script -./run_qwen_npu.sh +./run_qwen_qnn.sh ``` -There are two arguments in the executable. `-s` is for the sequence length of prefilling, the default value is 64 in the demo we provided. `-c` for type of QNN prefilling options, when it is set to 1, the input will be splited into many chunks of sequence 32 and be executed in a pipeline. When it is set to 0, the input will be executed in one chunk. - Result are as followed: ``` -> ./main_qwen_npu -s 64 -c 1 +> ./demo_qwen_npu [Q] <|im_start|>system You are a helpful assistant.<|im_end|> <|im_start|>user @@ -362,7 +369,7 @@ You can convert vocabulary to mllm vocabulary as followed. 
```bash cd tools/convertor -python vocab.py --input_file=tokenizer.json --output_file=vocab.mllm --type=Unigram +python vocab.py --input_file=tokenizer.json --output_file=vocab.mllm --type=BPE ``` ### Quantize models diff --git a/assets/rotation.png b/assets/rotation.png new file mode 100644 index 000000000..43f9f2960 Binary files /dev/null and b/assets/rotation.png differ diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 198877170..0136f525d 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -1,60 +1,84 @@ -macro(func_link_libaries target) - target_link_libraries(${target} PUBLIC MLLM_CPU fmt::fmt-header-only) +set(COMMON_SRC + ${DIR_SRC_CPU} + ${DIR_SRC_MEM_MANAGER} + ${DIR_SRC_EXP} + ${DIR_SRC} + ${PROJECT_SOURCE_DIR}/mllm/tokenizers/Tokenizer.cpp + ${PROJECT_SOURCE_DIR}/mllm/tokenizers/BPE/Bpe.cpp + ${PROJECT_SOURCE_DIR}/mllm/tokenizers/WordPiece/WordPiece.cpp + ${PROJECT_SOURCE_DIR}/mllm/tokenizers/Tiktoken/tiktoken.cpp + ${PROJECT_SOURCE_DIR}/mllm/tokenizers/Unicode.cpp + ${PROJECT_SOURCE_DIR}/mllm/tokenizers/UnicodeData.cpp + ${PROJECT_SOURCE_DIR}/mllm/processor/PreProcess.cpp +) + +set(VLM_SPECIFIC_SRC + ${PROJECT_SOURCE_DIR}/mllm/tokenizers/Unigram/Unigram.cpp + ${DIR_SRC_PROCESSOE} + ${DIR_THIRDPARTY_AUDIO} +) + +macro(func_set_compile_opts_defs target) if (MLLM_OPENMP) target_compile_options(${target} PRIVATE -fopenmp) - if (ARM) + endif() + if (QNN) + target_compile_definitions(${target} PRIVATE USE_QNN) + endif() +endmacro() + +macro(func_link_libs target) + target_link_libraries(${target} PUBLIC mllm_cpu fmt) + if (MLLM_OPENMP) + if (ARM AND NOT (CMAKE_HOST_SYSTEM_NAME STREQUAL "Darwin" AND NOT CMAKE_CROSSCOMPILING)) + # 非Mac的ARM,静态链接OpenMP target_link_libraries(${target} PUBLIC -fopenmp -static-openmp) - else () + else() + # 其它平台(含Mac),动态链接OpenMP target_link_libraries(${target} PUBLIC -fopenmp) - endif () - endif () + endif() + endif() + if(OPENCL) + target_link_libraries(${target} PUBLIC mllm_opencl 
${CMAKE_DL_LIBS}) + endif() if (QNN) - target_compile_definitions(${target} PRIVATE USE_QNN) - target_link_libraries(${target} PUBLIC MLLM_QNN ${CMAKE_DL_LIBS}) + target_link_libraries(${target} PUBLIC mllm_qnn ${CMAKE_DL_LIBS}) endif() if (MLLM_BUILD_XNNPACK_BACKEND) - target_link_libraries(${target} PRIVATE MllmXnnpackBackend) + target_link_libraries(${target} PRIVATE mllm_xnnpack) endif() endmacro() +add_library(mllm_llm STATIC ${COMMON_SRC}) +func_set_compile_opts_defs(mllm_llm) + +add_library(mllm_vlm STATIC ${VLM_SPECIFIC_SRC}) +target_link_libraries(mllm_vlm PUBLIC mllm_llm) +func_set_compile_opts_defs(mllm_vlm) + macro(func_llm_add_executable target) - add_executable(${target} - ${PROJECT_SOURCE_DIR}/examples/${target}.cpp - ${DIR_SRC_CPU} - ${DIR_SRC_MEM_MANAGER} - ${DIR_SRC_EXP} - ${DIR_SRC} - ${PROJECT_SOURCE_DIR}/src/tokenizers/Tokenizer.cpp - ${PROJECT_SOURCE_DIR}/src/tokenizers/BPE/Bpe.cpp - ${PROJECT_SOURCE_DIR}/src/tokenizers/WordPiece/WordPiece.cpp - ${PROJECT_SOURCE_DIR}/src/tokenizers/Tiktoken/tiktoken.cpp - ${PROJECT_SOURCE_DIR}/src/tokenizers/Unicode.cpp - ${PROJECT_SOURCE_DIR}/src/tokenizers/UnicodeData.cpp - ${PROJECT_SOURCE_DIR}/src/processor/PreProcess.cpp - ) - func_link_libaries(${target}) + if(EXISTS "${PROJECT_SOURCE_DIR}/examples/${target}.cpp") + add_executable(${target} ${PROJECT_SOURCE_DIR}/examples/${target}.cpp) + target_link_libraries(${target} PUBLIC mllm_llm) + func_set_compile_opts_defs(${target}) + func_link_libs(${target}) + else() + message(WARNING "Skip ${target}: ${PROJECT_SOURCE_DIR}/examples/${target}.cpp not found") + endif() endmacro() macro(func_vlm_add_executable target) - add_executable(${target} - ${PROJECT_SOURCE_DIR}/examples/${target}.cpp - ${DIR_SRC_CPU} - ${DIR_SRC_MEM_MANAGER} - ${DIR_SRC_EXP} - ${DIR_SRC} - ${PROJECT_SOURCE_DIR}/src/tokenizers/Tokenizer.cpp - ${PROJECT_SOURCE_DIR}/src/tokenizers/Unigram/Unigram.cpp - ${PROJECT_SOURCE_DIR}/src/tokenizers/Unicode.cpp - 
${PROJECT_SOURCE_DIR}/src/tokenizers/UnicodeData.cpp - ${PROJECT_SOURCE_DIR}/src/tokenizers/BPE/Bpe.cpp - ${PROJECT_SOURCE_DIR}/src/tokenizers/WordPiece/WordPiece.cpp - ${PROJECT_SOURCE_DIR}/src/processor/PreProcess.cpp - ${DIR_SRC_PROCESSOE} - ${DIR_THIRDPARTY_AUDIO} - ) - func_link_libaries(${target}) + if(EXISTS "${PROJECT_SOURCE_DIR}/examples/${target}.cpp") + add_executable(${target} ${PROJECT_SOURCE_DIR}/examples/${target}.cpp) + target_link_libraries(${target} PUBLIC mllm_vlm) + func_set_compile_opts_defs(${target}) + func_link_libs(${target}) + else() + message(WARNING "Skip ${target}: ${PROJECT_SOURCE_DIR}/examples/${target}.cpp not found") + endif() endmacro() +func_llm_add_executable(test) func_llm_add_executable(mllm_benchmark) func_llm_add_executable(demo_llama) func_llm_add_executable(demo_tinyllama) @@ -81,8 +105,14 @@ func_llm_add_executable(demo_phonelm) func_llm_add_executable(demo_llama3) func_llm_add_executable(demo_minicpm_moe_mbm) func_llm_add_executable(demo_qwen_sd) +func_llm_add_executable(demo_qwen_batch) func_llm_add_executable(demo_minicpm_moe_mbp) - +func_llm_add_executable(demo_bailing_moe) +func_llm_add_executable(demo_bailing_moe2) +func_llm_add_executable(demo_bailing_moe_mbp) +func_llm_add_executable(demo_bailing_moe2_mbp) +func_llm_add_executable(demo_smallthinker) +func_llm_add_executable(demo_smallthinker_mbp) func_vlm_add_executable(demo_llava) func_vlm_add_executable(demo_fuyu) @@ -92,65 +122,19 @@ func_vlm_add_executable(demo_imagebind) func_vlm_add_executable(demo_imagebind_1mod) func_vlm_add_executable(demo_phi3v) func_vlm_add_executable(demo_qwen2_vl) +func_vlm_add_executable(demo_qwen2.5_vl) func_vlm_add_executable(demo_showui) -# func_vlm_add_executable(demo) - -# QNN demo +func_vlm_add_executable(demo_qwen2_vl_vtp) +func_vlm_add_executable(demo_showui_vtp) if(QNN) + # func_llm_add_executable(demo_phonelm_npu) func_llm_add_executable(demo_qwen_npu) - func_llm_add_executable(main_qwen_npu) - 
func_llm_add_executable(demo_phonelm_npu) - func_llm_add_executable(main_phonelm_npu) - func_llm_add_executable(demo_qwen2.5_npu) - func_llm_add_executable(demo_qwen_pipeline) + # func_llm_add_executable(demo_qwen_npu_pipeline) + func_vlm_add_executable(demo_qwen2_vl_npu) endif() if(MLLM_BUILD_XNNPACK_BACKEND) func_llm_add_executable(demo_qwen_xp) -endif() - - -# old main -# macro(func_o_vlm_add_executable target) -# add_executable(${target} -# ${PROJECT_SOURCE_DIR}/examples/${target}.cpp -# ${DIR_SRC_CPU} -# ${DIR_SRC_MEM_MANAGER} -# ${DIR_SRC_EXP} -# ${DIR_SRC} -# ${PROJECT_SOURCE_DIR}/src/tokenizers/Tokenizer.cpp -# ${PROJECT_SOURCE_DIR}/src/tokenizers/Unigram/Unigram.cpp -# ${PROJECT_SOURCE_DIR}/src/tokenizers/BPE/Bpe.cpp -# ${PROJECT_SOURCE_DIR}/src/processor/PreProcess.cpp -# ${PROJECT_SOURCE_DIR}/src/processor/ClipPreProcess.cpp -# ${PROJECT_SOURCE_DIR}/src/processor/FuyuPreProcess.cpp -# ) -# func_link_libaries(${target}) -# endmacro() -# macro(func_o_avlm_add_executable target) -# add_executable(${target} -# ${PROJECT_SOURCE_DIR}/examples/${target}.cpp -# ${DIR_SRC_CPU} -# ${DIR_SRC_MEM_MANAGER} -# ${DIR_SRC_EXP} -# ${DIR_SRC} -# ${PROJECT_SOURCE_DIR}/src/tokenizers/Tokenizer.cpp -# ${PROJECT_SOURCE_DIR}/src/tokenizers/Unigram/Unigram.cpp -# ${PROJECT_SOURCE_DIR}/src/tokenizers/BPE/Bpe.cpp -# ${PROJECT_SOURCE_DIR}/src/processor/PreProcess.cpp -# ${PROJECT_SOURCE_DIR}/src/processor/ClipPreProcess.cpp -# ${DIR_SRC_PROCESSOE} -# ${DIR_THIRDPARTY_AUDIO} -# ) -# func_link_libaries(${target}) -# endmacro() -# func_llm_add_executable(main_llama) -# func_llm_add_executable(main_alpaca) -# func_llm_add_executable(main_tinyllama) -# func_o_vlm_add_executable(main_llava) -# func_o_vlm_add_executable(main_fuyu) -# func_o_vlm_add_executable(main_vit) -# func_o_vlm_add_executable(main_clip) -# func_o_avlm_add_executable(main_imagebind) +endif() \ No newline at end of file diff --git a/examples/demo_bailing_moe.cpp b/examples/demo_bailing_moe.cpp new file mode 100644 
index 000000000..75fb23120 --- /dev/null +++ b/examples/demo_bailing_moe.cpp @@ -0,0 +1,93 @@ +/** + * @file demo_bailing_moe.cpp + * @brief A demo for using Bailing MoE model. + * @author Rongjie Yi + * @date 2025-07-01 + * + */ +#include "Types.hpp" +#include "cmdline.h" +#include "models/ling/configuration_bailing_moe.hpp" +#include "models/ling/modeling_bailing_moe.hpp" +#include "models/ling/tokenization_bailing.hpp" + +using namespace mllm; + +int main(int argc, char **argv) { + std::iostream::sync_with_stdio(false); + + cmdline::parser cmdParser; + cmdParser.add("device", 'd', "mllm backend [0:`cpu` | 1:`opencl`]", false, 0); + cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", false, "../vocab/ling_vocab.mllm"); + cmdParser.add("merge", 'e', "specify mllm merge file path", false, "../vocab/ling_merges.txt"); + string default_model_path = "../models/ling-lite-1.5-q4_0.mllm"; +#if defined(ARM) + default_model_path = "../models/ling-lite-1.5-kai_q4_0.mllm"; +#endif + cmdParser.add("model", 'm', "specify mllm model path", false, default_model_path); + cmdParser.add("limits", 'l', "max KV cache size", false, 500); + cmdParser.add("thread", 't', "num of threads", false, 4); + cmdParser.add("gen", 'g', "max new tokens", false, -1); + cmdParser.parse_check(argc, argv); + + string vocab_path = cmdParser.get("vocab"); + string merge_path = cmdParser.get("merge"); + string model_path = cmdParser.get("model"); + int tokens_limit = cmdParser.get("limits"); + int max_new_tokens = cmdParser.get("gen"); + CPUBackend::cpu_threads = cmdParser.get("thread"); + BackendType device = (BackendType)cmdParser.get("device"); + assert((device == MLLM_CPU || device == MLLM_OPENCL) && "device not supports!"); + + auto tokenizer = BaiLingTokenizer(vocab_path, merge_path); + BailingMoeConfig config(tokens_limit); +#ifdef USE_OPENCL + if (device == MLLM_OPENCL) { + config.dtype = MLLM_TYPE_F16; + config.attn_implementation = "eager"; + } +#endif + // 
config.attn_implementation = "sage_attention"; + auto model = BailingMoeForCausalLM(config); +#ifdef USE_OPENCL + model = model.to(device); +#endif + model.load(model_path); + + vector in_strs = { + "怎样计算1+2+...+100的和?", + "Who are you?", + "Give me a short introduction to large language model.", + "背诵天下第一骈文", + "夕焼けとコオロギが一斉に飛び、秋水は共に天一色になる。上面句子翻译成中文", + "你写一首七言绝句。", + "背诵一下水调歌头。", + "清晨的阳光透过薄纱窗帘,懒洋洋地洒在木地板上,空气中飘散着咖啡豆研磨后特有的醇厚香气。窗外传来几声清脆的鸟鸣,伴随着远处隐约的车流声,构成这座都市尚未完全苏醒的独特交响。书桌上摊开着昨夜未读完的书,书页边缘已微微卷起。厨房里,水壶正发出细密的声响,预示着一天的热饮即将就绪。昨日的计划表贴在冰箱门上,几个重要的待办事项用红笔醒目地圈出。公园里晨练的人们身影绰绰,有节奏的脚步声和太极音乐交织。一只橘猫敏捷地跃上围墙,在晨光中伸展着腰肢,神态悠闲得仿佛它是这片领地的主人。街角的面包店刚拉开铁门,新鲜出炉的面包香气迫不及待地涌向街头。公交站台上,等待的乘客低头刷着手机屏幕,神情各异。云朵缓慢地在湛蓝的天空中移动,时间似乎被拉长了片刻。生活就在这些微小的、平凡的细节里徐徐展开,既不惊天动地,却也充满细碎的温暖和实在的步履。新的一天开始了。\n​​请在以上文本中找出描述“气味”的句子(复制出来),然后判断叙述者对“橘猫”的态度是正面还是负面,最后请用三个成语概括文中描绘的早晨氛围。", + "项羽已杀卿子冠军,威震楚国,名闻诸侯。乃遣当阳君、蒲将军将卒二万渡河,救巨鹿。战少利,陈馀复请兵。项羽乃悉引兵渡河,皆沉船,破釜甑,烧庐舍,持三日粮,以示士卒必死,无一还心。于是至则围王离,与秦军遇,九战,绝其甬道,大破之,杀苏角,虏王离。涉间不降楚,自烧杀。当是时,楚兵冠诸侯。诸侯军救巨鹿下者十余壁,莫敢纵兵。及楚击秦,诸将皆从壁上观。楚战士无不一以当十,楚兵呼声动天,诸侯军无不人人惴恐。于是已破秦军,项羽召见诸侯将,入辕门,无不膝行而前,莫敢仰视。项羽由是始为诸侯上将军,诸侯皆属焉。 问题:结合项羽在巨鹿之战中的战术决策与心理威慑手段,分析其如何实现『楚战士无不一以当十』的战斗效应,并论述这种军事心理学实践对诸侯将领『膝行而前,莫敢仰视』行为模式的生成机制。", + }; + for (int i = 0; i < in_strs.size(); ++i) { + auto input_str = tokenizer.apply_chat_template(in_strs[i]); + auto input_tensor = tokenizer.tokenize(input_str); + // std::cout << "[tks]" << input_tensor.sequence() << " tokens" << std::endl; + std::cout << "[Q] " << in_strs[i] << std::endl; + std::cout << "[A] " << std::flush; + + LlmTextGeneratorOpts opt{ + .max_new_tokens = max_new_tokens > 0 ? 
max_new_tokens : static_cast(tokens_limit - input_tensor.sequence()), + .do_sample = false, + .temperature = 0.3F, + .top_k = 50, + .top_p = 0.F, + }; + model.generate(input_tensor, opt, [&](unsigned int out_token) -> bool { + auto out_string = tokenizer.detokenize({out_token}); + auto [not_end, output_string] = tokenizer.postprocess(out_string); + if (!not_end) { return false; } + std::cout << output_string << std::flush; + return true; + }); + std::cout << "\n"; + model.clear_kvcache(); + model.profiling(); + } +} diff --git a/examples/demo_bailing_moe_mbp.cpp b/examples/demo_bailing_moe_mbp.cpp new file mode 100644 index 000000000..c0071b45a --- /dev/null +++ b/examples/demo_bailing_moe_mbp.cpp @@ -0,0 +1,98 @@ +/** + * @file demo_bailing_moe.cpp + * @brief A demo for using Bailing MoE model. + * @author Rongjie Yi + * @date 2025-07-01 + * + */ +#include "Module.hpp" +#include "cmdline.h" +#include "models/ling/configuration_bailing_moe.hpp" +#include "models/ling/mbp/modeling_bailing_moe_mbp.hpp" +// #include "models/ling/mbp/modeling_bailing_moe_mbp_e.hpp" +// #include "models/ling/mbp/modeling_bailing_moe_mbppip.hpp" +#include "models/ling/tokenization_bailing.hpp" +#include + +using namespace mllm; + +int main(int argc, char **argv) { + std::iostream::sync_with_stdio(false); + Module::alloc_mmap = false; + + cmdline::parser cmdParser; + cmdParser.add("device", 'd', "mllm backend [0:`cpu` | 1:`opencl`]", false, 0); + cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", false, "../vocab/ling_vocab.mllm"); + cmdParser.add("merge", 'e', "specify mllm merge file path", false, "../vocab/ling_merges.txt"); + string default_model_path = "../models/ling-lite-1.5-q4_0.mllm"; +#if defined(ARM) + default_model_path = "../models/ling-lite-1.5-kai_q4_0.mllm"; +#endif + cmdParser.add("model", 'm', "specify mllm model path", false, default_model_path); + cmdParser.add("limits", 'l', "max KV cache size", false, 500); + cmdParser.add("thread", 't', "num of 
threads", false, 4); + cmdParser.add("gen", 'g', "max new tokens", false, -1); + cmdParser.parse_check(argc, argv); + + string vocab_path = cmdParser.get("vocab"); + string merge_path = cmdParser.get("merge"); + string model_path = cmdParser.get("model"); + int tokens_limit = cmdParser.get("limits"); + int max_new_tokens = cmdParser.get("gen"); + CPUBackend::cpu_threads = cmdParser.get("thread"); + BackendType device = (BackendType)cmdParser.get("device"); + assert((device == MLLM_CPU || device == MLLM_OPENCL) && "device not supports!"); + + auto tokenizer = BaiLingTokenizer(vocab_path, merge_path); + BailingMoeConfig config(tokens_limit); +#ifdef USE_OPENCL + if (device == MLLM_OPENCL) { + config.dtype = MLLM_TYPE_F16; + config.attn_implementation = "eager"; + } +#endif + // config.attn_implementation = "sage_attention"; + auto model = BailingMoeForCausalLM(config); +#ifdef USE_OPENCL + model = model.to(device); +#endif + model.load(model_path); + + vector in_strs = { + "怎样计算1+2+...+100的和?", + "Who are you?", + "Give me a short introduction to large language model.", + "夕焼けとコオロギが一斉に飛び、秋水は共に天一色になる。上面句子翻译成中文", + "你写一首七言绝句。", + "背诵一下水调歌头。", + "清晨的阳光透过薄纱窗帘,懒洋洋地洒在木地板上,空气中飘散着咖啡豆研磨后特有的醇厚香气。窗外传来几声清脆的鸟鸣,伴随着远处隐约的车流声,构成这座都市尚未完全苏醒的独特交响。书桌上摊开着昨夜未读完的书,书页边缘已微微卷起。厨房里,水壶正发出细密的声响,预示着一天的热饮即将就绪。昨日的计划表贴在冰箱门上,几个重要的待办事项用红笔醒目地圈出。公园里晨练的人们身影绰绰,有节奏的脚步声和太极音乐交织。一只橘猫敏捷地跃上围墙,在晨光中伸展着腰肢,神态悠闲得仿佛它是这片领地的主人。街角的面包店刚拉开铁门,新鲜出炉的面包香气迫不及待地涌向街头。公交站台上,等待的乘客低头刷着手机屏幕,神情各异。云朵缓慢地在湛蓝的天空中移动,时间似乎被拉长了片刻。生活就在这些微小的、平凡的细节里徐徐展开,既不惊天动地,却也充满细碎的温暖和实在的步履。新的一天开始了。\n​​请在以上文本中找出描述“气味”的句子(复制出来),然后判断叙述者对“橘猫”的态度是正面还是负面,最后请用三个成语概括文中描绘的早晨氛围。", + "项羽已杀卿子冠军,威震楚国,名闻诸侯。乃遣当阳君、蒲将军将卒二万渡河,救巨鹿。战少利,陈馀复请兵。项羽乃悉引兵渡河,皆沉船,破釜甑,烧庐舍,持三日粮,以示士卒必死,无一还心。于是至则围王离,与秦军遇,九战,绝其甬道,大破之,杀苏角,虏王离。涉间不降楚,自烧杀。当是时,楚兵冠诸侯。诸侯军救巨鹿下者十余壁,莫敢纵兵。及楚击秦,诸将皆从壁上观。楚战士无不一以当十,楚兵呼声动天,诸侯军无不人人惴恐。于是已破秦军,项羽召见诸侯将,入辕门,无不膝行而前,莫敢仰视。项羽由是始为诸侯上将军,诸侯皆属焉。 问题:结合项羽在巨鹿之战中的战术决策与心理威慑手段,分析其如何实现『楚战士无不一以当十』的战斗效应,并论述这种军事心理学实践对诸侯将领『膝行而前,莫敢仰视』行为模式的生成机制。", + }; + + ling_mbp_init(config.num_hidden_layers, 
config.num_experts); + for (int i = 0; i < in_strs.size(); ++i) { + auto input_str = tokenizer.apply_chat_template(in_strs[i]); + auto input_tensor = tokenizer.tokenize(input_str); + std::cout << "[Q] " << in_strs[i] << std::endl; + std::cout << "[A] " << std::flush; + + LlmTextGeneratorOpts opt{ + .max_new_tokens = max_new_tokens > 0 ? max_new_tokens : static_cast(tokens_limit - input_tensor.sequence()), + .do_sample = false, + .temperature = 0.3F, + .top_k = 50, + .top_p = 0.F, + }; + model.generate(input_tensor, opt, [&](unsigned int out_token) -> bool { + auto out_string = tokenizer.detokenize({out_token}); + auto [not_end, output_string] = tokenizer.postprocess(out_string); + if (!not_end) { return false; } + std::cout << output_string << std::flush; + return true; + }); + std::cout << "\n"; + model.clear_kvcache(); + model.profiling(); + // prinMBPtimes(); + } +} diff --git a/examples/demo_ds_qwen2.cpp b/examples/demo_ds_qwen2.cpp index 36ef9ee6d..afa588790 100644 --- a/examples/demo_ds_qwen2.cpp +++ b/examples/demo_ds_qwen2.cpp @@ -23,7 +23,7 @@ int main(int argc, char **argv) { cmdParser.add("merge", 'e', "specify mllm merge file path", false, "../vocab/ds_qwen2_merges.txt"); cmdParser.add("model", 'm', "specify mllm model path", false, "../models/ds-qwen-2-1.5b-q4_k.mllm"); cmdParser.add("billion", 'b', "only support ds-1.5B right now", false, "ds-1.5B"); - cmdParser.add("limits", 'l', "max KV cache size", false, 400); + cmdParser.add("limits", 'l', "max KV cache size", false, 1040); cmdParser.add("thread", 't', "num of threads", false, 4); cmdParser.parse_check(argc, argv); @@ -47,7 +47,7 @@ int main(int argc, char **argv) { std::cout << "[A] " << std::flush; LlmTextGeneratorOpts opt{ - .max_new_tokens = 300, + .max_new_tokens = 1000, .do_sample = true, .temperature = 0.3F, .top_k = 50, diff --git a/examples/demo_llama3.cpp b/examples/demo_llama3.cpp index 005f6c777..c7957e43e 100644 --- a/examples/demo_llama3.cpp +++ b/examples/demo_llama3.cpp @@ -13,8 
+13,8 @@ using namespace mllm; int main(int argc, char **argv) { cmdline::parser cmdParser; cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", false, "../vocab/llama3_tokenizer.model"); - cmdParser.add("model", 'm', "specify mllm model path", false, "../models/llama-3.2-1b-instruct_q4_k.mllm"); - cmdParser.add("billion", 'b', "[1B | 3B |]", false, "1B"); + cmdParser.add("model", 'm', "specify mllm model path", false, "../models/llama-3.2-1b-instruct-kai_q4_0.mllm"); + cmdParser.add("billion", 'b', "[1B | 3B |]", false, "1B-lm"); cmdParser.add("limits", 'l', "max KV cache size", false, 400); cmdParser.add("thread", 't', "num of threads", false, 4); cmdParser.parse_check(argc, argv); diff --git a/examples/demo_minicpm_moe_mbm.cpp b/examples/demo_minicpm_moe_mbm.cpp index 6cd876429..44773c5f2 100644 --- a/examples/demo_minicpm_moe_mbm.cpp +++ b/examples/demo_minicpm_moe_mbm.cpp @@ -8,6 +8,7 @@ using namespace mllm; int main(int argc, char **argv) { + Module::alloc_mmap = false; cmdline::parser cmdParser; cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", false, "../vocab/minicpm_vocab.mllm"); cmdParser.add("model", 'm', "specify mllm model path", false, "../models/minicpm-moe-8x2b-q4_k.mllm"); diff --git a/examples/demo_minicpm_moe_mbp.cpp b/examples/demo_minicpm_moe_mbp.cpp index be85a8d8c..e2da3876c 100644 --- a/examples/demo_minicpm_moe_mbp.cpp +++ b/examples/demo_minicpm_moe_mbp.cpp @@ -8,6 +8,7 @@ using namespace mllm; int main(int argc, char **argv) { + Module::alloc_mmap = false; cmdline::parser cmdParser; cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", false, "../vocab/minicpm_vocab.mllm"); cmdParser.add("model", 'm', "specify mllm model path", false, "../models/minicpm-moe-8x2b-q4_k.mllm"); diff --git a/examples/demo_phonelm_npu.cpp b/examples/demo_phonelm_npu.cpp index 7d269eb94..494d5d79d 100644 --- a/examples/demo_phonelm_npu.cpp +++ b/examples/demo_phonelm_npu.cpp @@ -1,4 +1,8 @@ + + +#include "Context.hpp" 
#include "Module.hpp" +#include "QNNBackend.hpp" #include "Types.hpp" #include #include "backends/cpu/CPUBackend.hpp" @@ -13,8 +17,10 @@ int main(int argc, char **argv) { cmdline::parser cmdParser; cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", false, "../vocab/phonelm_vocab.mllm"); cmdParser.add("merge", 'e', "specify mllm merge file path", false, "../vocab/phonelm_merges.txt"); + cmdParser.add("model", 'm', "specify mllm model path", false, "../models/phonelm-1.5b-instruct-int8.mllm"); cmdParser.add("decoding", 'd', "specify mllm decoding model path", false, "../models/phonelm-1.5b-instruct-q4_0_4_4.mllm"); + cmdParser.add("limits", 'l', "max KV cache size", false, 400); cmdParser.add("thread", 't', "num of threads", false, 4); cmdParser.add("chunk", 'c', "chunk size", false, 64); @@ -28,6 +34,8 @@ int main(int argc, char **argv) { int chunk_size = cmdParser.get("chunk"); CPUBackend::cpu_threads = cmdParser.get("thread"); + Module::initBackend(MLLM_QNN); + auto tokenizer = SmolLMTokenizer(vocab_path, merge_path); PhoneLMConfig config(tokens_limit, "1.5B"); auto model = PhoneLMForCausalLM_NPU(config, chunk_size); @@ -51,14 +59,16 @@ int main(int argc, char **argv) { if (!not_end) { return false; } return true; }); - Module::isFirstChunk = false; - static_cast(Backend::global_backends[MLLM_CPU])->setCurSequenceLength(0); - static_cast(Backend::global_backends[MLLM_CPU])->setExecutionType(PROMPT); - static_cast(Backend::global_backends[MLLM_CPU])->toggleSwitching(); - // turn on the multi-chunk prefilling - Module::isMultiChunkPrefilling = true; + Context::Instance().inference_state().setQnnGraphFrozen(true); + Context::Instance().inference_state().setCurSequenceLength(0); + Context::Instance().inference_state().setExecutionType(PROMPT); + Context::Instance().inference_state().toggleSwitching(); + // warmup END std::cout << "Warmup finished." 
<< std::endl; + if (!std::filesystem::exists("qnn_context.bin")) { + static_cast(Backend::global_backends[MLLM_QNN].get())->saveQNNContext(); + } vector in_strs = { "Give me a short introduction to large language model.", @@ -69,86 +79,86 @@ int main(int argc, char **argv) { "Please introduce Beijing University of Posts and Telecommunications.", "\"Large Language Models (LLMs) are advanced artificial intelligence systems designed to understand and generate human-like text. These models are trained on vast amounts of data, enabling them to perform a wide range of tasks, from answering questions and summarizing text to generating creative content and engaging in conversational dialogue. LLMs like GPT-3 and GPT-4, developed by OpenAI, have set new benchmarks in natural language processing by leveraging deep learning architectures, particularly transformer models, which excel at capturing context and relationships within text. The scalability and versatility of LLMs make them invaluable tools for applications in education, customer service, content creation, and more. However, their deployment also raises ethical considerations, including issues of bias, misinformation, and the potential for misuse. 
As the field continues to evolve, ongoing research and responsible deployment strategies are essential to harnessing the full potential of these powerful AI systems while mitigating their risks.\"\nGenerate a title based on the above text."}; - for (int i = 0; i < in_strs.size(); ++i) { - auto input_str = tokenizer.apply_chat_template(in_strs[i]); - auto [real_seq_length, input_tensor] = tokenizer.tokenizePaddingByChunk(input_str, chunk_size, config.vocab_size); - const int seq_length_padding = (chunk_size - real_seq_length % chunk_size) + real_seq_length; - const int chunk_num = seq_length_padding / chunk_size; - bool isSwitched = false; - // std::cout << "real seq length: " << real_seq_length << " padding to: " << seq_length_padding << " chunk num: " << chunk_num << std::endl; - std::cout << "[Q] " << in_strs[i] << std::endl; - std::cout << "[A] " << std::flush; - - // set total seq length for HeadLinear execute, which can not get the real seq length from Opts - static_cast(Backend::global_backends[MLLM_CPU])->setTotalSequenceLength(real_seq_length); - // set chunk size for the HeadLinear execute, which can not get the chunk size from Opts - static_cast(Backend::global_backends[MLLM_CPU])->setChunkSize(chunk_size); - - // tensor vectors to save the chunked tensors of the QNN prefilling input - vector chunked_tensors(chunk_num); - LlmTextGeneratorOpts opt{ - .max_new_tokens = 1, - .do_sample = false, - .is_padding = true, - .seq_before_padding = real_seq_length, - .chunk_size = chunk_size, - }; - - for (int chunk_id = 0; chunk_id < chunk_num; ++chunk_id) { - chunked_tensors[chunk_id].setBackend(Backend::global_backends[MLLM_CPU]); - chunked_tensors[chunk_id].setTtype(INPUT_TENSOR); - chunked_tensors[chunk_id].reshape(1, 1, chunk_size, 1); - chunked_tensors[chunk_id].setName("input-chunk-" + to_string(chunk_id)); - chunked_tensors[chunk_id].shallowCopyFrom(&input_tensor, false, {0, 0, chunk_id * chunk_size, 0}); - - model.generate(chunked_tensors[chunk_id], opt, 
[&](unsigned int out_token) -> bool { - // if (i != 0 && !isSwitched && chunk_id == 0) { - if (!isSwitched && chunk_id == 0) { - // turn off switching at the first chunk of following inputs - static_cast(Backend::global_backends[MLLM_CPU])->toggleSwitching(); - isSwitched = true; - } - auto out_string = tokenizer.detokenize({out_token}); - auto [not_end, output_string] = tokenizer.postprocess(out_string); - if (!not_end) { return false; } - if (chunk_id == chunk_num - 1) { // print the output of the last chunk - std::cout << output_string << std::flush; - } - return true; - }); - Module::isFirstChunk = false; - } - - // turn on switching, set sequence length and execution type - static_cast(Backend::global_backends[MLLM_CPU])->setCurSequenceLength(real_seq_length); - static_cast(Backend::global_backends[MLLM_CPU])->setExecutionType(AUTOREGRESSIVE); - static_cast(Backend::global_backends[MLLM_CPU])->toggleSwitching(); - - LlmTextGeneratorOpts decoding_opt{ - .max_new_tokens = 100, - .do_sample = false, - .temperature = 0.3f, - .top_k = 50, - .top_p = 0.f, - .is_padding = false, - }; - isSwitched = false; - decoding_model.generate(chunked_tensors.back(), decoding_opt, [&](unsigned int out_token) -> bool { - if (!isSwitched) { // turn off switching - static_cast(Backend::global_backends[MLLM_CPU])->toggleSwitching(); - isSwitched = true; - } - auto out_string = tokenizer.detokenize({out_token}); - auto [not_end, output_string] = tokenizer.postprocess(out_string); - if (!not_end) { return false; } - std::cout << output_string << std::flush; - return true; - }); - - // turn on switching, set sequence length and execution type - static_cast(Backend::global_backends[MLLM_CPU])->setCurSequenceLength(0); - static_cast(Backend::global_backends[MLLM_CPU])->setExecutionType(PROMPT); - static_cast(Backend::global_backends[MLLM_CPU])->toggleSwitching(); - std::cout << "\n"; - } -} \ No newline at end of file + // for (int i = 0; i < in_strs.size(); ++i) { + // auto input_str = 
tokenizer.apply_chat_template(in_strs[i]); + // auto [real_seq_length, input_tensor] = tokenizer.tokenizePaddingByChunk(input_str, chunk_size, config.vocab_size); + // const int seq_length_padding = (chunk_size - real_seq_length % chunk_size) + real_seq_length; + // const int chunk_num = seq_length_padding / chunk_size; + // bool isSwitched = false; + // // std::cout << "real seq length: " << real_seq_length << " padding to: " << seq_length_padding << " chunk num: " << chunk_num << std::endl; + // std::cout << "[Q] " << in_strs[i] << std::endl; + // std::cout << "[A] " << std::flush; + + // // set total seq length for HeadLinear execute, which can not get the real seq length from Opts + // Context::Instance().inference_state().setTotalSequenceLength(real_seq_length); + // // set chunk size for the HeadLinear execute, which can not get the chunk size from Opts + // Context::Instance().inference_state().setChunkSize(chunk_size); + + // // tensor vectors to save the chunked tensors of the QNN prefilling input + // vector chunked_tensors(chunk_num); + // LlmTextGeneratorOpts opt{ + // .max_new_tokens = 1, + // .do_sample = false, + // .is_padding = true, + // .seq_before_padding = real_seq_length, + // .chunk_size = chunk_size, + // }; + + // for (int chunk_id = 0; chunk_id < chunk_num; ++chunk_id) { + // chunked_tensors[chunk_id].setBackend(Backend::global_backends[MLLM_CPU].get()); + // chunked_tensors[chunk_id].setTtype(INPUT_TENSOR); + // chunked_tensors[chunk_id].reshape(1, 1, chunk_size, 1); + // chunked_tensors[chunk_id].setName("input-chunk-" + to_string(chunk_id)); + // chunked_tensors[chunk_id].shallowCopyFrom(input_tensor, false, {0, 0, chunk_id * chunk_size, 0}); + + // model.generate(chunked_tensors[chunk_id], opt, [&](unsigned int out_token) -> bool { + // // if (i != 0 && !isSwitched && chunk_id == 0) { + // if (!isSwitched && chunk_id == 0) { + // // turn off switching at the first chunk of following inputs + // 
Context::Instance().inference_state().toggleSwitching(); + // isSwitched = true; + // } + // auto out_string = tokenizer.detokenize({out_token}); + // auto [not_end, output_string] = tokenizer.postprocess(out_string); + // if (!not_end) { return false; } + // if (chunk_id == chunk_num - 1) { // print the output of the last chunk + // std::cout << output_string << std::flush; + // } + // return true; + // }); + // Context::Instance().inference_state().setQnnGraphFrozen(true); + // } + + // // turn on switching, set sequence length and execution type + // Context::Instance().inference_state().setCurSequenceLength(real_seq_length); + // Context::Instance().inference_state().setExecutionType(AUTOREGRESSIVE); + // Context::Instance().inference_state().toggleSwitching(); + + // LlmTextGeneratorOpts decoding_opt{ + // .max_new_tokens = 100, + // .do_sample = false, + // .temperature = 0.3f, + // .top_k = 50, + // .top_p = 0.f, + // .is_padding = false, + // }; + // isSwitched = false; + // decoding_model.generate(chunked_tensors.back(), decoding_opt, [&](unsigned int out_token) -> bool { + // if (!isSwitched) { // turn off switching + // Context::Instance().inference_state().toggleSwitching(); + // isSwitched = true; + // } + // auto out_string = tokenizer.detokenize({out_token}); + // auto [not_end, output_string] = tokenizer.postprocess(out_string); + // if (!not_end) { return false; } + // std::cout << output_string << std::flush; + // return true; + // }); + + // // turn on switching, set sequence length and execution type + // Context::Instance().inference_state().setCurSequenceLength(0); + // Context::Instance().inference_state().setExecutionType(PROMPT); + // Context::Instance().inference_state().toggleSwitching(); + // std::cout << "\n"; + // } +} diff --git a/examples/demo_qwen.cpp b/examples/demo_qwen.cpp index 1c70d52ce..282705bdd 100644 --- a/examples/demo_qwen.cpp +++ b/examples/demo_qwen.cpp @@ -7,6 +7,7 @@ * @copyright Copyright (c) 2024 * */ +#include 
"DataType.hpp" #include "cmdline.h" #include "models/qwen/configuration_qwen.hpp" #include "models/qwen/modeling_qwen.hpp" @@ -18,11 +19,18 @@ int main(int argc, char **argv) { std::iostream::sync_with_stdio(false); cmdline::parser cmdParser; + cmdParser.add("device", 'd', "mllm backend [0:`cpu` | 1:`opencl`]", false, 0); cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", false, "../vocab/qwen2.5_vocab.mllm"); cmdParser.add("merge", 'e', "specify mllm merge file path", false, "../vocab/qwen2.5_merges.txt"); - cmdParser.add("model", 'm', "specify mllm model path", false, "../models/qwen-2.5-3b-instruct-q4_0_4x4.mllm"); - cmdParser.add("billion", 'b', "[0.5B | 1.8B | 1.5B | 3B |]", false, "3B"); - cmdParser.add("limits", 'l', "max KV cache size", false, 400); + string default_model_path = "../models/qwen-2.5-1.5b-instruct-q4_0_4_4.mllm"; + string default_model_billion = "1.5b"; +#if defined(ARM) + default_model_path = "../models/qwen-2.5-1.5b-instruct-kai_q4_0_lm.mllm"; + default_model_billion = "1.5b-lm"; +#endif + cmdParser.add("model", 'm', "specify mllm model path", false, default_model_path); + cmdParser.add("billion", 'b', "[0.5B | 1.8B | 1.5B | 3B |]", false, default_model_billion); + cmdParser.add("limits", 'l', "max KV cache size", false, 550); cmdParser.add("thread", 't', "num of threads", false, 4); cmdParser.parse_check(argc, argv); @@ -32,16 +40,29 @@ int main(int argc, char **argv) { string model_billion = cmdParser.get("billion"); int tokens_limit = cmdParser.get("limits"); CPUBackend::cpu_threads = cmdParser.get("thread"); + BackendType device = (BackendType)cmdParser.get("device"); + assert((device == MLLM_CPU || device == MLLM_OPENCL) && "device not supports!"); auto tokenizer = QWenTokenizer(vocab_path, merge_path); - QWenConfig config(tokens_limit, model_billion, RoPEType::HFHUBROPE); + QWenConfig config(tokens_limit, model_billion); +#ifdef USE_OPENCL + if (device == MLLM_OPENCL) { + config.dtype = MLLM_TYPE_F16; + // 
config.attn_implementation = "eager"; + } +#endif + // config.attn_implementation = "sage_attention"; auto model = QWenForCausalLM(config); +#ifdef USE_OPENCL + model = model.to(device); +#endif model.load(model_path); vector in_strs = { - "Hello, who are you?", - "What can you do?", - "Please introduce Beijing University of Posts and Telecommunications.", + "Give me a short introduction to large language model.", + "介绍一下你自己。", + "清晨的阳光透过薄纱窗帘,懒洋洋地洒在木地板上,空气中飘散着咖啡豆研磨后特有的醇厚香气。窗外传来几声清脆的鸟鸣,伴随着远处隐约的车流声,构成这座都市尚未完全苏醒的独特交响。书桌上摊开着昨夜未读完的书,书页边缘已微微卷起。厨房里,水壶正发出细密的声响,预示着一天的热饮即将就绪。昨日的计划表贴在冰箱门上,几个重要的待办事项用红笔醒目地圈出。公园里晨练的人们身影绰绰,有节奏的脚步声和太极音乐交织。一只橘猫敏捷地跃上围墙,在晨光中伸展着腰肢,神态悠闲得仿佛它是这片领地的主人。街角的面包店刚拉开铁门,新鲜出炉的面包香气迫不及待地涌向街头。公交站台上,等待的乘客低头刷着手机屏幕,神情各异。云朵缓慢地在湛蓝的天空中移动,时间似乎被拉长了片刻。生活就在这些微小的、平凡的细节里徐徐展开,既不惊天动地,却也充满细碎的温暖和实在的步履。新的一天开始了。\n​​请在以上文本中找出描述“气味”的句子(复制出来),然后判断叙述者对“橘猫”的态度是正面还是负面,最后请用三个成语概括文中描绘的早晨氛围。", + "项羽已杀卿子冠军,威震楚国,名闻诸侯。乃遣当阳君、蒲将军将卒二万渡河,救巨鹿。战少利,陈馀复请兵。项羽乃悉引兵渡河,皆沉船,破釜甑,烧庐舍,持三日粮,以示士卒必死,无一还心。于是至则围王离,与秦军遇,九战,绝其甬道,大破之,杀苏角,虏王离。涉间不降楚,自烧杀。当是时,楚兵冠诸侯。诸侯军救巨鹿下者十余壁,莫敢纵兵。及楚击秦,诸将皆从壁上观。楚战士无不一以当十,楚兵呼声动天,诸侯军无不人人惴恐。于是已破秦军,项羽召见诸侯将,入辕门,无不膝行而前,莫敢仰视。项羽由是始为诸侯上将军,诸侯皆属焉。 问题:结合项羽在巨鹿之战中的战术决策与心理威慑手段,分析其如何实现『楚战士无不一以当十』的战斗效应,并论述这种军事心理学实践对诸侯将领『膝行而前,莫敢仰视』行为模式的生成机制。", }; for (int i = 0; i < in_strs.size(); ++i) { auto input_str = tokenizer.apply_chat_template(in_strs[i]); @@ -50,8 +71,8 @@ int main(int argc, char **argv) { std::cout << "[A] " << std::flush; LlmTextGeneratorOpts opt{ - .max_new_tokens = 100, - .do_sample = true, + .max_new_tokens = 200, + .do_sample = false, .temperature = 0.3F, .top_k = 50, .top_p = 0.F, diff --git a/examples/demo_qwen2.5_npu.cpp b/examples/demo_qwen2.5_npu.cpp deleted file mode 100644 index 761a34926..000000000 --- a/examples/demo_qwen2.5_npu.cpp +++ /dev/null @@ -1,100 +0,0 @@ -#include "backends/cpu/CPUBackend.hpp" -#include "cmdline.h" -#include "models/qwen/configuration_qwen.hpp" -#include "models/qwen/modeling_qwen_npu.hpp" -#include "models/qwen/modeling_qwen.hpp" -#include 
"models/qwen/tokenization_qwen.hpp" -#include "processor/PostProcess.hpp" - -using namespace mllm; - -int main(int argc, char **argv) { - cmdline::parser cmdParser; - cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", false, "../vocab/qwen2.5_vocab.mllm"); - cmdParser.add("merge", 'e', "specify mllm merge file path", false, "../vocab/qwen2.5_merges.txt"); - cmdParser.add("model", 'm', "specify mllm model path", false, "../models/Qwen2.5-1.5B-Instruct.mllm"); - cmdParser.add("billion", 'b', "[0.5B | 1.8B | 1.5B]", false, "1.8B"); - cmdParser.add("limits", 'l', "max KV cache size", false, 400); - cmdParser.add("thread", 't', "num of threads", false, 4); - cmdParser.parse_check(argc, argv); - - string vocab_path = cmdParser.get("vocab"); - string merge_path = cmdParser.get("merge"); - string model_path = cmdParser.get("model"); - string model_billion = cmdParser.get("billion"); - int tokens_limit = cmdParser.get("limits"); - CPUBackend::cpu_threads = cmdParser.get("thread"); - - auto tokenizer = QWenTokenizer(vocab_path, merge_path); - QWenConfig config(tokens_limit, "1.5B", RoPEType::HFHUBROPE); - auto model = QWenForCausalLM_NPU(config, 64); - model.load(model_path); - auto decoding_model = QWenForCausalLM(config); - decoding_model.load("../models/qwen-2.5-1.5b-instruct-q4_0_4_4.mllm"); - - vector in_strs = { - " Give me a short introduction to large language model.", - }; - - for (int i = 0; i < in_strs.size(); ++i) { - auto input_str = tokenizer.apply_chat_template(in_strs[i]); - auto [real_seq_length, input_tensor] = tokenizer.tokenizeWithPadding(input_str, 64, config.vocab_size); - std::cout << "[Q] " << in_strs[i] << std::endl; - std::cout << "[A] " << std::flush; - - // set total seq length for HeadLinear execute, which can not get the real seq length from Opts - static_cast(Backend::global_backends[MLLM_CPU])->setTotalSequenceLength(real_seq_length); - - LlmTextGeneratorOpts opt{ - .max_new_tokens = 1, - .do_sample = false, - .temperature = 
0.3f, - .top_k = 50, - .top_p = 0.f, - .is_padding = true, - .seq_before_padding = real_seq_length, - }; - model.generate(input_tensor, opt, [&](unsigned int out_token) -> bool { - auto out_string = tokenizer.detokenize({out_token}); - auto [not_end, output_string] = tokenizer.postprocess(out_string); - if (!not_end) { return false; } - std::cout << output_string << std::flush; - return true; - }); - - static_cast(Backend::global_backends[MLLM_CPU])->setCurSequenceLength(real_seq_length); - static_cast(Backend::global_backends[MLLM_CPU])->setExecutionType(AUTOREGRESSIVE); - static_cast(Backend::global_backends[MLLM_CPU])->toggleSwitching(); - - LlmTextGeneratorOpts decoding_opt{ - .max_new_tokens = 100, - .do_sample = false, - .temperature = 0.3f, - .top_k = 50, - .top_p = 0.f, - .is_padding = false, - }; - bool isSwitched = false; - decoding_model.generate(input_tensor, decoding_opt, [&](unsigned int out_token) -> bool { - // call only once of switchDecodeTag - if (!isSwitched) { - static_cast(Backend::global_backends[MLLM_CPU])->toggleSwitching(); - isSwitched = true; - } - auto out_string = tokenizer.detokenize({out_token}); - auto [isOk, print_string] = tokenizer.postprocess(out_string); - if (isOk) { - std::cout << print_string << std::flush; - } else { - return false; - } - return true; - }); - - // turn on switching, set sequence length and execution type - static_cast(Backend::global_backends[MLLM_CPU])->setCurSequenceLength(0); - static_cast(Backend::global_backends[MLLM_CPU])->setExecutionType(PROMPT); - static_cast(Backend::global_backends[MLLM_CPU])->toggleSwitching(); - std::cout << "\n"; - } -} \ No newline at end of file diff --git a/examples/demo_qwen2.5_vl.cpp b/examples/demo_qwen2.5_vl.cpp new file mode 100644 index 000000000..c0fabb89e --- /dev/null +++ b/examples/demo_qwen2.5_vl.cpp @@ -0,0 +1,71 @@ +#include +#include "cmdline.h" +#include "models/qwen2_5_vl/configuration_qwen2_5_vl.hpp" +#include "models/qwen2_5_vl/modeling_qwen2_5_vl.hpp" +// 
#include "models/qwen2_vl/vtp/modeling_qwen2_vl.hpp" +#include "models/qwen2_vl/processing_qwen2_vl.hpp" +#include "processor/PostProcess.hpp" + +using namespace mllm; +int main(int argc, char **argv) { + cmdline::parser cmdParser; + cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", false, "../vocab/qwen2vl_vocab.mllm"); + cmdParser.add("merge", 'e', "specify mllm merge file path", false, "../vocab/qwen2vl_merges.txt"); +#ifdef ARM + cmdParser.add("model", 'm', "specify mllm model path", false, "../models/qwen-2.5-vl-3b-instruct-kai_q4_0_f.mllm"); +#else + cmdParser.add("model", 'm', "specify mllm model path", false, "../models/qwen-2-vl-2b-instruct-q4_k.mllm"); +#endif + cmdParser.add("billion", 'b', "[3B | 7B |]", false, "3B"); + cmdParser.add("limits", 'l', "max KV cache size", false, 800); + cmdParser.add("thread", 't', "num of threads", false, 4); + cmdParser.parse_check(argc, argv); + + string vocab_path = cmdParser.get("vocab"); + string merge_path = cmdParser.get("merge"); + string model_path = cmdParser.get("model"); + string model_billion = cmdParser.get("billion") == "3B" ? 
"3b" : cmdParser.get("billion"); + int tokens_limit = cmdParser.get("limits"); + int thread_num = cmdParser.get("thread"); + CPUBackend::cpu_threads = cmdParser.get("thread"); + + ParamLoader param_loader(model_path); + auto processor = Qwen2VLProcessor(vocab_path, merge_path); + Qwen2VLConfig config(tokens_limit, model_billion); + auto model = Qwen2VLModel(config); + model.load(model_path); + + vector in_imgs = { + // "../assets/bus.png", + "../assets/two_cats.jpg", + // "../assets/bird_image.jpg", + }; + vector in_strs = { + "<|vision_start|><|image_pad|><|vision_end|>Describe this image.", + }; + + for (int i = 0; i < in_strs.size(); ++i) { + auto in_str = in_strs[i]; + in_str = processor.tokenizer->apply_chat_template(in_str); + auto input_tensor = processor.process(in_str, in_imgs[i]); + std::cout << "[Q] " << in_strs[i] << std::endl; + std::cout << "[A] " << std::flush; + + for (int step = 0; step < 100; step++) { + model.get_position_ids(input_tensor); + auto result = model(input_tensor); + auto outputs = processor.detokenize(result[0]); + auto out_string = outputs.first; + auto out_token = outputs.second; + auto [not_end, output_string] = processor.tokenizer->postprocess(out_string); + if (!not_end) { break; } + std::cout << output_string << std::flush; + chatPostProcessing(out_token, input_tensor[0], {&input_tensor[1], &input_tensor[2]}); + } + printf("\n"); + model.clear_kvcache(); + model.profiling(); + } + + return 0; +} \ No newline at end of file diff --git a/examples/demo_qwen2_vl.cpp b/examples/demo_qwen2_vl.cpp index 3a23c982a..8a238e887 100644 --- a/examples/demo_qwen2_vl.cpp +++ b/examples/demo_qwen2_vl.cpp @@ -2,6 +2,7 @@ #include "cmdline.h" #include "models/qwen2_vl/configuration_qwen2_vl.hpp" #include "models/qwen2_vl/modeling_qwen2_vl.hpp" +// #include "models/qwen2_vl/vtp/modeling_qwen2_vl.hpp" #include "models/qwen2_vl/processing_qwen2_vl.hpp" #include "processor/PostProcess.hpp" @@ -10,27 +11,35 @@ int main(int argc, char **argv) { 
cmdline::parser cmdParser; cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", false, "../vocab/qwen2vl_vocab.mllm"); cmdParser.add("merge", 'e', "specify mllm merge file path", false, "../vocab/qwen2vl_merges.txt"); +#ifdef ARM + cmdParser.add("model", 'm', "specify mllm model path", false, "../models/qwen-2-vl-2b-instruct-kai_q4_0.mllm"); +#else cmdParser.add("model", 'm', "specify mllm model path", false, "../models/qwen-2-vl-2b-instruct-q4_k.mllm"); - cmdParser.add("limits", 'l', "max KV cache size", false, 2000); +#endif + cmdParser.add("billion", 'b', "[2B | 7B |]", false, "2B"); + cmdParser.add("limits", 'l', "max KV cache size", false, 800); cmdParser.add("thread", 't', "num of threads", false, 4); cmdParser.parse_check(argc, argv); string vocab_path = cmdParser.get("vocab"); string merge_path = cmdParser.get("merge"); string model_path = cmdParser.get("model"); + string model_billion = cmdParser.get("billion") == "2B" ? "1.5b" : cmdParser.get("billion"); int tokens_limit = cmdParser.get("limits"); int thread_num = cmdParser.get("thread"); CPUBackend::cpu_threads = cmdParser.get("thread"); ParamLoader param_loader(model_path); auto processor = Qwen2VLProcessor(vocab_path, merge_path); - Qwen2VLConfig config(tokens_limit, "1.5b"); - auto model_config = Qwen2VLConfig(config); - auto model = Qwen2VLModel(model_config); + Qwen2VLConfig config(tokens_limit, model_billion); + auto model = Qwen2VLModel(config); model.load(model_path); vector in_imgs = { - "../assets/bus.png"}; + // "../assets/bus.png", + "../assets/two_cats.jpg", + // "../assets/bird_image.jpg", + }; vector in_strs = { "<|vision_start|><|image_pad|><|vision_end|>Describe this image.", }; @@ -53,6 +62,8 @@ int main(int argc, char **argv) { chatPostProcessing(out_token, input_tensor[0], {&input_tensor[1], &input_tensor[2]}); } printf("\n"); + model.clear_kvcache(); + model.profiling(); } return 0; diff --git a/examples/demo_qwen2_vl_npu.cpp b/examples/demo_qwen2_vl_npu.cpp new file mode 
100644 index 000000000..ebca5fcab --- /dev/null +++ b/examples/demo_qwen2_vl_npu.cpp @@ -0,0 +1,181 @@ +#include "Context.hpp" +#include "QNNBackend.hpp" +#include +#include +#include "Types.hpp" +#include "cmdline.h" +#include "memory/MemInspect.hpp" +#include "models/qwen2_vl/configuration_qwen2_vl.hpp" +#include "models/qwen2_vl/modeling_qwen2_vl_npuvit.hpp" +#include "models/qwen2_vl/modeling_qwen2_vl_npu.hpp" +#include "models/qwen2_vl/processing_qwen2_vl.hpp" +#include "processor/PostProcess.hpp" + +using namespace mllm; +int main(int argc, char **argv) { + cmdline::parser cmdParser; + cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", false, "../vocab/qwen2vl_vocab.mllm"); + cmdParser.add("merge", 'e', "specify mllm merge file path", false, "../vocab/qwen2vl_merges.txt"); + cmdParser.add("model", 'm', "specify mllm model path", false, "../models/qwen2_vl_vit_lm_rota_noshadow.mllm"); + cmdParser.add("limits", 'l', "max KV cache size", false, 1000); + cmdParser.add("thread", 't', "num of threads", false, 4); + cmdParser.parse_check(argc, argv); + + string vocab_path = cmdParser.get("vocab"); + string merge_path = cmdParser.get("merge"); + string model_path = cmdParser.get("model"); + const string cpu_model_path = "../models/Qwen2-VL-2B-Instruct_vit_lm_rotated-Q40.mllm"; + int tokens_limit = cmdParser.get("limits"); + int thread_num = cmdParser.get("thread"); + CPUBackend::cpu_threads = cmdParser.get("thread"); + + // TODO: add a function to calculate the chunk size + const int chunk_size = 128; + + Module::initBackend(MLLM_QNN); + + Context::Instance().inference_state().setCPUViT(false); + + ParamLoader param_loader(model_path); + auto processor = Qwen2VLProcessor(vocab_path, merge_path); + Qwen2VLNPUConfig npu_config(tokens_limit, "1.5b-vl-rotated"); + + // npu vit embedding + auto prefill_embedding = npu::Qwen2VL_ImagePatchAndEmbedding(npu_config); + prefill_embedding.load(model_path); + + // npu llm + auto prefill_body = 
Qwen2VL_PrefillBody(npu_config, chunk_size, npu_config.shadow_layers); + prefill_body.load(model_path); + + // cpu model + auto cpu_model_config = Qwen2VLConfig(tokens_limit, "1.5b"); + cpu_model_config.attn_implementation = "eager_notrans"; + auto decoding_model = Qwen2VL_Decoding_Model(cpu_model_config); + decoding_model.load(cpu_model_path); + + vector in_imgs = { + "../assets/bus.png"}; + vector in_strs = { + "<|vision_start|><|image_pad|><|vision_end|>Imagine you are describing this image to someone who cannot see it. Explain everything you observe, including the background, subjects, their expressions, and any activities they appear to be doing.", + }; + + auto &in_str = in_strs[0]; + in_str = processor.tokenizer->apply_chat_template(in_str); + auto input_tensors = processor.process(in_str, in_imgs[0]); + + const int real_seq_length = input_tensors[0].sequence(); + std::cout << "real seq length: " << real_seq_length << std::endl; + + const int num_iter = (real_seq_length + chunk_size - 1) / chunk_size; + std::cout << "num_iter: " << num_iter << std::endl; + // padding the position_ids to total chunk length(example: 256*2) for CPUMultimodalRoPEPipeline + prefill_embedding.get_position_ids(input_tensors, chunk_size * num_iter); + + // 1. QNN vit embedding + // NOTE: put vit here is because compatible with older qnn_context.bin. 
+ // In QNNBackend, the graph should be executed in the order of the context + // TODO: better QNNBackend graph indexing and management + auto vit_start = mllm_time_ms(); + auto merged_embd = prefill_embedding(input_tensors); + auto vit_end = mllm_time_ms(); + + auto merged_embd_warmup_tensor = Tensor(0, MLLM_QNN); + merged_embd_warmup_tensor.reshape(1, 1, chunk_size, 1536); + merged_embd_warmup_tensor.setTtype(INPUT_TENSOR); + merged_embd_warmup_tensor.alloc(); + + merged_embd_warmup_tensor.setTtype(INPUT_TENSOR); + input_tensors.back().setTtype(INPUT_TENSOR); + vector prefill_input = {merged_embd_warmup_tensor, input_tensors.back()}; + + auto llm_start = mllm_time_ms(); + prefill_body(prefill_input); + auto llm_end = mllm_time_ms(); + std::cout << "after warm up" << std::endl; + + if (!std::filesystem::exists("qnn_context.bin")) { + static_cast(Backend::global_backends[MLLM_QNN].get())->saveQNNContext(); + } + + Context::Instance().inference_state().setQnnGraphFrozen(true); + Context::Instance().inference_state().setCurSequenceLength(0); + Context::Instance().inference_state().setExecutionType(PROMPT); + Context::Instance().inference_state().toggleSwitching(); + + // set total seq length for HeadLinear execute, which can not get the real seq length from Opts + Context::Instance().inference_state().setTotalSequenceLength(real_seq_length); + // set chunk size for the HeadLinear execute, which can not get the chunk size from Opts + Context::Instance().inference_state().setChunkSize(chunk_size); + + std::cout << "[Q] " << in_strs[0] << std::endl; + std::cout << "[A] " << std::flush; + + for (auto &t : input_tensors) { + t.setTtype(INPUT_TENSOR); + } + + // 2. 
QNN LLM Prefill + unsigned int out_token = 0; + auto start_time = mllm_time_ms(); + int64_t prefill_time; + for (auto i = 0; i < num_iter; ++i) { + // copy the data from merged_embd[0] to merged_embd_warmup_tensor + auto source = merged_embd[0].ptrAt(0, 0, chunk_size * i, 0); + auto dest = prefill_input[0].hostPtr(); + if (i == 0) { + memcpy(dest, source, std::min(prefill_input[0].cntSize(), merged_embd[0].cntSize())); + } else { + memcpy(dest, source, (merged_embd[0].sequence() % chunk_size) * merged_embd[0].dimension() * sizeof(float)); + } + + auto result = prefill_body(prefill_input); + + if (i == 0) { // turn off switching to avoid RoPE h_cnt_ reset to curSequenceLength in next chunk + Context::Instance().inference_state().toggleSwitching(); + } + + if (i == num_iter - 1) { + auto end_time = mllm_time_ms(); + prefill_time = end_time - start_time; + auto outputs = processor.detokenize(result[0], real_seq_length % chunk_size); + auto out_string = outputs.first; + out_token = outputs.second; + auto [not_end, output_string] = processor.tokenizer->postprocess(out_string); + std::cout << output_string << std::flush; + } + } + + chatPostProcessing(out_token, input_tensors[0], {&input_tensors[1], &input_tensors[2]}); + + Context::Instance().inference_state().setCurSequenceLength(real_seq_length); + Context::Instance().inference_state().setExecutionType(AUTOREGRESSIVE); + Context::Instance().inference_state().toggleSwitching(); + + // 3. 
CPU LLM Decoding + for (auto &t : input_tensors) { // set to INPUT_TENSOR to let decoding module update act + t.setTtype(INPUT_TENSOR); + } + + const int last_position_id = input_tensors[3].dataAt(0, 0, 0, real_seq_length - 1); + for (int step = 0; step < 100; step++) { + // use the last position id(no padding position) in decoding + prefill_embedding.get_position_ids(input_tensors, 0, last_position_id + 1 + step); + + auto result = decoding_model(input_tensors); + auto outputs = processor.detokenize(result[0]); + auto out_string = outputs.first; + auto out_token = outputs.second; + auto [not_end, output_string] = processor.tokenizer->postprocess(out_string); + if (!not_end) { break; } + std::cout << output_string << std::flush; + chatPostProcessing(out_token, input_tensors[0], {&input_tensors[1], &input_tensors[2]}); + + if (step == 0) Context::Instance().inference_state().toggleSwitching(); + } + + std::cout << std::endl; + std::cout << "vit embedding time: " << vit_end - vit_start << " ms" << std::endl; + std::cout << "Prefill:" << prefill_time << " ms" << std::endl; + return 0; +} \ No newline at end of file diff --git a/examples/demo_qwen2_vl_vtp.cpp b/examples/demo_qwen2_vl_vtp.cpp new file mode 100644 index 000000000..40bd82ade --- /dev/null +++ b/examples/demo_qwen2_vl_vtp.cpp @@ -0,0 +1,72 @@ +#include +#include "cmdline.h" +#include "models/qwen2_vl/configuration_qwen2_vl.hpp" +#include "models/qwen2_vl/vtp/modeling_qwen2_vl.hpp" +#include "models/qwen2_vl/vtp/processing_qwen2_vl.hpp" +#include "processor/PostProcess.hpp" + +using namespace mllm; +int main(int argc, char **argv) { + cmdline::parser cmdParser; + cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", false, "../vocab/qwen2vl_vocab.mllm"); + cmdParser.add("merge", 'e', "specify mllm merge file path", false, "../vocab/qwen2vl_merges.txt"); + cmdParser.add("model", 'm', "specify mllm model path", false, "../models/qwen-2-vl-2b-instruct-kai_q4_0.mllm"); + cmdParser.add("billion", 'b', 
"[2B | 7B |]", false, "2B"); + cmdParser.add("limits", 'l', "max KV cache size", false, 800); + cmdParser.add("thread", 't', "num of threads", false, 4); + cmdParser.add("premerge", 'g', "enable pre-ViT image token merging", false, false); + cmdParser.add("pruning", 'p', "enable pruning", false, false); + cmdParser.parse_check(argc, argv); + + string vocab_path = cmdParser.get("vocab"); + string merge_path = cmdParser.get("merge"); + string model_path = cmdParser.get("model"); + string model_billion = cmdParser.get("billion") == "2B" ? "1.5b" : cmdParser.get("billion"); + int tokens_limit = cmdParser.get("limits"); + int thread_num = cmdParser.get("thread"); + CPUBackend::cpu_threads = cmdParser.get("thread"); + use_pre_vit_merge = cmdParser.exist("premerge"); + bool use_pruning = cmdParser.exist("pruning"); + if (!use_pruning) { + WHERE_TOKEN_PRUNING.pruning_place_cfg = {}; + } + + ParamLoader param_loader(model_path); + auto processor = Qwen2VLProcessor(vocab_path, merge_path); + Qwen2VLConfig config(tokens_limit, model_billion); + auto model = Qwen2VLModel(config); + model.load(model_path); + + vector in_imgs = { + // "../assets/bus.png", + "../assets/two_cats.jpg", + // "../assets/bird_image.jpg", + }; + vector in_strs = { + "<|vision_start|><|image_pad|><|vision_end|>Describe this image.", + }; + + for (int i = 0; i < in_strs.size(); ++i) { + auto in_str = in_strs[i]; + in_str = processor.tokenizer->apply_chat_template(in_str); + auto input_tensor = processor.process(in_str, in_imgs[i]); + std::cout << "[Q] " << in_strs[i] << std::endl; + std::cout << "[A] " << std::flush; + for (int step = 0; step < 100; step++) { + model.get_position_ids(input_tensor); + auto result = model(input_tensor); + auto outputs = processor.detokenize(result[0]); + auto out_string = outputs.first; + auto out_token = outputs.second; + auto [not_end, output_string] = processor.tokenizer->postprocess(out_string); + if (!not_end) { break; } + std::cout << output_string << std::flush; + 
chatPostProcessing(out_token, input_tensor[0], {&input_tensor[1], &input_tensor[2]}); + } + printf("\n"); + model.clear_kvcache(); + model.profiling(); + } + + return 0; +} \ No newline at end of file diff --git a/examples/demo_qwen3.cpp b/examples/demo_qwen3.cpp index 6a93e6d36..009dceb00 100644 --- a/examples/demo_qwen3.cpp +++ b/examples/demo_qwen3.cpp @@ -20,8 +20,8 @@ int main(int argc, char **argv) { cmdline::parser cmdParser; cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", false, "../vocab/qwen_vocab.mllm"); cmdParser.add("merge", 'e', "specify mllm merge file path", false, "../vocab/qwen_merges.txt"); - cmdParser.add("model", 'm', "specify mllm model path", false, "../models/qwen-3-0.6b-q4_k.mllm"); - cmdParser.add("billion", 'b', "[0.6B | 4B |]", false, "0.6B"); + cmdParser.add("model", 'm', "specify mllm model path", false, "../models/qwen-3-0.6b-kai_q4_0.mllm"); + cmdParser.add("billion", 'b', "[0.6B | 4B |]", false, "0.6b-lm"); cmdParser.add("limits", 'l', "max KV cache size", false, 800); cmdParser.add("thread", 't', "num of threads", false, 4); cmdParser.parse_check(argc, argv); @@ -67,5 +67,7 @@ int main(int argc, char **argv) { return true; }); std::cout << "\n"; + model.clear_kvcache(); + model.profiling(); } } diff --git a/examples/demo_qwen_batch.cpp b/examples/demo_qwen_batch.cpp new file mode 100644 index 000000000..4c25f3de9 --- /dev/null +++ b/examples/demo_qwen_batch.cpp @@ -0,0 +1,76 @@ +/** + * @file demo_qwen.cpp + * @author Chenghua Wang (chenghua.wang.edu@gmail.com) + * @version 0.1 + * @date 2024-05-01 + * + * @copyright Copyright (c) 2024 + * + */ +#include "cmdline.h" +#include "models/qwen/configuration_qwen.hpp" +#include "models/qwen/modeling_qwen.hpp" +#include "models/qwen/tokenization_qwen.hpp" +#include +#include + +using namespace mllm; + +int main(int argc, char **argv) { + std::iostream::sync_with_stdio(false); + + cmdline::parser cmdParser; + cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", 
false, "../vocab/qwen2.5_vocab.mllm"); + cmdParser.add("merge", 'e', "specify mllm merge file path", false, "../vocab/qwen2.5_merges.txt"); +#ifdef ARM + cmdParser.add("model", 'm', "specify mllm model path", false, "../models/qwen-2.5-1.5b-instruct-kai_q4_0_lm.mllm"); +#else + cmdParser.add("model", 'm', "specify mllm model path", false, "../models/qwen-2.5-1.5b-instruct-q4_0_4_4.mllm"); +#endif + cmdParser.add("billion", 'b', "[0.5B | 1.8B | 1.5B | 3B |]", false, "1.5b-lm"); + cmdParser.add("limits", 'l', "max KV cache size", false, 400); + cmdParser.add("thread", 't', "num of threads", false, 4); + cmdParser.parse_check(argc, argv); + + string vocab_path = cmdParser.get("vocab"); + string merge_path = cmdParser.get("merge"); + string model_path = cmdParser.get("model"); + string model_billion = cmdParser.get("billion"); + int tokens_limit = cmdParser.get("limits"); + CPUBackend::cpu_threads = cmdParser.get("thread"); + + auto tokenizer = QWenTokenizer(vocab_path, merge_path); + QWenConfig config(tokens_limit, model_billion, RoPEType::HFHUBROPE); + // config.attn_implementation = "sage_attention"; // 使用Sage Attention实现 + auto model = QWenForCausalLM(config); + model.load(model_path); + + vector in_strs = { + "Give me a short introduction to large language model.", + "介绍一下你自己。", + "什么是北京市的旧称?", + }; + vector input_strs; + for (int i = 0; i < in_strs.size(); ++i) { + std::cout << "[Q" << i << "] " << in_strs[i] << std::endl; + auto input_str = tokenizer.apply_chat_template(in_strs[i]); + input_strs.push_back(input_str); + } + auto input_tensor = tokenizer.tokenize(input_strs); + + LlmTextGeneratorOpts opt{ + .max_new_tokens = 200, + .do_sample = false, + .temperature = 0.3F, + .top_k = 50, + .top_p = 0.F, + }; + auto output_tokens = model.generate(input_tensor, opt, tokenizer.eos_id_); + for (int i = 0; i < output_tokens.size(); ++i) { + auto out_token = output_tokens[i]; + auto out_string = tokenizer.detokenize(out_token); + std::cout << "[A" << i << "] " << 
out_string << std::endl; + } + model.clear_kvcache(); + model.profiling(); +} diff --git a/examples/demo_qwen_npu.cpp b/examples/demo_qwen_npu.cpp index 9e230f01c..9187f1301 100644 --- a/examples/demo_qwen_npu.cpp +++ b/examples/demo_qwen_npu.cpp @@ -1,8 +1,11 @@ +#include "Context.hpp" +#include "QNNBackend.hpp" +#include "Types.hpp" #include "backends/cpu/CPUBackend.hpp" #include "cmdline.h" #include "models/qwen/configuration_qwen.hpp" -#include "models/qwen/modeling_qwen_npu.hpp" #include "models/qwen/modeling_qwen.hpp" +#include "models/qwen/modeling_qwen_npu_v2.hpp" #include "models/qwen/tokenization_qwen.hpp" #include "processor/PostProcess.hpp" @@ -10,123 +13,82 @@ using namespace mllm; int main(int argc, char **argv) { cmdline::parser cmdParser; - cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", false, "../vocab/qwen_vocab.mllm"); - cmdParser.add("merge", 'e', "specify mllm merge file path", false, "../vocab/qwen_merges.txt"); - cmdParser.add("model", 'm', "specify mllm model path", false, "../models/qwen-1.5-1.8b-chat-int8.mllm"); - cmdParser.add("billion", 'b', "[0.5B | 1.8B]", false, "1.8B"); + cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", false, "../vocab/qwen2.5_vocab.mllm"); + cmdParser.add("merge", 'e', "specify mllm merge file path", false, "../vocab/qwen2.5_merges.txt"); + cmdParser.add("qnn-model", 'm', "specify mllm model path", false, "../models/Qwen2.5-1.5B-Instruct_rotated-noshadow.mllm"); + cmdParser.add("decoding-model", '\0', "specify mllm model path", false, "../models/Qwen2.5-1.5B-Instruct_rotated-Q40.mllm"); + cmdParser.add("billion", 'b', "[0.5B | 1.8B | 1.5B | [1.5B, 1.8B]-rotated]", false, "1.5B-rotated"); cmdParser.add("limits", 'l', "max KV cache size", false, 400); cmdParser.add("thread", 't', "num of threads", false, 4); cmdParser.parse_check(argc, argv); string vocab_path = cmdParser.get("vocab"); string merge_path = cmdParser.get("merge"); - string model_path = cmdParser.get("model"); + string 
model_path = cmdParser.get("qnn-model"); + string decoding_model_path = cmdParser.get("decoding-model"); string model_billion = cmdParser.get("billion"); int tokens_limit = cmdParser.get("limits"); - const int chunk_size = 128; CPUBackend::cpu_threads = cmdParser.get("thread"); + Module::initBackend(MLLM_QNN); + auto tokenizer = QWenTokenizer(vocab_path, merge_path); - QWenConfig config(tokens_limit, model_billion, RoPEType::HFHUBROPE); - auto model = QWenForCausalLM_NPU(config, chunk_size); + QWenNPUConfig config(tokens_limit, "1.5b-rotated", RoPEType::HFHUBROPE); + auto model = v2::QWenForCausalLM_NPU(config, 256); + config.attn_implementation = "eager_notrans"; model.load(model_path); auto decoding_model = QWenForCausalLM(config); - decoding_model.load("../models/qwen-1.5-1.8b-chat-q4k.mllm"); - - // warmup START - std::string input_str = " "; - auto [real_seq_length, input_tensor] = tokenizer.tokenizePaddingByChunk(input_str, chunk_size, config.vocab_size); - LlmTextGeneratorOpts opt{ - .max_new_tokens = 1, - .do_sample = false, - .is_padding = true, - .seq_before_padding = real_seq_length, - .chunk_size = chunk_size, - }; - model.generate(input_tensor, opt, [&](unsigned int out_token) -> bool { - auto out_string = tokenizer.detokenize({out_token}); - auto [not_end, output_string] = tokenizer.postprocess(out_string); - if (!not_end) { return false; } - return true; - }); - Module::isFirstChunk = false; - static_cast(Backend::global_backends[MLLM_CPU])->setCurSequenceLength(0); - static_cast(Backend::global_backends[MLLM_CPU])->setExecutionType(PROMPT); - static_cast(Backend::global_backends[MLLM_CPU])->toggleSwitching(); - // turn on the multi-chunk prefilling - Module::isMultiChunkPrefilling = true; - // warmup END - std::cout << "Warmup finished." 
<< std::endl; + decoding_model.load(decoding_model_path); vector in_strs = { // " Give me a short introduction to large language model.", - "\"Large Language Models (LLMs) are advanced artificial intelligence systems designed to understand and generate human-like text. These models are trained on vast amounts of data, enabling them to perform a wide range of tasks, from answering questions and summarizing text to generating creative content and engaging in conversational dialogue. LLMs like GPT-3 and GPT-4, developed by OpenAI, have set new benchmarks in natural language processing by leveraging deep learning architectures, particularly transformer models, which excel at capturing context and relationships within text. The scalability and versatility of LLMs make them invaluable tools for applications in education, customer service, content creation, and more. However, their deployment also raises ethical considerations, including issues of bias, misinformation, and the potential for misuse. As the field continues to evolve, ongoing research and responsible deployment strategies are essential to harnessing the full potential of these powerful AI systems while mitigating their risks.\"\nGenerate a title based on the above text."}; + "\"Large Language Models (LLMs) are advanced artificial intelligence systems designed to understand and generate human-like text. These models are trained on vast amounts of data, enabling them to perform a wide range of tasks, from answering questions and summarizing text to generating creative content and engaging in conversational dialogue. LLMs like GPT-3 and GPT-4, developed by OpenAI, have set new benchmarks in natural language processing by leveraging deep learning architectures, particularly transformer models, which excel at capturing context and relationships within text. The scalability and versatility of LLMs make them invaluable tools for applications in education, customer service, content creation, and more. 
However, their deployment also raises ethical considerations, including issues of bias, misinformation, and the potential for misuse. As the field continues to evolve, ongoing research and responsible deployment strategies are essential to harnessing the full potential of these powerful AI systems while mitigating their risks.\"\nGenerate a title based on the above text.", + // " Hello, Who are you?" + }; for (int i = 0; i < in_strs.size(); ++i) { auto input_str = tokenizer.apply_chat_template(in_strs[i]); - auto [real_seq_length, input_tensor] = tokenizer.tokenizePaddingByChunk(input_str, chunk_size, config.vocab_size); - const int seq_length_padding = (chunk_size - real_seq_length % chunk_size) + real_seq_length; - const int chunk_num = seq_length_padding / chunk_size; - + auto [real_seq_length, input_tensor] = tokenizer.tokenizeWithPadding(input_str, 256, config.vocab_size); + // real_seq_length = 256; std::cout << "[Q] " << in_strs[i] << std::endl; std::cout << "[A] " << std::flush; + std::cout << "real_seq_length: " << real_seq_length << std::endl; // set total seq length for HeadLinear execute, which can not get the real seq length from Opts - static_cast(Backend::global_backends[MLLM_CPU])->setTotalSequenceLength(real_seq_length); - static_cast(Backend::global_backends[MLLM_CPU])->setChunkSize(chunk_size); + Context::Instance().inference_state().setTotalSequenceLength(real_seq_length); LlmTextGeneratorOpts opt{ .max_new_tokens = 1, .do_sample = false, .is_padding = true, .seq_before_padding = real_seq_length, - .chunk_size = chunk_size, }; + model.generate(input_tensor, opt, [&](unsigned int out_token) -> bool { + auto out_string = tokenizer.detokenize({out_token}); + auto [not_end, output_string] = tokenizer.postprocess(out_string); + if (!not_end) { return false; } + std::cout << output_string << std::flush; + return true; + }); - // tensor vectors to save the chunked tensors of the QNN prefilling input - bool isSwitched = false; - vector 
chunked_tensors(chunk_num); - for (int chunk_id = 0; chunk_id < chunk_num; ++chunk_id) { - chunked_tensors[chunk_id].setBackend(Backend::global_backends[MLLM_CPU]); - chunked_tensors[chunk_id].setTtype(INPUT_TENSOR); - chunked_tensors[chunk_id].reshape(1, 1, chunk_size, 1); - chunked_tensors[chunk_id].setName("input-chunk-" + to_string(chunk_id)); - chunked_tensors[chunk_id].shallowCopyFrom(&input_tensor, false, {0, 0, chunk_id * chunk_size, 0}); - - model.generate(chunked_tensors[chunk_id], opt, [&](unsigned int out_token) -> bool { - if (!isSwitched && chunk_id == 0 && static_cast(Backend::global_backends[MLLM_CPU])->isStageSwitching()) { - // turn off switching at the first chunk of following inputs - static_cast(Backend::global_backends[MLLM_CPU])->toggleSwitching(); - isSwitched = true; - } - auto out_string = tokenizer.detokenize({out_token}); - auto [not_end, output_string] = tokenizer.postprocess(out_string); - if (!not_end) { return false; } - if (chunk_id == chunk_num - 1) { // print the output of the last chunk - std::cout << output_string << std::flush; - } - return true; - }); - Module::isFirstChunk = false; - } - - static_cast(Backend::global_backends[MLLM_CPU])->setCurSequenceLength(real_seq_length); - static_cast(Backend::global_backends[MLLM_CPU])->setExecutionType(AUTOREGRESSIVE); - static_cast(Backend::global_backends[MLLM_CPU])->toggleSwitching(); + Context::Instance().inference_state().setCurSequenceLength(real_seq_length); + Context::Instance().inference_state().setExecutionType(AUTOREGRESSIVE); + Context::Instance().inference_state().toggleSwitching(); LlmTextGeneratorOpts decoding_opt{ - .max_new_tokens = 100, + .max_new_tokens = 50, .do_sample = false, .temperature = 0.3f, .top_k = 50, .top_p = 0.f, .is_padding = false, }; - isSwitched = false; - decoding_model.generate(chunked_tensors.back(), decoding_opt, [&](unsigned int out_token) -> bool { + bool isSwitched = false; + decoding_model.generate(input_tensor, decoding_opt, [&](unsigned int 
out_token) -> bool { // call only once of switchDecodeTag if (!isSwitched) { - static_cast(Backend::global_backends[MLLM_CPU])->toggleSwitching(); + Context::Instance().inference_state().toggleSwitching(); + isSwitched = true; } auto out_string = tokenizer.detokenize({out_token}); @@ -140,9 +102,14 @@ int main(int argc, char **argv) { }); // turn on switching, set sequence length and execution type - static_cast(Backend::global_backends[MLLM_CPU])->setCurSequenceLength(0); - static_cast(Backend::global_backends[MLLM_CPU])->setExecutionType(PROMPT); - static_cast(Backend::global_backends[MLLM_CPU])->toggleSwitching(); + Context::Instance().inference_state().setCurSequenceLength(0); + Context::Instance().inference_state().setExecutionType(PROMPT); + Context::Instance().inference_state().toggleSwitching(); std::cout << "\n"; + + if (!std::filesystem::exists("qnn_context.bin")) { + // static_cast(Backend::global_backends[MLLM_QNN].get())->saveQNNContext(); + static_cast(Backend::global_backends[MLLM_QNN].get())->saveQNNContext(); + } } -} \ No newline at end of file +} diff --git a/examples/demo_qwen_pipeline.cpp b/examples/demo_qwen_npu_pipeline.cpp similarity index 53% rename from examples/demo_qwen_pipeline.cpp rename to examples/demo_qwen_npu_pipeline.cpp index f2f8bb8d0..343db554a 100644 --- a/examples/demo_qwen_pipeline.cpp +++ b/examples/demo_qwen_npu_pipeline.cpp @@ -1,10 +1,12 @@ +#include "Context.hpp" #include "Backend.hpp" +#include "QNNBackend.hpp" #include "Trace.hpp" #include "Types.hpp" #include "backends/cpu/CPUBackend.hpp" #include "cmdline.h" #include "models/qwen/configuration_qwen.hpp" -#include "models/qwen/modeling_qwen_npu.hpp" +#include "models/qwen/modeling_qwen_npu_v2.hpp" #include "models/qwen/modeling_qwen.hpp" #include "models/qwen/tokenization_qwen.hpp" #include "processor/PostProcess.hpp" @@ -14,46 +16,60 @@ using namespace mllm; int main(int argc, char **argv) { cmdline::parser cmdParser; - cmdParser.add("vocab", 'v', "specify mllm 
tokenizer model path", false, "../vocab/qwen_vocab.mllm"); - cmdParser.add("merge", 'e', "specify mllm merge file path", false, "../vocab/qwen_merges.txt"); - cmdParser.add("model", 'm', "specify mllm model path", false, "../models/qwen-1.5-1.8b-chat-int8.mllm"); - cmdParser.add("billion", 'b', "[0.5B | 1.8B]", false, "1.8B"); + cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", false, "../vocab/qwen2.5_vocab.mllm"); + // "../vocab/qwen_vocab.mllm" + cmdParser.add("merge", 'e', "specify mllm merge file path", false, "../vocab/qwen2.5_merges.txt"); + // "../vocab/qwen_merges.txt" + cmdParser.add("qnn-model", 'm', "specify mllm model path", false, "../models/Qwen2.5-1.5B-Instruct_rotated-noshadow.mllm"); + // "../models/qwen1.5-1.8b-chat-rot-qnn.mllm" + cmdParser.add("decoding-model", '\0', "specify mllm model path", false, "../models/Qwen2.5-1.5B-Instruct_rotated-Q40.mllm"); + // "../models/qwen1.5-1.8b-chat-rot_q4_0.mllm" + cmdParser.add("billion", 'b', "[0.5B | 1.8B | 1.5B | [1.5B, 1.8B]-rotated]", false, "1.5B-rotated"); cmdParser.add("limits", 'l', "max KV cache size", false, 400); cmdParser.add("thread", 't', "num of threads", false, 4); cmdParser.parse_check(argc, argv); string vocab_path = cmdParser.get("vocab"); string merge_path = cmdParser.get("merge"); - string model_path = cmdParser.get("model"); + string model_path = cmdParser.get("qnn-model"); + string decoding_model_path = cmdParser.get("decoding-model"); string model_billion = cmdParser.get("billion"); int tokens_limit = cmdParser.get("limits"); const int chunk_size = 128; CPUBackend::cpu_threads = cmdParser.get("thread"); + Module::initBackend(MLLM_QNN); + auto tokenizer = QWenTokenizer(vocab_path, merge_path); - QWenConfig config(tokens_limit, model_billion, RoPEType::HFHUBROPE); - auto model = QWenForCausalLM_NPU(config, chunk_size); + QWenNPUConfig config(tokens_limit, model_billion, RoPEType::HFHUBROPE); + auto model = v2::QWenForCausalLM_NPU(config, chunk_size); 
model.load(model_path); + config.attn_implementation = "eager_notrans"; auto decoding_model = QWenForCausalLM(config); - decoding_model.load("../models/qwen-1.5-1.8b-chat-q4k.mllm"); + decoding_model.load(decoding_model_path); string trace_string = " "; auto [_, input_tensor] = tokenizer.tokenizePaddingByChunk(trace_string, chunk_size, config.vocab_size); Tracer::trace(&model, {input_tensor}); std::cout << "Trace and Warmup finished" << std::endl; + if (!std::filesystem::exists("qnn_context.bin")) { + static_cast(Backend::global_backends[MLLM_QNN].get())->saveQNNContext(); + } + vector in_strs = { - // " Give me a short introduction to large language model.", - "\"Large Language Models (LLMs) are advanced artificial intelligence systems designed to understand and generate human-like text. These models are trained on vast amounts of data, enabling them to perform a wide range of tasks, from answering questions and summarizing text to generating creative content and engaging in conversational dialogue. LLMs like GPT-3 and GPT-4, developed by OpenAI, have set new benchmarks in natural language processing by leveraging deep learning architectures, particularly transformer models, which excel at capturing context and relationships within text. The scalability and versatility of LLMs make them invaluable tools for applications in education, customer service, content creation, and more. However, their deployment also raises ethical considerations, including issues of bias, misinformation, and the potential for misuse. As the field continues to evolve, ongoing research and responsible deployment strategies are essential to harnessing the full potential of these powerful AI systems while mitigating their risks.\"\nGenerate a title based on the above text."}; + " Give me a short introduction to large language model.", + // "\"Large Language Models (LLMs) are advanced artificial intelligence systems designed to understand and generate human-like text. 
These models are trained on vast amounts of data, enabling them to perform a wide range of tasks, from answering questions and summarizing text to generating creative content and engaging in conversational dialogue. LLMs like GPT-3 and GPT-4, developed by OpenAI, have set new benchmarks in natural language processing by leveraging deep learning architectures, particularly transformer models, which excel at capturing context and relationships within text. The scalability and versatility of LLMs make them invaluable tools for applications in education, customer service, content creation, and more. However, their deployment also raises ethical considerations, including issues of bias, misinformation, and the potential for misuse. As the field continues to evolve, ongoing research and responsible deployment strategies are essential to harnessing the full potential of these powerful AI systems while mitigating their risks.\"\nGenerate a title based on the above text." + }; for (int i = 0; i < in_strs.size(); ++i) { auto input_str = tokenizer.apply_chat_template(in_strs[i]); auto [real_seq_length, input_tensor] = tokenizer.tokenizePaddingByChunk(input_str, chunk_size, config.vocab_size); // set total seq length for HeadLinear execute, which can not get the real seq length from Opts - static_cast(Backend::global_backends[MLLM_CPU])->setTotalSequenceLength(real_seq_length); + Context::Instance().inference_state().setTotalSequenceLength(real_seq_length); // set chunk size for the HeadLinear execute, which can not get the chunk size from Opts - static_cast(Backend::global_backends[MLLM_CPU])->setChunkSize(chunk_size); + Context::Instance().inference_state().setChunkSize(chunk_size); std::cout << "[Q] " << in_strs[i] << std::endl; std::cout << "[A] " << std::flush; @@ -73,12 +89,11 @@ int main(int argc, char **argv) { ChunkPipeline pipeline(real_seq_length, chunk_size); auto prefill_result = pipeline.run(input_tensor, opt, tokenizer, model, isSwitched); - 
Module::isMultiChunkPrefilling = true; - Module::isFirstChunk = false; + Context::Instance().inference_state().setQnnGraphFrozen(true); - static_cast(Backend::global_backends[MLLM_CPU])->setCurSequenceLength(real_seq_length); - static_cast(Backend::global_backends[MLLM_CPU])->setExecutionType(AUTOREGRESSIVE); - static_cast(Backend::global_backends[MLLM_CPU])->toggleSwitching(); + Context::Instance().inference_state().setCurSequenceLength(real_seq_length); + Context::Instance().inference_state().setExecutionType(AUTOREGRESSIVE); + Context::Instance().inference_state().toggleSwitching(); LlmTextGeneratorOpts decoding_opt{ .max_new_tokens = 100, @@ -91,7 +106,7 @@ int main(int argc, char **argv) { isSwitched = false; Tensor decoding_input; - decoding_input.setBackend(Backend::global_backends[MLLM_CPU]); + decoding_input.setBackend(Backend::global_backends[MLLM_CPU].get()); decoding_input.setTtype(INPUT_TENSOR); decoding_input.reshape(1, 1, 1, 1); decoding_input.setName("input0"); @@ -100,7 +115,7 @@ int main(int argc, char **argv) { decoding_model.generate(decoding_input, decoding_opt, [&](unsigned int out_token) -> bool { // call only once of switchDecodeTag if (!isSwitched) { - static_cast(Backend::global_backends[MLLM_CPU])->toggleSwitching(); + Context::Instance().inference_state().toggleSwitching(); isSwitched = true; } auto out_string = tokenizer.detokenize({out_token}); @@ -114,9 +129,11 @@ int main(int argc, char **argv) { }); // turn on switching, set sequence length and execution type - static_cast(Backend::global_backends[MLLM_CPU])->setCurSequenceLength(0); - static_cast(Backend::global_backends[MLLM_CPU])->setExecutionType(PROMPT); - static_cast(Backend::global_backends[MLLM_CPU])->toggleSwitching(); + Context::Instance().inference_state().setCurSequenceLength(0); + Context::Instance().inference_state().setExecutionType(PROMPT); + Context::Instance().inference_state().toggleSwitching(); std::cout << "\n"; + + decoding_model.profiling("Decoding"); } } \ No 
newline at end of file diff --git a/examples/demo_showui.cpp b/examples/demo_showui.cpp index 4349f9872..c529bd72c 100644 --- a/examples/demo_showui.cpp +++ b/examples/demo_showui.cpp @@ -12,7 +12,11 @@ int main(int argc, char **argv) { cmdline::parser cmdParser; cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", false, "../vocab/showui_vocab.mllm"); cmdParser.add("merge", 'e', "specify mllm merge file path", false, "../vocab/showui_merges.txt"); +#ifdef ARM + cmdParser.add("model", 'm', "specify mllm model path", false, "../models/showui-2b-kai_q4_0.mllm"); +#else cmdParser.add("model", 'm', "specify mllm model path", false, "../models/showui-2b-q4_k.mllm"); +#endif cmdParser.add("limits", 'l', "max KV cache size", false, 2000); cmdParser.add("thread", 't', "num of threads", false, 4); cmdParser.parse_check(argc, argv); @@ -29,8 +33,7 @@ int main(int argc, char **argv) { int max_pixels = 1344 * 28 * 28; auto processor = Qwen2VLProcessor(vocab_path, merge_path, min_pixels, max_pixels); Qwen2VLConfig config(tokens_limit, "1.5b"); - auto model_config = Qwen2VLConfig(config); - auto model = Qwen2VLModel(model_config); + auto model = Qwen2VLModel(config); model.load(model_path); vector in_imgs = { diff --git a/examples/demo_showui_npu.cpp b/examples/demo_showui_npu.cpp new file mode 100644 index 000000000..d54943705 --- /dev/null +++ b/examples/demo_showui_npu.cpp @@ -0,0 +1,185 @@ +#include +#include +#include +#include "QNNBackend.hpp" +#include "Timing.hpp" +#include "Types.hpp" +#include "cmdline.h" +#include "models/qwen2_vl/configuration_qwen2_vl.hpp" +#include "models/qwen2_vl/modeling_qwen2_vl_npu.hpp" +#include "models/qwen2_vl/processing_qwen2_vl.hpp" +#include "processor/PostProcess.hpp" +#include "memory/MemInspect.hpp" + +using namespace mllm; +int main(int argc, char **argv) { + cmdline::parser cmdParser; + cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", false, "../vocab/showui_vocab.mllm"); + cmdParser.add("merge", 'e', 
"specify mllm merge file path", false, "../vocab/showui_merges.txt"); + cmdParser.add("model", 'm', "specify mllm model path", false, "../models/showui-w8-fpbias-noshadow-xdl-test.mllm"); + cmdParser.add("limits", 'l', "max KV cache size", false, 1000); + cmdParser.add("thread", 't', "num of threads", false, 4); + cmdParser.parse_check(argc, argv); + + string vocab_path = cmdParser.get("vocab"); + string merge_path = cmdParser.get("merge"); + string model_path = cmdParser.get("model"); + const string cpu_model_path = "../models/showui-2B-rotated-q40.mllm"; + int tokens_limit = cmdParser.get("limits"); + int thread_num = cmdParser.get("thread"); + CPUBackend::cpu_threads = cmdParser.get("thread"); + + // TODO: add a function to calculate the chunk size + const int chunk_size = 256; + + Module::initBackend(MLLM_QNN); + + ParamLoader param_loader(model_path); + auto processor = Qwen2VLProcessor(vocab_path, merge_path); + Qwen2VLConfig config(tokens_limit, "1.5b-rotated"); + auto model_config = Qwen2VLConfig(config); + model_config.attn_implementation = "eager"; + + auto prefill_embedding = Qwen2VL_ImagePatchAndEmbedding(config); + auto prefill_body = Qwen2VL_PrefillBody(config, chunk_size); + prefill_embedding.load(cpu_model_path); + prefill_body.load(model_path); + + auto decoding_model = Qwen2VL_Decoding_Model(model_config); + decoding_model.load(cpu_model_path); + + vector in_imgs = { + "../assets/showui.png"}; + vector in_strs = { + "Based on the screenshot of the page, I give a text description and you give its corresponding location. 
The coordinate represents a clickable location [x, y] for an element, which is a relative coordinate on the screenshot, scaled from 0 to 1.<|vision_start|><|image_pad|><|vision_end|>桌面", + }; + + auto &in_str = in_strs[0]; + in_str = processor.tokenizer->apply_chat_template(in_str); + auto input_tensors = processor.process(in_str, in_imgs[0]); + + const int real_seq_length = input_tensors[0].sequence(); + std::cout << "real seq length: " << real_seq_length << std::endl; + + const int num_iter = (real_seq_length + chunk_size - 1) / chunk_size; + std::cout << "num_iter" << num_iter << std::endl; + // padding the position_ids to total chunk length(example: 256*2) for CPUMultimodalRoPEPipeline + prefill_embedding.get_position_ids(input_tensors, chunk_size * num_iter); + + // warm up (still need a warm up as the setup stage is not omitted now) + auto merged_embd_warmup_tensor = Tensor(Backend::global_backends[MLLM_QNN]); + merged_embd_warmup_tensor.reshape(1, 1, chunk_size, 1536); + merged_embd_warmup_tensor.setTtype(INPUT_TENSOR); + merged_embd_warmup_tensor.alloc(); + + merged_embd_warmup_tensor.setTtype(INPUT_TENSOR); + input_tensors.back().setTtype(INPUT_TENSOR); + vector prefill_input = {merged_embd_warmup_tensor, input_tensors.back()}; + + auto warm_start = mllm_time_ms(); + prefill_body(prefill_input); + auto warm_end = mllm_time_ms(); + std::cout << "warm up " << warm_end - warm_start << " ms" << std::endl; + + Module::isFirstChunk = false; + static_cast(Backend::global_backends[MLLM_CPU].get())->setCurSequenceLength(0); + static_cast(Backend::global_backends[MLLM_CPU].get())->setExecutionType(PROMPT); + static_cast(Backend::global_backends[MLLM_CPU].get())->toggleSwitching(); + + // set total seq length for HeadLinear execute, which can not get the real seq length from Opts + static_cast(Backend::global_backends[MLLM_CPU].get())->setTotalSequenceLength(real_seq_length); + // set chunk size for the HeadLinear execute, which can not get the chunk size from Opts + 
static_cast(Backend::global_backends[MLLM_CPU].get())->setChunkSize(chunk_size); + + for (auto &t : input_tensors) { + t.setTtype(INPUT_TENSOR); + } + + // 1. get the vit embedding using CPU + auto vit_start = mllm_time_ms(); + auto merged_embd = prefill_embedding(input_tensors); + auto vit_end = mllm_time_ms(); + std::cout << "vit embedding: " << vit_end - vit_start << " ms" << std::endl; + + // free prefill embedding tensor, approximately free 1GB for 59ms + auto begin_free = mllm_time_ms(); + auto &embedding_act = prefill_embedding.activation_tensors; + // go through the activation tensors to get the merged_embd + for (auto iter = embedding_act.begin(); iter != embedding_act.end(); ++iter) { + // std::cout << iter->first << std::endl; + if (iter->first.find("input") != std::string::npos || iter->first.find("index_put") != std::string::npos) { + continue; + } + iter->second->free(); + } + auto end_free = mllm_time_ms(); + std::cout << "free time: " << end_free - begin_free << " ms" << std::endl; + + // 2. 
QNN LLM Prefill + unsigned int out_token = 0; + auto start_time = mllm_time_ms(); + for (auto i = 0; i < num_iter; ++i) { + // copy the data from merged_embd[0] to merged_embd_warmup_tensor + auto source = merged_embd[0].ptrAt(0, 0, chunk_size * i, 0); + auto dest = prefill_input[0].hostPtr(); + if (i == 0) { + memcpy(dest, source, prefill_input[0].cntSize()); + } + { + memcpy(dest, source, (merged_embd[0].sequence() % chunk_size) * merged_embd[0].dimension() * sizeof(float)); + } + + auto result = prefill_body(prefill_input); + + if (i == 0) { // turn off switching to avoid RoPE h_cnt_ reset to curSequenceLength in next chunk + static_cast(Backend::global_backends[MLLM_CPU].get())->toggleSwitching(); + } + + if (i == 1) { + auto end_time = mllm_time_ms(); + std::cout << "Prefill:" << end_time - start_time << " ms" << std::endl; + + auto outputs = processor.detokenize(result[0], real_seq_length % chunk_size); + auto out_string = outputs.first; + out_token = outputs.second; + auto [not_end, output_string] = processor.tokenizer->postprocess(out_string); + std::cout << output_string << std::flush; + } + } + + chatPostProcessing(out_token, input_tensors[0], {&input_tensors[1], &input_tensors[2]}); + + static_cast(Backend::global_backends[MLLM_CPU].get())->setCurSequenceLength(real_seq_length); + static_cast(Backend::global_backends[MLLM_CPU].get())->setExecutionType(AUTOREGRESSIVE); + static_cast(Backend::global_backends[MLLM_CPU].get())->toggleSwitching(); + + // 3. 
CPU LLM Decoding + for (auto &t : input_tensors) { // set to INPUT_TENSOR to let decoding module update act + t.setTtype(INPUT_TENSOR); + } + + const int last_position_id = input_tensors[3].dataAt(0, 0, 0, real_seq_length - 1); + for (int step = 0; step < 100; step++) { + // use the last position id(no padding position) in decoding + prefill_embedding.get_position_ids(input_tensors, 0, last_position_id + 1 + step); + + auto result = decoding_model(input_tensors); + auto outputs = processor.detokenize(result[0]); + auto out_string = outputs.first; + auto out_token = outputs.second; + auto [not_end, output_string] = processor.tokenizer->postprocess(out_string); + if (!not_end) { break; } + std::cout << output_string << std::flush; + chatPostProcessing(out_token, input_tensors[0], {&input_tensors[1], &input_tensors[2]}); + + if (step == 0) static_cast(Backend::global_backends[MLLM_CPU].get())->toggleSwitching(); + } + + std::cout << std::endl; + + if (!std::filesystem::exists("qnn_context.bin")) { + static_cast(Backend::global_backends[MLLM_QNN].get())->saveQNNContext(); + } + + return 0; +} \ No newline at end of file diff --git a/examples/demo_showui_vtp.cpp b/examples/demo_showui_vtp.cpp new file mode 100644 index 000000000..940719300 --- /dev/null +++ b/examples/demo_showui_vtp.cpp @@ -0,0 +1,65 @@ +#include +#include +#include "cmdline.h" +#include "models/qwen2_vl/configuration_qwen2_vl.hpp" +#include "models/qwen2_vl/vtp/modeling_qwen2_vl.hpp" +#include "models/qwen2_vl/vtp/processing_qwen2_vl.hpp" +#include "processor/PostProcess.hpp" + +using namespace mllm; +int main(int argc, char **argv) { + cmdline::parser cmdParser; + cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", false, "../vocab/showui_vocab.mllm"); + cmdParser.add("merge", 'e', "specify mllm merge file path", false, "../vocab/showui_merges.txt"); + cmdParser.add("model", 'm', "specify mllm model path", false, "../models/showui-2b-q4_k.mllm"); + cmdParser.add("limits", 'l', "max KV 
cache size", false, 2000); + cmdParser.add("thread", 't', "num of threads", false, 4); + cmdParser.add("premerge", 'p', "enable pre-ViT image token merging"); + cmdParser.parse_check(argc, argv); + + string vocab_path = cmdParser.get("vocab"); + string merge_path = cmdParser.get("merge"); + string model_path = cmdParser.get("model"); + int tokens_limit = cmdParser.get("limits"); + int thread_num = cmdParser.get("thread"); + CPUBackend::cpu_threads = cmdParser.get("thread"); + use_pre_vit_merge = cmdParser.exist("premerge"); + + ParamLoader param_loader(model_path); + int min_pixels = 256 * 28 * 28; + int max_pixels = 1344 * 28 * 28; + auto processor = Qwen2VLProcessor(vocab_path, merge_path, min_pixels, max_pixels); + Qwen2VLConfig config(tokens_limit, "1.5b"); + auto model = Qwen2VLModel(config); + model.load(model_path); + + vector in_imgs = { + "../assets/uidemo2.png"}; + vector in_strs = { + "Based on the screenshot of the page, I give a text description and you give its corresponding location. 
The coordinate represents a clickable location [x, y] for an element, which is a relative coordinate on the screenshot, scaled from 0 to 1.<|vision_start|><|image_pad|><|vision_end|>桌面", + }; + + for (int i = 0; i < in_strs.size(); ++i) { + auto in_str = in_strs[i]; + in_str = processor.tokenizer->apply_chat_template(in_str); + auto input_tensor = processor.process(in_str, in_imgs[i]); + std::cout << "[Q] " << in_strs[i] << std::endl; + std::cout << "[A] " << std::flush; + for (int step = 0; step < 100; step++) { + model.get_position_ids(input_tensor); + auto result = model(input_tensor); + auto outputs = processor.detokenize(result[0]); + auto out_string = outputs.first; + auto out_token = outputs.second; + auto [not_end, output_string] = processor.tokenizer->postprocess(out_string); + if (!not_end) { break; } + std::cout << output_string << std::flush; + chatPostProcessing(out_token, input_tensor[0], {&input_tensor[1], &input_tensor[2]}); + } + printf("\n"); + model.clear_kvcache(); + model.profiling(); + } + + return 0; +} \ No newline at end of file diff --git a/examples/demo_smallthinker.cpp b/examples/demo_smallthinker.cpp new file mode 100644 index 000000000..9412e6e7c --- /dev/null +++ b/examples/demo_smallthinker.cpp @@ -0,0 +1,99 @@ +/** + * @file demo_smallthinker.cpp + * @brief A demo for using smallthinker model. 
+ * @author Rongjie Yi + * @date 2025-08-06 + * + */ +#include "Types.hpp" +#include "cmdline.h" +#include "models/smallthinker/configuration_smallthinker.hpp" +#include "models/smallthinker/modeling_smallthinker.hpp" +#include "models/qwen/tokenization_qwen.hpp" + +using namespace mllm; + +int main(int argc, char **argv) { + std::iostream::sync_with_stdio(false); + + cmdline::parser cmdParser; + cmdParser.add("device", 'd', "mllm backend [0:`cpu` | 1:`opencl`]", false, 0); + cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", false, "../vocab/smallthinker_vocab.mllm"); + cmdParser.add("merge", 'e', "specify mllm merge file path", false, "../vocab/smallthinker_merges.txt"); + string default_model_path = "../models/smallthinker-4ba0.6b-instruct-q4_0.mllm"; +#if defined(ARM) + default_model_path = "../models/smallthinker-4ba0.6b-instruct-kai_q4_0.mllm"; +#endif + cmdParser.add("model", 'm', "specify mllm model path", false, default_model_path); + cmdParser.add("limits", 'l', "max KV cache size", false, 500); + cmdParser.add("thread", 't', "num of threads", false, 4); + cmdParser.parse_check(argc, argv); + string vocab_path = cmdParser.get("vocab"); + string merge_path = cmdParser.get("merge"); + string model_path = cmdParser.get("model"); + int tokens_limit = cmdParser.get("limits"); + CPUBackend::cpu_threads = cmdParser.get("thread"); + BackendType device = (BackendType)cmdParser.get("device"); + assert((device == MLLM_CPU || device == MLLM_OPENCL) && "device not supports!"); + + auto tokenizer = QWenTokenizer(vocab_path, merge_path); + string chat_template_pre = "<|im_start|>system\nYou are SmallThinker. 
You are a helpful assistant.<|im_end|>\n<|im_start|>user\n"; + string chat_template_end = "<|im_end|>\n<|im_start|>assistant\n"; + tokenizer.set_chat_template(chat_template_pre, chat_template_end); + SmallThinkerConfig config(tokens_limit, "4ba0.6b-lm"); +#ifdef USE_OPENCL + if (device == MLLM_OPENCL) { + config.dtype = MLLM_TYPE_F16; + config.attn_implementation = "eager"; + model_path = default_model_path; + } +#endif + // config.attn_implementation = "eager"; + // config.attn_implementation = "sage_attention"; + auto model = SmallThinkerForCausalLM(config); +#ifdef USE_OPENCL + model = model.to(device); +#endif + model.load(model_path); + + vector in_strs = { + "Give me a short introduction to large language model.", + "怎样计算1+2+...+100的和?", + "Who are you?", + "背诵天下第一骈文", + "你写一首七言绝句。", + "背诵一下水调歌头。", + "清晨的阳光透过薄纱窗帘,懒洋洋地洒在木地板上,空气中飘散着咖啡豆研磨后特有的醇厚香气。窗外传来几声清脆的鸟鸣,伴随着远处隐约的车流声,构成这座都市尚未完全苏醒的独特交响。书桌上摊开着昨夜未读完的书,书页边缘已微微卷起。厨房里,水壶正发出细密的声响,预示着一天的热饮即将就绪。昨日的计划表贴在冰箱门上,几个重要的待办事项用红笔醒目地圈出。公园里晨练的人们身影绰绰,有节奏的脚步声和太极音乐交织。一只橘猫敏捷地跃上围墙,在晨光中伸展着腰肢,神态悠闲得仿佛它是这片领地的主人。街角的面包店刚拉开铁门,新鲜出炉的面包香气迫不及待地涌向街头。公交站台上,等待的乘客低头刷着手机屏幕,神情各异。云朵缓慢地在湛蓝的天空中移动,时间似乎被拉长了片刻。生活就在这些微小的、平凡的细节里徐徐展开,既不惊天动地,却也充满细碎的温暖和实在的步履。新的一天开始了。\n​​请在以上文本中找出描述“气味”的句子(复制出来),然后判断叙述者对“橘猫”的态度是正面还是负面,最后请用三个成语概括文中描绘的早晨氛围。", + "项羽已杀卿子冠军,威震楚国,名闻诸侯。乃遣当阳君、蒲将军将卒二万渡河,救巨鹿。战少利,陈馀复请兵。项羽乃悉引兵渡河,皆沉船,破釜甑,烧庐舍,持三日粮,以示士卒必死,无一还心。于是至则围王离,与秦军遇,九战,绝其甬道,大破之,杀苏角,虏王离。涉间不降楚,自烧杀。当是时,楚兵冠诸侯。诸侯军救巨鹿下者十余壁,莫敢纵兵。及楚击秦,诸将皆从壁上观。楚战士无不一以当十,楚兵呼声动天,诸侯军无不人人惴恐。于是已破秦军,项羽召见诸侯将,入辕门,无不膝行而前,莫敢仰视。项羽由是始为诸侯上将军,诸侯皆属焉。 问题:结合项羽在巨鹿之战中的战术决策与心理威慑手段,分析其如何实现『楚战士无不一以当十』的战斗效应,并论述这种军事心理学实践对诸侯将领『膝行而前,莫敢仰视』行为模式的生成机制。", + }; + for (int i = 0; i < in_strs.size(); ++i) { + // auto input_str = in_strs[i]; + auto input_str = tokenizer.apply_chat_template(in_strs[i]); + auto input_tensor = tokenizer.tokenize(input_str); + std::cout << "[Q] " << in_strs[i] << std::endl; + std::cout << "[A] " << std::flush; + + LlmTextGeneratorOpts opt{ + .max_new_tokens = static_cast(tokens_limit - input_tensor.sequence()), + 
.do_sample = false, + .temperature = 0.3F, + .top_k = 50, + .top_p = 0.F, + }; + model.generate(input_tensor, opt, [&](unsigned int out_token) -> bool { + auto out_string = tokenizer.detokenize({out_token}); + auto [not_end, output_string] = tokenizer.postprocess(out_string); + if (!not_end) { return false; } + std::cout << output_string << std::flush; + return true; + }); + /* + auto output_tokens = model.generate(input_tensor, opt, tokenizer.eos_id_)[0]; + auto out_string = tokenizer.detokenize(output_tokens); + std::cout << out_string << std::endl; + */ + std::cout << "\n"; + model.clear_kvcache(); + model.profiling(); + } +} diff --git a/examples/demo_smallthinker_mbp.cpp b/examples/demo_smallthinker_mbp.cpp new file mode 100644 index 000000000..df6fc17cd --- /dev/null +++ b/examples/demo_smallthinker_mbp.cpp @@ -0,0 +1,102 @@ +/** + * @file demo_smallthinker.cpp + * @brief A demo for using smallthinker model. + * @author Rongjie Yi + * @date 2025-08-06 + * + */ +#include "Types.hpp" +#include "cmdline.h" +#include "models/smallthinker/configuration_smallthinker.hpp" +// #include "models/smallthinker/modeling_smallthinker.hpp" +#include "models/smallthinker/mbp/modeling_smallthinker_mbp.hpp" +#include "models/qwen/tokenization_qwen.hpp" + +using namespace mllm; + +int main(int argc, char **argv) { + std::iostream::sync_with_stdio(false); + Module::alloc_mmap = false; + + cmdline::parser cmdParser; + cmdParser.add("device", 'd', "mllm backend [0:`cpu` | 1:`opencl`]", false, 0); + cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", false, "../vocab/smallthinker_vocab.mllm"); + cmdParser.add("merge", 'e', "specify mllm merge file path", false, "../vocab/smallthinker_merges.txt"); + string default_model_path = "../models/smallthinker-4ba0.6b-instruct-q4_0.mllm"; +#if defined(ARM) + default_model_path = "../models/smallthinker-4ba0.6b-instruct-kai_q4_0.mllm"; +#endif + cmdParser.add("model", 'm', "specify mllm model path", false, default_model_path); + 
cmdParser.add("limits", 'l', "max KV cache size", false, 500); + cmdParser.add("thread", 't', "num of threads", false, 4); + cmdParser.parse_check(argc, argv); + string vocab_path = cmdParser.get("vocab"); + string merge_path = cmdParser.get("merge"); + string model_path = cmdParser.get("model"); + int tokens_limit = cmdParser.get("limits"); + CPUBackend::cpu_threads = cmdParser.get("thread"); + BackendType device = (BackendType)cmdParser.get("device"); + assert((device == MLLM_CPU || device == MLLM_OPENCL) && "device not supports!"); + + auto tokenizer = QWenTokenizer(vocab_path, merge_path); + string chat_template_pre = "<|im_start|>system\nYou are SmallThinker. You are a helpful assistant.<|im_end|>\n<|im_start|>user\n"; + string chat_template_end = "<|im_end|>\n<|im_start|>assistant\n"; + tokenizer.set_chat_template(chat_template_pre, chat_template_end); + SmallThinkerConfig config(tokens_limit, "4ba0.6b-lm"); +#ifdef USE_OPENCL + if (device == MLLM_OPENCL) { + config.dtype = MLLM_TYPE_F16; + config.attn_implementation = "eager"; + model_path = default_model_path; + } +#endif + // config.attn_implementation = "eager"; + // config.attn_implementation = "sage_attention"; + auto model = SmallThinkerForCausalLM(config); +#ifdef USE_OPENCL + model = model.to(device); +#endif + model.load(model_path); + + vector in_strs = { + "Give me a short introduction to large language model.", + "怎样计算1+2+...+100的和?", + "Who are you?", + "背诵天下第一骈文", + "你写一首七言绝句。", + "背诵一下水调歌头。", + "清晨的阳光透过薄纱窗帘,懒洋洋地洒在木地板上,空气中飘散着咖啡豆研磨后特有的醇厚香气。窗外传来几声清脆的鸟鸣,伴随着远处隐约的车流声,构成这座都市尚未完全苏醒的独特交响。书桌上摊开着昨夜未读完的书,书页边缘已微微卷起。厨房里,水壶正发出细密的声响,预示着一天的热饮即将就绪。昨日的计划表贴在冰箱门上,几个重要的待办事项用红笔醒目地圈出。公园里晨练的人们身影绰绰,有节奏的脚步声和太极音乐交织。一只橘猫敏捷地跃上围墙,在晨光中伸展着腰肢,神态悠闲得仿佛它是这片领地的主人。街角的面包店刚拉开铁门,新鲜出炉的面包香气迫不及待地涌向街头。公交站台上,等待的乘客低头刷着手机屏幕,神情各异。云朵缓慢地在湛蓝的天空中移动,时间似乎被拉长了片刻。生活就在这些微小的、平凡的细节里徐徐展开,既不惊天动地,却也充满细碎的温暖和实在的步履。新的一天开始了。\n​​请在以上文本中找出描述“气味”的句子(复制出来),然后判断叙述者对“橘猫”的态度是正面还是负面,最后请用三个成语概括文中描绘的早晨氛围。", + 
"项羽已杀卿子冠军,威震楚国,名闻诸侯。乃遣当阳君、蒲将军将卒二万渡河,救巨鹿。战少利,陈馀复请兵。项羽乃悉引兵渡河,皆沉船,破釜甑,烧庐舍,持三日粮,以示士卒必死,无一还心。于是至则围王离,与秦军遇,九战,绝其甬道,大破之,杀苏角,虏王离。涉间不降楚,自烧杀。当是时,楚兵冠诸侯。诸侯军救巨鹿下者十余壁,莫敢纵兵。及楚击秦,诸将皆从壁上观。楚战士无不一以当十,楚兵呼声动天,诸侯军无不人人惴恐。于是已破秦军,项羽召见诸侯将,入辕门,无不膝行而前,莫敢仰视。项羽由是始为诸侯上将军,诸侯皆属焉。 问题:结合项羽在巨鹿之战中的战术决策与心理威慑手段,分析其如何实现『楚战士无不一以当十』的战斗效应,并论述这种军事心理学实践对诸侯将领『膝行而前,莫敢仰视』行为模式的生成机制。", + }; + mbp_init(config.num_hidden_layers, config.num_experts); + for (int i = 0; i < in_strs.size(); ++i) { + auto input_str = tokenizer.apply_chat_template(in_strs[i]); + auto input_tensor = tokenizer.tokenize(input_str); + std::cout << "[Q] " << in_strs[i] << std::endl; + std::cout << "[A] " << std::flush; + + LlmTextGeneratorOpts opt{ + .max_new_tokens = static_cast(tokens_limit - input_tensor.sequence()), + .do_sample = false, + .temperature = 0.3F, + .top_k = 50, + .top_p = 0.F, + }; + model.generate(input_tensor, opt, [&](unsigned int out_token) -> bool { + auto out_string = tokenizer.detokenize({out_token}); + auto [not_end, output_string] = tokenizer.postprocess(out_string); + if (!not_end) { return false; } + std::cout << output_string << std::flush; + return true; + }); + /* + auto output_tokens = model.generate(input_tensor, opt, tokenizer.eos_id_)[0]; + auto out_string = tokenizer.detokenize(output_tokens); + std::cout << out_string << std::endl; + */ + std::cout << "\n"; + model.clear_kvcache(); + model.profiling(); + // prinMBPtimes("10_"); + } +} diff --git a/examples/demo_sparse_llama.cpp b/examples/demo_sparse_llama.cpp index 74b6a4045..7af99032b 100644 --- a/examples/demo_sparse_llama.cpp +++ b/examples/demo_sparse_llama.cpp @@ -30,9 +30,7 @@ int main(int argc, char **argv) { LLaMAConfig config(tokens_limit, "7B", HFHUBROPE); auto is_down_sparse = true; auto model = SparseLLaMAModel(config, is_down_sparse); - // MultiFileParamLoader param_loader({model_path, predictor_path, "../ReLULlama_q4_k.mllm"}); - MultiFileParamLoader param_loader({model_path, "../models/ReLULlama_q4_k.mllm"}); - model.load(param_loader); + 
model.load_multifile({model_path, "../models/ReLULlama_q4_k.mllm"}); vector in_strs = { " Hello, who are you?", diff --git a/examples/demo_tinyllama.cpp b/examples/demo_tinyllama.cpp index c2984685b..1af994819 100644 --- a/examples/demo_tinyllama.cpp +++ b/examples/demo_tinyllama.cpp @@ -29,6 +29,7 @@ int main(int argc, char **argv) { tokenizer.set_chat_template(system_prompt_start, system_prompt_end); TinyLLaMAConfig config(tokens_limit, "1.5B", HFHUBROPE); + // config.attn_implementation = "sage_attention"; auto model = TinyLLaMAModel(config); model.load(model_path); @@ -51,6 +52,7 @@ int main(int argc, char **argv) { } printf("\n"); model.profiling(); + model.clear_kvcache(); } return 0; diff --git a/examples/main_alpaca.cpp b/examples/main_alpaca.cpp deleted file mode 100644 index e598f8956..000000000 --- a/examples/main_alpaca.cpp +++ /dev/null @@ -1,163 +0,0 @@ -#include -#include -#include -#include "cmdline.h" -#include "Net.hpp" -#include "Executor.hpp" -#include "express/Express.hpp" -#include "tokenizers/BPE/Bpe.hpp" -using namespace mllm; - -unsigned int argmax(const std::vector& scores) { - if(scores.empty()) { - throw std::invalid_argument("Input vector is empty"); - } - unsigned int maxIndex = 0; - float maxValue = scores[0]; - for(size_t i = 1; i < scores.size(); ++i) { - if(scores[i] > maxValue) { - maxIndex = i; - maxValue = scores[i]; - } - } - return maxIndex; -} -unsigned int postProcessing(shared_ptr result, shared_ptr& out_result){ - assert(result->batch() == 1); - assert(result->head() == 1); - out_result->reshape(1, 1, 1, 1); - out_result->alloc(); - vector scores; - for (int i = 0; i < result->dimension(); ++i) { - auto value = result->dataAt(0, 0, result->sequence()-1, i); - scores.push_back(value); - } - auto token_idx = argmax(scores); - out_result->setDataAt(0, 0, 0, 0, token_idx); - return token_idx; -} - - -NetTensor *Attention( NetTensor * x, int embedding_size, int hidden_size, int head_size, int cache_max, string name){ - auto *q 
=_Linear({x}, embedding_size, hidden_size * head_size, false, name + ".q_proj"); - auto *k =_Linear({x}, embedding_size, hidden_size * head_size, false, name + ".k_proj"); - auto *v =_Linear({x}, embedding_size, hidden_size * head_size, false, name + ".v_proj"); - q = q->view(-1, head_size, -1, hidden_size); - k = k->view(-1, head_size, -1, hidden_size); - v = v->view(-1, head_size, -1, hidden_size); - q = _RoPE( {q}, HFHUBROPE, name + ".q_rope"); - k = _RoPE( {k}, HFHUBROPE, name + ".k_rope"); - k = _KVCache( {k}, cache_max, name + ".k_cache"); - v = _KVCache( {v}, cache_max, name + ".v_cache"); - auto *qk = _Matmul( {q, k}, false, true, name + ".qk"); - qk = *qk/std::sqrt(hidden_size); - // qk = _Causalmask( {qk}, name + ".mask"); - qk = _Softmax( {qk}, DIMENSION, true, name + ".softmax"); - auto *o = _Matmul( {qk, v}, false, false, name + ".qkv"); - o = o->view(-1, 1, -1, hidden_size * head_size); - o = _Linear( {o}, hidden_size * head_size, embedding_size, false, name + ".o_proj"); - return o; -} -NetTensor *FFN( NetTensor * i, int hidden_dim, int ffn_hidden_dim, string name){ - auto *x = _Linear( {i}, hidden_dim, ffn_hidden_dim, false, name+".gate_proj"); - x = _SiLU( {x}, name+".silu"); - auto *y = _Linear( {i}, hidden_dim, ffn_hidden_dim, false, name+".up_proj"); - x = *x*y;// x = _Mul( {x, y}, name+".dot"); - x = _Linear( {x}, ffn_hidden_dim, hidden_dim, false, name+".down_proj"); - return x; -} -void llama(Context* c, int vocab_size= 55296, int hidden_dim= 4096, int ffn_hidden_dim = 11008, int mutil_head_size = 32, int cache_max = 200){ - auto *i = _Input(c); - i = _Embedding( {i}, vocab_size, hidden_dim, "model.embed_tokens"); - // loop - for(int layer=0; layer<32; ++layer) { - auto *x = _RMSNorm( {i}, hidden_dim, 1e-6, "model.layers."+std::to_string(layer)+".input_layernorm"); - i = *Attention( x, hidden_dim, hidden_dim / mutil_head_size, mutil_head_size, cache_max, "model.layers."+std::to_string(layer)+".self_attn") +i; - x = _RMSNorm( {i}, hidden_dim, 
1e-6, "model.layers."+std::to_string(layer)+".post_attention_layernorm"); - i = *FFN( x, hidden_dim, ffn_hidden_dim, "model.layers."+std::to_string(layer) +".mlp") +i; - //_SubgraphBegin(c); - } - // end loop - i = _RMSNorm( {i}, hidden_dim, 1e-6, "model.norm"); - i = _Linear( {i}, hidden_dim, vocab_size, false, "lm_head"); -} -int main(int argc, char **argv) { - cmdline::parser cmdParser; - cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", false, "../vocab/chinese-alpaca_vocab.mllm"); - cmdParser.add("model", 'm', "specify mllm model path", false, "../models/chinese-alpaca-7b-q4_k.mllm"); - cmdParser.add("limits", 'l', "max KV cache size", false, 400); - cmdParser.add("thread", 't', "num of threads", false, 4); - cmdParser.parse_check(argc, argv); - - // string in_str = cmdParser.get("input"); - string vocab_path = cmdParser.get("vocab"); - string model_path = cmdParser.get("model"); - int token_limit = cmdParser.get("limits"); - int thread_num = cmdParser.get("thread"); - - auto tokenizer = BPETokenizer(vocab_path); - - int vocab_size = 55296; - int hidden_dim = 4096; - int ffn_hidden_dim = 11008; - int mutil_head_size = 32; - - std::unique_ptr c_ptr(new Context()); - auto *c = c_ptr.get(); - llama(c, vocab_size, hidden_dim, ffn_hidden_dim, mutil_head_size, token_limit); - - BackendConfig bn; - Net net(bn); - net.convert(c->sub_param_, BackendType::MLLM_CPU, thread_num); - - ParamLoader param_loader(model_path); - Executor ex(¶m_loader); - ex.setup(&net); - - - vector in_strs = { - " 介绍北京邮电大学。", - " 介绍北京。", - }; - shared_ptr input = std::make_shared(); - for (int str_i = 0; str_i < in_strs.size(); ++str_i) - { - auto in_str = in_strs[str_i]; - if(in_str[0] != ' '){ - in_str = ' '+ in_str; - } - auto tokens_id = vector(); - tokenizer.tokenize(in_str, tokens_id, true); - if(str_i > 0) { - tokens_id[0] = 13; - } - BPETokenizer::token2Tensor( &net, tokens_id, input); - std::cout <<"[Q] "<< in_str << std::endl; - std::cout <<"[A] "<< std::flush; - 
for(int step = 0; step<100; step++) { - ex.run(&net, {input}); - auto result = ex.result(); - auto token_idx = postProcessing(result[0], input); - if(token_idx == 2){// "" - break; - } - auto out_token = tokenizer.detokenize({token_idx}); - std::cout << out_token << std::flush; - } - printf("\n"); - } - - - ex.perf(); - - - - // free memory - for (auto *op : c->net_ops) { - delete op; - } - for (auto *tensor : c->net_tensors) { - delete tensor; - } - return 0; -} diff --git a/examples/main_clip.cpp b/examples/main_clip.cpp deleted file mode 100644 index c5f37becf..000000000 --- a/examples/main_clip.cpp +++ /dev/null @@ -1,201 +0,0 @@ -#include -#include -#include -#include "cmdline.h" -#include "Net.hpp" -#include "Executor.hpp" -#include "express/Express.hpp" -#include "tokenizers/BPE/Bpe.hpp" -#include "processor/ClipPreProcess.hpp" -#include -#include -#include - -using namespace mllm; - -vector softmax(const vector& scores) { - vector exps; - float max_val = *max_element(scores.begin(), scores.end()); - for (float score : scores) { - exps.push_back(exp(score - max_val)); - } - float sum_exps = accumulate(exps.begin(), exps.end(), 0.0f); - for (float& exp : exps) { - exp /= sum_exps; - } - return exps; -} -vector postProcessing(shared_ptr result){ - vector scores; - for (int i = 0; i < result->batch(); ++i) { - auto value = result->dataAt(i, 0, 0, 0); - scores.push_back(value); - } - auto token_idx = softmax(scores); - return token_idx; -} - -NetTensor *Attention(NetTensor *x, int embedding_size, int hidden_size, int head_size, string name) { - auto *q = _Linear( {x}, embedding_size, hidden_size * head_size, true, name + ".q_proj"); - auto *k = _Linear( {x}, embedding_size, hidden_size * head_size, true, name + ".k_proj"); - auto *v = _Linear( {x}, embedding_size, hidden_size * head_size, true, name + ".v_proj"); - q = q->view(-1, head_size, -1, hidden_size); - k = k->view(-1, head_size, -1, hidden_size); - v = v->view(-1, head_size, -1, hidden_size); - auto *qk 
= _Matmul( {q, k}, false, true, name + ".qk"); - qk = _Scale( {qk}, 1.0F / std::sqrt(hidden_size), 0.0F, false, name + ".scale"); - if(name.find("text_model") != std::string::npos){ - // qk = _Causalmask( {qk}, name + ".mask"); - qk = _Softmax( {qk}, DIMENSION, true, name + ".softmax"); - } else{ - qk = _Softmax( {qk}, DIMENSION, false, name + ".softmax"); - } - auto *o = _Matmul( {qk, v}, false, false, name + ".qkv"); - o = o->view(-1, 1, -1, hidden_size * head_size); - o = _Linear( {o}, hidden_size * head_size, embedding_size, true, name + ".out_proj"); - return o; -} -NetTensor *MLP( NetTensor *i, int hidden_dim, int ffn_hidden_dim, string name) { - auto *x = _Linear( {i}, hidden_dim, ffn_hidden_dim, true, name + ".fc1"); - x = _QuickGELU( {x}, name + ".act_fn"); - x = _Linear( {x}, ffn_hidden_dim, hidden_dim, true, name + ".fc2"); - return x; -} -NetTensor *VisionEmbedding(Context *c, NetTensor * i, int hidden_size, string name) { - i = _Convolution2D({i}, 3, 768, {32, 32}, {32, 32}, VALID, false, name +".patch_embedding"); - i = i->transpose(SEQUENCE, DIMENSION); - i = i->flatten(HEAD, SEQUENCE); - auto *s = _Parameter(c, {}, 1, 1, 1, 768, name +".class_embedding"); - i = _Cat( {s, i}, SEQUENCE, name +".class_embedding.cat"); - s = _Parameter(c, {}, 1, 50, 1, 1, name +".position_ids"); - i = *_Embedding( {s}, 50, 768, name +".position_embedding") + i; - return i; -} -NetTensor *TextEmbedding(Context *c, NetTensor * i, int vocab_size, int hidden_dim, int max_position_embeddings, string name) { - i = _Embedding( {i}, vocab_size, hidden_dim, name +".token_embedding"); - auto *s = _Parameter(c, {}, 1, max_position_embeddings, 1, 1, name +".position_ids"); - s = s->_clip({}, {}, {0, i->shape(SEQUENCE)}, {}); - i = *_Embedding( {s}, max_position_embeddings, hidden_dim, name +".position_embedding") +i; - return i; -} -NetTensor *transformer(Context *c, NetTensor * i, int vocab_size = 49408, int hidden_dim = 512, int ffn_hidden_dim = 2048, int mutil_head_size = 8, 
string name="text_model") { - // auto *i = _Input(c); - i = TextEmbedding(c, i, vocab_size, hidden_dim, 77, name+".embeddings"); - // loop - for (int layer = 0; layer < 12; ++layer) { - auto *x = _LayerNorm( {i}, hidden_dim, true, 1e-6, name+".encoder.layers." + std::to_string(layer) + ".layer_norm1"); - i = *Attention( x, hidden_dim, hidden_dim / mutil_head_size, mutil_head_size, name+".encoder.layers." + std::to_string(layer) + ".self_attn")+i; - x = _LayerNorm( {i}, hidden_dim, true, 1e-6, name+".encoder.layers." + std::to_string(layer) + ".layer_norm2"); - i = *MLP( x, hidden_dim, ffn_hidden_dim, name+".encoder.layers." + std::to_string(layer) + ".mlp") +i; - //_SubgraphBegin(c); - } - // end loop - i = _LayerNorm( {i}, hidden_dim,true, 1e-6, name + ".final_layer_norm"); - i = i->clip( {}, {}, {-1}, {}); - return i; -} -NetTensor *vit(Context* c, NetTensor * i, int hidden_dim= 768, int ffn_hidden_dim = 3072, int class_size=1000, int mutil_head_size = 12, string name = "vision_model"){ - // auto *i = _Input(c, {}, "input_ids"); - i = VisionEmbedding(c, i, hidden_dim, name+".embeddings"); - i = _LayerNorm( {i}, hidden_dim, true,1e-6, name + ".pre_layrnorm"); - for(int layer=0; layer<12; ++layer) { - auto *x = _LayerNorm( {i}, hidden_dim, true,1e-6, name + ".encoder.layers."+std::to_string(layer)+".layer_norm1"); - i = *Attention( x, hidden_dim, hidden_dim / mutil_head_size, mutil_head_size, name + ".encoder.layers."+std::to_string(layer)+".self_attn")+i; - x = _LayerNorm( {i}, hidden_dim, true, 1e-6, name + ".encoder.layers."+std::to_string(layer)+".layer_norm2"); - i = *MLP( x, hidden_dim, ffn_hidden_dim, name + ".encoder.layers."+std::to_string(layer)+ ".mlp") +i; - _SubgraphBegin(c); - } - i = i->clip( {}, {}, {0}, {}); - i = _LayerNorm( {i}, hidden_dim, true, 1e-6, name + ".post_layernorm"); - return i; -} - -void CLIP(Context* c) { - auto *i = _Input(c, {}, "input_ids"); - i = transformer(c, i); - auto *p = _Input(c, {}, "input_imgs"); - p = vit(c, p); - i = 
_Linear( {i}, 512, 512, false, "text_projection"); - i = *i/i->norm(2); - p = _Linear( {p}, 768, 512, false, "visual_projection"); - p = *p/p->norm(2); - auto *o = _Matmul( {i, p}, false, true, "matmul"); - o = _Scale( {o}, 100.0, 0.0F, false, "scale"); -} -int main(int argc, char **argv) { - cmdline::parser cmdParser; - cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", false, "../vocab/clip_vocab.mllm"); - cmdParser.add("model", 'm', "specify mllm model path", false, "../models/clip-vit-base-patch32-q4_k.mllm"); - cmdParser.add("merges", 'f', "specify mllm tokenizer merges.txt path", false, "../vocab/clip_merges.txt"); - cmdParser.add("thread", 't', "num of threads", false, 4); - cmdParser.parse_check(argc, argv); - - string vocab_path = cmdParser.get("vocab"); - string model_path = cmdParser.get("model"); - string merges_path = cmdParser.get("merges"); - int thread_num = cmdParser.get("thread"); - - std::unique_ptr c_ptr(new Context()); - auto *c = c_ptr.get(); - - CLIP(c); - - BackendConfig bn; - Net net(bn); - net.convert(c->sub_param_, BackendType::MLLM_CPU, thread_num); - - ParamLoader param_loader(model_path); - Executor ex(¶m_loader); - ex.setup(&net); - - auto tokenizer = new BPETokenizer(vocab_path); - std::unordered_map merge_rank; - auto merge_file = std::ifstream(merges_path); - std::string line; - unsigned rank=0; - while (std::getline(merge_file, line)) { - if (line.empty()) { - continue; - } - if (line[0]=='#'){ - continue; - } - merge_rank[line]=rank; - rank++; - } - tokenizer->setMergeRank(merge_rank); - tokenizer->setSpecialToken("<|startoftext|>","<|endoftext|>"); - - vector in_strs = {"a photo of a cat", "a photo of a dog"}; - auto tokens_ids = vector>(); - for (auto in_str : in_strs) { - vector tokens_id={}; - tokenizer->tokenize(in_str, tokens_id, true, true, ""); - tokens_ids.push_back(tokens_id); - } - shared_ptr input_text = std::make_shared(); - BPETokenizer::tokens2Tensor(&net, tokens_ids, input_text); - - shared_ptr 
input_img = std::make_shared(); - auto *clip_processor = new ClipPreProcessor(tokenizer); - clip_processor->PreProcessImages({"../assets/cat.jpg"}); - auto images = clip_processor->pixel_values_[0]; - clip_processor->Img2Tensor(net.backends()[BackendType::MLLM_CPU].get(), input_img, images); - ex.run(&net, {input_text, input_img}); - auto result = ex.result(); - auto probs = postProcessing(result[0]); - for (auto prob : probs) { - std::cout << prob << " "; - } - std::cout << std::endl; - // ex.perf(); - - // free memory - for (auto *op : c->net_ops) { - delete op; - } - for (auto *tensor : c->net_tensors) { - delete tensor; - } - return 0; -} diff --git a/examples/main_fuyu.cpp b/examples/main_fuyu.cpp deleted file mode 100644 index 762e46a1b..000000000 --- a/examples/main_fuyu.cpp +++ /dev/null @@ -1,235 +0,0 @@ -#include -#include -#include -#include -#include "cmdline.h" -#include "Net.hpp" -#include "Executor.hpp" -#include "express/Express.hpp" -#include "tokenizers/BPE/Bpe.hpp" -#include "tokenizers/Unigram/Unigram.hpp" -#include "processor/FuyuPreProcess.hpp" - -using namespace std; - -void fullTensor(shared_ptr input_tensor, Net &net, vector shape) { - input_tensor->setBackend(net.backends()[BackendType::MLLM_CPU].get()); - input_tensor->reshape(shape[0], shape[1], shape[2], shape[3]); - input_tensor->setDtype(MLLM_TYPE_F32); - input_tensor->alloc(); - input_tensor->fullData(1); -} - -void patches2Tensor(shared_ptr input_tensor, Net &net, vector>> image_patches) { - if (image_patches.empty()) { - fullTensor(input_tensor, net, {0, 0, 0, 0}); - return; - } - const int batch = image_patches.size(); - const int seq = image_patches[0].size(); - const int dims = image_patches[0][0].size(); - input_tensor->setBackend(net.backends()[BackendType::MLLM_CPU].get()); - input_tensor->reshape(batch, 1, seq, dims); - input_tensor->setDtype(MLLM_TYPE_F32); - input_tensor->alloc(); - for (int i = 0; i < batch; ++i) { - for (int j = 0; j < seq; ++j) { - for (int k = 0; k < 
dims; ++k) { - input_tensor->setDataAt(i, 0, j, k, image_patches[i][j][k]); - } - } - } -} - -void patchIdx2Tensor(shared_ptr input_tensor, Net &net, vector> image_patches_indices) { - if (image_patches_indices.empty()) { - fullTensor(input_tensor, net, {0, 0, 0, 0}); - return; - } - const int batch = image_patches_indices.size(); - const int seq = image_patches_indices[0].size(); - input_tensor->setBackend(net.backends()[BackendType::MLLM_CPU].get()); - input_tensor->reshape(batch, 1, seq, 1); - input_tensor->setDtype(MLLM_TYPE_F32); - input_tensor->alloc(); - for (int i = 0; i < batch; ++i) { - for (int j = 0; j < seq; ++j) { - input_tensor->setDataAt(i, 0, j, 0, image_patches_indices[i][j]); - } - } -} - -unsigned int argmax(const std::vector &scores) { - if (scores.empty()) { - throw std::invalid_argument("Input vector is empty"); - } - unsigned int maxIndex = 0; - float maxValue = scores[0]; - for (size_t i = 1; i < scores.size(); ++i) { - if (scores[i] > maxValue) { - maxIndex = i; - maxValue = scores[i]; - } - } - return maxIndex; -} -unsigned int postProcessing(shared_ptr result, shared_ptr &out_result) { - assert(result->batch() == 1); - assert(result->head() == 1); - out_result->reshape(1, 1, 1, 1); - out_result->alloc(); - vector scores; - for (int i = 0; i < result->dimension(); ++i) { - auto value = result->dataAt(0, 0, result->sequence() - 1, i); - scores.push_back(value); - } - auto token_idx = argmax(scores); - out_result->setDataAt(0, 0, 0, 0, token_idx); - return token_idx; -} - -NetTensor *Attention(NetTensor *x, int embedding_size, int hidden_size, int head_size, int cache_max, string name) { - x = _Linear({x}, embedding_size, hidden_size * head_size * 3, true, name + ".query_key_value"); - auto skv = _Split({x}, 3, Chl::D_HD, head_size, name + ".split"); - auto *q = skv[0]; - auto *k = skv[1]; - auto *v = skv[2]; - q = _LayerNorm({q}, hidden_size, true, 1e-6, name + ".q_layernorm"); - k = _LayerNorm({k}, hidden_size, true, 1e-6, name + 
".k_layernorm"); - q = _RoPE({q}, PERSIMMONROPE, name + ".q_rope"); - k = _RoPE({k}, PERSIMMONROPE, name + ".k_rope"); - k = _KVCache({k}, cache_max, name + ".k_cache"); - v = _KVCache({v}, cache_max, name + ".v_cache"); - auto *qk = _Matmul({q, k}, false, true, name + ".qk"); - qk = _Scale({qk}, 1.0F / std::sqrt(head_size), 0.0F, false, name + ".scale"); - // qk = _Causalmask({qk}, name + ".mask"); - qk = _Softmax({qk}, DIMENSION, true, name + ".softmax"); - auto *o = _Matmul({qk, v}, false, false, name + ".qkv"); - o = o->view(-1, 1, -1, hidden_size * head_size); - o = _Linear({o}, hidden_size * head_size, embedding_size, true, name + ".dense"); - return o; -} -NetTensor *MLP(NetTensor *i, int hidden_dim, int ffn_hidden_dim, string name) { - auto *x = _Linear({i}, hidden_dim, ffn_hidden_dim, true, name + ".dense_h_to_4h"); - x = _ReLUSquaredActivation({x}, name + ".relu2"); - x = _Linear({x}, ffn_hidden_dim, hidden_dim, true, name + ".dense_4h_to_h"); - return x; -} -NetTensor *Persimmon(Context *c, NetTensor *i, int hidden_dim = 4096, int ffn_hidden_dim = 4096 * 4, int mutil_head_size = 64, int cache_max = 500, string name = "language_model.model") { - // loop - for (int layer = 0; layer < 36; ++layer) { - auto *x = _LayerNorm({i}, hidden_dim, true, 1e-6, name + (string) ".layers." + std::to_string(layer) + ".input_layernorm"); - x = Attention(x, hidden_dim, hidden_dim / mutil_head_size, mutil_head_size, cache_max, name + (string) ".layers." + std::to_string(layer) + ".self_attn"); - i = _Add({x, i}, name + (string) ".layers." + std::to_string(layer) + ".add_attn"); - x = _LayerNorm({i}, hidden_dim, true, 1e-6, name + (string) ".layers." + std::to_string(layer) + ".post_attention_layernorm"); - x = MLP(x, hidden_dim, ffn_hidden_dim, name + (string) ".layers." + std::to_string(layer) + ".mlp"); - i = _Add({x, i}, name + (string) ".layers." 
+ std::to_string(layer) + ".add_mlp"); - _SubgraphBegin(c); - } - // end loop - i = _LayerNorm({i}, hidden_dim, true, 1e-6, name + (string) ".final_layernorm"); - return i; -} -void Fuyu(Context *c, int vocab_size = 262144, int patch_size = 30, int cnl_size = 3, int hidden_dim = 4096, int ffn_hidden_dim = 4096 * 4, int mutil_head_size = 32, int cache_max = 500) { - auto *i = _Input(c, {}, "input_ids"); - i = _Embedding({i}, vocab_size, hidden_dim, (string) "language_model.model.embed_tokens"); - auto *p = _Input(c, {}, "image_patches"); - p = _Linear({p}, patch_size * patch_size * cnl_size, hidden_dim, true, "vision_embed_tokens"); - auto *id = _Input(c, {}, "image_patches_indices"); - i = _Gather({i, p, id}, "gather"); - i = Persimmon(c, i, hidden_dim, ffn_hidden_dim, mutil_head_size, cache_max, "language_model.model"); - i = _Linear({i}, hidden_dim, vocab_size, false, "language_model.lm_head"); -} -int main(int argc, char **argv) { - cmdline::parser cmdParser; - cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", false, "../vocab/fuyu_vocab.mllm"); - cmdParser.add("model", 'm', "specify mllm model path", false, "../models/fuyu-8b-q4_k.mllm"); - cmdParser.add("limits", 'l', "max KV cache size", false, 500); - cmdParser.add("thread", 't', "num of threads", false, 4); - cmdParser.parse_check(argc, argv); - - string vocab_path = cmdParser.get("vocab"); - string model_path = cmdParser.get("model"); - int tokens_limit = cmdParser.get("limits"); - int thread_num = cmdParser.get("thread"); - - auto tokenizer = UnigramTokenizer(vocab_path); - - int vocab_size = 262144; - int hidden_dim = 4096; - int ffn_hidden_dim = 4096 * 4; - int mutil_head_size = 64; - int patch_size = 30; - - std::unique_ptr c_ptr(new Context()); - auto *c = c_ptr.get(); - Fuyu(c, vocab_size, patch_size, 3, hidden_dim, ffn_hidden_dim, mutil_head_size, tokens_limit); - - BackendConfig bn; - Net net(bn); - net.convert(c->sub_param_, BackendType::MLLM_CPU, thread_num); - ParamLoader 
param_loader(model_path); - Executor ex(¶m_loader); - ex.setup(&net); - - std::vector> in_imgs = { - {"../assets/bus.png"}, - {"../assets/two_cats.jpg"}}; - vector in_strs = { - "Generate a coco-style caption.\n", - "What's this?\n"}; - shared_ptr input_seq = std::make_shared(); - shared_ptr img_patch = std::make_shared(); - shared_ptr img_patch_id = std::make_shared(); - for (int inId = 0; inId < in_strs.size(); ++inId) { - auto in_str = in_strs[inId]; - auto in_img = in_imgs[inId]; - auto preprocessor = FuyuPreProcess(&tokenizer); - preprocessor.images_.clear(); - preprocessor.image_input_ids_.clear(); - preprocessor.image_patches_indices_.clear(); - preprocessor.image_patches_.clear(); - preprocessor.PreProcessImages(in_img); - preprocessor.Process(in_str); - auto input_ids = preprocessor.image_input_ids_; - auto image_patches_indices = preprocessor.image_patches_indices_; - auto image_patches = preprocessor.image_patches_; - if (input_ids.empty()) { - input_ids = preprocessor.text_ids_; - } - UnigramTokenizer::token2Tensor(&net, input_ids[0], input_seq); - patches2Tensor(img_patch, net, image_patches); - patchIdx2Tensor(img_patch_id, net, image_patches_indices); - std::cout << "[Q] ["; - if (!in_img.empty()) { - std::cout << in_img[0]; - } - std::cout << "]" << in_str << std::endl; - std::cout << "[A] " << std::flush; - for (int step = 0; step < 50; step++) { - ex.run(&net, {input_seq, img_patch, img_patch_id}); - auto result = ex.result(); - auto token_idx = postProcessing(result[0], input_seq); - // std::cout << token_idx << std::endl; - if (token_idx == 71013) { - break; - } - fullTensor(img_patch, net, {0, 0, 0, 0}); - fullTensor(img_patch_id, net, {0, 0, 0, 0}); - auto out_token = tokenizer.detokenize({token_idx}); - std::cout << out_token << std::flush; - } - printf("\n"); - } - - ex.perf(); - - // free memory - for (auto *op : c->net_ops) { - delete op; - } - for (auto *tensor : c->net_tensors) { - delete tensor; - } - return 0; -} \ No newline at end of 
file diff --git a/examples/main_imagebind.cpp b/examples/main_imagebind.cpp deleted file mode 100644 index cde79f4a7..000000000 --- a/examples/main_imagebind.cpp +++ /dev/null @@ -1,322 +0,0 @@ -#include -#include -#include -#include "cmdline.h" -#include "Net.hpp" -#include "Executor.hpp" -#include "express/Express.hpp" -#include "tokenizers/BPE/Bpe.hpp" -#include "processor/ClipPreProcess.hpp" -#include -#include -#include - -using namespace mllm; - -std::string toLowercase(const std::string& input) { - std::string output = input; - std::transform(output.begin(), output.end(), output.begin(), - [](unsigned char c){ return std::tolower(c); }); - return output; -} - -void tokens2Tensor(Net *net, vector> tokens, shared_ptr input_tensor, shared_ptr input_text_lens) { - input_tensor->setBackend(net->backends()[BackendType::MLLM_CPU].get()); - const auto bsize = static_cast(tokens.size()); - input_tensor->reshape(bsize, 1, 77, 1); - input_tensor->alloc(); - - input_text_lens->setBackend(net->backends()[BackendType::MLLM_CPU].get()); - input_text_lens->reshape(1, 1, 1, bsize); - input_text_lens->alloc(); - - for (int b = 0; b < bsize; ++b){ - input_text_lens->setDataAt(0, 0, 0, b, tokens[b].size()-1); - for (int idx = 0; idx < 77; ++idx) { - if(idx < tokens[b].size()) { - input_tensor->setDataAt(b, 0, idx, 0, tokens[b][idx]); - }else { - input_tensor->setDataAt(b, 0, idx, 0, 0); - } - } - } -} - -void img2Tensor(shared_ptr input_tensor, Net &net, vector>>> imgs) { - int channel = imgs[0].size(); - int height = imgs[0][0].size(); - int width= imgs[0][0][0].size(); - input_tensor->setBackend(net.backends()[BackendType::MLLM_CPU].get()); - input_tensor->reshape(imgs.size(), channel, 2, height, width); - input_tensor->setDtype(MLLM_TYPE_F32); - input_tensor->alloc(); - for (int bi = 0; bi < imgs.size(); ++bi) { - for (int t = 0; t < 2; ++t) { - for (int h = 0; h < height; ++h) { - for (int c = 0; c < channel; ++c) { - for (int w = 0; w < width; ++w) { - 
input_tensor->setDataAt(bi, c, t, h, w, imgs[bi][c][h][w]); - } - } - } - } - } -} -void audio2Tensor(shared_ptr input_tensor, Net &net, vector>>> audio) { - vector>> audio_new; - for (auto auv : audio) { - for (auto au : auv) { - audio_new.push_back(au); - } - } - int batch = audio_new.size(); - int channel = 1; - int height = audio_new[0].size(); - int width= audio_new[0][0].size(); - input_tensor->setBackend(net.backends()[BackendType::MLLM_CPU].get()); - input_tensor->reshape(batch, height, channel, width); - input_tensor->setDtype(MLLM_TYPE_F32); - input_tensor->alloc(); - for (int bi = 0; bi < audio_new.size(); ++bi) { - for (int h = 0; h < height; ++h) { - for (int w = 0; w < width; ++w) { - input_tensor->setDataAt(bi, h, 0, w, audio_new[bi][h][w]); - } - } - } -} - -void showResult(shared_ptr tensor){ - std::cout<<"vision X text :"<sequence(); ++s) { - for (int d = 0; d < tensor->dimension(); ++d) { - std::cout<dataAt(0, 0, s, d)<<" "; - } - std::cout<sequence(); ++s) { - for (int d = 0; d < tensor->dimension(); ++d) { - std::cout<dataAt(1, 0, s, d)<<" "; - } - std::cout<view(-1, 1, -1, hidden_size * head_size); - o = _Linear( {o}, hidden_size * head_size, embedding_size, true, name + ".out_proj"); - return o; -} -NetTensor *MLP( NetTensor *i, int hidden_dim, int ffn_hidden_dim, string name) { - auto *x = _Linear( {i}, hidden_dim, ffn_hidden_dim, true, name + ".fc1"); - x = _GELU( {x}, name + ".act_fn"); - x = _Linear( {x}, ffn_hidden_dim, hidden_dim, true, name + ".fc2"); - return x; -} -NetTensor *VisionEmbedding(Context *c, NetTensor * i, int hidden_size, string name) { //TODO - i = _Convolution3D({i}, 3, 1280, {2, 14, 14}, {2, 14, 14}, VALID, false, name +".rgbt_stem.proj.1"); - i = i->transpose(THW, CHANNLE); - i = i->flatten(TIME, WIDTH); - auto *s = _Parameter(c, {}, 1, 1, 1, 1280, name +".cls_token"); - i = _Cat( {s, i}, SEQUENCE, name +".rgbt_cls.cat"); - s = _Parameter(c, {}, 1, 257, 1, 1280, name +".pos_embedding_helper.pos_embed"); - i = *s + i; 
- return i; -} -NetTensor *VisonModel(Context* c, NetTensor * i, int hidden_dim= 1280, int ffn_hidden_dim = 5120, int mutil_head_size = 16, string name = "vision"){ - i = VisionEmbedding(c, i, hidden_dim, "modality_preprocessors."+name); - i = _LayerNorm( {i}, hidden_dim, true,1e-6, "modality_trunks."+name + ".pre_transformer_layer.0"); - for(int layer=0; layer<32; ++layer) { - auto *x = _LayerNorm( {i}, hidden_dim, true,1e-6, "modality_trunks."+name + ".blocks."+std::to_string(layer)+".norm_1"); - i = *Attention(c, x, hidden_dim, hidden_dim / mutil_head_size, mutil_head_size, "modality_trunks."+name + ".blocks."+std::to_string(layer)+".attn") +i; - x = _LayerNorm( {i}, hidden_dim, true, 1e-6, "modality_trunks."+name + ".blocks."+std::to_string(layer)+".norm_2"); - i = *MLP( x, hidden_dim, ffn_hidden_dim, "modality_trunks."+name + ".blocks."+std::to_string(layer)+ ".mlp") + i; - } - i = _LayerNorm( {i}, hidden_dim, true, 1e-6, "modality_heads."+ name + ".0"); - i = i->clip( {}, {}, {0}, {}); - i = _Linear( {i}, hidden_dim, 1024, false, "modality_heads."+ name + ".2"); - i = *i/i->norm(2); - return i; -} - - -NetTensor *TextEmbedding(Context *c, NetTensor * i, int vocab_size, int hidden_dim, int max_position_embeddings, string name) { - i = _Embedding( {i}, vocab_size, hidden_dim, name +".token_embedding"); - auto *s = _Parameter(c, {}, 1, max_position_embeddings, 1, hidden_dim, name +".pos_embed"); - i = *s + i; - return i; -} -NetTensor *TextModel(Context *c, NetTensor * i, NetTensor * in_len, int vocab_size = 49408, int hidden_dim = 1024, int ffn_hidden_dim = 4096, int mutil_head_size = 16, string name="text") { - i = TextEmbedding(c, i, vocab_size, hidden_dim, 77, "modality_preprocessors."+name); - for (int layer = 0; layer < 24; ++layer) { - auto *x = _LayerNorm( {i}, hidden_dim, true, 1e-6, "modality_trunks."+name+".blocks." 
+ std::to_string(layer) + ".norm_1"); - i = *Attention(c, x, hidden_dim, hidden_dim / mutil_head_size, mutil_head_size, "modality_trunks."+name+".blocks." + std::to_string(layer) + ".attn") +i; - x = _LayerNorm( {i}, hidden_dim, true, 1e-6, "modality_trunks."+name+".blocks." + std::to_string(layer) + ".norm_2"); - i = *MLP( x, hidden_dim, ffn_hidden_dim, "modality_trunks."+name+".blocks." + std::to_string(layer) + ".mlp") +i; - } - i = i->_clip({}, {}, {in_len}, {}); - i = _LayerNorm( {i}, hidden_dim,true, 1e-6,"modality_heads."+ name + ".proj.0"); - i = _Linear( {i}, hidden_dim, 1024, false, "modality_heads."+ name + ".proj.1"); - i = *i/i->norm(2); - i = *i*100.0; // i = _Scale( {i}, 100.0, 0.0F, false, "modality_postprocessors."+name +".logit_scale"); - return i; -} - -NetTensor *AudioEmbedding(Context *c, NetTensor * i, int hidden_size, string name) { //input: 9, 1, 128, 204 - i = _Convolution2D({i}, 1, 768, {16, 16}, {10, 10}, VALID, false, name +".rgbt_stem.proj"); // 9, 768, 12, 19 - i = i->transpose(SEQUENCE, DIMENSION); - i = i->flatten(HEAD, SEQUENCE); - i = _LayerNorm( {i}, hidden_size, true, 1e-6, name +".rgbt_stem.norm_layer"); - auto *s = _Parameter(c, {}, 1, 1, 1, 768, name +".cls_token"); - i = _Cat( {s, i}, SEQUENCE, name +".cls_token.cat"); - s = _Parameter(c, {}, 1, 229, 1, 768, name +".pos_embedding_helper.pos_embed"); - i = *s + i; - return i; -} -NetTensor *AudioModel(Context* c, NetTensor * i, int hidden_dim= 768, int ffn_hidden_dim = 3072, int mutil_head_size = 12, string name = "audio"){ - i = AudioEmbedding(c, i, hidden_dim, "modality_preprocessors."+name); - for(int layer=0; layer<12; ++layer) { - auto *x = _LayerNorm( {i}, hidden_dim, true,1e-6, "modality_trunks."+name + ".blocks."+std::to_string(layer)+".norm_1"); - i = *Attention(c, x, hidden_dim, hidden_dim / mutil_head_size, mutil_head_size, "modality_trunks."+name + ".blocks."+std::to_string(layer)+".attn") +i; - x = _LayerNorm( {i}, hidden_dim, true, 1e-6, "modality_trunks."+name + 
".blocks."+std::to_string(layer)+".norm_2"); - i = *MLP( x, hidden_dim, ffn_hidden_dim, "modality_trunks."+name + ".blocks."+std::to_string(layer)+ ".mlp") + i; - } - i = _LayerNorm( {i}, hidden_dim, true, 1e-6, "modality_heads."+ name + ".0"); - i = i->clip( {}, {}, {0}, {}); - i = _Linear( {i}, hidden_dim, 1024, false, "modality_heads."+ name + ".2"); - i = *i/i->norm(2); - i = *i*20.0; - i = i->view(ANYDIM, -1, 3, -1); - i = i->mean(SEQUENCE); - return i; -} - -void ImageBind(Context* c) { - auto *i = _Input(c, {}, "input_ids"); - auto *i_len = _Input(c, {}, "input_lens"); - i = TextModel(c, i, i_len); - - auto *p = _Input(c, {}, "input_imgs"); - p = VisonModel(c, p); - - auto *a = _Input(c, {}, "input_audios"); - a = AudioModel(c, a); - - - i = i->transpose(BATCH, SEQUENCE); - p = p->transpose(BATCH, SEQUENCE); - a = a->transpose(BATCH, SEQUENCE); - - auto *j1 = _Matmul( {p, i}, false, true, "final.vision@text"); - j1 = _Softmax( {j1}, DIMENSION, false, "final.vision@text.softmax"); - - auto *j2 = _Matmul( {p, a}, false, true, "final.vision@audio"); - j2 = _Softmax( {j2}, DIMENSION, false, "final.vision@audio.softmax"); - - i = _Cat( {j1, j2}, BATCH, "final.cat"); -} -int main(int argc, char **argv) { - cmdline::parser cmdParser; - cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", false, "../vocab/clip_vocab.mllm"); - cmdParser.add("model", 'm', "specify mllm model path", false, "../models/imagebind_huge-q4_k.mllm"); - cmdParser.add("merges", 'f', "specify mllm tokenizer merges.txt path", false, "../vocab/clip_merges.txt"); - cmdParser.add("thread", 't', "num of threads", false, 4); - cmdParser.parse_check(argc, argv); - - string vocab_path = cmdParser.get("vocab"); - string model_path = cmdParser.get("model"); - string merges_path = cmdParser.get("merges"); - int thread_num = cmdParser.get("thread"); - - // auto tokenizer = BPETokenizer(vocab_path); - - std::unique_ptr c_ptr(new Context()); - auto *c = c_ptr.get(); - ImageBind(c); - - 
BackendConfig bn; - Net net(bn); - net.convert(c->sub_param_, BackendType::MLLM_CPU, thread_num); - - ParamLoader param_loader(model_path); - Executor ex(¶m_loader); - ex.setup(&net); - - auto tokenizer = new BPETokenizer(vocab_path); - std::unordered_map merge_rank; - auto merge_file = std::ifstream(merges_path); - std::string line; - unsigned rank=0; - while (std::getline(merge_file, line)) { - if (line.empty()) { - continue; - } - if (line[0]=='#'){ - continue; - } - merge_rank[line]=rank; - rank++; - } - tokenizer->setMergeRank(merge_rank); - tokenizer->setSpecialToken("<|startoftext|>","<|endoftext|>"); - vector in_strs = {"a dog.", "A car", "A bird"}; - auto tokens_ids = vector>(); - for (auto in_str : in_strs) { - in_str = toLowercase(in_str); - auto tokens_id = vector(); - tokenizer->tokenize(in_str, tokens_id, true, true, ""); - tokens_ids.push_back(tokens_id); - } - shared_ptr input_text = std::make_shared(); - shared_ptr input_text_lens = std::make_shared(); - tokens2Tensor(&net, tokens_ids, input_text, input_text_lens); - - vector img_names = {"../assets/dog_image.jpg", "../assets/car_image.jpg", "../assets/bird_image.jpg"}; - // vector data_imgs; - vector< vector< vector>>> data_imgs; - auto* clip = new ClipPreProcessor(tokenizer); - clip->PreProcessImages(img_names); - data_imgs = clip->pixel_values_; - shared_ptr input_img = std::make_shared(); - img2Tensor(input_img, net, data_imgs); - - - auto audios = PreProcessor::ProcessAudio({"../assets/dog_audio.wav", "../assets/car_audio.wav", "../assets/bird_audio.wav"}); - shared_ptr input_audio = std::make_shared(); - audio2Tensor(input_audio, net, audios); - - ex.run(&net, {input_text, input_text_lens, input_img, input_audio}); - - - auto result = ex.result(); - showResult(result[0]); - - // free memory - for (auto *op : c->net_ops) { - delete op; - } - for (auto *tensor : c->net_tensors) { - delete tensor; - } - return 0; -} diff --git a/examples/main_llama.cpp b/examples/main_llama.cpp deleted file mode 
100644 index ede99ecb1..000000000 --- a/examples/main_llama.cpp +++ /dev/null @@ -1,156 +0,0 @@ -#include -#include -#include -#include "cmdline.h" -#include "Net.hpp" -#include "Executor.hpp" -#include "express/Express.hpp" -#include "tokenizers/BPE/Bpe.hpp" -using namespace mllm; - -unsigned int argmax(const std::vector &scores) { - if (scores.empty()) { - throw std::invalid_argument("Input vector is empty"); - } - unsigned int maxIndex = 0; - float maxValue = scores[0]; - for (size_t i = 1; i < scores.size(); ++i) { - if (scores[i] > maxValue) { - maxIndex = i; - maxValue = scores[i]; - } - } - return maxIndex; -} -unsigned int postProcessing(shared_ptr result, shared_ptr &out_result) { - assert(result->batch() == 1); - assert(result->head() == 1); - out_result->reshape(1, 1, 1, 1); - out_result->alloc(); - vector scores; - for (int i = 0; i < result->dimension(); ++i) { - auto value = result->dataAt(0, 0, result->sequence() - 1, i); - scores.push_back(value); - } - auto token_idx = argmax(scores); - out_result->setDataAt(0, 0, 0, 0, token_idx); - return token_idx; -} - -NetTensor *Attention(NetTensor *x, int embedding_size, int hidden_size, int head_size, int cache_max, string name) { - auto *q = _Linear({x}, embedding_size, hidden_size * head_size, false, name + ".wq"); - auto *k = _Linear({x}, embedding_size, hidden_size * head_size, false, name + ".wk"); - auto *v = _Linear({x}, embedding_size, hidden_size * head_size, false, name + ".wv"); - q = q->view(-1, head_size, -1, hidden_size); - k = k->view(-1, head_size, -1, hidden_size); - v = v->view(-1, head_size, -1, hidden_size); - q = _RoPE({q}, LLAMAROPE, name + ".q_rope"); - k = _RoPE({k}, LLAMAROPE, name + ".k_rope"); - k = _KVCache({k}, cache_max, name + ".k_cache"); - v = _KVCache({v}, cache_max, name + ".v_cache"); - auto *qk = _Matmul({q, k}, false, true, name + ".qk"); - qk = *qk / std::sqrt(hidden_size); - // qk = _Causalmask({qk}, name + ".mask"); - qk = _Softmax({qk}, DIMENSION, true, name + 
".softmax"); - auto *o = _Matmul({qk, v}, false, false, name + ".qkv"); - o = o->view(-1, 1, -1, hidden_size * head_size); - o = _Linear({o}, hidden_size * head_size, embedding_size, false, name + ".wo"); - return o; -} -NetTensor *FFN(NetTensor *i, int hidden_dim, int ffn_hidden_dim, string name) { - auto *x = _Linear({i}, hidden_dim, ffn_hidden_dim, false, name + ".w1"); - x = _SiLU({x}, name + ".silu"); - auto *y = _Linear({i}, hidden_dim, ffn_hidden_dim, false, name + ".w3"); - x = *x * y; // x = _Mul( {x, y}, name+".dot"); - x = _Linear({x}, ffn_hidden_dim, hidden_dim, false, name + ".w2"); - return x; -} -void llama(Context *c, int vocab_size = 32000, int hidden_dim = 4096, int ffn_hidden_dim = 11008, int mutil_head_size = 32, int cache_max = 200) { - auto *i = _Input(c); - i = _Embedding({i}, vocab_size, hidden_dim, (string) "tok_embeddings"); - // loop - for (int layer = 0; layer < 32; ++layer) { - auto *x = _RMSNorm({i}, hidden_dim, 1e-6, (string) "layers." + std::to_string(layer) + ".attention_norm"); - i = *Attention(x, hidden_dim, hidden_dim / mutil_head_size, mutil_head_size, cache_max, (string) "layers." + std::to_string(layer) + ".attention") + i; - x = _RMSNorm({i}, hidden_dim, 1e-6, (string) "layers." + std::to_string(layer) + ".ffn_norm"); - i = *FFN(x, hidden_dim, ffn_hidden_dim, (string) "layers." 
+ std::to_string(layer) + ".feed_forward") + i; - //_SubgraphBegin(c); - } - // end loop - i = _RMSNorm({i}, hidden_dim, 1e-6, (string) "norm"); - i = _Linear({i}, hidden_dim, vocab_size, false, "output"); -} -int main(int argc, char **argv) { - cmdline::parser cmdParser; - cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", false, "../vocab/llama2_vocab.mllm"); - cmdParser.add("model", 'm', "specify mllm model path", false, "../models/llama-2-7b-chat-q4_k.mllm"); - cmdParser.add("limits", 'l', "max KV cache size", false, 400); - cmdParser.add("thread", 't', "num of threads", false, 4); - cmdParser.parse_check(argc, argv); - - string vocab_path = cmdParser.get("vocab"); - string model_path = cmdParser.get("model"); - int tokens_limit = cmdParser.get("limits"); - int thread_num = cmdParser.get("thread"); - - auto tokenizer = BPETokenizer(vocab_path); - - int vocab_size = 32000; - int hidden_dim = 4096; - int ffn_hidden_dim = 11008; - int mutil_head_size = 32; - - std::unique_ptr c_ptr(new Context()); - auto *c = c_ptr.get(); - llama(c, vocab_size, hidden_dim, ffn_hidden_dim, mutil_head_size, tokens_limit); - - BackendConfig bn; - Net net(bn); - net.convert(c->sub_param_, BackendType::MLLM_CPU, thread_num); - - ParamLoader param_loader(model_path); - Executor ex(¶m_loader); - ex.setup(&net); - - vector in_strs = { - " Hello, who are you?", - " What can you do?", - "Please introduce Beijing University of Posts and Telecommunications."}; - shared_ptr input = std::make_shared(); - for (int str_i = 0; str_i < in_strs.size(); ++str_i) { - auto in_str = in_strs[str_i]; - if (in_str[0] != ' ') { - in_str = ' ' + in_str; - } - auto tokens_id = vector(); - tokenizer.tokenize(in_str, tokens_id, true); - if (str_i > 0) { - tokens_id[0] = 13; - } - BPETokenizer::token2Tensor(&net, tokens_id, input); - std::cout << "[Q] " << in_str << std::endl; - std::cout << "[A] " << std::flush; - for (int step = 0; step < 100; step++) { - ex.run(&net, {input}); - auto result = 
ex.result(); - auto token_idx = postProcessing(result[0], input); - if (token_idx == 2) { // "" - break; - } - auto out_token = tokenizer.detokenize({token_idx}); - std::cout << out_token << std::flush; - } - printf("\n"); - } - - ex.perf(); - - // free memory - for (auto *op : c->net_ops) { - delete op; - } - for (auto *tensor : c->net_tensors) { - delete tensor; - } - return 0; -} diff --git a/examples/main_llava.cpp b/examples/main_llava.cpp deleted file mode 100644 index 0cad04f9e..000000000 --- a/examples/main_llava.cpp +++ /dev/null @@ -1,267 +0,0 @@ -#include -#include -#include -#include "cmdline.h" -#include "Net.hpp" -#include "Executor.hpp" -#include "express/Express.hpp" -#include "tokenizers/BPE/Bpe.hpp" -#include "processor/ClipPreProcess.hpp" - -void print2DVetcors(std::vector> chunk_feats) { - std::cout << std::fixed; - std::cout << std::setprecision(4); - for (size_t i = 0; i < chunk_feats.size(); ++i) { - for (size_t j = 0; j < chunk_feats[i].size(); ++j) { - std::cout << chunk_feats[i][j] << ","; - } - std::cout << std::endl; - } -} -void print3DVetcors(std::vector>> all_clips) { - for (auto all_clip : all_clips) { - print2DVetcors(all_clip); - std::cout << "======================================" << std::endl; - } - std::cout << " [" << all_clips.size() << ", " << all_clips[0].size() << ", " << all_clips[0][0].size() << "]" << std::endl; -} - -int cache_max = 700; - -using namespace mllm; -unsigned int argmax(const std::vector &scores) { - if (scores.empty()) { - throw std::invalid_argument("Input vector is empty"); - } - unsigned int maxIndex = 0; - float maxValue = scores[0]; - for (size_t i = 1; i < scores.size(); ++i) { - if (scores[i] > maxValue) { - maxIndex = i; - maxValue = scores[i]; - } - } - return maxIndex; -} -unsigned int postProcessing(shared_ptr result, shared_ptr &out_result, shared_ptr &input_img) { - assert(result->batch() == 1); - assert(result->head() == 1); - out_result->reshape(1, 1, 1, 1); - out_result->alloc(); - vector 
scores; - for (int i = 0; i < result->dimension(); ++i) { - auto value = result->dataAt(0, 0, result->sequence() - 1, i); - scores.push_back(value); - } - auto token_idx = argmax(scores); - out_result->setDataAt(0, 0, 0, 0, token_idx); - input_img->reshape(0, 0, 0, 0); - input_img->alloc(); - return token_idx; -} -NetTensor *Attention(NetTensor *x, int embedding_size, int hidden_size, int head_size, int cache_max, string name) { - auto *q = _Linear({x}, embedding_size, hidden_size * head_size, false, name + ".q_proj"); - auto *k = _Linear({x}, embedding_size, hidden_size * head_size, false, name + ".k_proj"); - auto *v = _Linear({x}, embedding_size, hidden_size * head_size, false, name + ".v_proj"); - q = q->view(-1, head_size, -1, hidden_size); - k = k->view(-1, head_size, -1, hidden_size); - v = v->view(-1, head_size, -1, hidden_size); - q = _RoPE({q}, HFHUBROPE, name + ".q_rope"); - k = _RoPE({k}, HFHUBROPE, name + ".k_rope"); - k = _KVCache({k}, cache_max, name + ".k_cache"); - v = _KVCache({v}, cache_max, name + ".v_cache"); - auto *qk = _Matmul({q, k}, false, true, name + ".qk"); - qk = *qk / std::sqrt(hidden_size); - // qk = _Causalmask({qk}, name + ".mask"); - qk = _Softmax({qk}, DIMENSION, true, name + ".softmax"); - auto *o = _Matmul({qk, v}, false, false, name + ".qkv"); - o = o->view(-1, 1, -1, hidden_size * head_size); - o = _Linear({o}, hidden_size * head_size, embedding_size, false, name + ".o_proj"); - return o; -} -NetTensor *FFN(NetTensor *i, int hidden_dim, int ffn_hidden_dim, string name) { - auto *x = _Linear({i}, hidden_dim, ffn_hidden_dim, false, name + ".gate_proj"); - x = _SiLU({x}, name + ".silu"); - auto *y = _Linear({i}, hidden_dim, ffn_hidden_dim, false, name + ".up_proj"); - x = *x * y; // x = _Mul( {x, y}, name+".dot"); - x = _Linear({x}, ffn_hidden_dim, hidden_dim, false, name + ".down_proj"); - return x; -} - -NetTensor *text_embd(NetTensor *i, int vocab_size = 32064, int hidden_dim = 4096, string name = "language_model") { - // 
auto *i = _Input(c); - i = _Embedding({i}, vocab_size, hidden_dim, name + ".model.embed_tokens"); - return i; -} -NetTensor *llama(NetTensor *i, int vocab_size = 32064, int hidden_dim = 4096, int ffn_hidden_dim = 11008, int mutil_head_size = 32, int cache_max = 700, string name = "language_model") { - // i = _Embedding( {i}, vocab_size, hidden_dim, name+"model.embed_tokens"); - for (int layer = 0; layer < 32; ++layer) { - auto *x = _RMSNorm({i}, hidden_dim, 1e-6, name + ".model.layers." + std::to_string(layer) + ".input_layernorm"); - i = *Attention(x, hidden_dim, hidden_dim / mutil_head_size, mutil_head_size, cache_max, name + ".model.layers." + std::to_string(layer) + ".self_attn") + i; - x = _RMSNorm({i}, hidden_dim, 1e-6, name + ".model.layers." + std::to_string(layer) + ".post_attention_layernorm"); - i = *FFN(x, hidden_dim, ffn_hidden_dim, name + ".model.layers." + std::to_string(layer) + ".mlp") + i; - //_SubgraphBegin(c); - } - i = _RMSNorm({i}, hidden_dim, 1e-6, name + ".model.norm"); - i = _Linear({i}, hidden_dim, vocab_size, false, name + ".lm_head"); - return i; -} - -NetTensor *VisionAttention(NetTensor *x, int embedding_size, int hidden_size, int head_size, string name) { - auto *q = _Linear({x}, embedding_size, hidden_size * head_size, true, name + ".q_proj"); - auto *k = _Linear({x}, embedding_size, hidden_size * head_size, true, name + ".k_proj"); - auto *v = _Linear({x}, embedding_size, hidden_size * head_size, true, name + ".v_proj"); - q = q->view(-1, head_size, -1, hidden_size); - k = k->view(-1, head_size, -1, hidden_size); - v = v->view(-1, head_size, -1, hidden_size); - auto *qk = _Matmul({q, k}, false, true, name + ".qk"); - qk = _Scale({qk}, 1.0F / std::sqrt(hidden_size), 0.0F, false, name + ".scale"); - if (name.find("text_model") != std::string::npos) { - qk = _Softmax( {qk}, DIMENSION, true, name + ".softmax"); - } else{ - qk = _Softmax( {qk}, DIMENSION, false, name + ".softmax"); - } - auto *o = _Matmul({qk, v}, false, false, name + 
".qkv"); - o = o->view(-1, 1, -1, hidden_size * head_size); - o = _Linear({o}, hidden_size * head_size, embedding_size, true, name + ".out_proj"); - return o; -} -NetTensor *MLP(NetTensor *i, int hidden_dim, int ffn_hidden_dim, string name) { - auto *x = _Linear({i}, hidden_dim, ffn_hidden_dim, true, name + ".fc1"); - x = _QuickGELU({x}, name + ".act_fn"); - x = _Linear({x}, ffn_hidden_dim, hidden_dim, true, name + ".fc2"); - return x; -} -NetTensor *VisionEmbedding(Context *c, NetTensor *i, int hidden_size, string name) { - i = _Convolution2D({i}, 3, 1024, {14, 14}, {14, 14}, VALID, false, name + ".patch_embedding"); - i = i->transpose(SEQUENCE, DIMENSION); - i = i->flatten(HEAD, SEQUENCE); - auto *s = _Parameter(c, {}, 1, 1, 1, 1024, name + ".class_embedding"); - i = _Cat({s, i}, SEQUENCE, name + ".class_embedding.cat"); - s = _Range(c, {}, 0, 577, name + ".position_ids"); - i = *_Embedding({s}, 577, 1024, name + ".position_embedding") + i; - return i; -} -NetTensor *vision_tower(Context *c, NetTensor *i, int hidden_dim = 1024, int ffn_hidden_dim = 4096, int mutil_head_size = 16, string name = "vision_tower.vision_model") { - i = VisionEmbedding(c, i, hidden_dim, name + ".embeddings"); - i = _LayerNorm({i}, hidden_dim, true, 1e-6, name + ".pre_layrnorm"); - for (int layer = 0; layer < 23; ++layer) { - auto *x = _LayerNorm({i}, hidden_dim, true, 1e-6, name + ".encoder.layers." + std::to_string(layer) + ".layer_norm1"); - i = *VisionAttention(x, hidden_dim, hidden_dim / mutil_head_size, mutil_head_size, name + ".encoder.layers." + std::to_string(layer) + ".self_attn") + i; - x = _LayerNorm({i}, hidden_dim, true, 1e-6, name + ".encoder.layers." + std::to_string(layer) + ".layer_norm2"); - i = *MLP(x, hidden_dim, ffn_hidden_dim, name + ".encoder.layers." 
+ std::to_string(layer) + ".mlp") + i; - } - i = i->clip({}, {}, {1, 577}, {}); - i = _Linear({i}, hidden_dim, ffn_hidden_dim, true, "multi_modal_projector.linear_1"); - i = _GELU({i}, "multi_modal_projector.act_fn"); - i = _Linear({i}, ffn_hidden_dim, ffn_hidden_dim, true, "multi_modal_projector.linear_2"); - // i = _LayerNorm( {i}, hidden_dim, true, 1e-6, name + ".post_layernorm"); - return i; -} - -void llava(Context *c, int cache_max = 700) { - auto *i = _Input(c, {}, "input_text"); - auto *e = text_embd(i); - i = i->where(32000, SEQUENCE); - auto *v = _Input(c, {}, "input_imgs"); - v = vision_tower(c, v); - i = _Replace({e, v, i}); - i = llama(i, 32064, 4096, 11008, 32, cache_max, "language_model"); - i = i->clip({}, {}, {-1}, {}); -} - -int main(int argc, char **argv) { - cmdline::parser cmdParser; - cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", false, "../vocab/llava_vocab.mllm"); - cmdParser.add("model", 'm', "specify mllm model path", false, "../models/llava-1.5-7b-q4_k.mllm"); - cmdParser.add("limits", 'l', "max KV cache size", false, 700); - cmdParser.add("thread", 't', "num of threads", false, 4); - cmdParser.parse_check(argc, argv); - - string vocab_path = cmdParser.get("vocab"); - string model_path = cmdParser.get("model"); - int tokens_limit = cmdParser.get("limits"); - int thread_num = cmdParser.get("thread"); - - std::unique_ptr c_ptr(new Context()); - auto *c = c_ptr.get(); - llava(c, tokens_limit); - - BackendConfig bn; - Net net(bn); - net.convert(c->sub_param_, BackendType::MLLM_CPU, thread_num); - - ParamLoader param_loader(model_path); - Executor ex(¶m_loader); - ex.setup(&net); - - auto tokenizer = new BPETokenizer(vocab_path); - std::unordered_map merge_rank; - auto merge_file = std::ifstream("../vocab/llava_merges.txt"); - std::string line; - unsigned rank=0; - while (std::getline(merge_file, line)) { - if (line.empty()) { - continue; - } - if (line[0]=='#'){ - continue; - } - merge_rank[line]=rank; - rank++; - } - 
tokenizer->setMergeRank(merge_rank); - - std::vector> in_imgs = { - {"../assets/australia.jpg"}}; - vector in_strs = { - "\nUSER: What's the content of the image?\nASSISTANT:"}; - - for (int inId = 0; inId < in_strs.size(); ++inId) { - auto in_str = in_strs[0]; - if (in_str[0] != ' ') { - in_str = ' ' + in_str; - } - auto in_img = in_imgs[0]; - - auto tokens_ids = vector>(); - vector tokens_id = {}; - tokenizer->tokenize(BPETokenizer::replaceString(in_str,' ',"▁"), tokens_id, {"", "", "\n"}); - tokens_ids.push_back(tokens_id); - - shared_ptr input_text = std::make_shared(); - BPETokenizer::tokens2Tensor(&net, tokens_ids, input_text); - - auto *clip_processor = new ClipPreProcessor(tokenizer, 336, 336); - clip_processor->PreProcessImages(in_img); - auto images = clip_processor->pixel_values_[0]; - - shared_ptr input_img = std::make_shared(); - clip_processor->Img2Tensor(net.backends()[MLLM_CPU].get(), input_img, images); - - std::cout << in_strs[0] << std::flush; - for (int step = 0; step < 30; step++) { - ex.run(&net, {input_text, input_img}); - auto result = ex.result(); - auto token_idx = postProcessing(result[0], input_text, input_img); - if (token_idx == 2) { // "" - break; - } - auto out_token = tokenizer->detokenize({token_idx}); - std::cout << out_token << std::flush; - } - std::cout << std::endl; - } - - - ex.perf(); - - // free memory - for (auto *op : c->net_ops) { - delete op; - } - for (auto *tensor : c->net_tensors) { - delete tensor; - } - return 0; -} diff --git a/examples/main_phonelm_npu.cpp b/examples/main_phonelm_npu.cpp deleted file mode 100644 index 999974853..000000000 --- a/examples/main_phonelm_npu.cpp +++ /dev/null @@ -1,233 +0,0 @@ -#ifdef USE_QNN -#include -#include -#include -#include -#include "Executor.hpp" -#include "Types.hpp" -#include "backends/qnn/QNNNet.hpp" -#include "cmdline.h" -#include "Net.hpp" -#include "backends/qnn/QNNExecutor.hpp" - -#include "models/smollm/tokenization_smollm.hpp" -#include "main_phonelm_npu.hpp" - 
-using namespace mllm; - -unsigned int argmax(const std::vector &scores) { - return std::max_element(scores.begin(), scores.end()) - scores.begin(); -} - -unsigned int postProcessing(shared_ptr result, shared_ptr &out_result) { - assert(result->batch() == 1); - assert(result->head() == 1); - out_result->reshape(1, 1, 1, 1); - out_result->alloc(); - vector scores; - for (int i = 0; i < result->dimension(); ++i) { - auto value = result->dataAt(0, 0, result->sequence() - 1, i); - scores.push_back(value); - } - auto token_idx = argmax(scores); - out_result->setDataAt(0, 0, 0, 0, token_idx); - return token_idx; -} - -unsigned int postProcessing_prefill(shared_ptr result, shared_ptr &out_result, int seq) { - assert(result->batch() == 1); - assert(result->head() == 1); - out_result->reshape(1, 1, 1, 1); - out_result->alloc(); - vector scores; - for (int i = 0; i < result->dimension(); ++i) { - auto value = result->dataAt(0, 0, seq - 1, i); - scores.push_back(value); - } - auto token_idx = argmax(scores); - out_result->setDataAt(0, 0, 0, 0, token_idx); - return token_idx; -} - -int main(int argc, char **argv) { - cmdline::parser cmdParser; - cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", false, "../vocab/phonelm_vocab.mllm"); - - cmdParser.add("limits", 'l', "max KV cache size", false, 1124); - - cmdParser.add("thread", 't', "num of threads", false, 4); - cmdParser.add("seq", 's', "seqenth length", false, 64); - cmdParser.add("chunk", 'c', "use chunk execute", false, true); - cmdParser.add("head", 'h', "num of heads", false, 16); - - cmdParser.add("ffn", 'f', "size of ffn hidden size", false, 6816); - cmdParser.add("hds", 'd', "size of hidden size", false, 2560); - - cmdParser.add("readfile", 'r', "read prompt from file", false, false); - - cmdParser.parse_check(argc, argv); - - const string npu_model_path = "../models/phonelm-1.5b-instruct-int8.mllm"; - const string cpu_model_path = "../models/phonelm-with-head-q4k.mllm"; - const string merge_file_path = 
"../vocab/phonelm_merges.txt"; - - string vocab_path = cmdParser.get("vocab"); - int tokens_limit = cmdParser.get("limits"); - int thread_num = cmdParser.get("thread"); - int seqLength = cmdParser.get("seq"); - bool isChunkExecute = cmdParser.get("chunk"); - int head_num = cmdParser.get("head"); - - bool read_file = cmdParser.get("readfile"); - - int chunk = 1; - if (isChunkExecute) - chunk = seqLength / 256; - - int vocab_size = 49152; - int hidden_dim = cmdParser.get("hds"); - int ffn_hidden_dim = cmdParser.get("ffn"); - - vector in_strs = { - "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\nGive me a short introduction to large language model.<|im_end|>\n<|im_start|>assistant\n", - // " What can you do?", - // "Please introduce Beijing University of Posts and Telecommunications."}; - }; - - string input_string; - if (read_file) { - std::ifstream file("./func_prompt.txt"); - if (!file) { - std::cerr << "无法打开文件!" << std::endl; - return 1; - } - std::stringstream buffer; - buffer << file.rdbuf(); - input_string = buffer.str(); - file.close(); // 关闭文件 - } else { - input_string = in_strs[0]; - } - - auto tokenizer = SmolLMTokenizer(vocab_path, merge_file_path); - - std::unique_ptr npu_ctx_ptr(new Context()); - auto *npu_ctx = npu_ctx_ptr.get(); - std::unique_ptr cpu_ctx_ptr(new Context()); - auto *cpu_ctx = cpu_ctx_ptr.get(); - std::unique_ptr inter_ctx_ptr(new Context()); - auto *inter_ctx = inter_ctx_ptr.get(); - - // cache_max should be longer than seqLength - modeling::phonelm_npu(npu_ctx, vocab_size, hidden_dim, ffn_hidden_dim, head_num, tokens_limit, seqLength, chunk); - modeling::phonelm_npu_cpu_inter(inter_ctx, vocab_size, hidden_dim, ffn_hidden_dim, head_num, tokens_limit, seqLength, chunk); - modeling::phonelm_cpu_q40(cpu_ctx, vocab_size, hidden_dim, ffn_hidden_dim, head_num, tokens_limit); - - BackendConfig bn; - QNNNet npuNet(bn, npu_ctx); - npuNet.convert(npu_ctx, BackendType::MLLM_QNN, thread_num); - Net interNet(bn); - 
interNet.convert(inter_ctx->sub_param_, BackendType::MLLM_CPU, thread_num); - Net cpuNet(bn); - cpuNet.convert(cpu_ctx->sub_param_, BackendType::MLLM_CPU, thread_num); - - ParamLoader npu_prefill_param_loader(npu_model_path); - ParamLoader cpu_decoding_param_loader(cpu_model_path); - ParamLoader inter_param_loader(npu_model_path); - - QNNExecutor *npuExePtr; - if (isChunkExecute) { - npuExePtr = new QNNPipelineExecutor(&npu_prefill_param_loader); - } else { - npuExePtr = new QNNExecutor(&npu_prefill_param_loader); - } - auto &npuExe = *npuExePtr; - npuExe.setup(&npuNet); - Executor interExe(&inter_param_loader); - interExe.setup(&interNet); - Executor cpuExe(&cpu_decoding_param_loader); - cpuExe.setup(&cpuNet); - - shared_ptr input = std::make_shared(); - - for (int str_i = 0; str_i < in_strs.size(); ++str_i) { - // auto in_str = in_strs[str_i]; - auto [real_seq_length, input_tensor] = tokenizer.tokenizeWithPadding(input_string, seqLength, vocab_size); - auto input = std::make_shared(input_tensor); - - if (chunk != 1) - npuExe.warmup(npu_ctx, &npuNet, {input}); - - std::cout << "real_seq_length: " << real_seq_length << std::endl; - std::cout << "[Q] " << input_string << std::endl; - std::cout << "[A] " << std::flush; - - do { - // 1: Prefill stage using NPU chunk execute - npuExe.run(npu_ctx, &npuNet, {input}); - auto result = npuExe.result(); - - // inter model for prefill-decode - interExe.run(&interNet, {result[0]}); - result = interExe.result(); - - auto token_idx = postProcessing_prefill(result[0], input, real_seq_length); - if (token_idx == 2) { // "" - break; - } - // exit(0); - - auto out_token = tokenizer.detokenize({token_idx}); - std::cout << out_token << std::flush; - - auto prefill_cpu_backend = dynamic_cast(npuNet.backends()[MLLM_CPU].get()); - auto inter_cpu_backend = dynamic_cast(interNet.backends()[MLLM_CPU].get()); - auto decode_cpu_backend = dynamic_cast(cpuNet.backends()[MLLM_CPU].get()); - 
prefill_cpu_backend->setCurSequenceLength(real_seq_length); - prefill_cpu_backend->setExecutionType(AUTOREGRESSIVE); - prefill_cpu_backend->toggleSwitching(); - inter_cpu_backend->setCurSequenceLength(real_seq_length); - inter_cpu_backend->setExecutionType(AUTOREGRESSIVE); - inter_cpu_backend->toggleSwitching(); - decode_cpu_backend->setCurSequenceLength(real_seq_length); - decode_cpu_backend->setExecutionType(AUTOREGRESSIVE); - decode_cpu_backend->toggleSwitching(); - - // // 2: Decoding stage using CPU execute - for (int step = real_seq_length; step < real_seq_length + 100; step++) { - cpuExe.run(&cpuNet, {input}); - auto result = cpuExe.result(); - - auto token_idx = postProcessing(result[0], input); - if (token_idx == 2) { // "" - break; - } - - auto out_token = tokenizer.detokenize({token_idx}); - std::cout << out_token << std::flush; - - if (step == real_seq_length) { - prefill_cpu_backend->toggleSwitching(); - inter_cpu_backend->toggleSwitching(); - decode_cpu_backend->toggleSwitching(); - } - } - } while (false); - printf("\n"); - } - - std::cout << "====================" << std::endl; - npuExe.perf(); - cpuExe.perf(); - - // free memory - // for (auto *op : npu_ctx->net_ops) { - // delete op; - // } - // for (auto *tensor : npu_ctx->net_tensors) { - // delete tensor; - // } - - return 0; -} -#endif diff --git a/examples/main_phonelm_npu.hpp b/examples/main_phonelm_npu.hpp deleted file mode 100644 index 63db0bebe..000000000 --- a/examples/main_phonelm_npu.hpp +++ /dev/null @@ -1,231 +0,0 @@ -#include -#include -#include "Types.hpp" -#include "express/Express.hpp" - -using namespace mllm; - -namespace modeling { - -const std::set phonelm_shadow_layers = {1, 3, 4}; - -NetTensor *PhoneLM_FFN_NPU(Context *c, NetTensor *i, int hidden_dim, int ffn_hidden_dim, string name) { - auto *x = _LinearINT8({i}, hidden_dim, ffn_hidden_dim, false, name + ".gate_proj"); - auto *y = _LinearINT8({i}, hidden_dim, ffn_hidden_dim, false, name + ".up_proj"); - // x = 
_SuperSiLU({x,y}, name + ".supersilu"); - // x = _Dequantize({x}, true, (string)name + ".gate_proj.dequantize", true); - // y = _Dequantize({y}, true, (string)name + ".up_proj.dequantize", true); - x = _ReLU({x}, name + ".gate_proj.relu"); - x = *x * y; - // x = _Quantize({x}, true, (string)name + ".down_proj.quantize"); - x = _LinearINT8({x}, ffn_hidden_dim, hidden_dim, false, name + ".down_proj"); - x = _Dequantize({x}, true, (string)name + ".down_proj.dequantize"); - return x; -} - -std::vector PhoneLM_CPUNPUAttention(Context *c, NetTensor *x, NetTensor *res, int embedding_size, int hidden_size, int head_size, int cache_max, string name, int seq, int chunk, int layer) { - x = x->view(1, static_cast(seq / chunk / 32), static_cast(32), hidden_size * head_size); - auto *q = _LinearINT8({x}, embedding_size, hidden_size * head_size, false, name + ".q_proj"); - auto *k = _LinearINT8({x}, embedding_size, hidden_size * head_size, false, name + ".k_proj"); - auto *v = _LinearINT8({x}, embedding_size, hidden_size * head_size, false, name + ".v_proj"); - q = q->view(1, head_size, seq / chunk, hidden_size); - k = k->view(1, head_size, seq / chunk, hidden_size); - v = v->view(1, head_size, seq / chunk, hidden_size); - - q = _Dequantize({q}, true, (string)name + ".q_proj.dequantize", true); - k = _Dequantize({k}, true, (string)name + ".k_proj.dequantize", false); - v = _Dequantize({v}, true, (string)name + ".v_proj.dequantize", false); - // q = _QNNIRoPE({q}, HFHUBROPE, name + ".q_proj.rope", 10000, 2048, true); - // k = _QNNIRoPE({k}, HFHUBROPE, name + ".k_proj.rope", 10000, 2048, false); - - v = _Transpose({v}, {0, 2, 3, 1}, (string)name + ".v_proj.transpose"); - - // before attention is CPU, so don't output res - vector m, s; - if (layer == 0 || phonelm_shadow_layers.find(layer - 1) != phonelm_shadow_layers.end()) { // res is from cpu - m = _MergeOutput({q, k, v}, name + ".qkv_merge"); - // -------------------- - _SubgraphBegin(c, MLLM_CPU); - // -------------------- - s = 
_SplitInput({m}, true, 4, name + ".qkv_split"); - q = s[0]; - k = s[1]; - v = s[2]; - } else { // res is from qnn - m = _MergeOutput({q, k, v, res}, name + ".qkv_merge"); - // -------------------- - _SubgraphBegin(c, MLLM_CPU); - // -------------------- - s = _SplitInput({m}, true, 4, name + ".qkv_split"); - q = s[0]; - k = s[1]; - v = s[2]; - res = s[3]; - } - - q = _IRoPE({q}, HFHUBROPE, name + ".q_proj.rope", 10000, 2048); - k = _IRoPE({k}, HFHUBROPE, name + ".k_proj.rope", 10000, 2048); - - k = _KVCacheNPU({k}, cache_max, name + ".k_cache"); - v = _KVCacheNPU({v}, cache_max, name + ".v_cache"); - - auto *qk = _Matmul({q, k}, false, true, name + ".qk"); - // qk = *qk / std::sqrt(hidden_size); - // qk = _Causalmask({qk}, name + ".mask"); - qk = _Softmax({qk}, DIMENSION, true, name + ".softmax"); - - auto *o = _Matmul({qk, v}, false, false, name + ".qkv"); - o = _Quantize({o}, true, (string)name + ".o_proj.quantize"); - m = _MergeOutput({o, res}, name + ".or_merge"); - - // -------------------- - _SubgraphBegin(c, MLLM_QNN); - // -------------------- - s = _SplitInput({m}, true, 2, name + ".or_split"); - - o = s[0]; - res = s[1]; - - o = o->view(1, static_cast(seq / chunk / 32), static_cast(32), hidden_size * head_size); - res = res->view(-1, 1, -1, hidden_size * head_size); - o = _LinearINT8({o}, hidden_size * head_size, embedding_size, false, name + ".o_proj"); - o = _Dequantize({o}, true, (string)name + ".o_proj.dequantize"); - - return {o, res}; -} - -NetTensor *PhoneLM_CPUAttention_q40(Context *c, NetTensor *x, int embedding_size, int hidden_size, int head_size, int cache_max, string name, int seq, int chunk) { - auto *q = _Linear({x}, embedding_size, hidden_size * head_size, false, name + ".q_proj"); - auto *k = _Linear({x}, embedding_size, hidden_size * head_size, false, name + ".k_proj"); - auto *v = _Linear({x}, embedding_size, hidden_size * head_size, false, name + ".v_proj"); - q = q->view(-1, head_size, -1, hidden_size); - k = k->view(-1, head_size, 
-1, hidden_size); - v = v->view(-1, head_size, -1, hidden_size); - - q = _IRoPE({q}, HFHUBROPE, name + ".q_rope", 10000, 2048); - k = _IRoPE({k}, HFHUBROPE, name + ".k_rope", 10000, 2048); - - k = _KVCacheNPU({k}, cache_max, name + ".k_cache"); - v = _KVCacheNPU({v}, cache_max, name + ".v_cache"); - - auto *qk = _Matmul({q, k}, false, true, name + ".qk"); - qk = *qk / std::sqrt(hidden_size); - // qk = _Causalmask({qk}, name + ".mask"); - qk = _Softmax({qk}, DIMENSION, true, name + ".softmax"); - - auto *o = _Matmul({qk, v}, false, false, name + ".qkv"); - - o = o->view(-1, 1, -1, hidden_size * head_size); - - o = _Linear({o}, hidden_size * head_size, embedding_size, false, name + ".o_proj"); - - return o; -} - -NetTensor *PhoneLM_FFN_CPU_q40(Context *c, NetTensor *i, int hidden_dim, int ffn_hidden_dim, string name) { - auto *x = _Linear({i}, hidden_dim, ffn_hidden_dim, false, name + ".gate_proj"); - auto *y = _Linear({i}, hidden_dim, ffn_hidden_dim, false, name + ".up_proj"); - x = _ReLU({x}, name + ".relu"); - x = *x * y; - x = _Linear({x}, ffn_hidden_dim, hidden_dim, false, name + ".down_proj"); - return x; -} - -void phonelm_cpu_q40(Context *c, int vocab_size = 32000, int hidden_dim = 4096, int ffn_hidden_dim = 11008, int mutil_head_size = 32, int cache_max = 200, int seq = 256, int chunk = 2) { - auto *i = _Input(c); - i = _Embedding({i}, vocab_size, hidden_dim, (string) "model.embed_tokens"); - - for (int layer = 0; layer < 19; ++layer) { - auto res = _RMSNorm({i}, hidden_dim, 1e-6, (string) "model.layers." + std::to_string(layer) + ".input_layernorm"); - - i = *PhoneLM_CPUAttention_q40(c, res, hidden_dim, hidden_dim / mutil_head_size, mutil_head_size, cache_max, (string) "model.layers." + std::to_string(layer) + ".self_attn", seq, chunk) + i; - - res = _RMSNorm({i}, hidden_dim, 1e-6, (string) "model.layers." + std::to_string(layer) + ".post_attention_layernorm"); - - i = *PhoneLM_FFN_CPU_q40(c, res, hidden_dim, ffn_hidden_dim, (string) "model.layers." 
+ std::to_string(layer) + ".mlp") + i; - } - i = _RMSNorm({i}, hidden_dim, 1e-6, (string) "model.norm"); - i = _Linear({i}, hidden_dim, vocab_size, false, "lm_head"); -} - -// merge o and FFN. -void phonelm_npu(Context *c, int vocab_size = 32000, int hidden_dim = 4096, int ffn_hidden_dim = 11008, int mutil_head_size = 32, int cache_max = 200, int seq = 256, int chunk = 2) { - auto *i = _Input(c); - i = _Embedding({i}, vocab_size, hidden_dim, (string) "model.embed_tokens"); - - // first 23 layer using NPU-CPU prefilling - for (int layer = 0; layer < 19; ++layer) { - auto res = i; - res = res->view(-1, mutil_head_size, -1, hidden_dim / mutil_head_size); - - i = _RMSNorm({i}, hidden_dim, 1e-6, (string) "model.layers." + std::to_string(layer) + ".input_layernorm"); - i = _Quantize({i}, true, (string) "model.layers." + std::to_string(layer) + ".self_attn.q_proj.quantize"); - - // above is from CPU, needs merge and split - if (layer == 0 || phonelm_shadow_layers.find(layer - 1) != phonelm_shadow_layers.end()) { - i = i->view(-1, mutil_head_size, -1, hidden_dim / mutil_head_size); - auto m = _MergeOutput({i}, "model.layers." + std::to_string(layer) + ".ires_merge"); - _SubgraphBegin(c, MLLM_QNN); - auto s = _SplitInput(m, true, 2, "model.layers." + std::to_string(layer) + ".self_attn.ires_split"); - i = s[0]; - } - - auto ix = PhoneLM_CPUNPUAttention(c, i, res, hidden_dim, hidden_dim / mutil_head_size, mutil_head_size, cache_max, (string) "model.layers." + std::to_string(layer) + ".self_attn", seq, chunk, layer); - - i = ix[0]; - res = ix[1]; - - i = i->view(1, 1, seq / chunk, hidden_dim); - i = *i + res; - - res = i; - - i = _RMSNorm({i}, hidden_dim, 1e-6, (string) "model.layers." + std::to_string(layer) + ".post_attention_layernorm", true); - - i = _Quantize({i}, true, (string) "model.layers." 
+ std::to_string(layer) + ".mlp.up_proj.quantize"); - - i = i->view(1, static_cast(seq / chunk / 32), static_cast(32), hidden_dim); - - if (phonelm_shadow_layers.find(layer) == phonelm_shadow_layers.end()) { // normal ffn layers - // if (layer != 1) { - i = PhoneLM_FFN_NPU(c, i, hidden_dim, ffn_hidden_dim, (string) "model.layers." + std::to_string(layer) + ".mlp"); - - i = i->view(1, 1, seq / chunk, hidden_dim); - - i = *i + res; - } else { - auto name = (string) "model.layers." + std::to_string(layer) + ".mlp"; - auto *x = _LinearINT8({i}, hidden_dim, ffn_hidden_dim, false, name + ".gate_proj"); - auto *y = _LinearINT8({i}, hidden_dim, ffn_hidden_dim, false, name + ".up_proj"); - x = _ReLU({x}, name + ".gate_proj.relu"); - x = _Dequantize({x}, true, (string)name + ".gate_proj.dequantize", true); - y = _Dequantize({y}, true, (string)name + ".up_proj.dequantize", true); - x = *x * y; - - auto *i1 = x; - x = _Quantize({x}, true, (string)name + ".down_proj.quantize"); - - x = _LinearINT8({x}, ffn_hidden_dim, hidden_dim, false, name + ".down_proj"); - - auto *i2 = x; - x = _Dequantize({x}, true, (string)name + ".down_proj.dequantize"); - - x = x->view(1, 1, seq / chunk, hidden_dim); - - x = *x + res; - - auto shadow = _MergeOutput({i1, i2, x}, name + ".down_proj.shadow.qnn"); - - _SubgraphBegin(c, MLLM_CPU); - shadow = _SplitInput(shadow, true, 3); - i = _LinearINT8ShadowCPU(shadow, ffn_hidden_dim, hidden_dim, 1024, false, name + ".down_proj.shadow"); - } - } -} - -void phonelm_npu_cpu_inter(Context *c, int vocab_size = 32000, int hidden_dim = 4096, int ffn_hidden_dim = 11008, int mutil_head_size = 32, int cache_max = 200, int seq = 256, int chunk = 2) { - auto *i = _Input(c); - i = _RMSNorm({i}, hidden_dim, 1e-6, (string) "model.norm"); - i = _Linear({i}, hidden_dim, vocab_size, false, "lm_head"); -} -} // namespace modeling diff --git a/examples/main_qwen_npu.cpp b/examples/main_qwen_npu.cpp deleted file mode 100644 index 2f169f80d..000000000 --- 
a/examples/main_qwen_npu.cpp +++ /dev/null @@ -1,235 +0,0 @@ -#ifdef USE_QNN -#include -#include -#include -#include -#include "Executor.hpp" -#include "Types.hpp" -#include "backends/qnn/QNNNet.hpp" -#include "cmdline.h" -#include "Net.hpp" -#include "backends/qnn/QNNExecutor.hpp" - -#include "models/qwen/tokenization_qwen.hpp" -#include "main_qwen_npu.hpp" - -using namespace mllm; - -unsigned int argmax(const std::vector &scores) { - return std::max_element(scores.begin(), scores.end()) - scores.begin(); -} - -unsigned int postProcessing(shared_ptr result, shared_ptr &out_result) { - assert(result->batch() == 1); - assert(result->head() == 1); - out_result->reshape(1, 1, 1, 1); - out_result->alloc(); - vector scores; - for (int i = 0; i < result->dimension(); ++i) { - auto value = result->dataAt(0, 0, result->sequence() - 1, i); - scores.push_back(value); - } - auto token_idx = argmax(scores); - out_result->setDataAt(0, 0, 0, 0, token_idx); - return token_idx; -} - -unsigned int postProcessing_prefill(shared_ptr result, shared_ptr &out_result, int seq) { - assert(result->batch() == 1); - assert(result->head() == 1); - out_result->reshape(1, 1, 1, 1); - out_result->alloc(); - vector scores; - for (int i = 0; i < result->dimension(); ++i) { - auto value = result->dataAt(0, 0, seq - 1, i); - scores.push_back(value); - } - auto token_idx = argmax(scores); - out_result->setDataAt(0, 0, 0, 0, token_idx); - return token_idx; -} - -int main(int argc, char **argv) { - cmdline::parser cmdParser; - cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", false, "../vocab/qwen_vocab.mllm"); - - cmdParser.add("limits", 'l', "max KV cache size", false, 1124); - - cmdParser.add("thread", 't', "num of threads", false, 4); - cmdParser.add("seq", 's', "seqenth length", false, 64); - cmdParser.add("chunk", 'c', "use chunk execute", false, true); - cmdParser.add("head", 'h', "num of heads", false, 16); - - cmdParser.add("ffn", 'f', "size of ffn hidden size", false, 5504); - 
cmdParser.add("hds", 'd', "size of hidden size", false, 2048); - - cmdParser.add("readfile", 'r', "read prompt from file", false, false); - - cmdParser.parse_check(argc, argv); - - const string npu_model_path = "../models/qwen-1.5-1.8b-chat-int8.mllm"; - const string cpu_model_path = "../models/qwen-1.5-1.8b-chat-q4k.mllm"; - const string merge_file_path = "../vocab/qwen_merges.txt"; - - string vocab_path = cmdParser.get("vocab"); - int tokens_limit = cmdParser.get("limits"); - int thread_num = cmdParser.get("thread"); - int seqLength = cmdParser.get("seq"); - bool isChunkExecute = cmdParser.get("chunk"); - int head_num = cmdParser.get("head"); - - bool read_file = cmdParser.get("readfile"); - - int chunk = 1; - if (isChunkExecute) - chunk = seqLength / 128; - - int vocab_size = 151936; - int hidden_dim = cmdParser.get("hds"); - int ffn_hidden_dim = cmdParser.get("ffn"); - - vector in_strs = { - "\"Large Language Models (LLMs) are advanced artificial intelligence systems designed to understand and generate human-like text. These models are trained on vast amounts of data, enabling them to perform a wide range of tasks, from answering questions and summarizing text to generating creative content and engaging in conversational dialogue. LLMs like GPT-3 and GPT-4, developed by OpenAI, have set new benchmarks in natural language processing by leveraging deep learning architectures, particularly transformer models, which excel at capturing context and relationships within text. The scalability and versatility of LLMs make them invaluable tools for applications in education, customer service, content creation, and more. However, their deployment also raises ethical considerations, including issues of bias, misinformation, and the potential for misuse. 
As the field continues to evolve, ongoing research and responsible deployment strategies are essential to harnessing the full potential of these powerful AI systems while mitigating their risks.\"\nGenerate a title based on the above text." - // " What can you do?", - // "Please introduce Beijing University of Posts and Telecommunications."}; - }; - - string input_string; - if (read_file) { - std::ifstream file("./func_prompt.txt"); - if (!file) { - std::cerr << "无法打开文件!" << std::endl; - return 1; - } - std::stringstream buffer; - buffer << file.rdbuf(); - input_string = buffer.str(); - file.close(); // 关闭文件 - } else { - input_string = in_strs[0]; - } - - auto tokenizer = QWenTokenizer(vocab_path, merge_file_path); - - std::unique_ptr npu_ctx_ptr(new Context()); - auto *npu_ctx = npu_ctx_ptr.get(); - std::unique_ptr cpu_ctx_ptr(new Context()); - auto *cpu_ctx = cpu_ctx_ptr.get(); - std::unique_ptr inter_ctx_ptr(new Context()); - auto *inter_ctx = inter_ctx_ptr.get(); - - // cache_max should be longer than seqLength - modeling::qwen_npu(npu_ctx, vocab_size, hidden_dim, ffn_hidden_dim, head_num, tokens_limit, seqLength, chunk); - modeling::qwen_npu_cpu_inter(inter_ctx, vocab_size, hidden_dim, ffn_hidden_dim, head_num, tokens_limit, seqLength, chunk); - modeling::qwen_cpu_q4k(cpu_ctx, vocab_size, hidden_dim, ffn_hidden_dim, head_num, tokens_limit); - - BackendConfig bn; - QNNNet npuNet(bn, npu_ctx); - npuNet.convert(npu_ctx, BackendType::MLLM_QNN, thread_num); - Net interNet(bn); - interNet.convert(inter_ctx->sub_param_, BackendType::MLLM_CPU, thread_num); - Net cpuNet(bn); - cpuNet.convert(cpu_ctx->sub_param_, BackendType::MLLM_CPU, thread_num); - - ParamLoader npu_prefill_param_loader(npu_model_path); - ParamLoader cpu_decoding_param_loader(cpu_model_path); - ParamLoader inter_param_loader(npu_model_path); - - QNNExecutor *npuExePtr; - if (isChunkExecute) { - npuExePtr = new QNNPipelineExecutor(&npu_prefill_param_loader); - } else { - npuExePtr = new 
QNNExecutor(&npu_prefill_param_loader); - } - auto &npuExe = *npuExePtr; - npuExe.setup(&npuNet); - Executor interExe(&inter_param_loader); - interExe.setup(&interNet); - Executor cpuExe(&cpu_decoding_param_loader); - cpuExe.setup(&cpuNet); - - shared_ptr input = std::make_shared(); - - for (int str_i = 0; str_i < in_strs.size(); ++str_i) { - // auto in_str = in_strs[str_i]; - auto input_str = tokenizer.apply_chat_template(input_string); - auto [real_seq_length, input_tensor] = tokenizer.tokenizeWithPadding(input_str, seqLength, vocab_size); - auto input = std::make_shared(input_tensor); - - if (chunk != 1) - npuExe.warmup(npu_ctx, &npuNet, {input}); - - std::cout << "real_seq_length: " << real_seq_length << std::endl; - std::cout << "[Q] " << input_string << std::endl; - std::cout << "[A] " << std::flush; - - do { - // 1: Prefill stage using NPU chunk execute - npuExe.run(npu_ctx, &npuNet, {input}); - auto result = npuExe.result(); - - // inter model for prefill-decode - interExe.run(&interNet, {result[0]}); - result = interExe.result(); - - auto token_idx = postProcessing_prefill(result[0], input, real_seq_length); - if (token_idx == 2) { // "" - break; - } - - auto out_token = tokenizer.detokenize({token_idx}); - std::cout << out_token << std::flush; - - auto prefill_cpu_backend = dynamic_cast(npuNet.backends()[MLLM_CPU].get()); - auto inter_cpu_backend = dynamic_cast(interNet.backends()[MLLM_CPU].get()); - auto decode_cpu_backend = dynamic_cast(cpuNet.backends()[MLLM_CPU].get()); - prefill_cpu_backend->setCurSequenceLength(real_seq_length); - prefill_cpu_backend->setExecutionType(AUTOREGRESSIVE); - prefill_cpu_backend->toggleSwitching(); - inter_cpu_backend->setCurSequenceLength(real_seq_length); - inter_cpu_backend->setExecutionType(AUTOREGRESSIVE); - inter_cpu_backend->toggleSwitching(); - decode_cpu_backend->setCurSequenceLength(real_seq_length); - decode_cpu_backend->setExecutionType(AUTOREGRESSIVE); - decode_cpu_backend->toggleSwitching(); - - // // 2: 
Decoding stage using CPU execute - for (int step = real_seq_length; step < real_seq_length + 100; step++) { - cpuExe.run(&cpuNet, {input}); - auto result = cpuExe.result(); - - auto token_idx = postProcessing(result[0], input); - auto out_token = tokenizer.detokenize({token_idx}); - - auto [isOk, print_string] = tokenizer.postprocess(out_token); - if (isOk) { - std::cout << print_string << std::flush; - } else { - break; - } - - if (step == real_seq_length) { - prefill_cpu_backend->toggleSwitching(); - inter_cpu_backend->toggleSwitching(); - decode_cpu_backend->toggleSwitching(); - } - } - } while (false); - printf("\n"); - } - - std::cout << "====================" << std::endl; - npuExe.perf(); - cpuExe.perf(); - - // free memory - // for (auto *op : npu_ctx->net_ops) { - // delete op; - // } - // for (auto *tensor : npu_ctx->net_tensors) { - // delete tensor; - // } - - return 0; -} -#endif \ No newline at end of file diff --git a/examples/main_qwen_npu.hpp b/examples/main_qwen_npu.hpp deleted file mode 100644 index b1093c162..000000000 --- a/examples/main_qwen_npu.hpp +++ /dev/null @@ -1,227 +0,0 @@ -#include -#include -#include "Types.hpp" -#include "express/Express.hpp" - -using namespace mllm; - -namespace modeling { - -const std::set qwen_shadow_layers = {1, 2, 6}; - -NetTensor *Qwen_FFN_NPU(Context *c, NetTensor *i, int hidden_dim, int ffn_hidden_dim, string name) { - auto *x = _LinearINT8({i}, hidden_dim, ffn_hidden_dim, false, name + ".gate_proj"); - auto *y = _LinearINT8({i}, hidden_dim, ffn_hidden_dim, false, name + ".up_proj"); - x = _SuperSiLU({x, y}, name + ".supersilu"); - // x = _Dequantize({x}, true, (string)name + ".gate_proj.dequantize", false); - // y = _Dequantize({y}, true, (string)name + ".up_proj.dequantize", false); - // x = _SiLU({x}, name + ".silu"); - // x = *x * y; - // x = _Quantize({x}, true, (string)name + ".down_proj.quantize"); - x = _LinearINT8({x}, ffn_hidden_dim, hidden_dim, false, name + ".down_proj"); - x = _Dequantize({x}, 
true, (string)name + ".down_proj.dequantize"); - return x; -} - -std::vector Qwen_CPUNPUAttention(Context *c, NetTensor *x, NetTensor *res, int embedding_size, int hidden_size, int head_size, int cache_max, string name, int seq, int chunk, int layer = 0) { - x = x->view(1, static_cast(seq / chunk / 32), static_cast(32), hidden_size * head_size); - auto *q = _LinearINT8({x}, embedding_size, hidden_size * head_size, true, name + ".q_proj"); - auto *k = _LinearINT8({x}, embedding_size, hidden_size * head_size, true, name + ".k_proj"); - auto *v = _LinearINT8({x}, embedding_size, hidden_size * head_size, true, name + ".v_proj"); - q = q->view(1, head_size, seq / chunk, hidden_size); - k = k->view(1, head_size, seq / chunk, hidden_size); - v = v->view(1, head_size, seq / chunk, hidden_size); - - q = _Dequantize({q}, true, (string)name + ".q_proj.dequantize", true); - k = _Dequantize({k}, true, (string)name + ".k_proj.dequantize", false); - v = _Dequantize({v}, true, (string)name + ".v_proj.dequantize", false); - - v = _Transpose({v}, {0, 2, 3, 1}, (string)name + ".v_proj.transpose"); - - // before attention is CPU, so don't output res - vector m, s; - if (layer == 0 || qwen_shadow_layers.find(layer - 1) != qwen_shadow_layers.end()) { // res is from cpu - m = _MergeOutput({q, k, v}, name + ".qkv_merge"); - // -------------------- - _SubgraphBegin(c, MLLM_CPU); - // -------------------- - s = _SplitInput({m}, true, 4, name + ".qkv_split"); - q = s[0]; - k = s[1]; - v = s[2]; - } else { // res is from qnn - m = _MergeOutput({q, k, v, res}, name + ".qkv_merge"); - // -------------------- - _SubgraphBegin(c, MLLM_CPU); - // -------------------- - s = _SplitInput({m}, true, 4, name + ".qkv_split"); - q = s[0]; - k = s[1]; - v = s[2]; - res = s[3]; - } - - q = _RoPE({q}, HFHUBROPE, name + ".q_proj.rope", 1000000, 1024); - k = _RoPE({k}, HFHUBROPE, name + ".k_proj.rope", 1000000, 1024); - - k = _KVCacheNPU({k}, cache_max, name + ".k_cache"); - v = _KVCacheNPU({v}, cache_max, 
name + ".v_cache"); - - auto *qk = _Matmul({q, k}, false, true, name + ".qk"); - // qk = *qk / std::sqrt(hidden_size); - // qk = _Causalmask({qk}, name + ".mask"); - qk = _Softmax({qk}, DIMENSION, true, name + ".softmax"); - - auto *o = _Matmul({qk, v}, false, false, name + ".qkv"); - o = _Quantize({o}, true, (string)name + ".o_proj.quantize"); - - auto merge = _MergeOutput({o, res}, name + ".or_merge"); - // -------------------- - _SubgraphBegin(c, MLLM_QNN); - // -------------------- - s = _SplitInput(merge, true, 2, name + ".or_split"); - - o = s[0]; - res = s[1]; - - o = o->view(1, static_cast(seq / chunk / 32), static_cast(32), hidden_size * head_size); - res = res->view(-1, 1, -1, hidden_size * head_size); - o = _LinearINT8({o}, hidden_size * head_size, embedding_size, false, name + ".o_proj"); - o = _Dequantize({o}, true, (string)name + ".o_proj.dequantize"); - - return {o, res}; -} - -NetTensor *Qwen_CPUAttention_q4k(Context *c, NetTensor *x, int embedding_size, int hidden_size, int head_size, int cache_max, string name, int seq, int chunk) { - auto *q = _Linear({x}, embedding_size, hidden_size * head_size, true, name + ".q_proj"); - auto *k = _Linear({x}, embedding_size, hidden_size * head_size, true, name + ".k_proj"); - auto *v = _Linear({x}, embedding_size, hidden_size * head_size, true, name + ".v_proj"); - q = q->view(-1, head_size, -1, hidden_size); - k = k->view(-1, head_size, -1, hidden_size); - v = v->view(-1, head_size, -1, hidden_size); - - q = _RoPE({q}, HFHUBROPE, name + ".q_rope", 1000000, 32768); - k = _RoPE({k}, HFHUBROPE, name + ".k_rope", 1000000, 32768); - - k = _KVCacheNPU({k}, cache_max, name + ".k_cache"); - v = _KVCacheNPU({v}, cache_max, name + ".v_cache"); - - auto *qk = _Matmul({q, k}, false, true, name + ".qk"); - qk = *qk / std::sqrt(hidden_size); - // qk = _Causalmask({qk}, name + ".mask"); - qk = _Softmax({qk}, DIMENSION, true, name + ".softmax"); - - auto *o = _Matmul({qk, v}, false, false, name + ".qkv"); - - o = o->view(-1, 
1, -1, hidden_size * head_size); - - o = _Linear({o}, hidden_size * head_size, embedding_size, false, name + ".o_proj"); - - return o; -} - -NetTensor *Qwen_FFN_CPU_q4k(Context *c, NetTensor *i, int hidden_dim, int ffn_hidden_dim, string name) { - auto *x = _Linear({i}, hidden_dim, ffn_hidden_dim, false, name + ".gate_proj"); - auto *y = _Linear({i}, hidden_dim, ffn_hidden_dim, false, name + ".up_proj"); - x = _SiLU({x}, name + ".silu"); - x = *x * y; - x = _Linear({x}, ffn_hidden_dim, hidden_dim, false, name + ".down_proj"); - return x; -} - -void qwen_cpu_q4k(Context *c, int vocab_size = 32000, int hidden_dim = 4096, int ffn_hidden_dim = 11008, int mutil_head_size = 32, int cache_max = 200, int seq = 256, int chunk = 2) { - auto *i = _Input(c); - i = _Embedding({i}, vocab_size, hidden_dim, (string) "model.embed_tokens"); - - for (int layer = 0; layer < 24; ++layer) { - auto res = _RMSNorm({i}, hidden_dim, 1e-6, (string) "model.layers." + std::to_string(layer) + ".input_layernorm"); - - i = *Qwen_CPUAttention_q4k(c, res, hidden_dim, hidden_dim / mutil_head_size, mutil_head_size, cache_max, (string) "model.layers." + std::to_string(layer) + ".self_attn", seq, chunk) + i; - - res = _RMSNorm({i}, hidden_dim, 1e-6, (string) "model.layers." + std::to_string(layer) + ".post_attention_layernorm"); - - i = *Qwen_FFN_CPU_q4k(c, res, hidden_dim, ffn_hidden_dim, (string) "model.layers." + std::to_string(layer) + ".mlp") + i; - } - i = _RMSNorm({i}, hidden_dim, 1e-6, (string) "model.norm"); - i = _Linear({i}, hidden_dim, vocab_size, false, "lm_head"); -} - -// merge o and FFN. 
-void qwen_npu(Context *c, int vocab_size = 32000, int hidden_dim = 4096, int ffn_hidden_dim = 11008, int mutil_head_size = 32, int cache_max = 200, int seq = 256, int chunk = 2) { - auto *i = _Input(c); - i = _Embedding({i}, vocab_size, hidden_dim, (string) "model.embed_tokens"); - - // first 23 layer using NPU-CPU prefilling - for (int layer = 0; layer < 24; ++layer) { - auto res = i; - res = res->view(-1, mutil_head_size, -1, hidden_dim / mutil_head_size); - i = _RMSNorm({i}, hidden_dim, 1e-6, (string) "model.layers." + std::to_string(layer) + ".input_layernorm"); - i = _Quantize({i}, true, (string) "model.layers." + std::to_string(layer) + ".self_attn.q_proj.quantize"); - - // above is from CPU, needs merge and split - if (layer == 0 || qwen_shadow_layers.find(layer - 1) != qwen_shadow_layers.end()) { - i = i->view(-1, mutil_head_size, -1, hidden_dim / mutil_head_size); - auto m = _MergeOutput({i}, "model.layers." + std::to_string(layer) + ".ires_merge"); - _SubgraphBegin(c, MLLM_QNN); - auto s = _SplitInput(m, true, 2, "model.layers." + std::to_string(layer) + ".self_attn.ires_split"); - i = s[0]; - } - - auto ix = Qwen_CPUNPUAttention(c, i, res, hidden_dim, hidden_dim / mutil_head_size, mutil_head_size, cache_max, (string) "model.layers." + std::to_string(layer) + ".self_attn", seq, chunk, layer); - - i = ix[0]; - res = ix[1]; - - i = i->view(1, 1, seq / chunk, hidden_dim); - i = *i + res; - - res = i; - - i = _RMSNorm({i}, hidden_dim, 1e-6, (string) "model.layers." + std::to_string(layer) + ".post_attention_layernorm", false); - - // i = _Quantize({i}, true, (string) "model.layers." + std::to_string(layer) + ".mlp.up_proj.quantize"); - - i = i->view(1, static_cast(seq / chunk / 32), static_cast(32), hidden_dim); - - if (qwen_shadow_layers.find(layer) == qwen_shadow_layers.end()) { // normal ffn layers - i = Qwen_FFN_NPU(c, i, hidden_dim, ffn_hidden_dim, (string) "model.layers." 
+ std::to_string(layer) + ".mlp"); - - i = i->view(1, 1, seq / chunk, hidden_dim); - - i = *i + res; - } else { // shadow layers - auto name = (string) "model.layers." + std::to_string(layer) + ".mlp"; - auto *x = _LinearINT8({i}, hidden_dim, ffn_hidden_dim, false, name + ".gate_proj"); - auto *y = _LinearINT8({i}, hidden_dim, ffn_hidden_dim, false, name + ".up_proj"); - x = _Dequantize({x}, true, (string)name + ".gate_proj.dequantize", true); - y = _Dequantize({y}, true, (string)name + ".up_proj.dequantize", true); - x = _SiLU({x}, name + ".silu"); - x = *x * y; - - auto *i1 = x; - x = _Quantize({x}, true, (string)name + ".down_proj.quantize"); - - x = _LinearINT8({x}, ffn_hidden_dim, hidden_dim, false, name + ".down_proj"); - - auto *i2 = x; - x = _Dequantize({x}, true, (string)name + ".down_proj.dequantize"); - - x = x->view(1, 1, seq / chunk, hidden_dim); - - x = *x + res; - - auto shadow = _MergeOutput({i1, i2, x}, name + ".down_proj.shadow.qnn"); - - _SubgraphBegin(c, MLLM_CPU); - shadow = _SplitInput(shadow, true, 3); - i = _LinearINT8ShadowCPU(shadow, ffn_hidden_dim, hidden_dim, 1024, false, name + ".down_proj.shadow"); - } - } -} - -void qwen_npu_cpu_inter(Context *c, int vocab_size = 32000, int hidden_dim = 4096, int ffn_hidden_dim = 11008, int mutil_head_size = 32, int cache_max = 200, int seq = 256, int chunk = 2) { - auto *i = _Input(c); - i = _RMSNorm({i}, hidden_dim, 1e-6, (string) "model.norm"); - i = _Linear({i}, hidden_dim, vocab_size, false, "lm_head"); -} -} // namespace modeling diff --git a/examples/main_tinyllama.cpp b/examples/main_tinyllama.cpp deleted file mode 100644 index 8d9e7cd46..000000000 --- a/examples/main_tinyllama.cpp +++ /dev/null @@ -1,168 +0,0 @@ -#include -#include -#include -#include "cmdline.h" -#include "Net.hpp" -#include "Executor.hpp" -#include "express/Express.hpp" -#include "tokenizers/BPE/Bpe.hpp" -using namespace mllm; - -unsigned int argmax(const std::vector& scores) { - if(scores.empty()) { - throw 
std::invalid_argument("Input vector is empty"); - } - unsigned int maxIndex = 0; - float maxValue = scores[0]; - for(size_t i = 1; i < scores.size(); ++i) { - if(scores[i] > maxValue) { - maxIndex = i; - maxValue = scores[i]; - } - } - return maxIndex; -} -unsigned int postProcessing(shared_ptr result, shared_ptr& out_result){ - assert(result->batch() == 1); - assert(result->head() == 1); - out_result->reshape(1, 1, 1, 1); - out_result->alloc(); - vector scores; - for (int i = 0; i < result->dimension(); ++i) { - auto value = result->dataAt(0, 0, result->sequence()-1, i); - scores.push_back(value); - } - auto token_idx = argmax(scores); - out_result->setDataAt(0, 0, 0, 0, token_idx); - return token_idx; -} - - -NetTensor *Attention( NetTensor * x, int embedding_size, int hidden_size, int head_size, int mutil_key_value_head, int cache_max, string name){ - auto *q =_Linear({x}, embedding_size, hidden_size * head_size, false, name + ".q_proj"); - auto *k =_Linear({x}, embedding_size, hidden_size * mutil_key_value_head, false, name + ".k_proj"); - auto *v =_Linear({x}, embedding_size, hidden_size * mutil_key_value_head, false, name + ".v_proj"); - q = q->view(-1, head_size, -1, hidden_size); - k = k->view(-1, mutil_key_value_head, -1, hidden_size); - v = v->view(-1, mutil_key_value_head, -1, hidden_size); - q = _RoPE( {q}, HFHUBROPE, name + ".q_rope"); - k = _RoPE( {k}, HFHUBROPE, name + ".k_rope"); - k = _KVCache( {k},head_size/mutil_key_value_head, cache_max, name + ".k_cache"); - v = _KVCache( {v},head_size/mutil_key_value_head, cache_max, name + ".v_cache"); - auto *qk = _Matmul( {q, k}, false, true, name + ".qk"); - qk = *qk/std::sqrt(hidden_size); - // qk = _Causalmask( {qk}, name + ".mask"); - qk = _Softmax( {qk}, DIMENSION, true, name + ".softmax"); - auto *o = _Matmul( {qk, v}, false, false, name + ".qkv"); - o = o->view(-1, 1, -1, hidden_size * head_size); - o = _Linear( {o}, hidden_size * head_size, embedding_size, false, name + ".o_proj"); - return o; -} 
-NetTensor *FFN( NetTensor * i, int hidden_dim, int ffn_hidden_dim, string name){ - auto *x = _Linear( {i}, hidden_dim, ffn_hidden_dim, false, name+".gate_proj"); - x = _SiLU( {x}, name+".silu"); - auto *y = _Linear( {i}, hidden_dim, ffn_hidden_dim, false, name+".up_proj"); - x = *x*y;// x = _Mul( {x, y}, name+".dot"); - x = _Linear( {x}, ffn_hidden_dim, hidden_dim, false, name+".down_proj"); - return x; -} -void tinyllama(Context* c, int vocab_size= 32000, int hidden_dim= 2048, int ffn_hidden_dim = 5632, int mutil_head_size = 32, int mutil_key_value_head= 4, int cache_max=200){ - auto *i = _Input(c); - i = _Embedding( {i}, vocab_size, hidden_dim, (string)"model.embed_tokens"); - // loop - for(int layer=0; layer<22; ++layer) { - auto *x = _RMSNorm( {i}, hidden_dim, 1e-6, (string)"model.layers."+std::to_string(layer)+".input_layernorm"); - i = *Attention( x, hidden_dim, hidden_dim / mutil_head_size, mutil_head_size, mutil_key_value_head, cache_max, (string)"model.layers."+std::to_string(layer)+".self_attn") +i; - x = _RMSNorm( {i}, hidden_dim, 1e-6, (string)"model.layers."+std::to_string(layer)+".post_attention_layernorm"); - i = *FFN( x, hidden_dim, ffn_hidden_dim, (string)"model.layers."+std::to_string(layer) +".mlp") +i; - //_SubgraphBegin(c); - } - // end loop - i = _RMSNorm( {i}, hidden_dim, 1e-6, (string)"model.norm"); - i = _Linear( {i}, hidden_dim, vocab_size, false, "lm_head"); -} -int main(int argc, char **argv) { - cmdline::parser cmdParser; - cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", false, "../vocab/tinyllama_vocab.mllm"); - cmdParser.add("model", 'm', "specify mllm model path", false, "../models/tinyllama-1.1b-chat-q4_k.mllm"); - cmdParser.add("limits", 'l', "max KV cache size", false, 600); - cmdParser.add("thread", 't', "num of threads", false, 4); - cmdParser.parse_check(argc, argv); - - string vocab_path = cmdParser.get("vocab"); - string model_path = cmdParser.get("model"); - int tokens_limit = cmdParser.get("limits"); - int 
thread_num = cmdParser.get("thread"); - - auto tokenizer = BPETokenizer(vocab_path); - - int vocab_size = 32000; - int hidden_dim = 2048; - int ffn_hidden_dim = 5632; - int mutil_head_size = 32; - int key_value_head_size = 4; - - std::unique_ptr c_ptr(new Context()); - auto *c = c_ptr.get(); - tinyllama(c, vocab_size, hidden_dim, ffn_hidden_dim, mutil_head_size, key_value_head_size, tokens_limit); - - BackendConfig bn; - Net net(bn); - net.convert(c->sub_param_, BackendType::MLLM_CPU, thread_num); - - ParamLoader param_loader(model_path); - Executor ex(¶m_loader); - ex.setup(&net); - - string system_prompt_start = " You are a Q&A assistant. Your goal is to answer questions as accurately as possible based on the instructions and context provided.<|USER|>"; - string system_prompt_end = "<|ASSISTANT|>"; - - vector in_strs = { - "Hello, who are you?", - "Please introduce Beijing University of Posts and Telecommunications." - }; - shared_ptr input = std::make_shared(); - for (int str_i = 0; str_i < in_strs.size(); ++str_i) - { - auto in_str_origin = in_strs[str_i]; - auto in_str = system_prompt_start + in_str_origin + system_prompt_end; - if(in_str[0] != ' '){ - in_str = ' '+ in_str; - } - auto tokens_id = vector(); - tokenizer.tokenize(in_str, tokens_id, true); - std::cout< 0) { - tokens_id[0] = 13; - } - BPETokenizer::token2Tensor( &net, tokens_id, input); - std::cout <<"[Q] "<< in_str_origin << std::endl; - std::cout <<"[A] "<< std::flush; - for(int step = 0; step<100; step++) { - ex.run(&net, {input}); - auto result = ex.result(); - auto token_idx = postProcessing(result[0], input); - // std::cout <" - break; - } - auto out_token = tokenizer.detokenize({token_idx}); - std::cout <net_ops) { - delete op; - } - for (auto *tensor : c->net_tensors) { - delete tensor; - } - return 0; -} diff --git a/examples/main_vit.cpp b/examples/main_vit.cpp deleted file mode 100644 index ba7b0ac25..000000000 --- a/examples/main_vit.cpp +++ /dev/null @@ -1,1183 +0,0 @@ -#include 
-#include -#include -#include -#include "Net.hpp" -#include "Executor.hpp" -#include "express/Express.hpp" -#include "cmdline.h" -#ifndef STB_IMAGE_IMPLEMENTATION -#define STB_IMAGE_STATIC -#define STB_IMAGE_IMPLEMENTATION -#endif -#include "stb/stb_image.h" -#include "processor/PreProcess.hpp" - -using namespace std; - - - -void img2Tensor(shared_ptr input_tensor, Net &net, float* img, int height, int width, int channel) { - input_tensor->setBackend(net.backends()[BackendType::MLLM_CPU].get()); - input_tensor->reshape(1, height, channel, width); - input_tensor->setDtype(MLLM_TYPE_F32); - input_tensor->alloc(); - for (int h = 0; h < height; ++h) { - for (int c = 0; c < channel; ++c) { - for (int w = 0; w < width; ++w) { - input_tensor->setDataAt(0, h, c, w, img[(h * width + w) * channel + c]); - } - } - } -} - -void imgFullTensor(shared_ptr input_tensor, Net &net, int height, int width, int channel) { - input_tensor->setBackend(net.backends()[BackendType::MLLM_CPU].get()); - input_tensor->reshape(1, height, channel, width); - input_tensor->setDtype(MLLM_TYPE_F32); - input_tensor->alloc(); - for (int h = 0; h < height; ++h) { - for (int c = 0; c < channel; ++c) { - for (int w = 0; w < width; ++w) { - input_tensor->setDataAt(0, h, c, w, 0); - } - } - } -} - -std::map id2label = { - {0, "tench, Tinca tinca"}, - {1, "goldfish, Carassius auratus"}, - {2, "great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias"}, - {3, "tiger shark, Galeocerdo cuvieri"}, - {4, "hammerhead, hammerhead shark"}, - {5, "electric ray, crampfish, numbfish, torpedo"}, - {6, "stingray"}, - {7, "cock"}, - {8, "hen"}, - {9, "ostrich, Struthio camelus"}, - {10, "brambling, Fringilla montifringilla"}, - {11, "goldfinch, Carduelis carduelis"}, - {12, "house finch, linnet, Carpodacus mexicanus"}, - {13, "junco, snowbird"}, - {14, "indigo bunting, indigo finch, indigo bird, Passerina cyanea"}, - {15, "robin, American robin, Turdus migratorius"}, - {16, "bulbul"}, - {17, 
"jay"}, - {18, "magpie"}, - {19, "chickadee"}, - {20, "water ouzel, dipper"}, - {21, "kite"}, - {22, "bald eagle, American eagle, Haliaeetus leucocephalus"}, - {23, "vulture"}, - {24, "great grey owl, great gray owl, Strix nebulosa"}, - {25, "European fire salamander, Salamandra salamandra"}, - {26, "common newt, Triturus vulgaris"}, - {27, "eft"}, - {28, "spotted salamander, Ambystoma maculatum"}, - {29, "axolotl, mud puppy, Ambystoma mexicanum"}, - {30, "bullfrog, Rana catesbeiana"}, - {31, "tree frog, tree-frog"}, - {32, "tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui"}, - {33, "loggerhead, loggerhead turtle, Caretta caretta"}, - {34, "leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea"}, - {35, "mud turtle"}, - {36, "terrapin"}, - {37, "box turtle, box tortoise"}, - {38, "banded gecko"}, - {39, "common iguana, iguana, Iguana iguana"}, - {40, "American chameleon, anole, Anolis carolinensis"}, - {41, "whiptail, whiptail lizard"}, - {42, "agama"}, - {43, "frilled lizard, Chlamydosaurus kingi"}, - {44, "alligator lizard"}, - {45, "Gila monster, Heloderma suspectum"}, - {46, "green lizard, Lacerta viridis"}, - {47, "African chameleon, Chamaeleo chamaeleon"}, - {48, "Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis"}, - {49, "African crocodile, Nile crocodile, Crocodylus niloticus"}, - {50, "American alligator, Alligator mississipiensis"}, - {51, "triceratops"}, - {52, "thunder snake, worm snake, Carphophis amoenus"}, - {53, "ringneck snake, ring-necked snake, ring snake"}, - {54, "hognose snake, puff adder, sand viper"}, - {55, "green snake, grass snake"}, - {56, "king snake, kingsnake"}, - {57, "garter snake, grass snake"}, - {58, "water snake"}, - {59, "vine snake"}, - {60, "night snake, Hypsiglena torquata"}, - {61, "boa constrictor, Constrictor constrictor"}, - {62, "rock python, rock snake, Python sebae"}, - {63, "Indian cobra, Naja naja"}, - {64, "green mamba"}, - {65, "sea snake"}, - {66, 
"horned viper, cerastes, sand viper, horned asp, Cerastes cornutus"}, - {67, "diamondback, diamondback rattlesnake, Crotalus adamanteus"}, - {68, "sidewinder, horned rattlesnake, Crotalus cerastes"}, - {69, "trilobite"}, - {70, "harvestman, daddy longlegs, Phalangium opilio"}, - {71, "scorpion"}, - {72, "black and gold garden spider, Argiope aurantia"}, - {73, "barn spider, Araneus cavaticus"}, - {74, "garden spider, Aranea diademata"}, - {75, "black widow, Latrodectus mactans"}, - {76, "tarantula"}, - {77, "wolf spider, hunting spider"}, - {78, "tick"}, - {79, "centipede"}, - {80, "black grouse"}, - {81, "ptarmigan"}, - {82, "ruffed grouse, partridge, Bonasa umbellus"}, - {83, "prairie chicken, prairie grouse, prairie fowl"}, - {84, "peacock"}, - {85, "quail"}, - {86, "partridge"}, - {87, "African grey, African gray, Psittacus erithacus"}, - {88, "macaw"}, - {89, "sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita"}, - {90, "lorikeet"}, - {91, "coucal"}, - {92, "bee eater"}, - {93, "hornbill"}, - {94, "hummingbird"}, - {95, "jacamar"}, - {96, "toucan"}, - {97, "drake"}, - {98, "red-breasted merganser, Mergus serrator"}, - {99, "goose"}, - {100, "black swan, Cygnus atratus"}, - {101, "tusker"}, - {102, "echidna, spiny anteater, anteater"}, - {103, "platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus"}, - {104, "wallaby, brush kangaroo"}, - {105, "koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus"}, - {106, "wombat"}, - {107, "jellyfish"}, - {108, "sea anemone, anemone"}, - {109, "brain coral"}, - {110, "flatworm, platyhelminth"}, - {111, "nematode, nematode worm, roundworm"}, - {112, "conch"}, - {113, "snail"}, - {114, "slug"}, - {115, "sea slug, nudibranch"}, - {116, "chiton, coat-of-mail shell, sea cradle, polyplacophore"}, - {117, "chambered nautilus, pearly nautilus, nautilus"}, - {118, "Dungeness crab, Cancer magister"}, - {119, "rock crab, Cancer irroratus"}, - {120, "fiddler crab"}, - {121, 
"king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica"}, - {122, "American lobster, Northern lobster, Maine lobster, Homarus americanus"}, - {123, "spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish"}, - {124, "crayfish, crawfish, crawdad, crawdaddy"}, - {125, "hermit crab"}, - {126, "isopod"}, - {127, "white stork, Ciconia ciconia"}, - {128, "black stork, Ciconia nigra"}, - {129, "spoonbill"}, - {130, "flamingo"}, - {131, "little blue heron, Egretta caerulea"}, - {132, "American egret, great white heron, Egretta albus"}, - {133, "bittern"}, - {134, "crane"}, - {135, "limpkin, Aramus pictus"}, - {136, "European gallinule, Porphyrio porphyrio"}, - {137, "American coot, marsh hen, mud hen, water hen, Fulica americana"}, - {138, "bustard"}, - {139, "ruddy turnstone, Arenaria interpres"}, - {140, "red-backed sandpiper, dunlin, Erolia alpina"}, - {141, "redshank, Tringa totanus"}, - {142, "dowitcher"}, - {143, "oystercatcher, oyster catcher"}, - {144, "pelican"}, - {145, "king penguin, Aptenodytes patagonica"}, - {146, "albatross, mollymawk"}, - {147, "grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus"}, - {148, "killer whale, killer, orca, grampus, sea wolf, Orcinus orca"}, - {149, "dugong, Dugong dugon"}, - {150, "sea lion"}, - {151, "Chihuahua"}, - {152, "Japanese spaniel"}, - {153, "Maltese dog, Maltese terrier, Maltese"}, - {154, "Pekinese, Pekingese, Peke"}, - {155, "Shih-Tzu"}, - {156, "Blenheim spaniel"}, - {157, "papillon"}, - {158, "toy terrier"}, - {159, "Rhodesian ridgeback"}, - {160, "Afghan hound, Afghan"}, - {161, "basset, basset hound"}, - {162, "beagle"}, - {163, "bloodhound, sleuthhound"}, - {164, "bluetick"}, - {165, "black-and-tan coonhound"}, - {166, "Walker hound, Walker foxhound"}, - {167, "English foxhound"}, - {168, "redbone"}, - {169, "borzoi, Russian wolfhound"}, - {170, "Irish wolfhound"}, - {171, "Italian greyhound"}, - {172, "whippet"}, - {173, "Ibizan 
hound, Ibizan Podenco"}, - {174, "Norwegian elkhound, elkhound"}, - {175, "otterhound, otter hound"}, - {176, "Saluki, gazelle hound"}, - {177, "Scottish deerhound, deerhound"}, - {178, "Weimaraner"}, - {179, "Staffordshire bullterrier, Staffordshire bull terrier"}, - {180, "American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier"}, - {181, "Bedlington terrier"}, - {182, "Border terrier"}, - {183, "Kerry blue terrier"}, - {184, "Irish terrier"}, - {185, "Norfolk terrier"}, - {186, "Norwich terrier"}, - {187, "Yorkshire terrier"}, - {188, "wire-haired fox terrier"}, - {189, "Lakeland terrier"}, - {190, "Sealyham terrier, Sealyham"}, - {191, "Airedale, Airedale terrier"}, - {192, "cairn, cairn terrier"}, - {193, "Australian terrier"}, - {194, "Dandie Dinmont, Dandie Dinmont terrier"}, - {195, "Boston bull, Boston terrier"}, - {196, "miniature schnauzer"}, - {197, "giant schnauzer"}, - {198, "standard schnauzer"}, - {199, "Scotch terrier, Scottish terrier, Scottie"}, - {200, "Tibetan terrier, chrysanthemum dog"}, - {201, "silky terrier, Sydney silky"}, - {202, "soft-coated wheaten terrier"}, - {203, "West Highland white terrier"}, - {204, "Lhasa, Lhasa apso"}, - {205, "flat-coated retriever"}, - {206, "curly-coated retriever"}, - {207, "golden retriever"}, - {208, "Labrador retriever"}, - {209, "Chesapeake Bay retriever"}, - {210, "German short-haired pointer"}, - {211, "vizsla, Hungarian pointer"}, - {212, "English setter"}, - {213, "Irish setter, red setter"}, - {214, "Gordon setter"}, - {215, "Brittany spaniel"}, - {216, "clumber, clumber spaniel"}, - {217, "English springer, English springer spaniel"}, - {218, "Welsh springer spaniel"}, - {219, "cocker spaniel, English cocker spaniel, cocker"}, - {220, "Sussex spaniel"}, - {221, "Irish water spaniel"}, - {222, "kuvasz"}, - {223, "schipperke"}, - {224, "groenendael"}, - {225, "malinois"}, - {226, "briard"}, - {227, "kelpie"}, - {228, "komondor"}, - {229, "Old English 
sheepdog, bobtail"}, - {230, "Shetland sheepdog, Shetland sheep dog, Shetland"}, - {231, "collie"}, - {232, "Border collie"}, - {233, "Bouvier des Flandres, Bouviers des Flandres"}, - {234, "Rottweiler"}, - {235, "German shepherd, German shepherd dog, German police dog, alsatian"}, - {236, "Doberman, Doberman pinscher"}, - {237, "miniature pinscher"}, - {238, "Greater Swiss Mountain dog"}, - {239, "Bernese mountain dog"}, - {240, "Appenzeller"}, - {241, "EntleBucher"}, - {242, "boxer"}, - {243, "bull mastiff"}, - {244, "Tibetan mastiff"}, - {245, "French bulldog"}, - {246, "Great Dane"}, - {247, "Saint Bernard, St Bernard"}, - {248, "Eskimo dog, husky"}, - {249, "malamute, malemute, Alaskan malamute"}, - {250, "Siberian husky"}, - {251, "dalmatian, coach dog, carriage dog"}, - {252, "affenpinscher, monkey pinscher, monkey dog"}, - {253, "basenji"}, - {254, "pug, pug-dog"}, - {255, "Leonberg"}, - {256, "Newfoundland, Newfoundland dog"}, - {257, "Great Pyrenees"}, - {258, "Samoyed, Samoyede"}, - {259, "Pomeranian"}, - {260, "chow, chow chow"}, - {261, "keeshond"}, - {262, "Brabancon griffon"}, - {263, "Pembroke, Pembroke Welsh corgi"}, - {264, "Cardigan, Cardigan Welsh corgi"}, - {265, "toy poodle"}, - {266, "miniature poodle"}, - {267, "standard poodle"}, - {268, "Mexican hairless"}, - {269, "timber wolf, grey wolf, gray wolf, Canis lupus"}, - {270, "white wolf, Arctic wolf, Canis lupus tundrarum"}, - {271, "red wolf, maned wolf, Canis rufus, Canis niger"}, - {272, "coyote, prairie wolf, brush wolf, Canis latrans"}, - {273, "dingo, warrigal, warragal, Canis dingo"}, - {274, "dhole, Cuon alpinus"}, - {275, "African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus"}, - {276, "hyena, hyaena"}, - {277, "red fox, Vulpes vulpes"}, - {278, "kit fox, Vulpes macrotis"}, - {279, "Arctic fox, white fox, Alopex lagopus"}, - {280, "grey fox, gray fox, Urocyon cinereoargenteus"}, - {281, "tabby, tabby cat"}, - {282, "tiger cat"}, - {283, "Persian cat"}, - {284, "Siamese 
cat, Siamese"}, - {285, "Egyptian cat"}, - {286, "cougar, puma, catamount, mountain lion, painter, panther, Felis concolor"}, - {287, "lynx, catamount"}, - {288, "leopard, Panthera pardus"}, - {289, "snow leopard, ounce, Panthera uncia"}, - {290, "jaguar, panther, Panthera onca, Felis onca"}, - {291, "lion, king of beasts, Panthera leo"}, - {292, "tiger, Panthera tigris"}, - {293, "cheetah, chetah, Acinonyx jubatus"}, - {294, "brown bear, bruin, Ursus arctos"}, - {295, "American black bear, black bear, Ursus americanus, Euarctos americanus"}, - {296, "ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus"}, - {297, "sloth bear, Melursus ursinus, Ursus ursinus"}, - {298, "mongoose"}, - {299, "meerkat, mierkat"}, - {300, "tiger beetle"}, - {301, "ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle"}, - {302, "ground beetle, carabid beetle"}, - {303, "long-horned beetle, longicorn, longicorn beetle"}, - {304, "leaf beetle, chrysomelid"}, - {305, "dung beetle"}, - {306, "rhinoceros beetle"}, - {307, "weevil"}, - {308, "fly"}, - {309, "bee"}, - {310, "ant, emmet, pismire"}, - {311, "grasshopper, hopper"}, - {312, "cricket"}, - {313, "walking stick, walkingstick, stick insect"}, - {314, "cockroach, roach"}, - {315, "mantis, mantid"}, - {316, "cicada, cicala"}, - {317, "leafhopper"}, - {318, "lacewing, lacewing fly"}, - {319, "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk"}, - {320, "damselfly"}, - {321, "admiral"}, - {322, "ringlet, ringlet butterfly"}, - {323, "monarch, monarch butterfly, milkweed butterfly, Danaus plexippus"}, - {324, "cabbage butterfly"}, - {325, "sulphur butterfly, sulfur butterfly"}, - {326, "lycaenid, lycaenid butterfly"}, - {327, "starfish, sea star"}, - {328, "sea urchin"}, - {329, "sea cucumber, holothurian"}, - {330, "wood rabbit, cottontail, cottontail rabbit"}, - {331, "hare"}, - {332, "Angora, Angora rabbit"}, - {333, "hamster"}, - {334, "porcupine, 
hedgehog"}, - {335, "fox squirrel, eastern fox squirrel, Sciurus niger"}, - {336, "marmot"}, - {337, "beaver"}, - {338, "guinea pig, Cavia cobaya"}, - {339, "sorrel"}, - {340, "zebra"}, - {341, "hog, pig, grunter, squealer, Sus scrofa"}, - {342, "wild boar, boar, Sus scrofa"}, - {343, "warthog"}, - {344, "hippopotamus, hippo, river horse, Hippopotamus amphibius"}, - {345, "ox"}, - {346, "water buffalo, water ox, Asiatic buffalo, Bubalus bubalis"}, - {347, "bison"}, - {348, "ram, tup"}, - {349, "bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis"}, - {350, "ibex, Capra ibex"}, - {351, "hartebeest"}, - {352, "impala, Aepyceros melampus"}, - {353, "gazelle"}, - {354, "Arabian camel, dromedary, Camelus dromedarius"}, - {355, "llama"}, - {356, "weasel"}, - {357, "mink"}, - {358, "polecat, fitch, foulmart, foumart, Mustela putorius"}, - {359, "black-footed ferret, ferret, Mustela nigripes"}, - {360, "otter"}, - {361, "skunk, polecat, wood pussy"}, - {362, "badger"}, - {363, "armadillo"}, - {364, "three-toed sloth, ai, Bradypus tridactylus"}, - {365, "orangutan, orang, orangutang, Pongo pygmaeus"}, - {366, "gorilla, Gorilla gorilla"}, - {367, "chimpanzee, chimp, Pan troglodytes"}, - {368, "gibbon, Hylobates lar"}, - {369, "siamang, Hylobates syndactylus, Symphalangus syndactylus"}, - {370, "guenon, guenon monkey"}, - {371, "patas, hussar monkey, Erythrocebus patas"}, - {372, "baboon"}, - {373, "macaque"}, - {374, "langur"}, - {375, "colobus, colobus monkey"}, - {376, "proboscis monkey, Nasalis larvatus"}, - {377, "marmoset"}, - {378, "capuchin, ringtail, Cebus capucinus"}, - {379, "howler monkey, howler"}, - {380, "titi, titi monkey"}, - {381, "spider monkey, Ateles geoffroyi"}, - {382, "squirrel monkey, Saimiri sciureus"}, - {383, "Madagascar cat, ring-tailed lemur, Lemur catta"}, - {384, "indri, indris, Indri indri, Indri brevicaudatus"}, - {385, "Indian elephant, Elephas maximus"}, - {386, "African elephant, Loxodonta 
africana"}, - {387, "lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens"}, - {388, "giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca"}, - {389, "barracouta, snoek"}, - {390, "eel"}, - {391, "coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch"}, - {392, "rock beauty, Holocanthus tricolor"}, - {393, "anemone fish"}, - {394, "sturgeon"}, - {395, "gar, garfish, garpike, billfish, Lepisosteus osseus"}, - {396, "lionfish"}, - {397, "puffer, pufferfish, blowfish, globefish"}, - {398, "abacus"}, - {399, "abaya"}, - {400, "academic gown, academic robe, judge's robe"}, - {401, "accordion, piano accordion, squeeze box"}, - {402, "acoustic guitar"}, - {403, "aircraft carrier, carrier, flattop, attack aircraft carrier"}, - {404, "airliner"}, - {405, "airship, dirigible"}, - {406, "altar"}, - {407, "ambulance"}, - {408, "amphibian, amphibious vehicle"}, - {409, "analog clock"}, - {410, "apiary, bee house"}, - {411, "apron"}, - {412, "ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin"}, - {413, "assault rifle, assault gun"}, - {414, "backpack, back pack, knapsack, packsack, rucksack, haversack"}, - {415, "bakery, bakeshop, bakehouse"}, - {416, "balance beam, beam"}, - {417, "balloon"}, - {418, "ballpoint, ballpoint pen, ballpen, Biro"}, - {419, "Band Aid"}, - {420, "banjo"}, - {421, "bannister, banister, balustrade, balusters, handrail"}, - {422, "barbell"}, - {423, "barber chair"}, - {424, "barbershop"}, - {425, "barn"}, - {426, "barometer"}, - {427, "barrel, cask"}, - {428, "barrow, garden cart, lawn cart, wheelbarrow"}, - {429, "baseball"}, - {430, "basketball"}, - {431, "bassinet"}, - {432, "bassoon"}, - {433, "bathing cap, swimming cap"}, - {434, "bath towel"}, - {435, "bathtub, bathing tub, bath, tub"}, - {436, "beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon"}, - {437, "beacon, lighthouse, beacon light, pharos"}, - {438, "beaker"}, 
- {439, "bearskin, busby, shako"}, - {440, "beer bottle"}, - {441, "beer glass"}, - {442, "bell cote, bell cot"}, - {443, "bib"}, - {444, "bicycle-built-for-two, tandem bicycle, tandem"}, - {445, "bikini, two-piece"}, - {446, "binder, ring-binder"}, - {447, "binoculars, field glasses, opera glasses"}, - {448, "birdhouse"}, - {449, "boathouse"}, - {450, "bobsled, bobsleigh, bob"}, - {451, "bolo tie, bolo, bola tie, bola"}, - {452, "bonnet, poke bonnet"}, - {453, "bookcase"}, - {454, "bookshop, bookstore, bookstall"}, - {455, "bottlecap"}, - {456, "bow"}, - {457, "bow tie, bow-tie, bowtie"}, - {458, "brass, memorial tablet, plaque"}, - {459, "brassiere, bra, bandeau"}, - {460, "breakwater, groin, groyne, mole, bulwark, seawall, jetty"}, - {461, "breastplate, aegis, egis"}, - {462, "broom"}, - {463, "bucket, pail"}, - {464, "buckle"}, - {465, "bulletproof vest"}, - {466, "bullet train, bullet"}, - {467, "butcher shop, meat market"}, - {468, "cab, hack, taxi, taxicab"}, - {469, "caldron, cauldron"}, - {470, "candle, taper, wax light"}, - {471, "cannon"}, - {472, "canoe"}, - {473, "can opener, tin opener"}, - {474, "cardigan"}, - {475, "car mirror"}, - {476, "carousel, carrousel, merry-go-round, roundabout, whirligig"}, - {477, "carpenter's kit, tool kit"}, - {478, "carton"}, - {479, "car wheel"}, - {480, "cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM"}, - {481, "cassette"}, - {482, "cassette player"}, - {483, "castle"}, - {484, "catamaran"}, - {485, "CD player"}, - {486, "cello, violoncello"}, - {487, "cellular telephone, cellular phone, cellphone, cell, mobile phone"}, - {488, "chain"}, - {489, "chainlink fence"}, - {490, "chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour"}, - {491, "chain saw, chainsaw"}, - {492, "chest"}, - {493, "chiffonier, commode"}, - {494, "chime, bell, gong"}, - {495, "china cabinet, china closet"}, - {496, "Christmas stocking"}, - {497, 
"church, church building"}, - {498, "cinema, movie theater, movie theatre, movie house, picture palace"}, - {499, "cleaver, meat cleaver, chopper"}, - {500, "cliff dwelling"}, - {501, "cloak"}, - {502, "clog, geta, patten, sabot"}, - {503, "cocktail shaker"}, - {504, "coffee mug"}, - {505, "coffeepot"}, - {506, "coil, spiral, volute, whorl, helix"}, - {507, "combination lock"}, - {508, "computer keyboard, keypad"}, - {509, "confectionery, confectionary, candy store"}, - {510, "container ship, containership, container vessel"}, - {511, "convertible"}, - {512, "corkscrew, bottle screw"}, - {513, "cornet, horn, trumpet, trump"}, - {514, "cowboy boot"}, - {515, "cowboy hat, ten-gallon hat"}, - {516, "cradle"}, - {517, "crane"}, - {518, "crash helmet"}, - {519, "crate"}, - {520, "crib, cot"}, - {521, "Crock Pot"}, - {522, "croquet ball"}, - {523, "crutch"}, - {524, "cuirass"}, - {525, "dam, dike, dyke"}, - {526, "desk"}, - {527, "desktop computer"}, - {528, "dial telephone, dial phone"}, - {529, "diaper, nappy, napkin"}, - {530, "digital clock"}, - {531, "digital watch"}, - {532, "dining table, board"}, - {533, "dishrag, dishcloth"}, - {534, "dishwasher, dish washer, dishwashing machine"}, - {535, "disk brake, disc brake"}, - {536, "dock, dockage, docking facility"}, - {537, "dogsled, dog sled, dog sleigh"}, - {538, "dome"}, - {539, "doormat, welcome mat"}, - {540, "drilling platform, offshore rig"}, - {541, "drum, membranophone, tympan"}, - {542, "drumstick"}, - {543, "dumbbell"}, - {544, "Dutch oven"}, - {545, "electric fan, blower"}, - {546, "electric guitar"}, - {547, "electric locomotive"}, - {548, "entertainment center"}, - {549, "envelope"}, - {550, "espresso maker"}, - {551, "face powder"}, - {552, "feather boa, boa"}, - {553, "file, file cabinet, filing cabinet"}, - {554, "fireboat"}, - {555, "fire engine, fire truck"}, - {556, "fire screen, fireguard"}, - {557, "flagpole, flagstaff"}, - {558, "flute, transverse flute"}, - {559, "folding chair"}, - {560, 
"football helmet"}, - {561, "forklift"}, - {562, "fountain"}, - {563, "fountain pen"}, - {564, "four-poster"}, - {565, "freight car"}, - {566, "French horn, horn"}, - {567, "frying pan, frypan, skillet"}, - {568, "fur coat"}, - {569, "garbage truck, dustcart"}, - {570, "gasmask, respirator, gas helmet"}, - {571, "gas pump, gasoline pump, petrol pump, island dispenser"}, - {572, "goblet"}, - {573, "go-kart"}, - {574, "golf ball"}, - {575, "golfcart, golf cart"}, - {576, "gondola"}, - {577, "gong, tam-tam"}, - {578, "gown"}, - {579, "grand piano, grand"}, - {580, "greenhouse, nursery, glasshouse"}, - {581, "grille, radiator grille"}, - {582, "grocery store, grocery, food market, market"}, - {583, "guillotine"}, - {584, "hair slide"}, - {585, "hair spray"}, - {586, "half track"}, - {587, "hammer"}, - {588, "hamper"}, - {589, "hand blower, blow dryer, blow drier, hair dryer, hair drier"}, - {590, "hand-held computer, hand-held microcomputer"}, - {591, "handkerchief, hankie, hanky, hankey"}, - {592, "hard disc, hard disk, fixed disk"}, - {593, "harmonica, mouth organ, harp, mouth harp"}, - {594, "harp"}, - {595, "harvester, reaper"}, - {596, "hatchet"}, - {597, "holster"}, - {598, "home theater, home theatre"}, - {599, "honeycomb"}, - {600, "hook, claw"}, - {601, "hoopskirt, crinoline"}, - {602, "horizontal bar, high bar"}, - {603, "horse cart, horse-cart"}, - {604, "hourglass"}, - {605, "iPod"}, - {606, "iron, smoothing iron"}, - {607, "jack-o'-lantern"}, - {608, "jean, blue jean, denim"}, - {609, "jeep, landrover"}, - {610, "jersey, T-shirt, tee shirt"}, - {611, "jigsaw puzzle"}, - {612, "jinrikisha, ricksha, rickshaw"}, - {613, "joystick"}, - {614, "kimono"}, - {615, "knee pad"}, - {616, "knot"}, - {617, "lab coat, laboratory coat"}, - {618, "ladle"}, - {619, "lampshade, lamp shade"}, - {620, "laptop, laptop computer"}, - {621, "lawn mower, mower"}, - {622, "lens cap, lens cover"}, - {623, "letter opener, paper knife, paperknife"}, - {624, "library"}, - {625, 
"lifeboat"}, - {626, "lighter, light, igniter, ignitor"}, - {627, "limousine, limo"}, - {628, "liner, ocean liner"}, - {629, "lipstick, lip rouge"}, - {630, "Loafer"}, - {631, "lotion"}, - {632, "loudspeaker, speaker, speaker unit, loudspeaker system, speaker system"}, - {633, "loupe, jeweler's loupe"}, - {634, "lumbermill, sawmill"}, - {635, "magnetic compass"}, - {636, "mailbag, postbag"}, - {637, "mailbox, letter box"}, - {638, "maillot"}, - {639, "maillot, tank suit"}, - {640, "manhole cover"}, - {641, "maraca"}, - {642, "marimba, xylophone"}, - {643, "mask"}, - {644, "matchstick"}, - {645, "maypole"}, - {646, "maze, labyrinth"}, - {647, "measuring cup"}, - {648, "medicine chest, medicine cabinet"}, - {649, "megalith, megalithic structure"}, - {650, "microphone, mike"}, - {651, "microwave, microwave oven"}, - {652, "military uniform"}, - {653, "milk can"}, - {654, "minibus"}, - {655, "miniskirt, mini"}, - {656, "minivan"}, - {657, "missile"}, - {658, "mitten"}, - {659, "mixing bowl"}, - {660, "mobile home, manufactured home"}, - {661, "Model T"}, - {662, "modem"}, - {663, "monastery"}, - {664, "monitor"}, - {665, "moped"}, - {666, "mortar"}, - {667, "mortarboard"}, - {668, "mosque"}, - {669, "mosquito net"}, - {670, "motor scooter, scooter"}, - {671, "mountain bike, all-terrain bike, off-roader"}, - {672, "mountain tent"}, - {673, "mouse, computer mouse"}, - {674, "mousetrap"}, - {675, "moving van"}, - {676, "muzzle"}, - {677, "nail"}, - {678, "neck brace"}, - {679, "necklace"}, - {680, "nipple"}, - {681, "notebook, notebook computer"}, - {682, "obelisk"}, - {683, "oboe, hautboy, hautbois"}, - {684, "ocarina, sweet potato"}, - {685, "odometer, hodometer, mileometer, milometer"}, - {686, "oil filter"}, - {687, "organ, pipe organ"}, - {688, "oscilloscope, scope, cathode-ray oscilloscope, CRO"}, - {689, "overskirt"}, - {690, "oxcart"}, - {691, "oxygen mask"}, - {692, "packet"}, - {693, "paddle, boat paddle"}, - {694, "paddlewheel, paddle wheel"}, - {695, 
"padlock"}, - {696, "paintbrush"}, - {697, "pajama, pyjama, pj's, jammies"}, - {698, "palace"}, - {699, "panpipe, pandean pipe, syrinx"}, - {700, "paper towel"}, - {701, "parachute, chute"}, - {702, "parallel bars, bars"}, - {703, "park bench"}, - {704, "parking meter"}, - {705, "passenger car, coach, carriage"}, - {706, "patio, terrace"}, - {707, "pay-phone, pay-station"}, - {708, "pedestal, plinth, footstall"}, - {709, "pencil box, pencil case"}, - {710, "pencil sharpener"}, - {711, "perfume, essence"}, - {712, "Petri dish"}, - {713, "photocopier"}, - {714, "pick, plectrum, plectron"}, - {715, "pickelhaube"}, - {716, "picket fence, paling"}, - {717, "pickup, pickup truck"}, - {718, "pier"}, - {719, "piggy bank, penny bank"}, - {720, "pill bottle"}, - {721, "pillow"}, - {722, "ping-pong ball"}, - {723, "pinwheel"}, - {724, "pirate, pirate ship"}, - {725, "pitcher, ewer"}, - {726, "plane, carpenter's plane, woodworking plane"}, - {727, "planetarium"}, - {728, "plastic bag"}, - {729, "plate rack"}, - {730, "plow, plough"}, - {731, "plunger, plumber's helper"}, - {732, "Polaroid camera, Polaroid Land camera"}, - {733, "pole"}, - {734, "police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria"}, - {735, "poncho"}, - {736, "pool table, billiard table, snooker table"}, - {737, "pop bottle, soda bottle"}, - {738, "pot, flowerpot"}, - {739, "potter's wheel"}, - {740, "power drill"}, - {741, "prayer rug, prayer mat"}, - {742, "printer"}, - {743, "prison, prison house"}, - {744, "projectile, missile"}, - {745, "projector"}, - {746, "puck, hockey puck"}, - {747, "punching bag, punch bag, punching ball, punchball"}, - {748, "purse"}, - {749, "quill, quill pen"}, - {750, "quilt, comforter, comfort, puff"}, - {751, "racer, race car, racing car"}, - {752, "racket, racquet"}, - {753, "radiator"}, - {754, "radio, wireless"}, - {755, "radio telescope, radio reflector"}, - {756, "rain barrel"}, - {757, "recreational vehicle, RV, R.V."}, - {758, "reel"}, - {759, 
"reflex camera"}, - {760, "refrigerator, icebox"}, - {761, "remote control, remote"}, - {762, "restaurant, eating house, eating place, eatery"}, - {763, "revolver, six-gun, six-shooter"}, - {764, "rifle"}, - {765, "rocking chair, rocker"}, - {766, "rotisserie"}, - {767, "rubber eraser, rubber, pencil eraser"}, - {768, "rugby ball"}, - {769, "rule, ruler"}, - {770, "running shoe"}, - {771, "safe"}, - {772, "safety pin"}, - {773, "saltshaker, salt shaker"}, - {774, "sandal"}, - {775, "sarong"}, - {776, "sax, saxophone"}, - {777, "scabbard"}, - {778, "scale, weighing machine"}, - {779, "school bus"}, - {780, "schooner"}, - {781, "scoreboard"}, - {782, "screen, CRT screen"}, - {783, "screw"}, - {784, "screwdriver"}, - {785, "seat belt, seatbelt"}, - {786, "sewing machine"}, - {787, "shield, buckler"}, - {788, "shoe shop, shoe-shop, shoe store"}, - {789, "shoji"}, - {790, "shopping basket"}, - {791, "shopping cart"}, - {792, "shovel"}, - {793, "shower cap"}, - {794, "shower curtain"}, - {795, "ski"}, - {796, "ski mask"}, - {797, "sleeping bag"}, - {798, "slide rule, slipstick"}, - {799, "sliding door"}, - {800, "slot, one-armed bandit"}, - {801, "snorkel"}, - {802, "snowmobile"}, - {803, "snowplow, snowplough"}, - {804, "soap dispenser"}, - {805, "soccer ball"}, - {806, "sock"}, - {807, "solar dish, solar collector, solar furnace"}, - {808, "sombrero"}, - {809, "soup bowl"}, - {810, "space bar"}, - {811, "space heater"}, - {812, "space shuttle"}, - {813, "spatula"}, - {814, "speedboat"}, - {815, "spider web, spider's web"}, - {816, "spindle"}, - {817, "sports car, sport car"}, - {818, "spotlight, spot"}, - {819, "stage"}, - {820, "steam locomotive"}, - {821, "steel arch bridge"}, - {822, "steel drum"}, - {823, "stethoscope"}, - {824, "stole"}, - {825, "stone wall"}, - {826, "stopwatch, stop watch"}, - {827, "stove"}, - {828, "strainer"}, - {829, "streetcar, tram, tramcar, trolley, trolley car"}, - {830, "stretcher"}, - {831, "studio couch, day bed"}, - {832, "stupa, 
tope"}, - {833, "submarine, pigboat, sub, U-boat"}, - {834, "suit, suit of clothes"}, - {835, "sundial"}, - {836, "sunglass"}, - {837, "sunglasses, dark glasses, shades"}, - {838, "sunscreen, sunblock, sun blocker"}, - {839, "suspension bridge"}, - {840, "swab, swob, mop"}, - {841, "sweatshirt"}, - {842, "swimming trunks, bathing trunks"}, - {843, "swing"}, - {844, "switch, electric switch, electrical switch"}, - {845, "syringe"}, - {846, "table lamp"}, - {847, "tank, army tank, armored combat vehicle, armoured combat vehicle"}, - {848, "tape player"}, - {849, "teapot"}, - {850, "teddy, teddy bear"}, - {851, "television, television system"}, - {852, "tennis ball"}, - {853, "thatch, thatched roof"}, - {854, "theater curtain, theatre curtain"}, - {855, "thimble"}, - {856, "thresher, thrasher, threshing machine"}, - {857, "throne"}, - {858, "tile roof"}, - {859, "toaster"}, - {860, "tobacco shop, tobacconist shop, tobacconist"}, - {861, "toilet seat"}, - {862, "torch"}, - {863, "totem pole"}, - {864, "tow truck, tow car, wrecker"}, - {865, "toyshop"}, - {866, "tractor"}, - {867, "trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi"}, - {868, "tray"}, - {869, "trench coat"}, - {870, "tricycle, trike, velocipede"}, - {871, "trimaran"}, - {872, "tripod"}, - {873, "triumphal arch"}, - {874, "trolleybus, trolley coach, trackless trolley"}, - {875, "trombone"}, - {876, "tub, vat"}, - {877, "turnstile"}, - {878, "typewriter keyboard"}, - {879, "umbrella"}, - {880, "unicycle, monocycle"}, - {881, "upright, upright piano"}, - {882, "vacuum, vacuum cleaner"}, - {883, "vase"}, - {884, "vault"}, - {885, "velvet"}, - {886, "vending machine"}, - {887, "vestment"}, - {888, "viaduct"}, - {889, "violin, fiddle"}, - {890, "volleyball"}, - {891, "waffle iron"}, - {892, "wall clock"}, - {893, "wallet, billfold, notecase, pocketbook"}, - {894, "wardrobe, closet, press"}, - {895, "warplane, military plane"}, - {896, "washbasin, handbasin, washbowl, lavabo, wash-hand 
basin"}, - {897, "washer, automatic washer, washing machine"}, - {898, "water bottle"}, - {899, "water jug"}, - {900, "water tower"}, - {901, "whiskey jug"}, - {902, "whistle"}, - {903, "wig"}, - {904, "window screen"}, - {905, "window shade"}, - {906, "Windsor tie"}, - {907, "wine bottle"}, - {908, "wing"}, - {909, "wok"}, - {910, "wooden spoon"}, - {911, "wool, woolen, woollen"}, - {912, "worm fence, snake fence, snake-rail fence, Virginia fence"}, - {913, "wreck"}, - {914, "yawl"}, - {915, "yurt"}, - {916, "web site, website, internet site, site"}, - {917, "comic book"}, - {918, "crossword puzzle, crossword"}, - {919, "street sign"}, - {920, "traffic light, traffic signal, stoplight"}, - {921, "book jacket, dust cover, dust jacket, dust wrapper"}, - {922, "menu"}, - {923, "plate"}, - {924, "guacamole"}, - {925, "consomme"}, - {926, "hot pot, hotpot"}, - {927, "trifle"}, - {928, "ice cream, icecream"}, - {929, "ice lolly, lolly, lollipop, popsicle"}, - {930, "French loaf"}, - {931, "bagel, beigel"}, - {932, "pretzel"}, - {933, "cheeseburger"}, - {934, "hotdog, hot dog, red hot"}, - {935, "mashed potato"}, - {936, "head cabbage"}, - {937, "broccoli"}, - {938, "cauliflower"}, - {939, "zucchini, courgette"}, - {940, "spaghetti squash"}, - {941, "acorn squash"}, - {942, "butternut squash"}, - {943, "cucumber, cuke"}, - {944, "artichoke, globe artichoke"}, - {945, "bell pepper"}, - {946, "cardoon"}, - {947, "mushroom"}, - {948, "Granny Smith"}, - {949, "strawberry"}, - {950, "orange"}, - {951, "lemon"}, - {952, "fig"}, - {953, "pineapple, ananas"}, - {954, "banana"}, - {955, "jackfruit, jak, jack"}, - {956, "custard apple"}, - {957, "pomegranate"}, - {958, "hay"}, - {959, "carbonara"}, - {960, "chocolate sauce, chocolate syrup"}, - {961, "dough"}, - {962, "meat loaf, meatloaf"}, - {963, "pizza, pizza pie"}, - {964, "potpie"}, - {965, "burrito"}, - {966, "red wine"}, - {967, "espresso"}, - {968, "cup"}, - {969, "eggnog"}, - {970, "alp"}, - {971, "bubble"}, - {972, 
"cliff, drop, drop-off"}, - {973, "coral reef"}, - {974, "geyser"}, - {975, "lakeside, lakeshore"}, - {976, "promontory, headland, head, foreland"}, - {977, "sandbar, sand bar"}, - {978, "seashore, coast, seacoast, sea-coast"}, - {979, "valley, vale"}, - {980, "volcano"}, - {981, "ballplayer, baseball player"}, - {982, "groom, bridegroom"}, - {983, "scuba diver"}, - {984, "rapeseed"}, - {985, "daisy"}, - {986, "yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum"}, - {987, "corn"}, - {988, "acorn"}, - {989, "hip, rose hip, rosehip"}, - {990, "buckeye, horse chestnut, conker"}, - {991, "coral fungus"}, - {992, "agaric"}, - {993, "gyromitra"}, - {994, "stinkhorn, carrion fungus"}, - {995, "earthstar"}, - {996, "hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa"}, - {997, "bolete"}, - {998, "ear, spike, capitulum"}, - {999, "toilet tissue, toilet paper, bathroom tissue"}, -}; - - -unsigned int argmax(const std::vector& scores) { - if(scores.empty()) { - throw std::invalid_argument("Input vector is empty"); - } - unsigned int maxIndex = 0; - float maxValue = scores[0]; - for(size_t i = 1; i < scores.size(); ++i) { - if(scores[i] > maxValue) { - maxIndex = i; - maxValue = scores[i]; - } - } - return maxIndex; -} -unsigned int postProcessing(shared_ptr result, shared_ptr& out_result){ - assert(result->batch() == 1); - assert(result->head() == 1); - out_result->reshape(1, 1, 1, 1); - out_result->alloc(); - vector scores; - for (int i = 0; i < result->dimension(); ++i) { - auto value = result->dataAt(0, 0, 0, i); - scores.push_back(value); - } - auto token_idx = argmax(scores); - out_result->setDataAt(0, 0, 0, 0, token_idx); - return token_idx; -} - -NetTensor *Attention(NetTensor * x, int embedded_size, int hidden_size, int head_size, string name){ - auto *q =_Linear({x}, embedded_size, hidden_size * head_size, true, name + ".attention.query"); - auto *k =_Linear({x}, embedded_size, hidden_size * head_size, 
true, name + ".attention.key"); - auto *v =_Linear({x}, embedded_size, hidden_size * head_size, true, name + ".attention.value"); - q = q->view(-1, head_size, -1, hidden_size); - k = k->view(-1, head_size, -1, hidden_size); - v = v->view(-1, head_size, -1, hidden_size); - auto *qk = _Matmul( {q, k}, false, true, name + ".qk"); - qk = *qk/std::sqrt(hidden_size); - // qk = _Scale( {qk}, 1.0F / std::sqrt(hidden_size), 0.0F, false, name + ".scale"); - // qk = _Causalmask( {qk}, name + ".mask"); - qk = _Softmax( {qk}, DIMENSION, false, name + ".softmax"); - auto *o = _Matmul( {qk, v}, false, false, name + ".qkv"); - o = o->view(-1, 1, -1, hidden_size * head_size); - o = _Linear( {o}, hidden_size * head_size, embedded_size, true, name + ".output.dense"); - return o; -} -NetTensor *MLP( NetTensor * i, int hidden_dim, int ffn_hidden_dim, string name){ - auto *x = _Linear( {i}, hidden_dim, ffn_hidden_dim, true, name+".intermediate.dense"); - x = _GELU( {x}, name+".intermediate_act_fn"); - x = _Linear( {x}, ffn_hidden_dim, hidden_dim, true, name+".output.dense"); - return x; -} -NetTensor *Embedding(Context *c, NetTensor * i, int hidden_size, string name) { - i = _Convolution2D({i}, 3, 768, {16, 16}, {16, 16}, VALID, true, name +".patch_embeddings.projection"); - // i = _Transpose( {i}, name +".patch_embeddings.projection_transpose"); - i = i->transpose(SEQUENCE, DIMENSION); - i = i->flatten(HEAD, SEQUENCE); - auto *s = _Parameter(c, {}, 1, 1, 1, 768, name +".cls_token"); - i = _Cat( {s, i}, SEQUENCE, name +".cls_token.cat"); - i = *_Parameter(c, {}, 1, 197, 1, 768, name +".position_embeddings") +i; - return i; -} -void vit(Context* c, int hidden_dim= 768, int ffn_hidden_dim = 3072, int class_size=1000, int mutil_head_size = 12, string name = "vit"){ - auto *i = _Input(c, {}, "input_ids"); - i = Embedding(c, i, hidden_dim, name+".embeddings"); - for(int layer=0; layer<12; ++layer) { - auto *x = _LayerNorm( {i}, hidden_dim, true,1e-6, name + 
".encoder.layer."+std::to_string(layer)+".layernorm_before"); - i = *Attention( x, hidden_dim, hidden_dim / mutil_head_size, mutil_head_size, name + ".encoder.layer."+std::to_string(layer)+".attention") +i; - x = _LayerNorm( {i}, hidden_dim, true, 1e-6, name + ".encoder.layer."+std::to_string(layer)+".layernorm_after"); - i = *MLP( x, hidden_dim, ffn_hidden_dim, name + ".encoder.layer."+std::to_string(layer)) +i; - _SubgraphBegin(c); - } - i = i->clip( {}, {}, {0}, {}); - i = _LayerNorm( {i}, hidden_dim, true, 1e-6, name + ".layernorm"); - i = _Linear( {i}, hidden_dim, class_size, false, "classifier"); -} -int main(int argc, char **argv) { - cmdline::parser cmdParser; - cmdParser.add("model", 'm', "specify mllm model path", false, "../models/vit-base-patch16-224-q4_k.mllm"); - cmdParser.add("thread", 't', "num of threads", false, 4); - cmdParser.parse_check(argc, argv); - - string model_path = cmdParser.get("model"); - int thread_num = cmdParser.get("thread"); - - int width, height, channel; - unsigned char *data = stbi_load("../assets/cat.jpg", &width, &height, &channel, 0); - if (data == nullptr) { - cout << "load image failed" << endl; - return -1; - } - cout << "width: " << width << " height: " << height << " channel: " << channel << endl; - auto data_f32 = PreProcessor::RescaleImage(data,255.0,height*width*channel); - auto images =std::vector( { ImageInfo(data_f32, width, height, channel)}); - images = PreProcessor::ResizeImages(images, 224, 224,true); - images = PreProcessor::NormalizeImages(images, 0.5, 0.5); - data_f32 = images[0].data; - stbi_image_free(data); - - - - std::unique_ptr c_ptr(new Context()); - auto *c = c_ptr.get(); - - vit(c); - - BackendConfig bn; - Net net(bn); - net.convert(c->sub_param_, BackendType::MLLM_CPU, thread_num); - ParamLoader param_loader(model_path); - Executor ex(¶m_loader); - ex.setup(&net); - - shared_ptr input_img = std::make_shared(); - img2Tensor(input_img, net, data_f32, 224, 224, 3); - ex.run(&net, {input_img}); - 
auto result = ex.result(); - auto token_idx = postProcessing(result[0], input_img); - std::cout << id2label[token_idx] << std::endl; - - - // ex.perf(); - - // free memory - for (auto *op : c->net_ops) { - delete op; - } - for (auto *tensor : c->net_tensors) { - delete tensor; - } - return 0; -} \ No newline at end of file diff --git a/examples/mllm_benchmark.cpp b/examples/mllm_benchmark.cpp index 64939c2ff..3f3f6a0d0 100644 --- a/examples/mllm_benchmark.cpp +++ b/examples/mllm_benchmark.cpp @@ -1,6 +1,7 @@ #include #include "Types.hpp" #include "cmdline.h" +#include "Context.hpp" // tiny llama #include "models/tinyllama/modeling_tinyllama.hpp" @@ -39,7 +40,7 @@ using namespace mllm; Tensor tokens2Input(int tokens_size, string name = "input", BackendType type = MLLM_CPU) { - Tensor tensor1(1, 1, tokens_size, 1, Backend::global_backends[type], true); + Tensor tensor1(1, 1, tokens_size, 1, Backend::global_backends[type].get(), true); tensor1.setName(name); Tensor::tensor_status = TENSOR_STATIC_INIT; tensor1.setTtype(INPUT_TENSOR); diff --git a/include/OpDefined.hpp b/include/OpDefined.hpp deleted file mode 100644 index 982896413..000000000 --- a/include/OpDefined.hpp +++ /dev/null @@ -1,196 +0,0 @@ -#ifndef MLLM_OPDEFINED_H -#define MLLM_OPDEFINED_H - -#include -#include -using std::string; -using std::vector; - -namespace mllm { -enum OpType { - INVALID_VALUE = 0, - PARAMETER, - ADD, - SOFTMAX, - SILU, - MATMUL, - SCALE, - ROPE, - POSITIOANL_EMBEDDING, - RMSNORM, - CAUSALMASK, - SLIDINGWINDOWMASK, - LINEAR, - LINEARINT8, - LINEARINT8SHADOW, - EMBEDDING, - MUL, - VIEW, - KVCACHE, - KVCACHENPU, - RELU, - RELU2, - OP_GELU, - QUICKGLUE, - LAYERNORM, - SPLIT, - GATHER, - CONVOLUTION2D, - CONVOLUTION3D, - VISIONROPE, - MULTIMODALROPE, - AVGPOOL2D, - MAXPOOL2D, - CAT, - TRANSPOSE, - SUBDIM, - DIVISION, - NORM, - SHAPE, - MEAN, - RANGE, - WHERE, - REPLACE, - PREDICTOR, - SPARSELINEAR, - SPARSEIDLINEAR, - ELASTICLINEAR, - POSITION, - WNOP, - QUANTIZE, - DEQUANTIZE, - 
MERGEOUTPUT, - SPLITINPUT, - IROPE, - OP_NUM, - NTKROPE, - - // add in xnnpack - DIRECT, - DISPATCH, - SUBGRAPHSTART, - SUBGRAPHFINALIZE, - D2H, - XP_KVCACHE, - SDPA, - - // new front-end - SUPERSILU, - HEADLINEAR, - - // for speculative decoding - ROPETREE, - CAUSALTREEMASK, -}; - -static const vector OpNames = { - "INVALID_VALUE", - "Parameter", - "Add", - "SoftMax", - "SiLU", - "MatMul", - "MatMulINT8", - "Scale", - "RoPE", - "RMSNorm", - "CausalMask", - "SlidingWindowMask", - "Linear", - "LinearINT8", - "LinearINT8Shadow", - "Embedding", - "Mul", - "VIEW", - "KVCACHE", - "KVCACHENPU", - "ReLU", - "ReLUSquaredActivation", - "GELU", - "QuickGELU", - "LayerNorm", - "Split", - "Gqther", - "Convolution2D", - "Convolution3D", - "VisonRoPE", - "MultimodalRoPE", - "AvgPool2D", - "MaxPool2D", - "Cat", - "Transpose", - "SubDim", - "Division", - "Norm", - "Shape", - "Mean", - "Range", - "Where", - "Replace", - "Predictor", - "SparseLinear", - "SparseIdLinear", - "ElasticLinear", - "Position", - "WNop", - "Quantize", - "Dequantize", - "MergeOutput", - "SplitInput", - "IRoPE", - "OP_NUM", - - // in xnnpack - "Direct", - "Dispatch", - "SubgraphStart", - "SubgraphFinalize", - "D2H", - "XP_KVCACHE", - "SDPA", - "SuperSiLU", - "HeadLinear", - "RoPETree", - "CausalTreeMask", -}; - -enum TensorFuncType { - FUNC_ADD, - FUNC_SUB, - FUNC_MUL, - FUNC_DIV, - FUNC_DIVINT, - FUNC_TTADD, - FUNC_TTSUB, - FUNC_TTMUL, - FUNC_TTDIV, - FUNC_MM, - FUNC_NORM, - FUNC_MEAN, - FUNC_CAT, - FUNC_VIEW, - FUNC_TRANPOSE, - FUNC_FLATTEN, - FUNC_CLIP, - FUNC_CLIPAXIS, - FUNC_CLIPTENSOR, - FUNC_RANGE, - FUNC_WHERE, - FUNC_INDEX_PUT, - FUNC_SPLIT, - FUNC_SUM, - FUNC_TOPK, - FUNC_EXPPAND, - FUNC_ARGSORT, - FUNC_BINCOUNT, - FUNC_REPEAT, - FUNC_LIKE, - FUNC_SCATTERREDUCE, - FUNC_APPLY_VISIOROPE, - // models use only - FUNC_FUYU_GATHER_EMBD, - FUNC_PHI3V_HD_MERGE, -}; - -} // namespace mllm -#endif diff --git a/src/Backend.cpp b/mllm/Backend.cpp similarity index 94% rename from src/Backend.cpp rename to 
mllm/Backend.cpp index d42bd85e1..7d2daf567 100644 --- a/src/Backend.cpp +++ b/mllm/Backend.cpp @@ -20,7 +20,7 @@ void registerBackend() { #ifdef USE_QNN registerQNNBackendCreator(); #elif defined(MLLM_BUILD_XNNPACK_BACKEND) - registerXNNBackendCreator(); + registerXNNBackendCreator(); #endif }); } @@ -59,6 +59,6 @@ bool InsertBackendCreatorMap(BackendType type, shared_ptr creato return true; } -map Backend::global_backends; +map> Backend::global_backends; } // namespace mllm \ No newline at end of file diff --git a/src/Backend.hpp b/mllm/Backend.hpp similarity index 74% rename from src/Backend.hpp rename to mllm/Backend.hpp index a0ac69bdb..92334073c 100644 --- a/src/Backend.hpp +++ b/mllm/Backend.hpp @@ -4,6 +4,7 @@ #include "MemoryManager.hpp" #include "OpDefined.hpp" #include "Types.hpp" +#include #include #include #include @@ -16,11 +17,11 @@ class Tensor; class Backend; class Module; class Layer; +struct DeviceMemory; +class ParamLoader; // KVCache map for QNN-CPU KVCache sharing -#ifdef USE_QNN static std::unordered_map kv_cache_map; -#endif class TensorFunction { public: @@ -29,7 +30,10 @@ class TensorFunction { virtual void execute(vector> outputs, vector> inputs, vector args) = 0; }; class Backend { + friend class Context; + public: + Backend(){}; Backend(shared_ptr &mm) : mem_manager_(mm) { } @@ -53,6 +57,21 @@ class Backend { mem_manager_->free(ptr); } + virtual void alloc_device(DeviceMemory &mem, DataType dtype) { + assert(type_ != MLLM_CPU && "alloc_device should not be called on CPU backend"); + } + virtual void free_device(DeviceMemory &mem) { + assert(type_ != MLLM_CPU && "free_device should not be called on CPU backend"); + } + virtual void copy_from_host(const DeviceMemory &dest, const void *src) { + assert(type_ != MLLM_CPU && "copy_from_host should be handled by specific backends"); + } + virtual void copy_to_host(void *dest, const DeviceMemory &src) { + assert(type_ != MLLM_CPU && "copy_to_host should be handled by specific backends"); + } + 
+ virtual void convert_fp_data(Tensor *src, Tensor *dest){}; + /** * \brief Creates an operation(Op) with the given parameters. * \param op_param The parameters for the operation to be created. @@ -73,13 +92,9 @@ class Backend { * @param in_place Whether to run the function in place. * @return std::vector The output tensors. */ - virtual std::vector runFunc( - std::vector out_names, - TensorFuncType type, - std::vector float_args, - std::vector> input_tensors, - bool in_place) = 0; virtual std::vector runLayer(Layer *layer, std::vector inputs, int N) = 0; + + virtual std::vector runOp(Op *op, std::vector input, std::vector out_names, bool in_place) = 0; virtual std::vector runForward(Module *module, std::vector inputs, std::vector args) = 0; virtual void onSetUpStart(vector> &inputs, vector> &outputs, string graphName = ""){}; @@ -94,10 +109,20 @@ class Backend { virtual void registerOps() = 0; virtual void registerFuncs() = 0; + /** + * @brief (可选) 从文件中直接加载张量数据到设备,为后端提供优化路径。 + * @param tensor 目标张量,其设备内存应已分配。 + * @param loader 参数加载器,用于获取文件句柄和元数据。 + * @return 如果后端处理了加载则返回 true,否则返回 false,让 ParamLoader 使用默认方法。 + */ + virtual bool load_from_file(Tensor *tensor, ParamLoader *loader) { + return false; + } + BackendType type() const { return type_; } - static map global_backends; + static map> global_backends; protected: BackendType type_; diff --git a/mllm/Context.cpp b/mllm/Context.cpp new file mode 100644 index 000000000..8fd2f55f7 --- /dev/null +++ b/mllm/Context.cpp @@ -0,0 +1,42 @@ +#include "Context.hpp" + +#include "backends/cpu/CPUBackend.hpp" +#include "memory/MemoryPoolManager.hpp" + +namespace mllm { +Context &Context::Instance() { + static Context instance; + return instance; +} + +Context::Context() { +} + +// void Context::initBackend(BackendType type) { +// if (Backend::global_backends.find(type) == Backend::global_backends.end() || Backend::global_backends[type] == nullptr) { +// switch (type) { +// case BackendType::MLLM_CPU: { +// shared_ptr mm = 
nullptr; +// // mm = std::make_shared(); +// mm = std::make_shared(); // todomm +// Backend::global_backends[MLLM_CPU] = new CPUBackend(mm); +// break; +// } +// #ifdef USE_QNN +// case BackendType::MLLM_QNN: { +// Backend::global_backends.emplace(MLLM_QNN, GetBackendCreator(MLLM_QNN)->create({})); +// break; +// } +// #endif +// #ifdef MLLM_BUILD_XNNPACK_BACKEND +// case BackendType::MLLM_XNNPACK: { +// Context::Instance().initBackend(MLLM_XNNPACK); +// break; +// } +// #endif +// default: { +// } +// } +// } +// } +} // namespace mllm \ No newline at end of file diff --git a/mllm/Context.hpp b/mllm/Context.hpp new file mode 100644 index 000000000..6a3bf9b89 --- /dev/null +++ b/mllm/Context.hpp @@ -0,0 +1,47 @@ +#pragma once + +#include "StateManager.hpp" +#include "Types.hpp" +#include "Backend.hpp" + +namespace mllm { + +class Context { +public: + static Context &Instance(); + + // Backend *globalBackends(BackendType type) const { + // return Backend::global_backends[type]; + // } + + // template + // T *globalBackends(BackendType type) const { + // auto backend = Backend::global_backends[type]; + // if (backend == nullptr) { + // throw std::runtime_error("Backend not initialized: " + std::to_string(type)); + // } + // return dynamic_cast(backend); + // } + + // void initBackend(BackendType type); + + InferenceStateManager &inference_state() { + return inference_state_; + } + + SpeculativeDecodingManager &speculative_decoding_state() { + return speculative_decoding_state_; + } + +private: + Context(); + ~Context() = default; + + Context(const Context &) = delete; + Context &operator=(const Context &) = delete; + + InferenceStateManager inference_state_; + SpeculativeDecodingManager speculative_decoding_state_; +}; + +} // namespace mllm \ No newline at end of file diff --git a/mllm/DataType.hpp b/mllm/DataType.hpp new file mode 100644 index 000000000..063f5dd7a --- /dev/null +++ b/mllm/DataType.hpp @@ -0,0 +1,181 @@ +#pragma once + +#include + +/** + * fp 16 
type + */ +#if defined(__ARM_NEON) && !defined(_MSC_VER) +typedef __fp16 mllm_fp16_t; +#else +typedef uint16_t mllm_fp16_t; +#endif + +/** + * k quantization + */ + +// #define MLLM_QKK_64 +#ifdef MLLM_QKK_64 +#define QK_K 64 +#define K_SCALE_SIZE 4 +#else +#define QK_K 256 +#define K_SCALE_SIZE 12 +#endif + +/** + * 2-bits quantization + */ +#define QK2_0 32 +#pragma pack(1) +typedef struct { + mllm_fp16_t d; // delta + uint8_t qs[QK2_0 / 4]; // 2-bit quants +} block_q2_0; +#pragma pack() +static_assert(sizeof(block_q2_0) == sizeof(mllm_fp16_t) + QK2_0 / 4, "wrong q2_0 block size/padding"); + +#pragma pack(1) +typedef struct { + uint8_t scales[QK_K / 16]; // scales and mins, quantized with 4 bits + uint8_t qs[QK_K / 4]; // quants + mllm_fp16_t d; // super-block scale for quantized scales + mllm_fp16_t dmin; // super-block scale for quantized mins +} block_q2_K; +#pragma pack() +static_assert(sizeof(block_q2_K) == 2 * sizeof(mllm_fp16_t) + QK_K / 16 + QK_K / 4, "wrong q2_K block size/padding"); + +#pragma pack(1) +typedef struct { + mllm_fp16_t d; + uint16_t qs[QK_K / 8]; +} block_iq2_xxs; +#pragma pack() +static_assert(sizeof(block_iq2_xxs) == sizeof(mllm_fp16_t) + QK_K / 8 * sizeof(uint16_t), "wrong iq2_xxs block size/padding"); + +/** + * 3-bits quantization + */ +#pragma pack(1) +typedef struct { + uint8_t hmask[QK_K / 8]; // quants - high bit + uint8_t qs[QK_K / 4]; // quants - low 2 bits + uint8_t scales[12]; // scales, quantized with 6 bits + mllm_fp16_t d; // super-block scale +} block_q3_K; +#pragma pack() +static_assert(sizeof(block_q3_K) == sizeof(mllm_fp16_t) + QK_K / 4 + QK_K / 8 + 12, "wrong q3_K block size/padding"); + +/** + * 4-bits quantization + */ +#define QK4_0 32 +#pragma pack(1) +typedef struct { + mllm_fp16_t d; // delta + uint8_t qs[QK4_0 / 2]; // nibbles / quants +} block_q4_0; +#pragma pack() + +// 4-bit quantization; 16 blocks of 32 elements each weight is represented as x = a * q + b; Effectively 4.5 bits per weight +#ifdef MLLM_QKK_64 
+#pragma pack(1) +typedef struct { + mllm_fp16_t d[2]; // super-block scales/mins + uint8_t scales[2]; // 4-bit block scales/mins + uint8_t qs[QK_K / 2]; // 4--bit quants +} block_q4_K; +#pragma pack() +static_assert(sizeof(block_q4_K) == 2 * sizeof(uint16_t) + QK_K / 2 + 2, "wrong q4_K block size/padding"); +#else +#pragma pack(1) +typedef struct { + mllm_fp16_t d; // super-block scale for quantized scales + mllm_fp16_t dmin; // super-block scale for quantized mins + uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits + uint8_t qs[QK_K / 2]; // 4--bit quants +} block_q4_K; +#pragma pack() +static_assert(sizeof(block_q4_K) == 2 * sizeof(mllm_fp16_t) + K_SCALE_SIZE + QK_K / 2, "wrong q4_K block size/padding"); +#endif + +#pragma pack(1) +typedef struct { + mllm_fp16_t d[4]; // deltas for 4 q4_0 blocks + uint8_t qs[QK4_0 * 2]; // nibbles / quants for 4 q4_0 blocks +} block_q4_0x4; +#pragma pack() +static_assert(sizeof(block_q4_0x4) == 4 * sizeof(mllm_fp16_t) + QK4_0 * 2, "wrong q4_0x4 block size/padding"); + +#pragma pack(1) +typedef struct { + mllm_fp16_t d[8]; // deltas for 8 q4_0 blocks + uint8_t qs[QK4_0 * 4]; // nibbles / quants for 8 q4_0 blocks +} block_q4_0x8; +#pragma pack() +static_assert(sizeof(block_q4_0x8) == 8 * sizeof(mllm_fp16_t) + QK4_0 * 4, "wrong q4_0x8 block size/padding"); + +/** + * 6-bits quantization + */ +#pragma pack(1) +typedef struct { + uint8_t ql[QK_K / 2]; // quants, lower 4 bits + uint8_t qh[QK_K / 4]; // quants, upper 2 bits + int8_t scales[QK_K / 16]; // scales, quantized with 8 bits + mllm_fp16_t d; // super-block scale +} block_q6_K; +#pragma pack() +static_assert(sizeof(block_q6_K) == sizeof(mllm_fp16_t) + QK_K / 16 + 3 * QK_K / 4, "wrong q6_K block size/padding"); + +/** + * 8-bits quantization + */ +#define QK8_0 32 +#pragma pack(1) +typedef struct { + mllm_fp16_t d; // delta + int8_t qs[QK8_0]; // quants +} block_q8_0; +#pragma pack() + +#pragma pack(1) +typedef struct { + int8_t qs[QK8_0]; // quants +} 
block_q8_per_tensor; // used in vecdot_i8_i8, TODO: remove +#pragma pack() + +#define QK8_0F 32 +#pragma pack(1) +typedef struct { + float scale; // delta + int8_t qs[QK8_0F]; // quants +} block_q8_0f; +#pragma pack() + +// This is only used for intermediate quantization and dot products +#pragma pack(1) +typedef struct { + float d; // delta + int8_t qs[QK_K]; // quants + int16_t bsums[QK_K / 16]; // sum of quants in groups of 16 +} block_q8_K; +#pragma pack() +static_assert(sizeof(block_q8_K) == sizeof(float) + QK_K + QK_K / 16 * sizeof(int16_t), "wrong q8_K block size/padding"); + +#pragma pack(1) +typedef struct { + mllm_fp16_t d[4]; // deltas for 4 q8_0 blocks + int8_t qs[QK8_0 * 4]; // quants for 4 q8_0 blocks +} block_q8_0x4; +#pragma pack() +static_assert(sizeof(block_q8_0x4) == 4 * sizeof(mllm_fp16_t) + QK8_0 * 4, "wrong q8_0x4 block size/padding"); + +#pragma pack(1) +typedef struct { + mllm_fp16_t d[8]; // deltas for 8 q8_0 blocks + int8_t qs[QK8_0 * 8]; // quants for 8 q8_0 blocks +} block_q8_0x8; +#pragma pack() +static_assert(sizeof(block_q8_0x8) == 8 * sizeof(mllm_fp16_t) + QK8_0 * 8, "wrong q8_0x8 block size/padding"); diff --git a/src/Draft.hpp b/mllm/Draft.hpp similarity index 100% rename from src/Draft.hpp rename to mllm/Draft.hpp diff --git a/src/Generate.cpp b/mllm/Generate.cpp similarity index 100% rename from src/Generate.cpp rename to mllm/Generate.cpp diff --git a/src/Generate.hpp b/mllm/Generate.hpp similarity index 80% rename from src/Generate.hpp rename to mllm/Generate.hpp index 63f79b5cb..6c58eb280 100644 --- a/src/Generate.hpp +++ b/mllm/Generate.hpp @@ -9,6 +9,8 @@ * */ #pragma once +#include "Types.hpp" +#include "backends/cpu/third_party/ggml/QuantizeFP16.hpp" #ifndef MLLM_GENERATE_HPP #define MLLM_GENERATE_HPP #include @@ -79,7 +81,14 @@ class _LlmTextGenerateMethod { } } for (int i = 0; i < _dims; ++i) { - auto value = t.dataAt(0, 0, _seq, i); + float value; + if (t.dtype() == MLLM_TYPE_F16) { + value = 
MLLM_FP16_TO_FP32(t.dataAt(0, 0, _seq, i)); + } else if (t.dtype() == MLLM_TYPE_F32) { + value = t.dataAt(0, 0, _seq, i); + } else { + throw std::runtime_error("Unsupported dtype for text generation."); + } scores.push_back(value); } } @@ -152,39 +161,39 @@ class _LlmTextGenerateGreedySearchMethod : public _LlmTextGenerateMethod { }; class _LlmTextGenerateGreedySearchMethodForSD : public _LlmTextGenerateMethod { - public: - _LlmTextGenerateGreedySearchMethodForSD() = default; - ~_LlmTextGenerateGreedySearchMethodForSD() = default; - inline void _tensor_to_vec_of_multiIndices(Tensor &t, std::vector> &scores, std::vector indices) { - assert(t.batch() == 1 && "Batch size of result is not 1. Which is not supported for now."); - assert(t.head() == 1 && "The 3rd dim of result should be one. e.g.:[1, 1, seq, hidden]"); - int _dims = t.dimension(); - // TODO: 考虑QNN进行padding - // padding prefill for QNN - // if (is_padding) { - // if (chunk_size > 0) { - // _seq = (seq_before_padding - 1) % chunk_size; - // } else { - // _seq = seq_before_padding - 1; - // } - // } - for (int idx = 0; idx < indices.size(); ++idx) { - std::vector values(t.dimension()); - int _seq = indices[idx]; - for (int i = 0; i < _dims; ++i) { - auto value = t.dataAt(0, 0, _seq, i); - values[i] = value; - } - scores.push_back(values); +public: + _LlmTextGenerateGreedySearchMethodForSD() = default; + ~_LlmTextGenerateGreedySearchMethodForSD() = default; + inline void _tensor_to_vec_of_multiIndices(Tensor &t, std::vector> &scores, std::vector indices) { + assert(t.batch() == 1 && "Batch size of result is not 1. Which is not supported for now."); + assert(t.head() == 1 && "The 3rd dim of result should be one. 
e.g.:[1, 1, seq, hidden]"); + int _dims = t.dimension(); + // TODO: 考虑QNN进行padding + // padding prefill for QNN + // if (is_padding) { + // if (chunk_size > 0) { + // _seq = (seq_before_padding - 1) % chunk_size; + // } else { + // _seq = seq_before_padding - 1; + // } + // } + for (int idx = 0; idx < indices.size(); ++idx) { + std::vector values(t.dimension()); + int _seq = indices[idx]; + for (int i = 0; i < _dims; ++i) { + auto value = t.dataAt(0, 0, _seq, i); + values[i] = value; } + scores.push_back(values); } - unsigned int generate(Tensor &t) override { - std::cerr << "Should use generate_SD" << std::endl; - assert(false); - return -1; - }; - unsigned int generate_SD(Tensor &t, TracePool &tp); + } + unsigned int generate(Tensor &t) override { + std::cerr << "Should use generate_SD" << std::endl; + assert(false); + return -1; }; + unsigned int generate_SD(Tensor &t, TracePool &tp); +}; class _LlmTextGenerateTopkSamplingMethod : public _LlmTextGenerateMethod { public: @@ -216,7 +225,9 @@ class _LlmTextGenerateToppSamplingMethod : public _LlmTextGenerateMethod { class _LlmTextGenerateNucleusSamplingMethodForSD : public _LlmTextGenerateMethod { public: - _LlmTextGenerateNucleusSamplingMethodForSD(int k, float p, float temp) : samplingConfig(SamplingConfig(temp, p, k)) {} + _LlmTextGenerateNucleusSamplingMethodForSD(int k, float p, float temp) : + samplingConfig(SamplingConfig(temp, p, k)) { + } ~_LlmTextGenerateNucleusSamplingMethodForSD() = default; unsigned int generate(Tensor &t) override { @@ -226,6 +237,7 @@ class _LlmTextGenerateNucleusSamplingMethodForSD : public _LlmTextGenerateMethod }; unsigned int generate_SD(Tensor &t, TracePool &tp); std::vector evalPosterior(const std::vector> &logit_scores, const std::vector &sampled_token_ids, TracePool &tp); + private: float temperature = 1.0; float top_p = 1.0; @@ -234,17 +246,19 @@ class _LlmTextGenerateNucleusSamplingMethodForSD : public _LlmTextGenerateMethod float temperature = 1.0; float top_p = 1.0; int 
top_k = -1; - SamplingConfig(float _temperature, float _top_p, float _top_k): temperature(_temperature), top_p(_top_p), top_k(_top_k) {} + SamplingConfig(float _temperature, float _top_p, float _top_k) : + temperature(_temperature), top_p(_top_p), top_k(_top_k) { + } } samplingConfig; - void apply_logits_processor(std::vector>& logits_with_indices, const SamplingConfig& config) { + void apply_logits_processor(std::vector> &logits_with_indices, const SamplingConfig &config) { const size_t vocab_size = logits_with_indices.size(); if (vocab_size == 0) return; // 温度调整 if (config.temperature > 0 && config.temperature != 1.0f) { const float inv_temp = 1.0f / config.temperature; - for (auto& v : logits_with_indices) v.first *= inv_temp; + for (auto &v : logits_with_indices) v.first *= inv_temp; } // Top-k处理 @@ -253,17 +267,16 @@ class _LlmTextGenerateNucleusSamplingMethodForSD : public _LlmTextGenerateMethod std::partial_sort( logits_with_indices.begin(), logits_with_indices.begin() + config.top_k, - logits_with_indices.end() - ); + logits_with_indices.end()); // 构建屏蔽掩码 std::vector mask(vocab_size, false); - for (int i=0; i probs(vocab_size); std::pair max_logit_with_index = *std::max_element(logits_with_indices.begin(), logits_with_indices.end(), - [](std::pair a, std::pair b) { return a.first > b.first; }); + [](std::pair a, std::pair b) { return a.first > b.first; }); float max_logit = max_logit_with_index.first; float sum_exp = 0.0f; - for (size_t i=0; i> sorted_probs(vocab_size); - for (size_t i=0; i a, std::pair b) { return a.first > b.first; }); @@ -295,16 +308,16 @@ class _LlmTextGenerateNucleusSamplingMethodForSD : public _LlmTextGenerateMethod cumulative += sorted_probs[cutoff].first; if (cumulative > config.top_p) break; } - cutoff = std::min(cutoff+1, vocab_size-1); + cutoff = std::min(cutoff + 1, vocab_size - 1); // 构建有效集合 std::vector valid(vocab_size, false); - for (size_t i=0; i(m_method_class)->generate_SD(t, tp); + return 
dynamic_cast<_LlmTextGenerateGreedySearchMethodForSD *>(m_method_class)->generate_SD(t, tp); }; inline unsigned int generate(Tensor &t, const LlmTextGeneratorOpts &opt) { diff --git a/src/Layer.cpp b/mllm/Layer.cpp similarity index 100% rename from src/Layer.cpp rename to mllm/Layer.cpp diff --git a/src/Layer.hpp b/mllm/Layer.hpp similarity index 75% rename from src/Layer.hpp rename to mllm/Layer.hpp index 670ffcea0..b9e833bfe 100644 --- a/src/Layer.hpp +++ b/mllm/Layer.hpp @@ -7,9 +7,11 @@ #include #include +#include #include #include +#include "Context.hpp" #include "OpDefined.hpp" #include "Tensor.hpp" #include "Op.hpp" @@ -29,67 +31,54 @@ namespace mllm { class Layer { public: Layer() = default; + ~Layer() { + delete op_; // 手动添加 delete + op_ = nullptr; + } void init(std::string name, OpType type) { name_ = std::move(name); + type_ = type; param_["type"] = type; Module::initBackend(MLLM_CPU); - backend_ = Backend::global_backends[MLLM_CPU]; + backend_ = Backend::global_backends[MLLM_CPU].get(); saved_list_idx = Module::listIdx; init_ = true; } bool ready() { return init_; } - static map layername_2_tensorname; - static bool use_layername_2_tensorname; + /**** for forward ****/ Tensor operator()(Tensor input) { auto ts = run({input}, 1); return ts[0]; } - Tensor operator()(Tensor input0, Tensor input1) { auto ts = run({input0, input1}, 1); return ts[0]; } - Tensor operator()(Tensor input0, Tensor input1, Tensor input2) { auto ts = run({input0, input1, input2}, 1); return ts[0]; } - Tensor operator()(Tensor input0, Tensor input1, Tensor input2, Tensor input3) { auto ts = run({input0, input1, input2, input3}, 1); return ts[0]; } - void load() { + /**** for dynamic load ****/ + void load(std::shared_ptr loader = nullptr) { + if (op_ == nullptr) { + initOp(); + } if (inited_loaded && loaded_param) return; - if (op_ == nullptr) { -#ifdef USE_QNN - if ((param_["type"] == KVCACHE || param_["type"] == KVCACHENPU) && (Backend::global_backends.find(MLLM_QNN) != 
Backend::global_backends.end())) { - if (kv_cache_map.find(name_) == kv_cache_map.end()) { - // for the prefill part, we need to create a new op - param_["type"] = KVCACHENPU; - op_ = backend_->opCreate(param_, name_); - kv_cache_map[name_] = op_; - } else { -#ifdef DEBUGPRINT - std::cout << name_ << " is shared used" << std::endl; -#endif - // for the decoding part, we need to get created op from global container - op_ = kv_cache_map[name_]; - } - } else { - op_ = backend_->opCreate(param_, name_); - } -#else - op_ = backend_->opCreate(param_, name_); -#endif + if (!loader) { + loader = Module::llm_model_ptr->loader; } - op_->load(*Module::llm_model_ptr->loader); + op_->load(*loader); loaded_param = true; + inited_loaded = true; } bool &loaded() { return loaded_param; @@ -97,19 +86,98 @@ class Layer { void free() { op_->free({}, {}); loaded_param = false; + inited_loaded = false; + } + + /**** for change backend ****/ + void to(BackendType type) { + if (type == backend_->type()) { + return; + } + Module::initBackend(type); + backend_ = Backend::global_backends[type].get(); + if (!init_) { + init(name_, type_); + } + } + void cpu() { + to(MLLM_CPU); + } + void cl() { +#ifdef USE_OPENCL + to(MLLM_OPENCL); +#else + throw std::runtime_error("OpenCL backend is not available. Please compile with USE_OPENCL=ON."); +#endif } protected: + // 统一入口 vector run(vector inputs, int N = 1) { - auto backend = inputs.empty() ? Backend::global_backends[MLLM_CPU] : inputs[0].backend(); + //////////==============QNN only====================/////////// + if (Backend::global_backends.size() == 2 && Backend::global_backends.find(MLLM_QNN) != Backend::global_backends.end()) { + auto backend = Backend::global_backends[MLLM_QNN].get(); + return backend->runLayer(this, inputs, N); + } + //////////==============QNN only====================/////////// + + // auto start_time = mllm_time_us(); + // ==================== [开始] op ====================// + Module *module = inputs.empty() ? 
Module::llm_model_ptr : inputs[0].module(); + /*** part.1 change backend(if) ***/ + if (module && module->doChangeBn) { + if (check_op_support(Backend::global_backends[module->device()].get())) { + to(module->device()); + } else { + std::cout << "Backend " << module->device() << " does not support the operation, falling back to CPU." << std::endl; + to(MLLM_CPU); + } + } + /*** part.2 init op(if) ***/ + if (op_ == nullptr || op_->backend()->type() != backend_->type()) { + auto created = initOp(); + assert(op_ != nullptr && "Op creation failed, please check the backend and parameters."); + } + /*** part.3 load params(if) ***/ + if (module && (module->doLoad || !inited_loaded) && !module->doChangeBn) { // load + if (module->doLoad) { + op_->load(*module->loader); + inited_loaded = true; + } else if (loaded_param) { + inited_loaded = loaded_param; + } else if (!inited_loaded) { + ParamLoader empty_loader(""); + op_->load(empty_loader); + inited_loaded = true; + } + } + /*** part.4 forward ***/ + auto backend = backend_; if (Backend::global_backends.size() == 2 && Backend::global_backends.find(MLLM_QNN) != Backend::global_backends.end()) { - backend = Backend::global_backends[MLLM_QNN]; + backend = Backend::global_backends[MLLM_QNN].get(); + } + for (auto &input : inputs) { + if (input.backend() != backend) { + input.to(backend->type()); + } + } + vector out_names; + int count = (N > 1) ? N : 1; + for (int i = 0; i < count; ++i) { + std::string tensor_name = (N > 1) ? 
"out-" + op_->name() + "-" + std::to_string(i) : "out-" + op_->name(); + out_names.push_back(tensor_name); } - return backend->runLayer(this, inputs, N); + // ==================== [结束] op ====================// + // if (module && !module->doTrace) { + // auto end_time = mllm_time_us(); + // std::cout << name_ << " dispatch Layer in " << (end_time - start_time) / 1000.0F << " ms" << std::endl; + // } + return backend->runOp(op_, inputs, out_names, false); } public: std::string name_; + OpType type_; Op *op_ = nullptr; Backend *backend_{}; OpParam param_; @@ -118,6 +186,43 @@ class Layer { bool inited_loaded = false; bool loaded_param = false; + + static map layername_2_tensorname; + static bool use_layername_2_tensorname; + +private: + bool initOp(Backend *bn = nullptr) { // 返回bn是否能成功创建op + assert(op_ == nullptr); + if (bn == nullptr) { + bn = backend_; + } + if ((param_["type"] == KVCACHE || param_["type"] == KVCACHENPU) + && (Backend::global_backends.find(MLLM_QNN) != Backend::global_backends.end())) { // 针对qnn专门处理 + if (kv_cache_map.find(name_) == kv_cache_map.end()) { + // for the prefill part, we need to create a new op + param_["type"] = KVCACHENPU; + assert(bn->type() == MLLM_CPU && "QNN backend is required for cpu's KVCACHENPU."); + op_ = bn->opCreate(param_, name_); + kv_cache_map[name_] = op_; + } else { + // for the decoding part, we need to get created op from global container + op_ = kv_cache_map[name_]; + } + op_->type() = (OpType)param_["type"]; + return true; + } + if (!(op_ = bn->opCreate(param_, name_))) { + // std::cout << "Backend" << op_->type() << " does not support the operation, falling back to CPU." 
<< std::endl; // ttt + op_ = backend_->opCreate(param_, name_); // cpu fallback + op_->type() = (OpType)param_["type"]; + return false; + } + op_->type() = (OpType)param_["type"]; + return true; + } + bool check_op_support(Backend *bn) { + return bn->opCreate(param_, name_); + } }; class Linear final : public Layer { @@ -311,6 +416,17 @@ class Softmax final : public Layer { return ts[0]; } }; +class Sigmoid final : public Layer { +public: + Sigmoid() = default; + explicit Sigmoid(std::string name) { + init(std::move(name), OpType::SIGMOID); + } + Tensor operator()(Tensor input) { + auto ts = run({input}, 1); + return ts[0]; + } +}; class Embedding final : public Layer { public: @@ -511,6 +627,15 @@ class RoPETree final : public Layer { } }; +class RoPESimple final : public Layer { +public: + RoPESimple() = default; + explicit RoPESimple(int pose_type, std::string name) { + param_["pose_type"] = pose_type; + init(std::move(name), OpType::ROPESIMPLE); + } +}; + class IRoPE final : public Layer { public: IRoPE() = default; @@ -552,6 +677,20 @@ class KVCache final : public Layer { init(std::move(name), OpType::KVCACHE); } + explicit KVCache(int head, int hidden, int n_rep, int cache_max, string attn_impl, std::string name) { + param_["head"] = head; + param_["hidden"] = hidden; + param_["n_rep"] = n_rep; + param_["cache_max"] = cache_max; + param_["for_xnn"] = false; + param_["fa2"] = (attn_impl == "flash_attention_2" || attn_impl == "sage_attention"); + if (attn_impl == "sage_attention" && hidden % QK8_0F == 0 && KVCacheSageDtypeBit == 8) { + init(std::move(name), OpType::KVCACHESAGE); + } else { + init(std::move(name), OpType::KVCACHE); + } + } + explicit KVCache(int cache_max, std::string name) { param_["n_rep"] = 1; param_["cache_max"] = cache_max; @@ -580,6 +719,18 @@ class KVCache final : public Layer { init(std::move(name), OpType::KVCACHE); } } + explicit KVCache(int head, int hidden, int n_rep, int cache_max, std::string name, bool npuEnbaled) { + 
param_["head"] = head; + param_["hidden"] = hidden; + param_["n_rep"] = n_rep; + param_["cache_max"] = cache_max; + param_["for_xnn"] = false; + if (npuEnbaled) { + init(std::move(name), OpType::KVCACHENPU); + } else { + init(std::move(name), OpType::KVCACHE); + } + } Tensor operator()(Tensor input) { auto ts = run({input}, 1); return ts[0]; @@ -699,6 +850,33 @@ class VisionRoPE final : public Layer { return ts[0]; } }; + +class VisionRoPESin final : public Layer { +public: + explicit VisionRoPESin(int dim_size, int spatial_merge_size, std::string name) { + param_["dim"] = (float)dim_size; + param_["spatial_merge_size"] = (float)spatial_merge_size; + init(std::move(name), OpType::VISIONROPESIN); + } + Tensor operator()(Tensor input) { + auto ts = run({input}, 1); + return ts[0]; + } +}; + +class VisionRoPECos final : public Layer { +public: + explicit VisionRoPECos(int dim_size, int spatial_merge_size, std::string name) { + param_["dim"] = (float)dim_size; + param_["spatial_merge_size"] = (float)spatial_merge_size; + init(std::move(name), OpType::VISIONROPECOS); + } + Tensor operator()(Tensor input) { + auto ts = run({input}, 1); + return ts[0]; + } +}; + class MultimodalRoPE final : public Layer { public: MultimodalRoPE() = default; @@ -708,9 +886,13 @@ class MultimodalRoPE final : public Layer { for (int i = 0; i < mrope_section.size(); i++) { param_["mrope_section_" + std::to_string(i)] = (float)mrope_section[i]; } - init(std::move(name), OpType::MULTIMODALROPE); + if (Backend::global_backends.size() == 2 && Backend::global_backends.find(MLLM_QNN) != Backend::global_backends.end()) { + init(std::move(name), OpType::MULTIMODALROPEPIP); + } else { + init(std::move(name), OpType::MULTIMODALROPE); + } } - Tensor operator()(Tensor input, Tensor position_ids) { + Tensor operator()(Tensor input, Tensor &position_ids) { auto ts = run({input, position_ids}, 1); return ts[0]; } @@ -748,10 +930,23 @@ class Position final : public Layer { // Only for QNN START +class 
SiLU_Full_Precision final : public Layer { +public: + SiLU_Full_Precision() = default; + SiLU_Full_Precision(std::string name) { + init(std::move(name), OpType::SILU_FULL_PRECISION); + } + Tensor operator()(Tensor input) { + auto ts = run({input}, 1); + return ts[0]; + } +}; + class Quantize final : public Layer { public: - explicit Quantize(bool isNSHD, std::string name) { + explicit Quantize(bool isNSHD, std::string name, DataType type = MLLM_TYPE_I8) { param_["isNSHD"] = (float)isNSHD; + param_["dtype"] = (float)type; init(std::move(name), OpType::QUANTIZE); } Tensor operator()(Tensor input) { @@ -777,9 +972,10 @@ class Direct final : public Layer { class Dequantize final : public Layer { public: - explicit Dequantize(bool isNSHD, std::string name, bool isFP32 = true) { + explicit Dequantize(bool isNSHD, std::string name, bool isFP32 = true, DataType inType = MLLM_TYPE_I8) { param_["isNSHD"] = (float)isNSHD; param_["isFP32"] = (float)isFP32; + param_["inType"] = (float)inType; init(std::move(name), OpType::DEQUANTIZE); } Tensor operator()(Tensor input) { @@ -788,6 +984,21 @@ class Dequantize final : public Layer { } }; +class DequantizeAdd final : public Layer { +public: + explicit DequantizeAdd(bool isNSHD, int out_features, std::string name, bool isFP32 = true, DataType inType = MLLM_TYPE_I8) { + param_["isNSHD"] = (float)isNSHD; + param_["out_features"] = out_features; + param_["isFP32"] = (float)isFP32; + param_["inType"] = (float)inType; + init(std::move(name), OpType::DEQUANTIZEADD); + } + Tensor operator()(Tensor input) { + auto ts = run({input}, 1); + return ts[0]; + } +}; + class Dispatch final : public Layer { public: explicit Dispatch(const std::string &name) { @@ -875,18 +1086,6 @@ class View final : public Layer { } }; -class SubgraphStart final : public Layer { -public: - explicit SubgraphStart(const std::string &name) { - init(name, OpType::SUBGRAPHSTART); - } - - Tensor operator()(Tensor input) { - auto ts = run({input}, 1); - return ts[0]; - } 
-}; - class Transpose final : public Layer { public: explicit Transpose(std::vector perm, std::string name) { @@ -902,14 +1101,30 @@ class Transpose final : public Layer { } }; +class SubgraphStart final : public Layer { +public: + SubgraphStart() = default; + explicit SubgraphStart(const std::string &name) { + init(name, OpType::SUBGRAPHSTART); + } + + Tensor operator()(vector inputs) { + Module::tmp_device = MLLM_QNN; + auto ts = run(inputs, 1); + return ts[0]; + } +}; + class SubgraphFinalize final : public Layer { public: + SubgraphFinalize() = default; explicit SubgraphFinalize(const std::string &name) { init(name, OpType::SUBGRAPHFINALIZE); } - Tensor operator()(Tensor input) { - auto ts = run({input}, 1); + Tensor operator()(vector inputs) { + auto ts = run(inputs, 1); + Module::tmp_device = MLLM_CPU; return ts[0]; } }; @@ -981,7 +1196,38 @@ class NTKRoPE final : public Layer { return op_->clearCache(); } }; -// Only for QNN END + +class Split final : public Layer { +public: + Split() = default; + + explicit Split(int split_num, Chl split_dim, int split_dim_size, std::string name) { + param_["split_num"] = (float)split_num; + param_["split_dim"] = (float)split_dim; + param_["split_dim_size"] = (float)split_dim_size; + init(std::move(name), OpType::SPLIT); + } + + vector operator()(Tensor input) { + return run({input}, (int)param_["split_num"]); + } +}; + +class Scale final : public Layer { +public: + Scale() = default; + + explicit Scale(float scale, float bias, bool bias_after_scale, std::string name) { + param_["scale"] = (float)scale; + param_["bias"] = (float)bias; + param_["bias_after_scale"] = (float)bias_after_scale; + init(std::move(name), OpType::SCALE); + } + + vector operator()(Tensor input) { + return run({input}, 1); + } +}; } // namespace mllm diff --git a/src/Log.cpp b/mllm/Log.cpp similarity index 100% rename from src/Log.cpp rename to mllm/Log.cpp diff --git a/src/Log.h b/mllm/Log.h similarity index 100% rename from src/Log.h rename to 
mllm/Log.h diff --git a/src/MemoryManager.cpp b/mllm/MemoryManager.cpp similarity index 100% rename from src/MemoryManager.cpp rename to mllm/MemoryManager.cpp diff --git a/src/MemoryManager.hpp b/mllm/MemoryManager.hpp similarity index 100% rename from src/MemoryManager.hpp rename to mllm/MemoryManager.hpp diff --git a/src/Module.cpp b/mllm/Module.cpp similarity index 61% rename from src/Module.cpp rename to mllm/Module.cpp index a078ec73d..09a793741 100644 --- a/src/Module.cpp +++ b/mllm/Module.cpp @@ -4,27 +4,23 @@ #include "Module.hpp" #include "Types.hpp" +#include #include +#include +#include namespace mllm { -// AbstructLoader *Module::loader; -// TensorStatus Tensor::tensor_status; -// bool Module::doLoad = false; // The llm_model_ptr is a pointer to the outmost module Module *Module::llm_model_ptr; -bool Module::isMultiChunkPrefilling = false; -bool Module::isFirstChunk = true; - int Module::listIdx; std::stack Module::listIdxStack; // int Module::runlistIdx; // TensorStatus Tensor::tensor_status; BackendType Module::tmp_device = MLLM_CPU; std::unordered_map> Module::tensor_func_ops; - -int Module::graphIdx = 0; +bool Module::alloc_mmap = true; vector Module::profiling(string name) { vector output; @@ -38,7 +34,7 @@ vector Module::profiling(string name) { std::cout << " Load time: " << load_time_ / 1000.0F << " s" << std::endl; if (inference_times_.size() > 1 && decoding_token_size_ != prefilling_token_size_) { double prefile_speed = 1000 * prefilling_token_size_ / inference_times_[0]; - std::cout << " Prefilling speed: " << prefile_speed << " tokens/s" << std::endl; + std::cout << " Prefilling speed: " << prefile_speed << " tokens/s , TTFT: " << inference_times_[0] / 1000.0F << " s" << std::endl; double sum_decoding_time = std::accumulate(std::begin(inference_times_) + 1, std::end(inference_times_), 0.0); double mean_decoding_time = sum_decoding_time / (inference_times_.size() - 1); double decoding_speed = 1000 / mean_decoding_time; @@ -67,6 +63,7 @@ 
vector Module::profiling(string name) { void Module::generate( Tensor &input_ids, const LlmTextGeneratorOpts &opt, const std::function &call_back) { auto chatPostProcessing = [](unsigned token_idx, Tensor &tokens_tensor, const vector &clean_tensors) { + tokens_tensor.cpu(); tokens_tensor.reshape(1, 1, 1, 1); tokens_tensor.alloc(); tokens_tensor.setDataAt(0, 0, 0, 0, token_idx); @@ -93,11 +90,16 @@ void Module::generate( for (int step = 0; step < opt.max_new_tokens; ++step) { auto _out = (*this)({input_ids}); + if (_out[0].backend()->type() != MLLM_CPU) { + _out[0].cpu(); + } auto out_token = text_generator_->generate(_out[0]); if (!call_back(out_token)) break; chatPostProcessing(out_token, input_ids, {}); } } + +/* vector Module::generate(Tensor &input_ids, const LlmTextGeneratorOpts &opt, int end_token) { auto chatPostProcessing = [](unsigned token_idx, Tensor &tokens_tensor, const vector &clean_tensors) { tokens_tensor.reshape(1, 1, 1, 1); @@ -133,4 +135,73 @@ vector Module::generate(Tensor &input_ids, const LlmTextGeneratorOpts } return result; } +*/ +/** + * @brief 使用模型生成文本序列,支持批处理输入。 + * @param input_ids 输入的 token ID 张量,形状应为 [batch_size, 1, seq_len, 1]。 + * @param opt 生成选项,如最大新 token 数。 + * @param end_token 序列生成的结束符 ID。 + * @return 一个包含多个生成序列的向量,每个子向量是一个完整的 token ID 序列。 + */ +vector> Module::generate(Tensor &input_ids, const LlmTextGeneratorOpts &opt, int end_token) { + auto chatPostProcessing = [](vector token_idxs, Tensor &tokens_tensor, const vector &clean_tensors) { + tokens_tensor.reshape(token_idxs.size(), 1, 1, 1); + tokens_tensor.alloc(); + for (size_t idx = 0; idx < token_idxs.size(); ++idx) { + unsigned int token_idx = token_idxs[idx]; + tokens_tensor.setDataAt(idx, 0, 0, 0, token_idx); + } + for (auto tensor : clean_tensors) { + tensor->reshape(0, 0, 0, 0); + tensor->alloc(); + } + }; + + if (!opt.do_sample) { + // fail to greedy search + if (!text_generator_ || text_generator_->type() != LLmTextGeneratorType::kGreedySearch) + text_generator_ = 
std::make_shared(LLmTextGeneratorType::kGreedySearch, opt); + } else if (opt.do_sample && !opt.top_k && opt.top_p != 0.F) { + // fail to top p sampling + if (!text_generator_ || text_generator_->type() != LLmTextGeneratorType::kToppSampling) + text_generator_ = std::make_shared(LLmTextGeneratorType::kToppSampling, opt); + } else if (opt.do_sample && opt.top_k) { + // fail to top k sampling + if (!text_generator_ || text_generator_->type() != LLmTextGeneratorType::kTopkSampling) + text_generator_ = std::make_shared(LLmTextGeneratorType::kTopkSampling, opt); + } + auto batch_size = input_ids.batch(); + vector> results(batch_size); + vector is_end(batch_size, false); + for (int step = 0; step < opt.max_new_tokens; ++step) { + auto _out = (*this)({input_ids}); + // _out[0].saveData(); + // exit(1); + vector out_tokens; + for (int batch_ = 0; batch_ < batch_size; ++batch_) { + Tensor _outt(1, 1, _out[0].sequence(), _out[0].dimension(), MLLM_CPU, true); + memcpy(_outt.hostPtr(), _out[0].ptrAt(batch_, 0, 0, 0), _outt.cntSize()); + auto out_token = text_generator_->generate(_outt); + if (end_token != -1 && out_token == end_token) { + // std::cout << "End batch_: " << batch_ << std::endl; + is_end[batch_] = true; // 标记该 batch 已经结束 + // out_tokens.push_back(0); + // continue; + } + if (!is_end[batch_]) { + out_tokens.push_back(out_token); + results[batch_].push_back(out_token); + } else { + out_tokens.push_back(0); // 如果该 batch 已经结束,则填充 + } + } + chatPostProcessing(out_tokens, input_ids, {}); + if (std::all_of(is_end.begin(), is_end.end(), [](bool v) { return v; })) { + // std::cout << "All batches ended." 
<< std::endl; + break; // 如果所有 batch 都结束,则退出循环 + } + } + return results; +} + } // namespace mllm \ No newline at end of file diff --git a/src/Module.hpp b/mllm/Module.hpp similarity index 72% rename from src/Module.hpp rename to mllm/Module.hpp index 93c1c8dab..83c5eeff9 100644 --- a/src/Module.hpp +++ b/mllm/Module.hpp @@ -13,6 +13,10 @@ #include "Trace.hpp" #include "Types.hpp" #include "backends/cpu/CPUBackend.hpp" +#include +#ifdef USE_OPENCL +#include "backends/opencl/OpenCLBackend.hpp" +#endif #include #include #include @@ -54,8 +58,11 @@ class Module { map> activation_tensors; map activation_tensors_num; - AbstructLoader *loader; + std::shared_ptr loader; bool doLoad = false; + bool doChangeBn = false; + bool doTrace = false; + bool tracedFlag = false; bool op_transposed_flag = false; static Module *llm_model_ptr; @@ -72,6 +79,7 @@ class Module { static BackendType tmp_device; static std::unordered_map> tensor_func_ops; // use for QNN + static bool alloc_mmap; private: template @@ -92,27 +100,77 @@ class Module { } int idx; + double forwardNoInput() { + mllm_time_init(); + vector tmps; + int max_in_size = 5; + for (int i = 0; i < max_in_size; ++i) { + Tensor t(Backend::global_backends[MLLM_CPU].get()); + t.setName("input" + std::to_string(i)); + t.reshape(1, 1, 1, 10); + t.alloc(); + t.setModule(this); + tmps.push_back(t); + } + llm_model_ptr = this; + vector alternate_args = { + {}, + vector{0, 0}, + std::vector>(32, std::vector(2))}; + uint64_t time_start = 0; + for (auto args : alternate_args) { + time_start = mllm_time_us(); + try { + operator()(tmps, args); + break; + } catch (const std::exception &e) { +#if not defined(__ARM_NEON) + if (std::string("bad any_cast") != e.what()) { + MLLM_LOG_ERROR_STREAM << e.what() << std::endl; + exit(0); + } +#endif + } catch (...) 
{ + MLLM_LOG_ERROR_STREAM << "load error" << std::endl; + exit(0); + } + } + uint64_t time_end = mllm_time_us(); + return (time_end - time_start) / 1000.0F; // ms + } + public: Module() { - idx = Module::graphIdx; - Module::graphIdx++; } virtual ~Module() = default; - BackendType device() const { + BackendType &device() { return device_; } static void initBackend(BackendType type = BackendType::MLLM_CPU) { if (Backend::global_backends.find(type) == Backend::global_backends.end() || Backend::global_backends[type] == nullptr) { + // std::cout << "Initializing OpenswwssCL Backend..." << std::endl; + // #ifdef USE_OPENCL + // std::cout << "Initializiwwng OpenswwssCL Backend..." << std::endl; + // #endif + switch (type) { case BackendType::MLLM_CPU: { shared_ptr mm = nullptr; // mm = std::make_shared(); mm = std::make_shared(); // todomm - Backend::global_backends[MLLM_CPU] = new CPUBackend(mm); + Backend::global_backends[MLLM_CPU] = std::make_unique(mm); break; } +#ifdef USE_OPENCL + case BackendType::MLLM_OPENCL: { + // std::cout << "Initializing OpensssCL Backend..." << std::endl; + BackendConfig config; + Backend::global_backends[MLLM_OPENCL] = std::make_unique(config); + break; + } +#endif #ifdef USE_QNN case BackendType::MLLM_QNN: { Backend::global_backends.emplace(MLLM_QNN, GetBackendCreator(MLLM_QNN)->create({})); @@ -130,58 +188,50 @@ class Module { } } } - void to(BackendType type) { + + // TODO: Deprecated, the module is not backend specific, the backend should be set in the SubGraphStart and SubGraphFinalize + Module &to(BackendType type) { initBackend(type); device_ = type; + doChangeBn = true; + doTrace = true; + forwardNoInput(); + doChangeBn = false; + doTrace = false; + tracedFlag = true; + return *this; + } + Module &cpu() { + return to(MLLM_CPU); + } + Module &cl() { +#ifdef USE_OPENCL + return to(MLLM_OPENCL); +#else + throw std::runtime_error("OpenCL backend is not available. 
Please compile with USE_OPENCL=ON."); +#endif } void load(string path) { // create global loader and save to llm_model_ptr.loader as QNNBackend needs to load weights in runtime - loader = new ParamLoader(std::move(path), true); // TODO mmap - load(*loader); + loader = std::make_unique(std::move(path), alloc_mmap); // todo + Tensor::tensor_status = TENSOR_STATIC_INIT; + doLoad = true; + doTrace = true; + load_time_ = forwardNoInput(); // ms + doLoad = false; + tracedFlag = true; + doTrace = false; } - void load(AbstructLoader ¶m_loader) { + void load_multifile(const std::initializer_list path) { + loader = std::make_unique(std::move(path)); Tensor::tensor_status = TENSOR_STATIC_INIT; - mllm_time_init(); - - loader = ¶m_loader; doLoad = true; - vector tmps; - int max_in_size = 5; - for (int i = 0; i < max_in_size; ++i) { - Tensor t(Backend::global_backends[MLLM_CPU]); - t.setName("input" + std::to_string(i)); - t.reshape(1, 1, 1, 10); - t.alloc(); - t.setModule(this); - tmps.push_back(t); - } - llm_model_ptr = this; - vector alternate_args = { - {}, - vector{0, 0}, - std::vector>(32, std::vector(2))}; - uint64_t time_start = 0; - for (auto args : alternate_args) { - time_start = mllm_time_us(); - try { - operator()(tmps, args); - break; - } catch (const std::exception &e) { -#if not defined(__ARM_NEON) - if (std::string("bad any_cast") != e.what()) { - MLLM_LOG_ERROR_STREAM << e.what() << std::endl; - exit(0); - } -#endif - } catch (...) { - MLLM_LOG_ERROR_STREAM << "load error" << std::endl; - exit(0); - } - } - uint64_t time_end = mllm_time_us(); - load_time_ = (time_end - time_start) / 1000.0F; // ms + doTrace = true; + load_time_ = forwardNoInput(); // ms doLoad = false; + tracedFlag = true; + doTrace = false; } virtual vector Forward(vector inputs, vector args) = 0; @@ -197,9 +247,15 @@ class Module { template vector operator()(vector inputs, Args... args) { vector anyArgs = convertArgsToAnyVector(args...); - auto backend = inputs.empty() ? 
Backend::global_backends[MLLM_CPU] : inputs[0].backend(); + device_ = Module::llm_model_ptr->device(); + auto backend = Backend::global_backends[device_].get(); + if (inputs.empty()) { + for (auto input : inputs) { + assert(input.backend() == backend && "All inputs must have the same backend as the module."); + } + } if (Backend::global_backends.size() == 2 && Backend::global_backends.find(MLLM_QNN) != Backend::global_backends.end()) { - backend = Backend::global_backends[MLLM_QNN]; + backend = Backend::global_backends[MLLM_QNN].get(); } return backend->runForward(this, inputs, anyArgs); } @@ -241,7 +297,9 @@ class Module { virtual void generate( Tensor &input_ids, const LlmTextGeneratorOpts &opt, const std::function &call_back = [](unsigned int) -> bool { return true; }); - vector generate(Tensor &input_ids, const LlmTextGeneratorOpts &opt, int end_token = -1); + // vector generate(Tensor &input_ids, const LlmTextGeneratorOpts &opt, int end_token = -1); + + vector> generate(Tensor &input_ids, const LlmTextGeneratorOpts &opt, int end_token = -1); }; class CPUModuleWrapper : public Module { @@ -304,6 +362,20 @@ class QNNModuleWrapper : public Module { } }; +#define CHAINABLE_MODULE_METHODS(ClassName) \ + ClassName &to(BackendType type) { \ + Module::to(type); \ + return *this; \ + } \ + ClassName &cpu() { \ + to(MLLM_CPU); \ + return *this; \ + } \ + ClassName &cl() { \ + to(MLLM_OPENCL); \ + return *this; \ + } + } // namespace mllm #endif // MODULE_HPP diff --git a/src/Op.cpp b/mllm/Op.cpp similarity index 100% rename from src/Op.cpp rename to mllm/Op.cpp diff --git a/src/Op.hpp b/mllm/Op.hpp similarity index 97% rename from src/Op.hpp rename to mllm/Op.hpp index ddfa99bf1..f765d41c8 100644 --- a/src/Op.hpp +++ b/mllm/Op.hpp @@ -61,6 +61,7 @@ class Op { virtual ErrorCode setUp(vector> inputs, vector> outputs) { for (auto &output : outputs) { output->setDtype(activation_dtype_); + output->setCtype(inputs[0]->ctype()); output->alloc(); } return MLLM_NO_ERROR; @@ 
-110,7 +111,7 @@ class Op { DataType activation_dtype() const { return activation_dtype_; } - OpType type() const { + OpType &type() { return type_; } void setOpType(OpType type) { @@ -131,6 +132,10 @@ class Op { return no_load_weights_dtype_; } + bool &traced() { + return traced_; + } + protected: Backend *backend_; vector inputs_; @@ -139,6 +144,7 @@ class Op { DataType activation_dtype_ = MLLM_TYPE_F32; OpType type_; static DataType no_load_weights_dtype_; + bool traced_ = false; }; class Callable { diff --git a/mllm/OpDefined.hpp b/mllm/OpDefined.hpp new file mode 100644 index 000000000..3161f44b7 --- /dev/null +++ b/mllm/OpDefined.hpp @@ -0,0 +1,176 @@ +#ifndef MLLM_OPDEFINED_H +#define MLLM_OPDEFINED_H + +#include +#include +using std::string; +using std::vector; + +namespace mllm { +enum OpType { + INVALID_VALUE = 0, // 0 + PARAMETER, // 1 + ADD, // 2 + SOFTMAX, // 3 + SILU, // 4 + SILU_FULL_PRECISION, // 5 + MATMUL, // 6 + SCALE, // 7 + ROPE, // 8 + ROPESIMPLE, // 9 + POSITIOANL_EMBEDDING, // 10 + RMSNORM, // 11 + CAUSALMASK, // 12 + SLIDINGWINDOWMASK, // 13 + LINEAR, // 14 + LINEARINT8, // 15 + LINEARINT8SHADOW, // 16 + EMBEDDING, // 17 + MUL, // 18 + VIEW, // 19 + KVCACHE, // 20 + KVCACHENPU, // 21 + RELU, // 22 + RELU2, // 23 + OP_GELU, // 24 + QUICKGLUE, // 25 + LAYERNORM, // 26 + SPLIT, // 27 + GATHER, // 28 + CONVOLUTION2D, // 29 + CONVOLUTION3D, // 30 + VISIONROPE, // 31 + VISIONROPESIN, // 32 + VISIONROPECOS, // 33 + MULTIMODALROPEPIP, // 34 + MULTIMODALROPE, // 35 + AVGPOOL2D, // 36 + MAXPOOL2D, // 37 + CAT, // 38 + TRANSPOSE, // 39 + SUBDIM, // 40 + DIVISION, // 41 + NORM, // 42 + SHAPE, // 43 + MEAN, // 44 + RANGE, // 45 + WHERE, // 46 + REPLACE, // 47 + PREDICTOR, // 48 + SPARSELINEAR, // 49 + SPARSEIDLINEAR, // 50 + ELASTICLINEAR, // 51 + POSITION, // 52 + WNOP, // 53 + QUANTIZE, // 54 + DEQUANTIZE, // 55 + DEQUANTIZEADD, // 56 + MERGEOUTPUT, // 57 + SPLITINPUT, // 58 + IROPE, // 59 + OP_NUM, // 60 + NTKROPE, // 61 + SCATTER, // 62 + TILDE, // 
63 + MASKEDFILL, // 64 + SIGMOID, // 65 + + // add in xnnpack + DIRECT, // 66 + DISPATCH, // 67 + SUBGRAPHSTART, // 68 + SUBGRAPHFINALIZE, // 69 + D2H, // 70 + XP_KVCACHE, // 71 + SDPA, // 72 + + // new front-end + SUPERSILU, // 73 + HEADLINEAR, // 74 + + // for speculative decoding + ROPETREE, // 75 + CAUSALTREEMASK, // 76 + KVCACHESAGE, // 77 + + // + F_ADD, // 78 + F_SUB, // 79 + F_MUL, // 80 + F_DIV, // 81 + F_DIVINT, // 82 + F_TTADD, // 83 + F_TTSUB, // 84 + F_TTMUL, // 85 + F_TTDIV, // 86 + F_MM, // 87 + F_NORM, // 88 + F_MEAN, // 89 + F_CAT, // 90 + F_VIEW, // 91 + F_TRANPOSE, // 92 + F_FLATTEN, // 93 + F_CLIP, // 94 + F_CLIPAXIS, // 95 + F_CLIPTENSOR, // 96 + F_RANGE, // 97 + F_WHERE, // 98 + F_INDEX_PUT, // 99 + F_SPLIT, // 100 + F_SUM, // 101 + F_TOPK, // 102 + F_EXPPAND, // 103 + F_ARGSORT, // 104 + F_BINCOUNT, // 105 + F_REPEAT, // 106 + F_LIKE, // 107 + F_SCATTERRADD, // 108 + F_APPLY_VISIOROPE, // 109 + F_FA2, // 110 + F_SAGEATTN, // 111 + // models use only + F_FUYU_GATHER_EMBD, // 112 + F_PHI3V_HD_MERGE, // 113 +}; + +enum TensorFuncType { + FUNC_ADD, + FUNC_SUB, + FUNC_MUL, + FUNC_DIV, + FUNC_DIVINT, + FUNC_TTADD, + FUNC_TTSUB, + FUNC_TTMUL, + FUNC_TTDIV, + FUNC_MM, + FUNC_NORM, + FUNC_MEAN, + FUNC_CAT, + FUNC_VIEW, + FUNC_TRANPOSE, + FUNC_FLATTEN, + FUNC_CLIP, + FUNC_CLIPAXIS, + FUNC_CLIPTENSOR, + FUNC_RANGE, + FUNC_WHERE, + FUNC_INDEX_PUT, + FUNC_SPLIT, + FUNC_SUM, + FUNC_TOPK, + FUNC_EXPPAND, + FUNC_ARGSORT, + FUNC_BINCOUNT, + FUNC_REPEAT, + FUNC_LIKE, + FUNC_SCATTERREDUCE, + FUNC_APPLY_VISIOROPE, + FUNC_FA2, + // models use only + FUNC_FUYU_GATHER_EMBD, + FUNC_PHI3V_HD_MERGE, +}; + +} // namespace mllm +#endif diff --git a/src/Parallel.cpp b/mllm/Parallel.cpp similarity index 100% rename from src/Parallel.cpp rename to mllm/Parallel.cpp diff --git a/src/Parallel.hpp b/mllm/Parallel.hpp similarity index 81% rename from src/Parallel.hpp rename to mllm/Parallel.hpp index 9603a6107..e203a4e76 100644 --- a/src/Parallel.hpp +++ b/mllm/Parallel.hpp @@ 
-22,17 +22,21 @@ class ChunkPipeline { chunk_num = seq_length_padding / chunk_size; } - shared_ptr run(Tensor &input_tensor, LlmTextGeneratorOpts &opt, Tokenizer &tokenizer, Module &model, bool &isSwitched) { + shared_ptr run(Tensor &input_tensor, LlmTextGeneratorOpts &opt, Tokenizer &tokenizer, Module &model, bool &isSwitched, const vector &clean_tensors = {}) { + auto input_copy_sp = std::make_shared(); + input_copy_sp->initFrom(input_tensor); // 初始化形状和数据类型 + input_copy_sp->copyFrom(input_tensor); // 深拷贝数据 + const int num_graph = Tracer::model_.size(); Tensor::tensor_status = TENSOR_STATIC_READY; std::cout << "num_graph: " << num_graph << std::endl; for (int chunk_id = 0; chunk_id < chunk_num; ++chunk_id) { - chunked_tensors.push_back(std::make_shared(Backend::global_backends[MLLM_CPU])); + chunked_tensors.push_back(std::make_shared(Backend::global_backends[MLLM_CPU].get())); chunked_tensors[chunk_id]->setTtype(INPUT_TENSOR); chunked_tensors[chunk_id]->setName(input_tensor.name()); chunked_tensors[chunk_id]->reshape(1, 1, chunk_size, 1); - chunked_tensors[chunk_id]->shallowCopyFrom(&input_tensor, false, {0, 0, chunk_id * chunk_size, 0}); + chunked_tensors[chunk_id]->shallowCopyFrom(input_copy_sp, false, {0, 0, chunk_id * chunk_size, 0}, 1); } std::function executeFunc = [&](int chunk_id, int graphIdx) { @@ -42,19 +46,22 @@ class ChunkPipeline { return; } // only the last chunk need to execute the last graph - if(i == num_graph - 1 && chunk_id != chunk_num - 1) { + if (i == num_graph - 1 && chunk_id != chunk_num - 1) { return; } // before the first graph, need to refresh the input tensor if (i == 0) { Tracer::refleshInputTensor({chunked_tensors[chunk_id]}); } - +#ifdef DEBUGPRINT auto graph_start = mllm_time_us(); +#endif auto &graph = Tracer::model_[i]; graph->Forward({}, {chunk_id}); +#ifdef DEBUGPRINT auto graph_end = mllm_time_us(); std::cout << "chunk_id: " << chunk_id << ", graphIdx: " << i << ", graph time: " << (graph_end - graph_start) / 1000.0F << "ms" 
<< std::endl; +#endif }; auto start_t = mllm_time_us(); omp_set_max_active_levels(3); @@ -63,14 +70,14 @@ class ChunkPipeline { for (int i = chunk_id * 2; i < num_graph + chunk_id * 2 + 5; ++i) { #pragma omp parallel for num_threads(2) for (int pair_idx = 0; pair_idx < 2; ++pair_idx) { - executeFunc(chunk_id * 2 + pair_idx, i - pair_idx * 4); + executeFunc((chunk_id * 2) + pair_idx, i - (pair_idx * 4)); } #pragma omp barrier - std::cout << "---------------------------" << std::endl; + // std::cout << "---------------------------" << std::endl; } } auto end_t = mllm_time_us(); - std::cout << "time: " << (end_t - start_t) / 1000.0F << "ms" << std::endl; + std::cout << "prefill time: " << (end_t - start_t) / 1000.0F << "ms" << std::endl; auto postProcessing = [&](shared_ptr result, shared_ptr &out_result, int real_seq_length) -> unsigned int { assert(result->batch() == 1); @@ -95,6 +102,12 @@ class ChunkPipeline { auto token_idx = postProcessing(result[0], chunked_tensors.back(), real_seq_length); auto out_string = tokenizer.detokenize({token_idx}); std::cout << out_string << std::flush; + + for (auto tensor : clean_tensors) { + tensor->reshape(0, 0, 0, 0); + tensor->alloc(); + } + return chunked_tensors.back(); } }; diff --git a/src/ParamLoader.cpp b/mllm/ParamLoader.cpp similarity index 63% rename from src/ParamLoader.cpp rename to mllm/ParamLoader.cpp index a82fd7cf7..bb59dadc2 100644 --- a/src/ParamLoader.cpp +++ b/mllm/ParamLoader.cpp @@ -30,6 +30,8 @@ * Weights File Structure */ namespace mllm { + +/* bool ParamLoader::load(mllm::Tensor *tensor) { string name = tensor->name(); if (!use_mmap_) { @@ -38,52 +40,101 @@ bool ParamLoader::load(mllm::Tensor *tensor) { std::pair offset = offsets_[name]; auto *p = tensor->hostPtr(); fseek(fp_, offset.first, SEEK_SET); - size_t read_size = std::min(tensor->cntSize(), offset.second); + size_t read_size = std::min(tensor->cntSize(), static_cast(offset.second)); auto _ = fread(p, sizeof(uint8_t), read_size, fp_); - /* + 
return true; + } else { // USE_MMAP is defined + if (mmap_buffer_ == nullptr || offsets_.find(name) == offsets_.end()) { + return false; + } + std::lock_guard lock(mtx); + auto offset_info = offsets_[name]; + + // --- 在这里加入对齐诊断代码 --- + int required_alignment = DataTypeSize(tensor->dtype()); // 获取数据类型的大小,如 float 是 4 + if (required_alignment == 0) required_alignment = 1; // 避免除零 + + bool is_aligned = (offset_info.first % required_alignment == 0); + + if (!is_aligned) { + fprintf(stderr, "[ALIGNMENT ERROR] Tensor: '%s', DataType: %d, Offset: %llu, Required Alignment: %d. DATA IS MISALIGNED!\n", + name.c_str(), + tensor->dtype(), + (unsigned long long)offset_info.first, + required_alignment); + } else { + // (可选) 打印出对齐正确的信息,用于确认 + // fprintf(stdout, "[ALIGNMENT OK] Tensor: '%s', Offset: %llu, Alignment: %d.\n", + // name.c_str(), (unsigned long long)offset_info.first, required_alignment); + } + // --- 诊断代码结束 --- + + // 如果不对齐,直接返回失败,因为我们不允许拷贝 + if (!is_aligned) { + return false; + } + + // 只有在对齐检查通过后,才执行零拷贝的指针赋值 + if (tensor->cntSize() != offset_info.second) { return false; } + uint8_t* source_ptr = mmap_buffer_.get() + offset_info.first; + tensor->setHostPtr(source_ptr, mmap_buffer_); + + return true; + } +} + */ +// 在 ParamLoader.cpp 中 +bool ParamLoader::load(mllm::Tensor *tensor) { + if (tensor->backend() && tensor->backend()->load_from_file(tensor, this)) { + return true; + } + + string name = tensor->name(); + if (!use_mmap_) { + std::lock_guard lock(mtx); if (offsets_.find(name) == offsets_.end()) { return false; } std::pair offset = offsets_[name]; - uint8_t *data = new uint8_t[offset.second]; - fseek(fp_, offset.first, SEEK_SET); - auto _ = fread(data, sizeof(uint8_t), offset.second, fp_); - // TODO:Data? - // tenor. 
= data; auto *p = tensor->hostPtr(); - - if (tensor->cntSize() >= offset.second) - memcpy(static_cast(p), static_cast(data), - offset.second); // Cast pointers to void* - else - memcpy(static_cast(p), static_cast(data), - tensor->cntSize()); // Cast pointers to void* - delete[] data; // Free the memory allocated by new - */ + fseek(fp_, offset.first, SEEK_SET); + size_t read_size = std::min(tensor->cntSize(), static_cast(offset.second)); + auto _ = fread(p, sizeof(uint8_t), read_size, fp_); return true; - } else { // USE_MMAP is defined - // 确保 buffer_ 和 offsets_ 已经为 mmap 正确初始化 - if (!use_mmap_ || buffer_ == nullptr || offsets_.find(name) == offsets_.end()) { - // 可以选择打印错误信息或返回 false - // TODO - // if (offsets_.find(name) == offsets_.end()) { - // fprintf(stderr, "Tensor name '%s' not found in offsets.\n", name.c_str()); - // } else { - // fprintf(stderr, "Buffer is null or mmap not initialized.\n"); - // } + + } else { + // --- mmap 模式,实现智能选择 --- + if (mmap_buffer_ == nullptr || offsets_.find(name) == offsets_.end()) { + return false; + } + std::lock_guard lock(mtx); + auto offset_info = offsets_[name]; + + // 1. 尺寸检查 (保持不变) + if (tensor->cntSize() != offset_info.second) { + fprintf(stderr, "Error: Tensor '%s' size mismatch. Code wants %zu, file has %llu.\n", + name.c_str(), tensor->cntSize(), (unsigned long long)offset_info.second); return false; } - std::lock_guard lock(mtx); // mmap 访问也可能需要同步,取决于使用场景 - std::pair offset_info = offsets_[name]; - auto *p = tensor->hostPtr(); // 获取 tensor 的主机指针 - // 计算源数据指针 - uint8_t *source_ptr = buffer_ + offset_info.first; + // 2. 
检查对齐 + int required_alignment = DataTypeSize(tensor->dtype()); + if (required_alignment == 0) required_alignment = 1; + bool is_aligned = (offset_info.first % required_alignment == 0); - // 要拷贝的数据大小,取 tensor 大小和参数大小的最小值 - size_t copy_size = std::min(tensor->cntSize(), offset_info.second); + if (is_aligned) { + // -- 对齐:执行零拷贝 -- + // fprintf(stdout, "[MMAP ZERO-COPY] Tensor: '%s'\n", name.c_str()); + uint8_t *source_ptr = mmap_buffer_.get() + offset_info.first; + tensor->setHostPtr(source_ptr, mmap_buffer_); // setHostPtr 会处理好一切 + } else { + // -- 未对齐:回退到 fread 普通加载 -- + // fprintf(stdout, "[MMAP FALLBACK to FREAD] Tensor: '%s' is not aligned.\n", name.c_str()); - // 从内存映射的 buffer_ 拷贝数据到 tensor - memcpy(static_cast(p), static_cast(source_ptr), copy_size); + // 因为 fp_ 现在是有效的,我们可以直接使用普通加载逻辑 + auto *p = tensor->hostPtr(); + fseek(fp_, offset_info.first, SEEK_SET); + auto _ = fread(p, sizeof(uint8_t), tensor->cntSize(), fp_); + } return true; } @@ -94,11 +145,10 @@ ParamLoader::~ParamLoader() { munmap(buffer_, size_); buffer_ = nullptr; } - } else { - if (fp_ != nullptr) { - fclose(fp_); - fp_ = nullptr; - } + } + if (fp_ != nullptr) { + fclose(fp_); + fp_ = nullptr; } } // #ifdef ANDROID_API @@ -108,98 +158,110 @@ ParamLoader::ParamLoader(std::string filename, bool use_mmap_param) : path_(std::move(filename)), use_mmap_(use_mmap_param), fp_(nullptr), buffer_(nullptr), size_(0) { // Initialize new members if (use_mmap_) { - // 1. 打开文件 - FILE *temp_fp = fopen(this->path_.c_str(), "rb"); - if (temp_fp == nullptr) { - // perror(("Error opening file for mmap: " + this->path_).c_str()); - // exit(1); // Or handle error differently + // --- 1. 打开文件并获取文件描述符,直接使用成员变量 fp_ --- + this->fp_ = fopen(this->path_.c_str(), "rb"); // 直接赋值给 this->fp_ + if (!this->path_.empty() && this->fp_ == nullptr) { + perror(("Error opening file: " + this->path_).c_str()); + exit(1); // 报错并立即退出程序 + return; // 打开失败,直接返回 + } + if (this->path_.empty()) { return; } - // 2. 
获取文件大小 - fseek(temp_fp, 0, SEEK_END); - size_ = ftell(temp_fp); - fseek(temp_fp, 0, SEEK_SET); // Reset to beginning - - // 3. 内存映射 (示例使用 POSIX mmap) - // #include - // #include - // #include - int fd = fileno(temp_fp); // Get file descriptor - buffer_ = (uint8_t *)mmap(NULL, size_, PROT_READ, MAP_PRIVATE, fd, 0); - if (buffer_ == MAP_FAILED) { + // --- 2. 获取文件大小 --- + fseek(this->fp_, 0, SEEK_END); + size_ = ftell(this->fp_); + // 注意:这里不要将文件指针移回开头,fseek 和 mmap 的 offset 是独立的 + + // --- 3. 执行内存映射 --- + int fd = fileno(this->fp_); + uint8_t *mapped_ptr = (uint8_t *)mmap(NULL, size_, PROT_READ, MAP_PRIVATE, fd, 0); + + // --- 关键:不要关闭文件!--- + // fclose(this->fp_); // <-- 注释或删除这一行 + + if (mapped_ptr == MAP_FAILED) { perror("mmap failed"); - fclose(temp_fp); // Close the temporary file pointer - buffer_ = nullptr; // Mark buffer as invalid - use_mmap_ = false; // Fallback or indicate error - // exit(1); // Or handle error differently + // mmap 失败,但文件还开着,可以考虑退回到纯文件模式或标记失败 + use_mmap_ = false; + // 清理已打开的文件 + fclose(this->fp_); + this->fp_ = nullptr; + // 也许这里应该重新走一遍非 mmap 的初始化逻辑,或者直接返回让对象不可用 return; } - // 文件描述符可以关闭了,mmap 会保持映射 - // fclose(temp_fp); // Or keep fp_ as the original file pointer if needed for non-mmap fallback - // 4. 从 buffer_ 读取元数据 (类似于 readInt, readString 等,但操作指针) - uint8_t *current_ptr = buffer_; + // --- 4. 包装 mmap 指针 --- + auto mmap_size = this->size_; + this->mmap_buffer_ = std::shared_ptr(mapped_ptr, [mmap_size](uint8_t *p) { + munmap(p, mmap_size); + }); + + // ==================================================================== + // --- 5. 
从 mmap 内存区域中解析元数据 --- + // ==================================================================== + + // 定义一系列在内存指针上操作的辅助 lambda 函数,用于替代原先在 FILE* 上的操作 auto mmap_readInt = [&](uint8_t *&ptr) { int32_t val; memcpy(&val, ptr, sizeof(int32_t)); - ptr += sizeof(int32_t); + ptr += sizeof(int32_t); // 移动指针 return val; }; auto mmap_readu64 = [&](uint8_t *&ptr) { uint64_t val; memcpy(&val, ptr, sizeof(uint64_t)); - ptr += sizeof(uint64_t); + ptr += sizeof(uint64_t); // 移动指针 return val; }; auto mmap_readString = [&](uint8_t *&ptr) { int len = mmap_readInt(ptr); - std::string str((char *)ptr, len); - ptr += len; + if (len == 0) return std::string(""); + std::string str(reinterpret_cast(ptr), len); + ptr += len; // 移动指针 return str; }; + // 获取指向 mmap 区域开头的当前指针 + uint8_t *current_ptr = mmap_buffer_.get(); + + // a. 读取并验证幻数 int magic = mmap_readInt(current_ptr); if (magic != _MAGIC_NUMBER) { fprintf(stderr, "Mmap: magic number error\n"); - munmap(buffer_, size_); // Unmap memory - buffer_ = nullptr; + this->mmap_buffer_.reset(); // 释放 shared_ptr,触发 munmap use_mmap_ = false; - // exit(1); // Or handle error return; } + // b. 读取索引区域的总长度 uint64_t index_size = mmap_readu64(current_ptr); + + // c. 计算索引区域的结束地址 uint8_t *index_end_ptr = current_ptr + index_size; + // d. 
循环读取所有张量的元信息,直到遍历完整个索引区域 while (current_ptr < index_end_ptr) { std::string name = mmap_readString(current_ptr); uint64_t length = mmap_readu64(current_ptr); - // 对于 mmap,offset 通常是相对于 buffer_ 开始的偏移 - // 如果文件格式中的 offset 是相对于文件数据区的绝对偏移,需要调整 - uint64_t offset_in_file = mmap_readu64(current_ptr); // This is the offset as stored in the file - offsets_[name] = std::make_pair(offset_in_file, length); // Store the original offset from file - data_type_[name] = mmap_readInt(current_ptr); - } - // Mmap is set up, fp_ might not be needed or could be temp_fp if kept open - // If you want to keep fp_ for potential non-mmap operations or cleanup: - this->fp_ = temp_fp; // Assign after successful mmap, or keep it as NULL - // If keeping temp_fp, ensure it's closed in destructor if mmap was used. - // Alternatively, just close temp_fp here if all mmap ops are done. - // fclose(temp_fp) was already called if mmap failed. If successful, and you don't need fp_ for mmap path: - // fclose(temp_fp); // Or defer to destructor. For mmap, fd is what mattered. - // Let's assume we close it here if mmap succeeded and fp_ isn't used by mmap logic itself - if (buffer_ != MAP_FAILED) { // if mmap succeeded - // fclose(temp_fp); // Decide on fp_ lifecycle. 
If mmap is primary, fp_ might be set to nullptr + uint64_t offset_in_file = mmap_readu64(current_ptr); + + // 将解析出的信息存入 map + offsets_[name] = std::make_pair(offset_in_file, length); + data_type_[name] = static_cast(mmap_readInt(current_ptr)); } } else { // USE_MMAP is NOT defined // Original logic when USE_MMAP is not defined (ensures use_mmap_param is ignored) use_mmap_ = false; // Force false if USE_MMAP macro is not defined this->fp_ = fopen(this->path_.c_str(), "rb"); - if (this->fp_ == nullptr) { - // perror(("Error opening file: " + this->path_).c_str()); - // exit(1); + if (!this->path_.empty() && this->fp_ == nullptr) { + perror(("Error opening file: " + this->path_).c_str()); + exit(1); + return; + } + if (this->path_.empty()) { return; } fseek(fp_, 0, SEEK_SET); @@ -241,18 +303,23 @@ std::tuple ParamLoader::load(string name) { auto _ = fread(data, sizeof(uint8_t), length, fp_); return std::make_tuple(data, length); } + DataType ParamLoader::getDataType(string name) { if (data_type_.count(name) != 1) { - if (!this->path_.empty() && this->fp_ == nullptr) { - MLLM_LOG_ERROR_STREAM << this->path_ << " not found" << std::endl; + if (!use_mmap_ && !this->path_.empty() && this->fp_ == nullptr) { + MLLM_LOG_ERROR_STREAM << "File IO mode: " << this->path_ << " not found or failed to open." << std::endl; exit(0); - } else if (this->fp_ != nullptr && !this->path_.empty()) { - MLLM_LOG_ERROR_STREAM << name << " not found" << std::endl; } + + if (use_mmap_) { + MLLM_LOG_WARNING_STREAM << "Mmap mode: Tensor '" << name << "' not found in model metadata." << std::endl; + } else { + MLLM_LOG_WARNING_STREAM << "File IO mode: Tensor '" << name << "' not found in model metadata." 
<< std::endl; + } + return DataType::MLLM_TYPE_COUNT; } int type = data_type_[name]; - // check if exists return static_cast(type); } @@ -396,4 +463,20 @@ bool ParamLoader::partialLoad(mllm::Tensor *tensor, std::set validRow, int return true; } } + +ParamMetadata ParamLoader::getParamMetadata(const std::string &name) { + if (offsets_.find(name) == offsets_.end()) { + throw std::runtime_error("Parameter '" + name + "' not found in offsets map."); + } + auto &offset_pair = offsets_.at(name); + return {offset_pair.first, offset_pair.second}; +} + +FILE *ParamLoader::getInputStream() { + return this->fp_; +} + +std::string ParamLoader::getParamPath() const { + return this->path_; +} } // namespace mllm \ No newline at end of file diff --git a/src/ParamLoader.hpp b/mllm/ParamLoader.hpp similarity index 91% rename from src/ParamLoader.hpp rename to mllm/ParamLoader.hpp index 94e987e61..54450260b 100644 --- a/src/ParamLoader.hpp +++ b/mllm/ParamLoader.hpp @@ -51,6 +51,7 @@ static std::string readString(mllm_file *fp_) { */ class AbstructLoader { public: + virtual ~AbstructLoader() = default; virtual bool load(mllm::Tensor *tensor) = 0; virtual bool load(std::shared_ptr tensor) = 0; @@ -64,6 +65,11 @@ class AbstructLoader { // virtual bool partialLoad(mllm::Tensor *tensor, std::set validRow, int rowNum, int colNum) = 0; }; +struct ParamMetadata { + uint64_t offset; // 参数在文件中的起始偏移 + uint64_t size; // 参数占用的字节数 +}; + /** * \brief The ParamLoader class is the default and only(currently) implementation of the AbstructLoader class. 
*/ @@ -96,6 +102,10 @@ class ParamLoader : public AbstructLoader { return offsets_.size(); } + ParamMetadata getParamMetadata(const std::string &name); + FILE *getInputStream(); + std::string getParamPath() const; + protected: std::mutex mtx; mllm_file *fp_; @@ -105,6 +115,7 @@ class ParamLoader : public AbstructLoader { std::map> offsets_; // offsets,length std::map data_type_; bool use_mmap_; + std::shared_ptr mmap_buffer_; }; /** diff --git a/mllm/StateManager.hpp b/mllm/StateManager.hpp new file mode 100644 index 000000000..42788e4f9 --- /dev/null +++ b/mllm/StateManager.hpp @@ -0,0 +1,139 @@ +/** + * @brief Interface for managing inference-related state modules. + * + * The StateManager class defines a common interface for modules that manage + * runtime state in a model inference engine. These states may include + * sequence lengths, chunked prefill stages, speculative decoding information, + * or any future task-specific metadata. + * + * The goal of this abstraction is to decouple backend execution from + * stateful control logic, and to provide a unified mechanism for managing, + * resetting, and debugging inference state across different components. 
+ * + */ + +#pragma once + +#include "Types.hpp" +#include + +namespace mllm { + +class StateManager { +public: + virtual ~StateManager() = default; + + virtual std::string name() const = 0; + + virtual void reset() = 0; +}; + +class InferenceStateManager : public StateManager { +public: + std::string name() const override { + return "InferenceStateManager"; + } + void reset() override { + execution_type_ = PROMPT; + cur_sequence_length_ = 0; + total_sequence_length_ = 0; + is_switching_stage_ = false; + } + + void setCurSequenceLength(int sequence_length) { + cur_sequence_length_ = sequence_length; + } + int getCurSequenceLength() const { + return cur_sequence_length_; + } + void setTotalSequenceLength(int sequence_length) { + total_sequence_length_ = sequence_length; + } + int getTotalSequenceLength() const { + return total_sequence_length_; + } + void toggleSwitching() { + is_switching_stage_ = !is_switching_stage_; + } + void setChunkSize(int chunk_size) { + chunk_size_ = chunk_size; + } + int getChunkSize() const { + return chunk_size_; + } + bool isStageSwitching() const { + return is_switching_stage_; + } + void setExecutionType(ExecutionType type) { + execution_type_ = type; + } + ExecutionType getExecutionType() const { + return execution_type_; + } + void setQnnGraphFrozen(bool frozen) { + is_qnn_graph_frozen = frozen; + } + bool isQnnGraphFrozen() const { + return is_qnn_graph_frozen; + } + void setCPUViT(bool value) { + isCPUViT = value; + } + bool getIsCPUViT() const { + return isCPUViT; + } + +private: + // indicate whether the state manager is in a prefill or decoding stage + ExecutionType execution_type_ = PROMPT; + // auto regression seq state + int cur_sequence_length_ = 0; + // total real seq length used for chunk & padding input + int total_sequence_length_ = 0; + // chunk size used in HeadLinear + int chunk_size_ = 0; + bool is_switching_stage_ = false; + // used to indicate whether the QNN graph is frozen for inference + bool 
is_qnn_graph_frozen = false; + + // QNN ViT specific config, when using CPU ViT, layers must be reused (block.X.) + bool isCPUViT = true; +}; + +class SpeculativeDecodingManager : public StateManager { +public: + std::string name() const override { + return "SpeculativeDecodingManager"; + } + void reset() override { + using_draft_ = false; + last_draft_length_ = 0; + last_verified_position_ids_.clear(); + } + + void setLastDraftLength(unsigned int draft_length) { + last_draft_length_ = draft_length; + } + void setLastVerifiedPositionIds(const std::vector &verified_position_ids) { + last_verified_position_ids_ = verified_position_ids; + } + void setUsingDraft(bool _usingDraft) { + this->using_draft_ = _usingDraft; + } + unsigned int getLastDraftLength() { + return last_draft_length_; + } + std::vector getLastVerifiedPositionIds() { + return last_verified_position_ids_; + } + bool isUsingDraft() { + return using_draft_; + } + +private: + bool using_draft_ = false; + std::vector last_verified_position_ids_; + unsigned int last_draft_length_ = 0; +}; + +} // namespace mllm diff --git a/mllm/Tensor.cpp b/mllm/Tensor.cpp new file mode 100644 index 000000000..cbef03c9e --- /dev/null +++ b/mllm/Tensor.cpp @@ -0,0 +1,762 @@ +#include "Tensor.hpp" + +#include +#include +// #include +#include "Backend.hpp" +#include "Op.hpp" +#include "OpDefined.hpp" +#include "TensorImpl.hpp" +// #include "Timing.hpp" +#include "Types.hpp" +#include +#include +#include +// #include +#include +#include + +namespace mllm { + +Tensor::Tensor(int batch, int head, int sequence, int dimension, Backend *bn, bool do_alloc) : + impl_(std::make_shared(bn)) { // 使用带Backend的TensorImpl构造函数 + impl_->dtype_ = MLLM_TYPE_F32; + reshape(batch, head, sequence, dimension); + if (do_alloc) { + alloc(); + } +} + +Tensor::Tensor(int batch, int head, int sequence, int dimension, BackendType bn_type, bool do_alloc) : + impl_(std::make_shared()) { + if (Backend::global_backends.find(bn_type) == 
Backend::global_backends.end()) { + Module::initBackend(bn_type); + } + impl_->dtype_ = MLLM_TYPE_F32; + impl_->backend_ = Backend::global_backends[bn_type].get(); + reshape(batch, head, sequence, dimension); + if (do_alloc) { + alloc(); + } +} + +Tensor::Tensor(const std::vector &shape) : + impl_(std::make_shared()) { + impl_->private_reshape(shape); +} + +Tensor::Tensor(int value, Backend *bn) : + impl_(std::make_shared()) { + impl_->dtype_ = MLLM_TYPE_F32; + impl_->backend_ = Backend::global_backends[MLLM_CPU].get(); + reshape(1, 1, 1, 1); + alloc(); + impl_->should_in_graphs_ = false; + setDataAt(0, 0, 0, 0, static_cast(value)); + to(bn->type()); +} + +Tensor::Tensor(int value, BackendType bn_type) : + impl_(std::make_shared()) { + impl_->dtype_ = MLLM_TYPE_F32; + impl_->backend_ = Backend::global_backends[bn_type].get(); + reshape(1, 1, 1, 1); + alloc(); + impl_->should_in_graphs_ = false; + setDataAt(0, 0, 0, 0, static_cast(value)); +} + +Tensor::Tensor(std::vector values, BackendType bn_type) : + impl_(std::make_shared()) { + impl_->dtype_ = MLLM_TYPE_F32; + impl_->backend_ = Backend::global_backends[bn_type].get(); + reshape(1, 1, 1, values.size()); + alloc(); + impl_->should_in_graphs_ = false; + for (size_t i = 0; i < values.size(); ++i) { + setDataAt(0, 0, 0, i, values[i]); + } +} + +bool Tensor::reshape(const int batch, const int head, const int sequence, const int dimension) { + return impl_->reshape(batch, head, sequence, dimension); + // vector shape(4); + // shape[chls()[BATCH]] = batch; + // shape[chls()[HEAD]] = head; + // shape[chls()[SEQUENCE]] = sequence; + // shape[chls()[DIMENSION]] = dimension; + // return reshape(shape); +} + +// Tensor.cpp +void Tensor::alloc() { + // if ("out-model.embed_tokens" == name()) + // std::cout << "alloc " << name() << std::endl; + if (aggregated_) return; + assert(impl_->backend_ != nullptr); + if (!master_tensor_.expired()) return; + if (!shape_offset_.empty() && !shape_master_.empty()) return; + + 
impl_->alloc(); +} + +bool Tensor::reshape(int batch, int channel, int time, int height, int width) { + if (impl_->ctype_ != BTHWC) { + impl_->ctype_ = BCTHW; + impl_->chls_[BATCH] = 0; + impl_->chls_[CHANNLE] = 1; + impl_->chls_[TIME] = 2; + impl_->chls_[HEIGHT] = 3; + impl_->chls_[WIDTH] = 4; + } else { + impl_->chls_[BATCH] = 0; + impl_->chls_[TIME] = 1; + impl_->chls_[HEIGHT] = 2; + impl_->chls_[WIDTH] = 3; + impl_->chls_[CHANNLE] = 4; + } + + std::vector shape(5); + const auto &chls = impl_->chls_; // 从TensorImpl获取维度映射 + + shape[chls.at(BATCH)] = batch; + shape[chls.at(CHANNLE)] = channel; + shape[chls.at(TIME)] = time; + shape[chls.at(HEIGHT)] = height; + shape[chls.at(WIDTH)] = width; + + return impl_->private_reshape(shape); +} + +TensorStatus Tensor::tensor_status; + +uint32_t &Tensor::uuid() { + return uuid_; +} + +TensorType &Tensor::xnnTensorType() { + return xnn_tensor_type_; +} + +void Tensor::forceResetHostPointer(void *ptr) { + impl_->host_ptr_ = ptr; +} + +Tensor &Tensor::to(BackendType backend_type) { + // TODO: check if the data is shared between devices + // if so, return the origin tensor + // if not, return the new tensor + // TODO: if need copy, should implement copyDataCrossBn and do copy when Tensor::TENSOR_STATIC_READY + // if (Module::llm_model_ptr->doChangeBn) { + // Module::llm_model_ptr->device() = backend_type; + // } + /** + * Currently, there are following cases: + * CPU -> QNN, QNN -> CPU + * if it is CPU -> QNN, the buffer should be realloced + * (NOTE: not handling data copy as the tensor.to() shoudld be called before the data is set and tensor.device() should be checked in frontend) + * if it is QNN -> CPU, the data is sharable between CPU and QNN, no need to copy or realloc + */ + if (device() == backend_type) { + return *this; + } + if (backend_type == MLLM_CPU && device() == MLLM_QNN) { + // data is sharable between CPU and QNN + return *this; + } + // realloc the tensor + // realloc the tensor + if (backend_type == MLLM_QNN 
&& device() == MLLM_CPU) { + if (this->masterTensor() != nullptr) { + auto master_tensor = this->masterTensor(); + master_tensor->free(); + master_tensor->to(MLLM_QNN); + master_tensor->alloc(); + for (auto &child_wp : master_tensor->childTensors()) { + // Lock the weak_ptr to get a shared_ptr + if (auto child_sp = child_wp.lock()) { + child_sp->forceResetHostPointer(this->impl_->host_ptr_); + } + } + } else { + this->free(); + module()->activation_tensors[name()]->setBackend(Backend::global_backends[backend_type].get()); + this->setBackend(Backend::global_backends[backend_type].get()); + } + return *this; + } + if (backend_type == MLLM_CPU && device() == MLLM_XNNPACK) { + module()->activation_tensors[name()]->setBackend(Backend::global_backends[backend_type].get()); + this->setBackend(Backend::global_backends[backend_type].get()); + return *this; + } + if (backend_type == MLLM_XNNPACK && device() == MLLM_CPU) { + module()->activation_tensors[name()]->setBackend(Backend::global_backends[backend_type].get()); + this->setBackend(Backend::global_backends[backend_type].get()); + return *this; + } + Backend *target_backend = Backend::global_backends[backend_type].get(); + if (target_backend == nullptr) { + Module::initBackend(backend_type); + target_backend = Backend::global_backends[backend_type].get(); + assert(target_backend != nullptr && "Target backend is not initialized."); + } + // { + // std::cout << name() << ", changing backend from " << device() << " to " << backend_type << std::endl; // debug log + // } + impl_->to(target_backend); + return *this; +}; + +bool is_kvcached_tensor(const std::shared_ptr &tensor) { + if (tensor == nullptr) return false; + if (auto master = tensor->masterTensor()) { // 调用新的 masterTensor() + return master->name().find("Cache") != std::string::npos; + } + return false; +} + +/** + * @brief Allocates a single, non-aggregated tensor, deciding between KVCache or standard allocation. + * @param module The current module. 
+ * @param backend The current backend. + * @param standard_alloc_func The function to call for standard allocation. + */ +void Tensor::_allocate_final_tensor( + const std::shared_ptr &template_tensor, + Backend *backend) { + if (is_kvcached_tensor(template_tensor)) { + if (auto master_tensor_sp = template_tensor->masterTensor()) { + if (master_tensor_sp->name().find(".Cache") != std::string::npos && (master_tensor_sp->batch() != batch())) { + KVCache_batch = batch(); + master_tensor_sp->reshape(KVCache_batch, master_tensor_sp->head(), + master_tensor_sp->sequence(), master_tensor_sp->dimension()); + master_tensor_sp->setName(name() + ".Cache"); + master_tensor_sp->alloc(); + + switch (master_tensor_sp->dtype()) { + case MLLM_TYPE_F32: + memset(master_tensor_sp->hostPtr(), 0, master_tensor_sp->count() * sizeof(float)); + break; + case MLLM_TYPE_F16: + memset(master_tensor_sp->hostPtr(), 0, master_tensor_sp->count() * sizeof(mllm_fp16_t)); + break; + case MLLM_TYPE_Q8_0: + memset((char *)master_tensor_sp->rawHostPtr(), 0, + master_tensor_sp->count() * sizeof(block_q8_0) / QK8_0); + break; + default: + break; + }; + } + auto cache_seq_len_ = template_tensor->shapeOffset()[2]; + + if (name().find("cache") == std::string::npos) { + cache_seq_len_ = master_tensor_sp->cache_seq_len_; + auto cpu_backend = dynamic_cast(backend); + if (cpu_backend && cpu_backend->isUsingDraft()) { + unsigned int last_draft_length = cpu_backend->getLastDraftLength(); + const auto &last_verified_position_ids = cpu_backend->getLastVerifiedPositionIds(); + cache_seq_len_ = cache_seq_len_ - last_draft_length + last_verified_position_ids.size(); + } + } + setDtype(master_tensor_sp->dtype()); + shallowCopyFrom(master_tensor_sp, false, {0, 0, (int)cache_seq_len_, 0}); + } else { + setDtype(template_tensor->dtype()); + alloc(); + } + } else { + if (template_tensor != nullptr) { + setDtype(template_tensor->dtype()); + } + alloc(); + } +} +/** + * @brief Handles the allocation and setup for an output 
tensor that is part of an aggregated tensor structure. + * @param template_tensor The corresponding tensor from the activation map, which holds aggregation info. + * @param module The current module. + * @param backend The current backend. + */ +void Tensor::_allocate_aggregated_tensor( + const std::shared_ptr &template_tensor, + Module *module, + Backend *backend) { + bool keep_aggregated_structure = false; + if (template_tensor->aggregatedDim() > 3) { + keep_aggregated_structure = true; // Cannot handle dimensions > 3 + } else { + for (const auto &ag_tensor : template_tensor->aggregatedTensors()) { + if (ag_tensor->ctype() != template_tensor->aggregatedTensors()[0]->ctype()) { //???我什么这么写?因为quant + keep_aggregated_structure = true; + break; + } + } + } + if (keep_aggregated_structure) { + vector> shared_outputs; + auto split_dim = template_tensor->aggregatedDim(); + const auto &ag_tensor = template_tensor->aggregatedTensors(); + for (int id = 0; id < ag_tensor.size(); ++id) { + const auto &child_tt = ag_tensor[id]; + auto shared_ot = std::make_shared(backend); + // shared_ot->setName(out_tensor->name() + ".split-" + std::to_string(id)); + assert(child_tt->name() == name() + ".split-" + std::to_string(id)); + shared_ot->setName(child_tt->name()); + shared_ot->setModule(module); + shared_ot->setCtype(child_tt->ctype()); + // Reshape based on the split dimension and the template tensor + switch (split_dim) { + case Chl::HEAD: + shared_ot->reshape(batch(), child_tt->head(), sequence(), dimension()); + break; + case Chl::SEQUENCE: + shared_ot->reshape(this->batch(), head(), child_tt->sequence(), dimension()); + break; + case Chl::DIMENSION: + shared_ot->reshape(batch(), head(), sequence(), child_tt->dimension()); + break; + case Chl::D_HD: + case Chl::HD: + shared_ot->reshape(batch(), child_tt->head(), sequence(), child_tt->dimension()); + break; + default: + break; // Should not happen + } + shared_ot->_allocate_final_tensor(child_tt, backend); + 
shared_outputs.push_back(shared_ot); + } + addTensors(shared_outputs, split_dim); + } else { + allowAggregated() = false; + alloc(); + } +} + +/** + * @brief Allocates memory for a tensor based on a template tensor. + * If the template tensor is aggregated, it allocates an aggregated tensor. + * Otherwise, it allocates a final tensor. + * @param template_tensor The template tensor to base the allocation on. + */ +void Tensor::allocFromTemplate(shared_ptr template_tensor) { + assert(backend() != nullptr); + if (template_tensor != nullptr && !template_tensor->aggregatedTensors().empty()) { + _allocate_aggregated_tensor(template_tensor, module(), backend()); + } else { + _allocate_final_tensor(template_tensor, backend()); + } +} + +/** + * @brief Runs a tensor function with the specified parameters. + * @param out_names The names for the output tensors. + * @param type The type of the tensor function to run. + * @param param The parameters for the tensor function. + * @param input_tensors The input tensors to the function. + * @param in_place Whether to run the function in-place. + * @return A vector of output tensors. + */ +std::vector Tensor::runFunc(std::vector out_names, + OpType type, + OpParam param, + std::vector input_tensors, + bool in_place) { + // auto start_time = mllm_time_us(); + // ==================== [开始] Op 缓存 ==================== + if (!input_tensors.empty()) { + for (auto &input : input_tensors) { + assert(input.backend() == input_tensors[0].backend() && "All inputs must have the same backend."); + } + } + auto backend = input_tensors.empty() ? 
Backend::global_backends[MLLM_CPU].get() : input_tensors[0].backend(); + //////////==============QNN only====================/////////// + if (Backend::global_backends.size() == 2 && Backend::global_backends.find(MLLM_QNN) != Backend::global_backends.end()) { // 针对QNN的特殊处理 + // backend = Backend::global_backends[MLLM_QNN].get(); + backend = Backend::global_backends[MLLM_CPU].get(); // 想不到吧 + } + //////////==============QNN only====================/////////// + // 1. 使用更高效的键生成方式 + static std::unordered_map> op_cache; // 改用size_t作为键类型 + param["type"] = type; + std::shared_ptr op_to_run; + // 2. 使用更高效的哈希键生成 + static auto hash_combine = [](size_t seed, const auto &v) { + seed ^= std::hash>{}(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2); + return seed; + }; + size_t key = std::hash{}(static_cast(type)); + for (const auto &pair : param) { + key = hash_combine(key, pair.first); + key = hash_combine(key, pair.second); + } + // 3. 查找缓存 - 现在使用更快的size_t哈希查找 + auto it = op_cache.find(key); + if (it != op_cache.end()) { + op_to_run = it->second; + if (op_to_run->backend() != backend) { + backend = op_to_run->backend(); + } + } else { + std::unique_ptr op_new(backend->opCreate(param, "")); + if (!op_new) { + backend = Backend::global_backends[MLLM_CPU].get(); + op_new.reset(backend->opCreate(param, "")); + } + op_to_run = std::move(op_new); + op_cache[key] = op_to_run; + } + // ==================== [结束] Op 缓存 ==================== + // Module *module = Module::llm_model_ptr; + // if (module && !module->doTrace) { + // auto end_time = mllm_time_us(); + // string name_o = out_names.empty() ? "out-" + input_tensors[0].name() : out_names[0]; + // std::cout << name_o << " dispatch Func: " << type << " in " << (end_time - start_time) / 1000.0F << " ms" << std::endl; + // } + // 4. 
使用缓存的或新创建的 Op 执行计算 + if (Backend::global_backends.size() == 2 && Backend::global_backends.find(MLLM_QNN) != Backend::global_backends.end()) { // 针对QNN的特殊处理 + backend = Backend::global_backends[MLLM_QNN].get(); // 想不到吧 + } + return backend->runOp(op_to_run.get(), input_tensors, out_names, in_place); +} + +Tensor Tensor::operator+(float data) { + OpParam param; + param["data"] = data; + return runFunc({name() + "-add"}, F_ADD, param, + {*this})[0]; +} + +Tensor Tensor::operator-(float data) { + OpParam param; + param["data"] = data; + return runFunc({name() + "-sub"}, F_SUB, param, + {*this})[0]; +} + +Tensor Tensor::operator*(float data) { + OpParam param; + param["data"] = data; + return runFunc({name() + "-mul"}, F_MUL, param, + {*this})[0]; +} + +Tensor Tensor::operator/(float data) { + OpParam param; + param["data"] = data; + return runFunc({name() + "-div"}, F_DIV, param, + {*this})[0]; +} + +Tensor Tensor::operator/(double data) { + OpParam param; + param["data"] = static_cast(data); + return runFunc({name() + "-div"}, F_DIV, param, + {*this})[0]; +} + +Tensor Tensor::operator/(int data) { + OpParam param; + param["data"] = (float)data; + return runFunc({name() + "-div"}, F_DIVINT, param, + {*this})[0]; +} + +Tensor Tensor::operator+(Tensor other) { + return runFunc({name() + "-TTadd"}, F_TTADD, {}, + {*this, other})[0]; +} + +Tensor Tensor::operator-(Tensor other) { + return runFunc({name() + "-TTsub"}, F_TTSUB, {}, + {*this, other})[0]; +} + +Tensor Tensor::operator*(Tensor other) { + return runFunc({name() + "-TTmul"}, F_TTMUL, {}, + {*this, other})[0]; +} + +Tensor Tensor::operator/(Tensor other) { + return runFunc({name() + "-TTdiv"}, F_TTDIV, {}, + {*this, other})[0]; +} +Tensor Tensor::operator~() { + return runFunc({name() + "~"}, TILDE, {}, {*this})[0]; +} + +Tensor Tensor::mean(Chl axis) { + OpParam param; + param["axis"] = (float)axis; + return runFunc({name() + "-mean"}, F_MEAN, param, + {*this})[0]; +} + +Tensor Tensor::view(int b, int h, int s, 
int d, bool in_place) { + OpParam param; + param["b"] = (float)b; + param["h"] = (float)h; + param["s"] = (float)s; + param["d"] = (float)d; + return runFunc({name() + "-view"}, F_VIEW, param, + {*this}, in_place)[0]; +} + +Tensor Tensor::flatten(Chl axis_start, Chl axis_end) { + OpParam param; + param["axis_start"] = (float)axis_start; + param["axis_end"] = (float)axis_end; + return runFunc({name() + "-flatten"}, F_FLATTEN, param, + {*this}, true)[0]; +} + +Tensor Tensor::transpose(vector> axiss) { + OpParam param; + param["num_pairs"] = (float)axiss.size(); + int idx = 0; + for (auto &axis : axiss) { + param["axis1_" + std::to_string(idx)] = (float)axis.first; + param["axis2_" + std::to_string(idx)] = (float)axis.second; + idx++; + } + bool in_place = (master_tensor_.expired() || (master_tensor_.lock()->name().find("Cache") == std::string::npos && master_tensor_.lock()->name().find("weight") != std::string::npos)); + // for BSHD attention start + if (Module::llm_model_ptr == nullptr || backend()->type() != MLLM_CPU || (axiss.size() == 1 && axiss[0].first == HEAD && axiss[0].second == SEQUENCE)) { + in_place = false; // in-place transpose + } + // for BSHD attention end + return runFunc({name() + "-transpose"}, F_TRANPOSE, param, + {*this}, in_place)[0]; +} + +Tensor Tensor::clip(vector b, vector h, vector s, vector d) { + OpParam param; + param["b_size"] = (float)b.size(); + param["h_size"] = (float)h.size(); + param["s_size"] = (float)s.size(); + param["d_size"] = (float)d.size(); + for (int i = 0; i < b.size(); ++i) param["b_" + std::to_string(i)] = (float)b[i]; + for (int i = 0; i < h.size(); ++i) param["h_" + std::to_string(i)] = (float)h[i]; + for (int i = 0; i < s.size(); ++i) param["s_" + std::to_string(i)] = (float)s[i]; + for (int i = 0; i < d.size(); ++i) param["d_" + std::to_string(i)] = (float)d[i]; + string name_su = "-clip-"; + if (!(d.empty() && b.empty() && h.empty() && s.empty())) { + for (auto as : param) { + name_su += 
std::to_string(int(as.second)) + "_"; + } + } + return runFunc({name() + name_su}, F_CLIP, param, + {*this})[0]; +} + +Tensor Tensor::clip(Chl keep_axis, vector b, vector h, vector s, vector d) { + OpParam param; + param["axis"] = (float)keep_axis; + param["b_size"] = (float)b.size(); + param["h_size"] = (float)h.size(); + param["s_size"] = (float)s.size(); + param["d_size"] = (float)d.size(); + for (int i = 0; i < b.size(); ++i) param["b_" + std::to_string(i)] = (float)b[i]; + for (int i = 0; i < h.size(); ++i) param["h_" + std::to_string(i)] = (float)h[i]; + for (int i = 0; i < s.size(); ++i) param["s_" + std::to_string(i)] = (float)s[i]; + for (int i = 0; i < d.size(); ++i) param["d_" + std::to_string(i)] = (float)d[i]; + return runFunc({name() + "-clipaxis"}, F_CLIPAXIS, param, + {*this})[0]; +} + +Tensor Tensor::clip(vector index, Chl dim) { + Tensor index_tensor(1, 1, 1, index.size(), impl_->backend_, false); + index_tensor.alloc(); + for (size_t i = 0; i < index.size(); ++i) { + index_tensor.setDataAt(0, 0, 0, i, static_cast(index[i])); + } + index_tensor.setName(name() + "-cliptensor-index"); + OpParam param; + param["dim"] = (float)dim; + return runFunc({name() + "-cliptensor"}, F_CLIPTENSOR, param, + {*this, index_tensor})[0]; +} +Tensor Tensor::clip(Tensor index, Chl dim) { + OpParam param; + param["dim"] = (float)dim; + return runFunc({name() + "-cliptensor"}, F_CLIPTENSOR, param, + {*this, index})[0]; +} +Tensor Tensor::expand(int b, int h, int s, int d) { + OpParam param; + param["b"] = (float)b; + param["h"] = (float)h; + param["s"] = (float)s; + param["d"] = (float)d; + return runFunc({name() + "-expand"}, F_EXPPAND, param, + {*this})[0]; +} + +Tensor Tensor::norm(int L_n) { + OpParam param; + param["L_n"] = (float)L_n; + return runFunc({name() + "-norm"}, F_NORM, param, + {*this})[0]; +} + +Tensor Tensor::where(float value, Chl axis) { + OpParam param; + param["value"] = value; + param["axis"] = axis; + return runFunc({name() + "-where"}, F_WHERE, 
param, + {*this})[0]; +} + +Tensor Tensor::index_put(Tensor value, Tensor indices, bool accumulate) { + OpParam param; + param["accumulate"] = (float)accumulate; + return runFunc({name() + "-index_put"}, F_INDEX_PUT, param, + {*this, value, indices}, + !accumulate)[0]; +} +void Tensor::scatter_add(Tensor value, Tensor indices, Chl dim) { + OpParam param; + runFunc({}, F_SCATTERRADD, param, + {*this, value, indices})[0]; +} +void Tensor::scatter_(Chl dim, Tensor index, float src) { + OpParam param; + param["dim"] = (float)dim; + param["value"] = src; + runFunc({}, SCATTER, param, + {*this, index})[0]; +} +Tensor Tensor::cat(vector input_tensors, Chl axis) { + OpParam param; + param["axis"] = (float)axis; + Module *module = input_tensors[0].module(); + vector inputs = {}; + for (auto &input_tensor : input_tensors) { + inputs.push_back(input_tensor); + } + return runFunc({input_tensors[0].name() + "-cat"}, F_CAT, param, inputs)[0]; +} + +Tensor Tensor::mm(Tensor input0, Tensor input1) { + Module *module = input0.module(); + string nname = input0.name() + "-mm-" + input1.name(); + return runFunc( + {nname}, F_MM, {}, + {input0, input1})[0]; +} + +Tensor Tensor::range(int start, int end) { + OpParam param; + param["start"] = (float)start; + param["end"] = (float)end; + return runFunc({"range-" + std::to_string(start) + "-" + std::to_string(end)}, F_RANGE, + param, {})[0]; +} + +vector Tensor::split(Tensor input, std::vector each_dims, + Chl split_dim, int same_dim_size) { + OpParam param; + vector next_names; + param["num_splits"] = (float)each_dims.size(); + for (int i = 0; i < each_dims.size(); ++i) { + param["dim_" + std::to_string(i)] = (float)each_dims[i]; + next_names.push_back(input.name() + ".split-" + std::to_string(i)); + } + param["split_dim"] = (float)split_dim; + param["head_size"] = (float)same_dim_size; + Module *module = input.module(); + return runFunc(next_names, F_SPLIT, param, + {input}); +} + +vector Tensor::topk(Tensor input, int k, Chl dim) { + 
Module *module = input.module(); + OpParam param; + param["k"] = (float)k; + param["dim"] = (float)dim; + return runFunc({input.name() + "-top" + std::to_string(k) + "-value", + input.name() + "-top" + std::to_string(k) + "-idx"}, + F_TOPK, + param, + {input}); +} +Tensor Tensor::sum(Chl dim) { + OpParam param; + param["dim"] = (float)dim; + return runFunc({name() + "-sum"}, F_SUM, param, + {*this})[0]; +} +Tensor Tensor::argsort() { + return runFunc({name() + "-argsort"}, F_ARGSORT, {}, + {*this})[0]; +} +Tensor Tensor::bincount() { + return runFunc({name() + "-bincount"}, F_BINCOUNT, {}, + {*this})[0]; +} +Tensor Tensor::repeat(Chl dim, int dim_size) { + OpParam param; + param["dim"] = (float)dim; + param["dim_size"] = (float)dim_size; + return runFunc({name() + "-repeat"}, F_REPEAT, param, + {*this})[0]; +} +Tensor Tensor::masked_fill(Tensor mask_index, float value) { + OpParam param; + param["value"] = value; + return runFunc({name() + "-masked_fill"}, MASKEDFILL, param, {*this, mask_index})[0]; +} +Tensor Tensor::gather(Tensor input, Tensor index, Chl dim) { + OpParam param; + param["dim"] = dim; + return runFunc({input.name() + "-gather"}, GATHER, param, {input, index})[0]; +} +Tensor Tensor::zero_like(Tensor input) { + Module *module = input.module(); + OpParam param; + param["like_value"] = 0.0f; + return runFunc({input.name() + "-zero_like"}, F_LIKE, param, + {input})[0]; +} +Tensor Tensor::flash_attention2_forward(Tensor q, Tensor k, Tensor v, bool causal_mask) { + Module *module = q.module(); + OpParam param; + param["causal_mask"] = causal_mask ? 1.0f : 0.0f; + return runFunc({q.name() + "-" + k.name() + "-fa2"}, F_FA2, param, + {q, k, v})[0]; +}; +Tensor Tensor::sage_attention_forward(Tensor q, Tensor k, Tensor v, bool causal_mask) { + Module *module = q.module(); + OpParam param; + param["causal_mask"] = causal_mask ? 
1.0f : 0.0f; + return runFunc({q.name() + "-" + k.name() + "-sage_attn"}, F_SAGEATTN, param, + {q, k, v})[0]; +}; +Tensor Tensor::apply_rotary_pos_emb_vision(Tensor input, Tensor rotary_pos_emb) { + Module *module = input.module(); + return runFunc({input.name() + "-apply_rotary_pos_emb"}, F_APPLY_VISIOROPE, + {}, + {input, rotary_pos_emb})[0]; +} + +Tensor Tensor::fuyu_gather_embd(Tensor word, Tensor image_patches, Tensor image_patches_indices) { + Module *module = word.module(); + return runFunc({word.name() + ".fuyu_gather_embd"}, F_FUYU_GATHER_EMBD, + {}, + {word, image_patches, image_patches_indices}, + true)[0]; +} + +Tensor Tensor::phi3v_hd_merge(Tensor input, int h_crop, int w_crop) { + Module *module = input.module(); + OpParam param; + param["h_crop"] = (float)h_crop; + param["w_crop"] = (float)w_crop; + // The input tensor should be in BTHWC format + return runFunc({input.name() + ".phi3v_hd_merge"}, F_PHI3V_HD_MERGE, + param, + {input})[0]; +} + +} // namespace mllm \ No newline at end of file diff --git a/src/Tensor.hpp b/mllm/Tensor.hpp similarity index 70% rename from src/Tensor.hpp rename to mllm/Tensor.hpp index 429fedd7f..94168788e 100644 --- a/src/Tensor.hpp +++ b/mllm/Tensor.hpp @@ -1,8 +1,8 @@ #ifndef MLLM_TENSOR_H #define MLLM_TENSOR_H -#include -#include "Backend.hpp" -#include "OpDefined.hpp" +// #include +#include "DataType.hpp" +#include "backends/cpu/third_party/ggml/QuantizeFP16.hpp" #include #include #include @@ -60,28 +60,50 @@ class Module; * then the size of SEQUENCE dimension of the first Tensor is 2, the size of SEQUENCE dimension of the second Tensor is 1. 
* */ -class Tensor { - std::shared_ptr impl_; // 核心:使用shared_ptr管理实现 +class QuantParam { +public: + QuantParam() : + scale(0.0f), zero_point(0) { + } + QuantParam(float s, int zp) : + scale(s), zero_point(zp) { + } + + float scale; // quantization scale + int zero_point; // quantization zero point +}; + +class Tensor : public std::enable_shared_from_this { +protected: + std::shared_ptr impl_; // 核心:使用shared_ptr管理实现 +private: // used for ChildTensor vector shape_offset_; vector shape_master_; - Tensor *master_tensor_ = nullptr; - vector child_tensors_; + std::weak_ptr master_tensor_; + vector> child_tensors_; // AggregatedTensor相关 bool aggregated_ = false; + bool allow_aggregated_ = true; vector> aggregated_tensors_; Tensor *deaggregated_tensor_ = nullptr; Chl aggregated_dim_; vector aggregated_dims_; + vector seq_means_; + TensorType ttype_ = NORMAL_TENSOR; uint32_t uuid_ = 4294967295U; TensorType xnn_tensor_type_ = TensorType::NORMAL_TENSOR; public: int cache_seq_len_; + QuantParam quant_param; + bool inited() { + return impl_ != nullptr && impl_->host_ptr_ != nullptr && impl_->name_ != ""; + } public: // 拷贝语义:默认浅拷贝(共享实现) @@ -108,7 +130,7 @@ class Tensor { * \param sequence tokens numbers in a sequence * \param dimension the hidden size */ - explicit Tensor(int batch, int head, int sequence, int dimension); + // explicit Tensor(int batch, int head, int sequence, int dimension); explicit Tensor(int batch, int head, int sequence, int dimension, Backend *bn, bool do_alloc = true); explicit Tensor(int batch, int head, int sequence, int dimension, BackendType bn_type = MLLM_CPU, bool do_alloc = true); /** @@ -125,7 +147,17 @@ class Tensor { Tensor(vector values, BackendType bn_type = MLLM_CPU); - ~Tensor() = default; + ~Tensor() { + if (auto master = master_tensor_.lock()) { + auto &children = master->childTensors(); + children.erase( + std::remove_if(children.begin(), children.end(), + [this](const std::weak_ptr &wp) { + return wp.expired() || wp.lock().get() == this; 
+ }), + children.end()); + } + } public: static TensorStatus tensor_status; @@ -160,6 +192,11 @@ class Tensor { impl_->free(); } } + void unload() { + if (impl_) { + impl_->unload(); + } + } /** * \brief get the number of bytes occupied by Tensor's data in memory. @@ -237,6 +274,8 @@ class Tensor { auto s_ = (s + shape_offset_[2]) % base_sequence_; auto d_ = (d + shape_offset_[3]) % base_dimension_; switch (impl_->ctype_) { + case BHSD: + return ((b_ * base_head_ + h_) * base_sequence_ + s_) * base_dimension_ + d_; case BSHD: return ((b_ * base_sequence_ + s_) * base_head_ + h_) * base_dimension_ + d_; case BHDS: @@ -252,6 +291,8 @@ class Tensor { } } else { switch (impl_->ctype_) { + case BHSD: + return ((b * impl_->shape_[1] + h) * impl_->shape_[2] + s) * impl_->shape_[3] + d; case BSHD: return ((b * impl_->shape_[1] + s) * impl_->shape_[2] + h) * impl_->shape_[3] + d; case BHDS: @@ -289,26 +330,32 @@ class Tensor { } int sequenceSkipDim() const { - if (master_tensor_ != NULL) { - if (master_tensor_->master_tensor_ != NULL) { - auto shape = master_tensor_->master_tensor_->impl_->shape_; - if (master_tensor_->master_tensor_->impl_->ctype_ == BSHD) { + if (!master_tensor_.expired()) { + auto master = master_tensor_.lock(); + if (master && !master->master_tensor_.expired()) { + auto grandmaster = master->master_tensor_.lock(); + auto shape = grandmaster->impl_->shape_; + if (grandmaster->impl_->ctype_ == BSHD) { return shape[3] * shape[2]; - } else if (master_tensor_->master_tensor_->impl_->ctype_ == BHDS) { + } else if (grandmaster->impl_->ctype_ == BHSD) { return shape[3]; - } else if (master_tensor_->master_tensor_->impl_->ctype_ == BDHS) { + } else if (grandmaster->impl_->ctype_ == BHDS) { + return shape[3]; + } else if (grandmaster->impl_->ctype_ == BDHS) { return shape[3] * impl_->shape_[2]; } else { std::cout << "sequenceSkipDim() only support for BSHD and BHDS" << std::endl; return -1; } - } else { - auto shape = master_tensor_->impl_->shape_; - if 
(master_tensor_->impl_->ctype_ == BSHD) { + } else if (master) { + auto shape = master->impl_->shape_; + if (master->impl_->ctype_ == BSHD) { return shape[3] * shape[2]; - } else if (master_tensor_->impl_->ctype_ == BHDS) { + } else if (master->impl_->ctype_ == BHSD) { return shape[3]; - } else if (master_tensor_->impl_->ctype_ == BDHS) { + } else if (master->impl_->ctype_ == BHDS) { + return shape[3]; + } else if (master->impl_->ctype_ == BDHS) { return shape[3] * impl_->shape_[2]; } else { std::cout << "sequenceSkipDim() only support for BSHD and BHDS" << std::endl; @@ -318,6 +365,8 @@ class Tensor { } else { if (impl_->ctype_ == BSHD) { return impl_->shape_[3] * impl_->shape_[2]; + } else if (impl_->ctype_ == BHSD) { + return impl_->shape_[3]; } else if (impl_->ctype_ == BHDS) { return impl_->shape_[3]; } else if (impl_->ctype_ == BDHS) { @@ -332,6 +381,7 @@ class Tensor { } // return shape_[3]*shape_[2]; } + return -1; } /** @@ -352,6 +402,27 @@ class Tensor { return (Dtype *)impl_->host_ptr_; } + /** + * @brief 获取设备内存的通用描述符。 + * @return DeviceMemory& 对设备内存描述符的引用。 + */ + DeviceMemory &device_memory() { + if (backend() == nullptr || backend()->type() == MLLM_CPU) { + throw std::runtime_error("Device memory is not available for CPU backend."); + } + return impl_->device_memory_; + } + + /** + * @brief 获取设备内存的通用描述符 (const 版本)。 + * @return const DeviceMemory& 对设备内存描述符的常量引用。 + */ + const DeviceMemory &device_memory() const { + if (backend() == nullptr || backend()->type() == MLLM_CPU) { + throw std::runtime_error("Device memory is not available for CPU backend."); + } + return impl_->device_memory_; + } /** * \brief Get the data at the specified position. * \tparam Dtype Data type, such as float, mllm_fp16_t, etc. 
@@ -526,6 +597,28 @@ class Tensor { }); return shape_int; } + int shape(Chl axis) const { + switch (axis) { + case Chl::BATCH: + return impl_->shape_[impl_->chls_[BATCH]]; + case Chl::HEAD: + return impl_->shape_[impl_->chls_[HEAD]]; + case Chl::SEQUENCE: + return impl_->shape_[impl_->chls_[SEQUENCE]]; + case Chl::DIMENSION: + return impl_->shape_[impl_->chls_[DIMENSION]]; + // case TIME: + // return impl_->shape_[impl_->chls_[TIME]]; + // case HEIGHT: + return impl_->shape_[impl_->chls_[HEIGHT]]; + case Chl::WIDTH: + return impl_->shape_[impl_->chls_[WIDTH]]; + // case CHANNLE: + // return impl_->shape_[impl_->chls_[CHANNLE]]; + default: + throw std::invalid_argument("Invalid axis for shape retrieval"); + } + } ChlType ctype() const { return impl_->ctype_; @@ -533,6 +626,12 @@ class Tensor { void setCtype(ChlType type) { impl_->ctype_ = type; switch (impl_->ctype_) { + case BHSD: + impl_->chls()[BATCH] = 0; + impl_->chls()[HEAD] = 1; + impl_->chls()[SEQUENCE] = 2; + impl_->chls()[DIMENSION] = 3; + break; case BSHD: impl_->chls()[BATCH] = 0; impl_->chls()[SEQUENCE] = 1; @@ -711,7 +810,9 @@ class Tensor { void setUndiffusion(bool undiffusion) { impl_->undiffusion_ = undiffusion; for (auto &child_tensor : child_tensors_) { - child_tensor->impl_->undiffusion_ = undiffusion; + if (!child_tensor.expired()) { + child_tensor.lock()->impl_->undiffusion_ = undiffusion; + } } } @@ -723,15 +824,28 @@ class Tensor { return impl_->should_in_graphs_; } + vector &seqMeans() { + if (!master_tensor_.expired()) { + return master_tensor_.lock()->seq_means_; + } + return seq_means_; + } + static Tensor zeros(int batch, int head, int sequence, int dimension, BackendType bn_type = MLLM_CPU) { Tensor tensor1(batch, head, sequence, dimension, bn_type, true); - memset(tensor1.hostPtr(), 0, tensor1.count() * sizeof(float)); + std::fill(tensor1.hostPtr(), tensor1.hostPtr() + tensor1.count(), 0); tensor1.shouldInGraphs() = false; return tensor1; } static Tensor ones(int batch, int head, int 
sequence, int dimension, BackendType bn_type = MLLM_CPU) { Tensor tensor1(batch, head, sequence, dimension, bn_type, true); - memset(tensor1.hostPtr(), 1, tensor1.count() * sizeof(float)); + std::fill(tensor1.hostPtr(), tensor1.hostPtr() + tensor1.count(), 1); + tensor1.shouldInGraphs() = false; + return tensor1; + } + static Tensor full(int batch, int head, int sequence, int dimension, float data, BackendType bn_type = MLLM_CPU) { + Tensor tensor1(batch, head, sequence, dimension, bn_type, true); + std::fill(tensor1.hostPtr(), tensor1.hostPtr() + tensor1.count(), data); tensor1.shouldInGraphs() = false; return tensor1; } @@ -746,8 +860,8 @@ class Tensor { Tensor operator*(float data); Tensor operator/(float data); Tensor operator/(double data); - Tensor operator/(int data); + Tensor operator~(); /** * \brief Overload the operators. @@ -761,7 +875,7 @@ class Tensor { Tensor mean(Chl axis); - Tensor view(int b, int h, int s, int d); + Tensor view(int b, int h, int s, int d, bool in_place = true); Tensor flatten(Chl axis_start, Chl axis_end); Tensor transpose(Chl axis0, Chl axis1) { return transpose({{axis0, axis1}}); @@ -769,6 +883,7 @@ class Tensor { Tensor transpose(vector> axiss); Tensor clip(vector b, vector h, vector s, vector d); Tensor clip(Chl keep_axis, vector b, vector h, vector s, vector d); + Tensor clip(vector index, Chl dim); Tensor clip(Tensor index, Chl dim); Tensor expand(int b, int h, int s, int d); static Tensor cat(vector input_tensors, Chl dims); @@ -781,13 +896,21 @@ class Tensor { return split(*this, each_dims, split_dim, same_dim_size); } Tensor index_put(Tensor value, Tensor indices, bool accumulate); - void scatter_reduce(Tensor value, Tensor indices); + void scatter_add(Tensor value, Tensor indices, Chl dim = SEQUENCE); + void scatter_(Chl dim, Tensor index, float src); static vector topk(Tensor input, int k, Chl dim); + vector topk(int k, Chl dim) { + return topk(*this, k, dim); + } Tensor sum(Chl dim); Tensor argsort(); Tensor 
bincount(); Tensor repeat(Chl dim, int dim_size); + Tensor masked_fill(Tensor mask, float value); + static Tensor gather(Tensor input, Tensor index, Chl dim); static Tensor zero_like(Tensor input); + static Tensor flash_attention2_forward(Tensor q, Tensor k, Tensor v, bool is_causal = true); + static Tensor sage_attention_forward(Tensor q, Tensor k, Tensor v, bool causal_mask = false); static Tensor apply_rotary_pos_emb_vision(Tensor input, Tensor rotary_pos_emb); // models use only @@ -804,147 +927,67 @@ class Tensor { * - addChildTensor */ + // 新增一个方法,用于强制设置指针并转移所有权句柄 + // 这是比将 ParamLoader 设为友元类更清晰的做法 + void setHostPtr(void *ptr, std::shared_ptr memory_handle) { + // 如果 Tensor 已经持有自己分配的内存,则先释放它 + if (impl_->host_ptr_ != nullptr && impl_->owns_host_ptr_) { + impl_->free(); + } + // 接管来自 mmap 的新指针和内存句柄 + impl_->host_ptr_ = ptr; + impl_->owns_host_ptr_ = false; // 标记内存为外部管理 + impl_->memory_handle_ = std::move(memory_handle); // 持有 mmap 句柄 + impl_->allocated_ = count(); // 标记为已分配状态 + } + /** - * \brief this Tensor is a DEEPCOPY of source, only used for ChildTensor. - * \param source MasterTensor. - * \param shape_offset the offset of each dimension of ChildTensor compared to MasterTensor. - * \param head_rep the repeat number of heads of ChildTensor compared to MasterTensor. - * used for repeat the head of K/V in Transformer-based LLMs. Default is 1. 
+ * @brief 使当前 Tensor 成为 source Tensor 的一个子 Tensor (Shallow Copy)。 + * 它不分配新内存,而是共享 source 的内存。 + * @param source 将要成为父 Tensor 的张量。 + * @param copyshape 如果为 true 且 shape_offset 为空,则直接复制 source 的形状。 + * @param shape_offset 定义子 Tensor 相对于父 Tensor 的维度偏移,用于创建切片(slice)。 + * @param head_rep 用于分组查询注意力(GQA),表示K/V头的重复次数。 */ - void shallowCopyFrom(Tensor *source, bool copyshape = true, const vector &shape_offset = {}, int head_rep = 1) { + void shallowCopyFrom(std::shared_ptr source, bool copyshape = true, const vector &shape_offset = {}, int head_rep = 1) { + // 步骤 0: 初始设置 + // 如果提供了偏移量,则子张量有自己的独立形状,不应复制父张量的形状。 if (!shape_offset.empty()) { copyshape = false; } - setMasterTensor(source); - if (impl_->ctype_ != BCTHW && impl_->ctype_ != BTHWC && impl_->ctype_ != master_tensor_->ctype() && impl_->undiffusion_ == false) { - if (impl_->transed_) { // child tensor have been transed(BSHD->BHDS); - auto b = master_tensor_->batch(); - auto h = master_tensor_->head(); - auto d = master_tensor_->dimension(); - auto s = master_tensor_->sequence(); - master_tensor_->impl_->ctype_ = impl_->ctype_; - master_tensor_->impl_->chls_ = impl_->chls_; - master_tensor_->reshape(b, h, s, d); - } else { - auto b = batch(); - auto h = head(); - auto d = dimension(); - auto s = sequence(); - impl_->ctype_ = master_tensor_->impl_->ctype_; - impl_->chls_ = master_tensor_->impl_->chls_; - reshape(b, h, s, d); - } - } else if (child_tensors_.size() == 1 && child_tensors_[0]->ctype() == master_tensor_->impl_->ctype_ && ctype() != master_tensor_->impl_->ctype_) { - auto b = child_tensors_[0]->batch(); - auto h = child_tensors_[0]->head(); - auto s = child_tensors_[0]->sequence(); - auto d = child_tensors_[0]->dimension(); - auto origin_c_0 = child_tensors_[0]->impl_->chls_; - auto origin_c_1 = impl_->chls_; - impl_->chls_ = master_tensor_->impl_->chls_; - child_tensors_[0]->impl_->chls_ = master_tensor_->impl_->chls_; - for (int i = impl_->trans_from_.size() - 1; i >= 0; --i) { - auto tf = 
impl_->trans_from_[i]; - auto axis0 = tf.first; - auto axis1 = tf.second; - auto ori_0_idx = child_tensors_[0]->impl_->chls()[axis0]; - auto ori_1_idx = child_tensors_[0]->impl_->chls()[axis1]; - child_tensors_[0]->impl_->chls()[axis0] = ori_1_idx; - child_tensors_[0]->impl_->chls()[axis1] = ori_0_idx; - } - changeCtype(); - child_tensors_[0]->changeCtype(); - child_tensors_[0]->reshape(b, h, s, d); - transCopyShape(child_tensors_[0]->shape()); - } else if (child_tensors_.size() == 1 && child_tensors_[0]->ctype() == BCTHW && master_tensor_->impl_->ctype_ == BSHD && ctype() != BCTHW) { - auto b = child_tensors_[0]->batch(); - auto c = child_tensors_[0]->channel(); - auto t = child_tensors_[0]->time(); - auto h = child_tensors_[0]->height(); - auto w = child_tensors_[0]->width(); - auto origin_c_0 = child_tensors_[0]->impl_->chls_; - auto origin_c_1 = impl_->chls_; - - impl_->chls_ = {{BATCH, 0}, {CHANNLE, 1}, {TIME, 2}, {HEIGHT, 3}, {WIDTH, 4}}; - child_tensors_[0]->impl_->chls_ = {{BATCH, 0}, {CHANNLE, 1}, {TIME, 2}, {HEIGHT, 3}, {WIDTH, 4}}; - for (int i = impl_->trans_from_.size() - 1; i >= 0; --i) { - auto tf = impl_->trans_from_[i]; - auto axis0 = tf.first; - auto axis1 = tf.second; - auto ori_0_idx = child_tensors_[0]->impl_->chls()[axis0]; - auto ori_1_idx = child_tensors_[0]->impl_->chls()[axis1]; - child_tensors_[0]->impl_->chls()[axis0] = ori_1_idx; - child_tensors_[0]->impl_->chls()[axis1] = ori_0_idx; - } - changeCtype(); - child_tensors_[0]->changeCtype(); - child_tensors_[0]->reshape(b, c, t, h, w); - transCopyShape(child_tensors_[0]->shape()); - } + setMasterTensor(source); // 建立父子关系的第一步 + + // 步骤 1: 同步父子 Tensor 间的内存布局 (ctype) + reconcileLayouts(source.get()); + + // 步骤 2: 核心浅拷贝操作 - 共享数据指针和元数据 impl_->host_ptr_ = source->hostPtr(); - impl_->owns_host_ptr_ = false; // 子Tensor不拥有所有权 + impl_->memory_handle_ = source->impl_->memory_handle_; + impl_->owns_host_ptr_ = false; + impl_->device_memory_ = source->impl_->device_memory_; + 
impl_->owns_device_memory_ = false; impl_->capacity_ = source->impl_->capacity_; impl_->count_ = source->impl_->count_; + impl_->allocated_ = source->impl_->allocated_; + impl_->dtype_ = source->impl_->dtype_; if (copyshape) { impl_->shape_ = source->impl_->shape_; } - impl_->allocated_ = source->impl_->allocated_; - impl_->dtype_ = source->impl_->dtype_; + + // 步骤 3: 处理切片(offset)和GQA逻辑 if (!shape_offset.empty()) { - shape_master_ = {(uint64_t)source->batch(), - (uint64_t)source->head(), - (uint64_t)source->sequence(), - (uint64_t)source->dimension()}; - shape_offset_ = {(uint64_t)shape_offset[0], - (uint64_t)shape_offset[1], - (uint64_t)shape_offset[2], - (uint64_t)shape_offset[3]}; - if (!std::equal(source->impl_->chls_.begin(), source->impl_->chls_.end(), impl_->chls_.begin()) && impl_->chls()[SEQUENCE] == source->impl_->chls()[DIMENSION] && source->impl_->chls()[SEQUENCE] == impl_->chls()[DIMENSION]) { - shape_master_ = {(uint64_t)source->batch(), - (uint64_t)source->head(), - (uint64_t)source->dimension(), - (uint64_t)source->sequence()}; - shape_offset_ = {(uint64_t)shape_offset[0], - (uint64_t)shape_offset[1], - (uint64_t)shape_offset[3], - (uint64_t)shape_offset[2]}; - } - if (source->head() != head()) { // TODO: need to check - if (head() == 1 && head_rep == 1) { - shape_master_ = {(uint64_t)source->batch(), - (uint64_t)head(), - (uint64_t)source->sequence(), - (uint64_t)source->dimension() * source->head() / head()}; - } else if (head() == 1 && head_rep > 1) { - shape_master_ = {(uint64_t)source->batch(), - (uint64_t)head(), - (uint64_t)source->sequence(), - (uint64_t)source->dimension() * source->head() / head_rep}; - } - } + setupShapeForView(source.get(), shape_offset, head_rep); } - auto it = child_tensors_.begin(); - while (it != child_tensors_.end()) { - auto &child_tensor = *it; - auto origin_shape_offset = child_tensor->shapeOffset(); - if (!origin_shape_offset.empty()) { - if (!shape_offset.empty()) { - origin_shape_offset[2] = shape_offset[2]; - 
} - child_tensor->shallowCopyFrom(source, false, origin_shape_offset, head_rep); - } else if (!shape_offset.empty()) { - child_tensor->shallowCopyFrom(source, false, shape_offset, head_rep); - } else { - child_tensor->shallowCopyFrom(source, false, {}, head_rep); - } - it = child_tensors_.erase(it); - } - source->addChildTensor(this); - } - void shallowCopyFrom(Tensor &source, bool copyshape = true, const vector &shape_offset = {}, int head_rep = 1) { - shallowCopyFrom(&source, copyshape, shape_offset, head_rep); + + // 步骤 4: 维护张量层级结构 (处理孙张量) + reparentChildTensors(source, shape_offset, head_rep); + source->addChildTensor(shared_from_this()); } + // void shallowCopyFrom(Tensor &source, bool copyshape, const vector &shape_offset = {}, int head_rep = 1) { + // // 使用 source.shared_from_this() 从一个已经被 shared_ptr 管理的对象引用中,安全地获取其 shared_ptr。否则有use-after-free 的风险 + // shallowCopyFrom(source.shared_from_this(), copyshape, shape_offset, head_rep); + // } vector shapeOffset() const { std::vector shape_int(shape_offset_.size()); std::transform(shape_offset_.begin(), shape_offset_.end(), shape_int.begin(), [](uint64_t val) { @@ -963,18 +1006,23 @@ class Tensor { return shape_master_; } - Tensor *masterTensor() const { - return master_tensor_; + std::shared_ptr masterTensor() const { + return master_tensor_.lock(); } - void setMasterTensor(Tensor *master_tensor) { + void setMasterTensor(shared_ptr master_tensor) { master_tensor_ = master_tensor; } - vector &childTensors() { + vector> &childTensors() { return child_tensors_; } - void addChildTensor(Tensor *child) { - auto it = std::find(child_tensors_.begin(), child_tensors_.end(), child); + + void addChildTensor(std::shared_ptr child) { + auto it = std::find_if(child_tensors_.begin(), child_tensors_.end(), + [&](const std::weak_ptr &wp) { + return !wp.expired() && wp.lock() == child; + }); + if (it == child_tensors_.end()) { child_tensors_.push_back(child); } @@ -1060,6 +1108,13 @@ class Tensor { vector> &aggregatedTensors() { 
return aggregated_tensors_; } + void removeAggregatedTensors() { + aggregated_tensors_.clear(); + aggregated_ = false; + aggregated_dim_ = BATCH; + aggregated_dims_.clear(); + deaggregated_tensor_ = nullptr; + } Tensor *deaggregatedTensor() const { return deaggregated_tensor_; } @@ -1072,6 +1127,78 @@ class Tensor { } Tensor &to(BackendType backend_type); + Tensor to(DataType dtype) { + if (dtype == MLLM_TYPE_F16) { + return half(); + } else if (dtype == MLLM_TYPE_F32) { + return fp32(); + } else { + throw std::runtime_error("Unsupported dtype conversion."); + } + } + + Tensor &cpu() { + return to(MLLM_CPU); + } + Tensor &qnn() { + return to(MLLM_QNN); + } + Tensor &cl() { + return to(MLLM_OPENCL); + } + + Tensor half() { + if (dtype() == MLLM_TYPE_F16) { + return *this; + } + assert(dtype() == MLLM_TYPE_F32 && "Tensor::half() can only be called on an FP32 tensor."); + assert(master_tensor_.expired() && "Conversion not supported for child tensors."); + if (allocted()) { + Tensor half_tensor(backend()); + auto batch = this->batch(); + auto head = this->head(); + auto sequence = this->sequence(); + auto dimension = this->dimension(); + half_tensor.setDtype(MLLM_TYPE_F16); + half_tensor.setName(impl_->name_); + half_tensor.setCtype(impl_->ctype_); + half_tensor.reshape(batch, head, sequence, dimension); + half_tensor.alloc(); + backend()->convert_fp_data(this, &half_tensor); + return half_tensor; + } else { + impl_->dtype_ = MLLM_TYPE_F16; + return *this; + } + } + Tensor fp16() { + return half(); + } + Tensor fp32() { + if (dtype() == MLLM_TYPE_F32) { + return *this; + } + assert(dtype() == MLLM_TYPE_F16 && "Tensor::fp32() can only be called on an FP16 tensor."); + assert(master_tensor_.expired() && "Conversion not supported for child tensors."); + if (allocted()) { + Tensor fp32_tensor(backend()); + auto batch = this->batch(); + auto head = this->head(); + auto sequence = this->sequence(); + auto dimension = this->dimension(); + fp32_tensor.setDtype(MLLM_TYPE_F32); 
+ fp32_tensor.setName(impl_->name_); + fp32_tensor.setCtype(impl_->ctype_); + fp32_tensor.reshape(batch, head, sequence, dimension); + fp32_tensor.alloc(); + backend()->convert_fp_data(this, &fp32_tensor); + return fp32_tensor; + } else { + impl_->dtype_ = MLLM_TYPE_F16; + return *this; + } + } + static vector toDevice(vector inputs, BackendType backend_type) { for (auto &input : inputs) { if (input.device() != backend_type) { @@ -1090,6 +1217,31 @@ class Tensor { static void reshapeAllocCrossBn(Tensor &src_t, Tensor &dst_t); static void copyDataCrossBn(Tensor &src_t, Tensor &dst_t); +public: + uint32_t &uuid(); + + TensorType &xnnTensorType(); + + bool &allowAggregated() { + return allow_aggregated_; + } + + void forceResetHostPointer(void *ptr); + + float i8_scale = 1.f; + + void allocFromTemplate(shared_ptr template_tensor); + +private: + void _allocate_final_tensor( + const std::shared_ptr &template_tensor, + Backend *backend); + void _allocate_aggregated_tensor( + const std::shared_ptr &template_tensor, + Module *module, + Backend *backend); + +public: /* Functions used for 5-D Tensor: * - reshape * - channel @@ -1171,6 +1323,160 @@ class Tensor { } private: + /** + * @brief (辅助函数) 处理父子 Tensor 之间的内存布局 (ctype) 同步。 + * 这段逻辑直接从原始的 shallowCopyFrom 中提取,保留了所有边缘情况的处理。 + * @param master_tensor 新的父 Tensor。 + */ + void reconcileLayouts(Tensor *master_tensor) { + // 情况 1: 通用的4D张量布局同步 (非5D视觉张量) + // 条件: 父子 ctype 不一致,且允许布局变化从子张量“扩散”到父张量。 + if (impl_->ctype_ != BCTHW && impl_->ctype_ != BTHWC + && impl_->ctype_ != master_tensor->ctype() && !impl_->undiffusion_) { + if (impl_->transed_) { // 如果子张量(this)已被转置,则强制父张量跟随子的布局 + auto b = master_tensor->batch(); + auto h = master_tensor->head(); + auto d = master_tensor->dimension(); + auto s = master_tensor->sequence(); + master_tensor->impl_->ctype_ = impl_->ctype_; + master_tensor->impl_->chls_ = impl_->chls_; + master_tensor->reshape(b, h, s, d); + } else { // 否则,子张量跟随父张量的布局 + auto b = batch(); + auto h = head(); + auto d = 
dimension(); + auto s = sequence(); + impl_->ctype_ = master_tensor->impl_->ctype_; + impl_->chls_ = master_tensor->impl_->chls_; + reshape(b, h, s, d); + } + } + // 情况 2 和 情况 3 都需要访问 child_tensors_,所以需要保护 + if (child_tensors_.empty()) { + return; + } + // 情况 2: 处理三层张量结构中的布局冲突 (祖父 -> this -> 孙子) + if (auto child_sp = child_tensors_[0].lock()) { + Tensor *child = child_sp.get(); + + if (child->ctype() == master_tensor->impl_->ctype_ && ctype() != master_tensor->impl_->ctype_) { + auto b = child->batch(); + auto h = child->head(); + auto s = child->sequence(); + auto d = child->dimension(); + + impl_->chls_ = master_tensor->impl_->chls_; + child->impl_->chls_ = master_tensor->impl_->chls_; + + for (int i = impl_->trans_from_.size() - 1; i >= 0; --i) { + auto tf = impl_->trans_from_[i]; + std::swap(child->impl_->chls()[tf.first], child->impl_->chls()[tf.second]); + } + changeCtype(); + child->changeCtype(); + child->reshape(b, h, s, d); + transCopyShape(child->shape()); + } + // 情况 3: 处理从4D (LLM) 到5D (Vision) 张量的特殊布局转换 + else if (child->ctype() == BCTHW && master_tensor->impl_->ctype_ == BSHD && ctype() != BCTHW) { + auto b = child->batch(); + auto c = child->channel(); + auto t = child->time(); + auto h = child->height(); + auto w = child->width(); + + impl_->chls_ = {{BATCH, 0}, {CHANNLE, 1}, {TIME, 2}, {HEIGHT, 3}, {WIDTH, 4}}; + child->impl_->chls_ = {{BATCH, 0}, {CHANNLE, 1}, {TIME, 2}, {HEIGHT, 3}, {WIDTH, 4}}; + + for (int i = impl_->trans_from_.size() - 1; i >= 0; --i) { + auto tf = impl_->trans_from_[i]; + std::swap(child->impl_->chls()[tf.first], child->impl_->chls()[tf.second]); + } + changeCtype(); + child->changeCtype(); + child->reshape(b, c, t, h, w); + transCopyShape(child->shape()); + } + } + } + + /** + * @brief (辅助函数) 为作为 "View" 的子 Tensor 设置形状和偏移信息。 + * @param source 父 Tensor + * @param shape_offset 维度偏移 + * @param head_rep GQA 的头重复次数 + */ + void setupShapeForView(Tensor *source, const vector &shape_offset, int head_rep) { + // 记录父张量的原始维度,用于后续计算 
offset + shape_master_ = {(uint64_t)source->batch(), (uint64_t)source->head(), + (uint64_t)source->sequence(), (uint64_t)source->dimension()}; + shape_offset_ = {(uint64_t)shape_offset[0], (uint64_t)shape_offset[1], + (uint64_t)shape_offset[2], (uint64_t)shape_offset[3]}; + + // 如果父子布局转置了 (例如 BSHD vs BHDS), 需要同步调整 shape_master_ 和 shape_offset_ 的记录顺序 + if (!std::equal(source->impl_->chls_.begin(), source->impl_->chls_.end(), impl_->chls_.begin()) + && impl_->chls_[SEQUENCE] == source->impl_->chls_[DIMENSION] + && source->impl_->chls_[SEQUENCE] == impl_->chls_[DIMENSION]) { + std::swap(shape_master_[2], shape_master_[3]); // 交换 sequence 和 dimension + std::swap(shape_offset_[2], shape_offset_[3]); + } + + // 特殊处理 GQA (Grouped-Query Attention) + // 当子张量的头数量与父张量不同时,通常意味着 K/V cache 的共享。 + // 我们需要调整 shape_master_ 的 dimension 来反映这一点,确保偏移计算正确。 + if (source->head() != head()) { + if (head() == 1 && head_rep == 1) { // 可能是 MQA (Multi-Query Attention) + shape_master_ = {(uint64_t)source->batch(), (uint64_t)head(), (uint64_t)source->sequence(), (uint64_t)source->dimension() * source->head() / head()}; + } else if (head() == 1 && head_rep > 1) { // GQA + shape_master_ = {(uint64_t)source->batch(), (uint64_t)head(), (uint64_t)source->sequence(), (uint64_t)source->dimension() * source->head() / head_rep}; + } + } + } + + /** + * @brief (辅助函数) 重新指定当前 Tensor 的子 Tensor (孙张量) 的父节点。 + * 将它们从 this 的子节点变为 source 的直接子节点。 + * @param source 新的父(祖父)节点 + * @param shape_offset + * @param head_rep + */ + void reparentChildTensors(std::shared_ptr source, const vector &shape_offset, int head_rep) { + auto it = child_tensors_.begin(); + while (it != child_tensors_.end()) { + if (auto child_sp = it->lock()) { + /* + vector final_offset; + auto origin_shape_offset = child_sp->shapeOffset(); + if (!origin_shape_offset.empty()) { + final_offset = origin_shape_offset; + } else if (!shape_offset.empty()) { + final_offset = shape_offset; + } + child_sp->shallowCopyFrom(source, false, final_offset, 
head_rep); + */ + // merge qnn: + vector final_offset; + auto origin_shape_offset = child_sp->shapeOffset(); + if (!origin_shape_offset.empty()) { + if (!shape_offset.empty()) { + // 修改 origin_shape_offset 的第三个元素(索引为2) + origin_shape_offset[2] = shape_offset[2]; + } + final_offset = origin_shape_offset; // 使用修改后的 origin_shape_offset + } else if (!shape_offset.empty()) { + final_offset = shape_offset; + } else { + final_offset.clear(); // 或者保持默认的空 vector + } + child_sp->shallowCopyFrom(source, false, final_offset, head_rep); + + it = child_tensors_.erase(it); + } else { + it = child_tensors_.erase(it); + } + } + } + int checkDim(int &b, int &h, int &s, int &d) { if (!aggregated_) { return -1; @@ -1290,20 +1596,12 @@ class Tensor { // in_place=true: 只有输入, 输出==输入,返回输入 static std::vector runFunc(std::vector out_names, - TensorFuncType type, - std::vector float_args, - std::vector> input_tensors = {}, + OpType type, + OpParam param, + std::vector input_tensors = {}, bool in_place = false); public: - uint32_t &uuid(); - - TensorType &xnnTensorType(); - - void forceResetHostPointer(void *ptr); - - float i8_scale = 1.f; - /* Functions used for TEST & DEBUG * - checkData * - printShape @@ -1460,6 +1758,7 @@ class Tensor { template void printData() { + assert(backend()->type() == MLLM_CPU && "printData only support CPU backend."); if (ctype() == BTHWC || ctype() == BCTHW) { printData(); return; @@ -1512,7 +1811,6 @@ class Tensor { return; } // std::filesystem::create_directory("save_out"); - // string directory = "save_out"; struct stat info; #ifdef _WIN32 _mkdir(directory.c_str()); @@ -1538,11 +1836,59 @@ class Tensor { int C = head(); int H = sequence(); int W = dimension(); + + if (impl_->ctype_ == BHSD) { + for (int n = 0; n < batch(); ++n) { + for (int c = 0; c < head(); ++c) { + for (int h = 0; h < sequence(); ++h) { + for (int w = 0; w < dimension(); ++w) { + outFile << std::fixed << std::setprecision(6) << dataAt(n, c, h, w) << " "; + } + outFile << std::endl; + } + 
outFile << std::endl; + } + outFile << std::endl; + } + outFile.close(); + return; + } if (impl_->ctype_ == BSHD) { for (int n = 0; n < batch(); ++n) { for (int h = 0; h < sequence(); ++h) { for (int c = 0; c < head(); ++c) { for (int w = 0; w < dimension(); ++w) { + outFile << std::fixed << std::setprecision(6) << static_cast(dataAt(n, c, h, w)) << " "; + } + outFile << std::endl; + } + outFile << std::endl; + } + outFile << std::endl; + } + outFile.close(); + return; + } + if (N == 1 && C == 1) { + for (int h = 0; h < H; ++h) { + for (int c = 0; c < W; ++c) { + outFile << std::fixed << std::setprecision(6) << dataAt(0, 0, h, c) << " "; + } + outFile << std::endl; + outFile << "---------" << std::endl; + } + } else if (N == 1 && W == 1) { + for (int h = 0; h < H; ++h) { + for (int c = 0; c < C; ++c) { + outFile << std::fixed << std::setprecision(6) << dataAt(0, c, h, 0) << " "; + } + outFile << std::endl; + } + } else { + for (int n = 0; n < N; ++n) { + for (int h = 0; h < H; ++h) { + for (int c = 0; c < C; ++c) { + for (int w = 0; w < W; ++w) { outFile << std::fixed << std::setprecision(6) << dataAt(n, c, h, w) << " "; } outFile << std::endl; @@ -1551,6 +1897,116 @@ class Tensor { } outFile << std::endl; } + } + + outFile.close(); + } + + void saveQ4Data_d(string ex = "", string directory = "save_out") { + if (batch() == 0) { + return; + } + struct stat info; +#ifdef _WIN32 + _mkdir(directory.c_str()); +#else + if (stat(directory.c_str(), &info) != 0) { + if (stat(directory.c_str(), &info) != 0) { + mkdir(directory.c_str(), 0777); // notice that 0777 is different than usual + } else if (!(info.st_mode & S_IFDIR)) { + // if the path exists but it is not a directory, also create it + mkdir(directory.c_str(), 0777); // notice that 0777 is different than usual + } + } +#endif + std::ofstream outFile(directory + "/" + name() + ex + ".log"); + outFile << "----------------------------------------" << std::endl; + if (impl_->ctype_ == BSHD) { + outFile << name() << ": 
[BSHD]shape:[" << batch() << " " << sequence() << " " << head() << " " << dimension() << "] " << DataTypeName(dtype()) << " " << ctype() << std::endl; + } else { + outFile << name() << ": shape:[" << batch() << " " << head() << " " << sequence() << " " << dimension() << "] " << DataTypeName(dtype()) << " " << ctype() << std::endl; + } + + if (impl_->dtype_ != MLLM_TYPE_Q4_0) { + outFile << "Error: Tensor is not of type MLLM_TYPE_Q4_0." << std::endl; + outFile.close(); + return; + } + + block_q4_0 *data_ptr = hostPtr(); + if (data_ptr == nullptr) { + outFile << "Error: Host pointer is null." << std::endl; + outFile.close(); + return; + } + + const int W_blocks = dimension() / QK4_0; + + // 保持原始的循环结构。注意变量名: h 代表 sequence, c 代表 head。 + for (int n = 0; n < batch(); ++n) { + for (int h = 0; h < sequence(); ++h) { + for (int c = 0; c < head(); ++c) { + for (int w = 0; w < W_blocks; ++w) { + uint64_t block_offset = offset(n, c, h, w) / QK4_0; + block_q4_0 &data_block = data_ptr[block_offset]; + float da = MLLM_FP16_TO_FP32(data_block.d); + outFile << std::fixed << std::setprecision(6) << da << " "; + } + outFile << std::endl; + } + outFile << std::endl; + } + outFile << std::endl; + } + outFile.close(); + } + template + void saveIntData(string ex = "") { + if (Tensor::tensor_status != TENSOR_STATIC_READY) return; + if (ctype() == BTHWC || ctype() == BCTHW) { + save5Data(ex); + return; + } + // std::filesystem::create_directory("save_out"); + string directory = "save_out"; + struct stat info; +#ifdef _WIN32 + _mkdir(directory.c_str()); +#else + if (stat(directory.c_str(), &info) != 0) { + if (stat(directory.c_str(), &info) != 0) { + mkdir(directory.c_str(), 0777); // notice that 0777 is different than usual + } else if (!(info.st_mode & S_IFDIR)) { + // if the path exists but it is not a directory, also create it + mkdir(directory.c_str(), 0777); // notice that 0777 is different than usual + } + } +#endif + std::ofstream outFile(directory + "/" + name() + ex + ".log"); + 
outFile << "----------------------------------------" << std::endl; + if (impl_->ctype_ == BSHD) { + outFile << name() << ": [BSHD]shape:[" << batch() << " " << sequence() << " " << head() << " " << dimension() << "] " << dtype() << " " << ctype() << std::endl; + } else { + outFile << name() << ": shape:[" << batch() << " " << head() << " " << sequence() << " " << dimension() << "] " << dtype() << " " << ctype() << std::endl; + } + + int N = batch(); + int C = head(); + int H = sequence(); + int W = dimension(); + if (impl_->ctype_ == BSHD) { + for (int n = 0; n < batch(); ++n) { + for (int h = 0; h < sequence(); ++h) { + for (int c = 0; c < head(); ++c) { + for (int w = 0; w < dimension(); ++w) { + outFile << (int)dataAt(n, c, h, w) << " "; + } + outFile << std::endl; + } + outFile << std::endl; + } + outFile << std::endl; + } outFile.close(); return; } @@ -1779,6 +2235,9 @@ class Tensor { void printCtype() { std::string ctype; switch (impl_->ctype_) { + case BHSD: + ctype = "BHSD"; + break; case BSHD: ctype = "BSHD"; break; diff --git a/src/TensorImpl.hpp b/mllm/TensorImpl.hpp similarity index 50% rename from src/TensorImpl.hpp rename to mllm/TensorImpl.hpp index bfe3c4081..942312260 100644 --- a/src/TensorImpl.hpp +++ b/mllm/TensorImpl.hpp @@ -1,32 +1,65 @@ #ifndef MLLM_TENSORIMPL_H #define MLLM_TENSORIMPL_H -#include -#include "Backend.hpp" -#include "OpDefined.hpp" -#include #include -#include -#include -#include +#include #include #include -#include +// #include #ifdef _WIN32 #include #else #include #endif -#include #include -// #include + +#include "OpDefined.hpp" +#include "Backend.hpp" +#include namespace mllm { class Backend; class Module; +enum DeviceMemType { + MEM_TYPE_GENERIC, // 通用设备指针 (可用于 CUDA 的 `cudaMalloc` 结果) + MEM_TYPE_BUFFER, // OpenCL 缓冲区 (cl_buffer) + MEM_TYPE_IMAGE_2D, // OpenCL 2D图像 (cl_image) + MEM_TYPE_IMAGE_3D, // OpenCL 3D图像 (cl_image) + MEM_TYPE_TEXTURE, // 没用 +}; + +// 通用设备内存描述符结构体 +struct DeviceMemory { + void *handle = nullptr; // 
通用句柄 (存放 cl_mem, cuda pointer, etc.) + DeviceMemType type = MEM_TYPE_BUFFER; // 内存类型,默认为 Buffer + + // 后端无关的元数据 + size_t size_in_bytes = 0; + + // 专门为 Image 类型准备的元数据 + size_t image_width = 0; + size_t image_height = 0; + size_t image_depth = 0; // 用于 3D 图像 + + size_t image_row_pitch_in_bytes = 0; +}; + class TensorImpl { public: - bool owns_host_ptr_ = true; // 新增标志位 + void *host_ptr_ = nullptr; + bool owns_host_ptr_ = true; + bool owns_device_memory_ = true; + std::shared_ptr memory_handle_ = nullptr; + + //=====GPU====== + enum Location { + UNSPECIFIED, // 未指定位置 + ON_HOST, // 在主机 (CPU) 内存中 + ON_DEVICE // 在设备 (如 OpenCL GPU) 内存中 + }; + Location location_ = ON_HOST; // 默认位置设为 ON_HOST, + DeviceMemory device_memory_; + //=====GPU====== std::map chls_ = {{BATCH, 0}, {SEQUENCE, 1}, {HEAD, 2}, {DIMENSION, 3}, {CHANNLE, 1}, {TIME, 2}, {HEIGHT, 3}, {WIDTH, 4}}; string name_; @@ -34,7 +67,6 @@ class TensorImpl { ChlType ctype_ = BSHD; Backend *backend_ = nullptr; - void *host_ptr_ = nullptr; vector shape_; uint64_t capacity_ = 0; @@ -53,15 +85,14 @@ class TensorImpl { TensorImpl() = default; TensorImpl(Backend *bn) : backend_(bn) { + if (backend_->type() == MLLM_OPENCL) { + location_ = ON_DEVICE; + } } // 析构函数负责资源释放 ~TensorImpl() { - if (host_ptr_ != nullptr && owns_host_ptr_) { - if (backend_) { - backend_->free(host_ptr_); - } - } + free_memory(); } // 禁止拷贝(使用shared_ptr管理) @@ -72,6 +103,22 @@ class TensorImpl { return DataTypeSize(dtype_, count_); } void alloc() { + if (backend_->type() == MLLM_OPENCL) { + location_ = ON_DEVICE; + } + if (location_ == ON_DEVICE) { + if (device_memory_.handle != nullptr) return; + if (host_ptr_ != nullptr && owns_host_ptr_) { + backend_->free(host_ptr_); + host_ptr_ = nullptr; + allocated_ = 0; + } + device_memory_.size_in_bytes = cntSize(); + backend_->alloc_device(device_memory_, dtype_); + // owns_device_memory_ = true; + allocated_ = count_; + return; + } if (allocated_ != count_) { if (host_ptr_ != nullptr && owns_host_ptr_) { 
backend_->free(host_ptr_); @@ -84,14 +131,80 @@ class TensorImpl { } } + void free_memory() { + if (location_ == ON_HOST && host_ptr_ != nullptr && owns_host_ptr_) { + if (backend_) backend_->free(host_ptr_); + } else if (location_ == ON_DEVICE && device_memory_.handle != nullptr && owns_device_memory_) { // + if (backend_) backend_->free_device(device_memory_); + device_memory_.handle = nullptr; // 清理句柄 + // owns_device_memory_ = false; + } + host_ptr_ = nullptr; + allocated_ = 0; + } + + void unload() { + memory_handle_.reset(); + if (owns_host_ptr_ && host_ptr_ != nullptr) { + free_memory(); + } + host_ptr_ = nullptr; + allocated_ = 0; + owns_host_ptr_ = true; + } + + // 保留旧的 free() 接口,但让它调用新的 free_memory() void free() { - if (host_ptr_ != nullptr && owns_host_ptr_) { // 直接访问成员变量 - if (backend_) { - backend_->free(host_ptr_); + free_memory(); + } + + // void free() { + // if (host_ptr_ != nullptr && owns_host_ptr_) { // 直接访问成员变量 + // if (backend_) { + // backend_->free(host_ptr_); + // } + // host_ptr_ = nullptr; + // allocated_ = 0; + // } + // } + + void to(Backend *target_backend) { + if (backend_ == target_backend) { + return; + } + // 路径1: 从任何后端迁移到主机 (CPU) + if (target_backend->type() == MLLM_CPU) { + if (location_ == ON_DEVICE) { // 从设备迁移到Host + void *new_host_ptr = nullptr; + target_backend->alloc(&new_host_ptr, cntSize() + 16, 128); + backend_->copy_to_host(new_host_ptr, device_memory_); + backend_->free_device(device_memory_); + host_ptr_ = new_host_ptr; + // cl_device_buffer_ = nullptr; + device_memory_.handle = nullptr; + location_ = ON_HOST; + allocated_ = count_; } - host_ptr_ = nullptr; - allocated_ = 0; } + // 路径2: 从主机 (CPU) 迁移到某个设备 + else if (backend_->type() == MLLM_CPU) { + if (location_ == ON_HOST) { + device_memory_.size_in_bytes = cntSize(); + target_backend->alloc_device(device_memory_, dtype_); + target_backend->copy_from_host(device_memory_, host_ptr_); + if (owns_host_ptr_) { + backend_->free(host_ptr_); + } + host_ptr_ = nullptr; + 
location_ = ON_DEVICE; + // allocated_ = 0;// todo1418 + } + } else { + std::cout << "Device -> Device migration via Host" << std::endl; + this->to(Backend::global_backends[MLLM_CPU].get()); + this->to(target_backend); + } + backend_ = target_backend; } // diff --git a/include/Timing.hpp b/mllm/Timing.hpp similarity index 100% rename from include/Timing.hpp rename to mllm/Timing.hpp diff --git a/src/Trace.cpp b/mllm/Trace.cpp similarity index 86% rename from src/Trace.cpp rename to mllm/Trace.cpp index 5e1f51ac3..18b57f833 100644 --- a/src/Trace.cpp +++ b/mllm/Trace.cpp @@ -41,10 +41,12 @@ void Tracer::addTensorFunction(TensorFunction *func, } void Tracer::trace(Module *model, vector inputs) { - inputs[0].setTtype(TensorType::NORMAL_TENSOR); - model->activation_tensors[inputs[0].name()] = std::shared_ptr(&inputs[0], [](Tensor *) {}); - model->activation_tensors[inputs[0].name()]->setName(inputs[0].name()); - model->activation_tensors[inputs[0].name()]->setModule(model); + for(auto& input : inputs) { + input.setTtype(TensorType::NORMAL_TENSOR); + model->activation_tensors[input.name()] = std::shared_ptr(&input, [](Tensor *) {}); + model->activation_tensors[input.name()]->setName(input.name()); + model->activation_tensors[input.name()]->setModule(model); + } Module::llm_model_ptr = model; diff --git a/src/Trace.hpp b/mllm/Trace.hpp similarity index 100% rename from src/Trace.hpp rename to mllm/Trace.hpp diff --git a/include/Types.hpp b/mllm/Types.hpp similarity index 51% rename from include/Types.hpp rename to mllm/Types.hpp index 5fd850ce6..0d22e402e 100644 --- a/include/Types.hpp +++ b/mllm/Types.hpp @@ -2,6 +2,7 @@ #ifndef MLLM_TYPES_H #define MLLM_TYPES_H #include "OpDefined.hpp" +#include "DataType.hpp" #include #include #include @@ -18,12 +19,18 @@ using std::vector; using std::map; typedef map OpParam; - // #define DEBUGSAVETENSOR // #define DEBUGOPTIME -#define LLAMAFILE_SGEMM +// #define LLAMAFILE_SGEMM inline int KVCache_TYPE = 16; +#if !defined(ARM) 
+inline int KVCache_Type_eager = KVCache_TYPE; +#else +inline int KVCache_Type_eager = 32; +#endif +inline int KVCacheSageDtypeBit = 8; // 8 or 16 +inline int KVCache_batch = 1; typedef enum { MLLM_CPU, MLLM_OPENCL, @@ -72,7 +79,7 @@ enum DataType { MLLM_TYPE_Q4_0_4_8 = 20, MLLM_TYPE_Q4_0_8_8 = 21, MLLM_TYPE_Q8_0_4_4 = 22, - + // 2-bit quantizations MLLM_TYPE_Q3_K = 23, // MLLM_TYPE_Q2_K = 24, MLLM_TYPE_Q1_K = 25, // @@ -82,11 +89,16 @@ enum DataType { MLLM_TYPE_IQ1_M = 29, // MLLM_TYPE_IQ2_S = 30, + MLLM_TYPE_KLEIDIAI_Q4_0 = 31, + MLLM_TYPE_Q8_0F = 32, // quantized with float scale + MLLM_TYPE_Q2_0 = 33, // 2-bits quantization + MLLM_TYPE_COUNT, }; enum ChlType { BSHD = 0, + BHSD, BHDS = 2, BCTHW = 3, @@ -103,6 +115,7 @@ inline std::map, ChlType> Chls2Type = { {{0, 2, 3, 1}, BDHS}, {{0, 1, 3, 2}, BHDS}, {{0, 2, 1, 3}, BSHD}, + {{0, 1, 2, 3}, BHSD}, {{1, 2, 0, 3}, SBHD}, {{0, 3, 2, 1}, BDSH}, {{1, 2, 3, 0}, DBHS}, @@ -142,6 +155,12 @@ enum AttnQKVSplitType { SPLIT_D_HD = Chl::D_HD, }; +enum AttnPostQkvNormType { + PostQkv_NONE = 0, + PostQkv_LayerNorm, + PostQkv_RMSNorm, +}; + #define ANYDIM -198098 enum PaddingType { @@ -155,6 +174,7 @@ enum RoPEType { PERSIMMONROPE = 3, HFHUBROPE = 4, MLAROPE = 5, + NTKROPE = 6, }; enum RoPEThetaType { @@ -167,191 +187,6 @@ enum ExecutionType { AUTOREGRESSIVE = 1, }; -/* - * This code is based on ggml(https://github.com/ggerganov/ggml), - * please see https://github.com/ggerganov/ggml/blob/master/src/ggml.c - * ggml is licensed under MIT Copyright (c) 2022 Georgi Gerganov: - * - * MIT License - * Copyright (c) 2022 Georgi Gerganov - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * 
furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifdef _MSC_VER -#define MLLM_EXTENSION -#else // _MSC_VER -#define MLLM_EXTENSION __extension__ -#endif // _MSC_VER - -typedef uint32_t mllm_half32; - -#if defined(__ARM_NEON) && !defined(_MSC_VER) -typedef __fp16 mllm_fp16_t; -#else -typedef uint16_t mllm_fp16_t; -#endif - -// #define MLLM_QKK_64 -#ifdef MLLM_QKK_64 -#define QK_K 64 -#define K_SCALE_SIZE 4 -#else -#define QK_K 256 -#define K_SCALE_SIZE 12 -#endif -#define QK4_0 32 - -#pragma pack(1) -typedef struct { - mllm_fp16_t d; // delta - uint8_t qs[QK4_0 / 2]; // nibbles / quants -} block_q4_0; -#pragma pack() - -// 4-bit quantization -// 16 blocks of 32 elements each -// weight is represented as x = a * q + b -// Effectively 4.5 bits per weight -#ifdef MLLM_QKK_64 -#pragma pack(1) -typedef struct { - mllm_fp16_t d[2]; // super-block scales/mins - uint8_t scales[2]; // 4-bit block scales/mins - uint8_t qs[QK_K / 2]; // 4--bit quants -} block_q4_K; -#pragma pack() -static_assert(sizeof(block_q4_K) == 2 * sizeof(uint16_t) + QK_K / 2 + 2, "wrong q4_K block size/padding"); -#else -#pragma pack(1) -typedef struct { - mllm_fp16_t d; // super-block scale for quantized scales - mllm_fp16_t dmin; // super-block scale for quantized mins - uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits - uint8_t qs[QK_K / 2]; 
// 4--bit quants -} block_q4_K; -#pragma pack() -static_assert(sizeof(block_q4_K) == 2 * sizeof(mllm_fp16_t) + K_SCALE_SIZE + QK_K / 2, "wrong q4_K block size/padding"); -#endif - -#pragma pack(1) -typedef struct { - uint8_t ql[QK_K / 2]; // quants, lower 4 bits - uint8_t qh[QK_K / 4]; // quants, upper 2 bits - int8_t scales[QK_K / 16]; // scales, quantized with 8 bits - mllm_fp16_t d; // super-block scale -} block_q6_K; -#pragma pack() -static_assert(sizeof(block_q6_K) == sizeof(mllm_fp16_t) + QK_K / 16 + 3 * QK_K / 4, "wrong q6_K block size/padding"); - -#define QK8_0 32 -#pragma pack(1) -typedef struct { - mllm_fp16_t d; // delta - int8_t qs[QK8_0]; // quants -} block_q8_0; -#pragma pack() -#pragma pack(1) -typedef struct { - int8_t qs[QK8_0]; // quants -} block_q8_per_tensor; // used in vecdot_i8_i8, TODO: remove -#pragma pack() - -// This is only used for intermediate quantization and dot products -#pragma pack(1) -typedef struct { - float d; // delta - int8_t qs[QK_K]; // quants - int16_t bsums[QK_K / 16]; // sum of quants in groups of 16 -} block_q8_K; -#pragma pack() -static_assert(sizeof(block_q8_K) == sizeof(float) + QK_K + QK_K / 16 * sizeof(int16_t), "wrong q8_K block size/padding"); - -#pragma pack(1) -typedef struct { - mllm_fp16_t d[4]; // deltas for 4 q4_0 blocks - uint8_t qs[QK4_0 * 2]; // nibbles / quants for 4 q4_0 blocks -} block_q4_0x4; -#pragma pack() -static_assert(sizeof(block_q4_0x4) == 4 * sizeof(mllm_fp16_t) + QK4_0 * 2, "wrong q4_0x4 block size/padding"); - -#pragma pack(1) -typedef struct { - mllm_fp16_t d[8]; // deltas for 8 q4_0 blocks - uint8_t qs[QK4_0 * 4]; // nibbles / quants for 8 q4_0 blocks -} block_q4_0x8; -#pragma pack() -static_assert(sizeof(block_q4_0x8) == 8 * sizeof(mllm_fp16_t) + QK4_0 * 4, "wrong q4_0x8 block size/padding"); - -#pragma pack(1) -typedef struct { - mllm_fp16_t d[4]; // deltas for 4 q8_0 blocks - int8_t qs[QK8_0 * 4]; // quants for 4 q8_0 blocks -} block_q8_0x4; -#pragma pack() 
-static_assert(sizeof(block_q8_0x4) == 4 * sizeof(mllm_fp16_t) + QK8_0 * 4, "wrong q8_0x4 block size/padding"); - -#pragma pack(1) -typedef struct { - mllm_fp16_t d[8]; // deltas for 8 q8_0 blocks - int8_t qs[QK8_0 * 8]; // quants for 8 q8_0 blocks -} block_q8_0x8; -#pragma pack() -static_assert(sizeof(block_q8_0x8) == 8 * sizeof(mllm_fp16_t) + QK8_0 * 8, "wrong q8_0x8 block size/padding"); - -#pragma pack(1) -typedef struct { - uint8_t scales[QK_K / 16]; // scales and mins, quantized with 4 bits - uint8_t qs[QK_K / 4]; // quants - // MLLM_EXTENSION union { - // struct { - // mllm_fp16_t d; // super-block scale for quantized scales - // mllm_fp16_t dmin; // super-block scale for quantized mins - // } MLLM_COMMON_AGGR_S; - // mllm_half32 dm; - // } MLLM_COMMON_AGGR_U; - mllm_fp16_t d; // super-block scale for quantized scales - mllm_fp16_t dmin; // super-block scale for quantized mins -} block_q2_K; -#pragma pack() -static_assert(sizeof(block_q2_K) == 2 * sizeof(mllm_fp16_t) + QK_K / 16 + QK_K / 4, "wrong q2_K block size/padding"); - -#pragma pack(1) -typedef struct { - uint8_t hmask[QK_K / 8]; // quants - high bit - uint8_t qs[QK_K / 4]; // quants - low 2 bits - uint8_t scales[12]; // scales, quantized with 6 bits - mllm_fp16_t d; // super-block scale -} block_q3_K; -#pragma pack() -static_assert(sizeof(block_q3_K) == sizeof(mllm_fp16_t) + QK_K / 4 + QK_K / 8 + 12, "wrong q3_K block size/padding"); - -#pragma pack(1) -typedef struct { - mllm_fp16_t d; - uint16_t qs[QK_K / 8]; -} block_iq2_xxs; -#pragma pack() -static_assert(sizeof(block_iq2_xxs) == sizeof(mllm_fp16_t) + QK_K / 8 * sizeof(uint16_t), "wrong iq2_xxs block size/padding"); - -// - static string DataTypeName(DataType dataType) { switch (dataType) { case MLLM_TYPE_F32: @@ -404,6 +239,12 @@ static string DataTypeName(DataType dataType) { return "IQ1_M"; case MLLM_TYPE_IQ2_S: return "IQ2_S"; + case MLLM_TYPE_KLEIDIAI_Q4_0: + return "KLEIDIAI_Q4_0"; + case MLLM_TYPE_Q8_0F: + return "Q8_0F"; + case 
MLLM_TYPE_Q2_0: + return "Q2_0"; case MLLM_TYPE_COUNT: return "COUNT"; default: @@ -446,7 +287,6 @@ static size_t DataTypeSize(DataType dtype, uint64_t count = 1) { return (sizeof(block_q4_0x8)) * count / (QK4_0 * 8); case MLLM_TYPE_Q8_0_4_4: return (sizeof(block_q8_0x4)) * count / (QK8_0 * 4); - case MLLM_TYPE_Q3_K: return (sizeof(block_q3_K)) * count / (QK_K); case MLLM_TYPE_Q2_K: @@ -463,6 +303,12 @@ static size_t DataTypeSize(DataType dtype, uint64_t count = 1) { return -1; case MLLM_TYPE_IQ2_S: return -1; + case MLLM_TYPE_KLEIDIAI_Q4_0: + return sizeof(uint8_t) * count; + case MLLM_TYPE_Q8_0F: + return (sizeof(block_q8_0f)) * count / (QK8_0F); + case MLLM_TYPE_Q2_0: + return (sizeof(block_q2_0)) * count / (QK2_0); case MLLM_TYPE_COUNT: return 0; default: @@ -471,7 +317,7 @@ static size_t DataTypeSize(DataType dtype, uint64_t count = 1) { } #ifdef __cplusplus namespace mllm { -// TODO: copy from MNN; need to recode +// TODO: copy from MNN; need to recode #UNUSED struct BackendConfig { enum MemoryMode { Memory_Normal = 0, diff --git a/mllm/backends/cpu/CMakeLists.txt b/mllm/backends/cpu/CMakeLists.txt new file mode 100644 index 000000000..bd03fb1a3 --- /dev/null +++ b/mllm/backends/cpu/CMakeLists.txt @@ -0,0 +1,175 @@ +# CPU Backend +file(GLOB MLLM_CPU_SRC + ${CMAKE_CURRENT_LIST_DIR}/*.cpp + ${CMAKE_CURRENT_LIST_DIR}/compute/*.cpp + ${CMAKE_CURRENT_LIST_DIR}/third_party/ggml/*.cpp + ${CMAKE_CURRENT_LIST_DIR}/op/*.cpp + ${CMAKE_CURRENT_LIST_DIR}/function/*.cpp +) + + +# --- 初始化 kleidiai 源文件和头文件目录 --- +set(ALL_KLEIDIAI_SOURCES "") +set(ALL_KLEIDIAI_INCLUDE_DIRS "") + + +# Conditionally compile kleidiai for ARM architectures +if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64") + # --- 设置 kleidiai 库的路径 --- + set(KLEIDIAI_SOURCE_DIR ${CMAKE_CURRENT_LIST_DIR}/third_party/kleidiai) + if(NOT EXISTS ${KLEIDIAI_SOURCE_DIR}) + message(FATAL_ERROR "kleidiai library not found! 
Please place it in 'third_party/kleidiai'.") + endif() + list(APPEND ALL_KLEIDIAI_INCLUDE_DIRS ${KLEIDIAI_SOURCE_DIR}) + + + # --- Section for QSI4_C32P (FP32 output) kernels --- + message(STATUS "Enabling kleidiai QSI4_C32 MatMul implementation.") + list(APPEND ALL_KLEIDIAI_INCLUDE_DIRS + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi4c32p + ) + set(KLEIDIAI_SOURCES_QSI4_C32P + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi4c32p/kai_matmul_clamp_f32_qai8dxp1x8_qsi4c32p4x8_1x4x32_neon_dotprod.c + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi4c32p/kai_matmul_clamp_f32_qai8dxp1x8_qsi4c32p4x8_1x4x32_neon_dotprod_asm.S + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi4c32p/kai_matmul_clamp_f32_qai8dxp4x8_qsi4c32p4x8_8x4x32_neon_i8mm.c + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi4c32p/kai_matmul_clamp_f32_qai8dxp4x8_qsi4c32p4x8_8x4x32_neon_i8mm_asm.S + ) + list(APPEND ALL_KLEIDIAI_SOURCES ${KLEIDIAI_SOURCES_QSI4_C32P}) + + # --- Section for QSI4_CXP (FP16 output) kernels --- + message(STATUS "Enabling kleidiai QSI4_CXP (to FP16) MatMul implementation.") + list(APPEND ALL_KLEIDIAI_INCLUDE_DIRS + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/matmul_clamp_f16_qai8dxp_qsi4cxp + ) + set(KLEIDIAI_SOURCES_QSI4_CXP + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/matmul_clamp_f16_qai8dxp_qsi4cxp/kai_matmul_clamp_f16_qai8dxp1x8_qsi4cxp4x8_1x4_neon_dotprod.c + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/matmul_clamp_f16_qai8dxp_qsi4cxp/kai_matmul_clamp_f16_qai8dxp1x8_qsi4cxp4x8_1x4_neon_dotprod_asm.S + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/matmul_clamp_f16_qai8dxp_qsi4cxp/kai_matmul_clamp_f16_qai8dxp4x8_qsi4cxp4x8_16x4_neon_i8mm.c + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/matmul_clamp_f16_qai8dxp_qsi4cxp/kai_matmul_clamp_f16_qai8dxp4x8_qsi4cxp4x8_16x4_neon_i8mm_asm.S + ) + list(APPEND ALL_KLEIDIAI_SOURCES ${KLEIDIAI_SOURCES_QSI4_CXP}) + + # --- Section 
for FP16 (f16*f16) kernels --- + message(STATUS "Enabling kleidiai FP16 MatMul implementation.") + list(APPEND ALL_KLEIDIAI_INCLUDE_DIRS + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/matmul_clamp_f16_f16_f16p + ) + set(KLEIDIAI_SOURCES_FP16 + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/matmul_clamp_f16_f16_f16p/kai_matmul_clamp_f16_f16_f16p16x1biasf16_6x16x8_neon_mla.c + ) + list(APPEND ALL_KLEIDIAI_SOURCES ${KLEIDIAI_SOURCES_FP16}) + + # --- Section for FP32 (f32*f32) kernels --- + message(STATUS "Enabling kleidiai FP32 MatMul implementation.") + list(APPEND ALL_KLEIDIAI_INCLUDE_DIRS + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/matmul_clamp_f32_f32_f32p + ) + set(KLEIDIAI_SOURCES_FP32 + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/matmul_clamp_f32_f32_f32p/kai_matmul_clamp_f32_f32_f32p8x1biasf32_6x8x4_neon_mla.c + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/matmul_clamp_f32_f32_f32p/kai_matmul_clamp_f32_f32_f32p8x1biasf32_6x8x4_neon_mla_asm.S + ) + list(APPEND ALL_KLEIDIAI_SOURCES ${KLEIDIAI_SOURCES_FP32}) + + # --- Section for all packing functions --- + list(APPEND ALL_KLEIDIAI_INCLUDE_DIRS + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/pack + ) + set(KLEIDIAI_PACK_SOURCES + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/pack/kai_lhs_quant_pack_qai8dxp_f32.c + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/pack/kai_rhs_pack_kxn_qsi4c32p_qsu4c32s1s0.c + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/pack/kai_lhs_quant_pack_qai8dxp_f16_neon.c + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/pack/kai_rhs_pack_kxn_qsi4cxp_qs4cxs1s0.c + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/pack/kai_rhs_pack_kxn_f16p16x1biasf16_f16_f16_neon.c + ${KLEIDIAI_SOURCE_DIR}/kai/ukernels/matmul/pack/kai_rhs_pack_kxn_f32p8x1biasf32_f32_f32_neon.c + ) + list(APPEND ALL_KLEIDIAI_SOURCES ${KLEIDIAI_PACK_SOURCES}) + + # 将所有 kleidiai 源文件添加到主源文件列表 + list(APPEND MLLM_CPU_SRC ${ALL_KLEIDIAI_SOURCES}) +endif() # End ARM check for kleidiai sources + + +if (MLLM_OPENMP) +find_package(OpenMP REQUIRED) +if(OpenMP_FOUND) + 
message(STATUS "found openmp") + set(CMAKE_C_FLAGS ${CMAKE_C_FLAGS} ${OPENMP_C_FLAGS}) + set(CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS} ${OPENMP_CXX_FLAGS}) +else() + message(FATAL_ERROR "openmp not found!") +endif() +endif() + + + +# 架构优化标志 +if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64") + message(STATUS "ARM detected") + add_compile_options(-march=armv8.2-a+dotprod+fp16+fp16fml) + + # 特殊处理:为FP16源文件添加格式标志 + if(KLAI_USE_FP16 AND CMAKE_CROSS_COMPILING) + message(STATUS "Cross-compilation for ARM detected. Applying FP16 compiler flags.") + set_source_files_properties(${KLEIDIAI_FP16_SOURCES} ${KLEIDIAI_SOURCES_QSI4_CXP} + PROPERTIES COMPILE_FLAGS "-mfp16-format=ieee" + ) + endif() +elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "^(x86_64|i686|AMD64)$") + message(STATUS "x86_64 detected") + add_compile_options(-mavx2) + add_compile_options(-march=native) +endif() + +if(${MLLM_ENABLE_PYTHON}) +add_library( + mllm_cpu + SHARED + ${MLLM_CPU_SRC} +) +else() +add_library( + mllm_cpu + OBJECT + ${MLLM_CPU_SRC} +) +endif() + +# Conditionally add kleidiai compile definitions for ARM +if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64") + target_compile_definitions(mllm_cpu PUBLIC KLAI_USE_QSI4_C32) + target_compile_definitions(mllm_cpu PUBLIC KLAI_USE_QSI4_CXP) + target_compile_definitions(mllm_cpu PUBLIC KLAI_USE_FP16) + target_compile_definitions(mllm_cpu PUBLIC KLAI_USE_FP32) +endif() + +# 包含头文件目录 +list(REMOVE_DUPLICATES ALL_KLEIDIAI_INCLUDE_DIRS) +target_include_directories( + mllm_cpu + PRIVATE + ${CMAKE_CURRENT_LIST_DIR} + ${ALL_KLEIDIAI_INCLUDE_DIRS} +) + +if(OpenMP_FOUND) + message(STATUS "found openmp") + if(ARM AND NOT APK) + message(STATUS "[ARM] found openmp") + target_compile_options(mllm_cpu PUBLIC -fopenmp) + # tmac or for mac + if (CMAKE_HOST_SYSTEM_NAME STREQUAL "Darwin" AND NOT CMAKE_CROSSCOMPILING) + target_link_libraries(mllm_cpu PUBLIC OpenMP::OpenMP_CXX) + else() + 
target_link_libraries(mllm_cpu PUBLIC -fopenmp -static-openmp) + endif () + else() + target_link_libraries(mllm_cpu + PUBLIC + OpenMP::OpenMP_CXX + ) + endif() +endif() + +target_link_libraries(mllm_cpu PUBLIC fmt::fmt-header-only) + +set_target_properties(mllm_cpu PROPERTIES WINDOWS_EXPORT_ALL_SYMBOLS TRUE) diff --git a/mllm/backends/cpu/CPUBackend.cpp b/mllm/backends/cpu/CPUBackend.cpp new file mode 100644 index 000000000..0c2d17d08 --- /dev/null +++ b/mllm/backends/cpu/CPUBackend.cpp @@ -0,0 +1,488 @@ +#include "CPUBackend.hpp" +#include +#include +#include +#include +#include +#include +#include +#include "Backend.hpp" +#include "OpDefined.hpp" +#include "Types.hpp" +// #include "memory/SystemMemoryManager.hpp" +// #include +#include +#include "Layer.hpp" + +#include "op/CPUHeadLinear.hpp" +#include "op/CPULinearInt8.hpp" +#include "op/CPUMultimodalRoPEPipeline.hpp" +#include "op/CPUNTKRoPE.hpp" +// #include "op/CPUPoEmbedding.hpp" +#include "op/CPUSplitInput.hpp" +#include "op/CPUView.hpp" +#include "op/CPUAdd.hpp" +#include "op/CPUCausalMask.hpp" +#include "op/CPUCausalTreeMask.hpp" +#include "op/CPUSlidingWindowMask.hpp" +#include "op/CPUMatmul.hpp" +#include "op/CPURMSNorm.hpp" +#include "op/CPURoPE.hpp" +#include "op/CPURoPETree.hpp" +#include "op/CPUScale.hpp" +#include "op/CPUSiLU.hpp" +#include "op/CPUSoftMax.hpp" +#include "op/CPULinear.hpp" +#include "op/CPUEmbedding.hpp" +#include "op/CPUMul.hpp" +#include "op/CPUKVCache.hpp" +#include "op/CPUReLU.hpp" +#include "op/CPUReLU2.hpp" +#include "op/CPUGELU.hpp" +#include "op/CPUSplit.hpp" +#include "op/CPULayerNorm.hpp" +#include "op/CPUGather.hpp" +#include "op/CPUConvolution2D.hpp" +#include "op/CPUAvgPool2D.hpp" +#include "op/CPUMaxPool2D.hpp" +#include "op/CPUConvolution3D.hpp" +#include "op/CPUVisionRoPE.hpp" +#include "op/CPUMultimodalRoPE.hpp" +#include "op/CPUParameter.hpp" +// #include "op/CPUCat.hpp" +#include "op/CPUSubDim.hpp" +#include "op/CPUQuickGELU.hpp" +#include "op/CPUDivision.hpp" 
+#include "op/CPUNorm.hpp" +#include "op/CPUShape.hpp" +#include "op/CPUTranspose.hpp" +#include "op/CPUMean.hpp" +#include "op/CPURange.hpp" +#include "op/CPUVisionRoPECos.hpp" +#include "op/CPUVisionRoPESin.hpp" +#include "op/CPUWhere.hpp" +#include "op/CPUReplace.hpp" +#include "op/CPUPredictor.hpp" +#include "op/CPUSparseIdLinear.hpp" +#include "op/CPUSparseLinear.hpp" +#include "op/CPUElasticLinear.hpp" +#include "op/CPUQuantize.hpp" +#include "op/CPUMergeOutput.hpp" +#include "op/CPULinearINT8Shadow.hpp" +#include "op/CPUIRoPE.hpp" +#include "op/CPUPosition.hpp" + +#include "op/CPUKVCacheNPU.hpp" +#include "op/CPUKVCacheXp.hpp" +#include "op/CPUKVCacheSage.hpp" + +#include "op/CPUBinaryFunc.hpp" +#include "op/CPUCatFunc.hpp" +#include "op/CPUClipFunc.hpp" +#include "op/CPUExpandFunc.hpp" +#include "op/CPUFlattenFunc.hpp" +#include "op/CPUMatmulFunc.hpp" +#include "op/CPUMeanFunc.hpp" +#include "op/CPUNormFunc.hpp" +#include "op/CPURangeFunc.hpp" +#include "op/CPUSplitFunc.hpp" +#include "op/CPUSumFunc.hpp" +#include "op/CPUTopkFunc.hpp" +#include "op/CPUTransposeFunc.hpp" +#include "op/CPUViewFunc.hpp" +#include "op/CPUWhereFunc.hpp" +#include "op/CPUIndexPutFunc.hpp" +#include "op/CPUArgSortFunc.hpp" +#include "op/CPUBinCountFunc.hpp" +#include "op/CPURepeatFunc.hpp" +#include "op/CPULikeFunc.hpp" +#include "op/CPUScatterAddFunc.hpp" +#include "op/CPUVisionRoPEFunc.hpp" +#include "op/CPUFlashAttention2Func.hpp" +#include "op/CPUSageAttentionFunc.hpp" +#include "op/CPUScatter.hpp" +#include "op/CPUTilde.hpp" +#include "op/CPUMaskedFill.hpp" +#include "op/CPUSigmoid.hpp" + +#include "op/CPUFuyuGatherEmbdFunc.hpp" +#include "op/CPUPhi3VhdmergeFunc.hpp" + +namespace mllm { +class CPUBackendCreator : public BackendCreator { + Backend *create(BackendConfig config) { + shared_ptr mm = nullptr; + // mm = std::make_shared(); + mm = std::make_shared(); // todomm + return new CPUBackend(mm); + }; +}; + +void registerCPUBackendCreator() { + 
InsertBackendCreatorMap(MLLM_CPU, std::make_shared()); +} + +CPUBackend::CPUBackend(shared_ptr &mm) : + Backend(mm) { + type_ = BackendType::MLLM_CPU; + registerOps(); + // registerFuncs(); +} + +Op *CPUBackend::opCreate(const OpParam &op_param, string name, int threadCount) { + OpType optype = OpType(op_param.find("type")->second); + auto iter = map_creator_.find(optype); + if (iter == map_creator_.end()) { + std::cout << "CPU Op Don't support type : " << name << std::endl; + return nullptr; + } + Op *exe = iter->second->create(op_param, this, name, cpu_threads); + return exe; +} +void CPUBackend::registerOps() { + addCreator(PARAMETER, (CPUBackend::Creator *)(new CPUParameterCreator())); + addCreator(ADD, (CPUBackend::Creator *)(new CPUAddCreator())); + addCreator(CAUSALMASK, (CPUBackend::Creator *)(new CPUCausalMaskCreator())); + addCreator(CAUSALTREEMASK, (CPUBackend::Creator *)(new CPUCausalTreeMaskCreator())); + addCreator(SLIDINGWINDOWMASK, (CPUBackend::Creator *)(new CPUSlidingWindowMaskCreator())); + addCreator(MATMUL, (CPUBackend::Creator *)(new CPUMatmulCreator())); + addCreator(RMSNORM, (CPUBackend::Creator *)(new CPURMSNormCreator())); + addCreator(ROPE, (CPUBackend::Creator *)(new CPURoPECreator())); + addCreator(ROPETREE, (CPUBackend::Creator *)(new CPURoPETreeCreator())); + addCreator(SCALE, (CPUBackend::Creator *)(new CPUScaleCreator())); + addCreator(SILU, (CPUBackend::Creator *)(new CPUSiLUCreator())); + addCreator(SOFTMAX, (CPUBackend::Creator *)(new CPUSoftMaxCreator())); + addCreator(LINEAR, (CPUBackend::Creator *)(new CPULinearCreator())); + addCreator(LINEARINT8, (CPUBackend::Creator *)(new CPULinearInt8Creator())); + addCreator(EMBEDDING, (CPUBackend::Creator *)(new CPUEmbeddingCreator())); + addCreator(MUL, (CPUBackend::Creator *)(new CPUMulCreator())); + addCreator(VIEW, (CPUBackend::Creator *)(new CPUViewCreator())); + addCreator(KVCACHE, (CPUBackend::Creator *)(new CPUKVCacheCreator())); + addCreator(KVCACHENPU, (CPUBackend::Creator 
*)(new CPUKVCacheNPUCreator())); + addCreator(RELU, (CPUBackend::Creator *)(new CPUReLUCreator())); + addCreator(RELU2, (CPUBackend::Creator *)(new CPUReLU2Creator())); + addCreator(OP_GELU, (CPUBackend::Creator *)(new CPUGELUCreator())); + addCreator(QUICKGLUE, (CPUBackend::Creator *)(new CPUQuickGELUCreator())); + addCreator(LAYERNORM, (CPUBackend::Creator *)(new CPULayerNormCreator())); + addCreator(SPLIT, (CPUBackend::Creator *)(new CPUSplitCreator())); + addCreator(GATHER, (CPUBackend::Creator *)(new CPUGatherCreator())); + addCreator(CONVOLUTION2D, (CPUBackend::Creator *)(new CPUConvolution2DCreator())); + addCreator(AVGPOOL2D, (CPUBackend::Creator *)(new CPUAvgPoolCreator())); + addCreator(MAXPOOL2D, (CPUBackend::Creator *)(new CPUMaxPoolCreator())); + addCreator(CONVOLUTION3D, (CPUBackend::Creator *)(new CPUConvolution3DCreator())); + addCreator(VISIONROPE, (CPUBackend::Creator *)(new CPUVisionRoPECreator())); + addCreator(VISIONROPESIN, (CPUBackend::Creator *)(new CPUVisionRoPESinCreator())); + addCreator(VISIONROPECOS, (CPUBackend::Creator *)(new CPUVisionRoPECosCreator())); + addCreator(MULTIMODALROPEPIP, (CPUBackend::Creator *)(new CPUMultimodalRoPEPipelineCreator())); + addCreator(MULTIMODALROPE, (CPUBackend::Creator *)(new CPUMultimodalRoPECreator())); + addCreator(TRANSPOSE, (CPUBackend::Creator *)(new CPUTransposeCreator())); + addCreator(SUBDIM, (CPUBackend::Creator *)(new CPUSubDimCreator())); + addCreator(DIVISION, (CPUBackend::Creator *)(new CPUDivisionCreator())); + addCreator(NORM, (CPUBackend::Creator *)(new CPUNormCreator())); + addCreator(SHAPE, (CPUBackend::Creator *)(new CPUShapeCreator())); + addCreator(MEAN, (CPUBackend::Creator *)(new CPUMeanCreator())); + addCreator(RANGE, (CPUBackend::Creator *)(new CPURangeCreator())); + addCreator(WHERE, (CPUBackend::Creator *)(new CPUWhereCreator())); + addCreator(REPLACE, (CPUBackend::Creator *)(new CPUReplaceCreator())); + addCreator(PREDICTOR, (CPUBackend::Creator *)(new 
CPUPredictorCreator())); + addCreator(SPARSELINEAR, (CPUBackend::Creator *)(new CPUSparseLinearCreator())); + addCreator(SPARSEIDLINEAR, (CPUBackend::Creator *)(new CPUSparseIdLinearCreator())); + addCreator(ELASTICLINEAR, (CPUBackend::Creator *)(new CPUElasticLinearCreator())); + addCreator(POSITION, (CPUBackend::Creator *)(new CPUPositionCreator())); + addCreator(QUANTIZE, (CPUBackend::Creator *)(new CPUQuantizeCreator())); + addCreator(MERGEOUTPUT, (CPUBackend::Creator *)(new CPUMergeOutputCreator())); + addCreator(SPLITINPUT, (CPUBackend::Creator *)(new CPUSplitInputCreator())); + addCreator(LINEARINT8SHADOW, (CPUBackend::Creator *)(new CPULinearINT8ShadowCreator())); + addCreator(IROPE, (CPUBackend::Creator *)(new CPUIRoPECreator())); + addCreator(XP_KVCACHE, (CPUBackend::Creator *)(new CPUKVCacheXpCreator())); + addCreator(NTKROPE, (CPUBackend::Creator *)(new CPUNTKRoPECreator())); + addCreator(HEADLINEAR, (CPUBackend::Creator *)(new CPUHeadLinearCreator())); + addCreator(KVCACHESAGE, (CPUBackend::Creator *)(new CPUKVCacheSageCreator())); + addCreator(SIGMOID, (CPUBackend::Creator *)(new CPUSigmoidCreator())); + + // funsction + addCreator(F_ADD, (CPUBackend::Creator *)(new CPUaddFunctionCreator())); + addCreator(F_SUB, (CPUBackend::Creator *)(new CPUsubFunctionCreator())); + addCreator(F_MUL, (CPUBackend::Creator *)(new CPUmulFunctionCreator())); + addCreator(F_DIV, (CPUBackend::Creator *)(new CPUdivFunctionCreator())); + addCreator(F_DIVINT, (CPUBackend::Creator *)(new CPUdivintFunctionCreator())); + addCreator(F_TTADD, (CPUBackend::Creator *)(new CPUaddTwoFunctionCreator())); + addCreator(F_TTSUB, (CPUBackend::Creator *)(new CPUsubTwoFunctionCreator())); + addCreator(F_TTMUL, (CPUBackend::Creator *)(new CPUmulTwoFunctionCreator())); + addCreator(F_TTDIV, (CPUBackend::Creator *)(new CPUdivTwoFunctionCreator())); + addCreator(F_MM, (CPUBackend::Creator *)(new CPUmmFunctionCreator())); + addCreator(F_NORM, (CPUBackend::Creator *)(new 
CPUnormFunctionCreator())); + addCreator(F_MEAN, (CPUBackend::Creator *)(new CPUmeanFunctionCreator())); + addCreator(F_CAT, (CPUBackend::Creator *)(new CPUcatFunctionCreator())); + addCreator(F_VIEW, (CPUBackend::Creator *)(new CPUviewFunctionCreator())); + addCreator(F_TRANPOSE, (CPUBackend::Creator *)(new CPUtransposeFunctionCreator())); + addCreator(F_FLATTEN, (CPUBackend::Creator *)(new CPUflattenFunctionCreator())); + addCreator(F_CLIP, (CPUBackend::Creator *)(new CPUclipFunctionCreator())); + addCreator(F_CLIPAXIS, (CPUBackend::Creator *)(new CPUclipaxisFunctionCreator())); + addCreator(F_CLIPTENSOR, (CPUBackend::Creator *)(new CPUcliptensorFunctionCreator())); + addCreator(F_RANGE, (CPUBackend::Creator *)(new CPURangeFunctionCreator())); + addCreator(F_WHERE, (CPUBackend::Creator *)(new CPUwhereFunctionCreator())); + addCreator(F_INDEX_PUT, (CPUBackend::Creator *)(new CPUIndexPutFunctionCreator())); + addCreator(F_SPLIT, (CPUBackend::Creator *)(new CPUsplitFunctionCreator())); + addCreator(F_SUM, (CPUBackend::Creator *)(new CPUsumFunctionCreator())); + addCreator(F_TOPK, (CPUBackend::Creator *)(new CPUtopkFunctionCreator())); + addCreator(F_EXPPAND, (CPUBackend::Creator *)(new CPUexpandFunctionCreator())); + addCreator(F_ARGSORT, (CPUBackend::Creator *)(new CPUargsortFunctionCreator())); + addCreator(F_BINCOUNT, (CPUBackend::Creator *)(new CPUbincountFunctionCreator())); + addCreator(F_REPEAT, (CPUBackend::Creator *)(new CPUrepeatFunctionCreator())); + addCreator(F_LIKE, (CPUBackend::Creator *)(new CPUlikeFunctionCreator())); + addCreator(F_SCATTERRADD, (CPUBackend::Creator *)(new CPUScatterAddFunctionCreator())); + addCreator(F_APPLY_VISIOROPE, (CPUBackend::Creator *)(new CPUVisionRoPEFuncFunctionCreator())); + addCreator(F_FA2, (CPUBackend::Creator *)(new CPUFlashAttention2FuncCreator())); + addCreator(F_SAGEATTN, (CPUBackend::Creator *)(new CPUSageAttentionFuncCreator())); + addCreator(SCATTER, (CPUBackend::Creator *)(new CPUScatterCreator())); + 
addCreator(TILDE, (CPUBackend::Creator *)(new CPUTildeCreator())); + addCreator(MASKEDFILL, (CPUBackend::Creator *)(new CPUMaskedFillCreator())); + // models use only + addCreator(F_FUYU_GATHER_EMBD, (CPUBackend::Creator *)(new CPUFuyuGatherEmbdFuncCreator())); + addCreator(F_PHI3V_HD_MERGE, (CPUBackend::Creator *)(new CPUPhi3VhdmergeFunctionCreator())); +} +TensorFunction *CPUBackend::funcCreate(const TensorFuncType type) { + auto iter = map_function_.find(type); + if (iter == map_function_.end()) { + std::cout << "CPU funcCreate Don't support type : " << type << std::endl; + return nullptr; + } + return iter->second; +} + +void CPUBackend::registerFuncs() { + ; +}; + +int CPUBackend::cpu_threads = 4; + +void CPUBackend::convert_fp_data(Tensor *src, Tensor *dest) { + // 根据源和目标的类型,执行相应的CPU循环转换 + if (src->dtype() == MLLM_TYPE_F32 && dest->dtype() == MLLM_TYPE_F16) { + float *src_ptr = src->hostPtr(); + mllm_fp16_t *dst_ptr = dest->hostPtr(); + for (int i = 0; i < src->count(); i++) { + dst_ptr[i] = MLLM_FP32_TO_FP16(src_ptr[i]); + } + } else if (src->dtype() == MLLM_TYPE_F16 && dest->dtype() == MLLM_TYPE_F32) { + mllm_fp16_t *src_ptr = src->hostPtr(); + float *dst_ptr = dest->hostPtr(); + for (int i = 0; i < src->count(); i++) { + dst_ptr[i] = MLLM_FP16_TO_FP32(src_ptr[i]); + } + } else { + throw std::runtime_error("Unsupported conversion types for CPU backend."); + } +} +/************************************************************************************************/ +/* Refactored Helper Functions */ +/************************************************************************************************/ +/** + * @brief Creates the initial output tensor objects (shells), either from an aggregated input or from a list of names. + * @param out_tensors The vector of output tensors to be populated. + * @param input_tensors The vector of input tensors, checked for aggregation. + * @param out_names The names for the output tensors if not from an aggregated input. 
+ * @param module The current module. + * @param backend The current backend. + */ +void CPUBackend::_create_output_tensors( + std::vector> &out_tensors, + const std::vector> &input_tensors, + const std::vector &out_names, + Module *module, + map> &activation_tensors, + Backend *backend) { + if (input_tensors.size() == 1 && !input_tensors[0]->aggregatedTensors().empty()) { + const auto &aggregated_tensors = input_tensors[0]->aggregatedTensors(); + out_tensors.insert(out_tensors.end(), aggregated_tensors.begin(), aggregated_tensors.end()); + } else { + for (const auto &out_name : out_names) { + auto out_tensor = std::make_shared(backend); + out_tensor->setName(out_name); + out_tensor->setModule(module); + auto it = activation_tensors.find(out_name); + if (it != activation_tensors.end() && out_name.find("-transpose") == std::string::npos && out_tensor->ctype() != it->second->ctype()) { + out_tensor->chls() = it->second->chls(); + out_tensor->setCtype(it->second->ctype()); + } + out_tensors.push_back(out_tensor); + } + } +} + +std::vector CPUBackend::runOp(Op *op, std::vector inputs, std::vector out_names, bool in_place) { + Module *module = inputs.empty() ? Module::llm_model_ptr : inputs[0].module(); + static map> empty_activation_tensors; + map> &activation_tensors = module ? 
module->activation_tensors : empty_activation_tensors; + if (module && module->doTrace) { // trace + if (module->tracedFlag) { + vector results = {}; + for (auto &name : out_names) results.push_back(*activation_tensors[name]); + return results; + } + for (auto &input : inputs) { + if (input.shouldInGraphs() && activation_tensors.find(input.name()) == activation_tensors.end()) { + activation_tensors[input.name()] = std::make_shared(op->backend()); + activation_tensors[input.name()]->setName(input.name()); + activation_tensors[input.name()]->setModule(module); + } + } + for (const auto &out_name : out_names) { + if (activation_tensors.find(out_name) == activation_tensors.end()) { + activation_tensors[out_name] = std::make_shared(op->backend()); + activation_tensors[out_name]->setName(out_name); + activation_tensors[out_name]->setModule(module); + } + } + vector> inPtrs; + for (auto &input : inputs) { + inPtrs.push_back(input.shouldInGraphs() ? activation_tensors[input.name()] : + std::shared_ptr(&input, [](Tensor *) {})); + } + vector> outPtrs = {}; + for (auto &name : out_names) outPtrs.push_back(activation_tensors[name]); + op->setUp(inPtrs, outPtrs); + vector results = {}; + for (auto &name : out_names) results.push_back(*activation_tensors[name]); + return results; + } + +#ifdef DEBUGOPTIME + uint64_t time_start = mllm_time_us(); +#endif + vector> input_tensors; + for (auto &input : inputs) { + input_tensors.push_back(std::shared_ptr(&input, [](Tensor *) {})); + } + vector> out_tensors; + // Part 1: Create tensor shells + if (!in_place) { + _create_output_tensors(out_tensors, input_tensors, out_names, module, activation_tensors, op->backend()); + } else { + // If in-place, we already have out_tensors filled with input tensors. 
+ for (size_t i = 0; i < input_tensors.size() && i < out_names.size(); ++i) { + input_tensors[i]->setName(out_names[i]); + out_tensors.push_back(input_tensors[i]); + } + } + // Part 2: Reshape the tensors + op->reshape(input_tensors, out_tensors); + // Part 3: Allocate memory + if (!in_place) { + for (auto &out_tensor : out_tensors) { + auto act_it = activation_tensors.find(out_tensor->name()); + auto template_it = act_it != activation_tensors.end() ? act_it->second : nullptr; + out_tensor->allocFromTemplate(template_it); + } + } + // Part 4: Execute the operation + op->execute(input_tensors, out_tensors); + +#ifdef DEBUGOPTIME + uint64_t time_end = mllm_time_us(); + double inference_time_ = (time_end - time_start) / 1000.0F; // ms + static int op_count = 0; + if (op_inference_time_.empty()) { + op_count = 0; + } + string name = std::to_string(op_count++) + "--" + (op->name().empty() ? (out_names.empty() ? "out-" + input_tensors[0]->name() : out_names[0]) : op->name()); + if (op->type() == LINEAR) + op_inference_time_[name] = inference_time_; +#endif + + vector results; + for (const auto &out_tensor : out_tensors) { + results.push_back(*out_tensor); +#ifdef DEBUGSAVETENSOR + if (out_tensor->dtype() == MLLM_TYPE_F32) + out_tensor->saveData(); + if (out_tensor->dtype() == MLLM_TYPE_F16) + out_tensor->saveData(); +#endif + } + return results; +} + +std::vector CPUBackend::runLayer(Layer *layer, std::vector inputs, int N) { + Module *module = inputs.empty() ? Module::llm_model_ptr : inputs[0].module(); + map> &activation_tensors = module->activation_tensors; + vector out_names; + int count = (N > 1) ? N : 1; + for (int i = 0; i < count; ++i) { + std::string tensor_name = (N > 1) ? 
"out-" + layer->op_->name() + "-" + std::to_string(i) : "out-" + layer->op_->name(); + out_names.push_back(tensor_name); + } + return runOp(layer->op_, inputs, out_names, false); +} + +std::vector CPUBackend::runForward(Module *module, std::vector inputs, std::vector args) { + if (mllm::Module::llm_model_ptr && (mllm::Module::llm_model_ptr->doLoad || Module::llm_model_ptr->doChangeBn)) { + auto outputs = module->Forward(inputs, args); + return outputs; + } + uint64_t time_start, time_end; + bool ouilter_flag = (inputs[0].ttype() == TensorType::INPUT_TENSOR); + if (ouilter_flag) { + for (int i = 0; i < inputs.size(); i++) { + auto &input = inputs[i]; + input.setModule(module); + input.setTtype(TensorType::NORMAL_TENSOR); + } + mllm::Module::llm_model_ptr = module; + if (module->prefilling_token_size_ == 0) { // first time init + module->prefilling_token_size_ = inputs[0].sequence() * inputs[0].batch(); + } else if (module->decoding_token_size_ == 0) { + module->decoding_token_size_ = inputs[0].sequence() * inputs[0].batch(); + } + time_start = mllm_time_us(); +#ifdef DEBUGOPTIME + op_inference_time_.clear(); +#endif + } + + auto output = module->Forward(inputs, args); + + if (ouilter_flag) { + time_end = mllm_time_us(); + double inference_time_ = (time_end - time_start) / 1000.0F; // ms + module->inference_times_.push_back(inference_time_); +#ifdef DEBUGOPTIME + _print_op_inference_time(true); + std::cout << "Token inference e2e time: " << inference_time_ << "ms" << std::endl; +#endif + } + return output; +} + +void CPUBackend::_print_op_inference_time(bool sort) { + size_t max_len = 0; + for (const auto &pair : op_inference_time_) { + max_len = std::max(pair.first.size(), max_len); + } + std::vector> sorted_pairs; + if (sort) { + sorted_pairs.assign(op_inference_time_.begin(), op_inference_time_.end()); + std::sort(sorted_pairs.begin(), sorted_pairs.end(), + [](const auto &a, const auto &b) { + return a.second > b.second; + }); + } + double token_inference_time = 
0.0; + if (sort) { + for (const auto &pair : sorted_pairs) { + std::cout << std::left << std::setw(max_len) << pair.first + << " | time: " << pair.second << "ms" << std::endl; + token_inference_time += pair.second; + } + } else { + for (const auto &pair : op_inference_time_) { + std::cout << std::left << std::setw(max_len) << pair.first + << " | time: " << pair.second << "ms" << std::endl; + token_inference_time += pair.second; + } + } + std::cout << "Op times sum: " << token_inference_time << "ms" << std::endl; +} + +} // namespace mllm diff --git a/src/backends/cpu/CPUBackend.hpp b/mllm/backends/cpu/CPUBackend.hpp similarity index 78% rename from src/backends/cpu/CPUBackend.hpp rename to mllm/backends/cpu/CPUBackend.hpp index 276139af1..f5767cee8 100644 --- a/src/backends/cpu/CPUBackend.hpp +++ b/mllm/backends/cpu/CPUBackend.hpp @@ -4,7 +4,8 @@ #include "Backend.hpp" #include "Op.hpp" #include "Types.hpp" -#include "quantize/Quantize.hpp" +#include +// #include "backends/cpu/third_party/ggml/Quantize.hpp" namespace mllm { class Module; @@ -12,10 +13,16 @@ class Layer; class CPUBackend final : public Backend { public: explicit CPUBackend(shared_ptr &mm); - ~CPUBackend() override = default; + ~CPUBackend() { + for (auto &creator_pair : map_creator_) { + delete creator_pair.second; // 手动删除用 new 创建的 Creator 对象 + } + map_creator_.clear(); + } class Creator { public: + virtual ~Creator() = default; virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const = 0; }; bool addCreator(OpType t, Creator *c) { @@ -32,13 +39,9 @@ class CPUBackend final : public Backend { void registerOps() override; void registerFuncs() override; - std::vector runFunc( - std::vector out_names, - TensorFuncType type, - std::vector float_args, - std::vector> input_tensors, - bool in_place) override; std::vector runLayer(Layer *layer, std::vector inputs, int N) override; + + std::vector runOp(Op *op, std::vector input, std::vector out_names, bool in_place) override; 
std::vector runForward(Module *module, std::vector inputs, std::vector args) override; static int cpu_threads; @@ -76,7 +79,6 @@ class CPUBackend final : public Backend { } // #endif - // #ifdef USE_SD void setLastDraftLength(unsigned int draft_length) { last_draft_length = draft_length; @@ -96,6 +98,8 @@ class CPUBackend final : public Backend { bool isUsingDraft() { return usingDraft; } + + void convert_fp_data(Tensor *src, Tensor *dest) override; // #endif private: std::map map_creator_; @@ -117,6 +121,15 @@ class CPUBackend final : public Backend { unsigned int last_draft_length = 0; // #endif + void _create_output_tensors( + std::vector> &out_tensors, + const std::vector> &input_tensors, + const std::vector &out_names, + Module *module, + map> &activation_tensors, + Backend *backend); + map op_inference_time_; + void _print_op_inference_time(bool sort = false); }; } // namespace mllm diff --git a/src/backends/cpu/compute/ActivationFunction.cpp b/mllm/backends/cpu/compute/ActivationFunction.cpp similarity index 100% rename from src/backends/cpu/compute/ActivationFunction.cpp rename to mllm/backends/cpu/compute/ActivationFunction.cpp diff --git a/src/backends/cpu/compute/ActivationFunction.hpp b/mllm/backends/cpu/compute/ActivationFunction.hpp similarity index 99% rename from src/backends/cpu/compute/ActivationFunction.hpp rename to mllm/backends/cpu/compute/ActivationFunction.hpp index 161cf63e1..2dcf147e1 100644 --- a/src/backends/cpu/compute/ActivationFunction.hpp +++ b/mllm/backends/cpu/compute/ActivationFunction.hpp @@ -1,9 +1,7 @@ #ifndef ACTFUNC_HPP #define ACTFUNC_HPP - -#include "quantize/Quantize.hpp" -#include "compute/VecDot.hpp" +#include "backends/cpu/third_party/ggml/ComputeUtils.hpp" namespace mllm { #if defined(__ARM_NEON) && defined(__aarch64__) diff --git a/src/backends/cpu/compute/Arithmetic.cpp b/mllm/backends/cpu/compute/Arithmetic.cpp similarity index 100% rename from src/backends/cpu/compute/Arithmetic.cpp rename to 
mllm/backends/cpu/compute/Arithmetic.cpp diff --git a/src/backends/cpu/compute/Arithmetic.hpp b/mllm/backends/cpu/compute/Arithmetic.hpp similarity index 100% rename from src/backends/cpu/compute/Arithmetic.hpp rename to mllm/backends/cpu/compute/Arithmetic.hpp diff --git a/src/backends/cpu/compute/Convolution.cpp b/mllm/backends/cpu/compute/Convolution.cpp similarity index 99% rename from src/backends/cpu/compute/Convolution.cpp rename to mllm/backends/cpu/compute/Convolution.cpp index b5d5ef2f4..14b36858b 100644 --- a/src/backends/cpu/compute/Convolution.cpp +++ b/mllm/backends/cpu/compute/Convolution.cpp @@ -3,6 +3,7 @@ // #include "Convolution.hpp" +#include "backends/cpu/third_party/ggml/VecDotFP32.hpp" float **reshape_conv2d_kernal_fp32(Tensor *kernel) { int in_channel = kernel->sequence(); diff --git a/mllm/backends/cpu/compute/Convolution.hpp b/mllm/backends/cpu/compute/Convolution.hpp new file mode 100644 index 000000000..149e3eafb --- /dev/null +++ b/mllm/backends/cpu/compute/Convolution.hpp @@ -0,0 +1,21 @@ +// +// Created by Rongjie Yi on 23-12-18. 
+// + +#ifndef CONVOLUTION2D_HPP +#define CONVOLUTION2D_HPP + +#include "Tensor.hpp" +#include "Types.hpp" +using namespace mllm; + +float **reshape_conv2d_kernal_fp32(Tensor *kernel); + +void conv2d_fp32_VALID(Tensor *input, Tensor *output, float **k_new, int kernel_h, int kernel_w, bool support_bias, Tensor *bias, int stride_h, int stride_w, int thread_count = 4); +void conv2d_fp32_SAME(Tensor *input, Tensor *output, float **k_new, int kernel_h, int kernel_w, bool support_bias, Tensor *bias, int stride_h, int stride_w, int padding_h, int padding_w, int thread_count = 4); + +float **reshape_conv3d_kernal_fp32(Tensor *kernel); + +void conv3d_fp32_VALID(Tensor *input, Tensor *output, float **k_new, int kernel_t, int kernel_h, int kernel_w, bool support_bias, Tensor *bias, int stride_t, int stride_h, int stride_w, int thread_count = 4); + +#endif // CONVOLUTION2D_HPP diff --git a/mllm/backends/cpu/compute/FeatureCheck.hpp b/mllm/backends/cpu/compute/FeatureCheck.hpp new file mode 100644 index 000000000..7f2e0838b --- /dev/null +++ b/mllm/backends/cpu/compute/FeatureCheck.hpp @@ -0,0 +1,78 @@ + +#if defined(__linux__) +#include +// #if defined(__aarch64__) && !defined(HWCAP_I8MM) +#include // 确保定义 HWCAP_I8MM +// #endif +#elif defined(__APPLE__) +#include +#include +#endif + +#include +#include +#include +#include + +#if defined(__aarch64__) && (defined(__GNUC__) || defined(__clang__)) +// 读取 ID_AA64ISAR1_EL1 寄存器 +static inline uint64_t read_id_aa64isar1_el1() { + uint64_t value; + asm volatile("mrs %0, ID_AA64ISAR1_EL1" : "=r"(value)); + return value; +} +#endif + +static bool arm_is_i8mm_supported() { + // std::cout << "Starting i8mm detection..." << std::endl; + + // 1. 
macOS 专用检测(不受影响) +#if defined(__APPLE__) && defined(__aarch64__) + // std::cout << "Using macOS sysctl detection" << std::endl; + int supported = 0; + size_t size = sizeof(supported); + const int result = sysctlbyname("hw.optional.arm.FEAT_I8MM", + &supported, &size, NULL, 0); + if (result == 0 && supported) { + // std::cout << "sysctl detection: I8MM supported!" << std::endl; + return true; + } + // std::cerr << "sysctl detection " + // << (result ? "failed" : "I8MM not supported") << std::endl; +#endif + + // 2. 优先使用 CPU 寄存器检测(ARM64通用) +#if defined(__aarch64__) && (defined(__GNUC__) || defined(__clang__)) + // std::cout << "Using ARM64 register detection method" << std::endl; + const uint64_t isar1 = read_id_aa64isar1_el1(); + const uint8_t i8mm_field = (isar1 >> 52) & 0xF; // 位52-55 + // std::cout << "ID_AA64ISAR1_EL1 = 0x" << std::hex << isar1 << std::dec << ", I8MM field = " << static_cast(i8mm_field) << std::endl; + // 值1或2表示支持i8mm + if (i8mm_field == 1 || i8mm_field == 2) { + // std::cout << "Register detection: I8MM supported!" << std::endl; + return true; + } + // std::cout << "Register detection: I8MM not supported" << std::endl; +#endif + +// 3. /proc/cpuinfo 后备检测 +#if defined(__linux__) + // std::cout << "Using /proc/cpuinfo detection" << std::endl; + FILE *cpuinfo = fopen("/proc/cpuinfo", "r"); + if (cpuinfo) { + char line[512]; + while (fgets(line, sizeof(line), cpuinfo)) { + // 检查包含"Features"的行 + if (strstr(line, "Features") && strstr(line, "i8mm")) { + fclose(cpuinfo); + // std::cout << "CPUinfo detection: I8MM supported!" 
<< std::endl; + return true; + } + } + fclose(cpuinfo); + } + // std::cout << "CPUinfo detection: I8MM not found or file access failed" << std::endl; +#endif + // std::cout << "No I8MM support detected" << std::endl; + return false; +} diff --git a/mllm/backends/cpu/compute/FlashAttention2.hpp b/mllm/backends/cpu/compute/FlashAttention2.hpp new file mode 100644 index 000000000..639684de5 --- /dev/null +++ b/mllm/backends/cpu/compute/FlashAttention2.hpp @@ -0,0 +1,2285 @@ +#ifndef MLLM_FA2_CAL_HPP +#define MLLM_FA2_CAL_HPP + +#include +#include +#include +#include +#include +#include +#include "Types.hpp" +#include "backends/cpu/third_party/ggml/QuantizeFP16.hpp" +#include "backends/cpu/third_party/ggml/ComputeUtils.hpp" + +#ifdef __AVX2__ +#include +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) +#include +#endif + +namespace mobi_attn { + +// ======================================== +// 数学函数和工具 +// ======================================== +#define NEG_INF std::numeric_limits::lowest() + +#ifdef __AVX2__ +// Horizontal max of a __m256 vector +inline float _mm256_hmax_ps(__m256 x) { + __m128 lo = _mm256_castps256_ps128(x); + __m128 hi = _mm256_extractf128_ps(x, 1); + __m128 max_val = _mm_max_ps(lo, hi); + max_val = _mm_max_ps(max_val, _mm_shuffle_ps(max_val, max_val, _MM_SHUFFLE(0, 0, 2, 2))); + max_val = _mm_max_ps(max_val, _mm_shuffle_ps(max_val, max_val, _MM_SHUFFLE(0, 0, 0, 1))); + return _mm_cvtss_f32(max_val); +} + +// Horizontal sum of a __m256 vector +inline float _mm256_hadd_ps(__m256 x) { + __m128 lo = _mm256_castps256_ps128(x); + __m128 hi = _mm256_extractf128_ps(x, 1); + __m128 sum = _mm_add_ps(lo, hi); + sum = _mm_hadd_ps(sum, sum); + sum = _mm_hadd_ps(sum, sum); + return _mm_cvtss_f32(sum); +} +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) +inline float _vmaxvq_f32_hmax(float32x4_t x) { + return vmaxvq_f32(x); +} + +inline float _vaddvq_f32_hadd(float32x4_t x) { + return vaddvq_f32(x); +} +#endif + +void aligned_alloc(void **ptr, size_t 
required_bytes, size_t align) { + if (align % sizeof(void *) != 0 || (align & (align - 1)) != 0) { + *ptr = nullptr; + return; + } + if (posix_memalign(ptr, align, required_bytes) != 0) { + *ptr = nullptr; + } +} + +void aligned_free(void *ptr) { + free(ptr); +} + +#if (defined(__ARM_NEON) || defined(__ARM_NEON__)) +inline float32x4_t exp_ps_f32(float32x4_t x) { + float32x4_t a = vdupq_n_f32(12102203.0f); // (1 << 23) / ln(2) + float32x4_t b = vdupq_n_f32(1065353216.0f); // (1 << 23) * (0.5 - 0.04165) + (127 << 23) + int32x4_t m = vdupq_n_s32(0x7f); + float32x4_t y = vmlaq_f32(b, a, x); + int32x4_t r = vreinterpretq_s32_f32(y); + r = vandq_s32(r, vdupq_n_s32(0xffffff)); + r = vorrq_s32(r, vdupq_n_s32(0x3f800000)); + return vreinterpretq_f32_s32(r); +} +#endif + +// ======================================== +// FlashAttention2 核心实现 (FP32版本) +// ======================================== +struct FA_2_GQA_QKV_FP32_BSHD_O_FP32_BSHD_ACC_FP32_IMPL { + using dtype_q_in_t = float; + using dtype_kv_in_t = dtype_q_in_t; + using dtype_out_t = dtype_q_in_t; + using dtype_t = dtype_out_t; + using acc_dtype_t = float; + + int32_t Br; + int32_t Bc; + int32_t Q_Head; + int32_t KV_Head; + int32_t threads; + bool high_precision; + + void configure(int32_t Br_, int32_t Bc_, int32_t Q_Head_, int32_t KV_Head_, int32_t threads_, bool high_precision_) { + Br = Br_; + Bc = Bc_; + Q_Head = Q_Head_; + KV_Head = KV_Head_; + threads = threads_; + high_precision = high_precision_; + } + + void init_workspace(acc_dtype_t *acc_o, acc_dtype_t *acc_s, + acc_dtype_t *logsum, acc_dtype_t *scoremax, acc_dtype_t *scoremax_prev, + acc_dtype_t *score_scale, acc_dtype_t *score_sum) { + acc_o_ = acc_o; + acc_s_ = acc_s; + logsum_ = logsum; + scoremax_ = scoremax; + scoremax_prev_ = scoremax_prev; + score_scale_ = score_scale; + score_sum_ = score_sum; + } + + void fa2(const dtype_t *__restrict__ Q, const dtype_t *__restrict__ K, + const dtype_t *__restrict__ V, dtype_t *__restrict__ O, const int32_t 
batch_size, + const int32_t head_size, const int32_t seq_size_q, const int32_t seq_size_k, + const int32_t dim_size, bool causal_mask = true) { + assert(Br == Bc); + assert(Q_Head % KV_Head == 0); + assert(head_size % threads == 0); +#ifdef __AVX2__ + assert(dim_size % 8 == 0); // AVX processes 8 floats at a time +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + assert(dim_size % 4 == 0); // NEON processes 4 floats at a time +#endif + if (seq_size_q != 1) { + __fa2_prefill_append(Q, K, V, O, batch_size, head_size, seq_size_q, seq_size_k, dim_size, + causal_mask); + } else { + __fa2_decode(Q, K, V, O, batch_size, head_size, seq_size_q, seq_size_k, dim_size, + causal_mask); + } + } + +private: + inline void __fa2_prefill_append(const dtype_t *__restrict__ Q, const dtype_t *__restrict__ K, + const dtype_t *__restrict__ V, dtype_t *__restrict__ O, + const int32_t batch_size, const int32_t head_size, // head_size 就是 Q_Head + const int32_t seq_size_q, const int32_t seq_size_k, + const int32_t dim_size, bool causal_mask = true) { + const int32_t Tr = seq_size_q / Br; + const int32_t Tr_left = seq_size_q % Br; + const int32_t Tc = seq_size_k / Bc; + const int32_t Tc_left = seq_size_k % Bc; + + const float local_scale = 1.0f / sqrtf(static_cast(dim_size)); + + const int32_t kv_group_size = Q_Head / KV_Head; + + for (int32_t b_idx = 0; b_idx < batch_size; ++b_idx) { +#pragma omp parallel for num_threads(threads) schedule(dynamic, 1) if (threads > 1) + for (int32_t h_idx = 0; h_idx < head_size; ++h_idx) { + const int32_t thread_id = omp_get_thread_num(); + const int32_t this_thread_head = h_idx; + + const int32_t this_thread_kv_head = this_thread_head / kv_group_size; + + for (int t_r_idx = 0; t_r_idx < Tr; ++t_r_idx) { + init_temp(logsum_ + thread_id * Br, scoremax_ + thread_id * Br, + acc_o_ + thread_id * Br * dim_size, dim_size); + for (int t_c_idx = 0; t_c_idx < Tc; ++t_c_idx) { + const dtype_t *tile_q = Q + b_idx * seq_size_q * head_size * dim_size + t_r_idx * Br * 
head_size * dim_size + this_thread_head * dim_size; + const dtype_t *tile_k = K + b_idx * seq_size_k * KV_Head * dim_size + t_c_idx * Bc * KV_Head * dim_size + this_thread_kv_head * dim_size; + const dtype_t *tile_v = V + b_idx * seq_size_k * KV_Head * dim_size + t_c_idx * Bc * KV_Head * dim_size + this_thread_kv_head * dim_size; + + acc_dtype_t *tile_acc_s = acc_s_ + thread_id * Br * Bc; + acc_dtype_t *acc_o = acc_o_ + thread_id * Br * dim_size; + + mma0(tile_q, tile_k, tile_acc_s, dim_size, head_size * dim_size, KV_Head * dim_size, t_r_idx, t_c_idx, seq_size_q, seq_size_k, causal_mask); + softmax(tile_acc_s, scoremax_ + thread_id * Br, scoremax_prev_ + thread_id * Br, score_scale_ + thread_id * Br, score_sum_ + thread_id * Br, logsum_ + thread_id * Br, local_scale, t_r_idx, t_c_idx, seq_size_q, seq_size_k, causal_mask); + rescale(acc_o, score_scale_ + thread_id * Br, dim_size, t_r_idx, t_c_idx, seq_size_q, seq_size_k, causal_mask); + mma1(tile_acc_s, tile_v, acc_o, KV_Head, dim_size, t_r_idx, t_c_idx, seq_size_q, seq_size_k, causal_mask); + } + if (Tc_left) { + const dtype_t *tile_q = Q + b_idx * seq_size_q * head_size * dim_size + t_r_idx * Br * head_size * dim_size + this_thread_head * dim_size; + const dtype_t *tile_k = K + b_idx * seq_size_k * KV_Head * dim_size + Tc * Bc * KV_Head * dim_size + this_thread_kv_head * dim_size; + const dtype_t *tile_v = V + b_idx * seq_size_k * KV_Head * dim_size + Tc * Bc * KV_Head * dim_size + this_thread_kv_head * dim_size; + acc_dtype_t *tile_acc_s = acc_s_ + thread_id * Br * Bc; + acc_dtype_t *acc_o = acc_o_ + thread_id * Br * dim_size; + mma0_pa_n_fixed(Br, Tc_left, tile_q, tile_k, tile_acc_s, dim_size, head_size * dim_size, KV_Head * dim_size, t_r_idx, Tc, seq_size_q, seq_size_k, causal_mask); + softmax_pa_n_fixed(Br, Tc_left, tile_acc_s, scoremax_ + thread_id * Br, scoremax_prev_ + thread_id * Br, score_scale_ + thread_id * Br, score_sum_ + thread_id * Br, logsum_ + thread_id * Br, local_scale, t_r_idx, Tc, seq_size_q, 
seq_size_k, causal_mask); + rescale_pa_n_fixed(Br, Tc_left, acc_o, score_scale_ + thread_id * Br, dim_size, t_r_idx, Tc, seq_size_q, seq_size_k, causal_mask); + mma1_pa_n_fixed(Br, Tc_left, tile_acc_s, tile_v, acc_o, KV_Head, dim_size, t_r_idx, Tc, seq_size_q, seq_size_k, causal_mask); + } + scale_and_store(acc_o_ + thread_id * Br * dim_size, logsum_ + thread_id * Br, O + b_idx * seq_size_q * head_size * dim_size + t_r_idx * Br * head_size * dim_size + this_thread_head * dim_size, t_r_idx, head_size, dim_size); + } + if (Tr_left) { + init_temp(logsum_ + thread_id * Br, scoremax_ + thread_id * Br, acc_o_ + thread_id * Br * dim_size, dim_size); + for (int t_c_idx = 0; t_c_idx < Tc; ++t_c_idx) { + const dtype_t *tile_q = Q + b_idx * seq_size_q * head_size * dim_size + Tr * Br * head_size * dim_size + this_thread_head * dim_size; + const dtype_t *tile_k = K + b_idx * seq_size_k * KV_Head * dim_size + t_c_idx * Bc * KV_Head * dim_size + this_thread_kv_head * dim_size; + const dtype_t *tile_v = V + b_idx * seq_size_k * KV_Head * dim_size + t_c_idx * Bc * KV_Head * dim_size + this_thread_kv_head * dim_size; + acc_dtype_t *tile_acc_s = acc_s_ + thread_id * Br * Bc; + acc_dtype_t *acc_o = acc_o_ + thread_id * Br * dim_size; + mma0_pa_n_fixed(Tr_left, Bc, tile_q, tile_k, tile_acc_s, dim_size, head_size * dim_size, KV_Head * dim_size, Tr, t_c_idx, seq_size_q, seq_size_k, causal_mask); + softmax_pa_n_fixed(Tr_left, Bc, tile_acc_s, scoremax_ + thread_id * Br, scoremax_prev_ + thread_id * Br, score_scale_ + thread_id * Br, score_sum_ + thread_id * Br, logsum_ + thread_id * Br, local_scale, Tr, t_c_idx, seq_size_q, seq_size_k, causal_mask); + rescale_pa_n_fixed(Tr_left, Bc, acc_o, score_scale_ + thread_id * Br, dim_size, Tr, t_c_idx, seq_size_q, seq_size_k, causal_mask); + mma1_pa_n_fixed(Tr_left, Bc, tile_acc_s, tile_v, acc_o, KV_Head, dim_size, Tr, t_c_idx, seq_size_q, seq_size_k, causal_mask); + } + if (Tc_left) { + const dtype_t *tile_q = Q + b_idx * seq_size_q * head_size * 
dim_size + Tr * Br * head_size * dim_size + this_thread_head * dim_size; + const dtype_t *tile_k = K + b_idx * seq_size_k * KV_Head * dim_size + Tc * Bc * KV_Head * dim_size + this_thread_kv_head * dim_size; + const dtype_t *tile_v = V + b_idx * seq_size_k * KV_Head * dim_size + Tc * Bc * KV_Head * dim_size + this_thread_kv_head * dim_size; + acc_dtype_t *tile_acc_s = acc_s_ + thread_id * Br * Bc; + acc_dtype_t *acc_o = acc_o_ + thread_id * Br * dim_size; + mma0_pa_n_fixed(Tr_left, Tc_left, tile_q, tile_k, tile_acc_s, dim_size, head_size * dim_size, KV_Head * dim_size, Tr, Tc, seq_size_q, seq_size_k, causal_mask); + softmax_pa_n_fixed(Tr_left, Tc_left, tile_acc_s, scoremax_ + thread_id * Br, scoremax_prev_ + thread_id * Br, score_scale_ + thread_id * Br, score_sum_ + thread_id * Br, logsum_ + thread_id * Br, local_scale, Tr, Tc, seq_size_q, seq_size_k, causal_mask); + rescale_pa_n_fixed(Tr_left, Tc_left, acc_o, score_scale_ + thread_id * Br, dim_size, Tr, Tc, seq_size_q, seq_size_k, causal_mask); + mma1_pa_n_fixed(Tr_left, Tc_left, tile_acc_s, tile_v, acc_o, KV_Head, dim_size, Tr, Tc, seq_size_q, seq_size_k, causal_mask); + } + scale_and_store_pa_n_fixed(Tr_left, acc_o_ + thread_id * Br * dim_size, logsum_ + thread_id * Br, O + b_idx * seq_size_q * head_size * dim_size + Tr * Br * head_size * dim_size + this_thread_head * dim_size, Tr, head_size, dim_size); + } + } + } + } + + inline void __fa2_decode(const dtype_t *__restrict__ Q, const dtype_t *__restrict__ K, + const dtype_t *__restrict__ V, dtype_t *__restrict__ O, + const int32_t batch_size, const int32_t head_size, + const int32_t seq_size_q, const int32_t seq_size_k, + const int32_t dim_size, bool causal_mask = true) { + const int32_t Tr = 1; + const int32_t Tc = seq_size_k / Bc; + const int32_t Tc_left = seq_size_k % Bc; + + const float local_scale = 1.0f / sqrtf(static_cast(dim_size)); + const int32_t kv_group_size = (Q_Head > 0 && KV_Head > 0) ? 
Q_Head / KV_Head : 1; + + for (int32_t b_idx = 0; b_idx < batch_size; ++b_idx) { +#pragma omp parallel for num_threads(threads) schedule(dynamic, 1) if (threads > 1) + for (int32_t h_idx = 0; h_idx < head_size; ++h_idx) { + const int32_t thread_id = omp_get_thread_num(); + const int32_t this_thread_head = h_idx; + const int32_t this_thread_kv_head = this_thread_head / kv_group_size; + + for (int t_r_idx = 0; t_r_idx < Tr; ++t_r_idx) { + init_temp_d(logsum_ + thread_id * Br, scoremax_ + thread_id * Br, acc_o_ + thread_id * Br * dim_size, dim_size); + for (int t_c_idx = 0; t_c_idx < Tc; ++t_c_idx) { + const dtype_t *tile_q = Q + b_idx * seq_size_q * head_size * dim_size + t_r_idx * 1 * head_size * dim_size + this_thread_head * dim_size; + const dtype_t *tile_k = K + b_idx * seq_size_k * KV_Head * dim_size + t_c_idx * Bc * KV_Head * dim_size + this_thread_kv_head * dim_size; + const dtype_t *tile_v = V + b_idx * seq_size_k * KV_Head * dim_size + t_c_idx * Bc * KV_Head * dim_size + this_thread_kv_head * dim_size; + + acc_dtype_t *tile_acc_s = acc_s_ + thread_id * Br * Bc; + acc_dtype_t *acc_o = acc_o_ + thread_id * Br * dim_size; + + mma0_d(tile_q, tile_k, tile_acc_s, dim_size, KV_Head * dim_size, t_r_idx, t_c_idx, seq_size_q, seq_size_k, causal_mask); + softmax_d(tile_acc_s, scoremax_ + thread_id * Br, scoremax_prev_ + thread_id * Br, score_scale_ + thread_id * Br, score_sum_ + thread_id * Br, logsum_ + thread_id * Br, local_scale, t_r_idx, t_c_idx, seq_size_q, seq_size_k, causal_mask); + rescale_d(acc_o, score_scale_ + thread_id * Br, dim_size, t_r_idx, t_c_idx, seq_size_q, seq_size_k, causal_mask); + mma1_d(tile_acc_s, tile_v, acc_o, KV_Head, dim_size, t_r_idx, t_c_idx, seq_size_q, seq_size_k, causal_mask); + } + if (Tc_left) { + const dtype_t *tile_q = Q + b_idx * seq_size_q * head_size * dim_size + t_r_idx * 1 * head_size * dim_size + this_thread_head * dim_size; + const dtype_t *tile_k = K + b_idx * seq_size_k * KV_Head * dim_size + Tc * Bc * KV_Head * dim_size + 
this_thread_kv_head * dim_size; + const dtype_t *tile_v = V + b_idx * seq_size_k * KV_Head * dim_size + Tc * Bc * KV_Head * dim_size + this_thread_kv_head * dim_size; + acc_dtype_t *tile_acc_s = acc_s_ + thread_id * Br * Bc; + acc_dtype_t *acc_o = acc_o_ + thread_id * Br * dim_size; + mma0_d_n_fixed(Tc_left, tile_q, tile_k, tile_acc_s, dim_size, KV_Head * dim_size, t_r_idx, Tc, seq_size_q, seq_size_k, causal_mask); + softmax_d_n_fixed(Tc_left, tile_acc_s, scoremax_ + thread_id * Br, scoremax_prev_ + thread_id * Br, score_scale_ + thread_id * Br, score_sum_ + thread_id * Br, logsum_ + thread_id * Br, local_scale, t_r_idx, Tc, seq_size_q, seq_size_k, causal_mask); + rescale_d_n_fixed(Tc_left, acc_o, score_scale_ + thread_id * Br, dim_size, t_r_idx, Tc, seq_size_q, seq_size_k, causal_mask); + mma1_d_n_fixed(Tc_left, tile_acc_s, tile_v, acc_o, KV_Head, dim_size, t_r_idx, Tc, seq_size_q, seq_size_k, causal_mask); + } + scale_and_store_d(acc_o_ + thread_id * Br * dim_size, logsum_ + thread_id * Br, O + b_idx * seq_size_q * head_size * dim_size + t_r_idx * 1 * head_size * dim_size + this_thread_head * dim_size, t_r_idx, head_size, dim_size); + } + } + } + } + + inline void init_temp(acc_dtype_t *logsum, acc_dtype_t *scoremax, acc_dtype_t *acc_o, const int32_t dim_size) { +#ifdef __AVX2__ + __m256 zero_vec = _mm256_set1_ps(0.0f); + __m256 neg_inf_vec = _mm256_set1_ps(NEG_INF); + + int i = 0; + for (; i <= Br - 8; i += 8) { + _mm256_storeu_ps(logsum + i, zero_vec); + _mm256_storeu_ps(scoremax + i, neg_inf_vec); + } + for (; i < Br; ++i) { + logsum[i] = 0.0f; + scoremax[i] = NEG_INF; + } + + for (int j = 0; j < Br * dim_size; j += 8) { + _mm256_storeu_ps(acc_o + j, zero_vec); + } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + float32x4_t zero_vec = vdupq_n_f32(0.0f); + float32x4_t neg_inf_vec = vdupq_n_f32(NEG_INF); + + int i = 0; + for (; i <= Br - 4; i += 4) { + vst1q_f32(logsum + i, zero_vec); + vst1q_f32(scoremax + i, neg_inf_vec); + } + for (; i < Br; ++i) { + 
logsum[i] = 0.0f; + scoremax[i] = NEG_INF; + } + + for (int j = 0; j < Br * dim_size; j += 4) { + vst1q_f32(acc_o + j, zero_vec); + } +#endif + } + + inline void mma0(const dtype_t *__restrict__ q_block, const dtype_t *__restrict__ k_block, + acc_dtype_t *__restrict__ acc_s, const int32_t dim_size, + const int32_t q_stride_size, const int32_t kv_stride_size, + const int32_t t_r_idx, const int32_t t_c_idx, + const int32_t seq_size_q, const int32_t seq_size_k, bool causal_mask) { +#ifdef __AVX2__ + // AVX2 implementation remains unchanged. + const int32_t global_r_start = t_r_idx * Br; + const int32_t global_r_end = global_r_start + Br; + const int32_t global_c_start = t_c_idx * Bc; + int delta_pos = seq_size_k - seq_size_q; + + if (causal_mask && (global_c_start - delta_pos > (global_r_end - 1))) { return; } + +#pragma unroll + for (int32_t b_r_idx = 0; b_r_idx < Br; ++b_r_idx) { + const dtype_t *q_block_line = q_block + b_r_idx * q_stride_size; +#pragma unroll + for (int32_t b_c_idx = 0; b_c_idx < Bc; ++b_c_idx) { + const dtype_t *k_block_line = k_block + b_c_idx * kv_stride_size; + + __m256 sum_vec = _mm256_setzero_ps(); + int i = 0; + for (; i <= dim_size - 8; i += 8) { + __builtin_prefetch(q_block_line + i + 64); + __builtin_prefetch(k_block_line + i + 64); + __m256 q_vec = _mm256_loadu_ps(q_block_line + i); + __m256 k_vec = _mm256_loadu_ps(k_block_line + i); + sum_vec = _mm256_fmadd_ps(q_vec, k_vec, sum_vec); + } + acc_dtype_t total = _mm256_hadd_ps(sum_vec); + for (; i < dim_size; ++i) { total += q_block_line[i] * k_block_line[i]; } + + acc_s[b_r_idx * Bc + b_c_idx] = total; + } + } + + if (causal_mask && (global_r_end == (t_c_idx * Bc + Bc) - delta_pos)) { + for (int i = 0; i < Br; ++i) { + for (int j = 0; j < Bc; ++j) { + if (j > i) { acc_s[i * Bc + j] = NEG_INF; } + } + } + } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + const int32_t global_r_start = t_r_idx * Br; + const int32_t global_r_end = global_r_start + Br; + const int32_t global_c_start 
= t_c_idx * Bc; + int delta_pos = seq_size_k - seq_size_q; + + if (causal_mask && (global_c_start - delta_pos > (global_r_end - 1))) { return; } + + for (int32_t b_r_base = 0; b_r_base < Br; b_r_base += 4) { + for (int32_t b_c_base = 0; b_c_base < Bc; b_c_base += 4) { + float32x4_t accumulators[16]; + for (int i = 0; i < 16; ++i) { + accumulators[i] = vdupq_n_f32(0.0f); + } + + const dtype_t *q0_ptr = q_block + (b_r_base + 0) * q_stride_size; + const dtype_t *q1_ptr = q_block + (b_r_base + 1) * q_stride_size; + const dtype_t *q2_ptr = q_block + (b_r_base + 2) * q_stride_size; + const dtype_t *q3_ptr = q_block + (b_r_base + 3) * q_stride_size; + + for (int k = 0; k < dim_size; k += 4) { + float32x4_t q_vec0 = vld1q_f32(q0_ptr + k); + float32x4_t q_vec1 = vld1q_f32(q1_ptr + k); + float32x4_t q_vec2 = vld1q_f32(q2_ptr + k); + float32x4_t q_vec3 = vld1q_f32(q3_ptr + k); + + float32x4_t k_vec; + + k_vec = vld1q_f32(k_block + (b_c_base + 0) * kv_stride_size + k); + accumulators[0] = vfmaq_f32(accumulators[0], q_vec0, k_vec); + accumulators[4] = vfmaq_f32(accumulators[4], q_vec1, k_vec); + accumulators[8] = vfmaq_f32(accumulators[8], q_vec2, k_vec); + accumulators[12] = vfmaq_f32(accumulators[12], q_vec3, k_vec); + + k_vec = vld1q_f32(k_block + (b_c_base + 1) * kv_stride_size + k); + accumulators[1] = vfmaq_f32(accumulators[1], q_vec0, k_vec); + accumulators[5] = vfmaq_f32(accumulators[5], q_vec1, k_vec); + accumulators[9] = vfmaq_f32(accumulators[9], q_vec2, k_vec); + accumulators[13] = vfmaq_f32(accumulators[13], q_vec3, k_vec); + + k_vec = vld1q_f32(k_block + (b_c_base + 2) * kv_stride_size + k); + accumulators[2] = vfmaq_f32(accumulators[2], q_vec0, k_vec); + accumulators[6] = vfmaq_f32(accumulators[6], q_vec1, k_vec); + accumulators[10] = vfmaq_f32(accumulators[10], q_vec2, k_vec); + accumulators[14] = vfmaq_f32(accumulators[14], q_vec3, k_vec); + + k_vec = vld1q_f32(k_block + (b_c_base + 3) * kv_stride_size + k); + accumulators[3] = vfmaq_f32(accumulators[3], 
q_vec0, k_vec); + accumulators[7] = vfmaq_f32(accumulators[7], q_vec1, k_vec); + accumulators[11] = vfmaq_f32(accumulators[11], q_vec2, k_vec); + accumulators[15] = vfmaq_f32(accumulators[15], q_vec3, k_vec); + } + + for (int i = 0; i < 4; ++i) { + for (int j = 0; j < 4; ++j) { + acc_s[(b_r_base + i) * Bc + (b_c_base + j)] = vaddvq_f32(accumulators[i * 4 + j]); + } + } + } + } + if (causal_mask) { + for (int i = 0; i < Br; ++i) { + for (int j = 0; j < Bc; ++j) { + if ((global_c_start + j) > (global_r_start + i + delta_pos)) { + acc_s[i * Bc + j] = NEG_INF; + } + } + } + } +#endif + } + + inline void softmax(acc_dtype_t *__restrict__ acc_s, acc_dtype_t *scoremax, acc_dtype_t *scoremax_prev, + acc_dtype_t *score_scale, acc_dtype_t *score_sum, acc_dtype_t *logsum, + const float scale, + const int32_t t_r_idx, const int32_t t_c_idx, + const int32_t seq_size_q, const int32_t seq_size_k, bool causal_mask) { + if (high_precision) { +#ifdef __AVX2__ + const int32_t global_r_start = t_r_idx * Br; + const int32_t global_c_start = t_c_idx * Bc; + int delta_pos = seq_size_k - seq_size_q; + if (causal_mask && (global_c_start - delta_pos > (global_r_start + Br - 1))) return; + memcpy(scoremax_prev, scoremax, Br * sizeof(acc_dtype_t)); + for (int br = 0; br < Br; ++br) { + __m256 max_vec = _mm256_set1_ps(scoremax[br]); + acc_dtype_t *row = acc_s + br * Bc; + int bc = 0; + for (; bc <= Bc - 8; bc += 8) { max_vec = _mm256_max_ps(max_vec, _mm256_loadu_ps(row + bc)); } + float max_val = _mm256_hmax_ps(max_vec); + for (; bc < Bc; ++bc) { max_val = fmaxf(max_val, row[bc]); } + scoremax[br] = max_val; + } + for (int br = 0; br < Br; ++br) { score_scale[br] = expf((scoremax_prev[br] - scoremax[br]) * scale); } + for (int br = 0; br < Br; ++br) { + const float sm = scoremax[br]; + acc_dtype_t *row = acc_s + br * Bc; + float sum = 0.0f; + for (int bc = 0; bc < Bc; ++bc) { + float val = expf((row[bc] - sm) * scale); + row[bc] = val; + sum += val; + } + score_sum[br] = sum; + } + for (int br 
= 0; br < Br; ++br) { logsum[br] = logsum[br] * score_scale[br] + score_sum[br]; } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + const int32_t global_r_start = t_r_idx * Br; + const int32_t global_c_start = t_c_idx * Bc; + int delta_pos = seq_size_k - seq_size_q; + if (causal_mask && (global_c_start - delta_pos > (global_r_start + Br - 1))) return; + memcpy(scoremax_prev, scoremax, Br * sizeof(acc_dtype_t)); + for (int br = 0; br < Br; ++br) { + float32x4_t max_vec = vdupq_n_f32(scoremax[br]); + acc_dtype_t *row = acc_s + br * Bc; + int bc = 0; + for (; bc <= Bc - 4; bc += 4) { + max_vec = vmaxq_f32(max_vec, vld1q_f32(row + bc)); + } + float max_val = _vmaxvq_f32_hmax(max_vec); + for (; bc < Bc; ++bc) { max_val = fmaxf(max_val, row[bc]); } + scoremax[br] = max_val; + } + for (int br = 0; br < Br; ++br) { + score_scale[br] = expf((scoremax_prev[br] - scoremax[br]) * scale); + } + for (int br = 0; br < Br; ++br) { + const float sm = scoremax[br]; + acc_dtype_t *row = acc_s + br * Bc; + float sum = 0.0f; + for (int bc = 0; bc < Bc; ++bc) { + float val = expf((row[bc] - sm) * scale); + row[bc] = val; + sum += val; + } + score_sum[br] = sum; + } + for (int br = 0; br < Br; ++br) { + logsum[br] = logsum[br] * score_scale[br] + score_sum[br]; + } +#endif + } else { +#ifdef __AVX2__ + const int32_t global_r_start = t_r_idx * Br; + const int32_t global_c_start = t_c_idx * Bc; + int delta_pos = seq_size_k - seq_size_q; + if (causal_mask && (global_c_start - delta_pos > (global_r_start + Br - 1))) return; + memcpy(scoremax_prev, scoremax, Br * sizeof(acc_dtype_t)); + for (int br = 0; br < Br; ++br) { + __m256 max_vec = _mm256_set1_ps(scoremax[br]); + acc_dtype_t *row = acc_s + br * Bc; + int bc = 0; + for (; bc <= Bc - 8; bc += 8) { max_vec = _mm256_max_ps(max_vec, _mm256_loadu_ps(row + bc)); } + float max_val = _mm256_hmax_ps(max_vec); + for (; bc < Bc; ++bc) { max_val = fmaxf(max_val, row[bc]); } + scoremax[br] = max_val; + } + for (int br = 0; br < Br; ++br) { 
score_scale[br] = expf((scoremax_prev[br] - scoremax[br]) * scale); } + for (int br = 0; br < Br; ++br) { + const float sm = scoremax[br]; + acc_dtype_t *row = acc_s + br * Bc; + float sum = 0.0f; + for (int bc = 0; bc < Bc; ++bc) { + float val = expf((row[bc] - sm) * scale); + row[bc] = val; + sum += val; + } + score_sum[br] = sum; + } + for (int br = 0; br < Br; ++br) { logsum[br] = logsum[br] * score_scale[br] + score_sum[br]; } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + const int32_t global_r_start = t_r_idx * Br; + const int32_t global_c_start = t_c_idx * Bc; + int delta_pos = seq_size_k - seq_size_q; + if (causal_mask && (global_c_start - delta_pos > (global_r_start + Br - 1))) return; + memcpy(scoremax_prev, scoremax, Br * sizeof(acc_dtype_t)); + for (int br = 0; br < Br; ++br) { + float32x4_t max_vec = vdupq_n_f32(scoremax[br]); + acc_dtype_t *row = acc_s + br * Bc; + int bc = 0; + for (; bc <= Bc - 4; bc += 4) { + max_vec = vmaxq_f32(max_vec, vld1q_f32(row + bc)); + } + float max_val = vmaxvq_f32(max_vec); + for (; bc < Bc; ++bc) { max_val = fmaxf(max_val, row[bc]); } + scoremax[br] = max_val; + } + for (int br = 0; br < Br; ++br) { + score_scale[br] = expf((scoremax_prev[br] - scoremax[br]) * scale); + } + for (int br = 0; br < Br; ++br) { + const float sm = scoremax[br]; + acc_dtype_t *row = acc_s + br * Bc; + float32x4_t sum_vec = vdupq_n_f32(0.0f); + const float32x4_t sm_vec = vdupq_n_f32(sm); + const float32x4_t scale_vec = vdupq_n_f32(scale); + int bc = 0; + for (; bc <= Bc - 4; bc += 4) { + float32x4_t s_vec = vld1q_f32(row + bc); + float32x4_t scaled_s_vec = vmulq_f32(vsubq_f32(s_vec, sm_vec), scale_vec); + float32x4_t p_vec = exp_ps_f32(scaled_s_vec); + vst1q_f32(row + bc, p_vec); + sum_vec = vaddq_f32(sum_vec, p_vec); + } + float sum = vaddvq_f32(sum_vec); + for (; bc < Bc; ++bc) { + float val = expf((row[bc] - sm) * scale); + row[bc] = val; + sum += val; + } + score_sum[br] = sum; + } + for (int br = 0; br < Br; ++br) { + logsum[br] 
= logsum[br] * score_scale[br] + score_sum[br]; + } +#endif + } + } + + inline void rescale(acc_dtype_t *__restrict__ acc_o, acc_dtype_t *__restrict__ score_scale, + const int32_t dim_size, const int32_t t_r_idx, const int32_t t_c_idx, + const int32_t seq_size_q, const int32_t seq_size_k, bool causal_mask) { +#ifdef __AVX2__ + const int32_t global_r_start = t_r_idx * Br; + const int32_t global_c_start = t_c_idx * Bc; + int delta_pos = seq_size_k - seq_size_q; + if (causal_mask && (global_c_start - delta_pos > (global_r_start + Br - 1))) return; + +#pragma unroll + for (int i = 0; i < Br; ++i) { + __m256 scale_v = _mm256_set1_ps(score_scale[i]); + float *row_ptr = acc_o + i * dim_size; + for (int j = 0; j < dim_size; j += 8) { + __m256 acc = _mm256_loadu_ps(row_ptr + j); + acc = _mm256_mul_ps(acc, scale_v); + _mm256_storeu_ps(row_ptr + j, acc); + } + } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + const int32_t global_r_start = t_r_idx * Br; + const int32_t global_c_start = t_c_idx * Bc; + int delta_pos = seq_size_k - seq_size_q; + if (causal_mask && (global_c_start - delta_pos > (global_r_start + Br - 1))) return; + + for (int i = 0; i < Br; ++i) { + float32x4_t scale_v = vdupq_n_f32(score_scale[i]); + float *row_ptr = acc_o + i * dim_size; + for (int j = 0; j < dim_size; j += 4) { + float32x4_t acc = vld1q_f32(row_ptr + j); + acc = vmulq_f32(acc, scale_v); + vst1q_f32(row_ptr + j, acc); + } + } +#endif + } + + inline void mma1(const acc_dtype_t *__restrict__ w_block, const dtype_t *__restrict__ v_block, + acc_dtype_t *__restrict__ acc_o, const int32_t kv_head_size, const int32_t dim_size, + const int32_t t_r_idx, const int32_t t_c_idx, const int32_t seq_size_q, + const int32_t seq_size_k, bool causal_mask) { +#ifdef __AVX2__ + // AVX2 implementation remains unchanged. 
+ const int32_t global_r_start = t_r_idx * Br; + const int32_t global_c_start = t_c_idx * Bc; + int delta_pos = seq_size_k - seq_size_q; + if (causal_mask && (global_c_start - delta_pos > (global_r_start + Br - 1))) return; + + const int32_t v_stride_size = kv_head_size * dim_size; + +#pragma unroll + for (int b_r_idx = 0; b_r_idx < Br; ++b_r_idx) { + for (int d_base = 0; d_base < dim_size; d_base += 8) { + __m256 acc = _mm256_loadu_ps(acc_o + b_r_idx * dim_size + d_base); +#pragma unroll + for (int b_c_idx = 0; b_c_idx < Bc; ++b_c_idx) { + __m256 w_vec = _mm256_set1_ps(w_block[b_r_idx * Bc + b_c_idx]); + const float *v_ptr = v_block + b_c_idx * v_stride_size + d_base; + __m256 v_vec = _mm256_loadu_ps(v_ptr); + acc = _mm256_fmadd_ps(w_vec, v_vec, acc); + } + _mm256_storeu_ps(acc_o + b_r_idx * dim_size + d_base, acc); + } + } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + const int32_t global_r_start = t_r_idx * Br; + const int32_t global_c_start = t_c_idx * Bc; + int delta_pos = seq_size_k - seq_size_q; + if (causal_mask && (global_c_start - delta_pos > (global_r_start + Br - 1))) return; + + const int32_t v_stride_size = kv_head_size * dim_size; + + for (int d_base = 0; d_base < dim_size; d_base += 4) { + float32x4_t acc0 = vld1q_f32(acc_o + 0 * dim_size + d_base); + float32x4_t acc1 = vld1q_f32(acc_o + 1 * dim_size + d_base); + float32x4_t acc2 = vld1q_f32(acc_o + 2 * dim_size + d_base); + float32x4_t acc3 = vld1q_f32(acc_o + 3 * dim_size + d_base); + + for (int b_c_idx = 0; b_c_idx < Bc; ++b_c_idx) { + const dtype_t *v_ptr = v_block + b_c_idx * v_stride_size + d_base; + float32x4_t v_vec = vld1q_f32(v_ptr); + + float32x4_t w_vec; + + w_vec = vdupq_n_f32(w_block[0 * Bc + b_c_idx]); // P[0][b_c_idx] + acc0 = vfmaq_f32(acc0, v_vec, w_vec); + + w_vec = vdupq_n_f32(w_block[1 * Bc + b_c_idx]); // P[1][b_c_idx] + acc1 = vfmaq_f32(acc1, v_vec, w_vec); + + w_vec = vdupq_n_f32(w_block[2 * Bc + b_c_idx]); // P[2][b_c_idx] + acc2 = vfmaq_f32(acc2, v_vec, w_vec); + 
+ w_vec = vdupq_n_f32(w_block[3 * Bc + b_c_idx]); // P[3][b_c_idx] + acc3 = vfmaq_f32(acc3, v_vec, w_vec); + } + + vst1q_f32(acc_o + 0 * dim_size + d_base, acc0); + vst1q_f32(acc_o + 1 * dim_size + d_base, acc1); + vst1q_f32(acc_o + 2 * dim_size + d_base, acc2); + vst1q_f32(acc_o + 3 * dim_size + d_base, acc3); + } +#endif + } + + inline void scale_and_store(const acc_dtype_t *__restrict__ acc_o, + const acc_dtype_t *__restrict__ logsum, + dtype_t *__restrict__ o_block, const int32_t t_r_idx, + const int32_t head_size, const int32_t dim_size) { +#ifdef __AVX2__ +#pragma unroll + for (int i = 0; i < Br; ++i) { + dtype_t *o_block_line = o_block + i * head_size * dim_size; + __m256 reciprocal_logsum_vec = _mm256_set1_ps(1.0f / logsum[i]); + int j = 0; + for (; j <= dim_size - 8; j += 8) { + __m256 vec_acc_o = _mm256_loadu_ps(acc_o + i * dim_size + j); + __m256 result_vec = _mm256_mul_ps(vec_acc_o, reciprocal_logsum_vec); + _mm256_storeu_ps(o_block_line + j, result_vec); + } + float reciprocal_logsum = 1.0f / logsum[i]; + for (; j < dim_size; ++j) { + o_block_line[j] = acc_o[i * dim_size + j] * reciprocal_logsum; + } + } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + for (int i = 0; i < Br; ++i) { + dtype_t *o_block_line = o_block + i * head_size * dim_size; + float32x4_t reciprocal_logsum_vec = vdupq_n_f32(1.0f / logsum[i]); + int j = 0; + for (; j <= dim_size - 4; j += 4) { + float32x4_t vec_acc_o = vld1q_f32(acc_o + i * dim_size + j); + float32x4_t result_vec = vmulq_f32(vec_acc_o, reciprocal_logsum_vec); + vst1q_f32(o_block_line + j, result_vec); + } + float reciprocal_logsum = 1.0f / logsum[i]; + for (; j < dim_size; ++j) { + o_block_line[j] = acc_o[i * dim_size + j] * reciprocal_logsum; + } + } +#endif + } + + inline void mma0_pa_n_fixed(const int32_t Br_n_fixed, const int32_t Bc_n_fixed, + const dtype_t *__restrict__ q_block, + const dtype_t *__restrict__ k_block, acc_dtype_t *__restrict__ acc_s, + const int32_t dim_size, const int32_t q_stride_size, 
const int32_t kv_stride_size, + const int32_t t_r_idx, const int32_t t_c_idx, + const int32_t seq_size_q, const int32_t seq_size_k, + bool causal_mask) { +#ifdef __AVX2__ + const int32_t global_r_start = t_r_idx * Br; + const int32_t global_r_end = global_r_start + Br_n_fixed; + const int32_t global_c_start = t_c_idx * Bc; + int delta_pos = seq_size_k - seq_size_q; + if (causal_mask && (global_c_start - delta_pos > (global_r_end - 1))) { return; } + + for (int32_t b_r_idx = 0; b_r_idx < Br_n_fixed; ++b_r_idx) { + const dtype_t *q_block_line = q_block + b_r_idx * q_stride_size; + for (int32_t b_c_idx = 0; b_c_idx < Bc_n_fixed; ++b_c_idx) { + const dtype_t *k_block_line = k_block + b_c_idx * kv_stride_size; + __m256 sum_vec = _mm256_setzero_ps(); + int i = 0; + for (; i <= dim_size - 8; i += 8) { + sum_vec = _mm256_fmadd_ps(_mm256_loadu_ps(q_block_line + i), _mm256_loadu_ps(k_block_line + i), sum_vec); + } + acc_dtype_t total = _mm256_hadd_ps(sum_vec); + for (; i < dim_size; ++i) { total += q_block_line[i] * k_block_line[i]; } + acc_s[b_r_idx * Bc + b_c_idx] = total; + } + } + + if (causal_mask && (global_r_end == (global_c_start + Bc_n_fixed) - delta_pos)) { + for (int i = 0; i < Br_n_fixed; ++i) { + for (int j = 0; j < Bc_n_fixed; ++j) { + if (j > i) { acc_s[i * Bc + j] = NEG_INF; } + } + } + } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + const int32_t global_r_start = t_r_idx * Br; + const int32_t global_r_end = global_r_start + Br_n_fixed; + const int32_t global_c_start = t_c_idx * Bc; + int delta_pos = seq_size_k - seq_size_q; + if (causal_mask && (global_c_start - delta_pos > (global_r_end - 1))) { return; } + for (int32_t b_r_idx = 0; b_r_idx < Br_n_fixed; ++b_r_idx) { + const dtype_t *q_block_line = q_block + b_r_idx * q_stride_size; + for (int32_t b_c_idx = 0; b_c_idx < Bc_n_fixed; ++b_c_idx) { + const dtype_t *k_block_line = k_block + b_c_idx * kv_stride_size; + + float32x4_t sum_vec = vdupq_n_f32(0.0f); + int k = 0; + for (; k <= dim_size - 4; k 
+= 4) { + float32x4_t q_vec = vld1q_f32(q_block_line + k); + float32x4_t k_vec = vld1q_f32(k_block_line + k); + sum_vec = vfmaq_f32(sum_vec, q_vec, k_vec); + } + acc_dtype_t total = vaddvq_f32(sum_vec); + for (; k < dim_size; ++k) { + total += q_block_line[k] * k_block_line[k]; + } + acc_s[b_r_idx * Bc + b_c_idx] = total; + } + } + if (causal_mask) { + for (int i = 0; i < Br_n_fixed; ++i) { + for (int j = 0; j < Bc_n_fixed; ++j) { + if ((global_c_start + j) > (global_r_start + i + delta_pos)) { + acc_s[i * Bc + j] = NEG_INF; + } + } + } + } +#endif + } + + inline void softmax_pa_n_fixed(const int32_t Br_n_fixed, const int32_t Bc_n_fixed, + acc_dtype_t *__restrict__ acc_s, acc_dtype_t *scoremax, + acc_dtype_t *scoremax_prev, acc_dtype_t *score_scale, + acc_dtype_t *score_sum, acc_dtype_t *logsum, + const float scale, + const int32_t t_r_idx, const int32_t t_c_idx, + const int32_t seq_size_q, const int32_t seq_size_k, bool causal_mask) { + const int32_t global_r_start = t_r_idx * Br; + const int32_t global_c_start = t_c_idx * Bc; + int delta_pos = seq_size_k - seq_size_q; + if (causal_mask && (global_c_start - delta_pos > (global_r_start + Br_n_fixed - 1))) return; + memcpy(scoremax_prev, scoremax, Br_n_fixed * sizeof(acc_dtype_t)); + for (int br = 0; br < Br_n_fixed; ++br) { + acc_dtype_t *row = acc_s + br * Bc; + float max_val = NEG_INF; + for (int bc = 0; bc < Bc_n_fixed; ++bc) max_val = fmaxf(max_val, row[bc]); + scoremax[br] = fmaxf(max_val, scoremax[br]); + } + for (int br = 0; br < Br_n_fixed; ++br) { + score_scale[br] = expf((scoremax_prev[br] - scoremax[br]) * scale); + } + for (int br = 0; br < Br_n_fixed; ++br) { + acc_dtype_t *row = acc_s + br * Bc; + float current_sum = 0.0f; + for (int bc = 0; bc < Bc_n_fixed; ++bc) { + float val = expf((row[bc] - scoremax[br]) * scale); + row[bc] = val; + current_sum += val; + } + score_sum[br] = current_sum; + } + for (int br = 0; br < Br_n_fixed; ++br) { logsum[br] = logsum[br] * score_scale[br] + score_sum[br]; } + 
} + + inline void rescale_pa_n_fixed(const int32_t Br_n_fixed, const int32_t Bc_n_fixed, + acc_dtype_t *__restrict__ acc_o, + acc_dtype_t *__restrict__ score_scale, const int32_t dim_size, + const int32_t t_r_idx, const int32_t t_c_idx, + const int32_t seq_size_q, const int32_t seq_size_k, + bool causal_mask) { +#ifdef __AVX2__ + const int32_t global_r_start = t_r_idx * Br; + const int32_t global_c_start = t_c_idx * Bc; + int delta_pos = seq_size_k - seq_size_q; + if (causal_mask && (global_c_start - delta_pos > (global_r_start + Br_n_fixed - 1))) return; + + for (int i = 0; i < Br_n_fixed; ++i) { + float *row_ptr = acc_o + i * dim_size; + __m256 scale_v = _mm256_set1_ps(score_scale[i]); + for (int j = 0; j < dim_size; j += 8) { + _mm256_storeu_ps(row_ptr + j, _mm256_mul_ps(_mm256_loadu_ps(row_ptr + j), scale_v)); + } + } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + const int32_t global_r_start = t_r_idx * Br; + const int32_t global_c_start = t_c_idx * Bc; + int delta_pos = seq_size_k - seq_size_q; + if (causal_mask && (global_c_start - delta_pos > (global_r_start + Br_n_fixed - 1))) return; + + for (int i = 0; i < Br_n_fixed; ++i) { + float *row_ptr = acc_o + i * dim_size; + float32x4_t scale_v = vdupq_n_f32(score_scale[i]); + for (int j = 0; j < dim_size; j += 4) { + vst1q_f32(row_ptr + j, vmulq_f32(vld1q_f32(row_ptr + j), scale_v)); + } + } +#endif + } + + inline void mma1_pa_n_fixed(const int32_t Br_n_fixed, const int32_t Bc_n_fixed, + const acc_dtype_t *__restrict__ w_block, + const dtype_t *__restrict__ v_block, acc_dtype_t *__restrict__ acc_o, + const int32_t kv_head_size, const int32_t dim_size, + const int32_t t_r_idx, const int32_t t_c_idx, + const int32_t seq_size_q, const int32_t seq_size_k, + bool causal_mask) { +#ifdef __AVX2__ + const int32_t global_r_start = t_r_idx * Br; + const int32_t global_c_start = t_c_idx * Bc; + int delta_pos = seq_size_k - seq_size_q; + if (causal_mask && (global_c_start - delta_pos > (global_r_start + Br_n_fixed 
- 1))) return; + + const int32_t v_stride_size = kv_head_size * dim_size; + + for (int b_r_idx = 0; b_r_idx < Br_n_fixed; ++b_r_idx) { + for (int d_base = 0; d_base < dim_size; d_base += 8) { + __m256 acc = _mm256_loadu_ps(acc_o + b_r_idx * dim_size + d_base); + for (int b_c_idx = 0; b_c_idx < Bc_n_fixed; ++b_c_idx) { + __m256 w_vec = _mm256_set1_ps(w_block[b_r_idx * Bc + b_c_idx]); + const float *v_ptr = v_block + b_c_idx * v_stride_size + d_base; + acc = _mm256_fmadd_ps(w_vec, _mm256_loadu_ps(v_ptr), acc); + } + _mm256_storeu_ps(acc_o + b_r_idx * dim_size + d_base, acc); + } + } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + const int32_t global_r_start = t_r_idx * Br; + const int32_t global_c_start = t_c_idx * Bc; + int delta_pos = seq_size_k - seq_size_q; + if (causal_mask && (global_c_start - delta_pos > (global_r_start + Br_n_fixed - 1))) return; + + const int32_t v_stride_size = kv_head_size * dim_size; + + for (int b_r_idx = 0; b_r_idx < Br_n_fixed; ++b_r_idx) { + for (int d_base = 0; d_base < dim_size; d_base += 4) { + float32x4_t acc_vec = vld1q_f32(acc_o + b_r_idx * dim_size + d_base); + for (int b_c_idx = 0; b_c_idx < Bc_n_fixed; ++b_c_idx) { + float32x4_t w_vec = vdupq_n_f32(w_block[b_r_idx * Bc + b_c_idx]); + const float *v_ptr = v_block + b_c_idx * v_stride_size + d_base; + acc_vec = vfmaq_f32(acc_vec, vld1q_f32(v_ptr), w_vec); + } + vst1q_f32(acc_o + b_r_idx * dim_size + d_base, acc_vec); + } + } +#endif + } + + inline void scale_and_store_pa_n_fixed(const int32_t Br_n_fixed, + const acc_dtype_t *__restrict__ acc_o, + const acc_dtype_t *__restrict__ logsum, + dtype_t *__restrict__ o_block, const int32_t t_r_idx, + const int32_t head_size, const int32_t dim_size) { +#ifdef __AVX2__ + for (int i = 0; i < Br_n_fixed; ++i) { + dtype_t *o_block_line = o_block + i * head_size * dim_size; + float reciprocal_logsum = 1.0f / logsum[i]; + __m256 reciprocal_logsum_vec = _mm256_set1_ps(reciprocal_logsum); + int j = 0; + for (; j <= dim_size - 8; j += 
8) { + __m256 vec_acc_o = _mm256_loadu_ps(acc_o + i * dim_size + j); + _mm256_storeu_ps(o_block_line + j, _mm256_mul_ps(vec_acc_o, reciprocal_logsum_vec)); + } + for (; j < dim_size; ++j) { + o_block_line[j] = acc_o[i * dim_size + j] * reciprocal_logsum; + } + } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + for (int i = 0; i < Br_n_fixed; ++i) { + dtype_t *o_block_line = o_block + i * head_size * dim_size; + float reciprocal_logsum = 1.0f / logsum[i]; + float32x4_t reciprocal_logsum_vec = vdupq_n_f32(reciprocal_logsum); + int j = 0; + for (; j <= dim_size - 4; j += 4) { + float32x4_t vec_acc_o = vld1q_f32(acc_o + i * dim_size + j); + vst1q_f32(o_block_line + j, vmulq_f32(vec_acc_o, reciprocal_logsum_vec)); + } + for (; j < dim_size; ++j) { + o_block_line[j] = acc_o[i * dim_size + j] * reciprocal_logsum; + } + } +#endif + } + + // Decode mode functions + inline void init_temp_d(acc_dtype_t *logsum, acc_dtype_t *scoremax, acc_dtype_t *acc_o, + const int32_t dim_size) { +#ifdef __AVX2__ + logsum[0] = 0.0f; + scoremax[0] = NEG_INF; + __m256 zero_vec = _mm256_setzero_ps(); + for (int i = 0; i < 1 * dim_size; i += 8) { _mm256_storeu_ps(acc_o + i, zero_vec); } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + logsum[0] = 0.0f; + scoremax[0] = NEG_INF; + float32x4_t zero_vec = vdupq_n_f32(0.0f); + for (int i = 0; i < 1 * dim_size; i += 4) { + vst1q_f32(acc_o + i, zero_vec); + } +#endif + } + + inline void mma0_d(const dtype_t *__restrict__ q_block, const dtype_t *__restrict__ k_block, + acc_dtype_t *__restrict__ acc_s, const int32_t dim_size, + const int32_t kv_stride_size, const int32_t t_r_idx, const int32_t t_c_idx, + const int32_t seq_size_q, const int32_t seq_size_k, bool causal_mask) { +#ifdef __AVX2__ + const dtype_t *q_block_line = q_block; +#pragma unroll + for (int32_t b_c_idx = 0; b_c_idx < Bc; ++b_c_idx) { + const dtype_t *k_block_line = k_block + b_c_idx * kv_stride_size; + __m256 sum_vec = _mm256_setzero_ps(); + int i = 0; + for (; i <= 
dim_size - 8; i += 8) { + sum_vec = _mm256_fmadd_ps(_mm256_loadu_ps(q_block_line + i), _mm256_loadu_ps(k_block_line + i), sum_vec); + } + acc_dtype_t total = _mm256_hadd_ps(sum_vec); + for (; i < dim_size; ++i) { total += q_block_line[i] * k_block_line[i]; } + acc_s[b_c_idx] = total; + } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + const dtype_t *q_block_line = q_block; + for (int32_t b_c_idx = 0; b_c_idx < Bc; ++b_c_idx) { + const dtype_t *k_block_line = k_block + b_c_idx * kv_stride_size; + float32x4_t sum_vec = vdupq_n_f32(0.0f); + int i = 0; + for (; i <= dim_size - 4; i += 4) { + sum_vec = vfmaq_f32(sum_vec, vld1q_f32(q_block_line + i), vld1q_f32(k_block_line + i)); + } + acc_dtype_t total = _vaddvq_f32_hadd(sum_vec); + for (; i < dim_size; ++i) { total += q_block_line[i] * k_block_line[i]; } + acc_s[b_c_idx] = total; + } +#endif + } + + inline void softmax_d(acc_dtype_t *__restrict__ acc_s, acc_dtype_t *scoremax, + acc_dtype_t *scoremax_prev, acc_dtype_t *score_scale, + acc_dtype_t *score_sum, acc_dtype_t *logsum, + const float scale, + const int32_t t_r_idx, + const int32_t t_c_idx, const int32_t seq_size_q, + const int32_t seq_size_k, bool causal_mask) { +#ifdef __AVX2__ + scoremax_prev[0] = scoremax[0]; + float max_val = NEG_INF; + for (int bc = 0; bc < Bc; ++bc) max_val = fmaxf(max_val, acc_s[bc]); + scoremax[0] = fmaxf(max_val, scoremax[0]); + score_scale[0] = expf((scoremax_prev[0] - scoremax[0]) * scale); + float current_sum = 0.0f; + for (int bc = 0; bc < Bc; ++bc) { + float val = expf((acc_s[bc] - scoremax[0]) * scale); + acc_s[bc] = val; + current_sum += val; + } + score_sum[0] = current_sum; + logsum[0] = logsum[0] * score_scale[0] + score_sum[0]; + +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + scoremax_prev[0] = scoremax[0]; + float32x4_t max_vec = vdupq_n_f32(scoremax[0]); + int bc = 0; + for (; bc <= Bc - 4; bc += 4) { + max_vec = vmaxq_f32(max_vec, vld1q_f32(acc_s + bc)); + } + float max_val = vmaxvq_f32(max_vec); + for (; 
bc < Bc; ++bc) { + max_val = fmaxf(max_val, acc_s[bc]); + } + scoremax[0] = max_val; + score_scale[0] = expf((scoremax_prev[0] - scoremax[0]) * scale); + float32x4_t sum_vec = vdupq_n_f32(0.0f); + const float32x4_t sm_vec = vdupq_n_f32(scoremax[0]); + const float32x4_t scale_vec = vdupq_n_f32(scale); + bc = 0; + for (; bc <= Bc - 4; bc += 4) { + float32x4_t s_vec = vld1q_f32(acc_s + bc); + float32x4_t scaled_s_vec = vmulq_f32(vsubq_f32(s_vec, sm_vec), scale_vec); + float32x4_t p_vec = exp_ps_f32(scaled_s_vec); + vst1q_f32(acc_s + bc, p_vec); + sum_vec = vaddq_f32(sum_vec, p_vec); + } + float current_sum = vaddvq_f32(sum_vec); + for (; bc < Bc; ++bc) { + float val = expf((acc_s[bc] - scoremax[0]) * scale); + acc_s[bc] = val; + current_sum += val; + } + score_sum[0] = current_sum; + logsum[0] = logsum[0] * score_scale[0] + score_sum[0]; +#endif + } + + inline void rescale_d(acc_dtype_t *__restrict__ acc_o, acc_dtype_t *__restrict__ score_scale, + const int32_t dim_size, const int32_t t_r_idx, const int32_t t_c_idx, + const int32_t seq_size_q, const int32_t seq_size_k, bool causal_mask) { +#ifdef __AVX2__ + __m256 scale_v = _mm256_set1_ps(score_scale[0]); + for (int j = 0; j < dim_size; j += 8) { + __m256 acc = _mm256_loadu_ps(acc_o + j); + acc = _mm256_mul_ps(acc, scale_v); + _mm256_storeu_ps(acc_o + j, acc); + } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + float32x4_t scale_v = vdupq_n_f32(score_scale[0]); + for (int j = 0; j < dim_size; j += 4) { + float32x4_t acc = vld1q_f32(acc_o + j); + acc = vmulq_f32(acc, scale_v); + vst1q_f32(acc_o + j, acc); + } +#endif + } + + inline void mma1_d(const acc_dtype_t *__restrict__ w_block, const dtype_t *__restrict__ v_block, + acc_dtype_t *__restrict__ acc_o, const int32_t kv_head_size, + const int32_t dim_size, const int32_t t_r_idx, const int32_t t_c_idx, + const int32_t seq_size_q, const int32_t seq_size_k, bool causal_mask) { +#ifdef __AVX2__ + const int32_t v_stride_size = kv_head_size * dim_size; + for (int 
d_base = 0; d_base < dim_size; d_base += 8) { + __m256 acc = _mm256_loadu_ps(acc_o + d_base); + for (int b_c_idx = 0; b_c_idx < Bc; ++b_c_idx) { + __m256 w_vec = _mm256_set1_ps(w_block[b_c_idx]); + const float *v_ptr = v_block + b_c_idx * v_stride_size + d_base; + acc = _mm256_fmadd_ps(w_vec, _mm256_loadu_ps(v_ptr), acc); + } + _mm256_storeu_ps(acc_o + d_base, acc); + } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + const int32_t v_stride_size = kv_head_size * dim_size; + int d_base = 0; + for (; d_base <= dim_size - 4; d_base += 4) { + float32x4_t acc_vec = vld1q_f32(acc_o + d_base); + for (int b_c_idx = 0; b_c_idx < Bc; ++b_c_idx) { + float32x4_t w_vec = vdupq_n_f32(w_block[b_c_idx]); + const float *v_ptr = v_block + b_c_idx * v_stride_size + d_base; + acc_vec = vfmaq_f32(acc_vec, vld1q_f32(v_ptr), w_vec); + } + vst1q_f32(acc_o + d_base, acc_vec); + } + for (; d_base < dim_size; ++d_base) { + float acc = acc_o[d_base]; + for (int b_c_idx = 0; b_c_idx < Bc; ++b_c_idx) { + acc += w_block[b_c_idx] * v_block[b_c_idx * v_stride_size + d_base]; + } + acc_o[d_base] = acc; + } +#endif + } + + inline void scale_and_store_d(const acc_dtype_t *__restrict__ acc_o, + const acc_dtype_t *__restrict__ logsum, + dtype_t *__restrict__ o_block, const int32_t t_r_idx, + const int32_t head_size, const int32_t dim_size) { +#ifdef __AVX2__ + float reciprocal_logsum = 1.0f / logsum[0]; + __m256 reciprocal_logsum_vec = _mm256_set1_ps(reciprocal_logsum); + int j = 0; + for (; j <= dim_size - 8; j += 8) { + _mm256_storeu_ps(o_block + j, _mm256_mul_ps(_mm256_loadu_ps(acc_o + j), reciprocal_logsum_vec)); + } + for (; j < dim_size; ++j) { + o_block[j] = acc_o[j] * reciprocal_logsum; + } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + float reciprocal_logsum = 1.0f / logsum[0]; + float32x4_t reciprocal_logsum_vec = vdupq_n_f32(reciprocal_logsum); + int j = 0; + for (; j <= dim_size - 4; j += 4) { + vst1q_f32(o_block + j, vmulq_f32(vld1q_f32(acc_o + j), reciprocal_logsum_vec)); 
+ } + for (; j < dim_size; ++j) { + o_block[j] = acc_o[j] * reciprocal_logsum; + } +#endif + } + + // Decode n-fixed functions + inline void mma0_d_n_fixed(const int32_t Bc_n_fixed, const dtype_t *__restrict__ q_block, + const dtype_t *__restrict__ k_block, acc_dtype_t *__restrict__ acc_s, + const int32_t dim_size, const int32_t kv_stride_size, + const int32_t t_r_idx, const int32_t t_c_idx, const int32_t seq_size_q, + const int32_t seq_size_k, bool causal_mask) { + const dtype_t *q_block_line = q_block; + for (int32_t b_c_idx = 0; b_c_idx < Bc_n_fixed; ++b_c_idx) { + const dtype_t *k_block_line = k_block + b_c_idx * kv_stride_size; + float total = 0.0f; + for (int i = 0; i < dim_size; ++i) { total += q_block_line[i] * k_block_line[i]; } + acc_s[b_c_idx] = total; + } + } + + inline void softmax_d_n_fixed(const int32_t Bc_n_fixed, acc_dtype_t *__restrict__ acc_s, + acc_dtype_t *scoremax, acc_dtype_t *scoremax_prev, + acc_dtype_t *score_scale, acc_dtype_t *score_sum, + acc_dtype_t *logsum, + const float scale, + const int32_t t_r_idx, const int32_t t_c_idx, + const int32_t seq_size_q, const int32_t seq_size_k, bool causal_mask) { + scoremax_prev[0] = scoremax[0]; + float max_val = NEG_INF; + for (int bc = 0; bc < Bc_n_fixed; ++bc) max_val = fmaxf(max_val, acc_s[bc]); + scoremax[0] = fmaxf(max_val, scoremax[0]); + score_scale[0] = expf((scoremax_prev[0] - scoremax[0]) * scale); + float current_sum = 0.0f; + for (int bc = 0; bc < Bc_n_fixed; ++bc) { + float val = expf((acc_s[bc] - scoremax[0]) * scale); + acc_s[bc] = val; + current_sum += val; + } + score_sum[0] = current_sum; + logsum[0] = logsum[0] * score_scale[0] + score_sum[0]; + } + + inline void rescale_d_n_fixed(const int32_t Bc_n_fixed, acc_dtype_t *__restrict__ acc_o, + acc_dtype_t *__restrict__ score_scale, const int32_t dim_size, + const int32_t t_r_idx, const int32_t t_c_idx, + const int32_t seq_size_q, const int32_t seq_size_k, + bool causal_mask) { + float scale = score_scale[0]; + for (int j = 0; j < 
dim_size; ++j) { + acc_o[j] *= scale; + } + } + + inline void mma1_d_n_fixed(const int32_t Bc_n_fixed, const acc_dtype_t *__restrict__ w_block, + const dtype_t *__restrict__ v_block, acc_dtype_t *__restrict__ acc_o, + const int32_t kv_head_size, const int32_t dim_size, const int32_t t_r_idx, + const int32_t t_c_idx, const int32_t seq_size_q, + const int32_t seq_size_k, bool causal_mask) { + const int32_t v_stride_size = kv_head_size * dim_size; + for (int d_base = 0; d_base < dim_size; ++d_base) { + float acc = acc_o[d_base]; + for (int b_c_idx = 0; b_c_idx < Bc_n_fixed; ++b_c_idx) { + acc += w_block[b_c_idx] * v_block[b_c_idx * v_stride_size + d_base]; + } + acc_o[d_base] = acc; + } + } + +private: + acc_dtype_t *acc_o_; + acc_dtype_t *acc_s_; + acc_dtype_t *logsum_; + acc_dtype_t *scoremax_; + acc_dtype_t *scoremax_prev_; + acc_dtype_t *score_scale_; + acc_dtype_t *score_sum_; +}; + +struct FA_2_GQA_Q_FP32_KV_FP16_BSHD_O_FP32_BSHD_ACC_FP32_IMPL { + using dtype_q_in_t = float; + using dtype_kv_in_t = mllm_fp16_t; + using dtype_out_t = float; + using acc_dtype_t = float; + + int32_t Br, Bc, Q_Head, KV_Head, threads; + bool high_precision; + + void configure(int32_t Br_, int32_t Bc_, int32_t Q_Head_, int32_t KV_Head_, int32_t threads_, bool high_precision_) { + Br = Br_; + Bc = Bc_; + Q_Head = Q_Head_; + KV_Head = KV_Head_; + threads = threads_; + high_precision = high_precision_; + } + + void init_workspace(acc_dtype_t *acc_o, acc_dtype_t *acc_s, + acc_dtype_t *logsum, acc_dtype_t *scoremax, acc_dtype_t *scoremax_prev, + acc_dtype_t *score_scale, acc_dtype_t *score_sum) { + acc_o_ = acc_o; + acc_s_ = acc_s; + logsum_ = logsum; + scoremax_ = scoremax; + scoremax_prev_ = scoremax_prev; + score_scale_ = score_scale; + score_sum_ = score_sum; + } + + void fa2(const dtype_q_in_t *__restrict__ Q, const dtype_kv_in_t *__restrict__ K, + const dtype_kv_in_t *__restrict__ V, dtype_out_t *__restrict__ O, const int32_t batch_size, + const int32_t head_size, const int32_t 
seq_size_q, const int32_t seq_size_k, + const int32_t dim_size, bool causal_mask = true) { + assert(Br == Bc); + assert(head_size % threads == 0); +#ifdef __AVX2__ + assert(dim_size % 8 == 0); +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + assert(dim_size % 4 == 0); + assert(Q_Head % KV_Head == 0); +#endif + + if (seq_size_q != 1) { + __fa2_prefill_append(Q, K, V, O, batch_size, head_size, seq_size_q, seq_size_k, dim_size, causal_mask); + } else { + __fa2_decode(Q, K, V, O, batch_size, head_size, seq_size_q, seq_size_k, dim_size, causal_mask); + } + } + +private: +#if (defined(__ARM_NEON) || defined(__ARM_NEON__)) +#define MLLM_NEON_F32x4_FROM_FP16(addr) vcvt_f32_f16(vld1_f16((const __fp16 *)(addr))) +#endif + + inline void __fa2_prefill_append(const dtype_q_in_t *__restrict__ Q, const dtype_kv_in_t *__restrict__ K, + const dtype_kv_in_t *__restrict__ V, dtype_out_t *__restrict__ O, + const int32_t batch_size, const int32_t head_size, + const int32_t seq_size_q, const int32_t seq_size_k, + const int32_t dim_size, bool causal_mask = true) { + const int32_t Tr = seq_size_q / Br; + const int32_t Tr_left = seq_size_q % Br; + const int32_t Tc = seq_size_k / Bc; + const int32_t Tc_left = seq_size_k % Tc; + + const float local_scale = 1.0f / sqrtf(static_cast(dim_size)); + const int32_t kv_group_size = (Q_Head > 0 && KV_Head > 0) ? 
Q_Head / KV_Head : 1; + + for (int32_t b_idx = 0; b_idx < batch_size; ++b_idx) { +#pragma omp parallel for num_threads(threads) schedule(dynamic, 1) if (threads > 1) + for (int32_t h_idx = 0; h_idx < head_size; ++h_idx) { + const int32_t thread_id = omp_get_thread_num(); + const int32_t this_thread_head = h_idx; + const int32_t this_thread_kv_head = this_thread_head / kv_group_size; + + for (int t_r_idx = 0; t_r_idx < Tr; ++t_r_idx) { + init_temp(logsum_ + thread_id * Br, scoremax_ + thread_id * Br, + acc_o_ + thread_id * Br * dim_size, dim_size); + for (int t_c_idx = 0; t_c_idx < Tc; ++t_c_idx) { + const dtype_q_in_t *tile_q = Q + b_idx * seq_size_q * head_size * dim_size + t_r_idx * Br * head_size * dim_size + this_thread_head * dim_size; + const dtype_kv_in_t *tile_k = K + b_idx * seq_size_k * KV_Head * dim_size + t_c_idx * Bc * KV_Head * dim_size + this_thread_kv_head * dim_size; + const dtype_kv_in_t *tile_v = V + b_idx * seq_size_k * KV_Head * dim_size + t_c_idx * Bc * KV_Head * dim_size + this_thread_kv_head * dim_size; + + acc_dtype_t *tile_acc_s = acc_s_ + thread_id * Br * Bc; + acc_dtype_t *acc_o = acc_o_ + thread_id * Br * dim_size; + + mma0(tile_q, tile_k, tile_acc_s, dim_size, head_size * dim_size, KV_Head * dim_size, t_r_idx, t_c_idx, seq_size_q, seq_size_k, causal_mask); + softmax(tile_acc_s, scoremax_ + thread_id * Br, scoremax_prev_ + thread_id * Br, score_scale_ + thread_id * Br, score_sum_ + thread_id * Br, logsum_ + thread_id * Br, local_scale, t_r_idx, t_c_idx, seq_size_q, seq_size_k, causal_mask); + rescale(acc_o, score_scale_ + thread_id * Br, dim_size, t_r_idx, t_c_idx, seq_size_q, seq_size_k, causal_mask); + mma1(tile_acc_s, tile_v, acc_o, KV_Head, dim_size, t_r_idx, t_c_idx, seq_size_q, seq_size_k, causal_mask); + } + if (Tc_left) { + const dtype_q_in_t *tile_q = Q + b_idx * seq_size_q * head_size * dim_size + t_r_idx * Br * head_size * dim_size + this_thread_head * dim_size; + const dtype_kv_in_t *tile_k = K + b_idx * seq_size_k * KV_Head 
* dim_size + Tc * Bc * KV_Head * dim_size + this_thread_kv_head * dim_size; + const dtype_kv_in_t *tile_v = V + b_idx * seq_size_k * KV_Head * dim_size + Tc * Bc * KV_Head * dim_size + this_thread_kv_head * dim_size; + acc_dtype_t *tile_acc_s = acc_s_ + thread_id * Br * Bc; + acc_dtype_t *acc_o = acc_o_ + thread_id * Br * dim_size; + mma0_pa_n_fixed(Br, Tc_left, tile_q, tile_k, tile_acc_s, dim_size, head_size * dim_size, KV_Head * dim_size, t_r_idx, Tc, seq_size_q, seq_size_k, causal_mask); + softmax_pa_n_fixed(Br, Tc_left, tile_acc_s, scoremax_ + thread_id * Br, scoremax_prev_ + thread_id * Br, score_scale_ + thread_id * Br, score_sum_ + thread_id * Br, logsum_ + thread_id * Br, local_scale, t_r_idx, Tc, seq_size_q, seq_size_k, causal_mask); + rescale_pa_n_fixed(Br, Tc_left, acc_o, score_scale_ + thread_id * Br, dim_size, t_r_idx, Tc, seq_size_q, seq_size_k, causal_mask); + mma1_pa_n_fixed(Br, Tc_left, tile_acc_s, tile_v, acc_o, KV_Head, dim_size, t_r_idx, Tc, seq_size_q, seq_size_k, causal_mask); + } + scale_and_store(acc_o_ + thread_id * Br * dim_size, logsum_ + thread_id * Br, O + b_idx * seq_size_q * head_size * dim_size + t_r_idx * Br * head_size * dim_size + this_thread_head * dim_size, t_r_idx, head_size, dim_size); + } + if (Tr_left) { + init_temp(logsum_ + thread_id * Br, scoremax_ + thread_id * Br, acc_o_ + thread_id * Br * dim_size, dim_size); + for (int t_c_idx = 0; t_c_idx < Tc; ++t_c_idx) { + const dtype_q_in_t *tile_q = Q + b_idx * seq_size_q * head_size * dim_size + Tr * Br * head_size * dim_size + this_thread_head * dim_size; + const dtype_kv_in_t *tile_k = K + b_idx * seq_size_k * KV_Head * dim_size + t_c_idx * Bc * KV_Head * dim_size + this_thread_kv_head * dim_size; + const dtype_kv_in_t *tile_v = V + b_idx * seq_size_k * KV_Head * dim_size + t_c_idx * Bc * KV_Head * dim_size + this_thread_kv_head * dim_size; + acc_dtype_t *tile_acc_s = acc_s_ + thread_id * Br * Bc; + acc_dtype_t *acc_o = acc_o_ + thread_id * Br * dim_size; + 
mma0_pa_n_fixed(Tr_left, Bc, tile_q, tile_k, tile_acc_s, dim_size, head_size * dim_size, KV_Head * dim_size, Tr, t_c_idx, seq_size_q, seq_size_k, causal_mask); + softmax_pa_n_fixed(Tr_left, Bc, tile_acc_s, scoremax_ + thread_id * Br, scoremax_prev_ + thread_id * Br, score_scale_ + thread_id * Br, score_sum_ + thread_id * Br, logsum_ + thread_id * Br, local_scale, Tr, t_c_idx, seq_size_q, seq_size_k, causal_mask); + rescale_pa_n_fixed(Tr_left, Bc, acc_o, score_scale_ + thread_id * Br, dim_size, Tr, t_c_idx, seq_size_q, seq_size_k, causal_mask); + mma1_pa_n_fixed(Tr_left, Bc, tile_acc_s, tile_v, acc_o, KV_Head, dim_size, Tr, t_c_idx, seq_size_q, seq_size_k, causal_mask); + } + if (Tc_left) { + const dtype_q_in_t *tile_q = Q + b_idx * seq_size_q * head_size * dim_size + Tr * Br * head_size * dim_size + this_thread_head * dim_size; + const dtype_kv_in_t *tile_k = K + b_idx * seq_size_k * KV_Head * dim_size + Tc * Bc * KV_Head * dim_size + this_thread_kv_head * dim_size; + const dtype_kv_in_t *tile_v = V + b_idx * seq_size_k * KV_Head * dim_size + Tc * Bc * KV_Head * dim_size + this_thread_kv_head * dim_size; + acc_dtype_t *tile_acc_s = acc_s_ + thread_id * Br * Bc; + acc_dtype_t *acc_o = acc_o_ + thread_id * Br * dim_size; + mma0_pa_n_fixed(Tr_left, Tc_left, tile_q, tile_k, tile_acc_s, dim_size, head_size * dim_size, KV_Head * dim_size, Tr, Tc, seq_size_q, seq_size_k, causal_mask); + softmax_pa_n_fixed(Tr_left, Tc_left, tile_acc_s, scoremax_ + thread_id * Br, scoremax_prev_ + thread_id * Br, score_scale_ + thread_id * Br, score_sum_ + thread_id * Br, logsum_ + thread_id * Br, local_scale, Tr, Tc, seq_size_q, seq_size_k, causal_mask); + rescale_pa_n_fixed(Tr_left, Tc_left, acc_o, score_scale_ + thread_id * Br, dim_size, Tr, Tc, seq_size_q, seq_size_k, causal_mask); + mma1_pa_n_fixed(Tr_left, Tc_left, tile_acc_s, tile_v, acc_o, KV_Head, dim_size, Tr, Tc, seq_size_q, seq_size_k, causal_mask); + } + scale_and_store_pa_n_fixed(Tr_left, acc_o_ + thread_id * Br * dim_size, 
logsum_ + thread_id * Br, O + b_idx * seq_size_q * head_size * dim_size + Tr * Br * head_size * dim_size + this_thread_head * dim_size, Tr, head_size, dim_size); + } + } + } + } + + inline void __fa2_decode(const dtype_q_in_t *__restrict__ Q, const dtype_kv_in_t *__restrict__ K, + const dtype_kv_in_t *__restrict__ V, dtype_out_t *__restrict__ O, + const int32_t batch_size, const int32_t head_size, + const int32_t seq_size_q, const int32_t seq_size_k, + const int32_t dim_size, bool causal_mask = true) { + const int32_t Tr = 1; + const int32_t Tc = seq_size_k / Bc; + const int32_t Tc_left = seq_size_k % Bc; + + const float local_scale = 1.0f / sqrtf(static_cast(dim_size)); + const int32_t kv_group_size = (Q_Head > 0 && KV_Head > 0) ? Q_Head / KV_Head : 1; + + for (int32_t b_idx = 0; b_idx < batch_size; ++b_idx) { +#pragma omp parallel for num_threads(threads) schedule(dynamic, 1) if (threads > 1) + for (int32_t h_idx = 0; h_idx < head_size; ++h_idx) { + const int32_t thread_id = omp_get_thread_num(); + const int32_t this_thread_head = h_idx; + const int32_t this_thread_kv_head = this_thread_head / kv_group_size; + + const int t_r_idx = 0; + init_temp_d(logsum_ + thread_id * Br, scoremax_ + thread_id * Br, acc_o_ + thread_id * Br * dim_size, dim_size); + for (int t_c_idx = 0; t_c_idx < Tc; ++t_c_idx) { + const dtype_q_in_t *tile_q = Q + b_idx * seq_size_q * head_size * dim_size + t_r_idx * 1 * head_size * dim_size + this_thread_head * dim_size; + const dtype_kv_in_t *tile_k = K + b_idx * seq_size_k * KV_Head * dim_size + t_c_idx * Bc * KV_Head * dim_size + this_thread_kv_head * dim_size; + const dtype_kv_in_t *tile_v = V + b_idx * seq_size_k * KV_Head * dim_size + t_c_idx * Bc * KV_Head * dim_size + this_thread_kv_head * dim_size; + + acc_dtype_t *tile_acc_s = acc_s_ + thread_id * Br * Bc; + acc_dtype_t *acc_o = acc_o_ + thread_id * Br * dim_size; + + mma0_d(tile_q, tile_k, tile_acc_s, dim_size, KV_Head * dim_size, t_r_idx, t_c_idx, seq_size_q, seq_size_k, 
causal_mask); + softmax_d(tile_acc_s, scoremax_ + thread_id * Br, scoremax_prev_ + thread_id * Br, score_scale_ + thread_id * Br, score_sum_ + thread_id * Br, logsum_ + thread_id * Br, local_scale, t_r_idx, t_c_idx, seq_size_q, seq_size_k, causal_mask); + rescale_d(acc_o, score_scale_ + thread_id * Br, dim_size, t_r_idx, t_c_idx, seq_size_q, seq_size_k, causal_mask); + mma1_d(tile_acc_s, tile_v, acc_o, KV_Head, dim_size, t_r_idx, t_c_idx, seq_size_q, seq_size_k, causal_mask); + } + if (Tc_left) { + const dtype_q_in_t *tile_q = Q + b_idx * seq_size_q * head_size * dim_size + t_r_idx * 1 * head_size * dim_size + this_thread_head * dim_size; + const dtype_kv_in_t *tile_k = K + b_idx * seq_size_k * KV_Head * dim_size + Tc * Bc * KV_Head * dim_size + this_thread_kv_head * dim_size; + const dtype_kv_in_t *tile_v = V + b_idx * seq_size_k * KV_Head * dim_size + Tc * Bc * KV_Head * dim_size + this_thread_kv_head * dim_size; + acc_dtype_t *tile_acc_s = acc_s_ + thread_id * Br * Bc; + acc_dtype_t *acc_o = acc_o_ + thread_id * Br * dim_size; + mma0_d_n_fixed(Tc_left, tile_q, tile_k, tile_acc_s, dim_size, KV_Head * dim_size, t_r_idx, Tc, seq_size_q, seq_size_k, causal_mask); + softmax_d_n_fixed(Tc_left, tile_acc_s, scoremax_ + thread_id * Br, scoremax_prev_ + thread_id * Br, score_scale_ + thread_id * Br, score_sum_ + thread_id * Br, logsum_ + thread_id * Br, local_scale, t_r_idx, Tc, seq_size_q, seq_size_k, causal_mask); + rescale_d_n_fixed(Tc_left, acc_o, score_scale_ + thread_id * Br, dim_size, t_r_idx, Tc, seq_size_q, seq_size_k, causal_mask); + mma1_d_n_fixed(Tc_left, tile_acc_s, tile_v, acc_o, KV_Head, dim_size, t_r_idx, Tc, seq_size_q, seq_size_k, causal_mask); + } + scale_and_store_d(acc_o_ + thread_id * Br * dim_size, logsum_ + thread_id * Br, O + b_idx * seq_size_q * head_size * dim_size + t_r_idx * 1 * head_size * dim_size + this_thread_head * dim_size, t_r_idx, head_size, dim_size); + } + } + } + + inline void init_temp(acc_dtype_t *logsum, acc_dtype_t *scoremax, 
acc_dtype_t *acc_o, const int32_t dim_size) { +#ifdef __AVX2__ + __m256 zero_vec = _mm256_set1_ps(0.0f); + __m256 neg_inf_vec = _mm256_set1_ps(NEG_INF); + + int i = 0; + for (; i <= Br - 8; i += 8) { + _mm256_storeu_ps(logsum + i, zero_vec); + _mm256_storeu_ps(scoremax + i, neg_inf_vec); + } + for (; i < Br; ++i) { + logsum[i] = 0.0f; + scoremax[i] = NEG_INF; + } + + for (int j = 0; j < Br * dim_size; j += 8) { + _mm256_storeu_ps(acc_o + j, zero_vec); + } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + float32x4_t zero_vec = vdupq_n_f32(0.0f); + float32x4_t neg_inf_vec = vdupq_n_f32(NEG_INF); + int i = 0; + for (; i <= Br - 4; i += 4) { + vst1q_f32(logsum + i, zero_vec); + vst1q_f32(scoremax + i, neg_inf_vec); + } + for (; i < Br; ++i) { + logsum[i] = 0.0f; + scoremax[i] = NEG_INF; + } + for (int j = 0; j < Br * dim_size; j += 4) { + vst1q_f32(acc_o + j, zero_vec); + } +#endif + } + + inline void mma0(const dtype_q_in_t *__restrict__ q_block, const dtype_kv_in_t *__restrict__ k_block, + acc_dtype_t *__restrict__ acc_s, const int32_t dim_size, + const int32_t q_stride_size, const int32_t kv_stride_size, + const int32_t t_r_idx, const int32_t t_c_idx, + const int32_t seq_size_q, const int32_t seq_size_k, bool causal_mask) { +#ifdef __AVX2__ + const int32_t global_r_start = t_r_idx * Br, global_r_end = global_r_start + Br; + const int32_t global_c_start = t_c_idx * Bc; + int delta_pos = seq_size_k - seq_size_q; + if (causal_mask && (global_c_start - delta_pos > (global_r_end - 1))) { return; } + for (int32_t b_r_idx = 0; b_r_idx < Br; ++b_r_idx) { + const dtype_q_in_t *q_block_line = q_block + b_r_idx * q_stride_size; + for (int32_t b_c_idx = 0; b_c_idx < Bc; ++b_c_idx) { + const dtype_kv_in_t *k_block_line = k_block + b_c_idx * kv_stride_size; + __m256 sum_vec = _mm256_setzero_ps(); + int i = 0; + for (; i <= dim_size - 8; i += 8) { + __m256 q_vec = _mm256_loadu_ps(q_block_line + i); + __m256 k_vec = MLLM_F32Cx8_LOAD(k_block_line + i); + sum_vec = 
_mm256_fmadd_ps(q_vec, k_vec, sum_vec); + } + acc_dtype_t total = _mm256_hadd_ps(sum_vec); + for (; i < dim_size; ++i) { total += q_block_line[i] * MLLM_FP16_TO_FP32(k_block_line[i]); } + acc_s[b_r_idx * Bc + b_c_idx] = total; + } + } + if (causal_mask && (global_r_end == (t_c_idx * Bc + Bc) - delta_pos)) { + for (int i = 0; i < Br; ++i) { + for (int j = 0; j < Bc; ++j) { + if (j > i) { acc_s[i * Bc + j] = NEG_INF; } + } + } + } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + const int32_t global_r_start = t_r_idx * Br; + const int32_t global_r_end = global_r_start + Br; + const int32_t global_c_start = t_c_idx * Bc; + int delta_pos = seq_size_k - seq_size_q; + + if (causal_mask && (global_c_start - delta_pos > (global_r_end - 1))) { return; } + + alignas(16) __fp16 q_f16_buf[Br * dim_size]; + for (int32_t i = 0; i < Br; ++i) { + const float *q_line_f32 = q_block + i * q_stride_size; + __fp16 *q_line_f16 = q_f16_buf + i * dim_size; + for (int j = 0; j < dim_size; ++j) { + q_line_f16[j] = (__fp16)q_line_f32[j]; + } + } + + for (int32_t b_r_base = 0; b_r_base < Br; b_r_base += 4) { + for (int32_t b_c_base = 0; b_c_base < Bc; b_c_base += 4) { + const __fp16 *q_base_ptr = q_f16_buf + b_r_base * dim_size; + const __fp16 *k_base_ptr = (const __fp16 *)k_block + b_c_base * kv_stride_size; + float *acc_s_base_ptr = acc_s + b_r_base * Bc + b_c_base; + +#pragma unroll + for (int32_t b_r_offset = 0; b_r_offset < 4; ++b_r_offset) { + const __fp16 *q_row = q_base_ptr + b_r_offset * dim_size; + + const __fp16 *k_row0 = k_base_ptr + 0 * kv_stride_size; + const __fp16 *k_row1 = k_base_ptr + 1 * kv_stride_size; + const __fp16 *k_row2 = k_base_ptr + 2 * kv_stride_size; + const __fp16 *k_row3 = k_base_ptr + 3 * kv_stride_size; + + float32x4_t sum0 = vdupq_n_f32(0.0f); + float32x4_t sum1 = vdupq_n_f32(0.0f); + float32x4_t sum2 = vdupq_n_f32(0.0f); + float32x4_t sum3 = vdupq_n_f32(0.0f); + + int32_t k = 0; + for (; k <= dim_size - 8; k += 8) { + float16x8_t q_vec = 
vld1q_f16(q_row + k); + + float16x8_t k_vec0 = vld1q_f16(k_row0 + k); + sum0 = vfmlalq_low_f16(sum0, q_vec, k_vec0); + sum0 = vfmlalq_high_f16(sum0, q_vec, k_vec0); + + float16x8_t k_vec1 = vld1q_f16(k_row1 + k); + sum1 = vfmlalq_low_f16(sum1, q_vec, k_vec1); + sum1 = vfmlalq_high_f16(sum1, q_vec, k_vec1); + + float16x8_t k_vec2 = vld1q_f16(k_row2 + k); + sum2 = vfmlalq_low_f16(sum2, q_vec, k_vec2); + sum2 = vfmlalq_high_f16(sum2, q_vec, k_vec2); + + float16x8_t k_vec3 = vld1q_f16(k_row3 + k); + sum3 = vfmlalq_low_f16(sum3, q_vec, k_vec3); + sum3 = vfmlalq_high_f16(sum3, q_vec, k_vec3); + } + + float total0 = vaddvq_f32(sum0); + float total1 = vaddvq_f32(sum1); + float total2 = vaddvq_f32(sum2); + float total3 = vaddvq_f32(sum3); + + for (; k < dim_size; ++k) { + total0 += (float)q_row[k] * (float)k_row0[k]; + total1 += (float)q_row[k] * (float)k_row1[k]; + total2 += (float)q_row[k] * (float)k_row2[k]; + total3 += (float)q_row[k] * (float)k_row3[k]; + } + + float *acc_s_row = acc_s_base_ptr + b_r_offset * Bc; + acc_s_row[0] = total0; + acc_s_row[1] = total1; + acc_s_row[2] = total2; + acc_s_row[3] = total3; + } + } + } + + if (causal_mask) { + for (int i = 0; i < Br; ++i) { + for (int j = 0; j < Bc; ++j) { + if ((global_c_start + j) > (global_r_start + i + delta_pos)) { + acc_s[i * Bc + j] = NEG_INF; + } + } + } + } +#endif + } + + inline void softmax(acc_dtype_t *__restrict__ acc_s, acc_dtype_t *scoremax, acc_dtype_t *scoremax_prev, + acc_dtype_t *score_scale, acc_dtype_t *score_sum, acc_dtype_t *logsum, + const float scale, const int32_t t_r_idx, const int32_t t_c_idx, + const int32_t seq_size_q, const int32_t seq_size_k, bool causal_mask) { + const int32_t global_r_start = t_r_idx * Br; + const int32_t global_c_start = t_c_idx * Bc; + int delta_pos = seq_size_k - seq_size_q; + if (causal_mask && (global_c_start - delta_pos > (global_r_start + Br - 1))) return; + memcpy(scoremax_prev, scoremax, Br * sizeof(acc_dtype_t)); +#ifdef __AVX2__ + for (int br = 0; br < 
Br; ++br) { + __m256 max_vec = _mm256_set1_ps(scoremax[br]); + acc_dtype_t *row = acc_s + br * Bc; + int bc = 0; + for (; bc <= Bc - 8; bc += 8) { max_vec = _mm256_max_ps(max_vec, _mm256_loadu_ps(row + bc)); } + float max_val = _mm256_hmax_ps(max_vec); + for (; bc < Bc; ++bc) { max_val = fmaxf(max_val, row[bc]); } + scoremax[br] = max_val; + } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + for (int br = 0; br < Br; ++br) { + float32x4_t max_vec = vdupq_n_f32(scoremax[br]); + acc_dtype_t *row = acc_s + br * Bc; + int bc = 0; + for (; bc <= Bc - 4; bc += 4) { max_vec = vmaxq_f32(max_vec, vld1q_f32(row + bc)); } + float max_val = _vmaxvq_f32_hmax(max_vec); + for (; bc < Bc; ++bc) { max_val = fmaxf(max_val, row[bc]); } + scoremax[br] = max_val; + } +#endif + for (int br = 0; br < Br; ++br) { score_scale[br] = expf((scoremax_prev[br] - scoremax[br]) * scale); } + for (int br = 0; br < Br; ++br) { + const float sm = scoremax[br]; + acc_dtype_t *row = acc_s + br * Bc; + float sum = 0.0f; + for (int bc = 0; bc < Bc; ++bc) { + float val = expf((row[bc] - sm) * scale); + row[bc] = val; + sum += val; + } + score_sum[br] = sum; + } + for (int br = 0; br < Br; ++br) { logsum[br] = logsum[br] * score_scale[br] + score_sum[br]; } + } + + inline void rescale(acc_dtype_t *__restrict__ acc_o, acc_dtype_t *__restrict__ score_scale, + const int32_t dim_size, const int32_t t_r_idx, const int32_t t_c_idx, + const int32_t seq_size_q, const int32_t seq_size_k, bool causal_mask) { +#ifdef __AVX2__ + // (无变化) + const int32_t global_r_start = t_r_idx * Br; + const int32_t global_c_start = t_c_idx * Bc; + int delta_pos = seq_size_k - seq_size_q; + if (causal_mask && (global_c_start - delta_pos > (global_r_start + Br - 1))) return; + for (int i = 0; i < Br; ++i) { + __m256 scale_v = _mm256_set1_ps(score_scale[i]); + float *row_ptr = acc_o + i * dim_size; + for (int j = 0; j < dim_size; j += 8) { + __m256 acc = _mm256_loadu_ps(row_ptr + j); + acc = _mm256_mul_ps(acc, scale_v); + 
_mm256_storeu_ps(row_ptr + j, acc); + } + } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + const int32_t global_r_start = t_r_idx * Br; + const int32_t global_c_start = t_c_idx * Bc; + int delta_pos = seq_size_k - seq_size_q; + if (causal_mask && (global_c_start - delta_pos > (global_r_start + Br - 1))) return; + for (int i = 0; i < Br; ++i) { + float32x4_t scale_v = vdupq_n_f32(score_scale[i]); + float *row_ptr = acc_o + i * dim_size; + for (int j = 0; j < dim_size; j += 4) { + float32x4_t acc = vld1q_f32(row_ptr + j); + acc = vmulq_f32(acc, scale_v); + vst1q_f32(row_ptr + j, acc); + } + } +#endif + } + + inline void mma1(const acc_dtype_t *__restrict__ w_block, const dtype_kv_in_t *__restrict__ v_block, + acc_dtype_t *__restrict__ acc_o, const int32_t kv_head_size, const int32_t dim_size, + const int32_t t_r_idx, const int32_t t_c_idx, const int32_t seq_size_q, + const int32_t seq_size_k, bool causal_mask) { +#ifdef __AVX2__ + const int32_t global_r_start = t_r_idx * Br; + const int32_t global_c_start = t_c_idx * Bc; + int delta_pos = seq_size_k - seq_size_q; + if (causal_mask && (global_c_start - delta_pos > (global_r_start + Br - 1))) return; + const int32_t v_stride_size = kv_head_size * dim_size; + for (int b_r_idx = 0; b_r_idx < Br; ++b_r_idx) { + for (int d_base = 0; d_base < dim_size; d_base += 8) { + __m256 acc = _mm256_loadu_ps(acc_o + b_r_idx * dim_size + d_base); + for (int b_c_idx = 0; b_c_idx < Bc; ++b_c_idx) { + __m256 w_vec = _mm256_set1_ps(w_block[b_r_idx * Bc + b_c_idx]); + const dtype_kv_in_t *v_ptr = v_block + b_c_idx * v_stride_size + d_base; + __m256 v_vec = MLLM_F32Cx8_LOAD(v_ptr); + acc = _mm256_fmadd_ps(w_vec, v_vec, acc); + } + _mm256_storeu_ps(acc_o + b_r_idx * dim_size + d_base, acc); + } + } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + const int32_t global_r_start = t_r_idx * Br; + const int32_t global_c_start = t_c_idx * Bc; + int delta_pos = seq_size_k - seq_size_q; + if (causal_mask && (global_c_start - delta_pos 
> (global_r_start + Br - 1))) return; + + alignas(16) __fp16 w_f16_buf[Br * Bc]; + for (int i = 0; i < Br * Bc; ++i) { + w_f16_buf[i] = (__fp16)w_block[i]; + } + + const int32_t v_stride = kv_head_size * dim_size; + for (int d_base = 0; d_base < dim_size; d_base += 8) { + float32x4_t acc0[2], acc1[2], acc2[2], acc3[2]; + + acc0[0] = vld1q_f32(acc_o + 0 * dim_size + d_base); + acc0[1] = vld1q_f32(acc_o + 0 * dim_size + d_base + 4); + acc1[0] = vld1q_f32(acc_o + 1 * dim_size + d_base); + acc1[1] = vld1q_f32(acc_o + 1 * dim_size + d_base + 4); + acc2[0] = vld1q_f32(acc_o + 2 * dim_size + d_base); + acc2[1] = vld1q_f32(acc_o + 2 * dim_size + d_base + 4); + acc3[0] = vld1q_f32(acc_o + 3 * dim_size + d_base); + acc3[1] = vld1q_f32(acc_o + 3 * dim_size + d_base + 4); + +#pragma unroll + for (int k_inner = 0; k_inner < Bc; ++k_inner) { + const float16x8_t v_vec = vld1q_f16((const __fp16 *)v_block + k_inner * v_stride + d_base); + + const float16x8_t w0_vec = vdupq_n_f16(w_f16_buf[0 * Bc + k_inner]); + acc0[0] = vfmlalq_low_f16(acc0[0], v_vec, w0_vec); + acc0[1] = vfmlalq_high_f16(acc0[1], v_vec, w0_vec); + + const float16x8_t w1_vec = vdupq_n_f16(w_f16_buf[1 * Bc + k_inner]); + acc1[0] = vfmlalq_low_f16(acc1[0], v_vec, w1_vec); + acc1[1] = vfmlalq_high_f16(acc1[1], v_vec, w1_vec); + + const float16x8_t w2_vec = vdupq_n_f16(w_f16_buf[2 * Bc + k_inner]); + acc2[0] = vfmlalq_low_f16(acc2[0], v_vec, w2_vec); + acc2[1] = vfmlalq_high_f16(acc2[1], v_vec, w2_vec); + + const float16x8_t w3_vec = vdupq_n_f16(w_f16_buf[3 * Bc + k_inner]); + acc3[0] = vfmlalq_low_f16(acc3[0], v_vec, w3_vec); + acc3[1] = vfmlalq_high_f16(acc3[1], v_vec, w3_vec); + } + + vst1q_f32(acc_o + 0 * dim_size + d_base, acc0[0]); + vst1q_f32(acc_o + 0 * dim_size + d_base + 4, acc0[1]); + vst1q_f32(acc_o + 1 * dim_size + d_base, acc1[0]); + vst1q_f32(acc_o + 1 * dim_size + d_base + 4, acc1[1]); + vst1q_f32(acc_o + 2 * dim_size + d_base, acc2[0]); + vst1q_f32(acc_o + 2 * dim_size + d_base + 4, acc2[1]); + 
vst1q_f32(acc_o + 3 * dim_size + d_base, acc3[0]); + vst1q_f32(acc_o + 3 * dim_size + d_base + 4, acc3[1]); + } +#endif + } + + inline void scale_and_store(const acc_dtype_t *__restrict__ acc_o, const acc_dtype_t *__restrict__ logsum, + dtype_out_t *__restrict__ o_block, const int32_t t_r_idx, + const int32_t head_size, const int32_t dim_size) { + for (int i = 0; i < Br; ++i) { + dtype_out_t *o_block_line = o_block + i * head_size * dim_size; // << 保持 BSHD 的行步长 +#ifdef __AVX2__ + __m256 reciprocal_logsum_vec = _mm256_set1_ps(1.0f / logsum[i]); + int j = 0; + for (; j <= dim_size - 8; j += 8) { + __m256 vec_acc_o = _mm256_loadu_ps(acc_o + i * dim_size + j); + __m256 result_vec = _mm256_mul_ps(vec_acc_o, reciprocal_logsum_vec); + _mm256_storeu_ps(o_block_line + j, result_vec); + } + float reciprocal_logsum = 1.0f / logsum[i]; + for (; j < dim_size; ++j) { o_block_line[j] = acc_o[i * dim_size + j] * reciprocal_logsum; } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + for (int i = 0; i < Br; ++i) { + dtype_out_t *o_block_line = o_block + i * head_size * dim_size; + float reciprocal_logsum = 1.0f / logsum[i]; + float32x4_t reciprocal_logsum_vec = vdupq_n_f32(reciprocal_logsum); + int j = 0; + for (; j <= dim_size - 4; j += 4) { + float32x4_t vec_acc_o = vld1q_f32(acc_o + i * dim_size + j); + vst1q_f32(o_block_line + j, vmulq_f32(vec_acc_o, reciprocal_logsum_vec)); + } + for (; j < dim_size; ++j) { + o_block_line[j] = acc_o[i * dim_size + j] * reciprocal_logsum; + } + } +#endif + } + } + + inline void mma0_pa_n_fixed(const int32_t Br_n_fixed, const int32_t Bc_n_fixed, + const dtype_q_in_t *__restrict__ q_block, const dtype_kv_in_t *__restrict__ k_block, + acc_dtype_t *__restrict__ acc_s, const int32_t dim_size, + const int32_t q_stride_size, const int32_t kv_stride_size, + const int32_t t_r_idx, const int32_t t_c_idx, const int32_t seq_size_q, + const int32_t seq_size_k, bool causal_mask) { +#ifdef __AVX2__ + const int32_t global_r_start = t_r_idx * Br; + const 
int32_t global_r_end = global_r_start + Br_n_fixed; + const int32_t global_c_start = t_c_idx * Bc; + int delta_pos = seq_size_k - seq_size_q; + if (causal_mask && (global_c_start - delta_pos > (global_r_end - 1))) { return; } + for (int32_t b_r_idx = 0; b_r_idx < Br_n_fixed; ++b_r_idx) { + const dtype_q_in_t *q_block_line = q_block + b_r_idx * q_stride_size; + for (int32_t b_c_idx = 0; b_c_idx < Bc_n_fixed; ++b_c_idx) { + const dtype_kv_in_t *k_block_line = k_block + b_c_idx * kv_stride_size; + __m256 sum_vec = _mm256_setzero_ps(); + int i = 0; + for (; i <= dim_size - 8; i += 8) { + sum_vec = _mm256_fmadd_ps(_mm256_loadu_ps(q_block_line + i), MLLM_F32Cx8_LOAD(k_block_line + i), sum_vec); + } + acc_dtype_t total = _mm256_hadd_ps(sum_vec); + for (; i < dim_size; ++i) { total += q_block_line[i] * MLLM_FP16_TO_FP32(k_block_line[i]); } + acc_s[b_r_idx * Bc + b_c_idx] = total; + } + } + if (causal_mask && (global_r_end == (global_c_start + Bc_n_fixed) - delta_pos)) { + for (int i = 0; i < Br_n_fixed; ++i) { + for (int j = 0; j < Bc_n_fixed; ++j) { + if (j > i) { acc_s[i * Bc + j] = NEG_INF; } + } + } + } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + const int32_t global_r_start = t_r_idx * Br; + const int32_t global_r_end = global_r_start + Br_n_fixed; + const int32_t global_c_start = t_c_idx * Bc; + int delta_pos = seq_size_k - seq_size_q; + if (causal_mask && (global_c_start - delta_pos > (global_r_end - 1))) { return; } + for (int32_t b_r_idx = 0; b_r_idx < Br_n_fixed; ++b_r_idx) { + const dtype_q_in_t *q_block_line = q_block + b_r_idx * q_stride_size; + for (int32_t b_c_idx = 0; b_c_idx < Bc_n_fixed; ++b_c_idx) { + const dtype_kv_in_t *k_block_line = k_block + b_c_idx * kv_stride_size; + float32x4_t sum_vec = vdupq_n_f32(0.0f); + int i = 0; + for (; i <= dim_size - 4; i += 4) { + float32x4_t q_vec = vld1q_f32(q_block_line + i); + float32x4_t k_vec = vcvt_f32_f16(vld1_f16((const __fp16 *)k_block_line + i)); + sum_vec = vfmaq_f32(sum_vec, q_vec, k_vec); + } 
+ acc_dtype_t total = vaddvq_f32(sum_vec); + for (; i < dim_size; ++i) { + total += q_block_line[i] * (float)k_block_line[i]; + } + acc_s[b_r_idx * Bc + b_c_idx] = total; + } + } + if (causal_mask) { + for (int i = 0; i < Br_n_fixed; ++i) { + for (int j = 0; j < Bc_n_fixed; ++j) { + if ((global_c_start + j) > (global_r_start + i + delta_pos)) { + acc_s[i * Bc + j] = NEG_INF; + } + } + } + } +#endif + } + + inline void softmax_pa_n_fixed(const int32_t Br_n_fixed, const int32_t Bc_n_fixed, + acc_dtype_t *__restrict__ acc_s, acc_dtype_t *scoremax, + acc_dtype_t *scoremax_prev, acc_dtype_t *score_scale, + acc_dtype_t *score_sum, acc_dtype_t *logsum, + const float scale, const int32_t t_r_idx, const int32_t t_c_idx, + const int32_t seq_size_q, const int32_t seq_size_k, bool causal_mask) { + const int32_t global_r_start = t_r_idx * Br; + const int32_t global_c_start = t_c_idx * Bc; + int delta_pos = seq_size_k - seq_size_q; + if (causal_mask && (global_c_start - delta_pos > (global_r_start + Br_n_fixed - 1))) return; + memcpy(scoremax_prev, scoremax, Br_n_fixed * sizeof(acc_dtype_t)); + for (int br = 0; br < Br_n_fixed; ++br) { + float max_val = scoremax[br]; + acc_dtype_t *row = acc_s + br * Bc; + for (int bc = 0; bc < Bc_n_fixed; ++bc) { + max_val = fmaxf(max_val, row[bc]); + } + scoremax[br] = max_val; + } + for (int br = 0; br < Br_n_fixed; ++br) { + score_scale[br] = expf((scoremax_prev[br] - scoremax[br]) * scale); + } + for (int br = 0; br < Br_n_fixed; ++br) { + const float sm = scoremax[br]; + acc_dtype_t *row = acc_s + br * Bc; + float current_sum = 0.0f; + for (int bc = 0; bc < Bc_n_fixed; ++bc) { + float val = expf((row[bc] - sm) * scale); + row[bc] = val; + current_sum += val; + } + score_sum[br] = current_sum; + } + for (int br = 0; br < Br_n_fixed; ++br) { + logsum[br] = logsum[br] * score_scale[br] + score_sum[br]; + } + } + + inline void rescale_pa_n_fixed(const int32_t Br_n_fixed, const int32_t Bc_n_fixed, + acc_dtype_t *__restrict__ acc_o, acc_dtype_t 
*__restrict__ score_scale, + const int32_t dim_size, const int32_t t_r_idx, const int32_t t_c_idx, + const int32_t seq_size_q, const int32_t seq_size_k, bool causal_mask) { +#ifdef __AVX2__ + const int32_t global_r_start = t_r_idx * Br; + const int32_t global_c_start = t_c_idx * Bc; + int delta_pos = seq_size_k - seq_size_q; + if (causal_mask && (global_c_start - delta_pos > (global_r_start + Br_n_fixed - 1))) return; + for (int i = 0; i < Br_n_fixed; ++i) { + float *row_ptr = acc_o + i * dim_size; + __m256 scale_v = _mm256_set1_ps(score_scale[i]); + for (int j = 0; j < dim_size; j += 8) { + _mm256_storeu_ps(row_ptr + j, _mm256_mul_ps(_mm256_loadu_ps(row_ptr + j), scale_v)); + } + } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + const int32_t global_r_start = t_r_idx * Br; + const int32_t global_c_start = t_c_idx * Bc; + int delta_pos = seq_size_k - seq_size_q; + if (causal_mask && (global_c_start - delta_pos > (global_r_start + Br_n_fixed - 1))) return; + for (int i = 0; i < Br_n_fixed; ++i) { + float *row_ptr = acc_o + i * dim_size; + float32x4_t scale_v = vdupq_n_f32(score_scale[i]); + for (int j = 0; j < dim_size; j += 4) { + vst1q_f32(row_ptr + j, vmulq_f32(vld1q_f32(row_ptr + j), scale_v)); + } + } +#endif + } + + inline void mma1_pa_n_fixed(const int32_t Br_n_fixed, const int32_t Bc_n_fixed, + const acc_dtype_t *__restrict__ w_block, const dtype_kv_in_t *__restrict__ v_block, + acc_dtype_t *__restrict__ acc_o, const int32_t kv_head_size, const int32_t dim_size, + const int32_t t_r_idx, const int32_t t_c_idx, const int32_t seq_size_q, + const int32_t seq_size_k, bool causal_mask) { +#ifdef __AVX2__ + const int32_t global_r_start = t_r_idx * Br; + const int32_t global_c_start = t_c_idx * Bc; + int delta_pos = seq_size_k - seq_size_q; + if (causal_mask && (global_c_start - delta_pos > (global_r_start + Br_n_fixed - 1))) return; + const int32_t v_stride_size = kv_head_size * dim_size; + for (int b_r_idx = 0; b_r_idx < Br_n_fixed; ++b_r_idx) { + for (int 
d_base = 0; d_base < dim_size; d_base += 8) { + __m256 acc = _mm256_loadu_ps(acc_o + b_r_idx * dim_size + d_base); + for (int b_c_idx = 0; b_c_idx < Bc_n_fixed; ++b_c_idx) { + __m256 w_vec = _mm256_set1_ps(w_block[b_r_idx * Bc + b_c_idx]); + const dtype_kv_in_t *v_ptr = v_block + b_c_idx * v_stride_size + d_base; + acc = _mm256_fmadd_ps(w_vec, MLLM_F32Cx8_LOAD(v_ptr), acc); + } + _mm256_storeu_ps(acc_o + b_r_idx * dim_size + d_base, acc); + } + } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + const int32_t global_r_start = t_r_idx * Br; + const int32_t global_c_start = t_c_idx * Bc; + int delta_pos = seq_size_k - seq_size_q; + if (causal_mask && (global_c_start - delta_pos > (global_r_start + Br_n_fixed - 1))) return; + const int32_t v_stride_size = kv_head_size * dim_size; + for (int b_r_idx = 0; b_r_idx < Br_n_fixed; ++b_r_idx) { + for (int d_base = 0; d_base < dim_size; d_base += 4) { + float32x4_t acc_vec = vld1q_f32(acc_o + b_r_idx * dim_size + d_base); + for (int b_c_idx = 0; b_c_idx < Bc_n_fixed; ++b_c_idx) { + float32x4_t w_vec = vdupq_n_f32(w_block[b_r_idx * Bc + b_c_idx]); + const dtype_kv_in_t *v_ptr = v_block + b_c_idx * v_stride_size + d_base; + float32x4_t v_vec = vcvt_f32_f16(vld1_f16((const __fp16 *)v_ptr)); + acc_vec = vfmaq_f32(acc_vec, v_vec, w_vec); + } + vst1q_f32(acc_o + b_r_idx * dim_size + d_base, acc_vec); + } + } +#endif + } + + inline void scale_and_store_pa_n_fixed(const int32_t Br_n_fixed, const acc_dtype_t *__restrict__ acc_o, + const acc_dtype_t *__restrict__ logsum, dtype_out_t *__restrict__ o_block, + const int32_t t_r_idx, const int32_t head_size, const int32_t dim_size) { + for (int i = 0; i < Br_n_fixed; ++i) { + dtype_out_t *o_block_line = o_block + i * head_size * dim_size; +#ifdef __AVX2__ + float reciprocal_logsum = 1.0f / logsum[i]; + __m256 reciprocal_logsum_vec = _mm256_set1_ps(reciprocal_logsum); + int j = 0; + for (; j <= dim_size - 8; j += 8) { + __m256 vec_acc_o = _mm256_loadu_ps(acc_o + i * dim_size + j); + 
_mm256_storeu_ps(o_block_line + j, _mm256_mul_ps(vec_acc_o, reciprocal_logsum_vec)); + } + for (; j < dim_size; ++j) { o_block_line[j] = acc_o[i * dim_size + j] * reciprocal_logsum; } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + float reciprocal_logsum = 1.0f / logsum[i]; + float32x4_t reciprocal_logsum_vec = vdupq_n_f32(reciprocal_logsum); + int j = 0; + for (; j <= dim_size - 4; j += 4) { + float32x4_t vec_acc_o = vld1q_f32(acc_o + i * dim_size + j); + vst1q_f32(o_block_line + j, vmulq_f32(vec_acc_o, reciprocal_logsum_vec)); + } + for (; j < dim_size; ++j) { o_block_line[j] = acc_o[i * dim_size + j] * reciprocal_logsum; } +#endif + } + } + + inline void init_temp_d(acc_dtype_t *logsum, acc_dtype_t *scoremax, acc_dtype_t *acc_o, const int32_t dim_size) { + logsum[0] = 0.0f; + scoremax[0] = NEG_INF; +#ifdef __AVX2__ + __m256 zero_vec = _mm256_setzero_ps(); + for (int i = 0; i < 1 * dim_size; i += 8) { _mm256_storeu_ps(acc_o + i, zero_vec); } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + float32x4_t zero_vec = vdupq_n_f32(0.0f); + for (int i = 0; i < 1 * dim_size; i += 4) { vst1q_f32(acc_o + i, zero_vec); } +#endif + } + + inline void mma0_d(const dtype_q_in_t *__restrict__ q_block, const dtype_kv_in_t *__restrict__ k_block, + acc_dtype_t *__restrict__ acc_s, const int32_t dim_size, + const int32_t kv_stride_size, const int32_t t_r_idx, const int32_t t_c_idx, + const int32_t seq_size_q, const int32_t seq_size_k, bool causal_mask) { +#ifdef __AVX2__ + const dtype_q_in_t *q_block_line = q_block; + for (int32_t b_c_idx = 0; b_c_idx < Bc; ++b_c_idx) { + const dtype_kv_in_t *k_block_line = k_block + b_c_idx * kv_stride_size; + __m256 sum_vec = _mm256_setzero_ps(); + int i = 0; + for (; i <= dim_size - 8; i += 8) { + sum_vec = _mm256_fmadd_ps(_mm256_loadu_ps(q_block_line + i), MLLM_F32Cx8_LOAD(k_block_line + i), sum_vec); + } + acc_dtype_t total = _mm256_hadd_ps(sum_vec); + for (; i < dim_size; ++i) { total += q_block_line[i] * 
MLLM_FP16_TO_FP32(k_block_line[i]); } + acc_s[b_c_idx] = total; + } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + for (int32_t b_c_idx = 0; b_c_idx < Bc; ++b_c_idx) { + const dtype_kv_in_t *k_block_line = k_block + b_c_idx * kv_stride_size; + float32x4_t sum_vec = vdupq_n_f32(0.0f); + int i = 0; + for (; i <= dim_size - 4; i += 4) { + sum_vec = vfmaq_f32(sum_vec, vld1q_f32(q_block + i), vcvt_f32_f16(vld1_f16((const __fp16 *)k_block_line + i))); + } + acc_dtype_t total = vaddvq_f32(sum_vec); + for (; i < dim_size; ++i) { total += q_block[i] * (float)k_block_line[i]; } + acc_s[b_c_idx] = total; + } +#endif + } + + inline void softmax_d(acc_dtype_t *__restrict__ acc_s, + acc_dtype_t *scoremax, acc_dtype_t *scoremax_prev, acc_dtype_t *score_scale, + acc_dtype_t *score_sum, acc_dtype_t *logsum, const float scale, + const int32_t t_r_idx, const int32_t t_c_idx, + const int32_t seq_size_q, const int32_t seq_size_k, bool causal_mask) { + scoremax_prev[0] = scoremax[0]; + float max_val = NEG_INF; + for (int bc = 0; bc < Bc; ++bc) max_val = fmaxf(max_val, acc_s[bc]); + scoremax[0] = fmaxf(max_val, scoremax[0]); + score_scale[0] = expf((scoremax_prev[0] - scoremax[0]) * scale); + float current_sum = 0.0f; + for (int bc = 0; bc < Bc; ++bc) { + float val = expf((acc_s[bc] - scoremax[0]) * scale); + acc_s[bc] = val; + current_sum += val; + } + score_sum[0] = current_sum; + logsum[0] = logsum[0] * score_scale[0] + score_sum[0]; + } + + inline void rescale_d(acc_dtype_t *__restrict__ acc_o, acc_dtype_t *__restrict__ score_scale, + const int32_t dim_size, const int32_t t_r_idx, const int32_t t_c_idx, + const int32_t seq_size_q, const int32_t seq_size_k, bool causal_mask) { +#ifdef __AVX2__ + __m256 scale_v = _mm256_set1_ps(score_scale[0]); + for (int j = 0; j < dim_size; j += 8) { + __m256 acc = _mm256_loadu_ps(acc_o + j); + acc = _mm256_mul_ps(acc, scale_v); + _mm256_storeu_ps(acc_o + j, acc); + } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + float32x4_t 
scale_v = vdupq_n_f32(score_scale[0]); + for (int j = 0; j < dim_size; j += 4) { + vst1q_f32(acc_o + j, vmulq_f32(vld1q_f32(acc_o + j), scale_v)); + } +#endif + } + + inline void mma1_d(const acc_dtype_t *__restrict__ w_block, const dtype_kv_in_t *__restrict__ v_block, + acc_dtype_t *__restrict__ acc_o, const int32_t kv_head_size, const int32_t dim_size, + const int32_t t_r_idx, const int32_t t_c_idx, const int32_t seq_size_q, + const int32_t seq_size_k, bool causal_mask) { +#ifdef __AVX2__ + const int32_t v_stride_size = kv_head_size * dim_size; + for (int d_base = 0; d_base < dim_size; d_base += 8) { + __m256 acc = _mm256_loadu_ps(acc_o + d_base); + for (int b_c_idx = 0; b_c_idx < Bc; ++b_c_idx) { + __m256 w_vec = _mm256_set1_ps(w_block[b_c_idx]); + const dtype_kv_in_t *v_ptr = v_block + b_c_idx * v_stride_size + d_base; + __m256 v_vec = MLLM_F32Cx8_LOAD(v_ptr); + acc = _mm256_fmadd_ps(w_vec, v_vec, acc); + } + _mm256_storeu_ps(acc_o + d_base, acc); + } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + const int32_t v_stride_size = kv_head_size * dim_size; + int d_base = 0; + for (; d_base <= dim_size - 4; d_base += 4) { + float32x4_t acc_vec = vld1q_f32(acc_o + d_base); + for (int b_c_idx = 0; b_c_idx < Bc; ++b_c_idx) { + float32x4_t w_vec = vdupq_n_f32(w_block[b_c_idx]); + const __fp16 *v_ptr = (const __fp16 *)v_block + b_c_idx * v_stride_size + d_base; + acc_vec = vfmaq_f32(acc_vec, vcvt_f32_f16(vld1_f16(v_ptr)), w_vec); + } + vst1q_f32(acc_o + d_base, acc_vec); + } + for (; d_base < dim_size; ++d_base) { + float acc = acc_o[d_base]; + for (int b_c_idx = 0; b_c_idx < Bc; ++b_c_idx) { + acc += w_block[b_c_idx] * (float)v_block[b_c_idx * v_stride_size + d_base]; + } + acc_o[d_base] = acc; + } +#endif + } + + inline void scale_and_store_d(const acc_dtype_t *__restrict__ acc_o, + const acc_dtype_t *__restrict__ logsum, + dtype_out_t *__restrict__ o_block, const int32_t t_r_idx, + const int32_t head_size, const int32_t dim_size) { + float reciprocal_logsum = 
1.0f / logsum[0]; + int j = 0; +#ifdef __AVX2__ + __m256 reciprocal_logsum_vec = _mm256_set1_ps(reciprocal_logsum); + for (; j <= dim_size - 8; j += 8) { + _mm256_storeu_ps(o_block + j, _mm256_mul_ps(_mm256_loadu_ps(acc_o + j), reciprocal_logsum_vec)); + } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + float32x4_t reciprocal_logsum_vec = vdupq_n_f32(reciprocal_logsum); + for (; j <= dim_size - 4; j += 4) { + vst1q_f32(o_block + j, vmulq_f32(vld1q_f32(acc_o + j), reciprocal_logsum_vec)); + } +#endif + for (; j < dim_size; ++j) { + o_block[j] = acc_o[j] * reciprocal_logsum; + } + } + + inline void mma0_d_n_fixed(const int32_t Bc_n_fixed, const dtype_q_in_t *__restrict__ q_block, + const dtype_kv_in_t *__restrict__ k_block, acc_dtype_t *__restrict__ acc_s, + const int32_t dim_size, const int32_t kv_stride_size, + const int32_t t_r_idx, const int32_t t_c_idx, const int32_t seq_size_q, + const int32_t seq_size_k, bool causal_mask) { + const dtype_q_in_t *q_block_line = q_block; + for (int32_t b_c_idx = 0; b_c_idx < Bc_n_fixed; ++b_c_idx) { + const dtype_kv_in_t *k_block_line = k_block + b_c_idx * kv_stride_size; + float total = 0.0f; + for (int i = 0; i < dim_size; ++i) { total += q_block_line[i] * MLLM_FP16_TO_FP32(k_block_line[i]); } + acc_s[b_c_idx] = total; + } + } + + inline void softmax_d_n_fixed(const int32_t Bc_n_fixed, acc_dtype_t *__restrict__ acc_s, + acc_dtype_t *scoremax, acc_dtype_t *scoremax_prev, + acc_dtype_t *score_scale, acc_dtype_t *score_sum, + acc_dtype_t *logsum, const float scale, + const int32_t t_r_idx, const int32_t t_c_idx, + const int32_t seq_size_q, const int32_t seq_size_k, bool causal_mask) { + scoremax_prev[0] = scoremax[0]; + float max_val = NEG_INF; + for (int bc = 0; bc < Bc_n_fixed; ++bc) max_val = fmaxf(max_val, acc_s[bc]); + scoremax[0] = fmaxf(max_val, scoremax[0]); + score_scale[0] = expf((scoremax_prev[0] - scoremax[0]) * scale); + float current_sum = 0.0f; + for (int bc = 0; bc < Bc_n_fixed; ++bc) { + float val = 
expf((acc_s[bc] - scoremax[0]) * scale); + acc_s[bc] = val; + current_sum += val; + } + score_sum[0] = current_sum; + logsum[0] = logsum[0] * score_scale[0] + score_sum[0]; + } + + inline void rescale_d_n_fixed(const int32_t Bc_n_fixed, acc_dtype_t *__restrict__ acc_o, + acc_dtype_t *__restrict__ score_scale, const int32_t dim_size, + const int32_t t_r_idx, const int32_t t_c_idx, + const int32_t seq_size_q, const int32_t seq_size_k, + bool causal_mask) { + float scale = score_scale[0]; + for (int j = 0; j < dim_size; ++j) { acc_o[j] *= scale; } + } + + inline void mma1_d_n_fixed(const int32_t Bc_n_fixed, const acc_dtype_t *__restrict__ w_block, + const dtype_kv_in_t *__restrict__ v_block, acc_dtype_t *__restrict__ acc_o, + const int32_t kv_head_size, const int32_t dim_size, const int32_t t_r_idx, + const int32_t t_c_idx, const int32_t seq_size_q, + const int32_t seq_size_k, bool causal_mask) { + const int32_t v_stride_size = kv_head_size * dim_size; + for (int d_base = 0; d_base < dim_size; ++d_base) { + float acc = acc_o[d_base]; + for (int b_c_idx = 0; b_c_idx < Bc_n_fixed; ++b_c_idx) { + acc += w_block[b_c_idx] * MLLM_FP16_TO_FP32(v_block[b_c_idx * v_stride_size + d_base]); + } + acc_o[d_base] = acc; + } + } + +private: + // float scale_; + acc_dtype_t *acc_o_; + acc_dtype_t *acc_s_; + acc_dtype_t *logsum_; + acc_dtype_t *scoremax_; + acc_dtype_t *scoremax_prev_; + acc_dtype_t *score_scale_; + acc_dtype_t *score_sum_; +}; + +template +struct FlashAttn2T { +public: + using dtype_q_in_t = typename Impl::dtype_q_in_t; + using dtype_kv_in_t = typename Impl::dtype_kv_in_t; + using dtype_out_t = typename Impl::dtype_out_t; + using acc_dtype_t = typename Impl::acc_dtype_t; + + void configure(int32_t Br, int32_t Bc, int32_t Q_Head, int32_t KV_Head, int32_t threads, bool high_precision) { + impl_.configure(Br, Bc, Q_Head, KV_Head, threads, high_precision); + } + + void init_workspace(acc_dtype_t *acc_o, acc_dtype_t *acc_s, + acc_dtype_t *logsum, acc_dtype_t *scoremax, 
acc_dtype_t *scoremax_prev, + acc_dtype_t *score_scale, acc_dtype_t *score_sum) { + // Note: workspace pointers are always float, acc_s_cast is removed + impl_.init_workspace(acc_o, acc_s, logsum, scoremax, scoremax_prev, score_scale, score_sum); + } + + void operator()(const dtype_q_in_t *__restrict__ Q, const dtype_kv_in_t *__restrict__ K, + const dtype_kv_in_t *__restrict__ V, dtype_out_t *__restrict__ O, + const int32_t batch_size, const int32_t head_size, + const int32_t seq_size_q, const int32_t seq_size_k, + const int32_t dim_size, bool causal_mask = true) { + impl_.fa2(Q, K, V, O, batch_size, head_size, seq_size_q, seq_size_k, dim_size, causal_mask); + } + +private: + Impl impl_; +}; + +class WorkspaceManager { +public: + WorkspaceManager() : + workspace_{}, current_sizes_{} { + } + + ~WorkspaceManager() { + for (int i = 0; i < 7; ++i) { + if (workspace_[i]) { + aligned_free(workspace_[i]); + } + } + } + + void **get_workspace(const size_t *required_sizes) { + for (int i = 0; i < 7; ++i) { + if (required_sizes[i] > current_sizes_[i]) { + if (workspace_[i]) { + aligned_free(workspace_[i]); + } + aligned_alloc(&workspace_[i], required_sizes[i], 32); + current_sizes_[i] = required_sizes[i]; + } + } + return workspace_; + } + +private: + WorkspaceManager(const WorkspaceManager &) = delete; + WorkspaceManager &operator=(const WorkspaceManager &) = delete; + + void *workspace_[7]; + size_t current_sizes_[7]; +}; + +} // namespace mobi_attn + +void flash_attention_2_forward( + const void *Q, const void *K, const void *V, void *O, + int32_t batch_size, int32_t head_size, int32_t seq_size_q, int32_t seq_size_k, int32_t dim_size, + bool causal_mask, bool use_fp32, int32_t threads, int32_t br, int32_t bc, + int32_t q_head, int32_t kv_head, bool high_precision_exp) { + thread_local mobi_attn::WorkspaceManager manager; + + const size_t acc_o_size = threads * br * dim_size * sizeof(float); + const size_t acc_s_size = threads * br * bc * sizeof(float); + const size_t 
logsum_size = threads * br * sizeof(float); + const size_t scoremax_size = threads * br * sizeof(float); + const size_t scoremax_prev_size = threads * br * sizeof(float); + const size_t score_scale_size = threads * br * sizeof(float); + const size_t score_sum_size = threads * br * sizeof(float); + + const size_t required_sizes[7] = { + acc_o_size, acc_s_size, logsum_size, scoremax_size, + scoremax_prev_size, score_scale_size, score_sum_size}; + + void **workspace = manager.get_workspace(required_sizes); + + if (use_fp32) { + mobi_attn::FlashAttn2T op; + op.configure(br, bc, q_head, kv_head, threads, high_precision_exp); + + op.init_workspace( + static_cast(workspace[0]), static_cast(workspace[1]), + static_cast(workspace[2]), static_cast(workspace[3]), + static_cast(workspace[4]), static_cast(workspace[5]), + static_cast(workspace[6])); + + op(static_cast(Q), static_cast(K), static_cast(V), + static_cast(O), + batch_size, head_size, seq_size_q, seq_size_k, dim_size, causal_mask); + } else { + mobi_attn::FlashAttn2T op; + op.configure(br, bc, q_head, kv_head, threads, high_precision_exp); + + op.init_workspace( + static_cast(workspace[0]), static_cast(workspace[1]), + static_cast(workspace[2]), static_cast(workspace[3]), + static_cast(workspace[4]), static_cast(workspace[5]), + static_cast(workspace[6])); + + op(static_cast(Q), static_cast(K), static_cast(V), + static_cast(O), + batch_size, head_size, seq_size_q, seq_size_k, dim_size, causal_mask); + } +} +#endif // MLLM_FA2_CAL_HPP \ No newline at end of file diff --git a/mllm/backends/cpu/compute/FlashAttention2H.hpp b/mllm/backends/cpu/compute/FlashAttention2H.hpp new file mode 100644 index 000000000..d7e00377c --- /dev/null +++ b/mllm/backends/cpu/compute/FlashAttention2H.hpp @@ -0,0 +1,1059 @@ +#ifndef MLLM_FA2H_CAL_HPP +#define MLLM_FA2H_CAL_HPP + +#include +#include +#include +#include +#include +#include +#include "Types.hpp" +#include "backends/cpu/third_party/ggml/QuantizeFP16.hpp" +#include 
"backends/cpu/third_party/ggml/ComputeUtils.hpp" + +// 平台相关的头文件和宏定义 +#ifdef __AVX2__ +#include +#define NEG_INF_F32 (-std::numeric_limits::infinity()) + +// Horizontal max of a __m256 vector +inline float hmax_ps_avx(__m256 x) { + __m128 lo = _mm256_castps256_ps128(x); + __m128 hi = _mm256_extractf128_ps(x, 1); + __m128 max_val = _mm_max_ps(lo, hi); + max_val = _mm_max_ps(max_val, _mm_shuffle_ps(max_val, max_val, _MM_SHUFFLE(0, 0, 2, 2))); + max_val = _mm_max_ps(max_val, _mm_shuffle_ps(max_val, max_val, _MM_SHUFFLE(0, 0, 0, 1))); + return _mm_cvtss_f32(max_val); +} + +// Horizontal sum of a __m256 vector +inline float hadd_ps_avx(__m256 x) { + __m128 lo = _mm256_castps256_ps128(x); + __m128 hi = _mm256_extractf128_ps(x, 1); + __m128 sum = _mm_add_ps(lo, hi); + sum = _mm_hadd_ps(sum, sum); + sum = _mm_hadd_ps(sum, sum); + return _mm_cvtss_f32(sum); +} + +#elif __ARM_NEON +#include +#define NEG_INF_F32 (-std::numeric_limits::infinity()) + +// NEON版本:水平最大值 (Horizontal max of a float32x4_t vector) +inline float hmax_ps_neon(float32x4_t x) { + return vmaxvq_f32(x); +} + +// NEON版本:水平求和 (Horizontal sum of a float32x4_t vector) +inline float hadd_ps_neon(float32x4_t x) { + return vaddvq_f32(x); +} + +#else +#error "Unsupported architecture. Please define __AVX2__ or __ARM_NEON." 
+#endif + +// Common aligned allocation/free functions +inline void platform_aligned_alloc(void **ptr, size_t required_bytes, size_t align) { + if (align % sizeof(void *) != 0 || (align & (align - 1)) != 0) { + *ptr = nullptr; + return; + } + if (posix_memalign(ptr, align, required_bytes) != 0) { + *ptr = nullptr; + } +} + +inline void platform_aligned_free(void *ptr) { + free(ptr); +} + +namespace mobi_attn { + +// ======================================== +// FlashAttention2 核心实现 (FP32版本) - BHSD Layout +// ======================================== +struct FA_2_GQA_QKV_FP32_BHSD_O_FP32_BHSD_ACC_FP32_IMPL { + using dtype_q_in_t = float; + using dtype_kv_in_t = dtype_q_in_t; + using dtype_out_t = dtype_q_in_t; + using dtype_t = dtype_out_t; + using acc_dtype_t = float; + + int32_t Br; + int32_t Bc; + int32_t Q_Head; + int32_t KV_Head; + int32_t threads; + bool high_precision; + + // Workspace pointers + acc_dtype_t *acc_o_; + acc_dtype_t *acc_s_; + acc_dtype_t *logsum_; + acc_dtype_t *scoremax_; + acc_dtype_t *scoremax_prev_; + acc_dtype_t *score_scale_; + acc_dtype_t *score_sum_; + + void configure(int32_t Br_, int32_t Bc_, int32_t Q_Head_, int32_t KV_Head_, int32_t threads_, bool high_precision_) { + Br = Br_; + Bc = Bc_; + Q_Head = Q_Head_; + KV_Head = KV_Head_; + threads = threads_; + high_precision = high_precision_; + } + + void init_workspace(acc_dtype_t *acc_o, acc_dtype_t *acc_s, + acc_dtype_t *logsum, acc_dtype_t *scoremax, acc_dtype_t *scoremax_prev, + acc_dtype_t *score_scale, acc_dtype_t *score_sum) { + acc_o_ = acc_o; + acc_s_ = acc_s; + logsum_ = logsum; + scoremax_ = scoremax; + scoremax_prev_ = scoremax_prev; + score_scale_ = score_scale; + score_sum_ = score_sum; + } + + void fa2(const dtype_t *__restrict__ Q, const dtype_t *__restrict__ K, + const dtype_t *__restrict__ V, dtype_t *__restrict__ O, const int32_t batch_size, + const int32_t head_size, const int32_t seq_size_q, const int32_t seq_size_k, + const int32_t dim_size, bool causal_mask, + 
int32_t q_head_skp, int32_t k_head_skp, int32_t v_head_skp) { + assert(Br == Bc); + assert(Q_Head % KV_Head == 0); + assert(head_size % threads == 0); +#ifdef __AVX2__ + assert(dim_size % 8 == 0); +#elif __ARM_NEON + assert(dim_size % 4 == 0); +#endif + + if (seq_size_q != 1) { + __fa2_prefill_append(Q, K, V, O, batch_size, head_size, seq_size_q, seq_size_k, dim_size, causal_mask, q_head_skp, k_head_skp, v_head_skp); + } else { + __fa2_decode(Q, K, V, O, batch_size, head_size, seq_size_q, seq_size_k, dim_size, causal_mask, q_head_skp, k_head_skp, v_head_skp); + } + } + +private: + inline void __fa2_prefill_append(const dtype_t *__restrict__ Q, const dtype_t *__restrict__ K, + const dtype_t *__restrict__ V, dtype_t *__restrict__ O, + const int32_t batch_size, const int32_t head_size, + const int32_t seq_size_q, const int32_t seq_size_k, + const int32_t dim_size, bool causal_mask, + int32_t q_head_skp, int32_t k_head_skp, int32_t v_head_skp) { + const int32_t Tr = (seq_size_q + Br - 1) / Br; + const int32_t Tc = (seq_size_k + Bc - 1) / Bc; + + const float local_scale = 1.0f / sqrtf(static_cast(dim_size)); + const int32_t kv_group_size = Q_Head / KV_Head; + + for (int32_t b_idx = 0; b_idx < batch_size; ++b_idx) { +#pragma omp parallel for num_threads(threads) schedule(dynamic, 1) if (threads > 1) + for (int32_t h_idx = 0; h_idx < head_size; ++h_idx) { + const int32_t thread_id = omp_get_thread_num(); + const int32_t this_thread_head = h_idx; + const int32_t this_thread_kv_head = this_thread_head / kv_group_size; + + const dtype_t *q_batch_base = Q + b_idx * head_size * q_head_skp; + const dtype_t *k_batch_base = K + b_idx * KV_Head * k_head_skp; + const dtype_t *v_batch_base = V + b_idx * KV_Head * v_head_skp; + dtype_t *o_batch_base = O + b_idx * head_size * q_head_skp; + + for (int t_r_idx = 0; t_r_idx < Tr; ++t_r_idx) { + const int32_t Br_n_fixed = (t_r_idx == Tr - 1) ? 
(seq_size_q - t_r_idx * Br) : Br; + init_temp(logsum_ + thread_id * Br, scoremax_ + thread_id * Br, + acc_o_ + thread_id * Br * dim_size, Br_n_fixed, dim_size); + + for (int t_c_idx = 0; t_c_idx < Tc; ++t_c_idx) { + const int32_t Bc_n_fixed = (t_c_idx == Tc - 1) ? (seq_size_k - t_c_idx * Bc) : Bc; + + const dtype_t *tile_q = q_batch_base + this_thread_head * q_head_skp + t_r_idx * Br * dim_size; + const dtype_t *tile_k = k_batch_base + this_thread_kv_head * k_head_skp + t_c_idx * Bc * dim_size; + const dtype_t *tile_v = v_batch_base + this_thread_kv_head * v_head_skp + t_c_idx * Bc * dim_size; + + acc_dtype_t *tile_acc_s = acc_s_ + thread_id * Br * Bc; + acc_dtype_t *acc_o = acc_o_ + thread_id * Br * dim_size; + + mma0(Br_n_fixed, Bc_n_fixed, tile_q, tile_k, tile_acc_s, dim_size, t_r_idx, t_c_idx, seq_size_q, seq_size_k, causal_mask); + softmax(Br_n_fixed, Bc_n_fixed, tile_acc_s, scoremax_ + thread_id * Br, scoremax_prev_ + thread_id * Br, score_scale_ + thread_id * Br, score_sum_ + thread_id * Br, logsum_ + thread_id * Br, local_scale); + rescale(Br_n_fixed, acc_o, score_scale_ + thread_id * Br, dim_size); + mma1(Br_n_fixed, Bc_n_fixed, tile_acc_s, tile_v, acc_o, dim_size); + } + + dtype_t *o_block_ptr = o_batch_base + this_thread_head * q_head_skp + t_r_idx * Br * dim_size; + scale_and_store(Br_n_fixed, acc_o_ + thread_id * Br * dim_size, logsum_ + thread_id * Br, o_block_ptr, dim_size); + } + } + } + } + + inline void __fa2_decode(const dtype_t *__restrict__ Q, const dtype_t *__restrict__ K, + const dtype_t *__restrict__ V, dtype_t *__restrict__ O, + const int32_t batch_size, const int32_t head_size, + const int32_t seq_size_q, const int32_t seq_size_k, + const int32_t dim_size, bool causal_mask, + int32_t q_head_skp, int32_t k_head_skp, int32_t v_head_skp) { + const int32_t Tc = (seq_size_k + Bc - 1) / Bc; + + const float local_scale = 1.0f / sqrtf(static_cast(dim_size)); + const int32_t kv_group_size = (Q_Head > 0 && KV_Head > 0) ? 
Q_Head / KV_Head : 1; + + for (int32_t b_idx = 0; b_idx < batch_size; ++b_idx) { +#pragma omp parallel for num_threads(threads) schedule(dynamic, 1) if (threads > 1) + for (int32_t h_idx = 0; h_idx < head_size; ++h_idx) { + const int32_t thread_id = omp_get_thread_num(); + const int32_t this_thread_head = h_idx; + const int32_t this_thread_kv_head = this_thread_head / kv_group_size; + + init_temp_d(logsum_ + thread_id, scoremax_ + thread_id, acc_o_ + thread_id * dim_size, dim_size); + + const dtype_t *q_batch_base = Q + b_idx * head_size * q_head_skp; + const dtype_t *k_batch_base = K + b_idx * KV_Head * k_head_skp; + const dtype_t *v_batch_base = V + b_idx * KV_Head * v_head_skp; + dtype_t *o_batch_base = O + b_idx * head_size * q_head_skp; + + for (int t_c_idx = 0; t_c_idx < Tc; ++t_c_idx) { + const int32_t Bc_n_fixed = (t_c_idx == Tc - 1) ? (seq_size_k - t_c_idx * Bc) : Bc; + + const dtype_t *tile_q = q_batch_base + this_thread_head * q_head_skp; + const dtype_t *tile_k = k_batch_base + this_thread_kv_head * k_head_skp + t_c_idx * Bc * dim_size; + const dtype_t *tile_v = v_batch_base + this_thread_kv_head * v_head_skp + t_c_idx * Bc * dim_size; + + acc_dtype_t *tile_acc_s = acc_s_ + thread_id * Bc; + acc_dtype_t *acc_o = acc_o_ + thread_id * dim_size; + + mma0_d(Bc_n_fixed, tile_q, tile_k, tile_acc_s, dim_size, t_c_idx, seq_size_k, causal_mask); + softmax_d(Bc_n_fixed, tile_acc_s, scoremax_ + thread_id, scoremax_prev_ + thread_id, score_scale_ + thread_id, score_sum_ + thread_id, logsum_ + thread_id, local_scale); + rescale_d(acc_o, score_scale_ + thread_id, dim_size); + mma1_d(Bc_n_fixed, tile_acc_s, tile_v, acc_o, dim_size); + } + + dtype_t *o_block_ptr = o_batch_base + this_thread_head * q_head_skp; + scale_and_store_d(acc_o_ + thread_id * dim_size, logsum_ + thread_id, o_block_ptr, dim_size); + } + } + } + + inline void init_temp(acc_dtype_t *logsum, acc_dtype_t *scoremax, acc_dtype_t *acc_o, const int32_t Br_n_fixed, const int32_t dim_size) { + for (int i = 
0; i < Br_n_fixed; ++i) { + logsum[i] = 0.0f; + scoremax[i] = NEG_INF_F32; + } +#ifdef __AVX2__ + __m256 zero_vec = _mm256_setzero_ps(); + for (int j = 0; j < Br_n_fixed * dim_size; j += 8) { + _mm256_storeu_ps(acc_o + j, zero_vec); + } +#elif __ARM_NEON + float32x4_t zero_vec = vdupq_n_f32(0.0f); + for (int j = 0; j < Br_n_fixed * dim_size; j += 4) { + vst1q_f32(acc_o + j, zero_vec); + } +#endif + } + + inline void mma0(const int32_t Br_n_fixed, const int32_t Bc_n_fixed, + const dtype_t *__restrict__ q_block, const dtype_t *__restrict__ k_block, + acc_dtype_t *__restrict__ acc_s, const int32_t dim_size, + const int32_t t_r_idx, const int32_t t_c_idx, + const int32_t seq_size_q, const int32_t seq_size_k, bool causal_mask) { + const int32_t global_r_start = t_r_idx * Br; + const int32_t global_c_start = t_c_idx * Bc; + const int32_t delta_pos = seq_size_k - seq_size_q; + + for (int32_t b_r_idx = 0; b_r_idx < Br_n_fixed; ++b_r_idx) { + const dtype_t *q_block_line = q_block + b_r_idx * dim_size; + for (int32_t b_c_idx = 0; b_c_idx < Bc_n_fixed; ++b_c_idx) { + const int32_t global_r_idx = global_r_start + b_r_idx; + const int32_t global_c_idx = global_c_start + b_c_idx; + if (causal_mask && (global_c_idx > global_r_idx + delta_pos)) { + acc_s[b_r_idx * Bc + b_c_idx] = NEG_INF_F32; + continue; + } + const dtype_t *k_block_line = k_block + b_c_idx * dim_size; +#ifdef __AVX2__ + __m256 sum_vec = _mm256_setzero_ps(); + int i = 0; + for (; i <= dim_size - 8; i += 8) { + __m256 q_vec = _mm256_loadu_ps(q_block_line + i); + __m256 k_vec = _mm256_loadu_ps(k_block_line + i); + sum_vec = _mm256_fmadd_ps(q_vec, k_vec, sum_vec); + } + acc_dtype_t total = hadd_ps_avx(sum_vec); + for (; i < dim_size; ++i) { total += q_block_line[i] * k_block_line[i]; } +#elif __ARM_NEON + float32x4_t sum_vec = vdupq_n_f32(0.0f); + int i = 0; + for (; i <= dim_size - 4; i += 4) { + sum_vec = vfmaq_f32(sum_vec, vld1q_f32(q_block_line + i), vld1q_f32(k_block_line + i)); + } + acc_dtype_t total = 
hadd_ps_neon(sum_vec); + for (; i < dim_size; ++i) { total += q_block_line[i] * k_block_line[i]; } +#endif + acc_s[b_r_idx * Bc + b_c_idx] = total; + } + } + } + + inline void softmax(const int32_t Br_n_fixed, const int32_t Bc_n_fixed, + acc_dtype_t *__restrict__ acc_s, acc_dtype_t *scoremax, acc_dtype_t *scoremax_prev, + acc_dtype_t *score_scale, acc_dtype_t *score_sum, acc_dtype_t *logsum, + const float scale) { + memcpy(scoremax_prev, scoremax, Br_n_fixed * sizeof(acc_dtype_t)); + + for (int br = 0; br < Br_n_fixed; ++br) { + acc_dtype_t *row = acc_s + br * Bc; + float block_max = NEG_INF_F32; + for (int bc = 0; bc < Bc_n_fixed; ++bc) { + block_max = fmaxf(block_max, row[bc]); + } + scoremax[br] = fmaxf(scoremax[br], block_max); + } + + for (int br = 0; br < Br_n_fixed; ++br) { + score_scale[br] = expf((scoremax_prev[br] - scoremax[br]) * scale); + } + + for (int br = 0; br < Br_n_fixed; ++br) { + const float current_max = scoremax[br]; + acc_dtype_t *row = acc_s + br * Bc; + float sum = 0.0f; + for (int bc = 0; bc < Bc_n_fixed; ++bc) { + if (row[bc] == NEG_INF_F32) { + row[bc] = 0.0f; + continue; + } + float val = expf((row[bc] - current_max) * scale); + row[bc] = val; + sum += val; + } + score_sum[br] = sum; + } + + for (int br = 0; br < Br_n_fixed; ++br) { + logsum[br] = logsum[br] * score_scale[br] + score_sum[br]; + } + } + + inline void rescale(const int32_t Br_n_fixed, acc_dtype_t *__restrict__ acc_o, acc_dtype_t *__restrict__ score_scale, const int32_t dim_size) { + for (int i = 0; i < Br_n_fixed; ++i) { +#ifdef __AVX2__ + __m256 scale_v = _mm256_set1_ps(score_scale[i]); + float *row_ptr = acc_o + i * dim_size; + for (int j = 0; j < dim_size; j += 8) { + __m256 acc = _mm256_loadu_ps(row_ptr + j); + _mm256_storeu_ps(row_ptr + j, _mm256_mul_ps(acc, scale_v)); + } +#elif __ARM_NEON + float32x4_t scale_v = vdupq_n_f32(score_scale[i]); + float *row_ptr = acc_o + i * dim_size; + for (int j = 0; j < dim_size; j += 4) { + float32x4_t acc = vld1q_f32(row_ptr + 
j); + vst1q_f32(row_ptr + j, vmulq_f32(acc, scale_v)); + } +#endif + } + } + + inline void mma1(const int32_t Br_n_fixed, const int32_t Bc_n_fixed, + const acc_dtype_t *__restrict__ w_block, const dtype_t *__restrict__ v_block, + acc_dtype_t *__restrict__ acc_o, const int32_t dim_size) { + const int32_t v_stride_size = dim_size; + + for (int b_r_idx = 0; b_r_idx < Br_n_fixed; ++b_r_idx) { +#ifdef __AVX2__ + for (int d_base = 0; d_base < dim_size; d_base += 8) { + __m256 acc = _mm256_loadu_ps(acc_o + b_r_idx * dim_size + d_base); + for (int b_c_idx = 0; b_c_idx < Bc_n_fixed; ++b_c_idx) { + acc = _mm256_fmadd_ps(_mm256_set1_ps(w_block[b_r_idx * Bc + b_c_idx]), _mm256_loadu_ps(v_block + b_c_idx * v_stride_size + d_base), acc); + } + _mm256_storeu_ps(acc_o + b_r_idx * dim_size + d_base, acc); + } +#elif __ARM_NEON + for (int d_base = 0; d_base < dim_size; d_base += 4) { + float32x4_t acc = vld1q_f32(acc_o + b_r_idx * dim_size + d_base); + for (int b_c_idx = 0; b_c_idx < Bc_n_fixed; ++b_c_idx) { + acc = vfmaq_f32(acc, vdupq_n_f32(w_block[b_r_idx * Bc + b_c_idx]), vld1q_f32(v_block + b_c_idx * v_stride_size + d_base)); + } + vst1q_f32(acc_o + b_r_idx * dim_size + d_base, acc); + } +#endif + } + } + + inline void scale_and_store(const int32_t Br_n_fixed, const acc_dtype_t *__restrict__ acc_o, + const acc_dtype_t *__restrict__ logsum, + dtype_t *__restrict__ o_block, const int32_t dim_size) { + for (int i = 0; i < Br_n_fixed; ++i) { + dtype_t *o_block_line = o_block + i * dim_size; + float reciprocal_logsum = (logsum[i] == 0.0f) ? 
0.0f : 1.0f / logsum[i]; +#ifdef __AVX2__ + __m256 reciprocal_logsum_vec = _mm256_set1_ps(reciprocal_logsum); + for (int j = 0; j <= dim_size - 8; j += 8) { + __m256 vec_acc_o = _mm256_loadu_ps(acc_o + i * dim_size + j); + _mm256_storeu_ps(o_block_line + j, _mm256_mul_ps(vec_acc_o, reciprocal_logsum_vec)); + } + for (int j = dim_size - (dim_size % 8); j < dim_size; ++j) { o_block_line[j] = acc_o[i * dim_size + j] * reciprocal_logsum; } +#elif __ARM_NEON + float32x4_t reciprocal_logsum_vec = vdupq_n_f32(reciprocal_logsum); + for (int j = 0; j <= dim_size - 4; j += 4) { + float32x4_t vec_acc_o = vld1q_f32(acc_o + i * dim_size + j); + vst1q_f32(o_block_line + j, vmulq_f32(vec_acc_o, reciprocal_logsum_vec)); + } + for (int j = dim_size - (dim_size % 4); j < dim_size; ++j) { o_block_line[j] = acc_o[i * dim_size + j] * reciprocal_logsum; } +#endif + } + } + + inline void init_temp_d(acc_dtype_t *logsum, acc_dtype_t *scoremax, acc_dtype_t *acc_o, const int32_t dim_size) { + logsum[0] = 0.0f; + scoremax[0] = NEG_INF_F32; +#ifdef __AVX2__ + __m256 zero_vec = _mm256_setzero_ps(); + for (int i = 0; i < dim_size; i += 8) { _mm256_storeu_ps(acc_o + i, zero_vec); } +#elif __ARM_NEON + float32x4_t zero_vec = vdupq_n_f32(0.0f); + for (int i = 0; i < dim_size; i += 4) { vst1q_f32(acc_o + i, zero_vec); } +#endif + } + + inline void mma0_d(const int32_t Bc_n_fixed, const dtype_t *__restrict__ q_block, + const dtype_t *__restrict__ k_block, acc_dtype_t *__restrict__ acc_s, + const int32_t dim_size, const int32_t t_c_idx, + const int32_t seq_size_k, bool causal_mask) { + const int32_t global_c_start = t_c_idx * Bc; + const int32_t global_r_idx = seq_size_k - 1; + + for (int32_t b_c_idx = 0; b_c_idx < Bc_n_fixed; ++b_c_idx) { + const int32_t global_c_idx = global_c_start + b_c_idx; + if (causal_mask && global_c_idx > global_r_idx) { + acc_s[b_c_idx] = NEG_INF_F32; + continue; + } + const dtype_t *k_block_line = k_block + b_c_idx * dim_size; +#ifdef __AVX2__ + __m256 sum_vec = 
_mm256_setzero_ps(); + int i = 0; + for (; i <= dim_size - 8; i += 8) { + sum_vec = _mm256_fmadd_ps(_mm256_loadu_ps(q_block + i), _mm256_loadu_ps(k_block_line + i), sum_vec); + } + acc_dtype_t total = hadd_ps_avx(sum_vec); + for (int i = dim_size - (dim_size % 8); i < dim_size; ++i) { total += q_block[i] * k_block_line[i]; } +#elif __ARM_NEON + float32x4_t sum_vec = vdupq_n_f32(0.0f); + int i = 0; + for (; i <= dim_size - 4; i += 4) { + sum_vec = vfmaq_f32(sum_vec, vld1q_f32(q_block + i), vld1q_f32(k_block_line + i)); + } + acc_dtype_t total = hadd_ps_neon(sum_vec); + for (int i = dim_size - (dim_size % 4); i < dim_size; ++i) { total += q_block[i] * k_block_line[i]; } +#endif + acc_s[b_c_idx] = total; + } + } + + inline void softmax_d(const int32_t Bc_n_fixed, acc_dtype_t *__restrict__ acc_s, + acc_dtype_t *scoremax, acc_dtype_t *scoremax_prev, + acc_dtype_t *score_scale, acc_dtype_t *score_sum, acc_dtype_t *logsum, + const float scale) { + scoremax_prev[0] = scoremax[0]; + + float block_max = NEG_INF_F32; + for (int bc = 0; bc < Bc_n_fixed; ++bc) block_max = fmaxf(block_max, acc_s[bc]); + scoremax[0] = fmaxf(scoremax[0], block_max); + + score_scale[0] = expf((scoremax_prev[0] - scoremax[0]) * scale); + + float current_sum = 0.0f; + for (int bc = 0; bc < Bc_n_fixed; ++bc) { + if (acc_s[bc] == NEG_INF_F32) { + acc_s[bc] = 0.0f; + continue; + } + float val = expf((acc_s[bc] - scoremax[0]) * scale); + acc_s[bc] = val; + current_sum += val; + } + score_sum[0] = current_sum; + logsum[0] = logsum[0] * score_scale[0] + score_sum[0]; + } + + inline void rescale_d(acc_dtype_t *__restrict__ acc_o, acc_dtype_t *__restrict__ score_scale, const int32_t dim_size) { +#ifdef __AVX2__ + __m256 scale_v = _mm256_set1_ps(score_scale[0]); + for (int j = 0; j < dim_size; j += 8) { + _mm256_storeu_ps(acc_o + j, _mm256_mul_ps(_mm256_loadu_ps(acc_o + j), scale_v)); + } +#elif __ARM_NEON + float32x4_t scale_v = vdupq_n_f32(score_scale[0]); + for (int j = 0; j < dim_size; j += 4) { + 
vst1q_f32(acc_o + j, vmulq_f32(vld1q_f32(acc_o + j), scale_v)); + } +#endif + } + + inline void mma1_d(const int32_t Bc_n_fixed, const acc_dtype_t *__restrict__ w_block, + const dtype_t *__restrict__ v_block, acc_dtype_t *__restrict__ acc_o, const int32_t dim_size) { + const int32_t v_stride_size = dim_size; +#ifdef __AVX2__ + for (int d_base = 0; d_base < dim_size; d_base += 8) { + __m256 acc = _mm256_loadu_ps(acc_o + d_base); + for (int b_c_idx = 0; b_c_idx < Bc_n_fixed; ++b_c_idx) { + acc = _mm256_fmadd_ps(_mm256_set1_ps(w_block[b_c_idx]), _mm256_loadu_ps(v_block + b_c_idx * v_stride_size + d_base), acc); + } + _mm256_storeu_ps(acc_o + d_base, acc); + } +#elif __ARM_NEON + for (int d_base = 0; d_base < dim_size; d_base += 4) { + float32x4_t acc = vld1q_f32(acc_o + d_base); + for (int b_c_idx = 0; b_c_idx < Bc_n_fixed; ++b_c_idx) { + acc = vfmaq_f32(acc, vdupq_n_f32(w_block[b_c_idx]), vld1q_f32(v_block + b_c_idx * v_stride_size + d_base)); + } + vst1q_f32(acc_o + d_base, acc); + } +#endif + } + + inline void scale_and_store_d(const acc_dtype_t *__restrict__ acc_o, const acc_dtype_t *__restrict__ logsum, + dtype_t *__restrict__ o_block, const int32_t dim_size) { + float reciprocal_logsum = (logsum[0] == 0.0f) ? 
0.0f : 1.0f / logsum[0]; +#ifdef __AVX2__ + __m256 reciprocal_logsum_vec = _mm256_set1_ps(reciprocal_logsum); + for (int j = 0; j <= dim_size - 8; j += 8) { + _mm256_storeu_ps(o_block + j, _mm256_mul_ps(_mm256_loadu_ps(acc_o + j), reciprocal_logsum_vec)); + } + for (int j = dim_size - (dim_size % 8); j < dim_size; ++j) { o_block[j] = acc_o[j] * reciprocal_logsum; } +#elif __ARM_NEON + float32x4_t reciprocal_logsum_vec = vdupq_n_f32(reciprocal_logsum); + for (int j = 0; j <= dim_size - 4; j += 4) { + vst1q_f32(o_block + j, vmulq_f32(vld1q_f32(acc_o + j), reciprocal_logsum_vec)); + } + for (int j = dim_size - (dim_size % 4); j < dim_size; ++j) { o_block[j] = acc_o[j] * reciprocal_logsum; } +#endif + } +}; + +// ======================================== +// FlashAttention2 核心实现 ( Q FP32/KV FP16 输入,FP32 输出版本) - BHSD Layout +// ======================================== +struct FA_2_GQA_Q_FP32_KV_FP16_BHSD_O_FP32_BHSD_ACC_FP32_IMPL { + using dtype_q_in_t = float; + using dtype_kv_in_t = mllm_fp16_t; + using dtype_out_t = float; + using acc_dtype_t = float; + + int32_t Br, Bc, Q_Head, KV_Head, threads; + bool high_precision; + + acc_dtype_t *acc_o_; + acc_dtype_t *acc_s_; + acc_dtype_t *logsum_; + acc_dtype_t *scoremax_; + acc_dtype_t *scoremax_prev_; + acc_dtype_t *score_scale_; + acc_dtype_t *score_sum_; + + void configure(int32_t Br_, int32_t Bc_, int32_t Q_Head_, int32_t KV_Head_, int32_t threads_, bool high_precision_) { + Br = Br_; + Bc = Bc_; + Q_Head = Q_Head_; + KV_Head = KV_Head_; + threads = threads_; + high_precision = high_precision_; + } + + void init_workspace(acc_dtype_t *acc_o, acc_dtype_t *acc_s, + acc_dtype_t *logsum, acc_dtype_t *scoremax, acc_dtype_t *scoremax_prev, + acc_dtype_t *score_scale, acc_dtype_t *score_sum) { + acc_o_ = acc_o; + acc_s_ = acc_s; + logsum_ = logsum; + scoremax_ = scoremax; + scoremax_prev_ = scoremax_prev; + score_scale_ = score_scale; + score_sum_ = score_sum; + } + + void fa2(const dtype_q_in_t *__restrict__ Q, const 
dtype_kv_in_t *__restrict__ K, + const dtype_kv_in_t *__restrict__ V, dtype_out_t *__restrict__ O, const int32_t batch_size, + const int32_t head_size, const int32_t seq_size_q, const int32_t seq_size_k, + const int32_t dim_size, bool causal_mask, + int32_t q_head_skp, int32_t k_head_skp, int32_t v_head_skp) { + assert(Br == Bc); + assert(head_size % threads == 0); + assert(Q_Head % KV_Head == 0); + +#ifdef __AVX2__ + assert(dim_size % 8 == 0); +#elif __ARM_NEON + assert(dim_size % 4 == 0); +#endif + + if (seq_size_q != 1) { + __fa2_prefill_append(Q, K, V, O, batch_size, head_size, seq_size_q, seq_size_k, dim_size, causal_mask, q_head_skp, k_head_skp, v_head_skp); + } else { + __fa2_decode(Q, K, V, O, batch_size, head_size, seq_size_q, seq_size_k, dim_size, causal_mask, q_head_skp, k_head_skp, v_head_skp); + } + } + +private: + inline void __fa2_prefill_append(const dtype_q_in_t *__restrict__ Q, const dtype_kv_in_t *__restrict__ K, + const dtype_kv_in_t *__restrict__ V, dtype_out_t *__restrict__ O, + const int32_t batch_size, const int32_t head_size, + const int32_t seq_size_q, const int32_t seq_size_k, + const int32_t dim_size, bool causal_mask, + int32_t q_head_skp, int32_t k_head_skp, int32_t v_head_skp) { + const int32_t Tr = (seq_size_q + Br - 1) / Br; + const int32_t Tc = (seq_size_k + Bc - 1) / Bc; + + const float local_scale = 1.0f / sqrtf(static_cast(dim_size)); + const int32_t kv_group_size = (Q_Head > 0 && KV_Head > 0) ? 
Q_Head / KV_Head : 1; + + for (int32_t b_idx = 0; b_idx < batch_size; ++b_idx) { +#pragma omp parallel for num_threads(threads) schedule(dynamic, 1) if (threads > 1) + for (int32_t h_idx = 0; h_idx < head_size; ++h_idx) { + const int32_t thread_id = omp_get_thread_num(); + const int32_t this_thread_head = h_idx; + const int32_t this_thread_kv_head = this_thread_head / kv_group_size; + + const dtype_q_in_t *q_batch_base = Q + b_idx * head_size * q_head_skp; + const dtype_kv_in_t *k_batch_base = K + b_idx * KV_Head * k_head_skp; + const dtype_kv_in_t *v_batch_base = V + b_idx * KV_Head * v_head_skp; + dtype_out_t *o_batch_base = O + b_idx * head_size * q_head_skp; + + for (int t_r_idx = 0; t_r_idx < Tr; ++t_r_idx) { + const int32_t Br_n_fixed = (t_r_idx == Tr - 1) ? (seq_size_q - t_r_idx * Br) : Br; + init_temp(logsum_ + thread_id * Br, scoremax_ + thread_id * Br, + acc_o_ + thread_id * Br * dim_size, Br_n_fixed, dim_size); + for (int t_c_idx = 0; t_c_idx < Tc; ++t_c_idx) { + const int32_t Bc_n_fixed = (t_c_idx == Tc - 1) ? 
(seq_size_k - t_c_idx * Bc) : Bc; + + const dtype_q_in_t *tile_q = q_batch_base + this_thread_head * q_head_skp + t_r_idx * Br * dim_size; + const dtype_kv_in_t *tile_k = k_batch_base + this_thread_kv_head * k_head_skp + t_c_idx * Bc * dim_size; + const dtype_kv_in_t *tile_v = v_batch_base + this_thread_kv_head * v_head_skp + t_c_idx * Bc * dim_size; + + acc_dtype_t *tile_acc_s = acc_s_ + thread_id * Br * Bc; + acc_dtype_t *acc_o = acc_o_ + thread_id * Br * dim_size; + + mma0(Br_n_fixed, Bc_n_fixed, tile_q, tile_k, tile_acc_s, dim_size, t_r_idx, t_c_idx, seq_size_q, seq_size_k, causal_mask); + softmax(Br_n_fixed, Bc_n_fixed, tile_acc_s, scoremax_ + thread_id * Br, scoremax_prev_ + thread_id * Br, score_scale_ + thread_id * Br, score_sum_ + thread_id * Br, logsum_ + thread_id * Br, local_scale); + rescale(Br_n_fixed, acc_o, score_scale_ + thread_id * Br, dim_size); + mma1(Br_n_fixed, Bc_n_fixed, tile_acc_s, tile_v, acc_o, dim_size); + } + dtype_out_t *o_block_ptr = o_batch_base + this_thread_head * q_head_skp + t_r_idx * Br * dim_size; + scale_and_store(Br_n_fixed, acc_o_ + thread_id * Br * dim_size, logsum_ + thread_id * Br, o_block_ptr, dim_size); + } + } + } + } + + inline void __fa2_decode(const dtype_q_in_t *__restrict__ Q, const dtype_kv_in_t *__restrict__ K, + const dtype_kv_in_t *__restrict__ V, dtype_out_t *__restrict__ O, + const int32_t batch_size, const int32_t head_size, + const int32_t seq_size_q, const int32_t seq_size_k, + const int32_t dim_size, bool causal_mask, + int32_t q_head_skp, int32_t k_head_skp, int32_t v_head_skp) { + const int32_t Tc = (seq_size_k + Bc - 1) / Bc; + + const float local_scale = 1.0f / sqrtf(static_cast(dim_size)); + const int32_t kv_group_size = (Q_Head > 0 && KV_Head > 0) ? 
Q_Head / KV_Head : 1; + + for (int32_t b_idx = 0; b_idx < batch_size; ++b_idx) { +#pragma omp parallel for num_threads(threads) schedule(dynamic, 1) if (threads > 1) + for (int32_t h_idx = 0; h_idx < head_size; ++h_idx) { + const int32_t thread_id = omp_get_thread_num(); + const int32_t this_thread_head = h_idx; + const int32_t this_thread_kv_head = this_thread_head / kv_group_size; + + init_temp_d(logsum_ + thread_id, scoremax_ + thread_id, acc_o_ + thread_id * dim_size, dim_size); + + const dtype_q_in_t *q_batch_base = Q + b_idx * head_size * q_head_skp; + const dtype_kv_in_t *k_batch_base = K + b_idx * KV_Head * k_head_skp; + const dtype_kv_in_t *v_batch_base = V + b_idx * KV_Head * v_head_skp; + dtype_out_t *o_batch_base = O + b_idx * head_size * q_head_skp; + + for (int t_c_idx = 0; t_c_idx < Tc; ++t_c_idx) { + const int32_t Bc_n_fixed = (t_c_idx == Tc - 1) ? (seq_size_k - t_c_idx * Bc) : Bc; + + const dtype_q_in_t *tile_q = q_batch_base + this_thread_head * q_head_skp; + const dtype_kv_in_t *tile_k = k_batch_base + this_thread_kv_head * k_head_skp + t_c_idx * Bc * dim_size; + const dtype_kv_in_t *tile_v = v_batch_base + this_thread_kv_head * v_head_skp + t_c_idx * Bc * dim_size; + + acc_dtype_t *tile_acc_s = acc_s_ + thread_id * Bc; + acc_dtype_t *acc_o = acc_o_ + thread_id * dim_size; + + mma0_d(Bc_n_fixed, tile_q, tile_k, tile_acc_s, dim_size, t_c_idx, seq_size_k, causal_mask); + softmax_d(Bc_n_fixed, tile_acc_s, scoremax_ + thread_id, scoremax_prev_ + thread_id, score_scale_ + thread_id, score_sum_ + thread_id, logsum_ + thread_id, local_scale); + rescale_d(acc_o, score_scale_ + thread_id, dim_size); + mma1_d(Bc_n_fixed, tile_acc_s, tile_v, acc_o, dim_size); + } + dtype_out_t *o_block_ptr = o_batch_base + this_thread_head * q_head_skp; + scale_and_store_d(acc_o_ + thread_id * dim_size, logsum_ + thread_id, o_block_ptr, dim_size); + } + } + } + + inline void init_temp(acc_dtype_t *logsum, acc_dtype_t *scoremax, acc_dtype_t *acc_o, const int32_t Br_n_fixed, 
const int32_t dim_size) { + for (int i = 0; i < Br_n_fixed; ++i) { + logsum[i] = 0.0f; + scoremax[i] = NEG_INF_F32; + } +#ifdef __AVX2__ + __m256 zero_vec = _mm256_setzero_ps(); + for (int j = 0; j < Br_n_fixed * dim_size; j += 8) { + _mm256_storeu_ps(acc_o + j, zero_vec); + } +#elif __ARM_NEON + float32x4_t zero_vec = vdupq_n_f32(0.0f); + for (int j = 0; j < Br_n_fixed * dim_size; j += 4) { + vst1q_f32(acc_o + j, zero_vec); + } +#endif + } + + inline void mma0(const int32_t Br_n_fixed, const int32_t Bc_n_fixed, + const dtype_q_in_t *__restrict__ q_block, const dtype_kv_in_t *__restrict__ k_block, + acc_dtype_t *__restrict__ acc_s, const int32_t dim_size, + const int32_t t_r_idx, const int32_t t_c_idx, + const int32_t seq_size_q, const int32_t seq_size_k, bool causal_mask) { + const int32_t global_r_start = t_r_idx * Br; + const int32_t global_c_start = t_c_idx * Bc; + const int32_t delta_pos = seq_size_k - seq_size_q; + + for (int32_t b_r_idx = 0; b_r_idx < Br_n_fixed; ++b_r_idx) { + const dtype_q_in_t *q_block_line = q_block + b_r_idx * dim_size; + for (int32_t b_c_idx = 0; b_c_idx < Bc_n_fixed; ++b_c_idx) { + const int32_t global_r_idx = global_r_start + b_r_idx; + const int32_t global_c_idx = global_c_start + b_c_idx; + if (causal_mask && (global_c_idx > global_r_idx + delta_pos)) { + acc_s[b_r_idx * Bc + b_c_idx] = NEG_INF_F32; + continue; + } + const dtype_kv_in_t *k_block_line = k_block + b_c_idx * dim_size; +#ifdef __AVX2__ + __m256 sum_vec = _mm256_setzero_ps(); + int i = 0; + for (; i <= dim_size - 8; i += 8) { + __m256 q_vec = _mm256_loadu_ps(q_block_line + i); + __m256 k_vec = MLLM_F32Cx8_LOAD(k_block_line + i); + sum_vec = _mm256_fmadd_ps(q_vec, k_vec, sum_vec); + } + acc_dtype_t total = hadd_ps_avx(sum_vec); + for (; i < dim_size; ++i) { total += q_block_line[i] * MLLM_FP16_TO_FP32(k_block_line[i]); } +#elif __ARM_NEON + float32x4_t sum_vec = vdupq_n_f32(0.0f); + int i = 0; + for (; i <= dim_size - 4; i += 4) { + float32x4_t q_vec = 
vld1q_f32(q_block_line + i); + float32x4_t k_vec = vcvt_f32_f16(vld1_f16((const __fp16 *)(k_block_line + i))); + sum_vec = vfmaq_f32(sum_vec, q_vec, k_vec); + } + acc_dtype_t total = hadd_ps_neon(sum_vec); + for (; i < dim_size; ++i) { total += q_block_line[i] * MLLM_FP16_TO_FP32(k_block_line[i]); } +#endif + acc_s[b_r_idx * Bc + b_c_idx] = total; + } + } + } + + inline void softmax(const int32_t Br_n_fixed, const int32_t Bc_n_fixed, + acc_dtype_t *__restrict__ acc_s, acc_dtype_t *scoremax, acc_dtype_t *scoremax_prev, + acc_dtype_t *score_scale, acc_dtype_t *score_sum, acc_dtype_t *logsum, + const float scale) { + memcpy(scoremax_prev, scoremax, Br_n_fixed * sizeof(acc_dtype_t)); + for (int br = 0; br < Br_n_fixed; ++br) { + acc_dtype_t *row = acc_s + br * Bc; + float block_max = NEG_INF_F32; + for (int bc = 0; bc < Bc_n_fixed; ++bc) block_max = fmaxf(block_max, row[bc]); + scoremax[br] = fmaxf(scoremax[br], block_max); + } + for (int br = 0; br < Br_n_fixed; ++br) score_scale[br] = expf((scoremax_prev[br] - scoremax[br]) * scale); + for (int br = 0; br < Br_n_fixed; ++br) { + const float current_max = scoremax[br]; + acc_dtype_t *row = acc_s + br * Bc; + float sum = 0.0f; + for (int bc = 0; bc < Bc_n_fixed; ++bc) { + if (row[bc] == NEG_INF_F32) { + row[bc] = 0.0f; + continue; + } + float val = expf((row[bc] - current_max) * scale); + row[bc] = val; + sum += val; + } + score_sum[br] = sum; + } + for (int br = 0; br < Br_n_fixed; ++br) logsum[br] = logsum[br] * score_scale[br] + score_sum[br]; + } + + inline void rescale(const int32_t Br_n_fixed, acc_dtype_t *__restrict__ acc_o, acc_dtype_t *__restrict__ score_scale, const int32_t dim_size) { + for (int i = 0; i < Br_n_fixed; ++i) { +#ifdef __AVX2__ + __m256 scale_v = _mm256_set1_ps(score_scale[i]); + float *row_ptr = acc_o + i * dim_size; + for (int j = 0; j < dim_size; j += 8) _mm256_storeu_ps(row_ptr + j, _mm256_mul_ps(_mm256_loadu_ps(row_ptr + j), scale_v)); +#elif __ARM_NEON + float32x4_t scale_v = 
vdupq_n_f32(score_scale[i]); + float *row_ptr = acc_o + i * dim_size; + for (int j = 0; j < dim_size; j += 4) vst1q_f32(row_ptr + j, vmulq_f32(vld1q_f32(row_ptr + j), scale_v)); +#endif + } + } + + inline void mma1(const int32_t Br_n_fixed, const int32_t Bc_n_fixed, + const acc_dtype_t *__restrict__ w_block, const dtype_kv_in_t *__restrict__ v_block, + acc_dtype_t *__restrict__ acc_o, const int32_t dim_size) { + const int32_t v_stride_size = dim_size; + for (int b_r_idx = 0; b_r_idx < Br_n_fixed; ++b_r_idx) { +#ifdef __AVX2__ + for (int d_base = 0; d_base < dim_size; d_base += 8) { + __m256 acc = _mm256_loadu_ps(acc_o + b_r_idx * dim_size + d_base); + for (int b_c_idx = 0; b_c_idx < Bc_n_fixed; ++b_c_idx) { + acc = _mm256_fmadd_ps(_mm256_set1_ps(w_block[b_r_idx * Bc + b_c_idx]), MLLM_F32Cx8_LOAD(v_block + b_c_idx * v_stride_size + d_base), acc); + } + _mm256_storeu_ps(acc_o + b_r_idx * dim_size + d_base, acc); + } +#elif __ARM_NEON + for (int d_base = 0; d_base < dim_size; d_base += 4) { + float32x4_t acc = vld1q_f32(acc_o + b_r_idx * dim_size + d_base); + for (int b_c_idx = 0; b_c_idx < Bc_n_fixed; ++b_c_idx) { + float32x4_t w_vec = vdupq_n_f32(w_block[b_r_idx * Bc + b_c_idx]); + float32x4_t v_vec = vcvt_f32_f16(vld1_f16((const __fp16 *)(v_block + b_c_idx * v_stride_size + d_base))); + acc = vfmaq_f32(acc, w_vec, v_vec); + } + vst1q_f32(acc_o + b_r_idx * dim_size + d_base, acc); + } +#endif + } + } + + inline void scale_and_store(const int32_t Br_n_fixed, const acc_dtype_t *__restrict__ acc_o, const acc_dtype_t *__restrict__ logsum, + dtype_out_t *__restrict__ o_block, const int32_t dim_size) { + for (int i = 0; i < Br_n_fixed; ++i) { + dtype_out_t *o_block_line = o_block + i * dim_size; + float reciprocal_logsum = (logsum[i] == 0.0f) ? 
0.0f : 1.0f / logsum[i]; +#ifdef __AVX2__ + __m256 reciprocal_logsum_vec = _mm256_set1_ps(reciprocal_logsum); + for (int j = 0; j <= dim_size - 8; j += 8) _mm256_storeu_ps(o_block_line + j, _mm256_mul_ps(_mm256_loadu_ps(acc_o + i * dim_size + j), reciprocal_logsum_vec)); + for (int j = dim_size - (dim_size % 8); j < dim_size; ++j) o_block_line[j] = acc_o[i * dim_size + j] * reciprocal_logsum; +#elif __ARM_NEON + float32x4_t reciprocal_logsum_vec = vdupq_n_f32(reciprocal_logsum); + for (int j = 0; j <= dim_size - 4; j += 4) vst1q_f32(o_block_line + j, vmulq_f32(vld1q_f32(acc_o + i * dim_size + j), reciprocal_logsum_vec)); + for (int j = dim_size - (dim_size % 4); j < dim_size; ++j) o_block_line[j] = acc_o[i * dim_size + j] * reciprocal_logsum; +#endif + } + } + + inline void init_temp_d(acc_dtype_t *logsum, acc_dtype_t *scoremax, acc_dtype_t *acc_o, const int32_t dim_size) { + logsum[0] = 0.0f; + scoremax[0] = NEG_INF_F32; +#ifdef __AVX2__ + __m256 zero_vec = _mm256_setzero_ps(); + for (int i = 0; i < dim_size; i += 8) { _mm256_storeu_ps(acc_o + i, zero_vec); } +#elif __ARM_NEON + float32x4_t zero_vec = vdupq_n_f32(0.0f); + for (int i = 0; i < dim_size; i += 4) { vst1q_f32(acc_o + i, zero_vec); } +#endif + } + + inline void mma0_d(const int32_t Bc_n_fixed, const dtype_q_in_t *__restrict__ q_block, + const dtype_kv_in_t *__restrict__ k_block, acc_dtype_t *__restrict__ acc_s, + const int32_t dim_size, const int32_t t_c_idx, + const int32_t seq_size_k, bool causal_mask) { + const int32_t global_c_start = t_c_idx * Bc; + const int32_t global_r_idx = seq_size_k - 1; + for (int32_t b_c_idx = 0; b_c_idx < Bc_n_fixed; ++b_c_idx) { + const int32_t global_c_idx = global_c_start + b_c_idx; + if (causal_mask && global_c_idx > global_r_idx) { + acc_s[b_c_idx] = NEG_INF_F32; + continue; + } + const dtype_kv_in_t *k_block_line = k_block + b_c_idx * dim_size; +#ifdef __AVX2__ + __m256 sum_vec = _mm256_setzero_ps(); + int i = 0; + for (; i <= dim_size - 8; i += 8) sum_vec = 
_mm256_fmadd_ps(_mm256_loadu_ps(q_block + i), MLLM_F32Cx8_LOAD(k_block_line + i), sum_vec); + acc_dtype_t total = hadd_ps_avx(sum_vec); + for (; i < dim_size; ++i) total += q_block[i] * MLLM_FP16_TO_FP32(k_block_line[i]); +#elif __ARM_NEON + float32x4_t sum_vec = vdupq_n_f32(0.0f); + int i = 0; + for (; i <= dim_size - 4; i += 4) { + float32x4_t q_vec = vld1q_f32(q_block + i); + float32x4_t k_vec = vcvt_f32_f16(vld1_f16((const __fp16 *)(k_block_line + i))); + sum_vec = vfmaq_f32(sum_vec, q_vec, k_vec); + } + acc_dtype_t total = hadd_ps_neon(sum_vec); + for (; i < dim_size; ++i) total += q_block[i] * MLLM_FP16_TO_FP32(k_block_line[i]); +#endif + acc_s[b_c_idx] = total; + } + } + + inline void softmax_d(const int32_t Bc_n_fixed, acc_dtype_t *__restrict__ acc_s, + acc_dtype_t *scoremax, acc_dtype_t *scoremax_prev, + acc_dtype_t *score_scale, acc_dtype_t *score_sum, acc_dtype_t *logsum, + const float scale) { + scoremax_prev[0] = scoremax[0]; + float block_max = NEG_INF_F32; + for (int bc = 0; bc < Bc_n_fixed; ++bc) block_max = fmaxf(block_max, acc_s[bc]); + scoremax[0] = fmaxf(scoremax[0], block_max); + score_scale[0] = expf((scoremax_prev[0] - scoremax[0]) * scale); + float current_sum = 0.0f; + for (int bc = 0; bc < Bc_n_fixed; ++bc) { + if (acc_s[bc] == NEG_INF_F32) { + acc_s[bc] = 0.0f; + continue; + } + float val = expf((acc_s[bc] - scoremax[0]) * scale); + acc_s[bc] = val; + current_sum += val; + } + score_sum[0] = current_sum; + logsum[0] = logsum[0] * score_scale[0] + score_sum[0]; + } + + inline void rescale_d(acc_dtype_t *__restrict__ acc_o, acc_dtype_t *__restrict__ score_scale, const int32_t dim_size) { +#ifdef __AVX2__ + __m256 scale_v = _mm256_set1_ps(score_scale[0]); + for (int j = 0; j < dim_size; j += 8) _mm256_storeu_ps(acc_o + j, _mm256_mul_ps(_mm256_loadu_ps(acc_o + j), scale_v)); +#elif __ARM_NEON + float32x4_t scale_v = vdupq_n_f32(score_scale[0]); + for (int j = 0; j < dim_size; j += 4) vst1q_f32(acc_o + j, vmulq_f32(vld1q_f32(acc_o + j), 
scale_v)); +#endif + } + + inline void mma1_d(const int32_t Bc_n_fixed, const acc_dtype_t *__restrict__ w_block, + const dtype_kv_in_t *__restrict__ v_block, acc_dtype_t *__restrict__ acc_o, const int32_t dim_size) { + const int32_t v_stride_size = dim_size; +#ifdef __AVX2__ + for (int d_base = 0; d_base < dim_size; d_base += 8) { + __m256 acc = _mm256_loadu_ps(acc_o + d_base); + for (int b_c_idx = 0; b_c_idx < Bc_n_fixed; ++b_c_idx) { + acc = _mm256_fmadd_ps(_mm256_set1_ps(w_block[b_c_idx]), MLLM_F32Cx8_LOAD(v_block + b_c_idx * v_stride_size + d_base), acc); + } + _mm256_storeu_ps(acc_o + d_base, acc); + } +#elif __ARM_NEON + for (int d_base = 0; d_base < dim_size; d_base += 4) { + float32x4_t acc = vld1q_f32(acc_o + d_base); + for (int b_c_idx = 0; b_c_idx < Bc_n_fixed; ++b_c_idx) { + float32x4_t w_vec = vdupq_n_f32(w_block[b_c_idx]); + float32x4_t v_vec = vcvt_f32_f16(vld1_f16((const __fp16 *)(v_block + b_c_idx * v_stride_size + d_base))); + acc = vfmaq_f32(acc, w_vec, v_vec); + } + vst1q_f32(acc_o + d_base, acc); + } +#endif + } + + inline void scale_and_store_d(const acc_dtype_t *__restrict__ acc_o, const acc_dtype_t *__restrict__ logsum, + dtype_out_t *__restrict__ o_block, const int32_t dim_size) { + float reciprocal_logsum = (logsum[0] == 0.0f) ? 
0.0f : 1.0f / logsum[0]; +#ifdef __AVX2__ + __m256 reciprocal_logsum_vec = _mm256_set1_ps(reciprocal_logsum); + for (int j = 0; j <= dim_size - 8; j += 8) _mm256_storeu_ps(o_block + j, _mm256_mul_ps(_mm256_loadu_ps(acc_o + j), reciprocal_logsum_vec)); + for (int j = dim_size - (dim_size % 8); j < dim_size; ++j) o_block[j] = acc_o[j] * reciprocal_logsum; +#elif __ARM_NEON + float32x4_t reciprocal_logsum_vec = vdupq_n_f32(reciprocal_logsum); + for (int j = 0; j <= dim_size - 4; j += 4) vst1q_f32(o_block + j, vmulq_f32(vld1q_f32(acc_o + j), reciprocal_logsum_vec)); + for (int j = dim_size - (dim_size % 4); j < dim_size; ++j) o_block[j] = acc_o[j] * reciprocal_logsum; +#endif + } +}; + +template +struct FlashAttn2HeadFirstT { +public: + using dtype_q_in_t = typename Impl::dtype_q_in_t; + using dtype_kv_in_t = typename Impl::dtype_kv_in_t; + using dtype_out_t = typename Impl::dtype_out_t; + using acc_dtype_t = typename Impl::acc_dtype_t; + + void configure(int32_t Br, int32_t Bc, int32_t Q_Head, int32_t KV_Head, int32_t threads, bool high_precision) { + impl_.configure(Br, Bc, Q_Head, KV_Head, threads, high_precision); + } + + void init_workspace(acc_dtype_t *acc_o, acc_dtype_t *acc_s, + acc_dtype_t *logsum, acc_dtype_t *scoremax, acc_dtype_t *scoremax_prev, + acc_dtype_t *score_scale, acc_dtype_t *score_sum) { + impl_.init_workspace(acc_o, acc_s, logsum, scoremax, scoremax_prev, score_scale, score_sum); + } + + void operator()(const dtype_q_in_t *__restrict__ Q, const dtype_kv_in_t *__restrict__ K, + const dtype_kv_in_t *__restrict__ V, dtype_out_t *__restrict__ O, + const int32_t batch_size, const int32_t head_size, + const int32_t seq_size_q, const int32_t seq_size_k, + const int32_t dim_size, bool causal_mask, + int32_t q_head_skp, int32_t k_head_skp, int32_t v_head_skp) { + impl_.fa2(Q, K, V, O, batch_size, head_size, seq_size_q, seq_size_k, dim_size, causal_mask, q_head_skp, k_head_skp, v_head_skp); + } + +private: + Impl impl_; +}; + +} // namespace mobi_attn + 
+inline void flash_attention_2_forward_h( + const void *Q, const void *K, const void *V, void *O, + int32_t batch_size, int32_t head_size, int32_t seq_size_q, int32_t seq_size_k, int32_t dim_size, + bool causal_mask, bool use_fp32, int32_t threads, int32_t br, int32_t bc, + int32_t q_head, int32_t kv_head, bool high_precision_exp, + int32_t q_head_skp, int32_t k_head_skp, int32_t v_head_skp) { + const size_t align = 32; + const size_t acc_o_size = threads * br * dim_size * sizeof(float); + const size_t acc_s_size = threads * br * bc * sizeof(float); + const size_t logsum_size = threads * br * sizeof(float); + const size_t scoremax_size = threads * br * sizeof(float); + const size_t scoremax_prev_size = threads * br * sizeof(float); + const size_t score_scale_size = threads * br * sizeof(float); + const size_t score_sum_size = threads * br * sizeof(float); + + void *workspace_ptr = nullptr; + size_t total_workspace_size = acc_o_size + acc_s_size + logsum_size + scoremax_size + scoremax_prev_size + score_scale_size + score_sum_size; + + platform_aligned_alloc(&workspace_ptr, total_workspace_size, align); + if (workspace_ptr == nullptr) { + return; + } + + float *acc_o = static_cast(workspace_ptr); + float *acc_s = acc_o + threads * br * dim_size; + float *logsum = acc_s + threads * br * bc; + float *scoremax = logsum + threads * br; + float *scoremax_prev = scoremax + threads * br; + float *score_scale = scoremax_prev + threads * br; + float *score_sum = score_scale + threads * br; + + if (use_fp32) { + mobi_attn::FlashAttn2HeadFirstT op; + op.configure(br, bc, q_head, kv_head, threads, high_precision_exp); + op.init_workspace(acc_o, acc_s, logsum, scoremax, scoremax_prev, score_scale, score_sum); + op(static_cast(Q), static_cast(K), static_cast(V), + static_cast(O), + batch_size, head_size, seq_size_q, seq_size_k, dim_size, causal_mask, + q_head_skp, k_head_skp, v_head_skp); + } else { + mobi_attn::FlashAttn2HeadFirstT op; + op.configure(br, bc, q_head, kv_head, 
threads, high_precision_exp); + op.init_workspace(acc_o, acc_s, logsum, scoremax, scoremax_prev, score_scale, score_sum); + op(static_cast(Q), + static_cast(K), + static_cast(V), + static_cast(O), + batch_size, head_size, seq_size_q, seq_size_k, dim_size, causal_mask, + q_head_skp, k_head_skp, v_head_skp); + } + + if (workspace_ptr) { + platform_aligned_free(workspace_ptr); + } +} + +#endif // MLLM_FA2H_CAL_HPP \ No newline at end of file diff --git a/mllm/backends/cpu/compute/GemmFp.hpp b/mllm/backends/cpu/compute/GemmFp.hpp new file mode 100644 index 000000000..977f11c85 --- /dev/null +++ b/mllm/backends/cpu/compute/GemmFp.hpp @@ -0,0 +1,283 @@ +#include +#include +#include +#include + +#include "DataType.hpp" +#include "backends/cpu/third_party/ggml/QuantizeFP16.hpp" + +// --- 平台检测和微内核尺寸定义 --- +#if defined(__aarch64__) +#include +#define TARGET_ARCH "aarch64 (NEON)" +#define MR_micro 4 // NEON 使用 4x4 微内核 +#define NR_micro 4 + +#elif defined(__x86_64__) || defined(_M_X64) +#include // for AVX, FMA +#define TARGET_ARCH "x86_64 (AVX)" +#define MR_micro 8 // AVX 使用 8x8 微内核 +#define NR_micro 8 + +#else +#define TARGET_ARCH "Generic C" +#define MR_micro 4 +#define NR_micro 4 +#endif + +#define KC_BLOCK 256 + +static inline int min(int a, int b) { + return a < b ? 
a : b; +} + +// --- 平台特定的打包和微内核函数 --- + +#if defined(__aarch64__) +// NEON: 打包B矩阵的一个 4xkc 的 Panel +static void pack_b_for_neon_4x4(float *packed_b, const float *b_ptr, int N, int k_start, int k_end, int n_start) { + int kc = k_end - k_start; + const int nc = NR_micro; + + for (int k_local = 0; k_local < kc; ++k_local) { + const float *b_src_ptr = b_ptr + (k_start + k_local) * N + n_start; + float *b_dest_ptr = packed_b + k_local * nc; + memcpy(b_dest_ptr, b_src_ptr, nc * sizeof(float)); + } +} + +// NEON: 4x4 微内核 +static void gemm_micro_kernel_neon_4x4(float *c_ptr, const float *a_ptr, const float *packed_b_ptr, int kc, int lda, int ldc) { + float32x4_t c_reg0, c_reg1, c_reg2, c_reg3; + c_reg0 = vld1q_f32(c_ptr + 0 * ldc); + c_reg1 = vld1q_f32(c_ptr + 1 * ldc); + c_reg2 = vld1q_f32(c_ptr + 2 * ldc); + c_reg3 = vld1q_f32(c_ptr + 3 * ldc); + for (int k = 0; k < kc; ++k) { + float32x4_t b_reg = vld1q_f32(packed_b_ptr + k * NR_micro); + c_reg0 = vfmaq_f32(c_reg0, vld1q_dup_f32(a_ptr + 0 * lda + k), b_reg); + c_reg1 = vfmaq_f32(c_reg1, vld1q_dup_f32(a_ptr + 1 * lda + k), b_reg); + c_reg2 = vfmaq_f32(c_reg2, vld1q_dup_f32(a_ptr + 2 * lda + k), b_reg); + c_reg3 = vfmaq_f32(c_reg3, vld1q_dup_f32(a_ptr + 3 * lda + k), b_reg); + } + vst1q_f32(c_ptr + 0 * ldc, c_reg0); + vst1q_f32(c_ptr + 1 * ldc, c_reg1); + vst1q_f32(c_ptr + 2 * ldc, c_reg2); + vst1q_f32(c_ptr + 3 * ldc, c_reg3); +} + +#elif defined(__x86_64__) || defined(_M_X64) +// AVX: 打包B矩阵的一个 8xkc 的 Panel +static void pack_b_for_avx_8x8(float *packed_b, const float *b_ptr, int N, int k_start, int k_end, int n_start) { + int kc = k_end - k_start; + const int nc = NR_micro; + + for (int k_local = 0; k_local < kc; ++k_local) { + const float *b_src_ptr = b_ptr + (k_start + k_local) * N + n_start; + float *b_dest_ptr = packed_b + k_local * nc; + memcpy(b_dest_ptr, b_src_ptr, nc * sizeof(float)); + } +} + +// AVX: 8x8 微内核 +static void gemm_micro_kernel_avx_8x8(float *c_ptr, const float *a_ptr, const float *packed_b_ptr, int 
kc, int lda, int ldc) { + __m256 c_reg[MR_micro]; + for (int i = 0; i < MR_micro; ++i) { + c_reg[i] = _mm256_loadu_ps(c_ptr + i * ldc); + } + + for (int k = 0; k < kc; ++k) { + __m256 b_reg = _mm256_load_ps(packed_b_ptr + k * NR_micro); + for (int i = 0; i < MR_micro; ++i) { + __m256 a_broadcast = _mm256_set1_ps(a_ptr[i * lda + k]); + c_reg[i] = _mm256_fmadd_ps(a_broadcast, b_reg, c_reg[i]); + } + } + + for (int i = 0; i < MR_micro; ++i) { + _mm256_storeu_ps(c_ptr + i * ldc, c_reg[i]); + } +} +#endif + +// 主 GEMM 函数 +void gemm_fp32(float *c_ptr, const float *a_ptr, const float *b_ptr, int M, int N, int K) { +#if defined(__aarch64__) || defined(__x86_64__) || defined(_M_X64) + // 仅在NEON或AVX路径下,我们才为打包分配内存 + float *packed_b = (float *)malloc(KC_BLOCK * NR_micro * sizeof(float)); + if (!packed_b) return; +#endif + + for (int k_col = 0; k_col < K; k_col += KC_BLOCK) { + int kc = min(KC_BLOCK, K - k_col); + + for (int i_row = 0; i_row < M; i_row += MR_micro) { + int mc = min(MR_micro, M - i_row); + + for (int j_col = 0; j_col < N; j_col += NR_micro) { + int nc = min(NR_micro, N - j_col); + +#if defined(__aarch64__) + if (mc == MR_micro && nc == NR_micro) { + pack_b_for_neon_4x4(packed_b, b_ptr, N, k_col, k_col + kc, j_col); + gemm_micro_kernel_neon_4x4(c_ptr + i_row * N + j_col, a_ptr + i_row * K + k_col, packed_b, kc, K, N); + continue; + } +#elif defined(__x86_64__) || defined(_M_X64) + if (mc == MR_micro && nc == NR_micro) { + pack_b_for_avx_8x8(packed_b, b_ptr, N, k_col, k_col + kc, j_col); + gemm_micro_kernel_avx_8x8(c_ptr + i_row * N + j_col, a_ptr + i_row * K + k_col, packed_b, kc, K, N); + continue; + } +#endif + + // --- 通用C语言路径 (也用于NEON/AVX的边缘情况) --- + // 直接使用原始A, B矩阵进行计算,确保正确性 + for (int i = 0; i < mc; ++i) { + for (int j = 0; j < nc; ++j) { + float sum = 0.0f; + for (int k = 0; k < kc; ++k) { + sum += a_ptr[(i_row + i) * K + (k_col + k)] * b_ptr[(k_col + k) * N + (j_col + j)]; + } + c_ptr[(i_row + i) * N + (j_col + j)] += sum; + } + } + } + } + } + +#if 
defined(__aarch64__) || defined(__x86_64__) || defined(_M_X64) + free(packed_b); +#endif +} + +#if defined(__aarch64__) +// NEON: 打包B矩阵(fp16)的一个 4xkc 的 Panel +static void pack_b_fp16_for_neon_4x4(mllm_fp16_t *packed_b, const mllm_fp16_t *b_ptr, int N, int k_start, int k_end, int n_start) { + int kc = k_end - k_start; + const int nc = NR_micro; + + for (int k_local = 0; k_local < kc; ++k_local) { + const mllm_fp16_t *b_src_ptr = b_ptr + (k_start + k_local) * N + n_start; + mllm_fp16_t *b_dest_ptr = packed_b + k_local * nc; + memcpy(b_dest_ptr, b_src_ptr, nc * sizeof(mllm_fp16_t)); + } +} + +// NEON: 4x4 微内核 (fp32 * fp16) +static void gemm_micro_kernel_fp32_fp16_neon_4x4(float *c_ptr, const float *a_ptr, const mllm_fp16_t *packed_b_ptr, int kc, int lda, int ldc) { + float32x4_t c_reg0, c_reg1, c_reg2, c_reg3; + c_reg0 = vld1q_f32(c_ptr + 0 * ldc); + c_reg1 = vld1q_f32(c_ptr + 1 * ldc); + c_reg2 = vld1q_f32(c_ptr + 2 * ldc); + c_reg3 = vld1q_f32(c_ptr + 3 * ldc); + + for (int k = 0; k < kc; ++k) { + // 1. 从打包好的B中加载一行fp16 + float16x4_t b_reg_f16 = vld1_f16(packed_b_ptr + k * NR_micro); + // 2. 将fp16向量转换为fp32向量 + float32x4_t b_reg_f32 = vcvt_f32_f16(b_reg_f16); + + // 3. 
执行乘加操作 (与之前相同) + c_reg0 = vfmaq_f32(c_reg0, vld1q_dup_f32(a_ptr + 0 * lda + k), b_reg_f32); + c_reg1 = vfmaq_f32(c_reg1, vld1q_dup_f32(a_ptr + 1 * lda + k), b_reg_f32); + c_reg2 = vfmaq_f32(c_reg2, vld1q_dup_f32(a_ptr + 2 * lda + k), b_reg_f32); + c_reg3 = vfmaq_f32(c_reg3, vld1q_dup_f32(a_ptr + 3 * lda + k), b_reg_f32); + } + vst1q_f32(c_ptr + 0 * ldc, c_reg0); + vst1q_f32(c_ptr + 1 * ldc, c_reg1); + vst1q_f32(c_ptr + 2 * ldc, c_reg2); + vst1q_f32(c_ptr + 3 * ldc, c_reg3); +} + +#elif defined(__x86_64__) || defined(_M_X64) +// AVX: 打包B矩阵(fp16)的一个 8xkc 的 Panel +static void pack_b_fp16_for_avx_8x8(mllm_fp16_t *packed_b, const mllm_fp16_t *b_ptr, int N, int k_start, int k_end, int n_start) { + int kc = k_end - k_start; + const int nc = NR_micro; + + for (int k_local = 0; k_local < kc; ++k_local) { + const mllm_fp16_t *b_src_ptr = b_ptr + (k_start + k_local) * N + n_start; + mllm_fp16_t *b_dest_ptr = packed_b + k_local * nc; + memcpy(b_dest_ptr, b_src_ptr, nc * sizeof(mllm_fp16_t)); + } +} + +// AVX: 8x8 微内核 (fp32 * fp16) +static void gemm_micro_kernel_fp32_fp16_avx_8x8(float *c_ptr, const float *a_ptr, const mllm_fp16_t *packed_b_ptr, int kc, int lda, int ldc) { + __m256 c_reg[MR_micro]; + for (int i = 0; i < MR_micro; ++i) { + c_reg[i] = _mm256_loadu_ps(c_ptr + i * ldc); + } + + for (int k = 0; k < kc; ++k) { + // 1. 从打包好的B中加载一行fp16 (8个uint16_t) 到一个128位的XMM寄存器 + __m128i b_reg_f16 = _mm_loadu_si128((__m128i const *)(packed_b_ptr + k * NR_micro)); + // 2. 将128位的fp16向量转换为256位的fp32向量 + __m256 b_reg_f32 = _mm256_cvtph_ps(b_reg_f16); + + // 3. 
执行乘加操作 (与之前相同) + for (int i = 0; i < MR_micro; ++i) { + __m256 a_broadcast = _mm256_set1_ps(a_ptr[i * lda + k]); + c_reg[i] = _mm256_fmadd_ps(a_broadcast, b_reg_f32, c_reg[i]); + } + } + + for (int i = 0; i < MR_micro; ++i) { + _mm256_storeu_ps(c_ptr + i * ldc, c_reg[i]); + } +} +#endif + +// 新增的 GEMM 函数 +void gemm_fp32_fp16(float *c_ptr, const float *a_ptr, const mllm_fp16_t *b_ptr, int M, int N, int K) { +#if defined(__aarch64__) || defined(__x86_64__) || defined(_M_X64) + // 仅在NEON或AVX路径下,我们才为打包分配内存 + mllm_fp16_t *packed_b = (mllm_fp16_t *)malloc(KC_BLOCK * NR_micro * sizeof(mllm_fp16_t)); + if (!packed_b) return; +#endif + + for (int k_col = 0; k_col < K; k_col += KC_BLOCK) { + int kc = min(KC_BLOCK, K - k_col); + + for (int i_row = 0; i_row < M; i_row += MR_micro) { + int mc = min(MR_micro, M - i_row); + + for (int j_col = 0; j_col < N; j_col += NR_micro) { + int nc = min(NR_micro, N - j_col); + +#if defined(__aarch64__) + if (mc == MR_micro && nc == NR_micro) { + pack_b_fp16_for_neon_4x4(packed_b, b_ptr, N, k_col, k_col + kc, j_col); + gemm_micro_kernel_fp32_fp16_neon_4x4(c_ptr + i_row * N + j_col, a_ptr + i_row * K + k_col, packed_b, kc, K, N); + continue; + } +#elif defined(__x86_64__) || defined(_M_X64) + if (mc == MR_micro && nc == NR_micro) { + pack_b_fp16_for_avx_8x8(packed_b, b_ptr, N, k_col, k_col + kc, j_col); + gemm_micro_kernel_fp32_fp16_avx_8x8(c_ptr + i_row * N + j_col, a_ptr + i_row * K + k_col, packed_b, kc, K, N); + continue; + } +#endif + + // ---- 通用C语言路径 (也用于NEON/AVX的边缘情况) ---- + // 直接使用原始A(fp32)和B(fp16)矩阵进行计算 + for (int i = 0; i < mc; ++i) { + for (int j = 0; j < nc; ++j) { + float sum = 0.0f; + for (int k = 0; k < kc; ++k) { + // C[i_row+i][j_col+j] += A[i_row+i][k_col+k] * B[k_col+k][j_col+j] + // 关键:使用宏将B的fp16值转换为fp32 + sum += a_ptr[(i_row + i) * K + (k_col + k)] * MLLM_FP16_TO_FP32(b_ptr[(k_col + k) * N + (j_col + j)]); + } + c_ptr[(i_row + i) * N + (j_col + j)] += sum; + } + } + } + } + } + +#if defined(__aarch64__) || 
defined(__x86_64__) || defined(_M_X64) + free(packed_b); +#endif +} diff --git a/mllm/backends/cpu/compute/GemmKleidiai.cpp b/mllm/backends/cpu/compute/GemmKleidiai.cpp new file mode 100644 index 000000000..d83ac8c5c --- /dev/null +++ b/mllm/backends/cpu/compute/GemmKleidiai.cpp @@ -0,0 +1,889 @@ + +#if defined(__aarch64__) || defined(__arm__) || defined(__arm64__) + +#include "GemmKleidiai.hpp" +#include "FeatureCheck.hpp" +#include +#include +#include +#include +#include +#include + +#include + +class WorkspaceManager { +public: + static WorkspaceManager &get_instance() { + static WorkspaceManager instance; + return instance; + } + +private: + WorkspaceManager() { + int max_threads = kai_thread_count; +#ifdef _OPENMP + max_threads = omp_get_max_threads(); +#endif + qsi4_workspaces_.resize(max_threads); + qsi4_c_temp_buffers_.resize(max_threads); + fp16_a_buffers_.resize(max_threads); + fp16_c_buffers_.resize(max_threads); + } + +public: + WorkspaceManager(const WorkspaceManager &) = delete; + WorkspaceManager &operator=(const WorkspaceManager &) = delete; + + std::vector &get_qsi4_workspace() { + int thread_id = 0; +#ifdef _OPENMP + thread_id = omp_get_thread_num(); +#endif + return qsi4_workspaces_[thread_id]; + } + + std::vector &get_qsi4_c_temp_buffer() { + int thread_id = 0; +#ifdef _OPENMP + thread_id = omp_get_thread_num(); +#endif + return qsi4_c_temp_buffers_[thread_id]; + } + + std::vector &get_fp16_a_buffer() { + int thread_id = 0; +#ifdef _OPENMP + thread_id = omp_get_thread_num(); +#endif + return fp16_a_buffers_[thread_id]; + } + + std::vector &get_fp16_c_buffer() { + int thread_id = 0; +#ifdef _OPENMP + thread_id = omp_get_thread_num(); +#endif + return fp16_c_buffers_[thread_id]; + } + +private: + std::vector> qsi4_workspaces_; + std::vector> qsi4_c_temp_buffers_; + std::vector> fp16_a_buffers_; + std::vector> fp16_c_buffers_; +}; + +#include +#if defined(__linux__) +#include +#include +#endif + +#include 
"kai_matmul_clamp_f32_qai8dxp_qsi4c32p_interface.h" +#include "kai_matmul_clamp_f32_qai8dxp1x8_qsi4c32p4x8_1x4x32_neon_dotprod.h" +#include "kai/ukernels/matmul/pack/kai_lhs_quant_pack_qai8dxp_f32.h" +#include "kai/ukernels/matmul/pack/kai_rhs_pack_kxn_qsi4c32p_qsu4c32s1s0.h" +#include "kai_lhs_quant_pack_qai8dxp_f32.h" +#include "kai_matmul_clamp_f32_qai8dxp1x8_qsi4c32p4x8_1x4x32_neon_dotprod.h" +#include "kai_matmul_clamp_f32_qai8dxp4x8_qsi4c32p4x8_8x4x32_neon_i8mm.h" +#include "kai_matmul_clamp_f32_qai8dxp_qsi4c32p_interface.h" +#include "kai/ukernels/matmul/pack/kai_rhs_pack_kxn_qsi4c32p_qsu4c32s1s0.h" + +#include "kai/ukernels/matmul/matmul_clamp_f16_qai8dxp_qsi4cxp/kai_matmul_clamp_f16_qai8dxp_qsi4cxp_interface.h" +#include "kai/ukernels/matmul/matmul_clamp_f16_qai8dxp_qsi4cxp/kai_matmul_clamp_f16_qai8dxp1x8_qsi4cxp4x8_1x4_neon_dotprod.h" +#include "kai/ukernels/matmul/matmul_clamp_f16_qai8dxp_qsi4cxp/kai_matmul_clamp_f16_qai8dxp4x8_qsi4cxp4x8_16x4_neon_i8mm.h" +#include "kai/ukernels/matmul/pack/kai_lhs_quant_pack_qai8dxp_f16_neon.h" +#include "kai/ukernels/matmul/pack/kai_rhs_pack_kxn_qsi4cxp_qs4cxs1s0.h" + +enum class KleidiaiQsi4Tile { + k1x8_4x8_1x4x32_dotprod, + k4x8_4x8_8x4x32_i8mm, +}; + +namespace std { +template <> +struct hash { + std::size_t operator()(const KleidiaiQsi4Tile &k) const noexcept { + return std::hash::type>()( + static_cast::type>(k)); + } +}; +} // namespace std + +static const kai_matmul_clamp_f32_qai8dxp_qsi4c32p_ukernel dotprod_ukernel = { + .get_m_step = kai_get_m_step_matmul_clamp_f32_qai8dxp1x8_qsi4c32p4x8_1x4x32_neon_dotprod, + .get_n_step = kai_get_n_step_matmul_clamp_f32_qai8dxp1x8_qsi4c32p4x8_1x4x32_neon_dotprod, + .get_mr = kai_get_mr_matmul_clamp_f32_qai8dxp1x8_qsi4c32p4x8_1x4x32_neon_dotprod, + .get_nr = kai_get_nr_matmul_clamp_f32_qai8dxp1x8_qsi4c32p4x8_1x4x32_neon_dotprod, + .get_kr = kai_get_kr_matmul_clamp_f32_qai8dxp1x8_qsi4c32p4x8_1x4x32_neon_dotprod, + .get_sr = 
kai_get_sr_matmul_clamp_f32_qai8dxp1x8_qsi4c32p4x8_1x4x32_neon_dotprod, + .get_lhs_packed_offset = kai_get_lhs_packed_offset_matmul_clamp_f32_qai8dxp1x8_qsi4c32p4x8_1x4x32_neon_dotprod, + .get_rhs_packed_offset = kai_get_rhs_packed_offset_matmul_clamp_f32_qai8dxp1x8_qsi4c32p4x8_1x4x32_neon_dotprod, + .get_dst_offset = kai_get_dst_offset_matmul_clamp_f32_qai8dxp1x8_qsi4c32p4x8_1x4x32_neon_dotprod, + .run_matmul = kai_run_matmul_clamp_f32_qai8dxp1x8_qsi4c32p4x8_1x4x32_neon_dotprod}; + +static const kai_matmul_clamp_f32_qai8dxp_qsi4c32p_ukernel i8mm_ukernel = { + .get_m_step = kai_get_m_step_matmul_clamp_f32_qai8dxp4x8_qsi4c32p4x8_8x4x32_neon_i8mm, + .get_n_step = kai_get_n_step_matmul_clamp_f32_qai8dxp4x8_qsi4c32p4x8_8x4x32_neon_i8mm, + .get_mr = kai_get_mr_matmul_clamp_f32_qai8dxp4x8_qsi4c32p4x8_8x4x32_neon_i8mm, + .get_nr = kai_get_nr_matmul_clamp_f32_qai8dxp4x8_qsi4c32p4x8_8x4x32_neon_i8mm, + .get_kr = kai_get_kr_matmul_clamp_f32_qai8dxp4x8_qsi4c32p4x8_8x4x32_neon_i8mm, + .get_sr = kai_get_sr_matmul_clamp_f32_qai8dxp4x8_qsi4c32p4x8_8x4x32_neon_i8mm, + .get_lhs_packed_offset = kai_get_lhs_packed_offset_matmul_clamp_f32_qai8dxp4x8_qsi4c32p4x8_8x4x32_neon_i8mm, + .get_rhs_packed_offset = kai_get_rhs_packed_offset_matmul_clamp_f32_qai8dxp4x8_qsi4c32p4x8_8x4x32_neon_i8mm, + .get_dst_offset = kai_get_dst_offset_matmul_clamp_f32_qai8dxp4x8_qsi4c32p4x8_8x4x32_neon_i8mm, + .run_matmul = kai_run_matmul_clamp_f32_qai8dxp4x8_qsi4c32p4x8_8x4x32_neon_i8mm}; + +static const std::unordered_map qsi4_ukernels = { + {KleidiaiQsi4Tile::k1x8_4x8_1x4x32_dotprod, dotprod_ukernel}, + {KleidiaiQsi4Tile::k4x8_4x8_8x4x32_i8mm, i8mm_ukernel}}; + +static KleidiaiQsi4Tile kleidiai_get_best_qsi4_tile_config() { + static const KleidiaiQsi4Tile best_tile = arm_is_i8mm_supported() ? 
+ KleidiaiQsi4Tile::k4x8_4x8_8x4x32_i8mm : + KleidiaiQsi4Tile::k1x8_4x8_1x4x32_dotprod; + return best_tile; +} + +enum class KleidiaiQsi4TileF16 { + k1x8_4x8_1x4_dotprod, + k4x8_4x8_16x4_i8mm, +}; + +namespace std { +template <> +struct hash { + std::size_t operator()(const KleidiaiQsi4TileF16 &k) const noexcept { + return std::hash::type>()( + static_cast::type>(k)); + } +}; +} // namespace std + +static const kai_matmul_clamp_f16_qai8dxp_qsi4cxp_ukernel f16_dotprod_ukernel = { + .get_m_step = kai_get_m_step_matmul_clamp_f16_qai8dxp1x8_qsi4cxp4x8_1x4_neon_dotprod, + .get_n_step = kai_get_n_step_matmul_clamp_f16_qai8dxp1x8_qsi4cxp4x8_1x4_neon_dotprod, + .get_mr = kai_get_mr_matmul_clamp_f16_qai8dxp1x8_qsi4cxp4x8_1x4_neon_dotprod, + .get_nr = kai_get_nr_matmul_clamp_f16_qai8dxp1x8_qsi4cxp4x8_1x4_neon_dotprod, + .get_kr = kai_get_kr_matmul_clamp_f16_qai8dxp1x8_qsi4cxp4x8_1x4_neon_dotprod, + .get_sr = kai_get_sr_matmul_clamp_f16_qai8dxp1x8_qsi4cxp4x8_1x4_neon_dotprod, + .get_lhs_packed_offset = kai_get_lhs_packed_offset_matmul_clamp_f16_qai8dxp1x8_qsi4cxp4x8_1x4_neon_dotprod, + .get_rhs_packed_offset = kai_get_rhs_packed_offset_matmul_clamp_f16_qai8dxp1x8_qsi4cxp4x8_1x4_neon_dotprod, + .get_dst_offset = kai_get_dst_offset_matmul_clamp_f16_qai8dxp1x8_qsi4cxp4x8_1x4_neon_dotprod, + .run_matmul = kai_run_matmul_clamp_f16_qai8dxp1x8_qsi4cxp4x8_1x4_neon_dotprod}; + +static const kai_matmul_clamp_f16_qai8dxp_qsi4cxp_ukernel f16_i8mm_ukernel = { + .get_m_step = kai_get_m_step_matmul_clamp_f16_qai8dxp4x8_qsi4cxp4x8_16x4_neon_i8mm, + .get_n_step = kai_get_n_step_matmul_clamp_f16_qai8dxp4x8_qsi4cxp4x8_16x4_neon_i8mm, + .get_mr = kai_get_mr_matmul_clamp_f16_qai8dxp4x8_qsi4cxp4x8_16x4_neon_i8mm, + .get_nr = kai_get_nr_matmul_clamp_f16_qai8dxp4x8_qsi4cxp4x8_16x4_neon_i8mm, + .get_kr = kai_get_kr_matmul_clamp_f16_qai8dxp4x8_qsi4cxp4x8_16x4_neon_i8mm, + .get_sr = kai_get_sr_matmul_clamp_f16_qai8dxp4x8_qsi4cxp4x8_16x4_neon_i8mm, + .get_lhs_packed_offset = 
kai_get_lhs_packed_offset_matmul_clamp_f16_qai8dxp4x8_qsi4cxp4x8_16x4_neon_i8mm, + .get_rhs_packed_offset = kai_get_rhs_packed_offset_matmul_clamp_f16_qai8dxp4x8_qsi4cxp4x8_16x4_neon_i8mm, + .get_dst_offset = kai_get_dst_offset_matmul_clamp_f16_qai8dxp4x8_qsi4cxp4x8_16x4_neon_i8mm, + .run_matmul = kai_run_matmul_clamp_f16_qai8dxp4x8_qsi4cxp4x8_16x4_neon_i8mm}; + +static const std::unordered_map qsi4_f16_ukernels = { + {KleidiaiQsi4TileF16::k1x8_4x8_1x4_dotprod, f16_dotprod_ukernel}, + {KleidiaiQsi4TileF16::k4x8_4x8_16x4_i8mm, f16_i8mm_ukernel}}; + +static KleidiaiQsi4TileF16 kleidiai_get_best_qsi4_tile_config_f16() { + static const KleidiaiQsi4TileF16 best_tile = arm_is_i8mm_supported() ? + KleidiaiQsi4TileF16::k4x8_4x8_16x4_i8mm : + KleidiaiQsi4TileF16::k1x8_4x8_1x4_dotprod; + return best_tile; +} + +size_t mllm_kleidai_get_packed_b_qsi4_size(int N, int K) { + const auto tile_cfg = kleidiai_get_best_qsi4_tile_config(); + const auto &ukernel = qsi4_ukernels.at(tile_cfg); + const int block_len = 32; + return kai_get_rhs_packed_size_rhs_pack_kxn_qsi4c32p_qsu4c32s1s0( + N, K, ukernel.get_nr(), ukernel.get_kr(), ukernel.get_sr(), + block_len, kai_dt_bf16); +} + +size_t get_workspace_qsi4_size(int M, int K) { + const auto tile_cfg = kleidiai_get_best_qsi4_tile_config(); + const auto &ukernel = qsi4_ukernels.at(tile_cfg); + return kai_get_lhs_packed_size_lhs_quant_pack_qai8dxp_f32( + M, K, ukernel.get_mr(), ukernel.get_kr(), ukernel.get_sr()); +} + +void mllm_kleidai_pack_b_and_bias_qsi4( + uint8_t *packed_b_ptr, + const float *b_ptr, + const float *bias_ptr, + int N, + int K) { + const auto tile_cfg = kleidiai_get_best_qsi4_tile_config(); + const auto &ukernel = qsi4_ukernels.at(tile_cfg); + + const float *bias_to_use = bias_ptr; + std::vector fake_bias; + if (bias_to_use == nullptr) { + fake_bias.assign(N, 0.0f); + bias_to_use = fake_bias.data(); + } + + const int block_len = 32; + const size_t num_blocks_k = (K + block_len - 1) / block_len; + std::vector 
temp_quantized_b(K * N / 2); + std::vector temp_scales(N * num_blocks_k); + + for (int n = 0; n < N; ++n) { + for (size_t kb = 0; kb < num_blocks_k; ++kb) { + float amax = 0.0f; + int start_k = kb * block_len; + int end_k = std::min(start_k + block_len, K); + for (int k = start_k; k < end_k; ++k) { + const float val = b_ptr[k * N + n]; + const float abs_val = std::abs(val); + amax = std::max(abs_val, amax); + } + const float scale = amax / 7.0f; + const float inv_scale = scale != 0.0f ? 1.0f / scale : 0.0f; + temp_scales[(n * num_blocks_k) + kb] = kai_cast_bf16_f32(scale); + for (int k = start_k; k < end_k; ++k) { + const float val = b_ptr[k * N + n]; + int32_t q_val = static_cast(round(val * inv_scale)); + q_val = std::max(-8, std::min(7, q_val)); + uint8_t stored_val = q_val + 8; + size_t byte_idx = (k * N + n) / 2; + if ((k * N + n) % 2 == 0) { + temp_quantized_b[byte_idx] = stored_val; + } else { + temp_quantized_b[byte_idx] |= (stored_val << 4); + } + } + } + } + kai_rhs_pack_kxn_qsi4c32p_qsu4c32s1s0_params params = {}; + params.lhs_zero_point = 1; + params.rhs_zero_point = 8; + params.scale_dt = kai_dt_bf16; + kai_run_rhs_pack_kxn_qsi4c32p_qsu4c32s1s0( + 1, N, K, ukernel.get_nr(), ukernel.get_kr(), ukernel.get_sr(), block_len, + temp_quantized_b.data(), N / 2, bias_to_use, (const uint8_t *)temp_scales.data(), num_blocks_k * sizeof(uint16_t), + packed_b_ptr, 0, ¶ms); +} +/** + * @brief Packs pre-quantized 4-bit weights, scales, and bias into the format required by the QSI4 GEMM kernel. + * + * This function takes weights that are already quantized to 4-bit values (represented as uint8_t in the range [0, 15]), + * their corresponding floating-point scales, and an optional bias vector. It then transforms and packs this data + * into a single buffer (`packed_b_ptr`) for efficient use in `mllm_kleidai_gemm_qsi4`. + * + * @param packed_b_ptr [out] Pointer to the destination buffer for the packed data. 
+ * @param b_qweight_ptr [in] Pointer to the pre-quantized 4-bit weights. Assumed layout is KxN row-major, with each uint8_t holding one 4-bit value. + * @param b_scale_ptr [in] Pointer to the quantization scales. Assumed layout is Nx(K/32) row-major. +// * @param b_zero_ptr [in] Pointer to the quantization zero points. This parameter is currently IGNORED because the underlying kernel supports only a single, symmetric zero point, which is hardcoded to 8. + * @param bias_ptr [in] Pointer to the bias vector of size N. Can be nullptr if no bias is to be added. + * @param N The N dimension of the weight matrix (number of columns/output channels). + * @param K The K dimension of the weight matrix (number of rows/input channels). + */ +void mllm_kleidai_pack_b_and_bias_qsi4_quant( + uint8_t *packed_b_ptr, + const uint8_t *b_qweight_ptr, + const float *b_scale_ptr, + // const uint8_t *b_zero_ptr, + const float *bias_ptr, + int N, + int K) { + const auto tile_cfg = kleidiai_get_best_qsi4_tile_config(); + const auto &ukernel = qsi4_ukernels.at(tile_cfg); + const float *bias_to_use = bias_ptr; + std::vector fake_bias; + if (bias_to_use == nullptr) { + fake_bias.assign(N, 0.0f); + bias_to_use = fake_bias.data(); + } + const int block_len = 32; // Corresponds to the 'c32p' part of the qsi4c32p scheme. 
+ const size_t num_blocks_k = (K + block_len - 1) / block_len; + std::vector temp_quantized_b(K * N / 2); + std::vector temp_scales(N * num_blocks_k); +#pragma omp parallel for + for (size_t i = 0; i < N * num_blocks_k; ++i) { + temp_scales[i] = kai_cast_bf16_f32(b_scale_ptr[i]); + } +#pragma omp parallel for + for (int k = 0; k < K; ++k) { + for (int n = 0; n < N; n += 2) { + size_t byte_idx = (size_t)k * (N / 2) + (n / 2); + uint8_t val1 = b_qweight_ptr[(size_t)k * N + n]; + uint8_t val2 = b_qweight_ptr[(size_t)k * N + n + 1]; + temp_quantized_b[byte_idx] = val1 | (val2 << 4); + } + } + kai_rhs_pack_kxn_qsi4c32p_qsu4c32s1s0_params params = {}; + params.lhs_zero_point = 1; + params.rhs_zero_point = 8; + params.scale_dt = kai_dt_bf16; + kai_run_rhs_pack_kxn_qsi4c32p_qsu4c32s1s0( + 1, N, K, ukernel.get_nr(), ukernel.get_kr(), ukernel.get_sr(), block_len, + temp_quantized_b.data(), N / 2, + bias_to_use, + (const uint8_t *)temp_scales.data(), num_blocks_k * sizeof(uint16_t), + packed_b_ptr, 0, ¶ms); +} + +#ifndef KAI_FP16_CAL +void mllm_kleidai_gemm_qsi4( + float *c_ptr, const float *a_ptr, const uint8_t *packed_b_ptr, + int M, int N, int K) { + kai_matmul_clamp_f32_qai8dxp_qsi4c32p_ukernel ukernel; + if (M == 1) { + ukernel = qsi4_ukernels.at(KleidiaiQsi4Tile::k1x8_4x8_1x4x32_dotprod); + } else { + const auto tile_cfg = arm_is_i8mm_supported() ? 
+ KleidiaiQsi4Tile::k4x8_4x8_8x4x32_i8mm : + KleidiaiQsi4Tile::k1x8_4x8_1x4x32_dotprod; + ukernel = qsi4_ukernels.at(tile_cfg); + } + + auto &workspace_data = WorkspaceManager::get_instance().get_qsi4_workspace(); + size_t required_workspace_size = get_workspace_qsi4_size(M, K); + if (workspace_data.size() < required_workspace_size) { + workspace_data.resize(required_workspace_size); + } + + kai_run_lhs_quant_pack_qai8dxp_f32( + M, K, + ukernel.get_mr(), ukernel.get_kr(), ukernel.get_sr(), + 0, + a_ptr, K * sizeof(float), + workspace_data.data()); + + const int n_step = ukernel.get_n_step(); + const int block_len = 32; + +#pragma omp parallel for num_threads(kai_thread_count) + for (int n_start = 0; n_start < N; n_start += n_step) { + const int current_n = std::min(N - n_start, n_step); + + const void *a_packed_ptr = workspace_data.data(); + const void *b_packed_offset = (const char *)packed_b_ptr + ukernel.get_rhs_packed_offset(n_start, K, block_len); + float *c_offset = c_ptr + n_start; + + ukernel.run_matmul( + M, current_n, K, block_len, + a_packed_ptr, b_packed_offset, + c_offset, N * sizeof(float), + sizeof(float), + -FLT_MAX, FLT_MAX); + } +} + +#else +void mllm_kleidai_gemm_qsi4_f16_compute( + mllm_fp16_t *c_ptr, const mllm_fp16_t *a_ptr, const uint8_t *packed_b_ptr, + int M, int N, int K) { + kai_matmul_clamp_f16_qai8dxp_qsi4cxp_ukernel ukernel; + if (M == 1) { + ukernel = qsi4_f16_ukernels.at(KleidiaiQsi4TileF16::k1x8_4x8_1x4_dotprod); + } else { + const auto tile_cfg = arm_is_i8mm_supported() ? 
+ KleidiaiQsi4TileF16::k4x8_4x8_16x4_i8mm : + KleidiaiQsi4TileF16::k1x8_4x8_1x4_dotprod; + ukernel = qsi4_f16_ukernels.at(tile_cfg); + } + + auto &workspace_data = WorkspaceManager::get_instance().get_qsi4_workspace(); + + size_t required_workspace_size = kai_get_lhs_packed_size_lhs_quant_pack_qai8dxp_f16_neon( + M, K, ukernel.get_mr(), ukernel.get_kr(), ukernel.get_sr()); + if (workspace_data.size() < required_workspace_size) { + workspace_data.resize(required_workspace_size); + } + + kai_run_lhs_quant_pack_qai8dxp_f16_neon( + M, K, + ukernel.get_mr(), ukernel.get_kr(), ukernel.get_sr(), + 0, + a_ptr, + K * sizeof(mllm_fp16_t), + workspace_data.data()); + + const int n_step = ukernel.get_n_step(); + +#pragma omp parallel for num_threads(kai_thread_count) + for (int n_start = 0; n_start < N; n_start += n_step) { + const int current_n = std::min(N - n_start, n_step); + const void *a_packed_ptr = workspace_data.data(); + const void *b_packed_offset = (const char *)packed_b_ptr + ukernel.get_rhs_packed_offset(n_start, K); + void *c_offset = c_ptr + n_start; + + ukernel.run_matmul( + M, current_n, K, + a_packed_ptr, b_packed_offset, + c_offset, N * sizeof(mllm_fp16_t), + sizeof(mllm_fp16_t), + -FLT_MAX, FLT_MAX); + } +} + +void mllm_kleidai_gemm_qsi4( + float *c_ptr, const float *a_ptr, const uint8_t *packed_b_ptr, + int M, int N, int K) { + auto &a_fp16 = WorkspaceManager::get_instance().get_fp16_a_buffer(); + if (a_fp16.size() < M * K) { + a_fp16.resize(M * K); + } + + auto &c_fp16 = WorkspaceManager::get_instance().get_fp16_c_buffer(); + if (c_fp16.size() < M * N) { + c_fp16.resize(M * N); + } + +#pragma omp parallel for num_threads(kai_thread_count) + for (int i = 0; i < M * K; ++i) { + a_fp16[i] = static_cast(a_ptr[i]); + } + + mllm_kleidai_gemm_qsi4_f16_compute(c_fp16.data(), a_fp16.data(), packed_b_ptr, M, N, K); + +#pragma omp parallel for num_threads(kai_thread_count) + for (int i = 0; i <= (M * N) - 8; i += 8) { + float16x8_t fp16_vec = 
vld1q_f16(reinterpret_cast(c_fp16.data() + i)); + + float32x4_t fp32_vec_low = vcvt_f32_f16(vget_low_f16(fp16_vec)); + float32x4_t fp32_vec_high = vcvt_f32_f16(vget_high_f16(fp16_vec)); + + vst1q_f32(c_ptr + i, fp32_vec_low); + vst1q_f32(c_ptr + i + 4, fp32_vec_high); + } + for (int i = (M * N) - ((M * N) % 8); i < M * N; ++i) { + c_ptr[i] = static_cast(c_fp16[i]); + } +} +#endif + +size_t mllm_kleidai_get_packed_b_qsi4_size_to_fp16(int N, int K) { + const auto tile_cfg = kleidiai_get_best_qsi4_tile_config_f16(); + const auto &ukernel = qsi4_f16_ukernels.at(tile_cfg); + + return kai_get_rhs_packed_size_rhs_pack_kxn_qsi4cxp_qs4cxs1s0( + N, K, ukernel.get_nr(), ukernel.get_kr(), ukernel.get_sr()); +} + +void mllm_kleidai_pack_b_and_bias_qsi4_to_fp16( + uint8_t *packed_b_ptr, + const float *b_ptr, + const float *bias_ptr, + int N, + int K) { + const auto tile_cfg = kleidiai_get_best_qsi4_tile_config_f16(); + const auto &ukernel = qsi4_f16_ukernels.at(tile_cfg); + + const float *bias_to_use = bias_ptr; + std::vector fake_bias; + if (bias_to_use == nullptr) { + fake_bias.assign(N, 0.0f); + bias_to_use = fake_bias.data(); + } + + const size_t quantized_b_size = (size_t)K * N / 2; + std::vector temp_quantized_b(quantized_b_size, 0); + std::vector temp_scales_fp32(N); + + for (int n = 0; n < N; ++n) { + float amax = 0.0f; + for (int k = 0; k < K; ++k) { + amax = std::max(amax, std::abs(b_ptr[k * N + n])); + } + + const float scale = amax / 7.0f; + temp_scales_fp32[n] = scale; + const float inv_scale = (scale != 0.0f) ? 
1.0f / scale : 0.0f; + + for (int k = 0; k < K; ++k) { + const float val = b_ptr[k * N + n]; + int32_t q_val = static_cast(roundf(val * inv_scale)); + q_val = std::max(-8, std::min(7, q_val)); + + uint8_t stored_val = static_cast(q_val + 8); + + size_t byte_idx = (k * N + n) / 2; + if ((k * N + n) % 2 == 0) { + temp_quantized_b[byte_idx] = stored_val; + } else { + temp_quantized_b[byte_idx] |= (stored_val << 4); + } + } + } + + struct kai_rhs_pack_kxn_qsi4cxp_qs4cxs1s0_params kxn_params = {}; + kxn_params.lhs_zero_point = 1; + kxn_params.rhs_zero_point = 8; + kai_run_rhs_pack_kxn_qsi4cxp_qs4cxs1s0( + 1, N, K, ukernel.get_nr(), ukernel.get_kr(), ukernel.get_sr(), + temp_quantized_b.data(), // Pointer to quantized data + bias_to_use, // Pointer to bias data + temp_scales_fp32.data(), // Pointer to fp32 scale data + packed_b_ptr, // Output packed data + 0, // Output stride (0 for contiguous) + &kxn_params); +} + +void mllm_kleidai_gemm_qsi4_f16_internal( + mllm_fp16_t *c_ptr, const float *a_ptr, const uint8_t *packed_b_ptr, + int M, int N, int K) { + kai_matmul_clamp_f16_qai8dxp_qsi4cxp_ukernel ukernel; + if (M == 1) { + ukernel = qsi4_f16_ukernels.at(KleidiaiQsi4TileF16::k1x8_4x8_1x4_dotprod); + } else { + const auto tile_cfg = arm_is_i8mm_supported() ? 
+ KleidiaiQsi4TileF16::k4x8_4x8_16x4_i8mm : + KleidiaiQsi4TileF16::k1x8_4x8_1x4_dotprod; + ukernel = qsi4_f16_ukernels.at(tile_cfg); + } + + auto &workspace_data = WorkspaceManager::get_instance().get_qsi4_workspace(); + + size_t required_workspace_size = kai_get_lhs_packed_size_lhs_quant_pack_qai8dxp_f16_neon( + M, K, ukernel.get_mr(), ukernel.get_kr(), ukernel.get_sr()); + if (workspace_data.size() < required_workspace_size) { + workspace_data.resize(required_workspace_size); + } + kai_run_lhs_quant_pack_qai8dxp_f16_neon( + M, K, + ukernel.get_mr(), ukernel.get_kr(), ukernel.get_sr(), + 0, + reinterpret_cast(a_ptr), + K * sizeof(float), + workspace_data.data()); + + const int n_step = ukernel.get_n_step(); + +#pragma omp parallel for num_threads(kai_thread_count) + for (int n_start = 0; n_start < N; n_start += n_step) { + const int current_n = std::min(N - n_start, n_step); + + const void *a_packed_ptr = workspace_data.data(); + const void *b_packed_offset = (const char *)packed_b_ptr + ukernel.get_rhs_packed_offset(n_start, K); + + uint16_t *c_offset = reinterpret_cast(c_ptr) + n_start * N + n_start; + + ukernel.run_matmul( + M, current_n, K, + a_packed_ptr, b_packed_offset, + c_offset, N * sizeof(uint16_t), + sizeof(uint16_t), + -FLT_MAX, FLT_MAX); + } +} + +#ifndef KAI_FP16_CAL +void mllm_kleidai_gemm_qsi4_to_fp16( + mllm_fp16_t *c_ptr, const float *a_ptr, const uint8_t *packed_b_ptr, + int M, int N, int K) { + auto &c_temp = WorkspaceManager::get_instance().get_qsi4_c_temp_buffer(); + if (c_temp.size() < M * N) { + c_temp.resize(M * N); + } + + mllm_kleidai_gemm_qsi4(c_temp.data(), a_ptr, packed_b_ptr, M, N, K); + +#pragma omp parallel for num_threads(kai_thread_count) + for (int i = 0; i <= (M * N) - 4; i += 4) { + float32x4_t fp32_vec = vld1q_f32(c_temp.data() + i); + float16x4_t fp16_vec = vcvt_f16_f32(fp32_vec); + vst1_f16(reinterpret_cast<__fp16 *>(c_ptr + i), fp16_vec); + } + for (int i = (M * N) - ((M * N) % 4); i < M * N; ++i) { + c_ptr[i] = 
static_cast(c_temp[i]); + } +} +#else +void mllm_kleidai_gemm_qsi4_to_fp16( + mllm_fp16_t *c_ptr, const float *a_ptr, const uint8_t *packed_b_ptr, + int M, int N, int K) { + mllm_kleidai_gemm_qsi4_f16_internal(c_ptr, a_ptr, packed_b_ptr, M, N, K); +} +#endif + +#include "kai/ukernels/matmul/matmul_clamp_f16_f16_f16p/kai_matmul_clamp_f16_f16_f16p_interface.h" +#include "kai/ukernels/matmul/matmul_clamp_f16_f16_f16p/kai_matmul_clamp_f16_f16_f16p16x1biasf16_6x16x8_neon_mla.h" +#include "kai/ukernels/matmul/pack/kai_rhs_pack_kxn_f16p16x1biasf16_f16_f16_neon.h" + +static const kai_matmul_clamp_f16_f16_f16p_ukernel fp16_ukernel = { + .get_m_step = kai_get_m_step_matmul_clamp_f16_f16_f16p16x1biasf16_6x16x8_neon_mla, + .get_n_step = kai_get_n_step_matmul_clamp_f16_f16_f16p16x1biasf16_6x16x8_neon_mla, + .get_nr = kai_get_nr_matmul_clamp_f16_f16_f16p16x1biasf16_6x16x8_neon_mla, + .get_kr = kai_get_kr_matmul_clamp_f16_f16_f16p16x1biasf16_6x16x8_neon_mla, + .get_sr = kai_get_sr_matmul_clamp_f16_f16_f16p16x1biasf16_6x16x8_neon_mla, + .run_matmul = kai_run_matmul_clamp_f16_f16_f16p16x1biasf16_6x16x8_neon_mla, +}; + +size_t mllm_kleidai_get_packed_b_fp16_size(int N, int K) { + return kai_get_rhs_packed_size_rhs_pack_kxn_f16p16x1biasf16_f16_f16_neon(N, K); +} + +void mllm_kleidai_pack_b_and_bias_fp16(mllm_fp16_t *packed_b_ptr, const mllm_fp16_t *b_ptr, const float *bias_ptr, int N, int K) { + std::vector bias_fp16_buffer(N); + if (bias_ptr != nullptr) { + for (int i = 0; i < N; ++i) { + bias_fp16_buffer[i] = static_cast(bias_ptr[i]); + } + } else { + std::fill(bias_fp16_buffer.begin(), bias_fp16_buffer.end(), static_cast(0.0f)); + } + kai_run_rhs_pack_kxn_f16p16x1biasf16_f16_f16_neon( + 1, N, K, fp16_ukernel.get_nr(), fp16_ukernel.get_kr(), fp16_ukernel.get_sr(), + N * sizeof(mllm_fp16_t), b_ptr, bias_fp16_buffer.data(), nullptr, packed_b_ptr, 0, nullptr); +} + +void mllm_kleidai_gemm_fp16(float *c_ptr, const float *a_ptr, const mllm_fp16_t *packed_b_ptr, int M, int N, int K) { + 
auto &a_fp16 = WorkspaceManager::get_instance().get_fp16_a_buffer(); + if (a_fp16.size() < M * K) { + a_fp16.resize(M * K); + } + + auto &c_fp16 = WorkspaceManager::get_instance().get_fp16_c_buffer(); + if (c_fp16.size() < M * N) { + c_fp16.resize(M * N); + } + +#pragma omp parallel for num_threads(kai_thread_count) + for (int i = 0; i < M * K; ++i) { + a_fp16[i] = static_cast(a_ptr[i]); + } + + const int m_step = fp16_ukernel.get_m_step(); + const int n_step = fp16_ukernel.get_n_step(); + +#pragma omp parallel for collapse(2) num_threads(kai_thread_count) + for (int m_start = 0; m_start < M; m_start += m_step) { + for (int n_start = 0; n_start < N; n_start += n_step) { + const int current_m = std::min(M - m_start, m_step); + const int current_n = std::min(N - n_start, n_step); + + const mllm_fp16_t *a_offset = a_fp16.data() + m_start * K; + const mllm_fp16_t *b_offset = packed_b_ptr + (n_start * (K + 1)); + mllm_fp16_t *c_offset = c_fp16.data() + m_start * N + n_start; + + fp16_ukernel.run_matmul( + current_m, current_n, K, a_offset, K * sizeof(mllm_fp16_t), + b_offset, c_offset, N * sizeof(mllm_fp16_t), sizeof(mllm_fp16_t), + -FLT_MAX, FLT_MAX); + } + } + +#pragma omp parallel for num_threads(kai_thread_count) + for (int i = 0; i < M * N; ++i) { + c_ptr[i] = static_cast(c_fp16[i]); + } +} + +#include "kai/ukernels/matmul/matmul_clamp_f32_f32_f32p/kai_matmul_clamp_f32_f32_f32p_interface.h" +#include "kai/ukernels/matmul/matmul_clamp_f32_f32_f32p/kai_matmul_clamp_f32_f32_f32p8x1biasf32_6x8x4_neon_mla.h" +#include "kai/ukernels/matmul/pack/kai_rhs_pack_kxn_f32p8x1biasf32_f32_f32_neon.h" + +static const kai_matmul_clamp_f32_f32_f32p_ukernel fp32_ukernel = { + .get_m_step = kai_get_m_step_matmul_clamp_f32_f32_f32p8x1biasf32_6x8x4_neon_mla, + .get_n_step = kai_get_n_step_matmul_clamp_f32_f32_f32p8x1biasf32_6x8x4_neon_mla, + .get_nr = kai_get_nr_matmul_clamp_f32_f32_f32p8x1biasf32_6x8x4_neon_mla, + .get_kr = kai_get_kr_matmul_clamp_f32_f32_f32p8x1biasf32_6x8x4_neon_mla, 
+ .get_sr = kai_get_sr_matmul_clamp_f32_f32_f32p8x1biasf32_6x8x4_neon_mla, + .run_matmul = kai_run_matmul_clamp_f32_f32_f32p8x1biasf32_6x8x4_neon_mla, +}; + +size_t mllm_kleidai_get_packed_b_fp32_size(int N, int K) { + return kai_get_rhs_packed_size_rhs_pack_kxn_f32p8x1biasf32_f32_f32_neon(N, K); +} + +void mllm_kleidai_pack_b_and_bias_fp32(float *packed_b_ptr, const float *b_ptr, const float *bias_ptr, int N, int K) { + const float *bias_to_use = bias_ptr; + std::vector fake_bias; + if (bias_to_use == nullptr) { + fake_bias.assign(N, 0.0f); + bias_to_use = fake_bias.data(); + } + kai_run_rhs_pack_kxn_f32p8x1biasf32_f32_f32_neon( + 1, N, K, fp32_ukernel.get_nr(), fp32_ukernel.get_kr(), fp32_ukernel.get_sr(), + N * sizeof(float), b_ptr, bias_to_use, nullptr, packed_b_ptr, 0, nullptr); +} + +void mllm_kleidai_gemm_fp32(float *c_ptr, const float *a_ptr, const float *packed_b_ptr, int M, int N, int K) { + const int m_step = fp32_ukernel.get_m_step(); + const int n_step = fp32_ukernel.get_n_step(); + +#pragma omp parallel for collapse(2) num_threads(kai_thread_count) + for (int m_start = 0; m_start < M; m_start += m_step) { + for (int n_start = 0; n_start < N; n_start += n_step) { + const int current_m = std::min(M - m_start, m_step); + const int current_n = std::min(N - n_start, n_step); + const float *a_offset = a_ptr + m_start * K; + const float *b_offset = packed_b_ptr + (n_start * (K + 1)); + float *c_offset = c_ptr + m_start * N + n_start; + fp32_ukernel.run_matmul( + current_m, current_n, K, + a_offset, K * sizeof(float), b_offset, + c_offset, N * sizeof(float), sizeof(float), + -FLT_MAX, FLT_MAX); + } + } +} + +#include "Transpose2D.hpp" +void mllm_kleidai_pack_b_and_bias_fp32_transpose(float *packed_b_ptr, const float *b_ptr_nxk, const float *bias_ptr, int N, int K) { + std::vector b_temp_kxn(K * N); + transpose_matrix_efficient(b_ptr_nxk, b_temp_kxn.data(), N, K); + mllm_kleidai_pack_b_and_bias_fp32(packed_b_ptr, b_temp_kxn.data(), bias_ptr, N, K); +} + +void 
mllm_kleidai_pack_b_and_bias_fp16_transpose(mllm_fp16_t *packed_b_ptr, const mllm_fp16_t *b_ptr_nxk, const float *bias_ptr, int N, int K) { + std::vector b_temp_kxn(K * N); + +#if defined(__aarch64__) + transpose_matrix_efficient_fp16(b_ptr_nxk, b_temp_kxn.data(), N, K); +#else + const int BLOCK_DIM = 32; + for (int i = 0; i < N; i += BLOCK_DIM) { + for (int j = 0; j < K; j += BLOCK_DIM) { + for (int bi = i; bi < i + BLOCK_DIM && bi < N; ++bi) { + for (int bj = j; j < K && bj < j + BLOCK_DIM; ++bj) { + b_temp_kxn[bj * N + bi] = b_ptr_nxk[bi * K + bj]; + } + } + } + } +#endif + + mllm_kleidai_pack_b_and_bias_fp16(packed_b_ptr, b_temp_kxn.data(), bias_ptr, N, K); +} +/*** no use ****/ +void mllm_kleidai_gemm_fp32_transpose(float *c_ptr, const float *a_ptr, const float *b_ptr_nxk, const float *bias_ptr, int M, int N, int K) { + size_t packed_b_size = mllm_kleidai_get_packed_b_fp32_size(N, K); + std::vector packed_b_data(packed_b_size); + mllm_kleidai_pack_b_and_bias_fp32_transpose(packed_b_data.data(), b_ptr_nxk, bias_ptr, N, K); + mllm_kleidai_gemm_fp32(c_ptr, a_ptr, packed_b_data.data(), M, N, K); +} + +void mllm_kleidai_gemm_fp16_transpose(float *c_ptr, const float *a_ptr, const mllm_fp16_t *b_ptr_nxk, const float *bias_ptr, int M, int N, int K) { + size_t packed_b_size = mllm_kleidai_get_packed_b_fp16_size(N, K); + std::vector packed_b_data(packed_b_size / sizeof(mllm_fp16_t)); + mllm_kleidai_pack_b_and_bias_fp16_transpose(packed_b_data.data(), b_ptr_nxk, bias_ptr, N, K); + mllm_kleidai_gemm_fp16(c_ptr, a_ptr, packed_b_data.data(), M, N, K); +} + +void mllm_kleidai_gemm_fp32_bshd(float *c_ptr, const float *a_ptr, const float *packed_b_ptr, int B, int H, int S_M, int S_N, int D_K) { + const int M = S_M; + const int K = D_K; // 在GEMM上下文中,K是BSHD布局中的D(dimension) + + // 为 BSHD (B,S,H,D/N) 布局计算跨距 + const long long stride_a_b = (long long)S_M * H * K; + const long long stride_a_s = (long long)H * K; + + const long long stride_c_b = (long long)S_M * H * S_N; + const long 
long stride_c_s = (long long)H * S_N; + + const int m_step = fp32_ukernel.get_m_step(); + const int n_step = fp32_ukernel.get_n_step(); + + // 并行处理 batch 和 head 维度 +#pragma omp parallel for collapse(2) num_threads(kai_thread_count) + for (int b = 0; b < B; ++b) { + for (int h = 0; h < H; ++h) { + for (int m_start = 0; m_start < M; m_start += m_step) { + for (int n_start = 0; n_start < S_N; n_start += n_step) { + const int current_m = std::min(M - m_start, m_step); + const int current_n = std::min(S_N - n_start, n_step); + + // 计算当前块在BSHD布局中的A矩阵偏移 + // 指向 A[b, m_start, h, 0] + const float *a_offset = a_ptr + b * stride_a_b + m_start * stride_a_s + h * K; + + // B矩阵是预打包的,其偏移仅与N维度相关 + const float *b_offset = packed_b_ptr + (n_start * (K + 1)); + + // 计算当前块在BSHD布局中的C矩阵偏移 + // 指向 C[b, m_start, h, n_start] + float *c_offset = c_ptr + b * stride_c_b + m_start * stride_c_s + h * S_N + n_start; + + // 调用微内核,传入正确的行跨距 + fp32_ukernel.run_matmul( + current_m, current_n, K, + a_offset, stride_a_s * sizeof(float), // A矩阵的行跨距 + b_offset, + c_offset, stride_c_s * sizeof(float), // C矩阵的行跨距 + sizeof(float), + -FLT_MAX, FLT_MAX); + } + } + } + } +} + +void mllm_kleidai_gemm_fp16_bshd(float *c_ptr, const float *a_ptr, const mllm_fp16_t *packed_b_ptr, int B, int H, int S_M, int S_N, int D_K) { + const int M = S_M; + const int K = D_K; // 在GEMM上下文中,K是BSHD布局中的D(dimension) + + // 为 BSHD (B,S,H,D/N) 布局计算跨距 + const long long stride_a_b = (long long)S_M * H * K; + const long long stride_a_s = (long long)H * K; + + const long long stride_c_b = (long long)S_M * H * S_N; + const long long stride_c_s = (long long)H * S_N; + + const int m_step = fp16_ukernel.get_m_step(); + const int n_step = fp16_ukernel.get_n_step(); + + // 并行处理 batch 和 head 维度 +#pragma omp parallel for collapse(2) num_threads(kai_thread_count) + for (int b = 0; b < B; ++b) { + for (int h = 0; h < H; ++h) { + // 从工作区管理器获取线程本地缓冲区 + auto &a_fp16 = WorkspaceManager::get_instance().get_fp16_a_buffer(); + if (a_fp16.size() < M * K) { 
+ a_fp16.resize(M * K); + } + + auto &c_fp16 = WorkspaceManager::get_instance().get_fp16_c_buffer(); + if (c_fp16.size() < M * S_N) { + c_fp16.resize(M * S_N); + } + + // 1. 收集(Gather)和转换: 将非连续的 BSHD float A矩阵切片复制到连续的 fp16 缓冲区 + const float *a_bh_ptr = a_ptr + b * stride_a_b; // 指向批次 b 的起始位置 + for (int s = 0; s < S_M; ++s) { + for (int d = 0; d < D_K; ++d) { + // 从 A[b,s,h,d] 读取 + a_fp16[s * D_K + d] = static_cast(a_bh_ptr[s * stride_a_s + h * D_K + d]); + } + } + + // 2. 计算: 在连续的缓冲区上执行GEMM + for (int m_start = 0; m_start < M; m_start += m_step) { + for (int n_start = 0; n_start < S_N; n_start += n_step) { + const int current_m = std::min(M - m_start, m_step); + const int current_n = std::min(S_N - n_start, n_step); + + const mllm_fp16_t *a_offset = a_fp16.data() + m_start * K; + const mllm_fp16_t *b_offset = packed_b_ptr + (n_start * (K + 1)); + mllm_fp16_t *c_offset = c_fp16.data() + m_start * S_N + n_start; + + // 由于 a_fp16 和 c_fp16 是连续的,使用标准的行跨距 + fp16_ukernel.run_matmul( + current_m, current_n, K, + a_offset, K * sizeof(mllm_fp16_t), + b_offset, + c_offset, S_N * sizeof(mllm_fp16_t), sizeof(mllm_fp16_t), + -FLT_MAX, FLT_MAX); + } + } + + // 3. 
分散(Scatter)和转换: 将连续的 fp16 结果缓冲区复制回非连续的 BSHD float C矩阵 + float *c_bh_ptr = c_ptr + b * stride_c_b; // 指向批次 b 的起始位置 + for (int s = 0; s < S_M; ++s) { + for (int n = 0; n < S_N; ++n) { + // 写入 C[b,s,h,n] + c_bh_ptr[s * stride_c_s + h * S_N + n] = static_cast(c_fp16[s * S_N + n]); + } + } + } + } +} +#endif \ No newline at end of file diff --git a/mllm/backends/cpu/compute/GemmKleidiai.hpp b/mllm/backends/cpu/compute/GemmKleidiai.hpp new file mode 100644 index 000000000..f844629eb --- /dev/null +++ b/mllm/backends/cpu/compute/GemmKleidiai.hpp @@ -0,0 +1,49 @@ +#pragma once +#if defined(__aarch64__) || defined(__arm__) || defined(__arm64__) + +#include // For size_t +#include // For uint8_t +#include "Types.hpp" + +// #define KAI_FP16_CAL + +static int kai_thread_count = 4; + +// --- 实现 1: float * qsi4c32 -> float--- +size_t mllm_kleidai_get_packed_b_qsi4_size(int N, int K); +void mllm_kleidai_pack_b_and_bias_qsi4(uint8_t *packed_b_ptr, const float *b_ptr, const float *bias_ptr, int N, int K); +void mllm_kleidai_pack_b_and_bias_qsi4_quant( + uint8_t *packed_b_ptr, + const uint8_t *b_qweight_ptr, + const float *b_scale_ptr, + // const uint8_t *b_zero_ptr, + const float *bias_ptr, + int N, + int K); +void mllm_kleidai_gemm_qsi4(float *c_ptr, const float *a_ptr, const uint8_t *packed_b_ptr, int M, int N, int K); + +// --- 实现 1.5: float * qsi4c32 -> fp16--- +size_t mllm_kleidai_get_packed_b_qsi4_size_to_fp16(int N, int K); +void mllm_kleidai_pack_b_and_bias_qsi4_to_fp16(uint8_t *packed_b_ptr, const float *b_ptr, const float *bias_ptr, int N, int K); +void mllm_kleidai_gemm_qsi4_to_fp16(mllm_fp16_t *c_ptr, const float *a_ptr, const uint8_t *packed_b_ptr, int M, int N, int K); + +// --- 实现 2: float * fp16 -> float--- + +size_t mllm_kleidai_get_packed_b_fp16_size(int N, int K); +void mllm_kleidai_pack_b_and_bias_fp16(mllm_fp16_t *packed_b_ptr, const mllm_fp16_t *b_ptr, const float *bias_ptr, int N, int K); +void mllm_kleidai_gemm_fp16(float *c_ptr, const float *a_ptr, const 
mllm_fp16_t *packed_b_ptr, int M, int N, int K); + +// --- 实现 3: float * fp32 -> float--- +size_t mllm_kleidai_get_packed_b_fp32_size(int N, int K); +void mllm_kleidai_pack_b_and_bias_fp32(float *packed_b_ptr, const float *b_ptr, const float *bias_ptr, int N, int K); +void mllm_kleidai_gemm_fp32(float *c_ptr, const float *a_ptr, const float *packed_b_ptr, int M, int N, int K); + +// --- APIs for Transposed Right-Hand Matrix Multiplication --- +void mllm_kleidai_gemm_fp32_transpose(float *c_ptr, const float *a_ptr, const float *b_ptr_nxk, const float *bias_ptr, int M, int N, int K); +void mllm_kleidai_gemm_fp16_transpose(float *c_ptr, const float *a_ptr, const mllm_fp16_t *b_ptr_nxk, const float *bias_ptr, int M, int N, int K); + +// --- APIs for BSHD layout GEMM --- +void mllm_kleidai_gemm_fp32_bshd(float *c_ptr, const float *a_ptr, const float *packed_b_ptr, int B, int H, int S_M, int S_N, int D_K); +void mllm_kleidai_gemm_fp16_bshd(float *c_ptr, const float *a_ptr, const mllm_fp16_t *packed_b_ptr, int B, int H, int S_M, int S_N, int D_K); + +#endif \ No newline at end of file diff --git a/mllm/backends/cpu/compute/GemmQ2K.cpp b/mllm/backends/cpu/compute/GemmQ2K.cpp new file mode 100644 index 000000000..3ed03a7fb --- /dev/null +++ b/mllm/backends/cpu/compute/GemmQ2K.cpp @@ -0,0 +1,348 @@ +#include "GemmQ2K.hpp" + +#include "backends/cpu/third_party/ggml/QuantizeQ8.hpp" + +#include +#include +#include +#include + +#if defined(__ARM_NEON) +#include +#endif + +#define M_BLOCK_SIZE 8 +#define N_BLOCK_SIZE 16 + +/** + * @brief 为 GEMM 操作对 float 矩阵 B(KxN) 进行 Q8_K 量化和打包。 + * + * 原始的打包方式导致在 GEMM 内核中访问数据时是跨步的 (strided access), + * 这严重破坏了缓存局部性。新的打包方式将微内核所需的数据块连续存储。 + * + * 原始布局 (逻辑上): [N 块][列偏移][K 块] + * 访问模式: `base + n_idx * K_blocks` -> 步长巨大! + * + * 优化后布局 (逻辑上): [N 块][K 块][列偏移] + * 访问模式: `base + n_idx` -> 完美连续访问! 
+ * + * @param B_packed 输出,打包好的 Q8_K 矩阵。 + * @param B_float 输入,行主序的 float 矩阵 (KxN)。 + * @param K 矩阵 B 的行数。 + * @param N 矩阵 B 的列数。 + */ +void quantize_and_pack_q8_k_for_gemm( + block_q8_K *B_packed, + const float *B_float, + int K, + int N) { + assert(K % QK_K == 0); + assert(N % N_BLOCK_SIZE == 0); + const int K_blocks = K / QK_K; + const int N_chunks = N / N_BLOCK_SIZE; + +#pragma omp parallel for num_threads(4) + for (int j_chunk = 0; j_chunk < N_chunks; ++j_chunk) { + std::vector temp_col(QK_K); + for (int k_block = 0; k_block < K_blocks; ++k_block) { + for (int col_offset = 0; col_offset < N_BLOCK_SIZE; ++col_offset) { + const int j = j_chunk * N_BLOCK_SIZE; + for (int k_inner = 0; k_inner < QK_K; ++k_inner) { + const int row_idx = (k_block * QK_K) + k_inner; + const int col_idx = j + col_offset; + temp_col[k_inner] = B_float[(row_idx * N) + col_idx]; + } + // 核心改动:调整写入位置,确保 N_BLOCK_SIZE 这个维度的数据是连续的 + block_q8_K *dest_block = B_packed + (j_chunk * (K_blocks * N_BLOCK_SIZE)) + (k_block * N_BLOCK_SIZE) + col_offset; + quantize_row_q8_K(temp_col.data(), dest_block, QK_K); + } + } + } +} + +#if defined(__ARM_NEON) +/** + * @brief 8x16 NEON 微内核 + * + * 1. 一次性处理一个 8x16 的输出块,共 128 个 float 累加器。 + * 2. 使用 32 个 NEON 向量寄存器中的大部分来保存这些累加器。 + * 3. 对 A 矩阵的数据(scales, quants)在 K 维度内循环前进行预加载和预处理, + * 在 B 矩阵的数据(quants, bsums)在 N 维度上进行向量化加载。 + * 4. 通过精心设计的指令序列,最大化浮点乘加 (FMLA) 指令的吞吐量。 + * 5. 
循环展开和指令重排以减少依赖和流水线停顿。 + */ +static inline void micro_kernel_8x16_neon( + float acc[M_BLOCK_SIZE][N_BLOCK_SIZE], + const block_q2_K *a_blocks[M_BLOCK_SIZE], + const block_q8_K *b_block_base) { + // 8x16 = 128 个累加器,用 32 个 float32x4_t 向量寄存器表示 + float32x4_t acc_vecs[8][4]; + for (int i = 0; i < 8; ++i) { + for (int j = 0; j < 4; ++j) { + acc_vecs[i][j] = vld1q_f32(&acc[i][j * 4]); + } + } + + // 预加载 B 矩阵的 d + float32x4_t d_b_vecs[4]; + float d_b_vals[16]; + for (int n = 0; n < 16; ++n) d_b_vals[n] = b_block_base[n].d; + for (int j = 0; j < 4; ++j) d_b_vecs[j] = vld1q_f32(&d_b_vals[j * 4]); + + // 预加载 A 矩阵的 d 和 dmin + float d_a_vals[8], dmin_a_vals[8]; + for (int m = 0; m < 8; ++m) { + d_a_vals[m] = MLLM_FP16_TO_FP32(a_blocks[m]->d); + dmin_a_vals[m] = MLLM_FP16_TO_FP32(a_blocks[m]->dmin); + } + + // 内部循环处理一个 QK_K 大小的块 + for (int sub_block_idx = 0; sub_block_idx < QK_K / 16; ++sub_block_idx) { + // --- 1. 处理 dmin * bsums 部分 --- + int16_t bsums_vals[16]; + for (int n = 0; n < 16; ++n) bsums_vals[n] = b_block_base[n].bsums[sub_block_idx]; + + const int16x8_t bsums_vec_lo = vld1q_s16(bsums_vals); + const int16x8_t bsums_vec_hi = vld1q_s16(bsums_vals + 8); + + for (int m = 0; m < 8; ++m) { + const int16_t m_val = (int16_t)(a_blocks[m]->scales[sub_block_idx] >> 4); + + float32x4_t summs_f[4]; + summs_f[0] = vcvtq_f32_s32(vmull_n_s16(vget_low_s16(bsums_vec_lo), m_val)); + summs_f[1] = vcvtq_f32_s32(vmull_n_s16(vget_high_s16(bsums_vec_lo), m_val)); + summs_f[2] = vcvtq_f32_s32(vmull_n_s16(vget_low_s16(bsums_vec_hi), m_val)); + summs_f[3] = vcvtq_f32_s32(vmull_n_s16(vget_high_s16(bsums_vec_hi), m_val)); + + for (int j = 0; j < 4; ++j) { + float32x4_t dmin_a_d_b = vmulq_n_f32(d_b_vecs[j], -dmin_a_vals[m]); + acc_vecs[m][j] = vmlaq_f32(acc_vecs[m][j], dmin_a_d_b, summs_f[j]); + } + } + + // --- 2. 
处理主点积部分 --- + // 预加载 B 矩阵的 quants + int8x16_t q8_vecs[16]; + for (int n = 0; n < 16; ++n) { + q8_vecs[n] = vld1q_s8(b_block_base[n].qs + sub_block_idx * 16); + } + + // 对 A 矩阵的每一行 + for (int m = 0; m < 8; ++m) { + const int s_val = a_blocks[m]->scales[sub_block_idx] & 0x0F; + if (s_val == 0) continue; + + uint8_t l_bytes[16]; + for (int k = 0; k < 16; ++k) { + int k_inner = sub_block_idx * 16 + k; + const int ib = k_inner / 128, iib = k_inner % 128, iic = iib % 32, cic = iib / 32; + l_bytes[k] = (a_blocks[m]->qs[ib * 32 + iic] >> (cic * 2)) & 3; + } + const int8x16_t l_vec = vreinterpretq_s8_u8(vld1q_u8(l_bytes)); + + // 计算一行 A 和 16 列 B 的点积 + int32_t s_dot_vals[16]; + for (int n = 0; n < 16; ++n) { + const int16x8_t p_lo = vmull_s8(vget_low_s8(l_vec), vget_low_s8(q8_vecs[n])); + const int16x8_t p_hi = vmull_s8(vget_high_s8(l_vec), vget_high_s8(q8_vecs[n])); + s_dot_vals[n] = vaddvq_s32(vpaddlq_s16(vaddq_s16(p_lo, p_hi))); + } + + // 向量化乘法和累加 + const float32x4_t scale_factor = vdupq_n_f32((float)s_val * d_a_vals[m]); + for (int j = 0; j < 4; ++j) { + float32x4_t isum_f32 = vcvtq_f32_s32(vld1q_s32(&s_dot_vals[j * 4])); + float32x4_t term = vmulq_f32(scale_factor, d_b_vecs[j]); + acc_vecs[m][j] = vmlaq_f32(acc_vecs[m][j], term, isum_f32); + } + } + } + + // 写回累加器 + for (int i = 0; i < 8; ++i) { + for (int j = 0; j < 4; ++j) { + vst1q_f32(&acc[i][j * 4], acc_vecs[i][j]); + } + } +} +#else +// C++ and AVX2 Fallback +static inline void micro_kernel_8x16_reference( + float acc[M_BLOCK_SIZE][N_BLOCK_SIZE], + const block_q2_K *a_blocks[M_BLOCK_SIZE], + const block_q8_K *b_block_base) { + float d_all[M_BLOCK_SIZE][N_BLOCK_SIZE], d_min[M_BLOCK_SIZE][N_BLOCK_SIZE]; + for (int m = 0; m < M_BLOCK_SIZE; ++m) { + for (int n = 0; n < N_BLOCK_SIZE; ++n) { + d_all[m][n] = MLLM_FP16_TO_FP32(a_blocks[m]->d) * b_block_base[n].d; + d_min[m][n] = MLLM_FP16_TO_FP32(a_blocks[m]->dmin) * b_block_base[n].d; + } + } + + for (int sub_block_idx = 0; sub_block_idx < QK_K / 16; ++sub_block_idx) { 
+ int32_t isum_tile[M_BLOCK_SIZE][N_BLOCK_SIZE] = {{0}}; + + for (int k_in_subblock = 0; k_in_subblock < 16; ++k_in_subblock) { + const int k_inner = sub_block_idx * 16 + k_in_subblock; + + int s_vals[M_BLOCK_SIZE], l_vals[M_BLOCK_SIZE]; + for (int m = 0; m < M_BLOCK_SIZE; ++m) { + s_vals[m] = a_blocks[m]->scales[sub_block_idx] & 0x0F; + const int ib = k_inner / 128, iib = k_inner % 128, iic = iib % 32, cic = iib / 32; + l_vals[m] = (a_blocks[m]->qs[ib * 32 + iic] >> (cic * 2)) & 3; + } + + int8_t q_vals[N_BLOCK_SIZE]; + for (int n = 0; n < N_BLOCK_SIZE; ++n) { + q_vals[n] = b_block_base[n].qs[k_inner]; + } + + for (int m = 0; m < M_BLOCK_SIZE; ++m) { + for (int n = 0; n < N_BLOCK_SIZE; ++n) { + isum_tile[m][n] += s_vals[m] * l_vals[m] * q_vals[n]; + } + } + } + + for (int m = 0; m < M_BLOCK_SIZE; ++m) { + const int m_val = a_blocks[m]->scales[sub_block_idx] >> 4; + for (int n = 0; n < N_BLOCK_SIZE; ++n) { + const int32_t summs = m_val * b_block_base[n].bsums[sub_block_idx]; + acc[m][n] += d_all[m][n] * isum_tile[m][n] - d_min[m][n] * summs; + } + } + } +} +#endif + +void gemv_q2_k_q8_k( + float *y, + const block_q2_K *A, + const block_q8_K *x, + int M, + int K) { + assert(K % QK_K == 0); + const int K_blocks = K / QK_K; +#pragma omp parallel for num_threads(4) + for (int i = 0; i < M; ++i) { + float row_sum = 0.0f; + const block_q2_K *A_row = A + i * K_blocks; + for (int k_block = 0; k_block < K_blocks; ++k_block) { + const block_q2_K *a_block = A_row + k_block; + const block_q8_K *x_block = x + k_block; +#if defined(__ARM_NEON__) || defined(__ARM_NEON) + // ... 
(gemv NEON code remains the same as it was already efficient) + const float d = x_block->d * MLLM_FP16_TO_FP32(a_block->d); + const float dmin = -x_block->d * MLLM_FP16_TO_FP32(a_block->dmin); + + int32_t summs32_total = 0; + { + const uint8x16_t mins_and_scales = vld1q_u8(a_block->scales); + const uint8x16_t mins = vshrq_n_u8(mins_and_scales, 4); + const int16x8_t q8sums_lo = vld1q_s16(x_block->bsums); + const int16x8_t q8sums_hi = vld1q_s16(x_block->bsums + 8); + const int16x8_t mins_lo = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(mins))); + const int16x8_t mins_hi = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(mins))); + int32x4_t summs32 = vmull_s16(vget_low_s16(mins_lo), vget_low_s16(q8sums_lo)); + summs32 = vmlal_s16(summs32, vget_high_s16(mins_lo), vget_high_s16(q8sums_lo)); + summs32 = vmlal_s16(summs32, vget_low_s16(mins_hi), vget_low_s16(q8sums_hi)); + summs32 = vmlal_s16(summs32, vget_high_s16(mins_hi), vget_high_s16(q8sums_hi)); + summs32_total = vaddvq_s32(summs32); + } + row_sum += dmin * summs32_total; + + int32_t isum = 0; + { + uint8_t scales_s[16]; + vst1q_u8(scales_s, vandq_u8(vld1q_u8(a_block->scales), vdupq_n_u8(0xF))); + for (int sub_block_idx = 0; sub_block_idx < QK_K / 16; ++sub_block_idx) { + uint8_t l_bytes[16]; + for (int k = 0; k < 16; ++k) { + int k_inner = sub_block_idx * 16 + k; + const int ib = k_inner / 128, iib = k_inner % 128, iic = iib % 32, cic = iib / 32; + l_bytes[k] = (a_block->qs[ib * 32 + iic] >> (cic * 2)) & 3; + } + const int8x16_t l_vec = vreinterpretq_s8_u8(vld1q_u8(l_bytes)); + const int8x16_t q8_vec = vld1q_s8(x_block->qs + sub_block_idx * 16); + const int16x8_t p_lo = vmull_s8(vget_low_s8(l_vec), vget_low_s8(q8_vec)); + const int16x8_t p_hi = vmull_s8(vget_high_s8(l_vec), vget_high_s8(q8_vec)); + isum += vaddvq_s32(vpaddlq_s16(vaddq_s16(p_lo, p_hi))) * scales_s[sub_block_idx]; + } + } + row_sum += d * isum; +#else + for (int k_inner = 0; k_inner < QK_K; ++k_inner) { + const float d = MLLM_FP16_TO_FP32(a_block->d); + 
const float dmin = MLLM_FP16_TO_FP32(a_block->dmin); + const int sub_block_idx = k_inner / 16; + const uint8_t sm_byte = a_block->scales[sub_block_idx]; + const float s = (float)(sm_byte & 0x0F); + const float m = (float)(sm_byte >> 4); + const int ib = k_inner / 128, iib = k_inner % 128, iic = iib % 32, c_idx = iib / 32; + const uint8_t p_byte = a_block->qs[ib * 32 + iic]; + const int L = (p_byte >> (c_idx * 2)) & 3; + const float a_val = (d * s) * L - (dmin * m); + const float x_val = x_block->d * (float)x_block->qs[k_inner]; + row_sum += a_val * x_val; + } +#endif + } + y[i] = row_sum; + } +} + +/** + * @brief矩阵-矩阵乘法 (GEMM): C = A * B + * + * 1. 采用三层循环分块策略 (i, j, k),并使用 OpenMP 对最外层 j 循环进行并行化, + * 这是 GEMM 优化的经典且高效的并行策略。 + * 2. 主循环以更大的 M_BLOCK_SIZE x N_BLOCK_SIZE (8x16) 的步长进行, + * 处理更大的数据块,提高了计算密度。 + * 3. 在 K 维度上,一次处理一个 QK_K 大小的块,这与数据的量化方式天然契合。 + * 4. 调用上面优化过的 `micro_kernel_8x16` 来执行核心计算。 + * 5. 对 A 和 B 矩阵中参与计算的块地址进行预计算和传递,使微内核的调用更简洁高效。 + */ +void gemm_q2_k_q8_k( + float *C, + const block_q2_K *A, + const block_q8_K *B_packed, + int M, + int N, + int K) { + assert(M % M_BLOCK_SIZE == 0); + assert(N % N_BLOCK_SIZE == 0); + assert(K % QK_K == 0); + + const int K_blocks = K / QK_K; + +#pragma omp parallel for num_threads(4) + for (int j = 0; j < N; j += N_BLOCK_SIZE) { + for (int i = 0; i < M; i += M_BLOCK_SIZE) { + float acc[M_BLOCK_SIZE][N_BLOCK_SIZE] = {{0.0f}}; + + for (int k_block = 0; k_block < K_blocks; ++k_block) { + // --- 优化点 3: 干净利落地传递数据指针 --- + const block_q2_K *a_blocks[M_BLOCK_SIZE]; + for (int m_idx = 0; m_idx < M_BLOCK_SIZE; ++m_idx) { + a_blocks[m_idx] = A + (i + m_idx) * K_blocks + k_block; + } + + // 关键改动:得益于新的 packing 方式,B 矩阵块的地址是连续的 + const block_q8_K *b_block_base = B_packed + (j / N_BLOCK_SIZE) * (K_blocks * N_BLOCK_SIZE) + k_block * N_BLOCK_SIZE; + +#if defined(__ARM_NEON__) || defined(__ARM_NEON) + micro_kernel_8x16_neon(acc, a_blocks, b_block_base); +#else + micro_kernel_8x16_reference(acc, a_blocks, b_block_base); +#endif + } + + // --- 
将累加结果写回 C 矩阵 --- + for (int m_idx = 0; m_idx < M_BLOCK_SIZE; ++m_idx) { + for (int n_idx = 0; n_idx < N_BLOCK_SIZE; ++n_idx) { + C[(i + m_idx) * N + (j + n_idx)] = acc[m_idx][n_idx]; + } + } + } + } +} \ No newline at end of file diff --git a/mllm/backends/cpu/compute/GemmQ2K.hpp b/mllm/backends/cpu/compute/GemmQ2K.hpp new file mode 100644 index 000000000..8c2ca4d56 --- /dev/null +++ b/mllm/backends/cpu/compute/GemmQ2K.hpp @@ -0,0 +1,45 @@ +#pragma once +#include "DataType.hpp" + +/** + * @brief 为 GEMM 操作,对 float 矩阵 B(KxN) 进行 Q8_K 量化和打包。 + * 将行主序的 float 矩阵 B(KxN) 转换为适合微内核高效列式访问的打包格式。 + * @param B_packed 输出,打包好的 Q8_K 矩阵。 + * @param B_float 输入,行主序的 float 矩阵 (KxN)。 + * @param K 矩阵 B 的行数。 + * @param N 矩阵 B 的列数。 + */ +void quantize_and_pack_q8_k_for_gemm( + block_q8_K *B_packed, + const float *B_float, + int K, + int N); + +/** + * @brief 矩阵-向量乘法 (GEMV): y = A * x + * @param y 输出向量 (M x 1), float + * @param A 输入矩阵 (M x K), Q2_K 格式, 按行主序存储 + * @param x 输入向量 (K x 1), Q8_K 格式 + * @param M, K 矩阵/向量维度 + */ +void gemv_q2_k_q8_k( + float *y, + const block_q2_K *A, + const block_q8_K *x, + int M, + int K); + +/** + * @brief 矩阵-矩阵乘法 (GEMM): C = A * B + * @param C 输出矩阵 (M x N), float, 列主序 + * @param A 输入矩阵 (M x K), Q2_K 格式, 行主序 + * @param B_packed 输入矩阵 (K x N), 已被 quantize_and_pack 处理过 + * @param M, N, K 矩阵维度 + */ +void gemm_q2_k_q8_k( + float *C, + const block_q2_K *A, + const block_q8_K *B_packed, + int M, + int N, + int K); diff --git a/src/backends/cpu/compute/SMEGEMM.hpp b/mllm/backends/cpu/compute/GemmSme.hpp similarity index 100% rename from src/backends/cpu/compute/SMEGEMM.hpp rename to mllm/backends/cpu/compute/GemmSme.hpp diff --git a/src/backends/cpu/compute/Im2Col.cpp b/mllm/backends/cpu/compute/Im2Col.cpp similarity index 100% rename from src/backends/cpu/compute/Im2Col.cpp rename to mllm/backends/cpu/compute/Im2Col.cpp diff --git a/src/backends/cpu/compute/Im2Col.hpp b/mllm/backends/cpu/compute/Im2Col.hpp similarity index 100% rename from 
src/backends/cpu/compute/Im2Col.hpp rename to mllm/backends/cpu/compute/Im2Col.hpp diff --git a/src/backends/cpu/compute/Matmul.cpp b/mllm/backends/cpu/compute/Matmul.cpp similarity index 90% rename from src/backends/cpu/compute/Matmul.cpp rename to mllm/backends/cpu/compute/Matmul.cpp index bcf01c42c..a2f500a10 100644 --- a/src/backends/cpu/compute/Matmul.cpp +++ b/mllm/backends/cpu/compute/Matmul.cpp @@ -4,12 +4,12 @@ #include "Matmul.hpp" #include "Types.hpp" -#include "VecDotType.hpp" -#include "LlamafileSGEMM.hpp" +#include "backends/cpu/third_party/ggml/VecDotType.hpp" +#include "backends/cpu/third_party/ggml/GemmLlamafile.hpp" #include #include #include "Arithmetic.hpp" -#include "SMEGEMM.hpp" +#include "GemmSme.hpp" #ifdef __ARM_NEON #include @@ -28,77 +28,12 @@ ErrorCode mat_mul(Tensor *src0, Tensor *src1, Tensor *dst, bool support_bias, Te auto src1_dtype = src1->dtype(); auto dst_dtype = dst->dtype(); - // ----------- BEGIN SME Path Check ----------- -#if defined(__ARM_FEATURE_SME) - if (src0->batch() == 1 && src0->head() == 1 && src1->batch() == 1 && src1->head() == 1 && dst->batch() == 1 && dst->head() == 1 && // Ensure dst also expects B=1, H=1 - src1->ctype() == BSHD && dst->ctype() == BSHD && dst_dtype == MLLM_TYPE_F32) { - // Calculate base pointers for b=0, h=0 - // Assuming blck_size is 1 for F32/F16, so division by blck_size is not strictly needed here - // but kept for consistency if it could be > 1 for some packed types. 
- const void *p_src0_base = (const char *)src0->rawHostPtr() + src0->offset(0, 0, 0, 0) * type_size(src0_dtype) / blck_size(src0_dtype); - const void *p_src1_base = (const char *)src1->rawHostPtr() + src1->offset(0, 0, 0, 0) * type_size(src1_dtype) / blck_size(src1_dtype); - float *p_dst_base = (float *)((char *)dst->rawHostPtr() + dst->offset(0, 0, 0, 0) * type_size(dst_dtype) / blck_size(dst_dtype)); - - // Leading dimensions (number of columns for row-major) - // For A (src0) M x K: lda = K - // For B (src1, which is W) K x N: ldb = N (cols of W) - // For C (dst) M x N: ldc = N - int lda = K; - int ldb = N; // src1 (W) has N columns - int ldc = N; - - bool sme_path_taken = false; - - if (src0_dtype == MLLM_TYPE_F32 && src1_dtype == MLLM_TYPE_F32) { - // Call SME F32 x F32^T -> F32 - sme_gemm_f32f32_f32( - static_cast(p_src0_base), - static_cast(p_src1_base), - p_dst_base, - M, K, N, lda, ldb, ldc); - sme_path_taken = true; - } else if (src0_dtype == MLLM_TYPE_F16 && src1_dtype == MLLM_TYPE_F16) { -#if MLLM_FP16_SUPPORTED // Ensure mllm_fp16_t is usable - // Call SME F16 x F16^T -> F32 - sme_gemm_f16f16_f32( - static_cast(p_src0_base), - static_cast(p_src1_base), - p_dst_base, - M, K, N, lda, ldb, ldc); - sme_path_taken = true; -#else -// std::cerr << "Warning: MLLM_TYPE_F16 SME path requested but mllm_fp16_t support is limited/dummy." << std::endl; -#endif - } - - if (sme_path_taken) { - if (support_bias && bias != nullptr) { -// Bias addition: Iterating through the single batch/head/sequence -// Assuming bias is [1,1,1,N] or compatible and needs to be added to each M row of C. -// The existing bias logic seems to add bias[0,0,0,n] to C[b,h,s,n]. -// For B=1,H=1, this means C[0,0,s,n] += bias[0,0,0,n]. -// This effectively adds the bias vector to each row of the computed C matrix. -// This can be parallelized if M is large. 
-#pragma omp parallel for num_threads(thread_count) - for (int m_idx = 0; m_idx < M; ++m_idx) { - mllm_add_fp32(dst->ptrAt(0, 0, m_idx, 0), - bias->ptrAt(0, 0, 0, 0), // Assuming bias is [1,1,1,N] - dst->ptrAt(0, 0, m_idx, 0), - N); // N is dst->dimension() in this context if dst is M seq, N dim - } - } - return MLLM_NO_ERROR; - } - } -#endif // __ARM_FEATURE_SME - // ----------- END SME Path Check ----------- - auto vec_dot_type = type_traits[src1_dtype].vec_dot_type; auto vec_dot = type_traits[src1_dtype].vec_dot; auto x_to_vec_dot_type = type_traits[vec_dot_type].from_float; auto from_float_to_mat = type_traits[vec_dot_type].from_float_to_mat; - mllm_gemv_func const gemv = type_traits[src1_dtype].gemv; - mllm_gemm_func const gemm = type_traits[src1_dtype].gemm; + gemv_func const gemv = type_traits[src1_dtype].gemv; + gemm_func const gemm = type_traits[src1_dtype].gemm; auto blck_size_interleave = type_traits[src1_dtype].blck_size_interleave; auto src1_type_size = type_size(src1_dtype); @@ -192,6 +127,7 @@ ErrorCode mat_mul(Tensor *src0, Tensor *src1, Tensor *dst, bool support_bias, Te dst->dtype(), ld_src1 / src1_blck_size, ld_src0 / src0_blck_size, ld_dst / blck_size(dst->dtype())) && dst->dtypeAt(0, 0, 0, 0) == MLLM_TYPE_F32 && dst->ctype() == BSHD && dst->aggregatedTensors().empty()) { + int is_0 = (src1->batch() == 1 && src1->head() == 1 && src1->batch() != src0->batch()) ? 
0 : 1; #pragma omp parallel for collapse(3) num_threads(thread_count) for (int64_t b = 0; b < dst->batch(); b++) { for (int64_t h = 0; h < dst->head(); h++) { @@ -199,7 +135,7 @@ ErrorCode mat_mul(Tensor *src0, Tensor *src1, Tensor *dst, bool support_bias, Te llamafile_sgemm( N, M, K / blck_size(src1->dtype()), (char *)src1->rawHostPtr() - + src1->offset(b, h, 0, 0) * src1_type_size / src1_blck_size, + + src1->offset(b * is_0, h, 0, 0) * src1_type_size / src1_blck_size, ld_src1 / src1_blck_size, (char *)src0->rawHostPtr() + src0->offset(b, h, 0, 0) * src0_type_size / src0_blck_size, diff --git a/src/backends/cpu/compute/Matmul.hpp b/mllm/backends/cpu/compute/Matmul.hpp similarity index 97% rename from src/backends/cpu/compute/Matmul.hpp rename to mllm/backends/cpu/compute/Matmul.hpp index 7cafe4766..34336b7a6 100644 --- a/src/backends/cpu/compute/Matmul.hpp +++ b/mllm/backends/cpu/compute/Matmul.hpp @@ -5,7 +5,9 @@ #ifndef MLLM_MATMUL_HPP #define MLLM_MATMUL_HPP -#include "VecDot.hpp" +#include "Tensor.hpp" +#include "Types.hpp" +#include "backends/cpu/third_party/ggml/Quantize.hpp" using namespace mllm; ErrorCode mat_mul(Tensor *src0_, Tensor *src1, Tensor *dst, bool support_bias, Tensor *bias = nullptr, bool transpose0 = false, bool transpose1 = true, int thread_count = 4); diff --git a/src/backends/cpu/compute/MatmulElastic.cpp b/mllm/backends/cpu/compute/MatmulElastic.cpp similarity index 97% rename from src/backends/cpu/compute/MatmulElastic.cpp rename to mllm/backends/cpu/compute/MatmulElastic.cpp index 1bd6cd078..5cc3fbad4 100644 --- a/src/backends/cpu/compute/MatmulElastic.cpp +++ b/mllm/backends/cpu/compute/MatmulElastic.cpp @@ -4,9 +4,9 @@ #include "MatmulElastic.hpp" #include "Types.hpp" -#include "VecDotType.hpp" -// #include -#include "LlamafileSGEMM.hpp" +#include "backends/cpu/third_party/ggml/VecDotType.hpp" +#include "backends/cpu/third_party/ggml/QuantizeFP16.hpp" +#include "backends/cpu/third_party/ggml/GemmLlamafile.hpp" #include #include @@ 
-31,8 +31,8 @@ ErrorCode mat_mul_elastic(Tensor *src0, Tensor *src1, Tensor *dst, bool support_ auto x_to_vec_dot_type = type_traits[vec_dot_type].from_float; auto from_float_to_mat = type_traits[vec_dot_type].from_float_to_mat; - mllm_gemv_func const gemv = type_traits[src1_dtype].gemv; - mllm_gemm_func const gemm = type_traits[src1_dtype].gemm; + gemv_func const gemv = type_traits[src1_dtype].gemv; + gemm_func const gemm = type_traits[src1_dtype].gemm; auto blck_size_interleave = type_traits[src1_dtype].blck_size_interleave; auto src1_type_size = type_size(src1_dtype); auto src1_blck_size = blck_size(src1_dtype); diff --git a/src/backends/cpu/compute/MatmulElastic.hpp b/mllm/backends/cpu/compute/MatmulElastic.hpp similarity index 90% rename from src/backends/cpu/compute/MatmulElastic.hpp rename to mllm/backends/cpu/compute/MatmulElastic.hpp index b098451c2..47c67df62 100644 --- a/src/backends/cpu/compute/MatmulElastic.hpp +++ b/mllm/backends/cpu/compute/MatmulElastic.hpp @@ -5,7 +5,8 @@ #ifndef MLLM_MATMULELASTIC_HPP #define MLLM_MATMULELASTIC_HPP -#include "VecDot.hpp" +#include "Tensor.hpp" +#include "Types.hpp" using namespace mllm; ErrorCode mat_mul_elastic(Tensor *src0_, Tensor *src1, Tensor *dst, bool support_bias, Tensor *bias = nullptr, int activate_input_dim = -1, int activate_output_dim = -1, bool transpose0 = false, bool transpose1 = true, int thread_count = 4); diff --git a/src/backends/cpu/compute/MatmulSparse.cpp b/mllm/backends/cpu/compute/MatmulSparse.cpp similarity index 98% rename from src/backends/cpu/compute/MatmulSparse.cpp rename to mllm/backends/cpu/compute/MatmulSparse.cpp index 7f1d56089..88145bcf0 100644 --- a/src/backends/cpu/compute/MatmulSparse.cpp +++ b/mllm/backends/cpu/compute/MatmulSparse.cpp @@ -4,9 +4,9 @@ #include "MatmulSparse.hpp" #include "Types.hpp" -#include "VecDotType.hpp" +#include "backends/cpu/third_party/ggml/VecDotType.hpp" // #include -#include "LlamafileSGEMM.hpp" +#include 
"backends/cpu/third_party/ggml/GemmLlamafile.hpp" #include #ifdef __ARM_NEON diff --git a/src/backends/cpu/compute/MatmulSparse.hpp b/mllm/backends/cpu/compute/MatmulSparse.hpp similarity index 88% rename from src/backends/cpu/compute/MatmulSparse.hpp rename to mllm/backends/cpu/compute/MatmulSparse.hpp index ba4798cf2..1523c8374 100644 --- a/src/backends/cpu/compute/MatmulSparse.hpp +++ b/mllm/backends/cpu/compute/MatmulSparse.hpp @@ -5,7 +5,8 @@ #ifndef MLLM_MATMULSPARSE_HPP #define MLLM_MATMULSPARSE_HPP -#include "VecDot.hpp" +#include "Tensor.hpp" +#include "Types.hpp" using namespace mllm; ErrorCode sparse_mat_mul_id(Tensor *x, Tensor *W, Tensor *ids, Tensor *dst, int thread_count = 4); diff --git a/src/backends/cpu/compute/Pooling.cpp b/mllm/backends/cpu/compute/Pooling.cpp similarity index 99% rename from src/backends/cpu/compute/Pooling.cpp rename to mllm/backends/cpu/compute/Pooling.cpp index 18444eb00..633587a85 100644 --- a/src/backends/cpu/compute/Pooling.cpp +++ b/mllm/backends/cpu/compute/Pooling.cpp @@ -3,6 +3,7 @@ // #include "Pooling.hpp" +#include "backends/cpu/third_party/ggml/VecDotFP32.hpp" void avgpool2d_fp32_VALID(Tensor *input, Tensor *output, int kernel_h, int kernel_w, int stride_h, int stride_w, int thread_count) { int in_height = input->head(); int in_width = input->dimension(); diff --git a/src/backends/cpu/compute/Pooling.hpp b/mllm/backends/cpu/compute/Pooling.hpp similarity index 94% rename from src/backends/cpu/compute/Pooling.hpp rename to mllm/backends/cpu/compute/Pooling.hpp index e58b0f94f..e30d8c5b7 100644 --- a/src/backends/cpu/compute/Pooling.hpp +++ b/mllm/backends/cpu/compute/Pooling.hpp @@ -5,7 +5,8 @@ #ifndef POOLING_HPP #define POOLING_HPP -#include "VecDot.hpp" +#include "Tensor.hpp" +#include "Types.hpp" using namespace mllm; void avgpool2d_fp32_VALID(Tensor *input, Tensor *output, int kernel_h, int kernel_w, int stride_h, int stride_w, int thread_count = 4); diff --git a/src/backends/cpu/compute/SIMDMemory.hpp 
b/mllm/backends/cpu/compute/SIMDMemory.hpp similarity index 97% rename from src/backends/cpu/compute/SIMDMemory.hpp rename to mllm/backends/cpu/compute/SIMDMemory.hpp index 4846521ea..0e632a7b1 100644 --- a/src/backends/cpu/compute/SIMDMemory.hpp +++ b/mllm/backends/cpu/compute/SIMDMemory.hpp @@ -1,5 +1,6 @@ // 平台检测头文件 +#include #if defined(__AVX__) #include #elif defined(__ARM_NEON) diff --git a/mllm/backends/cpu/compute/SageAttention.hpp b/mllm/backends/cpu/compute/SageAttention.hpp new file mode 100644 index 000000000..69990d58a --- /dev/null +++ b/mllm/backends/cpu/compute/SageAttention.hpp @@ -0,0 +1,1610 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// --- SIMD Intrinsics --- +#ifdef __AVX2__ +#include +#include +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) +#include +#if defined(__ARM_FP16_FORMAT_IEEE) && !defined(_MSC_VER) +#include +#endif +#endif + +#include "Types.hpp" +#include "backends/cpu/third_party/ggml/QuantizeFP16.hpp" +#include "../compute/SageQuantize.hpp" + +#define SAGE_V_I8 + +#ifdef SAGE_V_I8 + +namespace sage_attn_cpu { +const int QK_K_BLOCK_SIZE = QK8_0F; +#define NEG_INF std::numeric_limits::lowest() + +template +inline float to_float(T val); +template <> +inline float to_float(float val) { + return val; +} +template <> +inline float to_float(mllm_fp16_t val) { + return MLLM_FP16_TO_FP32(val); +} + +#if defined(_WIN32) +#include +inline void aligned_alloc(void **ptr, size_t r, size_t a) { + *ptr = _aligned_malloc(r, a); +} +inline void aligned_free(void *ptr) { + _aligned_free(ptr); +} +#else +inline void aligned_alloc(void **ptr, size_t r, size_t a) { + if (a % sizeof(void *) != 0 || (a & (a - 1)) != 0 || posix_memalign(ptr, a, r) != 0) *ptr = nullptr; +} +inline void aligned_free(void *ptr) { + free(ptr); +} +#endif + +#ifdef __AVX2__ +inline float _mm256_hmax_ps(__m256 x) { + __m128 lo = _mm256_castps256_ps128(x); + __m128 hi = _mm256_extractf128_ps(x, 1); + 
__m128 max_val = _mm_max_ps(lo, hi); + max_val = _mm_max_ps(max_val, _mm_shuffle_ps(max_val, max_val, _MM_SHUFFLE(0, 0, 2, 2))); + max_val = _mm_max_ps(max_val, _mm_shuffle_ps(max_val, max_val, _MM_SHUFFLE(0, 0, 0, 1))); + return _mm_cvtss_f32(max_val); +} +inline int32_t hsum_i32(__m256i v) { + __m128i vlo = _mm256_castsi256_si128(v); + __m128i vhi = _mm256_extracti128_si256(v, 1); + __m128i vsum = _mm_add_epi32(vlo, vhi); + vsum = _mm_add_epi32(vsum, _mm_shuffle_epi32(vsum, _MM_SHUFFLE(1, 0, 3, 2))); + vsum = _mm_add_epi32(vsum, _mm_shuffle_epi32(vsum, _MM_SHUFFLE(2, 3, 0, 1))); + return _mm_cvtsi128_si32(vsum); +} +inline __m256 load_and_convert_to_fp32_vec(const float *ptr) { + return _mm256_loadu_ps(ptr); +} +#ifdef __F16C__ +inline __m256 load_and_convert_to_fp32_vec(const mllm_fp16_t *ptr) { + return _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)ptr)); +} +#endif +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) +inline void load_and_convert_to_fp32x4x2(const float *ptr, float32x4_t &out_lo, float32x4_t &out_hi) { + out_lo = vld1q_f32(ptr); + out_hi = vld1q_f32(ptr + 4); +} +#if defined(__ARM_FP16_FORMAT_IEEE) +inline void load_and_convert_to_fp32x4x2(const mllm_fp16_t *ptr, float32x4_t &out_lo, float32x4_t &out_hi) { + float16x8_t v_f16 = vld1q_f16(reinterpret_cast(ptr)); + out_lo = vcvt_f32_f16(vget_low_f16(v_f16)); + out_hi = vcvt_f32_f16(vget_high_f16(v_f16)); +} +#endif +#endif + +void quantize_row_per_group_simd(const float *float_row, int8_t *int8_row, float *scales, int dim_size, float sm_scale, float *temp_buf) { + const int num_groups = dim_size / QK_K_BLOCK_SIZE; + for (int g = 0; g < num_groups; ++g) { + const int group_start_idx = g * QK_K_BLOCK_SIZE; + for (int d = 0; d < QK_K_BLOCK_SIZE; ++d) temp_buf[d] = float_row[group_start_idx + d] * sm_scale; + float max_abs_val = 0.0f; +#if defined(__AVX2__) + __m256 max_vec = _mm256_setzero_ps(); + const __m256 abs_mask = _mm256_castsi256_ps(_mm256_set1_epi32(0x7FFFFFFF)); + int d = 0; + for (; 
d <= QK_K_BLOCK_SIZE - 8; d += 8) max_vec = _mm256_max_ps(max_vec, _mm256_and_ps(_mm256_loadu_ps(temp_buf + d), abs_mask)); + max_abs_val = _mm256_hmax_ps(max_vec); + for (; d < QK_K_BLOCK_SIZE; ++d) max_abs_val = std::max(max_abs_val, fabsf(temp_buf[d])); +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + float32x4_t max_vec = vdupq_n_f32(0.0f); + int d = 0; + for (; d <= QK_K_BLOCK_SIZE - 4; d += 4) max_vec = vmaxq_f32(max_vec, vabsq_f32(vld1q_f32(temp_buf + d))); + max_abs_val = vmaxvq_f32(max_vec); + for (; d < QK_K_BLOCK_SIZE; ++d) max_abs_val = std::max(max_abs_val, fabsf(temp_buf[d])); +#else + for (int d = 0; d < QK_K_BLOCK_SIZE; ++d) max_abs_val = std::max(max_abs_val, fabsf(temp_buf[d])); +#endif + const float scale = (max_abs_val > 1e-9f) ? max_abs_val / 127.0f : 0.0f; + scales[g] = scale; + const float inv_scale = (scale > 1e-9f) ? 1.0f / scale : 0.0f; + int8_t *group_int8_row = int8_row + group_start_idx; +#if defined(__AVX2__) + __m256 inv_scale_vec = _mm256_set1_ps(inv_scale); + d = 0; + for (; d <= QK_K_BLOCK_SIZE - 8; d += 8) { + __m256i val_i32 = _mm256_cvtps_epi32(_mm256_mul_ps(_mm256_loadu_ps(temp_buf + d), inv_scale_vec)); + __m128i val_i16 = _mm_packs_epi32(_mm256_castsi256_si128(val_i32), _mm256_extracti128_si256(val_i32, 1)); + __m128i val_i8 = _mm_packs_epi16(val_i16, val_i16); + *(int64_t *)(group_int8_row + d) = _mm_cvtsi128_si64(val_i8); + } + for (; d < QK_K_BLOCK_SIZE; ++d) group_int8_row[d] = static_cast(roundf(temp_buf[d] * inv_scale)); +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + float32x4_t inv_scale_vec = vdupq_n_f32(inv_scale); + d = 0; + for (; d <= QK_K_BLOCK_SIZE - 16; d += 16) { + int32x4_t i32_0 = vcvtaq_s32_f32(vmulq_f32(vld1q_f32(temp_buf + d + 0), inv_scale_vec)); + int32x4_t i32_1 = vcvtaq_s32_f32(vmulq_f32(vld1q_f32(temp_buf + d + 4), inv_scale_vec)); + int32x4_t i32_2 = vcvtaq_s32_f32(vmulq_f32(vld1q_f32(temp_buf + d + 8), inv_scale_vec)); + int32x4_t i32_3 = vcvtaq_s32_f32(vmulq_f32(vld1q_f32(temp_buf 
+ d + 12), inv_scale_vec)); + int16x8_t i16_0 = vcombine_s16(vqmovn_s32(i32_0), vqmovn_s32(i32_1)); + int16x8_t i16_1 = vcombine_s16(vqmovn_s32(i32_2), vqmovn_s32(i32_3)); + vst1q_s8(group_int8_row + d, vcombine_s8(vqmovn_s16(i16_0), vqmovn_s16(i16_1))); + } + for (; d < QK_K_BLOCK_SIZE; ++d) group_int8_row[d] = static_cast(roundf(temp_buf[d] * inv_scale)); +#else + for (int d = 0; d < QK_K_BLOCK_SIZE; ++d) group_int8_row[d] = static_cast(roundf(temp_buf[d] * inv_scale)); +#endif + } +} + +template +void compute_mean_and_quantize_tensor( + const T *tensor_bshd, float *mean_tensor_bhd, int8_t *quant_global_bhsd, + float *scale_global_bhsn, int batch_size, int head_size, int seq_size, + int dim_size, int threads, float *temp_sum, float *temp_smoothed, + float *temp_head_buffer) { +#pragma omp parallel for num_threads(threads) collapse(2) + for (int b = 0; b < batch_size; ++b) { + for (int h = 0; h < head_size; ++h) { + const int thread_id = omp_get_thread_num(); + float *thread_sum_buf = temp_sum + thread_id * dim_size; + float *thread_smoothed_buf = temp_smoothed + thread_id * dim_size; + float *thread_head_buf_bhsd = temp_head_buffer + thread_id * seq_size * dim_size; + + float *target_mean = mean_tensor_bhd + (b * head_size + h) * dim_size; + const int num_blocks = dim_size / QK_K_BLOCK_SIZE; + int8_t *target_quant_bhsd = quant_global_bhsd + (b * head_size + h) * seq_size * dim_size; + float *target_scale_bhsn = scale_global_bhsn + (b * head_size + h) * seq_size * num_blocks; + + memset(thread_sum_buf, 0, dim_size * sizeof(float)); + + for (int s = 0; s < seq_size; ++s) { + const T *row_global_bshd = tensor_bshd + (size_t)b * seq_size * head_size * dim_size + (size_t)s * head_size * dim_size + (size_t)h * dim_size; + float *row_buffered_bhsd = thread_head_buf_bhsd + s * dim_size; +#if defined(__AVX2__) + int d = 0; + for (; d <= dim_size - 8; d += 8) { + const __m256 val_vec = load_and_convert_to_fp32_vec(row_global_bshd + d); + _mm256_storeu_ps(row_buffered_bhsd 
+ d, val_vec); + __m256 sum_vec = _mm256_loadu_ps(thread_sum_buf + d); + sum_vec = _mm256_add_ps(sum_vec, val_vec); + _mm256_storeu_ps(thread_sum_buf + d, sum_vec); + } + for (; d < dim_size; ++d) { + float val = to_float(row_global_bshd[d]); + row_buffered_bhsd[d] = val; + thread_sum_buf[d] += val; + } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + int d = 0; + for (; d <= dim_size - 8; d += 8) { + float32x4_t val_lo, val_hi; + load_and_convert_to_fp32x4x2(row_global_bshd + d, val_lo, val_hi); + vst1q_f32(row_buffered_bhsd + d, val_lo); + vst1q_f32(row_buffered_bhsd + d + 4, val_hi); + float32x4_t sum_lo = vld1q_f32(thread_sum_buf + d); + float32x4_t sum_hi = vld1q_f32(thread_sum_buf + d + 4); + vst1q_f32(thread_sum_buf + d, vaddq_f32(sum_lo, val_lo)); + vst1q_f32(thread_sum_buf + d + 4, vaddq_f32(sum_hi, val_hi)); + } + for (; d < dim_size; ++d) { + float val = to_float(row_global_bshd[d]); + row_buffered_bhsd[d] = val; + thread_sum_buf[d] += val; + } +#else + for (int d = 0; d < dim_size; ++d) { + float val = to_float(row_global_bshd[d]); + row_buffered_bhsd[d] = val; + thread_sum_buf[d] += val; + } +#endif + } + + const float inv_seq_len = 1.0f / seq_size; +#if defined(__AVX2__) + const __m256 inv_len_vec = _mm256_set1_ps(inv_seq_len); + int d = 0; + for (; d <= dim_size - 8; d += 8) _mm256_storeu_ps(target_mean + d, _mm256_mul_ps(_mm256_loadu_ps(thread_sum_buf + d), inv_len_vec)); + for (; d < dim_size; ++d) target_mean[d] = thread_sum_buf[d] * inv_seq_len; +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + const float32x4_t inv_len_vec = vdupq_n_f32(inv_seq_len); + int d = 0; + for (; d <= dim_size - 4; d += 4) vst1q_f32(target_mean + d, vmulq_f32(vld1q_f32(thread_sum_buf + d), inv_len_vec)); + for (; d < dim_size; ++d) target_mean[d] = thread_sum_buf[d] * inv_seq_len; +#else + for (int d = 0; d < dim_size; ++d) target_mean[d] = thread_sum_buf[d] * inv_seq_len; +#endif + + for (int s = 0; s < seq_size; ++s) { + const float *row_buffered_bhsd = 
thread_head_buf_bhsd + s * dim_size; +#if defined(__AVX2__) + int d = 0; + for (; d <= dim_size - 8; d += 8) _mm256_storeu_ps(thread_smoothed_buf + d, _mm256_sub_ps(_mm256_loadu_ps(row_buffered_bhsd + d), _mm256_loadu_ps(target_mean + d))); + for (; d < dim_size; ++d) thread_smoothed_buf[d] = row_buffered_bhsd[d] - target_mean[d]; +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + int d = 0; + for (; d <= dim_size - 4; d += 4) vst1q_f32(thread_smoothed_buf + d, vsubq_f32(vld1q_f32(row_buffered_bhsd + d), vld1q_f32(target_mean + d))); + for (; d < dim_size; ++d) thread_smoothed_buf[d] = row_buffered_bhsd[d] - target_mean[d]; +#else + for (int d = 0; d < dim_size; ++d) thread_smoothed_buf[d] = row_buffered_bhsd[d] - target_mean[d]; +#endif + quantize_row_per_group_simd(thread_smoothed_buf, target_quant_bhsd + s * dim_size, target_scale_bhsn + s * num_blocks, dim_size, 1.0f, thread_sum_buf); + } + } + } +} + +class WorkspaceManager { +public: + WorkspaceManager() = default; + ~WorkspaceManager() { + for (auto &p : workspace_) + if (p) aligned_free(p); + } + void **get_workspace(const std::vector &s) { + if (workspace_.empty()) { + workspace_.resize(s.size(), nullptr); + current_sizes_.resize(s.size(), 0); + } + for (size_t i = 0; i < s.size(); ++i) { + if (s[i] > current_sizes_[i]) { + if (workspace_[i]) aligned_free(workspace_[i]); + aligned_alloc(&workspace_[i], s[i], 64); + current_sizes_[i] = s[i]; + } + } + return workspace_.data(); + } + +private: + std::vector workspace_; + std::vector current_sizes_; +}; + +template +struct SAGE_CPU_IMPL { + using TQ = float; + using TKV = KVDtype; + using TO = float; + int32_t Br, Bc, Q_Head, KV_Head, threads; + float *acc_o, *acc_s, *logsum, *scoremax, *scoremax_prev, *score_scale, *score_sum; + int8_t *q_quant, *k_quant_global, *v_quant_global; + float *q_scale, *k_scale_global, *v_scale_global, *k_smoothed_buf, *q_scaled_buf; + + int8_t *p_quant; + float *p_scale; + + void configure(int32_t Br_, int32_t Bc_, int32_t 
Q_H, int32_t KV_H, int32_t T) { + Br = Br_; + Bc = Bc_; + Q_Head = Q_H; + KV_Head = KV_H; + threads = T; + } + + void init_workspace(void **ws) { + acc_o = static_cast(ws[0]); + acc_s = static_cast(ws[1]); + logsum = static_cast(ws[2]); + scoremax = static_cast(ws[3]); + scoremax_prev = static_cast(ws[4]); + score_scale = static_cast(ws[5]); + score_sum = static_cast(ws[6]); + q_quant = static_cast(ws[7]); + k_quant_global = static_cast(ws[8]); + q_scale = static_cast(ws[9]); + k_scale_global = static_cast(ws[10]); + k_smoothed_buf = static_cast(ws[11]); + q_scaled_buf = static_cast(ws[12]); + v_quant_global = static_cast(ws[15]); + v_scale_global = static_cast(ws[16]); + + p_quant = static_cast(ws[18]); + p_scale = static_cast(ws[19]); + } + + void init_temp(float *l, float *sm, float *o, int Br_f, int D) { + for (int i = 0; i < Br_f; ++i) { + l[i] = 0.0f; + sm[i] = -std::numeric_limits::infinity(); + } + if (o) memset(o, 0, Br_f * D * sizeof(float)); + } + + void quantize_p_rows(int Br_f, int Bc_f, const float *p_float_block, int8_t *p_quant_block, float *p_scale_block) { + for (int r = 0; r < Br_f; ++r) { + const float *p_float_row = p_float_block + r * Bc; + int8_t *p_quant_row = p_quant_block + r * Bc; + + float max_abs_val = 0.0f; + for (int c = 0; c < Bc_f; ++c) { + max_abs_val = std::max(max_abs_val, fabsf(p_float_row[c])); + } + + const float scale = (max_abs_val > 1e-9f) ? max_abs_val / 127.0f : 0.0f; + p_scale_block[r] = scale; + const float inv_scale = (scale > 1e-9f) ? 
1.0f / scale : 0.0f; + + for (int c = 0; c < Bc_f; ++c) { + p_quant_row[c] = static_cast(roundf(p_float_row[c] * inv_scale)); + } + } + } + + void sage_attn_prefill(const TQ *Q, TO *O, const float *K_mean, const float *V_mean, int32_t batch_size, int32_t head_size, int32_t seq_size_q, int32_t seq_size_k, int32_t dim_size, bool causal) { + const int32_t Tr = (seq_size_q + Br - 1) / Br, Tc = (seq_size_k + Bc - 1) / Bc; + const float local_scale = 1.0f / sqrtf(static_cast(dim_size)); + const int32_t kv_group = (Q_Head > 0 && KV_Head > 0) ? Q_Head / KV_Head : 1; + const int32_t num_k_blocks = dim_size / QK_K_BLOCK_SIZE; + for (int32_t b = 0; b < batch_size; ++b) { +#pragma omp parallel for num_threads(threads) schedule(dynamic, 1) + for (int32_t h = 0; h < head_size; ++h) { + const int32_t tid = omp_get_thread_num(); + const int32_t kvh = h / kv_group; + float *po = acc_o + tid * Br * dim_size, *ps = acc_s + tid * Br * Bc; + float *plog = logsum + tid * Br, *pmax = scoremax + tid * Br, *pmax_p = scoremax_prev + tid * Br; + float *pscale = score_scale + tid * Br, *psum = score_sum + tid * Br; + int8_t *p_q_q = q_quant + tid * Br * dim_size; + const int8_t *p_k_q_g = k_quant_global + (b * KV_Head + kvh) * seq_size_k * dim_size; + const int8_t *p_v_q_g = v_quant_global + (b * KV_Head + kvh) * seq_size_k * dim_size; + float *p_q_s = q_scale + tid * Br * num_k_blocks; + const float *p_k_s_g = k_scale_global + (b * KV_Head + kvh) * seq_size_k * num_k_blocks; + const float *p_v_s_g = v_scale_global + (b * KV_Head + kvh) * seq_size_k * num_k_blocks; + float *p_q_scaled = q_scaled_buf + tid * dim_size; + const float *p_V_m = V_mean + (b * KV_Head + kvh) * dim_size; + + int8_t *p_p_q = p_quant + tid * Br * Bc; + float *p_p_s = p_scale + tid * Br; + + for (int32_t tr = 0; tr < Tr; ++tr) { + int32_t Br_f = std::min(Br, seq_size_q - tr * Br); + init_temp(plog, pmax, po, Br_f, dim_size); + const TQ *tile_q_bshd = Q + (size_t)b * seq_size_q * head_size * dim_size + (size_t)tr * Br * 
head_size * dim_size + (size_t)h * dim_size; + for (int r = 0; r < Br_f; ++r) quantize_row_per_group_simd(tile_q_bshd + (size_t)r * head_size * dim_size, p_q_q + r * dim_size, p_q_s + r * num_k_blocks, dim_size, local_scale, p_q_scaled); + for (int32_t tc = 0; tc < Tc; ++tc) { + int32_t Bc_f = std::min(Bc, seq_size_k - tc * Bc); + const int kv_offset = seq_size_k - seq_size_q; + quantize_and_mma0_sdot(Br_f, Bc_f, p_q_q, p_k_q_g + tc * Bc * dim_size, ps, p_q_s, p_k_s_g + tc * Bc * num_k_blocks, dim_size, tr * Br + kv_offset, tc * Bc, causal); + softmax(Br_f, Bc_f, ps, pmax, pmax_p, pscale, psum, plog); + rescale(Br_f, po, pscale, dim_size); + + quantize_p_rows(Br_f, Bc_f, ps, p_p_q, p_p_s); + + const int8_t *v_q = p_v_q_g + tc * Bc * dim_size; + const float *v_s = p_v_s_g + tc * Bc * num_k_blocks; + + mma1(Br_f, Bc_f, p_p_q, p_p_s, v_q, v_s, po, dim_size); + } + TO *tile_o_bshd = O + (size_t)b * seq_size_q * head_size * dim_size + (size_t)tr * Br * head_size * dim_size + (size_t)h * dim_size; + scale_and_store(Br_f, po, plog, p_V_m, tile_o_bshd, head_size, dim_size); + } + } + } + } + void sage_attn_decode(const TQ *Q, TO *O, const float *K_mean, const float *V_mean, int32_t batch_size, int32_t head_size, int32_t seq_size_k, int32_t dim_size, bool causal) { + const int32_t Tc = (seq_size_k + Bc - 1) / Bc; + const float local_scale = 1.0f / sqrtf(static_cast(dim_size)); + const int32_t kv_group = (Q_Head > 0 && KV_Head > 0) ? 
Q_Head / KV_Head : 1; + const int32_t num_k_blocks = dim_size / QK_K_BLOCK_SIZE; +#pragma omp parallel for num_threads(threads) collapse(2) + for (int32_t b = 0; b < batch_size; ++b) { + for (int32_t h = 0; h < head_size; ++h) { + const int32_t Br_f = 1; + const int32_t tid = omp_get_thread_num(); + const int32_t kvh = h / kv_group; + float *po = acc_o + tid * Br_f * dim_size, *ps = acc_s + tid * Br_f * Bc; + float *plog = logsum + tid * Br_f, *pmax = scoremax + tid * Br_f, *pmax_p = scoremax_prev + tid * Br_f; + float *pscale = score_scale + tid * Br_f, *psum = score_sum + tid * Br_f; + int8_t *p_q_q = q_quant + tid * Br_f * dim_size; + const int8_t *p_k_q_g = k_quant_global + (b * KV_Head + kvh) * seq_size_k * dim_size; + const int8_t *p_v_q_g = v_quant_global + (b * KV_Head + kvh) * seq_size_k * dim_size; + float *p_q_s = q_scale + tid * Br_f * num_k_blocks; + const float *p_k_s_g = k_scale_global + (b * KV_Head + kvh) * seq_size_k * num_k_blocks; + const float *p_v_s_g = v_scale_global + (b * KV_Head + kvh) * seq_size_k * num_k_blocks; + float *p_q_scaled = q_scaled_buf + tid * dim_size; + const float *p_V_m = V_mean + (b * KV_Head + kvh) * dim_size; + + // [新增] 获取量化P矩阵的工作区指针 + int8_t *p_p_q = p_quant + tid * Br_f * Bc; + float *p_p_s = p_scale + tid * Br_f; + + const TQ *tile_q_bshd = Q + (size_t)b * 1 * head_size * dim_size + (size_t)0 * head_size * dim_size + (size_t)h * dim_size; + quantize_row_per_group_simd(tile_q_bshd, p_q_q, p_q_s, dim_size, local_scale, p_q_scaled); + init_temp(plog, pmax, po, Br_f, dim_size); + for (int32_t tc = 0; tc < Tc; ++tc) { + int32_t Bc_f = std::min(Bc, seq_size_k - tc * Bc); + quantize_and_mma0_sdot(Br_f, Bc_f, p_q_q, p_k_q_g + tc * Bc * dim_size, ps, p_q_s, p_k_s_g + tc * Bc * num_k_blocks, dim_size, seq_size_k - 1, tc * Bc, causal); + softmax(Br_f, Bc_f, ps, pmax, pmax_p, pscale, psum, plog); + rescale(Br_f, po, pscale, dim_size); + + quantize_p_rows(Br_f, Bc_f, ps, p_p_q, p_p_s); + + const int8_t *v_q = p_v_q_g + tc * Bc * 
dim_size; + const float *v_s = p_v_s_g + tc * Bc * num_k_blocks; + + mma1(Br_f, Bc_f, p_p_q, p_p_s, v_q, v_s, po, dim_size); + } + TO *tile_o_bshd = O + (size_t)b * 1 * head_size * dim_size + (size_t)0 * head_size * dim_size + (size_t)h * dim_size; + scale_and_store(Br_f, po, plog, p_V_m, tile_o_bshd, head_size, dim_size); + } + } + } + void quantize_and_mma0_sdot(int Br_f, int Bc_f, const int8_t *q_q, const int8_t *k_q, float *s, const float *q_s, const float *k_s, int D, int grs, int gcs, bool causal) { + const int num_k_blocks = D / QK_K_BLOCK_SIZE; + for (int r = 0; r < Br_f; ++r) + for (int c = 0; c < Bc_f; ++c) { + if (causal && (gcs + c) > (grs + r)) { + s[r * Bc + c] = NEG_INF; + continue; + } + const int8_t *q_ql = q_q + r * D, *k_ql = k_q + c * D; + const float *q_sl = q_s + r * num_k_blocks, *k_sl = k_s + c * num_k_blocks; + float total_f32 = 0.0f; + for (int g = 0; g < num_k_blocks; ++g) { + const int g_start = g * QK_K_BLOCK_SIZE; + int32_t g_dot = 0; +#if defined(__AVX2__) + __m256i acc_i32_v = _mm256_setzero_si256(); + int d = 0; + for (; d <= QK_K_BLOCK_SIZE - 16; d += 16) { + __m128i q_i8_v = _mm_loadu_si128((const __m128i *)(q_ql + g_start + d)); + __m128i k_i8_v = _mm_loadu_si128((const __m128i *)(k_ql + g_start + d)); + __m256i q_i16_v = _mm256_cvtepi8_epi16(q_i8_v); + __m256i k_i16_v = _mm256_cvtepi8_epi16(k_i8_v); + __m256i prod_i32_v = _mm256_madd_epi16(k_i16_v, q_i16_v); + acc_i32_v = _mm256_add_epi32(acc_i32_v, prod_i32_v); + } + g_dot = hsum_i32(acc_i32_v); + for (; d < QK_K_BLOCK_SIZE; ++d) g_dot += (q_ql + g_start)[d] * (k_ql + g_start)[d]; +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) && defined(__ARM_FEATURE_DOTPROD) + int32x4_t acc_i32_vec = vdupq_n_s32(0); + int d = 0; + for (; d <= QK_K_BLOCK_SIZE - 16; d += 16) acc_i32_vec = vdotq_s32(acc_i32_vec, vld1q_s8(q_ql + g_start + d), vld1q_s8(k_ql + g_start + d)); + g_dot = vaddvq_s32(acc_i32_vec); + for (; d < QK_K_BLOCK_SIZE; ++d) g_dot += (q_ql + g_start)[d] * (k_ql + 
g_start)[d]; +#else + for (int d = 0; d < QK_K_BLOCK_SIZE; ++d) g_dot += (q_ql + g_start)[d] * (k_ql + g_start)[d]; +#endif + total_f32 += (float)g_dot * q_sl[g] * k_sl[g]; + } + s[r * Bc + c] = total_f32; + } + } + void softmax(int Br_f, int Bc_f, float *acc_s, float *sm, float *sm_p, float *ss, float *sum, float *l) { + memcpy(sm_p, sm, Br_f * sizeof(float)); + for (int r = 0; r < Br_f; ++r) { + float *row = acc_s + r * Bc, cmax = sm[r]; + for (int c = 0; c < Bc_f; ++c) cmax = std::max(cmax, row[c]); + sm[r] = cmax; + } + for (int r = 0; r < Br_f; ++r) ss[r] = expf(sm_p[r] - sm[r]); + for (int r = 0; r < Br_f; ++r) { + float *row = acc_s + r * Bc; + float smax = sm[r], s = 0.f; + for (int c = 0; c < Bc_f; ++c) row[c] = (row[c] > NEG_INF / 2) ? (s += row[c] = expf(row[c] - smax), row[c]) : 0.f; + sum[r] = s; + } + for (int r = 0; r < Br_f; ++r) l[r] = l[r] * ss[r] + sum[r]; + } + void rescale(int Br_f, float *acc_o, const float *ss, int D) { + for (int r = 0; r < Br_f; ++r) { + float s_val = ss[r], *r_ptr = acc_o + r * D; +#if defined(__AVX2__) + __m256 s_vec = _mm256_set1_ps(s_val); + int d = 0; + for (; d <= D - 8; d += 8) _mm256_storeu_ps(r_ptr + d, _mm256_mul_ps(_mm256_loadu_ps(r_ptr + d), s_vec)); + for (; d < D; ++d) r_ptr[d] *= s_val; +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + float32x4_t s_vec = vdupq_n_f32(s_val); + int d = 0; + for (; d <= D - 4; d += 4) vst1q_f32(r_ptr + d, vmulq_f32(vld1q_f32(r_ptr + d), s_vec)); + for (; d < D; ++d) r_ptr[d] *= s_val; +#else + for (int d = 0; d < D; ++d) r_ptr[d] *= s_val; +#endif + } + } + + void mma1(int Br_f, int Bc_f, const int8_t *p_quant_block, const float *p_scale_block, const int8_t *v_quant_block, const float *v_scale_block, float *acc_o, int D) { + const int num_v_blocks = D / QK_K_BLOCK_SIZE; + for (int r = 0; r < Br_f; ++r) { + const float p_row_scale = p_scale_block[r]; + if (fabsf(p_row_scale) < 1e-9) continue; + + const int8_t *p_quant_row = p_quant_block + r * Bc; + float *o_row = acc_o + 
r * D; + + for (int c = 0; c < Bc_f; ++c) { + const int8_t p_quant_scalar = p_quant_row[c]; + if (p_quant_scalar == 0) continue; + + const float p_dequant_val = (float)p_quant_scalar * p_row_scale; + + const int8_t *v_q_row = v_quant_block + c * D; + const float *v_s_row = v_scale_block + c * num_v_blocks; + +#if defined(__AVX2__) && defined(__FMA__) + const __m256 p_vec = _mm256_set1_ps(p_dequant_val); + for (int g = 0; g < num_v_blocks; ++g) { + const int g_start = g * QK_K_BLOCK_SIZE; + const __m256 v_scale_vec = _mm256_set1_ps(v_s_row[g]); + for (int d_group = 0; d_group < QK_K_BLOCK_SIZE; d_group += 8) { + const int d = g_start + d_group; + __m128i v_i8_vec_part = _mm_loadl_epi64((const __m128i *)(v_q_row + d)); + __m256i v_i32_vec = _mm256_cvtepi8_epi32(v_i8_vec_part); + __m256 v_f32_vec = _mm256_cvtepi32_ps(v_i32_vec); + __m256 dequant_v_vec = _mm256_mul_ps(v_f32_vec, v_scale_vec); + __m256 o_vec = _mm256_loadu_ps(o_row + d); + o_vec = _mm256_fmadd_ps(p_vec, dequant_v_vec, o_vec); + _mm256_storeu_ps(o_row + d, o_vec); + } + } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + const float32x4_t p_vec = vdupq_n_f32(p_dequant_val); + for (int g = 0; g < num_v_blocks; ++g) { + const int g_start = g * QK_K_BLOCK_SIZE; + const float32x4_t v_scale_vec = vdupq_n_f32(v_s_row[g]); + int d = 0; + for (; d <= QK_K_BLOCK_SIZE - 8; d += 8) { + int8x8_t v_i8 = vld1_s8(v_q_row + g_start + d); + int16x8_t v_i16 = vmovl_s8(v_i8); + int32x4_t v_i32_lo = vmovl_s16(vget_low_s16(v_i16)); + int32x4_t v_i32_hi = vmovl_s16(vget_high_s16(v_i16)); + float32x4_t v_f32_lo = vcvtq_f32_s32(v_i32_lo); + float32x4_t v_f32_hi = vcvtq_f32_s32(v_i32_hi); + v_f32_lo = vmulq_f32(v_f32_lo, v_scale_vec); + v_f32_hi = vmulq_f32(v_f32_hi, v_scale_vec); + float32x4_t o_f32_lo = vld1q_f32(o_row + g_start + d); + float32x4_t o_f32_hi = vld1q_f32(o_row + g_start + d + 4); + // FMA: O += P * V_dequant + o_f32_lo = vfmaq_f32(o_f32_lo, p_vec, v_f32_lo); + o_f32_hi = vfmaq_f32(o_f32_hi, p_vec, 
v_f32_hi); + vst1q_f32(o_row + g_start + d, o_f32_lo); + vst1q_f32(o_row + g_start + d + 4, o_f32_hi); + } + for (; d < QK_K_BLOCK_SIZE; ++d) { + o_row[g_start + d] += p_dequant_val * ((float)v_q_row[g_start + d] * v_s_row[g]); + } + } +#else + for (int g = 0; g < num_v_blocks; ++g) { + const int g_start = g * QK_K_BLOCK_SIZE; + const float v_s = v_s_row[g]; + for (int d = 0; d < QK_K_BLOCK_SIZE; ++d) { + o_row[g_start + d] += p_dequant_val * ((float)v_q_row[g_start + d] * v_s); + } + } +#endif + } + } + } + + void scale_and_store(int Br_f, const float *acc_o, const float *logsum, const float *v_mean, TO *O, int H, int D) { + int o_stride = H * D; + for (int r = 0; r < Br_f; ++r) { + float inv_logsum = (logsum[r] > 1e-9f) ? 1.f / logsum[r] : 0.f; + const float *o_row = acc_o + r * D; + float *O_row = O + (size_t)r * o_stride; +#if defined(__AVX2__) && defined(__FMA__) + const __m256 inv_l_vec = _mm256_set1_ps(inv_logsum); + int d = 0; + for (; d <= D - 8; d += 8) { + const __m256 o_vec = _mm256_loadu_ps(o_row + d); + const __m256 vm_vec = _mm256_loadu_ps(v_mean + d); + _mm256_storeu_ps(O_row + d, _mm256_fmadd_ps(o_vec, inv_l_vec, vm_vec)); + } + for (; d < D; ++d) O_row[d] = o_row[d] * inv_logsum + v_mean[d]; +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + const float32x4_t inv_l_vec = vdupq_n_f32(inv_logsum); + int d = 0; + for (; d <= D - 4; d += 4) { + const float32x4_t o_vec = vld1q_f32(o_row + d); + const float32x4_t vm_vec = vld1q_f32(v_mean + d); + vst1q_f32(O_row + d, vfmaq_f32(vm_vec, o_vec, inv_l_vec)); + } + for (; d < D; ++d) O_row[d] = o_row[d] * inv_logsum + v_mean[d]; +#else + for (int d = 0; d < D; ++d) O_row[d] = o_row[d] * inv_logsum + v_mean[d]; +#endif + } + } +}; + +template +void sage_attention_forward_cpu_dispatch( + const float *Q, const void *K_in, const void *V_in, const float *K_mean_ext, + const float *V_mean_ext, float *O, int32_t batch_size, int32_t q_head, + int32_t kv_head, int32_t seq_size_q, int32_t seq_size_k, int32_t 
dim_size, + bool causal_mask, int32_t threads, int32_t br, int32_t bc, + int32_t cache_stride_s) { + if (dim_size % QK_K_BLOCK_SIZE != 0) { + std::cerr << "Error: dim_size must be divisible by QK_K_BLOCK_SIZE\n"; + return; + } + const int32_t num_k_blocks = dim_size / QK_K_BLOCK_SIZE; + + thread_local WorkspaceManager manager; + SAGE_CPU_IMPL op; + op.configure(br, bc, q_head, kv_head, threads); + + const int32_t current_br = (seq_size_q > 1) ? br : 1; + + const std::vector ws_sizes = { + (size_t)threads * current_br * dim_size * sizeof(float), // 0: acc_o + (size_t)threads * current_br * bc * sizeof(float), // 1: acc_s + (size_t)threads * current_br * sizeof(float), // 2: logsum + (size_t)threads * current_br * sizeof(float), // 3: scoremax + (size_t)threads * current_br * sizeof(float), // 4: scoremax_prev + (size_t)threads * current_br * sizeof(float), // 5: score_scale + (size_t)threads * current_br * sizeof(float), // 6: score_sum + (size_t)threads * current_br * dim_size * sizeof(int8_t), // 7: q_quant + (size_t)batch_size * kv_head * seq_size_k * dim_size * sizeof(int8_t), // 8: k_quant_global + (size_t)threads * current_br * num_k_blocks * sizeof(float), // 9: q_scale + (size_t)batch_size * kv_head * seq_size_k * num_k_blocks * sizeof(float), // 10: k_scale_global + (size_t)threads * dim_size * sizeof(float), // 11: k_smoothed_buf + (size_t)threads * dim_size * sizeof(float), // 12: q_scaled_buf + (size_t)threads * dim_size * sizeof(float), // 13: temp_k_sum + (size_t)threads * seq_size_k * dim_size * sizeof(float), // 14: temp_k_head_buffer + (size_t)batch_size * kv_head * seq_size_k * dim_size * sizeof(int8_t), // 15: v_quant_global + (size_t)batch_size * kv_head * seq_size_k * num_k_blocks * sizeof(float), // 16: v_scale_global + (size_t)threads * seq_size_k * dim_size * sizeof(float), // 17: temp_v_head_buffer + (size_t)threads * current_br * bc * sizeof(int8_t), // 18: p_quant [新增] + (size_t)threads * current_br * sizeof(float) // 19: p_scale [新增] + }; 
+ void **workspace = manager.get_workspace(ws_sizes); + op.init_workspace(workspace); + + if constexpr (std::is_same_v) { + const auto *k_blocks_bhsd = reinterpret_cast(K_in); + const auto *v_blocks_bhsd = reinterpret_cast(V_in); + +#pragma omp parallel for collapse(3) num_threads(threads) + for (int b = 0; b < batch_size; ++b) { + for (int h = 0; h < kv_head; ++h) { + for (int s = 0; s < seq_size_k; ++s) { + size_t sparse_offset = ((size_t)b * kv_head + h) * cache_stride_s + s; + const block_q8_0f *k_block_src = k_blocks_bhsd + sparse_offset * num_k_blocks; + const block_q8_0f *v_block_src = v_blocks_bhsd + sparse_offset * num_k_blocks; + + size_t dense_offset = ((size_t)b * kv_head + h) * seq_size_k + s; + int8_t *k_quant_dest = op.k_quant_global + dense_offset * dim_size; + float *k_scale_dest = op.k_scale_global + dense_offset * num_k_blocks; + int8_t *v_quant_dest = op.v_quant_global + dense_offset * dim_size; + float *v_scale_dest = op.v_scale_global + dense_offset * num_k_blocks; + + for (int g = 0; g < num_k_blocks; ++g) { + k_scale_dest[g] = k_block_src[g].scale; + memcpy(k_quant_dest + g * QK8_0F, k_block_src[g].qs, QK8_0F); + v_scale_dest[g] = v_block_src[g].scale; + memcpy(v_quant_dest + g * QK8_0F, v_block_src[g].qs, QK8_0F); + } + } + } + } + if (seq_size_q > 1) { + op.sage_attn_prefill(Q, O, K_mean_ext, V_mean_ext, batch_size, q_head, + seq_size_q, seq_size_k, dim_size, causal_mask); + } else { + op.sage_attn_decode(Q, O, K_mean_ext, V_mean_ext, batch_size, q_head, + seq_size_k, dim_size, causal_mask); + } + } else { + std::vector K_mean_internal((size_t)batch_size * kv_head * dim_size); + std::vector V_mean_internal((size_t)batch_size * kv_head * dim_size); + const auto *K = static_cast(K_in); + const auto *V = static_cast(V_in); + compute_mean_and_quantize_tensor( + K, K_mean_internal.data(), op.k_quant_global, op.k_scale_global, + batch_size, kv_head, seq_size_k, dim_size, threads, + (float *)workspace[13], (float *)workspace[11], (float 
*)workspace[14]); + compute_mean_and_quantize_tensor( + V, V_mean_internal.data(), op.v_quant_global, op.v_scale_global, + batch_size, kv_head, seq_size_k, dim_size, threads, + (float *)workspace[13], (float *)workspace[11], (float *)workspace[17]); + + if (seq_size_q > 1) { + op.sage_attn_prefill(Q, O, K_mean_internal.data(), V_mean_internal.data(), + batch_size, q_head, seq_size_q, seq_size_k, dim_size, + causal_mask); + } else { + op.sage_attn_decode(Q, O, K_mean_internal.data(), V_mean_internal.data(), + batch_size, q_head, seq_size_k, dim_size, + causal_mask); + } + } +} +} // namespace sage_attn_cpu + +#else +namespace sage_attn_cpu { +const int QK_K_BLOCK_SIZE = 128; +#define NEG_INF std::numeric_limits::lowest() + +template +inline float to_float(T val); +template <> +inline float to_float(float val) { + return val; +} +template <> +inline float to_float(mllm_fp16_t val) { + return MLLM_FP16_TO_FP32(val); +} + +// ======================= SIMD HELPERS ======================= +#ifdef __AVX2__ +inline float _mm256_hmax_ps(__m256 x) { + __m128 lo = _mm256_castps256_ps128(x); + __m128 hi = _mm256_extractf128_ps(x, 1); + __m128 max_val = _mm_max_ps(lo, hi); + max_val = _mm_max_ps(max_val, _mm_shuffle_ps(max_val, max_val, _MM_SHUFFLE(0, 0, 2, 2))); + max_val = _mm_max_ps(max_val, _mm_shuffle_ps(max_val, max_val, _MM_SHUFFLE(0, 0, 0, 1))); + return _mm_cvtss_f32(max_val); +} +inline float hsum_ps(__m256 v) { + __m128 vlow = _mm256_castps256_ps128(v); + __m128 vhigh = _mm256_extractf128_ps(v, 1); + vlow = _mm_add_ps(vlow, vhigh); + __m128 shuf = _mm_movehdup_ps(vlow); + __m128 sums = _mm_add_ps(vlow, shuf); + shuf = _mm_movehl_ps(shuf, sums); + sums = _mm_add_ss(sums, shuf); + return _mm_cvtss_f32(sums); +} +inline int32_t hsum_i32(__m256i v) { + __m128i vlo = _mm256_castsi256_si128(v); + __m128i vhi = _mm256_extracti128_si256(v, 1); + __m128i vsum = _mm_add_epi32(vlo, vhi); + vsum = _mm_add_epi32(vsum, _mm_shuffle_epi32(vsum, _MM_SHUFFLE(1, 0, 3, 2))); + vsum = 
_mm_add_epi32(vsum, _mm_shuffle_epi32(vsum, _MM_SHUFFLE(2, 3, 0, 1))); + return _mm_cvtsi128_si32(vsum); +} + +inline __m256 load_and_convert_to_fp32_vec(const float *ptr) { + return _mm256_loadu_ps(ptr); +} +#ifdef __F16C__ +inline __m256 load_and_convert_to_fp32_vec(const mllm_fp16_t *ptr) { + return _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)ptr)); +} +#endif +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) +inline float _vmaxvq_f32_hmax(float32x4_t x) { + return vmaxvq_f32(x); +} +inline void load_and_convert_to_fp32x4x2(const float *ptr, float32x4_t &out_lo, float32x4_t &out_hi) { + out_lo = vld1q_f32(ptr); + out_hi = vld1q_f32(ptr + 4); +} +#if defined(__ARM_FP16_FORMAT_IEEE) +inline void load_and_convert_to_fp32x4x2(const mllm_fp16_t *ptr, float32x4_t &out_lo, float32x4_t &out_hi) { + float16x8_t v_f16 = vld1q_f16(reinterpret_cast(ptr)); + out_lo = vcvt_f32_f16(vget_low_f16(v_f16)); + out_hi = vcvt_f32_f16(vget_high_f16(v_f16)); +} +#endif +#endif + +inline void aligned_alloc(void **ptr, size_t required_bytes, size_t align) { + if (align % sizeof(void *) != 0 || (align & (align - 1)) != 0) { + *ptr = nullptr; + return; + } + if (posix_memalign(ptr, align, required_bytes) != 0) { *ptr = nullptr; } +} +inline void aligned_free(void *ptr) { + free(ptr); +} + +inline void quantize_row_per_group_simd(const float *float_row, int8_t *int8_row, float *scales, int dim_size, float sm_scale, float *temp_buf) { + const int num_groups = dim_size / QK_K_BLOCK_SIZE; + + for (int g = 0; g < num_groups; ++g) { + const int group_start_idx = g * QK_K_BLOCK_SIZE; + const float *group_float_row = float_row + group_start_idx; + float *group_temp_buf = temp_buf; // reuse the same temp buffer + + // Apply softmax scale if needed (only for Q) + for (int d = 0; d < QK_K_BLOCK_SIZE; ++d) { + group_temp_buf[d] = group_float_row[d] * sm_scale; + } + + float max_abs_val = 0.0f; +#if defined(__AVX2__) + __m256 max_vec = _mm256_setzero_ps(); + const __m256 abs_mask = 
_mm256_castsi256_ps(_mm256_set1_epi32(0x7FFFFFFF)); + int d = 0; + for (; d <= QK_K_BLOCK_SIZE - 8; d += 8) { + max_vec = _mm256_max_ps(max_vec, _mm256_and_ps(_mm256_loadu_ps(group_temp_buf + d), abs_mask)); + } + max_abs_val = _mm256_hmax_ps(max_vec); + for (; d < QK_K_BLOCK_SIZE; ++d) max_abs_val = std::max(max_abs_val, fabsf(group_temp_buf[d])); +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + float32x4_t max_vec = vdupq_n_f32(0.0f); + int d = 0; + for (; d <= QK_K_BLOCK_SIZE - 4; d += 4) { + max_vec = vmaxq_f32(max_vec, vabsq_f32(vld1q_f32(group_temp_buf + d))); + } + max_abs_val = vmaxvq_f32(max_vec); + for (; d < QK_K_BLOCK_SIZE; ++d) max_abs_val = std::max(max_abs_val, fabsf(group_temp_buf[d])); +#else + for (int d = 0; d < QK_K_BLOCK_SIZE; ++d) max_abs_val = std::max(max_abs_val, fabsf(group_temp_buf[d])); +#endif + + const float scale = (max_abs_val > 1e-9f) ? max_abs_val / 127.0f : 0.0f; + scales[g] = scale; + const float inv_scale = (scale > 1e-9f) ? 1.0f / scale : 0.0f; + + int8_t *group_int8_row = int8_row + group_start_idx; + +#if defined(__AVX2__) + __m256 inv_scale_vec = _mm256_set1_ps(inv_scale); + for (int d = 0; d <= QK_K_BLOCK_SIZE - 8; d += 8) { + __m256i val_i32 = _mm256_cvtps_epi32(_mm256_mul_ps(_mm256_loadu_ps(group_temp_buf + d), inv_scale_vec)); + __m128i val_i16 = _mm_packs_epi32(_mm256_castsi256_si128(val_i32), _mm256_extracti128_si256(val_i32, 1)); + __m128i val_i8 = _mm_packs_epi16(val_i16, val_i16); + *(int64_t *)(group_int8_row + d) = _mm_cvtsi128_si64(val_i8); + } + for (; d < QK_K_BLOCK_SIZE; ++d) group_int8_row[d] = static_cast(roundf(group_temp_buf[d] * inv_scale)); +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + float32x4_t inv_scale_vec = vdupq_n_f32(inv_scale); + d = 0; + for (; d <= QK_K_BLOCK_SIZE - 16; d += 16) { + int32x4_t i32_0 = vcvtaq_s32_f32(vmulq_f32(vld1q_f32(group_temp_buf + d + 0), inv_scale_vec)); + int32x4_t i32_1 = vcvtaq_s32_f32(vmulq_f32(vld1q_f32(group_temp_buf + d + 4), inv_scale_vec)); + 
int32x4_t i32_2 = vcvtaq_s32_f32(vmulq_f32(vld1q_f32(group_temp_buf + d + 8), inv_scale_vec)); + int32x4_t i32_3 = vcvtaq_s32_f32(vmulq_f32(vld1q_f32(group_temp_buf + d + 12), inv_scale_vec)); + int16x8_t i16_0 = vcombine_s16(vqmovn_s32(i32_0), vqmovn_s32(i32_1)); + int16x8_t i16_1 = vcombine_s16(vqmovn_s32(i32_2), vqmovn_s32(i32_3)); + vst1q_s8(group_int8_row + d, vcombine_s8(vqmovn_s16(i16_0), vqmovn_s16(i16_1))); + } + for (; d < QK_K_BLOCK_SIZE; ++d) group_int8_row[d] = static_cast(roundf(group_temp_buf[d] * inv_scale)); +#else + for (int d = 0; d < QK_K_BLOCK_SIZE; ++d) group_int8_row[d] = static_cast(roundf(group_temp_buf[d] * inv_scale)); +#endif + } +} + +template +void compute_channel_means(const KVDtype *tensor, float *mean_tensor, int batch_size, int head_size, int seq_len, int dim_size) { +#pragma omp parallel for collapse(3) + for (int b = 0; b < batch_size; ++b) { + for (int h = 0; h < head_size; ++h) { + for (int d = 0; d < dim_size; d += 8) { // AVX2 processes 8 floats at a time +#if (defined(__ARM_NEON) || defined(__ARM_NEON__)) && defined(__ARM_FP16_FORMAT_IEEE) + float32x4_t sum_vec = vdupq_n_f32(0.0f); + for (int s = 0; s < seq_len; ++s) { + int idx = b * (seq_len * head_size * dim_size) + s * (head_size * dim_size) + h * (dim_size) + d; + float32x4_t val_vec; + if constexpr (std::is_same_v) { + val_vec = vcvt_f32_f16(vld1_f16(reinterpret_cast(tensor + idx))); + } else { + val_vec = vld1q_f32(tensor + idx); + } + sum_vec = vaddq_f32(sum_vec, val_vec); + } + float inv_seq_len = 1.0f / seq_len; + float32x4_t mean_vec = vmulq_f32(sum_vec, vdupq_n_f32(inv_seq_len)); + // Note: Original code used d+=4, but NEON loop processed only 4 floats. + // Assuming it should be d+=4 for the NEON part. Let's stick to the original logic. 
+ vst1q_f32(mean_tensor + b * head_size * dim_size + h * dim_size + d, mean_vec); +#elif defined(__AVX2__) + // =========== AVX2 IMPLEMENTATION START =========== + if (d + 8 > dim_size) { // Handle remainder + for (int i = 0; i < (dim_size - d); ++i) { + double sum = 0.0; + for (int s = 0; s < seq_len; ++s) { + int idx = b * (seq_len * head_size * dim_size) + s * (head_size * dim_size) + h * (dim_size) + d + i; + sum += to_float(tensor[idx]); + } + mean_tensor[b * head_size * dim_size + h * dim_size + d + i] = static_cast(sum / seq_len); + } + continue; // Skip to next d in the outer loop + } + + __m256 sum_vec = _mm256_setzero_ps(); + for (int s = 0; s < seq_len; ++s) { + const KVDtype *current_row = tensor + b * (seq_len * head_size * dim_size) + s * (head_size * dim_size) + h * (dim_size) + d; + __m256 val_vec = load_and_convert_to_fp32_vec(current_row); + sum_vec = _mm256_add_ps(sum_vec, val_vec); + } + const float inv_seq_len = 1.0f / seq_len; + __m256 inv_len_vec = _mm256_set1_ps(inv_seq_len); + __m256 mean_vec = _mm256_mul_ps(sum_vec, inv_len_vec); + _mm256_storeu_ps(mean_tensor + b * head_size * dim_size + h * dim_size + d, mean_vec); + // =========== AVX2 IMPLEMENTATION END =========== +#else + // Fallback for non-AVX2/NEON + double sum[8] = {0.0}; + for (int s = 0; s < seq_len; ++s) { + for (int i = 0; i < 8; ++i) { + if (d + i < dim_size) { + int idx = b * (seq_len * head_size * dim_size) + s * (head_size * dim_size) + h * (dim_size) + d + i; + sum[i] += to_float(tensor[idx]); + } + } + } + for (int i = 0; i < 8; ++i) { + if (d + i < dim_size) { + mean_tensor[b * head_size * dim_size + h * dim_size + d + i] = static_cast(sum[i] / seq_len); + } + } +#endif + } + } + } +} + +template +void compute_mean_and_quantize_k( + const KVDtype *K, + float *mean_tensor, + int8_t *k_quant_global, + float *k_scale_global, + int batch_size, int kv_head_size, int seq_size_k, int dim_size, + int threads, + float *temp_k_sum, // [threads * dim_size] + float 
*temp_k_smoothed, // [threads * dim_size] + float *temp_k_head_buffer) { // [threads * seq_size_k * dim_size] + +#pragma omp parallel for num_threads(threads) collapse(2) + for (int b = 0; b < batch_size; ++b) { + for (int h = 0; h < kv_head_size; ++h) { + const int thread_id = omp_get_thread_num(); + float *thread_sum_buf = temp_k_sum + thread_id * dim_size; + float *thread_smoothed_buf = temp_k_smoothed + thread_id * dim_size; + float *thread_k_head_buf = temp_k_head_buffer + thread_id * seq_size_k * dim_size; + + float *target_mean = mean_tensor + b * kv_head_size * dim_size + h * dim_size; + const int num_k_blocks = dim_size / QK_K_BLOCK_SIZE; + int8_t *target_k_quant = k_quant_global + (b * kv_head_size + h) * seq_size_k * dim_size; + float *target_k_scale = k_scale_global + (b * kv_head_size + h) * seq_size_k * num_k_blocks; + + const int k_stride = kv_head_size * dim_size; + + memset(thread_sum_buf, 0, dim_size * sizeof(float)); + + for (int s = 0; s < seq_size_k; ++s) { + const KVDtype *k_row_global = K + b * seq_size_k * k_stride + s * k_stride + h * dim_size; + float *k_row_buffered = thread_k_head_buf + s * dim_size; + +#if defined(__AVX2__) + int d = 0; + for (; d <= dim_size - 8; d += 8) { + const __m256 val_vec = load_and_convert_to_fp32_vec(k_row_global + d); + _mm256_storeu_ps(k_row_buffered + d, val_vec); + __m256 sum_vec = _mm256_loadu_ps(thread_sum_buf + d); + sum_vec = _mm256_add_ps(sum_vec, val_vec); + _mm256_storeu_ps(thread_sum_buf + d, sum_vec); + } + for (; d < dim_size; ++d) { + float val = to_float(k_row_global[d]); + k_row_buffered[d] = val; + thread_sum_buf[d] += val; + } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + int d = 0; + for (; d <= dim_size - 8; d += 8) { + float32x4_t val_vec_lo, val_vec_hi; + load_and_convert_to_fp32x4x2(k_row_global + d, val_vec_lo, val_vec_hi); + + vst1q_f32(k_row_buffered + d, val_vec_lo); + vst1q_f32(k_row_buffered + d + 4, val_vec_hi); + + float32x4_t sum_vec_lo = vld1q_f32(thread_sum_buf + 
d); + float32x4_t sum_vec_hi = vld1q_f32(thread_sum_buf + d + 4); + + vst1q_f32(thread_sum_buf + d, vaddq_f32(sum_vec_lo, val_vec_lo)); + vst1q_f32(thread_sum_buf + d + 4, vaddq_f32(sum_vec_hi, val_vec_hi)); + } + for (; d < dim_size; ++d) { + float val = to_float(k_row_global[d]); + k_row_buffered[d] = val; + thread_sum_buf[d] += val; + } +#else + for (int d = 0; d < dim_size; ++d) { + float val = to_float(k_row_global[d]); + k_row_buffered[d] = val; + thread_sum_buf[d] += val; + } +#endif + } + + const float inv_seq_len = 1.0f / seq_size_k; +#if defined(__AVX2__) + const __m256 inv_len_vec = _mm256_set1_ps(inv_seq_len); + int d = 0; + for (; d <= dim_size - 8; d += 8) { + __m256 sum_vec = _mm256_loadu_ps(thread_sum_buf + d); + __m256 mean_vec = _mm256_mul_ps(sum_vec, inv_len_vec); + _mm256_storeu_ps(target_mean + d, mean_vec); + } + for (; d < dim_size; ++d) { + target_mean[d] = thread_sum_buf[d] * inv_seq_len; + } +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + const float32x4_t inv_len_vec = vdupq_n_f32(inv_seq_len); + int d = 0; + for (; d <= dim_size - 4; d += 4) { + float32x4_t sum_vec = vld1q_f32(thread_sum_buf + d); + float32x4_t mean_vec = vmulq_f32(sum_vec, inv_len_vec); + vst1q_f32(target_mean + d, mean_vec); + } + for (; d < dim_size; ++d) { + target_mean[d] = thread_sum_buf[d] * inv_seq_len; + } +#else + for (int d = 0; d < dim_size; ++d) { + target_mean[d] = thread_sum_buf[d] * inv_seq_len; + } +#endif + + for (int s = 0; s < seq_size_k; ++s) { + const float *k_row_buffered = thread_k_head_buf + s * dim_size; + +#if defined(__AVX2__) + int d = 0; + for (; d <= dim_size - 8; d += 8) { + const __m256 k_vec = _mm256_loadu_ps(k_row_buffered + d); + const __m256 mean_vec = _mm256_loadu_ps(target_mean + d); + const __m256 smoothed_vec = _mm256_sub_ps(k_vec, mean_vec); + _mm256_storeu_ps(thread_smoothed_buf + d, smoothed_vec); + } + for (; d < dim_size; ++d) { + thread_smoothed_buf[d] = k_row_buffered[d] - target_mean[d]; + } +#elif 
(defined(__ARM_NEON) || defined(__ARM_NEON__)) + int d = 0; + for (; d <= dim_size - 4; d += 4) { + const float32x4_t k_vec = vld1q_f32(k_row_buffered + d); + const float32x4_t mean_vec = vld1q_f32(target_mean + d); + const float32x4_t smoothed_vec = vsubq_f32(k_vec, mean_vec); + vst1q_f32(thread_smoothed_buf + d, smoothed_vec); + } + for (; d < dim_size; ++d) { + thread_smoothed_buf[d] = k_row_buffered[d] - target_mean[d]; + } +#else + for (int d = 0; d < dim_size; ++d) { + thread_smoothed_buf[d] = k_row_buffered[d] - target_mean[d]; + } +#endif + quantize_row_per_group_simd(thread_smoothed_buf, + target_k_quant + s * dim_size, + target_k_scale + s * num_k_blocks, + dim_size, 1.0f, thread_sum_buf); + } + } + } +} + +class WorkspaceManager { +public: + WorkspaceManager() = default; + ~WorkspaceManager() { + for (auto &ptr : workspace_) { + if (ptr) aligned_free(ptr); + } + } + void **get_workspace(const std::vector &required_sizes) { + if (workspace_.empty()) { + workspace_.resize(required_sizes.size(), nullptr); + current_sizes_.resize(required_sizes.size(), 0); + } + for (size_t i = 0; i < required_sizes.size(); ++i) { + if (required_sizes[i] > current_sizes_[i]) { + if (workspace_[i]) aligned_free(workspace_[i]); + aligned_alloc(&workspace_[i], required_sizes[i], 64); + current_sizes_[i] = required_sizes[i]; + } + } + return workspace_.data(); + } + +private: + std::vector workspace_; + std::vector current_sizes_; +}; + +template +struct SAGE_CPU_IMPL { + using dtype_q_in_t = float; + using dtype_kv_in_t = KVDtype; + using dtype_out_t = float; + int32_t Br, Bc, Q_Head, KV_Head, threads; + float *acc_o_, *acc_s_, *logsum_, *scoremax_, *scoremax_prev_, *score_scale_, *score_sum_; + int8_t *q_quant_tile_, *k_quant_tile_; + float *q_scale_, *k_scale_, *k_smoothed_row_buf_, *q_scaled_row_buf_; + + void configure(int32_t Br_, int32_t Bc_, int32_t Q_Head_, int32_t KV_Head_, int32_t threads_) { + Br = Br_; + Bc = Bc_; + Q_Head = Q_Head_; + KV_Head = KV_Head_; + threads 
= threads_; + } + void init_workspace(void **workspace) { + acc_o_ = static_cast(workspace[0]); + acc_s_ = static_cast(workspace[1]); + logsum_ = static_cast(workspace[2]); + scoremax_ = static_cast(workspace[3]); + scoremax_prev_ = static_cast(workspace[4]); + score_scale_ = static_cast(workspace[5]); + score_sum_ = static_cast(workspace[6]); + q_quant_tile_ = static_cast(workspace[7]); + k_quant_tile_ = static_cast(workspace[8]); + q_scale_ = static_cast(workspace[9]); + k_scale_ = static_cast(workspace[10]); + k_smoothed_row_buf_ = static_cast(workspace[11]); + q_scaled_row_buf_ = static_cast(workspace[12]); + } + + void sage_attn_prefill(const dtype_q_in_t *__restrict__ Q, const dtype_kv_in_t *__restrict__ K, const dtype_kv_in_t *__restrict__ V, dtype_out_t *__restrict__ O, const float *K_mean, const float *V_mean, int32_t batch_size, int32_t head_size, int32_t seq_size_q, int32_t seq_size_k, int32_t dim_size, bool causal_mask) { + const int32_t Tr = (seq_size_q + Br - 1) / Br; + const int32_t Tc = (seq_size_k + Bc - 1) / Bc; + const float local_scale = 1.0f / sqrtf(static_cast(dim_size)); + const int32_t kv_group_size = (Q_Head > 0 && KV_Head > 0) ? 
Q_Head / KV_Head : 1; + const int32_t num_k_blocks = dim_size / QK_K_BLOCK_SIZE; + + for (int32_t b_idx = 0; b_idx < batch_size; ++b_idx) { +#pragma omp parallel for num_threads(threads) schedule(dynamic, 1) + for (int32_t h_idx = 0; h_idx < head_size; ++h_idx) { + const int32_t thread_id = omp_get_thread_num(); + const int32_t this_thread_kv_head = h_idx / kv_group_size; + + float *p_acc_o = acc_o_ + thread_id * Br * dim_size; + float *p_acc_s = acc_s_ + thread_id * Br * Bc; + float *p_logsum = logsum_ + thread_id * Br; + float *p_scoremax = scoremax_ + thread_id * Br; + float *p_scoremax_prev = scoremax_prev_ + thread_id * Br; + float *p_score_scale = score_scale_ + thread_id * Br; + float *p_score_sum = score_sum_ + thread_id * Br; + int8_t *p_q_quant = q_quant_tile_ + thread_id * Br * dim_size; + const int8_t *p_k_quant_global = k_quant_tile_ + (b_idx * KV_Head + this_thread_kv_head) * seq_size_k * dim_size; + float *p_q_scale = q_scale_ + thread_id * Br * num_k_blocks; + const float *p_k_scale_global = k_scale_ + (b_idx * KV_Head + this_thread_kv_head) * seq_size_k * num_k_blocks; + float *p_q_scaled = q_scaled_row_buf_ + thread_id * dim_size; + + const float *p_V_mean = V_mean + b_idx * KV_Head * dim_size + this_thread_kv_head * dim_size; + const int k_stride = KV_Head * dim_size; + + for (int32_t t_r_idx = 0; t_r_idx < Tr; ++t_r_idx) { + int32_t Br_fixed = std::min(Br, seq_size_q - t_r_idx * Br); + init_temp(p_logsum, p_scoremax, p_acc_o, Br_fixed, dim_size); + + const dtype_q_in_t *tile_q_main = Q + b_idx * seq_size_q * head_size * dim_size + t_r_idx * Br * head_size * dim_size + h_idx * dim_size; + for (int r = 0; r < Br_fixed; ++r) { + quantize_row_per_group_simd(tile_q_main + r * (head_size * dim_size), p_q_quant + r * dim_size, p_q_scale + r * num_k_blocks, dim_size, local_scale, p_q_scaled); + } + + for (int32_t t_c_idx = 0; t_c_idx < Tc; ++t_c_idx) { + int32_t Bc_fixed = std::min(Bc, seq_size_k - t_c_idx * Bc); + const dtype_kv_in_t *tile_v = V + 
b_idx * seq_size_k * k_stride + t_c_idx * Bc * k_stride + this_thread_kv_head * dim_size; + + quantize_and_mma0_sdot(Br_fixed, Bc_fixed, p_q_quant, p_k_quant_global + t_c_idx * Bc * dim_size, p_acc_s, p_q_scale, p_k_scale_global + t_c_idx * Bc * num_k_blocks, dim_size, t_r_idx * Br, t_c_idx * Bc, causal_mask); + softmax(Br_fixed, Bc_fixed, p_acc_s, p_scoremax, p_scoremax_prev, p_score_scale, p_score_sum, p_logsum); + rescale(Br_fixed, p_acc_o, p_score_scale, dim_size); + mma1(Br_fixed, Bc_fixed, p_acc_s, tile_v, p_V_mean, p_acc_o, KV_Head, dim_size); + } + + dtype_out_t *tile_o = O + b_idx * seq_size_q * head_size * dim_size + t_r_idx * Br * head_size * dim_size + h_idx * dim_size; + scale_and_store(Br_fixed, p_acc_o, p_logsum, p_V_mean, tile_o, head_size, dim_size); + } + } + } + } + + void sage_attn_decode(const dtype_q_in_t *__restrict__ Q, const dtype_kv_in_t *__restrict__ K, const dtype_kv_in_t *__restrict__ V, dtype_out_t *__restrict__ O, const float *K_mean, const float *V_mean, int32_t batch_size, int32_t head_size, int32_t seq_size_k, int32_t dim_size, bool causal_mask) { + const int32_t Tc = (seq_size_k + Bc - 1) / Bc; + const float local_scale = 1.0f / sqrtf(static_cast(dim_size)); + const int32_t kv_group_size = (Q_Head > 0 && KV_Head > 0) ? 
Q_Head / KV_Head : 1; + const int32_t num_k_blocks = dim_size / QK_K_BLOCK_SIZE; + + for (int32_t b_idx = 0; b_idx < batch_size; ++b_idx) { +#pragma omp parallel for num_threads(threads) schedule(dynamic, 1) + for (int32_t h_idx = 0; h_idx < head_size; ++h_idx) { + const int32_t Br_fixed = 1; + const int32_t thread_id = omp_get_thread_num(); + const int32_t this_thread_kv_head = h_idx / kv_group_size; + + float *p_acc_o = acc_o_ + thread_id * Br_fixed * dim_size; + float *p_acc_s = acc_s_ + thread_id * Br_fixed * Bc; + float *p_logsum = logsum_ + thread_id * Br_fixed; + float *p_scoremax = scoremax_ + thread_id * Br_fixed; + float *p_scoremax_prev = scoremax_prev_ + thread_id * Br_fixed; + float *p_score_scale = score_scale_ + thread_id * Br_fixed; + float *p_score_sum = score_sum_ + thread_id * Br_fixed; + int8_t *p_q_quant = q_quant_tile_ + thread_id * Br_fixed * dim_size; + const int8_t *p_k_quant_global = k_quant_tile_ + (b_idx * KV_Head + this_thread_kv_head) * seq_size_k * dim_size; + float *p_q_scale = q_scale_ + thread_id * Br_fixed * num_k_blocks; + const float *p_k_scale_global = k_scale_ + (b_idx * KV_Head + this_thread_kv_head) * seq_size_k * num_k_blocks; + float *p_q_scaled = q_scaled_row_buf_ + thread_id * dim_size; + + const float *p_V_mean = V_mean + b_idx * KV_Head * dim_size + this_thread_kv_head * dim_size; + const int k_stride = KV_Head * dim_size; + + const dtype_q_in_t *tile_q_decode = Q + b_idx * head_size * dim_size + h_idx * dim_size; + quantize_row_per_group_simd(tile_q_decode, p_q_quant, p_q_scale, dim_size, local_scale, p_q_scaled); + + init_temp(p_logsum, p_scoremax, p_acc_o, Br_fixed, dim_size); + + for (int32_t t_c_idx = 0; t_c_idx < Tc; ++t_c_idx) { + int32_t Bc_fixed = std::min(Bc, seq_size_k - t_c_idx * Bc); + const dtype_kv_in_t *tile_v = V + b_idx * seq_size_k * k_stride + t_c_idx * Bc * k_stride + this_thread_kv_head * dim_size; + + quantize_and_mma0_sdot(Br_fixed, Bc_fixed, p_q_quant, p_k_quant_global + t_c_idx * Bc * 
dim_size, p_acc_s, p_q_scale, p_k_scale_global + t_c_idx * Bc * num_k_blocks, dim_size, seq_size_k - 1, t_c_idx * Bc, causal_mask); + softmax(Br_fixed, Bc_fixed, p_acc_s, p_scoremax, p_scoremax_prev, p_score_scale, p_score_sum, p_logsum); + rescale(Br_fixed, p_acc_o, p_score_scale, dim_size); + mma1(Br_fixed, Bc_fixed, p_acc_s, tile_v, p_V_mean, p_acc_o, KV_Head, dim_size); + } + dtype_out_t *tile_o = O + b_idx * head_size * dim_size + h_idx * dim_size; + scale_and_store(Br_fixed, p_acc_o, p_logsum, p_V_mean, tile_o, head_size, dim_size); + } + } + } + + void init_temp(float *logsum, float *scoremax, float *acc_o, int Br_fixed, int dim_size) { + for (int i = 0; i < Br_fixed; ++i) { + logsum[i] = 0.0f; + scoremax[i] = NEG_INF; + } + memset(acc_o, 0, Br_fixed * dim_size * sizeof(float)); + } + + void quantize_and_mma0_sdot(int Br_fixed, int Bc_fixed, const int8_t *q_quant_tile, const int8_t *k_quant_tile, float *acc_s, const float *q_scale, const float *k_scale, int dim_size, int global_r_start, int global_c_start, bool causal) { + const int num_k_blocks = dim_size / QK_K_BLOCK_SIZE; + + for (int r = 0; r < Br_fixed; ++r) { + for (int c = 0; c < Bc_fixed; ++c) { + if (causal && (global_c_start + c) > (global_r_start + r)) { + acc_s[r * Bc + c] = NEG_INF; + continue; + } + + const int8_t *q_quant_line = q_quant_tile + r * dim_size; + const int8_t *k_quant_line = k_quant_tile + c * dim_size; + const float *q_scale_line = q_scale + r * num_k_blocks; + const float *k_scale_line = k_scale + c * num_k_blocks; + + float total_f32 = 0.0f; + + // Loop over groups/blocks + for (int g = 0; g < num_k_blocks; ++g) { + const int group_start_idx = g * QK_K_BLOCK_SIZE; + const int8_t *q_group_ptr = q_quant_line + group_start_idx; + const int8_t *k_group_ptr = k_quant_line + group_start_idx; + + int32_t group_dot_product_i32 = 0; +#if (defined(__ARM_NEON) || defined(__ARM_NEON__)) && defined(__ARM_FEATURE_DOTPROD) + int32x4_t acc_i32_vec = vdupq_n_s32(0); + int d = 0; + for (; d <= 
QK_K_BLOCK_SIZE - 16; d += 16) { + acc_i32_vec = vdotq_s32(acc_i32_vec, vld1q_s8(q_group_ptr + d), vld1q_s8(k_group_ptr + d)); + } + group_dot_product_i32 = vaddvq_s32(acc_i32_vec); + for (; d < QK_K_BLOCK_SIZE; ++d) { + group_dot_product_i32 += q_group_ptr[d] * k_group_ptr[d]; + } +#elif defined(__AVX2__) + __m256i acc_i32_v = _mm256_setzero_si256(); + int d = 0; + // Process 16 bytes (16 pairs of signed int8) at a time + for (; d <= QK_K_BLOCK_SIZE - 16; d += 16) { + // Load 16 int8 values for q and k + __m128i q_i8_v = _mm_loadu_si128((const __m128i *)(q_group_ptr + d)); + __m128i k_i8_v = _mm_loadu_si128((const __m128i *)(k_group_ptr + d)); + __m256i q_i16_v = _mm256_cvtepi8_epi16(q_i8_v); + __m256i k_i16_v = _mm256_cvtepi8_epi16(k_i8_v); + __m256i prod_i32_v = _mm256_madd_epi16(q_i16_v, k_i16_v); + acc_i32_v = _mm256_add_epi32(acc_i32_v, prod_i32_v); + } + + group_dot_product_i32 = hsum_i32(acc_i32_v); + + // Process any remaining elements + for (; d < QK_K_BLOCK_SIZE; ++d) { + group_dot_product_i32 += q_group_ptr[d] * k_group_ptr[d]; + } +#else + // Fallback for other platforms + for (int d = 0; d < QK_K_BLOCK_SIZE; ++d) { + group_dot_product_i32 += q_group_ptr[d] * k_group_ptr[d]; + } +#endif + // Accumulate the scaled result of this group + total_f32 += (float)group_dot_product_i32 * q_scale_line[g] * k_scale_line[g]; + } + + acc_s[r * Bc + c] = total_f32; + } + } + } + + void softmax(int Br_fixed, int Bc_fixed, float *acc_s, float *scoremax, float *scoremax_prev, float *score_scale, float *score_sum, float *logsum) { + memcpy(scoremax_prev, scoremax, Br_fixed * sizeof(float)); + for (int r = 0; r < Br_fixed; ++r) { + float *row = acc_s + r * Bc; + float current_max = scoremax[r]; + for (int c = 0; c < Bc_fixed; ++c) current_max = std::max(current_max, row[c]); + scoremax[r] = current_max; + } + for (int r = 0; r < Br_fixed; ++r) score_scale[r] = expf(scoremax_prev[r] - scoremax[r]); + for (int r = 0; r < Br_fixed; ++r) { + float *row = acc_s + r * Bc; + 
float sm = scoremax[r]; + float sum = 0.f; + for (int c = 0; c < Bc_fixed; ++c) { + if (row[c] > NEG_INF / 2) { + float val = expf(row[c] - sm); + row[c] = val; + sum += val; + } else { + row[c] = 0.f; + } + } + score_sum[r] = sum; + } + for (int r = 0; r < Br_fixed; ++r) logsum[r] = logsum[r] * score_scale[r] + score_sum[r]; + } + void rescale(int Br_fixed, float *acc_o, const float *score_scale, int dim_size) { + for (int r = 0; r < Br_fixed; ++r) { + float scale_val = score_scale[r]; + float *row_ptr = acc_o + r * dim_size; +#if defined(__AVX2__) + __m256 scale_vec = _mm256_set1_ps(scale_val); + int d = 0; + for (; d <= dim_size - 8; d += 8) _mm256_storeu_ps(row_ptr + d, _mm256_mul_ps(_mm256_loadu_ps(row_ptr + d), scale_vec)); + for (; d < dim_size; ++d) row_ptr[d] *= scale_val; +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + float32x4_t scale_vec = vdupq_n_f32(scale_val); + int d = 0; + for (; d <= dim_size - 4; d += 4) vst1q_f32(row_ptr + d, vmulq_f32(vld1q_f32(row_ptr + d), scale_vec)); + for (; d < dim_size; ++d) row_ptr[d] *= scale_val; +#else + for (int d = 0; d < dim_size; ++d) row_ptr[d] *= scale_val; +#endif + } + } + + void mma1(int Br_fixed, int Bc_fixed, const float *p_block, const dtype_kv_in_t *v_block, const float *v_mean, float *acc_o, int kv_head_size, int dim_size) { + int v_stride = kv_head_size * dim_size; + +#if (defined(__ARM_NEON) || defined(__ARM_NEON__)) && defined(__ARM_FP16_FORMAT_IEEE) + for (int r = 0; r < Br_fixed; ++r) { + float *o_row = acc_o + r * dim_size; + const float *p_row = p_block + r * Bc; + + for (int d = 0; d < dim_size; d += 4) { + float32x4_t o_acc_vec = vld1q_f32(o_row + d); + const float32x4_t vm_vec = vld1q_f32(v_mean + d); + + int c = 0; + for (; c <= Bc_fixed - 4; c += 4) { + __builtin_prefetch(v_block + (c + 8) * v_stride + d, 0, 0); + + const float p0 = p_row[c + 0]; + const float p1 = p_row[c + 1]; + const float p2 = p_row[c + 2]; + const float p3 = p_row[c + 3]; + + const dtype_kv_in_t *v_row0 = 
v_block + (c + 0) * v_stride; + const dtype_kv_in_t *v_row1 = v_block + (c + 1) * v_stride; + const dtype_kv_in_t *v_row2 = v_block + (c + 2) * v_stride; + const dtype_kv_in_t *v_row3 = v_block + (c + 3) * v_stride; + + float32x4_t v_vec0, v_vec1, v_vec2, v_vec3; + if constexpr (std::is_same_v) { + v_vec0 = vcvt_f32_f16(vld1_f16(reinterpret_cast(v_row0 + d))); + v_vec1 = vcvt_f32_f16(vld1_f16(reinterpret_cast(v_row1 + d))); + v_vec2 = vcvt_f32_f16(vld1_f16(reinterpret_cast(v_row2 + d))); + v_vec3 = vcvt_f32_f16(vld1_f16(reinterpret_cast(v_row3 + d))); + } else { // float + v_vec0 = vld1q_f32(v_row0 + d); + v_vec1 = vld1q_f32(v_row1 + d); + v_vec2 = vld1q_f32(v_row2 + d); + v_vec3 = vld1q_f32(v_row3 + d); + } + o_acc_vec = vfmaq_n_f32(o_acc_vec, vsubq_f32(v_vec0, vm_vec), p0); + o_acc_vec = vfmaq_n_f32(o_acc_vec, vsubq_f32(v_vec1, vm_vec), p1); + o_acc_vec = vfmaq_n_f32(o_acc_vec, vsubq_f32(v_vec2, vm_vec), p2); + o_acc_vec = vfmaq_n_f32(o_acc_vec, vsubq_f32(v_vec3, vm_vec), p3); + } + + for (; c < Bc_fixed; ++c) { + const float p_scalar = p_row[c]; + const float32x4_t p_vec = vdupq_n_f32(p_scalar); + const dtype_kv_in_t *v_row = v_block + c * v_stride; + float32x4_t v_vec; + if constexpr (std::is_same_v) { + v_vec = vcvt_f32_f16(vld1_f16(reinterpret_cast(v_row + d))); + } else { + v_vec = vld1q_f32(v_row + d); + } + float32x4_t v_smoothed = vsubq_f32(v_vec, vm_vec); + o_acc_vec = vfmaq_f32(o_acc_vec, p_vec, v_smoothed); + } + + vst1q_f32(o_row + d, o_acc_vec); + } + } +#elif defined(__AVX2__) && defined(__FMA__) + for (int r = 0; r < Br_fixed; ++r) { + float *o_row = acc_o + r * dim_size; + for (int c = 0; c < Bc_fixed; ++c) { + const float p_scalar = p_block[r * Bc + c]; + if (fabsf(p_scalar) < 1e-9) continue; + + const __m256 p_vec = _mm256_set1_ps(p_scalar); + const dtype_kv_in_t *v_row = v_block + c * v_stride; + + int d = 0; + for (; d <= dim_size - 8; d += 8) { + const __m256 vm_vec = _mm256_loadu_ps(v_mean + d); + const __m256 v_vec = 
load_and_convert_to_fp32_vec(v_row + d); + + __m256 o_vec = _mm256_loadu_ps(o_row + d); + __m256 v_smoothed = _mm256_sub_ps(v_vec, vm_vec); + + // Fused Multiply-Add: o_vec = (p_vec * v_smoothed) + o_vec + o_vec = _mm256_fmadd_ps(p_vec, v_smoothed, o_vec); + + _mm256_storeu_ps(o_row + d, o_vec); + } + // Remainder loop + for (; d < dim_size; ++d) { + o_row[d] += p_scalar * (to_float(v_row[d]) - v_mean[d]); + } + } + } +#else + // Fallback for other platforms + for (int r = 0; r < Br_fixed; ++r) { + float *o_row = acc_o + r * dim_size; + for (int c = 0; c < Bc_fixed; ++c) { + const float p = p_block[r * Bc + c]; + if (fabsf(p) < 1e-9) continue; + const dtype_kv_in_t *v_row = v_block + c * v_stride; + for (int d = 0; d < dim_size; ++d) { o_row[d] += p * (to_float(v_row[d]) - v_mean[d]); } + } + } +#endif + } + + void scale_and_store(int Br_fixed, const float *acc_o, const float *logsum, const float *v_mean, float *O, int head_size, int dim_size) { + int o_stride = head_size * dim_size; + for (int r = 0; r < Br_fixed; ++r) { + float inv_logsum = (logsum[r] > 1e-9f) ? 
1.f / logsum[r] : 0.f; + const float *o_row = acc_o + r * dim_size; + float *O_row = O + r * o_stride; +#if defined(__AVX2__) && defined(__FMA__) + const __m256 inv_logsum_vec = _mm256_set1_ps(inv_logsum); + int d = 0; + for (; d <= dim_size - 8; d += 8) { + const __m256 o_vec = _mm256_loadu_ps(o_row + d); + const __m256 vm_vec = _mm256_loadu_ps(v_mean + d); + _mm256_storeu_ps(O_row + d, _mm256_fmadd_ps(o_vec, inv_logsum_vec, vm_vec)); + } + for (; d < dim_size; ++d) O_row[d] = o_row[d] * inv_logsum + v_mean[d]; +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + float32x4_t inv_logsum_vec = vdupq_n_f32(inv_logsum); + int d = 0; + for (; d <= dim_size - 4; d += 4) { + float32x4_t o_vec = vld1q_f32(o_row + d); + float32x4_t vm_vec = vld1q_f32(v_mean + d); + vst1q_f32(O_row + d, vfmaq_f32(vm_vec, o_vec, inv_logsum_vec)); + } + for (; d < dim_size; ++d) O_row[d] = o_row[d] * inv_logsum + v_mean[d]; +#else + for (int d = 0; d < dim_size; ++d) O_row[d] = o_row[d] * inv_logsum + v_mean[d]; +#endif + } + } +}; + +template +void sage_attention_forward_cpu_dispatch(const float *Q, const KVDtype *K, const KVDtype *V, float *O, int32_t batch_size, int32_t q_head, int32_t kv_head, int32_t seq_size_q, int32_t seq_size_k, int32_t dim_size, bool causal_mask, int32_t threads, int32_t br, int32_t bc) { + // 确保 dim_size 可以被块大小整除 + if (dim_size % QK_K_BLOCK_SIZE != 0) { + std::cerr << "Error: dim_size must be divisible by QK_K_BLOCK_SIZE (" << QK_K_BLOCK_SIZE << ")" << std::endl; + return; + } + const int32_t num_k_blocks = dim_size / QK_K_BLOCK_SIZE; + + std::vector V_mean(batch_size * kv_head * dim_size); + compute_channel_means(V, V_mean.data(), batch_size, kv_head, seq_size_k, dim_size); + + thread_local WorkspaceManager manager; + + std::vector K_mean(batch_size * kv_head * dim_size); + + if (seq_size_q > 1) { // Prefill 阶段 + const std::vector required_sizes = { + (size_t)threads * br * dim_size * sizeof(float), // 0: acc_o_ + (size_t)threads * br * bc * sizeof(float), // 1: 
acc_s_ + (size_t)threads * br * sizeof(float), // 2: logsum_ + (size_t)threads * br * sizeof(float), // 3: scoremax_ + (size_t)threads * br * sizeof(float), // 4: scoremax_prev_ + (size_t)threads * br * sizeof(float), // 5: score_scale_ + (size_t)threads * br * sizeof(float), // 6: score_sum_ + (size_t)threads * br * dim_size * sizeof(int8_t), // 7: q_quant_tile_ + (size_t)batch_size * kv_head * seq_size_k * dim_size * sizeof(int8_t), // 8: k_quant_tile_ (global) + (size_t)threads * br * num_k_blocks * sizeof(float), // 9: q_scale_ + (size_t)batch_size * kv_head * seq_size_k * num_k_blocks * sizeof(float), // 10: k_scale_ (global) + (size_t)threads * dim_size * sizeof(float), // 11: k_smoothed_row_buf_ + (size_t)threads * dim_size * sizeof(float), // 12: q_scaled_row_buf_ + (size_t)threads * dim_size * sizeof(float), // 13: temp_k_sum + (size_t)threads * seq_size_k * dim_size * sizeof(float) // 14: temp_k_head_buffer + }; + void **workspace = manager.get_workspace(required_sizes); + + int8_t *k_quant_global_buffer = static_cast(workspace[8]); + float *k_scale_global_buffer = static_cast(workspace[10]); + + compute_mean_and_quantize_k(K, K_mean.data(), k_quant_global_buffer, k_scale_global_buffer, + batch_size, kv_head, seq_size_k, dim_size, threads, + static_cast(workspace[13]), + static_cast(workspace[11]), + static_cast(workspace[14])); + + SAGE_CPU_IMPL op; + op.configure(br, bc, q_head, kv_head, threads); + op.init_workspace(workspace); + op.sage_attn_prefill(Q, K, V, O, K_mean.data(), V_mean.data(), batch_size, q_head, seq_size_q, seq_size_k, dim_size, causal_mask); + + } else { + const int32_t decode_br = 1; + const std::vector required_sizes = { + (size_t)threads * decode_br * dim_size * sizeof(float), + (size_t)threads * decode_br * bc * sizeof(float), + (size_t)threads * decode_br * sizeof(float), + (size_t)threads * decode_br * sizeof(float), + (size_t)threads * decode_br * sizeof(float), + (size_t)threads * decode_br * sizeof(float), + (size_t)threads * 
decode_br * sizeof(float), + (size_t)threads * decode_br * dim_size * sizeof(int8_t), + (size_t)batch_size * kv_head * seq_size_k * dim_size * sizeof(int8_t), + (size_t)threads * decode_br * num_k_blocks * sizeof(float), + (size_t)batch_size * kv_head * seq_size_k * num_k_blocks * sizeof(float), + (size_t)threads * dim_size * sizeof(float), + (size_t)threads * dim_size * sizeof(float), + (size_t)threads * dim_size * sizeof(float), + (size_t)threads * seq_size_k * dim_size * sizeof(float)}; + void **workspace = manager.get_workspace(required_sizes); + + int8_t *k_quant_global_buffer = static_cast(workspace[8]); + float *k_scale_global_buffer = static_cast(workspace[10]); + + compute_mean_and_quantize_k(K, K_mean.data(), k_quant_global_buffer, k_scale_global_buffer, + batch_size, kv_head, seq_size_k, dim_size, threads, + static_cast(workspace[13]), + static_cast(workspace[11]), + static_cast(workspace[14])); + + SAGE_CPU_IMPL op; + op.configure(br, bc, q_head, kv_head, threads); + op.init_workspace(workspace); + op.sage_attn_decode(Q, K, V, O, K_mean.data(), V_mean.data(), batch_size, q_head, seq_size_k, dim_size, causal_mask); + } +} +} // namespace sage_attn_cpu +#endif // SAGE_ATTENTION_CPU_HPP \ No newline at end of file diff --git a/mllm/backends/cpu/compute/SageAttentionKVQ8.hpp b/mllm/backends/cpu/compute/SageAttentionKVQ8.hpp new file mode 100644 index 000000000..71abbcc47 --- /dev/null +++ b/mllm/backends/cpu/compute/SageAttentionKVQ8.hpp @@ -0,0 +1,700 @@ +#ifndef SAGE_ATTENTION_KVQ8_HPP +#define SAGE_ATTENTION_KVQ8_HPP + +#include "Types.hpp" +#include +#include +#include +#include +#include +#include + +#ifdef __AVX2__ +#include +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) +#include +#endif + +namespace seq_attn_kvq8 { + +const int QK_K_BLOCK_SIZE = QK8_0F; +#define NEG_INF std::numeric_limits::lowest() + +namespace { // 匿名空间,限制作用域 +#ifdef __AVX2__ +inline float _mm256_hmax_ps(__m256 x) { + __m128 lo = _mm256_castps256_ps128(x); + __m128 hi = 
_mm256_extractf128_ps(x, 1); + __m128 max_val = _mm_max_ps(lo, hi); + max_val = _mm_max_ps(max_val, _mm_shuffle_ps(max_val, max_val, _MM_SHUFFLE(0, 0, 2, 2))); + max_val = _mm_max_ps(max_val, _mm_shuffle_ps(max_val, max_val, _MM_SHUFFLE(0, 0, 0, 1))); + return _mm_cvtss_f32(max_val); +} + +inline int32_t hsum_i32(__m256i v) { + __m128i vlo = _mm256_castsi256_si128(v); + __m128i vhi = _mm256_extracti128_si256(v, 1); + __m128i vsum = _mm_add_epi32(vlo, vhi); + vsum = _mm_add_epi32(vsum, _mm_shuffle_epi32(vsum, _MM_SHUFFLE(1, 0, 3, 2))); + vsum = _mm_add_epi32(vsum, _mm_shuffle_epi32(vsum, _MM_SHUFFLE(2, 3, 0, 1))); + return _mm_cvtsi128_si32(vsum); +} +#endif // __AVX2__ +} // namespace + +#if (defined(__ARM_NEON) || defined(__ARM_NEON__)) +inline float32x4_t exp_ps_f32(float32x4_t x) { + float32x4_t a = vdupq_n_f32(12102203.0f); // (1 << 23) / ln(2) + float32x4_t b = vdupq_n_f32(1065353216.0f); // (1 << 23) * (0.5 - 0.04165) + (127 << 23) + int32x4_t m = vdupq_n_s32(0x7f); + float32x4_t y = vmlaq_f32(b, a, x); + int32x4_t r = vreinterpretq_s32_f32(y); + r = vandq_s32(r, vdupq_n_s32(0xffffff)); + r = vorrq_s32(r, vdupq_n_s32(0x3f800000)); + return vreinterpretq_f32_s32(r); +} +#endif + +class WorkspaceManager { +public: + WorkspaceManager() = default; + ~WorkspaceManager() { + for (auto &p : workspace_) + if (p) free(p); + } + void **get_workspace(const std::vector &s) { + if (workspace_.empty()) { + workspace_.resize(s.size(), nullptr); + current_sizes_.resize(s.size(), 0); + } + for (size_t i = 0; i < s.size(); ++i) { + if (s[i] > current_sizes_[i]) { + if (workspace_[i]) free(workspace_[i]); + posix_memalign(&workspace_[i], 64, s[i]); + current_sizes_[i] = s[i]; + } + } + return workspace_.data(); + } + +private: + std::vector workspace_; + std::vector current_sizes_; +}; + +struct SAGE_CPU_IMPL_KVQ8 { + using TQ = float; + using TO = float; + int32_t Br, Bc, Q_Head, KV_Head, threads; + float *acc_o, *acc_s, *logsum, *scoremax, *scoremax_prev, *score_scale, 
*score_sum; + int8_t *q_quant; + float *q_scale, *q_scaled_buf; + int8_t *p_quant; + float *p_scale; + + void configure(int32_t Br_, int32_t Bc_, int32_t Q_H, int32_t KV_H, int32_t T) { + Br = Br_; + Bc = Bc_; + Q_Head = Q_H; + KV_Head = KV_H; + threads = T; + } + + void init_workspace(void **ws) { + acc_o = static_cast(ws[0]); + acc_s = static_cast(ws[1]); + logsum = static_cast(ws[2]); + scoremax = static_cast(ws[3]); + scoremax_prev = static_cast(ws[4]); + score_scale = static_cast(ws[5]); + score_sum = static_cast(ws[6]); + q_quant = static_cast(ws[7]); + q_scale = static_cast(ws[8]); + q_scaled_buf = static_cast(ws[9]); + p_quant = static_cast(ws[10]); + p_scale = static_cast(ws[11]); + } + + void init_temp(float *l, float *sm, float *o, int Br_f, int D) { + for (int i = 0; i < Br_f; ++i) { + l[i] = 0.0f; + sm[i] = -std::numeric_limits::infinity(); + } + if (o) memset(o, 0, Br_f * D * sizeof(float)); + } + + void quantize_q_row(const float *float_row, int8_t *int8_row, float *scales, int dim_size, float sm_scale, float *temp_buf) { + const int num_groups = dim_size / QK_K_BLOCK_SIZE; + for (int g = 0; g < num_groups; ++g) { + const int group_start_idx = g * QK_K_BLOCK_SIZE; + for (int d = 0; d < QK_K_BLOCK_SIZE; ++d) temp_buf[d] = float_row[group_start_idx + d] * sm_scale; + float max_abs_val = 0.0f; +#if defined(__AVX2__) + __m256 max_vec = _mm256_setzero_ps(); + const __m256 abs_mask = _mm256_castsi256_ps(_mm256_set1_epi32(0x7FFFFFFF)); + int d = 0; + for (; d <= QK_K_BLOCK_SIZE - 8; d += 8) max_vec = _mm256_max_ps(max_vec, _mm256_and_ps(_mm256_loadu_ps(temp_buf + d), abs_mask)); + max_abs_val = _mm256_hmax_ps(max_vec); + for (; d < QK_K_BLOCK_SIZE; ++d) max_abs_val = std::max(max_abs_val, fabsf(temp_buf[d])); +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + float32x4_t max_vec = vdupq_n_f32(0.0f); + int d = 0; + for (; d <= QK_K_BLOCK_SIZE - 4; d += 4) max_vec = vmaxq_f32(max_vec, vabsq_f32(vld1q_f32(temp_buf + d))); + max_abs_val = 
vmaxvq_f32(max_vec); + for (; d < QK_K_BLOCK_SIZE; ++d) max_abs_val = std::max(max_abs_val, fabsf(temp_buf[d])); +#else + for (int d = 0; d < QK_K_BLOCK_SIZE; ++d) max_abs_val = std::max(max_abs_val, fabsf(temp_buf[d])); +#endif + const float scale = (max_abs_val > 1e-9f) ? max_abs_val / 127.0f : 0.0f; + scales[g] = scale; + const float inv_scale = (scale > 1e-9f) ? 1.0f / scale : 0.0f; + int8_t *group_int8_row = int8_row + group_start_idx; +#if defined(__AVX2__) + __m256 inv_scale_vec = _mm256_set1_ps(inv_scale); + d = 0; + for (; d <= QK_K_BLOCK_SIZE - 8; d += 8) { + __m256i val_i32 = _mm256_cvtps_epi32(_mm256_mul_ps(_mm256_loadu_ps(temp_buf + d), inv_scale_vec)); + __m128i val_i16 = _mm_packs_epi32(_mm256_castsi256_si128(val_i32), _mm256_extracti128_si256(val_i32, 1)); + __m128i val_i8 = _mm_packs_epi16(val_i16, val_i16); + *(int64_t *)(group_int8_row + d) = _mm_cvtsi128_si64(val_i8); + } + for (; d < QK_K_BLOCK_SIZE; ++d) group_int8_row[d] = static_cast(roundf(temp_buf[d] * inv_scale)); +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + float32x4_t inv_scale_vec = vdupq_n_f32(inv_scale); + d = 0; + for (; d <= QK_K_BLOCK_SIZE - 16; d += 16) { + int32x4_t i32_0 = vcvtaq_s32_f32(vmulq_f32(vld1q_f32(temp_buf + d + 0), inv_scale_vec)); + int32x4_t i32_1 = vcvtaq_s32_f32(vmulq_f32(vld1q_f32(temp_buf + d + 4), inv_scale_vec)); + int32x4_t i32_2 = vcvtaq_s32_f32(vmulq_f32(vld1q_f32(temp_buf + d + 8), inv_scale_vec)); + int32x4_t i32_3 = vcvtaq_s32_f32(vmulq_f32(vld1q_f32(temp_buf + d + 12), inv_scale_vec)); + int16x8_t i16_0 = vcombine_s16(vqmovn_s32(i32_0), vqmovn_s32(i32_1)); + int16x8_t i16_1 = vcombine_s16(vqmovn_s32(i32_2), vqmovn_s32(i32_3)); + vst1q_s8(group_int8_row + d, vcombine_s8(vqmovn_s16(i16_0), vqmovn_s16(i16_1))); + } + for (; d < QK_K_BLOCK_SIZE; ++d) group_int8_row[d] = static_cast(roundf(temp_buf[d] * inv_scale)); +#else + for (int d = 0; d < QK_K_BLOCK_SIZE; ++d) group_int8_row[d] = static_cast(roundf(temp_buf[d] * inv_scale)); +#endif + } + } 
+ + void mma0_sdot(int Br_f, int Bc_f, const int8_t *q_q_tile, const block_q8_0f *k_cache_tile, float *s, const float *q_s_tile, int D, int grs, int gcs, bool causal) { +#if defined(__AVX2__) + const int num_k_blocks = D / QK_K_BLOCK_SIZE; + for (int r = 0; r < Br_f; ++r) { + for (int c = 0; c < Bc_f; ++c) { + if (causal && (gcs + c) > (grs + r)) { + s[r * Bc + c] = NEG_INF; + continue; + } + const int8_t *q_quant_line = q_q_tile + r * D; + const block_q8_0f *k_block_line = k_cache_tile + c * KV_Head * num_k_blocks; + const float *q_scale_line = q_s_tile + r * num_k_blocks; + float total_f32 = 0.0f; + + for (int g = 0; g < num_k_blocks; ++g) { + const int g_start = g * QK_K_BLOCK_SIZE; + const int8_t *q_group_ptr = q_quant_line + g_start; + const int8_t *k_group_ptr = k_block_line[g].qs; + const float k_group_scale = k_block_line[g].scale; + const float q_group_scale = q_scale_line[g]; + + int32_t g_dot = 0; + __m256i acc_i32_v = _mm256_setzero_si256(); + int d = 0; + for (; d <= QK_K_BLOCK_SIZE - 16; d += 16) { + __m128i q_i8_v = _mm_loadu_si128((const __m128i *)(q_group_ptr + d)); + __m128i k_i8_v = _mm_loadu_si128((const __m128i *)(k_group_ptr + d)); + __m256i q_i16_v = _mm256_cvtepi8_epi16(q_i8_v); + __m256i k_i16_v = _mm256_cvtepi8_epi16(k_i8_v); + __m256i prod_i32_v = _mm256_madd_epi16(k_i16_v, q_i16_v); + acc_i32_v = _mm256_add_epi32(acc_i32_v, prod_i32_v); + } + g_dot = hsum_i32(acc_i32_v); + for (; d < QK_K_BLOCK_SIZE; ++d) g_dot += q_group_ptr[d] * k_group_ptr[d]; + + total_f32 += (float)g_dot * q_group_scale * k_group_scale; + } + s[r * Bc + c] = total_f32; + } + } + +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) && defined(__ARM_FEATURE_DOTPROD) + const int num_k_blocks = D / QK_K_BLOCK_SIZE; + + for (int br_base = 0; br_base < Br_f; br_base += 4) { + for (int bc_base = 0; bc_base < Bc_f; bc_base += 4) { + int br_limit = std::min(4, Br_f - br_base); + int bc_limit = std::min(4, Bc_f - bc_base); + float accumulators[16] = {0.0f}; + + for (int g = 
0; g < num_k_blocks; ++g) { + const int g_start = g * QK_K_BLOCK_SIZE; + const int8_t *q_rows[4]; + float q_scales[4]; + for (int i = 0; i < br_limit; ++i) { + q_rows[i] = q_q_tile + (br_base + i) * D + g_start; + q_scales[i] = q_s_tile[(br_base + i) * num_k_blocks + g]; + } + + const int8_t *k_rows[4]; + float k_scales[4]; + for (int i = 0; i < bc_limit; ++i) { + k_rows[i] = k_cache_tile[(bc_base + i) * KV_Head * num_k_blocks + g].qs; + k_scales[i] = k_cache_tile[(bc_base + i) * KV_Head * num_k_blocks + g].scale; + } + + for (int r_i = 0; r_i < br_limit; ++r_i) { + for (int c_i = 0; c_i < bc_limit; ++c_i) { + int32x4_t acc_s32 = vdupq_n_s32(0); + int d = 0; + for (; d <= QK_K_BLOCK_SIZE - 16; d += 16) { + int8x16_t q_s8 = vld1q_s8(q_rows[r_i] + d); + int8x16_t k_s8 = vld1q_s8(k_rows[c_i] + d); + acc_s32 = vdotq_s32(acc_s32, q_s8, k_s8); + } + int32_t dot_prod = vaddvq_s32(acc_s32); + for (; d < QK_K_BLOCK_SIZE; ++d) { dot_prod += q_rows[r_i][d] * k_rows[c_i][d]; } + + accumulators[r_i * 4 + c_i] += (float)dot_prod * q_scales[r_i] * k_scales[c_i]; + } + } + } + + for (int r_i = 0; r_i < br_limit; ++r_i) { + for (int c_i = 0; c_i < bc_limit; ++c_i) { + const int gr = grs + br_base + r_i; + const int gc = gcs + bc_base + c_i; + if (causal && gc > gr) { + s[(br_base + r_i) * Bc + (bc_base + c_i)] = NEG_INF; + } else { + s[(br_base + r_i) * Bc + (bc_base + c_i)] = accumulators[r_i * 4 + c_i]; + } + } + } + } + } + +#else + const int num_k_blocks = D / QK_K_BLOCK_SIZE; + for (int r = 0; r < Br_f; ++r) { + for (int c = 0; c < Bc_f; ++c) { + if (causal && (gcs + c) > (grs + r)) { + s[r * Bc + c] = NEG_INF; + continue; + } + const int8_t *q_quant_line = q_q_tile + r * D; + const block_q8_0f *k_block_line = k_cache_tile + c * KV_Head * num_k_blocks; + const float *q_scale_line = q_s_tile + r * num_k_blocks; + float total_f32 = 0.0f; + + for (int g = 0; g < num_k_blocks; ++g) { + const int g_start = g * QK_K_BLOCK_SIZE; + const int8_t *q_group_ptr = q_quant_line + g_start; + 
const int8_t *k_group_ptr = k_block_line[g].qs; + const float k_group_scale = k_block_line[g].scale; + const float q_group_scale = q_scale_line[g]; + + int32_t g_dot = 0; + for (int d = 0; d < QK_K_BLOCK_SIZE; ++d) { + g_dot += q_group_ptr[d] * k_group_ptr[d]; + } + total_f32 += (float)g_dot * q_group_scale * k_group_scale; + } + s[r * Bc + c] = total_f32; + } + } +#endif + } + + void quantize_p_rows(int Br_f, int Bc_f, const float *p_float_block, int8_t *p_quant_block, float *p_scale_block) { + for (int r = 0; r < Br_f; ++r) { + const float *p_float_row = p_float_block + r * Bc; + int8_t *p_quant_row = p_quant_block + r * Bc; + + float max_abs_val = 0.0f; + for (int c = 0; c < Bc_f; ++c) { + max_abs_val = std::max(max_abs_val, fabsf(p_float_row[c])); + } + + const float scale = (max_abs_val > 1e-9f) ? max_abs_val / 127.0f : 0.0f; + p_scale_block[r] = scale; + const float inv_scale = (scale > 1e-9f) ? 1.0f / scale : 0.0f; + + for (int c = 0; c < Bc_f; ++c) { + p_quant_row[c] = static_cast(roundf(p_float_row[c] * inv_scale)); + } + } + } + + void mma1(int Br_f, int Bc_f, const int8_t *p_quant_block, const float *p_scale_block, const block_q8_0f *v_cache_tile, float *acc_o, int D) { +#if defined(__AVX2__) && defined(__FMA__) + const int num_v_blocks = D / QK_K_BLOCK_SIZE; + for (int r = 0; r < Br_f; ++r) { + const float p_row_scale = p_scale_block[r]; + if (fabsf(p_row_scale) < 1e-9) continue; + + float *o_row = acc_o + r * D; + const int8_t *p_quant_row = p_quant_block + r * Bc; + + for (int c = 0; c < Bc_f; ++c) { + const int8_t p_quant_scalar = p_quant_row[c]; + if (p_quant_scalar == 0) continue; + + const float p_dequant_val = (float)p_quant_scalar * p_row_scale; + const block_q8_0f *v_block_line = v_cache_tile + c * KV_Head * num_v_blocks; + + const __m256 p_vec = _mm256_set1_ps(p_dequant_val); + for (int g = 0; g < num_v_blocks; ++g) { + const int g_start = g * QK_K_BLOCK_SIZE; + const float v_scale_scalar = v_block_line[g].scale; + const __m256 v_scale_vec = 
_mm256_set1_ps(v_scale_scalar); + for (int d_group = 0; d_group < QK_K_BLOCK_SIZE; d_group += 8) { + const int d = g_start + d_group; + __m128i v_i8_vec_part = _mm_loadl_epi64((const __m128i *)(v_block_line[g].qs + d_group)); + __m256i v_i32_vec = _mm256_cvtepi8_epi32(v_i8_vec_part); + __m256 v_f32_vec = _mm256_cvtepi32_ps(v_i32_vec); + __m256 dequant_v_vec = _mm256_mul_ps(v_f32_vec, v_scale_vec); + __m256 o_vec = _mm256_loadu_ps(o_row + d); + o_vec = _mm256_fmadd_ps(p_vec, dequant_v_vec, o_vec); + _mm256_storeu_ps(o_row + d, o_vec); + } + } + } + } + +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + const int num_v_blocks = D / QK_K_BLOCK_SIZE; + + for (int r = 0; r < Br_f; ++r) { + float *o_row = acc_o + r * D; + const float p_row_scale = p_scale_block[r]; + if (fabsf(p_row_scale) < 1e-9) continue; + + for (int g = 0; g < num_v_blocks; ++g) { + const int g_start = g * QK_K_BLOCK_SIZE; + + for (int d = 0; d < QK_K_BLOCK_SIZE; d += 8) { + float32x4_t acc0 = vld1q_f32(o_row + g_start + d); + float32x4_t acc1 = vld1q_f32(o_row + g_start + d + 4); + for (int c = 0; c < Bc_f; ++c) { + const int8_t p_quant_scalar = p_quant_block[r * Bc + c]; + if (p_quant_scalar == 0) continue; + + const float32x4_t p_vec = vdupq_n_f32((float)p_quant_scalar * p_row_scale); + + const block_q8_0f *v_block = v_cache_tile + c * KV_Head * num_v_blocks + g; + const float32x4_t v_scale_vec = vdupq_n_f32(v_block->scale); + const int8_t *v_qs = v_block->qs + d; + + int8x8_t v_s8 = vld1_s8(v_qs); + int16x8_t v_s16 = vmovl_s8(v_s8); + + float32x4_t v_f32_lo = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_s16))), v_scale_vec); + float32x4_t v_f32_hi = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_s16))), v_scale_vec); + + acc0 = vfmaq_f32(acc0, p_vec, v_f32_lo); + acc1 = vfmaq_f32(acc1, p_vec, v_f32_hi); + } + + vst1q_f32(o_row + g_start + d, acc0); + vst1q_f32(o_row + g_start + d + 4, acc1); + } + } + } + +#else + const int num_v_blocks = D / QK_K_BLOCK_SIZE; + for (int r = 0; r < 
Br_f; ++r) { + const float p_row_scale = p_scale_block[r]; + if (fabsf(p_row_scale) < 1e-9) continue; + + float *o_row = acc_o + r * D; + const int8_t *p_quant_row = p_quant_block + r * Bc; + + for (int c = 0; c < Bc_f; ++c) { + const int8_t p_quant_scalar = p_quant_row[c]; + if (p_quant_scalar == 0) continue; + + const float p_dequant_val = (float)p_quant_scalar * p_row_scale; + const block_q8_0f *v_block_line = v_cache_tile + c * KV_Head * num_v_blocks; + + for (int g = 0; g < num_v_blocks; ++g) { + const int g_start = g * QK_K_BLOCK_SIZE; + const float v_s = v_block_line[g].scale; + const int8_t *v_qs = v_block_line[g].qs; + for (int d = 0; d < QK_K_BLOCK_SIZE; ++d) { + o_row[g_start + d] += p_dequant_val * ((float)v_qs[d] * v_s); + } + } + } + } +#endif + } + + void softmax(int Br_f, int Bc_f, float *acc_s, float *sm, float *sm_p, float *ss, float *sum, float *l) { + memcpy(sm_p, sm, Br_f * sizeof(float)); + for (int r = 0; r < Br_f; ++r) { + float *row = acc_s + r * Bc, cmax = sm[r]; + for (int c = 0; c < Bc_f; ++c) cmax = std::max(cmax, row[c]); + sm[r] = cmax; + } + for (int r = 0; r < Br_f; ++r) ss[r] = expf(sm_p[r] - sm[r]); + for (int r = 0; r < Br_f; ++r) { + float *row = acc_s + r * Bc; + float smax = sm[r], s = 0.f; + for (int c = 0; c < Bc_f; ++c) row[c] = (row[c] > NEG_INF / 2) ? 
(s += row[c] = expf(row[c] - smax), row[c]) : 0.f; + sum[r] = s; + } + for (int r = 0; r < Br_f; ++r) l[r] = l[r] * ss[r] + sum[r]; + } + + /* + void softmax(int Br_f, int Bc_f, float *acc_s, float *sm, float *sm_p, float *ss, float *sum, float *l) { + memcpy(sm_p, sm, Br_f * sizeof(float)); + + for (int r = 0; r < Br_f; ++r) { + float *row = acc_s + r * Bc; + float cmax = sm[r]; +#if (defined(__ARM_NEON) || defined(__ARM_NEON__)) + float32x4_t max_vec = vdupq_n_f32(cmax); + int c = 0; + for (; c <= Bc_f - 4; c += 4) { + max_vec = vmaxq_f32(max_vec, vld1q_f32(row + c)); + } + cmax = vmaxvq_f32(max_vec); + for (; c < Bc_f; ++c) cmax = std::max(cmax, row[c]); +#else + for (int c = 0; c < Bc_f; ++c) cmax = std::max(cmax, row[c]); +#endif + sm[r] = cmax; + } + + for (int r = 0; r < Br_f; ++r) ss[r] = expf(sm_p[r] - sm[r]); + + for (int r = 0; r < Br_f; ++r) { + float *row = acc_s + r * Bc; + float smax = sm[r]; + float current_sum = 0.f; +#if (defined(__ARM_NEON) || defined(__ARM_NEON__)) + float32x4_t sum_vec = vdupq_n_f32(0.0f); + const float32x4_t smax_vec = vdupq_n_f32(smax); + int c = 0; + for (; c <= Bc_f - 4; c += 4) { + float32x4_t val_vec = vld1q_f32(row + c); + val_vec = vsubq_f32(val_vec, smax_vec); + val_vec = exp_ps_f32(val_vec); // 使用快速exp + vst1q_f32(row + c, val_vec); + sum_vec = vaddq_f32(sum_vec, val_vec); + } + current_sum = vaddvq_f32(sum_vec); + for (; c < Bc_f; ++c) + if (row[c] > NEG_INF / 2) + current_sum += row[c] = expf(row[c] - smax); + else + row[c] = 0.f; +#else + for (int c = 0; c < Bc_f; ++c) + if (row[c] > NEG_INF / 2) + current_sum += row[c] = expf(row[c] - smax); + else + row[c] = 0.f; +#endif + sum[r] = current_sum; + } + for (int r = 0; r < Br_f; ++r) l[r] = l[r] * ss[r] + sum[r]; + } + */ + + void rescale(int Br_f, float *acc_o, const float *ss, int D) { + for (int r = 0; r < Br_f; ++r) { + float s_val = ss[r], *r_ptr = acc_o + r * D; +#if defined(__AVX2__) + __m256 s_vec = _mm256_set1_ps(s_val); + int d = 0; + for (; d <= D - 8; d 
+= 8) _mm256_storeu_ps(r_ptr + d, _mm256_mul_ps(_mm256_loadu_ps(r_ptr + d), s_vec)); + for (; d < D; ++d) r_ptr[d] *= s_val; +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + float32x4_t s_vec = vdupq_n_f32(s_val); + int d = 0; + for (; d <= D - 4; d += 4) vst1q_f32(r_ptr + d, vmulq_f32(vld1q_f32(r_ptr + d), s_vec)); + for (; d < D; ++d) r_ptr[d] *= s_val; +#else + for (int d = 0; d < D; ++d) r_ptr[d] *= s_val; +#endif + } + } + + void scale_and_store(int Br_f, const float *acc_o, const float *logsum, const float *v_mean, TO *O, int H, int D) { + int o_stride = H * D; + for (int r = 0; r < Br_f; ++r) { + float inv_logsum = (logsum[r] > 1e-9f) ? 1.f / logsum[r] : 0.f; + const float *o_row = acc_o + r * D; + float *O_row = O + (size_t)r * o_stride; +#if defined(__AVX2__) && defined(__FMA__) + const __m256 inv_l_vec = _mm256_set1_ps(inv_logsum); + int d = 0; + for (; d <= D - 8; d += 8) { + const __m256 o_vec = _mm256_loadu_ps(o_row + d); + const __m256 vm_vec = _mm256_loadu_ps(v_mean + d); + _mm256_storeu_ps(O_row + d, _mm256_fmadd_ps(o_vec, inv_l_vec, vm_vec)); + } + for (; d < D; ++d) O_row[d] = o_row[d] * inv_logsum + v_mean[d]; +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + const float32x4_t inv_l_vec = vdupq_n_f32(inv_logsum); + int d = 0; + for (; d <= D - 4; d += 4) { + const float32x4_t o_vec = vld1q_f32(o_row + d); + const float32x4_t vm_vec = vld1q_f32(v_mean + d); + vst1q_f32(O_row + d, vfmaq_f32(vm_vec, o_vec, inv_l_vec)); + } + for (; d < D; ++d) O_row[d] = o_row[d] * inv_logsum + v_mean[d]; +#else + for (int d = 0; d < D; ++d) O_row[d] = o_row[d] * inv_logsum + v_mean[d]; +#endif + } + } + + void sage_attn_prefill(const TQ *Q, const block_q8_0f *K_cache, const block_q8_0f *V_cache, TO *O, const float *K_mean, const float *V_mean, int32_t batch_size, int32_t head_size, int32_t seq_size_q, int32_t seq_size_k, int32_t dim_size, bool causal) { + const int32_t Tr = (seq_size_q + Br - 1) / Br, Tc = (seq_size_k + Bc - 1) / Bc; + const float 
local_scale = 1.0f / sqrtf(static_cast(dim_size)); + const int32_t kv_group = (Q_Head > 0 && KV_Head > 0) ? Q_Head / KV_Head : 1; + const int32_t num_k_blocks = dim_size / QK_K_BLOCK_SIZE; + +#pragma omp parallel for num_threads(threads) schedule(dynamic, 1) + for (int32_t b_h = 0; b_h < batch_size * head_size; ++b_h) { + int b = b_h / head_size; + int h = b_h % head_size; + const int32_t tid = omp_get_thread_num(); + const int32_t kvh = h / kv_group; + float *po = acc_o + tid * Br * dim_size, *ps = acc_s + tid * Br * Bc; + float *plog = logsum + tid * Br, *pmax = scoremax + tid * Br, *pmax_p = scoremax_prev + tid * Br; + float *pscale = score_scale + tid * Br, *psum = score_sum + tid * Br; + int8_t *p_q_q = q_quant + tid * Br * dim_size; + float *p_q_s = q_scale + tid * Br * num_k_blocks; + float *p_q_scaled = q_scaled_buf + tid * dim_size; + + int8_t *p_p_q = p_quant + tid * Br * Bc; + float *p_p_s = p_scale + tid * Br; + + const float *p_V_m = V_mean + (b * KV_Head + kvh) * dim_size; + + for (int32_t tr = 0; tr < Tr; ++tr) { + int32_t Br_f = std::min(Br, seq_size_q - tr * Br); + init_temp(plog, pmax, po, Br_f, dim_size); + const TQ *tile_q_bshd = Q + (size_t)b * seq_size_q * head_size * dim_size + (size_t)tr * Br * head_size * dim_size + (size_t)h * dim_size; + + for (int r = 0; r < Br_f; ++r) { + quantize_q_row(tile_q_bshd + r * head_size * dim_size, p_q_q + r * dim_size, p_q_s + r * num_k_blocks, dim_size, local_scale, p_q_scaled); + } + + for (int32_t tc = 0; tc < Tc; ++tc) { + int32_t Bc_f = std::min(Bc, seq_size_k - tc * Bc); + const int kv_offset = seq_size_k - seq_size_q; + + const block_q8_0f *k_cache_tile = K_cache + ((size_t)b * seq_size_k * KV_Head + (tc * Bc) * KV_Head + kvh) * num_k_blocks; + const block_q8_0f *v_cache_tile = V_cache + ((size_t)b * seq_size_k * KV_Head + (tc * Bc) * KV_Head + kvh) * num_k_blocks; + + mma0_sdot(Br_f, Bc_f, p_q_q, k_cache_tile, ps, p_q_s, dim_size, tr * Br + kv_offset, tc * Bc, causal); + softmax(Br_f, Bc_f, ps, pmax, 
pmax_p, pscale, psum, plog); + rescale(Br_f, po, pscale, dim_size); + + quantize_p_rows(Br_f, Bc_f, ps, p_p_q, p_p_s); + + mma1(Br_f, Bc_f, p_p_q, p_p_s, v_cache_tile, po, dim_size); + } + TO *tile_o_bshd = O + (size_t)b * seq_size_q * head_size * dim_size + (size_t)tr * Br * head_size * dim_size + (size_t)h * dim_size; + scale_and_store(Br_f, po, plog, p_V_m, tile_o_bshd, head_size, dim_size); + } + } + } + void sage_attn_decode(const TQ *Q, const block_q8_0f *K_cache, const block_q8_0f *V_cache, TO *O, const float *K_mean, const float *V_mean, int32_t batch_size, int32_t head_size, int32_t seq_size_k, int32_t dim_size, bool causal) { + const int32_t Tc = (seq_size_k + Bc - 1) / Bc; + const float local_scale = 1.0f / sqrtf(static_cast(dim_size)); + const int32_t kv_group = (Q_Head > 0 && KV_Head > 0) ? Q_Head / KV_Head : 1; + const int32_t num_k_blocks = dim_size / QK_K_BLOCK_SIZE; + +#pragma omp parallel for num_threads(threads) schedule(dynamic, 1) + for (int32_t b_h = 0; b_h < batch_size * head_size; ++b_h) { + int b = b_h / head_size; + int h = b_h % head_size; + const int32_t Br_f = 1; + const int32_t tid = omp_get_thread_num(); + const int32_t kvh = h / kv_group; + float *po = acc_o + tid * Br_f * dim_size, *ps = acc_s + tid * Br_f * Bc; + float *plog = logsum + tid * Br_f, *pmax = scoremax + tid * Br_f, *pmax_p = scoremax_prev + tid * Br_f; + float *pscale = score_scale + tid * Br_f, *psum = score_sum + tid * Br_f; + int8_t *p_q_q = q_quant + tid * Br_f * dim_size; + float *p_q_s = q_scale + tid * Br_f * num_k_blocks; + float *p_q_scaled = q_scaled_buf + tid * dim_size; + const float *p_V_m = V_mean + (b * KV_Head + kvh) * dim_size; + + int8_t *p_p_q = p_quant + tid * Br_f * Bc; + float *p_p_s = p_scale + tid * Br_f; + + const TQ *tile_q_decode = Q + (size_t)b * head_size * dim_size + (size_t)h * dim_size; + quantize_q_row(tile_q_decode, p_q_q, p_q_s, dim_size, local_scale, p_q_scaled); + init_temp(plog, pmax, po, Br_f, dim_size); + + for (int32_t tc = 0; 
tc < Tc; ++tc) { + int32_t Bc_f = std::min(Bc, seq_size_k - tc * Bc); + const block_q8_0f *k_cache_tile = K_cache + ((size_t)b * seq_size_k * KV_Head + (tc * Bc) * KV_Head + kvh) * num_k_blocks; + const block_q8_0f *v_cache_tile = V_cache + ((size_t)b * seq_size_k * KV_Head + (tc * Bc) * KV_Head + kvh) * num_k_blocks; + mma0_sdot(Br_f, Bc_f, p_q_q, k_cache_tile, ps, p_q_s, dim_size, seq_size_k - 1, tc * Bc, causal); + softmax(Br_f, Bc_f, ps, pmax, pmax_p, pscale, psum, plog); + rescale(Br_f, po, pscale, dim_size); + + quantize_p_rows(Br_f, Bc_f, ps, p_p_q, p_p_s); + + mma1(Br_f, Bc_f, p_p_q, p_p_s, v_cache_tile, po, dim_size); + } + TO *tile_o_bshd = O + (size_t)b * head_size * dim_size + (size_t)h * dim_size; + scale_and_store(Br_f, po, plog, p_V_m, tile_o_bshd, head_size, dim_size); + } + } +}; + +inline void sage_attention_forward_cpu_dispatch( + const float *Q, const void *K_in, const void *V_in, const float *K_mean_ext, + const float *V_mean_ext, float *O, int32_t batch_size, int32_t q_head, + int32_t kv_head, int32_t seq_size_q, int32_t seq_size_k, int32_t dim_size, + bool causal_mask, int32_t threads, int32_t br, int32_t bc) { + if (dim_size % QK_K_BLOCK_SIZE != 0) { + std::cerr << "Error: dim_size must be divisible by QK_K_BLOCK_SIZE" << std::endl; + return; + } + const int32_t num_k_blocks = dim_size / QK_K_BLOCK_SIZE; + + thread_local WorkspaceManager manager; + SAGE_CPU_IMPL_KVQ8 op; + op.configure(br, bc, q_head, kv_head, threads); + + const int32_t current_br = (seq_size_q > 1) ? 
br : 1; + const std::vector ws_sizes = { + (size_t)threads * current_br * dim_size * sizeof(float), // 0: acc_o + (size_t)threads * current_br * bc * sizeof(float), // 1: acc_s + (size_t)threads * current_br * sizeof(float), // 2: logsum + (size_t)threads * current_br * sizeof(float), // 3: scoremax + (size_t)threads * current_br * sizeof(float), // 4: scoremax_prev + (size_t)threads * current_br * sizeof(float), // 5: score_scale + (size_t)threads * current_br * sizeof(float), // 6: score_sum + (size_t)threads * current_br * dim_size * sizeof(int8_t), // 7: q_quant + (size_t)threads * current_br * num_k_blocks * sizeof(float), // 8: q_scale + (size_t)threads * dim_size * sizeof(float), // 9: q_scaled_buf (for quantize_q_row) + (size_t)threads * current_br * bc * sizeof(int8_t), // 10: p_quant [NEW] + (size_t)threads * current_br * sizeof(float), // 11: p_scale [NEW] + }; + void **workspace = manager.get_workspace(ws_sizes); + op.init_workspace(workspace); + + if (seq_size_q > 1) { + op.sage_attn_prefill(Q, (const block_q8_0f *)K_in, (const block_q8_0f *)V_in, O, K_mean_ext, V_mean_ext, batch_size, q_head, + seq_size_q, seq_size_k, dim_size, causal_mask); + } else { + op.sage_attn_decode(Q, (const block_q8_0f *)K_in, (const block_q8_0f *)V_in, O, K_mean_ext, V_mean_ext, batch_size, q_head, + seq_size_k, dim_size, causal_mask); + } +} + +} // namespace seq_attn_kvq8 +#endif // SAGE_ATTENTION_KVQ8_HPP \ No newline at end of file diff --git a/mllm/backends/cpu/compute/SageAttentionPT.hpp b/mllm/backends/cpu/compute/SageAttentionPT.hpp new file mode 100644 index 000000000..a89230664 --- /dev/null +++ b/mllm/backends/cpu/compute/SageAttentionPT.hpp @@ -0,0 +1,834 @@ +// 文件名: sage_attention_unified_final_with_simd.cpp +#include +#include +#include +#include +#include +#include +#include +#include +#include +// #include +#include +// #include +#include +#include + +// --- SIMD Intrinsics --- +#ifdef __AVX2__ +#include +#include +#elif (defined(__ARM_NEON) || 
defined(__ARM_NEON__)) +#include +#if defined(__ARM_FP16_FORMAT_IEEE) && !defined(_MSC_VER) +#include +#endif +#endif + +#include "Types.hpp" +#include "backends/cpu/third_party/ggml/QuantizeFP16.hpp" + +namespace sage_attn_pt_cpu { + +#define NEG_INF std::numeric_limits::lowest() + +template +inline float to_float(T val); +template <> +inline float to_float(float val) { + return val; +} +template <> +inline float to_float(mllm_fp16_t val) { + return MLLM_FP16_TO_FP32(val); +} + +#ifdef __AVX2__ +inline float _mm256_hmax_ps(__m256 x) { + __m128 lo = _mm256_castps256_ps128(x); + __m128 hi = _mm256_extractf128_ps(x, 1); + __m128 max_val = _mm_max_ps(lo, hi); + max_val = _mm_max_ps(max_val, _mm_shuffle_ps(max_val, max_val, _MM_SHUFFLE(0, 0, 2, 2))); + max_val = _mm_max_ps(max_val, _mm_shuffle_ps(max_val, max_val, _MM_SHUFFLE(0, 0, 0, 1))); + return _mm_cvtss_f32(max_val); +} +inline float hsum_ps(__m256 v) { + __m128 vlow = _mm256_castps256_ps128(v); + __m128 vhigh = _mm256_extractf128_ps(v, 1); + vlow = _mm_add_ps(vlow, vhigh); + __m128 shuf = _mm_movehdup_ps(vlow); + __m128 sums = _mm_add_ps(vlow, shuf); + shuf = _mm_movehl_ps(shuf, sums); + sums = _mm_add_ss(sums, shuf); + return _mm_cvtss_f32(sums); +} +inline int32_t hsum_i32(__m256i v) { + __m128i vlo = _mm256_castsi256_si128(v); + __m128i vhi = _mm256_extracti128_si256(v, 1); + __m128i vsum = _mm_add_epi32(vlo, vhi); + vsum = _mm_add_epi32(vsum, _mm_shuffle_epi32(vsum, _MM_SHUFFLE(1, 0, 3, 2))); + vsum = _mm_add_epi32(vsum, _mm_shuffle_epi32(vsum, _MM_SHUFFLE(2, 3, 0, 1))); + return _mm_cvtsi128_si32(vsum); +} + +inline __m256 load_and_convert_to_fp32_vec(const float *ptr) { + return _mm256_loadu_ps(ptr); +} +#ifdef __F16C__ +inline __m256 load_and_convert_to_fp32_vec(const mllm_fp16_t *ptr) { + return _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)ptr)); +} +#endif +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) +inline float _vmaxvq_f32_hmax(float32x4_t x) { + return vmaxvq_f32(x); +} +inline void 
load_and_convert_to_fp32x4x2(const float *ptr, float32x4_t &out_lo, float32x4_t &out_hi) { + out_lo = vld1q_f32(ptr); + out_hi = vld1q_f32(ptr + 4); +} +#if defined(__ARM_FP16_FORMAT_IEEE) +inline void load_and_convert_to_fp32x4x2(const mllm_fp16_t *ptr, float32x4_t &out_lo, float32x4_t &out_hi) { + float16x8_t v_f16 = vld1q_f16(reinterpret_cast(ptr)); + out_lo = vcvt_f32_f16(vget_low_f16(v_f16)); + out_hi = vcvt_f32_f16(vget_high_f16(v_f16)); +} +#endif +#endif + +inline void aligned_alloc(void **ptr, size_t required_bytes, size_t align) { + if (align % sizeof(void *) != 0 || (align & (align - 1)) != 0) { + *ptr = nullptr; + return; + } + if (posix_memalign(ptr, align, required_bytes) != 0) { *ptr = nullptr; } +} +inline void aligned_free(void *ptr) { + free(ptr); +} + +inline void quantize_row_simd(const float *float_row, int8_t *int8_row, float *scale, int dim_size, float sm_scale, float *temp_buf) { + for (int d = 0; d < dim_size; ++d) { temp_buf[d] = float_row[d] * sm_scale; } + float max_abs_val = 0.0f; +#if defined(__AVX2__) + __m256 max_vec = _mm256_setzero_ps(); + const __m256 abs_mask = _mm256_castsi256_ps(_mm256_set1_epi32(0x7FFFFFFF)); + int d = 0; + for (; d <= dim_size - 8; d += 8) max_vec = _mm256_max_ps(max_vec, _mm256_and_ps(_mm256_loadu_ps(temp_buf + d), abs_mask)); + max_abs_val = _mm256_hmax_ps(max_vec); + for (; d < dim_size; ++d) max_abs_val = std::max(max_abs_val, fabsf(temp_buf[d])); +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + float32x4_t max_vec = vdupq_n_f32(0.0f); + int d = 0; + for (; d <= dim_size - 4; d += 4) max_vec = vmaxq_f32(max_vec, vabsq_f32(vld1q_f32(temp_buf + d))); + max_abs_val = vmaxvq_f32(max_vec); + for (; d < dim_size; ++d) max_abs_val = std::max(max_abs_val, fabsf(temp_buf[d])); +#else + for (int d = 0; d < dim_size; ++d) max_abs_val = std::max(max_abs_val, fabsf(temp_buf[d])); +#endif + *scale = (max_abs_val > 1e-9f) ? max_abs_val / 127.0f : 0.0f; + const float inv_scale = (*scale > 1e-9f) ? 
1.0f / *scale : 0.0f; +#if defined(__AVX2__) + __m256 inv_scale_vec = _mm256_set1_ps(inv_scale); + d = 0; + for (; d <= dim_size - 8; d += 8) { + __m256i val_i32 = _mm256_cvtps_epi32(_mm256_mul_ps(_mm256_loadu_ps(temp_buf + d), inv_scale_vec)); + __m128i val_i16 = _mm_packs_epi32(_mm256_castsi256_si128(val_i32), _mm256_extracti128_si256(val_i32, 1)); + __m128i val_i8 = _mm_packs_epi16(val_i16, val_i16); + *(int64_t *)(int8_row + d) = _mm_cvtsi128_si64(val_i8); + } + for (; d < dim_size; ++d) int8_row[d] = static_cast(roundf(temp_buf[d] * inv_scale)); +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + float32x4_t inv_scale_vec = vdupq_n_f32(inv_scale); + d = 0; + for (; d <= dim_size - 16; d += 16) { + int32x4_t i32_0 = vcvtaq_s32_f32(vmulq_f32(vld1q_f32(temp_buf + d + 0), inv_scale_vec)); + int32x4_t i32_1 = vcvtaq_s32_f32(vmulq_f32(vld1q_f32(temp_buf + d + 4), inv_scale_vec)); + int32x4_t i32_2 = vcvtaq_s32_f32(vmulq_f32(vld1q_f32(temp_buf + d + 8), inv_scale_vec)); + int32x4_t i32_3 = vcvtaq_s32_f32(vmulq_f32(vld1q_f32(temp_buf + d + 12), inv_scale_vec)); + int16x8_t i16_0 = vcombine_s16(vqmovn_s32(i32_0), vqmovn_s32(i32_1)); + int16x8_t i16_1 = vcombine_s16(vqmovn_s32(i32_2), vqmovn_s32(i32_3)); + vst1q_s8(int8_row + d, vcombine_s8(vqmovn_s16(i16_0), vqmovn_s16(i16_1))); + } + for (; d < dim_size; ++d) int8_row[d] = static_cast(roundf(temp_buf[d] * inv_scale)); +#else + for (int d = 0; d < dim_size; ++d) int8_row[d] = static_cast(roundf(temp_buf[d] * inv_scale)); +#endif +} + +template +void compute_channel_means(const KVDtype *tensor, float *mean_tensor, int batch_size, int head_size, int seq_len, int dim_size) { +#pragma omp parallel for collapse(3) + for (int b = 0; b < batch_size; ++b) { + for (int h = 0; h < head_size; ++h) { + for (int d = 0; d < dim_size; d += 8) { // AVX2 processes 8 floats at a time +#if (defined(__ARM_NEON) || defined(__ARM_NEON__)) && defined(__ARM_FP16_FORMAT_IEEE) + float32x4_t sum_vec = vdupq_n_f32(0.0f); + for (int s = 0; s < 
seq_len; ++s) { + int idx = b * (seq_len * head_size * dim_size) + s * (head_size * dim_size) + h * (dim_size) + d; + float32x4_t val_vec; + if constexpr (std::is_same_v) { + val_vec = vcvt_f32_f16(vld1_f16(reinterpret_cast(tensor + idx))); + } else { + val_vec = vld1q_f32(tensor + idx); + } + sum_vec = vaddq_f32(sum_vec, val_vec); + } + float inv_seq_len = 1.0f / seq_len; + float32x4_t mean_vec = vmulq_f32(sum_vec, vdupq_n_f32(inv_seq_len)); + // Note: Original code used d+=4, but NEON loop processed only 4 floats. + // Assuming it should be d+=4 for the NEON part. Let's stick to the original logic. + vst1q_f32(mean_tensor + b * head_size * dim_size + h * dim_size + d, mean_vec); +#elif defined(__AVX2__) + // =========== AVX2 IMPLEMENTATION START =========== + if (d + 8 > dim_size) { // Handle remainder + for (int i = 0; i < (dim_size - d); ++i) { + double sum = 0.0; + for (int s = 0; s < seq_len; ++s) { + int idx = b * (seq_len * head_size * dim_size) + s * (head_size * dim_size) + h * (dim_size) + d + i; + sum += to_float(tensor[idx]); + } + mean_tensor[b * head_size * dim_size + h * dim_size + d + i] = static_cast(sum / seq_len); + } + continue; // Skip to next d in the outer loop + } + + __m256 sum_vec = _mm256_setzero_ps(); + for (int s = 0; s < seq_len; ++s) { + const KVDtype *current_row = tensor + b * (seq_len * head_size * dim_size) + s * (head_size * dim_size) + h * (dim_size) + d; + __m256 val_vec = load_and_convert_to_fp32_vec(current_row); + sum_vec = _mm256_add_ps(sum_vec, val_vec); + } + const float inv_seq_len = 1.0f / seq_len; + __m256 inv_len_vec = _mm256_set1_ps(inv_seq_len); + __m256 mean_vec = _mm256_mul_ps(sum_vec, inv_len_vec); + _mm256_storeu_ps(mean_tensor + b * head_size * dim_size + h * dim_size + d, mean_vec); + // =========== AVX2 IMPLEMENTATION END =========== +#else + // Fallback for non-AVX2/NEON + double sum[8] = {0.0}; + for (int s = 0; s < seq_len; ++s) { + for (int i = 0; i < 8; ++i) { + if (d + i < dim_size) { + int idx = b * 
(seq_len * head_size * dim_size) + s * (head_size * dim_size) + h * (dim_size) + d + i; + sum[i] += to_float(tensor[idx]); + } + } + } + for (int i = 0; i < 8; ++i) { + if (d + i < dim_size) { + mean_tensor[b * head_size * dim_size + h * dim_size + d + i] = static_cast(sum[i] / seq_len); + } + } +#endif + } + } + } +} + +template +void compute_mean_and_quantize_k( + const KVDtype *K, + float *mean_tensor, + int8_t *k_quant_global, + float *k_scale_global, + int batch_size, int kv_head_size, int seq_size_k, int dim_size, + int threads, + float *temp_k_sum, + float *temp_k_smoothed) { +#pragma omp parallel for num_threads(threads) collapse(2) + for (int b = 0; b < batch_size; ++b) { + for (int h = 0; h < kv_head_size; ++h) { + const int thread_id = omp_get_thread_num(); + float *thread_sum_buf = temp_k_sum + thread_id * dim_size; + float *thread_smoothed_buf = temp_k_smoothed + thread_id * dim_size; + + float *target_mean = mean_tensor + b * kv_head_size * dim_size + h * dim_size; + int8_t *target_k_quant = k_quant_global + (b * kv_head_size + h) * seq_size_k * dim_size; + float *target_k_scale = k_scale_global + (b * kv_head_size + h) * seq_size_k; + + const int k_stride = kv_head_size * dim_size; + + memset(thread_sum_buf, 0, dim_size * sizeof(float)); + + for (int s = 0; s < seq_size_k; ++s) { + const KVDtype *k_row = K + b * seq_size_k * k_stride + s * k_stride + h * dim_size; + for (int d = 0; d < dim_size; ++d) { + thread_sum_buf[d] += to_float(k_row[d]); + } + } + + float inv_seq_len = 1.0f / seq_size_k; + for (int d = 0; d < dim_size; ++d) { + target_mean[d] = thread_sum_buf[d] * inv_seq_len; + } + + for (int s = 0; s < seq_size_k; ++s) { + const KVDtype *k_row = K + b * seq_size_k * k_stride + s * k_stride + h * dim_size; + for (int d = 0; d < dim_size; ++d) { + thread_smoothed_buf[d] = to_float(k_row[d]) - target_mean[d]; + } + quantize_row_simd(thread_smoothed_buf, target_k_quant + s * dim_size, &target_k_scale[s], dim_size, 1.0f, thread_sum_buf); + } + } 
+ } +} + +class WorkspaceManager { +public: + WorkspaceManager() = default; + ~WorkspaceManager() { + for (auto &ptr : workspace_) { + if (ptr) aligned_free(ptr); + } + } + void **get_workspace(const std::vector &required_sizes) { + if (workspace_.empty()) { + workspace_.resize(required_sizes.size(), nullptr); + current_sizes_.resize(required_sizes.size(), 0); + } + for (size_t i = 0; i < required_sizes.size(); ++i) { + if (required_sizes[i] > current_sizes_[i]) { + if (workspace_[i]) aligned_free(workspace_[i]); + aligned_alloc(&workspace_[i], required_sizes[i], 64); + current_sizes_[i] = required_sizes[i]; + } + } + return workspace_.data(); + } + +private: + std::vector workspace_; + std::vector current_sizes_; +}; + +template +struct SAGE_CPU_IMPL { + using dtype_q_in_t = float; + using dtype_kv_in_t = KVDtype; + using dtype_out_t = float; + int32_t Br, Bc, Q_Head, KV_Head, threads; + float *acc_o_, *acc_s_, *logsum_, *scoremax_, *scoremax_prev_, *score_scale_, *score_sum_; + int8_t *q_quant_tile_, *k_quant_tile_; + float *q_scale_, *k_scale_, *k_smoothed_row_buf_, *q_scaled_row_buf_; + + void configure(int32_t Br_, int32_t Bc_, int32_t Q_Head_, int32_t KV_Head_, int32_t threads_) { + Br = Br_; + Bc = Bc_; + Q_Head = Q_Head_; + KV_Head = KV_Head_; + threads = threads_; + } + void init_workspace(void **workspace) { + acc_o_ = static_cast(workspace[0]); + acc_s_ = static_cast(workspace[1]); + logsum_ = static_cast(workspace[2]); + scoremax_ = static_cast(workspace[3]); + scoremax_prev_ = static_cast(workspace[4]); + score_scale_ = static_cast(workspace[5]); + score_sum_ = static_cast(workspace[6]); + q_quant_tile_ = static_cast(workspace[7]); + k_quant_tile_ = static_cast(workspace[8]); + q_scale_ = static_cast(workspace[9]); + k_scale_ = static_cast(workspace[10]); + k_smoothed_row_buf_ = static_cast(workspace[11]); + q_scaled_row_buf_ = static_cast(workspace[12]); + } + + void sage_attn_prefill(const dtype_q_in_t *__restrict__ Q, const dtype_kv_in_t 
*__restrict__ K, const dtype_kv_in_t *__restrict__ V, dtype_out_t *__restrict__ O, const float *K_mean, const float *V_mean, int32_t batch_size, int32_t head_size, int32_t seq_size_q, int32_t seq_size_k, int32_t dim_size, bool causal_mask) { + const int32_t Tr = (seq_size_q + Br - 1) / Br; + const int32_t Tc = (seq_size_k + Bc - 1) / Bc; + const float local_scale = 1.0f / sqrtf(static_cast(dim_size)); + const int32_t kv_group_size = (Q_Head > 0 && KV_Head > 0) ? Q_Head / KV_Head : 1; + + for (int32_t b_idx = 0; b_idx < batch_size; ++b_idx) { +#pragma omp parallel for num_threads(threads) schedule(dynamic, 1) + for (int32_t h_idx = 0; h_idx < head_size; ++h_idx) { + const int32_t thread_id = omp_get_thread_num(); + const int32_t this_thread_kv_head = h_idx / kv_group_size; + + float *p_acc_o = acc_o_ + thread_id * Br * dim_size; + float *p_acc_s = acc_s_ + thread_id * Br * Bc; + float *p_logsum = logsum_ + thread_id * Br; + float *p_scoremax = scoremax_ + thread_id * Br; + float *p_scoremax_prev = scoremax_prev_ + thread_id * Br; + float *p_score_scale = score_scale_ + thread_id * Br; + float *p_score_sum = score_sum_ + thread_id * Br; + int8_t *p_q_quant = q_quant_tile_ + thread_id * Br * dim_size; + const int8_t *p_k_quant_global = k_quant_tile_ + (b_idx * KV_Head + this_thread_kv_head) * seq_size_k * dim_size; + float *p_q_scale = q_scale_ + thread_id * Br; + const float *p_k_scale_global = k_scale_ + (b_idx * KV_Head + this_thread_kv_head) * seq_size_k; + float *p_q_scaled = q_scaled_row_buf_ + thread_id * dim_size; + + const float *p_V_mean = V_mean + b_idx * KV_Head * dim_size + this_thread_kv_head * dim_size; + const int k_stride = KV_Head * dim_size; + + for (int32_t t_r_idx = 0; t_r_idx < Tr; ++t_r_idx) { + int32_t Br_fixed = std::min(Br, seq_size_q - t_r_idx * Br); + init_temp(p_logsum, p_scoremax, p_acc_o, Br_fixed, dim_size); + + const dtype_q_in_t *tile_q_main = Q + b_idx * seq_size_q * head_size * dim_size + t_r_idx * Br * head_size * dim_size + h_idx 
* dim_size; + for (int r = 0; r < Br_fixed; ++r) { + quantize_row_simd(tile_q_main + r * (head_size * dim_size), p_q_quant + r * dim_size, &p_q_scale[r], dim_size, local_scale, p_q_scaled); + } + + for (int32_t t_c_idx = 0; t_c_idx < Tc; ++t_c_idx) { + int32_t Bc_fixed = std::min(Bc, seq_size_k - t_c_idx * Bc); + const dtype_kv_in_t *tile_v = V + b_idx * seq_size_k * k_stride + t_c_idx * Bc * k_stride + this_thread_kv_head * dim_size; + + quantize_and_mma0_sdot(Br_fixed, Bc_fixed, p_q_quant, p_k_quant_global + t_c_idx * Bc * dim_size, p_acc_s, p_q_scale, p_k_scale_global + t_c_idx * Bc, dim_size, t_r_idx * Br, t_c_idx * Bc, causal_mask); + softmax(Br_fixed, Bc_fixed, p_acc_s, p_scoremax, p_scoremax_prev, p_score_scale, p_score_sum, p_logsum); + rescale(Br_fixed, p_acc_o, p_score_scale, dim_size); + mma1(Br_fixed, Bc_fixed, p_acc_s, tile_v, p_V_mean, p_acc_o, KV_Head, dim_size); + } + + dtype_out_t *tile_o = O + b_idx * seq_size_q * head_size * dim_size + t_r_idx * Br * head_size * dim_size + h_idx * dim_size; + scale_and_store(Br_fixed, p_acc_o, p_logsum, p_V_mean, tile_o, head_size, dim_size); + } + } + } + } + + void sage_attn_decode(const dtype_q_in_t *__restrict__ Q, const dtype_kv_in_t *__restrict__ K, const dtype_kv_in_t *__restrict__ V, dtype_out_t *__restrict__ O, const float *K_mean, const float *V_mean, int32_t batch_size, int32_t head_size, int32_t seq_size_k, int32_t dim_size, bool causal_mask) { + const int32_t Tc = (seq_size_k + Bc - 1) / Bc; + const float local_scale = 1.0f / sqrtf(static_cast(dim_size)); + const int32_t kv_group_size = (Q_Head > 0 && KV_Head > 0) ? 
Q_Head / KV_Head : 1; + + for (int32_t b_idx = 0; b_idx < batch_size; ++b_idx) { +#pragma omp parallel for num_threads(threads) schedule(dynamic, 1) + for (int32_t h_idx = 0; h_idx < head_size; ++h_idx) { + const int32_t Br_fixed = 1; + const int32_t thread_id = omp_get_thread_num(); + const int32_t this_thread_kv_head = h_idx / kv_group_size; + + float *p_acc_o = acc_o_ + thread_id * Br_fixed * dim_size; + float *p_acc_s = acc_s_ + thread_id * Br_fixed * Bc; + float *p_logsum = logsum_ + thread_id * Br_fixed; + float *p_scoremax = scoremax_ + thread_id * Br_fixed; + float *p_scoremax_prev = scoremax_prev_ + thread_id * Br_fixed; + float *p_score_scale = score_scale_ + thread_id * Br_fixed; + float *p_score_sum = score_sum_ + thread_id * Br_fixed; + int8_t *p_q_quant = q_quant_tile_ + thread_id * Br_fixed * dim_size; + const int8_t *p_k_quant_global = k_quant_tile_ + (b_idx * KV_Head + this_thread_kv_head) * seq_size_k * dim_size; + float *p_q_scale = q_scale_ + thread_id * Br_fixed; + const float *p_k_scale_global = k_scale_ + (b_idx * KV_Head + this_thread_kv_head) * seq_size_k; + float *p_q_scaled = q_scaled_row_buf_ + thread_id * dim_size; + + const float *p_V_mean = V_mean + b_idx * KV_Head * dim_size + this_thread_kv_head * dim_size; + const int k_stride = KV_Head * dim_size; + + const dtype_q_in_t *tile_q_decode = Q + b_idx * head_size * dim_size + h_idx * dim_size; + quantize_row_simd(tile_q_decode, p_q_quant, p_q_scale, dim_size, local_scale, p_q_scaled); + + init_temp(p_logsum, p_scoremax, p_acc_o, Br_fixed, dim_size); + + for (int32_t t_c_idx = 0; t_c_idx < Tc; ++t_c_idx) { + int32_t Bc_fixed = std::min(Bc, seq_size_k - t_c_idx * Bc); + const dtype_kv_in_t *tile_v = V + b_idx * seq_size_k * k_stride + t_c_idx * Bc * k_stride + this_thread_kv_head * dim_size; + + quantize_and_mma0_sdot(Br_fixed, Bc_fixed, p_q_quant, p_k_quant_global + t_c_idx * Bc * dim_size, p_acc_s, p_q_scale, p_k_scale_global + t_c_idx * Bc, dim_size, seq_size_k - 1, t_c_idx * Bc, 
causal_mask); + softmax(Br_fixed, Bc_fixed, p_acc_s, p_scoremax, p_scoremax_prev, p_score_scale, p_score_sum, p_logsum); + rescale(Br_fixed, p_acc_o, p_score_scale, dim_size); + mma1(Br_fixed, Bc_fixed, p_acc_s, tile_v, p_V_mean, p_acc_o, KV_Head, dim_size); + } + dtype_out_t *tile_o = O + b_idx * head_size * dim_size + h_idx * dim_size; + scale_and_store(Br_fixed, p_acc_o, p_logsum, p_V_mean, tile_o, head_size, dim_size); + } + } + } + + void init_temp(float *logsum, float *scoremax, float *acc_o, int Br_fixed, int dim_size) { + for (int i = 0; i < Br_fixed; ++i) { + logsum[i] = 0.0f; + scoremax[i] = NEG_INF; + } + memset(acc_o, 0, Br_fixed * dim_size * sizeof(float)); + } + + void quantize_and_mma0_sdot(int Br_fixed, int Bc_fixed, const int8_t *q_quant_tile, const int8_t *k_quant_tile, float *acc_s, const float *q_scale, const float *k_scale, int dim_size, int global_r_start, int global_c_start, bool causal) { +#if (defined(__ARM_NEON) || defined(__ARM_NEON__)) && defined(__ARM_FEATURE_DOTPROD) + constexpr int MR = 4; + constexpr int NR = 4; + const int br_4_end = (Br_fixed / MR) * MR; + const int bc_4_end = (Bc_fixed / NR) * NR; + + int r = 0; + for (; r < br_4_end; r += MR) { + int c = 0; + for (; c < bc_4_end; c += NR) { + int32x4_t vacc[MR][NR]; + for (int i = 0; i < MR; ++i) + for (int j = 0; j < NR; ++j) vacc[i][j] = vdupq_n_s32(0); + + const int8_t *q_ptr[MR]; + for (int i = 0; i < MR; ++i) q_ptr[i] = q_quant_tile + (r + i) * dim_size; + const int8_t *k_ptr[NR]; + for (int j = 0; j < NR; ++j) k_ptr[j] = k_quant_tile + (c + j) * dim_size; + + int d = 0; + for (; d <= dim_size - 16; d += 16) { + int8x16_t k0 = vld1q_s8(k_ptr[0] + d); + int8x16_t k1 = vld1q_s8(k_ptr[1] + d); + int8x16_t k2 = vld1q_s8(k_ptr[2] + d); + int8x16_t k3 = vld1q_s8(k_ptr[3] + d); + for (int i = 0; i < MR; ++i) { + int8x16_t q_vec = vld1q_s8(q_ptr[i] + d); + vacc[i][0] = vdotq_s32(vacc[i][0], q_vec, k0); + vacc[i][1] = vdotq_s32(vacc[i][1], q_vec, k1); + vacc[i][2] = 
vdotq_s32(vacc[i][2], q_vec, k2); + vacc[i][3] = vdotq_s32(vacc[i][3], q_vec, k3); + } + } + + for (int i = 0; i < MR; ++i) { + for (int j = 0; j < NR; ++j) { + int32_t total_i32 = vaddvq_s32(vacc[i][j]); + for (int d_tail = d; d_tail < dim_size; ++d_tail) total_i32 += q_ptr[i][d_tail] * k_ptr[j][d_tail]; + if (causal && (global_c_start + c + j) > (global_r_start + r + i)) { + acc_s[(r + i) * Bc + c + j] = NEG_INF; + } else { + acc_s[(r + i) * Bc + c + j] = (float)total_i32 * q_scale[r + i] * k_scale[c + j]; + } + } + } + } + } + + for (r = 0; r < Br_fixed; ++r) { + int start_c = (r < br_4_end) ? bc_4_end : 0; + for (int c = start_c; c < Bc_fixed; ++c) { + if (causal && (global_c_start + c) > (global_r_start + r)) { + acc_s[r * Bc + c] = NEG_INF; + continue; + } + const int8_t *q_quant_line = q_quant_tile + r * dim_size; + const int8_t *k_quant_line = k_quant_tile + c * dim_size; + int32x4_t acc_i32_vec = vdupq_n_s32(0); + int d = 0; + for (; d <= dim_size - 16; d += 16) acc_i32_vec = vdotq_s32(acc_i32_vec, vld1q_s8(q_quant_line + d), vld1q_s8(k_quant_line + d)); + int32_t total_i32 = vaddvq_s32(acc_i32_vec); + for (; d < dim_size; ++d) total_i32 += q_quant_line[d] * k_quant_line[d]; + acc_s[r * Bc + c] = (float)total_i32 * q_scale[r] * k_scale[c]; + } + } +#elif defined(__AVX2__) + // =========== AVX2 IMPLEMENTATION START (FIXED) =========== + for (int r = 0; r < Br_fixed; ++r) { + for (int c = 0; c < Bc_fixed; ++c) { + if (causal && (global_c_start + c) > (global_r_start + r)) { + acc_s[r * Bc + c] = NEG_INF; + continue; + } + const int8_t *q_quant_line = q_quant_tile + r * dim_size; + const int8_t *k_quant_line = k_quant_tile + c * dim_size; + + // Accumulator for 8x 32-bit integers + __m256i acc_i32_v = _mm256_setzero_si256(); + int d = 0; + + // Process 16 bytes at a time, as we expand 8-bit to 16-bit + for (; d <= dim_size - 16; d += 16) { + // Load 16 int8 values from Q and K + __m128i q_i8_v = _mm_loadu_si128((const __m128i *)(q_quant_line + d)); + __m128i 
k_i8_v = _mm_loadu_si128((const __m128i *)(k_quant_line + d)); + + // Convert signed 8-bit integers to signed 16-bit integers + __m256i q_i16_v = _mm256_cvtepi8_epi16(q_i8_v); + __m256i k_i16_v = _mm256_cvtepi8_epi16(k_i8_v); + + // Multiply signed 16-bit integers and horizontally add adjacent pairs + // This computes dot products of 2-element chunks and stores them in 32-bit lanes + __m256i prod_i32_v = _mm256_madd_epi16(q_i16_v, k_i16_v); + + // Accumulate the 32-bit results + acc_i32_v = _mm256_add_epi32(acc_i32_v, prod_i32_v); + } + + // Horizontally sum the 8 integer results in the accumulator vector + int32_t total_i32 = hsum_i32(acc_i32_v); + + // Handle remainder + for (; d < dim_size; ++d) { + total_i32 += q_quant_line[d] * k_quant_line[d]; + } + + acc_s[r * Bc + c] = (float)total_i32 * q_scale[r] * k_scale[c]; + } + } + // =========== AVX2 IMPLEMENTATION END =========== +#else + // Fallback for other platforms + for (int r = 0; r < Br_fixed; ++r) { + for (int c = 0; c < Bc_fixed; ++c) { + if (causal && (global_c_start + c) > (global_r_start + r)) { + acc_s[r * Bc + c] = NEG_INF; + continue; + } + const int8_t *q_quant_line = q_quant_tile + r * dim_size; + const int8_t *k_quant_line = k_quant_tile + c * dim_size; + int32_t total_i32 = 0; + for (int d = 0; d < dim_size; ++d) total_i32 += q_quant_line[d] * k_quant_line[d]; + acc_s[r * Bc + c] = (float)total_i32 * q_scale[r] * k_scale[c]; + } + } +#endif + } + + void softmax(int Br_fixed, int Bc_fixed, float *acc_s, float *scoremax, float *scoremax_prev, float *score_scale, float *score_sum, float *logsum) { + memcpy(scoremax_prev, scoremax, Br_fixed * sizeof(float)); + for (int r = 0; r < Br_fixed; ++r) { + float *row = acc_s + r * Bc; + float current_max = scoremax[r]; + for (int c = 0; c < Bc_fixed; ++c) current_max = std::max(current_max, row[c]); + scoremax[r] = current_max; + } + for (int r = 0; r < Br_fixed; ++r) score_scale[r] = expf(scoremax_prev[r] - scoremax[r]); + for (int r = 0; r < Br_fixed; 
++r) { + float *row = acc_s + r * Bc; + float sm = scoremax[r]; + float sum = 0.f; + for (int c = 0; c < Bc_fixed; ++c) { + if (row[c] > NEG_INF / 2) { + float val = expf(row[c] - sm); + row[c] = val; + sum += val; + } else { + row[c] = 0.f; + } + } + score_sum[r] = sum; + } + for (int r = 0; r < Br_fixed; ++r) logsum[r] = logsum[r] * score_scale[r] + score_sum[r]; + } + void rescale(int Br_fixed, float *acc_o, const float *score_scale, int dim_size) { + for (int r = 0; r < Br_fixed; ++r) { + float scale_val = score_scale[r]; + float *row_ptr = acc_o + r * dim_size; +#if defined(__AVX2__) + __m256 scale_vec = _mm256_set1_ps(scale_val); + int d = 0; + for (; d <= dim_size - 8; d += 8) _mm256_storeu_ps(row_ptr + d, _mm256_mul_ps(_mm256_loadu_ps(row_ptr + d), scale_vec)); + for (; d < dim_size; ++d) row_ptr[d] *= scale_val; +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + float32x4_t scale_vec = vdupq_n_f32(scale_val); + int d = 0; + for (; d <= dim_size - 4; d += 4) vst1q_f32(row_ptr + d, vmulq_f32(vld1q_f32(row_ptr + d), scale_vec)); + for (; d < dim_size; ++d) row_ptr[d] *= scale_val; +#else + for (int d = 0; d < dim_size; ++d) row_ptr[d] *= scale_val; +#endif + } + } + + void mma1(int Br_fixed, int Bc_fixed, const float *p_block, const dtype_kv_in_t *v_block, const float *v_mean, float *acc_o, int kv_head_size, int dim_size) { + int v_stride = kv_head_size * dim_size; + +#if (defined(__ARM_NEON) || defined(__ARM_NEON__)) && defined(__ARM_FP16_FORMAT_IEEE) + for (int r = 0; r < Br_fixed; ++r) { + float *o_row = acc_o + r * dim_size; + const float *p_row = p_block + r * Bc; + + for (int d = 0; d < dim_size; d += 4) { + float32x4_t o_acc_vec = vld1q_f32(o_row + d); + const float32x4_t vm_vec = vld1q_f32(v_mean + d); + + int c = 0; + for (; c <= Bc_fixed - 4; c += 4) { + // 预取更远的数据 + __builtin_prefetch(v_block + (c + 8) * v_stride + d, 0, 0); + + // 加载 4 个 P 标量 + const float p0 = p_row[c + 0]; + const float p1 = p_row[c + 1]; + const float p2 = p_row[c + 2]; + 
const float p3 = p_row[c + 3]; + + // 加载 4 个 V 向量 + const dtype_kv_in_t *v_row0 = v_block + (c + 0) * v_stride; + const dtype_kv_in_t *v_row1 = v_block + (c + 1) * v_stride; + const dtype_kv_in_t *v_row2 = v_block + (c + 2) * v_stride; + const dtype_kv_in_t *v_row3 = v_block + (c + 3) * v_stride; + + float32x4_t v_vec0, v_vec1, v_vec2, v_vec3; + if constexpr (std::is_same_v) { + v_vec0 = vcvt_f32_f16(vld1_f16(reinterpret_cast(v_row0 + d))); + v_vec1 = vcvt_f32_f16(vld1_f16(reinterpret_cast(v_row1 + d))); + v_vec2 = vcvt_f32_f16(vld1_f16(reinterpret_cast(v_row2 + d))); + v_vec3 = vcvt_f32_f16(vld1_f16(reinterpret_cast(v_row3 + d))); + } else { // float + v_vec0 = vld1q_f32(v_row0 + d); + v_vec1 = vld1q_f32(v_row1 + d); + v_vec2 = vld1q_f32(v_row2 + d); + v_vec3 = vld1q_f32(v_row3 + d); + } + + // 4组独立的 FMA 运算 + o_acc_vec = vfmaq_n_f32(o_acc_vec, vsubq_f32(v_vec0, vm_vec), p0); + o_acc_vec = vfmaq_n_f32(o_acc_vec, vsubq_f32(v_vec1, vm_vec), p1); + o_acc_vec = vfmaq_n_f32(o_acc_vec, vsubq_f32(v_vec2, vm_vec), p2); + o_acc_vec = vfmaq_n_f32(o_acc_vec, vsubq_f32(v_vec3, vm_vec), p3); + } + + // 处理剩余的循环 + for (; c < Bc_fixed; ++c) { + const float p_scalar = p_row[c]; + const float32x4_t p_vec = vdupq_n_f32(p_scalar); + const dtype_kv_in_t *v_row = v_block + c * v_stride; + float32x4_t v_vec; + if constexpr (std::is_same_v) { + v_vec = vcvt_f32_f16(vld1_f16(reinterpret_cast(v_row + d))); + } else { + v_vec = vld1q_f32(v_row + d); + } + float32x4_t v_smoothed = vsubq_f32(v_vec, vm_vec); + o_acc_vec = vfmaq_f32(o_acc_vec, p_vec, v_smoothed); + } + + vst1q_f32(o_row + d, o_acc_vec); + } + } +#elif defined(__AVX2__) && defined(__FMA__) + // =========== AVX2 IMPLEMENTATION START =========== + for (int r = 0; r < Br_fixed; ++r) { + float *o_row = acc_o + r * dim_size; + for (int c = 0; c < Bc_fixed; ++c) { + const float p_scalar = p_block[r * Bc + c]; + if (fabsf(p_scalar) < 1e-9) continue; + + const __m256 p_vec = _mm256_set1_ps(p_scalar); + const dtype_kv_in_t *v_row = 
v_block + c * v_stride; + + int d = 0; + for (; d <= dim_size - 8; d += 8) { + const __m256 vm_vec = _mm256_loadu_ps(v_mean + d); + const __m256 v_vec = load_and_convert_to_fp32_vec(v_row + d); + + __m256 o_vec = _mm256_loadu_ps(o_row + d); + __m256 v_smoothed = _mm256_sub_ps(v_vec, vm_vec); + + // Fused Multiply-Add: o_vec = (p_vec * v_smoothed) + o_vec + o_vec = _mm256_fmadd_ps(p_vec, v_smoothed, o_vec); + + _mm256_storeu_ps(o_row + d, o_vec); + } + // Remainder loop + for (; d < dim_size; ++d) { + o_row[d] += p_scalar * (to_float(v_row[d]) - v_mean[d]); + } + } + } + // =========== AVX2 IMPLEMENTATION END =========== +#else + // Fallback for other platforms + for (int r = 0; r < Br_fixed; ++r) { + float *o_row = acc_o + r * dim_size; + for (int c = 0; c < Bc_fixed; ++c) { + const float p = p_block[r * Bc + c]; + if (fabsf(p) < 1e-9) continue; + const dtype_kv_in_t *v_row = v_block + c * v_stride; + for (int d = 0; d < dim_size; ++d) { o_row[d] += p * (to_float(v_row[d]) - v_mean[d]); } + } + } +#endif + } + + void scale_and_store(int Br_fixed, const float *acc_o, const float *logsum, const float *v_mean, float *O, int head_size, int dim_size) { + int o_stride = head_size * dim_size; + for (int r = 0; r < Br_fixed; ++r) { + float inv_logsum = (logsum[r] > 1e-9f) ? 
1.f / logsum[r] : 0.f; + const float *o_row = acc_o + r * dim_size; + float *O_row = O + r * o_stride; +#if defined(__AVX2__) && defined(__FMA__) + const __m256 inv_logsum_vec = _mm256_set1_ps(inv_logsum); + int d = 0; + for (; d <= dim_size - 8; d += 8) { + const __m256 o_vec = _mm256_loadu_ps(o_row + d); + const __m256 vm_vec = _mm256_loadu_ps(v_mean + d); + _mm256_storeu_ps(O_row + d, _mm256_fmadd_ps(o_vec, inv_logsum_vec, vm_vec)); + } + for (; d < dim_size; ++d) O_row[d] = o_row[d] * inv_logsum + v_mean[d]; +#elif (defined(__ARM_NEON) || defined(__ARM_NEON__)) + float32x4_t inv_logsum_vec = vdupq_n_f32(inv_logsum); + int d = 0; + for (; d <= dim_size - 4; d += 4) { + float32x4_t o_vec = vld1q_f32(o_row + d); + float32x4_t vm_vec = vld1q_f32(v_mean + d); + vst1q_f32(O_row + d, vfmaq_f32(vm_vec, o_vec, inv_logsum_vec)); + } + for (; d < dim_size; ++d) O_row[d] = o_row[d] * inv_logsum + v_mean[d]; +#else + for (int d = 0; d < dim_size; ++d) O_row[d] = o_row[d] * inv_logsum + v_mean[d]; +#endif + } + } +}; + +template +void sage_attention_forward_cpu_dispatch(const float *Q, const KVDtype *K, const KVDtype *V, float *O, int32_t batch_size, int32_t q_head, int32_t kv_head, int32_t seq_size_q, int32_t seq_size_k, int32_t dim_size, bool causal_mask, int32_t threads, int32_t br, int32_t bc) { + std::vector V_mean(batch_size * kv_head * dim_size); + compute_channel_means(V, V_mean.data(), batch_size, kv_head, seq_size_k, dim_size); + + thread_local WorkspaceManager manager; + + std::vector K_mean(batch_size * kv_head * dim_size); + + if (seq_size_q > 1) { // Prefill + const std::vector required_sizes = { + (size_t)threads * br * dim_size * sizeof(float), + (size_t)threads * br * bc * sizeof(float), + (size_t)threads * br * sizeof(float), + (size_t)threads * br * sizeof(float), + (size_t)threads * br * sizeof(float), + (size_t)threads * br * sizeof(float), + (size_t)threads * br * sizeof(float), + (size_t)threads * br * dim_size * sizeof(int8_t), + (size_t)batch_size * 
kv_head * seq_size_k * dim_size * sizeof(int8_t), + (size_t)threads * br * sizeof(float), + (size_t)batch_size * kv_head * seq_size_k * sizeof(float), + (size_t)threads * dim_size * sizeof(float), + (size_t)threads * dim_size * sizeof(float), + (size_t)threads * dim_size * sizeof(float), + }; + void **workspace = manager.get_workspace(required_sizes); + + int8_t *k_quant_global_buffer = static_cast(workspace[8]); + float *k_scale_global_buffer = static_cast(workspace[10]); + + compute_mean_and_quantize_k(K, K_mean.data(), k_quant_global_buffer, k_scale_global_buffer, batch_size, kv_head, seq_size_k, dim_size, threads, static_cast(workspace[13]), static_cast(workspace[11])); + + SAGE_CPU_IMPL op; + op.configure(br, bc, q_head, kv_head, threads); + op.init_workspace(workspace); + op.sage_attn_prefill(Q, K, V, O, K_mean.data(), V_mean.data(), batch_size, q_head, seq_size_q, seq_size_k, dim_size, causal_mask); + } else { // Decode + const int32_t decode_br = 1; + const std::vector required_sizes = { + (size_t)threads * decode_br * dim_size * sizeof(float), + (size_t)threads * decode_br * bc * sizeof(float), + (size_t)threads * decode_br * sizeof(float), + (size_t)threads * decode_br * sizeof(float), + (size_t)threads * decode_br * sizeof(float), + (size_t)threads * decode_br * sizeof(float), + (size_t)threads * decode_br * sizeof(float), + (size_t)threads * decode_br * dim_size * sizeof(int8_t), + (size_t)batch_size * kv_head * seq_size_k * dim_size * sizeof(int8_t), + (size_t)threads * decode_br * sizeof(float), + (size_t)batch_size * kv_head * seq_size_k * sizeof(float), + (size_t)threads * dim_size * sizeof(float), + (size_t)threads * dim_size * sizeof(float), + (size_t)threads * dim_size * sizeof(float), + }; + void **workspace = manager.get_workspace(required_sizes); + + int8_t *k_quant_global_buffer = static_cast(workspace[8]); + float *k_scale_global_buffer = static_cast(workspace[10]); + + compute_mean_and_quantize_k(K, K_mean.data(), k_quant_global_buffer, 
k_scale_global_buffer, batch_size, kv_head, seq_size_k, dim_size, threads, static_cast(workspace[13]), static_cast(workspace[11])); + + SAGE_CPU_IMPL op; + op.configure(br, bc, q_head, kv_head, threads); + op.init_workspace(workspace); + op.sage_attn_decode(Q, K, V, O, K_mean.data(), V_mean.data(), batch_size, q_head, seq_size_k, dim_size, causal_mask); + } +} +} // namespace sage_attn_pt_cpu diff --git a/mllm/backends/cpu/compute/SageQuantize.hpp b/mllm/backends/cpu/compute/SageQuantize.hpp new file mode 100644 index 000000000..9d33f0b48 --- /dev/null +++ b/mllm/backends/cpu/compute/SageQuantize.hpp @@ -0,0 +1,138 @@ +#ifndef SAGE_QUANT_H +#define SAGE_QUANT_H + +#include +#include +#include +#include +#include +#include +#include +#include "Types.hpp" + +// --- SIMD Intrinsics (依赖项) --- +#ifdef __AVX2__ +#include +#endif + +// 外部依赖,确保QK8_0F在包含此文件前已定义 +// 或者直接在这里定义 +// #ifndef QK8_0F +// #define QK8_0F 128 +// #endif + +// 为了让工具函数独立,把 hmax_ps 这种辅助函数也移过来 +#ifdef __AVX2__ +namespace { // 放在匿名空间中,使其为内部链接 +inline float _mm256_hmax_ps(__m256 x) { + __m128 lo = _mm256_castps256_ps128(x); + __m128 hi = _mm256_extractf128_ps(x, 1); + __m128 max_val = _mm_max_ps(lo, hi); + max_val = _mm_max_ps(max_val, _mm_shuffle_ps(max_val, max_val, _MM_SHUFFLE(0, 0, 2, 2))); + max_val = _mm_max_ps(max_val, _mm_shuffle_ps(max_val, max_val, _MM_SHUFFLE(0, 0, 0, 1))); + return _mm_cvtss_f32(max_val); +} +} // namespace +#endif + +// struct block_q8_0f { +// float scale; +// int8_t qs[QK8_0F]; +// }; +#pragma region "SAGE KV Cache Utils" +namespace sage_kv_cache { +inline void compute_sage_mean_vector(const float *head_start_ptr, float *out_mean, + int seq_size_k, int dim_size, size_t stride_s) { + if (seq_size_k == 0) + return; + std::vector sum(dim_size, 0.0); +#pragma omp parallel + { + std::vector local_sum(dim_size, 0.0); +#pragma omp for + for (int s = 0; s < seq_size_k; ++s) { + const float *row_ptr = head_start_ptr + s * stride_s; + for (int d = 0; d < dim_size; ++d) + local_sum[d] 
+= row_ptr[d]; + } +#pragma omp critical + { + for (int d = 0; d < dim_size; ++d) + sum[d] += local_sum[d]; + } + } + const float inv_seq_len = 1.0f / static_cast(seq_size_k); +#pragma omp parallel for + for (int d = 0; d < dim_size; ++d) + out_mean[d] = sum[d] * inv_seq_len; +} + +inline void update_sage_mean_vector_incremental(float *mean_vector_io, + const float *new_token_vector, + int old_seq_len, int dim_size) { + if (old_seq_len <= 0) { + memcpy(mean_vector_io, new_token_vector, dim_size * sizeof(float)); + return; + } + const float N = static_cast(old_seq_len); + const float factor1 = N / (N + 1.0f); + const float factor2 = 1.0f / (N + 1.0f); +#pragma omp parallel for + for (int d = 0; d < dim_size; ++d) + mean_vector_io[d] = + mean_vector_io[d] * factor1 + new_token_vector[d] * factor2; +} + +template +inline void quantize_new_token_to_sage_blocks(const float *new_token_vector, + const float *current_mean_data, + T *out_blocks_for_token, int dim_size) { + static_assert(std::is_same_v, + "This function can only quantize to block_q8_0f."); + block_q8_0f *out_ptr = reinterpret_cast(out_blocks_for_token); + const int num_k_blocks = dim_size / QK8_0F; + std::vector smoothed_row(dim_size); + for (int d = 0; d < dim_size; ++d) + smoothed_row[d] = new_token_vector[d] - current_mean_data[d]; + for (int g = 0; g < num_k_blocks; ++g) { + const int offset = g * QK8_0F; + const float *smoothed_block_ptr = smoothed_row.data() + offset; + float max_abs_val = 0.0f; + for (int d = 0; d < QK8_0F; ++d) + max_abs_val = std::max(max_abs_val, fabsf(smoothed_block_ptr[d])); + const float scale = (max_abs_val > 1e-9f) ? max_abs_val / 127.0f : 0.0f; + out_ptr[g].scale = scale; + const float inv_scale = (scale > 1e-9f) ? 
1.0f / scale : 0.0f; + for (int d = 0; d < QK8_0F; ++d) + out_ptr[g].qs[d] = + static_cast(roundf(smoothed_block_ptr[d] * inv_scale)); + } +} + +// [修正] 改为只为单个头计算均值 +inline void compute_sage_mean_for_one_head_bshd( + const float *head_start_ptr, // 指向单个头的起始地址 + float *out_mean_for_head, // 指向该头对应的均值输出位置 + int seq_size, int dim_size, + size_t s_stride // BSHD布局下,序列方向的步长 +) { + if (seq_size == 0) return; + + std::vector sum(dim_size, 0.0); + for (int s = 0; s < seq_size; ++s) { + // 直接通过步长访问一个头内的所有序列 + const float *row_ptr = head_start_ptr + s * s_stride; + for (int d = 0; d < dim_size; ++d) { + sum[d] += row_ptr[d]; + } + } + + const float inv_seq_len = 1.0f / static_cast(seq_size); + for (int d = 0; d < dim_size; ++d) { + out_mean_for_head[d] = sum[d] * inv_seq_len; + } +} + +} // namespace sage_kv_cache +#pragma endregion +#endif // SAGE_QUANT_H \ No newline at end of file diff --git a/mllm/backends/cpu/compute/Sigmoid.hpp b/mllm/backends/cpu/compute/Sigmoid.hpp new file mode 100644 index 000000000..d7a8b7958 --- /dev/null +++ b/mllm/backends/cpu/compute/Sigmoid.hpp @@ -0,0 +1,69 @@ +#if defined(__ARM_NEON) && defined(__aarch64__) +#include // 包含 NEON 指令集的头文件 +#endif +#if defined(__AVX2__) && defined(__FMA__) +#include // 包含 AVX, SSE 等指令集的头文件 +#endif +#include + +#if defined(__AVX2__) && defined(__FMA__) +// AVX2 版本的快速 exp (示意) +static inline __m256 fast_exp_ps_avx2(__m256 x) { + float temp_in[8], temp_out[8]; + _mm256_storeu_ps(temp_in, x); + for (int i = 0; i < 8; ++i) temp_out[i] = expf(temp_in[i]); + return _mm256_loadu_ps(temp_out); +} +#endif + +#if defined(__ARM_NEON) && defined(__aarch64__) +static inline float32x4_t fast_exp_f32_neon(float32x4_t x) { + float temp_in[4], temp_out[4]; + vst1q_f32(temp_in, x); + for (int i = 0; i < 4; ++i) temp_out[i] = expf(temp_in[i]); + return vld1q_f32(temp_out); +} +#endif + +/** + * @brief 对一个 float 数组进行 Sigmoid 计算 (支持 AVX 和 NEON 的高性能版本) + * @param n 数组中元素的数量 + * @param y 指向输出数组的指针 + * @param x 指向输入数组的指针 + */ +void 
vec_sigmoid_f32(const int n, float *y, const float *x) { + int i = 0; + +// 1. 优先使用 AVX2 和 FMA 指令集 (x86 架构, 一次处理8个float) +#if defined(__AVX2__) && defined(__FMA__) + const __m256 ones_avx = _mm256_set1_ps(1.0f); + const __m256 zeros_avx = _mm256_setzero_ps(); + + for (; i + 7 < n; i += 8) { + __m256 val = _mm256_loadu_ps(x + i); // 加载数据 + val = _mm256_sub_ps(zeros_avx, val); // 计算 -x + val = fast_exp_ps_avx2(val); // 计算 exp(-x) + val = _mm256_add_ps(ones_avx, val); // 计算 1 + exp(-x) + val = _mm256_div_ps(ones_avx, val); // 计算 1 / (...) + _mm256_storeu_ps(y + i, val); // 存储结果 + } + +// 2. 其次,如果平台是 ARMv8-A (aarch64),则使用 NEON (一次处理4个float) +#elif defined(__ARM_NEON) && defined(__aarch64__) + const float32x4_t ones_neon = vdupq_n_f32(1.0f); + + for (; i + 3 < n; i += 4) { + float32x4_t val = vld1q_f32(x + i); // 加载数据 + val = vnegq_f32(val); // 计算 -x + val = fast_exp_f32_neon(val); // 计算 exp(-x) + val = vaddq_f32(ones_neon, val); // 计算 1 + exp(-x) + val = vdivq_f32(ones_neon, val); // 计算 1 / (...) (vdivq_f32 在 aarch64 中可用) + vst1q_f32(y + i, val); // 存储结果 + } +#endif + + // 3. 
"收尾"循环:处理剩余的不足一个SIMD块的元素,或在不支持SIMD的平台上运行 + for (; i < n; ++i) { + y[i] = 1.0f / (1.0f + expf(-x[i])); + } +} \ No newline at end of file diff --git a/mllm/backends/cpu/compute/Split.hpp b/mllm/backends/cpu/compute/Split.hpp new file mode 100644 index 000000000..9c10ab3b0 --- /dev/null +++ b/mllm/backends/cpu/compute/Split.hpp @@ -0,0 +1,269 @@ +// #include +// #include +// #include +// #include +// #include // for memcpy + +// // 引入 OpenMP 头文件 +// #include + +// // 引入SIMD头文件 +// #if defined(__AVX__) +// #include +// #elif defined(__ARM_NEON) +// #include +// #endif + +// /** +// * @brief 高效地将一个4D张量按指定维度分割 (OpenMP并行版本) +// * @param origin 输入的4D张量的裸指针 +// * @param origin_dims 输入张量的维度信息,大小为4的数组,例如 {N, C, H, W} +// * @param out 输出张量指针的向量 +// * @param split_dims 每个输出张量在分割维度上的大小 +// * @param dim_id 要进行分割的维度索引 (0到3) +// */ +// void efficient_split(const float *origin, const int *origin_dims, +// std::vector &out, const std::vector &split_dims, +// int dim_id) { +// // --- 1. 输入验证 --- +// if (dim_id < 0 || dim_id > 3) { +// throw std::invalid_argument("Error: dim_id must be between 0 and 3."); +// } + +// int total_split_dim = std::accumulate(split_dims.begin(), split_dims.end(), 0); +// if (total_split_dim != origin_dims[dim_id]) { +// throw std::invalid_argument("Error: Sum of split_dims must be equal to the dimension size of origin tensor."); +// } + +// if (out.size() != split_dims.size()) { +// throw std::invalid_argument("Error: The size of 'out' vector must be equal to the size of 'split_dims' vector."); +// } + +// // --- 2. 计算步长和循环尺寸 --- +// int strides[4]; +// strides[3] = 1; +// for (int i = 2; i >= 0; --i) { +// strides[i] = strides[i + 1] * origin_dims[i + 1]; +// } + +// int outer_loop_size = 1; +// for (int i = 0; i < dim_id; ++i) { +// outer_loop_size *= origin_dims[i]; +// } + +// int inner_loop_size = strides[dim_id]; +// int original_dim_size_at_split_axis = origin_dims[dim_id] * inner_loop_size; + +// // --- 3. 
为并行计算预先计算偏移量 --- +// // 为了使主循环能够并行化,我们需要为每个输出张量预先计算其在源张量中的起始偏移量。 +// // 这避免了在循环中依赖于前一次迭代结果的顺序更新。 +// std::vector split_offsets(split_dims.size() + 1, 0); +// for (size_t i = 0; i < split_dims.size(); ++i) { +// split_offsets[i + 1] = split_offsets[i] + split_dims[i]; +// } + +// // --- 4. 并行处理 --- +// // 使用 OpenMP 对主循环进行并行化。 +// // OpenMP 会自动根据系统配置或 OMP_NUM_THREADS 环境变量来决定使用的线程数。 +// // 循环变量 'i' 默认是私有的。所有在循环外声明的变量都是共享的, +// // 这在这里是安全的,因为它们在并行区域内是只读的(除了 'out',但每个线程访问 out[i],不会冲突)。 +// #pragma omp parallel for +// for (size_t i = 0; i < out.size(); ++i) { +// float *out_ptr = out[i]; +// const int split_size = split_dims[i]; +// const int offset_in_dim = split_offsets[i]; + +// // 遍历所有不被分割的外部维度 +// for (int outer_idx = 0; outer_idx < outer_loop_size; ++outer_idx) { +// // 计算源和目标在当前外部维度块的基地址 +// const float *src_base = origin + outer_idx * original_dim_size_at_split_axis + offset_in_dim * inner_loop_size; +// float *dst_base = out_ptr + outer_idx * split_size * inner_loop_size; + +// // 沿着分割轴,拷贝 'split_size' 个大小为 'inner_loop_size' 的数据块 +// for (int split_idx = 0; split_idx < split_size; ++split_idx) { +// const float *src = src_base + split_idx * inner_loop_size; +// float *dst = dst_base + split_idx * inner_loop_size; +// int count = inner_loop_size; + +// // 使用SIMD指令集进行高效的内存拷贝 +// #if defined(__AVX__) +// for (; count >= 8; count -= 8) { +// __m256 data = _mm256_loadu_ps(src); +// _mm256_storeu_ps(dst, data); +// src += 8; +// dst += 8; +// } +// #elif defined(__ARM_NEON) +// for (; count >= 4; count -= 4) { +// float32x4_t data = vld1q_f32(src); +// vst1q_f32(dst, data); +// src += 4; +// dst += 4; +// } +// #endif +// // 处理剩余不足一个SIMD寄存器大小的数据 +// for (; count > 0; --count) { +// *dst++ = *src++; +// } +// } +// } +// } +// } + +// Split.hpp + +#include +#include +#include +#include +#include // for memcpy + +// 引入 OpenMP 头文件 +#include + +// 引入SIMD头文件 +#if defined(__AVX__) +#include +#elif defined(__ARM_NEON) +#include +#endif + +// 新增:引入项目所需的数据类型和转换定义 +#include 
"Types.hpp" +#include "backends/cpu/third_party/ggml/QuantizeFP16.hpp" + +/** + * @brief 高效地将一个4D张量按指定维度分割 (OpenMP并行版本),支持混合输出类型(float/fp16) + * @param origin 输入的4D张量的裸指针 (必须是 float 类型) + * @param origin_dims 输入张量的维度信息,大小为4的数组,例如 {N, C, H, W} + * @param out 输出张量裸指针的向量 (void*) + * @param out_types 每个输出张量的数据类型 + * @param split_dims 每个输出张量在分割维度上的大小 + * @param dim_id 要进行分割的维度索引 (0到3) + */ +void efficient_split(const float *origin, const int *origin_dims, + std::vector &out, const std::vector &out_types, // 修改点 + const std::vector &split_dims, + int dim_id) { + // --- 1. 输入验证 --- + if (dim_id < 0 || dim_id > 3) { + throw std::invalid_argument("Error: dim_id must be between 0 and 3."); + } + + int total_split_dim = std::accumulate(split_dims.begin(), split_dims.end(), 0); + if (total_split_dim != origin_dims[dim_id]) { + throw std::invalid_argument("Error: Sum of split_dims must be equal to the dimension size of origin tensor."); + } + + if (out.size() != split_dims.size() || out.size() != out_types.size()) { // 修改点 + throw std::invalid_argument("Error: The size of 'out', 'out_types', and 'split_dims' vectors must be equal."); + } + + // --- 2. 计算步长和循环尺寸 --- + int strides[4]; + strides[3] = 1; + for (int i = 2; i >= 0; --i) { + strides[i] = strides[i + 1] * origin_dims[i + 1]; + } + + int outer_loop_size = 1; + for (int i = 0; i < dim_id; ++i) { + outer_loop_size *= origin_dims[i]; + } + + int inner_loop_size = strides[dim_id]; + int original_dim_size_at_split_axis = origin_dims[dim_id] * inner_loop_size; + + // --- 3. 为并行计算预先计算偏移量 --- + std::vector split_offsets(split_dims.size() + 1, 0); + for (size_t i = 0; i < split_dims.size(); ++i) { + split_offsets[i + 1] = split_offsets[i] + split_dims[i]; + } + +// --- 4. 
并行处理 --- +#pragma omp parallel for + for (size_t i = 0; i < out.size(); ++i) { + void *out_ptr_void = out[i]; + const int split_size = split_dims[i]; + const int offset_in_dim = split_offsets[i]; + const DataType out_type = out_types[i]; + + // 遍历所有不被分割的外部维度 + for (int outer_idx = 0; outer_idx < outer_loop_size; ++outer_idx) { + // 计算源在当前外部维度块的基地址 + const float *src_base = origin + outer_idx * original_dim_size_at_split_axis + offset_in_dim * inner_loop_size; + + // --- 修改点:根据输出类型选择不同的处理路径 --- + if (out_type == MLLM_TYPE_F32) { + float *out_ptr = static_cast(out_ptr_void); + float *dst_base = out_ptr + outer_idx * split_size * inner_loop_size; + const size_t copy_bytes = split_size * inner_loop_size * sizeof(float); + // memcpy(dst_base, src_base, copy_bytes); + for (int split_idx = 0; split_idx < split_size; ++split_idx) { + const float *src = src_base + split_idx * inner_loop_size; + float *dst = dst_base + split_idx * inner_loop_size; + int count = inner_loop_size; +#if defined(__AVX__) + for (; count >= 8; count -= 8) { + __m256 data = _mm256_loadu_ps(src); + _mm256_storeu_ps(dst, data); + src += 8; + dst += 8; + } +#elif defined(__ARM_NEON) + for (; count >= 4; count -= 4) { + float32x4_t data = vld1q_f32(src); + vst1q_f32(dst, data); + src += 4; + dst += 4; + } +#endif + for (; count > 0; --count) *dst++ = *src++; + } + + } else if (out_type == MLLM_TYPE_F16) { + mllm_fp16_t *out_ptr = static_cast(out_ptr_void); + mllm_fp16_t *dst_base = out_ptr + outer_idx * split_size * inner_loop_size; + + for (int split_idx = 0; split_idx < split_size; ++split_idx) { + const float *src = src_base + split_idx * inner_loop_size; + mllm_fp16_t *dst = dst_base + split_idx * inner_loop_size; + int count = inner_loop_size; + +// 使用SIMD指令集进行高效的转换和内存拷贝 +#if defined(__AVX__) && defined(__F16C__) + for (; count >= 8; count -= 8) { + // 从内存加载 8 个 float + __m256 float_vec = _mm256_loadu_ps(src); + // 将 8 个 float 转换为 8 个 fp16 + __m128i fp16_vec = _mm256_cvtps_ph(float_vec, 
_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC); + // 存储 8 个 fp16 (128 bits) + _mm_storeu_si128(reinterpret_cast<__m128i *>(dst), fp16_vec); + src += 8; + dst += 8; + } +#elif defined(__ARM_NEON) + for (; count >= 4; count -= 4) { + // 从内存加载 4 个 float + float32x4_t float_vec = vld1q_f32(src); + // 将 4 个 float 转换为 4 个 fp16 + float16x4_t fp16_vec = vcvt_f16_f32(float_vec); + // 存储 4 个 fp16 + vst1_f16(reinterpret_cast(dst), fp16_vec); + src += 4; + dst += 4; + } +#endif + + // 处理剩余不足一个SIMD寄存器大小的数据 + for (; count > 0; --count) { + *dst++ = MLLM_FP32_TO_FP16(*src++); + } + } + } else { + // 如果将来支持更多类型,可以在此添加 + // 为了安全,可以抛出异常或打印错误日志 + } + } + } +} \ No newline at end of file diff --git a/mllm/backends/cpu/compute/Transpose2D.hpp b/mllm/backends/cpu/compute/Transpose2D.hpp new file mode 100644 index 000000000..eabde519e --- /dev/null +++ b/mllm/backends/cpu/compute/Transpose2D.hpp @@ -0,0 +1,258 @@ +#pragma once +#include +#include "Types.hpp" + +// 为不同平台引入对应的 SIMD 指令集头文件 +#if defined(__AVX__) || defined(__AVX2__) +#include // Intel/AMD AVX 指令集 +#elif defined(__aarch64__) +#include // ARM NEON 指令集 +#endif + +/** + * @brief 使用 SIMD 指令 (AVX) 转置一个 8x8 的浮点数矩阵块。 + * @param src 指向源数据块左上角的指针 + * @param dst 指向目标数据块左上角的指针 + * @param src_stride 源矩阵的行步长 (即列数) + * @param dst_stride 目标矩阵的行步长 (即转置前的行数) + */ +#if defined(__AVX__) || defined(__AVX2__) +static inline void transpose_block_8x8_avx(const float *src, float *dst, const int src_stride, const int dst_stride) { + // 1. 从源矩阵加载8行数据到8个AVX寄存器 + __m256 row0 = _mm256_loadu_ps(src + 0 * src_stride); + __m256 row1 = _mm256_loadu_ps(src + 1 * src_stride); + __m256 row2 = _mm256_loadu_ps(src + 2 * src_stride); + __m256 row3 = _mm256_loadu_ps(src + 3 * src_stride); + __m256 row4 = _mm256_loadu_ps(src + 4 * src_stride); + __m256 row5 = _mm256_loadu_ps(src + 5 * src_stride); + __m256 row6 = _mm256_loadu_ps(src + 6 * src_stride); + __m256 row7 = _mm256_loadu_ps(src + 7 * src_stride); + + // 2. 
在寄存器内进行 8x8 矩阵的转置 (这是一个标准的多步shuffle操作) + __m256 t0, t1, t2, t3, t4, t5, t6, t7; + t0 = _mm256_unpacklo_ps(row0, row1); + t1 = _mm256_unpackhi_ps(row0, row1); + t2 = _mm256_unpacklo_ps(row2, row3); + t3 = _mm256_unpackhi_ps(row2, row3); + t4 = _mm256_unpacklo_ps(row4, row5); + t5 = _mm256_unpackhi_ps(row4, row5); + t6 = _mm256_unpacklo_ps(row6, row7); + t7 = _mm256_unpackhi_ps(row6, row7); + + __m256 tt0, tt1, tt2, tt3, tt4, tt5, tt6, tt7; + tt0 = _mm256_shuffle_ps(t0, t2, _MM_SHUFFLE(1, 0, 1, 0)); + tt1 = _mm256_shuffle_ps(t0, t2, _MM_SHUFFLE(3, 2, 3, 2)); + tt2 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(1, 0, 1, 0)); + tt3 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(3, 2, 3, 2)); + tt4 = _mm256_shuffle_ps(t4, t6, _MM_SHUFFLE(1, 0, 1, 0)); + tt5 = _mm256_shuffle_ps(t4, t6, _MM_SHUFFLE(3, 2, 3, 2)); + tt6 = _mm256_shuffle_ps(t5, t7, _MM_SHUFFLE(1, 0, 1, 0)); + tt7 = _mm256_shuffle_ps(t5, t7, _MM_SHUFFLE(3, 2, 3, 2)); + + row0 = _mm256_permute2f128_ps(tt0, tt4, 0x20); + row1 = _mm256_permute2f128_ps(tt1, tt5, 0x20); + row2 = _mm256_permute2f128_ps(tt2, tt6, 0x20); + row3 = _mm256_permute2f128_ps(tt3, tt7, 0x20); + row4 = _mm256_permute2f128_ps(tt0, tt4, 0x31); + row5 = _mm256_permute2f128_ps(tt1, tt5, 0x31); + row6 = _mm256_permute2f128_ps(tt2, tt6, 0x31); + row7 = _mm256_permute2f128_ps(tt3, tt7, 0x31); + + // 3. 
将转置后的8个寄存器(现在是8列)写回到目标矩阵 + _mm256_storeu_ps(dst + 0 * dst_stride, row0); + _mm256_storeu_ps(dst + 1 * dst_stride, row1); + _mm256_storeu_ps(dst + 2 * dst_stride, row2); + _mm256_storeu_ps(dst + 3 * dst_stride, row3); + _mm256_storeu_ps(dst + 4 * dst_stride, row4); + _mm256_storeu_ps(dst + 5 * dst_stride, row5); + _mm256_storeu_ps(dst + 6 * dst_stride, row6); + _mm256_storeu_ps(dst + 7 * dst_stride, row7); +} +#endif + +/** + * @brief 使用 SIMD 指令 (NEON) 转置一个 4x4 的浮点数矩阵块。 + * @param src 指向源数据块左上角的指针 + * @param dst 指向目标数据块左上角的指针 + * @param src_stride 源矩阵的行步长 (即列数) + * @param dst_stride 目标矩阵的行步长 (即转置前的行数) + */ +#if defined(__aarch64__) +static inline void transpose_block_4x4_neon(const float *src, float *dst, const int src_stride, const int dst_stride) { + // 1. 从源矩阵加载4行数据,每行4个float + float32x4_t row0 = vld1q_f32(src + 0 * src_stride); + float32x4_t row1 = vld1q_f32(src + 1 * src_stride); + float32x4_t row2 = vld1q_f32(src + 2 * src_stride); + float32x4_t row3 = vld1q_f32(src + 3 * src_stride); + + // 2. 使用 VTRN 指令对 4x4 矩阵进行转置 + // 第一次 VTRN, 两两交换元素 + float32x4x2_t p01 = vtrnq_f32(row0, row1); // p01.val[0] = {r0[0], r1[0], r0[2], r1[2]}, p01.val[1] = {r0[1], r1[1], r0[3], r1[3]} + float32x4x2_t p23 = vtrnq_f32(row2, row3); // p23.val[0] = {r2[0], r3[0], r2[2], r3[2]}, p23.val[1] = {r2[1], r3[1], r2[3], r3[3]} + + // 3. 提取并组合成最终的转置结果 + // 从 p01 和 p23 中提取低位的 64bit (2个float) 并组合 + // res0 = {r0[0], r1[0], r2[0], r3[0]} + float32x4_t res0 = vcombine_f32(vget_low_f32(p01.val[0]), vget_low_f32(p23.val[0])); + // 从 p01 和 p23 中提取高位的 64bit (2个float) 并组合 + // res1 = {r0[1], r1[1], r2[1], r3[1]} + float32x4_t res1 = vcombine_f32(vget_low_f32(p01.val[1]), vget_low_f32(p23.val[1])); + float32x4_t res2 = vcombine_f32(vget_high_f32(p01.val[0]), vget_high_f32(p23.val[0])); + float32x4_t res3 = vcombine_f32(vget_high_f32(p01.val[1]), vget_high_f32(p23.val[1])); + + // 4. 
将转置后的4个寄存器(现在是4列)写回到目标矩阵 + vst1q_f32(dst + 0 * dst_stride, res0); + vst1q_f32(dst + 1 * dst_stride, res1); + vst1q_f32(dst + 2 * dst_stride, res2); + vst1q_f32(dst + 3 * dst_stride, res3); +} +#endif + +/** + * @brief 对一个二维浮点数矩阵进行高效转置 (dst = src^T)。 + * 自动检测平台并使用 AVX 或 NEON 指令进行加速。 + * 如果平台不支持,则回退到缓存优化的C++实现。 + * @param src 指向源矩阵 (N x M) 的指针 + * @param dst 指向目标矩阵 (M x N) 的指针 + * @param N 源矩阵的行数 + * @param M 源矩阵的列数 + */ +inline void transpose_matrix_efficient(const float *src, float *dst, const int N, const int M) { +#if defined(__AVX__) || defined(__AVX2__) + const int BLOCK_DIM = 8; + // 使用8x8分块处理大部分矩阵 + for (int i = 0; i < N / BLOCK_DIM * BLOCK_DIM; i += BLOCK_DIM) { + for (int j = 0; j < M / BLOCK_DIM * BLOCK_DIM; j += BLOCK_DIM) { + transpose_block_8x8_avx(src + i * M + j, dst + j * N + i, M, N); + } + } + // 处理右侧和下方的剩余部分 + for (int i = 0; i < N; ++i) { + for (int j = M / BLOCK_DIM * BLOCK_DIM; j < M; ++j) { + dst[j * N + i] = src[i * M + j]; + } + } + for (int i = N / BLOCK_DIM * BLOCK_DIM; i < N; ++i) { + for (int j = 0; j < M / BLOCK_DIM * BLOCK_DIM; ++j) { + dst[j * N + i] = src[i * M + j]; + } + } + +#elif defined(__aarch64__) + const int BLOCK_DIM = 4; + // 使用4x4分块处理大部分矩阵 + for (int i = 0; i < N / BLOCK_DIM * BLOCK_DIM; i += BLOCK_DIM) { + for (int j = 0; j < M / BLOCK_DIM * BLOCK_DIM; j += BLOCK_DIM) { + transpose_block_4x4_neon(src + i * M + j, dst + j * N + i, M, N); + } + } + // 处理剩余部分 + for (int i = 0; i < N; ++i) { + for (int j = M / BLOCK_DIM * BLOCK_DIM; j < M; ++j) { + dst[j * N + i] = src[i * M + j]; + } + } + for (int i = N / BLOCK_DIM * BLOCK_DIM; i < N; ++i) { + for (int j = 0; j < M / BLOCK_DIM * BLOCK_DIM; ++j) { + dst[j * N + i] = src[i * M + j]; + } + } + +#else + const int BLOCK_DIM = 16; + for (int i = 0; i < N; i += BLOCK_DIM) { + for (int j = 0; j < M; j += BLOCK_DIM) { + for (int bi = i; bi < i + BLOCK_DIM && bi < N; ++bi) { + for (int bj = j; bj < j + BLOCK_DIM && bj < M; ++bj) { + dst[bj * N + bi] = src[bi * M + bj]; + } + } + } + 
} +#endif +} + +// --- BEGIN: High-Performance FP16 Transpose Function for ARM NEON --- + +#if defined(__aarch64__) + +/** + * @brief 使用ARM NEON指令高效转置一个 8x8 的 __fp16 矩阵块。 + * 【已修正】使用了正确的 VTRN/VZIP 指令序列,避免了 vtrnq_f64 错误。 + * @param src 指向源数据块左上角的指针 (__fp16) + * @param dst 指向目标数据块左上角的指针 (__fp16) + * @param src_stride 源矩阵的行步长 (即列数) + * @param dst_stride 目标矩阵的行步长 (即转置前的行数) + */ +static inline void transpose_block_8x8_neon_fp16(const __fp16 *src, __fp16 *dst, const int src_stride, const int dst_stride) { + // 1. Load 8 rows from source matrix into 8 NEON registers + float16x8_t r0 = vld1q_f16(src + 0 * src_stride); + float16x8_t r1 = vld1q_f16(src + 1 * src_stride); + float16x8_t r2 = vld1q_f16(src + 2 * src_stride); + float16x8_t r3 = vld1q_f16(src + 3 * src_stride); + float16x8_t r4 = vld1q_f16(src + 4 * src_stride); + float16x8_t r5 = vld1q_f16(src + 5 * src_stride); + float16x8_t r6 = vld1q_f16(src + 6 * src_stride); + float16x8_t r7 = vld1q_f16(src + 7 * src_stride); + + // 2. Perform in-register transpose using VTRN and VZIP + // Stage 1: Transpose 2x2 blocks of __fp16 elements + float16x8x2_t t01 = vtrnq_f16(r0, r1); + float16x8x2_t t23 = vtrnq_f16(r2, r3); + float16x8x2_t t45 = vtrnq_f16(r4, r5); + float16x8x2_t t67 = vtrnq_f16(r6, r7); + + // Stage 2: Transpose 4x4 blocks by zipping 32-bit (2x fp16) chunks + float32x4x2_t z02 = vzipq_f32(vreinterpretq_f32_f16(t01.val[0]), vreinterpretq_f32_f16(t23.val[0])); + float32x4x2_t z13 = vzipq_f32(vreinterpretq_f32_f16(t01.val[1]), vreinterpretq_f32_f16(t23.val[1])); + float32x4x2_t z46 = vzipq_f32(vreinterpretq_f32_f16(t45.val[0]), vreinterpretq_f32_f16(t67.val[0])); + float32x4x2_t z57 = vzipq_f32(vreinterpretq_f32_f16(t45.val[1]), vreinterpretq_f32_f16(t67.val[1])); + + // 3. 
Store the transposed 8x8 block to the destination matrix + vst1q_f16(dst + 0 * dst_stride, vreinterpretq_f16_f32(z02.val[0])); + vst1q_f16(dst + 1 * dst_stride, vreinterpretq_f16_f32(z13.val[0])); + vst1q_f16(dst + 2 * dst_stride, vreinterpretq_f16_f32(z02.val[1])); + vst1q_f16(dst + 3 * dst_stride, vreinterpretq_f16_f32(z13.val[1])); + vst1q_f16(dst + 4 * dst_stride, vreinterpretq_f16_f32(z46.val[0])); + vst1q_f16(dst + 5 * dst_stride, vreinterpretq_f16_f32(z57.val[0])); + vst1q_f16(dst + 6 * dst_stride, vreinterpretq_f16_f32(z46.val[1])); + vst1q_f16(dst + 7 * dst_stride, vreinterpretq_f16_f32(z57.val[1])); +} +/** + * @brief 对一个 mllm_fp16_t 矩阵进行高效转置 (dst = src^T)。 + * 在 aarch64 平台上,此函数使用 NEON SIMD 指令进行极致加速。 + * 在其他平台,回退到缓存优化的C++实现。 + * @param src 指向源矩阵 (N x M) 的指针 + * @param dst 指向目标矩阵 (M x N) 的指针 + * @param N 源矩阵的行数 + * @param M 源矩阵的列数 + */ +inline void transpose_matrix_efficient_fp16(const mllm_fp16_t *src, mllm_fp16_t *dst, const int N, const int M) { + const int BLOCK_DIM = 8; + // Use an 8x8 block loop to process the majority of the matrix + for (int i = 0; i < N / BLOCK_DIM * BLOCK_DIM; i += BLOCK_DIM) { + for (int j = 0; j < M / BLOCK_DIM * BLOCK_DIM; j += BLOCK_DIM) { + // On ARM, mllm_fp16_t is __fp16, so we can call the NEON helper directly + transpose_block_8x8_neon_fp16( + (const __fp16 *)(src + i * M + j), + (__fp16 *)(dst + j * N + i), + M, N); + } + } + + // Process the remaining rows and columns on the edges using standard C++ + for (int i = 0; i < N; ++i) { + for (int j = M / BLOCK_DIM * BLOCK_DIM; j < M; ++j) { + dst[j * N + i] = src[i * M + j]; + } + } + for (int i = N / BLOCK_DIM * BLOCK_DIM; i < N; ++i) { + for (int j = 0; j < M / BLOCK_DIM * BLOCK_DIM; ++j) { + dst[j * N + i] = src[i * M + j]; + } + } +} + +#endif // __aarch64__ + +// --- END: High-Performance FP16 Transpose Function for ARM NEON --- \ No newline at end of file diff --git a/mllm/backends/cpu/compute/Transpose3D.hpp b/mllm/backends/cpu/compute/Transpose3D.hpp new file mode 
100644 index 000000000..208dfd051 --- /dev/null +++ b/mllm/backends/cpu/compute/Transpose3D.hpp @@ -0,0 +1,338 @@ +#pragma once + +#include +#include +#include +#include +#include // 用于 memcpy +#include "DataType.hpp" +// 为不同平台引入对应的 SIMD 指令集头文件 +#if defined(__AVX__) || defined(__AVX2__) +#include // Intel/AMD AVX & AVX2 指令集 +#elif defined(__aarch64__) +#include // ARM NEON 指令集 +#endif + +// 引入 OpenMP 头文件以支持多线程并行 +#include + +#if defined(__AVX__) || defined(__AVX2__) +static inline void transpose_block_8x8_avx(const float *src, float *dst, const int src_stride, const int dst_stride) { + __m256 row0 = _mm256_loadu_ps(src + 0 * src_stride); + __m256 row1 = _mm256_loadu_ps(src + 1 * src_stride); + __m256 row2 = _mm256_loadu_ps(src + 2 * src_stride); + __m256 row3 = _mm256_loadu_ps(src + 3 * src_stride); + __m256 row4 = _mm256_loadu_ps(src + 4 * src_stride); + __m256 row5 = _mm256_loadu_ps(src + 5 * src_stride); + __m256 row6 = _mm256_loadu_ps(src + 6 * src_stride); + __m256 row7 = _mm256_loadu_ps(src + 7 * src_stride); + __m256 t0, t1, t2, t3, t4, t5, t6, t7; + t0 = _mm256_unpacklo_ps(row0, row1); + t1 = _mm256_unpackhi_ps(row0, row1); + t2 = _mm256_unpacklo_ps(row2, row3); + t3 = _mm256_unpackhi_ps(row2, row3); + t4 = _mm256_unpacklo_ps(row4, row5); + t5 = _mm256_unpackhi_ps(row4, row5); + t6 = _mm256_unpacklo_ps(row6, row7); + t7 = _mm256_unpackhi_ps(row6, row7); + __m256 tt0, tt1, tt2, tt3, tt4, tt5, tt6, tt7; + tt0 = _mm256_shuffle_ps(t0, t2, _MM_SHUFFLE(1, 0, 1, 0)); + tt1 = _mm256_shuffle_ps(t0, t2, _MM_SHUFFLE(3, 2, 3, 2)); + tt2 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(1, 0, 1, 0)); + tt3 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(3, 2, 3, 2)); + tt4 = _mm256_shuffle_ps(t4, t6, _MM_SHUFFLE(1, 0, 1, 0)); + tt5 = _mm256_shuffle_ps(t4, t6, _MM_SHUFFLE(3, 2, 3, 2)); + tt6 = _mm256_shuffle_ps(t5, t7, _MM_SHUFFLE(1, 0, 1, 0)); + tt7 = _mm256_shuffle_ps(t5, t7, _MM_SHUFFLE(3, 2, 3, 2)); + row0 = _mm256_permute2f128_ps(tt0, tt4, 0x20); + row1 = _mm256_permute2f128_ps(tt1, 
tt5, 0x20); + row2 = _mm256_permute2f128_ps(tt2, tt6, 0x20); + row3 = _mm256_permute2f128_ps(tt3, tt7, 0x20); + row4 = _mm256_permute2f128_ps(tt0, tt4, 0x31); + row5 = _mm256_permute2f128_ps(tt1, tt5, 0x31); + row6 = _mm256_permute2f128_ps(tt2, tt6, 0x31); + row7 = _mm256_permute2f128_ps(tt3, tt7, 0x31); + _mm256_storeu_ps(dst + 0 * dst_stride, row0); + _mm256_storeu_ps(dst + 1 * dst_stride, row1); + _mm256_storeu_ps(dst + 2 * dst_stride, row2); + _mm256_storeu_ps(dst + 3 * dst_stride, row3); + _mm256_storeu_ps(dst + 4 * dst_stride, row4); + _mm256_storeu_ps(dst + 5 * dst_stride, row5); + _mm256_storeu_ps(dst + 6 * dst_stride, row6); + _mm256_storeu_ps(dst + 7 * dst_stride, row7); +} +#endif +#if defined(__aarch64__) +static inline void transpose_block_4x4_neon(const float *src, float *dst, const int src_stride, const int dst_stride) { + float32x4_t row0 = vld1q_f32(src + 0 * src_stride); + float32x4_t row1 = vld1q_f32(src + 1 * src_stride); + float32x4_t row2 = vld1q_f32(src + 2 * src_stride); + float32x4_t row3 = vld1q_f32(src + 3 * src_stride); + float32x4x2_t p01 = vtrnq_f32(row0, row1); + float32x4x2_t p23 = vtrnq_f32(row2, row3); + float32x4_t res0 = vcombine_f32(vget_low_f32(p01.val[0]), vget_low_f32(p23.val[0])); + float32x4_t res1 = vcombine_f32(vget_low_f32(p01.val[1]), vget_low_f32(p23.val[1])); + float32x4_t res2 = vcombine_f32(vget_high_f32(p01.val[0]), vget_high_f32(p23.val[0])); + float32x4_t res3 = vcombine_f32(vget_high_f32(p01.val[1]), vget_high_f32(p23.val[1])); + vst1q_f32(dst + 0 * dst_stride, res0); + vst1q_f32(dst + 1 * dst_stride, res1); + vst1q_f32(dst + 2 * dst_stride, res2); + vst1q_f32(dst + 3 * dst_stride, res3); +} +#endif +static inline void transpose_matrix_2d_efficient(const float *src, float *dst, const int N, const int M) { +#if defined(__AVX__) || defined(__AVX2__) + const int BLOCK_DIM = 8; + for (int i = 0; i < N - (N % BLOCK_DIM); i += BLOCK_DIM) { + for (int j = 0; j < M - (M % BLOCK_DIM); j += BLOCK_DIM) { + 
transpose_block_8x8_avx(src + i * M + j, dst + j * N + i, M, N); + } + } + for (int i = 0; i < N; ++i) { + for (int j = M - (M % BLOCK_DIM); j < M; ++j) { dst[j * N + i] = src[i * M + j]; } + } + for (int i = N - (N % BLOCK_DIM); i < N; ++i) { + for (int j = 0; j < M - (M % BLOCK_DIM); ++j) { dst[j * N + i] = src[i * M + j]; } + } +#elif defined(__aarch64__) + const int BLOCK_DIM = 4; + for (int i = 0; i < N - (N % BLOCK_DIM); i += BLOCK_DIM) { + for (int j = 0; j < M - (M % BLOCK_DIM); j += BLOCK_DIM) { + transpose_block_4x4_neon(src + i * M + j, dst + j * N + i, M, N); + } + } + for (int i = 0; i < N; ++i) { + for (int j = M - (M % BLOCK_DIM); j < M; ++j) { dst[j * N + i] = src[i * M + j]; } + } + for (int i = N - (N % BLOCK_DIM); i < N; ++i) { + for (int j = 0; j < M - (M % BLOCK_DIM); ++j) { dst[j * N + i] = src[i * M + j]; } + } +#else + const int BLOCK_DIM = 16; + for (int i = 0; i < N; i += BLOCK_DIM) { + for (int j = 0; j < M; j += BLOCK_DIM) { + for (int bi = i; bi < i + BLOCK_DIM && bi < N; ++bi) { + for (int bj = j; bj < j + BLOCK_DIM && bj < M; ++bj) { + dst[bj * N + bi] = src[bi * M + bj]; + } + } + } + } +#endif +} + +/** + * @brief 对一个三维浮点数张量进行高效转置 + * 该函数根据指定的维度置换 (permutation) 来重新排列数据。 + * + * @param src 指向源张量数据的指针。数据布局为 (D1, D2, D3) 的稠密行主序。 + * @param dst 指向目标张量数据的指针。其维度根据 perm 计算得出。 + * @param d1 源张量的第 1 维大小 + * @param d2 源张量的第 2 维大小 + * @param d3 源张量的第 3 维大小 + * @param perm 一个包含 {0, 1, 2} 的置换向量,定义了转置方式。 + */ +void transpose3d_efficient(const float *src, float *dst, int d1, int d2, int d3, const std::vector &perm) { + // --- 1. 
输入验证 --- + if (perm.size() != 3) { + throw std::invalid_argument("Permutation vector must contain 3 elements."); + } + std::vector sorted_perm = perm; + std::sort(sorted_perm.begin(), sorted_perm.end()); + if (sorted_perm[0] != 0 || sorted_perm[1] != 1 || sorted_perm[2] != 2) { + throw std::invalid_argument("Permutation vector must be a permutation of {0, 1, 2}."); + } + + const int src_dims[3] = {d1, d2, d3}; + + // --- 2. 处理特殊情况:无需转置 --- + if (perm[0] == 0 && perm[1] == 1 && perm[2] == 2) { + const size_t total_elements = static_cast(d1) * d2 * d3; + if (src != dst) { + memcpy(dst, src, total_elements * sizeof(float)); + } + return; + } + + // --- 3. 性能最优路径:只交换最后两个维度 (e.g., NHW -> NWH) --- + if (perm[0] == 0 && perm[1] == 2 && perm[2] == 1) { + const int N = d2; + const int M = d3; +#pragma omp parallel for schedule(static) + for (int i = 0; i < d1; ++i) { + const float *src_slice = src + i * (N * M); + float *dst_slice = dst + i * (M * N); + transpose_matrix_2d_efficient(src_slice, dst_slice, N, M); + } + return; + } + + // --- 4. 
通用路径:处理所有其他维度置换 --- + const int dst_dims[3] = {src_dims[perm[0]], src_dims[perm[1]], src_dims[perm[2]]}; + const int BLOCK_DIM = 16; + + long src_strides[3] = {(long)d2 * d3, d3, 1}; + long dst_strides[3] = {(long)dst_dims[1] * dst_dims[2], dst_dims[2], 1}; + + int p_inv[3]; + p_inv[perm[0]] = 0; + p_inv[perm[1]] = 1; + p_inv[perm[2]] = 2; + +#pragma omp parallel for schedule(static) + for (int i0 = 0; i0 < dst_dims[0]; i0 += BLOCK_DIM) { + for (int j0 = 0; j0 < dst_dims[1]; j0 += BLOCK_DIM) { + for (int k0 = 0; k0 < dst_dims[2]; k0 += BLOCK_DIM) { + for (int i = i0; i < i0 + BLOCK_DIM && i < dst_dims[0]; ++i) { + for (int j = j0; j < j0 + BLOCK_DIM && j < dst_dims[1]; ++j) { + for (int k = k0; k < k0 + BLOCK_DIM && k < dst_dims[2]; ++k) { + long dst_idx = (long)i * dst_strides[0] + (long)j * dst_strides[1] + k; + int dst_coords[3] = {i, j, k}; + int src_coords[3]; + src_coords[p_inv[0]] = dst_coords[0]; + src_coords[p_inv[1]] = dst_coords[1]; + src_coords[p_inv[2]] = dst_coords[2]; + long src_idx = (long)src_coords[0] * src_strides[0] + (long)src_coords[1] * src_strides[1] + src_coords[2]; + dst[dst_idx] = src[src_idx]; + } + } + } + } + } + } +} + +#if defined(__aarch64__) +// NEON 平台使用 8x8 的 __fp16 块转置 +static inline void transpose_block_8x8_neon_fp16(const mllm_fp16_t *src, mllm_fp16_t *dst, const int src_stride, const int dst_stride) { + // 在 aarch64 上, mllm_fp16_t 就是 __fp16 + float16x8_t r0 = vld1q_f16(src + 0 * src_stride); + float16x8_t r1 = vld1q_f16(src + 1 * src_stride); + float16x8_t r2 = vld1q_f16(src + 2 * src_stride); + float16x8_t r3 = vld1q_f16(src + 3 * src_stride); + float16x8_t r4 = vld1q_f16(src + 4 * src_stride); + float16x8_t r5 = vld1q_f16(src + 5 * src_stride); + float16x8_t r6 = vld1q_f16(src + 6 * src_stride); + float16x8_t r7 = vld1q_f16(src + 7 * src_stride); + float16x8x2_t t01 = vtrnq_f16(r0, r1); + float16x8x2_t t23 = vtrnq_f16(r2, r3); + float16x8x2_t t45 = vtrnq_f16(r4, r5); + float16x8x2_t t67 = vtrnq_f16(r6, r7); + float32x4x2_t 
z02 = vzipq_f32(vreinterpretq_f32_f16(t01.val[0]), vreinterpretq_f32_f16(t23.val[0])); + float32x4x2_t z13 = vzipq_f32(vreinterpretq_f32_f16(t01.val[1]), vreinterpretq_f32_f16(t23.val[1])); + float32x4x2_t z46 = vzipq_f32(vreinterpretq_f32_f16(t45.val[0]), vreinterpretq_f32_f16(t67.val[0])); + float32x4x2_t z57 = vzipq_f32(vreinterpretq_f32_f16(t45.val[1]), vreinterpretq_f32_f16(t67.val[1])); + vst1q_f16(dst + 0 * dst_stride, vreinterpretq_f16_f32(z02.val[0])); + vst1q_f16(dst + 1 * dst_stride, vreinterpretq_f16_f32(z13.val[0])); + vst1q_f16(dst + 2 * dst_stride, vreinterpretq_f16_f32(z02.val[1])); + vst1q_f16(dst + 3 * dst_stride, vreinterpretq_f16_f32(z13.val[1])); + vst1q_f16(dst + 4 * dst_stride, vreinterpretq_f16_f32(z46.val[0])); + vst1q_f16(dst + 5 * dst_stride, vreinterpretq_f16_f32(z57.val[0])); + vst1q_f16(dst + 6 * dst_stride, vreinterpretq_f16_f32(z46.val[1])); + vst1q_f16(dst + 7 * dst_stride, vreinterpretq_f16_f32(z57.val[1])); +} +#endif + +// 高效的2D FP16矩阵转置 +static inline void transpose_matrix_2d_efficient_fp16(const mllm_fp16_t *src, mllm_fp16_t *dst, const int N, const int M) { +#if defined(__aarch64__) + const int BLOCK_DIM = 8; + for (int i = 0; i < N - (N % BLOCK_DIM); i += BLOCK_DIM) { + for (int j = 0; j < M - (M % BLOCK_DIM); j += BLOCK_DIM) { + transpose_block_8x8_neon_fp16(src + i * M + j, dst + j * N + i, M, N); + } + } + // 处理边缘情况 + for (int i = 0; i < N; ++i) { + for (int j = M - (M % BLOCK_DIM); j < M; ++j) { dst[j * N + i] = src[i * M + j]; } + } + for (int i = N - (N % BLOCK_DIM); i < N; ++i) { + for (int j = 0; j < M - (M % BLOCK_DIM); ++j) { dst[j * N + i] = src[i * M + j]; } + } +#else + // 在非NEON平台 (如AVX),使用通用的缓存分块方法 + const int BLOCK_DIM = 16; + for (int i = 0; i < N; i += BLOCK_DIM) { + for (int j = 0; j < M; j += BLOCK_DIM) { + for (int bi = i; bi < i + BLOCK_DIM && bi < N; ++bi) { + for (int bj = j; bj < j + BLOCK_DIM && bj < M; ++bj) { + dst[bj * N + bi] = src[bi * M + bj]; + } + } + } + } +#endif +} + +/** + * @brief 对一个三维 
mllm_fp16_t 张量进行高效转置。 + * @param src 指向源张量数据的指针。 + * @param dst 指向目标张量数据的指针。 + * @param d1, d2, d3 源张量的维度。 + * @param perm 维度置换向量, e.g., {0, 2, 1}。 + */ +void transpose3d_efficient_fp16(const mllm_fp16_t *src, mllm_fp16_t *dst, int d1, int d2, int d3, const std::vector &perm) { + // --- 1. 输入验证 --- + if (perm.size() != 3) { throw std::invalid_argument("Permutation vector must contain 3 elements."); } + std::vector sorted_perm = perm; + std::sort(sorted_perm.begin(), sorted_perm.end()); + if (sorted_perm[0] != 0 || sorted_perm[1] != 1 || sorted_perm[2] != 2) { + throw std::invalid_argument("Permutation vector must be a permutation of {0, 1, 2}."); + } + + const int src_dims[3] = {d1, d2, d3}; + + // --- 2. 处理特殊情况:无需转置 --- + if (perm[0] == 0 && perm[1] == 1 && perm[2] == 2) { + const size_t total_elements = static_cast(d1) * d2 * d3; + if (src != dst) { + memcpy(dst, src, total_elements * sizeof(mllm_fp16_t)); + } + return; + } + + // --- 3. 性能最优路径:只交换最后两个维度 (e.g., HSD -> HDS) --- + if (perm[0] == 0 && perm[1] == 2 && perm[2] == 1) { + const int N = d2; + const int M = d3; +#pragma omp parallel for schedule(static) + for (int i = 0; i < d1; ++i) { + const mllm_fp16_t *src_slice = src + i * (N * M); + mllm_fp16_t *dst_slice = dst + i * (M * N); + transpose_matrix_2d_efficient_fp16(src_slice, dst_slice, N, M); + } + return; + } + + // --- 4. 
通用路径:处理所有其他维度置换 --- + const int dst_dims[3] = {src_dims[perm[0]], src_dims[perm[1]], src_dims[perm[2]]}; + const int BLOCK_DIM = 16; + long src_strides[3] = {(long)d2 * d3, d3, 1}; + long dst_strides[3] = {(long)dst_dims[1] * dst_dims[2], dst_dims[2], 1}; + int p_inv[3]; + p_inv[perm[0]] = 0; + p_inv[perm[1]] = 1; + p_inv[perm[2]] = 2; + +#pragma omp parallel for schedule(static) + for (int i0 = 0; i0 < dst_dims[0]; i0 += BLOCK_DIM) { + for (int j0 = 0; j0 < dst_dims[1]; j0 += BLOCK_DIM) { + for (int k0 = 0; k0 < dst_dims[2]; k0 += BLOCK_DIM) { + for (int i = i0; i < i0 + BLOCK_DIM && i < dst_dims[0]; ++i) { + for (int j = j0; j < j0 + BLOCK_DIM && j < dst_dims[1]; ++j) { + for (int k = k0; k < k0 + BLOCK_DIM && k < dst_dims[2]; ++k) { + long dst_idx = (long)i * dst_strides[0] + (long)j * dst_strides[1] + k; + int dst_coords[3] = {i, j, k}; + int src_coords[3]; + src_coords[p_inv[0]] = dst_coords[0]; + src_coords[p_inv[1]] = dst_coords[1]; + src_coords[p_inv[2]] = dst_coords[2]; + long src_idx = (long)src_coords[0] * src_strides[0] + (long)src_coords[1] * src_strides[1] + src_coords[2]; + dst[dst_idx] = src[src_idx]; + } + } + } + } + } + } +} \ No newline at end of file diff --git a/src/backends/cpu/op/CPUAdd.cpp b/mllm/backends/cpu/op/CPUAdd.cpp similarity index 100% rename from src/backends/cpu/op/CPUAdd.cpp rename to mllm/backends/cpu/op/CPUAdd.cpp diff --git a/src/backends/cpu/op/CPUAdd.hpp b/mllm/backends/cpu/op/CPUAdd.hpp similarity index 100% rename from src/backends/cpu/op/CPUAdd.hpp rename to mllm/backends/cpu/op/CPUAdd.hpp diff --git a/src/backends/cpu/function/CPUArgSortFunc.hpp b/mllm/backends/cpu/op/CPUArgSortFunc.hpp similarity index 64% rename from src/backends/cpu/function/CPUArgSortFunc.hpp rename to mllm/backends/cpu/op/CPUArgSortFunc.hpp index f1a3976a4..fbe858ca1 100644 --- a/src/backends/cpu/function/CPUArgSortFunc.hpp +++ b/mllm/backends/cpu/op/CPUArgSortFunc.hpp @@ -4,18 +4,27 @@ #ifndef CPUARGSORTKFUNC_HPP #define CPUARGSORTKFUNC_HPP + 
#include "Tensor.hpp" #include "Types.hpp" +#include "CPUBackend.hpp" // For Op and Creator #include +#include +#include +#include namespace mllm { class Tensor; -class CPUargsortFunction : public TensorFunction { +class CPUargsortFunction : public Op { +private: + int thread_count = 4; + // 自定义比较函数,用于对索引进行排序 bool compareIndices(const std::pair &a, const std::pair &b) { return a.second < b.second; } + void argsort(float *input, int size, float *out_indices) { std::vector> indexedInput(size); for (int i = 0; i < size; ++i) { @@ -30,22 +39,34 @@ class CPUargsortFunction : public TensorFunction { } public: - void reshape(vector> outputs, vector> inputs, vector args) override { - assert(args.empty()); + CPUargsortFunction(Backend *bn, string name, int threadCount) : + thread_count(threadCount), Op(bn, name) { + } + + ErrorCode reshape(vector> inputs, vector> outputs) override { assert(inputs[0]->sequence() == 1); assert(inputs[0]->head() == 1); outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); outputs[0]->setDtype(inputs[0]->dtype()); // argsortk_values - outputs[0]->alloc(); + return ErrorCode::MLLM_NO_ERROR; } - void execute(vector> outputs, vector> inputs, vector args) override { + ErrorCode execute(vector> inputs, vector> outputs) override { int size = inputs[0]->dimension(); for (int b = 0; b < inputs[0]->batch(); b++) { float *data = inputs[0]->ptrAt(b, 0, 0, 0); float *out = outputs[0]->ptrAt(b, 0, 0, 0); argsort(data, size, out); } + return ErrorCode::MLLM_NO_ERROR; } }; + +class CPUargsortFunctionCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + return new CPUargsortFunction(bn, name, threadCount); + } +}; + } // namespace mllm #endif // CPUARGSORTKFUNC_HPP \ No newline at end of file diff --git a/src/backends/cpu/op/CPUAvgPool2D.cpp b/mllm/backends/cpu/op/CPUAvgPool2D.cpp similarity index 100% rename from 
src/backends/cpu/op/CPUAvgPool2D.cpp rename to mllm/backends/cpu/op/CPUAvgPool2D.cpp diff --git a/src/backends/cpu/op/CPUAvgPool2D.hpp b/mllm/backends/cpu/op/CPUAvgPool2D.hpp similarity index 100% rename from src/backends/cpu/op/CPUAvgPool2D.hpp rename to mllm/backends/cpu/op/CPUAvgPool2D.hpp diff --git a/src/backends/cpu/function/CPUBinCountFunc.hpp b/mllm/backends/cpu/op/CPUBinCountFunc.hpp similarity index 65% rename from src/backends/cpu/function/CPUBinCountFunc.hpp rename to mllm/backends/cpu/op/CPUBinCountFunc.hpp index 7d834cf06..327622cd6 100644 --- a/src/backends/cpu/function/CPUBinCountFunc.hpp +++ b/mllm/backends/cpu/op/CPUBinCountFunc.hpp @@ -6,23 +6,18 @@ #define CPUBINCOUNTKFUNC_HPP #include "Tensor.hpp" #include "Types.hpp" +#include #include #include "CPUBackend.hpp" namespace mllm { class Tensor; -class CPUbincountFunction : public TensorFunction { - void bincount(float *input, int size, float *out, int max_val) { - // 找到输入数组中的最大值 - // int max_val = 0; - // for (int i = 0; i < size; ++i) { - // int val = static_cast(input[i]); - // if (val > max_val) { - // max_val = val; - // } - // } +class CPUbincountFunction : public Op { +private: + int thread_count = 4; + void bincount(float *input, int size, float *out, int max_val) { // 初始化输出数组 #pragma omp parallel for collapse(1) num_threads(CPUBackend::cpu_threads) for (int i = 0; i <= max_val; ++i) { @@ -40,16 +35,21 @@ class CPUbincountFunction : public TensorFunction { } public: - void reshape(vector> outputs, vector> inputs, vector args) override { - assert(args.empty()); + CPUbincountFunction(Backend *bn, string name, int threadCount) : + thread_count(threadCount), Op(bn, name) { + } + + ErrorCode reshape(vector> inputs, vector> outputs) override { assert(inputs[0]->batch() == 1); assert(inputs[0]->sequence() == 1); assert(inputs[0]->head() == 1); + // For dynamic-shape ops, reshape sets what's known. Final shape is set in execute. 
outputs[0]->reshape(1, 1, 1, 0); - // outputs[0]->setDtype(inputs[0]->dtype()); // bincountk_values - outputs[0]->alloc(); + outputs[0]->setDtype(MLLM_TYPE_F32); + return ErrorCode::MLLM_NO_ERROR; } - void execute(vector> outputs, vector> inputs, vector args) override { + + ErrorCode execute(vector> inputs, vector> outputs) override { int size = inputs[0]->dimension(); int max_val = 0; for (int i = 0; i < size; ++i) { @@ -66,7 +66,16 @@ class CPUbincountFunction : public TensorFunction { if (max_val > 0) { bincount(data, size, out, max_val); } + return ErrorCode::MLLM_NO_ERROR; } }; + +class CPUbincountFunctionCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + return new CPUbincountFunction(bn, name, threadCount); + } +}; + } // namespace mllm #endif // CPUBINCOUNTKFUNC_HPP \ No newline at end of file diff --git a/src/backends/cpu/function/CPUBinaryFunc.hpp b/mllm/backends/cpu/op/CPUBinaryFunc.hpp similarity index 53% rename from src/backends/cpu/function/CPUBinaryFunc.hpp rename to mllm/backends/cpu/op/CPUBinaryFunc.hpp index 446fa84e0..2270cc4d5 100644 --- a/src/backends/cpu/function/CPUBinaryFunc.hpp +++ b/mllm/backends/cpu/op/CPUBinaryFunc.hpp @@ -12,20 +12,24 @@ namespace mllm { class Tensor; -class CPUaddFunction : public TensorFunction { +class CPUaddFunction : public Op { +private: + int thread_count = 4; + float data = 0.0f; // The data to be added public: - void reshape(vector> outputs, vector> inputs, vector args) override { - // float data = (float)args[0]; + CPUaddFunction(Backend *bn, string name, float data, int threadCount) : + thread_count(threadCount), Op(bn, name) { + this->data = data; + } + ErrorCode reshape(vector> inputs, vector> outputs) override { auto input = inputs[0]; auto output = outputs[0]; output->reshape(input->batch(), input->head(), input->sequence(), input->dimension()); output->setDtype(input->dtype()); - output->alloc(); + return 
ErrorCode::MLLM_NO_ERROR; } - void execute(vector> outputs, vector> inputs, vector args) override { - float data = (float)args[0]; + ErrorCode execute(vector> inputs, vector> outputs) override { auto input = inputs[0]; - auto output = outputs[0]; #pragma omp parallel for collapse(3) num_threads(CPUBackend::cpu_threads) for (int n = 0; n < input->batch(); ++n) { for (int c = 0; c < input->head(); ++c) { @@ -35,21 +39,36 @@ class CPUaddFunction : public TensorFunction { } } } + return ErrorCode::MLLM_NO_ERROR; + } +}; +class CPUaddFunctionCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const { + float data = (float)op_param.find("data")->second; + return new CPUaddFunction(bn, name, data, threadCount); } }; -class CPUsubFunction : public TensorFunction { + +class CPUsubFunction : public Op { +private: + int thread_count = 4; + float data = 0.0f; + public: - void reshape(vector> outputs, vector> inputs, vector args) override { + CPUsubFunction(Backend *bn, string name, float data, int threadCount) : + thread_count(threadCount), Op(bn, name) { + this->data = data; + } + ErrorCode reshape(vector> inputs, vector> outputs) override { auto input = inputs[0]; auto output = outputs[0]; output->reshape(input->batch(), input->head(), input->sequence(), input->dimension()); output->setDtype(input->dtype()); - output->alloc(); + return ErrorCode::MLLM_NO_ERROR; } - void execute(vector> outputs, vector> inputs, vector args) override { - float data = (float)args[0]; + ErrorCode execute(vector> inputs, vector> outputs) override { auto input = inputs[0]; - auto output = outputs[0]; #pragma omp parallel for collapse(3) num_threads(CPUBackend::cpu_threads) for (int n = 0; n < input->batch(); ++n) { for (int c = 0; c < input->head(); ++c) { @@ -59,21 +78,36 @@ class CPUsubFunction : public TensorFunction { } } } + return ErrorCode::MLLM_NO_ERROR; } }; -class CPUmulFunction : public TensorFunction { +class 
CPUsubFunctionCreator : public CPUBackend::Creator { public: - void reshape(vector> outputs, vector> inputs, vector args) override { + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const { + float data = (float)op_param.find("data")->second; + return new CPUsubFunction(bn, name, data, threadCount); + } +}; + +class CPUmulFunction : public Op { +private: + int thread_count = 4; + float data = 0.0f; + +public: + CPUmulFunction(Backend *bn, string name, float data, int threadCount) : + thread_count(threadCount), Op(bn, name) { + this->data = data; + } + ErrorCode reshape(vector> inputs, vector> outputs) override { auto input = inputs[0]; auto output = outputs[0]; output->reshape(input->batch(), input->head(), input->sequence(), input->dimension()); output->setDtype(input->dtype()); - output->alloc(); + return ErrorCode::MLLM_NO_ERROR; } - void execute(vector> outputs, vector> inputs, vector args) override { - float data = (float)args[0]; + ErrorCode execute(vector> inputs, vector> outputs) override { auto input = inputs[0]; - auto output = outputs[0]; #pragma omp parallel for collapse(3) num_threads(CPUBackend::cpu_threads) for (int n = 0; n < input->batch(); ++n) { for (int c = 0; c < input->head(); ++c) { @@ -83,21 +117,37 @@ class CPUmulFunction : public TensorFunction { } } } + return ErrorCode::MLLM_NO_ERROR; } }; -class CPUdivFunction : public TensorFunction { +class CPUmulFunctionCreator : public CPUBackend::Creator { public: - void reshape(vector> outputs, vector> inputs, vector args) override { + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const { + float data = (float)op_param.find("data")->second; + return new CPUmulFunction(bn, name, data, threadCount); + } +}; + +class CPUdivFunction : public Op { +private: + int thread_count = 4; + float data = 0.0f; + +public: + CPUdivFunction(Backend *bn, string name, float data, int threadCount) : + thread_count(threadCount), Op(bn, name) { + 
this->data = data; + } + ErrorCode reshape(vector> inputs, vector> outputs) override { auto input = inputs[0]; auto output = outputs[0]; + output->setCtype(input->ctype()); output->reshape(input->batch(), input->head(), input->sequence(), input->dimension()); output->setDtype(input->dtype()); - output->alloc(); + return ErrorCode::MLLM_NO_ERROR; } - void execute(vector> outputs, vector> inputs, vector args) override { - float data = (float)args[0]; + ErrorCode execute(vector> inputs, vector> outputs) override { auto input = inputs[0]; - auto output = outputs[0]; #pragma omp parallel for collapse(3) num_threads(CPUBackend::cpu_threads) for (int n = 0; n < input->batch(); ++n) { for (int c = 0; c < input->head(); ++c) { @@ -107,22 +157,36 @@ class CPUdivFunction : public TensorFunction { } } } + return ErrorCode::MLLM_NO_ERROR; + } +}; +class CPUdivFunctionCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const { + float data = (float)op_param.find("data")->second; + return new CPUdivFunction(bn, name, data, threadCount); } }; -class CPUdivintFunction : public TensorFunction { +class CPUdivintFunction : public Op { +private: + int thread_count = 4; + float data = 0.0f; + public: - void reshape(vector> outputs, vector> inputs, vector args) override { + CPUdivintFunction(Backend *bn, string name, float data, int threadCount) : + thread_count(threadCount), Op(bn, name) { + this->data = data; + } + ErrorCode reshape(vector> inputs, vector> outputs) override { auto input = inputs[0]; auto output = outputs[0]; output->reshape(input->batch(), input->head(), input->sequence(), input->dimension()); output->setDtype(input->dtype()); - output->alloc(); + return ErrorCode::MLLM_NO_ERROR; } - void execute(vector> outputs, vector> inputs, vector args) override { - float data = (float)args[0]; + ErrorCode execute(vector> inputs, vector> outputs) override { auto input = inputs[0]; - auto output = 
outputs[0]; #pragma omp parallel for collapse(4) num_threads(CPUBackend::cpu_threads) for (int n = 0; n < input->batch(); ++n) { for (int c = 0; c < input->head(); ++c) { @@ -134,44 +198,75 @@ class CPUdivintFunction : public TensorFunction { } } } + return ErrorCode::MLLM_NO_ERROR; } }; +class CPUdivintFunctionCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const { + float data = (float)op_param.find("data")->second; + return new CPUdivintFunction(bn, name, data, threadCount); + } +}; + +class CPUaddTwoFunction : public Op { +private: + int thread_count = 4; -class CPUaddTwoFunction : public TensorFunction { public: - void reshape(vector> outputs, vector> inputs, vector args) override { + CPUaddTwoFunction(Backend *bn, string name, int threadCount) : + thread_count(threadCount), Op(bn, name) { + } + ErrorCode reshape(vector> inputs, vector> outputs) override { outputs[0]->reshape(std::max(inputs[0]->batch(), inputs[1]->batch()), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); outputs[0]->setDtype(inputs[0]->dtype()); - outputs[0]->alloc(); + return ErrorCode::MLLM_NO_ERROR; }; - void execute(vector> outputs, vector> inputs, vector args) override { + ErrorCode execute(vector> inputs, vector> outputs) override { auto input0 = inputs[0]; auto input1 = inputs[1]; int batch_ = std::max(input0->batch(), input1->batch()); + int head_ = std::max(input0->head(), input1->head()); for (int n = 0; n < batch_; ++n) { auto n_0 = std::min(n, input0->batch() - 1); auto n_1 = std::min(n, input1->batch() - 1); -#pragma omp parallel for collapse(2) num_threads(CPUBackend::cpu_threads) - for (int c = 0; c < input0->head(); ++c) { +#pragma omp parallel for collapse(1) num_threads(CPUBackend::cpu_threads) + for (int c = 0; c < head_; ++c) { + auto c_0 = std::min(c, input0->head() - 1); + auto c_1 = std::min(c, input1->head() - 1); for (int h = 0; h < input0->sequence(); ++h) { - 
mllm_add_fp32(input0->ptrAt(n_0, c, h, 0), - input1->ptrAt(n_1, c, h, 0), + mllm_add_fp32(input0->ptrAt(n_0, c_0, h, 0), + input1->ptrAt(n_1, c_1, h, 0), outputs[0]->ptrAt(n, c, h, 0), input0->dimension()); } } } + return ErrorCode::MLLM_NO_ERROR; }; }; -class CPUsubTwoFunction : public TensorFunction { +class CPUaddTwoFunctionCreator : public CPUBackend::Creator { public: - void reshape(vector> outputs, vector> inputs, vector args) override { + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const { + return new CPUaddTwoFunction(bn, name, threadCount); + } +}; + +class CPUsubTwoFunction : public Op { +private: + int thread_count = 4; + +public: + CPUsubTwoFunction(Backend *bn, string name, int threadCount) : + thread_count(threadCount), Op(bn, name) { + } + ErrorCode reshape(vector> inputs, vector> outputs) override { outputs[0]->reshape(std::max(inputs[0]->batch(), inputs[1]->batch()), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); outputs[0]->setDtype(inputs[0]->dtype()); - outputs[0]->alloc(); + return ErrorCode::MLLM_NO_ERROR; }; - void execute(vector> outputs, vector> inputs, vector args) override { + ErrorCode execute(vector> inputs, vector> outputs) override { auto input0 = inputs[0]; auto input1 = inputs[1]; int batch_ = std::max(input0->batch(), input1->batch()); @@ -187,17 +282,31 @@ class CPUsubTwoFunction : public TensorFunction { } } } + return ErrorCode::MLLM_NO_ERROR; }; }; -class CPUmulTwoFunction : public TensorFunction { +class CPUsubTwoFunctionCreator : public CPUBackend::Creator { public: - void reshape(vector> outputs, vector> inputs, vector args) override { + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const { + return new CPUsubTwoFunction(bn, name, threadCount); + } +}; + +class CPUmulTwoFunction : public Op { +private: + int thread_count = 4; + +public: + CPUmulTwoFunction(Backend *bn, string name, int threadCount) : + thread_count(threadCount), Op(bn, 
name) { + } + ErrorCode reshape(vector> inputs, vector> outputs) override { outputs[0]->reshape(std::max(inputs[0]->batch(), inputs[1]->batch()), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); outputs[0]->setDtype(inputs[0]->dtype()); - outputs[0]->alloc(); + return ErrorCode::MLLM_NO_ERROR; }; - void execute(vector> outputs, vector> inputs, vector args) override { + ErrorCode execute(vector> inputs, vector> outputs) override { if (outputs[0]->sequence() == 0 || inputs[0]->sequence() != outputs[0]->sequence()) { outputs[0]->reshape(std::max(inputs[0]->batch(), inputs[1]->batch()), @@ -225,17 +334,31 @@ class CPUmulTwoFunction : public TensorFunction { } } } + return ErrorCode::MLLM_NO_ERROR; }; }; -class CPUdivTwoFunction : public TensorFunction { +class CPUmulTwoFunctionCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const { + return new CPUmulTwoFunction(bn, name, threadCount); + } +}; + +class CPUdivTwoFunction : public Op { +private: + int thread_count = 4; + public: - void reshape(vector> outputs, vector> inputs, vector args) override { + CPUdivTwoFunction(Backend *bn, string name, int threadCount) : + thread_count(threadCount), Op(bn, name) { + } + ErrorCode reshape(vector> inputs, vector> outputs) override { outputs[0]->reshape(std::max(inputs[0]->batch(), inputs[1]->batch()), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); outputs[0]->setDtype(inputs[0]->dtype()); - outputs[0]->alloc(); + return ErrorCode::MLLM_NO_ERROR; }; - void execute(vector> outputs, vector> inputs, vector args) override { + ErrorCode execute(vector> inputs, vector> outputs) override { auto input0 = inputs[0]; auto input1 = inputs[1]; int batch_ = std::max(input0->batch(), input1->batch()); @@ -257,8 +380,15 @@ class CPUdivTwoFunction : public TensorFunction { } } } + return ErrorCode::MLLM_NO_ERROR; }; }; +class CPUdivTwoFunctionCreator : public CPUBackend::Creator 
{ +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const { + return new CPUdivTwoFunction(bn, name, threadCount); + } +}; } // namespace mllm #endif // CPUBINARYFUNC_HPP \ No newline at end of file diff --git a/src/backends/cpu/op/CPUCat.cpp b/mllm/backends/cpu/op/CPUCat.cpp similarity index 97% rename from src/backends/cpu/op/CPUCat.cpp rename to mllm/backends/cpu/op/CPUCat.cpp index 8372889cd..d4775c3b3 100644 --- a/src/backends/cpu/op/CPUCat.cpp +++ b/mllm/backends/cpu/op/CPUCat.cpp @@ -124,7 +124,7 @@ ErrorCode CPUCat::setUp(vector> inputs, vector 0) { cseq += inputs[idx - 1]->sequence(); } - inputs[idx]->shallowCopyFrom(outputs[0].get(), false, {cbatch, chead, cseq, cdim}); // b,h,s,d + inputs[idx]->shallowCopyFrom(outputs[0], false, {cbatch, chead, cseq, cdim}, 1); // b,h,s,d } return MLLM_NO_ERROR; } else { diff --git a/src/backends/cpu/op/CPUCat.hpp b/mllm/backends/cpu/op/CPUCat.hpp similarity index 100% rename from src/backends/cpu/op/CPUCat.hpp rename to mllm/backends/cpu/op/CPUCat.hpp diff --git a/src/backends/cpu/function/CPUCatFunc.hpp b/mllm/backends/cpu/op/CPUCatFunc.hpp similarity index 74% rename from src/backends/cpu/function/CPUCatFunc.hpp rename to mllm/backends/cpu/op/CPUCatFunc.hpp index 381bb56fd..f329da8a6 100644 --- a/src/backends/cpu/function/CPUCatFunc.hpp +++ b/mllm/backends/cpu/op/CPUCatFunc.hpp @@ -4,18 +4,29 @@ #ifndef CPUCATFUNC_HPP #define CPUCATFUNC_HPP + #include "Tensor.hpp" #include "Types.hpp" #include "CPUBackend.hpp" +#include #include +#include +#include namespace mllm { class Tensor; -class CPUcatFunction : public TensorFunction { +class CPUcatFunction : public Op { +private: + int thread_count = 4; + Chl axis_; + public: - void setUp(vector> outputs, vector> inputs, vector args) override { - Chl axis = (Chl)args[0]; + CPUcatFunction(Backend *bn, string name, int threadCount, Chl axis) : + Op(bn, name), thread_count(threadCount), axis_(axis) { + } + + ErrorCode setUp(vector> inputs, 
vector> outputs) override { if (outputs[0]->shape().empty()) { int expd_batch_ = inputs[0]->batch(); for (int ii = 0; ii < inputs.size(); ++ii) { @@ -32,7 +43,7 @@ class CPUcatFunction : public TensorFunction { Chl axes[] = {BATCH, HEAD, SEQUENCE, DIMENSION}; int *dims[] = {&dim_b, &dim_h, &dim_s, &dim_d}; for (int i = 0; i < 4; i++) { - if (axis == axes[i]) { + if (axis_ == axes[i]) { for (auto input : inputs) { sizes[i] += (i == 0) ? input->batch() : (i == 1) ? input->head() : (i == 2) ? input->sequence() : @@ -46,7 +57,7 @@ class CPUcatFunction : public TensorFunction { outputs[0]->setDtype(inputs[0]->dtype()); outputs[0]->alloc(); } - if (axis == HEAD) { + if (axis_ == HEAD) { int cbatch = 0; int chead = 0; int cseq = 0; @@ -55,7 +66,7 @@ class CPUcatFunction : public TensorFunction { if (inputs[0]->masterTensor() == nullptr) { inputs[0]->free(); } - inputs[0]->shallowCopyFrom(outputs[0].get(), false, {cbatch, chead, cseq, cdim}); + inputs[0]->shallowCopyFrom(outputs[0], false, {cbatch, chead, cseq, cdim}); } else { for (int idx = 0; idx < inputs.size(); idx++) { if (inputs[idx]->masterTensor() == nullptr) { @@ -64,10 +75,10 @@ class CPUcatFunction : public TensorFunction { if (idx > 0) { chead += inputs[idx - 1]->head(); } - inputs[idx]->shallowCopyFrom(outputs[0].get(), false, {cbatch, chead, cseq, cdim}); // b,h,s,d + inputs[idx]->shallowCopyFrom(outputs[0], false, {cbatch, chead, cseq, cdim}); // b,h,s,d } } - } else if (axis == SEQUENCE && inputs[0]->head() != 1) { + } else if (axis_ == SEQUENCE && inputs[0]->head() != 1) { int cbatch = 0; int chead = 0; int cseq = 0; @@ -79,9 +90,9 @@ class CPUcatFunction : public TensorFunction { if (idx > 0) { cseq += inputs[idx - 1]->sequence(); } - inputs[idx]->shallowCopyFrom(outputs[0].get(), false, {cbatch, chead, cseq, cdim}); // b,h,s,d + inputs[idx]->shallowCopyFrom(outputs[0], false, {cbatch, chead, cseq, cdim}); // b,h,s,d } - } else if (axis == DIMENSION && inputs[0]->head() != 1) { + } else if (axis_ == 
DIMENSION && inputs[0]->head() != 1) { int cbatch = 0; int chead = 0; int cseq = 0; @@ -102,7 +113,7 @@ class CPUcatFunction : public TensorFunction { } } } - inputs[idx]->shallowCopyFrom(outputs[0].get(), false, {cbatch, chead, cseq, cdim}); // b,h,s,d + inputs[idx]->shallowCopyFrom(outputs[0], false, {cbatch, chead, cseq, cdim}); // b,h,s,d if (inputs[idx]->deaggregatedTensor() != nullptr) { vector> shared_outputs = {}; for (int t = 0; t < inputs[idx]->deaggregatedTensor()->aggregatedTensors().size(); t++) { @@ -114,10 +125,10 @@ class CPUcatFunction : public TensorFunction { } } } + return MLLM_NO_ERROR; } - void reshape(vector> outputs, vector> inputs, vector args) override { - Chl axis = (Chl)args[0]; + ErrorCode reshape(vector> inputs, vector> outputs) override { int expd_batch_ = inputs[0]->batch(); for (int ii = 0; ii < inputs.size(); ++ii) { auto input = inputs[ii]; @@ -133,7 +144,7 @@ class CPUcatFunction : public TensorFunction { Chl axes[] = {BATCH, HEAD, SEQUENCE, DIMENSION}; int *dims[] = {&dim_b, &dim_h, &dim_s, &dim_d}; for (int i = 0; i < 4; i++) { - if (axis == axes[i]) { + if (axis_ == axes[i]) { for (auto input : inputs) { sizes[i] += (i == 0) ? input->batch() : (i == 1) ? input->head() : (i == 2) ? 
input->sequence() : @@ -148,80 +159,10 @@ class CPUcatFunction : public TensorFunction { outputs[0]->setDtype(inputs[0]->dtype()); outputs[0]->alloc(); } - /* - if (axis == HEAD){ - int cbatch = 0; - int chead = 0; - int cseq = 0; - int cdim = 0; - if(inputs[0]->hostPtr() == inputs[1]->hostPtr()){ - if (inputs[0]->masterTensor() == nullptr) { - inputs[0]->free(); - } - inputs[0]->shallowCopyFrom(outputs[0].get(), false, {cbatch, chead, cseq, cdim}); - }else{ - for (int idx = 0; idx < inputs.size(); idx++) { - if (inputs[idx]->masterTensor() == nullptr) { - inputs[idx]->free(); - } - if (idx > 0) { - chead += inputs[idx - 1]->head(); - } - inputs[idx]->shallowCopyFrom(outputs[0].get(), false, {cbatch, chead, cseq, cdim}); // b,h,s,d - } - } - }else if (axis == SEQUENCE && inputs[0]->head() != 1) { - int cbatch = 0; - int chead = 0; - int cseq = 0; - int cdim = 0; - for (int idx = 0; idx < inputs.size(); idx++) { - if (inputs[idx]->masterTensor() == nullptr) { - inputs[idx]->free(); - } - if (idx > 0) { - cseq += inputs[idx - 1]->sequence(); - } - inputs[idx]->shallowCopyFrom(outputs[0].get(), false, {cbatch, chead, cseq, cdim}); // b,h,s,d - } - } else if (axis == DIMENSION && inputs[0]->head() != 1) { - int cbatch = 0; - int chead = 0; - int cseq = 0; - int cdim = 0; - for (int idx = 0; idx < inputs.size(); idx++) { - if (inputs[idx]->masterTensor() == nullptr) { - inputs[idx]->free(); - } - if (idx > 0) { - cdim += inputs[idx - 1]->dimension(); - } - int tmp_agg_idx; - if (inputs[idx]->deaggregatedTensor() != nullptr) { - for (int t = 0; t < inputs[idx]->deaggregatedTensor()->aggregatedTensors().size(); t++) { - if (inputs[idx]->deaggregatedTensor()->aggregatedTensors()[t].get() == inputs[idx]) { - tmp_agg_idx = t; - continue; - } - } - } - inputs[idx]->shallowCopyFrom(outputs[0].get(), false, {cbatch, chead, cseq, cdim}); // b,h,s,d - if (inputs[idx]->deaggregatedTensor() != nullptr) { - vector> shared_outputs = {}; - for (int t = 0; t < 
inputs[idx]->deaggregatedTensor()->aggregatedTensors().size(); t++) { - if (t == tmp_agg_idx) { - inputs[idx]->deaggregatedTensor()->aggregatedTensors()[t] = - std::shared_ptr(inputs[idx], [](Tensor *) {}); - } - } - } - } - } - */ + return MLLM_NO_ERROR; } - void execute(vector> outputs, vector> inputs, vector args) override { - // TOD : FIX THIS WHEN NO MEMCPY - Chl axis = (Chl)args[0]; + + ErrorCode execute(vector> inputs, vector> outputs) override { int expd_batch_ = inputs[0]->batch(); int expd_batch_input_idx = 0; // int expd_head_ = inputs[0]->head(); @@ -237,14 +178,14 @@ class CPUcatFunction : public TensorFunction { // expd_head_input_idx = ii; // } } - if (axis == BATCH) { + if (axis_ == BATCH) { for (int n = 0; n < inputs.size(); ++n) { auto copysize = inputs[0]->batch() * inputs[0]->head() * inputs[0]->sequence() * inputs[0]->dimension(); memcpy(outputs[0]->ptrAt(n * inputs[0]->batch(), 0, 0, 0), inputs[n]->ptrAt(0, 0, 0, 0), sizeof(float) * copysize); } - } else if (axis == DIMENSION) { + } else if (axis_ == DIMENSION) { for (int n = 0; n < expd_batch_; ++n) { for (int c = 0; c < inputs[0]->head(); ++c) { for (int h = 0; h < inputs[0]->sequence(); ++h) { @@ -270,7 +211,7 @@ class CPUcatFunction : public TensorFunction { } } } - } else if ((axis == SEQUENCE) && inputs[0]->head() != 1) { + } else if ((axis_ == SEQUENCE) && inputs[0]->head() != 1) { // #pragma omp parallel for collapse(2) num_threads(CPUBackend::cpu_threads) .//TODO优化 assert(inputs[0]->head() == inputs[1]->head()); assert(outputs[0]->ctype() == inputs[0]->ctype()); @@ -322,7 +263,7 @@ class CPUcatFunction : public TensorFunction { } } } - } else if ((axis == SEQUENCE) && inputs[0]->head() == 1) { + } else if ((axis_ == SEQUENCE) && inputs[0]->head() == 1) { for (int n = 0; n < expd_batch_; ++n) { int h = 0; for (int idx = 0; idx < inputs.size(); idx++) { @@ -336,7 +277,7 @@ class CPUcatFunction : public TensorFunction { h += inputs[idx]->sequence(); } } - } else if (axis == HEAD) { + } 
else if (axis_ == HEAD) { if (inputs[0]->hostPtr() == inputs[1]->hostPtr()) { for (int b = 0; b < outputs[0]->batch(); ++b) { for (int s = 0; s < inputs[0]->sequence(); ++s) { @@ -348,7 +289,7 @@ class CPUcatFunction : public TensorFunction { } } } - return; + return MLLM_NO_ERROR; } for (int b = 0; b < expd_batch_; ++b) { #pragma omp parallel for collapse(1) num_threads(CPUBackend::cpu_threads) @@ -370,6 +311,15 @@ class CPUcatFunction : public TensorFunction { } } } + return MLLM_NO_ERROR; + } +}; + +class CPUcatFunctionCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + Chl axis = (Chl)op_param.at("axis"); + return new CPUcatFunction(bn, name, threadCount, axis); } }; diff --git a/src/backends/cpu/op/CPUCausalMask.cpp b/mllm/backends/cpu/op/CPUCausalMask.cpp similarity index 81% rename from src/backends/cpu/op/CPUCausalMask.cpp rename to mllm/backends/cpu/op/CPUCausalMask.cpp index 5ede7400a..199dd11b2 100644 --- a/src/backends/cpu/op/CPUCausalMask.cpp +++ b/mllm/backends/cpu/op/CPUCausalMask.cpp @@ -4,30 +4,30 @@ namespace mllm { - -CPUCausalMask::CPUCausalMask(Backend *bn, string opName, int threadCount) : thread_count(threadCount), +CPUCausalMask::CPUCausalMask(Backend *bn, string opName, int threadCount) : + thread_count(threadCount), Op(bn, opName) { } ErrorCode CPUCausalMask::reshape(vector> inputs, vector> outputs) { - //std::cout << "CPUMask reshape" << std::endl; - // assert(inputs.size() == 1); + // std::cout << "CPUMask reshape" << std::endl; + // assert(inputs.size() == 1); assert(outputs.size() == 1); outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); return Op::reshape(inputs, outputs); } ErrorCode CPUCausalMask::execute(vector> inputs, vector> outputs) { - if(inputs[0]->sequence() >1 ) { + if (inputs[0]->sequence() > 1) { int batch_size = inputs[0]->batch(); int head_num = inputs[0]->head(); int 
sequence = inputs[0]->sequence(); int dimension = inputs[0]->dimension(); // memset(outputs[0]->hostPtr(),-INFINITY,outputs[0]->count() * sizeof(float)); int old_dim = 0; - if (inputs.size()>1) { - old_dim = (int)inputs[1]->dataAt(0,0,0,0)-sequence; - }else{ + if (inputs.size() > 1) { + old_dim = (int)inputs[1]->dataAt(0, 0, 0, 0) - sequence; + } else { #ifndef LLAMAFILE_SGEMM old_dim = dimension - sequence; #endif @@ -39,16 +39,14 @@ ErrorCode CPUCausalMask::execute(vector> inputs, vectordimension(); ++d) { if (d > s + old_dim) { outputs[0]->setDataAt({n, h, s, d}, -INFINITY); - } - else{ + } else { outputs[0]->setDataAt({n, h, s, d}, inputs[0]->dataAt(n, h, s, d)); } } } } } - } - else{ + } else { outputs[0]->copyFrom(inputs[0]); } return Op::execute(inputs, outputs); @@ -57,12 +55,12 @@ ErrorCode CPUCausalMask::execute(vector> inputs, vector> inputs, vector> outputs) { // assert(inputs.size() == 1); assert(outputs.size() == 1); - if(inputs[0]->masterTensor() == nullptr) { + if (inputs[0]->masterTensor() == nullptr) { inputs[0]->free(); // TODO remove } outputs[0]->setDtype(activation_dtype()); outputs[0]->alloc(); - inputs[0]->shallowCopyFrom(outputs[0].get(), false); + inputs[0]->shallowCopyFrom(outputs[0], false); return MLLM_NO_ERROR; } } // namespace mllm diff --git a/src/backends/cpu/op/CPUCausalMask.hpp b/mllm/backends/cpu/op/CPUCausalMask.hpp similarity index 100% rename from src/backends/cpu/op/CPUCausalMask.hpp rename to mllm/backends/cpu/op/CPUCausalMask.hpp diff --git a/src/backends/cpu/op/CPUCausalTreeMask.cpp b/mllm/backends/cpu/op/CPUCausalTreeMask.cpp similarity index 100% rename from src/backends/cpu/op/CPUCausalTreeMask.cpp rename to mllm/backends/cpu/op/CPUCausalTreeMask.cpp diff --git a/src/backends/cpu/op/CPUCausalTreeMask.hpp b/mllm/backends/cpu/op/CPUCausalTreeMask.hpp similarity index 100% rename from src/backends/cpu/op/CPUCausalTreeMask.hpp rename to mllm/backends/cpu/op/CPUCausalTreeMask.hpp diff --git 
a/mllm/backends/cpu/op/CPUClipFunc.hpp b/mllm/backends/cpu/op/CPUClipFunc.hpp new file mode 100644 index 000000000..c6b623933 --- /dev/null +++ b/mllm/backends/cpu/op/CPUClipFunc.hpp @@ -0,0 +1,357 @@ +// +// Created by Rongjie Yi on 24-2-26. +// + +#ifndef CPUCLIPFUNC_HPP +#define CPUCLIPFUNC_HPP + +#include "Tensor.hpp" +#include "Types.hpp" +#include "CPUBackend.hpp" +#include +#include +#include + +namespace mllm { +class Tensor; + +class CPUclipFunction : public Op { +private: + int thread_count = 4; + std::vector b_; + std::vector h_; + std::vector s_; + std::vector d_; + +public: + CPUclipFunction(Backend *bn, string name, int threadCount, + const std::vector &b, const std::vector &h, + const std::vector &s, const std::vector &d) : + Op(bn, name), + thread_count(threadCount), b_(b), h_(h), s_(s), d_(d) { + } + + ErrorCode reshape(vector> inputs, vector> outputs) override { + int dim_b = inputs[0]->batch(); + int dim_h = inputs[0]->head(); + int dim_s = inputs[0]->sequence(); + int dim_d = inputs[0]->dimension(); + + std::vector *, int *>> data = {{&b_, &dim_b}, {&h_, &dim_h}, {&s_, &dim_s}, {&d_, &dim_d}}; + for (auto &pair : data) { + if (pair.first->size() == 2) { + *pair.second = (*pair.first)[1] - (*pair.first)[0]; + } else if (pair.first->size() == 1) { + *pair.second = 1; + } + } + + outputs[0]->reshape(dim_b, dim_h, dim_s, dim_d); + outputs[0]->setDtype(inputs[0]->dtype()); + return ErrorCode::MLLM_NO_ERROR; + } + + ErrorCode execute(vector> inputs, vector> outputs) override { + int dim_b = inputs[0]->batch(); + int dim_h = inputs[0]->head(); + int dim_s = inputs[0]->sequence(); + int dim_d = inputs[0]->dimension(); + std::vector, int *>> data = {{b_, &dim_b}, {h_, &dim_h}, {s_, &dim_s}, {d_, &dim_d}}; + for (auto &pair : data) { + if (pair.first.size() == 2) { + *pair.second = pair.first[1] - pair.first[0]; + } else if (pair.first.size() == 1) { + *pair.second = 1; + } + } + if (outputs[0]->dimension() * outputs[0]->sequence() * outputs[0]->head() * 
outputs[0]->batch() == 0 + || outputs[0]->shape().empty() + || dim_d != outputs[0]->dimension()) { + outputs[0]->reshape(dim_b, dim_h, dim_s, dim_d); + outputs[0]->alloc(); + } + + if (s_.size() == 2) { +#pragma omp parallel for collapse(1) num_threads(CPUBackend::cpu_threads) + for (int b = 0; b < inputs[0]->batch(); ++b) { + memcpy(outputs[0]->hostPtr() + outputs[0]->offset(b, 0, 0, 0), + inputs[0]->hostPtr() + inputs[0]->offset(b, 0, s_[0], 0), + inputs[0]->head() * (s_[1] - s_[0]) * inputs[0]->dimension() * sizeof(float)); + } + } else if (s_.size() == 1) { + int seq_idx = s_[0]; + if (seq_idx < 0) { + seq_idx = inputs[0]->sequence() + seq_idx; + } +#pragma omp parallel for collapse(1) num_threads(CPUBackend::cpu_threads) + for (int b = 0; b < inputs[0]->batch(); ++b) { + memcpy(outputs[0]->hostPtr() + outputs[0]->offset(b, 0, 0, 0), + inputs[0]->hostPtr() + inputs[0]->offset(b, 0, seq_idx, 0), + inputs[0]->head() * 1 * inputs[0]->dimension() * sizeof(float)); + } + } else if (b_.size() == 1) { + int bth_idx = b_[0]; + if (bth_idx < 0) { + bth_idx = inputs[0]->batch() + bth_idx; + } + memcpy(outputs[0]->hostPtr(), + inputs[0]->hostPtr() + inputs[0]->offset(bth_idx, 0, 0, 0), + inputs[0]->head() * inputs[0]->sequence() * inputs[0]->dimension() * sizeof(float)); + } else if (b_.size() == 2) { + assert(b_[1] - b_[0] > 0); + memcpy(outputs[0]->hostPtr(), + inputs[0]->hostPtr() + inputs[0]->offset(b_[0], 0, 0, 0), + (b_[1] - b_[0]) * inputs[0]->head() * inputs[0]->sequence() * inputs[0]->dimension() * sizeof(float)); + } else if (d_.size() == 2) { +#pragma omp parallel for collapse(1) num_threads(CPUBackend::cpu_threads) + for (int b = 0; b < inputs[0]->batch(); ++b) { + for (int s = 0; s < inputs[0]->sequence(); ++s) { + for (int h = 0; h < inputs[0]->head(); ++h) { + memcpy(outputs[0]->hostPtr() + outputs[0]->offset(b, h, s, 0), + inputs[0]->hostPtr() + inputs[0]->offset(b, h, s, d_[0]), + (d_[1] - d_[0]) * sizeof(float)); + } + } + } + } else if (d_.size() == 1) 
{ + int seq_idx = d_[0]; + if (seq_idx < 0) { + seq_idx = inputs[0]->dimension() + seq_idx; + } +#pragma omp parallel for collapse(1) num_threads(CPUBackend::cpu_threads) + for (int b = 0; b < inputs[0]->batch(); ++b) { + for (int s = 0; s < inputs[0]->sequence(); ++s) { + for (int h = 0; h < inputs[0]->head(); ++h) { + memcpy(outputs[0]->hostPtr() + outputs[0]->offset(b, h, s, 0), + inputs[0]->hostPtr() + inputs[0]->offset(b, h, s, seq_idx), + sizeof(float)); + } + } + } + } else { + std::cout << "[TODO]Tensor.CLip not support!!!!" << std::endl; + } + + return ErrorCode::MLLM_NO_ERROR; + } +}; + +class CPUclipFunctionCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + // Assumes OpParam is structured to reconstruct the vectors. + // Example structure: {"b_size": 1, "b_0": 5, "h_size": 0, ...} + int b_size = op_param.at("b_size"); + int h_size = op_param.at("h_size"); + int s_size = op_param.at("s_size"); + int d_size = op_param.at("d_size"); + + std::vector b, h, s, d; + for (int i = 0; i < b_size; ++i) b.push_back(op_param.at("b_" + std::to_string(i))); + for (int i = 0; i < h_size; ++i) h.push_back(op_param.at("h_" + std::to_string(i))); + for (int i = 0; i < s_size; ++i) s.push_back(op_param.at("s_" + std::to_string(i))); + for (int i = 0; i < d_size; ++i) d.push_back(op_param.at("d_" + std::to_string(i))); + + return new CPUclipFunction(bn, name, threadCount, b, h, s, d); + } +}; + +class CPUclipaxisFunction : public Op { +private: + int thread_count = 4; + Chl axis_; + std::vector b_; + std::vector h_; + std::vector s_; + std::vector d_; + +public: + CPUclipaxisFunction(Backend *bn, string name, int threadCount, Chl axis, + const std::vector &b, const std::vector &h, + const std::vector &s, const std::vector &d) : + Op(bn, name), + thread_count(threadCount), axis_(axis), b_(b), h_(h), s_(s), d_(d) { + } + + ErrorCode reshape(vector> inputs, vector> outputs) 
override { + int dim_b = inputs[0]->batch(); + int dim_h = inputs[0]->head(); + int dim_s = inputs[0]->sequence(); + int dim_d = inputs[0]->dimension(); + switch (axis_) { + case BATCH: { + std::vector, int *>> data = {{h_, &dim_h}, {s_, &dim_s}, {d_, &dim_d}}; + for (auto &pair : data) { + if (!pair.first.empty()) { + *pair.second = 1; + } + } + break; + } + case HEAD: { + std::vector, int *>> data = {{b_, &dim_b}, {s_, &dim_s}, {d_, &dim_d}}; + for (auto &pair : data) { + if (!pair.first.empty()) { + *pair.second = 1; + } + } + break; + } + case SEQUENCE: { + std::vector, int *>> data = {{b_, &dim_b}, {h_, &dim_h}, {d_, &dim_d}}; + for (auto &pair : data) { + if (!pair.first.empty()) { + *pair.second = 1; + } + } + break; + } + case DIMENSION: { + std::vector, int *>> data = {{b_, &dim_b}, {h_, &dim_h}, {s_, &dim_s}}; + for (auto &pair : data) { + if (!pair.first.empty()) { + *pair.second = 1; + } + } + break; + } + default: + break; + } + outputs[0]->reshape(dim_b, dim_h, dim_s, dim_d); + outputs[0]->setDtype(inputs[0]->dtype()); + return ErrorCode::MLLM_NO_ERROR; + } + ErrorCode execute(vector> inputs, vector> outputs) override { + if (axis_ == BATCH) { + if (!s_.empty()) { + for (int i = 0; i < s_.size(); ++i) { + auto seq_idx = s_[i]; + memcpy(outputs[0]->hostPtr() + outputs[0]->offset(i, 0, 0, 0), + inputs[0]->hostPtr() + inputs[0]->offset(i, 0, seq_idx, 0), + inputs[0]->head() * 1 * inputs[0]->dimension() * sizeof(float)); + } + } + } else { + std::cout << "[TODO]Tensor.CLip axis not support!!!!" << std::endl; + } + return ErrorCode::MLLM_NO_ERROR; + } +}; + +class CPUclipaxisFunctionCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + Chl axis = (Chl)op_param.at("axis"); + int b_size = op_param.count("b_size") ? op_param.at("b_size") : 0; + int h_size = op_param.count("h_size") ? op_param.at("h_size") : 0; + int s_size = op_param.count("s_size") ? 
op_param.at("s_size") : 0; + int d_size = op_param.count("d_size") ? op_param.at("d_size") : 0; + + std::vector b, h, s, d; + for (int i = 0; i < b_size; ++i) b.push_back(op_param.at("b_" + std::to_string(i))); + for (int i = 0; i < h_size; ++i) h.push_back(op_param.at("h_" + std::to_string(i))); + for (int i = 0; i < s_size; ++i) s.push_back(op_param.at("s_" + std::to_string(i))); + for (int i = 0; i < d_size; ++i) d.push_back(op_param.at("d_" + std::to_string(i))); + + return new CPUclipaxisFunction(bn, name, threadCount, axis, b, h, s, d); + } +}; + +class CPUcliptensorFunction : public Op { +private: + int thread_count = 4; + Chl dim_; + +public: + CPUcliptensorFunction(Backend *bn, string name, int threadCount, Chl dim) : + Op(bn, name), thread_count(threadCount), dim_(dim) { + } + + ErrorCode reshape(vector> inputs, vector> outputs) override { + if (dim_ == SEQUENCE) { + int new_seq = inputs[1]->dimension(); + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), new_seq, inputs[0]->dimension()); + } else if (dim_ == DIMENSION) { + int new_dim = inputs[1]->dimension(); + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), new_dim); + } else { + std::cout << "[TODO]Tensor.Clip tensor not support!!!!" 
<< std::endl; + } + outputs[0]->setDtype(inputs[0]->dtype()); + return ErrorCode::MLLM_NO_ERROR; + } + + ErrorCode execute(vector> inputs, vector> outputs) override { + if (dim_ == SEQUENCE) { + if (inputs[0]->ctype() == BHDS) { + outputs[0]->chls() = inputs[0]->chls(); + outputs[0]->setCtype(BHDS); + int new_seq = inputs[1]->dimension(); + if (outputs[0]->sequence() == 0 || outputs[0]->shape().empty() + || new_seq != outputs[0]->sequence()) { + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), new_seq, inputs[0]->dimension()); + outputs[0]->alloc(); + } + +#pragma omp parallel for collapse(3) num_threads(CPUBackend::cpu_threads) + for (int b = 0; b < inputs[0]->batch(); ++b) { + for (int d = 0; d < inputs[0]->dimension(); ++d) { + for (int s = 0; s < new_seq; ++s) { + auto selected_idx = (int)inputs[1]->dataAt(0, 0, 0, s); + outputs[0]->setDataAt(b, 0, s, d, + inputs[0]->dataAt(b, 0, selected_idx, d)); + } + } + } + return MLLM_NO_ERROR; + } + int new_seq = inputs[1]->dimension(); + if (outputs[0]->sequence() == 0 || outputs[0]->shape().empty() + || new_seq != outputs[0]->sequence()) { + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), new_seq, inputs[0]->dimension()); + outputs[0]->alloc(); + } + for (int b = 0; b < inputs[0]->batch(); ++b) { +#pragma omp parallel for num_threads(CPUBackend::cpu_threads) + for (int s = 0; s < inputs[1]->dimension(); ++s) { + auto selected_idx = (int)inputs[1]->dataAt(0, 0, 0, s); + memcpy(outputs[0]->ptrAt(b, 0, s, 0), + inputs[0]->ptrAt(b, 0, selected_idx, 0), + inputs[0]->head() * inputs[0]->dimension() * sizeof(float)); + } + } + } else if (dim_ == DIMENSION) { + int new_seq = inputs[1]->dimension(); + if (outputs[0]->sequence() == 0 || outputs[0]->shape().empty() + || new_seq != outputs[0]->sequence()) { + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), new_seq); + outputs[0]->alloc(); + } +#pragma omp parallel for collapse(3) num_threads(CPUBackend::cpu_threads) + for (int 
b = 0; b < inputs[0]->batch(); ++b) { + for (int s = 0; s < inputs[0]->sequence(); ++s) { + for (int d = 0; d < inputs[1]->dimension(); ++d) { + auto selected_idx = (int)inputs[1]->dataAt(0, 0, 0, d); + outputs[0]->setDataAt(b, 0, s, d, + inputs[0]->dataAt(b, 0, s, selected_idx)); + } + } + } + } else { + std::cout << "[TODO]Tensor.CLip not support!!!!" << std::endl; + } + return ErrorCode::MLLM_NO_ERROR; + } +}; + +class CPUcliptensorFunctionCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + Chl dim = (Chl)op_param.at("dim"); + return new CPUcliptensorFunction(bn, name, threadCount, dim); + } +}; + +} // namespace mllm +#endif // CPUCLIPFUNC_HPP \ No newline at end of file diff --git a/src/backends/cpu/op/CPUConvolution2D.cpp b/mllm/backends/cpu/op/CPUConvolution2D.cpp similarity index 100% rename from src/backends/cpu/op/CPUConvolution2D.cpp rename to mllm/backends/cpu/op/CPUConvolution2D.cpp diff --git a/src/backends/cpu/op/CPUConvolution2D.hpp b/mllm/backends/cpu/op/CPUConvolution2D.hpp similarity index 100% rename from src/backends/cpu/op/CPUConvolution2D.hpp rename to mllm/backends/cpu/op/CPUConvolution2D.hpp diff --git a/src/backends/cpu/op/CPUConvolution3D.cpp b/mllm/backends/cpu/op/CPUConvolution3D.cpp similarity index 100% rename from src/backends/cpu/op/CPUConvolution3D.cpp rename to mllm/backends/cpu/op/CPUConvolution3D.cpp diff --git a/src/backends/cpu/op/CPUConvolution3D.hpp b/mllm/backends/cpu/op/CPUConvolution3D.hpp similarity index 100% rename from src/backends/cpu/op/CPUConvolution3D.hpp rename to mllm/backends/cpu/op/CPUConvolution3D.hpp diff --git a/src/backends/cpu/op/CPUDivision.cpp b/mllm/backends/cpu/op/CPUDivision.cpp similarity index 100% rename from src/backends/cpu/op/CPUDivision.cpp rename to mllm/backends/cpu/op/CPUDivision.cpp diff --git a/src/backends/cpu/op/CPUDivision.hpp b/mllm/backends/cpu/op/CPUDivision.hpp similarity index 100% 
rename from src/backends/cpu/op/CPUDivision.hpp rename to mllm/backends/cpu/op/CPUDivision.hpp diff --git a/src/backends/cpu/op/CPUElasticLinear.cpp b/mllm/backends/cpu/op/CPUElasticLinear.cpp similarity index 73% rename from src/backends/cpu/op/CPUElasticLinear.cpp rename to mllm/backends/cpu/op/CPUElasticLinear.cpp index b2f9462cf..8314a0362 100644 --- a/src/backends/cpu/op/CPUElasticLinear.cpp +++ b/mllm/backends/cpu/op/CPUElasticLinear.cpp @@ -69,32 +69,6 @@ ErrorCode CPUElasticLinear::execute(vector> inputs, vector> inputs, vector> outputs) { diff --git a/src/backends/cpu/op/CPUElasticLinear.hpp b/mllm/backends/cpu/op/CPUElasticLinear.hpp similarity index 100% rename from src/backends/cpu/op/CPUElasticLinear.hpp rename to mllm/backends/cpu/op/CPUElasticLinear.hpp diff --git a/src/backends/cpu/op/CPUEmbedding.cpp b/mllm/backends/cpu/op/CPUEmbedding.cpp similarity index 98% rename from src/backends/cpu/op/CPUEmbedding.cpp rename to mllm/backends/cpu/op/CPUEmbedding.cpp index 6af2fe36a..8195b8bb7 100644 --- a/src/backends/cpu/op/CPUEmbedding.cpp +++ b/mllm/backends/cpu/op/CPUEmbedding.cpp @@ -1,7 +1,7 @@ #include "CPUEmbedding.hpp" #include "ParamLoader.hpp" -#include "quantize/QuantizeQ4.hpp" -#include "quantize/QuantizeQ8.hpp" +#include "backends/cpu/third_party/ggml/QuantizeQ4.hpp" +#include "backends/cpu/third_party/ggml/QuantizeQ8.hpp" namespace mllm { CPUEmbedding::CPUEmbedding(Backend *bn, string opName, int hiddenSize, int vocabSize, int threadCount) : diff --git a/src/backends/cpu/op/CPUEmbedding.hpp b/mllm/backends/cpu/op/CPUEmbedding.hpp similarity index 100% rename from src/backends/cpu/op/CPUEmbedding.hpp rename to mllm/backends/cpu/op/CPUEmbedding.hpp diff --git a/mllm/backends/cpu/op/CPUExpandFunc.hpp b/mllm/backends/cpu/op/CPUExpandFunc.hpp new file mode 100644 index 000000000..7877e12f6 --- /dev/null +++ b/mllm/backends/cpu/op/CPUExpandFunc.hpp @@ -0,0 +1,112 @@ +// +// Created by Rongjie Yi on 24-2-26. 
+// + +#ifndef CPUEXPANDFUNC_HPP +#define CPUEXPANDFUNC_HPP + +#include "Tensor.hpp" +#include "Types.hpp" +#include "CPUBackend.hpp" +#include +#include + +namespace mllm { +class Tensor; + +class CPUexpandFunction : public Op { +private: + int thread_count = 4; + int b_, h_, s_, d_; + +public: + CPUexpandFunction(Backend *bn, string name, int threadCount, int b, int h, int s, int d) : + Op(bn, name), thread_count(threadCount), b_(b), h_(h), s_(s), d_(d) { + } + + ErrorCode reshape(vector> inputs, vector> outputs) override { + // The original assert seems to imply only one dimension can be expanded at a time. + // Let's ensure a similar check but allow -1 for non-expanded dims. + // Example: b=5, h=-1, s=-1, d=-1. (5 * -1 * -1 * -1) = -5 < 0. This logic is preserved. + assert(b_ * h_ * s_ * d_ < 0); + + int dim_b = inputs[0]->batch(); + int dim_h = inputs[0]->head(); + int dim_s = inputs[0]->sequence(); + int dim_d = inputs[0]->dimension(); + + if (b_ != -1) { + assert(dim_b == 1); + dim_b = b_; + } else if (s_ != -1) { + assert(dim_s == 1); + dim_s = s_; + } else if (h_ != -1) { + assert(dim_h == 1); + dim_h = h_; + } else if (d_ != -1) { + assert(dim_d == 1); + dim_d = d_; + } + + outputs[0]->reshape(dim_b, dim_h, dim_s, dim_d); + outputs[0]->setDtype(inputs[0]->dtype()); + return ErrorCode::MLLM_NO_ERROR; + } + + ErrorCode execute(vector> inputs, vector> outputs) override { + int dim_b = inputs[0]->batch(); + int dim_s = inputs[0]->sequence(); + int dim_h = inputs[0]->head(); + int dim_d = inputs[0]->dimension(); + + if (b_ != -1) { + std::cerr << "expand for BATCH not support" << std::endl; + } else if (s_ != -1) { +#pragma omp parallel for collapse(2) num_threads(thread_count) + for (int b = 0; b < dim_b; ++b) { + for (int s = 0; s < s_; ++s) { + memcpy(outputs[0]->ptrAt(b, 0, s, 0), + inputs[0]->ptrAt(b, 0, 0, 0), + dim_d * dim_h * inputs[0]->dtypeSize()); + } + } + } else if (h_ != -1) { +#pragma omp parallel for collapse(3) num_threads(thread_count) + for 
(int b = 0; b < dim_b; ++b) { + for (int s = 0; s < dim_s; ++s) { + for (int h = 0; h < h_; ++h) { + memcpy(outputs[0]->ptrAt(b, h, s, 0), + inputs[0]->ptrAt(b, 0, s, 0), // Assumes input head is 1 + dim_d * inputs[0]->dtypeSize()); + } + } + } + } else if (d_ != -1) { + for (int b = 0; b < dim_b; ++b) { + for (int s = 0; s < dim_s; ++s) { + for (int h = 0; h < dim_h; ++h) { + float data = inputs[0]->dataAt(b, h, s, 0); + std::fill_n(outputs[0]->ptrAt(b, h, s, 0), outputs[0]->dimension(), data); + } + } + } + } + return ErrorCode::MLLM_NO_ERROR; + } +}; + +class CPUexpandFunctionCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + // Assumes OpParam contains keys "b", "h", "s", "d" + int b = static_cast(op_param.at("b")); + int h = static_cast(op_param.at("h")); + int s = static_cast(op_param.at("s")); + int d = static_cast(op_param.at("d")); + return new CPUexpandFunction(bn, name, threadCount, b, h, s, d); + } +}; + +} // namespace mllm +#endif // CPUEXPANDFUNC_HPP \ No newline at end of file diff --git a/mllm/backends/cpu/op/CPUFlashAttention2Func.hpp b/mllm/backends/cpu/op/CPUFlashAttention2Func.hpp new file mode 100644 index 000000000..5c5ed6120 --- /dev/null +++ b/mllm/backends/cpu/op/CPUFlashAttention2Func.hpp @@ -0,0 +1,137 @@ +// +// Created by Rongjie Yi on 25-2-16. 
+// + +#ifndef CPUFA2FUNC_HPP +#define CPUFA2FUNC_HPP + +#include "CPUBackend.hpp" +#include "DataType.hpp" +#include "Tensor.hpp" +#include "Types.hpp" +#include "../compute/FlashAttention2.hpp" +#include "../compute/FlashAttention2H.hpp" +#include + +namespace mllm { +class Tensor; + +class CPUFlashAttention2Func : public Op { +private: + int thread_count = 4; + bool causal_mask_; + +public: + CPUFlashAttention2Func(Backend *bn, string name, int threadCount, bool causal_mask) : + Op(bn, name), thread_count(threadCount), causal_mask_(causal_mask) { + } + + ErrorCode reshape(vector> inputs, vector> outputs) override { + auto q_tensor = inputs[0]; + auto o_tensor = outputs[0]; + + int batch_size = q_tensor->batch(); + int q_head = q_tensor->head(); + int q_sequence = q_tensor->sequence(); + int dimension = q_tensor->dimension(); + + // for BSHD attention start + if (inputs[0]->ctype() == BHSD && inputs[1]->ctype() == BHSD && inputs[2]->ctype() == BHSD) { + o_tensor->setCtype(q_tensor->ctype()); + } + // for BSHD attention end + + o_tensor->reshape(batch_size, q_head, q_sequence, dimension); + o_tensor->setDtype(inputs[0]->dtype()); + return ErrorCode::MLLM_NO_ERROR; + } + + ErrorCode execute(vector> inputs, vector> outputs) override { + auto q_tensor = inputs[0]; + auto k_tensor = inputs[1]; + auto v_tensor = inputs[2]; + auto o_tensor = outputs[0]; + + int batch_size = q_tensor->batch(); + int q_head = q_tensor->head(); + int q_sequence = q_tensor->sequence(); + int dimension = q_tensor->dimension(); + int k_head = k_tensor->head(); + int k_sequence = k_tensor->sequence(); + int v_head = v_tensor->head(); + int v_sequence = v_tensor->sequence(); + + assert(v_head == k_head && v_sequence == k_sequence); + + bool kv_use_fp32 = (k_tensor->dtype() == MLLM_TYPE_F32); // x86只支持FP32 + + int threads = thread_count; + threads = std::min(threads, v_head); + + int32_t br = q_sequence >= 4 ? 4 : 1; + int32_t bc = q_sequence >= 4 ? 
4 : 1; + constexpr bool high_precision_exp = true; + for (int bch = 0; bch < batch_size; ++bch) { + void *o_ptr = o_tensor->ptrAt(bch, 0, 0, 0); + void *q_ptr = q_tensor->ptrAt(bch, 0, 0, 0); + void *k_ptr; + void *v_ptr; + if (kv_use_fp32) { + k_ptr = k_tensor->ptrAt(bch, 0, 0, 0); + v_ptr = v_tensor->ptrAt(bch, 0, 0, 0); + } else { + k_ptr = k_tensor->ptrAt(bch, 0, 0, 0); + v_ptr = v_tensor->ptrAt(bch, 0, 0, 0); + } + // for BSHD attention start + if (inputs[0]->ctype() == BHSD && inputs[1]->ctype() == BHSD && inputs[2]->ctype() == BHSD) { + int km = k_sequence; + int vm = v_sequence; + if (k_tensor->masterTensor() != nullptr && v_tensor->masterTensor() != nullptr) { + km = k_tensor->masterTensor()->sequence(); + vm = v_tensor->masterTensor()->sequence(); + } + flash_attention_2_forward_h( + q_ptr, k_ptr, v_ptr, o_ptr, // 输入输出张量 + 1, q_head, q_sequence, k_sequence, dimension, // 基本维度 + causal_mask_, // 使用因果掩码 + kv_use_fp32, // 使用FP32(x86必须) + threads, // 线程数 + br, // 查询分块大小 + bc, // 键值分块大小 + q_head, // 查询头数 + k_head, // 键值头数 + high_precision_exp, // 使用快速指数近似 + q_sequence * dimension, + km * dimension, + vm * dimension); + // for BSHD attention end + } else { + flash_attention_2_forward( + q_ptr, k_ptr, v_ptr, o_ptr, // 输入输出张量 + 1, q_head, q_sequence, k_sequence, dimension, // 基本维度 + causal_mask_, // 使用因果掩码 + kv_use_fp32, // 使用FP32(x86必须) + threads, // 线程数 + br, // 查询分块大小 + bc, // 键值分块大小 + q_head, // 查询头数 + k_head, // 键值头数 + high_precision_exp // 使用快速指数近似 + ); + } + } + return ErrorCode::MLLM_NO_ERROR; + } +}; + +class CPUFlashAttention2FuncCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + bool causal_mask = (bool)op_param.at("causal_mask"); + return new CPUFlashAttention2Func(bn, name, threadCount, causal_mask); + } +}; + +} // namespace mllm +#endif // CPUFA2FUNC_HPP \ No newline at end of file diff --git a/mllm/backends/cpu/op/CPUFlattenFunc.hpp 
b/mllm/backends/cpu/op/CPUFlattenFunc.hpp new file mode 100644 index 000000000..128fc7d67 --- /dev/null +++ b/mllm/backends/cpu/op/CPUFlattenFunc.hpp @@ -0,0 +1,126 @@ +// +// Created by Rongjie Yi on 24-2-26. +// + +#ifndef CPUFLATTENFUNC_HPP +#define CPUFLATTENFUNC_HPP + +#include "Tensor.hpp" +#include "Types.hpp" +#include "CPUBackend.hpp" +#include +#include +#include "Module.hpp" + +namespace mllm { +class Tensor; + +class CPUflattenFunction : public Op { +private: + int thread_count = 4; + Chl axis_start_; + Chl axis_end_; + +public: + CPUflattenFunction(Backend *bn, string name, int threadCount, Chl axis_start, Chl axis_end) : + Op(bn, name), thread_count(threadCount), axis_start_(axis_start), axis_end_(axis_end) { + } + + ErrorCode reshape(vector> inputs, vector> outputs) override { + auto input = inputs[0]; + auto output = outputs[0]; + + int dim_b = input->batch(); + int dim_h = 0; + int dim_s = 0; + int dim_d = 0; + + if (inputs[0]->shape().size() == 4) { + dim_h = inputs[0]->head(); + dim_s = inputs[0]->sequence(); + dim_d = inputs[0]->dimension(); + if (axis_start_ == BATCH & axis_end_ == SEQUENCE) { + dim_b = 1; + dim_s = inputs[0]->sequence() * inputs[0]->batch(); + } else if (axis_start_ == HEAD & axis_end_ == SEQUENCE) { + dim_h = 1; + dim_s = inputs[0]->sequence() * inputs[0]->head(); + } else if (axis_start_ == HEAD & axis_end_ == DIMENSION) { + dim_h = 1; + dim_d = inputs[0]->dimension() * inputs[0]->head(); + } else { + std::cout << "ERROR: flatten " << axis_start_ << "&" << axis_end_ << std::endl; + } + } else if (inputs[0]->shape().size() == 5) { + if (axis_start_ == CHANNLE & axis_end_ == HEIGHT) { + dim_h = 1; + dim_s = inputs[0]->channel() * inputs[0]->height() * inputs[0]->time(); + dim_d = inputs[0]->width(); + } else if (axis_start_ == HEIGHT & axis_end_ == CHANNLE) { + dim_h = 1; + dim_s = inputs[0]->channel() * inputs[0]->height() * inputs[0]->width(); + dim_d = inputs[0]->time(); + } + } + assert(dim_d + dim_s + dim_h > 0); + if 
(inputs[0]->ctype() == BCTHW) { // TODOTMPA + outputs[0]->chls()[BATCH] = 0; + outputs[0]->chls()[SEQUENCE] = 1; + outputs[0]->chls()[HEAD] = 2; + outputs[0]->chls()[DIMENSION] = 3; + outputs[0]->setCtype(BSHD); + } + outputs[0]->reshape(dim_b, dim_h, dim_s, dim_d); + return ErrorCode::MLLM_NO_ERROR; + } + + ErrorCode execute(vector> inputs, vector> outputs) override { + // No data movement needed, all work done in reshape by creating a view. + return ErrorCode::MLLM_NO_ERROR; + } + + ErrorCode setUp(vector> inputs, vector> outputs) override { + // inputs[0]->shallowCopyFrom(outputs[0].get(), false); + // Chl axis_start = (Chl)args[0]; + // Chl axis_end = (Chl)args[1]; + if ((axis_start_ == TIME & axis_end_ == WIDTH && inputs[0]->ctype() == BCTHW) + || (axis_start_ == CHANNLE & axis_end_ == HEIGHT && inputs[0]->ctype() == BWCTH) + || (axis_start_ == HEIGHT & axis_end_ == CHANNLE && inputs[0]->ctype() == BTHWC) + || (axis_start_ == BATCH & axis_end_ == SEQUENCE && inputs[0]->ctype() != BCTHW) + || (axis_start_ == HEAD & axis_end_ == SEQUENCE && inputs[0]->ctype() == BSHD) + || (axis_start_ == HEAD & axis_end_ == SEQUENCE && inputs[0]->ctype() == BHDS) + || (axis_start_ == HEAD & axis_end_ == DIMENSION && inputs[0]->ctype() == BSHD) + || (axis_start_ == HEAD & axis_end_ == DIMENSION && inputs[0]->ctype() == BHDS) + || (axis_start_ == HEAD & axis_end_ == SEQUENCE && inputs[0]->ctype() == BDSH)) { + if (inputs[0]->masterTensor() == nullptr) { + inputs[0]->free(); + } + outputs[0]->setDtype(inputs[0]->dtype()); + outputs[0]->alloc(); + inputs[0]->shallowCopyFrom(outputs[0], false); + } else if (Module::llm_model_ptr->op_transposed_flag) { + if (inputs[0]->masterTensor() == nullptr) { + inputs[0]->free(); + } + outputs[0]->setDtype(inputs[0]->dtype()); + outputs[0]->alloc(); + inputs[0]->shallowCopyFrom(outputs[0], false); + } else { + std::cout << "[TODO]Tensor.Flatten not support!!!!" 
<< std::endl; + } + return ErrorCode::MLLM_NO_ERROR; + } +}; + +class CPUflattenFunctionCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + // Assumes OpParam contains keys "axis_start" and "axis_end" + Chl axis_start = (Chl)op_param.at("axis_start"); + Chl axis_end = (Chl)op_param.at("axis_end"); + return new CPUflattenFunction(bn, name, threadCount, axis_start, axis_end); + } +}; + +} // namespace mllm +#endif // CPUFLATTENFUNC_HPP \ No newline at end of file diff --git a/src/backends/cpu/function/CPUFuyuGatherEmbdFunc.hpp b/mllm/backends/cpu/op/CPUFuyuGatherEmbdFunc.hpp similarity index 58% rename from src/backends/cpu/function/CPUFuyuGatherEmbdFunc.hpp rename to mllm/backends/cpu/op/CPUFuyuGatherEmbdFunc.hpp index 00decd0cf..f398df20c 100644 --- a/src/backends/cpu/function/CPUFuyuGatherEmbdFunc.hpp +++ b/mllm/backends/cpu/op/CPUFuyuGatherEmbdFunc.hpp @@ -4,49 +4,52 @@ #ifndef CPUFUYUGATHEREMBDFUNC_HPP #define CPUFUYUGATHEREMBDFUNC_HPP + #include "Tensor.hpp" #include "Types.hpp" +#include "CPUBackend.hpp" +#include +#include namespace mllm { class Tensor; -class CPUFuyuGatherEmbdFunc : public TensorFunction { +class CPUFuyuGatherEmbdFunc : public Op { +private: + int thread_count = 4; + public: - void setUp(vector> outputs, vector> inputs, vector args) override { + CPUFuyuGatherEmbdFunc(Backend *bn, string name, int threadCount) : + Op(bn, name), thread_count(threadCount) { + } + + ErrorCode setUp(vector> inputs, vector> outputs) override { if (inputs[0]->masterTensor() == nullptr) { inputs[0]->free(); } outputs[0]->alloc(); - inputs[0]->shallowCopyFrom(outputs[0].get(), false); + inputs[0]->shallowCopyFrom(outputs[0], false); + return MLLM_NO_ERROR; } - void reshape(vector> outputs, vector> inputs, vector args) override { + + ErrorCode reshape(vector> inputs, vector> outputs) override { assert(inputs.size() == 3); - // assert(outputs.size() == 1); - // if 
(inputs[1]->batch() == 0) { - // outputs[0]->reshape(inputs[0]->batch(), 1, inputs[0]->sequence(), inputs[0]->dimension()); - // } assert(inputs[0]->batch() == inputs[1]->batch()); assert(inputs[0]->head() == inputs[1]->head()); assert(inputs[0]->head() == 1); assert(inputs[0]->dimension() == inputs[1]->dimension()); assert(inputs[2]->dimension() == 1); - // outputs[0]->reshape(inputs[0]->batch(), 1, inputs[0]->sequence(), inputs[0]->dimension()); - // alloc - // if (inputs[0]->masterTensor() == nullptr) { - // inputs[0]->free(); - // } - // outputs[0]->alloc(); - // inputs[0]->shallowCopyFrom(outputs[0].get(), false); + return MLLM_NO_ERROR; } - void execute(vector> outputs, vector> inputs, vector args) override { + + ErrorCode execute(vector> inputs, vector> outputs) override { if (inputs[1]->batch() == 0) { - return; + return MLLM_NO_ERROR; } assert(inputs[0]->ctype() == BSHD); assert(inputs[1]->ctype() == BSHD); - // assert(outputs[0]->ctype() == BSHD); auto input_indices = inputs[2]; int hiddenSize = inputs[0]->dimension(); for (int batch = 0; batch < inputs[0]->batch(); ++batch) { @@ -58,6 +61,14 @@ class CPUFuyuGatherEmbdFunc : public TensorFunction { } } } + return MLLM_NO_ERROR; + } +}; + +class CPUFuyuGatherEmbdFuncCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + return new CPUFuyuGatherEmbdFunc(bn, name, threadCount); } }; diff --git a/src/backends/cpu/op/CPUGELU.cpp b/mllm/backends/cpu/op/CPUGELU.cpp similarity index 55% rename from src/backends/cpu/op/CPUGELU.cpp rename to mllm/backends/cpu/op/CPUGELU.cpp index dbda9de23..560aee320 100644 --- a/src/backends/cpu/op/CPUGELU.cpp +++ b/mllm/backends/cpu/op/CPUGELU.cpp @@ -1,10 +1,12 @@ #include "CPUGELU.hpp" +#include "backends/cpu/third_party/ggml/Quantize.hpp" #include #include namespace mllm { -CPUGELU::CPUGELU(Backend *bn, string opName, int threadCount):thread_count(threadCount), Op(bn, 
std::move(opName)) { +CPUGELU::CPUGELU(Backend *bn, string opName, int threadCount) : + thread_count(threadCount), Op(bn, std::move(opName)) { if (!init_table_gelu_f16_flag) { init_table_gelu_f16(); init_table_gelu_f16_flag = true; @@ -26,17 +28,17 @@ ErrorCode CPUGELU::execute(vector> inputs, vectorsequence(); int dim = input->dimension(); #pragma omp parallel for collapse(3) num_threads(thread_count) - for (int b = 0; b dataAt(b, h, s, d); -// // output->setDataAt(b, h, s, d, 0.5 * value * (1 + std::tanh(std::sqrt(2 / M_PI) * (value + 0.044715 * std::pow(value, 3))))); -// output->setDataAt(b, h, s, d, 0.5 * value * (1 + std::tanh(std::sqrt(2 / M_PI) * (0.7978845608 * (value + 0.044715 * std::pow(value, 3)))))); -//; -// } - mllm_vec_gelu_f32(dim, outputs[0]->ptrAt(b, h, s,0), - inputs[0]->ptrAt(b, h, s,0)); + // for (int d = 0; d < dim; ++d) { + // float value = input->dataAt(b, h, s, d); + // // output->setDataAt(b, h, s, d, 0.5 * value * (1 + std::tanh(std::sqrt(2 / M_PI) * (value + 0.044715 * std::pow(value, 3))))); + // output->setDataAt(b, h, s, d, 0.5 * value * (1 + std::tanh(std::sqrt(2 / M_PI) * (0.7978845608 * (value + 0.044715 * std::pow(value, 3)))))); + //; + // } + mllm_vec_gelu_f32(dim, outputs[0]->ptrAt(b, h, s, 0), + inputs[0]->ptrAt(b, h, s, 0)); } } } diff --git a/src/backends/cpu/op/CPUGELU.hpp b/mllm/backends/cpu/op/CPUGELU.hpp similarity index 100% rename from src/backends/cpu/op/CPUGELU.hpp rename to mllm/backends/cpu/op/CPUGELU.hpp diff --git a/mllm/backends/cpu/op/CPUGather.cpp b/mllm/backends/cpu/op/CPUGather.cpp new file mode 100644 index 000000000..eebbe2e3d --- /dev/null +++ b/mllm/backends/cpu/op/CPUGather.cpp @@ -0,0 +1,59 @@ +#include "CPUGather.hpp" +#include + +namespace mllm { + +CPUGather::CPUGather(Backend *bn, string opName, int threadCount) : + thread_count(threadCount), + Op(bn, opName) { +} + +ErrorCode CPUGather::reshape(vector> inputs, vector> outputs) { + // assert(inputs.size() == 3); + assert(outputs.size() == 1); + 
// if (inputs[1]->batch() == 0) { + // outputs[0]->reshape(inputs[0]->batch(), 1, inputs[0]->sequence(), inputs[0]->dimension()); + // return Op::reshape(inputs, outputs); + // } + assert(inputs[0]->batch() == inputs[1]->batch()); + assert(inputs[0]->head() == inputs[1]->head()); + assert(inputs[0]->head() == 1); + // assert(inputs[0]->dimension() == inputs[1]->dimension()); + // assert(inputs[1]->dimension() == 1); + outputs[0]->reshape(inputs[0]->batch(), 1, inputs[0]->sequence(), inputs[1]->dimension()); + return Op::reshape(inputs, outputs); +} + +ErrorCode CPUGather::execute(vector> inputs, vector> outputs) { + if (inputs[1]->batch() == 0) { + return Op::execute(inputs, outputs); + } + + assert(inputs[0]->ctype() == BSHD); + assert(inputs[1]->ctype() == BSHD); + assert(outputs[0]->ctype() == BSHD); + auto input_indices = inputs[1]; + int hiddenSize = inputs[0]->dimension(); +#pragma omp parallel for collapse(3) num_threads(CPUBackend::cpu_threads) + for (int batch = 0; batch < inputs[0]->batch(); ++batch) { + for (int seq = 0; seq < inputs[0]->sequence(); ++seq) { + for (int indices = 0; indices < input_indices->dimension(); ++indices) { + int dim_index = input_indices->dataAt(batch, 0, seq, indices); + float value = inputs[0]->dataAt(batch, 0, seq, dim_index); + outputs[0]->setDataAt(batch, 0, seq, indices, value); + } + } + } + return Op::execute(inputs, outputs); +} + +// ErrorCode CPUGather::setUp(vector> inputs, vector> outputs) { +// if (inputs[0]->masterTensor() == nullptr) { +// inputs[0]->free(); +// } +// outputs[0]->setDtype(activation_dtype()); +// outputs[0]->alloc(); +// inputs[0]->shallowCopyFrom(outputs[0], false); +// return MLLM_NO_ERROR; +// } +} // namespace mllm diff --git a/src/backends/cpu/op/CPUGather.hpp b/mllm/backends/cpu/op/CPUGather.hpp similarity index 87% rename from src/backends/cpu/op/CPUGather.hpp rename to mllm/backends/cpu/op/CPUGather.hpp index cee464f68..d902203ee 100644 --- a/src/backends/cpu/op/CPUGather.hpp +++ 
b/mllm/backends/cpu/op/CPUGather.hpp @@ -13,7 +13,7 @@ class CPUGather final : public Op { virtual ~CPUGather() = default; virtual ErrorCode reshape(vector> inputs, vector> outputs) override; virtual ErrorCode execute(vector> inputs, vector> outputs) override; - virtual ErrorCode setUp(vector> inputs, vector> outputs) override; + // virtual ErrorCode setUp(vector> inputs, vector> outputs) override; private: int thread_count = 4; diff --git a/src/backends/cpu/op/CPUHeadLinear.cpp b/mllm/backends/cpu/op/CPUHeadLinear.cpp similarity index 94% rename from src/backends/cpu/op/CPUHeadLinear.cpp rename to mllm/backends/cpu/op/CPUHeadLinear.cpp index 3612e4a57..fe0a6f1b1 100644 --- a/src/backends/cpu/op/CPUHeadLinear.cpp +++ b/mllm/backends/cpu/op/CPUHeadLinear.cpp @@ -1,8 +1,10 @@ #include "CPUHeadLinear.hpp" +#include "Context.hpp" #include "Types.hpp" #include #include +#include "backends/cpu/third_party/ggml/QuantizeQ8.hpp" namespace mllm { @@ -75,15 +77,15 @@ ErrorCode CPUHeadLinear::load(AbstructLoader &loader) { ErrorCode CPUHeadLinear::execute(vector> inputs, vector> outputs) { auto cpuBackend = dynamic_cast(backend_); - int seqLength = cpuBackend->getTotalSequenceLength(); - int chunk_size = cpuBackend->getChunkSize(); + int seqLength = Context::Instance().inference_state().getTotalSequenceLength(); + int chunk_size = Context::Instance().inference_state().getChunkSize(); shared_ptr tmp_in = std::make_shared(backend_); tmp_in->reshape(1, 1, 1, inputs[0]->dimension()); - tmp_in->shallowCopyFrom(inputs[0].get(), false, {0, 0, seqLength % chunk_size - 1, 0}); + tmp_in->shallowCopyFrom(inputs[0], false, {0, 0, seqLength % chunk_size - 1, 0}); shared_ptr tmp_out = std::make_shared(backend_); tmp_out->reshape(1, 1, 1, out_features_); - tmp_out->shallowCopyFrom(outputs[0].get(), false, {0, 0, seqLength % chunk_size - 1, 0}); + tmp_out->shallowCopyFrom(outputs[0], false, {0, 0, seqLength % chunk_size - 1, 0}); // auto start = mllm::mllm_time_us(); if (inputs[0]->count() == 
0) { diff --git a/src/backends/cpu/op/CPUHeadLinear.hpp b/mllm/backends/cpu/op/CPUHeadLinear.hpp similarity index 100% rename from src/backends/cpu/op/CPUHeadLinear.hpp rename to mllm/backends/cpu/op/CPUHeadLinear.hpp diff --git a/src/backends/cpu/op/CPUIRoPE.cpp b/mllm/backends/cpu/op/CPUIRoPE.cpp similarity index 98% rename from src/backends/cpu/op/CPUIRoPE.cpp rename to mllm/backends/cpu/op/CPUIRoPE.cpp index 95c9cd363..06a8e69ce 100644 --- a/src/backends/cpu/op/CPUIRoPE.cpp +++ b/mllm/backends/cpu/op/CPUIRoPE.cpp @@ -1,11 +1,12 @@ #include "CPUIRoPE.hpp" +#include "Context.hpp" #include "Log.h" #include "Types.hpp" #include #include #include -#include "backends/cpu/quantize/QuantizeQ8.hpp" +#include "backends/cpu/third_party/ggml/QuantizeQ8.hpp" namespace mllm { @@ -138,9 +139,8 @@ ErrorCode CPUIRoPE::reshape(vector> inputs, vector(backend_); - if (cpuBackend->isStageSwitching()) { - h_cnt_ = cpuBackend->getCurSequenceLength(); + if (Context::Instance().inference_state().isStageSwitching()) { + h_cnt_ = Context::Instance().inference_state().getCurSequenceLength(); } #endif return Op::reshape(inputs, outputs); @@ -394,8 +394,8 @@ ErrorCode CPUIRoPE::execute(vector> inputs, vector> inputs, vector> outputs) { // if use QNN, when a new prompt input, the seq should be reset to 0 here as the setUp is not called #ifdef USE_QNN - auto cpuBackend = dynamic_cast(backend_); - if (cpuBackend->isStageSwitching() && cpuBackend->getExecutionType() == PROMPT) { + if (Context::Instance().inference_state().isStageSwitching() && + Context::Instance().inference_state().getExecutionType() == PROMPT) { h_cnt_ = 0; } #endif diff --git a/src/backends/cpu/op/CPUIRoPE.hpp b/mllm/backends/cpu/op/CPUIRoPE.hpp similarity index 100% rename from src/backends/cpu/op/CPUIRoPE.hpp rename to mllm/backends/cpu/op/CPUIRoPE.hpp diff --git a/src/backends/cpu/function/CPUIndexPutFunc.hpp b/mllm/backends/cpu/op/CPUIndexPutFunc.hpp similarity index 67% rename from 
src/backends/cpu/function/CPUIndexPutFunc.hpp rename to mllm/backends/cpu/op/CPUIndexPutFunc.hpp index 388bcf3ff..3d0c2a789 100644 --- a/src/backends/cpu/function/CPUIndexPutFunc.hpp +++ b/mllm/backends/cpu/op/CPUIndexPutFunc.hpp @@ -7,82 +7,82 @@ #include "Tensor.hpp" #include "Types.hpp" #include "CPUBackend.hpp" +#include +#include namespace mllm { class Tensor; -class CPUIndexPutFunction : public TensorFunction { +class CPUIndexPutFunction : public Op { +private: + int thread_count = 4; + bool accumulate_; + public: - void setUp(vector> outputs, vector> inputs, vector args) override { - bool accumulate = (bool)args[0]; - if (!accumulate) { + CPUIndexPutFunction(Backend *bn, string name, int threadCount, bool accumulate) : + Op(bn, name), thread_count(threadCount), accumulate_(accumulate) { + } + + ErrorCode setUp(vector> inputs, vector> outputs) override { + if (!accumulate_) { if (inputs[0]->masterTensor() == nullptr) { inputs[0]->free(); } outputs[0]->alloc(); - inputs[0]->shallowCopyFrom(outputs[0].get(), false); + inputs[0]->shallowCopyFrom(outputs[0], false); } + return MLLM_NO_ERROR; } - void reshape(vector> outputs, vector> inputs, vector args) override { - bool accumulate = (bool)args[0]; - if (inputs[1]->batch() == 0) { + + ErrorCode reshape(vector> inputs, vector> outputs) override { + if (inputs.size() > 1 && inputs[1]->batch() == 0) { outputs[0]->reshape(inputs[0]->batch(), 1, inputs[0]->sequence(), inputs[0]->dimension()); - if (inputs[0]->masterTensor() == nullptr) { - inputs[0]->free(); + if (!accumulate_) { + if (inputs[0]->masterTensor() == nullptr) { + inputs[0]->free(); + } + outputs[0]->alloc(); + inputs[0]->shallowCopyFrom(outputs[0], false); } - inputs[0]->shallowCopyFrom(outputs[0].get(), false); - outputs[0]->alloc(); - return; + return MLLM_NO_ERROR; } // reshape assert(inputs.size() == 3); - // assert(outputs.size() == 1); auto dest_input = inputs[0]; auto src_input = inputs[1]; assert(dest_input->batch() == 1); 
assert(dest_input->head() == 1); assert(src_input->head() == 1); assert(dest_input->dimension() == src_input->dimension()); - int origin_s = dest_input->sequence(); - int replace_s = src_input->sequence(); - int replace_size = src_input->batch(); - int seq = origin_s - replace_size + replace_size * replace_s; - if (!accumulate) { - outputs[0]->reshape(dest_input->batch(), dest_input->head(), dest_input->sequence(), dest_input->dimension()); // 1, 1, 595, 4096 - } else { - outputs[0]->reshape(dest_input->batch(), dest_input->head(), seq, dest_input->dimension()); // 1, 1, 595, 4096 - } - // alloc - if (!accumulate) { - // outputs[0]->shallowCopyFrom(inputs[0], false); + + if (!accumulate_) { + outputs[0]->reshape(dest_input->batch(), dest_input->head(), dest_input->sequence(), dest_input->dimension()); } else { + int origin_s = dest_input->sequence(); + int replace_s = src_input->sequence(); + int replace_size = src_input->batch(); + int seq = origin_s - replace_size + (replace_size * replace_s); + outputs[0]->reshape(dest_input->batch(), dest_input->head(), seq, dest_input->dimension()); outputs[0]->alloc(); } - // if (!accumulate) { - // if (inputs[0]->masterTensor() == nullptr) { - // inputs[0]->free(); - // } - // outputs[0]->alloc(); - // inputs[0]->shallowCopyFrom(outputs[0].get(), false); - // } else { - // outputs[0]->alloc(); - // } + outputs[0]->setDtype(inputs[0]->dtype()); + return MLLM_NO_ERROR; } - void execute(vector> outputs, vector> inputs, vector args) override { - bool accumulate = (bool)args[0]; - if (inputs[1]->batch() == 0) { - return; + + ErrorCode execute(vector> inputs, vector> outputs) override { + if (inputs.size() > 1 && inputs[1]->batch() == 0) { + return MLLM_NO_ERROR; } + assert(inputs.size() == 3); - // assert(outputs.size() == 1); auto dest_input = inputs[0]; auto src_input = inputs[1]; auto replace_idx = inputs[2]; assert(replace_idx->batch() == 1); assert(replace_idx->sequence() == 1); assert(replace_idx->head() == 1); - if 
(!accumulate) { + if (!accumulate_) { for (int r_idx = 0; r_idx < replace_idx->dimension(); r_idx++) { auto replace_seq = (int)replace_idx->dataAt(0, 0, 0, r_idx); auto dst_ptr = inputs[0]->ptrAt(0, 0, replace_seq, 0); @@ -97,7 +97,7 @@ class CPUIndexPutFunction : public TensorFunction { int in1_batch = 0; #pragma omp parallel for num_threads(CPUBackend::cpu_threads) for (int i = 0; i < replace_size; ++i) { - auto start_src_seq = (int)replace_idx->dataAt(0, 0, 0, i) + i * replace_s; + auto start_src_seq = (int)replace_idx->dataAt(0, 0, 0, i) + (i * replace_s); auto end_dest_seq = start_src_seq; auto end_src_seq = start_src_seq + replace_s; @@ -132,6 +132,15 @@ class CPUIndexPutFunction : public TensorFunction { memcpy(dst_ptr, src_ptr, sizeof(float) * src_input->dimension()); } } + return MLLM_NO_ERROR; + } +}; + +class CPUIndexPutFunctionCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + bool accumulate = (bool)op_param.at("accumulate"); + return new CPUIndexPutFunction(bn, name, threadCount, accumulate); } }; diff --git a/mllm/backends/cpu/op/CPUKVCache.cpp b/mllm/backends/cpu/op/CPUKVCache.cpp new file mode 100644 index 000000000..b8b953fc3 --- /dev/null +++ b/mllm/backends/cpu/op/CPUKVCache.cpp @@ -0,0 +1,347 @@ + + +#include "CPUKVCache.hpp" +#include "Context.hpp" +#include "ParamLoader.hpp" +#include "Types.hpp" + +int n_pack = 16; +namespace mllm { +CPUKVCache::CPUKVCache(Backend *bn, string opName, int hidden, int head, int n_rep, bool fa2, int cache_max, int threadCount) : + thread_count(threadCount), Op(bn, opName) { + cache_ = std::make_shared(1, head * n_rep, cache_max, hidden, bn, false); + fa2_ = fa2; + auto KVdtype = KVCache_TYPE; + if (!fa2) { + KVdtype = KVCache_Type_eager; + } + switch (KVdtype) { + case 16: { + cache_->setDtype(MLLM_TYPE_F16); + break; + } + case 8: { + if (opName.find("k_cache") != std::string::npos) { + 
cache_->setDtype(MLLM_TYPE_Q8_0); + n_pack = QK8_0; + } else { + cache_->setDtype(MLLM_TYPE_F16); + } + break; + } + case 32: { + cache_->setDtype(MLLM_TYPE_F32); + break; + } + default: { + cache_->setDtype(MLLM_TYPE_F32); + break; + } + } + if (!fa2) { // not fa2 +#ifdef LLAMAFILE_SGEMM + cache_max = ((cache_max + (n_pack - 1)) / n_pack) * n_pack; +#endif + } else { // fa2 + n_rep = 1; + } + cache_limit_ = cache_max; + n_rep_ = n_rep; + if (head > 0) { + if (for_xnn_) cache_->setDtype(MLLM_TYPE_F32); + + cache_->reshape(KVCache_batch, head * n_rep_, cache_limit_, hidden); + cache_->setName(name() + ".Cache"); + cache_->alloc(); + + switch (cache_->dtype()) { + case MLLM_TYPE_F32: + memset(cache_->hostPtr(), 0, cache_->count() * sizeof(float)); + break; + case MLLM_TYPE_F16: + memset(cache_->hostPtr(), 0, cache_->count() * sizeof(mllm_fp16_t)); + break; + case MLLM_TYPE_Q8_0: + memset((char *)cache_->rawHostPtr(), 0, cache_->count() * sizeof(block_q8_0) / QK8_0); + break; + default: + break; + }; + cache_seq_len_ = 0; + cache_->cache_seq_len_ = cache_seq_len_; + } +} + +ErrorCode CPUKVCache::reshape(vector> inputs, + vector> outputs) { + assert(inputs.size() == 1); + assert(outputs.size() == 1); + if (cache_seq_len_ < 0) { //|| inputs[0]->batch() != cache_->batch() + if (for_xnn_) cache_->setDtype(MLLM_TYPE_F32); + + cache_->reshape(inputs[0]->batch(), inputs[0]->head() * n_rep_, cache_limit_, + inputs[0]->dimension()); + cache_->setName(name() + ".Cache"); + cache_->alloc(); + + switch (cache_->dtype()) { + case MLLM_TYPE_F32: + memset(cache_->hostPtr(), 0, cache_->count() * sizeof(float)); + break; + case MLLM_TYPE_F16: + memset(cache_->hostPtr(), 0, cache_->count() * sizeof(mllm_fp16_t)); + break; + case MLLM_TYPE_Q8_0: + memset((char *)cache_->rawHostPtr(), 0, cache_->count() * sizeof(block_q8_0) / QK8_0); + break; + default: + break; + }; + cache_seq_len_ = 0; + cache_->cache_seq_len_ = cache_seq_len_; + } + + // for sd + auto cpuBackend = 
dynamic_cast(backend_); + if (Context::Instance().speculative_decoding_state().isUsingDraft()) { + unsigned int last_draft_length = Context::Instance().speculative_decoding_state().getLastDraftLength(); + const std::vector &last_verified_position_ids = Context::Instance().speculative_decoding_state().getLastVerifiedPositionIds(); + cache_seq_len_ = cache_seq_len_ - (last_draft_length) + last_verified_position_ids.size(); + cache_->cache_seq_len_ = cache_seq_len_; + } + + int sequence = inputs[0]->sequence() + cache_seq_len_; +#ifdef LLAMAFILE_SGEMM + if (!fa2_) { + if (!for_xnn_ && sequence % n_pack != 0) sequence = ((sequence + (n_pack - 1)) / n_pack) * n_pack; + } +#endif + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head() * n_rep_, sequence, + inputs[0]->dimension()); + if (sequence > cache_limit_) { + MLLM_LOG_ERROR_STREAM << "\n[ERROR]: Current tokens exceed cache limit: " << sequence << ">" + << cache_limit_ << ";" + << "\n Please set args `--limits` >" << cache_limit_ << std::endl; + + exit(1); + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head() * n_rep_, cache_limit_, + inputs[0]->dimension()); + } + return Op::reshape(inputs, outputs); +} + +ErrorCode CPUKVCache::load(AbstructLoader &loader) { + return Op::load(loader); +} + +ErrorCode CPUKVCache::execute(vector> inputs, + vector> outputs) { + // for sd + auto cpuBackend = dynamic_cast(backend_); + if (Context::Instance().speculative_decoding_state().isUsingDraft()) { + const std::vector &last_verified_position_ids = Context::Instance().speculative_decoding_state().getLastVerifiedPositionIds(); + if (!last_verified_position_ids.empty()) { + this->updateVerifiedKVCache(last_verified_position_ids); + } + } + + int cache_seq_len_old = cache_seq_len_; + cache_seq_len_ += inputs[0]->sequence(); + cache_->cache_seq_len_ = cache_seq_len_; + if (n_rep_ > 1) { + if (cache_->ctype() == BSHD) { + for (int b = 0; b < cache_->batch(); ++b) { + for (int h = inputs[0]->head() - 1; h >= 0; --h) { +#pragma 
omp parallel for collapse(2) num_threads(thread_count) + for (int seq = cache_seq_len_old; seq < cache_seq_len_; ++seq) { + for (int i_rep = 0; i_rep < n_rep_; ++i_rep) { + auto cache_head = h * n_rep_ + i_rep; + if (cache_->dtype() == MLLM_TYPE_F32) { + auto src_ptr = + inputs[0]->ptrAt(b, h, seq - cache_seq_len_old, 0); + auto dest_ptr = cache_->ptrAt(b, cache_head, seq, 0); + int copy_size = cache_->dimension(); + memcpy(dest_ptr, src_ptr, copy_size * sizeof(float)); + } else if (cache_->dtype() == MLLM_TYPE_F16) { + auto src_ptr = + inputs[0]->ptrAt(b, h, seq - cache_seq_len_old, 0); + auto dest_ptr = cache_->ptrAt(b, cache_head, seq, 0); + int copy_size = cache_->dimension(); + memcpy(dest_ptr, src_ptr, copy_size * sizeof(mllm_fp16_t)); + } else if (cache_->dtype() == MLLM_TYPE_Q8_0) { + auto src_ptr = + (char *)inputs[0]->rawHostPtr() + inputs[0]->offset(b, h, seq - cache_seq_len_old, 0) * sizeof(block_q8_0) / QK8_0; + auto dest_ptr = (char *)cache_->rawHostPtr() + cache_->offset(b, cache_head, seq, 0) * sizeof(block_q8_0) / QK8_0; + int copy_size = cache_->dimension(); + memcpy(dest_ptr, src_ptr, copy_size * sizeof(block_q8_0) / QK8_0); + } + } + } + } + } + } else if (cache_->ctype() == BHDS) { + for (int b = 0; b < cache_->batch(); ++b) { + for (int h = inputs[0]->head() - 1; h >= 0; --h) { +#pragma omp parallel for collapse(2) num_threads(thread_count) + for (int d = 0; d < inputs[0]->dimension(); ++d) { + for (int i_rep = 0; i_rep < n_rep_; ++i_rep) { + auto cache_head = h * n_rep_ + i_rep; + if (cache_->dtype() == MLLM_TYPE_F32) { + auto src_ptr = inputs[0]->ptrAt(b, h, 0, d); + auto dest_ptr = + cache_->ptrAt(b, cache_head, cache_seq_len_old, d); + int copy_size = cache_seq_len_ - cache_seq_len_old; + memcpy(dest_ptr, src_ptr, copy_size * sizeof(float)); + } else if (cache_->dtype() == MLLM_TYPE_F16) { + auto src_ptr = inputs[0]->ptrAt(b, h, 0, d); + auto dest_ptr = + cache_->ptrAt(b, cache_head, cache_seq_len_old, d); + int copy_size = cache_seq_len_ 
- cache_seq_len_old; + memcpy(dest_ptr, src_ptr, copy_size * sizeof(mllm_fp16_t)); + } else if (cache_->dtype() == MLLM_TYPE_Q8_0) { + auto src_ptr = + (char *)inputs[0]->rawHostPtr() + inputs[0]->offset(b, h, 0, d) * sizeof(block_q8_0) / QK8_0; + auto dest_ptr = (char *)cache_->rawHostPtr() + cache_->offset(b, cache_head, cache_seq_len_old, d) * sizeof(block_q8_0) / QK8_0; + int copy_size = cache_->dimension(); + memcpy(dest_ptr, src_ptr, copy_size * sizeof(block_q8_0) / QK8_0); + } + } + } + } + } + } else if (cache_->ctype() == BHSD) { + for (int b = 0; b < cache_->batch(); ++b) { + for (int h = inputs[0]->head() - 1; h >= 0; --h) { +#pragma omp parallel for collapse(2) num_threads(thread_count) + for (int seq = cache_seq_len_old; seq < cache_seq_len_; ++seq) { + for (int i_rep = 0; i_rep < n_rep_; ++i_rep) { + auto cache_head = h * n_rep_ + i_rep; + if (cache_->dtype() == MLLM_TYPE_F32) { + auto src_ptr = + inputs[0]->ptrAt(b, h, seq - cache_seq_len_old, 0); + auto dest_ptr = cache_->ptrAt(b, cache_head, seq, 0); + int copy_size = cache_->dimension(); + memcpy(dest_ptr, src_ptr, copy_size * sizeof(float)); + } else if (cache_->dtype() == MLLM_TYPE_F16) { + auto src_ptr = + inputs[0]->ptrAt(b, h, seq - cache_seq_len_old, 0); + auto dest_ptr = cache_->ptrAt(b, cache_head, seq, 0); + int copy_size = cache_->dimension(); + memcpy(dest_ptr, src_ptr, copy_size * sizeof(mllm_fp16_t)); + } else if (cache_->dtype() == MLLM_TYPE_Q8_0) { + auto src_ptr = + (char *)inputs[0]->rawHostPtr() + inputs[0]->offset(b, h, seq - cache_seq_len_old, 0) * sizeof(block_q8_0) / QK8_0; + auto dest_ptr = (char *)cache_->rawHostPtr() + cache_->offset(b, cache_head, seq, 0) * sizeof(block_q8_0) / QK8_0; + int copy_size = cache_->dimension(); + memcpy(dest_ptr, src_ptr, copy_size * sizeof(block_q8_0) / QK8_0); + } + } + } + } + } + } else { + std::cout << "ERROR Ctype in KVCcache;" << std::endl; + } + } + return Op::execute(inputs, outputs); +} + +ErrorCode CPUKVCache::free(vector> inputs, 
vector> outputs) { + return Op::free(inputs, outputs); +} + +ErrorCode CPUKVCache::setUp(vector> inputs, vector> outputs) { + assert(inputs.size() == 1); + assert(outputs.size() == 1); + // for BSHD attention start + if (inputs[0]->ctype() == BHSD && cache_->ctype() == BSHD) { + auto origin_b = cache_->batch(); + auto origin_h = cache_->head(); + auto origin_s = cache_->sequence(); + auto origin_d = cache_->dimension(); + cache_->setCtype(BHSD); + cache_->reshape(origin_b, origin_h, origin_s + cache_seq_len_, origin_d); + cache_->alloc(); + } + // for BSHD attention end + outputs[0]->setDtype(cache_->dtype()); + outputs[0]->shallowCopyFrom(cache_, false, {0, 0, cache_seq_len_ / cache_limit_, 0}); + if (inputs[0]->sequence() + cache_seq_len_ > cache_limit_) { + outputs[0]->shallowCopyFrom(cache_, false, {0, 0, cache_seq_len_ % cache_limit_ + 1, 0}); + } + if (inputs[0]->masterTensor() == nullptr) { inputs[0]->free(); } + inputs[0]->shallowCopyFrom(cache_, false, {0, 0, cache_seq_len_ % cache_limit_, 0}); + return MLLM_NO_ERROR; +} + +// for sd +ErrorCode CPUKVCache::updateVerifiedKVCache(const std::vector &verified_position_ids) { + if (cache_->ctype() == BSHD) { + unsigned int dest_pid = cache_seq_len_ - verified_position_ids.size(); + for (unsigned int src_pid : verified_position_ids) { + if (src_pid == dest_pid) { + dest_pid += 1; + continue; + } + // #pragma omp parallel for collapse(1) num_threads(thread_count) + for (int b = 0; b < cache_->batch(); ++b) { + if (cache_->dtype() == MLLM_TYPE_F32) { + auto src_ptr = cache_->ptrAt(b, 0, src_pid, 0); + auto dest_ptr = cache_->ptrAt(b, 0, dest_pid, 0); + int copy_size = cache_->dimension() * cache_->head(); + memcpy(dest_ptr, src_ptr, copy_size * sizeof(float)); + } else if (cache_->dtype() == MLLM_TYPE_F16) { + auto src_ptr = cache_->ptrAt(b, 0, src_pid, 0); + auto dest_ptr = cache_->ptrAt(b, 0, dest_pid, 0); + int copy_size = cache_->dimension() * cache_->head(); + memcpy(dest_ptr, src_ptr, copy_size * 
sizeof(mllm_fp16_t)); + } else if (cache_->dtype() == MLLM_TYPE_Q8_0) { + // TODO: Q8 Check + auto src_ptr = + (char *)cache_->rawHostPtr() + cache_->offset(b, 0, src_pid, 0) * sizeof(block_q8_0) / QK8_0; + auto dest_ptr = (char *)cache_->rawHostPtr() + cache_->offset(b, 0, dest_pid, 0) * sizeof(block_q8_0) / QK8_0; + int copy_size = cache_->dimension() * cache_->head(); + memcpy(dest_ptr, src_ptr, copy_size * sizeof(block_q8_0) / QK8_0); + } + } + dest_pid += 1; + } + } else if (cache_->ctype() == BHDS) { + unsigned int dest_pid = cache_seq_len_ - verified_position_ids.size(); + for (unsigned int src_pid : verified_position_ids) { + if (src_pid == dest_pid) { + dest_pid += 1; + continue; + } +#pragma omp parallel for collapse(3) num_threads(thread_count) + for (int b = 0; b < cache_->batch(); ++b) { + for (int h = 0; h < cache_->head(); ++h) { + for (int d = 0; d < cache_->dimension(); ++d) { + if (cache_->dtype() == MLLM_TYPE_F32) { + auto src_data = cache_->dataAt(b, h, src_pid, d); + cache_->setDataAt(b, h, dest_pid, d, src_data); + } else if (cache_->dtype() == MLLM_TYPE_F16) { + auto src_data = cache_->dataAt(b, h, src_pid, d); + cache_->setDataAt(b, h, dest_pid, d, src_data); + } else if (cache_->dtype() == MLLM_TYPE_Q8_0) { + // TODO: Q8 Check 不知道q8能不能直接setDataAt + // auto src_data = cache_->dataAt(b, h, src_pid, d); + // cache_->setDataAt(b, h, dest_pid, d, src_data); + auto src_ptr = + (char *)cache_->rawHostPtr() + cache_->offset(b, h, src_pid, d) * sizeof(block_q8_0) / QK8_0; + auto dest_ptr = (char *)cache_->rawHostPtr() + cache_->offset(b, h, dest_pid, d) * sizeof(block_q8_0) / QK8_0; + int copy_size = 1; + memcpy(dest_ptr, src_ptr, copy_size * sizeof(block_q8_0) / QK8_0); + } + } + } + } + dest_pid += 1; + } + } else { + std::cout << "ERROR Ctype in KVCcache;" << std::endl; + } + return MLLM_NO_ERROR; +} + +} // namespace mllm \ No newline at end of file diff --git a/src/backends/cpu/op/CPUKVCache.hpp b/mllm/backends/cpu/op/CPUKVCache.hpp similarity 
index 87% rename from src/backends/cpu/op/CPUKVCache.hpp rename to mllm/backends/cpu/op/CPUKVCache.hpp index abaf4822f..476b4f0dc 100644 --- a/src/backends/cpu/op/CPUKVCache.hpp +++ b/mllm/backends/cpu/op/CPUKVCache.hpp @@ -10,7 +10,7 @@ namespace mllm { class CPUKVCache final : public Op { public: - CPUKVCache(Backend *bn, string opName, int hidden, int head, int n_rep, int cache_max = 100, int threadCount = 4); + CPUKVCache(Backend *bn, string opName, int hidden, int head, int n_rep, bool fa2, int cache_max = 100, int threadCount = 4); virtual ~CPUKVCache() = default; virtual ErrorCode reshape(vector> inputs, vector> outputs) override; virtual ErrorCode load(AbstructLoader &loader) override; @@ -18,14 +18,14 @@ class CPUKVCache final : public Op { virtual ErrorCode free(vector> inputs, vector> outputs) override; virtual ErrorCode setUp(vector> inputs, vector> outputs) override; - Tensor cache_; + shared_ptr cache_; int getCacheSeqLen() override { return cache_seq_len_; } void clearCache() override { cache_seq_len_ = 0; - cache_.cache_seq_len_ = cache_seq_len_; + cache_->cache_seq_len_ = cache_seq_len_; } void setForXnn(bool for_xnn) { @@ -42,6 +42,8 @@ class CPUKVCache final : public Op { bool for_xnn_ = false; int cache_limit_; + + bool fa2_ = false; // not_fa2 }; class CPUKVCacheCreator : public CPUBackend::Creator { @@ -52,7 +54,8 @@ class CPUKVCacheCreator : public CPUBackend::Creator { bool for_xnn = (bool)op_param["for_xnn"]; int hidden = (int)op_param["hidden"]; int head = (int)op_param["head"]; - auto ret = new CPUKVCache(bn, name, hidden, head, n_rep, cache_max, threadCount); + bool fa2 = (bool)op_param["fa2"]; + auto ret = new CPUKVCache(bn, name, hidden, head, n_rep, fa2, cache_max, threadCount); ret->setForXnn(for_xnn); return ret; } diff --git a/src/backends/cpu/op/CPUKVCacheNPU.cpp b/mllm/backends/cpu/op/CPUKVCacheNPU.cpp similarity index 71% rename from src/backends/cpu/op/CPUKVCacheNPU.cpp rename to mllm/backends/cpu/op/CPUKVCacheNPU.cpp index 
6015976e0..3e0329a12 100644 --- a/src/backends/cpu/op/CPUKVCacheNPU.cpp +++ b/mllm/backends/cpu/op/CPUKVCacheNPU.cpp @@ -1,6 +1,8 @@ #include "CPUKVCacheNPU.hpp" +#include "Context.hpp" +#include "DataType.hpp" #include "ParamLoader.hpp" #include "Types.hpp" @@ -8,10 +10,10 @@ namespace mllm { CPUKVCacheNPU::CPUKVCacheNPU(Backend *bn, string opName, int n_rep, int cache_max, int threadCount) : thread_count(threadCount), Op(bn, opName) { - cache_.setBackend(bn); + cache_ = std::make_shared(bn); // TODO: Chaning it to FP16 - cache_.setDtype(MLLM_TYPE_F16); + cache_->setDtype(MLLM_TYPE_F16); cache_limit_ = cache_max; n_rep_ = n_rep; } @@ -20,9 +22,9 @@ ErrorCode CPUKVCacheNPU::reshape(vector> inputs, vectorbatch(), inputs[0]->head() * n_rep_, cache_limit_, inputs[0]->dimension()); - cache_.setName(name() + ".Cache"); - cache_.alloc(); + cache_->reshape(inputs[0]->batch(), inputs[0]->head() * n_rep_, cache_limit_, inputs[0]->dimension()); + cache_->setName(name() + ".Cache"); + cache_->alloc(); cache_seq_len_ = 0; // when using the old frontend, the V will be transposed here; while in the module API, the V will be transposed in the QNNTranspose @@ -33,14 +35,15 @@ ErrorCode CPUKVCacheNPU::reshape(vector> inputs, vector(backend_); - if (cpuBackend->isStageSwitching() && cpuBackend->getExecutionType() == AUTOREGRESSIVE) { - cache_seq_len_ = cpuBackend->getCurSequenceLength(); + if (Context::Instance().inference_state().isStageSwitching() + && Context::Instance().inference_state().getExecutionType() == AUTOREGRESSIVE) { + cache_seq_len_ = Context::Instance().inference_state().getCurSequenceLength(); isDecoding = true; } // if a new prompt is given, the cache should be updated - if (cpuBackend->isStageSwitching() && cpuBackend->getExecutionType() == PROMPT) { - cache_seq_len_ = cpuBackend->getCurSequenceLength(); + if (Context::Instance().inference_state().isStageSwitching() + && Context::Instance().inference_state().getExecutionType() == PROMPT) { + cache_seq_len_ = 
Context::Instance().inference_state().getCurSequenceLength(); isDecoding = false; } #endif @@ -48,7 +51,8 @@ ErrorCode CPUKVCacheNPU::reshape(vector> inputs, vectorreshape(inputs[0]->batch(), inputs[0]->head() * n_rep_, inputs[0]->sequence() + cache_seq_len_, inputs[0]->dimension()); if (inputs[0]->sequence() + cache_seq_len_ > cache_limit_) { - MLLM_LOG_ERROR_STREAM << "\n[ERROR]: Current tokens exceed cache limit: " << inputs[0]->sequence() + cache_seq_len_ << ">" << cache_limit_ << ";" << "\n Please set args `--limits` >" << cache_limit_ << std::endl; + MLLM_LOG_ERROR_STREAM << "\n[ERROR]: Current tokens exceed cache limit: " << inputs[0]->sequence() + cache_seq_len_ << ">" << cache_limit_ << ";" + << "\n Please set args `--limits` >" << cache_limit_ << std::endl; exit(1); outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head() * n_rep_, cache_limit_, inputs[0]->dimension()); @@ -68,46 +72,46 @@ ErrorCode CPUKVCacheNPU::execute(vector> inputs, vector 1) { - if (cache_.ctype() == BSHD) { - for (int b = 0; b < cache_.batch(); ++b) { + if (cache_->ctype() == BSHD) { + for (int b = 0; b < cache_->batch(); ++b) { for (int h = inputs[0]->head() - 1; h >= 0; --h) { #pragma omp parallel for collapse(2) num_threads(thread_count) for (int seq = cache_seq_len_old; seq < cache_seq_len_; ++seq) { for (int i_rep = 0; i_rep < n_rep_; ++i_rep) { auto cache_head = h * n_rep_ + i_rep; - if (cache_.dtype() == MLLM_TYPE_F32) { + if (cache_->dtype() == MLLM_TYPE_F32) { auto src_ptr = inputs[0]->ptrAt(b, h, seq - cache_seq_len_old, 0); - auto dest_ptr = cache_.ptrAt(b, cache_head, seq, 0); - int copy_size = cache_.dimension(); + auto dest_ptr = cache_->ptrAt(b, cache_head, seq, 0); + int copy_size = cache_->dimension(); memcpy(dest_ptr, src_ptr, copy_size * sizeof(float)); - } else if (cache_.dtype() == MLLM_TYPE_F16) { + } else if (cache_->dtype() == MLLM_TYPE_F16) { auto src_ptr = inputs[0]->ptrAt(b, h, seq - cache_seq_len_old, 0); - auto dest_ptr = cache_.ptrAt(b, cache_head, 
seq, 0); - int copy_size = cache_.dimension(); + auto dest_ptr = cache_->ptrAt(b, cache_head, seq, 0); + int copy_size = cache_->dimension(); memcpy(dest_ptr, src_ptr, copy_size * sizeof(mllm_fp16_t)); } } } } } - } else if (cache_.ctype() == BHDS) { - for (int b = 0; b < cache_.batch(); ++b) { + } else if (cache_->ctype() == BHDS) { + for (int b = 0; b < cache_->batch(); ++b) { for (int h = inputs[0]->head() - 1; h >= 0; --h) { #pragma omp parallel for collapse(2) num_threads(thread_count) for (int d = 0; d < inputs[0]->dimension(); ++d) { for (int i_rep = 0; i_rep < n_rep_; ++i_rep) { auto cache_head = h * n_rep_ + i_rep; - if (cache_.dtype() == MLLM_TYPE_F32) { + if (cache_->dtype() == MLLM_TYPE_F32) { auto src_ptr = inputs[0]->ptrAt(b, h, 0, d); auto dest_ptr = - cache_.ptrAt(b, cache_head, cache_seq_len_old, d); + cache_->ptrAt(b, cache_head, cache_seq_len_old, d); memcpy(dest_ptr, src_ptr, input_seq * sizeof(float)); - } else if (cache_.dtype() == MLLM_TYPE_F16) { + } else if (cache_->dtype() == MLLM_TYPE_F16) { auto src_ptr = inputs[0]->ptrAt(b, h, 0, d); auto dest_ptr = - cache_.ptrAt(b, cache_head, cache_seq_len_old, d); + cache_->ptrAt(b, cache_head, cache_seq_len_old, d); memcpy(dest_ptr, src_ptr, input_seq * sizeof(mllm_fp16_t)); } } @@ -123,44 +127,44 @@ ErrorCode CPUKVCacheNPU::execute(vector> inputs, vectorctype() == BSHD) { + for (int b = 0; b < cache_->batch(); ++b) { for (int h = 0; h < inputs[0]->head(); ++h) { #pragma omp parallel for collapse(2) num_threads(thread_count) for (int seq = 0; seq < inputs[0]->sequence(); ++seq) { for (int i_rep = 0; i_rep < n_rep_; ++i_rep) { auto cache_head = h * n_rep_ + i_rep; - if (cache_.dtype() == MLLM_TYPE_F32) { + if (cache_->dtype() == MLLM_TYPE_F32) { auto src_ptr = inputs[0]->ptrAt(b, h, seq, 0); - auto dest_ptr = cache_.ptrAt(b, cache_head, cache_seq_len_old + seq, 0); + auto dest_ptr = cache_->ptrAt(b, cache_head, cache_seq_len_old + seq, 0); memcpy(dest_ptr, src_ptr, inputs[0]->dimension() * 
sizeof(float)); - } else if (cache_.dtype() == MLLM_TYPE_F16) { + } else if (cache_->dtype() == MLLM_TYPE_F16) { auto src_ptr = inputs[0]->ptrAt(b, h, seq, 0); - auto dest_ptr = cache_.ptrAt(b, cache_head, cache_seq_len_old + seq, 0); + auto dest_ptr = cache_->ptrAt(b, cache_head, cache_seq_len_old + seq, 0); memcpy(dest_ptr, src_ptr, inputs[0]->dimension() * sizeof(mllm_fp16_t)); } } } } } - } else if (cache_.ctype() == BHDS) { - for (int b = 0; b < cache_.batch(); ++b) { + } else if (cache_->ctype() == BHDS) { + for (int b = 0; b < cache_->batch(); ++b) { for (int h = 0; h < inputs[0]->head(); ++h) { #pragma omp parallel for collapse(2) num_threads(thread_count) for (int d = 0; d < inputs[0]->dimension(); ++d) { for (int i_rep = 0; i_rep < n_rep_; ++i_rep) { auto cache_head = h * n_rep_ + i_rep; - if (cache_.dtype() == MLLM_TYPE_F32) { + if (cache_->dtype() == MLLM_TYPE_F32) { auto src_ptr = inputs[0]->ptrAt(b, h, 0, d); auto dest_ptr = - cache_.ptrAt(b, cache_head, cache_seq_len_old, d); + cache_->ptrAt(b, cache_head, cache_seq_len_old, d); memcpy(dest_ptr, src_ptr, inputs[0]->sequence() * sizeof(float)); - } else if (cache_.dtype() == MLLM_TYPE_F16) { + } else if (cache_->dtype() == MLLM_TYPE_F16) { auto src_ptr = inputs[0]->ptrAt(b, h, 0, d); auto dest_ptr = - cache_.ptrAt(b, cache_head, cache_seq_len_old, d); + cache_->ptrAt(b, cache_head, cache_seq_len_old, d); memcpy(dest_ptr, src_ptr, inputs[0]->sequence() * sizeof(mllm_fp16_t)); } } @@ -184,7 +188,7 @@ ErrorCode CPUKVCacheNPU::setUp(vector> inputs, vectorsetDtype(cache_.dtype()); + outputs[0]->setDtype(cache_->dtype()); outputs[0]->shallowCopyFrom(cache_, false, {0, 0, cache_seq_len_ / cache_limit_, 0}); if (inputs[0]->sequence() + cache_seq_len_ > cache_limit_) { outputs[0]->shallowCopyFrom(cache_, false, {0, 0, cache_seq_len_ % cache_limit_ + 1, 0}); @@ -197,13 +201,13 @@ ErrorCode CPUKVCacheNPU::setUp(vector> inputs, vectorsetDtype(cache_.dtype()); + outputs[0]->setDtype(cache_->dtype()); 
outputs[0]->shallowCopyFrom(cache_, false, {0, 0, cache_seq_len_ / cache_limit_, 0}); if (inputs[0]->sequence() + cache_seq_len_ > cache_limit_) { outputs[0]->shallowCopyFrom(cache_, false, {0, 0, cache_seq_len_ % cache_limit_ + 1, 0}); } - inputs[0]->setDtype(cache_.dtype()); + inputs[0]->setDtype(cache_->dtype()); return MLLM_NO_ERROR; } } // namespace mllm \ No newline at end of file diff --git a/src/backends/cpu/op/CPUKVCacheNPU.hpp b/mllm/backends/cpu/op/CPUKVCacheNPU.hpp similarity index 97% rename from src/backends/cpu/op/CPUKVCacheNPU.hpp rename to mllm/backends/cpu/op/CPUKVCacheNPU.hpp index 54cf0a3af..c9bd26bb2 100644 --- a/src/backends/cpu/op/CPUKVCacheNPU.hpp +++ b/mllm/backends/cpu/op/CPUKVCacheNPU.hpp @@ -5,6 +5,7 @@ #include "Op.hpp" #include "../CPUBackend.hpp" #include "ParamLoader.hpp" +#include namespace mllm { /** @@ -24,7 +25,7 @@ class CPUKVCacheNPU final : public Op { virtual ErrorCode free(vector> inputs, vector> outputs) override; virtual ErrorCode setUp(vector> inputs, vector> outputs) override; - Tensor cache_; + shared_ptr cache_; int getCacheSeqLen() override { return cache_seq_len_; diff --git a/mllm/backends/cpu/op/CPUKVCacheSage.cpp b/mllm/backends/cpu/op/CPUKVCacheSage.cpp new file mode 100644 index 000000000..b8c09bd53 --- /dev/null +++ b/mllm/backends/cpu/op/CPUKVCacheSage.cpp @@ -0,0 +1,129 @@ + + +#include "CPUKVCacheSage.hpp" +#include "ParamLoader.hpp" +#include "Types.hpp" +#include "../compute/SageQuantize.hpp" +#include + +// int n_pack = 16; +namespace mllm { +CPUKVCacheSage::CPUKVCacheSage(Backend *bn, string opName, int hidden, int head, int n_rep, bool fa2, int cache_max, int threadCount) : + thread_count(threadCount), Op(bn, opName) { + cache_ = std::make_shared(bn); + cache_->setDtype(MLLM_TYPE_Q8_0F); + n_rep = 1; + + cache_limit_ = cache_max; + n_rep_ = n_rep; + if (head > 0) { + // cache_->setCtype(BHSD); + cache_->reshape(1, head * n_rep_, cache_limit_, hidden); + cache_->setName(name() + ".Cache"); + 
cache_->alloc(); + + // memset((char *)cache_->rawHostPtr(), 0, cache_->count() * sizeof(block_q8_0f) / QK8_0F); + cache_->seqMeans().resize(1 * head * hidden); + cache_seq_len_ = 0; + cache_->cache_seq_len_ = cache_seq_len_; + } +} + +ErrorCode CPUKVCacheSage::reshape(vector> inputs, + vector> outputs) { + assert(inputs.size() == 1); + assert(outputs.size() == 1); + if (cache_seq_len_ < 0) { + // cache_->setCtype(BHSD); + cache_->reshape(inputs[0]->batch(), inputs[0]->head() * n_rep_, cache_limit_, + inputs[0]->dimension()); + cache_->setName(name() + ".Cache"); + cache_->alloc(); + + // memset((char *)cache_->rawHostPtr(), 0, cache_->count() * sizeof(block_q8_0f) / QK8_0F); + cache_->seqMeans().resize(inputs[0]->batch() * inputs[0]->head() * inputs[0]->dimension()); + cache_seq_len_ = 0; + cache_->cache_seq_len_ = cache_seq_len_; + } + + int sequence = inputs[0]->sequence() + cache_seq_len_; + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head() * n_rep_, sequence, + inputs[0]->dimension()); + if (sequence > cache_limit_) { + MLLM_LOG_ERROR_STREAM << "\n[ERROR]: Current tokens exceed cache limit: " << sequence << ">" + << cache_limit_ << ";" + << "\n Please set args `--limits` >" << cache_limit_ << std::endl; + + exit(1); + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head() * n_rep_, cache_limit_, + inputs[0]->dimension()); + } + return Op::reshape(inputs, outputs); +} + +ErrorCode CPUKVCacheSage::load(AbstructLoader &loader) { + return Op::load(loader); +} + +ErrorCode CPUKVCacheSage::execute(vector> inputs, + vector> outputs) { + int cache_seq_len_old = cache_seq_len_; + + auto new_tokens = inputs[0]; + const int batch_size = new_tokens->batch(); + const int kv_head = new_tokens->head(); + const int seq_len = new_tokens->sequence(); + const int dim = new_tokens->dimension(); + const int num_k_blocks = dim / QK8_0F; +#pragma omp parallel for collapse(2) num_threads(thread_count) + for (int b = 0; b < batch_size; ++b) { + for (int h = 0; h < kv_head; 
++h) { + float *p_mean = &cache_->seqMeans()[(b * kv_head + h) * dim]; + + if (seq_len > 1) { // Prefill 阶段 + const size_t bshd_s_stride = (size_t)new_tokens->head() * new_tokens->dimension(); + const float *head_start_ptr = new_tokens->ptrAt(b, h, 0, 0); + + sage_kv_cache::compute_sage_mean_for_one_head_bshd(head_start_ptr, p_mean, seq_len, dim, bshd_s_stride); + + for (int s = 0; s < seq_len; ++s) { + const float *token_to_quantize = new_tokens->ptrAt(b, h, s, 0); + + size_t block_offset = ((size_t)b * cache_->sequence() + s) * cache_->head() + h; + block_q8_0f *p_dest = reinterpret_cast(cache_->rawHostPtr()) + block_offset * num_k_blocks; + + sage_kv_cache::quantize_new_token_to_sage_blocks(token_to_quantize, p_mean, p_dest, dim); + } + } else { // Decode 阶段 + const float *p_new_token = new_tokens->ptrAt(b, h, 0, 0); + sage_kv_cache::update_sage_mean_vector_incremental(p_mean, p_new_token, cache_seq_len_old, dim); + + size_t block_offset = ((size_t)b * cache_->sequence() + cache_seq_len_old) * cache_->head() + h; + block_q8_0f *p_dest = reinterpret_cast(cache_->rawHostPtr()) + block_offset * num_k_blocks; + + sage_kv_cache::quantize_new_token_to_sage_blocks(p_new_token, p_mean, p_dest, dim); + } + } + } + + cache_seq_len_ += inputs[0]->sequence(); + cache_->cache_seq_len_ = cache_seq_len_; + return Op::execute(inputs, outputs); +} + +ErrorCode CPUKVCacheSage::free(vector> inputs, vector> outputs) { + return Op::free(inputs, outputs); +} + +ErrorCode CPUKVCacheSage::setUp(vector> inputs, vector> outputs) { + assert(inputs.size() == 1); + assert(outputs.size() == 1); + + // for BSHD attention end + outputs[0]->setDtype(cache_->dtype()); + outputs[0]->shallowCopyFrom(cache_, false, {0, 0, 0, 0}); + + return MLLM_NO_ERROR; +} + +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/cpu/op/CPUKVCacheSage.hpp b/mllm/backends/cpu/op/CPUKVCacheSage.hpp new file mode 100644 index 000000000..13d646126 --- /dev/null +++ 
b/mllm/backends/cpu/op/CPUKVCacheSage.hpp @@ -0,0 +1,60 @@ + +#ifndef MLLM_CPUKVCACHESAGE_H +#define MLLM_CPUKVCACHESAGE_H + +#include "Op.hpp" +#include "../CPUBackend.hpp" +#include "ParamLoader.hpp" +#include + +namespace mllm { + +class CPUKVCacheSage final : public Op { +public: + CPUKVCacheSage(Backend *bn, string opName, int hidden, int head, int n_rep, bool fa2, int cache_max = 100, int threadCount = 4); + virtual ~CPUKVCacheSage() = default; + virtual ErrorCode reshape(vector> inputs, vector> outputs) override; + virtual ErrorCode load(AbstructLoader &loader) override; + virtual ErrorCode execute(vector> inputs, vector> outputs) override; + virtual ErrorCode free(vector> inputs, vector> outputs) override; + virtual ErrorCode setUp(vector> inputs, vector> outputs) override; + + shared_ptr cache_; + + int getCacheSeqLen() override { + return cache_seq_len_; + } + void clearCache() override { + cache_seq_len_ = 0; + cache_->cache_seq_len_ = cache_seq_len_; + } + +private: + int thread_count = 4; + + int cache_seq_len_ = -999; + int n_rep_ = 1; + + bool for_xnn_ = false; + int cache_limit_; + + // bool fa2_ = false; // not_fa2 +}; + +class CPUKVCacheSageCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const { + int n_rep = (int)op_param["n_rep"]; + int cache_max = (int)op_param["cache_max"]; + bool for_xnn = (bool)op_param["for_xnn"]; + int hidden = (int)op_param["hidden"]; + int head = (int)op_param["head"]; + bool fa2 = (bool)op_param["fa2"]; + auto ret = new CPUKVCacheSage(bn, name, hidden, head, n_rep, fa2, cache_max, threadCount); + return ret; + } +}; + +} // namespace mllm + +#endif // MLLM_CPUKVCACHESAGE_H \ No newline at end of file diff --git a/src/backends/cpu/op/CPUKVCacheXp.cpp b/mllm/backends/cpu/op/CPUKVCacheXp.cpp similarity index 100% rename from src/backends/cpu/op/CPUKVCacheXp.cpp rename to mllm/backends/cpu/op/CPUKVCacheXp.cpp diff --git 
a/src/backends/cpu/op/CPUKVCacheXp.hpp b/mllm/backends/cpu/op/CPUKVCacheXp.hpp similarity index 100% rename from src/backends/cpu/op/CPUKVCacheXp.hpp rename to mllm/backends/cpu/op/CPUKVCacheXp.hpp diff --git a/src/backends/cpu/op/CPULayerNorm.cpp b/mllm/backends/cpu/op/CPULayerNorm.cpp similarity index 100% rename from src/backends/cpu/op/CPULayerNorm.cpp rename to mllm/backends/cpu/op/CPULayerNorm.cpp diff --git a/src/backends/cpu/op/CPULayerNorm.hpp b/mllm/backends/cpu/op/CPULayerNorm.hpp similarity index 100% rename from src/backends/cpu/op/CPULayerNorm.hpp rename to mllm/backends/cpu/op/CPULayerNorm.hpp diff --git a/mllm/backends/cpu/op/CPULikeFunc.hpp b/mllm/backends/cpu/op/CPULikeFunc.hpp new file mode 100644 index 000000000..a93ae72b4 --- /dev/null +++ b/mllm/backends/cpu/op/CPULikeFunc.hpp @@ -0,0 +1,48 @@ +// +// Created by Rongjie Yi on 24-12-16. +// + +#ifndef CPULIKEFUNC_HPP +#define CPULIKEFUNC_HPP + +#include "Tensor.hpp" +#include "Types.hpp" +#include "CPUBackend.hpp" +#include + +namespace mllm { +class Tensor; + +class CPUlikeFunction : public Op { +private: + int thread_count = 4; + float like_value_; + +public: + CPUlikeFunction(Backend *bn, string name, int threadCount, float like_value) : + Op(bn, name), thread_count(threadCount), like_value_(like_value) { + } + + ErrorCode reshape(vector> inputs, vector> outputs) override { + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); + outputs[0]->setDtype(inputs[0]->dtype()); // like_values + return ErrorCode::MLLM_NO_ERROR; + } + + ErrorCode execute(vector> inputs, vector> outputs) override { + memset(outputs[0]->hostPtr(), like_value_, outputs[0]->count() * sizeof(float)); + return ErrorCode::MLLM_NO_ERROR; + } +}; + +class CPUlikeFunctionCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + // Assumes OpParam contains the key "like_value" + float 
like_value = op_param.at("like_value"); + return new CPUlikeFunction(bn, name, threadCount, like_value); + } +}; + +} // namespace mllm +#endif // CPULIKEFUNC_HPP \ No newline at end of file diff --git a/mllm/backends/cpu/op/CPULinear.cpp b/mllm/backends/cpu/op/CPULinear.cpp new file mode 100644 index 000000000..bfa573f42 --- /dev/null +++ b/mllm/backends/cpu/op/CPULinear.cpp @@ -0,0 +1,253 @@ + +#include "CPULinear.hpp" +#include "Types.hpp" +#include +#include +#include +#include "../compute/GemmKleidiai.hpp" +#include "backends/cpu/third_party/ggml/QuantizeQ8.hpp" + +namespace mllm { + +CPULinear::CPULinear(Backend *bn, string opName, int in_features, int out_features, bool bias, int threadCount) : + thread_count(threadCount), + Op(bn, opName) { + in_features_ = in_features; + out_features_ = out_features; + support_bias_ = bias; + thread_count = threadCount; + weight_.setBackend(bn); + bias_.setBackend(bn); +} + +ErrorCode CPULinear::reshape(vector> inputs, vector> outputs) { + // std::cout << name() << " CPULinear reshape" << std::endl; + assert(inputs.size() == 1); + assert(outputs.size() == 1); + if (inputs[0]->count() == 0 && inputs[0]->sequence() != 0) { + outputs[0]->reshape(0, 0, 0, 0); + return Op::reshape(inputs, outputs); + } + // N | C | H | W + // ----------------------------------------------- + // 1 |out_channel | in_channel | 1 + // |out_features| in_features | + // ----------------------------------------------- + // batch |in_channel | seq_len | 1 + // |in_features | inputs[0]->sequence() | + // ----------------------------------------------- + // batch |out_channel | seq_len | 1 + // |out_features| inputs[0]->sequence() | + assert(inputs[0]->head() == 1); + assert(in_features_ == inputs[0]->dimension()); + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), out_features_); + // outputs[0]->setDtype(activationDtype()); + return Op::reshape(inputs, outputs); +} + +ErrorCode CPULinear::load(AbstructLoader &loader) { 
+ // std::cout << name() << " CPULinear load" << std::endl; + bool kai_flag = false; + weight_.setName(name() + ".weight"); + weight_.reshape(1, 1, out_features_, in_features_); + if (loader.getDataType(weight_.name()) != MLLM_TYPE_COUNT) { + if (loader.getDataType(weight_.name()) == MLLM_TYPE_KLEIDIAI_Q4_0) { +#if defined(__aarch64__) || defined(__arm__) || defined(__arm64__) + kai_thread_count = thread_count; + kai_flag = true; + // out_features_:N + // in_features_:K +#ifndef KAI_FP16_CAL + size_t packed_b_size = mllm_kleidai_get_packed_b_qsi4_size(out_features_, in_features_); +#else + size_t packed_b_size = mllm_kleidai_get_packed_b_qsi4_size_to_fp16(out_features_, in_features_); +#endif + weight_.reshape(1, 1, 1, packed_b_size); +#else + std::cerr << "KLEIDIAI_Q4_0 is not supported on this platform!" << std::endl; + exit(-1); + return NOT_SUPPORT; +#endif + } + weight_.setDtype(loader.getDataType(weight_.name())); + weight_.alloc(); + loader.load(&weight_); + } else { + if (weight_.name().find('v') != std::string::npos && Op::noLoadWeightsDtype() == MLLM_TYPE_Q4_0_4_4) { + weight_.setDtype(MLLM_TYPE_Q4_0); + } else { + weight_.setDtype(Op::noLoadWeightsDtype()); + } + weight_.alloc(); + } + if (support_bias_ && !kai_flag) { + bias_.setName(name() + ".bias"); + bias_.reshape(1, 1, 1, out_features_); + if (loader.getDataType(bias_.name()) != MLLM_TYPE_COUNT) { + bias_.setDtype(loader.getDataType(bias_.name())); + bias_.alloc(); + loader.load(&bias_); + } else { + bias_.setDtype(MLLM_TYPE_F32); + bias_.alloc(); + } + } + return Op::load(loader); +} + +ErrorCode CPULinear::execute(vector> inputs, vector> outputs) { + // auto start = mllm::mllm_time_us(); + if (inputs[0]->count() == 0) { + return Op::execute(inputs, outputs); + } + if (inputs[0]->sequence() != outputs[0]->sequence() && outputs[0]->masterTensor() == nullptr) { + outputs[0]->reshape(outputs[0]->batch(), outputs[0]->head(), inputs[0]->sequence(), outputs[0]->dimension()); + outputs[0]->alloc(); + } + 
// TODO: Q8_0 KVCache can not use!! + if (outputs[0]->dtype() == MLLM_TYPE_Q8_0) { + auto tmp_out = std::make_shared(outputs[0]->backend()); + // tmp_out->setBackend(outputs[0]->backend()); + auto b = outputs[0]->batch(); + auto h = outputs[0]->head(); + auto d = outputs[0]->dimension(); + auto s = outputs[0]->sequence(); + tmp_out->chls() = outputs[0]->chls(); + tmp_out->setCtype(outputs[0]->ctype()); + tmp_out->reshape(b, h, s, d); + tmp_out->setDtype(MLLM_TYPE_F32); + tmp_out->alloc(); + if (weight_.dtype() == MLLM_TYPE_KLEIDIAI_Q4_0) { +#if defined(__aarch64__) || defined(__arm__) || defined(__arm64__) + kai_thread_count = thread_count; + // KLEIDIAI_Q4_0 is a packed type, we need to use a special function to handle it + for (int b = 0; b < inputs[0]->batch(); b++) { + auto M = inputs[0]->sequence(); + auto N = outputs[0]->dimension(); + auto K = inputs[0]->dimension(); + if (outputs[0]->dtype() == MLLM_TYPE_F16) { + mllm_kleidai_gemm_qsi4_to_fp16(outputs[0]->ptrAt(b, 0, 0, 0), + inputs[0]->ptrAt(b, 0, 0, 0), + (const uint8_t *)weight_.rawHostPtr(), M, N, K); + } else { + mllm_kleidai_gemm_qsi4(outputs[0]->ptrAt(b, 0, 0, 0), + inputs[0]->ptrAt(b, 0, 0, 0), + (const uint8_t *)weight_.rawHostPtr(), M, N, K); + } + } + return MLLM_NO_ERROR; +#else + std::cerr << "KLEIDIAI_Q4_0 is not supported on this platform!" 
<< std::endl; + exit(-1); + return NOT_SUPPORT; +#endif + } + mat_mul(inputs[0].get(), &weight_, tmp_out.get(), support_bias_, &bias_, false, true, thread_count); + if (tmp_out->ctype() == BSHD) { +#pragma omp parallel for collapse(3) num_threads(thread_count) + for (int b = 0; b < tmp_out->batch(); b++) { + for (int h = 0; h < tmp_out->head(); h++) { + for (int s = 0; s < tmp_out->sequence(); s++) { + quantize_row_q8_0(tmp_out->hostPtr() + tmp_out->offset(b, h, s, 0), + (char *)outputs[0]->rawHostPtr() + + outputs[0]->offset(b, h, s, 0) * sizeof(block_q8_0) / QK8_0, + tmp_out->dimension()); + } + } + } + } else { // BHDS +#pragma omp parallel for collapse(3) num_threads(thread_count) + for (int b = 0; b < tmp_out->batch(); b++) { + for (int h = 0; h < tmp_out->head(); h++) { + for (int d = 0; d < tmp_out->dimension(); d++) { + quantize_row_q8_0(tmp_out->hostPtr() + tmp_out->offset(b, h, 0, d), + (char *)outputs[0]->rawHostPtr() + + outputs[0]->offset(b, h, 0, d) * sizeof(block_q8_0) / QK8_0, + outputs[0]->sequence()); + } + } + } + } + } else { + if (weight_.dtype() == MLLM_TYPE_KLEIDIAI_Q4_0) { +#if defined(__aarch64__) || defined(__arm__) || defined(__arm64__) + // KLEIDIAI_Q4_0 is a packed type, we need to use a special function to handle it + if (outputs[0]->ctype() == BHDS) { //&& outputs[0]->masterTensor() != nullptr && outputs[0]->masterTensor()->ctype() == BHDS) { + for (int b = 0; b < inputs[0]->batch(); b++) { + auto M = inputs[0]->sequence(); + auto N = outputs[0]->dimension(); // out_features_ + auto K = inputs[0]->dimension(); // in_features_ + if (outputs[0]->dtype() == MLLM_TYPE_F16) { + // auto out_ptr = outputs[0]->ptrAt(b, 0, 0, 0); + vector out_vec(M * N); + auto out_ptr = out_vec.data(); + mllm_kleidai_gemm_qsi4_to_fp16(out_ptr, + inputs[0]->ptrAt(b, 0, 0, 0), + (const uint8_t *)weight_.rawHostPtr(), M, N, K); +#pragma omp parallel for num_threads(thread_count) + for (int s = 0; s < M; s++) { + for (int d = 0; d < N; d++) { + 
outputs[0]->setDataAt(b, 0, s, d, out_ptr[s * N + d]); + } + } + } else { + // auto out_ptr = outputs[0]->ptrAt(b, 0, 0, 0); + vector out_vec(M * N); + auto out_ptr = out_vec.data(); + mllm_kleidai_gemm_qsi4(out_ptr, + inputs[0]->ptrAt(b, 0, 0, 0), + (const uint8_t *)weight_.rawHostPtr(), M, N, K); +#pragma omp parallel for num_threads(thread_count) + for (int s = 0; s < M; s++) { + for (int d = 0; d < N; d++) { + outputs[0]->setDataAt(b, 0, s, d, out_ptr[s * N + d]); + } + } + } + } + } else { + for (int b = 0; b < inputs[0]->batch(); b++) { + auto M = inputs[0]->sequence(); + auto N = outputs[0]->dimension(); // out_features_ + auto K = inputs[0]->dimension(); // in_features_ + if (outputs[0]->dtype() == MLLM_TYPE_F16) { + mllm_kleidai_gemm_qsi4_to_fp16(outputs[0]->ptrAt(b, 0, 0, 0), + inputs[0]->ptrAt(b, 0, 0, 0), + (const uint8_t *)weight_.rawHostPtr(), M, N, K); + } else { + mllm_kleidai_gemm_qsi4(outputs[0]->ptrAt(b, 0, 0, 0), + inputs[0]->ptrAt(b, 0, 0, 0), + (const uint8_t *)weight_.rawHostPtr(), M, N, K); + } + } + } + return MLLM_NO_ERROR; +#else + std::cerr << "KLEIDIAI_Q4_0 is not supported on this platform!" 
<< std::endl; + exit(-1); + return NOT_SUPPORT; +#endif + } + mat_mul(inputs[0].get(), &weight_, outputs[0].get(), support_bias_, &bias_, false, true, thread_count); + } + return Op::execute(inputs, outputs); +} +ErrorCode CPULinear::setUp(vector> inputs, vector> outputs) { + for (auto &output : outputs) { + output->setDtype(activation_dtype_); + output->alloc(); + // if (weight_.dtype() == MLLM_TYPE_KLEIDIAI_Q4_0 || weight_.dtype() == MLLM_TYPE_Q4_0_4_4) { + // output->allowAggregated() = false; + // } + } + return MLLM_NO_ERROR; +} +ErrorCode CPULinear::free(vector> inputs, vector> outputs) { + weight_.unload(); + if (support_bias_) { + bias_.unload(); + } + return Op::free(inputs, outputs); +} + +} // namespace mllm diff --git a/src/backends/cpu/op/CPULinear.hpp b/mllm/backends/cpu/op/CPULinear.hpp similarity index 92% rename from src/backends/cpu/op/CPULinear.hpp rename to mllm/backends/cpu/op/CPULinear.hpp index 200903196..33f2e4347 100644 --- a/src/backends/cpu/op/CPULinear.hpp +++ b/mllm/backends/cpu/op/CPULinear.hpp @@ -16,6 +16,7 @@ class CPULinear final : public Op { virtual ErrorCode load(AbstructLoader &loader) override; virtual ErrorCode execute(vector> inputs, vector> outputs) override; virtual ErrorCode free(vector> inputs, vector> outputs) override; + virtual ErrorCode setUp(vector> inputs, vector> outputs) override; Tensor &weight() { return weight_; diff --git a/src/backends/cpu/op/CPULinearINT8Shadow.cpp b/mllm/backends/cpu/op/CPULinearINT8Shadow.cpp similarity index 97% rename from src/backends/cpu/op/CPULinearINT8Shadow.cpp rename to mllm/backends/cpu/op/CPULinearINT8Shadow.cpp index f510609c4..bda13abc0 100755 --- a/src/backends/cpu/op/CPULinearINT8Shadow.cpp +++ b/mllm/backends/cpu/op/CPULinearINT8Shadow.cpp @@ -1,8 +1,9 @@ #include "CPULinearINT8Shadow.hpp" #include "Types.hpp" -#include "../compute/VecDot.hpp" -#include "quantize/QuantizeQ8.hpp" +#include "backends/cpu/third_party/ggml/VecDotFP32.hpp" +#include 
"backends/cpu/third_party/ggml/VecDotFP16.hpp" +#include "backends/cpu/third_party/ggml/QuantizeQ8.hpp" #include namespace mllm { @@ -155,9 +156,9 @@ ErrorCode CPULinearINT8Shadow::execute(vector> inputs, vector int8_t output_clip = outputClip_.dataAt(0, 0, 0, 0); input_scale = input_scale / 127.0; - input_scale = roundf(input_scale * 100000) / 100000; + // input_scale = roundf(input_scale * 100000) / 100000; - output_scale = roundf(output_scale * 100000) / 100000; + // output_scale = roundf(output_scale * 100000) / 100000; memcpy(outputs[0]->hostPtr(), inputs[2]->hostPtr(), inputs[2]->cntSize()); @@ -173,7 +174,7 @@ ErrorCode CPULinearINT8Shadow::execute(vector> inputs, vector for (int j = 0; j < input0_buffer_.sequence(); j++) { for (int k = 0; k < input0_buffer_.dimension(); k++) { float round_value = roundf(input0_buffer_.dataAt(i, h, j, k) / input_scale); - if (round_value > (127.0 * 8) || round_value < (-128.0 * 8)) { + if (round_value > (127.0) || round_value < (-128.0)) { #if defined(__ARM_NEON) float origin_value = round_value * input_scale * weight_scale; float clip_value = std::fmax(std::fmin(round_value, 127), -128) * input_scale * weight_scale; @@ -213,7 +214,7 @@ ErrorCode CPULinearINT8Shadow::execute(vector> inputs, vector } #else - mllm_fp16_t origin_value = round_value * input_scale * weight_scale; + float origin_value = round_value * input_scale * weight_scale; float clip_value = std::fmax(std::fmin(round_value, 127), -128) * input_scale * weight_scale; #pragma omp parallel for collapse(1) num_threads(4) diff --git a/src/backends/cpu/op/CPULinearINT8Shadow.hpp b/mllm/backends/cpu/op/CPULinearINT8Shadow.hpp similarity index 100% rename from src/backends/cpu/op/CPULinearINT8Shadow.hpp rename to mllm/backends/cpu/op/CPULinearINT8Shadow.hpp diff --git a/src/backends/cpu/op/CPULinearInt8.cpp b/mllm/backends/cpu/op/CPULinearInt8.cpp similarity index 97% rename from src/backends/cpu/op/CPULinearInt8.cpp rename to mllm/backends/cpu/op/CPULinearInt8.cpp 
index fdbc09f5a..194abf329 100644 --- a/src/backends/cpu/op/CPULinearInt8.cpp +++ b/mllm/backends/cpu/op/CPULinearInt8.cpp @@ -4,6 +4,8 @@ #include #include #include +#include "backends/cpu/third_party/ggml/QuantizeQ8.hpp" +#include "backends/cpu/third_party/ggml/VecDotQ8.hpp" namespace mllm { @@ -138,7 +140,7 @@ ErrorCode CPULinearInt8::free(vector> inputs, vector()[0] / 127.0; - scale1 = roundf(scale1 * 100000) / 100000; + // scale1 = roundf(scale1 * 100000) / 100000; float scale2 = weightScale_.hostPtr()[0]; @@ -147,7 +149,7 @@ ErrorCode CPULinearInt8::mat_mul_fp32_i8(Tensor *src0_, Tensor *src1, Tensor *ds scale3 = biasScale_.hostPtr()[0]; float scale4 = outputActivatationScale_.hostPtr()[0] / 127.0; - scale4 = roundf(scale4 * 100000) / 100000; + // scale4 = roundf(scale4 * 100000) / 100000; assert(src1->dtype() == MLLM_TYPE_I8); assert(src0_->dtype() == MLLM_TYPE_F32); diff --git a/src/backends/cpu/op/CPULinearInt8.hpp b/mllm/backends/cpu/op/CPULinearInt8.hpp similarity index 100% rename from src/backends/cpu/op/CPULinearInt8.hpp rename to mllm/backends/cpu/op/CPULinearInt8.hpp diff --git a/mllm/backends/cpu/op/CPUMaskedFill.hpp b/mllm/backends/cpu/op/CPUMaskedFill.hpp new file mode 100644 index 000000000..febd27406 --- /dev/null +++ b/mllm/backends/cpu/op/CPUMaskedFill.hpp @@ -0,0 +1,70 @@ +// +// Created by Rongjie Yi on 24-12-16. 
+// + +#ifndef CPUMaskedFill_HPP +#define CPUMaskedFill_HPP + +#include "CPUBackend.hpp" +#include "Tensor.hpp" +#include "Types.hpp" +#include +// #include +// #include +#include + +namespace mllm { +class Tensor; + +class CPUMaskedFill : public Op { +private: + int thread_count = 4; + float value_; + +public: + CPUMaskedFill(Backend *bn, float value, string name, int threadCount) : + Op(bn, name), value_(value), thread_count(threadCount) { + } + + ErrorCode reshape(vector> inputs, vector> outputs) override { + // assert(outputs.size() == 2); // topk returns values and indices + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); + outputs[0]->setDtype(inputs[0]->dtype()); // topk_values + + // 遵从原始 reshape 逻辑,在这里 alloc + // outputs[0]->alloc(); + + return MLLM_NO_ERROR; + } + + ErrorCode execute(vector> inputs, vector> outputs) override { + // std::cout << inputs[0]->name() << " " << inputs[1]->name() << std::endl; + memcpy(outputs[0]->hostPtr(), inputs[0]->hostPtr(), inputs[0]->cntSize()); +#pragma omp parallel for collapse(4) num_threads(CPUBackend::cpu_threads) + for (int n = 0; n < inputs[0]->batch(); n++) { + for (int h = 0; h < inputs[0]->head(); h++) { + for (int s = 0; s < inputs[0]->sequence(); s++) { + for (int d = 0; d < inputs[0]->dimension(); ++d) { + float mask_flag = inputs[1]->dataAt(n, h, s, d); + if ((int)mask_flag == 1) { + outputs[0]->setDataAt(n, h, s, d, value_); // MaskedFill operation: negation + } + } + } + } + } + // NOTE: Add cases for other dimensions if needed. 
+ return MLLM_NO_ERROR; + } +}; + +class CPUMaskedFillCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + float value = op_param["value"]; + return new CPUMaskedFill(bn, value, name, threadCount); + } +}; + +} // namespace mllm +#endif // CPUMaskedFill_HPP \ No newline at end of file diff --git a/src/backends/cpu/op/CPUMatmul.cpp b/mllm/backends/cpu/op/CPUMatmul.cpp similarity index 100% rename from src/backends/cpu/op/CPUMatmul.cpp rename to mllm/backends/cpu/op/CPUMatmul.cpp diff --git a/src/backends/cpu/op/CPUMatmul.hpp b/mllm/backends/cpu/op/CPUMatmul.hpp similarity index 100% rename from src/backends/cpu/op/CPUMatmul.hpp rename to mllm/backends/cpu/op/CPUMatmul.hpp diff --git a/mllm/backends/cpu/op/CPUMatmulFunc.hpp b/mllm/backends/cpu/op/CPUMatmulFunc.hpp new file mode 100644 index 000000000..6f00cf0b3 --- /dev/null +++ b/mllm/backends/cpu/op/CPUMatmulFunc.hpp @@ -0,0 +1,192 @@ +// +// Created by Rongjie Yi on 24-2-26. 
+// + +#ifndef CPUMATMULFUNC_HPP +#define CPUMATMULFUNC_HPP + +#include "CPUBackend.hpp" +#include "DataType.hpp" +#include "Tensor.hpp" +#include "Types.hpp" +#include "../compute/Matmul.hpp" +#include +#include +#include +#include // For std::equal +#include "../compute/GemmKleidiai.hpp" +#include "../compute/GemmFp.hpp" + +namespace mllm { +class Tensor; + +class CPUmmFunction : public Op { +private: + int thread_count = 4; + + static void tranTensorChl(Tensor &input) { + assert(input.ctype() == BSHD); + auto b = input.batch(); + auto h = input.head(); + auto d = input.dimension(); + auto s = input.sequence(); + auto ori_seq_idx = input.chls()[SEQUENCE]; + auto ori_head_idx = input.chls()[HEAD]; + auto ori_dim_idx = input.chls()[DIMENSION]; + input.chls()[HEAD] = ori_seq_idx; + input.chls()[DIMENSION] = ori_head_idx; + input.chls()[SEQUENCE] = ori_dim_idx; + input.changeCtype(); + input.reshape(b, h, s, d); + input.transed() = true; + input.undiffusion() = false; + + // [FIX] Correctly handle the master tensor and its children + if (auto master = input.masterTensor()) { // master is now a shared_ptr + auto batch = master->batch(); + auto head = master->head(); + auto dimension = master->dimension(); + auto sequence = master->sequence(); + master->chls() = input.chls(); + master->changeCtype(); + master->reshape(batch, head, sequence, dimension); + + // Loop through the master's children + for (auto &child_wp : master->childTensors()) { + // Lock the weak_ptr to get a shared_ptr + if (auto child_sp = child_wp.lock()) { + // Now, use the shared_ptr to access members + auto b_c = child_sp->batch(); + auto h_c = child_sp->head(); + auto d_c = child_sp->dimension(); + auto s_c = child_sp->sequence(); + child_sp->chls() = input.chls(); + child_sp->changeCtype(); + child_sp->reshape(b_c, h_c, s_c, d_c); + } + } + } else { + // [FIX] Correctly handle this tensor's own children + for (auto &child_wp : input.childTensors()) { + // Lock the weak_ptr to get a shared_ptr + 
if (auto child_sp = child_wp.lock()) { + // Now, use the shared_ptr to access members + auto b_c = child_sp->batch(); + auto h_c = child_sp->head(); + auto d_c = child_sp->dimension(); + auto s_c = child_sp->sequence(); + child_sp->chls() = input.chls(); + child_sp->changeCtype(); + child_sp->reshape(b_c, h_c, s_c, d_c); + } + } + } + } + +public: + CPUmmFunction(Backend *bn, string name, int threadCount) : + Op(bn, name), thread_count(threadCount) { + } + + ErrorCode setUp(vector> inputs, vector> outputs) override { + if (inputs[0]->ctype() == BHSD) { + assert(inputs[0]->ctype() == inputs[1]->ctype()); + outputs[0]->setCtype(BHSD); + } else if (inputs[1]->chls()[SEQUENCE] != 3) { + tranTensorChl(*inputs[1]); + } + if (!inputs[1]->shape().empty() && !inputs[0]->shape().empty()) { + assert(inputs[0]->dimension() == inputs[1]->sequence()); + } + outputs[0]->alloc(); + return MLLM_NO_ERROR; + } + + ErrorCode reshape(vector> inputs, vector> outputs) override { + if (inputs[0]->ctype() != BHSD && inputs[1]->chls()[SEQUENCE] != 3) { + tranTensorChl(*inputs[1]); + assert(inputs[1]->chls()[SEQUENCE] == 3); + } + if (inputs[0]->ctype() == BHSD) { + assert(inputs[0]->ctype() == inputs[1]->ctype()); + outputs[0]->setCtype(BHSD); + } + assert(inputs[0]->dimension() == inputs[1]->sequence()); + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[1]->dimension()); + outputs[0]->setDtype(inputs[0]->dtype()); + // 遵从原始 reshape 逻辑,在这里 alloc + // outputs[0]->alloc(); + return MLLM_NO_ERROR; + } + + ErrorCode execute(vector> inputs, vector> outputs) override { + if (inputs[0]->ctype() == BHSD) { +#ifdef ARM + auto M = inputs[0]->sequence(); + auto N = inputs[1]->dimension(); + auto K = inputs[0]->dimension(); + size_t packed_b_size = mllm_kleidai_get_packed_b_fp32_size(N, K); + for (int b = 0; b < inputs[0]->batch(); b++) { + for (int h = 0; h < inputs[0]->head(); h++) { + if (inputs[1]->dtype() == MLLM_TYPE_F32) { + std::vector 
packed_b_data(packed_b_size); + mllm_kleidai_pack_b_and_bias_fp32(packed_b_data.data(), + inputs[1]->ptrAt(b, h, 0, 0), + nullptr, N, K); // Pass nullptr for bias + mllm_kleidai_gemm_fp32(outputs[0]->ptrAt(b, h, 0, 0), + inputs[0]->ptrAt(b, h, 0, 0), + packed_b_data.data(), + M, N, K); + } else { // inputs[1]->dtype() == MLLM_TYPE_F16 + std::vector packed_b_data(packed_b_size); + mllm_kleidai_pack_b_and_bias_fp16(packed_b_data.data(), + inputs[1]->ptrAt(b, h, 0, 0), + nullptr, N, K); // Pass nullptr for bias + mllm_kleidai_gemm_fp16(outputs[0]->ptrAt(b, h, 0, 0), + inputs[0]->ptrAt(b, h, 0, 0), + packed_b_data.data(), + M, N, K); + } + } + } + return MLLM_NO_ERROR; +#else + auto M = inputs[0]->sequence(); + auto N = inputs[1]->dimension(); + auto K = inputs[0]->dimension(); + memset(outputs[0]->hostPtr(), 0, outputs[0]->cntSize()); + for (int b = 0; b < inputs[0]->batch(); b++) { + for (int h = 0; h < inputs[0]->head(); h++) { + if (inputs[1]->dtype() == MLLM_TYPE_F32) { + gemm_fp32(outputs[0]->ptrAt(b, h, 0, 0), + inputs[0]->ptrAt(b, h, 0, 0), + inputs[1]->ptrAt(b, h, 0, 0), + M, N, K); + + } else { // inputs[1]->dtype() == MLLM_TYPE_F16 + gemm_fp32_fp16(outputs[0]->ptrAt(b, h, 0, 0), + inputs[0]->ptrAt(b, h, 0, 0), + inputs[1]->ptrAt(b, h, 0, 0), + M, N, K); + } + } + } + return MLLM_NO_ERROR; +#endif + } + bool isSame = std::equal(inputs[0]->chls().begin(), inputs[0]->chls().end(), inputs[1]->chls().begin()); + assert(inputs[0]->dtype() == MLLM_TYPE_F32); + mat_mul(inputs[0].get(), inputs[1].get(), outputs[0].get(), false, nullptr, false, isSame, thread_count); + return MLLM_NO_ERROR; + } +}; + +class CPUmmFunctionCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + return new CPUmmFunction(bn, name, threadCount); + } +}; + +} // namespace mllm +#endif // CPUMATMULFUNC_HPP \ No newline at end of file diff --git a/src/backends/cpu/op/CPUMaxPool2D.cpp 
b/mllm/backends/cpu/op/CPUMaxPool2D.cpp similarity index 100% rename from src/backends/cpu/op/CPUMaxPool2D.cpp rename to mllm/backends/cpu/op/CPUMaxPool2D.cpp diff --git a/src/backends/cpu/op/CPUMaxPool2D.hpp b/mllm/backends/cpu/op/CPUMaxPool2D.hpp similarity index 100% rename from src/backends/cpu/op/CPUMaxPool2D.hpp rename to mllm/backends/cpu/op/CPUMaxPool2D.hpp diff --git a/src/backends/cpu/op/CPUMean.cpp b/mllm/backends/cpu/op/CPUMean.cpp similarity index 100% rename from src/backends/cpu/op/CPUMean.cpp rename to mllm/backends/cpu/op/CPUMean.cpp diff --git a/src/backends/cpu/op/CPUMean.hpp b/mllm/backends/cpu/op/CPUMean.hpp similarity index 100% rename from src/backends/cpu/op/CPUMean.hpp rename to mllm/backends/cpu/op/CPUMean.hpp diff --git a/src/backends/cpu/function/CPUMeanFunc.hpp b/mllm/backends/cpu/op/CPUMeanFunc.hpp similarity index 66% rename from src/backends/cpu/function/CPUMeanFunc.hpp rename to mllm/backends/cpu/op/CPUMeanFunc.hpp index 46084f122..efc3f8bcf 100644 --- a/src/backends/cpu/function/CPUMeanFunc.hpp +++ b/mllm/backends/cpu/op/CPUMeanFunc.hpp @@ -4,21 +4,30 @@ #ifndef CPUMEANFUNC_HPP #define CPUMEANFUNC_HPP + #include "Tensor.hpp" #include "Types.hpp" +#include "CPUBackend.hpp" +#include namespace mllm { class Tensor; -class CPUmeanFunction : public TensorFunction { +class CPUmeanFunction : public Op { +private: + int thread_count = 4; + Chl axis_; + public: - void reshape(vector> outputs, vector> inputs, vector args) override { - Chl axis = (Chl)args[0]; + CPUmeanFunction(Backend *bn, string name, int threadCount, Chl axis) + : Op(bn, name), thread_count(threadCount), axis_(axis) {} + + ErrorCode reshape(vector> inputs, vector> outputs) override { int batch = inputs[0]->batch(); int head = inputs[0]->head(); int sequence = inputs[0]->sequence(); int dimension = inputs[0]->dimension(); - switch (axis) { + switch (axis_) { case BATCH: batch = 1; break; @@ -36,15 +45,21 @@ class CPUmeanFunction : public TensorFunction { } 
outputs[0]->reshape(batch, head, sequence, dimension); outputs[0]->setDtype(inputs[0]->dtype()); - outputs[0]->alloc(); + // 遵从原始 reshape 逻辑,在这里 alloc + // outputs[0]->alloc(); + return MLLM_NO_ERROR; } - void execute(vector> outputs, vector> inputs, vector args) override { - Chl axis = (Chl)args[0]; + + ErrorCode execute(vector> inputs, vector> outputs) override { int batch = inputs[0]->batch(); int dim = inputs[0]->dimension(); int seq = inputs[0]->sequence(); int head = inputs[0]->head(); - switch (axis) { + + // Note: OpenMP might be beneficial here for larger tensors. + // Adding it would be an optimization over the original direct translation. + + switch (axis_) { case BATCH: { for (int h = 0; h < head; h++) { for (int s = 0; s < seq; ++s) { @@ -53,7 +68,8 @@ class CPUmeanFunction : public TensorFunction { for (int n = 0; n < batch; n++) { sum += inputs[0]->dataAt(n, h, s, d); } - outputs[0]->setDataAt(0, h, s, d, sum / seq); + // Bug fix: was sum / seq, should be sum / batch + outputs[0]->setDataAt(0, h, s, d, sum / batch); } } } @@ -67,7 +83,8 @@ class CPUmeanFunction : public TensorFunction { for (int h = 0; h < head; h++) { sum += inputs[0]->dataAt(n, h, s, d); } - outputs[0]->setDataAt(n, 0, s, d, sum / seq); + // Bug fix: was sum / seq, should be sum / head + outputs[0]->setDataAt(n, 0, s, d, sum / head); } } } @@ -81,6 +98,7 @@ class CPUmeanFunction : public TensorFunction { for (int s = 0; s < seq; ++s) { sum += inputs[0]->dataAt(n, h, s, d); } + // This was correct outputs[0]->setDataAt(n, h, 0, d, sum / seq); } } @@ -92,10 +110,11 @@ class CPUmeanFunction : public TensorFunction { for (int h = 0; h < head; h++) { for (int s = 0; s < seq; s++) { float sum = 0.0f; - for (int d = 0; d < inputs[0]->dimension(); ++d) { + for (int d = 0; d < dim; ++d) { sum += inputs[0]->dataAt(n, h, s, d); } - outputs[0]->setDataAt(n, h, s, 0, sum / inputs[0]->dimension()); + // This was correct + outputs[0]->setDataAt(n, h, s, 0, sum / dim); } } } @@ -104,6 +123,16 @@ 
class CPUmeanFunction : public TensorFunction { default: break; } + return MLLM_NO_ERROR; + } +}; + +class CPUmeanFunctionCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + // Assumes OpParam contains the key "axis" + Chl axis = (Chl)op_param.at("axis"); + return new CPUmeanFunction(bn, name, threadCount, axis); } }; diff --git a/src/backends/cpu/op/CPUMergeOutput.cpp b/mllm/backends/cpu/op/CPUMergeOutput.cpp similarity index 92% rename from src/backends/cpu/op/CPUMergeOutput.cpp rename to mllm/backends/cpu/op/CPUMergeOutput.cpp index 2e39ad0e4..27b474bed 100644 --- a/src/backends/cpu/op/CPUMergeOutput.cpp +++ b/mllm/backends/cpu/op/CPUMergeOutput.cpp @@ -24,13 +24,13 @@ ErrorCode CPUMergeOutput::reshape(vector> inputs, vector> inputs, vector> outputs) { for (int i = 0; i < inputs.size(); i++) { if (inputs[i]->device() == MLLM_QNN || (inputs[i]->masterTensor() && inputs[i]->masterTensor()->device() == MLLM_QNN)) { - outputs[i]->shallowCopyFrom(inputs[i].get(), true); + outputs[i]->shallowCopyFrom(inputs[i], true); // set output backend to QNN to let the device() be QNN outputs[i]->setBackend(inputs[i]->backend()); } else { if (inputs[i]->allocted() != 0) inputs[i]->free(); outputs[i]->alloc(); - inputs[i]->shallowCopyFrom(outputs[i].get(), true); + inputs[i]->shallowCopyFrom(outputs[i], true); // set inputput backend to QNN to let the device() be QNN inputs[i]->setBackend(outputs[i]->backend()); } diff --git a/src/backends/cpu/op/CPUMergeOutput.hpp b/mllm/backends/cpu/op/CPUMergeOutput.hpp similarity index 100% rename from src/backends/cpu/op/CPUMergeOutput.hpp rename to mllm/backends/cpu/op/CPUMergeOutput.hpp diff --git a/src/backends/cpu/op/CPUMul.cpp b/mllm/backends/cpu/op/CPUMul.cpp similarity index 100% rename from src/backends/cpu/op/CPUMul.cpp rename to mllm/backends/cpu/op/CPUMul.cpp diff --git a/src/backends/cpu/op/CPUMul.hpp b/mllm/backends/cpu/op/CPUMul.hpp 
similarity index 100% rename from src/backends/cpu/op/CPUMul.hpp rename to mllm/backends/cpu/op/CPUMul.hpp diff --git a/src/backends/cpu/op/CPUMultimodalRoPE.cpp b/mllm/backends/cpu/op/CPUMultimodalRoPE.cpp similarity index 94% rename from src/backends/cpu/op/CPUMultimodalRoPE.cpp rename to mllm/backends/cpu/op/CPUMultimodalRoPE.cpp index 4985a6014..a80d451a6 100644 --- a/src/backends/cpu/op/CPUMultimodalRoPE.cpp +++ b/mllm/backends/cpu/op/CPUMultimodalRoPE.cpp @@ -7,7 +7,9 @@ #include #include // #include -#include "backends/cpu/quantize/QuantizeQ8.hpp" +#include "backends/cpu/third_party/ggml/QuantizeQ8.hpp" +#include // 用来计算 accumulate +#include // 用来断言 namespace mllm { @@ -38,6 +40,11 @@ void apply_multimodal_rotary_pos_emb( std::vector> &out_cos, std::vector> &out_sin, const std::vector &mrope_section) { + // 在函数一开始就检查尺寸! + long long total_mrope_dim = std::accumulate(mrope_section.begin(), mrope_section.end(), 0LL); + long long input_dim = in_cos[0][0].size(); + assert(total_mrope_dim <= input_dim && "CRITICAL ERROR: Sum of mrope_section exceeds input dimension!"); + int num_rows = in_cos[0].size(); int num_cols = in_cos[0][0].size(); // 初始化输出向量大小 @@ -77,6 +84,8 @@ void apply_multimodal_rotary_pos_emb( void multimodal_sinusoidal_position_embedding(shared_ptr position_ids, int seq_len, int output_dim, const vector &theta, vector> &sin, vector> &cos, float attention_scaling = 1.0, const std::vector &mrope_section = {}) { + sin.clear(); + cos.clear(); vector>> tmp_sin; vector>> tmp_cos; // assert(position_ids->dimension() == output_dim); @@ -97,6 +106,14 @@ void multimodal_sinusoidal_position_embedding(shared_ptr position_ids, i } if (!mrope_section.empty()) { apply_multimodal_rotary_pos_emb(tmp_cos, tmp_sin, cos, sin, mrope_section); + } else { + // 如果 mrope_section 为空,这是标准的 RoPE + // 需要把局部变量 tmp_cos/tmp_sin 的内容赋给输出参数 cos/sin + // 假设 batch size 为 1,所以我们取第一个元素 + if (!tmp_cos.empty()) { // 做个健壮性检查 + cos = std::move(tmp_cos[0]); // 使用 std::move 避免不必要的拷贝,效率更高 + sin 
= std::move(tmp_sin[0]); + } } } @@ -106,9 +123,6 @@ CPUMultimodalRoPE::CPUMultimodalRoPE(Backend *bn, string opName, float rope_thet rope_theta_ = rope_theta; pos_max_ = max_position_embeddings; mrope_section_ = mrope_section; - for (int i = 0; i < mrope_section.size(); i++) { - mrope_section_.push_back(mrope_section[i]); - } } ErrorCode CPUMultimodalRoPE::reshape(vector> inputs, vector> outputs) { @@ -132,12 +146,7 @@ ErrorCode CPUMultimodalRoPE::reshape(vector> inputs, vector(backend_); - if (cpuBackend->isStageSwitching()) { - h_cnt_ = cpuBackend->getCurSequenceLength(); - } -#endif + return Op::reshape(inputs, outputs); } @@ -317,10 +326,7 @@ ErrorCode CPUMultimodalRoPE::doExecute(vector> inputs, vector } } } - h_cnt_ += input->sequence(); - if (h_cnt_ >= pos_max_) { - h_cnt_ = 0; - } + return Op::execute(inputs, outputs); } diff --git a/src/backends/cpu/op/CPUMultimodalRoPE.hpp b/mllm/backends/cpu/op/CPUMultimodalRoPE.hpp similarity index 100% rename from src/backends/cpu/op/CPUMultimodalRoPE.hpp rename to mllm/backends/cpu/op/CPUMultimodalRoPE.hpp diff --git a/mllm/backends/cpu/op/CPUMultimodalRoPEPipeline.cpp b/mllm/backends/cpu/op/CPUMultimodalRoPEPipeline.cpp new file mode 100644 index 000000000..fd5df8941 --- /dev/null +++ b/mllm/backends/cpu/op/CPUMultimodalRoPEPipeline.cpp @@ -0,0 +1,329 @@ + +#include "CPUMultimodalRoPEPipeline.hpp" +#include "Context.hpp" +// #include "Timing.hpp" +#include "Types.hpp" +#include +#include +#include +// #include +#include "backends/cpu/third_party/ggml/QuantizeQ8.hpp" + +namespace mllm { + +vector CPUMultimodalRoPEPipeline::theta_; // inv_freq + +vector> CPUMultimodalRoPEPipeline::sin_; +vector> CPUMultimodalRoPEPipeline::cos_; +int CPUMultimodalRoPEPipeline::ishape_old; +int CPUMultimodalRoPEPipeline::last_pos; + +// to avoid conflict with CPUMultimodalRoPE +namespace pipeline_rope { +typedef float (*mllm_rope_init_func)(const OpParam &, std::vector &); + +float multimodal_default_init_rope(const OpParam &config, 
vector &theta) { + auto base = config.at("base"); // theta_i = base^-(2i/dim) = 1 / base^(2i/dim) i from 0 to (dim/2 - 1) + auto dim = config.at("dim"); + + theta.resize((int)(dim / 2)); +#pragma omp parallel for num_threads(4) + for (int i = 0; i < theta.size(); i++) + theta[i] = 1.0 / pow(base, 2.0 * i / dim); + + return 1.0; +} + +void apply_multimodal_rotary_pos_emb( + const std::vector>> &in_cos, + const std::vector>> &in_sin, + std::vector> &out_cos, + std::vector> &out_sin, + const std::vector &mrope_section) { + int num_rows = in_cos[0].size(); + int num_cols = in_cos[0][0].size(); + // 初始化输出向量大小 + out_cos.resize(num_rows, std::vector(num_cols)); + out_sin.resize(num_rows, std::vector(num_cols)); + // 计算每个块的起始列索引 + std::vector start_cols; + int current_start = 0; + start_cols.push_back(current_start); + for (int s : mrope_section) { + current_start += s; + start_cols.push_back(current_start); + } + // 遍历每个块 + for (int j = 0; j < mrope_section.size(); ++j) { + int layer = j % 3; + int s_j = mrope_section[j]; + int start_col_in = start_cols[j]; + int start_col_out = start_cols[j]; // 输出和输入的起始列相同 + for (int row = 0; row < num_rows; ++row) { + // 处理cos + const auto &in_cos_row = in_cos[layer][row]; + auto &out_cos_row = out_cos[row]; + for (int c = 0; c < s_j; ++c) { + out_cos_row[start_col_out + c] = in_cos_row[start_col_in + c]; + } + // 处理sin + const auto &in_sin_row = in_sin[layer][row]; + auto &out_sin_row = out_sin[row]; + for (int c = 0; c < s_j; ++c) { + out_sin_row[start_col_out + c] = in_sin_row[start_col_in + c]; + } + } + } +} + +void multimodal_sinusoidal_position_embedding(shared_ptr position_ids, int seq_len, int output_dim, const vector &theta, + vector> &sin, vector> &cos, float attention_scaling = 1.0, + const std::vector &mrope_section = {}) { + vector>> tmp_sin; + vector>> tmp_cos; + for (int b = 0; b < position_ids->batch(); ++b) { + vector> cos_freqs(position_ids->dimension(), std::vector(theta.size() * 2, 0)); + vector> 
sin_freqs(position_ids->dimension(), std::vector(theta.size() * 2, 0)); + for (int i = 0; i < theta.size(); ++i) { + for (int j = 0; j < position_ids->dimension(); ++j) { + auto value = theta[i] * position_ids->dataAt(b, 0, 0, j); + cos_freqs[j][i] = cosf(value) * attention_scaling; + cos_freqs[j][i + theta.size()] = cosf(value) * attention_scaling; + sin_freqs[j][i] = sinf(value) * attention_scaling; + sin_freqs[j][i + theta.size()] = sinf(value) * attention_scaling; + } + } + tmp_cos.push_back(cos_freqs); + tmp_sin.push_back(sin_freqs); + } + if (!mrope_section.empty()) { + apply_multimodal_rotary_pos_emb(tmp_cos, tmp_sin, cos, sin, mrope_section); + } +} +} // namespace pipeline_rope + +CPUMultimodalRoPEPipeline::CPUMultimodalRoPEPipeline(Backend *bn, string opName, float rope_theta, int max_position_embeddings, vector mrope_section, int threadCount) : + thread_count(threadCount), + Op(bn, opName) { + rope_theta_ = rope_theta; + pos_max_ = max_position_embeddings; + mrope_section_ = mrope_section; + for (int i = 0; i < mrope_section.size(); i++) { + mrope_section_.push_back(mrope_section[i]); + } +} + +ErrorCode CPUMultimodalRoPEPipeline::reshape(vector> inputs, vector> outputs) { + assert(inputs.size() == 2); + assert(outputs.size() == 1); + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); + ishape = inputs[0]->dimension() * partial_rotary_factor_; + // pos_max_ = 16384; + auto position_ids = inputs[1]; + + if (sin_.empty() || ishape_old < ishape || position_ids->dataAt(0, 0, 0, position_ids->dimension() - 1) != last_pos) { + auto config = config_; + config["base"] = (float)rope_theta_; + config["dim"] = ishape; + float attention_scaling = pipeline_rope::multimodal_default_init_rope(config, theta_); + ishape_old = ishape; + last_pos = position_ids->dataAt(0, 0, 0, position_ids->dimension() - 1); + pipeline_rope::multimodal_sinusoidal_position_embedding(position_ids, pos_max_, ishape, theta_, sin_, cos_, 
attention_scaling, mrope_section_); + } + + // if in switching, reset the h_cnt_ + if (Context::Instance().inference_state().isStageSwitching()) { + if (Context::Instance().inference_state().getExecutionType() == PROMPT) { + // set to 0/chunk_size*iter when in prefill stage + h_cnt_ = Context::Instance().inference_state().getCurSequenceLength(); + } else { + // when switch to decoding, reset the h_cnt_ to 0 + h_cnt_ = 0; + } + } + return Op::reshape(inputs, outputs); +} + +void CPUMultimodalRoPEPipeline::multimodal_rope_hf(shared_ptr input, shared_ptr output) { + auto out_dtype = output->dtype(); + int partial_dimension = (input->dimension()) * partial_rotary_factor_; + int half = (int)(partial_dimension / 2); + assert(partial_dimension % 2 == 0); + + const int seq_offset = h_cnt_; + if (Context::Instance().inference_state().getExecutionType() == PROMPT) { + // increment the h_cnt_ when in prefill stage + h_cnt_ += input->sequence(); + } + + if (output->ctype() == BSHD) { + if (input->dtype() == MLLM_TYPE_F16) { +#pragma omp parallel for collapse(4) num_threads(thread_count) + for (int n = 0; n < input->batch(); ++n) { + for (int h = 0; h < input->head(); ++h) { + for (int s = 0; s < input->sequence(); ++s) { // sequance + for (int d = 0; d < partial_dimension / 2; ++d) { + auto v = input->ptrAt(n, h, s, d); + auto o = output->ptrAt(n, h, s, d); + float in_value = static_cast(v[0]); + float in_value_2 = static_cast(v[half]); + float sin_value = sin_[s + seq_offset][d]; + float cos_value = cos_[s + seq_offset][d]; + auto value = in_value * cos_value - in_value_2 * sin_value; + auto value2 = in_value * sin_value + in_value_2 * cos_value; + o[0] = MLLM_FP32_TO_FP16(value); + o[half] = MLLM_FP32_TO_FP16(value2); + } + } + } + } + + } else { + if (out_dtype == MLLM_TYPE_F32) { +#pragma omp parallel for collapse(4) num_threads(thread_count) + for (int n = 0; n < input->batch(); ++n) { + for (int h = 0; h < input->head(); ++h) { + for (int s = 0; s < input->sequence(); 
++s) { // sequance + for (int d = 0; d < partial_dimension / 2; ++d) { + auto v = input->ptrAt(n, h, s, d); + auto o = output->ptrAt(n, h, s, d); + float in_value = v[0]; + float in_value_2 = v[half]; + float sin_value = sin_[s + seq_offset][d]; + float cos_value = cos_[s + seq_offset][d]; + auto value = in_value * cos_value - in_value_2 * sin_value; + auto value2 = in_value * sin_value + in_value_2 * cos_value; + o[0] = value; + o[half] = value2; + } + } + } + } + } else if (out_dtype == MLLM_TYPE_F16) { +#pragma omp parallel for collapse(4) num_threads(thread_count) + for (int n = 0; n < input->batch(); ++n) { + for (int h = 0; h < input->head(); ++h) { + for (int s = 0; s < input->sequence(); ++s) { // sequance + for (int d = 0; d < partial_dimension / 2; ++d) { + auto v = input->ptrAt(n, h, s, d); + auto o = output->ptrAt(n, h, s, d); + float in_value = v[0]; + float in_value_2 = v[half]; + float sin_value = sin_[s + seq_offset][d]; + float cos_value = cos_[s + seq_offset][d]; + auto value = in_value * cos_value - in_value_2 * sin_value; + auto value2 = in_value * sin_value + in_value_2 * cos_value; + o[0] = MLLM_FP32_TO_FP16(value); + o[half] = MLLM_FP32_TO_FP16(value2); + } + } + } + } + } + } + return; + } +#pragma omp parallel for collapse(4) num_threads(thread_count) + for (int n = 0; n < input->batch(); ++n) { + for (int h = 0; h < input->head(); ++h) { + for (int s = 0; s < input->sequence(); ++s) { // sequance + for (int d = 0; d < partial_dimension / 2; ++d) { + if (input->dtype() == MLLM_TYPE_F16) { + float in_value = static_cast(input->dataAt(n, h, s, d)); + float in_value_2 = static_cast(input->dataAt(n, h, s, d + partial_dimension / 2)); + float sin_value = sin_[s + seq_offset][d]; + float cos_value = cos_[s + seq_offset][d]; + auto value = in_value * cos_value - in_value_2 * sin_value; + auto value2 = in_value * sin_value + in_value_2 * cos_value; + if (out_dtype == MLLM_TYPE_F32) { + output->setDataAt(n, h, s, d, value); + output->setDataAt(n, h, 
s, d + partial_dimension / 2, value2); + } else if (out_dtype == MLLM_TYPE_F16) { + output->setDataAt(n, h, s, d, MLLM_FP32_TO_FP16(value)); + output->setDataAt(n, h, s, d + partial_dimension / 2, MLLM_FP32_TO_FP16(value2)); + } + + } else { + float in_value = input->dataAt(n, h, s, d); + float in_value_2 = input->dataAt(n, h, s, d + partial_dimension / 2); + float sin_value = sin_[s + seq_offset][d]; + float cos_value = cos_[s + seq_offset][d]; + auto value = in_value * cos_value - in_value_2 * sin_value; + auto value2 = in_value * sin_value + in_value_2 * cos_value; + if (out_dtype == MLLM_TYPE_F32) { + output->setDataAt(n, h, s, d, value); + output->setDataAt(n, h, s, d + partial_dimension / 2, value2); + } else if (out_dtype == MLLM_TYPE_F16) { + output->setDataAt(n, h, s, d, MLLM_FP32_TO_FP16(value)); + output->setDataAt(n, h, s, d + partial_dimension / 2, MLLM_FP32_TO_FP16(value2)); + } + } + } + } + } + } +} + +// TODO: Q8_0 KVCache can not use!! +ErrorCode CPUMultimodalRoPEPipeline::execute(vector> inputs, vector> outputs) { + if (outputs[0]->dtype() == MLLM_TYPE_Q8_0) { + auto tmp_out = std::make_shared(outputs[0]->backend()); + // tmp_out->setBackend(outputs[0]->backend()); + auto b = outputs[0]->batch(); + auto h = outputs[0]->head(); + auto d = outputs[0]->dimension(); + auto s = outputs[0]->sequence(); + tmp_out->chls() = outputs[0]->chls(); + tmp_out->setCtype(outputs[0]->ctype()); + tmp_out->reshape(b, h, s, d); + tmp_out->setDtype(MLLM_TYPE_F32); + tmp_out->alloc(); + doExecute(inputs, {tmp_out}); +#pragma omp parallel for collapse(3) num_threads(thread_count) + for (int b = 0; b < tmp_out->batch(); b++) { + for (int h = 0; h < tmp_out->head(); h++) { + for (int s = 0; s < tmp_out->sequence(); s++) { + quantize_row_q8_0(tmp_out->hostPtr() + tmp_out->offset(b, h, s, 0), + (char *)outputs[0]->rawHostPtr() + + outputs[0]->offset(b, h, s, 0) * sizeof(block_q8_0) / QK8_0, + tmp_out->dimension()); + } + } + } + return MLLM_NO_ERROR; + } else { + return 
doExecute(inputs, outputs); + } +} +ErrorCode CPUMultimodalRoPEPipeline::doExecute(vector> inputs, vector> outputs) { + auto &input = inputs[0]; + auto &output = outputs[0]; + auto out_dtype = output->dtype(); + int partial_dimension = (input->dimension()) * partial_rotary_factor_; + // auto start_t = mllm_time_us(); + multimodal_rope_hf(input, output); +#pragma omp parallel for collapse(4) num_threads(thread_count) + for (int n = 0; n < input->batch(); ++n) { + for (int h = 0; h < input->head(); ++h) { + for (int s = 0; s < input->sequence(); ++s) { + for (int d = partial_dimension; d < input->dimension(); ++d) { + if (out_dtype == MLLM_TYPE_F32) { + output->setDataAt(n, h, s, d, input->dataAt(n, h, s, d)); + } else if (out_dtype == MLLM_TYPE_F16) { + output->setDataAt(n, h, s, d, MLLM_FP32_TO_FP16(input->dataAt(n, h, s, d))); + } + } + } + } + } + + return Op::execute(inputs, outputs); +} + +ErrorCode CPUMultimodalRoPEPipeline::load(AbstructLoader &loader) { + return Op::load(loader); +} +ErrorCode CPUMultimodalRoPEPipeline::free(vector> inputs, vector> outputs) { + return Op::free(inputs, outputs); +} +} // namespace mllm diff --git a/mllm/backends/cpu/op/CPUMultimodalRoPEPipeline.hpp b/mllm/backends/cpu/op/CPUMultimodalRoPEPipeline.hpp new file mode 100644 index 000000000..9b37e0948 --- /dev/null +++ b/mllm/backends/cpu/op/CPUMultimodalRoPEPipeline.hpp @@ -0,0 +1,72 @@ +#ifndef MLLM_CPUMULTIMODALROPE_PIPELINE_H +#define MLLM_CPUMULTIMODALROPE_PIPELINE_H + +#include "Op.hpp" +#include "../CPUBackend.hpp" + +namespace mllm { + +class CPUMultimodalRoPEPipeline final : public Op { +public: + CPUMultimodalRoPEPipeline(Backend *bn, string opName, float rope_theta, int max_position_embeddings, vector mrope_section, int threadCount); + + virtual ~CPUMultimodalRoPEPipeline() = default; + virtual ErrorCode reshape(vector> inputs, vector> outputs) override; + virtual ErrorCode load(AbstructLoader &loader) override; + virtual ErrorCode execute(vector> inputs, vector> 
outputs) override; + virtual ErrorCode free(vector> inputs, vector> outputs) override; + ErrorCode doExecute(vector> inputs, vector> outputs); + +private: + static vector theta_; // inv_freq + static vector> sin_; + static vector> cos_; + static int ishape_old; + static int last_pos; + vector mrope_section_; + int rope_theta_ = 10000; + int h_cnt_ = 0; + int pos_max_ = 16384; + int ishape; + int thread_count = 4; + float partial_rotary_factor_ = 1; + + OpParam config_; + + RoPEThetaType rope_type = DEFAULT; + + void multimodal_rope_hf(shared_ptr input, shared_ptr output); + void clearCache() override { + h_cnt_ = 0; + } +}; + +class CPUMultimodalRoPEPipelineCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const { + // int pose_type = op_param["pose_type"]; + // if (op_param.find("rope_theta") == op_param.end()) { + // return new CPUMultimodalRoPEPipeline(bn, name, pose_type, threadCount); + // } + // float rope_theta = op_param["rope_theta"]; + // int max_position_embeddings = op_param["max_position_embeddings"]; + // if (op_param.find("partial_rotary_factor") == op_param.end()) { + // return new CPUMultimodalRoPEPipeline(bn, name, pose_type, rope_theta, max_position_embeddings, threadCount); + // } + // float partial_rotary_factor = op_param["partial_rotary_factor"]; + // return new CPUMultimodalRoPEPipeline(bn, name, pose_type, rope_theta, partial_rotary_factor, max_position_embeddings, threadCount); + + // int pose_type = op_param["pose_type"]; + float rope_theta = op_param["rope_theta"]; + int max_position_embeddings = op_param["max_position_embeddings"]; + int length = op_param.size() - 3; + vector mrope_section; + for (int i = 0; i < length; i++) { + mrope_section.push_back((int)op_param["mrope_section_" + std::to_string(i)]); + } + return new CPUMultimodalRoPEPipeline(bn, name, rope_theta, max_position_embeddings, mrope_section, threadCount); + } +}; +} // namespace mllm + 
+#endif // MLLM_CPUMULTIMODALROPE_PIPELINE_H \ No newline at end of file diff --git a/src/backends/cpu/op/CPUNTKRoPE.cpp b/mllm/backends/cpu/op/CPUNTKRoPE.cpp similarity index 99% rename from src/backends/cpu/op/CPUNTKRoPE.cpp rename to mllm/backends/cpu/op/CPUNTKRoPE.cpp index 605761778..fa323cbbb 100644 --- a/src/backends/cpu/op/CPUNTKRoPE.cpp +++ b/mllm/backends/cpu/op/CPUNTKRoPE.cpp @@ -12,7 +12,7 @@ #include "Types.hpp" #include #include -#include "backends/cpu/quantize/QuantizeQ8.hpp" +#include "backends/cpu/third_party/ggml/QuantizeQ8.hpp" namespace mllm { diff --git a/src/backends/cpu/op/CPUNTKRoPE.hpp b/mllm/backends/cpu/op/CPUNTKRoPE.hpp similarity index 100% rename from src/backends/cpu/op/CPUNTKRoPE.hpp rename to mllm/backends/cpu/op/CPUNTKRoPE.hpp diff --git a/src/backends/cpu/op/CPUNorm.cpp b/mllm/backends/cpu/op/CPUNorm.cpp similarity index 100% rename from src/backends/cpu/op/CPUNorm.cpp rename to mllm/backends/cpu/op/CPUNorm.cpp diff --git a/src/backends/cpu/op/CPUNorm.hpp b/mllm/backends/cpu/op/CPUNorm.hpp similarity index 100% rename from src/backends/cpu/op/CPUNorm.hpp rename to mllm/backends/cpu/op/CPUNorm.hpp diff --git a/mllm/backends/cpu/op/CPUNormFunc.hpp b/mllm/backends/cpu/op/CPUNormFunc.hpp new file mode 100644 index 000000000..864afa865 --- /dev/null +++ b/mllm/backends/cpu/op/CPUNormFunc.hpp @@ -0,0 +1,80 @@ +// +// Created by Rongjie Yi on 24-2-26. 
+// + +#ifndef CPUNORMFUNC_HPP +#define CPUNORMFUNC_HPP + +#include "CPUBackend.hpp" +#include "Tensor.hpp" +#include "Types.hpp" +#include // For std::sqrt and std::abs +#include + +namespace mllm { +class Tensor; + +class CPUnormFunction : public Op { +private: + int thread_count = 4; + int L_n_; + +public: + CPUnormFunction(Backend *bn, string name, int threadCount, int L_n) + : Op(bn, name), thread_count(threadCount), L_n_(L_n) {} + + ErrorCode reshape(vector> inputs, vector> outputs) override { + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); + outputs[0]->setDtype(inputs[0]->dtype()); + // 遵从原始 reshape 逻辑,在这里 alloc + // outputs[0]->alloc(); + return MLLM_NO_ERROR; + } + + ErrorCode execute(vector> inputs, vector> outputs) override { + // Parallelize the outer loops for better efficiency + #pragma omp parallel for collapse(3) num_threads(thread_count) + for (int n = 0; n < inputs[0]->batch(); n++) { + for (int h = 0; h < inputs[0]->head(); h++) { + for (int s = 0; s < inputs[0]->sequence(); s++) { + if (L_n_ == 2) { // L2 Norm + float sum_of_squares = 0.0f; + for (int d = 0; d < inputs[0]->dimension(); ++d) { + float val = inputs[0]->dataAt(n, h, s, d); + sum_of_squares += val * val; + } + float l2_norm = std::sqrt(sum_of_squares); + + // Broadcast the norm value across the dimension + for (int d = 0; d < inputs[0]->dimension(); d++) { + outputs[0]->setDataAt(n, h, s, d, l2_norm); + } + } else { // L1 Norm (or other) + float sum_of_abs_values = 0.0f; + for (int d = 0; d < inputs[0]->dimension(); ++d) { + sum_of_abs_values += std::abs(inputs[0]->dataAt(n, h, s, d)); + } + + // Broadcast the norm value across the dimension + for (int d = 0; d < inputs[0]->dimension(); d++) { + outputs[0]->setDataAt(n, h, s, d, sum_of_abs_values); + } + } + } + } + } + return MLLM_NO_ERROR; + } +}; + +class CPUnormFunctionCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, 
string name, int threadCount) const override { + // Assumes OpParam contains the key "L_n" + int L_n = static_cast(op_param.at("L_n")); + return new CPUnormFunction(bn, name, threadCount, L_n); + } +}; + +} // namespace mllm +#endif // CPUNORMFUNC_HPP \ No newline at end of file diff --git a/src/backends/cpu/op/CPUParameter.cpp b/mllm/backends/cpu/op/CPUParameter.cpp similarity index 77% rename from src/backends/cpu/op/CPUParameter.cpp rename to mllm/backends/cpu/op/CPUParameter.cpp index 5bd0650f0..5ec2b4c9e 100644 --- a/src/backends/cpu/op/CPUParameter.cpp +++ b/mllm/backends/cpu/op/CPUParameter.cpp @@ -11,14 +11,14 @@ CPUParameter::CPUParameter(Backend *bn, string opName, int batch, int head, int head_ = head; seq_ = seq; dim_ = dim; - weight_.setBackend(bn); + weight_ = std::make_shared(bn); } ErrorCode CPUParameter::reshape(vector> inputs, vector> outputs) { // outputs[0] = std::make_shared(weight_); if (outputs[0]->masterTensor() == nullptr) { - outputs[0]->shallowCopyFrom(&weight_, false); + outputs[0]->shallowCopyFrom(weight_, false); } outputs[0]->reshape(batch_, head_, seq_, dim_); @@ -26,36 +26,34 @@ ErrorCode CPUParameter::reshape(vector> inputs, vectorsetName(name()); + weight_->reshape(batch_, head_, seq_, dim_); + if (loader.getDataType(weight_->name()) != MLLM_TYPE_COUNT) { + weight_->setDtype(loader.getDataType(weight_->name())); + weight_->alloc(); + loader.load(weight_); } else { - weight_.setDtype(MLLM_TYPE_F32); - weight_.alloc(); + weight_->setDtype(MLLM_TYPE_F32); + weight_->alloc(); } return Op::load(loader); } ErrorCode CPUParameter::execute(vector> inputs, vector> outputs) { - // outputs[0] = std::make_shared(weight_); - - if (outputs[0]->masterTensor()->name() != weight_.name()) { + if (outputs[0]->masterTensor()->name() != weight_->name()) { if (outputs[0]->masterTensor() == nullptr) { // outputs[0]->copyFrom(weight_); for (int n = 0; n < outputs[0]->batch(); ++n) { for (int c = 0; c < outputs[0]->head(); ++c) { for (int h = 0; h < 
outputs[0]->sequence(); ++h) { for (int w = 0; w < outputs[0]->dimension(); ++w) { - outputs[0]->setDataAt(n, c, h, w, weight_.dataAt(n, c, h, w)); + outputs[0]->setDataAt(n, c, h, w, weight_->dataAt(n, c, h, w)); } } } } } else { - if (weight_.batch() == 1) { + if (weight_->batch() == 1) { auto off = outputs[0]->shapeOffset(); auto off_b = off[0]; auto off_h = off[1]; @@ -65,7 +63,7 @@ ErrorCode CPUParameter::execute(vector> inputs, vectorhead(); ++c) { for (int h = 0; h < outputs[0]->sequence(); ++h) { for (int w = 0; w < outputs[0]->dimension(); ++w) { - outputs[0]->masterTensor()->setDataAt(n + off_b, c + off_h, h + off_s_, w + off_d, weight_.dataAt(0, c, h, w)); + outputs[0]->masterTensor()->setDataAt(n + off_b, c + off_h, h + off_s_, w + off_d, weight_->dataAt(0, c, h, w)); } } } @@ -78,12 +76,12 @@ ErrorCode CPUParameter::execute(vector> inputs, vector> inputs, vector> outputs) { - weight_.free(); + weight_->free(); return Op::free(inputs, outputs); } ErrorCode CPUParameter::setUp(vector> inputs, vector> outputs) { - outputs[0]->shallowCopyFrom(&weight_, false); + outputs[0]->shallowCopyFrom(weight_, false); return MLLM_NO_ERROR; } } // namespace mllm diff --git a/src/backends/cpu/op/CPUParameter.hpp b/mllm/backends/cpu/op/CPUParameter.hpp similarity index 92% rename from src/backends/cpu/op/CPUParameter.hpp rename to mllm/backends/cpu/op/CPUParameter.hpp index 20dc7eae3..7b0d3dbe7 100644 --- a/src/backends/cpu/op/CPUParameter.hpp +++ b/mllm/backends/cpu/op/CPUParameter.hpp @@ -4,6 +4,7 @@ #include "Op.hpp" #include "../CPUBackend.hpp" +#include namespace mllm { @@ -17,13 +18,13 @@ class CPUParameter final : public Op { virtual ErrorCode free(vector> inputs, vector> outputs) override; virtual ErrorCode setUp(vector> inputs, vector> outputs) override; - Tensor &weight() { - return weight_; - } + // Tensor &weight() { + // return weight_; + // } private: int thread_count = 4; - Tensor weight_; + shared_ptr weight_; int batch_; int head_; int seq_; diff --git 
a/mllm/backends/cpu/op/CPUPhi3VhdmergeFunc.hpp b/mllm/backends/cpu/op/CPUPhi3VhdmergeFunc.hpp new file mode 100644 index 000000000..61e1f06a8 --- /dev/null +++ b/mllm/backends/cpu/op/CPUPhi3VhdmergeFunc.hpp @@ -0,0 +1,102 @@ +// +// Created by Rongjie Yi on 24-2-26. +// + +#ifndef CPUPHI3VHDMERGEEFUNC_HPP +#define CPUPHI3VHDMERGEEFUNC_HPP + +#include "Tensor.hpp" +#include "Types.hpp" +#include "CPUBackend.hpp" +#include +#include // For std::sqrt + +namespace mllm { +class Tensor; + +class CPUPhi3VhdmergeFunction : public Op { +private: + int thread_count = 4; + int h_crop_; + int w_crop_; + +public: + CPUPhi3VhdmergeFunction(Backend *bn, string name, int threadCount, int h_crop, int w_crop) + : Op(bn, name), thread_count(threadCount), h_crop_(h_crop), w_crop_(w_crop) {} + + ErrorCode reshape(vector> inputs, vector> outputs) override { + int N = inputs[0]->batch(); + int L = inputs[0]->sequence(); + int C = inputs[0]->dimension(); + assert(L == 24 * 24); + assert(C == 1024); + assert(N % (h_crop_ * w_crop_) == 0); + + int num_images = N / (h_crop_ * w_crop_); + int H = static_cast(std::sqrt(L)); + + int b = num_images; + int s = h_crop_ * H / 2; + int h = w_crop_ * H / 2; + int d = 4 * C; + + outputs[0]->reshape(b, h, s, d); + outputs[0]->setDtype(inputs[0]->dtype()); + // 遵从原始 reshape 逻辑,在这里 alloc + // outputs[0]->alloc(); + return MLLM_NO_ERROR; + } + + ErrorCode execute(vector> inputs, vector> outputs) override { + int N = inputs[0]->batch(); + int L = inputs[0]->sequence(); + int C = inputs[0]->dimension(); + int num_images = N / (h_crop_ * w_crop_); + int H = static_cast(std::sqrt(L)); + + int b = num_images; + int s = h_crop_ * H / 2; + int h = w_crop_ * H / 2; + int d = 4 * C; + + #pragma omp parallel for collapse(3) num_threads(thread_count) + for (int ob = 0; ob < b; ob++) { + for (int os = 0; os < s; os++) { + for (int oh = 0; oh < h; oh++) { + int base_s = ((oh / 12) * (24 * 24)) + (os * 48) + (2 * (oh % 12)); + int hed = base_s % L; + int btch = (ob * 
(h_crop_ * w_crop_)) + (base_s / L); + + auto i_ptr_0 = inputs[0]->ptrAt(btch, 0, hed, 0); + auto i_ptr_1 = inputs[0]->ptrAt(btch, 0, hed + 1, 0); + auto i_ptr_2 = inputs[0]->ptrAt(btch, 0, hed + 24, 0); + auto i_ptr_3 = inputs[0]->ptrAt(btch, 0, hed + 25, 0); + + size_t copy_size = (size_t)C * inputs[0]->dtypeSize(); + + memcpy(outputs[0]->ptrAt(ob, oh, os, 0), + i_ptr_0, copy_size); + memcpy(outputs[0]->ptrAt(ob, oh, os, C), + i_ptr_1, copy_size); + memcpy(outputs[0]->ptrAt(ob, oh, os, C * 2), + i_ptr_2, copy_size); + memcpy(outputs[0]->ptrAt(ob, oh, os, C * 3), + i_ptr_3, copy_size); + } + } + } + return MLLM_NO_ERROR; + } +}; + +class CPUPhi3VhdmergeFunctionCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + int h_crop = static_cast(op_param.at("h_crop")); + int w_crop = static_cast(op_param.at("w_crop")); + return new CPUPhi3VhdmergeFunction(bn, name, threadCount, h_crop, w_crop); + } +}; + +} // namespace mllm +#endif // CPUPHI3VHDMERGEEFUNC_HPP \ No newline at end of file diff --git a/src/backends/cpu/op/CPUPoEmbedding.cpp b/mllm/backends/cpu/op/CPUPoEmbedding.cpp similarity index 100% rename from src/backends/cpu/op/CPUPoEmbedding.cpp rename to mllm/backends/cpu/op/CPUPoEmbedding.cpp diff --git a/src/backends/cpu/op/CPUPoEmbedding.hpp b/mllm/backends/cpu/op/CPUPoEmbedding.hpp similarity index 100% rename from src/backends/cpu/op/CPUPoEmbedding.hpp rename to mllm/backends/cpu/op/CPUPoEmbedding.hpp diff --git a/src/backends/cpu/op/CPUPosition.cpp b/mllm/backends/cpu/op/CPUPosition.cpp similarity index 100% rename from src/backends/cpu/op/CPUPosition.cpp rename to mllm/backends/cpu/op/CPUPosition.cpp diff --git a/src/backends/cpu/op/CPUPosition.hpp b/mllm/backends/cpu/op/CPUPosition.hpp similarity index 100% rename from src/backends/cpu/op/CPUPosition.hpp rename to mllm/backends/cpu/op/CPUPosition.hpp diff --git a/src/backends/cpu/op/CPUPredictor.cpp 
b/mllm/backends/cpu/op/CPUPredictor.cpp similarity index 98% rename from src/backends/cpu/op/CPUPredictor.cpp rename to mllm/backends/cpu/op/CPUPredictor.cpp index d3e17c1b7..6c44ec010 100644 --- a/src/backends/cpu/op/CPUPredictor.cpp +++ b/mllm/backends/cpu/op/CPUPredictor.cpp @@ -1,6 +1,6 @@ #include "CPUPredictor.hpp" -#include "../compute/VecDotType.hpp" +#include "backends/cpu/third_party/ggml/VecDotType.hpp" #include "../compute/Matmul.hpp" #include diff --git a/src/backends/cpu/op/CPUPredictor.hpp b/mllm/backends/cpu/op/CPUPredictor.hpp similarity index 100% rename from src/backends/cpu/op/CPUPredictor.hpp rename to mllm/backends/cpu/op/CPUPredictor.hpp diff --git a/mllm/backends/cpu/op/CPUQuantize.cpp b/mllm/backends/cpu/op/CPUQuantize.cpp new file mode 100644 index 000000000..2508b9e7d --- /dev/null +++ b/mllm/backends/cpu/op/CPUQuantize.cpp @@ -0,0 +1,122 @@ +// +// Created by Daliang Xu on 2024/04/18. +// + +#include "CPUQuantize.hpp" +#include "Types.hpp" +#include "backends/cpu/third_party/ggml/QuantizeQ8.hpp" + +#include +#include +#include + +namespace mllm { +CPUQuantize::CPUQuantize(Backend *bn, string opName, DataType type, int threadCount) : + thread_count(threadCount), + Op(bn, std::move(opName)) { + assert(type == MLLM_TYPE_I8 || type == MLLM_TYPE_I16); + activation_dtype_ = type; + scale_.setBackend(bn); +} + +ErrorCode CPUQuantize::reshape(vector> inputs, vector> outputs) { + assert(inputs.size() == 1); + assert(outputs.size() == 1); + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); + return Op::reshape(inputs, outputs); +} + +ErrorCode CPUQuantize::execute(vector> inputs, vector> outputs) { + auto input = inputs[0]; + auto output = outputs[0]; + int batch = input->batch(); + int head = input->head(); + int seq = input->sequence(); + int dim = input->dimension(); + + float quantScale = 0; + // quantScale = scale_.hostPtr()[0] / 127.0; + // quantScale = roundf(quantScale * 100000) / 
100000; + switch (activation_dtype_) { + case MLLM_TYPE_I8: + quantScale = scale_.hostPtr()[0] / (pow(2, 7) - 1); + break; + case MLLM_TYPE_I16: + quantScale = scale_.hostPtr()[0] / (pow(2, 15) - 1); + break; + default: + return NOT_SUPPORT; + } + // quantScale = roundf(quantScale * 100000) / 100000; + + auto src0 = inputs[0]; + auto out0 = outputs[0]; + + if (activation_dtype_ == MLLM_TYPE_I8) { +#pragma omp parallel for collapse(3) num_threads(thread_count) + for (int b = 0; b < batch; b++) { + for (int h = 0; h < head; h++) { + for (int s = 0; s < seq; s++) { + quantize_row_i8(src0->hostPtr() + src0->offset(b, h, s, 0), + out0->hostPtr() + out0->offset(b, h, s, 0), + dim, quantScale); + } + } + } + } else if (activation_dtype_ == MLLM_TYPE_I16) { +#pragma omp parallel for collapse(3) num_threads(thread_count) + for (int b = 0; b < batch; b++) { + for (int h = 0; h < head; h++) { + for (int s = 0; s < seq; s++) { + quantize_row_i16(src0->hostPtr() + src0->offset(b, h, s, 0), + out0->hostPtr() + out0->offset(b, h, s, 0), + dim, quantScale); + } + } + } + } else { + return NOT_SUPPORT; + } + + return Op::execute(inputs, outputs); +} + +ErrorCode CPUQuantize::setUp(vector> inputs, vector> outputs) { + float quantScale; + switch (activation_dtype_) { + case MLLM_TYPE_I8: + quantScale = scale_.hostPtr()[0] / (pow(2, 7) - 1); + outputs[0]->quant_param.scale = quantScale; + break; + case MLLM_TYPE_I16: + quantScale = scale_.hostPtr()[0] / (pow(2, 15) - 1); + outputs[0]->quant_param.scale = quantScale; + break; + default: + return NOT_SUPPORT; + } + return Op::setUp(inputs, outputs); +} + +ErrorCode CPUQuantize::free(vector> inputs, vector> outputs) { + return Op::free(inputs, outputs); +} + +ErrorCode CPUQuantize::load(AbstructLoader &loader) { + string scaleName = name(); + + std::string wordToRemove = "quantize"; + int pos = scaleName.find(wordToRemove); + if (pos != -1) { + scaleName.erase(pos, wordToRemove.length()); + } + + scale_.setName(scaleName + 
"input_scale"); + scale_.reshape(1, 1, 1, 1); + scale_.setDtype(MLLM_TYPE_F32); + scale_.alloc(); + loader.load(&scale_); + + return Op::load(loader); +} +} // namespace mllm \ No newline at end of file diff --git a/src/backends/cpu/op/CPUQuantize.hpp b/mllm/backends/cpu/op/CPUQuantize.hpp similarity index 87% rename from src/backends/cpu/op/CPUQuantize.hpp rename to mllm/backends/cpu/op/CPUQuantize.hpp index df3751ee1..13df2b2e0 100644 --- a/src/backends/cpu/op/CPUQuantize.hpp +++ b/mllm/backends/cpu/op/CPUQuantize.hpp @@ -7,10 +7,11 @@ #include "Op.hpp" #include "../CPUBackend.hpp" +#include "Types.hpp" namespace mllm { class CPUQuantize final : public Op { public: - CPUQuantize(Backend *bn, string opName, int threadCount); + CPUQuantize(Backend *bn, string opName, DataType type, int threadCount); virtual ~CPUQuantize() = default; virtual ErrorCode reshape(vector> inputs, vector> outputs) override; virtual ErrorCode execute(vector> inputs, vector> outputs) override; @@ -36,7 +37,7 @@ class CPUQuantize final : public Op { class CPUQuantizeCreator : public CPUBackend::Creator { public: virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const { - return new CPUQuantize(bn, name, threadCount); + return new CPUQuantize(bn, name, (DataType)op_param["dtype"], threadCount); } }; } // namespace mllm diff --git a/src/backends/cpu/op/CPUQuickGELU.cpp b/mllm/backends/cpu/op/CPUQuickGELU.cpp similarity index 61% rename from src/backends/cpu/op/CPUQuickGELU.cpp rename to mllm/backends/cpu/op/CPUQuickGELU.cpp index d7bc7092a..00671ffb6 100644 --- a/src/backends/cpu/op/CPUQuickGELU.cpp +++ b/mllm/backends/cpu/op/CPUQuickGELU.cpp @@ -1,9 +1,11 @@ #include "CPUQuickGELU.hpp" +#include "backends/cpu/third_party/ggml/Quantize.hpp" namespace mllm { -CPUQuickGELU::CPUQuickGELU(Backend *bn, string opName, int threadCount) : thread_count(threadCount), +CPUQuickGELU::CPUQuickGELU(Backend *bn, string opName, int threadCount) : + thread_count(threadCount), 
Op(bn, opName) { if (!init_table_gelu_quick_f16_flag) { init_table_gelu_quick_f16(); @@ -18,7 +20,6 @@ ErrorCode CPUQuickGELU::reshape(vector> inputs, vector> inputs, vector> outputs) { auto input = inputs[0]; auto output = outputs[0]; @@ -27,15 +28,15 @@ ErrorCode CPUQuickGELU::execute(vector> inputs, vectorsequence(); int dim = input->dimension(); #pragma omp parallel for collapse(3) num_threads(thread_count) - for (int b = 0; b dataAt(b, h, s, d); -// output->setDataAt(b, h, s, d, value * (1 / (1 + std::exp(-1.702 * value)))); -// } - mllm_vec_gelu_quick_f32(dim, outputs[0]->ptrAt(b, h, s,0), - inputs[0]->ptrAt(b, h, s,0)); + // for (int d = 0; d < dim; ++d) { + // float value = input->dataAt(b, h, s, d); + // output->setDataAt(b, h, s, d, value * (1 / (1 + std::exp(-1.702 * value)))); + // } + mllm_vec_gelu_quick_f32(dim, outputs[0]->ptrAt(b, h, s, 0), + inputs[0]->ptrAt(b, h, s, 0)); } } } @@ -43,4 +44,3 @@ ErrorCode CPUQuickGELU::execute(vector> inputs, vector +#include "CPURMSNorm.hpp" +#include "Tensor.hpp" +#include "Timing.hpp" +#include "backends/cpu/third_party/ggml/VecDotFP32.hpp" +#include "backends/cpu/third_party/ggml/VecDotQ4.hpp" + +namespace mllm { + +// int32_t opp = 897988541; + +// int32_t op_params[1]; +CPURMSNorm::CPURMSNorm(Backend *bn, string opName, int normSize, float epsilon, bool add_unit_offset_, int threadCount) : + thread_count(threadCount), add_unit_offset_(add_unit_offset_), + Op(bn, opName), epsilon_(epsilon) { + // op_params[0] = 897988541;s, sizeof(float)); + // memcpy(&epsilon_, op_param) + normSize_ = normSize; + weight_.setBackend(bn); +} + +ErrorCode CPURMSNorm::reshape(vector> inputs, vector> outputs) { + // RMSNorm is similar to LayerNorm which operates on the channel dimension. 
+ assert(normSize_ == inputs[0]->dimension()); + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); + // outputs[0]->setDtype(activationDtype()); + // std::cout << name() << " CPURMSNorm reshape" << std::endl; + return Op::reshape(inputs, outputs); +} + +ErrorCode CPURMSNorm::execute(vector> inputs, vector> outputs) { + auto input = inputs[0]; + int batch = input->batch(); + int dim = input->dimension(); + int seq = input->sequence(); + int head = input->head(); +#pragma omp parallel for collapse(3) num_threads(thread_count) + for (int h = 0; h < head; h++) { + for (int n = 0; n < batch; n++) { + for (int s = 0; s < seq; s++) { + double sum_squares = 0.0F; + // sum + for (int d = 0; d < dim; d++) { + float value = input->dataAt(n, h, s, d); + sum_squares += (double)value * value; + } + const float mean = sum_squares / dim; + const float rms = 1.0f / sqrtf(mean + epsilon_); + + memcpy(outputs[0]->ptrAt(n, h, s, 0), + inputs[0]->ptrAt(n, h, s, 0), + dim * sizeof(float)); + vec_scale_f32(dim, outputs[0]->ptrAt(n, h, s, 0), rms); + } + } + } + +// #pragma omp parallel for collapse(4) num_threads(thread_count) +// for (int h = 0; h < head; h++) { +// for (int n = 0; n < batch; n++) { +// for (int s = 0; s < seq; s++) { +// for (int d = 0; d < dim; d++) { +// float weight = weight_.dataAt(0, 0, 0, d); +// if (add_unit_offset_) { +// *outputs[0]->ptrAt(n, h, s, d) *= (1 + weight); +// } else { +// *outputs[0]->ptrAt(n, h, s, d) *= (weight); +// } +// } +// } +// } +// } +// 第二部分:应用权重乘法 +#pragma omp parallel for collapse(3) num_threads(thread_count) + for (int h = 0; h < head; h++) { + for (int n = 0; n < batch; n++) { + for (int s = 0; s < seq; s++) { + float *output_vec_ptr = outputs[0]->ptrAt(n, h, s, 0); + + // 根据 weight_ 的数据类型处理 + if (weight_.dtype() == MLLM_TYPE_F32) { + // 如果权重是FP32类型 + float *weight_vec_ptr = weight_.ptrAt(0, 0, 0, 0); + if (add_unit_offset_) { + // 如果需要加1,创建 (1 + weight) 的临时向量 + float 
*adjusted_weight_vec = new float[dim]; + for (int d_idx = 0; d_idx < dim; ++d_idx) { + adjusted_weight_vec[d_idx] = 1.0f + weight_vec_ptr[d_idx]; + } + // 进行逐元素乘法:output_vec_ptr *= adjusted_weight_vec + vec_mul_fp32(dim, output_vec_ptr, output_vec_ptr, adjusted_weight_vec); + delete[] adjusted_weight_vec; // 释放临时内存 + } else { + // 直接使用 vec_mul_fp32 进行逐元素乘法:output_vec_ptr *= weight_vec_ptr + vec_mul_fp32(dim, output_vec_ptr, output_vec_ptr, weight_vec_ptr); + } + } else if (weight_.dtype() == MLLM_TYPE_Q4_0) { + // 如果权重是 Q4_0 类型 + + if (add_unit_offset_) { + // 场景:output_fp32[i] = output_fp32_original[i] * (1.0f + Dequantize(weight_q4_0[i])) + // 这里的 `+1` 操作需要浮点数精度,因此必须先反量化 Q4_0 权重。 + // 这将导致操作回到 FP32 * FP32 的逐元素乘法。 + float *dequantized_and_adjusted_weight_fp32 = new float[dim]; + // 反量化 Q4_0 权重 + dequantize_row_q4_0(weight_.ptrAt(0, 0, 0, 0), dequantized_and_adjusted_weight_fp32, dim); + // 逐元素添加 1 + for (int i = 0; i < dim; ++i) { + dequantized_and_adjusted_weight_fp32[i] = 1.0f + dequantized_and_adjusted_weight_fp32[i]; + } + // 执行 FP32 向量的逐元素乘法:output_vec_ptr *= dequantized_and_adjusted_weight_fp32 + vec_mul_fp32(dim, output_vec_ptr, output_vec_ptr, dequantized_and_adjusted_weight_fp32); + delete[] dequantized_and_adjusted_weight_fp32; // 释放临时内存 + + } else { + // 场景:output_fp32[i] = Dequantize(output_q8_0[i]) * Dequantize(weight_q4_0[i]) + // 这是用户期望的 Q4_0 * Q8_0 混合精度逐元素乘法。 + // 首先,将当前 FP32 输出向量量化为临时的 Q8_0 缓冲区。 + block_q8_0 *temp_output_q8_0 = new block_q8_0[dim / QK8_0]; + quantize_row_q8_0(output_vec_ptr, temp_output_q8_0, dim); + + // 执行 Q4_0 权重和 Q8_0 量化输出之间的逐元素乘法 + // 结果直接存储回 output_vec_ptr (FP32) + vec_mul_q4_0_q8_0(dim, output_vec_ptr, weight_.hostPtr(), temp_output_q8_0); + + delete[] temp_output_q8_0; // 释放临时内存 + } + } else { + // 对于不支持的权重类型,此处断言以指示错误。 + assert(false && "Unsupported weight_ dtype in CPURMSNorm::execute"); + } + } + } + } + return Op::execute(inputs, outputs); +} +ErrorCode CPURMSNorm::load(AbstructLoader &loader) { + 
weight_.setName(name() + ".weight"); + weight_.reshape(1, 1, 1, normSize_); // + if (loader.getDataType(weight_.name()) != MLLM_TYPE_COUNT) { + weight_.setDtype(loader.getDataType(weight_.name())); + weight_.alloc(); + // auto l = loader.length(weight_.name()); + loader.load(&weight_); + } else { + weight_.setDtype(MLLM_TYPE_F32); + weight_.alloc(); + } + return Op::load(loader); +} +ErrorCode CPURMSNorm::free(vector> inputs, vector> outputs) { + weight_.free(); + return Op::free(inputs, outputs); +} +} // namespace mllm \ No newline at end of file diff --git a/src/backends/cpu/op/CPURMSNorm.hpp b/mllm/backends/cpu/op/CPURMSNorm.hpp similarity index 100% rename from src/backends/cpu/op/CPURMSNorm.hpp rename to mllm/backends/cpu/op/CPURMSNorm.hpp diff --git a/src/backends/cpu/op/CPURange.cpp b/mllm/backends/cpu/op/CPURange.cpp similarity index 100% rename from src/backends/cpu/op/CPURange.cpp rename to mllm/backends/cpu/op/CPURange.cpp diff --git a/src/backends/cpu/op/CPURange.hpp b/mllm/backends/cpu/op/CPURange.hpp similarity index 100% rename from src/backends/cpu/op/CPURange.hpp rename to mllm/backends/cpu/op/CPURange.hpp diff --git a/mllm/backends/cpu/op/CPURangeFunc.hpp b/mllm/backends/cpu/op/CPURangeFunc.hpp new file mode 100644 index 000000000..0e9da6aa6 --- /dev/null +++ b/mllm/backends/cpu/op/CPURangeFunc.hpp @@ -0,0 +1,57 @@ +// +// Created by Rongjie Yi on 24-2-26. 
+// + +#ifndef CPURANGEFUNC_HPP +#define CPURANGEFUNC_HPP + +#include "Tensor.hpp" +#include "Types.hpp" +#include "CPUBackend.hpp" +#include + +namespace mllm { +class Tensor; + +class CPURangeFunction : public Op { +private: + int thread_count = 4; + int start_; + int end_; + +public: + CPURangeFunction(Backend *bn, string name, int threadCount, int start, int end) + : Op(bn, name), thread_count(threadCount), start_(start), end_(end) {} + + ErrorCode reshape(vector> inputs, vector> outputs) override { + outputs[0]->reshape(1, 1, end_ - start_, 1); + outputs[0]->setDtype(MLLM_TYPE_F32); + // 遵从原始 reshape 逻辑,在这里 alloc + // outputs[0]->alloc(); + return MLLM_NO_ERROR; + } + + ErrorCode execute(vector> inputs, vector> outputs) override { + int length = end_ - start_; + #pragma omp parallel for num_threads(thread_count) + for (int i = 0; i < length; ++i) { + // Bug fix: Index should be 'i', value should be 'start_ + i'. + // Original code had `setDataAt(..., i + start_, ..., (float)i)`, which was incorrect. 
+ outputs[0]->setDataAt(0, 0, i, 0, (float)(start_ + i)); + } + return MLLM_NO_ERROR; + } +}; + +class CPURangeFunctionCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + // Assumes OpParam contains keys "start" and "end" + int start = static_cast(op_param.at("start")); + int end = static_cast(op_param.at("end")); + return new CPURangeFunction(bn, name, threadCount, start, end); + } +}; + +} // namespace mllm +#endif // CPURANGEFUNC_HPP \ No newline at end of file diff --git a/src/backends/cpu/op/CPUReLU.cpp b/mllm/backends/cpu/op/CPUReLU.cpp similarity index 100% rename from src/backends/cpu/op/CPUReLU.cpp rename to mllm/backends/cpu/op/CPUReLU.cpp diff --git a/src/backends/cpu/op/CPUReLU.hpp b/mllm/backends/cpu/op/CPUReLU.hpp similarity index 100% rename from src/backends/cpu/op/CPUReLU.hpp rename to mllm/backends/cpu/op/CPUReLU.hpp diff --git a/src/backends/cpu/op/CPUReLU2.cpp b/mllm/backends/cpu/op/CPUReLU2.cpp similarity index 100% rename from src/backends/cpu/op/CPUReLU2.cpp rename to mllm/backends/cpu/op/CPUReLU2.cpp diff --git a/src/backends/cpu/op/CPUReLU2.hpp b/mllm/backends/cpu/op/CPUReLU2.hpp similarity index 100% rename from src/backends/cpu/op/CPUReLU2.hpp rename to mllm/backends/cpu/op/CPUReLU2.hpp diff --git a/mllm/backends/cpu/op/CPURepeatFunc.hpp b/mllm/backends/cpu/op/CPURepeatFunc.hpp new file mode 100644 index 000000000..6c361108f --- /dev/null +++ b/mllm/backends/cpu/op/CPURepeatFunc.hpp @@ -0,0 +1,106 @@ +// +// Created by Rongjie Yi on 24-12-16. 
+// + +#ifndef CPUREPEATEFUNC_HPP +#define CPUREPEATEFUNC_HPP + +#include "Tensor.hpp" +#include "Types.hpp" +#include "CPUBackend.hpp" +#include +#include +// #include +#include + +namespace mllm { +class Tensor; + +class CPUrepeatFunction : public Op { +private: + int thread_count = 4; + Chl dim_; + int size_; + +public: + CPUrepeatFunction(Backend *bn, string name, int threadCount, Chl dim, int size) + : Op(bn, name), thread_count(threadCount), dim_(dim), size_(size) {} + + ErrorCode reshape(vector> inputs, vector> outputs) override { + int batch = inputs[0]->batch(); + int head = inputs[0]->head(); + int sequence = inputs[0]->sequence(); + int dimension = inputs[0]->dimension(); + + switch (dim_) { + case Chl::BATCH: + batch = size_; + break; + case Chl::HEAD: + head = size_; + break; + case Chl::SEQUENCE: + sequence = size_; + break; + case Chl::DIMENSION: + dimension = size_; + break; + default: + break; + } + + outputs[0]->reshape(batch, head, sequence, dimension); + outputs[0]->setDtype(inputs[0]->dtype()); + // 遵从原始 reshape 逻辑,在这里 alloc + // outputs[0]->alloc(); + return MLLM_NO_ERROR; + } + + ErrorCode execute(vector> inputs, vector> outputs) override { + switch (dim_) { + case Chl::BATCH: { + std::cerr << "Repeat Not implemented for BATCH" << std::endl; + break; + } + case Chl::HEAD: { + std::cerr << "Repeat Not implemented for HEAD" << std::endl; + break; + } + case Chl::SEQUENCE: { + std::cerr << "Repeat Not implemented for SEQUENCE" << std::endl; + break; + } + case Chl::DIMENSION: { +#pragma omp parallel for collapse(3) num_threads(thread_count) + for (int b = 0; b < inputs[0]->batch(); b++) { + for (int h = 0; h < inputs[0]->head(); h++) { + for (int s = 0; s < inputs[0]->sequence(); s++) { + // Assuming the input dimension to repeat is 1 + float data = inputs[0]->dataAt(b, h, s, 0); + for (int d = 0; d < size_; d++) { + outputs[0]->setDataAt(b, h, s, d, data); + } + } + } + } + break; + } + default: + break; + } + return MLLM_NO_ERROR; + } +}; + 
+class CPUrepeatFunctionCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + // Assumes OpParam contains keys "dim" and "size" + Chl dim = (Chl)op_param.at("dim"); + int size = static_cast(op_param.at("size")); + return new CPUrepeatFunction(bn, name, threadCount, dim, size); + } +}; + +} // namespace mllm +#endif // CPUREPEATEFUNC_HPP \ No newline at end of file diff --git a/src/backends/cpu/op/CPUReplace.cpp b/mllm/backends/cpu/op/CPUReplace.cpp similarity index 98% rename from src/backends/cpu/op/CPUReplace.cpp rename to mllm/backends/cpu/op/CPUReplace.cpp index 85f87e752..7d94ae129 100644 --- a/src/backends/cpu/op/CPUReplace.cpp +++ b/mllm/backends/cpu/op/CPUReplace.cpp @@ -102,7 +102,7 @@ ErrorCode CPUReplace::setUp(vector> inputs, vectorsetDtype(activation_dtype()); outputs[0]->alloc(); - inputs[0]->shallowCopyFrom(outputs[0].get(), false); + inputs[0]->shallowCopyFrom(outputs[0], false); return MLLM_NO_ERROR; } else { diff --git a/src/backends/cpu/op/CPUReplace.hpp b/mllm/backends/cpu/op/CPUReplace.hpp similarity index 100% rename from src/backends/cpu/op/CPUReplace.hpp rename to mllm/backends/cpu/op/CPUReplace.hpp diff --git a/src/backends/cpu/op/CPURoPE.cpp b/mllm/backends/cpu/op/CPURoPE.cpp similarity index 99% rename from src/backends/cpu/op/CPURoPE.cpp rename to mllm/backends/cpu/op/CPURoPE.cpp index fe334b3c3..0cf0bc627 100644 --- a/src/backends/cpu/op/CPURoPE.cpp +++ b/mllm/backends/cpu/op/CPURoPE.cpp @@ -1,11 +1,12 @@ #include "CPURoPE.hpp" +#include "Context.hpp" #include "Timing.hpp" #include "Types.hpp" #include #include #include -#include "backends/cpu/quantize/QuantizeQ8.hpp" +#include "backends/cpu/third_party/ggml/QuantizeQ8.hpp" namespace mllm { @@ -224,9 +225,8 @@ ErrorCode CPURoPE::reshape(vector> inputs, vector(backend_); - if (cpuBackend->isStageSwitching()) { - h_cnt_ = cpuBackend->getCurSequenceLength(); + if 
(Context::Instance().inference_state().isStageSwitching()) { + h_cnt_ = Context::Instance().inference_state().getCurSequenceLength(); } #endif return Op::reshape(inputs, outputs); diff --git a/src/backends/cpu/op/CPURoPE.hpp b/mllm/backends/cpu/op/CPURoPE.hpp similarity index 100% rename from src/backends/cpu/op/CPURoPE.hpp rename to mllm/backends/cpu/op/CPURoPE.hpp diff --git a/src/backends/cpu/op/CPURoPETree.cpp b/mllm/backends/cpu/op/CPURoPETree.cpp similarity index 97% rename from src/backends/cpu/op/CPURoPETree.cpp rename to mllm/backends/cpu/op/CPURoPETree.cpp index 065fd72bc..d6fd673a5 100644 --- a/src/backends/cpu/op/CPURoPETree.cpp +++ b/mllm/backends/cpu/op/CPURoPETree.cpp @@ -1,12 +1,13 @@ #include "CPURoPE.hpp" +#include "Context.hpp" #include "CPURoPETree.hpp" #include "Timing.hpp" #include "Types.hpp" #include #include #include -#include "backends/cpu/quantize/QuantizeQ8.hpp" +#include "backends/cpu/third_party/ggml/QuantizeQ8.hpp" namespace mllm { @@ -87,17 +88,17 @@ ErrorCode CPURoPETree::reshape(vector> inputs, vector(backend_); - if (cpuBackend->isStageSwitching()) { - h_cnt_ = cpuBackend->getCurSequenceLength(); + if (Context::Instance().inference_state().isStageSwitching()) { + h_cnt_ = Context::Instance().inference_state().getCurSequenceLength(); } #else auto cpuBackend = dynamic_cast(backend_); #endif // for sd - if (cpuBackend->isUsingDraft()) { - unsigned int last_draft_length = cpuBackend->getLastDraftLength(); - const std::vector &last_verified_position_ids = cpuBackend->getLastVerifiedPositionIds(); + if (Context::Instance().speculative_decoding_state().isUsingDraft()) { + unsigned int last_draft_length = Context::Instance().speculative_decoding_state().getLastDraftLength(); + const std::vector &last_verified_position_ids = Context::Instance().speculative_decoding_state().getLastVerifiedPositionIds(); h_cnt_ = h_cnt_ - (last_draft_length) + last_verified_position_ids.size(); if (h_cnt_ < 0) { h_cnt_ = 0; diff --git 
a/src/backends/cpu/op/CPURoPETree.hpp b/mllm/backends/cpu/op/CPURoPETree.hpp similarity index 100% rename from src/backends/cpu/op/CPURoPETree.hpp rename to mllm/backends/cpu/op/CPURoPETree.hpp diff --git a/mllm/backends/cpu/op/CPUSageAttentionFunc.hpp b/mllm/backends/cpu/op/CPUSageAttentionFunc.hpp new file mode 100644 index 000000000..5767ef038 --- /dev/null +++ b/mllm/backends/cpu/op/CPUSageAttentionFunc.hpp @@ -0,0 +1,153 @@ +// +// Created by Rongjie Yi on 25-2-16, with adaptations by Gemini. +// + +#ifndef CPUSAGEATTENTIONFUNC_HPP +#define CPUSAGEATTENTIONFUNC_HPP + +#include "CPUBackend.hpp" +#include "Tensor.hpp" +#include "Types.hpp" +#include +#include +#include "../compute/SageAttention.hpp" +#include "../compute/SageAttentionPT.hpp" +#include "../compute/SageAttentionKVQ8.hpp" + +namespace mllm { + +class CPUSageAttentionFunc : public Op { +private: + int thread_count_ = 4; + bool causal_mask_; + +public: + CPUSageAttentionFunc(Backend *bn, string name, int threadCount, bool causal_mask) : + Op(bn, name), thread_count_(threadCount), causal_mask_(causal_mask) { + } + + ErrorCode reshape(vector> inputs, vector> outputs) override { + auto q_tensor = inputs[0]; + auto o_tensor = outputs[0]; + + int batch_size = q_tensor->batch(); + int q_head = q_tensor->head(); + int q_sequence = q_tensor->sequence(); + int dimension = q_tensor->dimension(); + + o_tensor->setCtype(q_tensor->ctype()); + o_tensor->reshape(batch_size, q_head, q_sequence, dimension); + o_tensor->setDtype(inputs[0]->dtype()); + return ErrorCode::MLLM_NO_ERROR; + } + + ErrorCode execute(vector> inputs, vector> outputs) override { + auto q_tensor = inputs[0]; + auto k_tensor = inputs[1]; + auto v_tensor = inputs[2]; + auto o_tensor = outputs[0]; + + int batch_size = q_tensor->batch(); + int q_head = q_tensor->head(); + int q_sequence = q_tensor->sequence(); + int dimension = q_tensor->dimension(); + + int k_head = k_tensor->head(); + int k_sequence = k_tensor->sequence(); + + 
assert(v_tensor->head() == k_head); + assert(v_tensor->sequence() == k_sequence); + + bool kv_use_fp32 = (k_tensor->dtype() == MLLM_TYPE_F32); + + int threads = thread_count_; + threads = std::min(threads, q_head); + + int32_t br = q_sequence >= 4 ? 4 : q_sequence; + int32_t bc = q_sequence >= 4 ? 4 : q_sequence; + if (dimension % QK8_0F != 0) { + if (kv_use_fp32) { + sage_attn_pt_cpu::sage_attention_forward_cpu_dispatch( + q_tensor->hostPtr(), + k_tensor->hostPtr(), + v_tensor->hostPtr(), + o_tensor->hostPtr(), + batch_size, q_head, k_head, + q_sequence, k_sequence, dimension, + causal_mask_, + threads, + br, bc); + } else { + sage_attn_pt_cpu::sage_attention_forward_cpu_dispatch( + q_tensor->hostPtr(), + k_tensor->hostPtr(), + v_tensor->hostPtr(), + o_tensor->hostPtr(), + batch_size, q_head, k_head, + q_sequence, k_sequence, dimension, + causal_mask_, + threads, + br, bc); + } + return ErrorCode::MLLM_NO_ERROR; + } + if (k_tensor->dtype() == MLLM_TYPE_F32 || k_tensor->dtype() == MLLM_TYPE_F16) { + if (kv_use_fp32) { + sage_attn_cpu::sage_attention_forward_cpu_dispatch( + q_tensor->hostPtr(), + k_tensor->hostPtr(), + v_tensor->hostPtr(), + nullptr, + nullptr, + o_tensor->hostPtr(), + batch_size, q_head, k_head, + q_sequence, k_sequence, dimension, + causal_mask_, + threads, + br, bc, k_sequence); + } else { + sage_attn_cpu::sage_attention_forward_cpu_dispatch( + q_tensor->hostPtr(), + k_tensor->hostPtr(), + v_tensor->hostPtr(), + nullptr, + nullptr, + o_tensor->hostPtr(), + batch_size, q_head, k_head, + q_sequence, k_sequence, dimension, + causal_mask_, + threads, + br, bc, k_sequence); + } + } else if (k_tensor->dtype() == MLLM_TYPE_Q8_0F) { + const float *k_mean_ptr = k_tensor->seqMeans().data(); + const float *v_mean_ptr = v_tensor->seqMeans().data(); + seq_attn_kvq8::sage_attention_forward_cpu_dispatch( + q_tensor->hostPtr(), + k_tensor->hostPtr(), + v_tensor->hostPtr(), + k_mean_ptr, + v_mean_ptr, + o_tensor->hostPtr(), + batch_size, q_head, k_head, 
q_sequence, k_sequence, dimension, + causal_mask_, threads, br, bc); + } else { + std::cout << "Unsupported K/V dtype: " << k_tensor->dtype() << std::endl; + return MLLM_NO_ERROR; + } + + return ErrorCode::MLLM_NO_ERROR; + } +}; + +class CPUSageAttentionFuncCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + // 从op_param中获取参数 + bool causal_mask = op_param.find("causal_mask") != op_param.end() ? (bool)op_param.at("causal_mask") : true; + return new CPUSageAttentionFunc(bn, name, threadCount, causal_mask); + } +}; + +} // namespace mllm +#endif // CPUSAGEATTENTIONFUNC_HPP diff --git a/src/backends/cpu/op/CPUScale.cpp b/mllm/backends/cpu/op/CPUScale.cpp similarity index 98% rename from src/backends/cpu/op/CPUScale.cpp rename to mllm/backends/cpu/op/CPUScale.cpp index 3e088657b..305430b11 100644 --- a/src/backends/cpu/op/CPUScale.cpp +++ b/mllm/backends/cpu/op/CPUScale.cpp @@ -78,7 +78,7 @@ ErrorCode CPUScale::setUp(vector> inputs, vectorsetDtype(activation_dtype()); outputs[0]->alloc(); - inputs[0]->shallowCopyFrom(outputs[0].get(), false); + inputs[0]->shallowCopyFrom(outputs[0], false); return MLLM_NO_ERROR; } diff --git a/src/backends/cpu/op/CPUScale.hpp b/mllm/backends/cpu/op/CPUScale.hpp similarity index 100% rename from src/backends/cpu/op/CPUScale.hpp rename to mllm/backends/cpu/op/CPUScale.hpp diff --git a/mllm/backends/cpu/op/CPUScatter.hpp b/mllm/backends/cpu/op/CPUScatter.hpp new file mode 100644 index 000000000..c95d2f18f --- /dev/null +++ b/mllm/backends/cpu/op/CPUScatter.hpp @@ -0,0 +1,85 @@ +// +// Created by Rongjie Yi on 24-12-26. 
+// + +#ifndef CPUSCATTE_HPP +#define CPUSCATTE_HPP + +#include "Tensor.hpp" +#include "Types.hpp" +#include "CPUBackend.hpp" +#include +#include +#include + +namespace mllm { +class Tensor; + +class CPUScatter : public Op { +private: + int thread_count = 4; + Chl dim_; // default dimension is SEQUENCE + float value_ = 0.0f; // default value is 0.0f + +public: + CPUScatter(Backend *bn, string name, Chl dim, float value, int threadCount) : + Op(bn, name), dim_(dim), value_(value), thread_count(threadCount) { + } + + ErrorCode reshape(vector> inputs, vector> outputs) override { + return MLLM_NO_ERROR; + } + + ErrorCode execute(vector> inputs, vector> outputs) override { + if (inputs[1]->batch() == 0) { + return MLLM_NO_ERROR; + } + assert(inputs.size() == 2); + assert(inputs[0]->batch() == 1); + auto dest_input = inputs[0]; + auto replace_idx = inputs[1]; + if (dim_ == SEQUENCE) { + assert(inputs[0]->head() == 1); + assert(replace_idx->batch() == 1); + assert(replace_idx->sequence() == 1); + assert(replace_idx->head() == 1); + assert(dest_input->head() == 1); + // Todo check + // #pragma omp parallel for num_threads(CPUBackend::cpu_threads) + for (int r_idx = 0; r_idx < replace_idx->dimension(); r_idx++) { + auto replace_seq = (int)replace_idx->dataAt(0, 0, 0, r_idx); + auto dst_ptr = dest_input->ptrAt(0, 0, replace_seq, 0); + memset(dst_ptr, value_, sizeof(float) * dest_input->dimension()); + } + } else if (dim_ == HEAD) { + assert(replace_idx->sequence() == dest_input->sequence()); + for (int tok = 0; tok < replace_idx->sequence(); tok++) { + for (int r_idx = 0; r_idx < replace_idx->dimension(); r_idx++) { + auto replace_seq = (int)replace_idx->dataAt(0, 0, tok, r_idx); + auto dst_ptr = dest_input->ptrAt(0, replace_seq, tok, 0); + dest_input->setDataAt(0, replace_seq, tok, 0, value_); + } + }; + } else { + std::cerr << "Error: CPUScatter only supports SEQUENCE dimension currently." 
<< std::endl; + return NOT_SUPPORT; + } + return MLLM_NO_ERROR; + } +}; + +class CPUScatterCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + Chl dim = SEQUENCE; + auto it = op_param.find("dim"); + if (it != op_param.end()) { + dim = static_cast(it->second); + } + float value = static_cast(op_param["value"]); + return new CPUScatter(bn, name, dim, value, threadCount); + } +}; + +} // namespace mllm +#endif // CPUSCATTE_HPP \ No newline at end of file diff --git a/mllm/backends/cpu/op/CPUScatterAddFunc.hpp b/mllm/backends/cpu/op/CPUScatterAddFunc.hpp new file mode 100644 index 000000000..4dd7e3c7b --- /dev/null +++ b/mllm/backends/cpu/op/CPUScatterAddFunc.hpp @@ -0,0 +1,79 @@ +// +// Created by Rongjie Yi on 24-12-26. +// + +#ifndef CPUSCATTEADDFUNC_HPP +#define CPUSCATTEADDFUNC_HPP + +#include "Tensor.hpp" +#include "Types.hpp" +#include "CPUBackend.hpp" +#include "../compute/Arithmetic.hpp" +#include +#include + +namespace mllm { +class Tensor; + +class CPUScatterAddFunction : public Op { +private: + int thread_count = 4; + Chl dim_ = SEQUENCE; // default dimension is SEQUENCE + +public: + CPUScatterAddFunction(Backend *bn, string name, Chl dim, int threadCount) : + Op(bn, name), dim_(dim), thread_count(threadCount) { + } + + ErrorCode reshape(vector> inputs, vector> outputs) override { + return MLLM_NO_ERROR; + } + + ErrorCode execute(vector> inputs, vector> outputs) override { + if (inputs[1]->batch() == 0) { + return MLLM_NO_ERROR; + } + assert(inputs.size() == 3); + assert(inputs[0]->batch() == 1); + assert(inputs[0]->head() == 1); + auto dest_input = inputs[0]; + auto src_input = inputs[1]; + auto replace_idx = inputs[2]; + assert(replace_idx->batch() == 1); + assert(replace_idx->sequence() == 1); + assert(replace_idx->head() == 1); + if (dim_ == SEQUENCE) { + // #pragma omp parallel for num_threads(CPUBackend::cpu_threads) + for (int r_idx = 0; r_idx < 
replace_idx->dimension(); r_idx++) { + auto replace_seq = (int)replace_idx->dataAt(0, 0, 0, r_idx); + auto dst_ptr = dest_input->ptrAt(0, 0, replace_seq, 0); + auto src_ptr = src_input->ptrAt(0, 0, r_idx, 0); + // memcpy(dst_ptr, src_ptr, sizeof(float) * src_input->dimension()); + float tmp[src_input->dimension()]; + memcpy(tmp, dst_ptr, sizeof(float) * dest_input->dimension()); + mllm_add_fp32(tmp, + src_ptr, + dst_ptr, dest_input->dimension()); + } + } else { + std::cerr << "Error: CPUScatterAddFunction only supports SEQUENCE dimension currently." << std::endl; + return NOT_SUPPORT; + } + return MLLM_NO_ERROR; + } +}; + +class CPUScatterAddFunctionCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + Chl dim = SEQUENCE; + auto it = op_param.find("dim"); + if (it != op_param.end()) { + dim = static_cast(it->second); + } + return new CPUScatterAddFunction(bn, name, dim, threadCount); + } +}; + +} // namespace mllm +#endif // CPUSCATTEADDFUNC_HPP \ No newline at end of file diff --git a/src/backends/cpu/op/CPUShape.cpp b/mllm/backends/cpu/op/CPUShape.cpp similarity index 100% rename from src/backends/cpu/op/CPUShape.cpp rename to mllm/backends/cpu/op/CPUShape.cpp diff --git a/src/backends/cpu/op/CPUShape.hpp b/mllm/backends/cpu/op/CPUShape.hpp similarity index 100% rename from src/backends/cpu/op/CPUShape.hpp rename to mllm/backends/cpu/op/CPUShape.hpp diff --git a/src/backends/cpu/op/CPUSiLU.cpp b/mllm/backends/cpu/op/CPUSiLU.cpp similarity index 100% rename from src/backends/cpu/op/CPUSiLU.cpp rename to mllm/backends/cpu/op/CPUSiLU.cpp diff --git a/src/backends/cpu/op/CPUSiLU.hpp b/mllm/backends/cpu/op/CPUSiLU.hpp similarity index 100% rename from src/backends/cpu/op/CPUSiLU.hpp rename to mllm/backends/cpu/op/CPUSiLU.hpp diff --git a/mllm/backends/cpu/op/CPUSigmoid.cpp b/mllm/backends/cpu/op/CPUSigmoid.cpp new file mode 100644 index 000000000..4bab0908e --- /dev/null +++ 
b/mllm/backends/cpu/op/CPUSigmoid.cpp @@ -0,0 +1,46 @@ +#include "CPUSigmoid.hpp" +// #include +#include "Tensor.hpp" +#include "../compute/Sigmoid.hpp" + +namespace mllm { + +// static void vec_sigmoid_f32(const int n, float *y, const float *x) { +// for (int i = 0; i < n; ++i) { +// y[i] = 1.0f / (1.0f + expf(-x[i])); +// } +// } + +CPUSigmoid::CPUSigmoid(Backend *bn, string opName, int threadCount) : + thread_count(threadCount), + Op(bn, opName) { + // 构造函数中没有特殊操作 +} + +ErrorCode CPUSigmoid::reshape(vector> inputs, vector> outputs) { + // Sigmoid 是按元素操作的,所以输出张量的形状与输入张量完全相同 + assert(inputs.size() == 1); + assert(outputs.size() == 1); + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); + return Op::reshape(inputs, outputs); +} + +ErrorCode CPUSigmoid::execute(vector> inputs, vector> outputs) { + auto &input = inputs[0]; + auto &output = outputs[0]; + +#pragma omp parallel for collapse(3) num_threads(thread_count) + for (int n = 0; n < input->batch(); ++n) { + for (int h = 0; h < input->head(); ++h) { + for (int s = 0; s < input->sequence(); ++s) { + const float *in_ptr = input->ptrAt(n, h, s, 0); + float *out_ptr = output->ptrAt(n, h, s, 0); + vec_sigmoid_f32(input->dimension(), out_ptr, in_ptr); + } + } + } + + return Op::execute(inputs, outputs); +} + +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/cpu/op/CPUSigmoid.hpp b/mllm/backends/cpu/op/CPUSigmoid.hpp new file mode 100644 index 000000000..d12211bcd --- /dev/null +++ b/mllm/backends/cpu/op/CPUSigmoid.hpp @@ -0,0 +1,28 @@ +#ifndef MLLM_CPUSIGMOID_H +#define MLLM_CPUSIGMOID_H + +#include "Op.hpp" +#include "../CPUBackend.hpp" + +namespace mllm { + +class CPUSigmoid final : public Op { +public: + CPUSigmoid(Backend *bn, string opName, int threadCount); + virtual ~CPUSigmoid() = default; + virtual ErrorCode reshape(vector> inputs, vector> outputs) override; + virtual ErrorCode execute(vector> inputs, vector> outputs) override; 
+ +private: + int thread_count = 4; +}; + +class CPUSigmoidCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const { + return new CPUSigmoid(bn, name, threadCount); + } +}; +} // namespace mllm + +#endif // MLLM_CPUSIGMOID_H \ No newline at end of file diff --git a/src/backends/cpu/op/CPUSlidingWindowMask.cpp b/mllm/backends/cpu/op/CPUSlidingWindowMask.cpp similarity index 97% rename from src/backends/cpu/op/CPUSlidingWindowMask.cpp rename to mllm/backends/cpu/op/CPUSlidingWindowMask.cpp index aa4e8d2e6..b182e9b50 100644 --- a/src/backends/cpu/op/CPUSlidingWindowMask.cpp +++ b/mllm/backends/cpu/op/CPUSlidingWindowMask.cpp @@ -69,7 +69,7 @@ ErrorCode CPUSlidingWindowMask::setUp(vector> inputs, vector< } outputs[0]->setDtype(activation_dtype()); outputs[0]->alloc(); - inputs[0]->shallowCopyFrom(outputs[0].get(), false); + inputs[0]->shallowCopyFrom(outputs[0], false); return MLLM_NO_ERROR; } } // namespace mllm diff --git a/src/backends/cpu/op/CPUSlidingWindowMask.hpp b/mllm/backends/cpu/op/CPUSlidingWindowMask.hpp similarity index 100% rename from src/backends/cpu/op/CPUSlidingWindowMask.hpp rename to mllm/backends/cpu/op/CPUSlidingWindowMask.hpp diff --git a/src/backends/cpu/op/CPUSoftMax.cpp b/mllm/backends/cpu/op/CPUSoftMax.cpp similarity index 98% rename from src/backends/cpu/op/CPUSoftMax.cpp rename to mllm/backends/cpu/op/CPUSoftMax.cpp index 9566da226..4f586b474 100644 --- a/src/backends/cpu/op/CPUSoftMax.cpp +++ b/mllm/backends/cpu/op/CPUSoftMax.cpp @@ -2,7 +2,8 @@ #include "CPUSoftMax.hpp" #include #include "Tensor.hpp" -#include "quantize/Quantize.hpp" +#include "backends/cpu/third_party/ggml/Quantize.hpp" +#include "backends/cpu/third_party/ggml/VecDotFP32.hpp" #include "../compute/ActivationFunction.hpp" namespace mllm { diff --git a/src/backends/cpu/op/CPUSoftMax.hpp b/mllm/backends/cpu/op/CPUSoftMax.hpp similarity index 100% rename from src/backends/cpu/op/CPUSoftMax.hpp 
rename to mllm/backends/cpu/op/CPUSoftMax.hpp diff --git a/src/backends/cpu/op/CPUSparseIdLinear.cpp b/mllm/backends/cpu/op/CPUSparseIdLinear.cpp similarity index 100% rename from src/backends/cpu/op/CPUSparseIdLinear.cpp rename to mllm/backends/cpu/op/CPUSparseIdLinear.cpp diff --git a/src/backends/cpu/op/CPUSparseIdLinear.hpp b/mllm/backends/cpu/op/CPUSparseIdLinear.hpp similarity index 100% rename from src/backends/cpu/op/CPUSparseIdLinear.hpp rename to mllm/backends/cpu/op/CPUSparseIdLinear.hpp diff --git a/src/backends/cpu/op/CPUSparseLinear.cpp b/mllm/backends/cpu/op/CPUSparseLinear.cpp similarity index 100% rename from src/backends/cpu/op/CPUSparseLinear.cpp rename to mllm/backends/cpu/op/CPUSparseLinear.cpp diff --git a/src/backends/cpu/op/CPUSparseLinear.hpp b/mllm/backends/cpu/op/CPUSparseLinear.hpp similarity index 100% rename from src/backends/cpu/op/CPUSparseLinear.hpp rename to mllm/backends/cpu/op/CPUSparseLinear.hpp diff --git a/src/backends/cpu/op/CPUSplit.cpp b/mllm/backends/cpu/op/CPUSplit.cpp similarity index 100% rename from src/backends/cpu/op/CPUSplit.cpp rename to mllm/backends/cpu/op/CPUSplit.cpp diff --git a/src/backends/cpu/op/CPUSplit.hpp b/mllm/backends/cpu/op/CPUSplit.hpp similarity index 100% rename from src/backends/cpu/op/CPUSplit.hpp rename to mllm/backends/cpu/op/CPUSplit.hpp diff --git a/mllm/backends/cpu/op/CPUSplitFunc.hpp b/mllm/backends/cpu/op/CPUSplitFunc.hpp new file mode 100644 index 000000000..47f99636c --- /dev/null +++ b/mllm/backends/cpu/op/CPUSplitFunc.hpp @@ -0,0 +1,195 @@ +// +// Created by Rongjie Yi on 24-2-26. 
+// + +#ifndef CPUSPLITFUNC_HPP +#define CPUSPLITFUNC_HPP + +#include "Tensor.hpp" +#include "Types.hpp" +#include "CPUBackend.hpp" +#include "../compute/Split.hpp" +#include +#include + +namespace mllm { +class Tensor; + +class CPUsplitFunction : public Op { +private: + int thread_count = 4; + std::vector each_dims_; + Chl split_dim_; + int head_size_; + +public: + CPUsplitFunction(Backend *bn, string name, int threadCount, + const std::vector &each_dims, Chl split_dim, int head_size) : + Op(bn, name), thread_count(threadCount), each_dims_(each_dims), + split_dim_(split_dim), head_size_(head_size) { + } + + ErrorCode setUp(vector> inputs, vector> outputs) override { + int split_num_ = each_dims_.size(); + // store each dims + int split_dim_size_ = 0; + for (size_t i = 0; i < each_dims_.size(); ++i) { + split_dim_size_ += each_dims_[i]; + } + assert(split_num_ == outputs.size()); + switch (split_dim_) { + case Chl::HEAD: { + // assert(inputs[0]->head() == split_dim_size_); + for (int i = 0; i < split_num_; i++) { + outputs[i]->reshape(inputs[0]->batch(), each_dims_[i], inputs[0]->sequence(), inputs[0]->dimension()); + } + break; + } + case Chl::SEQUENCE: { + // assert(inputs[0]->sequence() == split_dim_size_); + for (int i = 0; i < split_num_; i++) { + outputs[i]->reshape(inputs[0]->batch(), inputs[0]->head(), each_dims_[i], inputs[0]->dimension()); + } + break; + } + case Chl::DIMENSION: { + // assert(inputs[0]->dimension() == split_dim_size_); + for (int i = 0; i < split_num_; i++) { + outputs[i]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), each_dims_[i]); + } + break; + } + case Chl::D_HD: { + // assert(inputs[0]->dimension() == split_dim_size_ * head_size); + for (int i = 0; i < split_num_; i++) { + outputs[i]->reshape(inputs[0]->batch(), head_size_, inputs[0]->sequence(), each_dims_[i]); + } + break; + } + case Chl::HD: { + // assert(inputs[0]->dimension() == split_dim_size_ * head_size); + for (int i = 0; i < split_num_; i++) { + 
outputs[i]->reshape(inputs[0]->batch(), head_size_, inputs[0]->sequence(), each_dims_[i]); + } + break; + } + default: { + break; + } + } + if (inputs[0]->allowAggregated()) { + vector> shared_outputs = {}; + for (const auto &output : outputs) { + output->alloc(); + shared_outputs.push_back(output); + } + if (inputs[0]->masterTensor() == nullptr && !inputs[0]->childTensors().empty()) { + inputs[0]->free(); + } + inputs[0]->addTensors(shared_outputs, split_dim_); + } + return MLLM_NO_ERROR; + } + + ErrorCode reshape(vector> inputs, vector> outputs) override { + int split_num_ = each_dims_.size(); + // store each dims + int split_dim_size_ = 0; + for (size_t i = 0; i < each_dims_.size(); ++i) { + split_dim_size_ += each_dims_[i]; + } + assert(split_num_ == outputs.size()); + switch (split_dim_) { + case Chl::HEAD: { + // assert(inputs[0]->head() == split_dim_size_); + for (int i = 0; i < split_num_; i++) { + outputs[i]->reshape(inputs[0]->batch(), each_dims_[i], inputs[0]->sequence(), inputs[0]->dimension()); + } + break; + } + case Chl::SEQUENCE: { + // assert(inputs[0]->sequence() == split_dim_size_); + for (int i = 0; i < split_num_; i++) { + outputs[i]->reshape(inputs[0]->batch(), inputs[0]->head(), each_dims_[i], inputs[0]->dimension()); + } + break; + } + case Chl::DIMENSION: { + // assert(inputs[0]->dimension() == split_dim_size_); + for (int i = 0; i < split_num_; i++) { + outputs[i]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), each_dims_[i]); + } + break; + } + case Chl::D_HD: { + // assert(inputs[0]->dimension() == split_dim_size_ * head_size); + for (int i = 0; i < split_num_; i++) { + outputs[i]->reshape(inputs[0]->batch(), head_size_, inputs[0]->sequence(), each_dims_[i]); + } + break; + } + case Chl::HD: { + // assert(inputs[0]->dimension() == split_dim_size_ * head_size); + for (int i = 0; i < split_num_; i++) { + outputs[i]->reshape(inputs[0]->batch(), head_size_, inputs[0]->sequence(), each_dims_[i]); + } + break; + } + 
default: { + break; + } + } + return MLLM_NO_ERROR; + } + + ErrorCode execute(vector> inputs, vector> outputs) override { + // This path is taken only if the memory aggregation in setUp was not performed. + if (inputs[0]->aggregatedTensors().empty()) { + std::vector out_pointers; + std::vector out_types; + + assert(each_dims_.size() == outputs.size()); + for (const auto &output : outputs) { + if (output->hostPtr() == nullptr) { + output->alloc(); + } + if (output->dtype() == MLLM_TYPE_F32) { + out_pointers.push_back(output->ptrAt(0, 0, 0, 0)); + } else if (output->dtype() == MLLM_TYPE_F16) { + out_pointers.push_back(output->ptrAt(0, 0, 0, 0)); + } + out_types.push_back(output->dtype()); + } + const int origin_dims[4] = {inputs[0]->batch(), inputs[0]->sequence(), inputs[0]->head(), inputs[0]->dimension()}; + + efficient_split(inputs[0]->ptrAt(0, 0, 0, 0), + origin_dims, + out_pointers, + out_types, + each_dims_, + split_dim_); + } + + return MLLM_NO_ERROR; + } +}; + +class CPUsplitFunctionCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + // Assumes OpParam is structured to pass the split parameters. 
+ // Example: {"num_splits": 2, "dim_0": 64, "dim_1": 64, "split_dim": 3, "head_size": 12} + int num_splits = static_cast(op_param.at("num_splits")); + std::vector each_dims; + for (int i = 0; i < num_splits; ++i) { + each_dims.push_back(static_cast(op_param.at("dim_" + std::to_string(i)))); + } + Chl split_dim = (Chl)op_param.at("split_dim"); + int head_size = static_cast(op_param.at("head_size")); + + return new CPUsplitFunction(bn, name, threadCount, each_dims, split_dim, head_size); + } +}; + +} // namespace mllm +#endif // CPUSPLITFUNC_HPP \ No newline at end of file diff --git a/src/backends/cpu/op/CPUSplitInput.cpp b/mllm/backends/cpu/op/CPUSplitInput.cpp similarity index 94% rename from src/backends/cpu/op/CPUSplitInput.cpp rename to mllm/backends/cpu/op/CPUSplitInput.cpp index 19b2066b0..423ce5174 100644 --- a/src/backends/cpu/op/CPUSplitInput.cpp +++ b/mllm/backends/cpu/op/CPUSplitInput.cpp @@ -19,10 +19,9 @@ ErrorCode CPUSplitInput::reshape(vector> inputs, vector> inputs, vector> outputs) { for (int i = 0; i < inputs.size(); i++) { - outputs[i]->shallowCopyFrom(inputs[i].get(), true); + outputs[i]->shallowCopyFrom(inputs[i], true); // the split output is CPU backend by default, set output backend to QNN to let the device() be QNN outputs[i]->setBackend(inputs[i]->backend()); - } return MLLM_NO_ERROR; } @@ -31,4 +30,3 @@ ErrorCode CPUSplitInput::execute(vector> inputs, vector +#include namespace mllm { class Tensor; -class CPUsumFunction : public TensorFunction { +class CPUsumFunction : public Op { +private: + int thread_count = 4; + Chl axis_; + public: - void reshape(vector> outputs, vector> inputs, vector args) override { - Chl axis = (Chl)args[0]; + CPUsumFunction(Backend *bn, string name, int threadCount, Chl axis) : + Op(bn, name), thread_count(threadCount), axis_(axis) { + } + + ErrorCode reshape(vector> inputs, vector> outputs) override { int batch = inputs[0]->batch(); int head = inputs[0]->head(); int sequence = inputs[0]->sequence(); int 
dimension = inputs[0]->dimension(); - switch (axis) { + switch (axis_) { case BATCH: batch = 1; break; @@ -36,15 +47,17 @@ class CPUsumFunction : public TensorFunction { } outputs[0]->reshape(batch, head, sequence, dimension); outputs[0]->setDtype(inputs[0]->dtype()); - outputs[0]->alloc(); + // 遵从原始 reshape 逻辑,在这里 alloc + // outputs[0]->alloc(); + return MLLM_NO_ERROR; } - void execute(vector> outputs, vector> inputs, vector args) override { - Chl axis = (Chl)args[0]; + + ErrorCode execute(vector> inputs, vector> outputs) override { int batch = inputs[0]->batch(); int dim = inputs[0]->dimension(); int seq = inputs[0]->sequence(); int head = inputs[0]->head(); - switch (axis) { + switch (axis_) { case BATCH: { for (int h = 0; h < head; h++) { for (int s = 0; s < seq; ++s) { @@ -104,6 +117,15 @@ class CPUsumFunction : public TensorFunction { default: break; } + return MLLM_NO_ERROR; + } +}; + +class CPUsumFunctionCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + Chl axis = (Chl)op_param.at("dim"); + return new CPUsumFunction(bn, name, threadCount, axis); } }; diff --git a/mllm/backends/cpu/op/CPUTilde.hpp b/mllm/backends/cpu/op/CPUTilde.hpp new file mode 100644 index 000000000..edb934ece --- /dev/null +++ b/mllm/backends/cpu/op/CPUTilde.hpp @@ -0,0 +1,65 @@ +// +// Created by Rongjie Yi on 24-12-16. 
+// + +#ifndef CPUTILDE_HPP +#define CPUTILDE_HPP + +#include "CPUBackend.hpp" +#include "Tensor.hpp" +#include "Types.hpp" +#include +// #include +// #include +#include + +namespace mllm { +class Tensor; + +class CPUTilde : public Op { +private: + int thread_count = 4; + +public: + CPUTilde(Backend *bn, string name, int threadCount) : + Op(bn, name), thread_count(threadCount) { + } + + ErrorCode reshape(vector> inputs, vector> outputs) override { + // assert(outputs.size() == 2); // topk returns values and indices + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); + outputs[0]->setDtype(inputs[0]->dtype()); // topk_values + + // 遵从原始 reshape 逻辑,在这里 alloc + // outputs[0]->alloc(); + + return MLLM_NO_ERROR; + } + + ErrorCode execute(vector> inputs, vector> outputs) override { +#pragma omp parallel for collapse(4) num_threads(CPUBackend::cpu_threads) + for (int n = 0; n < inputs[0]->batch(); n++) { + for (int h = 0; h < inputs[0]->head(); h++) { + for (int s = 0; s < inputs[0]->sequence(); s++) { + for (int d = 0; d < inputs[0]->dimension(); ++d) { + float value = inputs[0]->dataAt(n, h, s, d); + assert(((int)value == 1 || (int)value == 0) && "Tilde operation expects input to be 1.0"); + float set_data = ((int)value == 1) ? 
0.0F : 1.0F; // Tilde operation: negation + outputs[0]->setDataAt(n, h, s, d, set_data); // Tilde operation: negation + } + } + } + } + return MLLM_NO_ERROR; + } +}; + +class CPUTildeCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + return new CPUTilde(bn, name, threadCount); + } +}; + +} // namespace mllm +#endif // CPUTILDE_HPP \ No newline at end of file diff --git a/mllm/backends/cpu/op/CPUTopkFunc.hpp b/mllm/backends/cpu/op/CPUTopkFunc.hpp new file mode 100644 index 000000000..2c028bd2a --- /dev/null +++ b/mllm/backends/cpu/op/CPUTopkFunc.hpp @@ -0,0 +1,110 @@ +// +// Created by Rongjie Yi on 24-12-16. +// + +#ifndef CPUTOPKFUNC_HPP +#define CPUTOPKFUNC_HPP + +#include "CPUBackend.hpp" +#include "Tensor.hpp" +#include "Types.hpp" +#include +#include +#include +#include + +namespace mllm { +class Tensor; + +class CPUtopkFunction : public Op { +private: + int thread_count = 4; + int k_; + Chl dim_; + +public: + CPUtopkFunction(Backend *bn, string name, int threadCount, int k, Chl dim) : + Op(bn, name), thread_count(threadCount), k_(k), dim_(dim) { + } + + ErrorCode reshape(vector> inputs, vector> outputs) override { + assert(outputs.size() == 2); // topk returns values and indices + if (dim_ == DIMENSION) { + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), k_); + outputs[1]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), k_); + } else if (dim_ == HEAD) { + assert(inputs[0]->dimension() == 1 && "Only support topk on last dimension currently."); + outputs[0]->reshape(inputs[0]->batch(), k_, inputs[0]->sequence(), 1); // topk values + outputs[1]->reshape(inputs[0]->batch(), k_, inputs[0]->sequence(), 1); // topk values + } + // NOTE: Add cases for other dimensions if needed. 
+ + outputs[0]->setDtype(inputs[0]->dtype()); // topk_values + outputs[1]->setDtype(inputs[0]->dtype()); // topk_indices are typically int, but float is used here + + // 遵从原始 reshape 逻辑,在这里 alloc + // outputs[0]->alloc(); + // outputs[1]->alloc(); + + return MLLM_NO_ERROR; + } + + ErrorCode execute(vector> inputs, vector> outputs) override { + if (dim_ == DIMENSION) { +#pragma omp parallel for collapse(3) num_threads(CPUBackend::cpu_threads) + for (int n = 0; n < inputs[0]->batch(); n++) { + for (int h = 0; h < inputs[0]->head(); h++) { + for (int s = 0; s < inputs[0]->sequence(); s++) { + std::priority_queue, std::vector>, std::greater<>> topk_value_indices; + for (int d = 0; d < inputs[0]->dimension(); ++d) { + float value = inputs[0]->dataAt(n, h, s, d); + topk_value_indices.push({value, d}); + if (topk_value_indices.size() > k_) { + topk_value_indices.pop(); + } + } + for (int d = k_ - 1; d >= 0; --d) { + auto top = topk_value_indices.top(); + topk_value_indices.pop(); + outputs[0]->setDataAt(n, h, s, d, top.first); + outputs[1]->setDataAt(n, h, s, d, top.second); + } + } + } + } + } else if (dim_ == HEAD) { + for (int n = 0; n < inputs[0]->batch(); n++) { + for (int s = 0; s < inputs[0]->sequence(); s++) { + std::priority_queue, std::vector>, std::greater<>> topk_value_indices; + for (int h = 0; h < inputs[0]->head(); h++) { + float value = inputs[0]->dataAt(n, h, s, 0); + topk_value_indices.push({value, h}); + if (topk_value_indices.size() > k_) { + topk_value_indices.pop(); + } + } + for (int h = k_ - 1; h >= 0; --h) { + auto top = topk_value_indices.top(); + topk_value_indices.pop(); + outputs[0]->setDataAt(n, h, s, 0, top.first); + outputs[1]->setDataAt(n, h, s, 0, top.second); + } + } + } + } + // NOTE: Add cases for other dimensions if needed. 
+ return MLLM_NO_ERROR; + } +}; + +class CPUtopkFunctionCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + int k = static_cast(op_param.at("k")); + Chl dim = (Chl)op_param.at("dim"); + return new CPUtopkFunction(bn, name, threadCount, k, dim); + } +}; + +} // namespace mllm +#endif // CPUTOPKFUNC_HPP \ No newline at end of file diff --git a/src/backends/cpu/op/CPUTranspose.cpp b/mllm/backends/cpu/op/CPUTranspose.cpp similarity index 76% rename from src/backends/cpu/op/CPUTranspose.cpp rename to mllm/backends/cpu/op/CPUTranspose.cpp index ef63454bd..37d2cc41a 100644 --- a/src/backends/cpu/op/CPUTranspose.cpp +++ b/mllm/backends/cpu/op/CPUTranspose.cpp @@ -3,27 +3,25 @@ namespace mllm { -CPUTranspose::CPUTranspose(Backend *bn, string opName, int axis0, int axis1, int threadCount) : thread_count(threadCount), +CPUTranspose::CPUTranspose(Backend *bn, string opName, int axis0, int axis1, int threadCount) : + thread_count(threadCount), Op(bn, opName) { axis0_ = (Chl)axis0; axis1_ = (Chl)axis1; } ErrorCode CPUTranspose::reshape(vector> inputs, vector> outputs) { - // inputs[0]->transShape(SEQUENCE, DIMENSION); - if(axis0_ == SEQUENCE && axis1_ == DIMENSION) { - if(inputs[0]->ctype() == BSHD) { + if (axis0_ == SEQUENCE && axis1_ == DIMENSION) { + if (inputs[0]->ctype() == BSHD) { outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->dimension(), inputs[0]->sequence()); } - } - else if(axis0_ == THW && axis1_ == CHANNLE) { - if(inputs[0]->ctype() == BCTHW) { + } else if (axis0_ == THW && axis1_ == CHANNLE) { + if (inputs[0]->ctype() == BCTHW) { outputs[0]->reshape(inputs[0]->batch(), inputs[0]->time(), inputs[0]->height(), inputs[0]->width(), inputs[0]->channel()); } - } - else if(axis0_ == BATCH && axis1_ == SEQUENCE) { - if(inputs[0]->ctype() == BSHD) { + } else if (axis0_ == BATCH && axis1_ == SEQUENCE) { + if (inputs[0]->ctype() == BSHD) { 
outputs[0]->reshape(inputs[0]->sequence(), inputs[0]->head(), inputs[0]->batch(), inputs[0]->dimension()); } } @@ -31,30 +29,26 @@ ErrorCode CPUTranspose::reshape(vector> inputs, vector> inputs, vector> outputs) { - return Op::execute(inputs, outputs); } ErrorCode CPUTranspose::free(vector> inputs, vector> outputs) { - return Op::free(inputs, outputs); } ErrorCode CPUTranspose::setUp(vector> inputs, vector> outputs) { - // return Op::setUp(inputs, outputs); - if(inputs[0]->masterTensor() == nullptr) { + if (inputs[0]->masterTensor() == nullptr) { inputs[0]->free(); } outputs[0]->setDtype(activation_dtype()); outputs[0]->alloc(); // outputs[0]->transShape(SEQUENCE, DIMENSION); - inputs[0]->shallowCopyFrom(outputs[0].get(), false); + inputs[0]->shallowCopyFrom(outputs[0], false); inputs[0]->transShape(axis0_, axis1_, true); // if(inputs[0]->ctype() == BSHD) { // inputs[0]->transShape(SEQUENCE, DIMENSION, true); @@ -65,4 +59,3 @@ ErrorCode CPUTranspose::setUp(vector> inputs, vector +// #include +#include +#include +#include // For std::pair +#include // For std::equal + +namespace mllm { +class Tensor; + +class CPUtransposeFunction : public Op { +private: + int thread_count = 4; + vector> axiss_; + +public: + CPUtransposeFunction(Backend *bn, string name, int threadCount, const vector> &axiss) : + Op(bn, name), thread_count(threadCount), axiss_(axiss) { + } + + ErrorCode setUp(vector> inputs, vector> outputs) override { + // for BSHD attention start + if (axiss_.size() == 1 && axiss_[0].first == HEAD && axiss_[0].second == SEQUENCE) { + if (inputs[0]->ctype() == BSHD) { + outputs[0]->chls() = {{BATCH, 0}, {HEAD, 1}, {SEQUENCE, 2}, {DIMENSION, 3}}; + } else { // inputs[0]->ctype() == BHSD + outputs[0]->chls() = {{BATCH, 0}, {SEQUENCE, 1}, {HEAD, 2}, {DIMENSION, 3}}; + } + outputs[0]->changeCtype(4); + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); + return MLLM_NO_ERROR; + } else if (axiss_.size() == 1 && 
axiss_[0].first == SEQUENCE && axiss_[0].second == DIMENSION && inputs[0]->ctype() == BHSD) { + outputs[0]->setCtype(BHSD); + outputs[0]->setDtype(inputs[0]->dtype()); + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->dimension(), inputs[0]->sequence()); + return MLLM_NO_ERROR; + } + // for BSHD attention end + + if (!outputs[0]->undiffusion()) { + outputs[0]->transCopyShape(inputs[0]->shape()); + std::map origin_chls = {{BATCH, 0}, {SEQUENCE, 1}, {HEAD, 2}, {DIMENSION, 3}, {CHANNLE, 1}, {TIME, 2}, {HEIGHT, 3}, {WIDTH, 4}}; + if (std::equal(outputs[0]->chls().begin(), outputs[0]->chls().end(), origin_chls.begin())) { + outputs[0]->chls() = inputs[0]->chls(); + for (auto axis : axiss_) { + auto axis0 = axis.first; + auto axis1 = axis.second; + auto ori_0_idx = outputs[0]->chls()[axis0]; + auto ori_1_idx = outputs[0]->chls()[axis1]; + outputs[0]->chls()[axis0] = ori_1_idx; + outputs[0]->chls()[axis1] = ori_0_idx; + } + outputs[0]->changeCtype(inputs[0]->shape().size()); + outputs[0]->undiffusion() = true; + } + } + + if (inputs[0]->masterTensor() != nullptr && (inputs[0]->masterTensor()->name().find("Cache") != std::string::npos || inputs[0]->masterTensor()->name().find("weight") != std::string::npos)) { + if (outputs[0]->masterTensor() == nullptr) { + outputs[0]->setDtype(inputs[0]->dtype()); + outputs[0]->shallowCopyFrom(inputs[0], false); + } + } else { + if (inputs[0]->masterTensor() == nullptr) { + inputs[0]->free(); + } + outputs[0]->setDtype(inputs[0]->dtype()); + outputs[0]->alloc(); + inputs[0]->setUndiffusion(true); + inputs[0]->shallowCopyFrom(outputs[0], false); + outputs[0]->transFrom() = axiss_; + } + return MLLM_NO_ERROR; + } + + ErrorCode reshape(vector> inputs, vector> outputs) override { + // for BSHD attention start + if (axiss_.size() == 1 && axiss_[0].first == HEAD && axiss_[0].second == SEQUENCE) { + outputs[0]->transCopyShape(inputs[0]->shape()); + outputs[0]->chls() = inputs[0]->chls(); + 
std::swap(outputs[0]->chls()[HEAD], outputs[0]->chls()[SEQUENCE]); + outputs[0]->changeCtype(inputs[0]->shape().size()); + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); + return MLLM_NO_ERROR; + } else if (axiss_.size() == 1 && axiss_[0].first == SEQUENCE && axiss_[0].second == DIMENSION && inputs[0]->ctype() == BHSD) { + outputs[0]->setCtype(BHSD); + outputs[0]->setDtype(inputs[0]->dtype()); + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->dimension(), inputs[0]->sequence()); + return MLLM_NO_ERROR; + } + // for BSHD attention end + + std::map origin_chls = {{BATCH, 0}, {SEQUENCE, 1}, {HEAD, 2}, {DIMENSION, 3}, {CHANNLE, 1}, {TIME, 2}, {HEIGHT, 3}, {WIDTH, 4}}; + auto origin_s = inputs[0]->shape().size(); + outputs[0]->transCopyShape(inputs[0]->shape()); + + if (inputs[0]->masterTensor() == nullptr || std::equal(outputs[0]->chls().begin(), outputs[0]->chls().end(), origin_chls.begin())) { + outputs[0]->chls() = inputs[0]->chls(); + for (auto axis : axiss_) { + auto axis0 = axis.first; + auto axis1 = axis.second; + std::swap(outputs[0]->chls()[axis0], outputs[0]->chls()[axis1]); + } + outputs[0]->changeCtype(origin_s); + outputs[0]->undiffusion() = true; + } + + if (inputs[0]->masterTensor() != nullptr && (inputs[0]->masterTensor()->name().find("Cache") != std::string::npos || inputs[0]->masterTensor()->name().find("weight") != std::string::npos)) { + if (outputs[0]->masterTensor() == nullptr) { + outputs[0]->setDtype(inputs[0]->dtype()); + outputs[0]->shallowCopyFrom(inputs[0], false); + } + } + return MLLM_NO_ERROR; + } + + ErrorCode execute(vector> inputs, vector> outputs) override { + // for BSHD attention start + if (axiss_.size() == 1 && axiss_[0].first == HEAD && axiss_[0].second == SEQUENCE) { + // This is a physical transpose, allocate and copy + if (outputs[0]->hostPtr() == nullptr) { + outputs[0]->alloc(); + } + // BSHD -> BHSD (transpose S and H) + // 真转置 + 
assert(inputs[0]->batch() == 1); + assert(outputs[0]->batch() == 1); + assert(inputs[0]->head() == outputs[0]->head()); + assert(inputs[0]->sequence() == outputs[0]->sequence()); + if (inputs[0]->dtype() == outputs[0]->dtype()) { +#pragma omp parallel for num_threads(thread_count) + for (int h = 0; h < inputs[0]->head(); ++h) { + for (int s = 0; s < inputs[0]->sequence(); ++s) { + auto input_ptr = inputs[0]->ptrAt(0, h, s, 0); + auto output_ptr = outputs[0]->ptrAt(0, h, s, 0); + memcpy(output_ptr, input_ptr, inputs[0]->dimension() * sizeof(float)); + } + } + } else { // With quantization +#pragma omp parallel for num_threads(thread_count) + for (int h = 0; h < inputs[0]->head(); ++h) { + for (int s = 0; s < inputs[0]->sequence(); ++s) { + // auto input_ptr = inputs[0]->ptrAt(0, h, s, 0); + // auto output_ptr = outputs[0]->ptrAt(0, h, s, 0); + for (int d = 0; d < inputs[0]->dimension(); ++d) { + // output_ptr[d] = MLLM_FP32_TO_FP16(input_ptr[d]); + auto value = inputs[0]->dataAt(0, h, s, d); + outputs[0]->setDataAt(0, h, s, d, MLLM_FP32_TO_FP16(value)); + } + } + } + } + + } else if (axiss_.size() == 1 && axiss_[0].first == SEQUENCE && axiss_[0].second == DIMENSION && inputs[0]->ctype() == BHSD) { + assert(outputs[0]->ctype() == BHSD); + // 真转置 + assert(inputs[0]->batch() == 1); + assert(outputs[0]->batch() == 1); + assert(inputs[0]->sequence() == outputs[0]->dimension()); + assert(outputs[0]->sequence() == inputs[0]->dimension()); + // BHSD->BHDS + const int N = inputs[0]->sequence(); + const int M = inputs[0]->dimension(); + if (inputs[0]->dtype() == MLLM_TYPE_F32) { +#pragma omp parallel for num_threads(thread_count) + for (int h = 0; h < inputs[0]->head(); ++h) { + const float *src_ptr = inputs[0]->ptrAt(0, h, 0, 0); + float *dst_ptr = outputs[0]->ptrAt(0, h, 0, 0); + transpose_matrix_efficient(src_ptr, dst_ptr, N, M); + } + } else { +#if defined(__aarch64__) +#pragma omp parallel for num_threads(thread_count) + for (int h = 0; h < inputs[0]->head(); ++h) { + 
const mllm_fp16_t *src_ptr = inputs[0]->ptrAt(0, h, 0, 0); + mllm_fp16_t *dst_ptr = outputs[0]->ptrAt(0, h, 0, 0); + transpose_matrix_efficient_fp16(src_ptr, dst_ptr, N, M); + } +#else + std::cout << "FP16 transpose not supported on non-aarch64 platform" << std::endl; +#endif + } + } + // for BSHD attention end + // Note: The general transpose case is handled by metadata changes in reshape/setUp + // and does not require data movement in execute. + return MLLM_NO_ERROR; + } +}; + +class CPUtransposeFunctionCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + // Assumes OpParam is structured to pass the axis pairs. + // Example: {"num_pairs": 1, "axis1_0": 2, "axis2_0": 1} (HEAD, SEQUENCE) + int num_pairs = static_cast(op_param.at("num_pairs")); + vector> axiss; + for (int i = 0; i < num_pairs; ++i) { + Chl axis1 = (Chl)op_param.at("axis1_" + std::to_string(i)); + Chl axis2 = (Chl)op_param.at("axis2_" + std::to_string(i)); + axiss.push_back({axis1, axis2}); + } + return new CPUtransposeFunction(bn, name, threadCount, axiss); + } +}; + +} // namespace mllm +#endif // CPUTRANSPOSEFUNC_HPP \ No newline at end of file diff --git a/mllm/backends/cpu/op/CPUView.cpp b/mllm/backends/cpu/op/CPUView.cpp new file mode 100644 index 000000000..cc92dc537 --- /dev/null +++ b/mllm/backends/cpu/op/CPUView.cpp @@ -0,0 +1,125 @@ + + +#include "CPUView.hpp" + +namespace mllm { + +CPUView::CPUView(Backend *bn, string opName, vector dims, vector data_dims, int threadCount) : + thread_count(threadCount), + Op(bn, opName) { + dim0_ = dims[0]; + dim1_ = dims[1]; + dim2_ = dims[2]; + dim3_ = dims[3]; + // if(dims.size() == 5) { + // dim4_ = dims[4]; + // } + data_dim0_ = data_dims[0]; + data_dim1_ = data_dims[1]; + data_dim2_ = data_dims[2]; + data_dim3_ = data_dims[3]; + // if(data_dims.size() == 5) { + // data_dim4_ = data_dims[4]; + // } +} + +ErrorCode CPUView::reshape(vector> inputs, vector> 
outputs) { + // if(data_dim4_ != -999) { + // int dim0 = inputs[0]->batch(); + // int dim1 = inputs[0]->channel(); + // int dim2 = inputs[0]->height(); + // int dim3 = inputs[0]->width(); + // int dim4 = inputs[0]->dimension(); + // assert(inputs[0]->ctype() == BCTHW); + // + // outputs[0]->reshape(dim0, dim1, dim2, dim3, dim4); + // } else { + int dim0 = inputs[0]->batch(); + int dim1 = inputs[0]->head(); + int dim2 = inputs[0]->sequence(); + int dim3 = inputs[0]->dimension(); + if (data_dim0_ == BATCH && data_dim1_ == DIMENSION && data_dim2_ == SEQUENCE && data_dim3_ == DIMENSION) { + dim1 = dim1_; + dim3 = inputs[0]->dimension() / dim1_; + } else if (data_dim0_ == BATCH && data_dim1_ == -1 && data_dim2_ == SEQUENCE && data_dim3_ == HEAD + DIMENSION) { + dim1 = 1; + dim3 = inputs[0]->dimension() * inputs[0]->head(); + } else if (data_dim0_ == BATCH && data_dim1_ == -1 && data_dim2_ == SEQUENCE + HEAD && data_dim3_ == DIMENSION) { + dim1 = 1; + dim2 = inputs[0]->sequence() * inputs[0]->head(); + } else if (data_dim0_ == BATCH && data_dim1_ == -1 && data_dim2_ == CHANNLE && data_dim3_ == TIME + HEIGHT + WIDTH) { + // assert(inputs[0]->ctype() == BCTHW); + dim1 = 1; + dim2 = inputs[0]->channel(); + dim3 = inputs[0]->time() * inputs[0]->height() * inputs[0]->width(); + } else if (data_dim0_ == BATCH && data_dim1_ == -1 && data_dim2_ == TIME + HEIGHT + WIDTH && data_dim3_ == CHANNLE) { + if (inputs[0]->ctype() == BTHWC) { + dim1 = 1; + dim2 = inputs[0]->time() * inputs[0]->height() * inputs[0]->width(); + dim3 = inputs[0]->channel(); + } else { + dim1 = 1; + dim2 = inputs[0]->time() * inputs[0]->height() * inputs[0]->channel(); + dim3 = inputs[0]->width(); + } + } else if (data_dim0_ == SEQUENCE && data_dim1_ == HEAD && data_dim2_ == BATCH && data_dim3_ == DIMENSION) { + dim0 = inputs[0]->sequence(); + dim1 = inputs[0]->head(); + dim2 = inputs[0]->batch(); + dim3 = inputs[0]->dimension(); + } else if (data_dim0_ == BATCH && data_dim1_ == HEAD && data_dim2_ == BATCH && 
data_dim3_ == DIMENSION) { + dim0 = inputs[0]->batch() / dim2_; + dim1 = inputs[0]->head(); + dim2 = dim2_; + dim3 = inputs[0]->dimension(); + } else if (data_dim0_ == BATCH && data_dim1_ == HEAD && data_dim2_ == SEQUENCE && data_dim3_ == DIMENSION) { + dim0 = dim0_; + dim1 = dim1_; + dim2 = dim2_; + dim3 = dim3_; + } else { + std::cout << "CPUView not support!!!!" << std::endl; + } + outputs[0]->reshape(dim0, dim1, dim2, dim3); + // } + return Op::reshape(inputs, outputs); +} + +ErrorCode CPUView::execute(vector> inputs, vector> outputs) { + if (noNeedEx_) { + return Op::execute(inputs, outputs); + } else { + std::cout << "CPUView not support!!!!" << std::endl; + } + return Op::execute(inputs, outputs); +} + +ErrorCode CPUView::setUp(vector> inputs, vector> outputs) { + assert(inputs.size() == 1); + assert(outputs.size() == 1); + + activation_dtype_ = inputs[0]->dtype(); + + if ((data_dim0_ == BATCH && data_dim2_ == SEQUENCE && inputs[0]->ctype() != BCTHW) // head & dimension + || (data_dim0_ == BATCH && data_dim3_ == DIMENSION && inputs[0]->ctype() == BSHD) // head & sequence + || (data_dim0_ == SEQUENCE && data_dim1_ == HEAD && data_dim2_ == BATCH && data_dim3_ == DIMENSION && inputs[0]->ctype() == BSHD) // head & sequence + || (data_dim0_ == BATCH && inputs[0]->ctype() == BCTHW) // + || (data_dim1_ == HEAD && data_dim3_ == DIMENSION && inputs[0]->ctype() == BSHD // batch & sequence + || (data_dim0_ == BATCH && data_dim1_ == HEAD && data_dim2_ == SEQUENCE && data_dim3_ == DIMENSION)) // batch & sequence & head & dimension + // || (data_dim0_ == BATCH && data_dim3_ == CHANNLE && inputs[0]->ctype()==BTHWC) // + ) { + noNeedEx_ = true; + if (inputs[0]->masterTensor() == nullptr) { + inputs[0]->free(); + } + outputs[0]->setDtype(activation_dtype()); + outputs[0]->alloc(); + inputs[0]->shallowCopyFrom(outputs[0], false); + return MLLM_NO_ERROR; + } else { + std::cout << "CPUView not support!!!!" 
<< std::endl; + return Op::setUp(inputs, outputs); + } +} + +} // namespace mllm diff --git a/src/backends/cpu/op/CPUView.hpp b/mllm/backends/cpu/op/CPUView.hpp similarity index 100% rename from src/backends/cpu/op/CPUView.hpp rename to mllm/backends/cpu/op/CPUView.hpp diff --git a/src/backends/cpu/function/CPUViewFunc.hpp b/mllm/backends/cpu/op/CPUViewFunc.hpp similarity index 66% rename from src/backends/cpu/function/CPUViewFunc.hpp rename to mllm/backends/cpu/op/CPUViewFunc.hpp index 346b51d04..36aec5505 100644 --- a/src/backends/cpu/function/CPUViewFunc.hpp +++ b/mllm/backends/cpu/op/CPUViewFunc.hpp @@ -4,20 +4,30 @@ #ifndef CPUVIEWFUNC_HPP #define CPUVIEWFUNC_HPP + #include "Tensor.hpp" #include "Types.hpp" +#include "CPUBackend.hpp" +#include +#include +#include +#include +#include namespace mllm { class Tensor; -class CPUviewFunction : public TensorFunction { +class CPUviewFunction : public Op { +private: + int thread_count = 4; + int b, h, s, d; + public: - void setUp(vector> outputs, vector> inputs, vector args) override { - // inputs[0]->shallowCopyFrom(outputs[0].get(), false); - int b = (int)args[0]; - int h = (int)args[1]; - int s = (int)args[2]; - int d = (int)args[3]; + CPUviewFunction(Backend *bn, string name, int threadCount, int b_, int h_, int s_, int d_) : + Op(bn, name), thread_count(threadCount), b(b_), h(h_), s(s_), d(d_) { + } + + ErrorCode setUp(vector> inputs, vector> outputs) override { if ((b == -1 && s == -1 && inputs[0]->ctype() != BCTHW) // head & dimension || (b == 1 && h == 1 && inputs[0]->ctype() == BCTHW) // head & dimension || (b == 1 && h == 1 && inputs[0]->ctype() == BSHD) // head & dimension @@ -31,16 +41,15 @@ class CPUviewFunction : public TensorFunction { } outputs[0]->setDtype(inputs[0]->dtype()); outputs[0]->alloc(); - inputs[0]->shallowCopyFrom(outputs[0].get(), false); + inputs[0]->shallowCopyFrom(outputs[0], false); } else { - std::cout << "[TODO]Tensor.View alloc not support!!!!" 
<< std::endl; + std::cout << "[TODO]Tensor.View [" << b << ", " << s << ", " << h << ", " << d << "] alloc not support!!!!" << std::endl; + exit(-2); } + return MLLM_NO_ERROR; } - void reshape(vector> outputs, vector> inputs, vector args) override { - int b = (int)args[0]; - int h = (int)args[1]; - int s = (int)args[2]; - int d = (int)args[3]; + + ErrorCode reshape(vector> inputs, vector> outputs) override { int dim_b = inputs[0]->batch(); int dim_h = inputs[0]->head(); int dim_s = inputs[0]->sequence(); @@ -75,7 +84,8 @@ class CPUviewFunction : public TensorFunction { dim_h = inputs[0]->dimension() * inputs[0]->head() / d; dim_d = d; } else { - std::cout << "[TODO]Tensor.View not support!!!!" << std::endl; + std::cout << "[TODO]Tensor.View [" << b << ", " << s << ", " << h << ", " << d << "] alloc not support!!!!" << std::endl; + exit(-2); } } else if (b == -1 && h != -1 && s != -1 && d == -1) { // head & sequence if (h != ANYDIM && s != ANYDIM) { @@ -89,7 +99,8 @@ class CPUviewFunction : public TensorFunction { dim_h = inputs[0]->sequence() * inputs[0]->head() / s; dim_s = s; } else { - std::cout << "[TODO]Tensor.View not support!!!!" << std::endl; + std::cout << "[TODO]Tensor.View [" << b << ", " << s << ", " << h << ", " << d << "] not support!!!!" << std::endl; + exit(-2); } } else if (b != -1 && h == -1 && s != -1 && d == -1) { // batch & sequence if (b != ANYDIM && s != ANYDIM) { @@ -103,10 +114,12 @@ class CPUviewFunction : public TensorFunction { dim_b = inputs[0]->sequence() * inputs[0]->batch() / s; dim_s = s; } else { - std::cout << "[TODO]Tensor.View not support!!!!" << std::endl; + std::cout << "[TODO]Tensor.View [" << b << ", " << s << ", " << h << ", " << d << "] not support!!!!" << std::endl; + exit(-2); } } else { - std::cout << "[TODO]Tensor.View not support!!!!" << std::endl; + std::cout << "[TODO]Tensor.View [" << b << ", " << s << ", " << h << ", " << d << "] not support!!!!" 
<< std::endl; + exit(-2); } if (inputs[0]->ctype() == BCTHW && inputs[0]->name() == outputs[0]->name()) { outputs[0]->setCtype(BSHD); @@ -115,29 +128,27 @@ class CPUviewFunction : public TensorFunction { if (inputs[0]->masterTensor() != nullptr && inputs[0]->name() == outputs[0]->name()) { inputs[0]->shallowCopyFrom(inputs[0]->masterTensor(), false, inputs[0]->shapeOffset()); } + return MLLM_NO_ERROR; + } - /* - outputs[0]->reshape(dim_b, dim_h, dim_s, dim_d); - if ((b == -1 && s == -1 && inputs[0]->ctype() != BCTHW) // head & dimension - || (b == 1 && h == 1 && inputs[0]->ctype() == BCTHW) // head & dimension - || (b == 1 && h == 1 && inputs[0]->ctype() == BSHD) // head & dimension - || (b == -1 && d == -1 && inputs[0]->ctype() == BSHD) // head & sequence - || (h == -1 && d == -1 && inputs[0]->ctype() == BSHD) // batch & sequence - || (b == -1 && h == 1 && s == 1 && d == -1 && inputs[0]->ctype() == BSHD) // sequence & head & dimension -> dimension - || (b == -1 && h == -1 && s == 1 && d == 1 && inputs[0]->ctype() == BSHD) // sequence & head & dimension -> sequence - ) { - if (inputs[0]->masterTensor() == nullptr) { - inputs[0]->free(); - } - outputs[0]->setDtype(inputs[0]->dtype()); - outputs[0]->alloc(); - inputs[0]->shallowCopyFrom(outputs[0], false); - } else { - std::cout << "[TODO]Tensor.View alloc not support!!!!" << std::endl; + ErrorCode execute(vector> inputs, vector> outputs) override { + // View is a metadata-only operation, no data movement is needed in execute. 
+ if (inputs[0]->hostPtr() != outputs[0]->hostPtr()) { + memcpy(outputs[0]->hostPtr(), inputs[0]->hostPtr(), inputs[0]->cntSize()); } - */ + return MLLM_NO_ERROR; } - void execute(vector> outputs, vector> inputs, vector args) override { +}; + +class CPUviewFunctionCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + // Assumes OpParam contains keys "b", "h", "s", "d" + int b = static_cast(op_param.at("b")); + int h = static_cast(op_param.at("h")); + int s = static_cast(op_param.at("s")); + int d = static_cast(op_param.at("d")); + return new CPUviewFunction(bn, name, threadCount, b, h, s, d); } }; diff --git a/src/backends/cpu/op/CPUVisionRoPE.cpp b/mllm/backends/cpu/op/CPUVisionRoPE.cpp similarity index 100% rename from src/backends/cpu/op/CPUVisionRoPE.cpp rename to mllm/backends/cpu/op/CPUVisionRoPE.cpp diff --git a/src/backends/cpu/op/CPUVisionRoPE.hpp b/mllm/backends/cpu/op/CPUVisionRoPE.hpp similarity index 100% rename from src/backends/cpu/op/CPUVisionRoPE.hpp rename to mllm/backends/cpu/op/CPUVisionRoPE.hpp diff --git a/mllm/backends/cpu/op/CPUVisionRoPECos.cpp b/mllm/backends/cpu/op/CPUVisionRoPECos.cpp new file mode 100644 index 000000000..44262b2be --- /dev/null +++ b/mllm/backends/cpu/op/CPUVisionRoPECos.cpp @@ -0,0 +1,213 @@ +#include "CPUVisionRoPECos.hpp" +#include +#include +#include +#include +#include "CPUBackend.hpp" +#include "compute/SIMDMemory.hpp" + +using namespace std; + +namespace mllm { +CPUVisionRoPECos::CPUVisionRoPECos(Backend *bn, string opName, int dim, int spatial_merge_size, int threadCount) : + thread_count(threadCount), + Op(bn, opName) { + dim_ = dim; + spatial_merge_size_ = spatial_merge_size; + compute_inv_freq(dim); +} +// 计算并填充inv_freq向量的方法 +void CPUVisionRoPECos::compute_inv_freq(int dim) { + const int half_dim = dim / 2; + inv_freq.clear(); + inv_freq.resize(half_dim); +#pragma omp parallel for num_threads(4) + for (int i = 0; i 
< half_dim; ++i) { + const float exponent = (2.0f * i) / static_cast(dim); + inv_freq[i] = 1.0f / std::pow(theta, exponent); + } +} +std::vector> CPUVisionRoPECos::rotary_pos_emb_forward(int seqlen) { + if (seqlen <= 0) { + throw std::invalid_argument("seqlen must be positive"); + } + // 生成序列 [0, 1, ..., seqlen-1] + std::vector seq(seqlen); +#pragma omp parallel for num_threads(4) + for (int i = 0; i < seqlen; ++i) { + seq[i] = static_cast(i); + } + // 预分配结果矩阵 + std::vector> freqs( + seqlen, + std::vector(inv_freq.size())); +// 并行计算外积 +#pragma omp parallel for num_threads(4) schedule(dynamic) + for (size_t i = 0; i < seq.size(); ++i) { + const float seq_val = seq[i]; + auto &row = freqs[i]; + for (size_t j = 0; j < inv_freq.size(); ++j) { + row[j] = std::cos(seq_val * inv_freq[j]); + } + } + return freqs; +} + +vector> CPUVisionRoPECos::rot_pos_emb(vector> grid_thw, int spatial_merge_size) { + vector> pos_ids; + // int max_grid_size = 0; + // 遍历每个时空网格配置 + for (auto &row : grid_thw) { + int t = static_cast(row[0]); + int h = static_cast(row[1]); + int w = static_cast(row[2]); + // 更新最大空间网格尺寸 + max_grid_size = max({max_grid_size, h, w}); + // 计算分块参数 + int num_h_blocks = h / spatial_merge_size; + int num_w_blocks = w / spatial_merge_size; + int total_blocks = num_h_blocks * num_w_blocks; + const int block_area = spatial_merge_size * spatial_merge_size; + // 预分配内存 + vector flatten_hpos(total_blocks * block_area); + vector flatten_wpos(total_blocks * block_area); +// 并行生成坐标序列 +#pragma omp parallel for num_threads(thread_count) schedule(static) + for (int block_idx = 0; block_idx < total_blocks; ++block_idx) { + const int i_h = block_idx / num_w_blocks; + const int i_w = block_idx % num_w_blocks; + const int start_idx = block_idx * block_area; + // 生成块内坐标 + for (int j_h = 0; j_h < spatial_merge_size; ++j_h) { + for (int j_w = 0; j_w < spatial_merge_size; ++j_w) { + const int pos = start_idx + j_h * spatial_merge_size + j_w; + flatten_hpos[pos] = i_h * spatial_merge_size 
+ j_h; + flatten_wpos[pos] = i_w * spatial_merge_size + j_w; + } + } + } + // 创建坐标对并重复时间维度 + vector> current_pos; + current_pos.reserve(flatten_hpos.size()); + for (size_t i = 0; i < flatten_hpos.size(); ++i) { + current_pos.push_back({flatten_hpos[i], flatten_wpos[i]}); + } + // 扩展时间维度 + for (int i = 0; i < t; ++i) { + pos_ids.insert(pos_ids.end(), current_pos.begin(), current_pos.end()); + } + } + // return {pos_ids, max_grid_size}; + return pos_ids; +} + +void CPUVisionRoPECos::compute_rotary_pos_embd( + const vector> &rotary_pos_emb_full, + const vector> &pos_ids, + shared_ptr output) { + // 输入验证 + if (rotary_pos_emb_full.empty() || pos_ids.empty()) { + throw invalid_argument("Input containers must not be empty"); + } + const size_t num_positions = rotary_pos_emb_full.size(); + const size_t dim = rotary_pos_emb_full[0].size(); + const size_t batch_size = pos_ids.size(); + // 验证嵌入维度一致性 + for (const auto &emb : rotary_pos_emb_full) { + if (emb.size() != dim) { + throw invalid_argument("Inconsistent embedding dimensions"); + } + } + // 验证位置ID有效性 + const size_t seq_len = pos_ids[0].size(); + for (const auto &ids : pos_ids) { + if (ids.size() != seq_len) { + throw invalid_argument("Varied sequence lengths not supported"); + } + for (int idx : ids) { + if (idx < 0 || idx >= num_positions) { + throw out_of_range("Position index out of bounds"); + } + } + } + // 准备输出Tensor + const size_t flattened_size = seq_len * dim; + assert(flattened_size == output->dimension()); // output->resize(1, 1, batch_size, flattened_size); +// 并行处理主循环 +#pragma omp parallel for num_threads(CPUBackend::cpu_threads) + for (size_t batch_idx = 0; batch_idx < batch_size; ++batch_idx) { + float *batch_ptr = output->ptrAt(0, 0, batch_idx, 0); + size_t offset = 0; + for (const int pos_idx : pos_ids[batch_idx]) { + const auto &emb = rotary_pos_emb_full[pos_idx]; + simd_memcpy(batch_ptr + offset, emb.data(), dim); + offset += dim; + } + } +} + +/* +vector> compute_rotary_positional_embeddings( + 
const vector>& rotary_pos_emb_full, + const vector>& pos_ids, + shared_ptr output) +{ + // 输入验证 + if (rotary_pos_emb_full.empty() || pos_ids.empty()) { + return {}; + } + const size_t num_positions = rotary_pos_emb_full.size(); + const size_t dim = rotary_pos_emb_full[0].size(); + const size_t batch_size = pos_ids.size(); + // 验证所有位置的维度一致性 + for (const auto& emb : rotary_pos_emb_full) { + if (emb.size() != dim) { + throw invalid_argument("All positional embeddings must have the same dimension"); + } + } + vector> result(batch_size); + // 并行处理每个样本 + #pragma omp parallel for + for (size_t i = 0; i < batch_size; ++i) { + const auto& positions = pos_ids[i]; + vector flattened; + flattened.reserve(positions.size() * dim); + for (const int idx : positions) { + // 边界检查 + if (idx < 0 || idx >= num_positions) { + throw out_of_range("Position index out of range"); + } + // 获取对应的位置嵌入 + const auto& emb = rotary_pos_emb_full[idx]; + // 使用SIMD加速的内存复制 + const size_t prev_size = flattened.size(); + flattened.resize(prev_size + dim); + simd_memcpy(flattened.data() + prev_size, emb.data(), dim); + } + // 移动语义优化内存分配 + result[i] = std::move(flattened); + } + return result; +} +*/ + +ErrorCode CPUVisionRoPECos::reshape(vector> inputs, vector> outputs) { + int grid_t = (int)inputs[0]->dataAt(0, 0, 0, 0); + int grid_h = (int)inputs[0]->dataAt(0, 0, 0, 1); + int grid_w = (int)inputs[0]->dataAt(0, 0, 0, 2); + outputs[0]->reshape(1, 1, grid_t * grid_h * grid_w, 2 * (dim_ / 2)); + return Op::reshape(inputs, outputs); +} + +ErrorCode CPUVisionRoPECos::execute(vector> inputs, vector> outputs) { + vector> grid_thw = {{inputs[0]->dataAt(0, 0, 0, 0), + inputs[0]->dataAt(0, 0, 0, 1), + inputs[0]->dataAt(0, 0, 0, 2)}}; + auto pos_ids = rot_pos_emb(grid_thw, spatial_merge_size_); // get pos_ids and max_grid_size + + auto rotary_pos_emb_full = rotary_pos_emb_forward(max_grid_size); + compute_rotary_pos_embd(rotary_pos_emb_full, pos_ids, outputs[0]); + return Op::execute(inputs, outputs); +} + +} // 
namespace mllm \ No newline at end of file diff --git a/mllm/backends/cpu/op/CPUVisionRoPECos.hpp b/mllm/backends/cpu/op/CPUVisionRoPECos.hpp new file mode 100644 index 000000000..03aa13712 --- /dev/null +++ b/mllm/backends/cpu/op/CPUVisionRoPECos.hpp @@ -0,0 +1,114 @@ + +#ifndef MLLM_CPUVISIONROPECOS_H +#define MLLM_CPUVISIONROPECOS_H + +#include "Op.hpp" +#include "../CPUBackend.hpp" + +namespace mllm { + +class CPUVisionRoPECos final : public Op { +public: + CPUVisionRoPECos(Backend *bn, string opName, int dim, int spatial_merge_size, int threadCount); + virtual ~CPUVisionRoPECos() = default; + virtual ErrorCode reshape(vector> inputs, vector> outputs) override; + virtual ErrorCode execute(vector> inputs, vector> outputs) override; + // virtual ErrorCode setUp(vector> inputs, vector> outputs) override; + +private: + int thread_count = 4; + int spatial_merge_size_; + int dim_; + + std::vector inv_freq; // 存储计算结果 + float theta = 10000.0f; // 默认theta值,与Transformer常用的值一致 + + // vector> pos_ids; + int max_grid_size; + + void compute_inv_freq(int dim); + std::vector> rotary_pos_emb_forward(int seqlen); + vector> rot_pos_emb(vector> grid_thw, int spatial_merge_size); + void compute_rotary_pos_embd( + const vector>& rotary_pos_emb_full, + const vector>& pos_ids, + shared_ptr output + ); +}; + +class CPUVisionRoPECosCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const { + int dims = (int)op_param["dim"]; + int spatial_merge_size = (int)op_param["spatial_merge_size"]; + return new CPUVisionRoPECos(bn, name, dims, spatial_merge_size, threadCount); + } +}; + + +/* +class VisionRotaryEmbedding { +private: + std::vector inv_freq; + float theta; + const int dim; + +public: + // 构造函数 + VisionRotaryEmbedding(int dim, float theta = 10000.0f) : dim(dim), theta(theta) + { + if (dim <= 0 || dim % 2 != 0) { + throw std::invalid_argument("Dimension must be positive even number"); + } + 
compute_inv_freq(); + } + + // 计算逆频率向量 + void compute_inv_freq() { + const int half_dim = dim / 2; + inv_freq.resize(half_dim); + #pragma omp parallel for num_threads(4) + for (int i = 0; i < half_dim; ++i) { + const float exponent = (2.0f * i) / static_cast(dim); + inv_freq[i] = 1.0f / std::pow(theta, exponent); + } + } + + // 前向计算(生成频率矩阵) + std::vector> rotary_pos_emb_forward(int seqlen) { + if (seqlen <= 0) { + throw std::invalid_argument("seqlen must be positive"); + } + // 生成序列 [0, 1, ..., seqlen-1] + std::vector seq(seqlen); + #pragma omp parallel for num_threads(4) + for (int i = 0; i < seqlen; ++i) { + seq[i] = static_cast(i); + } + // 预分配结果矩阵 + std::vector> freqs( + seqlen, + std::vector(inv_freq.size()) + ); + // 并行计算外积 + #pragma omp parallel for num_threads(4) schedule(dynamic) + for (size_t i = 0; i < seq.size(); ++i) { + const float seq_val = seq[i]; + auto& row = freqs[i]; + for (size_t j = 0; j < inv_freq.size(); ++j) { + row[j] = seq_val * inv_freq[j]; + } + } + return freqs; + } + + // 访问器方法 + const std::vector& get_inv_freq() const { return inv_freq; } + float get_theta() const { return theta; } + int get_dim() const { return dim; } +}; +*/ + +} // namespace mllm + +#endif // MLLM_CPUVISIONROPE_H diff --git a/src/backends/cpu/function/CPUApplyVisionRoPE.hpp b/mllm/backends/cpu/op/CPUVisionRoPEFunc.hpp similarity index 73% rename from src/backends/cpu/function/CPUApplyVisionRoPE.hpp rename to mllm/backends/cpu/op/CPUVisionRoPEFunc.hpp index 121cf71a8..7f6a6528e 100644 --- a/src/backends/cpu/function/CPUApplyVisionRoPE.hpp +++ b/mllm/backends/cpu/op/CPUVisionRoPEFunc.hpp @@ -4,19 +4,26 @@ #ifndef CPUAPPLYVISIONROPEFUNC_HPP #define CPUAPPLYVISIONROPEFUNC_HPP + #include "CPUBackend.hpp" #include "Tensor.hpp" #include "Types.hpp" +#include +#include // For std::sin, std::cos +#include "backends/cpu/third_party/ggml/QuantizeFP16.hpp" namespace mllm { class Tensor; -class CPUApplyVisionRoPEFunction : public TensorFunction { +class 
CPUVisionRoPEFuncFunction : public Op { +private: + int thread_count = 4; + void rope_hf(shared_ptr input, shared_ptr rotary_pos_emb, shared_ptr output, - int thread_count = 4) { + int thread_count) { auto out_dtype = output->dtype(); int partial_dimension = input->dimension(); - int half = (int)(partial_dimension / 2); + int half = (partial_dimension / 2); assert(partial_dimension % 2 == 0); if (output->ctype() == BSHD) { if (input->dtype() == MLLM_TYPE_F16) { @@ -32,8 +39,8 @@ class CPUApplyVisionRoPEFunction : public TensorFunction { auto rope_d = rotary_pos_emb->dataAt(0, 0, s, d); float sin_value = std::sin(rope_d); // sin_[s][d]; float cos_value = std::cos(rope_d); // cos_[s][d]; - auto value = in_value * cos_value - in_value_2 * sin_value; - auto value2 = in_value * sin_value + in_value_2 * cos_value; + auto value = (in_value * cos_value) - (in_value_2 * sin_value); + auto value2 = (in_value * sin_value) + (in_value_2 * cos_value); o[0] = MLLM_FP32_TO_FP16(value); o[half] = MLLM_FP32_TO_FP16(value2); } @@ -55,8 +62,8 @@ class CPUApplyVisionRoPEFunction : public TensorFunction { auto rope_d = rotary_pos_emb->dataAt(0, 0, s, d); float sin_value = std::sin(rope_d); // sin_[s][d]; float cos_value = std::cos(rope_d); // cos_[s][d]; - auto value = in_value * cos_value - in_value_2 * sin_value; - auto value2 = in_value * sin_value + in_value_2 * cos_value; + auto value = (in_value * cos_value) - (in_value_2 * sin_value); + auto value2 = (in_value * sin_value) + (in_value_2 * cos_value); o[0] = value; o[half] = value2; } @@ -76,8 +83,8 @@ class CPUApplyVisionRoPEFunction : public TensorFunction { auto rope_d = rotary_pos_emb->dataAt(0, 0, s, d); float sin_value = std::sin(rope_d); // sin_[s][d]; float cos_value = std::cos(rope_d); // cos_[s][d]; - auto value = in_value * cos_value - in_value_2 * sin_value; - auto value2 = in_value * sin_value + in_value_2 * cos_value; + auto value = (in_value * cos_value) - (in_value_2 * sin_value); + auto value2 = (in_value * 
sin_value) + (in_value_2 * cos_value); o[0] = MLLM_FP32_TO_FP16(value); o[half] = MLLM_FP32_TO_FP16(value2); } @@ -95,34 +102,34 @@ class CPUApplyVisionRoPEFunction : public TensorFunction { for (int d = 0; d < partial_dimension / 2; ++d) { if (input->dtype() == MLLM_TYPE_F16) { float in_value = static_cast(input->dataAt(n, h, s, d)); - float in_value_2 = static_cast(input->dataAt(n, h, s, d + partial_dimension / 2)); + float in_value_2 = static_cast(input->dataAt(n, h, s, d + (partial_dimension / 2))); auto rope_d = rotary_pos_emb->dataAt(0, 0, s, d); float sin_value = std::sin(rope_d); // sin_[s][d]; float cos_value = std::cos(rope_d); // cos_[s][d]; - auto value = in_value * cos_value - in_value_2 * sin_value; - auto value2 = in_value * sin_value + in_value_2 * cos_value; + auto value = (in_value * cos_value) - (in_value_2 * sin_value); + auto value2 = (in_value * sin_value) + (in_value_2 * cos_value); if (out_dtype == MLLM_TYPE_F32) { output->setDataAt(n, h, s, d, value); - output->setDataAt(n, h, s, d + partial_dimension / 2, value2); + output->setDataAt(n, h, s, d + (partial_dimension / 2), value2); } else if (out_dtype == MLLM_TYPE_F16) { output->setDataAt(n, h, s, d, MLLM_FP32_TO_FP16(value)); - output->setDataAt(n, h, s, d + partial_dimension / 2, MLLM_FP32_TO_FP16(value2)); + output->setDataAt(n, h, s, d + (partial_dimension / 2), MLLM_FP32_TO_FP16(value2)); } } else { float in_value = input->dataAt(n, h, s, d); - float in_value_2 = input->dataAt(n, h, s, d + partial_dimension / 2); + float in_value_2 = input->dataAt(n, h, s, d + (partial_dimension / 2)); auto rope_d = rotary_pos_emb->dataAt(0, 0, s, d); float sin_value = std::sin(rope_d); // sin_[s][d]; float cos_value = std::cos(rope_d); // cos_[s][d]; - auto value = in_value * cos_value - in_value_2 * sin_value; - auto value2 = in_value * sin_value + in_value_2 * cos_value; + auto value = (in_value * cos_value) - (in_value_2 * sin_value); + auto value2 = (in_value * sin_value) + (in_value_2 * 
cos_value); if (out_dtype == MLLM_TYPE_F32) { output->setDataAt(n, h, s, d, value); - output->setDataAt(n, h, s, d + partial_dimension / 2, value2); + output->setDataAt(n, h, s, d + (partial_dimension / 2), value2); } else if (out_dtype == MLLM_TYPE_F16) { output->setDataAt(n, h, s, d, MLLM_FP32_TO_FP16(value)); - output->setDataAt(n, h, s, d + partial_dimension / 2, MLLM_FP32_TO_FP16(value2)); + output->setDataAt(n, h, s, d + (partial_dimension / 2), MLLM_FP32_TO_FP16(value2)); } } } @@ -132,16 +139,32 @@ class CPUApplyVisionRoPEFunction : public TensorFunction { } public: - void reshape(vector> outputs, vector> inputs, vector args) override { + CPUVisionRoPEFuncFunction(Backend *bn, string name, int threadCount) : + Op(bn, name), thread_count(threadCount) { + } + + ErrorCode reshape(vector> inputs, vector> outputs) override { outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); outputs[0]->setDtype(inputs[0]->dtype()); - outputs[0]->alloc(); + // 遵从原始 reshape 逻辑,在这里 alloc + // outputs[0]->alloc(); + return MLLM_NO_ERROR; } - void execute(vector> outputs, vector> inputs, vector args) override { + + ErrorCode execute(vector> inputs, vector> outputs) override { auto input = inputs[0]; auto rotary_pos_emb = inputs[1]; - rope_hf(input, rotary_pos_emb, outputs[0], CPUBackend::cpu_threads); + rope_hf(input, rotary_pos_emb, outputs[0], thread_count); + return MLLM_NO_ERROR; } }; + +class CPUVisionRoPEFuncFunctionCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + return new CPUVisionRoPEFuncFunction(bn, name, threadCount); + } +}; + } // namespace mllm #endif // CPUAPPLYVISIONROPEFUNC_HPP \ No newline at end of file diff --git a/mllm/backends/cpu/op/CPUVisionRoPESin.cpp b/mllm/backends/cpu/op/CPUVisionRoPESin.cpp new file mode 100644 index 000000000..7514f9e43 --- /dev/null +++ b/mllm/backends/cpu/op/CPUVisionRoPESin.cpp 
@@ -0,0 +1,213 @@ +#include "CPUVisionRoPESin.hpp" +#include +#include +#include +#include +#include "CPUBackend.hpp" +#include "compute/SIMDMemory.hpp" + +using namespace std; + +namespace mllm { +CPUVisionRoPESin::CPUVisionRoPESin(Backend *bn, string opName, int dim, int spatial_merge_size, int threadCount) : + thread_count(threadCount), + Op(bn, opName) { + dim_ = dim; + spatial_merge_size_ = spatial_merge_size; + compute_inv_freq(dim); +} +// 计算并填充inv_freq向量的方法 +void CPUVisionRoPESin::compute_inv_freq(int dim) { + const int half_dim = dim / 2; + inv_freq.clear(); + inv_freq.resize(half_dim); +#pragma omp parallel for num_threads(4) + for (int i = 0; i < half_dim; ++i) { + const float exponent = (2.0f * i) / static_cast(dim); + inv_freq[i] = 1.0f / std::pow(theta, exponent); + } +} +std::vector> CPUVisionRoPESin::rotary_pos_emb_forward(int seqlen) { + if (seqlen <= 0) { + throw std::invalid_argument("seqlen must be positive"); + } + // 生成序列 [0, 1, ..., seqlen-1] + std::vector seq(seqlen); +#pragma omp parallel for num_threads(4) + for (int i = 0; i < seqlen; ++i) { + seq[i] = static_cast(i); + } + // 预分配结果矩阵 + std::vector> freqs( + seqlen, + std::vector(inv_freq.size())); +// 并行计算外积 +#pragma omp parallel for num_threads(4) schedule(dynamic) + for (size_t i = 0; i < seq.size(); ++i) { + const float seq_val = seq[i]; + auto &row = freqs[i]; + for (size_t j = 0; j < inv_freq.size(); ++j) { + row[j] = std::sin(seq_val * inv_freq[j]); + } + } + return freqs; +} + +vector> CPUVisionRoPESin::rot_pos_emb(vector> grid_thw, int spatial_merge_size) { + vector> pos_ids; + // int max_grid_size = 0; + // 遍历每个时空网格配置 + for (auto &row : grid_thw) { + int t = static_cast(row[0]); + int h = static_cast(row[1]); + int w = static_cast(row[2]); + // 更新最大空间网格尺寸 + max_grid_size = max({max_grid_size, h, w}); + // 计算分块参数 + int num_h_blocks = h / spatial_merge_size; + int num_w_blocks = w / spatial_merge_size; + int total_blocks = num_h_blocks * num_w_blocks; + const int block_area = 
spatial_merge_size * spatial_merge_size; + // 预分配内存 + vector flatten_hpos(total_blocks * block_area); + vector flatten_wpos(total_blocks * block_area); +// 并行生成坐标序列 +#pragma omp parallel for num_threads(thread_count) schedule(static) + for (int block_idx = 0; block_idx < total_blocks; ++block_idx) { + const int i_h = block_idx / num_w_blocks; + const int i_w = block_idx % num_w_blocks; + const int start_idx = block_idx * block_area; + // 生成块内坐标 + for (int j_h = 0; j_h < spatial_merge_size; ++j_h) { + for (int j_w = 0; j_w < spatial_merge_size; ++j_w) { + const int pos = start_idx + j_h * spatial_merge_size + j_w; + flatten_hpos[pos] = i_h * spatial_merge_size + j_h; + flatten_wpos[pos] = i_w * spatial_merge_size + j_w; + } + } + } + // 创建坐标对并重复时间维度 + vector> current_pos; + current_pos.reserve(flatten_hpos.size()); + for (size_t i = 0; i < flatten_hpos.size(); ++i) { + current_pos.push_back({flatten_hpos[i], flatten_wpos[i]}); + } + // 扩展时间维度 + for (int i = 0; i < t; ++i) { + pos_ids.insert(pos_ids.end(), current_pos.begin(), current_pos.end()); + } + } + // return {pos_ids, max_grid_size}; + return pos_ids; +} + +void CPUVisionRoPESin::compute_rotary_pos_embd( + const vector> &rotary_pos_emb_full, + const vector> &pos_ids, + shared_ptr output) { + // 输入验证 + if (rotary_pos_emb_full.empty() || pos_ids.empty()) { + throw invalid_argument("Input containers must not be empty"); + } + const size_t num_positions = rotary_pos_emb_full.size(); + const size_t dim = rotary_pos_emb_full[0].size(); + const size_t batch_size = pos_ids.size(); + // 验证嵌入维度一致性 + for (const auto &emb : rotary_pos_emb_full) { + if (emb.size() != dim) { + throw invalid_argument("Inconsistent embedding dimensions"); + } + } + // 验证位置ID有效性 + const size_t seq_len = pos_ids[0].size(); + for (const auto &ids : pos_ids) { + if (ids.size() != seq_len) { + throw invalid_argument("Varied sequence lengths not supported"); + } + for (int idx : ids) { + if (idx < 0 || idx >= num_positions) { + throw 
out_of_range("Position index out of bounds"); + } + } + } + // 准备输出Tensor + const size_t flattened_size = seq_len * dim; + assert(flattened_size == output->dimension()); // output->resize(1, 1, batch_size, flattened_size); +// 并行处理主循环 +#pragma omp parallel for num_threads(CPUBackend::cpu_threads) + for (size_t batch_idx = 0; batch_idx < batch_size; ++batch_idx) { + float *batch_ptr = output->ptrAt(0, 0, batch_idx, 0); + size_t offset = 0; + for (const int pos_idx : pos_ids[batch_idx]) { + const auto &emb = rotary_pos_emb_full[pos_idx]; + simd_memcpy(batch_ptr + offset, emb.data(), dim); + offset += dim; + } + } +} + +/* +vector> compute_rotary_positional_embeddings( + const vector>& rotary_pos_emb_full, + const vector>& pos_ids, + shared_ptr output) +{ + // 输入验证 + if (rotary_pos_emb_full.empty() || pos_ids.empty()) { + return {}; + } + const size_t num_positions = rotary_pos_emb_full.size(); + const size_t dim = rotary_pos_emb_full[0].size(); + const size_t batch_size = pos_ids.size(); + // 验证所有位置的维度一致性 + for (const auto& emb : rotary_pos_emb_full) { + if (emb.size() != dim) { + throw invalid_argument("All positional embeddings must have the same dimension"); + } + } + vector> result(batch_size); + // 并行处理每个样本 + #pragma omp parallel for + for (size_t i = 0; i < batch_size; ++i) { + const auto& positions = pos_ids[i]; + vector flattened; + flattened.reserve(positions.size() * dim); + for (const int idx : positions) { + // 边界检查 + if (idx < 0 || idx >= num_positions) { + throw out_of_range("Position index out of range"); + } + // 获取对应的位置嵌入 + const auto& emb = rotary_pos_emb_full[idx]; + // 使用SIMD加速的内存复制 + const size_t prev_size = flattened.size(); + flattened.resize(prev_size + dim); + simd_memcpy(flattened.data() + prev_size, emb.data(), dim); + } + // 移动语义优化内存分配 + result[i] = std::move(flattened); + } + return result; +} +*/ + +ErrorCode CPUVisionRoPESin::reshape(vector> inputs, vector> outputs) { + int grid_t = (int)inputs[0]->dataAt(0, 0, 0, 0); + int grid_h = 
(int)inputs[0]->dataAt(0, 0, 0, 1); + int grid_w = (int)inputs[0]->dataAt(0, 0, 0, 2); + outputs[0]->reshape(1, 1, grid_t * grid_h * grid_w, 2 * (dim_ / 2)); + return Op::reshape(inputs, outputs); +} + +ErrorCode CPUVisionRoPESin::execute(vector> inputs, vector> outputs) { + vector> grid_thw = {{inputs[0]->dataAt(0, 0, 0, 0), + inputs[0]->dataAt(0, 0, 0, 1), + inputs[0]->dataAt(0, 0, 0, 2)}}; + auto pos_ids = rot_pos_emb(grid_thw, spatial_merge_size_); // get pos_ids and max_grid_size + + auto rotary_pos_emb_full = rotary_pos_emb_forward(max_grid_size); + compute_rotary_pos_embd(rotary_pos_emb_full, pos_ids, outputs[0]); + return Op::execute(inputs, outputs); +} + +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/cpu/op/CPUVisionRoPESin.hpp b/mllm/backends/cpu/op/CPUVisionRoPESin.hpp new file mode 100644 index 000000000..9e9d20e4f --- /dev/null +++ b/mllm/backends/cpu/op/CPUVisionRoPESin.hpp @@ -0,0 +1,114 @@ + +#ifndef MLLM_CPUVISIONROPESIN_H +#define MLLM_CPUVISIONROPESIN_H + +#include "Op.hpp" +#include "../CPUBackend.hpp" + +namespace mllm { + +class CPUVisionRoPESin final : public Op { +public: + CPUVisionRoPESin(Backend *bn, string opName, int dim, int spatial_merge_size, int threadCount); + virtual ~CPUVisionRoPESin() = default; + virtual ErrorCode reshape(vector> inputs, vector> outputs) override; + virtual ErrorCode execute(vector> inputs, vector> outputs) override; + // virtual ErrorCode setUp(vector> inputs, vector> outputs) override; + +private: + int thread_count = 4; + int spatial_merge_size_; + int dim_; + + std::vector inv_freq; // 存储计算结果 + float theta = 10000.0f; // 默认theta值,与Transformer常用的值一致 + + // vector> pos_ids; + int max_grid_size; + + void compute_inv_freq(int dim); + std::vector> rotary_pos_emb_forward(int seqlen); + vector> rot_pos_emb(vector> grid_thw, int spatial_merge_size); + void compute_rotary_pos_embd( + const vector>& rotary_pos_emb_full, + const vector>& pos_ids, + shared_ptr output + ); +}; + +class 
CPUVisionRoPESinCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const { + int dims = (int)op_param["dim"]; + int spatial_merge_size = (int)op_param["spatial_merge_size"]; + return new CPUVisionRoPESin(bn, name, dims, spatial_merge_size, threadCount); + } +}; + + +/* +class VisionRotaryEmbedding { +private: + std::vector inv_freq; + float theta; + const int dim; + +public: + // 构造函数 + VisionRotaryEmbedding(int dim, float theta = 10000.0f) : dim(dim), theta(theta) + { + if (dim <= 0 || dim % 2 != 0) { + throw std::invalid_argument("Dimension must be positive even number"); + } + compute_inv_freq(); + } + + // 计算逆频率向量 + void compute_inv_freq() { + const int half_dim = dim / 2; + inv_freq.resize(half_dim); + #pragma omp parallel for num_threads(4) + for (int i = 0; i < half_dim; ++i) { + const float exponent = (2.0f * i) / static_cast(dim); + inv_freq[i] = 1.0f / std::pow(theta, exponent); + } + } + + // 前向计算(生成频率矩阵) + std::vector> rotary_pos_emb_forward(int seqlen) { + if (seqlen <= 0) { + throw std::invalid_argument("seqlen must be positive"); + } + // 生成序列 [0, 1, ..., seqlen-1] + std::vector seq(seqlen); + #pragma omp parallel for num_threads(4) + for (int i = 0; i < seqlen; ++i) { + seq[i] = static_cast(i); + } + // 预分配结果矩阵 + std::vector> freqs( + seqlen, + std::vector(inv_freq.size()) + ); + // 并行计算外积 + #pragma omp parallel for num_threads(4) schedule(dynamic) + for (size_t i = 0; i < seq.size(); ++i) { + const float seq_val = seq[i]; + auto& row = freqs[i]; + for (size_t j = 0; j < inv_freq.size(); ++j) { + row[j] = seq_val * inv_freq[j]; + } + } + return freqs; + } + + // 访问器方法 + const std::vector& get_inv_freq() const { return inv_freq; } + float get_theta() const { return theta; } + int get_dim() const { return dim; } +}; +*/ + +} // namespace mllm + +#endif // MLLM_CPUVISIONROPE_H diff --git a/src/backends/cpu/op/CPUWhere.cpp b/mllm/backends/cpu/op/CPUWhere.cpp similarity index 
100% rename from src/backends/cpu/op/CPUWhere.cpp rename to mllm/backends/cpu/op/CPUWhere.cpp diff --git a/src/backends/cpu/op/CPUWhere.hpp b/mllm/backends/cpu/op/CPUWhere.hpp similarity index 100% rename from src/backends/cpu/op/CPUWhere.hpp rename to mllm/backends/cpu/op/CPUWhere.hpp diff --git a/mllm/backends/cpu/op/CPUWhereFunc.hpp b/mllm/backends/cpu/op/CPUWhereFunc.hpp new file mode 100644 index 000000000..78a855a03 --- /dev/null +++ b/mllm/backends/cpu/op/CPUWhereFunc.hpp @@ -0,0 +1,115 @@ +// +// Created by Rongjie Yi on 24-2-26. +// + +#ifndef CPUWHEREFUNC_HPP +#define CPUWHEREFUNC_HPP + +#include "Tensor.hpp" +#include "Types.hpp" +#include "CPUBackend.hpp" +#include +#include + +namespace mllm { +class Tensor; + +class CPUwhereFunction : public Op { +private: + int thread_count = 4; + float value_; + Chl axis_; + +public: + CPUwhereFunction(Backend *bn, string name, int threadCount, float value, Chl axis) + : Op(bn, name), thread_count(threadCount), value_(value), axis_(axis) {} + + ErrorCode reshape(vector> inputs, vector> outputs) override { + // Shape is data-dependent and will be determined in execute. + return MLLM_NO_ERROR; + } + + ErrorCode execute(vector> inputs, vector> outputs) override { + std::vector b_vec; + std::vector s_vec; + std::vector h_vec; + std::vector d_vec; + + // NOTE: The original parallel implementation was thread-unsafe due to race conditions + // on shared vectors. Using the sequential version for correctness. 
+ for (int b = 0; b < inputs[0]->batch(); b++) { + for (auto s = 0; s < inputs[0]->sequence(); s++) { + for (auto h = 0; h < inputs[0]->head(); h++) { + for (auto d = 0; d < inputs[0]->dimension(); d++) { + if (inputs[0]->dataAt(b, h, s, d) == value_) { + b_vec.push_back(b); + s_vec.push_back(s); + h_vec.push_back(h); + d_vec.push_back(d); + } + } + } + } + } + + int num = b_vec.size(); + if ((int)axis_ == -1) { + outputs[0]->reshape(1, 1, 4, num); + outputs[0]->setDtype(MLLM_TYPE_F32); + outputs[0]->alloc(); + for (int i = 0; i < 4; ++i) { + auto dest_ptr = outputs[0]->hostPtr() + outputs[0]->offset(0, 0, i, 0); + switch (i) { + case 0: + memcpy(dest_ptr, b_vec.data(), num * sizeof(float)); + break; + case 1: + memcpy(dest_ptr, h_vec.data(), num * sizeof(float)); + break; + case 2: + memcpy(dest_ptr, s_vec.data(), num * sizeof(float)); + break; + case 3: + memcpy(dest_ptr, d_vec.data(), num * sizeof(float)); + break; + default: + break; + } + } + } else { + outputs[0]->reshape(1, 1, 1, num); + outputs[0]->setDtype(MLLM_TYPE_F32); + outputs[0]->alloc(); + auto dest_ptr = outputs[0]->hostPtr(); + switch (axis_) { + case BATCH: + memcpy(dest_ptr, b_vec.data(), num * sizeof(float)); + break; + case HEAD: + memcpy(dest_ptr, h_vec.data(), num * sizeof(float)); + break; + case SEQUENCE: + memcpy(dest_ptr, s_vec.data(), num * sizeof(float)); + break; + case DIMENSION: + memcpy(dest_ptr, d_vec.data(), num * sizeof(float)); + break; + default: + break; + } + } + return MLLM_NO_ERROR; + } +}; + +class CPUwhereFunctionCreator : public CPUBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + float value = op_param.at("value"); + Chl axis = (Chl)op_param.at("axis"); + return new CPUwhereFunction(bn, name, threadCount, value, axis); + } +}; + +} // namespace mllm +#endif // CPUWHEREFUNC_HPP \ No newline at end of file diff --git a/src/backends/cpu/compute/VecDot.hpp 
b/mllm/backends/cpu/third_party/ggml/ComputeUtils.hpp similarity index 85% rename from src/backends/cpu/compute/VecDot.hpp rename to mllm/backends/cpu/third_party/ggml/ComputeUtils.hpp index 4862a3633..fefcf03e4 100644 --- a/src/backends/cpu/compute/VecDot.hpp +++ b/mllm/backends/cpu/third_party/ggml/ComputeUtils.hpp @@ -24,18 +24,16 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ - -#ifndef MLLM_VECDOT_HPP -#define MLLM_VECDOT_HPP -#include "Tensor.hpp" +#pragma once +// #include "Tensor.hpp" #include "Types.hpp" #include #include "ParamLoader.hpp" -#include "../quantize/QuantizeQ8.hpp" -#include "../quantize/QuantizeQ4.hpp" -#include "../quantize/QuantizeQ6.hpp" -#include "../quantize/QuantizeQ3.hpp" -#include "../quantize/QuantizeQ2.hpp" +#include "QuantizeQ8.hpp" +#include "QuantizeQ4.hpp" +#include "QuantizeQ6.hpp" +#include "QuantizeQ3.hpp" +#include "QuantizeQ2.hpp" #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_SVE) #include @@ -225,7 +223,7 @@ #define MLLM_F32Cx8_LOAD(x) _mm256_cvtph_ps(_mm_loadu_si128((__m128i *)(x))) #define MLLM_F32Cx8_STORE(x, y) _mm_storeu_si128((__m128i *)(x), _mm256_cvtps_ph(y, 0)) #else -static inline __m256 __avx_f32cx8_load(MLLM_fp16_t *x) { +static inline __m256 __avx_f32cx8_load(mllm_fp16_t *x) { float tmp[8]; for (int i = 0; i < 8; i++) { @@ -234,7 +232,7 @@ static inline __m256 __avx_f32cx8_load(MLLM_fp16_t *x) { return _mm256_loadu_ps(tmp); } -static inline void __avx_f32cx8_store(MLLM_fp16_t *x, __m256 y) { +static inline void __avx_f32cx8_store(mllm_fp16_t *x, __m256 y) { float arr[8]; _mm256_storeu_ps(arr, y); @@ -375,10 +373,10 @@ inline int mllm_cpu_get_sve_cnt(void) { // ref: https://github.com/mllm-org/llama.cpp/pull/5404 #ifdef _MSC_VER #define mllm_vld1q_u32(w, x, y, z) \ - { ((w) + ((uint64_t)(x) << 32)), ((y) + ((uint64_t)(z) << 32)) } + {((w) + ((uint64_t)(x) << 32)), ((y) + ((uint64_t)(z) << 32))} #else #define mllm_vld1q_u32(w, x, y, z) \ - { (w), (x), (y), 
(z) } + {(w), (x), (y), (z)} #endif // _MSC_VER #if !defined(__aarch64__) @@ -608,54 +606,3 @@ inline static uint8x16_t mllm_vqtbl1q_u8(uint8x16_t a, uint8x16_t b) { #endif // !defined(__aarch64__) #endif // !defined(__ARM_NEON) - -using namespace mllm; - -inline static void vec_scale_f32(const int n, float *y, const float v) { - const int np = (n & ~(MLLM_F32_STEP - 1)); - - MLLM_F32_VEC vx = MLLM_F32_VEC_SET1(v); - - MLLM_F32_VEC ay[MLLM_F32_ARR]; - - for (int i = 0; i < np; i += MLLM_F32_STEP) { - for (int j = 0; j < MLLM_F32_ARR; j++) { - ay[j] = MLLM_F32_VEC_LOAD(y + i + j * MLLM_F32_EPR); - ay[j] = MLLM_F32_VEC_MUL(ay[j], vx); - - MLLM_F32_VEC_STORE(y + i + j * MLLM_F32_EPR, ay[j]); - } - } - - // leftovers - for (int i = np; i < n; ++i) { - y[i] *= v; - } - - // for (int i = 0; i < n; ++i) { - // y[i] *= v; - // } -} - -// void vec_dot_fp32(const float * __restrict src0, const float * __restrict src1, Tensor *dst, bool support_bias, Tensor *bias, int hid_len, int batch, int head, int src0_inf, int sec1_outf); -void vec_dot_q4_0_q8_0(const void *__restrict src0, const void *__restrict src1, Tensor *dst, bool support_bias, Tensor *bias, int hid_len, int batch, int head, int src0_inf, int sec1_outf); -void vec_dot_q4_K_q8_K(const void *__restrict src0, const void *__restrict src1, Tensor *dst, bool support_bias, Tensor *bias, int hid_len, int batch, int head, int src0_inf, int sec1_outf); -void vec_dot_q6_K_q8_K(const void *__restrict src0, const void *__restrict src1, Tensor *dst, bool support_bias, Tensor *bias, int hid_len, int batch, int head, int src0_inf, int sec1_outf); - -void vec_dot_q4_K_q8_K(const int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy); -void vec_dot_q6_K_q8_K(const int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy); -void vec_dot_q4_0_q8_0(const int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy); -void vec_dot_fp32(const int n, float *__restrict 
s, const float *__restrict vx, const float *__restrict vy); -void vec_dot_fp16(const int n, float *__restrict s, const mllm_fp16_t *__restrict vx, const mllm_fp16_t *__restrict vy); -void vec_dot_q8_0_q8_0(int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy, size_t bs = 0, size_t bx = 0, size_t by = 0); - -void vec_dot_q2_K_q8_K(int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy); -void vec_dot_q3_K_q8_K(int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy); -void vec_dot_iq2_xxs_q8_K(int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy); - -// for sparse linear -void vec_value_dot_fp32(const int n, float *__restrict s, const float x, const float *__restrict vy, bool addition); -// for per-tensor i8, currently not suitable for vecdot trait -void vec_dot_i8_i8(const int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy, float scale1 = 1, float scale2 = 1); - -#endif // MLLM_VECDOT_HPP diff --git a/src/backends/cpu/compute/LlamafileSGEMM.cpp b/mllm/backends/cpu/third_party/ggml/GemmLlamafile.cpp similarity index 99% rename from src/backends/cpu/compute/LlamafileSGEMM.cpp rename to mllm/backends/cpu/third_party/ggml/GemmLlamafile.cpp index 174e8ab40..358744400 100644 --- a/src/backends/cpu/compute/LlamafileSGEMM.cpp +++ b/mllm/backends/cpu/third_party/ggml/GemmLlamafile.cpp @@ -48,8 +48,8 @@ #pragma GCC diagnostic ignored "-Wignored-attributes" #endif -#include "LlamafileSGEMM.hpp" -#include "../quantize/Quantize.hpp" +#include "backends/cpu/third_party/ggml/GemmLlamafile.hpp" +#include "Quantize.hpp" #ifdef _MSC_VER #define NOINLINE __declspec(noinline) diff --git a/src/backends/cpu/compute/LlamafileSGEMM.hpp b/mllm/backends/cpu/third_party/ggml/GemmLlamafile.hpp similarity index 92% rename from src/backends/cpu/compute/LlamafileSGEMM.hpp rename to mllm/backends/cpu/third_party/ggml/GemmLlamafile.hpp index 3b0942970..1b2af37b8 
100644 --- a/src/backends/cpu/compute/LlamafileSGEMM.hpp +++ b/mllm/backends/cpu/third_party/ggml/GemmLlamafile.hpp @@ -5,8 +5,6 @@ #ifndef MLLM_GEMM_HPP #define MLLM_GEMM_HPP -// #include "VecDot.hpp" -// #include "Tensor.hpp" #include "Types.hpp" using namespace mllm; diff --git a/src/backends/cpu/compute/GEMM_AArch64.cpp b/mllm/backends/cpu/third_party/ggml/GemmPack.cpp similarity index 94% rename from src/backends/cpu/compute/GEMM_AArch64.cpp rename to mllm/backends/cpu/third_party/ggml/GemmPack.cpp index 708712a37..38c81946f 100644 --- a/src/backends/cpu/compute/GEMM_AArch64.cpp +++ b/mllm/backends/cpu/third_party/ggml/GemmPack.cpp @@ -1,4 +1,4 @@ -#include "GEMM_AArch64.hpp" +#include "GemmPack.hpp" #include "Types.hpp" #include #include @@ -7,6 +7,7 @@ #include // for assert #include // for qsort #include +#include "ComputeUtils.hpp" int mllm_cpu_has_sve(void) { #if defined(__ARM_FEATURE_SVE) @@ -363,11 +364,11 @@ size_t quantize_q4_0_8x8(const float *__restrict src, void *__restrict dst, int6 return 0; } -void mllm_gemv_q4_0_4x4_q8_0(int n, float *__restrict s, size_t bs, const void *__restrict vx, - const void *__restrict vy, int nr, int nc, - const void *__restrict bias) { +void gemv_q4_0_4x4_q8_0(int n, float *__restrict s, size_t bs, const void *__restrict vx, + const void *__restrict vy, int nr, int nc, + const void *__restrict bias) { if (bias != nullptr) { - _mllm_gemv_q4_0_4x4_q8_0_bias(n, s, bs, vx, vy, nr, nc, bias); + _gemv_q4_0_4x4_q8_0_bias(n, s, bs, vx, vy, nr, nc, bias); return; } @@ -396,12 +397,78 @@ void mllm_gemv_q4_0_4x4_q8_0(int n, float *__restrict s, size_t bs, const void * // "for optimal performance"); // } // #endif -// #if defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8) -// assert(!(mllm_cpu_has_neon() && mllm_cpu_has_matmul_int8()) -// && "__ARM_NEON and __ARM_FEATURE_MATMUL_INT8 defined, use the Q4_0_4_8 " -// "quantization format for optimal performance"); -// #elif defined(__ARM_NEON) && defined(__aarch64__) -#if 
defined(__ARM_NEON) && defined(__aarch64__) +#if defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8) + const block_q8_0 *a_ptr_base = (const block_q8_0 *)vy; + const block_q4_0x4 *b_ptr_base = (const block_q4_0x4 *)vx; + + for (int i_nc = 0; i_nc < nc; i_nc += ncols_interleaved) { + float *s_ptr = s + i_nc; + const block_q4_0x4 *b_ptr = b_ptr_base + (i_nc / ncols_interleaved) * nb; + const block_q8_0 *a_ptr = a_ptr_base; + __asm__ __volatile__( + "movi v16.4s, #0\n" // v16 = acc0 = [0, 0, 0, 0] (32-bit accumulators for 4 outputs) + "movi v17.4s, #0\n" // v17 = acc1 + "mov x5, %x[nb]\n" // x5 = nb (block counter) + "1:\n" // Main loop over blocks (nb) + // --- 加载 Scales --- + "ldrh w6, [%x[a_ptr], #0]\n" // Load d_a (fp16 scale) + "ldr s18, [%x[b_ptr], #0]\n" // Load d_b0, d_b1 + "ldr s19, [%x[b_ptr], #4]\n" // Load d_b2, d_b3 + "fmov s20, w6\n" // Move d_a to float register + "fcvtl v18.2s, v18.2h\n" // Convert d_b0, d_b1 to fp32 + "fcvtl v19.2s, v19.2h\n" // Convert d_b2, d_b3 to fp32 + "fcvtl v20.2s, v20.2h\n" // Convert d_a to fp32 + "dup v18.4s, v18.s[0]\n" // Broadcast d_b0 + "dup v19.4s, v19.s[0]\n" // Broadcast d_b2 + "dup v20.4s, v20.s[0]\n" // Broadcast d_a + // --- Q8向量数据加载 (a) --- + "ldr q0, [%x[a_ptr], #2]\n" // Load first 16 bytes of a->qs + "ldr q1, [%x[a_ptr], #18]\n" // Load second 16 bytes of a->qs + // --- Q4权重数据加载 (b) --- + "ldr q2, [%x[b_ptr], #8]\n" // Load first 16 bytes of b->qs + "ldr q3, [%x[b_ptr], #24]\n" // Load second 16 bytes of b->qs + // --- 解包 Q4.0 权重到 Q8.0 --- + // Unpack first 32x 4-bit quants into 32x 8-bit quants + "movi v21.16b, #0x0f\n" // low nibble mask + "movi v22.16b, #-8\n" // subtraction value + "and v4.16b, v2.16b, v21.16b\n" // low nibbles + "ushr v5.16b, v2.16b, #4\n" // high nibbles + "add v4.16b, v4.16b, v22.16b\n" // v4 = unpacked b quants 0..15 + "add v5.16b, v5.16b, v22.16b\n" // v5 = unpacked b quants 16..31 + // Unpack second 32x 4-bit quants + "and v6.16b, v3.16b, v21.16b\n" + "ushr v7.16b, v3.16b, #4\n" 
+ "add v6.16b, v6.16b, v22.16b\n" // v6 + "add v7.16b, v7.16b, v22.16b\n" // v7 + // --- 执行 4x4 矩阵乘法 --- + // smmla acc, w, v + // The 4x4 matrix is formed by the 16 bytes in the register. + // We are doing a row-vector * matrix multiplication. The vector 'a' needs + // to be treated as rows of a matrix. + "smmla v16.4s, v4.16b, v0.16b\n" // acc0 += mat(v4) * mat(v0) + "smmla v17.4s, v5.16b, v0.16b\n" // acc1 += mat(v5) * mat(v0) + "smmla v16.4s, v6.16b, v1.16b\n" + "smmla v17.4s, v7.16b, v1.16b\n" + // --- 累加和转换 --- + "add v16.4s, v16.4s, v17.4s\n" // v16 has the final int32 sums for this block + "scvtf v17.4s, v16.4s\n" // Convert int32 sums to float32 + "fmul v18.4s, v18.4s, v20.4s\n" // multiply scales d_a * d_b + "fmla %v[sum].4s, v17.4s, v18.4s\n" // FMLA into final sum register + // --- 循环控制 --- + "add %x[a_ptr], %x[a_ptr], #34\n" // sizeof(block_q8_0) = 2+32 + "add %x[b_ptr], %x[b_ptr], #72\n" // sizeof(block_q4_0x4) = 8+64 + "subs x5, x5, #1\n" + "bne 1b\n" + // --- 存储结果 --- + "str q[sum], [%x[s_ptr]]\n" + : [sum] "+w"(s_ptr) // using "+w" for NEON registers + : [a_ptr] "r"(a_ptr), [b_ptr] "r"(b_ptr), [nb] "r"(nb), [s_ptr] "r"(s_ptr) + : "cc", "memory", "x5", "x6", + "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", + "v16", "v17", "v18", "v19", "v20", "v21", "v22"); + } +// AArch64 NEON (包括 Apple Silicon) 使用点积内联函数 +#elif defined(__ARM_NEON) && defined(__aarch64__) const void *b_ptr = vx; const void *a_ptr = vy; float *res_ptr = s; @@ -457,6 +524,88 @@ void mllm_gemv_q4_0_4x4_q8_0(int n, float *__restrict s, size_t bs, const void * : [a_ptr] "r"(a_ptr), [nb] "r"(nb) : "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22"); +// x86 AVX-VNNI 和 AVX2 实现 +#elif defined(__AVX2__) +#define CALC_SUMI_PARTIAL(V_B_Q4, V_A0_S16, V_A1_S16) \ + ({ \ + /* v0 = (int8_t)(L<<4), v1 = (int8_t)(H<<4) */ \ + const __m128i low_nib_mask = _mm_set1_epi8(0x0F); \ + const __m128i v_L_nibbles = 
_mm_and_si128((V_B_Q4), low_nib_mask); \ + const __m128i v0s_u8 = _mm_slli_epi16(v_L_nibbles, 4); \ + const __m128i v1s_u8 = _mm_andnot_si128(low_nib_mask, (V_B_Q4)); \ + \ + /* 符号扩展到16位 */ \ + const __m128i v0s_s16 = _mm_cvtepi8_epi16(v0s_u8); \ + const __m128i v1s_s16 = _mm_cvtepi8_epi16(v1s_u8); \ + \ + /* 核心计算: (v0*a0 + v1*a1) >> 4 */ \ + const __m128i prod0 = _mm_mullo_epi16(v0s_s16, (V_A0_S16)); \ + const __m128i prod1 = _mm_mullo_epi16(v1s_s16, (V_A1_S16)); \ + const __m128i sum_prods_s16 = _mm_add_epi16(prod0, prod1); \ + const __m128i terms_s16 = _mm_srai_epi16(sum_prods_s16, 4); \ + \ + /* 水平求和 */ \ + const __m128i ones = _mm_set1_epi16(1); \ + const __m128i sums_s32 = _mm_madd_epi16(terms_s16, ones); \ + _mm_extract_epi32(sums_s32, 0) + _mm_extract_epi32(sums_s32, 1); \ + }) + + const block_q8_0 *a_ptr_base = (const block_q8_0 *)vy; + const block_q4_0x4 *b_ptr_base = (const block_q4_0x4 *)vx; + + // 外层循环:处理不同的4列输出组 + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_q4_0x4 *b_ptr = b_ptr_base + x * nb; + const block_q8_0 *a_ptr = a_ptr_base; + + float sumf[4] = {0.0f, 0.0f, 0.0f, 0.0f}; + + // 中层循环:处理数据块 + for (int l = 0; l < nb; l++) { + int32_t sumi_cols[4] = {0, 0, 0, 0}; + + // 内层循环:处理块内的子区域 (k) + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + // 加载 a 向量的两个部分并符号扩展到16位 + const __m128i v_a0_s16 = _mm_cvtepi8_epi16(_mm_loadu_si32(a_ptr[l].qs + k * blocklen)); + const __m128i v_a1_s16 = _mm_cvtepi8_epi16(_mm_loadu_si32(a_ptr[l].qs + k * blocklen + qk / 2)); + + // 加载 b 权重的4x4字节区域 + const __m128i v_b_block = _mm_loadu_si128((const __m128i *)(b_ptr[l].qs + k * 16)); + + // ---- 循环展开: 手动处理 j = 0, 1, 2, 3 ---- + + // j = 0 + sumi_cols[0] += CALC_SUMI_PARTIAL(v_b_block, v_a0_s16, v_a1_s16); + + // j = 1 + sumi_cols[1] += CALC_SUMI_PARTIAL(_mm_srli_si128(v_b_block, 4), v_a0_s16, v_a1_s16); + + // j = 2 + sumi_cols[2] += CALC_SUMI_PARTIAL(_mm_srli_si128(v_b_block, 8), v_a0_s16, v_a1_s16); + + // j = 3 + sumi_cols[3] += 
CALC_SUMI_PARTIAL(_mm_srli_si128(v_b_block, 12), v_a0_s16, v_a1_s16); + } + + // --- 应用缩放因子并累加到浮点和 --- + const __m128i sumi_vec = _mm_loadu_si128((const __m128i *)sumi_cols); + const __m128 sumi_f = _mm_cvtepi32_ps(sumi_vec); + + const float d_a = MLLM_FP16_TO_FP32(a_ptr[l].d); + const __m128 d_b = _mm_cvtph_ps(_mm_loadu_si128((const __m128i *)b_ptr[l].d)); + const __m128 scales = _mm_mul_ps(_mm_set1_ps(d_a), d_b); + + __m128 current_sumf = _mm_loadu_ps(sumf); + current_sumf = _mm_add_ps(current_sumf, _mm_mul_ps(sumi_f, scales)); + _mm_storeu_ps(sumf, current_sumf); + } + + // 存储最终结果 + _mm_storeu_ps(s + x * ncols_interleaved, _mm_loadu_ps(sumf)); + } + // 确保宏只在当前代码块生效 +#undef CALC_SUMI_PARTIAL #else float sumf[4]; int sumi; @@ -493,9 +642,9 @@ void mllm_gemv_q4_0_4x4_q8_0(int n, float *__restrict s, size_t bs, const void * #endif } -void _mllm_gemv_q4_0_4x4_q8_0_bias(int n, float *__restrict s, size_t bs, const void *__restrict vx, - const void *__restrict vy, int nr, int nc, - const void *__restrict bias) { +void _gemv_q4_0_4x4_q8_0_bias(int n, float *__restrict s, size_t bs, const void *__restrict vx, + const void *__restrict vy, int nr, int nc, + const void *__restrict bias) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 4; @@ -632,11 +781,11 @@ void _mllm_gemv_q4_0_4x4_q8_0_bias(int n, float *__restrict s, size_t bs, const #endif } -void mllm_gemv_q4_0_4x8_q8_0(int n, float *__restrict s, size_t bs, const void *__restrict vx, - const void *__restrict vy, int nr, int nc, - const void *__restrict bias) { +void gemv_q4_0_4x8_q8_0(int n, float *__restrict s, size_t bs, const void *__restrict vx, + const void *__restrict vy, int nr, int nc, + const void *__restrict bias) { if (bias != nullptr) { - _mllm_gemv_q4_0_4x8_q8_0_bias(n, s, bs, vx, vy, nr, nc, bias); + _gemv_q4_0_4x8_q8_0_bias(n, s, bs, vx, vy, nr, nc, bias); return; } @@ -768,9 +917,9 @@ void mllm_gemv_q4_0_4x8_q8_0(int n, float *__restrict s, size_t bs, const void * #endif } 
-void _mllm_gemv_q4_0_4x8_q8_0_bias(int n, float *__restrict s, size_t bs, const void *__restrict vx, - const void *__restrict vy, int nr, int nc, - const void *__restrict bias) { +void _gemv_q4_0_4x8_q8_0_bias(int n, float *__restrict s, size_t bs, const void *__restrict vx, + const void *__restrict vy, int nr, int nc, + const void *__restrict bias) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 4; @@ -908,11 +1057,11 @@ void _mllm_gemv_q4_0_4x8_q8_0_bias(int n, float *__restrict s, size_t bs, const #endif } -void mllm_gemv_q4_0_8x8_q8_0(int n, float *__restrict s, size_t bs, const void *__restrict vx, - const void *__restrict vy, int nr, int nc, - const void *__restrict bias) { +void gemv_q4_0_8x8_q8_0(int n, float *__restrict s, size_t bs, const void *__restrict vx, + const void *__restrict vy, int nr, int nc, + const void *__restrict bias) { if (bias != nullptr) { - _mllm_gemv_q4_0_8x8_q8_0_bias(n, s, bs, vx, vy, nr, nc, bias); + _gemv_q4_0_8x8_q8_0_bias(n, s, bs, vx, vy, nr, nc, bias); return; } @@ -1058,9 +1207,9 @@ void mllm_gemv_q4_0_8x8_q8_0(int n, float *__restrict s, size_t bs, const void * #endif } -void _mllm_gemv_q4_0_8x8_q8_0_bias(int n, float *__restrict s, size_t bs, const void *__restrict vx, - const void *__restrict vy, int nr, int nc, - const void *__restrict bias) { +void _gemv_q4_0_8x8_q8_0_bias(int n, float *__restrict s, size_t bs, const void *__restrict vx, + const void *__restrict vy, int nr, int nc, + const void *__restrict bias) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 8; @@ -1215,13 +1364,13 @@ void _mllm_gemv_q4_0_8x8_q8_0_bias(int n, float *__restrict s, size_t bs, const } // lhs: q8_0, rhs: q4_0x4 -void mllm_gemm_q4_0_4x4_q8_0(int n, float *__restrict s, size_t bs, const void *__restrict vx, - const void *__restrict vy, int nr, int nc, - const void *__restrict bias) { +void gemm_q4_0_4x4_q8_0(int n, float *__restrict s, size_t bs, const void *__restrict vx, + const 
void *__restrict vy, int nr, int nc, + const void *__restrict bias) { if (bias != nullptr) { - _mllm_gemm_q4_0_4x4_q8_0_bias(n, s, bs, vx, vy, nr, nc, bias); + _gemm_q4_0_4x4_q8_0_bias(n, s, bs, vx, vy, nr, nc, bias); #if defined(__ARM_NEON) - std::cout << "_mllm_gemm_q4_0_4x4_q8_0_bias not implemented"; + std::cout << "_gemm_q4_0_4x4_q8_0_bias not implemented"; abort(); #endif return; @@ -1253,11 +1402,12 @@ void mllm_gemm_q4_0_4x4_q8_0(int n, float *__restrict s, size_t bs, const void * // "for optimal performance"); // } // #endif -#if defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8) - assert(!(mllm_cpu_has_neon() && mllm_cpu_has_matmul_int8()) - && "__ARM_NEON and __ARM_FEATURE_MATMUL_INT8 defined, use the Q4_0_4_8 " - "quantization format for optimal performance"); -#elif defined(__ARM_NEON) && defined(__aarch64__) +// #if defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8) +// assert(!(mllm_cpu_has_neon() && mllm_cpu_has_matmul_int8()) +// && "__ARM_NEON and __ARM_FEATURE_MATMUL_INT8 defined, use the Q4_0_4_8 " +// "quantization format for optimal performance"); +// #elif defined(__ARM_NEON) && defined(__aarch64__) +#if defined(__ARM_NEON) && defined(__aarch64__) const void *b_ptr = vx; const void *a_ptr = vy; float *res_ptr = s; @@ -1759,9 +1909,9 @@ void mllm_gemm_q4_0_4x4_q8_0(int n, float *__restrict s, size_t bs, const void * #endif } -void _mllm_gemm_q4_0_4x4_q8_0_bias(int n, float *__restrict s, size_t bs, const void *__restrict vx, - const void *__restrict vy, int nr, int nc, - const void *__restrict bias) { +void _gemm_q4_0_4x4_q8_0_bias(int n, float *__restrict s, size_t bs, const void *__restrict vx, + const void *__restrict vy, int nr, int nc, + const void *__restrict bias) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 4; @@ -1788,11 +1938,12 @@ void _mllm_gemm_q4_0_4x4_q8_0_bias(int n, float *__restrict s, size_t bs, const // "for optimal performance"); // } // #endif -#if defined(__ARM_NEON) && 
defined(__ARM_FEATURE_MATMUL_INT8) - assert(!(mllm_cpu_has_neon() && mllm_cpu_has_matmul_int8()) - && "__ARM_NEON and __ARM_FEATURE_MATMUL_INT8 defined, use the Q4_0_4_8 " - "quantization format for optimal performance"); -#elif defined(__ARM_NEON) && defined(__aarch64__) +// #if defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8) +// assert(!(mllm_cpu_has_neon() && mllm_cpu_has_matmul_int8()) +// && "__ARM_NEON and __ARM_FEATURE_MATMUL_INT8 defined, use the Q4_0_4_8 " +// "quantization format for optimal performance"); +// #elif defined(__ARM_NEON) && defined(__aarch64__) +#if defined(__ARM_NEON) && defined(__aarch64__) const void *b_ptr = vx; const void *a_ptr = vy; const void *bias_ptr = bias; @@ -2322,15 +2473,15 @@ void _mllm_gemm_q4_0_4x4_q8_0_bias(int n, float *__restrict s, size_t bs, const #endif } -void mllm_gemm_q4_0_4x8_q8_0(int n, float *__restrict s, size_t bs, const void *__restrict vx, - const void *__restrict vy, int nr, int nc, - const void *__restrict bias) { +void gemm_q4_0_4x8_q8_0(int n, float *__restrict s, size_t bs, const void *__restrict vx, + const void *__restrict vy, int nr, int nc, + const void *__restrict bias) { if (bias != nullptr) { #if defined(__ARM_NEON) - std::cout << "_mllm_gemm_q4_0_4x8_q8_0_bias not implemented"; + std::cout << "_gemm_q4_0_4x8_q8_0_bias not implemented"; abort(); #endif - _mllm_gemm_q4_0_4x8_q8_0_bias(n, s, bs, vx, vy, nr, nc, bias); + _gemm_q4_0_4x8_q8_0_bias(n, s, bs, vx, vy, nr, nc, bias); return; } @@ -2807,9 +2958,9 @@ void mllm_gemm_q4_0_4x8_q8_0(int n, float *__restrict s, size_t bs, const void * #endif } -void _mllm_gemm_q4_0_4x8_q8_0_bias(int n, float *__restrict s, size_t bs, const void *__restrict vx, - const void *__restrict vy, int nr, int nc, - const void *__restrict bias) { +void _gemm_q4_0_4x8_q8_0_bias(int n, float *__restrict s, size_t bs, const void *__restrict vx, + const void *__restrict vy, int nr, int nc, + const void *__restrict bias) { const int qk = QK8_0; const int nb = n / 
qk; const int ncols_interleaved = 4; @@ -3287,15 +3438,15 @@ void _mllm_gemm_q4_0_4x8_q8_0_bias(int n, float *__restrict s, size_t bs, const #endif } -void mllm_gemm_q4_0_8x8_q8_0(int n, float *__restrict s, size_t bs, const void *__restrict vx, - const void *__restrict vy, int nr, int nc, - const void *__restrict bias) { +void gemm_q4_0_8x8_q8_0(int n, float *__restrict s, size_t bs, const void *__restrict vx, + const void *__restrict vy, int nr, int nc, + const void *__restrict bias) { if (bias != nullptr) { #if defined(__ARM_NEON) - std::cout << "_mllm_gemm_q4_0_8x8_q8_0_bias not implemented"; + std::cout << "_gemm_q4_0_8x8_q8_0_bias not implemented"; abort(); #endif - _mllm_gemm_q4_0_8x8_q8_0_bias(n, s, bs, vx, vy, nr, nc, bias); + _gemm_q4_0_8x8_q8_0_bias(n, s, bs, vx, vy, nr, nc, bias); return; } @@ -3798,9 +3949,9 @@ void mllm_gemm_q4_0_8x8_q8_0(int n, float *__restrict s, size_t bs, const void * #endif } -void _mllm_gemm_q4_0_8x8_q8_0_bias(int n, float *__restrict s, size_t bs, const void *__restrict vx, - const void *__restrict vy, int nr, int nc, - const void *__restrict bias) { +void _gemm_q4_0_8x8_q8_0_bias(int n, float *__restrict s, size_t bs, const void *__restrict vx, + const void *__restrict vy, int nr, int nc, + const void *__restrict bias) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 8; diff --git a/mllm/backends/cpu/third_party/ggml/GemmPack.hpp b/mllm/backends/cpu/third_party/ggml/GemmPack.hpp new file mode 100644 index 000000000..ca5ecb8a2 --- /dev/null +++ b/mllm/backends/cpu/third_party/ggml/GemmPack.hpp @@ -0,0 +1,44 @@ +#ifndef MLLM_GEMM_AARCH64_HPP +#define MLLM_GEMM_AARCH64_HPP + +#include +#include +// using namespace mllm; + +// Quantization +void quantize_q8_0_4x4(const float *__restrict x, void *__restrict y, int64_t k); +void quantize_q8_0_4x8(const float *__restrict x, void *__restrict y, int64_t k); + +void quantize_mat_q8_0(const float *__restrict x, void *__restrict y, int64_t nrows, int64_t 
n_per_row, int64_t blck_size_interleave); + +// Quantization utilizing an importance matrix (a.k.a. "Activation aWare Quantization") +size_t quantize_q4_0_4x4(const float *__restrict src, void *__restrict dst, int64_t nrows, int64_t n_per_row, const float *imatrix); +size_t quantize_q4_0_4x8(const float *__restrict src, void *__restrict dst, int64_t nrows, int64_t n_per_row, const float *imatrix); +size_t quantize_q4_0_8x8(const float *__restrict src, void *__restrict dst, int64_t nrows, int64_t n_per_row, const float *imatrix); + +//===----------------------------------------------------------------------===// +// GEMV +//===----------------------------------------------------------------------===// +void gemv_q4_0_4x4_q8_0(int n, float *__restrict s, size_t bs, const void *__restrict vx, const void *__restrict vy, int nr, int nc, const void *__restrict bias = nullptr); +void gemv_q4_0_4x8_q8_0(int n, float *__restrict s, size_t bs, const void *__restrict vx, const void *__restrict vy, int nr, int nc, const void *__restrict bias = nullptr); +void gemv_q4_0_8x8_q8_0(int n, float *__restrict s, size_t bs, const void *__restrict vx, const void *__restrict vy, int nr, int nc, const void *__restrict bias = nullptr); + +// NOTE: Do not add a bias flag in gemv_q4_0_4x4_q8_0. It may cause branch miss hit problem. 
+void _gemv_q4_0_4x4_q8_0_bias(int n, float *__restrict s, size_t bs, const void *__restrict vx, const void *__restrict vy, int nr, int nc, const void *__restrict bias); +void _gemv_q4_0_4x8_q8_0_bias(int n, float *__restrict s, size_t bs, const void *__restrict vx, const void *__restrict vy, int nr, int nc, const void *__restrict bias); +void _gemv_q4_0_8x8_q8_0_bias(int n, float *__restrict s, size_t bs, const void *__restrict vx, const void *__restrict vy, int nr, int nc, const void *__restrict bias); + +//===----------------------------------------------------------------------===// +// GEMM +//===----------------------------------------------------------------------===// +void gemm_q4_0_4x4_q8_0(int n, float *__restrict s, size_t bs, const void *__restrict vx, const void *__restrict vy, int nr, int nc, const void *__restrict bias = nullptr); +void gemm_q4_0_4x8_q8_0(int n, float *__restrict s, size_t bs, const void *__restrict vx, const void *__restrict vy, int nr, int nc, const void *__restrict bias = nullptr); +void gemm_q4_0_8x8_q8_0(int n, float *__restrict s, size_t bs, const void *__restrict vx, const void *__restrict vy, int nr, int nc, const void *__restrict bias = nullptr); +void _gemm_q4_0_4x4_q8_0_bias(int n, float *__restrict s, size_t bs, const void *__restrict vx, const void *__restrict vy, int nr, int nc, const void *__restrict bias); +void _gemm_q4_0_4x8_q8_0_bias(int n, float *__restrict s, size_t bs, const void *__restrict vx, const void *__restrict vy, int nr, int nc, const void *__restrict bias); +void _gemm_q4_0_8x8_q8_0_bias(int n, float *__restrict s, size_t bs, const void *__restrict vx, const void *__restrict vy, int nr, int nc, const void *__restrict bias); + +void quantize_row_q4_0_4x4(const float *__restrict x, void *__restrict y, int k); +void quantize_row_q4_0_4x4(const float *__restrict x, void *__restrict y, int k, int raw); + +#endif // MLLM_GEMM_HPP \ No newline at end of file diff --git 
a/src/backends/cpu/quantize/Quantize.hpp b/mllm/backends/cpu/third_party/ggml/Quantize.hpp similarity index 94% rename from src/backends/cpu/quantize/Quantize.hpp rename to mllm/backends/cpu/third_party/ggml/Quantize.hpp index b2a0abc8f..b41b77ce2 100644 --- a/src/backends/cpu/quantize/Quantize.hpp +++ b/mllm/backends/cpu/third_party/ggml/Quantize.hpp @@ -32,13 +32,13 @@ #include "assert.h" #include "math.h" #include -#include #include "Types.hpp" #include +#include "QuantizeFP16.hpp" -// #if defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || defined(_M_IX86) -// #include -// #endif +#if defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || defined(_M_IX86) +#include +#endif #undef MIN #undef MAX @@ -71,89 +71,6 @@ #endif #endif -#if defined(__ARM_NEON) && !defined(_MSC_VER) -#include -#define MLLM_COMPUTE_FP16_TO_FP32(x) ((float)(x)) -#define MLLM_COMPUTE_FP32_TO_FP16(x) ((mllm_fp16_t)x) - -#define MLLM_FP16_TO_FP32(x) ((float)(x)) -#define MLLM_FP32_TO_FP16(x) ((mllm_fp16_t)x) - -#elif defined _MSC_VER -#define MLLM_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x))) -#define MLLM_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0) - -static float table_f32_f16[1 << 16]; -static bool table_f32_f16_init = false; - -inline static float lookup_fp16_to_fp32(uint16_t f) { - if (!table_f32_f16_init) { - uint16_t ii; - for (int i = 0; i < (1 << 16); ++i) { - uint16_t ui = i; - memcpy(&ii, &ui, sizeof(ii)); - table_f32_f16[i] = MLLM_COMPUTE_FP16_TO_FP32(ii); - } - table_f32_f16_init = true; - } - uint16_t s; - memcpy(&s, &f, sizeof(uint16_t)); - return table_f32_f16[s]; -} - -#define MLLM_FP16_TO_FP32(x) lookup_fp16_to_fp32(x) -#define MLLM_FP32_TO_FP16(x) MLLM_COMPUTE_FP32_TO_FP16(x) - -#else -#define MLLM_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x) -#define MLLM_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0) - -static float table_f32_f16[1 << 16]; -static bool table_f32_f16_init = false; - -inline static float 
lookup_fp16_to_fp32(uint16_t f) { - if (!table_f32_f16_init) { - uint16_t ii; - for (int i = 0; i < (1 << 16); ++i) { - uint16_t ui = i; - memcpy(&ii, &ui, sizeof(ii)); - table_f32_f16[i] = MLLM_COMPUTE_FP16_TO_FP32(ii); - } - table_f32_f16_init = true; - } - uint16_t s; - memcpy(&s, &f, sizeof(uint16_t)); - return table_f32_f16[s]; -} - -#define MLLM_FP16_TO_FP32(x) lookup_fp16_to_fp32(x) -#define MLLM_FP32_TO_FP16(x) MLLM_COMPUTE_FP32_TO_FP16(x) -#endif - -static mllm_fp16_t table_exp_f16[1 << 16]; -static bool init_table_exp_f16_flag = false; -inline void init_table_exp_f16() { - mllm_fp16_t ii; - for (int i = 0; i < (1 << 16); ++i) { - uint16_t ui = i; - memcpy(&ii, &ui, sizeof(ii)); - const float f = MLLM_COMPUTE_FP16_TO_FP32(ii); - table_exp_f16[i] = MLLM_FP32_TO_FP16(expf(f)); - // float val = MLLM_FP16_TO_FP32(expf(f)); - // std::cout< +#include "Types.hpp" +#include + +// #if defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || defined(_M_IX86) +// #include +// #endif + +#undef MIN +#undef MAX +#define MIN(a, b) ((a) < (b) ? (a) : (b)) +#define MAX(a, b) ((a) > (b) ? 
(a) : (b)) + +// 16-bit float +// on Arm, we use __fp16 +// on x86, we use uint16_t +#ifdef __ARM_NEON + +#else + +#ifdef __wasm_simd128__ +#include +#else +#ifdef __POWER9_VECTOR__ +#include +#undef bool +#define bool _Bool +#else +#if defined(_MSC_VER) || defined(__MINGW32__) +#include +#else +#if !defined(__riscv) +#include +#endif +#endif +#endif +#endif +#endif + +// fp32<->fp16 start // +#if defined(__ARM_NEON) && !defined(_MSC_VER) +#include +#define MLLM_COMPUTE_FP16_TO_FP32(x) ((float)(x)) +#define MLLM_COMPUTE_FP32_TO_FP16(x) ((mllm_fp16_t)x) + +#define MLLM_FP16_TO_FP32(x) ((float)(x)) +#define MLLM_FP32_TO_FP16(x) ((mllm_fp16_t)x) + +#elif defined _MSC_VER +#define MLLM_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x))) +#define MLLM_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0) + +static float table_f32_f16[1 << 16]; +static bool table_f32_f16_init = false; + +inline static float lookup_fp16_to_fp32(uint16_t f) { + if (!table_f32_f16_init) { + uint16_t ii; + for (int i = 0; i < (1 << 16); ++i) { + uint16_t ui = i; + memcpy(&ii, &ui, sizeof(ii)); + table_f32_f16[i] = MLLM_COMPUTE_FP16_TO_FP32(ii); + } + table_f32_f16_init = true; + } + uint16_t s; + memcpy(&s, &f, sizeof(uint16_t)); + return table_f32_f16[s]; +} + +#define MLLM_FP16_TO_FP32(x) lookup_fp16_to_fp32(x) +#define MLLM_FP32_TO_FP16(x) MLLM_COMPUTE_FP32_TO_FP16(x) + +#else +#define MLLM_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x) +#define MLLM_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0) + +static float table_f32_f16[1 << 16]; +static bool table_f32_f16_init = false; + +inline static float lookup_fp16_to_fp32(uint16_t f) { + if (!table_f32_f16_init) { + uint16_t ii; + for (int i = 0; i < (1 << 16); ++i) { + uint16_t ui = i; + memcpy(&ii, &ui, sizeof(ii)); + table_f32_f16[i] = MLLM_COMPUTE_FP16_TO_FP32(ii); + } + table_f32_f16_init = true; + } + uint16_t s; + memcpy(&s, &f, sizeof(uint16_t)); + return table_f32_f16[s]; +} + +#define MLLM_FP16_TO_FP32(x) 
lookup_fp16_to_fp32(x) +#define MLLM_FP32_TO_FP16(x) MLLM_COMPUTE_FP32_TO_FP16(x) +#endif + +static mllm_fp16_t table_exp_f16[1 << 16]; +static bool init_table_exp_f16_flag = false; +inline void init_table_exp_f16() { + mllm_fp16_t ii; + for (int i = 0; i < (1 << 16); ++i) { + uint16_t ui = i; + memcpy(&ii, &ui, sizeof(ii)); + const float f = MLLM_COMPUTE_FP16_TO_FP32(ii); + table_exp_f16[i] = MLLM_FP32_TO_FP16(expf(f)); + } +} + +// fp32<->fp16 end // + +#endif // MLLM_QUANTIZEFP16_HPP \ No newline at end of file diff --git a/src/backends/cpu/quantize/QuantizeQ2.cpp b/mllm/backends/cpu/third_party/ggml/QuantizeQ2.cpp similarity index 92% rename from src/backends/cpu/quantize/QuantizeQ2.cpp rename to mllm/backends/cpu/third_party/ggml/QuantizeQ2.cpp index 713cde23a..74cbf61e6 100644 --- a/src/backends/cpu/quantize/QuantizeQ2.cpp +++ b/mllm/backends/cpu/third_party/ggml/QuantizeQ2.cpp @@ -29,6 +29,59 @@ #include #include "QuantizeQ2.hpp" #include "Quantize.hpp" +#include // For std::min/max on some platforms + +// 修正后的量化函数 +void quantize_row_q2_0_reference(const float *__restrict x, block_q2_0 *__restrict y, int k) { + static const int QK = QK2_0; + assert(k % QK == 0); + const int nb = k / QK; + + for (int i = 0; i < nb; i++) { + float amax = 0.0f; // absolute max + for (int j = 0; j < QK; j++) { + amax = std::max(amax, fabsf(x[i * QK + j])); + } + + const float d = -amax / 1.0f; // d is negative or zero + const float id = (d != 0.0f) ? 
1.0f / d : 0.0f; + + y[i].d = MLLM_FP32_TO_FP16(d); + + for (int j = 0; j < QK / 4; ++j) { + y[i].qs[j] = 0; + for (int l = 0; l < 4; ++l) { + const float x0 = x[i * QK + j * 4 + l] * id; + const uint8_t xi0 = static_cast(fminf(3, roundf(x0 + 2.0f))); + y[i].qs[j] |= (xi0 << (l * 2)); + } + } + } +} + +void quantize_row_q2_0(const float *__restrict x, void *__restrict y, int k) { + quantize_row_q2_0_reference(x, (block_q2_0 *)y, k); +} + +// 修正后的反量化函数 +void dequantize_row_q2_0(const void *__restrict vx, float *__restrict y, int k) { + static const int QK = QK2_0; + assert(k % QK == 0); + + const block_q2_0 *__restrict x = (const block_q2_0 *)vx; + const int nb = k / QK; + + for (int i = 0; i < nb; i++) { + const float d = MLLM_FP16_TO_FP32(x[i].d); + + for (int j = 0; j < QK / 4; ++j) { + for (int l = 0; l < 4; ++l) { + const int x0 = ((x[i].qs[j] >> (l * 2)) & 0x03) - 2; + y[i * QK + j * 4 + l] = x0 * d; + } + } + } +} static float make_qkx2_quants(int n, int nmax, const float *__restrict x, const float *__restrict weights, uint8_t *__restrict L, float *__restrict the_min, uint8_t *__restrict Laux, diff --git a/src/backends/cpu/quantize/QuantizeQ2.hpp b/mllm/backends/cpu/third_party/ggml/QuantizeQ2.hpp similarity index 92% rename from src/backends/cpu/quantize/QuantizeQ2.hpp rename to mllm/backends/cpu/third_party/ggml/QuantizeQ2.hpp index c1034a0e2..bd4f8b636 100644 --- a/src/backends/cpu/quantize/QuantizeQ2.hpp +++ b/mllm/backends/cpu/third_party/ggml/QuantizeQ2.hpp @@ -30,6 +30,9 @@ #include "Types.hpp" +void quantize_row_q2_0(const float *__restrict x, void *__restrict y, int k); +void dequantize_row_q2_0(const void *__restrict x, float *__restrict y, int k); + void quantize_row_q2_K(const float *__restrict x, void *__restrict y, int k); void dequantize_row_q2_K(const block_q2_K *__restrict x, float *__restrict y, int64_t k); diff --git a/src/backends/cpu/quantize/QuantizeQ3.cpp b/mllm/backends/cpu/third_party/ggml/QuantizeQ3.cpp similarity index 100% rename 
from src/backends/cpu/quantize/QuantizeQ3.cpp rename to mllm/backends/cpu/third_party/ggml/QuantizeQ3.cpp diff --git a/src/backends/cpu/quantize/QuantizeQ3.hpp b/mllm/backends/cpu/third_party/ggml/QuantizeQ3.hpp similarity index 100% rename from src/backends/cpu/quantize/QuantizeQ3.hpp rename to mllm/backends/cpu/third_party/ggml/QuantizeQ3.hpp diff --git a/src/backends/cpu/quantize/QuantizeQ4.cpp b/mllm/backends/cpu/third_party/ggml/QuantizeQ4.cpp similarity index 100% rename from src/backends/cpu/quantize/QuantizeQ4.cpp rename to mllm/backends/cpu/third_party/ggml/QuantizeQ4.cpp diff --git a/src/backends/cpu/quantize/QuantizeQ4.hpp b/mllm/backends/cpu/third_party/ggml/QuantizeQ4.hpp similarity index 100% rename from src/backends/cpu/quantize/QuantizeQ4.hpp rename to mllm/backends/cpu/third_party/ggml/QuantizeQ4.hpp diff --git a/src/backends/cpu/quantize/QuantizeQ6.cpp b/mllm/backends/cpu/third_party/ggml/QuantizeQ6.cpp similarity index 100% rename from src/backends/cpu/quantize/QuantizeQ6.cpp rename to mllm/backends/cpu/third_party/ggml/QuantizeQ6.cpp diff --git a/src/backends/cpu/quantize/QuantizeQ6.hpp b/mllm/backends/cpu/third_party/ggml/QuantizeQ6.hpp similarity index 100% rename from src/backends/cpu/quantize/QuantizeQ6.hpp rename to mllm/backends/cpu/third_party/ggml/QuantizeQ6.hpp diff --git a/src/backends/cpu/quantize/QuantizeQ8.cpp b/mllm/backends/cpu/third_party/ggml/QuantizeQ8.cpp similarity index 89% rename from src/backends/cpu/quantize/QuantizeQ8.cpp rename to mllm/backends/cpu/third_party/ggml/QuantizeQ8.cpp index 44e16292d..35f51f7e8 100644 --- a/src/backends/cpu/quantize/QuantizeQ8.cpp +++ b/mllm/backends/cpu/third_party/ggml/QuantizeQ8.cpp @@ -541,4 +541,84 @@ void quantize_round_dequantize_row_i8(const float *__restrict vx, float *__restr } } +// per-tensor int16 +void quantize_row_i16(const float *__restrict x, void *__restrict vy, int k, float scale) { + const int BLOCK_SIZE = 32; + assert(k % BLOCK_SIZE == 0); + const int nb = k / 
BLOCK_SIZE; + + int16_t *__restrict y = (int16_t *)vy; + + const float d = scale; + const float id = d ? 1.0f / d : 0.0f; + +#if defined(__ARM_NEON) + const int32x4_t min_32768 = vdupq_n_s32(-32768); + const int32x4_t max32767 = vdupq_n_s32(32767); + + for (int i = 0; i < nb; i++) { + float32x4_t srcv[8]; + for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i * 32 + 4 * j); + + for (int j = 0; j < 8; j++) { + const float32x4_t v = vmulq_n_f32(srcv[j], id); + int32x4_t vi = vcvtnq_s32_f32(v); + + vi = vminq_s32(vi, max32767); + vi = vmaxq_s32(vi, min_32768); + + y[i * 32 + 4 * j + 0] = (int16_t)vgetq_lane_s32(vi, 0); + y[i * 32 + 4 * j + 1] = (int16_t)vgetq_lane_s32(vi, 1); + y[i * 32 + 4 * j + 2] = (int16_t)vgetq_lane_s32(vi, 2); + y[i * 32 + 4 * j + 3] = (int16_t)vgetq_lane_s32(vi, 3); + } + } +#else + // fallback scalar version + for (int i = 0; i < k; i++) { + int v = (int)roundf(x[i] * id); + if (v < -32768) v = -32768; + if (v > 32767) v = 32767; + y[i] = (int16_t)v; + } +#endif +} + +void dequantize_row_i16(const void *__restrict vx, float *__restrict y, int k, float scale) { +#if defined(__ARM_NEON) + const int16_t *__restrict x = (int16_t *)vx; + + float32x4_t scale_vec = vdupq_n_f32(scale); + + int i; + for (i = 0; i <= k - 8; i += 8) { + // Load 8 int16_t values + int16x8_t x_vec = vld1q_s16(&x[i]); + + // Split into lower and upper 4 elements + int32x4_t x_lo = vmovl_s16(vget_low_s16(x_vec)); // 前4个 int16 -> int32 + int32x4_t x_hi = vmovl_s16(vget_high_s16(x_vec)); // 后4个 int16 -> int32 + + // Convert to float32 + float32x4_t x_f32_lo = vcvtq_f32_s32(x_lo); + float32x4_t x_f32_hi = vcvtq_f32_s32(x_hi); + + // Multiply by scale + x_f32_lo = vmulq_f32(x_f32_lo, scale_vec); + x_f32_hi = vmulq_f32(x_f32_hi, scale_vec); + + // Store result + vst1q_f32(&y[i], x_f32_lo); + vst1q_f32(&y[i + 4], x_f32_hi); + } + + // Handle remaining elements + for (; i < k; i++) { + y[i] = x[i] * scale; + } +#else +// TODO: avx +#endif +} + // #endif \ No newline at end of file 
diff --git a/src/backends/cpu/quantize/QuantizeQ8.hpp b/mllm/backends/cpu/third_party/ggml/QuantizeQ8.hpp similarity index 90% rename from src/backends/cpu/quantize/QuantizeQ8.hpp rename to mllm/backends/cpu/third_party/ggml/QuantizeQ8.hpp index defb69943..77ead9267 100644 --- a/src/backends/cpu/quantize/QuantizeQ8.hpp +++ b/mllm/backends/cpu/third_party/ggml/QuantizeQ8.hpp @@ -40,5 +40,8 @@ void quantize_row_i8(const float *__restrict x, void *__restrict y, int k, float void dequantize_row_i8(const void *__restrict vx, float *__restrict y, int k, float scale = 1.f); void dequantize_row_i8_to_fp16(const void *__restrict vx, void *__restrict vy, int k, float scale = 1.f); void quantize_round_dequantize_row_i8(const float *__restrict vx, float *__restrict y, int k, float scale = 1.f); +// per-tensor int16 quantize +void quantize_row_i16(const float *__restrict x, void *__restrict y, int k, float scale = 1.f); +void dequantize_row_i16(const void *__restrict vx, float *__restrict y, int k, float scale = 1.f); #endif // MLLM_QUANTIZEQ8_HPP diff --git a/mllm/backends/cpu/third_party/ggml/VecDotFP16.cpp b/mllm/backends/cpu/third_party/ggml/VecDotFP16.cpp new file mode 100644 index 000000000..286db0f01 --- /dev/null +++ b/mllm/backends/cpu/third_party/ggml/VecDotFP16.cpp @@ -0,0 +1,64 @@ +/* + * This code is based on ggml(https://github.com/ggerganov/ggml), + * please see https://github.com/ggerganov/ggml/blob/master/src/ggml.c + * ggml is licensed under MIT Copyright (c) 2022 Georgi Gerganov: + * + * MIT License + * Copyright (c) 2022 Georgi Gerganov + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, 
subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "VecDotFP16.hpp" + +void vec_dot_fp16(const int n, float *__restrict s, const mllm_fp16_t *__restrict vx, const mllm_fp16_t *__restrict vy) { + float sumf = 0.0; + +#if defined(__AVX2__) || defined(__ARM_NEON) + const int np = (n & ~(MLLM_F16_STEP - 1)); + + MLLM_F16_VEC sum[MLLM_F16_ARR] = {MLLM_F16_VEC_ZERO}; + + MLLM_F16_VEC ax[MLLM_F16_ARR]; + MLLM_F16_VEC ay[MLLM_F16_ARR]; + + for (int i = 0; i < np; i += MLLM_F16_STEP) { + for (int j = 0; j < MLLM_F16_ARR; j++) { + ax[j] = MLLM_F16_VEC_LOAD(vx + i + j * MLLM_F16_EPR, j); + ay[j] = MLLM_F16_VEC_LOAD(vy + i + j * MLLM_F16_EPR, j); + + sum[j] = MLLM_F16_VEC_FMA(sum[j], ax[j], ay[j]); + } + } + + // reduce sum0..sum3 to sum0 + MLLM_F16_VEC_REDUCE(sumf, sum); + + // leftovers + for (int i = np; i < n; ++i) { + sumf += (float)(MLLM_FP16_TO_FP32(vx[i]) * MLLM_FP16_TO_FP32(vy[i])); + } +#else + for (int i = 0; i < n; ++i) { + sumf += (float)(MLLM_FP16_TO_FP32(vx[i]) * MLLM_FP16_TO_FP32(vy[i])); + } +#endif + + *s = sumf; +} diff --git a/mllm/backends/cpu/third_party/ggml/VecDotFP16.hpp b/mllm/backends/cpu/third_party/ggml/VecDotFP16.hpp new file mode 100644 index 000000000..a53d78567 --- /dev/null +++ b/mllm/backends/cpu/third_party/ggml/VecDotFP16.hpp @@ -0,0 +1,33 @@ +/* + * This code is based on mllm(https://github.com/ggerganov/mllm), + * 
please see https://github.com/ggerganov/ggml/blob/master/src/ggml.c + * ggml is licensed under MIT Copyright (c) 2022 Georgi Gerganov: + * + * MIT License + * Copyright (c) 2022 Georgi Gerganov + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#pragma once +#include "ComputeUtils.hpp" + +// using namespace mllm; + +void vec_dot_fp16(const int n, float *__restrict s, const mllm_fp16_t *__restrict vx, const mllm_fp16_t *__restrict vy); \ No newline at end of file diff --git a/mllm/backends/cpu/third_party/ggml/VecDotFP32.cpp b/mllm/backends/cpu/third_party/ggml/VecDotFP32.cpp new file mode 100644 index 000000000..ae54ddc2c --- /dev/null +++ b/mllm/backends/cpu/third_party/ggml/VecDotFP32.cpp @@ -0,0 +1,177 @@ +/* + * This code is based on ggml(https://github.com/ggerganov/ggml), + * please see https://github.com/ggerganov/ggml/blob/master/src/ggml.c + * ggml is licensed under MIT Copyright (c) 2022 Georgi Gerganov: + * + * MIT License + * Copyright (c) 2022 Georgi Gerganov + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "VecDotFP32.hpp" + +#ifdef __AVX2__ +static void vec_dot_fp32_avx2(const int n, float *__restrict s, const float *__restrict x, const float *__restrict y) { + float sumf = 0.0F; + const int np = (n & ~(MLLM_F32_STEP - 1)); + + MLLM_F32_VEC sum[MLLM_F32_ARR] = {MLLM_F32_VEC_ZERO}; + + MLLM_F32_VEC ax[MLLM_F32_ARR]; + MLLM_F32_VEC ay[MLLM_F32_ARR]; + + for (int i = 0; i < np; i += MLLM_F32_STEP) { + for (int j = 0; j < MLLM_F32_ARR; j++) { + ax[j] = MLLM_F32_VEC_LOAD(x + i + j * MLLM_F32_EPR); + ay[j] = MLLM_F32_VEC_LOAD(y + i + j * MLLM_F32_EPR); + + sum[j] = MLLM_F32_VEC_FMA(sum[j], ax[j], ay[j]); + } + } + + // reduce sum0..sum3 to sum0 + MLLM_F32_VEC_REDUCE(sumf, sum); + + // leftovers + for (int i = np; i < n; ++i) { + sumf += x[i] * y[i]; + } + + *s = sumf; +} +#endif + +#ifdef __ARM_NEON +static void vec_dot_fp32_arm(const int n, float *__restrict s, const float *__restrict x, const float *__restrict y) { + float sumf = 0.0F; + const int np = (n & ~(16 - 1)); + + F32_VEC sum[4] = {vdupq_n_f32(0.0F)}; + + F32_VEC ax[F32_ARR]; + F32_VEC ay[F32_ARR]; + + for (int i = 0; i < np; i += F32_STEP) { + for (int j = 0; j < F32_ARR; j++) { + ax[j] = vld1q_f32(x + i + j * F32_REG); + ay[j] = vld1q_f32(y + i + j * F32_REG); + sum[j] = vfmaq_f32(sum[j], ax[j], ay[j]); + // sum[j] = vmlaq_lane_f32(sum[j], ax[j], ay[0], + } + } + + // reduce sum0..sum3 to sum0 + F32_VEC_REDUCE(sumf, sum); + + // leftovers + for (int i = np; i < n; ++i) { + sumf += x[i] * y[i]; + } + + *s = sumf; +} +#endif + +void vec_dot_fp32(const int n, float *__restrict s, const float *__restrict vx, const float *__restrict vy) { +#ifdef __AVX2__ + vec_dot_fp32_avx2(n, s, vx, vy); +#elif defined(__ARM_NEON) + vec_dot_fp32_arm(n, s, vx, vy); +#endif +} + +#ifdef __AVX2__ +static void vec_value_dot_fp32_avx2(const int n, float *__restrict s, const float *__restrict x, const float *__restrict y, bool addition) { + float sumf = 0.0F; + const int np = (n & ~(MLLM_F32_STEP - 1)); + + 
MLLM_F32_VEC sum[MLLM_F32_ARR] = {MLLM_F32_VEC_ZERO}; + + MLLM_F32_VEC ax[MLLM_F32_ARR]; + MLLM_F32_VEC ay[MLLM_F32_ARR]; + + for (int i = 0; i < np; i += MLLM_F32_STEP) { + for (int j = 0; j < MLLM_F32_ARR; j++) { + ax[j] = MLLM_F32_VEC_LOAD(x + i + j * MLLM_F32_EPR); + ay[j] = MLLM_F32_VEC_LOAD(y + i + j * MLLM_F32_EPR); + + sum[j] = MLLM_F32_VEC_FMA(sum[j], ax[j], ay[j]); + } + } + + // reduce sum0..sum3 to sum0 + MLLM_F32_VEC_REDUCE(sumf, sum); + + // leftovers + for (int i = np; i < n; ++i) { + sumf += x[i] * y[i]; + } + + *s = sumf; +} +#endif + +#ifdef __ARM_NEON +// s:vector k +// x:value +// y:vector k +static void vec_value_dot_fp32_arm(const int n, float *__restrict s, const float x, const float *__restrict y, bool addition) { + int i; + float32x4_t vec_x; + float32x4_t vec_y; + float32x4_t vec_s; + + vec_x = vdupq_n_f32(x); + + int n_aligned = n & -4; + + if (addition) { + for (i = 0; i < n_aligned; i += 4) { + vec_y = vld1q_f32(y + i); + vec_s = vmulq_f32(vec_x, vec_y); + vec_s = vaddq_f32(vec_s, vld1q_f32(s + i)); + vst1q_f32(s + i, vec_s); + } + } else { + for (i = 0; i < n_aligned; i += 4) { + vec_y = vld1q_f32(y + i); + vec_s = vmulq_f32(vec_x, vec_y); + vst1q_f32(s + i, vec_s); + } + } + for (; i < n; ++i) { + if (addition) + s[i] += x * y[i]; + else { + s[i] = x * y[i]; + } + } +} +#endif + +#ifdef __AVX2__ +void vec_value_dot_fp32(const int n, float *__restrict s, const float *x, const float *__restrict vy, bool addition) { + vec_value_dot_fp32_avx2(n, s, x, vy, addition); +} +#elif defined(__ARM_NEON) +void vec_value_dot_fp32(const int n, float *__restrict s, const float x, const float *__restrict vy, bool addition) { + vec_value_dot_fp32_arm(n, s, x, vy, addition); +} +#endif diff --git a/mllm/backends/cpu/third_party/ggml/VecDotFP32.hpp b/mllm/backends/cpu/third_party/ggml/VecDotFP32.hpp new file mode 100644 index 000000000..e9adb24c4 --- /dev/null +++ b/mllm/backends/cpu/third_party/ggml/VecDotFP32.hpp @@ -0,0 +1,76 @@ +/* + * This code is 
based on ggml(https://github.com/ggerganov/ggml), + * please see https://github.com/ggerganov/ggml/blob/master/src/ggml.c + * ggml is licensed under MIT Copyright (c) 2022 Georgi Gerganov: + * + * MIT License + * Copyright (c) 2022 Georgi Gerganov + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#pragma once +#include "ComputeUtils.hpp" + +// using namespace mllm; + +inline static void vec_scale_f32(const int n, float *y, const float v) { + const int np = (n & ~(MLLM_F32_STEP - 1)); + + MLLM_F32_VEC vx = MLLM_F32_VEC_SET1(v); + + MLLM_F32_VEC ay[MLLM_F32_ARR]; + + for (int i = 0; i < np; i += MLLM_F32_STEP) { + for (int j = 0; j < MLLM_F32_ARR; j++) { + ay[j] = MLLM_F32_VEC_LOAD(y + i + j * MLLM_F32_EPR); + ay[j] = MLLM_F32_VEC_MUL(ay[j], vx); + + MLLM_F32_VEC_STORE(y + i + (j * MLLM_F32_EPR), ay[j]); + } + } + + // leftovers + for (int i = np; i < n; ++i) { + y[i] *= v; + } +} + +inline void vec_mul_fp32(const int n, float *__restrict s, const float *__restrict x, const float *__restrict y) { + int i = 0; + const int np = (n & ~(MLLM_F32_STEP - 1)); + MLLM_F32_VEC ax[MLLM_F32_ARR]; + MLLM_F32_VEC ay[MLLM_F32_ARR]; + MLLM_F32_VEC as[MLLM_F32_ARR]; + for (i = 0; i < np; i += MLLM_F32_STEP) { + for (int j = 0; j < MLLM_F32_ARR; j++) { + ax[j] = MLLM_F32_VEC_LOAD(x + i + j * MLLM_F32_EPR); + ay[j] = MLLM_F32_VEC_LOAD(y + i + j * MLLM_F32_EPR); + as[j] = MLLM_F32_VEC_MUL(ax[j], ay[j]); + MLLM_F32_VEC_STORE(s + i + (j * MLLM_F32_EPR), as[j]); + } + } + for (; i < n; ++i) { + s[i] = x[i] * y[i]; + } +} + +void vec_dot_fp32(const int n, float *__restrict s, const float *__restrict vx, const float *__restrict vy); +// for sparse linear +void vec_value_dot_fp32(const int n, float *__restrict s, const float x, const float *__restrict vy, bool addition); \ No newline at end of file diff --git a/mllm/backends/cpu/third_party/ggml/VecDotQ2.cpp b/mllm/backends/cpu/third_party/ggml/VecDotQ2.cpp new file mode 100644 index 000000000..089be4674 --- /dev/null +++ b/mllm/backends/cpu/third_party/ggml/VecDotQ2.cpp @@ -0,0 +1,1526 @@ +/* + * This code is based on ggml(https://github.com/ggerganov/ggml), + * please see https://github.com/ggerganov/ggml/blob/master/src/ggml.c + * ggml is licensed under MIT Copyright (c) 2022 Georgi Gerganov: + * + * MIT License + * 
Copyright (c) 2022 Georgi Gerganov + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "VecDotQ2.hpp" +#include "ComputeUtils.hpp" + +void vec_dot_q2_0_q8_0(const int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy) { + const int qk = QK8_0; + const int nb = n / qk; + + assert(n % qk == 0); + + const auto *__restrict x = static_cast(vx); + const auto *__restrict y = static_cast(vy); + +#if defined(__AVX2__) + // AVX2 implementation + __m256 acc = _mm256_setzero_ps(); + + for (int i = 0; i < nb; ++i) { + const __m256 d_vec = _mm256_set1_ps(MLLM_FP16_TO_FP32(x[i].d) * MLLM_FP16_TO_FP32(y[i].d)); + + const __m128i q2_packed = _mm_loadl_epi64(reinterpret_cast(x[i].qs)); + + const __m256i pshufb_mask = _mm256_setr_epi8( + 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, + 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7); + const __m256i q2_bytes = _mm256_shuffle_epi8(_mm256_set_m128i(_mm_setzero_si128(), q2_packed), pshufb_mask); + + const __m256i shift_const = _mm256_set_epi32(0, 2, 4, 6, 0, 2, 4, 6); + const __m256i q2_shifted = _mm256_srlv_epi32(q2_bytes, shift_const); + const __m256i q2_isolated = _mm256_and_si256(q2_shifted, _mm256_set1_epi8(0x03)); + const __m256i q2_final = _mm256_sub_epi8(q2_isolated, _mm256_set1_epi8(2)); + + const __m256i q8_data = _mm256_loadu_si256(reinterpret_cast(y[i].qs)); + + const __m256i products = _mm256_maddubs_epi16(q2_final, q8_data); + const __m256i sum_lanes = _mm256_madd_epi16(_mm256_set1_epi16(1), products); + + acc = _mm256_fmadd_ps(_mm256_cvtepi32_ps(sum_lanes), d_vec, acc); + } + *s = hsum_float_8(acc); + +#elif defined(__ARM_NEON) + // ARM NEON implementation + float32x4_t sumv = vdupq_n_f32(0.0f); + + for (int i = 0; i < nb; i++) { + const float d = MLLM_FP16_TO_FP32(x[i].d) * MLLM_FP16_TO_FP32(y[i].d); + const float32x4_t d_vec = vdupq_n_f32(d); + + int32x4_t isum_block = vdupq_n_s32(0); + + const uint8_t *q2_ptr = x[i].qs; + const int8_t *q8_ptr = y[i].qs; + + // Unpack 8 bytes of Q2 data into a temporary 32-byte array + int8_t q2_unpacked[32]; + for (int j = 0; 
j < 8; ++j) { + uint8_t b = q2_ptr[j]; + q2_unpacked[j * 4 + 0] = ((b >> 0) & 3) - 2; + q2_unpacked[j * 4 + 1] = ((b >> 2) & 3) - 2; + q2_unpacked[j * 4 + 2] = ((b >> 4) & 3) - 2; + q2_unpacked[j * 4 + 3] = ((b >> 6) & 3) - 2; + } + + // Perform dot product on unpacked data + const int8x16_t q2_v0 = vld1q_s8(&q2_unpacked[0]); + const int8x16_t q2_v1 = vld1q_s8(&q2_unpacked[16]); + + const int8x16_t q8_v0 = vld1q_s8(q8_ptr); + const int8x16_t q8_v1 = vld1q_s8(q8_ptr + 16); + + const int16x8_t p0 = vmull_s8(vget_low_s8(q2_v0), vget_low_s8(q8_v0)); + const int16x8_t p1 = vmull_s8(vget_high_s8(q2_v0), vget_high_s8(q8_v0)); + const int16x8_t p2 = vmull_s8(vget_low_s8(q2_v1), vget_low_s8(q8_v1)); + const int16x8_t p3 = vmull_s8(vget_high_s8(q2_v1), vget_high_s8(q8_v1)); + + isum_block = vcombine_s32( + vpadd_s32(vpaddl_s16(vget_low_s16(p0)), vpaddl_s16(vget_high_s16(p0))), + vpadd_s32(vpaddl_s16(vget_low_s16(p1)), vpaddl_s16(vget_high_s16(p1)))); + isum_block = vaddq_s32(isum_block, vcombine_s32( + vpadd_s32(vpaddl_s16(vget_low_s16(p2)), vpaddl_s16(vget_high_s16(p2))), + vpadd_s32(vpaddl_s16(vget_low_s16(p3)), vpaddl_s16(vget_high_s16(p3))))); + + sumv = vmlaq_f32(sumv, vcvtq_f32_s32(isum_block), d_vec); + } + *s = vaddvq_f32(sumv); + +#else + // Fallback scalar implementation + float sumf = 0.0; + for (int i = 0; i < nb; ++i) { + const float d = MLLM_FP16_TO_FP32(x[i].d) * MLLM_FP16_TO_FP32(y[i].d); + int32_t isum = 0; + for (int j = 0; j < qk / 4; ++j) { + uint8_t packed_q2 = x[i].qs[j]; + for (int l = 0; l < 4; ++l) { + const int8_t x0 = ((packed_q2 >> (l * 2)) & 3) - 2; + isum += x0 * y[i].qs[j * 4 + l]; + } + } + sumf += d * isum; + } + *s = sumf; +#endif +} + +void vec_dot_q2_K_q8_K(int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy) { + const block_q2_K *__restrict x = (block_q2_K *)vx; + const block_q8_K *__restrict y = (block_q8_K *)vy; + + const int nb = n / QK_K; + +#ifdef __ARM_FEATURE_SVE + const int vector_length = svcntb() * 
8; + const svuint8_t m3s = svdup_n_u8(0x3); + const svuint32_t m4s = svdup_n_u32(0xF); + const svint32_t vzero_sv = svdup_n_s32(0); + svfloat32_t acc_sum = svdup_n_f32(0); + svbool_t pred_s32 = svptrue_pat_b32(SV_VL4); + + switch (vector_length) { + case 128: + for (int i = 0; i < nb; ++i) { + const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); + svfloat32_t d_broad = svdup_n_f32((float32_t)d); + const float dmin = -y[i].d * MLLM_FP16_TO_FP32(x[i].dmin); + svfloat32_t dmin_broad = svdup_n_f32((float32_t)dmin); + + const uint8_t *__restrict q2 = x[i].qs; + const int8_t *__restrict q8_sv = y[i].qs; + const uint8_t *__restrict sc = x[i].scales; + + svuint32_t mins_and_scales_sve = svld1ub_u32(svptrue_b32(), sc); + const svint32_t mins_sv_1 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), mins_and_scales_sve, 4)); + + mins_and_scales_sve = svld1ub_u32(svptrue_b32(), sc + 4); + const svint32_t mins_sv_2 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), mins_and_scales_sve, 4)); + + svint32_t q8sums_sv_1 = svld1sh_s32(svptrue_b32(), y[i].bsums); + svint32_t q8sums_sv_2 = svld1sh_s32(svptrue_b32(), y[i].bsums + 4); + + const svint32_t s0 = svadd_s32_x(svptrue_b32(), svmul_s32_x(svptrue_b32(), mins_sv_1, q8sums_sv_1), svmul_s32_x(svptrue_b32(), mins_sv_2, q8sums_sv_2)); + + mins_and_scales_sve = svld1ub_u32(svptrue_b32(), sc + 8); + const svint32_t mins_sv_3 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), mins_and_scales_sve, 4)); + + mins_and_scales_sve = svld1ub_u32(svptrue_b32(), sc + 12); + const svint32_t mins_sv_4 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), mins_and_scales_sve, 4)); + + q8sums_sv_1 = svld1sh_s32(svptrue_b32(), y[i].bsums + 8); + q8sums_sv_2 = svld1sh_s32(svptrue_b32(), y[i].bsums + 12); + + svint32_t s1 = svadd_s32_x(svptrue_b32(), svmul_s32_x(svptrue_b32(), mins_sv_3, q8sums_sv_1), svmul_s32_x(svptrue_b32(), mins_sv_4, q8sums_sv_2)); + + svfloat32_t temp = svcvt_f32_s32_x(svptrue_b32(), svadd_s32_x(svptrue_b32(), s0, s1)); + + 
acc_sum = svmla_f32_m(svptrue_b32(), acc_sum, temp, dmin_broad); + + svint32_t sumi1 = svdup_n_s32(0); + + { + const svuint8_t q2bits_1 = svld1_u8(svptrue_b8(), q2); + svint8_t q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_1, m3s)); + svint8_t q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); + q8_sv += 16; + const svint32_t scales_sv = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), svld1ub_u32(svptrue_b32(), sc), m4s)); + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 0)); + + const svuint8_t q2bits_3 = svld1_u8(svptrue_b8(), q2 + 16); + q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_3, m3s)); + q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); + q8_sv += 16; + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 1)); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_1, 2), m3s)); + q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); + q8_sv += 16; + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 2)); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_3, 2), m3s)); + q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); + q8_sv += 16; + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 3)); + + const svint32_t scales_sv_1 = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), svld1ub_u32(svptrue_b32(), sc + 4), m4s)); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_1, 4), m3s)); + q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); + q8_sv += 16; + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 0)); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_3, 4), 
m3s)); + q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); + q8_sv += 16; + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 1)); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_1, 6), m3s)); + q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); + q8_sv += 16; + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 2)); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_3, 6), m3s)); + q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); + q8_sv += 16; + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 3)); + + //------------------------------- + + q2 += 32; + const svint32_t scales_sv_2 = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), svld1ub_u32(svptrue_b32(), sc + 8), m4s)); + const svuint8_t q2bits_2 = svld1_u8(svptrue_b8(), q2); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_2, m3s)); + q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); + q8_sv += 16; + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 0)); + + const svuint8_t q2bits_4 = svld1_u8(svptrue_b8(), q2 + 16); + q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_4, m3s)); + q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); + q8_sv += 16; + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 1)); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_2, 2), m3s)); + q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); + q8_sv += 16; + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 2)); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_4, 
2), m3s)); + q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); + q8_sv += 16; + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 3)); + + const svint32_t scales_sv_3 = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), svld1ub_u32(svptrue_b32(), sc + 12), m4s)); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_2, 4), m3s)); + q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); + q8_sv += 16; + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 0)); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_4, 4), m3s)); + q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); + q8_sv += 16; + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 1)); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_2, 6), m3s)); + q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); + q8_sv += 16; + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 2)); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_4, 6), m3s)); + q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); + q8_sv += 16; + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 3)); + } + acc_sum = svmla_f32_m(svptrue_b32(), acc_sum, svcvt_f32_s32_x(svptrue_b32(), sumi1), d_broad); + } + *s = svaddv_f32(svptrue_b32(), acc_sum); + break; + + case 256: + case 512: + for (int i = 0; i < nb; ++i) { + const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); + svfloat32_t d_broad = svdup_n_f32((float32_t)d); + const float dmin = -y[i].d * MLLM_FP16_TO_FP32(x[i].dmin); + svfloat32_t dmin_broad = svdup_n_f32((float32_t)dmin); + + const uint8_t *__restrict q2 = x[i].qs; + const 
int8_t *__restrict q8_sv = y[i].qs; + const uint8_t *__restrict sc = x[i].scales; + + const svuint32_t mins_and_scales_sve = svld1ub_u32(svptrue_pat_b32(SV_VL8), sc); + sc += 8; + const svint32_t scales_sv = svreinterpret_s32_u32(svand_u32_m(svptrue_pat_b32(SV_VL8), mins_and_scales_sve, m4s)); + const svint32_t mins_sv_1 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_pat_b32(SV_VL8), mins_and_scales_sve, 4)); + svint32_t q8sums_sv_1 = svld1sh_s32(svptrue_pat_b32(SV_VL8), y[i].bsums); + + const svuint32_t mins_and_scales_sve_1 = svld1ub_u32(svptrue_pat_b32(SV_VL8), sc); + const svint32_t scales_sv_1 = svreinterpret_s32_u32(svand_u32_m(svptrue_pat_b32(SV_VL8), mins_and_scales_sve_1, m4s)); + const svint32_t mins_sv_2 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_pat_b32(SV_VL8), mins_and_scales_sve_1, 4)); + + svint32_t q8sums_sv_2 = svld1sh_s32(svptrue_pat_b32(SV_VL8), y[i].bsums + 8); + + svfloat32_t temp = svcvt_f32_s32_x(svptrue_pat_b32(SV_VL8), svadd_s32_x(svptrue_pat_b32(SV_VL8), svmul_s32_x(svptrue_pat_b32(SV_VL8), mins_sv_1, q8sums_sv_1), svmul_s32_x(svptrue_pat_b32(SV_VL8), mins_sv_2, q8sums_sv_2))); + + acc_sum = svmla_f32_m(svptrue_pat_b32(SV_VL8), acc_sum, temp, dmin_broad); + + svint32_t sumi1 = svdup_n_s32(0); + + { + const svuint8_t q2bits_1 = svld1_u8(svptrue_pat_b8(SV_VL32), q2); + svint8_t q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), q2bits_1, m3s)); + svint8_t q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); + q8_sv += 32; + + svint32_t scale_1 = svsel(pred_s32, svdup_lane_s32(scales_sv, 0), svdup_lane_s32(scales_sv, 1)); + sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_1, 2), m3s)); + q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); + q8_sv += 32; + + svint32_t scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv, 2), 
svdup_lane_s32(scales_sv, 3)); + sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(svdup_n_s32(0), q2bytes_sv, q8bytes_sv), scale_2); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_1, 4), m3s)); + q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); + q8_sv += 32; + + scale_1 = svsel(pred_s32, svdup_lane_s32(scales_sv, 4), svdup_lane_s32(scales_sv, 5)); + sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_1, 6), m3s)); + q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); + q8_sv += 32; + + scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv, 6), svdup_lane_s32(scales_sv, 7)); + sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_2); + + q2 += 32; + + const svuint8_t q2bits_2 = svld1_u8(svptrue_pat_b8(SV_VL32), q2); + q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), q2bits_2, m3s)); + q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); + q8_sv += 32; + + scale_1 = svsel(pred_s32, svdup_lane_s32(scales_sv_1, 0), svdup_lane_s32(scales_sv_1, 1)); + sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_2, 2), m3s)); + q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); + q8_sv += 32; + + scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv_1, 2), svdup_lane_s32(scales_sv_1, 3)); + sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_2); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_2, 4), m3s)); + q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); 
+ q8_sv += 32; + + scale_1 = svsel(pred_s32, svdup_lane_s32(scales_sv_1, 4), svdup_lane_s32(scales_sv_1, 5)); + sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_2, 6), m3s)); + q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); + q8_sv += 32; + + scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv_1, 6), svdup_lane_s32(scales_sv_1, 7)); + sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_2); + } + acc_sum = svmla_f32_m(svptrue_pat_b32(SV_VL8), acc_sum, svcvt_f32_s32_x(svptrue_pat_b32(SV_VL8), sumi1), d_broad); + } + *s = svaddv_f32(svptrue_pat_b32(SV_VL8), acc_sum); + break; + + default: + assert(false && "Unsupported vector length"); + break; + } + +#elif __ARM_NEON + const uint8x16_t m3 = vdupq_n_u8(0x3); + const uint8x16_t m4 = vdupq_n_u8(0xF); + + const int32x4_t vzero = vdupq_n_s32(0); + + mllm_int8x16x2_t q2bytes; + uint8_t aux[16]; + + float sum = 0; + + for (int i = 0; i < nb; ++i) { + const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * MLLM_FP16_TO_FP32(x[i].dmin); + + const uint8_t *__restrict q2 = x[i].qs; + const int8_t *__restrict q8 = y[i].qs; + const uint8_t *__restrict sc = x[i].scales; + + const uint8x16_t mins_and_scales = vld1q_u8(sc); + const uint8x16_t scales = vandq_u8(mins_and_scales, m4); + vst1q_u8(aux, scales); + + const uint8x16_t mins = vshrq_n_u8(mins_and_scales, 4); + const mllm_int16x8x2_t q8sums = mllm_vld1q_s16_x2(y[i].bsums); + const mllm_int16x8x2_t mins16 = {{vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(mins))), vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(mins)))}}; + const int32x4_t s0 = vaddq_s32(vmull_s16(vget_low_s16(mins16.val[0]), vget_low_s16(q8sums.val[0])), + vmull_s16(vget_high_s16(mins16.val[0]), vget_high_s16(q8sums.val[0]))); + const int32x4_t s1 = 
vaddq_s32(vmull_s16(vget_low_s16(mins16.val[1]), vget_low_s16(q8sums.val[1])), + vmull_s16(vget_high_s16(mins16.val[1]), vget_high_s16(q8sums.val[1]))); + sum += dmin * vaddvq_s32(vaddq_s32(s0, s1)); + + int isum = 0; + int is = 0; + + // We use this macro instead of a function call because for some reason + // the code runs 2-3% slower, even if the function is declared inline +#define MULTIPLY_ACCUM_WITH_SCALE(index) \ + isum += vaddvq_s32(mllm_vdotq_s32(vzero, q2bytes.val[0], q8bytes.val[0])) * aux[is + (index)]; \ + isum += vaddvq_s32(mllm_vdotq_s32(vzero, q2bytes.val[1], q8bytes.val[1])) * aux[is + 1 + (index)]; + +#define SHIFT_MULTIPLY_ACCUM_WITH_SCALE(shift, index) \ + q8bytes = mllm_vld1q_s8_x2(q8); \ + q8 += 32; \ + q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[0], (shift)), m3)); \ + q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[1], (shift)), m3)); \ + MULTIPLY_ACCUM_WITH_SCALE((index)); + + for (int j = 0; j < QK_K / 128; ++j) { + const mllm_uint8x16x2_t q2bits = mllm_vld1q_u8_x2(q2); + q2 += 32; + + mllm_int8x16x2_t q8bytes = mllm_vld1q_s8_x2(q8); + q8 += 32; + q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[0], m3)); + q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[1], m3)); + + MULTIPLY_ACCUM_WITH_SCALE(0); + + SHIFT_MULTIPLY_ACCUM_WITH_SCALE(2, 2); + SHIFT_MULTIPLY_ACCUM_WITH_SCALE(4, 4); + SHIFT_MULTIPLY_ACCUM_WITH_SCALE(6, 6); + + is += 8; + } + + sum += d * isum; + } + + *s = sum; + +#elif defined __AVX2__ + + const __m256i m3 = _mm256_set1_epi8(3); + const __m128i m4 = _mm_set1_epi8(0xF); + + __m256 acc = _mm256_setzero_ps(); + + for (int i = 0; i < nb; ++i) { + const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * MLLM_FP16_TO_FP32(x[i].dmin); + + const uint8_t *__restrict q2 = x[i].qs; + const int8_t *__restrict q8 = y[i].qs; + + const __m128i mins_and_scales = _mm_loadu_si128((const __m128i *)x[i].scales); + const __m128i scales8 = 
_mm_and_si128(mins_and_scales, m4); + const __m128i mins8 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4); + const __m256i mins = _mm256_cvtepi8_epi16(mins8); + const __m256i prod = _mm256_madd_epi16(mins, _mm256_loadu_si256((const __m256i *)y[i].bsums)); + + acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(prod), acc); + + const __m256i all_scales = _mm256_cvtepi8_epi16(scales8); + const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0); + const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1); + const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)}; + + __m256i sumi = _mm256_setzero_si256(); + + for (int j = 0; j < QK_K / 128; ++j) { + const __m256i q2bits = _mm256_loadu_si256((const __m256i *)q2); + q2 += 32; + + const __m256i q8_0 = _mm256_loadu_si256((const __m256i *)q8); + q8 += 32; + const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); + q8 += 32; + const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); + q8 += 32; + const __m256i q8_3 = _mm256_loadu_si256((const __m256i *)q8); + q8 += 32; + + const __m256i q2_0 = _mm256_and_si256(q2bits, m3); + const __m256i q2_1 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 2), m3); + const __m256i q2_2 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 4), m3); + const __m256i q2_3 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 6), m3); + + __m256i p0 = _mm256_maddubs_epi16(q2_0, q8_0); + __m256i p1 = _mm256_maddubs_epi16(q2_1, q8_1); + __m256i p2 = _mm256_maddubs_epi16(q2_2, q8_2); + __m256i p3 = _mm256_maddubs_epi16(q2_3, q8_3); + + p0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(0)), p0); + p1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(1)), p1); + p2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(2)), p2); + p3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(3)), p3); + + p0 = _mm256_add_epi32(p0, p1); + p2 = 
_mm256_add_epi32(p2, p3); + + sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p0, p2)); + } + + acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc); + } + + *s = hsum_float_8(acc); + +#elif defined __AVX__ + + const __m128i m3 = _mm_set1_epi8(0x3); + const __m128i m4 = _mm_set1_epi8(0xF); + const __m128i m2 = _mm_set1_epi8(0x2); + + __m256 acc = _mm256_setzero_ps(); + + for (int i = 0; i < nb; ++i) { + const float dall = y[i].d * MLLM_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * MLLM_FP16_TO_FP32(x[i].dmin); + + const uint8_t *__restrict q2 = x[i].qs; + const int8_t *__restrict q8 = y[i].qs; + + // load mins and scales from block_q2_K.scales[QK_K/16] + const __m128i mins_and_scales = _mm_loadu_si128((const __m128i *)x[i].scales); + const __m128i scales16 = _mm_and_si128(mins_and_scales, m4); + const __m128i mins16 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4); + const __m128i mins_0 = _mm_cvtepi8_epi16(mins16); + const __m128i mins_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(mins16, mins16)); + + // summs = y[i].bsums * (x[i].scales >> 4) in 16bits*8*2 to 32bits*4*2 + const __m128i summs_0 = _mm_madd_epi16(mins_0, _mm_loadu_si128((const __m128i *)&y[i].bsums[0])); + const __m128i summs_1 = _mm_madd_epi16(mins_1, _mm_loadu_si128((const __m128i *)&y[i].bsums[8])); + + // sumf += -dmin * summs in 32bits*8 + acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(MM256_SET_M128I(summs_1, summs_0))), acc); + + const __m128i scales_0 = _mm_cvtepi8_epi16(scales16); + const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales16, scales16)); + const __m128i scales[2] = {scales_0, scales_1}; + + __m128i sumi_0 = _mm_setzero_si128(); + __m128i sumi_1 = _mm_setzero_si128(); + + for (int j = 0; j < QK_K / 128; ++j) { + // load Q8 quants int8*16*8 from block_q8_K.qs[QK_K] + const __m128i q8_0 = _mm_loadu_si128((const __m128i *)q8); + q8 += 16; + const __m128i q8_1 = _mm_loadu_si128((const __m128i *)q8); + 
q8 += 16; + const __m128i q8_2 = _mm_loadu_si128((const __m128i *)q8); + q8 += 16; + const __m128i q8_3 = _mm_loadu_si128((const __m128i *)q8); + q8 += 16; + const __m128i q8_4 = _mm_loadu_si128((const __m128i *)q8); + q8 += 16; + const __m128i q8_5 = _mm_loadu_si128((const __m128i *)q8); + q8 += 16; + const __m128i q8_6 = _mm_loadu_si128((const __m128i *)q8); + q8 += 16; + const __m128i q8_7 = _mm_loadu_si128((const __m128i *)q8); + q8 += 16; + + // load 2bits*16*8 from block_q2_K.qs[QK_K/4] + __m128i q2bits = _mm_loadu_si128((const __m128i *)q2); + q2 += 16; + const __m128i q2_0 = _mm_and_si128(q2bits, m3); + const __m128i q2_2 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3); + const __m128i q2_4 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3); + const __m128i q2_6 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3); + q2bits = _mm_loadu_si128((const __m128i *)q2); + q2 += 16; + const __m128i q2_1 = _mm_and_si128(q2bits, m3); + const __m128i q2_3 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3); + const __m128i q2_5 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3); + const __m128i q2_7 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3); + + // isuml = q8[l] * ((q2[l] >> shift) & 3) in 8bits*16*8 to 16bits*8*8 + __m128i p0 = _mm_maddubs_epi16(q2_0, q8_0); + __m128i p1 = _mm_maddubs_epi16(q2_1, q8_1); + __m128i p2 = _mm_maddubs_epi16(q2_2, q8_2); + __m128i p3 = _mm_maddubs_epi16(q2_3, q8_3); + __m128i p4 = _mm_maddubs_epi16(q2_4, q8_4); + __m128i p5 = _mm_maddubs_epi16(q2_5, q8_5); + __m128i p6 = _mm_maddubs_epi16(q2_6, q8_6); + __m128i p7 = _mm_maddubs_epi16(q2_7, q8_7); + + // isum += (x[i].scales[is++] & 0xF) * isuml in 16bits*8*8 to 32bits*4*8 + __m128i shuffle = _mm_set1_epi16(0x0100); + p0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p0); + shuffle = _mm_add_epi16(shuffle, m2); + p1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p1); + shuffle = _mm_add_epi16(shuffle, m2); + p2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p2); + shuffle = 
_mm_add_epi16(shuffle, m2); + p3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p3); + shuffle = _mm_add_epi16(shuffle, m2); + p4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p4); + shuffle = _mm_add_epi16(shuffle, m2); + p5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p5); + shuffle = _mm_add_epi16(shuffle, m2); + p6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p6); + shuffle = _mm_add_epi16(shuffle, m2); + p7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p7); + + p0 = _mm_add_epi32(p0, p1); + p2 = _mm_add_epi32(p2, p3); + p4 = _mm_add_epi32(p4, p5); + p6 = _mm_add_epi32(p6, p7); + + // isum in 32bits*4*2 + sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p0, p2)); + sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p4, p6)); + } + + // sumf += dall * isum - dmin * summs in 32bits + __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); + acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dall), _mm256_cvtepi32_ps(sumi)), acc); + } + + *s = hsum_float_8(acc); + +#elif defined __wasm_simd128__ + float sumf = 0; + + for (int i = 0; i < nb; ++i) { + const uint8_t *q2 = x[i].qs; + const int8_t *q8 = y[i].qs; + const uint8_t *sc = x[i].scales; + + // Vectorized summs calculation + v128_t summs_vec = wasm_i32x4_splat(0); + { + v128_t sc_vec = wasm_v128_load(sc); + v128_t sc_upper = wasm_u8x16_shr(sc_vec, 4); + + v128_t sc_low = wasm_u16x8_extend_low_u8x16(sc_upper); + v128_t sc_high = wasm_u16x8_extend_high_u8x16(sc_upper); + + v128_t bsums1 = wasm_v128_load(&y[i].bsums[0]); + v128_t bsums2 = wasm_v128_load(&y[i].bsums[8]); + + summs_vec = wasm_i32x4_add( + wasm_i32x4_add(wasm_i32x4_dot_i16x8(sc_low, bsums1), + wasm_i32x4_dot_i16x8(sc_high, bsums2)), + summs_vec); + + summs_vec = wasm_i32x4_add(summs_vec, wasm_i32x4_shuffle(summs_vec, summs_vec, 2, 3, 0, 1)); + summs_vec = wasm_i32x4_add(summs_vec, wasm_i32x4_shuffle(summs_vec, summs_vec, 1, 0, 3, 2)); + } + int32_t summs = wasm_i32x4_extract_lane(summs_vec, 0); + + // Vectorized isum 
calculation + int32_t isum = 0; + const uint8_t *sc_ptr = sc; + const int k_iters = QK_K / 128; + + for (int k = 0; k < k_iters; ++k) { + v128_t isum_vec = wasm_i32x4_splat(0); + int shift = 0; + + for (int j = 0; j < 4; ++j) { + const int d0 = (sc_ptr[0] & 0xF); + const int d1 = (sc_ptr[1] & 0xF); + sc_ptr += 2; + + // Process first 16 elements + v128_t q2_0 = wasm_v128_load(q2); + v128_t q8_0 = wasm_v128_load(q8); + v128_t q2_shift_0 = wasm_u8x16_shr(q2_0, shift); + v128_t q2_bits_0 = wasm_v128_and(q2_shift_0, wasm_i8x16_splat(0x03)); + + // Process next 16 elements + v128_t q2_1 = wasm_v128_load(q2 + 16); + v128_t q8_1 = wasm_v128_load(q8 + 16); + v128_t q2_shift_1 = wasm_u8x16_shr(q2_1, shift); + v128_t q2_bits_1 = wasm_v128_and(q2_shift_1, wasm_i8x16_splat(0x03)); + + // Calculate dot products + v128_t p0 = wasm_i32x4_dot_i16x8( + wasm_i16x8_extend_low_i8x16(q8_0), + wasm_i16x8_extend_low_i8x16(q2_bits_0)); + v128_t p1 = wasm_i32x4_dot_i16x8( + wasm_i16x8_extend_high_i8x16(q8_0), + wasm_i16x8_extend_high_i8x16(q2_bits_0)); + v128_t p2 = wasm_i32x4_dot_i16x8( + wasm_i16x8_extend_low_i8x16(q8_1), + wasm_i16x8_extend_low_i8x16(q2_bits_1)); + v128_t p3 = wasm_i32x4_dot_i16x8( + wasm_i16x8_extend_high_i8x16(q8_1), + wasm_i16x8_extend_high_i8x16(q2_bits_1)); + + // Accumulate scaled results + v128_t scaled = wasm_i32x4_add( + wasm_i32x4_mul(wasm_i32x4_add(p0, p1), wasm_i32x4_splat(d0)), + wasm_i32x4_mul(wasm_i32x4_add(p2, p3), wasm_i32x4_splat(d1))); + + isum_vec = wasm_i32x4_add(isum_vec, scaled); + q8 += 32; + shift += 2; + } + q2 += 32; + + // Horizontal sum of isum_vec + isum_vec = wasm_i32x4_add(isum_vec, wasm_i32x4_shuffle(isum_vec, isum_vec, 2, 3, 0, 1)); + isum_vec = wasm_i32x4_add(isum_vec, wasm_i32x4_shuffle(isum_vec, isum_vec, 1, 0, 3, 2)); + isum += wasm_i32x4_extract_lane(isum_vec, 0); + } + + const float dall = MLLM_FP16_TO_FP32(x[i].d) * y[i].d; + const float dmin = MLLM_FP16_TO_FP32(x[i].dmin) * y[i].d; + sumf += dall * isum - dmin * summs; + } + + 
*s = sumf; + +#elif defined __riscv_v_intrinsic + + const int vector_length = __riscv_vlenb() * 8; + float sumf = 0; + + uint8_t temp_01[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; + uint8_t atmp[16]; + + switch (vector_length) { + case 256: + for (int i = 0; i < nb; ++i) { + const uint8_t *q2 = x[i].qs; + const int8_t *q8 = y[i].qs; + const uint8_t *sc = x[i].scales; + + const float dall = y[i].d * MLLM_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * MLLM_FP16_TO_FP32(x[i].dmin); + + size_t vl = 16; + + vuint8m1_t scales = __riscv_vle8_v_u8m1(sc, vl); + vuint8m1_t aux = __riscv_vand_vx_u8m1(scales, 0x0F, vl); + + vint16m1_t q8sums = __riscv_vle16_v_i16m1(y[i].bsums, vl); + + vuint8mf2_t scales_2 = __riscv_vle8_v_u8mf2(sc, vl); + vuint8mf2_t mins8 = __riscv_vsrl_vx_u8mf2(scales_2, 0x4, vl); + vint16m1_t mins = __riscv_vreinterpret_v_u16m1_i16m1(__riscv_vzext_vf2_u16m1(mins8, vl)); + vint32m2_t prod = __riscv_vwmul_vv_i32m2(q8sums, mins, vl); + vint32m1_t vsums = __riscv_vredsum_vs_i32m2_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl); + + sumf += dmin * __riscv_vmv_x_s_i32m1_i32(vsums); + + vl = 32; + + vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1); + vuint8m1_t v_b = __riscv_vle8_v_u8m1(temp_01, vl); + + uint8_t is = 0; + int isum = 0; + + for (int j = 0; j < QK_K / 128; ++j) { + // load Q2 + vuint8m1_t q2_x = __riscv_vle8_v_u8m1(q2, vl); + + vuint8m1_t q2_0 = __riscv_vand_vx_u8m1(q2_x, 0x03, vl); + vuint8m1_t q2_1 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x2, vl), 0x03, vl); + vuint8m1_t q2_2 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x4, vl), 0x03, vl); + vuint8m1_t q2_3 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x6, vl), 0x03, vl); + + // duplicate scale elements for product + vuint8m1_t sc0 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 0 + is, vl), vl); + vuint8m1_t sc1 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 2 + is, vl), vl); + vuint8m1_t 
sc2 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 4 + is, vl), vl); + vuint8m1_t sc3 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 6 + is, vl), vl); + + vint16m2_t p0 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_0, sc0, vl)); + vint16m2_t p1 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_1, sc1, vl)); + vint16m2_t p2 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_2, sc2, vl)); + vint16m2_t p3 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_3, sc3, vl)); + + // load Q8 + vint8m1_t q8_0 = __riscv_vle8_v_i8m1(q8, vl); + vint8m1_t q8_1 = __riscv_vle8_v_i8m1(q8 + 32, vl); + vint8m1_t q8_2 = __riscv_vle8_v_i8m1(q8 + 64, vl); + vint8m1_t q8_3 = __riscv_vle8_v_i8m1(q8 + 96, vl); + + vint32m4_t s0 = __riscv_vwmul_vv_i32m4(p0, __riscv_vwcvt_x_x_v_i16m2(q8_0, vl), vl); + vint32m4_t s1 = __riscv_vwmul_vv_i32m4(p1, __riscv_vwcvt_x_x_v_i16m2(q8_1, vl), vl); + vint32m4_t s2 = __riscv_vwmul_vv_i32m4(p2, __riscv_vwcvt_x_x_v_i16m2(q8_2, vl), vl); + vint32m4_t s3 = __riscv_vwmul_vv_i32m4(p3, __riscv_vwcvt_x_x_v_i16m2(q8_3, vl), vl); + + vint32m1_t isum0 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s0, s1, vl), vzero, vl); + vint32m1_t isum1 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s2, s3, vl), isum0, vl); + + isum += __riscv_vmv_x_s_i32m1_i32(isum1); + + q2 += 32; + q8 += 128; + is = 8; + } + + sumf += dall * isum; + } + break; + case 128: + for (int i = 0; i < nb; ++i) { + const uint8_t *q2 = x[i].qs; + const int8_t *q8 = y[i].qs; + const uint8_t *sc = x[i].scales; + const float dall = y[i].d * MLLM_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * MLLM_FP16_TO_FP32(x[i].dmin); + uint8_t *patmp = atmp; + int vsums; + int tmp; + __asm__ __volatile__( + "vsetivli zero, 16, e8, m1\n\t" + "vmv.v.x v8, zero\n\t" + "vle8.v v1, (%[sc])\n\t" + "vand.vi v0, v1, 0xF\n\t" + "vsrl.vi v1, v1, 4\n\t" + "vse8.v v0, (%[scale])\n\t" + "vsetivli zero, 16, e16, m2\n\t" + "vle16.v v2, 
(%[bsums])\n\t" + "vzext.vf2 v0, v1\n\t" + "vwmul.vv v4, v0, v2\n\t" + "vsetivli zero, 16, e32, m4\n\t" + "vredsum.vs v8, v4, v8\n\t" + "vmv.x.s %[vsums], v8" + : [tmp] "=&r"(tmp), [vsums] "=&r"(vsums) + : [sc] "r"(sc), [scale] "r"(atmp), [bsums] "r"(y[i].bsums) + : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); + sumf += dmin * vsums; + int isum = 0; + + for (int j = 0; j < QK_K / 128; ++j) { + __asm__ __volatile__( + "vsetvli zero, %[vl32], e8, m2\n\t" + "vle8.v v0, (%[q2])\n\t" + "vsrl.vi v2, v0, 2\n\t" + "vsrl.vi v4, v0, 4\n\t" + "vsrl.vi v6, v0, 6\n\t" + "vand.vi v0, v0, 0x3\n\t" + "vand.vi v2, v2, 0x3\n\t" + "vand.vi v4, v4, 0x3\n\t" + "vsetvli zero, %[vl128], e8, m8\n\t" + "vle8.v v8, (%[q8])\n\t" + "vsetvli zero, %[vl64], e8, m4\n\t" + "vwmul.vv v16, v0, v8\n\t" + "vwmul.vv v24, v4, v12\n\t" + "vsetivli zero, 16, e16, m2\n\t" + "vmv.v.x v0, zero\n\t" + "vwredsum.vs v10, v16, v0\n\t" + "vwredsum.vs v9, v18, v0\n\t" + "vwredsum.vs v8, v20, v0\n\t" + "vwredsum.vs v7, v22, v0\n\t" + "vwredsum.vs v11, v24, v0\n\t" + "vwredsum.vs v12, v26, v0\n\t" + "vwredsum.vs v13, v28, v0\n\t" + "vwredsum.vs v14, v30, v0\n\t" + "vsetivli zero, 4, e32, m1\n\t" + "vslideup.vi v10, v9, 1\n\t" + "vslideup.vi v8, v7, 1\n\t" + "vslideup.vi v11, v12, 1\n\t" + "vslideup.vi v13, v14, 1\n\t" + "vslideup.vi v10, v8, 2\n\t" + "vslideup.vi v11, v13, 2\n\t" + "vsetivli zero, 8, e32, m2\n\t" + "vle8.v v15, (%[scale])\n\t" + "vzext.vf4 v12, v15\n\t" + "vmul.vv v10, v10, v12\n\t" + "vredsum.vs v0, v10, v0\n\t" + "vmv.x.s %[tmp], v0\n\t" + "add %[isum], %[isum], %[tmp]" + : [tmp] "=&r"(tmp), [isum] "+&r"(isum) + : [q2] "r"(q2), [scale] "r"(patmp), [q8] "r"(q8), [vl32] "r"(32), [vl64] "r"(64), [vl128] "r"(128) + : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", 
"v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); + q2 += 32; + q8 += 128; + patmp += 8; + } + + sumf += dall * isum; + } + break; + default: + assert(false && "Unsupported vector length"); + break; + } + + *s = sumf; + +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0x3); + const vector signed char lowScaleMask = vec_splats((signed char)0xF); + const vector int v0 = vec_splats((int32_t)0); + const vector unsigned char v2 = vec_splats((unsigned char)0x2); + const vector unsigned char v6 = vec_splats((unsigned char)0x6); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(MLLM_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + vector float vxmin = vec_splats(MLLM_FP16_TO_FP32(x[i].dmin)); + vector float vdmin = vec_mul(vxmin, vyd); + + vector signed short q8ysums0 = vec_xl(0, y[i].bsums); + vector signed short q8ysums1 = vec_xl(16, y[i].bsums); + + vector signed char q2xmins = (vector signed char)vec_xl(0, x[i].scales); + vector signed char vscales = vec_and(q2xmins, lowScaleMask); + + q2xmins = vec_sr(q2xmins, v4); + vector signed short q2xmins0 = vec_unpackh(q2xmins); + vector signed short q2xmins1 = vec_unpackl(q2xmins); + + vector signed int prod0 = vec_mule(q2xmins0, q8ysums0); + vector signed int prod1 = vec_mulo(q2xmins0, q8ysums0); + vector signed int prod2 = vec_mule(q2xmins1, q8ysums1); + vector signed int prod3 = vec_mulo(q2xmins1, q8ysums1); + + vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0); + vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1); + vsumf2 = vec_nmsub(vec_ctf(prod2, 0), vdmin, vsumf2); + vsumf3 = 
vec_nmsub(vec_ctf(prod3, 0), vdmin, vsumf3); + + vector signed int vsumi0 = v0; + vector signed int vsumi1 = v0; + vector signed int vsumi2 = v0; + vector signed int vsumi3 = v0; + vector signed int vsumi4 = v0; + vector signed int vsumi5 = v0; + vector signed int vsumi6 = v0; + vector signed int vsumi7 = v0; + + const uint8_t *__restrict q2 = x[i].qs; + const int8_t *__restrict q8 = y[i].qs; + + for (int j = 0; j < QK_K / 128; ++j) { + __builtin_prefetch(q2, 0, 1); + __builtin_prefetch(q8, 0, 1); + + vector signed char qxs0 = (vector signed char)vec_xl(0, q2); + vector signed char qxs1 = (vector signed char)vec_xl(16, q2); + q2 += 32; + + vector unsigned char q2x00 = (vector unsigned char)vec_and(qxs0, lowMask); + vector unsigned char q2x01 = (vector unsigned char)vec_and(vec_sr(qxs0, v2), lowMask); + vector unsigned char q2x02 = (vector unsigned char)vec_and(vec_sr(qxs0, v4), lowMask); + vector unsigned char q2x03 = (vector unsigned char)vec_and(vec_sr(qxs0, v6), lowMask); + vector unsigned char q2x10 = (vector unsigned char)vec_and(qxs1, lowMask); + vector unsigned char q2x11 = (vector unsigned char)vec_and(vec_sr(qxs1, v2), lowMask); + vector unsigned char q2x12 = (vector unsigned char)vec_and(vec_sr(qxs1, v4), lowMask); + vector unsigned char q2x13 = (vector unsigned char)vec_and(vec_sr(qxs1, v6), lowMask); + + vector signed char q8y00 = vec_xl(0, q8); + vector signed char q8y10 = vec_xl(16, q8); + vector signed char q8y01 = vec_xl(32, q8); + vector signed char q8y11 = vec_xl(48, q8); + vector signed char q8y02 = vec_xl(64, q8); + vector signed char q8y12 = vec_xl(80, q8); + vector signed char q8y03 = vec_xl(96, q8); + vector signed char q8y13 = vec_xl(112, q8); + q8 += 128; + + vector signed int qv0 = vec_msum(q8y00, q2x00, v0); + vector signed int qv1 = vec_msum(q8y01, q2x01, v0); + vector signed int qv2 = vec_msum(q8y02, q2x02, v0); + vector signed int qv3 = vec_msum(q8y03, q2x03, v0); + vector signed int qv4 = vec_msum(q8y10, q2x10, v0); + vector signed 
int qv5 = vec_msum(q8y11, q2x11, v0); + vector signed int qv6 = vec_msum(q8y12, q2x12, v0); + vector signed int qv7 = vec_msum(q8y13, q2x13, v0); + + vector signed short vscales_07 = vec_unpackh(vscales); + vector signed int vscales_03 = vec_unpackh(vscales_07); + vector signed int vscales_47 = vec_unpackl(vscales_07); + vector signed int vs0 = vec_splat(vscales_03, 0); + vector signed int vs1 = vec_splat(vscales_03, 1); + vector signed int vs2 = vec_splat(vscales_03, 2); + vector signed int vs3 = vec_splat(vscales_03, 3); + vector signed int vs4 = vec_splat(vscales_47, 0); + vector signed int vs5 = vec_splat(vscales_47, 1); + vector signed int vs6 = vec_splat(vscales_47, 2); + vector signed int vs7 = vec_splat(vscales_47, 3); + vscales = vec_sld(vscales, vscales, 8); + + vsumi0 = vec_add(vec_mul(qv0, vs0), vsumi0); + vsumi1 = vec_add(vec_mul(qv1, vs2), vsumi1); + vsumi2 = vec_add(vec_mul(qv2, vs4), vsumi2); + vsumi3 = vec_add(vec_mul(qv3, vs6), vsumi3); + vsumi4 = vec_add(vec_mul(qv4, vs1), vsumi4); + vsumi5 = vec_add(vec_mul(qv5, vs3), vsumi5); + vsumi6 = vec_add(vec_mul(qv6, vs5), vsumi6); + vsumi7 = vec_add(vec_mul(qv7, vs7), vsumi7); + } + + vsumi0 = vec_add(vsumi0, vsumi4); + vsumi1 = vec_add(vsumi1, vsumi5); + vsumi2 = vec_add(vsumi2, vsumi6); + vsumi3 = vec_add(vsumi3, vsumi7); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); + +#elif defined __loongarch_asx + + __m256 acc = (__m256)__lasx_xvldi(0); + + for (int i = 0; i < nb; ++i) { + const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * 
MLLM_FP16_TO_FP32(x[i].dmin); + + const uint8_t *__restrict q2 = x[i].qs; + const int8_t *__restrict q8 = y[i].qs; + + const __m128i mins_and_scales128 = __lsx_vld((const __m128i *)x[i].scales, 0); + const __m128i scales128 = __lsx_vandi_b(mins_and_scales128, 0xf); + const __m256i mins = lasx_ext8_16(__lsx_vsrli_b(mins_and_scales128, 4)); + const __m256i prod = lasx_madd_h(mins, __lasx_xvld((const __m256i *)y[i].bsums, 0)); + + acc = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(dmin), __lasx_xvffint_s_w(prod), acc); + + const v16i8 shuffle_mask = {0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15}; + const __m256i scales_shuffled = lasx_ext8_16(__lsx_vshuf_b(scales128, scales128, (__m128i)shuffle_mask)); + + __m256i sumi = __lasx_xvldi(0); + + for (int j = 0; j < QK_K / 128; ++j) { + const __m256i q2bits = __lasx_xvld((const __m256i *)q2, 0); + q2 += 32; + + const __m256i q8_0 = __lasx_xvld((const __m256i *)q8, 0); + q8 += 32; + const __m256i q8_1 = __lasx_xvld((const __m256i *)q8, 0); + q8 += 32; + const __m256i q8_2 = __lasx_xvld((const __m256i *)q8, 0); + q8 += 32; + const __m256i q8_3 = __lasx_xvld((const __m256i *)q8, 0); + q8 += 32; + + const __m256i q2_0 = __lasx_xvandi_b(q2bits, 3); + const __m256i q2_1 = __lasx_xvandi_b(__lasx_xvsrli_b(q2bits, 2), 3); + const __m256i q2_2 = __lasx_xvandi_b(__lasx_xvsrli_b(q2bits, 4), 3); + const __m256i q2_3 = __lasx_xvsrli_b(q2bits, 6); + + __m256i p0 = lasx_madd_h_b(q2_0, q8_0); + __m256i p1 = lasx_madd_h_b(q2_1, q8_1); + __m256i p2 = lasx_madd_h_b(q2_2, q8_2); + __m256i p3 = lasx_madd_h_b(q2_3, q8_3); + + p0 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 0), p0); + p1 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 1), p1); + p2 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 2), p2); + p3 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 3), p3); + + p0 = __lasx_xvadd_w(p0, p1); + p2 = __lasx_xvadd_w(p2, p3); + + sumi = __lasx_xvadd_w(sumi, __lasx_xvadd_w(p0, p2)); + } + + acc = 
__lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(sumi), acc); + } + + *s = hsum_float_8(acc); + +#else + + float sumf = 0; + + for (int i = 0; i < nb; ++i) { + const uint8_t *q2 = x[i].qs; + const int8_t *q8 = y[i].qs; + const uint8_t *sc = x[i].scales; + + int summs = 0; + for (int j = 0; j < 16; ++j) { + summs += y[i].bsums[j] * (sc[j] >> 4); + } + + const float dall = y[i].d * MLLM_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * MLLM_FP16_TO_FP32(x[i].dmin); + + int isum = 0; + int is = 0; + int d; + for (int k = 0; k < QK_K / 128; ++k) { + int shift = 0; + for (int j = 0; j < 4; ++j) { + d = sc[is++] & 0xF; + int isuml = 0; + for (int l = 0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3); + isum += d * isuml; + d = sc[is++] & 0xF; + isuml = 0; + for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3); + isum += d * isuml; + shift += 2; + q8 += 32; + } + q2 += 32; + } + sumf += dall * isum - dmin * summs; + } + *s = sumf; +#endif +} + +void vec_dot_iq2_xxs_q8_K(int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy) { + assert(n % QK_K == 0); + + const block_iq2_xxs *__restrict x = (block_iq2_xxs *)vx; + const block_q8_K *__restrict y = (block_q8_K *)vy; + + const int nb = n / QK_K; + +#if defined(__ARM_NEON) + + const uint64_t *signs64 = (const uint64_t *)keven_signs_q2xs; + + uint32_t aux32[4]; + const uint8_t *aux8 = (const uint8_t *)aux32; + + mllm_int8x16x4_t q2u; + mllm_int8x16x4_t q2s; + mllm_int8x16x4_t q8b; + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const float d = MLLM_FP16_TO_FP32(x[i].d) * y[i].d; + const uint16_t *__restrict q2 = x[i].qs; + const int8_t *__restrict q8 = y[i].qs; + float sumf1 = 0, sumf2 = 0; + for (int ib32 = 0; ib32 < QK_K / 32; ib32 += 2) { + q8b = mllm_vld1q_s8_x4(q8); + q8 += 64; + memcpy(aux32, q2, 4 * sizeof(uint32_t)); + q2 += 8; + q2u.val[0] = vcombine_s8(vld1_s8((const int8_t *)(iq2xxs_grid + aux8[0])), vld1_s8((const int8_t *)(iq2xxs_grid + 
aux8[1]))); + q2u.val[1] = vcombine_s8(vld1_s8((const int8_t *)(iq2xxs_grid + aux8[2])), vld1_s8((const int8_t *)(iq2xxs_grid + aux8[3]))); + q2u.val[2] = vcombine_s8(vld1_s8((const int8_t *)(iq2xxs_grid + aux8[8])), vld1_s8((const int8_t *)(iq2xxs_grid + aux8[9]))); + q2u.val[3] = vcombine_s8(vld1_s8((const int8_t *)(iq2xxs_grid + aux8[10])), vld1_s8((const int8_t *)(iq2xxs_grid + aux8[11]))); + q2s.val[0] = vcombine_s8(vld1_s8((const int8_t *)(signs64 + ((aux32[1] >> 0) & 127))), vld1_s8((const int8_t *)(signs64 + ((aux32[1] >> 7) & 127)))); + q2s.val[1] = vcombine_s8(vld1_s8((const int8_t *)(signs64 + ((aux32[1] >> 14) & 127))), vld1_s8((const int8_t *)(signs64 + ((aux32[1] >> 21) & 127)))); + q2s.val[2] = vcombine_s8(vld1_s8((const int8_t *)(signs64 + ((aux32[3] >> 0) & 127))), vld1_s8((const int8_t *)(signs64 + ((aux32[3] >> 7) & 127)))); + q2s.val[3] = vcombine_s8(vld1_s8((const int8_t *)(signs64 + ((aux32[3] >> 14) & 127))), vld1_s8((const int8_t *)(signs64 + ((aux32[3] >> 21) & 127)))); + q2u.val[0] = vmulq_s8(q2u.val[0], q2s.val[0]); + q2u.val[1] = vmulq_s8(q2u.val[1], q2s.val[1]); + q2u.val[2] = vmulq_s8(q2u.val[2], q2s.val[2]); + q2u.val[3] = vmulq_s8(q2u.val[3], q2s.val[3]); + const int32x4_t p1 = mllm_vdotq_s32(mllm_vdotq_s32(vdupq_n_s32(0), q2u.val[0], q8b.val[0]), q2u.val[1], q8b.val[1]); + const int32x4_t p2 = mllm_vdotq_s32(mllm_vdotq_s32(vdupq_n_s32(0), q2u.val[2], q8b.val[2]), q2u.val[3], q8b.val[3]); + sumf1 += vaddvq_s32(p1) * (0.5f + (aux32[1] >> 28)); + sumf2 += vaddvq_s32(p2) * (0.5f + (aux32[3] >> 28)); + } + sumf += d * (sumf1 + sumf2); + } + *s = 0.25f * sumf; + +#elif defined(__AVX2__) + + const uint64_t *signs64 = (const uint64_t *)keven_signs_q2xs; + + uint32_t aux32[4]; + const uint8_t *aux8 = (const uint8_t *)aux32; + + __m256 accumf = _mm256_setzero_ps(); + for (int i = 0; i < nb; ++i) { + const float d = MLLM_FP16_TO_FP32(x[i].d) * y[i].d; + const uint16_t *__restrict q2 = x[i].qs; + const int8_t *__restrict q8 = y[i].qs; + __m256i 
sumi1 = _mm256_setzero_si256(); + __m256i sumi2 = _mm256_setzero_si256(); + for (int ib32 = 0; ib32 < QK_K / 32; ib32 += 2) { + const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); + q8 += 32; + const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); + q8 += 32; + memcpy(aux32, q2, 4 * sizeof(uint32_t)); + q2 += 8; + const __m256i q2_1 = _mm256_set_epi64x(iq2xxs_grid[aux8[3]], iq2xxs_grid[aux8[2]], iq2xxs_grid[aux8[1]], iq2xxs_grid[aux8[0]]); + const __m256i q2_2 = _mm256_set_epi64x(iq2xxs_grid[aux8[11]], iq2xxs_grid[aux8[10]], iq2xxs_grid[aux8[9]], iq2xxs_grid[aux8[8]]); + const __m256i s2_1 = _mm256_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127], + signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); + const __m256i s2_2 = _mm256_set_epi64x(signs64[(aux32[3] >> 21) & 127], signs64[(aux32[3] >> 14) & 127], + signs64[(aux32[3] >> 7) & 127], signs64[(aux32[3] >> 0) & 127]); + const __m256i q8s_1 = _mm256_sign_epi8(q8_1, s2_1); + const __m256i q8s_2 = _mm256_sign_epi8(q8_2, s2_2); + const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); + const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); + const uint16_t ls1 = aux32[1] >> 28; + const uint16_t ls2 = aux32[3] >> 28; + const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2 * ls1 + 1)); + const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2 * ls2 + 1)); + sumi1 = _mm256_add_epi32(sumi1, p1); + sumi2 = _mm256_add_epi32(sumi2, p2); + } + + accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf); + } + + *s = 0.125f * hsum_float_8(accumf); + +#elif defined(__AVX__) + const uint64_t *signs64 = (const uint64_t *)keven_signs_q2xs; + + uint32_t aux32[4]; + const uint8_t *aux8 = (const uint8_t *)aux32; + + __m256 accumf = _mm256_setzero_ps(); + for (int i = 0; i < nb; ++i) { + const float d = MLLM_FP16_TO_FP32(x[i].d) * y[i].d; + const uint16_t *__restrict q2 = x[i].qs; + const int8_t *__restrict q8 = 
y[i].qs; + __m128i sumi1_0 = _mm_setzero_si128(); + __m128i sumi1_1 = _mm_setzero_si128(); + __m128i sumi2_0 = _mm_setzero_si128(); + __m128i sumi2_1 = _mm_setzero_si128(); + for (int ib32 = 0; ib32 < QK_K / 32; ib32 += 2) { + const __m128i q8_1_0 = _mm_loadu_si128((const __m128i *)q8); + q8 += 16; + const __m128i q8_1_1 = _mm_loadu_si128((const __m128i *)q8); + q8 += 16; + const __m128i q8_2_0 = _mm_loadu_si128((const __m128i *)q8); + q8 += 16; + const __m128i q8_2_1 = _mm_loadu_si128((const __m128i *)q8); + q8 += 16; + memcpy(aux32, q2, 4 * sizeof(uint32_t)); + q2 += 8; + const __m128i q2_1_0 = _mm_set_epi64x(iq2xxs_grid[aux8[1]], iq2xxs_grid[aux8[0]]); + const __m128i q2_1_1 = _mm_set_epi64x(iq2xxs_grid[aux8[3]], iq2xxs_grid[aux8[2]]); + const __m128i q2_2_0 = _mm_set_epi64x(iq2xxs_grid[aux8[9]], iq2xxs_grid[aux8[8]]); + const __m128i q2_2_1 = _mm_set_epi64x(iq2xxs_grid[aux8[11]], iq2xxs_grid[aux8[10]]); + const __m128i s2_1_0 = _mm_set_epi64x(signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); + const __m128i s2_1_1 = _mm_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127]); + const __m128i s2_2_0 = _mm_set_epi64x(signs64[(aux32[3] >> 7) & 127], signs64[(aux32[3] >> 0) & 127]); + const __m128i s2_2_1 = _mm_set_epi64x(signs64[(aux32[3] >> 21) & 127], signs64[(aux32[3] >> 14) & 127]); + const __m128i q8s_1_0 = _mm_sign_epi8(q8_1_0, s2_1_0); + const __m128i q8s_1_1 = _mm_sign_epi8(q8_1_1, s2_1_1); + const __m128i q8s_2_0 = _mm_sign_epi8(q8_2_0, s2_2_0); + const __m128i q8s_2_1 = _mm_sign_epi8(q8_2_1, s2_2_1); + const __m128i dot1_0 = _mm_maddubs_epi16(q2_1_0, q8s_1_0); + const __m128i dot1_1 = _mm_maddubs_epi16(q2_1_1, q8s_1_1); + const __m128i dot2_0 = _mm_maddubs_epi16(q2_2_0, q8s_2_0); + const __m128i dot2_1 = _mm_maddubs_epi16(q2_2_1, q8s_2_1); + const uint16_t ls1 = aux32[1] >> 28; + const uint16_t ls2 = aux32[3] >> 28; + const __m128i p1_0 = _mm_madd_epi16(dot1_0, _mm_set1_epi16(2 * ls1 + 1)); + const __m128i p1_1 = 
_mm_madd_epi16(dot1_1, _mm_set1_epi16(2 * ls1 + 1)); + const __m128i p2_0 = _mm_madd_epi16(dot2_0, _mm_set1_epi16(2 * ls2 + 1)); + const __m128i p2_1 = _mm_madd_epi16(dot2_1, _mm_set1_epi16(2 * ls2 + 1)); + sumi1_0 = _mm_add_epi32(sumi1_0, p1_0); + sumi1_1 = _mm_add_epi32(sumi1_1, p1_1); + sumi2_0 = _mm_add_epi32(sumi2_0, p2_0); + sumi2_1 = _mm_add_epi32(sumi2_1, p2_1); + } + + accumf = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_add_epi32(sumi1_1, sumi2_1), _mm_add_epi32(sumi1_0, sumi2_0)))), accumf); + } + + *s = 0.125f * hsum_float_8(accumf); + +#elif defined(__POWER9_VECTOR__) + const vector int v0 = vec_splats((int32_t)0); + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + const uint64_t *signs64 = (const uint64_t *)keven_signs_q2xs; + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(MLLM_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + vector signed int vsumi0 = v0; + vector signed int vsumi1 = v0; + vector signed int vsumi2 = v0; + vector signed int vsumi3 = v0; + + const uint16_t *__restrict q2 = x[i].qs; + const int8_t *__restrict q8 = y[i].qs; + + for (int j = 0; j < QK_K / 32; j += 2) { + __builtin_prefetch(q2, 0, 1); + __builtin_prefetch(q8, 0, 1); + + uint32_t aux32[4]; + const uint8_t *aux8 = (const uint8_t *)aux32; + + memcpy(aux32, q2, 4 * sizeof(uint32_t)); + q2 += 8; + + vector signed long long aux64x2_0 = {*(const int64_t *)(iq2xxs_grid + aux8[0]), *(const int64_t *)(iq2xxs_grid + aux8[1])}; + vector signed long long aux64x2_1 = {*(const int64_t *)(iq2xxs_grid + aux8[2]), *(const int64_t *)(iq2xxs_grid + aux8[3])}; + vector signed long long aux64x2_2 = {*(const int64_t *)(iq2xxs_grid + aux8[8]), *(const int64_t *)(iq2xxs_grid + aux8[9])}; + vector signed long long aux64x2_3 = {*(const int64_t *)(iq2xxs_grid + 
aux8[10]), *(const int64_t *)(iq2xxs_grid + aux8[11])}; + + vector signed long long vsigns0 = {*(const int64_t *)(signs64 + ((aux32[1] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 7) & 127))}; + vector signed long long vsigns1 = {*(const int64_t *)(signs64 + ((aux32[1] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 21) & 127))}; + vector signed long long vsigns2 = {*(const int64_t *)(signs64 + ((aux32[3] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 7) & 127))}; + vector signed long long vsigns3 = {*(const int64_t *)(signs64 + ((aux32[3] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 21) & 127))}; + + vector signed char q2x0 = (vector signed char)vec_mul((vector signed char)vsigns0, (vector signed char)aux64x2_0); + vector signed char q2x1 = (vector signed char)vec_mul((vector signed char)vsigns1, (vector signed char)aux64x2_1); + vector signed char q2x2 = (vector signed char)vec_mul((vector signed char)vsigns2, (vector signed char)aux64x2_2); + vector signed char q2x3 = (vector signed char)vec_mul((vector signed char)vsigns3, (vector signed char)aux64x2_3); + + vector signed char q8y0 = vec_xl(0, q8); + vector signed char q8y1 = vec_xl(16, q8); + vector signed char q8y2 = vec_xl(32, q8); + vector signed char q8y3 = vec_xl(48, q8); + q8 += 64; + + vector signed short qv0 = vec_add(vec_mule(q2x0, q8y0), vec_mulo(q2x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q2x1, q8y1), vec_mulo(q2x1, q8y1)); + vector signed short qv2 = vec_add(vec_mule(q2x2, q8y2), vec_mulo(q2x2, q8y2)); + vector signed short qv3 = vec_add(vec_mule(q2x3, q8y3), vec_mulo(q2x3, q8y3)); + + const uint16_t ls0 = aux32[1] >> 28; + const uint16_t ls1 = aux32[3] >> 28; + + vector signed short vscales01 = vec_splats((int16_t)(2 * ls0 + 1)); + vector signed short vscales23 = vec_splats((int16_t)(2 * ls1 + 1)); + + vsumi0 = vec_msum(qv0, vscales01, vsumi0); + vsumi1 = vec_msum(qv1, vscales01, vsumi1); + vsumi2 = vec_msum(qv2, vscales23, vsumi2); + 
vsumi3 = vec_msum(qv3, vscales23, vsumi3); + } + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = 0.125f * vec_extract(vsumf0, 0); + +#elif defined(__loongarch_asx) + + const uint64_t *signs64 = (const uint64_t *)keven_signs_q2xs; + + uint32_t aux32[4]; + const uint8_t *aux8 = (const uint8_t *)aux32; + + __m256 accumf = (__m256)__lasx_xvldi(0); + for (int i = 0; i < nb; ++i) { + const float d = MLLM_FP16_TO_FP32(x[i].d) * y[i].d; + const uint16_t *__restrict q2 = x[i].qs; + const int8_t *__restrict q8 = y[i].qs; + __m256i sumi1 = __lasx_xvldi(0); + __m256i sumi2 = __lasx_xvldi(0); + for (int ib32 = 0; ib32 < QK_K / 32; ib32 += 2) { + const __m256i q8_1 = __lasx_xvld((const __m256i *)q8, 0); + q8 += 32; + const __m256i q8_2 = __lasx_xvld((const __m256i *)q8, 0); + q8 += 32; + memcpy(aux32, q2, 4 * sizeof(uint32_t)); + q2 += 8; + + const __m256i q2_1 = lasx_set_d(iq2xxs_grid[aux8[3]], iq2xxs_grid[aux8[2]], iq2xxs_grid[aux8[1]], iq2xxs_grid[aux8[0]]); + const __m256i q2_2 = lasx_set_d(iq2xxs_grid[aux8[11]], iq2xxs_grid[aux8[10]], iq2xxs_grid[aux8[9]], iq2xxs_grid[aux8[8]]); + const __m256i s2_1 = lasx_set_d(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127], + signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); + const __m256i s2_2 = lasx_set_d(signs64[(aux32[3] >> 21) & 127], signs64[(aux32[3] >> 14) & 127], + signs64[(aux32[3] >> 7) & 127], signs64[(aux32[3] >> 0) & 127]); + const __m256i q8s_1 = __lasx_xvsigncov_b(s2_1, q8_1); + const __m256i q8s_2 = __lasx_xvsigncov_b(s2_2, q8_2); + const __m256i dot1 = lasx_maddubs_h(q2_1, q8s_1); + const 
__m256i dot2 = lasx_maddubs_h(q2_2, q8s_2); + const uint16_t ls1 = aux32[1] >> 28; + const uint16_t ls2 = aux32[3] >> 28; + const __m256i p1 = lasx_madd_h(dot1, __lasx_xvreplgr2vr_h(2 * ls1 + 1)); + const __m256i p2 = lasx_madd_h(dot2, __lasx_xvreplgr2vr_h(2 * ls2 + 1)); + sumi1 = __lasx_xvadd_w(sumi1, p1); + sumi2 = __lasx_xvadd_w(sumi2, p2); + } + + accumf = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(__lasx_xvadd_w(sumi1, sumi2)), accumf); + } + + *s = 0.125f * hsum_float_8(accumf); +// #elif defined(__VXE__) || defined(__VXE2__) +// const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; +// +// uint32_t aux32[4]; +// const uint8_t * aux8 = (const uint8_t *)aux32; +// +// float sumf = 0; +// +// for (int i = 0; i < nb; ++i) { +// const float d = MLLM_FP16_TO_FP32(x[i].d) * y[i].d; +// const uint16_t * __restrict q2 = x[i].qs; +// const int8_t * __restrict q8 = y[i].qs; +// +// float sumf1 = 0, sumf2 = 0; +// +// for (int ib32 = 0; ib32 < QK_K/32; ib += 2) { +// int8x16_t q8b0 = vec_xl( 0, q8); +// int8x16_t qb81 = vec_xl(16, q8); +// int8x16_t q8b2 = vec_xl(32, q8); +// int8x16_t q8b3 = vec_xl(48, q8); +// q8 += 64; +// +// memcpy(aux32, q2, 4 * sizeof(uint32_t)); +// q2 += 8; +// +// int8x16_t q2u0 = { *(const int64_t *)(iq2xxs_grid + aux8[ 0]), *(const int64_t *)(iq2xxs_grid + aux8[ 1]) }; +// int8x16_t q2u1 = { *(const int64_t *)(iq2xxs_grid + aux8[ 2]), *(const int64_t *)(iq2xxs_grid + aux8[ 3]) }; +// int8x16_t q2u2 = { *(const int64_t *)(iq2xxs_grid + aux8[ 8]), *(const int64_t *)(iq2xxs_grid + aux8[ 9]) }; +// int8x16_t q2u3 = { *(const int64_t *)(iq2xxs_grid + aux8[10]), *(const int64_t *)(iq2xxs_grid + aux8[11]) }; +// +// int8x16_t q2s0 = { *(const int64_t *)(signs64 + ((aux32[1] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 7) & 127)) }; +// int8x16_t q2s1 = { *(const int64_t *)(signs64 + ((aux32[1] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 21) & 127)) }; +// int8x16_t q2s2 = { *(const int64_t 
*)(signs64 + ((aux32[3] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 7) & 127)) }; +// int8x16_t q2s3 = { *(const int64_t *)(signs64 + ((aux32[3] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 21) & 127)) }; +// +// q2u0 = vec_mul(q2u0, q2s0); +// q2u1 = vec_mul(q2u1, q2s1); +// q2u2 = vec_mul(q2u2, q2s2); +// q2u3 = vec_mul(q2u3, q2s3); +// +// const int32x4_t p1 = mllm_vec_dot(mllm_vec_dot(vec_splat_s32(0), q2u0, q8b0), q2u1, q8b1); +// const int32x4_t p2 = mllm_vec_dot(mllm_vec_dot(vec_splat_s32(0), q2u2, q8b2), q2u3, q8b3); +// +// sumf1 += (p1[0] + p1[1] + p1[2] + p1[3]) * (0.5f + (aux32[1] >> 28)); +// sumf2 += (p2[0] + p2[1] + p2[2] + p2[3]) * (0.5f + (aux32[3] >> 28)); +// } +// +// sumf += d * (sumf1 + sumf2); +// } +// +// *s = 0.25f * sumf; +#else + + uint32_t aux32[2]; + const uint8_t *aux8 = (const uint8_t *)aux32; + + float sumf = 0.f; + for (int i = 0; i < nb; ++i) { + const float d = MLLM_FP16_TO_FP32(x[i].d) * y[i].d; + const uint16_t *__restrict q2 = x[i].qs; + const int8_t *__restrict q8 = y[i].qs; + int32_t bsum = 0; + for (int ib32 = 0; ib32 < QK_K / 32; ++ib32) { + memcpy(aux32, q2, 2 * sizeof(uint32_t)); + q2 += 4; + const uint32_t ls = 2 * (aux32[1] >> 28) + 1; + int32_t sumi = 0; + for (int l = 0; l < 4; ++l) { + const uint8_t *grid = (const uint8_t *)(iq2xxs_grid + aux8[l]); + const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7 * l) & 127]; + for (int j = 0; j < 8; ++j) { + sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? 
-1 : 1); + } + q8 += 8; + } + bsum += sumi * ls; + } + sumf += d * bsum; + } + *s = 0.125f * sumf; +#endif +} \ No newline at end of file diff --git a/mllm/backends/cpu/third_party/ggml/VecDotQ2.hpp b/mllm/backends/cpu/third_party/ggml/VecDotQ2.hpp new file mode 100644 index 000000000..ef4e4b489 --- /dev/null +++ b/mllm/backends/cpu/third_party/ggml/VecDotQ2.hpp @@ -0,0 +1,34 @@ +/* + * This code is based on mllm(https://github.com/ggerganov/mllm), + * please see https://github.com/ggerganov/mllm/blob/master/src/mllm.c + * mllm is licensed under MIT Copyright (c) 2022 Georgi Gerganov: + * + * MIT License + * Copyright (c) 2022 Georgi Gerganov + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#pragma once +#include + +void vec_dot_q2_0_q8_0(const int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy); + +void vec_dot_q2_K_q8_K(int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy); +void vec_dot_iq2_xxs_q8_K(int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy); \ No newline at end of file diff --git a/mllm/backends/cpu/third_party/ggml/VecDotQ3.cpp b/mllm/backends/cpu/third_party/ggml/VecDotQ3.cpp new file mode 100644 index 000000000..fb036e936 --- /dev/null +++ b/mllm/backends/cpu/third_party/ggml/VecDotQ3.cpp @@ -0,0 +1,1282 @@ +/* + * This code is based on ggml(https://github.com/ggerganov/ggml), + * please see https://github.com/ggerganov/ggml/blob/master/src/ggml.c + * ggml is licensed under MIT Copyright (c) 2022 Georgi Gerganov: + * + * MIT License + * Copyright (c) 2022 Georgi Gerganov + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "VecDotQ3.hpp" +#include "ComputeUtils.hpp" + +void vec_dot_q3_K_q8_K(int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy) { + assert(n % QK_K == 0); + + const uint32_t kmask1 = 0x03030303; + const uint32_t kmask2 = 0x0f0f0f0f; + + const block_q3_K *__restrict x = (block_q3_K *)vx; + const block_q8_K *__restrict y = (block_q8_K *)vy; + + const int nb = n / QK_K; + +#if defined(__ARM_FEATURE_SVE) + + uint32_t aux[3]; + uint32_t utmp[4]; + + const int8_t m32 = 32; + const int vector_length = svcntb() * 8; + const svuint8_t m3b_sv = svdup_n_u8(0x3); + const svint32_t vzero_sv = svdup_n_s32(0); + + const svuint8_t m0_sv = svdup_n_u8(1); + const svuint8_t m1_sv = svlsl_n_u8_x(svptrue_b8(), m0_sv, 1); + const svuint8_t m2_sv = svlsl_n_u8_x(svptrue_b8(), m0_sv, 2); + const svuint8_t m3_sv = svlsl_n_u8_x(svptrue_b8(), m0_sv, 3); + + float sum = 0; + + for (int i = 0; i < nb; ++i) { + const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); + + const uint8_t *__restrict q3_sv = x[i].qs; + const uint8_t *__restrict qh_sv = x[i].hmask; + const int8_t *__restrict q8_sv = y[i].qs; + + // Set up scales + memcpy(aux, x[i].scales, 12); + utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4); + utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4); + utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4); + utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4); + + int8_t *scale = (int8_t *)utmp; + + for (int j = 0; j < 16; ++j) scale[j] -= m32; + + switch (vector_length) { + case 128: { + svuint8_t qhbits_sv_1 = svld1_u8(svptrue_b8(), qh_sv); + svuint8_t qhbits_sv_2 = svld1_u8(svptrue_b8(), qh_sv + 16); + svuint8_t q3h_sv; + + 
svint32_t sumi1_1 = svdup_n_s32(0); + svint8_t q3bytes_sv; + + for (int j = 0; j < QK_K / 128; ++j) { + const svuint8_t q3bits_sv = svld1_u8(svptrue_b8(), q3_sv); + q3_sv += 16; + const svuint8_t q3bits_sv_1 = svld1_u8(svptrue_b8(), q3_sv); + q3_sv += 16; + svint8_t q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); + q8_sv += 16; + svint8_t q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); + q8_sv += 16; + + q3h_sv = svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m0_sv, qhbits_sv_1), 2); + q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), q3bits_sv, m3b_sv)), svreinterpret_s8_u8(q3h_sv)); + + sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[0])); + + q3h_sv = svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m0_sv, qhbits_sv_2), 2); + q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), q3bits_sv_1, m3b_sv)), svreinterpret_s8_u8(q3h_sv)); + + sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[1])); + + q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); + q8_sv += 16; + q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); + q8_sv += 16; + + q3h_sv = svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m1_sv, qhbits_sv_1), 1); + q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv, 2), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); + + sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[2])); + + q3h_sv = svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m1_sv, qhbits_sv_2), 1); + q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv_1, 2), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); + + sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, 
q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[3])); + + scale += 4; + q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); + q8_sv += 16; + q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); + q8_sv += 16; + + q3h_sv = svbic_u8_x(svptrue_b8(), m2_sv, qhbits_sv_1); + q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv, 4), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); + + sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[0])); + + q3h_sv = svbic_u8_x(svptrue_b8(), m2_sv, qhbits_sv_2); + q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv_1, 4), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); + + sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[1])); + + q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); + q8_sv += 16; + q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); + q8_sv += 16; + + q3h_sv = svlsr_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m3_sv, qhbits_sv_1), 1); + q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv, 6), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); + + sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[2])); + + q3h_sv = svlsr_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m3_sv, qhbits_sv_2), 1); + q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv_1, 6), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); + + sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[3])); + + if (j == 0) { + qhbits_sv_1 = svlsr_n_u8_x(svptrue_b8(), qhbits_sv_1, 4); + qhbits_sv_2 = svlsr_n_u8_x(svptrue_b8(), qhbits_sv_2, 4); + } + + scale += 4; + } + + sum += 
d * (svaddv_s32(svptrue_b32(), sumi1_1)); + } break; + case 256: + case 512: { + svuint8_t qhbits_sv = svld1_u8(svptrue_pat_b8(SV_VL32), qh_sv); + svuint8_t q3h_sv; + + svint32_t sumi1_1 = svdup_n_s32(0); + svint8_t q3bytes_sv; + + for (int j = 0; j < QK_K / 128; ++j) { + const svuint8_t q3bits_sv = svld1_u8(svptrue_pat_b8(SV_VL32), q3_sv); + q3_sv += 32; + svint8_t q8bytes_1_sv_1 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); + q8_sv += 32; + svint8_t q8bytes_1_sv_2 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); + q8_sv += 32; + + q3h_sv = svlsl_n_u8_x(svptrue_pat_b8(SV_VL32), svbic_u8_x(svptrue_pat_b8(SV_VL32), m0_sv, qhbits_sv), 2); + q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), q3bits_sv, m3b_sv)), svreinterpret_s8_u8(q3h_sv)); + + svint32_t scale_1 = svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[0]), svdup_n_s32((int32_t)scale[1])); + sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), scale_1); + + q3h_sv = svlsl_n_u8_x(svptrue_pat_b8(SV_VL32), svbic_u8_x(svptrue_pat_b8(SV_VL32), m1_sv, qhbits_sv), 1); + q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q3bits_sv, 2), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); + + scale_1 = svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[2]), svdup_n_s32((int32_t)scale[3])); + sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), scale_1); + + scale += 4; + q8bytes_1_sv_1 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); + q8_sv += 32; + q8bytes_1_sv_2 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); + q8_sv += 32; + + q3h_sv = svbic_u8_x(svptrue_pat_b8(SV_VL32), m2_sv, qhbits_sv); + q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q3bits_sv, 4), m3b_sv)), 
svreinterpret_s8_u8(q3h_sv)); + + scale_1 = svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[0]), svdup_n_s32((int32_t)scale[1])); + sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), scale_1); + + q3h_sv = svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), svbic_u8_x(svptrue_pat_b8(SV_VL32), m3_sv, qhbits_sv), 1); + q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q3bits_sv, 6), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); + + scale_1 = svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[2]), svdup_n_s32((int32_t)scale[3])); + sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), scale_1); + + if (j == 0) { + qhbits_sv = svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), qhbits_sv, 4); + } + + scale += 4; + } + + sum += d * (svaddv_s32(svptrue_pat_b32(SV_VL8), sumi1_1)); + } break; + default: + assert(false && "Unsupported vector length"); + break; + } + } + *s = sum; + +#elif __ARM_NEON + + uint32_t aux[3]; + uint32_t utmp[4]; + + const uint8x16_t m3b = vdupq_n_u8(0x3); + const int32x4_t vzero = vdupq_n_s32(0); + + const uint8x16_t m0 = vdupq_n_u8(1); + const uint8x16_t m1 = vshlq_n_u8(m0, 1); + const uint8x16_t m2 = vshlq_n_u8(m0, 2); + const uint8x16_t m3 = vshlq_n_u8(m0, 3); + const int8_t m32 = 32; + + mllm_int8x16x4_t q3bytes; + + float sum = 0; + + for (int i = 0; i < nb; ++i) { + const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); + + const uint8_t *__restrict q3 = x[i].qs; + const uint8_t *__restrict qh = x[i].hmask; + const int8_t *__restrict q8 = y[i].qs; + + mllm_uint8x16x2_t qhbits = mllm_vld1q_u8_x2(qh); + + mllm_uint8x16x4_t q3h; + + int32_t isum = 0; + + // Set up scales + memcpy(aux, x[i].scales, 12); + utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4); + utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4); + utmp[1] = 
(aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4); + utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4); + + int8_t *scale = (int8_t *)utmp; + for (int j = 0; j < 16; ++j) scale[j] -= m32; + + for (int j = 0; j < QK_K / 128; ++j) { + const mllm_uint8x16x2_t q3bits = mllm_vld1q_u8_x2(q3); + q3 += 32; + const mllm_int8x16x4_t q8bytes_1 = mllm_vld1q_s8_x4(q8); + q8 += 64; + const mllm_int8x16x4_t q8bytes_2 = mllm_vld1q_s8_x4(q8); + q8 += 64; + + q3h.val[0] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[0]), 2); + q3h.val[1] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[1]), 2); + q3h.val[2] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[0]), 1); + q3h.val[3] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[1]), 1); + + q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[0], m3b)), vreinterpretq_s8_u8(q3h.val[0])); + q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[1], m3b)), vreinterpretq_s8_u8(q3h.val[1])); + q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 2), m3b)), vreinterpretq_s8_u8(q3h.val[2])); + q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 2), m3b)), vreinterpretq_s8_u8(q3h.val[3])); + + isum += vaddvq_s32(mllm_vdotq_s32(vzero, q3bytes.val[0], q8bytes_1.val[0])) * scale[0]; + isum += vaddvq_s32(mllm_vdotq_s32(vzero, q3bytes.val[1], q8bytes_1.val[1])) * scale[1]; + isum += vaddvq_s32(mllm_vdotq_s32(vzero, q3bytes.val[2], q8bytes_1.val[2])) * scale[2]; + isum += vaddvq_s32(mllm_vdotq_s32(vzero, q3bytes.val[3], q8bytes_1.val[3])) * scale[3]; + + scale += 4; + + q3h.val[0] = vbicq_u8(m2, qhbits.val[0]); + q3h.val[1] = vbicq_u8(m2, qhbits.val[1]); + q3h.val[2] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[0]), 1); + q3h.val[3] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[1]), 1); + + q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 4), m3b)), vreinterpretq_s8_u8(q3h.val[0])); + q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 4), m3b)), 
vreinterpretq_s8_u8(q3h.val[1])); + q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 6), m3b)), vreinterpretq_s8_u8(q3h.val[2])); + q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 6), m3b)), vreinterpretq_s8_u8(q3h.val[3])); + + isum += vaddvq_s32(mllm_vdotq_s32(vzero, q3bytes.val[0], q8bytes_2.val[0])) * scale[0]; + isum += vaddvq_s32(mllm_vdotq_s32(vzero, q3bytes.val[1], q8bytes_2.val[1])) * scale[1]; + isum += vaddvq_s32(mllm_vdotq_s32(vzero, q3bytes.val[2], q8bytes_2.val[2])) * scale[2]; + isum += vaddvq_s32(mllm_vdotq_s32(vzero, q3bytes.val[3], q8bytes_2.val[3])) * scale[3]; + + scale += 4; + + if (j == 0) { + qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 4); + qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 4); + } + } + sum += d * isum; + } + + *s = sum; + +#elif defined __AVX2__ + + const __m256i m3 = _mm256_set1_epi8(3); + const __m256i mone = _mm256_set1_epi8(1); + const __m128i m32 = _mm_set1_epi8(32); + + __m256 acc = _mm256_setzero_ps(); + + uint32_t aux[3]; + + for (int i = 0; i < nb; ++i) { + const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); + + const uint8_t *__restrict q3 = x[i].qs; + const int8_t *__restrict q8 = y[i].qs; + + // Set up scales + memcpy(aux, x[i].scales, 12); + __m128i scales128 = _mm_set_epi32( + ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4), + ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4), + (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4), + (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4)); + scales128 = _mm_sub_epi8(scales128, m32); + const __m256i all_scales = _mm256_cvtepi8_epi16(scales128); + const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0); + const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1); + const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)}; + + // high bit + const __m256i hbits = _mm256_loadu_si256((const __m256i *)x[i].hmask); + + // integer 
accumulator + __m256i sumi = _mm256_setzero_si256(); + + int bit = 0; + int is = 0; + + for (int j = 0; j < QK_K / 128; ++j) { + // load low 2 bits + const __m256i q3bits = _mm256_loadu_si256((const __m256i *)q3); + q3 += 32; + + // prepare low and high bits + const __m256i q3l_0 = _mm256_and_si256(q3bits, m3); + const __m256i q3h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); + ++bit; + + const __m256i q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 2), m3); + const __m256i q3h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); + ++bit; + + const __m256i q3l_2 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 4), m3); + const __m256i q3h_2 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); + ++bit; + + const __m256i q3l_3 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 6), m3); + const __m256i q3h_3 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); + ++bit; + + // load Q8 quants + const __m256i q8_0 = _mm256_loadu_si256((const __m256i *)q8); + q8 += 32; + const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); + q8 += 32; + const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); + q8 += 32; + const __m256i q8_3 = _mm256_loadu_si256((const __m256i *)q8); + q8 += 32; + + // Dot product: we multiply the 2 low bits and 1 high bit part separately, so we can use _mm256_maddubs_epi16, + // and then subtract. 
The high bit part has the 2 already subtracted (and so, it is zero if the high bit was not set, + // and 2 if the high bit was set) + __m256i q8s_0 = _mm256_maddubs_epi16(q3h_0, q8_0); + __m256i q8s_1 = _mm256_maddubs_epi16(q3h_1, q8_1); + __m256i q8s_2 = _mm256_maddubs_epi16(q3h_2, q8_2); + __m256i q8s_3 = _mm256_maddubs_epi16(q3h_3, q8_3); + + __m256i p16_0 = _mm256_maddubs_epi16(q3l_0, q8_0); + __m256i p16_1 = _mm256_maddubs_epi16(q3l_1, q8_1); + __m256i p16_2 = _mm256_maddubs_epi16(q3l_2, q8_2); + __m256i p16_3 = _mm256_maddubs_epi16(q3l_3, q8_3); + + p16_0 = _mm256_sub_epi16(p16_0, q8s_0); + p16_1 = _mm256_sub_epi16(p16_1, q8s_1); + p16_2 = _mm256_sub_epi16(p16_2, q8s_2); + p16_3 = _mm256_sub_epi16(p16_3, q8s_3); + + // multiply with scales + p16_0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 0)), p16_0); + p16_1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 1)), p16_1); + p16_2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 2)), p16_2); + p16_3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 3)), p16_3); + + // accumulate + p16_0 = _mm256_add_epi32(p16_0, p16_1); + p16_2 = _mm256_add_epi32(p16_2, p16_3); + sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_2)); + } + + // multiply with block scale and accumulate + acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc); + } + + *s = hsum_float_8(acc); + +#elif defined __AVX__ + + const __m128i m3 = _mm_set1_epi8(3); + const __m128i mone = _mm_set1_epi8(1); + const __m128i m32 = _mm_set1_epi8(32); + const __m128i m2 = _mm_set1_epi8(2); + + __m256 acc = _mm256_setzero_ps(); + + const uint32_t *aux; + + for (int i = 0; i < nb; ++i) { + const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); + + const uint8_t *__restrict q3 = x[i].qs; + const int8_t *__restrict q8 = y[i].qs; + + // Set up scales + aux = (const uint32_t *)x[i].scales; + __m128i scales128 = 
_mm_set_epi32( + ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4), + ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4), + (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4), + (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4)); + scales128 = _mm_sub_epi8(scales128, m32); + const __m128i scales_0 = _mm_cvtepi8_epi16(scales128); + const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales128, scales128)); + const __m128i scales[2] = {scales_0, scales_1}; + + // high bit *128*2 from block_q3_K.hmask[QK_K/8] + const __m128i hbits_0 = _mm_loadu_si128((const __m128i *)&x[i].hmask[0]); + const __m128i hbits_1 = _mm_loadu_si128((const __m128i *)&x[i].hmask[16]); + + // integer accumulator + __m128i sumi_0 = _mm_setzero_si128(); + __m128i sumi_1 = _mm_setzero_si128(); + + for (int j = 0; j < QK_K / 128; ++j) { + // load low 2 bits *64*2 from block_q3_K.qs[QK_K/4] + const __m128i q3bits_0 = _mm_loadu_si128((const __m128i *)q3); + q3 += 16; + const __m128i q3bits_1 = _mm_loadu_si128((const __m128i *)q3); + q3 += 16; + + // prepare low and high bits + const int bit = j << 2; + + const __m128i q3l_0 = _mm_and_si128(q3bits_0, m3); + const __m128i q3l_1 = _mm_and_si128(q3bits_1, m3); + const __m128i q3h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit)), bit), 2); + const __m128i q3h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit)), bit), 2); + + const __m128i q3l_2 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 2), m3); + const __m128i q3l_3 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 2), m3); + const __m128i q3h_2 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit + 1)), bit + 1), 2); + const __m128i q3h_3 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit + 1)), bit + 1), 2); + + const __m128i q3l_4 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 4), m3); + const __m128i q3l_5 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 4), m3); 
+ const __m128i q3h_4 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit + 2)), bit + 2), 2); + const __m128i q3h_5 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit + 2)), bit + 2), 2); + + const __m128i q3l_6 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 6), m3); + const __m128i q3l_7 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 6), m3); + const __m128i q3h_6 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit + 3)), bit + 3), 2); + const __m128i q3h_7 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit + 3)), bit + 3), 2); + + // load Q8 quants from block_q8_K.qs[QK_K] + const __m128i q8_0 = _mm_loadu_si128((const __m128i *)q8); + q8 += 16; + const __m128i q8_1 = _mm_loadu_si128((const __m128i *)q8); + q8 += 16; + const __m128i q8_2 = _mm_loadu_si128((const __m128i *)q8); + q8 += 16; + const __m128i q8_3 = _mm_loadu_si128((const __m128i *)q8); + q8 += 16; + const __m128i q8_4 = _mm_loadu_si128((const __m128i *)q8); + q8 += 16; + const __m128i q8_5 = _mm_loadu_si128((const __m128i *)q8); + q8 += 16; + const __m128i q8_6 = _mm_loadu_si128((const __m128i *)q8); + q8 += 16; + const __m128i q8_7 = _mm_loadu_si128((const __m128i *)q8); + q8 += 16; + + // Dot product: we multiply the 2 low bits and 1 high bit part separately, so we can use _mm256_maddubs_epi16, + // and then subtract. 
The high bit part has the 2 already subtracted (and so, it is zero if the high bit was not set, + // and 2 if the high bit was set) + __m128i q8s_0 = _mm_maddubs_epi16(q3h_0, q8_0); + __m128i q8s_1 = _mm_maddubs_epi16(q3h_1, q8_1); + __m128i q8s_2 = _mm_maddubs_epi16(q3h_2, q8_2); + __m128i q8s_3 = _mm_maddubs_epi16(q3h_3, q8_3); + __m128i q8s_4 = _mm_maddubs_epi16(q3h_4, q8_4); + __m128i q8s_5 = _mm_maddubs_epi16(q3h_5, q8_5); + __m128i q8s_6 = _mm_maddubs_epi16(q3h_6, q8_6); + __m128i q8s_7 = _mm_maddubs_epi16(q3h_7, q8_7); + + __m128i p16_0 = _mm_maddubs_epi16(q3l_0, q8_0); + __m128i p16_1 = _mm_maddubs_epi16(q3l_1, q8_1); + __m128i p16_2 = _mm_maddubs_epi16(q3l_2, q8_2); + __m128i p16_3 = _mm_maddubs_epi16(q3l_3, q8_3); + __m128i p16_4 = _mm_maddubs_epi16(q3l_4, q8_4); + __m128i p16_5 = _mm_maddubs_epi16(q3l_5, q8_5); + __m128i p16_6 = _mm_maddubs_epi16(q3l_6, q8_6); + __m128i p16_7 = _mm_maddubs_epi16(q3l_7, q8_7); + + p16_0 = _mm_sub_epi16(p16_0, q8s_0); + p16_1 = _mm_sub_epi16(p16_1, q8s_1); + p16_2 = _mm_sub_epi16(p16_2, q8s_2); + p16_3 = _mm_sub_epi16(p16_3, q8s_3); + p16_4 = _mm_sub_epi16(p16_4, q8s_4); + p16_5 = _mm_sub_epi16(p16_5, q8s_5); + p16_6 = _mm_sub_epi16(p16_6, q8s_6); + p16_7 = _mm_sub_epi16(p16_7, q8s_7); + + // multiply with scales + __m128i shuffle = _mm_set1_epi16(0x0100); + p16_0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_0); + shuffle = _mm_add_epi16(shuffle, m2); + p16_1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_1); + shuffle = _mm_add_epi16(shuffle, m2); + p16_2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_2); + shuffle = _mm_add_epi16(shuffle, m2); + p16_3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_3); + shuffle = _mm_add_epi16(shuffle, m2); + p16_4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_4); + shuffle = _mm_add_epi16(shuffle, m2); + p16_5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_5); + shuffle = _mm_add_epi16(shuffle, m2); + p16_6 = 
_mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_6); + shuffle = _mm_add_epi16(shuffle, m2); + p16_7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_7); + + // accumulate + p16_0 = _mm_add_epi32(p16_0, p16_1); + p16_2 = _mm_add_epi32(p16_2, p16_3); + p16_4 = _mm_add_epi32(p16_4, p16_5); + p16_6 = _mm_add_epi32(p16_6, p16_7); + sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2)); + sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_4, p16_6)); + } + + // multiply with block scale and accumulate + __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); + acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc); + } + + *s = hsum_float_8(acc); + +#elif defined __wasm_simd128__ + int8_t aux8[QK_K]; + float sums[8] = {0}; + uint32_t auxs[4]; + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t *__restrict q3 = x[i].qs; + const uint8_t *__restrict hm = x[i].hmask; + const int8_t *__restrict q8 = y[i].qs; + + // Process blocks with SIMD + int8_t *a = aux8; + uint8_t m = 1; + for (int j = 0; j < QK_K; j += 128) { + for (int shift = 0; shift <= 6; shift += 2) { + v128_t v_m = wasm_i8x16_splat(m); + for (int l = 0; l < 32; l += 16) { + v128_t v_q3 = wasm_v128_load(q3 + l); + v128_t v_shift = wasm_i8x16_shr(v_q3, shift); + v128_t v_low2 = wasm_v128_and(v_shift, wasm_i8x16_splat(0x03)); + + v128_t v_hm = wasm_v128_load(hm + l); + v128_t v_mask = wasm_v128_and(v_hm, v_m); + v_mask = wasm_i8x16_ne(v_mask, wasm_i8x16_splat(0)); + + v_low2 = wasm_i8x16_sub(v_low2, wasm_v128_and(wasm_i8x16_splat(4), wasm_v128_not(v_mask))); + wasm_v128_store(a + l, v_low2); + } + a += 32; + m <<= 1; + } + q3 += 32; + } + + // Extract scales + memcpy(auxs, x[i].scales, 12); + uint32_t tmp = auxs[2]; + auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4); + auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4); + auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4); + auxs[1] = (auxs[1] & kmask2) | 
(((tmp >> 2) & kmask1) << 4); + const int8_t *scales = (const int8_t *)auxs; + + // SIMD dot product with register accumulators + v128_t v_acc0 = wasm_i32x4_splat(0); + v128_t v_acc1 = wasm_i32x4_splat(0); + a = aux8; + for (int j = 0; j < QK_K / 16; ++j) { + const v128_t v_scale = wasm_i16x8_splat(scales[j] - 32); + + // Process 16 elements per iteration + for (int k = 0; k < 2; ++k) { + const v128_t v_q8 = wasm_i16x8_load8x8(q8); + const v128_t v_a = wasm_i16x8_load8x8(a); + + v128_t v_prod = wasm_i16x8_mul(v_q8, v_a); + v_prod = wasm_i16x8_mul(v_prod, v_scale); + + v_acc0 = wasm_i32x4_add(v_acc0, wasm_i32x4_extend_low_i16x8(v_prod)); + v_acc1 = wasm_i32x4_add(v_acc1, wasm_i32x4_extend_high_i16x8(v_prod)); + + q8 += 8; + a += 8; + } + } + + // Accumulate results + const float d = MLLM_FP16_TO_FP32(x[i].d) * y[i].d; + const v128_t v_d = wasm_f32x4_splat(d); + v128_t v_sum = wasm_f32x4_add( + wasm_f32x4_mul(wasm_f32x4_convert_i32x4(v_acc0), v_d), + wasm_f32x4_mul(wasm_f32x4_convert_i32x4(v_acc1), v_d)); + + // Accumulate into sums vector + wasm_v128_store(sums, wasm_f32x4_add(wasm_v128_load(sums), v_sum)); + } + + // Horizontal sum + v128_t v_sum = wasm_f32x4_add(wasm_v128_load(sums), wasm_v128_load(sums + 4)); + sumf = wasm_f32x4_extract_lane(v_sum, 0) + wasm_f32x4_extract_lane(v_sum, 1) + wasm_f32x4_extract_lane(v_sum, 2) + wasm_f32x4_extract_lane(v_sum, 3); + + *s = sumf; + +#elif defined __riscv_v_intrinsic + + uint32_t aux[3]; + uint32_t utmp[4]; + + const int vector_length = __riscv_vlenb() * 8; + float sumf = 0; + + switch (vector_length) { + case 256: + for (int i = 0; i < nb; ++i) { + const uint8_t *__restrict q3 = x[i].qs; + const uint8_t *__restrict qh = x[i].hmask; + const int8_t *__restrict q8 = y[i].qs; + + memcpy(aux, x[i].scales, 12); + utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4); + utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4); + utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4); + utmp[0] 
= (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4); + + int8_t *scale = (int8_t *)utmp; + for (int j = 0; j < 16; ++j) scale[j] -= 32; + + size_t vl = 32; + uint8_t m = 1; + + vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1); + vuint8m1_t vqh = __riscv_vle8_v_u8m1(qh, vl); + + int sum_t = 0; + + for (int j = 0; j < QK_K; j += 128) { + vl = 32; + + // load Q3 + vuint8m1_t q3_x = __riscv_vle8_v_u8m1(q3, vl); + + vint8m1_t q3_0 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q3_x, 0x03, vl)); + vint8m1_t q3_1 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x2, vl), 0x03, vl)); + vint8m1_t q3_2 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x4, vl), 0x03, vl)); + vint8m1_t q3_3 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x6, vl), 0x03, vl)); + + // compute mask for subtraction + vuint8m1_t qh_m0 = __riscv_vand_vx_u8m1(vqh, m, vl); + vbool8_t vmask_0 = __riscv_vmseq_vx_u8m1_b8(qh_m0, 0, vl); + vint8m1_t q3_m0 = __riscv_vsub_vx_i8m1_mu(vmask_0, q3_0, q3_0, 0x4, vl); + m <<= 1; + + vuint8m1_t qh_m1 = __riscv_vand_vx_u8m1(vqh, m, vl); + vbool8_t vmask_1 = __riscv_vmseq_vx_u8m1_b8(qh_m1, 0, vl); + vint8m1_t q3_m1 = __riscv_vsub_vx_i8m1_mu(vmask_1, q3_1, q3_1, 0x4, vl); + m <<= 1; + + vuint8m1_t qh_m2 = __riscv_vand_vx_u8m1(vqh, m, vl); + vbool8_t vmask_2 = __riscv_vmseq_vx_u8m1_b8(qh_m2, 0, vl); + vint8m1_t q3_m2 = __riscv_vsub_vx_i8m1_mu(vmask_2, q3_2, q3_2, 0x4, vl); + m <<= 1; + + vuint8m1_t qh_m3 = __riscv_vand_vx_u8m1(vqh, m, vl); + vbool8_t vmask_3 = __riscv_vmseq_vx_u8m1_b8(qh_m3, 0, vl); + vint8m1_t q3_m3 = __riscv_vsub_vx_i8m1_mu(vmask_3, q3_3, q3_3, 0x4, vl); + m <<= 1; + + // load Q8 and take product with Q3 + vint16m2_t a0 = __riscv_vwmul_vv_i16m2(q3_m0, __riscv_vle8_v_i8m1(q8, vl), vl); + vint16m2_t a1 = __riscv_vwmul_vv_i16m2(q3_m1, __riscv_vle8_v_i8m1(q8 + 32, vl), vl); + vint16m2_t a2 = __riscv_vwmul_vv_i16m2(q3_m2, __riscv_vle8_v_i8m1(q8 
+ 64, vl), vl); + vint16m2_t a3 = __riscv_vwmul_vv_i16m2(q3_m3, __riscv_vle8_v_i8m1(q8 + 96, vl), vl); + + vl = 16; + + // retrieve lane to multiply with scale + vint32m2_t aux0_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 0), (scale[0]), vl); + vint32m2_t aux0_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 1), (scale[1]), vl); + vint32m2_t aux1_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 0), (scale[2]), vl); + vint32m2_t aux1_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 1), (scale[3]), vl); + vint32m2_t aux2_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 0), (scale[4]), vl); + vint32m2_t aux2_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 1), (scale[5]), vl); + vint32m2_t aux3_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 0), (scale[6]), vl); + vint32m2_t aux3_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 1), (scale[7]), vl); + + vint32m1_t isum0 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux0_0, aux0_1, vl), vzero, vl); + vint32m1_t isum1 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux1_0, aux1_1, vl), isum0, vl); + vint32m1_t isum2 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux2_0, aux2_1, vl), isum1, vl); + vint32m1_t isum3 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux3_0, aux3_1, vl), isum2, vl); + + sum_t += __riscv_vmv_x_s_i32m1_i32(isum3); + + q3 += 32; + q8 += 128; + scale += 8; + } + + const float d = MLLM_FP16_TO_FP32(x[i].d) * y[i].d; + + sumf += d * sum_t; + } + break; + case 128: + for (int i = 0; i < nb; ++i) { + const uint8_t *restrict q3 = x[i].qs; + const uint8_t *restrict qh = x[i].hmask; + const int8_t *restrict q8 = y[i].qs; + + int8_t *scale = (int8_t *)utmp; + int tmp; + __asm__ __volatile__( + "vsetivli zero, 12, e8, m1\n\t" + "vle8.v v0, (%[s6b])\n\t" + "vmv1r.v v2, v0\n\t" + "vsetivli zero, 2, e64, m1\n\t" + "vmv.v.x v9, %[sh]\n\t" + "vslidedown.vi v1, v0, 1\n\t" + "vslide1up.vx v8, v9, zero\n\t" 
// {0, 0, 4, 4} + "vslideup.vi v0, v2, 1\n\t" // {aux[0], aux[1], aux[0], aux[1]} + "vsetivli zero, 4, e32, m1\n\t" + "vid.v v9\n\t" + "vmv.x.s %[tmp], v1\n\t" + "vsll.vi v9, v9, 1\n\t" // {0, 2, 4, 6} + "vmv.v.x v1, %[tmp]\n\t" // {aux[2], aux[2], aux[2], aux[2]} + "vsrl.vv v4, v1, v9\n\t" + "vsrl.vv v2, v0, v8\n\t" + "vand.vx v5, v4, %[kmask1]\n\t" + "vand.vx v3, v2, %[kmask2]\n\t" + "vsll.vi v6, v5, 4\n\t" + "vor.vv v7, v6, v3\n\t" + "vsetivli zero, 16, e8, m1\n\t" + "vsub.vx v0, v7, %[c]\n\t" + "vse8.v v0, (%[scale])" + : [tmp] "=&r"(tmp) + : [sh] "r"(0x0000000400000004), [s6b] "r"(x[i].scales), [c] "r"(32), [scale] "r"(scale), [kmask1] "r"(kmask1), [kmask2] "r"(kmask2) + : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); + + uint8_t m = 1; + int isum = 0; + for (int j = 0; j < QK_K; j += 128) { + __asm__ __volatile__( + "vsetvli zero, %[vl32], e8, m2, ta, mu\n\t" + "vle8.v v8, (%[q3])\n\t" + "vsrl.vi v10, v8, 2\n\t" + "vsrl.vi v12, v8, 4\n\t" + "vsrl.vi v14, v8, 6\n\t" + "vand.vi v8, v8, 3\n\t" + "vand.vi v10, v10, 3\n\t" + "vand.vi v12, v12, 3\n\t" + "vle8.v v2, (%[qh])\n\t" + "vand.vx v4, v2, %[m]\n\t" + "slli %[m], %[m], 1\n\t" + "vmseq.vx v0, v4, zero\n\t" + "vadd.vi v8, v8, -4, v0.t\n\t" + "vand.vx v4, v2, %[m]\n\t" + "slli %[m], %[m], 1\n\t" + "vmseq.vx v0, v4, zero\n\t" + "vadd.vi v10, v10, -4, v0.t\n\t" + "vand.vx v4, v2, %[m]\n\t" + "slli %[m], %[m], 1\n\t" + "vmseq.vx v0, v4, zero\n\t" + "vadd.vi v12, v12, -4, v0.t\n\t" + "vand.vx v4, v2, %[m]\n\t" + "slli %[m], %[m], 1\n\t" + "vmseq.vx v0, v4, zero\n\t" + "vadd.vi v14, v14, -4, v0.t\n\t" + "vsetvli zero, %[vl128], e8, m8\n\t" + "vle8.v v0, (%[q8])\n\t" + "vsetvli zero, %[vl64], e8, m4\n\t" + "vwmul.vv v16, v0, v8\n\t" + "vwmul.vv v24, v4, v12\n\t" + "vsetivli zero, 16, e16, m2\n\t" + "vmv.v.x v0, zero\n\t" + "vwredsum.vs v10, 
v16, v0\n\t" + "vwredsum.vs v9, v18, v0\n\t" + "vwredsum.vs v8, v20, v0\n\t" + "vwredsum.vs v7, v22, v0\n\t" + "vwredsum.vs v11, v24, v0\n\t" + "vwredsum.vs v12, v26, v0\n\t" + "vwredsum.vs v13, v28, v0\n\t" + "vwredsum.vs v14, v30, v0\n\t" + "vsetivli zero, 4, e32, m1\n\t" + "vslideup.vi v10, v9, 1\n\t" + "vslideup.vi v8, v7, 1\n\t" + "vslideup.vi v11, v12, 1\n\t" + "vslideup.vi v13, v14, 1\n\t" + "vslideup.vi v10, v8, 2\n\t" + "vslideup.vi v11, v13, 2\n\t" + "vsetivli zero, 8, e32, m2\n\t" + "vle8.v v15, (%[scale])\n\t" + "vsext.vf4 v12, v15\n\t" + "vmul.vv v10, v10, v12\n\t" + "vredsum.vs v0, v10, v0\n\t" + "vmv.x.s %[tmp], v0\n\t" + "add %[isum], %[isum], %[tmp]" + : [tmp] "=&r"(tmp), [m] "+&r"(m), [isum] "+&r"(isum) + : [vl128] "r"(128), [vl64] "r"(64), [vl32] "r"(32), [q3] "r"(q3), [qh] "r"(qh), [scale] "r"(scale), [q8] "r"(q8) + : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); + q3 += 32; + q8 += 128; + scale += 8; + } + + const float d = MLLM_FP16_TO_FP32(x[i].d) * y[i].d; + sumf += d * isum; + } + break; + default: + assert(false && "Unsupported vector length"); + break; + } + + *s = sumf; + +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0x3); + const vector signed char lowMask1 = vec_splats((int8_t)0xf); + const vector signed char lowMask2 = vec_splats((int8_t)0x30); + const vector int v0 = vec_splats((int32_t)0); + const vector signed char v1 = vec_splats((signed char)0x1); + const vector unsigned char v2 = vec_splats((unsigned char)0x2); + const vector unsigned char v3 = vec_splats((unsigned char)0x3); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + const vector unsigned char v6 = vec_splats((unsigned char)0x6); + const vector signed char off = vec_splats((signed char)0x20); + + vector float vsumf0 = 
vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(MLLM_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + UNUSED(kmask1); + UNUSED(kmask2); + + vector signed char u0 = (vector signed char)vec_xl_len(x[i].scales, 8); + vector signed char u1 = vec_and(u0, lowMask1); + vector signed char u2 = (vector signed char)vec_xl_len(x[i].scales + 8, 4); + vector signed char u3 = (vector signed char)vec_mergeh((vector signed int)u2, (vector signed int)vec_sr(u2, v2)); + vector signed char u30 = vec_sl(vec_and(u3, lowMask), v4); + vector signed char u31 = vec_and(u3, lowMask2); + + u1 = vec_or(u1, u30); + u2 = vec_or(vec_sr(u0, v4), u31); + + vector signed char vscales = (vector signed char)vec_mergeh((vector signed long long)u1, (vector signed long long)u2); + vector signed char qxhs0 = (vector signed char)vec_xl(0, x[i].hmask); + vector signed char qxhs1 = (vector signed char)vec_xl(16, x[i].hmask); + + vscales = vec_sub(vscales, off); + + vector signed int vsumi0 = v0; + vector signed int vsumi1 = v0; + vector signed int vsumi2 = v0; + vector signed int vsumi3 = v0; + vector signed int vsumi4 = v0; + vector signed int vsumi5 = v0; + vector signed int vsumi6 = v0; + vector signed int vsumi7 = v0; + + const uint8_t *__restrict q3 = x[i].qs; + const int8_t *__restrict q8 = y[i].qs; + + for (int j = 0; j < QK_K / 128; ++j) { + __builtin_prefetch(q3, 0, 1); + __builtin_prefetch(q8, 0, 1); + + vector signed char qxs0 = (vector signed char)vec_xl(0, q3); + vector signed char qxs1 = (vector signed char)vec_xl(16, q3); + q3 += 32; + + // the low 2 bits + vector signed char qxs00 = vec_and(qxs0, lowMask); + vector signed char qxs01 = vec_and(vec_sr(qxs0, v2), lowMask); + vector signed char qxs02 = vec_and(vec_sr(qxs0, v4), lowMask); + vector signed char qxs03 = 
vec_and(vec_sr(qxs0, v6), lowMask); + vector signed char qxs10 = vec_and(qxs1, lowMask); + vector signed char qxs11 = vec_and(vec_sr(qxs1, v2), lowMask); + vector signed char qxs12 = vec_and(vec_sr(qxs1, v4), lowMask); + vector signed char qxs13 = vec_and(vec_sr(qxs1, v6), lowMask); + + // the 3rd bit + vector signed char qxh00 = vec_sl(vec_andc(v1, qxhs0), v2); + vector signed char qxh01 = vec_sl(vec_andc(v1, vec_sr(qxhs0, (vector unsigned char)v1)), v2); + vector signed char qxh02 = vec_sl(vec_andc(v1, vec_sr(qxhs0, v2)), v2); + vector signed char qxh03 = vec_sl(vec_andc(v1, vec_sr(qxhs0, v3)), v2); + vector signed char qxh10 = vec_sl(vec_andc(v1, qxhs1), v2); + vector signed char qxh11 = vec_sl(vec_andc(v1, vec_sr(qxhs1, (vector unsigned char)v1)), v2); + vector signed char qxh12 = vec_sl(vec_andc(v1, vec_sr(qxhs1, v2)), v2); + vector signed char qxh13 = vec_sl(vec_andc(v1, vec_sr(qxhs1, v3)), v2); + qxhs0 = vec_sr(qxhs0, v4); + qxhs1 = vec_sr(qxhs1, v4); + + vector signed char q3x00 = vec_sub(qxs00, qxh00); + vector signed char q3x01 = vec_sub(qxs01, qxh01); + vector signed char q3x02 = vec_sub(qxs02, qxh02); + vector signed char q3x03 = vec_sub(qxs03, qxh03); + vector signed char q3x10 = vec_sub(qxs10, qxh10); + vector signed char q3x11 = vec_sub(qxs11, qxh11); + vector signed char q3x12 = vec_sub(qxs12, qxh12); + vector signed char q3x13 = vec_sub(qxs13, qxh13); + + vector signed char q8y00 = vec_xl(0, q8); + vector signed char q8y10 = vec_xl(16, q8); + vector signed char q8y01 = vec_xl(32, q8); + vector signed char q8y11 = vec_xl(48, q8); + vector signed char q8y02 = vec_xl(64, q8); + vector signed char q8y12 = vec_xl(80, q8); + vector signed char q8y03 = vec_xl(96, q8); + vector signed char q8y13 = vec_xl(112, q8); + q8 += 128; + + vector signed short vscales_h = vec_unpackh(vscales); + vector signed short vs0 = vec_splat(vscales_h, 0); + vector signed short vs1 = vec_splat(vscales_h, 1); + vector signed short vs2 = vec_splat(vscales_h, 2); + vector signed 
short vs3 = vec_splat(vscales_h, 3); + vector signed short vs4 = vec_splat(vscales_h, 4); + vector signed short vs5 = vec_splat(vscales_h, 5); + vector signed short vs6 = vec_splat(vscales_h, 6); + vector signed short vs7 = vec_splat(vscales_h, 7); + vscales = vec_sld(vscales, vscales, 8); + + vector signed short qv00 = vec_add(vec_mule(q3x00, q8y00), vec_mulo(q3x00, q8y00)); + vector signed short qv01 = vec_add(vec_mule(q3x01, q8y01), vec_mulo(q3x01, q8y01)); + vector signed short qv02 = vec_add(vec_mule(q3x02, q8y02), vec_mulo(q3x02, q8y02)); + vector signed short qv03 = vec_add(vec_mule(q3x03, q8y03), vec_mulo(q3x03, q8y03)); + vector signed short qv10 = vec_add(vec_mule(q3x10, q8y10), vec_mulo(q3x10, q8y10)); + vector signed short qv11 = vec_add(vec_mule(q3x11, q8y11), vec_mulo(q3x11, q8y11)); + vector signed short qv12 = vec_add(vec_mule(q3x12, q8y12), vec_mulo(q3x12, q8y12)); + vector signed short qv13 = vec_add(vec_mule(q3x13, q8y13), vec_mulo(q3x13, q8y13)); + + vsumi0 = vec_msum(qv00, vs0, vsumi0); + vsumi1 = vec_msum(qv01, vs2, vsumi1); + vsumi2 = vec_msum(qv02, vs4, vsumi2); + vsumi3 = vec_msum(qv03, vs6, vsumi3); + vsumi4 = vec_msum(qv10, vs1, vsumi4); + vsumi5 = vec_msum(qv11, vs3, vsumi5); + vsumi6 = vec_msum(qv12, vs5, vsumi6); + vsumi7 = vec_msum(qv13, vs7, vsumi7); + } + + vsumi0 = vec_add(vsumi0, vsumi4); + vsumi1 = vec_add(vsumi1, vsumi5); + vsumi2 = vec_add(vsumi2, vsumi6); + vsumi3 = vec_add(vsumi3, vsumi7); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); + +#elif defined __loongarch_asx + + const __m128i m32 = 
__lsx_vreplgr2vr_b(32); + + __m256 acc = (__m256)__lasx_xvldi(0); + + uint32_t aux[3]; + + for (int i = 0; i < nb; ++i) { + const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); + const uint8_t *__restrict q3 = x[i].qs; + const int8_t *__restrict q8 = y[i].qs; + // Set up scales + memcpy(aux, x[i].scales, 12); + __m128i scales128 = lsx_set_w( + ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4), + ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4), + (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4), + (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4)); + scales128 = __lsx_vsub_b(scales128, m32); + + const v16i8 shuffle_mask = {0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15}; + const __m256i scales_shuffled = lasx_ext8_16(__lsx_vshuf_b(scales128, scales128, (__m128i)shuffle_mask)); + + // high bit + const __m256i hbits = __lasx_xvld((const __m256i *)x[i].hmask, 0); + + // integer accumulator + __m256i sumi = __lasx_xvldi(0); + + for (int j = 0; j < QK_K / 128; ++j) { + // load low 2 bits + const __m256i q3bits = __lasx_xvld((const __m256i *)q3, 0); + q3 += 32; + + // prepare low and high bits + const __m256i q3l_0 = __lasx_xvandi_b(q3bits, 3); + const __m256i q3l_1 = __lasx_xvandi_b(__lasx_xvsrli_b(q3bits, 2), 3); + const __m256i q3l_2 = __lasx_xvandi_b(__lasx_xvsrli_b(q3bits, 4), 3); + const __m256i q3l_3 = __lasx_xvsrli_b(q3bits, 6); + const __m256i q3h_0 = __lasx_xvslli_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 4 * j + 0), 0), 2); + const __m256i q3h_1 = __lasx_xvslli_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 4 * j + 1), 0), 2); + const __m256i q3h_2 = __lasx_xvslli_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 4 * j + 2), 0), 2); + const __m256i q3h_3 = __lasx_xvslli_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 4 * j + 3), 0), 2); + const __m256i q3_0 = __lasx_xvor_v(q3h_0, q3l_0); + const __m256i q3_1 = __lasx_xvor_v(q3h_1, q3l_1); + const __m256i q3_2 = __lasx_xvor_v(q3h_2, q3l_2); + const __m256i q3_3 = __lasx_xvor_v(q3h_3, q3l_3); + + 
// load Q8 quants + const __m256i q8_0 = __lasx_xvld((const __m256i *)q8, 0); + q8 += 32; + const __m256i q8_1 = __lasx_xvld((const __m256i *)q8, 0); + q8 += 32; + const __m256i q8_2 = __lasx_xvld((const __m256i *)q8, 0); + q8 += 32; + const __m256i q8_3 = __lasx_xvld((const __m256i *)q8, 0); + q8 += 32; + + __m256i p16_0 = lasx_madd_h_b(q8_0, q3_0); + __m256i p16_1 = lasx_madd_h_b(q8_1, q3_1); + __m256i p16_2 = lasx_madd_h_b(q8_2, q3_2); + __m256i p16_3 = lasx_madd_h_b(q8_3, q3_3); + + // multiply with scales + p16_0 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 0), p16_0); + p16_1 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 1), p16_1); + p16_2 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 2), p16_2); + p16_3 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 3), p16_3); + + // accumulate + p16_0 = __lasx_xvadd_w(p16_0, p16_1); + p16_2 = __lasx_xvadd_w(p16_2, p16_3); + sumi = __lasx_xvadd_w(sumi, __lasx_xvadd_w(p16_0, p16_2)); + } + // multiply with block scale and accumulate + acc = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(sumi), acc); + } + + *s = hsum_float_8(acc); +#elif defined(__VXE__) || defined(__VXE2__) + uint32_t aux[3]; + uint32_t utmp[4]; + + const int32x4_t v_z = vec_splat_s32(0); + const uint8x16_t v_3m = vec_splat_u8(0x03); + + const uint8x16_t v_0c = vec_splat_u8(1); + const uint8x16_t v_1c = vec_sl(v_0c, 1); + const uint8x16_t v_2c = vec_sl(v_0c, 2); + const uint8x16_t v_3c = vec_sl(v_0c, 3); + + uint8x16_t q3h[4]; + uint8x16_t q3b[2]; + int8x16_t q3bytes[4]; + int8x16_t q8bytes[4]; + uint8x16_t qhbits[2]; + + float sum = 0; + + for (int i = 0; i < nb; ++i) { + const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); + + const uint8_t *restrict x0l = x[i].qs; + const uint8_t *restrict x0h = x[i].hmask; + const int8_t *restrict y0 = y[i].qs; + + qhbits[0] = vec_xl(0, x0h); + qhbits[1] = vec_xl(16, x0h); + + int32_t isum = 0; + + memcpy(aux, x[i].scales, 12); + utmp[3] = ((aux[1] >> 4) 
& kmask2) | (((aux[2] >> 6) & kmask1) << 4); + utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4); + utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4); + utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4); + + int8_t *scale = (int8_t *)utmp; + for (int j = 0; j < 16; ++j) scale[j] -= 32; + + for (int j = 0; j < QK_K / 128; ++j) { + int32x4_t isum0, isum1, isum2, isum3; + + q3b[0] = vec_xl(0, x0l); + q3b[1] = vec_xl(16, x0l); + x0l += 32; + + q8bytes[0] = vec_xl(0, y0); + q8bytes[1] = vec_xl(16, y0); + q8bytes[2] = vec_xl(32, y0); + q8bytes[3] = vec_xl(48, y0); + q8bytes[4] = vec_xl(64, y0); + q8bytes[5] = vec_xl(80, y0); + q8bytes[6] = vec_xl(96, y0); + q8bytes[7] = vec_xl(112, y0); + y0 += 128; + + q3h[0] = vec_sl(vec_andc(v_0c, qhbits[0]), 2); + q3h[1] = vec_sl(vec_andc(v_0c, qhbits[1]), 2); + q3h[2] = vec_sl(vec_andc(v_1c, qhbits[0]), 1); + q3h[3] = vec_sl(vec_andc(v_1c, qhbits[1]), 1); + + q3bytes[0] = vec_sub((int8x16_t)vec_and(q3b[0], v_3m), (int8x16_t)q3h[0]); + q3bytes[1] = vec_sub((int8x16_t)vec_and(q3b[1], v_3m), (int8x16_t)q3h[1]); + q3bytes[2] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[0], 2), v_3m), (int8x16_t)q3h[2]); + q3bytes[3] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[1], 2), v_3m), (int8x16_t)q3h[3]); + + isum0 = mllm_vec_dot(v_z, q3bytes[0], q8bytes[0]); + isum1 = mllm_vec_dot(v_z, q3bytes[1], q8bytes[1]); + isum2 = mllm_vec_dot(v_z, q3bytes[2], q8bytes[2]); + isum3 = mllm_vec_dot(v_z, q3bytes[3], q8bytes[3]); + + isum += (isum0[0] + isum0[1] + isum0[2] + isum0[3]) * scale[0]; + isum += (isum1[0] + isum1[1] + isum1[2] + isum1[3]) * scale[1]; + isum += (isum2[0] + isum2[1] + isum2[2] + isum2[3]) * scale[2]; + isum += (isum3[0] + isum3[1] + isum3[2] + isum3[3]) * scale[3]; + + scale += 4; + + q3h[0] = vec_andc(v_2c, qhbits[0]); + q3h[1] = vec_andc(v_2c, qhbits[1]); + q3h[2] = vec_sr(vec_andc(v_3c, qhbits[0]), 1); + q3h[3] = vec_sr(vec_andc(v_3c, qhbits[1]), 1); + + q3bytes[0] = 
vec_sub((int8x16_t)vec_and(vec_sr(q3b[0], 4), v_3m), (int8x16_t)q3h[0]); + q3bytes[1] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[1], 4), v_3m), (int8x16_t)q3h[1]); + q3bytes[2] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[0], 6), v_3m), (int8x16_t)q3h[2]); + q3bytes[3] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[1], 6), v_3m), (int8x16_t)q3h[3]); + + isum0 = mllm_vec_dot(v_z, q3bytes[0], q8bytes[4]); + isum1 = mllm_vec_dot(v_z, q3bytes[1], q8bytes[5]); + isum2 = mllm_vec_dot(v_z, q3bytes[2], q8bytes[6]); + isum3 = mllm_vec_dot(v_z, q3bytes[3], q8bytes[7]); + + isum += (isum0[0] + isum0[1] + isum0[2] + isum0[3]) * scale[0]; + isum += (isum1[0] + isum1[1] + isum1[2] + isum1[3]) * scale[1]; + isum += (isum2[0] + isum2[1] + isum2[2] + isum2[3]) * scale[2]; + isum += (isum3[0] + isum3[1] + isum3[2] + isum3[3]) * scale[3]; + + scale += 4; + + if (j == 0) { + qhbits[0] = vec_sr(qhbits[0], 4); + qhbits[1] = vec_sr(qhbits[1], 4); + } + } + + sum += d * isum; + } + + *s = sum; +#else + // scalar version + // This function is written like this so the compiler can manage to vectorize most of it + // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the + // manually vectorized version above. Every other version I tried would run at least 4 times slower. + // The ideal situation would be if we could just write the code once, and the compiler would + // automatically produce the best possible set of machine instructions, instead of us having to manually + // write vectorized versions for AVX, ARM_NEON, etc. 
+ + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums[8]; + int32_t aux32[8]; + memset(sums, 0, 8 * sizeof(float)); + + uint32_t auxs[4]; + const int8_t *scales = (const int8_t *)auxs; + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t *__restrict q3 = x[i].qs; + const uint8_t *__restrict hm = x[i].hmask; + const int8_t *__restrict q8 = y[i].qs; + memset(aux32, 0, 8 * sizeof(int32_t)); + int8_t *__restrict a = aux8; + uint8_t m = 1; + for (int j = 0; j < QK_K; j += 128) { + for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; + m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; + m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; + m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 
0 : 4); + a += 32; + m <<= 1; + q3 += 32; + } + a = aux8; + + memcpy(auxs, x[i].scales, 12); + uint32_t tmp = auxs[2]; + auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4); + auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4); + auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4); + auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4); + for (int j = 0; j < QK_K / 16; ++j) { + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; + q8 += 8; + a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; + q8 += 8; + a += 8; + } + const float d = MLLM_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; + +#endif +} \ No newline at end of file diff --git a/mllm/backends/cpu/third_party/ggml/VecDotQ3.hpp b/mllm/backends/cpu/third_party/ggml/VecDotQ3.hpp new file mode 100644 index 000000000..b0ded910d --- /dev/null +++ b/mllm/backends/cpu/third_party/ggml/VecDotQ3.hpp @@ -0,0 +1,31 @@ +/* + * This code is based on mllm(https://github.com/ggerganov/mllm), + * please see https://github.com/ggerganov/mllm/blob/master/src/mllm.c + * mllm is licensed under MIT Copyright (c) 2022 Georgi Gerganov: + * + * MIT License + * Copyright (c) 2022 Georgi Gerganov + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + 
* copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#pragma once +#include + +void vec_dot_q3_K_q8_K(int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy); \ No newline at end of file diff --git a/mllm/backends/cpu/third_party/ggml/VecDotQ4.cpp b/mllm/backends/cpu/third_party/ggml/VecDotQ4.cpp new file mode 100644 index 000000000..513191b6a --- /dev/null +++ b/mllm/backends/cpu/third_party/ggml/VecDotQ4.cpp @@ -0,0 +1,702 @@ +/* + * This code is based on ggml(https://github.com/ggerganov/ggml), + * please see https://github.com/ggerganov/ggml/blob/master/src/ggml.c + * ggml is licensed under MIT Copyright (c) 2022 Georgi Gerganov: + * + * MIT License + * Copyright (c) 2022 Georgi Gerganov + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "VecDotQ4.hpp" +#include "ComputeUtils.hpp" + +#if QK_K == 256 +void vec_dot_q4_K_q8_K(const int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy) { + assert(n % QK_K == 0); + + const block_q4_K *__restrict x = (block_q4_K *)vx; + const block_q8_K *__restrict y = (block_q8_K *)vy; + + const int nb = n / QK_K; + + static const uint32_t kmask1 = 0x3f3f3f3f; + static const uint32_t kmask2 = 0x0f0f0f0f; + static const uint32_t kmask3 = 0x03030303; + + uint32_t utmp[4]; + +#ifdef __ARM_FEATURE_SVE + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * MLLM_FP16_TO_FP32(x[i].dmin); + + const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8)); + + memcpy(utmp, x[i].scales, K_SCALE_SIZE); + + uint32x2_t mins8 = {0}; + mins8 = vset_lane_u32(utmp[1] & kmask1, mins8, 0); + mins8 = vset_lane_u32(((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4), mins8, 1); + + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[0] &= kmask1; + + const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins8))); + const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16(q8sums), vget_low_s16(mins)), + vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins))); + sumf -= dmin * vaddvq_s32(prod); + + const uint8_t *scales = (const uint8_t *)utmp; + + const uint8_t *__restrict q4 = (const uint8_t *)x[i].qs; + const 
int8_t *__restrict q8 = (const int8_t *)y[i].qs; + + const int vector_length = mllm_cpu_get_sve_cnt() * 8; + const svuint8_t m4b = svdup_n_u8(0xf); + const svint32_t mzero = svdup_n_s32(0); + svint32_t sumi1 = svdup_n_s32(0); + svint32_t sumi1_1 = svdup_n_s32(0); + svint32_t sumi1_2 = svdup_n_s32(0); + svint32_t sumi2 = svdup_n_s32(0); + svint32_t sumi2_1 = svdup_n_s32(0); + svint32_t sumi2_2 = svdup_n_s32(0); + switch (vector_length) { + case 128: { + for (int j = 0; j < QK_K / 64; ++j) { + svint8_t q4bytes = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4), m4b)); + svint8_t q8bytes = svld1_s8(svptrue_b8(), q8); + q8 += 16; + sumi1_1 = svmla_n_s32_x(svptrue_b32(), sumi1_1, svdot_s32(mzero, q4bytes, q8bytes), scales[2 * j + 0]); + q4bytes = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4 + 16), m4b)); + q8bytes = svld1_s8(svptrue_b8(), q8); + q8 += 16; + sumi1_2 = svmla_n_s32_x(svptrue_b32(), sumi1_2, svdot_s32(mzero, q4bytes, q8bytes), scales[2 * j + 0]); + + q4bytes = svreinterpret_s8_u8(svlsr_n_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4), 4)); + q8bytes = svld1_s8(svptrue_b8(), q8); + q8 += 16; + sumi2_1 = svmla_n_s32_x(svptrue_b32(), sumi2_1, svdot_s32(mzero, q4bytes, q8bytes), scales[2 * j + 1]); + q4bytes = svreinterpret_s8_u8(svlsr_n_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4 + 16), 4)); + q8bytes = svld1_s8(svptrue_b8(), q8); + q8 += 16; + sumi2_2 = svmla_n_s32_x(svptrue_b32(), sumi2_2, svdot_s32(mzero, q4bytes, q8bytes), scales[2 * j + 1]); + q4 += 32; + } + sumi1 = svadd_s32_x(svptrue_b32(), sumi1_1, sumi1_2); + sumi2 = svadd_s32_x(svptrue_b32(), sumi2_1, sumi2_2); + sumf += d * (svaddv_s32(svptrue_b32(), svadd_s32_x(svptrue_b32(), sumi1, sumi2))); + } break; + case 256: + case 512: { + for (int j = 0; j < QK_K / 64; ++j) { + const svuint8_t q4bits = svld1_u8(svptrue_pat_b8(SV_VL32), q4); + q4 += 32; + svint8_t q4bytes = svreinterpret_s8_u8(svand_u8_x(svptrue_pat_b8(SV_VL32), q4bits, m4b)); + svint8_t 
q8bytes = svld1_s8(svptrue_pat_b8(SV_VL32), q8); + q8 += 32; + sumi1 = svmla_n_s32_x(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(mzero, q4bytes, q8bytes), scales[2 * j + 0]); + + q4bytes = svreinterpret_s8_u8(svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q4bits, 4)); + q8bytes = svld1_s8(svptrue_pat_b8(SV_VL32), q8); + q8 += 32; + sumi2 = svmla_n_s32_x(svptrue_pat_b32(SV_VL8), sumi2, svdot_s32(mzero, q4bytes, q8bytes), scales[2 * j + 1]); + } + sumf += d * (svaddv_s32(svptrue_pat_b32(SV_VL8), svadd_s32_x(svptrue_pat_b32(SV_VL8), sumi1, sumi2))); + } break; + default: + assert(false && "Unsupported vector length"); + break; + } + } + *s = sumf; +#elif defined __ARM_NEON + + const uint8x16_t m4b = vdupq_n_u8(0xf); +#ifdef __ARM_FEATURE_DOTPROD + const int32x4_t mzero = vdupq_n_s32(0); +#endif + + int8x16x2_t q4bytes; + int8x16x2_t q8bytes; + + float sumf = 0; + + for (int i = 0; i < nb; ++i) { + const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * MLLM_FP16_TO_FP32(x[i].dmin); + + const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8)); + + memcpy(utmp, x[i].scales, 12); + + const uint32x2_t mins8 = {utmp[1] & kmask1, ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4)}; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[0] &= kmask1; + + const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins8))); + const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16(q8sums), vget_low_s16(mins)), + vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins))); + sumf -= dmin * vaddvq_s32(prod); + + const uint8_t *scales = (const uint8_t *)utmp; + + const uint8_t *__restrict q4 = x[i].qs; + const int8_t *__restrict q8 = y[i].qs; + + // int32x4_t isum = mzero; + + int32_t sumi1 = 0; + int32_t sumi2 = 0; + + for (int j = 0; j < QK_K / 64; ++j) { + const uint8x16x2_t q4bits = vld1q_u8_x2(q4); + q4 += 32; + +#ifdef __ARM_FEATURE_DOTPROD + q8bytes = vld1q_s8_x2(q8); + q8 += 32; + 
q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q4bits.val[0], m4b)); + q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(q4bits.val[1], m4b)); + + const int32x4_t p1 = vdotq_s32(vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]); + sumi1 += vaddvq_s32(p1) * scales[2 * j + 0]; + + q8bytes = vld1q_s8_x2(q8); + q8 += 32; + q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4)); + q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4)); + + const int32x4_t p2 = vdotq_s32(vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]); + + sumi2 += vaddvq_s32(p2) * scales[2 * j + 1]; +#else + q8bytes = vld1q_s8_x2(q8); + q8 += 32; + q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q4bits.val[0], m4b)); + q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(q4bits.val[1], m4b)); + const int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8(q4bytes.val[0]), vget_low_s8(q8bytes.val[0])), + vmull_s8(vget_high_s8(q4bytes.val[0]), vget_high_s8(q8bytes.val[0]))); + const int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8(q4bytes.val[1]), vget_low_s8(q8bytes.val[1])), + vmull_s8(vget_high_s8(q4bytes.val[1]), vget_high_s8(q8bytes.val[1]))); + sumi1 += vaddvq_s16(vaddq_s16(p0, p1)) * scales[2 * j + 0]; + + q8bytes = vld1q_s8_x2(q8); + q8 += 32; + q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4)); + q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4)); + const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8(q4bytes.val[0]), vget_low_s8(q8bytes.val[0])), + vmull_s8(vget_high_s8(q4bytes.val[0]), vget_high_s8(q8bytes.val[0]))); + const int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8(q4bytes.val[1]), vget_low_s8(q8bytes.val[1])), + vmull_s8(vget_high_s8(q4bytes.val[1]), vget_high_s8(q8bytes.val[1]))); + sumi2 += vaddvq_s16(vaddq_s16(p2, p3)) * scales[2 * j + 1]; + +#endif + } + + sumf += d * (sumi1 + sumi2); + } + + *s = sumf; + +#elif defined __AVX2__ + + const __m256i m4 = _mm256_set1_epi8(0xF); + + __m256 acc = 
_mm256_setzero_ps(); + __m128 acc_m = _mm_setzero_ps(); + + for (int i = 0; i < nb; ++i) { + const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * MLLM_FP16_TO_FP32(x[i].dmin); + + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + const uint8_t *__restrict q4 = x[i].qs; + const int8_t *__restrict q8 = y[i].qs; + + const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0])); + + const __m256i q8sums = _mm256_loadu_si256((const __m256i *)y[i].bsums); + const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1)); + const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s); + acc_m = _mm_fmadd_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod), acc_m); + + const __m128i sc128 = _mm256_extracti128_si256(mins_and_scales, 0); + const __m256i scales = MM256_SET_M128I(sc128, sc128); + + __m256i sumi = _mm256_setzero_si256(); + + for (int j = 0; j < QK_K / 64; ++j) { + const __m256i scale_l = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2 * j + 0)); + const __m256i scale_h = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2 * j + 1)); + + const __m256i q4bits = _mm256_loadu_si256((const __m256i *)q4); + q4 += 32; + const __m256i q4l = _mm256_and_si256(q4bits, m4); + const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), m4); + + const __m256i q8l = _mm256_loadu_si256((const __m256i *)q8); + q8 += 32; + __m256i p16l = _mm256_maddubs_epi16(q4l, q8l); + p16l = _mm256_madd_epi16(scale_l, p16l); + + const __m256i q8h = _mm256_loadu_si256((const __m256i *)q8); + q8 += 32; + __m256i p16h = _mm256_maddubs_epi16(q4h, q8h); + p16h = _mm256_madd_epi16(scale_h, p16h); + const __m256i sumj = _mm256_add_epi32(p16l, p16h); + + 
sumi = _mm256_add_epi32(sumi, sumj); + } + + __m256 vd = _mm256_set1_ps(d); + acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc); + } + + acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m)); + acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m)); + + *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m); + +#else + const uint8_t *scales = (const uint8_t *)&utmp[0]; + const uint8_t *mins = (const uint8_t *)&utmp[2]; + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums[8]; + int32_t aux32[8]; + memset(sums, 0, 8 * sizeof(float)); + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t *__restrict q4 = x[i].qs; + const int8_t *__restrict q8 = y[i].qs; + memset(aux32, 0, 8 * sizeof(int32_t)); + int8_t *__restrict a = aux8; + for (int j = 0; j < QK_K / 64; ++j) { + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF); + a += 32; + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4); + a += 32; + q4 += 32; + } + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + int sumi = 0; + for (int j = 0; j < QK_K / 16; ++j) sumi += y[i].bsums[j] * mins[j / 2]; + a = aux8; + int is = 0; + for (int j = 0; j < QK_K / 32; ++j) { + int32_t scale = scales[is++]; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; + a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; + a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; + a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; + a += 8; + } + const float d = MLLM_FP16_TO_FP32(x[i].d) * y[i].d; + for 
(int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + const float dmin = MLLM_FP16_TO_FP32(x[i].dmin) * y[i].d; + sumf -= dmin * sumi; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +#endif +} +#else +void vec_dot_q4_K_q8_K(const int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy) { + assert(n % QK_K == 0); + + const block_q4_K *__restrict x = (block_q4_K *)vx; + const block_q8_K *__restrict y = (block_q8_K *)vy; + + const int nb = n / QK_K; + +#ifdef __ARM_NEON + + const uint8x16_t m4b = vdupq_n_u8(0xf); + +#ifdef __ARM_FEATURE_DOTPROD + const int32x4_t mzero = vdupq_n_s32(0); +#endif + + float sumf = 0; + + int8x16x2_t q4bytes; + int8x16x4_t q8bytes; + + float sum_mins = 0.f; + + uint16_t aux16[2]; + const uint8_t *__restrict scales = (const uint8_t *)aux16; + + for (int i = 0; i < nb; ++i) { + const uint8_t *__restrict q4 = x[i].qs; + const int8_t *__restrict q8 = y[i].qs; + + const uint16_t *__restrict a = (const uint16_t *)x[i].scales; + aux16[0] = a[0] & 0x0f0f; + aux16[1] = (a[0] >> 4) & 0x0f0f; + + const int32_t summi = scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]); + sum_mins += y[i].d * (float)x[i].d[1] * summi; + + const float d = y[i].d * (float)x[i].d[0]; + + const uint8x16x2_t q4bits = vld1q_u8_x2(q4); + +#ifdef __ARM_FEATURE_DOTPROD + q8bytes = vld1q_s8_x4(q8); + q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q4bits.val[0], m4b)); + q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(q4bits.val[1], m4b)); + + const int32x4_t p1 = vdotq_s32(vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]); + const int32_t sumi1 = vaddvq_s32(p1) * scales[0]; + + q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4)); + q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4)); + + const int32x4_t p2 = vdotq_s32(vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[2]), q4bytes.val[1], q8bytes.val[3]); + const int32_t sumi2 = vaddvq_s32(p2) * 
scales[1]; + +#else + q8bytes = vld1q_s8_x4(q8); + q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q4bits.val[0], m4b)); + q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(q4bits.val[1], m4b)); + const int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8(q4bytes.val[0]), vget_low_s8(q8bytes.val[0])), + vmull_s8(vget_high_s8(q4bytes.val[0]), vget_high_s8(q8bytes.val[0]))); + const int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8(q4bytes.val[1]), vget_low_s8(q8bytes.val[1])), + vmull_s8(vget_high_s8(q4bytes.val[1]), vget_high_s8(q8bytes.val[1]))); + int32_t sumi1 = vaddvq_s16(vaddq_s16(p0, p1)) * scales[0]; + + q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4)); + q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4)); + const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8(q4bytes.val[0]), vget_low_s8(q8bytes.val[2])), + vmull_s8(vget_high_s8(q4bytes.val[0]), vget_high_s8(q8bytes.val[2]))); + const int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8(q4bytes.val[1]), vget_low_s8(q8bytes.val[3])), + vmull_s8(vget_high_s8(q4bytes.val[1]), vget_high_s8(q8bytes.val[3]))); + int32_t sumi2 = vaddvq_s16(vaddq_s16(p2, p3)) * scales[1]; + +#endif + sumf += d * (sumi1 + sumi2); + } + + *s = sumf - sum_mins; + +#elif defined __AVX2__ + + const __m256i m4 = _mm256_set1_epi8(0xF); + + __m256 acc = _mm256_setzero_ps(); + + float summs = 0; + + uint16_t aux16[2]; + const uint8_t *scales = (const uint8_t *)aux16; + + for (int i = 0; i < nb; ++i) { + const float d = MLLM_FP16_TO_FP32(x[i].d[0]) * y[i].d; + const float m = MLLM_FP16_TO_FP32(x[i].d[1]) * y[i].d; + const __m256 vd = _mm256_set1_ps(d); + + const uint16_t *a = (const uint16_t *)x[i].scales; + aux16[0] = a[0] & 0x0f0f; + aux16[1] = (a[0] >> 4) & 0x0f0f; + + summs += m * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3])); + + const uint8_t *__restrict q4 = x[i].qs; + const int8_t *__restrict q8 = y[i].qs; + + const __m256i q4bits = _mm256_loadu_si256((const __m256i *)q4); + 
const __m256i q4l = _mm256_and_si256(q4bits, m4); + const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), m4); + + const __m256i q8l = _mm256_loadu_si256((const __m256i *)(q8 + 0)); + const __m256i q8h = _mm256_loadu_si256((const __m256i *)(q8 + 32)); + + const __m256i p16l = _mm256_maddubs_epi16(q4l, q8l); + const __m256i p16h = _mm256_maddubs_epi16(q4h, q8h); + + const __m256i p32l = _mm256_madd_epi16(_mm256_set1_epi16(scales[0]), p16l); + acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(p32l), acc); + + const __m256i p32h = _mm256_madd_epi16(_mm256_set1_epi16(scales[1]), p16h); + acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(p32h), acc); + } + + *s = hsum_float_8(acc) - summs; + +#else + + uint8_t aux8[QK_K]; + int16_t aux16[16]; + float sums[8]; + memset(sums, 0, 8 * sizeof(float)); + + uint16_t s16[2]; + const uint8_t *__restrict scales = (const uint8_t *)s16; + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t *__restrict q4 = x[i].qs; + const int8_t *__restrict q8 = y[i].qs; + uint8_t *__restrict a = aux8; + for (int l = 0; l < 32; ++l) a[l + 0] = q4[l] & 0xF; + for (int l = 0; l < 32; ++l) a[l + 32] = q4[l] >> 4; + + const uint16_t *__restrict b = (const uint16_t *)x[i].scales; + s16[0] = b[0] & 0x0f0f; + s16[1] = (b[0] >> 4) & 0x0f0f; + + sumf -= y[i].d * MLLM_FP16_TO_FP32(x[i].d[1]) * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3])); + + const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d[0]); + + for (int j = 0; j < QK_K / 32; ++j) { + for (int l = 0; l < 16; ++l) aux16[l] = q8[l] * a[l]; + q8 += 16; + a += 16; + for (int l = 0; l < 16; ++l) aux16[l] += q8[l] * a[l]; + q8 += 16; + a += 16; + const float dl = d * scales[j]; + for (int l = 0; l < 8; ++l) sums[l] += dl * (aux16[l] + aux16[l + 8]); + } + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +#endif +} +#endif +#ifdef __AVX2__ +static void vec_dot_q4_0_q8_0_avx(const int n, float *__restrict s, const void *__restrict 
vx, const void *__restrict vy) { + const int qk = QK8_0; + const int nb = n / qk; + + assert(n % qk == 0); + + const block_q4_0 *__restrict x = (block_q4_0 *)vx; + const block_q8_0 *__restrict y = (block_q8_0 *)vy; + // Initialize accumulator with zeros + __m256 acc = _mm256_setzero_ps(); + + // Main loop + for (int i = 0; i < nb; ++i) { + /* Compute combined scale for the block */ + const __m256 d = _mm256_set1_ps(MLLM_FP16_TO_FP32(x[i].d) * MLLM_FP16_TO_FP32(y[i].d)); + + __m256i bx = bytes_from_nibbles_32(x[i].qs); + + // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval. + const __m256i off = _mm256_set1_epi8(8); + bx = _mm256_sub_epi8(bx, off); + + __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs); + + const __m256 q = mul_sum_i8_pairs_float(bx, by); + + /* Multiply q with scale and accumulate */ + acc = _mm256_fmadd_ps(d, q, acc); + } + *s = hsum_float_8(acc); +} +#endif +#ifdef __ARM_NEON +static void vec_dot_q4_0_q8_0_arm(const int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy) { + const int qk = QK8_0; + const int nb = n / qk; + + assert(n % qk == 0); + + const block_q4_0 *__restrict x = (block_q4_0 *)vx; + const block_q8_0 *__restrict y = (block_q8_0 *)vy; + +#if defined(__ARM_FEATURE_MATMUL_INT8) + { + size_t bs = 0; + size_t bx = 0; + size_t by = 0; + const block_q4_0 *__restrict vx0 = (const block_q4_0 *)vx; + const block_q4_0 *__restrict vx1 = (const block_q4_0 *)((const uint8_t *)vx + bx); + const block_q8_0 *__restrict vy0 = (const block_q8_0 *)vy; + const block_q8_0 *__restrict vy1 = (const block_q8_0 *)((const uint8_t *)vy + by); + + float32x4_t sumv0 = vdupq_n_f32(0.0f); + + for (int i = 0; i < nb; i++) { + const block_q4_0 *__restrict b_x0 = &vx0[i]; + const block_q4_0 *__restrict b_x1 = &vx1[i]; + const block_q8_0 *__restrict b_y0 = &vy0[i]; + const block_q8_0 *__restrict b_y1 = &vy1[i]; + + const uint8x16_t m4b = vdupq_n_u8(0x0F); + const int8x16_t s8b = 
vdupq_n_s8(0x8); + + const uint8x16_t v0_0 = vld1q_u8(b_x0->qs); + const uint8x16_t v0_1 = vld1q_u8(b_x1->qs); + + // 4-bit -> 8-bit + const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8(v0_0, m4b)); + const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); + const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8(v0_1, m4b)); + const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); + + // sub 8 + const int8x16_t x0_l = vsubq_s8(v0_0l, s8b); + const int8x16_t x0_h = vsubq_s8(v0_0h, s8b); + const int8x16_t x1_l = vsubq_s8(v0_1l, s8b); + const int8x16_t x1_h = vsubq_s8(v0_1h, s8b); + + // load y + const int8x16_t y0_l = vld1q_s8(b_y0->qs); + const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16); + const int8x16_t y1_l = vld1q_s8(b_y1->qs); + const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16); + + float32_t _scale[4] = { + MLLM_FP16_TO_FP32(b_x0->d) * MLLM_FP16_TO_FP32(b_y0->d), + MLLM_FP16_TO_FP32(b_x0->d) * MLLM_FP16_TO_FP32(b_y1->d), + MLLM_FP16_TO_FP32(b_x1->d) * MLLM_FP16_TO_FP32(b_y0->d), + MLLM_FP16_TO_FP32(b_x1->d) * MLLM_FP16_TO_FP32(b_y1->d)}; + float32x4_t scale = vld1q_f32(_scale); + + int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); + int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); + + int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); + int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); + + int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); + int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); + + int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); + int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); + + sumv0 = 
vmlaq_f32(sumv0, (vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)), l1, r1)), l2, r2)), l3, r3))), scale); + } + + float32x4_t sumv1 = vextq_f32(sumv0, sumv0, 2); + float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1); + + vst1_f32(s, vget_low_f32(sumv2)); + vst1_f32(s + bs, vget_high_f32(sumv2)); + + return; + } +#endif + + float32x4_t sumv0 = vdupq_n_f32(0.0F); + float32x4_t sumv1 = vdupq_n_f32(0.0F); + + assert(nb % 2 == 0); // TODO: handle odd nb + for (int i = 0; i < nb; i += 2) { + const block_q4_0 *__restrict x0 = &x[i + 0]; + const block_q4_0 *__restrict x1 = &x[i + 1]; + const block_q8_0 *__restrict y0 = &y[i + 0]; + const block_q8_0 *__restrict y1 = &y[i + 1]; + + const uint8x16_t m4b = vdupq_n_u8(0x0F); + const int8x16_t s8b = vdupq_n_s8(0x8); + + const uint8x16_t v0_0 = vld1q_u8(x0->qs); + const uint8x16_t v0_1 = vld1q_u8(x1->qs); + + // 4-bit -> 8-bit + const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8(v0_0, m4b)); + const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); + const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8(v0_1, m4b)); + const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); + + // sub 8 + const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b); + const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b); + const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b); + const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b); + + // load y + const int8x16_t v1_0l = vld1q_s8(y0->qs); + const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); + const int8x16_t v1_1l = vld1q_s8(y1->qs); + const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); + +#if defined(__ARM_FEATURE_DOTPROD) + // dot product into int32x4_t + const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0l), v0_0hs, v1_0h); + const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1l), v0_1hs, v1_1h); + + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), MLLM_FP16_TO_FP32(x0->d) * MLLM_FP16_TO_FP32(y0->d)); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), 
MLLM_FP16_TO_FP32(x1->d) * MLLM_FP16_TO_FP32(y1->d)); +#else + const int16x8_t pl0l = vmull_s8(vget_low_s8(v0_0ls), vget_low_s8(v1_0l)); + const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0ls), vget_high_s8(v1_0l)); + const int16x8_t ph0l = vmull_s8(vget_low_s8(v0_0hs), vget_low_s8(v1_0h)); + const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hs), vget_high_s8(v1_0h)); + + const int16x8_t pl1l = vmull_s8(vget_low_s8(v0_1ls), vget_low_s8(v1_1l)); + const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1ls), vget_high_s8(v1_1l)); + const int16x8_t ph1l = vmull_s8(vget_low_s8(v0_1hs), vget_low_s8(v1_1h)); + const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hs), vget_high_s8(v1_1h)); + + const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h)); + const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h)); + const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h)); + const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h)); + + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), MLLM_FP16_TO_FP32(x0->d) * MLLM_FP16_TO_FP32(y0->d)); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), MLLM_FP16_TO_FP32(x1->d) * MLLM_FP16_TO_FP32(y1->d)); +#endif + } + + *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); +} +#endif + +void vec_dot_q4_0_q8_0(const int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy) { +#ifdef __AVX2__ + vec_dot_q4_0_q8_0_avx(n, s, vx, vy); +#elif defined(__ARM_NEON) + vec_dot_q4_0_q8_0_arm(n, s, vx, vy); +#endif +} \ No newline at end of file diff --git a/mllm/backends/cpu/third_party/ggml/VecDotQ4.hpp b/mllm/backends/cpu/third_party/ggml/VecDotQ4.hpp new file mode 100644 index 000000000..8d8558bd2 --- /dev/null +++ b/mllm/backends/cpu/third_party/ggml/VecDotQ4.hpp @@ -0,0 +1,82 @@ +/* + * This code is based on ggml(https://github.com/ggerganov/ggml), + * please see https://github.com/ggerganov/ggml/blob/master/src/ggml.c + * ggml is licensed under MIT Copyright (c) 2022
Georgi Gerganov: + * + * MIT License + * Copyright (c) 2022 Georgi Gerganov + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#pragma once +#include "DataType.hpp" +#include "QuantizeFP16.hpp" +#include <cassert> +#include <cstdint> + +inline void vec_mul_q4_0_q8_0(const int n, float *__restrict result_fp32, const void *__restrict q4_operand_ptr, const void *__restrict q8_operand_ptr) { + // QK4_0 和 QK8_0 在 ggml 中通常是 32,表示每个量化块的元素数量。 + // 确保维度 n 是块大小的倍数。 + assert(n % QK4_0 == 0); // Q4_0 block size + assert(n % QK8_0 == 0); // Q8_0 block size + + // 计算总的块数量 + const int num_blocks = n / QK4_0; // 假设 QK4_0 == QK8_0 或它们是兼容的块大小 + + const block_q4_0 *q4_blocks = static_cast<const block_q4_0 *>(q4_operand_ptr); + const block_q8_0 *q8_blocks = static_cast<const block_q8_0 *>(q8_operand_ptr); + + int current_fp32_idx = 0; // 用于跟踪 FP32 结果数组中的当前位置 + + for (int i = 0; i < num_blocks; ++i) { + // 获取当前 Q4_0 和 Q8_0 块的尺度因子 + const float d_q4 = MLLM_FP16_TO_FP32(q4_blocks[i].d); + const float d_q8 = MLLM_FP16_TO_FP32(q8_blocks[i].d); + const float combined_scale = d_q4 * d_q8; // 组合尺度 + + // 获取当前块的量化数据指针 + const uint8_t *qs_q4 = q4_blocks[i].qs; + const int8_t *qs_q8 = q8_blocks[i].qs; + + // 逐元素处理块内的量化数据 + // QK4_0 / 2 是因为 Q4_0 的每个 uint8_t 存储了两个 4-bit 元素(nibbles) + for (int j = 0; j < QK4_0 / 2; ++j) { + // 处理第一个 4-bit 元素 + // (value - 8) 是 Q4_0 的反量化零点调整 + const int q4_val_low_nibble = (qs_q4[j] & 0x0F) - 8; + // Q8_0 直接使用 8-bit 值 + const int q8_val_first_byte = qs_q8[j * 2]; + + // 逐元素相乘并乘以组合尺度,结果存入 FP32 数组 + result_fp32[current_fp32_idx + j] = (float)q4_val_low_nibble * (float)q8_val_first_byte * combined_scale; + + // 处理第二个 4-bit 元素 + const int q4_val_high_nibble = (qs_q4[j] >> 4) - 8; + const int q8_val_second_byte = qs_q8[j * 2 + 1]; + + result_fp32[current_fp32_idx + j + QK4_0 / 2] = (float)q4_val_high_nibble * (float)q8_val_second_byte * combined_scale; + } + // 更新 FP32 结果数组的索引,移动到下一个块的起始位置 + current_fp32_idx += QK4_0; + } +} + +void vec_dot_q4_K_q8_K(const int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy); +void vec_dot_q4_0_q8_0(const int n, float *__restrict s, const void *__restrict vx, const void *__restrict 
vy); \ No newline at end of file diff --git a/mllm/backends/cpu/third_party/ggml/VecDotQ6.cpp b/mllm/backends/cpu/third_party/ggml/VecDotQ6.cpp new file mode 100644 index 000000000..b14bf3e44 --- /dev/null +++ b/mllm/backends/cpu/third_party/ggml/VecDotQ6.cpp @@ -0,0 +1,682 @@ +/* + * This code is based on ggml(https://github.com/ggerganov/ggml), + * please see https://github.com/ggerganov/ggml/blob/master/src/ggml.c + * ggml is licensed under MIT Copyright (c) 2022 Georgi Gerganov: + * + * MIT License + * Copyright (c) 2022 Georgi Gerganov + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "VecDotQ6.hpp" +#include "ComputeUtils.hpp" + +#if QK_K == 256 +void vec_dot_q6_K_q8_K(const int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy) { + assert(n % QK_K == 0); + + const block_q6_K *__restrict x = (block_q6_K *)vx; + const block_q8_K *__restrict y = (block_q8_K *)vy; + + const int nb = n / QK_K; + +#ifdef __ARM_NEON + + float sum = 0; + + const uint8x16_t m4b = vdupq_n_u8(0xF); +#if defined(__ARM_FEATURE_DOTPROD) + const int32x4_t vzero = vdupq_n_s32(0); +#endif + // const int8x16_t m32s = vdupq_n_s8(32); + + const uint8x16_t mone = vdupq_n_u8(3); + + int8x16x4_t q6bytes; + uint8x16x4_t q6h; + + for (int i = 0; i < nb; ++i) { + const float d_all = MLLM_FP16_TO_FP32(x[i].d); + + const uint8_t *__restrict q6 = x[i].ql; + const uint8_t *__restrict qh = x[i].qh; + const int8_t *__restrict q8 = y[i].qs; + + const int8_t *__restrict scale = x[i].scales; + + const int16x8x2_t q8sums = vld1q_s16_x2(y[i].bsums); + const int8x16_t scales = vld1q_s8(scale); + const int16x8x2_t q6scales = {vmovl_s8(vget_low_s8(scales)), vmovl_s8(vget_high_s8(scales))}; + + const int32x4_t prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16(q8sums.val[0]), vget_low_s16(q6scales.val[0])), + vmull_s16(vget_high_s16(q8sums.val[0]), vget_high_s16(q6scales.val[0]))), + vaddq_s32(vmull_s16(vget_low_s16(q8sums.val[1]), vget_low_s16(q6scales.val[1])), + vmull_s16(vget_high_s16(q8sums.val[1]), vget_high_s16(q6scales.val[1])))); + int32_t isum_mins = vaddvq_s32(prod); + + int32_t isum = 0; + + for (int j = 0; j < QK_K / 128; ++j) { + uint8x16x2_t qhbits = vld1q_u8_x2(qh); + qh += 32; + uint8x16x4_t q6bits = vld1q_u8_x4(q6); + q6 += 64; + int8x16x4_t q8bytes = vld1q_s8_x4(q8); + q8 += 64; + + q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4); + q6h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4); + uint8x16_t shifted = vshrq_n_u8(qhbits.val[0], 2); + q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4); + shifted = 
vshrq_n_u8(qhbits.val[1], 2); + q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4); + + // q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])), m32s); + // q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])), m32s); + // q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2])), m32s); + // q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3])), m32s); + q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])); + q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])); + q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2])); + q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3])); + +#if defined(__ARM_FEATURE_DOTPROD) + + isum += vaddvq_s32(vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] + vaddvq_s32(vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] + vaddvq_s32(vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] + vaddvq_s32(vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3]; + scale += 4; + +#else + + int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8(q6bytes.val[0]), vget_low_s8(q8bytes.val[0])), + vmull_s8(vget_high_s8(q6bytes.val[0]), vget_high_s8(q8bytes.val[0]))); + int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8(q6bytes.val[1]), vget_low_s8(q8bytes.val[1])), + vmull_s8(vget_high_s8(q6bytes.val[1]), vget_high_s8(q8bytes.val[1]))); + isum += vaddvq_s16(p0) * scale[0] + vaddvq_s16(p1) * scale[1]; + scale += 2; + + int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8(q6bytes.val[2]), vget_low_s8(q8bytes.val[2])), + vmull_s8(vget_high_s8(q6bytes.val[2]), vget_high_s8(q8bytes.val[2]))); + int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8(q6bytes.val[3]), vget_low_s8(q8bytes.val[3])), + vmull_s8(vget_high_s8(q6bytes.val[3]), 
vget_high_s8(q8bytes.val[3]))); + isum += vaddvq_s16(p2) * scale[0] + vaddvq_s16(p3) * scale[1]; + scale += 2; +#endif + + q8bytes = vld1q_s8_x4(q8); + q8 += 64; + + shifted = vshrq_n_u8(qhbits.val[0], 4); + q6h.val[0] = vshlq_n_u8(vandq_u8(mone, shifted), 4); + shifted = vshrq_n_u8(qhbits.val[1], 4); + q6h.val[1] = vshlq_n_u8(vandq_u8(mone, shifted), 4); + shifted = vshrq_n_u8(qhbits.val[0], 6); + q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4); + shifted = vshrq_n_u8(qhbits.val[1], 6); + q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4); + + // q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0])), m32s); + // q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1])), m32s); + // q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2])), m32s); + // q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3])), m32s); + q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0])); + q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1])); + q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2])); + q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3])); + +#if defined(__ARM_FEATURE_DOTPROD) + + isum += vaddvq_s32(vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] + vaddvq_s32(vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] + vaddvq_s32(vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] + vaddvq_s32(vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3]; + scale += 4; + + // for (int l = 0; l < 4; ++l) { + // const int32x4_t p = vdotq_s32(vzero, q6bytes.val[l], q8bytes.val[l]); + // isum += vaddvq_s32(p) * *scale++; + // } +#else + p0 = vaddq_s16(vmull_s8(vget_low_s8(q6bytes.val[0]), vget_low_s8(q8bytes.val[0])), + 
vmull_s8(vget_high_s8(q6bytes.val[0]), vget_high_s8(q8bytes.val[0]))); + p1 = vaddq_s16(vmull_s8(vget_low_s8(q6bytes.val[1]), vget_low_s8(q8bytes.val[1])), + vmull_s8(vget_high_s8(q6bytes.val[1]), vget_high_s8(q8bytes.val[1]))); + isum += vaddvq_s16(p0) * scale[0] + vaddvq_s16(p1) * scale[1]; + scale += 2; + + p2 = vaddq_s16(vmull_s8(vget_low_s8(q6bytes.val[2]), vget_low_s8(q8bytes.val[2])), + vmull_s8(vget_high_s8(q6bytes.val[2]), vget_high_s8(q8bytes.val[2]))); + p3 = vaddq_s16(vmull_s8(vget_low_s8(q6bytes.val[3]), vget_low_s8(q8bytes.val[3])), + vmull_s8(vget_high_s8(q6bytes.val[3]), vget_high_s8(q8bytes.val[3]))); + isum += vaddvq_s16(p2) * scale[0] + vaddvq_s16(p3) * scale[1]; + scale += 2; +#endif + } + // sum += isum * d_all * y[i].d; + sum += d_all * y[i].d * (isum - 32 * isum_mins); + } + *s = sum; + +#elif defined __AVX2__ + + const __m256i m4 = _mm256_set1_epi8(0xF); + const __m256i m2 = _mm256_set1_epi8(3); + const __m256i m32s = _mm256_set1_epi8(32); + + __m256 acc = _mm256_setzero_ps(); + + for (int i = 0; i < nb; ++i) { + const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); + + const uint8_t *__restrict q4 = x[i].ql; + const uint8_t *__restrict qh = x[i].qh; + const int8_t *__restrict q8 = y[i].qs; + + const __m128i scales = _mm_loadu_si128((const __m128i *)x[i].scales); + + __m256i sumi = _mm256_setzero_si256(); + + int is = 0; + + for (int j = 0; j < QK_K / 128; ++j) { + const __m128i scale_0 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 0)); + const __m128i scale_1 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 1)); + const __m128i scale_2 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 2)); + const __m128i scale_3 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 3)); + is += 4; + + const __m256i q4bits1 = _mm256_loadu_si256((const __m256i *)q4); + q4 += 32; + const __m256i q4bits2 = _mm256_loadu_si256((const __m256i *)q4); + q4 += 32; + const __m256i q4bitsH = _mm256_loadu_si256((const __m256i *)qh); + qh += 32; + + const __m256i q4h_0 = 
_mm256_slli_epi16(_mm256_and_si256(q4bitsH, m2), 4); + const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 2), m2), 4); + const __m256i q4h_2 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 4), m2), 4); + const __m256i q4h_3 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 6), m2), 4); + + const __m256i q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0); + const __m256i q4_1 = _mm256_or_si256(_mm256_and_si256(q4bits2, m4), q4h_1); + const __m256i q4_2 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_2); + const __m256i q4_3 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits2, 4), m4), q4h_3); + + const __m256i q8_0 = _mm256_loadu_si256((const __m256i *)q8); + q8 += 32; + const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); + q8 += 32; + const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); + q8 += 32; + const __m256i q8_3 = _mm256_loadu_si256((const __m256i *)q8); + q8 += 32; + + __m256i q8s_0 = _mm256_maddubs_epi16(m32s, q8_0); + __m256i q8s_1 = _mm256_maddubs_epi16(m32s, q8_1); + __m256i q8s_2 = _mm256_maddubs_epi16(m32s, q8_2); + __m256i q8s_3 = _mm256_maddubs_epi16(m32s, q8_3); + + __m256i p16_0 = _mm256_maddubs_epi16(q4_0, q8_0); + __m256i p16_1 = _mm256_maddubs_epi16(q4_1, q8_1); + __m256i p16_2 = _mm256_maddubs_epi16(q4_2, q8_2); + __m256i p16_3 = _mm256_maddubs_epi16(q4_3, q8_3); + + p16_0 = _mm256_sub_epi16(p16_0, q8s_0); + p16_1 = _mm256_sub_epi16(p16_1, q8s_1); + p16_2 = _mm256_sub_epi16(p16_2, q8s_2); + p16_3 = _mm256_sub_epi16(p16_3, q8s_3); + + p16_0 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_0), p16_0); + p16_1 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_1), p16_1); + p16_2 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_2), p16_2); + p16_3 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_3), p16_3); + + sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1)); + sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_2, 
p16_3)); + } + + acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc); + } + +#if defined(_MSC_VER) || defined(__MINGW32__) + float arr[8]; + _mm256_storeu_ps(arr, acc); + + // for(float i : arr) { + // printf("%f ", i); + // } + // printf("\n"); +#endif + + *s = hsum_float_8(acc); + +#elif defined __AVX__ + const __m128i m4 = _mm_set1_epi8(0xF); + const __m128i m3 = _mm_set1_epi8(3); + const __m128i m32s = _mm_set1_epi8(32); + const __m128i m2 = _mm_set1_epi8(2); + + __m256 acc = _mm256_setzero_ps(); + + for (int i = 0; i < nb; ++i) { + const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); + + const uint8_t *__restrict q4 = x[i].ql; + const uint8_t *__restrict qh = x[i].qh; + const int8_t *__restrict q8 = y[i].qs; + + const __m128i scales = _mm_loadu_si128((const __m128i *)x[i].scales); + + __m128i sumi_0 = _mm_setzero_si128(); + __m128i sumi_1 = _mm_setzero_si128(); + + __m128i shuffle = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000); + for (int j = 0; j < QK_K / 128; ++j) { + const __m128i q4bitsH_0 = _mm_loadu_si128((const __m128i *)qh); + qh += 16; + const __m128i q4bitsH_1 = _mm_loadu_si128((const __m128i *)qh); + qh += 16; + + const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH_0, m3), 4); + const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(q4bitsH_1, m3), 4); + const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 2), m3), 4); + const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 2), m3), 4); + const __m128i q4h_4 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 4), m3), 4); + const __m128i q4h_5 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 4), m3), 4); + const __m128i q4h_6 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 6), m3), 4); + const __m128i q4h_7 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 6), m3), 4); + + const __m128i q4bits1_0 = _mm_loadu_si128((const __m128i *)q4); + q4 += 16; + const __m128i q4bits1_1 = 
_mm_loadu_si128((const __m128i *)q4); + q4 += 16; + const __m128i q4bits2_0 = _mm_loadu_si128((const __m128i *)q4); + q4 += 16; + const __m128i q4bits2_1 = _mm_loadu_si128((const __m128i *)q4); + q4 += 16; + + const __m128i q4_0 = _mm_or_si128(_mm_and_si128(q4bits1_0, m4), q4h_0); + const __m128i q4_1 = _mm_or_si128(_mm_and_si128(q4bits1_1, m4), q4h_1); + const __m128i q4_2 = _mm_or_si128(_mm_and_si128(q4bits2_0, m4), q4h_2); + const __m128i q4_3 = _mm_or_si128(_mm_and_si128(q4bits2_1, m4), q4h_3); + const __m128i q4_4 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_0, 4), m4), q4h_4); + const __m128i q4_5 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_1, 4), m4), q4h_5); + const __m128i q4_6 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_0, 4), m4), q4h_6); + const __m128i q4_7 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_1, 4), m4), q4h_7); + + const __m128i q8_0 = _mm_loadu_si128((const __m128i *)q8); + q8 += 16; + const __m128i q8_1 = _mm_loadu_si128((const __m128i *)q8); + q8 += 16; + const __m128i q8_2 = _mm_loadu_si128((const __m128i *)q8); + q8 += 16; + const __m128i q8_3 = _mm_loadu_si128((const __m128i *)q8); + q8 += 16; + const __m128i q8_4 = _mm_loadu_si128((const __m128i *)q8); + q8 += 16; + const __m128i q8_5 = _mm_loadu_si128((const __m128i *)q8); + q8 += 16; + const __m128i q8_6 = _mm_loadu_si128((const __m128i *)q8); + q8 += 16; + const __m128i q8_7 = _mm_loadu_si128((const __m128i *)q8); + q8 += 16; + + __m128i q8s_0 = _mm_maddubs_epi16(m32s, q8_0); + __m128i q8s_1 = _mm_maddubs_epi16(m32s, q8_1); + __m128i q8s_2 = _mm_maddubs_epi16(m32s, q8_2); + __m128i q8s_3 = _mm_maddubs_epi16(m32s, q8_3); + __m128i q8s_4 = _mm_maddubs_epi16(m32s, q8_4); + __m128i q8s_5 = _mm_maddubs_epi16(m32s, q8_5); + __m128i q8s_6 = _mm_maddubs_epi16(m32s, q8_6); + __m128i q8s_7 = _mm_maddubs_epi16(m32s, q8_7); + + __m128i p16_0 = _mm_maddubs_epi16(q4_0, q8_0); + __m128i p16_1 = _mm_maddubs_epi16(q4_1, q8_1); + __m128i p16_2 = _mm_maddubs_epi16(q4_2, 
q8_2); + __m128i p16_3 = _mm_maddubs_epi16(q4_3, q8_3); + __m128i p16_4 = _mm_maddubs_epi16(q4_4, q8_4); + __m128i p16_5 = _mm_maddubs_epi16(q4_5, q8_5); + __m128i p16_6 = _mm_maddubs_epi16(q4_6, q8_6); + __m128i p16_7 = _mm_maddubs_epi16(q4_7, q8_7); + + p16_0 = _mm_sub_epi16(p16_0, q8s_0); + p16_1 = _mm_sub_epi16(p16_1, q8s_1); + p16_2 = _mm_sub_epi16(p16_2, q8s_2); + p16_3 = _mm_sub_epi16(p16_3, q8s_3); + p16_4 = _mm_sub_epi16(p16_4, q8s_4); + p16_5 = _mm_sub_epi16(p16_5, q8s_5); + p16_6 = _mm_sub_epi16(p16_6, q8s_6); + p16_7 = _mm_sub_epi16(p16_7, q8s_7); + + const __m128i scale_0 = _mm_shuffle_epi8(scales, shuffle); + shuffle = _mm_add_epi8(shuffle, m2); + const __m128i scale_1 = _mm_shuffle_epi8(scales, shuffle); + shuffle = _mm_add_epi8(shuffle, m2); + const __m128i scale_2 = _mm_shuffle_epi8(scales, shuffle); + shuffle = _mm_add_epi8(shuffle, m2); + const __m128i scale_3 = _mm_shuffle_epi8(scales, shuffle); + shuffle = _mm_add_epi8(shuffle, m2); + + p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0); + p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_0, scale_0)), p16_1); + p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2); + p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_1, scale_1)), p16_3); + p16_4 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_2), p16_4); + p16_5 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_2, scale_2)), p16_5); + p16_6 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_3), p16_6); + p16_7 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_3, scale_3)), p16_7); + + sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2)); + sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3)); + sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_4, p16_6)); + sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_5, p16_7)); + } + + __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); + acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc); + } + + *s = 
hsum_float_8(acc); + +#else + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums[8]; + int32_t aux32[8]; + memset(sums, 0, 8 * sizeof(float)); + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t *__restrict q4 = x[i].ql; + const uint8_t *__restrict qh = x[i].qh; + const int8_t *__restrict q8 = y[i].qs; + memset(aux32, 0, 8 * sizeof(int32_t)); + int8_t *__restrict a = aux8; + for (int j = 0; j < QK_K; j += 128) { + for (int l = 0; l < 32; ++l) { + a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32; + a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32; + a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32; + a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32; + } + a += 128; + q4 += 64; + qh += 32; + } + a = aux8; + int is = 0; + for (int j = 0; j < QK_K / 16; ++j) { + int scale = x[i].scales[is++]; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; + a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; + a += 8; + } + const float d = MLLM_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +#endif +} + +#else + +void vec_dot_q6_K_q8_K(const int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy) { + assert(n % QK_K == 0); + + const block_q6_K *__restrict x = (block_q6_K *)vx; + const block_q8_K *__restrict y = (block_q8_K *)vy; + + const int nb = n / QK_K; + +#ifdef __ARM_NEON + + float sum = 0; + + const uint8x16_t m4b = vdupq_n_u8(0xF); + const int8x16_t m32s = vdupq_n_s8(32); +#if defined(__ARM_FEATURE_DOTPROD) + const int32x4_t vzero = vdupq_n_s32(0); +#endif + + const uint8x16_t mone = vdupq_n_u8(3); + + int8x16x4_t q6bytes; + uint8x16x4_t q6h; + + for (int i = 0; i < nb; ++i) { + 
const float d_all = (float)x[i].d; + + const uint8_t *__restrict q6 = x[i].ql; + const uint8_t *__restrict qh = x[i].qh; + const int8_t *__restrict q8 = y[i].qs; + + const int8_t *__restrict scale = x[i].scales; + + int32_t isum = 0; + + uint8x16_t qhbits = vld1q_u8(qh); + uint8x16x2_t q6bits = vld1q_u8_x2(q6); + int8x16x4_t q8bytes = vld1q_s8_x4(q8); + + q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits), 4); + uint8x16_t shifted = vshrq_n_u8(qhbits, 2); + q6h.val[1] = vshlq_n_u8(vandq_u8(mone, shifted), 4); + shifted = vshrq_n_u8(qhbits, 4); + q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4); + shifted = vshrq_n_u8(qhbits, 6); + q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4); + + q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])), m32s); + q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])), m32s); + q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[2])), m32s); + q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[3])), m32s); + +#if defined(__ARM_FEATURE_DOTPROD) + + isum += vaddvq_s32(vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] + vaddvq_s32(vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] + vaddvq_s32(vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] + vaddvq_s32(vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3]; +#else + + int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8(q6bytes.val[0]), vget_low_s8(q8bytes.val[0])), + vmull_s8(vget_high_s8(q6bytes.val[0]), vget_high_s8(q8bytes.val[0]))); + int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8(q6bytes.val[1]), vget_low_s8(q8bytes.val[1])), + vmull_s8(vget_high_s8(q6bytes.val[1]), vget_high_s8(q8bytes.val[1]))); + isum += vaddvq_s16(p0) * scale[0] + vaddvq_s16(p1) * scale[1]; + + int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8(q6bytes.val[2]), vget_low_s8(q8bytes.val[2])), + 
vmull_s8(vget_high_s8(q6bytes.val[2]), vget_high_s8(q8bytes.val[2]))); + int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8(q6bytes.val[3]), vget_low_s8(q8bytes.val[3])), + vmull_s8(vget_high_s8(q6bytes.val[3]), vget_high_s8(q8bytes.val[3]))); + isum += vaddvq_s16(p2) * scale[2] + vaddvq_s16(p3) * scale[3]; +#endif + + sum += isum * d_all * y[i].d; + } + *s = sum; + +#elif defined __AVX2__ + + const __m256i m4 = _mm256_set1_epi8(0xF); + const __m256i m2 = _mm256_set1_epi8(3); + const __m256i m32s = _mm256_set1_epi8(32); + + __m256 acc = _mm256_setzero_ps(); + + for (int i = 0; i < nb; ++i) { + const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); + + const uint8_t *__restrict q4 = x[i].ql; + const uint8_t *__restrict qh = x[i].qh; + const int8_t *__restrict q8 = y[i].qs; + + const __m64 scales_1 = _mm_set1_pi8(x[i].scales[0]); + const __m64 scales_2 = _mm_set1_pi8(x[i].scales[1]); + const __m64 scales_3 = _mm_set1_pi8(x[i].scales[2]); + const __m64 scales_4 = _mm_set1_pi8(x[i].scales[3]); + + __m256i sumi = _mm256_setzero_si256(); + + const __m128i scale_0 = _mm_set_epi64(scales_2, scales_1); + const __m128i scale_1 = _mm_set_epi64(scales_4, scales_3); + + const __m256i q4bits1 = _mm256_loadu_si256((const __m256i *)q4); + const __m128i q4bitsH = _mm_loadu_si128((const __m128i *)qh); + + const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q4bitsH, 2), q4bitsH), m2), 4); + const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q4bitsH, 6), _mm_srli_epi16(q4bitsH, 4)), m2), 4); + + const __m256i q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0); + const __m256i q4_1 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_1); + + const __m256i q8_0 = _mm256_loadu_si256((const __m256i *)(q8 + 0)); + const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)(q8 + 32)); + + __m256i q8s_0 = _mm256_maddubs_epi16(m32s, q8_0); + __m256i q8s_1 = _mm256_maddubs_epi16(m32s, q8_1); + + 
__m256i p16_0 = _mm256_maddubs_epi16(q4_0, q8_0); + __m256i p16_1 = _mm256_maddubs_epi16(q4_1, q8_1); + + p16_0 = _mm256_sub_epi16(p16_0, q8s_0); + p16_1 = _mm256_sub_epi16(p16_1, q8s_1); + + p16_0 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_0), p16_0); + p16_1 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_1), p16_1); + + sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1)); + + acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc); + } + + *s = hsum_float_8(acc); + +#elif defined __AVX__ + + const __m128i m4 = _mm_set1_epi8(0xF); + const __m128i m2 = _mm_set1_epi8(3); + const __m128i m32s = _mm_set1_epi8(32); + + __m256 acc = _mm256_setzero_ps(); + + for (int i = 0; i < nb; ++i) { + const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); + + const uint8_t *__restrict q4 = x[i].ql; + const uint8_t *__restrict qh = x[i].qh; + const int8_t *__restrict q8 = y[i].qs; + + const __m64 scales_1 = _mm_set1_pi8(x[i].scales[0]); + const __m64 scales_2 = _mm_set1_pi8(x[i].scales[1]); + const __m64 scales_3 = _mm_set1_pi8(x[i].scales[2]); + const __m64 scales_4 = _mm_set1_pi8(x[i].scales[3]); + + __m128i sumi_0 = _mm_setzero_si128(); + __m128i sumi_1 = _mm_setzero_si128(); + + const __m128i scale_0 = _mm_set_epi64(scales_2, scales_1); + const __m128i scale_1 = _mm_set_epi64(scales_4, scales_3); + + const __m256i q4bits1 = _mm256_loadu_si256((const __m256i *)q4); + const __m128i q4bitsH = _mm_loadu_si128((const __m128i *)qh); + + const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH, m2), 4); + const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 2), m2), 4); + const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 4), m2), 4); + const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 6), m2), 4); + + const __m128i q4_0 = _mm_or_si128(_mm_and_si128(_mm256_extractf128_si256(q4bits1, 0), m4), q4h_0); + const __m128i q4_1 = _mm_or_si128(_mm_and_si128(_mm256_extractf128_si256(q4bits1, 
1), m4), q4h_1); + const __m128i q4_2 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q4bits1, 0), 4), m4), q4h_2); + const __m128i q4_3 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q4bits1, 1), 4), m4), q4h_3); + + const __m256i q8_0 = _mm256_loadu_si256((const __m256i *)(q8 + 0)); + const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)(q8 + 32)); + + __m128i q8s_0 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_0, 0)); + __m128i q8s_1 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_0, 1)); + __m128i q8s_2 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_1, 0)); + __m128i q8s_3 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_1, 1)); + + __m128i p16_0 = _mm_maddubs_epi16(q4_0, _mm256_extractf128_si256(q8_0, 0)); + __m128i p16_1 = _mm_maddubs_epi16(q4_1, _mm256_extractf128_si256(q8_0, 1)); + __m128i p16_2 = _mm_maddubs_epi16(q4_2, _mm256_extractf128_si256(q8_1, 0)); + __m128i p16_3 = _mm_maddubs_epi16(q4_3, _mm256_extractf128_si256(q8_1, 1)); + + p16_0 = _mm_sub_epi16(p16_0, q8s_0); + p16_1 = _mm_sub_epi16(p16_1, q8s_1); + p16_2 = _mm_sub_epi16(p16_2, q8s_2); + p16_3 = _mm_sub_epi16(p16_3, q8s_3); + + p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0); + p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_0, scale_0)), p16_1); + p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2); + p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_1, scale_1)), p16_3); + + sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2)); + sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3)); + + acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(MM256_SET_M128I(sumi_1, sumi_0))), acc); + } + + *s = hsum_float_8(acc); + +#else + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums[8]; + int32_t aux32[8]; + memset(sums, 0, 8 * sizeof(float)); + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t *__restrict q4 = x[i].ql; + 
const uint8_t *__restrict qh = x[i].qh; + const int8_t *__restrict q8 = y[i].qs; + memset(aux32, 0, 8 * sizeof(int32_t)); + int8_t *__restrict a = aux8; + for (int l = 0; l < 16; ++l) { + a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32; + a[l + 16] = (int8_t)((q4[l + 16] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32; + a[l + 32] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32; + a[l + 48] = (int8_t)((q4[l + 16] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32; + } + int is = 0; + for (int j = 0; j < QK_K / 16; ++j) { + int scale = x[i].scales[is++]; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; + a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; + a += 8; + } + const float d = MLLM_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +#endif +} +#endif \ No newline at end of file diff --git a/mllm/backends/cpu/third_party/ggml/VecDotQ6.hpp b/mllm/backends/cpu/third_party/ggml/VecDotQ6.hpp new file mode 100644 index 000000000..7a12c584c --- /dev/null +++ b/mllm/backends/cpu/third_party/ggml/VecDotQ6.hpp @@ -0,0 +1,31 @@ +/* + * This code is based on ggml(https://github.com/ggerganov/ggml), + * please see https://github.com/ggerganov/ggml/blob/master/src/ggml.c + * ggml is licensed under MIT Copyright (c) 2022 Georgi Gerganov: + * + * MIT License + * Copyright (c) 2022 Georgi Gerganov + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * 
furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#pragma once +#include + +void vec_dot_q6_K_q8_K(const int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy); \ No newline at end of file diff --git a/mllm/backends/cpu/third_party/ggml/VecDotQ8.cpp b/mllm/backends/cpu/third_party/ggml/VecDotQ8.cpp new file mode 100644 index 000000000..b1f97690d --- /dev/null +++ b/mllm/backends/cpu/third_party/ggml/VecDotQ8.cpp @@ -0,0 +1,230 @@ +/* + * This code is based on ggml(https://github.com/ggerganov/ggml), + * please see https://github.com/ggerganov/ggml/blob/master/src/ggml.c + * ggml is licensed under MIT Copyright (c) 2022 Georgi Gerganov: + * + * MIT License + * Copyright (c) 2022 Georgi Gerganov + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "VecDotQ8.hpp" +#include "ComputeUtils.hpp" + +void vec_dot_q8_0_q8_0(int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy, size_t bs, size_t bx, size_t by) { + const int qk = QK8_0; + const int nb = n / qk; // number of blocks + + assert(n % qk == 0); + + const auto *__restrict x = static_cast(vx); + const auto *__restrict y = static_cast(vy); + +#if defined(__ARM_FEATURE_MATMUL_INT8) + // if (nrc == 2) + { + const block_q8_0 *__restrict vx0 = (const block_q8_0 *)vx; + const block_q8_0 *__restrict vx1 = (const block_q8_0 *)((const uint8_t *)vx + bx); + const block_q8_0 *__restrict vy0 = (const block_q8_0 *)vy; + const block_q8_0 *__restrict vy1 = (const block_q8_0 *)((const uint8_t *)vy + by); + + float32x4_t sumv0 = vdupq_n_f32(0.0f); + + for (int i = 0; i < nb; i++) { + const block_q8_0 *__restrict b_x0 = &vx0[i]; + const block_q8_0 *__restrict b_y0 = &vy0[i]; + + const block_q8_0 *__restrict b_x1 = &vx1[i]; + const block_q8_0 *__restrict b_y1 = &vy1[i]; + + const int8x16_t x0_l = vld1q_s8(b_x0->qs); + const int8x16_t x0_h = vld1q_s8(b_x0->qs + 16); + const int8x16_t x1_l = vld1q_s8(b_x1->qs); + const int8x16_t x1_h = vld1q_s8(b_x1->qs + 16); + + // load y + const int8x16_t y0_l = vld1q_s8(b_y0->qs); + const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16); + const int8x16_t y1_l = vld1q_s8(b_y1->qs); + const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16); + + float32_t _scale[4] = { + MLLM_FP16_TO_FP32(b_x0->d) * MLLM_FP16_TO_FP32(b_y0->d), + 
MLLM_FP16_TO_FP32(b_x0->d) * MLLM_FP16_TO_FP32(b_y1->d), + MLLM_FP16_TO_FP32(b_x1->d) * MLLM_FP16_TO_FP32(b_y0->d), + MLLM_FP16_TO_FP32(b_x1->d) * MLLM_FP16_TO_FP32(b_y1->d)}; + float32x4_t scale = vld1q_f32(_scale); + + int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); + int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); + + int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); + int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); + + int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); + int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); + + int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); + int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); + + sumv0 = vmlaq_f32(sumv0, (vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)), l1, r1)), l2, r2)), l3, r3))), scale); + } + + float32x4_t sumv1 = vextq_f32(sumv0, sumv0, 2); + float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1); + + vst1_f32(s, vget_low_f32(sumv2)); + vst1_f32(s + bs, vget_high_f32(sumv2)); + + return; + } +#elif defined(__ARM_NEON) + float32x4_t sumv0 = vdupq_n_f32(0.0f); + float32x4_t sumv1 = vdupq_n_f32(0.0f); + + assert(nb % 2 == 0); // TODO: handle odd nb + + for (int i = 0; i < nb; i += 2) { + const block_q8_0 *x0 = &x[i + 0]; + const block_q8_0 *x1 = &x[i + 1]; + const block_q8_0 *y0 = &y[i + 0]; + const block_q8_0 *y1 = &y[i + 1]; + + const int8x16_t x0_0 = vld1q_s8(x0->qs); + const int8x16_t x0_1 = vld1q_s8(x0->qs + 16); + const int8x16_t x1_0 = vld1q_s8(x1->qs); + const int8x16_t x1_1 = vld1q_s8(x1->qs + 16); + + // load y + const int8x16_t y0_0 = 
vld1q_s8(y0->qs); + const int8x16_t y0_1 = vld1q_s8(y0->qs + 16); + const int8x16_t y1_0 = vld1q_s8(y1->qs); + const int8x16_t y1_1 = vld1q_s8(y1->qs + 16); + + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(mllm_vdotq_s32(vdupq_n_s32(0), x0_0, y0_0), mllm_vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), MLLM_FP16_TO_FP32(x0->d) * MLLM_FP16_TO_FP32(y0->d)); + + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(mllm_vdotq_s32(vdupq_n_s32(0), x1_0, y1_0), mllm_vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), MLLM_FP16_TO_FP32(x1->d) * MLLM_FP16_TO_FP32(y1->d)); + } + + *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); +#elif defined(__AVX2__) || defined(__AVX__) + // Initialize accumulator with zeros + __m256 acc = _mm256_setzero_ps(); + + // Main loop + for (int i = 0; i < nb; ++i) { + // Compute combined scale for the block + const __m256 d = _mm256_set1_ps(MLLM_FP16_TO_FP32(x[i].d) * MLLM_FP16_TO_FP32(y[i].d)); + __m256i bx = _mm256_loadu_si256((const __m256i *)x[i].qs); + __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs); + + const __m256 q = mul_sum_i8_pairs_float(bx, by); + + // Multiply q with scale and accumulate +#if defined(__AVX2__) + acc = _mm256_fmadd_ps(d, q, acc); +#else + acc = _mm256_add_ps(_mm256_mul_ps(d, q), acc); +#endif + } + + *s = hsum_float_8(acc); +#endif +} + +void vec_dot_i8_i8(const int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy, float scale1, float scale2) { + const int qk = QK8_0; + const int nb = n / qk; + + const float scale = scale1 * scale2; + + assert(n % qk == 0); + + const block_q8_per_tensor *__restrict x = (block_q8_per_tensor *)vx; + const block_q8_per_tensor *__restrict y = (block_q8_per_tensor *)vy; + +#if defined(__ARM_NEON) + float32x4_t sumv0 = vdupq_n_f32(0.0f); + float32x4_t sumv1 = vdupq_n_f32(0.0f); + + assert(nb % 2 == 0); // TODO: handle odd nb + + for (int i = 0; i < nb; i += 2) { + const block_q8_per_tensor *__restrict x0 = &x[i + 0]; + const block_q8_per_tensor *__restrict x1 = &x[i + 1]; 
+ const block_q8_per_tensor *__restrict y0 = &y[i + 0]; + const block_q8_per_tensor *__restrict y1 = &y[i + 1]; + + const int8x16_t x0_0 = vld1q_s8(x0->qs); + const int8x16_t x0_1 = vld1q_s8(x0->qs + 16); + const int8x16_t x1_0 = vld1q_s8(x1->qs); + const int8x16_t x1_1 = vld1q_s8(x1->qs + 16); + + // load y + const int8x16_t y0_0 = vld1q_s8(y0->qs); + const int8x16_t y0_1 = vld1q_s8(y0->qs + 16); + const int8x16_t y1_0 = vld1q_s8(y1->qs); + const int8x16_t y1_1 = vld1q_s8(y1->qs + 16); + + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(mllm_vdotq_s32(vdupq_n_s32(0), x0_0, y0_0), mllm_vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), scale); + + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(mllm_vdotq_s32(vdupq_n_s32(0), x1_0, y1_0), mllm_vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), scale); + } + + *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); +#elif defined(__AVX2__) || defined(__AVX__) + // Initialize accumulator with zeros + __m256 acc = _mm256_setzero_ps(); + + // Main loop + for (int i = 0; i < nb; ++i) { + // Compute combined scale for the block + const __m256 d = _mm256_set1_ps(scale); + __m256i qx = _mm256_loadu_si256((const __m256i *)x[i].qs); + __m256i qy = _mm256_loadu_si256((const __m256i *)y[i].qs); + + const __m256 q = mul_sum_i8_pairs_float(qx, qy); + + // Multiply q with scale and accumulate +#if defined(__AVX2__) + acc = _mm256_fmadd_ps(d, q, acc); +#else + acc = _mm256_add_ps(_mm256_mul_ps(d, q), acc); +#endif + } + + *s = hsum_float_8(acc); +#else + // scalar + float sumf = 0.0; + + for (int i = 0; i < nb; i++) { + int sumi = 0; + + for (int j = 0; j < qk; j++) { + sumi += x[i].qs[j] * y[i].qs[j]; + } + + sumf += sumi * scale; + } + + *s = sumf; +#endif +} \ No newline at end of file diff --git a/mllm/backends/cpu/third_party/ggml/VecDotQ8.hpp b/mllm/backends/cpu/third_party/ggml/VecDotQ8.hpp new file mode 100644 index 000000000..d0f44c52c --- /dev/null +++ b/mllm/backends/cpu/third_party/ggml/VecDotQ8.hpp @@ -0,0 +1,33 @@ +/* + * This code is based on 
ggml(https://github.com/ggerganov/ggml), + * please see https://github.com/ggerganov/ggml/blob/master/src/ggml.c + * ggml is licensed under MIT Copyright (c) 2022 Georgi Gerganov: + * + * MIT License + * Copyright (c) 2022 Georgi Gerganov + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#pragma once +#include + +void vec_dot_q8_0_q8_0(int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy, size_t bs = 0, size_t bx = 0, size_t by = 0); + +void vec_dot_i8_i8(const int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy, float scale1 = 1, float scale2 = 1); \ No newline at end of file diff --git a/src/backends/cpu/compute/VecDotType.cpp b/mllm/backends/cpu/third_party/ggml/VecDotType.cpp similarity index 87% rename from src/backends/cpu/compute/VecDotType.cpp rename to mllm/backends/cpu/third_party/ggml/VecDotType.cpp index c7472fc81..641767880 100644 --- a/src/backends/cpu/compute/VecDotType.cpp +++ b/mllm/backends/cpu/third_party/ggml/VecDotType.cpp @@ -30,9 +30,15 @@ #include #include "VecDotType.hpp" #include "Types.hpp" -#include "quantize/Quantize.hpp" -#include "compute/VecDot.hpp" -#include "compute/GEMM_AArch64.hpp" +#include "Quantize.hpp" +#include "VecDotFP32.hpp" +#include "VecDotFP16.hpp" +#include "VecDotQ2.hpp" +#include "VecDotQ3.hpp" +#include "VecDotQ4.hpp" +#include "VecDotQ6.hpp" +#include "VecDotQ8.hpp" +#include "GemmPack.hpp" void fp32_add_row_to(int n, const float *MLLM_RESTRICT src, float *MLLM_RESTRICT dst, float alpha) { int i = 0; @@ -126,6 +132,7 @@ static inline void get_scale_min_k4(int j, const uint8_t *__restrict q, uint8_t #endif void q4_k_add_row_to(int n, const block_q4_K *MLLM_RESTRICT src, float *MLLM_RESTRICT dst, float alpha) { +#if QK_K == 256 assert(n % QK_K == 0); assert(QK_K == 256); // TODO: It is wired here for now const int nb = n / QK_K; @@ -158,6 +165,7 @@ void q4_k_add_row_to(int n, const block_q4_K *MLLM_RESTRICT src, float *MLLM_RES is += 2; } } +#endif } void q6_k_add_row_to(int n, const block_q6_K *MLLM_RESTRICT src, float *MLLM_RESTRICT dst, float alpha) { @@ -222,6 +230,23 @@ void q8_k_add_row_to(int n, const block_q8_K *MLLM_RESTRICT src, float *MLLM_RES } } +/*** + * This is the type traits for different data types used in MLLM. 
+ * It defines the size, block size, conversion functions, vector dot product functions, + * and row addition functions for each data type. + * + * The `type_traits` array is indexed by the `mllm_type` enum values. + * Each entry in the array corresponds to a specific data type and contains + * the necessary information to handle that type. * The `to_float` and `from_float` functions are used to convert between + * the data type and float representation. + * The `vec_dot` function is used to compute the dot product of two vectors of the + * specified data type. + * The `add_row_to` function is used to add a row of the specified data type to a destination vector. + * The `vec_dot_type` field specifies the data type used for the dot product. + * The `size` field specifies the size of the data type in bytes. + * The `blck_size` field specifies the block size of the data type. + */ + type_traits_t type_traits[] = { /*[MLLM_TYPE_F32] = */ { .size = sizeof(float), @@ -311,8 +336,8 @@ type_traits_t type_traits[] = { .vec_dot_type = MLLM_TYPE_Q8_0, // .nrows = 1, // .ncols = 4, - .gemv = (mllm_gemv_func)mllm_gemv_q4_0_4x4_q8_0, - .gemm = (mllm_gemm_func)mllm_gemm_q4_0_4x4_q8_0, + .gemv = (gemv_func)gemv_q4_0_4x4_q8_0, + .gemm = (gemm_func)gemm_q4_0_4x4_q8_0, }, /*[MLLM_TYPE_Q4_0_4_8] = */ { .size = sizeof(block_q4_0), @@ -325,8 +350,8 @@ type_traits_t type_traits[] = { .vec_dot_type = MLLM_TYPE_Q8_0, // .nrows = 1, // .ncols = 4, - .gemv = (mllm_gemv_func)mllm_gemv_q4_0_4x8_q8_0, - .gemm = (mllm_gemm_func)mllm_gemm_q4_0_4x8_q8_0, + .gemv = (gemv_func)gemv_q4_0_4x8_q8_0, + .gemm = (gemm_func)gemm_q4_0_4x8_q8_0, }, /*[MLLM_TYPE_Q4_0_8_8] = */ { .size = sizeof(block_q4_0), @@ -339,8 +364,8 @@ type_traits_t type_traits[] = { .vec_dot_type = MLLM_TYPE_Q8_0, // .nrows = 1, // .ncols = 8, - .gemv = (mllm_gemv_func)mllm_gemv_q4_0_8x8_q8_0, - .gemm = (mllm_gemm_func)mllm_gemm_q4_0_8x8_q8_0, + .gemv = (gemv_func)gemv_q4_0_8x8_q8_0, + .gemm = (gemm_func)gemm_q4_0_8x8_q8_0, }, {}, 
/*MLLM_TYPE_Q3_K = */ { @@ -377,5 +402,16 @@ type_traits_t type_traits[] = { {}, {}, /*MLLM_TYPE_IQ2_S = */ {}, + /*MLLM_TYPE_KLEIDIAI_Q4_0 = */ {}, + /*MLLM_TYPE_Q8_0F = */ {}, + /*MLLM_TYPE_Q2_0 = */ { + .size = sizeof(block_q2_0), + .blck_size = QK_K, + .to_float = (mllm_to_float_func)dequantize_row_q2_0, + .from_float = (mllm_from_float_func)quantize_row_q2_0, + .vec_dot = (mllm_vec_dot_func)vec_dot_q2_0_q8_0, + .vec_dot_type = MLLM_TYPE_Q8_0, + .add_row_to = NULL, //(mllm_vec_add_row_func)q2_k_add_row_to, + }, // TODO: add support to more type }; diff --git a/src/backends/cpu/compute/VecDotType.hpp b/mllm/backends/cpu/third_party/ggml/VecDotType.hpp similarity index 86% rename from src/backends/cpu/compute/VecDotType.hpp rename to mllm/backends/cpu/third_party/ggml/VecDotType.hpp index 62757484a..e02ba6898 100644 --- a/src/backends/cpu/compute/VecDotType.hpp +++ b/mllm/backends/cpu/third_party/ggml/VecDotType.hpp @@ -36,10 +36,10 @@ typedef void (*mllm_from_float_func)(const float *src, void *dst, const int n); typedef void (*mllm_vec_dot_func)(const int n, float *MLLM_RESTRICT dst, const void *MLLM_RESTRICT x, const void *MLLM_RESTRICT y); typedef void (*mllm_from_float_to_mat_func)(const float *MLLM_RESTRICT x, void *MLLM_RESTRICT y, int64_t nr, int64_t k, int64_t bs); typedef void (*mllm_vec_add_row_func)(const int n, const void *MLLM_RESTRICT src, float *MLLM_RESTRICT dst, const float alpha); -typedef void (*mllm_gemv_func)(int n, float *MLLM_RESTRICT s, size_t bs, const void *MLLM_RESTRICT x, - const void *MLLM_RESTRICT y, int nr, int nc, const void *MLLM_RESTRICT bias); -typedef void (*mllm_gemm_func)(int n, float *MLLM_RESTRICT s, size_t bs, const void *MLLM_RESTRICT x, - const void *MLLM_RESTRICT y, int nr, int nc, const void *MLLM_RESTRICT bias); +typedef void (*gemv_func)(int n, float *MLLM_RESTRICT s, size_t bs, const void *MLLM_RESTRICT x, + const void *MLLM_RESTRICT y, int nr, int nc, const void *MLLM_RESTRICT bias); +typedef void (*gemm_func)(int 
n, float *MLLM_RESTRICT s, size_t bs, const void *MLLM_RESTRICT x, + const void *MLLM_RESTRICT y, int nr, int nc, const void *MLLM_RESTRICT bias); typedef struct type_traits_t { size_t size; // type size @@ -51,8 +51,8 @@ typedef struct type_traits_t { mllm_vec_dot_func vec_dot; DataType vec_dot_type; // vec_dot do dot product between two DataType, this is the other type mllm_vec_add_row_func add_row_to; // add alpha * row to a row of float - mllm_gemv_func gemv; - mllm_gemm_func gemm; + gemv_func gemv; + gemm_func gemm; } type_traits_t; extern type_traits_t type_traits[]; diff --git a/mllm/backends/cpu/third_party/kleidiai b/mllm/backends/cpu/third_party/kleidiai new file mode 160000 index 000000000..bb8523387 --- /dev/null +++ b/mllm/backends/cpu/third_party/kleidiai @@ -0,0 +1 @@ +Subproject commit bb8523387d2ddb69b505add0cdf4ea9f8c38bbed diff --git a/src/backends/new_op.py b/mllm/backends/new_op.py similarity index 100% rename from src/backends/new_op.py rename to mllm/backends/new_op.py diff --git a/mllm/backends/opencl/CMakeLists.txt b/mllm/backends/opencl/CMakeLists.txt new file mode 100644 index 000000000..759e51331 --- /dev/null +++ b/mllm/backends/opencl/CMakeLists.txt @@ -0,0 +1,34 @@ +# ====================================================================== +# 最终修正版 - 匹配您当前的目录结构 +# ====================================================================== + +# 收集所有 OpenCL 源文件 +file(GLOB OPENCL_SRC + ${CMAKE_CURRENT_LIST_DIR}/*.cpp + ${CMAKE_CURRENT_LIST_DIR}/op/*.cpp +) + +# 定义 OpenCL 后端静态库 +add_library(mllm_opencl STATIC ${OPENCL_SRC}) + + +if(ANDROID) + # --- 安卓平台的逻辑 --- + # 在安卓上,我们在 C++ 代码中通过 dlopen 动态加载 libOpenCL.so。 + # 因此,在编译时不应该链接 -lOpenCL。 + # 我们唯一需要的链接依赖是 dl 库,用于 dlopen/dlsym。 + # 这个依赖已经在 examples/CMakeLists.txt 中的 ${CMAKE_DL_LIBS} 变量里处理了。 + # 所以,这里什么都不用做。 + message(STATUS "OpenCL backend for Android: dynamic loading enabled, skipping link-time dependency on OpenCL.") + +else() + # --- 非安卓平台的逻辑 (macOS, Linux) --- + find_package(OpenCL REQUIRED) + 
target_include_directories(mllm_opencl PUBLIC ${OpenCL_INCLUDE_DIRS}) + target_link_libraries(mllm_opencl PUBLIC OpenCL::OpenCL) +endif() + +# 将 mllm_opencl 库自身的头文件目录设为 public +target_include_directories(mllm_opencl PUBLIC + ${CMAKE_CURRENT_LIST_DIR} +) \ No newline at end of file diff --git a/mllm/backends/opencl/OpenCLBackend.cpp b/mllm/backends/opencl/OpenCLBackend.cpp new file mode 100644 index 000000000..a39831142 --- /dev/null +++ b/mllm/backends/opencl/OpenCLBackend.cpp @@ -0,0 +1,1212 @@ +#include "OpenCLBackend.hpp" +#include +#include +#include +#include +#if defined(MLLM_TARGET_ANDROID) +#include +#include // for dirname +#endif +#include // C++17, for directory creation +#include // for std::error_code + +#include "Tensor.hpp" +#include "Backend.hpp" +#include "OpDefined.hpp" +#include "Types.hpp" +#include "Module.hpp" +#include "utils/OpenCLTools.hpp" +#include "op/OpenCLAddOp.hpp" +#include "op/OpenCLAddTwoOp.hpp" +#include "op/OpenCLSubOp.hpp" +#include "op/OpenCLSubTwoOp.hpp" +#include "op/OpenCLMulOp.hpp" +#include "op/OpenCLMulTwoOp.hpp" +#include "op/OpenCLDivOp.hpp" +#include "op/OpenCLDivIntOp.hpp" +#include "op/OpenCLDivTwoOp.hpp" +#include "op/OpenCLMatmulOp.hpp" +#include "op/OpenCLLinearOp.hpp" +#include "op/OpenCLTransposeOp.hpp" +#include "op/OpenCLSoftMaxOp.hpp" +#include "op/OpenCLRMSNormOp.hpp" +#include "op/OpenCLEmbeddingOp.hpp" +#include "op/OpenCLSiLUOp.hpp" +#include "op/OpenCLViewOp.hpp" +#include "op/OpenCLKVCacheOp.hpp" +#include "op/OpenCLRoPEOp.hpp" +#include "op/OpenCLClipOp.hpp" +#include "op/OpenCLFlashAttentionOp.hpp" +#include "op/OpenCLSplitOp.hpp" +#include "op/OpenCLTopkOp.hpp" +#include "op/OpenCLSumOp.hpp" +#include "op/OpenCLLikeOp.hpp" +#include "op/OpenCLClipTensorOp.hpp" +#include "op/OpenCLScatterAddOp.hpp" +#include "op/OpenCLArgSortOp.hpp" +#include "op/OpenCLBinCountOp.hpp" + +// 错误检查函数 +void check_cl_error(cl_int err, const std::string &operation) { + if (err != CL_SUCCESS) { + std::cerr << "OpenCL Error 
during " << operation << " (" << err << ")" << std::endl; + throw std::runtime_error("OpenCL Error: " + operation); + } +} + +// 从文件加载内核源码的辅助函数 +std::string load_file_contents(const char *filename) { + std::ifstream in(filename, std::ios::in | std::ios::binary); + if (in) { + return std::string((std::istreambuf_iterator(in)), std::istreambuf_iterator()); + } + throw std::runtime_error(std::string("Could not open file: ") + filename); +} + +#if defined(MLLM_TARGET_ANDROID) +std::string get_executable_dir() { + char path_buf[1024] = {0}; + // 读取 /proc/self/exe 符号链接,获取可执行文件的完整路径 + ssize_t len = readlink("/proc/self/exe", path_buf, sizeof(path_buf) - 1); + if (len != -1) { + path_buf[len] = '\0'; + // 使用 dirname 获取路径的目录部分 + return std::string(dirname(path_buf)); + } + // 如果失败,返回一个默认的相对路径作为后备 + return "."; +} +#endif + +namespace mllm { + +#if defined(MLLM_TARGET_ANDROID) +// 【关键修正】将 OpenCLSymbols 的完整定义放回到 .cpp 文件中 +struct OpenCLSymbols { + typedef cl_int (*clGetPlatformIDs_f_t)(cl_uint, cl_platform_id *, cl_uint *); + typedef cl_int (*clGetDeviceIDs_f_t)(cl_platform_id, cl_device_type, cl_uint, cl_device_id *, cl_uint *); + typedef cl_int (*clGetDeviceInfo_f_t)(cl_device_id, cl_device_info, size_t, void *, size_t *); + typedef cl_context (*clCreateContext_f_t)(const cl_context_properties *, cl_uint, const cl_device_id *, void(CL_CALLBACK *)(const char *, const void *, size_t, void *), void *, cl_int *); + typedef cl_command_queue (*clCreateCommandQueue_f_t)(cl_context, cl_device_id, cl_command_queue_properties, cl_int *); + typedef cl_int (*clReleaseCommandQueue_f_t)(cl_command_queue); + typedef cl_int (*clReleaseContext_f_t)(cl_context); + typedef cl_program (*clCreateProgramWithSource_f_t)(cl_context, cl_uint, const char **, const size_t *, cl_int *); + typedef cl_int (*clBuildProgram_f_t)(cl_program, cl_uint, const cl_device_id *, const char *, void(CL_CALLBACK *)(cl_program, void *), void *); + typedef cl_int (*clGetProgramBuildInfo_f_t)(cl_program, cl_device_id, 
cl_program_build_info, size_t, void *, size_t *); + typedef cl_program (*clCreateProgramWithBinary_f_t)(cl_context, cl_uint, const cl_device_id *, const size_t *, const unsigned char **, cl_int *, cl_int *); + typedef cl_int (*clGetProgramInfo_f_t)(cl_program, cl_program_info, size_t, void *, size_t *); + typedef cl_int (*clReleaseProgram_f_t)(cl_program); + typedef cl_kernel (*clCreateKernel_f_t)(cl_program, const char *, cl_int *); + typedef cl_int (*clReleaseKernel_f_t)(cl_kernel); + typedef cl_int (*clSetKernelArg_f_t)(cl_kernel, cl_uint, size_t, const void *); + typedef cl_int (*clEnqueueNDRangeKernel_f_t)(cl_command_queue, cl_kernel, cl_uint, const size_t *, const size_t *, const size_t *, cl_uint, const cl_event *, cl_event *); + typedef cl_mem (*clCreateBuffer_f_t)(cl_context, cl_mem_flags, size_t, void *, cl_int *); + typedef cl_int (*clReleaseMemObject_f_t)(cl_mem); + typedef cl_int (*clEnqueueWriteBuffer_f_t)(cl_command_queue, cl_mem, cl_bool, size_t, size_t, const void *, cl_uint, const cl_event *, cl_event *); + typedef cl_int (*clEnqueueReadBuffer_f_t)(cl_command_queue, cl_mem, cl_bool, size_t, size_t, void *, cl_uint, const cl_event *, cl_event *); + typedef cl_int (*clFinish_f_t)(cl_command_queue); + typedef cl_sampler (*clCreateSampler_f_t)(cl_context, cl_bool, cl_addressing_mode, cl_filter_mode, cl_int *); + typedef cl_int (*clReleaseSampler_f_t)(cl_sampler); + typedef cl_mem (*clCreateImage_f_t)(cl_context, cl_mem_flags, const cl_image_format *, const cl_image_desc *, void *, cl_int *); + typedef cl_int (*clEnqueueWriteImage_f_t)(cl_command_queue, cl_mem, cl_bool, const size_t *, const size_t *, size_t, size_t, const void *, cl_uint, const cl_event *, cl_event *); + typedef cl_int (*clEnqueueReadImage_f_t)(cl_command_queue, cl_mem, cl_bool, const size_t *, const size_t *, size_t, size_t, void *, cl_uint, const cl_event *, cl_event *); + typedef cl_int (*clEnqueueWriteBufferRect_f_t)(cl_command_queue, cl_mem, cl_bool, const size_t *, const size_t 
*, const size_t *, size_t, size_t, size_t, size_t, const void *, cl_uint, const cl_event *, cl_event *); + typedef cl_int (*clEnqueueReadBufferRect_f_t)(cl_command_queue, cl_mem, cl_bool, const size_t *, const size_t *, const size_t *, size_t, size_t, size_t, size_t, void *, cl_uint, const cl_event *, cl_event *); + typedef cl_int (*clReleaseDevice_f_t)(cl_device_id); + typedef cl_int (*clRetainDevice_f_t)(cl_device_id); + typedef cl_command_queue (*clCreateCommandQueueWithProperties_f_t)(cl_context, cl_device_id, const cl_queue_properties *, cl_int *); + typedef cl_int (*clRetainCommandQueue_f_t)(cl_command_queue); + typedef void *(*clSVMAlloc_f_t)(cl_context, cl_svm_mem_flags, size_t, cl_uint); + typedef void (*clSVMFree_f_t)(cl_context, void *); + typedef cl_int (*clEnqueueSVMMap_f_t)(cl_command_queue, cl_bool, cl_map_flags, void *, size_t, cl_uint, const cl_event *, cl_event *); + typedef cl_int (*clEnqueueSVMUnmap_f_t)(cl_command_queue, void *, cl_uint, const cl_event *, cl_event *); + typedef cl_int (*clSetKernelArgSVMPointer_f_t)(cl_kernel, cl_uint, const void *); + typedef cl_mem (*clCreateSubBuffer_f_t)(cl_mem, cl_mem_flags, cl_buffer_create_type, const void *, cl_int *); + typedef cl_int (*clEnqueueCopyBuffer_f_t)(cl_command_queue, cl_mem, cl_mem, size_t, size_t, size_t, cl_uint, const cl_event *, cl_event *); + typedef cl_int (*clEnqueueCopyBufferToImage_f_t)(cl_command_queue, cl_mem, cl_mem, size_t, const size_t *, const size_t *, cl_uint, const cl_event *, cl_event *); + typedef cl_int (*clEnqueueCopyBufferRect_f_t)(cl_command_queue, cl_mem, cl_mem, const size_t *, const size_t *, const size_t *, size_t, size_t, size_t, size_t, cl_uint, const cl_event *, cl_event *); + typedef cl_int (*clWaitForEvents_f_t)(cl_uint, const cl_event *); + typedef cl_int (*clGetEventProfilingInfo_f_t)(cl_event, cl_profiling_info, size_t, void *, size_t *); + typedef cl_int (*clReleaseEvent_f_t)(cl_event); + typedef cl_int (*clEnqueueCopyImageToBuffer_f_t)(cl_command_queue, 
cl_mem, cl_mem, const size_t *, const size_t *, size_t, cl_uint, const cl_event *, cl_event *); + typedef cl_int (*clEnqueueCopyImage_f_t)(cl_command_queue, cl_mem, cl_mem, const size_t *, const size_t *, const size_t *, cl_uint, const cl_event *, cl_event *); + typedef cl_int (*clGetMemObjectInfo_f_t)(cl_mem, cl_mem_info, size_t, void *, size_t *); + typedef cl_int (*clEnqueueFillBuffer_f_t)(cl_command_queue, cl_mem, const void *, size_t, size_t, size_t, cl_uint, const cl_event *, cl_event *); + typedef void *(*clEnqueueMapBuffer_f_t)(cl_command_queue, cl_mem, cl_bool, cl_map_flags, size_t, size_t, cl_uint, const cl_event *, cl_event *, cl_int *); + typedef cl_int (*clEnqueueUnmapMemObject_f_t)(cl_command_queue, cl_mem, void *, cl_uint, const cl_event *, cl_event *); + + clGetPlatformIDs_f_t clGetPlatformIDs = nullptr; + clGetDeviceIDs_f_t clGetDeviceIDs = nullptr; + clGetDeviceInfo_f_t clGetDeviceInfo = nullptr; + clCreateContext_f_t clCreateContext = nullptr; + clCreateCommandQueue_f_t clCreateCommandQueue = nullptr; + clReleaseCommandQueue_f_t clReleaseCommandQueue = nullptr; + clReleaseContext_f_t clReleaseContext = nullptr; + clCreateProgramWithSource_f_t clCreateProgramWithSource = nullptr; + clBuildProgram_f_t clBuildProgram = nullptr; + clGetProgramBuildInfo_f_t clGetProgramBuildInfo = nullptr; + clCreateProgramWithBinary_f_t clCreateProgramWithBinary = nullptr; + clGetProgramInfo_f_t clGetProgramInfo = nullptr; + clReleaseProgram_f_t clReleaseProgram = nullptr; + clCreateKernel_f_t clCreateKernel = nullptr; + clReleaseKernel_f_t clReleaseKernel = nullptr; + clSetKernelArg_f_t clSetKernelArg = nullptr; + clEnqueueNDRangeKernel_f_t clEnqueueNDRangeKernel = nullptr; + clCreateBuffer_f_t clCreateBuffer = nullptr; + clReleaseMemObject_f_t clReleaseMemObject = nullptr; + clEnqueueWriteBuffer_f_t clEnqueueWriteBuffer = nullptr; + clEnqueueReadBuffer_f_t clEnqueueReadBuffer = nullptr; + clFinish_f_t clFinish = nullptr; + clCreateSampler_f_t clCreateSampler = 
nullptr; + clReleaseSampler_f_t clReleaseSampler = nullptr; + clCreateImage_f_t clCreateImage = nullptr; + clEnqueueWriteImage_f_t clEnqueueWriteImage = nullptr; + clEnqueueReadImage_f_t clEnqueueReadImage = nullptr; + clEnqueueWriteBufferRect_f_t clEnqueueWriteBufferRect = nullptr; + clEnqueueReadBufferRect_f_t clEnqueueReadBufferRect = nullptr; + clReleaseDevice_f_t clReleaseDevice = nullptr; + clRetainDevice_f_t clRetainDevice = nullptr; + clCreateCommandQueueWithProperties_f_t clCreateCommandQueueWithProperties = nullptr; + clRetainCommandQueue_f_t clRetainCommandQueue = nullptr; + clSVMAlloc_f_t clSVMAlloc = nullptr; + clSVMFree_f_t clSVMFree = nullptr; + clEnqueueSVMMap_f_t clEnqueueSVMMap = nullptr; + clEnqueueSVMUnmap_f_t clEnqueueSVMUnmap = nullptr; + clSetKernelArgSVMPointer_f_t clSetKernelArgSVMPointer = nullptr; + clCreateSubBuffer_f_t clCreateSubBuffer = nullptr; + clEnqueueCopyBuffer_f_t clEnqueueCopyBuffer = nullptr; + clEnqueueCopyBufferToImage_f_t clEnqueueCopyBufferToImage = nullptr; + clEnqueueCopyBufferRect_f_t clEnqueueCopyBufferRect = nullptr; + clWaitForEvents_f_t clWaitForEvents = nullptr; + clGetEventProfilingInfo_f_t clGetEventProfilingInfo = nullptr; + clReleaseEvent_f_t clReleaseEvent = nullptr; + clEnqueueCopyImageToBuffer_f_t clEnqueueCopyImageToBuffer = nullptr; + clEnqueueCopyImage_f_t clEnqueueCopyImage = nullptr; + clGetMemObjectInfo_f_t clGetMemObjectInfo = nullptr; + clEnqueueFillBuffer_f_t clEnqueueFillBuffer = nullptr; + clEnqueueMapBuffer_f_t clEnqueueMapBuffer = nullptr; + clEnqueueUnmapMemObject_f_t clEnqueueUnmapMemObject = nullptr; + + void *handle = nullptr; +}; + +OpenCLSymbols OpenCLBackend::symbols_; +static std::once_flag opencl_symbols_load_flag; + +// 实现 getSymbols 辅助函数 +OpenCLSymbols *OpenCLBackend::getSymbols() { + return &symbols_; +} + +// extern "C" 包装函数 +extern "C" { +cl_int CL_API_CALL clGetPlatformIDs(cl_uint num_entries, cl_platform_id *platforms, cl_uint *num_platforms) { + auto func = 
mllm::OpenCLBackend::getSymbols()->clGetPlatformIDs; + return func(num_entries, platforms, num_platforms); +} +cl_int CL_API_CALL clGetDeviceIDs(cl_platform_id platform, cl_device_type device_type, cl_uint num_entries, cl_device_id *devices, cl_uint *num_devices) { + auto func = mllm::OpenCLBackend::getSymbols()->clGetDeviceIDs; + return func(platform, device_type, num_entries, devices, num_devices); +} +cl_int CL_API_CALL clGetDeviceInfo(cl_device_id device, cl_device_info param_name, size_t param_value_size, void *param_value, size_t *param_value_size_ret) { + auto func = mllm::OpenCLBackend::getSymbols()->clGetDeviceInfo; + return func(device, param_name, param_value_size, param_value, param_value_size_ret); +} +cl_context CL_API_CALL clCreateContext(const cl_context_properties *properties, cl_uint num_devices, const cl_device_id *devices, void(CL_CALLBACK *pfn_notify)(const char *, const void *, size_t, void *), void *user_data, cl_int *errcode_ret) { + auto func = mllm::OpenCLBackend::getSymbols()->clCreateContext; + return func(properties, num_devices, devices, pfn_notify, user_data, errcode_ret); +} +cl_command_queue CL_API_CALL clCreateCommandQueue(cl_context context, cl_device_id device, cl_command_queue_properties properties, cl_int *errcode_ret) { + auto func = mllm::OpenCLBackend::getSymbols()->clCreateCommandQueue; + return func(context, device, properties, errcode_ret); +} +cl_int CL_API_CALL clReleaseCommandQueue(cl_command_queue command_queue) { + auto func = mllm::OpenCLBackend::getSymbols()->clReleaseCommandQueue; + return func(command_queue); +} +cl_int CL_API_CALL clReleaseContext(cl_context context) { + auto func = mllm::OpenCLBackend::getSymbols()->clReleaseContext; + return func(context); +} +cl_program CL_API_CALL clCreateProgramWithSource(cl_context context, cl_uint count, const char **strings, const size_t *lengths, cl_int *errcode_ret) { + auto func = mllm::OpenCLBackend::getSymbols()->clCreateProgramWithSource; + return func(context, 
count, strings, lengths, errcode_ret); +} +cl_int CL_API_CALL clBuildProgram(cl_program program, cl_uint num_devices, const cl_device_id *device_list, const char *options, void(CL_CALLBACK *pfn_notify)(cl_program, void *), void *user_data) { + auto func = mllm::OpenCLBackend::getSymbols()->clBuildProgram; + return func(program, num_devices, device_list, options, pfn_notify, user_data); +} +cl_int CL_API_CALL clGetProgramBuildInfo(cl_program program, cl_device_id device, cl_program_build_info param_name, size_t param_value_size, void *param_value, size_t *param_value_size_ret) { + auto func = mllm::OpenCLBackend::getSymbols()->clGetProgramBuildInfo; + return func(program, device, param_name, param_value_size, param_value, param_value_size_ret); +} +cl_int CL_API_CALL clReleaseProgram(cl_program program) { + auto func = mllm::OpenCLBackend::getSymbols()->clReleaseProgram; + return func(program); +} +cl_kernel CL_API_CALL clCreateKernel(cl_program program, const char *kernel_name, cl_int *errcode_ret) { + auto func = mllm::OpenCLBackend::getSymbols()->clCreateKernel; + return func(program, kernel_name, errcode_ret); +} +cl_int CL_API_CALL clReleaseKernel(cl_kernel kernel) { + auto func = mllm::OpenCLBackend::getSymbols()->clReleaseKernel; + return func(kernel); +} +cl_int CL_API_CALL clSetKernelArg(cl_kernel kernel, cl_uint arg_index, size_t arg_size, const void *arg_value) { + auto func = mllm::OpenCLBackend::getSymbols()->clSetKernelArg; + return func(kernel, arg_index, arg_size, arg_value); +} +cl_int CL_API_CALL clEnqueueNDRangeKernel(cl_command_queue command_queue, cl_kernel kernel, cl_uint work_dim, const size_t *global_work_offset, const size_t *global_work_size, const size_t *local_work_size, cl_uint num_events_in_wait_list, const cl_event *event_wait_list, cl_event *event) { + auto func = mllm::OpenCLBackend::getSymbols()->clEnqueueNDRangeKernel; + return func(command_queue, kernel, work_dim, global_work_offset, global_work_size, local_work_size, 
num_events_in_wait_list, event_wait_list, event); +} +cl_mem CL_API_CALL clCreateBuffer(cl_context context, cl_mem_flags flags, size_t size, void *host_ptr, cl_int *errcode_ret) { + auto func = mllm::OpenCLBackend::getSymbols()->clCreateBuffer; + return func(context, flags, size, host_ptr, errcode_ret); +} +cl_int CL_API_CALL clReleaseMemObject(cl_mem memobj) { + auto func = mllm::OpenCLBackend::getSymbols()->clReleaseMemObject; + return func(memobj); +} +cl_int CL_API_CALL clEnqueueWriteBuffer(cl_command_queue command_queue, cl_mem buffer, cl_bool blocking_write, size_t offset, size_t size, const void *ptr, cl_uint num_events_in_wait_list, const cl_event *event_wait_list, cl_event *event) { + auto func = mllm::OpenCLBackend::getSymbols()->clEnqueueWriteBuffer; + return func(command_queue, buffer, blocking_write, offset, size, ptr, num_events_in_wait_list, event_wait_list, event); +} +cl_int CL_API_CALL clEnqueueReadBuffer(cl_command_queue command_queue, cl_mem buffer, cl_bool blocking_read, size_t offset, size_t size, void *ptr, cl_uint num_events_in_wait_list, const cl_event *event_wait_list, cl_event *event) { + auto func = mllm::OpenCLBackend::getSymbols()->clEnqueueReadBuffer; + return func(command_queue, buffer, blocking_read, offset, size, ptr, num_events_in_wait_list, event_wait_list, event); +} +cl_int CL_API_CALL clFinish(cl_command_queue command_queue) { + auto func = mllm::OpenCLBackend::getSymbols()->clFinish; + return func(command_queue); +} +cl_sampler CL_API_CALL clCreateSampler(cl_context context, cl_bool normalized_coords, cl_addressing_mode addressing_mode, cl_filter_mode filter_mode, cl_int *errcode_ret) { + auto func = mllm::OpenCLBackend::getSymbols()->clCreateSampler; + return func(context, normalized_coords, addressing_mode, filter_mode, errcode_ret); +} +cl_int CL_API_CALL clReleaseSampler(cl_sampler sampler) { + auto func = mllm::OpenCLBackend::getSymbols()->clReleaseSampler; + return func(sampler); +} +cl_mem CL_API_CALL 
clCreateImage(cl_context context, cl_mem_flags flags, const cl_image_format *image_format, const cl_image_desc *image_desc, void *host_ptr, cl_int *errcode_ret) { + auto func = mllm::OpenCLBackend::getSymbols()->clCreateImage; + return func(context, flags, image_format, image_desc, host_ptr, errcode_ret); +} +cl_int CL_API_CALL clEnqueueWriteImage(cl_command_queue command_queue, cl_mem image, cl_bool blocking_write, const size_t *origin, const size_t *region, size_t input_row_pitch, size_t input_slice_pitch, const void *ptr, cl_uint num_events_in_wait_list, const cl_event *event_wait_list, cl_event *event) { + auto func = mllm::OpenCLBackend::getSymbols()->clEnqueueWriteImage; + return func(command_queue, image, blocking_write, origin, region, input_row_pitch, input_slice_pitch, ptr, num_events_in_wait_list, event_wait_list, event); +} +cl_int CL_API_CALL clEnqueueReadImage(cl_command_queue command_queue, cl_mem image, cl_bool blocking_read, const size_t *origin, const size_t *region, size_t row_pitch, size_t slice_pitch, void *ptr, cl_uint num_events_in_wait_list, const cl_event *event_wait_list, cl_event *event) { + auto func = mllm::OpenCLBackend::getSymbols()->clEnqueueReadImage; + return func(command_queue, image, blocking_read, origin, region, row_pitch, slice_pitch, ptr, num_events_in_wait_list, event_wait_list, event); +} +cl_int CL_API_CALL clEnqueueWriteBufferRect(cl_command_queue command_queue, cl_mem buffer, cl_bool blocking_write, const size_t *buffer_origin, const size_t *host_origin, const size_t *region, size_t buffer_row_pitch, size_t buffer_slice_pitch, size_t host_row_pitch, size_t host_slice_pitch, const void *ptr, cl_uint num_events_in_wait_list, const cl_event *event_wait_list, cl_event *event) { + auto func = mllm::OpenCLBackend::getSymbols()->clEnqueueWriteBufferRect; + return func(command_queue, buffer, blocking_write, buffer_origin, host_origin, region, buffer_row_pitch, buffer_slice_pitch, host_row_pitch, host_slice_pitch, ptr, 
num_events_in_wait_list, event_wait_list, event); +} +cl_int CL_API_CALL clEnqueueReadBufferRect(cl_command_queue command_queue, cl_mem buffer, cl_bool blocking_read, const size_t *buffer_origin, const size_t *host_origin, const size_t *region, size_t buffer_row_pitch, size_t buffer_slice_pitch, size_t host_row_pitch, size_t host_slice_pitch, void *ptr, cl_uint num_events_in_wait_list, const cl_event *event_wait_list, cl_event *event) { + auto func = mllm::OpenCLBackend::getSymbols()->clEnqueueReadBufferRect; + return func(command_queue, buffer, blocking_read, buffer_origin, host_origin, region, buffer_row_pitch, buffer_slice_pitch, host_row_pitch, host_slice_pitch, ptr, num_events_in_wait_list, event_wait_list, event); +} +cl_int CL_API_CALL clReleaseDevice(cl_device_id device) { + auto func = mllm::OpenCLBackend::getSymbols()->clReleaseDevice; + return func(device); +} +cl_int CL_API_CALL clRetainDevice(cl_device_id device) { + auto func = mllm::OpenCLBackend::getSymbols()->clRetainDevice; + return func(device); +} +cl_command_queue CL_API_CALL clCreateCommandQueueWithProperties(cl_context context, cl_device_id device, const cl_queue_properties *properties, cl_int *errcode_ret) { + auto func = mllm::OpenCLBackend::getSymbols()->clCreateCommandQueueWithProperties; + return func(context, device, properties, errcode_ret); +} +cl_int CL_API_CALL clRetainCommandQueue(cl_command_queue command_queue) { + auto func = mllm::OpenCLBackend::getSymbols()->clRetainCommandQueue; + return func(command_queue); +} +void *CL_API_CALL clSVMAlloc(cl_context context, cl_svm_mem_flags flags, size_t size, cl_uint alignment) { + auto func = mllm::OpenCLBackend::getSymbols()->clSVMAlloc; + return func(context, flags, size, alignment); +} +void CL_API_CALL clSVMFree(cl_context context, void *svm_pointer) { + auto func = mllm::OpenCLBackend::getSymbols()->clSVMFree; + func(context, svm_pointer); +} +cl_int CL_API_CALL clEnqueueSVMMap(cl_command_queue command_queue, cl_bool blocking_map, 
cl_map_flags map_flags, void *svm_ptr, size_t size, cl_uint num_events_in_wait_list, const cl_event *event_wait_list, cl_event *event) { + auto func = mllm::OpenCLBackend::getSymbols()->clEnqueueSVMMap; + return func(command_queue, blocking_map, map_flags, svm_ptr, size, num_events_in_wait_list, event_wait_list, event); +} +cl_int CL_API_CALL clEnqueueSVMUnmap(cl_command_queue command_queue, void *svm_ptr, cl_uint num_events_in_wait_list, const cl_event *event_wait_list, cl_event *event) { + auto func = mllm::OpenCLBackend::getSymbols()->clEnqueueSVMUnmap; + return func(command_queue, svm_ptr, num_events_in_wait_list, event_wait_list, event); +} +cl_int CL_API_CALL clSetKernelArgSVMPointer(cl_kernel kernel, cl_uint arg_index, const void *arg_value) { + auto func = mllm::OpenCLBackend::getSymbols()->clSetKernelArgSVMPointer; + return func(kernel, arg_index, arg_value); +} +cl_mem CL_API_CALL clCreateSubBuffer(cl_mem mem, cl_mem_flags flags, cl_buffer_create_type type, const void *b_info, cl_int *err) { + auto func = mllm::OpenCLBackend::getSymbols()->clCreateSubBuffer; + return func(mem, flags, type, b_info, err); +} +cl_int CL_API_CALL clEnqueueCopyBuffer(cl_command_queue q, cl_mem s, cl_mem d, size_t s_o, size_t d_o, size_t si, cl_uint e_l, const cl_event *e, cl_event *ev) { + auto func = mllm::OpenCLBackend::getSymbols()->clEnqueueCopyBuffer; + return func(q, s, d, s_o, d_o, si, e_l, e, ev); +} +cl_int CL_API_CALL clEnqueueCopyBufferToImage(cl_command_queue q, cl_mem s, cl_mem d, size_t s_o, const size_t *d_o, const size_t *r, cl_uint el, const cl_event *e, cl_event *ev) { + auto func = mllm::OpenCLBackend::getSymbols()->clEnqueueCopyBufferToImage; + return func(q, s, d, s_o, d_o, r, el, e, ev); +} +cl_int CL_API_CALL clEnqueueCopyBufferRect(cl_command_queue q, cl_mem s_b, cl_mem d_b, const size_t *s_o, const size_t *d_o, const size_t *r, size_t s_r_p, size_t s_s_p, size_t d_r_p, size_t d_s_p, cl_uint el, const cl_event *e, cl_event *ev) { + auto func = 
mllm::OpenCLBackend::getSymbols()->clEnqueueCopyBufferRect; + return func(q, s_b, d_b, s_o, d_o, r, s_r_p, s_s_p, d_r_p, d_s_p, el, e, ev); +} +cl_int CL_API_CALL clWaitForEvents(cl_uint num_events, const cl_event *event_list) { + auto func = mllm::OpenCLBackend::getSymbols()->clWaitForEvents; + return func(num_events, event_list); +} + +cl_int CL_API_CALL clGetEventProfilingInfo(cl_event event, cl_profiling_info param_name, size_t param_value_size, void *param_value, size_t *param_value_size_ret) { + auto func = mllm::OpenCLBackend::getSymbols()->clGetEventProfilingInfo; + return func(event, param_name, param_value_size, param_value, param_value_size_ret); +} + +cl_int CL_API_CALL clReleaseEvent(cl_event event) { + auto func = mllm::OpenCLBackend::getSymbols()->clReleaseEvent; + return func(event); +} + +// 添加下面的两个新函数 +cl_int CL_API_CALL clEnqueueCopyImageToBuffer(cl_command_queue q, cl_mem s_img, cl_mem d_buf, const size_t *s_o, const size_t *r, size_t d_o, cl_uint el, const cl_event *e, cl_event *ev) { + auto func = mllm::OpenCLBackend::getSymbols()->clEnqueueCopyImageToBuffer; + if (func) { + return func(q, s_img, d_buf, s_o, r, d_o, el, e, ev); + } + return CL_INVALID_OPERATION; // Or another appropriate error +} + +cl_int CL_API_CALL clEnqueueCopyImage(cl_command_queue q, cl_mem s_img, cl_mem d_img, const size_t *s_o, const size_t *d_o, const size_t *r, cl_uint el, const cl_event *e, cl_event *ev) { + auto func = mllm::OpenCLBackend::getSymbols()->clEnqueueCopyImage; + if (func) { + return func(q, s_img, d_img, s_o, d_o, r, el, e, ev); + } + return CL_INVALID_OPERATION; // Or another appropriate error +} +cl_int CL_API_CALL clGetMemObjectInfo(cl_mem memobj, cl_mem_info param_name, size_t param_value_size, void *param_value, size_t *param_value_size_ret) { + auto func = mllm::OpenCLBackend::getSymbols()->clGetMemObjectInfo; + if (func) { + return func(memobj, param_name, param_value_size, param_value, param_value_size_ret); + } + return CL_INVALID_OPERATION; 
// 或者返回其他合适的错误码 +} +cl_program CL_API_CALL clCreateProgramWithBinary(cl_context context, cl_uint num_devices, const cl_device_id *device_list, const size_t *lengths, const unsigned char **binaries, cl_int *binary_status, cl_int *errcode_ret) { + auto func = mllm::OpenCLBackend::getSymbols()->clCreateProgramWithBinary; + return func(context, num_devices, device_list, lengths, binaries, binary_status, errcode_ret); +} + +cl_int CL_API_CALL clGetProgramInfo(cl_program program, cl_program_info param_name, size_t param_value_size, void *param_value, size_t *param_value_size_ret) { + auto func = mllm::OpenCLBackend::getSymbols()->clGetProgramInfo; + return func(program, param_name, param_value_size, param_value, param_value_size_ret); +} +cl_int CL_API_CALL clEnqueueFillBuffer(cl_command_queue command_queue, + cl_mem buffer, + const void *pattern, + size_t pattern_size, + size_t offset, + size_t size, + cl_uint num_events_in_wait_list, + const cl_event *event_wait_list, + cl_event *event) { + // 从 symbols_ 结构体中获取函数指针 + auto func = mllm::OpenCLBackend::getSymbols()->clEnqueueFillBuffer; + if (func) { + // 如果成功加载,则调用真实的 OpenCL 函数 + return func(command_queue, buffer, pattern, pattern_size, offset, size, num_events_in_wait_list, event_wait_list, event); + } + // 如果函数指针为空(例如在某些非常老的设备上不支持),返回一个错误码 + return CL_INVALID_OPERATION; +} +void *CL_API_CALL clEnqueueMapBuffer(cl_command_queue command_queue, cl_mem buffer, cl_bool blocking_map, cl_map_flags map_flags, size_t offset, size_t size, cl_uint num_events_in_wait_list, const cl_event *event_wait_list, cl_event *event, cl_int *errcode_ret) { + auto func = mllm::OpenCLBackend::getSymbols()->clEnqueueMapBuffer; + if (func) { + return func(command_queue, buffer, blocking_map, map_flags, offset, size, num_events_in_wait_list, event_wait_list, event, errcode_ret); + } + if (errcode_ret) { + *errcode_ret = CL_INVALID_OPERATION; + } + return nullptr; +} + +cl_int CL_API_CALL clEnqueueUnmapMemObject(cl_command_queue command_queue, 
cl_mem memobj, void *mapped_ptr, cl_uint num_events_in_wait_list, const cl_event *event_wait_list, cl_event *event) { + auto func = mllm::OpenCLBackend::getSymbols()->clEnqueueUnmapMemObject; + if (func) { + return func(command_queue, memobj, mapped_ptr, num_events_in_wait_list, event_wait_list, event); + } + return CL_INVALID_OPERATION; +} + +} // extern "C" +#endif // MLLM_TARGET_ANDROID + +std::shared_ptr OpenCLBackend::createMemoryManager(cl_context &context, cl_device_id &device) { +#if defined(MLLM_TARGET_ANDROID) + std::call_once(opencl_symbols_load_flag, [&]() { + loadOpenCLSymbols(); + }); +#endif + cl_int err; + cl_platform_id platform; + err = clGetPlatformIDs(1, &platform, nullptr); + check_cl_error(err, "clGetPlatformIDs"); + err = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 1, &device, nullptr); + if (err != CL_SUCCESS) { + std::cout << "No GPU found, trying CPU..." << std::endl; + err = clGetDeviceIDs(platform, CL_DEVICE_TYPE_CPU, 1, &device, nullptr); + } + check_cl_error(err, "clGetDeviceIDs"); + context = clCreateContext(nullptr, 1, &device, nullptr, nullptr, &err); + check_cl_error(err, "clCreateContext"); + return std::make_shared(context); +} + +OpenCLBackend::OpenCLBackend(const BackendConfig &config) : + Backend() { + mem_manager_ = createMemoryManager(context_, device_); + cl_int err; + // queue_ = clCreateCommandQueue(context_, device_, 0, &err); + cl_command_queue_properties properties = CL_QUEUE_PROFILING_ENABLE; + queue_ = clCreateCommandQueue(context_, device_, properties, &err); + check_cl_error(err, "clCreateCommandQueue"); + err = clGetDeviceInfo(device_, CL_DEVICE_IMAGE2D_MAX_WIDTH, sizeof(size_t), &this->max_image2d_width_, nullptr); + check_cl_error(err, "clGetDeviceInfo for CL_DEVICE_IMAGE2D_MAX_WIDTH"); + // 如果查询失败或返回0,可以设置一个保守的默认值 + if (this->max_image2d_width_ == 0) { + this->max_image2d_width_ = 8192; // 一个非常保守的值 + } + size_t extensions_size; + clGetDeviceInfo(device_, CL_DEVICE_EXTENSIONS, 0, nullptr, &extensions_size); + 
std::string extensions(extensions_size, ' '); + clGetDeviceInfo(device_, CL_DEVICE_EXTENSIONS, extensions_size, &extensions[0], nullptr); + if (extensions.find("cl_khr_fp16") != std::string::npos) { + this->has_fp16_support_ = true; + } else { + this->has_fp16_support_ = false; + } + if (extensions.find("cl_khr_image2d_from_buffer") != std::string::npos) { + this->image_from_buffer_supported_ = true; + clGetDeviceInfo(device_, CL_DEVICE_IMAGE_BASE_ADDRESS_ALIGNMENT, sizeof(cl_uint), &this->image_pitch_alignment_bytes_, nullptr); + if (this->image_pitch_alignment_bytes_ == 0) { + this->image_pitch_alignment_bytes_ = 1; + } + } else { + this->image_from_buffer_supported_ = false; + this->image_pitch_alignment_bytes_ = 0; + } +#if defined(MLLM_TARGET_ANDROID) + kernel_root_path_ = get_executable_dir(); +#else + kernel_root_path_ = get_kernel_path(__FILE__, "."); +#endif + const std::string convert_kernel_path = kernel_root_path_ + "/kernel/convert_fp.cl"; + std::string build_options = ""; + if (this->has_fp16_support_) { + build_options += " -DSUPPORTS_FP16"; + // std::cout << "OpenCL supports cl_khr_fp16." 
<< std::endl; + } + cl_program convert_program = getProgram(convert_kernel_path, build_options); + if (this->has_fp16_support_) { + kernel_fp32_to_fp16_buffer_ = clCreateKernel(convert_program, "convert_fp32_to_fp16_buffer_ext", &err); + check_cl_error(err, "CreateKernel: convert_fp32_to_fp16_buffer_ext"); + kernel_fp16_to_fp32_buffer_ = clCreateKernel(convert_program, "convert_fp16_to_fp32_buffer_ext", &err); + check_cl_error(err, "CreateKernel: convert_fp16_to_fp32_buffer_ext"); + kernel_fp32_to_fp16_image_ = clCreateKernel(convert_program, "convert_fp32_to_fp16_image2d", &err); + check_cl_error(err, "CreateKernel: convert_fp32_to_fp16_image2d"); + kernel_fp16_to_fp32_image_ = clCreateKernel(convert_program, "convert_fp16_to_fp32_image2d", &err); + check_cl_error(err, "CreateKernel: convert_fp16_to_fp32_image2d"); + } else { + kernel_fp32_to_fp16_buffer_ = clCreateKernel(convert_program, "convert_fp32_to_fp16_buffer_compat", &err); + check_cl_error(err, "CreateKernel: convert_fp32_to_fp16_buffer_compat"); + kernel_fp16_to_fp32_buffer_ = clCreateKernel(convert_program, "convert_fp16_to_fp32_buffer_compat", &err); + check_cl_error(err, "CreateKernel: convert_fp16_to_fp32_buffer_compat"); + } + sampler_ = clCreateSampler(context_, CL_FALSE, CL_ADDRESS_CLAMP_TO_EDGE, CL_FILTER_NEAREST, &err); + check_cl_error(err, "clCreateSampler in Backend"); + this->type_ = MLLM_OPENCL; + registerOps(); +} + +OpenCLBackend::~OpenCLBackend() { + if (mem_manager_) { + mem_manager_.reset(); + } + if (kernel_fp32_to_fp16_buffer_) clReleaseKernel(kernel_fp32_to_fp16_buffer_); + if (kernel_fp16_to_fp32_buffer_) clReleaseKernel(kernel_fp16_to_fp32_buffer_); + if (kernel_fp32_to_fp16_image_) clReleaseKernel(kernel_fp32_to_fp16_image_); + if (kernel_fp16_to_fp32_image_) clReleaseKernel(kernel_fp16_to_fp32_image_); + if (sampler_) clReleaseSampler(sampler_); + for (auto const &[key, program] : program_cache_) { + if (program) { + clReleaseProgram(program); + } + } + if (queue_) 
clReleaseCommandQueue(queue_); + if (context_) clReleaseContext(context_); +#if defined(MLLM_TARGET_ANDROID) + if (symbols_.handle) { + dlclose(symbols_.handle); + symbols_.handle = nullptr; + } +#endif +} + +void OpenCLBackend::finishQueue() { + if (queue_) { + clFinish(queue_); + } +} +cl_program OpenCLBackend::getProgram(const std::string &program_name, const std::string &build_options) { + // 使用 program_name 和 build_options 创建唯一的键,用于在内存中缓存 program 对象 + std::string cache_key = program_name + build_options; + auto it = program_cache_.find(cache_key); + if (it != program_cache_.end()) { + return it->second; + } + + // 1. 构建源文件和缓存文件的完整路径 + std::filesystem::path source_path(kernel_root_path_); + source_path /= program_name; + + // a. 创建缓存目录 (例如: opencl/kernel/cache) + std::filesystem::path cache_dir = source_path.parent_path() / "cache"; + std::error_code ec; + if (!std::filesystem::exists(cache_dir, ec)) { + std::filesystem::create_directories(cache_dir, ec); + } + + // b. 生成一个稳定且唯一的缓存文件名 (例如: kernel_add_cl_xxxxx.bin) + // 将路径中的 '/' 和 '.' 替换为 '_',并附上编译选项的哈希值 + std::string bin_file_name = program_name; + std::replace(bin_file_name.begin(), bin_file_name.end(), '/', '_'); + std::replace(bin_file_name.begin(), bin_file_name.end(), '.', '_'); + std::hash hasher; + std::string options_hash = std::to_string(hasher(build_options)); + std::filesystem::path bin_path = cache_dir / (bin_file_name + "_" + options_hash + ".bin"); + + cl_program program = nullptr; + cl_int err; + + // 2. 
尝试从二进制缓存文件加载程序 + std::ifstream bin_file(bin_path, std::ios::binary); + if (bin_file.is_open()) { + bin_file.seekg(0, std::ios::end); + size_t bin_size = bin_file.tellg(); + bin_file.seekg(0, std::ios::beg); + std::vector bin_data(bin_size); + bin_file.read(reinterpret_cast(bin_data.data()), bin_size); + bin_file.close(); + + const unsigned char *bin_ptr = bin_data.data(); + cl_int binary_status; + program = clCreateProgramWithBinary(context_, 1, &device_, &bin_size, &bin_ptr, &binary_status, &err); + + if (err == CL_SUCCESS && binary_status == CL_SUCCESS) { + // ===== [ 核心修正点 ] ===== + // 即使从二进制加载,也需要Build来使其对设备可执行 + err = clBuildProgram(program, 1, &device_, build_options.c_str(), nullptr, nullptr); + if (err != CL_SUCCESS) { + // 如果Build失败,说明缓存可能已损坏或不兼容,需要回退到从源码编译 + if (program) clReleaseProgram(program); // 释放无效的 program 对象 + program = nullptr; // 将 program 置空,以便后续逻辑能从源码重新编译 + } + // ===== [ 修正结束 ] ===== + } else { + // 如果加载失败,清空 program 对象 + if (program) clReleaseProgram(program); + program = nullptr; + } + } + + // 3. 如果从缓存加载失败 (program == nullptr),则从源码编译 + if (program == nullptr) { + std::string kernel_source = load_file_contents(source_path.c_str()); + const char *source_ptr = kernel_source.c_str(); + size_t source_len = kernel_source.length(); + program = clCreateProgramWithSource(context_, 1, &source_ptr, &source_len, &err); + check_cl_error(err, "clCreateProgramWithSource for " + program_name); + + err = clBuildProgram(program, 1, &device_, build_options.c_str(), nullptr, nullptr); + if (err != CL_SUCCESS) { + size_t log_size; + clGetProgramBuildInfo(program, device_, CL_PROGRAM_BUILD_LOG, 0, nullptr, &log_size); + std::vector log(log_size); + clGetProgramBuildInfo(program, device_, CL_PROGRAM_BUILD_LOG, log_size, log.data(), nullptr); + std::string error_msg = "Kernel build error for " + program_name + ":\n" + log.data(); + if (program) clReleaseProgram(program); + throw std::runtime_error(error_msg); + } + + // 4. 
编译成功后,获取二进制码并保存到缓存文件 + size_t binary_size; + clGetProgramInfo(program, CL_PROGRAM_BINARY_SIZES, sizeof(size_t), &binary_size, nullptr); + if (binary_size > 0) { + std::vector binary_data(binary_size); + unsigned char *bin_ptr = binary_data.data(); + // 注意:clGetProgramInfo的第三个参数应该是`sizeof(unsigned char*)` + clGetProgramInfo(program, CL_PROGRAM_BINARIES, sizeof(unsigned char *), &bin_ptr, nullptr); + + std::ofstream out_bin_file(bin_path, std::ios::binary); + if (out_bin_file.is_open()) { + out_bin_file.write(reinterpret_cast(binary_data.data()), binary_size); + out_bin_file.close(); + } else { + std::cerr << "Warning: Could not write to kernel cache file: " << bin_path << std::endl; + } + } + } + + // 将最终获取的 program 对象存入内存缓存 + program_cache_[cache_key] = program; + return program; +} +void OpenCLBackend::alloc_device(DeviceMemory &mem, DataType dtype) { + if (context_ == nullptr) throw std::runtime_error("OpenCL context is not initialized."); + cl_int err; + switch (mem.type) { + case MEM_TYPE_BUFFER: { + if (mem.image_width > 0 && mem.image_height > 0 && image_from_buffer_supported_) { + const size_t pixel_width = mem.image_width; + const cl_uint pitch_alignment = image_pitch_alignment_bytes_; + size_t row_pitch = pixel_width * 4 * sizeof(float); + if (pitch_alignment > 0 && row_pitch % pitch_alignment != 0) { + row_pitch = (row_pitch + pitch_alignment - 1) / pitch_alignment * pitch_alignment; + } + const size_t padded_buffer_size = mem.image_height * row_pitch; + mem.size_in_bytes = padded_buffer_size; + mem.image_row_pitch_in_bytes = row_pitch; + mem_manager_->alloc(&mem.handle, mem.size_in_bytes, 0); + if (mem.handle == nullptr) { + throw std::runtime_error("OpenCLMemoryManager failed to allocate buffer."); + } + } else if (mem.size_in_bytes > 0) { + mem_manager_->alloc(&mem.handle, mem.size_in_bytes, 0); + if (mem.handle == nullptr) { + throw std::runtime_error("OpenCLMemoryManager failed to allocate buffer."); + } + } + break; + } + case MEM_TYPE_IMAGE_2D: { + 
cl_image_format format = {CL_RGBA}; + switch (dtype) { + case MLLM_TYPE_F32: + format.image_channel_data_type = CL_FLOAT; + break; + case MLLM_TYPE_F16: + format.image_channel_data_type = CL_HALF_FLOAT; + break; + default: + throw std::runtime_error("Unsupported data type for Image2D creation."); + } + cl_image_desc desc = {}; + desc.image_type = CL_MEM_OBJECT_IMAGE2D; + desc.image_width = mem.image_width; + desc.image_height = mem.image_height; + if (desc.image_width > 0) { + mem.handle = clCreateImage(context_, CL_MEM_READ_WRITE, &format, &desc, nullptr, &err); + check_cl_error(err, "clCreateImage"); + } + break; + } + default: throw std::runtime_error("Unsupported device memory type for OpenCL."); + } +} + +void OpenCLBackend::free_device(DeviceMemory &mem) { + if (mem.handle != nullptr) { + switch (mem.type) { + case MEM_TYPE_BUFFER: + mem_manager_->free(mem.handle); + break; + case MEM_TYPE_IMAGE_2D: + clReleaseMemObject(static_cast(mem.handle)); + break; + default: + // 对于其他类型,也许也应该直接释放 + clReleaseMemObject(static_cast(mem.handle)); + break; + } + mem.handle = nullptr; + } +} + +void OpenCLBackend::copy_from_host(const DeviceMemory &dest, const void *src) { + if (dest.handle == nullptr || src == nullptr) return; + cl_mem dest_handle = static_cast(dest.handle); + switch (dest.type) { + case MEM_TYPE_BUFFER: { + if (dest.image_row_pitch_in_bytes > 0 && dest.image_height > 0) { + const size_t buffer_origin[3] = {0, 0, 0}; + const size_t host_origin[3] = {0, 0, 0}; + const size_t region_in_bytes[3] = { + dest.image_width * 4 * sizeof(float), + dest.image_height, + 1}; + clEnqueueWriteBufferRect( + queue_, dest_handle, CL_TRUE, buffer_origin, host_origin, + region_in_bytes, dest.image_row_pitch_in_bytes, 0, + dest.image_width * 4 * sizeof(float), 0, src, 0, nullptr, nullptr); + } else { + clEnqueueWriteBuffer(queue_, dest_handle, CL_TRUE, 0, dest.size_in_bytes, src, 0, nullptr, nullptr); + } + break; + } + case MEM_TYPE_IMAGE_2D: { + const size_t origin[3] = {0, 
0, 0}; + const size_t region[3] = {dest.image_width, dest.image_height, 1}; + clEnqueueWriteImage(queue_, dest_handle, CL_TRUE, origin, region, 0, 0, src, 0, nullptr, nullptr); + break; + } + default: throw std::runtime_error("Unsupported copy for this memory type."); + } +} + +void OpenCLBackend::copy_to_host(void *dest, const DeviceMemory &src) { + if (dest == nullptr || src.handle == nullptr) return; + cl_mem src_handle = static_cast(src.handle); + switch (src.type) { + case MEM_TYPE_BUFFER: + clEnqueueReadBuffer(queue_, src_handle, CL_TRUE, 0, src.size_in_bytes, dest, 0, nullptr, nullptr); + break; + case MEM_TYPE_IMAGE_2D: { + const size_t origin[3] = {0, 0, 0}; + const size_t region[3] = {src.image_width, src.image_height, 1}; + clEnqueueReadImage(queue_, src_handle, CL_TRUE, origin, region, 0, 0, dest, 0, nullptr, nullptr); + break; + } + default: throw std::runtime_error("Unsupported copy for this memory type."); + } +} + +cl_mem OpenCLBackend::get_cl_mem(const Tensor &tensor) const { + if (tensor.backend() != this) throw std::runtime_error("Tensor is not on this backend."); + const auto &mem = tensor.device_memory(); + if (mem.handle == nullptr) throw std::runtime_error("Tensor CL handle is null."); + return static_cast(mem.handle); +} + +Op *OpenCLBackend::opCreate(const OpParam &op_param, std::string name, int threadCount) { + OpType type = (OpType)op_param.find("type")->second; + auto it = op_creator_map_.find(type); + if (it == op_creator_map_.end()) { + return nullptr; + } + return it->second->create(op_param, this, name, threadCount); +} + +TensorFunction *OpenCLBackend::funcCreate(TensorFuncType type) { + throw std::runtime_error("funcCreate not implemented for OpenCLBackend"); +} + +void OpenCLBackend::registerOps() { + op_creator_map_[F_ADD] = std::make_shared(); + op_creator_map_[F_TTADD] = std::make_shared(); + op_creator_map_[F_SUB] = std::make_shared(); + op_creator_map_[F_TTSUB] = std::make_shared(); + op_creator_map_[F_MUL] = 
std::make_shared(); + op_creator_map_[F_TTMUL] = std::make_shared(); + op_creator_map_[F_DIV] = std::make_shared(); + op_creator_map_[F_DIVINT] = std::make_shared(); + op_creator_map_[F_TTDIV] = std::make_shared(); + op_creator_map_[F_MM] = std::make_shared(); + op_creator_map_[LINEAR] = std::make_shared(); + op_creator_map_[F_TRANPOSE] = std::make_shared(); + op_creator_map_[SOFTMAX] = std::make_shared(); + op_creator_map_[RMSNORM] = std::make_shared(); + op_creator_map_[EMBEDDING] = std::make_shared(); + op_creator_map_[SILU] = std::make_shared(); + op_creator_map_[F_VIEW] = std::make_shared(); + op_creator_map_[KVCACHE] = std::make_shared(); + op_creator_map_[ROPE] = std::make_shared(); + op_creator_map_[F_CLIP] = std::make_shared(); + op_creator_map_[F_FA2] = std::make_shared(); + op_creator_map_[F_SPLIT] = std::make_shared(); + op_creator_map_[F_TOPK] = std::make_shared(); + op_creator_map_[F_SUM] = std::make_shared(); + op_creator_map_[F_LIKE] = std::make_shared(); + op_creator_map_[F_CLIPTENSOR] = std::make_shared(); + op_creator_map_[F_SCATTERRADD] = std::make_shared(); + op_creator_map_[F_ARGSORT] = std::make_shared(); + op_creator_map_[F_BINCOUNT] = std::make_shared(); +} + +void OpenCLBackend::registerFuncs() { + std::cout << "OpenCLBackend funcs is abanded." 
<< std::endl; +} + +void OpenCLBackend::convert_fp_data(Tensor *src, Tensor *dest) { + if (src->device() != MLLM_OPENCL || dest->device() != MLLM_OPENCL) { + throw std::runtime_error("Type conversion on GPU requires both tensors to be on OpenCL backend."); + } + auto &src_mem = src->device_memory(); + auto &dest_mem = dest->device_memory(); + + if (src_mem.type == MEM_TYPE_BUFFER) { + if (dest_mem.type != MEM_TYPE_BUFFER) { + throw std::runtime_error("Destination must be a Buffer for Buffer conversion."); + } + cl_kernel kernel_to_use = nullptr; + + // 根据转换类型选择内核 + if (src->dtype() == MLLM_TYPE_F32 && dest->dtype() == MLLM_TYPE_F16) { + kernel_to_use = kernel_fp32_to_fp16_buffer_; + } else if (src->dtype() == MLLM_TYPE_F16 && dest->dtype() == MLLM_TYPE_F32) { + kernel_to_use = kernel_fp16_to_fp32_buffer_; + } else { + if (src->dtype() == dest->dtype()) return; + throw std::runtime_error("Unsupported Buffer conversion types."); + } + + cl_mem src_buf = get_cl_mem(*src); + cl_mem dest_buf = get_cl_mem(*dest); + const int count = src->count(); + + // ✨ **关键修正: 明确控制工作组大小** + if (count > 0) { + clSetKernelArg(kernel_to_use, 0, sizeof(cl_mem), &src_buf); + clSetKernelArg(kernel_to_use, 1, sizeof(cl_mem), &dest_buf); + clSetKernelArg(kernel_to_use, 2, sizeof(int), &count); + + // 1. 定义一个标准的工作组大小 + const size_t local_work_size = 256; + + // 2. 手动计算向上补齐的全局工作大小 + const size_t global_work_size = ((count + local_work_size - 1) / local_work_size) * local_work_size; + + // 3. 
使用明确的 local 和 global size 启动内核 + cl_event event; + cl_int err = clEnqueueNDRangeKernel(queue_, kernel_to_use, 1, nullptr, &global_work_size, &local_work_size, 0, nullptr, &event); + this->addProfilingEvent("convert_fp_data", event); + check_cl_error(err, "clEnqueueNDRangeKernel for type conversion"); + } + + } else if (src_mem.type == MEM_TYPE_IMAGE_2D) { + if (dest_mem.type != MEM_TYPE_IMAGE_2D) { + throw std::runtime_error("Destination must be an Image for Image conversion."); + } + cl_kernel kernel_to_use = nullptr; + if (src->dtype() == MLLM_TYPE_F32 && dest->dtype() == MLLM_TYPE_F16) { + kernel_to_use = kernel_fp32_to_fp16_image_; + } else if (src->dtype() == MLLM_TYPE_F16 && dest->dtype() == MLLM_TYPE_F32) { + kernel_to_use = kernel_fp16_to_fp32_image_; + } else { + if (src->dtype() == dest->dtype()) return; + throw std::runtime_error("Unsupported Image conversion types."); + } + if (!kernel_to_use) { + throw std::runtime_error("Image conversion kernel is not available. This may be due to lack of FP16 hardware support."); + } + + cl_mem src_img = get_cl_mem(*src); + cl_mem dest_img = get_cl_mem(*dest); + const int width = src_mem.image_width; + const int height = src_mem.image_height; + + clSetKernelArg(kernel_to_use, 0, sizeof(cl_sampler), &sampler_); + clSetKernelArg(kernel_to_use, 1, sizeof(cl_mem), &src_img); + clSetKernelArg(kernel_to_use, 2, sizeof(cl_mem), &dest_img); + clSetKernelArg(kernel_to_use, 3, sizeof(int), &width); + clSetKernelArg(kernel_to_use, 4, sizeof(int), &height); + + // 对于2D图像,通常让驱动选择最佳工作组大小是安全的,但也可以明确指定 + const size_t local_ws[2] = {16, 16}; + const size_t global_ws[2] = { + ((size_t)width + local_ws[0] - 1) / local_ws[0] * local_ws[0], + ((size_t)height + local_ws[1] - 1) / local_ws[1] * local_ws[1]}; + + clEnqueueNDRangeKernel(queue_, kernel_to_use, 2, nullptr, global_ws, local_ws, 0, nullptr, nullptr); + } +} +bool OpenCLBackend::load_from_file(Tensor *tensor, ParamLoader *loader) { + // 1. 
从加载器获取张量的元数据和文件句柄 + ParamMetadata metadata = loader->getParamMetadata(tensor->name()); + FILE *fp = loader->getInputStream(); + if (metadata.size == 0) { + return true; // 无需加载 + } + + // 2. 检查张量设备内存是否已就绪 + if (tensor->device_memory().handle == nullptr) { + // 如果内存未分配,此快速路径无法工作。 + // 这通常意味着调用顺序有问题,load() 之前应先 alloc()。 + return false; + } + + // 3. 获取OpenCL对象 + cl_command_queue queue = this->getQueue(); + cl_mem buffer = this->get_cl_mem(*tensor); + + // 4. 将GPU缓冲区映射到主机地址空间 (阻塞式,写入模式) + cl_int err; + void *mapped_ptr = clEnqueueMapBuffer(queue, buffer, CL_TRUE, CL_MAP_WRITE, 0, metadata.size, 0, nullptr, nullptr, &err); + check_cl_error(err, "OpenCLBackend::load_from_file clEnqueueMapBuffer"); + if (mapped_ptr == nullptr) { + fprintf(stderr, "Error: Failed to map OpenCL buffer for tensor '%s'.\n", tensor->name().c_str()); + return false; + } + + // 5. 将文件指针移动到权重数据的起始位置,并直接读入映射后的内存 + fseek(fp, metadata.offset, SEEK_SET); + size_t read_size = fread(mapped_ptr, 1, metadata.size, fp); + if (read_size != metadata.size) { + fprintf(stderr, "Error: File read failed for tensor '%s'. Expected %llu, got %zu.\n", tensor->name().c_str(), metadata.size, read_size); + // 出错也要确保解映射 + clEnqueueUnmapMemObject(queue, buffer, mapped_ptr, 0, nullptr, nullptr); + clFinish(queue); // 等待命令完成 + return false; + } + + // 6. 解除内存映射,并将控制权交还GPU + cl_event unmap_event; + err = clEnqueueUnmapMemObject(queue, buffer, mapped_ptr, 0, nullptr, &unmap_event); + check_cl_error(err, "OpenCLBackend::load_from_file clEnqueueUnmapMemObject"); + + // 7. 阻塞等待解映射操作完成,确保数据对GPU可见 + clWaitForEvents(1, &unmap_event); + clReleaseEvent(unmap_event); + + // 8. 
数据已在设备上,主机指针应失效,防止误用 + tensor->forceResetHostPointer(nullptr); + + return true; // 表示加载已由本函数成功处理 +} + +void registerOpenCLBackendCreator() { + InsertBackendCreatorMap(MLLM_OPENCL, std::make_shared()); +} + +std::vector OpenCLBackend::runLayer(Layer *layer, std::vector inputs, int N) { + throw std::runtime_error("runLayer not implemented for OpenCLBackend"); +} + +std::vector OpenCLBackend::runOp(Op *op, std::vector inputs, std::vector out_names, bool in_place) { + Module *module = inputs.empty() ? Module::llm_model_ptr : inputs[0].module(); + static map> empty_activation_tensors; + map> &activation_tensors = module ? module->activation_tensors : empty_activation_tensors; + if (module && module->doTrace) { + if (module->tracedFlag) { + vector results = {}; + for (auto &name : out_names) results.push_back(*activation_tensors[name]); + return results; + } + for (auto &input : inputs) { + if (input.shouldInGraphs() && activation_tensors.find(input.name()) == activation_tensors.end()) { + activation_tensors[input.name()] = std::make_shared(op->backend()); + activation_tensors[input.name()]->setName(input.name()); + activation_tensors[input.name()]->setModule(module); + } + } + for (const auto &out_name : out_names) { + if (activation_tensors.find(out_name) == activation_tensors.end()) { + activation_tensors[out_name] = std::make_shared(op->backend()); + activation_tensors[out_name]->setName(out_name); + activation_tensors[out_name]->setModule(module); + } + } + vector> inPtrs; + for (auto &input : inputs) { + inPtrs.push_back(input.shouldInGraphs() ? 
activation_tensors[input.name()] : + std::shared_ptr(&input, [](Tensor *) {})); + } + vector> outPtrs = {}; + for (auto &name : out_names) outPtrs.push_back(activation_tensors[name]); + op->setUp(inPtrs, outPtrs); + vector results = {}; + for (auto &name : out_names) results.push_back(*activation_tensors[name]); + return results; + } + vector> input_tensors; + for (auto &input : inputs) { + input_tensors.push_back(std::shared_ptr(&input, [](Tensor *) {})); + } + vector> out_tensors; + if (!in_place) { + for (const auto &out_name : out_names) { + auto out_tensor = std::make_shared(op->backend()); + out_tensor->setName(out_name); + out_tensors.push_back(out_tensor); + } + } else { + for (size_t i = 0; i < input_tensors.size() && i < out_names.size(); ++i) { + input_tensors[i]->setName(out_names[i]); + out_tensors.push_back(input_tensors[i]); + } + } + op->reshape(input_tensors, out_tensors); + op->setUp(input_tensors, out_tensors); + op->execute(input_tensors, out_tensors); + vector results; + for (const auto &out_tensor : out_tensors) { + results.push_back(*out_tensor); +#ifdef DEBUGSAVETENSOR + out_tensor->cpu(); + if (out_tensor->dtype() == MLLM_TYPE_F32) { + out_tensor->saveData(); + } + if (out_tensor->dtype() == MLLM_TYPE_F16) { + out_tensor->saveData(); + } + out_tensor->cl(); +#endif + } + return results; +} + +std::vector OpenCLBackend::runForward(Module *module, std::vector inputs, std::vector args) { + if (Module::llm_model_ptr && (Module::llm_model_ptr->doLoad || Module::llm_model_ptr->doChangeBn)) { + auto outputs = module->Forward(inputs, args); + return outputs; + } + uint64_t time_start, time_end; + bool ouilter_flag = (inputs[0].ttype() == TensorType::INPUT_TENSOR); + if (ouilter_flag) { + for (int i = 0; i < inputs.size(); i++) { + auto &input = inputs[i]; + input.setModule(module); + input.setTtype(TensorType::NORMAL_TENSOR); + } + Module::llm_model_ptr = module; + if (module->prefilling_token_size_ == 0) { + module->prefilling_token_size_ = 
inputs[0].sequence() * inputs[0].batch(); + } else if (module->decoding_token_size_ == 0) { + module->decoding_token_size_ = inputs[0].sequence() * inputs[0].batch(); + } + time_start = mllm_time_us(); + // exe_times.clear(); + } + auto output = module->Forward(inputs, args); + if (ouilter_flag) { + this->finishQueue(); + time_end = mllm_time_us(); + double inference_time_ = (time_end - time_start) / 1000.0F; +#ifdef DEBUGOPTIME + this->reportProfilingResult(); + std::cout << "One token total inference time: " << inference_time_ << " ms" << std::endl; +#endif + module->inference_times_.push_back(inference_time_); + } + return output; +} + +#if defined(MLLM_TARGET_ANDROID) +void OpenCLBackend::loadOpenCLSymbols() { + static const std::vector android_paths = { + "libOpenCL.so", + "libGLES_mali.so", + "libmali.so", + "libOpenCL-pixel.so", + "/system/vendor/lib64/libOpenCL.so", + "/system/lib64/libOpenCL.so", + "/system/vendor/lib64/egl/libGLES_mali.so", + "/system/lib64/egl/libGLES_mali.so"}; + + for (const auto &path : android_paths) { + symbols_.handle = dlopen(path.c_str(), RTLD_NOW | RTLD_LOCAL); + if (symbols_.handle) break; + } + + if (!symbols_.handle) { + throw std::runtime_error("Failed to load OpenCL library on Android"); + } + +#define LOAD_FUNC(name) \ + symbols_.name = reinterpret_cast(dlsym(symbols_.handle, #name)); \ + if (!symbols_.name) { \ + std::cerr << "Failed to load: " << #name << std::endl; \ + } + + LOAD_FUNC(clGetPlatformIDs); + LOAD_FUNC(clGetDeviceIDs); + LOAD_FUNC(clGetDeviceInfo); + LOAD_FUNC(clCreateContext); + LOAD_FUNC(clCreateCommandQueue); + LOAD_FUNC(clReleaseCommandQueue); + LOAD_FUNC(clReleaseContext); + LOAD_FUNC(clCreateProgramWithSource); + LOAD_FUNC(clBuildProgram); + LOAD_FUNC(clGetProgramBuildInfo); + LOAD_FUNC(clCreateProgramWithBinary); + LOAD_FUNC(clGetProgramInfo); + LOAD_FUNC(clReleaseProgram); + LOAD_FUNC(clCreateKernel); + LOAD_FUNC(clReleaseKernel); + LOAD_FUNC(clSetKernelArg); + LOAD_FUNC(clEnqueueNDRangeKernel); + 
LOAD_FUNC(clCreateBuffer); + LOAD_FUNC(clReleaseMemObject); + LOAD_FUNC(clEnqueueWriteBuffer); + LOAD_FUNC(clEnqueueReadBuffer); + LOAD_FUNC(clFinish); + LOAD_FUNC(clCreateSampler); + LOAD_FUNC(clReleaseSampler); + LOAD_FUNC(clCreateImage); + LOAD_FUNC(clEnqueueWriteImage); + LOAD_FUNC(clEnqueueReadImage); + LOAD_FUNC(clEnqueueWriteBufferRect); + LOAD_FUNC(clEnqueueReadBufferRect); + LOAD_FUNC(clReleaseDevice); + LOAD_FUNC(clRetainDevice); + LOAD_FUNC(clCreateCommandQueueWithProperties); + LOAD_FUNC(clRetainCommandQueue); + LOAD_FUNC(clSVMAlloc); + LOAD_FUNC(clSVMFree); + LOAD_FUNC(clEnqueueSVMMap); + LOAD_FUNC(clEnqueueSVMUnmap); + LOAD_FUNC(clSetKernelArgSVMPointer); + LOAD_FUNC(clCreateSubBuffer); + LOAD_FUNC(clEnqueueCopyBuffer); + LOAD_FUNC(clEnqueueCopyBufferToImage); + LOAD_FUNC(clEnqueueCopyBufferRect); + LOAD_FUNC(clWaitForEvents); + LOAD_FUNC(clGetEventProfilingInfo); + LOAD_FUNC(clReleaseEvent); + LOAD_FUNC(clEnqueueCopyImageToBuffer); + LOAD_FUNC(clEnqueueCopyImage); + LOAD_FUNC(clEnqueueCopyBufferRect); + LOAD_FUNC(clGetMemObjectInfo); + LOAD_FUNC(clEnqueueMapBuffer); + LOAD_FUNC(clEnqueueUnmapMemObject); + +#undef LOAD_FUNC +} +#endif + +void OpenCLBackend::addProfilingEvent(const std::string &op_name, cl_event event) { + profiling_events_.push_back(event); + event_op_names_[event] = op_name; +} + +void OpenCLBackend::reportProfilingResult() { + if (profiling_events_.empty()) { + return; + } + + clWaitForEvents(profiling_events_.size(), profiling_events_.data()); + + std::cout << "--- OpenCL Kernel Profiling Report ---" << std::endl; + double total_duration_ms = 0.0; + for (cl_event event : profiling_events_) { + cl_ulong start_time, end_time; + clGetEventProfilingInfo(event, CL_PROFILING_COMMAND_START, sizeof(cl_ulong), &start_time, NULL); + clGetEventProfilingInfo(event, CL_PROFILING_COMMAND_END, sizeof(cl_ulong), &end_time, NULL); + + double duration_ms = (end_time - start_time) / 1000000.0; + std::cout << "OpenCL Operator [" << 
event_op_names_[event] << "] took " << duration_ms << " ms" << std::endl; + total_duration_ms += duration_ms; + clReleaseEvent(event); + } + std::cout << "---- Total Duration: " << total_duration_ms << " ms ---" << std::endl; + + // 清空 + profiling_events_.clear(); + event_op_names_.clear(); +} +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/opencl/OpenCLBackend.hpp b/mllm/backends/opencl/OpenCLBackend.hpp new file mode 100644 index 000000000..e33c4a277 --- /dev/null +++ b/mllm/backends/opencl/OpenCLBackend.hpp @@ -0,0 +1,129 @@ +#ifndef OPENCL_BACKEND_H +#define OPENCL_BACKEND_H + +#include "Backend.hpp" +#include "OpenCLMemoryManager.hpp" +#include +#include +#include +#include +#include +#ifdef __APPLE__ +#include +#else +#include +#endif + +namespace mllm { +struct DeviceMemory; +class Layer; + +// 【关键修正 1】: 在类定义之前,提供 OpenCLSymbols 的前向声明 +#if defined(MLLM_TARGET_ANDROID) +struct OpenCLSymbols; +#endif + +class OpenCLBackend : public Backend { +public: + class Creator { + public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const = 0; + virtual ~Creator() = default; + }; + + OpenCLBackend(const BackendConfig &config); + ~OpenCLBackend() override; + + cl_context getContext() const { + return context_; + } + cl_device_id getDevice() const { + return device_; + } + cl_command_queue getQueue() const { + return queue_; + } + void finishQueue(); + cl_program getProgram(const std::string &program_name, const std::string &build_options = ""); + + Op *opCreate(const OpParam &op_param, std::string name = "", int threadCount = 4) override; + TensorFunction *funcCreate(TensorFuncType type) override; + void alloc_device(DeviceMemory &mem, DataType dtype) override; + void free_device(DeviceMemory &mem) override; + void copy_from_host(const DeviceMemory &dest, const void *src) override; + void copy_to_host(void *dest, const DeviceMemory &src) override; + std::vector runLayer(Layer *layer, std::vector inputs, int 
N) override; + std::vector runOp(Op *op, std::vector input, std::vector out_names, bool in_place) override; + std::vector runForward(Module *module, std::vector inputs, std::vector args) override; + void registerOps() override; + void registerFuncs() override; + void convert_fp_data(Tensor *src, Tensor *dest) override; + bool load_from_file(Tensor *tensor, ParamLoader *loader) override; + + cl_mem get_cl_mem(const Tensor &tensor) const; + bool is_image_from_buffer_supported() const { + return image_from_buffer_supported_; + } + cl_uint get_image_pitch_alignment_in_bytes() const { + return image_pitch_alignment_bytes_; + } + // bool &has_fp16_support() { + // return has_fp16_support_; + // } + bool has_fp16_support() const { // 1. 移除引用'&' 2. 增加const关键字 + return has_fp16_support_; + } + void addProfilingEvent(const std::string &op_name, cl_event event); + void reportProfilingResult(); // 新增分析结果的函数 + + size_t getMaxImage2dWidth() const { + return max_image2d_width_; + } + +#if defined(MLLM_TARGET_ANDROID) +public: + // 【关键修正 2】: getSymbols 的声明保持 public,返回一个指向前向声明类型的指针 + static OpenCLSymbols *getSymbols(); + +private: + static void loadOpenCLSymbols(); + static OpenCLSymbols symbols_; +#endif + +private: + static std::shared_ptr createMemoryManager(cl_context &context, cl_device_id &device); + + cl_context context_ = nullptr; + cl_device_id device_ = nullptr; + cl_command_queue queue_ = nullptr; + + std::map program_cache_; + std::map> op_creator_map_; + + bool image_from_buffer_supported_ = false; + cl_uint image_pitch_alignment_bytes_ = 0; + bool has_fp16_support_ = false; + + size_t max_image2d_width_ = 0; + std::string kernel_root_path_; + + cl_kernel kernel_fp32_to_fp16_buffer_ = nullptr; + cl_kernel kernel_fp16_to_fp32_buffer_ = nullptr; + cl_kernel kernel_fp32_to_fp16_image_ = nullptr; + cl_kernel kernel_fp16_to_fp32_image_ = nullptr; + cl_sampler sampler_ = nullptr; + + std::vector profiling_events_; + std::map event_op_names_; +}; + +class OpenCLBackendCreator 
: public BackendCreator { +public: + Backend *create(BackendConfig config) override { + return new OpenCLBackend(config); + } +}; + +} // namespace mllm + +#endif // OPENCL_BACKEND_H \ No newline at end of file diff --git a/mllm/backends/opencl/OpenCLMemoryManager.cpp b/mllm/backends/opencl/OpenCLMemoryManager.cpp new file mode 100644 index 000000000..d94e0e0d0 --- /dev/null +++ b/mllm/backends/opencl/OpenCLMemoryManager.cpp @@ -0,0 +1,65 @@ + + +#include "OpenCLMemoryManager.hpp" +#include +#include // 用于调试输出 + +namespace mllm { + +OpenCLMemoryManager::OpenCLMemoryManager(cl_context context) : + context_(context) { + assert(context_ != nullptr); +} + +OpenCLMemoryManager::~OpenCLMemoryManager() { + std::lock_guard lock(pool_mutex_); + for (auto const &[size, buffer] : memory_pool_) { + if (buffer) { + clReleaseMemObject(buffer); + } + } + memory_pool_.clear(); +} + +void OpenCLMemoryManager::alloc(void **ptr, size_t size, size_t alignment) { + assert(ptr != nullptr); + assert(size > 0); + + std::lock_guard lock(pool_mutex_); + auto it = memory_pool_.lower_bound(size); + + if (it != memory_pool_.end()) { + // 找到了合适的内存块 + cl_mem buffer = it->second; // 获取内存句柄 + memory_pool_.erase(it); // 从池中移除 + *ptr = buffer; // 将句柄赋给指针 + // std::cout << "[OpenCL Memory Pool] Reused buffer of size " << it->first << " for request of " << size << std::endl; + } else { + // 如果池中没有合适的内存块,则分配新的内存 + cl_int err; + cl_mem buffer = clCreateBuffer(context_, CL_MEM_READ_WRITE, size, nullptr, &err); + check_cl_error(err, "OpenCLMemoryManager::clCreateBuffer (new allocation)"); + *ptr = buffer; + // std::cout << "[OpenCL Memory Pool] Allocated new buffer of size " << size << std::endl; + } +} + +void OpenCLMemoryManager::free(void *ptr) { + if (ptr != nullptr) { + std::lock_guard lock(pool_mutex_); + cl_mem buffer = static_cast(ptr); + + size_t buffer_size = 0; + cl_int err = clGetMemObjectInfo(buffer, CL_MEM_SIZE, sizeof(size_t), &buffer_size, nullptr); + check_cl_error(err, 
"OpenCLMemoryManager::clGetMemObjectInfo (on free)"); + + if (buffer_size > 0) { + memory_pool_.insert({buffer_size, buffer}); + // std::cout << "[OpenCL Memory Pool] Returned buffer of size " << buffer_size << " to pool." << std::endl; + } else { + clReleaseMemObject(buffer); + } + } +} + +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/opencl/OpenCLMemoryManager.hpp b/mllm/backends/opencl/OpenCLMemoryManager.hpp new file mode 100644 index 000000000..b63587afb --- /dev/null +++ b/mllm/backends/opencl/OpenCLMemoryManager.hpp @@ -0,0 +1,38 @@ +#ifndef OPENCL_MEMORY_MANAGER_H +#define OPENCL_MEMORY_MANAGER_H + +#include "MemoryManager.hpp" +#include +#include // 引入map用于内存池 +#include // 引入mutex用于线程安全 + +#ifdef __APPLE__ +#include +#else +#include +#endif + +void check_cl_error(cl_int err, const std::string &operation); + +namespace mllm { + +class OpenCLMemoryManager : public MemoryManager { +public: + explicit OpenCLMemoryManager(cl_context context); + + ~OpenCLMemoryManager() override; + + void alloc(void **ptr, size_t size, size_t alignment) override; + void free(void *ptr) override; + +private: + cl_context context_; // 需要OpenCL上下文来创建缓冲区 + + std::multimap memory_pool_; + + std::mutex pool_mutex_; +}; + +} // namespace mllm + +#endif // OPENCL_MEMORY_MANAGER_H \ No newline at end of file diff --git a/mllm/backends/opencl/kernel/add.cl b/mllm/backends/opencl/kernel/add.cl new file mode 100644 index 000000000..1f46ea6c3 --- /dev/null +++ b/mllm/backends/opencl/kernel/add.cl @@ -0,0 +1,171 @@ +__kernel void add_float( + __global const float *A, + __global const float *B, + __global float *C) { + size_t index = get_global_id(0); + C[index] = A[index] + B[index]; +} + +/* + * 定义一个全局的采样器(sampler)。采样器是用于配置如何从图像对象中读取数据的。 + * CLK_NORMALIZED_COORDS_FALSE: 使用非归一化的整数坐标(像素坐标),而不是[0.0, 1.0]的浮点坐标。 + * CLK_ADDRESS_CLAMP_TO_EDGE: 当读取坐标超出图像边界时,自动返回最接近的边界上的像素值,可有效防止越界读取。 + * CLK_FILTER_NEAREST: 读取最接近坐标的那个像素,不做任何插值,这对于数据计算是必须的。 + */ +// const sampler_t 
sampler = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_NEAREST; +/** + * @brief 使用 image2d_t 对两个 float 类型的张量进行高效的元素级相加。 + * @param sampler 用于读取图像的采样器对象。 + * @param inputA 输入张量 A,作为只读的 2D 图像对象。 + * @param inputB 输入张量 B,作为只读的 2D 图像对象。 + * @param output 输出张量 C,作为只写的 2D 图像对象。 + * @param width 图像的逻辑宽度(单位:像素)。 + * @param height 图像的逻辑高度(单位:像素)。 + */ +__kernel void add_float_image2d( + sampler_t sampler, // 采样器现在是第一个参数 + __read_only image2d_t inputA, + __read_only image2d_t inputB, + __write_only image2d_t output, + const int width, + const int height) { + const int2 pos = (int2)(get_global_id(0), get_global_id(1)); + + if (pos.x >= width || pos.y >= height) { + return; + } + + float4 inA = read_imagef(inputA, sampler, pos); + float4 inB = read_imagef(inputB, sampler, pos); + float4 result = inA + inB; + write_imagef(output, pos, result); +} + +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +/** + * @brief [FP16 Buffer版] 使用向量指令对两个 half 类型的张量进行高效的元素级相加。 + * 内核利用 vload4/vstore4 一次处理4个 half 元素。 + * @param A 输入张量 A (__global const half*) + * @param B 输入张量 B (__global const half*) + * @param C 输出张量 C (__global half*) + */ +__kernel void add_fp16_vector( + __global const half *A, + __global const half *B, + __global half *C) { + const int i = get_global_id(0); + + // 高效地加载 4 个 half (共64位) 数据 + half4 a_vec = vload4(i, A); + half4 b_vec = vload4(i, B); + + // 向量加法 + half4 c_vec = a_vec + b_vec; + + // 高效地写回 4 个 half 数据 + vstore4(c_vec, i, C); +} + +/** + * @brief [FP16 Image版] 使用 image2d_t 对两个 half 类型的张量进行高效的元素级相加。 + * 利用硬件纹理缓存和 read_imageh/write_imageh 函数。 + * @param sampler 用于读取图像的采样器对象。 + * @param inputA 输入张量 A,作为只读的 2D 图像对象 (数据类型为 half)。 + * @param inputB 输入张量 B,作为只读的 2D 图像对象 (数据类型为 half)。 + * @param output 输出张量 C,作为只写的 2D 图像对象 (数据类型为 half)。 + * @param width 图像的逻辑宽度(单位:像素)。 + * @param height 图像的逻辑高度(单位:像素)。 + */ +__kernel void add_fp16_image2d( + sampler_t sampler, + __read_only image2d_t inputA, + __read_only image2d_t inputB, + __write_only image2d_t 
output, + const int width, + const int height) { + const int2 pos = (int2)(get_global_id(0), get_global_id(1)); + + if (pos.x >= width || pos.y >= height) { + return; + } + + // 使用 read_imageh 读取 half4 向量 + half4 inA = read_imageh(inputA, sampler, pos); + half4 inB = read_imageh(inputB, sampler, pos); + + half4 result = inA + inB; + + // 使用 write_imageh 写回 half4 向量 + write_imageh(output, pos, result); +} + +// ================================================================== +// 4. Tensor + Scalar 内核 +// ================================================================== + +/** + * @brief [FP32 Buffer版] 将一个标量 `B` 加到张量 `A` 的每个元素上。 + */ +__kernel void add_scalar_float( + __global const float *A, + const float B, + __global float *C) { + size_t index = get_global_id(0); + C[index] = A[index] + B; +} + +/** + * @brief [FP16 Buffer版] 使用向量指令将一个标量 `B` 加到张量 `A` 的每个元素上。 + */ +__kernel void add_scalar_fp16_vector( + __global const half *A, + const half B, + __global half *C) { + const int i = get_global_id(0); + half4 a_vec = vload4(i, A); + // 将标量 B 广播成一个 half4 向量 + half4 b_vec = (half4)(B); + half4 c_vec = a_vec + b_vec; + vstore4(c_vec, i, C); +} + +/** + * @brief [FP32 Image版] 将一个标量 `B` 加到张量 `A` 的每个像素上。 + */ +__kernel void add_scalar_float_image2d( + sampler_t sampler, + __read_only image2d_t inputA, + const float B, + __write_only image2d_t output, + const int width, + const int height) { + const int2 pos = (int2)(get_global_id(0), get_global_id(1)); + if (pos.x >= width || pos.y >= height) { return; } + + float4 inA = read_imagef(inputA, sampler, pos); + // 将标量 B 广播成一个 float4 向量 + float4 inB = (float4)(B); + float4 result = inA + inB; + write_imagef(output, pos, result); +} + +/** + * @brief [FP16 Image版] 将一个标量 `B` 加到张量 `A` 的每个像素上。 + */ +__kernel void add_scalar_fp16_image2d( + sampler_t sampler, + __read_only image2d_t inputA, + const half B, + __write_only image2d_t output, + const int width, + const int height) { + const int2 pos = (int2)(get_global_id(0), 
get_global_id(1)); + if (pos.x >= width || pos.y >= height) { return; } + + half4 inA = read_imageh(inputA, sampler, pos); + // 将标量 B 广播成一个 half4 向量 + half4 inB = (half4)(B); + half4 result = inA + inB; + write_imageh(output, pos, result); +} \ No newline at end of file diff --git a/mllm/backends/opencl/kernel/argsort.cl b/mllm/backends/opencl/kernel/argsort.cl new file mode 100644 index 000000000..ca0451afc --- /dev/null +++ b/mllm/backends/opencl/kernel/argsort.cl @@ -0,0 +1,256 @@ +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +// ============================================================================ +// 通用内核 (FP32 和 索引初始化) +// ============================================================================ + +/** + * @brief 初始化索引数组,值为 0, 1, 2, ..., N-1。 + */ +__kernel void init_indices( + __global int *indices, + const int N) { + const int gid = get_global_id(0); + const int local_idx = gid % N; + indices[gid] = local_idx; +} + +/** + * @brief Bitonic Sort 的一个步骤 (FP32 版本)。 + */ +__kernel void bitonic_argsort_step_fp32( + __global float *values, + __global int *indices, + const int N, + const int stage, + const int pass, + const int descending) { + const int i = get_global_id(0); + const int row = get_global_id(1); + int k = 1 << pass; + int j = i & (k - 1); + int ix = (i << 1) - j; + int iy = ix + k; + int sort_increasing = ((ix >> (stage + 1)) & 1) == 0; + if (descending) { + sort_increasing = !sort_increasing; + } + + if (ix < N) { // 仅当比较对的第一个元素在界内时才处理 + const int row_offset = row * N; + const int global_ix = row_offset + ix; + const int global_iy = row_offset + iy; + + float val1 = values[global_ix]; + // 如果 iy 越界, 根据排序方向,将其值视作 INFINITY (无穷大) 或 -INFINITY (负无穷大) + float val2 = (iy < N) ? values[global_iy] : (descending ? -INFINITY : INFINITY); + + if ((val1 > val2) == sort_increasing) { + // 当需要交换时,只对真实存在的元素进行写操作 + int index_from_ix = indices[global_ix]; + int index_from_iy = (iy < N) ? 
indices[global_iy] : -1; // 用-1作为无效索引哨兵 + + values[global_ix] = val2; + indices[global_ix] = index_from_iy; + + // 只有当 iy 也在界内时,才把 val1 的值和原始索引写入 iy 位置 + if (iy < N) { + values[global_iy] = val1; + indices[global_iy] = index_from_ix; + } + } + } +} + +/** + * @brief 将排序后的 int 类型索引转换为 float 类型并写入输出。 + */ +__kernel void cast_indices_to_fp32( + __global const int *sorted_indices, + __global float *output) { + const int gid = get_global_id(0); + output[gid] = (float)sorted_indices[gid]; +} + +// ============================================================================ +// FP16 内核 (根据硬件支持情况进行条件编译) +// ============================================================================ + +#if defined(SUPPORTS_FP16) + +// =================== 方案A: 设备支持原生 FP16 ===================== + +/** + * @brief Bitonic Sort 的一个步骤 (原生FP16版本)。 + */ +__kernel void bitonic_argsort_step_fp16( + __global half *values, + __global int *indices, + const int N, + const int stage, + const int pass, + const int descending) { + const int i = get_global_id(0); + const int row = get_global_id(1); + int k = 1 << pass; + int j = i & (k - 1); + int ix = (i << 1) - j; + int iy = ix + k; + int sort_increasing = ((ix >> (stage + 1)) & 1) == 0; + if (descending) { + sort_increasing = !sort_increasing; + } + + if (ix < N) { + const int row_offset = row * N; + const int global_ix = row_offset + ix; + const int global_iy = row_offset + iy; + + half val1 = values[global_ix]; + + // ====================== ✨✨✨ 核心修正区域 as_half ✨✨✨ ====================== + // 修正: 显式地将整型字面量转换为 ushort (16-bit) 来解决 as_half 的歧义 + half infinity_h = as_half((ushort)0x7C00); + half neg_infinity_h = as_half((ushort)0xFC00); + // ========================================================================== + + half val2 = (iy < N) ? values[global_iy] : (descending ? neg_infinity_h : infinity_h); + + if ((val1 > val2) == sort_increasing) { + int index_from_ix = indices[global_ix]; + int index_from_iy = (iy < N) ? 
indices[global_iy] : -1; + + values[global_ix] = val2; + indices[global_ix] = index_from_iy; + + if (iy < N) { + values[global_iy] = val1; + indices[global_iy] = index_from_ix; + } + } + } +} + +/** + * @brief 将排序后的 int 类型索引转换为 half 类型并写入输出。 + */ +__kernel void cast_indices_to_fp16( + __global const int *sorted_indices, + __global half *output) { + const int gid = get_global_id(0); + output[gid] = (half)sorted_indices[gid]; +} + +#else + +// =================== 方案B: 软件模拟 FP16 (兼容版) ===================== +// ✨✨✨ 核心修正区域:严格仿照您的榜样代码 ✨✨✨ + +// 辅助函数: 将 ushort (存储着 half 的二进制位) 转换为 float +// (该函数仿照您项目中的 kernel/convert_fp.cl 和 scatter_add.cl) +static float ushort_to_float(ushort u) { + uint sign = (u >> 15) & 1; + uint exponent = (u >> 10) & 0x1F; + uint mantissa = u & 0x3FF; + uint result_uint; + if (exponent == 0) { + if (mantissa == 0) { + result_uint = sign << 31; + } else { + exponent = 1; + while ((mantissa & 0x400) == 0) { + mantissa <<= 1; + exponent++; + } + mantissa &= 0x3FF; + exponent = 127 - 15 - exponent + 1; + result_uint = (sign << 31) | (exponent << 23) | (mantissa << 13); + } + } else if (exponent == 0x1F) { + result_uint = (sign << 31) | (0xFF << 23) | (mantissa << 13); + } else { + exponent = exponent - 15 + 127; + result_uint = (sign << 31) | (exponent << 23) | (mantissa << 13); + } + return as_float(result_uint); +} + +// 辅助函数: 将 float 转换为 ushort +static ushort float_to_ushort(float f) { + uint u = as_uint(f); + uint sign = (u >> 16) & 0x8000; + int exponent = ((u >> 23) & 0xFF) - 127; + uint mantissa = u & 0x7FFFFF; + if (exponent > 15) { + return sign | 0x7C00; + } // Infinity + if (exponent < -14) { + mantissa = (mantissa | 0x800000) >> (1 - exponent); + return sign | (mantissa >> 13); + } + return sign | ((exponent + 15) << 10) | (mantissa >> 13); +} + +/** + * @brief Bitonic Sort 的一个步骤 (兼容版FP16)。 + * 使用与原生版相同的名称,但输入输出为 ushort*。 + */ +__kernel void bitonic_argsort_step_fp16( + __global ushort *values, // 数据以 ushort 形式存储 + __global int *indices, + 
const int N, + const int stage, + const int pass, + const int descending) { + const int i = get_global_id(0); + const int row = get_global_id(1); + int k = 1 << pass; + int j = i & (k - 1); + int ix = (i << 1) - j; + int iy = ix + k; + int sort_increasing = ((ix >> (stage + 1)) & 1) == 0; + if (descending) { + sort_increasing = !sort_increasing; + } + + if (ix < N) { + const int row_offset = row * N; + const int global_ix = row_offset + ix; + const int global_iy = row_offset + iy; + + // 核心: 读出 ushort, 转换为 float 进行比较 + float val1 = ushort_to_float(values[global_ix]); + float val2 = (iy < N) ? ushort_to_float(values[global_iy]) : (descending ? -INFINITY : INFINITY); + + if ((val1 > val2) == sort_increasing) { + // 交换时,直接交换原始的 ushort 值 + ushort val_from_ix = values[global_ix]; + // 如果 iy 越界,需要一个代表无穷大的 ushort 值 + ushort val_from_iy = (iy < N) ? values[global_iy] : float_to_ushort(val2); + values[global_ix] = val_from_iy; + + // 交换索引 + int temp_idx_from_ix = indices[global_ix]; + int temp_idx_from_iy = (iy < N) ? 
indices[global_iy] : -1; + indices[global_ix] = temp_idx_from_iy; + + if (iy < N) { + values[global_iy] = val_from_ix; + indices[global_iy] = temp_idx_from_ix; + } + } + } +} + +/** + * @brief 将排序后的 int 类型索引转换为 ushort 类型并写入输出 (兼容版)。 + */ +__kernel void cast_indices_to_fp16( + __global const int *sorted_indices, + __global ushort *output) { // 输出是 ushort* + const int gid = get_global_id(0); + // 先将 int 转为 float, 再将 float 的二进制位表示转为 ushort + output[gid] = float_to_ushort((float)sorted_indices[gid]); +} + +#endif // SUPPORTS_FP16 \ No newline at end of file diff --git a/mllm/backends/opencl/kernel/bincount.cl b/mllm/backends/opencl/kernel/bincount.cl new file mode 100644 index 000000000..8aa74430b --- /dev/null +++ b/mllm/backends/opencl/kernel/bincount.cl @@ -0,0 +1,107 @@ +// opencl/kernel/bincount.cl + +#if defined(SUPPORTS_FP16) +#pragma OPENCL EXTENSION cl_khr_fp16 : enable +#endif + +// ===================== FP16 (ushort) <-> FP32 (float) 转换辅助函数 ===================== +// 只有在不支持原生 FP16 时才需要 +#if !defined(SUPPORTS_FP16) +static float ushort_to_float(ushort u) { + uint sign = (u >> 15) & 1; + uint exponent = (u >> 10) & 0x1F; + uint mantissa = u & 0x3FF; + if (exponent == 0x1F) { return as_float((sign << 31) | (0xFF << 23) | (mantissa << 13)); } + if (exponent == 0) { + if (mantissa == 0) { return as_float(sign << 31); } + exponent = 1; + while ((mantissa & 0x400) == 0) { + mantissa <<= 1; + exponent++; + } + mantissa &= 0x3FF; + exponent = 127 - 15 - exponent + 1; + return as_float((sign << 31) | (exponent << 23) | (mantissa << 13)); + } + return as_float((sign << 31) | ((exponent - 15 + 127) << 23) | (mantissa << 13)); +} + +static ushort float_to_ushort(float f) { + uint u = as_uint(f); + uint sign = (u >> 16) & 0x8000; + int exponent = ((u >> 23) & 0xFF) - 127; + uint mantissa = u & 0x7FFFFF; + if (exponent > 15) { return sign | 0x7C00; } + if (exponent < -14) { + mantissa = (mantissa | 0x800000) >> (1 - exponent); + return sign | (mantissa >> 13); + } + return 
sign | ((exponent + 15) << 10) | (mantissa >> 13); +} +#endif + +// ========================== 内核 1: 执行 Bincount 到整数缓冲区 ========================== +__kernel void bincount_count( +#if defined(SUPPORTS_FP16) + __global const half *input, +#else + __global const ushort *input, +#endif + __global int *out_counts, + const int size, + const int max_val) { + + for (int i = get_global_id(0); i < size; i += get_global_size(0)) { + float val; +#if defined(SUPPORTS_FP16) + val = input[i]; +#else + val = ushort_to_float(input[i]); +#endif + int index = (int)val; + if (index >= 0 && index <= max_val) { + atomic_add(&out_counts[index], 1); + } + } +} + +// Float32 版本的 bincount 内核 +__kernel void bincount_count_fp32( + __global const float *input, + __global int *out_counts, + const int size, + const int max_val) { + for (int i = get_global_id(0); i < size; i += get_global_size(0)) { + int index = (int)input[i]; + if (index >= 0 && index <= max_val) { + atomic_add(&out_counts[index], 1); + } + } +} + +// ========================== 内核 2: 将整数计数转换为 FP32/FP16 ========================== +__kernel void cast_int_to_float( + __global const int *int_buffer, + __global float *float_buffer, + const int count) { + for (int i = get_global_id(0); i < count; i += get_global_size(0)) { + float_buffer[i] = (float)int_buffer[i]; + } +} + +__kernel void cast_int_to_half( + __global const int *int_buffer, +#if defined(SUPPORTS_FP16) + __global half *half_buffer, +#else + __global ushort *half_buffer, +#endif + const int count) { + for (int i = get_global_id(0); i < count; i += get_global_size(0)) { +#if defined(SUPPORTS_FP16) + half_buffer[i] = (half)int_buffer[i]; +#else + half_buffer[i] = float_to_ushort((float)int_buffer[i]); +#endif + } +} \ No newline at end of file diff --git a/mllm/backends/opencl/kernel/cliptensor.cl b/mllm/backends/opencl/kernel/cliptensor.cl new file mode 100644 index 000000000..7a83fced0 --- /dev/null +++ b/mllm/backends/opencl/kernel/cliptensor.cl @@ -0,0 +1,102 @@ 
+#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +// Kernel for clipping along the SEQUENCE dimension +__kernel void clip_sequence_fp32( + __global const float *src_data, + __global const float *indices, + __global float *dst_data, + const int B, const int H, const int S_in, const int D, const int S_out) { + // Each work-item handles one element in the H*D plane of the output + const int d = get_global_id(0); + const int h = get_global_id(1); + const int bs = get_global_id(2); // Combined batch and output sequence index + + const int b = bs / S_out; + const int s_out = bs % S_out; + + if (d >= D || h >= H || b >= B) { + return; + } + + // Get the source sequence index from the indices tensor + const int s_in = (int)indices[s_out]; + + // Assuming BSHD layout for both input and output + // src_offset for [b, s_in, h, d] + size_t src_offset = ((size_t)b * S_in + s_in) * H * D + (size_t)h * D + d; + // dst_offset for [b, s_out, h, d] + size_t dst_offset = ((size_t)b * S_out + s_out) * H * D + (size_t)h * D + d; + + dst_data[dst_offset] = src_data[src_offset]; +} + +// Kernel for clipping along the DIMENSION dimension +__kernel void clip_dimension_fp32( + __global const float *src_data, + __global const float *indices, + __global float *dst_data, + const int B, const int H, const int S, const int D_in, const int D_out) { + // Each work-item handles one element in the output tensor + const int d_out = get_global_id(0); + const int s = get_global_id(1); + const int bh = get_global_id(2); + const int b = bh / H; + const int h = bh % H; + + if (d_out >= D_out || s >= S || b >= B) { + return; + } + + // Get the source dimension index from the indices tensor + const int d_in = (int)indices[d_out]; + + // Assuming BSHD layout for both input and output + // src_offset for [b, s, h, d_in] + size_t src_offset = ((size_t)b * S + s) * H * D_in + (size_t)h * D_in + d_in; + // dst_offset for [b, s, h, d_out] + size_t dst_offset = ((size_t)b * S + s) * H * D_out + (size_t)h * D_out + 
d_out; + + dst_data[dst_offset] = src_data[src_offset]; +} + +// ========================== FP16 Versions ============================== + +__kernel void clip_sequence_fp16( + __global const half *src_data, + __global const half *indices, + __global half *dst_data, + const int B, const int H, const int S_in, const int D, const int S_out) { + const int d = get_global_id(0); + const int h = get_global_id(1); + const int bs = get_global_id(2); + const int b = bs / S_out; + const int s_out = bs % S_out; + + if (d >= D || h >= H || b >= B) { + return; + } + const int s_in = (int)indices[s_out]; + size_t src_offset = ((size_t)b * S_in + s_in) * H * D + (size_t)h * D + d; + size_t dst_offset = ((size_t)b * S_out + s_out) * H * D + (size_t)h * D + d; + dst_data[dst_offset] = src_data[src_offset]; +} + +__kernel void clip_dimension_fp16( + __global const half *src_data, + __global const half *indices, + __global half *dst_data, + const int B, const int H, const int S, const int D_in, const int D_out) { + const int d_out = get_global_id(0); + const int s = get_global_id(1); + const int bh = get_global_id(2); + const int b = bh / H; + const int h = bh % H; + + if (d_out >= D_out || s >= S || b >= B) { + return; + } + const int d_in = (int)indices[d_out]; + size_t src_offset = ((size_t)b * S + s) * H * D_in + (size_t)h * D_in + d_in; + size_t dst_offset = ((size_t)b * S + s) * H * D_out + (size_t)h * D_out + d_out; + dst_data[dst_offset] = src_data[src_offset]; +} \ No newline at end of file diff --git a/mllm/backends/opencl/kernel/convert_fp.cl b/mllm/backends/opencl/kernel/convert_fp.cl new file mode 100644 index 000000000..ed3d7db09 --- /dev/null +++ b/mllm/backends/opencl/kernel/convert_fp.cl @@ -0,0 +1,160 @@ +// =================================================================== +// **部分一:兼容模式内核(适用于无原生FP16支持的硬件)** +// =================================================================== + +// 将一个32位float稳健地转换为16位ushort (模拟FP16) +ushort float_to_half_bits(const float f) { + 
// 将 float 的位模式存储在 uint 中 + const uint u = as_uint(f); + // 1. 正确提取各部分 + // 提取符号位 (bit 31) + const uint sign_bit = u & 0x80000000; + // 提取指数位 (bits 30-23) + int exponent = (int)((u >> 23) & 0xff) - 127; // FP32 bias is 127 + // 提取尾数位 (bits 22-0) + uint mantissa = u & 0x007fffff; + // 将32位符号位移动到16位的位置 + const ushort half_sign = (ushort)(sign_bit >> 16); + // 2. 处理特殊值:NaN 和 Infinity + if (exponent == 128) { // FP32 exponent is all 1s + if (mantissa == 0) { + // FP32 is Infinity, convert to FP16 Infinity + return half_sign | 0x7c00; + } else { + // FP32 is NaN, convert to FP16 NaN + // 保留尾数最高位以标识为NaN + return half_sign | 0x7c00 | (ushort)(mantissa >> 13); + } + } + // 3. 处理上溢到 Infinity + if (exponent > 15) { // Exponent too large for FP16 normal + return half_sign | 0x7c00; // Overflow -> Infinity + } + // 4. 处理正规数 (Normal Numbers) + if (exponent >= -14) { + // 将FP32指数转换为FP16指数 (FP16 bias is 15) + ushort half_exponent = (ushort)(exponent + 15); + // 添加被截断部分的最高位,以实现简单的“四舍五入” + mantissa = mantissa + (1 << 12); + // 如果舍入导致尾数上溢,需要调整指数 + if (mantissa & 0x00800000) { + mantissa = 0; + half_exponent++; + if (half_exponent == 31) { // 溢出到 Infinity + return half_sign | 0x7c00; + } + } + ushort half_mantissa = (ushort)(mantissa >> 13); + return half_sign | (half_exponent << 10) | half_mantissa; + } + // 5. 处理非正规数 (Denormalized Numbers) + if (exponent >= -24) { + // 加上隐藏位,然后根据指数进行移位 + mantissa = (mantissa | 0x00800000) >> (14 - exponent); + // 同样可以增加舍入逻辑 + mantissa += 1; + return half_sign | (ushort)(mantissa >> 1); + } + // 6. 
处理下溢到 0 + return half_sign; // Underflow -> Zero +} + +// [已优化] 将一个16位ushort (模拟FP16) 转换回32位float +float half_bits_to_float(const ushort h) { + const uint sign = (uint)(h & 0x8000) << 16; + uint exponent = (h & 0x7c00) >> 10; + uint mantissa = h & 0x03ff; + + if (exponent == 0x1f) { // Infinity or NaN + // 直接构造对应的FP32 Infinity/NaN + return as_float(sign | 0x7f800000 | (mantissa << 13)); + } + + if (exponent == 0) { // Zero or Denormal + if (mantissa == 0) { // Zero + return as_float(sign); + } + // Denormalized: 找到隐藏的 '1' + while ((mantissa & 0x0400) == 0) { + mantissa <<= 1; + exponent--; + } + exponent++; // 补偿循环多减的一次 + mantissa &= 0x03ff; // 移除隐藏的 '1' + } + + // 转换为FP32的指数和尾数 + exponent = exponent + (127 - 15); + mantissa = mantissa << 13; + + return as_float(sign | (exponent << 23) | mantissa); +} + +__kernel void convert_fp32_to_fp16_buffer_compat( // 兼容版内核 + __global const float *input, + __global ushort *output, + const int count) { + int i = get_global_id(0); + if (i < count) output[i] = float_to_half_bits(input[i]); +} + +__kernel void convert_fp16_to_fp32_buffer_compat( // 兼容版内核 + __global const ushort *input, + __global float *output, + const int count) { + int i = get_global_id(0); + if (i < count) output[i] = half_bits_to_float(input[i]); +} + +// =================================================================== +// **部分二:高性能内核(需要硬件原生支持FP16)** +// =================================================================== +#ifdef SUPPORTS_FP16 + +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +// --- Buffer Kernels (高性能版) --- +__kernel void convert_fp32_to_fp16_buffer_ext( + __global const float *input, + __global half *output, + const int count) { + int i = get_global_id(0); + if (i < count) output[i] = convert_half(input[i]); +} + +__kernel void convert_fp16_to_fp32_buffer_ext( + __global const half *input, + __global float *output, + const int count) { + int i = get_global_id(0); + if (i < count) output[i] = vload_half(i, input); +} + +// --- Image 
Kernels (高性能版) --- +__kernel void convert_fp32_to_fp16_image2d( + sampler_t sampler, + __read_only image2d_t input_fp32, + __write_only image2d_t output_fp16, + const int width, + const int height) { + const int2 pos = (int2)(get_global_id(0), get_global_id(1)); + if (pos.x >= width || pos.y >= height) return; + float4 data_fp32 = read_imagef(input_fp32, sampler, pos); + half4 data_fp16 = convert_half4(data_fp32); + write_imageh(output_fp16, pos, data_fp16); +} + +__kernel void convert_fp16_to_fp32_image2d( + sampler_t sampler, + __read_only image2d_t input_fp16, + __write_only image2d_t output_fp32, + const int width, + const int height) { + const int2 pos = (int2)(get_global_id(0), get_global_id(1)); + if (pos.x >= width || pos.y >= height) return; + half4 data_fp16 = read_imageh(input_fp16, sampler, pos); + float4 data_fp32 = convert_float4(data_fp16); + write_imagef(output_fp32, pos, data_fp32); +} + +#endif // SUPPORTS_FP16 \ No newline at end of file diff --git a/mllm/backends/opencl/kernel/div.cl b/mllm/backends/opencl/kernel/div.cl new file mode 100644 index 000000000..708bae408 --- /dev/null +++ b/mllm/backends/opencl/kernel/div.cl @@ -0,0 +1,362 @@ +// 文件名: kernel/div.cl + +#pragma OPENCL EXTENSION cl_khr_fp16 : enable +// ================================================================== +// 1. Tensor / Tensor Division Kernels (FP32) +// ================================================================== + +__kernel void div_float( + __global const float *A, + __global const float *B, + __global float *C, + const int b_dim, + const int a_dim) { + size_t index = get_global_id(0); + float b_val; + + // If b_dim is 1 and a_dim is greater than 1, apply broadcasting + if (b_dim == 1 && a_dim > 1) { + // Correct Broadcasting Logic: + // 1. Get the current d coordinate from the global index of A + int d_coord = index % a_dim; + // 2. Get the BSH part of the index for A + size_t a_bsh_index = index / a_dim; + // 3. 
Since B's dimension is 1, its BSH index is the same as its global index + size_t b_index = a_bsh_index; + // This is the correct index for B + b_val = B[b_index]; + } else { + // Original element-wise division + b_val = B[index]; + } + + // 添加保护防止除以零 + C[index] = b_val == 0.0f ? 0.0f : A[index] / b_val; +} + +__kernel void div_float_image2d( + sampler_t sampler, + __read_only image2d_t inputA, + __read_only image2d_t inputB, + __write_only image2d_t output, + const int width, + const int height) { + const int2 pos = (int2)(get_global_id(0), get_global_id(1)); + if (pos.x >= width || pos.y >= height) { return; } + float4 inA = read_imagef(inputA, sampler, pos); + float4 inB = read_imagef(inputB, sampler, pos); + // 添加保护防止除以零 + float4 result = (inB.x == 0.0f && inB.y == 0.0f && inB.z == 0.0f && inB.w == 0.0f) ? + (float4)(0.0f) : + inA / inB; + write_imagef(output, pos, result); +} + +// ================================================================== +// 2. Tensor / Scalar Division Kernels (FP32) +// ================================================================== + +__kernel void div_scalar_float( + __global const float *A, + const float B, + __global float *C) { + size_t index = get_global_id(0); + C[index] = B == 0.0f ? 0.0f : A[index] / B; +} + +__kernel void div_scalar_float_image2d( + sampler_t sampler, + __read_only image2d_t inputA, + const float B, + __write_only image2d_t output, + const int width, + const int height) { + const int2 pos = (int2)(get_global_id(0), get_global_id(1)); + if (pos.x >= width || pos.y >= height) { return; } + float4 inA = read_imagef(inputA, sampler, pos); + float4 inB = (float4)(B); + float4 result = (B == 0.0f) ? 
(float4)(0.0f) : inA / inB; + write_imagef(output, pos, result); +} + +// ================================================================== +// FP16 Kernels Implementations with Preprocessor Guards +// ================================================================== + +#ifdef SUPPORTS_FP16 + +__kernel void div_fp16_vector( + __global const half *A, + __global const half *B, + __global half *C, + const int b_dim, + const int a_dim) { + const int i = get_global_id(0); + // If b_dim is 1 and a_dim is greater than 1, apply broadcasting + if (b_dim == 1 && a_dim > 1) { + // Broadcasting case with correct indexing + const int start_idx_A = i * 4; + for (int j = 0; j < 4; ++j) { + int current_idx_A = start_idx_A + j; + // Correct Broadcasting Logic: + size_t a_bsh_index = current_idx_A / a_dim; + size_t b_index = a_bsh_index; // This is the correct index for B + + half b_val = B[b_index]; + half a_val = A[current_idx_A]; + C[current_idx_A] = (b_val == (half)0.0h) ? (half)0.0h : a_val / b_val; + } + } else { + // Original element-wise vectorized division + half4 a_vec = vload4(i, A); + half4 b_vec = vload4(i, B); + // 添加保护防止除以零 (转换为 float 进行比较) + half4 c_vec = (all(convert_float4(b_vec) == 0.0f)) ? + (half4)(0.0h) : + a_vec / b_vec; + vstore4(c_vec, i, C); + } +} + +// 新增的标量内核,用于处理任意尺寸的张量 +__kernel void div_fp16_scalar( + __global const half *A, + __global const half *B, + __global half *C, + const int b_dim, + const int a_dim) { + size_t index = get_global_id(0); + half b_val; + if (b_dim == 1 && a_dim > 1) { + size_t a_bsh_index = index / a_dim; + size_t b_index = a_bsh_index; + b_val = B[b_index]; + } else { + b_val = B[index]; + } + C[index] = (b_val == (half)0.0h) ? 
(half)0.0h : A[index] / b_val; +} + +__kernel void div_fp16_image2d( + sampler_t sampler, + __read_only image2d_t inputA, + __read_only image2d_t inputB, + __write_only image2d_t output, + const int width, + const int height) { + const int2 pos = (int2)(get_global_id(0), get_global_id(1)); + if (pos.x >= width || pos.y >= height) { return; } + half4 inA = read_imageh(inputA, sampler, pos); + half4 inB = read_imageh(inputB, sampler, pos); + half4 result = (all(convert_float4(inB) == 0.0f)) ? (half4)(0.0h) : inA / inB; + write_imageh(output, pos, result); +} + +__kernel void div_scalar_fp16_vector( + __global const half *A, + const float B, + __global half *C) { + const int i = get_global_id(0); + float4 a_vec_f = convert_float4(vload4(i, A)); + + // B 已经是 float,无需转换 + float4 c_vec_f = a_vec_f / B; + vstore4(convert_half4_rte(c_vec_f), i, C); +} + +__kernel void div_scalar_fp16_image2d( + sampler_t sampler, + __read_only image2d_t inputA, + const float B, + __write_only image2d_t output, + const int width, + const int height) { + const int2 pos = (int2)(get_global_id(0), get_global_id(1)); + if (pos.x >= width || pos.y >= height) { + return; + } + + // 1. 读取 half4 数据并立即提升到 float4 + float4 inA_f = convert_float4(read_imageh(inputA, sampler, pos)); + // 2. 在 float 精度下进行计算 + float4 result_f = inA_f / B; + // 3. 将结果转换回 half4 并写入 + write_imageh(output, pos, convert_half4_rte(result_f)); +} + +#else // !SUPPORTS_FP16 + +// ===================== B. FP16实现 (软件回退) ===================== +// 当硬件不支持时, 不使用'half'类型. +// 我们用'ushort'来存储16位数据, 并手动转换到'float'进行计算. 
+inline float half_to_float(ushort h) { + const uint s = (h >> 15) & 0x0001; + const uint e = (h >> 10) & 0x001f; + const uint f = h & 0x03ff; + uint float_val; + if (e == 0) { + if (f == 0) { // +0 or -0 + float_val = s << 31; + } else { // Denormalized number to normalized float + uint f_shifted = f; + uint e_shifted = e; + while ((f_shifted & 0x0400) == 0) { + f_shifted <<= 1; + e_shifted--; + } + e_shifted++; + f_shifted &= ~0x0400; + float_val = (s << 31) | ((e_shifted + 112) << 23) | (f_shifted << 13); + } + } else if (e == 31) { // Inf or NaN + if (f == 0) { // +/- Infinity + float_val = (s << 31) | 0x7f800000; + } else { // NaN + float_val = (s << 31) | 0x7f800000 | (f << 13); + } + } else { // Normalized number + float_val = (s << 31) | ((e + 112) << 23) | (f << 13); + } + + return as_float(float_val); +} + +// 帮助函数: 将 float 转换为 ushort (存储为half) +inline ushort float_to_half(float f) { + uint u = as_uint(f); + uint s = (u >> 16) & 0x8000; + int e = ((u >> 23) & 0xFF) - 127; + uint f_mant = u & 0x7FFFFF; + + if (e > 15) return (ushort)(s | 0x7C00); + if (e < -14) { + f_mant |= 0x800000; + return (ushort)(s | (f_mant >> (-e - 14))); + } + return (ushort)(s | ((e + 15) << 10) | (f_mant >> 13)); +} + +__kernel void div_fp16_vector( + __global const ushort *A, + __global const ushort *B, + __global ushort *C, + const int b_dim, + const int a_dim) { + const int i = get_global_id(0); + // If b_dim is 1 and a_dim is greater than 1, apply broadcasting + if (b_dim == 1 && a_dim > 1) { + const int start_idx_A = i * 4; + for (int j = 0; j < 4; ++j) { + int current_idx_A = start_idx_A + j; + // Correct Broadcasting Logic: + size_t a_bsh_index = current_idx_A / a_dim; + size_t b_index = a_bsh_index; + + float a_val = half_to_float(A[current_idx_A]); + float b_val = half_to_float(B[b_index]); + + float result = b_val == 0.0f ? 
+ 0.0f : + a_val / b_val; + C[current_idx_A] = float_to_half(result); + } + } else { + // Original element-wise division for software fallback + const int start_idx = i * 4; + float4 a_vec = (float4)(half_to_float(A[start_idx]), half_to_float(A[start_idx + 1]), half_to_float(A[start_idx + 2]), half_to_float(A[start_idx + 3])); + float4 b_vec = (float4)(half_to_float(B[start_idx]), half_to_float(B[start_idx + 1]), half_to_float(B[start_idx + 2]), half_to_float(B[start_idx + 3])); + + float4 c_vec; + c_vec.x = b_vec.x == 0.0f ? 0.0f : a_vec.x / b_vec.x; + c_vec.y = b_vec.y == 0.0f ? + 0.0f : + a_vec.y / b_vec.y; + c_vec.z = b_vec.z == 0.0f ? 0.0f : a_vec.z / b_vec.z; + c_vec.w = b_vec.w == 0.0f ? 0.0f : a_vec.w / b_vec.w; + + C[start_idx] = float_to_half(c_vec.x); + C[start_idx + 1] = float_to_half(c_vec.y); + C[start_idx + 2] = float_to_half(c_vec.z); + C[start_idx + 3] = float_to_half(c_vec.w); + } +} + +// 新增的标量内核的软件回退实现 +__kernel void div_fp16_scalar( + __global const ushort *A, + __global const ushort *B, + __global ushort *C, + const int b_dim, + const int a_dim) { + size_t index = get_global_id(0); + float b_val; + + if (b_dim == 1 && a_dim > 1) { + size_t a_bsh_index = index / a_dim; + size_t b_index = a_bsh_index; + b_val = half_to_float(B[b_index]); + } else { + b_val = half_to_float(B[index]); + } + + float a_val = half_to_float(A[index]); + float result = b_val == 0.0f ? 0.0f : a_val / b_val; + C[index] = float_to_half(result); +} + +__kernel void div_fp16_image2d( + sampler_t sampler, + __read_only image2d_t inputA, + __read_only image2d_t inputB, + __write_only image2d_t output, + const int width, + const int height) { + // 这是一个存根(stub)实现, 因为不支持cl_khr_fp16的平台 + // 通常也不支持CL_HALF_FLOAT图像格式. + // 主机代码中该路径已通过&& false禁用. + // 仅用于保证内核能被创建. 
+ return; +} + +__kernel void div_scalar_fp16_vector( + __global const ushort *A, + const float B, + __global ushort *C) { + // 每个工作项依然负责4个元素,但我们将逐个处理它们 + const int i = get_global_id(0) * 4; + // 临时存储4个float类型的结果 + float results[4]; + + // 核心安全检查 + if (B == 0.0f) { + results[0] = 0.0f; + results[1] = 0.0f; + results[2] = 0.0f; + results[3] = 0.0f; + } else { + // 【关键改动】像 flash_attention.cl 一样,逐个加载、转换、计算 + results[0] = half_to_float(A[i + 0]) / B; + results[1] = half_to_float(A[i + 1]) / B; + results[2] = half_to_float(A[i + 2]) / B; + results[3] = half_to_float(A[i + 3]) / B; + } + + // 逐个转换回 half 并存储 + C[i + 0] = float_to_half(results[0]); + C[i + 1] = float_to_half(results[1]); + C[i + 2] = float_to_half(results[2]); + C[i + 3] = float_to_half(results[3]); +} + +__kernel void div_scalar_fp16_image2d( + sampler_t sampler, + __read_only image2d_t inputA, + const ushort B, + __write_only image2d_t output, + const int width, + const int height) { + // 存根(stub)实现. + return; +} + +#endif // SUPPORTS_FP16 \ No newline at end of file diff --git a/mllm/backends/opencl/kernel/div_int.cl b/mllm/backends/opencl/kernel/div_int.cl new file mode 100644 index 000000000..3956d7a05 --- /dev/null +++ b/mllm/backends/opencl/kernel/div_int.cl @@ -0,0 +1,181 @@ +// 文件名: kernel/div_int.cl + +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +// ================================================================== +// 1. Tensor / Scalar Division with Integer Truncation (FP32) +// ================================================================== + +__kernel void div_int_scalar_float( + __global const float *A, + const float B, + __global float *C) { + size_t index = get_global_id(0); + int val_a = convert_int_rtz(A[index]); + int val_b = convert_int_rtz(B); + int result = (val_b == 0) ? 
0 : (val_a / val_b); + C[index] = (float)result; +} + +__kernel void div_int_scalar_float_image2d( + sampler_t sampler, + __read_only image2d_t inputA, + const float B, + __write_only image2d_t output, + const int width, + const int height) { + const int2 pos = (int2)(get_global_id(0), get_global_id(1)); + if (pos.x >= width || pos.y >= height) { return; } + int4 val_a = convert_int4_rtz(read_imagef(inputA, sampler, pos)); + int val_b = convert_int_rtz(B); + int4 result = (val_b == 0) ? (int4)(0) : (val_a / val_b); + write_imagef(output, pos, convert_float4(result)); +} + +// ================================================================== +// 2. FP16 Kernels (Hardware vs. Software Fallback) +// ================================================================== + +#ifdef SUPPORTS_FP16 + +// A. FP16实现 (硬件原生支持) +__kernel void div_int_scalar_fp16_vector( + __global const half *A, + const float B, + __global half *C) { + const int i = get_global_id(0); + float4 val_a_f = convert_float4(vload4(i, A)); + int4 val_a = convert_int4_rtz(val_a_f); + int val_b = convert_int_rtz(B); + int4 result = (val_b == 0) ? (int4)(0) : (val_a / val_b); + vstore4(convert_half4_rte(result), i, C); +} + +__kernel void div_int_scalar_fp16_image2d( + sampler_t sampler, + __read_only image2d_t inputA, + const float B, + __write_only image2d_t output, + const int width, + const int height) { + const int2 pos = (int2)(get_global_id(0), get_global_id(1)); + if (pos.x >= width || pos.y >= height) { return; } + float4 val_a_f = convert_float4(read_imageh(inputA, sampler, pos)); + int4 val_a = convert_int4_rtz(val_a_f); + int val_b = convert_int_rtz(B); + int4 result = (val_b == 0) ? 
(int4)(0) : (val_a / val_b); + write_imageh(output, pos, convert_half4_rte(result)); +} + +// ADDED: Scalar Kernel (for any data count) +__kernel void div_int_scalar_fp16( + __global const half *A, + const float B, + __global half *C) { + size_t index = get_global_id(0); + float val_a_f = convert_float(A[index]); + int val_a = convert_int_rtz(val_a_f); + int val_b = convert_int_rtz(B); + int result = (val_b == 0) ? 0 : (val_a / val_b); + C[index] = convert_half_rte((float)result); +} + +#else // !SUPPORTS_FP16 + +// B. FP16实现 (软件回退) +inline float half_to_float(ushort h) { + const uint s = (h >> 15) & 0x0001; + const uint e = (h >> 10) & 0x001f; + const uint f = h & 0x03ff; + uint float_val; + if (e == 0) { + if (f == 0) { + float_val = s << 31; + } else { + uint f_shifted = f; + uint e_shifted = e; + while ((f_shifted & 0x0400) == 0) { + f_shifted <<= 1; + e_shifted--; + } + e_shifted++; + f_shifted &= ~0x0400; + float_val = (s << 31) | ((e_shifted + 112) << 23) | (f_shifted << 13); + } + } else if (e == 31) { + if (f == 0) { + float_val = (s << 31) | 0x7f800000; + } else { + float_val = (s << 31) | 0x7f800000 | (f << 13); + } + } else { + float_val = (s << 31) | ((e + 112) << 23) | (f << 13); + } + return as_float(float_val); +} + +inline ushort float_to_half(float f) { + uint u = as_uint(f); + uint s = (u >> 16) & 0x8000; + int e = ((u >> 23) & 0xFF) - 127; + uint f_mant = u & 0x7FFFFF; + + if (e > 15) return (ushort)(s | 0x7C00); + if (e < -14) { + f_mant |= 0x800000; + return (ushort)(s | (f_mant >> (-e - 14))); + } + return (ushort)(s | ((e + 15) << 10) | (f_mant >> 13)); +} + +__kernel void div_int_scalar_fp16_vector( + __global const ushort *A, + const float B, + __global ushort *C) { + const int i = get_global_id(0) * 4; + int val_b = convert_int_rtz(B); + if (val_b == 0) { + C[i + 0] = float_to_half(0.0f); + C[i + 1] = float_to_half(0.0f); + C[i + 2] = float_to_half(0.0f); + C[i + 3] = float_to_half(0.0f); + return; + } + int val_a0 = 
convert_int_rtz(half_to_float(A[i + 0])); + int val_a1 = convert_int_rtz(half_to_float(A[i + 1])); + int val_a2 = convert_int_rtz(half_to_float(A[i + 2])); + int val_a3 = convert_int_rtz(half_to_float(A[i + 3])); + int res0 = val_a0 / val_b; + int res1 = val_a1 / val_b; + int res2 = val_a2 / val_b; + int res3 = val_a3 / val_b; + C[i + 0] = float_to_half((float)res0); + C[i + 1] = float_to_half((float)res1); + C[i + 2] = float_to_half((float)res2); + C[i + 3] = float_to_half((float)res3); +} + +__kernel void div_int_scalar_fp16_image2d( + sampler_t sampler, + __read_only image2d_t inputA, + const float B, + __write_only image2d_t output, + const int width, + const int height) { + // Stub implementation as this path is not used for non-aligned data + return; +} + +// ADDED: Scalar Kernel Fallback (for any data count) +__kernel void div_int_scalar_fp16( + __global const ushort *A, + const float B, + __global ushort *C) { + size_t index = get_global_id(0); + int val_a = convert_int_rtz(half_to_float(A[index])); + int val_b = convert_int_rtz(B); + int result = (val_b == 0) ? 
0 : (val_a / val_b); + C[index] = float_to_half((float)result); +} + +#endif // SUPPORTS_FP16 \ No newline at end of file diff --git a/mllm/backends/opencl/kernel/embedding.cl b/mllm/backends/opencl/kernel/embedding.cl new file mode 100644 index 000000000..e06a3e9f6 --- /dev/null +++ b/mllm/backends/opencl/kernel/embedding.cl @@ -0,0 +1,125 @@ +// 文件名: kernel/embedding.cl +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +// 与 C++ DataType.hpp 中定义匹配的 Q4_0 量化块结构 +typedef struct { + half d; + unsigned char qs[16]; +} block_q4_0; + +// ============================================================================ +// ========================== FP32 Embedding Kernel =========================== +// ============================================================================ +__kernel void embedding_fp32( + __global const float *input_ids_float, + __global const float *weights, + __global float *output, + const int vocab_size, + const int hidden_size, + const int sequence_len) { + const int d_idx = get_global_id(0); + const int token_idx = get_global_id(1); + + if (d_idx >= hidden_size || token_idx >= sequence_len) { + return; + } + + const int token_id = (int)input_ids_float[token_idx]; + if (token_id < 0 || token_id >= vocab_size) { + output[token_idx * hidden_size + d_idx] = 0.0f; + return; + } + + const size_t src_idx = (size_t)token_id * hidden_size + d_idx; + const size_t dst_idx = (size_t)token_idx * hidden_size + d_idx; + + output[dst_idx] = weights[src_idx]; +} + +// ============================================================================ +// ========================== Q4_0 Embedding Kernel =========================== +// ============================================================================ +__kernel void embedding_q4_0( + __global const float *input_ids_float, + __global const block_q4_0 *weights, + __global float *output, + const int vocab_size, + const int hidden_size, + const int sequence_len) { + const int d_idx = get_global_id(0); + const int 
token_idx = get_global_id(1); + + if (d_idx >= hidden_size || token_idx >= sequence_len) { + return; + } + + const int token_id = (int)input_ids_float[token_idx]; + if (token_id < 0 || token_id >= vocab_size) { + output[token_idx * hidden_size + d_idx] = 0.0f; + return; + } + + const size_t weight_idx = (size_t)token_id * hidden_size + d_idx; + const int block_idx = weight_idx / 32; + const int quant_idx_in_block = weight_idx % 32; + const __global block_q4_0 *b = &weights[block_idx]; + + // 正确的解量化逻辑 + const int qs_index = quant_idx_in_block % 16; + // [修正] 将 uchar 替换为 unsigned char 提高兼容性 + const unsigned char quant_pair = b->qs[qs_index]; + int nibble; + + if (quant_idx_in_block < 16) { + // 元素在块的前半部分 (0-15), 取低4位 + nibble = (quant_pair & 0x0F); + } else { + // 元素在块的后半部分 (16-31), 取高4位 + nibble = (quant_pair >> 4); + } + + const float dequantized_value = (float)b->d * (float)(nibble - 8); + const size_t dst_idx = (size_t)token_idx * hidden_size + d_idx; + output[dst_idx] = dequantized_value; +} +// ============================================================================ +// ================= Q4_0 Embedding Kernel (Output: FP16) ============ +// ============================================================================ +__kernel void embedding_q4_0_fp16( + __global const half *input_ids_half, // ✨ **核心修正点**: 输入类型改为 half + __global const block_q4_0 *weights, + __global half *output, + const int vocab_size, + const int hidden_size, + const int sequence_len) { + const int d_idx = get_global_id(0); + const int token_idx = get_global_id(1); + + if (d_idx >= hidden_size || token_idx >= sequence_len) { + return; + } + + // ✨ **核心修正点**: 从 half 类型的输入中读取 token ID + const int token_id = (int)input_ids_half[token_idx]; + const size_t dst_idx = (size_t)token_idx * hidden_size + d_idx; + + if (token_id < 0 || token_id >= vocab_size) { + output[dst_idx] = (half)0.0f; + return; + } + + // 解量化逻辑与 embedding_q4_0 完全相同 + const size_t weight_idx = (size_t)token_id * hidden_size + 
d_idx; + const int block_idx = weight_idx / 32; + const int quant_idx_in_block = weight_idx % 32; + const __global block_q4_0 *b = &weights[block_idx]; + const int qs_index = quant_idx_in_block % 16; + const unsigned char quant_pair = b->qs[qs_index]; + int nibble = (quant_idx_in_block < 16) ? (quant_pair & 0x0F) : (quant_pair >> 4); + + // 计算结果为 float + const float dequantized_value = (float)b->d * (float)(nibble - 8); + + // 将 float 结果转换为 half 并存储 + output[dst_idx] = (half)dequantized_value; +} \ No newline at end of file diff --git a/mllm/backends/opencl/kernel/flash_attention.cl b/mllm/backends/opencl/kernel/flash_attention.cl new file mode 100644 index 000000000..3797b5a4e --- /dev/null +++ b/mllm/backends/opencl/kernel/flash_attention.cl @@ -0,0 +1,884 @@ +// 文件: opencl/kernel/flash_attention.cl +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +#define F32_MAX 3.402823466e+38f +// 根据您的硬件能力和模型维度进行调整 +#define DIM_MAX 128 +// K/V瓦片的序列长度,32或64是常见选择 +#define TILE_S 32 +// 工作组大小,128或256是常见选择 +#define WGS 128 + +__kernel void flash_attention_2_prefill_fp32( + __global const float *Q, + __global const float *K, + __global const float *V, + __global float *O, + const int q_head_size, + const int kv_head_size, + const int seq_size_q, + const int seq_size_k, + const int dim_size, + const int causal_mask_flag) { + __local float k_tile[TILE_S][DIM_MAX]; + __local float v_tile[TILE_S][DIM_MAX]; + + // 1. 并行模型与索引:一个工作组计算一个输出行 O[b, s_q, h, :] + // 全局ID的每一行代表一个输出向量 + const int row_idx = get_group_id(0); + const int local_id = get_local_id(0); // 线程在工作组内的ID (0 to WGS-1) + + const int b = row_idx / (seq_size_q * q_head_size); + const int s_q_h_idx = row_idx % (seq_size_q * q_head_size); + const int s_q = s_q_h_idx / q_head_size; + const int h = s_q_h_idx % q_head_size; + + // 处理GQA/MQA: 多个Q头可能对应同一个KV头 + const int kv_h_idx = h / (q_head_size / kv_head_size); + + // 2. 
初始化线程私有累加器 (在寄存器中,速度最快) + float my_max_score = -F32_MAX; + float my_sum_exp = 0.0f; + float my_acc_o[DIM_MAX]; + for (int d = 0; d < dim_size; ++d) { + my_acc_o[d] = 0.0f; + } + + // 将当前查询向量Q加载到私有内存(寄存器)中,以便在循环中反复使用 + float my_q[DIM_MAX]; + const long q_base_addr = (long)b * seq_size_q * q_head_size * dim_size + (long)s_q * q_head_size * dim_size + (long)h * dim_size; + for (int d = 0; d < dim_size; ++d) { + my_q[d] = Q[q_base_addr + d]; + } + // 注意:此处需要一个屏障,因为每个线程只加载了Q的一部分,但下面计算需要完整的Q + // 但由于并行模型是冗余计算,每个线程最终都会有完整的my_q,所以也可以不加。 + // 为了逻辑严谨和未来可能的优化,我们这里假设每个线程都拿到了完整的Q。 + // 在实践中,更优化的方式是让一个warp加载Q然后广播,但为简单起见,这里每个线程都读。 + + const float scale = rsqrt((float)dim_size); + const int max_s_k = (causal_mask_flag && seq_size_q > 1) ? (s_q + 1) : seq_size_k; + + // 3. 沿K/V序列长度的分块循环 + for (int s_k_start = 0; s_k_start < max_s_k; s_k_start += TILE_S) { + // a. 工作组协作加载K, V块到高速的__local内存 + for (int i = local_id; i < TILE_S * dim_size; i += WGS) { + int s_local = i / dim_size; + int d_local = i % dim_size; + int s_k_global = s_k_start + s_local; + + if (s_k_global < seq_size_k) { + long kv_offset = (long)b * seq_size_k * kv_head_size * dim_size + (long)s_k_global * kv_head_size * dim_size + (long)kv_h_idx * dim_size + d_local; + k_tile[s_local][d_local] = K[kv_offset]; + v_tile[s_local][d_local] = V[kv_offset]; + } else { + // 超出范围的数据用0填充 + k_tile[s_local][d_local] = 0.0f; + v_tile[s_local][d_local] = 0.0f; + } + } + // 同步点:确保所有线程都完成了加载,K/V tile现在对所有线程可见 + barrier(CLK_LOCAL_MEM_FENCE); + + // b. 
每个线程独立处理已加载的K/V瓦片,执行在线Softmax + for (int s_local = 0; s_local < TILE_S; ++s_local) { + int s_k_global = s_k_start + s_local; + if (s_k_global < max_s_k) { + // 计算分数 (Q * K^T) + float score = 0.0f; + for (int d = 0; d < dim_size; ++d) { + score += my_q[d] * k_tile[s_local][d]; + } + score *= scale; + + float old_max = my_max_score; + my_max_score = fmax(old_max, score); + + float scale_factor = exp(old_max - my_max_score); + my_sum_exp *= scale_factor; + + for (int d = 0; d < dim_size; ++d) { + my_acc_o[d] *= scale_factor; + } + + float p = exp(score - my_max_score); + my_sum_exp += p; + + for (int d = 0; d < dim_size; ++d) { + my_acc_o[d] += p * v_tile[s_local][d]; + } + } + } + // 同步点:确保所有线程都处理完当前块,才能进入下一轮循环加载新块 + barrier(CLK_LOCAL_MEM_FENCE); + } + + // 4. 最终写回 + // 只需要一个线程来执行最后的归一化和写回操作,避免写入冲突 + if (local_id == 0) { + float inv_sum_exp = 1.0f / (my_sum_exp + 1e-6f); + long o_offset = (long)b * seq_size_q * q_head_size * dim_size + (long)s_q * q_head_size * dim_size + (long)h * dim_size; + for (int d = 0; d < dim_size; ++d) { + O[o_offset + d] = my_acc_o[d] * inv_sum_exp; + } + } +} + +__kernel void flash_attention_2_decode_fp32( + __global const float *Q, + __global const float *K, + __global const float *V, + __global float *O, + const int q_head_size, + const int kv_head_size, + const int seq_size_k, + const int dim_size) { + __local float q_vec[DIM_MAX]; + __local float l_max_score; + __local float l_sum_exp; + __local float partial_sums[WGS]; + __local float p, scale_factor; + + // 1. 并行模型与索引 + const int row_idx = get_group_id(0); + const int local_id = get_local_id(0); + + const int b = row_idx / q_head_size; + const int h = row_idx % q_head_size; + const int kv_h_idx = h / (q_head_size / kv_head_size); + + // 2. 协作加载Q向量 + const long q_addr = (long)b * q_head_size * dim_size + (long)h * dim_size; + for (int d = local_id; d < dim_size; d += WGS) { + q_vec[d] = Q[q_addr + d]; + } + + // 3. 
初始化 + if (local_id == 0) { + l_max_score = -F32_MAX; + l_sum_exp = 0.0f; + } + + const int dims_per_thread = (dim_size + WGS - 1) / WGS; + float my_acc_o[DIM_MAX / WGS + 1]; + for (int i = 0; i < dims_per_thread; ++i) my_acc_o[i] = 0.0f; + + const float scale = rsqrt((float)dim_size); + barrier(CLK_LOCAL_MEM_FENCE); + + // 4. 遍历所有Key向量 + for (int s_k = 0; s_k < seq_size_k; ++s_k) { + // ... 并行计算点积与规约 (这部分逻辑不变) ... + float partial_sum = 0.0f; + long k_addr = (long)b * seq_size_k * kv_head_size * dim_size + (long)s_k * kv_head_size * dim_size + (long)kv_h_idx * dim_size; + for (int i = 0; i < dims_per_thread; ++i) { + int d = local_id + i * WGS; + if (d < dim_size) { + partial_sum += q_vec[d] * K[k_addr + d]; + } + } + partial_sums[local_id] = partial_sum; + barrier(CLK_LOCAL_MEM_FENCE); + + for (int offset = WGS / 2; offset > 0; offset /= 2) { + if (local_id < offset) { + partial_sums[local_id] += partial_sums[local_id + offset]; + } + barrier(CLK_LOCAL_MEM_FENCE); + } + + // c. 由线程0计算并更新共享的统计量 + // __local float p, scale_factor; // <--- 从这里移除 + if (local_id == 0) { + float score = partial_sums[0] * scale; + float old_max = l_max_score; + l_max_score = fmax(old_max, score); + scale_factor = exp(old_max - l_max_score); + p = exp(score - l_max_score); + l_sum_exp = l_sum_exp * scale_factor + p; + } + barrier(CLK_LOCAL_MEM_FENCE); + + // d. 所有线程并行累加V向量 + long v_addr = (long)b * seq_size_k * kv_head_size * dim_size + (long)s_k * kv_head_size * dim_size + (long)kv_h_idx * dim_size; + for (int i = 0; i < dims_per_thread; ++i) { + int d = local_id + i * WGS; + if (d < dim_size) { + my_acc_o[i] = my_acc_o[i] * scale_factor + p * V[v_addr + d]; + } + } + } + + // 5. 
最终写回 + barrier(CLK_LOCAL_MEM_FENCE); + float inv_sum_exp = 1.0f / (l_sum_exp + 1e-6f); + long o_addr = (long)b * q_head_size * dim_size + (long)h * dim_size; + + for (int i = 0; i < dims_per_thread; ++i) { + int d = local_id + i * WGS; + if (d < dim_size) { + O[o_addr + d] = my_acc_o[i] * inv_sum_exp; + } + } +} + +// ================================================================================================= +// +// FP16 KERNELS START HERE +// +// ================================================================================================= +#if !defined(SUPPORTS_FP16) +// ------------------------------------------------------------------------------------------------- +// [回退版] FP16 Prefill (Tiled) Kernel +// ------------------------------------------------------------------------------------------------- +__kernel void flash_attention_2_prefill_fp16( + __global const half *Q, + __global const half *K, + __global const half *V, + __global half *O, + const int q_head_size, + const int kv_head_size, + const int seq_size_q, + const int seq_size_k, + const int dim_size, + const int causal_mask_flag) { + __local half k_tile[TILE_S][DIM_MAX]; + __local half v_tile[TILE_S][DIM_MAX]; + + const int row_idx = get_group_id(0); + const int local_id = get_local_id(0); + const int b = row_idx / (seq_size_q * q_head_size); + const int s_q_h_idx = row_idx % (seq_size_q * q_head_size); + const int s_q = s_q_h_idx / q_head_size; + const int h = s_q_h_idx % q_head_size; + const int kv_h_idx = h / (q_head_size / kv_head_size); + + float my_max_score = -F32_MAX; + float my_sum_exp = 0.0f; + float my_acc_o[DIM_MAX]; + for (int d = 0; d < dim_size; ++d) { + my_acc_o[d] = 0.0f; + } + + half my_q[DIM_MAX]; + const long q_base_addr = (long)b * seq_size_q * q_head_size * dim_size + (long)s_q * q_head_size * dim_size + (long)h * dim_size; + for (int d = 0; d < dim_size; ++d) { + my_q[d] = Q[q_base_addr + d]; + } + + const float scale = rsqrt((float)dim_size); + const int max_s_k = 
(causal_mask_flag && seq_size_q > 1) ? (s_q + 1) : seq_size_k; + + for (int s_k_start = 0; s_k_start < max_s_k; s_k_start += TILE_S) { + for (int i = local_id; i < TILE_S * dim_size; i += WGS) { + int s_local = i / dim_size; + int d_local = i % dim_size; + int s_k_global = s_k_start + s_local; + if (s_k_global < seq_size_k) { + long kv_offset = (long)b * seq_size_k * kv_head_size * dim_size + (long)s_k_global * kv_head_size * dim_size + (long)kv_h_idx * dim_size + d_local; + k_tile[s_local][d_local] = K[kv_offset]; + v_tile[s_local][d_local] = V[kv_offset]; + } else { + k_tile[s_local][d_local] = 0.0h; + v_tile[s_local][d_local] = 0.0h; + } + } + barrier(CLK_LOCAL_MEM_FENCE); + + for (int s_local = 0; s_local < TILE_S; ++s_local) { + int s_k_global = s_k_start + s_local; + if (s_k_global < max_s_k) { + float score = 0.0f; + for (int d = 0; d < dim_size; ++d) { + score += (float)my_q[d] * (float)k_tile[s_local][d]; + } + score *= scale; + + float old_max = my_max_score; + my_max_score = fmax(old_max, score); + + float scale_factor = exp(old_max - my_max_score); + my_sum_exp *= scale_factor; + for (int d = 0; d < dim_size; ++d) { + my_acc_o[d] *= scale_factor; + } + + float p = exp(score - my_max_score); + my_sum_exp += p; + + for (int d = 0; d < dim_size; ++d) { + my_acc_o[d] += p * (float)v_tile[s_local][d]; + } + } + } + barrier(CLK_LOCAL_MEM_FENCE); + } + + if (local_id == 0) { + float inv_sum_exp = 1.0f / (my_sum_exp + 1e-6f); + long o_offset = (long)b * seq_size_q * q_head_size * dim_size + (long)s_q * q_head_size * dim_size + (long)h * dim_size; + for (int d = 0; d < dim_size; ++d) { + O[o_offset + d] = (half)(my_acc_o[d] * inv_sum_exp); + } + } +} + +#else +// ------------------------------------------------------------------------------------------------- +// [高性能版] FP16 Prefill (Tiled) Kernel +// ------------------------------------------------------------------------------------------------- + +#define VEC_SIZE 8 +__kernel void 
flash_attention_2_prefill_fp16( + __global const half *Q, + __global const half *K, + __global const half *V, + __global half *O, + const int q_head_size, + const int kv_head_size, + const int seq_size_q, + const int seq_size_k, + const int dim_size, + const int causal_mask_flag) { + __local half k_tile[TILE_S * DIM_MAX]; + __local half v_tile[TILE_S * DIM_MAX]; + __local half q_local[DIM_MAX]; + + const int row_idx = get_group_id(0); + const int local_id = get_local_id(0); + + const int b = row_idx / (seq_size_q * q_head_size); + const int s_q_h_idx = row_idx % (seq_size_q * q_head_size); + const int s_q = s_q_h_idx / q_head_size; + const int h = s_q_h_idx % q_head_size; + const int kv_h_idx = h / (q_head_size / kv_head_size); + + float my_max_score = -F32_MAX; + float my_sum_exp = 0.0f; + float8 my_acc_o_vec[DIM_MAX / VEC_SIZE]; + for (int i = 0; i < dim_size / VEC_SIZE; ++i) { + my_acc_o_vec[i] = (float8)(0.0f); + } + + const long q_base_addr = (long)b * seq_size_q * q_head_size * dim_size + (long)s_q * q_head_size * dim_size + (long)h * dim_size; + for (int d = local_id; d < dim_size; d += WGS) { + q_local[d] = Q[q_base_addr + d]; + } + barrier(CLK_LOCAL_MEM_FENCE); + + const float scale = rsqrt((float)dim_size); + const int max_s_k = (causal_mask_flag && seq_size_q > 1) ? 
(s_q + 1) : seq_size_k; + const int dim_vec = dim_size / VEC_SIZE; + + for (int s_k_start = 0; s_k_start < max_s_k; s_k_start += TILE_S) { + for (int i = local_id; i < TILE_S * dim_vec; i += WGS) { + int s_local = i / dim_vec; + int d_vec_idx = i % dim_vec; + int s_k_global = s_k_start + s_local; + + if (s_k_global < seq_size_k) { + long kv_offset = (long)b * seq_size_k * kv_head_size * dim_size + (long)s_k_global * kv_head_size * dim_size + (long)kv_h_idx * dim_size + (long)d_vec_idx * VEC_SIZE; + *((__local half8 *)k_tile + i) = *((__global const half8 *)(K + kv_offset)); + *((__local half8 *)v_tile + i) = *((__global const half8 *)(V + kv_offset)); + } else { + *((__local half8 *)k_tile + i) = (half8)(0.0h); + *((__local half8 *)v_tile + i) = (half8)(0.0h); + } + } + barrier(CLK_LOCAL_MEM_FENCE); + + for (int s_local = 0; s_local < TILE_S; ++s_local) { + int s_k_global = s_k_start + s_local; + if (s_k_global < max_s_k) { + float score = 0.0f; + for (int d_vec = 0; d_vec < dim_vec; ++d_vec) { + float8 q_f_vec = vload_half8(d_vec, q_local); + float8 k_f_vec = vload_half8(s_local * dim_vec + d_vec, k_tile); + float8 mul_res = q_f_vec * k_f_vec; + score += mul_res.s0 + mul_res.s1 + mul_res.s2 + mul_res.s3 + mul_res.s4 + mul_res.s5 + mul_res.s6 + mul_res.s7; + } + score *= scale; + float old_max = my_max_score; + my_max_score = fmax(old_max, score); + float scale_factor = exp(old_max - my_max_score); + my_sum_exp *= scale_factor; + float p = exp(score - my_max_score); + my_sum_exp += p; + for (int d_vec = 0; d_vec < dim_vec; ++d_vec) { + float8 v_f_vec = vload_half8(s_local * dim_vec + d_vec, v_tile); + my_acc_o_vec[d_vec] = mad((float8)(p), v_f_vec, my_acc_o_vec[d_vec] * scale_factor); + } + } + } + barrier(CLK_LOCAL_MEM_FENCE); + } + if (local_id == 0) { + float inv_sum_exp = 1.0f / (my_sum_exp + 1e-6f); + long o_offset = (long)b * seq_size_q * q_head_size * dim_size + (long)s_q * q_head_size * dim_size + (long)h * dim_size; + for (int d_vec = 0; d_vec < dim_vec; 
++d_vec) { + float8 out_f_vec = my_acc_o_vec[d_vec] * inv_sum_exp; + vstore_half8(out_f_vec, 0, (__global half *)(O + o_offset + d_vec * VEC_SIZE)); + } + } +} + +#endif // SUPPORTS_FP16 +// ------------------------------------------------------------------------------------------------- +// FP16 Decode Kernel +// ------------------------------------------------------------------------------------------------- +__kernel void flash_attention_2_decode_fp16( + __global const half *Q, + __global const half *K, + __global const half *V, + __global half *O, + const int q_head_size, + const int kv_head_size, + const int seq_size_k, + const int dim_size) { + __local half q_vec[DIM_MAX]; + __local float l_max_score; + __local float l_sum_exp; + __local float partial_sums[WGS]; + __local float p, scale_factor; + + const int row_idx = get_group_id(0); + const int local_id = get_local_id(0); + + const int b = row_idx / q_head_size; + const int h = row_idx % q_head_size; + const int kv_h_idx = h / (q_head_size / kv_head_size); + + const long q_addr = (long)b * q_head_size * dim_size + (long)h * dim_size; + for (int d = local_id; d < dim_size; d += WGS) { + q_vec[d] = Q[q_addr + d]; + } + + if (local_id == 0) { + l_max_score = -F32_MAX; + l_sum_exp = 0.0f; + } + + const int dims_per_thread = (dim_size + WGS - 1) / WGS; + float my_acc_o[DIM_MAX / WGS + 1]; + for (int i = 0; i < dims_per_thread; ++i) my_acc_o[i] = 0.0f; + + const float scale = rsqrt((float)dim_size); + barrier(CLK_LOCAL_MEM_FENCE); + + for (int s_k = 0; s_k < seq_size_k; ++s_k) { + float partial_sum = 0.0f; + long k_addr = (long)b * seq_size_k * kv_head_size * dim_size + (long)s_k * kv_head_size * dim_size + (long)kv_h_idx * dim_size; + for (int i = 0; i < dims_per_thread; ++i) { + int d = local_id + i * WGS; + if (d < dim_size) { + partial_sum += (float)q_vec[d] * (float)K[k_addr + d]; + } + } + partial_sums[local_id] = partial_sum; + barrier(CLK_LOCAL_MEM_FENCE); + + for (int offset = WGS / 2; offset > 0; offset 
/= 2) { + if (local_id < offset) { + partial_sums[local_id] += partial_sums[local_id + offset]; + } + barrier(CLK_LOCAL_MEM_FENCE); + } + + if (local_id == 0) { + float score = partial_sums[0] * scale; + float old_max = l_max_score; + l_max_score = fmax(old_max, score); + scale_factor = exp(old_max - l_max_score); + p = exp(score - l_max_score); + l_sum_exp = l_sum_exp * scale_factor + p; + } + barrier(CLK_LOCAL_MEM_FENCE); + + long v_addr = (long)b * seq_size_k * kv_head_size * dim_size + (long)s_k * kv_head_size * dim_size + (long)kv_h_idx * dim_size; + for (int i = 0; i < dims_per_thread; ++i) { + int d = local_id + i * WGS; + if (d < dim_size) { + my_acc_o[i] = my_acc_o[i] * scale_factor + p * (float)V[v_addr + d]; + } + } + } + + barrier(CLK_LOCAL_MEM_FENCE); + float inv_sum_exp = 1.0f / (l_sum_exp + 1e-6f); + long o_addr = (long)b * q_head_size * dim_size + (long)h * dim_size; + for (int i = 0; i < dims_per_thread; ++i) { + int d = local_id + i * WGS; + if (d < dim_size) { + O[o_addr + d] = (half)(my_acc_o[i] * inv_sum_exp); + } + } +} + +// ---------- [Image 版] FP32 Prefill Kernel ---------- +__kernel void flash_attention_2_prefill_fp32_image( + sampler_t sampler, + __read_only image2d_t Q, __read_only image2d_t K, __read_only image2d_t V, + __write_only image2d_t O, + const int q_head_size, const int kv_head_size, + const int seq_size_q, const int seq_size_k, + const int dim_size, const int causal_mask_flag) { + const int row_idx = get_group_id(0); + const int local_id = get_local_id(0); + const int b = row_idx / (seq_size_q * q_head_size); + const int s_h_idx = row_idx % (seq_size_q * q_head_size); + const int s_q = s_h_idx / q_head_size; + const int h = s_h_idx % q_head_size; + const int kv_h_idx = h / (q_head_size / kv_head_size); + float my_max_score = -F32_MAX; + float my_sum_exp = 0.0f; + float my_acc_o[128]; + for (int d = 0; d < dim_size; ++d) my_acc_o[d] = 0.0f; + float my_q[128]; + const int q_y_coord = (b * seq_size_q * q_head_size) + (s_q * 
q_head_size) + h; + for (int d_pixel = 0; d_pixel < dim_size / 4; ++d_pixel) { + float4 q_pix = read_imagef(Q, sampler, (int2)(d_pixel, q_y_coord)); + my_q[d_pixel * 4 + 0] = q_pix.x; + my_q[d_pixel * 4 + 1] = q_pix.y; + my_q[d_pixel * 4 + 2] = q_pix.z; + my_q[d_pixel * 4 + 3] = q_pix.w; + } + const float scale = rsqrt((float)dim_size); + const int max_s_k = (causal_mask_flag && seq_size_q > 1) ? (s_q + 1) : seq_size_k; + for (int s_k_start = 0; s_k_start < max_s_k; s_k_start += TILE_S) { + for (int s_local = 0; s_local < TILE_S; ++s_local) { + int s_k_global = s_k_start + s_local; + if (s_k_global < max_s_k) { + float score = 0.0f; + const int k_y_coord = (b * seq_size_k * kv_head_size) + (s_k_global * kv_head_size) + kv_h_idx; + for (int d_pixel = 0; d_pixel < dim_size / 4; ++d_pixel) { + float4 k_pix = read_imagef(K, sampler, (int2)(d_pixel, k_y_coord)); + score += my_q[d_pixel * 4 + 0] * k_pix.x; + score += my_q[d_pixel * 4 + 1] * k_pix.y; + score += my_q[d_pixel * 4 + 2] * k_pix.z; + score += my_q[d_pixel * 4 + 3] * k_pix.w; + } + score *= scale; + float old_max = my_max_score; + my_max_score = fmax(old_max, score); + float scale_factor = exp(old_max - my_max_score); + my_sum_exp *= scale_factor; + float p = exp(score - my_max_score); + const int v_y_coord = k_y_coord; + for (int d_pixel = 0; d_pixel < dim_size / 4; ++d_pixel) { + float4 v_pix = read_imagef(V, sampler, (int2)(d_pixel, v_y_coord)); + my_acc_o[d_pixel * 4 + 0] = my_acc_o[d_pixel * 4 + 0] * scale_factor + p * v_pix.x; + my_acc_o[d_pixel * 4 + 1] = my_acc_o[d_pixel * 4 + 1] * scale_factor + p * v_pix.y; + my_acc_o[d_pixel * 4 + 2] = my_acc_o[d_pixel * 4 + 2] * scale_factor + p * v_pix.z; + my_acc_o[d_pixel * 4 + 3] = my_acc_o[d_pixel * 4 + 3] * scale_factor + p * v_pix.w; + } + my_sum_exp += p; + } + } + } + if (local_id == 0) { + float inv_sum_exp = 1.0f / (my_sum_exp + 1e-6f); + const int o_y_coord = (b * seq_size_q * q_head_size) + (s_q * q_head_size) + h; + for (int d_pixel = 0; d_pixel < 
dim_size / 4; ++d_pixel) { + float4 out_pixel; + out_pixel.x = my_acc_o[d_pixel * 4 + 0] * inv_sum_exp; + out_pixel.y = my_acc_o[d_pixel * 4 + 1] * inv_sum_exp; + out_pixel.z = my_acc_o[d_pixel * 4 + 2] * inv_sum_exp; + out_pixel.w = my_acc_o[d_pixel * 4 + 3] * inv_sum_exp; + write_imagef(O, (int2)(d_pixel, o_y_coord), out_pixel); + } + } +} + +// ---------- [Image 版] FP32 Decode Kernel [最终修正] ---------- +__kernel void flash_attention_2_decode_fp32_image( + sampler_t sampler, + __read_only image2d_t Q, __read_only image2d_t K, __read_only image2d_t V, + __write_only image2d_t O, + const int q_head_size, const int kv_head_size, + const int seq_size_k, const int dim_size) { + const int gx = get_global_id(0); + const int gy = get_global_id(1); + if (gx >= (dim_size / 4)) return; + const int b = gy / q_head_size; + const int h = gy % q_head_size; + const int kv_h_idx = h / (q_head_size / kv_head_size); + float max_score = -F32_MAX; + float sum_exp = 0.0f; + float4 acc_o = (float4)(0.0f); + const float scale = rsqrt((float)dim_size); + const int q_y_coord = gy; + for (int s_k = 0; s_k < seq_size_k; ++s_k) { + float score = 0.0f; + const int k_y_coord = (b * seq_size_k * kv_head_size) + (s_k * kv_head_size) + kv_h_idx; + for (int d_pixel = 0; d_pixel < dim_size / 4; ++d_pixel) { + float4 q_pix = read_imagef(Q, sampler, (int2)(d_pixel, q_y_coord)); + float4 k_pix = read_imagef(K, sampler, (int2)(d_pixel, k_y_coord)); + score += dot(q_pix, k_pix); + } + score *= scale; + float old_max = max_score; + max_score = fmax(old_max, score); + float scale_factor = exp(old_max - max_score); + sum_exp *= scale_factor; + acc_o *= scale_factor; + float p = exp(score - max_score); + sum_exp += p; + const int v_y_coord = k_y_coord; + float4 v_pix = read_imagef(V, sampler, (int2)(gx, v_y_coord)); + acc_o = mad(p, v_pix, acc_o); + } + float inv_sum_exp = 1.0f / (sum_exp + 1e-6f); + acc_o *= inv_sum_exp; + write_imagef(O, (int2)(gx, gy), acc_o); +} + +// 
================================================================================================= +// 3. FP16 全 Image 版本的内核 +// ================================================================================================= +#if defined(SUPPORTS_FP16) +// ================================================================================================= +// FP16 Prefill Kernel (Image) +// ================================================================================================= +__kernel void flash_attention_2_prefill_fp16_image( + sampler_t sampler, + __read_only image2d_t Q, __read_only image2d_t K, __read_only image2d_t V, + __write_only image2d_t O, + const int q_head_size, const int kv_head_size, + const int seq_size_q, const int seq_size_k, + const int dim_size, const int causal_mask_flag) { + const int row_idx = get_group_id(0); + const int local_id = get_local_id(0); + const int b = row_idx / (seq_size_q * q_head_size); + const int s_h_idx = row_idx % (seq_size_q * q_head_size); + const int s_q = s_h_idx / q_head_size; + const int h = s_h_idx % q_head_size; + const int kv_h_idx = h / (q_head_size / kv_head_size); + float my_max_score = -F32_MAX; + float my_sum_exp = 0.0f; + float4 my_acc_o[32]; + float4 my_q[32]; +#pragma unroll + for (int i = 0; i < 32; ++i) { + my_acc_o[i] = (float4)(0.0f); + } + const int q_y_coord = (b * seq_size_q * q_head_size) + (s_q * q_head_size) + h; + const int dim_vec_size = dim_size / 4; +#pragma unroll + for (int d_vec = 0; d_vec < dim_vec_size; ++d_vec) { + my_q[d_vec] = convert_float4(read_imageh(Q, sampler, (int2)(d_vec, q_y_coord))); + } + const float scale = rsqrt((float)dim_size); + const int max_s_k = (causal_mask_flag && seq_size_q > 1) ? 
(s_q + 1) : seq_size_k; + for (int s_k_global = 0; s_k_global < max_s_k; ++s_k_global) { + float score = 0.0f; + const int k_y_coord = (b * seq_size_k * kv_head_size) + (s_k_global * kv_head_size) + kv_h_idx; +#pragma unroll + for (int d_vec = 0; d_vec < dim_vec_size; ++d_vec) { + float4 k_pix = convert_float4(read_imageh(K, sampler, (int2)(d_vec, k_y_coord))); + score += dot(my_q[d_vec], k_pix); + } + score *= scale; + float old_max = my_max_score; + my_max_score = fmax(old_max, score); + float scale_factor = exp(old_max - my_max_score); + my_sum_exp *= scale_factor; + float p = exp(score - my_max_score); + const int v_y_coord = k_y_coord; +#pragma unroll + for (int d_vec = 0; d_vec < dim_vec_size; ++d_vec) { + float4 v_pix = convert_float4(read_imageh(V, sampler, (int2)(d_vec, v_y_coord))); + my_acc_o[d_vec] = my_acc_o[d_vec] * scale_factor + p * v_pix; + } + my_sum_exp += p; + } + if (local_id == 0) { + float inv_sum_exp = 1.0f / (my_sum_exp + 1e-6f); + const int o_y_coord = (b * seq_size_q * q_head_size) + (s_q * q_head_size) + h; +#pragma unroll + for (int d_vec = 0; d_vec < dim_vec_size; ++d_vec) { + float4 out_pixel_f = my_acc_o[d_vec] * inv_sum_exp; + write_imageh(O, (int2)(d_vec, o_y_coord), convert_half4_rte(out_pixel_f)); + } + } +} + +// ================================================================================================= +// FP16 Decode Kernel (Image) +// ================================================================================================= +__kernel void flash_attention_2_decode_fp16_image( + sampler_t sampler, + __read_only image2d_t Q, __read_only image2d_t K, __read_only image2d_t V, + __write_only image2d_t O, + const int q_head_size, const int kv_head_size, + const int seq_size_k, const int dim_size) { + const int gx = get_global_id(0); // Corresponds to dimension vector index + const int gy = get_global_id(1); // Corresponds to batch/head/sequence index + if (gx >= (dim_size / 4)) return; + const int b = gy / q_head_size; 
+ const int h = gy % q_head_size; + const int kv_h_idx = h / (q_head_size / kv_head_size); + float max_score = -F32_MAX; + float sum_exp = 0.0f; + float4 acc_o = (float4)(0.0f); + const float scale = rsqrt((float)dim_size); + const int q_y_coord = gy; + const int dim_vec_size = dim_size / 4; + for (int s_k = 0; s_k < seq_size_k; ++s_k) { + float score = 0.0f; + const int k_y_coord = (b * seq_size_k * kv_head_size) + (s_k * kv_head_size) + kv_h_idx; +#pragma unroll + for (int d_vec = 0; d_vec < dim_vec_size; ++d_vec) { + float4 q_pix = convert_float4(read_imageh(Q, sampler, (int2)(d_vec, q_y_coord))); + float4 k_pix = convert_float4(read_imageh(K, sampler, (int2)(d_vec, k_y_coord))); + score += dot(q_pix, k_pix); + } + score *= scale; + float old_max = max_score; + max_score = fmax(old_max, score); + float scale_factor = exp(old_max - max_score); + sum_exp *= scale_factor; + acc_o *= scale_factor; + float p = exp(score - max_score); + sum_exp += p; + const int v_y_coord = k_y_coord; + float4 v_pix = convert_float4(read_imageh(V, sampler, (int2)(gx, v_y_coord))); + acc_o = mad(p, v_pix, acc_o); + } + float inv_sum_exp = 1.0f / (sum_exp + 1e-6f); + acc_o *= inv_sum_exp; + write_imageh(O, (int2)(gx, gy), convert_half4_rte(acc_o)); +} + +#else + +// ---------- [Image 版] FP16 Prefill Kernel [兼容回退版] ---------- +__kernel void flash_attention_2_prefill_fp16_image( + sampler_t sampler, + __read_only image2d_t Q, __read_only image2d_t K, __read_only image2d_t V, + __write_only image2d_t O, + const int q_head_size, const int kv_head_size, + const int seq_size_q, const int seq_size_k, + const int dim_size, const int causal_mask_flag) { + const int row_idx = get_group_id(0); + const int local_id = get_local_id(0); + const int b = row_idx / (seq_size_q * q_head_size); + const int s_h_idx = row_idx % (seq_size_q * q_head_size); + const int s_q = s_h_idx / q_head_size; + const int h = s_h_idx % q_head_size; + const int kv_h_idx = h / (q_head_size / kv_head_size); + + float 
my_max_score = -F32_MAX; + float my_sum_exp = 0.0f; + float my_acc_o[128]; // 假设 DIM_MAX <= 128 + for (int d = 0; d < dim_size; ++d) my_acc_o[d] = 0.0f; + + float my_q[128]; + const int q_y_coord = (b * seq_size_q * q_head_size) + (s_q * q_head_size) + h; + for (int d_pixel = 0; d_pixel < dim_size / 4; ++d_pixel) { + // 直接读取CL_FLOAT图像 + float4 q_pix = read_imagef(Q, sampler, (int2)(d_pixel, q_y_coord)); + my_q[d_pixel * 4 + 0] = q_pix.x; + my_q[d_pixel * 4 + 1] = q_pix.y; + my_q[d_pixel * 4 + 2] = q_pix.z; + my_q[d_pixel * 4 + 3] = q_pix.w; + } + + const float scale = rsqrt((float)dim_size); + const int max_s_k = (causal_mask_flag && seq_size_q > 1) ? (s_q + 1) : seq_size_k; + + for (int s_k_global = 0; s_k_global < max_s_k; ++s_k_global) { + float score = 0.0f; + const int k_y_coord = (b * seq_size_k * kv_head_size) + (s_k_global * kv_head_size) + kv_h_idx; + for (int d_pixel = 0; d_pixel < dim_size / 4; ++d_pixel) { + float4 k_pix = read_imagef(K, sampler, (int2)(d_pixel, k_y_coord)); + score += my_q[d_pixel * 4 + 0] * k_pix.x; + score += my_q[d_pixel * 4 + 1] * k_pix.y; + score += my_q[d_pixel * 4 + 2] * k_pix.z; + score += my_q[d_pixel * 4 + 3] * k_pix.w; + } + score *= scale; + + float old_max = my_max_score; + my_max_score = fmax(old_max, score); + float scale_factor = exp(old_max - my_max_score); + my_sum_exp *= scale_factor; + float p = exp(score - my_max_score); + + const int v_y_coord = k_y_coord; + for (int d_pixel = 0; d_pixel < dim_size / 4; ++d_pixel) { + float4 v_pix = read_imagef(V, sampler, (int2)(d_pixel, v_y_coord)); + my_acc_o[d_pixel * 4 + 0] = my_acc_o[d_pixel * 4 + 0] * scale_factor + p * v_pix.x; + my_acc_o[d_pixel * 4 + 1] = my_acc_o[d_pixel * 4 + 1] * scale_factor + p * v_pix.y; + my_acc_o[d_pixel * 4 + 2] = my_acc_o[d_pixel * 4 + 2] * scale_factor + p * v_pix.z; + my_acc_o[d_pixel * 4 + 3] = my_acc_o[d_pixel * 4 + 3] * scale_factor + p * v_pix.w; + } + my_sum_exp += p; + } + + if (local_id == 0) { + float inv_sum_exp = 1.0f / (my_sum_exp 
+ 1e-6f); + const int o_y_coord = (b * seq_size_q * q_head_size) + (s_q * q_head_size) + h; + for (int d_pixel = 0; d_pixel < dim_size / 4; ++d_pixel) { + float4 out_pixel; + out_pixel.x = my_acc_o[d_pixel * 4 + 0] * inv_sum_exp; + out_pixel.y = my_acc_o[d_pixel * 4 + 1] * inv_sum_exp; + out_pixel.z = my_acc_o[d_pixel * 4 + 2] * inv_sum_exp; + out_pixel.w = my_acc_o[d_pixel * 4 + 3] * inv_sum_exp; + // 直接写入CL_FLOAT图像 + write_imagef(O, (int2)(d_pixel, o_y_coord), out_pixel); + } + } +} + +// ---------- [Image 版] FP16 Decode Kernel [兼容回退版] ---------- +__kernel void flash_attention_2_decode_fp16_image( + sampler_t sampler, + __read_only image2d_t Q, __read_only image2d_t K, __read_only image2d_t V, + __write_only image2d_t O, + const int q_head_size, const int kv_head_size, + const int seq_size_k, const int dim_size) { + const int gx = get_global_id(0); + const int gy = get_global_id(1); + + if (gx >= (dim_size / 4)) return; + + const int b = gy / q_head_size; + const int h = gy % q_head_size; + const int kv_h_idx = h / (q_head_size / kv_head_size); + + float max_score = -F32_MAX; + float sum_exp = 0.0f; + float4 acc_o = (float4)(0.0f); + + const float scale = rsqrt((float)dim_size); + const int q_y_coord = gy; + + for (int s_k = 0; s_k < seq_size_k; ++s_k) { + float score = 0.0f; + const int k_y_coord = (b * seq_size_k * kv_head_size) + (s_k * kv_head_size) + kv_h_idx; + for (int d_pixel = 0; d_pixel < dim_size / 4; ++d_pixel) { + float4 q_pix = read_imagef(Q, sampler, (int2)(d_pixel, q_y_coord)); + float4 k_pix = read_imagef(K, sampler, (int2)(d_pixel, k_y_coord)); + score += dot(q_pix, k_pix); + } + score *= scale; + + float old_max = max_score; + max_score = fmax(old_max, score); + float scale_factor = exp(old_max - max_score); + sum_exp *= scale_factor; + acc_o *= scale_factor; + float p = exp(score - max_score); + sum_exp += p; + + const int v_y_coord = k_y_coord; + float4 v_pix = read_imagef(V, sampler, (int2)(gx, v_y_coord)); + acc_o = mad(p, v_pix, acc_o); + 
} + + float inv_sum_exp = 1.0f / (sum_exp + 1e-6f); + acc_o *= inv_sum_exp; + + // 直接写入CL_FLOAT图像 + write_imagef(O, (int2)(gx, gy), acc_o); +} +#endif // SUPPORTS_FP16 diff --git a/mllm/backends/opencl/kernel/kvcache.cl b/mllm/backends/opencl/kernel/kvcache.cl new file mode 100644 index 000000000..0e9125135 --- /dev/null +++ b/mllm/backends/opencl/kernel/kvcache.cl @@ -0,0 +1,129 @@ +// 文件名: kernel/kvcache.cl + +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +// ================================================================== +// Kernels for BSHD (Batch, Sequence, Head, Dim) Layout +// ================================================================== +__kernel void update_kv_cache_fp32_bshd( + __global const float *src, + __global float *cache, + const int H_in, + const int S_in, + const int D, + const int H_cache, + const int S_cache, + const int n_rep, + const int cache_offset) { + const int d_vec_idx = get_global_id(0); + const int s_idx = get_global_id(1); + const int h_idx_dst = get_global_id(2); + + const int D_vec = D / 4; + if (d_vec_idx >= D_vec || s_idx >= S_in || h_idx_dst >= H_cache) { + return; + } + + const int h_idx_src = h_idx_dst / n_rep; + const int s_idx_dst = cache_offset + s_idx; + + // BSHD [Batch, Sequence, Head, Dim] layout indexing + size_t src_offset = (size_t)s_idx * H_in * D + (size_t)h_idx_src * D + (size_t)d_vec_idx * 4; + size_t dst_offset = (size_t)s_idx_dst * H_cache * D + (size_t)h_idx_dst * D + (size_t)d_vec_idx * 4; + + float4 data_vec = vload4(0, src + src_offset); + vstore4(data_vec, 0, cache + dst_offset); +} + +__kernel void update_kv_cache_fp16_bshd( + __global const half *src, + __global half *cache, + const int H_in, + const int S_in, + const int D, + const int H_cache, + const int S_cache, + const int n_rep, + const int cache_offset) { + const int d_vec_idx = get_global_id(0); + const int s_idx = get_global_id(1); + const int h_idx_dst = get_global_id(2); + + const int D_vec = D / 4; + if (d_vec_idx >= D_vec || s_idx 
>= S_in || h_idx_dst >= H_cache) { + return; + } + + const int h_idx_src = h_idx_dst / n_rep; + const int s_idx_dst = cache_offset + s_idx; + + // BSHD [Batch, Sequence, Head, Dim] layout indexing + size_t src_offset = (size_t)s_idx * H_in * D + (size_t)h_idx_src * D + (size_t)d_vec_idx * 4; + size_t dst_offset = (size_t)s_idx_dst * H_cache * D + (size_t)h_idx_dst * D + (size_t)d_vec_idx * 4; + + half4 data_vec = vload4(0, src + src_offset); + vstore4(data_vec, 0, cache + dst_offset); +} + +// ================================================================== +// Kernels for BHSD (Batch, Head, Sequence, Dim) Layout (New) +// ================================================================== +__kernel void update_kv_cache_fp32_bhsd( + __global const float *src, + __global float *cache, + const int H_in, + const int S_in, + const int D, + const int H_cache, + const int S_cache, + const int n_rep, + const int cache_offset) { + const int d_vec_idx = get_global_id(0); + const int s_idx = get_global_id(1); + const int h_idx_dst = get_global_id(2); + + const int D_vec = D / 4; + if (d_vec_idx >= D_vec || s_idx >= S_in || h_idx_dst >= H_cache) { + return; + } + + const int h_idx_src = h_idx_dst / n_rep; + const int s_idx_dst = cache_offset + s_idx; + + // BHSD [Batch, Head, Sequence, Dim] layout indexing + size_t src_offset = (size_t)h_idx_src * S_in * D + (size_t)s_idx * D + (size_t)d_vec_idx * 4; + size_t dst_offset = (size_t)h_idx_dst * S_cache * D + (size_t)s_idx_dst * D + (size_t)d_vec_idx * 4; + + float4 data_vec = vload4(0, src + src_offset); + vstore4(data_vec, 0, cache + dst_offset); +} + +__kernel void update_kv_cache_fp16_bhsd( + __global const half *src, + __global half *cache, + const int H_in, + const int S_in, + const int D, + const int H_cache, + const int S_cache, + const int n_rep, + const int cache_offset) { + const int d_vec_idx = get_global_id(0); + const int s_idx = get_global_id(1); + const int h_idx_dst = get_global_id(2); + + const int D_vec = D / 
4; + if (d_vec_idx >= D_vec || s_idx >= S_in || h_idx_dst >= H_cache) { + return; + } + + const int h_idx_src = h_idx_dst / n_rep; + const int s_idx_dst = cache_offset + s_idx; + + // BHSD [Batch, Head, Sequence, Dim] layout indexing + size_t src_offset = (size_t)h_idx_src * S_in * D + (size_t)s_idx * D + (size_t)d_vec_idx * 4; + size_t dst_offset = (size_t)h_idx_dst * S_cache * D + (size_t)s_idx_dst * D + (size_t)d_vec_idx * 4; + + half4 data_vec = vload4(0, src + src_offset); + vstore4(data_vec, 0, cache + dst_offset); +} \ No newline at end of file diff --git a/mllm/backends/opencl/kernel/like.cl b/mllm/backends/opencl/kernel/like.cl new file mode 100644 index 000000000..28f2b7c55 --- /dev/null +++ b/mllm/backends/opencl/kernel/like.cl @@ -0,0 +1,21 @@ +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +__kernel void like( + __global void *output, + const float like_value, + const int count, + const int dtype_size) { // sizeof(float) or sizeof(half) + + const int gid = get_global_id(0); + if (gid >= count) { + return; + } + + if (dtype_size == 4) { // float + __global float *out_fp32 = (__global float *)output; + out_fp32[gid] = like_value; + } else if (dtype_size == 2) { // half + __global half *out_fp16 = (__global half *)output; + out_fp16[gid] = (half)like_value; + } +} \ No newline at end of file diff --git a/mllm/backends/opencl/kernel/matmul.cl b/mllm/backends/opencl/kernel/matmul.cl new file mode 100644 index 000000000..bcddf1e50 --- /dev/null +++ b/mllm/backends/opencl/kernel/matmul.cl @@ -0,0 +1,334 @@ +// opencl/kernel/matmul.cl + +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +// ================================================================== +// 1. 宏定义和数据结构 +// ================================================================== + +#define TILE_SIZE 16 +#define QK4_0 32 +#define QK8_0 32 + +typedef struct { + half d; + uchar qs[QK4_0 / 2]; +} block_q4_0; + +// ================================================================== +// 2. 
FP32 BSHD GEMM 内核 +// ================================================================== + +/** + * @brief 高性能浮点矩阵乘法 (FP32 * FP32),支持 BSHD 布局 + */ +__kernel void gemm_fp32( + __global const float *A, + __global const float *B, + __global float *C, + const int M, const int K, const int N, + const int H, const int K_b) { + const int s = get_global_id(1); + const int n = get_global_id(0); + const int bh_idx = get_global_id(2); + const int b = bh_idx / H; + const int h = bh_idx % H; + + const int local_row = get_local_id(1); + const int local_col = get_local_id(0); + + __local float a_tile[TILE_SIZE][TILE_SIZE]; + __local float b_tile[TILE_SIZE][TILE_SIZE]; + + float acc = 0.0f; + const int num_tiles = (K + TILE_SIZE - 1) / TILE_SIZE; + for (int t = 0; t < num_tiles; ++t) { + const int k_start = t * TILE_SIZE; + const int a_k_idx = k_start + local_col; + const int b_k_idx = k_start + local_row; + if (s < M && a_k_idx < K) { + a_tile[local_row][local_col] = A[(long)b * M * H * K + (long)s * H * K + (long)h * K + a_k_idx]; + } else { + a_tile[local_row][local_col] = 0.0f; + } + if (n < N && b_k_idx < K_b) { + b_tile[local_row][local_col] = B[(long)b * K_b * H * N + (long)b_k_idx * H * N + (long)h * N + n]; + } else { + b_tile[local_row][local_col] = 0.0f; + } + barrier(CLK_LOCAL_MEM_FENCE); + + for (int k_tile = 0; k_tile < TILE_SIZE; ++k_tile) { + acc += a_tile[local_row][k_tile] * b_tile[k_tile][local_col]; + } + barrier(CLK_LOCAL_MEM_FENCE); + } + if (s < M && n < N) { + C[(long)b * M * H * N + (long)s * H * N + (long)h * N + n] = acc; + } +} + +// ================================================================== +// 3. 
FP32 BHSD GEMM 内核 (New) +// ================================================================== +/** + * @brief 高性能浮点矩阵乘法 (FP32 * FP32),支持 BHSD 布局 + */ +__kernel void gemm_fp32_bhsd( + __global const float *A, + __global const float *B, + __global float *C, + const int M, const int K, const int N, + const int H, const int K_b) { + const int s = get_global_id(1); + const int n = get_global_id(0); + const int bh_idx = get_global_id(2); + const int b = bh_idx / H; + const int h = bh_idx % H; + + const int local_row = get_local_id(1); + const int local_col = get_local_id(0); + + __local float a_tile[TILE_SIZE][TILE_SIZE]; + __local float b_tile[TILE_SIZE][TILE_SIZE]; + + float acc = 0.0f; + const int num_tiles = (K + TILE_SIZE - 1) / TILE_SIZE; + for (int t = 0; t < num_tiles; ++t) { + const int k_start = t * TILE_SIZE; + const int a_k_idx = k_start + local_col; + const int b_k_idx = k_start + local_row; + if (s < M && a_k_idx < K) { + a_tile[local_row][local_col] = A[(long)b * H * M * K + (long)h * M * K + (long)s * K + a_k_idx]; + } else { + a_tile[local_row][local_col] = 0.0f; + } + if (n < N && b_k_idx < K_b) { + b_tile[local_row][local_col] = B[(long)b * H * K_b * N + (long)h * K_b * N + (long)b_k_idx * N + n]; + } else { + b_tile[local_row][local_col] = 0.0f; + } + barrier(CLK_LOCAL_MEM_FENCE); + + for (int k_tile = 0; k_tile < TILE_SIZE; ++k_tile) { + acc += a_tile[local_row][k_tile] * b_tile[k_tile][local_col]; + } + barrier(CLK_LOCAL_MEM_FENCE); + } + if (s < M && n < N) { + C[(long)b * H * M * N + (long)h * M * N + (long)s * N + n] = acc; + } +} + +#if !defined(SUPPORTS_FP16) +// ================================================================== +// 4. 
FP16 GEMM 内核 (Fallback) +// ================================================================== + +// ---------- [FP16 BSHD 回退版] ---------- +__kernel void gemm_fp16( + __global const half *A, __global const half *B, __global half *C, + const int M, const int K, const int N, const int H, const int K_b) { + const int s = get_global_id(1); + const int n = get_global_id(0); + const int bh_idx = get_global_id(2); + const int b = bh_idx / H; + const int h = bh_idx % H; + if (s >= M || n >= N) return; + + float acc = 0.0f; + for (int k = 0; k < K; ++k) { + long a_idx = (long)b * M * H * K + (long)s * H * K + (long)h * K + k; + long b_idx = (long)b * K_b * H * N + (long)k * H * N + (long)h * N + n; + acc += (float)A[a_idx] * (float)B[b_idx]; + } + C[(long)b * M * H * N + (long)s * H * N + (long)h * N + n] = (half)acc; +} + +// ---------- [FP16 BHSD 回退版] (New) ---------- +__kernel void gemm_fp16_bhsd( + __global const half *A, __global const half *B, __global half *C, + const int M, const int K, const int N, const int H, const int K_b) { + const int s = get_global_id(1); + const int n = get_global_id(0); + const int bh_idx = get_global_id(2); + const int b = bh_idx / H; + const int h = bh_idx % H; + if (s >= M || n >= N) return; + + float acc = 0.0f; + for (int k = 0; k < K; ++k) { + long a_idx = (long)b * H * M * K + (long)h * M * K + (long)s * K + k; + long b_idx = (long)b * H * K_b * N + (long)h * K_b * N + (long)k * N + n; + acc += (float)A[a_idx] * (float)B[b_idx]; + } + C[(long)b * H * M * N + (long)h * M * N + (long)s * N + n] = (half)acc; +} + +#else +// ================================================================== +// 5. 
FP16 GEMM 内核 (High-Performance) +// ================================================================== + +// ---------- [FP16 BSHD 高性能版] ---------- +__kernel void gemm_fp16( + __global const half *A, __global const half *B, __global half *C, + const int M, const int K, const int N, const int H, const int K_b) { + const int s = get_global_id(1); + const int n = get_global_id(0); + const int bh_idx = get_global_id(2); + const int b = bh_idx / H; + const int h = bh_idx % H; + + const int local_row = get_local_id(1); + const int local_col = get_local_id(0); + + __local half a_tile[TILE_SIZE][TILE_SIZE]; + __local half b_tile[TILE_SIZE][TILE_SIZE]; + + half acc = 0.0h; + const int num_tiles = (K + TILE_SIZE - 1) / TILE_SIZE; + for (int t = 0; t < num_tiles; ++t) { + const int k_start = t * TILE_SIZE; + const int a_k_idx = k_start + local_col; + const int b_k_idx = k_start + local_row; + if (s < M && a_k_idx < K) { + a_tile[local_row][local_col] = A[(long)b * M * H * K + (long)s * H * K + (long)h * K + a_k_idx]; + } else { + a_tile[local_row][local_col] = 0.0h; + } + if (n < N && b_k_idx < K_b) { + b_tile[local_row][local_col] = B[(long)b * K_b * H * N + (long)b_k_idx * H * N + (long)h * N + n]; + } else { + b_tile[local_row][local_col] = 0.0h; + } + barrier(CLK_LOCAL_MEM_FENCE); + for (int k_tile = 0; k_tile < TILE_SIZE; ++k_tile) { + acc += a_tile[local_row][k_tile] * b_tile[k_tile][local_col]; + } + barrier(CLK_LOCAL_MEM_FENCE); + } + if (s < M && n < N) { + C[(long)b * M * H * N + (long)s * H * N + (long)h * N + n] = acc; + } +} + +// ---------- [FP16 BHSD 高性能版] ---------- + +__kernel void gemm_fp16_bhsd( + __global const half *A, __global const half *B, __global half *C, + const int M, const int K, const int N, const int H, const int K_b) { + const int s_out = get_global_id(1); + const int n_out = get_global_id(0); + const int bh_idx = get_global_id(2); + const int b = bh_idx / H; + const int h = bh_idx % H; + const int local_row = get_local_id(1); + const int 
local_col = get_local_id(0); + + __local half a_tile[TILE_SIZE][TILE_SIZE]; + __local half b_tile[TILE_SIZE][TILE_SIZE]; + half acc = 0.0f; + const int num_tiles = (K + TILE_SIZE - 1) / TILE_SIZE; + for (int t = 0; t < num_tiles; ++t) { + const int k_start = t * TILE_SIZE; + const int a_s_idx = get_group_id(1) * TILE_SIZE + local_row; + const int a_k_idx = k_start + local_col; + if (a_s_idx < M && a_k_idx < K) { + a_tile[local_row][local_col] = A[(long)b * H * M * K + (long)h * M * K + (long)a_s_idx * K + a_k_idx]; + } else { + a_tile[local_row][local_col] = 0.0h; + } + const int b_n_idx = get_group_id(0) * TILE_SIZE + local_col; + const int b_k_idx = k_start + local_row; + if (b_n_idx < N && b_k_idx < K_b) { + b_tile[local_col][local_row] = B[(long)b * H * K_b * N + (long)h * K_b * N + (long)b_k_idx * N + b_n_idx]; + } else { + b_tile[local_col][local_row] = 0.0h; + } + barrier(CLK_LOCAL_MEM_FENCE); + +#pragma unroll + for (int k_tile_vec = 0; k_tile_vec < TILE_SIZE; k_tile_vec += 4) { + half4 a_vec = vload4(0, &a_tile[local_row][k_tile_vec]); + half4 b_vec = vload4(0, &b_tile[local_col][k_tile_vec]); + acc += dot(a_vec, b_vec); + } + barrier(CLK_LOCAL_MEM_FENCE); + } + if (s_out < M && n_out < N) { + C[(long)b * H * M * N + (long)h * M * N + (long)s_out * N + n_out] = acc; + } +} + +// #define TILE_M 8 +// #define TILE_N 4 +// #define K_STEP 4 + +// __kernel void gemm_fp16_bhsd( +// __global const half *A, +// __global const half *B, +// __global half *C, +// const int M, +// const int K, +// const int N, +// const int H, +// const int K_b) { +// const int gx = get_global_id(0); +// const int gy = get_global_id(1); +// const int bh_idx = get_global_id(2); +// const int m_base = gy * TILE_M; +// const int n_base = gx * TILE_N; +// if (m_base >= M || n_base >= N) { +// return; +// } +// const int b = bh_idx / H; +// const int h = bh_idx % H; +// __global const half *a_ptr = A + ((long)b * H + h) * M * K; +// __global const half *b_ptr = B + ((long)b * H + h) * K_b 
* N; +// half4 a_reg; +// half4 b_tile_reg[K_STEP]; +// half c_acc[TILE_M][TILE_N] = {{0.0h}}; +// for (int k_outer = 0; k_outer < K; k_outer += K_STEP) { +// #pragma unroll +// for (int i = 0; i < K_STEP; ++i) { +// if (k_outer + i < K) { +// b_tile_reg[i] = vload4(0, b_ptr + (k_outer + i) * N + n_base); +// } else { +// b_tile_reg[i] = (half4)(0.0h); +// } +// } +// #pragma unroll +// for (int m_local = 0; m_local < TILE_M; ++m_local) { +// if (m_base + m_local < M) { +// a_reg = vload4(0, a_ptr + (m_base + m_local) * K + k_outer); +// // 计算 C[m_local][0] +// half4 b_col0 = (half4)(b_tile_reg[0].s0, b_tile_reg[1].s0, b_tile_reg[2].s0, b_tile_reg[3].s0); +// c_acc[m_local][0] += dot(a_reg, b_col0); +// // 计算 C[m_local][1] +// half4 b_col1 = (half4)(b_tile_reg[0].s1, b_tile_reg[1].s1, b_tile_reg[2].s1, b_tile_reg[3].s1); +// c_acc[m_local][1] += dot(a_reg, b_col1); +// // 计算 C[m_local][2] +// half4 b_col2 = (half4)(b_tile_reg[0].s2, b_tile_reg[1].s2, b_tile_reg[2].s2, b_tile_reg[3].s2); +// c_acc[m_local][2] += dot(a_reg, b_col2); +// // 计算 C[m_local][3] +// half4 b_col3 = (half4)(b_tile_reg[0].s3, b_tile_reg[1].s3, b_tile_reg[2].s3, b_tile_reg[3].s3); +// c_acc[m_local][3] += dot(a_reg, b_col3); +// } +// } +// } +// __global half *c_ptr = C + ((long)b * H + h) * M * N; +// #pragma unroll +// for (int i = 0; i < TILE_M; ++i) { +// if (m_base + i < M) { +// #pragma unroll +// for (int j = 0; j < TILE_N; ++j) { +// if (n_base + j < N) { +// c_ptr[(long)(m_base + i) * N + (n_base + j)] = c_acc[i][j]; +// } +// } +// } +// } +// } + +#endif // SUPPORTS_FP16 diff --git a/mllm/backends/opencl/kernel/matmul_transb.cl b/mllm/backends/opencl/kernel/matmul_transb.cl new file mode 100644 index 000000000..a58a2f5aa --- /dev/null +++ b/mllm/backends/opencl/kernel/matmul_transb.cl @@ -0,0 +1,436 @@ +// opencl/kernel/matmul.cl + +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +// ================================================================== +// 1. 
宏定义和数据结构 +// ================================================================== + +#define TILE_SIZE 16 +#define QK4_0 32 +#define QK8_0 32 + +typedef struct { + half d; + uchar qs[QK4_0 / 2]; +} block_q4_0; + +// ================================================================== +// 2. FP32 GEMM 内核 +// ================================================================== +/** + * @brief 高性能 FP32 GEMM,计算 C = A * B^T + * @param A 矩阵 A,布局为 (B, M, H, K) + * @param B 矩阵 B,布局为 (B, N, H, K) + * @param C 矩阵 C,布局为 (B, M, H, N) + */ +__kernel void gemm_fp32_transb( + __global const float *A, + __global const float *B, + __global float *C, + const int M, const int K, const int N, + const int H) { + // --- 1. 索引计算 --- + const int s = get_global_id(1); + const int n = get_global_id(0); + const int bh_idx = get_global_id(2); + const int b = bh_idx / H; + const int h = bh_idx % H; + + const int local_row = get_local_id(1); + const int local_col = get_local_id(0); + + // --- 2. 初始化和局部内存 --- + __local float a_tile[TILE_SIZE][TILE_SIZE]; + __local float b_tile[TILE_SIZE][TILE_SIZE]; + + float acc = 0.0f; + const int num_tiles = (K + TILE_SIZE - 1) / TILE_SIZE; + + // --- 3. 沿 K 维度进行分块计算 --- + for (int t = 0; t < num_tiles; ++t) { + const int k_start = t * TILE_SIZE; + + // --- 3a. 
协作加载 A 和 B 的 tile --- + // 加载 A 的 tile: A[s, k] + const int a_k_idx = k_start + local_col; + if (s < M && a_k_idx < K) { + a_tile[local_row][local_col] = A[(long)b * M * H * K + (long)s * H * K + (long)h * K + a_k_idx]; + } else { + a_tile[local_row][local_col] = 0.0f; + } + + // 加载 B 的 tile: B[n, k] + // 整个工作组协作加载 B 的一个 TILE_SIZE * TILE_SIZE 区域 + const int b_n_idx = get_group_id(0) * TILE_SIZE + local_row; + const int b_k_idx = k_start + local_col; + if (b_n_idx < N && b_k_idx < K) { + // B 布局为 (B, N, H, K) + b_tile[local_row][local_col] = B[(long)b * N * H * K + (long)b_n_idx * H * K + (long)h * K + b_k_idx]; + } else { + b_tile[local_row][local_col] = 0.0f; + } + + barrier(CLK_LOCAL_MEM_FENCE); + + // --- 3b. 在局部内存中计算点积 --- + // C[s, n] = sum_k A[s, k] * B[n, k] + for (int k_tile = 0; k_tile < TILE_SIZE; ++k_tile) { + // A[s, k] 从 a_tile[local_row][k_tile] 获取 + // B[n, k] 从 b_tile[local_col][k_tile] 获取 + acc += a_tile[local_row][k_tile] * b_tile[local_col][k_tile]; + } + + barrier(CLK_LOCAL_MEM_FENCE); + } + + // --- 4. 
将结果写回全局内存 --- + if (s < M && n < N) { + C[(long)b * M * H * N + (long)s * H * N + (long)h * N + n] = acc; + } +} + +__kernel void gemm_fp32_q4_0_transb( + __global const float *A, + __global const uchar *B_q, + __global float *C, + const int M, const int K, const int N, + const int H, const int K_b) { + const int s = get_global_id(1); + const int n = get_global_id(0); + const int bh_idx = get_global_id(2); + const int b = bh_idx / H; + const int h = bh_idx % H; + + const int local_row = get_local_id(1); + const int local_col = get_local_id(0); + + __global const block_q4_0 *B = (__global const block_q4_0 *)B_q; + __local float a_tile[TILE_SIZE][TILE_SIZE]; + __local float b_dequant_tile[TILE_SIZE][TILE_SIZE]; + float acc = 0.0f; + const int num_tiles = (K + TILE_SIZE - 1) / TILE_SIZE; + + for (int t = 0; t < num_tiles; ++t) { + const int k_start = t * TILE_SIZE; + + const int a_k_idx = k_start + local_col; + if (s < M && a_k_idx < K) { + a_tile[local_row][local_col] = A[(long)b * M * H * K + (long)s * H * K + (long)h * K + a_k_idx]; + } else { + a_tile[local_row][local_col] = 0.0f; + } + + const int n_for_load = get_group_id(0) * TILE_SIZE + local_row; + const int k_for_load = k_start + local_col; + + if (n_for_load < N && k_for_load < K) { + const int k_block_idx = k_for_load / QK4_0; + const int k_in_block = k_for_load % QK4_0; + + const long b_block_mem_idx = (long)b * N * H * (K / QK4_0) + (long)n_for_load * H * (K / QK4_0) + (long)h * (K / QK4_0) + k_block_idx; + const __global block_q4_0 *b_block_ptr = &B[b_block_mem_idx]; + + const float d_b = vload_half(0, (__global half *)(&(b_block_ptr->d))); + + const int qs_idx = k_in_block % (QK4_0 / 2); + const uchar q_packed = b_block_ptr->qs[qs_idx]; + + char q_nibble; + if (k_in_block >= (QK4_0 / 2)) { + q_nibble = (q_packed >> 4); + } else { + q_nibble = (q_packed & 0x0F); + } + b_dequant_tile[local_row][local_col] = (float)(q_nibble - 8) * d_b; + } else { + b_dequant_tile[local_row][local_col] = 0.0f; + } + 
barrier(CLK_LOCAL_MEM_FENCE); + + for (int k_tile = 0; k_tile < TILE_SIZE; ++k_tile) { + acc += a_tile[local_row][k_tile] * b_dequant_tile[local_col][k_tile]; + } + barrier(CLK_LOCAL_MEM_FENCE); + } + + if (s < M && n < N) { + C[(long)b * M * H * N + (long)s * H * N + (long)h * N + n] = acc; + } +} + +/**************************************************************************************************/ +/* 新增算子: gemm_fp32_fp16_transb */ +/* 功能: 高性能混合精度矩阵乘法 C(fp32) = A(fp32) * B(fp16)^T */ +/* 架构: 采用与gemm_fp32_transb相同的Tiling架构,确保高性能和边界安全。 */ +/**************************************************************************************************/ +__kernel void gemm_fp32_fp16_transb( + __global const float *A, + __global const half *B, + __global float *C, + const int M, const int K, const int N, + const int H) { + // --- 1. 索引计算 --- + const int s = get_global_id(1); + const int n = get_global_id(0); + const int bh_idx = get_global_id(2); + const int b = bh_idx / H; + const int h = bh_idx % H; + + const int local_row = get_local_id(1); + const int local_col = get_local_id(0); + + // --- 2. 初始化和局部内存 --- + __local float a_tile[TILE_SIZE][TILE_SIZE]; + __local float b_tile[TILE_SIZE][TILE_SIZE]; // B Tile也使用float以保持精度 + + float acc = 0.0f; + const int num_tiles = (K + TILE_SIZE - 1) / TILE_SIZE; + + // --- 3. 沿 K 维度进行分块计算 --- + for (int t = 0; t < num_tiles; ++t) { + const int k_start = t * TILE_SIZE; + + // --- 3a. 协作加载 A (FP32) tile --- + const int a_k_idx = k_start + local_col; + if (s < M && a_k_idx < K) { + a_tile[local_row][local_col] = A[(long)b * M * H * K + (long)s * H * K + (long)h * K + a_k_idx]; + } else { + a_tile[local_row][local_col] = 0.0f; + } + + // --- 3b. 
协作加载 B (FP16) tile 并立即转换为 FP32 --- + const int b_n_idx = get_group_id(0) * TILE_SIZE + local_row; + const int b_k_idx = k_start + local_col; + if (b_n_idx < N && b_k_idx < K) { + // 从全局内存读取half,转换为float,存入局部内存 + b_tile[local_row][local_col] = (float)B[(long)b * N * H * K + (long)b_n_idx * H * K + (long)h * K + b_k_idx]; + } else { + b_tile[local_row][local_col] = 0.0f; + } + + barrier(CLK_LOCAL_MEM_FENCE); + + // --- 3c. 在局部内存中计算点积 (全部为FP32) --- + for (int k_tile = 0; k_tile < TILE_SIZE; ++k_tile) { + acc += a_tile[local_row][k_tile] * b_tile[local_col][k_tile]; + } + + barrier(CLK_LOCAL_MEM_FENCE); + } + + // --- 4. 将结果写回全局内存 --- + if (s < M && n < N) { + C[(long)b * M * H * N + (long)s * H * N + (long)h * N + n] = acc; + } +} + +// ================================================================== +// 3. FP16 GEMM 内核 +// ================================================================== + +/** + * @brief 高性能 FP16 GEMM,计算 C = A * B^T + */ +#if !defined(SUPPORTS_FP16) +// ---------- [FP16_transb 回退版] ---------- +__kernel void gemm_fp16_transb( + __global const half *A, + __global const half *B, + __global half *C, + const int M, const int K, const int N, + const int H) { + // 1. 索引计算:每个工作项负责计算输出 C 的一个元素 + const int s = get_global_id(1); // C 的行索引 (M 维度) + const int n = get_global_id(0); // C 的列索引 (N 维度) + const int bh_idx = get_global_id(2); + const int b = bh_idx / H; + const int h = bh_idx % H; + + // 边界检查 + if (s >= M || n >= N) return; + + // 2. 计算:C[s,n] = sum_k(A[s,k] * B[n,k]) + // 使用 float 累加器以保证精度,与原始回退版保持一致 + float acc = 0.0f; + for (int k = 0; k < K; ++k) { + // A[s,k] 的索引,布局 (B, M, H, K) + long a_idx = (long)b * M * H * K + (long)s * H * K + (long)h * K + k; + // B[n,k] 的索引,布局 (B, N, H, K) + long b_idx = (long)b * N * H * K + (long)n * H * K + (long)h * K + k; + + acc += (float)A[a_idx] * (float)B[b_idx]; + } + + // 3. 
写回结果 + long c_idx = (long)b * M * H * N + (long)s * H * N + (long)h * N + n; + C[c_idx] = (half)acc; +} +#else +// ---------- [FP16_transb 高性能版] ---------- +__kernel void gemm_fp16_transb( + __global const half *A, + __global const half *B, + __global half *C, + const int M, const int K, const int N, + const int H) { + const int s = get_global_id(1); + const int n = get_global_id(0); + const int bh_idx = get_global_id(2); + const int b = bh_idx / H; + const int h = bh_idx % H; + const int local_row = get_local_id(1); + const int local_col = get_local_id(0); + + __local half a_tile[TILE_SIZE][TILE_SIZE]; + __local half b_tile[TILE_SIZE][TILE_SIZE]; + + half acc = 0.0h; + const int num_tiles = (K + TILE_SIZE - 1) / TILE_SIZE; + + for (int t = 0; t < num_tiles; ++t) { + const int k_start = t * TILE_SIZE; + + const int a_k_idx = k_start + local_col; + if (s < M && a_k_idx < K) { + a_tile[local_row][local_col] = A[(long)b * M * H * K + (long)s * H * K + (long)h * K + a_k_idx]; + } else { + a_tile[local_row][local_col] = 0.0h; + } + + const int b_n_idx = get_group_id(0) * TILE_SIZE + local_row; + const int b_k_idx = k_start + local_col; + if (b_n_idx < N && b_k_idx < K) { + b_tile[local_row][local_col] = B[(long)b * N * H * K + (long)b_n_idx * H * K + (long)h * K + b_k_idx]; + } else { + b_tile[local_row][local_col] = 0.0h; + } + + barrier(CLK_LOCAL_MEM_FENCE); + + for (int k_tile = 0; k_tile < TILE_SIZE; ++k_tile) { + acc += a_tile[local_row][k_tile] * b_tile[local_col][k_tile]; + } + + barrier(CLK_LOCAL_MEM_FENCE); + } + + if (s < M && n < N) { + C[(long)b * M * H * N + (long)s * H * N + (long)h * N + n] = acc; + } +} +#endif // SUPPORTS_FP16 +#if !defined(SUPPORTS_FP16) +// ---------- [FP16_Q4_0 回退版 - 已修正] ---------- +__kernel void gemm_fp16_q4_0_transb( + __global const half *A, + __global const block_q4_0 *B, + __global half *C, + const int M, const int K, const int N, + const int H, const int K_b) { + const int s = get_global_id(1); + const int n = 
get_global_id(0); + const int bh_idx = get_global_id(2); + const int b = bh_idx / H; + const int h = bh_idx % H; + + if (s >= M || n >= N) { + return; + } + + float acc = 0.0f; + + const long a_row_offset = (long)b * M * H * K + (long)s * H * K + (long)h * K; + const long b_row_offset = (long)b * N * H * (K / QK4_0) + (long)n * H * (K / QK4_0) + (long)h * (K / QK4_0); + + for (int k_block_idx = 0; k_block_idx < K / QK4_0; ++k_block_idx) { + const __global block_q4_0 *b_block_ptr = &B[b_row_offset + k_block_idx]; + const float d_b = vload_half(0, (__global half *)(&(b_block_ptr->d))); + + const __global half *a_ptr = A + a_row_offset + k_block_idx * QK4_0; + + for (int j = 0; j < QK4_0 / 2; ++j) { + const uchar q_packed = b_block_ptr->qs[j]; + + const float b_val_0 = (float)((q_packed & 0x0F) - 8) * d_b; + const float b_val_1 = (float)((q_packed >> 4) - 8) * d_b; + + acc += (float)a_ptr[j] * b_val_0; + acc += (float)a_ptr[j + QK4_0 / 2] * b_val_1; + } + } + + C[(long)b * M * H * N + (long)s * H * N + (long)h * N + n] = (half)acc; +} +#else +// ---------- [FP16_Q4_0 高性能版 - 已修正] ---------- +__kernel void gemm_fp16_q4_0_transb( + __global const half *A, + __global const block_q4_0 *B, + __global half *C, + const int M, const int K, const int N, + const int H, const int K_b) { + const int s = get_global_id(1); + const int n = get_global_id(0); + const int bh_idx = get_global_id(2); + const int b = bh_idx / H; + const int h = bh_idx % H; + + const int local_row = get_local_id(1); + const int local_col = get_local_id(0); + + __local half a_tile[TILE_SIZE][TILE_SIZE]; + __local float b_dequant_tile[TILE_SIZE][TILE_SIZE]; + float acc = 0.0f; + const int num_tiles = (K + TILE_SIZE - 1) / TILE_SIZE; + + for (int t = 0; t < num_tiles; ++t) { + const int k_start = t * TILE_SIZE; + + const int a_k_idx = k_start + local_col; + if (s < M && a_k_idx < K) { + a_tile[local_row][local_col] = A[(long)b * M * H * K + (long)s * H * K + (long)h * K + a_k_idx]; + } else { + 
            // Out-of-range lanes pad the A tile with zeros.
            a_tile[local_row][local_col] = 0.0h;
        }

        // Cooperative load of one B tile, dequantizing Q4_0 on the fly.
        const int n_for_load = get_group_id(0) * TILE_SIZE + local_row;
        const int k_for_load = k_start + local_col;

        if (n_for_load < N && k_for_load < K) {
            const int k_block_idx = k_for_load / QK4_0;
            const int k_in_block = k_for_load % QK4_0;

            const long b_block_mem_idx = (long)b * N * H * (K / QK4_0) + (long)n_for_load * H * (K / QK4_0) + (long)h * (K / QK4_0) + k_block_idx;
            const __global block_q4_0 *b_block_ptr = &B[b_block_mem_idx];

            const float d_b = vload_half(0, (__global half *)(&(b_block_ptr->d)));

            // Each qs byte packs two 4-bit quants: low nibbles hold block
            // elements 0..15, high nibbles hold elements 16..31.
            const int qs_idx = k_in_block % (QK4_0 / 2);
            const uchar q_packed = b_block_ptr->qs[qs_idx];

            char q_nibble;
            if (k_in_block >= (QK4_0 / 2)) {
                q_nibble = (q_packed >> 4);
            } else {
                q_nibble = (q_packed & 0x0F);
            }
            b_dequant_tile[local_row][local_col] = (float)(q_nibble - 8) * d_b;
        } else {
            b_dequant_tile[local_row][local_col] = 0.0f;
        }

        barrier(CLK_LOCAL_MEM_FENCE);

        // b_dequant_tile[r][c] holds B[group_n*TILE+r][k_start+c], so the
        // dot product for output column n reads row local_col.
        for (int k_tile = 0; k_tile < TILE_SIZE; ++k_tile) {
            acc += (float)a_tile[local_row][k_tile] * b_dequant_tile[local_col][k_tile];
        }
        barrier(CLK_LOCAL_MEM_FENCE);
    }

    if (s < M && n < N) {
        C[(long)b * M * H * N + (long)s * H * N + (long)h * N + n] = (half)acc;
    }
}

#endif // SUPPORTS_FP16
\ No newline at end of file
diff --git a/mllm/backends/opencl/kernel/matmul_transb_bias.cl b/mllm/backends/opencl/kernel/matmul_transb_bias.cl
new file mode 100644
index 000000000..beb0a3ad1
--- /dev/null
+++ b/mllm/backends/opencl/kernel/matmul_transb_bias.cl
@@ -0,0 +1,1972 @@
// kernel/matmul_transb_bias.cl

#pragma OPENCL EXTENSION cl_khr_fp16 : enable
#define TILE_SIZE 16
#define QK4_0 32
#define QK8_0 32

// One Q4_0 quantization block: a half scale `d` plus 16 bytes holding
// 32 packed 4-bit quants.
typedef struct {
    half d;
    uchar qs[QK4_0 / 2];
} block_q4_0;

// ==================================================================
// 1.
// FP32 Fused GEMM + Bias Kernel
// ==================================================================
// Tiled C = A * B^T with optional bias, all FP32.
// Layouts: A [b][s][h][k], B [b][n][h][k], C [b][s][h][n].
__kernel void gemm_fp32_transb_bias(
    __global const float *A,
    __global const float *B,
    __global const float *bias,
    __global float *C,
    const int M, const int K, const int N,
    const int H, const int K_b,
    const int has_bias) {
    const int s = get_global_id(1);
    const int n = get_global_id(0);
    const int bh_idx = get_global_id(2);
    const int b = bh_idx / H;
    const int h = bh_idx % H;

    const int local_row = get_local_id(1);
    const int local_col = get_local_id(0);

    __local float a_tile[TILE_SIZE][TILE_SIZE];
    __local float b_tile[TILE_SIZE][TILE_SIZE];

    float acc = 0.0f;
    const int num_tiles = (K + TILE_SIZE - 1) / TILE_SIZE;

    for (int t = 0; t < num_tiles; ++t) {
        const int k_start = t * TILE_SIZE;

        // Cooperative load of the A tile, zero-padded at the borders
        // (no early return, so every lane reaches the barriers).
        const int a_k_idx = k_start + local_col;
        if (s < M && a_k_idx < K) {
            a_tile[local_row][local_col] = A[(long)b * M * H * K + (long)s * H * K + (long)h * K + a_k_idx];
        } else {
            a_tile[local_row][local_col] = 0.0f;
        }

        // Cooperative load of the B tile (transB: read along K).
        const int b_n_idx = get_group_id(0) * TILE_SIZE + local_row;
        const int b_k_idx = k_start + local_col;
        if (b_n_idx < N && b_k_idx < K) {
            b_tile[local_row][local_col] = B[(long)b * N * H * K + (long)b_n_idx * H * K + (long)h * K + b_k_idx];
        } else {
            b_tile[local_row][local_col] = 0.0f;
        }
        barrier(CLK_LOCAL_MEM_FENCE);

        for (int k_tile = 0; k_tile < TILE_SIZE; ++k_tile) {
            acc += a_tile[local_row][k_tile] * b_tile[local_col][k_tile];
        }
        barrier(CLK_LOCAL_MEM_FENCE);
    }

    if (s < M && n < N) {
        if (has_bias != 0) {
            acc += bias[n];
        }
        C[(long)b * M * H * N + (long)s * H * N + (long)h * N + n] = acc;
    }
}

// FP32 activations x FP16 weights, FP32 output, optional bias.
__kernel void gemm_fp32_fp16_transb_bias(
    __global const float *A,
    __global const half *B,
    __global const float *bias,
    __global float *C,
    const int M, const int K, const int N,
    const int H,
    const int has_bias) {
    // --- 1.
索引计算 --- + const int s = get_global_id(1); + const int n = get_global_id(0); + const int bh_idx = get_global_id(2); + const int b = bh_idx / H; + const int h = bh_idx % H; + + const int local_row = get_local_id(1); + const int local_col = get_local_id(0); + + // --- 2. 初始化和局部内存 --- + __local float a_tile[TILE_SIZE][TILE_SIZE]; + __local float b_tile[TILE_SIZE][TILE_SIZE]; + + float acc = 0.0f; + const int num_tiles = (K + TILE_SIZE - 1) / TILE_SIZE; + + // --- 3. 沿 K 维度进行分块计算 --- + for (int t = 0; t < num_tiles; ++t) { + const int k_start = t * TILE_SIZE; + + // --- 3a. 协作加载 A (FP32) tile --- + const int a_k_idx = k_start + local_col; + if (s < M && a_k_idx < K) { + a_tile[local_row][local_col] = A[(long)b * M * H * K + (long)s * H * K + (long)h * K + a_k_idx]; + } else { + a_tile[local_row][local_col] = 0.0f; + } + + // --- 3b. 协作加载 B (FP16) tile 并立即转换为 FP32 --- + const int b_n_idx = get_group_id(0) * TILE_SIZE + local_row; + const int b_k_idx = k_start + local_col; + if (b_n_idx < N && b_k_idx < K) { + b_tile[local_row][local_col] = (float)B[(long)b * N * H * K + (long)b_n_idx * H * K + (long)h * K + b_k_idx]; + } else { + b_tile[local_row][local_col] = 0.0f; + } + + barrier(CLK_LOCAL_MEM_FENCE); + + // --- 3c. 在局部内存中计算点积 (全部为FP32) --- + for (int k_tile = 0; k_tile < TILE_SIZE; ++k_tile) { + acc += a_tile[local_row][k_tile] * b_tile[local_col][k_tile]; + } + + barrier(CLK_LOCAL_MEM_FENCE); + } + + // --- 4. 添加偏置(可选)并将结果写回全局内存 --- + if (s < M && n < N) { + if (has_bias != 0) { + acc += bias[n]; + } + C[(long)b * M * H * N + (long)s * H * N + (long)h * N + n] = acc; + } +} + +// ================================================================== +// 2. 
// FP16 Fused GEMM + Bias Kernel
// ==================================================================
#if defined(SUPPORTS_FP16)
// ---------- [FP16_transb_bias high-performance version] ----------
// Tiled half-precision GEMM + optional FP32 bias.
__kernel void gemm_fp16_transb_bias(
    __global const half *A,
    __global const half *B,
    __global const float *bias,
    __global half *C,
    const int M, const int K, const int N,
    const int H, const int K_b,
    const int has_bias) {
    const int s = get_global_id(1);
    const int n = get_global_id(0);
    const int bh_idx = get_global_id(2);
    const int b = bh_idx / H;
    const int h = bh_idx % H;

    const int local_row = get_local_id(1);
    const int local_col = get_local_id(0);

    __local half a_tile[TILE_SIZE][TILE_SIZE];
    __local half b_tile[TILE_SIZE][TILE_SIZE];

    half acc = 0.0h;
    const int num_tiles = (K + TILE_SIZE - 1) / TILE_SIZE;

    for (int t = 0; t < num_tiles; ++t) {
        const int k_start = t * TILE_SIZE;
        const int a_k_idx = k_start + local_col;
        if (s < M && a_k_idx < K) {
            a_tile[local_row][local_col] = A[(long)b * M * H * K + (long)s * H * K + (long)h * K + a_k_idx];
        } else {
            a_tile[local_row][local_col] = 0.0h;
        }

        const int b_n_idx = get_group_id(0) * TILE_SIZE + local_row;
        const int b_k_idx = k_start + local_col;
        if (b_n_idx < N && b_k_idx < K) {
            b_tile[local_row][local_col] = B[(long)b * N * H * K + (long)b_n_idx * H * K + (long)h * K + b_k_idx];
        } else {
            b_tile[local_row][local_col] = 0.0h;
        }
        barrier(CLK_LOCAL_MEM_FENCE);

        for (int k_tile = 0; k_tile < TILE_SIZE; ++k_tile) {
            acc += a_tile[local_row][k_tile] * b_tile[local_col][k_tile];
        }
        barrier(CLK_LOCAL_MEM_FENCE);
    }

    if (s < M && n < N) {
        if (has_bias != 0) {
            acc += bias[n];
        }
        C[(long)b * M * H * N + (long)s * H * N + (long)h * N + n] = acc;
    }
}
#else
// ---------- [FP16_transb_bias fallback version] ----------
// Compiled only when the device lacks cl_khr_fp16, so half may never be
// dereferenced or cast here: all half traffic uses vload_half/vstore_half.
__kernel void gemm_fp16_transb_bias(
    __global const half *A,
    __global const half *B,
    __global const float *bias,
    __global half *C,
    const int M, const int K, const int N,
    const int H, const int K_b,
    const int has_bias) {
    const int s = get_global_id(1);
    const int n = get_global_id(0);
    const int bh_idx = get_global_id(2);
    const int b = bh_idx / H;
    const int h = bh_idx % H;

    if (s >= M || n >= N) return;

    float acc = 0.0f;
    for (int k = 0; k < K; ++k) {
        long a_idx = (long)b * M * H * K + (long)s * H * K + (long)h * K + k;
        long b_idx = (long)b * N * H * K + (long)n * H * K + (long)h * K + k;
        // FIX: (float)A[a_idx] / (float)B[b_idx] dereference half pointers,
        // which is exactly what this no-fp16 branch must not do.
        acc += vload_half(a_idx, A) * vload_half(b_idx, B);
    }

    if (has_bias != 0) {
        acc += bias[n];
    }
    long c_idx = (long)b * M * H * N + (long)s * H * N + (long)h * N + n;
    // FIX: store through vstore_half_rte instead of C[c_idx] = (half)acc.
    vstore_half_rte(acc, 0, &C[c_idx]);
}
#endif // SUPPORTS_FP16

// ==================================================================
// 3. FP32 * Q4_0 Fused GEMV + Bias Kernels (for M = 1, Decoding)
// ==================================================================
__kernel void gemv_fp32_q4_0_transb_bias(
    __global const float *A,
    __global const block_q4_0 *B,
    __global const float *bias,
    __global float *C,
    const int K, const int N,
    const int H,
    const int has_bias) {
    // [fixed] Take indices from the group ID rather than the global ID so
    // the whole work-group agrees on (n, b, h).
    const int n = get_group_id(0);
    const int bh_idx = get_group_id(1);
    const int b = bh_idx / H;
    const int h = bh_idx % H;

    // Bounds check (whole group exits together, so the barriers are safe).
    if (n >= N) return;

    const int local_id = get_local_id(0);
    const int wg_size = get_local_size(0);
    __local float partial_sums[256];

    float private_acc = 0.0f;
    const long a_base_idx = (long)b * H * K + (long)h * K;
    const long b_row_offset_blocks = (long)b * N * H * (K / QK4_0) + (long)n * H * (K / QK4_0) + (long)h * (K / QK4_0);

    // Each work-item accumulates a strided slice of the K dimension.
    for (int k = local_id; k < K; k += wg_size) {
        const int k_block_idx = k / QK4_0;
        const int k_in_block = k % QK4_0;

        const __global block_q4_0 *b_block_ptr = &B[b_row_offset_blocks + k_block_idx];
#if defined(SUPPORTS_FP16)
        const float d_b = vload_half(0, (__global half *)(&(b_block_ptr->d)));
#else
        const float
d_b = (float)(b_block_ptr->d); // TODO Change here [gemini] +#endif + // 反量化逻辑 (这部分与 gemm 内核一致,是正确的) + const uchar q_packed = b_block_ptr->qs[k_in_block % 16]; + char q_nibble = (k_in_block < 16) ? (q_packed & 0x0F) : (q_packed >> 4); + const float b_val = (float)(q_nibble - 8) * d_b; + + private_acc += A[a_base_idx + k] * b_val; + } + + // 将各自的部分和存入局部内存 + partial_sums[local_id] = private_acc; + barrier(CLK_LOCAL_MEM_FENCE); + + // 在工作组内进行规约求和 + for (int offset = wg_size / 2; offset > 0; offset >>= 1) { + if (local_id < offset) { + partial_sums[local_id] += partial_sums[local_id + offset]; + } + barrier(CLK_LOCAL_MEM_FENCE); + } + + // 由工作组的第一个线程写入最终结果 + if (local_id == 0) { + float final_val = partial_sums[0]; + if (has_bias != 0) { + final_val += bias[n]; + } + const long c_idx = (long)b * H * N + (long)h * N + n; + C[c_idx] = final_val; + } +} +// ================================================================== +// 4. FP32 * Q4_0 Fused GEMM + Bias Kernels (for M > 1, Training) +// ================================================================== + +__kernel void gemm_fp32_q4_0_transb_bias( + __global const float *A, + __global const block_q4_0 *B, + __global const float *bias, + __global float *C, + const int M, const int K, const int N, + const int H, const int K_b, + const int has_bias) { + // --- 1. 索引计算 (与原版保持一致) --- + const int s = get_global_id(1); + const int n = get_global_id(0); + const int bh_idx = get_global_id(2); + const int b = bh_idx / H; + const int h = bh_idx % H; + + // --- 2. 边界检查 --- + if (s >= M || n >= N) { + return; + } + + // --- 3. 
基于寄存器的优化计算 --- + + float acc = 0.0f; + + const long a_row_offset = (long)b * M * H * K + (long)s * H * K + (long)h * K; + const long b_row_offset_blocks = (long)b * N * H * (K / QK4_0) + (long)n * H * (K / QK4_0) + (long)h * (K / QK4_0); + + // 主循环:沿K维度进行,每次处理一个大小为 QK4_0 (32) 的块 + for (int k_block_idx = 0; k_block_idx < K / QK4_0; ++k_block_idx) { + const __global block_q4_0 *b_block_ptr = &B[b_row_offset_blocks + k_block_idx]; + // const float d_b = vload_half(0, (__global half *)(&(b_block_ptr->d))); + +#if defined(SUPPORTS_FP16) + const float d_b = vload_half(0, (__global half *)(&(b_block_ptr->d))); +#else + const float d_b = (float)(b_block_ptr->d); // TODO Change here [gemini] +#endif + const __global float *a_ptr = A + a_row_offset + k_block_idx * QK4_0; + + // ** 向量化核心 ** + // 循环展开,每次处理块内16个 'uchar' 中的4个,对应8个浮点数 + for (int j = 0; j < QK4_0 / 2; j += 4) { // QK4_0/2 = 16 + + // ** 修正点:安全地加载和解包B矩阵的值 ** + // 之前使用*((__global uint*))是不安全的,现改为逐字节加载 + const uchar q_packed0 = b_block_ptr->qs[j + 0]; + const uchar q_packed1 = b_block_ptr->qs[j + 1]; + const uchar q_packed2 = b_block_ptr->qs[j + 2]; + const uchar q_packed3 = b_block_ptr->qs[j + 3]; + + // A矩阵的向量化加载是安全的 + const float4 a_vals_lo = vload4(0, a_ptr + j); + const float4 a_vals_hi = vload4(0, a_ptr + j + (QK4_0 / 2)); // (QK4_0 / 2) = 16 + + // 将4个uchar解包成两个float4向量 + float4 b_dequant_lo; + b_dequant_lo.x = (float)((q_packed0 & 0x0F) - 8) * d_b; // qs[j] 的低4位 + b_dequant_lo.y = (float)((q_packed1 & 0x0F) - 8) * d_b; // qs[j+1] 的低4位 + b_dequant_lo.z = (float)((q_packed2 & 0x0F) - 8) * d_b; // qs[j+2] 的低4位 + b_dequant_lo.w = (float)((q_packed3 & 0x0F) - 8) * d_b; // qs[j+3] 的低4位 + + float4 b_dequant_hi; + b_dequant_hi.x = (float)((q_packed0 >> 4) - 8) * d_b; // qs[j] 的高4位 + b_dequant_hi.y = (float)((q_packed1 >> 4) - 8) * d_b; // qs[j+1] 的高4位 + b_dequant_hi.z = (float)((q_packed2 >> 4) - 8) * d_b; // qs[j+2] 的高4位 + b_dequant_hi.w = (float)((q_packed3 >> 4) - 8) * d_b; // qs[j+3] 的高4位 + + // ** 核心计算 ** + acc 
+= dot(a_vals_lo, b_dequant_lo); + acc += dot(a_vals_hi, b_dequant_hi); + } + } + + if (has_bias != 0) { + acc += bias[n]; + } + + const long c_idx = (long)b * M * H * N + (long)s * H * N + (long)h * N + n; + C[c_idx] = acc; +} + +// ================================================================== +// 5. FP16 * Q4_0 Fused GEMV + Bias Kernel (for M=1, Decoding) +// ================================================================== +#if defined(SUPPORTS_FP16) + +// ---------- [高性能版 - 向量化 + 并行规约] ---------- + +__kernel void gemv_fp16_q4_0_transb_bias( + __global const half *A, + __global const block_q4_0 *B, + __global const float *bias, + __global half *C, + const int K, + const int N, + const int H, + const int has_bias) { + const int n = get_group_id(0); + const int bh_idx = get_group_id(1); + const int b = bh_idx / H; + const int h = bh_idx % H; + if (n >= N) return; + const int local_id = get_local_id(0); + const int wg_size = get_local_size(0); + __local float partial_sums[256]; + float private_acc = 0.0f; + const long a_base_idx = (long)b * H * K + (long)h * K; + const long b_row_offset_blocks = (long)b * N * H * (K / QK4_0) + (long)n * H * (K / QK4_0) + (long)h * (K / QK4_0); + const int num_k_blocks = K / QK4_0; + + for (int k_block_idx = local_id; k_block_idx < num_k_blocks; k_block_idx += wg_size) { + const __global block_q4_0 *b_block_ptr = &B[b_row_offset_blocks + k_block_idx]; + const float d_b = vload_half(0, (__global half *)(&(b_block_ptr->d))); + const __global half *a_ptr = A + a_base_idx + k_block_idx * QK4_0; + +#pragma unroll + for (int j = 0; j < QK4_0 / 2; j += 4) { // j = 0, 4, 8, 12 + const uchar q_packed0 = b_block_ptr->qs[j + 0]; + const uchar q_packed1 = b_block_ptr->qs[j + 1]; + const uchar q_packed2 = b_block_ptr->qs[j + 2]; + const uchar q_packed3 = b_block_ptr->qs[j + 3]; + const int vec_offset_lo = j / 4; + const int vec_offset_hi = j / 4 + 4; + const float4 a_vals_lo = convert_float4(vload4(vec_offset_lo, a_ptr)); + const float4 
a_vals_hi = convert_float4(vload4(vec_offset_hi, a_ptr));
            float4 b_dequant_lo, b_dequant_hi;
            b_dequant_lo.x = (float)((q_packed0 & 0x0F) - 8) * d_b;
            b_dequant_lo.y = (float)((q_packed1 & 0x0F) - 8) * d_b;
            b_dequant_lo.z = (float)((q_packed2 & 0x0F) - 8) * d_b;
            b_dequant_lo.w = (float)((q_packed3 & 0x0F) - 8) * d_b;
            b_dequant_hi.x = (float)((q_packed0 >> 4) - 8) * d_b;
            b_dequant_hi.y = (float)((q_packed1 >> 4) - 8) * d_b;
            b_dequant_hi.z = (float)((q_packed2 >> 4) - 8) * d_b;
            b_dequant_hi.w = (float)((q_packed3 >> 4) - 8) * d_b;
            private_acc += dot(a_vals_lo, b_dequant_lo);
            private_acc += dot(a_vals_hi, b_dequant_hi);
        }
    }
    partial_sums[local_id] = private_acc;
    barrier(CLK_LOCAL_MEM_FENCE);
    // Work-group tree reduction.
    for (int offset = wg_size / 2; offset > 0; offset >>= 1) {
        if (local_id < offset) {
            partial_sums[local_id] += partial_sums[local_id + offset];
        }
        barrier(CLK_LOCAL_MEM_FENCE);
    }
    if (local_id == 0) {
        float final_val = partial_sums[0];
        if (has_bias != 0) {
            final_val += bias[n];
        }
        const long c_idx = (long)b * H * N + (long)h * N + n;
        vstore_half_rte(final_val, 0, &C[c_idx]);
    }
}

// #pragma OPENCL EXTENSION cl_khr_fp16 : enable

// Helper: horizontal sum of a half16 vector, widened to float at the end.
inline float hsum_half16(half16 v) {
    half8 r1 = v.lo + v.hi;
    half4 r2 = r1.lo + r1.hi;
    half2 r3 = r2.lo + r2.hi;
    return (float)(r3.x + r3.y);
}

// half16-vectorized GEMV variant.
// NOTE(review): this kernel unpacks nibbles interleaved per byte
// (s0 = lo(qs[0]), s1 = hi(qs[0]), ...), whereas every other Q4_0 kernel in
// this file pairs element j with qs[j]&0x0F and element j+16 with qs[j]>>4.
// Presumably the host only dispatches this kernel for weights repacked in
// interleaved order — TODO confirm against the caller.
__kernel void gemv_fp16_q4_0_transb_bias_half16(
    __global const half *A,
    __global const block_q4_0 *B,
    __global const float *bias,
    __global half *C,
    const int K,
    const int N,
    const int H,
    const int has_bias) {
    const int n = get_group_id(0);
    const int bh_idx = get_group_id(1);
    const int b = bh_idx / H;
    const int h = bh_idx % H;
    if (n >= N) return;

    const int local_id = get_local_id(0);
    const int wg_size = get_local_size(0);

    // float accumulator preserves precision (recommended);
    // for maximum speed (at precision risk) use:
    // half private_acc = 0.0h;
    float private_acc = 0.0f;

    // float local memory preserves precision (recommended);
    // for maximum speed use:
    // __local half partial_sums[256];
    __local float partial_sums[256];

    const long a_base_idx = (long)b * H * K + (long)h * K;
    const long b_row_offset_blocks = (long)b * N * H * (K / QK4_0) + (long)n * H * (K / QK4_0) + (long)h * (K / QK4_0);

    // K is split into blocks of QK4_0 (32) elements.
    const int num_k_blocks = K / QK4_0;

    for (int k_block_idx = local_id; k_block_idx < num_k_blocks; k_block_idx += wg_size) {
        const __global block_q4_0 *b_block_ptr = &B[b_row_offset_blocks + k_block_idx];
        const half d_b = b_block_ptr->d;

        // QK4_0 = 32; a_ptr points at one 32-half block of the activations.
        const __global half *a_ptr = A + a_base_idx + k_block_idx * QK4_0;

        // ** Optimization core: 16 elements per step **
        // First half of the block (elements 0-15).
        const half16 a_vals_lo = vload16(0, a_ptr);
        const uchar8 q_packed_lo = vload8(0, b_block_ptr->qs); // 8 uchars = 16 4-bit values

        // Fast dequantization (interleaved nibble order — see NOTE above).
        char16 b_s_lo;
        b_s_lo.s0 = (q_packed_lo.s0 & 0x0F) - 8;
        b_s_lo.s1 = (q_packed_lo.s0 >> 4) - 8;
        b_s_lo.s2 = (q_packed_lo.s1 & 0x0F) - 8;
        b_s_lo.s3 = (q_packed_lo.s1 >> 4) - 8;
        b_s_lo.s4 = (q_packed_lo.s2 & 0x0F) - 8;
        b_s_lo.s5 = (q_packed_lo.s2 >> 4) - 8;
        b_s_lo.s6 = (q_packed_lo.s3 & 0x0F) - 8;
        b_s_lo.s7 = (q_packed_lo.s3 >> 4) - 8;
        b_s_lo.s8 = (q_packed_lo.s4 & 0x0F) - 8;
        b_s_lo.s9 = (q_packed_lo.s4 >> 4) - 8;
        b_s_lo.sa = (q_packed_lo.s5 & 0x0F) - 8;
        b_s_lo.sb = (q_packed_lo.s5 >> 4) - 8;
        b_s_lo.sc = (q_packed_lo.s6 & 0x0F) - 8;
        b_s_lo.sd = (q_packed_lo.s6 >> 4) - 8;
        b_s_lo.se = (q_packed_lo.s7 & 0x0F) - 8;
        b_s_lo.sf = (q_packed_lo.s7 >> 4) - 8;

        const half16 b_vals_dequant_lo = convert_half16(b_s_lo) * d_b;
        private_acc += hsum_half16(a_vals_lo * b_vals_dequant_lo);

        // Second half of the block (elements 16-31).
        const half16 a_vals_hi = vload16(0, a_ptr + 16);
        const uchar8 q_packed_hi = vload8(0, b_block_ptr->qs + 8); // last 8 uchars

        char16 b_s_hi;
        b_s_hi.s0 = (q_packed_hi.s0 & 0x0F) - 8;
        b_s_hi.s1 = (q_packed_hi.s0 >> 4) - 8;
        b_s_hi.s2 = (q_packed_hi.s1 & 0x0F) - 8;
        b_s_hi.s3 = (q_packed_hi.s1 >> 4) - 8;
        b_s_hi.s4 = (q_packed_hi.s2 & 0x0F) - 8;
        b_s_hi.s5 = (q_packed_hi.s2 >> 4) - 8;
        b_s_hi.s6 = (q_packed_hi.s3 & 0x0F) - 8;
        b_s_hi.s7 = (q_packed_hi.s3 >> 4) - 8;
        b_s_hi.s8 = (q_packed_hi.s4 & 0x0F) - 8;
        b_s_hi.s9 = (q_packed_hi.s4 >> 4) - 8;
        b_s_hi.sa = (q_packed_hi.s5 & 0x0F) - 8;
        b_s_hi.sb = (q_packed_hi.s5 >> 4) - 8;
        b_s_hi.sc = (q_packed_hi.s6 & 0x0F) - 8;
        b_s_hi.sd = (q_packed_hi.s6 >> 4) - 8;
        b_s_hi.se = (q_packed_hi.s7 & 0x0F) - 8;
        b_s_hi.sf = (q_packed_hi.s7 >> 4) - 8;

        const half16 b_vals_dequant_hi = convert_half16(b_s_hi) * d_b;
        private_acc += hsum_half16(a_vals_hi * b_vals_dequant_hi);
    }

    partial_sums[local_id] = private_acc;
    barrier(CLK_LOCAL_MEM_FENCE);

    // Parallel reduction.
    for (int offset = wg_size / 2; offset > 0; offset >>= 1) {
        if (local_id < offset) {
            partial_sums[local_id] += partial_sums[local_id + offset];
        }
        barrier(CLK_LOCAL_MEM_FENCE);
    }

    // First work-item writes the final result.
    if (local_id == 0) {
        float final_val = partial_sums[0];
        if (has_bias != 0) {
            final_val += bias[n];
        }
        const long c_idx = (long)b * H * N + (long)h * N + n;
        vstore_half_rte(final_val, 0, &C[c_idx]);
    }
}

#else
// ---------- [compatibility version - scalar + parallel reduction] ----------
__kernel void gemv_fp16_q4_0_transb_bias(
    __global const half *A,
    __global const block_q4_0 *B,
    __global const float *bias,
    __global half *C,
    const int K,
    const int N,
    const int H,
    const int has_bias) {
    // --- 1.
索引计算 (与原版一致) --- + const int n = get_group_id(0); + const int bh_idx = get_group_id(1); + const int b = bh_idx / H; + const int h = bh_idx % H; + if (n >= N) return; + const int local_id = get_local_id(0); + const int wg_size = get_local_size(0); + __local float partial_sums[256]; + float private_acc = 0.0f; + const long a_base_idx = (long)b * H * K + (long)h * K; + const long b_row_offset_blocks = (long)b * N * H * (K / QK4_0) + (long)n * H * (K / QK4_0) + (long)h * (K / QK4_0); + const int num_k_blocks = K / QK4_0; + for (int k_block_idx = local_id; k_block_idx < num_k_blocks; k_block_idx += wg_size) { + const __global block_q4_0 *b_block_ptr = &B[b_row_offset_blocks + k_block_idx]; + const float d_b = vload_half(0, (__global half *)(&(b_block_ptr->d))); + const __global half *a_ptr = A + a_base_idx + k_block_idx * QK4_0; +#pragma unroll + for (int j = 0; j < QK4_0 / 2; j += 4) { + const uchar q_packed0 = b_block_ptr->qs[j + 0]; + const uchar q_packed1 = b_block_ptr->qs[j + 1]; + const uchar q_packed2 = b_block_ptr->qs[j + 2]; + const uchar q_packed3 = b_block_ptr->qs[j + 3]; + // 将vload4替换为手动的标量加载,以避免内存对齐问题。 + float4 a_vals_lo, a_vals_hi; + a_vals_lo.x = (float)a_ptr[j + 0]; + a_vals_lo.y = (float)a_ptr[j + 1]; + a_vals_lo.z = (float)a_ptr[j + 2]; + a_vals_lo.w = (float)a_ptr[j + 3]; + a_vals_hi.x = (float)a_ptr[j + 16 + 0]; + a_vals_hi.y = (float)a_ptr[j + 16 + 1]; + a_vals_hi.z = (float)a_ptr[j + 16 + 2]; + a_vals_hi.w = (float)a_ptr[j + 16 + 3]; + float4 b_dequant_lo, b_dequant_hi; + b_dequant_lo.x = (float)((q_packed0 & 0x0F) - 8) * d_b; + b_dequant_lo.y = (float)((q_packed1 & 0x0F) - 8) * d_b; + b_dequant_lo.z = (float)((q_packed2 & 0x0F) - 8) * d_b; + b_dequant_lo.w = (float)((q_packed3 & 0x0F) - 8) * d_b; + b_dequant_hi.x = (float)((q_packed0 >> 4) - 8) * d_b; + b_dequant_hi.y = (float)((q_packed1 >> 4) - 8) * d_b; + b_dequant_hi.z = (float)((q_packed2 >> 4) - 8) * d_b; + b_dequant_hi.w = (float)((q_packed3 >> 4) - 8) * d_b; + private_acc += 
dot(a_vals_lo, b_dequant_lo);
            private_acc += dot(a_vals_hi, b_dequant_hi);
        }
    }
    partial_sums[local_id] = private_acc;
    barrier(CLK_LOCAL_MEM_FENCE);
    // Work-group tree reduction of the partial sums.
    for (int offset = wg_size / 2; offset > 0; offset >>= 1) {
        if (local_id < offset) {
            partial_sums[local_id] += partial_sums[local_id + offset];
        }
        barrier(CLK_LOCAL_MEM_FENCE);
    }
    if (local_id == 0) {
        float final_val = partial_sums[0];
        if (has_bias != 0) {
            final_val += bias[n];
        }
        const long c_idx = (long)b * H * N + (long)h * N + n;
        vstore_half_rte(final_val, 0, &C[c_idx]);
    }
}

#endif // SUPPORTS_FP16

// ==================================================================
// 6. FP16 * Q4_0 Fused GEMM + Bias Kernel (for M>1, Prefill)
// ==================================================================
#if defined(SUPPORTS_FP16)
// ---------- [high-performance version - tiling + registers] ----------

// Tile sizes: each work-group computes a TILE_M x TILE_N output tile;
// each work-item computes a WPT_M x WPT_N register sub-tile.
#define TILE_M 64
#define TILE_N 64
#define TILE_K 16
#define WPT_M 8
#define WPT_N 8
#define THREADS_X (TILE_N / WPT_N) // 8
#define THREADS_Y (TILE_M / WPT_M) // 8

__kernel void gemm_fp16_q4_0_transb_bias(
    __global const half *A,
    __global const block_q4_0 *B,
    __global const float *bias,
    __global half *C,
    const int M, const int K, const int N,
    const int H, const int K_b,
    const int has_bias) {
    const int group_m_idx = get_group_id(1);
    const int group_n_idx = get_group_id(0);
    const int local_m_idx = get_local_id(1);
    const int local_n_idx = get_local_id(0);
    const int bh_idx = get_global_id(2);
    const int b = bh_idx / H;
    const int h = bh_idx % H;
    // +1 padding column avoids local-memory bank conflicts.
    __local half a_tile[TILE_M][TILE_K + 1];
    __local half b_tile[TILE_K][TILE_N + 1];
    float acc[WPT_M][WPT_N];
#pragma unroll
    for (int i = 0; i < WPT_M; ++i) {
#pragma unroll
        for (int j = 0; j < WPT_N; ++j) {
            acc[i][j] = 0.0f;
        }
    }

    // NOTE(review): base_a_offset omits the per-row H*K stride used by the
    // other kernels (A is later indexed base + m_global * K + k_global, i.e.
    // rows are K-contiguous). Presumably this kernel is dispatched for a
    // flattened/head-major layout — TODO confirm against the host code.
    const long base_a_offset = (long)b * M * H * K + (long)h * K;
    const long base_b_offset_blocks = (long)b * N * H * (K / QK4_0) + (long)h * (K / QK4_0);
    const int num_k_tiles = (K +
TILE_K - 1) / TILE_K;
    for (int t = 0; t < num_k_tiles; ++t) {
        const int k_start = t * TILE_K;
        // Cooperative load of the A tile (zero-padded past M/K).
#pragma unroll
        for (int i = 0; i < WPT_M; ++i) {
            const int m_local = local_m_idx * WPT_M + i;
            const int k_local = local_n_idx;
            const int m_global = group_m_idx * TILE_M + m_local;
            if (m_global < M) {
                for (int k_load_step = 0; k_load_step < TILE_K / THREADS_X; ++k_load_step) {
                    int k_global = k_start + k_local + k_load_step * THREADS_X;
                    if (k_global < K) {
                        a_tile[m_local][k_local + k_load_step * THREADS_X] = A[base_a_offset + m_global * K + k_global];
                    } else {
                        a_tile[m_local][k_local + k_load_step * THREADS_X] = 0.0h;
                    }
                }
            } else {
                for (int k_load_step = 0; k_load_step < TILE_K / THREADS_X; ++k_load_step) {
                    a_tile[m_local][k_local + k_load_step * THREADS_X] = 0.0h;
                }
            }
        }

        // Cooperative load + Q4_0 dequantization of the B tile.
#pragma unroll
        for (int i = 0; i < WPT_N; ++i) {
            const int n_local = local_n_idx * WPT_N + i;
            const int k_local = local_m_idx;
            const int n_global = group_n_idx * TILE_N + n_local;
            if (n_global < N) {
                for (int k_load_step = 0; k_load_step < TILE_K / THREADS_Y; ++k_load_step) {
                    int k_global = k_start + k_local + k_load_step * THREADS_Y;
                    if (k_global < K) {
                        const int k_block_idx = k_global / QK4_0;
                        const int k_in_block = k_global % QK4_0;
                        const __global block_q4_0 *b_block_ptr = &B[base_b_offset_blocks + n_global * (K / QK4_0) + k_block_idx];

                        const float d_b = vload_half(0, (__global half *)(&(b_block_ptr->d)));

                        // --- **fix core** ---
                        // 1. Map k_in_block (0-31) to an index in qs (0-15).
                        const uchar qs_sub_idx = k_in_block % 16;
                        // 2. Fetch the byte holding two packed 4-bit values.
                        const uchar q_packed = b_block_ptr->qs[qs_sub_idx];
                        // 3. First half of the block uses the low nibble,
                        //    second half uses the high nibble.
                        const bool is_low_nibble = (k_in_block < 16);
                        char q_nibble = is_low_nibble ? ((q_packed & 0x0F) - 8) : ((q_packed >> 4) - 8);

                        // 4. Dequantize and store into b_tile.
                        b_tile[k_local + k_load_step * THREADS_Y][n_local] = (half)((float)q_nibble * d_b);

                    } else {
                        b_tile[k_local + k_load_step * THREADS_Y][n_local] = 0.0h;
                    }
                }
            } else {
                for (int k_load_step = 0; k_load_step < TILE_K / THREADS_Y; ++k_load_step) {
                    b_tile[k_local + k_load_step * THREADS_Y][n_local] = 0.0h;
                }
            }
        }

        barrier(CLK_LOCAL_MEM_FENCE);
        // Register-blocked inner product over the shared K tile.
#pragma unroll
        for (int k_tile = 0; k_tile < TILE_K; ++k_tile) {
#pragma unroll
            for (int m = 0; m < WPT_M; ++m) {
                half a_val = a_tile[local_m_idx * WPT_M + m][k_tile];
#pragma unroll
                for (int n = 0; n < WPT_N; ++n) {
                    half b_val = b_tile[k_tile][local_n_idx * WPT_N + n];
                    acc[m][n] = mad((float)a_val, (float)b_val, acc[m][n]);
                }
            }
        }
        barrier(CLK_LOCAL_MEM_FENCE);
    }

    // Write back the WPT_M x WPT_N register tile (bounds-checked).
    // NOTE(review): as with base_a_offset, c_offset omits the per-row H*N
    // stride used elsewhere — confirm the intended C layout for this kernel.
    long c_offset = (long)b * M * H * N + (long)h * N;
#pragma unroll
    for (int m = 0; m < WPT_M; ++m) {
        int m_global = group_m_idx * TILE_M + local_m_idx * WPT_M + m;
        if (m_global < M) {
#pragma unroll
            for (int n = 0; n < WPT_N; ++n) {
                int n_global = group_n_idx * TILE_N + local_n_idx * WPT_N + n;
                if (n_global < N) {
                    float result = acc[m][n];
                    if (has_bias) {
                        result += bias[n_global];
                    }
                    C[c_offset + m_global * N + n_global] = (half)result;
                }
            }
        }
    }
}

#else
// ---------- [compatibility version] ----------
__kernel void gemm_fp16_q4_0_transb_bias(
    __global const half *A,
    __global const block_q4_0 *B,
    __global const float *bias,
    __global half *C,
    const int M, const int K, const int N,
    const int H, const int K_b,
    const int has_bias) {
    // --- 1. Index computation ---
    const int s = get_global_id(1);
    const int n = get_global_id(0);
    const int bh_idx = get_global_id(2);
    const int b = bh_idx / H;
    const int h = bh_idx % H;

    // --- 2. Bounds check ---
    if (s >= M || n >= N) {
        return;
    }

    // --- 3. Core computation ---
    float acc = 0.0f;
    const long a_row_offset = (long)b * M * H * K + (long)s * H * K + (long)h * K;
    const long b_row_offset_blocks = (long)b * N * H * (K / QK4_0) + (long)n * H * (K / QK4_0) + (long)h * (K / QK4_0);

    for (int k_block_idx = 0; k_block_idx < K / QK4_0; ++k_block_idx) {
        const __global block_q4_0 *b_block_ptr = &B[b_row_offset_blocks + k_block_idx];
        // vload_half: safe float load from half*, works without cl_khr_fp16.
        const float d_b = vload_half(0, (__global half *)(&(b_block_ptr->d)));
        const __global half *a_ptr = A + a_row_offset + k_block_idx * QK4_0;

        for (int j = 0; j < QK4_0 / 2; ++j) {
            const uchar q_packed = b_block_ptr->qs[j];
            const char q_lo = (q_packed & 0x0F) - 8;
            const char q_hi = (q_packed >> 4) - 8;

            acc += vload_half(j, a_ptr) * (float)q_lo * d_b;
            acc += vload_half(j + QK4_0 / 2, a_ptr) * (float)q_hi * d_b;
        }
    }

    // --- 4. Bias and write-back ---
    if (has_bias != 0) {
        acc += bias[n];
    }
    const long c_idx = (long)b * M * H * N + (long)s * H * N + (long)h * N + n;

    // vstore_half_rte rounds the float acc to the nearest half and stores it
    // at C[c_idx] (no direct half store in this no-fp16 branch).
    vstore_half_rte(acc, 0, &C[c_idx]);
}
#endif // SUPPORTS_FP16

// ==================================================================
// 7. FP32 * Q4_0 Fused GEMM + Bias Kernel (Image Pipe)
//    [final fixed version - on-the-fly dequantization & index fix]
// ==================================================================
__kernel void gemm_fp32_q4_0_transb_bias_image_pipe(
    sampler_t sampler,
    __read_only image2d_t A,
    __global const block_q4_0 *B,
    __global const float *bias,
    __write_only image2d_t C,
    const int M, const int K, const int N,
    const int H, const int K_b,
    const int has_bias) {
    // 1.
// Index computation — each work-item produces one float4 pixel (4 N columns).
    const int gx = get_global_id(0);
    const int gy = get_global_id(1);
    const int n_start = gx * 4;
    if (n_start >= N) { return; }

    float4 acc = (float4)(0.0f, 0.0f, 0.0f, 0.0f);
    const int K_blocks = K / 32;
    const long base_offset = 0; // B=1, H=1
    const long n_stride = K_blocks;

    // 3. Loop along the K dimension.
    for (int k = 0; k < K; ++k) {
        // ** [core fix] **
        // a. Read the FP32 value from image A: 4 K-elements per pixel,
        //    select the component for this k.
        int pixel_x = k / 4;
        int component = k % 4;
        float4 a_pixel = read_imagef(A, sampler, (int2)(pixel_x, gy));
        float a_val;
        if (component == 0)
            a_val = a_pixel.x;
        else if (component == 1)
            a_val = a_pixel.y;
        else if (component == 2)
            a_val = a_pixel.z;
        else
            a_val = a_pixel.w;

        // b. Dequantize 4 B values on the fly (one per output column).
        const int k_block_idx = k / 32;
        const int k_in_block = k % 32;
        const long b_block_mem_idx0 = base_offset + (long)(n_start + 0) * n_stride + k_block_idx;
        const long b_block_mem_idx1 = base_offset + (long)(n_start + 1) * n_stride + k_block_idx;
        const long b_block_mem_idx2 = base_offset + (long)(n_start + 2) * n_stride + k_block_idx;
        const long b_block_mem_idx3 = base_offset + (long)(n_start + 3) * n_stride + k_block_idx;

        const float d_b0 = vload_half(0, (__global half *)(&(B[b_block_mem_idx0].d)));
        const float d_b1 = vload_half(0, (__global half *)(&(B[b_block_mem_idx1].d)));
        const float d_b2 = vload_half(0, (__global half *)(&(B[b_block_mem_idx2].d)));
        const float d_b3 = vload_half(0, (__global half *)(&(B[b_block_mem_idx3].d)));

        const uchar q_packed0 = B[b_block_mem_idx0].qs[k_in_block % 16];
        const uchar q_packed1 = B[b_block_mem_idx1].qs[k_in_block % 16];
        const uchar q_packed2 = B[b_block_mem_idx2].qs[k_in_block % 16];
        const uchar q_packed3 = B[b_block_mem_idx3].qs[k_in_block % 16];

        const char q_nibble0 = (k_in_block < 16) ? ((q_packed0 & 0x0F) - 8) : ((q_packed0 >> 4) - 8);
        const char q_nibble1 = (k_in_block < 16) ? ((q_packed1 & 0x0F) - 8) : ((q_packed1 >> 4) - 8);
        const char q_nibble2 = (k_in_block < 16) ? ((q_packed2 & 0x0F) - 8) : ((q_packed2 >> 4) - 8);
        const char q_nibble3 = (k_in_block < 16) ? ((q_packed3 & 0x0F) - 8) : ((q_packed3 >> 4) - 8);

        float4 b_vals = (float4)((float)q_nibble0 * d_b0, (float)q_nibble1 * d_b1, (float)q_nibble2 * d_b2, (float)q_nibble3 * d_b3);

        acc = mad(a_val, b_vals, acc);
    }

    if (has_bias != 0) {
        float4 bias_vals = vload4(0, bias + n_start);
        acc += bias_vals;
    }

    write_imagef(C, (int2)(gx, gy), acc);
}

// ==================================================================
// 8. FP16 * Q4_0 Fused GEMM + Bias Kernel (Image Pipe)
// ==================================================================
#if defined(SUPPORTS_FP16)
/*
__kernel void gemm_fp16_q4_0_transb_bias_image_pipe(
    sampler_t sampler,
    __read_only image2d_t A,
    __global const block_q4_0 *B,
    __global const float *bias,
    __write_only image2d_t C,
    const int K, const int N,
    const int H,
    const int has_bias) {
    // 1. Parallelization: each work-item computes one output pixel (float4)
    const int gx = get_global_id(0);
    const int n_start = gx * 4;
    if (n_start >= N) { return; }

    // 2. Register accumulator
    float4 acc = (float4)(0.0f);
    const int K_blocks = K / 32;
    const long n_stride = K_blocks;
    const int K_vec_size = K / 4;

    // 3. Core loop: 4x unrolled along K for instruction-level parallelism
    for (int k_vec = 0; k_vec < K_vec_size; ++k_vec) {
        const int k = k_vec * 4;

        // a. Vectorized read of A
        half4 a_pixel_h = read_imageh(A, sampler, (int2)(k_vec, 0));
        float4 a_vals = convert_float4(a_pixel_h);

        // b.
为 a_vals 的每个分量并行准备权重向量,打破依赖 + // --- 解量化与 a_vals.x (k+0) 相乘的 b_vals_k0 --- + const int k0_block_idx = (k + 0) / 32; + const int k0_in_block = (k + 0) % 32; + const long b_idx0_k0 = (long)(n_start + 0) * n_stride + k0_block_idx; + const long b_idx1_k0 = (long)(n_start + 1) * n_stride + k0_block_idx; + const long b_idx2_k0 = (long)(n_start + 2) * n_stride + k0_block_idx; + const long b_idx3_k0 = (long)(n_start + 3) * n_stride + k0_block_idx; + const float d0_k0 = vload_half(0, (__global half *)(&(B[b_idx0_k0].d))); + const float d1_k0 = vload_half(0, (__global half *)(&(B[b_idx1_k0].d))); + const float d2_k0 = vload_half(0, (__global half *)(&(B[b_idx2_k0].d))); + const float d3_k0 = vload_half(0, (__global half *)(&(B[b_idx3_k0].d))); + const uchar qp0_k0 = B[b_idx0_k0].qs[k0_in_block % 16]; + const uchar qp1_k0 = B[b_idx1_k0].qs[k0_in_block % 16]; + const uchar qp2_k0 = B[b_idx2_k0].qs[k0_in_block % 16]; + const uchar qp3_k0 = B[b_idx3_k0].qs[k0_in_block % 16]; + const char qn0_k0 = (k0_in_block < 16) ? ((qp0_k0 & 0x0F) - 8) : ((qp0_k0 >> 4) - 8); + const char qn1_k0 = (k0_in_block < 16) ? ((qp1_k0 & 0x0F) - 8) : ((qp1_k0 >> 4) - 8); + const char qn2_k0 = (k0_in_block < 16) ? ((qp2_k0 & 0x0F) - 8) : ((qp2_k0 >> 4) - 8); + const char qn3_k0 = (k0_in_block < 16) ? 
((qp3_k0 & 0x0F) - 8) : ((qp3_k0 >> 4) - 8); + float4 b_vals_k0 = (float4)((float)qn0_k0 * d0_k0, (float)qn1_k0 * d1_k0, (float)qn2_k0 * d2_k0, (float)qn3_k0 * d3_k0); + + // --- 解量化与 a_vals.y (k+1) 相乘的 b_vals_k1 --- + const int k1_block_idx = (k + 1) / 32; + const int k1_in_block = (k + 1) % 32; + const long b_idx0_k1 = (long)(n_start + 0) * n_stride + k1_block_idx; + const long b_idx1_k1 = (long)(n_start + 1) * n_stride + k1_block_idx; + const long b_idx2_k1 = (long)(n_start + 2) * n_stride + k1_block_idx; + const long b_idx3_k1 = (long)(n_start + 3) * n_stride + k1_block_idx; + const float d0_k1 = vload_half(0, (__global half *)(&(B[b_idx0_k1].d))); + const float d1_k1 = vload_half(0, (__global half *)(&(B[b_idx1_k1].d))); + const float d2_k1 = vload_half(0, (__global half *)(&(B[b_idx2_k1].d))); + const float d3_k1 = vload_half(0, (__global half *)(&(B[b_idx3_k1].d))); + const uchar qp0_k1 = B[b_idx0_k1].qs[k1_in_block % 16]; + const uchar qp1_k1 = B[b_idx1_k1].qs[k1_in_block % 16]; + const uchar qp2_k1 = B[b_idx2_k1].qs[k1_in_block % 16]; + const uchar qp3_k1 = B[b_idx3_k1].qs[k1_in_block % 16]; + const char qn0_k1 = (k1_in_block < 16) ? ((qp0_k1 & 0x0F) - 8) : ((qp0_k1 >> 4) - 8); + const char qn1_k1 = (k1_in_block < 16) ? ((qp1_k1 & 0x0F) - 8) : ((qp1_k1 >> 4) - 8); + const char qn2_k1 = (k1_in_block < 16) ? ((qp2_k1 & 0x0F) - 8) : ((qp2_k1 >> 4) - 8); + const char qn3_k1 = (k1_in_block < 16) ? 
((qp3_k1 & 0x0F) - 8) : ((qp3_k1 >> 4) - 8); + float4 b_vals_k1 = (float4)((float)qn0_k1 * d0_k1, (float)qn1_k1 * d1_k1, (float)qn2_k1 * d2_k1, (float)qn3_k1 * d3_k1); + + // --- 解量化与 a_vals.z (k+2) 相乘的 b_vals_k2 --- + const int k2_block_idx = (k + 2) / 32; + const int k2_in_block = (k + 2) % 32; + const long b_idx0_k2 = (long)(n_start + 0) * n_stride + k2_block_idx; + const long b_idx1_k2 = (long)(n_start + 1) * n_stride + k2_block_idx; + const long b_idx2_k2 = (long)(n_start + 2) * n_stride + k2_block_idx; + const long b_idx3_k2 = (long)(n_start + 3) * n_stride + k2_block_idx; + const float d0_k2 = vload_half(0, (__global half *)(&(B[b_idx0_k2].d))); + const float d1_k2 = vload_half(0, (__global half *)(&(B[b_idx1_k2].d))); + const float d2_k2 = vload_half(0, (__global half *)(&(B[b_idx2_k2].d))); + const float d3_k2 = vload_half(0, (__global half *)(&(B[b_idx3_k2].d))); + const uchar qp0_k2 = B[b_idx0_k2].qs[k2_in_block % 16]; + const uchar qp1_k2 = B[b_idx1_k2].qs[k2_in_block % 16]; + const uchar qp2_k2 = B[b_idx2_k2].qs[k2_in_block % 16]; + const uchar qp3_k2 = B[b_idx3_k2].qs[k2_in_block % 16]; + const char qn0_k2 = (k2_in_block < 16) ? ((qp0_k2 & 0x0F) - 8) : ((qp0_k2 >> 4) - 8); + const char qn1_k2 = (k2_in_block < 16) ? ((qp1_k2 & 0x0F) - 8) : ((qp1_k2 >> 4) - 8); + const char qn2_k2 = (k2_in_block < 16) ? ((qp2_k2 & 0x0F) - 8) : ((qp2_k2 >> 4) - 8); + const char qn3_k2 = (k2_in_block < 16) ? 
((qp3_k2 & 0x0F) - 8) : ((qp3_k2 >> 4) - 8); + float4 b_vals_k2 = (float4)((float)qn0_k2 * d0_k2, (float)qn1_k2 * d1_k2, (float)qn2_k2 * d2_k2, (float)qn3_k2 * d3_k2); + + // --- 解量化与 a_vals.w (k+3) 相乘的 b_vals_k3 --- + const int k3_block_idx = (k + 3) / 32; + const int k3_in_block = (k + 3) % 32; + const long b_idx0_k3 = (long)(n_start + 0) * n_stride + k3_block_idx; + const long b_idx1_k3 = (long)(n_start + 1) * n_stride + k3_block_idx; + const long b_idx2_k3 = (long)(n_start + 2) * n_stride + k3_block_idx; + const long b_idx3_k3 = (long)(n_start + 3) * n_stride + k3_block_idx; + const float d0_k3 = vload_half(0, (__global half *)(&(B[b_idx0_k3].d))); + const float d1_k3 = vload_half(0, (__global half *)(&(B[b_idx1_k3].d))); + const float d2_k3 = vload_half(0, (__global half *)(&(B[b_idx2_k3].d))); + const float d3_k3 = vload_half(0, (__global half *)(&(B[b_idx3_k3].d))); + const uchar qp0_k3 = B[b_idx0_k3].qs[k3_in_block % 16]; + const uchar qp1_k3 = B[b_idx1_k3].qs[k3_in_block % 16]; + const uchar qp2_k3 = B[b_idx2_k3].qs[k3_in_block % 16]; + const uchar qp3_k3 = B[b_idx3_k3].qs[k3_in_block % 16]; + const char qn0_k3 = (k3_in_block < 16) ? ((qp0_k3 & 0x0F) - 8) : ((qp0_k3 >> 4) - 8); + const char qn1_k3 = (k3_in_block < 16) ? ((qp1_k3 & 0x0F) - 8) : ((qp1_k3 >> 4) - 8); + const char qn2_k3 = (k3_in_block < 16) ? ((qp2_k3 & 0x0F) - 8) : ((qp2_k3 >> 4) - 8); + const char qn3_k3 = (k3_in_block < 16) ? ((qp3_k3 & 0x0F) - 8) : ((qp3_k3 >> 4) - 8); + float4 b_vals_k3 = (float4)((float)qn0_k3 * d0_k3, (float)qn1_k3 * d1_k3, (float)qn2_k3 * d2_k3, (float)qn3_k3 * d3_k3); + + // c. 并行执行 4次独立的MAD指令 + acc = mad(a_vals.x, b_vals_k0, acc); + acc = mad(a_vals.y, b_vals_k1, acc); + acc = mad(a_vals.z, b_vals_k2, acc); + acc = mad(a_vals.w, b_vals_k3, acc); + } + + // 4. 
扫尾处理: K不是4的倍数时 + for (int k = K_vec_size * 4; k < K; ++k) { + int pixel_x = k / 4; + int component = k % 4; + half4 a_pixel = read_imageh(A, sampler, (int2)(pixel_x, 0)); + float a_val; + if (component == 0) + a_val = (float)a_pixel.x; + else if (component == 1) + a_val = (float)a_pixel.y; + else if (component == 2) + a_val = (float)a_pixel.z; + else + a_val = (float)a_pixel.w; + + const int k_block_idx = k / 32; + const int k_in_block = k % 32; + const long b_idx0 = (long)(n_start + 0) * n_stride + k_block_idx; + const long b_idx1 = (long)(n_start + 1) * n_stride + k_block_idx; + const long b_idx2 = (long)(n_start + 2) * n_stride + k_block_idx; + const long b_idx3 = (long)(n_start + 3) * n_stride + k_block_idx; + const float d0 = vload_half(0, (__global half *)(&(B[b_idx0].d))); + const float d1 = vload_half(0, (__global half *)(&(B[b_idx1].d))); + const float d2 = vload_half(0, (__global half *)(&(B[b_idx2].d))); + const float d3 = vload_half(0, (__global half *)(&(B[b_idx3].d))); + const uchar qp0 = B[b_idx0].qs[k_in_block % 16]; + const uchar qp1 = B[b_idx1].qs[k_in_block % 16]; + const uchar qp2 = B[b_idx2].qs[k_in_block % 16]; + const uchar qp3 = B[b_idx3].qs[k_in_block % 16]; + const char qn0 = (k_in_block < 16) ? ((qp0 & 0x0F) - 8) : ((qp0 >> 4) - 8); + const char qn1 = (k_in_block < 16) ? ((qp1 & 0x0F) - 8) : ((qp1 >> 4) - 8); + const char qn2 = (k_in_block < 16) ? ((qp2 & 0x0F) - 8) : ((qp2 >> 4) - 8); + const char qn3 = (k_in_block < 16) ? ((qp3 & 0x0F) - 8) : ((qp3 >> 4) - 8); + float4 b_vals = (float4)((float)qn0 * d0, (float)qn1 * d1, (float)qn2 * d2, (float)qn3 * d3); + acc = mad(a_val, b_vals, acc); + } + + // 5. 添加偏置 + if (has_bias != 0) { + acc += vload4(0, bias + n_start); + } + + // 6. 
将结果像素写入输出 Image C + write_imageh(C, (int2)(gx, 0), convert_half4_rte(acc)); +} +*/ +__kernel void gemm_fp16_q4_0_transb_bias_image_pipe( + sampler_t sampler, + __read_only image2d_t A, + __global const block_q4_0 *B, + __global const float *bias, + __write_only image2d_t C, + const int M, const int K, const int N, + const int H, const int K_b, + const int has_bias) { + const int gx = get_global_id(0); + const int gy = get_global_id(1); // gy 对应 M 维度 + const int n_start = gx * 4; + + if (n_start >= N || gy >= M) { return; } + + half4 acc = (half4)(0.0h); + + const int K_blocks = K / 32; + const long n_stride = K_blocks; + const int K_vec_size_x8 = K / 8; + + for (int k_vec = 0; k_vec < K_vec_size_x8; ++k_vec) { + const int k = k_vec * 8; + + // **核心区别**: 读取A时,y坐标使用 gy + half4 a_vals_lo = read_imageh(A, sampler, (int2)(k / 4, gy)); + half4 a_vals_hi = read_imageh(A, sampler, (int2)(k / 4 + 1, gy)); + + // --- 8倍展开的解量化与计算(代码同GEMV版,此处为简洁省略)--- + // --- k+0 --- + const int k0_block_idx = (k + 0) / 32; + const int k0_in_block = (k + 0) % 32; + const long b_idx0_k0 = (long)(n_start + 0) * n_stride + k0_block_idx; + const long b_idx1_k0 = (long)(n_start + 1) * n_stride + k0_block_idx; + const long b_idx2_k0 = (long)(n_start + 2) * n_stride + k0_block_idx; + const long b_idx3_k0 = (long)(n_start + 3) * n_stride + k0_block_idx; + const half d0_k0 = vload_half(0, (__global half *)(&(B[b_idx0_k0].d))); + const half d1_k0 = vload_half(0, (__global half *)(&(B[b_idx1_k0].d))); + const half d2_k0 = vload_half(0, (__global half *)(&(B[b_idx2_k0].d))); + const half d3_k0 = vload_half(0, (__global half *)(&(B[b_idx3_k0].d))); + const uchar qp0_k0 = B[b_idx0_k0].qs[k0_in_block % 16]; + const uchar qp1_k0 = B[b_idx1_k0].qs[k0_in_block % 16]; + const uchar qp2_k0 = B[b_idx2_k0].qs[k0_in_block % 16]; + const uchar qp3_k0 = B[b_idx3_k0].qs[k0_in_block % 16]; + const half qn0_k0 = (half)((k0_in_block < 16) ? 
((qp0_k0 & 0x0F) - 8) : ((qp0_k0 >> 4) - 8)); + const half qn1_k0 = (half)((k0_in_block < 16) ? ((qp1_k0 & 0x0F) - 8) : ((qp1_k0 >> 4) - 8)); + const half qn2_k0 = (half)((k0_in_block < 16) ? ((qp2_k0 & 0x0F) - 8) : ((qp2_k0 >> 4) - 8)); + const half qn3_k0 = (half)((k0_in_block < 16) ? ((qp3_k0 & 0x0F) - 8) : ((qp3_k0 >> 4) - 8)); + half4 b_vals_k0 = (half4)(qn0_k0 * d0_k0, qn1_k0 * d1_k0, qn2_k0 * d2_k0, qn3_k0 * d3_k0); + // ... k+1 到 k+7 的代码 ... + const int k1_block_idx = (k + 1) / 32; + const int k1_in_block = (k + 1) % 32; + const long b_idx0_k1 = (long)(n_start + 0) * n_stride + k1_block_idx; + const long b_idx1_k1 = (long)(n_start + 1) * n_stride + k1_block_idx; + const long b_idx2_k1 = (long)(n_start + 2) * n_stride + k1_block_idx; + const long b_idx3_k1 = (long)(n_start + 3) * n_stride + k1_block_idx; + const half d0_k1 = vload_half(0, (__global half *)(&(B[b_idx0_k1].d))); + const half d1_k1 = vload_half(0, (__global half *)(&(B[b_idx1_k1].d))); + const half d2_k1 = vload_half(0, (__global half *)(&(B[b_idx2_k1].d))); + const half d3_k1 = vload_half(0, (__global half *)(&(B[b_idx3_k1].d))); + const uchar qp0_k1 = B[b_idx0_k1].qs[k1_in_block % 16]; + const uchar qp1_k1 = B[b_idx1_k1].qs[k1_in_block % 16]; + const uchar qp2_k1 = B[b_idx2_k1].qs[k1_in_block % 16]; + const uchar qp3_k1 = B[b_idx3_k1].qs[k1_in_block % 16]; + const half qn0_k1 = (half)((k1_in_block < 16) ? ((qp0_k1 & 0x0F) - 8) : ((qp0_k1 >> 4) - 8)); + const half qn1_k1 = (half)((k1_in_block < 16) ? ((qp1_k1 & 0x0F) - 8) : ((qp1_k1 >> 4) - 8)); + const half qn2_k1 = (half)((k1_in_block < 16) ? ((qp2_k1 & 0x0F) - 8) : ((qp2_k1 >> 4) - 8)); + const half qn3_k1 = (half)((k1_in_block < 16) ? 
((qp3_k1 & 0x0F) - 8) : ((qp3_k1 >> 4) - 8)); + half4 b_vals_k1 = (half4)(qn0_k1 * d0_k1, qn1_k1 * d1_k1, qn2_k1 * d2_k1, qn3_k1 * d3_k1); + const int k2_block_idx = (k + 2) / 32; + const int k2_in_block = (k + 2) % 32; + const long b_idx0_k2 = (long)(n_start + 0) * n_stride + k2_block_idx; + const long b_idx1_k2 = (long)(n_start + 1) * n_stride + k2_block_idx; + const long b_idx2_k2 = (long)(n_start + 2) * n_stride + k2_block_idx; + const long b_idx3_k2 = (long)(n_start + 3) * n_stride + k2_block_idx; + const half d0_k2 = vload_half(0, (__global half *)(&(B[b_idx0_k2].d))); + const half d1_k2 = vload_half(0, (__global half *)(&(B[b_idx1_k2].d))); + const half d2_k2 = vload_half(0, (__global half *)(&(B[b_idx2_k2].d))); + const half d3_k2 = vload_half(0, (__global half *)(&(B[b_idx3_k2].d))); + const uchar qp0_k2 = B[b_idx0_k2].qs[k2_in_block % 16]; + const uchar qp1_k2 = B[b_idx1_k2].qs[k2_in_block % 16]; + const uchar qp2_k2 = B[b_idx2_k2].qs[k2_in_block % 16]; + const uchar qp3_k2 = B[b_idx3_k2].qs[k2_in_block % 16]; + const half qn0_k2 = (half)((k2_in_block < 16) ? ((qp0_k2 & 0x0F) - 8) : ((qp0_k2 >> 4) - 8)); + const half qn1_k2 = (half)((k2_in_block < 16) ? ((qp1_k2 & 0x0F) - 8) : ((qp1_k2 >> 4) - 8)); + const half qn2_k2 = (half)((k2_in_block < 16) ? ((qp2_k2 & 0x0F) - 8) : ((qp2_k2 >> 4) - 8)); + const half qn3_k2 = (half)((k2_in_block < 16) ? 
((qp3_k2 & 0x0F) - 8) : ((qp3_k2 >> 4) - 8)); + half4 b_vals_k2 = (half4)(qn0_k2 * d0_k2, qn1_k2 * d1_k2, qn2_k2 * d2_k2, qn3_k2 * d3_k2); + const int k3_block_idx = (k + 3) / 32; + const int k3_in_block = (k + 3) % 32; + const long b_idx0_k3 = (long)(n_start + 0) * n_stride + k3_block_idx; + const long b_idx1_k3 = (long)(n_start + 1) * n_stride + k3_block_idx; + const long b_idx2_k3 = (long)(n_start + 2) * n_stride + k3_block_idx; + const long b_idx3_k3 = (long)(n_start + 3) * n_stride + k3_block_idx; + const half d0_k3 = vload_half(0, (__global half *)(&(B[b_idx0_k3].d))); + const half d1_k3 = vload_half(0, (__global half *)(&(B[b_idx1_k3].d))); + const half d2_k3 = vload_half(0, (__global half *)(&(B[b_idx2_k3].d))); + const half d3_k3 = vload_half(0, (__global half *)(&(B[b_idx3_k3].d))); + const uchar qp0_k3 = B[b_idx0_k3].qs[k3_in_block % 16]; + const uchar qp1_k3 = B[b_idx1_k3].qs[k3_in_block % 16]; + const uchar qp2_k3 = B[b_idx2_k3].qs[k3_in_block % 16]; + const uchar qp3_k3 = B[b_idx3_k3].qs[k3_in_block % 16]; + const half qn0_k3 = (half)((k3_in_block < 16) ? ((qp0_k3 & 0x0F) - 8) : ((qp0_k3 >> 4) - 8)); + const half qn1_k3 = (half)((k3_in_block < 16) ? ((qp1_k3 & 0x0F) - 8) : ((qp1_k3 >> 4) - 8)); + const half qn2_k3 = (half)((k3_in_block < 16) ? ((qp2_k3 & 0x0F) - 8) : ((qp2_k3 >> 4) - 8)); + const half qn3_k3 = (half)((k3_in_block < 16) ? 
((qp3_k3 & 0x0F) - 8) : ((qp3_k3 >> 4) - 8)); + half4 b_vals_k3 = (half4)(qn0_k3 * d0_k3, qn1_k3 * d1_k3, qn2_k3 * d2_k3, qn3_k3 * d3_k3); + const int k4_block_idx = (k + 4) / 32; + const int k4_in_block = (k + 4) % 32; + const long b_idx0_k4 = (long)(n_start + 0) * n_stride + k4_block_idx; + const long b_idx1_k4 = (long)(n_start + 1) * n_stride + k4_block_idx; + const long b_idx2_k4 = (long)(n_start + 2) * n_stride + k4_block_idx; + const long b_idx3_k4 = (long)(n_start + 3) * n_stride + k4_block_idx; + const half d0_k4 = vload_half(0, (__global half *)(&(B[b_idx0_k4].d))); + const half d1_k4 = vload_half(0, (__global half *)(&(B[b_idx1_k4].d))); + const half d2_k4 = vload_half(0, (__global half *)(&(B[b_idx2_k4].d))); + const half d3_k4 = vload_half(0, (__global half *)(&(B[b_idx3_k4].d))); + const uchar qp0_k4 = B[b_idx0_k4].qs[k4_in_block % 16]; + const uchar qp1_k4 = B[b_idx1_k4].qs[k4_in_block % 16]; + const uchar qp2_k4 = B[b_idx2_k4].qs[k4_in_block % 16]; + const uchar qp3_k4 = B[b_idx3_k4].qs[k4_in_block % 16]; + const half qn0_k4 = (half)((k4_in_block < 16) ? ((qp0_k4 & 0x0F) - 8) : ((qp0_k4 >> 4) - 8)); + const half qn1_k4 = (half)((k4_in_block < 16) ? ((qp1_k4 & 0x0F) - 8) : ((qp1_k4 >> 4) - 8)); + const half qn2_k4 = (half)((k4_in_block < 16) ? ((qp2_k4 & 0x0F) - 8) : ((qp2_k4 >> 4) - 8)); + const half qn3_k4 = (half)((k4_in_block < 16) ? 
((qp3_k4 & 0x0F) - 8) : ((qp3_k4 >> 4) - 8)); + half4 b_vals_k4 = (half4)(qn0_k4 * d0_k4, qn1_k4 * d1_k4, qn2_k4 * d2_k4, qn3_k4 * d3_k4); + const int k5_block_idx = (k + 5) / 32; + const int k5_in_block = (k + 5) % 32; + const long b_idx0_k5 = (long)(n_start + 0) * n_stride + k5_block_idx; + const long b_idx1_k5 = (long)(n_start + 1) * n_stride + k5_block_idx; + const long b_idx2_k5 = (long)(n_start + 2) * n_stride + k5_block_idx; + const long b_idx3_k5 = (long)(n_start + 3) * n_stride + k5_block_idx; + const half d0_k5 = vload_half(0, (__global half *)(&(B[b_idx0_k5].d))); + const half d1_k5 = vload_half(0, (__global half *)(&(B[b_idx1_k5].d))); + const half d2_k5 = vload_half(0, (__global half *)(&(B[b_idx2_k5].d))); + const half d3_k5 = vload_half(0, (__global half *)(&(B[b_idx3_k5].d))); + const uchar qp0_k5 = B[b_idx0_k5].qs[k5_in_block % 16]; + const uchar qp1_k5 = B[b_idx1_k5].qs[k5_in_block % 16]; + const uchar qp2_k5 = B[b_idx2_k5].qs[k5_in_block % 16]; + const uchar qp3_k5 = B[b_idx3_k5].qs[k5_in_block % 16]; + const half qn0_k5 = (half)((k5_in_block < 16) ? ((qp0_k5 & 0x0F) - 8) : ((qp0_k5 >> 4) - 8)); + const half qn1_k5 = (half)((k5_in_block < 16) ? ((qp1_k5 & 0x0F) - 8) : ((qp1_k5 >> 4) - 8)); + const half qn2_k5 = (half)((k5_in_block < 16) ? ((qp2_k5 & 0x0F) - 8) : ((qp2_k5 >> 4) - 8)); + const half qn3_k5 = (half)((k5_in_block < 16) ? 
((qp3_k5 & 0x0F) - 8) : ((qp3_k5 >> 4) - 8)); + half4 b_vals_k5 = (half4)(qn0_k5 * d0_k5, qn1_k5 * d1_k5, qn2_k5 * d2_k5, qn3_k5 * d3_k5); + const int k6_block_idx = (k + 6) / 32; + const int k6_in_block = (k + 6) % 32; + const long b_idx0_k6 = (long)(n_start + 0) * n_stride + k6_block_idx; + const long b_idx1_k6 = (long)(n_start + 1) * n_stride + k6_block_idx; + const long b_idx2_k6 = (long)(n_start + 2) * n_stride + k6_block_idx; + const long b_idx3_k6 = (long)(n_start + 3) * n_stride + k6_block_idx; + const half d0_k6 = vload_half(0, (__global half *)(&(B[b_idx0_k6].d))); + const half d1_k6 = vload_half(0, (__global half *)(&(B[b_idx1_k6].d))); + const half d2_k6 = vload_half(0, (__global half *)(&(B[b_idx2_k6].d))); + const half d3_k6 = vload_half(0, (__global half *)(&(B[b_idx3_k6].d))); + const uchar qp0_k6 = B[b_idx0_k6].qs[k6_in_block % 16]; + const uchar qp1_k6 = B[b_idx1_k6].qs[k6_in_block % 16]; + const uchar qp2_k6 = B[b_idx2_k6].qs[k6_in_block % 16]; + const uchar qp3_k6 = B[b_idx3_k6].qs[k6_in_block % 16]; + const half qn0_k6 = (half)((k6_in_block < 16) ? ((qp0_k6 & 0x0F) - 8) : ((qp0_k6 >> 4) - 8)); + const half qn1_k6 = (half)((k6_in_block < 16) ? ((qp1_k6 & 0x0F) - 8) : ((qp1_k6 >> 4) - 8)); + const half qn2_k6 = (half)((k6_in_block < 16) ? ((qp2_k6 & 0x0F) - 8) : ((qp2_k6 >> 4) - 8)); + const half qn3_k6 = (half)((k6_in_block < 16) ? 
((qp3_k6 & 0x0F) - 8) : ((qp3_k6 >> 4) - 8)); + half4 b_vals_k6 = (half4)(qn0_k6 * d0_k6, qn1_k6 * d1_k6, qn2_k6 * d2_k6, qn3_k6 * d3_k6); + const int k7_block_idx = (k + 7) / 32; + const int k7_in_block = (k + 7) % 32; + const long b_idx0_k7 = (long)(n_start + 0) * n_stride + k7_block_idx; + const long b_idx1_k7 = (long)(n_start + 1) * n_stride + k7_block_idx; + const long b_idx2_k7 = (long)(n_start + 2) * n_stride + k7_block_idx; + const long b_idx3_k7 = (long)(n_start + 3) * n_stride + k7_block_idx; + const half d0_k7 = vload_half(0, (__global half *)(&(B[b_idx0_k7].d))); + const half d1_k7 = vload_half(0, (__global half *)(&(B[b_idx1_k7].d))); + const half d2_k7 = vload_half(0, (__global half *)(&(B[b_idx2_k7].d))); + const half d3_k7 = vload_half(0, (__global half *)(&(B[b_idx3_k7].d))); + const uchar qp0_k7 = B[b_idx0_k7].qs[k7_in_block % 16]; + const uchar qp1_k7 = B[b_idx1_k7].qs[k7_in_block % 16]; + const uchar qp2_k7 = B[b_idx2_k7].qs[k7_in_block % 16]; + const uchar qp3_k7 = B[b_idx3_k7].qs[k7_in_block % 16]; + const half qn0_k7 = (half)((k7_in_block < 16) ? ((qp0_k7 & 0x0F) - 8) : ((qp0_k7 >> 4) - 8)); + const half qn1_k7 = (half)((k7_in_block < 16) ? ((qp1_k7 & 0x0F) - 8) : ((qp1_k7 >> 4) - 8)); + const half qn2_k7 = (half)((k7_in_block < 16) ? ((qp2_k7 & 0x0F) - 8) : ((qp2_k7 >> 4) - 8)); + const half qn3_k7 = (half)((k7_in_block < 16) ? 
((qp3_k7 & 0x0F) - 8) : ((qp3_k7 >> 4) - 8)); + half4 b_vals_k7 = (half4)(qn0_k7 * d0_k7, qn1_k7 * d1_k7, qn2_k7 * d2_k7, qn3_k7 * d3_k7); + + acc = mad(a_vals_lo.x, b_vals_k0, acc); + acc = mad(a_vals_lo.y, b_vals_k1, acc); + acc = mad(a_vals_lo.z, b_vals_k2, acc); + acc = mad(a_vals_lo.w, b_vals_k3, acc); + acc = mad(a_vals_hi.x, b_vals_k4, acc); + acc = mad(a_vals_hi.y, b_vals_k5, acc); + acc = mad(a_vals_hi.z, b_vals_k6, acc); + acc = mad(a_vals_hi.w, b_vals_k7, acc); + } + + // 扫尾和偏置 + for (int k = K_vec_size_x8 * 8; k < K; ++k) { + int pixel_x = k / 4; + int component = k % 4; + half4 a_pixel = read_imageh(A, sampler, (int2)(pixel_x, gy)); + half a_val; + if (component == 0) + a_val = a_pixel.x; + else if (component == 1) + a_val = a_pixel.y; + else if (component == 2) + a_val = a_pixel.z; + else + a_val = a_pixel.w; + const int k_block_idx = k / 32; + const int k_in_block = k % 32; + const long b_idx0 = (long)(n_start + 0) * n_stride + k_block_idx; + const long b_idx1 = (long)(n_start + 1) * n_stride + k_block_idx; + const long b_idx2 = (long)(n_start + 2) * n_stride + k_block_idx; + const long b_idx3 = (long)(n_start + 3) * n_stride + k_block_idx; + const half d0 = vload_half(0, (__global half *)(&(B[b_idx0].d))); + const half d1 = vload_half(0, (__global half *)(&(B[b_idx1].d))); + const half d2 = vload_half(0, (__global half *)(&(B[b_idx2].d))); + const half d3 = vload_half(0, (__global half *)(&(B[b_idx3].d))); + const uchar qp0 = B[b_idx0].qs[k_in_block % 16]; + const uchar qp1 = B[b_idx1].qs[k_in_block % 16]; + const uchar qp2 = B[b_idx2].qs[k_in_block % 16]; + const uchar qp3 = B[b_idx3].qs[k_in_block % 16]; + const half qn0 = (half)((k_in_block < 16) ? ((qp0 & 0x0F) - 8) : ((qp0 >> 4) - 8)); + const half qn1 = (half)((k_in_block < 16) ? ((qp1 & 0x0F) - 8) : ((qp1 >> 4) - 8)); + const half qn2 = (half)((k_in_block < 16) ? ((qp2 & 0x0F) - 8) : ((qp2 >> 4) - 8)); + const half qn3 = (half)((k_in_block < 16) ? 
((qp3 & 0x0F) - 8) : ((qp3 >> 4) - 8)); + half4 b_vals = (half4)(qn0 * d0, qn1 * d1, qn2 * d2, qn3 * d3); + acc = mad(a_val, b_vals, acc); + } + + if (has_bias != 0) { + half4 bias_h = convert_half4_rte(vload4(0, bias + n_start)); + if (n_start < N) acc.x += bias_h.x; + if (n_start + 1 < N) acc.y += bias_h.y; + if (n_start + 2 < N) acc.z += bias_h.z; + if (n_start + 3 < N) acc.w += bias_h.w; + } + + write_imageh(C, (int2)(gx, gy), acc); +} +#else +// ---------- [兼容回退版 - Fallback Version] ---------- +__kernel void gemm_fp16_q4_0_transb_bias_image_pipe( + sampler_t sampler, + __read_only image2d_t A, + __global const block_q4_0 *B, + __global const float *bias, + __write_only image2d_t C, + const int M, const int K, const int N, + const int H, const int K_b, + const int has_bias) { + // 逻辑与FP32版本完全相同,因为Host端会准备好FP32的Image + const int gx = get_global_id(0); + const int gy = get_global_id(1); + const int n_start = gx * 4; + if (n_start >= N) { return; } + + float4 acc = (float4)(0.0f, 0.0f, 0.0f, 0.0f); + const int K_blocks = K / 32; + const long base_offset = 0; + const long n_stride = K_blocks; + + for (int k = 0; k < K; ++k) { + int pixel_x = k / 4; + int component = k % 4; + // 使用 read_imagef 读取,因为Image是CL_FLOAT格式 + float4 a_pixel = read_imagef(A, sampler, (int2)(pixel_x, gy)); + float a_val; + if (component == 0) + a_val = a_pixel.x; + else if (component == 1) + a_val = a_pixel.y; + else if (component == 2) + a_val = a_pixel.z; + else + a_val = a_pixel.w; + + const int k_block_idx = k / 32; + const int k_in_block = k % 32; + const long b_block_mem_idx0 = base_offset + (long)(n_start + 0) * n_stride + k_block_idx; + const long b_block_mem_idx1 = base_offset + (long)(n_start + 1) * n_stride + k_block_idx; + const long b_block_mem_idx2 = base_offset + (long)(n_start + 2) * n_stride + k_block_idx; + const long b_block_mem_idx3 = base_offset + (long)(n_start + 3) * n_stride + k_block_idx; + + const float d_b0 = vload_half(0, (__global half 
*)(&(B[b_block_mem_idx0].d))); + const float d_b1 = vload_half(0, (__global half *)(&(B[b_block_mem_idx1].d))); + const float d_b2 = vload_half(0, (__global half *)(&(B[b_block_mem_idx2].d))); + const float d_b3 = vload_half(0, (__global half *)(&(B[b_block_mem_idx3].d))); + + const uchar q_packed0 = B[b_block_mem_idx0].qs[k_in_block % 16]; + const uchar q_packed1 = B[b_block_mem_idx1].qs[k_in_block % 16]; + const uchar q_packed2 = B[b_block_mem_idx2].qs[k_in_block % 16]; + const uchar q_packed3 = B[b_block_mem_idx3].qs[k_in_block % 16]; + + const char q_nibble0 = (k_in_block < 16) ? ((q_packed0 & 0x0F) - 8) : ((q_packed0 >> 4) - 8); + const char q_nibble1 = (k_in_block < 16) ? ((q_packed1 & 0x0F) - 8) : ((q_packed1 >> 4) - 8); + const char q_nibble2 = (k_in_block < 16) ? ((q_packed2 & 0x0F) - 8) : ((q_packed2 >> 4) - 8); + const char q_nibble3 = (k_in_block < 16) ? ((q_packed3 & 0x0F) - 8) : ((q_packed3 >> 4) - 8); + + float4 b_vals = (float4)((float)q_nibble0 * d_b0, (float)q_nibble1 * d_b1, (float)q_nibble2 * d_b2, (float)q_nibble3 * d_b3); + + acc = mad(a_val, b_vals, acc); + } + + if (has_bias != 0) { + float4 bias_vals = vload4(0, bias + n_start); + acc += bias_vals; + } + + // 使用 write_imagef 写入,因为输出Image也是CL_FLOAT格式 + write_imagef(C, (int2)(gx, gy), acc); +} +#endif // SUPPORTS_FP16 + +// ================================================================== +// 9. FP32 * Q4_0 Fused GEMV + Bias Kernel (All Image Pipe) [最终修正版] +// C = A(Image) * B(Q4_0 Buffer)^T + Bias +// ================================================================== +__kernel void gemv_fp32_q4_0_transb_bias_image_pipe( + sampler_t sampler, + __read_only image2d_t A, + __global const block_q4_0 *B, + __global const float *bias, + __write_only image2d_t C, // 【修改】C 也是 Image + const int K, const int N, + const int H, + const int has_bias) { + // 1. 
并行策略: 每个工作项计算一个输出像素 (float4) + const int gx = get_global_id(0); // 对应 C 的 x 坐标, 范围 [0, N/4 - 1] + const int n_start = gx * 4; + if (n_start >= N) { return; } + + // 2. 初始化累加器 + float4 acc = (float4)(0.0f, 0.0f, 0.0f, 0.0f); + + const int K_blocks = K / 32; + const long n_stride = K_blocks; + + // 3. 沿 K 维度循环计算 + for (int k = 0; k < K; ++k) { + // a. 正确地从 Image A 中读取 FP32 值 (y坐标永远是0) + int pixel_x = k / 4; + int component = k % 4; + float4 a_pixel = read_imagef(A, sampler, (int2)(pixel_x, 0)); + float a_val; + if (component == 0) + a_val = a_pixel.x; + else if (component == 1) + a_val = a_pixel.y; + else if (component == 2) + a_val = a_pixel.z; + else + a_val = a_pixel.w; + + // b. 解量化 B 的4个值 + const int k_block_idx = k / 32; + const int k_in_block = k % 32; + const long b_block_mem_idx0 = (long)(n_start + 0) * n_stride + k_block_idx; + const long b_block_mem_idx1 = (long)(n_start + 1) * n_stride + k_block_idx; + const long b_block_mem_idx2 = (long)(n_start + 2) * n_stride + k_block_idx; + const long b_block_mem_idx3 = (long)(n_start + 3) * n_stride + k_block_idx; + + const float d_b0 = vload_half(0, (__global half *)(&(B[b_block_mem_idx0].d))); + const float d_b1 = vload_half(0, (__global half *)(&(B[b_block_mem_idx1].d))); + const float d_b2 = vload_half(0, (__global half *)(&(B[b_block_mem_idx2].d))); + const float d_b3 = vload_half(0, (__global half *)(&(B[b_block_mem_idx3].d))); + + const uchar q_packed0 = B[b_block_mem_idx0].qs[k_in_block % 16]; + const char q_nibble0 = (k_in_block < 16) ? ((q_packed0 & 0x0F) - 8) : ((q_packed0 >> 4) - 8); + const uchar q_packed1 = B[b_block_mem_idx1].qs[k_in_block % 16]; + const char q_nibble1 = (k_in_block < 16) ? ((q_packed1 & 0x0F) - 8) : ((q_packed1 >> 4) - 8); + const uchar q_packed2 = B[b_block_mem_idx2].qs[k_in_block % 16]; + const char q_nibble2 = (k_in_block < 16) ? 
((q_packed2 & 0x0F) - 8) : ((q_packed2 >> 4) - 8); + const uchar q_packed3 = B[b_block_mem_idx3].qs[k_in_block % 16]; + const char q_nibble3 = (k_in_block < 16) ? ((q_packed3 & 0x0F) - 8) : ((q_packed3 >> 4) - 8); + + float4 b_vals = (float4)((float)q_nibble0 * d_b0, (float)q_nibble1 * d_b1, (float)q_nibble2 * d_b2, (float)q_nibble3 * d_b3); + + acc = mad(a_val, b_vals, acc); + } + + if (has_bias != 0) { + float4 bias_vals = vload4(0, bias + n_start); + acc += bias_vals; + } + + // 4. 将结果像素写入输出 Image C (y坐标永远是0) + write_imagef(C, (int2)(gx, 0), acc); +} + +// ================================================================== +// 10. FP16 * Q4_0 Fused GEMV + Bias Kernel (All Image Pipe) [最终修正版] +// ================================================================== +#if defined(SUPPORTS_FP16) +// ---------- [高性能版] ---------- +/* +__kernel void gemv_fp16_q4_0_transb_bias_image_pipe( + sampler_t sampler, + __read_only image2d_t A, // 输入向量 A, 形状 [1, K], 映射为 [K/4, 1] 的 half4 Image + __global const block_q4_0 *B, // 权重矩阵 B, 形状 [N, K] + __global const float *bias, // 偏置向量, 形状 [N] + __write_only image2d_t C, // 输出向量 C, 形状 [1, N], 映射为 [N/4, 1] 的 half4 Image + const int K, const int N, + const int H, // H=1, 为了兼容性保留 + const int has_bias) { + // 1. 并行策略: 每个工作项计算一个输出像素 (float4) + const int gx = get_global_id(0); // C 的 x 坐标, 范围 [0, N/4 - 1] + const int n_start = gx * 4; + if (n_start >= N) { return; } + + // 2. 寄存器累加器:使用float4在寄存器中进行高精度累加 + float4 acc = (float4)(0.0f); + const int K_blocks = K / 32; + const long n_stride = K_blocks; + const int K_vec_size = K / 4; + + // 3. 核心计算循环:沿K维度4倍展开, 深度优化指令级并行 + // 每次循环处理K维度的4个元素 + for (int k_vec = 0; k_vec < K_vec_size; ++k_vec) { + const int k = k_vec * 4; + + // a. 向量化读取A: 一次读取4个half, 组成一个half4 + half4 a_pixel_h = read_imageh(A, sampler, (int2)(k_vec, 0)); + float4 a_vals = convert_float4(a_pixel_h); + + // b. 
为 a_vals 的每个分量(x,y,z,w) 分别解量化B的值 + // 这部分代码虽然冗长,但将所有计算和访存操作完全展开, + // 能让编译器和硬件最大程度地并行调度,隐藏延迟。 + + // --- 解量化与 a_vals.x (k+0) 相乘的 b_vals_k0 --- + const int k0_block_idx = (k + 0) / 32; + const int k0_in_block = (k + 0) % 32; + const long b_idx0_k0 = (long)(n_start + 0) * n_stride + k0_block_idx; + const long b_idx1_k0 = (long)(n_start + 1) * n_stride + k0_block_idx; + const long b_idx2_k0 = (long)(n_start + 2) * n_stride + k0_block_idx; + const long b_idx3_k0 = (long)(n_start + 3) * n_stride + k0_block_idx; + const float d0_k0 = vload_half(0, (__global half *)(&(B[b_idx0_k0].d))); + const float d1_k0 = vload_half(0, (__global half *)(&(B[b_idx1_k0].d))); + const float d2_k0 = vload_half(0, (__global half *)(&(B[b_idx2_k0].d))); + const float d3_k0 = vload_half(0, (__global half *)(&(B[b_idx3_k0].d))); + const uchar qp0_k0 = B[b_idx0_k0].qs[k0_in_block % 16]; + const uchar qp1_k0 = B[b_idx1_k0].qs[k0_in_block % 16]; + const uchar qp2_k0 = B[b_idx2_k0].qs[k0_in_block % 16]; + const uchar qp3_k0 = B[b_idx3_k0].qs[k0_in_block % 16]; + const char qn0_k0 = (k0_in_block < 16) ? ((qp0_k0 & 0x0F) - 8) : ((qp0_k0 >> 4) - 8); + const char qn1_k0 = (k0_in_block < 16) ? ((qp1_k0 & 0x0F) - 8) : ((qp1_k0 >> 4) - 8); + const char qn2_k0 = (k0_in_block < 16) ? ((qp2_k0 & 0x0F) - 8) : ((qp2_k0 >> 4) - 8); + const char qn3_k0 = (k0_in_block < 16) ? 
((qp3_k0 & 0x0F) - 8) : ((qp3_k0 >> 4) - 8); + float4 b_vals_k0 = (float4)((float)qn0_k0 * d0_k0, (float)qn1_k0 * d1_k0, (float)qn2_k0 * d2_k0, (float)qn3_k0 * d3_k0); + + // --- 解量化与 a_vals.y (k+1) 相乘的 b_vals_k1 --- + const int k1_block_idx = (k + 1) / 32; + const int k1_in_block = (k + 1) % 32; + const long b_idx0_k1 = (long)(n_start + 0) * n_stride + k1_block_idx; + const long b_idx1_k1 = (long)(n_start + 1) * n_stride + k1_block_idx; + const long b_idx2_k1 = (long)(n_start + 2) * n_stride + k1_block_idx; + const long b_idx3_k1 = (long)(n_start + 3) * n_stride + k1_block_idx; + const float d0_k1 = vload_half(0, (__global half *)(&(B[b_idx0_k1].d))); + const float d1_k1 = vload_half(0, (__global half *)(&(B[b_idx1_k1].d))); + const float d2_k1 = vload_half(0, (__global half *)(&(B[b_idx2_k1].d))); + const float d3_k1 = vload_half(0, (__global half *)(&(B[b_idx3_k1].d))); + const uchar qp0_k1 = B[b_idx0_k1].qs[k1_in_block % 16]; + const uchar qp1_k1 = B[b_idx1_k1].qs[k1_in_block % 16]; + const uchar qp2_k1 = B[b_idx2_k1].qs[k1_in_block % 16]; + const uchar qp3_k1 = B[b_idx3_k1].qs[k1_in_block % 16]; + const char qn0_k1 = (k1_in_block < 16) ? ((qp0_k1 & 0x0F) - 8) : ((qp0_k1 >> 4) - 8); + const char qn1_k1 = (k1_in_block < 16) ? ((qp1_k1 & 0x0F) - 8) : ((qp1_k1 >> 4) - 8); + const char qn2_k1 = (k1_in_block < 16) ? ((qp2_k1 & 0x0F) - 8) : ((qp2_k1 >> 4) - 8); + const char qn3_k1 = (k1_in_block < 16) ? 
((qp3_k1 & 0x0F) - 8) : ((qp3_k1 >> 4) - 8); + float4 b_vals_k1 = (float4)((float)qn0_k1 * d0_k1, (float)qn1_k1 * d1_k1, (float)qn2_k1 * d2_k1, (float)qn3_k1 * d3_k1); + + // --- 解量化与 a_vals.z (k+2) 相乘的 b_vals_k2 --- + const int k2_block_idx = (k + 2) / 32; + const int k2_in_block = (k + 2) % 32; + const long b_idx0_k2 = (long)(n_start + 0) * n_stride + k2_block_idx; + const long b_idx1_k2 = (long)(n_start + 1) * n_stride + k2_block_idx; + const long b_idx2_k2 = (long)(n_start + 2) * n_stride + k2_block_idx; + const long b_idx3_k2 = (long)(n_start + 3) * n_stride + k2_block_idx; + const float d0_k2 = vload_half(0, (__global half *)(&(B[b_idx0_k2].d))); + const float d1_k2 = vload_half(0, (__global half *)(&(B[b_idx1_k2].d))); + const float d2_k2 = vload_half(0, (__global half *)(&(B[b_idx2_k2].d))); + const float d3_k2 = vload_half(0, (__global half *)(&(B[b_idx3_k2].d))); + const uchar qp0_k2 = B[b_idx0_k2].qs[k2_in_block % 16]; + const uchar qp1_k2 = B[b_idx1_k2].qs[k2_in_block % 16]; + const uchar qp2_k2 = B[b_idx2_k2].qs[k2_in_block % 16]; + const uchar qp3_k2 = B[b_idx3_k2].qs[k2_in_block % 16]; + const char qn0_k2 = (k2_in_block < 16) ? ((qp0_k2 & 0x0F) - 8) : ((qp0_k2 >> 4) - 8); + const char qn1_k2 = (k2_in_block < 16) ? ((qp1_k2 & 0x0F) - 8) : ((qp1_k2 >> 4) - 8); + const char qn2_k2 = (k2_in_block < 16) ? ((qp2_k2 & 0x0F) - 8) : ((qp2_k2 >> 4) - 8); + const char qn3_k2 = (k2_in_block < 16) ? 
((qp3_k2 & 0x0F) - 8) : ((qp3_k2 >> 4) - 8); + float4 b_vals_k2 = (float4)((float)qn0_k2 * d0_k2, (float)qn1_k2 * d1_k2, (float)qn2_k2 * d2_k2, (float)qn3_k2 * d3_k2); + + // --- 解量化与 a_vals.w (k+3) 相乘的 b_vals_k3 --- + const int k3_block_idx = (k + 3) / 32; + const int k3_in_block = (k + 3) % 32; + const long b_idx0_k3 = (long)(n_start + 0) * n_stride + k3_block_idx; + const long b_idx1_k3 = (long)(n_start + 1) * n_stride + k3_block_idx; + const long b_idx2_k3 = (long)(n_start + 2) * n_stride + k3_block_idx; + const long b_idx3_k3 = (long)(n_start + 3) * n_stride + k3_block_idx; + const float d0_k3 = vload_half(0, (__global half *)(&(B[b_idx0_k3].d))); + const float d1_k3 = vload_half(0, (__global half *)(&(B[b_idx1_k3].d))); + const float d2_k3 = vload_half(0, (__global half *)(&(B[b_idx2_k3].d))); + const float d3_k3 = vload_half(0, (__global half *)(&(B[b_idx3_k3].d))); + const uchar qp0_k3 = B[b_idx0_k3].qs[k3_in_block % 16]; + const uchar qp1_k3 = B[b_idx1_k3].qs[k3_in_block % 16]; + const uchar qp2_k3 = B[b_idx2_k3].qs[k3_in_block % 16]; + const uchar qp3_k3 = B[b_idx3_k3].qs[k3_in_block % 16]; + const char qn0_k3 = (k3_in_block < 16) ? ((qp0_k3 & 0x0F) - 8) : ((qp0_k3 >> 4) - 8); + const char qn1_k3 = (k3_in_block < 16) ? ((qp1_k3 & 0x0F) - 8) : ((qp1_k3 >> 4) - 8); + const char qn2_k3 = (k3_in_block < 16) ? ((qp2_k3 & 0x0F) - 8) : ((qp2_k3 >> 4) - 8); + const char qn3_k3 = (k3_in_block < 16) ? ((qp3_k3 & 0x0F) - 8) : ((qp3_k3 >> 4) - 8); + float4 b_vals_k3 = (float4)((float)qn0_k3 * d0_k3, (float)qn1_k3 * d1_k3, (float)qn2_k3 * d2_k3, (float)qn3_k3 * d3_k3); + + // c. 累加: 4次独立的MAD指令,可以被硬件并行执行 + acc = mad(a_vals.x, b_vals_k0, acc); + acc = mad(a_vals.y, b_vals_k1, acc); + acc = mad(a_vals.z, b_vals_k2, acc); + acc = mad(a_vals.w, b_vals_k3, acc); + } + + // 4. 
扫尾处理: K不是4的倍数时,处理余下的1-3个元素 (保持不变) + for (int k = K_vec_size * 4; k < K; ++k) { + int pixel_x = k / 4; + int component = k % 4; + half4 a_pixel = read_imageh(A, sampler, (int2)(pixel_x, 0)); + float a_val; + if (component == 0) + a_val = (float)a_pixel.x; + else if (component == 1) + a_val = (float)a_pixel.y; + else if (component == 2) + a_val = (float)a_pixel.z; + else + a_val = (float)a_pixel.w; + + const int k_block_idx = k / 32; + const int k_in_block = k % 32; + const long b_idx0 = (long)(n_start + 0) * n_stride + k_block_idx; + const long b_idx1 = (long)(n_start + 1) * n_stride + k_block_idx; + const long b_idx2 = (long)(n_start + 2) * n_stride + k_block_idx; + const long b_idx3 = (long)(n_start + 3) * n_stride + k_block_idx; + const float d0 = vload_half(0, (__global half *)(&(B[b_idx0].d))); + const float d1 = vload_half(0, (__global half *)(&(B[b_idx1].d))); + const float d2 = vload_half(0, (__global half *)(&(B[b_idx2].d))); + const float d3 = vload_half(0, (__global half *)(&(B[b_idx3].d))); + const uchar qp0 = B[b_idx0].qs[k_in_block % 16]; + const uchar qp1 = B[b_idx1].qs[k_in_block % 16]; + const uchar qp2 = B[b_idx2].qs[k_in_block % 16]; + const uchar qp3 = B[b_idx3].qs[k_in_block % 16]; + const char qn0 = (k_in_block < 16) ? ((qp0 & 0x0F) - 8) : ((qp0 >> 4) - 8); + const char qn1 = (k_in_block < 16) ? ((qp1 & 0x0F) - 8) : ((qp1 >> 4) - 8); + const char qn2 = (k_in_block < 16) ? ((qp2 & 0x0F) - 8) : ((qp2 >> 4) - 8); + const char qn3 = (k_in_block < 16) ? ((qp3 & 0x0F) - 8) : ((qp3 >> 4) - 8); + float4 b_vals = (float4)((float)qn0 * d0, (float)qn1 * d1, (float)qn2 * d2, (float)qn3 * d3); + + acc = mad(a_val, b_vals, acc); + } + + // 5. 添加偏置 (优化为单次vload4,因为N通常是4的倍数) + if (has_bias != 0) { + float4 bias_vals = vload4(0, bias + n_start); + acc += bias_vals; + } + + // 6. 
将结果像素写入输出 Image C + write_imageh(C, (int2)(gx, 0), convert_half4_rte(acc)); +} +*/ +__kernel void gemv_fp16_q4_0_transb_bias_image_pipe( + sampler_t sampler, + __read_only image2d_t A, + __global const block_q4_0 *B, + __global const float *bias, + __write_only image2d_t C, + const int K, const int N, + const int H, + const int has_bias) { + const int gx = get_global_id(0); + const int n_start = gx * 4; + if (n_start >= N) { return; } + + half4 acc = (half4)(0.0h); + + const int K_blocks = K / 32; + const long n_stride = K_blocks; + const int K_vec_size_x8 = K / 8; + + for (int k_vec = 0; k_vec < K_vec_size_x8; ++k_vec) { + const int k = k_vec * 8; + + // 读取A时,y坐标固定为0,因为是GEMV + half4 a_vals_lo = read_imageh(A, sampler, (int2)(k / 4, 0)); + half4 a_vals_hi = read_imageh(A, sampler, (int2)(k / 4 + 1, 0)); + + // --- 8倍展开的解量化与计算(代码同上一版,此处为简洁省略)--- + // --- k+0 --- + const int k0_block_idx = (k + 0) / 32; + const int k0_in_block = (k + 0) % 32; + const long b_idx0_k0 = (long)(n_start + 0) * n_stride + k0_block_idx; + const long b_idx1_k0 = (long)(n_start + 1) * n_stride + k0_block_idx; + const long b_idx2_k0 = (long)(n_start + 2) * n_stride + k0_block_idx; + const long b_idx3_k0 = (long)(n_start + 3) * n_stride + k0_block_idx; + const half d0_k0 = vload_half(0, (__global half *)(&(B[b_idx0_k0].d))); + const half d1_k0 = vload_half(0, (__global half *)(&(B[b_idx1_k0].d))); + const half d2_k0 = vload_half(0, (__global half *)(&(B[b_idx2_k0].d))); + const half d3_k0 = vload_half(0, (__global half *)(&(B[b_idx3_k0].d))); + const uchar qp0_k0 = B[b_idx0_k0].qs[k0_in_block % 16]; + const uchar qp1_k0 = B[b_idx1_k0].qs[k0_in_block % 16]; + const uchar qp2_k0 = B[b_idx2_k0].qs[k0_in_block % 16]; + const uchar qp3_k0 = B[b_idx3_k0].qs[k0_in_block % 16]; + const half qn0_k0 = (half)((k0_in_block < 16) ? ((qp0_k0 & 0x0F) - 8) : ((qp0_k0 >> 4) - 8)); + const half qn1_k0 = (half)((k0_in_block < 16) ? 
((qp1_k0 & 0x0F) - 8) : ((qp1_k0 >> 4) - 8)); + const half qn2_k0 = (half)((k0_in_block < 16) ? ((qp2_k0 & 0x0F) - 8) : ((qp2_k0 >> 4) - 8)); + const half qn3_k0 = (half)((k0_in_block < 16) ? ((qp3_k0 & 0x0F) - 8) : ((qp3_k0 >> 4) - 8)); + half4 b_vals_k0 = (half4)(qn0_k0 * d0_k0, qn1_k0 * d1_k0, qn2_k0 * d2_k0, qn3_k0 * d3_k0); + // ... k+1 到 k+7 的代码 ... + const int k1_block_idx = (k + 1) / 32; + const int k1_in_block = (k + 1) % 32; + const long b_idx0_k1 = (long)(n_start + 0) * n_stride + k1_block_idx; + const long b_idx1_k1 = (long)(n_start + 1) * n_stride + k1_block_idx; + const long b_idx2_k1 = (long)(n_start + 2) * n_stride + k1_block_idx; + const long b_idx3_k1 = (long)(n_start + 3) * n_stride + k1_block_idx; + const half d0_k1 = vload_half(0, (__global half *)(&(B[b_idx0_k1].d))); + const half d1_k1 = vload_half(0, (__global half *)(&(B[b_idx1_k1].d))); + const half d2_k1 = vload_half(0, (__global half *)(&(B[b_idx2_k1].d))); + const half d3_k1 = vload_half(0, (__global half *)(&(B[b_idx3_k1].d))); + const uchar qp0_k1 = B[b_idx0_k1].qs[k1_in_block % 16]; + const uchar qp1_k1 = B[b_idx1_k1].qs[k1_in_block % 16]; + const uchar qp2_k1 = B[b_idx2_k1].qs[k1_in_block % 16]; + const uchar qp3_k1 = B[b_idx3_k1].qs[k1_in_block % 16]; + const half qn0_k1 = (half)((k1_in_block < 16) ? ((qp0_k1 & 0x0F) - 8) : ((qp0_k1 >> 4) - 8)); + const half qn1_k1 = (half)((k1_in_block < 16) ? ((qp1_k1 & 0x0F) - 8) : ((qp1_k1 >> 4) - 8)); + const half qn2_k1 = (half)((k1_in_block < 16) ? ((qp2_k1 & 0x0F) - 8) : ((qp2_k1 >> 4) - 8)); + const half qn3_k1 = (half)((k1_in_block < 16) ? 
((qp3_k1 & 0x0F) - 8) : ((qp3_k1 >> 4) - 8)); + half4 b_vals_k1 = (half4)(qn0_k1 * d0_k1, qn1_k1 * d1_k1, qn2_k1 * d2_k1, qn3_k1 * d3_k1); + const int k2_block_idx = (k + 2) / 32; + const int k2_in_block = (k + 2) % 32; + const long b_idx0_k2 = (long)(n_start + 0) * n_stride + k2_block_idx; + const long b_idx1_k2 = (long)(n_start + 1) * n_stride + k2_block_idx; + const long b_idx2_k2 = (long)(n_start + 2) * n_stride + k2_block_idx; + const long b_idx3_k2 = (long)(n_start + 3) * n_stride + k2_block_idx; + const half d0_k2 = vload_half(0, (__global half *)(&(B[b_idx0_k2].d))); + const half d1_k2 = vload_half(0, (__global half *)(&(B[b_idx1_k2].d))); + const half d2_k2 = vload_half(0, (__global half *)(&(B[b_idx2_k2].d))); + const half d3_k2 = vload_half(0, (__global half *)(&(B[b_idx3_k2].d))); + const uchar qp0_k2 = B[b_idx0_k2].qs[k2_in_block % 16]; + const uchar qp1_k2 = B[b_idx1_k2].qs[k2_in_block % 16]; + const uchar qp2_k2 = B[b_idx2_k2].qs[k2_in_block % 16]; + const uchar qp3_k2 = B[b_idx3_k2].qs[k2_in_block % 16]; + const half qn0_k2 = (half)((k2_in_block < 16) ? ((qp0_k2 & 0x0F) - 8) : ((qp0_k2 >> 4) - 8)); + const half qn1_k2 = (half)((k2_in_block < 16) ? ((qp1_k2 & 0x0F) - 8) : ((qp1_k2 >> 4) - 8)); + const half qn2_k2 = (half)((k2_in_block < 16) ? ((qp2_k2 & 0x0F) - 8) : ((qp2_k2 >> 4) - 8)); + const half qn3_k2 = (half)((k2_in_block < 16) ? 
((qp3_k2 & 0x0F) - 8) : ((qp3_k2 >> 4) - 8)); + half4 b_vals_k2 = (half4)(qn0_k2 * d0_k2, qn1_k2 * d1_k2, qn2_k2 * d2_k2, qn3_k2 * d3_k2); + const int k3_block_idx = (k + 3) / 32; + const int k3_in_block = (k + 3) % 32; + const long b_idx0_k3 = (long)(n_start + 0) * n_stride + k3_block_idx; + const long b_idx1_k3 = (long)(n_start + 1) * n_stride + k3_block_idx; + const long b_idx2_k3 = (long)(n_start + 2) * n_stride + k3_block_idx; + const long b_idx3_k3 = (long)(n_start + 3) * n_stride + k3_block_idx; + const half d0_k3 = vload_half(0, (__global half *)(&(B[b_idx0_k3].d))); + const half d1_k3 = vload_half(0, (__global half *)(&(B[b_idx1_k3].d))); + const half d2_k3 = vload_half(0, (__global half *)(&(B[b_idx2_k3].d))); + const half d3_k3 = vload_half(0, (__global half *)(&(B[b_idx3_k3].d))); + const uchar qp0_k3 = B[b_idx0_k3].qs[k3_in_block % 16]; + const uchar qp1_k3 = B[b_idx1_k3].qs[k3_in_block % 16]; + const uchar qp2_k3 = B[b_idx2_k3].qs[k3_in_block % 16]; + const uchar qp3_k3 = B[b_idx3_k3].qs[k3_in_block % 16]; + const half qn0_k3 = (half)((k3_in_block < 16) ? ((qp0_k3 & 0x0F) - 8) : ((qp0_k3 >> 4) - 8)); + const half qn1_k3 = (half)((k3_in_block < 16) ? ((qp1_k3 & 0x0F) - 8) : ((qp1_k3 >> 4) - 8)); + const half qn2_k3 = (half)((k3_in_block < 16) ? ((qp2_k3 & 0x0F) - 8) : ((qp2_k3 >> 4) - 8)); + const half qn3_k3 = (half)((k3_in_block < 16) ? 
((qp3_k3 & 0x0F) - 8) : ((qp3_k3 >> 4) - 8)); + half4 b_vals_k3 = (half4)(qn0_k3 * d0_k3, qn1_k3 * d1_k3, qn2_k3 * d2_k3, qn3_k3 * d3_k3); + const int k4_block_idx = (k + 4) / 32; + const int k4_in_block = (k + 4) % 32; + const long b_idx0_k4 = (long)(n_start + 0) * n_stride + k4_block_idx; + const long b_idx1_k4 = (long)(n_start + 1) * n_stride + k4_block_idx; + const long b_idx2_k4 = (long)(n_start + 2) * n_stride + k4_block_idx; + const long b_idx3_k4 = (long)(n_start + 3) * n_stride + k4_block_idx; + const half d0_k4 = vload_half(0, (__global half *)(&(B[b_idx0_k4].d))); + const half d1_k4 = vload_half(0, (__global half *)(&(B[b_idx1_k4].d))); + const half d2_k4 = vload_half(0, (__global half *)(&(B[b_idx2_k4].d))); + const half d3_k4 = vload_half(0, (__global half *)(&(B[b_idx3_k4].d))); + const uchar qp0_k4 = B[b_idx0_k4].qs[k4_in_block % 16]; + const uchar qp1_k4 = B[b_idx1_k4].qs[k4_in_block % 16]; + const uchar qp2_k4 = B[b_idx2_k4].qs[k4_in_block % 16]; + const uchar qp3_k4 = B[b_idx3_k4].qs[k4_in_block % 16]; + const half qn0_k4 = (half)((k4_in_block < 16) ? ((qp0_k4 & 0x0F) - 8) : ((qp0_k4 >> 4) - 8)); + const half qn1_k4 = (half)((k4_in_block < 16) ? ((qp1_k4 & 0x0F) - 8) : ((qp1_k4 >> 4) - 8)); + const half qn2_k4 = (half)((k4_in_block < 16) ? ((qp2_k4 & 0x0F) - 8) : ((qp2_k4 >> 4) - 8)); + const half qn3_k4 = (half)((k4_in_block < 16) ? 
((qp3_k4 & 0x0F) - 8) : ((qp3_k4 >> 4) - 8)); + half4 b_vals_k4 = (half4)(qn0_k4 * d0_k4, qn1_k4 * d1_k4, qn2_k4 * d2_k4, qn3_k4 * d3_k4); + const int k5_block_idx = (k + 5) / 32; + const int k5_in_block = (k + 5) % 32; + const long b_idx0_k5 = (long)(n_start + 0) * n_stride + k5_block_idx; + const long b_idx1_k5 = (long)(n_start + 1) * n_stride + k5_block_idx; + const long b_idx2_k5 = (long)(n_start + 2) * n_stride + k5_block_idx; + const long b_idx3_k5 = (long)(n_start + 3) * n_stride + k5_block_idx; + const half d0_k5 = vload_half(0, (__global half *)(&(B[b_idx0_k5].d))); + const half d1_k5 = vload_half(0, (__global half *)(&(B[b_idx1_k5].d))); + const half d2_k5 = vload_half(0, (__global half *)(&(B[b_idx2_k5].d))); + const half d3_k5 = vload_half(0, (__global half *)(&(B[b_idx3_k5].d))); + const uchar qp0_k5 = B[b_idx0_k5].qs[k5_in_block % 16]; + const uchar qp1_k5 = B[b_idx1_k5].qs[k5_in_block % 16]; + const uchar qp2_k5 = B[b_idx2_k5].qs[k5_in_block % 16]; + const uchar qp3_k5 = B[b_idx3_k5].qs[k5_in_block % 16]; + const half qn0_k5 = (half)((k5_in_block < 16) ? ((qp0_k5 & 0x0F) - 8) : ((qp0_k5 >> 4) - 8)); + const half qn1_k5 = (half)((k5_in_block < 16) ? ((qp1_k5 & 0x0F) - 8) : ((qp1_k5 >> 4) - 8)); + const half qn2_k5 = (half)((k5_in_block < 16) ? ((qp2_k5 & 0x0F) - 8) : ((qp2_k5 >> 4) - 8)); + const half qn3_k5 = (half)((k5_in_block < 16) ? 
((qp3_k5 & 0x0F) - 8) : ((qp3_k5 >> 4) - 8)); + half4 b_vals_k5 = (half4)(qn0_k5 * d0_k5, qn1_k5 * d1_k5, qn2_k5 * d2_k5, qn3_k5 * d3_k5); + const int k6_block_idx = (k + 6) / 32; + const int k6_in_block = (k + 6) % 32; + const long b_idx0_k6 = (long)(n_start + 0) * n_stride + k6_block_idx; + const long b_idx1_k6 = (long)(n_start + 1) * n_stride + k6_block_idx; + const long b_idx2_k6 = (long)(n_start + 2) * n_stride + k6_block_idx; + const long b_idx3_k6 = (long)(n_start + 3) * n_stride + k6_block_idx; + const half d0_k6 = vload_half(0, (__global half *)(&(B[b_idx0_k6].d))); + const half d1_k6 = vload_half(0, (__global half *)(&(B[b_idx1_k6].d))); + const half d2_k6 = vload_half(0, (__global half *)(&(B[b_idx2_k6].d))); + const half d3_k6 = vload_half(0, (__global half *)(&(B[b_idx3_k6].d))); + const uchar qp0_k6 = B[b_idx0_k6].qs[k6_in_block % 16]; + const uchar qp1_k6 = B[b_idx1_k6].qs[k6_in_block % 16]; + const uchar qp2_k6 = B[b_idx2_k6].qs[k6_in_block % 16]; + const uchar qp3_k6 = B[b_idx3_k6].qs[k6_in_block % 16]; + const half qn0_k6 = (half)((k6_in_block < 16) ? ((qp0_k6 & 0x0F) - 8) : ((qp0_k6 >> 4) - 8)); + const half qn1_k6 = (half)((k6_in_block < 16) ? ((qp1_k6 & 0x0F) - 8) : ((qp1_k6 >> 4) - 8)); + const half qn2_k6 = (half)((k6_in_block < 16) ? ((qp2_k6 & 0x0F) - 8) : ((qp2_k6 >> 4) - 8)); + const half qn3_k6 = (half)((k6_in_block < 16) ? 
((qp3_k6 & 0x0F) - 8) : ((qp3_k6 >> 4) - 8)); + half4 b_vals_k6 = (half4)(qn0_k6 * d0_k6, qn1_k6 * d1_k6, qn2_k6 * d2_k6, qn3_k6 * d3_k6); + const int k7_block_idx = (k + 7) / 32; + const int k7_in_block = (k + 7) % 32; + const long b_idx0_k7 = (long)(n_start + 0) * n_stride + k7_block_idx; + const long b_idx1_k7 = (long)(n_start + 1) * n_stride + k7_block_idx; + const long b_idx2_k7 = (long)(n_start + 2) * n_stride + k7_block_idx; + const long b_idx3_k7 = (long)(n_start + 3) * n_stride + k7_block_idx; + const half d0_k7 = vload_half(0, (__global half *)(&(B[b_idx0_k7].d))); + const half d1_k7 = vload_half(0, (__global half *)(&(B[b_idx1_k7].d))); + const half d2_k7 = vload_half(0, (__global half *)(&(B[b_idx2_k7].d))); + const half d3_k7 = vload_half(0, (__global half *)(&(B[b_idx3_k7].d))); + const uchar qp0_k7 = B[b_idx0_k7].qs[k7_in_block % 16]; + const uchar qp1_k7 = B[b_idx1_k7].qs[k7_in_block % 16]; + const uchar qp2_k7 = B[b_idx2_k7].qs[k7_in_block % 16]; + const uchar qp3_k7 = B[b_idx3_k7].qs[k7_in_block % 16]; + const half qn0_k7 = (half)((k7_in_block < 16) ? ((qp0_k7 & 0x0F) - 8) : ((qp0_k7 >> 4) - 8)); + const half qn1_k7 = (half)((k7_in_block < 16) ? ((qp1_k7 & 0x0F) - 8) : ((qp1_k7 >> 4) - 8)); + const half qn2_k7 = (half)((k7_in_block < 16) ? ((qp2_k7 & 0x0F) - 8) : ((qp2_k7 >> 4) - 8)); + const half qn3_k7 = (half)((k7_in_block < 16) ? 
((qp3_k7 & 0x0F) - 8) : ((qp3_k7 >> 4) - 8)); + half4 b_vals_k7 = (half4)(qn0_k7 * d0_k7, qn1_k7 * d1_k7, qn2_k7 * d2_k7, qn3_k7 * d3_k7); + + acc = mad(a_vals_lo.x, b_vals_k0, acc); + acc = mad(a_vals_lo.y, b_vals_k1, acc); + acc = mad(a_vals_lo.z, b_vals_k2, acc); + acc = mad(a_vals_lo.w, b_vals_k3, acc); + acc = mad(a_vals_hi.x, b_vals_k4, acc); + acc = mad(a_vals_hi.y, b_vals_k5, acc); + acc = mad(a_vals_hi.z, b_vals_k6, acc); + acc = mad(a_vals_hi.w, b_vals_k7, acc); + } + + for (int k = K_vec_size_x8 * 8; k < K; ++k) { + int pixel_x = k / 4; + int component = k % 4; + half4 a_pixel = read_imageh(A, sampler, (int2)(pixel_x, 0)); + half a_val; + if (component == 0) + a_val = a_pixel.x; + else if (component == 1) + a_val = a_pixel.y; + else if (component == 2) + a_val = a_pixel.z; + else + a_val = a_pixel.w; + const int k_block_idx = k / 32; + const int k_in_block = k % 32; + const long b_idx0 = (long)(n_start + 0) * n_stride + k_block_idx; + const long b_idx1 = (long)(n_start + 1) * n_stride + k_block_idx; + const long b_idx2 = (long)(n_start + 2) * n_stride + k_block_idx; + const long b_idx3 = (long)(n_start + 3) * n_stride + k_block_idx; + const half d0 = vload_half(0, (__global half *)(&(B[b_idx0].d))); + const half d1 = vload_half(0, (__global half *)(&(B[b_idx1].d))); + const half d2 = vload_half(0, (__global half *)(&(B[b_idx2].d))); + const half d3 = vload_half(0, (__global half *)(&(B[b_idx3].d))); + const uchar qp0 = B[b_idx0].qs[k_in_block % 16]; + const uchar qp1 = B[b_idx1].qs[k_in_block % 16]; + const uchar qp2 = B[b_idx2].qs[k_in_block % 16]; + const uchar qp3 = B[b_idx3].qs[k_in_block % 16]; + const half qn0 = (half)((k_in_block < 16) ? ((qp0 & 0x0F) - 8) : ((qp0 >> 4) - 8)); + const half qn1 = (half)((k_in_block < 16) ? ((qp1 & 0x0F) - 8) : ((qp1 >> 4) - 8)); + const half qn2 = (half)((k_in_block < 16) ? ((qp2 & 0x0F) - 8) : ((qp2 >> 4) - 8)); + const half qn3 = (half)((k_in_block < 16) ? 
((qp3 & 0x0F) - 8) : ((qp3 >> 4) - 8)); + half4 b_vals = (half4)(qn0 * d0, qn1 * d1, qn2 * d2, qn3 * d3); + acc = mad(a_val, b_vals, acc); + } + + if (has_bias != 0) { + acc += convert_half4_rte(vload4(0, bias + n_start)); + } + write_imageh(C, (int2)(gx, 0), acc); +} +#else +// ---------- [兼容回退版] ---------- +__kernel void gemv_fp16_q4_0_transb_bias_image_pipe( + sampler_t sampler, + __read_only image2d_t A, + __global const block_q4_0 *B, + __global const float *bias, + __write_only image2d_t C, + const int K, const int N, + const int H, + const int has_bias) { + // 兼容版逻辑与FP32版本完全相同 + const int gx = get_global_id(0); + const int n_start = gx * 4; + if (n_start >= N) { return; } + + float4 acc = (float4)(0.0f, 0.0f, 0.0f, 0.0f); + const int K_blocks = K / 32; + const long n_stride = K_blocks; + + for (int k = 0; k < K; ++k) { + int pixel_x = k / 4; + int component = k % 4; + float4 a_pixel = read_imagef(A, sampler, (int2)(pixel_x, 0)); + float a_val; + if (component == 0) + a_val = a_pixel.x; + else if (component == 1) + a_val = a_pixel.y; + else if (component == 2) + a_val = a_pixel.z; + else + a_val = a_pixel.w; + + const int k_block_idx = k / 32; + const int k_in_block = k % 32; + const long b_block_mem_idx0 = (long)(n_start + 0) * n_stride + k_block_idx; + const long b_block_mem_idx1 = (long)(n_start + 1) * n_stride + k_block_idx; + const long b_block_mem_idx2 = (long)(n_start + 2) * n_stride + k_block_idx; + const long b_block_mem_idx3 = (long)(n_start + 3) * n_stride + k_block_idx; + + const float d_b0 = vload_half(0, (__global half *)(&(B[b_block_mem_idx0].d))); + const float d_b1 = vload_half(0, (__global half *)(&(B[b_block_mem_idx1].d))); + const float d_b2 = vload_half(0, (__global half *)(&(B[b_block_mem_idx2].d))); + const float d_b3 = vload_half(0, (__global half *)(&(B[b_block_mem_idx3].d))); + + const uchar q_packed0 = B[b_block_mem_idx0].qs[k_in_block % 16]; + const char q_nibble0 = (k_in_block < 16) ? 
((q_packed0 & 0x0F) - 8) : ((q_packed0 >> 4) - 8); + const uchar q_packed1 = B[b_block_mem_idx1].qs[k_in_block % 16]; + const char q_nibble1 = (k_in_block < 16) ? ((q_packed1 & 0x0F) - 8) : ((q_packed1 >> 4) - 8); + const uchar q_packed2 = B[b_block_mem_idx2].qs[k_in_block % 16]; + const char q_nibble2 = (k_in_block < 16) ? ((q_packed2 & 0x0F) - 8) : ((q_packed2 >> 4) - 8); + const uchar q_packed3 = B[b_block_mem_idx3].qs[k_in_block % 16]; + const char q_nibble3 = (k_in_block < 16) ? ((q_packed3 & 0x0F) - 8) : ((q_packed3 >> 4) - 8); + + float4 b_vals = (float4)((float)q_nibble0 * d_b0, (float)q_nibble1 * d_b1, (float)q_nibble2 * d_b2, (float)q_nibble3 * d_b3); + + acc = mad(a_val, b_vals, acc); + } + + if (has_bias != 0) { + float4 bias_vals = vload4(0, bias + n_start); + acc += bias_vals; + } + + // 兼容版输出的Image也是FP32格式 + write_imagef(C, (int2)(gx, 0), acc); +} +#endif // SUPPORTS_FP16 \ No newline at end of file diff --git a/mllm/backends/opencl/kernel/mul.cl b/mllm/backends/opencl/kernel/mul.cl new file mode 100644 index 000000000..a52668a56 --- /dev/null +++ b/mllm/backends/opencl/kernel/mul.cl @@ -0,0 +1,135 @@ +// 文件名: kernel/mul.cl + +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +// ================================================================== +// 1. 
Tensor * Tensor Multiplication Kernels +// ================================================================== + +__kernel void mul_float( + __global const float *A, + __global const float *B, + __global float *C, + const int b_dim, + const int a_dim) { + size_t index = get_global_id(0); + + // If b_dim is 1 and a_dim is greater than 1, apply broadcasting + if (b_dim == 1 && a_dim > 1) { + size_t a_bsh_index = index / a_dim; + size_t b_index = a_bsh_index; + C[index] = A[index] * B[b_index]; + } else { + // Original element-wise multiplication + C[index] = A[index] * B[index]; + } +} + +__kernel void mul_float_image2d( + sampler_t sampler, + __read_only image2d_t inputA, + __read_only image2d_t inputB, + __write_only image2d_t output, + const int width, + const int height) { + const int2 pos = (int2)(get_global_id(0), get_global_id(1)); + if (pos.x >= width || pos.y >= height) { return; } + float4 inA = read_imagef(inputA, sampler, pos); + float4 inB = read_imagef(inputB, sampler, pos); + float4 result = inA * inB; + write_imagef(output, pos, result); +} + +__kernel void mul_fp16_vector( + __global const half *A, + __global const half *B, + __global half *C, + const int b_dim, + const int a_dim) { + const int i = get_global_id(0); + + // If b_dim is 1 and a_dim is greater than 1, apply broadcasting + if (b_dim == 1 && a_dim > 1) { + const int start_idx_A = i * 4; + for (int j = 0; j < 4; ++j) { + int current_idx_A = start_idx_A + j; + size_t a_bsh_index = current_idx_A / a_dim; + size_t b_index = a_bsh_index; + C[current_idx_A] = A[current_idx_A] * B[b_index]; + } + } else { + // Original element-wise vectorized multiplication + half4 a_vec = vload4(i, A); + half4 b_vec = vload4(i, B); + half4 c_vec = a_vec * b_vec; + vstore4(c_vec, i, C); + } +} + +__kernel void mul_fp16_image2d( + sampler_t sampler, + __read_only image2d_t inputA, + __read_only image2d_t inputB, + __write_only image2d_t output, + const int width, + const int height) { + const int2 pos = 
(int2)(get_global_id(0), get_global_id(1)); + if (pos.x >= width || pos.y >= height) { return; } + half4 inA = read_imageh(inputA, sampler, pos); + half4 inB = read_imageh(inputB, sampler, pos); + half4 result = inA * inB; + write_imageh(output, pos, result); +} + +// ================================================================== +// 2. Tensor * Scalar Multiplication Kernels +// ================================================================== + +__kernel void mul_scalar_float( + __global const float *A, + const float B, + __global float *C) { + size_t index = get_global_id(0); + C[index] = A[index] * B; +} + +__kernel void mul_scalar_fp16_vector( + __global const half *A, + const half B, + __global half *C) { + const int i = get_global_id(0); + half4 a_vec = vload4(i, A); + half4 b_vec = (half4)(B); + half4 c_vec = a_vec * b_vec; + vstore4(c_vec, i, C); +} + +__kernel void mul_scalar_float_image2d( + sampler_t sampler, + __read_only image2d_t inputA, + const float B, + __write_only image2d_t output, + const int width, + const int height) { + const int2 pos = (int2)(get_global_id(0), get_global_id(1)); + if (pos.x >= width || pos.y >= height) { return; } + float4 inA = read_imagef(inputA, sampler, pos); + float4 inB = (float4)(B); + float4 result = inA * inB; + write_imagef(output, pos, result); +} + +__kernel void mul_scalar_fp16_image2d( + sampler_t sampler, + __read_only image2d_t inputA, + const half B, + __write_only image2d_t output, + const int width, + const int height) { + const int2 pos = (int2)(get_global_id(0), get_global_id(1)); + if (pos.x >= width || pos.y >= height) { return; } + half4 inA = read_imageh(inputA, sampler, pos); + half4 inB = (half4)(B); + half4 result = inA * inB; + write_imageh(output, pos, result); +} \ No newline at end of file diff --git a/mllm/backends/opencl/kernel/rmsnorm.cl b/mllm/backends/opencl/kernel/rmsnorm.cl new file mode 100644 index 000000000..c9d9cda9f --- /dev/null +++ b/mllm/backends/opencl/kernel/rmsnorm.cl @@ 
-0,0 +1,162 @@ +// 文件名: rmsnorm.cl + +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +// 定义工作组内用于并行归约的线程数(必须是2的幂) +#define RMSNORM_WG_SIZE 256 + +// 与 C++ DataType.hpp 中定义匹配的量化块结构 +typedef struct { + half d; + uchar qs[16]; // QK4_0 / 2 = 16 +} block_q4_0; + +// 内核辅助函数:实时从 Q4_0 块中解量化一个元素 +inline float dequantize_q4_0(const __global block_q4_0 *blocks, int index) { + const int block_idx = index / 32; // QK4_0 is 32 + const int quant_idx_in_block = index % 32; + + // 定位到对应的 Q4_0 块 + const __global block_q4_0 *b = &blocks[block_idx]; + + // 从 uchar 中提取 4-bit 的量化值 + const uchar quant_pair = b->qs[quant_idx_in_block / 2]; + const int nibble = (quant_idx_in_block % 2 == 0) ? (quant_pair & 0x0F) : (quant_pair >> 4); + + // 应用反量化公式 + return (float)b->d * (float)(nibble - 8); +} + +__kernel void rmsnorm_f32_q4( + __global const float *src, // 输入张量 (fp32) + __global float *dst, // 输出张量 (fp32) + __global const void *weights, // 权重张量 (可以是 fp32 或 q4_0) + const int weight_is_q4, // 标志位:0 表示权重是 fp32, 1 表示是 q4_0 + const int D, // Dimension, 即每行的长度 + const float epsilon, // epsilon 值,防止除以零 + const int add_unit_offset // 标志位:是否对权重执行 +1 操作 +) { + // 1. 获取ID + const int row_id = get_group_id(0); // 每个工作组处理一行,行ID由工作组ID决定 + const int local_id = get_local_id(0); // 工作组内的线程ID + + // 2. 在本地内存中声明共享数组 + __local float local_sum_sq[RMSNORM_WG_SIZE]; + + // 3. 并行计算平方和 + float thread_sum_sq = 0.0f; // 每个线程计算一部分元素的平方和 + for (int i = local_id; i < D; i += RMSNORM_WG_SIZE) { + float val = src[row_id * D + i]; + thread_sum_sq += val * val; + } + local_sum_sq[local_id] = thread_sum_sq; + + // 4. 工作组内归约(Reduction),计算整个行的总平方和 + barrier(CLK_LOCAL_MEM_FENCE); + for (int s = RMSNORM_WG_SIZE / 2; s > 0; s >>= 1) { + if (local_id < s) { + local_sum_sq[local_id] += local_sum_sq[local_id + s]; + } + barrier(CLK_LOCAL_MEM_FENCE); + } + // 此时,local_sum_sq[0] 中存放了整个行的平方和 + + // 5. 
计算 RMS 缩放因子并安全地广播 + float rms_val; + // 只有工作组的第一个线程进行这个标量计算 + if (local_id == 0) { + float variance = local_sum_sq[0] / D; + rms_val = rsqrt(variance + epsilon); + local_sum_sq[0] = rms_val; // 线程0计算结果并存入共享内存 + } + + // 同步点:确保所有线程都等待线程0将rms_val写入共享内存 + barrier(CLK_LOCAL_MEM_FENCE); + + // 所有线程(包括线程0)从共享内存中读取广播的值 + rms_val = local_sum_sq[0]; + + // 6. 并行执行归一化和应用权重 + for (int i = local_id; i < D; i += RMSNORM_WG_SIZE) { + // a. 获取权重值 + float weight_val; + if (weight_is_q4) { + weight_val = dequantize_q4_0((const __global block_q4_0 *)weights, i); + } else { + weight_val = ((const __global float *)weights)[i]; + } + + // b. 根据标志位决定是否加1 + if (add_unit_offset) { + weight_val += 1.0f; + } + + // c. 计算最终结果并写回全局内存 + size_t index = row_id * D + i; + dst[index] = src[index] * rms_val * weight_val; + } +} + +// ================================================================== +// 2. FP16 Input Kernel (rmsnorm_f16_q4) +// ================================================================== +__kernel void rmsnorm_f16_q4( + __global const half *src, // 输入张量 (fp16) + __global half *dst, // 输出张量 (fp16) + __global const void *weights, // 权重张量 (可以是 fp32 或 q4_0) + const int weight_is_q4, // 标志位 + const int D, // Dimension + const float epsilon, // Epsilon (仍然是 float) + const int add_unit_offset // 标志位 +) { + const int row_id = get_group_id(0); + const int local_id = get_local_id(0); + + __local float local_sum_sq[RMSNORM_WG_SIZE]; + + // 使用 float 累加器以保证精度 + float thread_sum_sq = 0.0f; + for (int i = local_id; i < D; i += RMSNORM_WG_SIZE) { + // 从 half 转换为 float 进行计算 + float val = (float)src[row_id * D + i]; + thread_sum_sq += val * val; + } + local_sum_sq[local_id] = thread_sum_sq; + + // 工作组内归约 (与fp32版本完全相同) + barrier(CLK_LOCAL_MEM_FENCE); + for (int s = RMSNORM_WG_SIZE / 2; s > 0; s >>= 1) { + if (local_id < s) { + local_sum_sq[local_id] += local_sum_sq[local_id + s]; + } + barrier(CLK_LOCAL_MEM_FENCE); + } + + // 计算 RMS 缩放因子 (与fp32版本完全相同) + float rms_val; + if (local_id == 0) { + 
float variance = local_sum_sq[0] / D; + rms_val = rsqrt(variance + epsilon); + local_sum_sq[0] = rms_val; + } + barrier(CLK_LOCAL_MEM_FENCE); + rms_val = local_sum_sq[0]; + + // 归一化和应用权重 + for (int i = local_id; i < D; i += RMSNORM_WG_SIZE) { + float weight_val; + if (weight_is_q4) { + weight_val = dequantize_q4_0((const __global block_q4_0 *)weights, i); + } else { + weight_val = ((const __global float *)weights)[i]; + } + if (add_unit_offset) { + weight_val += 1.0f; + } + + size_t index = row_id * D + i; + // 计算结果为 float,最后转换回 half 存入 dst + float src_val = (float)src[index]; + dst[index] = (half)(src_val * rms_val * weight_val); + } +} \ No newline at end of file diff --git a/mllm/backends/opencl/kernel/rope.cl b/mllm/backends/opencl/kernel/rope.cl new file mode 100644 index 000000000..476c385a9 --- /dev/null +++ b/mllm/backends/opencl/kernel/rope.cl @@ -0,0 +1,136 @@ +// 文件名: kernel/rope.cl +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +// LLaMA-style RoPE 内核 (FP32) +__kernel void rope_llama_fp32( + __global float *data, + __global const float *sin_table, + __global const float *cos_table, + const int partial_dim, + const int head_dim, + const int seq_len, + const int pos_offset) { + const int d = get_global_id(0); // Dimension index within a pair (0 to partial_dim/2 - 1) + const int s = get_global_id(1); // Sequence index + const int h = get_global_id(2); // Head index + + if (d >= (partial_dim / 2) || s >= seq_len || h >= head_dim) { + return; + } + + const int pos = s + pos_offset; + const int idx_d = d * 2; + + // BSHD 布局: index = s * (H * D) + h * D + d + size_t base_idx = (size_t)s * head_dim * partial_dim + (size_t)h * partial_dim + idx_d; + + float in0 = data[base_idx]; + float in1 = data[base_idx + 1]; + + float sin_val = sin_table[pos * (partial_dim / 2) + d]; + float cos_val = cos_table[pos * (partial_dim / 2) + d]; + + data[base_idx] = in0 * cos_val - in1 * sin_val; + data[base_idx + 1] = in0 * sin_val + in1 * cos_val; +} + +// HuggingFace-style 
RoPE 内核 (FP32)
+__kernel void rope_hf_fp32(
+    __global float *data,
+    __global const float *sin_table,
+    __global const float *cos_table,
+    const int partial_dim,
+    const int head_dim,
+    const int seq_len,
+    const int pos_offset) {
+    const int d = get_global_id(0); // Half dimension index
+    const int s = get_global_id(1); // Sequence index
+    const int h = get_global_id(2); // Head index
+
+    const int half_dim = partial_dim / 2;
+    if (d >= half_dim || s >= seq_len || h >= head_dim) {
+        return;
+    }
+
+    const int pos = s + pos_offset;
+
+    // BSHD 布局
+    size_t base_idx0 = (size_t)s * head_dim * partial_dim + (size_t)h * partial_dim + d;
+    size_t base_idx1 = base_idx0 + half_dim;
+
+    float in0 = data[base_idx0];
+    float in1 = data[base_idx1];
+
+    float sin_val = sin_table[pos * half_dim + d];
+    float cos_val = cos_table[pos * half_dim + d];
+
+    data[base_idx0] = in0 * cos_val - in1 * sin_val;
+    data[base_idx1] = in0 * sin_val + in1 * cos_val;
+}
+
+// ==================================================================
+// =================== 新增: FP16 Kernels =====================
+// ==================================================================
+
+// LLaMA-style RoPE kernel (FP16). NOTE(review): the sin/cos tables here are indexed as [pos * partial_dim + idx_d], while the FP32 kernel uses [pos * (partial_dim / 2) + d] — confirm the FP16 tables really use a full-dim layout, otherwise this reads the wrong angles.
+__kernel void rope_llama_fp16(
+    __global half *data,
+    __global const half *sin_table,
+    __global const half *cos_table,
+    const int partial_dim,
+    const int head_dim,
+    const int seq_len,
+    const int pos_offset) {
+    const int d = get_global_id(0);
+    const int s = get_global_id(1);
+    const int h = get_global_id(2);
+
+    if (d >= (partial_dim / 2) || s >= seq_len || h >= head_dim) {
+        return;
+    }
+
+    const int pos = s + pos_offset;
+    const int idx_d = d * 2;
+    size_t base_idx = (size_t)s * head_dim * partial_dim + (size_t)h * partial_dim + idx_d;
+
+    half in0 = data[base_idx];
+    half in1 = data[base_idx + 1];
+
+    half sin_val = sin_table[pos * partial_dim + idx_d];
+    half cos_val = cos_table[pos * partial_dim + idx_d];
+
+    data[base_idx] = in0 * cos_val - in1 * sin_val;
+    data[base_idx + 1] = 
in0 * sin_val + in1 * cos_val; +} + +// HuggingFace-style RoPE 内核 (FP16) +__kernel void rope_hf_fp16( + __global half *data, + __global const half *sin_table, + __global const half *cos_table, + const int partial_dim, + const int head_dim, + const int seq_len, + const int pos_offset) { + const int d = get_global_id(0); + const int s = get_global_id(1); + const int h = get_global_id(2); + + const int half_dim = partial_dim / 2; + if (d >= half_dim || s >= seq_len || h >= head_dim) { + return; + } + + const int pos = s + pos_offset; + size_t base_idx0 = (size_t)s * head_dim * partial_dim + (size_t)h * partial_dim + d; + size_t base_idx1 = base_idx0 + half_dim; + + half in0 = data[base_idx0]; + half in1 = data[base_idx1]; + + half sin_val = sin_table[pos * half_dim + d]; + half cos_val = cos_table[pos * half_dim + d]; + + data[base_idx0] = in0 * cos_val - in1 * sin_val; + data[base_idx1] = in0 * sin_val + in1 * cos_val; +} \ No newline at end of file diff --git a/mllm/backends/opencl/kernel/scatter_add.cl b/mllm/backends/opencl/kernel/scatter_add.cl new file mode 100644 index 000000000..e4ab06810 --- /dev/null +++ b/mllm/backends/opencl/kernel/scatter_add.cl @@ -0,0 +1,211 @@ +// 请用此完整代码替换您的 kernel/scatter_add.cl 文件 + +#pragma OPENCL EXTENSION cl_khr_global_int32_base_atomics : enable + +// ======================================================================== +// 通用部分:fp32 内核 和 fp32 原子加法 +// 这部分代码已经是正确的,保持不变。 +// ======================================================================== + +void atomic_add_float(__global float *addr, float val) { + union { + unsigned int u32; + float f32; + } next, expected, current; + current.f32 = *addr; + do { + expected.f32 = current.f32; + next.f32 = expected.f32 + val; + current.u32 = atomic_cmpxchg((volatile __global unsigned int *)addr, expected.u32, next.u32); + } while (current.u32 != expected.u32); +} + +__kernel void scatter_add_fp32( + __global float *self_data, + __global const float *value_data, + __global const float 
*index_data, + const int B, const int H, const int D, + const int S_self, const int S_value) { + const int d = get_global_id(0); + const int h = get_global_id(1); + const int bs_val = get_global_id(2); + const int b = bs_val / S_value; + const int s_val = bs_val % S_value; + + if (d >= D || h >= H || b >= B) { + return; + } + size_t value_offset = (size_t)b * S_value * H * D + (size_t)s_val * H * D + (size_t)h * D + d; + float value_to_add = value_data[value_offset]; + int target_seq_index = (int)index_data[s_val]; + size_t self_offset = (size_t)b * S_self * H * D + (size_t)target_seq_index * H * D + (size_t)h * D + d; + atomic_add_float(&self_data[self_offset], value_to_add); +} + +// ======================================================================== +// FP16 实现部分:根据 SUPPORTS_FP16 宏进行条件编译 +// ======================================================================== + +#ifdef SUPPORTS_FP16 + +// A. 如果设备支持 FP16,我们编译这部分代码 + +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +/* + * [最终修正] atomic_add_half: + * 1. 使用 uintptr_t 和位运算代替取模,提高可移植性,解决编译失败问题。 + * 2. 
使用 float 进行中间加法计算,保证精度和兼容性。 + */ +void atomic_add_half(__global half *addr, half val) { + // uintptr_t 是专门为存储指针而设计的整数类型,转换更安全。 + uintptr_t ptr_val = (uintptr_t)addr; + // 使用位运算判断地址是在一个32位字的前半部分(0)还是后半部分(1)。 + // (ptr_val >> 1) & 1 等价于 ((ptr_val % 4) / 2) + int half_idx_in_uint = (ptr_val >> 1) & 1; + + // 找到包含当前 half 的那个4字节对齐的地址 + volatile __global uint *addr_as_uint = (volatile __global uint *)(ptr_val - (half_idx_in_uint * 2)); + + union { + uint u32; + half2 f16x2; + } next, expected, current; + + do { + current.u32 = *addr_as_uint; + expected.u32 = current.u32; + + if (half_idx_in_uint == 0) { // 更新前半部分 (s0) + float sum = (float)expected.f16x2.s0 + (float)val; + next.f16x2.s0 = (half)sum; + next.f16x2.s1 = expected.f16x2.s1; + } else { // 更新后半部分 (s1) + float sum = (float)expected.f16x2.s1 + (float)val; + next.f16x2.s0 = expected.f16x2.s0; + next.f16x2.s1 = (half)sum; + } + } while (atomic_cmpxchg(addr_as_uint, expected.u32, next.u32) != expected.u32); +} + +__kernel void scatter_add_fp16( + __global half *self_data, + __global const half *value_data, + __global const half *index_data, + const int B, const int H, const int D, + const int S_self, const int S_value) { + const int d = get_global_id(0); + const int h = get_global_id(1); + const int bs_val = get_global_id(2); + const int b = bs_val / S_value; + const int s_val = bs_val % S_value; + if (d >= D || h >= H || b >= B) { + return; + } + size_t value_offset = (size_t)b * S_value * H * D + (size_t)s_val * H * D + (size_t)h * D + d; + half value_to_add = value_data[value_offset]; + int target_seq_index = (int)convert_float(index_data[s_val]); + size_t self_offset = (size_t)b * S_self * H * D + (size_t)target_seq_index * H * D + (size_t)h * D + d; + atomic_add_half(&self_data[self_offset], value_to_add); +} +#else +// 辅助函数: 将 ushort (存储着 half 的二进制位) 转换为 float +// 注意: 这个函数内部不创建 half 变量,只进行位运算和类型双关转换 +static float ushort_to_float(ushort u) { + uint sign = (u >> 15) & 1; + uint exponent = (u >> 10) & 0x1F; + uint 
mantissa = u & 0x3FF; + uint result_uint; + if (exponent == 0) { + if (mantissa == 0) { + result_uint = sign << 31; + } else { + exponent = 1; + while ((mantissa & 0x400) == 0) { + mantissa <<= 1; + exponent++; + } + mantissa &= 0x3FF; + exponent = 127 - 15 - exponent + 1; + result_uint = (sign << 31) | (exponent << 23) | (mantissa << 13); + } + } else if (exponent == 0x1F) { + result_uint = (sign << 31) | (0xFF << 23) | (mantissa << 13); + } else { + exponent = exponent - 15 + 127; + result_uint = (sign << 31) | (exponent << 23) | (mantissa << 13); + } + return as_float(result_uint); +} + +// 辅助函数: 将 float 转换为 ushort +static ushort float_to_ushort(float f) { + uint u = as_uint(f); + uint sign = (u >> 16) & 0x8000; + int exponent = ((u >> 23) & 0xFF) - 127; + uint mantissa = u & 0x7FFFFF; + if (exponent > 15) { return sign | 0x7C00; } // Infinity + if (exponent < -14) { + mantissa = (mantissa | 0x800000) >> (1 - exponent); + return sign | (mantissa >> 13); + } + return sign | ((exponent + 15) << 10) | (mantissa >> 13); +} + +// 辅助函数: 在一个 ushort 地址上,原子地加上一个 float 值 +void atomic_add_float_to_ushort_location(__global ushort *addr, float val_to_add) { + uintptr_t ptr_val = (uintptr_t)addr; + int ushort_idx_in_uint = (ptr_val >> 1) & 1; + volatile __global uint *addr_as_uint = (volatile __global uint *)(ptr_val - (ushort_idx_in_uint * 2)); + + union { + uint u32; + ushort us16[2]; + } next, expected, current; + + do { + current.u32 = *addr_as_uint; + expected.u32 = current.u32; + + // 核心逻辑:解包 -> 转float -> 计算 -> 转ushort -> 打包 + ushort old_val_ushort = expected.us16[ushort_idx_in_uint]; + float old_val_float = ushort_to_float(old_val_ushort); + float new_val_float = old_val_float + val_to_add; + ushort new_val_ushort = float_to_ushort(new_val_float); + + next.u32 = expected.u32; // 先复制 + next.us16[ushort_idx_in_uint] = new_val_ushort; // 再修改目标部分 + + } while (atomic_cmpxchg(addr_as_uint, expected.u32, next.u32) != expected.u32); +} + +__kernel void scatter_add_fp16( + 
__global ushort *self_data, + __global const ushort *value_data, + __global const ushort *index_data, + const int B, const int H, const int D, + const int S_self, const int S_value) { + const int d = get_global_id(0); + const int h = get_global_id(1); + const int bs_val = get_global_id(2); + const int b = bs_val / S_value; + const int s_val = bs_val % S_value; + if (d >= D || h >= H || b >= B) { return; } + + // 1. 读取源数据和索引,并立即转换为 float + size_t value_offset = (size_t)b * S_value * H * D + (size_t)s_val * H * D + (size_t)h * D + d; + // 使用 vload_half 读取并转换,遵从编译器建议 + float value_to_add = vload_half(value_offset, (__global half *)value_data); + + // 读取索引也同样处理 + float index_as_float = vload_half(s_val, (__global half *)index_data); + int target_seq_index = (int)index_as_float; + + // 2. 计算目标地址 + size_t self_offset = (size_t)b * S_self * H * D + (size_t)target_seq_index * H * D + (size_t)h * D + d; + + // 3. 执行软件模拟的原子加法 + // 因为 vload/vstore 不支持原子操作,所以必须使用手动实现的原子函数 + atomic_add_float_to_ushort_location(&self_data[self_offset], value_to_add); +} +#endif // SUPPORTS_FP16 \ No newline at end of file diff --git a/mllm/backends/opencl/kernel/silu.cl b/mllm/backends/opencl/kernel/silu.cl new file mode 100644 index 000000000..1bbd8ff48 --- /dev/null +++ b/mllm/backends/opencl/kernel/silu.cl @@ -0,0 +1,71 @@ +// 文件名: kernel/silu.cl + +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +// ============================================================================ +// ========================== FP32 (float) Kernel ============================= +// ============================================================================ +/** + * @brief 对 float 张量执行 SiLU (Swish) 激活函数。 + * out = x * (1 / (1 + exp(-x))) + */ +__kernel void silu_fp32( + __global const float *input, + __global float *output) { + const int i = get_global_id(0); + const float x = input[i]; + + // 计算 SiLU + output[i] = x / (1.0f + exp(-x)); +} + +// 
============================================================================
+// ========================== FP16 (half) Kernel ==============================
+// ============================================================================
+/**
+ * @brief 对 half 张量使用向量指令执行 SiLU (Swish) 激活函数。
+ * 一次处理4个 half 元素。
+ */
+// __kernel void silu_fp16_vector(
+//     __global const half *input,
+//     __global half *output) {
+//     const int i = get_global_id(0);
+
+//     // 高效地加载 4 个 half 数据
+//     half4 x = vload4(i, input);
+
+//     // 计算 SiLU
+//     // 注意: OpenCL C 中,exp() 等数学函数可以直接作用于向量类型
+//     half4 result = x / ((half4)(1.0h) + exp(-x));
+
+//     // 高效地写回 4 个 half 数据
+//     vstore4(result, i, output);
+// }
+
+__kernel void silu_fp16(
+    __global const half *input,
+    __global half *output,
+    const int count) {
+    // --- 向量化部分 ---
+    const int vec_idx = get_global_id(0);
+    const int vec_limit = count / 4;
+
+    if (vec_idx < vec_limit) {
+        const int i = vec_idx * 4;
+        half4 x = vload4(0, input + i);
+        half4 result = x / ((half4)(1.0h) + exp(-x));
+        vstore4(result, 0, output + i);
+    }
+
+    // --- 标量处理部分 (处理余数) ---
+    // Scalar tail: threads of work-group 0 whose local ID falls inside the
+    // remainder range each handle one trailing element (the count % 4 left over).
+    const int remainder_start = vec_limit * 4;
+    if (get_local_id(0) < (count - remainder_start) && get_group_id(0) == 0) {
+        const int i = remainder_start + get_local_id(0);
+        if (i < count) { // 再次检查边界,确保安全
+            const half x = input[i];
+            output[i] = x / (1.0h + exp(-x));
+        }
+    }
+}
\ No newline at end of file
diff --git a/mllm/backends/opencl/kernel/softmax.cl b/mllm/backends/opencl/kernel/softmax.cl
new file mode 100644
index 000000000..021442a08
--- /dev/null
+++ b/mllm/backends/opencl/kernel/softmax.cl
@@ -0,0 +1,159 @@
+#pragma OPENCL EXTENSION cl_khr_fp16 : enable
+
+#define SOFTMAX_BLOCK_SIZE 256
+
+#define HALF_MAX 65504.0h
+#ifdef SUPPORTS_FP16
+// ============================================================================
+// ========================== FP16 (half) 高性能版 =============================
+// 
============================================================================ +__kernel void softmax_fp16_along_d( + const __global half *src, + __global half *dst, + const int B, const int H, const int S, const int D, + const int do_causal_mask) { + const int row_id = get_group_id(0); + if (row_id >= B * H * S) return; + const int local_id = get_local_id(0); + int effective_D = D; + if (do_causal_mask) { + const int current_s = row_id % S; + effective_D = current_s + 1; + } + __local half local_max[SOFTMAX_BLOCK_SIZE]; + __local float local_sum[SOFTMAX_BLOCK_SIZE]; + half thread_max = -HALF_MAX; + for (int i = local_id; i < effective_D; i += SOFTMAX_BLOCK_SIZE) { + thread_max = max(thread_max, src[row_id * D + i]); + } + local_max[local_id] = thread_max; + barrier(CLK_LOCAL_MEM_FENCE); + for (int s = SOFTMAX_BLOCK_SIZE / 2; s > 0; s >>= 1) { + if (local_id < s) { local_max[local_id] = max(local_max[local_id], local_max[local_id + s]); } + barrier(CLK_LOCAL_MEM_FENCE); + } + const half row_max = local_max[0]; + barrier(CLK_LOCAL_MEM_FENCE); + + float thread_sum = 0.0f; + for (int i = local_id; i < effective_D; i += SOFTMAX_BLOCK_SIZE) { + half val = exp(src[row_id * D + i] - row_max); + thread_sum += (float)val; + dst[row_id * D + i] = val; + } + local_sum[local_id] = thread_sum; + barrier(CLK_LOCAL_MEM_FENCE); + for (int s = SOFTMAX_BLOCK_SIZE / 2; s > 0; s >>= 1) { + if (local_id < s) { local_sum[local_id] += local_sum[local_id + s]; } + barrier(CLK_LOCAL_MEM_FENCE); + } + const float row_sum = local_sum[0]; + barrier(CLK_LOCAL_MEM_FENCE); + for (int i = local_id; i < effective_D; i += SOFTMAX_BLOCK_SIZE) { + dst[row_id * D + i] /= (half)row_sum; + } + if (do_causal_mask) { + for (int i = local_id + effective_D; i < D; i += SOFTMAX_BLOCK_SIZE) { + dst[row_id * D + i] = (half)0.0f; + } + } +} +#else +// ============================================================================ +// ========================== FP16 (half) 兼容版 ============================== +// 
============================================================================ +__kernel void softmax_fp16_along_d( + const __global half *src, + __global half *dst, + const int B, const int H, const int S, const int D, + const int do_causal_mask) { + const int row_id = get_global_id(0); + if (row_id >= B * H * S) return; + + int effective_D = D; + if (do_causal_mask) { + effective_D = (row_id % S) + 1; + } + const __global half *p_src = src + row_id * D; + __global half *p_dst = dst + row_id * D; + + float max_val = -INFINITY; + for (int i = 0; i < effective_D; ++i) { + max_val = max(max_val, (float)p_src[i]); + } + float sum = 0.0f; + for (int i = 0; i < effective_D; ++i) { + float val_f32 = exp((float)p_src[i] - max_val); + p_dst[i] = (half)val_f32; + sum += val_f32; + } + for (int i = 0; i < effective_D; ++i) { + p_dst[i] = (half)((float)p_dst[i] / sum); + } + if (do_causal_mask) { + for (int i = effective_D; i < D; ++i) { + p_dst[i] = (half)0.0f; + } + } +} +#endif // SUPPORTS_FP16 + +// ============================================================================ +// ========================== FP32 (float) Kernel ============================= +// ============================================================================ +__kernel void softmax_f32_along_d( + const __global float *src, + __global float *dst, + const int B, const int H, const int S, const int D, + const int do_causal_mask) { + const int row_id = get_group_id(0); + if (row_id >= B * H * S) return; + + const int local_id = get_local_id(0); + int effective_D = D; + if (do_causal_mask) { + effective_D = (row_id % S) + 1; + } + + __local float local_max[SOFTMAX_BLOCK_SIZE]; + __local float local_sum[SOFTMAX_BLOCK_SIZE]; + + float thread_max = -INFINITY; + for (int i = local_id; i < effective_D; i += SOFTMAX_BLOCK_SIZE) { + thread_max = max(thread_max, src[row_id * D + i]); + } + local_max[local_id] = thread_max; + barrier(CLK_LOCAL_MEM_FENCE); + + for (int s = SOFTMAX_BLOCK_SIZE / 2; s > 0; s >>= 1) { 
+ if (local_id < s) { local_max[local_id] = max(local_max[local_id], local_max[local_id + s]); } + barrier(CLK_LOCAL_MEM_FENCE); + } + const float row_max = local_max[0]; + barrier(CLK_LOCAL_MEM_FENCE); + + float thread_sum = 0.0f; + for (int i = local_id; i < effective_D; i += SOFTMAX_BLOCK_SIZE) { + float val = exp(src[row_id * D + i] - row_max); + thread_sum += val; + dst[row_id * D + i] = val; + } + local_sum[local_id] = thread_sum; + barrier(CLK_LOCAL_MEM_FENCE); + + for (int s = SOFTMAX_BLOCK_SIZE / 2; s > 0; s >>= 1) { + if (local_id < s) { local_sum[local_id] += local_sum[local_id + s]; } + barrier(CLK_LOCAL_MEM_FENCE); + } + const float row_sum = local_sum[0]; + barrier(CLK_LOCAL_MEM_FENCE); + + for (int i = local_id; i < effective_D; i += SOFTMAX_BLOCK_SIZE) { + dst[row_id * D + i] /= row_sum; + } + if (do_causal_mask) { + for (int i = local_id + effective_D; i < D; i += SOFTMAX_BLOCK_SIZE) { + dst[row_id * D + i] = 0.0f; + } + } +} \ No newline at end of file diff --git a/mllm/backends/opencl/kernel/split.cl b/mllm/backends/opencl/kernel/split.cl new file mode 100644 index 000000000..0d3a04d81 --- /dev/null +++ b/mllm/backends/opencl/kernel/split.cl @@ -0,0 +1,47 @@ +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +__kernel void split_fp32( + __global const float *input, + __global float *output, + const int outer_size, + const int split_dim_size, + const int inner_size, + const int offset, + const int total_split_dim_size) { + const int inner_idx = get_global_id(0); + const int split_idx = get_global_id(1); + const int outer_idx = get_global_id(2); + + if (inner_idx >= inner_size || split_idx >= split_dim_size || outer_idx >= outer_size) { + return; + } + + size_t src_offset = (size_t)outer_idx * total_split_dim_size * inner_size + (size_t)(split_idx + offset) * inner_size + inner_idx; + + size_t dst_offset = (size_t)outer_idx * split_dim_size * inner_size + (size_t)split_idx * inner_size + inner_idx; + + output[dst_offset] = input[src_offset]; +} + 
+__kernel void split_fp16( + __global const half *input, + __global half *output, + const int outer_size, + const int split_dim_size, + const int inner_size, + const int offset, + const int total_split_dim_size) { + const int inner_idx = get_global_id(0); + const int split_idx = get_global_id(1); + const int outer_idx = get_global_id(2); + + if (inner_idx >= inner_size || split_idx >= split_dim_size || outer_idx >= outer_size) { + return; + } + + size_t src_offset = (size_t)outer_idx * total_split_dim_size * inner_size + (size_t)(split_idx + offset) * inner_size + inner_idx; + + size_t dst_offset = (size_t)outer_idx * split_dim_size * inner_size + (size_t)split_idx * inner_size + inner_idx; + + output[dst_offset] = input[src_offset]; +} \ No newline at end of file diff --git a/mllm/backends/opencl/kernel/sub.cl b/mllm/backends/opencl/kernel/sub.cl new file mode 100644 index 000000000..13e3ff17f --- /dev/null +++ b/mllm/backends/opencl/kernel/sub.cl @@ -0,0 +1,76 @@ +// 文件名: kernel/sub.cl + +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +// ================================================================== +// 1. 
Tensor - Tensor Subtraction Kernels +// ================================================================== + +/** + * @brief [FP32 Buffer版] 两个 float 张量的元素级减法 C = A - B + */ +__kernel void sub_float( + __global const float *A, + __global const float *B, + __global float *C) { + size_t index = get_global_id(0); + C[index] = A[index] - B[index]; +} + +/** + * @brief [FP32 Image版] 两个 float 图像的元素级减法 + */ +__kernel void sub_float_image2d( + sampler_t sampler, + __read_only image2d_t inputA, + __read_only image2d_t inputB, + __write_only image2d_t output, + const int width, + const int height) { + const int2 pos = (int2)(get_global_id(0), get_global_id(1)); + + if (pos.x >= width || pos.y >= height) { + return; + } + + float4 inA = read_imagef(inputA, sampler, pos); + float4 inB = read_imagef(inputB, sampler, pos); + float4 result = inA - inB; + write_imagef(output, pos, result); +} + +/** + * @brief [FP16 Buffer版] 两个 half 张量的向量化元素级减法 + */ +__kernel void sub_fp16_vector( + __global const half *A, + __global const half *B, + __global half *C) { + const int i = get_global_id(0); + half4 a_vec = vload4(i, A); + half4 b_vec = vload4(i, B); + half4 c_vec = a_vec - b_vec; + vstore4(c_vec, i, C); +} + +/** + * @brief [FP16 Image版] 两个 half 图像的元素级减法 + */ +__kernel void sub_fp16_image2d( + sampler_t sampler, + __read_only image2d_t inputA, + __read_only image2d_t inputB, + __write_only image2d_t output, + const int width, + const int height) { + const int2 pos = (int2)(get_global_id(0), get_global_id(1)); + + if (pos.x >= width || pos.y >= height) { + return; + } + + half4 inA = read_imageh(inputA, sampler, pos); + half4 inB = read_imageh(inputB, sampler, pos); + half4 result = inA - inB; + write_imageh(output, pos, result); +} \ No newline at end of file diff --git a/mllm/backends/opencl/kernel/sum.cl b/mllm/backends/opencl/kernel/sum.cl new file mode 100644 index 000000000..f0de879aa --- /dev/null +++ b/mllm/backends/opencl/kernel/sum.cl @@ -0,0 +1,81 @@ +#pragma OPENCL EXTENSION 
cl_khr_fp16 : enable + +#define SUM_WG_SIZE 256 // 工作组大小, 必须是2的幂 + +// ========================== FP32 (float) Kernel ============================= +__kernel void sum_fp32( + __global const float *input, + __global float *output, + const int outer_size, + const int inner_size, + const int reduce_size) { + // 1. 获取全局和局部ID + const int inner_idx = get_group_id(0); + const int outer_idx = get_group_id(1); + const int local_id = get_local_id(0); + + // 2. 在本地内存中声明共享数组,用于存储部分和 + __local float partial_sums[SUM_WG_SIZE]; + + // 3. 并行计算部分和 + float thread_sum = 0.0f; + // 每个线程负责累加 `reduce_size` 维度上的一部分数据 + for (int i = local_id; i < reduce_size; i += SUM_WG_SIZE) { + size_t offset = (size_t)outer_idx * reduce_size * inner_size + (size_t)i * inner_size + inner_idx; + thread_sum += input[offset]; + } + partial_sums[local_id] = thread_sum; + + // 4. 工作组内同步,确保所有线程都已算完自己的部分和 + barrier(CLK_LOCAL_MEM_FENCE); + + // 5. 在本地内存中进行并行规约 + for (int s = SUM_WG_SIZE / 2; s > 0; s >>= 1) { + if (local_id < s) { + partial_sums[local_id] += partial_sums[local_id + s]; + } + barrier(CLK_LOCAL_MEM_FENCE); + } + + // 6. 
由工作组的第一个线程将最终结果写回全局内存 + if (local_id == 0) { + size_t out_offset = (size_t)outer_idx * inner_size + inner_idx; + output[out_offset] = partial_sums[0]; + } +} + +// ========================== FP16 (half) Kernel ============================== +__kernel void sum_fp16( + __global const half *input, + __global half *output, + const int outer_size, + const int inner_size, + const int reduce_size) { + const int inner_idx = get_group_id(0); + const int outer_idx = get_group_id(1); + const int local_id = get_local_id(0); + + // 使用 float 累加器以保证精度 + __local float partial_sums[SUM_WG_SIZE]; + float thread_sum = 0.0f; + + for (int i = local_id; i < reduce_size; i += SUM_WG_SIZE) { + size_t offset = (size_t)outer_idx * reduce_size * inner_size + (size_t)i * inner_size + inner_idx; + thread_sum += (float)input[offset]; + } + partial_sums[local_id] = thread_sum; + + barrier(CLK_LOCAL_MEM_FENCE); + + for (int s = SUM_WG_SIZE / 2; s > 0; s >>= 1) { + if (local_id < s) { + partial_sums[local_id] += partial_sums[local_id + s]; + } + barrier(CLK_LOCAL_MEM_FENCE); + } + + if (local_id == 0) { + size_t out_offset = (size_t)outer_idx * inner_size + inner_idx; + output[out_offset] = (half)partial_sums[0]; + } +} \ No newline at end of file diff --git a/mllm/backends/opencl/kernel/topk.cl b/mllm/backends/opencl/kernel/topk.cl new file mode 100644 index 000000000..1554b54a7 --- /dev/null +++ b/mllm/backends/opencl/kernel/topk.cl @@ -0,0 +1,164 @@ +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +#define WG_SIZE 256 // 工作组大小,必须是2的幂 + +// 交换两个 pair +void swap_pairs(volatile __local float *values, volatile __local int *indices, int i, int j) { + float temp_val = values[i]; + values[i] = values[j]; + values[j] = temp_val; + + int temp_idx = indices[i]; + indices[i] = indices[j]; + indices[j] = temp_idx; +} + +// ========================== FP32 (float) Kernel ============================= +__kernel void topk_fp32( + __global const float *input, + __global float *topk_values, + __global float 
*topk_indices,
+    const int D, // dimension to sort along
+    const int k) {
+    // NOTE(review): all __local arrays hold only WG_SIZE entries, yet the load loop below writes local_values[i] for i up to D-1 — this kernel implicitly assumes D <= WG_SIZE; confirm the host enforces that, otherwise local memory is written out of bounds.
+    __local float local_values[WG_SIZE];
+    __local int local_indices[WG_SIZE];
+    __local float wg_max_vals[WG_SIZE];
+    __local int wg_max_indices[WG_SIZE];
+
+    const int row_id = get_group_id(0);
+    const int local_id = get_local_id(0);
+
+    // 1. 并行加载一行数据到本地内存
+    for (int i = local_id; i < D; i += WG_SIZE) {
+        local_values[i] = input[row_id * D + i];
+        local_indices[i] = i;
+    }
+    // 处理 D < WG_SIZE 的情况,将多余的本地内存元素初始化为最小值
+    if (local_id >= D && local_id < WG_SIZE) {
+        local_values[local_id] = -FLT_MAX;
+        local_indices[local_id] = -1;
+    }
+
+    barrier(CLK_LOCAL_MEM_FENCE);
+
+    // 2. 在本地内存中进行部分排序 (选择排序的变体)
+    for (int i = 0; i < k; ++i) {
+        // 在工作组内并行查找 [i, D) 区间内的最大值
+        float thread_max_val = -FLT_MAX;
+        int thread_max_idx = -1;
+        for (int j = i + local_id; j < D; j += WG_SIZE) {
+            if (local_values[j] > thread_max_val) {
+                thread_max_val = local_values[j];
+                thread_max_idx = j;
+            }
+        }
+
+        // 使用本地内存进行归约,找到整个工作组的最大值
+        wg_max_vals[local_id] = thread_max_val;
+        wg_max_indices[local_id] = thread_max_idx;
+        barrier(CLK_LOCAL_MEM_FENCE);
+
+        for (int s = WG_SIZE / 2; s > 0; s >>= 1) {
+            if (local_id < s) {
+                if (wg_max_vals[local_id + s] > wg_max_vals[local_id]) {
+                    wg_max_vals[local_id] = wg_max_vals[local_id + s];
+                    wg_max_indices[local_id] = wg_max_indices[local_id + s];
+                }
+            }
+            barrier(CLK_LOCAL_MEM_FENCE);
+        }
+
+        int max_idx = -1;
+        if (local_id == 0) {
+            max_idx = wg_max_indices[0];
+        }
+        barrier(CLK_LOCAL_MEM_FENCE); // 确保所有线程都看到了max_idx
+
+        // 将找到的最大值与第 i 个元素交换
+        if (max_idx != -1 && max_idx != i) {
+            if (local_id == 0) {
+                swap_pairs(local_values, local_indices, i, max_idx);
+            }
+        }
+        barrier(CLK_LOCAL_MEM_FENCE);
+    }
+
+    // 3. 
由前 k 个线程将结果写回全局内存 + if (local_id < k) { + topk_values[row_id * k + local_id] = local_values[local_id]; + // 将索引转换为 float 类型写入 + topk_indices[row_id * k + local_id] = (float)local_indices[local_id]; + } +} + +// ========================== FP16 (half) Kernel ============================== +__kernel void topk_fp16( + __global const half *input, + __global half *topk_values, + __global half *topk_indices, + const int D, + const int k) { + // ** FIX: All __local memory declarations moved to the top-level scope ** + __local float local_values[WG_SIZE]; + __local int local_indices[WG_SIZE]; + __local float wg_max_vals[WG_SIZE]; + __local int wg_max_indices[WG_SIZE]; + + const int row_id = get_group_id(0); + const int local_id = get_local_id(0); + + for (int i = local_id; i < D; i += WG_SIZE) { + local_values[i] = (float)input[row_id * D + i]; + local_indices[i] = i; + } + if (local_id >= D && local_id < WG_SIZE) { + local_values[local_id] = -FLT_MAX; + local_indices[local_id] = -1; + } + + barrier(CLK_LOCAL_MEM_FENCE); + + for (int i = 0; i < k; ++i) { + float thread_max_val = -FLT_MAX; + int thread_max_idx = -1; + for (int j = i + local_id; j < D; j += WG_SIZE) { + if (local_values[j] > thread_max_val) { + thread_max_val = local_values[j]; + thread_max_idx = j; + } + } + + wg_max_vals[local_id] = thread_max_val; + wg_max_indices[local_id] = thread_max_idx; + barrier(CLK_LOCAL_MEM_FENCE); + + for (int s = WG_SIZE / 2; s > 0; s >>= 1) { + if (local_id < s) { + if (wg_max_vals[local_id + s] > wg_max_vals[local_id]) { + wg_max_vals[local_id] = wg_max_vals[local_id + s]; + wg_max_indices[local_id] = wg_max_indices[local_id + s]; + } + } + barrier(CLK_LOCAL_MEM_FENCE); + } + + int max_idx = -1; + if (local_id == 0) { + max_idx = wg_max_indices[0]; + } + barrier(CLK_LOCAL_MEM_FENCE); + + if (max_idx != -1 && max_idx != i) { + if (local_id == 0) { + swap_pairs(local_values, local_indices, i, max_idx); + } + } + barrier(CLK_LOCAL_MEM_FENCE); + } + + if (local_id < k) { + 
topk_values[row_id * k + local_id] = (half)local_values[local_id]; + topk_indices[row_id * k + local_id] = (half)local_indices[local_id]; + } +} \ No newline at end of file diff --git a/mllm/backends/opencl/kernel/transpose.cl b/mllm/backends/opencl/kernel/transpose.cl new file mode 100644 index 000000000..90c4fe60a --- /dev/null +++ b/mllm/backends/opencl/kernel/transpose.cl @@ -0,0 +1,271 @@ +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +// 定义在本地内存中转置的块大小 +#define BLOCK_DIM 16 + +// // ============================================================================ +// // ========================== FP32 (float) Kernel ============================= +// // ============================================================================ +// __kernel void transpose_float_2d( +// const __global float *src, // 源矩阵指针 (指向一个 S x D 块) +// __global float *dst, // 目标矩阵指针 (指向一个 D x S 块) +// const int S, // 源矩阵的行数 +// const int D // 源矩阵的列数 +// ) { +// // 1. 定义一个本地内存块 +// __local float tile[BLOCK_DIM][BLOCK_DIM + 1]; // +1 用于避免 bank conflict + +// // 2. 计算全局ID (在 S x D 矩阵内的坐标) +// int d = get_global_id(0); +// int s = get_global_id(1); + +// // 3. 从全局内存加载数据到本地内存 +// if (d < D && s < S) { +// tile[get_local_id(1)][get_local_id(0)] = src[s * D + d]; +// } + +// // 4. 同步工作组,确保所有数据都已加载 +// barrier(CLK_LOCAL_MEM_FENCE); + +// // 5. 计算转置后的新坐标 +// int new_d = get_group_id(1) * BLOCK_DIM + get_local_id(0); +// int new_s = get_group_id(0) * BLOCK_DIM + get_local_id(1); + +// // 6. 
从本地内存读取转置后的元素并写回全局内存 +// if (new_d < S && new_s < D) { +// // 读取时索引交换,实现转置 +// dst[new_s * S + new_d] = tile[get_local_id(0)][get_local_id(1)]; +// } +// } + +// // ============================================================================ +// // ========================== FP16 (half) Kernel ============================== +// // ============================================================================ +// __kernel void transpose_fp16_2d( +// const __global half *src, +// __global half *dst, +// const int S, +// const int D) { +// __local half tile[BLOCK_DIM][BLOCK_DIM + 1]; + +// int d = get_global_id(0); +// int s = get_global_id(1); + +// if (d < D && s < S) { +// tile[get_local_id(1)][get_local_id(0)] = src[s * D + d]; +// } + +// barrier(CLK_LOCAL_MEM_FENCE); + +// int new_d = get_group_id(1) * BLOCK_DIM + get_local_id(0); +// int new_s = get_group_id(0) * BLOCK_DIM + get_local_id(1); + +// if (new_d < S && new_s < D) { +// dst[new_s * S + new_d] = tile[get_local_id(0)][get_local_id(1)]; +// } +// } + +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +// 定义在本地内存中转置的块大小 +#define BLOCK_DIM 16 + +// ============================================================================ +// ========================== FP32 (float) Kernel ============================= +// ============================================================================ +__kernel void transpose_float_2d( + const __global float *src, // 源矩阵的基地址指针 + __global float *dst, // 目标矩阵的基地址指针 + const int S, // 子矩阵的行数 + const int D, // 子矩阵的列数 + const int src_offset_elements, // 【新增】源数据在基地址上的偏移量 (以元素为单位) + const int dst_offset_elements // 【新增】目标数据在基地址上的偏移量 (以元素为单位) +) { + // 1. 定义一个本地内存块 + __local float tile[BLOCK_DIM][BLOCK_DIM + 1]; // +1 用于避免 bank conflict + + // 【修改】根据偏移量获取子矩阵的实际指针 + const __global float *src_ptr = src + src_offset_elements; + __global float *dst_ptr = dst + dst_offset_elements; + + // 2. 计算全局ID (在 S x D 子矩阵内的坐标) + int d = get_global_id(0); + int s = get_global_id(1); + + // 3. 
从全局内存加载数据到本地内存 + if (d < D && s < S) { + tile[get_local_id(1)][get_local_id(0)] = src_ptr[s * D + d]; + } + + // 4. 同步工作组,确保所有数据都已加载 + barrier(CLK_LOCAL_MEM_FENCE); + + // 5. 计算转置后的新坐标 + int new_d = get_group_id(1) * BLOCK_DIM + get_local_id(0); + int new_s = get_group_id(0) * BLOCK_DIM + get_local_id(1); + + // 6. 从本地内存读取转置后的元素并写回全局内存 + if (new_d < S && new_s < D) { + // 读取时索引交换,实现转置 + dst_ptr[new_s * S + new_d] = tile[get_local_id(0)][get_local_id(1)]; + } +} + +// ============================================================================ +// ========================== FP16 (half) Kernel ============================== +// ============================================================================ +__kernel void transpose_fp16_2d( + const __global half *src, + __global half *dst, + const int S, + const int D, + const int src_offset_elements, // 【新增】源数据在基地址上的偏移量 + const int dst_offset_elements // 【新增】目标数据在基地址上的偏移量 +) { + __local half tile[BLOCK_DIM][BLOCK_DIM + 1]; + + // 【修改】根据偏移量获取子矩阵的实际指针 + const __global half *src_ptr = src + src_offset_elements; + __global half *dst_ptr = dst + dst_offset_elements; + + int d = get_global_id(0); + int s = get_global_id(1); + + if (d < D && s < S) { + tile[get_local_id(1)][get_local_id(0)] = src_ptr[s * D + d]; + } + + barrier(CLK_LOCAL_MEM_FENCE); + + int new_d = get_group_id(1) * BLOCK_DIM + get_local_id(0); + int new_s = get_group_id(0) * BLOCK_DIM + get_local_id(1); + + if (new_d < S && new_s < D) { + dst_ptr[new_s * S + new_d] = tile[get_local_id(0)][get_local_id(1)]; + } +} + +// ============================================================================ +// ============ BSHD -> BHSD FP32 (float) Kernel ============================== +// ============================================================================ +__kernel void transpose_bshd2bhsd_fp32( + const __global float *src, // 源张量指针 (BSHD 布局) + __global float *dst, // 目标张量指针 (BHSD 布局) + const int B, // Batch size + const int H, // Head count + const int S, // 
Sequence length + const int D // Dimension +) { + // 使用3D工作项ID来唯一标识目标张量中的每一个元素 + const int d = get_global_id(0); + const int s = get_global_id(1); + const int hb = get_global_id(2); // h 和 b 的组合索引 + + // 检查边界 + if (d >= D || s >= S || hb >= H * B) { + return; + } + + // 从组合索引中分解出 h 和 b + const int h = hb % H; + const int b = hb / H; + + // 根据 BSHD 布局计算源索引 + // src[b][s][h][d] + size_t src_idx = (size_t)b * S * H * D + (size_t)s * H * D + (size_t)h * D + d; + + // 根据 BHSD 布局计算目标索引 + // dst[b][h][s][d] + size_t dst_idx = (size_t)b * H * S * D + (size_t)h * S * D + (size_t)s * D + d; + + // 执行数据拷贝 + dst[dst_idx] = src[src_idx]; +} + +// ============================================================================ +// ============ BSHD -> BHSD FP16 (half) Kernel =============================== +// ============================================================================ +__kernel void transpose_bshd2bhsd_fp16( + const __global half *src, + __global half *dst, + const int B, + const int H, + const int S, + const int D) { + const int d = get_global_id(0); + const int s = get_global_id(1); + const int hb = get_global_id(2); + + if (d >= D || s >= S || hb >= H * B) { + return; + } + + const int h = hb % H; + const int b = hb / H; + + size_t src_idx = (size_t)b * S * H * D + (size_t)s * H * D + (size_t)h * D + d; + size_t dst_idx = (size_t)b * H * S * D + (size_t)h * S * D + (size_t)s * D + d; + + dst[dst_idx] = src[src_idx]; +} + +// ============================================================================ +// ============ BHSD -> BSHD FP32 (float) Kernel (新增) ======================== +// ============================================================================ +__kernel void transpose_bhsd2bshd_fp32( + const __global float *src, // 源张量指针 (BHSD 布局) + __global float *dst, // 目标张量指针 (BSHD 布局) + const int B, + const int H, + const int S, + const int D) { + const int d = get_global_id(0); + const int s = get_global_id(1); + const int hb = get_global_id(2); + + if (d >= 
D || s >= S || hb >= H * B) { + return; + } + + const int h = hb % H; + const int b = hb / H; + + // 根据 BHSD 布局计算源索引 + // src[b][h][s][d] + size_t src_idx = (size_t)b * H * S * D + (size_t)h * S * D + (size_t)s * D + d; + + // 根据 BSHD 布局计算目标索引 + // dst[b][s][h][d] + size_t dst_idx = (size_t)b * S * H * D + (size_t)s * H * D + (size_t)h * D + d; + + dst[dst_idx] = src[src_idx]; +} + +// ============================================================================ +// ============ BHSD -> BSHD FP16 (half) Kernel (新增) ======================== +// ============================================================================ +__kernel void transpose_bhsd2bshd_fp16( + const __global half *src, + __global half *dst, + const int B, + const int H, + const int S, + const int D) { + const int d = get_global_id(0); + const int s = get_global_id(1); + const int hb = get_global_id(2); + + if (d >= D || s >= S || hb >= H * B) { + return; + } + + const int h = hb % H; + const int b = hb / H; + + // 根据 BHSD 布局计算源索引 + size_t src_idx = (size_t)b * H * S * D + (size_t)h * S * D + (size_t)s * D + d; + // 根据 BSHD 布局计算目标索引 + size_t dst_idx = (size_t)b * S * H * D + (size_t)s * H * D + (size_t)h * D + d; + + dst[dst_idx] = src[src_idx]; +} \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLAddOp.cpp b/mllm/backends/opencl/op/OpenCLAddOp.cpp new file mode 100644 index 000000000..694c30849 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLAddOp.cpp @@ -0,0 +1,198 @@ +#include "OpenCLAddOp.hpp" +#include "Types.hpp" +#include "utils/OpenCLTools.hpp" +#include "backends/cpu/third_party/ggml/QuantizeFP16.hpp" + +namespace mllm { + +OpenCLAddOp::OpenCLAddOp(Backend *bn, std::string name, float data) : + Op(bn, std::move(name)), data_(data) { + ocl_backend_ = dynamic_cast(backend_); + if (ocl_backend_ == nullptr) throw std::runtime_error("Backend is not OpenCLBackend"); + + const std::string kernel_path = "kernel/add.cl"; + cl_program program = 
ocl_backend_->getProgram(kernel_path); + + cl_int err; + kernel_fp32_buffer_ = clCreateKernel(program, "add_scalar_float", &err); + check_cl_error(err, "clCreateKernel for add_scalar_float"); + + kernel_fp32_image_ = clCreateKernel(program, "add_scalar_float_image2d", &err); + check_cl_error(err, "clCreateKernel for add_scalar_float_image2d"); + + kernel_fp16_buffer_ = clCreateKernel(program, "add_scalar_fp16_vector", &err); + check_cl_error(err, "clCreateKernel for add_scalar_fp16_vector"); + + kernel_fp16_image_ = clCreateKernel(program, "add_scalar_fp16_image2d", &err); + check_cl_error(err, "clCreateKernel for add_scalar_fp16_image2d"); + + sampler_ = clCreateSampler(ocl_backend_->getContext(), CL_FALSE, CL_ADDRESS_CLAMP_TO_EDGE, CL_FILTER_NEAREST, &err); + check_cl_error(err, "clCreateSampler"); +} + +OpenCLAddOp::~OpenCLAddOp() { + if (kernel_fp32_buffer_) clReleaseKernel(kernel_fp32_buffer_); + if (kernel_fp32_image_) clReleaseKernel(kernel_fp32_image_); + if (kernel_fp16_buffer_) clReleaseKernel(kernel_fp16_buffer_); + if (kernel_fp16_image_) clReleaseKernel(kernel_fp16_image_); + if (sampler_) clReleaseSampler(sampler_); +} + +ErrorCode OpenCLAddOp::reshape(vector> inputs, vector> outputs) { + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); + outputs[0]->setDtype(inputs[0]->dtype()); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLAddOp::setUp(vector> inputs, vector> outputs) { + inputs[0]->to(MLLM_OPENCL); + auto output = outputs[0]; + output->setDtype(inputs[0]->dtype()); + + auto &out_mem = output->device_memory(); + if (output->dimension() % 4 == 0 && false) { + out_mem.type = MEM_TYPE_IMAGE_2D; + out_mem.image_width = output->dimension() / 4; + out_mem.image_height = output->batch() * output->head() * output->sequence(); + } else { + out_mem.type = MEM_TYPE_BUFFER; + } + output->alloc(); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLAddOp::execute(vector> inputs, vector> outputs) { + /* + auto 
input_dtype = inputs[0]->dtype(); + auto output = outputs[0]; + if (output->device_memory().type == MEM_TYPE_IMAGE_2D) { + // --- Image 优化路径 --- + cl_kernel kernel_to_use = nullptr; + if (input_dtype == MLLM_TYPE_F32) { + kernel_to_use = kernel_fp32_image_; + } else { // MLLM_TYPE_F16 + kernel_to_use = kernel_fp16_image_; + } + + std::vector temp_tensor_storage; + cl_mem inA_mem = get_image_from_tensor(inputs[0], ocl_backend_, temp_tensor_storage); + cl_mem out_mem_handle = ocl_backend_->get_cl_mem(*output); + + clSetKernelArg(kernel_to_use, 0, sizeof(cl_sampler), &sampler_); + clSetKernelArg(kernel_to_use, 1, sizeof(cl_mem), &inA_mem); + if (input_dtype == MLLM_TYPE_F32) { + clSetKernelArg(kernel_to_use, 2, sizeof(float), &data_); + } else { // MLLM_TYPE_F16 + mllm_fp16_t data_fp16 = MLLM_FP32_TO_FP16(data_); + clSetKernelArg(kernel_to_use, 2, sizeof(mllm_fp16_t), &data_fp16); + } + clSetKernelArg(kernel_to_use, 3, sizeof(cl_mem), &out_mem_handle); + + const int width = static_cast(output->device_memory().image_width); + const int height = static_cast(output->device_memory().image_height); + clSetKernelArg(kernel_to_use, 4, sizeof(int), &width); + clSetKernelArg(kernel_to_use, 5, sizeof(int), &height); + + const size_t global_work_size[2] = {(size_t)width, (size_t)height}; + clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 2, nullptr, global_work_size, nullptr, 0, nullptr, nullptr); + + } else { + // --- 普通 Buffer 回退路径 --- + cl_kernel kernel_to_use = nullptr; + if (input_dtype == MLLM_TYPE_F32) { + kernel_to_use = kernel_fp32_buffer_; + } else { // MLLM_TYPE_F16 + kernel_to_use = kernel_fp16_buffer_; + } + + cl_mem in0_buf = ocl_backend_->get_cl_mem(*inputs[0]); + cl_mem out_buf = ocl_backend_->get_cl_mem(*output); + + clSetKernelArg(kernel_to_use, 0, sizeof(cl_mem), &in0_buf); + if (input_dtype == MLLM_TYPE_F32) { + clSetKernelArg(kernel_to_use, 1, sizeof(float), &data_); + } else { // MLLM_TYPE_F16 + mllm_fp16_t data_fp16 = 
MLLM_FP32_TO_FP16(data_); + clSetKernelArg(kernel_to_use, 1, sizeof(mllm_fp16_t), &data_fp16); + } + clSetKernelArg(kernel_to_use, 2, sizeof(cl_mem), &out_buf); + + size_t count = inputs[0]->count(); + if (input_dtype == MLLM_TYPE_F16) { count /= 4; } + + const size_t global_work_size[1] = {count}; + clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 1, nullptr, global_work_size, nullptr, 0, nullptr, nullptr); + } + */ + auto input = inputs[0]; + auto output = outputs[0]; + // 假设我们决定在这个算子中使用Image路径以获得最佳性能 + bool use_image_path = (output->dimension() % 4 == 0); + if (use_image_path) { + // =================================================== + // ================ Image 优化路径 =================== + // =================================================== + // 1. 将输入和输出Tensor原地转换为Image2D类型 + tensorGlobal2Image(*input); + tensorGlobal2Image(*output); // 注意:这里假设output在setUp中是以Buffer类型分配的 + // 2. 选择内核 + cl_kernel kernel_to_use = nullptr; + if (input->dtype() == MLLM_TYPE_F32) { + kernel_to_use = kernel_fp32_image_; + } else { // MLLM_TYPE_F16 + kernel_to_use = kernel_fp16_image_; + } + // 3. 获取Image句柄并设置参数 + cl_mem in_img = ocl_backend_->get_cl_mem(*input); + cl_mem out_img = ocl_backend_->get_cl_mem(*output); + clSetKernelArg(kernel_to_use, 0, sizeof(cl_sampler), &sampler_); + clSetKernelArg(kernel_to_use, 1, sizeof(cl_mem), &in_img); + if (input->dtype() == MLLM_TYPE_F32) { + clSetKernelArg(kernel_to_use, 2, sizeof(float), &data_); + } else { // MLLM_TYPE_F16 + mllm_fp16_t data_fp16 = MLLM_FP32_TO_FP16(data_); + clSetKernelArg(kernel_to_use, 2, sizeof(mllm_fp16_t), &data_fp16); + } + clSetKernelArg(kernel_to_use, 3, sizeof(cl_mem), &out_img); + const int width = static_cast(output->device_memory().image_width); + const int height = static_cast(output->device_memory().image_height); + clSetKernelArg(kernel_to_use, 4, sizeof(int), &width); + clSetKernelArg(kernel_to_use, 5, sizeof(int), &height); + // 4. 
执行内核 + const size_t global_work_size[2] = {(size_t)width, (size_t)height}; + clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 2, nullptr, global_work_size, nullptr, 0, nullptr, nullptr); + // 5. [重要] 如果后续的算子需要Buffer类型,则将Tensor转换回去 + tensorImage2Global(*input); + tensorImage2Global(*output); + } else { + // =================================================== + // ============= 普通 Buffer 回退路径 ================ + // =================================================== + // 确保输入输出都是Buffer(如果之前被转换过) + tensorImage2Global(*input); + tensorImage2Global(*output); + cl_kernel kernel_to_use = nullptr; + if (input->dtype() == MLLM_TYPE_F32) { + kernel_to_use = kernel_fp32_buffer_; + } else { // MLLM_TYPE_F16 + kernel_to_use = kernel_fp16_buffer_; + } + cl_mem in0_buf = ocl_backend_->get_cl_mem(*input); + cl_mem out_buf = ocl_backend_->get_cl_mem(*output); + clSetKernelArg(kernel_to_use, 0, sizeof(cl_mem), &in0_buf); + if (input->dtype() == MLLM_TYPE_F32) { + clSetKernelArg(kernel_to_use, 1, sizeof(float), &data_); + } else { // MLLM_TYPE_F16 + mllm_fp16_t data_fp16 = MLLM_FP32_TO_FP16(data_); + clSetKernelArg(kernel_to_use, 1, sizeof(mllm_fp16_t), &data_fp16); + } + clSetKernelArg(kernel_to_use, 2, sizeof(cl_mem), &out_buf); + size_t count = input->count(); + if (input->dtype() == MLLM_TYPE_F16) { count /= 4; } + const size_t global_work_size[1] = {count}; + clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 1, nullptr, global_work_size, nullptr, 0, nullptr, nullptr); + } + return MLLM_NO_ERROR; +} + +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLAddOp.hpp b/mllm/backends/opencl/op/OpenCLAddOp.hpp new file mode 100644 index 000000000..63b9729e1 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLAddOp.hpp @@ -0,0 +1,42 @@ +#ifndef OPENCL_ADD_OP_HPP +#define OPENCL_ADD_OP_HPP + +#include "Op.hpp" +#include "../OpenCLBackend.hpp" + +namespace mllm { + +class OpenCLAddOp : public Op { +public: + 
OpenCLAddOp(Backend *bn, std::string name, float data); + ~OpenCLAddOp() override; + + ErrorCode reshape(vector> inputs, vector> outputs) override; + ErrorCode setUp(vector> inputs, vector> outputs) override; + ErrorCode execute(vector> inputs, vector> outputs) override; + +private: + float data_; // 用于存储要加的标量 + + cl_kernel kernel_fp32_buffer_ = nullptr; + cl_kernel kernel_fp32_image_ = nullptr; + cl_kernel kernel_fp16_buffer_ = nullptr; + cl_kernel kernel_fp16_image_ = nullptr; + + cl_sampler sampler_ = nullptr; + OpenCLBackend *ocl_backend_ = nullptr; +}; + +// OpenCLAddOp 的创建器 +class OpenCLAddOpCreator : public OpenCLBackend::Creator { +public: + Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + // 从 op_param 中解析出要加的标量数据 + float data = op_param["data"]; + return new OpenCLAddOp(bn, name, data); + } +}; + +} // namespace mllm + +#endif // OPENCL_ADD_OP_HPP \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLAddTwoOp.cpp b/mllm/backends/opencl/op/OpenCLAddTwoOp.cpp new file mode 100644 index 000000000..93bdca9f0 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLAddTwoOp.cpp @@ -0,0 +1,162 @@ +#include "OpenCLAddTwoOp.hpp" +#include "Types.hpp" +#include "utils/OpenCLTools.hpp" + +namespace mllm { + +OpenCLAddTwoOp::OpenCLAddTwoOp(Backend *bn, std::string name) : + Op(bn, std::move(name)) { + ocl_backend_ = dynamic_cast(backend_); + if (ocl_backend_ == nullptr) throw std::runtime_error("Backend is not OpenCLBackend"); + + // 只获取一次 Program + const std::string kernel_path = "kernel/add.cl"; + cl_program program = ocl_backend_->getProgram(kernel_path); + + cl_int err; + // --- 创建全部四个内核 --- + kernel_fp32_buffer_ = clCreateKernel(program, "add_float", &err); + check_cl_error(err, "clCreateKernel for add_float"); + + kernel_fp32_image_ = clCreateKernel(program, "add_float_image2d", &err); + check_cl_error(err, "clCreateKernel for add_float_image2d"); + + kernel_fp16_buffer_ = clCreateKernel(program, 
"add_fp16_vector", &err); + check_cl_error(err, "clCreateKernel for add_fp16_vector"); + + kernel_fp16_image_ = clCreateKernel(program, "add_fp16_image2d", &err); + check_cl_error(err, "clCreateKernel for add_fp16_image2d"); + + // --- 创建 Sampler --- + sampler_ = clCreateSampler(ocl_backend_->getContext(), CL_FALSE, CL_ADDRESS_CLAMP_TO_EDGE, CL_FILTER_NEAREST, &err); + check_cl_error(err, "clCreateSampler"); +} + +// 替换您的析构函数 +OpenCLAddTwoOp::~OpenCLAddTwoOp() { + if (kernel_fp32_buffer_) clReleaseKernel(kernel_fp32_buffer_); + if (kernel_fp32_image_) clReleaseKernel(kernel_fp32_image_); + if (kernel_fp16_buffer_) clReleaseKernel(kernel_fp16_buffer_); + if (kernel_fp16_image_) clReleaseKernel(kernel_fp16_image_); + if (sampler_) clReleaseSampler(sampler_); +} + +ErrorCode OpenCLAddTwoOp::reshape(vector> inputs, vector> outputs) { + // 加法操作要求输入和输出的形状一致 + auto input0_shape = inputs[0]->shape(); + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); + outputs[0]->setDtype(inputs[0]->dtype()); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLAddTwoOp::setUp(vector> inputs, vector> outputs) { + // 确保输入在设备上 + for (auto &input : inputs) { + input->to(MLLM_OPENCL); + } + auto output = outputs[0]; + output->to(MLLM_OPENCL); + // 根据输入设置输出的数据类型 + output->setDtype(inputs[0]->dtype()); + + auto &out_mem = output->device_memory(); + + // **核心修改:直接决策为 Image 或 Buffer** + if (output->dimension() % 4 == 0 && false) { + // 条件满足,直接为输出张量申请 Image2D 类型的内存 + out_mem.type = MEM_TYPE_IMAGE_2D; // **直接设为 Image2D** + out_mem.image_width = output->dimension() / 4; + out_mem.image_height = output->batch() * output->head() * output->sequence(); + } else { + // 条件不满足,回退到普通 Buffer + out_mem.type = MEM_TYPE_BUFFER; + } + + // alloc() 现在会根据 out_mem.type 直接创建出 Image 或 Buffer + output->alloc(); + return MLLM_NO_ERROR; +} +ErrorCode OpenCLAddTwoOp::execute(vector> inputs, vector> outputs) { + auto input1 = inputs[0]; + auto input2 = inputs[1]; + auto 
output = outputs[0]; + + // 决定是否走Image优化路径 + bool use_image_path = (output->dimension() % 4 == 0) && false; + + if (use_image_path) { + // =================================================== + // ================ Image 优化路径 =================== + // =================================================== + + // 1. 将两个输入和输出Tensor原地转换为Image2D类型 + tensorGlobal2Image(*input1); + tensorGlobal2Image(*input2); + tensorGlobal2Image(*output); + + // 2. 选择内核 + cl_kernel kernel_to_use = (input1->dtype() == MLLM_TYPE_F32) ? kernel_fp32_image_ : kernel_fp16_image_; + + // 3. 获取Image句柄并设置参数 + cl_mem inA_img = ocl_backend_->get_cl_mem(*input1); + cl_mem inB_img = ocl_backend_->get_cl_mem(*input2); + cl_mem out_img = ocl_backend_->get_cl_mem(*output); + + clSetKernelArg(kernel_to_use, 0, sizeof(cl_sampler), &sampler_); + clSetKernelArg(kernel_to_use, 1, sizeof(cl_mem), &inA_img); + clSetKernelArg(kernel_to_use, 2, sizeof(cl_mem), &inB_img); + clSetKernelArg(kernel_to_use, 3, sizeof(cl_mem), &out_img); + + const int width = static_cast(output->device_memory().image_width); + const int height = static_cast(output->device_memory().image_height); + clSetKernelArg(kernel_to_use, 4, sizeof(int), &width); + clSetKernelArg(kernel_to_use, 5, sizeof(int), &height); + + // 4. 执行内核 + const size_t global_work_size[2] = {(size_t)width, (size_t)height}; + cl_event event; + clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 2, nullptr, global_work_size, nullptr, 0, nullptr, &event); + ocl_backend_->addProfilingEvent(this->name() + "add2", event); + + // 5. 
[重要] 将所有Tensor转换回Buffer格式,以供后续算子使用 + tensorImage2Global(*input1); + tensorImage2Global(*input2); + tensorImage2Global(*output); + + } else { + // =================================================== + // ============= 普通 Buffer 回退路径 ================ + // =================================================== + + // 确保所有Tensor都是Buffer格式 + tensorImage2Global(*input1); + tensorImage2Global(*input2); + tensorImage2Global(*output); + + cl_kernel kernel_to_use = (input1->dtype() == MLLM_TYPE_F32) ? kernel_fp32_buffer_ : kernel_fp16_buffer_; + + cl_mem in0_buf = ocl_backend_->get_cl_mem(*input1); + cl_mem in1_buf = ocl_backend_->get_cl_mem(*input2); + cl_mem out_buf = ocl_backend_->get_cl_mem(*output); + + clSetKernelArg(kernel_to_use, 0, sizeof(cl_mem), &in0_buf); + clSetKernelArg(kernel_to_use, 1, sizeof(cl_mem), &in1_buf); + clSetKernelArg(kernel_to_use, 2, sizeof(cl_mem), &out_buf); + + size_t count = input1->count(); + if (input1->dtype() == MLLM_TYPE_F16) { + if (count % 4 != 0) { + throw std::runtime_error("[addTwo]For FP16 vector kernel, tensor count must be a multiple of 4."); + } + count /= 4; + } + + const size_t global_work_size[1] = {count}; + cl_event event; + clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 1, nullptr, global_work_size, nullptr, 0, nullptr, &event); + ocl_backend_->addProfilingEvent(this->name() + "add2", event); + } + + return MLLM_NO_ERROR; +} +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLAddTwoOp.hpp b/mllm/backends/opencl/op/OpenCLAddTwoOp.hpp new file mode 100644 index 000000000..98365bc22 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLAddTwoOp.hpp @@ -0,0 +1,39 @@ +#ifndef OPENCL_ADD_FUNC_OP_HPP +#define OPENCL_ADD_FUNC_OP_HPP + +#include "Op.hpp" +#include "../OpenCLBackend.hpp" + +namespace mllm { + +class OpenCLAddTwoOp : public Op { +public: + OpenCLAddTwoOp(Backend *bn, std::string name); + ~OpenCLAddTwoOp() override; + + ErrorCode reshape(vector> inputs, vector> 
outputs) override; + ErrorCode setUp(vector> inputs, vector> outputs) override; + ErrorCode execute(vector> inputs, vector> outputs) override; + +private: + cl_kernel kernel_fp32_buffer_ = nullptr; + cl_kernel kernel_fp32_image_ = nullptr; + cl_kernel kernel_fp16_buffer_ = nullptr; + cl_kernel kernel_fp16_image_ = nullptr; + + cl_sampler sampler_ = nullptr; + OpenCLBackend *ocl_backend_ = nullptr; +}; + +// OpenCLAddTwoOp 的创建器,用于工厂模式 +class OpenCLAddTwoOpCreator : public OpenCLBackend::Creator { +public: + Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + // 对于简单的加法,我们可能不需要 op_param 和 threadCount + return new OpenCLAddTwoOp(bn, name); + } +}; + +} // namespace mllm + +#endif // OPENCL_ADD_FUNC_OP_HPP \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLArgSortOp.cpp b/mllm/backends/opencl/op/OpenCLArgSortOp.cpp new file mode 100644 index 000000000..fd7d7cef8 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLArgSortOp.cpp @@ -0,0 +1,133 @@ +#include "OpenCLArgSortOp.hpp" +#include "Types.hpp" +// #include "utils/OpenCLTools.hpp" +#include +#include // For error message formatting + +namespace mllm { + +// 构造函数、析构函数、reshape 和 setUp 保持不变... 
+OpenCLArgSortOp::OpenCLArgSortOp(Backend *bn, std::string name) : + Op(bn, std::move(name)) { + ocl_backend_ = dynamic_cast(backend_); + if (ocl_backend_ == nullptr) throw std::runtime_error("Backend is not OpenCLBackend"); + + support_fp16_ = ocl_backend_->has_fp16_support(); + const std::string kernel_path = "kernel/argsort.cl"; + + std::string build_options; + if (support_fp16_) { + build_options += " -DSUPPORTS_FP16"; + } + cl_program program = ocl_backend_->getProgram(kernel_path, build_options); + + cl_int err; + kernel_init_indices_ = clCreateKernel(program, "init_indices", &err); + check_cl_error(err, "clCreateKernel init_indices"); + kernel_argsort_fp32_ = clCreateKernel(program, "bitonic_argsort_step_fp32", &err); + check_cl_error(err, "clCreateKernel bitonic_argsort_step_fp32"); + kernel_cast_indices_fp32_ = clCreateKernel(program, "cast_indices_to_fp32", &err); + check_cl_error(err, "clCreateKernel cast_indices_to_fp32"); + + kernel_argsort_fp16_ = clCreateKernel(program, "bitonic_argsort_step_fp16", &err); + check_cl_error(err, "clCreateKernel bitonic_argsort_step_fp16"); + kernel_cast_indices_fp16_ = clCreateKernel(program, "cast_indices_to_fp16", &err); + check_cl_error(err, "clCreateKernel cast_indices_to_fp16"); +} + +OpenCLArgSortOp::~OpenCLArgSortOp() { + if (kernel_init_indices_) clReleaseKernel(kernel_init_indices_); + if (kernel_argsort_fp32_) clReleaseKernel(kernel_argsort_fp32_); + if (kernel_cast_indices_fp32_) clReleaseKernel(kernel_cast_indices_fp32_); + if (kernel_argsort_fp16_) clReleaseKernel(kernel_argsort_fp16_); + if (kernel_cast_indices_fp16_) clReleaseKernel(kernel_cast_indices_fp16_); +} + +ErrorCode OpenCLArgSortOp::reshape(vector> inputs, vector> outputs) { + assert(inputs.size() == 1 && outputs.size() == 1); + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); + outputs[0]->setDtype(inputs[0]->dtype()); + return MLLM_NO_ERROR; +} + +ErrorCode 
OpenCLArgSortOp::setUp(vector> inputs, vector> outputs) { + inputs[0]->to(MLLM_OPENCL); + outputs[0]->alloc(); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLArgSortOp::execute(vector> inputs, vector> outputs) { + auto input = inputs[0]; + auto output = outputs[0]; + + const int batch_size = input->batch() * input->head() * input->sequence(); + const int N = input->dimension(); + cl_int err; + + const size_t input_bytes = (size_t)input->count() * input->dtypeSize(); + cl_mem input_buf = ocl_backend_->get_cl_mem(*input); + cl_mem output_buf = ocl_backend_->get_cl_mem(*output); + + // 1. 创建临时 buffer + cl_mem temp_values_buf = clCreateBuffer(ocl_backend_->getContext(), CL_MEM_READ_WRITE, input_bytes, nullptr, &err); + check_cl_error(err, "clCreateBuffer for temp_values"); + cl_mem indices_buf = clCreateBuffer(ocl_backend_->getContext(), CL_MEM_READ_WRITE, (size_t)batch_size * N * sizeof(int), nullptr, &err); + check_cl_error(err, "clCreateBuffer for indices"); + + // 2. 在设备上复制数据到临时 buffer + err = clEnqueueCopyBuffer(ocl_backend_->getQueue(), input_buf, temp_values_buf, 0, 0, input_bytes, 0, nullptr, nullptr); + check_cl_error(err, "clEnqueueCopyBuffer to temp_values_buf"); + + // 3. 初始化索引 buffer + size_t global_work_size_init = (size_t)batch_size * N; + clSetKernelArg(kernel_init_indices_, 0, sizeof(cl_mem), &indices_buf); + clSetKernelArg(kernel_init_indices_, 1, sizeof(int), &N); + err = clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_init_indices_, 1, nullptr, &global_work_size_init, nullptr, 0, nullptr, nullptr); + check_cl_error(err, "clEnqueueNDRangeKernel init_indices"); + + // 4. 执行 Bitonic Sort + cl_kernel kernel_sort = (input->dtype() == MLLM_TYPE_F32) ? kernel_argsort_fp32_ : kernel_argsort_fp16_; + int descending = 0; // 0 for ascending + + int power_of_2_N = 1; + while (power_of_2_N < N) { + power_of_2_N <<= 1; + } + int num_stages = (N > 1) ? 
std::log2(power_of_2_N) : 0; + + for (int stage = 0; stage < num_stages; ++stage) { + for (int pass = stage; pass >= 0; --pass) { + size_t global_work_size_sort[2] = {(size_t)power_of_2_N / 2, (size_t)batch_size}; + + clSetKernelArg(kernel_sort, 0, sizeof(cl_mem), &temp_values_buf); + clSetKernelArg(kernel_sort, 1, sizeof(cl_mem), &indices_buf); + clSetKernelArg(kernel_sort, 2, sizeof(int), &N); + clSetKernelArg(kernel_sort, 3, sizeof(int), &stage); + clSetKernelArg(kernel_sort, 4, sizeof(int), &pass); + clSetKernelArg(kernel_sort, 5, sizeof(int), &descending); + + err = clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_sort, 2, nullptr, global_work_size_sort, nullptr, 0, nullptr, nullptr); + if (err != CL_SUCCESS) { + std::string error_msg = "clEnqueueNDRangeKernel bitonic_sort failed with code " + std::to_string(err) + + " at stage " + std::to_string(stage) + ", pass " + std::to_string(pass); + throw std::runtime_error(error_msg); + } + } + } + + // 5. 转换索引类型并写入输出 + cl_kernel kernel_cast = (output->dtype() == MLLM_TYPE_F32) ? kernel_cast_indices_fp32_ : kernel_cast_indices_fp16_; + size_t global_work_size_cast = (size_t)batch_size * N; + clSetKernelArg(kernel_cast, 0, sizeof(cl_mem), &indices_buf); + clSetKernelArg(kernel_cast, 1, sizeof(cl_mem), &output_buf); + err = clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_cast, 1, nullptr, &global_work_size_cast, nullptr, 0, nullptr, nullptr); + check_cl_error(err, "clEnqueueNDRangeKernel cast_indices"); + + // 6. 
释放临时 buffer + clReleaseMemObject(temp_values_buf); + clReleaseMemObject(indices_buf); + + return MLLM_NO_ERROR; +} + +} // namespace mllm diff --git a/mllm/backends/opencl/op/OpenCLArgSortOp.hpp b/mllm/backends/opencl/op/OpenCLArgSortOp.hpp new file mode 100644 index 000000000..908cf7cba --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLArgSortOp.hpp @@ -0,0 +1,41 @@ +#ifndef OPENCL_ARGSORT_OP_HPP +#define OPENCL_ARGSORT_OP_HPP + +#include "Op.hpp" +#include "../OpenCLBackend.hpp" + +namespace mllm { + +class OpenCLArgSortOp : public Op { +public: + // 构造函数与CPU版本类似,但不需要threadCount + OpenCLArgSortOp(Backend *bn, std::string name); + ~OpenCLArgSortOp() override; + + ErrorCode reshape(vector> inputs, vector> outputs) override; + ErrorCode setUp(vector> inputs, vector> outputs) override; + ErrorCode execute(vector> inputs, vector> outputs) override; + +private: + // 内核对象 + cl_kernel kernel_init_indices_ = nullptr; + cl_kernel kernel_argsort_fp32_ = nullptr; + cl_kernel kernel_argsort_fp16_ = nullptr; + cl_kernel kernel_cast_indices_fp32_ = nullptr; + cl_kernel kernel_cast_indices_fp16_ = nullptr; + + OpenCLBackend *ocl_backend_ = nullptr; + bool support_fp16_ = false; +}; + +class OpenCLArgSortOpCreator : public OpenCLBackend::Creator { +public: + // create 方法与CPU版本类似 + Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + return new OpenCLArgSortOp(bn, name); + } +}; + +} // namespace mllm + +#endif // OPENCL_ARGSORT_OP_HPP diff --git a/mllm/backends/opencl/op/OpenCLBinCountOp.cpp b/mllm/backends/opencl/op/OpenCLBinCountOp.cpp new file mode 100644 index 000000000..ddbcb911c --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLBinCountOp.cpp @@ -0,0 +1,129 @@ +// opencl/op/OpenCLBinCountOp.cpp + +#include "OpenCLBinCountOp.hpp" +#include "Types.hpp" +#include +#include +#include "backends/cpu/third_party/ggml/QuantizeFP16.hpp" +// opencl/op/OpenCLBinCountOp.cpp +namespace mllm { + +// 构造函数、reshape、setUp函数与上一版相同,这里省略... 
+OpenCLBinCountOp::OpenCLBinCountOp(Backend *bn, std::string name) : + Op(bn, std::move(name)) { + ocl_backend_ = dynamic_cast(backend_); + if (ocl_backend_ == nullptr) throw std::runtime_error("Backend is not OpenCLBackend"); + + const std::string kernel_path = "kernel/bincount.cl"; + std::string build_options; + if (ocl_backend_->has_fp16_support()) { + build_options += " -DSUPPORTS_FP16"; + } + + auto program = ocl_backend_->getProgram(kernel_path, build_options); + cl_int err; + + kernel_map_["bincount_count_fp32"] = clCreateKernel(program, "bincount_count_fp32", &err); + check_cl_error(err, "clCreateKernel bincount_count_fp32"); + kernel_map_["bincount_count_fp16"] = clCreateKernel(program, "bincount_count", &err); + check_cl_error(err, "clCreateKernel bincount_count_fp16"); + kernel_map_["cast_int_to_float"] = clCreateKernel(program, "cast_int_to_float", &err); + check_cl_error(err, "clCreateKernel cast_int_to_float"); + kernel_map_["cast_int_to_half"] = clCreateKernel(program, "cast_int_to_half", &err); + check_cl_error(err, "clCreateKernel cast_int_to_half"); +} + +OpenCLBinCountOp::~OpenCLBinCountOp() { + for (auto &pair : kernel_map_) { + if (pair.second) { + clReleaseKernel(pair.second); + } + } +} + +ErrorCode OpenCLBinCountOp::reshape(vector> inputs, vector> outputs) { + assert(inputs[0]->batch() == 1 && inputs[0]->sequence() == 1 && inputs[0]->head() == 1); + outputs[0]->reshape(1, 1, 1, 0); + outputs[0]->setDtype(inputs[0]->dtype()); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLBinCountOp::setUp(vector> inputs, vector> outputs) { + if (inputs[0]->dtype() != MLLM_TYPE_F32 && inputs[0]->dtype() != MLLM_TYPE_F16) { + return NOT_SUPPORT; + } + inputs[0]->to(MLLM_OPENCL); + outputs[0]->setDtype(inputs[0]->dtype()); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLBinCountOp::execute(vector> inputs, vector> outputs) { + auto input = inputs[0]; + auto output = outputs[0]; + // ocl_backend_->finishQueue(); // todo 同步问题 + + input->cpu(); + int size = 
input->dimension(); + int max_val = 0; + if (size > 0) { + if (input->dtype() == MLLM_TYPE_F32) { + float *data_ptr = input->hostPtr(); + max_val = static_cast(*std::max_element(data_ptr, data_ptr + size)); + } else { // MLLM_TYPE_F16 + std::vector float_vec(size); + for (int i = 0; i < size; ++i) { + float_vec[i] = MLLM_FP16_TO_FP32(input->dataAt(0, 0, 0, i)); + } + max_val = static_cast(*std::max_element(float_vec.begin(), float_vec.end())); + } + } + int output_size = max_val + 1; + output->reshape(1, 1, 1, output_size); + output->alloc(); + cl_int err; + cl_mem tmp_count_buffer = clCreateBuffer(ocl_backend_->getContext(), CL_MEM_READ_WRITE, output_size * sizeof(int), nullptr, &err); + check_cl_error(err, "clCreateBuffer for tmp_count_buffer"); + + int zero = 0; + cl_event fill_event; + err = clEnqueueFillBuffer(ocl_backend_->getQueue(), tmp_count_buffer, &zero, sizeof(int), 0, output_size * sizeof(int), 0, nullptr, &fill_event); + check_cl_error(err, "clEnqueueFillBuffer for tmp_count_buffer"); + ocl_backend_->addProfilingEvent("bincount_fill_zero", fill_event); + + input->cl(); + + cl_kernel count_kernel = (input->dtype() == MLLM_TYPE_F32) ? kernel_map_["bincount_count_fp32"] : kernel_map_["bincount_count_fp16"]; + cl_mem in_buf = ocl_backend_->get_cl_mem(*input); + clSetKernelArg(count_kernel, 0, sizeof(cl_mem), &in_buf); + clSetKernelArg(count_kernel, 1, sizeof(cl_mem), &tmp_count_buffer); + clSetKernelArg(count_kernel, 2, sizeof(int), &size); + clSetKernelArg(count_kernel, 3, sizeof(int), &max_val); + + const size_t local_work_size = 256; + const size_t global_work_size = ((size > 0 ? 
size : 1) + local_work_size - 1) / local_work_size * local_work_size; + + cl_event count_event; + err = clEnqueueNDRangeKernel(ocl_backend_->getQueue(), count_kernel, 1, nullptr, &global_work_size, &local_work_size, 1, &fill_event, &count_event); + check_cl_error(err, "clEnqueueNDRangeKernel for bincount_count"); + ocl_backend_->addProfilingEvent("bincount_count", count_event); + + cl_kernel cast_kernel = (output->dtype() == MLLM_TYPE_F32) ? kernel_map_["cast_int_to_float"] : kernel_map_["cast_int_to_half"]; + cl_mem out_buf = ocl_backend_->get_cl_mem(*output); + clSetKernelArg(cast_kernel, 0, sizeof(cl_mem), &tmp_count_buffer); + clSetKernelArg(cast_kernel, 1, sizeof(cl_mem), &out_buf); + clSetKernelArg(cast_kernel, 2, sizeof(int), &output_size); + + const size_t cast_global_work_size = ((output_size > 0 ? output_size : 1) + local_work_size - 1) / local_work_size * local_work_size; + cl_event cast_event; + err = clEnqueueNDRangeKernel(ocl_backend_->getQueue(), cast_kernel, 1, nullptr, &cast_global_work_size, &local_work_size, 1, &count_event, &cast_event); + check_cl_error(err, "clEnqueueNDRangeKernel for cast"); + ocl_backend_->addProfilingEvent("bincount_cast", cast_event); + clReleaseMemObject(tmp_count_buffer); + clWaitForEvents(1, &cast_event); + clReleaseEvent(fill_event); + clReleaseEvent(count_event); + clReleaseEvent(cast_event); + + return MLLM_NO_ERROR; +} +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLBinCountOp.hpp b/mllm/backends/opencl/op/OpenCLBinCountOp.hpp new file mode 100644 index 000000000..b481efd4f --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLBinCountOp.hpp @@ -0,0 +1,35 @@ +// opencl/op/OpenCLBinCountOp.hpp + +#ifndef OPENCL_BINCOUNT_OP_HPP +#define OPENCL_BINCOUNT_OP_HPP + +#include "Op.hpp" +#include "../OpenCLBackend.hpp" + +namespace mllm { + +class OpenCLBinCountOp : public Op { +public: + OpenCLBinCountOp(Backend *bn, std::string name); + ~OpenCLBinCountOp() override; + + ErrorCode 
reshape(vector> inputs, vector> outputs) override; + ErrorCode execute(vector> inputs, vector> outputs) override; + ErrorCode setUp(vector> inputs, vector> outputs) override; + +private: + std::map kernel_map_; + OpenCLBackend *ocl_backend_ = nullptr; +}; + +class OpenCLBinCountOpCreator : public OpenCLBackend::Creator { +public: + Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + // bincount 通常没有额外参数 + return new OpenCLBinCountOp(bn, name); + } +}; + +} // namespace mllm + +#endif // OPENCL_BINCOUNT_OP_HPP \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLClipOp.cpp b/mllm/backends/opencl/op/OpenCLClipOp.cpp new file mode 100644 index 000000000..bae62eafe --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLClipOp.cpp @@ -0,0 +1,184 @@ +// 文件名: ops/OpenCLClipOp.cpp + +#include "OpenCLClipOp.hpp" +#include "Types.hpp" +#include "utils/OpenCLTools.hpp" // 包含 check_cl_error +#include + +namespace mllm { + +// 构造函数 +OpenCLClipOp::OpenCLClipOp(Backend *bn, std::string name, const std::vector &b, const std::vector &h, const std::vector &s, const std::vector &d) : + Op(bn, std::move(name)), b_(b), h_(h), s_(s), d_(d) { + ocl_backend_ = dynamic_cast(backend_); + if (ocl_backend_ == nullptr) { + throw std::runtime_error("Backend is not OpenCLBackend for OpenCLClipOp"); + } +} + +// reshape方法,逻辑与CPU版本完全一致 +ErrorCode OpenCLClipOp::reshape(vector> inputs, vector> outputs) { + int dim_b = inputs[0]->batch(); + int dim_h = inputs[0]->head(); + int dim_s = inputs[0]->sequence(); + int dim_d = inputs[0]->dimension(); + + std::vector *, int *>> data = {{&b_, &dim_b}, {&h_, &dim_h}, {&s_, &dim_s}, {&d_, &dim_d}}; + for (auto &pair : data) { + if (pair.first->size() == 2) { // [start, end) + *pair.second = (*pair.first)[1] - (*pair.first)[0]; + } else if (pair.first->size() == 1) { // [index] + *pair.second = 1; + } + } + + outputs[0]->reshape(dim_b, dim_h, dim_s, dim_d); + outputs[0]->setDtype(inputs[0]->dtype()); + return 
MLLM_NO_ERROR; +} + +// setUp方法,准备输入输出张量 +ErrorCode OpenCLClipOp::setUp(vector> inputs, vector> outputs) { + // 确保输入张量在OpenCL设备上 + inputs[0]->to(MLLM_OPENCL); + + // 根据裁剪参数计算输出形状 + reshape(inputs, outputs); + + // 为输出张量分配设备内存 + outputs[0]->alloc(); + return MLLM_NO_ERROR; +} + +// execute方法,执行实际的裁剪操作 +ErrorCode OpenCLClipOp::execute(vector> inputs, vector> outputs) { + auto input = inputs[0]; + auto output = outputs[0]; + size_t element_size = input->dtypeSize(); + + cl_mem in_buf = ocl_backend_->get_cl_mem(*input); + cl_mem out_buf = ocl_backend_->get_cl_mem(*output); + cl_command_queue queue = ocl_backend_->getQueue(); + cl_int err; + + // 根据不同的裁剪参数执行相应的拷贝操作 + if (!b_.empty()) { + // 裁剪 'batch' 维度。这部分数据是连续的,可一次性拷贝。 + int b_start = b_[0]; + if (b_start < 0) b_start += input->batch(); + int b_end = (b_.size() == 2) ? b_[1] : b_start + 1; + if (b_.size() == 2 && b_end < 0) b_end += input->batch(); + + if (b_start < 0 || b_end > input->batch() || b_start >= b_end) { + return NOT_SUPPORT; + } + int count_b = b_end - b_start; + + size_t src_offset_bytes = (size_t)b_start * input->head() * input->sequence() * input->dimension() * element_size; + size_t copy_size_bytes = (size_t)count_b * input->head() * input->sequence() * input->dimension() * element_size; + + err = clEnqueueCopyBuffer(queue, in_buf, out_buf, src_offset_bytes, 0, copy_size_bytes, 0, nullptr, nullptr); + check_cl_error(err, "clEnqueueCopyBuffer for batch clipping"); + + } else if (!s_.empty()) { + // ============================ 已修正的逻辑 ============================ + // 裁剪 'sequence' 维度。 + int s_start = s_[0]; + if (s_start < 0) { + s_start += input->sequence(); + } + + int s_end; + if (s_.size() == 2) { + s_end = s_[1]; + if (s_end < 0) { + s_end += input->sequence(); + } + } else { // s_.size() == 1 + s_end = s_start + 1; + } + + // 增加健壮性检查 + if (s_start < 0 || s_end > input->sequence() || s_start >= s_end) { + std::cerr << "Error: Invalid sequence clip range. 
Input sequence is " << input->sequence() + << ", but calculated range is [" << s_start << ", " << s_end << ")." << std::endl; + return NOT_SUPPORT; + } + + int count_s = s_end - s_start; + // ============================ 修正结束 ============================ + + size_t copy_size_per_batch = (size_t)input->head() * count_s * input->dimension() * element_size; + for (int b = 0; b < input->batch(); ++b) { + size_t src_offset_bytes = input->offset(b, 0, s_start, 0) * element_size; + size_t dst_offset_bytes = output->offset(b, 0, 0, 0) * element_size; + cl_event event; + err = clEnqueueCopyBuffer(queue, in_buf, out_buf, src_offset_bytes, dst_offset_bytes, copy_size_per_batch, 0, nullptr, &event); + ocl_backend_->addProfilingEvent(this->name() + "clip", event); + check_cl_error(err, "clEnqueueCopyBuffer for sequence clipping"); + } + + } else if (!d_.empty()) { + // 裁剪 'dimension' 维度。这是典型的非连续内存拷贝,使用clEnqueueCopyBufferRect效率最高。 + int d_start = d_[0]; + if (d_start < 0) d_start += input->dimension(); + + int d_end; + if (d_.size() == 2) { + d_end = d_[1]; + if (d_end < 0) d_end += input->dimension(); + } else { // d_.size() == 1 + d_end = d_start + 1; + } + + if (d_start < 0 || d_end > input->dimension() || d_start >= d_end) { + return NOT_SUPPORT; + } + int count_d = d_end - d_start; + + // 定义源、目标和区域的3D参数 + size_t src_origin[3] = {(size_t)d_start * element_size, 0, 0}; // X, Y, Z in bytes + size_t dst_origin[3] = {0, 0, 0}; + size_t region[3] = {(size_t)count_d * element_size, (size_t)input->sequence(), (size_t)(input->batch() * input->head())}; + + // 定义内存布局的行间距和切片间距 + size_t src_row_pitch = input->dimension() * element_size; + size_t src_slice_pitch = input->sequence() * src_row_pitch; + size_t dst_row_pitch = output->dimension() * element_size; + size_t dst_slice_pitch = output->sequence() * dst_row_pitch; + + cl_event event; + err = clEnqueueCopyBufferRect(queue, in_buf, out_buf, src_origin, dst_origin, region, + src_row_pitch, src_slice_pitch, dst_row_pitch, 
dst_slice_pitch, + 0, nullptr, &event); + ocl_backend_->addProfilingEvent(this->name(), event); + check_cl_error(err, "clEnqueueCopyBufferRect for dimension clipping"); + + } else { + std::cerr << "[TODO] OpenCLClipOp does not support this clipping parameter configuration!" << std::endl; + return NOT_SUPPORT; + } + + return MLLM_NO_ERROR; +} + +// 创建器实现 +Op *OpenCLClipOpCreator::create(OpParam op_param, Backend *bn, string name, int threadCount) const { + // 从op_param中解析出向量参数 + + // Example structure: {"b_size": 1, "b_0": 5, "h_size": 0, ...} + int b_size = op_param.at("b_size"); + int h_size = op_param.at("h_size"); + int s_size = op_param.at("s_size"); + int d_size = op_param.at("d_size"); + + std::vector b, h, s, d; + for (int i = 0; i < b_size; ++i) b.push_back(op_param.at("b_" + std::to_string(i))); + for (int i = 0; i < h_size; ++i) h.push_back(op_param.at("h_" + std::to_string(i))); + for (int i = 0; i < s_size; ++i) s.push_back(op_param.at("s_" + std::to_string(i))); + for (int i = 0; i < d_size; ++i) d.push_back(op_param.at("d_" + std::to_string(i))); + + return new OpenCLClipOp(bn, name, b, h, s, d); +} + +} // namespace mllm diff --git a/mllm/backends/opencl/op/OpenCLClipOp.hpp b/mllm/backends/opencl/op/OpenCLClipOp.hpp new file mode 100644 index 000000000..2c275c26e --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLClipOp.hpp @@ -0,0 +1,55 @@ +// 文件名: ops/OpenCLClipOp.hpp + +#ifndef OPENCL_CLIP_OP_HPP +#define OPENCL_CLIP_OP_HPP + +#include "Op.hpp" +#include "../OpenCLBackend.hpp" +#include + +namespace mllm { + +/** + * @brief OpenCL实现的Clip操作,用于裁剪张量的部分区域。 + */ +class OpenCLClipOp : public Op { +public: + /** + * @brief 构造函数 + * @param bn 后端指针 + * @param name 操作名 + * @param b 裁剪batch维度的参数 + * @param h 裁剪head维度的参数 + * @param s 裁剪sequence维度的参数 + * @param d 裁剪dimension维度的参数 + */ + OpenCLClipOp(Backend *bn, std::string name, const std::vector &b, const std::vector &h, const std::vector &s, const std::vector &d); + + // 默认析构函数即可 + ~OpenCLClipOp() override = 
default; + + ErrorCode reshape(vector> inputs, vector> outputs) override; + ErrorCode setUp(vector> inputs, vector> outputs) override; + ErrorCode execute(vector> inputs, vector> outputs) override; + +private: + // 存储各个维度的裁剪参数 + std::vector b_; + std::vector h_; + std::vector s_; + std::vector d_; + + OpenCLBackend *ocl_backend_ = nullptr; +}; + +/** + * @brief OpenCLClipOp的创建器类,用于工厂模式创建实例。 + */ +class OpenCLClipOpCreator : public OpenCLBackend::Creator { +public: + Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override; +}; + +} // namespace mllm + +#endif // OPENCL_CLIP_OP_HPP diff --git a/mllm/backends/opencl/op/OpenCLClipTensorOp.cpp b/mllm/backends/opencl/op/OpenCLClipTensorOp.cpp new file mode 100644 index 000000000..0ebebc8fa --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLClipTensorOp.cpp @@ -0,0 +1,128 @@ +#include "OpenCLClipTensorOp.hpp" +#include "Types.hpp" +// #include "utils/OpenCLTools.hpp" + +namespace mllm { + +OpenCLClipTensorOp::OpenCLClipTensorOp(Backend *bn, std::string name, Chl dim) : + Op(bn, std::move(name)), dim_(dim) { + ocl_backend_ = dynamic_cast(backend_); + if (ocl_backend_ == nullptr) throw std::runtime_error("Backend is not OpenCLBackend"); + + const std::string kernel_path = "kernel/cliptensor.cl"; + cl_program program = ocl_backend_->getProgram(kernel_path); + + cl_int err; + kernel_seq_fp32_ = clCreateKernel(program, "clip_sequence_fp32", &err); + check_cl_error(err, "clCreateKernel for clip_sequence_fp32"); + kernel_seq_fp16_ = clCreateKernel(program, "clip_sequence_fp16", &err); + check_cl_error(err, "clCreateKernel for clip_sequence_fp16"); + + kernel_dim_fp32_ = clCreateKernel(program, "clip_dimension_fp32", &err); + check_cl_error(err, "clCreateKernel for clip_dimension_fp32"); + kernel_dim_fp16_ = clCreateKernel(program, "clip_dimension_fp16", &err); + check_cl_error(err, "clCreateKernel for clip_dimension_fp16"); +} + +OpenCLClipTensorOp::~OpenCLClipTensorOp() { + if (kernel_seq_fp32_) 
clReleaseKernel(kernel_seq_fp32_); + if (kernel_seq_fp16_) clReleaseKernel(kernel_seq_fp16_); + if (kernel_dim_fp32_) clReleaseKernel(kernel_dim_fp32_); + if (kernel_dim_fp16_) clReleaseKernel(kernel_dim_fp16_); +} + +ErrorCode OpenCLClipTensorOp::reshape(vector> inputs, vector> outputs) { + auto input = inputs[0]; + auto indices = inputs[1]; + auto output = outputs[0]; + + if (dim_ == SEQUENCE) { + int new_seq = indices->dimension(); // Indices are 1D, stored in the dimension field + output->reshape(input->batch(), input->head(), new_seq, input->dimension()); + } else if (dim_ == DIMENSION) { + int new_dim = indices->dimension(); + output->reshape(input->batch(), input->head(), input->sequence(), new_dim); + } else { + return NOT_SUPPORT; + } + output->setDtype(input->dtype()); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLClipTensorOp::setUp(vector> inputs, vector> outputs) { + inputs[0]->to(MLLM_OPENCL); + inputs[1]->to(MLLM_OPENCL); // Indices tensor also needs to be on the device + outputs[0]->setDtype(inputs[0]->dtype()); + outputs[0]->alloc(); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLClipTensorOp::execute(vector> inputs, vector> outputs) { + auto input = inputs[0]; + auto indices = inputs[1]; + auto output = outputs[0]; + + if (input->ctype() != BSHD || output->ctype() != BSHD) { + return NOT_SUPPORT; + } + + cl_kernel kernel_to_use = nullptr; + cl_event event; + cl_int err; + + cl_mem in_buf = ocl_backend_->get_cl_mem(*input); + cl_mem indices_buf = ocl_backend_->get_cl_mem(*indices); + cl_mem out_buf = ocl_backend_->get_cl_mem(*output); + + if (dim_ == SEQUENCE) { + kernel_to_use = (input->dtype() == MLLM_TYPE_F32) ? 
kernel_seq_fp32_ : kernel_seq_fp16_; + + const int B = input->batch(); + const int H = input->head(); + const int S_in = input->sequence(); + const int D = input->dimension(); + const int S_out = output->sequence(); + + clSetKernelArg(kernel_to_use, 0, sizeof(cl_mem), &in_buf); + clSetKernelArg(kernel_to_use, 1, sizeof(cl_mem), &indices_buf); + clSetKernelArg(kernel_to_use, 2, sizeof(cl_mem), &out_buf); + clSetKernelArg(kernel_to_use, 3, sizeof(int), &B); + clSetKernelArg(kernel_to_use, 4, sizeof(int), &H); + clSetKernelArg(kernel_to_use, 5, sizeof(int), &S_in); + clSetKernelArg(kernel_to_use, 6, sizeof(int), &D); + clSetKernelArg(kernel_to_use, 7, sizeof(int), &S_out); + + const size_t global_work_size[3] = {(size_t)D, (size_t)H, (size_t)B * S_out}; + err = clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 3, nullptr, global_work_size, nullptr, 0, nullptr, &event); + check_cl_error(err, "clEnqueueNDRangeKernel for Clip Sequence"); + + } else if (dim_ == DIMENSION) { + kernel_to_use = (input->dtype() == MLLM_TYPE_F32) ? 
kernel_dim_fp32_ : kernel_dim_fp16_; + + const int B = input->batch(); + const int H = input->head(); + const int S = input->sequence(); + const int D_in = input->dimension(); + const int D_out = output->dimension(); + + clSetKernelArg(kernel_to_use, 0, sizeof(cl_mem), &in_buf); + clSetKernelArg(kernel_to_use, 1, sizeof(cl_mem), &indices_buf); + clSetKernelArg(kernel_to_use, 2, sizeof(cl_mem), &out_buf); + clSetKernelArg(kernel_to_use, 3, sizeof(int), &B); + clSetKernelArg(kernel_to_use, 4, sizeof(int), &H); + clSetKernelArg(kernel_to_use, 5, sizeof(int), &S); + clSetKernelArg(kernel_to_use, 6, sizeof(int), &D_in); + clSetKernelArg(kernel_to_use, 7, sizeof(int), &D_out); + + const size_t global_work_size[3] = {(size_t)D_out, (size_t)S, (size_t)B * H}; + err = clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 3, nullptr, global_work_size, nullptr, 0, nullptr, &event); + check_cl_error(err, "clEnqueueNDRangeKernel for Clip Dimension"); + + } else { + return NOT_SUPPORT; + } + + return MLLM_NO_ERROR; +} + +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLClipTensorOp.hpp b/mllm/backends/opencl/op/OpenCLClipTensorOp.hpp new file mode 100644 index 000000000..cf8ef773a --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLClipTensorOp.hpp @@ -0,0 +1,37 @@ +#ifndef OPENCL_CLIP_TENSOR_OP_HPP +#define OPENCL_CLIP_TENSOR_OP_HPP + +#include "Op.hpp" +#include "../OpenCLBackend.hpp" + +namespace mllm { + +class OpenCLClipTensorOp : public Op { +public: + OpenCLClipTensorOp(Backend *bn, std::string name, Chl dim); + ~OpenCLClipTensorOp() override; + + ErrorCode reshape(vector> inputs, vector> outputs) override; + ErrorCode setUp(vector> inputs, vector> outputs) override; + ErrorCode execute(vector> inputs, vector> outputs) override; + +private: + Chl dim_; + cl_kernel kernel_seq_fp32_ = nullptr; + cl_kernel kernel_seq_fp16_ = nullptr; + cl_kernel kernel_dim_fp32_ = nullptr; + cl_kernel kernel_dim_fp16_ = nullptr; + 
OpenCLBackend *ocl_backend_ = nullptr; +}; + +class OpenCLClipTensorOpCreator : public OpenCLBackend::Creator { +public: + Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + Chl dim = (Chl)op_param.at("dim"); + return new OpenCLClipTensorOp(bn, name, dim); + } +}; + +} // namespace mllm + +#endif // OPENCL_CLIP_TENSOR_OP_HPP \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLDivIntOp.cpp b/mllm/backends/opencl/op/OpenCLDivIntOp.cpp new file mode 100644 index 000000000..dea5fd68a --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLDivIntOp.cpp @@ -0,0 +1,127 @@ +#include "OpenCLDivIntOp.hpp" +#include "Types.hpp" +#include "utils/OpenCLTools.hpp" +// #include "backends/cpu/third_party/ggml/QuantizeFP16.hpp" + +namespace mllm { + +OpenCLDivIntOp::OpenCLDivIntOp(Backend *bn, std::string name, float data) : + Op(bn, std::move(name)), data_(data) { + ocl_backend_ = dynamic_cast(backend_); + if (ocl_backend_ == nullptr) throw std::runtime_error("Backend is not OpenCLBackend"); + + const std::string kernel_path = "kernel/div_int.cl"; + std::string build_options; + if (ocl_backend_->has_fp16_support()) { + build_options += " -DSUPPORTS_FP16"; + } + + cl_program program = ocl_backend_->getProgram(kernel_path, build_options); + + cl_int err; + kernel_fp32_buffer_ = clCreateKernel(program, "div_int_scalar_float", &err); + check_cl_error(err, "clCreateKernel for div_int_scalar_float"); + kernel_fp32_image_ = clCreateKernel(program, "div_int_scalar_float_image2d", &err); + check_cl_error(err, "clCreateKernel for div_int_scalar_float_image2d"); + + // Load vectorized kernel + kernel_fp16_buffer_ = clCreateKernel(program, "div_int_scalar_fp16_vector", &err); + check_cl_error(err, "clCreateKernel for div_int_scalar_fp16_vector"); + + // Load image kernel + kernel_fp16_image_ = clCreateKernel(program, "div_int_scalar_fp16_image2d", &err); + check_cl_error(err, "clCreateKernel for div_int_scalar_fp16_image2d"); + + // ADDED: 
Load the new scalar kernel + kernel_fp16_buffer_scalar_ = clCreateKernel(program, "div_int_scalar_fp16", &err); + check_cl_error(err, "clCreateKernel for div_int_scalar_fp16"); + + sampler_ = clCreateSampler(ocl_backend_->getContext(), CL_FALSE, CL_ADDRESS_CLAMP_TO_EDGE, CL_FILTER_NEAREST, &err); + check_cl_error(err, "clCreateSampler"); +} + +OpenCLDivIntOp::~OpenCLDivIntOp() { + if (kernel_fp32_buffer_) clReleaseKernel(kernel_fp32_buffer_); + if (kernel_fp32_image_) clReleaseKernel(kernel_fp32_image_); + if (kernel_fp16_buffer_) clReleaseKernel(kernel_fp16_buffer_); + if (kernel_fp16_image_) clReleaseKernel(kernel_fp16_image_); + if (kernel_fp16_buffer_scalar_) clReleaseKernel(kernel_fp16_buffer_scalar_); + if (sampler_) clReleaseSampler(sampler_); +} + +ErrorCode OpenCLDivIntOp::reshape(vector> inputs, vector> outputs) { + outputs[0]->setCtype(inputs[0]->ctype()); + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); + outputs[0]->setDtype(inputs[0]->dtype()); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLDivIntOp::setUp(vector> inputs, vector> outputs) { + inputs[0]->to(MLLM_OPENCL); + auto output = outputs[0]; + output->setDtype(inputs[0]->dtype()); + auto &out_mem = output->device_memory(); + if (output->dimension() % 4 == 0 && false) { + out_mem.type = MEM_TYPE_IMAGE_2D; + out_mem.image_width = output->dimension() / 4; + out_mem.image_height = output->batch() * output->head() * output->sequence(); + } else { + out_mem.type = MEM_TYPE_BUFFER; + } + output->alloc(); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLDivIntOp::execute(vector> inputs, vector> outputs) { + auto input_dtype = inputs[0]->dtype(); + auto output = outputs[0]; + + if (output->device_memory().type == MEM_TYPE_IMAGE_2D) { + cl_kernel kernel_to_use = (input_dtype == MLLM_TYPE_F32) ? 
kernel_fp32_image_ : kernel_fp16_image_; + std::vector temp_tensor_storage; + cl_mem inA_mem = get_image_from_tensor(inputs[0], ocl_backend_, temp_tensor_storage); + cl_mem out_mem_handle = ocl_backend_->get_cl_mem(*output); + + clSetKernelArg(kernel_to_use, 0, sizeof(cl_sampler), &sampler_); + clSetKernelArg(kernel_to_use, 1, sizeof(cl_mem), &inA_mem); + clSetKernelArg(kernel_to_use, 2, sizeof(float), &data_); + clSetKernelArg(kernel_to_use, 3, sizeof(cl_mem), &out_mem_handle); + const int width = static_cast(output->device_memory().image_width); + const int height = static_cast(output->device_memory().image_height); + clSetKernelArg(kernel_to_use, 4, sizeof(int), &width); + clSetKernelArg(kernel_to_use, 5, sizeof(int), &height); + const size_t global_work_size[2] = {(size_t)width, (size_t)height}; + clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 2, nullptr, global_work_size, nullptr, 0, nullptr, nullptr); + } else { // Buffer Path + cl_mem in0_buf = ocl_backend_->get_cl_mem(*inputs[0]); + cl_mem out_buf = ocl_backend_->get_cl_mem(*output); + size_t count = inputs[0]->count(); + + cl_kernel kernel_to_use; + size_t global_work_size[1]; + + if (input_dtype == MLLM_TYPE_F32) { + kernel_to_use = kernel_fp32_buffer_; + global_work_size[0] = count; + } else { // MLLM_TYPE_F16 + if (count % 4 == 0) { + // Use the fast, vectorized kernel for aligned data + kernel_to_use = kernel_fp16_buffer_; + global_work_size[0] = count / 4; + } else { + // Use the robust, scalar kernel for non-aligned data + kernel_to_use = kernel_fp16_buffer_scalar_; + global_work_size[0] = count; + } + } + + clSetKernelArg(kernel_to_use, 0, sizeof(cl_mem), &in0_buf); + clSetKernelArg(kernel_to_use, 1, sizeof(float), &data_); + clSetKernelArg(kernel_to_use, 2, sizeof(cl_mem), &out_buf); + + clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 1, nullptr, global_work_size, nullptr, 0, nullptr, nullptr); + } + return MLLM_NO_ERROR; +} + +} // namespace mllm \ No newline at 
end of file diff --git a/mllm/backends/opencl/op/OpenCLDivIntOp.hpp b/mllm/backends/opencl/op/OpenCLDivIntOp.hpp new file mode 100644 index 000000000..ce2065c3d --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLDivIntOp.hpp @@ -0,0 +1,41 @@ +#ifndef OPENCL_DIV_INT_OP_HPP +#define OPENCL_DIV_INT_OP_HPP + +#include "Op.hpp" +#include "../OpenCLBackend.hpp" + +namespace mllm { + +class OpenCLDivIntOp : public Op { +public: + OpenCLDivIntOp(Backend *bn, std::string name, float data); + ~OpenCLDivIntOp() override; + + ErrorCode reshape(vector> inputs, vector> outputs) override; + ErrorCode setUp(vector> inputs, vector> outputs) override; + ErrorCode execute(vector> inputs, vector> outputs) override; + +private: + float data_; // 用于存储要除的标量 + + cl_kernel kernel_fp32_buffer_ = nullptr; + cl_kernel kernel_fp32_image_ = nullptr; + cl_kernel kernel_fp16_buffer_ = nullptr; // Vectorized version + cl_kernel kernel_fp16_image_ = nullptr; + cl_kernel kernel_fp16_buffer_scalar_ = nullptr; // ADDED: Scalar (element-wise) version + + cl_sampler sampler_ = nullptr; + OpenCLBackend *ocl_backend_ = nullptr; +}; + +class OpenCLDivIntOpCreator : public OpenCLBackend::Creator { +public: + Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + float data = op_param["data"]; + return new OpenCLDivIntOp(bn, name, data); + } +}; + +} // namespace mllm + +#endif // OPENCL_DIV_INT_OP_HPP \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLDivOp.cpp b/mllm/backends/opencl/op/OpenCLDivOp.cpp new file mode 100644 index 000000000..d26b81d41 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLDivOp.cpp @@ -0,0 +1,116 @@ +#include "OpenCLDivOp.hpp" +#include "Types.hpp" +#include "utils/OpenCLTools.hpp" +#include "backends/cpu/third_party/ggml/QuantizeFP16.hpp" + +namespace mllm { + +OpenCLDivOp::OpenCLDivOp(Backend *bn, std::string name, float data) : + Op(bn, std::move(name)), data_(data) { + ocl_backend_ = dynamic_cast(backend_); + if 
(ocl_backend_ == nullptr) throw std::runtime_error("Backend is not OpenCLBackend"); + + const std::string kernel_path = "kernel/div.cl"; + std::string build_options; + if (ocl_backend_->has_fp16_support()) { + build_options += " -DSUPPORTS_FP16"; + } + + cl_program program = ocl_backend_->getProgram(kernel_path, build_options); + + cl_int err; + kernel_fp32_buffer_ = clCreateKernel(program, "div_scalar_float", &err); + check_cl_error(err, "clCreateKernel for div_scalar_float"); + kernel_fp32_image_ = clCreateKernel(program, "div_scalar_float_image2d", &err); + check_cl_error(err, "clCreateKernel for div_scalar_float_image2d"); + kernel_fp16_buffer_ = clCreateKernel(program, "div_scalar_fp16_vector", &err); + check_cl_error(err, "clCreateKernel for div_scalar_fp16_vector"); + kernel_fp16_image_ = clCreateKernel(program, "div_scalar_fp16_image2d", &err); + check_cl_error(err, "clCreateKernel for div_scalar_fp16_image2d"); + + sampler_ = clCreateSampler(ocl_backend_->getContext(), CL_FALSE, CL_ADDRESS_CLAMP_TO_EDGE, CL_FILTER_NEAREST, &err); + check_cl_error(err, "clCreateSampler"); +} + +OpenCLDivOp::~OpenCLDivOp() { + if (kernel_fp32_buffer_) clReleaseKernel(kernel_fp32_buffer_); + if (kernel_fp32_image_) clReleaseKernel(kernel_fp32_image_); + if (kernel_fp16_buffer_) clReleaseKernel(kernel_fp16_buffer_); + if (kernel_fp16_image_) clReleaseKernel(kernel_fp16_image_); + if (sampler_) clReleaseSampler(sampler_); +} + +ErrorCode OpenCLDivOp::reshape(vector> inputs, vector> outputs) { + outputs[0]->setCtype(inputs[0]->ctype()); + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); + outputs[0]->setDtype(inputs[0]->dtype()); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLDivOp::setUp(vector> inputs, vector> outputs) { + inputs[0]->to(MLLM_OPENCL); + auto output = outputs[0]; + output->setDtype(inputs[0]->dtype()); + auto &out_mem = output->device_memory(); + if (output->dimension() % 4 == 0 && false) { + out_mem.type 
= MEM_TYPE_IMAGE_2D; + out_mem.image_width = output->dimension() / 4; + out_mem.image_height = output->batch() * output->head() * output->sequence(); + } else { + out_mem.type = MEM_TYPE_BUFFER; + } + output->alloc(); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLDivOp::execute(vector> inputs, vector> outputs) { + auto input_dtype = inputs[0]->dtype(); + auto output = outputs[0]; + + if (output->device_memory().type == MEM_TYPE_IMAGE_2D) { + cl_kernel kernel_to_use = (input_dtype == MLLM_TYPE_F32) ? kernel_fp32_image_ : kernel_fp16_image_; + std::vector temp_tensor_storage; + cl_mem inA_mem = get_image_from_tensor(inputs[0], ocl_backend_, temp_tensor_storage); + cl_mem out_mem_handle = ocl_backend_->get_cl_mem(*output); + + clSetKernelArg(kernel_to_use, 0, sizeof(cl_sampler), &sampler_); + clSetKernelArg(kernel_to_use, 1, sizeof(cl_mem), &inA_mem); + clSetKernelArg(kernel_to_use, 2, sizeof(float), &data_); + // if (input_dtype == MLLM_TYPE_F32) { + // clSetKernelArg(kernel_to_use, 2, sizeof(float), &data_); + // } else { + // mllm_fp16_t data_fp16 = MLLM_FP32_TO_FP16(data_); + // clSetKernelArg(kernel_to_use, 2, sizeof(mllm_fp16_t), &data_fp16); + // } + clSetKernelArg(kernel_to_use, 3, sizeof(cl_mem), &out_mem_handle); + const int width = static_cast(output->device_memory().image_width); + const int height = static_cast(output->device_memory().image_height); + clSetKernelArg(kernel_to_use, 4, sizeof(int), &width); + clSetKernelArg(kernel_to_use, 5, sizeof(int), &height); + const size_t global_work_size[2] = {(size_t)width, (size_t)height}; + clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 2, nullptr, global_work_size, nullptr, 0, nullptr, nullptr); + } else { + cl_kernel kernel_to_use = (input_dtype == MLLM_TYPE_F32) ? 
kernel_fp32_buffer_ : kernel_fp16_buffer_; + cl_mem in0_buf = ocl_backend_->get_cl_mem(*inputs[0]); + cl_mem out_buf = ocl_backend_->get_cl_mem(*output); + + clSetKernelArg(kernel_to_use, 0, sizeof(cl_mem), &in0_buf); + clSetKernelArg(kernel_to_use, 1, sizeof(float), &data_); + // if (input_dtype == MLLM_TYPE_F32) { + // clSetKernelArg(kernel_to_use, 1, sizeof(float), &data_); + // } else { + // mllm_fp16_t data_fp16 = MLLM_FP32_TO_FP16(data_); + // clSetKernelArg(kernel_to_use, 1, sizeof(mllm_fp16_t), &data_fp16); + // } + clSetKernelArg(kernel_to_use, 2, sizeof(cl_mem), &out_buf); + + size_t count = inputs[0]->count(); + if (input_dtype == MLLM_TYPE_F16) { count /= 4; } + + const size_t global_work_size[1] = {count}; + clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 1, nullptr, global_work_size, nullptr, 0, nullptr, nullptr); + } + return MLLM_NO_ERROR; +} + +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLDivOp.hpp b/mllm/backends/opencl/op/OpenCLDivOp.hpp new file mode 100644 index 000000000..0b59a8be7 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLDivOp.hpp @@ -0,0 +1,40 @@ +#ifndef OPENCL_DIV_OP_HPP +#define OPENCL_DIV_OP_HPP + +#include "Op.hpp" +#include "../OpenCLBackend.hpp" + +namespace mllm { + +class OpenCLDivOp : public Op { +public: + OpenCLDivOp(Backend *bn, std::string name, float data); + ~OpenCLDivOp() override; + + ErrorCode reshape(vector> inputs, vector> outputs) override; + ErrorCode setUp(vector> inputs, vector> outputs) override; + ErrorCode execute(vector> inputs, vector> outputs) override; + +private: + float data_; // 用于存储要除的标量 + + cl_kernel kernel_fp32_buffer_ = nullptr; + cl_kernel kernel_fp32_image_ = nullptr; + cl_kernel kernel_fp16_buffer_ = nullptr; + cl_kernel kernel_fp16_image_ = nullptr; + + cl_sampler sampler_ = nullptr; + OpenCLBackend *ocl_backend_ = nullptr; +}; + +class OpenCLDivOpCreator : public OpenCLBackend::Creator { +public: + Op *create(OpParam op_param, 
Backend *bn, string name, int threadCount) const override { + float data = op_param["data"]; + return new OpenCLDivOp(bn, name, data); + } +}; + +} // namespace mllm + +#endif // OPENCL_DIV_OP_HPP \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLDivTwoOp.cpp b/mllm/backends/opencl/op/OpenCLDivTwoOp.cpp new file mode 100644 index 000000000..a967dc168 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLDivTwoOp.cpp @@ -0,0 +1,131 @@ +#include "OpenCLDivTwoOp.hpp" +#include "Types.hpp" +#include "utils/OpenCLTools.hpp" + +namespace mllm { + +OpenCLDivTwoOp::OpenCLDivTwoOp(Backend *bn, std::string name) : + Op(bn, std::move(name)) { + ocl_backend_ = dynamic_cast(backend_); + if (ocl_backend_ == nullptr) throw std::runtime_error("Backend is not OpenCLBackend"); + + const std::string kernel_path = "kernel/div.cl"; + std::string build_options; + if (ocl_backend_->has_fp16_support()) { + build_options += " -DSUPPORTS_FP16"; + } + cl_program program = ocl_backend_->getProgram(kernel_path, build_options); + + cl_int err; + kernel_fp32_buffer_ = clCreateKernel(program, "div_float", &err); + check_cl_error(err, "clCreateKernel for div_float"); + kernel_fp32_image_ = clCreateKernel(program, "div_float_image2d", &err); + check_cl_error(err, "clCreateKernel for div_float_image2d"); + + // Kernel for vectorized FP16 (count must be multiple of 4) + kernel_fp16_buffer_ = clCreateKernel(program, "div_fp16_vector", &err); + check_cl_error(err, "clCreateKernel for div_fp16_vector"); + + // Kernel for scalar FP16 (handles any count) + kernel_fp16_scalar_ = clCreateKernel(program, "div_fp16_scalar", &err); + check_cl_error(err, "clCreateKernel for div_fp16_scalar"); + + kernel_fp16_image_ = clCreateKernel(program, "div_fp16_image2d", &err); + check_cl_error(err, "clCreateKernel for div_fp16_image2d"); + + sampler_ = clCreateSampler(ocl_backend_->getContext(), CL_FALSE, CL_ADDRESS_CLAMP_TO_EDGE, CL_FILTER_NEAREST, &err); + check_cl_error(err, "clCreateSampler"); +} + 
+OpenCLDivTwoOp::~OpenCLDivTwoOp() { + if (kernel_fp32_buffer_) clReleaseKernel(kernel_fp32_buffer_); + if (kernel_fp32_image_) clReleaseKernel(kernel_fp32_image_); + if (kernel_fp16_buffer_) clReleaseKernel(kernel_fp16_buffer_); + if (kernel_fp16_scalar_) clReleaseKernel(kernel_fp16_scalar_); // Release new kernel + if (kernel_fp16_image_) clReleaseKernel(kernel_fp16_image_); + if (sampler_) clReleaseSampler(sampler_); +} + +ErrorCode OpenCLDivTwoOp::reshape(vector> inputs, vector> outputs) { + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); + outputs[0]->setDtype(inputs[0]->dtype()); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLDivTwoOp::setUp(vector> inputs, vector> outputs) { + for (auto &input : inputs) { + input->to(MLLM_OPENCL); + } + auto output = outputs[0]; + output->setDtype(inputs[0]->dtype()); + auto &out_mem = output->device_memory(); + if (output->dimension() % 4 == 0 && false) { + out_mem.type = MEM_TYPE_IMAGE_2D; + out_mem.image_width = output->dimension() / 4; + out_mem.image_height = output->batch() * output->head() * output->sequence(); + } else { + out_mem.type = MEM_TYPE_BUFFER; + } + output->alloc(); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLDivTwoOp::execute(vector> inputs, vector> outputs) { + auto input_dtype = inputs[0]->dtype(); + auto output = outputs[0]; + + if (output->device_memory().type == MEM_TYPE_IMAGE_2D) { + cl_kernel kernel_to_use = (input_dtype == MLLM_TYPE_F32) ? 
kernel_fp32_image_ : kernel_fp16_image_; + std::vector temp_tensor_storage; + cl_mem inA_mem = get_image_from_tensor(inputs[0], ocl_backend_, temp_tensor_storage); + cl_mem inB_mem = get_image_from_tensor(inputs[1], ocl_backend_, temp_tensor_storage); + cl_mem out_mem_handle = ocl_backend_->get_cl_mem(*output); + + clSetKernelArg(kernel_to_use, 0, sizeof(cl_sampler), &sampler_); + clSetKernelArg(kernel_to_use, 1, sizeof(cl_mem), &inA_mem); + clSetKernelArg(kernel_to_use, 2, sizeof(cl_mem), &inB_mem); + clSetKernelArg(kernel_to_use, 3, sizeof(cl_mem), &out_mem_handle); + const int width = static_cast(output->device_memory().image_width); + const int height = static_cast(output->device_memory().image_height); + clSetKernelArg(kernel_to_use, 4, sizeof(int), &width); + clSetKernelArg(kernel_to_use, 5, sizeof(int), &height); + const size_t global_work_size[2] = {(size_t)width, (size_t)height}; + clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 2, nullptr, global_work_size, nullptr, 0, nullptr, nullptr); + } else { + // === MODIFIED LOGIC FOR BUFFER-BASED EXECUTION === + cl_kernel kernel_to_use; + size_t count = inputs[0]->count(); + size_t global_work_size[1]; + + if (input_dtype == MLLM_TYPE_F32) { + kernel_to_use = kernel_fp32_buffer_; + global_work_size[0] = count; + } else { // MLLM_TYPE_F16 + if (count % 4 == 0) { + // Use the fast vectorized kernel if count is a multiple of 4 + kernel_to_use = kernel_fp16_buffer_; + global_work_size[0] = count / 4; + } else { + // Use the robust scalar kernel for other cases + kernel_to_use = kernel_fp16_scalar_; + global_work_size[0] = count; + } + } + + cl_mem in0_buf = ocl_backend_->get_cl_mem(*inputs[0]); + cl_mem in1_buf = ocl_backend_->get_cl_mem(*inputs[1]); + cl_mem out_buf = ocl_backend_->get_cl_mem(*output); + clSetKernelArg(kernel_to_use, 0, sizeof(cl_mem), &in0_buf); + clSetKernelArg(kernel_to_use, 1, sizeof(cl_mem), &in1_buf); + clSetKernelArg(kernel_to_use, 2, sizeof(cl_mem), &out_buf); + const int 
b_dim = inputs[1]->dimension(); + const int a_dim = inputs[0]->dimension(); + clSetKernelArg(kernel_to_use, 3, sizeof(int), &b_dim); + clSetKernelArg(kernel_to_use, 4, sizeof(int), &a_dim); + + clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 1, nullptr, global_work_size, nullptr, 0, nullptr, nullptr); + } + return MLLM_NO_ERROR; +} + +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLDivTwoOp.hpp b/mllm/backends/opencl/op/OpenCLDivTwoOp.hpp new file mode 100644 index 000000000..7695a5607 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLDivTwoOp.hpp @@ -0,0 +1,38 @@ +#ifndef OPENCL_DIV_TWO_OP_HPP +#define OPENCL_DIV_TWO_OP_HPP + +#include "Op.hpp" +#include "../OpenCLBackend.hpp" + +namespace mllm { + +class OpenCLDivTwoOp : public Op { +public: + OpenCLDivTwoOp(Backend *bn, std::string name); + ~OpenCLDivTwoOp() override; + + ErrorCode reshape(vector> inputs, vector> outputs) override; + ErrorCode setUp(vector> inputs, vector> outputs) override; + ErrorCode execute(vector> inputs, vector> outputs) override; + +private: + cl_kernel kernel_fp32_buffer_ = nullptr; + cl_kernel kernel_fp32_image_ = nullptr; + cl_kernel kernel_fp16_buffer_ = nullptr; // Note: this kernel is "div_fp16_vector" + cl_kernel kernel_fp16_scalar_ = nullptr; // Added for non-multiple-of-4 cases + cl_kernel kernel_fp16_image_ = nullptr; + + cl_sampler sampler_ = nullptr; + OpenCLBackend *ocl_backend_ = nullptr; +}; + +class OpenCLDivTwoOpCreator : public OpenCLBackend::Creator { +public: + Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + return new OpenCLDivTwoOp(bn, name); + } +}; + +} // namespace mllm + +#endif // OPENCL_DIV_TWO_OP_HPP \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLEmbeddingOp.cpp b/mllm/backends/opencl/op/OpenCLEmbeddingOp.cpp new file mode 100644 index 000000000..9d0c48fc6 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLEmbeddingOp.cpp @@ -0,0 +1,119 
@@ +#include "OpenCLEmbeddingOp.hpp" +#include "Types.hpp" +// #include "utils/OpenCLTools.hpp" + +namespace mllm { + +OpenCLEmbeddingOp::OpenCLEmbeddingOp(Backend *bn, std::string name, int vocab_size, int hidden_size) : + Op(bn, std::move(name)), vocab_size_(vocab_size), hidden_size_(hidden_size) { + ocl_backend_ = dynamic_cast(backend_); + if (ocl_backend_ == nullptr) throw std::runtime_error("Backend is not OpenCLBackend"); + + const std::string kernel_path = "kernel/embedding.cl"; + cl_program program = ocl_backend_->getProgram(kernel_path); + + cl_int err; + kernel_fp32_ = clCreateKernel(program, "embedding_fp32", &err); + check_cl_error(err, "clCreateKernel for embedding_fp32"); + + kernel_q4_0_ = clCreateKernel(program, "embedding_q4_0", &err); + check_cl_error(err, "clCreateKernel for embedding_q4_0"); + + kernel_q4_0_fp16_ = clCreateKernel(program, "embedding_q4_0_fp16", &err); + check_cl_error(err, "clCreateKernel for embedding_q4_0_fp16"); +} + +OpenCLEmbeddingOp::~OpenCLEmbeddingOp() { + if (kernel_fp32_) clReleaseKernel(kernel_fp32_); + if (kernel_q4_0_) clReleaseKernel(kernel_q4_0_); + if (kernel_q4_0_fp16_) clReleaseKernel(kernel_q4_0_fp16_); +} + +ErrorCode OpenCLEmbeddingOp::reshape(vector> inputs, vector> outputs) { + // 输出张量的形状是 [B, H, S, D],其中 D 是 hidden_size + // 注意:Embedding通常不关心H,这里假设H=1 + outputs[0]->reshape(inputs[0]->batch(), 1, inputs[0]->sequence(), hidden_size_); + // Embedding的输出总是FP32 + outputs[0]->setDtype(inputs[0]->dtype()); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLEmbeddingOp::load(AbstructLoader &loader) { + weight_.setBackend(Backend::global_backends[MLLM_CPU].get()); + // 从模型文件中加载权重 + weight_.setName(name() + ".weight"); + // 权重的形状是 [vocab_size, hidden_size],我们用 BHSD 来模拟 [1, 1, vocab_size, hidden_size] + weight_.reshape(1, 1, vocab_size_, hidden_size_); + + if (loader.getDataType(weight_.name()) != MLLM_TYPE_COUNT) { + weight_.setDtype(loader.getDataType(weight_.name())); + weight_.alloc(); + loader.load(&weight_); + } 
else { + // 如果模型文件中没有,可能需要一个默认的空权重 + weight_.setDtype(MLLM_TYPE_F32); + weight_.alloc(); + } + weight_.to(MLLM_OPENCL); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLEmbeddingOp::free(vector> inputs, vector> outputs) { + weight_.free(); + return Op::free(inputs, outputs); +} + +ErrorCode OpenCLEmbeddingOp::setUp(vector> inputs, vector> outputs) { + // 确保所有张量都在OpenCL设备上 + inputs[0]->to(MLLM_OPENCL); // input_ids + // 输出总是 FP32 + outputs[0]->to(MLLM_OPENCL); + outputs[0]->setDtype(inputs[0]->dtype()); + outputs[0]->alloc(); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLEmbeddingOp::execute(vector> inputs, vector> outputs) { + auto input = inputs[0]; // (B, 1, S, 1) or (B, S) - + auto output = outputs[0]; // (B, 1, S, D) + + cl_kernel kernel_to_use = nullptr; + if (weight_.dtype() == MLLM_TYPE_F32) { + kernel_to_use = kernel_fp32_; + } else if (weight_.dtype() == MLLM_TYPE_Q4_0) { + if (output->dtype() == MLLM_TYPE_F16) { + kernel_to_use = kernel_q4_0_fp16_; // 调用新的FP16输出内核 + } else { + kernel_to_use = kernel_q4_0_; // 保留对FP32输出的兼容 + } + } else { + return NOT_SUPPORT; + } + + cl_mem in_id_buf = ocl_backend_->get_cl_mem(*input); + cl_mem weight_buf = ocl_backend_->get_cl_mem(weight_); + cl_mem out_buf = ocl_backend_->get_cl_mem(*output); + + // input tensor (token_ids) is usually flat, e.g. 
(B*S) + const int sequence_len = input->batch() * input->sequence(); + + clSetKernelArg(kernel_to_use, 0, sizeof(cl_mem), &in_id_buf); + clSetKernelArg(kernel_to_use, 1, sizeof(cl_mem), &weight_buf); + clSetKernelArg(kernel_to_use, 2, sizeof(cl_mem), &out_buf); + clSetKernelArg(kernel_to_use, 3, sizeof(int), &vocab_size_); + clSetKernelArg(kernel_to_use, 4, sizeof(int), &hidden_size_); + clSetKernelArg(kernel_to_use, 5, sizeof(int), &sequence_len); + + // 启动2D内核: + // - 维度0 (X): 对应 hidden_size,每个工作项负责拷贝一个维度 + // - 维度1 (Y): 对应 token 数量 (B*S),每个工作项负责处理一个 token + const size_t global_work_size[2] = {(size_t)hidden_size_, (size_t)sequence_len}; + cl_event event; + cl_int err = clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 2, nullptr, global_work_size, nullptr, 0, nullptr, &event); + ocl_backend_->addProfilingEvent(this->name(), event); + check_cl_error(err, "clEnqueueNDRangeKernel for Embedding"); + + return MLLM_NO_ERROR; +} + +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLEmbeddingOp.hpp b/mllm/backends/opencl/op/OpenCLEmbeddingOp.hpp new file mode 100644 index 000000000..16f4854d1 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLEmbeddingOp.hpp @@ -0,0 +1,43 @@ +#ifndef OPENCL_EMBEDDING_OP_HPP +#define OPENCL_EMBEDDING_OP_HPP + +#include "Op.hpp" +#include "../OpenCLBackend.hpp" + +namespace mllm { + +class OpenCLEmbeddingOp : public Op { +public: + OpenCLEmbeddingOp(Backend *bn, std::string name, int vocab_size, int hidden_size); + ~OpenCLEmbeddingOp() override; + + ErrorCode reshape(vector> inputs, vector> outputs) override; + ErrorCode load(AbstructLoader &loader) override; + ErrorCode setUp(vector> inputs, vector> outputs) override; + ErrorCode execute(vector> inputs, vector> outputs) override; + ErrorCode free(vector> inputs, vector> outputs) override; + +private: + int vocab_size_; + int hidden_size_; + Tensor weight_; + + cl_kernel kernel_fp32_ = nullptr; + cl_kernel kernel_q4_0_ = nullptr; + 
cl_kernel kernel_q4_0_fp16_ = nullptr; + + OpenCLBackend *ocl_backend_ = nullptr; +}; + +class OpenCLEmbeddingOpCreator : public OpenCLBackend::Creator { +public: + Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + int vocab_size = op_param["vocab_size"]; + int hidden_size = op_param["hidden_size"]; + return new OpenCLEmbeddingOp(bn, name, vocab_size, hidden_size); + } +}; + +} // namespace mllm + +#endif // OPENCL_EMBEDDING_OP_HPP \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLFlashAttentionOp.cpp b/mllm/backends/opencl/op/OpenCLFlashAttentionOp.cpp new file mode 100644 index 000000000..b36fffe32 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLFlashAttentionOp.cpp @@ -0,0 +1,279 @@ +#include "OpenCLFlashAttentionOp.hpp" +#include "Types.hpp" +#include "utils/OpenCLTools.hpp" + +// 宏定义现在只控制Tile的行列数和工作组大小 +#define Br 8 +#define Bc 8 +#define WGS_S 8 +#define WGS_D 8 +// 定义一个与内核中 LOCAL_MEM_SIZE 计算逻辑相关的维度上限 +#define DIM_MAX 128 +// 定义工作组大小,必须与内核中的WGS一致 +#define WGS 128 + +namespace mllm { + +OpenCLFlashAttentionOp::OpenCLFlashAttentionOp(Backend *bn, std::string name, bool causal_mask) : + Op(bn, std::move(name)), causal_mask_(causal_mask) { + ocl_backend_ = dynamic_cast(backend_); + if (ocl_backend_ == nullptr) throw std::runtime_error("Backend is not OpenCLBackend"); + + const std::string kernel_path = "kernel/flash_attention.cl"; + std::string build_options; + if (ocl_backend_->has_fp16_support()) { + build_options += " -DSUPPORTS_FP16"; + } + cl_program program = ocl_backend_->getProgram(kernel_path, build_options); + + cl_int err; + kernel_fp32_ = clCreateKernel(program, "flash_attention_2_prefill_fp32", &err); + check_cl_error(err, "clCreateKernel for flash_attention_2_prefill_fp32"); + + kernel_fp32_decode_ = clCreateKernel(program, "flash_attention_2_decode_fp32", &err); + check_cl_error(err, "clCreateKernel for flash_attention_2_decode_fp32"); + + kernel_fp16_ = clCreateKernel(program, 
"flash_attention_2_prefill_fp16", &err); + check_cl_error(err, "clCreateKernel for flash_attention_2_prefill_fp16"); + + kernel_fp16_decode_ = clCreateKernel(program, "flash_attention_2_decode_fp16", &err); + check_cl_error(err, "clCreateKernel for flash_attention_2_decode_fp16"); + + kernel_fp32_image_ = clCreateKernel(program, "flash_attention_2_prefill_fp32_image", &err); + check_cl_error(err, "clCreateKernel for flash_attention_2_prefill_fp32_image"); + kernel_fp32_decode_image_ = clCreateKernel(program, "flash_attention_2_decode_fp32_image", &err); + check_cl_error(err, "clCreateKernel for flash_attention_2_decode_fp32_image"); + kernel_fp16_image_ = clCreateKernel(program, "flash_attention_2_prefill_fp16_image", &err); + check_cl_error(err, "clCreateKernel for flash_attention_2_prefill_fp16_image"); + kernel_fp16_decode_image_ = clCreateKernel(program, "flash_attention_2_decode_fp16_image", &err); + check_cl_error(err, "clCreateKernel for flash_attention_2_decode_fp16_image"); + + sampler_ = clCreateSampler(ocl_backend_->getContext(), CL_FALSE, CL_ADDRESS_CLAMP_TO_EDGE, CL_FILTER_NEAREST, &err); + check_cl_error(err, "clCreateSampler for FlashAttention"); +} + +OpenCLFlashAttentionOp::~OpenCLFlashAttentionOp() { + if (kernel_fp32_) clReleaseKernel(kernel_fp32_); + if (kernel_fp32_decode_) clReleaseKernel(kernel_fp32_decode_); + if (kernel_fp16_) clReleaseKernel(kernel_fp16_); + if (kernel_fp16_decode_) clReleaseKernel(kernel_fp16_decode_); + if (kernel_fp32_image_) clReleaseKernel(kernel_fp32_image_); + if (kernel_fp32_decode_image_) clReleaseKernel(kernel_fp32_decode_image_); + if (kernel_fp16_image_) clReleaseKernel(kernel_fp16_image_); + if (kernel_fp16_decode_image_) clReleaseKernel(kernel_fp16_decode_image_); + if (sampler_) clReleaseSampler(sampler_); +} + +ErrorCode OpenCLFlashAttentionOp::reshape(vector> inputs, vector> outputs) { + auto q_tensor = inputs[0]; + auto o_tensor = outputs[0]; + o_tensor->reshape(q_tensor->batch(), q_tensor->head(), 
q_tensor->sequence(), q_tensor->dimension()); + o_tensor->setDtype(q_tensor->dtype()); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLFlashAttentionOp::setUp(vector> inputs, vector> outputs) { + for (auto &input : inputs) { + input->to(MLLM_OPENCL); + } + outputs[0]->setDtype(inputs[0]->dtype()); + outputs[0]->to(MLLM_OPENCL); + outputs[0]->alloc(); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLFlashAttentionOp::execute(vector> inputs, vector> outputs) { + auto q_tensor = inputs[0]; + auto k_tensor = inputs[1]; + auto v_tensor = inputs[2]; + auto o_tensor = outputs[0]; + + const auto data_type = q_tensor->dtype(); + + const int dim_size = q_tensor->dimension(); + + if (dim_size > DIM_MAX) { + throw std::runtime_error("FlashAttention Error: Tensor dimension size (" + std::to_string(dim_size) + ") exceeds kernel's compiled limit (DIM_MAX=" + std::to_string(DIM_MAX) + ")."); + } + + cl_mem q_buf = ocl_backend_->get_cl_mem(*q_tensor); + cl_mem k_buf = ocl_backend_->get_cl_mem(*k_tensor); + cl_mem v_buf = ocl_backend_->get_cl_mem(*v_tensor); + cl_mem o_buf = ocl_backend_->get_cl_mem(*o_tensor); + + const int batch_size = q_tensor->batch(); + const int q_head_size = q_tensor->head(); + const int kv_head_size = k_tensor->head(); + const int seq_size_q = q_tensor->sequence(); + const int seq_size_k = k_tensor->sequence(); + + int causal_mask_int = causal_mask_ ? 1 : 0; + if (seq_size_q == 1) { + causal_mask_int = 0; + } + // 2. 决策:是否使用全 Image 优化路径 + +#if !defined(__APPLE__) || !defined(__aarch64__) + bool use_image_path = (seq_size_q > 1) && (q_tensor->dimension() % 4 == 0) && (k_tensor->dimension() % 4 == 0) && (v_tensor->dimension() % 4 == 0) && (o_tensor->dimension() % 4 == 0); +#else + bool use_image_path = false; +#endif + + // bool use_image_path = false; + if (use_image_path) { + // a. 
将所有相关张量原地转换为 Image + tensorGlobal2Image(*q_tensor); + tensorGlobal2Image(*k_tensor); + tensorGlobal2Image(*v_tensor); + tensorGlobal2Image(*o_tensor); + cl_mem q_img = ocl_backend_->get_cl_mem(*q_tensor); + cl_mem k_img = ocl_backend_->get_cl_mem(*k_tensor); + cl_mem v_img = ocl_backend_->get_cl_mem(*v_tensor); + cl_mem o_img = ocl_backend_->get_cl_mem(*o_tensor); + cl_kernel kernel_to_use = nullptr; + cl_event event; + cl_int err; + if (seq_size_q == 1) { // Decode 阶段 (GEMV-like) + if (data_type == MLLM_TYPE_F32) { + kernel_to_use = kernel_fp32_decode_image_; + } else { // MLLM_TYPE_F16 + kernel_to_use = kernel_fp16_decode_image_; + } + int arg_idx = 0; + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(cl_sampler), &sampler_); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(cl_mem), &q_img); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(cl_mem), &k_img); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(cl_mem), &v_img); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(cl_mem), &o_img); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &q_head_size); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &kv_head_size); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &seq_size_k); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &dim_size); + const size_t global_work_size[2] = { + o_tensor->device_memory().image_width, // N / 4 + o_tensor->device_memory().image_height // B * H + }; + err = clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 2, nullptr, global_work_size, nullptr, 0, nullptr, &event); + ocl_backend_->addProfilingEvent(this->name() + "flash_attention2_decode_image", event); + check_cl_error(err, "clEnqueueNDRangeKernel for FlashAttention Decode Image"); + } else { // Prefill 阶段 (GEMM-like) + if (data_type == MLLM_TYPE_F32) { + kernel_to_use = kernel_fp32_image_; + } else { // MLLM_TYPE_F16 + kernel_to_use = kernel_fp16_image_; + } + int arg_idx = 0; + clSetKernelArg(kernel_to_use, arg_idx++, 
sizeof(cl_sampler), &sampler_); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(cl_mem), &q_img); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(cl_mem), &k_img); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(cl_mem), &v_img); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(cl_mem), &o_img); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &q_head_size); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &kv_head_size); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &seq_size_q); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &seq_size_k); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &dim_size); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &causal_mask_int); + const size_t local_work_size[1] = {WGS}; + const size_t num_output_rows = batch_size * q_head_size * seq_size_q; + const size_t global_work_size[1] = {num_output_rows * WGS}; + err = clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 1, nullptr, global_work_size, local_work_size, 0, nullptr, &event); + ocl_backend_->addProfilingEvent(this->name() + "flash_attention2_prefill_image", event); + check_cl_error(err, "clEnqueueNDRangeKernel for FlashAttention Prefill Image"); + } + + // d. 
将所有张量转换回 Buffer 以便后续操作 + tensorImage2Global(*q_tensor); + tensorImage2Global(*k_tensor); + tensorImage2Global(*v_tensor); + tensorImage2Global(*o_tensor); + } else { + if (data_type == MLLM_TYPE_F32 && seq_size_q == 1) { // Decode 阶段 - FP32 + cl_kernel kernel = kernel_fp32_decode_; + clSetKernelArg(kernel, 0, sizeof(cl_mem), &q_buf); + clSetKernelArg(kernel, 1, sizeof(cl_mem), &k_buf); + clSetKernelArg(kernel, 2, sizeof(cl_mem), &v_buf); + clSetKernelArg(kernel, 3, sizeof(cl_mem), &o_buf); + clSetKernelArg(kernel, 4, sizeof(int), &q_head_size); + clSetKernelArg(kernel, 5, sizeof(int), &kv_head_size); + clSetKernelArg(kernel, 6, sizeof(int), &seq_size_k); + clSetKernelArg(kernel, 7, sizeof(int), &dim_size); + const size_t local_work_size[3] = {WGS, 1, 1}; + const size_t num_output_rows = batch_size * q_head_size; + size_t global_work_size[3] = {num_output_rows * WGS, 1, 1}; + cl_event event; + cl_int err = clEnqueueNDRangeKernel( + ocl_backend_->getQueue(), kernel, 1, nullptr, + global_work_size, local_work_size, 0, nullptr, &event); + ocl_backend_->addProfilingEvent(this->name() + "flash_attention2", event); + check_cl_error(err, "clEnqueueNDRangeKernel for FlashAttention Decode"); + } else if (data_type == MLLM_TYPE_F32) { // Prefill 阶段 - FP32 + cl_kernel kernel = kernel_fp32_; + clSetKernelArg(kernel, 0, sizeof(cl_mem), &q_buf); + clSetKernelArg(kernel, 1, sizeof(cl_mem), &k_buf); + clSetKernelArg(kernel, 2, sizeof(cl_mem), &v_buf); + clSetKernelArg(kernel, 3, sizeof(cl_mem), &o_buf); + clSetKernelArg(kernel, 4, sizeof(int), &q_head_size); + clSetKernelArg(kernel, 5, sizeof(int), &kv_head_size); + clSetKernelArg(kernel, 6, sizeof(int), &seq_size_q); + clSetKernelArg(kernel, 7, sizeof(int), &seq_size_k); + clSetKernelArg(kernel, 8, sizeof(int), &dim_size); + clSetKernelArg(kernel, 9, sizeof(int), &causal_mask_int); + const size_t local_work_size[3] = {WGS, 1, 1}; + const size_t num_output_rows = batch_size * q_head_size * seq_size_q; + size_t global_work_size[3] = 
{num_output_rows * WGS, 1, 1}; + cl_event event; + cl_int err = clEnqueueNDRangeKernel( + ocl_backend_->getQueue(), kernel, 1, nullptr, + global_work_size, local_work_size, 0, nullptr, &event); + ocl_backend_->addProfilingEvent(this->name() + "flash_attention2", event); + check_cl_error(err, "clEnqueueNDRangeKernel for FlashAttention V2"); + } else if (data_type == MLLM_TYPE_F16 && seq_size_q == 1) { // Decode 阶段 - FP16 + cl_kernel kernel = kernel_fp16_decode_; + clSetKernelArg(kernel, 0, sizeof(cl_mem), &q_buf); + clSetKernelArg(kernel, 1, sizeof(cl_mem), &k_buf); + clSetKernelArg(kernel, 2, sizeof(cl_mem), &v_buf); + clSetKernelArg(kernel, 3, sizeof(cl_mem), &o_buf); + clSetKernelArg(kernel, 4, sizeof(int), &q_head_size); + clSetKernelArg(kernel, 5, sizeof(int), &kv_head_size); + clSetKernelArg(kernel, 6, sizeof(int), &seq_size_k); + clSetKernelArg(kernel, 7, sizeof(int), &dim_size); + const size_t local_work_size[3] = {WGS, 1, 1}; + const size_t num_output_rows = batch_size * q_head_size; + size_t global_work_size[3] = {num_output_rows * WGS, 1, 1}; + cl_event event; + cl_int err = clEnqueueNDRangeKernel( + ocl_backend_->getQueue(), kernel, 1, nullptr, + global_work_size, local_work_size, 0, nullptr, &event); + ocl_backend_->addProfilingEvent(this->name() + "flash_attention2_fp16_decode", event); + check_cl_error(err, "clEnqueueNDRangeKernel for FlashAttention Decode FP16"); + } else if (data_type == MLLM_TYPE_F16) { // Prefill 阶段 - FP16 + cl_kernel kernel = kernel_fp16_; + clSetKernelArg(kernel, 0, sizeof(cl_mem), &q_buf); + clSetKernelArg(kernel, 1, sizeof(cl_mem), &k_buf); + clSetKernelArg(kernel, 2, sizeof(cl_mem), &v_buf); + clSetKernelArg(kernel, 3, sizeof(cl_mem), &o_buf); + clSetKernelArg(kernel, 4, sizeof(int), &q_head_size); + clSetKernelArg(kernel, 5, sizeof(int), &kv_head_size); + clSetKernelArg(kernel, 6, sizeof(int), &seq_size_q); + clSetKernelArg(kernel, 7, sizeof(int), &seq_size_k); + clSetKernelArg(kernel, 8, sizeof(int), &dim_size); + 
clSetKernelArg(kernel, 9, sizeof(int), &causal_mask_int); + const size_t local_work_size[3] = {WGS, 1, 1}; + const size_t num_output_rows = batch_size * q_head_size * seq_size_q; + size_t global_work_size[3] = {num_output_rows * WGS, 1, 1}; + cl_event event; + cl_int err = clEnqueueNDRangeKernel( + ocl_backend_->getQueue(), kernel, 1, nullptr, + global_work_size, local_work_size, 0, nullptr, &event); + ocl_backend_->addProfilingEvent(this->name() + "flash_attention2_fp16", event); + check_cl_error(err, "clEnqueueNDRangeKernel for FlashAttention V2 FP16"); + + } else { + return NOT_SUPPORT; + } + } + + return MLLM_NO_ERROR; +} +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLFlashAttentionOp.hpp b/mllm/backends/opencl/op/OpenCLFlashAttentionOp.hpp new file mode 100644 index 000000000..185258506 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLFlashAttentionOp.hpp @@ -0,0 +1,43 @@ +#ifndef OPENCL_FLASHATTENTION_OP_HPP +#define OPENCL_FLASHATTENTION_OP_HPP + +#include "Op.hpp" +#include "../OpenCLBackend.hpp" + +namespace mllm { + +class OpenCLFlashAttentionOp : public Op { +public: + OpenCLFlashAttentionOp(Backend *bn, std::string name, bool causal_mask); + ~OpenCLFlashAttentionOp() override; + + ErrorCode reshape(vector> inputs, vector> outputs) override; + ErrorCode setUp(vector> inputs, vector> outputs) override; + ErrorCode execute(vector> inputs, vector> outputs) override; + +private: + bool causal_mask_; + cl_kernel kernel_fp32_ = nullptr; + cl_kernel kernel_fp32_decode_ = nullptr; + cl_kernel kernel_fp16_ = nullptr; + cl_kernel kernel_fp16_decode_ = nullptr; + OpenCLBackend *ocl_backend_ = nullptr; + + cl_kernel kernel_fp32_image_ = nullptr; + cl_kernel kernel_fp32_decode_image_ = nullptr; + cl_kernel kernel_fp16_image_ = nullptr; + cl_kernel kernel_fp16_decode_image_ = nullptr; + cl_sampler sampler_ = nullptr; +}; + +class OpenCLFlashAttentionOpCreator : public OpenCLBackend::Creator { +public: + Op *create(OpParam 
op_param, Backend *bn, string name, int threadCount) const override { + bool causal_mask = (bool)op_param.at("causal_mask"); + return new OpenCLFlashAttentionOp(bn, name, causal_mask); + } +}; + +} // namespace mllm + +#endif // OPENCL_FLASHATTENTION_OP_HPP diff --git a/mllm/backends/opencl/op/OpenCLKVCacheOp.cpp b/mllm/backends/opencl/op/OpenCLKVCacheOp.cpp new file mode 100644 index 000000000..4acc1a476 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLKVCacheOp.cpp @@ -0,0 +1,170 @@ +// 文件: OpenCLKVCacheOp.cpp + +#include "OpenCLKVCacheOp.hpp" +#include "Types.hpp" +#include "utils/OpenCLTools.hpp" + +namespace mllm { + +OpenCLKVCacheOp::OpenCLKVCacheOp(Backend *bn, std::string name, int hidden, int head, int n_rep, bool fa2, int cache_max) : + Op(bn, std::move(name)), hidden_(hidden), head_(head), n_rep_(n_rep), cache_limit_(cache_max), fa2_(fa2) { + if (fa2_) { + n_rep_ = 1; // Flash Attention 2 does not use n_rep + } + ocl_backend_ = dynamic_cast(backend_); + if (ocl_backend_ == nullptr) throw std::runtime_error("Backend is not OpenCLBackend"); + + cache_ = std::make_shared(bn); + cache_->setName(name + ".Cache"); + const int KVCache_batch = 1; + cache_->reshape(KVCache_batch, head_ * n_rep_, cache_limit_, hidden_); + cache_->setDtype(MLLM_TYPE_F32); + cache_->alloc(); + cache_->cl(); + + const std::string kernel_path = "kernel/kvcache.cl"; + cl_program program = ocl_backend_->getProgram(kernel_path); + + cl_int err; + // Load BSHD kernels + kernel_fp32_bshd_ = clCreateKernel(program, "update_kv_cache_fp32_bshd", &err); + check_cl_error(err, "clCreateKernel for update_kv_cache_fp32_bshd"); + kernel_fp16_bshd_ = clCreateKernel(program, "update_kv_cache_fp16_bshd", &err); + check_cl_error(err, "clCreateKernel for update_kv_cache_fp16_bshd"); + + // Load BHSD kernels + kernel_fp32_bhsd_ = clCreateKernel(program, "update_kv_cache_fp32_bhsd", &err); + check_cl_error(err, "clCreateKernel for update_kv_cache_fp32_bhsd"); + kernel_fp16_bhsd_ = clCreateKernel(program, 
"update_kv_cache_fp16_bhsd", &err); + check_cl_error(err, "clCreateKernel for update_kv_cache_fp16_bhsd"); +} + +OpenCLKVCacheOp::~OpenCLKVCacheOp() { + if (kernel_fp32_bshd_) clReleaseKernel(kernel_fp32_bshd_); + if (kernel_fp16_bshd_) clReleaseKernel(kernel_fp16_bshd_); + if (kernel_fp32_bhsd_) clReleaseKernel(kernel_fp32_bhsd_); + if (kernel_fp16_bhsd_) clReleaseKernel(kernel_fp16_bhsd_); +} + +ErrorCode OpenCLKVCacheOp::reshape(vector> inputs, vector> outputs) { + outputs[0]->setCtype(inputs[0]->ctype()); + const int new_sequence_length = cache_seq_len_ + inputs[0]->sequence(); + outputs[0]->reshape(cache_->batch(), cache_->head(), new_sequence_length, cache_->dimension()); + outputs[0]->setDtype(cache_->dtype()); + if (inputs[0]->ctype() == BHSD && cache_->ctype() != inputs[0]->ctype()) { + // cache_->cpu(); + cache_->setCtype(BHSD); + cache_->reshape(cache_->batch(), head_ * n_rep_, cache_limit_, hidden_); + // cache_->alloc(); + // cache_->cl(); + } + if (cache_->dtype() != inputs[0]->dtype()) { + cache_->setDtype(inputs[0]->dtype()); + cache_->alloc(); + } + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLKVCacheOp::setUp(vector> inputs, vector> outputs) { + inputs[0]->to(MLLM_OPENCL); + outputs[0]->setDtype(cache_->dtype()); + outputs[0]->alloc(); + + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLKVCacheOp::execute(vector> inputs, vector> outputs) { + auto input = inputs[0]; + auto output = outputs[0]; + + if (cache_seq_len_ + input->sequence() > cache_limit_) { + std::cerr << "KVCache is full, cannot update." 
<< std::endl; + return MLLM_NO_ERROR; // Cache is full + } + + cl_kernel kernel_to_use = nullptr; + auto ctype = input->ctype(); + + if (input->dtype() == MLLM_TYPE_F32) { + if (cache_->dtype() != MLLM_TYPE_F32) { /* Realloc logic for cache_ */ + } + if (ctype == BSHD) { + kernel_to_use = kernel_fp32_bshd_; + } else if (ctype == BHSD) { + kernel_to_use = kernel_fp32_bhsd_; + } + } else if (input->dtype() == MLLM_TYPE_F16) { + if (cache_->dtype() != MLLM_TYPE_F16) { /* Realloc logic for cache_ */ + } + if (ctype == BSHD) { + kernel_to_use = kernel_fp16_bshd_; + } else if (ctype == BHSD) { + kernel_to_use = kernel_fp16_bhsd_; + } + } + + cl_mem src_buf = ocl_backend_->get_cl_mem(*input); + cl_mem cache_buf = ocl_backend_->get_cl_mem(*cache_); + + const int h_in = input->head(); + const int s_in = input->sequence(); + const int d_in = input->dimension(); + const int h_cache = cache_->head(); + const int s_cache = cache_->sequence(); + + clSetKernelArg(kernel_to_use, 0, sizeof(cl_mem), &src_buf); + clSetKernelArg(kernel_to_use, 1, sizeof(cl_mem), &cache_buf); + clSetKernelArg(kernel_to_use, 2, sizeof(int), &h_in); + clSetKernelArg(kernel_to_use, 3, sizeof(int), &s_in); + clSetKernelArg(kernel_to_use, 4, sizeof(int), &d_in); + clSetKernelArg(kernel_to_use, 5, sizeof(int), &h_cache); + clSetKernelArg(kernel_to_use, 6, sizeof(int), &s_cache); + clSetKernelArg(kernel_to_use, 7, sizeof(int), &n_rep_); + clSetKernelArg(kernel_to_use, 8, sizeof(int), &cache_seq_len_); + + const size_t global_work_size[3] = {(size_t)d_in, (size_t)s_in, (size_t)h_in * n_rep_}; + + cl_int err = clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 3, nullptr, global_work_size, nullptr, 0, nullptr, nullptr); + check_cl_error(err, "clEnqueueNDRangeKernel for KVCache Update"); + + cl_mem out_buf = ocl_backend_->get_cl_mem(*output); + if (ctype == BHSD) { + const size_t batch_size = output->batch(); + const size_t head_size = output->head(); + const size_t seq_len_out = output->sequence(); 
+ const size_t dim_size = output->dimension(); + const size_t seq_len_cache = cache_->sequence(); // cache_limit + const size_t dtype_size = output->dtypeSize(); + for (size_t b = 0; b < batch_size; ++b) { + for (size_t h = 0; h < head_size; ++h) { + size_t src_offset_bytes = (b * head_size * seq_len_cache + h * seq_len_cache) * dim_size * dtype_size; + size_t dst_offset_bytes = (b * head_size * seq_len_out + h * seq_len_out) * dim_size * dtype_size; + size_t bytes_per_head = seq_len_out * dim_size * dtype_size; + cl_event event; + err = clEnqueueCopyBuffer( + ocl_backend_->getQueue(), + cache_buf, + out_buf, + src_offset_bytes, + dst_offset_bytes, + bytes_per_head, + 0, nullptr, &event); + ocl_backend_->addProfilingEvent(this->name(), event); + check_cl_error(err, "clEnqueueCopyBuffer for BHSD head"); + } + } + } else { + // 对于 BSHD 布局,可以直接进行线性复制 + size_t bytes_to_copy = output->count() * output->dtypeSize(); + cl_event event; + err = clEnqueueCopyBuffer(ocl_backend_->getQueue(), cache_buf, out_buf, 0, 0, bytes_to_copy, 0, nullptr, &event); + ocl_backend_->addProfilingEvent(this->name(), event); + check_cl_error(err, "clEnqueueCopyBuffer from cache to output"); + } + + cache_seq_len_ += s_in; + + return MLLM_NO_ERROR; +} + +} // namespace mllm diff --git a/mllm/backends/opencl/op/OpenCLKVCacheOp.hpp b/mllm/backends/opencl/op/OpenCLKVCacheOp.hpp new file mode 100644 index 000000000..1267a82c8 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLKVCacheOp.hpp @@ -0,0 +1,60 @@ +#ifndef OPENCL_KVCACHE_OP_HPP +#define OPENCL_KVCACHE_OP_HPP + +#include "Op.hpp" +#include "../OpenCLBackend.hpp" + +namespace mllm { + +class OpenCLKVCacheOp : public Op { +public: + OpenCLKVCacheOp(Backend *bn, std::string name, int hidden, int head, int n_rep, bool fa2, int cache_max); + ~OpenCLKVCacheOp() override; + + ErrorCode reshape(vector> inputs, vector> outputs) override; + ErrorCode setUp(vector> inputs, vector> outputs) override; + ErrorCode execute(vector> inputs, vector> outputs) 
override; + int getCacheSeqLen() override { + return cache_seq_len_; + } + void clearCache() override { + cache_seq_len_ = 0; + cache_->cache_seq_len_ = cache_seq_len_; + } + +private: + shared_ptr cache_; + int n_rep_; + int cache_limit_; + int cache_seq_len_ = 0; + int hidden_; + int head_; + bool fa2_; + + // Kernels for BSHD layout + cl_kernel kernel_fp32_bshd_ = nullptr; + cl_kernel kernel_fp16_bshd_ = nullptr; + + // Kernels for BHSD layout (New) + cl_kernel kernel_fp32_bhsd_ = nullptr; + cl_kernel kernel_fp16_bhsd_ = nullptr; + + OpenCLBackend *ocl_backend_ = nullptr; +}; + +class OpenCLKVCacheOpCreator : public OpenCLBackend::Creator { +public: + Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + int n_rep = (int)op_param["n_rep"]; + int cache_max = (int)op_param["cache_max"]; + bool for_xnn = (bool)op_param["for_xnn"]; + int hidden = (int)op_param["hidden"]; + int head = (int)op_param["head"]; + bool fa2 = (bool)op_param["fa2"]; + return new OpenCLKVCacheOp(bn, name, hidden, head, n_rep, fa2, cache_max); + } +}; + +} // namespace mllm + +#endif // OPENCL_KVCACHE_OP_HPP \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLLikeOp.cpp b/mllm/backends/opencl/op/OpenCLLikeOp.cpp new file mode 100644 index 000000000..9da8ba797 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLLikeOp.cpp @@ -0,0 +1,60 @@ +#include "OpenCLLikeOp.hpp" +#include "Types.hpp" +#include "utils/OpenCLTools.hpp" + +namespace mllm { + +OpenCLLikeOp::OpenCLLikeOp(Backend *bn, std::string name, float like_value) : + Op(bn, std::move(name)), like_value_(like_value) { + ocl_backend_ = dynamic_cast(backend_); + if (ocl_backend_ == nullptr) throw std::runtime_error("Backend is not OpenCLBackend"); + + const std::string kernel_path = "kernel/like.cl"; + cl_program program = ocl_backend_->getProgram(kernel_path); + + cl_int err; + // 内核将处理所有数据类型,但在内核内部进行转换 + kernel_ = clCreateKernel(program, "like", &err); + check_cl_error(err, 
"clCreateKernel for like");
+}
+
+OpenCLLikeOp::~OpenCLLikeOp() {
+    if (kernel_) clReleaseKernel(kernel_);
+}
+
+ErrorCode OpenCLLikeOp::reshape(vector> inputs, vector> outputs) {
+    // The output's shape and dtype mirror the input exactly
+    outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension());
+    outputs[0]->setDtype(inputs[0]->dtype());
+    outputs[0]->to(MLLM_OPENCL); // make sure the output tensor lives on OpenCL
+    return MLLM_NO_ERROR;
+}
+
+ErrorCode OpenCLLikeOp::setUp(vector> inputs, vector> outputs) {
+    // Like never reads the input tensor's data, so inputs[0] does not need to(MLLM_OPENCL)
+    outputs[0]->setDtype(inputs[0]->dtype());
+    outputs[0]->alloc();
+    return MLLM_NO_ERROR;
+}
+
+ErrorCode OpenCLLikeOp::execute(vector> inputs, vector> outputs) {
+    auto output = outputs[0];
+    cl_mem out_buf = ocl_backend_->get_cl_mem(*output);
+    const int count = output->count();
+    const int dtype_size = output->dtypeSize();
+
+    clSetKernelArg(kernel_, 0, sizeof(cl_mem), &out_buf);
+    clSetKernelArg(kernel_, 1, sizeof(float), &like_value_);
+    clSetKernelArg(kernel_, 2, sizeof(int), &count);
+    clSetKernelArg(kernel_, 3, sizeof(int), &dtype_size);
+
+    const size_t global_work_size = (size_t)count;
+    cl_event event;
+    cl_int err = clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_, 1, nullptr,
+                                        &global_work_size, nullptr, 0, nullptr, &event);
+    check_cl_error(err, "clEnqueueNDRangeKernel for Like");
+
+    return MLLM_NO_ERROR;
+}
+
+} // namespace mllm
\ No newline at end of file
diff --git a/mllm/backends/opencl/op/OpenCLLikeOp.hpp b/mllm/backends/opencl/op/OpenCLLikeOp.hpp
new file mode 100644
index 000000000..66d8a727c
--- /dev/null
+++ b/mllm/backends/opencl/op/OpenCLLikeOp.hpp
@@ -0,0 +1,35 @@
+#ifndef OPENCL_LIKE_OP_HPP
+#define OPENCL_LIKE_OP_HPP
+
+#include "Op.hpp"
+#include "../OpenCLBackend.hpp"
+
+namespace mllm {
+
+class OpenCLLikeOp : public Op {
+public:
+    OpenCLLikeOp(Backend *bn, std::string name, float like_value);
+    ~OpenCLLikeOp() override;
+
+    ErrorCode reshape(vector> inputs, vector> outputs) 
override;
+    ErrorCode setUp(vector> inputs, vector> outputs) override;
+    ErrorCode execute(vector> inputs, vector> outputs) override;
+
+private:
+    float like_value_;
+    cl_kernel kernel_ = nullptr;
+    OpenCLBackend *ocl_backend_ = nullptr;
+};
+
+class OpenCLLikeOpCreator : public OpenCLBackend::Creator {
+public:
+    Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override {
+        // Read "like_value" out of op_param
+        float like_value = op_param.at("like_value");
+        return new OpenCLLikeOp(bn, name, like_value);
+    }
+};
+
+} // namespace mllm
+
+#endif // OPENCL_LIKE_OP_HPP
\ No newline at end of file
diff --git a/mllm/backends/opencl/op/OpenCLLinearOp.cpp b/mllm/backends/opencl/op/OpenCLLinearOp.cpp
new file mode 100644
index 000000000..5d5eeae87
--- /dev/null
+++ b/mllm/backends/opencl/op/OpenCLLinearOp.cpp
@@ -0,0 +1,302 @@
+#include "OpenCLLinearOp.hpp"
+#include "Backend.hpp"
+#include "DataType.hpp"
+#include "Types.hpp"
+#include "utils/OpenCLTools.hpp"
+#include 
+// #include 
+#include 
+#include 
+
+namespace mllm {
+
+OpenCLLinearOp::OpenCLLinearOp(Backend *bn, string opName, int in_features, int out_features, bool bias) :
+    Op(bn, opName), in_features_(in_features), out_features_(out_features), support_bias_(bias) {
+    ocl_backend_ = dynamic_cast(backend_);
+    if (ocl_backend_ == nullptr) {
+        throw std::runtime_error("Backend for OpenCLLinearOp is not OpenCLBackend");
+    }
+
+    const std::string kernel_path = "kernel/matmul_transb_bias.cl";
+    std::string build_options;
+    if (ocl_backend_->has_fp16_support()) {
+        build_options += " -DSUPPORTS_FP16";
+    }
+
+    cl_program program = ocl_backend_->getProgram(kernel_path, build_options);
+    cl_int err;
+    kernel_fp32_transb_bias_ = clCreateKernel(program, "gemm_fp32_transb_bias", &err);
+    check_cl_error(err, "CreateKernel gemm_fp32_transb_bias");
+    kernel_fp16_transb_bias_ = clCreateKernel(program, "gemm_fp16_transb_bias", &err);
+    check_cl_error(err, "CreateKernel gemm_fp16_transb_bias");
+    
kernel_fp16_q4_0_transb_bias_ = clCreateKernel(program, "gemm_fp16_q4_0_transb_bias", &err); + check_cl_error(err, "CreateKernel gemm_fp16_q4_0_transb_bias"); + kernel_fp32_q4_0_transb_bias_ = clCreateKernel(program, "gemm_fp32_q4_0_transb_bias", &err); + check_cl_error(err, "CreateKernel gemm_fp32_q4_0_transb_bias"); + kernel_gemv_fp32_q4_0_transb_bias_ = clCreateKernel(program, "gemv_fp32_q4_0_transb_bias", &err); + check_cl_error(err, "CreateKernel gemv_fp32_q4_0_transb_bias"); + kernel_gemv_fp16_q4_0_transb_bias_ = clCreateKernel(program, "gemv_fp16_q4_0_transb_bias", &err); + check_cl_error(err, "CreateKernel gemv_fp16_q4_0_transb_bias"); + if (ocl_backend_->has_fp16_support()) { + kernel_gemv_fp16_q4_0_transb_bias_half16_ = clCreateKernel(program, "gemv_fp16_q4_0_transb_bias_half16", &err); + check_cl_error(err, "CreateKernel gemv_fp16_q4_0_transb_bias_half16"); + } + + kernel_fp32_q4_0_transb_bias_image2d_ = clCreateKernel(program, "gemm_fp32_q4_0_transb_bias_image_pipe", &err); + check_cl_error(err, "CreateKernel gemm_fp32_q4_0_transb_bias_image_pipe"); + kernel_fp16_q4_0_transb_bias_image2d_ = clCreateKernel(program, "gemm_fp16_q4_0_transb_bias_image_pipe", &err); + check_cl_error(err, "CreateKernel gemm_fp16_q4_0_transb_bias_image_pipe"); + kernel_gemv_fp32_q4_0_transb_bias_image2d_ = clCreateKernel(program, "gemv_fp32_q4_0_transb_bias_image_pipe", &err); + check_cl_error(err, "CreateKernel gemv_fp32_q4_0_transb_bias_image_pipe"); + kernel_gemv_fp16_q4_0_transb_bias_image2d_ = clCreateKernel(program, "gemv_fp16_q4_0_transb_bias_image_pipe", &err); + check_cl_error(err, "CreateKernel gemv_fp16_q4_0_transb_bias_image_pipe"); +} + +OpenCLLinearOp::~OpenCLLinearOp() { + if (kernel_fp32_transb_bias_) clReleaseKernel(kernel_fp32_transb_bias_); + if (kernel_fp16_transb_bias_) clReleaseKernel(kernel_fp16_transb_bias_); + if (kernel_fp32_q4_0_transb_bias_) clReleaseKernel(kernel_fp32_q4_0_transb_bias_); + if (kernel_fp16_q4_0_transb_bias_) 
clReleaseKernel(kernel_fp16_q4_0_transb_bias_); + if (kernel_gemv_fp32_q4_0_transb_bias_) clReleaseKernel(kernel_gemv_fp32_q4_0_transb_bias_); + if (kernel_gemv_fp16_q4_0_transb_bias_) clReleaseKernel(kernel_gemv_fp16_q4_0_transb_bias_); + if (kernel_gemv_fp16_q4_0_transb_bias_half16_) clReleaseKernel(kernel_gemv_fp16_q4_0_transb_bias_half16_); + if (kernel_fp32_q4_0_transb_bias_image2d_) clReleaseKernel(kernel_fp32_q4_0_transb_bias_image2d_); + if (kernel_fp16_q4_0_transb_bias_image2d_) clReleaseKernel(kernel_fp16_q4_0_transb_bias_image2d_); + if (kernel_gemv_fp32_q4_0_transb_bias_image2d_) clReleaseKernel(kernel_gemv_fp32_q4_0_transb_bias_image2d_); + if (kernel_gemv_fp16_q4_0_transb_bias_image2d_) clReleaseKernel(kernel_gemv_fp16_q4_0_transb_bias_image2d_); +} + +ErrorCode OpenCLLinearOp::reshape(vector> inputs, vector> outputs) { + assert(inputs.size() == 1); + assert(outputs.size() == 1); + + // Input: [batch, 1, seq_len, in_features] + // Output: [batch, 1, seq_len, out_features] + assert(inputs[0]->head() == 1); + assert(in_features_ == inputs[0]->dimension()); + + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), out_features_); + outputs[0]->setDtype(inputs[0]->dtype()); + // #if !defined(__APPLE__) || !defined(__aarch64__) + // const size_t max_image_width = ocl_backend_->getMaxImage2dWidth(); + // if (out_features_ % 4 == 0 && (out_features_ / 4) <= max_image_width) { // inputs[0]->sequence() == 1 && + // auto &out_mem = outputs[0]->device_memory(); + // out_mem.type = MEM_TYPE_IMAGE_2D; + // out_mem.image_width = out_features_ / 4; + // out_mem.image_height = inputs[0]->batch() * inputs[0]->head() * inputs[0]->sequence(); + // } + // #endif + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLLinearOp::load(AbstructLoader &loader) { + weight_.setName(name() + ".weight"); + weight_.setBackend(ocl_backend_); + weight_.reshape(1, 1, out_features_, in_features_); + weight_.setDtype(loader.getDataType(weight_.name())); + 
weight_.alloc(); + loader.load(&weight_); + // weight_.saveQ4Data_d(); + + if (support_bias_) { + bias_.setName(name() + ".bias"); + bias_.setBackend(ocl_backend_); + bias_.reshape(1, 1, 1, out_features_); + bias_.setDtype(loader.getDataType(bias_.name())); + bias_.alloc(); + loader.load(&bias_); + } + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLLinearOp::setUp(vector> inputs, vector> outputs) { + // Move all tensors to OpenCL device + inputs[0]->to(MLLM_OPENCL); + outputs[0]->to(MLLM_OPENCL); + // Allocate output memory + outputs[0]->alloc(); + return MLLM_NO_ERROR; +} +ErrorCode OpenCLLinearOp::free(vector> inputs, vector> outputs) { + weight_.unload(); + if (support_bias_) { + bias_.unload(); + } + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLLinearOp::execute(vector> inputs, vector> outputs) { + auto &A = inputs[0]; + auto &W = weight_; + auto &C = outputs[0]; + + // 1. 选择 Matmul+Bias 内核 + cl_kernel kernel_to_use = nullptr; + // 2. 设置参数并执行 + const int M = C->sequence(); + const int K = A->dimension(); + const int N = C->dimension(); + const int B_size = A->batch(); + const int H_size = A->head(); + const int K_b = W.dimension(); + + cl_mem a_mem = ocl_backend_->get_cl_mem(*A); + cl_mem w_mem = ocl_backend_->get_cl_mem(W); + cl_mem c_mem = ocl_backend_->get_cl_mem(*C); + + cl_mem bias_mem_arg = support_bias_ ? ocl_backend_->get_cl_mem(bias_) : a_mem; // 使用 a_mem 作为哑参数 + const int has_bias_flag = support_bias_ ? 
1 : 0; + cl_event event; + cl_int err; + + if (M == 1 && (A->dtype() == MLLM_TYPE_F32 || A->dtype() == MLLM_TYPE_F16) && W.dtype() == MLLM_TYPE_Q4_0) { + bool use_image_path_for_gemv = (A->dimension() % 4 == 0 && C->dimension() % 4 == 0) && (C->device_memory().type == MEM_TYPE_IMAGE_2D); + if (use_image_path_for_gemv) { + // --- GEMV All Image 路径 --- + tensorGlobal2Image(*A); + tensorGlobal2Image(*C); + cl_kernel kernel_to_use = nullptr; + if (A->dtype() == MLLM_TYPE_F32) { + kernel_to_use = kernel_gemv_fp32_q4_0_transb_bias_image2d_; + } else { // MLLM_TYPE_F16 + kernel_to_use = kernel_gemv_fp16_q4_0_transb_bias_image2d_; + } + cl_mem a_img_mem = ocl_backend_->get_cl_mem(*A); + cl_mem c_img_mem = ocl_backend_->get_cl_mem(*C); + cl_sampler sampler = clCreateSampler(ocl_backend_->getContext(), CL_FALSE, CL_ADDRESS_CLAMP_TO_EDGE, CL_FILTER_NEAREST, &err); + check_cl_error(err, "clCreateSampler for GEMV Image"); + int arg_idx = 0; + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(cl_sampler), &sampler); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(cl_mem), &a_img_mem); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(cl_mem), &w_mem); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(cl_mem), &bias_mem_arg); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(cl_mem), &c_img_mem); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &K); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &N); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &H_size); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &has_bias_flag); + const size_t global_work_size[2] = {(size_t)N / 4, 1}; // Width: N/4, Height: 1 + err = clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 1, nullptr, global_work_size, nullptr, 0, nullptr, &event); + ocl_backend_->addProfilingEvent(this->name() + "_gemv_image", event); + check_cl_error(err, "EnqueueNDRangeKernel GEMV Image"); + clReleaseSampler(sampler); + tensorImage2Global(*A); + tensorImage2Global(*C); + } else { 
+ cl_kernel kernel_to_use = nullptr; + if (A->dtype() == MLLM_TYPE_F32) { + kernel_to_use = kernel_gemv_fp32_q4_0_transb_bias_; + } else { // MLLM_TYPE_F16 + kernel_to_use = kernel_gemv_fp16_q4_0_transb_bias_; + } + int arg_idx = 0; + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(cl_mem), &a_mem); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(cl_mem), &w_mem); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(cl_mem), &bias_mem_arg); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(cl_mem), &c_mem); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &K); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &N); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &H_size); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &has_bias_flag); + if (ocl_backend_->has_fp16_support() && A->dtype() == MLLM_TYPE_F16) { + const size_t local_work_size[2] = {128, 1}; // 建议从128开始 + const size_t global_work_size[2] = {(size_t)N * local_work_size[0], (size_t)(B_size * H_size)}; + err = clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 2, nullptr, global_work_size, local_work_size, 0, nullptr, &event); + } else { + const size_t local_work_size[2] = {256, 1}; + const size_t global_work_size[2] = {(size_t)N * local_work_size[0], (size_t)(B_size * H_size)}; + err = clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 2, nullptr, global_work_size, local_work_size, 0, nullptr, &event); + } + ocl_backend_->addProfilingEvent(this->name(), event); + check_cl_error(err, "EnqueueNDRangeKernel GEMV"); + } + } else if (C->device_memory().type == MEM_TYPE_IMAGE_2D) { + tensorGlobal2Image(*inputs[0]); + tensorGlobal2Image(*C); + cl_kernel kernel_to_use = nullptr; + if (A->dtype() == MLLM_TYPE_F32 && W.dtype() == MLLM_TYPE_Q4_0) { + kernel_to_use = kernel_fp32_q4_0_transb_bias_image2d_; + } else if (A->dtype() == MLLM_TYPE_F16 && W.dtype() == MLLM_TYPE_Q4_0) { + kernel_to_use = kernel_fp16_q4_0_transb_bias_image2d_; + } else { + throw 
std::runtime_error("Unsupported data types for OpenCLLinearOp Image Path."); + } + cl_mem a_img_mem = ocl_backend_->get_cl_mem(*A); + cl_mem c_img_mem = ocl_backend_->get_cl_mem(*C); + cl_sampler sampler = clCreateSampler(ocl_backend_->getContext(), CL_FALSE, CL_ADDRESS_CLAMP_TO_EDGE, CL_FILTER_NEAREST, &err); + check_cl_error(err, "clCreateSampler for LinearOp Image"); + int arg_idx = 0; + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(cl_sampler), &sampler); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(cl_mem), &a_img_mem); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(cl_mem), &w_mem); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(cl_mem), &bias_mem_arg); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(cl_mem), &c_img_mem); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &M); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &K); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &N); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &H_size); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &K_b); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &has_bias_flag); + const size_t global_work_size[2] = {(size_t)N / 4, (size_t)(B_size * H_size * M)}; + err = clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 2, nullptr, global_work_size, nullptr, 0, nullptr, &event); + ocl_backend_->addProfilingEvent(this->name() + "_image", event); + check_cl_error(err, "EnqueueNDRangeKernel Image GEMM"); + clReleaseSampler(sampler); + tensorImage2Global(*C); + tensorImage2Global(*A); + } else { + if (A->dtype() == MLLM_TYPE_F32 && W.dtype() == MLLM_TYPE_F32) { + kernel_to_use = kernel_fp32_transb_bias_; + } else if (A->dtype() == MLLM_TYPE_F16 && W.dtype() == MLLM_TYPE_F16) { + kernel_to_use = kernel_fp16_transb_bias_; + } else if (A->dtype() == MLLM_TYPE_F32 && W.dtype() == MLLM_TYPE_Q4_0) { + kernel_to_use = kernel_fp32_q4_0_transb_bias_; + } else if (A->dtype() == MLLM_TYPE_F16 && W.dtype() == MLLM_TYPE_Q4_0) { 
+ kernel_to_use = kernel_fp16_q4_0_transb_bias_; + } else { + throw std::runtime_error("Unsupported data types for OpenCLLinearOp."); + } + int arg_idx = 0; + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(cl_mem), &a_mem); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(cl_mem), &w_mem); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(cl_mem), &bias_mem_arg); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(cl_mem), &c_mem); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &M); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &K); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &N); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &H_size); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &K_b); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &has_bias_flag); + if (kernel_to_use == kernel_fp16_q4_0_transb_bias_ && ocl_backend_->has_fp16_support()) { + const size_t TILE_M = 64; + const size_t TILE_N = 64; + const size_t THREADS_X = 8; + const size_t THREADS_Y = 8; + const size_t global_work_size[3] = { + (size_t)ceil((float)N / TILE_N) * THREADS_X, + (size_t)ceil((float)M / TILE_M) * THREADS_Y, + (size_t)(B_size * H_size)}; + const size_t local_work_size[3] = {THREADS_X, THREADS_Y, 1}; + cl_event event; + cl_int err = clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 3, nullptr, global_work_size, local_work_size, 0, nullptr, &event); + ocl_backend_->addProfilingEvent(this->name() + "_tiled_q4", event); + check_cl_error(err, "EnqueueNDRangeKernel tiled gemm_fp16_q4_0_transb_bias"); + + } else { + const size_t TILE_SIZE = 16; + const size_t global_work_size[3] = { + (size_t)(((N + TILE_SIZE - 1) / TILE_SIZE) * TILE_SIZE), + (size_t)(((M + TILE_SIZE - 1) / TILE_SIZE) * TILE_SIZE), + (size_t)(B_size * H_size)}; + const size_t local_work_size[3] = {TILE_SIZE, TILE_SIZE, 1}; + cl_event event; + cl_int err = clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 3, nullptr, global_work_size, 
local_work_size, 0, nullptr, &event); + ocl_backend_->addProfilingEvent(this->name(), event); + check_cl_error(err, "EnqueueNDRangeKernel fused matmul_bias"); + } + } + + return MLLM_NO_ERROR; +} + +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLLinearOp.hpp b/mllm/backends/opencl/op/OpenCLLinearOp.hpp new file mode 100644 index 000000000..616d4631d --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLLinearOp.hpp @@ -0,0 +1,56 @@ +#ifndef MLLM_OPENCLLINEAROP_H +#define MLLM_OPENCLLINEAROP_H + +#include "Op.hpp" +#include "../OpenCLBackend.hpp" + +namespace mllm { + +class OpenCLLinearOp final : public Op { +public: + OpenCLLinearOp(Backend *bn, string opName, int in_features, int out_features, bool bias); + virtual ~OpenCLLinearOp() override; + virtual ErrorCode reshape(vector> inputs, vector> outputs) override; + virtual ErrorCode load(AbstructLoader &loader) override; + virtual ErrorCode execute(vector> inputs, vector> outputs) override; + virtual ErrorCode free(vector> inputs, vector> outputs) override; + virtual ErrorCode setUp(vector> inputs, vector> outputs) override; + +private: + int in_features_; + int out_features_; + bool support_bias_; + + Tensor weight_; + Tensor bias_; + + // 使用新的带有 bias 后缀的内核 + cl_kernel kernel_fp32_transb_bias_ = nullptr; + cl_kernel kernel_fp16_transb_bias_ = nullptr; + cl_kernel kernel_fp16_q4_0_transb_bias_ = nullptr; + cl_kernel kernel_fp32_q4_0_transb_bias_ = nullptr; + cl_kernel kernel_gemv_fp32_q4_0_transb_bias_ = nullptr; // GEMV + cl_kernel kernel_gemv_fp16_q4_0_transb_bias_ = nullptr; // GEMV + cl_kernel kernel_gemv_fp16_q4_0_transb_bias_half16_ = nullptr; // GEMV for K%16==0 + + cl_kernel kernel_fp32_q4_0_transb_bias_image2d_ = nullptr; + cl_kernel kernel_fp16_q4_0_transb_bias_image2d_ = nullptr; + + cl_kernel kernel_gemv_fp32_q4_0_transb_bias_image2d_ = nullptr; + cl_kernel kernel_gemv_fp16_q4_0_transb_bias_image2d_ = nullptr; + OpenCLBackend *ocl_backend_ = nullptr; +}; + +class 
OpenCLLinearOpCreator : public OpenCLBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + int in_features = op_param["in_features"]; + int out_features = op_param["out_features"]; + bool bias = op_param["bias"]; + return new OpenCLLinearOp(bn, name, in_features, out_features, bias); + } +}; + +} // namespace mllm + +#endif // MLLM_OPENCLLINEAROP_H \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLMatmulOp.cpp b/mllm/backends/opencl/op/OpenCLMatmulOp.cpp new file mode 100644 index 000000000..4916e4860 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLMatmulOp.cpp @@ -0,0 +1,202 @@ + +#include "OpenCLMatmulOp.hpp" +#include "Types.hpp" +#include "utils/OpenCLTools.hpp" +#include +#include + +namespace mllm { + +// Constructor remains the same +OpenCLMatmulOp::OpenCLMatmulOp(Backend *bn, std::string name) : + Op(bn, std::move(name)) { + ocl_backend_ = dynamic_cast(backend_); + if (ocl_backend_ == nullptr) { + throw std::runtime_error("Backend for MatmulOp is not OpenCLBackend"); + } + const std::string kernel_path = "kernel/matmul.cl"; + std::string build_options; + if (ocl_backend_->has_fp16_support()) { + build_options += "-DSUPPORTS_FP16"; + } + + cl_program program = ocl_backend_->getProgram(kernel_path, build_options); + cl_int err; + kernel_fp32_ = clCreateKernel(program, "gemm_fp32", &err); + check_cl_error(err, "CreateKernel gemm_fp32"); + kernel_fp16_ = clCreateKernel(program, "gemm_fp16", &err); + check_cl_error(err, "CreateKernel gemm_fp16"); + kernel_fp32_bhsd_ = clCreateKernel(program, "gemm_fp32_bhsd", &err); + check_cl_error(err, "CreateKernel gemm_fp32_bhsd"); + kernel_fp16_bhsd_ = clCreateKernel(program, "gemm_fp16_bhsd", &err); + check_cl_error(err, "CreateKernel gemm_fp16_bhsd"); + + const std::string kernel_transb_path = "kernel/matmul_transb.cl"; + cl_program program_tansb = ocl_backend_->getProgram(kernel_transb_path, build_options); + 
kernel_fp32_transb_ = clCreateKernel(program_tansb, "gemm_fp32_transb", &err);
+    check_cl_error(err, "CreateKernel gemm_fp32_transb");
+    kernel_fp16_transb_ = clCreateKernel(program_tansb, "gemm_fp16_transb", &err);
+    check_cl_error(err, "CreateKernel gemm_fp16_transb");
+    kernel_fp32_q4_0_transb = clCreateKernel(program_tansb, "gemm_fp32_q4_0_transb", &err);
+    check_cl_error(err, "CreateKernel gemm_fp32_q4_0_transb");
+    kernel_fp16_q4_0_transb = clCreateKernel(program_tansb, "gemm_fp16_q4_0_transb", &err);
+    check_cl_error(err, "CreateKernel gemm_fp16_q4_0_transb");
+}
+
+// Destructor remains the same
+OpenCLMatmulOp::~OpenCLMatmulOp() {
+    if (kernel_fp32_) clReleaseKernel(kernel_fp32_);
+    if (kernel_fp16_) clReleaseKernel(kernel_fp16_);
+    if (kernel_fp32_bhsd_) clReleaseKernel(kernel_fp32_bhsd_);
+    if (kernel_fp16_bhsd_) clReleaseKernel(kernel_fp16_bhsd_);
+    if (kernel_fp32_transb_) clReleaseKernel(kernel_fp32_transb_);
+    if (kernel_fp16_transb_) clReleaseKernel(kernel_fp16_transb_);
+    if (kernel_fp32_q4_0_transb) clReleaseKernel(kernel_fp32_q4_0_transb);
+    if (kernel_fp16_q4_0_transb) clReleaseKernel(kernel_fp16_q4_0_transb);
+}
+
+ErrorCode OpenCLMatmulOp::reshape(vector> inputs, vector> outputs) {
+    auto &A = inputs[0];
+    auto &B = inputs[1];
+    auto &C = outputs[0];
+
+    int M = A->sequence();
+    int K = A->dimension();
+    int N;
+
+    // Work out whether this is a standard or a transposed matmul, and record the flag
+    // Standard:   A(M,K) * B(K,N)   => A.dimension() == B.sequence()
+    // Transposed: A(M,K) * B_T(N,K) => A.dimension() == B.dimension()
+    if (A->dimension() == B->sequence()) {
+        // Standard multiplication
+        use_transb_ = false; // <--- record the flag
+        N = B->dimension();
+    } else if (A->dimension() == B->dimension()) {
+        // Transposed multiplication
+        use_transb_ = true; // <--- record the flag
+        N = B->sequence();
+    } else {
+        // Unsupported matmul shape combination
+        return NOT_SUPPORT;
+    }
+    assert(inputs[0]->ctype() == inputs[1]->ctype() && "Input tensors must have the same ctype");
+    C->setCtype(inputs[0]->ctype());
+    // Reshape the output tensor C
+    C->reshape(A->batch(), A->head(), M, N);
+    C->setDtype(A->dtype());
+    
return MLLM_NO_ERROR;
+}
+
+// setUp stays unchanged: reshape has already fixed the output dimensions, so alloc reserves the right space
+ErrorCode OpenCLMatmulOp::setUp(vector> inputs, vector> outputs) {
+    inputs[0]->to(MLLM_OPENCL);
+    inputs[1]->to(MLLM_OPENCL);
+    outputs[0]->to(MLLM_OPENCL);
+    outputs[0]->alloc();
+    return MLLM_NO_ERROR;
+}
+
+ErrorCode OpenCLMatmulOp::execute(vector> inputs, vector> outputs) {
+    auto &A = inputs[0];
+    auto &B = inputs[1];
+    auto &C = outputs[0];
+
+    cl_kernel kernel_to_use = nullptr;
+    DataType in_type = A->dtype();
+    DataType weight_type = B->dtype();
+
+    // === Updated kernel-selection logic ===
+    if (use_transb_) {
+        // Pick a transposed-B kernel
+        if (in_type == MLLM_TYPE_F32 && weight_type == MLLM_TYPE_F32)
+            kernel_to_use = kernel_fp32_transb_;
+        else if (in_type == MLLM_TYPE_F16 && weight_type == MLLM_TYPE_F16)
+            kernel_to_use = kernel_fp16_transb_;
+        else if (in_type == MLLM_TYPE_F32 && weight_type == MLLM_TYPE_Q4_0) {
+            // assert(inputs[0]->head() == 1 && "fp32_q40 only support head==1");
+            kernel_to_use = kernel_fp32_q4_0_transb;
+        } else if (in_type == MLLM_TYPE_F16 && weight_type == MLLM_TYPE_Q4_0) {
+            // assert(inputs[0]->head() == 1 && "fp16_q40 only support head==1");
+            kernel_to_use = kernel_fp16_q4_0_transb;
+        } else
+            throw std::runtime_error("Unsupported data types for OpenCL Matmul operation.");
+    } else {
+        if (A->ctype() == BHSD) {
+            if (in_type == MLLM_TYPE_F32 && weight_type == MLLM_TYPE_F32)
+                kernel_to_use = kernel_fp32_bhsd_;
+            else if (in_type == MLLM_TYPE_F16 && weight_type == MLLM_TYPE_F16)
+                kernel_to_use = kernel_fp16_bhsd_;
+            else
+                throw std::runtime_error("Unsupported data types for OpenCL Matmul BHSD operation.");
+        } else { // Default to BSHD
+            if (in_type == MLLM_TYPE_F32 && weight_type == MLLM_TYPE_F32)
+                kernel_to_use = kernel_fp32_;
+            else if (in_type == MLLM_TYPE_F16 && weight_type == MLLM_TYPE_F16)
+                kernel_to_use = kernel_fp16_;
+            else
+                throw std::runtime_error("Unsupported data types for OpenCL Matmul BSHD operation.");
+        }
+    }
+
+    if (kernel_to_use == nullptr) {
+        throw 
std::runtime_error("No suitable OpenCL Matmul kernel found for the given data types and shape."); + } + + const int M = C->sequence(); + const int K = A->dimension(); + const int N = C->dimension(); + const int B_size = A->batch(); + const int H_size = A->head(); + const int K_b = (A->dimension() == B->sequence()) ? B->sequence() : B->dimension(); // K for B + + cl_mem a_mem = ocl_backend_->get_cl_mem(*A); + cl_mem b_mem = ocl_backend_->get_cl_mem(*B); + cl_mem c_mem = ocl_backend_->get_cl_mem(*C); + + int arg_idx = 0; + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(cl_mem), &a_mem); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(cl_mem), &b_mem); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(cl_mem), &c_mem); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &M); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &K); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &N); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &H_size); + clSetKernelArg(kernel_to_use, arg_idx++, sizeof(int), &K_b); + cl_event event; + + // if (ocl_backend_->has_fp16_support() && !use_transb_ && A->ctype() == BHSD && in_type == MLLM_TYPE_F16 && weight_type == MLLM_TYPE_F16) { + // const int TILE_M = 8; + // const int TILE_N = 4; + // const size_t global_work_size[3] = { + // (size_t)((N + TILE_N - 1) / TILE_N), + // (size_t)((M + TILE_M - 1) / TILE_M), + // (size_t)(B_size * H_size)}; + // const size_t *local_work_size = nullptr; + // cl_int err = clEnqueueNDRangeKernel( + // ocl_backend_->getQueue(), kernel_to_use, 3, nullptr, + // global_work_size, + // local_work_size, + // 0, nullptr, &event); + // check_cl_error(err, "EnqueueNDRangeKernel Tiled Matmul 3D"); + // } else { + const int TILE_SIZE = 16; + const size_t global_work_size[3] = { + (size_t)(((N + TILE_SIZE - 1) / TILE_SIZE) * TILE_SIZE), + (size_t)(((M + TILE_SIZE - 1) / TILE_SIZE) * TILE_SIZE), + (size_t)(B_size * H_size)}; + const size_t local_work_size[3] = {TILE_SIZE, TILE_SIZE, 1}; + + 
cl_int err = clEnqueueNDRangeKernel( + ocl_backend_->getQueue(), kernel_to_use, 3, nullptr, + global_work_size, + local_work_size, + 0, nullptr, &event); + + check_cl_error(err, "EnqueueNDRangeKernel Tiled Matmul 3D"); + // } + + ocl_backend_->addProfilingEvent(this->name() + "mat_mul", event); + + return MLLM_NO_ERROR; +} +} // namespace mllm diff --git a/mllm/backends/opencl/op/OpenCLMatmulOp.hpp b/mllm/backends/opencl/op/OpenCLMatmulOp.hpp new file mode 100644 index 000000000..404c56911 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLMatmulOp.hpp @@ -0,0 +1,41 @@ +#ifndef OPENCL_MATMUL_OP_HPP +#define OPENCL_MATMUL_OP_HPP + +#include "Op.hpp" +#include "../OpenCLBackend.hpp" + +namespace mllm { + +class OpenCLMatmulOp : public Op { +public: + OpenCLMatmulOp(Backend *bn, std::string name); + ~OpenCLMatmulOp() override; + + ErrorCode reshape(vector> inputs, vector> outputs) override; + ErrorCode setUp(vector> inputs, vector> outputs) override; + ErrorCode execute(vector> inputs, vector> outputs) override; + +private: + cl_kernel kernel_fp32_ = nullptr; + cl_kernel kernel_fp16_ = nullptr; + cl_kernel kernel_fp32_bhsd_ = nullptr; + cl_kernel kernel_fp16_bhsd_ = nullptr; + cl_kernel kernel_fp32_transb_ = nullptr; + cl_kernel kernel_fp16_transb_ = nullptr; + cl_kernel kernel_fp32_q4_0_transb = nullptr; + cl_kernel kernel_fp16_q4_0_transb = nullptr; + + OpenCLBackend *ocl_backend_ = nullptr; + bool use_transb_ = false; +}; + +class OpenCLMatmulOpCreator : public OpenCLBackend::Creator { +public: + Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + return new OpenCLMatmulOp(bn, name); + } +}; + +} // namespace mllm + +#endif // OPENCL_MATMUL_OP_HPP \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLMulOp.cpp b/mllm/backends/opencl/op/OpenCLMulOp.cpp new file mode 100644 index 000000000..ad51a47b4 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLMulOp.cpp @@ -0,0 +1,108 @@ +#include "OpenCLMulOp.hpp" 
+#include "Types.hpp" +#include "utils/OpenCLTools.hpp" +#include "backends/cpu/third_party/ggml/QuantizeFP16.hpp" + +namespace mllm { + +OpenCLMulOp::OpenCLMulOp(Backend *bn, std::string name, float data) : + Op(bn, std::move(name)), data_(data) { + ocl_backend_ = dynamic_cast(backend_); + if (ocl_backend_ == nullptr) throw std::runtime_error("Backend is not OpenCLBackend"); + + const std::string kernel_path = "kernel/mul.cl"; + cl_program program = ocl_backend_->getProgram(kernel_path); + + cl_int err; + kernel_fp32_buffer_ = clCreateKernel(program, "mul_scalar_float", &err); + check_cl_error(err, "clCreateKernel for mul_scalar_float"); + kernel_fp32_image_ = clCreateKernel(program, "mul_scalar_float_image2d", &err); + check_cl_error(err, "clCreateKernel for mul_scalar_float_image2d"); + kernel_fp16_buffer_ = clCreateKernel(program, "mul_scalar_fp16_vector", &err); + check_cl_error(err, "clCreateKernel for mul_scalar_fp16_vector"); + kernel_fp16_image_ = clCreateKernel(program, "mul_scalar_fp16_image2d", &err); + check_cl_error(err, "clCreateKernel for mul_scalar_fp16_image2d"); + + sampler_ = clCreateSampler(ocl_backend_->getContext(), CL_FALSE, CL_ADDRESS_CLAMP_TO_EDGE, CL_FILTER_NEAREST, &err); + check_cl_error(err, "clCreateSampler"); +} + +OpenCLMulOp::~OpenCLMulOp() { + if (kernel_fp32_buffer_) clReleaseKernel(kernel_fp32_buffer_); + if (kernel_fp32_image_) clReleaseKernel(kernel_fp32_image_); + if (kernel_fp16_buffer_) clReleaseKernel(kernel_fp16_buffer_); + if (kernel_fp16_image_) clReleaseKernel(kernel_fp16_image_); + if (sampler_) clReleaseSampler(sampler_); +} + +ErrorCode OpenCLMulOp::reshape(vector> inputs, vector> outputs) { + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); + outputs[0]->setDtype(inputs[0]->dtype()); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLMulOp::setUp(vector> inputs, vector> outputs) { + inputs[0]->to(MLLM_OPENCL); + auto output = outputs[0]; + 
output->setDtype(inputs[0]->dtype()); + auto &out_mem = output->device_memory(); + if (output->dimension() % 4 == 0 && false) { + out_mem.type = MEM_TYPE_IMAGE_2D; + out_mem.image_width = output->dimension() / 4; + out_mem.image_height = output->batch() * output->head() * output->sequence(); + } else { + out_mem.type = MEM_TYPE_BUFFER; + } + output->alloc(); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLMulOp::execute(vector> inputs, vector> outputs) { + auto input_dtype = inputs[0]->dtype(); + auto output = outputs[0]; + + if (output->device_memory().type == MEM_TYPE_IMAGE_2D) { + cl_kernel kernel_to_use = (input_dtype == MLLM_TYPE_F32) ? kernel_fp32_image_ : kernel_fp16_image_; + std::vector temp_tensor_storage; + cl_mem inA_mem = get_image_from_tensor(inputs[0], ocl_backend_, temp_tensor_storage); + cl_mem out_mem_handle = ocl_backend_->get_cl_mem(*output); + + clSetKernelArg(kernel_to_use, 0, sizeof(cl_sampler), &sampler_); + clSetKernelArg(kernel_to_use, 1, sizeof(cl_mem), &inA_mem); + if (input_dtype == MLLM_TYPE_F32) { + clSetKernelArg(kernel_to_use, 2, sizeof(float), &data_); + } else { + mllm_fp16_t data_fp16 = MLLM_FP32_TO_FP16(data_); + clSetKernelArg(kernel_to_use, 2, sizeof(mllm_fp16_t), &data_fp16); + } + clSetKernelArg(kernel_to_use, 3, sizeof(cl_mem), &out_mem_handle); + const int width = static_cast(output->device_memory().image_width); + const int height = static_cast(output->device_memory().image_height); + clSetKernelArg(kernel_to_use, 4, sizeof(int), &width); + clSetKernelArg(kernel_to_use, 5, sizeof(int), &height); + const size_t global_work_size[2] = {(size_t)width, (size_t)height}; + clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 2, nullptr, global_work_size, nullptr, 0, nullptr, nullptr); + } else { + cl_kernel kernel_to_use = (input_dtype == MLLM_TYPE_F32) ? 
kernel_fp32_buffer_ : kernel_fp16_buffer_; + cl_mem in0_buf = ocl_backend_->get_cl_mem(*inputs[0]); + cl_mem out_buf = ocl_backend_->get_cl_mem(*output); + + clSetKernelArg(kernel_to_use, 0, sizeof(cl_mem), &in0_buf); + if (input_dtype == MLLM_TYPE_F32) { + clSetKernelArg(kernel_to_use, 1, sizeof(float), &data_); + } else { + mllm_fp16_t data_fp16 = MLLM_FP32_TO_FP16(data_); + clSetKernelArg(kernel_to_use, 1, sizeof(mllm_fp16_t), &data_fp16); + } + clSetKernelArg(kernel_to_use, 2, sizeof(cl_mem), &out_buf); + + size_t count = inputs[0]->count(); + if (input_dtype == MLLM_TYPE_F16) { count /= 4; } + + const size_t global_work_size[1] = {count}; + clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 1, nullptr, global_work_size, nullptr, 0, nullptr, nullptr); + } + return MLLM_NO_ERROR; +} + +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLMulOp.hpp b/mllm/backends/opencl/op/OpenCLMulOp.hpp new file mode 100644 index 000000000..1efbd1355 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLMulOp.hpp @@ -0,0 +1,40 @@ +#ifndef OPENCL_MUL_OP_HPP +#define OPENCL_MUL_OP_HPP + +#include "Op.hpp" +#include "../OpenCLBackend.hpp" + +namespace mllm { + +class OpenCLMulOp : public Op { +public: + OpenCLMulOp(Backend *bn, std::string name, float data); + ~OpenCLMulOp() override; + + ErrorCode reshape(vector> inputs, vector> outputs) override; + ErrorCode setUp(vector> inputs, vector> outputs) override; + ErrorCode execute(vector> inputs, vector> outputs) override; + +private: + float data_; // 用于存储要乘的标量 + + cl_kernel kernel_fp32_buffer_ = nullptr; + cl_kernel kernel_fp32_image_ = nullptr; + cl_kernel kernel_fp16_buffer_ = nullptr; + cl_kernel kernel_fp16_image_ = nullptr; + + cl_sampler sampler_ = nullptr; + OpenCLBackend *ocl_backend_ = nullptr; +}; + +class OpenCLMulOpCreator : public OpenCLBackend::Creator { +public: + Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + float data = 
op_param["data"]; + return new OpenCLMulOp(bn, name, data); + } +}; + +} // namespace mllm + +#endif // OPENCL_MUL_OP_HPP \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLMulTwoOp.cpp b/mllm/backends/opencl/op/OpenCLMulTwoOp.cpp new file mode 100644 index 000000000..ef411d819 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLMulTwoOp.cpp @@ -0,0 +1,111 @@ +#include "OpenCLMulTwoOp.hpp" +#include "Types.hpp" +#include "utils/OpenCLTools.hpp" + +namespace mllm { + +OpenCLMulTwoOp::OpenCLMulTwoOp(Backend *bn, std::string name) : + Op(bn, std::move(name)) { + ocl_backend_ = dynamic_cast(backend_); + if (ocl_backend_ == nullptr) throw std::runtime_error("Backend is not OpenCLBackend"); + + const std::string kernel_path = "kernel/mul.cl"; + std::string build_options; + if (ocl_backend_->has_fp16_support()) { + build_options += " -DSUPPORTS_FP16"; + } + cl_program program = ocl_backend_->getProgram(kernel_path, build_options); + + cl_int err; + kernel_fp32_buffer_ = clCreateKernel(program, "mul_float", &err); + check_cl_error(err, "clCreateKernel for mul_float"); + kernel_fp32_image_ = clCreateKernel(program, "mul_float_image2d", &err); + check_cl_error(err, "clCreateKernel for mul_float_image2d"); + kernel_fp16_buffer_ = clCreateKernel(program, "mul_fp16_vector", &err); + check_cl_error(err, "clCreateKernel for mul_fp16_vector"); + kernel_fp16_image_ = clCreateKernel(program, "mul_fp16_image2d", &err); + check_cl_error(err, "clCreateKernel for mul_fp16_image2d"); + + sampler_ = clCreateSampler(ocl_backend_->getContext(), CL_FALSE, CL_ADDRESS_CLAMP_TO_EDGE, CL_FILTER_NEAREST, &err); + check_cl_error(err, "clCreateSampler"); +} + +OpenCLMulTwoOp::~OpenCLMulTwoOp() { + if (kernel_fp32_buffer_) clReleaseKernel(kernel_fp32_buffer_); + if (kernel_fp32_image_) clReleaseKernel(kernel_fp32_image_); + if (kernel_fp16_buffer_) clReleaseKernel(kernel_fp16_buffer_); + if (kernel_fp16_image_) clReleaseKernel(kernel_fp16_image_); + if (sampler_) 
clReleaseSampler(sampler_); +} + +ErrorCode OpenCLMulTwoOp::reshape(vector> inputs, vector> outputs) { + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); + outputs[0]->setDtype(inputs[0]->dtype()); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLMulTwoOp::setUp(vector> inputs, vector> outputs) { + for (auto &input : inputs) { + input->to(MLLM_OPENCL); + } + auto output = outputs[0]; + output->setDtype(inputs[0]->dtype()); + auto &out_mem = output->device_memory(); + if (output->dimension() % 4 == 0 && false) { + out_mem.type = MEM_TYPE_IMAGE_2D; + out_mem.image_width = output->dimension() / 4; + out_mem.image_height = output->batch() * output->head() * output->sequence(); + } else { + out_mem.type = MEM_TYPE_BUFFER; + } + output->alloc(); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLMulTwoOp::execute(vector> inputs, vector> outputs) { + auto input_dtype = inputs[0]->dtype(); + auto output = outputs[0]; + + if (output->device_memory().type == MEM_TYPE_IMAGE_2D) { + cl_kernel kernel_to_use = (input_dtype == MLLM_TYPE_F32) ? 
kernel_fp32_image_ : kernel_fp16_image_; + std::vector temp_tensor_storage; + cl_mem inA_mem = get_image_from_tensor(inputs[0], ocl_backend_, temp_tensor_storage); + cl_mem inB_mem = get_image_from_tensor(inputs[1], ocl_backend_, temp_tensor_storage); + cl_mem out_mem_handle = ocl_backend_->get_cl_mem(*output); + + clSetKernelArg(kernel_to_use, 0, sizeof(cl_sampler), &sampler_); + clSetKernelArg(kernel_to_use, 1, sizeof(cl_mem), &inA_mem); + clSetKernelArg(kernel_to_use, 2, sizeof(cl_mem), &inB_mem); + clSetKernelArg(kernel_to_use, 3, sizeof(cl_mem), &out_mem_handle); + const int width = static_cast(output->device_memory().image_width); + const int height = static_cast(output->device_memory().image_height); + clSetKernelArg(kernel_to_use, 4, sizeof(int), &width); + clSetKernelArg(kernel_to_use, 5, sizeof(int), &height); + const size_t global_work_size[2] = {(size_t)width, (size_t)height}; + clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 2, nullptr, global_work_size, nullptr, 0, nullptr, nullptr); + } else { + cl_kernel kernel_to_use = (input_dtype == MLLM_TYPE_F32) ? 
kernel_fp32_buffer_ : kernel_fp16_buffer_; + cl_mem in0_buf = ocl_backend_->get_cl_mem(*inputs[0]); + cl_mem in1_buf = ocl_backend_->get_cl_mem(*inputs[1]); + cl_mem out_buf = ocl_backend_->get_cl_mem(*output); + clSetKernelArg(kernel_to_use, 0, sizeof(cl_mem), &in0_buf); + clSetKernelArg(kernel_to_use, 1, sizeof(cl_mem), &in1_buf); + clSetKernelArg(kernel_to_use, 2, sizeof(cl_mem), &out_buf); + const int b_dim = inputs[1]->dimension(); + const int a_dim = inputs[0]->dimension(); + clSetKernelArg(kernel_to_use, 3, sizeof(int), &b_dim); + clSetKernelArg(kernel_to_use, 4, sizeof(int), &a_dim); + size_t count = inputs[0]->count(); + if (input_dtype == MLLM_TYPE_F16) { + if (count % 4 != 0) { + throw std::runtime_error("[mulTwo]For FP16 vector kernel, tensor count must be a multiple of 4."); + } + count /= 4; + } + const size_t global_work_size[1] = {count}; + clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 1, nullptr, global_work_size, nullptr, 0, nullptr, nullptr); + } + return MLLM_NO_ERROR; +} + +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLMulTwoOp.hpp b/mllm/backends/opencl/op/OpenCLMulTwoOp.hpp new file mode 100644 index 000000000..2f1b991a2 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLMulTwoOp.hpp @@ -0,0 +1,37 @@ +#ifndef OPENCL_MUL_TWO_OP_HPP +#define OPENCL_MUL_TWO_OP_HPP + +#include "Op.hpp" +#include "../OpenCLBackend.hpp" + +namespace mllm { + +class OpenCLMulTwoOp : public Op { +public: + OpenCLMulTwoOp(Backend *bn, std::string name); + ~OpenCLMulTwoOp() override; + + ErrorCode reshape(vector> inputs, vector> outputs) override; + ErrorCode setUp(vector> inputs, vector> outputs) override; + ErrorCode execute(vector> inputs, vector> outputs) override; + +private: + cl_kernel kernel_fp32_buffer_ = nullptr; + cl_kernel kernel_fp32_image_ = nullptr; + cl_kernel kernel_fp16_buffer_ = nullptr; + cl_kernel kernel_fp16_image_ = nullptr; + + cl_sampler sampler_ = nullptr; + OpenCLBackend 
*ocl_backend_ = nullptr; +}; + +class OpenCLMulTwoOpCreator : public OpenCLBackend::Creator { +public: + Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + return new OpenCLMulTwoOp(bn, name); + } +}; + +} // namespace mllm + +#endif // OPENCL_MUL_TWO_OP_HPP \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLRMSNormOp.cpp b/mllm/backends/opencl/op/OpenCLRMSNormOp.cpp new file mode 100644 index 000000000..7a7f8009a --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLRMSNormOp.cpp @@ -0,0 +1,117 @@ +// OpenCLRMSNormOp.cpp + +#include "OpenCLRMSNormOp.hpp" +#include "Types.hpp" +// #include "utils/OpenCLTools.hpp" + +namespace mllm { + +// 构造函数 +OpenCLRMSNormOp::OpenCLRMSNormOp(Backend *bn, std::string name, int normSize, float epsilon, bool add_unit_offset) : + Op(bn, std::move(name)), normSize_(normSize), epsilon_(epsilon), add_unit_offset_(add_unit_offset) { + ocl_backend_ = dynamic_cast(backend_); + if (ocl_backend_ == nullptr) throw std::runtime_error("Backend is not OpenCLBackend"); + + const std::string kernel_path = "kernel/rmsnorm.cl"; + std::string build_options; + if (ocl_backend_->has_fp16_support()) { + build_options += " -DSUPPORTS_FP16"; + } + + cl_program program = ocl_backend_->getProgram(kernel_path, build_options); + + cl_int err; + // ✨ 修改: 创建两个内核 + kernel_fp32_ = clCreateKernel(program, "rmsnorm_f32_q4", &err); + check_cl_error(err, "clCreateKernel for rmsnorm_f32_q4"); + + kernel_fp16_ = clCreateKernel(program, "rmsnorm_f16_q4", &err); + check_cl_error(err, "clCreateKernel for rmsnorm_f16_q4"); +} + +// ✨ 新增: 自定义析构函数 +OpenCLRMSNormOp::~OpenCLRMSNormOp() { + if (kernel_fp32_) clReleaseKernel(kernel_fp32_); + if (kernel_fp16_) clReleaseKernel(kernel_fp16_); +} + +// reshape, load, free, setUp 函数保持不变 +ErrorCode OpenCLRMSNormOp::reshape(vector> inputs, vector> outputs) { + assert(normSize_ == inputs[0]->dimension()); + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), 
inputs[0]->sequence(), inputs[0]->dimension()); + // ✨ 新增: 确保输出类型与输入一致 + outputs[0]->setDtype(inputs[0]->dtype()); + return Op::reshape(inputs, outputs); +} +ErrorCode OpenCLRMSNormOp::load(AbstructLoader &loader) { + weight_.setBackend(Backend::global_backends[MLLM_CPU].get()); + weight_.setName(name() + ".weight"); + weight_.reshape(1, 1, 1, normSize_); + if (loader.getDataType(weight_.name()) != MLLM_TYPE_COUNT) { + weight_.setDtype(loader.getDataType(weight_.name())); + weight_.alloc(); + loader.load(&weight_); + weight_.to(MLLM_OPENCL); + } else { + weight_.setDtype(MLLM_TYPE_F32); + weight_.alloc(); + weight_.to(MLLM_OPENCL); + } + return Op::load(loader); +} +ErrorCode OpenCLRMSNormOp::free(vector> inputs, vector> outputs) { + weight_.free(); + return Op::free(inputs, outputs); +} +ErrorCode OpenCLRMSNormOp::setUp(vector> inputs, vector> outputs) { + inputs[0]->to(MLLM_OPENCL); + outputs[0]->setDtype(inputs[0]->dtype()); + outputs[0]->alloc(); + outputs[0]->to(MLLM_OPENCL); + return MLLM_NO_ERROR; +} + +// execute 函数 +ErrorCode OpenCLRMSNormOp::execute(vector> inputs, vector> outputs) { + auto input = inputs[0]; + auto output = outputs[0]; + + // ✨ 修改: 根据输入类型选择内核 + cl_kernel kernel_to_use = nullptr; + if (input->dtype() == MLLM_TYPE_F32) { + kernel_to_use = kernel_fp32_; + } else if (input->dtype() == MLLM_TYPE_F16) { + kernel_to_use = kernel_fp16_; + } else { + return NOT_SUPPORT; + } + + const int D = input->dimension(); + const int weight_is_q4 = (weight_.dtype() == MLLM_TYPE_Q4_0) ? 1 : 0; + const int add_unit_offset_int = add_unit_offset_ ? 
1 : 0; + + cl_mem src_buf = ocl_backend_->get_cl_mem(*input); + cl_mem dst_buf = ocl_backend_->get_cl_mem(*output); + cl_mem w_buf = ocl_backend_->get_cl_mem(weight_); + + clSetKernelArg(kernel_to_use, 0, sizeof(cl_mem), &src_buf); + clSetKernelArg(kernel_to_use, 1, sizeof(cl_mem), &dst_buf); + clSetKernelArg(kernel_to_use, 2, sizeof(cl_mem), &w_buf); + clSetKernelArg(kernel_to_use, 3, sizeof(int), &weight_is_q4); + clSetKernelArg(kernel_to_use, 4, sizeof(int), &D); + clSetKernelArg(kernel_to_use, 5, sizeof(float), &epsilon_); + clSetKernelArg(kernel_to_use, 6, sizeof(int), &add_unit_offset_int); + + const size_t total_rows = (size_t)input->batch() * input->head() * input->sequence(); + const size_t local_work_size = 256; + const size_t global_work_size = total_rows * local_work_size; + + cl_event event; + cl_int err = clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 1, nullptr, &global_work_size, &local_work_size, 0, nullptr, &event); + ocl_backend_->addProfilingEvent(this->name(), event); + check_cl_error(err, "clEnqueueNDRangeKernel for RMSNorm"); + + return MLLM_NO_ERROR; +} + +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLRMSNormOp.hpp b/mllm/backends/opencl/op/OpenCLRMSNormOp.hpp new file mode 100644 index 000000000..31943528b --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLRMSNormOp.hpp @@ -0,0 +1,43 @@ +#ifndef OPENCL_RMSNORM_OP_HPP +#define OPENCL_RMSNORM_OP_HPP + +#include "Op.hpp" +#include "../OpenCLBackend.hpp" + +namespace mllm { + +class OpenCLRMSNormOp : public Op { +public: + OpenCLRMSNormOp(Backend *bn, std::string name, int normSize, float epsilon = 1e-6, bool add_unit_offset_ = false); + ~OpenCLRMSNormOp() override; + + ErrorCode reshape(vector> inputs, vector> outputs) override; + ErrorCode load(AbstructLoader &loader) override; + ErrorCode setUp(vector> inputs, vector> outputs) override; + ErrorCode execute(vector> inputs, vector> outputs) override; + ErrorCode free(vector> inputs, 
vector> outputs) override; + +private: + float epsilon_; + Tensor weight_; + int normSize_; + bool add_unit_offset_; + + cl_kernel kernel_fp32_ = nullptr; + cl_kernel kernel_fp16_ = nullptr; + OpenCLBackend *ocl_backend_ = nullptr; +}; + +class OpenCLRMSNormOpCreator : public OpenCLBackend::Creator { +public: + Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + int normSize = (int)op_param["norm_size"]; + float epsilon = op_param.count("epsilon") ? (float)op_param["epsilon"] : 1e-6f; + bool add_unit_offset = op_param.count("add_unit_offset") ? (bool)op_param["add_unit_offset"] : false; + return new OpenCLRMSNormOp(bn, name, normSize, epsilon, add_unit_offset); + } +}; + +} // namespace mllm + +#endif // OPENCL_RMSNORM_OP_HPP \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLRoPEOp.cpp b/mllm/backends/opencl/op/OpenCLRoPEOp.cpp new file mode 100644 index 000000000..af0d65d16 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLRoPEOp.cpp @@ -0,0 +1,357 @@ +#include "OpenCLRoPEOp.hpp" +#include "Types.hpp" +#include "utils/OpenCLTools.hpp" +#include "backends/cpu/third_party/ggml/QuantizeFP16.hpp" // For MLLM_FP32_TO_FP16 +#include + +namespace mllm { + +cl_mem OpenCLRoPEOp::sin_buffer_fp32_ = nullptr; +cl_mem OpenCLRoPEOp::cos_buffer_fp32_ = nullptr; +cl_mem OpenCLRoPEOp::sin_buffer_fp16_ = nullptr; +cl_mem OpenCLRoPEOp::cos_buffer_fp16_ = nullptr; +size_t OpenCLRoPEOp::buffer_size_fp32_ = 0; +size_t OpenCLRoPEOp::buffer_size_fp16_ = 0; +vector> OpenCLRoPEOp::sin_table_cpu_fp32_; +vector> OpenCLRoPEOp::cos_table_cpu_fp32_; +int OpenCLRoPEOp::partial_dim_cached_ = -1; + +// === 本地辅助函数 (sinusoidal_position_embedding_*) 保持不变 === +namespace { +void sinusoidal_position_embedding_llama(int seq_len, int output_dim, float rope_theta, + vector> &sin_table, vector> &cos_table) { + sin_table.resize(seq_len, vector(output_dim)); + cos_table.resize(seq_len, vector(output_dim)); + vector theta(output_dim / 2); + for (int i 
= 0; i < output_dim / 2; ++i) { + theta[i] = 1.0f / powf(rope_theta, (float)(2 * i) / output_dim); + } + for (int s = 0; s < seq_len; ++s) { + for (int d = 0; d < output_dim; d += 2) { + float t = (float)s * theta[d / 2]; + float sin_val = sinf(t); + float cos_val = cosf(t); + sin_table[s][d] = sin_val; + cos_table[s][d] = cos_val; + sin_table[s][d + 1] = sin_val; + cos_table[s][d + 1] = cos_val; + } + } +} + +void sinusoidal_position_embedding_huggingface(int seq_len, int output_dim, float rope_theta, + vector> &sin_table, vector> &cos_table) { + sin_table.resize(seq_len, vector(output_dim / 2)); + cos_table.resize(seq_len, vector(output_dim / 2)); + vector theta(output_dim / 2); + for (int i = 0; i < output_dim / 2; ++i) { + theta[i] = 1.0f / powf(rope_theta, (float)(2 * i) / output_dim); + } + for (int s = 0; s < seq_len; ++s) { + for (int d = 0; d < output_dim / 2; ++d) { + float t = (float)s * theta[d]; + sin_table[s][d] = sinf(t); + cos_table[s][d] = cosf(t); + } + } +} +} // namespace + +// === 构造函数实现 (保持不变) === +OpenCLRoPEOp::OpenCLRoPEOp(Backend *bn, string opName, OpParam &config, int threadCount) : + Op(bn, opName), config_(config) { + _init(threadCount); +} +OpenCLRoPEOp::OpenCLRoPEOp(Backend *bn, string opName, int pose_type, int threadCount) : + Op(bn, opName) { + config_["pose_type"] = pose_type; + _init(threadCount); +} +OpenCLRoPEOp::OpenCLRoPEOp(Backend *bn, string opName, int pose_type, float rope_theta, int max_position_embeddings, int threadCount) : + Op(bn, opName) { + config_["pose_type"] = pose_type; + config_["rope_theta"] = rope_theta; + config_["max_position_embeddings"] = max_position_embeddings; + _init(threadCount); +} +OpenCLRoPEOp::OpenCLRoPEOp(Backend *bn, string opName, int pose_type, float rope_theta, float partial_rotary_factor, int max_position_embeddings, int threadCount) : + Op(bn, opName) { + config_["pose_type"] = pose_type; + config_["rope_theta"] = rope_theta; + config_["partial_rotary_factor"] = partial_rotary_factor; + 
config_["max_position_embeddings"] = max_position_embeddings; + _init(threadCount); +} + +// ✨ 修改: _init 函数,创建所有内核 +void OpenCLRoPEOp::_init(int threadCount) { + ocl_backend_ = dynamic_cast(backend_); + if (ocl_backend_ == nullptr) throw std::runtime_error("Backend is not OpenCLBackend"); + + pose_type_ = (RoPEType)config_.at("pose_type"); + if (config_.find("rope_theta") != config_.end()) rope_theta_ = config_.at("rope_theta"); + if (config_.find("partial_rotary_factor") != config_.end()) partial_rotary_factor_ = config_.at("partial_rotary_factor"); + if (config_.find("max_position_embeddings") != config_.end()) pos_max_ = config_.at("max_position_embeddings"); + + const std::string kernel_path = "kernel/rope.cl"; + cl_program program = ocl_backend_->getProgram(kernel_path); + cl_int err; + + kernel_llama_fp32_ = clCreateKernel(program, "rope_llama_fp32", &err); + check_cl_error(err, "clCreateKernel rope_llama_fp32"); + kernel_hf_fp32_ = clCreateKernel(program, "rope_hf_fp32", &err); + check_cl_error(err, "clCreateKernel rope_hf_fp32"); + + kernel_llama_fp16_ = clCreateKernel(program, "rope_llama_fp16", &err); + check_cl_error(err, "clCreateKernel rope_llama_fp16"); + kernel_hf_fp16_ = clCreateKernel(program, "rope_hf_fp16", &err); + check_cl_error(err, "clCreateKernel rope_hf_fp16"); +} + +// ✨ 修改: 析构函数,释放所有资源 +OpenCLRoPEOp::~OpenCLRoPEOp() { + if (kernel_llama_fp32_) clReleaseKernel(kernel_llama_fp32_); + if (kernel_hf_fp32_) clReleaseKernel(kernel_hf_fp32_); + // if (sin_buffer_fp32_) clReleaseMemObject(sin_buffer_fp32_); + // if (cos_buffer_fp32_) clReleaseMemObject(cos_buffer_fp32_); + + if (kernel_llama_fp16_) clReleaseKernel(kernel_llama_fp16_); + if (kernel_hf_fp16_) clReleaseKernel(kernel_hf_fp16_); + // if (sin_buffer_fp16_) clReleaseMemObject(sin_buffer_fp16_); + // if (cos_buffer_fp16_) clReleaseMemObject(cos_buffer_fp16_); +} + +/* +void OpenCLRoPEOp::_computeSinCosTable(int partial_dim) { + if (!sin_table_cpu_fp32_.empty() && partial_dim_cached_ == 
partial_dim) { + return; + } + partial_dim_cached_ = partial_dim; + + if (pose_type_ == LLAMAROPE) { + sinusoidal_position_embedding_llama(pos_max_, partial_dim, rope_theta_, sin_table_cpu_fp32_, cos_table_cpu_fp32_); + } else if (pose_type_ == HFHUBROPE || pose_type_ == MLAROPE) { + sinusoidal_position_embedding_huggingface(pos_max_, partial_dim, rope_theta_, sin_table_cpu_fp32_, cos_table_cpu_fp32_); + } else { + throw std::runtime_error("Unsupported RoPE type for OpenCL"); + } + + int seq_len = sin_table_cpu_fp32_.size(); + int table_dim = sin_table_cpu_fp32_[0].size(); + + // --- 1. 处理 FP32 Buffer --- + size_t new_buffer_size_fp32 = (size_t)seq_len * table_dim * sizeof(float); + if (buffer_size_fp32_ != new_buffer_size_fp32) { + if (sin_buffer_fp32_) clReleaseMemObject(sin_buffer_fp32_); + if (cos_buffer_fp32_) clReleaseMemObject(cos_buffer_fp32_); + cl_int err; + sin_buffer_fp32_ = clCreateBuffer(ocl_backend_->getContext(), CL_MEM_READ_ONLY, new_buffer_size_fp32, nullptr, &err); + check_cl_error(err, "clCreateBuffer for sin_buffer_fp32_"); + cos_buffer_fp32_ = clCreateBuffer(ocl_backend_->getContext(), CL_MEM_READ_ONLY, new_buffer_size_fp32, nullptr, &err); + check_cl_error(err, "clCreateBuffer for cos_buffer_fp32_"); + buffer_size_fp32_ = new_buffer_size_fp32; + } + + vector sin_flat_fp32, cos_flat_fp32; + sin_flat_fp32.reserve(seq_len * table_dim); + cos_flat_fp32.reserve(seq_len * table_dim); + for (int i = 0; i < seq_len; ++i) { + sin_flat_fp32.insert(sin_flat_fp32.end(), sin_table_cpu_fp32_[i].begin(), sin_table_cpu_fp32_[i].end()); + cos_flat_fp32.insert(cos_flat_fp32.end(), cos_table_cpu_fp32_[i].begin(), cos_table_cpu_fp32_[i].end()); + } + clEnqueueWriteBuffer(ocl_backend_->getQueue(), sin_buffer_fp32_, CL_TRUE, 0, buffer_size_fp32_, sin_flat_fp32.data(), 0, nullptr, nullptr); + clEnqueueWriteBuffer(ocl_backend_->getQueue(), cos_buffer_fp32_, CL_TRUE, 0, buffer_size_fp32_, cos_flat_fp32.data(), 0, nullptr, nullptr); + + // --- 2. 
处理 FP16 Buffer --- + size_t new_buffer_size_fp16 = (size_t)seq_len * table_dim * sizeof(mllm_fp16_t); + if (buffer_size_fp16_ != new_buffer_size_fp16) { + if (sin_buffer_fp16_) clReleaseMemObject(sin_buffer_fp16_); + if (cos_buffer_fp16_) clReleaseMemObject(cos_buffer_fp16_); + cl_int err; + sin_buffer_fp16_ = clCreateBuffer(ocl_backend_->getContext(), CL_MEM_READ_ONLY, new_buffer_size_fp16, nullptr, &err); + check_cl_error(err, "clCreateBuffer for sin_buffer_fp16_"); + cos_buffer_fp16_ = clCreateBuffer(ocl_backend_->getContext(), CL_MEM_READ_ONLY, new_buffer_size_fp16, nullptr, &err); + check_cl_error(err, "clCreateBuffer for cos_buffer_fp16_"); + buffer_size_fp16_ = new_buffer_size_fp16; + } + + vector sin_flat_fp16, cos_flat_fp16; + sin_flat_fp16.reserve(seq_len * table_dim); + cos_flat_fp16.reserve(seq_len * table_dim); + for (int i = 0; i < seq_len; ++i) { + for (int j = 0; j < table_dim; ++j) { + sin_flat_fp16.push_back(MLLM_FP32_TO_FP16(sin_table_cpu_fp32_[i][j])); + cos_flat_fp16.push_back(MLLM_FP32_TO_FP16(cos_table_cpu_fp32_[i][j])); + } + } + clEnqueueWriteBuffer(ocl_backend_->getQueue(), sin_buffer_fp16_, CL_TRUE, 0, buffer_size_fp16_, sin_flat_fp16.data(), 0, nullptr, nullptr); + clEnqueueWriteBuffer(ocl_backend_->getQueue(), cos_buffer_fp16_, CL_TRUE, 0, buffer_size_fp16_, cos_flat_fp16.data(), 0, nullptr, nullptr); +} +*/ +void OpenCLRoPEOp::_computeSinCosTable(int partial_dim) { + if (!sin_table_cpu_fp32_.empty() && partial_dim_cached_ == partial_dim) { + return; + } + partial_dim_cached_ = partial_dim; + + if (pose_type_ == LLAMAROPE) { + sinusoidal_position_embedding_llama(pos_max_, partial_dim, rope_theta_, sin_table_cpu_fp32_, cos_table_cpu_fp32_); + } else if (pose_type_ == HFHUBROPE || pose_type_ == MLAROPE) { + sinusoidal_position_embedding_huggingface(pos_max_, partial_dim, rope_theta_, sin_table_cpu_fp32_, cos_table_cpu_fp32_); + } else { + throw std::runtime_error("Unsupported RoPE type for OpenCL"); + } + + int seq_len = 
sin_table_cpu_fp32_.size(); + int table_dim = sin_table_cpu_fp32_[0].size(); + + // FP32 + size_t new_buffer_size_fp32 = (size_t)seq_len * table_dim * sizeof(float); + if (buffer_size_fp32_ != new_buffer_size_fp32) { + if (sin_buffer_fp32_) clReleaseMemObject(sin_buffer_fp32_); + if (cos_buffer_fp32_) clReleaseMemObject(cos_buffer_fp32_); + cl_int err; + sin_buffer_fp32_ = clCreateBuffer(ocl_backend_->getContext(), CL_MEM_READ_ONLY, new_buffer_size_fp32, nullptr, &err); + check_cl_error(err, "clCreateBuffer sin_buffer_fp32_"); + cos_buffer_fp32_ = clCreateBuffer(ocl_backend_->getContext(), CL_MEM_READ_ONLY, new_buffer_size_fp32, nullptr, &err); + check_cl_error(err, "clCreateBuffer cos_buffer_fp32_"); + buffer_size_fp32_ = new_buffer_size_fp32; + } + + vector sin_flat_fp32, cos_flat_fp32; + sin_flat_fp32.reserve(seq_len * table_dim); + cos_flat_fp32.reserve(seq_len * table_dim); + for (int i = 0; i < seq_len; ++i) { + sin_flat_fp32.insert(sin_flat_fp32.end(), sin_table_cpu_fp32_[i].begin(), sin_table_cpu_fp32_[i].end()); + cos_flat_fp32.insert(cos_flat_fp32.end(), cos_table_cpu_fp32_[i].begin(), cos_table_cpu_fp32_[i].end()); + } + clEnqueueWriteBuffer(ocl_backend_->getQueue(), sin_buffer_fp32_, CL_TRUE, 0, buffer_size_fp32_, sin_flat_fp32.data(), 0, nullptr, nullptr); + clEnqueueWriteBuffer(ocl_backend_->getQueue(), cos_buffer_fp32_, CL_TRUE, 0, buffer_size_fp32_, cos_flat_fp32.data(), 0, nullptr, nullptr); + + // FP16 + size_t new_buffer_size_fp16 = (size_t)seq_len * table_dim * sizeof(mllm_fp16_t); + if (buffer_size_fp16_ != new_buffer_size_fp16) { + if (sin_buffer_fp16_) clReleaseMemObject(sin_buffer_fp16_); + if (cos_buffer_fp16_) clReleaseMemObject(cos_buffer_fp16_); + cl_int err; + sin_buffer_fp16_ = clCreateBuffer(ocl_backend_->getContext(), CL_MEM_READ_ONLY, new_buffer_size_fp16, nullptr, &err); + check_cl_error(err, "clCreateBuffer sin_buffer_fp16_"); + cos_buffer_fp16_ = clCreateBuffer(ocl_backend_->getContext(), CL_MEM_READ_ONLY, new_buffer_size_fp16, 
nullptr, &err); + check_cl_error(err, "clCreateBuffer cos_buffer_fp16_"); + buffer_size_fp16_ = new_buffer_size_fp16; + } + + vector sin_flat_fp16, cos_flat_fp16; + sin_flat_fp16.reserve(seq_len * table_dim); + cos_flat_fp16.reserve(seq_len * table_dim); + for (int i = 0; i < seq_len; ++i) { + for (int j = 0; j < table_dim; ++j) { + sin_flat_fp16.push_back(MLLM_FP32_TO_FP16(sin_table_cpu_fp32_[i][j])); + cos_flat_fp16.push_back(MLLM_FP32_TO_FP16(cos_table_cpu_fp32_[i][j])); + } + } + clEnqueueWriteBuffer(ocl_backend_->getQueue(), sin_buffer_fp16_, CL_TRUE, 0, buffer_size_fp16_, sin_flat_fp16.data(), 0, nullptr, nullptr); + clEnqueueWriteBuffer(ocl_backend_->getQueue(), cos_buffer_fp16_, CL_TRUE, 0, buffer_size_fp16_, cos_flat_fp16.data(), 0, nullptr, nullptr); +} + +ErrorCode OpenCLRoPEOp::reshape(vector> inputs, vector> outputs) { + int partial_dim = inputs[0]->dimension() * partial_rotary_factor_; + _computeSinCosTable(partial_dim); + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); + outputs[0]->setDtype(inputs[0]->dtype()); // 确保输出类型与输入一致 + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLRoPEOp::setUp(vector> inputs, vector> outputs) { + inputs[0]->to(MLLM_OPENCL); + outputs[0]->alloc(); + return MLLM_NO_ERROR; +} + +// ✨ 修改: execute 函数,根据数据类型选择内核 +ErrorCode OpenCLRoPEOp::execute(vector> inputs, vector> outputs) { + auto input = inputs[0]; + auto output = outputs[0]; + + // RoPE是in-place操作,先将输入数据拷贝到输出 + clEnqueueCopyBuffer(ocl_backend_->getQueue(), ocl_backend_->get_cl_mem(*input), ocl_backend_->get_cl_mem(*output), 0, 0, input->size(), 0, nullptr, nullptr); + + cl_kernel kernel_to_use = nullptr; + cl_mem sin_buf_to_use = nullptr; + cl_mem cos_buf_to_use = nullptr; + int partial_dim = output->dimension() * partial_rotary_factor_; + size_t d_work_size = 0; + + if (output->dtype() == MLLM_TYPE_F32) { + sin_buf_to_use = sin_buffer_fp32_; + cos_buf_to_use = cos_buffer_fp32_; + if (pose_type_ == LLAMAROPE) { + 
kernel_to_use = kernel_llama_fp32_; + d_work_size = partial_dim / 2; + } else { // HFHUBROPE, MLAROPE + kernel_to_use = kernel_hf_fp32_; + d_work_size = partial_dim / 2; + } + } else if (output->dtype() == MLLM_TYPE_F16) { + sin_buf_to_use = sin_buffer_fp16_; + cos_buf_to_use = cos_buffer_fp16_; + if (pose_type_ == LLAMAROPE) { + kernel_to_use = kernel_llama_fp16_; + d_work_size = partial_dim / 2; + } else { // HFHUBROPE, MLAROPE + kernel_to_use = kernel_hf_fp16_; + d_work_size = partial_dim / 2; + } + } else { + std::runtime_error("Unsupported RoPE data type for OpenCL: " + std::to_string(output->dtype())); + return NOT_SUPPORT; + } + + if (kernel_to_use == nullptr || sin_buf_to_use == nullptr || cos_buf_to_use == nullptr) { + std::runtime_error("RoPE kernel or buffers not initialized properly."); + return NOT_SUPPORT; // 安全检查 + } + + cl_mem data_buf = ocl_backend_->get_cl_mem(*output); + int head_dim = output->head() * output->batch(); // 将 batch 和 head 合并 + int seq_len = output->sequence(); + + clSetKernelArg(kernel_to_use, 0, sizeof(cl_mem), &data_buf); + clSetKernelArg(kernel_to_use, 1, sizeof(cl_mem), &sin_buf_to_use); + clSetKernelArg(kernel_to_use, 2, sizeof(cl_mem), &cos_buf_to_use); + clSetKernelArg(kernel_to_use, 3, sizeof(int), &partial_dim); + clSetKernelArg(kernel_to_use, 4, sizeof(int), &head_dim); + clSetKernelArg(kernel_to_use, 5, sizeof(int), &seq_len); + clSetKernelArg(kernel_to_use, 6, sizeof(int), &pos_offset_); + + size_t global_work_size[3] = {d_work_size, (size_t)seq_len, (size_t)head_dim}; + + cl_event event; + cl_int err = clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 3, nullptr, global_work_size, nullptr, 0, nullptr, &event); + ocl_backend_->addProfilingEvent(this->name(), event); + check_cl_error(err, "clEnqueueNDRangeKernel for RoPE"); + + pos_offset_ += output->sequence(); + return MLLM_NO_ERROR; +} + +// Creator 的实现保持不变 +Op *OpenCLRoPEOpCreator::create(OpParam op_param, Backend *bn, string name, int threadCount) 
const { + auto it = op_param.find("rope_type"); + if (it != op_param.end()) { + return new OpenCLRoPEOp(bn, name, op_param, threadCount); + } + int pose_type = op_param["pose_type"]; + if (op_param.find("rope_theta") == op_param.end()) { + return new OpenCLRoPEOp(bn, name, pose_type, threadCount); + } + float rope_theta = op_param["rope_theta"]; + int max_position_embeddings = op_param["max_position_embeddings"]; + if (op_param.find("partial_rotary_factor") == op_param.end()) { + return new OpenCLRoPEOp(bn, name, pose_type, rope_theta, max_position_embeddings, threadCount); + } + float partial_rotary_factor = op_param["partial_rotary_factor"]; + return new OpenCLRoPEOp(bn, name, pose_type, rope_theta, partial_rotary_factor, max_position_embeddings, threadCount); +} + +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLRoPEOp.hpp b/mllm/backends/opencl/op/OpenCLRoPEOp.hpp new file mode 100644 index 000000000..ad2baa42a --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLRoPEOp.hpp @@ -0,0 +1,80 @@ +#ifndef OPENCL_ROPE_OP_HPP +#define OPENCL_ROPE_OP_HPP + +#include "Op.hpp" +#include "../OpenCLBackend.hpp" +#include "Types.hpp" + +namespace mllm { + +class OpenCLRoPEOp : public Op { +public: + OpenCLRoPEOp(Backend *bn, string opName, OpParam &config, int threadCount); + OpenCLRoPEOp(Backend *bn, string opName, int pose_type, int threadCount); + OpenCLRoPEOp(Backend *bn, string opName, int pose_type, float rope_theta, int max_position_embeddings, int threadCount); + OpenCLRoPEOp(Backend *bn, string opName, int pose_type, float rope_theta, float partial_rotary_factor, int max_position_embeddings, int threadCount); + + ~OpenCLRoPEOp() override; + + ErrorCode reshape(vector> inputs, vector> outputs) override; + ErrorCode setUp(vector> inputs, vector> outputs) override; + ErrorCode execute(vector> inputs, vector> outputs) override; + + void clearCache() override { + pos_offset_ = 0; + } + +private: + // 原来的成员变量改为 static(全局共享) + static 
cl_mem sin_buffer_fp32_; + static cl_mem cos_buffer_fp32_; + static cl_mem sin_buffer_fp16_; + static cl_mem cos_buffer_fp16_; + static size_t buffer_size_fp32_; + static size_t buffer_size_fp16_; + static vector> sin_table_cpu_fp32_; + static vector> cos_table_cpu_fp32_; + static int partial_dim_cached_; + + void _init(int threadCount); + void _computeSinCosTable(int partial_dim); + + // FP32 buffers + // cl_mem sin_buffer_fp32_ = nullptr; + // cl_mem cos_buffer_fp32_ = nullptr; + // size_t buffer_size_fp32_ = 0; + // vector> sin_table_cpu_fp32_; + // vector> cos_table_cpu_fp32_; + + // FP16 buffers + // cl_mem sin_buffer_fp16_ = nullptr; + // cl_mem cos_buffer_fp16_ = nullptr; + // size_t buffer_size_fp16_ = 0; + + // FP32 kernels + cl_kernel kernel_llama_fp32_ = nullptr; + cl_kernel kernel_hf_fp32_ = nullptr; + + // FP16 kernels + cl_kernel kernel_llama_fp16_ = nullptr; + cl_kernel kernel_hf_fp16_ = nullptr; + + OpParam config_; + RoPEType pose_type_; + int pos_max_ = 4096; + float rope_theta_ = 10000.0f; + float partial_rotary_factor_ = 1.0f; + // int partial_dim_cached_ = 0; + int pos_offset_ = 0; + + OpenCLBackend *ocl_backend_ = nullptr; +}; + +class OpenCLRoPEOpCreator : public OpenCLBackend::Creator { +public: + Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override; +}; + +} // namespace mllm + +#endif // OPENCL_ROPE_OP_HPP \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLScatterAddOp.cpp b/mllm/backends/opencl/op/OpenCLScatterAddOp.cpp new file mode 100644 index 000000000..81b9faa20 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLScatterAddOp.cpp @@ -0,0 +1,96 @@ +#include "OpenCLScatterAddOp.hpp" +#include "Types.hpp" +// #include "utils/OpenCLTools.hpp" + +namespace mllm { + +// 构造函数、析构函数、reshape、setUp 保持不变... 
+OpenCLScatterAddOp::OpenCLScatterAddOp(Backend *bn, std::string name, Chl dim) : + Op(bn, std::move(name)), dim_(dim) { + ocl_backend_ = dynamic_cast(backend_); + if (ocl_backend_ == nullptr) throw std::runtime_error("Backend is not OpenCLBackend"); + + const std::string kernel_path = "kernel/scatter_add.cl"; + // cl_program program = ocl_backend_->getProgram(kernel_path, "-cl-std=CL1.2"); + std::string build_options; + if (ocl_backend_->has_fp16_support()) { + build_options += " -DSUPPORTS_FP16"; + } + + cl_program program = ocl_backend_->getProgram(kernel_path, build_options + " -cl-std=CL1.2"); + + cl_int err; + kernel_fp32_ = clCreateKernel(program, "scatter_add_fp32", &err); + check_cl_error(err, "clCreateKernel for scatter_add_fp32"); + kernel_fp16_ = clCreateKernel(program, "scatter_add_fp16", &err); + check_cl_error(err, "clCreateKernel for scatter_add_fp16"); +} + +OpenCLScatterAddOp::~OpenCLScatterAddOp() { + if (kernel_fp32_) clReleaseKernel(kernel_fp32_); + if (kernel_fp16_) clReleaseKernel(kernel_fp16_); +} + +ErrorCode OpenCLScatterAddOp::reshape(vector> inputs, vector> outputs) { + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLScatterAddOp::setUp(vector> inputs, vector> outputs) { + inputs[0]->to(MLLM_OPENCL); + inputs[1]->to(MLLM_OPENCL); + inputs[2]->to(MLLM_OPENCL); + return MLLM_NO_ERROR; +} + +// **execute 函数已修正** +ErrorCode OpenCLScatterAddOp::execute(vector> inputs, vector> outputs) { + auto self = inputs[0]; + auto value = inputs[1]; + auto indices = inputs[2]; + + if (dim_ != SEQUENCE) { + std::cerr << "This version of OpenCLScatterAddOp only supports SEQUENCE dimension." 
<< std::endl; + return NOT_SUPPORT; + } + if (self->ctype() != BSHD || value->ctype() != BSHD) { + return NOT_SUPPORT; + } + + cl_kernel kernel_to_use; + if (self->dtype() == MLLM_TYPE_F32) { + kernel_to_use = kernel_fp32_; + } else if (self->dtype() == MLLM_TYPE_F16) { + kernel_to_use = kernel_fp16_; + } else { + return NOT_SUPPORT; + } + + cl_mem self_buf = ocl_backend_->get_cl_mem(*self); + cl_mem value_buf = ocl_backend_->get_cl_mem(*value); + cl_mem indices_buf = ocl_backend_->get_cl_mem(*indices); + + const int B = self->batch(); + const int H = self->head(); + const int D = self->dimension(); + const int S_self = self->sequence(); + const int S_value = value->sequence(); + + clSetKernelArg(kernel_to_use, 0, sizeof(cl_mem), &self_buf); // 参数 0: self + clSetKernelArg(kernel_to_use, 1, sizeof(cl_mem), &value_buf); // 参数 1: value + clSetKernelArg(kernel_to_use, 2, sizeof(cl_mem), &indices_buf); // 参数 2: indices + + clSetKernelArg(kernel_to_use, 3, sizeof(int), &B); + clSetKernelArg(kernel_to_use, 4, sizeof(int), &H); + clSetKernelArg(kernel_to_use, 5, sizeof(int), &D); + clSetKernelArg(kernel_to_use, 6, sizeof(int), &S_self); + clSetKernelArg(kernel_to_use, 7, sizeof(int), &S_value); + const size_t global_work_size[3] = {(size_t)D, (size_t)H, (size_t)B * S_value}; + + cl_event event; + cl_int err = clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 3, nullptr, global_work_size, nullptr, 0, nullptr, &event); + check_cl_error(err, "clEnqueueNDRangeKernel for ScatterAdd (in-place, sequence)"); + + return MLLM_NO_ERROR; +} + +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLScatterAddOp.hpp b/mllm/backends/opencl/op/OpenCLScatterAddOp.hpp new file mode 100644 index 000000000..8f3ebee99 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLScatterAddOp.hpp @@ -0,0 +1,37 @@ +#ifndef OPENCL_SCATTER_ADD_OP_HPP +#define OPENCL_SCATTER_ADD_OP_HPP + +#include "Op.hpp" +#include "../OpenCLBackend.hpp" +#include "Types.hpp" 
+ +namespace mllm { + +class OpenCLScatterAddOp : public Op { +public: + OpenCLScatterAddOp(Backend *bn, std::string name, Chl dim); + ~OpenCLScatterAddOp() override; + + ErrorCode reshape(vector> inputs, vector> outputs) override; + ErrorCode setUp(vector> inputs, vector> outputs) override; + ErrorCode execute(vector> inputs, vector> outputs) override; + +private: + Chl dim_; // The axis of `self` that `indices` refers to. + cl_kernel kernel_fp32_ = nullptr; + cl_kernel kernel_fp16_ = nullptr; + OpenCLBackend *ocl_backend_ = nullptr; +}; + +class OpenCLScatterAddOpCreator : public OpenCLBackend::Creator { +public: + Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + // CPU版本的命名是"dim",我们保持一致 + // Chl dim = (Chl)op_param.at("dim"); + return new OpenCLScatterAddOp(bn, name, SEQUENCE); + } +}; + +} // namespace mllm + +#endif // OPENCL_SCATTER_ADD_OP_HPP \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLSiLUOp.cpp b/mllm/backends/opencl/op/OpenCLSiLUOp.cpp new file mode 100644 index 000000000..9ef4e4ff4 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLSiLUOp.cpp @@ -0,0 +1,91 @@ +// 文件名: ops/OpenCLSiLUOp.cpp + +#include "OpenCLSiLUOp.hpp" +#include "Types.hpp" +#include "utils/OpenCLTools.hpp" +#include + +namespace mllm { + +OpenCLSiLUOp::OpenCLSiLUOp(Backend *bn, std::string name) : + Op(bn, std::move(name)) { + ocl_backend_ = dynamic_cast(backend_); + if (ocl_backend_ == nullptr) throw std::runtime_error("Backend is not OpenCLBackend"); + + const std::string kernel_path = "kernel/silu.cl"; + cl_program program = ocl_backend_->getProgram(kernel_path); + + cl_int err; + kernel_fp32_ = clCreateKernel(program, "silu_fp32", &err); + check_cl_error(err, "clCreateKernel for silu_fp32"); + + kernel_fp16_ = clCreateKernel(program, "silu_fp16", &err); + check_cl_error(err, "clCreateKernel for silu_fp16"); +} + +OpenCLSiLUOp::~OpenCLSiLUOp() { + if (kernel_fp32_) clReleaseKernel(kernel_fp32_); + if (kernel_fp16_) 
clReleaseKernel(kernel_fp16_); +} + +ErrorCode OpenCLSiLUOp::reshape(vector> inputs, vector> outputs) { + // SiLU 是元素级操作,输出形状与输入形状完全相同 + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLSiLUOp::setUp(vector> inputs, vector> outputs) { + // 确保输入在设备上,并为输出分配内存 + inputs[0]->to(MLLM_OPENCL); + outputs[0]->setDtype(inputs[0]->dtype()); + outputs[0]->alloc(); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLSiLUOp::execute(vector> inputs, vector> outputs) { + auto input = inputs[0]; + auto output = outputs[0]; + const int count = input->count(); + + cl_kernel kernel_to_use = nullptr; + size_t global_work_size[1]; + const size_t *local_work_size_ptr = nullptr; + const size_t local_work_size[1] = {256}; // 典型的工作组大小 + + if (input->dtype() == MLLM_TYPE_F32) { + kernel_to_use = kernel_fp32_; + global_work_size[0] = count; + } else if (input->dtype() == MLLM_TYPE_F16) { + kernel_to_use = kernel_fp16_; + + // ✨ **核心修正**: 简化并修正启动配置 + // 我们只需要启动足够的线程来处理向量部分 + size_t vec_count = count / 4; + // 向上取整到工作组大小的倍数 + global_work_size[0] = ((vec_count + local_work_size[0] - 1) / local_work_size[0]) * local_work_size[0]; + // 如果元素总数少于一个工作组的大小,确保至少启动一个工作组 + if (global_work_size[0] == 0 && count > 0) { + global_work_size[0] = local_work_size[0]; + } + local_work_size_ptr = local_work_size; + + } else { + return NOT_SUPPORT; + } + + cl_mem in_buf = ocl_backend_->get_cl_mem(*input); + cl_mem out_buf = ocl_backend_->get_cl_mem(*output); + + clSetKernelArg(kernel_to_use, 0, sizeof(cl_mem), &in_buf); + clSetKernelArg(kernel_to_use, 1, sizeof(cl_mem), &out_buf); + clSetKernelArg(kernel_to_use, 2, sizeof(int), &count); + + cl_event event; + cl_int err = clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 1, nullptr, global_work_size, local_work_size_ptr, 0, nullptr, &event); + ocl_backend_->addProfilingEvent(this->name(), event); + check_cl_error(err, "clEnqueueNDRangeKernel for SiLU"); + 
+ return MLLM_NO_ERROR; +} + +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLSiLUOp.hpp b/mllm/backends/opencl/op/OpenCLSiLUOp.hpp new file mode 100644 index 000000000..cbd7c6679 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLSiLUOp.hpp @@ -0,0 +1,37 @@ +// 文件名: ops/OpenCLSiLUOp.hpp + +#ifndef OPENCL_SILU_OP_HPP +#define OPENCL_SILU_OP_HPP + +#include "Op.hpp" +#include "../OpenCLBackend.hpp" + +namespace mllm { + +class OpenCLSiLUOp : public Op { +public: + OpenCLSiLUOp(Backend *bn, std::string name); + ~OpenCLSiLUOp() override; + + ErrorCode reshape(vector> inputs, vector> outputs) override; + ErrorCode setUp(vector> inputs, vector> outputs) override; + ErrorCode execute(vector> inputs, vector> outputs) override; + +private: + cl_kernel kernel_fp32_ = nullptr; + cl_kernel kernel_fp16_ = nullptr; + + OpenCLBackend *ocl_backend_ = nullptr; +}; + +class OpenCLSiLUOpCreator : public OpenCLBackend::Creator { +public: + Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + // SiLU通常没有额外参数,但保留 op_param 以备将来扩展 + return new OpenCLSiLUOp(bn, name); + } +}; + +} // namespace mllm + +#endif // OPENCL_SILU_OP_HPP \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLSoftMaxOp.cpp b/mllm/backends/opencl/op/OpenCLSoftMaxOp.cpp new file mode 100644 index 000000000..5b2f55900 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLSoftMaxOp.cpp @@ -0,0 +1,113 @@ +#include "OpenCLSoftMaxOp.hpp" +#include "Types.hpp" +#include "utils/OpenCLTools.hpp" + +namespace mllm { + +OpenCLSoftMaxOp::OpenCLSoftMaxOp(Backend *bn, std::string name, int axis, bool do_causal_mask) : + Op(bn, std::move(name)), axis_(axis), do_causal_mask_(do_causal_mask) { + ocl_backend_ = dynamic_cast(backend_); + if (ocl_backend_ == nullptr) throw std::runtime_error("Backend is not OpenCLBackend"); + + if (axis_ == DIMENSION) { + const std::string kernel_path = "kernel/softmax.cl"; + + std::string build_options; + if 
(ocl_backend_->has_fp16_support()) { + build_options += " -DSUPPORTS_FP16"; + } + + cl_program program = ocl_backend_->getProgram(kernel_path, build_options); + + cl_int err; + + kernel_fp32_d_ = clCreateKernel(program, "softmax_f32_along_d", &err); + check_cl_error(err, "clCreateKernel for softmax_f32_along_d"); + + // 仅当硬件支持FP16时,才创建FP16内核 + // if (ocl_backend_->has_fp16_support()) { + kernel_fp16_d_ = clCreateKernel(program, "softmax_fp16_along_d", &err); + check_cl_error(err, "clCreateKernel for softmax_fp16_along_d"); + // } + } +} + +OpenCLSoftMaxOp::~OpenCLSoftMaxOp() { + if (kernel_fp32_d_) clReleaseKernel(kernel_fp32_d_); + if (kernel_fp16_d_) clReleaseKernel(kernel_fp16_d_); +} + +ErrorCode OpenCLSoftMaxOp::reshape(vector> inputs, vector> outputs) { + outputs[0]->setCtype(inputs[0]->ctype()); + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLSoftMaxOp::setUp(vector> inputs, vector> outputs) { + // 确保输入在设备上,并为输出分配内存 + for (auto &input : inputs) { + input->to(MLLM_OPENCL); + } + outputs[0]->setDtype(inputs[0]->dtype()); + outputs[0]->alloc(); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLSoftMaxOp::execute(vector> inputs, vector> outputs) { + if (axis_ != DIMENSION) { + throw std::runtime_error("OpenCLSoftMaxOp currently only supports axis=DIMENSION"); + } + + auto input = inputs[0]; + auto output = outputs[0]; + + cl_kernel kernel_to_use = nullptr; + if (input->dtype() == MLLM_TYPE_F32) { + kernel_to_use = kernel_fp32_d_; + } else if (input->dtype() == MLLM_TYPE_F16) { + // 如果fp16内核未创建成功(因为硬件不支持),这里会是nullptr + if (kernel_fp16_d_ == nullptr) { + throw std::runtime_error("FP16 Softmax kernel is not available on this device."); + } + kernel_to_use = kernel_fp16_d_; + } else { + return NOT_SUPPORT; + } + + const int B = input->batch(); + const int H = input->head(); + const int S = input->sequence(); + const int D = input->dimension(); + int 
do_causal_mask_int = do_causal_mask_ ? 1 : 0; + if (input->sequence() == 1) { + do_causal_mask_int = 0; + } + + cl_mem src_buf = ocl_backend_->get_cl_mem(*input); + cl_mem dst_buf = ocl_backend_->get_cl_mem(*output); + + // 设置所有内核参数 + clSetKernelArg(kernel_to_use, 0, sizeof(cl_mem), &src_buf); + clSetKernelArg(kernel_to_use, 1, sizeof(cl_mem), &dst_buf); + clSetKernelArg(kernel_to_use, 2, sizeof(int), &B); + clSetKernelArg(kernel_to_use, 3, sizeof(int), &H); + clSetKernelArg(kernel_to_use, 4, sizeof(int), &S); + clSetKernelArg(kernel_to_use, 5, sizeof(int), &D); + clSetKernelArg(kernel_to_use, 6, sizeof(int), &do_causal_mask_int); + + // --- 核心修正:修改内核启动配置 --- + const size_t total_rows = (size_t)B * H * S; + const size_t local_work_size = 256; // 每个工作组的线程数,必须与 .cl 文件中的 SOFTMAX_BLOCK_SIZE 一致 + + // 全局工作大小 = 总行数 * 每个工作组的线程数 + // 这会启动 total_rows 个工作组,每个组有 local_work_size 个线程 + const size_t global_work_size = total_rows * local_work_size; + cl_event event; + cl_int err = clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 1, nullptr, &global_work_size, &local_work_size, 0, nullptr, &event); + ocl_backend_->addProfilingEvent(this->name(), event); + check_cl_error(err, "clEnqueueNDRangeKernel for SoftMax"); + + return MLLM_NO_ERROR; +} + +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLSoftMaxOp.hpp b/mllm/backends/opencl/op/OpenCLSoftMaxOp.hpp new file mode 100644 index 000000000..834d30152 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLSoftMaxOp.hpp @@ -0,0 +1,40 @@ +#ifndef OPENCL_SOFTMAX_OP_HPP +#define OPENCL_SOFTMAX_OP_HPP + +#include "Op.hpp" +#include "../OpenCLBackend.hpp" + +namespace mllm { + +class OpenCLSoftMaxOp : public Op { +public: + OpenCLSoftMaxOp(Backend *bn, std::string name, int axis, bool do_causal_mask); + ~OpenCLSoftMaxOp() override; + + ErrorCode reshape(vector> inputs, vector> outputs) override; + ErrorCode setUp(vector> inputs, vector> outputs) override; + ErrorCode execute(vector> 
inputs, vector> outputs) override; + +private: + int axis_ = 0; + bool do_causal_mask_ = false; + + cl_kernel kernel_fp32_d_ = nullptr; + cl_kernel kernel_fp16_d_ = nullptr; + + OpenCLBackend *ocl_backend_ = nullptr; +}; + +class OpenCLSoftMaxOpCreator : public OpenCLBackend::Creator { +public: + Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + int axis = op_param["axis"]; + bool do_causal_mask = op_param["do_causal_mask"]; + // threadCount is not used for OpenCL ops + return new OpenCLSoftMaxOp(bn, name, axis, do_causal_mask); + } +}; + +} // namespace mllm + +#endif // OPENCL_SOFTMAX_OP_HPP \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLSplitOp.cpp b/mllm/backends/opencl/op/OpenCLSplitOp.cpp new file mode 100644 index 000000000..8920e79be --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLSplitOp.cpp @@ -0,0 +1,126 @@ +#include "OpenCLSplitOp.hpp" +#include "Types.hpp" +// #include "utils/OpenCLTools.hpp" +// #include + +namespace mllm { + +OpenCLSplitOp::OpenCLSplitOp(Backend *bn, std::string name, int num_splits, const std::vector &each_dims, Chl split_dim, int head_size) : + Op(bn, std::move(name)), num_splits_(num_splits), each_dims_(each_dims), split_dim_(split_dim), head_size_(head_size) { + ocl_backend_ = dynamic_cast(backend_); + if (ocl_backend_ == nullptr) throw std::runtime_error("Backend is not OpenCLBackend"); + + const std::string kernel_path = "kernel/split.cl"; + cl_program program = ocl_backend_->getProgram(kernel_path); + + cl_int err; + kernel_fp32_ = clCreateKernel(program, "split_fp32", &err); + check_cl_error(err, "clCreateKernel for split_fp32"); + + kernel_fp16_ = clCreateKernel(program, "split_fp16", &err); + check_cl_error(err, "clCreateKernel for split_fp16"); +} + +OpenCLSplitOp::~OpenCLSplitOp() { + if (kernel_fp32_) clReleaseKernel(kernel_fp32_); + if (kernel_fp16_) clReleaseKernel(kernel_fp16_); +} + +ErrorCode OpenCLSplitOp::reshape(vector> inputs, vector> outputs) { 
+ auto input = inputs[0]; + int split_dim_size = 0; + for (const auto &dim : each_dims_) { + split_dim_size += dim; + } + + for (int i = 0; i < num_splits_; ++i) { + switch (split_dim_) { + case Chl::HEAD: + outputs[i]->reshape(input->batch(), each_dims_[i], input->sequence(), input->dimension()); + break; + case Chl::SEQUENCE: + outputs[i]->reshape(input->batch(), input->head(), each_dims_[i], input->dimension()); + break; + case Chl::DIMENSION: + outputs[i]->reshape(input->batch(), input->head(), input->sequence(), each_dims_[i]); + break; + default: + return NOT_SUPPORT; + } + outputs[i]->setDtype(input->dtype()); + } + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLSplitOp::setUp(vector> inputs, vector> outputs) { + inputs[0]->to(MLLM_OPENCL); + for (auto &output : outputs) { + output->setDtype(inputs[0]->dtype()); + output->to(MLLM_OPENCL); + output->alloc(); + } + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLSplitOp::execute(vector> inputs, vector> outputs) { + auto input = inputs[0]; + + cl_kernel kernel_to_use = (input->dtype() == MLLM_TYPE_F32) ? 
kernel_fp32_ : kernel_fp16_; + + cl_mem in_buf = ocl_backend_->get_cl_mem(*input); + + std::vector offsets(num_splits_, 0); + for (int i = 1; i < num_splits_; ++i) { + offsets[i] = offsets[i - 1] + each_dims_[i - 1]; + } + + int outer_size = 1; + int inner_size = 1; + + switch (split_dim_) { + case Chl::HEAD: + outer_size = input->batch(); + inner_size = input->sequence() * input->dimension(); + break; + case Chl::SEQUENCE: + outer_size = input->batch() * input->head(); + inner_size = input->dimension(); + break; + case Chl::DIMENSION: + outer_size = input->batch() * input->head() * input->sequence(); + inner_size = 1; + break; + default: + return NOT_SUPPORT; + } + int dims = input->shape(split_dim_); + + for (int i = 0; i < num_splits_; ++i) { + cl_mem out_buf = ocl_backend_->get_cl_mem(*outputs[i]); + int split_dim_size = each_dims_[i]; + int offset = offsets[i]; + if (inner_size == 0 || split_dim_size == 0 || outer_size == 0) { + continue; + } + clSetKernelArg(kernel_to_use, 0, sizeof(cl_mem), &in_buf); + clSetKernelArg(kernel_to_use, 1, sizeof(cl_mem), &out_buf); + clSetKernelArg(kernel_to_use, 2, sizeof(int), &outer_size); + clSetKernelArg(kernel_to_use, 3, sizeof(int), &split_dim_size); + clSetKernelArg(kernel_to_use, 4, sizeof(int), &inner_size); + clSetKernelArg(kernel_to_use, 5, sizeof(int), &offset); + clSetKernelArg(kernel_to_use, 6, sizeof(int), &dims); + + const size_t global_work_size[3] = {(size_t)inner_size, (size_t)split_dim_size, (size_t)outer_size}; + cl_event event; + cl_int err = clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 3, nullptr, global_work_size, nullptr, 0, nullptr, &event); + ocl_backend_->addProfilingEvent(this->name() + "split", event); + if (err != CL_SUCCESS) { + std::cout << "clEnqueueNDRangeKernel error: split" << inner_size << " " << split_dim_size << " " << outer_size << std::endl; + } + check_cl_error(err, "clEnqueueNDRangeKernel for Split"); + } + + return MLLM_NO_ERROR; +} + +} // namespace mllm \ No 
newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLSplitOp.hpp b/mllm/backends/opencl/op/OpenCLSplitOp.hpp new file mode 100644 index 000000000..fe8f31968 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLSplitOp.hpp @@ -0,0 +1,46 @@ +#ifndef OPENCL_SPLIT_OP_HPP +#define OPENCL_SPLIT_OP_HPP + +#include "Op.hpp" +#include "../OpenCLBackend.hpp" +#include + +namespace mllm { + +class OpenCLSplitOp : public Op { +public: + OpenCLSplitOp(Backend *bn, std::string name, int num_splits, const std::vector &each_dims, Chl split_dim, int head_size = 1); + ~OpenCLSplitOp() override; + + ErrorCode reshape(vector> inputs, vector> outputs) override; + ErrorCode setUp(vector> inputs, vector> outputs) override; + ErrorCode execute(vector> inputs, vector> outputs) override; + +private: + int num_splits_; + std::vector each_dims_; + int head_size_ = 1; + Chl split_dim_; + + cl_kernel kernel_fp32_ = nullptr; + cl_kernel kernel_fp16_ = nullptr; + OpenCLBackend *ocl_backend_ = nullptr; +}; + +class OpenCLSplitOpCreator : public OpenCLBackend::Creator { +public: + Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + int num_splits = static_cast(op_param.at("num_splits")); + std::vector each_dims; + for (int i = 0; i < num_splits; ++i) { + each_dims.push_back(static_cast(op_param.at("dim_" + std::to_string(i)))); + } + Chl split_dim = (Chl)op_param.at("split_dim"); + int head_size = static_cast(op_param.at("head_size")); + return new OpenCLSplitOp(bn, name, num_splits, each_dims, split_dim, head_size); + } +}; + +} // namespace mllm + +#endif // OPENCL_SPLIT_OP_HPP \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLSubOp.cpp b/mllm/backends/opencl/op/OpenCLSubOp.cpp new file mode 100644 index 000000000..ff436550e --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLSubOp.cpp @@ -0,0 +1,128 @@ +#include "OpenCLSubOp.hpp" +#include "Types.hpp" +#include "utils/OpenCLTools.hpp" +#include 
"backends/cpu/third_party/ggml/QuantizeFP16.hpp" + +namespace mllm { + +// 构造函数:与AddOp类似,但内核名称是硬编码的,所以我们仍然指向 "add.cl" +OpenCLSubOp::OpenCLSubOp(Backend *bn, std::string name, float data) : + Op(bn, std::move(name)), data_(data) { + ocl_backend_ = dynamic_cast(backend_); + if (ocl_backend_ == nullptr) throw std::runtime_error("Backend is not OpenCLBackend"); + + // 复用 add.cl 内核文件 + const std::string kernel_path = "kernel/add.cl"; + cl_program program = ocl_backend_->getProgram(kernel_path); + + cl_int err; + // 注意:我们创建的是 add_scalar_* 内核,因为我们将在内核中通过数学技巧实现减法 + kernel_fp32_buffer_ = clCreateKernel(program, "add_scalar_float", &err); + check_cl_error(err, "clCreateKernel for add_scalar_float (used by Sub)"); + + kernel_fp32_image_ = clCreateKernel(program, "add_scalar_float_image2d", &err); + check_cl_error(err, "clCreateKernel for add_scalar_float_image2d (used by Sub)"); + + kernel_fp16_buffer_ = clCreateKernel(program, "add_scalar_fp16_vector", &err); + check_cl_error(err, "clCreateKernel for add_scalar_fp16_vector (used by Sub)"); + + kernel_fp16_image_ = clCreateKernel(program, "add_scalar_fp16_image2d", &err); + check_cl_error(err, "clCreateKernel for add_scalar_fp16_image2d (used by Sub)"); + + sampler_ = clCreateSampler(ocl_backend_->getContext(), CL_FALSE, CL_ADDRESS_CLAMP_TO_EDGE, CL_FILTER_NEAREST, &err); + check_cl_error(err, "clCreateSampler"); +} + +OpenCLSubOp::~OpenCLSubOp() { + if (kernel_fp32_buffer_) clReleaseKernel(kernel_fp32_buffer_); + if (kernel_fp32_image_) clReleaseKernel(kernel_fp32_image_); + if (kernel_fp16_buffer_) clReleaseKernel(kernel_fp16_buffer_); + if (kernel_fp16_image_) clReleaseKernel(kernel_fp16_image_); + if (sampler_) clReleaseSampler(sampler_); +} + +ErrorCode OpenCLSubOp::reshape(vector> inputs, vector> outputs) { + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); + outputs[0]->setDtype(inputs[0]->dtype()); + return MLLM_NO_ERROR; +} + +ErrorCode 
OpenCLSubOp::setUp(vector> inputs, vector> outputs) { + // 与 AddOp 逻辑相同 + inputs[0]->to(MLLM_OPENCL); + auto output = outputs[0]; + output->setDtype(inputs[0]->dtype()); + + auto &out_mem = output->device_memory(); + if (output->dimension() % 4 == 0 && false) { + out_mem.type = MEM_TYPE_IMAGE_2D; + out_mem.image_width = output->dimension() / 4; + out_mem.image_height = output->batch() * output->head() * output->sequence(); + } else { + out_mem.type = MEM_TYPE_BUFFER; + } + output->alloc(); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLSubOp::execute(vector> inputs, vector> outputs) { + auto input_dtype = inputs[0]->dtype(); + auto output = outputs[0]; + + // ✨ 核心技巧:A - B 等价于 A + (-B) + // 我们将要减去的标量取反,然后调用加法内核 + float neg_data = -data_; + + if (output->device_memory().type == MEM_TYPE_IMAGE_2D) { + // Image 路径 + cl_kernel kernel_to_use = (input_dtype == MLLM_TYPE_F32) ? kernel_fp32_image_ : kernel_fp16_image_; + std::vector temp_tensor_storage; + cl_mem inA_mem = get_image_from_tensor(inputs[0], ocl_backend_, temp_tensor_storage); + cl_mem out_mem_handle = ocl_backend_->get_cl_mem(*output); + + clSetKernelArg(kernel_to_use, 0, sizeof(cl_sampler), &sampler_); + clSetKernelArg(kernel_to_use, 1, sizeof(cl_mem), &inA_mem); + if (input_dtype == MLLM_TYPE_F32) { + clSetKernelArg(kernel_to_use, 2, sizeof(float), &neg_data); + } else { + mllm_fp16_t data_fp16 = MLLM_FP32_TO_FP16(neg_data); + clSetKernelArg(kernel_to_use, 2, sizeof(mllm_fp16_t), &data_fp16); + } + clSetKernelArg(kernel_to_use, 3, sizeof(cl_mem), &out_mem_handle); + + const int width = static_cast(output->device_memory().image_width); + const int height = static_cast(output->device_memory().image_height); + clSetKernelArg(kernel_to_use, 4, sizeof(int), &width); + clSetKernelArg(kernel_to_use, 5, sizeof(int), &height); + + const size_t global_work_size[2] = {(size_t)width, (size_t)height}; + clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 2, nullptr, global_work_size, nullptr, 0, nullptr, 
nullptr); + + } else { + // Buffer 路径 + cl_kernel kernel_to_use = (input_dtype == MLLM_TYPE_F32) ? kernel_fp32_buffer_ : kernel_fp16_buffer_; + cl_mem in0_buf = ocl_backend_->get_cl_mem(*inputs[0]); + cl_mem out_buf = ocl_backend_->get_cl_mem(*output); + + clSetKernelArg(kernel_to_use, 0, sizeof(cl_mem), &in0_buf); + if (input_dtype == MLLM_TYPE_F32) { + clSetKernelArg(kernel_to_use, 1, sizeof(float), &neg_data); + } else { + mllm_fp16_t data_fp16 = MLLM_FP32_TO_FP16(neg_data); + clSetKernelArg(kernel_to_use, 1, sizeof(mllm_fp16_t), &data_fp16); + } + clSetKernelArg(kernel_to_use, 2, sizeof(cl_mem), &out_buf); + + size_t count = inputs[0]->count(); + if (input_dtype == MLLM_TYPE_F16) { count /= 4; } + + const size_t global_work_size[1] = {count}; + cl_event event; + clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 1, nullptr, global_work_size, nullptr, 0, nullptr, &event); + ocl_backend_->addProfilingEvent(this->name() + "sub", event); + } + + return MLLM_NO_ERROR; +} + +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLSubOp.hpp b/mllm/backends/opencl/op/OpenCLSubOp.hpp new file mode 100644 index 000000000..8574f468d --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLSubOp.hpp @@ -0,0 +1,42 @@ +#ifndef OPENCL_SUBTRACTION_OP_HPP +#define OPENCL_SUBTRACTION_OP_HPP + +#include "Op.hpp" +#include "../OpenCLBackend.hpp" + +namespace mllm { + +class OpenCLSubOp : public Op { +public: + OpenCLSubOp(Backend *bn, std::string name, float data); + ~OpenCLSubOp() override; + + ErrorCode reshape(vector> inputs, vector> outputs) override; + ErrorCode setUp(vector> inputs, vector> outputs) override; + ErrorCode execute(vector> inputs, vector> outputs) override; + +private: + float data_; // 用于存储要减去的标量 + + cl_kernel kernel_fp32_buffer_ = nullptr; + cl_kernel kernel_fp32_image_ = nullptr; + cl_kernel kernel_fp16_buffer_ = nullptr; + cl_kernel kernel_fp16_image_ = nullptr; + + cl_sampler sampler_ = nullptr; + OpenCLBackend 
*ocl_backend_ = nullptr; +}; + +// OpenCLSubOp 的创建器 +class OpenCLSubOpCreator : public OpenCLBackend::Creator { +public: + Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + // 从 op_param 中解析出要减去的标量数据 + float data = op_param["data"]; + return new OpenCLSubOp(bn, name, data); + } +}; + +} // namespace mllm + +#endif // OPENCL_SUBTRACTION_OP_HPP \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLSubTwoOp.cpp b/mllm/backends/opencl/op/OpenCLSubTwoOp.cpp new file mode 100644 index 000000000..66cc5d988 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLSubTwoOp.cpp @@ -0,0 +1,124 @@ +// 文件名: ops/OpenCLSubTwoOp.cpp + +#include "OpenCLSubTwoOp.hpp" +#include "Types.hpp" +#include "utils/OpenCLTools.hpp" + +namespace mllm { + +OpenCLSubTwoOp::OpenCLSubTwoOp(Backend *bn, std::string name) : + Op(bn, std::move(name)) { + ocl_backend_ = dynamic_cast(backend_); + if (ocl_backend_ == nullptr) throw std::runtime_error("Backend is not OpenCLBackend"); + + // 关键:加载新的 sub.cl 内核文件 + const std::string kernel_path = "kernel/sub.cl"; + cl_program program = ocl_backend_->getProgram(kernel_path); + + cl_int err; + // 关键:创建减法内核 + kernel_fp32_buffer_ = clCreateKernel(program, "sub_float", &err); + check_cl_error(err, "clCreateKernel for sub_float"); + + kernel_fp32_image_ = clCreateKernel(program, "sub_float_image2d", &err); + check_cl_error(err, "clCreateKernel for sub_float_image2d"); + + kernel_fp16_buffer_ = clCreateKernel(program, "sub_fp16_vector", &err); + check_cl_error(err, "clCreateKernel for sub_fp16_vector"); + + kernel_fp16_image_ = clCreateKernel(program, "sub_fp16_image2d", &err); + check_cl_error(err, "clCreateKernel for sub_fp16_image2d"); + + sampler_ = clCreateSampler(ocl_backend_->getContext(), CL_FALSE, CL_ADDRESS_CLAMP_TO_EDGE, CL_FILTER_NEAREST, &err); + check_cl_error(err, "clCreateSampler"); +} + +OpenCLSubTwoOp::~OpenCLSubTwoOp() { + if (kernel_fp32_buffer_) clReleaseKernel(kernel_fp32_buffer_); + if 
(kernel_fp32_image_) clReleaseKernel(kernel_fp32_image_); + if (kernel_fp16_buffer_) clReleaseKernel(kernel_fp16_buffer_); + if (kernel_fp16_image_) clReleaseKernel(kernel_fp16_image_); + if (sampler_) clReleaseSampler(sampler_); +} + +ErrorCode OpenCLSubTwoOp::reshape(vector> inputs, vector> outputs) { + // 形状逻辑与AddTwoOp相同 + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); + outputs[0]->setDtype(inputs[0]->dtype()); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLSubTwoOp::setUp(vector> inputs, vector> outputs) { + // setUp逻辑与AddTwoOp相同 + for (auto &input : inputs) { + input->to(MLLM_OPENCL); + } + auto output = outputs[0]; + output->setDtype(inputs[0]->dtype()); + + auto &out_mem = output->device_memory(); + if (output->dimension() % 4 == 0 && false) { + out_mem.type = MEM_TYPE_IMAGE_2D; + out_mem.image_width = output->dimension() / 4; + out_mem.image_height = output->batch() * output->head() * output->sequence(); + } else { + out_mem.type = MEM_TYPE_BUFFER; + } + output->alloc(); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLSubTwoOp::execute(vector> inputs, vector> outputs) { + // 这里的逻辑与 AddTwoOp 完全相同,只是 kernel_to_use 指向的是减法内核 + auto input_dtype = inputs[0]->dtype(); + auto output = outputs[0]; + + if (output->device_memory().type == MEM_TYPE_IMAGE_2D) { + cl_kernel kernel_to_use = (input_dtype == MLLM_TYPE_F32) ? 
kernel_fp32_image_ : kernel_fp16_image_; + + std::vector temp_tensor_storage; + cl_mem inA_mem = get_image_from_tensor(inputs[0], ocl_backend_, temp_tensor_storage); + cl_mem inB_mem = get_image_from_tensor(inputs[1], ocl_backend_, temp_tensor_storage); + cl_mem out_mem_handle = ocl_backend_->get_cl_mem(*output); + + clSetKernelArg(kernel_to_use, 0, sizeof(cl_sampler), &sampler_); + clSetKernelArg(kernel_to_use, 1, sizeof(cl_mem), &inA_mem); + clSetKernelArg(kernel_to_use, 2, sizeof(cl_mem), &inB_mem); + clSetKernelArg(kernel_to_use, 3, sizeof(cl_mem), &out_mem_handle); + + const int width = static_cast(output->device_memory().image_width); + const int height = static_cast(output->device_memory().image_height); + + clSetKernelArg(kernel_to_use, 4, sizeof(int), &width); + clSetKernelArg(kernel_to_use, 5, sizeof(int), &height); + + const size_t global_work_size[2] = {(size_t)width, (size_t)height}; + clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 2, nullptr, global_work_size, nullptr, 0, nullptr, nullptr); + + } else { + cl_kernel kernel_to_use = (input_dtype == MLLM_TYPE_F32) ? 
kernel_fp32_buffer_ : kernel_fp16_buffer_; + + cl_mem in0_buf = ocl_backend_->get_cl_mem(*inputs[0]); + cl_mem in1_buf = ocl_backend_->get_cl_mem(*inputs[1]); + cl_mem out_buf = ocl_backend_->get_cl_mem(*output); + + clSetKernelArg(kernel_to_use, 0, sizeof(cl_mem), &in0_buf); + clSetKernelArg(kernel_to_use, 1, sizeof(cl_mem), &in1_buf); + clSetKernelArg(kernel_to_use, 2, sizeof(cl_mem), &out_buf); + + size_t count = inputs[0]->count(); + if (input_dtype == MLLM_TYPE_F16) { + if (count % 4 != 0) { + throw std::runtime_error("[subTwo]For FP16 vector kernel, tensor count must be a multiple of 4."); + } + count /= 4; + } + + const size_t global_work_size[1] = {count}; + clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 1, nullptr, global_work_size, nullptr, 0, nullptr, nullptr); + } + + return MLLM_NO_ERROR; +} + +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLSubTwoOp.hpp b/mllm/backends/opencl/op/OpenCLSubTwoOp.hpp new file mode 100644 index 000000000..0ecbd28b7 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLSubTwoOp.hpp @@ -0,0 +1,40 @@ +// 文件名: ops/OpenCLSubTwoOp.hpp + +#ifndef OPENCL_SUBTRACTION_TWO_OP_HPP +#define OPENCL_SUBTRACTION_TWO_OP_HPP + +#include "Op.hpp" +#include "../OpenCLBackend.hpp" + +namespace mllm { + +class OpenCLSubTwoOp : public Op { +public: + OpenCLSubTwoOp(Backend *bn, std::string name); + ~OpenCLSubTwoOp() override; + + ErrorCode reshape(vector> inputs, vector> outputs) override; + ErrorCode setUp(vector> inputs, vector> outputs) override; + ErrorCode execute(vector> inputs, vector> outputs) override; + +private: + cl_kernel kernel_fp32_buffer_ = nullptr; + cl_kernel kernel_fp32_image_ = nullptr; + cl_kernel kernel_fp16_buffer_ = nullptr; + cl_kernel kernel_fp16_image_ = nullptr; + + cl_sampler sampler_ = nullptr; + OpenCLBackend *ocl_backend_ = nullptr; +}; + +// OpenCLSubTwoOp 的创建器 +class OpenCLSubTwoOpCreator : public OpenCLBackend::Creator { +public: + Op *create(OpParam 
op_param, Backend *bn, string name, int threadCount) const override { + return new OpenCLSubTwoOp(bn, name); + } +}; + +} // namespace mllm + +#endif // OPENCL_SUBTRACTION_TWO_OP_HPP \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLSumOp.cpp b/mllm/backends/opencl/op/OpenCLSumOp.cpp new file mode 100644 index 000000000..7592fdc44 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLSumOp.cpp @@ -0,0 +1,118 @@ +#include "OpenCLSumOp.hpp" +#include "Types.hpp" +// #include "utils/OpenCLTools.hpp" + +namespace mllm { + +OpenCLSumOp::OpenCLSumOp(Backend *bn, std::string name, Chl axis) : + Op(bn, std::move(name)), axis_(axis) { + ocl_backend_ = dynamic_cast(backend_); + if (ocl_backend_ == nullptr) throw std::runtime_error("Backend is not OpenCLBackend"); + + const std::string kernel_path = "kernel/sum.cl"; + + std::string build_options; + if (ocl_backend_->has_fp16_support()) { + build_options += " -DSUPPORTS_FP16"; + } + + cl_program program = ocl_backend_->getProgram(kernel_path, build_options); + + cl_int err; + kernel_fp32_ = clCreateKernel(program, "sum_fp32", &err); + check_cl_error(err, "clCreateKernel for sum_fp32"); + + kernel_fp16_ = clCreateKernel(program, "sum_fp16", &err); + check_cl_error(err, "clCreateKernel for sum_fp16"); +} + +OpenCLSumOp::~OpenCLSumOp() { + if (kernel_fp32_) clReleaseKernel(kernel_fp32_); + if (kernel_fp16_) clReleaseKernel(kernel_fp16_); +} + +ErrorCode OpenCLSumOp::reshape(vector> inputs, vector> outputs) { + int batch = inputs[0]->batch(); + int head = inputs[0]->head(); + int sequence = inputs[0]->sequence(); + int dimension = inputs[0]->dimension(); + + switch (axis_) { + case BATCH: batch = 1; break; + case HEAD: head = 1; break; + case SEQUENCE: sequence = 1; break; + case DIMENSION: dimension = 1; break; + default: break; + } + + outputs[0]->reshape(batch, head, sequence, dimension); + outputs[0]->setDtype(inputs[0]->dtype()); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLSumOp::setUp(vector> inputs, 
vector> outputs) { + inputs[0]->to(MLLM_OPENCL); + outputs[0]->setDtype(inputs[0]->dtype()); + outputs[0]->alloc(); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLSumOp::execute(vector> inputs, vector> outputs) { + auto input = inputs[0]; + auto output = outputs[0]; + + cl_kernel kernel_to_use = (input->dtype() == MLLM_TYPE_F32) ? kernel_fp32_ : kernel_fp16_; + + int outer_size = 1; + int inner_size = 1; + int reduce_size = 1; + + // 1. Get the actual memory index for the reduction axis + const int axis_mem_idx = input->chls().at(axis_); + + // 2. Get the total number of axes from the shape vector's size + const int num_axes = input->shape().size(); + + // 3. Calculate outer_size (product of dimensions before the reduction axis) + for (int i = 0; i < axis_mem_idx; ++i) { + outer_size *= input->legacyShape(i); + } + + // 4. Get the size of the reduction dimension + reduce_size = input->legacyShape(axis_mem_idx); + + // 5. Calculate inner_size (product of dimensions after the reduction axis) + for (int i = axis_mem_idx + 1; i < num_axes; ++i) { + inner_size *= input->legacyShape(i); + } + + cl_mem in_buf = ocl_backend_->get_cl_mem(*input); + cl_mem out_buf = ocl_backend_->get_cl_mem(*output); + + clSetKernelArg(kernel_to_use, 0, sizeof(cl_mem), &in_buf); + clSetKernelArg(kernel_to_use, 1, sizeof(cl_mem), &out_buf); + clSetKernelArg(kernel_to_use, 2, sizeof(int), &outer_size); + clSetKernelArg(kernel_to_use, 3, sizeof(int), &inner_size); + clSetKernelArg(kernel_to_use, 4, sizeof(int), &reduce_size); + + // 1. 将 local_work_size 定义为二维数组 + const size_t local_work_size[2] = {256, 1}; // 第一个维度是规约并行度,第二个是1 + + // 2. 确保 global_work_size 的每个维度都是 local_work_size 对应维度的整数倍 + const size_t global_work_size[2] = { + (size_t)inner_size * local_work_size[0], + (size_t)outer_size * local_work_size[1] // outer_size * 1 + }; + // ============================================================== + + cl_event event; + // 3. 
将二维的 local_work_size 数组传递给内核 + cl_int err = clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 2, nullptr, + global_work_size, local_work_size, 0, nullptr, &event); + ocl_backend_->addProfilingEvent(this->name() + "sum", event); + check_cl_error(err, "clEnqueueNDRangeKernel for Sum"); + + return MLLM_NO_ERROR; +} + +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLSumOp.hpp b/mllm/backends/opencl/op/OpenCLSumOp.hpp new file mode 100644 index 000000000..d5c5b4bc9 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLSumOp.hpp @@ -0,0 +1,35 @@ +#ifndef OPENCL_SUM_OP_HPP +#define OPENCL_SUM_OP_HPP + +#include "Op.hpp" +#include "../OpenCLBackend.hpp" + +namespace mllm { + +class OpenCLSumOp : public Op { +public: + OpenCLSumOp(Backend *bn, std::string name, Chl axis); + ~OpenCLSumOp() override; + + ErrorCode reshape(vector> inputs, vector> outputs) override; + ErrorCode setUp(vector> inputs, vector> outputs) override; + ErrorCode execute(vector> inputs, vector> outputs) override; + +private: + Chl axis_; + cl_kernel kernel_fp32_ = nullptr; + cl_kernel kernel_fp16_ = nullptr; + OpenCLBackend *ocl_backend_ = nullptr; +}; + +class OpenCLSumOpCreator : public OpenCLBackend::Creator { +public: + Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + Chl axis = (Chl)op_param.at("dim"); + return new OpenCLSumOp(bn, name, axis); + } +}; + +} // namespace mllm + +#endif // OPENCL_SUM_OP_HPP \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLTopkOp.cpp b/mllm/backends/opencl/op/OpenCLTopkOp.cpp new file mode 100644 index 000000000..1ec406a3e --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLTopkOp.cpp @@ -0,0 +1,92 @@ +#include "OpenCLTopkOp.hpp" +#include "Types.hpp" +// #include "utils/OpenCLTools.hpp" + +namespace mllm { + +OpenCLTopkOp::OpenCLTopkOp(Backend *bn, std::string name, int k, Chl dim) : + Op(bn, std::move(name)), k_(k), dim_(dim) { + ocl_backend_ = 
dynamic_cast(backend_); + if (ocl_backend_ == nullptr) throw std::runtime_error("Backend is not OpenCLBackend"); + + const std::string kernel_path = "kernel/topk.cl"; + cl_program program = ocl_backend_->getProgram(kernel_path); + + cl_int err; + kernel_fp32_ = clCreateKernel(program, "topk_fp32", &err); + check_cl_error(err, "clCreateKernel for topk_fp32"); + + kernel_fp16_ = clCreateKernel(program, "topk_fp16", &err); + check_cl_error(err, "clCreateKernel for topk_fp16"); +} + +OpenCLTopkOp::~OpenCLTopkOp() { + if (kernel_fp32_) clReleaseKernel(kernel_fp32_); + if (kernel_fp16_) clReleaseKernel(kernel_fp16_); +} + +ErrorCode OpenCLTopkOp::reshape(vector> inputs, vector> outputs) { + if (dim_ != DIMENSION) { + return NOT_SUPPORT; + } + assert(outputs.size() == 2); // topk returns values and indices + + auto input = inputs[0]; + auto values_out = outputs[0]; + auto indices_out = outputs[1]; + + values_out->reshape(input->batch(), input->head(), input->sequence(), k_); + indices_out->reshape(input->batch(), input->head(), input->sequence(), k_); + + values_out->setDtype(input->dtype()); + // 虽然索引通常是整数,但为了简化和保持一致性,这里也使用与输入相同的数据类型 + indices_out->setDtype(input->dtype()); + + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLTopkOp::setUp(vector> inputs, vector> outputs) { + inputs[0]->to(MLLM_OPENCL); + for (auto &output : outputs) { + output->setDtype(inputs[0]->dtype()); + output->alloc(); + } + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLTopkOp::execute(vector> inputs, vector> outputs) { + auto input = inputs[0]; + auto values_out = outputs[0]; + auto indices_out = outputs[1]; + + cl_kernel kernel_to_use = (input->dtype() == MLLM_TYPE_F32) ? 
kernel_fp32_ : kernel_fp16_; + + cl_mem in_buf = ocl_backend_->get_cl_mem(*input); + cl_mem values_buf = ocl_backend_->get_cl_mem(*values_out); + cl_mem indices_buf = ocl_backend_->get_cl_mem(*indices_out); + + const int B = input->batch(); + const int H = input->head(); + const int S = input->sequence(); + const int D = input->dimension(); + + clSetKernelArg(kernel_to_use, 0, sizeof(cl_mem), &in_buf); + clSetKernelArg(kernel_to_use, 1, sizeof(cl_mem), &values_buf); + clSetKernelArg(kernel_to_use, 2, sizeof(cl_mem), &indices_buf); + clSetKernelArg(kernel_to_use, 3, sizeof(int), &D); + clSetKernelArg(kernel_to_use, 4, sizeof(int), &k_); + + // 启动一个工作组来处理每一行 + const size_t total_rows = (size_t)B * H * S; + const size_t local_work_size = 256; // 必须是2的幂,与内核中的 WG_SIZE 保持一致 + const size_t global_work_size = total_rows * local_work_size; + cl_event event; + cl_int err = clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 1, nullptr, + &global_work_size, &local_work_size, 0, nullptr, &event); + ocl_backend_->addProfilingEvent(this->name() + "topk", event); + check_cl_error(err, "clEnqueueNDRangeKernel for TopK"); + + return MLLM_NO_ERROR; +} + +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLTopkOp.hpp b/mllm/backends/opencl/op/OpenCLTopkOp.hpp new file mode 100644 index 000000000..e1a222905 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLTopkOp.hpp @@ -0,0 +1,38 @@ +#ifndef OPENCL_TOPK_OP_HPP +#define OPENCL_TOPK_OP_HPP + +#include "Op.hpp" +#include "../OpenCLBackend.hpp" + +namespace mllm { + +class OpenCLTopkOp : public Op { +public: + OpenCLTopkOp(Backend *bn, std::string name, int k, Chl dim); + ~OpenCLTopkOp() override; + + ErrorCode reshape(vector> inputs, vector> outputs) override; + ErrorCode setUp(vector> inputs, vector> outputs) override; + ErrorCode execute(vector> inputs, vector> outputs) override; + +private: + int k_; + Chl dim_; + + cl_kernel kernel_fp32_ = nullptr; + cl_kernel kernel_fp16_ = 
nullptr; + OpenCLBackend *ocl_backend_ = nullptr; +}; + +class OpenCLTopkOpCreator : public OpenCLBackend::Creator { +public: + Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + int k = static_cast(op_param.at("k")); + Chl dim = (Chl)op_param.at("dim"); + return new OpenCLTopkOp(bn, name, k, dim); + } +}; + +} // namespace mllm + +#endif // OPENCL_TOPK_OP_HPP \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLTransposeOp.cpp b/mllm/backends/opencl/op/OpenCLTransposeOp.cpp new file mode 100644 index 000000000..654e01191 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLTransposeOp.cpp @@ -0,0 +1,176 @@ +#include "OpenCLTransposeOp.hpp" +#include "Types.hpp" +#include "utils/OpenCLTools.hpp" +#include + +namespace mllm { + +OpenCLTransposeOp::OpenCLTransposeOp(Backend *bn, std::string name, const vector> &axiss) : + Op(bn, std::move(name)), axiss_(axiss) { + ocl_backend_ = dynamic_cast(backend_); + if (ocl_backend_ == nullptr) throw std::runtime_error("Backend is not OpenCLBackend"); + + const std::string kernel_path = "kernel/transpose.cl"; + cl_program program = ocl_backend_->getProgram(kernel_path); + + cl_int err; + kernel_fp32_2d_ = clCreateKernel(program, "transpose_float_2d", &err); + check_cl_error(err, "clCreateKernel for transpose_float_2d"); + + kernel_fp16_2d_ = clCreateKernel(program, "transpose_fp16_2d", &err); + check_cl_error(err, "clCreateKernel for transpose_fp16_2d"); + + kernel_fp32_bshd_ = clCreateKernel(program, "transpose_bshd2bhsd_fp32", &err); + check_cl_error(err, "clCreateKernel for transpose_bshd2bhsd_fp32"); + kernel_fp16_bshd_ = clCreateKernel(program, "transpose_bshd2bhsd_fp16", &err); + check_cl_error(err, "clCreateKernel for transpose_bshd2bhsd_fp16"); + + kernel_fp32_bhsd_ = clCreateKernel(program, "transpose_bhsd2bshd_fp32", &err); + check_cl_error(err, "clCreateKernel for transpose_bhsd2bshd_fp32"); + kernel_fp16_bhsd_ = clCreateKernel(program, "transpose_bhsd2bshd_fp16", 
&err); + check_cl_error(err, "clCreateKernel for transpose_bhsd2bshd_fp16"); +} + +OpenCLTransposeOp::~OpenCLTransposeOp() { + if (kernel_fp32_2d_) clReleaseKernel(kernel_fp32_2d_); + if (kernel_fp16_2d_) clReleaseKernel(kernel_fp16_2d_); + if (kernel_fp32_bshd_) clReleaseKernel(kernel_fp32_bshd_); + if (kernel_fp16_bshd_) clReleaseKernel(kernel_fp16_bshd_); +} + +ErrorCode OpenCLTransposeOp::reshape(vector> inputs, vector> outputs) { + // Transpose on BHSD -> BHDS (swapping Sequence and Dimension) + if (axiss_.size() == 1 && axiss_[0].first == SEQUENCE && axiss_[0].second == DIMENSION && inputs[0]->ctype() == BHSD) { + outputs[0]->setCtype(BHSD); + outputs[0]->setDtype(inputs[0]->dtype()); + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->dimension(), inputs[0]->sequence()); + } else if (axiss_.size() == 1 && axiss_[0].first == HEAD && axiss_[0].second == SEQUENCE) { + // H,S 转置 (BSHD -> BHSD) + auto input = inputs[0]; + auto output = outputs[0]; + // 这一步是元数据变换,定义逻辑形状 + output->transCopyShape(input->shape()); + output->chls() = input->chls(); + std::swap(output->chls()[HEAD], output->chls()[SEQUENCE]); + output->changeCtype(input->shape().size()); + // 物理形状与输入保持一致,因为将在execute中进行物理拷贝 + output->reshape(input->batch(), input->head(), input->sequence(), input->dimension()); + } else { + std::cerr << "error reshape" << std::endl; + } + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLTransposeOp::setUp(vector> inputs, vector> outputs) { + // 确保输入和输出的数据类型一致 + outputs[0]->setDtype(inputs[0]->dtype()); + + // 将输入张量的数据转移到OpenCL设备 + for (auto &input : inputs) { + input->to(MLLM_OPENCL); + } + + // 为输出张量分配内存,您的框架会根据分页策略进行管理 + outputs[0]->alloc(); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLTransposeOp::execute(vector> inputs, vector> outputs) { + auto input = inputs[0]; + auto output = outputs[0]; + + cl_mem parent_src_buffer = ocl_backend_->get_cl_mem(*input); + cl_mem parent_dst_buffer = ocl_backend_->get_cl_mem(*output); + cl_int err; + + // === 新增: 处理 
H,S 轴转置的逻辑分支 === + if (axiss_.size() == 1 && axiss_[0].first == HEAD && axiss_[0].second == SEQUENCE) { + cl_kernel kernel_to_use = nullptr; + if (input->ctype() == BSHD) { // BSHD -> BHSD + if (input->dtype() == MLLM_TYPE_F32) { + kernel_to_use = kernel_fp32_bshd_; + } else { + kernel_to_use = kernel_fp16_bshd_; + } + } else if (input->ctype() == BHSD) { // BHSD -> BSHD + if (input->dtype() == MLLM_TYPE_F32) { + kernel_to_use = kernel_fp32_bhsd_; + } else { + kernel_to_use = kernel_fp16_bhsd_; + } + } else { + return NOT_SUPPORT; + } + const int B = input->batch(); + const int H = input->head(); + const int S = input->sequence(); + const int D = input->dimension(); + + cl_mem src_buf = ocl_backend_->get_cl_mem(*input); + cl_mem dst_buf = ocl_backend_->get_cl_mem(*output); + + clSetKernelArg(kernel_to_use, 0, sizeof(cl_mem), &src_buf); + clSetKernelArg(kernel_to_use, 1, sizeof(cl_mem), &dst_buf); + clSetKernelArg(kernel_to_use, 2, sizeof(int), &B); + clSetKernelArg(kernel_to_use, 3, sizeof(int), &H); + clSetKernelArg(kernel_to_use, 4, sizeof(int), &S); + clSetKernelArg(kernel_to_use, 5, sizeof(int), &D); + + const size_t global_work_size[3] = {(size_t)D, (size_t)S, (size_t)H * B}; + cl_event event; + clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 3, nullptr, global_work_size, nullptr, 0, nullptr, &event); + ocl_backend_->addProfilingEvent(this->name() + "transpose", event); + } + // === 处理 S,D 轴转置的逻辑保持不变 === + else if (axiss_.size() == 1 && axiss_[0].first == SEQUENCE && axiss_[0].second == DIMENSION && inputs[0]->ctype() == BHSD) { + cl_kernel kernel_to_use = nullptr; + size_t element_size = 0; + if (input->dtype() == MLLM_TYPE_F32) { + kernel_to_use = kernel_fp32_2d_; + element_size = sizeof(float); + } else if (input->dtype() == MLLM_TYPE_F16) { + kernel_to_use = kernel_fp16_2d_; + element_size = sizeof(mllm_fp16_t); + } else { + return NOT_SUPPORT; + } + for (int b = 0; b < input->batch(); ++b) { + for (int h = 0; h < input->head(); ++h) { + 
const int S = input->sequence(); + const int D = input->dimension(); + + // 计算偏移量(以元素数量为单位) + int src_offset_elements = (b * input->head() + h) * S * D; + int dst_offset_elements = (b * output->head() + h) * D * S; + + // 不再创建 SubBuffer + // clSetKernelArg(kernel_to_use, 0, sizeof(cl_mem), &src_sub_buffer); + // clSetKernelArg(kernel_to_use, 1, sizeof(cl_mem), &dst_sub_buffer); + + // 传递父缓冲区和偏移量 + clSetKernelArg(kernel_to_use, 0, sizeof(cl_mem), &parent_src_buffer); + clSetKernelArg(kernel_to_use, 1, sizeof(cl_mem), &parent_dst_buffer); + clSetKernelArg(kernel_to_use, 2, sizeof(int), &S); + clSetKernelArg(kernel_to_use, 3, sizeof(int), &D); + clSetKernelArg(kernel_to_use, 4, sizeof(int), &src_offset_elements); + clSetKernelArg(kernel_to_use, 5, sizeof(int), &dst_offset_elements); + + const size_t block_dim = 16; + const size_t global_work_size[2] = { + (size_t)D + ((block_dim - (size_t)D % block_dim) % block_dim), + (size_t)S + ((block_dim - (size_t)S % block_dim) % block_dim)}; + const size_t local_work_size[2] = {block_dim, block_dim}; + cl_event event; + clEnqueueNDRangeKernel(ocl_backend_->getQueue(), kernel_to_use, 2, nullptr, global_work_size, local_work_size, 0, nullptr, &event); + ocl_backend_->addProfilingEvent(this->name() + "transpose", event); + } + } + } else { + std::cerr << "OpenCLTransposeOp execute error: unsupported transpose axis or ctype" << std::endl; + return NOT_SUPPORT; + } + + return MLLM_NO_ERROR; +} + +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLTransposeOp.hpp b/mllm/backends/opencl/op/OpenCLTransposeOp.hpp new file mode 100644 index 000000000..47213b5c7 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLTransposeOp.hpp @@ -0,0 +1,49 @@ +#ifndef OPENCL_TRANSPOSE_FUNC_OP_HPP +#define OPENCL_TRANSPOSE_FUNC_OP_HPP + +#include "Op.hpp" +#include "../OpenCLBackend.hpp" + +namespace mllm { + +class OpenCLTransposeOp : public Op { +public: + OpenCLTransposeOp(Backend *bn, std::string name, const 
vector> &axiss); + ~OpenCLTransposeOp() override; + + ErrorCode reshape(vector> inputs, vector> outputs) override; + ErrorCode setUp(vector> inputs, vector> outputs) override; + ErrorCode execute(vector> inputs, vector> outputs) override; + +private: + cl_kernel kernel_fp32_2d_ = nullptr; + cl_kernel kernel_fp16_2d_ = nullptr; + + cl_kernel kernel_fp32_bshd_ = nullptr; + cl_kernel kernel_fp16_bshd_ = nullptr; + + cl_kernel kernel_fp32_bhsd_ = nullptr; + cl_kernel kernel_fp16_bhsd_ = nullptr; + + OpenCLBackend *ocl_backend_ = nullptr; + vector> axiss_; +}; + +class OpenCLTransposeOpCreator : public OpenCLBackend::Creator { +public: + Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + // Example: {"num_pairs": 1, "axis1_0": 2, "axis2_0": 1} (HEAD, SEQUENCE) + int num_pairs = static_cast(op_param.at("num_pairs")); + vector> axiss; + for (int i = 0; i < num_pairs; ++i) { + Chl axis1 = (Chl)op_param.at("axis1_" + std::to_string(i)); + Chl axis2 = (Chl)op_param.at("axis2_" + std::to_string(i)); + axiss.push_back({axis1, axis2}); + } + return new OpenCLTransposeOp(bn, name, axiss); + } +}; + +} // namespace mllm + +#endif // OPENCL_TRANSPOSE_FUNC_OP_HPP \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLViewOp.cpp b/mllm/backends/opencl/op/OpenCLViewOp.cpp new file mode 100644 index 000000000..5b358fc56 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLViewOp.cpp @@ -0,0 +1,105 @@ +#include "OpenCLViewOp.hpp" +#include "Types.hpp" +// #include "utils/OpenCLTools.hpp" +#include +#include + +namespace mllm { + +OpenCLViewOp::OpenCLViewOp(Backend *bn, std::string name, int b, int h, int s, int d) : + Op(bn, std::move(name)), b(b), h(h), s(s), d(d) { +} + +OpenCLViewOp::~OpenCLViewOp() { +} + +ErrorCode OpenCLViewOp::reshape(vector> inputs, vector> outputs) { + int dim_b = inputs[0]->batch(); + int dim_h = inputs[0]->head(); + int dim_s = inputs[0]->sequence(); + int dim_d = inputs[0]->dimension(); + if (b == -1 && h 
== 1 && s == 1 && d == -1) { // sequence & head & dimension -> dimension + dim_s = 1; + dim_h = 1; + dim_d = inputs[0]->sequence() * inputs[0]->head() * inputs[0]->dimension(); + } else if (b == -1 && h == -1 && s == 1 && d == 1) { // sequence & head & dimension -> sequence + dim_s = inputs[0]->sequence() * inputs[0]->head() * inputs[0]->dimension(); + dim_h = 1; + dim_d = 1; + } else if (b == 1 && h == 1 && s == -1 && d != -1 && inputs[0]->ctype() == BCTHW) { // batch & head & sequence -> sequence + dim_b = 1; + dim_s = inputs[0]->channel() * inputs[0]->time() * inputs[0]->batch() * inputs[0]->height() * inputs[0]->width() / d; + dim_h = 1; + dim_d = d; + } else if (b == 1 && h == 1 && s == -1 && d != -1) { // batch & head & sequence -> sequence + dim_b = 1; + dim_s = inputs[0]->sequence() * inputs[0]->batch() * inputs[0]->head() * inputs[0]->dimension() / d; + dim_h = 1; + dim_d = d; + } else if (b == -1 && h != -1 && s == -1 && d != -1) { // head & dimension + if (h != ANYDIM && d != ANYDIM) { + assert(inputs[0]->dimension() * inputs[0]->head() == h * d); + dim_h = h; + dim_d = d; + } else if (h != ANYDIM) { + dim_h = h; + dim_d = inputs[0]->dimension() * inputs[0]->head() / h; + } else if (d != ANYDIM) { + dim_h = inputs[0]->dimension() * inputs[0]->head() / d; + dim_d = d; + } else { + std::cout << "[TODO]Tensor.View not support!!!!" << std::endl; + } + } else if (b == -1 && h != -1 && s != -1 && d == -1) { // head & sequence + if (h != ANYDIM && s != ANYDIM) { + assert(inputs[0]->sequence() * inputs[0]->head() == h * s); + dim_h = h; + dim_s = s; + } else if (h != ANYDIM) { + dim_h = h; + dim_s = inputs[0]->sequence() * inputs[0]->head() / h; + } else if (s != ANYDIM) { + dim_h = inputs[0]->sequence() * inputs[0]->head() / s; + dim_s = s; + } else { + std::cout << "[TODO]Tensor.View not support!!!!" 
<< std::endl; + } + } else if (b != -1 && h == -1 && s != -1 && d == -1) { // batch & sequence + if (b != ANYDIM && s != ANYDIM) { + assert(inputs[0]->sequence() * inputs[0]->batch() == b * s); + dim_b = b; + dim_s = s; + } else if (b != ANYDIM) { + dim_b = b; + dim_s = inputs[0]->sequence() * inputs[0]->batch() / b; + } else if (s != ANYDIM) { + dim_b = inputs[0]->sequence() * inputs[0]->batch() / s; + dim_s = s; + } else { + std::cout << "[TODO]Tensor.View not support!!!!" << std::endl; + } + } else { + std::cout << "[TODO]Tensor.View not support!!!!" << std::endl; + } + if (inputs[0]->ctype() == BCTHW && inputs[0]->name() == outputs[0]->name()) { + outputs[0]->setCtype(BSHD); + } + outputs[0]->reshape(dim_b, dim_h, dim_s, dim_d); + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLViewOp::setUp(vector> inputs, vector> outputs) { + assert(inputs[0]->backend()->type() == MLLM_OPENCL); + if (inputs[0]->sequence() > 0) { + assert(inputs[0] == outputs[0]); + } + return MLLM_NO_ERROR; +} + +ErrorCode OpenCLViewOp::execute(vector> inputs, vector> outputs) { + auto input = inputs[0]; + auto output = outputs[0]; + return MLLM_NO_ERROR; +} + +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/opencl/op/OpenCLViewOp.hpp b/mllm/backends/opencl/op/OpenCLViewOp.hpp new file mode 100644 index 000000000..0ea575698 --- /dev/null +++ b/mllm/backends/opencl/op/OpenCLViewOp.hpp @@ -0,0 +1,37 @@ +#ifndef OPENCL_VIEW_OP_HPP +#define OPENCL_VIEW_OP_HPP + +#include "Op.hpp" +#include "../OpenCLBackend.hpp" + +namespace mllm { + +class OpenCLViewOp : public Op { +public: + OpenCLViewOp(Backend *bn, std::string name, int b, int h, int s, int d); + ~OpenCLViewOp() override; + + ErrorCode reshape(vector> inputs, vector> outputs) override; + ErrorCode setUp(vector> inputs, vector> outputs) override; + ErrorCode execute(vector> inputs, vector> outputs) override; + +private: + int b, h, s, d; +}; + +// OpenCLViewOp 的创建器 +class OpenCLViewOpCreator : public 
OpenCLBackend::Creator { +public: + Op *create(OpParam op_param, Backend *bn, string name, int threadCount) const override { + // 从 op_param 中解析出要减去的标量数据 + int b = static_cast(op_param.at("b")); + int h = static_cast(op_param.at("h")); + int s = static_cast(op_param.at("s")); + int d = static_cast(op_param.at("d")); + return new OpenCLViewOp(bn, name, b, h, s, d); + } +}; + +} // namespace mllm + +#endif // OPENCL_VIEW_OP_HPP \ No newline at end of file diff --git a/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl.h b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl.h new file mode 100644 index 000000000..d55772019 --- /dev/null +++ b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl.h @@ -0,0 +1,1920 @@ +/******************************************************************************* + * Copyright (c) 2008-2020 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ ******************************************************************************/ + +#ifndef __OPENCL_CL_H +#define __OPENCL_CL_H + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(_WIN32) && defined(_MSC_VER) && __CL_HAS_ANON_STRUCT__ + /* Disable warning C4201: nonstandard extension used : nameless struct/union */ + #pragma warning( push ) + #pragma warning( disable : 4201 ) +#endif + +/******************************************************************************/ + +typedef struct _cl_platform_id * cl_platform_id; +typedef struct _cl_device_id * cl_device_id; +typedef struct _cl_context * cl_context; +typedef struct _cl_command_queue * cl_command_queue; +typedef struct _cl_mem * cl_mem; +typedef struct _cl_program * cl_program; +typedef struct _cl_kernel * cl_kernel; +typedef struct _cl_event * cl_event; +typedef struct _cl_sampler * cl_sampler; + +typedef cl_uint cl_bool; /* WARNING! Unlike cl_ types in cl_platform.h, cl_bool is not guaranteed to be the same size as the bool in kernels. 
*/ +typedef cl_ulong cl_bitfield; +typedef cl_ulong cl_properties; +typedef cl_bitfield cl_device_type; +typedef cl_uint cl_platform_info; +typedef cl_uint cl_device_info; +typedef cl_bitfield cl_device_fp_config; +typedef cl_uint cl_device_mem_cache_type; +typedef cl_uint cl_device_local_mem_type; +typedef cl_bitfield cl_device_exec_capabilities; +#ifdef CL_VERSION_2_0 +typedef cl_bitfield cl_device_svm_capabilities; +#endif +typedef cl_bitfield cl_command_queue_properties; +#ifdef CL_VERSION_1_2 +typedef intptr_t cl_device_partition_property; +typedef cl_bitfield cl_device_affinity_domain; +#endif + +typedef intptr_t cl_context_properties; +typedef cl_uint cl_context_info; +#ifdef CL_VERSION_2_0 +typedef cl_properties cl_queue_properties; +#endif +typedef cl_uint cl_command_queue_info; +typedef cl_uint cl_channel_order; +typedef cl_uint cl_channel_type; +typedef cl_bitfield cl_mem_flags; +#ifdef CL_VERSION_2_0 +typedef cl_bitfield cl_svm_mem_flags; +#endif +typedef cl_uint cl_mem_object_type; +typedef cl_uint cl_mem_info; +#ifdef CL_VERSION_1_2 +typedef cl_bitfield cl_mem_migration_flags; +#endif +typedef cl_uint cl_image_info; +#ifdef CL_VERSION_1_1 +typedef cl_uint cl_buffer_create_type; +#endif +typedef cl_uint cl_addressing_mode; +typedef cl_uint cl_filter_mode; +typedef cl_uint cl_sampler_info; +typedef cl_bitfield cl_map_flags; +#ifdef CL_VERSION_2_0 +typedef intptr_t cl_pipe_properties; +typedef cl_uint cl_pipe_info; +#endif +typedef cl_uint cl_program_info; +typedef cl_uint cl_program_build_info; +#ifdef CL_VERSION_1_2 +typedef cl_uint cl_program_binary_type; +#endif +typedef cl_int cl_build_status; +typedef cl_uint cl_kernel_info; +#ifdef CL_VERSION_1_2 +typedef cl_uint cl_kernel_arg_info; +typedef cl_uint cl_kernel_arg_address_qualifier; +typedef cl_uint cl_kernel_arg_access_qualifier; +typedef cl_bitfield cl_kernel_arg_type_qualifier; +#endif +typedef cl_uint cl_kernel_work_group_info; +#ifdef CL_VERSION_2_1 +typedef cl_uint cl_kernel_sub_group_info; 
+#endif +typedef cl_uint cl_event_info; +typedef cl_uint cl_command_type; +typedef cl_uint cl_profiling_info; +#ifdef CL_VERSION_2_0 +typedef cl_properties cl_sampler_properties; +typedef cl_uint cl_kernel_exec_info; +#endif +#ifdef CL_VERSION_3_0 +typedef cl_bitfield cl_device_atomic_capabilities; +typedef cl_bitfield cl_device_device_enqueue_capabilities; +typedef cl_uint cl_khronos_vendor_id; +typedef cl_properties cl_mem_properties; +#endif +typedef cl_uint cl_version; + +typedef struct _cl_image_format { + cl_channel_order image_channel_order; + cl_channel_type image_channel_data_type; +} cl_image_format; + +#ifdef CL_VERSION_1_2 + +typedef struct _cl_image_desc { + cl_mem_object_type image_type; + size_t image_width; + size_t image_height; + size_t image_depth; + size_t image_array_size; + size_t image_row_pitch; + size_t image_slice_pitch; + cl_uint num_mip_levels; + cl_uint num_samples; +#if defined(CL_VERSION_2_0) && __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ union { +#endif + cl_mem buffer; +#if defined(CL_VERSION_2_0) && __CL_HAS_ANON_STRUCT__ + cl_mem mem_object; + }; +#endif +} cl_image_desc; + +#endif + +#ifdef CL_VERSION_1_1 + +typedef struct _cl_buffer_region { + size_t origin; + size_t size; +} cl_buffer_region; + +#endif + +#ifdef CL_VERSION_3_0 + +#define CL_NAME_VERSION_MAX_NAME_SIZE 64 + +typedef struct _cl_name_version { + cl_version version; + char name[CL_NAME_VERSION_MAX_NAME_SIZE]; +} cl_name_version; + +#endif + +/******************************************************************************/ + +/* Error Codes */ +#define CL_SUCCESS 0 +#define CL_DEVICE_NOT_FOUND -1 +#define CL_DEVICE_NOT_AVAILABLE -2 +#define CL_COMPILER_NOT_AVAILABLE -3 +#define CL_MEM_OBJECT_ALLOCATION_FAILURE -4 +#define CL_OUT_OF_RESOURCES -5 +#define CL_OUT_OF_HOST_MEMORY -6 +#define CL_PROFILING_INFO_NOT_AVAILABLE -7 +#define CL_MEM_COPY_OVERLAP -8 +#define CL_IMAGE_FORMAT_MISMATCH -9 +#define CL_IMAGE_FORMAT_NOT_SUPPORTED -10 +#define CL_BUILD_PROGRAM_FAILURE -11 
+#define CL_MAP_FAILURE -12 +#ifdef CL_VERSION_1_1 +#define CL_MISALIGNED_SUB_BUFFER_OFFSET -13 +#define CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST -14 +#endif +#ifdef CL_VERSION_1_2 +#define CL_COMPILE_PROGRAM_FAILURE -15 +#define CL_LINKER_NOT_AVAILABLE -16 +#define CL_LINK_PROGRAM_FAILURE -17 +#define CL_DEVICE_PARTITION_FAILED -18 +#define CL_KERNEL_ARG_INFO_NOT_AVAILABLE -19 +#endif + +#define CL_INVALID_VALUE -30 +#define CL_INVALID_DEVICE_TYPE -31 +#define CL_INVALID_PLATFORM -32 +#define CL_INVALID_DEVICE -33 +#define CL_INVALID_CONTEXT -34 +#define CL_INVALID_QUEUE_PROPERTIES -35 +#define CL_INVALID_COMMAND_QUEUE -36 +#define CL_INVALID_HOST_PTR -37 +#define CL_INVALID_MEM_OBJECT -38 +#define CL_INVALID_IMAGE_FORMAT_DESCRIPTOR -39 +#define CL_INVALID_IMAGE_SIZE -40 +#define CL_INVALID_SAMPLER -41 +#define CL_INVALID_BINARY -42 +#define CL_INVALID_BUILD_OPTIONS -43 +#define CL_INVALID_PROGRAM -44 +#define CL_INVALID_PROGRAM_EXECUTABLE -45 +#define CL_INVALID_KERNEL_NAME -46 +#define CL_INVALID_KERNEL_DEFINITION -47 +#define CL_INVALID_KERNEL -48 +#define CL_INVALID_ARG_INDEX -49 +#define CL_INVALID_ARG_VALUE -50 +#define CL_INVALID_ARG_SIZE -51 +#define CL_INVALID_KERNEL_ARGS -52 +#define CL_INVALID_WORK_DIMENSION -53 +#define CL_INVALID_WORK_GROUP_SIZE -54 +#define CL_INVALID_WORK_ITEM_SIZE -55 +#define CL_INVALID_GLOBAL_OFFSET -56 +#define CL_INVALID_EVENT_WAIT_LIST -57 +#define CL_INVALID_EVENT -58 +#define CL_INVALID_OPERATION -59 +#define CL_INVALID_GL_OBJECT -60 +#define CL_INVALID_BUFFER_SIZE -61 +#define CL_INVALID_MIP_LEVEL -62 +#define CL_INVALID_GLOBAL_WORK_SIZE -63 +#ifdef CL_VERSION_1_1 +#define CL_INVALID_PROPERTY -64 +#endif +#ifdef CL_VERSION_1_2 +#define CL_INVALID_IMAGE_DESCRIPTOR -65 +#define CL_INVALID_COMPILER_OPTIONS -66 +#define CL_INVALID_LINKER_OPTIONS -67 +#define CL_INVALID_DEVICE_PARTITION_COUNT -68 +#endif +#ifdef CL_VERSION_2_0 +#define CL_INVALID_PIPE_SIZE -69 +#define CL_INVALID_DEVICE_QUEUE -70 +#endif +#ifdef 
CL_VERSION_2_2 +#define CL_INVALID_SPEC_ID -71 +#define CL_MAX_SIZE_RESTRICTION_EXCEEDED -72 +#endif + + +/* cl_bool */ +#define CL_FALSE 0 +#define CL_TRUE 1 +#ifdef CL_VERSION_1_2 +#define CL_BLOCKING CL_TRUE +#define CL_NON_BLOCKING CL_FALSE +#endif + +/* cl_platform_info */ +#define CL_PLATFORM_PROFILE 0x0900 +#define CL_PLATFORM_VERSION 0x0901 +#define CL_PLATFORM_NAME 0x0902 +#define CL_PLATFORM_VENDOR 0x0903 +#define CL_PLATFORM_EXTENSIONS 0x0904 +#ifdef CL_VERSION_2_1 +#define CL_PLATFORM_HOST_TIMER_RESOLUTION 0x0905 +#endif +#ifdef CL_VERSION_3_0 +#define CL_PLATFORM_NUMERIC_VERSION 0x0906 +#define CL_PLATFORM_EXTENSIONS_WITH_VERSION 0x0907 +#endif + +/* cl_device_type - bitfield */ +#define CL_DEVICE_TYPE_DEFAULT (1 << 0) +#define CL_DEVICE_TYPE_CPU (1 << 1) +#define CL_DEVICE_TYPE_GPU (1 << 2) +#define CL_DEVICE_TYPE_ACCELERATOR (1 << 3) +#ifdef CL_VERSION_1_2 +#define CL_DEVICE_TYPE_CUSTOM (1 << 4) +#endif +#define CL_DEVICE_TYPE_ALL 0xFFFFFFFF + +/* cl_device_info */ +#define CL_DEVICE_TYPE 0x1000 +#define CL_DEVICE_VENDOR_ID 0x1001 +#define CL_DEVICE_MAX_COMPUTE_UNITS 0x1002 +#define CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS 0x1003 +#define CL_DEVICE_MAX_WORK_GROUP_SIZE 0x1004 +#define CL_DEVICE_MAX_WORK_ITEM_SIZES 0x1005 +#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_CHAR 0x1006 +#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_SHORT 0x1007 +#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_INT 0x1008 +#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_LONG 0x1009 +#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_FLOAT 0x100A +#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_DOUBLE 0x100B +#define CL_DEVICE_MAX_CLOCK_FREQUENCY 0x100C +#define CL_DEVICE_ADDRESS_BITS 0x100D +#define CL_DEVICE_MAX_READ_IMAGE_ARGS 0x100E +#define CL_DEVICE_MAX_WRITE_IMAGE_ARGS 0x100F +#define CL_DEVICE_MAX_MEM_ALLOC_SIZE 0x1010 +#define CL_DEVICE_IMAGE2D_MAX_WIDTH 0x1011 +#define CL_DEVICE_IMAGE2D_MAX_HEIGHT 0x1012 +#define CL_DEVICE_IMAGE3D_MAX_WIDTH 0x1013 +#define CL_DEVICE_IMAGE3D_MAX_HEIGHT 0x1014 +#define 
CL_DEVICE_IMAGE3D_MAX_DEPTH 0x1015 +#define CL_DEVICE_IMAGE_SUPPORT 0x1016 +#define CL_DEVICE_MAX_PARAMETER_SIZE 0x1017 +#define CL_DEVICE_MAX_SAMPLERS 0x1018 +#define CL_DEVICE_MEM_BASE_ADDR_ALIGN 0x1019 +#define CL_DEVICE_MIN_DATA_TYPE_ALIGN_SIZE 0x101A +#define CL_DEVICE_SINGLE_FP_CONFIG 0x101B +#define CL_DEVICE_GLOBAL_MEM_CACHE_TYPE 0x101C +#define CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE 0x101D +#define CL_DEVICE_GLOBAL_MEM_CACHE_SIZE 0x101E +#define CL_DEVICE_GLOBAL_MEM_SIZE 0x101F +#define CL_DEVICE_MAX_CONSTANT_BUFFER_SIZE 0x1020 +#define CL_DEVICE_MAX_CONSTANT_ARGS 0x1021 +#define CL_DEVICE_LOCAL_MEM_TYPE 0x1022 +#define CL_DEVICE_LOCAL_MEM_SIZE 0x1023 +#define CL_DEVICE_ERROR_CORRECTION_SUPPORT 0x1024 +#define CL_DEVICE_PROFILING_TIMER_RESOLUTION 0x1025 +#define CL_DEVICE_ENDIAN_LITTLE 0x1026 +#define CL_DEVICE_AVAILABLE 0x1027 +#define CL_DEVICE_COMPILER_AVAILABLE 0x1028 +#define CL_DEVICE_EXECUTION_CAPABILITIES 0x1029 +#define CL_DEVICE_QUEUE_PROPERTIES 0x102A /* deprecated */ +#ifdef CL_VERSION_2_0 +#define CL_DEVICE_QUEUE_ON_HOST_PROPERTIES 0x102A +#endif +#define CL_DEVICE_NAME 0x102B +#define CL_DEVICE_VENDOR 0x102C +#define CL_DRIVER_VERSION 0x102D +#define CL_DEVICE_PROFILE 0x102E +#define CL_DEVICE_VERSION 0x102F +#define CL_DEVICE_EXTENSIONS 0x1030 +#define CL_DEVICE_PLATFORM 0x1031 +#ifdef CL_VERSION_1_2 +#define CL_DEVICE_DOUBLE_FP_CONFIG 0x1032 +#endif +/* 0x1033 reserved for CL_DEVICE_HALF_FP_CONFIG which is already defined in "cl_ext.h" */ +#ifdef CL_VERSION_1_1 +#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_HALF 0x1034 +#define CL_DEVICE_HOST_UNIFIED_MEMORY 0x1035 /* deprecated */ +#define CL_DEVICE_NATIVE_VECTOR_WIDTH_CHAR 0x1036 +#define CL_DEVICE_NATIVE_VECTOR_WIDTH_SHORT 0x1037 +#define CL_DEVICE_NATIVE_VECTOR_WIDTH_INT 0x1038 +#define CL_DEVICE_NATIVE_VECTOR_WIDTH_LONG 0x1039 +#define CL_DEVICE_NATIVE_VECTOR_WIDTH_FLOAT 0x103A +#define CL_DEVICE_NATIVE_VECTOR_WIDTH_DOUBLE 0x103B +#define CL_DEVICE_NATIVE_VECTOR_WIDTH_HALF 0x103C +#define 
CL_DEVICE_OPENCL_C_VERSION 0x103D +#endif +#ifdef CL_VERSION_1_2 +#define CL_DEVICE_LINKER_AVAILABLE 0x103E +#define CL_DEVICE_BUILT_IN_KERNELS 0x103F +#define CL_DEVICE_IMAGE_MAX_BUFFER_SIZE 0x1040 +#define CL_DEVICE_IMAGE_MAX_ARRAY_SIZE 0x1041 +#define CL_DEVICE_PARENT_DEVICE 0x1042 +#define CL_DEVICE_PARTITION_MAX_SUB_DEVICES 0x1043 +#define CL_DEVICE_PARTITION_PROPERTIES 0x1044 +#define CL_DEVICE_PARTITION_AFFINITY_DOMAIN 0x1045 +#define CL_DEVICE_PARTITION_TYPE 0x1046 +#define CL_DEVICE_REFERENCE_COUNT 0x1047 +#define CL_DEVICE_PREFERRED_INTEROP_USER_SYNC 0x1048 +#define CL_DEVICE_PRINTF_BUFFER_SIZE 0x1049 +#endif +#ifdef CL_VERSION_2_0 +#define CL_DEVICE_IMAGE_PITCH_ALIGNMENT 0x104A +#define CL_DEVICE_IMAGE_BASE_ADDRESS_ALIGNMENT 0x104B +#define CL_DEVICE_MAX_READ_WRITE_IMAGE_ARGS 0x104C +#define CL_DEVICE_MAX_GLOBAL_VARIABLE_SIZE 0x104D +#define CL_DEVICE_QUEUE_ON_DEVICE_PROPERTIES 0x104E +#define CL_DEVICE_QUEUE_ON_DEVICE_PREFERRED_SIZE 0x104F +#define CL_DEVICE_QUEUE_ON_DEVICE_MAX_SIZE 0x1050 +#define CL_DEVICE_MAX_ON_DEVICE_QUEUES 0x1051 +#define CL_DEVICE_MAX_ON_DEVICE_EVENTS 0x1052 +#define CL_DEVICE_SVM_CAPABILITIES 0x1053 +#define CL_DEVICE_GLOBAL_VARIABLE_PREFERRED_TOTAL_SIZE 0x1054 +#define CL_DEVICE_MAX_PIPE_ARGS 0x1055 +#define CL_DEVICE_PIPE_MAX_ACTIVE_RESERVATIONS 0x1056 +#define CL_DEVICE_PIPE_MAX_PACKET_SIZE 0x1057 +#define CL_DEVICE_PREFERRED_PLATFORM_ATOMIC_ALIGNMENT 0x1058 +#define CL_DEVICE_PREFERRED_GLOBAL_ATOMIC_ALIGNMENT 0x1059 +#define CL_DEVICE_PREFERRED_LOCAL_ATOMIC_ALIGNMENT 0x105A +#endif +#ifdef CL_VERSION_2_1 +#define CL_DEVICE_IL_VERSION 0x105B +#define CL_DEVICE_MAX_NUM_SUB_GROUPS 0x105C +#define CL_DEVICE_SUB_GROUP_INDEPENDENT_FORWARD_PROGRESS 0x105D +#endif +#ifdef CL_VERSION_3_0 +#define CL_DEVICE_NUMERIC_VERSION 0x105E +#define CL_DEVICE_EXTENSIONS_WITH_VERSION 0x1060 +#define CL_DEVICE_ILS_WITH_VERSION 0x1061 +#define CL_DEVICE_BUILT_IN_KERNELS_WITH_VERSION 0x1062 +#define CL_DEVICE_ATOMIC_MEMORY_CAPABILITIES 0x1063 
+#define CL_DEVICE_ATOMIC_FENCE_CAPABILITIES 0x1064 +#define CL_DEVICE_NON_UNIFORM_WORK_GROUP_SUPPORT 0x1065 +#define CL_DEVICE_OPENCL_C_ALL_VERSIONS 0x1066 +#define CL_DEVICE_PREFERRED_WORK_GROUP_SIZE_MULTIPLE 0x1067 +#define CL_DEVICE_WORK_GROUP_COLLECTIVE_FUNCTIONS_SUPPORT 0x1068 +#define CL_DEVICE_GENERIC_ADDRESS_SPACE_SUPPORT 0x1069 +/* 0x106A to 0x106E - Reserved for upcoming KHR extension */ +#define CL_DEVICE_OPENCL_C_FEATURES 0x106F +#define CL_DEVICE_DEVICE_ENQUEUE_CAPABILITIES 0x1070 +#define CL_DEVICE_PIPE_SUPPORT 0x1071 +#define CL_DEVICE_LATEST_CONFORMANCE_VERSION_PASSED 0x1072 +#endif + +/* cl_device_fp_config - bitfield */ +#define CL_FP_DENORM (1 << 0) +#define CL_FP_INF_NAN (1 << 1) +#define CL_FP_ROUND_TO_NEAREST (1 << 2) +#define CL_FP_ROUND_TO_ZERO (1 << 3) +#define CL_FP_ROUND_TO_INF (1 << 4) +#define CL_FP_FMA (1 << 5) +#ifdef CL_VERSION_1_1 +#define CL_FP_SOFT_FLOAT (1 << 6) +#endif +#ifdef CL_VERSION_1_2 +#define CL_FP_CORRECTLY_ROUNDED_DIVIDE_SQRT (1 << 7) +#endif + +/* cl_device_mem_cache_type */ +#define CL_NONE 0x0 +#define CL_READ_ONLY_CACHE 0x1 +#define CL_READ_WRITE_CACHE 0x2 + +/* cl_device_local_mem_type */ +#define CL_LOCAL 0x1 +#define CL_GLOBAL 0x2 + +/* cl_device_exec_capabilities - bitfield */ +#define CL_EXEC_KERNEL (1 << 0) +#define CL_EXEC_NATIVE_KERNEL (1 << 1) + +/* cl_command_queue_properties - bitfield */ +#define CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE (1 << 0) +#define CL_QUEUE_PROFILING_ENABLE (1 << 1) +#ifdef CL_VERSION_2_0 +#define CL_QUEUE_ON_DEVICE (1 << 2) +#define CL_QUEUE_ON_DEVICE_DEFAULT (1 << 3) +#endif + +/* cl_context_info */ +#define CL_CONTEXT_REFERENCE_COUNT 0x1080 +#define CL_CONTEXT_DEVICES 0x1081 +#define CL_CONTEXT_PROPERTIES 0x1082 +#ifdef CL_VERSION_1_1 +#define CL_CONTEXT_NUM_DEVICES 0x1083 +#endif + +/* cl_context_properties */ +#define CL_CONTEXT_PLATFORM 0x1084 +#ifdef CL_VERSION_1_2 +#define CL_CONTEXT_INTEROP_USER_SYNC 0x1085 +#endif + +#ifdef CL_VERSION_1_2 + +/* 
cl_device_partition_property */ +#define CL_DEVICE_PARTITION_EQUALLY 0x1086 +#define CL_DEVICE_PARTITION_BY_COUNTS 0x1087 +#define CL_DEVICE_PARTITION_BY_COUNTS_LIST_END 0x0 +#define CL_DEVICE_PARTITION_BY_AFFINITY_DOMAIN 0x1088 + +#endif + +#ifdef CL_VERSION_1_2 + +/* cl_device_affinity_domain */ +#define CL_DEVICE_AFFINITY_DOMAIN_NUMA (1 << 0) +#define CL_DEVICE_AFFINITY_DOMAIN_L4_CACHE (1 << 1) +#define CL_DEVICE_AFFINITY_DOMAIN_L3_CACHE (1 << 2) +#define CL_DEVICE_AFFINITY_DOMAIN_L2_CACHE (1 << 3) +#define CL_DEVICE_AFFINITY_DOMAIN_L1_CACHE (1 << 4) +#define CL_DEVICE_AFFINITY_DOMAIN_NEXT_PARTITIONABLE (1 << 5) + +#endif + +#ifdef CL_VERSION_2_0 + +/* cl_device_svm_capabilities */ +#define CL_DEVICE_SVM_COARSE_GRAIN_BUFFER (1 << 0) +#define CL_DEVICE_SVM_FINE_GRAIN_BUFFER (1 << 1) +#define CL_DEVICE_SVM_FINE_GRAIN_SYSTEM (1 << 2) +#define CL_DEVICE_SVM_ATOMICS (1 << 3) + +#endif + +/* cl_command_queue_info */ +#define CL_QUEUE_CONTEXT 0x1090 +#define CL_QUEUE_DEVICE 0x1091 +#define CL_QUEUE_REFERENCE_COUNT 0x1092 +#define CL_QUEUE_PROPERTIES 0x1093 +#ifdef CL_VERSION_2_0 +#define CL_QUEUE_SIZE 0x1094 +#endif +#ifdef CL_VERSION_2_1 +#define CL_QUEUE_DEVICE_DEFAULT 0x1095 +#endif +#ifdef CL_VERSION_3_0 +#define CL_QUEUE_PROPERTIES_ARRAY 0x1098 +#endif + +/* cl_mem_flags and cl_svm_mem_flags - bitfield */ +#define CL_MEM_READ_WRITE (1 << 0) +#define CL_MEM_WRITE_ONLY (1 << 1) +#define CL_MEM_READ_ONLY (1 << 2) +#define CL_MEM_USE_HOST_PTR (1 << 3) +#define CL_MEM_ALLOC_HOST_PTR (1 << 4) +#define CL_MEM_COPY_HOST_PTR (1 << 5) +/* reserved (1 << 6) */ +#ifdef CL_VERSION_1_2 +#define CL_MEM_HOST_WRITE_ONLY (1 << 7) +#define CL_MEM_HOST_READ_ONLY (1 << 8) +#define CL_MEM_HOST_NO_ACCESS (1 << 9) +#endif +#ifdef CL_VERSION_2_0 +#define CL_MEM_SVM_FINE_GRAIN_BUFFER (1 << 10) /* used by cl_svm_mem_flags only */ +#define CL_MEM_SVM_ATOMICS (1 << 11) /* used by cl_svm_mem_flags only */ +#define CL_MEM_KERNEL_READ_AND_WRITE (1 << 12) +#endif + +#ifdef CL_VERSION_1_2 + +/* 
cl_mem_migration_flags - bitfield */ +#define CL_MIGRATE_MEM_OBJECT_HOST (1 << 0) +#define CL_MIGRATE_MEM_OBJECT_CONTENT_UNDEFINED (1 << 1) + +#endif + +/* cl_channel_order */ +#define CL_R 0x10B0 +#define CL_A 0x10B1 +#define CL_RG 0x10B2 +#define CL_RA 0x10B3 +#define CL_RGB 0x10B4 +#define CL_RGBA 0x10B5 +#define CL_BGRA 0x10B6 +#define CL_ARGB 0x10B7 +#define CL_INTENSITY 0x10B8 +#define CL_LUMINANCE 0x10B9 +#ifdef CL_VERSION_1_1 +#define CL_Rx 0x10BA +#define CL_RGx 0x10BB +#define CL_RGBx 0x10BC +#endif +#ifdef CL_VERSION_2_0 +#define CL_DEPTH 0x10BD +#define CL_sRGB 0x10BF +#define CL_sRGBx 0x10C0 +#define CL_sRGBA 0x10C1 +#define CL_sBGRA 0x10C2 +#define CL_ABGR 0x10C3 +#endif + +/* cl_channel_type */ +#define CL_SNORM_INT8 0x10D0 +#define CL_SNORM_INT16 0x10D1 +#define CL_UNORM_INT8 0x10D2 +#define CL_UNORM_INT16 0x10D3 +#define CL_UNORM_SHORT_565 0x10D4 +#define CL_UNORM_SHORT_555 0x10D5 +#define CL_UNORM_INT_101010 0x10D6 +#define CL_SIGNED_INT8 0x10D7 +#define CL_SIGNED_INT16 0x10D8 +#define CL_SIGNED_INT32 0x10D9 +#define CL_UNSIGNED_INT8 0x10DA +#define CL_UNSIGNED_INT16 0x10DB +#define CL_UNSIGNED_INT32 0x10DC +#define CL_HALF_FLOAT 0x10DD +#define CL_FLOAT 0x10DE +#ifdef CL_VERSION_2_1 +#define CL_UNORM_INT_101010_2 0x10E0 +#endif + +/* cl_mem_object_type */ +#define CL_MEM_OBJECT_BUFFER 0x10F0 +#define CL_MEM_OBJECT_IMAGE2D 0x10F1 +#define CL_MEM_OBJECT_IMAGE3D 0x10F2 +#ifdef CL_VERSION_1_2 +#define CL_MEM_OBJECT_IMAGE2D_ARRAY 0x10F3 +#define CL_MEM_OBJECT_IMAGE1D 0x10F4 +#define CL_MEM_OBJECT_IMAGE1D_ARRAY 0x10F5 +#define CL_MEM_OBJECT_IMAGE1D_BUFFER 0x10F6 +#endif +#ifdef CL_VERSION_2_0 +#define CL_MEM_OBJECT_PIPE 0x10F7 +#endif + +/* cl_mem_info */ +#define CL_MEM_TYPE 0x1100 +#define CL_MEM_FLAGS 0x1101 +#define CL_MEM_SIZE 0x1102 +#define CL_MEM_HOST_PTR 0x1103 +#define CL_MEM_MAP_COUNT 0x1104 +#define CL_MEM_REFERENCE_COUNT 0x1105 +#define CL_MEM_CONTEXT 0x1106 +#ifdef CL_VERSION_1_1 +#define CL_MEM_ASSOCIATED_MEMOBJECT 0x1107 +#define 
CL_MEM_OFFSET 0x1108 +#endif +#ifdef CL_VERSION_2_0 +#define CL_MEM_USES_SVM_POINTER 0x1109 +#endif +#ifdef CL_VERSION_3_0 +#define CL_MEM_PROPERTIES 0x110A +#endif + +/* cl_image_info */ +#define CL_IMAGE_FORMAT 0x1110 +#define CL_IMAGE_ELEMENT_SIZE 0x1111 +#define CL_IMAGE_ROW_PITCH 0x1112 +#define CL_IMAGE_SLICE_PITCH 0x1113 +#define CL_IMAGE_WIDTH 0x1114 +#define CL_IMAGE_HEIGHT 0x1115 +#define CL_IMAGE_DEPTH 0x1116 +#ifdef CL_VERSION_1_2 +#define CL_IMAGE_ARRAY_SIZE 0x1117 +#define CL_IMAGE_BUFFER 0x1118 +#define CL_IMAGE_NUM_MIP_LEVELS 0x1119 +#define CL_IMAGE_NUM_SAMPLES 0x111A +#endif + + +/* cl_pipe_info */ +#ifdef CL_VERSION_2_0 +#define CL_PIPE_PACKET_SIZE 0x1120 +#define CL_PIPE_MAX_PACKETS 0x1121 +#endif +#ifdef CL_VERSION_3_0 +#define CL_PIPE_PROPERTIES 0x1122 +#endif + +/* cl_addressing_mode */ +#define CL_ADDRESS_NONE 0x1130 +#define CL_ADDRESS_CLAMP_TO_EDGE 0x1131 +#define CL_ADDRESS_CLAMP 0x1132 +#define CL_ADDRESS_REPEAT 0x1133 +#ifdef CL_VERSION_1_1 +#define CL_ADDRESS_MIRRORED_REPEAT 0x1134 +#endif + +/* cl_filter_mode */ +#define CL_FILTER_NEAREST 0x1140 +#define CL_FILTER_LINEAR 0x1141 + +/* cl_sampler_info */ +#define CL_SAMPLER_REFERENCE_COUNT 0x1150 +#define CL_SAMPLER_CONTEXT 0x1151 +#define CL_SAMPLER_NORMALIZED_COORDS 0x1152 +#define CL_SAMPLER_ADDRESSING_MODE 0x1153 +#define CL_SAMPLER_FILTER_MODE 0x1154 +#ifdef CL_VERSION_2_0 +/* These enumerants are for the cl_khr_mipmap_image extension. + They have since been added to cl_ext.h with an appropriate + KHR suffix, but are left here for backwards compatibility. 
*/ +#define CL_SAMPLER_MIP_FILTER_MODE 0x1155 +#define CL_SAMPLER_LOD_MIN 0x1156 +#define CL_SAMPLER_LOD_MAX 0x1157 +#endif +#ifdef CL_VERSION_3_0 +#define CL_SAMPLER_PROPERTIES 0x1158 +#endif + +/* cl_map_flags - bitfield */ +#define CL_MAP_READ (1 << 0) +#define CL_MAP_WRITE (1 << 1) +#ifdef CL_VERSION_1_2 +#define CL_MAP_WRITE_INVALIDATE_REGION (1 << 2) +#endif + +/* cl_program_info */ +#define CL_PROGRAM_REFERENCE_COUNT 0x1160 +#define CL_PROGRAM_CONTEXT 0x1161 +#define CL_PROGRAM_NUM_DEVICES 0x1162 +#define CL_PROGRAM_DEVICES 0x1163 +#define CL_PROGRAM_SOURCE 0x1164 +#define CL_PROGRAM_BINARY_SIZES 0x1165 +#define CL_PROGRAM_BINARIES 0x1166 +#ifdef CL_VERSION_1_2 +#define CL_PROGRAM_NUM_KERNELS 0x1167 +#define CL_PROGRAM_KERNEL_NAMES 0x1168 +#endif +#ifdef CL_VERSION_2_1 +#define CL_PROGRAM_IL 0x1169 +#endif +#ifdef CL_VERSION_2_2 +#define CL_PROGRAM_SCOPE_GLOBAL_CTORS_PRESENT 0x116A +#define CL_PROGRAM_SCOPE_GLOBAL_DTORS_PRESENT 0x116B +#endif + +/* cl_program_build_info */ +#define CL_PROGRAM_BUILD_STATUS 0x1181 +#define CL_PROGRAM_BUILD_OPTIONS 0x1182 +#define CL_PROGRAM_BUILD_LOG 0x1183 +#ifdef CL_VERSION_1_2 +#define CL_PROGRAM_BINARY_TYPE 0x1184 +#endif +#ifdef CL_VERSION_2_0 +#define CL_PROGRAM_BUILD_GLOBAL_VARIABLE_TOTAL_SIZE 0x1185 +#endif + +#ifdef CL_VERSION_1_2 + +/* cl_program_binary_type */ +#define CL_PROGRAM_BINARY_TYPE_NONE 0x0 +#define CL_PROGRAM_BINARY_TYPE_COMPILED_OBJECT 0x1 +#define CL_PROGRAM_BINARY_TYPE_LIBRARY 0x2 +#define CL_PROGRAM_BINARY_TYPE_EXECUTABLE 0x4 + +#endif + +/* cl_build_status */ +#define CL_BUILD_SUCCESS 0 +#define CL_BUILD_NONE -1 +#define CL_BUILD_ERROR -2 +#define CL_BUILD_IN_PROGRESS -3 + +/* cl_kernel_info */ +#define CL_KERNEL_FUNCTION_NAME 0x1190 +#define CL_KERNEL_NUM_ARGS 0x1191 +#define CL_KERNEL_REFERENCE_COUNT 0x1192 +#define CL_KERNEL_CONTEXT 0x1193 +#define CL_KERNEL_PROGRAM 0x1194 +#ifdef CL_VERSION_1_2 +#define CL_KERNEL_ATTRIBUTES 0x1195 +#endif + +#ifdef CL_VERSION_1_2 + +/* cl_kernel_arg_info */ 
+#define CL_KERNEL_ARG_ADDRESS_QUALIFIER 0x1196 +#define CL_KERNEL_ARG_ACCESS_QUALIFIER 0x1197 +#define CL_KERNEL_ARG_TYPE_NAME 0x1198 +#define CL_KERNEL_ARG_TYPE_QUALIFIER 0x1199 +#define CL_KERNEL_ARG_NAME 0x119A + +#endif + +#ifdef CL_VERSION_1_2 + +/* cl_kernel_arg_address_qualifier */ +#define CL_KERNEL_ARG_ADDRESS_GLOBAL 0x119B +#define CL_KERNEL_ARG_ADDRESS_LOCAL 0x119C +#define CL_KERNEL_ARG_ADDRESS_CONSTANT 0x119D +#define CL_KERNEL_ARG_ADDRESS_PRIVATE 0x119E + +#endif + +#ifdef CL_VERSION_1_2 + +/* cl_kernel_arg_access_qualifier */ +#define CL_KERNEL_ARG_ACCESS_READ_ONLY 0x11A0 +#define CL_KERNEL_ARG_ACCESS_WRITE_ONLY 0x11A1 +#define CL_KERNEL_ARG_ACCESS_READ_WRITE 0x11A2 +#define CL_KERNEL_ARG_ACCESS_NONE 0x11A3 + +#endif + +#ifdef CL_VERSION_1_2 + +/* cl_kernel_arg_type_qualifier */ +#define CL_KERNEL_ARG_TYPE_NONE 0 +#define CL_KERNEL_ARG_TYPE_CONST (1 << 0) +#define CL_KERNEL_ARG_TYPE_RESTRICT (1 << 1) +#define CL_KERNEL_ARG_TYPE_VOLATILE (1 << 2) +#ifdef CL_VERSION_2_0 +#define CL_KERNEL_ARG_TYPE_PIPE (1 << 3) +#endif + +#endif + +/* cl_kernel_work_group_info */ +#define CL_KERNEL_WORK_GROUP_SIZE 0x11B0 +#define CL_KERNEL_COMPILE_WORK_GROUP_SIZE 0x11B1 +#define CL_KERNEL_LOCAL_MEM_SIZE 0x11B2 +#define CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE 0x11B3 +#define CL_KERNEL_PRIVATE_MEM_SIZE 0x11B4 +#ifdef CL_VERSION_1_2 +#define CL_KERNEL_GLOBAL_WORK_SIZE 0x11B5 +#endif + +#ifdef CL_VERSION_2_1 + +/* cl_kernel_sub_group_info */ +#define CL_KERNEL_MAX_SUB_GROUP_SIZE_FOR_NDRANGE 0x2033 +#define CL_KERNEL_SUB_GROUP_COUNT_FOR_NDRANGE 0x2034 +#define CL_KERNEL_LOCAL_SIZE_FOR_SUB_GROUP_COUNT 0x11B8 +#define CL_KERNEL_MAX_NUM_SUB_GROUPS 0x11B9 +#define CL_KERNEL_COMPILE_NUM_SUB_GROUPS 0x11BA + +#endif + +#ifdef CL_VERSION_2_0 + +/* cl_kernel_exec_info */ +#define CL_KERNEL_EXEC_INFO_SVM_PTRS 0x11B6 +#define CL_KERNEL_EXEC_INFO_SVM_FINE_GRAIN_SYSTEM 0x11B7 + +#endif + +/* cl_event_info */ +#define CL_EVENT_COMMAND_QUEUE 0x11D0 +#define CL_EVENT_COMMAND_TYPE 
0x11D1 +#define CL_EVENT_REFERENCE_COUNT 0x11D2 +#define CL_EVENT_COMMAND_EXECUTION_STATUS 0x11D3 +#ifdef CL_VERSION_1_1 +#define CL_EVENT_CONTEXT 0x11D4 +#endif + +/* cl_command_type */ +#define CL_COMMAND_NDRANGE_KERNEL 0x11F0 +#define CL_COMMAND_TASK 0x11F1 +#define CL_COMMAND_NATIVE_KERNEL 0x11F2 +#define CL_COMMAND_READ_BUFFER 0x11F3 +#define CL_COMMAND_WRITE_BUFFER 0x11F4 +#define CL_COMMAND_COPY_BUFFER 0x11F5 +#define CL_COMMAND_READ_IMAGE 0x11F6 +#define CL_COMMAND_WRITE_IMAGE 0x11F7 +#define CL_COMMAND_COPY_IMAGE 0x11F8 +#define CL_COMMAND_COPY_IMAGE_TO_BUFFER 0x11F9 +#define CL_COMMAND_COPY_BUFFER_TO_IMAGE 0x11FA +#define CL_COMMAND_MAP_BUFFER 0x11FB +#define CL_COMMAND_MAP_IMAGE 0x11FC +#define CL_COMMAND_UNMAP_MEM_OBJECT 0x11FD +#define CL_COMMAND_MARKER 0x11FE +#define CL_COMMAND_ACQUIRE_GL_OBJECTS 0x11FF +#define CL_COMMAND_RELEASE_GL_OBJECTS 0x1200 +#ifdef CL_VERSION_1_1 +#define CL_COMMAND_READ_BUFFER_RECT 0x1201 +#define CL_COMMAND_WRITE_BUFFER_RECT 0x1202 +#define CL_COMMAND_COPY_BUFFER_RECT 0x1203 +#define CL_COMMAND_USER 0x1204 +#endif +#ifdef CL_VERSION_1_2 +#define CL_COMMAND_BARRIER 0x1205 +#define CL_COMMAND_MIGRATE_MEM_OBJECTS 0x1206 +#define CL_COMMAND_FILL_BUFFER 0x1207 +#define CL_COMMAND_FILL_IMAGE 0x1208 +#endif +#ifdef CL_VERSION_2_0 +#define CL_COMMAND_SVM_FREE 0x1209 +#define CL_COMMAND_SVM_MEMCPY 0x120A +#define CL_COMMAND_SVM_MEMFILL 0x120B +#define CL_COMMAND_SVM_MAP 0x120C +#define CL_COMMAND_SVM_UNMAP 0x120D +#endif +#ifdef CL_VERSION_3_0 +#define CL_COMMAND_SVM_MIGRATE_MEM 0x120E +#endif + +/* command execution status */ +#define CL_COMPLETE 0x0 +#define CL_RUNNING 0x1 +#define CL_SUBMITTED 0x2 +#define CL_QUEUED 0x3 + +/* cl_buffer_create_type */ +#ifdef CL_VERSION_1_1 +#define CL_BUFFER_CREATE_TYPE_REGION 0x1220 +#endif + +/* cl_profiling_info */ +#define CL_PROFILING_COMMAND_QUEUED 0x1280 +#define CL_PROFILING_COMMAND_SUBMIT 0x1281 +#define CL_PROFILING_COMMAND_START 0x1282 +#define CL_PROFILING_COMMAND_END 0x1283 +#ifdef 
CL_VERSION_2_0 +#define CL_PROFILING_COMMAND_COMPLETE 0x1284 +#endif + +/* cl_device_atomic_capabilities - bitfield */ +#ifdef CL_VERSION_3_0 +#define CL_DEVICE_ATOMIC_ORDER_RELAXED (1 << 0) +#define CL_DEVICE_ATOMIC_ORDER_ACQ_REL (1 << 1) +#define CL_DEVICE_ATOMIC_ORDER_SEQ_CST (1 << 2) +#define CL_DEVICE_ATOMIC_SCOPE_WORK_ITEM (1 << 3) +#define CL_DEVICE_ATOMIC_SCOPE_WORK_GROUP (1 << 4) +#define CL_DEVICE_ATOMIC_SCOPE_DEVICE (1 << 5) +#define CL_DEVICE_ATOMIC_SCOPE_ALL_DEVICES (1 << 6) +#endif + +/* cl_device_device_enqueue_capabilities - bitfield */ +#ifdef CL_VERSION_3_0 +#define CL_DEVICE_QUEUE_SUPPORTED (1 << 0) +#define CL_DEVICE_QUEUE_REPLACEABLE_DEFAULT (1 << 1) +#endif + +/* cl_khronos_vendor_id */ +#define CL_KHRONOS_VENDOR_ID_CODEPLAY 0x10004 + +/* cl_version */ +#define CL_VERSION_MAJOR_BITS (10) +#define CL_VERSION_MINOR_BITS (10) +#define CL_VERSION_PATCH_BITS (12) + +#define CL_VERSION_MAJOR_MASK ((1 << CL_VERSION_MAJOR_BITS) - 1) +#define CL_VERSION_MINOR_MASK ((1 << CL_VERSION_MINOR_BITS) - 1) +#define CL_VERSION_PATCH_MASK ((1 << CL_VERSION_PATCH_BITS) - 1) + +#define CL_VERSION_MAJOR(version) \ + ((version) >> (CL_VERSION_MINOR_BITS + CL_VERSION_PATCH_BITS)) + +#define CL_VERSION_MINOR(version) \ + (((version) >> CL_VERSION_PATCH_BITS) & CL_VERSION_MINOR_MASK) + +#define CL_VERSION_PATCH(version) ((version) & CL_VERSION_PATCH_MASK) + +#define CL_MAKE_VERSION(major, minor, patch) \ + ((((major) & CL_VERSION_MAJOR_MASK) \ + << (CL_VERSION_MINOR_BITS + CL_VERSION_PATCH_BITS)) | \ + (((minor) & CL_VERSION_MINOR_MASK) << CL_VERSION_PATCH_BITS) | \ + ((patch) & CL_VERSION_PATCH_MASK)) + +/********************************************************************************************************/ + +/* CL_NO_PROTOTYPES implies CL_NO_CORE_PROTOTYPES: */ +#if defined(CL_NO_PROTOTYPES) && !defined(CL_NO_CORE_PROTOTYPES) +#define CL_NO_CORE_PROTOTYPES +#endif + +#if !defined(CL_NO_CORE_PROTOTYPES) + +/* Platform API */ +extern CL_API_ENTRY cl_int CL_API_CALL 
+clGetPlatformIDs(cl_uint num_entries, + cl_platform_id * platforms, + cl_uint * num_platforms) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetPlatformInfo(cl_platform_id platform, + cl_platform_info param_name, + size_t param_value_size, + void * param_value, + size_t * param_value_size_ret) CL_API_SUFFIX__VERSION_1_0; + +/* Device APIs */ +extern CL_API_ENTRY cl_int CL_API_CALL +clGetDeviceIDs(cl_platform_id platform, + cl_device_type device_type, + cl_uint num_entries, + cl_device_id * devices, + cl_uint * num_devices) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetDeviceInfo(cl_device_id device, + cl_device_info param_name, + size_t param_value_size, + void * param_value, + size_t * param_value_size_ret) CL_API_SUFFIX__VERSION_1_0; + +#ifdef CL_VERSION_1_2 + +extern CL_API_ENTRY cl_int CL_API_CALL +clCreateSubDevices(cl_device_id in_device, + const cl_device_partition_property * properties, + cl_uint num_devices, + cl_device_id * out_devices, + cl_uint * num_devices_ret) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clRetainDevice(cl_device_id device) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clReleaseDevice(cl_device_id device) CL_API_SUFFIX__VERSION_1_2; + +#endif + +#ifdef CL_VERSION_2_1 + +extern CL_API_ENTRY cl_int CL_API_CALL +clSetDefaultDeviceCommandQueue(cl_context context, + cl_device_id device, + cl_command_queue command_queue) CL_API_SUFFIX__VERSION_2_1; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetDeviceAndHostTimer(cl_device_id device, + cl_ulong* device_timestamp, + cl_ulong* host_timestamp) CL_API_SUFFIX__VERSION_2_1; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetHostTimer(cl_device_id device, + cl_ulong * host_timestamp) CL_API_SUFFIX__VERSION_2_1; + +#endif + +/* Context APIs */ +extern CL_API_ENTRY cl_context CL_API_CALL +clCreateContext(const cl_context_properties * properties, + cl_uint num_devices, + const cl_device_id * 
devices, + void (CL_CALLBACK * pfn_notify)(const char * errinfo, + const void * private_info, + size_t cb, + void * user_data), + void * user_data, + cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_context CL_API_CALL +clCreateContextFromType(const cl_context_properties * properties, + cl_device_type device_type, + void (CL_CALLBACK * pfn_notify)(const char * errinfo, + const void * private_info, + size_t cb, + void * user_data), + void * user_data, + cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clRetainContext(cl_context context) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clReleaseContext(cl_context context) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetContextInfo(cl_context context, + cl_context_info param_name, + size_t param_value_size, + void * param_value, + size_t * param_value_size_ret) CL_API_SUFFIX__VERSION_1_0; + +#ifdef CL_VERSION_3_0 + +extern CL_API_ENTRY cl_int CL_API_CALL +clSetContextDestructorCallback(cl_context context, + void (CL_CALLBACK* pfn_notify)(cl_context context, + void* user_data), + void* user_data) CL_API_SUFFIX__VERSION_3_0; + +#endif + +/* Command Queue APIs */ + +#ifdef CL_VERSION_2_0 + +extern CL_API_ENTRY cl_command_queue CL_API_CALL +clCreateCommandQueueWithProperties(cl_context context, + cl_device_id device, + const cl_queue_properties * properties, + cl_int * errcode_ret) CL_API_SUFFIX__VERSION_2_0; + +#endif + +extern CL_API_ENTRY cl_int CL_API_CALL +clRetainCommandQueue(cl_command_queue command_queue) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clReleaseCommandQueue(cl_command_queue command_queue) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetCommandQueueInfo(cl_command_queue command_queue, + cl_command_queue_info param_name, + size_t param_value_size, + void * param_value, + size_t * param_value_size_ret) CL_API_SUFFIX__VERSION_1_0; + 
+/* Memory Object APIs */ +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateBuffer(cl_context context, + cl_mem_flags flags, + size_t size, + void * host_ptr, + cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_0; + +#ifdef CL_VERSION_1_1 + +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateSubBuffer(cl_mem buffer, + cl_mem_flags flags, + cl_buffer_create_type buffer_create_type, + const void * buffer_create_info, + cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_1; + +#endif + +#ifdef CL_VERSION_1_2 + +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateImage(cl_context context, + cl_mem_flags flags, + const cl_image_format * image_format, + const cl_image_desc * image_desc, + void * host_ptr, + cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_2; + +#endif + +#ifdef CL_VERSION_2_0 + +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreatePipe(cl_context context, + cl_mem_flags flags, + cl_uint pipe_packet_size, + cl_uint pipe_max_packets, + const cl_pipe_properties * properties, + cl_int * errcode_ret) CL_API_SUFFIX__VERSION_2_0; + +#endif + +#ifdef CL_VERSION_3_0 + +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateBufferWithProperties(cl_context context, + const cl_mem_properties * properties, + cl_mem_flags flags, + size_t size, + void * host_ptr, + cl_int * errcode_ret) CL_API_SUFFIX__VERSION_3_0; + +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateImageWithProperties(cl_context context, + const cl_mem_properties * properties, + cl_mem_flags flags, + const cl_image_format * image_format, + const cl_image_desc * image_desc, + void * host_ptr, + cl_int * errcode_ret) CL_API_SUFFIX__VERSION_3_0; + +#endif + +extern CL_API_ENTRY cl_int CL_API_CALL +clRetainMemObject(cl_mem memobj) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clReleaseMemObject(cl_mem memobj) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetSupportedImageFormats(cl_context context, + cl_mem_flags flags, + cl_mem_object_type image_type, + cl_uint num_entries, + 
cl_image_format * image_formats, + cl_uint * num_image_formats) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetMemObjectInfo(cl_mem memobj, + cl_mem_info param_name, + size_t param_value_size, + void * param_value, + size_t * param_value_size_ret) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetImageInfo(cl_mem image, + cl_image_info param_name, + size_t param_value_size, + void * param_value, + size_t * param_value_size_ret) CL_API_SUFFIX__VERSION_1_0; + +#ifdef CL_VERSION_2_0 + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetPipeInfo(cl_mem pipe, + cl_pipe_info param_name, + size_t param_value_size, + void * param_value, + size_t * param_value_size_ret) CL_API_SUFFIX__VERSION_2_0; + +#endif + +#ifdef CL_VERSION_1_1 + +extern CL_API_ENTRY cl_int CL_API_CALL +clSetMemObjectDestructorCallback(cl_mem memobj, + void (CL_CALLBACK * pfn_notify)(cl_mem memobj, + void * user_data), + void * user_data) CL_API_SUFFIX__VERSION_1_1; + +#endif + +/* SVM Allocation APIs */ + +#ifdef CL_VERSION_2_0 + +extern CL_API_ENTRY void * CL_API_CALL +clSVMAlloc(cl_context context, + cl_svm_mem_flags flags, + size_t size, + cl_uint alignment) CL_API_SUFFIX__VERSION_2_0; + +extern CL_API_ENTRY void CL_API_CALL +clSVMFree(cl_context context, + void * svm_pointer) CL_API_SUFFIX__VERSION_2_0; + +#endif + +/* Sampler APIs */ + +#ifdef CL_VERSION_2_0 + +extern CL_API_ENTRY cl_sampler CL_API_CALL +clCreateSamplerWithProperties(cl_context context, + const cl_sampler_properties * sampler_properties, + cl_int * errcode_ret) CL_API_SUFFIX__VERSION_2_0; + +#endif + +extern CL_API_ENTRY cl_int CL_API_CALL +clRetainSampler(cl_sampler sampler) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clReleaseSampler(cl_sampler sampler) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetSamplerInfo(cl_sampler sampler, + cl_sampler_info param_name, + size_t param_value_size, + void * param_value, + size_t * 
param_value_size_ret) CL_API_SUFFIX__VERSION_1_0; + +/* Program Object APIs */ +extern CL_API_ENTRY cl_program CL_API_CALL +clCreateProgramWithSource(cl_context context, + cl_uint count, + const char ** strings, + const size_t * lengths, + cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_program CL_API_CALL +clCreateProgramWithBinary(cl_context context, + cl_uint num_devices, + const cl_device_id * device_list, + const size_t * lengths, + const unsigned char ** binaries, + cl_int * binary_status, + cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_0; + +#ifdef CL_VERSION_1_2 + +extern CL_API_ENTRY cl_program CL_API_CALL +clCreateProgramWithBuiltInKernels(cl_context context, + cl_uint num_devices, + const cl_device_id * device_list, + const char * kernel_names, + cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_2; + +#endif + +#ifdef CL_VERSION_2_1 + +extern CL_API_ENTRY cl_program CL_API_CALL +clCreateProgramWithIL(cl_context context, + const void* il, + size_t length, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_2_1; + +#endif + +extern CL_API_ENTRY cl_int CL_API_CALL +clRetainProgram(cl_program program) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clReleaseProgram(cl_program program) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clBuildProgram(cl_program program, + cl_uint num_devices, + const cl_device_id * device_list, + const char * options, + void (CL_CALLBACK * pfn_notify)(cl_program program, + void * user_data), + void * user_data) CL_API_SUFFIX__VERSION_1_0; + +#ifdef CL_VERSION_1_2 + +extern CL_API_ENTRY cl_int CL_API_CALL +clCompileProgram(cl_program program, + cl_uint num_devices, + const cl_device_id * device_list, + const char * options, + cl_uint num_input_headers, + const cl_program * input_headers, + const char ** header_include_names, + void (CL_CALLBACK * pfn_notify)(cl_program program, + void * user_data), + void * user_data) CL_API_SUFFIX__VERSION_1_2; + +extern 
CL_API_ENTRY cl_program CL_API_CALL +clLinkProgram(cl_context context, + cl_uint num_devices, + const cl_device_id * device_list, + const char * options, + cl_uint num_input_programs, + const cl_program * input_programs, + void (CL_CALLBACK * pfn_notify)(cl_program program, + void * user_data), + void * user_data, + cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_2; + +#endif + +#ifdef CL_VERSION_2_2 + +extern CL_API_ENTRY CL_API_PREFIX__VERSION_2_2_DEPRECATED cl_int CL_API_CALL +clSetProgramReleaseCallback(cl_program program, + void (CL_CALLBACK * pfn_notify)(cl_program program, + void * user_data), + void * user_data) CL_API_SUFFIX__VERSION_2_2_DEPRECATED; + +extern CL_API_ENTRY cl_int CL_API_CALL +clSetProgramSpecializationConstant(cl_program program, + cl_uint spec_id, + size_t spec_size, + const void* spec_value) CL_API_SUFFIX__VERSION_2_2; + +#endif + +#ifdef CL_VERSION_1_2 + +extern CL_API_ENTRY cl_int CL_API_CALL +clUnloadPlatformCompiler(cl_platform_id platform) CL_API_SUFFIX__VERSION_1_2; + +#endif + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetProgramInfo(cl_program program, + cl_program_info param_name, + size_t param_value_size, + void * param_value, + size_t * param_value_size_ret) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetProgramBuildInfo(cl_program program, + cl_device_id device, + cl_program_build_info param_name, + size_t param_value_size, + void * param_value, + size_t * param_value_size_ret) CL_API_SUFFIX__VERSION_1_0; + +/* Kernel Object APIs */ +extern CL_API_ENTRY cl_kernel CL_API_CALL +clCreateKernel(cl_program program, + const char * kernel_name, + cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clCreateKernelsInProgram(cl_program program, + cl_uint num_kernels, + cl_kernel * kernels, + cl_uint * num_kernels_ret) CL_API_SUFFIX__VERSION_1_0; + +#ifdef CL_VERSION_2_1 + +extern CL_API_ENTRY cl_kernel CL_API_CALL +clCloneKernel(cl_kernel source_kernel, + cl_int* 
errcode_ret) CL_API_SUFFIX__VERSION_2_1; + +#endif + +extern CL_API_ENTRY cl_int CL_API_CALL +clRetainKernel(cl_kernel kernel) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clReleaseKernel(cl_kernel kernel) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clSetKernelArg(cl_kernel kernel, + cl_uint arg_index, + size_t arg_size, + const void * arg_value) CL_API_SUFFIX__VERSION_1_0; + +#ifdef CL_VERSION_2_0 + +extern CL_API_ENTRY cl_int CL_API_CALL +clSetKernelArgSVMPointer(cl_kernel kernel, + cl_uint arg_index, + const void * arg_value) CL_API_SUFFIX__VERSION_2_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clSetKernelExecInfo(cl_kernel kernel, + cl_kernel_exec_info param_name, + size_t param_value_size, + const void * param_value) CL_API_SUFFIX__VERSION_2_0; + +#endif + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetKernelInfo(cl_kernel kernel, + cl_kernel_info param_name, + size_t param_value_size, + void * param_value, + size_t * param_value_size_ret) CL_API_SUFFIX__VERSION_1_0; + +#ifdef CL_VERSION_1_2 + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetKernelArgInfo(cl_kernel kernel, + cl_uint arg_indx, + cl_kernel_arg_info param_name, + size_t param_value_size, + void * param_value, + size_t * param_value_size_ret) CL_API_SUFFIX__VERSION_1_2; + +#endif + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetKernelWorkGroupInfo(cl_kernel kernel, + cl_device_id device, + cl_kernel_work_group_info param_name, + size_t param_value_size, + void * param_value, + size_t * param_value_size_ret) CL_API_SUFFIX__VERSION_1_0; + +#ifdef CL_VERSION_2_1 + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetKernelSubGroupInfo(cl_kernel kernel, + cl_device_id device, + cl_kernel_sub_group_info param_name, + size_t input_value_size, + const void* input_value, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_2_1; + +#endif + +/* Event Object APIs */ +extern CL_API_ENTRY cl_int CL_API_CALL 
+clWaitForEvents(cl_uint num_events, + const cl_event * event_list) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetEventInfo(cl_event event, + cl_event_info param_name, + size_t param_value_size, + void * param_value, + size_t * param_value_size_ret) CL_API_SUFFIX__VERSION_1_0; + +#ifdef CL_VERSION_1_1 + +extern CL_API_ENTRY cl_event CL_API_CALL +clCreateUserEvent(cl_context context, + cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_1; + +#endif + +extern CL_API_ENTRY cl_int CL_API_CALL +clRetainEvent(cl_event event) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clReleaseEvent(cl_event event) CL_API_SUFFIX__VERSION_1_0; + +#ifdef CL_VERSION_1_1 + +extern CL_API_ENTRY cl_int CL_API_CALL +clSetUserEventStatus(cl_event event, + cl_int execution_status) CL_API_SUFFIX__VERSION_1_1; + +extern CL_API_ENTRY cl_int CL_API_CALL +clSetEventCallback(cl_event event, + cl_int command_exec_callback_type, + void (CL_CALLBACK * pfn_notify)(cl_event event, + cl_int event_command_status, + void * user_data), + void * user_data) CL_API_SUFFIX__VERSION_1_1; + +#endif + +/* Profiling APIs */ +extern CL_API_ENTRY cl_int CL_API_CALL +clGetEventProfilingInfo(cl_event event, + cl_profiling_info param_name, + size_t param_value_size, + void * param_value, + size_t * param_value_size_ret) CL_API_SUFFIX__VERSION_1_0; + +/* Flush and Finish APIs */ +extern CL_API_ENTRY cl_int CL_API_CALL +clFlush(cl_command_queue command_queue) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clFinish(cl_command_queue command_queue) CL_API_SUFFIX__VERSION_1_0; + +/* Enqueued Commands APIs */ +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueReadBuffer(cl_command_queue command_queue, + cl_mem buffer, + cl_bool blocking_read, + size_t offset, + size_t size, + void * ptr, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_1_0; + +#ifdef CL_VERSION_1_1 + +extern CL_API_ENTRY 
cl_int CL_API_CALL +clEnqueueReadBufferRect(cl_command_queue command_queue, + cl_mem buffer, + cl_bool blocking_read, + const size_t * buffer_origin, + const size_t * host_origin, + const size_t * region, + size_t buffer_row_pitch, + size_t buffer_slice_pitch, + size_t host_row_pitch, + size_t host_slice_pitch, + void * ptr, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_1_1; + +#endif + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueWriteBuffer(cl_command_queue command_queue, + cl_mem buffer, + cl_bool blocking_write, + size_t offset, + size_t size, + const void * ptr, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_1_0; + +#ifdef CL_VERSION_1_1 + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueWriteBufferRect(cl_command_queue command_queue, + cl_mem buffer, + cl_bool blocking_write, + const size_t * buffer_origin, + const size_t * host_origin, + const size_t * region, + size_t buffer_row_pitch, + size_t buffer_slice_pitch, + size_t host_row_pitch, + size_t host_slice_pitch, + const void * ptr, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_1_1; + +#endif + +#ifdef CL_VERSION_1_2 + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueFillBuffer(cl_command_queue command_queue, + cl_mem buffer, + const void * pattern, + size_t pattern_size, + size_t offset, + size_t size, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_1_2; + +#endif + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueCopyBuffer(cl_command_queue command_queue, + cl_mem src_buffer, + cl_mem dst_buffer, + size_t src_offset, + size_t dst_offset, + size_t size, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_1_0; + +#ifdef CL_VERSION_1_1 + +extern CL_API_ENTRY cl_int 
CL_API_CALL +clEnqueueCopyBufferRect(cl_command_queue command_queue, + cl_mem src_buffer, + cl_mem dst_buffer, + const size_t * src_origin, + const size_t * dst_origin, + const size_t * region, + size_t src_row_pitch, + size_t src_slice_pitch, + size_t dst_row_pitch, + size_t dst_slice_pitch, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_1_1; + +#endif + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueReadImage(cl_command_queue command_queue, + cl_mem image, + cl_bool blocking_read, + const size_t * origin, + const size_t * region, + size_t row_pitch, + size_t slice_pitch, + void * ptr, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueWriteImage(cl_command_queue command_queue, + cl_mem image, + cl_bool blocking_write, + const size_t * origin, + const size_t * region, + size_t input_row_pitch, + size_t input_slice_pitch, + const void * ptr, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_1_0; + +#ifdef CL_VERSION_1_2 + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueFillImage(cl_command_queue command_queue, + cl_mem image, + const void * fill_color, + const size_t * origin, + const size_t * region, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_1_2; + +#endif + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueCopyImage(cl_command_queue command_queue, + cl_mem src_image, + cl_mem dst_image, + const size_t * src_origin, + const size_t * dst_origin, + const size_t * region, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueCopyImageToBuffer(cl_command_queue command_queue, + cl_mem src_image, + cl_mem dst_buffer, + const size_t * 
src_origin, + const size_t * region, + size_t dst_offset, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueCopyBufferToImage(cl_command_queue command_queue, + cl_mem src_buffer, + cl_mem dst_image, + size_t src_offset, + const size_t * dst_origin, + const size_t * region, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY void * CL_API_CALL +clEnqueueMapBuffer(cl_command_queue command_queue, + cl_mem buffer, + cl_bool blocking_map, + cl_map_flags map_flags, + size_t offset, + size_t size, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event, + cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY void * CL_API_CALL +clEnqueueMapImage(cl_command_queue command_queue, + cl_mem image, + cl_bool blocking_map, + cl_map_flags map_flags, + const size_t * origin, + const size_t * region, + size_t * image_row_pitch, + size_t * image_slice_pitch, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event, + cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueUnmapMemObject(cl_command_queue command_queue, + cl_mem memobj, + void * mapped_ptr, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_1_0; + +#ifdef CL_VERSION_1_2 + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueMigrateMemObjects(cl_command_queue command_queue, + cl_uint num_mem_objects, + const cl_mem * mem_objects, + cl_mem_migration_flags flags, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_1_2; + +#endif + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueNDRangeKernel(cl_command_queue command_queue, + cl_kernel kernel, + cl_uint work_dim, 
+ const size_t * global_work_offset, + const size_t * global_work_size, + const size_t * local_work_size, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueNativeKernel(cl_command_queue command_queue, + void (CL_CALLBACK * user_func)(void *), + void * args, + size_t cb_args, + cl_uint num_mem_objects, + const cl_mem * mem_list, + const void ** args_mem_loc, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_1_0; + +#ifdef CL_VERSION_1_2 + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueMarkerWithWaitList(cl_command_queue command_queue, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueBarrierWithWaitList(cl_command_queue command_queue, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_1_2; + +#endif + +#ifdef CL_VERSION_2_0 + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueSVMFree(cl_command_queue command_queue, + cl_uint num_svm_pointers, + void * svm_pointers[], + void (CL_CALLBACK * pfn_free_func)(cl_command_queue queue, + cl_uint num_svm_pointers, + void * svm_pointers[], + void * user_data), + void * user_data, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_2_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueSVMMemcpy(cl_command_queue command_queue, + cl_bool blocking_copy, + void * dst_ptr, + const void * src_ptr, + size_t size, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_2_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueSVMMemFill(cl_command_queue command_queue, + void * svm_ptr, + const void * pattern, + size_t pattern_size, + size_t size, + 
cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_2_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueSVMMap(cl_command_queue command_queue, + cl_bool blocking_map, + cl_map_flags flags, + void * svm_ptr, + size_t size, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_2_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueSVMUnmap(cl_command_queue command_queue, + void * svm_ptr, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_2_0; + +#endif + +#ifdef CL_VERSION_2_1 + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueSVMMigrateMem(cl_command_queue command_queue, + cl_uint num_svm_pointers, + const void ** svm_pointers, + const size_t * sizes, + cl_mem_migration_flags flags, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_2_1; + +#endif + +#ifdef CL_VERSION_1_2 + +/* Extension function access + * + * Returns the extension function address for the given function name, + * or NULL if a valid function can not be found. The client must + * check to make sure the address is not NULL, before using or + * calling the returned function address. + */ +extern CL_API_ENTRY void * CL_API_CALL +clGetExtensionFunctionAddressForPlatform(cl_platform_id platform, + const char * func_name) CL_API_SUFFIX__VERSION_1_2; + +#endif + +#ifdef CL_USE_DEPRECATED_OPENCL_1_0_APIS + /* + * WARNING: + * This API introduces mutable state into the OpenCL implementation. It has been REMOVED + * to better facilitate thread safety. The 1.0 API is not thread safe. It is not tested by the + * OpenCL 1.1 conformance test, and consequently may not work or may not work dependably. + * It is likely to be non-performant. Use of this API is not advised. Use at your own risk. 
+ * + * Software developers previously relying on this API are instructed to set the command queue + * properties when creating the queue, instead. + */ + extern CL_API_ENTRY cl_int CL_API_CALL + clSetCommandQueueProperty(cl_command_queue command_queue, + cl_command_queue_properties properties, + cl_bool enable, + cl_command_queue_properties * old_properties) CL_API_SUFFIX__VERSION_1_0_DEPRECATED; +#endif /* CL_USE_DEPRECATED_OPENCL_1_0_APIS */ + +/* Deprecated OpenCL 1.1 APIs */ +extern CL_API_ENTRY CL_API_PREFIX__VERSION_1_1_DEPRECATED cl_mem CL_API_CALL +clCreateImage2D(cl_context context, + cl_mem_flags flags, + const cl_image_format * image_format, + size_t image_width, + size_t image_height, + size_t image_row_pitch, + void * host_ptr, + cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_1_DEPRECATED; + +extern CL_API_ENTRY CL_API_PREFIX__VERSION_1_1_DEPRECATED cl_mem CL_API_CALL +clCreateImage3D(cl_context context, + cl_mem_flags flags, + const cl_image_format * image_format, + size_t image_width, + size_t image_height, + size_t image_depth, + size_t image_row_pitch, + size_t image_slice_pitch, + void * host_ptr, + cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_1_DEPRECATED; + +extern CL_API_ENTRY CL_API_PREFIX__VERSION_1_1_DEPRECATED cl_int CL_API_CALL +clEnqueueMarker(cl_command_queue command_queue, + cl_event * event) CL_API_SUFFIX__VERSION_1_1_DEPRECATED; + +extern CL_API_ENTRY CL_API_PREFIX__VERSION_1_1_DEPRECATED cl_int CL_API_CALL +clEnqueueWaitForEvents(cl_command_queue command_queue, + cl_uint num_events, + const cl_event * event_list) CL_API_SUFFIX__VERSION_1_1_DEPRECATED; + +extern CL_API_ENTRY CL_API_PREFIX__VERSION_1_1_DEPRECATED cl_int CL_API_CALL +clEnqueueBarrier(cl_command_queue command_queue) CL_API_SUFFIX__VERSION_1_1_DEPRECATED; + +extern CL_API_ENTRY CL_API_PREFIX__VERSION_1_1_DEPRECATED cl_int CL_API_CALL +clUnloadCompiler(void) CL_API_SUFFIX__VERSION_1_1_DEPRECATED; + +extern CL_API_ENTRY CL_API_PREFIX__VERSION_1_1_DEPRECATED void * 
CL_API_CALL +clGetExtensionFunctionAddress(const char * func_name) CL_API_SUFFIX__VERSION_1_1_DEPRECATED; + +/* Deprecated OpenCL 2.0 APIs */ +extern CL_API_ENTRY CL_API_PREFIX__VERSION_1_2_DEPRECATED cl_command_queue CL_API_CALL +clCreateCommandQueue(cl_context context, + cl_device_id device, + cl_command_queue_properties properties, + cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_2_DEPRECATED; + +extern CL_API_ENTRY CL_API_PREFIX__VERSION_1_2_DEPRECATED cl_sampler CL_API_CALL +clCreateSampler(cl_context context, + cl_bool normalized_coords, + cl_addressing_mode addressing_mode, + cl_filter_mode filter_mode, + cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_2_DEPRECATED; + +extern CL_API_ENTRY CL_API_PREFIX__VERSION_1_2_DEPRECATED cl_int CL_API_CALL +clEnqueueTask(cl_command_queue command_queue, + cl_kernel kernel, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_1_2_DEPRECATED; + +#endif /* !defined(CL_NO_CORE_PROTOTYPES) */ + +#ifdef __cplusplus +} +#endif + +#if defined(_WIN32) && defined(_MSC_VER) && __CL_HAS_ANON_STRUCT__ + #pragma warning( pop ) +#endif + +#endif /* __OPENCL_CL_H */ diff --git a/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_d3d10.h b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_d3d10.h new file mode 100644 index 000000000..6b56c775b --- /dev/null +++ b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_d3d10.h @@ -0,0 +1,268 @@ +/******************************************************************************* + * Copyright (c) 2008-2023 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + ******************************************************************************/ + +#ifndef OPENCL_CL_D3D10_H_ +#define OPENCL_CL_D3D10_H_ + +/* +** This header is generated from the Khronos OpenCL XML API Registry. +*/ + +#if defined(_MSC_VER) +#if _MSC_VER >=1500 +#pragma warning( push ) +#pragma warning( disable : 4201 ) +#pragma warning( disable : 5105 ) +#endif +#endif +#include +#if defined(_MSC_VER) +#if _MSC_VER >=1500 +#pragma warning( pop ) +#endif +#endif + +#include + +/* CL_NO_PROTOTYPES implies CL_NO_EXTENSION_PROTOTYPES: */ +#if defined(CL_NO_PROTOTYPES) && !defined(CL_NO_EXTENSION_PROTOTYPES) +#define CL_NO_EXTENSION_PROTOTYPES +#endif + +/* CL_NO_EXTENSION_PROTOTYPES implies + CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES and + CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES: */ +#if defined(CL_NO_EXTENSION_PROTOTYPES) && \ + !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) +#define CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES +#endif +#if defined(CL_NO_EXTENSION_PROTOTYPES) && \ + !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) +#define CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/*************************************************************** +* cl_khr_d3d10_sharing +***************************************************************/ +#define cl_khr_d3d10_sharing 1 +#define CL_KHR_D3D10_SHARING_EXTENSION_NAME \ + "cl_khr_d3d10_sharing" + + +#define CL_KHR_D3D10_SHARING_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +typedef cl_uint cl_d3d10_device_source_khr; +typedef cl_uint 
cl_d3d10_device_set_khr; + +/* Error codes */ +#define CL_INVALID_D3D10_DEVICE_KHR -1002 +#define CL_INVALID_D3D10_RESOURCE_KHR -1003 +#define CL_D3D10_RESOURCE_ALREADY_ACQUIRED_KHR -1004 +#define CL_D3D10_RESOURCE_NOT_ACQUIRED_KHR -1005 + +/* cl_d3d10_device_source_khr */ +#define CL_D3D10_DEVICE_KHR 0x4010 +#define CL_D3D10_DXGI_ADAPTER_KHR 0x4011 + +/* cl_d3d10_device_set_khr */ +#define CL_PREFERRED_DEVICES_FOR_D3D10_KHR 0x4012 +#define CL_ALL_DEVICES_FOR_D3D10_KHR 0x4013 + +/* cl_context_info */ +#define CL_CONTEXT_D3D10_DEVICE_KHR 0x4014 +#define CL_CONTEXT_D3D10_PREFER_SHARED_RESOURCES_KHR 0x402C + +/* cl_mem_info */ +#define CL_MEM_D3D10_RESOURCE_KHR 0x4015 + +/* cl_image_info */ +#define CL_IMAGE_D3D10_SUBRESOURCE_KHR 0x4016 + +/* cl_command_type */ +#define CL_COMMAND_ACQUIRE_D3D10_OBJECTS_KHR 0x4017 +#define CL_COMMAND_RELEASE_D3D10_OBJECTS_KHR 0x4018 + + +typedef cl_int CL_API_CALL +clGetDeviceIDsFromD3D10KHR_t( + cl_platform_id platform, + cl_d3d10_device_source_khr d3d_device_source, + void* d3d_object, + cl_d3d10_device_set_khr d3d_device_set, + cl_uint num_entries, + cl_device_id* devices, + cl_uint* num_devices); + +typedef clGetDeviceIDsFromD3D10KHR_t * +clGetDeviceIDsFromD3D10KHR_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_mem CL_API_CALL +clCreateFromD3D10BufferKHR_t( + cl_context context, + cl_mem_flags flags, + ID3D10Buffer* resource, + cl_int* errcode_ret); + +typedef clCreateFromD3D10BufferKHR_t * +clCreateFromD3D10BufferKHR_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_mem CL_API_CALL +clCreateFromD3D10Texture2DKHR_t( + cl_context context, + cl_mem_flags flags, + ID3D10Texture2D* resource, + UINT subresource, + cl_int* errcode_ret); + +typedef clCreateFromD3D10Texture2DKHR_t * +clCreateFromD3D10Texture2DKHR_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_mem CL_API_CALL +clCreateFromD3D10Texture3DKHR_t( + cl_context context, + cl_mem_flags flags, + ID3D10Texture3D* resource, + UINT subresource, + cl_int* errcode_ret); + +typedef 
clCreateFromD3D10Texture3DKHR_t * +clCreateFromD3D10Texture3DKHR_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL +clEnqueueAcquireD3D10ObjectsKHR_t( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueAcquireD3D10ObjectsKHR_t * +clEnqueueAcquireD3D10ObjectsKHR_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL +clEnqueueReleaseD3D10ObjectsKHR_t( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueReleaseD3D10ObjectsKHR_t * +clEnqueueReleaseD3D10ObjectsKHR_fn CL_API_SUFFIX__VERSION_1_0; + +#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetDeviceIDsFromD3D10KHR( + cl_platform_id platform, + cl_d3d10_device_source_khr d3d_device_source, + void* d3d_object, + cl_d3d10_device_set_khr d3d_device_set, + cl_uint num_entries, + cl_device_id* devices, + cl_uint* num_devices) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateFromD3D10BufferKHR( + cl_context context, + cl_mem_flags flags, + ID3D10Buffer* resource, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateFromD3D10Texture2DKHR( + cl_context context, + cl_mem_flags flags, + ID3D10Texture2D* resource, + UINT subresource, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateFromD3D10Texture3DKHR( + cl_context context, + cl_mem_flags flags, + ID3D10Texture3D* resource, + UINT subresource, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueAcquireD3D10ObjectsKHR( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const 
cl_event* event_wait_list, + cl_event* event) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueReleaseD3D10ObjectsKHR( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) CL_API_SUFFIX__VERSION_1_0; + +#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_intel_sharing_format_query_d3d10 +***************************************************************/ +#define cl_intel_sharing_format_query_d3d10 1 +#define CL_INTEL_SHARING_FORMAT_QUERY_D3D10_EXTENSION_NAME \ + "cl_intel_sharing_format_query_d3d10" + + +#define CL_INTEL_SHARING_FORMAT_QUERY_D3D10_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* when cl_khr_d3d10_sharing is supported */ + +typedef cl_int CL_API_CALL +clGetSupportedD3D10TextureFormatsINTEL_t( + cl_context context, + cl_mem_flags flags, + cl_mem_object_type image_type, + cl_uint num_entries, + DXGI_FORMAT* d3d10_formats, + cl_uint* num_texture_formats); + +typedef clGetSupportedD3D10TextureFormatsINTEL_t * +clGetSupportedD3D10TextureFormatsINTEL_fn ; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetSupportedD3D10TextureFormatsINTEL( + cl_context context, + cl_mem_flags flags, + cl_mem_object_type image_type, + cl_uint num_entries, + DXGI_FORMAT* d3d10_formats, + cl_uint* num_texture_formats) ; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +#ifdef __cplusplus +} +#endif + +#endif /* OPENCL_CL_D3D10_H_ */ diff --git a/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_d3d11.h b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_d3d11.h new file mode 100644 index 000000000..384c8f428 --- /dev/null +++ b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_d3d11.h @@ -0,0 +1,270 @@ 
+/******************************************************************************* + * Copyright (c) 2008-2023 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + ******************************************************************************/ + +#ifndef OPENCL_CL_D3D11_H_ +#define OPENCL_CL_D3D11_H_ + +/* +** This header is generated from the Khronos OpenCL XML API Registry. +*/ + +#if defined(_MSC_VER) +#if _MSC_VER >=1500 +#pragma warning( push ) +#pragma warning( disable : 4201 ) +#pragma warning( disable : 5105 ) +#endif +#endif +#include +#if defined(_MSC_VER) +#if _MSC_VER >=1500 +#pragma warning( pop ) +#endif +#endif + +#include + +/* CL_NO_PROTOTYPES implies CL_NO_EXTENSION_PROTOTYPES: */ +#if defined(CL_NO_PROTOTYPES) && !defined(CL_NO_EXTENSION_PROTOTYPES) +#define CL_NO_EXTENSION_PROTOTYPES +#endif + +/* CL_NO_EXTENSION_PROTOTYPES implies + CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES and + CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES: */ +#if defined(CL_NO_EXTENSION_PROTOTYPES) && \ + !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) +#define CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES +#endif +#if defined(CL_NO_EXTENSION_PROTOTYPES) && \ + !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) +#define CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/*************************************************************** +* cl_khr_d3d11_sharing 
+***************************************************************/ +#define cl_khr_d3d11_sharing 1 +#define CL_KHR_D3D11_SHARING_EXTENSION_NAME \ + "cl_khr_d3d11_sharing" + + +#define CL_KHR_D3D11_SHARING_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +typedef cl_uint cl_d3d11_device_source_khr; +typedef cl_uint cl_d3d11_device_set_khr; + +/* Error codes */ +#define CL_INVALID_D3D11_DEVICE_KHR -1006 +#define CL_INVALID_D3D11_RESOURCE_KHR -1007 +#define CL_D3D11_RESOURCE_ALREADY_ACQUIRED_KHR -1008 +#define CL_D3D11_RESOURCE_NOT_ACQUIRED_KHR -1009 + +/* cl_d3d11_device_source_khr */ +#define CL_D3D11_DEVICE_KHR 0x4019 +#define CL_D3D11_DXGI_ADAPTER_KHR 0x401A + +/* cl_d3d11_device_set_khr */ +#define CL_PREFERRED_DEVICES_FOR_D3D11_KHR 0x401B +#define CL_ALL_DEVICES_FOR_D3D11_KHR 0x401C + +/* cl_context_info */ +#define CL_CONTEXT_D3D11_DEVICE_KHR 0x401D +#define CL_CONTEXT_D3D11_PREFER_SHARED_RESOURCES_KHR 0x402D + +/* cl_mem_info */ +#define CL_MEM_D3D11_RESOURCE_KHR 0x401E + +/* cl_image_info */ +#define CL_IMAGE_D3D11_SUBRESOURCE_KHR 0x401F + +/* cl_command_type */ +#define CL_COMMAND_ACQUIRE_D3D11_OBJECTS_KHR 0x4020 +#define CL_COMMAND_RELEASE_D3D11_OBJECTS_KHR 0x4021 + + +typedef cl_int CL_API_CALL +clGetDeviceIDsFromD3D11KHR_t( + cl_platform_id platform, + cl_d3d11_device_source_khr d3d_device_source, + void* d3d_object, + cl_d3d11_device_set_khr d3d_device_set, + cl_uint num_entries, + cl_device_id* devices, + cl_uint* num_devices); + +typedef clGetDeviceIDsFromD3D11KHR_t * +clGetDeviceIDsFromD3D11KHR_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_mem CL_API_CALL +clCreateFromD3D11BufferKHR_t( + cl_context context, + cl_mem_flags flags, + ID3D11Buffer* resource, + cl_int* errcode_ret); + +typedef clCreateFromD3D11BufferKHR_t * +clCreateFromD3D11BufferKHR_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_mem CL_API_CALL +clCreateFromD3D11Texture2DKHR_t( + cl_context context, + cl_mem_flags flags, + ID3D11Texture2D* resource, + UINT subresource, + cl_int* errcode_ret); + 
+typedef clCreateFromD3D11Texture2DKHR_t * +clCreateFromD3D11Texture2DKHR_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_mem CL_API_CALL +clCreateFromD3D11Texture3DKHR_t( + cl_context context, + cl_mem_flags flags, + ID3D11Texture3D* resource, + UINT subresource, + cl_int* errcode_ret); + +typedef clCreateFromD3D11Texture3DKHR_t * +clCreateFromD3D11Texture3DKHR_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL +clEnqueueAcquireD3D11ObjectsKHR_t( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueAcquireD3D11ObjectsKHR_t * +clEnqueueAcquireD3D11ObjectsKHR_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL +clEnqueueReleaseD3D11ObjectsKHR_t( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueReleaseD3D11ObjectsKHR_t * +clEnqueueReleaseD3D11ObjectsKHR_fn CL_API_SUFFIX__VERSION_1_2; + +#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetDeviceIDsFromD3D11KHR( + cl_platform_id platform, + cl_d3d11_device_source_khr d3d_device_source, + void* d3d_object, + cl_d3d11_device_set_khr d3d_device_set, + cl_uint num_entries, + cl_device_id* devices, + cl_uint* num_devices) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateFromD3D11BufferKHR( + cl_context context, + cl_mem_flags flags, + ID3D11Buffer* resource, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateFromD3D11Texture2DKHR( + cl_context context, + cl_mem_flags flags, + ID3D11Texture2D* resource, + UINT subresource, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateFromD3D11Texture3DKHR( + cl_context context, + cl_mem_flags flags, + 
ID3D11Texture3D* resource, + UINT subresource, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueAcquireD3D11ObjectsKHR( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueReleaseD3D11ObjectsKHR( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) CL_API_SUFFIX__VERSION_1_2; + +#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_intel_sharing_format_query_d3d11 +***************************************************************/ +#define cl_intel_sharing_format_query_d3d11 1 +#define CL_INTEL_SHARING_FORMAT_QUERY_D3D11_EXTENSION_NAME \ + "cl_intel_sharing_format_query_d3d11" + + +#define CL_INTEL_SHARING_FORMAT_QUERY_D3D11_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* when cl_khr_d3d11_sharing is supported */ + +typedef cl_int CL_API_CALL +clGetSupportedD3D11TextureFormatsINTEL_t( + cl_context context, + cl_mem_flags flags, + cl_mem_object_type image_type, + cl_uint plane, + cl_uint num_entries, + DXGI_FORMAT* d3d11_formats, + cl_uint* num_texture_formats); + +typedef clGetSupportedD3D11TextureFormatsINTEL_t * +clGetSupportedD3D11TextureFormatsINTEL_fn ; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetSupportedD3D11TextureFormatsINTEL( + cl_context context, + cl_mem_flags flags, + cl_mem_object_type image_type, + cl_uint plane, + cl_uint num_entries, + DXGI_FORMAT* d3d11_formats, + cl_uint* num_texture_formats) ; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +#ifdef __cplusplus +} +#endif + +#endif /* OPENCL_CL_D3D11_H_ */ 
diff --git a/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_dx9_media_sharing.h b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_dx9_media_sharing.h new file mode 100644 index 000000000..b079379d0 --- /dev/null +++ b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_dx9_media_sharing.h @@ -0,0 +1,386 @@ +/******************************************************************************* + * Copyright (c) 2008-2023 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + ******************************************************************************/ + +#ifndef OPENCL_CL_DX9_MEDIA_SHARING_H_ +#define OPENCL_CL_DX9_MEDIA_SHARING_H_ + +/* +** This header is generated from the Khronos OpenCL XML API Registry. 
+*/ + +#if defined(_WIN32) +#if defined(_MSC_VER) +#if _MSC_VER >=1500 +#pragma warning( push ) +#pragma warning( disable : 4201 ) +#pragma warning( disable : 5105 ) +#endif +#endif +#include +#if defined(_MSC_VER) +#if _MSC_VER >=1500 +#pragma warning( pop ) +#endif +#endif +#endif + +#include + +/* CL_NO_PROTOTYPES implies CL_NO_EXTENSION_PROTOTYPES: */ +#if defined(CL_NO_PROTOTYPES) && !defined(CL_NO_EXTENSION_PROTOTYPES) +#define CL_NO_EXTENSION_PROTOTYPES +#endif + +/* CL_NO_EXTENSION_PROTOTYPES implies + CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES and + CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES: */ +#if defined(CL_NO_EXTENSION_PROTOTYPES) && \ + !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) +#define CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES +#endif +#if defined(CL_NO_EXTENSION_PROTOTYPES) && \ + !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) +#define CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/*************************************************************** +* cl_khr_dx9_media_sharing +***************************************************************/ +#define cl_khr_dx9_media_sharing 1 +#define CL_KHR_DX9_MEDIA_SHARING_EXTENSION_NAME \ + "cl_khr_dx9_media_sharing" + + +#define CL_KHR_DX9_MEDIA_SHARING_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +typedef cl_uint cl_dx9_media_adapter_type_khr; +typedef cl_uint cl_dx9_media_adapter_set_khr; + +#if defined(_WIN32) +typedef struct _cl_dx9_surface_info_khr { + IDirect3DSurface9* resource; + HANDLE shared_handle; +} cl_dx9_surface_info_khr; + +#endif /* defined(_WIN32) */ + +/* Error codes */ +#define CL_INVALID_DX9_MEDIA_ADAPTER_KHR -1010 +#define CL_INVALID_DX9_MEDIA_SURFACE_KHR -1011 +#define CL_DX9_MEDIA_SURFACE_ALREADY_ACQUIRED_KHR -1012 +#define CL_DX9_MEDIA_SURFACE_NOT_ACQUIRED_KHR -1013 + +/* cl_media_adapter_type_khr */ +#define CL_ADAPTER_D3D9_KHR 0x2020 +#define CL_ADAPTER_D3D9EX_KHR 0x2021 +#define CL_ADAPTER_DXVA_KHR 0x2022 + +/* 
cl_media_adapter_set_khr */ +#define CL_PREFERRED_DEVICES_FOR_DX9_MEDIA_ADAPTER_KHR 0x2023 +#define CL_ALL_DEVICES_FOR_DX9_MEDIA_ADAPTER_KHR 0x2024 + +/* cl_context_info */ +#define CL_CONTEXT_ADAPTER_D3D9_KHR 0x2025 +#define CL_CONTEXT_ADAPTER_D3D9EX_KHR 0x2026 +#define CL_CONTEXT_ADAPTER_DXVA_KHR 0x2027 + +/* cl_mem_info */ +#define CL_MEM_DX9_MEDIA_ADAPTER_TYPE_KHR 0x2028 +#define CL_MEM_DX9_MEDIA_SURFACE_INFO_KHR 0x2029 + +/* cl_image_info */ +#define CL_IMAGE_DX9_MEDIA_PLANE_KHR 0x202A + +/* cl_command_type */ +#define CL_COMMAND_ACQUIRE_DX9_MEDIA_SURFACES_KHR 0x202B +#define CL_COMMAND_RELEASE_DX9_MEDIA_SURFACES_KHR 0x202C + + +typedef cl_int CL_API_CALL +clGetDeviceIDsFromDX9MediaAdapterKHR_t( + cl_platform_id platform, + cl_uint num_media_adapters, + cl_dx9_media_adapter_type_khr* media_adapter_type, + void* media_adapters, + cl_dx9_media_adapter_set_khr media_adapter_set, + cl_uint num_entries, + cl_device_id* devices, + cl_uint* num_devices); + +typedef clGetDeviceIDsFromDX9MediaAdapterKHR_t * +clGetDeviceIDsFromDX9MediaAdapterKHR_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_mem CL_API_CALL +clCreateFromDX9MediaSurfaceKHR_t( + cl_context context, + cl_mem_flags flags, + cl_dx9_media_adapter_type_khr adapter_type, + void* surface_info, + cl_uint plane, + cl_int* errcode_ret); + +typedef clCreateFromDX9MediaSurfaceKHR_t * +clCreateFromDX9MediaSurfaceKHR_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL +clEnqueueAcquireDX9MediaSurfacesKHR_t( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueAcquireDX9MediaSurfacesKHR_t * +clEnqueueAcquireDX9MediaSurfacesKHR_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL +clEnqueueReleaseDX9MediaSurfacesKHR_t( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* 
event_wait_list, + cl_event* event); + +typedef clEnqueueReleaseDX9MediaSurfacesKHR_t * +clEnqueueReleaseDX9MediaSurfacesKHR_fn CL_API_SUFFIX__VERSION_1_2; + +#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetDeviceIDsFromDX9MediaAdapterKHR( + cl_platform_id platform, + cl_uint num_media_adapters, + cl_dx9_media_adapter_type_khr* media_adapter_type, + void* media_adapters, + cl_dx9_media_adapter_set_khr media_adapter_set, + cl_uint num_entries, + cl_device_id* devices, + cl_uint* num_devices) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateFromDX9MediaSurfaceKHR( + cl_context context, + cl_mem_flags flags, + cl_dx9_media_adapter_type_khr adapter_type, + void* surface_info, + cl_uint plane, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueAcquireDX9MediaSurfacesKHR( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueReleaseDX9MediaSurfacesKHR( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) CL_API_SUFFIX__VERSION_1_2; + +#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_intel_dx9_media_sharing +***************************************************************/ +#define cl_intel_dx9_media_sharing 1 +#define CL_INTEL_DX9_MEDIA_SHARING_EXTENSION_NAME \ + "cl_intel_dx9_media_sharing" + + +#define CL_INTEL_DX9_MEDIA_SHARING_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +typedef cl_uint cl_dx9_device_source_intel; +typedef cl_uint cl_dx9_device_set_intel; + +/* Error codes */ +#define CL_INVALID_DX9_DEVICE_INTEL -1010 +#define 
CL_INVALID_DX9_RESOURCE_INTEL -1011 +#define CL_DX9_RESOURCE_ALREADY_ACQUIRED_INTEL -1012 +#define CL_DX9_RESOURCE_NOT_ACQUIRED_INTEL -1013 + +/* cl_dx9_device_source_intel */ +#define CL_D3D9_DEVICE_INTEL 0x4022 +#define CL_D3D9EX_DEVICE_INTEL 0x4070 +#define CL_DXVA_DEVICE_INTEL 0x4071 + +/* cl_dx9_device_set_intel */ +#define CL_PREFERRED_DEVICES_FOR_DX9_INTEL 0x4024 +#define CL_ALL_DEVICES_FOR_DX9_INTEL 0x4025 + +/* cl_context_info */ +#define CL_CONTEXT_D3D9_DEVICE_INTEL 0x4026 +#define CL_CONTEXT_D3D9EX_DEVICE_INTEL 0x4072 +#define CL_CONTEXT_DXVA_DEVICE_INTEL 0x4073 + +/* cl_mem_info */ +#define CL_MEM_DX9_RESOURCE_INTEL 0x4027 +#define CL_MEM_DX9_SHARED_HANDLE_INTEL 0x4074 + +/* cl_image_info */ +#define CL_IMAGE_DX9_PLANE_INTEL 0x4075 + +/* cl_command_type */ +#define CL_COMMAND_ACQUIRE_DX9_OBJECTS_INTEL 0x402A +#define CL_COMMAND_RELEASE_DX9_OBJECTS_INTEL 0x402B + + +typedef cl_int CL_API_CALL +clGetDeviceIDsFromDX9INTEL_t( + cl_platform_id platform, + cl_dx9_device_source_intel dx9_device_source, + void* dx9_object, + cl_dx9_device_set_intel dx9_device_set, + cl_uint num_entries, + cl_device_id* devices, + cl_uint* num_devices); + +typedef clGetDeviceIDsFromDX9INTEL_t * +clGetDeviceIDsFromDX9INTEL_fn CL_API_SUFFIX__VERSION_1_1; + +typedef cl_mem CL_API_CALL +clCreateFromDX9MediaSurfaceINTEL_t( + cl_context context, + cl_mem_flags flags, + IDirect3DSurface9* resource, + HANDLE sharedHandle, + UINT plane, + cl_int* errcode_ret); + +typedef clCreateFromDX9MediaSurfaceINTEL_t * +clCreateFromDX9MediaSurfaceINTEL_fn CL_API_SUFFIX__VERSION_1_1; + +typedef cl_int CL_API_CALL +clEnqueueAcquireDX9ObjectsINTEL_t( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueAcquireDX9ObjectsINTEL_t * +clEnqueueAcquireDX9ObjectsINTEL_fn CL_API_SUFFIX__VERSION_1_1; + +typedef cl_int CL_API_CALL +clEnqueueReleaseDX9ObjectsINTEL_t( + 
cl_command_queue command_queue, + cl_uint num_objects, + cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueReleaseDX9ObjectsINTEL_t * +clEnqueueReleaseDX9ObjectsINTEL_fn CL_API_SUFFIX__VERSION_1_1; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetDeviceIDsFromDX9INTEL( + cl_platform_id platform, + cl_dx9_device_source_intel dx9_device_source, + void* dx9_object, + cl_dx9_device_set_intel dx9_device_set, + cl_uint num_entries, + cl_device_id* devices, + cl_uint* num_devices) CL_API_SUFFIX__VERSION_1_1; + +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateFromDX9MediaSurfaceINTEL( + cl_context context, + cl_mem_flags flags, + IDirect3DSurface9* resource, + HANDLE sharedHandle, + UINT plane, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_1; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueAcquireDX9ObjectsINTEL( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) CL_API_SUFFIX__VERSION_1_1; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueReleaseDX9ObjectsINTEL( + cl_command_queue command_queue, + cl_uint num_objects, + cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) CL_API_SUFFIX__VERSION_1_1; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_intel_sharing_format_query_dx9 +***************************************************************/ +#define cl_intel_sharing_format_query_dx9 1 +#define CL_INTEL_SHARING_FORMAT_QUERY_DX9_EXTENSION_NAME \ + "cl_intel_sharing_format_query_dx9" + + +#define CL_INTEL_SHARING_FORMAT_QUERY_DX9_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* when cl_khr_dx9_media_sharing or cl_intel_dx9_media_sharing is supported */ + 
+typedef cl_int CL_API_CALL +clGetSupportedDX9MediaSurfaceFormatsINTEL_t( + cl_context context, + cl_mem_flags flags, + cl_mem_object_type image_type, + cl_uint plane, + cl_uint num_entries, + D3DFORMAT* dx9_formats, + cl_uint* num_surface_formats); + +typedef clGetSupportedDX9MediaSurfaceFormatsINTEL_t * +clGetSupportedDX9MediaSurfaceFormatsINTEL_fn ; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetSupportedDX9MediaSurfaceFormatsINTEL( + cl_context context, + cl_mem_flags flags, + cl_mem_object_type image_type, + cl_uint plane, + cl_uint num_entries, + D3DFORMAT* dx9_formats, + cl_uint* num_surface_formats) ; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +#ifdef __cplusplus +} +#endif + +#endif /* OPENCL_CL_DX9_MEDIA_SHARING_H_ */ diff --git a/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_dx9_media_sharing_intel.h b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_dx9_media_sharing_intel.h new file mode 100644 index 000000000..f6518d7f6 --- /dev/null +++ b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_dx9_media_sharing_intel.h @@ -0,0 +1,18 @@ +/******************************************************************************* + * Copyright (c) 2008-2020 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ ******************************************************************************/ + +#include +#pragma message("The Intel DX9 media sharing extensions have been moved into cl_dx9_media_sharing.h. Please include cl_dx9_media_sharing.h directly.") diff --git a/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_egl.h b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_egl.h new file mode 100644 index 000000000..68aefec76 --- /dev/null +++ b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_egl.h @@ -0,0 +1,185 @@ +/******************************************************************************* + * Copyright (c) 2008-2023 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + ******************************************************************************/ + +#ifndef OPENCL_CL_EGL_H_ +#define OPENCL_CL_EGL_H_ + +/* +** This header is generated from the Khronos OpenCL XML API Registry. 
+*/ + +#include + +/* CL_NO_PROTOTYPES implies CL_NO_EXTENSION_PROTOTYPES: */ +#if defined(CL_NO_PROTOTYPES) && !defined(CL_NO_EXTENSION_PROTOTYPES) +#define CL_NO_EXTENSION_PROTOTYPES +#endif + +/* CL_NO_EXTENSION_PROTOTYPES implies + CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES and + CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES: */ +#if defined(CL_NO_EXTENSION_PROTOTYPES) && \ + !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) +#define CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES +#endif +#if defined(CL_NO_EXTENSION_PROTOTYPES) && \ + !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) +#define CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/*************************************************************** +* cl_khr_egl_image +***************************************************************/ +#define cl_khr_egl_image 1 +#define CL_KHR_EGL_IMAGE_EXTENSION_NAME \ + "cl_khr_egl_image" + + +#define CL_KHR_EGL_IMAGE_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/* Command type for events created with clEnqueueAcquireEGLObjectsKHR */ +#define CL_COMMAND_EGL_FENCE_SYNC_OBJECT_KHR 0x202F +#define CL_COMMAND_ACQUIRE_EGL_OBJECTS_KHR 0x202D +#define CL_COMMAND_RELEASE_EGL_OBJECTS_KHR 0x202E + +/* Error type for clCreateFromEGLImageKHR */ +#define CL_INVALID_EGL_OBJECT_KHR -1093 +#define CL_EGL_RESOURCE_NOT_ACQUIRED_KHR -1092 + +/* CLeglImageKHR is an opaque handle to an EGLImage */ +typedef void* CLeglImageKHR; + +/* CLeglDisplayKHR is an opaque handle to an EGLDisplay */ +typedef void* CLeglDisplayKHR; + +/* properties passed to clCreateFromEGLImageKHR */ +typedef intptr_t cl_egl_image_properties_khr; + + +typedef cl_mem CL_API_CALL +clCreateFromEGLImageKHR_t( + cl_context context, + CLeglDisplayKHR egldisplay, + CLeglImageKHR eglimage, + cl_mem_flags flags, + const cl_egl_image_properties_khr* properties, + cl_int* errcode_ret); + +typedef clCreateFromEGLImageKHR_t * +clCreateFromEGLImageKHR_fn CL_API_SUFFIX__VERSION_1_0; + 
+typedef cl_int CL_API_CALL +clEnqueueAcquireEGLObjectsKHR_t( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueAcquireEGLObjectsKHR_t * +clEnqueueAcquireEGLObjectsKHR_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL +clEnqueueReleaseEGLObjectsKHR_t( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueReleaseEGLObjectsKHR_t * +clEnqueueReleaseEGLObjectsKHR_fn CL_API_SUFFIX__VERSION_1_0; + +#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateFromEGLImageKHR( + cl_context context, + CLeglDisplayKHR egldisplay, + CLeglImageKHR eglimage, + cl_mem_flags flags, + const cl_egl_image_properties_khr* properties, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueAcquireEGLObjectsKHR( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueReleaseEGLObjectsKHR( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) CL_API_SUFFIX__VERSION_1_0; + +#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_khr_egl_event +***************************************************************/ +#define cl_khr_egl_event 1 +#define CL_KHR_EGL_EVENT_EXTENSION_NAME \ + "cl_khr_egl_event" + + +#define CL_KHR_EGL_EVENT_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/* CLeglDisplayKHR is an opaque handle 
to an EGLDisplay */ +/* type CLeglDisplayKHR */ + +/* CLeglSyncKHR is an opaque handle to an EGLSync object */ +typedef void* CLeglSyncKHR; + + +typedef cl_event CL_API_CALL +clCreateEventFromEGLSyncKHR_t( + cl_context context, + CLeglSyncKHR sync, + CLeglDisplayKHR display, + cl_int* errcode_ret); + +typedef clCreateEventFromEGLSyncKHR_t * +clCreateEventFromEGLSyncKHR_fn CL_API_SUFFIX__VERSION_1_0; + +#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_event CL_API_CALL +clCreateEventFromEGLSyncKHR( + cl_context context, + CLeglSyncKHR sync, + CLeglDisplayKHR display, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0; + +#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +#ifdef __cplusplus +} +#endif + +#endif /* OPENCL_CL_EGL_H_ */ diff --git a/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_ext.h b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_ext.h new file mode 100644 index 000000000..688e24316 --- /dev/null +++ b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_ext.h @@ -0,0 +1,4435 @@ +/******************************************************************************* + * Copyright (c) 2008-2023 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + ******************************************************************************/ + +#ifndef OPENCL_CL_EXT_H_ +#define OPENCL_CL_EXT_H_ + +/* +** This header is generated from the Khronos OpenCL XML API Registry. 
+*/ + +#include + +/* CL_NO_PROTOTYPES implies CL_NO_EXTENSION_PROTOTYPES: */ +#if defined(CL_NO_PROTOTYPES) && !defined(CL_NO_EXTENSION_PROTOTYPES) +#define CL_NO_EXTENSION_PROTOTYPES +#endif + +/* CL_NO_EXTENSION_PROTOTYPES implies + CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES and + CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES: */ +#if defined(CL_NO_EXTENSION_PROTOTYPES) && \ + !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) +#define CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES +#endif +#if defined(CL_NO_EXTENSION_PROTOTYPES) && \ + !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) +#define CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/*************************************************************** +* cl_khr_command_buffer (beta) +***************************************************************/ +#if defined(CL_ENABLE_BETA_EXTENSIONS) + +#define cl_khr_command_buffer 1 +#define CL_KHR_COMMAND_BUFFER_EXTENSION_NAME \ + "cl_khr_command_buffer" + + +#define CL_KHR_COMMAND_BUFFER_EXTENSION_VERSION CL_MAKE_VERSION(0, 9, 7) + +typedef cl_bitfield cl_device_command_buffer_capabilities_khr; +typedef struct _cl_command_buffer_khr* cl_command_buffer_khr; +typedef cl_uint cl_sync_point_khr; +typedef cl_uint cl_command_buffer_info_khr; +typedef cl_uint cl_command_buffer_state_khr; +typedef cl_properties cl_command_buffer_properties_khr; +typedef cl_bitfield cl_command_buffer_flags_khr; +typedef cl_properties cl_command_properties_khr; +typedef struct _cl_mutable_command_khr* cl_mutable_command_khr; + +/* cl_device_info */ +#define CL_DEVICE_COMMAND_BUFFER_CAPABILITIES_KHR 0x12A9 +#define CL_DEVICE_COMMAND_BUFFER_SUPPORTED_QUEUE_PROPERTIES_KHR 0x129A +#define CL_DEVICE_COMMAND_BUFFER_REQUIRED_QUEUE_PROPERTIES_KHR 0x12AA + +/* cl_device_command_buffer_capabilities_khr - bitfield */ +#define CL_COMMAND_BUFFER_CAPABILITY_KERNEL_PRINTF_KHR (1 << 0) +#define CL_COMMAND_BUFFER_CAPABILITY_DEVICE_SIDE_ENQUEUE_KHR (1 << 1) +#define 
CL_COMMAND_BUFFER_CAPABILITY_SIMULTANEOUS_USE_KHR (1 << 2) + +/* cl_command_buffer_properties_khr */ +#define CL_COMMAND_BUFFER_FLAGS_KHR 0x1293 + +/* cl_command_buffer_flags_khr - bitfield */ +#define CL_COMMAND_BUFFER_SIMULTANEOUS_USE_KHR (1 << 0) + +/* Error codes */ +#define CL_INVALID_COMMAND_BUFFER_KHR -1138 +#define CL_INVALID_SYNC_POINT_WAIT_LIST_KHR -1139 +#define CL_INCOMPATIBLE_COMMAND_QUEUE_KHR -1140 + +/* cl_command_buffer_info_khr */ +#define CL_COMMAND_BUFFER_QUEUES_KHR 0x1294 +#define CL_COMMAND_BUFFER_NUM_QUEUES_KHR 0x1295 +#define CL_COMMAND_BUFFER_REFERENCE_COUNT_KHR 0x1296 +#define CL_COMMAND_BUFFER_STATE_KHR 0x1297 +#define CL_COMMAND_BUFFER_PROPERTIES_ARRAY_KHR 0x1298 +#define CL_COMMAND_BUFFER_CONTEXT_KHR 0x1299 + +/* cl_command_buffer_state_khr */ +#define CL_COMMAND_BUFFER_STATE_RECORDING_KHR 0 +#define CL_COMMAND_BUFFER_STATE_EXECUTABLE_KHR 1 +#define CL_COMMAND_BUFFER_STATE_PENDING_KHR 2 + +/* cl_command_type */ +#define CL_COMMAND_COMMAND_BUFFER_KHR 0x12A8 + + +typedef cl_command_buffer_khr CL_API_CALL +clCreateCommandBufferKHR_t( + cl_uint num_queues, + const cl_command_queue* queues, + const cl_command_buffer_properties_khr* properties, + cl_int* errcode_ret); + +typedef clCreateCommandBufferKHR_t * +clCreateCommandBufferKHR_fn ; + +typedef cl_int CL_API_CALL +clFinalizeCommandBufferKHR_t( + cl_command_buffer_khr command_buffer); + +typedef clFinalizeCommandBufferKHR_t * +clFinalizeCommandBufferKHR_fn ; + +typedef cl_int CL_API_CALL +clRetainCommandBufferKHR_t( + cl_command_buffer_khr command_buffer); + +typedef clRetainCommandBufferKHR_t * +clRetainCommandBufferKHR_fn ; + +typedef cl_int CL_API_CALL +clReleaseCommandBufferKHR_t( + cl_command_buffer_khr command_buffer); + +typedef clReleaseCommandBufferKHR_t * +clReleaseCommandBufferKHR_fn ; + +typedef cl_int CL_API_CALL +clEnqueueCommandBufferKHR_t( + cl_uint num_queues, + cl_command_queue* queues, + cl_command_buffer_khr command_buffer, + cl_uint num_events_in_wait_list, + const 
cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueCommandBufferKHR_t * +clEnqueueCommandBufferKHR_fn ; + +typedef cl_int CL_API_CALL +clCommandBarrierWithWaitListKHR_t( + cl_command_buffer_khr command_buffer, + cl_command_queue command_queue, + const cl_command_properties_khr* properties, + cl_uint num_sync_points_in_wait_list, + const cl_sync_point_khr* sync_point_wait_list, + cl_sync_point_khr* sync_point, + cl_mutable_command_khr* mutable_handle); + +typedef clCommandBarrierWithWaitListKHR_t * +clCommandBarrierWithWaitListKHR_fn ; + +typedef cl_int CL_API_CALL +clCommandCopyBufferKHR_t( + cl_command_buffer_khr command_buffer, + cl_command_queue command_queue, + const cl_command_properties_khr* properties, + cl_mem src_buffer, + cl_mem dst_buffer, + size_t src_offset, + size_t dst_offset, + size_t size, + cl_uint num_sync_points_in_wait_list, + const cl_sync_point_khr* sync_point_wait_list, + cl_sync_point_khr* sync_point, + cl_mutable_command_khr* mutable_handle); + +typedef clCommandCopyBufferKHR_t * +clCommandCopyBufferKHR_fn ; + +typedef cl_int CL_API_CALL +clCommandCopyBufferRectKHR_t( + cl_command_buffer_khr command_buffer, + cl_command_queue command_queue, + const cl_command_properties_khr* properties, + cl_mem src_buffer, + cl_mem dst_buffer, + const size_t* src_origin, + const size_t* dst_origin, + const size_t* region, + size_t src_row_pitch, + size_t src_slice_pitch, + size_t dst_row_pitch, + size_t dst_slice_pitch, + cl_uint num_sync_points_in_wait_list, + const cl_sync_point_khr* sync_point_wait_list, + cl_sync_point_khr* sync_point, + cl_mutable_command_khr* mutable_handle); + +typedef clCommandCopyBufferRectKHR_t * +clCommandCopyBufferRectKHR_fn ; + +typedef cl_int CL_API_CALL +clCommandCopyBufferToImageKHR_t( + cl_command_buffer_khr command_buffer, + cl_command_queue command_queue, + const cl_command_properties_khr* properties, + cl_mem src_buffer, + cl_mem dst_image, + size_t src_offset, + const size_t* dst_origin, + const 
size_t* region, + cl_uint num_sync_points_in_wait_list, + const cl_sync_point_khr* sync_point_wait_list, + cl_sync_point_khr* sync_point, + cl_mutable_command_khr* mutable_handle); + +typedef clCommandCopyBufferToImageKHR_t * +clCommandCopyBufferToImageKHR_fn ; + +typedef cl_int CL_API_CALL +clCommandCopyImageKHR_t( + cl_command_buffer_khr command_buffer, + cl_command_queue command_queue, + const cl_command_properties_khr* properties, + cl_mem src_image, + cl_mem dst_image, + const size_t* src_origin, + const size_t* dst_origin, + const size_t* region, + cl_uint num_sync_points_in_wait_list, + const cl_sync_point_khr* sync_point_wait_list, + cl_sync_point_khr* sync_point, + cl_mutable_command_khr* mutable_handle); + +typedef clCommandCopyImageKHR_t * +clCommandCopyImageKHR_fn ; + +typedef cl_int CL_API_CALL +clCommandCopyImageToBufferKHR_t( + cl_command_buffer_khr command_buffer, + cl_command_queue command_queue, + const cl_command_properties_khr* properties, + cl_mem src_image, + cl_mem dst_buffer, + const size_t* src_origin, + const size_t* region, + size_t dst_offset, + cl_uint num_sync_points_in_wait_list, + const cl_sync_point_khr* sync_point_wait_list, + cl_sync_point_khr* sync_point, + cl_mutable_command_khr* mutable_handle); + +typedef clCommandCopyImageToBufferKHR_t * +clCommandCopyImageToBufferKHR_fn ; + +typedef cl_int CL_API_CALL +clCommandFillBufferKHR_t( + cl_command_buffer_khr command_buffer, + cl_command_queue command_queue, + const cl_command_properties_khr* properties, + cl_mem buffer, + const void* pattern, + size_t pattern_size, + size_t offset, + size_t size, + cl_uint num_sync_points_in_wait_list, + const cl_sync_point_khr* sync_point_wait_list, + cl_sync_point_khr* sync_point, + cl_mutable_command_khr* mutable_handle); + +typedef clCommandFillBufferKHR_t * +clCommandFillBufferKHR_fn ; + +typedef cl_int CL_API_CALL +clCommandFillImageKHR_t( + cl_command_buffer_khr command_buffer, + cl_command_queue command_queue, + const 
cl_command_properties_khr* properties, + cl_mem image, + const void* fill_color, + const size_t* origin, + const size_t* region, + cl_uint num_sync_points_in_wait_list, + const cl_sync_point_khr* sync_point_wait_list, + cl_sync_point_khr* sync_point, + cl_mutable_command_khr* mutable_handle); + +typedef clCommandFillImageKHR_t * +clCommandFillImageKHR_fn ; + +typedef cl_int CL_API_CALL +clCommandNDRangeKernelKHR_t( + cl_command_buffer_khr command_buffer, + cl_command_queue command_queue, + const cl_command_properties_khr* properties, + cl_kernel kernel, + cl_uint work_dim, + const size_t* global_work_offset, + const size_t* global_work_size, + const size_t* local_work_size, + cl_uint num_sync_points_in_wait_list, + const cl_sync_point_khr* sync_point_wait_list, + cl_sync_point_khr* sync_point, + cl_mutable_command_khr* mutable_handle); + +typedef clCommandNDRangeKernelKHR_t * +clCommandNDRangeKernelKHR_fn ; + +typedef cl_int CL_API_CALL +clGetCommandBufferInfoKHR_t( + cl_command_buffer_khr command_buffer, + cl_command_buffer_info_khr param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret); + +typedef clGetCommandBufferInfoKHR_t * +clGetCommandBufferInfoKHR_fn ; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_command_buffer_khr CL_API_CALL +clCreateCommandBufferKHR( + cl_uint num_queues, + const cl_command_queue* queues, + const cl_command_buffer_properties_khr* properties, + cl_int* errcode_ret) ; + +extern CL_API_ENTRY cl_int CL_API_CALL +clFinalizeCommandBufferKHR( + cl_command_buffer_khr command_buffer) ; + +extern CL_API_ENTRY cl_int CL_API_CALL +clRetainCommandBufferKHR( + cl_command_buffer_khr command_buffer) ; + +extern CL_API_ENTRY cl_int CL_API_CALL +clReleaseCommandBufferKHR( + cl_command_buffer_khr command_buffer) ; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueCommandBufferKHR( + cl_uint num_queues, + cl_command_queue* queues, + cl_command_buffer_khr command_buffer, + 
cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) ; + +extern CL_API_ENTRY cl_int CL_API_CALL +clCommandBarrierWithWaitListKHR( + cl_command_buffer_khr command_buffer, + cl_command_queue command_queue, + const cl_command_properties_khr* properties, + cl_uint num_sync_points_in_wait_list, + const cl_sync_point_khr* sync_point_wait_list, + cl_sync_point_khr* sync_point, + cl_mutable_command_khr* mutable_handle) ; + +extern CL_API_ENTRY cl_int CL_API_CALL +clCommandCopyBufferKHR( + cl_command_buffer_khr command_buffer, + cl_command_queue command_queue, + const cl_command_properties_khr* properties, + cl_mem src_buffer, + cl_mem dst_buffer, + size_t src_offset, + size_t dst_offset, + size_t size, + cl_uint num_sync_points_in_wait_list, + const cl_sync_point_khr* sync_point_wait_list, + cl_sync_point_khr* sync_point, + cl_mutable_command_khr* mutable_handle) ; + +extern CL_API_ENTRY cl_int CL_API_CALL +clCommandCopyBufferRectKHR( + cl_command_buffer_khr command_buffer, + cl_command_queue command_queue, + const cl_command_properties_khr* properties, + cl_mem src_buffer, + cl_mem dst_buffer, + const size_t* src_origin, + const size_t* dst_origin, + const size_t* region, + size_t src_row_pitch, + size_t src_slice_pitch, + size_t dst_row_pitch, + size_t dst_slice_pitch, + cl_uint num_sync_points_in_wait_list, + const cl_sync_point_khr* sync_point_wait_list, + cl_sync_point_khr* sync_point, + cl_mutable_command_khr* mutable_handle) ; + +extern CL_API_ENTRY cl_int CL_API_CALL +clCommandCopyBufferToImageKHR( + cl_command_buffer_khr command_buffer, + cl_command_queue command_queue, + const cl_command_properties_khr* properties, + cl_mem src_buffer, + cl_mem dst_image, + size_t src_offset, + const size_t* dst_origin, + const size_t* region, + cl_uint num_sync_points_in_wait_list, + const cl_sync_point_khr* sync_point_wait_list, + cl_sync_point_khr* sync_point, + cl_mutable_command_khr* mutable_handle) ; + +extern CL_API_ENTRY cl_int 
CL_API_CALL +clCommandCopyImageKHR( + cl_command_buffer_khr command_buffer, + cl_command_queue command_queue, + const cl_command_properties_khr* properties, + cl_mem src_image, + cl_mem dst_image, + const size_t* src_origin, + const size_t* dst_origin, + const size_t* region, + cl_uint num_sync_points_in_wait_list, + const cl_sync_point_khr* sync_point_wait_list, + cl_sync_point_khr* sync_point, + cl_mutable_command_khr* mutable_handle) ; + +extern CL_API_ENTRY cl_int CL_API_CALL +clCommandCopyImageToBufferKHR( + cl_command_buffer_khr command_buffer, + cl_command_queue command_queue, + const cl_command_properties_khr* properties, + cl_mem src_image, + cl_mem dst_buffer, + const size_t* src_origin, + const size_t* region, + size_t dst_offset, + cl_uint num_sync_points_in_wait_list, + const cl_sync_point_khr* sync_point_wait_list, + cl_sync_point_khr* sync_point, + cl_mutable_command_khr* mutable_handle) ; + +extern CL_API_ENTRY cl_int CL_API_CALL +clCommandFillBufferKHR( + cl_command_buffer_khr command_buffer, + cl_command_queue command_queue, + const cl_command_properties_khr* properties, + cl_mem buffer, + const void* pattern, + size_t pattern_size, + size_t offset, + size_t size, + cl_uint num_sync_points_in_wait_list, + const cl_sync_point_khr* sync_point_wait_list, + cl_sync_point_khr* sync_point, + cl_mutable_command_khr* mutable_handle) ; + +extern CL_API_ENTRY cl_int CL_API_CALL +clCommandFillImageKHR( + cl_command_buffer_khr command_buffer, + cl_command_queue command_queue, + const cl_command_properties_khr* properties, + cl_mem image, + const void* fill_color, + const size_t* origin, + const size_t* region, + cl_uint num_sync_points_in_wait_list, + const cl_sync_point_khr* sync_point_wait_list, + cl_sync_point_khr* sync_point, + cl_mutable_command_khr* mutable_handle) ; + +extern CL_API_ENTRY cl_int CL_API_CALL +clCommandNDRangeKernelKHR( + cl_command_buffer_khr command_buffer, + cl_command_queue command_queue, + const cl_command_properties_khr* 
properties, + cl_kernel kernel, + cl_uint work_dim, + const size_t* global_work_offset, + const size_t* global_work_size, + const size_t* local_work_size, + cl_uint num_sync_points_in_wait_list, + const cl_sync_point_khr* sync_point_wait_list, + cl_sync_point_khr* sync_point, + cl_mutable_command_khr* mutable_handle) ; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetCommandBufferInfoKHR( + cl_command_buffer_khr command_buffer, + cl_command_buffer_info_khr param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret) ; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/* From version 0.9.4 of the extension */ + +typedef cl_int CL_API_CALL +clCommandSVMMemcpyKHR_t( + cl_command_buffer_khr command_buffer, + cl_command_queue command_queue, + const cl_command_properties_khr* properties, + void* dst_ptr, + const void* src_ptr, + size_t size, + cl_uint num_sync_points_in_wait_list, + const cl_sync_point_khr* sync_point_wait_list, + cl_sync_point_khr* sync_point, + cl_mutable_command_khr* mutable_handle); + +typedef clCommandSVMMemcpyKHR_t * +clCommandSVMMemcpyKHR_fn CL_API_SUFFIX__VERSION_2_0; + +typedef cl_int CL_API_CALL +clCommandSVMMemFillKHR_t( + cl_command_buffer_khr command_buffer, + cl_command_queue command_queue, + const cl_command_properties_khr* properties, + void* svm_ptr, + const void* pattern, + size_t pattern_size, + size_t size, + cl_uint num_sync_points_in_wait_list, + const cl_sync_point_khr* sync_point_wait_list, + cl_sync_point_khr* sync_point, + cl_mutable_command_khr* mutable_handle); + +typedef clCommandSVMMemFillKHR_t * +clCommandSVMMemFillKHR_fn CL_API_SUFFIX__VERSION_2_0; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clCommandSVMMemcpyKHR( + cl_command_buffer_khr command_buffer, + cl_command_queue command_queue, + const cl_command_properties_khr* properties, + void* dst_ptr, + const void* src_ptr, + size_t size, + cl_uint 
num_sync_points_in_wait_list, + const cl_sync_point_khr* sync_point_wait_list, + cl_sync_point_khr* sync_point, + cl_mutable_command_khr* mutable_handle) CL_API_SUFFIX__VERSION_2_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clCommandSVMMemFillKHR( + cl_command_buffer_khr command_buffer, + cl_command_queue command_queue, + const cl_command_properties_khr* properties, + void* svm_ptr, + const void* pattern, + size_t pattern_size, + size_t size, + cl_uint num_sync_points_in_wait_list, + const cl_sync_point_khr* sync_point_wait_list, + cl_sync_point_khr* sync_point, + cl_mutable_command_khr* mutable_handle) CL_API_SUFFIX__VERSION_2_0; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +#endif /* defined(CL_ENABLE_BETA_EXTENSIONS) */ + +/*************************************************************** +* cl_khr_command_buffer_multi_device (beta) +***************************************************************/ +#if defined(CL_ENABLE_BETA_EXTENSIONS) + +#define cl_khr_command_buffer_multi_device 1 +#define CL_KHR_COMMAND_BUFFER_MULTI_DEVICE_EXTENSION_NAME \ + "cl_khr_command_buffer_multi_device" + + +#define CL_KHR_COMMAND_BUFFER_MULTI_DEVICE_EXTENSION_VERSION CL_MAKE_VERSION(0, 9, 2) + +typedef cl_bitfield cl_platform_command_buffer_capabilities_khr; + +/* cl_platform_info */ +#define CL_PLATFORM_COMMAND_BUFFER_CAPABILITIES_KHR 0x0908 + +/* cl_platform_command_buffer_capabilities_khr - bitfield */ +#define CL_COMMAND_BUFFER_PLATFORM_UNIVERSAL_SYNC_KHR (1 << 0) +#define CL_COMMAND_BUFFER_PLATFORM_REMAP_QUEUES_KHR (1 << 1) +#define CL_COMMAND_BUFFER_PLATFORM_AUTOMATIC_REMAP_KHR (1 << 2) + +/* cl_device_info */ +#define CL_DEVICE_COMMAND_BUFFER_NUM_SYNC_DEVICES_KHR 0x12AB +#define CL_DEVICE_COMMAND_BUFFER_SYNC_DEVICES_KHR 0x12AC + +/* cl_device_command_buffer_capabilities_khr - bitfield */ +#define CL_COMMAND_BUFFER_CAPABILITY_MULTIPLE_QUEUE_KHR (1 << 4) + +/* cl_command_buffer_flags_khr - bitfield */ +#define CL_COMMAND_BUFFER_DEVICE_SIDE_SYNC_KHR (1 
<< 2) + + +typedef cl_command_buffer_khr CL_API_CALL +clRemapCommandBufferKHR_t( + cl_command_buffer_khr command_buffer, + cl_bool automatic, + cl_uint num_queues, + const cl_command_queue* queues, + cl_uint num_handles, + const cl_mutable_command_khr* handles, + cl_mutable_command_khr* handles_ret, + cl_int* errcode_ret); + +typedef clRemapCommandBufferKHR_t * +clRemapCommandBufferKHR_fn ; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_command_buffer_khr CL_API_CALL +clRemapCommandBufferKHR( + cl_command_buffer_khr command_buffer, + cl_bool automatic, + cl_uint num_queues, + const cl_command_queue* queues, + cl_uint num_handles, + const cl_mutable_command_khr* handles, + cl_mutable_command_khr* handles_ret, + cl_int* errcode_ret) ; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +#endif /* defined(CL_ENABLE_BETA_EXTENSIONS) */ + +/*************************************************************** +* cl_khr_command_buffer_mutable_dispatch (beta) +***************************************************************/ +#if defined(CL_ENABLE_BETA_EXTENSIONS) + +#define cl_khr_command_buffer_mutable_dispatch 1 +#define CL_KHR_COMMAND_BUFFER_MUTABLE_DISPATCH_EXTENSION_NAME \ + "cl_khr_command_buffer_mutable_dispatch" + + +#define CL_KHR_COMMAND_BUFFER_MUTABLE_DISPATCH_EXTENSION_VERSION CL_MAKE_VERSION(0, 9, 3) + +typedef cl_uint cl_command_buffer_update_type_khr; +typedef cl_bitfield cl_mutable_dispatch_fields_khr; +typedef cl_uint cl_mutable_command_info_khr; +typedef struct _cl_mutable_dispatch_arg_khr { + cl_uint arg_index; + size_t arg_size; + const void* arg_value; +} cl_mutable_dispatch_arg_khr; +typedef struct _cl_mutable_dispatch_exec_info_khr { + cl_uint param_name; + size_t param_value_size; + const void* param_value; +} cl_mutable_dispatch_exec_info_khr; +typedef struct _cl_mutable_dispatch_config_khr { + cl_mutable_command_khr command; + cl_uint num_args; + cl_uint num_svm_args; + cl_uint num_exec_infos; 
+ cl_uint work_dim; + const cl_mutable_dispatch_arg_khr* arg_list; + const cl_mutable_dispatch_arg_khr* arg_svm_list; + const cl_mutable_dispatch_exec_info_khr* exec_info_list; + const size_t* global_work_offset; + const size_t* global_work_size; + const size_t* local_work_size; +} cl_mutable_dispatch_config_khr; +typedef cl_bitfield cl_mutable_dispatch_asserts_khr; + +/* cl_command_buffer_flags_khr - bitfield */ +#define CL_COMMAND_BUFFER_MUTABLE_KHR (1 << 1) + +/* Error codes */ +#define CL_INVALID_MUTABLE_COMMAND_KHR -1141 + +/* cl_device_info */ +#define CL_DEVICE_MUTABLE_DISPATCH_CAPABILITIES_KHR 0x12B0 + +/* cl_command_properties_khr */ +#define CL_MUTABLE_DISPATCH_UPDATABLE_FIELDS_KHR 0x12B1 + +/* cl_mutable_dispatch_fields_khr - bitfield */ +#define CL_MUTABLE_DISPATCH_GLOBAL_OFFSET_KHR (1 << 0) +#define CL_MUTABLE_DISPATCH_GLOBAL_SIZE_KHR (1 << 1) +#define CL_MUTABLE_DISPATCH_LOCAL_SIZE_KHR (1 << 2) +#define CL_MUTABLE_DISPATCH_ARGUMENTS_KHR (1 << 3) +#define CL_MUTABLE_DISPATCH_EXEC_INFO_KHR (1 << 4) + +/* cl_mutable_command_info_khr */ +#define CL_MUTABLE_COMMAND_COMMAND_QUEUE_KHR 0x12A0 +#define CL_MUTABLE_COMMAND_COMMAND_BUFFER_KHR 0x12A1 +#define CL_MUTABLE_COMMAND_COMMAND_TYPE_KHR 0x12AD +#define CL_MUTABLE_COMMAND_PROPERTIES_ARRAY_KHR 0x12A2 +#define CL_MUTABLE_DISPATCH_KERNEL_KHR 0x12A3 +#define CL_MUTABLE_DISPATCH_DIMENSIONS_KHR 0x12A4 +#define CL_MUTABLE_DISPATCH_GLOBAL_WORK_OFFSET_KHR 0x12A5 +#define CL_MUTABLE_DISPATCH_GLOBAL_WORK_SIZE_KHR 0x12A6 +#define CL_MUTABLE_DISPATCH_LOCAL_WORK_SIZE_KHR 0x12A7 + +/* cl_command_buffer_update_type_khr */ +#define CL_STRUCTURE_TYPE_MUTABLE_DISPATCH_CONFIG_KHR 0 + +/* cl_command_buffer_properties_khr */ +#define CL_COMMAND_BUFFER_MUTABLE_DISPATCH_ASSERTS_KHR 0x12B7 + +/* cl_command_properties_khr */ +#define CL_MUTABLE_DISPATCH_ASSERTS_KHR 0x12B8 + +/* cl_mutable_dispatch_asserts_khr - bitfield */ +#define CL_MUTABLE_DISPATCH_ASSERT_NO_ADDITIONAL_WORK_GROUPS_KHR (1 << 0) + + +typedef cl_int CL_API_CALL 
+clUpdateMutableCommandsKHR_t( + cl_command_buffer_khr command_buffer, + cl_uint num_configs, + const cl_command_buffer_update_type_khr* config_types, + const void** configs); + +typedef clUpdateMutableCommandsKHR_t * +clUpdateMutableCommandsKHR_fn ; + +typedef cl_int CL_API_CALL +clGetMutableCommandInfoKHR_t( + cl_mutable_command_khr command, + cl_mutable_command_info_khr param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret); + +typedef clGetMutableCommandInfoKHR_t * +clGetMutableCommandInfoKHR_fn ; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clUpdateMutableCommandsKHR( + cl_command_buffer_khr command_buffer, + cl_uint num_configs, + const cl_command_buffer_update_type_khr* config_types, + const void** configs) ; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetMutableCommandInfoKHR( + cl_mutable_command_khr command, + cl_mutable_command_info_khr param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret) ; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +#endif /* defined(CL_ENABLE_BETA_EXTENSIONS) */ + +/*************************************************************** +* cl_khr_fp64 +***************************************************************/ +#define cl_khr_fp64 1 +#define CL_KHR_FP64_EXTENSION_NAME \ + "cl_khr_fp64" + + +#define CL_KHR_FP64_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +#if !defined(CL_VERSION_1_2) +/* cl_device_info - defined in CL.h for OpenCL 1.2 and newer */ +#define CL_DEVICE_DOUBLE_FP_CONFIG 0x1032 + +#endif /* !defined(CL_VERSION_1_2) */ + +/*************************************************************** +* cl_khr_fp16 +***************************************************************/ +#define cl_khr_fp16 1 +#define CL_KHR_FP16_EXTENSION_NAME \ + "cl_khr_fp16" + + +#define CL_KHR_FP16_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/* cl_device_info */ +#define CL_DEVICE_HALF_FP_CONFIG 
0x1033 + +/*************************************************************** +* cl_APPLE_SetMemObjectDestructor +***************************************************************/ +#define cl_APPLE_SetMemObjectDestructor 1 +#define CL_APPLE_SETMEMOBJECTDESTRUCTOR_EXTENSION_NAME \ + "cl_APPLE_SetMemObjectDestructor" + + +#define CL_APPLE_SETMEMOBJECTDESTRUCTOR_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + + +typedef cl_int CL_API_CALL +clSetMemObjectDestructorAPPLE_t( + cl_mem memobj, + void (CL_CALLBACK* pfn_notify)(cl_mem memobj, void* user_data), + void* user_data); + +typedef clSetMemObjectDestructorAPPLE_t * +clSetMemObjectDestructorAPPLE_fn CL_API_SUFFIX__VERSION_1_0; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clSetMemObjectDestructorAPPLE( + cl_mem memobj, + void (CL_CALLBACK* pfn_notify)(cl_mem memobj, void* user_data), + void* user_data) CL_API_SUFFIX__VERSION_1_0; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_APPLE_ContextLoggingFunctions +***************************************************************/ +#define cl_APPLE_ContextLoggingFunctions 1 +#define CL_APPLE_CONTEXTLOGGINGFUNCTIONS_EXTENSION_NAME \ + "cl_APPLE_ContextLoggingFunctions" + + +#define CL_APPLE_CONTEXTLOGGINGFUNCTIONS_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + + +typedef void CL_API_CALL +clLogMessagesToSystemLogAPPLE_t( + const char* errstr, + const void* private_info, + size_t cb, + void* user_data); + +typedef clLogMessagesToSystemLogAPPLE_t * +clLogMessagesToSystemLogAPPLE_fn CL_API_SUFFIX__VERSION_1_0; + +typedef void CL_API_CALL +clLogMessagesToStdoutAPPLE_t( + const char* errstr, + const void* private_info, + size_t cb, + void* user_data); + +typedef clLogMessagesToStdoutAPPLE_t * +clLogMessagesToStdoutAPPLE_fn CL_API_SUFFIX__VERSION_1_0; + +typedef void CL_API_CALL +clLogMessagesToStderrAPPLE_t( + const char* errstr, + const 
void* private_info, + size_t cb, + void* user_data); + +typedef clLogMessagesToStderrAPPLE_t * +clLogMessagesToStderrAPPLE_fn CL_API_SUFFIX__VERSION_1_0; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY void CL_API_CALL +clLogMessagesToSystemLogAPPLE( + const char* errstr, + const void* private_info, + size_t cb, + void* user_data) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY void CL_API_CALL +clLogMessagesToStdoutAPPLE( + const char* errstr, + const void* private_info, + size_t cb, + void* user_data) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY void CL_API_CALL +clLogMessagesToStderrAPPLE( + const char* errstr, + const void* private_info, + size_t cb, + void* user_data) CL_API_SUFFIX__VERSION_1_0; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_khr_icd +***************************************************************/ +#define cl_khr_icd 1 +#define CL_KHR_ICD_EXTENSION_NAME \ + "cl_khr_icd" + + +#define CL_KHR_ICD_EXTENSION_VERSION CL_MAKE_VERSION(2, 0, 0) + +/* cl_platform_info */ +#define CL_PLATFORM_ICD_SUFFIX_KHR 0x0920 + +/* Error codes */ +#define CL_PLATFORM_NOT_FOUND_KHR -1001 + +/* ICD 2 tag value */ +#if INTPTR_MAX == INT32_MAX +#define CL_ICD2_TAG_KHR ((intptr_t)0x434C3331) +#else +#define CL_ICD2_TAG_KHR ((intptr_t)0x4F50454E434C3331) +#endif + + +typedef cl_int CL_API_CALL +clIcdGetPlatformIDsKHR_t( + cl_uint num_entries, + cl_platform_id* platforms, + cl_uint* num_platforms); + +typedef clIcdGetPlatformIDsKHR_t * +clIcdGetPlatformIDsKHR_fn ; + +typedef void* CL_API_CALL +clIcdGetFunctionAddressForPlatformKHR_t( + cl_platform_id platform, + const char* func_name); + +typedef clIcdGetFunctionAddressForPlatformKHR_t * +clIcdGetFunctionAddressForPlatformKHR_fn ; + +typedef cl_int CL_API_CALL +clIcdSetPlatformDispatchDataKHR_t( + cl_platform_id platform, + void* dispatch_data); + +typedef 
clIcdSetPlatformDispatchDataKHR_t * +clIcdSetPlatformDispatchDataKHR_fn ; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clIcdGetPlatformIDsKHR( + cl_uint num_entries, + cl_platform_id* platforms, + cl_uint* num_platforms) ; + +extern CL_API_ENTRY void* CL_API_CALL +clIcdGetFunctionAddressForPlatformKHR( + cl_platform_id platform, + const char* func_name) ; + +extern CL_API_ENTRY cl_int CL_API_CALL +clIcdSetPlatformDispatchDataKHR( + cl_platform_id platform, + void* dispatch_data) ; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_khr_il_program +***************************************************************/ +#define cl_khr_il_program 1 +#define CL_KHR_IL_PROGRAM_EXTENSION_NAME \ + "cl_khr_il_program" + + +#define CL_KHR_IL_PROGRAM_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/* cl_device_info */ +#define CL_DEVICE_IL_VERSION_KHR 0x105B + +/* cl_program_info */ +#define CL_PROGRAM_IL_KHR 0x1169 + + +typedef cl_program CL_API_CALL +clCreateProgramWithILKHR_t( + cl_context context, + const void* il, + size_t length, + cl_int* errcode_ret); + +typedef clCreateProgramWithILKHR_t * +clCreateProgramWithILKHR_fn CL_API_SUFFIX__VERSION_1_2; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_program CL_API_CALL +clCreateProgramWithILKHR( + cl_context context, + const void* il, + size_t length, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_2; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_khr_image2d_from_buffer +***************************************************************/ +#define cl_khr_image2d_from_buffer 1 +#define CL_KHR_IMAGE2D_FROM_BUFFER_EXTENSION_NAME \ + "cl_khr_image2d_from_buffer" + + +#define CL_KHR_IMAGE2D_FROM_BUFFER_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/* 
cl_device_info */ +#define CL_DEVICE_IMAGE_PITCH_ALIGNMENT_KHR 0x104A +#define CL_DEVICE_IMAGE_BASE_ADDRESS_ALIGNMENT_KHR 0x104B + +/*************************************************************** +* cl_khr_initialize_memory +***************************************************************/ +#define cl_khr_initialize_memory 1 +#define CL_KHR_INITIALIZE_MEMORY_EXTENSION_NAME \ + "cl_khr_initialize_memory" + + +#define CL_KHR_INITIALIZE_MEMORY_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +typedef cl_bitfield cl_context_memory_initialize_khr; + +/* cl_context_properties */ +#define CL_CONTEXT_MEMORY_INITIALIZE_KHR 0x2030 + +/* cl_context_memory_initialize_khr */ +#define CL_CONTEXT_MEMORY_INITIALIZE_LOCAL_KHR (1 << 0) +#define CL_CONTEXT_MEMORY_INITIALIZE_PRIVATE_KHR (1 << 1) + +/*************************************************************** +* cl_khr_terminate_context +***************************************************************/ +#define cl_khr_terminate_context 1 +#define CL_KHR_TERMINATE_CONTEXT_EXTENSION_NAME \ + "cl_khr_terminate_context" + + +#define CL_KHR_TERMINATE_CONTEXT_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +typedef cl_bitfield cl_device_terminate_capability_khr; + +/* cl_device_info */ +#define CL_DEVICE_TERMINATE_CAPABILITY_KHR 0x2031 + +/* cl_context_properties */ +#define CL_CONTEXT_TERMINATE_KHR 0x2032 + +/* cl_device_terminate_capability_khr */ +#define CL_DEVICE_TERMINATE_CAPABILITY_CONTEXT_KHR (1 << 0) + +/* Error codes */ +#define CL_CONTEXT_TERMINATED_KHR -1121 + + +typedef cl_int CL_API_CALL +clTerminateContextKHR_t( + cl_context context); + +typedef clTerminateContextKHR_t * +clTerminateContextKHR_fn CL_API_SUFFIX__VERSION_1_2; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clTerminateContextKHR( + cl_context context) CL_API_SUFFIX__VERSION_1_2; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + 
+/*************************************************************** +* cl_khr_spir +***************************************************************/ +#define cl_khr_spir 1 +#define CL_KHR_SPIR_EXTENSION_NAME \ + "cl_khr_spir" + + +#define CL_KHR_SPIR_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/* cl_device_info */ +#define CL_DEVICE_SPIR_VERSIONS 0x40E0 + +/* cl_program_binary_type */ +#define CL_PROGRAM_BINARY_TYPE_INTERMEDIATE 0x40E1 + +/*************************************************************** +* cl_khr_create_command_queue +***************************************************************/ +#define cl_khr_create_command_queue 1 +#define CL_KHR_CREATE_COMMAND_QUEUE_EXTENSION_NAME \ + "cl_khr_create_command_queue" + + +#define CL_KHR_CREATE_COMMAND_QUEUE_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +typedef cl_properties cl_queue_properties_khr; + + +typedef cl_command_queue CL_API_CALL +clCreateCommandQueueWithPropertiesKHR_t( + cl_context context, + cl_device_id device, + const cl_queue_properties_khr* properties, + cl_int* errcode_ret); + +typedef clCreateCommandQueueWithPropertiesKHR_t * +clCreateCommandQueueWithPropertiesKHR_fn CL_API_SUFFIX__VERSION_1_2; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_command_queue CL_API_CALL +clCreateCommandQueueWithPropertiesKHR( + cl_context context, + cl_device_id device, + const cl_queue_properties_khr* properties, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_2; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_nv_device_attribute_query +***************************************************************/ +#define cl_nv_device_attribute_query 1 +#define CL_NV_DEVICE_ATTRIBUTE_QUERY_EXTENSION_NAME \ + "cl_nv_device_attribute_query" + + +#define CL_NV_DEVICE_ATTRIBUTE_QUERY_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* cl_device_info */ +#define CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV 
0x4000 +#define CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV 0x4001 +#define CL_DEVICE_REGISTERS_PER_BLOCK_NV 0x4002 +#define CL_DEVICE_WARP_SIZE_NV 0x4003 +#define CL_DEVICE_GPU_OVERLAP_NV 0x4004 +#define CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV 0x4005 +#define CL_DEVICE_INTEGRATED_MEMORY_NV 0x4006 + +/*************************************************************** +* cl_amd_device_attribute_query +***************************************************************/ +#define cl_amd_device_attribute_query 1 +#define CL_AMD_DEVICE_ATTRIBUTE_QUERY_EXTENSION_NAME \ + "cl_amd_device_attribute_query" + + +#define CL_AMD_DEVICE_ATTRIBUTE_QUERY_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* cl_device_info */ +#define CL_DEVICE_PROFILING_TIMER_OFFSET_AMD 0x4036 +#define CL_DEVICE_TOPOLOGY_AMD 0x4037 +#define CL_DEVICE_BOARD_NAME_AMD 0x4038 +#define CL_DEVICE_GLOBAL_FREE_MEMORY_AMD 0x4039 +#define CL_DEVICE_SIMD_PER_COMPUTE_UNIT_AMD 0x4040 +#define CL_DEVICE_SIMD_WIDTH_AMD 0x4041 +#define CL_DEVICE_SIMD_INSTRUCTION_WIDTH_AMD 0x4042 +#define CL_DEVICE_WAVEFRONT_WIDTH_AMD 0x4043 +#define CL_DEVICE_GLOBAL_MEM_CHANNELS_AMD 0x4044 +#define CL_DEVICE_GLOBAL_MEM_CHANNEL_BANKS_AMD 0x4045 +#define CL_DEVICE_GLOBAL_MEM_CHANNEL_BANK_WIDTH_AMD 0x4046 +#define CL_DEVICE_LOCAL_MEM_SIZE_PER_COMPUTE_UNIT_AMD 0x4047 +#define CL_DEVICE_LOCAL_MEM_BANKS_AMD 0x4048 +#define CL_DEVICE_THREAD_TRACE_SUPPORTED_AMD 0x4049 +#define CL_DEVICE_GFXIP_MAJOR_AMD 0x404A +#define CL_DEVICE_GFXIP_MINOR_AMD 0x404B +#define CL_DEVICE_AVAILABLE_ASYNC_QUEUES_AMD 0x404C +#define CL_DEVICE_PREFERRED_WORK_GROUP_SIZE_AMD 0x4030 +#define CL_DEVICE_MAX_WORK_GROUP_SIZE_AMD 0x4031 +#define CL_DEVICE_PREFERRED_CONSTANT_BUFFER_SIZE_AMD 0x4033 +#define CL_DEVICE_PCIE_ID_AMD 0x4034 + +/*************************************************************** +* cl_arm_printf +***************************************************************/ +#define cl_arm_printf 1 +#define CL_ARM_PRINTF_EXTENSION_NAME \ + "cl_arm_printf" + + +#define 
CL_ARM_PRINTF_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* cl_context_properties */ +#define CL_PRINTF_CALLBACK_ARM 0x40B0 +#define CL_PRINTF_BUFFERSIZE_ARM 0x40B1 + +/*************************************************************** +* cl_ext_device_fission +***************************************************************/ +#define cl_ext_device_fission 1 +#define CL_EXT_DEVICE_FISSION_EXTENSION_NAME \ + "cl_ext_device_fission" + + +#define CL_EXT_DEVICE_FISSION_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +typedef cl_ulong cl_device_partition_property_ext; + +/* Error codes */ +#define CL_DEVICE_PARTITION_FAILED_EXT -1057 +#define CL_INVALID_PARTITION_COUNT_EXT -1058 +#define CL_INVALID_PARTITION_NAME_EXT -1059 + +/* cl_device_info */ +#define CL_DEVICE_PARENT_DEVICE_EXT 0x4054 +#define CL_DEVICE_PARTITION_TYPES_EXT 0x4055 +#define CL_DEVICE_AFFINITY_DOMAINS_EXT 0x4056 +#define CL_DEVICE_REFERENCE_COUNT_EXT 0x4057 +#define CL_DEVICE_PARTITION_STYLE_EXT 0x4058 + +/* cl_device_partition_property_ext */ +#define CL_DEVICE_PARTITION_EQUALLY_EXT 0x4050 +#define CL_DEVICE_PARTITION_BY_COUNTS_EXT 0x4051 +#define CL_DEVICE_PARTITION_BY_NAMES_EXT 0x4052 +#define CL_DEVICE_PARTITION_BY_AFFINITY_DOMAIN_EXT 0x4053 + +/* cl_device_partition_property_ext - affinity domains */ +#define CL_AFFINITY_DOMAIN_L1_CACHE_EXT 0x1 +#define CL_AFFINITY_DOMAIN_L2_CACHE_EXT 0x2 +#define CL_AFFINITY_DOMAIN_L3_CACHE_EXT 0x3 +#define CL_AFFINITY_DOMAIN_L4_CACHE_EXT 0x4 +#define CL_AFFINITY_DOMAIN_NUMA_EXT 0x10 +#define CL_AFFINITY_DOMAIN_NEXT_FISSIONABLE_EXT 0x100 + +/* cl_device_partition_property_ext - list terminators */ +#define CL_PROPERTIES_LIST_END_EXT ((cl_device_partition_property_ext)0) +#define CL_PARTITION_BY_COUNTS_LIST_END_EXT ((cl_device_partition_property_ext)0) +#define CL_PARTITION_BY_NAMES_LIST_END_EXT ((cl_device_partition_property_ext)0 - 1) + + +typedef cl_int CL_API_CALL +clReleaseDeviceEXT_t( + cl_device_id device); + +typedef clReleaseDeviceEXT_t * 
+clReleaseDeviceEXT_fn CL_API_SUFFIX__VERSION_1_1; + +typedef cl_int CL_API_CALL +clRetainDeviceEXT_t( + cl_device_id device); + +typedef clRetainDeviceEXT_t * +clRetainDeviceEXT_fn CL_API_SUFFIX__VERSION_1_1; + +typedef cl_int CL_API_CALL +clCreateSubDevicesEXT_t( + cl_device_id in_device, + const cl_device_partition_property_ext* properties, + cl_uint num_entries, + cl_device_id* out_devices, + cl_uint* num_devices); + +typedef clCreateSubDevicesEXT_t * +clCreateSubDevicesEXT_fn CL_API_SUFFIX__VERSION_1_1; + +#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clReleaseDeviceEXT( + cl_device_id device) CL_API_SUFFIX__VERSION_1_1; + +extern CL_API_ENTRY cl_int CL_API_CALL +clRetainDeviceEXT( + cl_device_id device) CL_API_SUFFIX__VERSION_1_1; + +extern CL_API_ENTRY cl_int CL_API_CALL +clCreateSubDevicesEXT( + cl_device_id in_device, + const cl_device_partition_property_ext* properties, + cl_uint num_entries, + cl_device_id* out_devices, + cl_uint* num_devices) CL_API_SUFFIX__VERSION_1_1; + +#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_ext_migrate_memobject +***************************************************************/ +#define cl_ext_migrate_memobject 1 +#define CL_EXT_MIGRATE_MEMOBJECT_EXTENSION_NAME \ + "cl_ext_migrate_memobject" + + +#define CL_EXT_MIGRATE_MEMOBJECT_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +typedef cl_bitfield cl_mem_migration_flags_ext; + +/* cl_mem_migration_flags_ext */ +#define CL_MIGRATE_MEM_OBJECT_HOST_EXT (1 << 0) + +/* cl_command_type */ +#define CL_COMMAND_MIGRATE_MEM_OBJECT_EXT 0x4040 + + +typedef cl_int CL_API_CALL +clEnqueueMigrateMemObjectEXT_t( + cl_command_queue command_queue, + cl_uint num_mem_objects, + const cl_mem* mem_objects, + cl_mem_migration_flags_ext flags, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef 
clEnqueueMigrateMemObjectEXT_t * +clEnqueueMigrateMemObjectEXT_fn ; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueMigrateMemObjectEXT( + cl_command_queue command_queue, + cl_uint num_mem_objects, + const cl_mem* mem_objects, + cl_mem_migration_flags_ext flags, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) ; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_ext_cxx_for_opencl +***************************************************************/ +#define cl_ext_cxx_for_opencl 1 +#define CL_EXT_CXX_FOR_OPENCL_EXTENSION_NAME \ + "cl_ext_cxx_for_opencl" + + +#define CL_EXT_CXX_FOR_OPENCL_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/* cl_device_info */ +#define CL_DEVICE_CXX_FOR_OPENCL_NUMERIC_VERSION_EXT 0x4230 + +/*************************************************************** +* cl_qcom_ext_host_ptr +***************************************************************/ +#define cl_qcom_ext_host_ptr 1 +#define CL_QCOM_EXT_HOST_PTR_EXTENSION_NAME \ + "cl_qcom_ext_host_ptr" + + +#define CL_QCOM_EXT_HOST_PTR_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +typedef cl_uint cl_image_pitch_info_qcom; +typedef struct _cl_mem_ext_host_ptr { + cl_uint allocation_type; + cl_uint host_cache_policy; +} cl_mem_ext_host_ptr; + +/* cl_mem_flags */ +#define CL_MEM_EXT_HOST_PTR_QCOM (1 << 29) + +/* cl_device_info */ +#define CL_DEVICE_EXT_MEM_PADDING_IN_BYTES_QCOM 0x40A0 +#define CL_DEVICE_PAGE_SIZE_QCOM 0x40A1 + +/* cl_image_pitch_info_qcom */ +#define CL_IMAGE_ROW_ALIGNMENT_QCOM 0x40A2 +#define CL_IMAGE_SLICE_ALIGNMENT_QCOM 0x40A3 + +/* cl_uint host_cache_policy */ +#define CL_MEM_HOST_UNCACHED_QCOM 0x40A4 +#define CL_MEM_HOST_WRITEBACK_QCOM 0x40A5 +#define CL_MEM_HOST_WRITETHROUGH_QCOM 0x40A6 +#define CL_MEM_HOST_WRITE_COMBINING_QCOM 0x40A7 + + +typedef cl_int CL_API_CALL 
+clGetDeviceImageInfoQCOM_t( + cl_device_id device, + size_t image_width, + size_t image_height, + const cl_image_format* image_format, + cl_image_pitch_info_qcom param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret); + +typedef clGetDeviceImageInfoQCOM_t * +clGetDeviceImageInfoQCOM_fn ; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetDeviceImageInfoQCOM( + cl_device_id device, + size_t image_width, + size_t image_height, + const cl_image_format* image_format, + cl_image_pitch_info_qcom param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret) ; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_qcom_ext_host_ptr_iocoherent +***************************************************************/ +#define cl_qcom_ext_host_ptr_iocoherent 1 +#define CL_QCOM_EXT_HOST_PTR_IOCOHERENT_EXTENSION_NAME \ + "cl_qcom_ext_host_ptr_iocoherent" + + +#define CL_QCOM_EXT_HOST_PTR_IOCOHERENT_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* cl_uint host_cache_policy */ +#define CL_MEM_HOST_IOCOHERENT_QCOM 0x40A9 + +/*************************************************************** +* cl_qcom_ion_host_ptr +***************************************************************/ +#define cl_qcom_ion_host_ptr 1 +#define CL_QCOM_ION_HOST_PTR_EXTENSION_NAME \ + "cl_qcom_ion_host_ptr" + + +#define CL_QCOM_ION_HOST_PTR_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* type cl_mem_ext_host_ptr */ +typedef struct _cl_mem_ion_host_ptr { + cl_mem_ext_host_ptr ext_host_ptr; + int ion_filedesc; + void* ion_hostptr; +} cl_mem_ion_host_ptr; + +/* cl_uint allocation_type */ +#define CL_MEM_ION_HOST_PTR_QCOM 0x40A8 + +/*************************************************************** +* cl_qcom_android_native_buffer_host_ptr +***************************************************************/ +#define 
cl_qcom_android_native_buffer_host_ptr 1 +#define CL_QCOM_ANDROID_NATIVE_BUFFER_HOST_PTR_EXTENSION_NAME \ + "cl_qcom_android_native_buffer_host_ptr" + + +#define CL_QCOM_ANDROID_NATIVE_BUFFER_HOST_PTR_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* type cl_mem_ext_host_ptr */ +typedef struct _cl_mem_android_native_buffer_host_ptr { + cl_mem_ext_host_ptr ext_host_ptr; + void* anb_ptr; +} cl_mem_android_native_buffer_host_ptr; + +/* cl_uint allocation_type */ +#define CL_MEM_ANDROID_NATIVE_BUFFER_HOST_PTR_QCOM 0x40C6 + +/*************************************************************** +* cl_img_yuv_image +***************************************************************/ +#define cl_img_yuv_image 1 +#define CL_IMG_YUV_IMAGE_EXTENSION_NAME \ + "cl_img_yuv_image" + + +#define CL_IMG_YUV_IMAGE_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* cl_channel_order */ +#define CL_NV21_IMG 0x40D0 +#define CL_YV12_IMG 0x40D1 + +/*************************************************************** +* cl_img_cached_allocations +***************************************************************/ +#define cl_img_cached_allocations 1 +#define CL_IMG_CACHED_ALLOCATIONS_EXTENSION_NAME \ + "cl_img_cached_allocations" + + +#define CL_IMG_CACHED_ALLOCATIONS_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* cl_mem_flags */ +#define CL_MEM_USE_UNCACHED_CPU_MEMORY_IMG (1 << 26) +#define CL_MEM_USE_CACHED_CPU_MEMORY_IMG (1 << 27) + +/*************************************************************** +* cl_img_use_gralloc_ptr +***************************************************************/ +#define cl_img_use_gralloc_ptr 1 +#define CL_IMG_USE_GRALLOC_PTR_EXTENSION_NAME \ + "cl_img_use_gralloc_ptr" + + +#define CL_IMG_USE_GRALLOC_PTR_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* Error codes */ +#define CL_GRALLOC_RESOURCE_NOT_ACQUIRED_IMG 0x40D4 +#define CL_INVALID_GRALLOC_OBJECT_IMG 0x40D5 + +/* cl_mem_flags */ +#define CL_MEM_USE_GRALLOC_PTR_IMG (1 << 28) + +/* cl_command_type */ +#define 
CL_COMMAND_ACQUIRE_GRALLOC_OBJECTS_IMG 0x40D2 +#define CL_COMMAND_RELEASE_GRALLOC_OBJECTS_IMG 0x40D3 + + +typedef cl_int CL_API_CALL +clEnqueueAcquireGrallocObjectsIMG_t( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueAcquireGrallocObjectsIMG_t * +clEnqueueAcquireGrallocObjectsIMG_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL +clEnqueueReleaseGrallocObjectsIMG_t( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueReleaseGrallocObjectsIMG_t * +clEnqueueReleaseGrallocObjectsIMG_fn CL_API_SUFFIX__VERSION_1_2; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueAcquireGrallocObjectsIMG( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueReleaseGrallocObjectsIMG( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) CL_API_SUFFIX__VERSION_1_2; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_img_generate_mipmap +***************************************************************/ +#define cl_img_generate_mipmap 1 +#define CL_IMG_GENERATE_MIPMAP_EXTENSION_NAME \ + "cl_img_generate_mipmap" + + +#define CL_IMG_GENERATE_MIPMAP_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +typedef cl_uint cl_mipmap_filter_mode_img; + +/* cl_mipmap_filter_mode_img */ +#define CL_MIPMAP_FILTER_ANY_IMG 0x0 +#define 
CL_MIPMAP_FILTER_BOX_IMG 0x1 + +/* cl_command_type */ +#define CL_COMMAND_GENERATE_MIPMAP_IMG 0x40D6 + + +typedef cl_int CL_API_CALL +clEnqueueGenerateMipmapIMG_t( + cl_command_queue command_queue, + cl_mem src_image, + cl_mem dst_image, + cl_mipmap_filter_mode_img mipmap_filter_mode, + const size_t* array_region, + const size_t* mip_region, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueGenerateMipmapIMG_t * +clEnqueueGenerateMipmapIMG_fn CL_API_SUFFIX__VERSION_1_2; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueGenerateMipmapIMG( + cl_command_queue command_queue, + cl_mem src_image, + cl_mem dst_image, + cl_mipmap_filter_mode_img mipmap_filter_mode, + const size_t* array_region, + const size_t* mip_region, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) CL_API_SUFFIX__VERSION_1_2; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_img_mem_properties +***************************************************************/ +#define cl_img_mem_properties 1 +#define CL_IMG_MEM_PROPERTIES_EXTENSION_NAME \ + "cl_img_mem_properties" + + +#define CL_IMG_MEM_PROPERTIES_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* cl_mem_properties */ +#define CL_MEM_ALLOC_FLAGS_IMG 0x40D7 + +/* cl_mem_alloc_flags_img */ +#define CL_MEM_ALLOC_RELAX_REQUIREMENTS_IMG (1 << 0) +#define CL_MEM_ALLOC_GPU_WRITE_COMBINE_IMG (1 << 1) +#define CL_MEM_ALLOC_GPU_CACHED_IMG (1 << 2) +#define CL_MEM_ALLOC_CPU_LOCAL_IMG (1 << 3) +#define CL_MEM_ALLOC_GPU_LOCAL_IMG (1 << 4) +#define CL_MEM_ALLOC_GPU_PRIVATE_IMG (1 << 5) + +/* cl_device_info */ +#define CL_DEVICE_MEMORY_CAPABILITIES_IMG 0x40D8 + +/*************************************************************** +* cl_khr_subgroups +***************************************************************/ 
+#define cl_khr_subgroups 1 +#define CL_KHR_SUBGROUPS_EXTENSION_NAME \ + "cl_khr_subgroups" + + +#define CL_KHR_SUBGROUPS_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +#if !defined(CL_VERSION_2_1) +/* defined in CL.h for OpenCL 2.1 and newer */ +typedef cl_uint cl_kernel_sub_group_info; + +#endif /* !defined(CL_VERSION_2_1) */ + +/* cl_kernel_sub_group_info */ +#define CL_KERNEL_MAX_SUB_GROUP_SIZE_FOR_NDRANGE_KHR 0x2033 +#define CL_KERNEL_SUB_GROUP_COUNT_FOR_NDRANGE_KHR 0x2034 + + +typedef cl_int CL_API_CALL +clGetKernelSubGroupInfoKHR_t( + cl_kernel in_kernel, + cl_device_id in_device, + cl_kernel_sub_group_info param_name, + size_t input_value_size, + const void* input_value, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret); + +typedef clGetKernelSubGroupInfoKHR_t * +clGetKernelSubGroupInfoKHR_fn CL_API_SUFFIX__VERSION_2_0_DEPRECATED; + +#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetKernelSubGroupInfoKHR( + cl_kernel in_kernel, + cl_device_id in_device, + cl_kernel_sub_group_info param_name, + size_t input_value_size, + const void* input_value, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_2_0_DEPRECATED; + +#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_khr_mipmap_image +***************************************************************/ +#define cl_khr_mipmap_image 1 +#define CL_KHR_MIPMAP_IMAGE_EXTENSION_NAME \ + "cl_khr_mipmap_image" + + +#define CL_KHR_MIPMAP_IMAGE_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/* cl_sampler_properties */ +#define CL_SAMPLER_MIP_FILTER_MODE_KHR 0x1155 +#define CL_SAMPLER_LOD_MIN_KHR 0x1156 +#define CL_SAMPLER_LOD_MAX_KHR 0x1157 + +/*************************************************************** +* cl_khr_priority_hints +***************************************************************/ +#define 
cl_khr_priority_hints 1 +#define CL_KHR_PRIORITY_HINTS_EXTENSION_NAME \ + "cl_khr_priority_hints" + + +#define CL_KHR_PRIORITY_HINTS_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/* To be used by clGetEventInfo */ +typedef cl_uint cl_queue_priority_khr; + +/* cl_queue_properties */ +#define CL_QUEUE_PRIORITY_KHR 0x1096 + +/* cl_queue_priority_khr */ +#define CL_QUEUE_PRIORITY_HIGH_KHR (1 << 0) +#define CL_QUEUE_PRIORITY_MED_KHR (1 << 1) +#define CL_QUEUE_PRIORITY_LOW_KHR (1 << 2) + +/*************************************************************** +* cl_khr_throttle_hints +***************************************************************/ +#define cl_khr_throttle_hints 1 +#define CL_KHR_THROTTLE_HINTS_EXTENSION_NAME \ + "cl_khr_throttle_hints" + + +#define CL_KHR_THROTTLE_HINTS_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/* To be used by clGetEventInfo */ +typedef cl_uint cl_queue_throttle_khr; + +/* cl_queue_properties */ +#define CL_QUEUE_THROTTLE_KHR 0x1097 + +/* cl_queue_throttle_khr */ +#define CL_QUEUE_THROTTLE_HIGH_KHR (1 << 0) +#define CL_QUEUE_THROTTLE_MED_KHR (1 << 1) +#define CL_QUEUE_THROTTLE_LOW_KHR (1 << 2) + +/*************************************************************** +* cl_khr_subgroup_named_barrier +***************************************************************/ +#define cl_khr_subgroup_named_barrier 1 +#define CL_KHR_SUBGROUP_NAMED_BARRIER_EXTENSION_NAME \ + "cl_khr_subgroup_named_barrier" + + +#define CL_KHR_SUBGROUP_NAMED_BARRIER_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/* cl_device_info */ +#define CL_DEVICE_MAX_NAMED_BARRIER_COUNT_KHR 0x2035 + +/*************************************************************** +* cl_khr_extended_versioning +***************************************************************/ +#define cl_khr_extended_versioning 1 +#define CL_KHR_EXTENDED_VERSIONING_EXTENSION_NAME \ + "cl_khr_extended_versioning" + + +#define CL_KHR_EXTENDED_VERSIONING_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +#define 
CL_VERSION_MAJOR_BITS_KHR 10 +#define CL_VERSION_MINOR_BITS_KHR 10 +#define CL_VERSION_PATCH_BITS_KHR 12 + +#define CL_VERSION_MAJOR_MASK_KHR ((1 << CL_VERSION_MAJOR_BITS_KHR) - 1) +#define CL_VERSION_MINOR_MASK_KHR ((1 << CL_VERSION_MINOR_BITS_KHR) - 1) +#define CL_VERSION_PATCH_MASK_KHR ((1 << CL_VERSION_PATCH_BITS_KHR) - 1) + +#define CL_VERSION_MAJOR_KHR(version) ((version) >> (CL_VERSION_MINOR_BITS_KHR + CL_VERSION_PATCH_BITS_KHR)) +#define CL_VERSION_MINOR_KHR(version) (((version) >> CL_VERSION_PATCH_BITS_KHR) & CL_VERSION_MINOR_MASK_KHR) +#define CL_VERSION_PATCH_KHR(version) ((version) & CL_VERSION_PATCH_MASK_KHR) + +#define CL_MAKE_VERSION_KHR(major, minor, patch) \ + ((((major) & CL_VERSION_MAJOR_MASK_KHR) << (CL_VERSION_MINOR_BITS_KHR + CL_VERSION_PATCH_BITS_KHR)) | \ + (((minor) & CL_VERSION_MINOR_MASK_KHR) << CL_VERSION_PATCH_BITS_KHR) | \ + ((patch) & CL_VERSION_PATCH_MASK_KHR)) + +#define CL_NAME_VERSION_MAX_NAME_SIZE_KHR 64 + +typedef cl_uint cl_version_khr; +typedef struct _cl_name_version_khr { + cl_version_khr version; + char name[CL_NAME_VERSION_MAX_NAME_SIZE_KHR]; +} cl_name_version_khr; + +/* cl_platform_info */ +#define CL_PLATFORM_NUMERIC_VERSION_KHR 0x0906 +#define CL_PLATFORM_EXTENSIONS_WITH_VERSION_KHR 0x0907 + +/* cl_device_info */ +#define CL_DEVICE_NUMERIC_VERSION_KHR 0x105E +#define CL_DEVICE_OPENCL_C_NUMERIC_VERSION_KHR 0x105F +#define CL_DEVICE_EXTENSIONS_WITH_VERSION_KHR 0x1060 +#define CL_DEVICE_ILS_WITH_VERSION_KHR 0x1061 +#define CL_DEVICE_BUILT_IN_KERNELS_WITH_VERSION_KHR 0x1062 + +/*************************************************************** +* cl_khr_device_uuid +***************************************************************/ +#define cl_khr_device_uuid 1 +#define CL_KHR_DEVICE_UUID_EXTENSION_NAME \ + "cl_khr_device_uuid" + + +#define CL_KHR_DEVICE_UUID_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/* Size Constants */ +#define CL_UUID_SIZE_KHR 16 +#define CL_LUID_SIZE_KHR 8 + +/* cl_device_info */ +#define 
CL_DEVICE_UUID_KHR 0x106A +#define CL_DRIVER_UUID_KHR 0x106B +#define CL_DEVICE_LUID_VALID_KHR 0x106C +#define CL_DEVICE_LUID_KHR 0x106D +#define CL_DEVICE_NODE_MASK_KHR 0x106E + +/*************************************************************** +* cl_khr_pci_bus_info +***************************************************************/ +#define cl_khr_pci_bus_info 1 +#define CL_KHR_PCI_BUS_INFO_EXTENSION_NAME \ + "cl_khr_pci_bus_info" + + +#define CL_KHR_PCI_BUS_INFO_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +typedef struct _cl_device_pci_bus_info_khr { + cl_uint pci_domain; + cl_uint pci_bus; + cl_uint pci_device; + cl_uint pci_function; +} cl_device_pci_bus_info_khr; + +/* cl_device_info */ +#define CL_DEVICE_PCI_BUS_INFO_KHR 0x410F + +/*************************************************************** +* cl_khr_suggested_local_work_size +***************************************************************/ +#define cl_khr_suggested_local_work_size 1 +#define CL_KHR_SUGGESTED_LOCAL_WORK_SIZE_EXTENSION_NAME \ + "cl_khr_suggested_local_work_size" + + +#define CL_KHR_SUGGESTED_LOCAL_WORK_SIZE_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + + +typedef cl_int CL_API_CALL +clGetKernelSuggestedLocalWorkSizeKHR_t( + cl_command_queue command_queue, + cl_kernel kernel, + cl_uint work_dim, + const size_t* global_work_offset, + const size_t* global_work_size, + size_t* suggested_local_work_size); + +typedef clGetKernelSuggestedLocalWorkSizeKHR_t * +clGetKernelSuggestedLocalWorkSizeKHR_fn CL_API_SUFFIX__VERSION_3_0; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetKernelSuggestedLocalWorkSizeKHR( + cl_command_queue command_queue, + cl_kernel kernel, + cl_uint work_dim, + const size_t* global_work_offset, + const size_t* global_work_size, + size_t* suggested_local_work_size) CL_API_SUFFIX__VERSION_3_0; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + 
+/*************************************************************** +* cl_khr_integer_dot_product +***************************************************************/ +#define cl_khr_integer_dot_product 1 +#define CL_KHR_INTEGER_DOT_PRODUCT_EXTENSION_NAME \ + "cl_khr_integer_dot_product" + + +#define CL_KHR_INTEGER_DOT_PRODUCT_EXTENSION_VERSION CL_MAKE_VERSION(2, 0, 0) + +typedef cl_bitfield cl_device_integer_dot_product_capabilities_khr; +typedef struct _cl_device_integer_dot_product_acceleration_properties_khr { + cl_bool signed_accelerated; + cl_bool unsigned_accelerated; + cl_bool mixed_signedness_accelerated; + cl_bool accumulating_saturating_signed_accelerated; + cl_bool accumulating_saturating_unsigned_accelerated; + cl_bool accumulating_saturating_mixed_signedness_accelerated; +} cl_device_integer_dot_product_acceleration_properties_khr; + +/* cl_device_integer_dot_product_capabilities_khr */ +#define CL_DEVICE_INTEGER_DOT_PRODUCT_INPUT_4x8BIT_PACKED_KHR (1 << 0) +#define CL_DEVICE_INTEGER_DOT_PRODUCT_INPUT_4x8BIT_KHR (1 << 1) + +/* cl_device_info */ +#define CL_DEVICE_INTEGER_DOT_PRODUCT_CAPABILITIES_KHR 0x1073 +#define CL_DEVICE_INTEGER_DOT_PRODUCT_ACCELERATION_PROPERTIES_8BIT_KHR 0x1074 +#define CL_DEVICE_INTEGER_DOT_PRODUCT_ACCELERATION_PROPERTIES_4x8BIT_PACKED_KHR 0x1075 + +/*************************************************************** +* cl_khr_external_memory +***************************************************************/ +#define cl_khr_external_memory 1 +#define CL_KHR_EXTERNAL_MEMORY_EXTENSION_NAME \ + "cl_khr_external_memory" + + +#define CL_KHR_EXTERNAL_MEMORY_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 1) + +typedef cl_uint cl_external_memory_handle_type_khr; + +/* cl_platform_info */ +#define CL_PLATFORM_EXTERNAL_MEMORY_IMPORT_HANDLE_TYPES_KHR 0x2044 + +/* cl_device_info */ +#define CL_DEVICE_EXTERNAL_MEMORY_IMPORT_HANDLE_TYPES_KHR 0x204F +#define CL_DEVICE_EXTERNAL_MEMORY_IMPORT_ASSUME_LINEAR_IMAGES_HANDLE_TYPES_KHR 0x2052 + +/* cl_mem_properties 
*/ +#define CL_MEM_DEVICE_HANDLE_LIST_KHR 0x2051 +#define CL_MEM_DEVICE_HANDLE_LIST_END_KHR 0 + +/* cl_command_type */ +#define CL_COMMAND_ACQUIRE_EXTERNAL_MEM_OBJECTS_KHR 0x2047 +#define CL_COMMAND_RELEASE_EXTERNAL_MEM_OBJECTS_KHR 0x2048 + + +typedef cl_int CL_API_CALL +clEnqueueAcquireExternalMemObjectsKHR_t( + cl_command_queue command_queue, + cl_uint num_mem_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueAcquireExternalMemObjectsKHR_t * +clEnqueueAcquireExternalMemObjectsKHR_fn CL_API_SUFFIX__VERSION_3_0; + +typedef cl_int CL_API_CALL +clEnqueueReleaseExternalMemObjectsKHR_t( + cl_command_queue command_queue, + cl_uint num_mem_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueReleaseExternalMemObjectsKHR_t * +clEnqueueReleaseExternalMemObjectsKHR_fn CL_API_SUFFIX__VERSION_3_0; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueAcquireExternalMemObjectsKHR( + cl_command_queue command_queue, + cl_uint num_mem_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) CL_API_SUFFIX__VERSION_3_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueReleaseExternalMemObjectsKHR( + cl_command_queue command_queue, + cl_uint num_mem_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) CL_API_SUFFIX__VERSION_3_0; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_khr_external_memory_dma_buf +***************************************************************/ +#define cl_khr_external_memory_dma_buf 1 +#define CL_KHR_EXTERNAL_MEMORY_DMA_BUF_EXTENSION_NAME \ + 
"cl_khr_external_memory_dma_buf" + + +#define CL_KHR_EXTERNAL_MEMORY_DMA_BUF_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/* cl_external_memory_handle_type_khr */ +#define CL_EXTERNAL_MEMORY_HANDLE_DMA_BUF_KHR 0x2067 + +/*************************************************************** +* cl_khr_external_memory_opaque_fd +***************************************************************/ +#define cl_khr_external_memory_opaque_fd 1 +#define CL_KHR_EXTERNAL_MEMORY_OPAQUE_FD_EXTENSION_NAME \ + "cl_khr_external_memory_opaque_fd" + + +#define CL_KHR_EXTERNAL_MEMORY_OPAQUE_FD_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/* cl_external_memory_handle_type_khr */ +#define CL_EXTERNAL_MEMORY_HANDLE_OPAQUE_FD_KHR 0x2060 + +/*************************************************************** +* cl_khr_external_memory_win32 +***************************************************************/ +#define cl_khr_external_memory_win32 1 +#define CL_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME \ + "cl_khr_external_memory_win32" + + +#define CL_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_VERSION CL_MAKE_VERSION(1, 1, 0) + +/* cl_external_memory_handle_type_khr */ +#define CL_EXTERNAL_MEMORY_HANDLE_OPAQUE_WIN32_KHR 0x2061 +#define CL_EXTERNAL_MEMORY_HANDLE_OPAQUE_WIN32_KMT_KHR 0x2062 +#define CL_EXTERNAL_MEMORY_HANDLE_OPAQUE_WIN32_NAME_KHR 0x2069 + +/*************************************************************** +* cl_khr_external_semaphore +***************************************************************/ +#define cl_khr_external_semaphore 1 +#define CL_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME \ + "cl_khr_external_semaphore" + + +#define CL_KHR_EXTERNAL_SEMAPHORE_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 1) + +typedef struct _cl_semaphore_khr * cl_semaphore_khr; +typedef cl_uint cl_external_semaphore_handle_type_khr; + +/* cl_platform_info */ +#define CL_PLATFORM_SEMAPHORE_IMPORT_HANDLE_TYPES_KHR 0x2037 +#define CL_PLATFORM_SEMAPHORE_EXPORT_HANDLE_TYPES_KHR 0x2038 + +/* cl_device_info */ +#define 
CL_DEVICE_SEMAPHORE_IMPORT_HANDLE_TYPES_KHR 0x204D +#define CL_DEVICE_SEMAPHORE_EXPORT_HANDLE_TYPES_KHR 0x204E + +/* cl_semaphore_properties_khr */ +#define CL_SEMAPHORE_EXPORT_HANDLE_TYPES_KHR 0x203F +#define CL_SEMAPHORE_EXPORT_HANDLE_TYPES_LIST_END_KHR 0 + +/* cl_semaphore_info_khr */ +#define CL_SEMAPHORE_EXPORTABLE_KHR 0x2054 + + +typedef cl_int CL_API_CALL +clGetSemaphoreHandleForTypeKHR_t( + cl_semaphore_khr sema_object, + cl_device_id device, + cl_external_semaphore_handle_type_khr handle_type, + size_t handle_size, + void* handle_ptr, + size_t* handle_size_ret); + +typedef clGetSemaphoreHandleForTypeKHR_t * +clGetSemaphoreHandleForTypeKHR_fn CL_API_SUFFIX__VERSION_1_2; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetSemaphoreHandleForTypeKHR( + cl_semaphore_khr sema_object, + cl_device_id device, + cl_external_semaphore_handle_type_khr handle_type, + size_t handle_size, + void* handle_ptr, + size_t* handle_size_ret) CL_API_SUFFIX__VERSION_1_2; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_khr_external_semaphore_dx_fence (beta) +***************************************************************/ +#if defined(CL_ENABLE_BETA_EXTENSIONS) + +#define cl_khr_external_semaphore_dx_fence 1 +#define CL_KHR_EXTERNAL_SEMAPHORE_DX_FENCE_EXTENSION_NAME \ + "cl_khr_external_semaphore_dx_fence" + + +#define CL_KHR_EXTERNAL_SEMAPHORE_DX_FENCE_EXTENSION_VERSION CL_MAKE_VERSION(0, 9, 0) + +/* cl_external_semaphore_handle_type_khr */ +#define CL_SEMAPHORE_HANDLE_D3D12_FENCE_KHR 0x2059 + +#endif /* defined(CL_ENABLE_BETA_EXTENSIONS) */ + +/*************************************************************** +* cl_khr_external_semaphore_opaque_fd +***************************************************************/ +#define cl_khr_external_semaphore_opaque_fd 1 +#define CL_KHR_EXTERNAL_SEMAPHORE_OPAQUE_FD_EXTENSION_NAME \ + 
"cl_khr_external_semaphore_opaque_fd" + + +#define CL_KHR_EXTERNAL_SEMAPHORE_OPAQUE_FD_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/* cl_external_semaphore_handle_type_khr */ +#define CL_SEMAPHORE_HANDLE_OPAQUE_FD_KHR 0x2055 + +/*************************************************************** +* cl_khr_external_semaphore_sync_fd +***************************************************************/ +#define cl_khr_external_semaphore_sync_fd 1 +#define CL_KHR_EXTERNAL_SEMAPHORE_SYNC_FD_EXTENSION_NAME \ + "cl_khr_external_semaphore_sync_fd" + + +#define CL_KHR_EXTERNAL_SEMAPHORE_SYNC_FD_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +typedef cl_properties cl_semaphore_reimport_properties_khr; + +/* cl_external_semaphore_handle_type_khr */ +#define CL_SEMAPHORE_HANDLE_SYNC_FD_KHR 0x2058 + + +typedef cl_int CL_API_CALL +clReImportSemaphoreSyncFdKHR_t( + cl_semaphore_khr sema_object, + cl_semaphore_reimport_properties_khr* reimport_props, + int fd); + +typedef clReImportSemaphoreSyncFdKHR_t * +clReImportSemaphoreSyncFdKHR_fn CL_API_SUFFIX__VERSION_3_0; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clReImportSemaphoreSyncFdKHR( + cl_semaphore_khr sema_object, + cl_semaphore_reimport_properties_khr* reimport_props, + int fd) CL_API_SUFFIX__VERSION_3_0; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_khr_external_semaphore_win32 (beta) +***************************************************************/ +#if defined(CL_ENABLE_BETA_EXTENSIONS) + +#define cl_khr_external_semaphore_win32 1 +#define CL_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME \ + "cl_khr_external_semaphore_win32" + + +#define CL_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_VERSION CL_MAKE_VERSION(0, 9, 1) + +/* cl_external_semaphore_handle_type_khr */ +#define CL_SEMAPHORE_HANDLE_OPAQUE_WIN32_KHR 0x2056 +#define CL_SEMAPHORE_HANDLE_OPAQUE_WIN32_KMT_KHR 0x2057 +#define 
CL_SEMAPHORE_HANDLE_OPAQUE_WIN32_NAME_KHR 0x2068 + +#endif /* defined(CL_ENABLE_BETA_EXTENSIONS) */ + +/*************************************************************** +* cl_khr_semaphore +***************************************************************/ +#define cl_khr_semaphore 1 +#define CL_KHR_SEMAPHORE_EXTENSION_NAME \ + "cl_khr_semaphore" + + +#define CL_KHR_SEMAPHORE_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/* type cl_semaphore_khr */ +typedef cl_properties cl_semaphore_properties_khr; +typedef cl_uint cl_semaphore_info_khr; +typedef cl_uint cl_semaphore_type_khr; +typedef cl_ulong cl_semaphore_payload_khr; + +/* cl_semaphore_type */ +#define CL_SEMAPHORE_TYPE_BINARY_KHR 1 + +/* cl_platform_info */ +#define CL_PLATFORM_SEMAPHORE_TYPES_KHR 0x2036 + +/* cl_device_info */ +#define CL_DEVICE_SEMAPHORE_TYPES_KHR 0x204C + +/* cl_semaphore_info_khr */ +#define CL_SEMAPHORE_CONTEXT_KHR 0x2039 +#define CL_SEMAPHORE_REFERENCE_COUNT_KHR 0x203A +#define CL_SEMAPHORE_PROPERTIES_KHR 0x203B +#define CL_SEMAPHORE_PAYLOAD_KHR 0x203C + +/* cl_semaphore_info_khr or cl_semaphore_properties_khr */ +#define CL_SEMAPHORE_TYPE_KHR 0x203D +#define CL_SEMAPHORE_DEVICE_HANDLE_LIST_KHR 0x2053 +#define CL_SEMAPHORE_DEVICE_HANDLE_LIST_END_KHR 0 + +/* cl_command_type */ +#define CL_COMMAND_SEMAPHORE_WAIT_KHR 0x2042 +#define CL_COMMAND_SEMAPHORE_SIGNAL_KHR 0x2043 + +/* Error codes */ +#define CL_INVALID_SEMAPHORE_KHR -1142 + + +typedef cl_semaphore_khr CL_API_CALL +clCreateSemaphoreWithPropertiesKHR_t( + cl_context context, + const cl_semaphore_properties_khr* sema_props, + cl_int* errcode_ret); + +typedef clCreateSemaphoreWithPropertiesKHR_t * +clCreateSemaphoreWithPropertiesKHR_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL +clEnqueueWaitSemaphoresKHR_t( + cl_command_queue command_queue, + cl_uint num_sema_objects, + const cl_semaphore_khr* sema_objects, + const cl_semaphore_payload_khr* sema_payload_list, + cl_uint num_events_in_wait_list, + const cl_event* 
event_wait_list, + cl_event* event); + +typedef clEnqueueWaitSemaphoresKHR_t * +clEnqueueWaitSemaphoresKHR_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL +clEnqueueSignalSemaphoresKHR_t( + cl_command_queue command_queue, + cl_uint num_sema_objects, + const cl_semaphore_khr* sema_objects, + const cl_semaphore_payload_khr* sema_payload_list, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueSignalSemaphoresKHR_t * +clEnqueueSignalSemaphoresKHR_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL +clGetSemaphoreInfoKHR_t( + cl_semaphore_khr sema_object, + cl_semaphore_info_khr param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret); + +typedef clGetSemaphoreInfoKHR_t * +clGetSemaphoreInfoKHR_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL +clReleaseSemaphoreKHR_t( + cl_semaphore_khr sema_object); + +typedef clReleaseSemaphoreKHR_t * +clReleaseSemaphoreKHR_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL +clRetainSemaphoreKHR_t( + cl_semaphore_khr sema_object); + +typedef clRetainSemaphoreKHR_t * +clRetainSemaphoreKHR_fn CL_API_SUFFIX__VERSION_1_2; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_semaphore_khr CL_API_CALL +clCreateSemaphoreWithPropertiesKHR( + cl_context context, + const cl_semaphore_properties_khr* sema_props, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueWaitSemaphoresKHR( + cl_command_queue command_queue, + cl_uint num_sema_objects, + const cl_semaphore_khr* sema_objects, + const cl_semaphore_payload_khr* sema_payload_list, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueSignalSemaphoresKHR( + cl_command_queue command_queue, + cl_uint num_sema_objects, + const cl_semaphore_khr* sema_objects, + 
const cl_semaphore_payload_khr* sema_payload_list, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetSemaphoreInfoKHR( + cl_semaphore_khr sema_object, + cl_semaphore_info_khr param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clReleaseSemaphoreKHR( + cl_semaphore_khr sema_object) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clRetainSemaphoreKHR( + cl_semaphore_khr sema_object) CL_API_SUFFIX__VERSION_1_2; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_arm_import_memory +***************************************************************/ +#define cl_arm_import_memory 1 +#define CL_ARM_IMPORT_MEMORY_EXTENSION_NAME \ + "cl_arm_import_memory" + + +#define CL_ARM_IMPORT_MEMORY_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +typedef intptr_t cl_import_properties_arm; + +/* cl_import_properties_arm */ +#define CL_IMPORT_TYPE_ARM 0x40B2 +#define CL_IMPORT_TYPE_HOST_ARM 0x40B3 +#define CL_IMPORT_TYPE_DMA_BUF_ARM 0x40B4 +#define CL_IMPORT_TYPE_PROTECTED_ARM 0x40B5 +#define CL_IMPORT_TYPE_ANDROID_HARDWARE_BUFFER_ARM 0x41E2 +#define CL_IMPORT_DMA_BUF_DATA_CONSISTENCY_WITH_HOST_ARM 0x41E3 +#define CL_IMPORT_MEMORY_WHOLE_ALLOCATION_ARM SIZE_MAX +#define CL_IMPORT_ANDROID_HARDWARE_BUFFER_PLANE_INDEX_ARM 0x41EF +#define CL_IMPORT_ANDROID_HARDWARE_BUFFER_LAYER_INDEX_ARM 0x41F0 + + +typedef cl_mem CL_API_CALL +clImportMemoryARM_t( + cl_context context, + cl_mem_flags flags, + const cl_import_properties_arm* properties, + void* memory, + size_t size, + cl_int* errcode_ret); + +typedef clImportMemoryARM_t * +clImportMemoryARM_fn CL_API_SUFFIX__VERSION_1_0; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_mem 
CL_API_CALL +clImportMemoryARM( + cl_context context, + cl_mem_flags flags, + const cl_import_properties_arm* properties, + void* memory, + size_t size, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_arm_shared_virtual_memory +***************************************************************/ +#define cl_arm_shared_virtual_memory 1 +#define CL_ARM_SHARED_VIRTUAL_MEMORY_EXTENSION_NAME \ + "cl_arm_shared_virtual_memory" + + +#define CL_ARM_SHARED_VIRTUAL_MEMORY_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +typedef cl_bitfield cl_svm_mem_flags_arm; +typedef cl_uint cl_kernel_exec_info_arm; +typedef cl_bitfield cl_device_svm_capabilities_arm; + +/* cl_device_info */ +#define CL_DEVICE_SVM_CAPABILITIES_ARM 0x40B6 + +/* cl_mem_info */ +#define CL_MEM_USES_SVM_POINTER_ARM 0x40B7 + +/* cl_kernel_exec_info_arm */ +#define CL_KERNEL_EXEC_INFO_SVM_PTRS_ARM 0x40B8 +#define CL_KERNEL_EXEC_INFO_SVM_FINE_GRAIN_SYSTEM_ARM 0x40B9 + +/* cl_command_type */ +#define CL_COMMAND_SVM_FREE_ARM 0x40BA +#define CL_COMMAND_SVM_MEMCPY_ARM 0x40BB +#define CL_COMMAND_SVM_MEMFILL_ARM 0x40BC +#define CL_COMMAND_SVM_MAP_ARM 0x40BD +#define CL_COMMAND_SVM_UNMAP_ARM 0x40BE + +/* cl_device_svm_capabilities_arm */ +#define CL_DEVICE_SVM_COARSE_GRAIN_BUFFER_ARM (1 << 0) +#define CL_DEVICE_SVM_FINE_GRAIN_BUFFER_ARM (1 << 1) +#define CL_DEVICE_SVM_FINE_GRAIN_SYSTEM_ARM (1 << 2) +#define CL_DEVICE_SVM_ATOMICS_ARM (1 << 3) + +/* cl_svm_mem_flags_arm */ +#define CL_MEM_SVM_FINE_GRAIN_BUFFER_ARM (1 << 10) +#define CL_MEM_SVM_ATOMICS_ARM (1 << 11) + + +typedef void* CL_API_CALL +clSVMAllocARM_t( + cl_context context, + cl_svm_mem_flags_arm flags, + size_t size, + cl_uint alignment); + +typedef clSVMAllocARM_t * +clSVMAllocARM_fn CL_API_SUFFIX__VERSION_1_2; + +typedef void CL_API_CALL +clSVMFreeARM_t( + cl_context context, + void* svm_pointer); + +typedef 
clSVMFreeARM_t * +clSVMFreeARM_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL +clEnqueueSVMFreeARM_t( + cl_command_queue command_queue, + cl_uint num_svm_pointers, + void* svm_pointers[], + void (CL_CALLBACK* pfn_free_func)(cl_command_queue queue, cl_uint num_svm_pointers, void * svm_pointers[], void *user_data), + void* user_data, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueSVMFreeARM_t * +clEnqueueSVMFreeARM_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL +clEnqueueSVMMemcpyARM_t( + cl_command_queue command_queue, + cl_bool blocking_copy, + void* dst_ptr, + const void* src_ptr, + size_t size, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueSVMMemcpyARM_t * +clEnqueueSVMMemcpyARM_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL +clEnqueueSVMMemFillARM_t( + cl_command_queue command_queue, + void* svm_ptr, + const void* pattern, + size_t pattern_size, + size_t size, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueSVMMemFillARM_t * +clEnqueueSVMMemFillARM_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL +clEnqueueSVMMapARM_t( + cl_command_queue command_queue, + cl_bool blocking_map, + cl_map_flags flags, + void* svm_ptr, + size_t size, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueSVMMapARM_t * +clEnqueueSVMMapARM_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL +clEnqueueSVMUnmapARM_t( + cl_command_queue command_queue, + void* svm_ptr, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueSVMUnmapARM_t * +clEnqueueSVMUnmapARM_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL +clSetKernelArgSVMPointerARM_t( + cl_kernel kernel, + cl_uint arg_index, + const void* arg_value); + +typedef 
clSetKernelArgSVMPointerARM_t * +clSetKernelArgSVMPointerARM_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL +clSetKernelExecInfoARM_t( + cl_kernel kernel, + cl_kernel_exec_info_arm param_name, + size_t param_value_size, + const void* param_value); + +typedef clSetKernelExecInfoARM_t * +clSetKernelExecInfoARM_fn CL_API_SUFFIX__VERSION_1_2; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY void* CL_API_CALL +clSVMAllocARM( + cl_context context, + cl_svm_mem_flags_arm flags, + size_t size, + cl_uint alignment) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY void CL_API_CALL +clSVMFreeARM( + cl_context context, + void* svm_pointer) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueSVMFreeARM( + cl_command_queue command_queue, + cl_uint num_svm_pointers, + void* svm_pointers[], + void (CL_CALLBACK* pfn_free_func)(cl_command_queue queue, cl_uint num_svm_pointers, void * svm_pointers[], void *user_data), + void* user_data, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueSVMMemcpyARM( + cl_command_queue command_queue, + cl_bool blocking_copy, + void* dst_ptr, + const void* src_ptr, + size_t size, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueSVMMemFillARM( + cl_command_queue command_queue, + void* svm_ptr, + const void* pattern, + size_t pattern_size, + size_t size, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueSVMMapARM( + cl_command_queue command_queue, + cl_bool blocking_map, + cl_map_flags flags, + void* svm_ptr, + size_t size, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) 
CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueSVMUnmapARM( + cl_command_queue command_queue, + void* svm_ptr, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clSetKernelArgSVMPointerARM( + cl_kernel kernel, + cl_uint arg_index, + const void* arg_value) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clSetKernelExecInfoARM( + cl_kernel kernel, + cl_kernel_exec_info_arm param_name, + size_t param_value_size, + const void* param_value) CL_API_SUFFIX__VERSION_1_2; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_arm_get_core_id +***************************************************************/ +#if defined(CL_VERSION_1_2) + +#define cl_arm_get_core_id 1 +#define CL_ARM_GET_CORE_ID_EXTENSION_NAME \ + "cl_arm_get_core_id" + + +#define CL_ARM_GET_CORE_ID_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* cl_device_info */ +#define CL_DEVICE_COMPUTE_UNITS_BITFIELD_ARM 0x40BF + +#endif /* defined(CL_VERSION_1_2) */ + +/*************************************************************** +* cl_arm_job_slot_selection +***************************************************************/ +#define cl_arm_job_slot_selection 1 +#define CL_ARM_JOB_SLOT_SELECTION_EXTENSION_NAME \ + "cl_arm_job_slot_selection" + + +#define CL_ARM_JOB_SLOT_SELECTION_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* cl_device_info */ +#define CL_DEVICE_JOB_SLOTS_ARM 0x41E0 + +/* cl_queue_properties */ +#define CL_QUEUE_JOB_SLOT_ARM 0x41E1 + +/*************************************************************** +* cl_arm_scheduling_controls +***************************************************************/ +#define cl_arm_scheduling_controls 1 +#define CL_ARM_SCHEDULING_CONTROLS_EXTENSION_NAME \ + "cl_arm_scheduling_controls" + + +#define 
CL_ARM_SCHEDULING_CONTROLS_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* Types */ +typedef cl_bitfield cl_device_scheduling_controls_capabilities_arm; + +/* cl_device_scheduling_controls_capabilities_arm */ +#define CL_DEVICE_SCHEDULING_KERNEL_BATCHING_ARM (1 << 0) +#define CL_DEVICE_SCHEDULING_WORKGROUP_BATCH_SIZE_ARM (1 << 1) +#define CL_DEVICE_SCHEDULING_WORKGROUP_BATCH_SIZE_MODIFIER_ARM (1 << 2) +#define CL_DEVICE_SCHEDULING_DEFERRED_FLUSH_ARM (1 << 3) +#define CL_DEVICE_SCHEDULING_REGISTER_ALLOCATION_ARM (1 << 4) +#define CL_DEVICE_SCHEDULING_WARP_THROTTLING_ARM (1 << 5) +#define CL_DEVICE_SCHEDULING_COMPUTE_UNIT_BATCH_QUEUE_SIZE_ARM (1 << 6) +#define CL_DEVICE_SCHEDULING_COMPUTE_UNIT_LIMIT_ARM (1 << 7) + +/* cl_device_info */ +#define CL_DEVICE_SCHEDULING_CONTROLS_CAPABILITIES_ARM 0x41E4 +#define CL_DEVICE_SUPPORTED_REGISTER_ALLOCATIONS_ARM 0x41EB +#define CL_DEVICE_MAX_WARP_COUNT_ARM 0x41EA + +/* cl_kernel_exec_info */ +#define CL_KERNEL_EXEC_INFO_WORKGROUP_BATCH_SIZE_ARM 0x41E5 +#define CL_KERNEL_EXEC_INFO_WORKGROUP_BATCH_SIZE_MODIFIER_ARM 0x41E6 +#define CL_KERNEL_EXEC_INFO_WARP_COUNT_LIMIT_ARM 0x41E8 +#define CL_KERNEL_EXEC_INFO_COMPUTE_UNIT_MAX_QUEUED_BATCHES_ARM 0x41F1 + +/* cl_kernel_info */ +#define CL_KERNEL_MAX_WARP_COUNT_ARM 0x41E9 + +/* cl_queue_properties */ +#define CL_QUEUE_KERNEL_BATCHING_ARM 0x41E7 +#define CL_QUEUE_DEFERRED_FLUSH_ARM 0x41EC +#define CL_QUEUE_COMPUTE_UNIT_LIMIT_ARM 0x41F3 + +/*************************************************************** +* cl_arm_controlled_kernel_termination +***************************************************************/ +#define cl_arm_controlled_kernel_termination 1 +#define CL_ARM_CONTROLLED_KERNEL_TERMINATION_EXTENSION_NAME \ + "cl_arm_controlled_kernel_termination" + + +#define CL_ARM_CONTROLLED_KERNEL_TERMINATION_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* Types */ +typedef cl_bitfield cl_device_controlled_termination_capabilities_arm; + +/* Error codes */ +#define 
CL_COMMAND_TERMINATED_ITSELF_WITH_FAILURE_ARM -1108 + +/* cl_device_controlled_termination_capabilities_arm */ +#define CL_DEVICE_CONTROLLED_TERMINATION_SUCCESS_ARM (1 << 0) +#define CL_DEVICE_CONTROLLED_TERMINATION_FAILURE_ARM (1 << 1) +#define CL_DEVICE_CONTROLLED_TERMINATION_QUERY_ARM (1 << 2) + +/* cl_device_info */ +#define CL_DEVICE_CONTROLLED_TERMINATION_CAPABILITIES_ARM 0x41EE + +/* cl_event_info */ +#define CL_EVENT_COMMAND_TERMINATION_REASON_ARM 0x41ED + +/* cl_command_termination_reason_arm */ +#define CL_COMMAND_TERMINATION_COMPLETION_ARM 0 +#define CL_COMMAND_TERMINATION_CONTROLLED_SUCCESS_ARM 1 +#define CL_COMMAND_TERMINATION_CONTROLLED_FAILURE_ARM 2 +#define CL_COMMAND_TERMINATION_ERROR_ARM 3 + +/*************************************************************** +* cl_arm_protected_memory_allocation +***************************************************************/ +#define cl_arm_protected_memory_allocation 1 +#define CL_ARM_PROTECTED_MEMORY_ALLOCATION_EXTENSION_NAME \ + "cl_arm_protected_memory_allocation" + + +#define CL_ARM_PROTECTED_MEMORY_ALLOCATION_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +#define CL_MEM_PROTECTED_ALLOC_ARM ((cl_bitfield)1 << 36) + +/*************************************************************** +* cl_intel_exec_by_local_thread +***************************************************************/ +#define cl_intel_exec_by_local_thread 1 +#define CL_INTEL_EXEC_BY_LOCAL_THREAD_EXTENSION_NAME \ + "cl_intel_exec_by_local_thread" + + +#define CL_INTEL_EXEC_BY_LOCAL_THREAD_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* cl_command_queue_properties - bitfield */ +#define CL_QUEUE_THREAD_LOCAL_EXEC_ENABLE_INTEL ((cl_bitfield)1 << 31) + +/*************************************************************** +* cl_intel_device_attribute_query +***************************************************************/ +#define cl_intel_device_attribute_query 1 +#define CL_INTEL_DEVICE_ATTRIBUTE_QUERY_EXTENSION_NAME \ + "cl_intel_device_attribute_query" + 
+ +#define CL_INTEL_DEVICE_ATTRIBUTE_QUERY_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +typedef cl_bitfield cl_device_feature_capabilities_intel; + +/* cl_device_feature_capabilities_intel */ +#define CL_DEVICE_FEATURE_FLAG_DP4A_INTEL (1 << 0) +#define CL_DEVICE_FEATURE_FLAG_DPAS_INTEL (1 << 1) + +/* cl_device_info */ +#define CL_DEVICE_IP_VERSION_INTEL 0x4250 +#define CL_DEVICE_ID_INTEL 0x4251 +#define CL_DEVICE_NUM_SLICES_INTEL 0x4252 +#define CL_DEVICE_NUM_SUB_SLICES_PER_SLICE_INTEL 0x4253 +#define CL_DEVICE_NUM_EUS_PER_SUB_SLICE_INTEL 0x4254 +#define CL_DEVICE_NUM_THREADS_PER_EU_INTEL 0x4255 +#define CL_DEVICE_FEATURE_CAPABILITIES_INTEL 0x4256 + +/*************************************************************** +* cl_intel_device_partition_by_names +***************************************************************/ +#define cl_intel_device_partition_by_names 1 +#define CL_INTEL_DEVICE_PARTITION_BY_NAMES_EXTENSION_NAME \ + "cl_intel_device_partition_by_names" + + +#define CL_INTEL_DEVICE_PARTITION_BY_NAMES_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +#define CL_DEVICE_PARTITION_BY_NAMES_INTEL 0x4052 +#define CL_PARTITION_BY_NAMES_LIST_END_INTEL -1 + +/*************************************************************** +* cl_intel_accelerator +***************************************************************/ +#define cl_intel_accelerator 1 +#define CL_INTEL_ACCELERATOR_EXTENSION_NAME \ + "cl_intel_accelerator" + + +#define CL_INTEL_ACCELERATOR_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +typedef struct _cl_accelerator_intel* cl_accelerator_intel; +typedef cl_uint cl_accelerator_type_intel; +typedef cl_uint cl_accelerator_info_intel; + +/* cl_accelerator_info_intel */ +#define CL_ACCELERATOR_DESCRIPTOR_INTEL 0x4090 +#define CL_ACCELERATOR_REFERENCE_COUNT_INTEL 0x4091 +#define CL_ACCELERATOR_CONTEXT_INTEL 0x4092 +#define CL_ACCELERATOR_TYPE_INTEL 0x4093 + +/* Error codes */ +#define CL_INVALID_ACCELERATOR_INTEL -1094 +#define CL_INVALID_ACCELERATOR_TYPE_INTEL -1095 
+#define CL_INVALID_ACCELERATOR_DESCRIPTOR_INTEL -1096 +#define CL_ACCELERATOR_TYPE_NOT_SUPPORTED_INTEL -1097 + + +typedef cl_accelerator_intel CL_API_CALL +clCreateAcceleratorINTEL_t( + cl_context context, + cl_accelerator_type_intel accelerator_type, + size_t descriptor_size, + const void* descriptor, + cl_int* errcode_ret); + +typedef clCreateAcceleratorINTEL_t * +clCreateAcceleratorINTEL_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL +clGetAcceleratorInfoINTEL_t( + cl_accelerator_intel accelerator, + cl_accelerator_info_intel param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret); + +typedef clGetAcceleratorInfoINTEL_t * +clGetAcceleratorInfoINTEL_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL +clRetainAcceleratorINTEL_t( + cl_accelerator_intel accelerator); + +typedef clRetainAcceleratorINTEL_t * +clRetainAcceleratorINTEL_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL +clReleaseAcceleratorINTEL_t( + cl_accelerator_intel accelerator); + +typedef clReleaseAcceleratorINTEL_t * +clReleaseAcceleratorINTEL_fn CL_API_SUFFIX__VERSION_1_2; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_accelerator_intel CL_API_CALL +clCreateAcceleratorINTEL( + cl_context context, + cl_accelerator_type_intel accelerator_type, + size_t descriptor_size, + const void* descriptor, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetAcceleratorInfoINTEL( + cl_accelerator_intel accelerator, + cl_accelerator_info_intel param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clRetainAcceleratorINTEL( + cl_accelerator_intel accelerator) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clReleaseAcceleratorINTEL( + cl_accelerator_intel accelerator) CL_API_SUFFIX__VERSION_1_2; + +#endif /* 
!defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_intel_motion_estimation +***************************************************************/ +#define cl_intel_motion_estimation 1 +#define CL_INTEL_MOTION_ESTIMATION_EXTENSION_NAME \ + "cl_intel_motion_estimation" + + +#define CL_INTEL_MOTION_ESTIMATION_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +typedef struct _cl_motion_estimation_desc_intel { + cl_uint mb_block_type; + cl_uint subpixel_mode; + cl_uint sad_adjust_mode; + cl_uint search_path_type; +} cl_motion_estimation_desc_intel; + +/* cl_accelerator_type_intel */ +#define CL_ACCELERATOR_TYPE_MOTION_ESTIMATION_INTEL 0x0 + +/* cl_uint mb_block_type */ +#define CL_ME_MB_TYPE_16x16_INTEL 0x0 +#define CL_ME_MB_TYPE_8x8_INTEL 0x1 +#define CL_ME_MB_TYPE_4x4_INTEL 0x2 + +/* cl_uint subpixel_mode */ +#define CL_ME_SUBPIXEL_MODE_INTEGER_INTEL 0x0 +#define CL_ME_SUBPIXEL_MODE_HPEL_INTEL 0x1 +#define CL_ME_SUBPIXEL_MODE_QPEL_INTEL 0x2 + +/* cl_uint sad_adjust_mode */ +#define CL_ME_SAD_ADJUST_MODE_NONE_INTEL 0x0 +#define CL_ME_SAD_ADJUST_MODE_HAAR_INTEL 0x1 + +/* cl_uint search_path_type */ +#define CL_ME_SEARCH_PATH_RADIUS_2_2_INTEL 0x0 +#define CL_ME_SEARCH_PATH_RADIUS_4_4_INTEL 0x1 +#define CL_ME_SEARCH_PATH_RADIUS_16_12_INTEL 0x5 + +/*************************************************************** +* cl_intel_advanced_motion_estimation +***************************************************************/ +#define cl_intel_advanced_motion_estimation 1 +#define CL_INTEL_ADVANCED_MOTION_ESTIMATION_EXTENSION_NAME \ + "cl_intel_advanced_motion_estimation" + + +#define CL_INTEL_ADVANCED_MOTION_ESTIMATION_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* cl_device_info */ +#define CL_DEVICE_ME_VERSION_INTEL 0x407E + +#define CL_ME_VERSION_LEGACY_INTEL 0x0 +#define CL_ME_VERSION_ADVANCED_VER_1_INTEL 0x1 +#define CL_ME_VERSION_ADVANCED_VER_2_INTEL 0x2 + +#define CL_ME_CHROMA_INTRA_PREDICT_ENABLED_INTEL 
0x1 +#define CL_ME_LUMA_INTRA_PREDICT_ENABLED_INTEL 0x2 + +#define CL_ME_SKIP_BLOCK_TYPE_16x16_INTEL 0x0 +#define CL_ME_SKIP_BLOCK_TYPE_8x8_INTEL 0x4 + +#define CL_ME_COST_PENALTY_NONE_INTEL 0x0 +#define CL_ME_COST_PENALTY_LOW_INTEL 0x1 +#define CL_ME_COST_PENALTY_NORMAL_INTEL 0x2 +#define CL_ME_COST_PENALTY_HIGH_INTEL 0x3 + +#define CL_ME_COST_PRECISION_QPEL_INTEL 0x0 +#define CL_ME_COST_PRECISION_HPEL_INTEL 0x1 +#define CL_ME_COST_PRECISION_PEL_INTEL 0x2 +#define CL_ME_COST_PRECISION_DPEL_INTEL 0x3 + +#define CL_ME_LUMA_PREDICTOR_MODE_VERTICAL_INTEL 0x0 +#define CL_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_INTEL 0x1 +#define CL_ME_LUMA_PREDICTOR_MODE_DC_INTEL 0x2 +#define CL_ME_LUMA_PREDICTOR_MODE_DIAGONAL_DOWN_LEFT_INTEL 0x3 +#define CL_ME_LUMA_PREDICTOR_MODE_DIAGONAL_DOWN_RIGHT_INTEL 0x4 +#define CL_ME_LUMA_PREDICTOR_MODE_PLANE_INTEL 0x4 +#define CL_ME_LUMA_PREDICTOR_MODE_VERTICAL_RIGHT_INTEL 0x5 +#define CL_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_DOWN_INTEL 0x6 +#define CL_ME_LUMA_PREDICTOR_MODE_VERTICAL_LEFT_INTEL 0x7 +#define CL_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_UP_INTEL 0x8 + +#define CL_ME_CHROMA_PREDICTOR_MODE_DC_INTEL 0x0 +#define CL_ME_CHROMA_PREDICTOR_MODE_HORIZONTAL_INTEL 0x1 +#define CL_ME_CHROMA_PREDICTOR_MODE_VERTICAL_INTEL 0x2 +#define CL_ME_CHROMA_PREDICTOR_MODE_PLANE_INTEL 0x3 + +#define CL_ME_FORWARD_INPUT_MODE_INTEL 0x1 +#define CL_ME_BACKWARD_INPUT_MODE_INTEL 0x2 +#define CL_ME_BIDIRECTION_INPUT_MODE_INTEL 0x3 + +#define CL_ME_BIDIR_WEIGHT_QUARTER_INTEL 16 +#define CL_ME_BIDIR_WEIGHT_THIRD_INTEL 21 +#define CL_ME_BIDIR_WEIGHT_HALF_INTEL 32 +#define CL_ME_BIDIR_WEIGHT_TWO_THIRD_INTEL 43 +#define CL_ME_BIDIR_WEIGHT_THREE_QUARTER_INTEL 48 + +/*************************************************************** +* cl_intel_simultaneous_sharing +***************************************************************/ +#define cl_intel_simultaneous_sharing 1 +#define CL_INTEL_SIMULTANEOUS_SHARING_EXTENSION_NAME \ + "cl_intel_simultaneous_sharing" + + +#define 
CL_INTEL_SIMULTANEOUS_SHARING_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* cl_device_info */ +#define CL_DEVICE_SIMULTANEOUS_INTEROPS_INTEL 0x4104 +#define CL_DEVICE_NUM_SIMULTANEOUS_INTEROPS_INTEL 0x4105 + +/*************************************************************** +* cl_intel_egl_image_yuv +***************************************************************/ +#define cl_intel_egl_image_yuv 1 +#define CL_INTEL_EGL_IMAGE_YUV_EXTENSION_NAME \ + "cl_intel_egl_image_yuv" + + +#define CL_INTEL_EGL_IMAGE_YUV_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* cl_egl_image_properties_khr */ +#define CL_EGL_YUV_PLANE_INTEL 0x4107 + +/*************************************************************** +* cl_intel_packed_yuv +***************************************************************/ +#define cl_intel_packed_yuv 1 +#define CL_INTEL_PACKED_YUV_EXTENSION_NAME \ + "cl_intel_packed_yuv" + + +#define CL_INTEL_PACKED_YUV_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* cl_channel_order */ +#define CL_YUYV_INTEL 0x4076 +#define CL_UYVY_INTEL 0x4077 +#define CL_YVYU_INTEL 0x4078 +#define CL_VYUY_INTEL 0x4079 + +/*************************************************************** +* cl_intel_required_subgroup_size +***************************************************************/ +#define cl_intel_required_subgroup_size 1 +#define CL_INTEL_REQUIRED_SUBGROUP_SIZE_EXTENSION_NAME \ + "cl_intel_required_subgroup_size" + + +#define CL_INTEL_REQUIRED_SUBGROUP_SIZE_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* cl_device_info */ +#define CL_DEVICE_SUB_GROUP_SIZES_INTEL 0x4108 + +/* cl_kernel_work_group_info */ +#define CL_KERNEL_SPILL_MEM_SIZE_INTEL 0x4109 + +/* cl_kernel_sub_group_info */ +#define CL_KERNEL_COMPILE_SUB_GROUP_SIZE_INTEL 0x410A + +/*************************************************************** +* cl_intel_driver_diagnostics +***************************************************************/ +#define cl_intel_driver_diagnostics 1 +#define 
CL_INTEL_DRIVER_DIAGNOSTICS_EXTENSION_NAME \ + "cl_intel_driver_diagnostics" + + +#define CL_INTEL_DRIVER_DIAGNOSTICS_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +typedef cl_bitfield cl_diagnostic_verbose_level_intel; + +/* cl_context_properties */ +#define CL_CONTEXT_SHOW_DIAGNOSTICS_INTEL 0x4106 + +/* cl_diagnostic_verbose_level_intel */ +#define CL_CONTEXT_DIAGNOSTICS_LEVEL_ALL_INTEL 0xff +#define CL_CONTEXT_DIAGNOSTICS_LEVEL_GOOD_INTEL (1 << 0) +#define CL_CONTEXT_DIAGNOSTICS_LEVEL_BAD_INTEL (1 << 1) +#define CL_CONTEXT_DIAGNOSTICS_LEVEL_NEUTRAL_INTEL (1 << 2) + +/*************************************************************** +* cl_intel_planar_yuv +***************************************************************/ +#define cl_intel_planar_yuv 1 +#define CL_INTEL_PLANAR_YUV_EXTENSION_NAME \ + "cl_intel_planar_yuv" + + +#define CL_INTEL_PLANAR_YUV_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* cl_channel_order */ +#define CL_NV12_INTEL 0x410E + +/* cl_mem_flags */ +#define CL_MEM_NO_ACCESS_INTEL (1 << 24) +#define CL_MEM_ACCESS_FLAGS_UNRESTRICTED_INTEL (1 << 25) + +/* cl_device_info */ +#define CL_DEVICE_PLANAR_YUV_MAX_WIDTH_INTEL 0x417E +#define CL_DEVICE_PLANAR_YUV_MAX_HEIGHT_INTEL 0x417F + +/*************************************************************** +* cl_intel_device_side_avc_motion_estimation +***************************************************************/ +#define cl_intel_device_side_avc_motion_estimation 1 +#define CL_INTEL_DEVICE_SIDE_AVC_MOTION_ESTIMATION_EXTENSION_NAME \ + "cl_intel_device_side_avc_motion_estimation" + + +#define CL_INTEL_DEVICE_SIDE_AVC_MOTION_ESTIMATION_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* cl_device_info */ +#define CL_DEVICE_AVC_ME_VERSION_INTEL 0x410B +#define CL_DEVICE_AVC_ME_SUPPORTS_TEXTURE_SAMPLER_USE_INTEL 0x410C +#define CL_DEVICE_AVC_ME_SUPPORTS_PREEMPTION_INTEL 0x410D + +/* returned by CL_DEVICE_AVC_ME_VERSION_INTEL */ +#define CL_AVC_ME_VERSION_0_INTEL 0x0 +#define CL_AVC_ME_VERSION_1_INTEL 0x1 + +/* 
Inter macro-block major shape values */ +#define CL_AVC_ME_MAJOR_16x16_INTEL 0x0 +#define CL_AVC_ME_MAJOR_16x8_INTEL 0x1 +#define CL_AVC_ME_MAJOR_8x16_INTEL 0x2 +#define CL_AVC_ME_MAJOR_8x8_INTEL 0x3 + +/* Inter macro-block minor shape values */ +#define CL_AVC_ME_MINOR_8x8_INTEL 0x0 +#define CL_AVC_ME_MINOR_8x4_INTEL 0x1 +#define CL_AVC_ME_MINOR_4x8_INTEL 0x2 +#define CL_AVC_ME_MINOR_4x4_INTEL 0x3 + +/* Inter macro-block major direction values */ +#define CL_AVC_ME_MAJOR_FORWARD_INTEL 0x0 +#define CL_AVC_ME_MAJOR_BACKWARD_INTEL 0x1 +#define CL_AVC_ME_MAJOR_BIDIRECTIONAL_INTEL 0x2 + +/* Inter (IME) partition mask values */ +#define CL_AVC_ME_PARTITION_MASK_ALL_INTEL 0x0 +#define CL_AVC_ME_PARTITION_MASK_16x16_INTEL 0x7E +#define CL_AVC_ME_PARTITION_MASK_16x8_INTEL 0x7D +#define CL_AVC_ME_PARTITION_MASK_8x16_INTEL 0x7B +#define CL_AVC_ME_PARTITION_MASK_8x8_INTEL 0x77 +#define CL_AVC_ME_PARTITION_MASK_8x4_INTEL 0x6F +#define CL_AVC_ME_PARTITION_MASK_4x8_INTEL 0x5F +#define CL_AVC_ME_PARTITION_MASK_4x4_INTEL 0x3F + +/* Search window configuration */ +#define CL_AVC_ME_SEARCH_WINDOW_EXHAUSTIVE_INTEL 0x0 +#define CL_AVC_ME_SEARCH_WINDOW_SMALL_INTEL 0x1 +#define CL_AVC_ME_SEARCH_WINDOW_TINY_INTEL 0x2 +#define CL_AVC_ME_SEARCH_WINDOW_EXTRA_TINY_INTEL 0x3 +#define CL_AVC_ME_SEARCH_WINDOW_DIAMOND_INTEL 0x4 +#define CL_AVC_ME_SEARCH_WINDOW_LARGE_DIAMOND_INTEL 0x5 +#define CL_AVC_ME_SEARCH_WINDOW_RESERVED0_INTEL 0x6 +#define CL_AVC_ME_SEARCH_WINDOW_RESERVED1_INTEL 0x7 +#define CL_AVC_ME_SEARCH_WINDOW_CUSTOM_INTEL 0x8 +#define CL_AVC_ME_SEARCH_WINDOW_16x12_RADIUS_INTEL 0x9 +#define CL_AVC_ME_SEARCH_WINDOW_4x4_RADIUS_INTEL 0x2 +#define CL_AVC_ME_SEARCH_WINDOW_2x2_RADIUS_INTEL 0xa + +/* SAD adjustment mode */ +#define CL_AVC_ME_SAD_ADJUST_MODE_NONE_INTEL 0x0 +#define CL_AVC_ME_SAD_ADJUST_MODE_HAAR_INTEL 0x2 + +/* Pixel resolution */ +#define CL_AVC_ME_SUBPIXEL_MODE_INTEGER_INTEL 0x0 +#define CL_AVC_ME_SUBPIXEL_MODE_HPEL_INTEL 0x1 +#define CL_AVC_ME_SUBPIXEL_MODE_QPEL_INTEL 0x3 + 
+/* Cost precision values */ +#define CL_AVC_ME_COST_PRECISION_QPEL_INTEL 0x0 +#define CL_AVC_ME_COST_PRECISION_HPEL_INTEL 0x1 +#define CL_AVC_ME_COST_PRECISION_PEL_INTEL 0x2 +#define CL_AVC_ME_COST_PRECISION_DPEL_INTEL 0x3 + +/* Inter bidirectional weights */ +#define CL_AVC_ME_BIDIR_WEIGHT_QUARTER_INTEL 0x10 +#define CL_AVC_ME_BIDIR_WEIGHT_THIRD_INTEL 0x15 +#define CL_AVC_ME_BIDIR_WEIGHT_HALF_INTEL 0x20 +#define CL_AVC_ME_BIDIR_WEIGHT_TWO_THIRD_INTEL 0x2B +#define CL_AVC_ME_BIDIR_WEIGHT_THREE_QUARTER_INTEL 0x30 + +/* Inter border reached values */ +#define CL_AVC_ME_BORDER_REACHED_LEFT_INTEL 0x0 +#define CL_AVC_ME_BORDER_REACHED_RIGHT_INTEL 0x2 +#define CL_AVC_ME_BORDER_REACHED_TOP_INTEL 0x4 +#define CL_AVC_ME_BORDER_REACHED_BOTTOM_INTEL 0x8 + +/* Inter skip block partition type */ +#define CL_AVC_ME_SKIP_BLOCK_PARTITION_16x16_INTEL 0x0 +#define CL_AVC_ME_SKIP_BLOCK_PARTITION_8x8_INTEL 0x4000 + +/* Inter skip motion vector mask */ +#define CL_AVC_ME_SKIP_BLOCK_16x16_FORWARD_ENABLE_INTEL (0x1 << 24) +#define CL_AVC_ME_SKIP_BLOCK_16x16_BACKWARD_ENABLE_INTEL (0x2 << 24) +#define CL_AVC_ME_SKIP_BLOCK_16x16_DUAL_ENABLE_INTEL (0x3 << 24) +#define CL_AVC_ME_SKIP_BLOCK_8x8_FORWARD_ENABLE_INTEL (0x55 << 24) +#define CL_AVC_ME_SKIP_BLOCK_8x8_BACKWARD_ENABLE_INTEL (0xAA << 24) +#define CL_AVC_ME_SKIP_BLOCK_8x8_DUAL_ENABLE_INTEL (0xFF << 24) +#define CL_AVC_ME_SKIP_BLOCK_8x8_0_FORWARD_ENABLE_INTEL (0x1 << 24) +#define CL_AVC_ME_SKIP_BLOCK_8x8_0_BACKWARD_ENABLE_INTEL (0x2 << 24) +#define CL_AVC_ME_SKIP_BLOCK_8x8_1_FORWARD_ENABLE_INTEL (0x1 << 26) +#define CL_AVC_ME_SKIP_BLOCK_8x8_1_BACKWARD_ENABLE_INTEL (0x2 << 26) +#define CL_AVC_ME_SKIP_BLOCK_8x8_2_FORWARD_ENABLE_INTEL (0x1 << 28) +#define CL_AVC_ME_SKIP_BLOCK_8x8_2_BACKWARD_ENABLE_INTEL (0x2 << 28) +#define CL_AVC_ME_SKIP_BLOCK_8x8_3_FORWARD_ENABLE_INTEL (0x1 << 30) +#define CL_AVC_ME_SKIP_BLOCK_8x8_3_BACKWARD_ENABLE_INTEL (0x2 << 30) + +/* Block based skip type values */ +#define CL_AVC_ME_BLOCK_BASED_SKIP_4x4_INTEL 0x00 
+#define CL_AVC_ME_BLOCK_BASED_SKIP_8x8_INTEL 0x80 + +/* cl_intel_device_side_avc_motion_estimation.?? */ +#define CL_AVC_ME_INTRA_16x16_INTEL 0x0 +#define CL_AVC_ME_INTRA_8x8_INTEL 0x1 +#define CL_AVC_ME_INTRA_4x4_INTEL 0x2 + +/* Luma intra partition mask values */ +#define CL_AVC_ME_INTRA_LUMA_PARTITION_MASK_16x16_INTEL 0x6 +#define CL_AVC_ME_INTRA_LUMA_PARTITION_MASK_8x8_INTEL 0x5 +#define CL_AVC_ME_INTRA_LUMA_PARTITION_MASK_4x4_INTEL 0x3 + +/* Intra neighbor availability mask values */ +#define CL_AVC_ME_INTRA_NEIGHBOR_LEFT_MASK_ENABLE_INTEL 0x60 +#define CL_AVC_ME_INTRA_NEIGHBOR_UPPER_MASK_ENABLE_INTEL 0x10 +#define CL_AVC_ME_INTRA_NEIGHBOR_UPPER_RIGHT_MASK_ENABLE_INTEL 0x8 +#define CL_AVC_ME_INTRA_NEIGHBOR_UPPER_LEFT_MASK_ENABLE_INTEL 0x4 + +/* Luma intra modes */ +#define CL_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_INTEL 0x0 +#define CL_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_INTEL 0x1 +#define CL_AVC_ME_LUMA_PREDICTOR_MODE_DC_INTEL 0x2 +#define CL_AVC_ME_LUMA_PREDICTOR_MODE_DIAGONAL_DOWN_LEFT_INTEL 0x3 +#define CL_AVC_ME_LUMA_PREDICTOR_MODE_DIAGONAL_DOWN_RIGHT_INTEL 0x4 +#define CL_AVC_ME_LUMA_PREDICTOR_MODE_PLANE_INTEL 0x4 +#define CL_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_RIGHT_INTEL 0x5 +#define CL_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_DOWN_INTEL 0x6 +#define CL_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_LEFT_INTEL 0x7 +#define CL_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_UP_INTEL 0x8 + +/* Chroma intra modes */ +#define CL_AVC_ME_CHROMA_PREDICTOR_MODE_DC_INTEL 0x0 +#define CL_AVC_ME_CHROMA_PREDICTOR_MODE_HORIZONTAL_INTEL 0x1 +#define CL_AVC_ME_CHROMA_PREDICTOR_MODE_VERTICAL_INTEL 0x2 +#define CL_AVC_ME_CHROMA_PREDICTOR_MODE_PLANE_INTEL 0x3 + +/* Reference image select values */ +#define CL_AVC_ME_FRAME_FORWARD_INTEL 0x1 +#define CL_AVC_ME_FRAME_BACKWARD_INTEL 0x2 +#define CL_AVC_ME_FRAME_DUAL_INTEL 0x3 + +/* Slice type values */ +#define CL_AVC_ME_SLICE_TYPE_PRED_INTEL 0x0 +#define CL_AVC_ME_SLICE_TYPE_BPRED_INTEL 0x1 +#define CL_AVC_ME_SLICE_TYPE_INTRA_INTEL 0x2 + +/* 
Interlaced image field polarity values */ +#define CL_AVC_ME_INTERLACED_SCAN_TOP_FIELD_INTEL 0x0 +#define CL_AVC_ME_INTERLACED_SCAN_BOTTOM_FIELD_INTEL 0x1 + +/*************************************************************** +* cl_intel_unified_shared_memory +***************************************************************/ +#define cl_intel_unified_shared_memory 1 +#define CL_INTEL_UNIFIED_SHARED_MEMORY_EXTENSION_NAME \ + "cl_intel_unified_shared_memory" + + +#define CL_INTEL_UNIFIED_SHARED_MEMORY_EXTENSION_VERSION CL_MAKE_VERSION(1, 1, 0) + +typedef cl_bitfield cl_device_unified_shared_memory_capabilities_intel; +typedef cl_properties cl_mem_properties_intel; +typedef cl_bitfield cl_mem_alloc_flags_intel; +typedef cl_uint cl_mem_info_intel; +typedef cl_uint cl_unified_shared_memory_type_intel; +typedef cl_uint cl_mem_advice_intel; + +/* cl_device_info */ +#define CL_DEVICE_HOST_MEM_CAPABILITIES_INTEL 0x4190 +#define CL_DEVICE_DEVICE_MEM_CAPABILITIES_INTEL 0x4191 +#define CL_DEVICE_SINGLE_DEVICE_SHARED_MEM_CAPABILITIES_INTEL 0x4192 +#define CL_DEVICE_CROSS_DEVICE_SHARED_MEM_CAPABILITIES_INTEL 0x4193 +#define CL_DEVICE_SHARED_SYSTEM_MEM_CAPABILITIES_INTEL 0x4194 + +/* cl_unified_shared_memory_capabilities_intel - bitfield */ +#define CL_UNIFIED_SHARED_MEMORY_ACCESS_INTEL (1 << 0) +#define CL_UNIFIED_SHARED_MEMORY_ATOMIC_ACCESS_INTEL (1 << 1) +#define CL_UNIFIED_SHARED_MEMORY_CONCURRENT_ACCESS_INTEL (1 << 2) +#define CL_UNIFIED_SHARED_MEMORY_CONCURRENT_ATOMIC_ACCESS_INTEL (1 << 3) + +/* cl_mem_properties_intel */ +#define CL_MEM_ALLOC_FLAGS_INTEL 0x4195 + +/* cl_mem_alloc_flags_intel - bitfield */ +#define CL_MEM_ALLOC_WRITE_COMBINED_INTEL (1 << 0) +#define CL_MEM_ALLOC_INITIAL_PLACEMENT_DEVICE_INTEL (1 << 1) +#define CL_MEM_ALLOC_INITIAL_PLACEMENT_HOST_INTEL (1 << 2) + +/* cl_mem_alloc_info_intel */ +#define CL_MEM_ALLOC_TYPE_INTEL 0x419A +#define CL_MEM_ALLOC_BASE_PTR_INTEL 0x419B +#define CL_MEM_ALLOC_SIZE_INTEL 0x419C +#define CL_MEM_ALLOC_DEVICE_INTEL 0x419D + +/* 
cl_unified_shared_memory_type_intel */ +#define CL_MEM_TYPE_UNKNOWN_INTEL 0x4196 +#define CL_MEM_TYPE_HOST_INTEL 0x4197 +#define CL_MEM_TYPE_DEVICE_INTEL 0x4198 +#define CL_MEM_TYPE_SHARED_INTEL 0x4199 + +/* cl_kernel_exec_info */ +#define CL_KERNEL_EXEC_INFO_INDIRECT_HOST_ACCESS_INTEL 0x4200 +#define CL_KERNEL_EXEC_INFO_INDIRECT_DEVICE_ACCESS_INTEL 0x4201 +#define CL_KERNEL_EXEC_INFO_INDIRECT_SHARED_ACCESS_INTEL 0x4202 +#define CL_KERNEL_EXEC_INFO_USM_PTRS_INTEL 0x4203 + +/* cl_command_type */ +#define CL_COMMAND_MEMFILL_INTEL 0x4204 +#define CL_COMMAND_MEMCPY_INTEL 0x4205 +#define CL_COMMAND_MIGRATEMEM_INTEL 0x4206 +#define CL_COMMAND_MEMADVISE_INTEL 0x4207 + + +typedef void* CL_API_CALL +clHostMemAllocINTEL_t( + cl_context context, + const cl_mem_properties_intel* properties, + size_t size, + cl_uint alignment, + cl_int* errcode_ret); + +typedef clHostMemAllocINTEL_t * +clHostMemAllocINTEL_fn ; + +typedef void* CL_API_CALL +clDeviceMemAllocINTEL_t( + cl_context context, + cl_device_id device, + const cl_mem_properties_intel* properties, + size_t size, + cl_uint alignment, + cl_int* errcode_ret); + +typedef clDeviceMemAllocINTEL_t * +clDeviceMemAllocINTEL_fn ; + +typedef void* CL_API_CALL +clSharedMemAllocINTEL_t( + cl_context context, + cl_device_id device, + const cl_mem_properties_intel* properties, + size_t size, + cl_uint alignment, + cl_int* errcode_ret); + +typedef clSharedMemAllocINTEL_t * +clSharedMemAllocINTEL_fn ; + +typedef cl_int CL_API_CALL +clMemFreeINTEL_t( + cl_context context, + void* ptr); + +typedef clMemFreeINTEL_t * +clMemFreeINTEL_fn ; + +typedef cl_int CL_API_CALL +clMemBlockingFreeINTEL_t( + cl_context context, + void* ptr); + +typedef clMemBlockingFreeINTEL_t * +clMemBlockingFreeINTEL_fn ; + +typedef cl_int CL_API_CALL +clGetMemAllocInfoINTEL_t( + cl_context context, + const void* ptr, + cl_mem_info_intel param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret); + +typedef clGetMemAllocInfoINTEL_t * 
+clGetMemAllocInfoINTEL_fn ; + +typedef cl_int CL_API_CALL +clSetKernelArgMemPointerINTEL_t( + cl_kernel kernel, + cl_uint arg_index, + const void* arg_value); + +typedef clSetKernelArgMemPointerINTEL_t * +clSetKernelArgMemPointerINTEL_fn ; + +typedef cl_int CL_API_CALL +clEnqueueMemFillINTEL_t( + cl_command_queue command_queue, + void* dst_ptr, + const void* pattern, + size_t pattern_size, + size_t size, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueMemFillINTEL_t * +clEnqueueMemFillINTEL_fn ; + +typedef cl_int CL_API_CALL +clEnqueueMemcpyINTEL_t( + cl_command_queue command_queue, + cl_bool blocking, + void* dst_ptr, + const void* src_ptr, + size_t size, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueMemcpyINTEL_t * +clEnqueueMemcpyINTEL_fn ; + +typedef cl_int CL_API_CALL +clEnqueueMemAdviseINTEL_t( + cl_command_queue command_queue, + const void* ptr, + size_t size, + cl_mem_advice_intel advice, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueMemAdviseINTEL_t * +clEnqueueMemAdviseINTEL_fn ; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY void* CL_API_CALL +clHostMemAllocINTEL( + cl_context context, + const cl_mem_properties_intel* properties, + size_t size, + cl_uint alignment, + cl_int* errcode_ret) ; + +extern CL_API_ENTRY void* CL_API_CALL +clDeviceMemAllocINTEL( + cl_context context, + cl_device_id device, + const cl_mem_properties_intel* properties, + size_t size, + cl_uint alignment, + cl_int* errcode_ret) ; + +extern CL_API_ENTRY void* CL_API_CALL +clSharedMemAllocINTEL( + cl_context context, + cl_device_id device, + const cl_mem_properties_intel* properties, + size_t size, + cl_uint alignment, + cl_int* errcode_ret) ; + +extern CL_API_ENTRY cl_int CL_API_CALL +clMemFreeINTEL( + cl_context context, + void* ptr) ; + +extern CL_API_ENTRY 
cl_int CL_API_CALL +clMemBlockingFreeINTEL( + cl_context context, + void* ptr) ; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetMemAllocInfoINTEL( + cl_context context, + const void* ptr, + cl_mem_info_intel param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret) ; + +extern CL_API_ENTRY cl_int CL_API_CALL +clSetKernelArgMemPointerINTEL( + cl_kernel kernel, + cl_uint arg_index, + const void* arg_value) ; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueMemFillINTEL( + cl_command_queue command_queue, + void* dst_ptr, + const void* pattern, + size_t pattern_size, + size_t size, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) ; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueMemcpyINTEL( + cl_command_queue command_queue, + cl_bool blocking, + void* dst_ptr, + const void* src_ptr, + size_t size, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) ; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueMemAdviseINTEL( + cl_command_queue command_queue, + const void* ptr, + size_t size, + cl_mem_advice_intel advice, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) ; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +#if defined(CL_VERSION_1_2) +/* Requires OpenCL 1.2 for cl_mem_migration_flags: */ + +typedef cl_int CL_API_CALL +clEnqueueMigrateMemINTEL_t( + cl_command_queue command_queue, + const void* ptr, + size_t size, + cl_mem_migration_flags flags, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueMigrateMemINTEL_t * +clEnqueueMigrateMemINTEL_fn ; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueMigrateMemINTEL( + cl_command_queue command_queue, + const void* ptr, + size_t size, + cl_mem_migration_flags flags, + cl_uint num_events_in_wait_list, + const cl_event* 
event_wait_list, + cl_event* event) ; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +#endif /* defined(CL_VERSION_1_2) */ + +/* deprecated, use clEnqueueMemFillINTEL instead */ + +typedef cl_int CL_API_CALL +clEnqueueMemsetINTEL_t( + cl_command_queue command_queue, + void* dst_ptr, + cl_int value, + size_t size, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueMemsetINTEL_t * +clEnqueueMemsetINTEL_fn ; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueMemsetINTEL( + cl_command_queue command_queue, + void* dst_ptr, + cl_int value, + size_t size, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) ; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_intel_mem_alloc_buffer_location +***************************************************************/ +#define cl_intel_mem_alloc_buffer_location 1 +#define CL_INTEL_MEM_ALLOC_BUFFER_LOCATION_EXTENSION_NAME \ + "cl_intel_mem_alloc_buffer_location" + + +#define CL_INTEL_MEM_ALLOC_BUFFER_LOCATION_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* cl_mem_properties_intel */ +#define CL_MEM_ALLOC_BUFFER_LOCATION_INTEL 0x419E + +/* cl_mem_alloc_info_intel */ +/* enum CL_MEM_ALLOC_BUFFER_LOCATION_INTEL */ + +/*************************************************************** +* cl_intel_create_buffer_with_properties +***************************************************************/ +#define cl_intel_create_buffer_with_properties 1 +#define CL_INTEL_CREATE_BUFFER_WITH_PROPERTIES_EXTENSION_NAME \ + "cl_intel_create_buffer_with_properties" + + +#define CL_INTEL_CREATE_BUFFER_WITH_PROPERTIES_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* type cl_mem_properties_intel */ + + +typedef cl_mem CL_API_CALL +clCreateBufferWithPropertiesINTEL_t( + cl_context context, + 
const cl_mem_properties_intel* properties, + cl_mem_flags flags, + size_t size, + void* host_ptr, + cl_int* errcode_ret); + +typedef clCreateBufferWithPropertiesINTEL_t * +clCreateBufferWithPropertiesINTEL_fn CL_API_SUFFIX__VERSION_1_0; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateBufferWithPropertiesINTEL( + cl_context context, + const cl_mem_properties_intel* properties, + cl_mem_flags flags, + size_t size, + void* host_ptr, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_intel_program_scope_host_pipe +***************************************************************/ +#define cl_intel_program_scope_host_pipe 1 +#define CL_INTEL_PROGRAM_SCOPE_HOST_PIPE_EXTENSION_NAME \ + "cl_intel_program_scope_host_pipe" + + +#define CL_INTEL_PROGRAM_SCOPE_HOST_PIPE_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* clGetEventInfo response when param_name is CL_EVENT_COMMAND_TYPE */ +#define CL_COMMAND_READ_HOST_PIPE_INTEL 0x4214 +#define CL_COMMAND_WRITE_HOST_PIPE_INTEL 0x4215 + +/* clGetProgramInfo param_name */ +#define CL_PROGRAM_NUM_HOST_PIPES_INTEL 0x4216 +#define CL_PROGRAM_HOST_PIPE_NAMES_INTEL 0x4217 + + +typedef cl_int CL_API_CALL +clEnqueueReadHostPipeINTEL_t( + cl_command_queue command_queue, + cl_program program, + const char* pipe_symbol, + cl_bool blocking_read, + void* ptr, + size_t size, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueReadHostPipeINTEL_t * +clEnqueueReadHostPipeINTEL_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL +clEnqueueWriteHostPipeINTEL_t( + cl_command_queue command_queue, + cl_program program, + const char* pipe_symbol, + cl_bool blocking_write, + const void* ptr, + size_t size, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* 
event); + +typedef clEnqueueWriteHostPipeINTEL_t * +clEnqueueWriteHostPipeINTEL_fn CL_API_SUFFIX__VERSION_1_0; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueReadHostPipeINTEL( + cl_command_queue command_queue, + cl_program program, + const char* pipe_symbol, + cl_bool blocking_read, + void* ptr, + size_t size, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueWriteHostPipeINTEL( + cl_command_queue command_queue, + cl_program program, + const char* pipe_symbol, + cl_bool blocking_write, + const void* ptr, + size_t size, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) CL_API_SUFFIX__VERSION_1_0; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_intel_mem_channel_property +***************************************************************/ +#define cl_intel_mem_channel_property 1 +#define CL_INTEL_MEM_CHANNEL_PROPERTY_EXTENSION_NAME \ + "cl_intel_mem_channel_property" + + +#define CL_INTEL_MEM_CHANNEL_PROPERTY_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* cl_mem_properties_intel */ +#define CL_MEM_CHANNEL_INTEL 0x4213 + +/*************************************************************** +* cl_intel_mem_force_host_memory +***************************************************************/ +#define cl_intel_mem_force_host_memory 1 +#define CL_INTEL_MEM_FORCE_HOST_MEMORY_EXTENSION_NAME \ + "cl_intel_mem_force_host_memory" + + +#define CL_INTEL_MEM_FORCE_HOST_MEMORY_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* cl_mem_flags */ +#define CL_MEM_FORCE_HOST_MEMORY_INTEL (1 << 20) + +/*************************************************************** +* cl_intel_command_queue_families +***************************************************************/ +#define 
cl_intel_command_queue_families 1 +#define CL_INTEL_COMMAND_QUEUE_FAMILIES_EXTENSION_NAME \ + "cl_intel_command_queue_families" + + +#define CL_INTEL_COMMAND_QUEUE_FAMILIES_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +typedef cl_bitfield cl_command_queue_capabilities_intel; + +#define CL_QUEUE_FAMILY_MAX_NAME_SIZE_INTEL 64 + +typedef struct _cl_queue_family_properties_intel { + cl_command_queue_properties properties; + cl_command_queue_capabilities_intel capabilities; + cl_uint count; + char name[CL_QUEUE_FAMILY_MAX_NAME_SIZE_INTEL]; +} cl_queue_family_properties_intel; + +/* cl_device_info */ +#define CL_DEVICE_QUEUE_FAMILY_PROPERTIES_INTEL 0x418B + +/* cl_queue_properties */ +#define CL_QUEUE_FAMILY_INTEL 0x418C +#define CL_QUEUE_INDEX_INTEL 0x418D + +/* cl_command_queue_capabilities_intel */ +#define CL_QUEUE_DEFAULT_CAPABILITIES_INTEL 0 +#define CL_QUEUE_CAPABILITY_CREATE_SINGLE_QUEUE_EVENTS_INTEL (1 << 0) +#define CL_QUEUE_CAPABILITY_CREATE_CROSS_QUEUE_EVENTS_INTEL (1 << 1) +#define CL_QUEUE_CAPABILITY_SINGLE_QUEUE_EVENT_WAIT_LIST_INTEL (1 << 2) +#define CL_QUEUE_CAPABILITY_CROSS_QUEUE_EVENT_WAIT_LIST_INTEL (1 << 3) +#define CL_QUEUE_CAPABILITY_TRANSFER_BUFFER_INTEL (1 << 8) +#define CL_QUEUE_CAPABILITY_TRANSFER_BUFFER_RECT_INTEL (1 << 9) +#define CL_QUEUE_CAPABILITY_MAP_BUFFER_INTEL (1 << 10) +#define CL_QUEUE_CAPABILITY_FILL_BUFFER_INTEL (1 << 11) +#define CL_QUEUE_CAPABILITY_TRANSFER_IMAGE_INTEL (1 << 12) +#define CL_QUEUE_CAPABILITY_MAP_IMAGE_INTEL (1 << 13) +#define CL_QUEUE_CAPABILITY_FILL_IMAGE_INTEL (1 << 14) +#define CL_QUEUE_CAPABILITY_TRANSFER_BUFFER_IMAGE_INTEL (1 << 15) +#define CL_QUEUE_CAPABILITY_TRANSFER_IMAGE_BUFFER_INTEL (1 << 16) +#define CL_QUEUE_CAPABILITY_MARKER_INTEL (1 << 24) +#define CL_QUEUE_CAPABILITY_BARRIER_INTEL (1 << 25) +#define CL_QUEUE_CAPABILITY_KERNEL_INTEL (1 << 26) + +/*************************************************************** +* cl_intel_queue_no_sync_operations 
+***************************************************************/ +#define cl_intel_queue_no_sync_operations 1 +#define CL_INTEL_QUEUE_NO_SYNC_OPERATIONS_EXTENSION_NAME \ + "cl_intel_queue_no_sync_operations" + + +#define CL_INTEL_QUEUE_NO_SYNC_OPERATIONS_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* cl_command_queue_properties */ +#define CL_QUEUE_NO_SYNC_OPERATIONS_INTEL (1 << 29) + +/*************************************************************** +* cl_intel_sharing_format_query +***************************************************************/ +#define cl_intel_sharing_format_query 1 +#define CL_INTEL_SHARING_FORMAT_QUERY_EXTENSION_NAME \ + "cl_intel_sharing_format_query" + + +#define CL_INTEL_SHARING_FORMAT_QUERY_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/*************************************************************** +* cl_ext_image_requirements_info +***************************************************************/ +#if defined(CL_VERSION_3_0) + +#define cl_ext_image_requirements_info 1 +#define CL_EXT_IMAGE_REQUIREMENTS_INFO_EXTENSION_NAME \ + "cl_ext_image_requirements_info" + + +#define CL_EXT_IMAGE_REQUIREMENTS_INFO_EXTENSION_VERSION CL_MAKE_VERSION(0, 5, 0) + +/* Types */ +typedef cl_uint cl_image_requirements_info_ext; + +/* cl_image_requirements_info_ext */ +#define CL_IMAGE_REQUIREMENTS_BASE_ADDRESS_ALIGNMENT_EXT 0x1292 +#define CL_IMAGE_REQUIREMENTS_ROW_PITCH_ALIGNMENT_EXT 0x1290 +#define CL_IMAGE_REQUIREMENTS_SIZE_EXT 0x12B2 +#define CL_IMAGE_REQUIREMENTS_MAX_WIDTH_EXT 0x12B3 +#define CL_IMAGE_REQUIREMENTS_MAX_HEIGHT_EXT 0x12B4 +#define CL_IMAGE_REQUIREMENTS_MAX_DEPTH_EXT 0x12B5 +#define CL_IMAGE_REQUIREMENTS_MAX_ARRAY_SIZE_EXT 0x12B6 + +/* Enqueued Commands APIs */ + +typedef cl_int CL_API_CALL +clGetImageRequirementsInfoEXT_t( + cl_context context, + const cl_mem_properties* properties, + cl_mem_flags flags, + const cl_image_format* image_format, + const cl_image_desc* image_desc, + cl_image_requirements_info_ext param_name, + size_t 
param_value_size, + void* param_value, + size_t* param_value_size_ret); + +typedef clGetImageRequirementsInfoEXT_t * +clGetImageRequirementsInfoEXT_fn CL_API_SUFFIX__VERSION_3_0; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetImageRequirementsInfoEXT( + cl_context context, + const cl_mem_properties* properties, + cl_mem_flags flags, + const cl_image_format* image_format, + const cl_image_desc* image_desc, + cl_image_requirements_info_ext param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_3_0; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +#endif /* defined(CL_VERSION_3_0) */ + +/*************************************************************** +* cl_ext_image_from_buffer +***************************************************************/ +#if defined(CL_VERSION_3_0) + +#define cl_ext_image_from_buffer 1 +#define CL_EXT_IMAGE_FROM_BUFFER_EXTENSION_NAME \ + "cl_ext_image_from_buffer" + + +#define CL_EXT_IMAGE_FROM_BUFFER_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/* cl_image_requirements_info_ext */ +#define CL_IMAGE_REQUIREMENTS_SLICE_PITCH_ALIGNMENT_EXT 0x1291 + +#endif /* defined(CL_VERSION_3_0) */ + +/*************************************************************** +* cl_loader_info +***************************************************************/ +#define cl_loader_info 1 +#define CL_LOADER_INFO_EXTENSION_NAME \ + "cl_loader_info" + + +#define CL_LOADER_INFO_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +typedef cl_uint cl_icdl_info; + +/* cl_icdl_info */ +#define CL_ICDL_OCL_VERSION 1 +#define CL_ICDL_VERSION 2 +#define CL_ICDL_NAME 3 +#define CL_ICDL_VENDOR 4 + + +typedef cl_int CL_API_CALL +clGetICDLoaderInfoOCLICD_t( + cl_icdl_info param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret); + +typedef clGetICDLoaderInfoOCLICD_t * +clGetICDLoaderInfoOCLICD_fn ; + +#if 
!defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetICDLoaderInfoOCLICD( + cl_icdl_info param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret) ; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_khr_depth_images +***************************************************************/ +#define cl_khr_depth_images 1 +#define CL_KHR_DEPTH_IMAGES_EXTENSION_NAME \ + "cl_khr_depth_images" + + +#define CL_KHR_DEPTH_IMAGES_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +#if !defined(CL_VERSION_2_0) +/* cl_channel_order - defined in CL.h for OpenCL 2.0 and newer */ +#define CL_DEPTH 0x10BD + +#endif /* !defined(CL_VERSION_2_0) */ + +/*************************************************************** +* cl_ext_float_atomics +***************************************************************/ +#define cl_ext_float_atomics 1 +#define CL_EXT_FLOAT_ATOMICS_EXTENSION_NAME \ + "cl_ext_float_atomics" + + +#define CL_EXT_FLOAT_ATOMICS_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +typedef cl_bitfield cl_device_fp_atomic_capabilities_ext; + +/* cl_device_fp_atomic_capabilities_ext */ +#define CL_DEVICE_GLOBAL_FP_ATOMIC_LOAD_STORE_EXT (1 << 0) +#define CL_DEVICE_GLOBAL_FP_ATOMIC_ADD_EXT (1 << 1) +#define CL_DEVICE_GLOBAL_FP_ATOMIC_MIN_MAX_EXT (1 << 2) +#define CL_DEVICE_LOCAL_FP_ATOMIC_LOAD_STORE_EXT (1 << 16) +#define CL_DEVICE_LOCAL_FP_ATOMIC_ADD_EXT (1 << 17) +#define CL_DEVICE_LOCAL_FP_ATOMIC_MIN_MAX_EXT (1 << 18) + +/* cl_device_info */ +#define CL_DEVICE_SINGLE_FP_ATOMIC_CAPABILITIES_EXT 0x4231 +#define CL_DEVICE_DOUBLE_FP_ATOMIC_CAPABILITIES_EXT 0x4232 +#define CL_DEVICE_HALF_FP_ATOMIC_CAPABILITIES_EXT 0x4233 + +/*************************************************************** +* cl_intel_create_mem_object_properties +***************************************************************/ +#define 
cl_intel_create_mem_object_properties 1 +#define CL_INTEL_CREATE_MEM_OBJECT_PROPERTIES_EXTENSION_NAME \ + "cl_intel_create_mem_object_properties" + + +#define CL_INTEL_CREATE_MEM_OBJECT_PROPERTIES_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* cl_mem_properties */ +#define CL_MEM_LOCALLY_UNCACHED_RESOURCE_INTEL 0x4218 +#define CL_MEM_DEVICE_ID_INTEL 0x4219 + +/*************************************************************** +* cl_pocl_content_size +***************************************************************/ +#define cl_pocl_content_size 1 +#define CL_POCL_CONTENT_SIZE_EXTENSION_NAME \ + "cl_pocl_content_size" + + +#define CL_POCL_CONTENT_SIZE_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + + +typedef cl_int CL_API_CALL +clSetContentSizeBufferPoCL_t( + cl_mem buffer, + cl_mem content_size_buffer); + +typedef clSetContentSizeBufferPoCL_t * +clSetContentSizeBufferPoCL_fn CL_API_SUFFIX__VERSION_1_0; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clSetContentSizeBufferPoCL( + cl_mem buffer, + cl_mem content_size_buffer) CL_API_SUFFIX__VERSION_1_0; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_ext_image_raw10_raw12 +***************************************************************/ +#define cl_ext_image_raw10_raw12 1 +#define CL_EXT_IMAGE_RAW10_RAW12_EXTENSION_NAME \ + "cl_ext_image_raw10_raw12" + + +#define CL_EXT_IMAGE_RAW10_RAW12_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/* cl_channel_type */ +#define CL_UNSIGNED_INT_RAW10_EXT 0x10E3 +#define CL_UNSIGNED_INT_RAW12_EXT 0x10E4 + +/*************************************************************** +* cl_khr_3d_image_writes +***************************************************************/ +#define cl_khr_3d_image_writes 1 +#define CL_KHR_3D_IMAGE_WRITES_EXTENSION_NAME \ + "cl_khr_3d_image_writes" + + +#define CL_KHR_3D_IMAGE_WRITES_EXTENSION_VERSION CL_MAKE_VERSION(1, 
0, 0) + +/*************************************************************** +* cl_khr_async_work_group_copy_fence +***************************************************************/ +#define cl_khr_async_work_group_copy_fence 1 +#define CL_KHR_ASYNC_WORK_GROUP_COPY_FENCE_EXTENSION_NAME \ + "cl_khr_async_work_group_copy_fence" + + +#define CL_KHR_ASYNC_WORK_GROUP_COPY_FENCE_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/*************************************************************** +* cl_khr_byte_addressable_store +***************************************************************/ +#define cl_khr_byte_addressable_store 1 +#define CL_KHR_BYTE_ADDRESSABLE_STORE_EXTENSION_NAME \ + "cl_khr_byte_addressable_store" + + +#define CL_KHR_BYTE_ADDRESSABLE_STORE_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/*************************************************************** +* cl_khr_device_enqueue_local_arg_types +***************************************************************/ +#define cl_khr_device_enqueue_local_arg_types 1 +#define CL_KHR_DEVICE_ENQUEUE_LOCAL_ARG_TYPES_EXTENSION_NAME \ + "cl_khr_device_enqueue_local_arg_types" + + +#define CL_KHR_DEVICE_ENQUEUE_LOCAL_ARG_TYPES_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/*************************************************************** +* cl_khr_expect_assume +***************************************************************/ +#define cl_khr_expect_assume 1 +#define CL_KHR_EXPECT_ASSUME_EXTENSION_NAME \ + "cl_khr_expect_assume" + + +#define CL_KHR_EXPECT_ASSUME_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/*************************************************************** +* cl_khr_extended_async_copies +***************************************************************/ +#define cl_khr_extended_async_copies 1 +#define CL_KHR_EXTENDED_ASYNC_COPIES_EXTENSION_NAME \ + "cl_khr_extended_async_copies" + + +#define CL_KHR_EXTENDED_ASYNC_COPIES_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + 
+/*************************************************************** +* cl_khr_extended_bit_ops +***************************************************************/ +#define cl_khr_extended_bit_ops 1 +#define CL_KHR_EXTENDED_BIT_OPS_EXTENSION_NAME \ + "cl_khr_extended_bit_ops" + + +#define CL_KHR_EXTENDED_BIT_OPS_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/*************************************************************** +* cl_khr_external_memory_android_hardware_buffer (beta) +***************************************************************/ +#if defined(CL_ENABLE_BETA_EXTENSIONS) + +#define cl_khr_external_memory_android_hardware_buffer 1 +#define CL_KHR_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME \ + "cl_khr_external_memory_android_hardware_buffer" + + +#define CL_KHR_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_VERSION CL_MAKE_VERSION(0, 9, 2) + +/* cl_external_memory_handle_type_khr */ +#define CL_EXTERNAL_MEMORY_HANDLE_ANDROID_HARDWARE_BUFFER_KHR 0x2070 + +#endif /* defined(CL_ENABLE_BETA_EXTENSIONS) */ + +/*************************************************************** +* cl_khr_global_int32_base_atomics +***************************************************************/ +#define cl_khr_global_int32_base_atomics 1 +#define CL_KHR_GLOBAL_INT32_BASE_ATOMICS_EXTENSION_NAME \ + "cl_khr_global_int32_base_atomics" + + +#define CL_KHR_GLOBAL_INT32_BASE_ATOMICS_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/*************************************************************** +* cl_khr_global_int32_extended_atomics +***************************************************************/ +#define cl_khr_global_int32_extended_atomics 1 +#define CL_KHR_GLOBAL_INT32_EXTENDED_ATOMICS_EXTENSION_NAME \ + "cl_khr_global_int32_extended_atomics" + + +#define CL_KHR_GLOBAL_INT32_EXTENDED_ATOMICS_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/*************************************************************** +* cl_khr_int64_base_atomics 
+***************************************************************/ +#define cl_khr_int64_base_atomics 1 +#define CL_KHR_INT64_BASE_ATOMICS_EXTENSION_NAME \ + "cl_khr_int64_base_atomics" + + +#define CL_KHR_INT64_BASE_ATOMICS_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/*************************************************************** +* cl_khr_int64_extended_atomics +***************************************************************/ +#define cl_khr_int64_extended_atomics 1 +#define CL_KHR_INT64_EXTENDED_ATOMICS_EXTENSION_NAME \ + "cl_khr_int64_extended_atomics" + + +#define CL_KHR_INT64_EXTENDED_ATOMICS_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/*************************************************************** +* cl_khr_kernel_clock +***************************************************************/ +#define cl_khr_kernel_clock 1 +#define CL_KHR_KERNEL_CLOCK_EXTENSION_NAME \ + "cl_khr_kernel_clock" + + +#define CL_KHR_KERNEL_CLOCK_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/* cl_device_info */ +#define CL_DEVICE_KERNEL_CLOCK_CAPABILITIES_KHR 0x1076 + +typedef cl_bitfield cl_device_kernel_clock_capabilities_khr; + +/* cl_device_kernel_clock_capabilities_khr */ +#define CL_DEVICE_KERNEL_CLOCK_SCOPE_DEVICE_KHR (1 << 0) +#define CL_DEVICE_KERNEL_CLOCK_SCOPE_WORK_GROUP_KHR (1 << 1) +#define CL_DEVICE_KERNEL_CLOCK_SCOPE_SUB_GROUP_KHR (1 << 2) + +/*************************************************************** +* cl_khr_local_int32_base_atomics +***************************************************************/ +#define cl_khr_local_int32_base_atomics 1 +#define CL_KHR_LOCAL_INT32_BASE_ATOMICS_EXTENSION_NAME \ + "cl_khr_local_int32_base_atomics" + + +#define CL_KHR_LOCAL_INT32_BASE_ATOMICS_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/*************************************************************** +* cl_khr_local_int32_extended_atomics +***************************************************************/ +#define cl_khr_local_int32_extended_atomics 1 +#define 
CL_KHR_LOCAL_INT32_EXTENDED_ATOMICS_EXTENSION_NAME \ + "cl_khr_local_int32_extended_atomics" + + +#define CL_KHR_LOCAL_INT32_EXTENDED_ATOMICS_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/*************************************************************** +* cl_khr_mipmap_image_writes +***************************************************************/ +#define cl_khr_mipmap_image_writes 1 +#define CL_KHR_MIPMAP_IMAGE_WRITES_EXTENSION_NAME \ + "cl_khr_mipmap_image_writes" + + +#define CL_KHR_MIPMAP_IMAGE_WRITES_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/*************************************************************** +* cl_khr_select_fprounding_mode +***************************************************************/ +#define cl_khr_select_fprounding_mode 1 +#define CL_KHR_SELECT_FPROUNDING_MODE_EXTENSION_NAME \ + "cl_khr_select_fprounding_mode" + + +#define CL_KHR_SELECT_FPROUNDING_MODE_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/*************************************************************** +* cl_khr_spirv_extended_debug_info +***************************************************************/ +#define cl_khr_spirv_extended_debug_info 1 +#define CL_KHR_SPIRV_EXTENDED_DEBUG_INFO_EXTENSION_NAME \ + "cl_khr_spirv_extended_debug_info" + + +#define CL_KHR_SPIRV_EXTENDED_DEBUG_INFO_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/*************************************************************** +* cl_khr_spirv_linkonce_odr +***************************************************************/ +#define cl_khr_spirv_linkonce_odr 1 +#define CL_KHR_SPIRV_LINKONCE_ODR_EXTENSION_NAME \ + "cl_khr_spirv_linkonce_odr" + + +#define CL_KHR_SPIRV_LINKONCE_ODR_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/*************************************************************** +* cl_khr_spirv_no_integer_wrap_decoration +***************************************************************/ +#define cl_khr_spirv_no_integer_wrap_decoration 1 +#define CL_KHR_SPIRV_NO_INTEGER_WRAP_DECORATION_EXTENSION_NAME \ + 
"cl_khr_spirv_no_integer_wrap_decoration" + + +#define CL_KHR_SPIRV_NO_INTEGER_WRAP_DECORATION_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/*************************************************************** +* cl_khr_spirv_queries +***************************************************************/ +#define cl_khr_spirv_queries 1 +#define CL_KHR_SPIRV_QUERIES_EXTENSION_NAME \ + "cl_khr_spirv_queries" + + +#define CL_KHR_SPIRV_QUERIES_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/* cl_device_info */ +#define CL_DEVICE_SPIRV_EXTENDED_INSTRUCTION_SETS_KHR 0x12B9 +#define CL_DEVICE_SPIRV_EXTENSIONS_KHR 0x12BA +#define CL_DEVICE_SPIRV_CAPABILITIES_KHR 0x12BB + +/*************************************************************** +* cl_khr_srgb_image_writes +***************************************************************/ +#define cl_khr_srgb_image_writes 1 +#define CL_KHR_SRGB_IMAGE_WRITES_EXTENSION_NAME \ + "cl_khr_srgb_image_writes" + + +#define CL_KHR_SRGB_IMAGE_WRITES_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/*************************************************************** +* cl_khr_subgroup_ballot +***************************************************************/ +#define cl_khr_subgroup_ballot 1 +#define CL_KHR_SUBGROUP_BALLOT_EXTENSION_NAME \ + "cl_khr_subgroup_ballot" + + +#define CL_KHR_SUBGROUP_BALLOT_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/*************************************************************** +* cl_khr_subgroup_clustered_reduce +***************************************************************/ +#define cl_khr_subgroup_clustered_reduce 1 +#define CL_KHR_SUBGROUP_CLUSTERED_REDUCE_EXTENSION_NAME \ + "cl_khr_subgroup_clustered_reduce" + + +#define CL_KHR_SUBGROUP_CLUSTERED_REDUCE_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/*************************************************************** +* cl_khr_subgroup_extended_types +***************************************************************/ +#define cl_khr_subgroup_extended_types 1 +#define 
CL_KHR_SUBGROUP_EXTENDED_TYPES_EXTENSION_NAME \ + "cl_khr_subgroup_extended_types" + + +#define CL_KHR_SUBGROUP_EXTENDED_TYPES_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/*************************************************************** +* cl_khr_subgroup_non_uniform_arithmetic +***************************************************************/ +#define cl_khr_subgroup_non_uniform_arithmetic 1 +#define CL_KHR_SUBGROUP_NON_UNIFORM_ARITHMETIC_EXTENSION_NAME \ + "cl_khr_subgroup_non_uniform_arithmetic" + + +#define CL_KHR_SUBGROUP_NON_UNIFORM_ARITHMETIC_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/*************************************************************** +* cl_khr_subgroup_non_uniform_vote +***************************************************************/ +#define cl_khr_subgroup_non_uniform_vote 1 +#define CL_KHR_SUBGROUP_NON_UNIFORM_VOTE_EXTENSION_NAME \ + "cl_khr_subgroup_non_uniform_vote" + + +#define CL_KHR_SUBGROUP_NON_UNIFORM_VOTE_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/*************************************************************** +* cl_khr_subgroup_rotate +***************************************************************/ +#define cl_khr_subgroup_rotate 1 +#define CL_KHR_SUBGROUP_ROTATE_EXTENSION_NAME \ + "cl_khr_subgroup_rotate" + + +#define CL_KHR_SUBGROUP_ROTATE_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/*************************************************************** +* cl_khr_subgroup_shuffle +***************************************************************/ +#define cl_khr_subgroup_shuffle 1 +#define CL_KHR_SUBGROUP_SHUFFLE_EXTENSION_NAME \ + "cl_khr_subgroup_shuffle" + + +#define CL_KHR_SUBGROUP_SHUFFLE_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/*************************************************************** +* cl_khr_subgroup_shuffle_relative +***************************************************************/ +#define cl_khr_subgroup_shuffle_relative 1 +#define CL_KHR_SUBGROUP_SHUFFLE_RELATIVE_EXTENSION_NAME \ + 
"cl_khr_subgroup_shuffle_relative" + + +#define CL_KHR_SUBGROUP_SHUFFLE_RELATIVE_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/*************************************************************** +* cl_khr_work_group_uniform_arithmetic +***************************************************************/ +#define cl_khr_work_group_uniform_arithmetic 1 +#define CL_KHR_WORK_GROUP_UNIFORM_ARITHMETIC_EXTENSION_NAME \ + "cl_khr_work_group_uniform_arithmetic" + + +#define CL_KHR_WORK_GROUP_UNIFORM_ARITHMETIC_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/*************************************************************** +* cl_ext_buffer_device_address +***************************************************************/ +#define cl_ext_buffer_device_address 1 +#define CL_EXT_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME \ + "cl_ext_buffer_device_address" + + +#define CL_EXT_BUFFER_DEVICE_ADDRESS_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 2) + +typedef cl_ulong cl_mem_device_address_ext; + + +typedef cl_int CL_API_CALL +clSetKernelArgDevicePointerEXT_t( + cl_kernel kernel, + cl_uint arg_index, + cl_mem_device_address_ext arg_value); + +typedef clSetKernelArgDevicePointerEXT_t * +clSetKernelArgDevicePointerEXT_fn CL_API_SUFFIX__VERSION_3_0; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clSetKernelArgDevicePointerEXT( + cl_kernel kernel, + cl_uint arg_index, + cl_mem_device_address_ext arg_value) CL_API_SUFFIX__VERSION_3_0; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/* cl_mem_properties */ +#define CL_MEM_DEVICE_PRIVATE_ADDRESS_EXT 0x5000 + +/* cl_mem_info */ +#define CL_MEM_DEVICE_ADDRESS_EXT 0x5001 + +/* cl_kernel_exec_info */ +#define CL_KERNEL_EXEC_INFO_DEVICE_PTRS_EXT 0x5002 + +/*************************************************************** +* cl_ext_image_unorm_int_2_101010 +***************************************************************/ +#define cl_ext_image_unorm_int_2_101010 1 +#define 
CL_EXT_IMAGE_UNORM_INT_2_101010_EXTENSION_NAME \ + "cl_ext_image_unorm_int_2_101010" + + +#define CL_EXT_IMAGE_UNORM_INT_2_101010_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/* cl_channel_type */ +#define CL_UNORM_INT_2_101010_EXT 0x10E5 + +/*************************************************************** +* cl_ext_image_unsigned_10x6_12x4_14x2 +***************************************************************/ +#define cl_ext_image_unsigned_10x6_12x4_14x2 1 +#define CL_EXT_IMAGE_UNSIGNED_10X6_12X4_14X2_EXTENSION_NAME \ + "cl_ext_image_unsigned_10x6_12x4_14x2" + + +#define CL_EXT_IMAGE_UNSIGNED_10X6_12X4_14X2_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/* cl_channel_type */ +#define CL_UNSIGNED_INT10X6_EXT 0x10E6 +#define CL_UNSIGNED_INT12X4_EXT 0x10E7 +#define CL_UNSIGNED_INT14X2_EXT 0x10E8 +#define CL_UNORM_INT10X6_EXT 0x10E1 +#define CL_UNORM_INT12X4_EXT 0x10E9 +#define CL_UNORM_INT14X2_EXT 0x10EA + +/*************************************************************** +* cl_ext_immutable_memory_objects +***************************************************************/ +#define cl_ext_immutable_memory_objects 1 +#define CL_EXT_IMMUTABLE_MEMORY_OBJECTS_EXTENSION_NAME \ + "cl_ext_immutable_memory_objects" + + +#define CL_EXT_IMMUTABLE_MEMORY_OBJECTS_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/* cl_mem_flags */ +#define CL_MEM_IMMUTABLE_EXT (1 << 6) + +/*************************************************************** +* cl_img_cancel_command +***************************************************************/ +#define cl_img_cancel_command 1 +#define CL_IMG_CANCEL_COMMAND_EXTENSION_NAME \ + "cl_img_cancel_command" + + +#define CL_IMG_CANCEL_COMMAND_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* Error codes */ +#define CL_CANCELLED_IMG -1126 + + +typedef cl_int CL_API_CALL +clCancelCommandsIMG_t( + const cl_event* event_list, + size_t num_events_in_list); + +typedef clCancelCommandsIMG_t * +clCancelCommandsIMG_fn ; + +#if 
!defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clCancelCommandsIMG( + const cl_event* event_list, + size_t num_events_in_list) ; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_qcom_perf_hint +***************************************************************/ +#define cl_qcom_perf_hint 1 +#define CL_QCOM_PERF_HINT_EXTENSION_NAME \ + "cl_qcom_perf_hint" + + +#define CL_QCOM_PERF_HINT_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 5) + +typedef cl_uint cl_perf_hint_qcom; + +/* cl_perf_hint_qcom */ +#define CL_PERF_HINT_HIGH_QCOM 0x40C3 +#define CL_PERF_HINT_NORMAL_QCOM 0x40C4 +#define CL_PERF_HINT_LOW_QCOM 0x40C5 + +/* cl_context_info */ +#define CL_CONTEXT_PERF_HINT_QCOM 0x40C2 + + +typedef cl_int CL_API_CALL +clSetPerfHintQCOM_t( + cl_context context, + cl_perf_hint_qcom perf_hint); + +typedef clSetPerfHintQCOM_t * +clSetPerfHintQCOM_fn ; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clSetPerfHintQCOM( + cl_context context, + cl_perf_hint_qcom perf_hint) ; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +#ifdef __cplusplus +} +#endif + +#endif /* OPENCL_CL_EXT_H_ */ diff --git a/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_ext_intel.h b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_ext_intel.h new file mode 100644 index 000000000..a7ae87a34 --- /dev/null +++ b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_ext_intel.h @@ -0,0 +1,19 @@ +/******************************************************************************* + * Copyright (c) 2008-2020 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + ******************************************************************************/ + +#include +#pragma message("The Intel extensions have been moved into cl_ext.h. Please include cl_ext.h directly.") diff --git a/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_function_types.h b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_function_types.h new file mode 100644 index 000000000..124f53ba7 --- /dev/null +++ b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_function_types.h @@ -0,0 +1,1184 @@ +/* + * Copyright (c) 2023 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * OpenCL is a trademark of Apple Inc. used under license by Khronos. 
+ */ + +#ifndef OPENCL_CL_FUNCTION_TYPES_H_ +#define OPENCL_CL_FUNCTION_TYPES_H_ + +#include + +typedef cl_int CL_API_CALL clGetPlatformIDs_t( + cl_uint num_entries, + cl_platform_id* platforms, + cl_uint* num_platforms); + +typedef clGetPlatformIDs_t * +clGetPlatformIDs_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clGetPlatformInfo_t( + cl_platform_id platform, + cl_platform_info param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret); + +typedef clGetPlatformInfo_t * +clGetPlatformInfo_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clGetDeviceIDs_t( + cl_platform_id platform, + cl_device_type device_type, + cl_uint num_entries, + cl_device_id* devices, + cl_uint* num_devices); + +typedef clGetDeviceIDs_t * +clGetDeviceIDs_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clGetDeviceInfo_t( + cl_device_id device, + cl_device_info param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret); + +typedef clGetDeviceInfo_t * +clGetDeviceInfo_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_context CL_API_CALL clCreateContext_t( + const cl_context_properties* properties, + cl_uint num_devices, + const cl_device_id* devices, + void (CL_CALLBACK* pfn_notify)(const char* errinfo, const void* private_info, size_t cb, void* user_data), + void* user_data, + cl_int* errcode_ret); + +typedef clCreateContext_t * +clCreateContext_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_context CL_API_CALL clCreateContextFromType_t( + const cl_context_properties* properties, + cl_device_type device_type, + void (CL_CALLBACK* pfn_notify)(const char* errinfo, const void* private_info, size_t cb, void* user_data), + void* user_data, + cl_int* errcode_ret); + +typedef clCreateContextFromType_t * +clCreateContextFromType_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clRetainContext_t( + cl_context context); + +typedef clRetainContext_t * +clRetainContext_fn 
CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clReleaseContext_t( + cl_context context); + +typedef clReleaseContext_t * +clReleaseContext_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clGetContextInfo_t( + cl_context context, + cl_context_info param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret); + +typedef clGetContextInfo_t * +clGetContextInfo_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clRetainCommandQueue_t( + cl_command_queue command_queue); + +typedef clRetainCommandQueue_t * +clRetainCommandQueue_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clReleaseCommandQueue_t( + cl_command_queue command_queue); + +typedef clReleaseCommandQueue_t * +clReleaseCommandQueue_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clGetCommandQueueInfo_t( + cl_command_queue command_queue, + cl_command_queue_info param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret); + +typedef clGetCommandQueueInfo_t * +clGetCommandQueueInfo_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_mem CL_API_CALL clCreateBuffer_t( + cl_context context, + cl_mem_flags flags, + size_t size, + void* host_ptr, + cl_int* errcode_ret); + +typedef clCreateBuffer_t * +clCreateBuffer_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clRetainMemObject_t( + cl_mem memobj); + +typedef clRetainMemObject_t * +clRetainMemObject_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clReleaseMemObject_t( + cl_mem memobj); + +typedef clReleaseMemObject_t * +clReleaseMemObject_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clGetSupportedImageFormats_t( + cl_context context, + cl_mem_flags flags, + cl_mem_object_type image_type, + cl_uint num_entries, + cl_image_format* image_formats, + cl_uint* num_image_formats); + +typedef clGetSupportedImageFormats_t * +clGetSupportedImageFormats_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL 
clGetMemObjectInfo_t( + cl_mem memobj, + cl_mem_info param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret); + +typedef clGetMemObjectInfo_t * +clGetMemObjectInfo_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clGetImageInfo_t( + cl_mem image, + cl_image_info param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret); + +typedef clGetImageInfo_t * +clGetImageInfo_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clRetainSampler_t( + cl_sampler sampler); + +typedef clRetainSampler_t * +clRetainSampler_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clReleaseSampler_t( + cl_sampler sampler); + +typedef clReleaseSampler_t * +clReleaseSampler_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clGetSamplerInfo_t( + cl_sampler sampler, + cl_sampler_info param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret); + +typedef clGetSamplerInfo_t * +clGetSamplerInfo_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_program CL_API_CALL clCreateProgramWithSource_t( + cl_context context, + cl_uint count, + const char** strings, + const size_t* lengths, + cl_int* errcode_ret); + +typedef clCreateProgramWithSource_t * +clCreateProgramWithSource_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_program CL_API_CALL clCreateProgramWithBinary_t( + cl_context context, + cl_uint num_devices, + const cl_device_id* device_list, + const size_t* lengths, + const unsigned char** binaries, + cl_int* binary_status, + cl_int* errcode_ret); + +typedef clCreateProgramWithBinary_t * +clCreateProgramWithBinary_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clRetainProgram_t( + cl_program program); + +typedef clRetainProgram_t * +clRetainProgram_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clReleaseProgram_t( + cl_program program); + +typedef clReleaseProgram_t * +clReleaseProgram_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int 
CL_API_CALL clBuildProgram_t( + cl_program program, + cl_uint num_devices, + const cl_device_id* device_list, + const char* options, + void (CL_CALLBACK* pfn_notify)(cl_program program, void* user_data), + void* user_data); + +typedef clBuildProgram_t * +clBuildProgram_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clGetProgramInfo_t( + cl_program program, + cl_program_info param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret); + +typedef clGetProgramInfo_t * +clGetProgramInfo_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clGetProgramBuildInfo_t( + cl_program program, + cl_device_id device, + cl_program_build_info param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret); + +typedef clGetProgramBuildInfo_t * +clGetProgramBuildInfo_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_kernel CL_API_CALL clCreateKernel_t( + cl_program program, + const char* kernel_name, + cl_int* errcode_ret); + +typedef clCreateKernel_t * +clCreateKernel_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clCreateKernelsInProgram_t( + cl_program program, + cl_uint num_kernels, + cl_kernel* kernels, + cl_uint* num_kernels_ret); + +typedef clCreateKernelsInProgram_t * +clCreateKernelsInProgram_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clRetainKernel_t( + cl_kernel kernel); + +typedef clRetainKernel_t * +clRetainKernel_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clReleaseKernel_t( + cl_kernel kernel); + +typedef clReleaseKernel_t * +clReleaseKernel_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clSetKernelArg_t( + cl_kernel kernel, + cl_uint arg_index, + size_t arg_size, + const void* arg_value); + +typedef clSetKernelArg_t * +clSetKernelArg_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clGetKernelInfo_t( + cl_kernel kernel, + cl_kernel_info param_name, + size_t param_value_size, + void* param_value, + size_t* 
param_value_size_ret); + +typedef clGetKernelInfo_t * +clGetKernelInfo_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clGetKernelWorkGroupInfo_t( + cl_kernel kernel, + cl_device_id device, + cl_kernel_work_group_info param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret); + +typedef clGetKernelWorkGroupInfo_t * +clGetKernelWorkGroupInfo_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clWaitForEvents_t( + cl_uint num_events, + const cl_event* event_list); + +typedef clWaitForEvents_t * +clWaitForEvents_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clGetEventInfo_t( + cl_event event, + cl_event_info param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret); + +typedef clGetEventInfo_t * +clGetEventInfo_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clRetainEvent_t( + cl_event event); + +typedef clRetainEvent_t * +clRetainEvent_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clReleaseEvent_t( + cl_event event); + +typedef clReleaseEvent_t * +clReleaseEvent_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clGetEventProfilingInfo_t( + cl_event event, + cl_profiling_info param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret); + +typedef clGetEventProfilingInfo_t * +clGetEventProfilingInfo_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clFlush_t( + cl_command_queue command_queue); + +typedef clFlush_t * +clFlush_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clFinish_t( + cl_command_queue command_queue); + +typedef clFinish_t * +clFinish_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clEnqueueReadBuffer_t( + cl_command_queue command_queue, + cl_mem buffer, + cl_bool blocking_read, + size_t offset, + size_t size, + void* ptr, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueReadBuffer_t 
* +clEnqueueReadBuffer_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clEnqueueWriteBuffer_t( + cl_command_queue command_queue, + cl_mem buffer, + cl_bool blocking_write, + size_t offset, + size_t size, + const void* ptr, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueWriteBuffer_t * +clEnqueueWriteBuffer_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clEnqueueCopyBuffer_t( + cl_command_queue command_queue, + cl_mem src_buffer, + cl_mem dst_buffer, + size_t src_offset, + size_t dst_offset, + size_t size, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueCopyBuffer_t * +clEnqueueCopyBuffer_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clEnqueueReadImage_t( + cl_command_queue command_queue, + cl_mem image, + cl_bool blocking_read, + const size_t* origin, + const size_t* region, + size_t row_pitch, + size_t slice_pitch, + void* ptr, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueReadImage_t * +clEnqueueReadImage_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clEnqueueWriteImage_t( + cl_command_queue command_queue, + cl_mem image, + cl_bool blocking_write, + const size_t* origin, + const size_t* region, + size_t input_row_pitch, + size_t input_slice_pitch, + const void* ptr, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueWriteImage_t * +clEnqueueWriteImage_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clEnqueueCopyImage_t( + cl_command_queue command_queue, + cl_mem src_image, + cl_mem dst_image, + const size_t* src_origin, + const size_t* dst_origin, + const size_t* region, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueCopyImage_t * +clEnqueueCopyImage_fn CL_API_SUFFIX__VERSION_1_0; + 
+typedef cl_int CL_API_CALL clEnqueueCopyImageToBuffer_t( + cl_command_queue command_queue, + cl_mem src_image, + cl_mem dst_buffer, + const size_t* src_origin, + const size_t* region, + size_t dst_offset, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueCopyImageToBuffer_t * +clEnqueueCopyImageToBuffer_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clEnqueueCopyBufferToImage_t( + cl_command_queue command_queue, + cl_mem src_buffer, + cl_mem dst_image, + size_t src_offset, + const size_t* dst_origin, + const size_t* region, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueCopyBufferToImage_t * +clEnqueueCopyBufferToImage_fn CL_API_SUFFIX__VERSION_1_0; + +typedef void* CL_API_CALL clEnqueueMapBuffer_t( + cl_command_queue command_queue, + cl_mem buffer, + cl_bool blocking_map, + cl_map_flags map_flags, + size_t offset, + size_t size, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event, + cl_int* errcode_ret); + +typedef clEnqueueMapBuffer_t * +clEnqueueMapBuffer_fn CL_API_SUFFIX__VERSION_1_0; + +typedef void* CL_API_CALL clEnqueueMapImage_t( + cl_command_queue command_queue, + cl_mem image, + cl_bool blocking_map, + cl_map_flags map_flags, + const size_t* origin, + const size_t* region, + size_t* image_row_pitch, + size_t* image_slice_pitch, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event, + cl_int* errcode_ret); + +typedef clEnqueueMapImage_t * +clEnqueueMapImage_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clEnqueueUnmapMemObject_t( + cl_command_queue command_queue, + cl_mem memobj, + void* mapped_ptr, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueUnmapMemObject_t * +clEnqueueUnmapMemObject_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clEnqueueNDRangeKernel_t( + 
cl_command_queue command_queue, + cl_kernel kernel, + cl_uint work_dim, + const size_t* global_work_offset, + const size_t* global_work_size, + const size_t* local_work_size, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueNDRangeKernel_t * +clEnqueueNDRangeKernel_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clEnqueueNativeKernel_t( + cl_command_queue command_queue, + void (CL_CALLBACK* user_func)(void*), + void* args, + size_t cb_args, + cl_uint num_mem_objects, + const cl_mem* mem_list, + const void** args_mem_loc, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueNativeKernel_t * +clEnqueueNativeKernel_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL clSetCommandQueueProperty_t( + cl_command_queue command_queue, + cl_command_queue_properties properties, + cl_bool enable, + cl_command_queue_properties* old_properties); + +typedef clSetCommandQueueProperty_t * +clSetCommandQueueProperty_fn CL_API_SUFFIX__VERSION_1_0_DEPRECATED; + +typedef cl_mem CL_API_CALL clCreateImage2D_t( + cl_context context, + cl_mem_flags flags, + const cl_image_format* image_format, + size_t image_width, + size_t image_height, + size_t image_row_pitch, + void* host_ptr, + cl_int* errcode_ret); + +typedef clCreateImage2D_t * +clCreateImage2D_fn CL_API_SUFFIX__VERSION_1_1_DEPRECATED; + +typedef cl_mem CL_API_CALL clCreateImage3D_t( + cl_context context, + cl_mem_flags flags, + const cl_image_format* image_format, + size_t image_width, + size_t image_height, + size_t image_depth, + size_t image_row_pitch, + size_t image_slice_pitch, + void* host_ptr, + cl_int* errcode_ret); + +typedef clCreateImage3D_t * +clCreateImage3D_fn CL_API_SUFFIX__VERSION_1_1_DEPRECATED; + +typedef cl_int CL_API_CALL clEnqueueMarker_t( + cl_command_queue command_queue, + cl_event* event); + +typedef clEnqueueMarker_t * +clEnqueueMarker_fn 
CL_API_SUFFIX__VERSION_1_1_DEPRECATED; + +typedef cl_int CL_API_CALL clEnqueueWaitForEvents_t( + cl_command_queue command_queue, + cl_uint num_events, + const cl_event* event_list); + +typedef clEnqueueWaitForEvents_t * +clEnqueueWaitForEvents_fn CL_API_SUFFIX__VERSION_1_1_DEPRECATED; + +typedef cl_int CL_API_CALL clEnqueueBarrier_t( + cl_command_queue command_queue); + +typedef clEnqueueBarrier_t * +clEnqueueBarrier_fn CL_API_SUFFIX__VERSION_1_1_DEPRECATED; + +typedef cl_int CL_API_CALL clUnloadCompiler_t( + void ); + +typedef clUnloadCompiler_t * +clUnloadCompiler_fn CL_API_SUFFIX__VERSION_1_1_DEPRECATED; + +typedef void* CL_API_CALL clGetExtensionFunctionAddress_t( + const char* func_name); + +typedef clGetExtensionFunctionAddress_t * +clGetExtensionFunctionAddress_fn CL_API_SUFFIX__VERSION_1_1_DEPRECATED; + +typedef cl_command_queue CL_API_CALL clCreateCommandQueue_t( + cl_context context, + cl_device_id device, + cl_command_queue_properties properties, + cl_int* errcode_ret); + +typedef clCreateCommandQueue_t * +clCreateCommandQueue_fn CL_API_SUFFIX__VERSION_1_2_DEPRECATED; + +typedef cl_sampler CL_API_CALL clCreateSampler_t( + cl_context context, + cl_bool normalized_coords, + cl_addressing_mode addressing_mode, + cl_filter_mode filter_mode, + cl_int* errcode_ret); + +typedef clCreateSampler_t * +clCreateSampler_fn CL_API_SUFFIX__VERSION_1_2_DEPRECATED; + +typedef cl_int CL_API_CALL clEnqueueTask_t( + cl_command_queue command_queue, + cl_kernel kernel, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueTask_t * +clEnqueueTask_fn CL_API_SUFFIX__VERSION_1_2_DEPRECATED; + +#ifdef CL_VERSION_1_1 + +typedef cl_mem CL_API_CALL clCreateSubBuffer_t( + cl_mem buffer, + cl_mem_flags flags, + cl_buffer_create_type buffer_create_type, + const void* buffer_create_info, + cl_int* errcode_ret); + +typedef clCreateSubBuffer_t * +clCreateSubBuffer_fn CL_API_SUFFIX__VERSION_1_1; + +typedef cl_int CL_API_CALL 
clSetMemObjectDestructorCallback_t( + cl_mem memobj, + void (CL_CALLBACK* pfn_notify)(cl_mem memobj, void* user_data), + void* user_data); + +typedef clSetMemObjectDestructorCallback_t * +clSetMemObjectDestructorCallback_fn CL_API_SUFFIX__VERSION_1_1; + +typedef cl_event CL_API_CALL clCreateUserEvent_t( + cl_context context, + cl_int* errcode_ret); + +typedef clCreateUserEvent_t * +clCreateUserEvent_fn CL_API_SUFFIX__VERSION_1_1; + +typedef cl_int CL_API_CALL clSetUserEventStatus_t( + cl_event event, + cl_int execution_status); + +typedef clSetUserEventStatus_t * +clSetUserEventStatus_fn CL_API_SUFFIX__VERSION_1_1; + +typedef cl_int CL_API_CALL clSetEventCallback_t( + cl_event event, + cl_int command_exec_callback_type, + void (CL_CALLBACK* pfn_notify)(cl_event event, cl_int event_command_status, void *user_data), + void* user_data); + +typedef clSetEventCallback_t * +clSetEventCallback_fn CL_API_SUFFIX__VERSION_1_1; + +typedef cl_int CL_API_CALL clEnqueueReadBufferRect_t( + cl_command_queue command_queue, + cl_mem buffer, + cl_bool blocking_read, + const size_t* buffer_origin, + const size_t* host_origin, + const size_t* region, + size_t buffer_row_pitch, + size_t buffer_slice_pitch, + size_t host_row_pitch, + size_t host_slice_pitch, + void* ptr, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueReadBufferRect_t * +clEnqueueReadBufferRect_fn CL_API_SUFFIX__VERSION_1_1; + +typedef cl_int CL_API_CALL clEnqueueWriteBufferRect_t( + cl_command_queue command_queue, + cl_mem buffer, + cl_bool blocking_write, + const size_t* buffer_origin, + const size_t* host_origin, + const size_t* region, + size_t buffer_row_pitch, + size_t buffer_slice_pitch, + size_t host_row_pitch, + size_t host_slice_pitch, + const void* ptr, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueWriteBufferRect_t * +clEnqueueWriteBufferRect_fn CL_API_SUFFIX__VERSION_1_1; + +typedef 
cl_int CL_API_CALL clEnqueueCopyBufferRect_t( + cl_command_queue command_queue, + cl_mem src_buffer, + cl_mem dst_buffer, + const size_t* src_origin, + const size_t* dst_origin, + const size_t* region, + size_t src_row_pitch, + size_t src_slice_pitch, + size_t dst_row_pitch, + size_t dst_slice_pitch, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueCopyBufferRect_t * +clEnqueueCopyBufferRect_fn CL_API_SUFFIX__VERSION_1_1; + +#endif /* CL_VERSION_1_1 */ + +#ifdef CL_VERSION_1_2 + +typedef cl_int CL_API_CALL clCreateSubDevices_t( + cl_device_id in_device, + const cl_device_partition_property* properties, + cl_uint num_devices, + cl_device_id* out_devices, + cl_uint* num_devices_ret); + +typedef clCreateSubDevices_t * +clCreateSubDevices_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL clRetainDevice_t( + cl_device_id device); + +typedef clRetainDevice_t * +clRetainDevice_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL clReleaseDevice_t( + cl_device_id device); + +typedef clReleaseDevice_t * +clReleaseDevice_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_mem CL_API_CALL clCreateImage_t( + cl_context context, + cl_mem_flags flags, + const cl_image_format* image_format, + const cl_image_desc* image_desc, + void* host_ptr, + cl_int* errcode_ret); + +typedef clCreateImage_t * +clCreateImage_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_program CL_API_CALL clCreateProgramWithBuiltInKernels_t( + cl_context context, + cl_uint num_devices, + const cl_device_id* device_list, + const char* kernel_names, + cl_int* errcode_ret); + +typedef clCreateProgramWithBuiltInKernels_t * +clCreateProgramWithBuiltInKernels_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL clCompileProgram_t( + cl_program program, + cl_uint num_devices, + const cl_device_id* device_list, + const char* options, + cl_uint num_input_headers, + const cl_program* input_headers, + const char** header_include_names, + void 
(CL_CALLBACK* pfn_notify)(cl_program program, void* user_data), + void* user_data); + +typedef clCompileProgram_t * +clCompileProgram_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_program CL_API_CALL clLinkProgram_t( + cl_context context, + cl_uint num_devices, + const cl_device_id* device_list, + const char* options, + cl_uint num_input_programs, + const cl_program* input_programs, + void (CL_CALLBACK* pfn_notify)(cl_program program, void* user_data), + void* user_data, + cl_int* errcode_ret); + +typedef clLinkProgram_t * +clLinkProgram_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL clUnloadPlatformCompiler_t( + cl_platform_id platform); + +typedef clUnloadPlatformCompiler_t * +clUnloadPlatformCompiler_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL clGetKernelArgInfo_t( + cl_kernel kernel, + cl_uint arg_index, + cl_kernel_arg_info param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret); + +typedef clGetKernelArgInfo_t * +clGetKernelArgInfo_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL clEnqueueFillBuffer_t( + cl_command_queue command_queue, + cl_mem buffer, + const void* pattern, + size_t pattern_size, + size_t offset, + size_t size, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueFillBuffer_t * +clEnqueueFillBuffer_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL clEnqueueFillImage_t( + cl_command_queue command_queue, + cl_mem image, + const void* fill_color, + const size_t* origin, + const size_t* region, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueFillImage_t * +clEnqueueFillImage_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL clEnqueueMigrateMemObjects_t( + cl_command_queue command_queue, + cl_uint num_mem_objects, + const cl_mem* mem_objects, + cl_mem_migration_flags flags, + cl_uint num_events_in_wait_list, + const cl_event* 
event_wait_list, + cl_event* event); + +typedef clEnqueueMigrateMemObjects_t * +clEnqueueMigrateMemObjects_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL clEnqueueMarkerWithWaitList_t( + cl_command_queue command_queue, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueMarkerWithWaitList_t * +clEnqueueMarkerWithWaitList_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL clEnqueueBarrierWithWaitList_t( + cl_command_queue command_queue, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueBarrierWithWaitList_t * +clEnqueueBarrierWithWaitList_fn CL_API_SUFFIX__VERSION_1_2; + +typedef void* CL_API_CALL clGetExtensionFunctionAddressForPlatform_t( + cl_platform_id platform, + const char* func_name); + +typedef clGetExtensionFunctionAddressForPlatform_t * +clGetExtensionFunctionAddressForPlatform_fn CL_API_SUFFIX__VERSION_1_2; + +#endif /* CL_VERSION_1_2 */ + +#ifdef CL_VERSION_2_0 + +typedef cl_command_queue CL_API_CALL clCreateCommandQueueWithProperties_t( + cl_context context, + cl_device_id device, + const cl_queue_properties* properties, + cl_int* errcode_ret); + +typedef clCreateCommandQueueWithProperties_t * +clCreateCommandQueueWithProperties_fn CL_API_SUFFIX__VERSION_2_0; + +typedef cl_mem CL_API_CALL clCreatePipe_t( + cl_context context, + cl_mem_flags flags, + cl_uint pipe_packet_size, + cl_uint pipe_max_packets, + const cl_pipe_properties* properties, + cl_int* errcode_ret); + +typedef clCreatePipe_t * +clCreatePipe_fn CL_API_SUFFIX__VERSION_2_0; + +typedef cl_int CL_API_CALL clGetPipeInfo_t( + cl_mem pipe, + cl_pipe_info param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret); + +typedef clGetPipeInfo_t * +clGetPipeInfo_fn CL_API_SUFFIX__VERSION_2_0; + +typedef void* CL_API_CALL clSVMAlloc_t( + cl_context context, + cl_svm_mem_flags flags, + size_t size, + cl_uint alignment); + +typedef 
clSVMAlloc_t * +clSVMAlloc_fn CL_API_SUFFIX__VERSION_2_0; + +typedef void CL_API_CALL clSVMFree_t( + cl_context context, + void* svm_pointer); + +typedef clSVMFree_t * +clSVMFree_fn CL_API_SUFFIX__VERSION_2_0; + +typedef cl_sampler CL_API_CALL clCreateSamplerWithProperties_t( + cl_context context, + const cl_sampler_properties* sampler_properties, + cl_int* errcode_ret); + +typedef clCreateSamplerWithProperties_t * +clCreateSamplerWithProperties_fn CL_API_SUFFIX__VERSION_2_0; + +typedef cl_int CL_API_CALL clSetKernelArgSVMPointer_t( + cl_kernel kernel, + cl_uint arg_index, + const void* arg_value); + +typedef clSetKernelArgSVMPointer_t * +clSetKernelArgSVMPointer_fn CL_API_SUFFIX__VERSION_2_0; + +typedef cl_int CL_API_CALL clSetKernelExecInfo_t( + cl_kernel kernel, + cl_kernel_exec_info param_name, + size_t param_value_size, + const void* param_value); + +typedef clSetKernelExecInfo_t * +clSetKernelExecInfo_fn CL_API_SUFFIX__VERSION_2_0; + +typedef cl_int CL_API_CALL clEnqueueSVMFree_t( + cl_command_queue command_queue, + cl_uint num_svm_pointers, + void* svm_pointers[], + void (CL_CALLBACK* pfn_free_func)(cl_command_queue queue, cl_uint num_svm_pointers, void* svm_pointers[], void* user_data), + void* user_data, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueSVMFree_t * +clEnqueueSVMFree_fn CL_API_SUFFIX__VERSION_2_0; + +typedef cl_int CL_API_CALL clEnqueueSVMMemcpy_t( + cl_command_queue command_queue, + cl_bool blocking_copy, + void* dst_ptr, + const void* src_ptr, + size_t size, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueSVMMemcpy_t * +clEnqueueSVMMemcpy_fn CL_API_SUFFIX__VERSION_2_0; + +typedef cl_int CL_API_CALL clEnqueueSVMMemFill_t( + cl_command_queue command_queue, + void* svm_ptr, + const void* pattern, + size_t pattern_size, + size_t size, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); 
+ +typedef clEnqueueSVMMemFill_t * +clEnqueueSVMMemFill_fn CL_API_SUFFIX__VERSION_2_0; + +typedef cl_int CL_API_CALL clEnqueueSVMMap_t( + cl_command_queue command_queue, + cl_bool blocking_map, + cl_map_flags flags, + void* svm_ptr, + size_t size, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueSVMMap_t * +clEnqueueSVMMap_fn CL_API_SUFFIX__VERSION_2_0; + +typedef cl_int CL_API_CALL clEnqueueSVMUnmap_t( + cl_command_queue command_queue, + void* svm_ptr, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueSVMUnmap_t * +clEnqueueSVMUnmap_fn CL_API_SUFFIX__VERSION_2_0; + +#endif /* CL_VERSION_2_0 */ + +#ifdef CL_VERSION_2_1 + +typedef cl_int CL_API_CALL clSetDefaultDeviceCommandQueue_t( + cl_context context, + cl_device_id device, + cl_command_queue command_queue); + +typedef clSetDefaultDeviceCommandQueue_t * +clSetDefaultDeviceCommandQueue_fn CL_API_SUFFIX__VERSION_2_1; + +typedef cl_int CL_API_CALL clGetDeviceAndHostTimer_t( + cl_device_id device, + cl_ulong* device_timestamp, + cl_ulong* host_timestamp); + +typedef clGetDeviceAndHostTimer_t * +clGetDeviceAndHostTimer_fn CL_API_SUFFIX__VERSION_2_1; + +typedef cl_int CL_API_CALL clGetHostTimer_t( + cl_device_id device, + cl_ulong* host_timestamp); + +typedef clGetHostTimer_t * +clGetHostTimer_fn CL_API_SUFFIX__VERSION_2_1; + +typedef cl_program CL_API_CALL clCreateProgramWithIL_t( + cl_context context, + const void* il, + size_t length, + cl_int* errcode_ret); + +typedef clCreateProgramWithIL_t * +clCreateProgramWithIL_fn CL_API_SUFFIX__VERSION_2_1; + +typedef cl_kernel CL_API_CALL clCloneKernel_t( + cl_kernel source_kernel, + cl_int* errcode_ret); + +typedef clCloneKernel_t * +clCloneKernel_fn CL_API_SUFFIX__VERSION_2_1; + +typedef cl_int CL_API_CALL clGetKernelSubGroupInfo_t( + cl_kernel kernel, + cl_device_id device, + cl_kernel_sub_group_info param_name, + size_t input_value_size, + const void* 
input_value, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret); + +typedef clGetKernelSubGroupInfo_t * +clGetKernelSubGroupInfo_fn CL_API_SUFFIX__VERSION_2_1; + +typedef cl_int CL_API_CALL clEnqueueSVMMigrateMem_t( + cl_command_queue command_queue, + cl_uint num_svm_pointers, + const void** svm_pointers, + const size_t* sizes, + cl_mem_migration_flags flags, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueSVMMigrateMem_t * +clEnqueueSVMMigrateMem_fn CL_API_SUFFIX__VERSION_2_1; + +#endif /* CL_VERSION_2_1 */ + +#ifdef CL_VERSION_2_2 + +typedef cl_int CL_API_CALL clSetProgramSpecializationConstant_t( + cl_program program, + cl_uint spec_id, + size_t spec_size, + const void* spec_value); + +typedef clSetProgramSpecializationConstant_t * +clSetProgramSpecializationConstant_fn CL_API_SUFFIX__VERSION_2_2; + +typedef cl_int CL_API_CALL clSetProgramReleaseCallback_t( + cl_program program, + void (CL_CALLBACK* pfn_notify)(cl_program program, void* user_data), + void* user_data); + +typedef clSetProgramReleaseCallback_t * +clSetProgramReleaseCallback_fn CL_API_SUFFIX__VERSION_2_2_DEPRECATED; + +#endif /* CL_VERSION_2_2 */ + +#ifdef CL_VERSION_3_0 + +typedef cl_int CL_API_CALL clSetContextDestructorCallback_t( + cl_context context, + void (CL_CALLBACK* pfn_notify)(cl_context context, void* user_data), + void* user_data); + +typedef clSetContextDestructorCallback_t * +clSetContextDestructorCallback_fn CL_API_SUFFIX__VERSION_3_0; + +typedef cl_mem CL_API_CALL clCreateBufferWithProperties_t( + cl_context context, + const cl_mem_properties* properties, + cl_mem_flags flags, + size_t size, + void* host_ptr, + cl_int* errcode_ret); + +typedef clCreateBufferWithProperties_t * +clCreateBufferWithProperties_fn CL_API_SUFFIX__VERSION_3_0; + +typedef cl_mem CL_API_CALL clCreateImageWithProperties_t( + cl_context context, + const cl_mem_properties* properties, + cl_mem_flags flags, + const 
cl_image_format* image_format, + const cl_image_desc* image_desc, + void* host_ptr, + cl_int* errcode_ret); + +typedef clCreateImageWithProperties_t * +clCreateImageWithProperties_fn CL_API_SUFFIX__VERSION_3_0; + +#endif /* CL_VERSION_3_0 */ + +#endif /* OPENCL_CL_FUNCTION_TYPES_H_ */ diff --git a/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_gl.h b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_gl.h new file mode 100644 index 000000000..552560f71 --- /dev/null +++ b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_gl.h @@ -0,0 +1,421 @@ +/******************************************************************************* + * Copyright (c) 2008-2023 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + ******************************************************************************/ + +#ifndef OPENCL_CL_GL_H_ +#define OPENCL_CL_GL_H_ + +/* +** This header is generated from the Khronos OpenCL XML API Registry. 
+*/ + +#include + +/* CL_NO_PROTOTYPES implies CL_NO_EXTENSION_PROTOTYPES: */ +#if defined(CL_NO_PROTOTYPES) && !defined(CL_NO_EXTENSION_PROTOTYPES) +#define CL_NO_EXTENSION_PROTOTYPES +#endif + +/* CL_NO_EXTENSION_PROTOTYPES implies + CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES and + CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES: */ +#if defined(CL_NO_EXTENSION_PROTOTYPES) && \ + !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) +#define CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES +#endif +#if defined(CL_NO_EXTENSION_PROTOTYPES) && \ + !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) +#define CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/*************************************************************** +* cl_khr_gl_sharing +***************************************************************/ +#define cl_khr_gl_sharing 1 +#define CL_KHR_GL_SHARING_EXTENSION_NAME \ + "cl_khr_gl_sharing" + + +#define CL_KHR_GL_SHARING_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +typedef int cl_GLint; +typedef unsigned int cl_GLenum; +typedef unsigned int cl_GLuint; + +typedef cl_uint cl_gl_context_info; + +/* Error codes */ +#define CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR -1000 + +/* cl_gl_context_info */ +#define CL_CURRENT_DEVICE_FOR_GL_CONTEXT_KHR 0x2006 +#define CL_DEVICES_FOR_GL_CONTEXT_KHR 0x2007 + +/* Additional cl_context_properties */ +#define CL_GL_CONTEXT_KHR 0x2008 +#define CL_EGL_DISPLAY_KHR 0x2009 +#define CL_GLX_DISPLAY_KHR 0x200A +#define CL_WGL_HDC_KHR 0x200B +#define CL_CGL_SHAREGROUP_KHR 0x200C + +typedef cl_uint cl_gl_object_type; +typedef cl_uint cl_gl_texture_info; +typedef cl_uint cl_gl_platform_info; + +/* cl_gl_object_type */ +#define CL_GL_OBJECT_BUFFER 0x2000 +#define CL_GL_OBJECT_TEXTURE2D 0x2001 +#define CL_GL_OBJECT_TEXTURE3D 0x2002 +#define CL_GL_OBJECT_RENDERBUFFER 0x2003 + +#if defined(CL_VERSION_1_2) +/* cl_gl_object_type */ +#define CL_GL_OBJECT_TEXTURE2D_ARRAY 0x200E +#define CL_GL_OBJECT_TEXTURE1D 0x200F 
+#define CL_GL_OBJECT_TEXTURE1D_ARRAY 0x2010 +#define CL_GL_OBJECT_TEXTURE_BUFFER 0x2011 + +#endif /* defined(CL_VERSION_1_2) */ + +/* cl_gl_texture_info */ +#define CL_GL_TEXTURE_TARGET 0x2004 +#define CL_GL_MIPMAP_LEVEL 0x2005 + + +typedef cl_int CL_API_CALL +clGetGLContextInfoKHR_t( + const cl_context_properties* properties, + cl_gl_context_info param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret); + +typedef clGetGLContextInfoKHR_t * +clGetGLContextInfoKHR_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_mem CL_API_CALL +clCreateFromGLBuffer_t( + cl_context context, + cl_mem_flags flags, + cl_GLuint bufobj, + cl_int* errcode_ret); + +typedef clCreateFromGLBuffer_t * +clCreateFromGLBuffer_fn CL_API_SUFFIX__VERSION_1_0; + +#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetGLContextInfoKHR( + const cl_context_properties* properties, + cl_gl_context_info param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateFromGLBuffer( + cl_context context, + cl_mem_flags flags, + cl_GLuint bufobj, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0; + +#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +#if defined(CL_VERSION_1_2) + +typedef cl_mem CL_API_CALL +clCreateFromGLTexture_t( + cl_context context, + cl_mem_flags flags, + cl_GLenum target, + cl_GLint miplevel, + cl_GLuint texture, + cl_int* errcode_ret); + +typedef clCreateFromGLTexture_t * +clCreateFromGLTexture_fn CL_API_SUFFIX__VERSION_1_2; + +#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateFromGLTexture( + cl_context context, + cl_mem_flags flags, + cl_GLenum target, + cl_GLint miplevel, + cl_GLuint texture, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_2; + +#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +#endif /* 
defined(CL_VERSION_1_2) */ + + +typedef cl_mem CL_API_CALL +clCreateFromGLRenderbuffer_t( + cl_context context, + cl_mem_flags flags, + cl_GLuint renderbuffer, + cl_int* errcode_ret); + +typedef clCreateFromGLRenderbuffer_t * +clCreateFromGLRenderbuffer_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL +clGetGLObjectInfo_t( + cl_mem memobj, + cl_gl_object_type* gl_object_type, + cl_GLuint* gl_object_name); + +typedef clGetGLObjectInfo_t * +clGetGLObjectInfo_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL +clGetGLTextureInfo_t( + cl_mem memobj, + cl_gl_texture_info param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret); + +typedef clGetGLTextureInfo_t * +clGetGLTextureInfo_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL +clEnqueueAcquireGLObjects_t( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueAcquireGLObjects_t * +clEnqueueAcquireGLObjects_fn CL_API_SUFFIX__VERSION_1_0; + +typedef cl_int CL_API_CALL +clEnqueueReleaseGLObjects_t( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueReleaseGLObjects_t * +clEnqueueReleaseGLObjects_fn CL_API_SUFFIX__VERSION_1_0; + +#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateFromGLRenderbuffer( + cl_context context, + cl_mem_flags flags, + cl_GLuint renderbuffer, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetGLObjectInfo( + cl_mem memobj, + cl_gl_object_type* gl_object_type, + cl_GLuint* gl_object_name) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetGLTextureInfo( + cl_mem memobj, + cl_gl_texture_info param_name, + size_t 
param_value_size, + void* param_value, + size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueAcquireGLObjects( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueReleaseGLObjects( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) CL_API_SUFFIX__VERSION_1_0; + +#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/* OpenCL 1.0 APIs that were deprecated in OpenCL 1.2 */ + +typedef cl_mem CL_API_CALL +clCreateFromGLTexture2D_t( + cl_context context, + cl_mem_flags flags, + cl_GLenum target, + cl_GLint miplevel, + cl_GLuint texture, + cl_int* errcode_ret); + +typedef clCreateFromGLTexture2D_t * +clCreateFromGLTexture2D_fn CL_API_SUFFIX__VERSION_1_1_DEPRECATED; + +typedef cl_mem CL_API_CALL +clCreateFromGLTexture3D_t( + cl_context context, + cl_mem_flags flags, + cl_GLenum target, + cl_GLint miplevel, + cl_GLuint texture, + cl_int* errcode_ret); + +typedef clCreateFromGLTexture3D_t * +clCreateFromGLTexture3D_fn CL_API_SUFFIX__VERSION_1_1_DEPRECATED; + +#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateFromGLTexture2D( + cl_context context, + cl_mem_flags flags, + cl_GLenum target, + cl_GLint miplevel, + cl_GLuint texture, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_1_DEPRECATED; + +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateFromGLTexture3D( + cl_context context, + cl_mem_flags flags, + cl_GLenum target, + cl_GLint miplevel, + cl_GLuint texture, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_1_DEPRECATED; + +#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + 
+/*************************************************************** +* cl_khr_gl_event +***************************************************************/ +#define cl_khr_gl_event 1 +#define CL_KHR_GL_EVENT_EXTENSION_NAME \ + "cl_khr_gl_event" + + +#define CL_KHR_GL_EVENT_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +typedef struct __GLsync * cl_GLsync; + +/* cl_command_type */ +#define CL_COMMAND_GL_FENCE_SYNC_OBJECT_KHR 0x200D + + +typedef cl_event CL_API_CALL +clCreateEventFromGLsyncKHR_t( + cl_context context, + cl_GLsync sync, + cl_int* errcode_ret); + +typedef clCreateEventFromGLsyncKHR_t * +clCreateEventFromGLsyncKHR_fn CL_API_SUFFIX__VERSION_1_1; + +#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_event CL_API_CALL +clCreateEventFromGLsyncKHR( + cl_context context, + cl_GLsync sync, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_1; + +#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +/*************************************************************** +* cl_khr_gl_depth_images +***************************************************************/ +#define cl_khr_gl_depth_images 1 +#define CL_KHR_GL_DEPTH_IMAGES_EXTENSION_NAME \ + "cl_khr_gl_depth_images" + + +#define CL_KHR_GL_DEPTH_IMAGES_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/* cl_channel_order */ +#define CL_DEPTH_STENCIL 0x10BE + +/* cl_channel_type */ +#define CL_UNORM_INT24 0x10DF + +/*************************************************************** +* cl_khr_gl_msaa_sharing +***************************************************************/ +#define cl_khr_gl_msaa_sharing 1 +#define CL_KHR_GL_MSAA_SHARING_EXTENSION_NAME \ + "cl_khr_gl_msaa_sharing" + + +#define CL_KHR_GL_MSAA_SHARING_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +/* cl_gl_texture_info */ +#define CL_GL_NUM_SAMPLES 0x2012 + +/*************************************************************** +* cl_intel_sharing_format_query_gl +***************************************************************/ +#define 
cl_intel_sharing_format_query_gl 1 +#define CL_INTEL_SHARING_FORMAT_QUERY_GL_EXTENSION_NAME \ + "cl_intel_sharing_format_query_gl" + + +#define CL_INTEL_SHARING_FORMAT_QUERY_GL_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* when cl_khr_gl_sharing is supported */ + +typedef cl_int CL_API_CALL +clGetSupportedGLTextureFormatsINTEL_t( + cl_context context, + cl_mem_flags flags, + cl_mem_object_type image_type, + cl_uint num_entries, + cl_GLenum* gl_formats, + cl_uint* num_texture_formats); + +typedef clGetSupportedGLTextureFormatsINTEL_t * +clGetSupportedGLTextureFormatsINTEL_fn ; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetSupportedGLTextureFormatsINTEL( + cl_context context, + cl_mem_flags flags, + cl_mem_object_type image_type, + cl_uint num_entries, + cl_GLenum* gl_formats, + cl_uint* num_texture_formats) ; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +#ifdef __cplusplus +} +#endif + +#endif /* OPENCL_CL_GL_H_ */ diff --git a/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_gl_ext.h b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_gl_ext.h new file mode 100644 index 000000000..b5da13eb6 --- /dev/null +++ b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_gl_ext.h @@ -0,0 +1,18 @@ +/******************************************************************************* + * Copyright (c) 2008-2021 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + ******************************************************************************/ + +#include +#pragma message("The extensions in cl_gl_ext.h have been moved into cl_gl.h. Please include cl_gl.h directly.") diff --git a/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_half.h b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_half.h new file mode 100644 index 000000000..ecc422332 --- /dev/null +++ b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_half.h @@ -0,0 +1,440 @@ +/******************************************************************************* + * Copyright (c) 2019-2020 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + ******************************************************************************/ + +/** + * This is a header-only utility library that provides OpenCL host code with + * routines for converting to/from cl_half values. + * + * Example usage: + * + * #include + * ... + * cl_half h = cl_half_from_float(0.5f, CL_HALF_RTE); + * cl_float f = cl_half_to_float(h); + */ + +#ifndef OPENCL_CL_HALF_H +#define OPENCL_CL_HALF_H + +#include + +#include + +#ifdef __cplusplus +extern "C" { +#endif + + +/** + * Rounding mode used when converting to cl_half. 
+ */ +typedef enum +{ + CL_HALF_RTE, // round to nearest even + CL_HALF_RTZ, // round towards zero + CL_HALF_RTP, // round towards positive infinity + CL_HALF_RTN, // round towards negative infinity +} cl_half_rounding_mode; + + +/* Private utility macros. */ +#define CL_HALF_EXP_MASK 0x7C00 +#define CL_HALF_MAX_FINITE_MAG 0x7BFF + + +/* + * Utility to deal with values that overflow when converting to half precision. + */ +static inline cl_half cl_half_handle_overflow(cl_half_rounding_mode rounding_mode, + uint16_t sign) +{ + if (rounding_mode == CL_HALF_RTZ) + { + // Round overflow towards zero -> largest finite number (preserving sign) + return (sign << 15) | CL_HALF_MAX_FINITE_MAG; + } + else if (rounding_mode == CL_HALF_RTP && sign) + { + // Round negative overflow towards positive infinity -> most negative finite number + return (1 << 15) | CL_HALF_MAX_FINITE_MAG; + } + else if (rounding_mode == CL_HALF_RTN && !sign) + { + // Round positive overflow towards negative infinity -> largest finite number + return CL_HALF_MAX_FINITE_MAG; + } + + // Overflow to infinity + return (sign << 15) | CL_HALF_EXP_MASK; +} + +/* + * Utility to deal with values that underflow when converting to half precision. + */ +static inline cl_half cl_half_handle_underflow(cl_half_rounding_mode rounding_mode, + uint16_t sign) +{ + if (rounding_mode == CL_HALF_RTP && !sign) + { + // Round underflow towards positive infinity -> smallest positive value + return (sign << 15) | 1; + } + else if (rounding_mode == CL_HALF_RTN && sign) + { + // Round underflow towards negative infinity -> largest negative value + return (sign << 15) | 1; + } + + // Flush to zero + return (sign << 15); +} + + +/** + * Convert a cl_float to a cl_half. 
+ */ +static inline cl_half cl_half_from_float(cl_float f, cl_half_rounding_mode rounding_mode) +{ + // Type-punning to get direct access to underlying bits + union + { + cl_float f; + uint32_t i; + } f32; + f32.f = f; + + // Extract sign bit + uint16_t sign = f32.i >> 31; + + // Extract FP32 exponent and mantissa + uint32_t f_exp = (f32.i >> (CL_FLT_MANT_DIG - 1)) & 0xFF; + uint32_t f_mant = f32.i & ((1 << (CL_FLT_MANT_DIG - 1)) - 1); + + // Remove FP32 exponent bias + int32_t exp = f_exp - CL_FLT_MAX_EXP + 1; + + // Add FP16 exponent bias + uint16_t h_exp = (uint16_t)(exp + CL_HALF_MAX_EXP - 1); + + // Position of the bit that will become the FP16 mantissa LSB + uint32_t lsb_pos = CL_FLT_MANT_DIG - CL_HALF_MANT_DIG; + + // Check for NaN / infinity + if (f_exp == 0xFF) + { + if (f_mant) + { + // NaN -> propagate mantissa and silence it + uint16_t h_mant = (uint16_t)(f_mant >> lsb_pos); + h_mant |= 0x200; + return (sign << 15) | CL_HALF_EXP_MASK | h_mant; + } + else + { + // Infinity -> zero mantissa + return (sign << 15) | CL_HALF_EXP_MASK; + } + } + + // Check for zero + if (!f_exp && !f_mant) + { + return (sign << 15); + } + + // Check for overflow + if (exp >= CL_HALF_MAX_EXP) + { + return cl_half_handle_overflow(rounding_mode, sign); + } + + // Check for underflow + if (exp < (CL_HALF_MIN_EXP - CL_HALF_MANT_DIG - 1)) + { + return cl_half_handle_underflow(rounding_mode, sign); + } + + // Check for value that will become denormal + if (exp < -14) + { + // Denormal -> include the implicit 1 from the FP32 mantissa + h_exp = 0; + f_mant |= 1 << (CL_FLT_MANT_DIG - 1); + + // Mantissa shift amount depends on exponent + lsb_pos = -exp + (CL_FLT_MANT_DIG - 25); + } + + // Generate FP16 mantissa by shifting FP32 mantissa + uint16_t h_mant = (uint16_t)(f_mant >> lsb_pos); + + // Check whether we need to round + uint32_t halfway = 1 << (lsb_pos - 1); + uint32_t mask = (halfway << 1) - 1; + switch (rounding_mode) + { + case CL_HALF_RTE: + if ((f_mant & mask) > halfway) + { 
+ // More than halfway -> round up + h_mant += 1; + } + else if ((f_mant & mask) == halfway) + { + // Exactly halfway -> round to nearest even + if (h_mant & 0x1) + h_mant += 1; + } + break; + case CL_HALF_RTZ: + // Mantissa has already been truncated -> do nothing + break; + case CL_HALF_RTP: + if ((f_mant & mask) && !sign) + { + // Round positive numbers up + h_mant += 1; + } + break; + case CL_HALF_RTN: + if ((f_mant & mask) && sign) + { + // Round negative numbers down + h_mant += 1; + } + break; + } + + // Check for mantissa overflow + if (h_mant & 0x400) + { + h_exp += 1; + h_mant = 0; + } + + return (sign << 15) | (h_exp << 10) | h_mant; +} + + +/** + * Convert a cl_double to a cl_half. + */ +static inline cl_half cl_half_from_double(cl_double d, cl_half_rounding_mode rounding_mode) +{ + // Type-punning to get direct access to underlying bits + union + { + cl_double d; + uint64_t i; + } f64; + f64.d = d; + + // Extract sign bit + uint16_t sign = f64.i >> 63; + + // Extract FP64 exponent and mantissa + uint64_t d_exp = (f64.i >> (CL_DBL_MANT_DIG - 1)) & 0x7FF; + uint64_t d_mant = f64.i & (((uint64_t)1 << (CL_DBL_MANT_DIG - 1)) - 1); + + // Remove FP64 exponent bias + int64_t exp = d_exp - CL_DBL_MAX_EXP + 1; + + // Add FP16 exponent bias + uint16_t h_exp = (uint16_t)(exp + CL_HALF_MAX_EXP - 1); + + // Position of the bit that will become the FP16 mantissa LSB + uint32_t lsb_pos = CL_DBL_MANT_DIG - CL_HALF_MANT_DIG; + + // Check for NaN / infinity + if (d_exp == 0x7FF) + { + if (d_mant) + { + // NaN -> propagate mantissa and silence it + uint16_t h_mant = (uint16_t)(d_mant >> lsb_pos); + h_mant |= 0x200; + return (sign << 15) | CL_HALF_EXP_MASK | h_mant; + } + else + { + // Infinity -> zero mantissa + return (sign << 15) | CL_HALF_EXP_MASK; + } + } + + // Check for zero + if (!d_exp && !d_mant) + { + return (sign << 15); + } + + // Check for overflow + if (exp >= CL_HALF_MAX_EXP) + { + return cl_half_handle_overflow(rounding_mode, sign); + } + + // Check for 
underflow + if (exp < (CL_HALF_MIN_EXP - CL_HALF_MANT_DIG - 1)) + { + return cl_half_handle_underflow(rounding_mode, sign); + } + + // Check for value that will become denormal + if (exp < -14) + { + // Include the implicit 1 from the FP64 mantissa + h_exp = 0; + d_mant |= (uint64_t)1 << (CL_DBL_MANT_DIG - 1); + + // Mantissa shift amount depends on exponent + lsb_pos = (uint32_t)(-exp + (CL_DBL_MANT_DIG - 25)); + } + + // Generate FP16 mantissa by shifting FP64 mantissa + uint16_t h_mant = (uint16_t)(d_mant >> lsb_pos); + + // Check whether we need to round + uint64_t halfway = (uint64_t)1 << (lsb_pos - 1); + uint64_t mask = (halfway << 1) - 1; + switch (rounding_mode) + { + case CL_HALF_RTE: + if ((d_mant & mask) > halfway) + { + // More than halfway -> round up + h_mant += 1; + } + else if ((d_mant & mask) == halfway) + { + // Exactly halfway -> round to nearest even + if (h_mant & 0x1) + h_mant += 1; + } + break; + case CL_HALF_RTZ: + // Mantissa has already been truncated -> do nothing + break; + case CL_HALF_RTP: + if ((d_mant & mask) && !sign) + { + // Round positive numbers up + h_mant += 1; + } + break; + case CL_HALF_RTN: + if ((d_mant & mask) && sign) + { + // Round negative numbers down + h_mant += 1; + } + break; + } + + // Check for mantissa overflow + if (h_mant & 0x400) + { + h_exp += 1; + h_mant = 0; + } + + return (sign << 15) | (h_exp << 10) | h_mant; +} + + +/** + * Convert a cl_half to a cl_float. 
+ */ +static inline cl_float cl_half_to_float(cl_half h) +{ + // Type-punning to get direct access to underlying bits + union + { + cl_float f; + uint32_t i; + } f32; + + // Extract sign bit + uint16_t sign = h >> 15; + + // Extract FP16 exponent and mantissa + uint16_t h_exp = (h >> (CL_HALF_MANT_DIG - 1)) & 0x1F; + uint16_t h_mant = h & 0x3FF; + + // Remove FP16 exponent bias + int32_t exp = h_exp - CL_HALF_MAX_EXP + 1; + + // Add FP32 exponent bias + uint32_t f_exp = exp + CL_FLT_MAX_EXP - 1; + + // Check for NaN / infinity + if (h_exp == 0x1F) + { + if (h_mant) + { + // NaN -> propagate mantissa and silence it + uint32_t f_mant = h_mant << (CL_FLT_MANT_DIG - CL_HALF_MANT_DIG); + f_mant |= 0x400000; + f32.i = (sign << 31) | 0x7F800000 | f_mant; + return f32.f; + } + else + { + // Infinity -> zero mantissa + f32.i = (sign << 31) | 0x7F800000; + return f32.f; + } + } + + // Check for zero / denormal + if (h_exp == 0) + { + if (h_mant == 0) + { + // Zero -> zero exponent + f_exp = 0; + } + else + { + // Denormal -> normalize it + // - Shift mantissa to make most-significant 1 implicit + // - Adjust exponent accordingly + uint32_t shift = 0; + while ((h_mant & 0x400) == 0) + { + h_mant <<= 1; + shift++; + } + h_mant &= 0x3FF; + f_exp -= shift - 1; + } + } + + f32.i = (sign << 31) | (f_exp << 23) | (h_mant << 13); + return f32.f; +} + + +#undef CL_HALF_EXP_MASK +#undef CL_HALF_MAX_FINITE_MAG + + +#ifdef __cplusplus +} +#endif + + +#endif /* OPENCL_CL_HALF_H */ diff --git a/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_icd.h b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_icd.h new file mode 100644 index 000000000..bce60ea25 --- /dev/null +++ b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_icd.h @@ -0,0 +1,342 @@ +/******************************************************************************* + * Copyright (c) 2019-2020 The Khronos Group Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + ******************************************************************************/ + +#ifndef OPENCL_CL_ICD_H +#define OPENCL_CL_ICD_H + +#include +#include +#include +#include +#include +#include + +#if defined(_WIN32) +#include +#include +#include +#endif + +#if defined(_WIN32) && defined(_MSC_VER) && __CL_HAS_ANON_STRUCT__ + /* Disable warning C4201: nonstandard extension used : nameless struct/union */ + #pragma warning( push ) + #pragma warning( disable : 4201 ) +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/* Vendor dispatch table structure */ + +typedef struct _cl_icd_dispatch { + /* OpenCL 1.0 */ +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ union { +#endif + clGetPlatformIDs_t *clGetPlatformIDs; +#if __CL_HAS_ANON_STRUCT__ + /* Set to CL_ICD2_TAG_KHR for cl_khr_icd 2.0.0 */ + intptr_t clGetPlatformIDs_icd2_tag; + }; +#endif + clGetPlatformInfo_t *clGetPlatformInfo; + clGetDeviceIDs_t *clGetDeviceIDs; + clGetDeviceInfo_t *clGetDeviceInfo; + clCreateContext_t *clCreateContext; + clCreateContextFromType_t *clCreateContextFromType; + clRetainContext_t *clRetainContext; + clReleaseContext_t *clReleaseContext; + clGetContextInfo_t *clGetContextInfo; + clCreateCommandQueue_t *clCreateCommandQueue; + clRetainCommandQueue_t *clRetainCommandQueue; + clReleaseCommandQueue_t *clReleaseCommandQueue; + clGetCommandQueueInfo_t *clGetCommandQueueInfo; + clSetCommandQueueProperty_t 
*clSetCommandQueueProperty; + clCreateBuffer_t *clCreateBuffer; + clCreateImage2D_t *clCreateImage2D; + clCreateImage3D_t *clCreateImage3D; + clRetainMemObject_t *clRetainMemObject; + clReleaseMemObject_t *clReleaseMemObject; + clGetSupportedImageFormats_t *clGetSupportedImageFormats; + clGetMemObjectInfo_t *clGetMemObjectInfo; + clGetImageInfo_t *clGetImageInfo; + clCreateSampler_t *clCreateSampler; + clRetainSampler_t *clRetainSampler; + clReleaseSampler_t *clReleaseSampler; + clGetSamplerInfo_t *clGetSamplerInfo; + clCreateProgramWithSource_t *clCreateProgramWithSource; + clCreateProgramWithBinary_t *clCreateProgramWithBinary; + clRetainProgram_t *clRetainProgram; + clReleaseProgram_t *clReleaseProgram; + clBuildProgram_t *clBuildProgram; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ union { +#endif + clUnloadCompiler_t *clUnloadCompiler; +#if __CL_HAS_ANON_STRUCT__ + /* Set to CL_ICD2_TAG_KHR for cl_khr_icd 2.0.0 */ + intptr_t clUnloadCompiler_icd2_tag; + }; +#endif + clGetProgramInfo_t *clGetProgramInfo; + clGetProgramBuildInfo_t *clGetProgramBuildInfo; + clCreateKernel_t *clCreateKernel; + clCreateKernelsInProgram_t *clCreateKernelsInProgram; + clRetainKernel_t *clRetainKernel; + clReleaseKernel_t *clReleaseKernel; + clSetKernelArg_t *clSetKernelArg; + clGetKernelInfo_t *clGetKernelInfo; + clGetKernelWorkGroupInfo_t *clGetKernelWorkGroupInfo; + clWaitForEvents_t *clWaitForEvents; + clGetEventInfo_t *clGetEventInfo; + clRetainEvent_t *clRetainEvent; + clReleaseEvent_t *clReleaseEvent; + clGetEventProfilingInfo_t *clGetEventProfilingInfo; + clFlush_t *clFlush; + clFinish_t *clFinish; + clEnqueueReadBuffer_t *clEnqueueReadBuffer; + clEnqueueWriteBuffer_t *clEnqueueWriteBuffer; + clEnqueueCopyBuffer_t *clEnqueueCopyBuffer; + clEnqueueReadImage_t *clEnqueueReadImage; + clEnqueueWriteImage_t *clEnqueueWriteImage; + clEnqueueCopyImage_t *clEnqueueCopyImage; + clEnqueueCopyImageToBuffer_t *clEnqueueCopyImageToBuffer; + clEnqueueCopyBufferToImage_t 
*clEnqueueCopyBufferToImage; + clEnqueueMapBuffer_t *clEnqueueMapBuffer; + clEnqueueMapImage_t *clEnqueueMapImage; + clEnqueueUnmapMemObject_t *clEnqueueUnmapMemObject; + clEnqueueNDRangeKernel_t *clEnqueueNDRangeKernel; + clEnqueueTask_t *clEnqueueTask; + clEnqueueNativeKernel_t *clEnqueueNativeKernel; + clEnqueueMarker_t *clEnqueueMarker; + clEnqueueWaitForEvents_t *clEnqueueWaitForEvents; + clEnqueueBarrier_t *clEnqueueBarrier; + clGetExtensionFunctionAddress_t *clGetExtensionFunctionAddress; + clCreateFromGLBuffer_t *clCreateFromGLBuffer; + clCreateFromGLTexture2D_t *clCreateFromGLTexture2D; + clCreateFromGLTexture3D_t *clCreateFromGLTexture3D; + clCreateFromGLRenderbuffer_t *clCreateFromGLRenderbuffer; + clGetGLObjectInfo_t *clGetGLObjectInfo; + clGetGLTextureInfo_t *clGetGLTextureInfo; + clEnqueueAcquireGLObjects_t *clEnqueueAcquireGLObjects; + clEnqueueReleaseGLObjects_t *clEnqueueReleaseGLObjects; + clGetGLContextInfoKHR_t *clGetGLContextInfoKHR; + + /* cl_khr_d3d10_sharing */ +#ifdef _WIN32 + clGetDeviceIDsFromD3D10KHR_t *clGetDeviceIDsFromD3D10KHR; + clCreateFromD3D10BufferKHR_t *clCreateFromD3D10BufferKHR; + clCreateFromD3D10Texture2DKHR_t *clCreateFromD3D10Texture2DKHR; + clCreateFromD3D10Texture3DKHR_t *clCreateFromD3D10Texture3DKHR; + clEnqueueAcquireD3D10ObjectsKHR_t *clEnqueueAcquireD3D10ObjectsKHR; + clEnqueueReleaseD3D10ObjectsKHR_t *clEnqueueReleaseD3D10ObjectsKHR; +#else + void *clGetDeviceIDsFromD3D10KHR; + void *clCreateFromD3D10BufferKHR; + void *clCreateFromD3D10Texture2DKHR; + void *clCreateFromD3D10Texture3DKHR; + void *clEnqueueAcquireD3D10ObjectsKHR; + void *clEnqueueReleaseD3D10ObjectsKHR; +#endif + + /* OpenCL 1.1 */ +#ifdef CL_VERSION_1_1 + clSetEventCallback_t *clSetEventCallback; + clCreateSubBuffer_t *clCreateSubBuffer; + clSetMemObjectDestructorCallback_t *clSetMemObjectDestructorCallback; + clCreateUserEvent_t *clCreateUserEvent; + clSetUserEventStatus_t *clSetUserEventStatus; + clEnqueueReadBufferRect_t *clEnqueueReadBufferRect; 
+ clEnqueueWriteBufferRect_t *clEnqueueWriteBufferRect; + clEnqueueCopyBufferRect_t *clEnqueueCopyBufferRect; +#else + void *clSetEventCallback; + void *clCreateSubBuffer; + void *clSetMemObjectDestructorCallback; + void *clCreateUserEvent; + void *clSetUserEventStatus; + void *clEnqueueReadBufferRect; + void *clEnqueueWriteBufferRect; + void *clEnqueueCopyBufferRect; +#endif + + /* cl_ext_device_fission */ + clCreateSubDevicesEXT_t *clCreateSubDevicesEXT; + clRetainDeviceEXT_t *clRetainDeviceEXT; + clReleaseDeviceEXT_t *clReleaseDeviceEXT; + + /* cl_khr_gl_event */ + clCreateEventFromGLsyncKHR_t *clCreateEventFromGLsyncKHR; + + /* OpenCL 1.2 */ +#ifdef CL_VERSION_1_2 + clCreateSubDevices_t *clCreateSubDevices; + clRetainDevice_t *clRetainDevice; + clReleaseDevice_t *clReleaseDevice; + clCreateImage_t *clCreateImage; + clCreateProgramWithBuiltInKernels_t *clCreateProgramWithBuiltInKernels; + clCompileProgram_t *clCompileProgram; + clLinkProgram_t *clLinkProgram; + clUnloadPlatformCompiler_t *clUnloadPlatformCompiler; + clGetKernelArgInfo_t *clGetKernelArgInfo; + clEnqueueFillBuffer_t *clEnqueueFillBuffer; + clEnqueueFillImage_t *clEnqueueFillImage; + clEnqueueMigrateMemObjects_t *clEnqueueMigrateMemObjects; + clEnqueueMarkerWithWaitList_t *clEnqueueMarkerWithWaitList; + clEnqueueBarrierWithWaitList_t *clEnqueueBarrierWithWaitList; + clGetExtensionFunctionAddressForPlatform_t * + clGetExtensionFunctionAddressForPlatform; + clCreateFromGLTexture_t *clCreateFromGLTexture; +#else + void *clCreateSubDevices; + void *clRetainDevice; + void *clReleaseDevice; + void *clCreateImage; + void *clCreateProgramWithBuiltInKernels; + void *clCompileProgram; + void *clLinkProgram; + void *clUnloadPlatformCompiler; + void *clGetKernelArgInfo; + void *clEnqueueFillBuffer; + void *clEnqueueFillImage; + void *clEnqueueMigrateMemObjects; + void *clEnqueueMarkerWithWaitList; + void *clEnqueueBarrierWithWaitList; + void *clGetExtensionFunctionAddressForPlatform; + void 
*clCreateFromGLTexture; +#endif + + /* cl_khr_d3d11_sharing and cl_khr_dx9_media_sharing */ +#ifdef _WIN32 + clGetDeviceIDsFromD3D11KHR_t *clGetDeviceIDsFromD3D11KHR; + clCreateFromD3D11BufferKHR_t *clCreateFromD3D11BufferKHR; + clCreateFromD3D11Texture2DKHR_t *clCreateFromD3D11Texture2DKHR; + clCreateFromD3D11Texture3DKHR_t *clCreateFromD3D11Texture3DKHR; + clCreateFromDX9MediaSurfaceKHR_t *clCreateFromDX9MediaSurfaceKHR; + clEnqueueAcquireD3D11ObjectsKHR_t *clEnqueueAcquireD3D11ObjectsKHR; + clEnqueueReleaseD3D11ObjectsKHR_t *clEnqueueReleaseD3D11ObjectsKHR; + clGetDeviceIDsFromDX9MediaAdapterKHR_t * + clGetDeviceIDsFromDX9MediaAdapterKHR; + clEnqueueAcquireDX9MediaSurfacesKHR_t * + clEnqueueAcquireDX9MediaSurfacesKHR; + clEnqueueReleaseDX9MediaSurfacesKHR_t * + clEnqueueReleaseDX9MediaSurfacesKHR; +#else + void *clGetDeviceIDsFromD3D11KHR; + void *clCreateFromD3D11BufferKHR; + void *clCreateFromD3D11Texture2DKHR; + void *clCreateFromD3D11Texture3DKHR; + void *clCreateFromDX9MediaSurfaceKHR; + void *clEnqueueAcquireD3D11ObjectsKHR; + void *clEnqueueReleaseD3D11ObjectsKHR; + void *clGetDeviceIDsFromDX9MediaAdapterKHR; + void *clEnqueueAcquireDX9MediaSurfacesKHR; + void *clEnqueueReleaseDX9MediaSurfacesKHR; +#endif + + /* cl_khr_egl_image */ + clCreateFromEGLImageKHR_t *clCreateFromEGLImageKHR; + clEnqueueAcquireEGLObjectsKHR_t *clEnqueueAcquireEGLObjectsKHR; + clEnqueueReleaseEGLObjectsKHR_t *clEnqueueReleaseEGLObjectsKHR; + + /* cl_khr_egl_event */ + clCreateEventFromEGLSyncKHR_t *clCreateEventFromEGLSyncKHR; + + /* OpenCL 2.0 */ +#ifdef CL_VERSION_2_0 + clCreateCommandQueueWithProperties_t *clCreateCommandQueueWithProperties; + clCreatePipe_t *clCreatePipe; + clGetPipeInfo_t *clGetPipeInfo; + clSVMAlloc_t *clSVMAlloc; + clSVMFree_t *clSVMFree; + clEnqueueSVMFree_t *clEnqueueSVMFree; + clEnqueueSVMMemcpy_t *clEnqueueSVMMemcpy; + clEnqueueSVMMemFill_t *clEnqueueSVMMemFill; + clEnqueueSVMMap_t *clEnqueueSVMMap; + clEnqueueSVMUnmap_t *clEnqueueSVMUnmap; + 
clCreateSamplerWithProperties_t *clCreateSamplerWithProperties; + clSetKernelArgSVMPointer_t *clSetKernelArgSVMPointer; + clSetKernelExecInfo_t *clSetKernelExecInfo; +#else + void *clCreateCommandQueueWithProperties; + void *clCreatePipe; + void *clGetPipeInfo; + void *clSVMAlloc; + void *clSVMFree; + void *clEnqueueSVMFree; + void *clEnqueueSVMMemcpy; + void *clEnqueueSVMMemFill; + void *clEnqueueSVMMap; + void *clEnqueueSVMUnmap; + void *clCreateSamplerWithProperties; + void *clSetKernelArgSVMPointer; + void *clSetKernelExecInfo; +#endif + + /* cl_khr_sub_groups */ + clGetKernelSubGroupInfoKHR_t *clGetKernelSubGroupInfoKHR; + + /* OpenCL 2.1 */ +#ifdef CL_VERSION_2_1 + clCloneKernel_t *clCloneKernel; + clCreateProgramWithIL_t *clCreateProgramWithIL; + clEnqueueSVMMigrateMem_t *clEnqueueSVMMigrateMem; + clGetDeviceAndHostTimer_t *clGetDeviceAndHostTimer; + clGetHostTimer_t *clGetHostTimer; + clGetKernelSubGroupInfo_t *clGetKernelSubGroupInfo; + clSetDefaultDeviceCommandQueue_t *clSetDefaultDeviceCommandQueue; +#else + void *clCloneKernel; + void *clCreateProgramWithIL; + void *clEnqueueSVMMigrateMem; + void *clGetDeviceAndHostTimer; + void *clGetHostTimer; + void *clGetKernelSubGroupInfo; + void *clSetDefaultDeviceCommandQueue; +#endif + + /* OpenCL 2.2 */ +#ifdef CL_VERSION_2_2 + clSetProgramReleaseCallback_t *clSetProgramReleaseCallback; + clSetProgramSpecializationConstant_t *clSetProgramSpecializationConstant; +#else + void *clSetProgramReleaseCallback; + void *clSetProgramSpecializationConstant; +#endif + + /* OpenCL 3.0 */ +#ifdef CL_VERSION_3_0 + clCreateBufferWithProperties_t *clCreateBufferWithProperties; + clCreateImageWithProperties_t *clCreateImageWithProperties; + clSetContextDestructorCallback_t *clSetContextDestructorCallback; +#else + void *clCreateBufferWithProperties; + void *clCreateImageWithProperties; + void *clSetContextDestructorCallback; +#endif + +} cl_icd_dispatch; + +#ifdef __cplusplus +} +#endif + +#if defined(_WIN32) && 
defined(_MSC_VER) && __CL_HAS_ANON_STRUCT__ + #pragma warning( pop ) +#endif + +#endif /* #ifndef OPENCL_CL_ICD_H */ diff --git a/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_layer.h b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_layer.h new file mode 100644 index 000000000..245f7b532 --- /dev/null +++ b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_layer.h @@ -0,0 +1,125 @@ +/******************************************************************************* + * Copyright (c) 2008-2023 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + ******************************************************************************/ + +#ifndef OPENCL_CL_LAYER_H_ +#define OPENCL_CL_LAYER_H_ + +/* +** This header is generated from the Khronos OpenCL XML API Registry. 
+*/ + +#include + +#include + +/* CL_NO_PROTOTYPES implies CL_NO_EXTENSION_PROTOTYPES: */ +#if defined(CL_NO_PROTOTYPES) && !defined(CL_NO_EXTENSION_PROTOTYPES) +#define CL_NO_EXTENSION_PROTOTYPES +#endif + +/* CL_NO_EXTENSION_PROTOTYPES implies + CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES and + CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES: */ +#if defined(CL_NO_EXTENSION_PROTOTYPES) && \ + !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) +#define CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES +#endif +#if defined(CL_NO_EXTENSION_PROTOTYPES) && \ + !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) +#define CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/*************************************************************** +* cl_loader_layers +***************************************************************/ +#define cl_loader_layers 1 +#define CL_LOADER_LAYERS_EXTENSION_NAME \ + "cl_loader_layers" + + +#define CL_LOADER_LAYERS_EXTENSION_VERSION CL_MAKE_VERSION(1, 0, 0) + +typedef cl_uint cl_layer_info; +typedef cl_uint cl_layer_api_version; + +/* cl_layer_info */ +#define CL_LAYER_API_VERSION 0x4240 +#define CL_LAYER_NAME 0x4241 + +/* Misc API enums */ +#define CL_LAYER_API_VERSION_100 100 + + +typedef cl_int CL_API_CALL +clGetLayerInfo_t( + cl_layer_info param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret); + +typedef clGetLayerInfo_t * +clGetLayerInfo_fn ; + +typedef cl_int CL_API_CALL +clInitLayer_t( + cl_uint num_entries, + const cl_icd_dispatch* target_dispatch, + cl_uint* num_entries_ret, + const cl_icd_dispatch** layer_dispatch_ret); + +typedef clInitLayer_t * +clInitLayer_fn ; + +/* +** The function pointer typedefs prefixed with "pfn_" are provided for +** compatibility with earlier versions of the headers. New code is +** encouraged to use the function pointer typedefs that are suffixed with +** "_fn" instead, for consistency. 
+*/ + +typedef clGetLayerInfo_t * +pfn_clGetLayerInfo ; + +typedef clInitLayer_t * +pfn_clInitLayer ; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetLayerInfo( + cl_layer_info param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret) ; + +extern CL_API_ENTRY cl_int CL_API_CALL +clInitLayer( + cl_uint num_entries, + const cl_icd_dispatch* target_dispatch, + cl_uint* num_entries_ret, + const cl_icd_dispatch** layer_dispatch_ret) ; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +#ifdef __cplusplus +} +#endif + +#endif /* OPENCL_CL_LAYER_H_ */ diff --git a/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_platform.h b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_platform.h new file mode 100644 index 000000000..5f92d6faa --- /dev/null +++ b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_platform.h @@ -0,0 +1,1407 @@ +/******************************************************************************* + * Copyright (c) 2008-2020 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ ******************************************************************************/ + +#ifndef __CL_PLATFORM_H +#define __CL_PLATFORM_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(_WIN32) + #if !defined(CL_API_ENTRY) + #define CL_API_ENTRY + #endif + #if !defined(CL_API_CALL) + #define CL_API_CALL __stdcall + #endif + #if !defined(CL_CALLBACK) + #define CL_CALLBACK __stdcall + #endif +#else + #if !defined(CL_API_ENTRY) + #define CL_API_ENTRY + #endif + #if !defined(CL_API_CALL) + #define CL_API_CALL + #endif + #if !defined(CL_CALLBACK) + #define CL_CALLBACK + #endif +#endif + +/* + * Deprecation flags refer to the last version of the header in which the + * feature was not deprecated. + * + * E.g. VERSION_1_1_DEPRECATED means the feature is present in 1.1 without + * deprecation but is deprecated in versions later than 1.1. + */ + +#ifndef CL_API_SUFFIX_USER +#define CL_API_SUFFIX_USER +#endif + +#ifndef CL_API_PREFIX_USER +#define CL_API_PREFIX_USER +#endif + +#define CL_API_SUFFIX_COMMON CL_API_SUFFIX_USER +#define CL_API_PREFIX_COMMON CL_API_PREFIX_USER + +#define CL_API_SUFFIX__VERSION_1_0 CL_API_SUFFIX_COMMON +#define CL_API_SUFFIX__VERSION_1_1 CL_API_SUFFIX_COMMON +#define CL_API_SUFFIX__VERSION_1_2 CL_API_SUFFIX_COMMON +#define CL_API_SUFFIX__VERSION_2_0 CL_API_SUFFIX_COMMON +#define CL_API_SUFFIX__VERSION_2_1 CL_API_SUFFIX_COMMON +#define CL_API_SUFFIX__VERSION_2_2 CL_API_SUFFIX_COMMON +#define CL_API_SUFFIX__VERSION_3_0 CL_API_SUFFIX_COMMON +#define CL_API_SUFFIX__EXPERIMENTAL CL_API_SUFFIX_COMMON + + +#ifdef __GNUC__ + #define CL_API_SUFFIX_DEPRECATED __attribute__((deprecated)) + #define CL_API_PREFIX_DEPRECATED +#elif defined(_MSC_VER) && !defined(__clang__) + #define CL_API_SUFFIX_DEPRECATED + #define CL_API_PREFIX_DEPRECATED __declspec(deprecated) +#else + #define CL_API_SUFFIX_DEPRECATED + #define CL_API_PREFIX_DEPRECATED +#endif + +#ifdef CL_USE_DEPRECATED_OPENCL_1_0_APIS + #define CL_API_SUFFIX__VERSION_1_0_DEPRECATED 
CL_API_SUFFIX_COMMON + #define CL_API_PREFIX__VERSION_1_0_DEPRECATED CL_API_PREFIX_COMMON +#else + #define CL_API_SUFFIX__VERSION_1_0_DEPRECATED CL_API_SUFFIX_COMMON CL_API_SUFFIX_DEPRECATED + #define CL_API_PREFIX__VERSION_1_0_DEPRECATED CL_API_PREFIX_COMMON CL_API_PREFIX_DEPRECATED +#endif + +#ifdef CL_USE_DEPRECATED_OPENCL_1_1_APIS + #define CL_API_SUFFIX__VERSION_1_1_DEPRECATED CL_API_SUFFIX_COMMON + #define CL_API_PREFIX__VERSION_1_1_DEPRECATED CL_API_PREFIX_COMMON +#else + #define CL_API_SUFFIX__VERSION_1_1_DEPRECATED CL_API_SUFFIX_COMMON CL_API_SUFFIX_DEPRECATED + #define CL_API_PREFIX__VERSION_1_1_DEPRECATED CL_API_PREFIX_COMMON CL_API_PREFIX_DEPRECATED +#endif + +#ifdef CL_USE_DEPRECATED_OPENCL_1_2_APIS + #define CL_API_SUFFIX__VERSION_1_2_DEPRECATED CL_API_SUFFIX_COMMON + #define CL_API_PREFIX__VERSION_1_2_DEPRECATED CL_API_PREFIX_COMMON +#else + #define CL_API_SUFFIX__VERSION_1_2_DEPRECATED CL_API_SUFFIX_COMMON CL_API_SUFFIX_DEPRECATED + #define CL_API_PREFIX__VERSION_1_2_DEPRECATED CL_API_PREFIX_COMMON CL_API_PREFIX_DEPRECATED + #endif + +#ifdef CL_USE_DEPRECATED_OPENCL_2_0_APIS + #define CL_API_SUFFIX__VERSION_2_0_DEPRECATED CL_API_SUFFIX_COMMON + #define CL_API_PREFIX__VERSION_2_0_DEPRECATED CL_API_PREFIX_COMMON +#else + #define CL_API_SUFFIX__VERSION_2_0_DEPRECATED CL_API_SUFFIX_COMMON CL_API_SUFFIX_DEPRECATED + #define CL_API_PREFIX__VERSION_2_0_DEPRECATED CL_API_PREFIX_COMMON CL_API_PREFIX_DEPRECATED +#endif + +#ifdef CL_USE_DEPRECATED_OPENCL_2_1_APIS + #define CL_API_SUFFIX__VERSION_2_1_DEPRECATED CL_API_SUFFIX_COMMON + #define CL_API_PREFIX__VERSION_2_1_DEPRECATED CL_API_PREFIX_COMMON +#else + #define CL_API_SUFFIX__VERSION_2_1_DEPRECATED CL_API_SUFFIX_COMMON CL_API_SUFFIX_DEPRECATED + #define CL_API_PREFIX__VERSION_2_1_DEPRECATED CL_API_PREFIX_COMMON CL_API_PREFIX_DEPRECATED +#endif + +#ifdef CL_USE_DEPRECATED_OPENCL_2_2_APIS + #define CL_API_SUFFIX__VERSION_2_2_DEPRECATED CL_API_SUFFIX_COMMON + #define CL_API_PREFIX__VERSION_2_2_DEPRECATED 
CL_API_PREFIX_COMMON +#else + #define CL_API_SUFFIX__VERSION_2_2_DEPRECATED CL_API_SUFFIX_COMMON CL_API_SUFFIX_DEPRECATED + #define CL_API_PREFIX__VERSION_2_2_DEPRECATED CL_API_PREFIX_COMMON CL_API_PREFIX_DEPRECATED +#endif + +#if (defined (_WIN32) && defined(_MSC_VER)) + +#if defined(__clang__) +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wlanguage-extension-token" +#endif + +/* intptr_t is used in cl.h and provided by stddef.h in Visual C++, but not in clang */ +/* stdint.h was missing before Visual Studio 2010, include it for later versions and for clang */ +#if defined(__clang__) || _MSC_VER >= 1600 + #include +#endif + +/* scalar types */ +typedef signed __int8 cl_char; +typedef unsigned __int8 cl_uchar; +typedef signed __int16 cl_short; +typedef unsigned __int16 cl_ushort; +typedef signed __int32 cl_int; +typedef unsigned __int32 cl_uint; +typedef signed __int64 cl_long; +typedef unsigned __int64 cl_ulong; + +typedef unsigned __int16 cl_half; +typedef float cl_float; +typedef double cl_double; + +#if defined(__clang__) +#pragma clang diagnostic pop +#endif + +/* Macro names and corresponding values defined by OpenCL */ +#define CL_CHAR_BIT 8 +#define CL_SCHAR_MAX 127 +#define CL_SCHAR_MIN (-127-1) +#define CL_CHAR_MAX CL_SCHAR_MAX +#define CL_CHAR_MIN CL_SCHAR_MIN +#define CL_UCHAR_MAX 255 +#define CL_SHRT_MAX 32767 +#define CL_SHRT_MIN (-32767-1) +#define CL_USHRT_MAX 65535 +#define CL_INT_MAX 2147483647 +#define CL_INT_MIN (-2147483647-1) +#define CL_UINT_MAX 0xffffffffU +#define CL_LONG_MAX ((cl_long) 0x7FFFFFFFFFFFFFFFLL) +#define CL_LONG_MIN ((cl_long) -0x7FFFFFFFFFFFFFFFLL - 1LL) +#define CL_ULONG_MAX ((cl_ulong) 0xFFFFFFFFFFFFFFFFULL) + +#define CL_FLT_DIG 6 +#define CL_FLT_MANT_DIG 24 +#define CL_FLT_MAX_10_EXP +38 +#define CL_FLT_MAX_EXP +128 +#define CL_FLT_MIN_10_EXP -37 +#define CL_FLT_MIN_EXP -125 +#define CL_FLT_RADIX 2 +#define CL_FLT_MAX 340282346638528859811704183484516925440.0f +#define CL_FLT_MIN 
1.175494350822287507969e-38f +#define CL_FLT_EPSILON 1.1920928955078125e-7f + +#define CL_HALF_DIG 3 +#define CL_HALF_MANT_DIG 11 +#define CL_HALF_MAX_10_EXP +4 +#define CL_HALF_MAX_EXP +16 +#define CL_HALF_MIN_10_EXP -4 +#define CL_HALF_MIN_EXP -13 +#define CL_HALF_RADIX 2 +#define CL_HALF_MAX 65504.0f +#define CL_HALF_MIN 6.103515625e-05f +#define CL_HALF_EPSILON 9.765625e-04f + +#define CL_DBL_DIG 15 +#define CL_DBL_MANT_DIG 53 +#define CL_DBL_MAX_10_EXP +308 +#define CL_DBL_MAX_EXP +1024 +#define CL_DBL_MIN_10_EXP -307 +#define CL_DBL_MIN_EXP -1021 +#define CL_DBL_RADIX 2 +#define CL_DBL_MAX 1.7976931348623158e+308 +#define CL_DBL_MIN 2.225073858507201383090e-308 +#define CL_DBL_EPSILON 2.220446049250313080847e-16 + +#define CL_M_E 2.7182818284590452354 +#define CL_M_LOG2E 1.4426950408889634074 +#define CL_M_LOG10E 0.43429448190325182765 +#define CL_M_LN2 0.69314718055994530942 +#define CL_M_LN10 2.30258509299404568402 +#define CL_M_PI 3.14159265358979323846 +#define CL_M_PI_2 1.57079632679489661923 +#define CL_M_PI_4 0.78539816339744830962 +#define CL_M_1_PI 0.31830988618379067154 +#define CL_M_2_PI 0.63661977236758134308 +#define CL_M_2_SQRTPI 1.12837916709551257390 +#define CL_M_SQRT2 1.41421356237309504880 +#define CL_M_SQRT1_2 0.70710678118654752440 + +#define CL_M_E_F 2.718281828f +#define CL_M_LOG2E_F 1.442695041f +#define CL_M_LOG10E_F 0.434294482f +#define CL_M_LN2_F 0.693147181f +#define CL_M_LN10_F 2.302585093f +#define CL_M_PI_F 3.141592654f +#define CL_M_PI_2_F 1.570796327f +#define CL_M_PI_4_F 0.785398163f +#define CL_M_1_PI_F 0.318309886f +#define CL_M_2_PI_F 0.636619772f +#define CL_M_2_SQRTPI_F 1.128379167f +#define CL_M_SQRT2_F 1.414213562f +#define CL_M_SQRT1_2_F 0.707106781f + +#define CL_NAN (CL_INFINITY - CL_INFINITY) +#define CL_HUGE_VALF ((cl_float) 1e50) +#define CL_HUGE_VAL ((cl_double) 1e500) +#define CL_MAXFLOAT CL_FLT_MAX +#define CL_INFINITY CL_HUGE_VALF + +#else + +#include + +/* scalar types */ +typedef int8_t cl_char; +typedef 
uint8_t cl_uchar; +typedef int16_t cl_short; +typedef uint16_t cl_ushort; +typedef int32_t cl_int; +typedef uint32_t cl_uint; +typedef int64_t cl_long; +typedef uint64_t cl_ulong; + +typedef uint16_t cl_half; +typedef float cl_float; +typedef double cl_double; + +/* Macro names and corresponding values defined by OpenCL */ +#define CL_CHAR_BIT 8 +#define CL_SCHAR_MAX 127 +#define CL_SCHAR_MIN (-127-1) +#define CL_CHAR_MAX CL_SCHAR_MAX +#define CL_CHAR_MIN CL_SCHAR_MIN +#define CL_UCHAR_MAX 255 +#define CL_SHRT_MAX 32767 +#define CL_SHRT_MIN (-32767-1) +#define CL_USHRT_MAX 65535 +#define CL_INT_MAX 2147483647 +#define CL_INT_MIN (-2147483647-1) +#define CL_UINT_MAX 0xffffffffU +#define CL_LONG_MAX ((cl_long) 0x7FFFFFFFFFFFFFFFLL) +#define CL_LONG_MIN ((cl_long) -0x7FFFFFFFFFFFFFFFLL - 1LL) +#define CL_ULONG_MAX ((cl_ulong) 0xFFFFFFFFFFFFFFFFULL) + +#define CL_FLT_DIG 6 +#define CL_FLT_MANT_DIG 24 +#define CL_FLT_MAX_10_EXP +38 +#define CL_FLT_MAX_EXP +128 +#define CL_FLT_MIN_10_EXP -37 +#define CL_FLT_MIN_EXP -125 +#define CL_FLT_RADIX 2 +#define CL_FLT_MAX 340282346638528859811704183484516925440.0f +#define CL_FLT_MIN 1.175494350822287507969e-38f +#define CL_FLT_EPSILON 1.1920928955078125e-7f + +#define CL_HALF_DIG 3 +#define CL_HALF_MANT_DIG 11 +#define CL_HALF_MAX_10_EXP +4 +#define CL_HALF_MAX_EXP +16 +#define CL_HALF_MIN_10_EXP -4 +#define CL_HALF_MIN_EXP -13 +#define CL_HALF_RADIX 2 +#define CL_HALF_MAX 65504.0f +#define CL_HALF_MIN 6.103515625e-05f +#define CL_HALF_EPSILON 9.765625e-04f + +#define CL_DBL_DIG 15 +#define CL_DBL_MANT_DIG 53 +#define CL_DBL_MAX_10_EXP +308 +#define CL_DBL_MAX_EXP +1024 +#define CL_DBL_MIN_10_EXP -307 +#define CL_DBL_MIN_EXP -1021 +#define CL_DBL_RADIX 2 +#define CL_DBL_MAX 
179769313486231570814527423731704356798070567525844996598917476803157260780028538760589558632766878171540458953514382464234321326889464182768467546703537516986049910576551282076245490090389328944075868508455133942304583236903222948165808559332123348274797826204144723168738177180919299881250404026184124858368.0 +#define CL_DBL_MIN 2.225073858507201383090e-308 +#define CL_DBL_EPSILON 2.220446049250313080847e-16 + +#define CL_M_E 2.7182818284590452354 +#define CL_M_LOG2E 1.4426950408889634074 +#define CL_M_LOG10E 0.43429448190325182765 +#define CL_M_LN2 0.69314718055994530942 +#define CL_M_LN10 2.30258509299404568402 +#define CL_M_PI 3.14159265358979323846 +#define CL_M_PI_2 1.57079632679489661923 +#define CL_M_PI_4 0.78539816339744830962 +#define CL_M_1_PI 0.31830988618379067154 +#define CL_M_2_PI 0.63661977236758134308 +#define CL_M_2_SQRTPI 1.12837916709551257390 +#define CL_M_SQRT2 1.41421356237309504880 +#define CL_M_SQRT1_2 0.70710678118654752440 + +#define CL_M_E_F 2.718281828f +#define CL_M_LOG2E_F 1.442695041f +#define CL_M_LOG10E_F 0.434294482f +#define CL_M_LN2_F 0.693147181f +#define CL_M_LN10_F 2.302585093f +#define CL_M_PI_F 3.141592654f +#define CL_M_PI_2_F 1.570796327f +#define CL_M_PI_4_F 0.785398163f +#define CL_M_1_PI_F 0.318309886f +#define CL_M_2_PI_F 0.636619772f +#define CL_M_2_SQRTPI_F 1.128379167f +#define CL_M_SQRT2_F 1.414213562f +#define CL_M_SQRT1_2_F 0.707106781f + +#if defined( __GNUC__ ) + #define CL_HUGE_VALF __builtin_huge_valf() + #define CL_HUGE_VAL __builtin_huge_val() + #define CL_NAN __builtin_nanf( "" ) +#else + #define CL_HUGE_VALF ((cl_float) 1e50) + #define CL_HUGE_VAL ((cl_double) 1e500) + float nanf( const char * ); + #define CL_NAN nanf( "" ) +#endif +#define CL_MAXFLOAT CL_FLT_MAX +#define CL_INFINITY CL_HUGE_VALF + +#endif + +#include + +/* + * Vector types + * + * Note: OpenCL requires that all types be naturally aligned. + * This means that vector types must be naturally aligned. 
+ * For example, a vector of four floats must be aligned to + * a 16 byte boundary (calculated as 4 * the natural 4-byte + * alignment of the float). The alignment qualifiers here + * will only function properly if your compiler supports them + * and if you don't actively work to defeat them. For example, + * in order for a cl_float4 to be 16 byte aligned in a struct, + * the start of the struct must itself be 16-byte aligned. + * + * Maintaining proper alignment is the user's responsibility. + */ + +/* Define basic vector types */ +#if defined( __VEC__ ) + #if !defined(__clang__) + #include /* may be omitted depending on compiler. AltiVec spec provides no way to detect whether the header is required. */ + #endif + typedef __vector unsigned char __cl_uchar16; + typedef __vector signed char __cl_char16; + typedef __vector unsigned short __cl_ushort8; + typedef __vector signed short __cl_short8; + typedef __vector unsigned int __cl_uint4; + typedef __vector signed int __cl_int4; + typedef __vector float __cl_float4; + #define __CL_UCHAR16__ 1 + #define __CL_CHAR16__ 1 + #define __CL_USHORT8__ 1 + #define __CL_SHORT8__ 1 + #define __CL_UINT4__ 1 + #define __CL_INT4__ 1 + #define __CL_FLOAT4__ 1 +#endif + +#if defined( __SSE__ ) + #if defined( __MINGW64__ ) + #include + #else + #include + #endif + #if defined( __GNUC__ ) + typedef float __cl_float4 __attribute__((vector_size(16))); + #else + typedef __m128 __cl_float4; + #endif + #define __CL_FLOAT4__ 1 +#endif + +#if defined( __SSE2__ ) + #if defined( __MINGW64__ ) + #include + #else + #include + #endif + #if defined( __GNUC__ ) + typedef cl_uchar __cl_uchar16 __attribute__((vector_size(16))); + typedef cl_char __cl_char16 __attribute__((vector_size(16))); + typedef cl_ushort __cl_ushort8 __attribute__((vector_size(16))); + typedef cl_short __cl_short8 __attribute__((vector_size(16))); + typedef cl_uint __cl_uint4 __attribute__((vector_size(16))); + typedef cl_int __cl_int4 __attribute__((vector_size(16))); + typedef 
cl_ulong __cl_ulong2 __attribute__((vector_size(16))); + typedef cl_long __cl_long2 __attribute__((vector_size(16))); + typedef cl_double __cl_double2 __attribute__((vector_size(16))); + #else + typedef __m128i __cl_uchar16; + typedef __m128i __cl_char16; + typedef __m128i __cl_ushort8; + typedef __m128i __cl_short8; + typedef __m128i __cl_uint4; + typedef __m128i __cl_int4; + typedef __m128i __cl_ulong2; + typedef __m128i __cl_long2; + typedef __m128d __cl_double2; + #endif + #define __CL_UCHAR16__ 1 + #define __CL_CHAR16__ 1 + #define __CL_USHORT8__ 1 + #define __CL_SHORT8__ 1 + #define __CL_INT4__ 1 + #define __CL_UINT4__ 1 + #define __CL_ULONG2__ 1 + #define __CL_LONG2__ 1 + #define __CL_DOUBLE2__ 1 +#endif + +#if defined( __MMX__ ) + #include + #if defined( __GNUC__ ) + typedef cl_uchar __cl_uchar8 __attribute__((vector_size(8))); + typedef cl_char __cl_char8 __attribute__((vector_size(8))); + typedef cl_ushort __cl_ushort4 __attribute__((vector_size(8))); + typedef cl_short __cl_short4 __attribute__((vector_size(8))); + typedef cl_uint __cl_uint2 __attribute__((vector_size(8))); + typedef cl_int __cl_int2 __attribute__((vector_size(8))); + typedef cl_ulong __cl_ulong1 __attribute__((vector_size(8))); + typedef cl_long __cl_long1 __attribute__((vector_size(8))); + typedef cl_float __cl_float2 __attribute__((vector_size(8))); + #else + typedef __m64 __cl_uchar8; + typedef __m64 __cl_char8; + typedef __m64 __cl_ushort4; + typedef __m64 __cl_short4; + typedef __m64 __cl_uint2; + typedef __m64 __cl_int2; + typedef __m64 __cl_ulong1; + typedef __m64 __cl_long1; + typedef __m64 __cl_float2; + #endif + #define __CL_UCHAR8__ 1 + #define __CL_CHAR8__ 1 + #define __CL_USHORT4__ 1 + #define __CL_SHORT4__ 1 + #define __CL_INT2__ 1 + #define __CL_UINT2__ 1 + #define __CL_ULONG1__ 1 + #define __CL_LONG1__ 1 + #define __CL_FLOAT2__ 1 +#endif + +#if defined( __AVX__ ) + #if defined( __MINGW64__ ) + #include + #else + #include + #endif + #if defined( __GNUC__ ) + typedef 
cl_float __cl_float8 __attribute__((vector_size(32))); + typedef cl_double __cl_double4 __attribute__((vector_size(32))); + #else + typedef __m256 __cl_float8; + typedef __m256d __cl_double4; + #endif + #define __CL_FLOAT8__ 1 + #define __CL_DOUBLE4__ 1 +#endif + +/* Define capabilities for anonymous struct members. */ +#if !defined(__cplusplus) && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L +#define __CL_HAS_ANON_STRUCT__ 1 +#define __CL_ANON_STRUCT__ +#elif defined(_WIN32) && defined(_MSC_VER) && !defined(__STDC__) +#define __CL_HAS_ANON_STRUCT__ 1 +#define __CL_ANON_STRUCT__ +#elif defined(__GNUC__) && ! defined(__STRICT_ANSI__) +#define __CL_HAS_ANON_STRUCT__ 1 +#define __CL_ANON_STRUCT__ __extension__ +#elif defined(__clang__) +#define __CL_HAS_ANON_STRUCT__ 1 +#define __CL_ANON_STRUCT__ __extension__ +#else +#define __CL_HAS_ANON_STRUCT__ 0 +#define __CL_ANON_STRUCT__ +#endif + +#if defined(_WIN32) && defined(_MSC_VER) && __CL_HAS_ANON_STRUCT__ + /* Disable warning C4201: nonstandard extension used : nameless struct/union */ + #pragma warning( push ) + #pragma warning( disable : 4201 ) +#endif + +/* Define alignment keys */ +#if defined( __GNUC__ ) || defined(__INTEGRITY) + #define CL_ALIGNED(_x) __attribute__ ((aligned(_x))) +#elif defined( _WIN32) && (_MSC_VER) + /* Alignment keys neutered on windows because MSVC can't swallow function arguments with alignment requirements */ + /* http://msdn.microsoft.com/en-us/library/373ak2y1%28VS.71%29.aspx */ + /* #include */ + /* #define CL_ALIGNED(_x) _CRT_ALIGN(_x) */ + #define CL_ALIGNED(_x) +#else + #warning Need to implement some method to align data here + #define CL_ALIGNED(_x) +#endif + +/* Indicate whether .xyzw, .s0123 and .hi.lo are supported */ +#if __CL_HAS_ANON_STRUCT__ + /* .xyzw and .s0123...{f|F} are supported */ + #define CL_HAS_NAMED_VECTOR_FIELDS 1 + /* .hi and .lo are supported */ + #define CL_HAS_HI_LO_VECTOR_FIELDS 1 +#endif + +/* Define cl_vector types */ + +/* ---- cl_charn ---- 
*/ +typedef union +{ + cl_char CL_ALIGNED(2) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_char x, y; }; + __CL_ANON_STRUCT__ struct{ cl_char s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_char lo, hi; }; +#endif +#if defined( __CL_CHAR2__) + __cl_char2 v2; +#endif +}cl_char2; + +typedef union +{ + cl_char CL_ALIGNED(4) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_char x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_char s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_char2 lo, hi; }; +#endif +#if defined( __CL_CHAR2__) + __cl_char2 v2[2]; +#endif +#if defined( __CL_CHAR4__) + __cl_char4 v4; +#endif +}cl_char4; + +/* cl_char3 is identical in size, alignment and behavior to cl_char4. See section 6.1.5. */ +typedef cl_char4 cl_char3; + +typedef union +{ + cl_char CL_ALIGNED(8) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_char x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_char s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_char4 lo, hi; }; +#endif +#if defined( __CL_CHAR2__) + __cl_char2 v2[4]; +#endif +#if defined( __CL_CHAR4__) + __cl_char4 v4[2]; +#endif +#if defined( __CL_CHAR8__ ) + __cl_char8 v8; +#endif +}cl_char8; + +typedef union +{ + cl_char CL_ALIGNED(16) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_char x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_char s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_char8 lo, hi; }; +#endif +#if defined( __CL_CHAR2__) + __cl_char2 v2[8]; +#endif +#if defined( __CL_CHAR4__) + __cl_char4 v4[4]; +#endif +#if defined( __CL_CHAR8__ ) + __cl_char8 v8[2]; +#endif +#if defined( __CL_CHAR16__ ) + __cl_char16 v16; +#endif +}cl_char16; + + +/* ---- cl_ucharn ---- */ +typedef union +{ + cl_uchar CL_ALIGNED(2) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_uchar x, y; }; + 
__CL_ANON_STRUCT__ struct{ cl_uchar s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_uchar lo, hi; }; +#endif +#if defined( __cl_uchar2__) + __cl_uchar2 v2; +#endif +}cl_uchar2; + +typedef union +{ + cl_uchar CL_ALIGNED(4) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_uchar x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_uchar s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_uchar2 lo, hi; }; +#endif +#if defined( __CL_UCHAR2__) + __cl_uchar2 v2[2]; +#endif +#if defined( __CL_UCHAR4__) + __cl_uchar4 v4; +#endif +}cl_uchar4; + +/* cl_uchar3 is identical in size, alignment and behavior to cl_uchar4. See section 6.1.5. */ +typedef cl_uchar4 cl_uchar3; + +typedef union +{ + cl_uchar CL_ALIGNED(8) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_uchar x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_uchar s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_uchar4 lo, hi; }; +#endif +#if defined( __CL_UCHAR2__) + __cl_uchar2 v2[4]; +#endif +#if defined( __CL_UCHAR4__) + __cl_uchar4 v4[2]; +#endif +#if defined( __CL_UCHAR8__ ) + __cl_uchar8 v8; +#endif +}cl_uchar8; + +typedef union +{ + cl_uchar CL_ALIGNED(16) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_uchar x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_uchar s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_uchar8 lo, hi; }; +#endif +#if defined( __CL_UCHAR2__) + __cl_uchar2 v2[8]; +#endif +#if defined( __CL_UCHAR4__) + __cl_uchar4 v4[4]; +#endif +#if defined( __CL_UCHAR8__ ) + __cl_uchar8 v8[2]; +#endif +#if defined( __CL_UCHAR16__ ) + __cl_uchar16 v16; +#endif +}cl_uchar16; + + +/* ---- cl_shortn ---- */ +typedef union +{ + cl_short CL_ALIGNED(4) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_short x, y; }; + __CL_ANON_STRUCT__ struct{ cl_short s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_short 
lo, hi; }; +#endif +#if defined( __CL_SHORT2__) + __cl_short2 v2; +#endif +}cl_short2; + +typedef union +{ + cl_short CL_ALIGNED(8) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_short x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_short s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_short2 lo, hi; }; +#endif +#if defined( __CL_SHORT2__) + __cl_short2 v2[2]; +#endif +#if defined( __CL_SHORT4__) + __cl_short4 v4; +#endif +}cl_short4; + +/* cl_short3 is identical in size, alignment and behavior to cl_short4. See section 6.1.5. */ +typedef cl_short4 cl_short3; + +typedef union +{ + cl_short CL_ALIGNED(16) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_short x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_short s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_short4 lo, hi; }; +#endif +#if defined( __CL_SHORT2__) + __cl_short2 v2[4]; +#endif +#if defined( __CL_SHORT4__) + __cl_short4 v4[2]; +#endif +#if defined( __CL_SHORT8__ ) + __cl_short8 v8; +#endif +}cl_short8; + +typedef union +{ + cl_short CL_ALIGNED(32) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_short x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_short s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_short8 lo, hi; }; +#endif +#if defined( __CL_SHORT2__) + __cl_short2 v2[8]; +#endif +#if defined( __CL_SHORT4__) + __cl_short4 v4[4]; +#endif +#if defined( __CL_SHORT8__ ) + __cl_short8 v8[2]; +#endif +#if defined( __CL_SHORT16__ ) + __cl_short16 v16; +#endif +}cl_short16; + + +/* ---- cl_ushortn ---- */ +typedef union +{ + cl_ushort CL_ALIGNED(4) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_ushort x, y; }; + __CL_ANON_STRUCT__ struct{ cl_ushort s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_ushort lo, hi; }; +#endif +#if defined( __CL_USHORT2__) + __cl_ushort2 v2; +#endif 
+}cl_ushort2; + +typedef union +{ + cl_ushort CL_ALIGNED(8) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_ushort x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_ushort s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_ushort2 lo, hi; }; +#endif +#if defined( __CL_USHORT2__) + __cl_ushort2 v2[2]; +#endif +#if defined( __CL_USHORT4__) + __cl_ushort4 v4; +#endif +}cl_ushort4; + +/* cl_ushort3 is identical in size, alignment and behavior to cl_ushort4. See section 6.1.5. */ +typedef cl_ushort4 cl_ushort3; + +typedef union +{ + cl_ushort CL_ALIGNED(16) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_ushort x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_ushort s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_ushort4 lo, hi; }; +#endif +#if defined( __CL_USHORT2__) + __cl_ushort2 v2[4]; +#endif +#if defined( __CL_USHORT4__) + __cl_ushort4 v4[2]; +#endif +#if defined( __CL_USHORT8__ ) + __cl_ushort8 v8; +#endif +}cl_ushort8; + +typedef union +{ + cl_ushort CL_ALIGNED(32) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_ushort x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_ushort s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_ushort8 lo, hi; }; +#endif +#if defined( __CL_USHORT2__) + __cl_ushort2 v2[8]; +#endif +#if defined( __CL_USHORT4__) + __cl_ushort4 v4[4]; +#endif +#if defined( __CL_USHORT8__ ) + __cl_ushort8 v8[2]; +#endif +#if defined( __CL_USHORT16__ ) + __cl_ushort16 v16; +#endif +}cl_ushort16; + + +/* ---- cl_halfn ---- */ +typedef union +{ + cl_half CL_ALIGNED(4) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_half x, y; }; + __CL_ANON_STRUCT__ struct{ cl_half s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_half lo, hi; }; +#endif +#if defined( __CL_HALF2__) + __cl_half2 v2; +#endif +}cl_half2; + +typedef union +{ + cl_half CL_ALIGNED(8) 
s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_half x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_half s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_half2 lo, hi; }; +#endif +#if defined( __CL_HALF2__) + __cl_half2 v2[2]; +#endif +#if defined( __CL_HALF4__) + __cl_half4 v4; +#endif +}cl_half4; + +/* cl_half3 is identical in size, alignment and behavior to cl_half4. See section 6.1.5. */ +typedef cl_half4 cl_half3; + +typedef union +{ + cl_half CL_ALIGNED(16) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_half x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_half s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_half4 lo, hi; }; +#endif +#if defined( __CL_HALF2__) + __cl_half2 v2[4]; +#endif +#if defined( __CL_HALF4__) + __cl_half4 v4[2]; +#endif +#if defined( __CL_HALF8__ ) + __cl_half8 v8; +#endif +}cl_half8; + +typedef union +{ + cl_half CL_ALIGNED(32) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_half x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_half s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_half8 lo, hi; }; +#endif +#if defined( __CL_HALF2__) + __cl_half2 v2[8]; +#endif +#if defined( __CL_HALF4__) + __cl_half4 v4[4]; +#endif +#if defined( __CL_HALF8__ ) + __cl_half8 v8[2]; +#endif +#if defined( __CL_HALF16__ ) + __cl_half16 v16; +#endif +}cl_half16; + +/* ---- cl_intn ---- */ +typedef union +{ + cl_int CL_ALIGNED(8) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_int x, y; }; + __CL_ANON_STRUCT__ struct{ cl_int s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_int lo, hi; }; +#endif +#if defined( __CL_INT2__) + __cl_int2 v2; +#endif +}cl_int2; + +typedef union +{ + cl_int CL_ALIGNED(16) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_int x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_int s0, s1, s2, s3; }; + 
__CL_ANON_STRUCT__ struct{ cl_int2 lo, hi; }; +#endif +#if defined( __CL_INT2__) + __cl_int2 v2[2]; +#endif +#if defined( __CL_INT4__) + __cl_int4 v4; +#endif +}cl_int4; + +/* cl_int3 is identical in size, alignment and behavior to cl_int4. See section 6.1.5. */ +typedef cl_int4 cl_int3; + +typedef union +{ + cl_int CL_ALIGNED(32) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_int x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_int s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_int4 lo, hi; }; +#endif +#if defined( __CL_INT2__) + __cl_int2 v2[4]; +#endif +#if defined( __CL_INT4__) + __cl_int4 v4[2]; +#endif +#if defined( __CL_INT8__ ) + __cl_int8 v8; +#endif +}cl_int8; + +typedef union +{ + cl_int CL_ALIGNED(64) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_int x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_int s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_int8 lo, hi; }; +#endif +#if defined( __CL_INT2__) + __cl_int2 v2[8]; +#endif +#if defined( __CL_INT4__) + __cl_int4 v4[4]; +#endif +#if defined( __CL_INT8__ ) + __cl_int8 v8[2]; +#endif +#if defined( __CL_INT16__ ) + __cl_int16 v16; +#endif +}cl_int16; + + +/* ---- cl_uintn ---- */ +typedef union +{ + cl_uint CL_ALIGNED(8) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_uint x, y; }; + __CL_ANON_STRUCT__ struct{ cl_uint s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_uint lo, hi; }; +#endif +#if defined( __CL_UINT2__) + __cl_uint2 v2; +#endif +}cl_uint2; + +typedef union +{ + cl_uint CL_ALIGNED(16) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_uint x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_uint s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_uint2 lo, hi; }; +#endif +#if defined( __CL_UINT2__) + __cl_uint2 v2[2]; +#endif +#if defined( __CL_UINT4__) + __cl_uint4 v4; +#endif 
+}cl_uint4; + +/* cl_uint3 is identical in size, alignment and behavior to cl_uint4. See section 6.1.5. */ +typedef cl_uint4 cl_uint3; + +typedef union +{ + cl_uint CL_ALIGNED(32) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_uint x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_uint s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_uint4 lo, hi; }; +#endif +#if defined( __CL_UINT2__) + __cl_uint2 v2[4]; +#endif +#if defined( __CL_UINT4__) + __cl_uint4 v4[2]; +#endif +#if defined( __CL_UINT8__ ) + __cl_uint8 v8; +#endif +}cl_uint8; + +typedef union +{ + cl_uint CL_ALIGNED(64) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_uint x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_uint s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_uint8 lo, hi; }; +#endif +#if defined( __CL_UINT2__) + __cl_uint2 v2[8]; +#endif +#if defined( __CL_UINT4__) + __cl_uint4 v4[4]; +#endif +#if defined( __CL_UINT8__ ) + __cl_uint8 v8[2]; +#endif +#if defined( __CL_UINT16__ ) + __cl_uint16 v16; +#endif +}cl_uint16; + +/* ---- cl_longn ---- */ +typedef union +{ + cl_long CL_ALIGNED(16) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_long x, y; }; + __CL_ANON_STRUCT__ struct{ cl_long s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_long lo, hi; }; +#endif +#if defined( __CL_LONG2__) + __cl_long2 v2; +#endif +}cl_long2; + +typedef union +{ + cl_long CL_ALIGNED(32) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_long x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_long s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_long2 lo, hi; }; +#endif +#if defined( __CL_LONG2__) + __cl_long2 v2[2]; +#endif +#if defined( __CL_LONG4__) + __cl_long4 v4; +#endif +}cl_long4; + +/* cl_long3 is identical in size, alignment and behavior to cl_long4. See section 6.1.5. 
*/ +typedef cl_long4 cl_long3; + +typedef union +{ + cl_long CL_ALIGNED(64) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_long x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_long s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_long4 lo, hi; }; +#endif +#if defined( __CL_LONG2__) + __cl_long2 v2[4]; +#endif +#if defined( __CL_LONG4__) + __cl_long4 v4[2]; +#endif +#if defined( __CL_LONG8__ ) + __cl_long8 v8; +#endif +}cl_long8; + +typedef union +{ + cl_long CL_ALIGNED(128) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_long x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_long s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_long8 lo, hi; }; +#endif +#if defined( __CL_LONG2__) + __cl_long2 v2[8]; +#endif +#if defined( __CL_LONG4__) + __cl_long4 v4[4]; +#endif +#if defined( __CL_LONG8__ ) + __cl_long8 v8[2]; +#endif +#if defined( __CL_LONG16__ ) + __cl_long16 v16; +#endif +}cl_long16; + + +/* ---- cl_ulongn ---- */ +typedef union +{ + cl_ulong CL_ALIGNED(16) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_ulong x, y; }; + __CL_ANON_STRUCT__ struct{ cl_ulong s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_ulong lo, hi; }; +#endif +#if defined( __CL_ULONG2__) + __cl_ulong2 v2; +#endif +}cl_ulong2; + +typedef union +{ + cl_ulong CL_ALIGNED(32) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_ulong x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_ulong s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_ulong2 lo, hi; }; +#endif +#if defined( __CL_ULONG2__) + __cl_ulong2 v2[2]; +#endif +#if defined( __CL_ULONG4__) + __cl_ulong4 v4; +#endif +}cl_ulong4; + +/* cl_ulong3 is identical in size, alignment and behavior to cl_ulong4. See section 6.1.5. 
*/ +typedef cl_ulong4 cl_ulong3; + +typedef union +{ + cl_ulong CL_ALIGNED(64) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_ulong x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_ulong s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_ulong4 lo, hi; }; +#endif +#if defined( __CL_ULONG2__) + __cl_ulong2 v2[4]; +#endif +#if defined( __CL_ULONG4__) + __cl_ulong4 v4[2]; +#endif +#if defined( __CL_ULONG8__ ) + __cl_ulong8 v8; +#endif +}cl_ulong8; + +typedef union +{ + cl_ulong CL_ALIGNED(128) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_ulong x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_ulong s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_ulong8 lo, hi; }; +#endif +#if defined( __CL_ULONG2__) + __cl_ulong2 v2[8]; +#endif +#if defined( __CL_ULONG4__) + __cl_ulong4 v4[4]; +#endif +#if defined( __CL_ULONG8__ ) + __cl_ulong8 v8[2]; +#endif +#if defined( __CL_ULONG16__ ) + __cl_ulong16 v16; +#endif +}cl_ulong16; + + +/* --- cl_floatn ---- */ + +typedef union +{ + cl_float CL_ALIGNED(8) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_float x, y; }; + __CL_ANON_STRUCT__ struct{ cl_float s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_float lo, hi; }; +#endif +#if defined( __CL_FLOAT2__) + __cl_float2 v2; +#endif +}cl_float2; + +typedef union +{ + cl_float CL_ALIGNED(16) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_float x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_float s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_float2 lo, hi; }; +#endif +#if defined( __CL_FLOAT2__) + __cl_float2 v2[2]; +#endif +#if defined( __CL_FLOAT4__) + __cl_float4 v4; +#endif +}cl_float4; + +/* cl_float3 is identical in size, alignment and behavior to cl_float4. See section 6.1.5. 
*/ +typedef cl_float4 cl_float3; + +typedef union +{ + cl_float CL_ALIGNED(32) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_float x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_float s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_float4 lo, hi; }; +#endif +#if defined( __CL_FLOAT2__) + __cl_float2 v2[4]; +#endif +#if defined( __CL_FLOAT4__) + __cl_float4 v4[2]; +#endif +#if defined( __CL_FLOAT8__ ) + __cl_float8 v8; +#endif +}cl_float8; + +typedef union +{ + cl_float CL_ALIGNED(64) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_float x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_float s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_float8 lo, hi; }; +#endif +#if defined( __CL_FLOAT2__) + __cl_float2 v2[8]; +#endif +#if defined( __CL_FLOAT4__) + __cl_float4 v4[4]; +#endif +#if defined( __CL_FLOAT8__ ) + __cl_float8 v8[2]; +#endif +#if defined( __CL_FLOAT16__ ) + __cl_float16 v16; +#endif +}cl_float16; + +/* --- cl_doublen ---- */ + +typedef union +{ + cl_double CL_ALIGNED(16) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_double x, y; }; + __CL_ANON_STRUCT__ struct{ cl_double s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_double lo, hi; }; +#endif +#if defined( __CL_DOUBLE2__) + __cl_double2 v2; +#endif +}cl_double2; + +typedef union +{ + cl_double CL_ALIGNED(32) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_double x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_double s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_double2 lo, hi; }; +#endif +#if defined( __CL_DOUBLE2__) + __cl_double2 v2[2]; +#endif +#if defined( __CL_DOUBLE4__) + __cl_double4 v4; +#endif +}cl_double4; + +/* cl_double3 is identical in size, alignment and behavior to cl_double4. See section 6.1.5. 
*/ +typedef cl_double4 cl_double3; + +typedef union +{ + cl_double CL_ALIGNED(64) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_double x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_double s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_double4 lo, hi; }; +#endif +#if defined( __CL_DOUBLE2__) + __cl_double2 v2[4]; +#endif +#if defined( __CL_DOUBLE4__) + __cl_double4 v4[2]; +#endif +#if defined( __CL_DOUBLE8__ ) + __cl_double8 v8; +#endif +}cl_double8; + +typedef union +{ + cl_double CL_ALIGNED(128) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_double x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_double s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_double8 lo, hi; }; +#endif +#if defined( __CL_DOUBLE2__) + __cl_double2 v2[8]; +#endif +#if defined( __CL_DOUBLE4__) + __cl_double4 v4[4]; +#endif +#if defined( __CL_DOUBLE8__ ) + __cl_double8 v8[2]; +#endif +#if defined( __CL_DOUBLE16__ ) + __cl_double16 v16; +#endif +}cl_double16; + +/* Macro to facilitate debugging + * Usage: + * Place CL_PROGRAM_STRING_DEBUG_INFO on the line before the first line of your source. + * The first line ends with: CL_PROGRAM_STRING_DEBUG_INFO \" + * Each line thereafter of OpenCL C source must end with: \n\ + * The last line ends in "; + * + * Example: + * + * const char *my_program = CL_PROGRAM_STRING_DEBUG_INFO "\ + * kernel void foo( int a, float * b ) \n\ + * { \n\ + * // my comment \n\ + * *b[ get_global_id(0)] = a; \n\ + * } \n\ + * "; + * + * This should correctly set up the line, (column) and file information for your source + * string so you can do source level debugging. 
+ */ +#define __CL_STRINGIFY( _x ) # _x +#define _CL_STRINGIFY( _x ) __CL_STRINGIFY( _x ) +#define CL_PROGRAM_STRING_DEBUG_INFO "#line " _CL_STRINGIFY(__LINE__) " \"" __FILE__ "\" \n\n" + +#ifdef __cplusplus +} +#endif + +#if defined(_WIN32) && defined(_MSC_VER) && __CL_HAS_ANON_STRUCT__ + #pragma warning( pop ) +#endif + +#endif /* __CL_PLATFORM_H */ diff --git a/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_va_api_media_sharing_intel.h b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_va_api_media_sharing_intel.h new file mode 100644 index 000000000..9fb8863f2 --- /dev/null +++ b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_va_api_media_sharing_intel.h @@ -0,0 +1,220 @@ +/******************************************************************************* + * Copyright (c) 2008-2023 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + ******************************************************************************/ + +#ifndef OPENCL_CL_VA_API_MEDIA_SHARING_INTEL_H_ +#define OPENCL_CL_VA_API_MEDIA_SHARING_INTEL_H_ + +/* +** This header is generated from the Khronos OpenCL XML API Registry. 
+*/ + +#include + +#include + +/* CL_NO_PROTOTYPES implies CL_NO_EXTENSION_PROTOTYPES: */ +#if defined(CL_NO_PROTOTYPES) && !defined(CL_NO_EXTENSION_PROTOTYPES) +#define CL_NO_EXTENSION_PROTOTYPES +#endif + +/* CL_NO_EXTENSION_PROTOTYPES implies + CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES and + CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES: */ +#if defined(CL_NO_EXTENSION_PROTOTYPES) && \ + !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) +#define CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES +#endif +#if defined(CL_NO_EXTENSION_PROTOTYPES) && \ + !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) +#define CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/*************************************************************** +* cl_intel_sharing_format_query_va_api +***************************************************************/ +#define cl_intel_sharing_format_query_va_api 1 +#define CL_INTEL_SHARING_FORMAT_QUERY_VA_API_EXTENSION_NAME \ + "cl_intel_sharing_format_query_va_api" + + +#define CL_INTEL_SHARING_FORMAT_QUERY_VA_API_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +/* when cl_intel_va_api_media_sharing is supported */ + +typedef cl_int CL_API_CALL +clGetSupportedVA_APIMediaSurfaceFormatsINTEL_t( + cl_context context, + cl_mem_flags flags, + cl_mem_object_type image_type, + cl_uint plane, + cl_uint num_entries, + VAImageFormat* va_api_formats, + cl_uint* num_surface_formats); + +typedef clGetSupportedVA_APIMediaSurfaceFormatsINTEL_t * +clGetSupportedVA_APIMediaSurfaceFormatsINTEL_fn ; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetSupportedVA_APIMediaSurfaceFormatsINTEL( + cl_context context, + cl_mem_flags flags, + cl_mem_object_type image_type, + cl_uint plane, + cl_uint num_entries, + VAImageFormat* va_api_formats, + cl_uint* num_surface_formats) ; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + 
+/*************************************************************** +* cl_intel_va_api_media_sharing +***************************************************************/ +#define cl_intel_va_api_media_sharing 1 +#define CL_INTEL_VA_API_MEDIA_SHARING_EXTENSION_NAME \ + "cl_intel_va_api_media_sharing" + + +#define CL_INTEL_VA_API_MEDIA_SHARING_EXTENSION_VERSION CL_MAKE_VERSION(0, 0, 0) + +typedef cl_uint cl_va_api_device_source_intel; +typedef cl_uint cl_va_api_device_set_intel; + +/* Error codes */ +#define CL_INVALID_VA_API_MEDIA_ADAPTER_INTEL -1098 +#define CL_INVALID_VA_API_MEDIA_SURFACE_INTEL -1099 +#define CL_VA_API_MEDIA_SURFACE_ALREADY_ACQUIRED_INTEL -1100 +#define CL_VA_API_MEDIA_SURFACE_NOT_ACQUIRED_INTEL -1101 + +/* cl_va_api_device_source_intel */ +#define CL_VA_API_DISPLAY_INTEL 0x4094 + +/* cl_va_api_device_set_intel */ +#define CL_PREFERRED_DEVICES_FOR_VA_API_INTEL 0x4095 +#define CL_ALL_DEVICES_FOR_VA_API_INTEL 0x4096 + +/* cl_context_info */ +#define CL_CONTEXT_VA_API_DISPLAY_INTEL 0x4097 + +/* cl_mem_info */ +#define CL_MEM_VA_API_MEDIA_SURFACE_INTEL 0x4098 + +/* cl_image_info */ +#define CL_IMAGE_VA_API_PLANE_INTEL 0x4099 + +/* cl_command_type */ +#define CL_COMMAND_ACQUIRE_VA_API_MEDIA_SURFACES_INTEL 0x409A +#define CL_COMMAND_RELEASE_VA_API_MEDIA_SURFACES_INTEL 0x409B + + +typedef cl_int CL_API_CALL +clGetDeviceIDsFromVA_APIMediaAdapterINTEL_t( + cl_platform_id platform, + cl_va_api_device_source_intel media_adapter_type, + void* media_adapter, + cl_va_api_device_set_intel media_adapter_set, + cl_uint num_entries, + cl_device_id* devices, + cl_uint* num_devices); + +typedef clGetDeviceIDsFromVA_APIMediaAdapterINTEL_t * +clGetDeviceIDsFromVA_APIMediaAdapterINTEL_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_mem CL_API_CALL +clCreateFromVA_APIMediaSurfaceINTEL_t( + cl_context context, + cl_mem_flags flags, + VASurfaceID* surface, + cl_uint plane, + cl_int* errcode_ret); + +typedef clCreateFromVA_APIMediaSurfaceINTEL_t * 
+clCreateFromVA_APIMediaSurfaceINTEL_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL +clEnqueueAcquireVA_APIMediaSurfacesINTEL_t( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueAcquireVA_APIMediaSurfacesINTEL_t * +clEnqueueAcquireVA_APIMediaSurfacesINTEL_fn CL_API_SUFFIX__VERSION_1_2; + +typedef cl_int CL_API_CALL +clEnqueueReleaseVA_APIMediaSurfacesINTEL_t( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +typedef clEnqueueReleaseVA_APIMediaSurfacesINTEL_t * +clEnqueueReleaseVA_APIMediaSurfacesINTEL_fn CL_API_SUFFIX__VERSION_1_2; + +#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetDeviceIDsFromVA_APIMediaAdapterINTEL( + cl_platform_id platform, + cl_va_api_device_source_intel media_adapter_type, + void* media_adapter, + cl_va_api_device_set_intel media_adapter_set, + cl_uint num_entries, + cl_device_id* devices, + cl_uint* num_devices) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateFromVA_APIMediaSurfaceINTEL( + cl_context context, + cl_mem_flags flags, + VASurfaceID* surface, + cl_uint plane, + cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueAcquireVA_APIMediaSurfacesINTEL( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueReleaseVA_APIMediaSurfacesINTEL( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event) 
CL_API_SUFFIX__VERSION_1_2; + +#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */ + +#ifdef __cplusplus +} +#endif + +#endif /* OPENCL_CL_VA_API_MEDIA_SHARING_INTEL_H_ */ diff --git a/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_version.h b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_version.h new file mode 100644 index 000000000..3844938d5 --- /dev/null +++ b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/cl_version.h @@ -0,0 +1,81 @@ +/******************************************************************************* + * Copyright (c) 2018-2020 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + ******************************************************************************/ + +#ifndef __CL_VERSION_H +#define __CL_VERSION_H + +/* Detect which version to target */ +#if !defined(CL_TARGET_OPENCL_VERSION) +#pragma message("cl_version.h: CL_TARGET_OPENCL_VERSION is not defined. Defaulting to 300 (OpenCL 3.0)") +#define CL_TARGET_OPENCL_VERSION 300 +#endif +#if CL_TARGET_OPENCL_VERSION != 100 && \ + CL_TARGET_OPENCL_VERSION != 110 && \ + CL_TARGET_OPENCL_VERSION != 120 && \ + CL_TARGET_OPENCL_VERSION != 200 && \ + CL_TARGET_OPENCL_VERSION != 210 && \ + CL_TARGET_OPENCL_VERSION != 220 && \ + CL_TARGET_OPENCL_VERSION != 300 +#pragma message("cl_version: CL_TARGET_OPENCL_VERSION is not a valid value (100, 110, 120, 200, 210, 220, 300). 
Defaulting to 300 (OpenCL 3.0)") +#undef CL_TARGET_OPENCL_VERSION +#define CL_TARGET_OPENCL_VERSION 300 +#endif + + +/* OpenCL Version */ +#if CL_TARGET_OPENCL_VERSION >= 300 && !defined(CL_VERSION_3_0) +#define CL_VERSION_3_0 1 +#endif +#if CL_TARGET_OPENCL_VERSION >= 220 && !defined(CL_VERSION_2_2) +#define CL_VERSION_2_2 1 +#endif +#if CL_TARGET_OPENCL_VERSION >= 210 && !defined(CL_VERSION_2_1) +#define CL_VERSION_2_1 1 +#endif +#if CL_TARGET_OPENCL_VERSION >= 200 && !defined(CL_VERSION_2_0) +#define CL_VERSION_2_0 1 +#endif +#if CL_TARGET_OPENCL_VERSION >= 120 && !defined(CL_VERSION_1_2) +#define CL_VERSION_1_2 1 +#endif +#if CL_TARGET_OPENCL_VERSION >= 110 && !defined(CL_VERSION_1_1) +#define CL_VERSION_1_1 1 +#endif +#if CL_TARGET_OPENCL_VERSION >= 100 && !defined(CL_VERSION_1_0) +#define CL_VERSION_1_0 1 +#endif + +/* Allow deprecated APIs for older OpenCL versions. */ +#if CL_TARGET_OPENCL_VERSION <= 220 && !defined(CL_USE_DEPRECATED_OPENCL_2_2_APIS) +#define CL_USE_DEPRECATED_OPENCL_2_2_APIS +#endif +#if CL_TARGET_OPENCL_VERSION <= 210 && !defined(CL_USE_DEPRECATED_OPENCL_2_1_APIS) +#define CL_USE_DEPRECATED_OPENCL_2_1_APIS +#endif +#if CL_TARGET_OPENCL_VERSION <= 200 && !defined(CL_USE_DEPRECATED_OPENCL_2_0_APIS) +#define CL_USE_DEPRECATED_OPENCL_2_0_APIS +#endif +#if CL_TARGET_OPENCL_VERSION <= 120 && !defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS) +#define CL_USE_DEPRECATED_OPENCL_1_2_APIS +#endif +#if CL_TARGET_OPENCL_VERSION <= 110 && !defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS) +#define CL_USE_DEPRECATED_OPENCL_1_1_APIS +#endif +#if CL_TARGET_OPENCL_VERSION <= 100 && !defined(CL_USE_DEPRECATED_OPENCL_1_0_APIS) +#define CL_USE_DEPRECATED_OPENCL_1_0_APIS +#endif + +#endif /* __CL_VERSION_H */ diff --git a/mllm/backends/opencl/third_party/OpenCL-Headers/CL/opencl.h b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/opencl.h new file mode 100644 index 000000000..ef8dd1e03 --- /dev/null +++ b/mllm/backends/opencl/third_party/OpenCL-Headers/CL/opencl.h @@ 
-0,0 +1,32 @@ +/******************************************************************************* + * Copyright (c) 2008-2021 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + ******************************************************************************/ + +#ifndef __OPENCL_H +#define __OPENCL_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include + +#ifdef __cplusplus +} +#endif + +#endif /* __OPENCL_H */ diff --git a/mllm/backends/opencl/utils/OpenCLTools.hpp b/mllm/backends/opencl/utils/OpenCLTools.hpp new file mode 100644 index 000000000..45c031260 --- /dev/null +++ b/mllm/backends/opencl/utils/OpenCLTools.hpp @@ -0,0 +1,224 @@ +#ifndef OPENCL_TOOLS_HPP +#define OPENCL_TOOLS_HPP + +#include "Tensor.hpp" +#include "../OpenCLBackend.hpp" +#include + +namespace mllm { + +std::string inline get_kernel_path(const std::string ¤t_file, const std::string &relative_kernel_path) { + // 将源文件路径转换为 filesystem::path 对象 + std::filesystem::path source_path(current_file); + // 获取源文件所在的目录 + std::filesystem::path source_dir = source_path.parent_path(); + // 组合目录和相对内核路径,生成绝对路径 + std::filesystem::path kernel_path = source_dir / relative_kernel_path; + // 返回字符串格式的路径 + return kernel_path.string(); +} + +/** + * @brief 从一个已在设备上的Tensor获取一个可用于内核计算的Image2D句柄。 + * 该函数是算子内部使用的核心工具。 + * + * 工作流程: + * 1. 检查输入Tensor是否已经是Image2D类型,如果是,直接返回其句柄。 + * 2. 如果输入Tensor是Buffer类型,则在设备上创建一个临时的Image2D对象。 + * 3. 执行一次设备内的Buffer-to-Image内存拷贝。 + * 4. 
返回新创建的临时Image2D的句柄。 + * + * @param input_tensor 一个指向已在OpenCL设备上的Tensor的共享指针。 + * @param ocl_backend OpenCL后端实例,用于执行OpenCL命令。 + * @param temp_storage 一个Tensor的vector引用,用于存储函数内部创建的临时Image Tensor, + * 以确保其生命周期至少持续到内核执行完毕。调用者必须管理此vector的生命周期。 + * @return 一个可用于 clSetKernelArg 的 cl_mem 句柄(指向一个Image2D对象)。 + */ +static inline cl_mem get_image_from_tensor( + const std::shared_ptr &input_tensor, + OpenCLBackend *ocl_backend, + std::vector &temp_storage) { + auto &dev_mem = input_tensor->device_memory(); + + if (dev_mem.type == MEM_TYPE_IMAGE_2D) { + return ocl_backend->get_cl_mem(*input_tensor); + } + if (dev_mem.type != MEM_TYPE_BUFFER) { + throw std::runtime_error("Input must be a Buffer or Image type on device."); + } + + // ================== 零拷贝路径 ================== + // 条件:硬件支持扩展,且输入Buffer是带有正确行间距信息创建的。 + if (ocl_backend->is_image_from_buffer_supported() && dev_mem.image_row_pitch_in_bytes > 0) { + cl_image_format format = {CL_RGBA, CL_FLOAT}; + cl_image_desc desc = {}; + desc.image_type = CL_MEM_OBJECT_IMAGE2D; + desc.image_width = dev_mem.image_width; // 使用创建时保存的元数据 + desc.image_height = dev_mem.image_height; + desc.image_row_pitch = dev_mem.image_row_pitch_in_bytes; + desc.buffer = ocl_backend->get_cl_mem(*input_tensor); + + cl_int err; + cl_mem image_view = clCreateImage(ocl_backend->getContext(), CL_MEM_READ_ONLY, &format, &desc, nullptr, &err); + check_cl_error(err, "clCreateImage from buffer (Zero-Copy)"); + + Tensor wrapper_tensor(ocl_backend); + wrapper_tensor.device_memory().handle = image_view; + wrapper_tensor.device_memory().type = MEM_TYPE_IMAGE_2D; + temp_storage.push_back(std::move(wrapper_tensor)); + + return image_view; + } + + // ==================================================================================== + // 最终推荐的实现:直接、高效的内存拷贝路径 + // + // 这是在无法从源头控制Buffer创建方式时,最简单、最高效的解决方案。 + // ==================================================================================== + { + Tensor temp_image(input_tensor->batch(), input_tensor->head(), 
input_tensor->sequence(), input_tensor->dimension(), ocl_backend, false); + auto &img_mem = temp_image.device_memory(); + img_mem.type = MEM_TYPE_IMAGE_2D; + img_mem.image_width = input_tensor->dimension() / 4; + img_mem.image_height = input_tensor->batch() * input_tensor->head() * input_tensor->sequence(); + temp_image.alloc(); + + cl_mem src_buffer = ocl_backend->get_cl_mem(*input_tensor); + cl_mem dst_image = ocl_backend->get_cl_mem(temp_image); + + const size_t origin[3] = {0, 0, 0}; + const size_t region[3] = {img_mem.image_width, img_mem.image_height, 1}; + cl_int err = clEnqueueCopyBufferToImage( + ocl_backend->getQueue(), + src_buffer, dst_image, + 0, origin, region, + 0, nullptr, nullptr); + check_cl_error(err, "clEnqueueCopyBufferToImage (Fallback Copy)"); + + temp_storage.push_back(std::move(temp_image)); + return dst_image; + } +} + +/** + * @brief 将一个Tensor的设备内存从Buffer类型原地转换为Image2D类型。 + * + * 该函数直接修改传入Tensor的内部状态。它会分配一个新的Image2D内存, + * 将原始Buffer的数据拷贝过去,然后释放原始的Buffer,最后更新Tensor的内存类型信息。 + * 如果已经是Image2D,则不执行任何操作。 + * + * @param tensor 要转换的Tensor的引用。Tensor必须在OpenCL设备上。 + */ +static inline void tensorGlobal2Image(Tensor &tensor) { + auto ocl_backend = dynamic_cast(tensor.backend()); + if (!ocl_backend) { + throw std::runtime_error("Tensor backend is not OpenCLBackend for tensorGlobal2Image."); + } + + auto &dev_mem = tensor.device_memory(); + + // 如果已经是Image2D,则无需转换 + if (dev_mem.type == MEM_TYPE_IMAGE_2D) { + return; + } + + if (dev_mem.type != MEM_TYPE_BUFFER || dev_mem.handle == nullptr) { + throw std::runtime_error("tensorGlobal2Image requires a valid Buffer on the device."); + } + + if (tensor.dimension() % 4 != 0) { + throw std::runtime_error("Image2D conversion requires the dimension to be a multiple of 4."); + } + + // 1. 创建一个新的Image2D内存对象 + cl_image_format format = {CL_RGBA}; + format.image_channel_data_type = (tensor.dtype() == MLLM_TYPE_F32) ? 
CL_FLOAT : CL_HALF_FLOAT; + + cl_image_desc desc = {}; + desc.image_type = CL_MEM_OBJECT_IMAGE2D; + desc.image_width = tensor.dimension() / 4; + desc.image_height = tensor.batch() * tensor.head() * tensor.sequence(); + + cl_int err; + cl_mem new_image_handle = clCreateImage(ocl_backend->getContext(), CL_MEM_READ_WRITE, &format, &desc, nullptr, &err); + check_cl_error(err, "clCreateImage in tensorGlobal2Image"); + + // 2. 将数据从旧的Buffer拷贝到新的Image + cl_mem src_buffer_handle = static_cast(dev_mem.handle); + const size_t origin[3] = {0, 0, 0}; + const size_t region[3] = {desc.image_width, desc.image_height, 1}; + err = clEnqueueCopyBufferToImage( + ocl_backend->getQueue(), + src_buffer_handle, new_image_handle, + 0, origin, region, + 0, nullptr, nullptr); + check_cl_error(err, "clEnqueueCopyBufferToImage in tensorGlobal2Image"); + + // 3. 释放旧的Buffer内存 + clReleaseMemObject(src_buffer_handle); + + // 4. 更新Tensor的内部状态 + dev_mem.handle = new_image_handle; + dev_mem.type = MEM_TYPE_IMAGE_2D; + dev_mem.image_width = desc.image_width; + dev_mem.image_height = desc.image_height; +} + +/** + * @brief 将一个Tensor的设备内存从Image2D类型原地转换为Buffer类型。 + * + * 该函数直接修改传入Tensor的内部状态。它会分配一个新的Buffer内存, + * 将原始Image2D的数据拷贝过去,然后释放原始的Image,最后更新Tensor的内存类型信息。 + * 如果已经是Buffer,则不执行任何操作。 + * + * @param tensor 要转换的Tensor的引用。Tensor必须在OpenCL设备上。 + */ +static inline void tensorImage2Global(Tensor &tensor) { + auto ocl_backend = dynamic_cast(tensor.backend()); + if (!ocl_backend) { + throw std::runtime_error("Tensor backend is not OpenCLBackend for tensorImage2Global."); + } + + auto &dev_mem = tensor.device_memory(); + + // 如果已经是Buffer,则无需转换 + if (dev_mem.type == MEM_TYPE_BUFFER) { + return; + } + + if (dev_mem.type != MEM_TYPE_IMAGE_2D || dev_mem.handle == nullptr) { + throw std::runtime_error("tensorImage2Global requires a valid Image2D on the device."); + } + + // 1. 
创建一个新的Buffer内存对象 + size_t buffer_size = tensor.count() * tensor.dtypeSize(); + cl_int err; + cl_mem new_buffer_handle = clCreateBuffer(ocl_backend->getContext(), CL_MEM_READ_WRITE, buffer_size, nullptr, &err); + check_cl_error(err, "clCreateBuffer in tensorImage2Global"); + + // 2. 将数据从旧的Image拷贝到新的Buffer + cl_mem src_image_handle = static_cast(dev_mem.handle); + const size_t origin[3] = {0, 0, 0}; + const size_t region[3] = {dev_mem.image_width, dev_mem.image_height, 1}; + err = clEnqueueCopyImageToBuffer( + ocl_backend->getQueue(), + src_image_handle, new_buffer_handle, + origin, region, 0, + 0, nullptr, nullptr); + check_cl_error(err, "clEnqueueCopyImageToBuffer in tensorImage2Global"); + + // 3. 释放旧的Image内存 + clReleaseMemObject(src_image_handle); + + // 4. 更新Tensor的内部状态 + dev_mem.handle = new_buffer_handle; + dev_mem.type = MEM_TYPE_BUFFER; + // 清理Image相关的元数据 + dev_mem.image_width = 0; + dev_mem.image_height = 0; + dev_mem.image_row_pitch_in_bytes = 0; +} + +} // namespace mllm + +#endif // OPENCL_TOOLS_HPP \ No newline at end of file diff --git a/src/backends/qnn/CMakeLists.txt b/mllm/backends/qnn/CMakeLists.txt similarity index 54% rename from src/backends/qnn/CMakeLists.txt rename to mllm/backends/qnn/CMakeLists.txt index 1844b39d4..df1d22367 100644 --- a/src/backends/qnn/CMakeLists.txt +++ b/mllm/backends/qnn/CMakeLists.txt @@ -7,22 +7,6 @@ file(GLOB MLLM_QNN_SRC ${CMAKE_CURRENT_LIST_DIR}/*.hpp ${CMAKE_CURRENT_LIST_DIR}/*.h - ${CMAKE_CURRENT_LIST_DIR}/Log/*.cpp - ${CMAKE_CURRENT_LIST_DIR}/Log/*.hpp - - ${CMAKE_CURRENT_LIST_DIR}/PAL/include/PAL/*.hpp - ${CMAKE_CURRENT_LIST_DIR}/PAL/src/common/*.cpp - ${CMAKE_CURRENT_LIST_DIR}/PAL/src/linux/*.cpp - - ${CMAKE_CURRENT_LIST_DIR}/Utils/*.cpp - ${CMAKE_CURRENT_LIST_DIR}/Utils/*.hpp - - ${CMAKE_CURRENT_LIST_DIR}/WrapperUtils/*.cpp - ${CMAKE_CURRENT_LIST_DIR}/WrapperUtils/*.hpp - - ${CMAKE_CURRENT_LIST_DIR}/Model/*.cpp - ${CMAKE_CURRENT_LIST_DIR}/Model/*.hpp - ${CMAKE_CURRENT_LIST_DIR}/op/*.cpp 
${CMAKE_CURRENT_LIST_DIR}/op/*.hpp @@ -31,7 +15,7 @@ file(GLOB MLLM_QNN_SRC if (MLLM_OPENMP) find_package(OpenMP REQUIRED) if(OpenMP_FOUND) - message(STATUS "found openmp") + message(STATUS "found openmp for QNN") set(CMAKE_C_FLAGS ${CMAKE_C_FLAGS} ${OPENMP_C_FLAGS}) set(CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS} ${OPENMP_CXX_FLAGS}) else() @@ -41,28 +25,26 @@ endif() # import android ndk cmake toolchain if (ARM) - include(${ANDROID_NDK}/build/cmake/android.toolchain.cmake) add_definitions(-DQNN_ARM) message(STATUS "QNN ARM detected") endif() add_library( - MLLM_QNN + mllm_qnn OBJECT ${MLLM_QNN_SRC} ) if(OpenMP_FOUND) - message(STATUS "found openmp") if(ARM AND NOT APK) message(STATUS "[ARM] found openmp") - target_compile_options(MLLM_QNN PRIVATE -fopenmp) - target_link_libraries(MLLM_QNN PUBLIC -fopenmp -static-openmp) + target_compile_options(mllm_qnn PRIVATE -fopenmp) + target_link_libraries(mllm_qnn PUBLIC -fopenmp -static-openmp) else() - target_link_libraries(MLLM_QNN + target_link_libraries(mllm_qnn PUBLIC OpenMP::OpenMP_CXX ) endif() endif() -target_link_libraries(MLLM_QNN PUBLIC fmt::fmt-header-only) \ No newline at end of file +target_link_libraries(mllm_qnn PUBLIC fmt::fmt-header-only) \ No newline at end of file diff --git a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/config/LLaMAOpPackageHtp.xml b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/config/LLaMAOpPackageHtp.xml similarity index 91% rename from src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/config/LLaMAOpPackageHtp.xml rename to mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/config/LLaMAOpPackageHtp.xml index 259f786ef..4cf6e694d 100755 --- a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/config/LLaMAOpPackageHtp.xml +++ b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/config/LLaMAOpPackageHtp.xml @@ -680,6 +680,74 @@ Confidential and Proprietary - Qualcomm Technologies, Inc. 
HTP + + RoPESimple + + + LLaMA RoPE Simple + Only calculate: + value1 = in_value * cos_value - in_value_2 * sin_value and + value2 = in_value * sin_value + in_value_2 * cos_value. + + + + + in[0] + + input activation + + true + BACKEND_SPECIFIC + + 4D + NHWC + [N, C, H , W] + + + + + in[1] + + RoPE sin weights + + true + BACKEND_SPECIFIC + + 2D + [ seq, hidden state ] + + + + + in[2] + + RoPE cos weights + + true + BACKEND_SPECIFIC + + 2D + [ seq, hidden state ] + + + + + out[0] + + output activation + + true + BACKEND_SPECIFIC + + 4D + [N, C, H , W] + + + + + HTP + + LLaMADequantize @@ -729,6 +797,70 @@ Confidential and Proprietary - Qualcomm Technologies, Inc. HTP + + + LLaMADequantizeAdd + + + LLaMA Dequantize and Add + + + + + in[0] + + input activation + + true + BACKEND_SPECIFIC + + 4D + NHWC + [N, C, H , W] + + + + + in[1] + + input bias + + true + BACKEND_SPECIFIC + + 4D + NHWC + [N, C, H , W] + + + + + out[0] + + output activation + + true + BACKEND_SPECIFIC + + 4D + [N, C, H , W] + + + + + scale + true + QNN_DATATYPE_FLOAT_32 + + SCALAR + + N-1 + + + + HTP + + LLaMAQuantize @@ -1420,7 +1552,7 @@ Confidential and Proprietary - Qualcomm Technologies, Inc. out[0] QNN_DATATYPE_UFIXED_POINT_8 - QNN_DATATYPE_UFIXED_POINT_16 + QNN_DATATYPE_SFIXED_POINT_16 @@ -1554,16 +1686,39 @@ Confidential and Proprietary - Qualcomm Technologies, Inc. 
out[0] QNN_DATATYPE_SFIXED_POINT_8 + QNN_DATATYPE_SFIXED_POINT_16 - + LLaMADequantize in[0] QNN_DATATYPE_SFIXED_POINT_8 + QNN_DATATYPE_SFIXED_POINT_16 + + + out[0] + QNN_DATATYPE_FLOAT_16 + QNN_DATATYPE_FLOAT_32 + + + + + + LLaMADequantizeAdd + + + in[0] + QNN_DATATYPE_SFIXED_POINT_8 + QNN_DATATYPE_SFIXED_POINT_16 + + + in[1] + QNN_DATATYPE_FLOAT_16 + QNN_DATATYPE_FLOAT_32 out[0] diff --git a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/LLaMAPackageInterface.cpp b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/LLaMAPackageInterface.cpp similarity index 55% rename from src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/LLaMAPackageInterface.cpp rename to mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/LLaMAPackageInterface.cpp index de7261b56..a9a7456f6 100755 --- a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/LLaMAPackageInterface.cpp +++ b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/LLaMAPackageInterface.cpp @@ -19,39 +19,41 @@ BEGIN_PKG_OPS_OPTS_LIST() * registered to the HTP Core. 
* Append the latest OpName at the bottom */ -DECLARE_PKG_OPS_OPTS_LIST(PKG_IRoPE) -DECLARE_PKG_OPS_OPTS_LIST(PKG_LLaMALinear) -DECLARE_PKG_OPS_OPTS_LIST(PKG_SplitInput) -DECLARE_PKG_OPS_OPTS_LIST(PKG_LLaMAReLU) -DECLARE_PKG_OPS_OPTS_LIST(PKG_LLaMASuperSiLU) -DECLARE_PKG_OPS_OPTS_LIST(PKG_LLaMAQuantize) -DECLARE_PKG_OPS_OPTS_LIST(PKG_LLaMAMul) +DECLARE_PKG_OPS_OPTS_LIST(PKG_RMSNorm) DECLARE_PKG_OPS_OPTS_LIST(PKG_KVCache) +DECLARE_PKG_OPS_OPTS_LIST(PKG_LLaMADequantizeAdd) +DECLARE_PKG_OPS_OPTS_LIST(PKG_LLaMAMul) +DECLARE_PKG_OPS_OPTS_LIST(PKG_MergeOutput) +DECLARE_PKG_OPS_OPTS_LIST(PKG_LLaMAReLU) +DECLARE_PKG_OPS_OPTS_LIST(PKG_CausalMask) +DECLARE_PKG_OPS_OPTS_LIST(PKG_SiLU) DECLARE_PKG_OPS_OPTS_LIST(PKG_Attention) DECLARE_PKG_OPS_OPTS_LIST(PKG_QLayerNorm) +DECLARE_PKG_OPS_OPTS_LIST(PKG_RoPE) +DECLARE_PKG_OPS_OPTS_LIST(PKG_RoPESimple) +DECLARE_PKG_OPS_OPTS_LIST(PKG_WNop) DECLARE_PKG_OPS_OPTS_LIST(PKG_LLaMAAdd) -DECLARE_PKG_OPS_OPTS_LIST(PKG_CausalMask) +DECLARE_PKG_OPS_OPTS_LIST(PKG_IRoPE) +DECLARE_PKG_OPS_OPTS_LIST(PKG_LLaMALinear) +DECLARE_PKG_OPS_OPTS_LIST(PKG_SplitInput) DECLARE_PKG_OPS_OPTS_LIST(PKG_HeadMatmul) -DECLARE_PKG_OPS_OPTS_LIST(PKG_RoPE) DECLARE_PKG_OPS_OPTS_LIST(PKG_LLaMADequantize) -DECLARE_PKG_OPS_OPTS_LIST(PKG_WNop) -DECLARE_PKG_OPS_OPTS_LIST(PKG_MergeOutput) -DECLARE_PKG_OPS_OPTS_LIST(PKG_RMSNorm) -DECLARE_PKG_OPS_OPTS_LIST(PKG_SiLU) +DECLARE_PKG_OPS_OPTS_LIST(PKG_LLaMASuperSiLU) +DECLARE_PKG_OPS_OPTS_LIST(PKG_LLaMAQuantize) END_PKG_OPS_OPTS_LIST() // op package info -static constexpr auto sg_packageName = THIS_PKG_NAME_STR; // package name passed in as compile flag +static constexpr auto sg_packageName = THIS_PKG_NAME_STR; // package name passed in as compile flag -static std::array sg_opNames{{"IRoPE", "LLaMALinear", "SplitInput", "LLaMAReLU", "LLaMASuperSiLU", "LLaMAQuantize", "LLaMAMul", "KVCache", "Attention", "QLayerNorm", "LLaMAAdd", "CausalMask", "HeadMatmul", "RoPE", "LLaMADequantize", "WNop", "MergeOutput", "RMSNorm", "SiLU"}}; +static 
std::array sg_opNames{{"RMSNorm", "KVCache", "LLaMADequantizeAdd", "LLaMAMul", "MergeOutput", "LLaMAReLU", "CausalMask", "SiLU", "Attention", "QLayerNorm", "RoPE", "RoPESimple", "WNop", "LLaMAAdd", "IRoPE", "LLaMALinear", "SplitInput", "HeadMatmul", "LLaMADequantize", "LLaMASuperSiLU", "LLaMAQuantize"}}; -static Qnn_ApiVersion_t sg_sdkApiVersion = QNN_HTP_API_VERSION_INIT; +static Qnn_ApiVersion_t sg_sdkApiVersion = QNN_HTP_API_VERSION_INIT; static QnnOpPackage_Info_t sg_packageInfo = QNN_OP_PACKAGE_INFO_INIT; // global data static QnnOpPackage_GlobalInfrastructure_t sg_globalInfra = -nullptr; // global infrastructure not in use for now + nullptr; // global infrastructure not in use for now static bool sg_packageInitialized = false; /* @@ -72,27 +74,26 @@ static bool sg_packageInitialized = false; * for alternative logging method provided by HTP core, please refer to log.h */ static QnnLog_Callback_t sg_logCallback = - nullptr; // user provided call back function pointer for logging + nullptr; // user provided call back function pointer for logging static QnnLog_Level_t sg_maxLogLevel = - (QnnLog_Level_t)0; // maximal log level used in user provided logging + (QnnLog_Level_t)0; // maximal log level used in user provided logging static bool sg_logInitialized = - false; // tracks whether user provided logging method has been initialized - + false; // tracks whether user provided logging method has been initialized /* -* op initialization -* needs to be global in the package -* one initialization per package before any op definitions -* syntax: INIT_PACKAGE_OP_DEF() -*/ + * op initialization + * needs to be global in the package + * one initialization per package before any op definitions + * syntax: INIT_PACKAGE_OP_DEF() + */ INIT_PACKAGE_OP_DEF() /* -* optimization initialization -* needs to be global in the package -* one initialization per package before any optimization definitions -* syntax: INIT_PACKAGE_OPTIMIZATION_DEF() -*/ + * optimization initialization + * 
needs to be global in the package + * one initialization per package before any optimization definitions + * syntax: INIT_PACKAGE_OPTIMIZATION_DEF() + */ INIT_PACKAGE_OPTIMIZATION_DEF() /* @@ -143,8 +144,8 @@ INIT_PACKAGE_PARAM_ORDER_DEF() // LIST_PACKAGE_PER_CHANNEL_QUANTIZED_OPS() /* -* Declare and define the special intialize function for HTP Backend to load -*/ + * Declare and define the special intialize function for HTP Backend to load + */ INIT_PKG_CORE_INIT_FUNC() /* op package API's */ @@ -153,43 +154,43 @@ Qnn_ErrorHandle_t LLaMAPackageInit(QnnOpPackage_GlobalInfrastructure_t infrastru if (sg_packageInitialized) return QNN_OP_PACKAGE_ERROR_LIBRARY_ALREADY_INITIALIZED; /* - * op parameter order registration - * registers all defined op parameter orders in the package - * syntax: REGISTER_PACKAGE_PARAM_ORDERS() - */ + * op parameter order registration + * registers all defined op parameter orders in the package + * syntax: REGISTER_PACKAGE_PARAM_ORDERS() + */ REGISTER_PACKAGE_PARAM_ORDERS() /* - * op axis parameter name registration - * registers all axis parameter names in the package - * used with LIST_PACKAGE_AXIS_PARAMS(...) - * syntax: REGISTER_PACKAGE_AXIS_PARAMS() - */ + * op axis parameter name registration + * registers all axis parameter names in the package + * used with LIST_PACKAGE_AXIS_PARAMS(...) + * syntax: REGISTER_PACKAGE_AXIS_PARAMS() + */ REGISTER_PACKAGE_AXIS_PARAMS() /* - * per-channel scale op name registration - * registers all per-channel scale op names in the package - * used with LIST_PACKAGE_PER_CHANNEL_QUANTIZED_OPS(...) - * syntax: REGISTER_PACKAGE_PER_CHANNEL_QUANTIZED_OPS() - */ + * per-channel scale op name registration + * registers all per-channel scale op names in the package + * used with LIST_PACKAGE_PER_CHANNEL_QUANTIZED_OPS(...) 
+ * syntax: REGISTER_PACKAGE_PER_CHANNEL_QUANTIZED_OPS() + */ REGISTER_PACKAGE_PER_CHANNEL_QUANTIZED_OPS() - sg_globalInfra = infrastructure; + sg_globalInfra = infrastructure; sg_packageInitialized = true; return QNN_SUCCESS; } -Qnn_ErrorHandle_t LLaMAPackageGetInfo(const QnnOpPackage_Info_t** info) { +Qnn_ErrorHandle_t LLaMAPackageGetInfo(const QnnOpPackage_Info_t **info) { if (!sg_packageInitialized) return QNN_OP_PACKAGE_ERROR_LIBRARY_NOT_INITIALIZED; if (!info) return QNN_OP_PACKAGE_ERROR_INVALID_INFO; - sg_packageInfo = QNN_OP_PACKAGE_INFO_INIT; - sg_packageInfo.packageName = sg_packageName; + sg_packageInfo = QNN_OP_PACKAGE_INFO_INIT; + sg_packageInfo.packageName = sg_packageName; sg_packageInfo.operationNames = sg_opNames.data(); - sg_packageInfo.numOperations = sg_opNames.size(); - sg_packageInfo.sdkBuildId = QNN_SDK_BUILD_ID; - sg_packageInfo.sdkApiVersion = &sg_sdkApiVersion; + sg_packageInfo.numOperations = sg_opNames.size(); + sg_packageInfo.sdkBuildId = QNN_SDK_BUILD_ID; + sg_packageInfo.sdkApiVersion = &sg_sdkApiVersion; *info = &sg_packageInfo; return QNN_SUCCESS; @@ -199,8 +200,8 @@ Qnn_ErrorHandle_t LLaMAPackageLogInitialize(QnnLog_Callback_t callback, QnnLog_L if (sg_logInitialized) return QNN_OP_PACKAGE_ERROR_LIBRARY_ALREADY_INITIALIZED; if (!callback) return QNN_LOG_ERROR_INVALID_ARGUMENT; if (maxLogLevel < QNN_LOG_LEVEL_ERROR) return QNN_LOG_ERROR_INVALID_ARGUMENT; - sg_logCallback = callback; - sg_maxLogLevel = maxLogLevel; + sg_logCallback = callback; + sg_maxLogLevel = maxLogLevel; sg_logInitialized = true; return QNN_SUCCESS; } @@ -213,13 +214,13 @@ Qnn_ErrorHandle_t LLaMAPackageLogSetLevel(QnnLog_Level_t maxLogLevel) { Qnn_ErrorHandle_t LLaMAPackageLogTerminate() { if (!sg_logInitialized) return QNN_OP_PACKAGE_ERROR_LIBRARY_NOT_INITIALIZED; - sg_logCallback = nullptr; - sg_maxLogLevel = (QnnLog_Level_t)0; + sg_logCallback = nullptr; + sg_maxLogLevel = (QnnLog_Level_t)0; sg_logInitialized = false; return QNN_SUCCESS; } -Qnn_ErrorHandle_t 
LLaMAPackageValidateOpConfig (Qnn_OpConfig_t opConfig){ +Qnn_ErrorHandle_t LLaMAPackageValidateOpConfig(Qnn_OpConfig_t opConfig) { if (std::string(sg_packageName) != opConfig.v1.packageName) { return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; } @@ -228,108 +229,97 @@ Qnn_ErrorHandle_t LLaMAPackageValidateOpConfig (Qnn_OpConfig_t opConfig){ * Check if op config type matches any registered ops * If a match is found, check number of inputs, outputs and params */ - if (std::string(opConfig.v1.typeName) == "IRoPE"){ - if (opConfig.v1.numOfParams != 1 || opConfig.v1.numOfInputs != 4 || opConfig.v1.numOfOutputs != 1){ - return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; + if (std::string(opConfig.v1.typeName) == "RMSNorm") { + if (opConfig.v1.numOfParams != 0 || opConfig.v1.numOfInputs != 2 || opConfig.v1.numOfOutputs != 1) { + return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; } - } - else if (std::string(opConfig.v1.typeName) == "LLaMALinear"){ - if (opConfig.v1.numOfParams != 4 || opConfig.v1.numOfInputs != 3 || opConfig.v1.numOfOutputs != 1){ - return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; + } else if (std::string(opConfig.v1.typeName) == "KVCache") { + if (opConfig.v1.numOfParams != 1 || opConfig.v1.numOfInputs != 2 || opConfig.v1.numOfOutputs != 1) { + return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; } - } - else if (std::string(opConfig.v1.typeName) == "SplitInput"){ - if (opConfig.v1.numOfParams != 1 || opConfig.v1.numOfInputs != 2 || opConfig.v1.numOfOutputs != 2){ - return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; + } else if (std::string(opConfig.v1.typeName) == "LLaMADequantizeAdd") { + if (opConfig.v1.numOfParams != 1 || opConfig.v1.numOfInputs != 2 || opConfig.v1.numOfOutputs != 1) { + return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; } - } - else if (std::string(opConfig.v1.typeName) == "LLaMAReLU"){ - if (opConfig.v1.numOfParams != 0 || opConfig.v1.numOfInputs != 1 || opConfig.v1.numOfOutputs != 1){ - return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; + } else if 
(std::string(opConfig.v1.typeName) == "LLaMAMul") { + if (opConfig.v1.numOfParams != 0 || opConfig.v1.numOfInputs != 2 || opConfig.v1.numOfOutputs != 1) { + return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; } - } - else if (std::string(opConfig.v1.typeName) == "LLaMASuperSiLU"){ - if (opConfig.v1.numOfParams != 3 || opConfig.v1.numOfInputs != 2 || opConfig.v1.numOfOutputs != 1){ - return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; + } else if (std::string(opConfig.v1.typeName) == "MergeOutput") { + if (opConfig.v1.numOfParams != 1 || opConfig.v1.numOfInputs != 4 || opConfig.v1.numOfOutputs != 1) { + return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; } - } - else if (std::string(opConfig.v1.typeName) == "LLaMAQuantize"){ - if (opConfig.v1.numOfParams != 1 || opConfig.v1.numOfInputs != 1 || opConfig.v1.numOfOutputs != 1){ - return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; + } else if (std::string(opConfig.v1.typeName) == "LLaMAReLU") { + if (opConfig.v1.numOfParams != 0 || opConfig.v1.numOfInputs != 1 || opConfig.v1.numOfOutputs != 1) { + return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; } - } - else if (std::string(opConfig.v1.typeName) == "LLaMAMul"){ - if (opConfig.v1.numOfParams != 0 || opConfig.v1.numOfInputs != 2 || opConfig.v1.numOfOutputs != 1){ - return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; + } else if (std::string(opConfig.v1.typeName) == "CausalMask") { + if (opConfig.v1.numOfParams != 0 || opConfig.v1.numOfInputs != 1 || opConfig.v1.numOfOutputs != 1) { + return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; } - } - else if (std::string(opConfig.v1.typeName) == "KVCache"){ - if (opConfig.v1.numOfParams != 1 || opConfig.v1.numOfInputs != 2 || opConfig.v1.numOfOutputs != 1){ - return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; + } else if (std::string(opConfig.v1.typeName) == "SiLU") { + if (opConfig.v1.numOfParams != 0 || opConfig.v1.numOfInputs != 1 || opConfig.v1.numOfOutputs != 1) { + return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; } - } - else if 
(std::string(opConfig.v1.typeName) == "Attention"){ - if (opConfig.v1.numOfParams != 0 || opConfig.v1.numOfInputs != 5 || opConfig.v1.numOfOutputs != 1){ - return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; + } else if (std::string(opConfig.v1.typeName) == "Attention") { + if (opConfig.v1.numOfParams != 0 || opConfig.v1.numOfInputs != 5 || opConfig.v1.numOfOutputs != 1) { + return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; } - } - else if (std::string(opConfig.v1.typeName) == "QLayerNorm"){ - if (opConfig.v1.numOfParams != 0 || opConfig.v1.numOfInputs != 3 || opConfig.v1.numOfOutputs != 1){ - return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; + } else if (std::string(opConfig.v1.typeName) == "QLayerNorm") { + if (opConfig.v1.numOfParams != 0 || opConfig.v1.numOfInputs != 3 || opConfig.v1.numOfOutputs != 1) { + return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; } - } - else if (std::string(opConfig.v1.typeName) == "LLaMAAdd"){ - if (opConfig.v1.numOfParams != 0 || opConfig.v1.numOfInputs != 2 || opConfig.v1.numOfOutputs != 1){ - return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; + } else if (std::string(opConfig.v1.typeName) == "RoPE") { + if (opConfig.v1.numOfParams != 1 || opConfig.v1.numOfInputs != 4 || opConfig.v1.numOfOutputs != 1) { + return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; } - } - else if (std::string(opConfig.v1.typeName) == "CausalMask"){ - if (opConfig.v1.numOfParams != 0 || opConfig.v1.numOfInputs != 1 || opConfig.v1.numOfOutputs != 1){ - return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; + } else if (std::string(opConfig.v1.typeName) == "RoPESimple") { + if (opConfig.v1.numOfInputs != 3 || opConfig.v1.numOfOutputs != 1) { + return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; } - } - else if (std::string(opConfig.v1.typeName) == "HeadMatmul"){ - if (opConfig.v1.numOfParams != 2 || opConfig.v1.numOfInputs != 2 || opConfig.v1.numOfOutputs != 1){ - return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; + } else if (std::string(opConfig.v1.typeName) == "WNop") { + if 
(opConfig.v1.numOfParams != 1 || opConfig.v1.numOfInputs != 2 || opConfig.v1.numOfOutputs != 2) { + return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; } - } - else if (std::string(opConfig.v1.typeName) == "RoPE"){ - if (opConfig.v1.numOfParams != 1 || opConfig.v1.numOfInputs != 4 || opConfig.v1.numOfOutputs != 1){ - return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; + } else if (std::string(opConfig.v1.typeName) == "LLaMAAdd") { + if (opConfig.v1.numOfParams != 0 || opConfig.v1.numOfInputs != 2 || opConfig.v1.numOfOutputs != 1) { + return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; } - } - else if (std::string(opConfig.v1.typeName) == "LLaMADequantize"){ - if (opConfig.v1.numOfParams != 1 || opConfig.v1.numOfInputs != 1 || opConfig.v1.numOfOutputs != 1){ - return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; + } else if (std::string(opConfig.v1.typeName) == "IRoPE") { + if (opConfig.v1.numOfParams != 1 || opConfig.v1.numOfInputs != 4 || opConfig.v1.numOfOutputs != 1) { + return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; } - } - else if (std::string(opConfig.v1.typeName) == "WNop"){ - if (opConfig.v1.numOfParams != 1 || opConfig.v1.numOfInputs != 2 || opConfig.v1.numOfOutputs != 2){ - return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; + } else if (std::string(opConfig.v1.typeName) == "LLaMALinear") { + if (opConfig.v1.numOfParams != 4 || opConfig.v1.numOfInputs != 3 || opConfig.v1.numOfOutputs != 1) { + return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; } - } - else if (std::string(opConfig.v1.typeName) == "MergeOutput"){ - if (opConfig.v1.numOfParams != 1 || opConfig.v1.numOfInputs != 4 || opConfig.v1.numOfOutputs != 1){ - return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; + } else if (std::string(opConfig.v1.typeName) == "SplitInput") { + if (opConfig.v1.numOfParams != 1 || opConfig.v1.numOfInputs != 2 || opConfig.v1.numOfOutputs != 2) { + return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; } - } - else if (std::string(opConfig.v1.typeName) == "RMSNorm"){ - if (opConfig.v1.numOfParams != 0 || 
opConfig.v1.numOfInputs != 2 || opConfig.v1.numOfOutputs != 1){ - return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; + } else if (std::string(opConfig.v1.typeName) == "HeadMatmul") { + if (opConfig.v1.numOfParams != 2 || opConfig.v1.numOfInputs != 2 || opConfig.v1.numOfOutputs != 1) { + return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; } - } - else if (std::string(opConfig.v1.typeName) == "SiLU"){ - if (opConfig.v1.numOfParams != 0 || opConfig.v1.numOfInputs != 1 || opConfig.v1.numOfOutputs != 1){ - return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; + } else if (std::string(opConfig.v1.typeName) == "LLaMADequantize") { + if (opConfig.v1.numOfParams != 1 || opConfig.v1.numOfInputs != 1 || opConfig.v1.numOfOutputs != 1) { + return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; } - } - else{ + } else if (std::string(opConfig.v1.typeName) == "LLaMASuperSiLU") { + if (opConfig.v1.numOfParams != 3 || opConfig.v1.numOfInputs != 2 || opConfig.v1.numOfOutputs != 1) { + return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; + } + } else if (std::string(opConfig.v1.typeName) == "LLaMAQuantize") { + if (opConfig.v1.numOfParams != 1 || opConfig.v1.numOfInputs != 1 || opConfig.v1.numOfOutputs != 1) { + return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; + } + } else { return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; } /* - * additional validation code here - * */ + * additional validation code here + * */ return QNN_SUCCESS; } @@ -347,36 +337,33 @@ Qnn_ErrorHandle_t LLaMAPackageValidateOpConfig (Qnn_OpConfig_t opConfig){ */ Qnn_ErrorHandle_t LLaMAPackageTerminate() { -if (!sg_packageInitialized) return QNN_OP_PACKAGE_ERROR_LIBRARY_NOT_INITIALIZED; + if (!sg_packageInitialized) return QNN_OP_PACKAGE_ERROR_LIBRARY_NOT_INITIALIZED; -sg_globalInfra = nullptr; -sg_packageInitialized = false; -return QNN_SUCCESS; + sg_globalInfra = nullptr; + sg_packageInitialized = false; + return QNN_SUCCESS; } #ifdef __cplusplus extern "C" { #endif - /* latest version */ -Qnn_ErrorHandle_t 
LLaMAPackageInterfaceProvider(QnnOpPackage_Interface_t* interface) { - if (!interface) return QNN_OP_PACKAGE_ERROR_INVALID_ARGUMENT; - interface->interfaceVersion = {1, 4, 0}; - interface->v1_4.init = LLaMAPackageInit; - interface->v1_4.terminate = LLaMAPackageTerminate; - interface->v1_4.getInfo = LLaMAPackageGetInfo; - interface->v1_4.validateOpConfig = LLaMAPackageValidateOpConfig; - interface->v1_4.createOpImpl = nullptr; - interface->v1_4.freeOpImpl = nullptr; - interface->v1_4.logInitialize = LLaMAPackageLogInitialize; - interface->v1_4.logSetLevel = LLaMAPackageLogSetLevel; - interface->v1_4.logTerminate = LLaMAPackageLogTerminate; - return QNN_SUCCESS; +Qnn_ErrorHandle_t LLaMAPackageInterfaceProvider(QnnOpPackage_Interface_t *interface) { + if (!interface) return QNN_OP_PACKAGE_ERROR_INVALID_ARGUMENT; + interface->interfaceVersion = {1, 4, 0}; + interface->v1_4.init = LLaMAPackageInit; + interface->v1_4.terminate = LLaMAPackageTerminate; + interface->v1_4.getInfo = LLaMAPackageGetInfo; + interface->v1_4.validateOpConfig = LLaMAPackageValidateOpConfig; + interface->v1_4.createOpImpl = nullptr; + interface->v1_4.freeOpImpl = nullptr; + interface->v1_4.logInitialize = LLaMAPackageLogInitialize; + interface->v1_4.logSetLevel = LLaMAPackageLogSetLevel; + interface->v1_4.logTerminate = LLaMAPackageLogTerminate; + return QNN_SUCCESS; } #ifdef __cplusplus } #endif - - diff --git a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/Attention.cpp b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/Attention.cpp similarity index 75% rename from src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/Attention.cpp rename to mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/Attention.cpp index e3db65468..559985e48 100755 --- a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/Attention.cpp +++ b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/Attention.cpp @@ -9,18 +9,16 @@ #include "QnnOpPackage.h" #include "HTP/core/simple_reg.h" - 
BEGIN_PKG_OP_DEFINITION(PKG_Attention); - // op execute function declarations -template +template GraphStatus attentionImpl(TensorType1 &out_0, const TensorType1 &in_0, const TensorType1 &in_1, - const TensorType& in_2, - const TensorType& in_3, - const TensorType& in_4); + const TensorType &in_2, + const TensorType &in_3, + const TensorType &in_4); // forward declaration of sample cost function static float attentionCostFunc(const Op *op); @@ -65,11 +63,11 @@ DEF_PACKAGE_OP((attentionImpl), "Attention") * one definition per op, and this is optional * syntax: DEF_PACKAGE_PARAM_ORDER(OP,PARAM1,MANDATORY1,DEFAULT1,PARAM2,MANDATORY2,DEFAULT2...) * one or more parameters can be specified for each op - * order of parameters listed determines the order of parameters passed into op execution functions + * order of parameters listed determines the order of parameters passed into op execution functions * if an op does not have a parameter order definition, parameter order passed into Qnn_addNode * will be passed into op execution functions * if an op has a parameter order definition, any parameter passed into Qnn_addNode with unlisted - * name will be abandoned + * name will be abandoned * if two or more op packages with the same package name will be registered, they cannot list * conflicting parameter orders * PARAM refers to parameter name as a string literal @@ -83,47 +81,41 @@ DEF_PACKAGE_OP((attentionImpl), "Attention") * Qnn_addNode */ - /* execute functions for ops */ -template +template GraphStatus attentionImpl(TensorType1 &out_0, const TensorType1 &in_0, const TensorType1 &in_1, - const TensorType& in_2, - const TensorType& in_3, - const TensorType& in_4) + const TensorType &in_2, + const TensorType &in_3, + const TensorType &in_4) { - /* - * add code here - * */ - /* - * To have good performance and stability, it is required to avoid heap memory - * allocation in this function. 
The heap memory allocation includes but not - * limited to calling malloc, operator new, constructing STL container objects - * like std::vector with default allocator, and adding items like calling - * std::vector::push_back to STL container objects with default allocator. - * - * Please check in SDK documentation for more information. - */ - return GraphStatus::Success; + /* + * add code here + * */ + /* + * To have good performance and stability, it is required to avoid heap memory + * allocation in this function. The heap memory allocation includes but not + * limited to calling malloc, operator new, constructing STL container objects + * like std::vector with default allocator, and adding items like calling + * std::vector::push_back to STL container objects with default allocator. + * + * Please check in SDK documentation for more information. + */ + return GraphStatus::Success; } -__attribute__((unused)) static float attentionCostFunc(const Op *op) -{ - /* - * add code here - * */ +__attribute__((unused)) static float attentionCostFunc(const Op *op) { + /* + * add code here + * */ - float cost = 0.0; // add cost computation here - return cost; + float cost = 0.0; // add cost computation here + return cost; } - - - - /* At the bottom of the op file, call END_PKG_OP_DEFINITION(), where is as BEGIN_PKG_OP_DEFINITION */ diff --git a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/CausalMask.cpp b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/CausalMask.cpp similarity index 64% rename from src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/CausalMask.cpp rename to mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/CausalMask.cpp index c3100cea1..a9ab7cd61 100755 --- a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/CausalMask.cpp +++ b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/CausalMask.cpp @@ -13,11 +13,10 @@ BEGIN_PKG_OP_DEFINITION(PKG_CausalMask); - // op execute function declarations -template -GraphStatus 
causalmaskImpl(TensorType& out_0, - const TensorType& in_0); +template +GraphStatus causalmaskImpl(TensorType &out_0, + const TensorType &in_0); // forward declaration of sample cost function static float causalmaskCostFunc(const Op *op); @@ -62,11 +61,11 @@ DEF_PACKAGE_OP((causalmaskImpl), "CausalMask") * one definition per op, and this is optional * syntax: DEF_PACKAGE_PARAM_ORDER(OP,PARAM1,MANDATORY1,DEFAULT1,PARAM2,MANDATORY2,DEFAULT2...) * one or more parameters can be specified for each op - * order of parameters listed determines the order of parameters passed into op execution functions + * order of parameters listed determines the order of parameters passed into op execution functions * if an op does not have a parameter order definition, parameter order passed into Qnn_addNode * will be passed into op execution functions * if an op has a parameter order definition, any parameter passed into Qnn_addNode with unlisted - * name will be abandoned + * name will be abandoned * if two or more op packages with the same package name will be registered, they cannot list * conflicting parameter orders * PARAM refers to parameter name as a string literal @@ -80,78 +79,67 @@ DEF_PACKAGE_OP((causalmaskImpl), "CausalMask") * Qnn_addNode */ - /* execute functions for ops */ -template -GraphStatus causalmaskImpl(TensorType& out_0, - const TensorType& in_0) +template +GraphStatus causalmaskImpl(TensorType &out_0, + const TensorType &in_0) { - /* - * add code here - * */ - /* - * To have good performance and stability, it is required to avoid heap memory - * allocation in this function. The heap memory allocation includes but not - * limited to calling malloc, operator new, constructing STL container objects - * like std::vector with default allocator, and adding items like calling - * std::vector::push_back to STL container objects with default allocator. - * - * Please check in SDK documentation for more information. 
- */ - out_0.set_dims(in_0); - - int old_dim = 0; - - // NHSD - auto [b_in, h_in, w_in, d_in] = in_0.dims(); - - // S > 1 => mask - if (w_in > 1) { - for (Idx b = 0; b < b_in; b++) { - for (Idx h = 0; h < h_in; h++) { - for (Idx w = 0; w < w_in; w++) { - // CausalMask - for (Idx d = 0; d < d_in; d++) { - - float in_value = in_0(b, h, w, d); - - if (d > w + old_dim) - out_0(b, h, w, d) = in_value - MASK_INFINITY; - else - out_0(b, h, w, d) = in_value; - - } + /* + * add code here + * */ + /* + * To have good performance and stability, it is required to avoid heap memory + * allocation in this function. The heap memory allocation includes but not + * limited to calling malloc, operator new, constructing STL container objects + * like std::vector with default allocator, and adding items like calling + * std::vector::push_back to STL container objects with default allocator. + * + * Please check in SDK documentation for more information. + */ + out_0.set_dims(in_0); + + int old_dim = 0; + + // NHSD + auto [b_in, h_in, w_in, d_in] = in_0.dims(); + + // S > 1 => mask + if (w_in > 1) { + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; h++) { + for (Idx w = 0; w < w_in; w++) { + // CausalMask + for (Idx d = 0; d < d_in; d++) { + float in_value = in_0(b, h, w, d); + + if (d > w + old_dim) + out_0(b, h, w, d) = in_value - MASK_INFINITY; + else + out_0(b, h, w, d) = in_value; + } + } + } } - } + } else { + auto in_ptr = in_0.raw_data_const(); + auto out_ptr = out_0.raw_data(); + memcpy(out_ptr, in_ptr, b_in * h_in * w_in * d_in * 4); } - } else { - auto in_ptr = in_0.raw_data_const(); - auto out_ptr = out_0.raw_data(); - memcpy(out_ptr, in_ptr, b_in*h_in*w_in*d_in*4); - } - - - - return GraphStatus::Success; + return GraphStatus::Success; } -__attribute__((unused)) static float causalmaskCostFunc(const Op *op) -{ - /* - * add code here - * */ +__attribute__((unused)) static float causalmaskCostFunc(const Op *op) { + /* + * add code here + * */ - float cost = 0.0; 
// add cost computation here - return cost; + float cost = 0.0; // add cost computation here + return cost; } - - - - /* At the bottom of the op file, call END_PKG_OP_DEFINITION(), where is as BEGIN_PKG_OP_DEFINITION */ diff --git a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/HeadMatmul.cpp b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/HeadMatmul.cpp similarity index 63% rename from src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/HeadMatmul.cpp rename to mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/HeadMatmul.cpp index 18440880c..8dbd62f29 100755 --- a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/HeadMatmul.cpp +++ b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/HeadMatmul.cpp @@ -9,25 +9,24 @@ #include "QnnOpPackage.h" #include "HTP/core/simple_reg.h" - BEGIN_PKG_OP_DEFINITION(PKG_HeadMatmul); static Qnn_Scalar_t sg_opDefaultTranspose_In0Scalar = {.dataType = Qnn_DataType_t::QNN_DATATYPE_BOOL_8, - .bool8Value = false}; + .bool8Value = false}; static Qnn_Param_t sg_opDefaultTranspose_In0 = {.paramType = QNN_PARAMTYPE_SCALAR, - .scalarParam = sg_opDefaultTranspose_In0Scalar}; + .scalarParam = sg_opDefaultTranspose_In0Scalar}; static Qnn_Scalar_t sg_opDefaultTranspose_In1Scalar = {.dataType = Qnn_DataType_t::QNN_DATATYPE_BOOL_8, - .bool8Value = false}; + .bool8Value = false}; static Qnn_Param_t sg_opDefaultTranspose_In1 = {.paramType = QNN_PARAMTYPE_SCALAR, - .scalarParam = sg_opDefaultTranspose_In1Scalar}; + .scalarParam = sg_opDefaultTranspose_In1Scalar}; // op execute function declarations -template -GraphStatus headmatmulImpl(TensorType& out_0, - const TensorType& in_0, - const TensorType& in_1, - const QuantUint16Tensor& transpose_in0, - const QuantUint16Tensor& transpose_in1); +template +GraphStatus headmatmulImpl(TensorType &out_0, + const TensorType &in_0, + const TensorType &in_1, + const QuantUint16Tensor &transpose_in0, + const QuantUint16Tensor &transpose_in1); // forward declaration of 
sample cost function static float headmatmulCostFunc(const Op *op); @@ -72,11 +71,11 @@ DEF_PACKAGE_OP((headmatmulImpl), "HeadMatmul") * one definition per op, and this is optional * syntax: DEF_PACKAGE_PARAM_ORDER(OP,PARAM1,MANDATORY1,DEFAULT1,PARAM2,MANDATORY2,DEFAULT2...) * one or more parameters can be specified for each op - * order of parameters listed determines the order of parameters passed into op execution functions + * order of parameters listed determines the order of parameters passed into op execution functions * if an op does not have a parameter order definition, parameter order passed into Qnn_addNode * will be passed into op execution functions * if an op has a parameter order definition, any parameter passed into Qnn_addNode with unlisted - * name will be abandoned + * name will be abandoned * if two or more op packages with the same package name will be registered, they cannot list * conflicting parameter orders * PARAM refers to parameter name as a string literal @@ -89,7 +88,7 @@ DEF_PACKAGE_OP((headmatmulImpl), "HeadMatmul") * graph construction will skip this parameter when this parameter is not provided at * Qnn_addNode */ -DEF_PACKAGE_PARAM_ORDER("HeadMatmul", +DEF_PACKAGE_PARAM_ORDER("HeadMatmul", "transpose_in0", false, &sg_opDefaultTranspose_In0, @@ -97,77 +96,65 @@ DEF_PACKAGE_PARAM_ORDER("HeadMatmul", false, &sg_opDefaultTranspose_In1) - /* execute functions for ops */ -template -GraphStatus headmatmulImpl(TensorType& out_0, - const TensorType& in_0, - const TensorType& in_1, - const QuantUint16Tensor& transpose_in0, - const QuantUint16Tensor& transpose_in1) +template +GraphStatus headmatmulImpl(TensorType &out_0, + const TensorType &in_0, + const TensorType &in_1, + const QuantUint16Tensor &transpose_in0, + const QuantUint16Tensor &transpose_in1) { - /* - * add code here - * */ - /* - * To have good performance and stability, it is required to avoid heap memory - * allocation in this function. 
The heap memory allocation includes but not - * limited to calling malloc, operator new, constructing STL container objects - * like std::vector with default allocator, and adding items like calling - * std::vector::push_back to STL container objects with default allocator. - * - * Please check in SDK documentation for more information. - */ - - auto transpose_in0_ = transpose_in0(0,0,0,0); - auto transpose_in1_ = transpose_in1(0,0,0,0); + /* + * add code here + * */ + /* + * To have good performance and stability, it is required to avoid heap memory + * allocation in this function. The heap memory allocation includes but not + * limited to calling malloc, operator new, constructing STL container objects + * like std::vector with default allocator, and adding items like calling + * std::vector::push_back to STL container objects with default allocator. + * + * Please check in SDK documentation for more information. + */ + + auto transpose_in0_ = transpose_in0(0, 0, 0, 0); + auto transpose_in1_ = transpose_in1(0, 0, 0, 0); auto [b_in, h_in, w_in, d_in] = in_0.dims(); auto [b_in2, h_in2, w_in2, d_in2] = in_1.dims(); if (transpose_in0_ && transpose_in1_) { - - // Q KT head matmul - const size_t dims[] = {b_in, w_in, h_in, h_in}; - out_0.set_dims(dims); - debuglog("HeadMatmul execute... dims=(%zdx%zdx%zdx%zd)", out_0.dim(0), out_0.dim(1), out_0.dim(2), out_0.dim(3)); - + // Q KT head matmul + const size_t dims[] = {b_in, w_in, h_in, h_in}; + out_0.set_dims(dims); + debuglog("HeadMatmul execute... dims=(%zdx%zdx%zdx%zd)", out_0.dim(0), out_0.dim(1), out_0.dim(2), out_0.dim(3)); } else if (transpose_in0_) { - } else if (transpose_in1_) { + // QKT V head matmul + const size_t dims[] = {b_in, w_in, h_in, d_in2}; + out_0.set_dims(dims); + debuglog("HeadMatmul execute... 
dims=(%zdx%zdx%zdx%zd)", out_0.dim(0), out_0.dim(1), out_0.dim(2), out_0.dim(3)); - // QKT V head matmul - const size_t dims[] = {b_in, w_in, h_in, d_in2}; - out_0.set_dims(dims); - debuglog("HeadMatmul execute... dims=(%zdx%zdx%zdx%zd)", out_0.dim(0), out_0.dim(1), out_0.dim(2), out_0.dim(3)); - - // Todo out matrix needs transpose, we directly calculate the final dimensions. + // Todo out matrix needs transpose, we directly calculate the final dimensions. } else { - } - - return GraphStatus::Success; + return GraphStatus::Success; } -__attribute__((unused)) static float headmatmulCostFunc(const Op *op) -{ - /* - * add code here - * */ +__attribute__((unused)) static float headmatmulCostFunc(const Op *op) { + /* + * add code here + * */ - float cost = 0.0; // add cost computation here - return cost; + float cost = 0.0; // add cost computation here + return cost; } - - - - /* At the bottom of the op file, call END_PKG_OP_DEFINITION(), where is as BEGIN_PKG_OP_DEFINITION */ diff --git a/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/IRoPE.cpp b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/IRoPE.cpp new file mode 100755 index 000000000..b0b4184c0 --- /dev/null +++ b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/IRoPE.cpp @@ -0,0 +1,211 @@ +//============================================================================== +// Auto Generated Code for LLaMAPackage +//============================================================================== + +#include "HTP/core/constraints.h" +#include "HTP/core/op_package_feature_support.h" +#include "HTP/core/op_register_ext.h" +#include "HTP/core/optimize.h" +#include "QnnOpPackage.h" +#include "HTP/core/simple_reg.h" +#include "HTP/core/tensor.h" + +BEGIN_PKG_OP_DEFINITION(PKG_IRoPE); + +// op execute function declarations +template +GraphStatus iropeImpl(TensorType &out_0, + const TensorType &in_0, + const TensorType &in_1, + const TensorType &cos, + const TensorType1 &h_cnt, + const Tensor 
&pose_type); + +// forward declaration of sample cost function +static float iropeCostFunc(const Op *op); + +/* + * method 1 for defining op, using default cost value (i.e. GLACIAL) and default flag (Flags::RESOURCE_HVX) + * syntax: DEF_PACKAGE_OP(F,OP) + * e.g. DEF_PACKAGE_OP((iropeImpl), "IRoPE") + */ +DEF_PACKAGE_OP((iropeImpl), "IRoPE") + +/* + * method 2 for defining op with specified cost value (one of GLACIAL, SNAIL, FAST, FREE) + * and provided flags + * syntax: DEF_PACKAGE_OP_AND_COST_AND_FLAGS(F,OP,COST,...) + * can use zero or more flags, FLAG options are IS_CONST, INHIBIT_CONST_PROP, + * RESOURCE_HVX, RESOURCE_HMX(not supported in external op packages) + * e.g. DEF_PACKAGE_OP_AND_COST_AND_FLAGS((iropeImpl), "IRoPE", SNAIL) + */ + +/* + * method 3 for defining op with cost function pointer and provided flags + * cost function pointer type: typedef float (*cost_function) (const Op * op); + * syntax: DEF_PACKAGE_OP_AND_COST_F_AND_FLAGS(F,OP,COST_F,...) + * e.g. DEF_PACKAGE_OP_AND_COST_F_AND_FLAGS((iropeImpl), + * "IRoPE", iropeCostFunc, Flags::RESOURCE_HVX) + */ + +/* + * optimization definitions + * need to be global in the package + * one definition per optimization + * syntax: DEF_PACKAGE_OPTIMIZATION(PRIORITY,MATCHCODE,CONSTRAINTCODE,REPLACECODE) + * PRIORITY predefined values include EARLY(2000), MIDDLE(3000), LATE(4000) + * HTP core provides some replacement functions for op package to use + * for more information about optimization rules, please refer to HTP core documentations + */ + +/* + * op parameter order definitions + * need to be global in the package + * one definition per op, and this is optional + * syntax: DEF_PACKAGE_PARAM_ORDER(OP,PARAM1,MANDATORY1,DEFAULT1,PARAM2,MANDATORY2,DEFAULT2...) 
+ * one or more parameters can be specified for each op + * order of parameters listed determines the order of parameters passed into op execution functions + * if an op does not have a parameter order definition, parameter order passed into Qnn_addNode + * will be passed into op execution functions + * if an op has a parameter order definition, any parameter passed into Qnn_addNode with unlisted + * name will be abandoned + * if two or more op packages with the same package name will be registered, they cannot list + * conflicting parameter orders + * PARAM refers to parameter name as a string literal + * MANDATORY refers to whether this parameter is required to be provided at Qnn_addNode + * DEFAULT is used when MANDATORY is false + * if provided as Qnn_Param_t*, + * DEFAULT will be used for graph construction when this parameter is not provided at + * Qnn_addNode + * if provided as nullptr, + * graph construction will skip this parameter when this parameter is not provided at + * Qnn_addNode + */ +DEF_PACKAGE_PARAM_ORDER("IRoPE", + "pose_type", + true, + nullptr) + +/* execute functions for ops */ + +template +GraphStatus iropeImpl(TensorType &out_0, + const TensorType &in_0, + const TensorType &sin, + const TensorType &cos, + const TensorType1 &h_cnt, + const Tensor &pose_type) + +{ + /* + * add code here + * */ + /* + * To have good performance and stability, it is required to avoid heap memory + * allocation in this function. The heap memory allocation includes but not + * limited to calling malloc, operator new, constructing STL container objects + * like std::vector with default allocator, and adding items like calling + * std::vector::push_back to STL container objects with default allocator. + * + * Please check in SDK documentation for more information. 
+ */ + auto pose_type_ = pose_type(0, 0, 0, 0); + auto h_cnt_ = static_cast(h_cnt(0, 0, 0, 0)); + + out_0.set_dims(in_0); + auto [b_in, h_in, w_in, d_in] = in_0.dims(); + + uint32_t half_dimension = d_in / 2; + + auto sin_ptr = (uint8_t *)sin.raw_data_const(); + auto cos_ptr = (uint8_t *)cos.raw_data_const(); + + auto in_ptr = (uint8_t *)in_0.raw_data_const(); + + sin_ptr += half_dimension * h_cnt_; + cos_ptr += half_dimension * h_cnt_; + + // float scale_ = in_0.interface_scale() * sin.interface_scale() * cos.interface_scale(); + + if (pose_type_ == 4) { + DType dtype = out_0.get_dtype(); + + if (dtype == DType::Float32) { + auto out_ptr = (float *)out_0.raw_data(); + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; h++) { + for (Idx w = 0; w < w_in; w++) { + int partial_dimension = d_in; + for (Idx d = 0; d < partial_dimension / 2; ++d) { + int in_value = *in_ptr; + int in_value_2 = *(in_ptr + half_dimension); + + int sin_value = *(sin_ptr + d); + int cos_value = *(cos_ptr + d); + float value = (in_value - 128) * (cos_value - 128) * cos.interface_scale() - (in_value_2 - 128) * (sin_value - 128) * sin.interface_scale(); + float value2 = (in_value - 128) * (sin_value - 128) * sin.interface_scale() + (in_value_2 - 128) * (cos_value - 128) * cos.interface_scale(); + + *out_ptr = value; + *(out_ptr + half_dimension) = value2; + + out_ptr++; + in_ptr++; + } + + in_ptr += half_dimension; + out_ptr += half_dimension; + } + + sin_ptr += half_dimension; + cos_ptr += half_dimension; + } + } + } else if (dtype == DType::Float16) { + auto out_ptr = (__fp16 *)out_0.raw_data(); + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; h++) { + for (Idx w = 0; w < w_in; w++) { + int partial_dimension = d_in; + for (Idx d = 0; d < partial_dimension / 2; ++d) { + int in_value = *in_ptr; + int in_value_2 = *(in_ptr + half_dimension); + + int sin_value = *(sin_ptr + d); + int cos_value = *(cos_ptr + d); + float value = (in_value - 128) * (cos_value - 128) * 
cos.interface_scale() - (in_value_2 - 128) * (sin_value - 128) * sin.interface_scale(); + float value2 = (in_value - 128) * (sin_value - 128) * sin.interface_scale() + (in_value_2 - 128) * (cos_value - 128) * cos.interface_scale(); + + *out_ptr = static_cast<__fp16>(value); + *(out_ptr + half_dimension) = static_cast<__fp16>(value2); + + out_ptr++; + in_ptr++; + } + + in_ptr += half_dimension; + out_ptr += half_dimension; + } + + sin_ptr += half_dimension; + cos_ptr += half_dimension; + } + } + } + } + + return GraphStatus::Success; +} + +__attribute__((unused)) static float iropeCostFunc(const Op *op) { + /* + * add code here + * */ + + float cost = 0.0; // add cost computation here + return cost; +} + +/* At the bottom of the op file, call END_PKG_OP_DEFINITION(), + where is as BEGIN_PKG_OP_DEFINITION +*/ +END_PKG_OP_DEFINITION(PKG_IRoPE); \ No newline at end of file diff --git a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/KVCache.cpp b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/KVCache.cpp similarity index 83% rename from src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/KVCache.cpp rename to mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/KVCache.cpp index 6d3ecb2d3..bf11ce8c8 100755 --- a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/KVCache.cpp +++ b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/KVCache.cpp @@ -9,16 +9,14 @@ #include "QnnOpPackage.h" #include "HTP/core/simple_reg.h" - BEGIN_PKG_OP_DEFINITION(PKG_KVCache); - // op execute function declarations -template -GraphStatus kvcacheImpl(TensorType& out_0, - const TensorType& in_0, +template +GraphStatus kvcacheImpl(TensorType &out_0, + const TensorType &in_0, const TensorType1 &seq_pos, - const Tensor& hidden_dim); + const Tensor &hidden_dim); // forward declaration of sample cost function static float kvcacheCostFunc(const Op *op); @@ -63,11 +61,11 @@ DEF_PACKAGE_OP((kvcacheImpl), "KVCache") * one definition per op, and this is optional * 
syntax: DEF_PACKAGE_PARAM_ORDER(OP,PARAM1,MANDATORY1,DEFAULT1,PARAM2,MANDATORY2,DEFAULT2...) * one or more parameters can be specified for each op - * order of parameters listed determines the order of parameters passed into op execution functions + * order of parameters listed determines the order of parameters passed into op execution functions * if an op does not have a parameter order definition, parameter order passed into Qnn_addNode * will be passed into op execution functions * if an op has a parameter order definition, any parameter passed into Qnn_addNode with unlisted - * name will be abandoned + * name will be abandoned * if two or more op packages with the same package name will be registered, they cannot list * conflicting parameter orders * PARAM refers to parameter name as a string literal @@ -80,12 +78,11 @@ DEF_PACKAGE_OP((kvcacheImpl), "KVCache") * graph construction will skip this parameter when this parameter is not provided at * Qnn_addNode */ -DEF_PACKAGE_PARAM_ORDER("KVCache", +DEF_PACKAGE_PARAM_ORDER("KVCache", "hidden_dim", true, nullptr) - /* execute functions for ops */ // #ifndef REFERENCE_OP @@ -100,7 +97,6 @@ DEF_PACKAGE_PARAM_ORDER("KVCache", // #define ONE 0x3F800000 // #define M_ONE 0xAF800000 - // int32_t hvx_memcpy_af(float *restrict input, float *restrict output, uint32_t size) // { // HVX_Vector *input_v_ptr; @@ -113,7 +109,6 @@ DEF_PACKAGE_PARAM_ORDER("KVCache", // int32_t vectors_in_rounddown = size / 32; // int32_t leftover_size = leftover * sizeof(float); - // /* Check input arguments. Return error status if some argument has invalid value */ // if ((input == 0) || (output == 0) || (size == 0)) // { @@ -187,7 +182,6 @@ DEF_PACKAGE_PARAM_ORDER("KVCache", // return 0; // } - // template // GraphStatus kvcacheImpl(TensorType& out_0, // const TensorType& in_0, @@ -207,10 +201,10 @@ DEF_PACKAGE_PARAM_ORDER("KVCache", // * // * Please check in SDK documentation for more information. 
// */ - + // out_0.set_dims(in_0); // auto [b_in, h_in, w_in, d_in] = in_0.dims(); - + // uint32_t seq_pos_ = seq_pos(0,0,0,0); // // uint32_t hidden_dim_ = hidden_dim(0,0,0,0); @@ -226,91 +220,76 @@ DEF_PACKAGE_PARAM_ORDER("KVCache", // hvx_memcpy_af(out_ptr, in_ptr, h_in * w_in * d_in); - // return GraphStatus::Success; // } - // #else -template -GraphStatus kvcacheImpl(TensorType& out_0, - const TensorType& in_0, +template +GraphStatus kvcacheImpl(TensorType &out_0, + const TensorType &in_0, const TensorType1 &seq_pos, - const Tensor& hidden_dim) + const Tensor &hidden_dim) { - /* - * add code here - * */ - /* - * To have good performance and stability, it is required to avoid heap memory - * allocation in this function. The heap memory allocation includes but not - * limited to calling malloc, operator new, constructing STL container objects - * like std::vector with default allocator, and adding items like calling - * std::vector::push_back to STL container objects with default allocator. - * - * Please check in SDK documentation for more information. - */ - - auto [b_in, h_in, w_in, d_in] = in_0.dims(); - - uint32_t seq_pos_ = seq_pos(0,0,0,0); - const size_t dims[] = {b_in, h_in + seq_pos_, w_in, d_in}; - - out_0.set_dims(dims); - - // uint32_t hidden_dim_ = hidden_dim(0,0,0,0); - - // // const size_t dims[] = {b_in, h_in, seq_pos_+1, hidden_dim_}; - // // out_0.set_dims(dims); - - // NSHD + /* + * add code here + * */ + /* + * To have good performance and stability, it is required to avoid heap memory + * allocation in this function. The heap memory allocation includes but not + * limited to calling malloc, operator new, constructing STL container objects + * like std::vector with default allocator, and adding items like calling + * std::vector::push_back to STL container objects with default allocator. + * + * Please check in SDK documentation for more information. 
+ */ + + auto [b_in, h_in, w_in, d_in] = in_0.dims(); + + uint32_t seq_pos_ = seq_pos(0, 0, 0, 0); + const size_t dims[] = {b_in, h_in + seq_pos_, w_in, d_in}; + + out_0.set_dims(dims); + + // uint32_t hidden_dim_ = hidden_dim(0,0,0,0); + + // // const size_t dims[] = {b_in, h_in, seq_pos_+1, hidden_dim_}; + // // out_0.set_dims(dims); + + // NSHD DType dtype = in_0.get_dtype(); - const uint8_t *in_ptr = (uint8_t*)in_0.raw_data_const(); - uint8_t *out_ptr = (uint8_t*)out_0.raw_data(); + const uint8_t *in_ptr = (uint8_t *)in_0.raw_data_const(); + uint8_t *out_ptr = (uint8_t *)out_0.raw_data(); if (dtype == DType::QUInt8) { - out_ptr += seq_pos_ * w_in * d_in; memcpy(out_ptr, in_ptr, h_in * w_in * d_in * sizeof(uint8_t)); } else if (dtype == DType::Float16) { - out_ptr += seq_pos_ * w_in * d_in * sizeof(float) / 2; memcpy(out_ptr, in_ptr, h_in * w_in * d_in * sizeof(float) / 2); } else if (dtype == DType::Float32) { - out_ptr += seq_pos_ * w_in * d_in * sizeof(float); memcpy(out_ptr, in_ptr, h_in * w_in * d_in * sizeof(float)); } - - - - return GraphStatus::Success; + return GraphStatus::Success; } - // #endif +__attribute__((unused)) static float kvcacheCostFunc(const Op *op) { + /* + * add code here + * */ -__attribute__((unused)) static float kvcacheCostFunc(const Op *op) -{ - /* - * add code here - * */ - - float cost = 0.0; // add cost computation here - return cost; + float cost = 0.0; // add cost computation here + return cost; } - - - - /* At the bottom of the op file, call END_PKG_OP_DEFINITION(), where is as BEGIN_PKG_OP_DEFINITION */ diff --git a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMAAdd.cpp b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMAAdd.cpp similarity index 58% rename from src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMAAdd.cpp rename to mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMAAdd.cpp index f883d3cb3..99576ab8b 100755 --- 
a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMAAdd.cpp +++ b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMAAdd.cpp @@ -9,15 +9,13 @@ #include "QnnOpPackage.h" #include "HTP/core/simple_reg.h" - BEGIN_PKG_OP_DEFINITION(PKG_LLaMAAdd); - // op execute function declarations -template -GraphStatus llamaaddImpl(TensorType& out_0, - const TensorType& in_0, - const TensorType& in_1); +template +GraphStatus llamaaddImpl(TensorType &out_0, + const TensorType &in_0, + const TensorType &in_1); // forward declaration of sample cost function static float llamaaddCostFunc(const Op *op); @@ -62,11 +60,11 @@ DEF_PACKAGE_OP((llamaaddImpl), "LLaMAAdd") * one definition per op, and this is optional * syntax: DEF_PACKAGE_PARAM_ORDER(OP,PARAM1,MANDATORY1,DEFAULT1,PARAM2,MANDATORY2,DEFAULT2...) * one or more parameters can be specified for each op - * order of parameters listed determines the order of parameters passed into op execution functions + * order of parameters listed determines the order of parameters passed into op execution functions * if an op does not have a parameter order definition, parameter order passed into Qnn_addNode * will be passed into op execution functions * if an op has a parameter order definition, any parameter passed into Qnn_addNode with unlisted - * name will be abandoned + * name will be abandoned * if two or more op packages with the same package name will be registered, they cannot list * conflicting parameter orders * PARAM refers to parameter name as a string literal @@ -80,7 +78,6 @@ DEF_PACKAGE_OP((llamaaddImpl), "LLaMAAdd") * Qnn_addNode */ - /* execute functions for ops */ #ifndef REFERENCE_OP @@ -90,17 +87,15 @@ DEF_PACKAGE_OP((llamaaddImpl), "LLaMAAdd") #include #include -#define BLOCK_SIZE (8*1024/VLEN) /* vector chunks */ -#define L2FETCH_AHEAD (BLOCK_SIZE) +#define BLOCK_SIZE (8 * 1024 / VLEN) /* vector chunks */ +#define L2FETCH_AHEAD (BLOCK_SIZE) int32_t hvx_add_af( float *restrict input, float *restrict 
input2, float *restrict output, - uint32_t size) -{ - if ((input == NULL) || (output == NULL) || (size == 0)) - { + uint32_t size) { + if ((input == NULL) || (output == NULL) || (size == 0)) { return -1; } @@ -120,25 +115,21 @@ int32_t hvx_add_af( sline1p = *iptr++; sline2p = *iptr2++; - for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) - { + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { block = Q6_R_min_RR(i, BLOCK_SIZE); l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); - if (l2fetch_block > 0) - { + if (l2fetch_block > 0) { l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); l2fetch(iptr2 + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); } - for (int32_t j = 0; j < block; ++j) - { + for (int32_t j = 0; j < block; ++j) { sline1c = *iptr++; sline2c = *iptr2++; sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t)input2); - // Our add consider uint8->int8 bugs from QNN. // sline2 = Q6_Vb_vsub_VbVb(sline2, v128); *optr++ = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_VsfVsf(sline1, sline2)); @@ -149,134 +140,116 @@ int32_t hvx_add_af( } if (vectors_in_rounddown > 0) { + sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? sline1p : *iptr++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); - sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? sline1p : *iptr++; - sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t) input); - - sline2c = is_aligned(iptr2, VLEN) && leftover == 0 ? sline2p : *iptr2++; - sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t) input2); - - // sline2 = Q6_Vb_vsub_VbVb(sline2, v128); - *optr++ = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_VsfVsf(sline1, sline2)); + sline2c = is_aligned(iptr2, VLEN) && leftover == 0 ? sline2p : *iptr2++; + sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t)input2); + // sline2 = Q6_Vb_vsub_VbVb(sline2, v128); + *optr++ = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_VsfVsf(sline1, sline2)); } // Handle leftover elements. 
if (leftover_size > 0) { - sline1c = (is_in_one_chunk(iptr, leftover_size, VLEN) - ? sline1p - : *iptr++); - sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); - + sline1c = (is_in_one_chunk(iptr, leftover_size, VLEN) ? sline1p : *iptr++); + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); - sline2c = (is_in_one_chunk(iptr2, leftover_size, VLEN) - ? sline2p - : *iptr2++); - sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t)input2); + sline2c = (is_in_one_chunk(iptr2, leftover_size, VLEN) ? sline2p : *iptr2++); + sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t)input2); - // sline2 = Q6_Vb_vsub_VbVb(sline2, v128); - vstu_variable(optr, leftover_size, Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_VsfVsf(sline1, sline2))); + // sline2 = Q6_Vb_vsub_VbVb(sline2, v128); + vstu_variable(optr, leftover_size, Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_VsfVsf(sline1, sline2))); } return 0; } -template -GraphStatus llamaaddImpl(TensorType& out_0, - const TensorType& in_0, - const TensorType& in_1) +template +GraphStatus llamaaddImpl(TensorType &out_0, + const TensorType &in_0, + const TensorType &in_1) { - /* - * add code here - * */ - /* - * To have good performance and stability, it is required to avoid heap memory - * allocation in this function. The heap memory allocation includes but not - * limited to calling malloc, operator new, constructing STL container objects - * like std::vector with default allocator, and adding items like calling - * std::vector::push_back to STL container objects with default allocator. - * - * Please check in SDK documentation for more information. - */ + /* + * add code here + * */ + /* + * To have good performance and stability, it is required to avoid heap memory + * allocation in this function. 
The heap memory allocation includes but not + * limited to calling malloc, operator new, constructing STL container objects + * like std::vector with default allocator, and adding items like calling + * std::vector::push_back to STL container objects with default allocator. + * + * Please check in SDK documentation for more information. + */ out_0.set_dims(in_0); - - auto in_ptr = (float*)in_0.raw_data_const(); - auto in2_ptr = (float*)in_1.raw_data_const(); - auto out_ptr = (float*)out_0.raw_data(); + auto in_ptr = (float *)in_0.raw_data_const(); + auto in2_ptr = (float *)in_1.raw_data_const(); + auto out_ptr = (float *)out_0.raw_data(); auto [b_in, h_in, w_in, d_in] = in_0.dims(); - size_t size = b_in*h_in*w_in*d_in; + size_t size = b_in * h_in * w_in * d_in; hvx_add_af(in_ptr, in2_ptr, out_ptr, size); - return GraphStatus::Success; + return GraphStatus::Success; } #else - -template -GraphStatus llamaaddImpl(TensorType& out_0, - const TensorType& in_0, - const TensorType& in_1) +template +GraphStatus llamaaddImpl(TensorType &out_0, + const TensorType &in_0, + const TensorType &in_1) { - /* - * add code here - * */ - /* - * To have good performance and stability, it is required to avoid heap memory - * allocation in this function. The heap memory allocation includes but not - * limited to calling malloc, operator new, constructing STL container objects - * like std::vector with default allocator, and adding items like calling - * std::vector::push_back to STL container objects with default allocator. - * - * Please check in SDK documentation for more information. 
- */ - out_0.set_dims(in_0); - - auto [b_in, h_in, w_in, d_in] = in_0.dims(); - for (Idx b = 0; b < b_in; b++) { - for (Idx h = 0; h < h_in; h++) { - for (Idx w = 0; w < w_in; w++) { - // mul - for (Idx d = 0; d < d_in; d++) { - float inval = in_0(b, h, w, d); - float inval2 = in_1(b, h, w, d); - float outval = inval + inval2; - - out_0(b, h, w, d) = outval; - - } + /* + * add code here + * */ + /* + * To have good performance and stability, it is required to avoid heap memory + * allocation in this function. The heap memory allocation includes but not + * limited to calling malloc, operator new, constructing STL container objects + * like std::vector with default allocator, and adding items like calling + * std::vector::push_back to STL container objects with default allocator. + * + * Please check in SDK documentation for more information. + */ + out_0.set_dims(in_0); + + auto [b_in, h_in, w_in, d_in] = in_0.dims(); + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; h++) { + for (Idx w = 0; w < w_in; w++) { + // mul + for (Idx d = 0; d < d_in; d++) { + float inval = in_0(b, h, w, d); + float inval2 = in_1(b, h, w, d); + float outval = inval + inval2; + + out_0(b, h, w, d) = outval; + } + } } - } } - - return GraphStatus::Success; + return GraphStatus::Success; } - - #endif -__attribute__((unused)) static float llamaaddCostFunc(const Op *op) -{ - /* - * add code here - * */ +__attribute__((unused)) static float llamaaddCostFunc(const Op *op) { + /* + * add code here + * */ - float cost = 0.0; // add cost computation here - return cost; + float cost = 0.0; // add cost computation here + return cost; } - - - - /* At the bottom of the op file, call END_PKG_OP_DEFINITION(), where is as BEGIN_PKG_OP_DEFINITION */ diff --git a/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMADequantize.cpp b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMADequantize.cpp new file mode 100755 index 000000000..c8b03b53b --- /dev/null +++ 
b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMADequantize.cpp @@ -0,0 +1,540 @@ +//============================================================================== +// Auto Generated Code for LLaMAPackage +//============================================================================== + +#include "HTP/core/constraints.h" +#include "HTP/core/op_package_feature_support.h" +#include "HTP/core/op_register_ext.h" +#include "HTP/core/optimize.h" +#include "QnnOpPackage.h" +#include "HTP/core/simple_reg.h" + +BEGIN_PKG_OP_DEFINITION(PKG_LLaMADequantize); + +// op execute function declarations +template +GraphStatus llamadequantizeImpl(TensorType1 &out_0, + const TensorType1 &in_0, + const PlainFloatTensor &scale); + +// forward declaration of sample cost function +static float llamadequantizeCostFunc(const Op *op); + +/* + * method 1 for defining op, using default cost value (i.e. GLACIAL) and default flag (Flags::RESOURCE_HVX) + * syntax: DEF_PACKAGE_OP(F,OP) + * e.g. DEF_PACKAGE_OP((llamadequantizeImpl), "LLaMADequantize") + */ +DEF_PACKAGE_OP((llamadequantizeImpl), "LLaMADequantize") + +/* + * method 2 for defining op with specified cost value (one of GLACIAL, SNAIL, FAST, FREE) + * and provided flags + * syntax: DEF_PACKAGE_OP_AND_COST_AND_FLAGS(F,OP,COST,...) + * can use zero or more flags, FLAG options are IS_CONST, INHIBIT_CONST_PROP, + * RESOURCE_HVX, RESOURCE_HMX(not supported in external op packages) + * e.g. DEF_PACKAGE_OP_AND_COST_AND_FLAGS((llamadequantizeImpl), "LLaMADequantize", SNAIL) + */ + +/* + * method 3 for defining op with cost function pointer and provided flags + * cost function pointer type: typedef float (*cost_function) (const Op * op); + * syntax: DEF_PACKAGE_OP_AND_COST_F_AND_FLAGS(F,OP,COST_F,...) + * e.g. 
DEF_PACKAGE_OP_AND_COST_F_AND_FLAGS((llamadequantizeImpl), + * "LLaMADequantize", llamadequantizeCostFunc, Flags::RESOURCE_HVX) + */ + +/* + * optimization definitions + * need to be global in the package + * one definition per optimization + * syntax: DEF_PACKAGE_OPTIMIZATION(PRIORITY,MATCHCODE,CONSTRAINTCODE,REPLACECODE) + * PRIORITY predefined values include EARLY(2000), MIDDLE(3000), LATE(4000) + * HTP core provides some replacement functions for op package to use + * for more information about optimization rules, please refer to HTP core documentations + */ + +/* + * op parameter order definitions + * need to be global in the package + * one definition per op, and this is optional + * syntax: DEF_PACKAGE_PARAM_ORDER(OP,PARAM1,MANDATORY1,DEFAULT1,PARAM2,MANDATORY2,DEFAULT2...) + * one or more parameters can be specified for each op + * order of parameters listed determines the order of parameters passed into op execution functions + * if an op does not have a parameter order definition, parameter order passed into Qnn_addNode + * will be passed into op execution functions + * if an op has a parameter order definition, any parameter passed into Qnn_addNode with unlisted + * name will be abandoned + * if two or more op packages with the same package name will be registered, they cannot list + * conflicting parameter orders + * PARAM refers to parameter name as a string literal + * MANDATORY refers to whether this parameter is required to be provided at Qnn_addNode + * DEFAULT is used when MANDATORY is false + * if provided as Qnn_Param_t*, + * DEFAULT will be used for graph construction when this parameter is not provided at + * Qnn_addNode + * if provided as nullptr, + * graph construction will skip this parameter when this parameter is not provided at + * Qnn_addNode + */ +DEF_PACKAGE_PARAM_ORDER("LLaMADequantize", + "scale", + true, + nullptr) + +#ifndef REFERENCE_OP +/* execute functions for ops */ +#include "qhmath_hvx.h" +#include "hvx_internal.h" +#include 
+#include + +#define BLOCK_SIZE (8 * 1024 / VLEN) /* vector chunks */ +#define L2FETCH_AHEAD (BLOCK_SIZE) + +static inline int32_t float_to_fp16s(float input) { + union { + int32_t i; + __fp16 f[2]; + } fp32 = {.f = {(__fp16)input, (__fp16)input}}; + return fp32.i; +} + +static HVX_INLINE_ALWAYS uint32_t float_to_bits(float x) { + union { + float f; + uint32_t i; + } fp32 = {.f = x}; + return fp32.i; +} + +/* execute functions for ops */ +int32_t qhmath_hvx_dequantize_ahf( + int8_t *restrict input, + int8_t *restrict output, + uint32_t size, + float scale) { + if ((input == NULL) || (output == NULL) || (size == 0)) { + return -1; + } + + HVX_Vector *iptr = (HVX_Vector *)input; + HVX_UVector *optr = (HVX_UVector *)output; + + HVX_Vector sline1p, sline1c, sline1; + HVX_Vector scale_vec; + + int32_t block, l2fetch_block; + int32_t leftover = size & 127; + int32_t vectors_in_rounddown = size / 128; // element number! + // int32_t leftover_size = leftover * sizeof(float); + + sline1p = *iptr++; + + uint32_t convert = 0x00800080; + HVX_Vector convert_vector = Q6_V_vsplat_R(convert); + + scale_vec = Q6_V_vsplat_R(float_to_fp16s(scale)); + HVX_Vector zero_v_sf = Q6_V_vzero(); + + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { + block = Q6_R_min_RR(i, BLOCK_SIZE); + l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); + + if (l2fetch_block > 0) { + l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); + } + + for (int32_t j = 0; j < block; ++j) { + sline1c = *iptr++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); + HVX_VectorPair temp = Q6_Wh_vadd_VubVub(sline1, zero_v_sf); + + temp = Q6_W_vshuff_VVR(Q6_V_hi_W(temp), Q6_V_lo_W(temp), -2); + HVX_Vector sout1 = Q6_Vh_vsub_VhVh(Q6_V_lo_W(temp), convert_vector); + HVX_Vector sout2 = Q6_Vh_vsub_VhVh(Q6_V_hi_W(temp), convert_vector); + + *optr++ = Q6_Vhf_equals_Vqf16(Q6_Vqf16_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout1), scale_vec)); + *optr++ = 
Q6_Vhf_equals_Vqf16(Q6_Vqf16_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout2), scale_vec)); + + sline1p = sline1c; + } + } + + if (vectors_in_rounddown > 0) { + sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? sline1p : *iptr++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); + + HVX_VectorPair temp = Q6_Wh_vadd_VubVub(sline1, zero_v_sf); + + temp = Q6_W_vshuff_VVR(Q6_V_hi_W(temp), Q6_V_lo_W(temp), -2); + HVX_Vector sout1 = Q6_Vh_vsub_VhVh(Q6_V_lo_W(temp), convert_vector); + HVX_Vector sout2 = Q6_Vh_vsub_VhVh(Q6_V_hi_W(temp), convert_vector); + + *optr++ = Q6_Vhf_equals_Vqf16(Q6_Vqf16_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout1), scale_vec)); + *optr++ = Q6_Vhf_equals_Vqf16(Q6_Vqf16_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout2), scale_vec)); + } + + return 0; +} + +int32_t qhmath_hvx_dequantize_ui16_ahf( + int8_t *restrict input, + int8_t *restrict output, + uint32_t size, + float scale) { + if ((input == NULL) || (output == NULL) || (size == 0)) { + return -1; + } + + HVX_Vector *iptr = (HVX_Vector *)input; + HVX_UVector *optr = (HVX_UVector *)output; + + HVX_Vector sline1p, sline1c, sline1; + HVX_Vector scale_vec; + + int32_t block, l2fetch_block; + int32_t leftover = size & 63; + int32_t vectors_in_rounddown = size / 64; // element number! 
+ // int32_t leftover_size = leftover * sizeof(float); + + sline1p = *iptr++; + + uint32_t convert = 0x80008000; + HVX_Vector convert_vector = Q6_V_vsplat_R(convert); + + scale_vec = Q6_V_vsplat_R(float_to_fp16s(scale)); + + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { + block = Q6_R_min_RR(i, BLOCK_SIZE); + l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); + + if (l2fetch_block > 0) { + l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); + } + + for (int32_t j = 0; j < block; ++j) { + sline1c = *iptr++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); + HVX_Vector temp = sline1; + + HVX_Vector sout1 = Q6_Vh_vsub_VhVh(temp, convert_vector); + *optr++ = Q6_Vhf_equals_Vqf16(Q6_Vqf16_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout1), scale_vec)); + + sline1p = sline1c; + } + } + + if (vectors_in_rounddown > 0) { + sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? sline1p : *iptr++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); + + HVX_Vector temp = sline1; + + HVX_Vector sout1 = Q6_Vh_vsub_VhVh(temp, convert_vector); + *optr++ = Q6_Vhf_equals_Vqf16(Q6_Vqf16_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout1), scale_vec)); + } + + return 0; +} + +// Only support 128x dimension +int32_t qhmath_hvx_dequantize_af( + int8_t *restrict input, + int8_t *restrict output, + uint32_t size, + float scale) { + if ((input == NULL) || (output == NULL) || (size == 0)) { + return -1; + } + + HVX_Vector *iptr = (HVX_Vector *)input; + HVX_UVector *optr = (HVX_UVector *)output; + + HVX_Vector sline1p, sline1c, sline1; + HVX_Vector scale_vec; + HVX_Vector one_vec; + + int32_t block, l2fetch_block; + int32_t leftover = size & 127; + int32_t vectors_in_rounddown = size / 128; + // int32_t leftover_size = leftover * sizeof(float); + + sline1p = *iptr++; + + uint32_t convert = 0x00800080; + HVX_Vector convert_vector = Q6_V_vsplat_R(convert); + + scale_vec = Q6_V_vsplat_R(float_to_bits(scale)); + one_vec = Q6_V_vsplat_R(float_to_fp16s(1.0)); + 
HVX_Vector zero_v_sf = Q6_V_vzero(); + scale_vec = Q6_Vqf32_vadd_VsfVsf(scale_vec, Q6_V_vzero()); + + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { + block = Q6_R_min_RR(i, BLOCK_SIZE); + l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); + + if (l2fetch_block > 0) { + l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); + } + + for (int32_t j = 0; j < block; ++j) { + sline1c = *iptr++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); + HVX_VectorPair temp = Q6_Wh_vadd_VubVub(sline1, zero_v_sf); + + temp = Q6_W_vshuff_VVR(Q6_V_hi_W(temp), Q6_V_lo_W(temp), -2); + HVX_Vector sout1 = Q6_Vh_vsub_VhVh(Q6_V_lo_W(temp), convert_vector); + HVX_Vector sout2 = Q6_Vh_vsub_VhVh(Q6_V_hi_W(temp), convert_vector); + + HVX_VectorPair result1 = Q6_Wqf32_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout1), one_vec); + result1 = Q6_W_vshuff_VVR(Q6_V_hi_W(result1), Q6_V_lo_W(result1), -4); + + HVX_VectorPair result2 = Q6_Wqf32_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout2), one_vec); + result2 = Q6_W_vshuff_VVR(Q6_V_hi_W(result2), Q6_V_lo_W(result2), -4); + + *optr++ = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result1), scale_vec)); + *optr++ = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result1), scale_vec)); + *optr++ = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result2), scale_vec)); + *optr++ = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result2), scale_vec)); + + sline1p = sline1c; + } + } + + if (vectors_in_rounddown > 0) { + sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? 
sline1p : *iptr++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); + + HVX_VectorPair temp = Q6_Wh_vadd_VubVub(sline1, zero_v_sf); + + temp = Q6_W_vshuff_VVR(Q6_V_hi_W(temp), Q6_V_lo_W(temp), -2); + HVX_Vector sout1 = Q6_Vh_vsub_VhVh(Q6_V_lo_W(temp), convert_vector); + HVX_Vector sout2 = Q6_Vh_vsub_VhVh(Q6_V_hi_W(temp), convert_vector); + + HVX_VectorPair result1 = Q6_Wqf32_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout1), one_vec); + result1 = Q6_W_vshuff_VVR(Q6_V_hi_W(result1), Q6_V_lo_W(result1), -4); + + HVX_VectorPair result2 = Q6_Wqf32_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout2), one_vec); + result2 = Q6_W_vshuff_VVR(Q6_V_hi_W(result2), Q6_V_lo_W(result2), -4); + + *optr++ = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result1), scale_vec)); + *optr++ = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result1), scale_vec)); + *optr++ = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result2), scale_vec)); + *optr++ = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result2), scale_vec)); + } + + return 0; +} + +int32_t qhmath_hvx_dequantize_ui16_af( + int8_t *restrict input, + int8_t *restrict output, + uint32_t size, + float scale) { + if ((input == NULL) || (output == NULL) || (size == 0)) { + return -1; + } + + HVX_Vector *iptr = (HVX_Vector *)input; + HVX_UVector *optr = (HVX_UVector *)output; + + HVX_Vector sline1p, sline1c, sline1; + HVX_Vector scale_vec; + HVX_Vector one_vec; + + int32_t block, l2fetch_block; + int32_t leftover = size & 63; + int32_t vectors_in_rounddown = size / 64; + // int32_t leftover_size = leftover * sizeof(float); + + sline1p = *iptr++; + + uint32_t convert = 0x80008000; + HVX_Vector convert_vector = Q6_V_vsplat_R(convert); + + scale_vec = Q6_V_vsplat_R(float_to_bits(scale)); + one_vec = Q6_V_vsplat_R(float_to_fp16s(1.0)); + scale_vec = Q6_Vqf32_vadd_VsfVsf(scale_vec, Q6_V_vzero()); + + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { + block = Q6_R_min_RR(i, BLOCK_SIZE); + l2fetch_block 
= Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); + + if (l2fetch_block > 0) { + l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); + } + + for (int32_t j = 0; j < block; ++j) { + sline1c = *iptr++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); + + HVX_Vector temp = Q6_Vh_vsub_VhVh(sline1, convert_vector); + HVX_VectorPair result = Q6_Wqf32_vmpy_VhfVhf(Q6_Vhf_equals_Vh(temp), one_vec); + result = Q6_W_vshuff_VVR(Q6_V_hi_W(result), Q6_V_lo_W(result), -4); + *optr++ = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result), scale_vec)); + *optr++ = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result), scale_vec)); + + sline1p = sline1c; + } + } + + if (vectors_in_rounddown > 0) { + sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? sline1p : *iptr++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); + + HVX_Vector temp = Q6_Vh_vsub_VhVh(sline1, convert_vector); + HVX_VectorPair result = Q6_Wqf32_vmpy_VhfVhf(Q6_Vhf_equals_Vh(temp), one_vec); + result = Q6_W_vshuff_VVR(Q6_V_hi_W(result), Q6_V_lo_W(result), -4); + *optr++ = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result), scale_vec)); + *optr++ = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result), scale_vec)); + } + + return 0; +} + +template +GraphStatus llamadequantizeImpl(TensorType1 &out_0, + const TensorType1 &in_0, + const PlainFloatTensor &scale) + +{ + /* + * add code here + * */ + /* + * To have good performance and stability, it is required to avoid heap memory + * allocation in this function. The heap memory allocation includes but not + * limited to calling malloc, operator new, constructing STL container objects + * like std::vector with default allocator, and adding items like calling + * std::vector::push_back to STL container objects with default allocator. + * + * Please check in SDK documentation for more information. 
+ */ + + // HVX Method -- FP32 Version + out_0.set_dims(in_0); + + // NHWC + auto in_ptr = (int8_t *)in_0.raw_data_const(); + auto out_ptr = (int8_t *)out_0.raw_data(); + auto [b_in, h_in, w_in, d_in] = in_0.dims(); + + float scale_ = scale(0, 0, 0, 0); + + size_t size = b_in * h_in * w_in * d_in; + + if (in_0.get_dtype() == DType::QUInt8 && out_0.get_dtype() == DType::Float16) { + qhmath_hvx_dequantize_ahf(in_ptr, out_ptr, size, scale_); + } else if (in_0.get_dtype() == DType::QUInt16 && out_0.get_dtype() == DType::Float16) { + qhmath_hvx_dequantize_ui16_ahf(in_ptr, out_ptr, size, scale_); + } else if (in_0.get_dtype() == DType::QUInt16 && out_0.get_dtype() == DType::Float32) { + qhmath_hvx_dequantize_ui16_af(in_ptr, out_ptr, size, scale_); + } else { + qhmath_hvx_dequantize_af(in_ptr, out_ptr, size, scale_); + } + + return GraphStatus::Success; +} +#else +template +GraphStatus llamadequantizeImpl(TensorType1 &out_0, + const TensorType1 &in_0, + const PlainFloatTensor &scale) + +{ + /* + * add code here + * */ + /* + * To have good performance and stability, it is required to avoid heap memory + * allocation in this function. The heap memory allocation includes but not + * limited to calling malloc, operator new, constructing STL container objects + * like std::vector with default allocator, and adding items like calling + * std::vector::push_back to STL container objects with default allocator. + * + * Please check in SDK documentation for more information. 
+ */ + + // HVX Method -- FP32 Version + out_0.set_dims(in_0); + + float scale_ = scale(0, 0, 0, 0); + auto [b_in, h_in, w_in, d_in] = in_0.dims(); + + if (in_0.get_dtype() == DType::QUInt8 && out_0.get_dtype() == DType::Float32) { + auto out_ptr = (float *)out_0.raw_data(); + auto in_ptr = (uint8_t *)in_0.raw_data_const(); + + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; h++) { + for (Idx w = 0; w < w_in; w++) { + for (Idx d = 0; d < d_in; d++) { + int32_t inval = static_cast(*in_ptr++); + *out_ptr++ = (inval - 128) * scale_; + } + } + } + } + } else if (in_0.get_dtype() == DType::QUInt16 && out_0.get_dtype() == DType::Float32) { + auto out_ptr = (float *)out_0.raw_data(); + auto in_ptr = (uint16_t *)in_0.raw_data_const(); + + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; h++) { + for (Idx w = 0; w < w_in; w++) { + for (Idx d = 0; d < d_in; d++) { + int32_t inval = static_cast(*in_ptr++); + *out_ptr++ = (inval - 32768) * scale_; + } + } + } + } + } else if (in_0.get_dtype() == DType::QUInt16 && out_0.get_dtype() == DType::Float16) { + auto out_ptr = (__fp16 *)out_0.raw_data(); + auto in_ptr = (uint16_t *)in_0.raw_data_const(); + + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; h++) { + for (Idx w = 0; w < w_in; w++) { + for (Idx d = 0; d < d_in; d++) { + int32_t inval = static_cast(*in_ptr++); + *out_ptr++ = (__fp16)((inval - 32768) * scale_); + } + } + } + } + } else if (in_0.get_dtype() == DType::QUInt8 && out_0.get_dtype() == DType::Float16) { + auto out_ptr = (__fp16 *)out_0.raw_data(); + auto in_ptr = (uint8_t *)in_0.raw_data_const(); + + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; h++) { + for (Idx w = 0; w < w_in; w++) { + for (Idx d = 0; d < d_in; d++) { + int32_t inval = static_cast(*in_ptr++); + *out_ptr++ = (__fp16)((inval - 128) * scale_); + } + } + } + } + } + + return GraphStatus::Success; +} + +#endif + +__attribute__((unused)) static float llamadequantizeCostFunc(const Op *op) { + /* + * 
add code here + * */ + + float cost = 0.0; // add cost computation here + return cost; +} + +/* At the bottom of the op file, call END_PKG_OP_DEFINITION(), + where is as BEGIN_PKG_OP_DEFINITION +*/ +END_PKG_OP_DEFINITION(PKG_LLaMADequantize); \ No newline at end of file diff --git a/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMADequantizeAdd.cpp b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMADequantizeAdd.cpp new file mode 100755 index 000000000..86158e228 --- /dev/null +++ b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMADequantizeAdd.cpp @@ -0,0 +1,694 @@ +//============================================================================== +// Auto Generated Code for LLaMAPackage +//============================================================================== + +#include "HTP/core/constraints.h" +#include "HTP/core/op_package_feature_support.h" +#include "HTP/core/op_register_ext.h" +#include "HTP/core/optimize.h" +#include "QnnOpPackage.h" +#include "HTP/core/simple_reg.h" + +BEGIN_PKG_OP_DEFINITION(PKG_LLaMADequantizeAdd); + +// op execute function declarations +template +GraphStatus llamadequantizeaddImpl(TensorType1 &out_0, + const TensorType1 &in_0, + const TensorType &in_1, + const PlainFloatTensor &scale); + +// forward declaration of sample cost function +static float llamadequantizeaddCostFunc(const Op *op); + +/* + * method 1 for defining op, using default cost value (i.e. GLACIAL) and default flag (Flags::RESOURCE_HVX) + * syntax: DEF_PACKAGE_OP(F,OP) + * e.g. DEF_PACKAGE_OP((llamadequantizeaddImpl), "LLaMADequantizeAdd") + */ +DEF_PACKAGE_OP((llamadequantizeaddImpl), "LLaMADequantizeAdd") + +/* + * method 2 for defining op with specified cost value (one of GLACIAL, SNAIL, FAST, FREE) + * and provided flags + * syntax: DEF_PACKAGE_OP_AND_COST_AND_FLAGS(F,OP,COST,...) 
+ * can use zero or more flags, FLAG options are IS_CONST, INHIBIT_CONST_PROP, + * RESOURCE_HVX, RESOURCE_HMX(not supported in external op packages) + * e.g. DEF_PACKAGE_OP_AND_COST_AND_FLAGS((llamadequantizeaddImpl), "LLaMADequantizeAdd", SNAIL) + */ + +/* + * method 3 for defining op with cost function pointer and provided flags + * cost function pointer type: typedef float (*cost_function) (const Op * op); + * syntax: DEF_PACKAGE_OP_AND_COST_F_AND_FLAGS(F,OP,COST_F,...) + * e.g. DEF_PACKAGE_OP_AND_COST_F_AND_FLAGS((llamadequantizeaddImpl), + * "LLaMADequantizeAdd", llamadequantizeaddCostFunc, Flags::RESOURCE_HVX) + */ + +/* + * optimization definitions + * need to be global in the package + * one definition per optimization + * syntax: DEF_PACKAGE_OPTIMIZATION(PRIORITY,MATCHCODE,CONSTRAINTCODE,REPLACECODE) + * PRIORITY predefined values include EARLY(2000), MIDDLE(3000), LATE(4000) + * HTP core provides some replacement functions for op package to use + * for more information about optimization rules, please refer to HTP core documentations + */ + +/* + * op parameter order definitions + * need to be global in the package + * one definition per op, and this is optional + * syntax: DEF_PACKAGE_PARAM_ORDER(OP,PARAM1,MANDATORY1,DEFAULT1,PARAM2,MANDATORY2,DEFAULT2...) 
+ * one or more parameters can be specified for each op + * order of parameters listed determines the order of parameters passed into op execution functions + * if an op does not have a parameter order definition, parameter order passed into Qnn_addNode + * will be passed into op execution functions + * if an op has a parameter order definition, any parameter passed into Qnn_addNode with unlisted + * name will be abandoned + * if two or more op packages with the same package name will be registered, they cannot list + * conflicting parameter orders + * PARAM refers to parameter name as a string literal + * MANDATORY refers to whether this parameter is required to be provided at Qnn_addNode + * DEFAULT is used when MANDATORY is false + * if provided as Qnn_Param_t*, + * DEFAULT will be used for graph construction when this parameter is not provided at + * Qnn_addNode + * if provided as nullptr, + * graph construction will skip this parameter when this parameter is not provided at + * Qnn_addNode + */ +DEF_PACKAGE_PARAM_ORDER("LLaMADequantizeAdd", + "scale", + true, + nullptr) + +/* execute functions for ops */ +#ifndef REFERENCE_OP +#include "qhmath_hvx.h" +#include "hvx_internal.h" +#include +#include + +#define BLOCK_SIZE (8 * 1024 / VLEN) /* vector chunks */ +#define L2FETCH_AHEAD (BLOCK_SIZE) + +static inline int32_t float_to_fp16s(float input) { + union { + int32_t i; + __fp16 f[2]; + } fp32 = {.f = {(__fp16)input, (__fp16)input}}; + return fp32.i; +} + +static HVX_INLINE_ALWAYS uint32_t float_to_bits(float x) { + union { + float f; + uint32_t i; + } fp32 = {.f = x}; + return fp32.i; +} + +/* execute functions for ops */ +int32_t qhmath_hvx_dequantize_add_ahf( + int8_t *restrict input, + float_t *restrict bias, + int8_t *restrict output, + uint32_t size, + float scale) { + if ((input == NULL) || (bias == NULL) || (output == NULL) || (size == 0)) { + return -1; + } + + HVX_Vector *iptr = (HVX_Vector *)input; + HVX_Vector *bptr = (HVX_Vector *)bias; + HVX_UVector 
*optr = (HVX_UVector *)output; + + HVX_Vector sline1p, sline1c, sline1; + HVX_Vector scale_vec; + + int32_t block, l2fetch_block; + int32_t leftover = size & 127; + int32_t vectors_in_rounddown = size / 128; // element number! + // int32_t leftover_size = leftover * sizeof(float); + + sline1p = *iptr++; + + uint32_t convert = 0x00800080; + HVX_Vector convert_vector = Q6_V_vsplat_R(convert); + + scale_vec = Q6_V_vsplat_R(float_to_fp16s(scale)); + HVX_Vector zero_v_sf = Q6_V_vzero(); + + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { + block = Q6_R_min_RR(i, BLOCK_SIZE); + l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); + + if (l2fetch_block > 0) { + l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); + } + + for (int32_t j = 0; j < block; ++j) { + sline1c = *iptr++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); + HVX_VectorPair temp = Q6_Wh_vadd_VubVub(sline1, zero_v_sf); + + temp = Q6_W_vshuff_VVR(Q6_V_hi_W(temp), Q6_V_lo_W(temp), -2); + HVX_Vector sout1 = Q6_Vh_vsub_VhVh(Q6_V_lo_W(temp), convert_vector); + HVX_Vector sout2 = Q6_Vh_vsub_VhVh(Q6_V_hi_W(temp), convert_vector); + + HVX_Vector bvec1 = *bptr++; // load 32 float elements + HVX_Vector bvec2 = *bptr++; + // see HVX documention for Vector shuffle and deal cross-lane + // Q6_Vhf_equals_Wqf32 will use elements in the lower vector as the odd elements, so need to transpose here + HVX_VectorPair bias_pair = Q6_W_vdeal_VVR(Q6_Vqf32_equals_Vsf(bvec2), Q6_Vqf32_equals_Vsf(bvec1), -4); // make a fp32 pair + HVX_Vector hf16_bias = Q6_Vhf_equals_Wqf32(bias_pair); + + *optr++ = Q6_Vhf_equals_Vqf16(Q6_Vqf16_vadd_Vqf16Vhf( + Q6_Vqf16_vmpy_VhfVhf( + Q6_Vhf_equals_Vh(sout1), scale_vec), + hf16_bias)); + + bvec1 = *bptr++; // load 32 float elements + bvec2 = *bptr++; + // see HVX documention for Vector shuffle and deal cross-lane + // Q6_Vhf_equals_Wqf32 will use elements in the lower vector as the odd elements, so need to transpose here + bias_pair = 
Q6_W_vdeal_VVR(Q6_Vqf32_equals_Vsf(bvec2), Q6_Vqf32_equals_Vsf(bvec1), -4); // make a fp32 pair + hf16_bias = Q6_Vhf_equals_Wqf32(bias_pair); + + *optr++ = Q6_Vhf_equals_Vqf16(Q6_Vqf16_vadd_Vqf16Vhf( + Q6_Vqf16_vmpy_VhfVhf( + Q6_Vhf_equals_Vh(sout2), scale_vec), + hf16_bias)); + + sline1p = sline1c; + } + } + + if (vectors_in_rounddown > 0) { + sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? sline1p : *iptr++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); + + HVX_VectorPair temp = Q6_Wh_vadd_VubVub(sline1, zero_v_sf); + + temp = Q6_W_vshuff_VVR(Q6_V_hi_W(temp), Q6_V_lo_W(temp), -2); + HVX_Vector sout1 = Q6_Vh_vsub_VhVh(Q6_V_lo_W(temp), convert_vector); + HVX_Vector sout2 = Q6_Vh_vsub_VhVh(Q6_V_hi_W(temp), convert_vector); + + HVX_Vector bvec1 = *bptr++; // load 32 float elements + HVX_Vector bvec2 = *bptr++; + HVX_VectorPair bias_pair = Q6_W_vshuff_VVR(Q6_Vqf32_equals_Vsf(bvec1), Q6_Vqf32_equals_Vsf(bvec2), -4); // make a fp32 pair + HVX_Vector hf16_bias = Q6_Vhf_equals_Wqf32(bias_pair); + + *optr++ = Q6_Vhf_equals_Vqf16(Q6_Vqf16_vadd_Vqf16Vhf( + Q6_Vqf16_vmpy_VhfVhf( + Q6_Vhf_equals_Vh(sout1), scale_vec), + hf16_bias)); + + bvec1 = *bptr++; // load 32 float elements + bvec2 = *bptr++; + bias_pair = Q6_W_vshuff_VVR(Q6_Vqf32_equals_Vsf(bvec1), Q6_Vqf32_equals_Vsf(bvec2), -4); // make a fp32 pair + hf16_bias = Q6_Vhf_equals_Wqf32(bias_pair); + + *optr++ = Q6_Vhf_equals_Vqf16(Q6_Vqf16_vadd_Vqf16Vhf( + Q6_Vqf16_vmpy_VhfVhf( + Q6_Vhf_equals_Vh(sout2), scale_vec), + hf16_bias)); + } + + return 0; +} + +int32_t qhmath_hvx_dequantize_add_ui16_ahf( + int8_t *restrict input, + float_t *restrict bias, + int8_t *restrict output, + uint32_t size, + float scale) { + if ((input == NULL) || (bias == NULL) || (output == NULL) || (size == 0)) { + return -1; + } + + HVX_Vector *iptr = (HVX_Vector *)input; + HVX_Vector *bptr = (HVX_Vector *)bias; + HVX_UVector *optr = (HVX_UVector *)output; + + HVX_Vector sline1p, sline1c, sline1; + HVX_Vector scale_vec; + + int32_t 
block, l2fetch_block; + int32_t leftover = size & 63; + int32_t vectors_in_rounddown = size / 64; // element number! + // int32_t leftover_size = leftover * sizeof(float); + + sline1p = *iptr++; + + uint32_t convert = 0x80008000; + HVX_Vector convert_vector = Q6_V_vsplat_R(convert); + + scale_vec = Q6_V_vsplat_R(float_to_fp16s(scale)); + + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { + block = Q6_R_min_RR(i, BLOCK_SIZE); + l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); + + if (l2fetch_block > 0) { + l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); + } + + for (int32_t j = 0; j < block; ++j) { + sline1c = *iptr++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); + + HVX_Vector bvec1 = *bptr++; // load 32 float elements + HVX_Vector bvec2 = *bptr++; + // see HVX documention for Vector shuffle and deal cross-lane + // Q6_Vhf_equals_Wqf32 will use elements in the lower vector as the odd elements, so need to transpose here + HVX_VectorPair bias_pair = Q6_W_vdeal_VVR(Q6_Vqf32_equals_Vsf(bvec2), Q6_Vqf32_equals_Vsf(bvec1), -4); // make a fp32 pair + HVX_Vector hf16_bias = Q6_Vhf_equals_Wqf32(bias_pair); + + HVX_Vector temp = sline1; + + HVX_Vector sout1 = Q6_Vh_vsub_VhVh(temp, convert_vector); + HVX_Vector qf16_val = Q6_Vqf16_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout1), scale_vec); + + *optr++ = Q6_Vhf_equals_Vqf16( + Q6_Vqf16_vadd_Vqf16Vhf(qf16_val, hf16_bias)); + + sline1p = sline1c; + } + } + + if (vectors_in_rounddown > 0) { + sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? 
sline1p : *iptr++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); + + HVX_Vector bvec1 = *bptr++; // load 32 float elements + HVX_Vector bvec2 = *bptr++; + + // see HVX documention for Vector shuffle and deal cross-lane + // Q6_Vhf_equals_Wqf32 will use elements in the lower vector as the odd elements, so need to transpose here + HVX_VectorPair bias_pair = Q6_W_vdeal_VVR(Q6_Vqf32_equals_Vsf(bvec2), Q6_Vqf32_equals_Vsf(bvec1), -4); // make a fp32 pair + HVX_Vector hf16_bias = Q6_Vhf_equals_Wqf32(bias_pair); + + HVX_Vector temp = sline1; + + HVX_Vector sout1 = Q6_Vh_vsub_VhVh(temp, convert_vector); + HVX_Vector qf16_val = Q6_Vqf16_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout1), scale_vec); + + *optr++ = Q6_Vhf_equals_Vqf16( + Q6_Vqf16_vadd_Vqf16Vhf(qf16_val, hf16_bias)); + } + + return 0; +} + +// Only support 128x dimension +int32_t qhmath_hvx_dequantize_add_af( + int8_t *restrict input, + float_t *restrict bias, + int8_t *restrict output, + uint32_t size, + float scale) { + if ((input == NULL) || (bias == NULL) || (output == NULL) || (size == 0)) { + return -1; + } + + HVX_Vector *iptr = (HVX_Vector *)input; + HVX_Vector *bptr = (HVX_Vector *)bias; + HVX_UVector *optr = (HVX_UVector *)output; + + HVX_Vector sline1p, sline1c, sline1; + HVX_Vector scale_vec; + HVX_Vector one_vec; + + int32_t block, l2fetch_block; + int32_t leftover = size & 127; + int32_t vectors_in_rounddown = size / 128; + // int32_t leftover_size = leftover * sizeof(float); + + sline1p = *iptr++; + + uint32_t convert = 0x00800080; + HVX_Vector convert_vector = Q6_V_vsplat_R(convert); + + scale_vec = Q6_V_vsplat_R(float_to_bits(scale)); + one_vec = Q6_V_vsplat_R(float_to_fp16s(1.0)); + HVX_Vector zero_v_sf = Q6_V_vzero(); + scale_vec = Q6_Vqf32_vadd_VsfVsf(scale_vec, Q6_V_vzero()); + + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { + block = Q6_R_min_RR(i, BLOCK_SIZE); + l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); + + if (l2fetch_block > 0) { + l2fetch(iptr 
+ L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); + } + + for (int32_t j = 0; j < block; ++j) { + sline1c = *iptr++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); + HVX_Vector bvec1 = *bptr++; // load 32 float elements + HVX_Vector bvec2 = *bptr++; + HVX_Vector bvec3 = *bptr++; + HVX_Vector bvec4 = *bptr++; + + HVX_VectorPair temp = Q6_Wh_vadd_VubVub(sline1, zero_v_sf); + + temp = Q6_W_vshuff_VVR(Q6_V_hi_W(temp), Q6_V_lo_W(temp), -2); + HVX_Vector sout1 = Q6_Vh_vsub_VhVh(Q6_V_lo_W(temp), convert_vector); + HVX_Vector sout2 = Q6_Vh_vsub_VhVh(Q6_V_hi_W(temp), convert_vector); + + HVX_VectorPair result1 = Q6_Wqf32_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout1), one_vec); + result1 = Q6_W_vshuff_VVR(Q6_V_hi_W(result1), Q6_V_lo_W(result1), -4); + + HVX_VectorPair result2 = Q6_Wqf32_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout2), one_vec); + result2 = Q6_W_vshuff_VVR(Q6_V_hi_W(result2), Q6_V_lo_W(result2), -4); + + *optr++ = Q6_Vsf_equals_Vqf32( + Q6_Vqf32_vadd_Vqf32Vqf32( + Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result1), scale_vec), + Q6_Vqf32_equals_Vsf(bvec1))); + *optr++ = Q6_Vsf_equals_Vqf32( + Q6_Vqf32_vadd_Vqf32Vqf32( + Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result1), scale_vec), + Q6_Vqf32_equals_Vsf(bvec2))); + + *optr++ = Q6_Vsf_equals_Vqf32( + Q6_Vqf32_vadd_Vqf32Vqf32( + Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result2), scale_vec), + Q6_Vqf32_equals_Vsf(bvec3))); + + *optr++ = Q6_Vsf_equals_Vqf32( + Q6_Vqf32_vadd_Vqf32Vqf32( + Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result2), scale_vec), + Q6_Vqf32_equals_Vsf(bvec4))); + + sline1p = sline1c; + } + } + + if (vectors_in_rounddown > 0) { + sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? 
sline1p : *iptr++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); + + // NOTE: assume bias size is multiple of 128 + HVX_Vector bvec1 = *bptr++; // load 32 float elements + HVX_Vector bvec2 = *bptr++; + HVX_Vector bvec3 = *bptr++; + HVX_Vector bvec4 = *bptr++; + + HVX_VectorPair temp = Q6_Wh_vadd_VubVub(sline1, zero_v_sf); + + temp = Q6_W_vshuff_VVR(Q6_V_hi_W(temp), Q6_V_lo_W(temp), -2); + HVX_Vector sout1 = Q6_Vh_vsub_VhVh(Q6_V_lo_W(temp), convert_vector); + HVX_Vector sout2 = Q6_Vh_vsub_VhVh(Q6_V_hi_W(temp), convert_vector); + + HVX_VectorPair result1 = Q6_Wqf32_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout1), one_vec); + result1 = Q6_W_vshuff_VVR(Q6_V_hi_W(result1), Q6_V_lo_W(result1), -4); + + HVX_VectorPair result2 = Q6_Wqf32_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout2), one_vec); + result2 = Q6_W_vshuff_VVR(Q6_V_hi_W(result2), Q6_V_lo_W(result2), -4); + + *optr++ = Q6_Vsf_equals_Vqf32( + Q6_Vqf32_vadd_Vqf32Vqf32( + Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result1), scale_vec), + Q6_Vqf32_equals_Vsf(bvec1))); + *optr++ = Q6_Vsf_equals_Vqf32( + Q6_Vqf32_vadd_Vqf32Vqf32( + Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result1), scale_vec), + Q6_Vqf32_equals_Vsf(bvec2))); + + *optr++ = Q6_Vsf_equals_Vqf32( + Q6_Vqf32_vadd_Vqf32Vqf32( + Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result2), scale_vec), + Q6_Vqf32_equals_Vsf(bvec3))); + + *optr++ = Q6_Vsf_equals_Vqf32( + Q6_Vqf32_vadd_Vqf32Vqf32( + Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result2), scale_vec), + Q6_Vqf32_equals_Vsf(bvec4))); + } + + return 0; +} + +int32_t qhmath_hvx_dequantize_add_ui16_af( + int8_t *restrict input, + float_t *restrict bias, + int8_t *restrict output, + uint32_t size, + float scale) { + if ((input == NULL) || (bias == NULL) || (output == NULL) || (size == 0)) { + return -1; + } + + HVX_Vector *iptr = (HVX_Vector *)input; + HVX_Vector *bptr = (HVX_Vector *)bias; + HVX_UVector *optr = (HVX_UVector *)output; + + HVX_Vector sline1p, sline1c, sline1; + HVX_Vector scale_vec; + HVX_Vector one_vec; + + int32_t block, 
l2fetch_block; + int32_t leftover = size & 63; + int32_t vectors_in_rounddown = size / 64; + // int32_t leftover_size = leftover * sizeof(float); + + sline1p = *iptr++; + + uint32_t convert = 0x80008000; + HVX_Vector convert_vector = Q6_V_vsplat_R(convert); + + scale_vec = Q6_V_vsplat_R(float_to_bits(scale)); + one_vec = Q6_V_vsplat_R(float_to_fp16s(1.0)); + scale_vec = Q6_Vqf32_vadd_VsfVsf(scale_vec, Q6_V_vzero()); + + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { + block = Q6_R_min_RR(i, BLOCK_SIZE); + l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); + + if (l2fetch_block > 0) { + l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); + } + + for (int32_t j = 0; j < block; ++j) { + sline1c = *iptr++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); + + HVX_Vector bvec1 = *bptr++; // load 32 float elements + HVX_Vector bvec2 = *bptr++; + + HVX_Vector temp = Q6_Vh_vsub_VhVh(sline1, convert_vector); + HVX_VectorPair result = Q6_Wqf32_vmpy_VhfVhf(Q6_Vhf_equals_Vh(temp), one_vec); + result = Q6_W_vshuff_VVR(Q6_V_hi_W(result), Q6_V_lo_W(result), -4); + *optr++ = Q6_Vsf_equals_Vqf32( + Q6_Vqf32_vadd_Vqf32Vqf32( + Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result), scale_vec), Q6_Vqf32_equals_Vsf(bvec1))); + *optr++ = Q6_Vsf_equals_Vqf32( + Q6_Vqf32_vadd_Vqf32Vqf32( + Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result), scale_vec), Q6_Vqf32_equals_Vsf(bvec2))); + + sline1p = sline1c; + } + } + + if (vectors_in_rounddown > 0) { + sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? 
sline1p : *iptr++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); + + HVX_Vector bvec1 = *bptr++; // load 32 float elements + HVX_Vector bvec2 = *bptr++; + + HVX_Vector temp = Q6_Vh_vsub_VhVh(sline1, convert_vector); + HVX_VectorPair result = Q6_Wqf32_vmpy_VhfVhf(Q6_Vhf_equals_Vh(temp), one_vec); + result = Q6_W_vshuff_VVR(Q6_V_hi_W(result), Q6_V_lo_W(result), -4); + *optr++ = Q6_Vsf_equals_Vqf32( + Q6_Vqf32_vadd_Vqf32Vqf32( + Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result), scale_vec), Q6_Vqf32_equals_Vsf(bvec1))); + *optr++ = Q6_Vsf_equals_Vqf32( + Q6_Vqf32_vadd_Vqf32Vqf32( + Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result), scale_vec), Q6_Vqf32_equals_Vsf(bvec2))); + } + + return 0; +} + +template +GraphStatus llamadequantizeaddImpl(TensorType1 &out_0, + const TensorType1 &in_0, + const TensorType &in_1, + const PlainFloatTensor &scale) + +{ + /* + * add code here + * */ + /* + * To have good performance and stability, it is required to avoid heap memory + * allocation in this function. The heap memory allocation includes but not + * limited to calling malloc, operator new, constructing STL container objects + * like std::vector with default allocator, and adding items like calling + * std::vector::push_back to STL container objects with default allocator. + * + * Please check in SDK documentation for more information. 
+ */ + + // HVX Method -- FP32 Version + out_0.set_dims(in_0); + + // NHWC + auto bias_ptr = (float *)in_1.raw_data_const(); + auto [b_in, h_in, w_in, d_in] = in_0.dims(); + + float scale_ = scale(0, 0, 0, 0); + + if (d_in % 128 != 0) { + return GraphStatus::ErrorDimensions; + } + + // call the kernel function for every dim() (assume total_size == bias_length) + // NOTE: in modeling, the dequantize add can appear after linear multihead attention, so w_in * d_in == bias_length + // in other positions, the w_in will be 1 + if (in_0.get_dtype() == DType::QUInt8 && out_0.get_dtype() == DType::Float16) { + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; h++) { + auto in_ptr = (int8_t *)in_0.raw_data_const() + (((b * h_in) + h) * w_in * d_in); + auto out_ptr = (int8_t *)((int16_t *)out_0.raw_data() + (((b * h_in) + h) * w_in * d_in)); + qhmath_hvx_dequantize_add_ahf(in_ptr, bias_ptr, out_ptr, w_in * d_in, scale_); + } + } + } else if (in_0.get_dtype() == DType::QUInt16 && out_0.get_dtype() == DType::Float16) { + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; h++) { + auto in_ptr = (int8_t *)((int16_t *)in_0.raw_data_const() + (((b * h_in) + h) * w_in * d_in)); + auto out_ptr = (int8_t *)((int16_t *)out_0.raw_data() + (((b * h_in) + h) * w_in * d_in)); + qhmath_hvx_dequantize_add_ui16_ahf(in_ptr, bias_ptr, out_ptr, w_in * d_in, scale_); + } + } + } else if (in_0.get_dtype() == DType::QUInt16 && out_0.get_dtype() == DType::Float32) { + // NOTE: correct + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; h++) { + auto in_ptr = (int8_t *)((int16_t *)in_0.raw_data_const() + (((b * h_in) + h) * w_in * d_in)); + auto out_ptr = (int8_t *)((float_t *)out_0.raw_data() + (((b * h_in) + h) * w_in * d_in)); + qhmath_hvx_dequantize_add_ui16_af(in_ptr, bias_ptr, out_ptr, w_in * d_in, scale_); + } + } + } else if (in_0.get_dtype() == DType::QUInt8 && out_0.get_dtype() == DType::Float32) { + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < 
h_in; h++) { + auto in_ptr = (int8_t *)in_0.raw_data_const() + (((b * h_in) + h) * w_in * d_in); + auto out_ptr = (int8_t *)((float_t *)out_0.raw_data() + ((((b * h_in) + h) * w_in * d_in))); + qhmath_hvx_dequantize_add_af(in_ptr, bias_ptr, out_ptr, w_in * d_in, scale_); + } + } + } else { + return GraphStatus::GraphErrorCode::ErrorUnsupported; + } + + return GraphStatus::Success; +} +#else + +template +GraphStatus llamadequantizeaddImpl(TensorType1 &out_0, + const TensorType1 &in_0, + const TensorType &in_1, + const PlainFloatTensor &scale) + +{ + /* + * add code here + * */ + /* + * To have good performance and stability, it is required to avoid heap memory + * allocation in this function. The heap memory allocation includes but not + * limited to calling malloc, operator new, constructing STL container objects + * like std::vector with default allocator, and adding items like calling + * std::vector::push_back to STL container objects with default allocator. + * + * Please check in SDK documentation for more information. 
+ */ + + out_0.set_dims(in_0); + + float scale_ = scale(0, 0, 0, 0); + auto [b_in, h_in, w_in, d_in] = in_0.dims(); + + if (in_0.get_dtype() == DType::QUInt8 && out_0.get_dtype() == DType::Float32) { + auto out_ptr = (float *)out_0.raw_data(); + auto in_ptr = (uint8_t *)in_0.raw_data_const(); + + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; h++) { + for (Idx w = 0; w < w_in; w++) { + for (Idx d = 0; d < d_in; d++) { + int32_t inval = static_cast(*in_ptr++); + *out_ptr++ = (inval - 128) * scale_ + in_1(0, 0, 0, w * d_in + d); + } + } + } + } + } else if (in_0.get_dtype() == DType::QUInt16 && out_0.get_dtype() == DType::Float32) { + auto out_ptr = (float *)out_0.raw_data(); + auto in_ptr = (uint16_t *)in_0.raw_data_const(); + + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; h++) { + for (Idx w = 0; w < w_in; w++) { + for (Idx d = 0; d < d_in; d++) { + int32_t inval = static_cast(*in_ptr++); + *out_ptr++ = (inval - 32768) * scale_ + in_1(0, 0, 0, w * d_in + d); + } + } + } + } + } else if (in_0.get_dtype() == DType::QUInt16 && out_0.get_dtype() == DType::Float16) { + auto out_ptr = (__fp16 *)out_0.raw_data(); + auto in_ptr = (uint16_t *)in_0.raw_data_const(); + + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; h++) { + for (Idx w = 0; w < w_in; w++) { + for (Idx d = 0; d < d_in; d++) { + int32_t inval = static_cast(*in_ptr++); + *out_ptr++ = (__fp16)((inval - 32768) * scale_ + in_1(0, 0, 0, w * d_in + d)); + } + } + } + } + } else if (in_0.get_dtype() == DType::QUInt8 && out_0.get_dtype() == DType::Float16) { + auto out_ptr = (__fp16 *)out_0.raw_data(); + auto in_ptr = (uint8_t *)in_0.raw_data_const(); + + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; h++) { + for (Idx w = 0; w < w_in; w++) { + for (Idx d = 0; d < d_in; d++) { + int32_t inval = static_cast(*in_ptr++); + *out_ptr++ = (__fp16)((inval - 128) * scale_ + in_1(0, 0, 0, w * d_in + d)); + } + } + } + } + } + return GraphStatus::Success; +} + +#endif + 
+__attribute__((unused)) static float llamadequantizeaddCostFunc(const Op *op) { + /* + * add code here + * */ + + float cost = 0.0; // add cost computation here + return cost; +} + +/* At the bottom of the op file, call END_PKG_OP_DEFINITION(), + where is as BEGIN_PKG_OP_DEFINITION +*/ +END_PKG_OP_DEFINITION(PKG_LLaMADequantizeAdd); \ No newline at end of file diff --git a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMALinear.cpp b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMALinear.cpp similarity index 70% rename from src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMALinear.cpp rename to mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMALinear.cpp index 5c3358dac..44487d245 100755 --- a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMALinear.cpp +++ b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMALinear.cpp @@ -9,20 +9,18 @@ #include "QnnOpPackage.h" #include "HTP/core/simple_reg.h" - BEGIN_PKG_OP_DEFINITION(PKG_LLaMALinear); - // op execute function declarations -template -GraphStatus llamalinearImpl(TensorType& out_0, - const TensorType& in_0, - const TensorType& in_1, - const TensorType& in_2, - const PlainFloatTensor& in_scale, - const PlainFloatTensor& weight_scale, - const PlainFloatTensor& bias_scale, - const PlainFloatTensor& output_scale); +template +GraphStatus llamalinearImpl(TensorType &out_0, + const TensorType &in_0, + const TensorType &in_1, + const TensorType &in_2, + const PlainFloatTensor &in_scale, + const PlainFloatTensor &weight_scale, + const PlainFloatTensor &bias_scale, + const PlainFloatTensor &output_scale); // forward declaration of sample cost function static float llamalinearCostFunc(const Op *op); @@ -67,11 +65,11 @@ DEF_PACKAGE_OP((llamalinearImpl), "LLaMALinear") * one definition per op, and this is optional * syntax: DEF_PACKAGE_PARAM_ORDER(OP,PARAM1,MANDATORY1,DEFAULT1,PARAM2,MANDATORY2,DEFAULT2...) 
* one or more parameters can be specified for each op - * order of parameters listed determines the order of parameters passed into op execution functions + * order of parameters listed determines the order of parameters passed into op execution functions * if an op does not have a parameter order definition, parameter order passed into Qnn_addNode * will be passed into op execution functions * if an op has a parameter order definition, any parameter passed into Qnn_addNode with unlisted - * name will be abandoned + * name will be abandoned * if two or more op packages with the same package name will be registered, they cannot list * conflicting parameter orders * PARAM refers to parameter name as a string literal @@ -84,7 +82,7 @@ DEF_PACKAGE_OP((llamalinearImpl), "LLaMALinear") * graph construction will skip this parameter when this parameter is not provided at * Qnn_addNode */ -DEF_PACKAGE_PARAM_ORDER("LLaMALinear", +DEF_PACKAGE_PARAM_ORDER("LLaMALinear", "in_scale", true, nullptr, @@ -98,13 +96,12 @@ DEF_PACKAGE_PARAM_ORDER("LLaMALinear", true, nullptr) - /* execute functions for ops */ float Round(float num) { float floor_num = floor(num); float ceil_num = ceil(num); - + if (num - floor_num < ceil_num - num) { return floor_num; } else { @@ -112,34 +109,34 @@ float Round(float num) { } } -template -GraphStatus llamalinearImpl(TensorType& out_0, - const TensorType& in_0, - const TensorType& in_1, - const TensorType& in_2, - const PlainFloatTensor& in_scale, - const PlainFloatTensor& weight_scale, - const PlainFloatTensor& bias_scale, - const PlainFloatTensor& output_scale) +template +GraphStatus llamalinearImpl(TensorType &out_0, + const TensorType &in_0, + const TensorType &in_1, + const TensorType &in_2, + const PlainFloatTensor &in_scale, + const PlainFloatTensor &weight_scale, + const PlainFloatTensor &bias_scale, + const PlainFloatTensor &output_scale) { - /* - * add code here - * */ - /* - * To have good performance and stability, it is required to avoid 
heap memory - * allocation in this function. The heap memory allocation includes but not - * limited to calling malloc, operator new, constructing STL container objects - * like std::vector with default allocator, and adding items like calling - * std::vector::push_back to STL container objects with default allocator. - * - * Please check in SDK documentation for more information. - */ - // 假设输入张量是4维的,NHWC格式 + /* + * add code here + * */ + /* + * To have good performance and stability, it is required to avoid heap memory + * allocation in this function. The heap memory allocation includes but not + * limited to calling malloc, operator new, constructing STL container objects + * like std::vector with default allocator, and adding items like calling + * std::vector::push_back to STL container objects with default allocator. + * + * Please check in SDK documentation for more information. + */ + // 假设输入张量是4维的,NHWC格式 int batch_size = in_0.dims()[0]; int height = in_0.dims()[1]; int width = in_0.dims()[2]; - int in_features = in_0.dims()[3]; // 输入的通道数 + int in_features = in_0.dims()[3]; // 输入的通道数 int out_features = in_1.dims()[3]; // 输出的特征数(即输出通道数) // 检查输入张量的形状是否匹配 @@ -147,24 +144,23 @@ GraphStatus llamalinearImpl(TensorType& out_0, return GraphStatus::ErrorFatal; } - // 获取量化比例 - float w_scale = weight_scale(0,0,0,0); - float i_scale = in_scale(0,0,0,0); - float b_scale = bias_scale(0,0,0,0); - float o_scale = output_scale(0,0,0,0); - + float w_scale = weight_scale(0, 0, 0, 0); + float i_scale = in_scale(0, 0, 0, 0); + float b_scale = bias_scale(0, 0, 0, 0); + float o_scale = output_scale(0, 0, 0, 0); + // 初始化输出张量 size_t dims[] = {static_cast(batch_size), static_cast(height), static_cast(width), static_cast(out_features)}; out_0.set_dims(dims); // only support float bias now. 
- auto in0_ptr = (uint8_t*)in_0.raw_data_const(); - auto in1_ptr = (uint8_t*)in_1.raw_data_const(); - auto in2_ptr = (uint8_t*)in_2.raw_data_const(); - auto out_ptr = (int8_t*)out_0.raw_data(); - + auto in0_ptr = (uint8_t *)in_0.raw_data_const(); + auto in1_ptr = (uint8_t *)in_1.raw_data_const(); + auto in2_ptr = (uint8_t *)in_2.raw_data_const(); + auto out_ptr = (int8_t *)out_0.raw_data(); + // 进行量化Linear乘法 for (int b = 0; b < batch_size; ++b) { for (int h = 0; h < height; ++h) { @@ -174,24 +170,24 @@ GraphStatus llamalinearImpl(TensorType& out_0, for (int k = 0; k < in_features; ++k) { int in_index = b * height * width * in_features + h * width * in_features + w * in_features + k; int weight_index = k * out_features + n; - acc += ((static_cast(in0_ptr[in_index])-128) * i_scale) * ((static_cast(in1_ptr[weight_index])-128) * w_scale); + acc += ((static_cast(in0_ptr[in_index]) - 128) * i_scale) * ((static_cast(in1_ptr[weight_index]) - 128) * w_scale); } // 加上偏置并进行反量化 float result = acc; - result += (static_cast(in2_ptr[n])-128) * b_scale; + result += (static_cast(in2_ptr[n]) - 128) * b_scale; // 将结果限制在uint8范围内 int out_index = b * height * width * out_features + h * width * out_features + w * out_features + n; result = Round(result / o_scale); long v = lroundf(result); - + if (v > 127) v = 127; - + if (v < -128) v = -128; - + if (out_0.get_dtype() == DType::QUInt8) v += 128; @@ -201,23 +197,18 @@ GraphStatus llamalinearImpl(TensorType& out_0, } } - return GraphStatus::Success; + return GraphStatus::Success; } -__attribute__((unused)) static float llamalinearCostFunc(const Op *op) -{ - /* - * add code here - * */ +__attribute__((unused)) static float llamalinearCostFunc(const Op *op) { + /* + * add code here + * */ - float cost = 0.0; // add cost computation here - return cost; + float cost = 0.0; // add cost computation here + return cost; } - - - - /* At the bottom of the op file, call END_PKG_OP_DEFINITION(), where is as BEGIN_PKG_OP_DEFINITION */ diff --git 
a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMAMul.cpp b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMAMul.cpp similarity index 53% rename from src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMAMul.cpp rename to mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMAMul.cpp index 36c614ea8..802acbacf 100755 --- a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMAMul.cpp +++ b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMAMul.cpp @@ -9,15 +9,13 @@ #include "QnnOpPackage.h" #include "HTP/core/simple_reg.h" - BEGIN_PKG_OP_DEFINITION(PKG_LLaMAMul); - // op execute function declarations -template -GraphStatus llamamulImpl(TensorType& out_0, - const TensorType& in_0, - const TensorType& in_1); +template +GraphStatus llamamulImpl(TensorType &out_0, + const TensorType &in_0, + const TensorType &in_1); // forward declaration of sample cost function static float llamamulCostFunc(const Op *op); @@ -62,11 +60,11 @@ DEF_PACKAGE_OP((llamamulImpl), "LLaMAMul") * one definition per op, and this is optional * syntax: DEF_PACKAGE_PARAM_ORDER(OP,PARAM1,MANDATORY1,DEFAULT1,PARAM2,MANDATORY2,DEFAULT2...) 
* one or more parameters can be specified for each op - * order of parameters listed determines the order of parameters passed into op execution functions + * order of parameters listed determines the order of parameters passed into op execution functions * if an op does not have a parameter order definition, parameter order passed into Qnn_addNode * will be passed into op execution functions * if an op has a parameter order definition, any parameter passed into Qnn_addNode with unlisted - * name will be abandoned + * name will be abandoned * if two or more op packages with the same package name will be registered, they cannot list * conflicting parameter orders * PARAM refers to parameter name as a string literal @@ -80,7 +78,6 @@ DEF_PACKAGE_OP((llamamulImpl), "LLaMAMul") * Qnn_addNode */ - /* execute functions for ops */ #ifndef REFERENCE_OP @@ -89,17 +86,15 @@ DEF_PACKAGE_OP((llamamulImpl), "LLaMAMul") #include #include -#define BLOCK_SIZE (8*1024/VLEN) /* vector chunks */ -#define L2FETCH_AHEAD (BLOCK_SIZE) +#define BLOCK_SIZE (8 * 1024 / VLEN) /* vector chunks */ +#define L2FETCH_AHEAD (BLOCK_SIZE) int32_t hvx_mul_af( float *restrict input, float *restrict input2, float *restrict output, - uint32_t size) -{ - if ((input == NULL) || (output == NULL) || (size == 0)) - { + uint32_t size) { + if ((input == NULL) || (output == NULL) || (size == 0)) { return -1; } @@ -117,19 +112,16 @@ int32_t hvx_mul_af( sline1p = *iptr++; sline2p = *iptr2++; - for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) - { + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { block = Q6_R_min_RR(i, BLOCK_SIZE); l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); - if (l2fetch_block > 0) - { + if (l2fetch_block > 0) { l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); l2fetch(iptr2 + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); } - for (int32_t j = 0; j < block; ++j) - { + for (int32_t j = 0; j < block; ++j) { sline1c = *iptr++; sline2c = 
*iptr2++; sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); @@ -143,31 +135,24 @@ int32_t hvx_mul_af( } if (vectors_in_rounddown > 0) { + sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? sline1p : *iptr++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); - sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? sline1p : *iptr++; - sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t) input); - - sline2c = is_aligned(iptr2, VLEN) && leftover == 0 ? sline2p : *iptr2++; - sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t) input2); - - *optr++ = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(sline1, sline2)); + sline2c = is_aligned(iptr2, VLEN) && leftover == 0 ? sline2p : *iptr2++; + sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t)input2); + *optr++ = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(sline1, sline2)); } // Handle leftover elements. if (leftover_size > 0) { - sline1c = (is_in_one_chunk(iptr, leftover_size, VLEN) - ? sline1p - : *iptr++); - sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); + sline1c = (is_in_one_chunk(iptr, leftover_size, VLEN) ? sline1p : *iptr++); + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); + sline2c = (is_in_one_chunk(iptr2, leftover_size, VLEN) ? sline2p : *iptr2++); + sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t)input2); - sline2c = (is_in_one_chunk(iptr2, leftover_size, VLEN) - ? 
sline2p - : *iptr2++); - sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t)input2); - - vstu_variable(optr, leftover_size, Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(sline1, sline2))); + vstu_variable(optr, leftover_size, Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(sline1, sline2))); } return 0; @@ -177,10 +162,8 @@ int32_t hvx_mul_ahf( __fp16 *restrict input, __fp16 *restrict input2, __fp16 *restrict output, - uint32_t size) -{ - if ((input == NULL) || (output == NULL) || (size == 0)) - { + uint32_t size) { + if ((input == NULL) || (output == NULL) || (size == 0)) { return -1; } @@ -198,19 +181,16 @@ int32_t hvx_mul_ahf( sline1p = *iptr++; sline2p = *iptr2++; - for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) - { + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { block = Q6_R_min_RR(i, BLOCK_SIZE); l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); - if (l2fetch_block > 0) - { + if (l2fetch_block > 0) { l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); l2fetch(iptr2 + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); } - for (int32_t j = 0; j < block; ++j) - { + for (int32_t j = 0; j < block; ++j) { sline1c = *iptr++; sline2c = *iptr2++; sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); @@ -224,160 +204,140 @@ int32_t hvx_mul_ahf( } if (vectors_in_rounddown > 0) { + sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? sline1p : *iptr++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); - sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? sline1p : *iptr++; - sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t) input); - - sline2c = is_aligned(iptr2, VLEN) && leftover == 0 ? sline2p : *iptr2++; - sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t) input2); - - *optr++ = Q6_Vhf_equals_Vqf16(Q6_Vqf16_vmpy_VhfVhf(sline1, sline2)); + sline2c = is_aligned(iptr2, VLEN) && leftover == 0 ? 
sline2p : *iptr2++; + sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t)input2); + *optr++ = Q6_Vhf_equals_Vqf16(Q6_Vqf16_vmpy_VhfVhf(sline1, sline2)); } // Handle leftover elements. if (leftover_size > 0) { - sline1c = (is_in_one_chunk(iptr, leftover_size, VLEN) - ? sline1p - : *iptr++); - sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); - + sline1c = (is_in_one_chunk(iptr, leftover_size, VLEN) ? sline1p : *iptr++); + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); - sline2c = (is_in_one_chunk(iptr2, leftover_size, VLEN) - ? sline2p - : *iptr2++); - sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t)input2); + sline2c = (is_in_one_chunk(iptr2, leftover_size, VLEN) ? sline2p : *iptr2++); + sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t)input2); - vstu_variable(optr, leftover_size, Q6_Vhf_equals_Vqf16(Q6_Vqf16_vmpy_VhfVhf(sline1, sline2))); + vstu_variable(optr, leftover_size, Q6_Vhf_equals_Vqf16(Q6_Vqf16_vmpy_VhfVhf(sline1, sline2))); } return 0; } -template -GraphStatus llamamulImpl(TensorType& out_0, - const TensorType& in_0, - const TensorType& in_1) +template +GraphStatus llamamulImpl(TensorType &out_0, + const TensorType &in_0, + const TensorType &in_1) { - /* - * add code here - * */ - /* - * To have good performance and stability, it is required to avoid heap memory - * allocation in this function. The heap memory allocation includes but not - * limited to calling malloc, operator new, constructing STL container objects - * like std::vector with default allocator, and adding items like calling - * std::vector::push_back to STL container objects with default allocator. - * - * Please check in SDK documentation for more information. 
- */ - out_0.set_dims(in_0); - - auto [b_in, h_in, w_in, d_in] = in_0.dims(); - size_t size = b_in*h_in*w_in*d_in; - - DType dtype = in_0.get_dtype(); - - if (dtype == DType::Float16) { - auto in_ptr = (__fp16*)in_0.raw_data_const(); - auto in2_ptr = (__fp16*)in_1.raw_data_const(); - auto out_ptr = (__fp16*)out_0.raw_data(); - - hvx_mul_ahf(in_ptr, in2_ptr, out_ptr, size); - - } else { - auto in_ptr = (float*)in_0.raw_data_const(); - auto in2_ptr = (float*)in_1.raw_data_const(); - auto out_ptr = (float*)out_0.raw_data(); - - hvx_mul_af(in_ptr, in2_ptr, out_ptr, size); - } - - return GraphStatus::Success; + /* + * add code here + * */ + /* + * To have good performance and stability, it is required to avoid heap memory + * allocation in this function. The heap memory allocation includes but not + * limited to calling malloc, operator new, constructing STL container objects + * like std::vector with default allocator, and adding items like calling + * std::vector::push_back to STL container objects with default allocator. + * + * Please check in SDK documentation for more information. 
+ */ + out_0.set_dims(in_0); + + auto [b_in, h_in, w_in, d_in] = in_0.dims(); + size_t size = b_in * h_in * w_in * d_in; + + DType dtype = in_0.get_dtype(); + + if (dtype == DType::Float16) { + auto in_ptr = (__fp16 *)in_0.raw_data_const(); + auto in2_ptr = (__fp16 *)in_1.raw_data_const(); + auto out_ptr = (__fp16 *)out_0.raw_data(); + + hvx_mul_ahf(in_ptr, in2_ptr, out_ptr, size); + + } else { + auto in_ptr = (float *)in_0.raw_data_const(); + auto in2_ptr = (float *)in_1.raw_data_const(); + auto out_ptr = (float *)out_0.raw_data(); + + hvx_mul_af(in_ptr, in2_ptr, out_ptr, size); + } + + return GraphStatus::Success; } #else - -template -GraphStatus llamamulImpl(TensorType& out_0, - const TensorType& in_0, - const TensorType& in_1) +template +GraphStatus llamamulImpl(TensorType &out_0, + const TensorType &in_0, + const TensorType &in_1) { - /* - * add code here - * */ - /* - * To have good performance and stability, it is required to avoid heap memory - * allocation in this function. The heap memory allocation includes but not - * limited to calling malloc, operator new, constructing STL container objects - * like std::vector with default allocator, and adding items like calling - * std::vector::push_back to STL container objects with default allocator. - * - * Please check in SDK documentation for more information. 
- */ - out_0.set_dims(in_0); - - DType dtype = in_0.get_dtype(); - - - auto out_ptr = (__fp16*)out_0.raw_data(); - auto in_ptr = (__fp16*)in_0.raw_data_const(); - auto in_ptr2 = (__fp16*)in_1.raw_data_const(); - - auto [b_in, h_in, w_in, d_in] = in_0.dims(); - for (Idx b = 0; b < b_in; b++) { - for (Idx h = 0; h < h_in; h++) { - for (Idx w = 0; w < w_in; w++) { - // mul - for (Idx d = 0; d < d_in; d++) { - - if (dtype == DType::Float16) { - - __fp16 inval = *in_ptr++; - __fp16 inval2 = *in_ptr2++; - __fp16 outval = inval * inval2; - - *out_ptr++ = outval; - } - - if (dtype == DType::Float32) { - float inval = in_0(b, h, w, d); - float inval2 = in_1(b, h, w, d); - float outval = inval * inval2; - - out_0(b, h, w, d) = outval; + /* + * add code here + * */ + /* + * To have good performance and stability, it is required to avoid heap memory + * allocation in this function. The heap memory allocation includes but not + * limited to calling malloc, operator new, constructing STL container objects + * like std::vector with default allocator, and adding items like calling + * std::vector::push_back to STL container objects with default allocator. + * + * Please check in SDK documentation for more information. 
+ */ + out_0.set_dims(in_0); + + DType dtype = in_0.get_dtype(); + + auto out_ptr = (__fp16 *)out_0.raw_data(); + auto in_ptr = (__fp16 *)in_0.raw_data_const(); + auto in_ptr2 = (__fp16 *)in_1.raw_data_const(); + + auto [b_in, h_in, w_in, d_in] = in_0.dims(); + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; h++) { + for (Idx w = 0; w < w_in; w++) { + // mul + for (Idx d = 0; d < d_in; d++) { + if (dtype == DType::Float16) { + __fp16 inval = *in_ptr++; + __fp16 inval2 = *in_ptr2++; + __fp16 outval = inval * inval2; + + *out_ptr++ = outval; + } + + if (dtype == DType::Float32) { + float inval = in_0(b, h, w, d); + float inval2 = in_1(b, h, w, d); + float outval = inval * inval2; + + out_0(b, h, w, d) = outval; + } + } } - - } } - } } - - return GraphStatus::Success; + return GraphStatus::Success; } - - #endif -__attribute__((unused)) static float llamamulCostFunc(const Op *op) -{ - /* - * add code here - * */ +__attribute__((unused)) static float llamamulCostFunc(const Op *op) { + /* + * add code here + * */ - float cost = 0.0; // add cost computation here - return cost; + float cost = 0.0; // add cost computation here + return cost; } - - - - /* At the bottom of the op file, call END_PKG_OP_DEFINITION(), where is as BEGIN_PKG_OP_DEFINITION */ diff --git a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMAQuantize.cpp b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMAQuantize.cpp similarity index 63% rename from src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMAQuantize.cpp rename to mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMAQuantize.cpp index 23b357b51..e846ee328 100755 --- a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMAQuantize.cpp +++ b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMAQuantize.cpp @@ -9,15 +9,13 @@ #include "QnnOpPackage.h" #include "HTP/core/simple_reg.h" - BEGIN_PKG_OP_DEFINITION(PKG_LLaMAQuantize); - // op execute function declarations -template 
+template GraphStatus llamaquantizeImpl(TensorType1 &out_0, const TensorType1 &in_0, - const PlainFloatTensor& scale); + const PlainFloatTensor &scale); // forward declaration of sample cost function static float llamaquantizeCostFunc(const Op *op); @@ -62,11 +60,11 @@ DEF_PACKAGE_OP((llamaquantizeImpl), "LLaMAQuantize") * one definition per op, and this is optional * syntax: DEF_PACKAGE_PARAM_ORDER(OP,PARAM1,MANDATORY1,DEFAULT1,PARAM2,MANDATORY2,DEFAULT2...) * one or more parameters can be specified for each op - * order of parameters listed determines the order of parameters passed into op execution functions + * order of parameters listed determines the order of parameters passed into op execution functions * if an op does not have a parameter order definition, parameter order passed into Qnn_addNode * will be passed into op execution functions * if an op has a parameter order definition, any parameter passed into Qnn_addNode with unlisted - * name will be abandoned + * name will be abandoned * if two or more op packages with the same package name will be registered, they cannot list * conflicting parameter orders * PARAM refers to parameter name as a string literal @@ -79,29 +77,29 @@ DEF_PACKAGE_OP((llamaquantizeImpl), "LLaMAQuantize") * graph construction will skip this parameter when this parameter is not provided at * Qnn_addNode */ -DEF_PACKAGE_PARAM_ORDER("LLaMAQuantize", +DEF_PACKAGE_PARAM_ORDER("LLaMAQuantize", "scale", true, nullptr) #ifndef REFERENCE_OP - #include "qhmath_hvx.h" #include "hvx_internal.h" #include #include -#define BLOCK_SIZE (8*1024/VLEN) /* vector chunks */ -#define L2FETCH_AHEAD (BLOCK_SIZE) +#define BLOCK_SIZE (8 * 1024 / VLEN) /* vector chunks */ +#define L2FETCH_AHEAD (BLOCK_SIZE) -static HVX_INLINE_ALWAYS uint32_t float_to_bits(float x) -{ - union { float f; uint32_t i; } fp32 = { .f = x }; +static HVX_INLINE_ALWAYS uint32_t float_to_bits(float x) { + union { + float f; + uint32_t i; + } fp32 = {.f = x}; return fp32.i; } -static 
inline int32_t float_to_fp16s(float input) -{ +static inline int32_t float_to_fp16s(float input) { union { int32_t i; __fp16 f[2]; @@ -116,7 +114,6 @@ static inline int32_t float_to_fp16s(float input) #define FP16_SIGN 15 #define FP16_NEG_1 0xbc00 - /* execute functions for ops */ int32_t qhmath_hvx_quantize_ahf( __fp16 *restrict input, @@ -124,15 +121,13 @@ int32_t qhmath_hvx_quantize_ahf( uint32_t size, float low_level, float high_level, - float scale) -{ - if ((input == NULL) || (output == NULL) || (size == 0)) - { + float scale) { + if ((input == NULL) || (output == NULL) || (size == 0)) { return -1; } - HVX_Vector *iptr = (HVX_Vector *) input; - HVX_UVector *optr = (HVX_UVector *) output; + HVX_Vector *iptr = (HVX_Vector *)input; + HVX_UVector *optr = (HVX_UVector *)output; HVX_Vector sline1p, sline1c, sline1; HVX_Vector sline2p, sline2c, sline2; @@ -153,7 +148,7 @@ int32_t qhmath_hvx_quantize_ahf( HVX_Vector uintconvert = Q6_V_vsplat_R(0x80808080); - float es = 0.5; + float es = 0.5; low_level_vec = Q6_V_vsplat_R(float_to_fp16s(low_level)); high_level_vec = Q6_V_vsplat_R(float_to_fp16s(high_level)); scale_vec = Q6_V_vsplat_R(float_to_fp16s(scale)); @@ -170,22 +165,19 @@ int32_t qhmath_hvx_quantize_ahf( HVX_Vector negone = Q6_Vh_vsplat_R(FP16_NEG_1); HVX_Vector zero = Q6_V_vzero(); - for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) - { + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { block = Q6_R_min_RR(i, BLOCK_SIZE); l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); - if (l2fetch_block > 0) - { + if (l2fetch_block > 0) { l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); } - for (int32_t j = 0; j < block; j+=4) - { + for (int32_t j = 0; j < block; j += 4) { sline1c = *iptr++; - sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t) input); + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); - sout1 = Q6_Vqf16_vmpy_VhfVhf(sline1,scale_vec); + sout1 = Q6_Vqf16_vmpy_VhfVhf(sline1, scale_vec); 
sout1 = Q6_Vqf16_vadd_Vqf16Vqf16(sout1, es_vec); sout1 = Q6_Vhf_equals_Vqf16(sout1); sout1 = Q6_Vhf_vmin_VhfVhf(sout1, high_level_vec); @@ -228,9 +220,9 @@ int32_t qhmath_hvx_quantize_ahf( sout1 = Q6_Vh_equals_Vhf(sout1); sline2c = *iptr++; - sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t) input); + sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t)input); - sout2 = Q6_Vqf16_vmpy_VhfVhf(sline2,scale_vec); + sout2 = Q6_Vqf16_vmpy_VhfVhf(sline2, scale_vec); sout2 = Q6_Vqf16_vadd_Vqf16Vqf16(sout2, es_vec); sout2 = Q6_Vhf_equals_Vqf16(sout2); sout2 = Q6_Vhf_vmin_VhfVhf(sout2, high_level_vec); @@ -273,9 +265,9 @@ int32_t qhmath_hvx_quantize_ahf( sout2 = Q6_Vh_equals_Vhf(sout2); sline3c = *iptr++; - sline3 = Q6_V_valign_VVR(sline3c, sline3p, (size_t) input); + sline3 = Q6_V_valign_VVR(sline3c, sline3p, (size_t)input); - sout3 = Q6_Vqf16_vmpy_VhfVhf(sline3,scale_vec); + sout3 = Q6_Vqf16_vmpy_VhfVhf(sline3, scale_vec); sout3 = Q6_Vqf16_vadd_Vqf16Vqf16(sout3, es_vec); sout3 = Q6_Vhf_equals_Vqf16(sout3); sout3 = Q6_Vhf_vmin_VhfVhf(sout3, high_level_vec); @@ -318,9 +310,9 @@ int32_t qhmath_hvx_quantize_ahf( sout3 = Q6_Vh_equals_Vhf(sout3); sline4c = *iptr++; - sline4 = Q6_V_valign_VVR(sline4c, sline4p, (size_t) input); + sline4 = Q6_V_valign_VVR(sline4c, sline4p, (size_t)input); - sout4 = Q6_Vqf16_vmpy_VhfVhf(sline4,scale_vec); + sout4 = Q6_Vqf16_vmpy_VhfVhf(sline4, scale_vec); sout4 = Q6_Vqf16_vadd_Vqf16Vqf16(sout4, es_vec); sout4 = Q6_Vhf_equals_Vqf16(sout4); sout4 = Q6_Vhf_vmin_VhfVhf(sout4, high_level_vec); @@ -362,15 +354,266 @@ int32_t qhmath_hvx_quantize_ahf( sout4 = Q6_Vh_equals_Vhf(sout4); - HVX_Vector reql_h = Q6_Vb_vpack_VhVh_sat(sout2, sout1); *optr++ = Q6_Vb_vadd_VbVb(reql_h, uintconvert); HVX_Vector reqh_h = Q6_Vb_vpack_VhVh_sat(sout4, sout3); *optr++ = Q6_Vb_vadd_VbVb(reqh_h, uintconvert); + sline1p = sline1c; + sline2p = sline2c; + sline3p = sline3c; + sline4p = sline4c; + } + } + + return 0; +} + +int32_t qhmath_hvx_quantize_ui16_ahf( + __fp16 *restrict 
input, + __fp16 *restrict output, + uint32_t size, + float low_level, + float high_level, + float scale) { + if ((input == NULL) || (output == NULL) || (size == 0)) { + return -1; + } + + HVX_Vector *iptr = (HVX_Vector *)input; + HVX_UVector *optr = (HVX_UVector *)output; + + HVX_Vector sline1p, sline1c, sline1; + HVX_Vector sline2p, sline2c, sline2; + HVX_Vector sline3p, sline3c, sline3; + HVX_Vector sline4p, sline4c, sline4; + + HVX_Vector sout1, sout2, sout3, sout4; + HVX_Vector low_level_vec, high_level_vec, scale_vec, es_vec; + int32_t block, l2fetch_block; + // int32_t leftover = size & 31; + int32_t vectors_in_rounddown = size / 64; + // int32_t leftover_size = leftover * sizeof(float); + + sline1p = *iptr++; + sline2p = *iptr++; + sline3p = *iptr++; + sline4p = *iptr++; + + HVX_Vector uintconvert = Q6_V_vsplat_R(0x80008000); + + float es = 0.5; + low_level_vec = Q6_V_vsplat_R(float_to_fp16s(low_level)); + high_level_vec = Q6_V_vsplat_R(float_to_fp16s(high_level)); + scale_vec = Q6_V_vsplat_R(float_to_fp16s(scale)); + es_vec = Q6_V_vsplat_R(float_to_fp16s(es)); + + HVX_Vector zero_v_sf = Q6_V_vzero(); + es_vec = Q6_Vqf16_vadd_VhfVhf(es_vec, zero_v_sf); + + HVX_Vector expmask = Q6_Vh_vsplat_R(FP16_EXPONENT_MASK); + HVX_Vector expbias = Q6_Vh_vsplat_R(FP16_EXPONENT_BIAS); + HVX_Vector manmask = Q6_Vh_vsplat_R(FP16_MANTISA_MASK); + HVX_Vector exp23 = Q6_Vh_vsplat_R(23 - 1); + HVX_Vector exp0 = Q6_Vh_vsplat_R(0 - 1); + HVX_Vector negone = Q6_Vh_vsplat_R(FP16_NEG_1); + HVX_Vector zero = Q6_V_vzero(); + + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { + block = Q6_R_min_RR(i, BLOCK_SIZE); + l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); + + if (l2fetch_block > 0) { + l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); + } + + for (int32_t j = 0; j < block; j += 4) { + sline1c = *iptr++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); + + sout1 = Q6_Vqf16_vmpy_VhfVhf(sline1, scale_vec); + sout1 = 
Q6_Vqf16_vadd_Vqf16Vqf16(sout1, es_vec); + sout1 = Q6_Vhf_equals_Vqf16(sout1); + sout1 = Q6_Vhf_vmin_VhfVhf(sout1, high_level_vec); + sout1 = Q6_Vhf_vmax_VhfVhf(sout1, low_level_vec); + + { + HVX_Vector exp = Q6_Vh_vasr_VhR(sout1, FP16_MANTISA); + exp = Q6_V_vand_VV(exp, expmask); + exp = Q6_Vh_vsub_VhVh(exp, expbias); + + HVX_Vector man = Q6_Vh_vasr_VhVh(manmask, exp); + HVX_Vector manzero = Q6_V_vand_VV(sout1, man); + + HVX_Vector sign = Q6_Vh_vasr_VhR(sout1, FP16_SIGN); + HVX_Vector issignpos = Q6_Q_vcmp_eq_VhVh(sign, zero); + + HVX_Vector expgte23 = Q6_Q_vcmp_gt_VhVh(exp, exp23); + HVX_Vector expgte0 = Q6_Q_vcmp_gt_VhVh(exp, exp0); + HVX_Vector maneqzero = Q6_Q_vcmp_eq_VhVh(manzero, zero); + + HVX_Vector exppos_signneg = Q6_Vh_vadd_VhVh(sout1, man); + man = Q6_V_vnot_V(man); + HVX_Vector exppos_signpos = Q6_V_vand_VV(sout1, man); + exppos_signneg = Q6_V_vand_VV(exppos_signneg, man); + HVX_Vector shift1 = Q6_Vh_vasl_VhR(sout1, 1); + HVX_Vector iszero = Q6_Q_vcmp_eq_VhVh(shift1, zero); + + // exp >= 0 + HVX_Vector tsout1 = Q6_V_vmux_QVV(issignpos, exppos_signpos, exppos_signneg); + tsout1 = Q6_V_vmux_QVV(maneqzero, sout1, tsout1); + + // exp < 0 (-1, 1) + HVX_Vector tsout2 = Q6_V_vmux_QVV(iszero, sout1, negone); + tsout2 = Q6_V_vmux_QVV(issignpos, zero, tsout2); + + tsout1 = Q6_V_vmux_QVV(expgte0, tsout1, tsout2); + sout1 = Q6_V_vmux_QVV(expgte23, sout1, tsout1); + } + + sout1 = Q6_Vh_equals_Vhf(sout1); + + sline2c = *iptr++; + sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t)input); + + sout2 = Q6_Vqf16_vmpy_VhfVhf(sline2, scale_vec); + sout2 = Q6_Vqf16_vadd_Vqf16Vqf16(sout2, es_vec); + sout2 = Q6_Vhf_equals_Vqf16(sout2); + sout2 = Q6_Vhf_vmin_VhfVhf(sout2, high_level_vec); + sout2 = Q6_Vhf_vmax_VhfVhf(sout2, low_level_vec); + + { + HVX_Vector exp = Q6_Vh_vasr_VhR(sout2, FP16_MANTISA); + exp = Q6_V_vand_VV(exp, expmask); + exp = Q6_Vh_vsub_VhVh(exp, expbias); + + HVX_Vector man = Q6_Vh_vasr_VhVh(manmask, exp); + HVX_Vector manzero = Q6_V_vand_VV(sout2, man); + 
+ HVX_Vector sign = Q6_Vh_vasr_VhR(sout2, FP16_SIGN); + HVX_Vector issignpos = Q6_Q_vcmp_eq_VhVh(sign, zero); + + HVX_Vector expgte23 = Q6_Q_vcmp_gt_VhVh(exp, exp23); + HVX_Vector expgte0 = Q6_Q_vcmp_gt_VhVh(exp, exp0); + HVX_Vector maneqzero = Q6_Q_vcmp_eq_VhVh(manzero, zero); + + HVX_Vector exppos_signneg = Q6_Vh_vadd_VhVh(sout2, man); + man = Q6_V_vnot_V(man); + HVX_Vector exppos_signpos = Q6_V_vand_VV(sout2, man); + exppos_signneg = Q6_V_vand_VV(exppos_signneg, man); + HVX_Vector shift1 = Q6_Vh_vasl_VhR(sout2, 1); + HVX_Vector iszero = Q6_Q_vcmp_eq_VhVh(shift1, zero); + + // exp >= 0 + HVX_Vector tsout1 = Q6_V_vmux_QVV(issignpos, exppos_signpos, exppos_signneg); + tsout1 = Q6_V_vmux_QVV(maneqzero, sout2, tsout1); + + // exp < 0 (-1, 1) + HVX_Vector tsout2 = Q6_V_vmux_QVV(iszero, sout2, negone); + tsout2 = Q6_V_vmux_QVV(issignpos, zero, tsout2); + + tsout1 = Q6_V_vmux_QVV(expgte0, tsout1, tsout2); + sout2 = Q6_V_vmux_QVV(expgte23, sout2, tsout1); + } + + sout2 = Q6_Vh_equals_Vhf(sout2); + + sline3c = *iptr++; + sline3 = Q6_V_valign_VVR(sline3c, sline3p, (size_t)input); + + sout3 = Q6_Vqf16_vmpy_VhfVhf(sline3, scale_vec); + sout3 = Q6_Vqf16_vadd_Vqf16Vqf16(sout3, es_vec); + sout3 = Q6_Vhf_equals_Vqf16(sout3); + sout3 = Q6_Vhf_vmin_VhfVhf(sout3, high_level_vec); + sout3 = Q6_Vhf_vmax_VhfVhf(sout3, low_level_vec); + + { + HVX_Vector exp = Q6_Vh_vasr_VhR(sout3, FP16_MANTISA); + exp = Q6_V_vand_VV(exp, expmask); + exp = Q6_Vh_vsub_VhVh(exp, expbias); + + HVX_Vector man = Q6_Vh_vasr_VhVh(manmask, exp); + HVX_Vector manzero = Q6_V_vand_VV(sout3, man); + + HVX_Vector sign = Q6_Vh_vasr_VhR(sout3, FP16_SIGN); + HVX_Vector issignpos = Q6_Q_vcmp_eq_VhVh(sign, zero); + + HVX_Vector expgte23 = Q6_Q_vcmp_gt_VhVh(exp, exp23); + HVX_Vector expgte0 = Q6_Q_vcmp_gt_VhVh(exp, exp0); + HVX_Vector maneqzero = Q6_Q_vcmp_eq_VhVh(manzero, zero); + + HVX_Vector exppos_signneg = Q6_Vh_vadd_VhVh(sout3, man); + man = Q6_V_vnot_V(man); + HVX_Vector exppos_signpos = Q6_V_vand_VV(sout3, man); + 
exppos_signneg = Q6_V_vand_VV(exppos_signneg, man); + HVX_Vector shift1 = Q6_Vh_vasl_VhR(sout3, 1); + HVX_Vector iszero = Q6_Q_vcmp_eq_VhVh(shift1, zero); + + // exp >= 0 + HVX_Vector tsout1 = Q6_V_vmux_QVV(issignpos, exppos_signpos, exppos_signneg); + tsout1 = Q6_V_vmux_QVV(maneqzero, sout3, tsout1); + + // exp < 0 (-1, 1) + HVX_Vector tsout2 = Q6_V_vmux_QVV(iszero, sout3, negone); + tsout2 = Q6_V_vmux_QVV(issignpos, zero, tsout2); + + tsout1 = Q6_V_vmux_QVV(expgte0, tsout1, tsout2); + sout3 = Q6_V_vmux_QVV(expgte23, sout3, tsout1); + } + + sout3 = Q6_Vh_equals_Vhf(sout3); + + sline4c = *iptr++; + sline4 = Q6_V_valign_VVR(sline4c, sline4p, (size_t)input); + + sout4 = Q6_Vqf16_vmpy_VhfVhf(sline4, scale_vec); + sout4 = Q6_Vqf16_vadd_Vqf16Vqf16(sout4, es_vec); + sout4 = Q6_Vhf_equals_Vqf16(sout4); + sout4 = Q6_Vhf_vmin_VhfVhf(sout4, high_level_vec); + sout4 = Q6_Vhf_vmax_VhfVhf(sout4, low_level_vec); + + { + HVX_Vector exp = Q6_Vh_vasr_VhR(sout4, FP16_MANTISA); + exp = Q6_V_vand_VV(exp, expmask); + exp = Q6_Vh_vsub_VhVh(exp, expbias); + + HVX_Vector man = Q6_Vh_vasr_VhVh(manmask, exp); + HVX_Vector manzero = Q6_V_vand_VV(sout4, man); + + HVX_Vector sign = Q6_Vh_vasr_VhR(sout4, FP16_SIGN); + HVX_Vector issignpos = Q6_Q_vcmp_eq_VhVh(sign, zero); + + HVX_Vector expgte23 = Q6_Q_vcmp_gt_VhVh(exp, exp23); + HVX_Vector expgte0 = Q6_Q_vcmp_gt_VhVh(exp, exp0); + HVX_Vector maneqzero = Q6_Q_vcmp_eq_VhVh(manzero, zero); + + HVX_Vector exppos_signneg = Q6_Vh_vadd_VhVh(sout4, man); + man = Q6_V_vnot_V(man); + HVX_Vector exppos_signpos = Q6_V_vand_VV(sout4, man); + exppos_signneg = Q6_V_vand_VV(exppos_signneg, man); + HVX_Vector shift1 = Q6_Vh_vasl_VhR(sout4, 1); + HVX_Vector iszero = Q6_Q_vcmp_eq_VhVh(shift1, zero); + + // exp >= 0 + HVX_Vector tsout1 = Q6_V_vmux_QVV(issignpos, exppos_signpos, exppos_signneg); + tsout1 = Q6_V_vmux_QVV(maneqzero, sout4, tsout1); + + // exp < 0 (-1, 1) + HVX_Vector tsout2 = Q6_V_vmux_QVV(iszero, sout4, negone); + tsout2 = Q6_V_vmux_QVV(issignpos, 
zero, tsout2); + + tsout1 = Q6_V_vmux_QVV(expgte0, tsout1, tsout2); + sout4 = Q6_V_vmux_QVV(expgte23, sout4, tsout1); + } + + sout4 = Q6_Vh_equals_Vhf(sout4); + + *optr++ = Q6_Vh_vadd_VhVh(sout1, uintconvert); + *optr++ = Q6_Vh_vadd_VhVh(sout2, uintconvert); + *optr++ = Q6_Vh_vadd_VhVh(sout3, uintconvert); + *optr++ = Q6_Vh_vadd_VhVh(sout4, uintconvert); - sline1p = sline1c; sline2p = sline2c; sline3p = sline3c; @@ -387,15 +630,13 @@ int32_t qhmath_hvx_quantize_ahf_int8( uint32_t size, float low_level, float high_level, - float scale) -{ - if ((input == NULL) || (output == NULL) || (size == 0)) - { + float scale) { + if ((input == NULL) || (output == NULL) || (size == 0)) { return -1; } - HVX_Vector *iptr = (HVX_Vector *) input; - HVX_UVector *optr = (HVX_UVector *) output; + HVX_Vector *iptr = (HVX_Vector *)input; + HVX_UVector *optr = (HVX_UVector *)output; HVX_Vector sline1p, sline1c, sline1; HVX_Vector sline2p, sline2c, sline2; @@ -414,7 +655,7 @@ int32_t qhmath_hvx_quantize_ahf_int8( sline3p = *iptr++; sline4p = *iptr++; - float es = 0.5; + float es = 0.5; low_level_vec = Q6_V_vsplat_R(float_to_fp16s(low_level)); high_level_vec = Q6_V_vsplat_R(float_to_fp16s(high_level)); scale_vec = Q6_V_vsplat_R(float_to_fp16s(scale)); @@ -431,22 +672,19 @@ int32_t qhmath_hvx_quantize_ahf_int8( HVX_Vector negone = Q6_Vh_vsplat_R(FP16_NEG_1); HVX_Vector zero = Q6_V_vzero(); - for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) - { + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { block = Q6_R_min_RR(i, BLOCK_SIZE); l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); - if (l2fetch_block > 0) - { + if (l2fetch_block > 0) { l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); } - for (int32_t j = 0; j < block; j+=4) - { + for (int32_t j = 0; j < block; j += 4) { sline1c = *iptr++; - sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t) input); + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); - sout1 = 
Q6_Vqf16_vmpy_VhfVhf(sline1,scale_vec); + sout1 = Q6_Vqf16_vmpy_VhfVhf(sline1, scale_vec); sout1 = Q6_Vqf16_vadd_Vqf16Vqf16(sout1, es_vec); sout1 = Q6_Vhf_equals_Vqf16(sout1); sout1 = Q6_Vhf_vmin_VhfVhf(sout1, high_level_vec); @@ -489,9 +727,9 @@ int32_t qhmath_hvx_quantize_ahf_int8( sout1 = Q6_Vh_equals_Vhf(sout1); sline2c = *iptr++; - sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t) input); + sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t)input); - sout2 = Q6_Vqf16_vmpy_VhfVhf(sline2,scale_vec); + sout2 = Q6_Vqf16_vmpy_VhfVhf(sline2, scale_vec); sout2 = Q6_Vqf16_vadd_Vqf16Vqf16(sout2, es_vec); sout2 = Q6_Vhf_equals_Vqf16(sout2); sout2 = Q6_Vhf_vmin_VhfVhf(sout2, high_level_vec); @@ -534,9 +772,9 @@ int32_t qhmath_hvx_quantize_ahf_int8( sout2 = Q6_Vh_equals_Vhf(sout2); sline3c = *iptr++; - sline3 = Q6_V_valign_VVR(sline3c, sline3p, (size_t) input); + sline3 = Q6_V_valign_VVR(sline3c, sline3p, (size_t)input); - sout3 = Q6_Vqf16_vmpy_VhfVhf(sline3,scale_vec); + sout3 = Q6_Vqf16_vmpy_VhfVhf(sline3, scale_vec); sout3 = Q6_Vqf16_vadd_Vqf16Vqf16(sout3, es_vec); sout3 = Q6_Vhf_equals_Vqf16(sout3); sout3 = Q6_Vhf_vmin_VhfVhf(sout3, high_level_vec); @@ -579,9 +817,9 @@ int32_t qhmath_hvx_quantize_ahf_int8( sout3 = Q6_Vh_equals_Vhf(sout3); sline4c = *iptr++; - sline4 = Q6_V_valign_VVR(sline4c, sline4p, (size_t) input); + sline4 = Q6_V_valign_VVR(sline4c, sline4p, (size_t)input); - sout4 = Q6_Vqf32_vmpy_VsfVsf(sline4,scale_vec); + sout4 = Q6_Vqf32_vmpy_VsfVsf(sline4, scale_vec); sout4 = Q6_Vqf16_vadd_Vqf16Vqf16(sout4, es_vec); sout4 = Q6_Vhf_equals_Vqf16(sout4); sout4 = Q6_Vhf_vmin_VhfVhf(sout4, high_level_vec); @@ -623,15 +861,12 @@ int32_t qhmath_hvx_quantize_ahf_int8( sout4 = Q6_Vh_equals_Vhf(sout4); - HVX_Vector reql_h = Q6_Vb_vpack_VhVh_sat(sout2, sout1); *optr++ = reql_h; HVX_Vector reqh_h = Q6_Vb_vpack_VhVh_sat(sout4, sout3); *optr++ = reqh_h; - - sline1p = sline1c; sline2p = sline2c; sline3p = sline3c; @@ -657,15 +892,13 @@ int32_t qhmath_hvx_quantize_af( 
uint32_t size, float low_level, float high_level, - float scale) -{ - if ((input == NULL) || (output == NULL) || (size == 0)) - { + float scale) { + if ((input == NULL) || (output == NULL) || (size == 0)) { return -1; } - HVX_Vector *iptr = (HVX_Vector *) input; - HVX_UVector *optr = (HVX_UVector *) output; + HVX_Vector *iptr = (HVX_Vector *)input; + HVX_UVector *optr = (HVX_UVector *)output; HVX_Vector sline1p, sline1c, sline1; HVX_Vector sline2p, sline2c, sline2; @@ -684,7 +917,7 @@ int32_t qhmath_hvx_quantize_af( sline3p = *iptr++; sline4p = *iptr++; - float es = 0.5f; + float es = 0.5f; low_level_vec = Q6_V_vsplat_R(float_to_bits(low_level)); high_level_vec = Q6_V_vsplat_R(float_to_bits(high_level)); scale_vec = Q6_V_vsplat_R(float_to_bits(scale)); @@ -696,7 +929,6 @@ int32_t qhmath_hvx_quantize_af( HVX_Vector uintconvert = Q6_V_vsplat_R(0x80808080); - // HVX_Vector expmask = Q6_V_vsplat_R(FLOAT_EXPONENT_MASK); // HVX_Vector expbias = Q6_V_vsplat_R(FLOAT_EXPONENT_BIAS); // HVX_Vector manmask = Q6_V_vsplat_R(FLOAT_MANTISA_MASK); @@ -705,22 +937,19 @@ int32_t qhmath_hvx_quantize_af( // HVX_Vector negone = Q6_V_vsplat_R(FLOAT_NEG_1); // HVX_Vector zero = Q6_V_vzero(); - for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) - { + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { block = Q6_R_min_RR(i, BLOCK_SIZE); l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); - if (l2fetch_block > 0) - { + if (l2fetch_block > 0) { l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); } - for (int32_t j = 0; j < block; j+=4) - { + for (int32_t j = 0; j < block; j += 4) { sline1c = *iptr++; - sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t) input); + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); - sout1 = Q6_Vqf32_vmpy_VsfVsf(sline1,scale_vec); + sout1 = Q6_Vqf32_vmpy_VsfVsf(sline1, scale_vec); sout1 = Q6_Vqf32_vadd_Vqf32Vqf32(sout1, es_vec); sout1 = Q6_Vsf_equals_Vqf32(sout1); sout1 = Q6_Vsf_vmin_VsfVsf(sout1, 
high_level_vec); @@ -767,9 +996,9 @@ int32_t qhmath_hvx_quantize_af( // sout1 = qhmath_hvx_vw_convert_vqf32_rmode(Q6_Vqf32_vadd_VsfVsf(sout1, Q6_V_vzero()), 0); sline2c = *iptr++; - sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t) input); + sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t)input); - sout2 = Q6_Vqf32_vmpy_VsfVsf(sline2,scale_vec); + sout2 = Q6_Vqf32_vmpy_VsfVsf(sline2, scale_vec); sout2 = Q6_Vqf32_vadd_Vqf32Vqf32(sout2, es_vec); sout2 = Q6_Vsf_equals_Vqf32(sout2); sout2 = Q6_Vsf_vmin_VsfVsf(sout2, high_level_vec); @@ -816,9 +1045,9 @@ int32_t qhmath_hvx_quantize_af( // sout2 = qhmath_hvx_vw_convert_vqf32_rmode(Q6_Vqf32_vadd_VsfVsf(sout2, Q6_V_vzero()), 0); sline3c = *iptr++; - sline3 = Q6_V_valign_VVR(sline3c, sline3p, (size_t) input); + sline3 = Q6_V_valign_VVR(sline3c, sline3p, (size_t)input); - sout3 = Q6_Vqf32_vmpy_VsfVsf(sline3,scale_vec); + sout3 = Q6_Vqf32_vmpy_VsfVsf(sline3, scale_vec); sout3 = Q6_Vqf32_vadd_Vqf32Vqf32(sout3, es_vec); sout3 = Q6_Vsf_equals_Vqf32(sout3); sout3 = Q6_Vsf_vmin_VsfVsf(sout3, high_level_vec); @@ -860,22 +1089,20 @@ int32_t qhmath_hvx_quantize_af( // sout3 = Q6_V_vmux_QVV(expgte23, sout3, tsout1); // } - sout3 = Q6_Vw_equals_Vsf(sout3); sout3 = Q6_Vw_vasr_VwR(sout3, ROUND_2_SCALE); // sout3 = qhmath_hvx_vw_convert_vqf32_rmode(Q6_Vqf32_vadd_VsfVsf(sout3, Q6_V_vzero()), 0); sline4c = *iptr++; - sline4 = Q6_V_valign_VVR(sline4c, sline4p, (size_t) input); + sline4 = Q6_V_valign_VVR(sline4c, sline4p, (size_t)input); - sout4 = Q6_Vqf32_vmpy_VsfVsf(sline4,scale_vec); + sout4 = Q6_Vqf32_vmpy_VsfVsf(sline4, scale_vec); sout4 = Q6_Vqf32_vadd_Vqf32Vqf32(sout4, es_vec); sout4 = Q6_Vsf_equals_Vqf32(sout4); sout4 = Q6_Vsf_vmin_VsfVsf(sout4, high_level_vec); sout4 = Q6_Vsf_vmax_VsfVsf(sout4, low_level_vec); sout4 = Q6_Vqf32_vmpy_VsfVsf(sout4, round_scale_vec); sout4 = Q6_Vsf_equals_Vqf32(sout4); - // { // HVX_Vector exp = Q6_Vw_vasr_VwR(sout4, FLOAT_MANTISA); @@ -915,7 +1142,6 @@ int32_t qhmath_hvx_quantize_af( sout4 = 
Q6_Vw_vasr_VwR(sout4, ROUND_2_SCALE); // sout4 = qhmath_hvx_vw_convert_vqf32_rmode(Q6_Vqf32_vadd_VsfVsf(sout4, Q6_V_vzero()), 0); - HVX_Vector reql_h = Q6_Vh_vpack_VwVw_sat(sout2, sout1); HVX_Vector reqh_h = Q6_Vh_vpack_VwVw_sat(sout4, sout3); HVX_Vector req_b = Q6_Vb_vpack_VhVh_sat(reqh_h, reql_h); @@ -932,21 +1158,150 @@ int32_t qhmath_hvx_quantize_af( return 0; } +#define INT16_ROUND_2_SCALE 15 +#define INT16_ROUND_SCALSE ((1 << INT16_ROUND_2_SCALE) * 1.0f) + +int32_t qhmath_hvx_quantize_ui16_af( + float *restrict input, + float *restrict output, + uint32_t size, + float low_level, + float high_level, + float scale) { + if ((input == NULL) || (output == NULL) || (size == 0)) { + return -1; + } + + HVX_Vector *iptr = (HVX_Vector *)input; + HVX_UVector *optr = (HVX_UVector *)output; + + HVX_Vector sline1p, sline1c, sline1; + HVX_Vector sline2p, sline2c, sline2; + HVX_Vector sline3p, sline3c, sline3; + HVX_Vector sline4p, sline4c, sline4; + + HVX_Vector sout1, sout2, sout3, sout4; + HVX_Vector low_level_vec, high_level_vec, scale_vec, es_vec, round_scale_vec; + int32_t block, l2fetch_block; + // int32_t leftover = size & 31; + int32_t vectors_in_rounddown = size / 32; + // int32_t leftover_size = leftover * sizeof(float); + + sline1p = *iptr++; + sline2p = *iptr++; + sline3p = *iptr++; + sline4p = *iptr++; + + float es = 0.5f; + low_level_vec = Q6_V_vsplat_R(float_to_bits(low_level)); + high_level_vec = Q6_V_vsplat_R(float_to_bits(high_level)); + scale_vec = Q6_V_vsplat_R(float_to_bits(scale)); + es_vec = Q6_V_vsplat_R(float_to_bits(es)); + round_scale_vec = Q6_V_vsplat_R(float_to_bits(INT16_ROUND_SCALSE)); + + HVX_Vector zero_v_sf = Q6_V_vzero(); + es_vec = Q6_Vqf32_vadd_VsfVsf(es_vec, zero_v_sf); + + HVX_Vector uintconvert = Q6_V_vsplat_R(0x80008000); + + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { + block = Q6_R_min_RR(i, BLOCK_SIZE); + l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); + + if (l2fetch_block > 0) { + 
l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); + } + + for (int32_t j = 0; j < block; j += 4) { + sline1c = *iptr++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); + + sout1 = Q6_Vqf32_vmpy_VsfVsf(sline1, scale_vec); + sout1 = Q6_Vqf32_vadd_Vqf32Vqf32(sout1, es_vec); + sout1 = Q6_Vsf_equals_Vqf32(sout1); + sout1 = Q6_Vsf_vmin_VsfVsf(sout1, high_level_vec); + sout1 = Q6_Vsf_vmax_VsfVsf(sout1, low_level_vec); + sout1 = Q6_Vqf32_vmpy_VsfVsf(sout1, round_scale_vec); + sout1 = Q6_Vsf_equals_Vqf32(sout1); + + sout1 = Q6_Vw_equals_Vsf(sout1); + sout1 = Q6_Vw_vasr_VwR(sout1, INT16_ROUND_2_SCALE); + // sout1 = qhmath_hvx_vw_convert_vqf32_rmode(Q6_Vqf32_vadd_VsfVsf(sout1, Q6_V_vzero()), 0); + + sline2c = *iptr++; + sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t)input); + + sout2 = Q6_Vqf32_vmpy_VsfVsf(sline2, scale_vec); + sout2 = Q6_Vqf32_vadd_Vqf32Vqf32(sout2, es_vec); + sout2 = Q6_Vsf_equals_Vqf32(sout2); + sout2 = Q6_Vsf_vmin_VsfVsf(sout2, high_level_vec); + sout2 = Q6_Vsf_vmax_VsfVsf(sout2, low_level_vec); + sout2 = Q6_Vqf32_vmpy_VsfVsf(sout2, round_scale_vec); + sout2 = Q6_Vsf_equals_Vqf32(sout2); + + sout2 = Q6_Vw_equals_Vsf(sout2); + sout2 = Q6_Vw_vasr_VwR(sout2, INT16_ROUND_2_SCALE); + // sout2 = qhmath_hvx_vw_convert_vqf32_rmode(Q6_Vqf32_vadd_VsfVsf(sout2, Q6_V_vzero()), 0); + + sline3c = *iptr++; + sline3 = Q6_V_valign_VVR(sline3c, sline3p, (size_t)input); + + sout3 = Q6_Vqf32_vmpy_VsfVsf(sline3, scale_vec); + sout3 = Q6_Vqf32_vadd_Vqf32Vqf32(sout3, es_vec); + sout3 = Q6_Vsf_equals_Vqf32(sout3); + sout3 = Q6_Vsf_vmin_VsfVsf(sout3, high_level_vec); + sout3 = Q6_Vsf_vmax_VsfVsf(sout3, low_level_vec); + sout3 = Q6_Vqf32_vmpy_VsfVsf(sout3, round_scale_vec); + sout3 = Q6_Vsf_equals_Vqf32(sout3); + + sout3 = Q6_Vw_equals_Vsf(sout3); + sout3 = Q6_Vw_vasr_VwR(sout3, INT16_ROUND_2_SCALE); + // sout3 = qhmath_hvx_vw_convert_vqf32_rmode(Q6_Vqf32_vadd_VsfVsf(sout3, Q6_V_vzero()), 0); + + sline4c = *iptr++; + sline4 = Q6_V_valign_VVR(sline4c, 
sline4p, (size_t)input); + + sout4 = Q6_Vqf32_vmpy_VsfVsf(sline4, scale_vec); + sout4 = Q6_Vqf32_vadd_Vqf32Vqf32(sout4, es_vec); + sout4 = Q6_Vsf_equals_Vqf32(sout4); + sout4 = Q6_Vsf_vmin_VsfVsf(sout4, high_level_vec); + sout4 = Q6_Vsf_vmax_VsfVsf(sout4, low_level_vec); + sout4 = Q6_Vqf32_vmpy_VsfVsf(sout4, round_scale_vec); + sout4 = Q6_Vsf_equals_Vqf32(sout4); + + sout4 = Q6_Vw_equals_Vsf(sout4); + sout4 = Q6_Vw_vasr_VwR(sout4, INT16_ROUND_2_SCALE); + // sout4 = qhmath_hvx_vw_convert_vqf32_rmode(Q6_Vqf32_vadd_VsfVsf(sout4, Q6_V_vzero()), 0); + + HVX_Vector reql_h = Q6_Vh_vpack_VwVw_sat(sout2, sout1); + HVX_Vector reqh_h = Q6_Vh_vpack_VwVw_sat(sout4, sout3); + + *optr++ = Q6_Vh_vadd_VhVh(reql_h, uintconvert); + *optr++ = Q6_Vh_vadd_VhVh(reqh_h, uintconvert); + + sline1p = sline1c; + sline2p = sline2c; + sline3p = sline3c; + sline4p = sline4c; + } + } + + return 0; +} + int32_t qhmath_hvx_quantize_af_out_int8( float *restrict input, float *restrict output, uint32_t size, float low_level, float high_level, - float scale) -{ - if ((input == NULL) || (output == NULL) || (size == 0)) - { + float scale) { + if ((input == NULL) || (output == NULL) || (size == 0)) { return -1; } - HVX_Vector *iptr = (HVX_Vector *) input; - HVX_UVector *optr = (HVX_UVector *) output; + HVX_Vector *iptr = (HVX_Vector *)input; + HVX_UVector *optr = (HVX_UVector *)output; HVX_Vector sline1p, sline1c, sline1; HVX_Vector sline2p, sline2c, sline2; @@ -965,7 +1320,7 @@ int32_t qhmath_hvx_quantize_af_out_int8( sline3p = *iptr++; sline4p = *iptr++; - float es = 0.5f; + float es = 0.5f; low_level_vec = Q6_V_vsplat_R(float_to_bits(low_level)); high_level_vec = Q6_V_vsplat_R(float_to_bits(high_level)); scale_vec = Q6_V_vsplat_R(float_to_bits(scale)); @@ -974,7 +1329,6 @@ int32_t qhmath_hvx_quantize_af_out_int8( HVX_Vector zero_v_sf = Q6_V_vzero(); es_vec = Q6_Vqf32_vadd_VsfVsf(es_vec, zero_v_sf); - HVX_Vector expmask = Q6_V_vsplat_R(FLOAT_EXPONENT_MASK); HVX_Vector expbias = 
Q6_V_vsplat_R(FLOAT_EXPONENT_BIAS); HVX_Vector manmask = Q6_V_vsplat_R(FLOAT_MANTISA_MASK); @@ -983,22 +1337,19 @@ int32_t qhmath_hvx_quantize_af_out_int8( HVX_Vector negone = Q6_V_vsplat_R(FLOAT_NEG_1); HVX_Vector zero = Q6_V_vzero(); - for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) - { + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { block = Q6_R_min_RR(i, BLOCK_SIZE); l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); - if (l2fetch_block > 0) - { + if (l2fetch_block > 0) { l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); } - for (int32_t j = 0; j < block; j+=4) - { + for (int32_t j = 0; j < block; j += 4) { sline1c = *iptr++; - sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t) input); + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); - sout1 = Q6_Vqf32_vmpy_VsfVsf(sline1,scale_vec); + sout1 = Q6_Vqf32_vmpy_VsfVsf(sline1, scale_vec); sout1 = Q6_Vqf32_vadd_Vqf32Vqf32(sout1, es_vec); sout1 = Q6_Vsf_equals_Vqf32(sout1); sout1 = Q6_Vsf_vmin_VsfVsf(sout1, high_level_vec); @@ -1042,9 +1393,9 @@ int32_t qhmath_hvx_quantize_af_out_int8( // sout1 = qhmath_hvx_vw_convert_vqf32_rmode(Q6_Vqf32_vadd_VsfVsf(sout1, Q6_V_vzero()), 0); sline2c = *iptr++; - sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t) input); + sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t)input); - sout2 = Q6_Vqf32_vmpy_VsfVsf(sline2,scale_vec); + sout2 = Q6_Vqf32_vmpy_VsfVsf(sline2, scale_vec); sout2 = Q6_Vqf32_vadd_Vqf32Vqf32(sout2, es_vec); sout2 = Q6_Vsf_equals_Vqf32(sout2); sout2 = Q6_Vsf_vmin_VsfVsf(sout2, high_level_vec); @@ -1088,9 +1439,9 @@ int32_t qhmath_hvx_quantize_af_out_int8( // sout2 = qhmath_hvx_vw_convert_vqf32_rmode(Q6_Vqf32_vadd_VsfVsf(sout2, Q6_V_vzero()), 0); sline3c = *iptr++; - sline3 = Q6_V_valign_VVR(sline3c, sline3p, (size_t) input); + sline3 = Q6_V_valign_VVR(sline3c, sline3p, (size_t)input); - sout3 = Q6_Vqf32_vmpy_VsfVsf(sline3,scale_vec); + sout3 = Q6_Vqf32_vmpy_VsfVsf(sline3, scale_vec); sout3 = 
Q6_Vqf32_vadd_Vqf32Vqf32(sout3, es_vec); sout3 = Q6_Vsf_equals_Vqf32(sout3); sout3 = Q6_Vsf_vmin_VsfVsf(sout3, high_level_vec); @@ -1130,14 +1481,13 @@ int32_t qhmath_hvx_quantize_af_out_int8( sout3 = Q6_V_vmux_QVV(expgte23, sout3, tsout1); } - sout3 = Q6_Vw_equals_Vsf(sout3); // sout3 = qhmath_hvx_vw_convert_vqf32_rmode(Q6_Vqf32_vadd_VsfVsf(sout3, Q6_V_vzero()), 0); sline4c = *iptr++; - sline4 = Q6_V_valign_VVR(sline4c, sline4p, (size_t) input); + sline4 = Q6_V_valign_VVR(sline4c, sline4p, (size_t)input); - sout4 = Q6_Vqf32_vmpy_VsfVsf(sline4,scale_vec); + sout4 = Q6_Vqf32_vmpy_VsfVsf(sline4, scale_vec); sout4 = Q6_Vqf32_vadd_Vqf32Vqf32(sout4, es_vec); sout4 = Q6_Vsf_equals_Vqf32(sout4); sout4 = Q6_Vsf_vmin_VsfVsf(sout4, high_level_vec); @@ -1180,7 +1530,6 @@ int32_t qhmath_hvx_quantize_af_out_int8( sout4 = Q6_Vw_equals_Vsf(sout4); // sout4 = qhmath_hvx_vw_convert_vqf32_rmode(Q6_Vqf32_vadd_VsfVsf(sout4, Q6_V_vzero()), 0); - HVX_Vector reql_h = Q6_Vh_vpack_VwVw_sat(sout2, sout1); HVX_Vector reqh_h = Q6_Vh_vpack_VwVw_sat(sout4, sout3); HVX_Vector req_b = Q6_Vb_vpack_VhVh_sat(reqh_h, reql_h); @@ -1197,145 +1546,228 @@ int32_t qhmath_hvx_quantize_af_out_int8( return 0; } - -template +template GraphStatus llamaquantizeImpl(TensorType1 &out_0, const TensorType1 &in_0, - const PlainFloatTensor& scale) + const PlainFloatTensor &scale) { - /* - * add code here - * */ - /* - * To have good performance and stability, it is required to avoid heap memory - * allocation in this function. The heap memory allocation includes but not - * limited to calling malloc, operator new, constructing STL container objects - * like std::vector with default allocator, and adding items like calling - * std::vector::push_back to STL container objects with default allocator. - * - * Please check in SDK documentation for more information. 
- */ - - // HVX Method -- FP32 Version - out_0.set_dims(in_0); - auto [b_in, h_in, w_in, d_in] = in_0.dims(); - - float scale_ = scale(0,0,0,0); - - scale_ = 1.0f/scale_; - - size_t size = b_in*h_in*w_in*d_in; - DType dtype = in_0.get_dtype(); - - if (dtype == DType::Float16 && out_0.get_dtype() == DType::QUInt8) { - // NHWC - auto in_ptr = (__fp16*)in_0.raw_data_const(); - auto out_ptr = (__fp16*)out_0.raw_data(); - - qhmath_hvx_quantize_ahf(in_ptr, out_ptr, size, -128.0f, 127.0f, scale_); - - } - if (dtype == DType::Float32 && out_0.get_dtype() == DType::QUInt8) { - - // NHWC - auto in_ptr = (float*)in_0.raw_data_const(); - auto out_ptr = (float*)out_0.raw_data(); - qhmath_hvx_quantize_af(in_ptr, out_ptr, size, -128.0f, 127.0f, scale_); - - } - - if (dtype == DType::Float16 && out_0.get_dtype() == DType::QInt8) { - // NHWC - auto in_ptr = (__fp16*)in_0.raw_data_const(); - auto out_ptr = (__fp16*)out_0.raw_data(); - - qhmath_hvx_quantize_ahf_int8(in_ptr, out_ptr, size, -128.0f, 127.0f, scale_); - - } - - if (dtype == DType::Float32 && out_0.get_dtype() == DType::QInt8) { - - // NHWC - auto in_ptr = (float*)in_0.raw_data_const(); - auto out_ptr = (float*)out_0.raw_data(); - qhmath_hvx_quantize_af_out_int8(in_ptr, out_ptr, size, -128.0f, 127.0f, scale_); - - - } - -// auto out_ptr = (int8_t*)out_0.raw_data(); - -// out_ptr[0] = (int)dtype; -// out_ptr[1] = (int)out_0.get_dtype(); - - return GraphStatus::Success; + /* + * add code here + * */ + /* + * To have good performance and stability, it is required to avoid heap memory + * allocation in this function. The heap memory allocation includes but not + * limited to calling malloc, operator new, constructing STL container objects + * like std::vector with default allocator, and adding items like calling + * std::vector::push_back to STL container objects with default allocator. + * + * Please check in SDK documentation for more information. 
+ */ + + // HVX Method -- FP32 Version + out_0.set_dims(in_0); + auto [b_in, h_in, w_in, d_in] = in_0.dims(); + + float scale_ = scale(0, 0, 0, 0); + + scale_ = 1.0f / scale_; + + size_t size = b_in * h_in * w_in * d_in; + DType dtype = in_0.get_dtype(); + + if (dtype == DType::Float16 && out_0.get_dtype() == DType::QUInt8) { + // NHWC + auto in_ptr = (__fp16 *)in_0.raw_data_const(); + auto out_ptr = (__fp16 *)out_0.raw_data(); + + qhmath_hvx_quantize_ahf(in_ptr, out_ptr, size, -128.0f, 127.0f, scale_); + } + if (dtype == DType::Float32 && out_0.get_dtype() == DType::QUInt8) { + // NHWC + auto in_ptr = (float *)in_0.raw_data_const(); + auto out_ptr = (float *)out_0.raw_data(); + qhmath_hvx_quantize_af(in_ptr, out_ptr, size, -128.0f, 127.0f, scale_); + } + + if (dtype == DType::Float16 && out_0.get_dtype() == DType::QUInt16) { + // NHWC + auto in_ptr = (__fp16 *)in_0.raw_data_const(); + auto out_ptr = (__fp16 *)out_0.raw_data(); + + qhmath_hvx_quantize_ui16_ahf(in_ptr, out_ptr, size, -32768.0f, 32767.0f, scale_); + } + if (dtype == DType::Float32 && out_0.get_dtype() == DType::QUInt16) { + // NHWC + auto in_ptr = (float *)in_0.raw_data_const(); + auto out_ptr = (float *)out_0.raw_data(); + qhmath_hvx_quantize_ui16_af(in_ptr, out_ptr, size, -32768.0f, 32767.0f, scale_); + } + + if (dtype == DType::Float16 && out_0.get_dtype() == DType::QInt8) { + // NHWC + auto in_ptr = (__fp16 *)in_0.raw_data_const(); + auto out_ptr = (__fp16 *)out_0.raw_data(); + + qhmath_hvx_quantize_ahf_int8(in_ptr, out_ptr, size, -128.0f, 127.0f, scale_); + } + + if (dtype == DType::Float32 && out_0.get_dtype() == DType::QInt8) { + // NHWC + auto in_ptr = (float *)in_0.raw_data_const(); + auto out_ptr = (float *)out_0.raw_data(); + qhmath_hvx_quantize_af_out_int8(in_ptr, out_ptr, size, -128.0f, 127.0f, scale_); + } + + // auto out_ptr = (int8_t*)out_0.raw_data(); + + // out_ptr[0] = (int)dtype; + // out_ptr[1] = (int)out_0.get_dtype(); + + return GraphStatus::Success; } #else extern float 
Round(float num); -template +template GraphStatus llamaquantizeImpl(TensorType1 &out_0, const TensorType1 &in_0, - const PlainFloatTensor& scale) + const PlainFloatTensor &scale) { out_0.set_dims(in_0); - float scale_ = scale(0,0,0,0); - - auto out_ptr = (int8_t*)out_0.raw_data(); + float scale_ = scale(0, 0, 0, 0); auto [b_in, h_in, w_in, d_in] = in_0.dims(); - for (Idx b = 0; b < b_in; b++) { - for (Idx h = 0; h < h_in; h++) { - - for (Idx w = 0; w < w_in; w++) { - - for (Idx d = 0; d < d_in; d++) { - - float inval = in_0(b, h, w, d); - - // float result = Round(inval / scale_); - - - long v = lroundf(inval / scale_); - - if (v > 127) - v = 127; - - if (v < -128) - v = -128; - - if (out_0.get_dtype() == DType::QUInt8) - v += 128; - - *out_ptr++ = static_cast(v); + + DType dtype = in_0.get_dtype(); + if (dtype == DType::Float32 && out_0.get_dtype() == DType::QUInt8) { + auto out_ptr = (int8_t *)out_0.raw_data(); + + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; h++) { + for (Idx w = 0; w < w_in; w++) { + for (Idx d = 0; d < d_in; d++) { + float inval = in_0(b, h, w, d); + + // float result = Round(inval / scale_); + + long v = lroundf(inval / scale_); + + if (v > 127) + v = 127; + + if (v < -128) + v = -128; + + v += 128; + + *out_ptr++ = static_cast(v); + } + } + } + } + } + + if (dtype == DType::Float32 && out_0.get_dtype() == DType::QUInt16) { + auto out_ptr = (int16_t *)out_0.raw_data(); + + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; h++) { + for (Idx w = 0; w < w_in; w++) { + for (Idx d = 0; d < d_in; d++) { + float inval = in_0(b, h, w, d); + + // float result = Round(inval / scale_); + + long v = lroundf(inval / scale_); + + if (v > 32767) + v = 32767; + + if (v < -32768) + v = -32768; + + v += 32768; + + *out_ptr++ = static_cast(v); + } + } } + } + } + + if (dtype == DType::Float32 && out_0.get_dtype() == DType::QUInt8) { + auto out_ptr = (int8_t *)out_0.raw_data(); + + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < 
h_in; h++) { + for (Idx w = 0; w < w_in; w++) { + for (Idx d = 0; d < d_in; d++) { + float inval = in_0(b, h, w, d); + // float result = Round(inval / scale_); + + long v = lroundf(inval / scale_); + + if (v > 127) + v = 127; + + if (v < -128) + v = -128; + + v += 128; + + *out_ptr++ = static_cast(v); + } + } } } } - return GraphStatus::Success; -} -#endif + if (dtype == DType::Float16 && out_0.get_dtype() == DType::QUInt16) { + auto out_ptr = (int16_t *)out_0.raw_data(); -__attribute__((unused)) static float llamaquantizeCostFunc(const Op *op) -{ - /* - * add code here - * */ + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; h++) { + for (Idx w = 0; w < w_in; w++) { + for (Idx d = 0; d < d_in; d++) { + float inval = in_0(b, h, w, d); - float cost = 0.0; // add cost computation here - return cost; -} + // float result = Round(inval / scale_); + + long v = lroundf(inval / scale_); + if (v > 32767) + v = 32767; + if (v < -32768) + v = -32768; + v += 32768; + *out_ptr++ = static_cast(v); + } + } + } + } + } + + return GraphStatus::Success; +} +#endif + +__attribute__((unused)) static float llamaquantizeCostFunc(const Op *op) { + /* + * add code here + * */ + + float cost = 0.0; // add cost computation here + return cost; +} /* At the bottom of the op file, call END_PKG_OP_DEFINITION(), where is as BEGIN_PKG_OP_DEFINITION diff --git a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMAReLU.cpp b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMAReLU.cpp similarity index 77% rename from src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMAReLU.cpp rename to mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMAReLU.cpp index 1ef2c0c93..c56bb8719 100755 --- a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMAReLU.cpp +++ b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMAReLU.cpp @@ -9,14 +9,12 @@ #include "QnnOpPackage.h" #include "HTP/core/simple_reg.h" - BEGIN_PKG_OP_DEFINITION(PKG_LLaMAReLU); 
- // op execute function declarations -template -GraphStatus llamareluImpl(TensorType& out_0, - const TensorType& in_0); +template +GraphStatus llamareluImpl(TensorType &out_0, + const TensorType &in_0); // forward declaration of sample cost function static float llamareluCostFunc(const Op *op); @@ -61,11 +59,11 @@ DEF_PACKAGE_OP((llamareluImpl), "LLaMAReLU") * one definition per op, and this is optional * syntax: DEF_PACKAGE_PARAM_ORDER(OP,PARAM1,MANDATORY1,DEFAULT1,PARAM2,MANDATORY2,DEFAULT2...) * one or more parameters can be specified for each op - * order of parameters listed determines the order of parameters passed into op execution functions + * order of parameters listed determines the order of parameters passed into op execution functions * if an op does not have a parameter order definition, parameter order passed into Qnn_addNode * will be passed into op execution functions * if an op has a parameter order definition, any parameter passed into Qnn_addNode with unlisted - * name will be abandoned + * name will be abandoned * if two or more op packages with the same package name will be registered, they cannot list * conflicting parameter orders * PARAM refers to parameter name as a string literal @@ -79,7 +77,6 @@ DEF_PACKAGE_OP((llamareluImpl), "LLaMAReLU") * Qnn_addNode */ - /* execute functions for ops */ // #ifndef REFERENCE_OP @@ -94,7 +91,6 @@ DEF_PACKAGE_OP((llamareluImpl), "LLaMAReLU") // #define ONE 0x3F800000 // #define M_ONE 0xAF800000 - // int32_t hvx_relu_au8(uint8_t *restrict input, uint8_t *restrict output, uint32_t size) // { // HVX_Vector *input_v_ptr; @@ -107,7 +103,6 @@ DEF_PACKAGE_OP((llamareluImpl), "LLaMAReLU") // int32_t vectors_in_rounddown = size / 128; // int32_t leftover_size = leftover * sizeof(uint8_t); - // /* Check input arguments. 
Return error status if some argument has invalid value */ // if ((input == 0) || (output == 0) || (size == 0)) // { @@ -200,7 +195,6 @@ DEF_PACKAGE_OP((llamareluImpl), "LLaMAReLU") // * // * Please check in SDK documentation for more information. // */ - // out_0.set_dims(in_0); @@ -214,91 +208,80 @@ DEF_PACKAGE_OP((llamareluImpl), "LLaMAReLU") // return GraphStatus::Success; // } // #else -template -GraphStatus llamareluImpl(TensorType& out_0, - const TensorType& in_0) +template +GraphStatus llamareluImpl(TensorType &out_0, + const TensorType &in_0) { - out_0.set_dims(in_0); + out_0.set_dims(in_0); // NHWC - if (in_0.get_dtype() == DType::QUInt8) { - auto [b_in, h_in, w_in, d_in] = in_0.dims(); - for (Idx b = 0; b < b_in; b++) { - for (Idx h = 0; h < h_in; h++) { - for (Idx w = 0; w < w_in; w++) { - // SiLU - for (Idx d = 0; d < d_in; d++) { - uint8_t inval = in_0(b, h, w, d); - if (inval < 0) - inval = 0; - - out_0(b, h, w, d) = inval; - - } + if (in_0.get_dtype() == DType::QUInt8) { + auto [b_in, h_in, w_in, d_in] = in_0.dims(); + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; h++) { + for (Idx w = 0; w < w_in; w++) { + // SiLU + for (Idx d = 0; d < d_in; d++) { + uint8_t inval = in_0(b, h, w, d); + if (inval < 0) + inval = 0; + + out_0(b, h, w, d) = inval; + } + } + } } - } - } - } else if (in_0.get_dtype() == DType::Float16) { - auto [b_in, h_in, w_in, d_in] = in_0.dims(); - - auto out_ptr = (__fp16*)out_0.raw_data(); - auto in_ptr = (__fp16*)in_0.raw_data_const(); - - for (Idx b = 0; b < b_in; b++) { - for (Idx h = 0; h < h_in; h++) { - for (Idx w = 0; w < w_in; w++) { - - for (Idx d = 0; d < d_in; d++) { - __fp16 inval = *in_ptr++; - if (inval < 0) - inval = 0; - - *out_ptr++ = inval; - - } + } else if (in_0.get_dtype() == DType::Float16) { + auto [b_in, h_in, w_in, d_in] = in_0.dims(); + + auto out_ptr = (__fp16 *)out_0.raw_data(); + auto in_ptr = (__fp16 *)in_0.raw_data_const(); + + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; 
h++) { + for (Idx w = 0; w < w_in; w++) { + for (Idx d = 0; d < d_in; d++) { + __fp16 inval = *in_ptr++; + if (inval < 0) + inval = 0; + + *out_ptr++ = inval; + } + } + } } - } - } - } else if(in_0.get_dtype() == DType::Float32) { - auto [b_in, h_in, w_in, d_in] = in_0.dims(); - for (Idx b = 0; b < b_in; b++) { - for (Idx h = 0; h < h_in; h++) { - for (Idx w = 0; w < w_in; w++) { - for (Idx d = 0; d < d_in; d++) { - float inval = in_0(b, h, w, d); - if (inval < 0) - inval = 0; - - out_0(b, h, w, d) = inval; - - } + } else if (in_0.get_dtype() == DType::Float32) { + auto [b_in, h_in, w_in, d_in] = in_0.dims(); + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; h++) { + for (Idx w = 0; w < w_in; w++) { + for (Idx d = 0; d < d_in; d++) { + float inval = in_0(b, h, w, d); + if (inval < 0) + inval = 0; + + out_0(b, h, w, d) = inval; + } + } + } } - } } - } - - return GraphStatus::Success; + return GraphStatus::Success; } // #endif +__attribute__((unused)) static float llamareluCostFunc(const Op *op) { + /* + * add code here + * */ -__attribute__((unused)) static float llamareluCostFunc(const Op *op) -{ - /* - * add code here - * */ - - float cost = 0.0; // add cost computation here - return cost; + float cost = 0.0; // add cost computation here + return cost; } - - - - /* At the bottom of the op file, call END_PKG_OP_DEFINITION(), where is as BEGIN_PKG_OP_DEFINITION */ diff --git a/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMASuperSiLU.cpp b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMASuperSiLU.cpp new file mode 100755 index 000000000..3976f60ba --- /dev/null +++ b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMASuperSiLU.cpp @@ -0,0 +1,1262 @@ +//============================================================================== +// Auto Generated Code for LLaMAPackage +//============================================================================== + +#include "HTP/core/constraints.h" +#include 
"HTP/core/op_package_feature_support.h" +#include "HTP/core/op_register_ext.h" +#include "HTP/core/optimize.h" +#include "QnnOpPackage.h" +#include "HTP/core/simple_reg.h" + +BEGIN_PKG_OP_DEFINITION(PKG_LLaMASuperSiLU); + +// op execute function declarations +template +GraphStatus llamasupersiluImpl(TensorType &out_0, + const TensorType &in_0, + const TensorType &in_1, + const PlainFloatTensor &a_scale, + const PlainFloatTensor &b_scale, + const PlainFloatTensor &o_scale); + +// forward declaration of sample cost function +static float llamasupersiluCostFunc(const Op *op); + +/* + * method 1 for defining op, using default cost value (i.e. GLACIAL) and default flag (Flags::RESOURCE_HVX) + * syntax: DEF_PACKAGE_OP(F,OP) + * e.g. DEF_PACKAGE_OP((llamasupersiluImpl), "LLaMASuperSiLU") + */ +DEF_PACKAGE_OP((llamasupersiluImpl), "LLaMASuperSiLU") + +/* + * method 2 for defining op with specified cost value (one of GLACIAL, SNAIL, FAST, FREE) + * and provided flags + * syntax: DEF_PACKAGE_OP_AND_COST_AND_FLAGS(F,OP,COST,...) + * can use zero or more flags, FLAG options are IS_CONST, INHIBIT_CONST_PROP, + * RESOURCE_HVX, RESOURCE_HMX(not supported in external op packages) + * e.g. DEF_PACKAGE_OP_AND_COST_AND_FLAGS((llamasupersiluImpl), "LLaMASuperSiLU", SNAIL) + */ + +/* + * method 3 for defining op with cost function pointer and provided flags + * cost function pointer type: typedef float (*cost_function) (const Op * op); + * syntax: DEF_PACKAGE_OP_AND_COST_F_AND_FLAGS(F,OP,COST_F,...) + * e.g. 
DEF_PACKAGE_OP_AND_COST_F_AND_FLAGS((llamasupersiluImpl), + * "LLaMASuperSiLU", llamasupersiluCostFunc, Flags::RESOURCE_HVX) + */ + +/* + * optimization definitions + * need to be global in the package + * one definition per optimization + * syntax: DEF_PACKAGE_OPTIMIZATION(PRIORITY,MATCHCODE,CONSTRAINTCODE,REPLACECODE) + * PRIORITY predefined values include EARLY(2000), MIDDLE(3000), LATE(4000) + * HTP core provides some replacement functions for op package to use + * for more information about optimization rules, please refer to HTP core documentations + */ + +/* + * op parameter order definitions + * need to be global in the package + * one definition per op, and this is optional + * syntax: DEF_PACKAGE_PARAM_ORDER(OP,PARAM1,MANDATORY1,DEFAULT1,PARAM2,MANDATORY2,DEFAULT2...) + * one or more parameters can be specified for each op + * order of parameters listed determines the order of parameters passed into op execution functions + * if an op does not have a parameter order definition, parameter order passed into Qnn_addNode + * will be passed into op execution functions + * if an op has a parameter order definition, any parameter passed into Qnn_addNode with unlisted + * name will be abandoned + * if two or more op packages with the same package name will be registered, they cannot list + * conflicting parameter orders + * PARAM refers to parameter name as a string literal + * MANDATORY refers to whether this parameter is required to be provided at Qnn_addNode + * DEFAULT is used when MANDATORY is false + * if provided as Qnn_Param_t*, + * DEFAULT will be used for graph construction when this parameter is not provided at + * Qnn_addNode + * if provided as nullptr, + * graph construction will skip this parameter when this parameter is not provided at + * Qnn_addNode + */ +DEF_PACKAGE_PARAM_ORDER("LLaMASuperSiLU", + "a_scale", + true, + nullptr, + "b_scale", + true, + nullptr, + "o_scale", + true, + nullptr) + +/* execute functions for ops */ + +#ifndef 
REFERENCE_OP + +#include "qhmath_hvx.h" +#include "hvx_internal.h" +#include +#include + +#define BLOCK_SIZE (8 * 1024 / VLEN) /* vector chunks */ +#define L2FETCH_AHEAD (BLOCK_SIZE) + +#define FP16_MANTISA 10 +#define FP16_EXPONENT_MASK 0x1f +#define FP16_EXPONENT_BIAS 0xf +#define FP16_MANTISA_MASK 0x000003ff +#define FP16_SIGN 15 +#define FP16_NEG_1 0xbc00 +#define ROUND_2_SCALE 22 +#define ROUND_SCALSE ((1 << ROUND_2_SCALE) * 1.0f) + +static inline int32_t float_to_fp16s(float input) { + union { + int32_t i; + __fp16 f[2]; + } fp32 = {.f = {(__fp16)input, (__fp16)input}}; + return fp32.i; +} + +static HVX_INLINE_ALWAYS uint32_t float_to_bits(float x) { + union { + float f; + uint32_t i; + } fp32 = {.f = x}; + return fp32.i; +} + +static const float fp16_c0_coeffs[32] __attribute__((aligned(VLEN))) = + { + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.13239719960243818, + 0.2216255210749415, + 0.3447664743728659, + 0.48137452032585476, + 0.5716299228719798, + 0.5547323231605259, + 0.5046287748870234, + 0.4999985574626892, + 0.5000036514755082, + 0.49475652448004626, + 0.4441393352532763, + 0.428500379952032, + 0.5173297285470642, + 0.6541461039833616, + 0.7783931007462818, + 0.8678015179911097, +}; +static const float fp16_c1_coeffs[32] __attribute__((aligned(VLEN))) = + { + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.05928005756790343, + 0.11063222460270064, + 0.1932879057003057, + 0.30302440212086995, + 0.3922924462181049, + 0.36546332659415875, + 0.2644148210990377, + 0.24989020912329707, + 0.2498532691910313, + 0.2661055781198988, + 0.36728015359480604, + 0.39215270010450015, + 0.3041825601732039, + 0.1940762094668647, + 0.11061794856987572, + 0.059174800917353595, +}; +static const float fp16_c2_coeffs[32] __attribute__((aligned(VLEN))) = + { + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 
0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.010145494303219278, + 0.02123968384425681, + 0.04207468332514667, + 0.07519946712591977, + 0.10840620196267145, + 0.09270738184406795, + 0.015322371881818012, + -0.0009948273994921822, + 0.0011544907060402412, + -0.017040517565094934, + -0.09379878876657094, + -0.10835043868732394, + -0.07558705272699548, + -0.04228875316413285, + -0.021235740718738055, + -0.010124599879590107, +}; +static const float fp16_c3_coeffs[32] __attribute__((aligned(VLEN))) = + { + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0007841223015974933, + 0.001850453397354219, + 0.004187899308371771, + 0.008640952434084206, + 0.01414741414964877, + 0.010117749275618, + -0.01654848996354919, + -0.02395108399453624, + -0.024199111971064446, + -0.015783556879607072, + 0.010407672131558174, + 0.014137608186323335, + 0.008698510795258909, + 0.004213708431213342, + 0.0018499827774393985, + 0.0007822799742289481, +}; +static const float fp16_c4_coeffs[32] __attribute__((aligned(VLEN))) = + { + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 2.3031641204975905e-05, + 6.150442488966733e-05, + 0.00015997783736818624, + 0.00038491646239693526, + 0.0007283649599237781, + 0.00034439150914392054, + -0.003142246198646662, + -0.004120389580321761, + 0.004246050162553198, + 0.0030162727520777893, + -0.00037312974308425725, + -0.0007277242855014247, + -0.00038811687679772674, + -0.0001611434776868886, + -6.14837984586862e-05, + -2.297076123375133e-05, +}; + +int32_t hvx_supersilu_ahf( + uint8_t *restrict input, + uint8_t *restrict input2, + uint8_t *restrict output, + float a_scale, + float b_scale, + float o_scale, + uint32_t size) { + if ((input == NULL) || (output == NULL) || (size == 0)) { + return -1; + } + + HVX_Vector *iptr = (HVX_Vector *)input; + HVX_Vector *iptr2 = (HVX_Vector *)input2; + HVX_UVector *optr = (HVX_UVector *)output; + 
HVX_Vector sline1p, sline1c, sline1; + HVX_Vector sline2p, sline2c, sline2; + + int32_t block, l2fetch_block; + int32_t leftover = size & 128; + int32_t vectors_in_rounddown = size / 128; + // int32_t leftover_size = leftover * sizeof(__fp16); + + sline1p = *iptr++; + sline2p = *iptr2++; + + // dequantize + uint32_t convert = 0x00800080; + HVX_Vector convert_vector = Q6_V_vsplat_R(convert); + + HVX_Vector a_scale_vec = Q6_V_vsplat_R(float_to_fp16s(a_scale)); + HVX_Vector b_scale_vec = Q6_V_vsplat_R(float_to_fp16s(b_scale)); + HVX_Vector zero_v_sf = Q6_V_vzero(); + + // silu + HVX_Vector input_min_v_hf; + HVX_Vector input_shifted_v_hf; + HVX_Vector input_scaled_v; + HVX_VectorPair input_vp_qf32; + // HVX_Vector input_v_qf16; + HVX_Vector mask_idx1_v, mask_idx2_v; + HVX_Vector const16_0_v_hf; + HVX_Vector zero_v_hf, one_v_hf; + HVX_Vector tmp_v; + HVX_Vector idx1_v, idx2_v; + HVX_Vector scale_v; + HVX_DV output_dv; + HVX_DV c0_coeff_dv; + HVX_VectorPair c0_coeff_vp; + HVX_Vector c0_coeff_v; + HVX_DV c1_coeff_dv; + HVX_VectorPair c1_coeff_vp; + HVX_Vector c1_coeff_v; + HVX_DV c2_coeff_dv; + HVX_VectorPair c2_coeff_vp; + HVX_Vector c2_coeff_v; + HVX_DV c3_coeff_dv; + HVX_VectorPair c3_coeff_vp; + HVX_Vector c3_coeff_v; + HVX_DV c4_coeff_dv; + HVX_VectorPair c4_coeff_vp; + HVX_Vector c4_coeff_v; + + scale_v = Q6_Vh_vsplat_R(0x3bfe); + + /* Vector of ones used as mpy neutral element in conversions from hf vector to qf32 vector pair */ + one_v_hf = Q6_Vh_vsplat_R(0x3c00); + + /* + * Vector of zeroes used as neutral element in hf to qf16 conversions. + * NOTE: Some of conversions (i.e conversion of scale factor and coefficients) + * can be avoided in real-time, but this is not done in order to don't + * sacrify code readibility in expense of insignificant performance improvement. 
+ */ + zero_v_hf = Q6_V_vzero(); + + /* Mask for extracting only 4 bits of mantissa */ + mask_idx1_v = Q6_Vh_vsplat_R(0x000F); + + mask_idx2_v = Q6_V_vsplat_R(0x00001010); + + /* 16.0 in IEEE 16-bit floating-point representation */ + const16_0_v_hf = Q6_Vh_vsplat_R(0x4c00); + + /* + * Prepare vector of input_min values, that is used later in shifting input range. + * input_min is low boundary of specified input range. + */ + input_min_v_hf = Q6_Vh_vsplat_R(0xc800); + + /* Convert scale factor from hf to q16. Use the same vector for both formats */ + scale_v = Q6_Vqf16_vadd_VhfVhf(scale_v, zero_v_hf); + + /* Load coefficients */ + c0_coeff_v = *((HVX_Vector *)(fp16_c0_coeffs)); + c1_coeff_v = *((HVX_Vector *)(fp16_c1_coeffs)); + c2_coeff_v = *((HVX_Vector *)(fp16_c2_coeffs)); + c3_coeff_v = *((HVX_Vector *)(fp16_c3_coeffs)); + c4_coeff_v = *((HVX_Vector *)(fp16_c4_coeffs)); + + /* Convert coefficients from hf to qf32 format. Use the same vector for both representations */ + c0_coeff_v = Q6_Vqf32_vadd_VsfVsf(c0_coeff_v, zero_v_hf); + c1_coeff_v = Q6_Vqf32_vadd_VsfVsf(c1_coeff_v, zero_v_hf); + c2_coeff_v = Q6_Vqf32_vadd_VsfVsf(c2_coeff_v, zero_v_hf); + c3_coeff_v = Q6_Vqf32_vadd_VsfVsf(c3_coeff_v, zero_v_hf); + c4_coeff_v = Q6_Vqf32_vadd_VsfVsf(c4_coeff_v, zero_v_hf); + + /* Split 32-bit coefficients to lower and upper part in order to obtain them later with VLUT16. */ + c0_coeff_dv.VV = Q6_Wuw_vzxt_Vuh(c0_coeff_v); + c1_coeff_dv.VV = Q6_Wuw_vzxt_Vuh(c1_coeff_v); + c2_coeff_dv.VV = Q6_Wuw_vzxt_Vuh(c2_coeff_v); + c3_coeff_dv.VV = Q6_Wuw_vzxt_Vuh(c3_coeff_v); + c4_coeff_dv.VV = Q6_Wuw_vzxt_Vuh(c4_coeff_v); + + // quantize + HVX_Vector low_level_vec, high_level_vec, o_scale_vec, es_vec, round_scale_vec; + HVX_Vector uintconvert = Q6_V_vsplat_R(0x80808080); + HVX_Vector vmb = Q6_V_vsplat_R(0x40004000); + + float post_scale_flt = a_scale * b_scale * o_scale; + int scexp = flt_getexp(post_scale_flt); + int rsh = min_i32(-scexp, 7); // e.g. 
0.11 -> 0.88, rsh = 3 + float rsh_fac = flt_power2(rsh); + + int adj_bias = roundf_i32(128 * rsh_fac); + adj_bias = Q6_R_combine_RlRl(adj_bias, adj_bias); + + HVX_Vector vadj = Q6_V_vsplat_R(adj_bias); + + float es = 0.5; + low_level_vec = Q6_V_vsplat_R(float_to_fp16s(-128.0f)); + high_level_vec = Q6_V_vsplat_R(float_to_fp16s(127.0f)); + o_scale_vec = Q6_V_vsplat_R(float_to_fp16s(post_scale_flt * rsh_fac * (1 << 15))); + // one_vec = Q6_V_vsplat_R(float_to_fp16s(1.0f)); + // o_scale_vec = Q6_Vqf16_vadd_VhfVhf(o_scale_vec, zero_v_hf); + es_vec = Q6_V_vsplat_R(float_to_fp16s(es)); + round_scale_vec = Q6_V_vsplat_R(float_to_bits(ROUND_SCALSE)); + + es_vec = Q6_Vqf16_vadd_VhfVhf(es_vec, zero_v_sf); + round_scale_vec = Q6_Vqf32_vadd_VsfVsf(round_scale_vec, zero_v_sf); + + HVX_Vector expmask = Q6_Vh_vsplat_R(FP16_EXPONENT_MASK); + HVX_Vector expbias = Q6_Vh_vsplat_R(FP16_EXPONENT_BIAS); + HVX_Vector manmask = Q6_Vh_vsplat_R(FP16_MANTISA_MASK); + HVX_Vector exp23 = Q6_Vh_vsplat_R(23 - 1); + HVX_Vector exp0 = Q6_Vh_vsplat_R(0 - 1); + HVX_Vector negone = Q6_Vh_vsplat_R(FP16_NEG_1); + HVX_Vector zero = Q6_V_vzero(); + + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { + block = Q6_R_min_RR(i, BLOCK_SIZE); + l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); + + if (l2fetch_block > 0) { + l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); + l2fetch(iptr2 + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); + } + + for (int32_t j = 0; j < block; ++j) { + sline1c = *iptr++; + sline2c = *iptr2++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); + sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t)input2); + + HVX_Vector sline1_high; + HVX_Vector sline1_low; + // HVX_Vector sline2_high; + // HVX_Vector sline2_low; + + { + // dequantize sline1 qf16 + HVX_VectorPair temp = Q6_Wh_vadd_VubVub(sline1, zero_v_sf); + + temp = Q6_W_vshuff_VVR(Q6_V_hi_W(temp), Q6_V_lo_W(temp), -2); + HVX_Vector sout1 = Q6_Vh_vsub_VhVh(Q6_V_lo_W(temp), 
convert_vector); + HVX_Vector sout2 = Q6_Vh_vsub_VhVh(Q6_V_hi_W(temp), convert_vector); + + sline1_low = Q6_Vqf16_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout1), a_scale_vec); + sline1_low = Q6_Vhf_equals_Vqf16(sline1_low); + sline1_high = Q6_Vqf16_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout2), a_scale_vec); + sline1_high = Q6_Vhf_equals_Vqf16(sline1_high); + } + + // { + // // dequantize sline2 qf16 + // HVX_VectorPair temp = Q6_Wh_vadd_VubVub(sline2, zero_v_sf); + + // temp = Q6_W_vshuff_VVR(Q6_V_hi_W(temp), Q6_V_lo_W(temp), -2); + // HVX_Vector sout1 = Q6_Vh_vsub_VhVh(Q6_V_lo_W(temp), convert_vector); + // HVX_Vector sout2 = Q6_Vh_vsub_VhVh(Q6_V_hi_W(temp), convert_vector); + + // sline2_low = Q6_Vqf16_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout1), b_scale_vec); + // sline2_low = Q6_Vhf_equals_Vqf16(sline2_low); + // sline2_high = Q6_Vqf16_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout2), b_scale_vec); + // sline2_high = Q6_Vhf_equals_Vqf16(sline2_high); + // } + + { + // silu sline1_low + tmp_v = Q6_Vh_vdeal_Vh(sline1_low); + + /* Shift input range from [input_min, input_max] to [0, input_max - input_min] */ + input_shifted_v_hf = Q6_Vqf16_vsub_VhfVhf(tmp_v, input_min_v_hf); + + /* + * Scale shifted input range from [0, input_max - input_min] to [0,16.0) + * in order to get corresponding coefficient indexes + */ + input_scaled_v = Q6_Vqf16_vmpy_Vqf16Vqf16(input_shifted_v_hf, scale_v); + + /* + * VLUT 16 requires integer indexes. Shift scaled input range from [0,16.0) + * to [16.0,32.0) in order to convert float indexes to integer values. + * Float values, represented in IEEE 754, in range [16.0,32.0] have the + * same exponent, which means 4 MSB of mantissa carry information about + * integer index. 
+ * Use the same input_scaled_v vector for hf and qf16 representation + */ + input_scaled_v = Q6_Vqf16_vadd_Vqf16Vhf(input_scaled_v, const16_0_v_hf); + + /* Convert back from qf16 to hf in order to extract integer index */ + tmp_v = Q6_Vhf_equals_Vqf16(input_scaled_v); + + /* Only 4 MSB bits of mantissa represent segment index */ + idx1_v = Q6_Vuh_vlsr_VuhR(tmp_v, 6); + + /* Ensure only 4 MSB bits of mantissa are used as indexes */ + idx1_v = Q6_V_vand_VV(idx1_v, mask_idx1_v); + + idx1_v = Q6_Vb_vshuff_Vb(idx1_v); + idx1_v = Q6_V_vor_VV(idx1_v, mask_idx2_v); + idx2_v = Q6_Vw_vasl_VwR(idx1_v, 16); + + /* Obtain the polynomial coefficients from lookup table */ + c0_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c0_coeff_dv.VV), 1); + c0_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c0_coeff_vp, idx2_v, Q6_V_hi_W(c0_coeff_dv.VV), 1); + c1_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c1_coeff_dv.VV), 1); + c1_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c1_coeff_vp, idx2_v, Q6_V_hi_W(c1_coeff_dv.VV), 1); + c2_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c2_coeff_dv.VV), 1); + c2_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c2_coeff_vp, idx2_v, Q6_V_hi_W(c2_coeff_dv.VV), 1); + c3_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c3_coeff_dv.VV), 1); + c3_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c3_coeff_vp, idx2_v, Q6_V_hi_W(c3_coeff_dv.VV), 1); + c4_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c4_coeff_dv.VV), 1); + c4_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c4_coeff_vp, idx2_v, Q6_V_hi_W(c4_coeff_dv.VV), 1); + + /* Convert input from hf vector to qf32 vector pair for Horner's method*/ + input_vp_qf32 = Q6_Wqf32_vmpy_VhfVhf(sline1_low, one_v_hf); + + /* Perform evaluation of polynomial using Horner's method */ + output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(c4_coeff_vp), Q6_V_lo_W(input_vp_qf32)); + output_dv.V.lo = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c3_coeff_vp)); + output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(input_vp_qf32)); + output_dv.V.lo = 
Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c2_coeff_vp)); + output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(input_vp_qf32)); + output_dv.V.lo = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c1_coeff_vp)); + output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(input_vp_qf32)); + output_dv.V.lo = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c0_coeff_vp)); + + output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(c4_coeff_vp), Q6_V_hi_W(input_vp_qf32)); + output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c3_coeff_vp)); + output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(input_vp_qf32)); + output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c2_coeff_vp)); + output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(input_vp_qf32)); + output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c1_coeff_vp)); + output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(input_vp_qf32)); + output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c0_coeff_vp)); + + // x * sigmod + // output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(input_vp_qf32), output_dv.V.lo); + // output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(input_vp_qf32), output_dv.V.hi); + + sline1_low = Q6_Vhf_equals_Wqf32(output_dv.VV); + } + + { + // silu sline1_high + tmp_v = Q6_Vh_vdeal_Vh(sline1_high); + + /* Shift input range from [input_min, input_max] to [0, input_max - input_min] */ + input_shifted_v_hf = Q6_Vqf16_vsub_VhfVhf(tmp_v, input_min_v_hf); + + /* + * Scale shifted input range from [0, input_max - input_min] to [0,16.0) + * in order to get corresponding coefficient indexes + */ + input_scaled_v = Q6_Vqf16_vmpy_Vqf16Vqf16(input_shifted_v_hf, scale_v); + + /* + * VLUT 16 requires integer indexes. Shift scaled input range from [0,16.0) + * to [16.0,32.0) in order to convert float indexes to integer values. 
+ * Float values, represented in IEEE 754, in range [16.0,32.0] have the + * same exponent, which means 4 MSB of mantissa carry information about + * integer index. + * Use the same input_scaled_v vector for hf and qf16 representation + */ + input_scaled_v = Q6_Vqf16_vadd_Vqf16Vhf(input_scaled_v, const16_0_v_hf); + + /* Convert back from qf16 to hf in order to extract integer index */ + tmp_v = Q6_Vhf_equals_Vqf16(input_scaled_v); + + /* Only 4 MSB bits of mantissa represent segment index */ + idx1_v = Q6_Vuh_vlsr_VuhR(tmp_v, 6); + + /* Ensure only 4 MSB bits of mantissa are used as indexes */ + idx1_v = Q6_V_vand_VV(idx1_v, mask_idx1_v); + + idx1_v = Q6_Vb_vshuff_Vb(idx1_v); + idx1_v = Q6_V_vor_VV(idx1_v, mask_idx2_v); + idx2_v = Q6_Vw_vasl_VwR(idx1_v, 16); + + /* Obtain the polynomial coefficients from lookup table */ + c0_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c0_coeff_dv.VV), 1); + c0_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c0_coeff_vp, idx2_v, Q6_V_hi_W(c0_coeff_dv.VV), 1); + c1_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c1_coeff_dv.VV), 1); + c1_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c1_coeff_vp, idx2_v, Q6_V_hi_W(c1_coeff_dv.VV), 1); + c2_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c2_coeff_dv.VV), 1); + c2_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c2_coeff_vp, idx2_v, Q6_V_hi_W(c2_coeff_dv.VV), 1); + c3_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c3_coeff_dv.VV), 1); + c3_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c3_coeff_vp, idx2_v, Q6_V_hi_W(c3_coeff_dv.VV), 1); + c4_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c4_coeff_dv.VV), 1); + c4_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c4_coeff_vp, idx2_v, Q6_V_hi_W(c4_coeff_dv.VV), 1); + + /* Convert input from hf vector to qf32 vector pair for Horner's method*/ + input_vp_qf32 = Q6_Wqf32_vmpy_VhfVhf(sline1_high, one_v_hf); + + /* Perform evaluation of polynomial using Horner's method */ + output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(c4_coeff_vp), Q6_V_lo_W(input_vp_qf32)); + output_dv.V.lo = 
Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c3_coeff_vp)); + output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(input_vp_qf32)); + output_dv.V.lo = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c2_coeff_vp)); + output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(input_vp_qf32)); + output_dv.V.lo = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c1_coeff_vp)); + output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(input_vp_qf32)); + output_dv.V.lo = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c0_coeff_vp)); + + output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(c4_coeff_vp), Q6_V_hi_W(input_vp_qf32)); + output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c3_coeff_vp)); + output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(input_vp_qf32)); + output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c2_coeff_vp)); + output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(input_vp_qf32)); + output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c1_coeff_vp)); + output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(input_vp_qf32)); + output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c0_coeff_vp)); + + // x * sigmod + // output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(input_vp_qf32), output_dv.V.lo); + // output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(input_vp_qf32), output_dv.V.hi); + + sline1_high = Q6_Vhf_equals_Wqf32(output_dv.VV); + } + + HVX_Vector sline_high; + HVX_Vector sline_low; + + // { + // // mul + // sline_high = Q6_Vqf16_vmpy_VhfVhf(sline1_high, sline2_high); + // sline_low = Q6_Vqf16_vmpy_VhfVhf(sline1_low, sline2_low); + + // sline_high = Q6_Vhf_equals_Vqf16(sline_high); + // sline_low = Q6_Vhf_equals_Vqf16(sline_low); + // } + + HVX_VectorPair mul_output; + { + // uint8 mul + // (a-128)*(b-128) = a*b - 128 (a+b) + 128*128 + HVX_VectorPair prod1 = 
Q6_Wuh_vmpyacc_WuhVubVub(Q6_W_vcombine_VV(vmb, vmb), sline1, sline2); + HVX_VectorPair prod2 = Q6_Wh_vmpa_WubRub(Q6_W_vcombine_VV(sline2, sline1), 0x80808080); + mul_output = Q6_Wh_vsub_WhWh(prod1, prod2); + + mul_output = Q6_W_vshuff_VVR(Q6_V_hi_W(mul_output), Q6_V_lo_W(mul_output), -2); + + // sline_low = Q6_Vqf16_vmpy_VhfVhf(sline1_low, Q6_Vhf_equals_Vh(Q6_V_lo_W(mul_output))); + // sline_high = Q6_Vqf16_vmpy_VhfVhf(sline1_high, Q6_Vhf_equals_Vh(Q6_V_hi_W(mul_output))); + } + + { + // scaling quantize + sline_low = Q6_Vqf16_vmpy_VhfVhf(sline1_low, o_scale_vec); + sline_low = Q6_Vh_equals_Vhf(Q6_Vhf_equals_Vqf16(sline_low)); + sline_low = Q6_Vh_vadd_VhVh_sat(Q6_Vh_vmpy_VhVh_s1_rnd_sat(Q6_V_lo_W(mul_output), sline_low), vadj); + + sline_high = Q6_Vqf16_vmpy_VhfVhf(sline1_high, o_scale_vec); + sline_high = Q6_Vh_equals_Vhf(Q6_Vhf_equals_Vqf16(sline_high)); + sline_high = Q6_Vh_vadd_VhVh_sat(Q6_Vh_vmpy_VhVh_s1_rnd_sat(sline_high, Q6_V_hi_W(mul_output)), vadj); + + HVX_Vector sout = Q6_Vub_vasr_VhVhR_rnd_sat(sline_high, sline_low, rsh); + sout = Q6_Vb_vdeal_Vb(sout); + *optr++ = sout; + } + + // { + // // quantize + // HVX_Vector sout1 = Q6_Vqf16_vmpy_Vqf16Vhf(sline_low, o_scale_vec); + // sout1 = Q6_Vqf16_vadd_Vqf16Vqf16(sout1, es_vec); + // sout1 = Q6_Vhf_equals_Vqf16(sout1); + // sout1 = Q6_Vhf_vmin_VhfVhf(sout1, high_level_vec); + // sout1 = Q6_Vhf_vmax_VhfVhf(sout1, low_level_vec); + // HVX_VectorPair sout1_pair = Q6_Wqf32_vmpy_VhfVhf(sout1, one_vec); + // HVX_Vector sout1_low = Q6_Vsf_equals_Vqf32( Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(sout1_pair), round_scale_vec)); + // HVX_Vector sout1_high = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(sout1_pair), round_scale_vec)); + + // sout1_pair = Q6_W_vshuff_VVR(sout1_high, sout1_low, -4); + // sout1_low = Q6_V_lo_W(sout1_pair); + // sout1_high = Q6_V_hi_W(sout1_pair); + + // // { + // // HVX_Vector exp = Q6_Vh_vasr_VhR(sout1, FP16_MANTISA); + // // exp = Q6_V_vand_VV(exp, expmask); + // // exp = 
Q6_Vh_vsub_VhVh(exp, expbias); + + // // HVX_Vector man = Q6_Vh_vasr_VhVh(manmask, exp); + // // HVX_Vector manzero = Q6_V_vand_VV(sout1, man); + + // // HVX_Vector sign = Q6_Vh_vasr_VhR(sout1, FP16_SIGN); + // // HVX_Vector issignpos = Q6_Q_vcmp_eq_VhVh(sign, zero); + + // // HVX_Vector expgte23 = Q6_Q_vcmp_gt_VhVh(exp, exp23); + // // HVX_Vector expgte0 = Q6_Q_vcmp_gt_VhVh(exp, exp0); + // // HVX_Vector maneqzero = Q6_Q_vcmp_eq_VhVh(manzero, zero); + + // // HVX_Vector exppos_signneg = Q6_Vh_vadd_VhVh(sout1, man); + // // man = Q6_V_vnot_V(man); + // // HVX_Vector exppos_signpos = Q6_V_vand_VV(sout1, man); + // // exppos_signneg = Q6_V_vand_VV(exppos_signneg, man); + // // HVX_Vector shift1 = Q6_Vh_vasl_VhR(sout1, 1); + // // HVX_Vector iszero = Q6_Q_vcmp_eq_VhVh(shift1, zero); + + // // // exp >= 0 + // // HVX_Vector tsout1 = Q6_V_vmux_QVV(issignpos, exppos_signpos, exppos_signneg); + // // tsout1 = Q6_V_vmux_QVV(maneqzero, sout1, tsout1); + + // // // exp < 0 (-1, 1) + // // HVX_Vector tsout2 = Q6_V_vmux_QVV(iszero, sout1, negone); + // // tsout2 = Q6_V_vmux_QVV(issignpos, zero, tsout2); + + // // tsout1 = Q6_V_vmux_QVV(expgte0, tsout1, tsout2); + // // sout1 = Q6_V_vmux_QVV(expgte23, sout1, tsout1); + // // } + + // sout1_low = Q6_Vw_equals_Vsf(sout1_low); + // sout1_low = Q6_Vw_vasr_VwR(sout1_low, ROUND_2_SCALE); + // sout1_high = Q6_Vw_equals_Vsf(sout1_high); + // sout1_high = Q6_Vw_vasr_VwR(sout1_high, ROUND_2_SCALE); + + // HVX_Vector sout2 = Q6_Vqf16_vmpy_Vqf16Vhf(sline_high, o_scale_vec); + // sout2 = Q6_Vqf16_vadd_Vqf16Vqf16(sout2, es_vec); + // sout2 = Q6_Vhf_equals_Vqf16(sout2); + // sout2 = Q6_Vhf_vmin_VhfVhf(sout2, high_level_vec); + // sout2 = Q6_Vhf_vmax_VhfVhf(sout2, low_level_vec); + // HVX_VectorPair sout2_pair = Q6_Wqf32_vmpy_VhfVhf(sout2, one_vec); + // HVX_Vector sout2_low = Q6_Vsf_equals_Vqf32( Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(sout2_pair), round_scale_vec)); + // HVX_Vector sout2_high = 
Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(sout2_pair), round_scale_vec)); + + // sout2_pair = Q6_W_vshuff_VVR(sout2_high, sout2_low, -4); + // sout2_low = Q6_V_lo_W(sout2_pair); + // sout2_high = Q6_V_hi_W(sout2_pair); + + // // { + // // HVX_Vector exp = Q6_Vh_vasr_VhR(sout2, FP16_MANTISA); + // // exp = Q6_V_vand_VV(exp, expmask); + // // exp = Q6_Vh_vsub_VhVh(exp, expbias); + + // // HVX_Vector man = Q6_Vh_vasr_VhVh(manmask, exp); + // // HVX_Vector manzero = Q6_V_vand_VV(sout2, man); + + // // HVX_Vector sign = Q6_Vh_vasr_VhR(sout2, FP16_SIGN); + // // HVX_Vector issignpos = Q6_Q_vcmp_eq_VhVh(sign, zero); + + // // HVX_Vector expgte23 = Q6_Q_vcmp_gt_VhVh(exp, exp23); + // // HVX_Vector expgte0 = Q6_Q_vcmp_gt_VhVh(exp, exp0); + // // HVX_Vector maneqzero = Q6_Q_vcmp_eq_VhVh(manzero, zero); + + // // HVX_Vector exppos_signneg = Q6_Vh_vadd_VhVh(sout2, man); + // // man = Q6_V_vnot_V(man); + // // HVX_Vector exppos_signpos = Q6_V_vand_VV(sout2, man); + // // exppos_signneg = Q6_V_vand_VV(exppos_signneg, man); + // // HVX_Vector shift1 = Q6_Vh_vasl_VhR(sout2, 1); + // // HVX_Vector iszero = Q6_Q_vcmp_eq_VhVh(shift1, zero); + + // // // exp >= 0 + // // HVX_Vector tsout1 = Q6_V_vmux_QVV(issignpos, exppos_signpos, exppos_signneg); + // // tsout1 = Q6_V_vmux_QVV(maneqzero, sout2, tsout1); + + // // // exp < 0 (-1, 1) + // // HVX_Vector tsout2 = Q6_V_vmux_QVV(iszero, sout2, negone); + // // tsout2 = Q6_V_vmux_QVV(issignpos, zero, tsout2); + + // // tsout1 = Q6_V_vmux_QVV(expgte0, tsout1, tsout2); + // // sout2 = Q6_V_vmux_QVV(expgte23, sout2, tsout1); + // // } + + // sout2_low = Q6_Vw_equals_Vsf(sout2_low); + // sout2_low = Q6_Vw_vasr_VwR(sout2_low, ROUND_2_SCALE); + // sout2_high = Q6_Vw_equals_Vsf(sout2_high); + // sout2_high = Q6_Vw_vasr_VwR(sout2_high, ROUND_2_SCALE); + + // HVX_Vector reql_h = Q6_Vh_vpack_VwVw_sat(sout1_high, sout1_low); + // HVX_Vector reqh_h = Q6_Vh_vpack_VwVw_sat(sout2_high, sout2_low); + // HVX_Vector req_b = 
Q6_Vb_vpack_VhVh_sat(reqh_h, reql_h); + + // *optr++ = Q6_Vb_vadd_VbVb(req_b, uintconvert); + // } + + sline1p = sline1c; + sline2p = sline2c; + } + } + + if (vectors_in_rounddown > 0) { + o_scale_vec = Q6_V_vsplat_R(float_to_fp16s(o_scale)); + + sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? sline1p : *iptr++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); + + sline2c = is_aligned(iptr2, VLEN) && leftover == 0 ? sline2p : *iptr2++; + sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t)input2); + + HVX_Vector sline1_high; + HVX_Vector sline1_low; + HVX_Vector sline2_high; + HVX_Vector sline2_low; + + { + // dequantize sline1 qf16 + HVX_VectorPair temp = Q6_Wh_vadd_VubVub(sline1, zero_v_sf); + + temp = Q6_W_vshuff_VVR(Q6_V_hi_W(temp), Q6_V_lo_W(temp), -2); + HVX_Vector sout1 = Q6_Vh_vsub_VhVh(Q6_V_lo_W(temp), convert_vector); + HVX_Vector sout2 = Q6_Vh_vsub_VhVh(Q6_V_hi_W(temp), convert_vector); + + sline1_low = Q6_Vqf16_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout1), a_scale_vec); + sline1_low = Q6_Vhf_equals_Vqf16(sline1_low); + sline1_high = Q6_Vqf16_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout2), a_scale_vec); + sline1_high = Q6_Vhf_equals_Vqf16(sline1_high); + } + + { + // dequantize sline2 qf16 + HVX_VectorPair temp = Q6_Wh_vadd_VubVub(sline2, zero_v_sf); + + temp = Q6_W_vshuff_VVR(Q6_V_hi_W(temp), Q6_V_lo_W(temp), -2); + HVX_Vector sout1 = Q6_Vh_vsub_VhVh(Q6_V_lo_W(temp), convert_vector); + HVX_Vector sout2 = Q6_Vh_vsub_VhVh(Q6_V_hi_W(temp), convert_vector); + + sline2_low = Q6_Vqf16_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout1), b_scale_vec); + sline2_low = Q6_Vhf_equals_Vqf16(sline2_low); + sline2_high = Q6_Vqf16_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout2), b_scale_vec); + sline2_high = Q6_Vhf_equals_Vqf16(sline2_high); + } + + { + // silu sline1_low + tmp_v = Q6_Vh_vdeal_Vh(sline1_low); + + /* Shift input range from [input_min, input_max] to [0, input_max - input_min] */ + input_shifted_v_hf = Q6_Vqf16_vsub_VhfVhf(tmp_v, input_min_v_hf); + + /* + * Scale shifted input range 
from [0, input_max - input_min] to [0,16.0) + * in order to get corresponding coefficient indexes + */ + input_scaled_v = Q6_Vqf16_vmpy_Vqf16Vqf16(input_shifted_v_hf, scale_v); + + /* + * VLUT 16 requires integer indexes. Shift scaled input range from [0,16.0) + * to [16.0,32.0) in order to convert float indexes to integer values. + * Float values, represented in IEEE 754, in range [16.0,32.0] have the + * same exponent, which means 4 MSB of mantissa carry information about + * integer index. + * Use the same input_scaled_v vector for hf and qf16 representation + */ + input_scaled_v = Q6_Vqf16_vadd_Vqf16Vhf(input_scaled_v, const16_0_v_hf); + + /* Convert back from qf16 to hf in order to extract integer index */ + tmp_v = Q6_Vhf_equals_Vqf16(input_scaled_v); + + /* Only 4 MSB bits of mantissa represent segment index */ + idx1_v = Q6_Vuh_vlsr_VuhR(tmp_v, 6); + + /* Ensure only 4 MSB bits of mantissa are used as indexes */ + idx1_v = Q6_V_vand_VV(idx1_v, mask_idx1_v); + + idx1_v = Q6_Vb_vshuff_Vb(idx1_v); + idx1_v = Q6_V_vor_VV(idx1_v, mask_idx2_v); + idx2_v = Q6_Vw_vasl_VwR(idx1_v, 16); + + /* Obtain the polynomial coefficients from lookup table */ + c0_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c0_coeff_dv.VV), 1); + c0_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c0_coeff_vp, idx2_v, Q6_V_hi_W(c0_coeff_dv.VV), 1); + c1_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c1_coeff_dv.VV), 1); + c1_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c1_coeff_vp, idx2_v, Q6_V_hi_W(c1_coeff_dv.VV), 1); + c2_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c2_coeff_dv.VV), 1); + c2_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c2_coeff_vp, idx2_v, Q6_V_hi_W(c2_coeff_dv.VV), 1); + c3_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c3_coeff_dv.VV), 1); + c3_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c3_coeff_vp, idx2_v, Q6_V_hi_W(c3_coeff_dv.VV), 1); + c4_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c4_coeff_dv.VV), 1); + c4_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c4_coeff_vp, idx2_v, Q6_V_hi_W(c4_coeff_dv.VV), 1); + + /* 
Convert input from hf vector to qf32 vector pair for Horner's method*/ + input_vp_qf32 = Q6_Wqf32_vmpy_VhfVhf(sline1_low, one_v_hf); + + /* Perform evaluation of polynomial using Horner's method */ + output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(c4_coeff_vp), Q6_V_lo_W(input_vp_qf32)); + output_dv.V.lo = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c3_coeff_vp)); + output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(input_vp_qf32)); + output_dv.V.lo = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c2_coeff_vp)); + output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(input_vp_qf32)); + output_dv.V.lo = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c1_coeff_vp)); + output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(input_vp_qf32)); + output_dv.V.lo = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c0_coeff_vp)); + + output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(c4_coeff_vp), Q6_V_hi_W(input_vp_qf32)); + output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c3_coeff_vp)); + output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(input_vp_qf32)); + output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c2_coeff_vp)); + output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(input_vp_qf32)); + output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c1_coeff_vp)); + output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(input_vp_qf32)); + output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c0_coeff_vp)); + + // x * sigmod + output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(input_vp_qf32), output_dv.V.lo); + output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(input_vp_qf32), output_dv.V.hi); + + sline1_low = Q6_Vhf_equals_Wqf32(output_dv.VV); + } + + { + // silu sline1_high + tmp_v = Q6_Vh_vdeal_Vh(sline1_high); + + /* Shift input range from [input_min, input_max] to [0, input_max - input_min] */ + 
input_shifted_v_hf = Q6_Vqf16_vsub_VhfVhf(tmp_v, input_min_v_hf); + + /* + * Scale shifted input range from [0, input_max - input_min] to [0,16.0) + * in order to get corresponding coefficient indexes + */ + input_scaled_v = Q6_Vqf16_vmpy_Vqf16Vqf16(input_shifted_v_hf, scale_v); + + /* + * VLUT 16 requires integer indexes. Shift scaled input range from [0,16.0) + * to [16.0,32.0) in order to convert float indexes to integer values. + * Float values, represented in IEEE 754, in range [16.0,32.0] have the + * same exponent, which means 4 MSB of mantissa carry information about + * integer index. + * Use the same input_scaled_v vector for hf and qf16 representation + */ + input_scaled_v = Q6_Vqf16_vadd_Vqf16Vhf(input_scaled_v, const16_0_v_hf); + + /* Convert back from qf16 to hf in order to extract integer index */ + tmp_v = Q6_Vhf_equals_Vqf16(input_scaled_v); + + /* Only 4 MSB bits of mantissa represent segment index */ + idx1_v = Q6_Vuh_vlsr_VuhR(tmp_v, 6); + + /* Ensure only 4 MSB bits of mantissa are used as indexes */ + idx1_v = Q6_V_vand_VV(idx1_v, mask_idx1_v); + + idx1_v = Q6_Vb_vshuff_Vb(idx1_v); + idx1_v = Q6_V_vor_VV(idx1_v, mask_idx2_v); + idx2_v = Q6_Vw_vasl_VwR(idx1_v, 16); + + /* Obtain the polynomial coefficients from lookup table */ + c0_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c0_coeff_dv.VV), 1); + c0_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c0_coeff_vp, idx2_v, Q6_V_hi_W(c0_coeff_dv.VV), 1); + c1_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c1_coeff_dv.VV), 1); + c1_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c1_coeff_vp, idx2_v, Q6_V_hi_W(c1_coeff_dv.VV), 1); + c2_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c2_coeff_dv.VV), 1); + c2_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c2_coeff_vp, idx2_v, Q6_V_hi_W(c2_coeff_dv.VV), 1); + c3_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c3_coeff_dv.VV), 1); + c3_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c3_coeff_vp, idx2_v, Q6_V_hi_W(c3_coeff_dv.VV), 1); + c4_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c4_coeff_dv.VV), 
1); + c4_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c4_coeff_vp, idx2_v, Q6_V_hi_W(c4_coeff_dv.VV), 1); + + /* Convert input from hf vector to qf32 vector pair for Horner's method*/ + input_vp_qf32 = Q6_Wqf32_vmpy_VhfVhf(sline1_high, one_v_hf); + + /* Perform evaluation of polynomial using Horner's method */ + output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(c4_coeff_vp), Q6_V_lo_W(input_vp_qf32)); + output_dv.V.lo = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c3_coeff_vp)); + output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(input_vp_qf32)); + output_dv.V.lo = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c2_coeff_vp)); + output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(input_vp_qf32)); + output_dv.V.lo = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c1_coeff_vp)); + output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(input_vp_qf32)); + output_dv.V.lo = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c0_coeff_vp)); + + output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(c4_coeff_vp), Q6_V_hi_W(input_vp_qf32)); + output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c3_coeff_vp)); + output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(input_vp_qf32)); + output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c2_coeff_vp)); + output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(input_vp_qf32)); + output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c1_coeff_vp)); + output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(input_vp_qf32)); + output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c0_coeff_vp)); + + // x * sigmod + output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(input_vp_qf32), output_dv.V.lo); + output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(input_vp_qf32), output_dv.V.hi); + + sline1_high = Q6_Vhf_equals_Wqf32(output_dv.VV); + } + + HVX_Vector sline_high; + HVX_Vector sline_low; + + 
{ + // mul + sline_high = Q6_Vqf16_vmpy_VhfVhf(sline1_high, sline2_high); + sline_low = Q6_Vqf16_vmpy_VhfVhf(sline1_low, sline2_low); + + sline_high = Q6_Vhf_equals_Vqf16(sline_high); + sline_low = Q6_Vhf_equals_Vqf16(sline_low); + } + + { + // quantize + HVX_Vector sout1 = Q6_Vqf16_vmpy_VhfVhf(sline_low, o_scale_vec); + sout1 = Q6_Vqf16_vadd_Vqf16Vqf16(sout1, es_vec); + sout1 = Q6_Vhf_equals_Vqf16(sout1); + sout1 = Q6_Vhf_vmin_VhfVhf(sout1, high_level_vec); + sout1 = Q6_Vhf_vmax_VhfVhf(sout1, low_level_vec); + + { + HVX_Vector exp = Q6_Vh_vasr_VhR(sout1, FP16_MANTISA); + exp = Q6_V_vand_VV(exp, expmask); + exp = Q6_Vh_vsub_VhVh(exp, expbias); + + HVX_Vector man = Q6_Vh_vasr_VhVh(manmask, exp); + HVX_Vector manzero = Q6_V_vand_VV(sout1, man); + + HVX_Vector sign = Q6_Vh_vasr_VhR(sout1, FP16_SIGN); + HVX_Vector issignpos = Q6_Q_vcmp_eq_VhVh(sign, zero); + + HVX_Vector expgte23 = Q6_Q_vcmp_gt_VhVh(exp, exp23); + HVX_Vector expgte0 = Q6_Q_vcmp_gt_VhVh(exp, exp0); + HVX_Vector maneqzero = Q6_Q_vcmp_eq_VhVh(manzero, zero); + + HVX_Vector exppos_signneg = Q6_Vh_vadd_VhVh(sout1, man); + man = Q6_V_vnot_V(man); + HVX_Vector exppos_signpos = Q6_V_vand_VV(sout1, man); + exppos_signneg = Q6_V_vand_VV(exppos_signneg, man); + HVX_Vector shift1 = Q6_Vh_vasl_VhR(sout1, 1); + HVX_Vector iszero = Q6_Q_vcmp_eq_VhVh(shift1, zero); + + // exp >= 0 + HVX_Vector tsout1 = Q6_V_vmux_QVV(issignpos, exppos_signpos, exppos_signneg); + tsout1 = Q6_V_vmux_QVV(maneqzero, sout1, tsout1); + + // exp < 0 (-1, 1) + HVX_Vector tsout2 = Q6_V_vmux_QVV(iszero, sout1, negone); + tsout2 = Q6_V_vmux_QVV(issignpos, zero, tsout2); + + tsout1 = Q6_V_vmux_QVV(expgte0, tsout1, tsout2); + sout1 = Q6_V_vmux_QVV(expgte23, sout1, tsout1); + } + + sout1 = Q6_Vh_equals_Vhf(sout1); + + HVX_Vector sout2 = Q6_Vqf16_vmpy_VhfVhf(sline_high, o_scale_vec); + sout2 = Q6_Vqf16_vadd_Vqf16Vqf16(sout2, es_vec); + sout2 = Q6_Vhf_equals_Vqf16(sout2); + sout2 = Q6_Vhf_vmin_VhfVhf(sout2, high_level_vec); + sout2 = 
Q6_Vhf_vmax_VhfVhf(sout2, low_level_vec); + + { + HVX_Vector exp = Q6_Vh_vasr_VhR(sout2, FP16_MANTISA); + exp = Q6_V_vand_VV(exp, expmask); + exp = Q6_Vh_vsub_VhVh(exp, expbias); + + HVX_Vector man = Q6_Vh_vasr_VhVh(manmask, exp); + HVX_Vector manzero = Q6_V_vand_VV(sout2, man); + + HVX_Vector sign = Q6_Vh_vasr_VhR(sout2, FP16_SIGN); + HVX_Vector issignpos = Q6_Q_vcmp_eq_VhVh(sign, zero); + + HVX_Vector expgte23 = Q6_Q_vcmp_gt_VhVh(exp, exp23); + HVX_Vector expgte0 = Q6_Q_vcmp_gt_VhVh(exp, exp0); + HVX_Vector maneqzero = Q6_Q_vcmp_eq_VhVh(manzero, zero); + + HVX_Vector exppos_signneg = Q6_Vh_vadd_VhVh(sout2, man); + man = Q6_V_vnot_V(man); + HVX_Vector exppos_signpos = Q6_V_vand_VV(sout2, man); + exppos_signneg = Q6_V_vand_VV(exppos_signneg, man); + HVX_Vector shift1 = Q6_Vh_vasl_VhR(sout2, 1); + HVX_Vector iszero = Q6_Q_vcmp_eq_VhVh(shift1, zero); + + // exp >= 0 + HVX_Vector tsout1 = Q6_V_vmux_QVV(issignpos, exppos_signpos, exppos_signneg); + tsout1 = Q6_V_vmux_QVV(maneqzero, sout2, tsout1); + + // exp < 0 (-1, 1) + HVX_Vector tsout2 = Q6_V_vmux_QVV(iszero, sout2, negone); + tsout2 = Q6_V_vmux_QVV(issignpos, zero, tsout2); + + tsout1 = Q6_V_vmux_QVV(expgte0, tsout1, tsout2); + sout2 = Q6_V_vmux_QVV(expgte23, sout2, tsout1); + } + + sout2 = Q6_Vh_equals_Vhf(sout2); + + HVX_Vector reql_h = Q6_Vb_vpack_VhVh_sat(sout2, sout1); + *optr++ = Q6_Vb_vadd_VbVb(reql_h, uintconvert); + } + } + + // // Handle leftover elements. + // if (leftover_size > 0) { + // sline1c = (is_in_one_chunk(iptr, leftover_size, VLEN) + // ? sline1p + // : *iptr++); + // sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); + + // sline2c = (is_in_one_chunk(iptr2, leftover_size, VLEN) + // ? 
sline2p + // : *iptr2++); + // sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t)input2); + + // vstu_variable(optr, leftover_size, Q6_Vhf_equals_Vqf16(Q6_Vqf16_vmpy_VhfVhf(sline1, sline2))); + // } + + return 0; +} + +template +GraphStatus llamasupersiluImpl(TensorType &out_0, + const TensorType &in_0, + const TensorType &in_1, + const PlainFloatTensor &a_scale, + const PlainFloatTensor &b_scale, + const PlainFloatTensor &o_scale) + +{ + /* + * add code here + * */ + /* + * To have good performance and stability, it is required to avoid heap memory + * allocation in this function. The heap memory allocation includes but not + * limited to calling malloc, operator new, constructing STL container objects + * like std::vector with default allocator, and adding items like calling + * std::vector::push_back to STL container objects with default allocator. + * + * Please check in SDK documentation for more information. + */ + out_0.set_dims(in_0); + + auto [b_in, h_in, w_in, d_in] = in_0.dims(); + size_t size = b_in * h_in * w_in * d_in; + + float a_scale_ = a_scale(0, 0, 0, 0); + float b_scale_ = b_scale(0, 0, 0, 0); + float o_scale_ = o_scale(0, 0, 0, 0); + + auto in_ptr = (uint8_t *)in_0.raw_data_const(); + auto in_ptr2 = (uint8_t *)in_1.raw_data_const(); + + auto out_ptr = (uint8_t *)out_0.raw_data(); + + DType dtype = in_0.get_dtype(); + + if (dtype == DType::QUInt8 && out_0.get_dtype() == DType::QUInt8) { + hvx_supersilu_ahf(in_ptr, in_ptr2, out_ptr, a_scale_, b_scale_, 1.0f / o_scale_, size); + } + + return GraphStatus::Success; +} + +#else + +template +GraphStatus llamasupersiluImpl(TensorType &out_0, + const TensorType &in_0, + const TensorType &in_1, + const PlainFloatTensor &a_scale, + const PlainFloatTensor &b_scale, + const PlainFloatTensor &o_scale) + +{ + /* + * add code here + * */ + /* + * To have good performance and stability, it is required to avoid heap memory + * allocation in this function. 
The heap memory allocation includes but not + * limited to calling malloc, operator new, constructing STL container objects + * like std::vector with default allocator, and adding items like calling + * std::vector::push_back to STL container objects with default allocator. + * + * Please check in SDK documentation for more information. + */ + + out_0.set_dims(in_0); + + float a_scale_ = a_scale(0, 0, 0, 0); + float b_scale_ = b_scale(0, 0, 0, 0); + float o_scale_ = o_scale(0, 0, 0, 0); + + auto in_ptr = (uint8_t *)in_0.raw_data_const(); + auto in_ptr2 = (uint8_t *)in_1.raw_data_const(); + + auto out_ptr = (uint8_t *)out_0.raw_data(); + + auto [b_in, h_in, w_in, d_in] = in_0.dims(); + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; h++) { + for (Idx w = 0; w < w_in; w++) { + // mul + for (Idx d = 0; d < d_in; d++) { + int32_t a_inval = static_cast(*in_ptr++); + float a_inval_fp16 = (a_inval - 128) * a_scale_; + + int32_t b_inval = static_cast(*in_ptr2++); + float b_inval_fp16 = (b_inval - 128) * b_scale_; + + a_inval_fp16 = a_inval_fp16 * (1 / (1 + expf(-a_inval_fp16))); + + float inval = a_inval_fp16 * b_inval_fp16; + + long v = lroundf(inval / o_scale_); + + if (v > 127) + v = 127; + + if (v < -128) + v = -128; + + v += 128; + + *out_ptr++ = static_cast(v); + } + } + } + } + + return GraphStatus::Success; +} + +#endif + +__attribute__((unused)) static float llamasupersiluCostFunc(const Op *op) { + /* + * add code here + * */ + + float cost = 0.0; // add cost computation here + return cost; +} + +/* At the bottom of the op file, call END_PKG_OP_DEFINITION(), + where is as BEGIN_PKG_OP_DEFINITION +*/ +END_PKG_OP_DEFINITION(PKG_LLaMASuperSiLU); \ No newline at end of file diff --git a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/MergeOutput.cpp b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/MergeOutput.cpp similarity index 54% rename from src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/MergeOutput.cpp rename to 
mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/MergeOutput.cpp index 6c573ab2d..e001c4e3e 100755 --- a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/MergeOutput.cpp +++ b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/MergeOutput.cpp @@ -9,18 +9,16 @@ #include "QnnOpPackage.h" #include "HTP/core/simple_reg.h" - BEGIN_PKG_OP_DEFINITION(PKG_MergeOutput); - // op execute function declarations -template -GraphStatus mergeoutputImpl(TensorType& out_0, - const TensorType& in_0, - const TensorType& in_1, - const TensorType& in_2, - const TensorType& in_3, - const Tensor& num); +template +GraphStatus mergeoutputImpl(TensorType &out_0, + const TensorType &in_0, + const TensorType &in_1, + const TensorType &in_2, + const TensorType &in_3, + const Tensor &num); // forward declaration of sample cost function static float mergeoutputCostFunc(const Op *op); @@ -65,11 +63,11 @@ DEF_PACKAGE_OP((mergeoutputImpl), "MergeOutput") * one definition per op, and this is optional * syntax: DEF_PACKAGE_PARAM_ORDER(OP,PARAM1,MANDATORY1,DEFAULT1,PARAM2,MANDATORY2,DEFAULT2...) 
* one or more parameters can be specified for each op - * order of parameters listed determines the order of parameters passed into op execution functions + * order of parameters listed determines the order of parameters passed into op execution functions * if an op does not have a parameter order definition, parameter order passed into Qnn_addNode * will be passed into op execution functions * if an op has a parameter order definition, any parameter passed into Qnn_addNode with unlisted - * name will be abandoned + * name will be abandoned * if two or more op packages with the same package name will be registered, they cannot list * conflicting parameter orders * PARAM refers to parameter name as a string literal @@ -83,90 +81,81 @@ DEF_PACKAGE_OP((mergeoutputImpl), "MergeOutput") * Qnn_addNode */ - /* execute functions for ops */ -template -GraphStatus mergeoutputImpl(TensorType& out_0, - const TensorType& in_0, - const TensorType& in_1, - const TensorType& in_2, - const TensorType& in_3, - const Tensor& num) +template +GraphStatus mergeoutputImpl(TensorType &out_0, + const TensorType &in_0, + const TensorType &in_1, + const TensorType &in_2, + const TensorType &in_3, + const Tensor &num) { - /* - * add code here - * */ - /* - * To have good performance and stability, it is required to avoid heap memory - * allocation in this function. The heap memory allocation includes but not - * limited to calling malloc, operator new, constructing STL container objects - * like std::vector with default allocator, and adding items like calling - * std::vector::push_back to STL container objects with default allocator. - * - * Please check in SDK documentation for more information. 
- */ - - auto [b_in_0, h_in_0, w_in_0, d_in_0] = in_0.dims(); - auto [b_in_1, h_in_1, w_in_1, d_in_1] = in_1.dims(); - auto [b_in_2, h_in_2, w_in_2, d_in_2] = in_2.dims(); - auto [b_in_3, h_in_3, w_in_3, d_in_3] = in_3.dims(); - - const size_t dims[] = {b_in_0, h_in_0 + h_in_1 + h_in_2 + h_in_3 * 4, w_in_0, d_in_0}; + /* + * add code here + * */ + /* + * To have good performance and stability, it is required to avoid heap memory + * allocation in this function. The heap memory allocation includes but not + * limited to calling malloc, operator new, constructing STL container objects + * like std::vector with default allocator, and adding items like calling + * std::vector::push_back to STL container objects with default allocator. + * + * Please check in SDK documentation for more information. + */ - out_0.set_dims(dims); + auto [b_in_0, h_in_0, w_in_0, d_in_0] = in_0.dims(); + auto [b_in_1, h_in_1, w_in_1, d_in_1] = in_1.dims(); + auto [b_in_2, h_in_2, w_in_2, d_in_2] = in_2.dims(); + auto [b_in_3, h_in_3, w_in_3, d_in_3] = in_3.dims(); - DType dtype = in_0.get_dtype(); - uint32_t bitwidth = 4; + const size_t dims[] = {b_in_0, h_in_0 + h_in_1 + h_in_2 + h_in_3 * 4, w_in_0, d_in_0}; - if (dtype == DType::QUInt8 || dtype == DType::QInt8) { + out_0.set_dims(dims); - bitwidth = 1; + DType dtype = in_0.get_dtype(); + uint32_t bitwidth = 4; - } else if (dtype == DType::Float16) { + if (dtype == DType::QUInt8 || dtype == DType::QInt8) { + bitwidth = 1; - bitwidth = 2; - } else if (dtype == DType::Float32) { + } else if (dtype == DType::Float16) { + bitwidth = 2; + } else if (dtype == DType::Float32) { + bitwidth = 4; + } - bitwidth = 4; - } + const uint8_t *in_ptr_0 = (uint8_t *)in_0.raw_data_const(); + const uint8_t *in_ptr_1 = (uint8_t *)in_1.raw_data_const(); + const uint8_t *in_ptr_2 = (uint8_t *)in_2.raw_data_const(); + // const uint8_t *in_ptr_3 = (uint8_t*)in_3.raw_data_const(); - const uint8_t *in_ptr_0 = (uint8_t*)in_0.raw_data_const(); - const uint8_t *in_ptr_1 
= (uint8_t*)in_1.raw_data_const(); - const uint8_t *in_ptr_2 = (uint8_t*)in_2.raw_data_const(); -// const uint8_t *in_ptr_3 = (uint8_t*)in_3.raw_data_const(); - - uint8_t *out_ptr = (uint8_t*)out_0.raw_data(); + uint8_t *out_ptr = (uint8_t *)out_0.raw_data(); - memcpy(out_ptr, in_ptr_0, b_in_0 * h_in_0 * w_in_0 * d_in_0 * bitwidth); - out_ptr += b_in_0 * h_in_0 * w_in_0 * d_in_0 * bitwidth; + memcpy(out_ptr, in_ptr_0, b_in_0 * h_in_0 * w_in_0 * d_in_0 * bitwidth); + out_ptr += b_in_0 * h_in_0 * w_in_0 * d_in_0 * bitwidth; - memcpy(out_ptr, in_ptr_1, b_in_1 * h_in_1 * w_in_1 * d_in_1 * bitwidth); - out_ptr += b_in_1 * h_in_1 * w_in_1 * d_in_1 * bitwidth; + memcpy(out_ptr, in_ptr_1, b_in_1 * h_in_1 * w_in_1 * d_in_1 * bitwidth); + out_ptr += b_in_1 * h_in_1 * w_in_1 * d_in_1 * bitwidth; - memcpy(out_ptr, in_ptr_2, b_in_2 * h_in_2 * w_in_2 * d_in_2 * bitwidth); - out_ptr += b_in_2 * h_in_2 * w_in_2 * d_in_2 * bitwidth; + memcpy(out_ptr, in_ptr_2, b_in_2 * h_in_2 * w_in_2 * d_in_2 * bitwidth); + out_ptr += b_in_2 * h_in_2 * w_in_2 * d_in_2 * bitwidth; -// memcpy(out_ptr, in_ptr_3, b_in_3 * h_in_3 * w_in_3 * d_in_3 * bitwidth * 4); + // memcpy(out_ptr, in_ptr_3, b_in_3 * h_in_3 * w_in_3 * d_in_3 * bitwidth * 4); - return GraphStatus::Success; + return GraphStatus::Success; } -__attribute__((unused)) static float mergeoutputCostFunc(const Op *op) -{ - /* - * add code here - * */ +__attribute__((unused)) static float mergeoutputCostFunc(const Op *op) { + /* + * add code here + * */ - float cost = 0.0; // add cost computation here - return cost; + float cost = 0.0; // add cost computation here + return cost; } - - - - /* At the bottom of the op file, call END_PKG_OP_DEFINITION(), where is as BEGIN_PKG_OP_DEFINITION */ diff --git a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/QLayerNorm.cpp b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/QLayerNorm.cpp similarity index 64% rename from 
src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/QLayerNorm.cpp rename to mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/QLayerNorm.cpp index 0bb733e4f..be61c7286 100755 --- a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/QLayerNorm.cpp +++ b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/QLayerNorm.cpp @@ -9,16 +9,14 @@ #include "QnnOpPackage.h" #include "HTP/core/simple_reg.h" - BEGIN_PKG_OP_DEFINITION(PKG_QLayerNorm); - // op execute function declarations -template -GraphStatus qlayernormImpl(TensorType& out_0, - const TensorType& in_0, - const TensorType& weights, - const TensorType& bias); +template +GraphStatus qlayernormImpl(TensorType &out_0, + const TensorType &in_0, + const TensorType &weights, + const TensorType &bias); // forward declaration of sample cost function static float qlayernormCostFunc(const Op *op); @@ -63,11 +61,11 @@ DEF_PACKAGE_OP((qlayernormImpl), "QLayerNorm") * one definition per op, and this is optional * syntax: DEF_PACKAGE_PARAM_ORDER(OP,PARAM1,MANDATORY1,DEFAULT1,PARAM2,MANDATORY2,DEFAULT2...) 
* one or more parameters can be specified for each op - * order of parameters listed determines the order of parameters passed into op execution functions + * order of parameters listed determines the order of parameters passed into op execution functions * if an op does not have a parameter order definition, parameter order passed into Qnn_addNode * will be passed into op execution functions * if an op has a parameter order definition, any parameter passed into Qnn_addNode with unlisted - * name will be abandoned + * name will be abandoned * if two or more op packages with the same package name will be registered, they cannot list * conflicting parameter orders * PARAM refers to parameter name as a string literal @@ -81,7 +79,6 @@ DEF_PACKAGE_OP((qlayernormImpl), "QLayerNorm") * Qnn_addNode */ - /* execute functions for ops */ #ifndef REFERENCE_OP @@ -90,18 +87,16 @@ DEF_PACKAGE_OP((qlayernormImpl), "QLayerNorm") #include #include -#define BLOCK_SIZE (8*1024/VLEN) /* vector chunks */ -#define L2FETCH_AHEAD (BLOCK_SIZE) +#define BLOCK_SIZE (8 * 1024 / VLEN) /* vector chunks */ +#define L2FETCH_AHEAD (BLOCK_SIZE) int32_t hvx_qlayernorm_af( float *restrict input, float *restrict weights, float *restrict bias, float *restrict output, - uint32_t size) -{ - if ((input == NULL) || (output == NULL) || (size == 0)) - { + uint32_t size) { + if ((input == NULL) || (output == NULL) || (size == 0)) { return -1; } @@ -125,58 +120,47 @@ int32_t hvx_qlayernorm_af( // sline1p = *iptr++; - // x sum HVX_Vector xsum = Q6_Vqf32_vadd_VsfVsf(Q6_V_vzero(), Q6_V_vzero()); - for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) - { + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { block = Q6_R_min_RR(i, BLOCK_SIZE); l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); - if (l2fetch_block > 0) - { + if (l2fetch_block > 0) { l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); } - for (int32_t j = 0; j < block; ++j) - { + for (int32_t j = 0; j 
< block; ++j) { sline1c = *iptr++; sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); - xsum = Q6_Vqf32_vadd_Vqf32Vqf32(xsum, sline1); - + xsum = Q6_Vqf32_vadd_Vqf32Vqf32(xsum, sline1); sline1p = sline1c; } } if (vectors_in_rounddown > 0) { - - sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? sline1p : *iptr++; - sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t) input); - xsum = Q6_Vqf32_vadd_Vqf32Vqf32(xsum, sline1); - + sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? sline1p : *iptr++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); + xsum = Q6_Vqf32_vadd_Vqf32Vqf32(xsum, sline1); } union { - float f; - uint32_t ui; + float f; + uint32_t ui; } mean_value; mean_value.f = 0.0f; - - - for (int32_t i = 64; i >= 4; i >>= 1) - { + for (int32_t i = 64; i >= 4; i >>= 1) { xsum = Q6_Vqf32_vadd_Vqf32Vqf32(xsum, Q6_V_vlalign_VVR(xsum, zero, i)); } xsum = Q6_Vsf_equals_Vqf32(xsum); - *(HVX_Vector *) tmp_buf = xsum; + *(HVX_Vector *)tmp_buf = xsum; mean_value.f = xsum[31] / size; - // x-e^2 sum iptr = (HVX_Vector *)input; sline1p = *iptr++; @@ -185,57 +169,49 @@ int32_t hvx_qlayernorm_af( HVX_Vector mean_vsf = Q6_V_vsplat_R(mean_value.ui); - for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) - { + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { block = Q6_R_min_RR(i, BLOCK_SIZE); l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); - if (l2fetch_block > 0) - { + if (l2fetch_block > 0) { l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); } - for (int32_t j = 0; j < block; ++j) - { + for (int32_t j = 0; j < block; ++j) { sline1c = *iptr++; sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); sline1 = Q6_Vqf32_vsub_Vqf32Vqf32(sline1, mean_vsf); - x2sum = Q6_Vqf32_vadd_Vqf32Vqf32(x2sum, Q6_Vqf32_vmpy_Vqf32Vqf32(sline1, sline1)); - + x2sum = Q6_Vqf32_vadd_Vqf32Vqf32(x2sum, Q6_Vqf32_vmpy_Vqf32Vqf32(sline1, sline1)); + sline1p = sline1c; } } if (vectors_in_rounddown > 0) { + sline1c = 
is_aligned(iptr, VLEN) && leftover == 0 ? sline1p : *iptr++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); - sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? sline1p : *iptr++; - sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t) input); - - sline1 = Q6_Vqf32_vsub_Vqf32Vqf32(sline1, mean_vsf); - x2sum = Q6_Vqf32_vadd_Vqf32Vqf32(x2sum, Q6_Vqf32_vmpy_Vqf32Vqf32(sline1, sline1)); + sline1 = Q6_Vqf32_vsub_Vqf32Vqf32(sline1, mean_vsf); + x2sum = Q6_Vqf32_vadd_Vqf32Vqf32(x2sum, Q6_Vqf32_vmpy_Vqf32Vqf32(sline1, sline1)); } float epsilon_ = 1e-5; union { - float f; - uint32_t ui; + float f; + uint32_t ui; } sum_value; sum_value.f = 0.0f; - - - for (int32_t i = 64; i >= 4; i >>= 1) - { + for (int32_t i = 64; i >= 4; i >>= 1) { x2sum = Q6_Vqf32_vadd_Vqf32Vqf32(x2sum, Q6_V_vlalign_VVR(x2sum, zero, i)); } x2sum = Q6_Vsf_equals_Vqf32(x2sum); - *(HVX_Vector *) tmp_buf = x2sum; + *(HVX_Vector *)tmp_buf = x2sum; sum_value.f = 1.0f / sqrtf(x2sum[31] / size + epsilon_); - // x * 1/rsqrt(sum) iptr = (HVX_Vector *)input; sline1p = *iptr++; @@ -245,20 +221,17 @@ int32_t hvx_qlayernorm_af( HVX_Vector irsqrt_vsf = Q6_V_vsplat_R(sum_value.ui); HVX_Vector irsqrt_vqf32 = Q6_Vqf32_vadd_VsfVsf(irsqrt_vsf, Q6_V_vzero()); - for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) - { + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { block = Q6_R_min_RR(i, BLOCK_SIZE); l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); - if (l2fetch_block > 0) - { + if (l2fetch_block > 0) { l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); l2fetch(iptr2 + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); l2fetch(iptr3 + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); } - for (int32_t j = 0; j < block; ++j) - { + for (int32_t j = 0; j < block; ++j) { sline1c = *iptr++; sline2c = *iptr2++; sline3c = *iptr3++; @@ -281,107 +254,97 @@ int32_t hvx_qlayernorm_af( } if (vectors_in_rounddown > 0) { + sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? 
sline1p : *iptr++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); - sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? sline1p : *iptr++; - sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t) input); - - sline2c = is_aligned(iptr2, VLEN) && leftover == 0 ? sline2p : *iptr2++; - sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t) weights); + sline2c = is_aligned(iptr2, VLEN) && leftover == 0 ? sline2p : *iptr2++; + sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t)weights); - sline3c = is_aligned(iptr3, VLEN) && leftover == 0 ? sline3p : *iptr3++; - sline3 = Q6_V_valign_VVR(sline3c, sline3p, (size_t) weights); + sline3c = is_aligned(iptr3, VLEN) && leftover == 0 ? sline3p : *iptr3++; + sline3 = Q6_V_valign_VVR(sline3c, sline3p, (size_t)weights); - sline1 = Q6_Vqf32_vsub_VsfVsf(sline1, mean_vsf); + sline1 = Q6_Vqf32_vsub_VsfVsf(sline1, mean_vsf); - HVX_Vector middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(sline1, sline2); - middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(middle_value_qf32, irsqrt_vqf32); - middle_value_qf32 = Q6_Vqf32_vadd_Vqf32Vqf32(middle_value_qf32, sline3); - - *optr++ = Q6_Vsf_equals_Vqf32(middle_value_qf32); + HVX_Vector middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(sline1, sline2); + middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(middle_value_qf32, irsqrt_vqf32); + middle_value_qf32 = Q6_Vqf32_vadd_Vqf32Vqf32(middle_value_qf32, sline3); + *optr++ = Q6_Vsf_equals_Vqf32(middle_value_qf32); } - if (leftover_size > 0) - return -1; + return -1; return 0; } -template -GraphStatus qlayernormImpl(TensorType& out_0, - const TensorType& in_0, - const TensorType& weights, - const TensorType& bias) +template +GraphStatus qlayernormImpl(TensorType &out_0, + const TensorType &in_0, + const TensorType &weights, + const TensorType &bias) { - out_0.set_dims(in_0); - - // NHWC - - auto in_ptr = (float*)in_0.raw_data_const(); - auto out_ptr = (float*)out_0.raw_data(); - auto weights_ptr = (float*)weights.raw_data_const(); - auto bias_ptr = 
(float*)bias.raw_data_const(); - - - auto [b_in, h_in, w_in, d_in] = in_0.dims(); - for (Idx b = 0; b < b_in; b++) { - for (Idx h = 0; h < h_in; h++) { - for (Idx w = 0; w < w_in; w++) { - // RMS - hvx_qlayernorm_af(in_ptr, weights_ptr, bias_ptr, out_ptr, d_in); - - in_ptr += d_in; - out_ptr += d_in; - } + out_0.set_dims(in_0); + + // NHWC + + auto in_ptr = (float *)in_0.raw_data_const(); + auto out_ptr = (float *)out_0.raw_data(); + auto weights_ptr = (float *)weights.raw_data_const(); + auto bias_ptr = (float *)bias.raw_data_const(); + + auto [b_in, h_in, w_in, d_in] = in_0.dims(); + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; h++) { + for (Idx w = 0; w < w_in; w++) { + // RMS + hvx_qlayernorm_af(in_ptr, weights_ptr, bias_ptr, out_ptr, d_in); + + in_ptr += d_in; + out_ptr += d_in; + } + } } - } - return GraphStatus::Success; + return GraphStatus::Success; } #else -template -GraphStatus qlayernormImpl(TensorType& out_0, - const TensorType& in_0, - const TensorType& weights, - const TensorType& bias) +template +GraphStatus qlayernormImpl(TensorType &out_0, + const TensorType &in_0, + const TensorType &weights, + const TensorType &bias) { - /* - * add code here - * */ - /* - * To have good performance and stability, it is required to avoid heap memory - * allocation in this function. The heap memory allocation includes but not - * limited to calling malloc, operator new, constructing STL container objects - * like std::vector with default allocator, and adding items like calling - * std::vector::push_back to STL container objects with default allocator. - * - * Please check in SDK documentation for more information. - */ - return GraphStatus::Success; + /* + * add code here + * */ + /* + * To have good performance and stability, it is required to avoid heap memory + * allocation in this function. 
The heap memory allocation includes but not + * limited to calling malloc, operator new, constructing STL container objects + * like std::vector with default allocator, and adding items like calling + * std::vector::push_back to STL container objects with default allocator. + * + * Please check in SDK documentation for more information. + */ + return GraphStatus::Success; } #endif -__attribute__((unused)) static float qlayernormCostFunc(const Op *op) -{ - /* - * add code here - * */ +__attribute__((unused)) static float qlayernormCostFunc(const Op *op) { + /* + * add code here + * */ - float cost = 0.0; // add cost computation here - return cost; + float cost = 0.0; // add cost computation here + return cost; } - - - - - /* At the bottom of the op file, call END_PKG_OP_DEFINITION(), where is as BEGIN_PKG_OP_DEFINITION */ diff --git a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/RMSNorm.cpp b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/RMSNorm.cpp similarity index 69% rename from src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/RMSNorm.cpp rename to mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/RMSNorm.cpp index bd079a2c9..b3e551aa0 100755 --- a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/RMSNorm.cpp +++ b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/RMSNorm.cpp @@ -9,15 +9,13 @@ #include "QnnOpPackage.h" #include "HTP/core/simple_reg.h" - BEGIN_PKG_OP_DEFINITION(PKG_RMSNorm); - // op execute function declarations -template -GraphStatus rmsnormImpl(TensorType& out_0, - const TensorType& in_0, - const TensorType& weights); +template +GraphStatus rmsnormImpl(TensorType &out_0, + const TensorType &in_0, + const TensorType &weights); // forward declaration of sample cost function static float rmsnormCostFunc(const Op *op); @@ -62,11 +60,11 @@ DEF_PACKAGE_OP((rmsnormImpl), "RMSNorm") * one definition per op, and this is optional * syntax: 
DEF_PACKAGE_PARAM_ORDER(OP,PARAM1,MANDATORY1,DEFAULT1,PARAM2,MANDATORY2,DEFAULT2...) * one or more parameters can be specified for each op - * order of parameters listed determines the order of parameters passed into op execution functions + * order of parameters listed determines the order of parameters passed into op execution functions * if an op does not have a parameter order definition, parameter order passed into Qnn_addNode * will be passed into op execution functions * if an op has a parameter order definition, any parameter passed into Qnn_addNode with unlisted - * name will be abandoned + * name will be abandoned * if two or more op packages with the same package name will be registered, they cannot list * conflicting parameter orders * PARAM refers to parameter name as a string literal @@ -80,7 +78,6 @@ DEF_PACKAGE_OP((rmsnormImpl), "RMSNorm") * Qnn_addNode */ - /* execute functions for ops */ #ifndef REFERENCE_OP @@ -90,17 +87,15 @@ DEF_PACKAGE_OP((rmsnormImpl), "RMSNorm") #include #include -#define BLOCK_SIZE (8*1024/VLEN) /* vector chunks */ -#define L2FETCH_AHEAD (BLOCK_SIZE) +#define BLOCK_SIZE (8 * 1024 / VLEN) /* vector chunks */ +#define L2FETCH_AHEAD (BLOCK_SIZE) int32_t hvx_rmsnorm_af( float *restrict input, float *restrict weights, float *restrict output, - uint32_t size) -{ - if ((input == NULL) || (output == NULL) || (size == 0)) - { + uint32_t size) { + if ((input == NULL) || (output == NULL) || (size == 0)) { return -1; } @@ -117,56 +112,47 @@ int32_t hvx_rmsnorm_af( sline1p = *iptr++; - // ^2 sum HVX_Vector sum = Q6_Vqf32_vadd_VsfVsf(Q6_V_vzero(), Q6_V_vzero()); - for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) - { + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { block = Q6_R_min_RR(i, BLOCK_SIZE); l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); - if (l2fetch_block > 0) - { + if (l2fetch_block > 0) { l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); } - for (int32_t j = 0; j 
< block; ++j) - { + for (int32_t j = 0; j < block; ++j) { sline1c = *iptr++; sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); - sum = Q6_Vqf32_vadd_Vqf32Vqf32(sum, Q6_Vqf32_vmpy_VsfVsf(sline1, sline1)); - + sum = Q6_Vqf32_vadd_Vqf32Vqf32(sum, Q6_Vqf32_vmpy_VsfVsf(sline1, sline1)); sline1p = sline1c; } } if (vectors_in_rounddown > 0) { - - sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? sline1p : *iptr++; - sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t) input); - sum = Q6_Vqf32_vadd_Vqf32Vqf32(sum, Q6_Vqf32_vmpy_VsfVsf(sline1, sline1)); - + sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? sline1p : *iptr++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); + sum = Q6_Vqf32_vadd_Vqf32Vqf32(sum, Q6_Vqf32_vmpy_VsfVsf(sline1, sline1)); } float epsilon_ = 1e-6; union { - float f; - uint32_t ui; + float f; + uint32_t ui; } sum_value; sum_value.f = 0.0f; - HVX_Vector zero = Q6_V_vzero(); - for (int32_t i = 64; i >= 4; i >>= 1) - { + for (int32_t i = 64; i >= 4; i >>= 1) { sum = Q6_Vqf32_vadd_Vqf32Vqf32(sum, Q6_V_vlalign_VVR(sum, zero, i)); } sum = Q6_Vsf_equals_Vqf32(sum); - sum_value.f = 1.0f / sqrtf(*((float*)&sum + 31) / size + epsilon_); + sum_value.f = 1.0f / sqrtf(*((float *)&sum + 31) / size + epsilon_); // x * 1/rsqrt(sum) iptr = (HVX_Vector *)input; @@ -176,19 +162,16 @@ int32_t hvx_rmsnorm_af( HVX_Vector irsqrt_vsf = Q6_V_vsplat_R(sum_value.ui); HVX_Vector irsqrt_vqf32 = Q6_Vqf32_vadd_VsfVsf(irsqrt_vsf, Q6_V_vzero()); - for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) - { + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { block = Q6_R_min_RR(i, BLOCK_SIZE); l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); - if (l2fetch_block > 0) - { + if (l2fetch_block > 0) { l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); l2fetch(iptr2 + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); } - for (int32_t j = 0; j < block; ++j) - { + for (int32_t j = 0; j < block; ++j) { sline1c = *iptr++; 
sline2c = *iptr2++; sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); @@ -203,33 +186,31 @@ int32_t hvx_rmsnorm_af( } if (vectors_in_rounddown > 0) { + sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? sline1p : *iptr++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); - sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? sline1p : *iptr++; - sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t) input); - - sline2c = is_aligned(iptr2, VLEN) && leftover == 0 ? sline2p : *iptr2++; - sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t) weights); - - HVX_Vector middle_value_qf32 = Q6_Vqf32_vmpy_VsfVsf(sline1, sline2); - *optr++ = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_Vqf32Vqf32(middle_value_qf32, irsqrt_vqf32)); + sline2c = is_aligned(iptr2, VLEN) && leftover == 0 ? sline2p : *iptr2++; + sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t)weights); + HVX_Vector middle_value_qf32 = Q6_Vqf32_vmpy_VsfVsf(sline1, sline2); + *optr++ = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_Vqf32Vqf32(middle_value_qf32, irsqrt_vqf32)); } - if (leftover_size > 0) - return -1; + return -1; return 0; } -static HVX_INLINE_ALWAYS uint32_t float_to_bits(float x) -{ - union { float f; uint32_t i; } fp32 = { .f = x }; +static HVX_INLINE_ALWAYS uint32_t float_to_bits(float x) { + union { + float f; + uint32_t i; + } fp32 = {.f = x}; return fp32.i; } -static inline int32_t float_to_fp16s(float input) -{ +static inline int32_t float_to_fp16s(float input) { union { int32_t i; __fp16 f[2]; @@ -237,7 +218,6 @@ static inline int32_t float_to_fp16s(float input) return fp32.i; } - #define FLOAT_MANTISA 23 #define FLOAT_EXPONENT_MASK 0xff #define FLOAT_EXPONENT_BIAS 0x7f @@ -252,10 +232,8 @@ int32_t hvx_rmsnorm_auint8( float *restrict weights, uint8_t *restrict output, uint32_t size, - float scale) -{ - if ((input == NULL) || (output == NULL) || (size == 0)) - { + float scale) { + if ((input == NULL) || (output == NULL) || (size == 0)) { return -1; } @@ -274,7 +252,7 @@ int32_t hvx_rmsnorm_auint8( 
float low_level = -128.0f; float high_level = 127.0f; - float es = 0.5f; + float es = 0.5f; low_level_vec = Q6_V_vsplat_R(float_to_bits(low_level)); high_level_vec = Q6_V_vsplat_R(float_to_bits(high_level)); scale_vec = Q6_V_vsplat_R(float_to_bits(scale)); @@ -287,7 +265,6 @@ int32_t hvx_rmsnorm_auint8( HVX_Vector uintconvert = Q6_V_vsplat_R(0x80808080); - // HVX_Vector expmask = Q6_V_vsplat_R(FLOAT_EXPONENT_MASK); // HVX_Vector expbias = Q6_V_vsplat_R(FLOAT_EXPONENT_BIAS); // HVX_Vector manmask = Q6_V_vsplat_R(FLOAT_MANTISA_MASK); @@ -303,53 +280,45 @@ int32_t hvx_rmsnorm_auint8( sline1p = *iptr++; - // ^2 sum HVX_Vector sum = Q6_Vqf32_vadd_VsfVsf(Q6_V_vzero(), Q6_V_vzero()); - for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) - { + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { block = Q6_R_min_RR(i, BLOCK_SIZE); l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); - if (l2fetch_block > 0) - { + if (l2fetch_block > 0) { l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); } - for (int32_t j = 0; j < block; ++j) - { + for (int32_t j = 0; j < block; ++j) { sline1c = *iptr++; sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); - sum = Q6_Vqf32_vadd_Vqf32Vqf32(sum, Q6_Vqf32_vmpy_VsfVsf(sline1, sline1)); - + sum = Q6_Vqf32_vadd_Vqf32Vqf32(sum, Q6_Vqf32_vmpy_VsfVsf(sline1, sline1)); sline1p = sline1c; } } if (vectors_in_rounddown > 0) { - - sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? sline1p : *iptr++; - sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t) input); - sum = Q6_Vqf32_vadd_Vqf32Vqf32(sum, Q6_Vqf32_vmpy_VsfVsf(sline1, sline1)); - + sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? 
sline1p : *iptr++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); + sum = Q6_Vqf32_vadd_Vqf32Vqf32(sum, Q6_Vqf32_vmpy_VsfVsf(sline1, sline1)); } float epsilon_ = 1e-6; union { - float f; - uint32_t ui; + float f; + uint32_t ui; } sum_value; sum_value.f = 0.0f; - for (int32_t i = 64; i >= 4; i >>= 1) - { + for (int32_t i = 64; i >= 4; i >>= 1) { sum = Q6_Vqf32_vadd_Vqf32Vqf32(sum, Q6_V_vlalign_VVR(sum, zero, i)); } sum = Q6_Vsf_equals_Vqf32(sum); - sum_value.f = 1.0f / sqrtf(*((float*)&sum + 31) / size + epsilon_); + sum_value.f = 1.0f / sqrtf(*((float *)&sum + 31) / size + epsilon_); // x * 1/rsqrt(sum) iptr = (HVX_Vector *)input; @@ -361,37 +330,32 @@ int32_t hvx_rmsnorm_auint8( slinewp = *iptr2++; - HVX_Vector irsqrt_vsf = Q6_V_vsplat_R(sum_value.ui); HVX_Vector irsqrt_vqf32 = Q6_Vqf32_vadd_VsfVsf(irsqrt_vsf, Q6_V_vzero()); - for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) - { + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { block = Q6_R_min_RR(i, BLOCK_SIZE); l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); - if (l2fetch_block > 0) - { + if (l2fetch_block > 0) { l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); l2fetch(iptr2 + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); } - for (int32_t j = 0; j < block; j+=4) - { - + for (int32_t j = 0; j < block; j += 4) { { - sline1c = *iptr++; - slinewc = *iptr2++; - sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); - slinew = Q6_V_valign_VVR(slinewc, slinewp, (size_t)weights); + sline1c = *iptr++; + slinewc = *iptr2++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); + slinew = Q6_V_valign_VVR(slinewc, slinewp, (size_t)weights); - HVX_Vector middle_value_qf32 = Q6_Vqf32_vmpy_VsfVsf(sline1, slinew); - sline1 = Q6_Vqf32_vmpy_Vqf32Vqf32(middle_value_qf32, irsqrt_vqf32); + HVX_Vector middle_value_qf32 = Q6_Vqf32_vmpy_VsfVsf(sline1, slinew); + sline1 = Q6_Vqf32_vmpy_Vqf32Vqf32(middle_value_qf32, irsqrt_vqf32); - slinewp = slinewc; 
+ slinewp = slinewc; } - - sout1 = Q6_Vqf32_vmpy_Vqf32Vqf32(sline1,scale_vec); + + sout1 = Q6_Vqf32_vmpy_Vqf32Vqf32(sline1, scale_vec); sout1 = Q6_Vqf32_vadd_Vqf32Vqf32(sout1, es_vec); sout1 = Q6_Vsf_equals_Vqf32(sout1); sout1 = Q6_Vsf_vmin_VsfVsf(sout1, high_level_vec); @@ -438,19 +402,18 @@ int32_t hvx_rmsnorm_auint8( // sout1 = qhmath_hvx_vw_convert_vqf32_rmode(Q6_Vqf32_vadd_VsfVsf(sout1, Q6_V_vzero()), 0); { - sline2c = *iptr++; - slinewc = *iptr2++; - sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t)input); - slinew = Q6_V_valign_VVR(slinewc, slinewp, (size_t)weights); + sline2c = *iptr++; + slinewc = *iptr2++; + sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t)input); + slinew = Q6_V_valign_VVR(slinewc, slinewp, (size_t)weights); - HVX_Vector middle_value_qf32 = Q6_Vqf32_vmpy_VsfVsf(sline2, slinew); - sline2 = Q6_Vqf32_vmpy_Vqf32Vqf32(middle_value_qf32, irsqrt_vqf32); + HVX_Vector middle_value_qf32 = Q6_Vqf32_vmpy_VsfVsf(sline2, slinew); + sline2 = Q6_Vqf32_vmpy_Vqf32Vqf32(middle_value_qf32, irsqrt_vqf32); - slinewp = slinewc; + slinewp = slinewc; } - - sout2 = Q6_Vqf32_vmpy_Vqf32Vqf32(sline2,scale_vec); + sout2 = Q6_Vqf32_vmpy_Vqf32Vqf32(sline2, scale_vec); sout2 = Q6_Vqf32_vadd_Vqf32Vqf32(sout2, es_vec); sout2 = Q6_Vsf_equals_Vqf32(sout2); sout2 = Q6_Vsf_vmin_VsfVsf(sout2, high_level_vec); @@ -497,19 +460,18 @@ int32_t hvx_rmsnorm_auint8( // sout2 = qhmath_hvx_vw_convert_vqf32_rmode(Q6_Vqf32_vadd_VsfVsf(sout2, Q6_V_vzero()), 0); { - sline3c = *iptr++; - slinewc = *iptr2++; - sline3 = Q6_V_valign_VVR(sline3c, sline3p, (size_t) input); - slinew = Q6_V_valign_VVR(slinewc, slinewp, (size_t)weights); + sline3c = *iptr++; + slinewc = *iptr2++; + sline3 = Q6_V_valign_VVR(sline3c, sline3p, (size_t)input); + slinew = Q6_V_valign_VVR(slinewc, slinewp, (size_t)weights); - HVX_Vector middle_value_qf32 = Q6_Vqf32_vmpy_VsfVsf(sline3, slinew); - sline3 = Q6_Vqf32_vmpy_Vqf32Vqf32(middle_value_qf32, irsqrt_vqf32); + HVX_Vector middle_value_qf32 = 
Q6_Vqf32_vmpy_VsfVsf(sline3, slinew); + sline3 = Q6_Vqf32_vmpy_Vqf32Vqf32(middle_value_qf32, irsqrt_vqf32); - slinewp = slinewc; + slinewp = slinewc; } - - sout3 = Q6_Vqf32_vmpy_Vqf32Vqf32(sline3,scale_vec); + sout3 = Q6_Vqf32_vmpy_Vqf32Vqf32(sline3, scale_vec); sout3 = Q6_Vqf32_vadd_Vqf32Vqf32(sout3, es_vec); sout3 = Q6_Vsf_equals_Vqf32(sout3); sout3 = Q6_Vsf_vmin_VsfVsf(sout3, high_level_vec); @@ -551,25 +513,23 @@ int32_t hvx_rmsnorm_auint8( // sout3 = Q6_V_vmux_QVV(expgte23, sout3, tsout1); // } - sout3 = Q6_Vw_equals_Vsf(sout3); sout3 = Q6_Vw_vasr_VwR(sout3, ROUND_2_SCALE); // sout3 = qhmath_hvx_vw_convert_vqf32_rmode(Q6_Vqf32_vadd_VsfVsf(sout3, Q6_V_vzero()), 0); { - sline4c = *iptr++; - slinewc = *iptr2++; - sline4 = Q6_V_valign_VVR(sline4c, sline4p, (size_t) input); - slinew = Q6_V_valign_VVR(slinewc, slinewp, (size_t)weights); + sline4c = *iptr++; + slinewc = *iptr2++; + sline4 = Q6_V_valign_VVR(sline4c, sline4p, (size_t)input); + slinew = Q6_V_valign_VVR(slinewc, slinewp, (size_t)weights); - HVX_Vector middle_value_qf32 = Q6_Vqf32_vmpy_VsfVsf(sline4, slinew); - sline4 = Q6_Vqf32_vmpy_Vqf32Vqf32(middle_value_qf32, irsqrt_vqf32); + HVX_Vector middle_value_qf32 = Q6_Vqf32_vmpy_VsfVsf(sline4, slinew); + sline4 = Q6_Vqf32_vmpy_Vqf32Vqf32(middle_value_qf32, irsqrt_vqf32); - slinewp = slinewc; + slinewp = slinewc; } - - sout4 = Q6_Vqf32_vmpy_Vqf32Vqf32(sline4,scale_vec); + sout4 = Q6_Vqf32_vmpy_Vqf32Vqf32(sline4, scale_vec); sout4 = Q6_Vqf32_vadd_Vqf32Vqf32(sout4, es_vec); sout4 = Q6_Vsf_equals_Vqf32(sout4); sout4 = Q6_Vsf_vmin_VsfVsf(sout4, high_level_vec); @@ -615,7 +575,6 @@ int32_t hvx_rmsnorm_auint8( sout4 = Q6_Vw_vasr_VwR(sout4, ROUND_2_SCALE); // sout4 = qhmath_hvx_vw_convert_vqf32_rmode(Q6_Vqf32_vadd_VsfVsf(sout4, Q6_V_vzero()), 0); - HVX_Vector reql_h = Q6_Vh_vpack_VwVw_sat(sout2, sout1); HVX_Vector reqh_h = Q6_Vh_vpack_VwVw_sat(sout4, sout3); HVX_Vector req_b = Q6_Vb_vpack_VhVh_sat(reqh_h, reql_h); @@ -627,9 +586,7 @@ int32_t hvx_rmsnorm_auint8( 
sline3p = sline3c; sline4p = sline4c; - slinewp = slinewc; - } } @@ -641,10 +598,8 @@ int32_t hvx_rmsnorm_auint8_opt( float *restrict weights, uint8_t *restrict output, uint32_t size, - float scale) -{ - if ((input == NULL) || (output == NULL) || (size == 0)) - { + float scale) { + if ((input == NULL) || (output == NULL) || (size == 0)) { return -1; } @@ -663,7 +618,7 @@ int32_t hvx_rmsnorm_auint8_opt( // float low_level = -128.0f; // float high_level = 127.0f; - // float es = 0.5f; + // float es = 0.5f; // low_level_vec = Q6_V_vsplat_R(float_to_bits(low_level)); // high_level_vec = Q6_V_vsplat_R(float_to_bits(high_level)); // scale_vec = Q6_V_vsplat_R(float_to_bits(scale)); @@ -676,7 +631,6 @@ int32_t hvx_rmsnorm_auint8_opt( // HVX_Vector uintconvert = Q6_V_vsplat_R(0x80808080); - // HVX_Vector expmask = Q6_V_vsplat_R(FLOAT_EXPONENT_MASK); // HVX_Vector expbias = Q6_V_vsplat_R(FLOAT_EXPONENT_BIAS); // HVX_Vector manmask = Q6_V_vsplat_R(FLOAT_MANTISA_MASK); @@ -692,53 +646,45 @@ int32_t hvx_rmsnorm_auint8_opt( sline1p = *iptr++; - // ^2 sum HVX_Vector sum = Q6_Vqf32_vadd_VsfVsf(Q6_V_vzero(), Q6_V_vzero()); - for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) - { + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { block = Q6_R_min_RR(i, BLOCK_SIZE); l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); - if (l2fetch_block > 0) - { + if (l2fetch_block > 0) { l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); } - for (int32_t j = 0; j < block; ++j) - { + for (int32_t j = 0; j < block; ++j) { sline1c = *iptr++; sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); - sum = Q6_Vqf32_vadd_Vqf32Vqf32(sum, Q6_Vqf32_vmpy_VsfVsf(sline1, sline1)); - + sum = Q6_Vqf32_vadd_Vqf32Vqf32(sum, Q6_Vqf32_vmpy_VsfVsf(sline1, sline1)); sline1p = sline1c; } } if (vectors_in_rounddown > 0) { - - sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? 
sline1p : *iptr++; - sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t) input); - sum = Q6_Vqf32_vadd_Vqf32Vqf32(sum, Q6_Vqf32_vmpy_VsfVsf(sline1, sline1)); - + sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? sline1p : *iptr++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); + sum = Q6_Vqf32_vadd_Vqf32Vqf32(sum, Q6_Vqf32_vmpy_VsfVsf(sline1, sline1)); } float epsilon_ = 1e-6; union { - float f; - uint32_t ui; + float f; + uint32_t ui; } sum_value; sum_value.f = 0.0f; - for (int32_t i = 64; i >= 4; i >>= 1) - { + for (int32_t i = 64; i >= 4; i >>= 1) { sum = Q6_Vqf32_vadd_Vqf32Vqf32(sum, Q6_V_vlalign_VVR(sum, zero, i)); } sum = Q6_Vsf_equals_Vqf32(sum); - sum_value.f = 1.0f / sqrtf(*((float*)&sum + 31) / size + epsilon_); + sum_value.f = 1.0f / sqrtf(*((float *)&sum + 31) / size + epsilon_); // x * 1/rsqrt(sum) iptr = (HVX_Vector *)input; @@ -750,66 +696,58 @@ int32_t hvx_rmsnorm_auint8_opt( slinewp = *iptr2++; - HVX_Vector irsqrt_vsf = Q6_V_vsplat_R(sum_value.ui); HVX_Vector irsqrt_vqf32 = Q6_Vqf32_vadd_VsfVsf(irsqrt_vsf, Q6_V_vzero()); - float post_scale_flt = scale / 64.0f; - int scexp = flt_getexp( post_scale_flt); - int rsh = min_i32( -scexp,7); // e.g. 0.11 -> 0.88, rsh = 3 + int scexp = flt_getexp(post_scale_flt); + int rsh = min_i32(-scexp, 7); // e.g. 
0.11 -> 0.88, rsh = 3 float rsh_fac = flt_power2(rsh); int adj_bias = roundf_i32(128 * rsh_fac); - adj_bias = Q6_R_combine_RlRl( adj_bias, adj_bias); - + adj_bias = Q6_R_combine_RlRl(adj_bias, adj_bias); HVX_Vector zero_v_sf = Q6_V_vzero(); - float es = 0.5f; + float es = 0.5f; HVX_Vector es_vec = Q6_V_vsplat_R(float_to_fp16s(es)); es_vec = Q6_Vqf16_vadd_VhfVhf(es_vec, zero_v_sf); HVX_Vector vadj = Q6_V_vsplat_R(adj_bias); - HVX_Vector o_scale_vec = Q6_V_vsplat_R(float_to_fp16s(post_scale_flt * rsh_fac * (1<<15))); + HVX_Vector o_scale_vec = Q6_V_vsplat_R(float_to_fp16s(post_scale_flt * rsh_fac * (1 << 15))); - for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) - { + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { block = Q6_R_min_RR(i, BLOCK_SIZE); l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); - if (l2fetch_block > 0) - { + if (l2fetch_block > 0) { l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); l2fetch(iptr2 + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); } - for (int32_t j = 0; j < block; j+=4) - { - + for (int32_t j = 0; j < block; j += 4) { { - sline1c = *iptr++; - slinewc = *iptr2++; - sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); - slinew = Q6_V_valign_VVR(slinewc, slinewp, (size_t)weights); + sline1c = *iptr++; + slinewc = *iptr2++; + sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); + slinew = Q6_V_valign_VVR(slinewc, slinewp, (size_t)weights); - HVX_Vector middle_value_qf32 = Q6_Vqf32_vmpy_VsfVsf(sline1, slinew); - sline1 = Q6_Vqf32_vmpy_Vqf32Vqf32(middle_value_qf32, irsqrt_vqf32); + HVX_Vector middle_value_qf32 = Q6_Vqf32_vmpy_VsfVsf(sline1, slinew); + sline1 = Q6_Vqf32_vmpy_Vqf32Vqf32(middle_value_qf32, irsqrt_vqf32); - slinewp = slinewc; + slinewp = slinewc; } { - sline2c = *iptr++; - slinewc = *iptr2++; - sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t)input); - slinew = Q6_V_valign_VVR(slinewc, slinewp, (size_t)weights); + sline2c = *iptr++; + slinewc = *iptr2++; 
+ sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t)input); + slinew = Q6_V_valign_VVR(slinewc, slinewp, (size_t)weights); - HVX_Vector middle_value_qf32 = Q6_Vqf32_vmpy_VsfVsf(sline2, slinew); - sline2 = Q6_Vqf32_vmpy_Vqf32Vqf32(middle_value_qf32, irsqrt_vqf32); + HVX_Vector middle_value_qf32 = Q6_Vqf32_vmpy_VsfVsf(sline2, slinew); + sline2 = Q6_Vqf32_vmpy_Vqf32Vqf32(middle_value_qf32, irsqrt_vqf32); - slinewp = slinewc; + slinewp = slinewc; } - HVX_Vector sline_low = Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(sline2, sline1)); sline_low = Q6_Vqf16_vadd_Vqf16Vqf16(sline_low, es_vec); @@ -820,29 +758,28 @@ int32_t hvx_rmsnorm_auint8_opt( sline_low = Q6_Vh_vdeal_Vh(sline_low); { - sline3c = *iptr++; - slinewc = *iptr2++; - sline3 = Q6_V_valign_VVR(sline3c, sline3p, (size_t) input); - slinew = Q6_V_valign_VVR(slinewc, slinewp, (size_t)weights); + sline3c = *iptr++; + slinewc = *iptr2++; + sline3 = Q6_V_valign_VVR(sline3c, sline3p, (size_t)input); + slinew = Q6_V_valign_VVR(slinewc, slinewp, (size_t)weights); - HVX_Vector middle_value_qf32 = Q6_Vqf32_vmpy_VsfVsf(sline3, slinew); - sline3 = Q6_Vqf32_vmpy_Vqf32Vqf32(middle_value_qf32, irsqrt_vqf32); + HVX_Vector middle_value_qf32 = Q6_Vqf32_vmpy_VsfVsf(sline3, slinew); + sline3 = Q6_Vqf32_vmpy_Vqf32Vqf32(middle_value_qf32, irsqrt_vqf32); - slinewp = slinewc; + slinewp = slinewc; } { - sline4c = *iptr++; - slinewc = *iptr2++; - sline4 = Q6_V_valign_VVR(sline4c, sline4p, (size_t) input); - slinew = Q6_V_valign_VVR(slinewc, slinewp, (size_t)weights); + sline4c = *iptr++; + slinewc = *iptr2++; + sline4 = Q6_V_valign_VVR(sline4c, sline4p, (size_t)input); + slinew = Q6_V_valign_VVR(slinewc, slinewp, (size_t)weights); - HVX_Vector middle_value_qf32 = Q6_Vqf32_vmpy_VsfVsf(sline4, slinew); - sline4 = Q6_Vqf32_vmpy_Vqf32Vqf32(middle_value_qf32, irsqrt_vqf32); + HVX_Vector middle_value_qf32 = Q6_Vqf32_vmpy_VsfVsf(sline4, slinew); + sline4 = Q6_Vqf32_vmpy_Vqf32Vqf32(middle_value_qf32, irsqrt_vqf32); - slinewp = slinewc; + slinewp = 
slinewc; } - HVX_Vector sline_high = Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(sline4, sline3)); sline_high = Q6_Vqf16_vadd_Vqf16Vqf16(sline_high, es_vec); @@ -852,7 +789,7 @@ int32_t hvx_rmsnorm_auint8_opt( sline_high = Q6_Vh_vdeal_Vh(sline_high); - HVX_Vector sout = Q6_Vub_vasr_VhVhR_rnd_sat( sline_high, sline_low, rsh); + HVX_Vector sout = Q6_Vub_vasr_VhVhR_rnd_sat(sline_high, sline_low, rsh); sout = Q6_Vb_vdeal_Vb(sout); *optr++ = sout; @@ -861,149 +798,132 @@ int32_t hvx_rmsnorm_auint8_opt( sline3p = sline3c; sline4p = sline4c; - slinewp = slinewc; - } } return 0; } -template -GraphStatus rmsnormImpl(TensorType& out_0, - const TensorType& in_0, - const TensorType& weights) +template +GraphStatus rmsnormImpl(TensorType &out_0, + const TensorType &in_0, + const TensorType &weights) { - out_0.set_dims(in_0); - - // NHWC + out_0.set_dims(in_0); - auto in_ptr = (float*)in_0.raw_data_const(); - auto weights_ptr = (float*)weights.raw_data_const(); + // NHWC + auto in_ptr = (float *)in_0.raw_data_const(); + auto weights_ptr = (float *)weights.raw_data_const(); - auto [b_in, h_in, w_in, d_in] = in_0.dims(); - + auto [b_in, h_in, w_in, d_in] = in_0.dims(); - DType dtype = out_0.get_dtype(); + DType dtype = out_0.get_dtype(); - if (dtype == DType::Float32) { + if (dtype == DType::Float32) { + auto out_ptr = (float *)out_0.raw_data(); - auto out_ptr = (float*)out_0.raw_data(); + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; h++) { + for (Idx w = 0; w < w_in; w++) { + // RMS + hvx_rmsnorm_af(in_ptr, weights_ptr, out_ptr, d_in); - for (Idx b = 0; b < b_in; b++) { - for (Idx h = 0; h < h_in; h++) { - for (Idx w = 0; w < w_in; w++) { - // RMS - hvx_rmsnorm_af(in_ptr, weights_ptr, out_ptr, d_in); - - in_ptr += d_in; - out_ptr += d_in; + in_ptr += d_in; + out_ptr += d_in; + } + } } - } - } - } else if (dtype == DType::QUInt8) { + } else if (dtype == DType::QUInt8) { + auto out_ptr = (uint8_t *)out_0.raw_data(); + float scale_ = out_0.interface_scale(); - auto out_ptr 
= (uint8_t*)out_0.raw_data(); - float scale_ = out_0.get_interface_scale(); + scale_ = 1.0f / scale_; - scale_ = 1.0f/scale_; + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; h++) { + for (Idx w = 0; w < w_in; w++) { + // RMS + hvx_rmsnorm_auint8(in_ptr, weights_ptr, out_ptr, d_in, scale_); - for (Idx b = 0; b < b_in; b++) { - for (Idx h = 0; h < h_in; h++) { - for (Idx w = 0; w < w_in; w++) { - // RMS - hvx_rmsnorm_auint8(in_ptr, weights_ptr, out_ptr, d_in, scale_); - - in_ptr += d_in; - out_ptr += d_in; + in_ptr += d_in; + out_ptr += d_in; + } + } } - } } - } - - return GraphStatus::Success; + return GraphStatus::Success; } #else -template -GraphStatus rmsnormImpl(TensorType& out_0, - const TensorType& in_0, - const TensorType& weights) +template +GraphStatus rmsnormImpl(TensorType &out_0, + const TensorType &in_0, + const TensorType &weights) { - /* - * add code here - * */ - /* - * To have good performance and stability, it is required to avoid heap memory - * allocation in this function. The heap memory allocation includes but not - * limited to calling malloc, operator new, constructing STL container objects - * like std::vector with default allocator, and adding items like calling - * std::vector::push_back to STL container objects with default allocator. - * - * Please check in SDK documentation for more information. - */ - out_0.set_dims(in_0); + /* + * add code here + * */ + /* + * To have good performance and stability, it is required to avoid heap memory + * allocation in this function. The heap memory allocation includes but not + * limited to calling malloc, operator new, constructing STL container objects + * like std::vector with default allocator, and adding items like calling + * std::vector::push_back to STL container objects with default allocator. + * + * Please check in SDK documentation for more information. 
+ */ + out_0.set_dims(in_0); // NHWC float epsilon_ = 1e-6; auto [b_in, h_in, w_in, d_in] = in_0.dims(); for (Idx b = 0; b < b_in; b++) { - for (Idx h = 0; h < h_in; h++) { - for (Idx w = 0; w < w_in; w++) { - // RMS - float sum_squares = 0.0f; - for (Idx d = 0; d < d_in; d++) { - float inval = in_0(b, h, w, d); - sum_squares += inval*inval; - } - - // debuglog("silu execute... sum_squares=(%f)", sum_squares); - - float rms = sqrtf(sum_squares / d_in + epsilon_); - debuglog("rms execute... sum_squares=(%f)", 1.0f / rms); - debuglog("rms execute... sum_squares=(%f)", sum_squares); - - for (Idx d = 0; d < d_in; d++) { - float inval = in_0(b, h, w, d); - float weight = weights(0, 0, 0, d); - - out_0(b, h, w, d) = inval * weight / rms; - - } - + for (Idx h = 0; h < h_in; h++) { + for (Idx w = 0; w < w_in; w++) { + // RMS + float sum_squares = 0.0f; + for (Idx d = 0; d < d_in; d++) { + float inval = in_0(b, h, w, d); + sum_squares += inval * inval; + } + + // debuglog("silu execute... sum_squares=(%f)", sum_squares); + + float rms = sqrtf(sum_squares / d_in + epsilon_); + debuglog("rms execute... sum_squares=(%f)", 1.0f / rms); + debuglog("rms execute... 
sum_squares=(%f)", sum_squares); + + for (Idx d = 0; d < d_in; d++) { + float inval = in_0(b, h, w, d); + float weight = weights(0, 0, 0, d); + + out_0(b, h, w, d) = inval * weight / rms; + } + } } - } } - - - return GraphStatus::Success; + return GraphStatus::Success; } #endif +__attribute__((unused)) static float rmsnormCostFunc(const Op *op) { + /* + * add code here + * */ -__attribute__((unused)) static float rmsnormCostFunc(const Op *op) -{ - /* - * add code here - * */ - - float cost = 0.0; // add cost computation here - return cost; + float cost = 0.0; // add cost computation here + return cost; } - - - - /* At the bottom of the op file, call END_PKG_OP_DEFINITION(), where is as BEGIN_PKG_OP_DEFINITION */ diff --git a/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/RoPE.cpp b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/RoPE.cpp new file mode 100755 index 000000000..b3174ba78 --- /dev/null +++ b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/RoPE.cpp @@ -0,0 +1,982 @@ +//============================================================================== +// Auto Generated Code for LLaMAPackage +//============================================================================== + +#include "HTP/core/constraints.h" +#include "HTP/core/op_package_feature_support.h" +#include "HTP/core/op_register_ext.h" +#include "HTP/core/optimize.h" +#include "QnnOpPackage.h" +#include "HTP/core/simple_reg.h" +#include "HTP/core/tensor.h" + +BEGIN_PKG_OP_DEFINITION(PKG_RoPE); + +// op execute function declarations +template +GraphStatus ropeImpl(TensorType &out_0, + const TensorType &in_0, + const TensorType &sin, + const TensorType &cos, + const TensorType1 &h_cnt, + const Tensor &pose_type); + +// forward declaration of sample cost function +static float ropeCostFunc(const Op *op); + +/* + * method 1 for defining op, using default cost value (i.e. GLACIAL) and default flag (Flags::RESOURCE_HVX) + * syntax: DEF_PACKAGE_OP(F,OP) + * e.g. 
DEF_PACKAGE_OP((ropeImpl), "RoPE") + */ +DEF_PACKAGE_OP((ropeImpl), "RoPE") + +/* + * method 2 for defining op with specified cost value (one of GLACIAL, SNAIL, FAST, FREE) + * and provided flags + * syntax: DEF_PACKAGE_OP_AND_COST_AND_FLAGS(F,OP,COST,...) + * can use zero or more flags, FLAG options are IS_CONST, INHIBIT_CONST_PROP, + * RESOURCE_HVX, RESOURCE_HMX(not supported in external op packages) + * e.g. DEF_PACKAGE_OP_AND_COST_AND_FLAGS((ropeImpl), "RoPE", SNAIL) + */ + +/* + * method 3 for defining op with cost function pointer and provided flags + * cost function pointer type: typedef float (*cost_function) (const Op * op); + * syntax: DEF_PACKAGE_OP_AND_COST_F_AND_FLAGS(F,OP,COST_F,...) + * e.g. DEF_PACKAGE_OP_AND_COST_F_AND_FLAGS((ropeImpl), + * "RoPE", ropeCostFunc, Flags::RESOURCE_HVX) + */ + +/* + * optimization definitions + * need to be global in the package + * one definition per optimization + * syntax: DEF_PACKAGE_OPTIMIZATION(PRIORITY,MATCHCODE,CONSTRAINTCODE,REPLACECODE) + * PRIORITY predefined values include EARLY(2000), MIDDLE(3000), LATE(4000) + * HTP core provides some replacement functions for op package to use + * for more information about optimization rules, please refer to HTP core documentations + */ + +/* + * op parameter order definitions + * need to be global in the package + * one definition per op, and this is optional + * syntax: DEF_PACKAGE_PARAM_ORDER(OP,PARAM1,MANDATORY1,DEFAULT1,PARAM2,MANDATORY2,DEFAULT2...) 
+ * one or more parameters can be specified for each op + * order of parameters listed determines the order of parameters passed into op execution functions + * if an op does not have a parameter order definition, parameter order passed into Qnn_addNode + * will be passed into op execution functions + * if an op has a parameter order definition, any parameter passed into Qnn_addNode with unlisted + * name will be abandoned + * if two or more op packages with the same package name will be registered, they cannot list + * conflicting parameter orders + * PARAM refers to parameter name as a string literal + * MANDATORY refers to whether this parameter is required to be provided at Qnn_addNode + * DEFAULT is used when MANDATORY is false + * if provided as Qnn_Param_t*, + * DEFAULT will be used for graph construction when this parameter is not provided at + * Qnn_addNode + * if provided as nullptr, + * graph construction will skip this parameter when this parameter is not provided at + * Qnn_addNode + */ +DEF_PACKAGE_PARAM_ORDER("RoPE", + "pose_type", + true, + nullptr) + +/* execute functions for ops */ + +#ifndef REFERENCE_OP + +#include "qhmath_hvx.h" +#include "hvx_internal.h" +#include +#include + +#define BLOCK_SIZE (8 * 1024 / VLEN) /* vector chunks */ +#define L2FETCH_AHEAD (BLOCK_SIZE) +#define ONE 0x3F800000 +#define M_ONE 0xAF800000 + +int32_t hvx_rope_af( + float *restrict input, + float *restrict sin, + float *restrict cos, + float *restrict output, + uint32_t size, + uint32_t partial_dimension) { + if ((input == NULL) || (output == NULL) || (size == 0)) { + return -1; + } + + HVX_Vector *iptr = (HVX_Vector *)input; + HVX_Vector *iptr_half = (HVX_Vector *)(input + partial_dimension / 2); + HVX_Vector *iptr2 = (HVX_Vector *)sin; + HVX_Vector *iptr3 = (HVX_Vector *)cos; + HVX_UVector *optr = (HVX_UVector *)output; + HVX_UVector *optr_half = (HVX_UVector *)(output + partial_dimension / 2); + ; + HVX_Vector sline1; + HVX_Vector sline1_half; + HVX_Vector 
sinline1p, sinline1c, sinline1; + HVX_Vector cosline1p, cosline1c, cosline1; + + int32_t l2fetch_block; + int32_t leftover = size & 31; + int32_t vectors_in_rounddown = size / 32; + int32_t leftover_size = leftover * sizeof(float); + + sinline1p = *iptr2++; + cosline1p = *iptr3++; + + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { + l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); + + if (l2fetch_block > 0) { + l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); + l2fetch(iptr2 + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); + l2fetch(iptr3 + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); + } + + for (int32_t d = 0; d < partial_dimension / 2; d += 32) { + cosline1c = *iptr3++; + cosline1 = Q6_V_valign_VVR(cosline1c, cosline1p, (size_t)cos); + cosline1p = cosline1c; + + sinline1c = *iptr2++; + sinline1 = Q6_V_valign_VVR(sinline1c, sinline1p, (size_t)sin); + sinline1p = sinline1c; + + HVX_Vector *jiptr = iptr + d / 32; + HVX_Vector *jiptr_half = iptr_half + d / 32; + HVX_Vector *joptr = optr + d / 32; + HVX_Vector *joptr_half = optr_half + d / 32; + + for (int32_t j = 0; j < size / partial_dimension; j++) { + sline1 = *jiptr; + sline1_half = *jiptr_half; + + // auto value = in_value * cos_value - in_value_2 * sin_value; + { + HVX_Vector cos_middle_value_qf32 = Q6_Vqf32_vmpy_VsfVsf(sline1, cosline1); + HVX_Vector sin_middle_value_qf32 = Q6_Vqf32_vmpy_VsfVsf(sline1_half, sinline1); + *joptr = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vsub_Vqf32Vqf32(cos_middle_value_qf32, sin_middle_value_qf32)); + } + + // auto value2 = in_value * sin_value + in_value_2 * cos_value; + { + HVX_Vector cos_middle_value_qf32 = Q6_Vqf32_vmpy_VsfVsf(sline1_half, cosline1); + HVX_Vector sin_middle_value_qf32 = Q6_Vqf32_vmpy_VsfVsf(sline1, sinline1); + *joptr_half = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vqf32(cos_middle_value_qf32, sin_middle_value_qf32)); + } + + jiptr += partial_dimension / 32; + jiptr_half += partial_dimension / 32; + joptr += partial_dimension 
/ 32; + joptr_half += partial_dimension / 32; + } + } + } + + // if (vectors_in_rounddown > 0) { + + // sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? sline1p : *iptr++; + // sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t) input); + // sum = Q6_Vqf32_vadd_Vqf32Vqf32(sum, Q6_Vqf32_vmpy_VsfVsf(sline1, sline1)); + + // } + + if (leftover_size > 0) + return -1; + + return 0; +} + +static inline int32_t float_to_fp16s(float input) { + union { + int32_t i; + __fp16 f[2]; + } fp32 = {.f = {(__fp16)input, (__fp16)input}}; + return fp32.i; +} + +int32_t hvx_rope_uint8_af( + uint8_t *restrict input, + float *restrict sin, + float *restrict cos, + float *restrict output, + uint32_t size, + uint32_t partial_dimension) { + if ((input == NULL) || (output == NULL) || (size == 0)) { + return -1; + } + + HVX_Vector *iptr = (HVX_Vector *)input; + HVX_Vector *iptr2 = (HVX_Vector *)sin; + HVX_Vector *iptr3 = (HVX_Vector *)cos; + HVX_UVector *optr = (HVX_UVector *)output; + + int32_t l2fetch_block; + int32_t leftover = size & 127; + int32_t vectors_in_rounddown = size / 128; + int32_t leftover_size = leftover * sizeof(float); + + HVX_Vector zero_v_sf = Q6_V_vzero(); + uint32_t convert = 0x00800080; + HVX_Vector convert_vector = Q6_V_vsplat_R(convert); + HVX_Vector one_vec = Q6_V_vsplat_R(float_to_fp16s(1.0)); + + // + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { + l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); + + if (l2fetch_block > 0) { + l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); + } + + // + HVX_Vector sinline1_low = *iptr2; + HVX_Vector cosline1_low = *iptr3; + sinline1_low = Q6_Vqf32_vadd_VsfVsf(sinline1_low, Q6_V_vzero()); + cosline1_low = Q6_Vqf32_vadd_VsfVsf(cosline1_low, Q6_V_vzero()); + + HVX_Vector sinline1_high = *(iptr2 + 1); + HVX_Vector cosline1_high = *(iptr3 + 1); + sinline1_high = Q6_Vqf32_vadd_VsfVsf(sinline1_high, Q6_V_vzero()); + cosline1_high = Q6_Vqf32_vadd_VsfVsf(cosline1_high, Q6_V_vzero()); + + 
for (int32_t j = 0; j < size / partial_dimension; j++) { + HVX_Vector sline1 = *iptr++; + + HVX_VectorPair temp = Q6_Wh_vadd_VubVub(sline1, zero_v_sf); + + temp = Q6_W_vshuff_VVR(Q6_V_hi_W(temp), Q6_V_lo_W(temp), -2); + HVX_Vector sout1 = Q6_Vh_vsub_VhVh(Q6_V_lo_W(temp), convert_vector); + HVX_Vector sout2 = Q6_Vh_vsub_VhVh(Q6_V_hi_W(temp), convert_vector); + + HVX_VectorPair result1 = Q6_Wqf32_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout1), one_vec); + result1 = Q6_W_vshuff_VVR(Q6_V_hi_W(result1), Q6_V_lo_W(result1), -4); + + HVX_VectorPair result2 = Q6_Wqf32_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout2), one_vec); + result2 = Q6_W_vshuff_VVR(Q6_V_hi_W(result2), Q6_V_lo_W(result2), -4); + + // auto value = in_value * cos_value - in_value_2 * sin_value; + { + HVX_Vector cos_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result1), cosline1_low); + HVX_Vector sin_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result2), sinline1_low); + *optr = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vsub_Vqf32Vqf32(cos_middle_value_qf32, sin_middle_value_qf32)); + } + + // auto value2 = in_value * sin_value + in_value_2 * cos_value; + { + HVX_Vector cos_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result2), cosline1_low); + HVX_Vector sin_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result1), sinline1_low); + *(optr + 2) = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vqf32(cos_middle_value_qf32, sin_middle_value_qf32)); + } + + // auto value = in_value * cos_value - in_value_2 * sin_value; + { + HVX_Vector cos_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result1), cosline1_high); + HVX_Vector sin_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result2), sinline1_high); + *(optr + 1) = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vsub_Vqf32Vqf32(cos_middle_value_qf32, sin_middle_value_qf32)); + } + + // auto value2 = in_value * sin_value + in_value_2 * cos_value; + { + HVX_Vector cos_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result2), cosline1_high); + HVX_Vector 
sin_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result1), sinline1_high); + *(optr + 3) = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vqf32(cos_middle_value_qf32, sin_middle_value_qf32)); + } + + optr += 4; + } + } + + // if (vectors_in_rounddown > 0) { + + // sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? sline1p : *iptr++; + // sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t) input); + // sum = Q6_Vqf32_vadd_Vqf32Vqf32(sum, Q6_Vqf32_vmpy_VsfVsf(sline1, sline1)); + + // } + + if (leftover_size > 0) + return -1; + + return 0; +} + +int32_t hvx_rope_uint8_ahf( + uint8_t *restrict input, + float *restrict sin, + float *restrict cos, + __fp16 *restrict output, + uint32_t size, + uint32_t partial_dimension, + float scale) { + if ((input == NULL) || (output == NULL) || (size == 0)) { + return -1; + } + + HVX_Vector *iptr = (HVX_Vector *)input; + HVX_Vector *iptr2 = (HVX_Vector *)sin; + HVX_Vector *iptr3 = (HVX_Vector *)cos; + HVX_UVector *optr = (HVX_UVector *)output; + + int32_t l2fetch_block; + int32_t leftover = size & 127; + int32_t vectors_in_rounddown = size / 128; + int32_t leftover_size = leftover * sizeof(float); + + HVX_Vector zero_v_sf = Q6_V_vzero(); + uint32_t convert = 0x00800080; + HVX_Vector convert_vector = Q6_V_vsplat_R(convert); + + HVX_Vector scale_vec = Q6_V_vsplat_R(float_to_fp16s(scale)); + + // + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { + l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); + + if (l2fetch_block > 0) { + l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); + } + + // + HVX_Vector sinline1_low = *iptr2; + HVX_Vector cosline1_low = *iptr3; + sinline1_low = Q6_Vqf32_vadd_VsfVsf(sinline1_low, Q6_V_vzero()); + cosline1_low = Q6_Vqf32_vadd_VsfVsf(cosline1_low, Q6_V_vzero()); + + HVX_Vector sinline1_high = *(iptr2 + 1); + HVX_Vector cosline1_high = *(iptr3 + 1); + sinline1_high = Q6_Vqf32_vadd_VsfVsf(sinline1_high, Q6_V_vzero()); + cosline1_high = 
Q6_Vqf32_vadd_VsfVsf(cosline1_high, Q6_V_vzero()); + + for (int32_t j = 0; j < size / partial_dimension; j++) { + HVX_Vector sline1 = *iptr++; + + HVX_VectorPair temp = Q6_Wh_vadd_VubVub(sline1, zero_v_sf); + + temp = Q6_W_vshuff_VVR(Q6_V_hi_W(temp), Q6_V_lo_W(temp), -2); + HVX_Vector sout1 = Q6_Vh_vsub_VhVh(Q6_V_lo_W(temp), convert_vector); + HVX_Vector sout2 = Q6_Vh_vsub_VhVh(Q6_V_hi_W(temp), convert_vector); + + HVX_VectorPair result1 = Q6_Wqf32_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout1), scale_vec); + result1 = Q6_W_vshuff_VVR(Q6_V_hi_W(result1), Q6_V_lo_W(result1), -4); + + HVX_VectorPair result2 = Q6_Wqf32_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout2), scale_vec); + result2 = Q6_W_vshuff_VVR(Q6_V_hi_W(result2), Q6_V_lo_W(result2), -4); + + { + HVX_Vector first; + HVX_Vector second; + // auto value = in_value * cos_value - in_value_2 * sin_value; + { + HVX_Vector cos_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result1), cosline1_low); + HVX_Vector sin_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result2), sinline1_low); + first = Q6_Vqf32_vsub_Vqf32Vqf32(cos_middle_value_qf32, sin_middle_value_qf32); + } + + // auto value = in_value * cos_value - in_value_2 * sin_value; + { + HVX_Vector cos_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result1), cosline1_high); + HVX_Vector sin_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result2), sinline1_high); + second = Q6_Vqf32_vsub_Vqf32Vqf32(cos_middle_value_qf32, sin_middle_value_qf32); + } + + HVX_Vector r = Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(second, first)); + r = Q6_Vh_vdeal_Vh(r); + *optr = r; + } + + { + HVX_Vector first; + HVX_Vector second; + // auto value2 = in_value * sin_value + in_value_2 * cos_value; + { + HVX_Vector cos_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result2), cosline1_low); + HVX_Vector sin_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result1), sinline1_low); + first = Q6_Vqf32_vadd_Vqf32Vqf32(cos_middle_value_qf32, sin_middle_value_qf32); + } + 
+ // auto value2 = in_value * sin_value + in_value_2 * cos_value; + { + HVX_Vector cos_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result2), cosline1_high); + HVX_Vector sin_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result1), sinline1_high); + second = Q6_Vqf32_vadd_Vqf32Vqf32(cos_middle_value_qf32, sin_middle_value_qf32); + } + HVX_Vector r = Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(second, first)); + r = Q6_Vh_vdeal_Vh(r); + *(optr + 1) = r; + } + + optr += 2; + } + } + + // if (vectors_in_rounddown > 0) { + + // sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? sline1p : *iptr++; + // sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t) input); + // sum = Q6_Vqf32_vadd_Vqf32Vqf32(sum, Q6_Vqf32_vmpy_VsfVsf(sline1, sline1)); + + // } + + if (leftover_size > 0) + return -1; + + return 0; +} + +int32_t hvx_rope_ahf( + __fp16 *restrict input, + float *restrict sin, + float *restrict cos, + __fp16 *restrict output, + uint32_t size, + uint32_t partial_dimension) { + if ((input == NULL) || (output == NULL) || (size == 0)) { + return -1; + } + + HVX_Vector *iptr = (HVX_Vector *)input; + HVX_Vector *iptr_half = (HVX_Vector *)(input + partial_dimension / 2); + HVX_Vector *iptr2 = (HVX_Vector *)sin; + HVX_Vector *iptr3 = (HVX_Vector *)cos; + HVX_UVector *optr = (HVX_UVector *)output; + HVX_UVector *optr_half = (HVX_UVector *)(output + partial_dimension / 2); + ; + HVX_Vector sline1; + HVX_Vector sline1_half; + + int32_t l2fetch_block; + int32_t leftover = size & 63; + int32_t vectors_in_rounddown = size / 64; + int32_t leftover_size = leftover * sizeof(float); + + HVX_Vector one_vsf = Q6_V_vsplat_R(ONE); + HVX_Vector m_one_vqf32 = Q6_Vqf32_vsub_VsfVsf(Q6_V_vzero(), one_vsf); + + HVX_Vector one_vhf = Q6_V_vsplat_R(float_to_fp16s(1.0)); + // HVX_Vector m_one_vqf16 = Q6_Vqf32_vsub_VsfVsf(Q6_V_vzero(), one_vhf); + + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { + l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); + + if 
(l2fetch_block > 0) { + l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); + l2fetch(iptr2 + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); + l2fetch(iptr3 + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); + } + + for (int32_t d = 0; d < partial_dimension / 2; d += 64) { + HVX_Vector sinline1_low = *iptr2++; + HVX_Vector cosline1_low = *iptr3++; + + HVX_Vector sinline1_high = *iptr2++; + HVX_Vector cosline1_high = *iptr3++; + + HVX_Vector *jiptr = iptr + d / 64; + HVX_Vector *jiptr_half = iptr_half + d / 64; + HVX_Vector *joptr = optr + d / 64; + HVX_Vector *joptr_half = optr_half + d / 64; + + for (int32_t j = 0; j < size / partial_dimension; j++) { + sline1 = *jiptr; + sline1_half = *jiptr_half; + + HVX_VectorPair sline1_half_pair = Q6_Wqf32_vmpy_VhfVhf(sline1_half, one_vhf); + HVX_VectorPair sline1_pair = Q6_Wqf32_vmpy_VhfVhf(sline1, one_vhf); + + sline1_half_pair = Q6_W_vshuff_VVR(Q6_V_hi_W(sline1_half_pair), Q6_V_lo_W(sline1_half_pair), -4); + sline1_pair = Q6_W_vshuff_VVR(Q6_V_hi_W(sline1_pair), Q6_V_lo_W(sline1_pair), -4); + + HVX_Vector m_sline1_half_low = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(sline1_half_pair), m_one_vqf32); + HVX_Vector m_sline1_half_hi = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(sline1_half_pair), m_one_vqf32); + + // auto value = in_value * cos_value - in_value_2 * sin_value; + HVX_Vector middle_value_low; + { + HVX_Vector cosline1_vqf32_low = Q6_Vqf32_vadd_VsfVsf(cosline1_low, Q6_V_vzero()); + HVX_Vector cos_middle_value_qf32_low = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(sline1_pair), cosline1_vqf32_low); + + HVX_Vector sinline1_vqf32_low = Q6_Vqf32_vadd_VsfVsf(sinline1_low, Q6_V_vzero()); + + HVX_Vector sin_middle_value_qf32_low = Q6_Vqf32_vmpy_Vqf32Vqf32(m_sline1_half_low, sinline1_vqf32_low); + middle_value_low = Q6_Vqf32_vadd_Vqf32Vqf32(cos_middle_value_qf32_low, sin_middle_value_qf32_low); + } + + // auto value2 = in_value * sin_value + in_value_2 * cos_value; + + HVX_Vector middle_value_half_low; + { + HVX_Vector cosline1_vqf32_low = 
Q6_Vqf32_vadd_VsfVsf(cosline1_low, Q6_V_vzero()); + HVX_Vector cos_middle_value_qf32_low = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(sline1_half_pair), cosline1_vqf32_low); + + HVX_Vector sinline1_vqf32_low = Q6_Vqf32_vadd_VsfVsf(sinline1_low, Q6_V_vzero()); + HVX_Vector sin_middle_value_qf32_low = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(sline1_pair), sinline1_vqf32_low); + + middle_value_half_low = Q6_Vqf32_vadd_Vqf32Vqf32(cos_middle_value_qf32_low, sin_middle_value_qf32_low); + } + + // second qf16 vector + HVX_Vector middle_value_high; + { + HVX_Vector cosline1_vqf32_high = Q6_Vqf32_vadd_VsfVsf(cosline1_high, Q6_V_vzero()); + HVX_Vector cos_middle_value_qf32_high = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(sline1_pair), cosline1_vqf32_high); + + HVX_Vector sinline1_vqf32_high = Q6_Vqf32_vadd_VsfVsf(sinline1_high, Q6_V_vzero()); + + HVX_Vector sin_middle_value_qf32_high = Q6_Vqf32_vmpy_Vqf32Vqf32(m_sline1_half_hi, sinline1_vqf32_high); + middle_value_high = Q6_Vqf32_vadd_Vqf32Vqf32(cos_middle_value_qf32_high, sin_middle_value_qf32_high); + } + + // auto value2 = in_value * sin_value + in_value_2 * cos_value; + + HVX_Vector middle_value_half_high; + { + HVX_Vector cosline1_vqf32_high = Q6_Vqf32_vadd_VsfVsf(cosline1_high, Q6_V_vzero()); + HVX_Vector cos_middle_value_qf32_high = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(sline1_half_pair), cosline1_vqf32_high); + + HVX_Vector sinline1_vqf32_high = Q6_Vqf32_vadd_VsfVsf(sinline1_high, Q6_V_vzero()); + HVX_Vector sin_middle_value_qf32_high = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(sline1_pair), sinline1_vqf32_high); + + middle_value_half_high = Q6_Vqf32_vadd_Vqf32Vqf32(cos_middle_value_qf32_high, sin_middle_value_qf32_high); + } + + HVX_Vector sline = Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(middle_value_high, middle_value_low)); + sline = Q6_Vh_vdeal_Vh(sline); + + HVX_Vector sline_half = Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(middle_value_half_high, middle_value_half_low)); + sline_half = Q6_Vh_vdeal_Vh(sline_half); + + *joptr = sline; + *joptr_half = 
sline_half; + + jiptr += partial_dimension / 64; + jiptr_half += partial_dimension / 64; + joptr += partial_dimension / 64; + joptr_half += partial_dimension / 64; + } + } + } + + // if (vectors_in_rounddown > 0) { + + // sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? sline1p : *iptr++; + // sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t) input); + // sum = Q6_Vqf32_vadd_Vqf32Vqf32(sum, Q6_Vqf32_vmpy_VsfVsf(sline1, sline1)); + + // } + + if (leftover_size > 0) + return -1; + + return 0; +} + +template +GraphStatus ropeImpl(TensorType &out_0, + const TensorType &in_0, + const TensorType &sin, + const TensorType &cos, + const TensorType1 &h_cnt, + const Tensor &pose_type) { + out_0.set_dims(in_0); + + auto pose_type_ = pose_type(0, 0, 0, 0); + auto h_cnt_ = static_cast(h_cnt(0, 0, 0, 0)); + + if (pose_type_ == 4) { + DType dtype = out_0.get_dtype(); + + if (in_0.get_dtype() == DType::Float32 && dtype == DType::Float32) { + auto in_ptr = (float *)in_0.raw_data_const(); + auto sin_ptr = (float *)sin.raw_data_const(); + auto cos_ptr = (float *)cos.raw_data_const(); + auto out_ptr = (float *)out_0.raw_data(); + + auto [b_in, h_in, w_in, d_in] = in_0.dims(); + + uint32_t half_dimension = d_in / 2; + sin_ptr += half_dimension * h_cnt_; + cos_ptr += half_dimension * h_cnt_; + + int partial_dimension = d_in; + + // NSHD + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; h++) { + // for (Idx w = 0; w < w_in; w++) { + hvx_rope_af(in_ptr, sin_ptr, cos_ptr, out_ptr, w_in * d_in, partial_dimension); + + in_ptr += w_in * d_in; + out_ptr += w_in * d_in; + // } + + sin_ptr += half_dimension; + cos_ptr += half_dimension; + } + } + } else if (in_0.get_dtype() == DType::Float16 && dtype == DType::Float16) { + auto in_ptr = (__fp16 *)in_0.raw_data_const(); + auto sin_ptr = (float *)sin.raw_data_const(); + auto cos_ptr = (float *)cos.raw_data_const(); + auto out_ptr = (__fp16 *)out_0.raw_data(); + + auto [b_in, h_in, w_in, d_in] = in_0.dims(); + + uint32_t 
half_dimension = d_in / 2; + sin_ptr += half_dimension * h_cnt_; + cos_ptr += half_dimension * h_cnt_; + + int partial_dimension = d_in; + + // NSHD + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; h++) { + // for (Idx w = 0; w < w_in; w++) { + hvx_rope_ahf(in_ptr, sin_ptr, cos_ptr, out_ptr, w_in * d_in, partial_dimension); + + in_ptr += w_in * d_in; + out_ptr += w_in * d_in; + // } + + sin_ptr += half_dimension; + cos_ptr += half_dimension; + } + } + } else if (in_0.get_dtype() == DType::QUInt8 && dtype == DType::Float32) { + auto in_ptr = (uint8_t *)in_0.raw_data_const(); + auto sin_ptr = (float *)sin.raw_data_const(); + auto cos_ptr = (float *)cos.raw_data_const(); + auto out_ptr = (float *)out_0.raw_data(); + + auto [b_in, h_in, w_in, d_in] = in_0.dims(); + + uint32_t half_dimension = d_in / 2; + sin_ptr += half_dimension * h_cnt_; + cos_ptr += half_dimension * h_cnt_; + + int partial_dimension = d_in; + + // NSHD + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; h++) { + // for (Idx w = 0; w < w_in; w++) { + hvx_rope_uint8_af(in_ptr, sin_ptr, cos_ptr, out_ptr, w_in * d_in, partial_dimension); + + in_ptr += w_in * d_in; + out_ptr += w_in * d_in; + // } + + sin_ptr += half_dimension; + cos_ptr += half_dimension; + } + } + } else if (in_0.get_dtype() == DType::QUInt8 && dtype == DType::Float16) { + auto in_ptr = (uint8_t *)in_0.raw_data_const(); + auto sin_ptr = (float *)sin.raw_data_const(); + auto cos_ptr = (float *)cos.raw_data_const(); + auto out_ptr = (__fp16 *)out_0.raw_data(); + + float scale_ = in_0.interface_scale(); + + auto [b_in, h_in, w_in, d_in] = in_0.dims(); + + uint32_t half_dimension = d_in / 2; + sin_ptr += half_dimension * h_cnt_; + cos_ptr += half_dimension * h_cnt_; + + int partial_dimension = d_in; + + // NSHD + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; h++) { + // for (Idx w = 0; w < w_in; w++) { + hvx_rope_uint8_ahf(in_ptr, sin_ptr, cos_ptr, out_ptr, w_in * d_in, partial_dimension, scale_); + 
+ in_ptr += w_in * d_in; + out_ptr += w_in * d_in; + // } + + sin_ptr += half_dimension; + cos_ptr += half_dimension; + } + } + } + + } else { + // only support pose_type == 2 (LLaMA) now + return GraphStatus::ErrorFatal; + } + + return GraphStatus::Success; +} + +#else + +template +GraphStatus ropeImpl(TensorType &out_0, + const TensorType &in_0, + const TensorType &sin, + const TensorType &cos, + const TensorType1 &h_cnt, + const Tensor &pose_type) + +{ + /* + * add code here + * */ + /* + * To have good performance and stability, it is required to avoid heap memory + * allocation in this function. The heap memory allocation includes but not + * limited to calling malloc, operator new, constructing STL container objects + * like std::vector with default allocator, and adding items like calling + * std::vector::push_back to STL container objects with default allocator. + * + * Please check in SDK documentation for more information. + */ + + debuglog("RoPE execute... dims=(%zdx%zdx%zdx%zd)", in_0.dim(0), in_0.dim(1), in_0.dim(2), in_0.dim(3)); + debuglog("RoPE execute... dims=(%zdx%zdx%zdx%zd)", sin.dim(0), sin.dim(1), sin.dim(2), sin.dim(3)); + debuglog("RoPE execute... 
dims=(%zdx%zdx%zdx%zd)", cos.dim(0), cos.dim(1), cos.dim(2), cos.dim(3)); + + // BSHD => NHWC + + // Todo: We need consider to store the sequence position if we have KV Cache + + auto pose_type_ = pose_type(0, 0, 0, 0); + auto h_cnt_ = static_cast(h_cnt(0, 0, 0, 0)); + + out_0.set_dims(in_0); + auto [b_in, h_in, w_in, d_in] = in_0.dims(); + if (pose_type_ == 4) { + DType dtype = out_0.get_dtype(); + + if (dtype == DType::Float32) { + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; h++) { + for (Idx w = 0; w < w_in; w++) { + int s = h; // BSHD order + int partial_dimension = d_in; + int half = (int)(partial_dimension / 2); + for (Idx d = 0; d < partial_dimension / 2; ++d) { + float in_value = in_0(b, h, w, d); + float in_value_2 = in_0(b, h, w, d + half); + float sin_value = sin(0, 0, s + h_cnt_, d); + float cos_value = cos(0, 0, s + h_cnt_, d); + auto value = in_value * cos_value - in_value_2 * sin_value; + auto value2 = in_value * sin_value + in_value_2 * cos_value; + out_0(b, h, w, d) = value; + out_0(b, h, w, d + half) = value2; + } + } + } + } + } else if (dtype == DType::Float16) { + auto in_ptr = (__fp16 *)in_0.raw_data_const(); + // auto sin_ptr = (__fp16*)sin.raw_data_const(); + // auto cos_ptr = (__fp16*)cos.raw_data_const(); + auto out_ptr = (__fp16 *)out_0.raw_data(); + + for (Idx b = 0; b < b_in; b++) { + for (Idx h = 0; h < h_in; h++) { + for (Idx w = 0; w < w_in; w++) { + int s = h; // BSHD order + int partial_dimension = d_in; + int half = (int)(partial_dimension / 2); + for (Idx d = 0; d < partial_dimension / 2; ++d) { + __fp16 in_value = *in_ptr; + __fp16 in_value_2 = *(in_ptr + half); + float sin_value = sin(0, 0, s + h_cnt_, d); + float cos_value = cos(0, 0, s + h_cnt_, d); + auto value = in_value * cos_value - in_value_2 * sin_value; + auto value2 = in_value * sin_value + in_value_2 * cos_value; + *out_ptr = static_cast<__fp16>(value); + *(out_ptr + half) = static_cast<__fp16>(value2); + + out_ptr++; + in_ptr++; + } + + out_ptr += 
half; + in_ptr += half; + } + } + } + } + } + + // for (Idx b = 0; b < b_in; b++) { + // for (Idx h = 0; h < h_in; h++) { + // for (Idx w = 0; w < w_in; w++) { + // // RoPE + // for (Idx d = 0; d < d_in; d++) { + + // int s = h; // BSHD order + // if (pose_type_ == 1) { + // float in_value = in_0(b, h, w, d); + // float in_value_2; + // if (d < d_in / 2) { // 偶數 0,2,4 + // in_value_2 = -in_0(b, h, w, d + d_in / 2); + // } else { + // in_value_2 = in_0(b, h, w, d - d_in / 2); + // } + // float sin_value = sin(0, 0, s +h_cnt_, d); + // float cos_value = cos(0, 0, s +h_cnt_, d); + // auto value = in_value * cos_value + in_value_2 * sin_value; + // out_0(b, h, w, d) = value; + // } + // else if (pose_type_ == 2) { + // float in_value = in_0(b, h, w, d); + // debuglog("rope execute... in_value=(%f)", in_value); + // float in_value_2; + // if (d % 2 == 0) { // 偶數 0,2,4 + // in_value_2 = -in_0(b, h, w, d + 1); + // } else { + // in_value_2 = in_0(b, h, w, d - 1); + // } + // debuglog("rope execute... in_value_2=(%f)", in_value_2); + // float sin_value = sin(0, 0, s +h_cnt_, d); + // float cos_value = cos(0, 0, s +h_cnt_, d); + // auto value = in_value * cos_value + in_value_2 * sin_value; + + // debuglog("rope execute... sin_value=(%f)", sin_value); + // debuglog("rope execute... cos_value=(%f)", cos_value); + + // debuglog("rope execute... 
value=(%f)", value); + // out_0(b, h, w, d) = value; + // } else if (pose_type_ == 4) { + // } else { + // float in_value = in_0(b, h, w, d); + // float in_value_2; + // float sin_value = sin(0, 0, s +h_cnt_, d); + // float cos_value = cos(0, 0, s +h_cnt_, d); + // if (d < d_in / 4) { + // in_value_2 = -in_0(b, h, w, d + d_in / 4); + // auto value = in_value * cos_value + in_value_2 * sin_value; + + // out_0(b ,h , w, d) = value; + // } else if(d < d_in / 2){ + // in_value_2 = in_0(b, h, w, d - d_in / 4); + // auto value = in_value * cos_value + in_value_2 * sin_value; + + // out_0(b ,h , w, d) = value; + // }else { + + // out_0(b ,h , w, d) = in_value; + // } + // } + + // } + // } + // } + // } + + // auto &input = inputs[0]; + // auto &output = outputs[0]; + // for (int n = 0; n < input->batch(); ++n) { + // for (int h = 0; h < input->head(); ++h) { + // for (int s = 0; s < input->sequence(); ++s) {//sequance + // #pragma omp parallel for num_threads(4) + // for (int d = 0; d < input->dimension(); ++d) { + // if (pose_type_== 1) { + // float in_value = input->dataAt(n, h, s, d); + // float in_value_2; + // if (d < input->dimension() / 2) { // 偶數 0,2,4 + // in_value_2 = -input->dataAt(n, h, s, d + input->dimension() / 2); + // } else { + // in_value_2 = input->dataAt(n, h, s, d - input->dimension() / 2); + // } + // float sin_value = sin_.dataAt(0, 0, s +h_cnt_, d); + // float cos_value = cos_.dataAt(0, 0, s +h_cnt_, d); + // auto value = in_value * cos_value + in_value_2 * sin_value; + // if(output->dtypeAt(n,h,s, d) == MLLM_TYPE_F32) { + // output->setDataAt(n, h, s, d, value); + // } + // else if(output->dtypeAt(n,h,s, d) == MLLM_TYPE_F16) { + // output->setDataAt(n, h, s, d, MLLM_FP32_TO_FP16(value)); + // } + // } + // else if (pose_type_== 2) { + // float in_value = input->dataAt(n, h, s, d); + // float in_value_2; + // if (d % 2 == 0) { // 偶數 0,2,4 + // in_value_2 = -input->dataAt(n, h, s, d + 1); + // } else { + // in_value_2 = input->dataAt(n, h, s, d - 
1); + // } + // float sin_value = sin_.dataAt(0, 0, s +h_cnt_, d); + // float cos_value = cos_.dataAt(0, 0, s +h_cnt_, d); + // auto value = in_value * cos_value + in_value_2 * sin_value; + // if(output->dtypeAt(n,h,s, d) == MLLM_TYPE_F32) { + // output->setDataAt(n, h, s, d, value); + // } + // else if(output->dtypeAt(n,h,s, d) == MLLM_TYPE_F16) { + // output->setDataAt(n, h, s, d, MLLM_FP32_TO_FP16(value)); + // } + // }else{ + // float in_value = input->dataAt(n, h, s, d); + // float in_value_2; + // float sin_value = sin_.dataAt(0, 0, s +h_cnt_, d); + // float cos_value = cos_.dataAt(0, 0, s +h_cnt_, d); + // if (d < input->dimension() / 4) { + // in_value_2 = - input->dataAt(n, h, s, d + input->dimension() / 4); + // auto value = in_value * cos_value + in_value_2 * sin_value; + // if(output->dtypeAt(n,h,s, d) == MLLM_TYPE_F32) { + // output->setDataAt(n, h, s, d, value); + // } + // else if(output->dtypeAt(n,h,s, d) == MLLM_TYPE_F16) { + // output->setDataAt(n, h, s, d, MLLM_FP32_TO_FP16(value)); + // } + // } else if(d < input->dimension() / 2){ + // in_value_2 = input->dataAt(n, h, s, d - input->dimension() / 4); + // auto value = in_value * cos_value + in_value_2 * sin_value; + // if(output->dtypeAt(n,h,s, d) == MLLM_TYPE_F32) { + // output->setDataAt(n, h, s, d, value); + // } + // else if(output->dtypeAt(n,h,s, d) == MLLM_TYPE_F16) { + // output->setDataAt(n, h, s, d, MLLM_FP32_TO_FP16(value)); + // } + // }else { + // if(output->dtypeAt(n,h,s, d) == MLLM_TYPE_F32) { + // output->setDataAt(n, h, s, d, in_value); + // } + // else if(output->dtypeAt(n,h,s, d) == MLLM_TYPE_F16) { + // output->setDataAt(n, h, s, d, MLLM_FP32_TO_FP16(in_value)); + // } + // } + // } + // } + // } + // } + // } + + // Todo store history position + // h_cnt_ += input->sequence(); + // if(h_cnt_ >pos_max_){ + // h_cnt_ = 0; + // } + + return GraphStatus::Success; +} + +#endif + +__attribute__((unused)) static float ropeCostFunc(const Op *op) { + /* + * add code here + * */ + + 
float cost = 0.0; // add cost computation here + return cost; +} + +/* At the bottom of the op file, call END_PKG_OP_DEFINITION(), + where is as BEGIN_PKG_OP_DEFINITION +*/ +END_PKG_OP_DEFINITION(PKG_RoPE); \ No newline at end of file diff --git a/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/RoPESimple.cpp b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/RoPESimple.cpp new file mode 100644 index 000000000..61caf9f2d --- /dev/null +++ b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/RoPESimple.cpp @@ -0,0 +1,210 @@ +//============================================================================== +// Auto Generated Code for LLaMAPackage +//============================================================================== + +#include "HTP/core/constraints.h" +#include "HTP/core/op_package_feature_support.h" +#include "HTP/core/op_register_ext.h" +#include "HTP/core/optimize.h" +#include "QnnOpPackage.h" +#include "HTP/core/simple_reg.h" + +BEGIN_PKG_OP_DEFINITION(PKG_RoPESimple); + +// op execute function declarations +template +GraphStatus ropeSimpleImpl(TensorType &out_0, + const TensorType &in_0, + const TensorType &sin, + const TensorType &cos); + +// forward declaration of sample cost function +static float ropeSimpleCostFunc(const Op *op); + +/* + * method 1 for defining op, using default cost value (i.e. GLACIAL) and default flag (Flags::RESOURCE_HVX) + * syntax: DEF_PACKAGE_OP(F,OP) + * e.g. DEF_PACKAGE_OP((ropeImpl), "RoPE") + */ +DEF_PACKAGE_OP((ropeSimpleImpl), "RoPESimple") + +/* + * method 2 for defining op with specified cost value (one of GLACIAL, SNAIL, FAST, FREE) + * and provided flags + * syntax: DEF_PACKAGE_OP_AND_COST_AND_FLAGS(F,OP,COST,...) + * can use zero or more flags, FLAG options are IS_CONST, INHIBIT_CONST_PROP, + * RESOURCE_HVX, RESOURCE_HMX(not supported in external op packages) + * e.g. 
DEF_PACKAGE_OP_AND_COST_AND_FLAGS((ropeImpl), "RoPE", SNAIL) + */ + +/* + * method 3 for defining op with cost function pointer and provided flags + * cost function pointer type: typedef float (*cost_function) (const Op * op); + * syntax: DEF_PACKAGE_OP_AND_COST_F_AND_FLAGS(F,OP,COST_F,...) + * e.g. DEF_PACKAGE_OP_AND_COST_F_AND_FLAGS((ropeImpl), + * "RoPE", ropeCostFunc, Flags::RESOURCE_HVX) + */ + +/* + * optimization definitions + * need to be global in the package + * one definition per optimization + * syntax: DEF_PACKAGE_OPTIMIZATION(PRIORITY,MATCHCODE,CONSTRAINTCODE,REPLACECODE) + * PRIORITY predefined values include EARLY(2000), MIDDLE(3000), LATE(4000) + * HTP core provides some replacement functions for op package to use + * for more information about optimization rules, please refer to HTP core documentations + */ + +/* + * op parameter order definitions + * need to be global in the package + * one definition per op, and this is optional + * syntax: DEF_PACKAGE_PARAM_ORDER(OP,PARAM1,MANDATORY1,DEFAULT1,PARAM2,MANDATORY2,DEFAULT2...) 
+ * one or more parameters can be specified for each op + * order of parameters listed determines the order of parameters passed into op execution functions + * if an op does not have a parameter order definition, parameter order passed into Qnn_addNode + * will be passed into op execution functions + * if an op has a parameter order definition, any parameter passed into Qnn_addNode with unlisted + * name will be abandoned + * if two or more op packages with the same package name will be registered, they cannot list + * conflicting parameter orders + * PARAM refers to parameter name as a string literal + * MANDATORY refers to whether this parameter is required to be provided at Qnn_addNode + * DEFAULT is used when MANDATORY is false + * if provided as Qnn_Param_t*, + * DEFAULT will be used for graph construction when this parameter is not provided at + * Qnn_addNode + * if provided as nullptr, + * graph construction will skip this parameter when this parameter is not provided at + * Qnn_addNode + */ + +/* execute functions for ops */ + + +// #ifndef REFERENCE_OP + +// #include "qhmath_hvx.h" +// #include "hvx_internal.h" +// #include +// #include + +// #define BLOCK_SIZE (8 * 1024 / VLEN) /* vector chunks */ +// #define L2FETCH_AHEAD (BLOCK_SIZE) +// #define ONE 0x3F800000 +// #define M_ONE 0xAF800000 + +// // TODO: hvx ropesimple implementation + +// template +// GraphStatus ropeSimpleImpl(TensorType &out_0, +// const TensorType &in_0, +// const TensorType &sin, +// const TensorType &cos) { +// out_0.set_dims(in_0); + +// return GraphStatus::Success; +// } + +// #else + +template +GraphStatus ropeSimpleImpl(TensorType &out_0, + const TensorType &in_0, + const TensorType &sin, + const TensorType &cos) + +{ + + /* + * add code here + * */ + /* + * To have good performance and stability, it is required to avoid heap memory + * allocation in this function. 
The heap memory allocation includes but not + * limited to calling malloc, operator new, constructing STL container objects + * like std::vector with default allocator, and adding items like calling + * std::vector::push_back to STL container objects with default allocator. + * + * Please check in SDK documentation for more information. + */ + + debuglog("RoPESimple execute... dims=(%zdx%zdx%zdx%zd)", in_0.dim(0), in_0.dim(1), in_0.dim(2), in_0.dim(3)); + debuglog("RoPESimple execute... dims=(%zdx%zdx%zdx%zd)", sin.dim(0), sin.dim(1), sin.dim(2), sin.dim(3)); + debuglog("RoPESimple execute... dims=(%zdx%zdx%zdx%zd)", cos.dim(0), cos.dim(1), cos.dim(2), cos.dim(3)); + + // BSHD => NHWC + + out_0.set_dims(in_0); + auto [b_in, w_in, h_in, d_in] = in_0.dims(); + DType dtype = out_0.get_dtype(); + + if (dtype == DType::Float32) { + for (Idx b = 0; b < b_in; b++) { + for (Idx w = 0; w < w_in; w++) { + for (Idx h = 0; h < h_in; h++) { + int partial_dimension = d_in; + int half = (int)(partial_dimension / 2); + for (Idx d = 0; d < partial_dimension / 2; ++d) { + float in_value = in_0(b, w, h, d); + float in_value_2 = in_0(b, w, h, d + half); + float sin_value = sin(0, 0, w, d); + float cos_value = cos(0, 0, w, d); + auto value = in_value * cos_value - in_value_2 * sin_value; + auto value2 = in_value * sin_value + in_value_2 * cos_value; + out_0(b, w, h, d) = value; + out_0(b, w, h, d + half) = value2; + } + } + } + + } + } else if (dtype == DType::Float16) { + auto in_ptr = (__fp16 *)in_0.raw_data_const(); + // auto sin_ptr = (__fp16*)sin.raw_data_const(); + // auto cos_ptr = (__fp16*)cos.raw_data_const(); + auto out_ptr = (__fp16 *)out_0.raw_data(); + + for (Idx b = 0; b < b_in; b++) { + for (Idx w = 0; w < w_in; w++) { + for (Idx h = 0; h < h_in; h++) { + int partial_dimension = d_in; + int half = (int)(partial_dimension / 2); + for (Idx d = 0; d < partial_dimension / 2; ++d) { + __fp16 in_value = *in_ptr; + __fp16 in_value_2 = *(in_ptr + half); + float sin_value = 
sin(0, 0, w, d); + float cos_value = cos(0, 0, w, d); + auto value = in_value * cos_value - in_value_2 * sin_value; + auto value2 = in_value * sin_value + in_value_2 * cos_value; + *out_ptr = static_cast<__fp16>(value); + *(out_ptr + half) = static_cast<__fp16>(value2); + + out_ptr++; + in_ptr++; + } + + out_ptr += half; + in_ptr += half; + } + } + } + } + return GraphStatus::Success; +} + +// #endif + +__attribute__((unused)) static float ropeCostFunc(const Op *op) { + /* + * add code here + * */ + + float cost = 0.0; // add cost computation here + return cost; +} + +/* At the bottom of the op file, call END_PKG_OP_DEFINITION(), + where is as BEGIN_PKG_OP_DEFINITION +*/ +END_PKG_OP_DEFINITION(PKG_RoPESimple); \ No newline at end of file diff --git a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/SiLU.cpp b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/SiLU.cpp similarity index 79% rename from src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/SiLU.cpp rename to mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/SiLU.cpp index 28271772f..8b56e7e80 100755 --- a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/SiLU.cpp +++ b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/SiLU.cpp @@ -9,14 +9,12 @@ #include "QnnOpPackage.h" #include "HTP/core/simple_reg.h" - BEGIN_PKG_OP_DEFINITION(PKG_SiLU); - // op execute function declarations -template -GraphStatus siluImpl(TensorType& out_0, - const TensorType& in_0); +template +GraphStatus siluImpl(TensorType &out_0, + const TensorType &in_0); // forward declaration of sample cost function static float siluCostFunc(const Op *op); @@ -61,11 +59,11 @@ DEF_PACKAGE_OP((siluImpl), "SiLU") * one definition per op, and this is optional * syntax: DEF_PACKAGE_PARAM_ORDER(OP,PARAM1,MANDATORY1,DEFAULT1,PARAM2,MANDATORY2,DEFAULT2...) 
* one or more parameters can be specified for each op - * order of parameters listed determines the order of parameters passed into op execution functions + * order of parameters listed determines the order of parameters passed into op execution functions * if an op does not have a parameter order definition, parameter order passed into Qnn_addNode * will be passed into op execution functions * if an op has a parameter order definition, any parameter passed into Qnn_addNode with unlisted - * name will be abandoned + * name will be abandoned * if two or more op packages with the same package name will be registered, they cannot list * conflicting parameter orders * PARAM refers to parameter name as a string literal @@ -79,7 +77,6 @@ DEF_PACKAGE_OP((siluImpl), "SiLU") * Qnn_addNode */ - /* execute functions for ops */ #ifndef REFERENCE_OP @@ -88,11 +85,10 @@ DEF_PACKAGE_OP((siluImpl), "SiLU") #include #include -#define BLOCK_SIZE (8*1024/VLEN) /* vector chunks */ -#define L2FETCH_AHEAD (BLOCK_SIZE) +#define BLOCK_SIZE (8 * 1024 / VLEN) /* vector chunks */ +#define L2FETCH_AHEAD (BLOCK_SIZE) -static inline int32_t float_to_fp16s(float input) -{ +static inline int32_t float_to_fp16s(float input) { union { int32_t i; __fp16 f[2]; @@ -100,48 +96,189 @@ static inline int32_t float_to_fp16s(float input) return fp32.i; } -static HVX_INLINE_ALWAYS uint32_t float_to_bits(float x) -{ - union { float f; uint32_t i; } fp32 = { .f = x }; +static HVX_INLINE_ALWAYS uint32_t float_to_bits(float x) { + union { + float f; + uint32_t i; + } fp32 = {.f = x}; return fp32.i; } - /* Polynomial coefficients */ static const float c0_coeffs[32] __attribute__((aligned(VLEN))) = -{ - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.1329913082916337,0.22308514882873062,0.347752862580421,0.4845759228057826,0.5724725619240282,0.5532613332075828,0.5041402176920755,0.4999998945071365, 
-0.500005251569411,0.494975832882496,0.44426898861108216,0.42865769845972046,0.5186084804556764,0.6556781472810073,0.7780379623543565,0.8670752648575938, + { + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.1329913082916337, + 0.22308514882873062, + 0.347752862580421, + 0.4845759228057826, + 0.5724725619240282, + 0.5532613332075828, + 0.5041402176920755, + 0.4999998945071365, + 0.500005251569411, + 0.494975832882496, + 0.44426898861108216, + 0.42865769845972046, + 0.5186084804556764, + 0.6556781472810073, + 0.7780379623543565, + 0.8670752648575938, }; static const float c1_coeffs[32] __attribute__((aligned(VLEN))) = -{ - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0595948414501292,0.11153317908159224,0.19545701719511055,0.3058925677063833,0.3932668307015573,0.3630691859433203,0.26302954631996744,0.2499155333713503, -0.24983690256810576,0.26551386754654915,0.3670764533308477,0.39196882072648825,0.3030372911476408,0.19296191313371913,0.11084562978488391,0.059559556604464964, + { + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0595948414501292, + 0.11153317908159224, + 0.19545701719511055, + 0.3058925677063833, + 0.3932668307015573, + 0.3630691859433203, + 0.26302954631996744, + 0.2499155333713503, + 0.24983690256810576, + 0.26551386754654915, + 0.3670764533308477, + 0.39196882072648825, + 0.3030372911476408, + 0.19296191313371913, + 0.11084562978488391, + 0.059559556604464964, }; static const float c2_coeffs[32] __attribute__((aligned(VLEN))) = -{ - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.010207999856103376,0.02144807112969563,0.04266485934992188,0.07616157468726052,0.10882760873715347,0.09125379784995667,0.013872106909816257,-0.0008786208359828815, 
-0.0011993845621092196,-0.01645080326288375,-0.09367947263571219,-0.10827006684348266,-0.07520301291634655,-0.04198514892887826,-0.021290356584896874,-0.010200991240527542, + { + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.010207999856103376, + 0.02144807112969563, + 0.04266485934992188, + 0.07616157468726052, + 0.10882760873715347, + 0.09125379784995667, + 0.013872106909816257, + -0.0008786208359828815, + 0.0011993845621092196, + -0.01645080326288375, + -0.09367947263571219, + -0.10827006684348266, + -0.07520301291634655, + -0.04198514892887826, + -0.021290356584896874, + -0.010200991240527542, }; static const float c3_coeffs[32] __attribute__((aligned(VLEN))) = -{ - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0007896351019423816,0.0018718593077865326,0.004259190313167949,0.008784166436796144,0.014228201960903939,0.009727536748893095,-0.01721317464724529,-0.023762851116001377, --0.02424226654277249,-0.01604104065157868,0.010376786273973133,0.014122038833203628,0.008641365746408176,0.004176981844803722,0.0018557930308154783,0.0007890167735032168, + { + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0007896351019423816, + 0.0018718593077865326, + 0.004259190313167949, + 0.008784166436796144, + 0.014228201960903939, + 0.009727536748893095, + -0.01721317464724529, + -0.023762851116001377, + -0.02424226654277249, + -0.01604104065157868, + 0.010376786273973133, + 0.014122038833203628, + 0.008641365746408176, + 0.004176981844803722, + 0.0018557930308154783, + 0.0007890167735032168, }; static const float c4_coeffs[32] __attribute__((aligned(VLEN))) = -{ - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
-2.3213858349988003e-05,6.232838199801025e-05,0.0001632037964535633,0.0003928983460811959,0.0007341577078787206,0.0003053082875419616,-0.003254838747910248,-0.004021655986643196, -0.004258314078650583,0.0030578644020607566,-0.00037014803880675387,-0.0007265964578827031,-0.0003849331969038772,-0.00015947916435728337,-6.171511304866758e-05,-2.319341439172678e-05, + { + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 2.3213858349988003e-05, + 6.232838199801025e-05, + 0.0001632037964535633, + 0.0003928983460811959, + 0.0007341577078787206, + 0.0003053082875419616, + -0.003254838747910248, + -0.004021655986643196, + 0.004258314078650583, + 0.0030578644020607566, + -0.00037014803880675387, + -0.0007265964578827031, + -0.0003849331969038772, + -0.00015947916435728337, + -6.171511304866758e-05, + -2.319341439172678e-05, }; /** @@ -151,8 +288,7 @@ static const float c4_coeffs[32] __attribute__((aligned(VLEN))) = * @param[in] length Number of elements in input/output arrays. * @return Returns 0 on successful execution. Otherwise -1. */ -int32_t hvx_silu_af(float *restrict input, float *restrict output, uint32_t size) -{ +int32_t hvx_silu_af(float *restrict input, float *restrict output, uint32_t size) { HVX_Vector *input_v_ptr; HVX_UVector *output_v_ptr; HVX_Vector input_min_v_f; @@ -191,13 +327,12 @@ int32_t hvx_silu_af(float *restrict input, float *restrict output, uint32_t size HVX_Vector f8, f_8; /* Check input arguments. 
Return error status if some argument has invalid value */ - if ((input == 0) || (output == 0) || (size == 0)) - { + if ((input == 0) || (output == 0) || (size == 0)) { return -1; } - input_v_ptr = (HVX_Vector *) input; - output_v_ptr = (HVX_UVector *) output; + input_v_ptr = (HVX_Vector *)input; + output_v_ptr = (HVX_UVector *)output; f8 = Q6_V_vsplat_R(float_to_bits(8.0f)); f_8 = Q6_V_vsplat_R(float_to_bits(-8.0f)); @@ -267,23 +402,20 @@ int32_t hvx_silu_af(float *restrict input, float *restrict output, uint32_t size * Handle number of whole vectors in input data. * Don't process last vector in order to avoid out-of-boundary load. */ - for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) - { + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { block = Q6_R_min_RR(i, BLOCK_SIZE); l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); - if (l2fetch_block > 0) - { + if (l2fetch_block > 0) { l2fetch(input_v_ptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); } /* Process one vector at a time */ - for (int32_t j = 0; j < block; ++j) - { + for (int32_t j = 0; j < block; ++j) { slinec = *input_v_ptr++; /* Compose vector of input data from slinec and slinep */ - sline = Q6_V_valign_VVR(slinec, slinep, (size_t) input); + sline = Q6_V_valign_VVR(slinec, slinep, (size_t)input); /* Shift input range from [input_min, input_max] to [0, input_max - input_min] */ input_shifted_v_qf32 = Q6_Vqf32_vsub_VsfVsf(sline, input_min_v_f); @@ -340,7 +472,7 @@ int32_t hvx_silu_af(float *restrict input, float *restrict output, uint32_t size // x * sigmod output_v = Q6_Vqf32_vmpy_Vqf32Vqf32(input_v_qf32, output_v); - + HVX_Vector out_v = Q6_Vsf_equals_Vqf32(output_v); HVX_VectorPred islf8 = Q6_Q_vcmp_gt_VsfVsf(sline, f8); @@ -349,7 +481,6 @@ int32_t hvx_silu_af(float *restrict input, float *restrict output, uint32_t size HVX_VectorPred islf_8 = Q6_Q_vcmp_gt_VsfVsf(f_8, sline); out_v = Q6_V_vmux_QVV(islf_8, zero_v_sf, out_v); - /* Store results to the output 
buffer and convert from qf32 to sf */ *((HVX_UVector *)(output_v_ptr++)) = out_v; @@ -359,10 +490,9 @@ int32_t hvx_silu_af(float *restrict input, float *restrict output, uint32_t size } /* Handle last whole vector from input data */ - if (vectors_in_rounddown > 0) - { + if (vectors_in_rounddown > 0) { slinec = is_aligned(input_v_ptr, VLEN) && leftover == 0 ? slinep : *input_v_ptr++; - sline = Q6_V_valign_VVR(slinec, slinep, (size_t) input); + sline = Q6_V_valign_VVR(slinec, slinep, (size_t)input); /* Shift input range from [input_min, input_max] to [0, input_max - input_min] */ input_shifted_v_qf32 = Q6_Vqf32_vsub_VsfVsf(sline, input_min_v_f); @@ -433,13 +563,10 @@ int32_t hvx_silu_af(float *restrict input, float *restrict output, uint32_t size } /* Handle leftover elements */ - if (leftover > 0) - { - slinec = (is_in_one_chunk(input_v_ptr, leftover_size, VLEN) - ? slinep - : *input_v_ptr++); + if (leftover > 0) { + slinec = (is_in_one_chunk(input_v_ptr, leftover_size, VLEN) ? slinep : *input_v_ptr++); - sline = Q6_V_valign_VVR(slinec, slinep, (size_t) input); + sline = Q6_V_valign_VVR(slinec, slinep, (size_t)input); /* Shift input range from [input_min, input_max] to [0, input_max - input_min] */ input_shifted_v_qf32 = Q6_Vqf32_vsub_VsfVsf(sline, input_min_v_f); @@ -510,41 +637,180 @@ int32_t hvx_silu_af(float *restrict input, float *restrict output, uint32_t size return 0; } - static const float fp16_c0_coeffs[32] __attribute__((aligned(VLEN))) = -{ - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.13239719960243818,0.2216255210749415,0.3447664743728659,0.48137452032585476,0.5716299228719798,0.5547323231605259,0.5046287748870234,0.4999985574626892, -0.5000036514755082,0.49475652448004626,0.4441393352532763,0.428500379952032,0.5173297285470642,0.6541461039833616,0.7783931007462818,0.8678015179911097, + { + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 
0.13239719960243818, + 0.2216255210749415, + 0.3447664743728659, + 0.48137452032585476, + 0.5716299228719798, + 0.5547323231605259, + 0.5046287748870234, + 0.4999985574626892, + 0.5000036514755082, + 0.49475652448004626, + 0.4441393352532763, + 0.428500379952032, + 0.5173297285470642, + 0.6541461039833616, + 0.7783931007462818, + 0.8678015179911097, }; static const float fp16_c1_coeffs[32] __attribute__((aligned(VLEN))) = -{ - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.05928005756790343,0.11063222460270064,0.1932879057003057,0.30302440212086995,0.3922924462181049,0.36546332659415875,0.2644148210990377,0.24989020912329707, -0.2498532691910313,0.2661055781198988,0.36728015359480604,0.39215270010450015,0.3041825601732039,0.1940762094668647,0.11061794856987572,0.059174800917353595, + { + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.05928005756790343, + 0.11063222460270064, + 0.1932879057003057, + 0.30302440212086995, + 0.3922924462181049, + 0.36546332659415875, + 0.2644148210990377, + 0.24989020912329707, + 0.2498532691910313, + 0.2661055781198988, + 0.36728015359480604, + 0.39215270010450015, + 0.3041825601732039, + 0.1940762094668647, + 0.11061794856987572, + 0.059174800917353595, }; static const float fp16_c2_coeffs[32] __attribute__((aligned(VLEN))) = -{ - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.010145494303219278,0.02123968384425681,0.04207468332514667,0.07519946712591977,0.10840620196267145,0.09270738184406795,0.015322371881818012,-0.0009948273994921822, -0.0011544907060402412,-0.017040517565094934,-0.09379878876657094,-0.10835043868732394,-0.07558705272699548,-0.04228875316413285,-0.021235740718738055,-0.010124599879590107, + { + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.010145494303219278, + 0.02123968384425681, + 0.04207468332514667, + 
0.07519946712591977, + 0.10840620196267145, + 0.09270738184406795, + 0.015322371881818012, + -0.0009948273994921822, + 0.0011544907060402412, + -0.017040517565094934, + -0.09379878876657094, + -0.10835043868732394, + -0.07558705272699548, + -0.04228875316413285, + -0.021235740718738055, + -0.010124599879590107, }; static const float fp16_c3_coeffs[32] __attribute__((aligned(VLEN))) = -{ - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0007841223015974933,0.001850453397354219,0.004187899308371771,0.008640952434084206,0.01414741414964877,0.010117749275618,-0.01654848996354919,-0.02395108399453624, --0.024199111971064446,-0.015783556879607072,0.010407672131558174,0.014137608186323335,0.008698510795258909,0.004213708431213342,0.0018499827774393985,0.0007822799742289481, + { + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0007841223015974933, + 0.001850453397354219, + 0.004187899308371771, + 0.008640952434084206, + 0.01414741414964877, + 0.010117749275618, + -0.01654848996354919, + -0.02395108399453624, + -0.024199111971064446, + -0.015783556879607072, + 0.010407672131558174, + 0.014137608186323335, + 0.008698510795258909, + 0.004213708431213342, + 0.0018499827774393985, + 0.0007822799742289481, }; static const float fp16_c4_coeffs[32] __attribute__((aligned(VLEN))) = -{ - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -2.3031641204975905e-05,6.150442488966733e-05,0.00015997783736818624,0.00038491646239693526,0.0007283649599237781,0.00034439150914392054,-0.003142246198646662,-0.004120389580321761, -0.004246050162553198,0.0030162727520777893,-0.00037312974308425725,-0.0007277242855014247,-0.00038811687679772674,-0.0001611434776868886,-6.14837984586862e-05,-2.297076123375133e-05, + { + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 2.3031641204975905e-05, + 
6.150442488966733e-05, + 0.00015997783736818624, + 0.00038491646239693526, + 0.0007283649599237781, + 0.00034439150914392054, + -0.003142246198646662, + -0.004120389580321761, + 0.004246050162553198, + 0.0030162727520777893, + -0.00037312974308425725, + -0.0007277242855014247, + -0.00038811687679772674, + -0.0001611434776868886, + -6.14837984586862e-05, + -2.297076123375133e-05, }; /** @@ -554,8 +820,7 @@ static const float fp16_c4_coeffs[32] __attribute__((aligned(VLEN))) = * @param[in] length Number of elements in input/output arrays. * @return Returns 0 on successful execution. Otherwise -1. */ -int32_t hvx_silu_ahf(__fp16 *restrict input, __fp16 *restrict output, uint32_t size) -{ +int32_t hvx_silu_ahf(__fp16 *restrict input, __fp16 *restrict output, uint32_t size) { HVX_Vector *input_v_ptr; HVX_UVector *output_v_ptr; HVX_Vector input_min_v_hf; @@ -594,13 +859,12 @@ int32_t hvx_silu_ahf(__fp16 *restrict input, __fp16 *restrict output, uint32_t s HVX_Vector c4_coeff_v; /* Check input arguments. Return error status if some argument has invalid value */ - if ((input == 0) || (output == 0) || (size == 0)) - { + if ((input == 0) || (output == 0) || (size == 0)) { return -1; } - input_v_ptr = (HVX_Vector *) input; - output_v_ptr = (HVX_UVector *) output; + input_v_ptr = (HVX_Vector *)input; + output_v_ptr = (HVX_UVector *)output; /* * If input data is not aligned to HVX vector size, compose aligned vectors @@ -671,19 +935,16 @@ int32_t hvx_silu_ahf(__fp16 *restrict input, __fp16 *restrict output, uint32_t s * Handle number of whole vectors in input data. * Don't process last vector in order to avoid out-of-boundary load. 
*/ - for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) - { + for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) { block = Q6_R_min_RR(i, BLOCK_SIZE); l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); - if (l2fetch_block > 0) - { + if (l2fetch_block > 0) { l2fetch(input_v_ptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); } /* Process one vector at a time */ - for (int32_t j = 0; j < block; ++j) - { + for (int32_t j = 0; j < block; ++j) { slinec = *input_v_ptr++; /* Compose vector of input data from slinec and slinep */ @@ -776,17 +1037,15 @@ int32_t hvx_silu_ahf(__fp16 *restrict input, __fp16 *restrict output, uint32_t s // output_v = Q6_Vqf16_vmpy_Vqf16Vqf16(output_v, input_v_qf16); // *output_v_ptr++ = Q6_Vhf_equals_Vqf16(output_v); - /* Prepare slinep for next iteration */ slinep = slinec; } } /* Handle last whole vector from input data */ - if (vectors_in_rounddown > 0) - { + if (vectors_in_rounddown > 0) { slinec = is_aligned(input_v_ptr, VLEN) && leftover == 0 ? slinep : *input_v_ptr++; - sline = Q6_V_valign_VVR(slinec, slinep, (size_t) input); + sline = Q6_V_valign_VVR(slinec, slinep, (size_t)input); tmp_v = Q6_Vh_vdeal_Vh(sline); /* Shift input range from [input_min, input_max] to [0, input_max - input_min] */ input_shifted_v_hf = Q6_Vqf16_vsub_VhfVhf(tmp_v, input_min_v_hf); @@ -862,11 +1121,8 @@ int32_t hvx_silu_ahf(__fp16 *restrict input, __fp16 *restrict output, uint32_t s } /* Handle leftover elements */ - if (leftover > 0) - { - slinec = (is_in_one_chunk(input_v_ptr, leftover_size, VLEN) - ? slinep - : *input_v_ptr++); + if (leftover > 0) { + slinec = (is_in_one_chunk(input_v_ptr, leftover_size, VLEN) ? 
slinep : *input_v_ptr++); sline = Q6_V_valign_VVR(slinec, slinep, (size_t)input); tmp_v = Q6_Vh_vdeal_Vh(sline); @@ -949,124 +1205,110 @@ int32_t hvx_silu_ahf(__fp16 *restrict input, __fp16 *restrict output, uint32_t s #endif -template -GraphStatus siluImpl(TensorType& out_0, - const TensorType& in_0) +template +GraphStatus siluImpl(TensorType &out_0, + const TensorType &in_0) { - /* - * add code here - * */ - /* - * To have good performance and stability, it is required to avoid heap memory - * allocation in this function. The heap memory allocation includes but not - * limited to calling malloc, operator new, constructing STL container objects - * like std::vector with default allocator, and adding items like calling - * std::vector::push_back to STL container objects with default allocator. - * - * Please check in SDK documentation for more information. - */ + /* + * add code here + * */ + /* + * To have good performance and stability, it is required to avoid heap memory + * allocation in this function. The heap memory allocation includes but not + * limited to calling malloc, operator new, constructing STL container objects + * like std::vector with default allocator, and adding items like calling + * std::vector::push_back to STL container objects with default allocator. + * + * Please check in SDK documentation for more information. + */ #ifdef REFERENCE_OP - debuglog("silu execute... inval=(%d)", in_0.get_dtype()); - debuglog("silu execute... inval=(%d)", out_0.get_dtype()); - + debuglog("silu execute... inval=(%d)", in_0.get_dtype()); + debuglog("silu execute... inval=(%d)", out_0.get_dtype()); + out_0.set_dims(in_0); // NHWC auto [b_in, h_in, w_in, d_in] = in_0.dims(); for (Idx b = 0; b < b_in; b++) { - for (Idx h = 0; h < h_in; h++) { - for (Idx w = 0; w < w_in; w++) { - // SiLU - for (Idx d = 0; d < d_in; d++) { - float inval = in_0(b, h, w, d); - float outval = 1 / (1 + expf(-inval)); - - - debuglog("silu execute... 
inval=(%f)", inval); - debuglog("silu execute... outval=(%f)", outval); - - out_0(b, h, w, d) = inval * outval; - - } + for (Idx h = 0; h < h_in; h++) { + for (Idx w = 0; w < w_in; w++) { + // SiLU + for (Idx d = 0; d < d_in; d++) { + float inval = in_0(b, h, w, d); + float outval = 1 / (1 + expf(-inval)); + + debuglog("silu execute... inval=(%f)", inval); + debuglog("silu execute... outval=(%f)", outval); + + out_0(b, h, w, d) = inval * outval; + } + } } - } } #else // HVX Method -- FP32 Version out_0.set_dims(in_0); - + DType dtype = in_0.get_dtype(); auto [b_in, h_in, w_in, d_in] = in_0.dims(); + size_t size = b_in * h_in * w_in * d_in; - size_t size = b_in*h_in*w_in*d_in; - // Noticable size >= 128 - + // SiLU inval / (1 + expf(-inval)); // sigmod 1.0/(exp(-x)+1.0) // SiLU inval * sigmod if (dtype == DType::Float16) { - - // NHWC - auto in_ptr = (__fp16*)in_0.raw_data_const(); - auto out_ptr = (__fp16*)out_0.raw_data(); - hvx_silu_ahf(in_ptr, out_ptr, size); + // NHWC + auto in_ptr = (__fp16 *)in_0.raw_data_const(); + auto out_ptr = (__fp16 *)out_0.raw_data(); + hvx_silu_ahf(in_ptr, out_ptr, size); } else { - // NHWC - auto in_ptr = (float*)in_0.raw_data_const(); - auto out_ptr = (float*)out_0.raw_data(); - hvx_silu_af(in_ptr, out_ptr, size); + // NHWC + auto in_ptr = (float *)in_0.raw_data_const(); + auto out_ptr = (float *)out_0.raw_data(); + hvx_silu_af(in_ptr, out_ptr, size); } return GraphStatus::Success; - - #endif #ifdef DEBUG for (Idx b = 0; b < b_in; b++) { - for (Idx h = 0; h < h_in; h++) { - for (Idx w = 0; w < w_in; w++) { - // SiLU - for (Idx d = 0; d < d_in; d++) { - float out_value = out_0(b, h, w, d); - debuglog("silu execute... outval=(%f)", out_value); - - } + for (Idx h = 0; h < h_in; h++) { + for (Idx w = 0; w < w_in; w++) { + // SiLU + for (Idx d = 0; d < d_in; d++) { + float out_value = out_0(b, h, w, d); + debuglog("silu execute... 
outval=(%f)", out_value); + } + } } - } } - -#endif - +#endif - return GraphStatus::Success; + return GraphStatus::Success; } -__attribute__((unused)) static float siluCostFunc(const Op *op) -{ - /* - * add code here - * */ +__attribute__((unused)) static float siluCostFunc(const Op *op) { + /* + * add code here + * */ - float cost = 0.0; // add cost computation here - return cost; + float cost = 0.0; // add cost computation here + return cost; } - - - - /* At the bottom of the op file, call END_PKG_OP_DEFINITION(), where is as BEGIN_PKG_OP_DEFINITION */ diff --git a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/SplitInput.cpp b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/SplitInput.cpp similarity index 61% rename from src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/SplitInput.cpp rename to mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/SplitInput.cpp index f055afc51..b33decb01 100755 --- a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/SplitInput.cpp +++ b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/SplitInput.cpp @@ -9,17 +9,15 @@ #include "QnnOpPackage.h" #include "HTP/core/simple_reg.h" - BEGIN_PKG_OP_DEFINITION(PKG_SplitInput); - // op execute function declarations -template -GraphStatus splitinputImpl(TensorType& out_0, - TensorType& out_1, - const TensorType& in_0, +template +GraphStatus splitinputImpl(TensorType &out_0, + TensorType &out_1, + const TensorType &in_0, const TensorType1 &in_1, - const Tensor& num); + const Tensor &num); // forward declaration of sample cost function static float splitinputCostFunc(const Op *op); @@ -64,11 +62,11 @@ DEF_PACKAGE_OP((splitinputImpl), "SplitInput") * one definition per op, and this is optional * syntax: DEF_PACKAGE_PARAM_ORDER(OP,PARAM1,MANDATORY1,DEFAULT1,PARAM2,MANDATORY2,DEFAULT2...) 
* one or more parameters can be specified for each op - * order of parameters listed determines the order of parameters passed into op execution functions + * order of parameters listed determines the order of parameters passed into op execution functions * if an op does not have a parameter order definition, parameter order passed into Qnn_addNode * will be passed into op execution functions * if an op has a parameter order definition, any parameter passed into Qnn_addNode with unlisted - * name will be abandoned + * name will be abandoned * if two or more op packages with the same package name will be registered, they cannot list * conflicting parameter orders * PARAM refers to parameter name as a string literal @@ -82,84 +80,74 @@ DEF_PACKAGE_OP((splitinputImpl), "SplitInput") * Qnn_addNode */ - /* execute functions for ops */ -template -GraphStatus splitinputImpl(TensorType& out_0, - TensorType& out_1, - const TensorType& in_0, +template +GraphStatus splitinputImpl(TensorType &out_0, + TensorType &out_1, + const TensorType &in_0, const TensorType1 &in_1, - const Tensor& num) -{ - /* - * add code here - * */ - /* - * To have good performance and stability, it is required to avoid heap memory - * allocation in this function. The heap memory allocation includes but not - * limited to calling malloc, operator new, constructing STL container objects - * like std::vector with default allocator, and adding items like calling - * std::vector::push_back to STL container objects with default allocator. - * - * Please check in SDK documentation for more information. - */ - - // default is two. - - size_t o_size = in_1(0,0,0,0); - size_t x_size = in_1(0,0,0,1); + const Tensor &num) { + /* + * add code here + * */ + /* + * To have good performance and stability, it is required to avoid heap memory + * allocation in this function. 
The heap memory allocation includes but not + * limited to calling malloc, operator new, constructing STL container objects + * like std::vector with default allocator, and adding items like calling + * std::vector::push_back to STL container objects with default allocator. + * + * Please check in SDK documentation for more information. + */ - auto [b_in, h_in, w_in, d_in] = in_0.dims(); - - const size_t dims_0[] = {b_in, o_size, w_in, d_in}; - const size_t dims_1[] = {b_in, x_size, w_in, d_in}; + // default is two. - out_0.set_dims(dims_0); - out_1.set_dims(dims_1); + size_t o_size = in_1(0, 0, 0, 0); + size_t x_size = in_1(0, 0, 0, 1); - DType dtype = in_0.get_dtype(); - uint32_t bitwidth = 4; + auto [b_in, h_in, w_in, d_in] = in_0.dims(); - if (dtype == DType::QUInt8 || dtype == DType::QInt8) { + const size_t dims_0[] = {b_in, o_size, w_in, d_in}; + const size_t dims_1[] = {b_in, x_size, w_in, d_in}; - bitwidth = 1; + out_0.set_dims(dims_0); + out_1.set_dims(dims_1); - } else if (dtype == DType::Float16) { + DType dtype = in_0.get_dtype(); + uint32_t bitwidth = 4; - bitwidth = 2; - } else if (dtype == DType::Float32) { + if (dtype == DType::QUInt8 || dtype == DType::QInt8) { + bitwidth = 1; - bitwidth = 4; - } + } else if (dtype == DType::Float16) { + bitwidth = 2; + } else if (dtype == DType::Float32) { + bitwidth = 4; + } - const uint8_t *in_ptr = (uint8_t*)in_0.raw_data_const(); + const uint8_t *in_ptr = (uint8_t *)in_0.raw_data_const(); - uint8_t *out_ptr_0 = (uint8_t*)out_0.raw_data(); - uint8_t *out_ptr_1 = (uint8_t*)out_1.raw_data(); + uint8_t *out_ptr_0 = (uint8_t *)out_0.raw_data(); + uint8_t *out_ptr_1 = (uint8_t *)out_1.raw_data(); - memcpy(out_ptr_0, in_ptr, b_in * o_size * w_in * d_in * bitwidth); - in_ptr += b_in * o_size * w_in * d_in * bitwidth; + memcpy(out_ptr_0, in_ptr, b_in * o_size * w_in * d_in * bitwidth); + in_ptr += b_in * o_size * w_in * d_in * bitwidth; - memcpy(out_ptr_1, in_ptr, b_in * x_size * w_in * d_in * bitwidth * 4); + 
memcpy(out_ptr_1, in_ptr, b_in * x_size * w_in * d_in * bitwidth * 4); - return GraphStatus::Success; + return GraphStatus::Success; } -__attribute__((unused)) static float splitinputCostFunc(const Op *op) -{ - /* - * add code here - * */ +__attribute__((unused)) static float splitinputCostFunc(const Op *op) { + /* + * add code here + * */ - float cost = 0.0; // add cost computation here - return cost; + float cost = 0.0; // add cost computation here + return cost; } - - - - /* At the bottom of the op file, call END_PKG_OP_DEFINITION(), where is as BEGIN_PKG_OP_DEFINITION */ diff --git a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/WNop.cpp b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/WNop.cpp similarity index 61% rename from src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/WNop.cpp rename to mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/WNop.cpp index 547e53589..2a7c1fd1a 100755 --- a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/WNop.cpp +++ b/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/WNop.cpp @@ -12,17 +12,15 @@ #include #include - BEGIN_PKG_OP_DEFINITION(PKG_WNop); - // op execute function declarations -template -GraphStatus wnopImpl(TensorType& out_0, - TensorType1 &sync_var, - const TensorType& in_0, - const TensorType& in_1, - const Tensor& sync_type); +template +GraphStatus wnopImpl(TensorType &out_0, + TensorType1 &sync_var, + const TensorType &in_0, + const TensorType &in_1, + const Tensor &sync_type); // forward declaration of sample cost function static float wnopCostFunc(const Op *op); @@ -67,11 +65,11 @@ DEF_PACKAGE_OP((wnopImpl), "WNop") * one definition per op, and this is optional * syntax: DEF_PACKAGE_PARAM_ORDER(OP,PARAM1,MANDATORY1,DEFAULT1,PARAM2,MANDATORY2,DEFAULT2...) 
* one or more parameters can be specified for each op - * order of parameters listed determines the order of parameters passed into op execution functions + * order of parameters listed determines the order of parameters passed into op execution functions * if an op does not have a parameter order definition, parameter order passed into Qnn_addNode * will be passed into op execution functions * if an op has a parameter order definition, any parameter passed into Qnn_addNode with unlisted - * name will be abandoned + * name will be abandoned * if two or more op packages with the same package name will be registered, they cannot list * conflicting parameter orders * PARAM refers to parameter name as a string literal @@ -84,109 +82,88 @@ DEF_PACKAGE_OP((wnopImpl), "WNop") * graph construction will skip this parameter when this parameter is not provided at * Qnn_addNode */ -DEF_PACKAGE_PARAM_ORDER("WNop", +DEF_PACKAGE_PARAM_ORDER("WNop", "sync_type", true, nullptr) - /* execute functions for ops */ -template -GraphStatus wnopImpl(TensorType& out_0, - TensorType1 &sync_var, - const TensorType& in_0, - const TensorType& in_1, - const Tensor& sync_type) +template +GraphStatus wnopImpl(TensorType &out_0, + TensorType1 &sync_var, + const TensorType &in_0, + const TensorType &in_1, + const Tensor &sync_type) { - /* - * add code here - * */ - /* - * To have good performance and stability, it is required to avoid heap memory - * allocation in this function. The heap memory allocation includes but not - * limited to calling malloc, operator new, constructing STL container objects - * like std::vector with default allocator, and adding items like calling - * std::vector::push_back to STL container objects with default allocator. - * - * Please check in SDK documentation for more information. 
- */ - - - out_0.set_dims(in_0); - - auto sync_type_ = sync_type(0,0,0,0); - - - // sync_type == 0 sending signal to CPU - // sync_type == 1 waiting signal from CPU - - DType dtype = in_0.get_dtype(); - uint32_t bitwidth = 4; - - if (dtype == DType::QUInt8) { - + /* + * add code here + * */ + /* + * To have good performance and stability, it is required to avoid heap memory + * allocation in this function. The heap memory allocation includes but not + * limited to calling malloc, operator new, constructing STL container objects + * like std::vector with default allocator, and adding items like calling + * std::vector::push_back to STL container objects with default allocator. + * + * Please check in SDK documentation for more information. + */ + + out_0.set_dims(in_0); + + auto sync_type_ = sync_type(0, 0, 0, 0); + + // sync_type == 0 sending signal to CPU + // sync_type == 1 waiting signal from CPU + + DType dtype = in_0.get_dtype(); + uint32_t bitwidth = 4; + + if (dtype == DType::QUInt8) { bitwidth = 1; } else if (dtype == DType::Float16) { - bitwidth = 2; } else if (dtype == DType::Float32) { - bitwidth = 4; } - if (sync_type_ == 0) { - - auto [b_in, h_in, w_in, d_in] = in_0.dims(); + if (sync_type_ == 0) { + auto [b_in, h_in, w_in, d_in] = in_0.dims(); - auto in_ptr = (void*)in_0.raw_data_const(); - auto out_ptr = (void*)out_0.raw_data(); + auto in_ptr = (void *)in_0.raw_data_const(); + auto out_ptr = (void *)out_0.raw_data(); - memcpy(out_ptr, in_ptr, b_in * h_in * w_in * d_in * bitwidth); + memcpy(out_ptr, in_ptr, b_in * h_in * w_in * d_in * bitwidth); - sync_var(0,0,0,0) = 1; + sync_var(0, 0, 0, 0) = 1; - } else if (sync_type_ == 1) { + } else if (sync_type_ == 1) { + while (in_1(0, 0, 0, 0) == 0) { + Q6_V_vzero(); + } - while (in_1(0,0,0,0) == 0) { + auto [b_in, h_in, w_in, d_in] = in_0.dims(); - Q6_V_vzero(); + auto in_ptr = (void *)in_0.raw_data_const(); + auto out_ptr = (void *)out_0.raw_data(); + memcpy(out_ptr, in_ptr, b_in * h_in * w_in * d_in * 
bitwidth); } - auto [b_in, h_in, w_in, d_in] = in_0.dims(); - - auto in_ptr = (void*)in_0.raw_data_const(); - auto out_ptr = (void*)out_0.raw_data(); - - memcpy(out_ptr, in_ptr, b_in * h_in * w_in * d_in * bitwidth); - - } - - - - - - - return GraphStatus::Success; + return GraphStatus::Success; } -__attribute__((unused)) static float wnopCostFunc(const Op *op) -{ - /* - * add code here - * */ +__attribute__((unused)) static float wnopCostFunc(const Op *op) { + /* + * add code here + * */ - float cost = 0.0; // add cost computation here - return cost; + float cost = 0.0; // add cost computation here + return cost; } - - - - /* At the bottom of the op file, call END_PKG_OP_DEFINITION(), where is as BEGIN_PKG_OP_DEFINITION */ diff --git a/mllm/backends/qnn/QNNBackend.cpp b/mllm/backends/qnn/QNNBackend.cpp new file mode 100755 index 000000000..b17ef9e12 --- /dev/null +++ b/mllm/backends/qnn/QNNBackend.cpp @@ -0,0 +1,1340 @@ +#include + +#include +#include +#include + +#include "Backend.hpp" +#include "Context.hpp" +#include "Log.h" +#include "Module.hpp" +#include "Layer.hpp" +#include "OpDefined.hpp" +#include "QNNBackend.hpp" +#include "QNNUtils.hpp" +#include "QNNModel.hpp" +#include "QNNMemoryManager.hpp" +#include "QnnTypes.h" +#include "HTP/QnnHtpGraph.h" +#include "HTP/QnnHtpDevice.h" + +#include "Types.hpp" +#include "op/QNNAdd.hpp" +#include "op/QNNCausalMask.hpp" +#include "op/QNNDequantizeAdd.hpp" +#include "op/QNNGELU.hpp" +#include "op/QNNQuickGELU.hpp" +#include "op/QNNLinearINT8.hpp" +#include "op/QNNMatmul.hpp" +#include "op/QNNMul.hpp" +#include "op/QNNLayerNorm.hpp" +#include "op/QNNRMSNorm.hpp" +#include "op/QNNRoPE.hpp" +#include "op/QNNRoPESimple.hpp" +#include "op/QNNScale.hpp" +#include "op/QNNSiLU.hpp" +#include "op/QNNSiLUHigh.hpp" +#include "op/QNNSoftMax.hpp" +#include "op/QNNSplit.hpp" +#include "op/QNNSubGraphFinalize.hpp" +#include "op/QNNSubGraphStart.hpp" +#include "op/QNNView.hpp" +#include "op/QNNReLU.hpp" +#include 
"op/QNNQuantize.hpp" +#include "op/QNNDequantize.hpp" +#include "op/QNNMergeOutput.hpp" +#include "op/QNNSplitInput.hpp" +#include "op/QNNTranspose.hpp" +#include "op/QNNSuperSiLU.hpp" +#include "op/QNNIRoPE.hpp" + +#include "memory/MemInspect.hpp" + +#ifdef DEBUGPRINT +#include "Timing.hpp" +#endif + +// Flag to determine if Backend should node validation for each opNode added +#ifdef QNN_VALIDATE_NODE +#define DO_GRAPH_NODE_VALIDATIONS 1 +#else +#define DO_GRAPH_NODE_VALIDATIONS 0 +#endif + +namespace mllm { + +void QNNBackend::registerOps() { + addCreator(ADD, (QNNBackend::Creator *)new QNNAddCreator()); + addCreator(CAUSALMASK, (QNNBackend::Creator *)(new QNNCausalMaskCreator())); + addCreator(MATMUL, (QNNBackend::Creator *)(new QNNMatmulCreator())); + addCreator(RMSNORM, (QNNBackend::Creator *)(new QNNRMSNormCreator())); + addCreator(LAYERNORM, (QNNBackend::Creator *)(new QNNLayerNormCreator())); + addCreator(ROPE, (QNNBackend::Creator *)(new QNNRoPECreator())); + addCreator(ROPESIMPLE, (QNNBackend::Creator *)(new QNNRoPESimpleCreator())); + addCreator(IROPE, (QNNBackend::Creator *)(new QNNIRoPECreator())); + addCreator(SCALE, (QNNBackend::Creator *)(new QNNScaleCreator())); + addCreator(SILU, (QNNBackend::Creator *)(new QNNSiLUCreator())); + addCreator(SILU_FULL_PRECISION, (QNNBackend::Creator *)(new QNNSiLUHighCreator())); + addCreator(SOFTMAX, (QNNBackend::Creator *)(new QNNSoftMaxCreator())); + addCreator(LINEAR, (QNNBackend::Creator *)(new QNNLinearINT8Creator())); + addCreator(LINEARINT8, (QNNBackend::Creator *)(new QNNLinearINT8Creator())); + addCreator(MUL, (QNNBackend::Creator *)(new QNNMulCreator())); + addCreator(VIEW, (QNNBackend::Creator *)(new QNNViewCreator())); + addCreator(RELU, (QNNBackend::Creator *)(new QNNReLUCreator())); + addCreator(OP_GELU, (QNNBackend::Creator *)(new QNNGELUCreator())); + addCreator(QUICKGLUE, (QNNBackend::Creator *)(new QNNQuickGELUCreator())); + addCreator(QUANTIZE, (QNNBackend::Creator *)(new QNNQuantizeCreator())); 
+ addCreator(DEQUANTIZE, (QNNBackend::Creator *)(new QNNDequantizeCreator())); + addCreator(DEQUANTIZEADD, (QNNBackend::Creator *)(new QNNDequantizeAddCreator())); + addCreator(MERGEOUTPUT, (QNNBackend::Creator *)(new QNNMergeOutputCreator())); + addCreator(SPLITINPUT, (QNNBackend::Creator *)(new QNNSplitInputCreator())); + addCreator(TRANSPOSE, (QNNBackend::Creator *)(new QNNTransposeCreator())); + addCreator(SUPERSILU, (QNNBackend::Creator *)(new QNNSuperSiLUCreator())); + addCreator(SUBGRAPHSTART, (QNNBackend::Creator *)(new QNNSubGraphStartCreator())); + addCreator(SUBGRAPHFINALIZE, (QNNBackend::Creator *)(new QNNSubGraphFinalizeCreator())); + addCreator(SPLIT, (QNNBackend::Creator *)(new QNNSplitCreator())); +} + +QNNBackend::QNNBackend(shared_ptr mm) : + Backend(mm) { + type_ = BackendType::MLLM_QNN; // used in Tensor.device() + + QnnLog_Level_t qnnLogLevel = QNN_LOG_LEVEL_WARN; // QNN_LOG_LEVEL_INFO; // QNN_LOG_LEVEL_WARN; // default QNN log level + m_profilingLevel = ProfilingLevel::DETAILED; + m_debug = false; // when set true, NATIVE tensor will be regared as APP_READ tensor + + loadQNNSymbol(); + loadQNNSystemSymbol(); + + mRuntime = QNNRuntime::create(m_profilingLevel, qnnLogLevel); + if (!mRuntime) { + MLLM_LOG_ERROR_STREAM << "Failed to create QNN Runtime\n"; + exit(1); + } + + // check QNN capability + char *backendBuildId{nullptr}; + if (QNN_SUCCESS != mRuntime->qnnInterface.backendGetBuildId((const char **)&backendBuildId)) { + MLLM_LOG_ERROR_LEGACY("Unable to get build Id from the backend."); + } + MLLM_LOG_INFO_STREAM << "QNN Backend Build Id: " << (backendBuildId == nullptr ? 
"" : backendBuildId); + if (mRuntime->qnnInterface.propertyHasCapability(QNN_PROPERTY_TENSOR_SUPPORT_SPARSITY) == QNN_PROPERTY_SUPPORTED) { + MLLM_LOG_INFO("QNN backend supports tensor sparsity"); + } + if (mRuntime->qnnInterface.propertyHasCapability(QNN_PROPERTY_TENSOR_SUPPORT_DYNAMIC_DIMENSIONS) == QNN_PROPERTY_SUPPORTED) { + MLLM_LOG_INFO("QNN backend supports dynamic dimensions"); + } + if (mRuntime->qnnInterface.propertyHasCapability(QNN_PROPERTY_GRAPH_SUPPORT_EARLY_TERMINATION) == QNN_PROPERTY_SUPPORTED) { + MLLM_LOG_INFO("QNN backend supports early termination"); + } + + // register ops + this->registerOps(); + + bool contextStatus = false; + // check if the qnn_context.bin file exists + if (!std::filesystem::exists("qnn_context.bin")) { + contextStatus = mRuntime->createContext(m_context, nullptr); + } else { + contextStatus = mRuntime->retrieveContext(m_context, graphsInfo_, nullptr); + // set the flag to indicate that the context is loaded from cache + isFromCache = true; + // fill qnnModelIndexMap_ info according to graphsInfo_ + for (size_t i = 0; i < graphsInfo_.size(); i++) { + auto graphName = graphsInfo_[i]->graphName; + qnnModelIndexMap_.insert(std::make_pair(graphName, i)); + } + } + if (!contextStatus) { + MLLM_LOG_ERROR_STREAM << "Failed to create QNN context\n"; + exit(1); + } + + // assign context to qnn memory manager +#ifdef QNN_ARM + auto qnnMM = std::static_pointer_cast(mem_manager_); + qnnMM->setQnnInterfaceAndContext(mRuntime->qnnInterface, m_context); +#endif + + mPerf = QNNPerf::create(&mRuntime->qnnInterface); + mPerf->setPowerConfigBurst(); + mPerf->setRpcLatencyAndPolling(); +} + +QNNBackend::~QNNBackend() { + // free creaters in map_creator_ + for (auto &iter : map_creator_) { + delete iter.second; + } + // free qnn backend resource + mRuntime.release(); +} + +void QNNBackend::onSetUpStart(vector> &inputs, vector> &outputs, string graphName) { + // if the graph already exists, just update the qnnModelIndex_ and set the input and 
output buffers + if (qnnModelIndexMap_.find(graphName) != qnnModelIndexMap_.end()) { + qnnModelIndex_ = qnnModelIndexMap_[graphName]; + + inputBufferMap.insert(std::make_pair(graphName, std::vector(inputs.size()))); + outputBufferMap.insert(std::make_pair(graphName, std::vector())); + + currentInputBuffers = &inputBufferMap[graphName]; + currentOutputBuffers = &outputBufferMap[graphName]; + + // push input tensors to the buffer list + for (int i = 0; i < inputs.size(); i++) { + (*currentInputBuffers)[i] = inputs[i]->hostPtr(); + } + return; + } + // else, create a QNNModel to build graph + qnnModelIndex_ = qnnModels_.size(); + qnnModelIndexMap_.insert(std::make_pair(graphName, qnnModelIndex_)); + qnnModels_.push_back(QNNModel()); + + // initialize qnn graph info, set graph info, graph count + QnnHtpGraph_CustomConfig_t vtcmConfigInfo; + vtcmConfigInfo.option = QNN_HTP_GRAPH_CONFIG_OPTION_VTCM_SIZE; + vtcmConfigInfo.vtcmSizeInMB = 8; + QnnGraph_Config_t vtcmConfig; + vtcmConfig.option = QNN_GRAPH_CONFIG_OPTION_CUSTOM; + vtcmConfig.customConfig = &vtcmConfigInfo; + + // QnnHtpGraph_CustomConfig_t htpThreadConfig; + // htpThreadConfig.option = QNN_HTP_GRAPH_CONFIG_OPTION_NUM_HVX_THREADS; + // htpThreadConfig.numHvxThreads = 6; // set a number. 
MAX = number of HVX HW blocks for that SoC + // QnnGraph_Config_t threadConfig; + // threadConfig.option = QNN_GRAPH_CONFIG_OPTION_CUSTOM; + // threadConfig.customConfig = &htpThreadConfig; + + // supported in 2.34 + QnnHtpGraph_CustomConfig_t slcConfigInfo; + slcConfigInfo.option = QNN_HTP_GRAPH_CONFIG_OPTION_OPTIMIZATION; + slcConfigInfo.optimizationOption.type = QNN_HTP_GRAPH_OPTIMIZATION_TYPE_ENABLE_SLC_ALLOCATOR; + slcConfigInfo.optimizationOption.floatValue = 1; + QnnGraph_Config_t slcConfig; + slcConfig.option = QNN_GRAPH_CONFIG_OPTION_CUSTOM; + slcConfig.customConfig = &slcConfigInfo; + + const QnnGraph_Config_t *graphConfigList[] = {&vtcmConfig, &slcConfig, NULL}; + + ModelError_t err = MODEL_NO_ERROR; + if ((err = qnnModels_[qnnModelIndex_].initialize(mRuntime->backendHandle, + mRuntime->qnnInterface, + m_context, + graphName.c_str(), + m_debug, + DO_GRAPH_NODE_VALIDATIONS, + graphConfigList)) + != MODEL_NO_ERROR) { + MLLM_LOG_ERROR_STREAM << "QNNBackend graph initialization failed for graph: " << graphName + << " with error code: " << static_cast(err) << std::endl; + exit(1); + } + + for (auto &input : inputs) { + Qnn_DataType_t data_type; + auto quantizeDefined = QNN_DEFINITION_UNDEFINED; + auto quantizeType = QNN_QUANTIZATION_ENCODING_UNDEFINED; + float scale = 0.0f; + switch (input->dtype()) { + case MLLM_TYPE_F32: + data_type = QNN_DATATYPE_FLOAT_32; + break; + case MLLM_TYPE_F16: + data_type = QNN_DATATYPE_FLOAT_16; + break; + case MLLM_TYPE_I8: { + data_type = QNN_DATATYPE_SFIXED_POINT_8; + quantizeDefined = QNN_DEFINITION_DEFINED; + quantizeType = QNN_QUANTIZATION_ENCODING_SCALE_OFFSET; + scale = input->quant_param.scale; + break; + } + case MLLM_TYPE_I16: { + data_type = QNN_DATATYPE_SFIXED_POINT_16; + quantizeDefined = QNN_DEFINITION_DEFINED; + quantizeType = QNN_QUANTIZATION_ENCODING_SCALE_OFFSET; + scale = input->quant_param.scale; + break; + } + default: + MLLM_LOG_ERROR_STREAM << "[ERROR] QNNBackend not support dtype: " << input->dtype() << 
std::endl; + data_type = QNN_DATATYPE_FLOAT_32; + } + + uint32_t dimensionsInput[4] = { + static_cast(input->batch()), + static_cast(input->sequence()), + static_cast(input->head()), + static_cast(input->dimension()), + }; + + qnnModels_[qnnModelIndex_].addTensor(input->name().c_str(), + (Qnn_Tensor_t){ + .version = QNN_TENSOR_VERSION_1, + .v1 = { + .id = 0, + .name = input->name().c_str(), + .type = QNN_TENSOR_TYPE_APP_WRITE, + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = data_type, + .quantizeParams = {quantizeDefined, + quantizeType, + {.scaleOffsetEncoding = {.scale = scale, .offset = 0}}}, + .rank = 4, + .dimensions = dimensionsInput, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = nullptr, + .dataSize = 0}}}); + } + + // create a new inputBuffer and outputBuffer for the graph + inputBufferMap.insert(std::make_pair(graphName, std::vector(inputs.size()))); + outputBufferMap.insert(std::make_pair(graphName, std::vector())); + + currentInputBuffers = &inputBufferMap[graphName]; + currentOutputBuffers = &outputBufferMap[graphName]; + + // push input tensors to the buffer list + for (int i = 0; i < inputs.size(); i++) { + (*currentInputBuffers)[i] = inputs[i]->hostPtr(); + } +} + +bool QNNBackend::graphFinilize() { + // Populate the constructed graphs in provided output variables + GraphInfo_t *graphInfo = nullptr; + + // Graph finalize + CALL_QNN(getSingleGraphInfoFromModel(qnnModels_[qnnModelIndex_], &graphInfo)); + if (QNN_GRAPH_NO_ERROR != mRuntime->qnnInterface.graphFinalize(graphInfo->graph, mRuntime->profileHandle, nullptr)) { + return false; + } + CALL_QNN(qnnModels_[qnnModelIndex_].freeCachedTensors()); + if (ProfilingLevel::OFF != m_profilingLevel) { + extractBackendProfilingInfo(mRuntime->profileHandle); + } + graphsInfo_.push_back(graphInfo); + + return true; +} + +// finalize graph if needed, get qnn inputs and outputs tensors from graphInfo, register shared memory handles +void QNNBackend::onSetUpEnd(vector> &inputs, 
vector> &outputs, string graphName) { + // online graph building, finalize graph + if (!isFromCache) { + PRINT_MEMORY_USAGE("before graph finilize") + if (!graphFinilize()) { + MLLM_LOG_ERROR("Graph Finalization failure"); + exit(1); + } + PRINT_MEMORY_USAGE("after graph finilize") + } + + auto graphInfo = graphsInfo_[qnnModelIndex_]; + Qnn_Tensor_t *qnnInputs = graphInfo->inputTensors; + Qnn_Tensor_t *qnnOutputs = graphInfo->outputTensors; + + auto qnnMM = std::static_pointer_cast(mem_manager_); + + // register input and output tensor to qnn shared buffers + // must insure the inputs and outputs of mllm graph are the same as the qnn graph +#ifdef DEBUGPRINT + std::cout << "input tensors num:" << graphInfo->numInputTensors << std::endl; + std::cout << "output tensors num:" << graphInfo->numOutputTensors << std::endl; +#endif + + for (int i = 0; i < graphInfo->numInputTensors; i++) { + qnnInputs[i].v1.memType = QNN_TENSORMEMTYPE_MEMHANDLE; + qnnMM->registerQnnTensor((*currentInputBuffers)[i], qnnInputs[i]); +#ifdef DEBUGPRINT + std::cout << "\nregistered input tensor backend staged ptr: " << (void *)(*currentInputBuffers)[i] << std::endl; + std::cout << "qnn input tensor name: " << qnnInputs[i].v1.name << std::endl; + std::cout << "qnn input tensor scale: " << qnnInputs[i].v1.quantizeParams.scaleOffsetEncoding.scale << std::endl; +#endif + } + for (int i = 0; i < graphInfo->numOutputTensors; i++) { + qnnOutputs[i].v1.memType = QNN_TENSORMEMTYPE_MEMHANDLE; + qnnMM->registerQnnTensor((*currentOutputBuffers)[i], qnnOutputs[i]); +#ifdef DEBUGPRINT + std::cout << "\nregistered output tensor backend staged ptr: " << (void *)(*currentOutputBuffers)[i] << std::endl; + std::cout << "qnn output tensor name: " << qnnOutputs[i].v1.name << std::endl; + std::cout << "qnn output tensor scale: " << qnnOutputs[i].v1.quantizeParams.scaleOffsetEncoding.scale << std::endl; +#endif + } +} + +void QNNBackend::onExecuteStart(vector> &inputs, vector> &outputs, string graphName) { + // to 
support multi-thread, we need local variable. + // update currentInputBuffers, currentOutputBuffers, qnnModelIndex_ + auto t_qnnModelIndex_ = qnnModelIndexMap_[graphName]; + GraphInfo_t *graphInfo = graphsInfo_[t_qnnModelIndex_]; + +#ifdef DEBUGPRINT + uint64_t t_start = mllm_time_us(); +#endif + if (mRuntime->qnnInterface.graphExecute(graphInfo->graph, + graphInfo->inputTensors, + graphInfo->numInputTensors, + graphInfo->outputTensors, + graphInfo->numOutputTensors, + mRuntime->profileHandle, + nullptr) + != QNN_GRAPH_NO_ERROR) { + MLLM_LOG_ERROR_STREAM << "Error in executing graph: " << graphName << std::endl; + } +#ifdef DEBUGPRINT + uint64_t t_end = mllm_time_us(); + std::cout << "QNN execution time " << (t_end - t_start) / 1000.0F << " ms" << std::endl; +#endif + + if (ProfilingLevel::OFF != m_profilingLevel) { + extractBackendProfilingInfo(mRuntime->profileHandle); + } +} + +void QNNBackend::graphAddNode(string name, + string nodeType, + std::vector inputTensorNames, + std::vector outputTensors, + std::vector params, + string packageName) { + // graph has been built + if (isFromCache) { + return; + } + CALL_QNN(qnnModels_[qnnModelIndex_].addNode( + QNN_OPCONFIG_VERSION_1, // Op_Config_t Version + name.c_str(), // Node Name + packageName.c_str(), // Package Name + nodeType.c_str(), // Qnn Node Type + params, // Node Params + inputTensorNames, // Input Tensor Names + outputTensors // Output Tensors + )); +} + +void QNNBackend::modelAddTensor(std::string nodeName, Qnn_Tensor_t tensor) { + // graph has been built + if (isFromCache) { + return; + } + // std::cout << "nodeName" << nodeName << std::endl; + CALL_QNN(qnnModels_[qnnModelIndex_].addTensor(nodeName.c_str(), tensor)); +} + +void QNNBackend::extractBackendProfilingInfo( + Qnn_ProfileHandle_t profileHandle) { + if (nullptr == mRuntime->profileHandle) { + MLLM_LOG_ERROR("Backend Profile handle is nullptr; may not be initialized."); + return; + } + const QnnProfile_EventId_t *profileEvents{nullptr}; + 
uint32_t numEvents{0}; + if (QNN_PROFILE_NO_ERROR != mRuntime->qnnInterface.profileGetEvents(profileHandle, &profileEvents, &numEvents)) { + MLLM_LOG_ERROR("Failure in profile get events."); + return; + } + + MLLM_LOG_INFO_STREAM << "Profile Events: [" << profileEvents << "], numEvents: " << numEvents << std::endl; + for (size_t event = 0; event < numEvents; event++) { + extractProfilingEvent(*(profileEvents + event)); + extractProfilingSubEvents(*(profileEvents + event)); + } +} + +void QNNBackend::extractProfilingSubEvents( + QnnProfile_EventId_t profileEventId) { + const QnnProfile_EventId_t *profileSubEvents{nullptr}; + uint32_t numSubEvents{0}; + if (QNN_PROFILE_NO_ERROR != mRuntime->qnnInterface.profileGetSubEvents(profileEventId, &profileSubEvents, &numSubEvents)) { + MLLM_LOG_ERROR_LEGACY("Failure in profile get sub events."); + return; + } + MLLM_LOG_INFO_STREAM << "ProfileSubEvents: [" << profileSubEvents << "], numSubEvents: " << numSubEvents << std::endl; + for (size_t subEvent = 0; subEvent < numSubEvents; subEvent++) { + extractProfilingEvent(*(profileSubEvents + subEvent)); + extractProfilingSubEvents(*(profileSubEvents + subEvent)); + } +} + +void QNNBackend::extractProfilingEvent( + QnnProfile_EventId_t profileEventId) { + QnnProfile_EventData_t eventData; + if (QNN_PROFILE_NO_ERROR != mRuntime->qnnInterface.profileGetEventData(profileEventId, &eventData)) { + MLLM_LOG_ERROR_LEGACY("Failure in profile get event type."); + return; + } + MLLM_LOG_INFO_STREAM << "Printing Event Info - Event Type: [" << eventData.type + << "], Event Value: [" << eventData.value + << "], Event Identifier: [" << eventData.identifier + << "], Event Unit: [" << eventData.unit << "]" << std::endl; +} + +void QNNBackend::saveQNNContext() { + uint64_t binarySize, writtenSize; + + mRuntime->qnnInterface.contextGetBinarySize(m_context, &binarySize); + + std::unique_ptr binaryBuffer(new uint8_t[binarySize]); + + mRuntime->qnnInterface.contextGetBinary(m_context, 
reinterpret_cast(binaryBuffer.get()), binarySize, &writtenSize); + + if (binarySize < writtenSize) { + MLLM_LOG_ERROR_STREAM << "QNN context binary size mismatch: expected " << binarySize + << " bytes, but wrote " << writtenSize << " bytes." << std::endl; + } + std::ofstream file("qnn_context.bin", std::ios::binary); + file.write(reinterpret_cast(binaryBuffer.get()), writtenSize); + file.close(); + + std::cout << "QNN context saved to qnn_context.bin written " << writtenSize << std::endl; +} +std::vector QNNBackend::runOp(Op *op, std::vector inputs, std::vector out_names, bool in_place) { + Module *module = inputs.empty() ? Module::llm_model_ptr : inputs[0].module(); + assert(module != nullptr); + auto &activation_tensors = module->activation_tensors; + auto &activation_tensors_num = module->activation_tensors_num; + + std::vector> output_ptrs; + for (const auto &out_name : out_names) { + if (activation_tensors.find(out_name) == activation_tensors.end()) { + Backend *backend_h = Backend::global_backends[MLLM_CPU].get(); + if (!inputs.empty()) { + backend_h = inputs[0].backend(); + } + activation_tensors[out_name] = std::make_shared(backend_h); + activation_tensors[out_name]->setName(out_name); + activation_tensors[out_name]->setModule(module); + activation_tensors_num[out_name] = 0; + } + output_ptrs.push_back(activation_tensors[out_name]); + } + Backend *backend_h = Backend::global_backends[MLLM_CPU].get(); + if (!inputs.empty()) { + backend_h = inputs[0].backend(); + } + if (module->doLoad) { + std::vector results; + for (auto &out_tensor : output_ptrs) { + results.push_back(*activation_tensors[out_tensor->name()]); + } + return results; + } + + std::vector> input_ptrs; + for (auto &tensor : inputs) { + input_ptrs.push_back(activation_tensors[tensor.name()]); + } + +#ifdef DEBUGOPTIME + auto start_t = mllm_time_us(); +#endif + + switch (Tensor::tensor_status) { + case TENSOR_STATIC_INIT: + op->reshape(input_ptrs, output_ptrs); + op->setUp(input_ptrs, 
output_ptrs); + break; + case TENSOR_STATIC_READY: + op->execute(input_ptrs, output_ptrs); + break; + case TENSOR_STATIC_TRACE: + if (backend_h->type() == BackendType::MLLM_CPU) { + Tracer::addOp(op, input_ptrs, output_ptrs); + } else if (op->type() == SUBGRAPHSTART) { // begin of QNN graph + Tracer::addModule(input_ptrs, {}, op->name()); + } + break; + break; + default: + break; + } +#ifdef DEBUGOPTIME + if (Tensor::tensor_status == TENSOR_STATIC_READY) { + auto end_t = mllm_time_us(); + std::cout << (out_names.empty() ? "" : out_names[0]) << " | " + << Tensor::tensor_status << " time: " + << (end_t - start_t) / 1000.0F << "ms" << std::endl; + } +#endif + +#ifdef DEBUGSAVETENSOR + for (auto &out_name : out_names) { + activation_tensors[out_name]->saveNData(); + } +#endif + + std::vector results; + for (auto &out_tensor : output_ptrs) { + results.emplace_back(*activation_tensors[out_tensor->name()]); + } + return results; +} + +/* +std::vector QNNBackend::runFunc(std::vector out_names, + TensorFuncType type, + std::vector float_args, + std::vector> input_tensors, + bool in_place) { + Module *module = input_tensors.empty() ? 
Module::llm_model_ptr : input_tensors[0]->module(); + assert(module != nullptr); + auto &activation_tensors = module->activation_tensors; + auto &activation_tensors_num = module->activation_tensors_num; + + std::vector> output_ptrs; + for (const auto &out_name : out_names) { + if (activation_tensors.find(out_name) == activation_tensors.end()) { + Backend *backend_h = Context::Instance().globalBackends(MLLM_CPU); + if (!input_tensors.empty()) { + backend_h = input_tensors[0]->backend(); + } + activation_tensors[out_name] = std::make_shared(backend_h); + activation_tensors[out_name]->setName(out_name); + activation_tensors[out_name]->setModule(module); + activation_tensors_num[out_name] = 0; + } + output_ptrs.push_back(activation_tensors[out_name]); + } + + if (module->doLoad) { + std::vector results; + for (auto &out_tensor : output_ptrs) { + results.push_back(*activation_tensors[out_tensor->name()]); + } + return results; + } + + Backend *backend_h = Context::Instance().globalBackends(MLLM_CPU); + if (!input_tensors.empty()) { + backend_h = input_tensors[0]->backend(); + } + TensorFunction *func = backend_h->funcCreate(type); + + std::vector> input_ptrs; + for (auto &tensor : input_tensors) { + input_ptrs.push_back(activation_tensors[tensor->name()]); + } + // if (in_place) { + // for (size_t i = 0; i < input_tensors.size() && i < out_names.size(); ++i) { + // input_tensors[i]->setName(out_names[i]); + // output_ptrs.push_back(input_tensors[i]); + // } + // } + +#ifdef DEBUGOPTIME + auto start_t = mllm_time_us(); +#endif + + switch (Tensor::tensor_status) { + case TENSOR_STATIC_INIT: + func->reshape(output_ptrs, input_ptrs, float_args); + func->setUp(output_ptrs, input_ptrs, float_args); + break; + case TENSOR_STATIC_READY: + func->execute(output_ptrs, input_ptrs, float_args); + break; + case TENSOR_STATIC_TRACE: + if (backend_h->type() == BackendType::MLLM_CPU) { + Tracer::addTensorFunction(func, input_ptrs, output_ptrs, float_args); + } + break; + default: + 
break; + } + + // if (Backend::global_backends.size() == 1) { + // for (auto input_tensor : input_ptrs) { + // auto it = activation_tensors_num.find(input_tensor->name()); + // if (it != activation_tensors_num.end()) { + // switch (Tensor::tensor_status) { + // case TENSOR_STATIC_INIT: + // it->second += 1; + // break; + // case TENSOR_STATIC_READY: + // it->second -= 1; + // break; + // default: + // break; + // } + // if (it->second == 0 && module_tensors[input_tensor->name()]->sequence() > 1 && module_tensors[input_tensor->name()]->ttype() != GRAPH_OUTPUT) { + // activation_tensors[input_tensor->name()]->free(); + // } + // } + // } + // } + +#ifdef DEBUGOPTIME + if (Tensor::tensor_status == TENSOR_STATIC_READY) { + auto end_t = mllm_time_us(); + std::cout << (out_names.empty() ? "" : out_names[0]) << " | " + << Tensor::tensor_status << " time: " + << (end_t - start_t) / 1000.0F << "ms" << std::endl; + } +#endif + +#ifdef DEBUGSAVETENSOR + for (auto &out_name : out_names) { + activation_tensors[out_name]->saveNData(); + } +#endif + + std::vector results; + for (auto &out_tensor : output_ptrs) { + results.emplace_back(*activation_tensors[out_tensor->name()]); + } + return results; +} +*/ +std::string name_num_to_X(const std::string &input_string) { + std::regex pattern(R"(\.\d{1,3}\.)"); // Matches any number between 1 and 100 between two dots + std::string replacement = ".X."; // The string to replace the matched pattern with + std::string output_string = std::regex_replace(input_string, pattern, replacement); + return output_string; +} +std::string name_X_to_num(const std::string &input_string, int in_idx) { + std::regex pattern(".X."); // Matches any number between 1 and 100 between two dots + std::string replacement = "." 
+ std::to_string(in_idx) + "."; // The string to replace the matched pattern with + std::string output_string = std::regex_replace(input_string, pattern, replacement); + return output_string; +} +void init_reset_KVCache(string input_name, Module *module, int saved_list_idx, map layername_2_tensorname, Backend *backend_) { + map> &activation_tensors = module->activation_tensors; + vector renameX_names; + renameX_names.push_back(input_name); + const vector suffixs = {"-view", ".split-0", ".split-1", ".split-2", "-cat", "-split-0-48"}; + vector new_names; + bool can_break = true; + auto in_x_name = renameX_names[0]; + while (can_break) { + can_break = false; + for (const auto &suffix : suffixs) { + if (in_x_name.rfind(suffix) == (in_x_name.size() - suffix.size())) { + const auto r_name = in_x_name.substr(0, in_x_name.size() - suffix.size()); + if (std::find(renameX_names.begin(), renameX_names.end(), r_name) == renameX_names.end() && std::find(new_names.begin(), new_names.end(), r_name) == new_names.end()) { + new_names.push_back(r_name); + in_x_name = r_name; + can_break = true; + } + break; + } + } + } + renameX_names.insert(renameX_names.end(), new_names.begin(), new_names.end()); + for (const auto x_name : renameX_names) { + auto name = name_X_to_num(x_name, saved_list_idx); + layername_2_tensorname[name] = name; + activation_tensors[name] = std::make_shared(backend_); + activation_tensors[name]->initFrom(*activation_tensors[x_name]); + activation_tensors[name]->setName(name); + activation_tensors[name]->setModule(module); + } +} +std::vector QNNBackend::runLayer(Layer *layer, std::vector inputs, int N) { + Module *module = inputs.empty() ? 
Module::llm_model_ptr : inputs[0].module(); + map> &activation_tensors = module->activation_tensors; + auto &activation_tensors_num = module->activation_tensors_num; + // Module::runlistIdx = saved_list_idx; + bool do_init = false; + + if (module->doLoad || !layer->inited_loaded) { + // set backend to current module device and try to create op + // use Module::tmp_device only when creating the op as the recersive module backend only handled in load and init stage + // layer->backend_ = Context::Instance().globalBackends(Module::tmp_device); + layer->backend_ = Backend::global_backends[Module::tmp_device].get(); + do_init = !layer->inited_loaded; + if (layer->op_ == nullptr) { + // std::cout << "asdsa " << layer->name_ << std::endl; + if (layer->param_["type"] == KVCACHE || layer->param_["type"] == KVCACHENPU) { + // std::cout << layer->name_ << std::endl; + if (kv_cache_map.find(layer->name_) == kv_cache_map.end()) { + // std::cout << layer->name_ << " is first used" << std::endl; + // for the prefill part, we need to create a new op + layer->param_["type"] = KVCACHENPU; + layer->op_ = layer->backend_->opCreate(layer->param_, layer->name_); + kv_cache_map[layer->name_] = layer->op_; + } else { + // #ifdef DEBUGPRINT + // std::cout << layer->name_ << " is shared used" << std::endl; + // #endif + // for the decoding part, we need to get created op from global container + layer->op_ = kv_cache_map[layer->name_]; + } + } else { + layer->op_ = layer->backend_->opCreate(layer->param_, layer->name_); + } + } + if (layer->param_["type"] == SUBGRAPHFINALIZE) { + for (auto &input : inputs) { + activation_tensors[input.name()]->setTtype(GRAPH_OUTPUT); + } + } + if (module->doLoad) { + layer->op_->load(*module->loader); + layer->inited_loaded = true; + } else if (layer->loaded_param) { + layer->inited_loaded = layer->loaded_param; + } else { + if (!layer->inited_loaded) { + // module->loader = new ParamLoader(""); + // op_->load(*module->loader); + auto empty_loader = new 
ParamLoader(""); + layer->op_->load(*empty_loader); + layer->inited_loaded = true; + } + } + vector layer_next_names = {}; + if (N > 1) { + for (int i = 0; i < N; ++i) { + layer_next_names.push_back("out-" + layer->op_->name() + "-" + std::to_string(i)); + } + } else { + layer_next_names = {"out-" + layer->op_->name()}; + } + for (const auto &layer_next_name : layer_next_names) { + string next_name; + // NOTE: QNN is using CPU ViT + if (Layer::use_layername_2_tensorname) { + if (Layer::layername_2_tensorname.find(layer_next_name) == Layer::layername_2_tensorname.end()) { + if (layer->param_["type"] == KVCACHE) { + Layer::layername_2_tensorname[layer_next_name] = layer_next_name; + init_reset_KVCache(inputs[0].name(), module, layer->saved_list_idx, Layer::layername_2_tensorname, layer->backend_); + } else { + Layer::layername_2_tensorname[layer_next_name] = name_num_to_X(layer_next_name); + } + } + next_name = Layer::layername_2_tensorname[layer_next_name]; + } else if (Context::Instance().inference_state().getIsCPUViT() && layer_next_name.find("visual") != string::npos) { + next_name = Layer::layername_2_tensorname[layer_next_name]; + } else { + next_name = layer_next_name; + } + if (activation_tensors.find(next_name) == activation_tensors.end()) { + activation_tensors[next_name] = std::make_shared(layer->backend_); + activation_tensors[next_name]->setName(next_name); + activation_tensors[next_name]->setModule(module); + activation_tensors_num[next_name] = 0; + } + } + if (module->doLoad) { + vector output_result = {}; + for (const auto &layer_next_name : layer_next_names) { + string next_name; + // NOTE: QNN is using CPU ViT + if (Layer::use_layername_2_tensorname) { + if (Layer::layername_2_tensorname.find(layer_next_name) == Layer::layername_2_tensorname.end()) { + if (layer->param_["type"] == KVCACHE) { + Layer::layername_2_tensorname[layer_next_name] = layer_next_name; + init_reset_KVCache(inputs[0].name(), module, layer->saved_list_idx, 
Layer::layername_2_tensorname, layer->backend_); + } else { + Layer::layername_2_tensorname[layer_next_name] = name_num_to_X(layer_next_name); + } + } + next_name = Layer::layername_2_tensorname[layer_next_name]; + } else if (Context::Instance().inference_state().getIsCPUViT() && layer_next_name.find("visual") != string::npos) { + next_name = Layer::layername_2_tensorname[layer_next_name]; + } else { + next_name = layer_next_name; + } + output_result.push_back(*activation_tensors[next_name]); + } + return output_result; + } + } + // input_tensors + vector> input_tensors; + for (auto &input : inputs) { + if (input.shouldInGraphs()) { + auto input_name = input.name(); + if (layer->param_["type"] == KVCACHE && do_init && Layer::use_layername_2_tensorname) { + input_name = name_X_to_num(input_name, layer->saved_list_idx); + } + input_tensors.push_back(activation_tensors[input_name]); + } else { + input_tensors.push_back(std::shared_ptr(&input, [](Tensor *) {})); + } + } + // output_tensors + vector layer_next_names = {}; + if (N > 1) { + for (int i = 0; i < N; ++i) { + layer_next_names.push_back("out-" + layer->op_->name() + "-" + std::to_string(i)); + } + } else { + layer_next_names = {"out-" + layer->op_->name()}; + } + vector> output_tensors = {}; + for (const auto &layer_next_name : layer_next_names) { + string next_name; + // NOTE: QNN is using CPU ViT + if (Layer::use_layername_2_tensorname) { + if (Layer::layername_2_tensorname.find(layer_next_name) == Layer::layername_2_tensorname.end()) { + if (layer->param_["type"] == KVCACHE) { + Layer::layername_2_tensorname[layer_next_name] = layer_next_name; + init_reset_KVCache(inputs[0].name(), module, layer->saved_list_idx, Layer::layername_2_tensorname, layer->backend_); + } else { + Layer::layername_2_tensorname[layer_next_name] = name_num_to_X(layer_next_name); + } + } + next_name = Layer::layername_2_tensorname[layer_next_name]; + } else if (Context::Instance().inference_state().getIsCPUViT() && 
layer_next_name.find("visual") != string::npos) { + next_name = Layer::layername_2_tensorname[layer_next_name]; + } else { + next_name = layer_next_name; + } + output_tensors.push_back(activation_tensors[next_name]); + } +#ifdef DEBUGOPTIME + auto start_t = mllm_time_us(); +#endif + switch (Tensor::tensor_status) { + case TENSOR_STATIC_INIT: { + if (Context::Instance().inference_state().isQnnGraphFrozen() && layer->backend_->type() == MLLM_QNN) { + break; + } + // std::cout << "================={Layer: " << std::endl; + // std::cout << layer->op_->name() << std::endl; + // for (const auto &in_tensor : input_tensors) { + // std::cout << " in tensor: " << in_tensor->name() << " dtype=" << in_tensor->dtype() << " " << in_tensor->batch() << ", " << in_tensor->head() << ", " << in_tensor->sequence() << ", " << in_tensor->dimension() << " ctype " << in_tensor->ctype() << " dtype " << in_tensor->dtype() << std::endl; + // } + layer->op_->reshape(input_tensors, output_tensors); + layer->op_->setUp(input_tensors, output_tensors); + // for (const auto &in_tensor : output_tensors) { + // std::cout << " ot tensor: " << in_tensor->name() << " dtype=" << in_tensor->dtype() << " " << in_tensor->batch() << ", " << in_tensor->head() << ", " << in_tensor->sequence() << ", " << in_tensor->dimension() << " ctype " << in_tensor->ctype() << " dtype " << in_tensor->dtype() << std::endl; + // } + // std::cout << "=================Layer}: " << std::endl; + break; + } + case TENSOR_STATIC_READY: { + if (Context::Instance().inference_state().isQnnGraphFrozen() && layer->backend_->type() == MLLM_QNN && layer->param_["type"] != SUBGRAPHSTART) { + break; + } + layer->op_->execute(input_tensors, output_tensors); + break; + } + case TENSOR_STATIC_TRACE: { + if (layer->backend_->type() == BackendType::MLLM_CPU) { + Tracer::addOp(layer->op_, input_tensors, output_tensors); + } else if (layer->param_["type"] == SUBGRAPHSTART) { // begin of QNN graph + Tracer::addModule(input_tensors, {}, 
layer->op_->name()); + } + break; + } + default: { + break; + } + } + +#ifdef DEBUGOPTIME + if (Tensor::tensor_status == TENSOR_STATIC_READY) { + auto end_t = mllm_time_us(); + std::cout << layer->op_->name() << " | " << Tensor::tensor_status << " time: " << (end_t - start_t) / 1000.0F << "ms" << std::endl; + } +#endif + vector output_result = {}; + for (const auto &layer_next_name : layer_next_names) { + string next_name; + // NOTE: QNN is using CPU ViT + if (Layer::use_layername_2_tensorname) { + if (Layer::layername_2_tensorname.find(layer_next_name) == Layer::layername_2_tensorname.end()) { + if (layer->param_["type"] == KVCACHE) { + Layer::layername_2_tensorname[layer_next_name] = layer_next_name; + init_reset_KVCache(inputs[0].name(), module, layer->saved_list_idx, Layer::layername_2_tensorname, layer->backend_); + } else { + Layer::layername_2_tensorname[layer_next_name] = name_num_to_X(layer_next_name); + } + } + next_name = Layer::layername_2_tensorname[layer_next_name]; + } else if (Context::Instance().inference_state().getIsCPUViT() && layer_next_name.find("visual") != string::npos) { + next_name = Layer::layername_2_tensorname[layer_next_name]; + } else { + next_name = layer_next_name; + } +#ifdef DEBUGSAVETENSOR + activation_tensors[next_name]->saveNData(layer_next_name); +#endif + output_result.push_back(*activation_tensors[next_name]); + } + return output_result; +} +std::vector QNNBackend::runForward(Module *module, std::vector inputs, std::vector args) { + // Module Loading + if (Module::llm_model_ptr && Module::llm_model_ptr->doLoad) { + auto outputs = module->Forward(inputs, args); + return outputs; + } + + // Module setUp & execute + if (inputs[0].ttype() == TensorType::INPUT_TENSOR) { + if (module->prefilling_token_size_ == 0) { // first time init + module->prefilling_token_size_ = inputs[0].sequence() * inputs[0].batch(); + } else if (module->decoding_token_size_ == 0) { + module->decoding_token_size_ = inputs[0].sequence() * 
inputs[0].batch(); + } + for (int i = 0; i < inputs.size(); i++) { + auto &input = inputs[i]; + input.setName("input" + std::to_string(i)); + input.setTtype(TensorType::NORMAL_TENSOR); + module->activation_tensors[input.name()] = std::shared_ptr(&input, [](Tensor *) {}); + module->activation_tensors[input.name()]->setName(input.name()); + module->activation_tensors[input.name()]->setModule(module); + } + Module::llm_model_ptr = module; + Tensor::tensor_status = TENSOR_STATIC_INIT; + + uint64_t time_start = mllm_time_us(); + module->Forward(inputs, args); + Tensor::tensor_status = TENSOR_STATIC_READY; // change to EAGER + + auto output = module->Forward(inputs, args); + uint64_t time_end = mllm_time_us(); + + double inference_time_ = (time_end - time_start) / 1000.0F; // ms + module->inference_times_.push_back(inference_time_); + + Module::llm_model_ptr->op_transposed_flag = true; + return output; + } else { // inner Modules + return module->Forward(inputs, args); + } +} + +QNNPerf::QNNPerf(const QNN_INTERFACE_VER_TYPE *qnnInterface) { + assert(qnnInterface != nullptr); + mQnnInterface = qnnInterface; + + QnnDevice_Infrastructure_t deviceInfra = nullptr; + CALL_QNN(mQnnInterface->deviceGetInfrastructure(&deviceInfra)); + QnnHtpDevice_Infrastructure_t *htpInfra = static_cast(deviceInfra); + mPerfInfra = htpInfra->perfInfra; + + uint32_t deviceId = 0; + uint32_t coreId = 0; + CALL_QNN(mPerfInfra.createPowerConfigId(deviceId, coreId, &mPowerConfigId)); + + mPowerConfigBurst = { + .option = QNN_HTP_PERF_INFRASTRUCTURE_POWER_CONFIGOPTION_DCVS_V3, + .dcvsV3Config = { + .contextId = mPowerConfigId, // use the power config id created + .setDcvsEnable = 1, + .dcvsEnable = 0, // 1- To enable Dcvs and consider dcvs power mode, 0- To disable dcvs + .powerMode = QNN_HTP_PERF_INFRASTRUCTURE_POWERMODE_PERFORMANCE_MODE, + .setSleepLatency = 1, // True to consider Latency parameter otherwise False + .sleepLatency = 40, // set dsp sleep latency ranges 10-65535 micro sec, refer 
hexagon sdk + .setSleepDisable = 1, // True to consider sleep disable/enable parameter otherwise False + .sleepDisable = 1, // True to disable sleep, False to re-enable sleep + .setBusParams = 1, // True to consider Bus parameter otherwise False + .busVoltageCornerMin = DCVS_VOLTAGE_VCORNER_MAX_VOLTAGE_CORNER, + .busVoltageCornerTarget = DCVS_VOLTAGE_VCORNER_MAX_VOLTAGE_CORNER, + .busVoltageCornerMax = DCVS_VOLTAGE_VCORNER_MAX_VOLTAGE_CORNER, + .setCoreParams = 1, // True to consider Core parameter otherwise False + .coreVoltageCornerMin = DCVS_VOLTAGE_VCORNER_MAX_VOLTAGE_CORNER, + .coreVoltageCornerTarget = DCVS_VOLTAGE_VCORNER_MAX_VOLTAGE_CORNER, + .coreVoltageCornerMax = DCVS_VOLTAGE_VCORNER_MAX_VOLTAGE_CORNER, + }, + }; + + mPowerConfigBalanced = { + .option = QNN_HTP_PERF_INFRASTRUCTURE_POWER_CONFIGOPTION_DCVS_V3, + .dcvsV3Config = { + .contextId = mPowerConfigId, // use the power config id created + .setDcvsEnable = 1, + .dcvsEnable = 1, // 1- To enable Dcvs and consider dcvs power mode, 0- To disable dcvs + .powerMode = QNN_HTP_PERF_INFRASTRUCTURE_POWERMODE_ADJUST_UP_DOWN, + .setSleepLatency = 1, // True to consider Latency parameter otherwise False + .sleepLatency = 1000, // set dsp sleep latency ranges 10-65535 micro sec, refer hexagon sdk + .setSleepDisable = 1, // True to consider sleep disable/enable parameter otherwise False + .sleepDisable = 0, // True to disable sleep, False to re-enable sleep + .setBusParams = 1, // True to consider Bus parameter otherwise False + .busVoltageCornerMin = DCVS_VOLTAGE_VCORNER_TURBO, + .busVoltageCornerTarget = DCVS_VOLTAGE_VCORNER_TURBO, + .busVoltageCornerMax = DCVS_VOLTAGE_VCORNER_TURBO, + .setCoreParams = 1, // True to consider Core parameter otherwise False + .coreVoltageCornerMin = DCVS_VOLTAGE_VCORNER_TURBO, + .coreVoltageCornerTarget = DCVS_VOLTAGE_VCORNER_TURBO, + .coreVoltageCornerMax = DCVS_VOLTAGE_VCORNER_TURBO, + }, + }; +} + +// destory power config +QNNPerf::~QNNPerf() { + 
CALL_QNN(mPerfInfra.destroyPowerConfigId(mPowerConfigId)); +} + +void QNNPerf::setRpcLatencyAndPolling() { + // set RPC Control Latency + QnnHtpPerfInfrastructure_PowerConfig_t rpcControlLatency; // refer QnnHtpPerfInfrastructure.h + ::memset(&rpcControlLatency, 0, sizeof(rpcControlLatency)); + rpcControlLatency.option = QNN_HTP_PERF_INFRASTRUCTURE_POWER_CONFIGOPTION_RPC_CONTROL_LATENCY; + rpcControlLatency.rpcControlLatencyConfig = 100; // use rpc control latency recommended 100 us, refer hexagon sdk + const QnnHtpPerfInfrastructure_PowerConfig_t *powerConfigs1[] = {&rpcControlLatency, NULL}; + + CALL_QNN(mPerfInfra.setPowerConfig(mPowerConfigId, powerConfigs1)); // set RPC latency config on power config ID created + + // set RPC Polling + QnnHtpPerfInfrastructure_PowerConfig_t rpcPollingTime; // refer QnnHtpPerfInfrastructure.h + ::memset(&rpcPollingTime, 0, sizeof(rpcPollingTime)); + rpcPollingTime.option = QNN_HTP_PERF_INFRASTRUCTURE_POWER_CONFIGOPTION_RPC_POLLING_TIME; + rpcPollingTime.rpcPollingTimeConfig = 9999; // use rpc polling time recommended 0-10000 us + const QnnHtpPerfInfrastructure_PowerConfig_t *powerConfigs2[] = {&rpcPollingTime, NULL}; + + CALL_QNN(mPerfInfra.setPowerConfig(mPowerConfigId, powerConfigs2)); // set RPC polling config on power config ID created +} + +void QNNPerf::setPowerConfigBurst() { + const QnnHtpPerfInfrastructure_PowerConfig_t *powerConfigs[] = {&mPowerConfigBurst, NULL}; + CALL_QNN(mPerfInfra.setPowerConfig(mPowerConfigId, powerConfigs)); +} + +void QNNPerf::setPowerConfigBalanced() { + const QnnHtpPerfInfrastructure_PowerConfig_t *powerConfigs[] = {&mPowerConfigBalanced, NULL}; + CALL_QNN(mPerfInfra.setPowerConfig(mPowerConfigId, powerConfigs)); +} + +QNNRuntime::~QNNRuntime() { + // Free Profile + if (profileHandle != nullptr) { + CALL_QNN(qnnInterface.profileFree(profileHandle)); + } + + // Free Device + CALL_QNN(qnnInterface.deviceFree(deviceHandle)); + + // Free Backend + 
CALL_QNN(qnnInterface.backendFree(backendHandle)); + + // Free Log + CALL_QNN(qnnInterface.logFree(logHandle)); +} + +void __mllmLoggerCallback4QnnLogger(const char *fmt, QnnLog_Level_t level, uint64_t times_tamp, + va_list argp) { + const char *level_str = ""; + switch (level) { + case QNN_LOG_LEVEL_ERROR: level_str = "[ERROR]"; break; + case QNN_LOG_LEVEL_WARN: level_str = "[WARN]"; break; + case QNN_LOG_LEVEL_INFO: level_str = "[INFO]"; break; + case QNN_LOG_LEVEL_DEBUG: level_str = "[DEBUG]"; break; + case QNN_LOG_LEVEL_VERBOSE: level_str = "[VERBOSE]"; break; + case QNN_LOG_LEVEL_MAX: level_str = "[UNKNOWN]"; break; + } + + double ms = (double)times_tamp / 1000000.0; + + { + fprintf(stdout, "QnnLogger(%8.1fms, %ld) %s: ", ms, times_tamp, level_str); + vfprintf(stdout, fmt, argp); + } +} + +QNNRuntime *QNNRuntime::initRuntime(ProfilingLevel profilingLevel, QnnLog_Level_t qnnLogLevel) { + // Create Interface + QNN_INTERFACE_VER_TYPE qnnInterface{}; + { + QnnInterface_t **interfaceProviders = nullptr; + uint32_t numProviders = 0; + if (QnnInterface_getProviders((const QnnInterface_t ***)&interfaceProviders, &numProviders) != QNN_SUCCESS) { + MLLM_LOG_ERROR_STREAM << "Failed to call 'QnnInterface_getProviders'." << std::endl; + return nullptr; + } + if (interfaceProviders == nullptr) { + MLLM_LOG_ERROR_STREAM << "Failed to get interface providers: null interface providers received." << std::endl; + return nullptr; + } + if (numProviders == 0) { + MLLM_LOG_ERROR_STREAM << "Failed to get interface providers: 0 interface providers." 
<< std::endl; + return nullptr; + } + bool foundValidInterface = false; + for (size_t pIdx = 0; pIdx < numProviders; pIdx++) { + if (QNN_API_VERSION_MAJOR == interfaceProviders[pIdx]->apiVersion.coreApiVersion.major && QNN_API_VERSION_MINOR <= interfaceProviders[pIdx]->apiVersion.coreApiVersion.minor) { + foundValidInterface = true; + qnnInterface = interfaceProviders[pIdx]->QNN_INTERFACE_VER_NAME; + break; + } + } + if (!foundValidInterface) { + MLLM_LOG_ERROR_STREAM << "Failed to find a valid QNN interface provider." << std::endl; + return nullptr; + } + } + + // Create Log + Qnn_LogHandle_t logHandle = nullptr; + { + QnnLog_Callback_t logCallback = __mllmLoggerCallback4QnnLogger; + + if ((QNN_GET_ERROR_CODE(qnnInterface.logCreate(logCallback, QNN_LOG_LEVEL_ERROR, &logHandle)) != QNN_SUCCESS) || (logHandle == nullptr)) { + MLLM_LOG_ERROR_STREAM << "Failed to initialize logging in the backend." << std::endl; + return nullptr; + } + } + + // Create Backend + Qnn_BackendHandle_t backendHandle = nullptr; + { + const QnnBackend_Config_t **backendConfig = nullptr; + if ((QNN_GET_ERROR_CODE(qnnInterface.backendCreate(logHandle, backendConfig, &backendHandle)) != QNN_SUCCESS) || (backendHandle == nullptr)) { + MLLM_LOG_ERROR_STREAM << "Failed to create the backend." << std::endl; + return nullptr; + } + } + + // Create Device + Qnn_DeviceHandle_t deviceHandle = nullptr; + { + // Check whether the device API is supported. 
+ if (nullptr != qnnInterface.propertyHasCapability) { + auto qnnStatus = + qnnInterface.propertyHasCapability(QNN_PROPERTY_GROUP_DEVICE); + if (QNN_PROPERTY_NOT_SUPPORTED == qnnStatus) { + MLLM_LOG_WARN_LEGACY("Device property is not supported"); + return nullptr; + } + if (QNN_PROPERTY_ERROR_UNKNOWN_KEY == qnnStatus) { + MLLM_LOG_ERROR_LEGACY("Device property is not known to backend"); + return nullptr; + } + } + } + + // Initialize Profiling + Qnn_ProfileHandle_t profileHandle = nullptr; + { + if (ProfilingLevel::OFF != profilingLevel) { + MLLM_LOG_INFO_LEGACY("Profiling turned on; level = %d", (int)profilingLevel); + if (ProfilingLevel::BASIC == profilingLevel) { + MLLM_LOG_INFO_LEGACY("Basic profiling requested. Creating Qnn Profile object."); + if (QNN_PROFILE_NO_ERROR != qnnInterface.profileCreate(backendHandle, QNN_PROFILE_LEVEL_BASIC, &profileHandle)) { + MLLM_LOG_WARN_LEGACY("Unable to create profile handle in the backend."); + return nullptr; + } + } else if (ProfilingLevel::DETAILED == profilingLevel) { + MLLM_LOG_INFO_LEGACY("Detailed profiling requested. 
Creating Qnn Profile object."); + if (QNN_PROFILE_NO_ERROR != qnnInterface.profileCreate(backendHandle, QNN_PROFILE_LEVEL_DETAILED, &profileHandle)) { + MLLM_LOG_ERROR_LEGACY("Unable to create profile handle in the backend."); + return nullptr; + } + } + } + } + + // Register Custom OpPackages + { + struct OpPackageInfo { + std::string path; + std::string interfaceProvider; + std::string target; + }; + + std::vector opPackages = { + {"libQnnLLaMAPackage_CPU.so", "LLaMAPackageInterfaceProvider", "CPU"}, + {"libQnnLLaMAPackage_HTP.so", "LLaMAPackageInterfaceProvider", "HTP"}}; + + for (const auto &pkg : opPackages) { + if (!qnnInterface.backendRegisterOpPackage) { + MLLM_LOG_ERROR_LEGACY("backendRegisterOpPackageFnHandle is nullptr."); + return nullptr; + } + if (QNN_BACKEND_NO_ERROR != qnnInterface.backendRegisterOpPackage(backendHandle, pkg.path.c_str(), pkg.interfaceProvider.c_str(), pkg.target.c_str())) { + MLLM_LOG_ERROR_LEGACY("Could not register Op Package: %s and interface provider: %s", + pkg.path.c_str(), pkg.interfaceProvider.c_str()); + return nullptr; + } + MLLM_LOG_INFO_LEGACY("Registered Op Package: %s and interface provider: %s", + pkg.path.c_str(), pkg.interfaceProvider.c_str()); + } + } + + // Create QNN System Interface + QNN_SYSTEM_INTERFACE_VER_TYPE qnnSystemInterface; + { + QnnSystemInterface_t **systemInterfaceProviders{nullptr}; + uint32_t numProviders{0}; + if (QNN_SUCCESS != QnnSystemInterface_getProviders((const QnnSystemInterface_t ***)&systemInterfaceProviders, &numProviders)) { + MLLM_LOG_ERROR_LEGACY("Failed to get system interface providers."); + return nullptr; + } + if (0 == numProviders) { + MLLM_LOG_ERROR_LEGACY("Failed to get interface providers: 0 interface providers."); + return nullptr; + } + bool foundValidSystemInterface = false; + for (size_t pIdx = 0; pIdx < numProviders; pIdx++) { + foundValidSystemInterface = true; + if (QNN_SYSTEM_API_VERSION_MAJOR == systemInterfaceProviders[pIdx]->systemApiVersion.major && 
QNN_SYSTEM_API_VERSION_MINOR <= systemInterfaceProviders[pIdx]->systemApiVersion.minor) { + qnnSystemInterface = systemInterfaceProviders[pIdx]->QNN_SYSTEM_INTERFACE_VER_NAME; + break; + } + } + if (!foundValidSystemInterface) { + MLLM_LOG_ERROR_LEGACY("Unable to find a valid system interface."); + return nullptr; + } + } + + return new QNNRuntime(qnnInterface, qnnSystemInterface, logHandle, backendHandle, deviceHandle, profileHandle); +} + +bool QNNRuntime::createContext(Qnn_ContextHandle_t &context, QnnContext_Config_t **contextConfig) { + if (QNN_CONTEXT_NO_ERROR != qnnInterface.contextCreate(backendHandle, deviceHandle, (const QnnContext_Config_t **)&contextConfig, &context)) { + MLLM_LOG_ERROR("Could not create context"); + return false; + } + return true; +} +bool QNNRuntime::retrieveContext(Qnn_ContextHandle_t &context, + std::vector &graphsInfo, + QnnContext_Config_t **contextConfig) { + // Read the binary from qnn_context.bin and get the size in byte + std::ifstream file("qnn_context.bin", std::ios::binary | std::ios::ate); + std::streamsize size = file.tellg(); + file.seekg(0, std::ios::beg); + shared_ptr binaryBuffer(new uint8_t[size], std::default_delete()); + + file.read(reinterpret_cast(binaryBuffer.get()), size); + file.close(); + + // inspect binary info + QnnSystemContext_Handle_t sysCtxHandle{nullptr}; + if (QNN_SUCCESS != qnnSystemInterface.systemContextCreate(&sysCtxHandle)) { + MLLM_LOG_ERROR("Could not create system handle."); + return false; + } + const QnnSystemContext_BinaryInfo_t *binaryInfo{nullptr}; + Qnn_ContextBinarySize_t binaryInfoSize{0}; + if (QNN_SUCCESS != qnnSystemInterface.systemContextGetBinaryInfo(sysCtxHandle, static_cast(binaryBuffer.get()), size, &binaryInfo, &binaryInfoSize)) { + MLLM_LOG_ERROR("Failed to get context binary info"); + return false; + } + + GraphInfo_t **tmpGraphsInfo = nullptr; + uint32_t graphNum; + // fill GraphInfo_t based on binary info + if (!copyMetadataToGraphsInfo(binaryInfo, tmpGraphsInfo, 
graphNum)) { + MLLM_LOG_ERROR("Failed to copy metadata."); + return false; + } + qnnSystemInterface.systemContextFree(sysCtxHandle); + sysCtxHandle = nullptr; + + graphsInfo.assign(tmpGraphsInfo, tmpGraphsInfo + graphNum); + + Qnn_ContextBinarySize_t writtenSize = 0; + qnnInterface.contextCreateFromBinary(backendHandle, deviceHandle, (const QnnContext_Config_t **)contextConfig, binaryBuffer.get(), size, &context, profileHandle); + + for (auto &g : graphsInfo) { + if (QNN_SUCCESS != qnnInterface.graphRetrieve(context, g->graphName, &g->graph)) { + MLLM_LOG_ERROR("Unable to retrieve graph handle"); + return false; + } + } + + MLLM_LOG_INFO_STREAM << "QNN context retrieved from qnn_context.bin"; + return true; +} + +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/qnn/QNNBackend.hpp b/mllm/backends/qnn/QNNBackend.hpp new file mode 100644 index 000000000..3c14baf25 --- /dev/null +++ b/mllm/backends/qnn/QNNBackend.hpp @@ -0,0 +1,196 @@ +#ifndef MLLM_QNNBACKEND_H +#define MLLM_QNNBACKEND_H + +#include "Backend.hpp" +#include "Op.hpp" +#include "OpDefined.hpp" +#include "ParamLoader.hpp" +#include "QNNUtils.hpp" +#include "QNNModel.hpp" +#include "QnnTypes.h" +#include "HTP/QnnHtpDevice.h" +#include "System/QnnSystemInterface.h" +#include "Types.hpp" +#include "MemoryManager.hpp" +#include + +namespace mllm { +class Module; +class Layer; +class Op; +class Tensor; +class Backend; + +enum class ProfilingLevel { OFF, + BASIC, + DETAILED, + INVALID }; +class QNNPerf { +public: + static std::unique_ptr create(const QNN_INTERFACE_VER_TYPE *qnnInterface) { + return std::unique_ptr(new QNNPerf(qnnInterface)); + } + QNNPerf(const QNN_INTERFACE_VER_TYPE *qnnInterface); + ~QNNPerf(); + void setRpcLatencyAndPolling(); + void setPowerConfigBurst(); + void setPowerConfigBalanced(); + +private: + const QNN_INTERFACE_VER_TYPE *mQnnInterface = nullptr; + QnnHtpDevice_PerfInfrastructure_t mPerfInfra{}; + uint32_t mPowerConfigId; + 
QnnHtpPerfInfrastructure_PowerConfig_t mPowerConfigBurst{}; + QnnHtpPerfInfrastructure_PowerConfig_t mPowerConfigBalanced{}; +}; + +class QNNRuntime { + friend class QNNBackend; + +public: + ~QNNRuntime(); + + static std::unique_ptr create(ProfilingLevel profilingLevel = ProfilingLevel::OFF, QnnLog_Level_t qnnLogLevel = QNN_LOG_LEVEL_INFO) { + return std::unique_ptr(initRuntime(profilingLevel, qnnLogLevel)); + } + + bool createContext(Qnn_ContextHandle_t &context, QnnContext_Config_t **contextConfig = nullptr); + bool retrieveContext(Qnn_ContextHandle_t &context, + std::vector &graphsInfo, + QnnContext_Config_t **contextConfig = nullptr); + +private: + QNN_INTERFACE_VER_TYPE qnnInterface; + QNN_SYSTEM_INTERFACE_VER_TYPE qnnSystemInterface; + + Qnn_LogHandle_t logHandle = nullptr; + Qnn_BackendHandle_t backendHandle = nullptr; + Qnn_DeviceHandle_t deviceHandle = nullptr; + Qnn_ProfileHandle_t profileHandle = nullptr; + + QNNRuntime(QNN_INTERFACE_VER_TYPE qnnInterface, + QNN_SYSTEM_INTERFACE_VER_TYPE qnnSystemInterface, + Qnn_LogHandle_t qnnLogHandle, + Qnn_BackendHandle_t qnnBackendHandle, + Qnn_DeviceHandle_t qnnDeviceHandle, + Qnn_ProfileHandle_t qnnProfileHandle = nullptr) : + qnnInterface(qnnInterface), + qnnSystemInterface(qnnSystemInterface), logHandle(qnnLogHandle), backendHandle(qnnBackendHandle), deviceHandle(qnnDeviceHandle), profileHandle(qnnProfileHandle) { + } + + std::string getBackendBuildId(QNN_INTERFACE_VER_TYPE &qnnInterface) { + char *backendBuildId{nullptr}; + if (QNN_SUCCESS != qnnInterface.backendGetBuildId((const char **)&backendBuildId)) { + MLLM_LOG_ERROR_LEGACY("Unable to get build Id from the backend."); + } + return (backendBuildId == nullptr ? 
std::string("") : std::string(backendBuildId)); + } + + static QNNRuntime *initRuntime(ProfilingLevel profilingLevel, QnnLog_Level_t qnnLogLevel); +}; + +class QNNBackend : public Backend { +public: + QNNBackend(shared_ptr mm); + ~QNNBackend(); + + Op *opCreate(const OpParam &op_param, string name = "", int threadCount = 4) override { + OpType optype = OpType(op_param.find("type")->second); + auto iter = map_creator_.find(optype); + if (iter == map_creator_.end()) { + std::cout << "NPU Op Don't support type : " << optype << ", name" << name << std::endl; + return nullptr; + } + Op *exe = nullptr; + exe = iter->second->create(op_param, this, name); + return exe; + } + + // currently, qnn don't support tensor function + TensorFunction *funcCreate(const TensorFuncType type) override { + return nullptr; + } + + class Creator { + public: + virtual ~Creator() = default; + virtual Op *create(OpParam op_param, Backend *bn, string name) const = 0; + }; + bool addCreator(OpType t, Creator *c) { + if (map_creator_.find(t) != map_creator_.end()) { + printf("Error: %d type has be added\n", t); + return false; + } + map_creator_.insert(std::make_pair(t, c)); + return true; + } + + void graphAddNode(string name, string nodeType, + std::vector inputTensorNames, std::vector outputTensors, + std::vector params, + string packageName); + + void modelAddTensor(std::string nodeName, Qnn_Tensor_t tensor); + + virtual void onSetUpStart(vector> &inputs, vector> &outputs, string graphName) override; + virtual void onSetUpEnd(vector> &inputs, vector> &outputs, string graphName) override; + virtual void onExecuteStart(vector> &inputs, vector> &outputs, string graphName = "") override; + virtual void onExecuteEnd(std::vector> &outputs, const string &graph_name) override{}; + + // std::vector runFunc( + // std::vector out_names, + // TensorFuncType type, + // std::vector float_args, + // std::vector input_tensors, + // bool in_place) override; + std::vector runLayer(Layer *layer, std::vector 
inputs, int N) override; + std::vector runForward(Module *module, std::vector inputs, std::vector args) override; + std::vector runOp(Op *op, std::vector input, std::vector out_names, bool in_place) override; + + void pushInputBuffers(uint8_t *ptr) { + currentInputBuffers->push_back(ptr); + } + void pushOutputBuffers(uint8_t *ptr) { + currentOutputBuffers->push_back(ptr); + } + + void saveQNNContext(); + +private: + bool graphFinilize(); + + void registerOps() override; + void registerFuncs() override{}; + + void extractBackendProfilingInfo(Qnn_ProfileHandle_t profileHandle); + + void extractProfilingSubEvents(QnnProfile_EventId_t profileEventId); + + void extractProfilingEvent(QnnProfile_EventId_t profileEventId); + + std::map> inputBufferMap, outputBufferMap; + // still use this, as in Express frontend, mllm inputs and outputs num may not match + std::vector *currentInputBuffers, *currentOutputBuffers; + + std::map map_creator_; + + std::map qnnModelIndexMap_; + std::vector qnnModels_; + int qnnModelIndex_; + + Qnn_ContextHandle_t m_context = nullptr; + bool m_debug; + + ProfilingLevel m_profilingLevel; + + std::vector graphsInfo_; + + bool isFromCache = false; + + std::unique_ptr mRuntime; + std::unique_ptr mPerf; +}; + +} // namespace mllm + +#endif // MLLM_QNNBACKEND_H \ No newline at end of file diff --git a/src/backends/qnn/QNNMemoryManager.cpp b/mllm/backends/qnn/QNNMemoryManager.cpp similarity index 64% rename from src/backends/qnn/QNNMemoryManager.cpp rename to mllm/backends/qnn/QNNMemoryManager.cpp index 98815deec..0349cdba2 100644 --- a/src/backends/qnn/QNNMemoryManager.cpp +++ b/mllm/backends/qnn/QNNMemoryManager.cpp @@ -1,6 +1,5 @@ #include "QNNMemoryManager.hpp" #include "Log.h" -#include "Logger.hpp" #include "QnnTypes.h" #include #include @@ -9,24 +8,14 @@ #include #include #include +#include namespace mllm { -template -static inline T resolveSymbol(void *libHandle, const char *sym) { - T ptr = (T)pal::dynamicloading::dlSym(libHandle, sym); - if 
(ptr == nullptr) { - MLLM_LOG_ERROR("Unable to access symbol {}. pal::dynamicloading::dlError(): {}", - sym, - pal::dynamicloading::dlError()); - } - return ptr; -} - QNNMemoryManager::QNNMemoryManager() { #ifdef QNN_ARM // load libcdsprpc.so - void *libCdspHandle = pal::dynamicloading::dlOpen("libcdsprpc.so", pal::dynamicloading::DL_NOW | pal::dynamicloading::DL_LOCAL); + void *libCdspHandle = dlopen("libcdsprpc.so", RTLD_NOW | RTLD_LOCAL); if (nullptr == libCdspHandle) { MLLM_LOG_ERROR_STREAM << "dlopen libcdsprpc.so failed" << std::endl; } @@ -40,40 +29,24 @@ QNNMemoryManager::QNNMemoryManager() { MLLM_LOG_ERROR_STREAM << "dlsym failed" << std::endl; } #endif - // Get QNN Interface - void *libBackendHandle = pal::dynamicloading::dlOpen( - "libQnnHtp.so", pal::dynamicloading::DL_NOW | pal::dynamicloading::DL_GLOBAL); - QnnInterfaceGetProvidersFn_t getInterfaceProviders{nullptr}; - getInterfaceProviders = - resolveSymbol(libBackendHandle, "QnnInterface_getProviders"); - QnnInterface_t **interfaceProviders{nullptr}; - uint32_t numProviders{0}; - if (QNN_SUCCESS != getInterfaceProviders((const QnnInterface_t ***)&interfaceProviders, &numProviders)) { - MLLM_LOG_ERROR_STREAM << "Failed to get interface providers." 
<< std::endl; - } - for (size_t pIdx = 0; pIdx < numProviders; pIdx++) { - if (QNN_API_VERSION_MAJOR == interfaceProviders[pIdx]->apiVersion.coreApiVersion.major && QNN_API_VERSION_MINOR <= interfaceProviders[pIdx]->apiVersion.coreApiVersion.minor) { - qnnInterface_ = interfaceProviders[pIdx]->QNN_INTERFACE_VER_NAME; - break; - } - } } QNNMemoryManager::~QNNMemoryManager() { #ifdef QNN_ARM - for (auto &mem : ptrToFdAndMemHandleMap_) { - Qnn_ErrorHandle_t deregisterRet = qnnInterface_.memDeRegister(&mem.second.second, 1); + for (auto iter = ptrToFdAndMemHandleMap_.begin(); iter != ptrToFdAndMemHandleMap_.end();) { + Qnn_ErrorHandle_t deregisterRet = qnnInterface_.memDeRegister(&iter->second.second, 1); if (QNN_SUCCESS != deregisterRet) { // handle errors MLLM_LOG_ERROR_STREAM << "qnnInterface_.memDeRegister failed" << std::endl; } - rpcmem_free(mem.first); - ptrToFdAndMemHandleMap_.erase(mem.first); + rpcmem_free(iter->first); + iter = ptrToFdAndMemHandleMap_.erase(iter); } #endif } -void QNNMemoryManager::setQnnInterfaceAndContext(void *context) { +void QNNMemoryManager::setQnnInterfaceAndContext(QNN_INTERFACE_VER_TYPE qnnInterface, void *context) { + qnnInterface_ = qnnInterface; context_ = context; if (context_ == nullptr) { MLLM_LOG_ERROR_STREAM << "context is null" << std::endl; @@ -146,23 +119,6 @@ void QNNMemoryManager::registerQnnTensor(void *ptr, Qnn_Tensor_t &qnnTensor) { ptrToFdAndMemHandleMap_.insert(std::make_pair(ptr, std::make_pair(memFd, qnnTensor.v1.memHandle))); } -void QNNMemoryManager::deRegisterQnnTensor() { -#ifdef QNN_ARM - // free all buffers if it's not being used - for (auto &mem : ptrToFdAndMemHandleMap_) { - Qnn_ErrorHandle_t deregisterRet = qnnInterface_.memDeRegister(&mem.second.second, 1); - if (QNN_SUCCESS != deregisterRet) { - // handle errors - MLLM_LOG_ERROR_STREAM << "qnnInterface_.memDeRegister failed" << std::endl; - } - // rpcmem_free(mem.first); - // clear the map outside the loop. 
- // ptrToFdAndMemHandleMap_.erase(mem.first); - } - ptrToFdAndMemHandleMap_.clear(); -#endif -} - void QNNMemoryManager::free(void *ptr) { #ifdef QNN_ARM // if the ptr has been registered, deregister it @@ -173,7 +129,7 @@ void QNNMemoryManager::free(void *ptr) { // handle errors MLLM_LOG_ERROR_STREAM << "qnnInterface_.memDeRegister failed" << std::endl; } - ptrToFdAndMemHandleMap_.erase(it); + it = ptrToFdAndMemHandleMap_.erase(it); } rpcmem_free(ptr); #else diff --git a/src/backends/qnn/QNNMemoryManager.hpp b/mllm/backends/qnn/QNNMemoryManager.hpp similarity index 76% rename from src/backends/qnn/QNNMemoryManager.hpp rename to mllm/backends/qnn/QNNMemoryManager.hpp index 3448d4388..665da5961 100644 --- a/src/backends/qnn/QNNMemoryManager.hpp +++ b/mllm/backends/qnn/QNNMemoryManager.hpp @@ -1,17 +1,11 @@ #ifndef MLLM_QNNMEMORY_SYSTEM_H #define MLLM_QNNMEMORY_SYSTEM_H -#include "Log.h" -#include "Log/Logger.hpp" #include "MemoryManager.hpp" -#include "PAL/DynamicLoading.hpp" -#include "Utils/DynamicLoadUtil.hpp" +#include "QnnInterface.h" #include "QnnTypes.h" #include -#include #include #include -#include -#include namespace mllm { @@ -29,10 +23,9 @@ class QNNMemoryManager : public MemoryManager { void alloc(void **ptr, size_t size, size_t alignment) override; void free(void *ptr) override; - void setQnnInterfaceAndContext(void *context); + void setQnnInterfaceAndContext(QNN_INTERFACE_VER_TYPE qnnInterface,void *context); void registerQnnTensor(void *ptr, Qnn_Tensor_t &qnnTensor); - void deRegisterQnnTensor(); private: QNN_INTERFACE_VER_TYPE qnnInterface_; @@ -40,7 +33,7 @@ class QNNMemoryManager : public MemoryManager { // memHandle set, to check if the ptr is allocted by rpcmem_alloc std::set qnnMemPtrMap_; - std::map> ptrToFdAndMemHandleMap_; + std::map> ptrToFdAndMemHandleMap_; RpcMemAllocFn_t rpcmem_alloc; RpcMemFreeFn_t rpcmem_free; diff --git a/mllm/backends/qnn/QNNModel.cpp b/mllm/backends/qnn/QNNModel.cpp new file mode 100644 index 000000000..0228fd74c 
--- /dev/null +++ b/mllm/backends/qnn/QNNModel.cpp @@ -0,0 +1,381 @@ +#include +#include +#include +#include + +#include "QNNModel.hpp" +#include "Log.h" +#include "QnnTypeMacros.hpp" +#include "QNNUtils.hpp" + +#define FREE_MEMORY(ptr1, ptr2, ptr3) \ + do { \ + free(ptr1); \ + free(ptr2); \ + free(ptr3); \ + } while (0) + +namespace mllm { + +char *strnDup(const char *source, size_t maxlen) { + return ::strndup(source, maxlen); +} + +ModelError_t QNNModel::initialize(const Qnn_BackendHandle_t &backendHandle, + const QNN_INTERFACE_VER_TYPE &qnnInterface, + const Qnn_ContextHandle_t &context, + const char *graphName, + bool debug, + uint8_t doNodeValidations, + const QnnGraph_Config_t **graphConfigs) { + if (backendHandle == nullptr) { + MLLM_LOG_ERROR("QnnModel::initialize() nullptr passed as backend handle."); + return MODEL_CONTEXT_ERROR; + } + if (context == nullptr) { + MLLM_LOG_ERROR("QnnModel::initialize() nullptr passed as context handle."); + return MODEL_CONTEXT_ERROR; + } + if (graphName == nullptr) { + MLLM_LOG_ERROR("QnnModel::initialize() nullptr passed as graphName."); + return MODEL_GRAPH_ERROR; + } + + if (!m_graphName.empty()) { + // only one graph is allowed per QnnModel + MLLM_LOG_ERROR("QnnModel::initialize() model for graph %s already initialized.", graphName); + return MODEL_GRAPH_ERROR; + } + + if (!m_doNodeValidations) { + MLLM_LOG_WARNING( + "Node validation disabled. Backend will not perform op " + "validation prior to adding Node. 
\n"); + } + + m_qnnInterface = qnnInterface; + m_backendHandle = backendHandle; + m_graphName = graphName; + m_debug = debug; + m_doNodeValidations = doNodeValidations; + + if (m_qnnInterface.graphCreate(context, graphName, graphConfigs, &m_graph) != QNN_GRAPH_NO_ERROR || m_graph == nullptr) { + MLLM_LOG_ERROR("QnnModel::initialize() not able to create graph in given context."); + return MODEL_GRAPH_ERROR; + } + + return MODEL_NO_ERROR; +} + +ModelError_t QNNModel::addTensor(const char *nodeName, Qnn_Tensor_t *tensor, bool saveTensor) { + ModelError_t err; + if (!tensor) { + MLLM_LOG_ERROR("QnnModel::addTensor() NULL tensor pointer provided."); + return MODEL_TENSOR_ERROR; + } + VALIDATE_TENSOR_VERSION((*tensor), err); + + // Verify tensor being added is not a duplicate + std::string mapEntry = std::string(QNN_TENSOR_GET_NAME(tensor)); + if (m_modelTensorsMap.find(mapEntry) != m_modelTensorsMap.end()) { + MLLM_LOG_ERROR_STREAM << "QnnModel::addTensor() creating tensor " + << mapEntry << "for node " << nodeName << "already exists."; + return MODEL_TENSOR_ERROR; + } + + const std::map dataTypeToSize = { + {QNN_DATATYPE_INT_8, 1}, + {QNN_DATATYPE_INT_16, 2}, + {QNN_DATATYPE_INT_32, 4}, + {QNN_DATATYPE_INT_64, 8}, + {QNN_DATATYPE_UINT_8, 1}, + {QNN_DATATYPE_UINT_16, 2}, + {QNN_DATATYPE_UINT_32, 4}, + {QNN_DATATYPE_UINT_64, 8}, + {QNN_DATATYPE_FLOAT_16, 2}, + {QNN_DATATYPE_FLOAT_32, 4}, + {QNN_DATATYPE_BOOL_8, 1}, + {QNN_DATATYPE_SFIXED_POINT_8, 1}, + {QNN_DATATYPE_SFIXED_POINT_16, 2}, + {QNN_DATATYPE_SFIXED_POINT_32, 4}, + {QNN_DATATYPE_UFIXED_POINT_8, 1}, + {QNN_DATATYPE_UFIXED_POINT_16, 2}, + {QNN_DATATYPE_UFIXED_POINT_32, 4}, + }; + + if (dataTypeToSize.find(QNN_TENSOR_GET_DATA_TYPE(tensor)) == dataTypeToSize.end()) { + MLLM_LOG_ERROR_STREAM << "QnnModel::addTensor() invalid QNN data type provided, " + << QNN_TENSOR_GET_DATA_TYPE(tensor) << ", for tensor " + << QNN_TENSOR_GET_NAME(tensor) << " on node " << nodeName; + return MODEL_TENSOR_ERROR; + } + + // sanity 
check tensor data if addTensor used for static tensor + if (QNN_TENSOR_GET_TYPE(tensor) == QNN_TENSOR_TYPE_STATIC) { + if (QNN_TENSOR_GET_MEM_TYPE(tensor) != QNN_TENSORMEMTYPE_RAW) { + MLLM_LOG_ERROR_STREAM << "QnnModel::addTensor() expected raw memType in provided static tensor " + << mapEntry << " for node " << nodeName; + return MODEL_TENSOR_ERROR; + } + // verify size expressed by the dims matches the raw tensor size + uint32_t qnnTensorSize = + std::accumulate(QNN_TENSOR_GET_DIMENSIONS(tensor), + QNN_TENSOR_GET_DIMENSIONS(tensor) + QNN_TENSOR_GET_RANK(tensor), + (uint32_t)dataTypeToSize.find(QNN_TENSOR_GET_DATA_TYPE(tensor))->second, + std::multiplies()); + if (qnnTensorSize != QNN_TENSOR_GET_CLIENT_BUF(tensor).dataSize) { + MLLM_LOG_ERROR_STREAM << "QnnModel::addTensor(): Adding STATIC tensor, length mismatch between clientBuf" + << "size and tensor Dims(dim * rank * sizeof(datatype) for, nodeName:" << nodeName + << ", tensorName: " << QNN_TENSOR_GET_NAME(tensor) << "." + << "Got tensorSize: " << qnnTensorSize + << ", tensor.clientBuf.dataSize: " << QNN_TENSOR_GET_CLIENT_BUF(tensor).dataSize << "."; + return MODEL_TENSOR_ERROR; + } + } + + if (m_debug && QNN_TENSOR_GET_TYPE(tensor) == QNN_TENSOR_TYPE_NATIVE) { + // for debug, make all tensors accessible by client + QNN_TENSOR_SET_TYPE(tensor, QNN_TENSOR_TYPE_APP_READ); + } + if (m_qnnInterface.tensorCreateGraphTensor(m_graph, tensor) != QNN_TENSOR_NO_ERROR) { + MLLM_LOG_ERROR_STREAM << "QnnModel::addTensor() Creating tensor for node:" + << nodeName << "tensorName:" << QNN_TENSOR_GET_NAME(tensor); + return MODEL_TENSOR_ERROR; + } + + if (saveTensor) { + Qnn_Tensor_t tensorCopy; + if (!mllm::deepCopyQnnTensorInfo(&tensorCopy, tensor)) { + return MODEL_TENSOR_ERROR; + } + + // save network input/outputs tensors to use for setting the Qnn graph's input and output + // tensors for populating GraphInfo_t for caller + if (QNN_TENSOR_GET_TYPE(tensor) == QNN_TENSOR_TYPE_APP_WRITE) { + 
m_modelInputTensors.push_back(tensorCopy); + } else if (QNN_TENSOR_GET_TYPE(tensor) == QNN_TENSOR_TYPE_APP_READ) { + m_modelOutputTensors.push_back(tensorCopy); + } + + // save created tensors for later lookup to populate graph node construction + m_modelTensorsMap[mapEntry] = tensorCopy; + } + + return MODEL_NO_ERROR; +} + +ModelError_t QNNModel::addTensor(const char *nodeName, Qnn_Tensor_t tensor, bool saveTensor) { + return addTensor(nodeName, &tensor, saveTensor); +} + +ModelError_t QNNModel::getQnnTensor(std::string nodeName, + std::string tensorName, + Qnn_Tensor_t &tensor) { + if (m_modelTensorsMap.find(tensorName) == m_modelTensorsMap.end()) { + MLLM_LOG_ERROR_STREAM << "QnnModel::getQnnTensor() tensor " + << tensorName << " not found on node " << nodeName; + return MODEL_TENSOR_ERROR; + } + tensor = m_modelTensorsMap[tensorName]; + + return MODEL_NO_ERROR; +} + +ModelError_t QNNModel::addNode(Qnn_OpConfigVersion_t version, + const char *name, + const char *packageName, + const char *type, + std::vector ¶ms, + std::vector inputNames, + std::vector &outputTensors) { + ModelError_t nodeError; + Qnn_OpConfig_t opDefinition = QNN_OPCONFIG_INIT; + opDefinition.version = version; + VALIDATE_OP_CONFIG_VERSION((opDefinition), nodeError); + + // populate Qnn param for node + Qnn_Param_t *nodeParams = (Qnn_Param_t *)malloc(params.size() * sizeof(Qnn_Param_t)); + + // populate input tensors for node + Qnn_Tensor_t *inputs = (Qnn_Tensor_t *)malloc(inputNames.size() * sizeof(Qnn_Tensor_t)); + + // populate output tensors of node + Qnn_Tensor_t *outputs = (Qnn_Tensor_t *)malloc(outputTensors.size() * sizeof(Qnn_Tensor_t)); + + if (nodeParams == nullptr || inputs == nullptr || outputs == nullptr) { + MLLM_LOG_ERROR_STREAM << "QnnModel::addNode() failed for allocate memory for creating QNN OpConfig for node " + << name; + FREE_MEMORY(nodeParams, inputs, outputs); + return MODEL_MEMORY_ALLOCATE_ERROR; + } + uint32_t nodeParamsCounter = 0; + for (size_t i = 0; i < 
params.size(); i++) { + switch (params[i].paramType) { + case QNN_PARAMTYPE_TENSOR: { + Qnn_Tensor_t &tensor = params[i].tensorParam; + // Note: set saveTensor to false as no need to save tensor beyond this + // function call for params + nodeError = addTensor(name, &tensor, false); + if (nodeError != MODEL_NO_ERROR) { + MLLM_LOG_ERROR_STREAM << "QnnModel::addNode() addTensor() failed for tensor param " + << QNN_TENSOR_GET_NAME(tensor) << " on node " << name; + FREE_MEMORY(nodeParams, inputs, outputs); + return nodeError; + } + nodeParams[nodeParamsCounter].paramType = QNN_PARAMTYPE_TENSOR; + nodeParams[nodeParamsCounter].name = params[i].name; + nodeParams[nodeParamsCounter++].tensorParam = tensor; + break; + } + case QNN_PARAMTYPE_SCALAR: { + nodeParams[nodeParamsCounter].paramType = QNN_PARAMTYPE_SCALAR; + nodeParams[nodeParamsCounter].name = params[i].name; + nodeParams[nodeParamsCounter++].scalarParam = params[i].scalarParam; + break; + } + default: { + MLLM_LOG_ERROR_STREAM << "QnnModel::addNode() unknown param type passed for param " + << params[i].name << " on node " << name; + FREE_MEMORY(nodeParams, inputs, outputs); + return MODEL_PARAMS_ERROR; + } + } + } + + size_t inputsCounter = 0; + for (size_t j = 0; j < inputNames.size(); j++) { + nodeError = getQnnTensor(name, inputNames[j], inputs[inputsCounter++]); + if (nodeError != MODEL_NO_ERROR) { + MLLM_LOG_ERROR_STREAM << "QnnModel::addNode() getQnnTensor() failed for tensor " + << inputNames[j] << " on node " << name; + FREE_MEMORY(nodeParams, inputs, outputs); + return nodeError; + } + } + + size_t outputsCounter = 0; + m_modelOutputTensorMap[name] = {}; + for (size_t k = 0; k < outputTensors.size(); k++) { + // create node output tensors first + nodeError = addTensor(name, outputTensors[k]); + if (nodeError != MODEL_NO_ERROR) { + MLLM_LOG_ERROR_STREAM << "QnnModel::addNode() addTensor() failed for tensor " + << QNN_TENSOR_GET_NAME(outputTensors[k]) << " on node " << name; + FREE_MEMORY(nodeParams, 
inputs, outputs); + return nodeError; + } + const char *outTensorName = QNN_TENSOR_GET_NAME(outputTensors[k]); + m_modelOutputTensorMap[name].push_back(outTensorName); + nodeError = getQnnTensor(name, outTensorName, outputs[outputsCounter++]); + if (nodeError != MODEL_NO_ERROR) { + MLLM_LOG_ERROR_STREAM << "QnnModel::addNode() getQnnTensor() failed for tensor " + << outTensorName << " on node " << name; + FREE_MEMORY(nodeParams, inputs, outputs); + return nodeError; + } + } + + // define and add node to graph + QNN_OP_CFG_SET_NAME(opDefinition, name); + QNN_OP_CFG_SET_PACKAGE_NAME(opDefinition, packageName); + QNN_OP_CFG_SET_TYPE_NAME(opDefinition, type); + QNN_OP_CFG_SET_PARAMS(opDefinition, params.size(), nodeParams); + QNN_OP_CFG_SET_INPUTS(opDefinition, inputNames.size(), inputs); + QNN_OP_CFG_SET_OUTPUTS(opDefinition, outputTensors.size(), outputs); + + if (m_doNodeValidations) { + auto validationStatus = m_qnnInterface.backendValidateOpConfig(m_backendHandle, opDefinition); + if (validationStatus == QNN_BACKEND_ERROR_NOT_SUPPORTED) { + MLLM_LOG_ERROR("QnnModel::addNode() validation API not supported."); + } else if (validationStatus != QNN_SUCCESS) { + MLLM_LOG_ERROR_STREAM << "QnnModel::addNode() validating node " + << name << " failed."; + FREE_MEMORY(nodeParams, inputs, outputs); + return MODEL_GRAPH_ERROR; + } + } + + if (m_qnnInterface.graphAddNode(m_graph, opDefinition) != QNN_GRAPH_NO_ERROR) { + MLLM_LOG_ERROR_STREAM << "QnnModel::addNode() adding node " + << name << " failed."; + FREE_MEMORY(nodeParams, inputs, outputs); + return MODEL_GRAPH_ERROR; + } + + FREE_MEMORY(nodeParams, inputs, outputs); + return MODEL_NO_ERROR; +} + +ModelError_t QNNModel::freeCachedTensors() { + ModelError_t err = MODEL_NO_ERROR; + + // cleanup cached tensors + for (std::map::iterator tensorIt = m_modelTensorsMap.begin(); + tensorIt != m_modelTensorsMap.end();) { + Qnn_Tensor_t &tensor = tensorIt->second; + if (QNN_TENSOR_GET_TYPE(tensor) != QNN_TENSOR_TYPE_APP_WRITE && 
QNN_TENSOR_GET_TYPE(tensor) != QNN_TENSOR_TYPE_APP_READ) { + if (!freeQnnTensor(tensor)) { + MLLM_LOG_ERROR_STREAM << "QnnModel::freeCachedTensors() failed to free tensor " + << QNN_TENSOR_GET_NAME(tensor) << "."; + err = MODEL_TENSOR_ERROR; + } + tensorIt = m_modelTensorsMap.erase(tensorIt); + } else { + tensorIt++; + } + } + return err; +} + +size_t memscpy(void *dst, size_t dstSize, const void *src, size_t copySize) { + if (!dst || !src || !dstSize || !copySize) return 0; + + size_t minSize = dstSize < copySize ? dstSize : copySize; + + memcpy(dst, src, minSize); + + return minSize; +} + +ModelError_t getSingleGraphInfoFromModel(QNNModel &model, GraphInfoPtr_t *graphInfoPtr) { + ModelError_t err = MODEL_NO_ERROR; + + *graphInfoPtr = (GraphInfo_t *)malloc(sizeof(GraphInfo_t)); + auto graphInfo = *graphInfoPtr; + if (graphInfo == nullptr) { + MLLM_LOG_ERROR("getGraphInfoFromModels() graphsInfo malloc returned nullptr."); + return MODEL_GRAPH_ERROR; + } + + graphInfo->graph = model.getQnnGraph(); + graphInfo->graphName = + strnDup(model.getQnnGraphName().c_str(), model.getQnnGraphName().size()); + if (graphInfo->graphName == nullptr) { + MLLM_LOG_ERROR("getGraphInfoFromModels() failed to construct graphName. Received nullptr."); + return MODEL_GRAPH_ERROR; + } + + // allocate and add graph input/output TensorsWrapper. Note: no need to make deep copies of + // the tensor's pointer members as they are already allocated on heap in the addTensor + // function call. 
+ std::vector graphInputTensors = model.getGraphInputTensors(); + size_t numInputTensors = graphInputTensors.size(); + size_t inputTensorsSize = numInputTensors * sizeof(Qnn_Tensor_t); + graphInfo->inputTensors = (Qnn_Tensor_t *)malloc(inputTensorsSize); + memscpy(graphInfo->inputTensors, inputTensorsSize, graphInputTensors.data(), inputTensorsSize); + graphInfo->numInputTensors = (uint32_t)numInputTensors; + // allocate and add graph outputTensors + std::vector graphOutputTensors = model.getGraphOutputTensors(); + size_t numOutputTensors = graphOutputTensors.size(); + size_t outputTensorsSize = numOutputTensors * sizeof(Qnn_Tensor_t); + graphInfo->outputTensors = (Qnn_Tensor_t *)malloc(outputTensorsSize); + memscpy( + graphInfo->outputTensors, outputTensorsSize, graphOutputTensors.data(), outputTensorsSize); + graphInfo->numOutputTensors = (uint32_t)numOutputTensors; + + // graph composition is complete by this stage, free if any cached tensors remaining + CALL_QNN(model.freeCachedTensors()); + return err; +} + +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/qnn/QNNModel.hpp b/mllm/backends/qnn/QNNModel.hpp new file mode 100644 index 000000000..03df2d46e --- /dev/null +++ b/mllm/backends/qnn/QNNModel.hpp @@ -0,0 +1,99 @@ +#pragma once + +#include +#include +#include + +#include "QNNUtils.hpp" +#include "QnnInterface.h" + +namespace mllm { + +typedef enum ModelError { + MODEL_NO_ERROR = 0, + MODEL_TENSOR_ERROR = 1, + MODEL_PARAMS_ERROR = 2, + MODEL_NODES_ERROR = 3, + MODEL_GRAPH_ERROR = 4, + MODEL_CONTEXT_ERROR = 5, + MODEL_GENERATION_ERROR = 6, + MODEL_SETUP_ERROR = 7, + MODEL_INVALID_ARGUMENT_ERROR = 8, + MODEL_FILE_ERROR = 9, + MODEL_MEMORY_ALLOCATE_ERROR = 10, + // Value selected to ensure 32 bits. 
+ MODEL_UNKNOWN_ERROR = 0x7FFFFFFF +} ModelError_t; + +class QNNModel { +public: + ~QNNModel() = default; + + ModelError_t initialize(const Qnn_BackendHandle_t &backendHandle, + const QNN_INTERFACE_VER_TYPE &qnnInterface, + const Qnn_ContextHandle_t &context, + const char *graphName, + bool debug, + uint8_t doNodeValidations = 1, + const QnnGraph_Config_t **graphConfigs = nullptr); + + ModelError_t addTensor(const char *nodeName, Qnn_Tensor_t *tensor, bool saveTensor = true); + + ModelError_t addTensor(const char *nodeName, Qnn_Tensor_t tensor, bool saveTensor = true); + + ModelError_t getQnnTensor(std::string nodeName, std::string tensorName, Qnn_Tensor_t &tensor); + + ModelError_t addNode(Qnn_OpConfigVersion_t version, + const char *name, + const char *packageName, + const char *type, + std::vector ¶ms, + std::vector inputNames, + std::vector &outputTensors); + + Qnn_GraphHandle_t getQnnGraph() { + return m_graph; + } + + std::string getQnnGraphName() { + return m_graphName; + } + + std::vector getGraphInputTensors() { + return m_modelInputTensors; + } + + std::vector getGraphOutputTensors() { + return m_modelOutputTensors; + } + + std::map> getOutputTensorMap() { + return m_modelOutputTensorMap; + } + + ModelError_t freeCachedTensors(); + +private: + Qnn_GraphHandle_t m_graph = nullptr; + std::string m_graphName; + bool m_debug = false; // flag to indicate if requested graph is to be run in debug mode + // (i.e. 
all intermediate tensors will be accessible to client) + // flag to indicate whether all addNode calls need to be validated + bool m_doNodeValidations = true; + + std::vector m_modelInputTensors; + std::vector m_modelOutputTensors; + // keeps track of graph tensors to enable creating Qnn nodes from tensor names + std::map m_modelTensorsMap; + std::map> m_modelOutputTensorMap; + + // Qnn Backend Interface Api + QNN_INTERFACE_VER_TYPE m_qnnInterface; + Qnn_BackendHandle_t m_backendHandle; + +}; // QNN_MODEL_CLASS + +// A helper function to convert QnnModel objects to Graph struct for qnn_model c +ModelError_t getSingleGraphInfoFromModel(QNNModel &model, GraphInfoPtr_t *graphInfoPtr); + +} // namespace mllm diff --git a/mllm/backends/qnn/QNNUtils.cpp b/mllm/backends/qnn/QNNUtils.cpp new file mode 100644 index 000000000..ba907c5c1 --- /dev/null +++ b/mllm/backends/qnn/QNNUtils.cpp @@ -0,0 +1,328 @@ +#include "QNNUtils.hpp" +#include "Log.h" +#include "QnnTypeMacros.hpp" +#include +#include + +namespace mllm { + +QnnInterfaceGetProvidersFn_t QnnInterface_getProviders = nullptr; + +bool loadQNNSymbol() { + MLLM_LOG_INFO_STREAM << "QNN Backend Lib: libQnnHtp.so"; + void *qnnLibHandle = nullptr; + qnnLibHandle = dlopen("libQnnHtp.so", RTLD_NOW | RTLD_LOCAL); + const char *errorOpen = dlerror(); + if (!qnnLibHandle) { + MLLM_LOG_ERROR_LEGACY("Failed to open QNN libs. Ensure that the libs related to the QNN HTP backend is available in your environment. dlerror() returns %s.\n", errorOpen); + return false; + } + + QnnInterface_getProviders = (QnnInterfaceGetProvidersFn_t)dlsym(qnnLibHandle, "QnnInterface_getProviders"); + const char *errorSym = dlerror(); + if (!QnnInterface_getProviders) { + MLLM_LOG_ERROR_LEGACY("Failed to load symbol . 
dlerror returns %s.\n", errorSym); + dlclose(qnnLibHandle); + return false; + } + + return true; +} + +QnnSystemInterfaceGetProvidersFn_t QnnSystemInterface_getProviders = nullptr; + +bool loadQNNSystemSymbol() { + void *systemLibraryHandle = dlopen("libQnnSystem.so", RTLD_NOW | RTLD_LOCAL); + const char *errorOpen = dlerror(); + if (!systemLibraryHandle) { + MLLM_LOG_ERROR_LEGACY("Failed to open QNN System libs. Ensure that the libs related to the QNN System backend is available in your environment. dlerror() returns %s.\n", errorOpen); + return false; + } + + QnnSystemInterface_getProviders = (QnnSystemInterfaceGetProvidersFn_t)dlsym(systemLibraryHandle, "QnnSystemInterface_getProviders"); + const char *errorSym = dlerror(); + if (!QnnSystemInterface_getProviders) { + MLLM_LOG_ERROR_LEGACY("Failed to load symbol . dlerror returns %s.\n", errorSym); + dlclose(systemLibraryHandle); + return false; + } + + return true; +} + +bool copyMetadataToGraphsInfo(const QnnSystemContext_BinaryInfo_t *binaryInfo, + GraphInfo_t **&graphsInfo, + uint32_t &graphsCount) { + if (nullptr == binaryInfo) { + MLLM_LOG_ERROR("binaryInfo is nullptr."); + return false; + } + graphsCount = 0; + if (binaryInfo->version == QNN_SYSTEM_CONTEXT_BINARY_INFO_VERSION_1) { + if (binaryInfo->contextBinaryInfoV1.graphs) { + if (!copyGraphsInfo(binaryInfo->contextBinaryInfoV1.graphs, + binaryInfo->contextBinaryInfoV1.numGraphs, + graphsInfo)) { + MLLM_LOG_ERROR("Failed while copying graphs Info."); + return false; + } + graphsCount = binaryInfo->contextBinaryInfoV1.numGraphs; + return true; + } + } else if (binaryInfo->version == QNN_SYSTEM_CONTEXT_BINARY_INFO_VERSION_2) { + if (binaryInfo->contextBinaryInfoV2.graphs) { + if (!copyGraphsInfo(binaryInfo->contextBinaryInfoV2.graphs, + binaryInfo->contextBinaryInfoV2.numGraphs, + graphsInfo)) { + MLLM_LOG_ERROR("Failed while copying graphs Info."); + return false; + } + graphsCount = binaryInfo->contextBinaryInfoV2.numGraphs; + return true; + } + } else 
if (binaryInfo->version == QNN_SYSTEM_CONTEXT_BINARY_INFO_VERSION_3) { + if (binaryInfo->contextBinaryInfoV3.graphs) { + if (!copyGraphsInfo(binaryInfo->contextBinaryInfoV3.graphs, + binaryInfo->contextBinaryInfoV3.numGraphs, + graphsInfo)) { + MLLM_LOG_ERROR("Failed while copying graphs Info."); + return false; + } + graphsCount = binaryInfo->contextBinaryInfoV3.numGraphs; + return true; + } + } + MLLM_LOG_ERROR("Unrecognized system context binary info version."); + return false; +} + +bool copyGraphsInfoV1(const QnnSystemContext_GraphInfoV1_t *graphInfoSrc, + GraphInfo_t *graphInfoDst) { + graphInfoDst->graphName = nullptr; + if (graphInfoSrc->graphName) { + graphInfoDst->graphName = + strndup(graphInfoSrc->graphName, strlen(graphInfoSrc->graphName)); + } + graphInfoDst->inputTensors = nullptr; + graphInfoDst->numInputTensors = 0; + if (graphInfoSrc->graphInputs) { + if (!copyTensorsInfo( + graphInfoSrc->graphInputs, graphInfoDst->inputTensors, graphInfoSrc->numGraphInputs)) { + return false; + } + graphInfoDst->numInputTensors = graphInfoSrc->numGraphInputs; + } + graphInfoDst->outputTensors = nullptr; + graphInfoDst->numOutputTensors = 0; + if (graphInfoSrc->graphOutputs) { + if (!copyTensorsInfo(graphInfoSrc->graphOutputs, + graphInfoDst->outputTensors, + graphInfoSrc->numGraphOutputs)) { + return false; + } + graphInfoDst->numOutputTensors = graphInfoSrc->numGraphOutputs; + } + return true; +} + +bool copyGraphsInfoV3(const QnnSystemContext_GraphInfoV3_t *graphInfoSrc, + GraphInfo_t *graphInfoDst) { + graphInfoDst->graphName = nullptr; + if (graphInfoSrc->graphName) { + graphInfoDst->graphName = + strndup(graphInfoSrc->graphName, strlen(graphInfoSrc->graphName)); + } + graphInfoDst->inputTensors = nullptr; + graphInfoDst->numInputTensors = 0; + if (graphInfoSrc->graphInputs) { + if (!copyTensorsInfo( + graphInfoSrc->graphInputs, graphInfoDst->inputTensors, graphInfoSrc->numGraphInputs)) { + return false; + } + graphInfoDst->numInputTensors = 
graphInfoSrc->numGraphInputs; + } + graphInfoDst->outputTensors = nullptr; + graphInfoDst->numOutputTensors = 0; + if (graphInfoSrc->graphOutputs) { + if (!copyTensorsInfo(graphInfoSrc->graphOutputs, + graphInfoDst->outputTensors, + graphInfoSrc->numGraphOutputs)) { + return false; + } + graphInfoDst->numOutputTensors = graphInfoSrc->numGraphOutputs; + } + return true; +} + +bool copyGraphsInfo(const QnnSystemContext_GraphInfo_t *graphsInput, + const uint32_t numGraphs, + GraphInfo_t **&graphsInfo) { + if (!graphsInput) { + MLLM_LOG_ERROR("Received nullptr for graphsInput."); + return false; + } + auto returnStatus = true; + graphsInfo = + (GraphInfo_t **)calloc(numGraphs, sizeof(GraphInfo_t *)); + GraphInfo_t *graphInfoArr = + (GraphInfo_t *)calloc(numGraphs, sizeof(GraphInfo_t)); + if (nullptr == graphsInfo || nullptr == graphInfoArr) { + MLLM_LOG_ERROR("Failure to allocate memory for *graphInfo"); + returnStatus = false; + } + if (true == returnStatus) { + for (size_t gIdx = 0; gIdx < numGraphs; gIdx++) { + if (graphsInput[gIdx].version == QNN_SYSTEM_CONTEXT_GRAPH_INFO_VERSION_1) { + copyGraphsInfoV1(&graphsInput[gIdx].graphInfoV1, &graphInfoArr[gIdx]); + } else if (graphsInput[gIdx].version == QNN_SYSTEM_CONTEXT_GRAPH_INFO_VERSION_3) { + copyGraphsInfoV3(&graphsInput[gIdx].graphInfoV3, &graphInfoArr[gIdx]); + } + graphsInfo[gIdx] = graphInfoArr + gIdx; + } + } + if (true != returnStatus) { + MLLM_LOG_ERROR("Received an ERROR during extractGraphsInfo. 
Freeing resources."); + if (graphsInfo) { + for (uint32_t gIdx = 0; gIdx < numGraphs; gIdx++) { + if (graphsInfo[gIdx]) { + if (nullptr != graphsInfo[gIdx]->graphName) { + free(graphsInfo[gIdx]->graphName); + graphsInfo[gIdx]->graphName = nullptr; + } + freeQnnTensors(graphsInfo[gIdx]->inputTensors, + graphsInfo[gIdx]->numInputTensors); + freeQnnTensors(graphsInfo[gIdx]->outputTensors, + graphsInfo[gIdx]->numOutputTensors); + } + } + free(*graphsInfo); + } + free(graphsInfo); + graphsInfo = nullptr; + } + return true; +} + +bool copyTensorsInfo(const Qnn_Tensor_t *tensorsInfoSrc, + Qnn_Tensor_t *&tensorWrappers, + uint32_t tensorsCount) { + auto returnStatus = true; + tensorWrappers = (Qnn_Tensor_t *)calloc(tensorsCount, sizeof(Qnn_Tensor_t)); + if (nullptr == tensorWrappers) { + MLLM_LOG_ERROR("Failed to allocate memory for tensorWrappers."); + return false; + } + for (size_t tIdx = 0; tIdx < tensorsCount; tIdx++) { + tensorWrappers[tIdx] = QNN_TENSOR_INIT; + deepCopyQnnTensorInfo(&tensorWrappers[tIdx], &tensorsInfoSrc[tIdx]); + } + return true; +} + +bool deepCopyQnnTensorInfo(Qnn_Tensor_t *dst, const Qnn_Tensor_t *src) { + if (nullptr == dst || nullptr == src) { + MLLM_LOG_ERROR("Received nullptr"); + return false; + } + // set tensor.version before using QNN_TENSOR_SET macros, as they require the version to be set + // to correctly assign values + dst->version = src->version; + const char *tensorName = QNN_TENSOR_GET_NAME(src); + if (!tensorName) { + QNN_TENSOR_SET_NAME(dst, nullptr); + } else { + QNN_TENSOR_SET_NAME(dst, ::strndup(tensorName, strlen(tensorName))); + } + QNN_TENSOR_SET_ID(dst, QNN_TENSOR_GET_ID(src)); + QNN_TENSOR_SET_TYPE(dst, QNN_TENSOR_GET_TYPE(src)); + QNN_TENSOR_SET_DATA_FORMAT(dst, QNN_TENSOR_GET_DATA_FORMAT(src)); + QNN_TENSOR_SET_DATA_TYPE(dst, QNN_TENSOR_GET_DATA_TYPE(src)); + Qnn_QuantizeParams_t qParams = QNN_QUANTIZE_PARAMS_INIT; + qParams.encodingDefinition = QNN_TENSOR_GET_QUANT_PARAMS(src).encodingDefinition; + 
qParams.quantizationEncoding = QNN_QUANTIZATION_ENCODING_UNDEFINED; + if (QNN_TENSOR_GET_QUANT_PARAMS(src).quantizationEncoding == QNN_QUANTIZATION_ENCODING_SCALE_OFFSET) { + qParams.quantizationEncoding = QNN_TENSOR_GET_QUANT_PARAMS(src).quantizationEncoding; + qParams.scaleOffsetEncoding = QNN_TENSOR_GET_QUANT_PARAMS(src).scaleOffsetEncoding; + } else if (QNN_TENSOR_GET_QUANT_PARAMS(src).quantizationEncoding == QNN_QUANTIZATION_ENCODING_AXIS_SCALE_OFFSET) { + qParams.quantizationEncoding = QNN_TENSOR_GET_QUANT_PARAMS(src).quantizationEncoding; + qParams.axisScaleOffsetEncoding.axis = + QNN_TENSOR_GET_QUANT_PARAMS(src).axisScaleOffsetEncoding.axis; + qParams.axisScaleOffsetEncoding.numScaleOffsets = + QNN_TENSOR_GET_QUANT_PARAMS(src).axisScaleOffsetEncoding.numScaleOffsets; + if (QNN_TENSOR_GET_QUANT_PARAMS(src).axisScaleOffsetEncoding.numScaleOffsets > 0) { + qParams.axisScaleOffsetEncoding.scaleOffset = (Qnn_ScaleOffset_t *)malloc( + QNN_TENSOR_GET_QUANT_PARAMS(src).axisScaleOffsetEncoding.numScaleOffsets * sizeof(Qnn_ScaleOffset_t)); + if (qParams.axisScaleOffsetEncoding.scaleOffset) { + for (size_t idx = 0; + idx < QNN_TENSOR_GET_QUANT_PARAMS(src).axisScaleOffsetEncoding.numScaleOffsets; + idx++) { + qParams.axisScaleOffsetEncoding.scaleOffset[idx].scale = + QNN_TENSOR_GET_QUANT_PARAMS(src).axisScaleOffsetEncoding.scaleOffset[idx].scale; + qParams.axisScaleOffsetEncoding.scaleOffset[idx].offset = + QNN_TENSOR_GET_QUANT_PARAMS(src).axisScaleOffsetEncoding.scaleOffset[idx].offset; + } + } + } + } + QNN_TENSOR_SET_QUANT_PARAMS(dst, qParams); + QNN_TENSOR_SET_RANK(dst, QNN_TENSOR_GET_RANK(src)); + QNN_TENSOR_SET_DIMENSIONS(dst, nullptr); + + auto memscpy = [](void *dst, size_t dstSize, const void *src, size_t copySize) -> size_t { + if (!dst || !src || !dstSize || !copySize) return 0; + + size_t minSize = dstSize < copySize ? 
dstSize : copySize; + + memcpy(dst, src, minSize); + + return minSize; + }; + if (QNN_TENSOR_GET_RANK(src) > 0) { + QNN_TENSOR_SET_DIMENSIONS(dst, (uint32_t *)malloc(QNN_TENSOR_GET_RANK(src) * sizeof(uint32_t))); + if (QNN_TENSOR_GET_DIMENSIONS(dst)) { + memscpy(QNN_TENSOR_GET_DIMENSIONS(dst), + QNN_TENSOR_GET_RANK(src) * sizeof(uint32_t), + QNN_TENSOR_GET_DIMENSIONS(src), + QNN_TENSOR_GET_RANK(src) * sizeof(uint32_t)); + } + if (QNN_TENSOR_GET_IS_DYNAMIC_DIMENSIONS(src)) { + QNN_TENSOR_SET_IS_DYNAMIC_DIMENSIONS( + dst, (uint8_t *)malloc(QNN_TENSOR_GET_RANK(src) * sizeof(uint8_t))); + memscpy(QNN_TENSOR_GET_IS_DYNAMIC_DIMENSIONS(dst), + QNN_TENSOR_GET_RANK(src) * sizeof(uint8_t), + QNN_TENSOR_GET_IS_DYNAMIC_DIMENSIONS(src), + QNN_TENSOR_GET_RANK(src) * sizeof(uint8_t)); + } + } + QNN_TENSOR_SET_SPARSE_PARAMS(dst, QNN_TENSOR_GET_SPARSE_PARAMS(src)); + return true; +} + +bool freeQnnTensor(Qnn_Tensor_t &tensor) { + // free all pointer allocations in struct + free((void *)QNN_TENSOR_GET_NAME(tensor)); + free(QNN_TENSOR_GET_DIMENSIONS(tensor)); + if (QNN_TENSOR_GET_IS_DYNAMIC_DIMENSIONS(tensor)) { + free(QNN_TENSOR_GET_IS_DYNAMIC_DIMENSIONS(tensor)); + } + auto quant = QNN_TENSOR_GET_QUANT_PARAMS(tensor); + auto encoding = quant.quantizationEncoding; + if (encoding == QNN_QUANTIZATION_ENCODING_AXIS_SCALE_OFFSET) { + if (quant.axisScaleOffsetEncoding.scaleOffset != nullptr) { + free(quant.axisScaleOffsetEncoding.scaleOffset); + } + } + return true; +} + +bool freeQnnTensors(Qnn_Tensor_t *&tensors, + uint32_t numTensors) { + // free all pointer allocations in struct + for (size_t i = 0; i < numTensors; i++) { + freeQnnTensor(tensors[i]); + } + free(tensors); + return true; +} + +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/qnn/QNNUtils.hpp b/mllm/backends/qnn/QNNUtils.hpp new file mode 100644 index 000000000..89b7fad5d --- /dev/null +++ b/mllm/backends/qnn/QNNUtils.hpp @@ -0,0 +1,74 @@ + +#pragma once + +#include "Log.h" +#include 
"QnnCommon.h" +#include "QnnInterface.h" +#include "System/QnnSystemInterface.h" + +namespace mllm { + +#define CALL_QNN(apiCall) \ + do { \ + int errorCode = ((apiCall) & 0xFFFF); \ + if (errorCode != QNN_SUCCESS) { \ + MLLM_LOG_ERROR("Error in file %s, line %d: error code %d\n", \ + __FILE__, __LINE__, errorCode); \ + assert(errorCode == QNN_SUCCESS); \ + } \ + } while (0) + +// func def for loading QNN Interface +typedef Qnn_ErrorHandle_t (*QnnInterfaceGetProvidersFn_t)(const QnnInterface_t ***providerList, + uint32_t *numProviders); +// func def of loading QNN System Interface +typedef Qnn_ErrorHandle_t (*QnnSystemInterfaceGetProvidersFn_t)( + const QnnSystemInterface_t ***providerList, uint32_t *numProviders); + +extern QnnInterfaceGetProvidersFn_t QnnInterface_getProviders; +extern QnnSystemInterfaceGetProvidersFn_t QnnSystemInterface_getProviders; + +bool loadQNNSymbol(); +bool loadQNNSystemSymbol(); + +// Utils for copying metadata to GraphInfo +typedef struct GraphInfo { + Qnn_GraphHandle_t graph; + char *graphName; + Qnn_Tensor_t *inputTensors; + uint32_t numInputTensors; + Qnn_Tensor_t *outputTensors; + uint32_t numOutputTensors; +} GraphInfo_t; +typedef GraphInfo_t *GraphInfoPtr_t; + +typedef struct GraphConfigInfo { + char *graphName; + const QnnGraph_Config_t **graphConfigs; +} GraphConfigInfo_t; + +bool copyMetadataToGraphsInfo(const QnnSystemContext_BinaryInfo_t *binaryInfo, + GraphInfo_t **&graphsInfo, + uint32_t &graphsCount); + +bool copyGraphsInfo(const QnnSystemContext_GraphInfo_t *graphsInput, + const uint32_t numGraphs, + GraphInfo_t **&graphsInfo); + +bool copyGraphsInfoV1(const QnnSystemContext_GraphInfoV1_t *graphInfoSrc, + GraphInfo_t *graphInfoDst); + +bool copyGraphsInfoV3(const QnnSystemContext_GraphInfoV3_t *graphInfoSrc, + GraphInfo_t *graphInfoDst); + +bool copyTensorsInfo(const Qnn_Tensor_t *tensorsInfoSrc, + Qnn_Tensor_t *&tensorWrappers, + uint32_t tensorsCount); + +bool deepCopyQnnTensorInfo(Qnn_Tensor_t *dst, const Qnn_Tensor_t 
*src); + +bool freeQnnTensor(Qnn_Tensor_t &tensor); + +bool freeQnnTensors(Qnn_Tensor_t *&tensors, uint32_t numTensors); + +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/qnn/QnnTypeMacros.hpp b/mllm/backends/qnn/QnnTypeMacros.hpp new file mode 100644 index 000000000..70c70d031 --- /dev/null +++ b/mllm/backends/qnn/QnnTypeMacros.hpp @@ -0,0 +1,713 @@ +//============================================================================== +// +// Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +// All rights reserved. +// Confidential and Proprietary - Qualcomm Technologies, Inc. +// +//============================================================================== + +// TODO: remove once the SNPE build for QNN core is sorted out +#pragma once + +#include "QnnTypes.h" + +#define QNN_OP_CFG_VALID(opConfig) ((opConfig).version == QNN_OPCONFIG_VERSION_1) + +/** + * @brief Verifies the tensor object passed is of supported Qnn_Tensor_t API version + * + * @param[in] tensor Qnn_Tensor_t object to validate + * + * @return Error code + */ +inline bool validateTensorVersion(Qnn_Tensor_t tensor) { + return !(tensor.version != QNN_TENSOR_VERSION_1 && tensor.version != QNN_TENSOR_VERSION_2); +} + +/** + * @brief Verifies the tensor object passed is of supported Qnn_OpConfig_t API version + * + * @param[in] tensor Qnn_OpConfig_t object to validate + * + * @return Error code + */ +inline bool validateOpConfigVersion(Qnn_OpConfig_t opConfig) { + return !(opConfig.version != QNN_OPCONFIG_VERSION_1); +} + +inline Qnn_OpConfig_t createQnnOpConfig(const Qnn_OpConfigVersion_t version) { + Qnn_OpConfig_t opConfig = QNN_OPCONFIG_INIT; + opConfig.version = version; + if (version == QNN_OPCONFIG_VERSION_1) { + opConfig.v1 = QNN_OPCONFIG_V1_INIT; + } + return opConfig; +} + +inline const char *getQnnOpConfigName(const Qnn_OpConfig_t &opConfig) { + if (opConfig.version == QNN_OPCONFIG_VERSION_1) { + return opConfig.v1.name; + } + return NULL; +} + 
+inline const char *getQnnOpConfigName(const Qnn_OpConfig_t *opConfig) { + return getQnnOpConfigName(*opConfig); +} + +inline const char *getQnnOpConfigPackageName(const Qnn_OpConfig_t &opConfig) { + if (opConfig.version == QNN_OPCONFIG_VERSION_1) { + return opConfig.v1.packageName; + } + return NULL; +} + +inline const char *getQnnOpConfigPackageName(const Qnn_OpConfig_t *opConfig) { + return getQnnOpConfigPackageName(*opConfig); +} + +inline const char *getQnnOpConfigTypeName(const Qnn_OpConfig_t &opConfig) { + if (opConfig.version == QNN_OPCONFIG_VERSION_1) { + return opConfig.v1.typeName; + } + return NULL; +} + +inline const char *getQnnOpConfigTypeName(const Qnn_OpConfig_t *opConfig) { + return getQnnOpConfigTypeName(*opConfig); +} + +inline uint32_t getQnnOpConfigNumParams(const Qnn_OpConfig_t &opConfig) { + if (opConfig.version == QNN_OPCONFIG_VERSION_1) { + return opConfig.v1.numOfParams; + } + return 0u; +} + +inline uint32_t getQnnOpConfigNumParams(const Qnn_OpConfig_t *opConfig) { + return getQnnOpConfigNumParams(*opConfig); +} + +inline Qnn_Param_t *getQnnOpConfigParams(const Qnn_OpConfig_t &opConfig) { + if (opConfig.version == QNN_OPCONFIG_VERSION_1) { + return opConfig.v1.params; + } + return NULL; +} + +inline Qnn_Param_t *getQnnOpConfigParams(const Qnn_OpConfig_t *opConfig) { + return getQnnOpConfigParams(*opConfig); +} + +inline uint32_t getQnnOpConfigNumInputs(const Qnn_OpConfig_t &opConfig) { + if (opConfig.version == QNN_OPCONFIG_VERSION_1) { + return opConfig.v1.numOfInputs; + } + return 0u; +} + +inline uint32_t getQnnOpConfigNumInputs(const Qnn_OpConfig_t *opConfig) { + return getQnnOpConfigNumInputs(*opConfig); +} + +inline Qnn_Tensor_t *getQnnOpConfigInputs(const Qnn_OpConfig_t &opConfig) { + if (opConfig.version == QNN_OPCONFIG_VERSION_1) { + return opConfig.v1.inputTensors; + } + return NULL; +} + +inline Qnn_Tensor_t *getQnnOpConfigInputs(const Qnn_OpConfig_t *opConfig) { + return getQnnOpConfigInputs(*opConfig); +} + +inline uint32_t 
getQnnOpConfigNumOutputs(const Qnn_OpConfig_t &opConfig) { + if (opConfig.version == QNN_OPCONFIG_VERSION_1) { + return opConfig.v1.numOfOutputs; + } + return 0u; +} + +inline uint32_t getQnnOpConfigNumOutputs(const Qnn_OpConfig_t *opConfig) { + return getQnnOpConfigNumOutputs(*opConfig); +} + +inline Qnn_Tensor_t *getQnnOpConfigOutputs(const Qnn_OpConfig_t &opConfig) { + if (opConfig.version == QNN_OPCONFIG_VERSION_1) { + return opConfig.v1.outputTensors; + } + return NULL; +} + +inline Qnn_Tensor_t *getQnnOpConfigOutputs(const Qnn_OpConfig_t *opConfig) { + return getQnnOpConfigOutputs(*opConfig); +} + +inline void setQnnOpConfigName(Qnn_OpConfig_t &opConfig, const char *name) { + if (opConfig.version == QNN_OPCONFIG_VERSION_1) { + opConfig.v1.name = name; + } +} + +inline void setQnnOpConfigName(Qnn_OpConfig_t *opConfig, const char *name) { + setQnnOpConfigName(*opConfig, name); +} + +inline void setQnnOpConfigPackageName(Qnn_OpConfig_t &opConfig, const char *packageName) { + if (opConfig.version == QNN_OPCONFIG_VERSION_1) { + opConfig.v1.packageName = packageName; + } +} + +inline void setQnnOpConfigPackageName(Qnn_OpConfig_t *opConfig, const char *packageName) { + setQnnOpConfigPackageName(*opConfig, packageName); +} + +inline void setQnnOpConfigTypeName(Qnn_OpConfig_t &opConfig, const char *typeName) { + if (opConfig.version == QNN_OPCONFIG_VERSION_1) { + opConfig.v1.typeName = typeName; + } +} + +inline void setQnnOpConfigTypeName(Qnn_OpConfig_t *opConfig, const char *typeName) { + setQnnOpConfigTypeName(*opConfig, typeName); +} + +inline void setQnnOpConfigParams(Qnn_OpConfig_t &opConfig, + uint32_t numOfParams, + Qnn_Param_t *params) { + if (opConfig.version == QNN_OPCONFIG_VERSION_1) { + opConfig.v1.numOfParams = numOfParams; + opConfig.v1.params = params; + } +} + +inline void setQnnOpConfigParams(Qnn_OpConfig_t *opConfig, + uint32_t numOfParams, + Qnn_Param_t *params) { + setQnnOpConfigParams(*opConfig, numOfParams, params); +} + +inline void 
setQnnOpConfigInputs(Qnn_OpConfig_t &opConfig, + uint32_t numOfInputs, + Qnn_Tensor_t *inputTensors) { + if (opConfig.version == QNN_OPCONFIG_VERSION_1) { + opConfig.v1.numOfInputs = numOfInputs; + opConfig.v1.inputTensors = inputTensors; + } +} + +inline void setQnnOpConfigInputs(Qnn_OpConfig_t *opConfig, + uint32_t numOfInputs, + Qnn_Tensor_t *inputTensors) { + setQnnOpConfigInputs(*opConfig, numOfInputs, inputTensors); +} + +inline void setQnnOpConfigOutputs(Qnn_OpConfig_t &opConfig, + uint32_t numOfOutputs, + Qnn_Tensor_t *outputTensors) { + if (opConfig.version == QNN_OPCONFIG_VERSION_1) { + opConfig.v1.numOfOutputs = numOfOutputs; + opConfig.v1.outputTensors = outputTensors; + } +} + +inline void setQnnOpConfigOutputs(Qnn_OpConfig_t *opConfig, + uint32_t numOfOutputs, + Qnn_Tensor_t *outputTensors) { + setQnnOpConfigOutputs(*opConfig, numOfOutputs, outputTensors); +} + +inline Qnn_Tensor_t createQnnTensor(const Qnn_TensorVersion_t version) { + Qnn_Tensor_t tensor = QNN_TENSOR_INIT; + tensor.version = version; + if (version == QNN_TENSOR_VERSION_1) { + tensor.v1 = QNN_TENSOR_V1_INIT; + } else if (version == QNN_TENSOR_VERSION_2) { + tensor.v2 = QNN_TENSOR_V2_INIT; + } + return tensor; +} + +inline uint32_t getQnnTensorId(const Qnn_Tensor_t &tensor) { + // TensorCompatTest justifies no need to check version + return tensor.v1.id; +} + +inline uint32_t getQnnTensorId(const Qnn_Tensor_t *tensor) { + return getQnnTensorId(*tensor); +} + +inline const char *getQnnTensorName(const Qnn_Tensor_t &tensor) { + // TensorCompatTest justifies no need to check version + return tensor.v1.name; +} +inline const char *getQnnTensorName(const Qnn_Tensor_t *tensor) { + return getQnnTensorName(*tensor); +} + +inline Qnn_TensorType_t getQnnTensorType(const Qnn_Tensor_t &tensor) { + // TensorCompatTest justifies no need to check version + return tensor.v1.type; +} + +inline Qnn_TensorType_t getQnnTensorType(const Qnn_Tensor_t *tensor) { + return getQnnTensorType(*tensor); +} + 
+inline Qnn_TensorDataFormat_t getQnnTensorDataFormat(const Qnn_Tensor_t &tensor) { + // TensorCompatTest justifies no need to check version + return tensor.v1.dataFormat; +} + +inline Qnn_TensorDataFormat_t getQnnTensorDataFormat(const Qnn_Tensor_t *tensor) { + return getQnnTensorDataFormat(*tensor); +} + +inline Qnn_DataType_t getQnnTensorDataType(const Qnn_Tensor_t &tensor) { + // TensorCompatTest justifies no need to check version + return tensor.v1.dataType; +} + +inline Qnn_DataType_t getQnnTensorDataType(const Qnn_Tensor_t *tensor) { + return getQnnTensorDataType(*tensor); +} + +inline Qnn_QuantizeParams_t getQnnTensorQuantParams(const Qnn_Tensor_t &tensor) { + // TensorCompatTest justifies no need to check version + return tensor.v1.quantizeParams; +} + +inline Qnn_QuantizeParams_t getQnnTensorQuantParams(const Qnn_Tensor_t *const tensor) { + if (tensor != nullptr) { + return getQnnTensorQuantParams(*tensor); + } + return QNN_QUANTIZE_PARAMS_INIT; +} + +inline uint32_t getQnnTensorRank(const Qnn_Tensor_t &tensor) { + // TensorCompatTest justifies no need to check version + return tensor.v1.rank; +} + +inline uint32_t getQnnTensorRank(const Qnn_Tensor_t *const tensor) { + if (tensor != nullptr) { + return getQnnTensorRank(*tensor); + } + return 0u; +} + +inline uint32_t *getQnnTensorDimensions(const Qnn_Tensor_t &tensor) { + // TensorCompatTest justifies no need to check version + return tensor.v1.dimensions; +} + +inline uint32_t *getQnnTensorDimensions(const Qnn_Tensor_t *tensor) { + return getQnnTensorDimensions(*tensor); +} + +inline uint8_t *getQnnTensorIsDynamicDimensions(const Qnn_Tensor_t &tensor) { + if (tensor.version == QNN_TENSOR_VERSION_2) { + return tensor.v2.isDynamicDimensions; + } + return NULL; +} + +inline uint8_t *getQnnTensorIsDynamicDimensions(const Qnn_Tensor_t *tensor) { + return getQnnTensorIsDynamicDimensions(*tensor); +} + +inline Qnn_SparseParams_t getQnnTensorSparseParams(const Qnn_Tensor_t &tensor) { + if (tensor.version == 
QNN_TENSOR_VERSION_2) { + return tensor.v2.sparseParams; + } + return QNN_SPARSE_PARAMS_INIT; +} + +inline Qnn_SparseParams_t getQnnTensorSparseParams(const Qnn_Tensor_t *tensor) { + return getQnnTensorSparseParams(*tensor); +} + +inline Qnn_TensorMemType_t getQnnTensorMemType(const Qnn_Tensor_t &tensor) { + // TensorCompatTest justifies no need to check version + return tensor.v1.memType; +} + +inline Qnn_TensorMemType_t getQnnTensorMemType(const Qnn_Tensor_t *tensor) { + return getQnnTensorMemType(*tensor); +} + +inline Qnn_ClientBuffer_t getQnnTensorClientBuf(const Qnn_Tensor_t &tensor) { + // TensorCompatTest justifies no need to check version + return tensor.v1.clientBuf; +} + +inline Qnn_ClientBuffer_t getQnnTensorClientBuf(const Qnn_Tensor_t *tensor) { + return getQnnTensorClientBuf(*tensor); +} + +inline Qnn_MemHandle_t getQnnTensorMemHandle(const Qnn_Tensor_t &tensor) { + // TensorCompatTest justifies no need to check version + return tensor.v1.memHandle; +} + +inline Qnn_MemHandle_t getQnnTensorMemHandle(const Qnn_Tensor_t *tensor) { + return getQnnTensorMemHandle(*tensor); +} + +inline void setQnnTensorId(Qnn_Tensor_t &tensor, const uint32_t id) { + // TensorCompatTest justifies no need to check version + tensor.v1.id = id; +} + +inline void setQnnTensorId(Qnn_Tensor_t *tensor, uint32_t id) { + setQnnTensorId(*tensor, id); +} + +inline void setQnnTensorName(Qnn_Tensor_t &tensor, const char *const name) { + // TensorCompatTest justifies no need to check version + tensor.v1.name = name; +} + +inline void setQnnTensorName(Qnn_Tensor_t *tensor, const char *name) { + setQnnTensorName(*tensor, name); +} + +inline void setQnnTensorType(Qnn_Tensor_t &tensor, Qnn_TensorType_t type) { + // TensorCompatTest justifies no need to check version + tensor.v1.type = type; +} + +inline void setQnnTensorType(Qnn_Tensor_t *tensor, Qnn_TensorType_t type) { + setQnnTensorType(*tensor, type); +} + +inline void setQnnTensorDataFormat(Qnn_Tensor_t &tensor, const 
Qnn_TensorDataFormat_t dataFormat) { + // TensorCompatTest justifies no need to check version + tensor.v1.dataFormat = dataFormat; +} + +inline void setQnnTensorDataFormat(Qnn_Tensor_t *tensor, Qnn_TensorDataFormat_t format) { + setQnnTensorDataFormat(*tensor, format); +} + +inline void setQnnTensorDataType(Qnn_Tensor_t &tensor, const Qnn_DataType_t dataType) { + // TensorCompatTest justifies no need to check version + tensor.v1.dataType = dataType; +} + +inline void setQnnTensorDataType(Qnn_Tensor_t *tensor, Qnn_DataType_t dataType) { + setQnnTensorDataType(*tensor, dataType); +} + +inline void setQnnTensorQuantParams(Qnn_Tensor_t &tensor, + const Qnn_QuantizeParams_t quantizeParams) { + // TensorCompatTest justifies no need to check version + tensor.v1.quantizeParams = quantizeParams; +} + +inline void setQnnTensorQuantParams(Qnn_Tensor_t *tensor, Qnn_QuantizeParams_t params) { + setQnnTensorQuantParams(*tensor, params); +} + +inline void setQnnTensorRank(Qnn_Tensor_t &tensor, const uint32_t rank) { + // TensorCompatTest justifies no need to check version + tensor.v1.rank = rank; +} + +inline void setQnnTensorRank(Qnn_Tensor_t *tensor, uint32_t rank) { + setQnnTensorRank(*tensor, rank); +} + +inline void setQnnTensorDimensions(Qnn_Tensor_t &tensor, uint32_t *const dimensions) { + // TensorCompatTest justifies no need to check version + tensor.v1.dimensions = dimensions; +} + +inline void setQnnTensorDimensions(Qnn_Tensor_t *tensor, uint32_t *dims) { + setQnnTensorDimensions(*tensor, dims); +} + +inline void setQnnTensorIsDynamicDimensions(Qnn_Tensor_t &tensor, uint8_t *isDynamic) { + if (tensor.version == QNN_TENSOR_VERSION_2) { + tensor.v2.isDynamicDimensions = isDynamic; + } +} + +inline void setQnnTensorIsDynamicDimensions(Qnn_Tensor_t *tensor, uint8_t *isDynamic) { + setQnnTensorIsDynamicDimensions(*tensor, isDynamic); +} + +inline void setQnnTensorSparseParams(Qnn_Tensor_t &tensor, Qnn_SparseParams_t sparseParams) { + if (tensor.version == 
QNN_TENSOR_VERSION_2) { + tensor.v2.sparseParams = sparseParams; + } +} + +inline void setQnnTensorSparseParams(Qnn_Tensor_t *tensor, Qnn_SparseParams_t sparseParams) { + setQnnTensorSparseParams(*tensor, sparseParams); +} + +inline void setQnnTensorMemType(Qnn_Tensor_t &tensor, const Qnn_TensorMemType_t memType) { + // TensorCompatTest justifies no need to check version + tensor.v1.memType = memType; +} + +inline void setQnnTensorMemType(Qnn_Tensor_t *tensor, Qnn_TensorMemType_t memType) { + setQnnTensorMemType(*tensor, memType); +} + +inline void setQnnTensorClientBuf(Qnn_Tensor_t &tensor, const Qnn_ClientBuffer_t clientBuf) { + // TensorCompatTest justifies no need to check version + tensor.v1.clientBuf = clientBuf; +} + +inline void setQnnTensorClientBuf(Qnn_Tensor_t *tensor, Qnn_ClientBuffer_t clientBuf) { + setQnnTensorClientBuf(*tensor, clientBuf); +} + +inline void setQnnTensorMemHandle(Qnn_Tensor_t &tensor, const Qnn_MemHandle_t memHandle) { + // TensorCompatTest justifies no need to check version + tensor.v1.memHandle = memHandle; +} + +inline void setQnnTensorMemHandle(Qnn_Tensor_t *tensor, Qnn_MemHandle_t handle) { + setQnnTensorMemHandle(*tensor, handle); +} + +inline void setQnnTensorClientBufRetrieve(Qnn_Tensor_t &tensor, + Qnn_TensorRetrieveRaw_t *const retrieve) { + if (tensor.version == QNN_TENSOR_VERSION_2) { + tensor.v2.retrieveRaw = retrieve; + } +} +inline void setQnnTensorClientBufRetrieve(Qnn_Tensor_t *const tensor, + Qnn_TensorRetrieveRaw_t *const retrieve) { + setQnnTensorClientBufRetrieve(*tensor, retrieve); +} +inline void setQnnTensorClientBufRetrieve(Qnn_Tensor_t &tensor, Qnn_TensorRetrieveRaw_t &retrieve) { + setQnnTensorClientBufRetrieve(tensor, &retrieve); +} +inline void setQnnTensorClientBufRetrieve(Qnn_Tensor_t *const tensor, + Qnn_TensorRetrieveRaw_t &retrieve) { + setQnnTensorClientBufRetrieve(*tensor, &retrieve); +} + +inline Qnn_TensorRetrieveRaw_t *getQnnTensorClientBufRetrieve(const Qnn_Tensor_t &tensor) { + if 
(tensor.version == QNN_TENSOR_VERSION_2) { + return tensor.v2.retrieveRaw; + } + return nullptr; +} +inline Qnn_TensorRetrieveRaw_t *getQnnTensorClientBufRetrieve(const Qnn_Tensor_t *const tensor) { + return getQnnTensorClientBufRetrieve(*tensor); +} + +inline Qnn_TensorSet_t createQnnTensorSet(const Qnn_TensorSetVersion_t version) { + Qnn_TensorSet_t tensorSet = QNN_TENSOR_SET_INIT; + tensorSet.version = version; + if (version == QNN_TENSOR_SET_VERSION_1) { + tensorSet.v1 = QNN_TENSOR_SET_V1_INIT; + } + return tensorSet; +} + +inline uint32_t getQnnTensorSetNumInputs(const Qnn_TensorSet_t &tensorSet) { + if (tensorSet.version == QNN_TENSOR_SET_VERSION_1) { + return tensorSet.v1.numInputs; + } + return 0; +} + +inline uint32_t getQnnTensorSetNumInputs(const Qnn_TensorSet_t *tensorSet) { + return getQnnTensorSetNumInputs(*tensorSet); +} + +inline Qnn_Tensor_t *getQnnTensorSetInputTensors(const Qnn_TensorSet_t &tensorSet) { + if (tensorSet.version == QNN_TENSOR_SET_VERSION_1) { + return tensorSet.v1.inputs; + } + return 0; +} + +inline Qnn_Tensor_t *getQnnTensorSetInputTensors(const Qnn_TensorSet_t *tensorSet) { + return getQnnTensorSetInputTensors(*tensorSet); +} + +inline uint32_t getQnnTensorSetNumOutputs(const Qnn_TensorSet_t &tensorSet) { + if (tensorSet.version == QNN_TENSOR_SET_VERSION_1) { + return tensorSet.v1.numOutputs; + } + return 0; +} + +inline uint32_t getQnnTensorSetNumOutputs(const Qnn_TensorSet_t *tensorSet) { + return getQnnTensorSetNumOutputs(*tensorSet); +} + +inline Qnn_Tensor_t *getQnnTensorSetOutputTensors(const Qnn_TensorSet_t &tensorSet) { + if (tensorSet.version == QNN_TENSOR_SET_VERSION_1) { + return tensorSet.v1.outputs; + } + return 0; +} + +inline Qnn_Tensor_t *getQnnTensorSetOutputTensors(const Qnn_TensorSet_t *tensorSet) { + return getQnnTensorSetOutputTensors(*tensorSet); +} + +inline void setQnnTensorSetInputTensors(Qnn_TensorSet_t &tensorSet, + Qnn_Tensor_t *inputTensors, + uint32_t const numInputs) { + if (tensorSet.version == 
QNN_TENSOR_SET_VERSION_1) { + tensorSet.v1.inputs = inputTensors; + tensorSet.v1.numInputs = numInputs; + } +} + +inline void setQnnTensorSetInputTensors(Qnn_TensorSet_t *tensorSet, + Qnn_Tensor_t *inputTensors, + uint32_t const numInputs) { + setQnnTensorSetInputTensors(*tensorSet, inputTensors, numInputs); +} + +inline void setQnnTensorSetOutputTensors(Qnn_TensorSet_t &tensorSet, + Qnn_Tensor_t *outputTensors, + const uint32_t numOutputs) { + if (tensorSet.version == QNN_TENSOR_SET_VERSION_1) { + tensorSet.v1.outputs = outputTensors; + tensorSet.v1.numOutputs = numOutputs; + } +} + +inline void setQnnTensorSetOutputTensors(Qnn_TensorSet_t *tensorSet, + Qnn_Tensor_t *outputTensors, + const uint32_t numOutputs) { + setQnnTensorSetOutputTensors(*tensorSet, outputTensors, numOutputs); +} + +// Validation +#define VALIDATE_TENSOR_VERSION(tensor, err) validateTensorVersion(tensor) +#define VALIDATE_OP_CONFIG_VERSION(op, err) validateOpConfigVersion(op) + +// Creator for QNN Op Config +#define QNN_OP_CFG_CREATE(version) createQnnOpConfig(version) + +// Accessors for QNN Op Config +#define QNN_OP_CFG_GET_NAME(opConfig) getQnnOpConfigName(opConfig) +#define QNN_OP_CFG_GET_PACKAGE_NAME(opConfig) getQnnOpConfigPackageName(opConfig) +#define QNN_OP_CFG_GET_TYPE_NAME(opConfig) getQnnOpConfigTypeName(opConfig) +#define QNN_OP_CFG_GET_NUM_PARAMS(opConfig) getQnnOpConfigNumParams(opConfig) +#define QNN_OP_CFG_GET_PARAMS(opConfig) getQnnOpConfigParams(opConfig) +#define QNN_OP_CFG_GET_NUM_INPUTS(opConfig) getQnnOpConfigNumInputs(opConfig) +#define QNN_OP_CFG_GET_INPUTS(opConfig) getQnnOpConfigInputs(opConfig) +#define QNN_OP_CFG_GET_NUM_OUTPUTS(opConfig) getQnnOpConfigNumOutputs(opConfig) +#define QNN_OP_CFG_GET_OUTPUTS(opConfig) getQnnOpConfigOutputs(opConfig) + +// Modifiers for QNN Op Config +#define QNN_OP_CFG_SET_NAME(opConfig, value) setQnnOpConfigName(opConfig, value) +#define QNN_OP_CFG_SET_PACKAGE_NAME(opConfig, value) setQnnOpConfigPackageName(opConfig, value) +#define 
QNN_OP_CFG_SET_TYPE_NAME(opConfig, value) setQnnOpConfigTypeName(opConfig, value) +#define QNN_OP_CFG_SET_PARAMS(opConfig, numOfParams, params) \ + setQnnOpConfigParams(opConfig, numOfParams, params) +#define QNN_OP_CFG_SET_INPUTS(opConfig, numOfInputs, inputTensors) \ + setQnnOpConfigInputs(opConfig, numOfInputs, inputTensors) +#define QNN_OP_CFG_SET_OUTPUTS(opConfig, numOfOutputs, outputTensors) \ + setQnnOpConfigOutputs(opConfig, numOfOutputs, outputTensors) + +// Creator for QNN Tensor +#define QNN_TENSOR_CREATE(version) createQnnTensor(version) + +// Accessors for QNN Tensor +#define QNN_TENSOR_GET_ID(tensor) getQnnTensorId(tensor) +#define QNN_TENSOR_GET_NAME(tensor) getQnnTensorName(tensor) +#define QNN_TENSOR_GET_TYPE(tensor) getQnnTensorType(tensor) +#define QNN_TENSOR_GET_DATA_FORMAT(tensor) getQnnTensorDataFormat(tensor) +#define QNN_TENSOR_GET_DATA_TYPE(tensor) getQnnTensorDataType(tensor) +#define QNN_TENSOR_GET_QUANT_PARAMS(tensor) getQnnTensorQuantParams(tensor) +#define QNN_TENSOR_GET_RANK(tensor) getQnnTensorRank(tensor) +#define QNN_TENSOR_GET_DIMENSIONS(tensor) getQnnTensorDimensions(tensor) +#define QNN_TENSOR_GET_IS_DYNAMIC_DIMENSIONS(tensor) getQnnTensorIsDynamicDimensions(tensor) +#define QNN_TENSOR_GET_SPARSE_PARAMS(tensor) getQnnTensorSparseParams(tensor) +#define QNN_TENSOR_GET_MEM_TYPE(tensor) getQnnTensorMemType(tensor) +#define QNN_TENSOR_GET_CLIENT_BUF(tensor) getQnnTensorClientBuf(tensor) +#define QNN_TENSOR_GET_MEM_HANDLE(tensor) getQnnTensorMemHandle(tensor) +#define QNN_TENSOR_GET_CLIENT_BUF_RETRIEVE(tensor) getQnnTensorClientBufRetrieve(tensor) + +// Modifiers for QNN Tensor +#define QNN_TENSOR_SET_ID(tensor, value) setQnnTensorId(tensor, value) +#define QNN_TENSOR_SET_NAME(tensor, value) setQnnTensorName(tensor, value) +#define QNN_TENSOR_SET_TYPE(tensor, value) setQnnTensorType(tensor, value) +#define QNN_TENSOR_SET_DATA_FORMAT(tensor, value) setQnnTensorDataFormat(tensor, value) +#define QNN_TENSOR_SET_DATA_TYPE(tensor, value) 
setQnnTensorDataType(tensor, value) +#define QNN_TENSOR_SET_QUANT_PARAMS(tensor, value) setQnnTensorQuantParams(tensor, value) +#define QNN_TENSOR_SET_RANK(tensor, value) setQnnTensorRank(tensor, value) +#define QNN_TENSOR_SET_DIMENSIONS(tensor, value) setQnnTensorDimensions(tensor, value) +#define QNN_TENSOR_SET_IS_DYNAMIC_DIMENSIONS(tensor, value) \ + setQnnTensorIsDynamicDimensions(tensor, value) +#define QNN_TENSOR_SET_SPARSE_PARAMS(tensor, value) setQnnTensorSparseParams(tensor, value) +#define QNN_TENSOR_SET_MEM_TYPE(tensor, value) setQnnTensorMemType(tensor, value) +#define QNN_TENSOR_SET_CLIENT_BUF(tensor, value) setQnnTensorClientBuf(tensor, value) +#define QNN_TENSOR_SET_MEM_HANDLE(tensor, value) setQnnTensorMemHandle(tensor, value) +#define QNN_TENSOR_SET_CLIENT_BUF_RETRIEVE(tensor, value) \ + setQnnTensorClientBufRetrieve(tensor, value) + +// Creator for QNN Tensor Set +#define QNN_TENSORSET_CREATE(version) createQnnTensorSet(version) + +// Accessors for QNN Tensor Set +#define QNN_TENSORSET_GET_NUM_INPUTS(tensorSet) getQnnTensorSetNumInputs(tensorSet) +#define QNN_TENSORSET_GET_INPUT_TENSORS(tensorSet) getQnnTensorSetInputTensors(tensorSet) +#define QNN_TENSORSET_GET_NUM_OUTPUTS(tensorSet) getQnnTensorSetNumOutputs(tensorSet) +#define QNN_TENSORSET_GET_OUTPUT_TENSORS(tensorSet) getQnnTensorSetOutputTensors(tensorSet) + +// Modifiers for QNN Tensor Set +#define QNN_TENSORSET_SET_INPUT_TENSORS(tensorSet, inputTensors, numInputs) \ + setQnnTensorSetInputTensors(tensorSet, inputTensors, numInputs) +#define QNN_TENSORSET_SET_OUTPUT_TENSORS(tensorSet, outputTensors, numOutputs) \ + setQnnTensorSetOutputTensors(tensorSet, outputTensors, numOutputs) + +inline bool isQnnTensorV1Compatible(const Qnn_Tensor_t &tensor) { + if (tensor.version == QNN_TENSOR_VERSION_2) { + if (tensor.v2.isDynamicDimensions != NULL) { + return false; + } + if (tensor.v2.dataFormat == QNN_TENSOR_DATA_FORMAT_SPARSE) { + return false; + } + } + return true; +} +inline bool 
isQnnTensorV1Compatible(const Qnn_Tensor_t *const tensor) { + return isQnnTensorV1Compatible(*tensor); +} +inline bool isQnnTensorV1Compatible(const Qnn_OpConfig_t &opConfig) { + if ((QNN_OP_CFG_GET_INPUTS(opConfig) != NULL) && (QNN_OP_CFG_GET_NUM_INPUTS(opConfig) > 0u)) { + for (uint32_t tensorIdx = 0u; tensorIdx < QNN_OP_CFG_GET_NUM_INPUTS(opConfig); tensorIdx++) { + if (!isQnnTensorV1Compatible(QNN_OP_CFG_GET_INPUTS(opConfig)[tensorIdx])) { + return false; + } + } + } + if ((QNN_OP_CFG_GET_OUTPUTS(opConfig) != NULL) && (QNN_OP_CFG_GET_NUM_OUTPUTS(opConfig) > 0u)) { + for (uint32_t tensorIdx = 0u; tensorIdx < QNN_OP_CFG_GET_NUM_OUTPUTS(opConfig); tensorIdx++) { + if (!isQnnTensorV1Compatible(QNN_OP_CFG_GET_OUTPUTS(opConfig)[tensorIdx])) { + return false; + } + } + } + if ((QNN_OP_CFG_GET_PARAMS(opConfig) != NULL) && (QNN_OP_CFG_GET_NUM_PARAMS(opConfig) > 0)) { + for (uint32_t paramIdx = 0u; paramIdx < QNN_OP_CFG_GET_NUM_PARAMS(opConfig); paramIdx++) { + const Qnn_Param_t ¶m = QNN_OP_CFG_GET_PARAMS(opConfig)[paramIdx]; + if (QNN_PARAMTYPE_TENSOR == param.paramType) { + if (!isQnnTensorV1Compatible(param.tensorParam)) { + return false; + } + } + } + } + return true; +} +inline bool isQnnTensorV1Compatible(const Qnn_OpConfig_t *const opConfig) { + return isQnnTensorV1Compatible(*opConfig); +} \ No newline at end of file diff --git a/mllm/backends/qnn/README.md b/mllm/backends/qnn/README.md new file mode 100644 index 000000000..54a6ccb92 --- /dev/null +++ b/mllm/backends/qnn/README.md @@ -0,0 +1,237 @@ +# Qualcomm AI Engine Direct(QNN/QAIRT) backend + +QNN Backend has supported running 1-3B LLMs and VLMs with full NPU vision encoder offload. Due to the memory constraint of online computation graph building, larger models may not be supported. Also, the QNN backend currently only speedups the prefilling stage of the LLM, thus needing another CPU model to do the decoding stage. Future support for QNN graph switching and decoding is under development. 
+ +Below describes how to set up the QNN environment, compile the QNN op package, convert the model, build and run the project with QNN backend. + +## QNN Environment Set Up +This section is basically following the QNN documentation, for more details, see: [QNN Linux Setup](https://docs.qualcomm.com/bundle/publicresource/topics/80-63442-50/linux_setup.html). +The QNN backend relies on the Qualcomm QNN SDK and Hexagon SDK to compile QNN Backends and LLM-specific operators. The QNN SDK can be downloaded [here](https://www.qualcomm.com/developer/software/qualcomm-ai-engine-direct-sdk). The Hexagon SDK can be downloaded using [QPM](https://qpm.qualcomm.com/). The compiling environment only supports Linux now. + +Version requirements: +* QNN: [Linux v2.34+](https://qpm.qualcomm.com/#/main/tools/details/qualcomm_neural_processing_sdk) +* Hexagon SDK: [Linux 5.x](https://qpm.qualcomm.com/#/main/tools/details/HexagonSDK5.x) (Some accounts may have no permission to access this SDK and may need to contact Qualcomm for support.) + +**NOTE:** After downloading the QNN SDK, unzip the file and move the folder name like `qairt/v2.34.0.250424` to `mllm/backends/qnn/` and rename the version to 'sdk'. The folder structure should be like `mllm/backends/qnn/sdk`. + +After downloading and installing the two SDKs use "qpm-cli", set up the sdk environment by running the following commands: + +```bash +source /bin/envsetup.sh +source /setup_sdk_env.source +``` + +After setting up the environment, you will have following ENV variables: + +* QNN_SDK_ROOT=/path/to/your/qnn/sdk +* HEXAGON_SDK_ROOT=/path/to/your/hexagon/sdk + +## Op Package Compile + +To use QNN offload, the CPU & HTP QNN op package are needed, the following scripts will build QNN op package needed by the project. `QNN_SDK_ROOT`, `HEXAGON_SDK_ROOT` and `ANDROID_NDK_ROOT` should be set in the environment. 
+ +```bash +cd mllm/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/ +make htp_aarch64 && make htp_v75 +``` + +## Model Conversion + +The model used by QNN prefilling is in int8 format, with static per-tensor quantization. We have two techniques to improve the accuracy of the model: + +**Shadow Outlier Execution**: This technique selectively preserves the precision of specific layers by identifying outlier activations and applying a threshold-based selection (using `t01m_clip_threshold`, which refers to the activation scale threshold after removing the top 0.1% outliers compared to the original scale). By doing so, it accelerates computation on low-precision NPUs while minimizing accuracy loss. + +![Shadow Execution](../../../assets/shadow_execution.png) + +**Rotation**: Rotation quantization is a technique used to improve model quantization performance by applying rotational transformations to model weights and activations before quantization. This reduces quantization error and improves the accuracy of quantized models. + +The rotation quantization process is an implementation of [SpinQuant](https://arxiv.org/abs/2405.16406) and [QuaRot](https://arxiv.org/abs/2404.00456) for different models like Qwen. We are not intented to do exactly the same things as SpinQuant and QuaRot, instead we provide a framework to customize rotation operations for any models you want to use. + +![Rotation](../../../assets/rotation.png) + +The tools are under `tools/qnn_convertor` and `tools/rotation`. Below describes the usage of the tools. + +The quantization process consists of three main steps: + +1. Profile Activation Distributions: Collect statistical information about layer activations and generate rotation matrices +2. Export QNN Model: Quantize the model using collected statistics and export in QNN-compatible format +3. 
Export FP32 Rotated Model: Export the rotated FP32 model for CPU deployment + +Use the get_distribution.py script to collect activation distribution information and generate rotation matrices: + +```bash +# under tools/qnn_convertor +python get_distribution.py --config_file config/qwen1.5-1.8b.json +``` + +The profiling step requires a representative dataset to collect activation statistics. +In our example configuration: +```json +"profile_config": { + "dataset_path": "path/to/pile-val-backup/", + ... +} +``` +we use a subset of The Pile dataset (pile-val-backup). +The original hosting site for The Pile (the-eye.eu) has permanently removed the dataset due to copyright concerns. +You can use an uncopyrighted subset of The Pile as a drop-in replacement, which is available on HuggingFace:[HuggingFace: pile-uncopyrighted](https://huggingface.co/datasets/monology/pile-uncopyrighted). + +Example configuration file (config/qwen1.5-1.8b.json): +```json +{ + "profile_config": { + "dataset_path": "path/to/pile-val-backup/", + "output_path": "./dis/qwen1.5-1.8b-rot-dis.json", + "num_samples": 2, + "no_bias": true, + "model_config": { + "model_type": "qwen2", + "tokenizer_name": "path/to/Qwen1.5-1.8B-Chat", + "model_name": "path/to/Qwen1.5-1.8B-Chat", + "online_rotation": true, + "random_rotate": true, + "save_rotation": "./R/qwen1.5-1.8b-rotation-matrix.bin", + "R_path": "see explanation below" + } + }, + ... +} +``` + +Key parameters: + +- dataset_path: Path to the dataset used for analysis +- output_path: Path to save activation distribution information +- num_samples: Number of samples to analyze +- no_bias: Whether to ignore bias terms +- online_rotation: Whether to rotate the model online(rotate after loading model). Note that `online_rotation` should be set to true if we are going to convert an original model that has not been rotated. Otherwise, `online_rotation` should be set to false. 
+- random_rotate: Whether to use random rotation matrices +- save_rotation: Path to save rotation matrices +- R_path: Path to predefined rotation matrix. When specifying the rotation matrix, `random_rotate` and `R_path` are mutually exclusive + +Use export_qnn_model.py to export the quantized QNN model: +```bash +python export_qnn_model.py --config_file config/qwen1.5-1.8b.json +``` + +The export_config section in the configuration file: +```json +{ + ... + "export_config": { + "scale_file": "./dis/qwen1.5-1.8b-rot-dis.json", + "output_model": "./models/qwen1.5-1.8b-qnn.bin", + "t01m_clip_threshold": 64, + "quant_bias": false, + "model_config": { + "model_type": "qwen2", + "tokenizer_name": "path/to/Qwen1.5-1.8B-Chat", + "model_name": "path/to/Qwen1.5-1.8B-Chat", + "online_rotation": true, + "R_path": "./R/qwen1.5-1.8b-rotation-matrix.bin" + } + } +} +``` +Key parameters: + +- scale_file: Path to activation distribution file +- output_model: Output model path +- t01m_clip_threshold: Quantization clipping threshold +- quant_bias: Whether to quantize bias terms +- online_rotation: rotate after loading model +- R_path: Path to predefined rotation matrix + +To export an FP32 rotated .pth model for CPU deployment (still using CPU for decoding, which requires the FP32 rotated model) and performing CPU quantization methods use: + +```bash +python export_rotate_model.py --config_file config/qwen1.5-1.8b.json +``` + +`NOTE` It's recommended to set a new output model path in json file to avoid overwriting the exported .pth model for QNN. + +Now you can convert the int8 .pth model to .mllm format: + +```bash +python converter.py --input_model=model.pth --output_model=model.mllm --type=torch +``` + +## Build & Run + +Example to modify demo_qwen_npu.cpp: +```cpp +{ + ... 
+ cmdParser.add("vocab", 'v', "specify mllm tokenizer model path", false, "path/to/qwen_vocab.mllm"); + cmdParser.add("merge", 'e', "specify mllm merge file path", false, "path/to/qwen_merges.txt"); + cmdParser.add("qnn-model", 'm', "specify mllm model path", false, "path/to/qwen-1.5-1.8b-chat-int8.mllm"); + cmdParser.add("decoding-model", '\0', "specify mllm model path", false, "path/to/qwen-1.5-1.8b-chat-q4k.mllm"); + ... + auto tokenizer = QWenTokenizer(vocab_path, merge_path); + QWenNPUConfig config(tokens_limit, "1.8B-rotated", RoPEType::HFHUBROPE); + auto model = v2::QWenForCausalLM_NPU(config, 256); + ... +} + +``` +Build the target with QNN backend. + +```bash +cd ../scripts +./build_android_qnn.sh +``` + +Currently, there are two style of modeling, the Module API and the old implementation. The demo of the Module API is in `examples/demo_qwen_npu.cpp` which is in a **user friendly style**, and the old implementation is in `examples/main_qwen_npu.cpp` which supports **the chunk pipeline prefilling**. + +Download the model from [here](https://huggingface.co/mllmTeam/qwen-1.5-1.8b-chat-mllm/blob/main/), or using the following instructions + +```bash +mkdir ../models && cd ../models +# Download int8 model used by npu & q4k model used by cpu +wget https://huggingface.co/mllmTeam/qwen-1.5-1.8b-chat-mllm/resolve/main/qwen-1.5-1.8b-chat-int8.mllm?download=true -O qwen-1.5-1.8b-chat-int8.mllm +wget https://huggingface.co/mllmTeam/qwen-1.5-1.8b-chat-mllm/resolve/main/qwen-1.5-1.8b-chat-q4k.mllm?download=true -O qwen-1.5-1.8b-chat-q4k.mllm +``` + +Run on an android phone with at least 16GB of memory. + +```bash +cd ../scripts +./run_qwen_qnn.sh +``` +If you modify or re-export the model, make sure to delete the old cache file (qnn_context.bin) on your device before running the script again. The cache will be automatically regenerated. 
+ +Result are as followed: + +``` +> ./demo_qwen_npu +[Q] <|im_start|>system +You are a helpful assistant.<|im_end|> +<|im_start|>user +Give me a short introduction to large language model.<|im_end|> +<|im_start|>assistant + +[A] The large language model is a type of artificial intelligence that is designed to generate human-like text based on the input it receives It is typically trained on large datasets of text, such as books, articles, and web pages, and uses statistical models to learn patterns and relationships in the data The goal of a large language model is to generate text that is coherent +``` + +## Custom Op Package Development + +In QNN, you can develop your own Op package to support custom operators. The Op package is a collection of QNN operators that can be used in the QNN backend. + +If you want to develop your own QNN Op package, you can refer to the [QNN documentation](https://docs.qualcomm.com/bundle/publicresource/topics/80-63442-50/op_package_gen_example.html) for more details. The package name in this project is `LLaMAPackage`. + +Generally, a QNN Op should implement an HVX version and a reference version. You can refer to 'Qualcomm Hexagon V73 HVX Programmer's Reference Manual' on Qualcomm's official website for more details about the HVX programming. + +To enable LSP for HVX, you can set clangd path to `$HEXAGON_SDK_ROOT/tools/HEXAGON_Tools/8.7.06/Tools/bin/hexagon-clangd` in your `.vscode/settings.json` file. + +```json +{ + "clangd.path": "$HEXAGON_SDK_ROOT/tools/HEXAGON_Tools/8.7.06/Tools/bin/hexagon-clangd" +} +``` + +Then you need to generate the `compile_commands.json` file for the Op package, you can use the following command: + +```bash +cd mllm/mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/ +compiledb make htp_v75 -C . 
+``` diff --git a/src/backends/qnn/Register.cpp b/mllm/backends/qnn/Register.cpp similarity index 100% rename from src/backends/qnn/Register.cpp rename to mllm/backends/qnn/Register.cpp diff --git a/src/backends/qnn/op/QNNAdd.cpp b/mllm/backends/qnn/op/QNNAdd.cpp similarity index 100% rename from src/backends/qnn/op/QNNAdd.cpp rename to mllm/backends/qnn/op/QNNAdd.cpp diff --git a/src/backends/qnn/op/QNNAdd.hpp b/mllm/backends/qnn/op/QNNAdd.hpp similarity index 100% rename from src/backends/qnn/op/QNNAdd.hpp rename to mllm/backends/qnn/op/QNNAdd.hpp diff --git a/src/backends/qnn/op/QNNCausalMask.cpp b/mllm/backends/qnn/op/QNNCausalMask.cpp similarity index 100% rename from src/backends/qnn/op/QNNCausalMask.cpp rename to mllm/backends/qnn/op/QNNCausalMask.cpp diff --git a/src/backends/qnn/op/QNNCausalMask.hpp b/mllm/backends/qnn/op/QNNCausalMask.hpp similarity index 100% rename from src/backends/qnn/op/QNNCausalMask.hpp rename to mllm/backends/qnn/op/QNNCausalMask.hpp diff --git a/src/backends/qnn/op/QNNCommonOp.cpp b/mllm/backends/qnn/op/QNNCommonOp.cpp similarity index 66% rename from src/backends/qnn/op/QNNCommonOp.cpp rename to mllm/backends/qnn/op/QNNCommonOp.cpp index e448429a9..29ba098cc 100644 --- a/src/backends/qnn/op/QNNCommonOp.cpp +++ b/mllm/backends/qnn/op/QNNCommonOp.cpp @@ -1,7 +1,6 @@ #include "QNNCommonOp.hpp" #include "OpDefined.hpp" #include "QnnTypes.h" -#include "WrapperUtils/QnnWrapperUtils.hpp" #include "Types.hpp" #include @@ -12,7 +11,7 @@ QNNCommonOp::QNNCommonOp(Backend *bn, string opName) : qnnBackend_ = dynamic_cast(bn); } -ErrorCode QNNCommonOp::graphAddNode(string name, string nodeType, vector> inputs, vector> outputs, vector params, string packageName, bool isNSHD, Tensor *scale) { +ErrorCode QNNCommonOp::graphAddNode(string name, string nodeType, vector> inputs, vector> outputs, vector params, string packageName, bool isNSHD) { vector inputTensorNames; for (auto &input : inputs) { inputTensorNames.push_back(input->name()); @@ -24,31 
+23,34 @@ ErrorCode QNNCommonOp::graphAddNode(string name, string nodeType, vector(output->sequence()), static_cast(output->head()), static_cast(output->dimension())}; - if (!isNSHD) { + if (!isNSHD) { // qnn matmul output is in BHSD style, here handle this dimensions[1] = static_cast(output->head()); dimensions[2] = static_cast(output->sequence()); } - // TODO tensor type = MLLM_TYPE_I8 - auto data_type = QNN_DATATYPE_FLOAT_32; - if (output->dtype() == MLLM_TYPE_I8) { - data_type = QNN_DATATYPE_SFIXED_POINT_8; - } - - if (output->dtype() == MLLM_TYPE_F16) { - data_type = QNN_DATATYPE_FLOAT_16; - } - - float quantScale = 0.0f; auto quantDefine = QNN_DEFINITION_UNDEFINED; auto quantType = QNN_QUANTIZATION_ENCODING_UNDEFINED; - - if (scale != nullptr) { - quantScale = scale->hostPtr()[0] / 127.0; - quantScale = roundf(quantScale * 100000) / 100000; + auto data_type = QNN_DATATYPE_FLOAT_32; + switch (output->dtype()) { + case MLLM_TYPE_I8: + data_type = QNN_DATATYPE_SFIXED_POINT_8; + quantScale = outputs[0]->quant_param.scale; + quantDefine = QNN_DEFINITION_DEFINED; + quantType = QNN_QUANTIZATION_ENCODING_SCALE_OFFSET; + break; + case MLLM_TYPE_I16: + data_type = QNN_DATATYPE_SFIXED_POINT_16; + quantScale = outputs[0]->quant_param.scale; quantDefine = QNN_DEFINITION_DEFINED; quantType = QNN_QUANTIZATION_ENCODING_SCALE_OFFSET; + break; + case MLLM_TYPE_F16: + data_type = QNN_DATATYPE_FLOAT_16; + break; + default: + data_type = QNN_DATATYPE_FLOAT_32; + break; } inputTensorNames_.push_back(new string(output->name())); @@ -66,39 +68,43 @@ ErrorCode QNNCommonOp::graphAddNode(string name, string nodeType, vectorgraphAddNode(name, nodeType, inputTensorNames, outputTensors, params, packageName)) { - exit(1); - return ErrorCode::INVALID_VALUE; - } + qnnBackend_->graphAddNode(name, nodeType, inputTensorNames, outputTensors, params, packageName); return MLLM_NO_ERROR; } ErrorCode QNNCommonOp::graphAddNode(string name, string nodeType, vector inputTensorNames, vector outputs, 
vector params, string packageName) { - if (qnn_wrapper_api::ModelError_t::MODEL_NO_ERROR != qnnBackend_->graphAddNode(name, nodeType, inputTensorNames, outputs, params, packageName)) { - exit(1); - return ErrorCode::INVALID_VALUE; - } + qnnBackend_->graphAddNode(name, nodeType, inputTensorNames, outputs, params, packageName); return MLLM_NO_ERROR; } Qnn_TensorType_t QNNCommonOp::getOutputTensorType(shared_ptr tensor) const { if (tensor->ttype() == GRAPH_OUTPUT) { // in Module API, the outputs of a graph is not allocated before setUp, alloc here - if(tensor->allocted() == 0) { + if (tensor->allocted() == 0) { tensor->alloc(); } qnnBackend_->pushOutputBuffers(tensor->hostPtr()); return QNN_TENSOR_TYPE_APP_READ; } else { - if (tensor->childTensors().size() > 0 && tensor->childTensors()[0]->ttype() == GRAPH_OUTPUT) { - if (tensor->allocted() == 0) { - tensor->alloc(); + // if (tensor->childTensors().size() > 0 && tensor->childTensors()[0]->ttype() == GRAPH_OUTPUT) { + // if (tensor->allocted() == 0) { + // tensor->alloc(); + // } + // qnnBackend_->pushOutputBuffers(tensor->hostPtr()); + // return QNN_TENSOR_TYPE_APP_READ; + // } + if (!tensor->childTensors().empty()) { + auto child = tensor->childTensors()[0].lock(); + if (child && child->ttype() == GRAPH_OUTPUT) { + if (tensor->allocted() == 0) { + tensor->alloc(); + } + qnnBackend_->pushOutputBuffers(tensor->hostPtr()); + return QNN_TENSOR_TYPE_APP_READ; } - qnnBackend_->pushOutputBuffers(tensor->hostPtr()); - return QNN_TENSOR_TYPE_APP_READ; } return QNN_TENSOR_TYPE_NATIVE; // qnn input is set APP_WRITE by backend diff --git a/src/backends/qnn/op/QNNCommonOp.hpp b/mllm/backends/qnn/op/QNNCommonOp.hpp similarity index 96% rename from src/backends/qnn/op/QNNCommonOp.hpp rename to mllm/backends/qnn/op/QNNCommonOp.hpp index 23da22469..79e8b16c4 100644 --- a/src/backends/qnn/op/QNNCommonOp.hpp +++ b/mllm/backends/qnn/op/QNNCommonOp.hpp @@ -27,7 +27,7 @@ class QNNCommonOp : public Op { protected: vector inputTensorNames_; 
QNNBackend *qnnBackend_; - ErrorCode graphAddNode(string name, string nodeType, vector> inputs, vector> outputs, vector params = {}, string packageName = "qti.aisw", bool isNSHD = true, Tensor *scale = nullptr); + ErrorCode graphAddNode(string name, string nodeType, vector> inputs, vector> outputs, vector params = {}, string packageName = "qti.aisw", bool isNSHD = true); ErrorCode graphAddNode(string name, string nodeType, vector inputs, vector outputs, vector params = {}, string packageName = "qti.aisw"); Qnn_TensorType_t getOutputTensorType(shared_ptr tensor) const; }; diff --git a/src/backends/qnn/op/QNNDequantize.cpp b/mllm/backends/qnn/op/QNNDequantize.cpp similarity index 86% rename from src/backends/qnn/op/QNNDequantize.cpp rename to mllm/backends/qnn/op/QNNDequantize.cpp index 569deb73d..16cc54283 100644 --- a/src/backends/qnn/op/QNNDequantize.cpp +++ b/mllm/backends/qnn/op/QNNDequantize.cpp @@ -4,13 +4,15 @@ #include "Types.hpp" #include "QNNCommonOp.hpp" #include +#include "Context.hpp" namespace mllm { -QNNDequantize::QNNDequantize(Backend *bn, string opName, bool isNSHD, bool isFP32) : +QNNDequantize::QNNDequantize(Backend *bn, string opName, bool isNSHD, bool isFP32, DataType type) : QNNCommonOp(bn, opName) { isNSHD_ = isNSHD; isFP32_ = isFP32; - scale_.setBackend(bn); + activation_dtype_ = type; + scale_.setBackend(Backend::global_backends[MLLM_CPU].get()); } ErrorCode QNNDequantize::reshape(vector> inputs, vector> outputs) { @@ -20,7 +22,6 @@ ErrorCode QNNDequantize::reshape(vector> inputs, vector> inputs, vector> outputs) { - auto outName = outputs[0]->name(); uint32_t dimensionsOutput[4]; @@ -37,16 +38,21 @@ ErrorCode QNNDequantize::setUp(vector> inputs, vector()[0] / 127.0; - dequantScale = roundf(dequantScale * 100000) / 100000; - - if (name().find("q_proj") != -1) { - dequantScale = dequantScale / std::sqrt(outputs[0]->dimension()); + switch (activation_dtype_) { + case MLLM_TYPE_I8: + dequantScale = scale_.hostPtr()[0] / (pow(2, 7) - 1); + 
break; + case MLLM_TYPE_I16: + dequantScale = scale_.hostPtr()[0] / (pow(2, 15) - 1); + break; + default: + return NOT_SUPPORT; } if (isFP32_) { uint32_t paramsDeQuantizeDimension[1] = {1}; auto paramsDeQuantizeName = name() + "dequantize_params"; + vector paramsDeQuantize = { {.paramType = QNN_PARAMTYPE_TENSOR, .name = "scale", @@ -87,6 +93,7 @@ ErrorCode QNNDequantize::setUp(vector> inputs, vectorsetDtype(MLLM_TYPE_F16); uint32_t paramsDeQuantizeDimension[1] = {1}; auto paramsDeQuantizeName = name() + "dequantize_params"; + vector paramsDeQuantize = { {.paramType = QNN_PARAMTYPE_TENSOR, .name = "scale", @@ -150,6 +157,33 @@ ErrorCode QNNDequantize::load(AbstructLoader &loader) { scale_.alloc(); loader.load(&scale_); + if (name().find("q_proj") != -1 || name().find("k_proj") != -1 || name().find("v_proj") != -1) { + // std::cout << name() << std::endl; + + string biasName = name(); + wordToRemove = "dequantize"; + string biasTypeName = "bias"; + + int pos = biasName.find(wordToRemove); + if (pos != -1) { + biasName.erase(pos, wordToRemove.length()); + } + + // std::cout << biasName + biasTypeName << std::endl; + + int hidden_size = 1536; + if (name().find("k_proj") != -1 || name().find("v_proj") != -1) + hidden_size = 256; + + bias_.setName(biasName + biasTypeName); + bias_.reshape(1, 1, 1, hidden_size); + bias_.setDtype(MLLM_TYPE_F32); + bias_.alloc(); + loader.load(&bias_); + + // bias_.printData(); + } + return Op::load(loader); } } // namespace mllm diff --git a/src/backends/qnn/op/QNNDequantize.hpp b/mllm/backends/qnn/op/QNNDequantize.hpp similarity index 85% rename from src/backends/qnn/op/QNNDequantize.hpp rename to mllm/backends/qnn/op/QNNDequantize.hpp index b4b78fc22..229d11db4 100644 --- a/src/backends/qnn/op/QNNDequantize.hpp +++ b/mllm/backends/qnn/op/QNNDequantize.hpp @@ -3,10 +3,11 @@ #define MLLM_QNNDEQUANTIZE_H #include "QNNCommonOp.hpp" +#include "Types.hpp" namespace mllm { class QNNDequantize : public QNNCommonOp { public: - 
QNNDequantize(Backend *bn, string opName, bool isNSHD, bool isFP32); + QNNDequantize(Backend *bn, string opName, bool isNSHD, bool isFP32, DataType type = MLLM_TYPE_I8); virtual ~QNNDequantize() = default; virtual ErrorCode reshape(vector> inputs, vector> outputs) override; virtual ErrorCode setUp(vector> inputs, vector> outputs) override; @@ -15,12 +16,13 @@ class QNNDequantize : public QNNCommonOp { bool isNSHD_; bool isFP32_; Tensor scale_; + Tensor bias_; }; class QNNDequantizeCreator : public QNNBackend::Creator { public: virtual Op *create(OpParam op_param, Backend *bn, string name) const { - return new QNNDequantize(bn, name, (bool)op_param["isNSHD"], (bool)op_param["isFP32"]); + return new QNNDequantize(bn, name, (bool)op_param["isNSHD"], (bool)op_param["isFP32"], (DataType)op_param["inType"]); } }; diff --git a/mllm/backends/qnn/op/QNNDequantizeAdd.cpp b/mllm/backends/qnn/op/QNNDequantizeAdd.cpp new file mode 100644 index 000000000..8265e95ec --- /dev/null +++ b/mllm/backends/qnn/op/QNNDequantizeAdd.cpp @@ -0,0 +1,202 @@ + +#include "QNNDequantizeAdd.hpp" +#include "QnnTypes.h" +#include "Types.hpp" +#include "QNNCommonOp.hpp" +#include "Context.hpp" +#include + +namespace mllm { +QNNDequantizeAdd::QNNDequantizeAdd(Backend *bn, string opName, bool isNSHD, int out_features, bool isFP32, DataType type) : + QNNCommonOp(bn, opName) { + isNSHD_ = isNSHD; + isFP32_ = isFP32; + out_features_ = out_features; + activation_dtype_ = type; + scale_.setBackend(Backend::global_backends[MLLM_CPU].get()); + bias_.setBackend(Backend::global_backends[MLLM_CPU].get()); +} + +ErrorCode QNNDequantizeAdd::reshape(vector> inputs, vector> outputs) { + assert(outputs.size() == 1); + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); + return Op::reshape(inputs, outputs); +} + +ErrorCode QNNDequantizeAdd::setUp(vector> inputs, vector> outputs) { + auto outName = outputs[0]->name(); + uint32_t dimensionsOutput[4]; + + if 
(isNSHD_) { + dimensionsOutput[0] = static_cast(outputs[0]->batch()); + dimensionsOutput[1] = static_cast(outputs[0]->sequence()); + dimensionsOutput[2] = static_cast(outputs[0]->head()); + dimensionsOutput[3] = static_cast(outputs[0]->dimension()); + } else { + dimensionsOutput[0] = static_cast(outputs[0]->batch()); + dimensionsOutput[1] = static_cast(outputs[0]->head()); + dimensionsOutput[2] = static_cast(outputs[0]->sequence()); + dimensionsOutput[3] = static_cast(outputs[0]->dimension()); + } + + float dequantScale = 0; + switch (activation_dtype_) { + case MLLM_TYPE_I8: + dequantScale = scale_.hostPtr()[0] / (pow(2, 7) - 1); + break; + case MLLM_TYPE_I16: + dequantScale = scale_.hostPtr()[0] / (pow(2, 15) - 1); + break; + default: + return NOT_SUPPORT; + } + + if (isFP32_) { + uint32_t paramsDequantizeAddDimension[1] = {1}; + auto paramsDequantizeAddName = name() + "DequantizeAdd_params"; + + vector paramsDequantizeAdd = { + {.paramType = QNN_PARAMTYPE_TENSOR, + .name = "scale", + .tensorParam = + (Qnn_Tensor_t){.version = QNN_TENSOR_VERSION_1, + .v1 = { + .id = 0, + .name = paramsDequantizeAddName.c_str(), + .type = QNN_TENSOR_TYPE_STATIC, + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = QNN_DATATYPE_FLOAT_32, + .quantizeParams = {QNN_DEFINITION_UNDEFINED, + QNN_QUANTIZATION_ENCODING_UNDEFINED, + {.scaleOffsetEncoding = {.scale = 0.0000000000000000f, .offset = 0}}}, + .rank = 1, + .dimensions = paramsDequantizeAddDimension, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = (uint8_t *)&dequantScale, + .dataSize = sizeof(float)}}}}}; + + uint32_t dimensionsBias[4] = {1, 1, 1, static_cast(bias_.dimension())}; + qnnBackend_->modelAddTensor(bias_.name(), (Qnn_Tensor_t){ + .version = QNN_TENSOR_VERSION_1, + .v1 = { + .id = 0, + .name = bias_.name().c_str(), + .type = QNN_TENSOR_TYPE_STATIC, + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = QNN_DATATYPE_FLOAT_32, + .rank = 4, + .dimensions = dimensionsBias, + .memType = 
QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = bias_.hostPtr(), + .dataSize = (uint32_t)bias_.cntSize()}}}); + + vector outputTensor = {{.version = QNN_TENSOR_VERSION_1, + .v1 = { + .id = 0, + .name = outName.c_str(), + .type = getOutputTensorType(outputs[0]), + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = QNN_DATATYPE_FLOAT_32, + .quantizeParams = {QNN_DEFINITION_DEFINED, + QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, + {.scaleOffsetEncoding = {.scale = dequantScale, .offset = 0}}}, + .rank = 4, + .dimensions = dimensionsOutput, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = nullptr, + .dataSize = 0}}}}; + return graphAddNode(name(), "LLaMADequantizeAdd", {inputs[0]->name(), bias_.name()}, outputTensor, paramsDequantizeAdd, "LLaMAPackage"); + } else { + outputs[0]->setDtype(MLLM_TYPE_F16); + uint32_t paramsDequantizeAddDimension[1] = {1}; + auto paramsDequantizeAddName = name() + "DequantizeAdd_params"; + + vector paramsDequantizeAdd = { + {.paramType = QNN_PARAMTYPE_TENSOR, + .name = "scale", + .tensorParam = + (Qnn_Tensor_t){.version = QNN_TENSOR_VERSION_1, + .v1 = { + .id = 0, + .name = paramsDequantizeAddName.c_str(), + .type = QNN_TENSOR_TYPE_STATIC, + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = QNN_DATATYPE_FLOAT_32, + .quantizeParams = {QNN_DEFINITION_UNDEFINED, + QNN_QUANTIZATION_ENCODING_UNDEFINED, + {.scaleOffsetEncoding = {.scale = 0.0000000000000000f, + .offset = 0}}}, + .rank = 1, + .dimensions = paramsDequantizeAddDimension, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = (uint8_t *)&dequantScale, + .dataSize = sizeof(float)}}}}}; + + uint32_t dimensionsBias[4] = {1, 1, 1, static_cast(bias_.dimension())}; + qnnBackend_->modelAddTensor(bias_.name(), (Qnn_Tensor_t){ + .version = QNN_TENSOR_VERSION_1, + .v1 = { + .id = 0, + .name = bias_.name().c_str(), + .type = QNN_TENSOR_TYPE_STATIC, + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = QNN_DATATYPE_FLOAT_32, + .rank = 4, + 
.dimensions = dimensionsBias, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = bias_.hostPtr(), + .dataSize = (uint32_t)bias_.cntSize()}}}); + + vector outputTensor = {{QNN_TENSOR_VERSION_1, + {.v1 = { + .id = 0, + .name = outName.c_str(), + .type = getOutputTensorType(outputs[0]), + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = QNN_DATATYPE_FLOAT_16, + .quantizeParams = {QNN_DEFINITION_DEFINED, + QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, + {.scaleOffsetEncoding = {.scale = dequantScale, .offset = 0}}}, + .rank = 4, + .dimensions = dimensionsOutput, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = nullptr, + .dataSize = 0}}}}}; + return graphAddNode(name(), "LLaMADequantizeAdd", {inputs[0]->name(), bias_.name()}, outputTensor, paramsDequantizeAdd, "LLaMAPackage"); + } +} + +ErrorCode QNNDequantizeAdd::load(AbstructLoader &loader) { + string scaleName = name(); + string scaleTypeName = "output_scale"; + + std::string wordToRemove = "dequantize"; + int pos = scaleName.find(wordToRemove); + if (pos != -1) { + scaleName.erase(pos, wordToRemove.length()); + } + + scale_.setName(scaleName + scaleTypeName); + scale_.reshape(1, 1, 1, 1); + scale_.setDtype(MLLM_TYPE_F32); + scale_.alloc(); + loader.load(&scale_); + + string biasName = name(); + wordToRemove = "dequantize"; + string biasTypeName = "bias"; + + pos = biasName.find(wordToRemove); + if (pos != -1) { + biasName.erase(pos, wordToRemove.length()); + } + + bias_.setName(biasName + biasTypeName); + bias_.reshape(1, 1, 1, out_features_); + bias_.setDtype(MLLM_TYPE_F32); + bias_.alloc(); + loader.load(&bias_); + + return Op::load(loader); +} +} // namespace mllm diff --git a/mllm/backends/qnn/op/QNNDequantizeAdd.hpp b/mllm/backends/qnn/op/QNNDequantizeAdd.hpp new file mode 100644 index 000000000..c0863bad5 --- /dev/null +++ b/mllm/backends/qnn/op/QNNDequantizeAdd.hpp @@ -0,0 +1,32 @@ + +#ifndef MLLM_QNNDequantizeAdd_H +#define MLLM_QNNDequantizeAdd_H + +#include "QNNCommonOp.hpp" 
+#include "Types.hpp" +namespace mllm { +class QNNDequantizeAdd : public QNNCommonOp { +public: + QNNDequantizeAdd(Backend *bn, string opName, bool isNSHD, int out_features, bool isFP32, DataType type = MLLM_TYPE_I8); + virtual ~QNNDequantizeAdd() = default; + virtual ErrorCode reshape(vector> inputs, vector> outputs) override; + virtual ErrorCode setUp(vector> inputs, vector> outputs) override; + virtual ErrorCode load(AbstructLoader &loader) override; +private: + bool isNSHD_; + bool isFP32_; + int out_features_; + Tensor scale_; + Tensor bias_; +}; + +class QNNDequantizeAddCreator : public QNNBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name) const { + return new QNNDequantizeAdd(bn, name, (bool)op_param["isNSHD"], (int)op_param["out_features"], (bool)op_param["isFP32"], (DataType)op_param["inType"]); + } +}; + +} // namespace mllm + +#endif diff --git a/src/backends/qnn/op/QNNGELU.cpp b/mllm/backends/qnn/op/QNNGELU.cpp similarity index 55% rename from src/backends/qnn/op/QNNGELU.cpp rename to mllm/backends/qnn/op/QNNGELU.cpp index 0f5850f8a..84ce6713a 100644 --- a/src/backends/qnn/op/QNNGELU.cpp +++ b/mllm/backends/qnn/op/QNNGELU.cpp @@ -2,11 +2,12 @@ #include "QNNGELU.hpp" #include "Types.hpp" #include "QNNCommonOp.hpp" +#include "Context.hpp" namespace mllm { QNNGELU::QNNGELU(Backend *bn, string opName) : QNNCommonOp(bn, opName) { - scale_.setBackend(bn); + scale_.setBackend(Backend::global_backends[MLLM_CPU].get()); } ErrorCode QNNGELU::reshape(vector> inputs, vector> outputs) { @@ -17,27 +18,11 @@ ErrorCode QNNGELU::reshape(vector> inputs, vector> inputs, vector> outputs) { - //Todo: gelu do not supprt signed fix int8 - return graphAddNode(name(), "Gelu", inputs, outputs, {}, "qti.aisw", true, &scale_); -} - -ErrorCode QNNGELU::load(AbstructLoader &loader) { - string scaleName = name(); - - std::string wordToRemove = "gelu"; - int pos = scaleName.find(wordToRemove); - if (pos != -1) { - scaleName.erase(pos, 
wordToRemove.length()); + // Todo: gelu do not supprt signed fix int8 + for (int i = 0; i < inputs.size(); ++i) { + outputs[i]->setDtype(inputs[i]->dtype()); } - - scale_.setName(scaleName + "input_scale"); - scale_.reshape(1, 1, 1, 1); - scale_.setDtype(MLLM_TYPE_F32); - scale_.alloc(); - loader.load(&scale_); - - return Op::load(loader); + return graphAddNode(name(), "Gelu", inputs, outputs, {}, "qti.aisw", true); } } // namespace mllm - diff --git a/src/backends/qnn/op/QNNGELU.hpp b/mllm/backends/qnn/op/QNNGELU.hpp similarity index 91% rename from src/backends/qnn/op/QNNGELU.hpp rename to mllm/backends/qnn/op/QNNGELU.hpp index 666fb0c64..91bd0e92d 100644 --- a/src/backends/qnn/op/QNNGELU.hpp +++ b/mllm/backends/qnn/op/QNNGELU.hpp @@ -10,7 +10,6 @@ class QNNGELU : public QNNCommonOp { virtual ~QNNGELU() = default; virtual ErrorCode reshape(vector> inputs, vector> outputs) override; virtual ErrorCode setUp(vector> inputs, vector> outputs) override; - virtual ErrorCode load(AbstructLoader &loader) override; private: Tensor scale_; diff --git a/src/backends/qnn/op/QNNIRoPE.cpp b/mllm/backends/qnn/op/QNNIRoPE.cpp similarity index 87% rename from src/backends/qnn/op/QNNIRoPE.cpp rename to mllm/backends/qnn/op/QNNIRoPE.cpp index d78c8752a..473ffec35 100644 --- a/src/backends/qnn/op/QNNIRoPE.cpp +++ b/mllm/backends/qnn/op/QNNIRoPE.cpp @@ -13,7 +13,6 @@ int QNNIRoPE::cos_max; int QNNIRoPE::global_pose_type_ = -1; int QNNIRoPE::ishape_old; - extern void sinusoidal_position_embedding_llama(int seq_len, int output_dim, vector> &sin, vector> &cos, int &sin_max, int &cos_max); extern void sinusoidal_position_embedding_huggingface(int seq_len, int output_dim, vector> &sin, vector> &cos, int &sin_max, int &cos_max, int base = 10000); @@ -24,8 +23,6 @@ QNNIRoPE::QNNIRoPE(Backend *bn, string opName, int pose_type) : sinTensor_.setBackend(bn); cosTensor_.setBackend(bn); hcntTensor_.setBackend(bn); - - scale_.setBackend(bn); } QNNIRoPE::QNNIRoPE(Backend *bn, string opName, int 
pose_type, float rope_theta, int max_position_embeddings) : @@ -37,8 +34,6 @@ QNNIRoPE::QNNIRoPE(Backend *bn, string opName, int pose_type, float rope_theta, sinTensor_.setBackend(bn); cosTensor_.setBackend(bn); hcntTensor_.setBackend(bn); - - scale_.setBackend(bn); } QNNIRoPE::QNNIRoPE(Backend *bn, string opName, int pose_type, float rope_theta, float partial_rotary_factor, int max_position_embeddings) : @@ -51,8 +46,6 @@ QNNIRoPE::QNNIRoPE(Backend *bn, string opName, int pose_type, float rope_theta, sinTensor_.setBackend(bn); cosTensor_.setBackend(bn); hcntTensor_.setBackend(bn); - - scale_.setBackend(bn); } ErrorCode QNNIRoPE::reshape(vector> inputs, vector> outputs) { @@ -66,9 +59,10 @@ ErrorCode QNNIRoPE::reshape(vector> inputs, vector> inputs, vector> outputs) { + float dequantScale = inputs[0]->quant_param.scale; // in case ishape is 0 when Op is the first one in the graph - if (sin_.empty() || ishape_old < ishape || global_pose_type_ != pose_type_ ) { + if (sin_.empty() || ishape_old < ishape || global_pose_type_ != pose_type_) { global_pose_type_ = pose_type_; ishape_old = ishape; if (pose_type_ == LLAMAROPE) { @@ -90,71 +84,56 @@ ErrorCode QNNIRoPE::setUp(vector> inputs, vector()[0] / 127.0; - dequantScale = roundf(dequantScale * 100000) / 100000; - if (name().find("q_proj") != -1) { dequantScale = dequantScale / std::sqrt(outputs[0]->dimension()); } auto type = QNN_DATATYPE_FLOAT_32; if (outputs[0]->dtype() == MLLM_TYPE_F32) { - std::cout << "QNNIRoPE FP32" << std::endl; sinTensor_.setName(name() + ".sin"); - sinTensor_.reshape(1, 1, pos_max_, ishape/2); + sinTensor_.reshape(1, 1, pos_max_, ishape / 2); sinTensor_.setDtype(MLLM_TYPE_I8); sinTensor_.alloc(); - cosTensor_.setName(name() + ".cos"); - cosTensor_.reshape(1, 1, pos_max_, ishape/2); + cosTensor_.reshape(1, 1, pos_max_, ishape / 2); cosTensor_.setDtype(MLLM_TYPE_I8); cosTensor_.alloc(); - for (int i = 0; i(0, 0, i, j, static_cast(sin_[i][j])); cosTensor_.setDataAt(0, 0, i, j, 
static_cast(cos_[i][j])); } } - - } else if (outputs[0]->dtype() == MLLM_TYPE_F16) { + } else if (outputs[0]->dtype() == MLLM_TYPE_F16) { std::cout << "QNNIRoPE FP16" << std::endl; - + sinTensor_.setName(name() + ".sin"); - sinTensor_.reshape(1, 1, pos_max_, ishape/2); + sinTensor_.reshape(1, 1, pos_max_, ishape / 2); sinTensor_.setDtype(MLLM_TYPE_I8); sinTensor_.alloc(); - cosTensor_.setName(name() + ".cos"); - cosTensor_.reshape(1, 1, pos_max_, ishape/2); + cosTensor_.reshape(1, 1, pos_max_, ishape / 2); cosTensor_.setDtype(MLLM_TYPE_I8); cosTensor_.alloc(); - for (int i = 0; i(0, 0, i, j, static_cast(sin_[i][j])); cosTensor_.setDataAt(0, 0, i, j, static_cast(cos_[i][j])); } } type = QNN_DATATYPE_FLOAT_16; + } - } - - - - - - - - uint32_t sin_dimensions[] = {static_cast(pos_max_), static_cast(ishape/2)}; - uint32_t cos_dimensions[] = {static_cast(pos_max_), static_cast(ishape/2)}; + uint32_t sin_dimensions[] = {static_cast(pos_max_), static_cast(ishape / 2)}; + uint32_t cos_dimensions[] = {static_cast(pos_max_), static_cast(ishape / 2)}; auto sinWeightsName = name() + ".sin.weights"; @@ -169,7 +148,7 @@ ErrorCode QNNIRoPE::setUp(vector> inputs, vector(1.0*sin_max/127*dequantScale), .offset = 0}}}, + {.scaleOffsetEncoding = {.scale = static_cast(1.0 * sin_max / 127 * dequantScale), .offset = 0}}}, .rank = 2, .dimensions = sin_dimensions, .memType = QNN_TENSORMEMTYPE_RAW, @@ -190,7 +169,7 @@ ErrorCode QNNIRoPE::setUp(vector> inputs, vector(1.0*cos_max/127*dequantScale), .offset = 0}}}, + {.scaleOffsetEncoding = {.scale = static_cast(1.0 * cos_max / 127 * dequantScale), .offset = 0}}}, .rank = 2, .dimensions = cos_dimensions, .memType = QNN_TENSORMEMTYPE_RAW, @@ -254,28 +233,10 @@ ErrorCode QNNIRoPE::setUp(vector> inputs, vector> inputs, vector> inputs, vector> outputs) { - h_cnt_ += inputs[0]->sequence(); - hcntTensor_.setDataAt(0,0,0,0, h_cnt_); + hcntTensor_.setDataAt(0, 0, 0, 0, h_cnt_); return QNNCommonOp::execute(inputs, outputs); } diff --git 
a/src/backends/qnn/op/QNNIRoPE.hpp b/mllm/backends/qnn/op/QNNIRoPE.hpp similarity index 99% rename from src/backends/qnn/op/QNNIRoPE.hpp rename to mllm/backends/qnn/op/QNNIRoPE.hpp index d392b3ade..bf0b74e09 100644 --- a/src/backends/qnn/op/QNNIRoPE.hpp +++ b/mllm/backends/qnn/op/QNNIRoPE.hpp @@ -35,8 +35,6 @@ class QNNIRoPE : public QNNCommonOp { Tensor sinTensor_; Tensor cosTensor_; - - Tensor scale_; }; class QNNIRoPECreator : public QNNBackend::Creator { diff --git a/src/backends/qnn/op/QNNLayerNorm.cpp b/mllm/backends/qnn/op/QNNLayerNorm.cpp similarity index 99% rename from src/backends/qnn/op/QNNLayerNorm.cpp rename to mllm/backends/qnn/op/QNNLayerNorm.cpp index 56f0d4334..e57443d99 100644 --- a/src/backends/qnn/op/QNNLayerNorm.cpp +++ b/mllm/backends/qnn/op/QNNLayerNorm.cpp @@ -21,6 +21,7 @@ ErrorCode QNNLayerNorm::reshape(vector> inputs, vector> inputs, vector> outputs) { uint32_t axesDim[1] = {1}; uint32_t axes[1] = {3}; + auto axesName = name() + ".axes"; vector params = { {.paramType = QNN_PARAMTYPE_SCALAR, .name = "epsilon", @@ -31,7 +32,7 @@ ErrorCode QNNLayerNorm::setUp(vector> inputs, vector +#include + +namespace mllm { +QNNLinearINT8::QNNLinearINT8(Backend *bn, string opName, int in_features, int out_features, bool bias) : + QNNCommonOp(bn, opName), in_features_(in_features), out_features_(out_features), support_bias_(bias) { + weight_.setBackend(Backend::global_backends[MLLM_CPU].get()); + bias_.setBackend(Backend::global_backends[MLLM_CPU].get()); + + weightScale_.setBackend(Backend::global_backends[MLLM_CPU].get()); + biasScale_.setBackend(Backend::global_backends[MLLM_CPU].get()); + outputScale_.setBackend(Backend::global_backends[MLLM_CPU].get()); +} + +ErrorCode QNNLinearINT8::reshape(vector> inputs, vector> outputs) { + assert(inputs.size() == 1); + assert(outputs.size() == 1); + // N | C | H | W + // ----------------------------------------------- + // 1 |out_channel | in_channel | 1 + // |out_features| in_features | + // 
----------------------------------------------- + // batch |in_channel | seq_len | 1 + // |in_features | inputs[0]->sequence() | + // ----------------------------------------------- + // batch |out_channel | seq_len | 1 + // |out_features| inputs[0]->sequence() | + assert(inputs[0]->head() == 1); + assert(in_features_ == inputs[0]->dimension()); + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), out_features_); + return Op::reshape(inputs, outputs); +} + +ErrorCode QNNLinearINT8::setUp(vector> inputs, vector> outputs) { + switch (inputs[0]->dtype()) { + case MLLM_TYPE_I8: + return setUpW8A8(inputs, outputs); + case MLLM_TYPE_I16: + return setUpW8A16(inputs, outputs); + default: + return NOT_SUPPORT; + } +} + +ErrorCode QNNLinearINT8::setUpW8A8(vector> &inputs, vector> &outputs) { + outputs[0]->setDtype(MLLM_TYPE_I8); + float outputScale = 0; + outputScale = outputScale_.hostPtr()[0] / (pow(2, 7) - 1); + outputs[0]->quant_param.scale = outputScale; + // add matmul param to qnn + vector paramsMatmul = { + {.paramType = QNN_PARAMTYPE_SCALAR, + .name = "transpose_in0", + .scalarParam = (Qnn_Scalar_t){QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}, + {.paramType = QNN_PARAMTYPE_SCALAR, + .name = "transpose_in1", + .scalarParam = (Qnn_Scalar_t){QNN_DATATYPE_BOOL_8, {.bool8Value = 1}}}}; + + uint32_t dimensions_InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D_dilation[] = {2}; + uint32_t InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D_dilation[] = {1, 1}; + uint32_t dimensions_InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D_pad_amount[] = {2, 2}; + uint32_t InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D_pad_amount[] = {0, 0, 0, 0}; + uint32_t dimensions_InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D_stride[] = {2}; + uint32_t InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D_stride[] = {1, 1}; + + string strideName = name() + ".stride"; + string padName = name() + ".pad"; + vector params_InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D = { + {.paramType = 
QNN_PARAMTYPE_TENSOR, + .name = "stride", + .tensorParam = + (Qnn_Tensor_t){ + .version = QNN_TENSOR_VERSION_1, + .v1 = {.id = 0, + .name = strideName.c_str(), + .type = QNN_TENSOR_TYPE_STATIC, + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = QNN_DATATYPE_UINT_32, + .quantizeParams = {QNN_DEFINITION_UNDEFINED, + QNN_QUANTIZATION_ENCODING_UNDEFINED, + {.scaleOffsetEncoding = {.scale = 0.0000000000000000f, + .offset = 0}}}, + .rank = 1, + .dimensions = dimensions_InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D_stride, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = + {.data = (uint8_t *)InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D_stride, + .dataSize = 8}}}}, + {.paramType = QNN_PARAMTYPE_TENSOR, + .name = "pad_amount", + .tensorParam = + (Qnn_Tensor_t){ + .version = QNN_TENSOR_VERSION_1, + .v1 = {.id = 0, + .name = padName.c_str(), + .type = QNN_TENSOR_TYPE_STATIC, + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = QNN_DATATYPE_UINT_32, + .quantizeParams = {QNN_DEFINITION_UNDEFINED, + QNN_QUANTIZATION_ENCODING_UNDEFINED, + {.scaleOffsetEncoding = {.scale = 0.0000000000000000f, + .offset = 0}}}, + .rank = 2, + .dimensions = + dimensions_InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D_pad_amount, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = + {.data = (uint8_t *) + InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D_pad_amount, + .dataSize = 16}}}}, + + }; + + // add weight tensor to qnn + // uint32_t dimensionsWeight[4] = {1, 1, static_cast(weight_.sequence()), static_cast(weight_.dimension())}; + auto dimensionsWeight = new uint32_t[4]{1, 1, (uint32_t)in_features_, (uint32_t)out_features_}; + auto qnnQuantDefined = QNN_DEFINITION_UNDEFINED; + float weightScale = 0; + + qnnQuantDefined = QNN_DEFINITION_DEFINED; + weightScale = weightScale_.hostPtr()[0]; + + qnnBackend_->modelAddTensor(weight_.name(), (Qnn_Tensor_t){ + .version = QNN_TENSOR_VERSION_1, + .v1 = { + .id = 0, + .name = weight_.name().c_str(), + .type = QNN_TENSOR_TYPE_STATIC, + 
.dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = QNN_DATATYPE_SFIXED_POINT_8, + .quantizeParams = {qnnQuantDefined, + QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, + {.scaleOffsetEncoding = {.scale = weightScale, .offset = 0}}}, + .rank = 4, + .dimensions = dimensionsWeight, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = weight_.hostPtr(), + .dataSize = (uint32_t)weight_.cntSize()}}}); + // free weight host memory + weight_.free(); + + // dimensions of matmul output and bias + // uint32_t dimensionsOutput[4] = {static_cast(outputs[0]->batch()), + // static_cast(outputs[0]->sequence()), + // static_cast(outputs[0]->head()), + // static_cast(outputs[0]->dimension())}; + auto dimensionsOutput = new uint32_t[4]{(uint32_t)outputs[0]->batch(), (uint32_t)outputs[0]->sequence(), (uint32_t)outputs[0]->head(), (uint32_t)outputs[0]->dimension()}; + + auto outName = outputs[0]->name(); + + // if don't support bias, just dequantize and write to tensor with name of outputs[0] + if (!support_bias_) { + vector matmulOut = {{QNN_TENSOR_VERSION_1, + {.v1 = { + .id = 0, + .name = outName.c_str(), + .type = getOutputTensorType(outputs[0]), + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = QNN_DATATYPE_SFIXED_POINT_8, + .quantizeParams = {QNN_DEFINITION_DEFINED, + QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, + {.scaleOffsetEncoding = {.scale = outputScale, .offset = 0}}}, + .rank = 4, + .dimensions = dimensionsOutput, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = nullptr, + .dataSize = 0}}}}}; + return graphAddNode(name() + ".linearint8", "Conv2d", {inputs[0]->name(), weight_.name()}, matmulOut, params_InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D); + } + + // add bias tensor to qnn + uint32_t dimensionsBias[1] = {(uint32_t)out_features_}; + float biasScale = 0; + + qnnQuantDefined = QNN_DEFINITION_DEFINED; + biasScale = biasScale_.hostPtr()[0]; + + auto biasBuffer = (int8_t *)malloc(bias_.count() * sizeof(int8_t)); +#pragma omp parallel 
for + for (int i = 0; i < out_features_; i++) { + int32_t val = bias_.dataAt(0, 0, 0, i) + 128; + biasBuffer[i] = val; + } + + qnnBackend_->modelAddTensor(bias_.name(), (Qnn_Tensor_t){ + .version = QNN_TENSOR_VERSION_1, + .v1 = { + .id = 0, + .name = bias_.name().c_str(), + .type = QNN_TENSOR_TYPE_STATIC, + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = QNN_DATATYPE_UFIXED_POINT_8, + .quantizeParams = {qnnQuantDefined, + QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, + {.scaleOffsetEncoding = {.scale = biasScale, .offset = -128}}}, + .rank = 1, + .dimensions = dimensionsBias, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = biasBuffer, + .dataSize = (uint32_t)(bias_.count() * sizeof(int8_t))}}}); + // free bias host memory + bias_.free(); + + // final output + vector biasOutput = {{QNN_TENSOR_VERSION_1, + {.v1 = { + .id = 0, + .name = outName.c_str(), + .type = getOutputTensorType(outputs[0]), + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = QNN_DATATYPE_SFIXED_POINT_8, + .quantizeParams = {QNN_DEFINITION_DEFINED, + QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, + {.scaleOffsetEncoding = {.scale = outputScale, .offset = 0}}}, + .rank = 4, + .dimensions = dimensionsOutput, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = nullptr, + .dataSize = 0}}}}}; + return graphAddNode(name() + ".linear_w8a8", "Conv2d", {inputs[0]->name(), weight_.name(), bias_.name()}, biasOutput, params_InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D); +} + +ErrorCode QNNLinearINT8::setUpW8A16(vector> &inputs, vector> &outputs) { + outputs[0]->setDtype(MLLM_TYPE_I16); + float outputScale = 0; + outputScale = outputScale_.hostPtr()[0] / (pow(2, 15) - 1); + outputs[0]->quant_param.scale = outputScale; + // add matmul param to qnn + vector paramsMatmul = { + {.paramType = QNN_PARAMTYPE_SCALAR, + .name = "transpose_in0", + .scalarParam = (Qnn_Scalar_t){QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}, + {.paramType = QNN_PARAMTYPE_SCALAR, + .name = "transpose_in1", + 
.scalarParam = (Qnn_Scalar_t){QNN_DATATYPE_BOOL_8, {.bool8Value = 1}}}}; + + uint32_t dimensions_InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D_dilation[] = {2}; + uint32_t InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D_dilation[] = {1, 1}; + uint32_t dimensions_InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D_pad_amount[] = {2, 2}; + uint32_t InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D_pad_amount[] = {0, 0, 0, 0}; + uint32_t dimensions_InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D_stride[] = {2}; + uint32_t InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D_stride[] = {1, 1}; + + string strideName = name() + ".stride"; + string padName = name() + ".pad"; + vector params_InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D = { + {.paramType = QNN_PARAMTYPE_TENSOR, + .name = "stride", + .tensorParam = + (Qnn_Tensor_t){ + .version = QNN_TENSOR_VERSION_1, + .v1 = {.id = 0, + .name = strideName.c_str(), + .type = QNN_TENSOR_TYPE_STATIC, + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = QNN_DATATYPE_UINT_32, + .quantizeParams = {QNN_DEFINITION_UNDEFINED, + QNN_QUANTIZATION_ENCODING_UNDEFINED, + {.scaleOffsetEncoding = {.scale = 0.0000000000000000f, + .offset = 0}}}, + .rank = 1, + .dimensions = dimensions_InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D_stride, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = + {.data = (uint8_t *)InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D_stride, + .dataSize = 8}}}}, + {.paramType = QNN_PARAMTYPE_TENSOR, + .name = "pad_amount", + .tensorParam = + (Qnn_Tensor_t){ + .version = QNN_TENSOR_VERSION_1, + .v1 = {.id = 0, + .name = padName.c_str(), + .type = QNN_TENSOR_TYPE_STATIC, + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = QNN_DATATYPE_UINT_32, + .quantizeParams = {QNN_DEFINITION_UNDEFINED, + QNN_QUANTIZATION_ENCODING_UNDEFINED, + {.scaleOffsetEncoding = {.scale = 0.0000000000000000f, + .offset = 0}}}, + .rank = 2, + .dimensions = + dimensions_InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D_pad_amount, + .memType = 
QNN_TENSORMEMTYPE_RAW, + .clientBuf = + {.data = (uint8_t *) + InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D_pad_amount, + .dataSize = 16}}}}, + + }; + + // add weight tensor to qnn + uint32_t dimensionsWeight[4] = {1, 1, static_cast(weight_.sequence()), static_cast(weight_.dimension())}; + + auto qnnQuantDefined = QNN_DEFINITION_UNDEFINED; + float weightScale = 0; + + qnnQuantDefined = QNN_DEFINITION_DEFINED; + weightScale = weightScale_.hostPtr()[0]; + + qnnBackend_->modelAddTensor(weight_.name(), (Qnn_Tensor_t){ + .version = QNN_TENSOR_VERSION_1, + .v1 = { + .id = 0, + .name = weight_.name().c_str(), + .type = QNN_TENSOR_TYPE_STATIC, + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = QNN_DATATYPE_SFIXED_POINT_8, + .quantizeParams = {qnnQuantDefined, + QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, + {.scaleOffsetEncoding = {.scale = weightScale, .offset = 0}}}, + .rank = 4, + .dimensions = dimensionsWeight, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = weight_.hostPtr(), + .dataSize = (uint32_t)weight_.cntSize()}}}); + // free weight host memory + weight_.free(); + + // dimensions of matmul output and bias + // uint32_t dimensionsOutput[4] = {static_cast(outputs[0]->batch()), + // static_cast(outputs[0]->sequence()), + // static_cast(outputs[0]->head()), + // static_cast(outputs[0]->dimension())}; + auto dimensionsOutput = new uint32_t[4]{(uint32_t)outputs[0]->batch(), (uint32_t)outputs[0]->sequence(), (uint32_t)outputs[0]->head(), (uint32_t)outputs[0]->dimension()}; + + auto outName = outputs[0]->name(); + + // if don't support bias, just dequantize and write to tensor with name of outputs[0] + if (!support_bias_) { + vector matmulOut = {{QNN_TENSOR_VERSION_1, + {.v1 = { + .id = 0, + .name = outName.c_str(), + .type = getOutputTensorType(outputs[0]), + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = QNN_DATATYPE_SFIXED_POINT_16, + .quantizeParams = {QNN_DEFINITION_DEFINED, + QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, + 
{.scaleOffsetEncoding = {.scale = outputScale, .offset = 0}}}, + .rank = 4, + .dimensions = dimensionsOutput, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = nullptr, + .dataSize = 0}}}}}; + return graphAddNode(name() + ".linearint8", "Conv2d", {inputs[0]->name(), weight_.name()}, matmulOut, params_InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D); + } + + // add bias tensor to qnn + uint32_t dimensionsBias[1] = {(uint32_t)out_features_}; + float biasScale = 0; + + qnnQuantDefined = QNN_DEFINITION_DEFINED; + biasScale = biasScale_.hostPtr()[0]; + // create a int32 buffer, convert the bias to int32 + auto biasBuffer = (int32_t *)malloc(bias_.count() * sizeof(int32_t)); +#pragma omp parallel for + for (int i = 0; i < out_features_; i++) { + // int32_t val = bias_.dataAt(0, 0, 0, i) - 128; + int32_t val = bias_.dataAt(0, 0, 0, i); + biasBuffer[i] = val; + } + + qnnBackend_->modelAddTensor(bias_.name(), (Qnn_Tensor_t){ + .version = QNN_TENSOR_VERSION_1, + .v1 = { + .id = 0, + .name = bias_.name().c_str(), + .type = QNN_TENSOR_TYPE_STATIC, + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = QNN_DATATYPE_SFIXED_POINT_32, + .quantizeParams = {qnnQuantDefined, + QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, + {.scaleOffsetEncoding = {.scale = biasScale, .offset = 0}}}, + .rank = 1, + .dimensions = dimensionsBias, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = biasBuffer, + .dataSize = (uint32_t)(bias_.count() * sizeof(int32_t))}}}); + // free bias host memory + bias_.free(); + delete biasBuffer; + + // final output + vector biasOutput = {{QNN_TENSOR_VERSION_1, + {.v1 = { + .id = 0, + .name = outName.c_str(), + .type = getOutputTensorType(outputs[0]), + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = QNN_DATATYPE_SFIXED_POINT_16, + .quantizeParams = {QNN_DEFINITION_DEFINED, + QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, + {.scaleOffsetEncoding = {.scale = outputScale, .offset = 0}}}, + .rank = 4, + .dimensions = dimensionsOutput, + 
.memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = nullptr, + .dataSize = 0}}}}}; + return graphAddNode(name() + ".linear_w8a16", "Conv2d", {inputs[0]->name(), weight_.name(), bias_.name()}, biasOutput, params_InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D); +} + +ErrorCode QNNLinearINT8::load(AbstructLoader &loader) { + weight_.setName(name() + ".weight"); + weight_.reshape(1, 1, in_features_, out_features_); + weight_.setDtype(MLLM_TYPE_I8); + weight_.alloc(); + loader.load(&weight_); + + bias_.setName(name() + ".bias"); + bias_.reshape(1, 1, 1, out_features_); + bias_.setDtype(MLLM_TYPE_I32); + bias_.alloc(); + if (support_bias_) { + loader.load(&bias_); + // // sign to unsign + // for (int i = 0; i < out_features_; i++) { + // int32_t val = bias_.dataAt(0, 0, 0, i); + // val += 128; + // bias_.setDataAt(0, 0, 0, i, (uint8_t)val); + // } + } else { + memset(bias_.hostPtr(), 0, bias_.cntSize()); + } + + weightScale_.setName(name() + ".weight.scale"); + weightScale_.reshape(1, 1, 1, 1); + weightScale_.setDtype(MLLM_TYPE_F32); + weightScale_.alloc(); + loader.load(&weightScale_); + + biasScale_.setName(name() + ".bias.scale"); + biasScale_.reshape(1, 1, 1, 1); + biasScale_.setDtype(MLLM_TYPE_F32); + biasScale_.alloc(); + loader.load(&biasScale_); + + outputScale_.setName(name() + ".output_scale"); + outputScale_.reshape(1, 1, 1, 1); + outputScale_.setDtype(MLLM_TYPE_F32); + outputScale_.alloc(); + loader.load(&outputScale_); + return Op::load(loader); +} + +ErrorCode QNNLinearINT8::free(vector> inputs, vector> outputs) { + return Op::free(inputs, outputs); +} +} // namespace mllm diff --git a/src/backends/qnn/op/QNNLinearINT8.hpp b/mllm/backends/qnn/op/QNNLinearINT8.hpp similarity index 86% rename from src/backends/qnn/op/QNNLinearINT8.hpp rename to mllm/backends/qnn/op/QNNLinearINT8.hpp index ea9395eb3..e53fd3ccb 100644 --- a/src/backends/qnn/op/QNNLinearINT8.hpp +++ b/mllm/backends/qnn/op/QNNLinearINT8.hpp @@ -19,12 +19,15 @@ class QNNLinearINT8 : public 
QNNCommonOp { bool support_bias_; Tensor weight_; Tensor bias_; -// #ifdef SMOOTHQUANT + Tensor weightScale_; Tensor biasScale_; -// #endif + Tensor outputScale_; Tensor inputScale_; + + ErrorCode setUpW8A8(vector>& inputs, vector>& outputs); + ErrorCode setUpW8A16(vector>& inputs, vector>& outputs); }; class QNNLinearINT8Creator : public QNNBackend::Creator { diff --git a/src/backends/qnn/op/QNNMatmul.cpp b/mllm/backends/qnn/op/QNNMatmul.cpp similarity index 77% rename from src/backends/qnn/op/QNNMatmul.cpp rename to mllm/backends/qnn/op/QNNMatmul.cpp index afd42e5ae..88650421d 100644 --- a/src/backends/qnn/op/QNNMatmul.cpp +++ b/mllm/backends/qnn/op/QNNMatmul.cpp @@ -25,7 +25,7 @@ ErrorCode QNNMatmul::reshape(vector> inputs, vectordimension() == inputs[1]->sequence()); - inputs[1]->transShape(SEQUENCE, DIMENSION); + // inputs[1]->transShape(SEQUENCE, DIMENSION); outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[1]->dimension()); } else if (transpose1_) { @@ -53,8 +53,8 @@ ErrorCode QNNMatmul::reshape(vector> inputs, vectorsequence() == inputs[1]->sequence()); - inputs[0]->transShape(SEQUENCE, DIMENSION); - inputs[1]->transShape(SEQUENCE, DIMENSION); + // inputs[0]->transShape(SEQUENCE, DIMENSION); + // inputs[1]->transShape(SEQUENCE, DIMENSION); outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->dimension(), inputs[1]->dimension()); } @@ -62,6 +62,24 @@ ErrorCode QNNMatmul::reshape(vector> inputs, vector> inputs, vector> outputs) { + auto qnnDtype = QNN_DATATYPE_FLOAT_32; + + if (inputs[0]->dtype() == MLLM_TYPE_I8) { + outputs[0]->setDtype(MLLM_TYPE_I8); + qnnDtype = QNN_DATATYPE_SFIXED_POINT_8; + } else if (inputs[0]->dtype() == MLLM_TYPE_I16) { + outputs[0]->setDtype(MLLM_TYPE_I16); + qnnDtype = QNN_DATATYPE_SFIXED_POINT_16; + }else if (inputs[0]->dtype() == MLLM_TYPE_F16) { + outputs[0]->setDtype(MLLM_TYPE_F16); + qnnDtype = QNN_DATATYPE_FLOAT_16; + } else if (inputs[0]->dtype() == MLLM_TYPE_F32) { + 
outputs[0]->setDtype(MLLM_TYPE_F32); + qnnDtype = QNN_DATATYPE_FLOAT_32; + } else { + return ErrorCode::NOT_SUPPORT; + } + if (name().find("qkv") != string::npos) { // QKV matmul only transpose v uint32_t dimVTranspose[4]; @@ -73,7 +91,7 @@ ErrorCode QNNMatmul::setUp(vector> inputs, vector paramsTranspose = { {.paramType = QNN_PARAMTYPE_TENSOR, .name = "perm", @@ -104,7 +122,7 @@ ErrorCode QNNMatmul::setUp(vector> inputs, vector> inputs, vector> inputs, vector> inputs, vector> inputs, vector paramsTranspose = { {.paramType = QNN_PARAMTYPE_TENSOR, .name = "perm", @@ -218,7 +259,7 @@ ErrorCode QNNMatmul::setUp(vector> inputs, vector> inputs, vector> inputs, vectorname()}, outKTranspose, paramsTranspose); vector paramsMatmul = { @@ -278,7 +341,7 @@ ErrorCode QNNMatmul::setUp(vector> inputs, vector> inputs, vector> outputs) { // deepCopy at reshape to let QNNCommonOp::setUp to get the correct ttype - for(int i = 0; i < inputs.size(); i++) { - outputs[i]->shallowCopyFrom(inputs[i].get(), true); + for (int i = 0; i < inputs.size(); i++) { + outputs[i]->shallowCopyFrom(inputs[i], true); } return Op::reshape(inputs, outputs); @@ -22,7 +22,7 @@ ErrorCode QNNMergeOutput::reshape(vector> inputs, vector> inputs, vector> outputs) { for (int i = 0; i < inputs.size(); i++) { - outputs[i]->shallowCopyFrom(inputs[i].get(), true); + outputs[i]->shallowCopyFrom(inputs[i], true); } return MLLM_NO_ERROR; } diff --git a/src/backends/qnn/op/QNNMergeOutput.hpp b/mllm/backends/qnn/op/QNNMergeOutput.hpp similarity index 100% rename from src/backends/qnn/op/QNNMergeOutput.hpp rename to mllm/backends/qnn/op/QNNMergeOutput.hpp diff --git a/src/backends/qnn/op/QNNMul.cpp b/mllm/backends/qnn/op/QNNMul.cpp similarity index 86% rename from src/backends/qnn/op/QNNMul.cpp rename to mllm/backends/qnn/op/QNNMul.cpp index a5d34aa46..c109ca294 100644 --- a/src/backends/qnn/op/QNNMul.cpp +++ b/mllm/backends/qnn/op/QNNMul.cpp @@ -6,8 +6,6 @@ namespace mllm { QNNMul::QNNMul(Backend *bn, string opName) : 
QNNCommonOp(bn, opName) { - - scale_.setBackend(bn); } ErrorCode QNNMul::reshape(vector> inputs, vector> outputs) { @@ -26,7 +24,8 @@ ErrorCode QNNMul::setUp(vector> inputs, vectordtype() == MLLM_TYPE_I8) { outputs[0]->setDtype(MLLM_TYPE_I8); - return graphAddNode(name(), "ElementWiseMultiply", inputs, outputs, {}, "qti.aisw", true, &scale_); + outputs[0]->quant_param.scale = inputs[0]->quant_param.scale; + return graphAddNode(name(), "ElementWiseMultiply", inputs, outputs, {}, "qti.aisw", true); } else { @@ -72,22 +71,6 @@ ErrorCode QNNMul::setUp(vector> inputs, vector()[0] << std::endl; - return Op::load(loader); } diff --git a/src/backends/qnn/op/QNNMul.hpp b/mllm/backends/qnn/op/QNNMul.hpp similarity index 96% rename from src/backends/qnn/op/QNNMul.hpp rename to mllm/backends/qnn/op/QNNMul.hpp index 053c82272..ed9727a5d 100644 --- a/src/backends/qnn/op/QNNMul.hpp +++ b/mllm/backends/qnn/op/QNNMul.hpp @@ -11,9 +11,6 @@ class QNNMul : public QNNCommonOp { virtual ErrorCode reshape(vector> inputs, vector> outputs) override; virtual ErrorCode setUp(vector> inputs, vector> outputs) override; virtual ErrorCode load(AbstructLoader &loader) override; - -private: - Tensor scale_; }; class QNNMulCreator : public QNNBackend::Creator { diff --git a/mllm/backends/qnn/op/QNNQuantize.cpp b/mllm/backends/qnn/op/QNNQuantize.cpp new file mode 100644 index 000000000..6bce8e70b --- /dev/null +++ b/mllm/backends/qnn/op/QNNQuantize.cpp @@ -0,0 +1,179 @@ + +#include "QNNQuantize.hpp" +#include "QnnTypes.h" +#include "Types.hpp" +#include "QNNCommonOp.hpp" +#include "Context.hpp" +#include +#include + +namespace mllm { +QNNQuantize::QNNQuantize(Backend *bn, string opName, DataType type, bool isNSHD) : + QNNCommonOp(bn, opName) { + isNSHD_ = isNSHD; + assert(type == MLLM_TYPE_I8 || type == MLLM_TYPE_I16); + activation_dtype_ = type; + scale_.setBackend(Backend::global_backends[MLLM_CPU].get()); +} + +ErrorCode QNNQuantize::reshape(vector> inputs, vector> outputs) { + 
assert(outputs.size() == 1); + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); + return Op::reshape(inputs, outputs); +} + +ErrorCode QNNQuantize::setUp(vector> inputs, vector> outputs) { + switch (activation_dtype_) { + case MLLM_TYPE_I8: + return setUpI8(inputs, outputs); + case MLLM_TYPE_I16: + return setUpI16(inputs, outputs); + default: + return NOT_SUPPORT; + } +} + +ErrorCode QNNQuantize::setUpI8(vector> &inputs, vector> &outputs) { + outputs[0]->setDtype(MLLM_TYPE_I8); + auto outName = outputs[0]->name(); + + uint32_t dimensionsOutput[4]; + + if (isNSHD_) { + dimensionsOutput[0] = static_cast(outputs[0]->batch()); + dimensionsOutput[1] = static_cast(outputs[0]->sequence()); + dimensionsOutput[2] = static_cast(outputs[0]->head()); + dimensionsOutput[3] = static_cast(outputs[0]->dimension()); + } else { + dimensionsOutput[0] = static_cast(outputs[0]->batch()); + dimensionsOutput[1] = static_cast(outputs[0]->head()); + dimensionsOutput[2] = static_cast(outputs[0]->sequence()); + dimensionsOutput[3] = static_cast(outputs[0]->dimension()); + } + + float quantScale = 0; + quantScale = scale_.hostPtr()[0] / (pow(2, 7) - 1); + outputs[0]->quant_param.scale = quantScale; + + uint32_t paramsQuantizeDimension[1] = {1}; + auto paramsQuantizeName = name() + "quantize_params"; + vector paramsQuantize = { + {.paramType = QNN_PARAMTYPE_TENSOR, + .name = "scale", + .tensorParam = + (Qnn_Tensor_t){.version = QNN_TENSOR_VERSION_1, + .v1 = { + .id = 0, + .name = paramsQuantizeName.c_str(), + .type = QNN_TENSOR_TYPE_STATIC, + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = QNN_DATATYPE_FLOAT_32, + .quantizeParams = {QNN_DEFINITION_UNDEFINED, + QNN_QUANTIZATION_ENCODING_UNDEFINED, + {.scaleOffsetEncoding = {.scale = 0.0000000000000000f, + .offset = 0}}}, + .rank = 1, + .dimensions = paramsQuantizeDimension, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = (uint8_t *)&quantScale, + .dataSize = 
sizeof(float)}}}}}; + + vector outputTensor = {{QNN_TENSOR_VERSION_1, + {.v1 = { + .id = 0, + .name = outName.c_str(), + .type = getOutputTensorType(outputs[0]), + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = QNN_DATATYPE_SFIXED_POINT_8, + .quantizeParams = {QNN_DEFINITION_DEFINED, + QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, + {.scaleOffsetEncoding = {.scale = quantScale, .offset = 0}}}, + .rank = 4, + .dimensions = dimensionsOutput, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = nullptr, + .dataSize = 0}}}}}; + return graphAddNode(name(), "LLaMAQuantize", {inputs[0]->name()}, outputTensor, paramsQuantize, "LLaMAPackage"); +} + +ErrorCode QNNQuantize::setUpI16(vector> &inputs, vector> &outputs) { + outputs[0]->setDtype(MLLM_TYPE_I16); + float quantScale = 0; + quantScale = scale_.hostPtr()[0] / (pow(2, 15) - 1); + outputs[0]->quant_param.scale = quantScale; + auto outName = outputs[0]->name(); + + uint32_t dimensionsOutput[4]; + + if (isNSHD_) { + dimensionsOutput[0] = static_cast(outputs[0]->batch()); + dimensionsOutput[1] = static_cast(outputs[0]->sequence()); + dimensionsOutput[2] = static_cast(outputs[0]->head()); + dimensionsOutput[3] = static_cast(outputs[0]->dimension()); + } else { + dimensionsOutput[0] = static_cast(outputs[0]->batch()); + dimensionsOutput[1] = static_cast(outputs[0]->head()); + dimensionsOutput[2] = static_cast(outputs[0]->sequence()); + dimensionsOutput[3] = static_cast(outputs[0]->dimension()); + } + // std::cout << "isNSHD_ " << isNSHD_ << " dimensionsOutput " << dimensionsOutput[0] << " " << dimensionsOutput[1] << " " << dimensionsOutput[2] << " " << dimensionsOutput[3] << std::endl; + + uint32_t paramsQuantizeDimension[1] = {1}; + auto paramsQuantizeName = name() + "quantize_params"; + vector paramsQuantize = { + {.paramType = QNN_PARAMTYPE_TENSOR, + .name = "scale", + .tensorParam = + (Qnn_Tensor_t){.version = QNN_TENSOR_VERSION_1, + .v1 = { + .id = 0, + .name = paramsQuantizeName.c_str(), + .type = 
QNN_TENSOR_TYPE_STATIC, + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = QNN_DATATYPE_FLOAT_32, + .quantizeParams = {QNN_DEFINITION_UNDEFINED, + QNN_QUANTIZATION_ENCODING_UNDEFINED, + {.scaleOffsetEncoding = {.scale = 0.0000000000000000f, + .offset = 0}}}, + .rank = 1, + .dimensions = paramsQuantizeDimension, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = (uint8_t *)&quantScale, + .dataSize = sizeof(float)}}}}}; + + vector outputTensor = {{QNN_TENSOR_VERSION_1, + {.v1 = { + .id = 0, + .name = outName.c_str(), + .type = getOutputTensorType(outputs[0]), + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = QNN_DATATYPE_SFIXED_POINT_16, + .quantizeParams = {QNN_DEFINITION_DEFINED, + QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, + {.scaleOffsetEncoding = {.scale = quantScale, .offset = 0}}}, + .rank = 4, + .dimensions = dimensionsOutput, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = nullptr, + .dataSize = 0}}}}}; + return graphAddNode(name(), "LLaMAQuantize", {inputs[0]->name()}, outputTensor, paramsQuantize, "LLaMAPackage"); +} + +ErrorCode QNNQuantize::load(AbstructLoader &loader) { + string scaleName = name(); + + std::string wordToRemove = "quantize"; + int pos = scaleName.find(wordToRemove); + if (pos != -1) { + scaleName.erase(pos, wordToRemove.length()); + } + + scale_.setName(scaleName + "input_scale"); + scale_.reshape(1, 1, 1, 1); + scale_.setDtype(MLLM_TYPE_F32); + scale_.alloc(); + loader.load(&scale_); + + return Op::load(loader); +} +} // namespace mllm diff --git a/src/backends/qnn/op/QNNQuantize.hpp b/mllm/backends/qnn/op/QNNQuantize.hpp similarity index 65% rename from src/backends/qnn/op/QNNQuantize.hpp rename to mllm/backends/qnn/op/QNNQuantize.hpp index dbe15852e..d08e044d2 100644 --- a/src/backends/qnn/op/QNNQuantize.hpp +++ b/mllm/backends/qnn/op/QNNQuantize.hpp @@ -6,20 +6,24 @@ namespace mllm { class QNNQuantize : public QNNCommonOp { public: - QNNQuantize(Backend *bn, string opName, bool 
isNSHD); + QNNQuantize(Backend *bn, string opName, DataType type, bool isNSHD); virtual ~QNNQuantize() = default; virtual ErrorCode reshape(vector> inputs, vector> outputs) override; virtual ErrorCode setUp(vector> inputs, vector> outputs) override; virtual ErrorCode load(AbstructLoader &loader) override; + private: bool isNSHD_; Tensor scale_; + + ErrorCode setUpI8(vector> &inputs, vector> &outputs); + ErrorCode setUpI16(vector> &inputs, vector> &outputs); }; class QNNQuantizeCreator : public QNNBackend::Creator { public: virtual Op *create(OpParam op_param, Backend *bn, string name) const { - return new QNNQuantize(bn, name, (bool)op_param["isNSHD"]); + return new QNNQuantize(bn, name, (DataType)op_param["dtype"], (bool)op_param["isNSHD"]); } }; diff --git a/mllm/backends/qnn/op/QNNQuickGELU.cpp b/mllm/backends/qnn/op/QNNQuickGELU.cpp new file mode 100644 index 000000000..f58e24659 --- /dev/null +++ b/mllm/backends/qnn/op/QNNQuickGELU.cpp @@ -0,0 +1,156 @@ + +#include "QNNQuickGELU.hpp" +#include "QnnTypes.h" +#include "Types.hpp" +#include "QNNCommonOp.hpp" +#include "Context.hpp" +#include + +namespace mllm { +QNNQuickGELU::QNNQuickGELU(Backend *bn, string opName) : + QNNCommonOp(bn, opName) { +} + +ErrorCode QNNQuickGELU::reshape(vector> inputs, vector> outputs) { + assert(inputs.size() == 1); + assert(outputs.size() == 1); + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); + return Op::reshape(inputs, outputs); +} + +ErrorCode QNNQuickGELU::setUp(vector> inputs, vector> outputs) { + outputs[0]->setDtype(inputs[0]->dtype()); + + auto outName = outputs[0]->name(); + + uint32_t scalarDimensions[1] = {1}; + float scaleData[] = {1.702f}; + mllm_fp16_t scaleDataF16[] = {static_cast(1.702f)}; + auto scaleName = name() + ".gelu_scale"; + auto qnnDtype = QNN_DATATYPE_FLOAT_32; + + switch (outputs[0]->dtype()) { + case MLLM_TYPE_F32: + qnnDtype = QNN_DATATYPE_FLOAT_32; + qnnBackend_->modelAddTensor(scaleName, 
(Qnn_Tensor_t){ + .version = QNN_TENSOR_VERSION_1, + .v1 = { + .id = 0, + .name = scaleName.c_str(), + .type = QNN_TENSOR_TYPE_STATIC, + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = QNN_DATATYPE_FLOAT_32, + .quantizeParams = {QNN_DEFINITION_UNDEFINED, + QNN_QUANTIZATION_ENCODING_UNDEFINED, + {.scaleOffsetEncoding = {.scale = 0.0000000000000000f, .offset = 0}}}, + .rank = 1, + .dimensions = scalarDimensions, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = scaleData, + .dataSize = 4}}}); + break; + case MLLM_TYPE_F16: + qnnDtype = QNN_DATATYPE_FLOAT_16; + qnnBackend_->modelAddTensor(scaleName, (Qnn_Tensor_t){ + .version = QNN_TENSOR_VERSION_1, + .v1 = { + .id = 0, + .name = scaleName.c_str(), + .type = QNN_TENSOR_TYPE_STATIC, + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = QNN_DATATYPE_FLOAT_16, + .quantizeParams = {QNN_DEFINITION_UNDEFINED, + QNN_QUANTIZATION_ENCODING_UNDEFINED, + {.scaleOffsetEncoding = {.scale = 0.0000000000000000f, .offset = 0}}}, + .rank = 1, + .dimensions = scalarDimensions, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = scaleDataF16, + .dataSize = 2}}}); + break; + default: + MLLM_LOG_ERROR_STREAM << "[ERROR] QNNScale not support dtype: " << outputs[0]->dtype() << std::endl; + return ErrorCode::NOT_SUPPORT; + } + + // the scale is used after q*k in qnn graph, dimension should be BHSD + uint32_t dimensions[4] = {static_cast(outputs[0]->batch()), + static_cast(outputs[0]->sequence()), + static_cast(outputs[0]->head()), + static_cast(outputs[0]->dimension())}; + // convert output to qnn tensor + auto scaleOutName = outputs[0]->name() + "-multiply"; + vector outputTensors = { + {.version = QNN_TENSOR_VERSION_1, + .v1 = { + .id = 0, + .name = scaleOutName.c_str(), + .type = getOutputTensorType(outputs[0]), + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = qnnDtype, + .quantizeParams = {QNN_DEFINITION_UNDEFINED, + QNN_QUANTIZATION_ENCODING_UNDEFINED, + 
{.scaleOffsetEncoding = {.scale = 0.0000000000000000f, .offset = 0}}}, + .rank = 4, + .dimensions = dimensions, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = nullptr, + .dataSize = 0}}}}; + graphAddNode(name() + "-multiply", "LLaMAMul", {inputs[0]->name(), scaleName}, outputTensors, {}, "LLaMAPackage"); + + uint32_t dimensionsOutput[4]; + + dimensionsOutput[0] = static_cast(outputs[0]->batch()); + dimensionsOutput[1] = static_cast(outputs[0]->sequence()); + dimensionsOutput[2] = static_cast(outputs[0]->head()); + dimensionsOutput[3] = static_cast(outputs[0]->dimension()); + + auto type = QNN_DATATYPE_FLOAT_32; + outputs[0]->setDtype(MLLM_TYPE_F32); + + if (inputs[0]->dtype() == MLLM_TYPE_F16) { + type = QNN_DATATYPE_FLOAT_16; + outputs[0]->setDtype(MLLM_TYPE_F16); + } + + // add sigmoid node + auto sigmoidName = name() + "-sigmoid"; + vector outputSigmoid{ + {QNN_TENSOR_VERSION_1, + {.v1 = { + .id = 0, + .name = sigmoidName.c_str(), + .type = QNN_TENSOR_TYPE_NATIVE, + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = type, + .quantizeParams = {QNN_DEFINITION_UNDEFINED, + QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, + {.scaleOffsetEncoding = {.scale = 0, .offset = 0}}}, + .rank = 4, + .dimensions = dimensionsOutput, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = nullptr, + .dataSize = 0}}}}}; + graphAddNode(name() + "-sigmoid", "Sigmoid", {scaleOutName}, outputSigmoid); + + vector outputTensor = {{QNN_TENSOR_VERSION_1, + {.v1 = { + .id = 0, + .name = outName.c_str(), + .type = getOutputTensorType(outputs[0]), + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = type, + .quantizeParams = {QNN_DEFINITION_UNDEFINED, + QNN_QUANTIZATION_ENCODING_UNDEFINED, + {.scaleOffsetEncoding = {.scale = 0.0000000000000000f, + .offset = 0}}}, + .rank = 4, + .dimensions = dimensionsOutput, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = nullptr, + .dataSize = 0}}}}}; + return graphAddNode(name(), "LLaMAMul", {sigmoidName, 
inputs[0]->name()}, outputTensor, {}, "LLaMAPackage"); +} + +} // namespace mllm + diff --git a/mllm/backends/qnn/op/QNNQuickGELU.hpp b/mllm/backends/qnn/op/QNNQuickGELU.hpp new file mode 100644 index 000000000..800df0af9 --- /dev/null +++ b/mllm/backends/qnn/op/QNNQuickGELU.hpp @@ -0,0 +1,24 @@ + +#ifndef MLLM_QNNQUICKGELU_H +#define MLLM_QNNQUICKGELU_H + +#include "QNNCommonOp.hpp" +namespace mllm { +class QNNQuickGELU : public QNNCommonOp { +public: + QNNQuickGELU(Backend *bn, string opName); + virtual ~QNNQuickGELU() = default; + virtual ErrorCode reshape(vector> inputs, vector> outputs) override; + virtual ErrorCode setUp(vector> inputs, vector> outputs) override; +}; + +class QNNQuickGELUCreator : public QNNBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name) const { + return new QNNQuickGELU(bn, name); + } +}; + +} // namespace mllm + +#endif diff --git a/src/backends/qnn/op/QNNRMSNorm.cpp b/mllm/backends/qnn/op/QNNRMSNorm.cpp similarity index 90% rename from src/backends/qnn/op/QNNRMSNorm.cpp rename to mllm/backends/qnn/op/QNNRMSNorm.cpp index 708cc2bbe..55fb1c9ed 100644 --- a/src/backends/qnn/op/QNNRMSNorm.cpp +++ b/mllm/backends/qnn/op/QNNRMSNorm.cpp @@ -3,12 +3,12 @@ #include "Types.hpp" #include "QNNCommonOp.hpp" #include +#include "Context.hpp" namespace mllm { QNNRMSNorm::QNNRMSNorm(Backend *bn, string opName, int normSize, float epsilon, bool isFP32) : QNNCommonOp(bn, opName), normSize_(normSize), epsilon_(epsilon), isFP32_(isFP32) { - weight_.setBackend(bn); - scale_.setBackend(bn); + weight_.setBackend(Backend::global_backends[MLLM_CPU].get()); } ErrorCode QNNRMSNorm::reshape(vector> inputs, vector> outputs) { @@ -18,10 +18,6 @@ ErrorCode QNNRMSNorm::reshape(vector> inputs, vector> inputs, vector> outputs) { - float quantScale = 0; - quantScale = scale_.hostPtr()[0] / 127.0; - quantScale = roundf(quantScale * 100000) / 100000; - uint32_t dimWeight[4] = {(uint32_t)normSize_}; 
qnnBackend_->modelAddTensor(weight_.name(), (Qnn_Tensor_t){ .version = QNN_TENSOR_VERSION_1, @@ -69,6 +65,8 @@ ErrorCode QNNRMSNorm::setUp(vector> inputs, vectorsetDtype(MLLM_TYPE_I8); + float quantScale = inputs[0]->quant_param.scale; + outputs[0]->quant_param.scale = quantScale; vector out = { (Qnn_Tensor_t){ @@ -92,6 +90,7 @@ ErrorCode QNNRMSNorm::setUp(vector> inputs, vector> inputs, vector> outputs) { @@ -22,30 +21,11 @@ ErrorCode QNNReLU::setUp(vector> inputs, vectordtype() == MLLM_TYPE_I8) { outputs[0]->setDtype(MLLM_TYPE_I8); - return graphAddNode(name(), "Relu", inputs, outputs, {}, "qti.aisw", true, &scale_); + outputs[0]->quant_param.scale = inputs[0]->quant_param.scale; + return graphAddNode(name(), "Relu", inputs, outputs, {}, "qti.aisw", true); } else { - return graphAddNode(name(), "LLaMAReLU", inputs, outputs, {}, "LLaMAPackage", true, nullptr); + return graphAddNode(name(), "LLaMAReLU", inputs, outputs, {}, "LLaMAPackage", true); } } -ErrorCode QNNReLU::load(AbstructLoader &loader) { - string scaleName = name(); - - std::string wordToRemove = "relu"; - int pos = scaleName.find(wordToRemove); - if (pos != -1) { - scaleName.erase(pos, wordToRemove.length()); - } - - scale_.setName(scaleName + "output_scale"); - scale_.reshape(1, 1, 1, 1); - scale_.setDtype(MLLM_TYPE_F32); - scale_.alloc(); - loader.load(&scale_); - - // std::cout << scale_.hostPtr()[0] << std::endl; - - return Op::load(loader); -} - } // namespace mllm diff --git a/src/backends/qnn/op/QNNReLU.hpp b/mllm/backends/qnn/op/QNNReLU.hpp similarity index 89% rename from src/backends/qnn/op/QNNReLU.hpp rename to mllm/backends/qnn/op/QNNReLU.hpp index a019f1b36..2ebb195d8 100644 --- a/src/backends/qnn/op/QNNReLU.hpp +++ b/mllm/backends/qnn/op/QNNReLU.hpp @@ -10,12 +10,9 @@ class QNNReLU : public QNNCommonOp { virtual ~QNNReLU() = default; virtual ErrorCode reshape(vector> inputs, vector> outputs) override; virtual ErrorCode setUp(vector> inputs, vector> outputs) override; - virtual ErrorCode 
load(AbstructLoader &loader) override; - private: int axis_ = 0; - Tensor scale_; }; class QNNReLUCreator : public QNNBackend::Creator { diff --git a/src/backends/qnn/op/QNNRoPE.cpp b/mllm/backends/qnn/op/QNNRoPE.cpp similarity index 95% rename from src/backends/qnn/op/QNNRoPE.cpp rename to mllm/backends/qnn/op/QNNRoPE.cpp index 183633a27..a48033bc4 100644 --- a/src/backends/qnn/op/QNNRoPE.cpp +++ b/mllm/backends/qnn/op/QNNRoPE.cpp @@ -27,8 +27,6 @@ QNNRoPE::QNNRoPE(Backend *bn, string opName, int pose_type) : sinTensor_.setBackend(bn); cosTensor_.setBackend(bn); hcntTensor_.setBackend(bn); - - scale_.setBackend(bn); } QNNRoPE::QNNRoPE(Backend *bn, string opName, int pose_type, float rope_theta, int max_position_embeddings) : @@ -40,8 +38,6 @@ QNNRoPE::QNNRoPE(Backend *bn, string opName, int pose_type, float rope_theta, in sinTensor_.setBackend(bn); cosTensor_.setBackend(bn); hcntTensor_.setBackend(bn); - - scale_.setBackend(bn); } QNNRoPE::QNNRoPE(Backend *bn, string opName, int pose_type, float rope_theta, float partial_rotary_factor, int max_position_embeddings) : @@ -54,8 +50,6 @@ QNNRoPE::QNNRoPE(Backend *bn, string opName, int pose_type, float rope_theta, fl sinTensor_.setBackend(bn); cosTensor_.setBackend(bn); hcntTensor_.setBackend(bn); - - scale_.setBackend(bn); } QNNRoPE::QNNRoPE(Backend *bn, string opName, OpParam &config) : @@ -123,9 +117,9 @@ ErrorCode QNNRoPE::setUp(vector> inputs, vector()[0] / 127.0; - dequantScale = roundf(dequantScale * 100000) / 100000; + dequantScale = inputs[0]->quant_param.scale; + // TODO: better handle this if (name().find("q_proj") != -1) { dequantScale = dequantScale / std::sqrt(outputs[0]->dimension()); } @@ -276,22 +270,6 @@ ErrorCode QNNRoPE::load(AbstructLoader &loader) { hcntTensor_.reshape(1, 1, 1, 1); hcntTensor_.setDtype(MLLM_TYPE_I32); hcntTensor_.alloc(); - - string scaleName = name(); - string scaleTypeName = "output_scale"; - - std::string wordToRemove = "rope"; - int pos = scaleName.find(wordToRemove); - if 
(pos != -1) { - scaleName.erase(pos, wordToRemove.length()); - } - - scale_.setName(scaleName + scaleTypeName); - scale_.reshape(1, 1, 1, 1); - scale_.setDtype(MLLM_TYPE_F32); - scale_.alloc(); - loader.load(&scale_); - return Op::load(loader); } diff --git a/src/backends/qnn/op/QNNRoPE.hpp b/mllm/backends/qnn/op/QNNRoPE.hpp similarity index 99% rename from src/backends/qnn/op/QNNRoPE.hpp rename to mllm/backends/qnn/op/QNNRoPE.hpp index 8663418ea..ffdcda669 100644 --- a/src/backends/qnn/op/QNNRoPE.hpp +++ b/mllm/backends/qnn/op/QNNRoPE.hpp @@ -38,8 +38,6 @@ class QNNRoPE : public QNNCommonOp { Tensor sinTensor_; Tensor cosTensor_; - - Tensor scale_; }; class QNNRoPECreator : public QNNBackend::Creator { diff --git a/mllm/backends/qnn/op/QNNRoPESimple.cpp b/mllm/backends/qnn/op/QNNRoPESimple.cpp new file mode 100644 index 000000000..c90576336 --- /dev/null +++ b/mllm/backends/qnn/op/QNNRoPESimple.cpp @@ -0,0 +1,67 @@ + +#include "QNNRoPESimple.hpp" +#include "Types.hpp" +#include "QNNCommonOp.hpp" +#include + +namespace mllm { + +QNNRoPESimple::QNNRoPESimple(Backend *bn, string opName) : + QNNCommonOp(bn, opName) { +} + +ErrorCode QNNRoPESimple::reshape(vector> inputs, vector> outputs) { + assert(inputs.size() == 3); + assert(outputs.size() == 1); + int partial_dimension = inputs[0]->dimension(); + assert(partial_dimension % 2 == 0); + + assert(inputs[0]->batch() == inputs[1]->batch() && inputs[0]->batch() == inputs[2]->batch()); + assert(inputs[0]->head() == inputs[1]->head() && inputs[0]->head() == inputs[2]->head()); + assert(inputs[0]->sequence() == inputs[1]->sequence() && inputs[0]->sequence() == inputs[2]->sequence()); + assert(inputs[0]->dimension() == inputs[1]->dimension() * 2 && inputs[0]->dimension() == inputs[2]->dimension() * 2); + + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); + + return Op::reshape(inputs, outputs); +} + +ErrorCode QNNRoPESimple::setUp(vector> inputs, vector> outputs) { + 
auto type = QNN_DATATYPE_FLOAT_32; + if (inputs[0]->dtype() == MLLM_TYPE_F16) { + type = QNN_DATATYPE_FLOAT_16; + outputs[0]->setDtype(MLLM_TYPE_F16); + } else if (inputs[0]->dtype() == MLLM_TYPE_F32) { + type = QNN_DATATYPE_FLOAT_32; + outputs[0]->setDtype(MLLM_TYPE_F32); + } else { + return ErrorCode::NOT_SUPPORT; + } + + uint32_t dimOut[4] = {static_cast(inputs[0]->batch()), + static_cast(inputs[0]->sequence()), + static_cast(inputs[0]->head()), + static_cast(inputs[0]->dimension())}; + auto outName = outputs[0]->name(); + vector out = { + (Qnn_Tensor_t){ + .version = QNN_TENSOR_VERSION_1, + .v1 = { + .id = 0, + .name = outName.c_str(), + .type = getOutputTensorType(outputs[0]), + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = type, + .quantizeParams = {QNN_DEFINITION_UNDEFINED, + QNN_QUANTIZATION_ENCODING_UNDEFINED, + {.scaleOffsetEncoding = {.scale = 0.0000000000000000f, .offset = 0}}}, + .rank = 4, + .dimensions = dimOut, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = nullptr, + .dataSize = 0}}}}; + + return graphAddNode(name(), "RoPESimple", {inputs[0]->name(), inputs[1]->name(), inputs[2]->name()}, out, {}, "LLaMAPackage"); +} + +} // namespace mllm diff --git a/mllm/backends/qnn/op/QNNRoPESimple.hpp b/mllm/backends/qnn/op/QNNRoPESimple.hpp new file mode 100644 index 000000000..1116b8fe6 --- /dev/null +++ b/mllm/backends/qnn/op/QNNRoPESimple.hpp @@ -0,0 +1,32 @@ + +#ifndef MLLM_QNNRoPESimple_H +#define MLLM_QNNRoPESimple_H + +#include "QNNCommonOp.hpp" +namespace mllm { +/** + * This class should be a basic RoPE implementation for QNN backend. + * Which only calculate value1 = in_value * cos_value - in_value_2 * sin_value and value2 = in_value * sin_value + in_value_2 * cos_value. + * It is similar to CPUApllyRoPEFunction. + * The sin and cos should be the inputs of this op. 
+ */ +class QNNRoPESimple : public QNNCommonOp { +public: + QNNRoPESimple(Backend *bn, string opName); + virtual ~QNNRoPESimple() = default; + virtual ErrorCode reshape(vector> inputs, vector> outputs) override; + virtual ErrorCode setUp(vector> inputs, vector> outputs) override; + +private: +}; + +class QNNRoPESimpleCreator : public QNNBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name) const { + return new QNNRoPESimple(bn, name); + } +}; + +} // namespace mllm + +#endif diff --git a/src/backends/qnn/op/QNNScale.cpp b/mllm/backends/qnn/op/QNNScale.cpp similarity index 53% rename from src/backends/qnn/op/QNNScale.cpp rename to mllm/backends/qnn/op/QNNScale.cpp index 1f73bded9..76dd70c3f 100644 --- a/src/backends/qnn/op/QNNScale.cpp +++ b/mllm/backends/qnn/op/QNNScale.cpp @@ -3,6 +3,7 @@ #include "QnnTypes.h" #include "Types.hpp" #include "QNNCommonOp.hpp" +#include #include namespace mllm { @@ -21,10 +22,92 @@ ErrorCode QNNScale::reshape(vector> inputs, vector> inputs, vector> outputs) { + outputs[0]->setDtype(inputs[0]->dtype()); // add intermediate output of matmul + bool isHaveBias = (bias_ != 0.0f); + if (!isHaveBias) { + // if no bias and bias_after_scale is false, then we don't need intermediate output + // add scale and bias tensor + uint32_t scalarDimensions[1] = {1}; + float scaleData[] = {scale_}; + float16_t scaleDataF16[] = {static_cast(scale_)}; + auto scaleName = name() + ".scale"; + auto qnnDtype = QNN_DATATYPE_FLOAT_32; + + switch (outputs[0]->dtype()) { + case MLLM_TYPE_F32: + qnnDtype = QNN_DATATYPE_FLOAT_32; + qnnBackend_->modelAddTensor(scaleName, (Qnn_Tensor_t){ + .version = QNN_TENSOR_VERSION_1, + .v1 = { + .id = 0, + .name = scaleName.c_str(), + .type = QNN_TENSOR_TYPE_STATIC, + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = QNN_DATATYPE_FLOAT_32, + .quantizeParams = {QNN_DEFINITION_UNDEFINED, + QNN_QUANTIZATION_ENCODING_UNDEFINED, + {.scaleOffsetEncoding = {.scale = 
0.0000000000000000f, .offset = 0}}}, + .rank = 1, + .dimensions = scalarDimensions, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = scaleData, + .dataSize = 4}}}); + break; + case MLLM_TYPE_F16: + qnnDtype = QNN_DATATYPE_FLOAT_16; + qnnBackend_->modelAddTensor(scaleName, (Qnn_Tensor_t){ + .version = QNN_TENSOR_VERSION_1, + .v1 = { + .id = 0, + .name = scaleName.c_str(), + .type = QNN_TENSOR_TYPE_STATIC, + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = QNN_DATATYPE_FLOAT_16, + .quantizeParams = {QNN_DEFINITION_UNDEFINED, + QNN_QUANTIZATION_ENCODING_UNDEFINED, + {.scaleOffsetEncoding = {.scale = 0.0000000000000000f, .offset = 0}}}, + .rank = 1, + .dimensions = scalarDimensions, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = scaleDataF16, + .dataSize = 2}}}); + break; + default: + MLLM_LOG_ERROR_STREAM << "[ERROR] QNNScale not support dtype: " << outputs[0]->dtype() << std::endl; + return ErrorCode::NOT_SUPPORT; + } + + // the scale is used after q*k in qnn graph, dimension should be BHSD + uint32_t dimensions[4] = {static_cast(inputs[0]->batch()), + static_cast(inputs[0]->head()), + static_cast(inputs[0]->sequence()), + static_cast(inputs[0]->dimension())}; + // convert output to qnn tensor + auto outName = outputs[0]->name(); + vector outputTensors = { + {.version = QNN_TENSOR_VERSION_1, + .v1 = { + .id = 0, + .name = outName.c_str(), + .type = getOutputTensorType(outputs[0]), + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = qnnDtype, + .quantizeParams = {QNN_DEFINITION_UNDEFINED, + QNN_QUANTIZATION_ENCODING_UNDEFINED, + {.scaleOffsetEncoding = {.scale = 0.0000000000000000f, .offset = 0}}}, + .rank = 4, + .dimensions = dimensions, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = nullptr, + .dataSize = 0}}}}; + return graphAddNode(name(), "ElementWiseMultiply", {inputs[0]->name(), scaleName}, outputTensors); + } + + // TODO: below should deprecated uint32_t dimensions[4] = 
{static_cast(inputs[0]->batch()), - static_cast(inputs[0]->sequence()), static_cast(inputs[0]->head()), + static_cast(inputs[0]->sequence()), static_cast(inputs[0]->dimension())}; auto interName = name() + ".intermediate"; vector diff --git a/src/backends/qnn/op/QNNScale.hpp b/mllm/backends/qnn/op/QNNScale.hpp similarity index 100% rename from src/backends/qnn/op/QNNScale.hpp rename to mllm/backends/qnn/op/QNNScale.hpp diff --git a/src/backends/qnn/op/QNNSiLU.cpp b/mllm/backends/qnn/op/QNNSiLU.cpp similarity index 98% rename from src/backends/qnn/op/QNNSiLU.cpp rename to mllm/backends/qnn/op/QNNSiLU.cpp index 7af121896..7a40caef8 100644 --- a/src/backends/qnn/op/QNNSiLU.cpp +++ b/mllm/backends/qnn/op/QNNSiLU.cpp @@ -1,4 +1,3 @@ - #include "QNNSiLU.hpp" #include "Types.hpp" #include "QNNCommonOp.hpp" @@ -23,7 +22,6 @@ ErrorCode QNNSiLU::setUp(vector> inputs, vector(outputs[0]->head()); dimensionsOutput[3] = static_cast(outputs[0]->dimension()); - auto type = QNN_DATATYPE_FLOAT_32; outputs[0]->setDtype(MLLM_TYPE_F32); @@ -31,7 +29,6 @@ ErrorCode QNNSiLU::setUp(vector> inputs, vectorsetDtype(MLLM_TYPE_F16); } - vector outputTensor = {{QNN_TENSOR_VERSION_1, {.v1 = { @@ -51,4 +48,4 @@ ErrorCode QNNSiLU::setUp(vector> inputs, vectorname()}, outputTensor, {}, "LLaMAPackage"); } -} // namespace mllm +} // namespace mllm \ No newline at end of file diff --git a/src/backends/qnn/op/QNNSiLU.hpp b/mllm/backends/qnn/op/QNNSiLU.hpp similarity index 100% rename from src/backends/qnn/op/QNNSiLU.hpp rename to mllm/backends/qnn/op/QNNSiLU.hpp diff --git a/mllm/backends/qnn/op/QNNSiLUHigh.cpp b/mllm/backends/qnn/op/QNNSiLUHigh.cpp new file mode 100644 index 000000000..d48ff4202 --- /dev/null +++ b/mllm/backends/qnn/op/QNNSiLUHigh.cpp @@ -0,0 +1,72 @@ + +#include "QNNSiLUHigh.hpp" +#include "Types.hpp" +#include "QNNCommonOp.hpp" + +namespace mllm { +QNNSiLUHigh::QNNSiLUHigh(Backend *bn, string opName) : + QNNCommonOp(bn, opName) { +} + +ErrorCode QNNSiLUHigh::reshape(vector> inputs, 
vector> outputs) { + outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); + return Op::reshape(inputs, outputs); +} + +ErrorCode QNNSiLUHigh::setUp(vector> inputs, vector> outputs) { + auto outName = outputs[0]->name(); + + uint32_t dimensionsOutput[4]; + + dimensionsOutput[0] = static_cast(outputs[0]->batch()); + dimensionsOutput[1] = static_cast(outputs[0]->sequence()); + dimensionsOutput[2] = static_cast(outputs[0]->head()); + dimensionsOutput[3] = static_cast(outputs[0]->dimension()); + + auto type = QNN_DATATYPE_FLOAT_32; + outputs[0]->setDtype(MLLM_TYPE_F32); + + if (inputs[0]->dtype() == MLLM_TYPE_F16) { + type = QNN_DATATYPE_FLOAT_16; + outputs[0]->setDtype(MLLM_TYPE_F16); + } + + // add sigmoid node + auto sigmoidName = name() + "-sigmoid"; + vector outputSigmoid{ + {QNN_TENSOR_VERSION_1, + {.v1 = { + .id = 0, + .name = sigmoidName.c_str(), + .type = QNN_TENSOR_TYPE_NATIVE, + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = type, + .quantizeParams = {QNN_DEFINITION_UNDEFINED, + QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, + {.scaleOffsetEncoding = {.scale = 0, .offset = 0}}}, + .rank = 4, + .dimensions = dimensionsOutput, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = nullptr, + .dataSize = 0}}}}}; + graphAddNode(name() + "-sigmoid", "Sigmoid", {inputs[0]->name()}, outputSigmoid); + + vector outputTensor = {{QNN_TENSOR_VERSION_1, + {.v1 = { + .id = 0, + .name = outName.c_str(), + .type = getOutputTensorType(outputs[0]), + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = type, + .quantizeParams = {QNN_DEFINITION_UNDEFINED, + QNN_QUANTIZATION_ENCODING_UNDEFINED, + {.scaleOffsetEncoding = {.scale = 0.0000000000000000f, + .offset = 0}}}, + .rank = 4, + .dimensions = dimensionsOutput, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = nullptr, + .dataSize = 0}}}}}; + return graphAddNode(name(), "LLaMAMul", {sigmoidName, inputs[0]->name()}, outputTensor, {}, 
"LLaMAPackage"); +} +} // namespace mllm \ No newline at end of file diff --git a/mllm/backends/qnn/op/QNNSiLUHigh.hpp b/mllm/backends/qnn/op/QNNSiLUHigh.hpp new file mode 100644 index 000000000..8dcfaa1e5 --- /dev/null +++ b/mllm/backends/qnn/op/QNNSiLUHigh.hpp @@ -0,0 +1,25 @@ + +#ifndef MLLM_QNNSiLUHigh_H +#define MLLM_QNNSiLUHigh_H + +#include "QNNCommonOp.hpp" +namespace mllm { +class QNNSiLUHigh : public QNNCommonOp { +public: + QNNSiLUHigh(Backend *bn, string opName); + virtual ~QNNSiLUHigh() = default; + virtual ErrorCode reshape(vector> inputs, vector> outputs) override; + virtual ErrorCode setUp(vector> inputs, vector> outputs) override; +}; + + +class QNNSiLUHighCreator : public QNNBackend::Creator { + + virtual Op *create(OpParam op_param, Backend *bn, string name) const { + return new QNNSiLUHigh(bn, name); + } +}; + +} // namespace mllm + +#endif diff --git a/src/backends/qnn/op/QNNSoftMax.cpp b/mllm/backends/qnn/op/QNNSoftMax.cpp similarity index 96% rename from src/backends/qnn/op/QNNSoftMax.cpp rename to mllm/backends/qnn/op/QNNSoftMax.cpp index ad6523dd7..e03a1a86c 100644 --- a/src/backends/qnn/op/QNNSoftMax.cpp +++ b/mllm/backends/qnn/op/QNNSoftMax.cpp @@ -18,6 +18,7 @@ ErrorCode QNNSoftMax::reshape(vector> inputs, vector> inputs, vector> outputs) { + outputs[0]->setDtype(inputs[0]->dtype()); vector params = { {.paramType = QNN_PARAMTYPE_SCALAR, .name = "axis", diff --git a/src/backends/qnn/op/QNNSoftMax.hpp b/mllm/backends/qnn/op/QNNSoftMax.hpp similarity index 100% rename from src/backends/qnn/op/QNNSoftMax.hpp rename to mllm/backends/qnn/op/QNNSoftMax.hpp diff --git a/mllm/backends/qnn/op/QNNSplit.cpp b/mllm/backends/qnn/op/QNNSplit.cpp new file mode 100644 index 000000000..1baba2c13 --- /dev/null +++ b/mllm/backends/qnn/op/QNNSplit.cpp @@ -0,0 +1,219 @@ + +#include "QNNSplit.hpp" +#include "QnnTypes.h" +#include "Types.hpp" +#include "QNNCommonOp.hpp" +#include +#include + +namespace mllm { +QNNSplit::QNNSplit(Backend *bn, string opName, int 
splitNum, Chl splitDim, int splitDimSize, std::vector each_dims) : + split_num_(splitNum), split_dim_(splitDim), split_dim_size_(splitDimSize), each_dims_(each_dims), QNNCommonOp(bn, opName) { +} + +ErrorCode QNNSplit::reshape(vector> inputs, vector> outputs) { + assert(split_num_ == outputs.size()); + assert(inputs.size() == 1); + switch (split_dim_) { + case Chl::HEAD: { + switch (split_dim_size_) { + case -1: /*using each_dims*/ { + // check shape + assert(!each_dims_.empty() && "split op with split_dims_size_ == 1 should has each_dims_ params"); + { + int head_sum = 0; + for (auto item : each_dims_) head_sum += item; + assert(head_sum == inputs[0]->head() && "sum(each_dims_) miss match inputs[0]'s head dim"); + } + assert(outputs.size() == each_dims_.size() && "outputs size miss match each_dims_ size"); + + // reshape output + for (size_t i = 0; i < each_dims_.size(); ++i) { + outputs[i]->reshape(inputs[0]->batch(), each_dims_[i], inputs[0]->sequence(), inputs[0]->dimension()); + } + break; + } + default: /*split for same size*/ { + assert(inputs[0]->head() % split_num_ == 0); + for (auto &output : outputs) { + output->reshape(inputs[0]->batch(), inputs[0]->head() / split_num_, inputs[0]->sequence(), inputs[0]->dimension()); + } + break; + } + } + break; + } + case Chl::SEQUENCE: { + switch (split_dim_size_) { + case -1: /*using each_dims*/ { + // check shape + assert(!each_dims_.empty() && "split op with split_dims_size_ == 1 should has each_dims_ params"); + { + int seq_sum = 0; + for (auto item : each_dims_) seq_sum += item; + assert(seq_sum == inputs[0]->sequence() && "sum(each_dims_) miss match inputs[0]'s sequence dim"); + } + assert(outputs.size() == each_dims_.size() && "outputs size miss match each_dims_ size"); + + // reshape output + for (size_t i = 0; i < each_dims_.size(); ++i) { + outputs[i]->reshape(inputs[0]->batch(), inputs[0]->head(), each_dims_[i], inputs[0]->dimension()); + } + break; + } + default: { + assert(inputs[0]->sequence() % 
split_num_ == 0); + for (auto &output : outputs) { + output->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence() / split_num_, inputs[0]->dimension()); + } + break; + } + } + break; + } + case Chl::DIMENSION: { + switch (split_dim_size_) { + case -1: /*using each_dims*/ { + // check shape + assert(!each_dims_.empty() && "split op with split_dims_size_ == 1 should has each_dims_ params"); + { + int dimension_sum = 0; + for (auto item : each_dims_) dimension_sum += item; + assert(dimension_sum == inputs[0]->sequence() && "sum(each_dims_) miss match inputs[0]'s dimension dim"); + } + assert(outputs.size() == each_dims_.size() && "outputs size miss match each_dims_ size"); + + // reshape output + for (size_t i = 0; i < each_dims_.size(); ++i) { + outputs[i]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), each_dims_[i]); + } + break; + } + default: { + assert(inputs[0]->dimension() % split_num_ == 0); + for (auto &output : outputs) { + output->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension() / split_num_); + } + break; + } + } + break; + } + case Chl::D_HD: { + assert(inputs[0]->dimension() % split_num_ == 0); + for (auto &output : outputs) { + output->reshape(inputs[0]->batch(), split_dim_size_, inputs[0]->sequence(), inputs[0]->dimension() / (split_num_ * split_dim_size_)); + } + break; + } + case Chl::HD: { + assert(inputs[0]->dimension() % split_num_ == 0); + for (auto &output : outputs) { + output->reshape(inputs[0]->batch(), split_dim_size_, inputs[0]->sequence(), inputs[0]->dimension() / (split_num_ * split_dim_size_)); + } + break; + } + default: { + break; + } + } + + return Op::reshape(inputs, outputs); +} + +ErrorCode QNNSplit::setUp(vector> inputs, vector> outputs) { + for(auto &output : outputs) { + output->setDtype(inputs[0]->dtype()); + } + vector split_index(split_num_ - 1); + for (int i = 0; i < split_num_; i++) { + split_index[i] = split_dim_size_ * (i + 1); + } + + 
uint32_t split_index_dim[1] = {2}; + auto paramTensorName = name() + ".param"; + vector params = { + {.paramType = QNN_PARAMTYPE_SCALAR, + .name = "axis", + .scalarParam = (Qnn_Scalar_t){QNN_DATATYPE_UINT_32, {.uint32Value = 3}}}, + {.paramType = QNN_PARAMTYPE_TENSOR, + .name = "split_index", + .tensorParam = (Qnn_Tensor_t){.version = QNN_TENSOR_VERSION_1, + .v1 = { + .id = 0, + .name = paramTensorName.c_str(), + .type = QNN_TENSOR_TYPE_STATIC, + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = QNN_DATATYPE_UINT_32, + .quantizeParams = {QNN_DEFINITION_UNDEFINED, + QNN_QUANTIZATION_ENCODING_UNDEFINED, + {.scaleOffsetEncoding = {.scale = 0.0000000000000000f, .offset = 0}}}, + .rank = 1, + .dimensions = split_index_dim, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = split_index.data(), .dataSize = static_cast(((split_num_ - 1)) * sizeof(uint32_t))}}}}}; + + vector out = {}; + vector> outDims; + vector outNames; + auto outPutDataType = QNN_DATATYPE_FLOAT_32; + auto quanDefined = QNN_DEFINITION_UNDEFINED; + auto quantDecoding = QNN_QUANTIZATION_ENCODING_UNDEFINED; + float quantScale = 0.0000000000000000f; + if (inputs[0]->dtype() == MLLM_TYPE_F16) { + outPutDataType = QNN_DATATYPE_FLOAT_16; + for (auto &output : outputs) { + output->setDtype(MLLM_TYPE_F16); + } + + } else if (inputs[0]->dtype() == MLLM_TYPE_I8) { + for (auto &output : outputs) { + output->setDtype(MLLM_TYPE_I8); + } + outPutDataType = QNN_DATATYPE_SFIXED_POINT_8; + quanDefined = QNN_DEFINITION_DEFINED; + quantDecoding = QNN_QUANTIZATION_ENCODING_SCALE_OFFSET; + quantScale = inputs[0]->quant_param.scale; + outputs[0]->quant_param = inputs[0]->quant_param; + } else if (inputs[0]->dtype() == MLLM_TYPE_I16) { + for (auto &output : outputs) { + output->setDtype(MLLM_TYPE_I16); + } + outPutDataType = QNN_DATATYPE_SFIXED_POINT_16; + quanDefined = QNN_DEFINITION_DEFINED; + quantDecoding = QNN_QUANTIZATION_ENCODING_SCALE_OFFSET; + quantScale = inputs[0]->quant_param.scale; + 
outputs[0]->quant_param = inputs[0]->quant_param; + } else if (inputs[0]->dtype() == MLLM_TYPE_I32) { + for (auto &output : outputs) { + output->setDtype(MLLM_TYPE_I32); + } + outPutDataType = QNN_DATATYPE_SFIXED_POINT_32; + } + for (int i = 0; i < split_num_; i++) { + outDims.push_back({static_cast(outputs[i]->batch()), + static_cast(outputs[i]->sequence()), + static_cast(outputs[i]->head()), + static_cast(outputs[i]->dimension())}); + outNames.push_back(new string(outputs[i]->name())); + out.push_back((Qnn_Tensor_t){ + .version = QNN_TENSOR_VERSION_1, + .v1 = { + .id = 0, + .name = outNames[i]->c_str(), + .type = getOutputTensorType(outputs[i]), + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = outPutDataType, + .quantizeParams = {quanDefined, + quantDecoding, + {.scaleOffsetEncoding = {.scale = quantScale, .offset = 0}}}, + .rank = 4, + .dimensions = outDims[i].data(), + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = nullptr, .dataSize = 0}}}); + } + + return graphAddNode(name(), "Split", {inputs[0]->name()}, out, params); +} + +} // namespace mllm diff --git a/mllm/backends/qnn/op/QNNSplit.hpp b/mllm/backends/qnn/op/QNNSplit.hpp new file mode 100644 index 000000000..4356b7d17 --- /dev/null +++ b/mllm/backends/qnn/op/QNNSplit.hpp @@ -0,0 +1,50 @@ + +#ifndef MLLM_QNNSPLIT_H +#define MLLM_QNNSPLIT_H + +#include "QNNCommonOp.hpp" +#include "Types.hpp" +namespace mllm { +class QNNSplit final : public QNNCommonOp { +public: + QNNSplit(Backend *bn, string opName, int splitNum, Chl splitDim, int splitDimSize, std::vector each_dims = {}); + virtual ~QNNSplit() = default; + virtual ErrorCode reshape(vector> inputs, vector> outputs) override; + virtual ErrorCode setUp(vector> inputs, vector> outputs) override; + +private: + int split_num_; + Chl split_dim_; + int split_dim_size_; + std::vector each_dims_; +}; + +class QNNSplitCreator : public QNNBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name) const { 
+ int splitNum = (int)op_param["split_num"]; + Chl splitDim = (Chl)op_param["split_dim"]; + int splitDimSize; + if (op_param.find("split_dim_size") != op_param.end()) { + splitDimSize = (int)op_param["split_dim_size"]; + } else { + splitDimSize = -1; + } + + // if using each_dim + std::vector each_dims = {}; + if (splitDimSize == -1) { + int cnt = 0; + while (true) { + auto iter = op_param.find("split_dim_size_" + std::to_string(cnt++)); + if (iter == op_param.end()) break; + each_dims.push_back((int)iter->second); + } + } + return new QNNSplit(bn, name, splitNum, splitDim, splitDimSize, each_dims); + } +}; + +} // namespace mllm + +#endif diff --git a/src/backends/qnn/op/QNNSplitInput.cpp b/mllm/backends/qnn/op/QNNSplitInput.cpp similarity index 84% rename from src/backends/qnn/op/QNNSplitInput.cpp rename to mllm/backends/qnn/op/QNNSplitInput.cpp index 4bb9d8e1e..2badff9db 100755 --- a/src/backends/qnn/op/QNNSplitInput.cpp +++ b/mllm/backends/qnn/op/QNNSplitInput.cpp @@ -13,14 +13,14 @@ QNNSplitInput::QNNSplitInput(Backend *bn, string opName, bool isPrompt, int num) ErrorCode QNNSplitInput::reshape(vector> inputs, vector> outputs) { for (int i = 0; i < inputs.size(); i++) { - outputs[i]->shallowCopyFrom(inputs[i].get(), true); + outputs[i]->shallowCopyFrom(inputs[i], true); } return Op::reshape(inputs, outputs); } ErrorCode QNNSplitInput::setUp(vector> inputs, vector> outputs) { for (int i = 0; i < inputs.size(); i++) { - outputs[i]->shallowCopyFrom(inputs[i].get(), true); + outputs[i]->shallowCopyFrom(inputs[i], true); } return MLLM_NO_ERROR; } diff --git a/src/backends/qnn/op/QNNSplitInput.hpp b/mllm/backends/qnn/op/QNNSplitInput.hpp similarity index 100% rename from src/backends/qnn/op/QNNSplitInput.hpp rename to mllm/backends/qnn/op/QNNSplitInput.hpp diff --git a/mllm/backends/qnn/op/QNNSubGraphFinalize.cpp b/mllm/backends/qnn/op/QNNSubGraphFinalize.cpp new file mode 100644 index 000000000..55df06768 --- /dev/null +++ 
b/mllm/backends/qnn/op/QNNSubGraphFinalize.cpp @@ -0,0 +1,32 @@ + +#include "QNNSubGraphFinalize.hpp" +#include "Types.hpp" +#include "QNNCommonOp.hpp" +#include + +namespace mllm { +QNNSubGraphFinalize::QNNSubGraphFinalize(Backend *bn, string opName) : + QNNCommonOp(bn, opName) { +} + +ErrorCode QNNSubGraphFinalize::reshape(vector> inputs, vector> outputs) { + for(auto& t : inputs) { + t->setTtype(GRAPH_OUTPUT); + } + return Op::reshape(inputs, outputs); +} + +ErrorCode QNNSubGraphFinalize::setUp(vector> inputs, vector> outputs) { + for (auto input : inputs) { + input->to(MLLM_CPU); + } + + this->backend_->onSetUpEnd(inputs, outputs); + return MLLM_NO_ERROR; +} + +ErrorCode QNNSubGraphFinalize::free(vector> inputs, vector> outputs) { + return MLLM_NO_ERROR; +} + +} // namespace mllm diff --git a/mllm/backends/qnn/op/QNNSubGraphFinalize.hpp b/mllm/backends/qnn/op/QNNSubGraphFinalize.hpp new file mode 100644 index 000000000..f1cb2cddd --- /dev/null +++ b/mllm/backends/qnn/op/QNNSubGraphFinalize.hpp @@ -0,0 +1,25 @@ + +#ifndef MLLM_QNNSUBGRAPHFINALIZE_H +#define MLLM_QNNSUBGRAPHFINALIZE_H + +#include "QNNCommonOp.hpp" +namespace mllm { +class QNNSubGraphFinalize : public QNNCommonOp { +public: + QNNSubGraphFinalize(Backend *bn, string opName); + virtual ~QNNSubGraphFinalize() = default; + virtual ErrorCode reshape(vector> inputs, vector> outputs) override; + virtual ErrorCode setUp(vector> inputs, vector> outputs) override; + virtual ErrorCode free(vector> inputs, vector> outputs) override; +}; + +class QNNSubGraphFinalizeCreator : public QNNBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name) const { + return new QNNSubGraphFinalize(bn, name); + } +}; + +} // namespace mllm + +#endif diff --git a/mllm/backends/qnn/op/QNNSubGraphStart.cpp b/mllm/backends/qnn/op/QNNSubGraphStart.cpp new file mode 100644 index 000000000..cd4a3469c --- /dev/null +++ b/mllm/backends/qnn/op/QNNSubGraphStart.cpp @@ -0,0 +1,46 @@ + +#include 
"QNNSubGraphStart.hpp" +#include "Types.hpp" +#include "QNNCommonOp.hpp" +#include + +namespace mllm { +QNNSubGraphStart::QNNSubGraphStart(Backend *bn, string opName) : + QNNCommonOp(bn, opName) { +} + +ErrorCode QNNSubGraphStart::reshape(vector> inputs, vector> outputs) { + return Op::reshape(inputs, outputs); +} + +ErrorCode QNNSubGraphStart::setUp(vector> inputs, vector> outputs) { + for (auto input : inputs) { + input->to(MLLM_QNN); + input->alloc(); + if (!input->childTensors().empty()) { + // for (auto &child_tensor : input->childTensors()) { + // child_tensor->shallowCopyFrom(input.get(), false); + // } + for (auto &child_wp : input->childTensors()) { + // Lock the weak_ptr to get a shared_ptr + if (auto child_sp = child_wp.lock()) { + child_sp->shallowCopyFrom(input, false); + } + } + } + } + + this->backend_->onSetUpStart(inputs, outputs, name_); + return MLLM_NO_ERROR; +} + +ErrorCode QNNSubGraphStart::free(vector> inputs, vector> outputs) { + return MLLM_NO_ERROR; +} + +ErrorCode QNNSubGraphStart::execute(vector> inputs, vector> outputs) { + this->backend_->onExecuteStart(inputs, outputs, name_); + return MLLM_NO_ERROR; +} + +} // namespace mllm diff --git a/mllm/backends/qnn/op/QNNSubGraphStart.hpp b/mllm/backends/qnn/op/QNNSubGraphStart.hpp new file mode 100644 index 000000000..ebb15824e --- /dev/null +++ b/mllm/backends/qnn/op/QNNSubGraphStart.hpp @@ -0,0 +1,26 @@ + +#ifndef MLLM_QNNSUBGRAPHSTART_H +#define MLLM_QNNSUBGRAPHSTART_H + +#include "QNNCommonOp.hpp" +namespace mllm { +class QNNSubGraphStart : public QNNCommonOp { +public: + QNNSubGraphStart(Backend *bn, string opName); + virtual ~QNNSubGraphStart() = default; + virtual ErrorCode reshape(vector> inputs, vector> outputs) override; + virtual ErrorCode setUp(vector> inputs, vector> outputs) override; + virtual ErrorCode free(vector> inputs, vector> outputs) override; + virtual ErrorCode execute(vector> inputs, vector> outputs) override; +}; + +class QNNSubGraphStartCreator : public 
QNNBackend::Creator { +public: + virtual Op *create(OpParam op_param, Backend *bn, string name) const { + return new QNNSubGraphStart(bn, name); + } +}; + +} // namespace mllm + +#endif diff --git a/src/backends/qnn/op/QNNSuperSiLU.cpp b/mllm/backends/qnn/op/QNNSuperSiLU.cpp similarity index 73% rename from src/backends/qnn/op/QNNSuperSiLU.cpp rename to mllm/backends/qnn/op/QNNSuperSiLU.cpp index 2e0dcc051..100a17320 100644 --- a/src/backends/qnn/op/QNNSuperSiLU.cpp +++ b/mllm/backends/qnn/op/QNNSuperSiLU.cpp @@ -31,15 +31,15 @@ ErrorCode QNNSuperSiLU::setUp(vector> inputs, vector()[0] / 127.0; - aScale = roundf(aScale * 100000) / 100000; + // aScale = roundf(aScale * 100000) / 100000; float bScale = 0; bScale = b_scale_.hostPtr()[0] / 127.0; - bScale = roundf(bScale * 100000) / 100000; + // bScale = roundf(bScale * 100000) / 100000; float oScale = 0; oScale = o_scale_.hostPtr()[0] / 127.0; - oScale = roundf(oScale * 100000) / 100000; + // oScale = roundf(oScale * 100000) / 100000; auto paramsSuperSiLuNameA = name() + ".supersilu_params.a_scale"; auto paramsSuperSiLuNameB = name() + ".supersilu_params.b_scale"; @@ -48,11 +48,11 @@ ErrorCode QNNSuperSiLU::setUp(vector> inputs, vector paramsSuperSiLu = { - {.paramType = QNN_PARAMTYPE_TENSOR, + (Qnn_Param_t){.paramType = QNN_PARAMTYPE_TENSOR, .name = "a_scale", - {.tensorParam = + .tensorParam = (Qnn_Tensor_t){.version = QNN_TENSOR_VERSION_1, - {.v1 = { + .v1 = { .id = 0, .name = paramsSuperSiLuNameA.c_str(), .type = QNN_TENSOR_TYPE_STATIC, @@ -65,13 +65,13 @@ ErrorCode QNNSuperSiLU::setUp(vector> inputs, vector> inputs, vector> inputs, vector outputTensor = {{QNN_TENSOR_VERSION_1, - {.v1 = { - .id = 0, - .name = outName.c_str(), - .type = getOutputTensorType(outputs[0]), - .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, - .dataType = QNN_DATATYPE_SFIXED_POINT_8, - .quantizeParams = {QNN_DEFINITION_DEFINED, - QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, - {.scaleOffsetEncoding = {.scale = oScale, - .offset = 0}}}, - 
.rank = 4, - .dimensions = dimensionsOutput, - .memType = QNN_TENSORMEMTYPE_RAW, - {.clientBuf = {.data = nullptr, - .dataSize = 0}}}}}}; + .clientBuf = {.data = (uint8_t *)&oScale, + .dataSize = sizeof(float)}}}}}; + + vector outputTensor = { + (Qnn_Tensor_t){.version = QNN_TENSOR_VERSION_1, + .v1 = { + .id = 0, + .name = outName.c_str(), + .type = getOutputTensorType(outputs[0]), + .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, + .dataType = QNN_DATATYPE_SFIXED_POINT_8, + .quantizeParams = {QNN_DEFINITION_DEFINED, + QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, + {.scaleOffsetEncoding = {.scale = oScale, + .offset = 0}}}, + .rank = 4, + .dimensions = dimensionsOutput, + .memType = QNN_TENSORMEMTYPE_RAW, + .clientBuf = {.data = nullptr, + .dataSize = 0}}}}; return graphAddNode(name(), "LLaMASuperSiLU", {inputs[0]->name(), inputs[1]->name()}, outputTensor, paramsSuperSiLu, "LLaMAPackage"); } diff --git a/src/backends/qnn/op/QNNSuperSiLU.hpp b/mllm/backends/qnn/op/QNNSuperSiLU.hpp similarity index 100% rename from src/backends/qnn/op/QNNSuperSiLU.hpp rename to mllm/backends/qnn/op/QNNSuperSiLU.hpp diff --git a/src/backends/qnn/op/QNNTranspose.cpp b/mllm/backends/qnn/op/QNNTranspose.cpp similarity index 100% rename from src/backends/qnn/op/QNNTranspose.cpp rename to mllm/backends/qnn/op/QNNTranspose.cpp diff --git a/src/backends/qnn/op/QNNTranspose.hpp b/mllm/backends/qnn/op/QNNTranspose.hpp similarity index 100% rename from src/backends/qnn/op/QNNTranspose.hpp rename to mllm/backends/qnn/op/QNNTranspose.hpp diff --git a/mllm/backends/qnn/op/QNNView.cpp b/mllm/backends/qnn/op/QNNView.cpp new file mode 100644 index 000000000..037ad2850 --- /dev/null +++ b/mllm/backends/qnn/op/QNNView.cpp @@ -0,0 +1,93 @@ + +#include "QNNView.hpp" +#include "Types.hpp" +#include "QNNCommonOp.hpp" +#include + +namespace mllm { +QNNView::QNNView(Backend *bn, string opName, vector dims, vector data_dims) : + QNNCommonOp(bn, opName) { + dim0_ = dims[0]; + dim1_ = dims[1]; + dim2_ = dims[2]; + 
dim3_ = dims[3]; + data_dim0_ = data_dims[0]; + data_dim1_ = data_dims[1]; + data_dim2_ = data_dims[2]; + data_dim3_ = data_dims[3]; +} + +ErrorCode QNNView::reshape(vector> inputs, vector> outputs) { + int dim0 = inputs[0]->batch(); + int dim1 = inputs[0]->sequence(); + int dim2 = inputs[0]->head(); + int dim3 = inputs[0]->dimension(); + + if (data_dim0_ == BATCH && data_dim1_ == DIMENSION && data_dim2_ == SEQUENCE && data_dim3_ == DIMENSION) { + dim1 = dim1_; + dim2 = inputs[0]->sequence(); + dim3 = inputs[0]->dimension() / dim1_; + } else if (data_dim0_ == BATCH && data_dim1_ == -1 && data_dim2_ == SEQUENCE && data_dim3_ == HEAD + DIMENSION) { + dim2 = dim1; + dim1 = 1; + dim3 = inputs[0]->dimension() * inputs[0]->head(); + } else if (data_dim0_ == BATCH && data_dim1_ == -1 && data_dim2_ == SEQUENCE + HEAD && data_dim3_ == DIMENSION) { + dim1 = 1; + dim2 = inputs[0]->sequence() * inputs[0]->head(); + } else if (data_dim0_ == BATCH && data_dim1_ == -1 && data_dim2_ == CHANNLE && data_dim3_ == TIME + HEIGHT + WIDTH) { + // assert(inputs[0]->ctype() == BCTHW); + dim1 = 1; + dim2 = inputs[0]->channel(); + dim3 = inputs[0]->time() * inputs[0]->height() * inputs[0]->width(); + } else if (data_dim0_ == BATCH && data_dim1_ == -1 && data_dim2_ == TIME + HEIGHT + WIDTH && data_dim3_ == CHANNLE) { + if (inputs[0]->ctype() == BTHWC) { + dim1 = 1; + dim2 = inputs[0]->time() * inputs[0]->height() * inputs[0]->width(); + dim3 = inputs[0]->channel(); + } else { + dim1 = 1; + dim2 = inputs[0]->time() * inputs[0]->height() * inputs[0]->channel(); + dim3 = inputs[0]->width(); + } + } else if (data_dim0_ == SEQUENCE && data_dim1_ == HEAD && data_dim2_ == BATCH && data_dim3_ == DIMENSION) { + dim0 = inputs[0]->sequence(); + dim1 = inputs[0]->head(); + dim2 = inputs[0]->batch(); + dim3 = inputs[0]->dimension(); + } else if (data_dim0_ == BATCH && data_dim1_ == HEAD && data_dim2_ == BATCH && data_dim3_ == DIMENSION) { + dim0 = inputs[0]->batch() / dim2_; + dim1 = inputs[0]->head(); + 
dim2 = dim2_; + dim3 = inputs[0]->dimension(); + } else if (data_dim0_ == BATCH && data_dim1_ == SEQUENCE && data_dim2_ == SEQUENCE && data_dim3_ == DIMENSION) { + dim0 = inputs[0]->batch(); + dim1 = dim1_; + dim2 = dim1_; + dim3 = inputs[0]->dimension(); + } else if (data_dim0_ == BATCH && data_dim1_ == HEAD && data_dim2_ == SEQUENCE && data_dim3_ == DIMENSION) { + dim0 = dim0_; + dim1 = dim1_; + dim2 = dim2_; + dim3 = dim3_; + } else { + std::cout << "QNNView not support!!!!" << std::endl; + } + outputs[0]->reshape(dim0, dim1, dim2, dim3); + + return Op::reshape(inputs, outputs); +} + +ErrorCode QNNView::setUp(vector> inputs, vector> outputs) { + outputs[0]->setDtype(inputs[0]->dtype()); + + if (outputs[0]->dtype() == MLLM_TYPE_I8 || outputs[0]->dtype() == MLLM_TYPE_I16) { + outputs[0]->quant_param = inputs[0]->quant_param; + } + return graphAddNode(name(), "Reshape", inputs, outputs, {}, "qti.aisw", true); +} + +ErrorCode QNNView::load(AbstructLoader &loader) { + return Op::load(loader); +} + +} // namespace mllm diff --git a/src/backends/qnn/op/QNNView.hpp b/mllm/backends/qnn/op/QNNView.hpp similarity index 98% rename from src/backends/qnn/op/QNNView.hpp rename to mllm/backends/qnn/op/QNNView.hpp index 318d37bea..5f46a99e5 100644 --- a/src/backends/qnn/op/QNNView.hpp +++ b/mllm/backends/qnn/op/QNNView.hpp @@ -21,8 +21,6 @@ class QNNView : public QNNCommonOp { int data_dim1_; int data_dim2_; int data_dim3_; - - Tensor scale_; }; class QNNViewCreator : public QNNBackend::Creator { diff --git a/src/backends/qnn/op/new_op.py b/mllm/backends/qnn/op/new_op.py similarity index 100% rename from src/backends/qnn/op/new_op.py rename to mllm/backends/qnn/op/new_op.py diff --git a/src/backends/xnnpack/CMakeLists.txt b/mllm/backends/xnnpack/CMakeLists.txt similarity index 67% rename from src/backends/xnnpack/CMakeLists.txt rename to mllm/backends/xnnpack/CMakeLists.txt index e416963b7..b353d093c 100644 --- a/src/backends/xnnpack/CMakeLists.txt +++ 
b/mllm/backends/xnnpack/CMakeLists.txt @@ -1,6 +1,6 @@ add_subdirectory(third_party/XNNPACK) -add_library(MllmXnnpackBackend +add_library(mllm_xnnpack OBJECT Utils/Logger.cpp @@ -36,7 +36,7 @@ add_library(MllmXnnpackBackend Functions/XpViewFunc.cpp Functions/XpMatmulFunc.cpp ) -target_include_directories(MllmXnnpackBackend PUBLIC third_party/XNNPACK/src/) -target_include_directories(MllmXnnpackBackend PUBLIC ${CMAKE_CURRENT_LIST_DIR}/../) -target_link_libraries(MllmXnnpackBackend PUBLIC XNNPACK fmt::fmt-header-only) -set_target_properties(MllmXnnpackBackend PROPERTIES WINDOWS_EXPORT_ALL_SYMBOLS TRUE) +target_include_directories(mllm_xnnpack PUBLIC third_party/XNNPACK/src/) +target_include_directories(mllm_xnnpack PUBLIC ${CMAKE_CURRENT_LIST_DIR}/../) +target_link_libraries(mllm_xnnpack PUBLIC XNNPACK fmt::fmt-header-only) +set_target_properties(mllm_xnnpack PROPERTIES WINDOWS_EXPORT_ALL_SYMBOLS TRUE) diff --git a/src/backends/xnnpack/Functions/XpBinaryFunc.cpp b/mllm/backends/xnnpack/Functions/XpBinaryFunc.cpp similarity index 93% rename from src/backends/xnnpack/Functions/XpBinaryFunc.cpp rename to mllm/backends/xnnpack/Functions/XpBinaryFunc.cpp index b6ea6792d..19b77c541 100644 --- a/src/backends/xnnpack/Functions/XpBinaryFunc.cpp +++ b/mllm/backends/xnnpack/Functions/XpBinaryFunc.cpp @@ -126,7 +126,7 @@ void XpBroadcastMulFunction::execute(vector> outputs, vector< } } -void XpBroadcastDivFunction::setup(vector> outputs, vector> inputs, vector args) { +void XpBroadcastDivFunction::reshape(vector> outputs, vector> inputs, vector args) { // reshape auto input = inputs[0]; auto output = outputs[0]; @@ -166,7 +166,7 @@ void XpBroadcastDivFunction::execute(vector> outputs, vector< } } -void XpTTAddFunction::setup(vector> outputs, vector> inputs, vector args) { +void XpTTAddFunction::reshape(vector> outputs, vector> inputs, vector args) { // reshape auto input = inputs[0]; auto output = outputs[0]; @@ -197,7 +197,7 @@ void XpTTAddFunction::execute(vector> outputs, 
vector> outputs, vector> inputs, vector args) { +void XpTTSubFunction::reshape(vector> outputs, vector> inputs, vector args) { // reshape auto input = inputs[0]; auto output = outputs[0]; @@ -228,7 +228,7 @@ void XpTTSubFunction::execute(vector> outputs, vector> outputs, vector> inputs, vector args) { +void XpTTMulFunction::reshape(vector> outputs, vector> inputs, vector args) { // reshape auto input = inputs[0]; auto output = outputs[0]; @@ -259,7 +259,7 @@ void XpTTMulFunction::execute(vector> outputs, vector> outputs, vector> inputs, vector args) { +void XpTTDivFunction::reshape(vector> outputs, vector> inputs, vector args) { // reshape auto input = inputs[0]; auto output = outputs[0]; diff --git a/src/backends/xnnpack/Functions/XpBinaryFunc.hpp b/mllm/backends/xnnpack/Functions/XpBinaryFunc.hpp similarity index 100% rename from src/backends/xnnpack/Functions/XpBinaryFunc.hpp rename to mllm/backends/xnnpack/Functions/XpBinaryFunc.hpp diff --git a/src/backends/xnnpack/Functions/XpMatmulFunc.cpp b/mllm/backends/xnnpack/Functions/XpMatmulFunc.cpp similarity index 100% rename from src/backends/xnnpack/Functions/XpMatmulFunc.cpp rename to mllm/backends/xnnpack/Functions/XpMatmulFunc.cpp diff --git a/src/backends/xnnpack/Functions/XpMatmulFunc.hpp b/mllm/backends/xnnpack/Functions/XpMatmulFunc.hpp similarity index 100% rename from src/backends/xnnpack/Functions/XpMatmulFunc.hpp rename to mllm/backends/xnnpack/Functions/XpMatmulFunc.hpp diff --git a/src/backends/xnnpack/Functions/XpTransposeFunc.cpp b/mllm/backends/xnnpack/Functions/XpTransposeFunc.cpp similarity index 100% rename from src/backends/xnnpack/Functions/XpTransposeFunc.cpp rename to mllm/backends/xnnpack/Functions/XpTransposeFunc.cpp diff --git a/src/backends/xnnpack/Functions/XpTransposeFunc.hpp b/mllm/backends/xnnpack/Functions/XpTransposeFunc.hpp similarity index 100% rename from src/backends/xnnpack/Functions/XpTransposeFunc.hpp rename to mllm/backends/xnnpack/Functions/XpTransposeFunc.hpp diff --git 
a/src/backends/xnnpack/Functions/XpViewFunc.cpp b/mllm/backends/xnnpack/Functions/XpViewFunc.cpp similarity index 100% rename from src/backends/xnnpack/Functions/XpViewFunc.cpp rename to mllm/backends/xnnpack/Functions/XpViewFunc.cpp diff --git a/src/backends/xnnpack/Functions/XpViewFunc.hpp b/mllm/backends/xnnpack/Functions/XpViewFunc.hpp similarity index 100% rename from src/backends/xnnpack/Functions/XpViewFunc.hpp rename to mllm/backends/xnnpack/Functions/XpViewFunc.hpp diff --git a/src/backends/xnnpack/Ops/XpBinary.cpp b/mllm/backends/xnnpack/Ops/XpBinary.cpp similarity index 100% rename from src/backends/xnnpack/Ops/XpBinary.cpp rename to mllm/backends/xnnpack/Ops/XpBinary.cpp diff --git a/src/backends/xnnpack/Ops/XpBinary.hpp b/mllm/backends/xnnpack/Ops/XpBinary.hpp similarity index 100% rename from src/backends/xnnpack/Ops/XpBinary.hpp rename to mllm/backends/xnnpack/Ops/XpBinary.hpp diff --git a/src/backends/xnnpack/Ops/XpCausalMask.cpp b/mllm/backends/xnnpack/Ops/XpCausalMask.cpp similarity index 100% rename from src/backends/xnnpack/Ops/XpCausalMask.cpp rename to mllm/backends/xnnpack/Ops/XpCausalMask.cpp diff --git a/src/backends/xnnpack/Ops/XpCausalMask.hpp b/mllm/backends/xnnpack/Ops/XpCausalMask.hpp similarity index 100% rename from src/backends/xnnpack/Ops/XpCausalMask.hpp rename to mllm/backends/xnnpack/Ops/XpCausalMask.hpp diff --git a/src/backends/xnnpack/Ops/XpD2H.cpp b/mllm/backends/xnnpack/Ops/XpD2H.cpp similarity index 100% rename from src/backends/xnnpack/Ops/XpD2H.cpp rename to mllm/backends/xnnpack/Ops/XpD2H.cpp diff --git a/src/backends/xnnpack/Ops/XpD2H.hpp b/mllm/backends/xnnpack/Ops/XpD2H.hpp similarity index 100% rename from src/backends/xnnpack/Ops/XpD2H.hpp rename to mllm/backends/xnnpack/Ops/XpD2H.hpp diff --git a/src/backends/xnnpack/Ops/XpDirect.cpp b/mllm/backends/xnnpack/Ops/XpDirect.cpp similarity index 100% rename from src/backends/xnnpack/Ops/XpDirect.cpp rename to mllm/backends/xnnpack/Ops/XpDirect.cpp diff --git 
a/src/backends/xnnpack/Ops/XpDirect.hpp b/mllm/backends/xnnpack/Ops/XpDirect.hpp similarity index 100% rename from src/backends/xnnpack/Ops/XpDirect.hpp rename to mllm/backends/xnnpack/Ops/XpDirect.hpp diff --git a/src/backends/xnnpack/Ops/XpDispatch.cpp b/mllm/backends/xnnpack/Ops/XpDispatch.cpp similarity index 100% rename from src/backends/xnnpack/Ops/XpDispatch.cpp rename to mllm/backends/xnnpack/Ops/XpDispatch.cpp diff --git a/src/backends/xnnpack/Ops/XpDispatch.hpp b/mllm/backends/xnnpack/Ops/XpDispatch.hpp similarity index 100% rename from src/backends/xnnpack/Ops/XpDispatch.hpp rename to mllm/backends/xnnpack/Ops/XpDispatch.hpp diff --git a/src/backends/xnnpack/Ops/XpEmbedding.cpp b/mllm/backends/xnnpack/Ops/XpEmbedding.cpp similarity index 98% rename from src/backends/xnnpack/Ops/XpEmbedding.cpp rename to mllm/backends/xnnpack/Ops/XpEmbedding.cpp index ac34f2e57..e9cacb925 100644 --- a/src/backends/xnnpack/Ops/XpEmbedding.cpp +++ b/mllm/backends/xnnpack/Ops/XpEmbedding.cpp @@ -1,6 +1,6 @@ #include "backends/xnnpack/Ops/XpEmbedding.hpp" -#include "backends/cpu/quantize/QuantizeQ4.hpp" -#include "backends/cpu/quantize/QuantizeQ8.hpp" +#include "backends/cpu/third_party/ggml/QuantizeQ4.hpp" +#include "backends/cpu/third_party/ggml/QuantizeQ8.hpp" namespace mllm::xnnpack { diff --git a/src/backends/xnnpack/Ops/XpEmbedding.hpp b/mllm/backends/xnnpack/Ops/XpEmbedding.hpp similarity index 100% rename from src/backends/xnnpack/Ops/XpEmbedding.hpp rename to mllm/backends/xnnpack/Ops/XpEmbedding.hpp diff --git a/src/backends/xnnpack/Ops/XpGeLU.cpp b/mllm/backends/xnnpack/Ops/XpGeLU.cpp similarity index 100% rename from src/backends/xnnpack/Ops/XpGeLU.cpp rename to mllm/backends/xnnpack/Ops/XpGeLU.cpp diff --git a/src/backends/xnnpack/Ops/XpGeLU.hpp b/mllm/backends/xnnpack/Ops/XpGeLU.hpp similarity index 100% rename from src/backends/xnnpack/Ops/XpGeLU.hpp rename to mllm/backends/xnnpack/Ops/XpGeLU.hpp diff --git a/src/backends/xnnpack/Ops/XpKVCache.cpp 
b/mllm/backends/xnnpack/Ops/XpKVCache.cpp similarity index 100% rename from src/backends/xnnpack/Ops/XpKVCache.cpp rename to mllm/backends/xnnpack/Ops/XpKVCache.cpp diff --git a/src/backends/xnnpack/Ops/XpKVCache.hpp b/mllm/backends/xnnpack/Ops/XpKVCache.hpp similarity index 100% rename from src/backends/xnnpack/Ops/XpKVCache.hpp rename to mllm/backends/xnnpack/Ops/XpKVCache.hpp diff --git a/src/backends/xnnpack/Ops/XpLinear.cpp b/mllm/backends/xnnpack/Ops/XpLinear.cpp similarity index 100% rename from src/backends/xnnpack/Ops/XpLinear.cpp rename to mllm/backends/xnnpack/Ops/XpLinear.cpp diff --git a/src/backends/xnnpack/Ops/XpLinear.hpp b/mllm/backends/xnnpack/Ops/XpLinear.hpp similarity index 100% rename from src/backends/xnnpack/Ops/XpLinear.hpp rename to mllm/backends/xnnpack/Ops/XpLinear.hpp diff --git a/src/backends/xnnpack/Ops/XpMatmul.cpp b/mllm/backends/xnnpack/Ops/XpMatmul.cpp similarity index 100% rename from src/backends/xnnpack/Ops/XpMatmul.cpp rename to mllm/backends/xnnpack/Ops/XpMatmul.cpp diff --git a/src/backends/xnnpack/Ops/XpMatmul.hpp b/mllm/backends/xnnpack/Ops/XpMatmul.hpp similarity index 100% rename from src/backends/xnnpack/Ops/XpMatmul.hpp rename to mllm/backends/xnnpack/Ops/XpMatmul.hpp diff --git a/src/backends/xnnpack/Ops/XpParameter.cpp b/mllm/backends/xnnpack/Ops/XpParameter.cpp similarity index 100% rename from src/backends/xnnpack/Ops/XpParameter.cpp rename to mllm/backends/xnnpack/Ops/XpParameter.cpp diff --git a/src/backends/xnnpack/Ops/XpParameter.hpp b/mllm/backends/xnnpack/Ops/XpParameter.hpp similarity index 100% rename from src/backends/xnnpack/Ops/XpParameter.hpp rename to mllm/backends/xnnpack/Ops/XpParameter.hpp diff --git a/src/backends/xnnpack/Ops/XpRMSNorm.cpp b/mllm/backends/xnnpack/Ops/XpRMSNorm.cpp similarity index 100% rename from src/backends/xnnpack/Ops/XpRMSNorm.cpp rename to mllm/backends/xnnpack/Ops/XpRMSNorm.cpp diff --git a/src/backends/xnnpack/Ops/XpRMSNorm.hpp b/mllm/backends/xnnpack/Ops/XpRMSNorm.hpp 
similarity index 100% rename from src/backends/xnnpack/Ops/XpRMSNorm.hpp rename to mllm/backends/xnnpack/Ops/XpRMSNorm.hpp diff --git a/src/backends/xnnpack/Ops/XpReLU.cpp b/mllm/backends/xnnpack/Ops/XpReLU.cpp similarity index 100% rename from src/backends/xnnpack/Ops/XpReLU.cpp rename to mllm/backends/xnnpack/Ops/XpReLU.cpp diff --git a/src/backends/xnnpack/Ops/XpReLU.hpp b/mllm/backends/xnnpack/Ops/XpReLU.hpp similarity index 100% rename from src/backends/xnnpack/Ops/XpReLU.hpp rename to mllm/backends/xnnpack/Ops/XpReLU.hpp diff --git a/src/backends/xnnpack/Ops/XpRoPE.cpp b/mllm/backends/xnnpack/Ops/XpRoPE.cpp similarity index 100% rename from src/backends/xnnpack/Ops/XpRoPE.cpp rename to mllm/backends/xnnpack/Ops/XpRoPE.cpp diff --git a/src/backends/xnnpack/Ops/XpRoPE.hpp b/mllm/backends/xnnpack/Ops/XpRoPE.hpp similarity index 100% rename from src/backends/xnnpack/Ops/XpRoPE.hpp rename to mllm/backends/xnnpack/Ops/XpRoPE.hpp diff --git a/src/backends/xnnpack/Ops/XpSDPA.cpp b/mllm/backends/xnnpack/Ops/XpSDPA.cpp similarity index 100% rename from src/backends/xnnpack/Ops/XpSDPA.cpp rename to mllm/backends/xnnpack/Ops/XpSDPA.cpp diff --git a/src/backends/xnnpack/Ops/XpSDPA.hpp b/mllm/backends/xnnpack/Ops/XpSDPA.hpp similarity index 100% rename from src/backends/xnnpack/Ops/XpSDPA.hpp rename to mllm/backends/xnnpack/Ops/XpSDPA.hpp diff --git a/src/backends/xnnpack/Ops/XpSiLU.cpp b/mllm/backends/xnnpack/Ops/XpSiLU.cpp similarity index 100% rename from src/backends/xnnpack/Ops/XpSiLU.cpp rename to mllm/backends/xnnpack/Ops/XpSiLU.cpp diff --git a/src/backends/xnnpack/Ops/XpSiLU.hpp b/mllm/backends/xnnpack/Ops/XpSiLU.hpp similarity index 100% rename from src/backends/xnnpack/Ops/XpSiLU.hpp rename to mllm/backends/xnnpack/Ops/XpSiLU.hpp diff --git a/src/backends/xnnpack/Ops/XpSoftmax.cpp b/mllm/backends/xnnpack/Ops/XpSoftmax.cpp similarity index 100% rename from src/backends/xnnpack/Ops/XpSoftmax.cpp rename to mllm/backends/xnnpack/Ops/XpSoftmax.cpp diff --git 
a/src/backends/xnnpack/Ops/XpSoftmax.hpp b/mllm/backends/xnnpack/Ops/XpSoftmax.hpp similarity index 100% rename from src/backends/xnnpack/Ops/XpSoftmax.hpp rename to mllm/backends/xnnpack/Ops/XpSoftmax.hpp diff --git a/src/backends/xnnpack/Ops/XpSubGraphFinalize.cpp b/mllm/backends/xnnpack/Ops/XpSubGraphFinalize.cpp similarity index 100% rename from src/backends/xnnpack/Ops/XpSubGraphFinalize.cpp rename to mllm/backends/xnnpack/Ops/XpSubGraphFinalize.cpp diff --git a/src/backends/xnnpack/Ops/XpSubGraphFinalize.hpp b/mllm/backends/xnnpack/Ops/XpSubGraphFinalize.hpp similarity index 100% rename from src/backends/xnnpack/Ops/XpSubGraphFinalize.hpp rename to mllm/backends/xnnpack/Ops/XpSubGraphFinalize.hpp diff --git a/src/backends/xnnpack/Ops/XpSubGraphStart.cpp b/mllm/backends/xnnpack/Ops/XpSubGraphStart.cpp similarity index 100% rename from src/backends/xnnpack/Ops/XpSubGraphStart.cpp rename to mllm/backends/xnnpack/Ops/XpSubGraphStart.cpp diff --git a/src/backends/xnnpack/Ops/XpSubGraphStart.hpp b/mllm/backends/xnnpack/Ops/XpSubGraphStart.hpp similarity index 100% rename from src/backends/xnnpack/Ops/XpSubGraphStart.hpp rename to mllm/backends/xnnpack/Ops/XpSubGraphStart.hpp diff --git a/src/backends/xnnpack/Ops/XpTranspose.cpp b/mllm/backends/xnnpack/Ops/XpTranspose.cpp similarity index 100% rename from src/backends/xnnpack/Ops/XpTranspose.cpp rename to mllm/backends/xnnpack/Ops/XpTranspose.cpp diff --git a/src/backends/xnnpack/Ops/XpTranspose.hpp b/mllm/backends/xnnpack/Ops/XpTranspose.hpp similarity index 100% rename from src/backends/xnnpack/Ops/XpTranspose.hpp rename to mllm/backends/xnnpack/Ops/XpTranspose.hpp diff --git a/src/backends/xnnpack/README.md b/mllm/backends/xnnpack/README.md similarity index 100% rename from src/backends/xnnpack/README.md rename to mllm/backends/xnnpack/README.md diff --git a/src/backends/xnnpack/Utils/Logger.cpp b/mllm/backends/xnnpack/Utils/Logger.cpp similarity index 100% rename from src/backends/xnnpack/Utils/Logger.cpp rename 
to mllm/backends/xnnpack/Utils/Logger.cpp diff --git a/src/backends/xnnpack/Utils/Logger.hpp b/mllm/backends/xnnpack/Utils/Logger.hpp similarity index 100% rename from src/backends/xnnpack/Utils/Logger.hpp rename to mllm/backends/xnnpack/Utils/Logger.hpp diff --git a/mllm/backends/xnnpack/XnnpackBackend.cpp b/mllm/backends/xnnpack/XnnpackBackend.cpp new file mode 100644 index 000000000..5f87accfb --- /dev/null +++ b/mllm/backends/xnnpack/XnnpackBackend.cpp @@ -0,0 +1,1319 @@ +#include "backends/xnnpack/XnnpackBackend.hpp" +#include "Backend.hpp" +#include "Module.hpp" +#include "layer.hpp" +#include "OpDefined.hpp" +#include "backends/xnnpack/Utils/Logger.hpp" +#include "pthreadpool.h" +#include "xnnpack.h" +#include "backends/xnnpack/Functions/XpBinaryFunc.hpp" +#include "backends/xnnpack/Ops/XpBinary.hpp" +#include "backends/xnnpack/XpMemoryManager.hpp" +#include "backends/xnnpack/Ops/XpDirect.hpp" +#include "backends/xnnpack/Ops/XpDispatch.hpp" +#include "backends/xnnpack/Ops/XpLinear.hpp" +#include "backends/xnnpack/Ops/XpMatmul.hpp" +#include "backends/xnnpack/Ops/XpRoPE.hpp" +#include "backends/xnnpack/Ops/XpSubGraphStart.hpp" +#include "backends/xnnpack/Ops/XpSubGraphFinalize.hpp" +#include "backends/xnnpack/Ops/XpD2H.hpp" +#include "backends/xnnpack/Ops/XpReLU.hpp" +#include "backends/xnnpack/Ops/XpSoftmax.hpp" +#include "backends/xnnpack/Ops/XpGeLU.hpp" +#include "backends/xnnpack/Ops/XpSiLU.hpp" +#include "backends/xnnpack/Ops/XpTranspose.hpp" +#include "backends/xnnpack/Functions/XpTransposeFunc.hpp" +#include "backends/xnnpack/Ops/XpRMSNorm.hpp" +#include "backends/xnnpack/Ops/XpKVCache.hpp" +#include "backends/xnnpack/Ops/XpCausalMask.hpp" +#include "backends/xnnpack/Ops/XpSDPA.hpp" +#include "backends/xnnpack/Functions/XpViewFunc.hpp" +#include "backends/xnnpack/Functions/XpMatmulFunc.hpp" +#include "backends/xnnpack/Ops/XpEmbedding.hpp" +#include "backends/xnnpack/Ops/XpParameter.hpp" +#include "xnnpack/allocator.h" +#include "xnnpack/memory.h" 
+#include "xnnpack/subgraph.h" +#include +#include +#include + +namespace mllm { + +class XpBackendCreator : public BackendCreator { + Backend *create(BackendConfig config) override { + // initialize xnnpack + if (xnn_initialize(nullptr /* allocator */) != xnn_status_success) { + ::mllm::xnnpack::Log::error("failed to initialize XNNPACK"); + return nullptr; + } + + auto mm = std::make_shared<::mllm::xnnpack::XpMemoryManager>(); + return new ::mllm::xnnpack::XnnpackBackend(mm); + }; +}; + +void registerXNNBackendCreator() { + ::mllm::xnnpack::Log::info("xnnpack backend registered"); + InsertBackendCreatorMap(MLLM_XNNPACK, std::make_shared()); +} +} // namespace mllm + +namespace mllm::xnnpack { + +XnnpackModelRuntime::XnnpackModelRuntime(pthreadpool_t threadpool) : + threadpool_(threadpool), model_(nullptr, xnn_delete_subgraph) { + num_threads_ = pthreadpool_get_threads_count(threadpool); +} + +XnnpackModelRuntime::~XnnpackModelRuntime() { + if (runtime_) { + xnn_delete_runtime(runtime_); + } + + // not release all + // FIXME: explicit memory leak. + // NOTE: explicit memory leak. + // NOTE: explicit memory leak. + // NOTE: explicit memory leak. + // NOTE: explicit memory leak. + // NOTE: explicit memory leak. + // NOTE: explicit memory leak. + // + // for (auto i = 0; i < external_values_.size(); ++i) { + // if ((model_->values[i].flags & ((uint32_t)XNN_VALUE_FLAG_EXTERNAL_INPUT)) == 1) { + // xnn_release_simd_memory(uuid_2_externals_v_[i].data); + // } + // } +} + +bool XnnpackModelRuntime::createModel(const xnn_subgraph_t &model_factory) { + model_.reset(model_factory); + if (!model_) { + Log::error("failed to create model"); + return false; + } + + for (uint32_t i = 0; i < model_->num_values; ++i) { + // if not external values. 
ignore alloc memory + if ((model_->values[i].flags & ((uint32_t)XNN_VALUE_FLAG_EXTERNAL_INPUT | (uint32_t)XNN_VALUE_FLAG_EXTERNAL_OUTPUT)) == 0) { + continue; + } + + // The prepared external_num > actually external_num, ignore redundant part. + if (uuid_2_externals_v_.count(i)) { + // if already alloced by user, ignore alloc memory + if (uuid_2_externals_v_[i].data) { + external_values_.push_back(xnn_external_value{i, uuid_2_externals_v_[i].data}); + continue; + } + + // Make a buffer for this external value. + size_t size = xnn_tensor_get_size(&model_->values[i]) + XNN_EXTRA_BYTES; + auto ev = xnn_external_value{i, xnn_allocate_zero_simd_memory(size)}; + uuid_2_externals_v_[i] = ev; + external_values_.push_back(ev); + } + } + + return model_ != nullptr; +} + +bool XnnpackModelRuntime::createRuntime(uint32_t flags) { + assert(!runtime_); + // flags |= XNN_FLAG_NO_OPERATOR_FUSION; + return xnn_status_success == xnn_create_runtime_v4(model_.get(), weight_cache_, nullptr, threadpool_, flags, &runtime_); +} + +bool XnnpackModelRuntime::reshapeRuntime() { + return xnn_status_success == xnn_reshape_runtime(runtime_); +} + +bool XnnpackModelRuntime::setupRuntime() { + return xnn_status_success == xnn_setup_runtime_v2(runtime_, external_values_.size(), external_values_.data()); +} + +bool XnnpackModelRuntime::invoke() { + return xnn_status_success == xnn_invoke_runtime(runtime_); +} + +void XnnpackModelRuntime::resetUuidExternalValuesMap(const std::unordered_map &ext_vals) { + uuid_2_externals_v_ = ext_vals; +} + +void XnnpackModelRuntime::setWeightCache(xnn_weights_cache_t weight_cache) { + weight_cache_ = weight_cache; +} + +xnn_runtime_t XnnpackModelRuntime::getXnnRt() { + return runtime_; +} + +std::unordered_map &XnnpackModelRuntime::__uuidToExternalsV() { + return uuid_2_externals_v_; +} + +XnnpackBackend::XnnpackBackend(std::shared_ptr mm, const XnnpackBackendOpts &opts) : + Backend(mm), opts_(opts) { + // init weight_cache_ + // 
xnn_create_weights_cache(&weight_cache_); + + // register ops + type_ = BackendType::MLLM_XNNPACK; + registerOps(); + registerFuncs(); +} + +XnnpackBackend::~XnnpackBackend() { +} + +bool XnnpackBackend::addCreator(OpType t, Creator *c) { + if (map_op_creator_.count(t)) { + Log::error("{} op has been added to this backend.", (int32_t)t); + return false; + } + map_op_creator_.insert({t, c}); + return true; +} + +Op *XnnpackBackend::opCreate(const OpParam &op_param, string name, int thread_count) { + auto op_type = OpType(op_param.find("type")->second); + auto iter = map_op_creator_.find(op_type); + + if (thread_count) { + Log::warn("Xnnpack use global thread pool. thread_count is set to {}, but not used.", thread_count); + } + + if (iter == map_op_creator_.end()) { + Log::error("OpType={}, Name={} is not supported yet.", int(op_param.find("type")->second), name); + return nullptr; + } + auto op = iter->second->create(op_param, this, name, thread_count); + return op; +} + +TensorFunction *XnnpackBackend::funcCreate(TensorFuncType type) { + auto iter = map_tensor_function_.find(type); + if (iter == map_tensor_function_.end()) { + Log::error("Xnnpack backend don't support func type {}", (int32_t)type); + return nullptr; + } + return iter->second; +} + +void XnnpackBackend::registerOps() { + addCreator(D2H, new XpD2HCreator()); + addCreator(ADD, new XpAddCreator()); + addCreator(DIRECT, new XpDirectCreator()); + addCreator(DISPATCH, new XpDispatchCreator()); + addCreator(SUBGRAPHSTART, new XpSubGraphStartCreator()); + addCreator(SUBGRAPHFINALIZE, new XpSubGraphFinalizeCreator()); + addCreator(LINEAR, new XpLinearCreator()); + addCreator(MATMUL, new XpMatMulCreator()); + addCreator(ROPE, new XpRoPECreator()); + addCreator(RELU, new XpReLUCreator()); + addCreator(SOFTMAX, new XpSoftmaxCreator()); + addCreator(OP_GELU, new XpGeLUCreator()); + addCreator(SILU, new XpSiLUCreator()); + addCreator(TRANSPOSE, new XpTransposeCreator()); + addCreator(RMSNORM, new 
XpRMSNormCreator()); + addCreator(XP_KVCACHE, new XpKVCacheCreator()); + addCreator(CAUSALMASK, new XpCausalMaskCreator()); + addCreator(SDPA, new XpSDPACreator()); + addCreator(EMBEDDING, new XpEmbeddingCreator()); + addCreator(PARAMETER, new XpParameterCreator()); +} + +void XnnpackBackend::registerFuncs() { + // broadcast element wise tensor func + map_tensor_function_[TensorFuncType::FUNC_ADD] = new XpBroadcastAddFunction(); + map_tensor_function_[TensorFuncType::FUNC_SUB] = new XpBroadcastSubFunction(); + map_tensor_function_[TensorFuncType::FUNC_MUL] = new XpBroadcastMulFunction(); + map_tensor_function_[TensorFuncType::FUNC_DIV] = new XpBroadcastDivFunction(); + + // element wise tensor func + map_tensor_function_[TensorFuncType::FUNC_TTADD] = new XpTTAddFunction(); + map_tensor_function_[TensorFuncType::FUNC_TTSUB] = new XpTTSubFunction(); + map_tensor_function_[TensorFuncType::FUNC_TTMUL] = new XpTTMulFunction(); + map_tensor_function_[TensorFuncType::FUNC_TTDIV] = new XpTTDivFunction(); + + // others + map_tensor_function_[TensorFuncType::FUNC_TRANPOSE] = new XpTransposeFunction(); + map_tensor_function_[TensorFuncType::FUNC_VIEW] = new XpViewFunction(); + map_tensor_function_[TensorFuncType::FUNC_MM] = new XpMatmulFunction(); +} + +uint32_t XnnpackCargo::getExecCnt() { + return exec_cnt_; +} + +uint32_t XnnpackCargo::incExecCnt() { + exec_cnt_++; + return exec_cnt_; +} + +void XnnpackCargo::setThreadPool(pthreadpool_t tp) { + threadpool_ = tp; +} + +std::shared_ptr XnnpackCargo::getModelRuntime() { + return model_runtime_; +} + +std::shared_ptr XnnpackCargo::recreateModelRuntime() { + model_runtime_ = std::make_shared(threadpool_); + + // set external values + model_runtime_->resetUuidExternalValuesMap(uuid_2_externals_v_); + model_runtime_->setWeightCache(weight_cache_); + + return model_runtime_; +} + +xnn_subgraph_t XnnpackCargo::getXnnSubgraph() { + return graph_; +} + +void XnnpackCargo::createSubgraph(int32_t external_nums) { + if (graph_) { + 
Log::error("The subgraph has already been created. Use recreateSubGraph instead."); + exit(-1); + } + + uuid_2_externals_v_.clear(); + uuid_2_mllm_tensor_.clear(); + uuid_2_mllm_weight_tensor_.clear(); + uuid_2_normal_tensor_.clear(); + activation_name_2_uuid_.clear(); + auto status = xnn_create_subgraph(external_nums, 0, &graph_); + if (status != xnn_status_success) { + Log::error("Failed to create subgrpah"); + exit(-1); + } +} + +void XnnpackCargo::recreateSubgraph(int32_t external_nums) { + if (graph_) { + // no need to delete this, the previous xnnpack runtime will manage it. + // xnn_delete_subgraph(subgraph_); + uuid_2_mllm_tensor_.clear(); + uuid_2_mllm_weight_tensor_.clear(); + uuid_2_externals_v_.clear(); + uuid_2_normal_tensor_.clear(); + activation_name_2_uuid_.clear(); + } + + auto status = xnn_create_subgraph(external_nums, 0, &graph_); + if (status != xnn_status_success) { + Log::error("Failed to create subgrpah"); + exit(-1); + } +} + +void XnnpackCargo::registerExternalValue(uint32_t uuid, const xnn_external_value &ext_v) { + if (uuid_2_externals_v_.count(uuid)) { + Log::error("when reigster a external value, found exists uuid: {}", uuid); + exit(-1); + } + + uuid_2_externals_v_.insert({uuid, ext_v}); +} + +void XnnpackCargo::updateExternalValue(uint32_t uuid, const xnn_external_value &ext_v) { + if (!uuid_2_externals_v_.count(uuid)) { + Log::error("when update a external value, found exists uuid: {}", uuid); + exit(-1); + } + uuid_2_externals_v_[uuid] = ext_v; +} + +void XnnpackCargo::registerNormalValue(uint32_t uuid) { + if (uuid_2_normal_tensor_.count(uuid)) { + Log::error("when reigster a normal value, found exists uuid: {}", uuid); + exit(-1); + } + + uuid_2_normal_tensor_.insert({uuid, true}); +} + +void XnnpackCargo::registerUuidTensor(uint32_t uuid, Tensor *t) { + if (uuid_2_mllm_tensor_.count(uuid)) { + Log::error("when reigster a tensor value, found exists uuid: {}", uuid); + exit(-1); + } + + uuid_2_mllm_tensor_.insert({uuid, t}); +} + 
+void XnnpackCargo::updateUuidTensor(uint32_t uuid, Tensor *t) { + if (!uuid_2_mllm_tensor_.count(uuid)) { + Log::error("XnnpackCargo::updateUuidTensor failed. {} is not exists", uuid); + exit(-1); + } + + uuid_2_mllm_tensor_[uuid] = t; +} + +void XnnpackCargo::registerUuidWeightTensor(uint32_t uuid, Tensor *t) { + if (uuid_2_mllm_weight_tensor_.count(uuid)) { + Log::error("when reigster a weight tensor value, found exists uuid: {}", uuid); + exit(-1); + } + + uuid_2_mllm_weight_tensor_.insert({uuid, t}); +} + +void *XnnpackCargo::getExternalValueptr(uint32_t uuid) { + if (uuid_2_externals_v_.count(uuid)) { + return uuid_2_externals_v_[uuid].data; + } + Log::error("getExternalValueptr return nullptr for uuid: {}", uuid); + return nullptr; +} + +bool XnnpackCargo::hasExternalValue(uint32_t uuid) { + return uuid_2_externals_v_.count(uuid); +} + +bool XnnpackCargo::hasNormalValue(uint32_t uuid) { + return uuid_2_normal_tensor_.count(uuid); +} + +bool XnnpackCargo::hasWeightValue(uint32_t uuid) { + return uuid_2_mllm_weight_tensor_.count(uuid); +} + +xnn_datatype XnnpackBackend::mllmDType2XnnDType(DataType mllm_dtype) { + switch (mllm_dtype) { + case MLLM_TYPE_F32: + return xnn_datatype_fp32; + case MLLM_TYPE_F16: + return xnn_datatype_fp16; + case MLLM_TYPE_I32: + return xnn_datatype_int32; + default: + return xnn_datatype_invalid; + } + return xnn_datatype_invalid; +} + +uint32_t XnnpackCargo::getNewEXternalId() { + return (uint32_t)uuid_2_externals_v_.size(); +} + +void XnnpackCargo::assignPtrToTensor() { + // update from runtime + uuid_2_externals_v_ = getModelRuntime()->__uuidToExternalsV(); + + // for (auto &iter : uuid_2_mllm_tensor_) { + // auto t = iter.second; + // auto uuid = iter.first; + // auto ext_v = uuid_2_externals_v_[uuid]; + // t->forceResetHostPointer(ext_v.data); + // } + + for (auto &iter : uuid_2_mllm_weight_tensor_) { + iter.second->uuid() = XNN_INVALID_VALUE_ID; + } +} + +void XnnpackCargo::setSubgraphDispatched(bool b) { + 
subgraph_dispatched_ = b; +} + +xnn_weights_cache_t XnnpackCargo::getWeightCache() { + return weight_cache_; +} + +bool XnnpackCargo::isWeightCacheFinalized() const { + return weight_cache_finalized_; +} + +void XnnpackCargo::setWeightCacheFinalized(bool b) { + weight_cache_finalized_ = b; +} + +bool XnnpackCargo::inActivationName(const std::string &name) { + return activation_name_2_uuid_.count(name); +} + +uint32_t XnnpackCargo::getUUIDByActivationName(const std::string &name) { + if (inActivationName(name)) return activation_name_2_uuid_[name]; + Log::error("XnnpackCargo::getUUIDByActivationName, {} not in activation name", name); + exit(-1); +} + +void XnnpackCargo::registerActivationNameAndUUID(const std::string &name, uint32_t uuid) { + if (inActivationName(name)) { + Log::error("XnnpackCargo::registerActivationNameAndUUID, {} already exists", name); + exit(-1); + } + activation_name_2_uuid_.insert({name, uuid}); +} + +void XnnpackBackend::createNewGraph(const std::string &name) { + if (graphs_.count(name)) { + if (enable_dynamic_shape) { + Log::error("XnnpackBackend::createNewGraph, {} graph already exists", name); + exit(-1); + } else { + graphs_.erase(graphs_.find(name)); + } + } + + graphs_.insert({name, std::make_shared()}); + graphs_[name]->setThreadPool(threadpool_); + graphs_[name]->createSubgraph(); +} + +std::shared_ptr XnnpackBackend::getGraph(const std::string &name) { + if (!graphs_.count(name)) { + Log::error("XnnpackBackend::getGraph, {} graph not exists"); + exit(-1); + } + return graphs_[name]; +} + +bool XnnpackBackend::hasGraph(const std::string &name) { + return graphs_.count(name); +} + +void XnnpackBackend::onSetUpStart(std::vector> &inputs, std::vector> &outputs, std::string graph_name) { + // 0. 
create graph + cur_processing_graph_name_ = graph_name; + + if (!hasGraph(graph_name) || !XnnpackBackend::enable_dynamic_shape) { + createNewGraph(graph_name); + auto cargo = getGraph(graph_name); + + if (XnnpackBackend::enable_legacy_wrapper) Backend::onSetUpStart(inputs, outputs, graph_name); + + // 1. register all inputs + for (auto &t : inputs) { + auto xp_dtype = XnnpackBackend::mllmDType2XnnDType(t->dtype()); + + xnn_status status; + std::vector dims; + for (auto d : t->shape()) dims.push_back(d); + + uint32_t flags = XNN_VALUE_FLAG_EXTERNAL_INPUT; + uint32_t external_id = cargo->getNewEXternalId(); + + switch (xp_dtype) { + case xnn_datatype_fp32: { + status = xnn_define_tensor_value( + cargo->getXnnSubgraph(), xp_dtype, + dims.size(), dims.data(), + /*data=*/nullptr, + external_id, flags, &t->uuid()); + break; + } + default: + Log::error("XnnpackBackend::onSetUpStart, Unsupported datatype."); + break; + } + + cargo->registerExternalValue(t->uuid(), xnn_external_value{.id = t->uuid(), .data = t->rawHostPtr()}); + cargo->registerUuidTensor(t->uuid(), t.get()); + cargo->registerActivationNameAndUUID(t->name(), t->uuid()); + + if (status != xnn_status_success) { + Log::error("xnnpack backend defineXpTensor Error"); + exit(-1); + } + } + } else { + // do not create a new graph. 
Reuse already exists runtime + auto cargo = getGraph(graph_name); + + for (auto &t : inputs) { + t->uuid() = cargo->getUUIDByActivationName(t->name()); + cargo->updateExternalValue(t->uuid(), xnn_external_value{.id = t->uuid(), .data = t->rawHostPtr()}); + cargo->updateUuidTensor(t->uuid(), t.get()); + std::vector dims; + for (auto d : t->shape()) dims.push_back(d); + xnn_reshape_external_value(cargo->getModelRuntime()->getXnnRt(), t->uuid(), dims.size(), dims.data()); + } + } +} + +void XnnpackBackend::onSetUpEnd(std::vector> &inputs, std::vector> &outputs, std::string graph_name) { + cur_processing_graph_name_ = graph_name; + + if (getGraph(graph_name)->getExecCnt() == 0 || !XnnpackBackend::enable_dynamic_shape) { + if (XnnpackBackend::enable_legacy_wrapper) Backend::onSetUpEnd(inputs, outputs, graph_name); + + // 0. get graph + auto cargo = getGraph(graph_name); + + // 1. register all outputs + for (auto &t : outputs) { + auto xp_dtype = XnnpackBackend::mllmDType2XnnDType(t->dtype()); + + xnn_status status; + std::vector dims; + for (auto d : t->shape()) dims.push_back(d); + + uint32_t flags = XNN_VALUE_FLAG_EXTERNAL_OUTPUT; + uint32_t external_id = cargo->getNewEXternalId(); + + switch (xp_dtype) { + case xnn_datatype_fp32: { + status = xnn_define_tensor_value( + cargo->getXnnSubgraph(), xp_dtype, + dims.size(), dims.data(), + /*data=*/nullptr, + external_id, flags, &t->uuid()); + break; + } + default: + break; + } + + cargo->registerExternalValue(t->uuid(), xnn_external_value{.id = t->uuid(), .data = t->rawHostPtr()}); + cargo->registerUuidTensor(t->uuid(), t.get()); + cargo->registerActivationNameAndUUID(t->name(), t->uuid()); + + if (status != xnn_status_success) { + Log::error("xnnpack backend defineXpTensor Error"); + exit(-1); + } + } + } else { + // do not create a new graph. 
Reuse already exists runtime + auto cargo = getGraph(graph_name); + + for (auto &t : outputs) { + t->uuid() = cargo->getUUIDByActivationName(t->name()); + cargo->updateExternalValue(t->uuid(), xnn_external_value{.id = t->uuid(), .data = t->rawHostPtr()}); + cargo->updateUuidTensor(t->uuid(), t.get()); + std::vector dims; + for (auto d : t->shape()) dims.push_back(d); + xnn_reshape_external_value(cargo->getModelRuntime()->getXnnRt(), t->uuid(), dims.size(), dims.data()); + } + } +} + +void XnnpackBackend::onExecuteStart(std::vector> &inputs, std::vector> &outputs, std::string graph_name) { + cur_processing_graph_name_ = graph_name; +} + +void XnnpackBackend::onExecuteEnd(std::vector> &outputs, const string &graph_name) { + cur_processing_graph_name_ = graph_name; + auto cargo = getCurProcessingGraph(); + + if (getGraph(graph_name)->getExecCnt() == 0 || !XnnpackBackend::enable_dynamic_shape) { + // recreate runtime + auto m_rt = cargo->recreateModelRuntime(); + + // create Model + m_rt->createModel(cargo->getXnnSubgraph()); + + // create runtime + m_rt->createRuntime(0); + + // auto wc = xnnbk->getWeightCache(); + // if (!xnnbk->isWeightCacheFinalized()) { + // xnn_finalize_weights_cache(wc, xnn_weights_cache_finalization_kind_hard); + // xnnbk->setWeightCacheFinalized(true); + // } + + // reshape + m_rt->reshapeRuntime(); + + // setup + m_rt->setupRuntime(); + + // run + if (!m_rt->invoke()) { + Log::error("XnnpackBackend::onExecuteStart xnn invoke failed"); + return; + } + + // update all output's ptr + cargo->assignPtrToTensor(); + + cargo->setSubgraphDispatched(true); + + cargo->incExecCnt(); + } else { + // recreate runtime + auto m_rt = cargo->getModelRuntime(); + + // setup + m_rt->setupRuntime(); + + // run + if (!m_rt->invoke()) { + Log::error("XnnpackBackend::onExecuteStart xnn invoke failed"); + return; + } + + // update all output's ptr + cargo->assignPtrToTensor(); + + cargo->setSubgraphDispatched(true); + + cargo->incExecCnt(); + } + + for (auto &o : 
outputs) { + o->forceResetHostPointer(getCurProcessingGraph()->getExternalValueptr(o->uuid())); + o->uuid() = XNN_INVALID_VALUE_ID; + } +} + +XnnpackCargo *XnnpackBackend::getCurProcessingGraph() { + if (!graphs_.count(cur_processing_graph_name_)) { + Log::error("XnnpackBackend::getCurProcessingGraph, {} graph not exists"); + exit(-1); + } + return graphs_[cur_processing_graph_name_].get(); +} + +int XnnpackBackend::xnn_threads = 4; + +bool XnnpackBackend::enable_dynamic_shape = true; + +bool XnnpackBackend::enable_legacy_wrapper = false; + +// std::vector XnnpackBackend::runFunc(std::vector out_names, +// TensorFuncType type, +// std::vector float_args, +// std::vector input_tensors, +// bool in_place) { +// Module *module = input_tensors.empty() ? mllm::Module::llm_model_ptr : input_tensors[0].module(); +// assert(module != nullptr); +// auto &activation_tensors = module->activation_tensors; +// auto &activation_tensors_num = module->activation_tensors_num; + +// std::vector> output_ptrs; +// for (const auto &out_name : out_names) { +// if (activation_tensors.find(out_name) == activation_tensors.end()) { +// Backend *backend_h = Backend::global_backends[MLLM_CPU].get(); +// if (!input_tensors.empty()) { +// backend_h = input_tensors[0].backend(); +// } +// activation_tensors[out_name] = std::make_shared(backend_h); +// activation_tensors[out_name]->setName(out_name); +// activation_tensors[out_name]->setModule(module); +// activation_tensors_num[out_name] = 0; +// } +// output_ptrs.push_back(activation_tensors[out_name]); +// } + +// if (module->doLoad) { +// std::vector results; +// for (auto &out_tensor : output_ptrs) { +// results.push_back(*activation_tensors[out_tensor->name()]); +// } +// return results; +// } + +// Backend *backend_h = Backend::global_backends[MLLM_CPU].get(); +// if (!input_tensors.empty()) { +// backend_h = input_tensors[0].backend(); +// } +// TensorFunction *func = backend_h->funcCreate(type); + +// std::vector> input_ptrs; +// for 
(auto &tensor : input_tensors) { +// input_ptrs.push_back(activation_tensors[tensor.name()]); +// } +// // if (in_place) { +// // for (size_t i = 0; i < input_tensors.size() && i < out_names.size(); ++i) { +// // input_tensors[i].setName(out_names[i]); +// // output_ptrs.push_back(input_tensors[i]); +// // } +// // } + +// #ifdef DEBUGOPTIME +// auto start_t = mllm_time_us(); +// #endif + +// switch (Tensor::tensor_status) { +// case TENSOR_STATIC_INIT: +// func->reshape(output_ptrs, input_ptrs, float_args); +// func->setUp(output_ptrs, input_ptrs, float_args); +// break; +// case TENSOR_STATIC_READY: +// func->execute(output_ptrs, input_ptrs, float_args); +// break; +// case TENSOR_STATIC_TRACE: +// if (backend_h->type() == BackendType::MLLM_CPU) { +// Tracer::addTensorFunction(func, input_ptrs, output_ptrs, float_args); +// } +// break; +// default: +// break; +// } + +// // if (Backend::global_backends.size() == 1) { +// // for (auto input_tensor : input_ptrs) { +// // auto it = activation_tensors_num.find(input_tensor->name()); +// // if (it != activation_tensors_num.end()) { +// // switch (Tensor::tensor_status) { +// // case TENSOR_STATIC_INIT: +// // it->second += 1; +// // break; +// // case TENSOR_STATIC_READY: +// // it->second -= 1; +// // break; +// // default: +// // break; +// // } +// // if (it->second == 0 && module_tensors[input_tensor->name()]->sequence() > 1 && module_tensors[input_tensor->name()]->ttype() != GRAPH_OUTPUT) { +// // activation_tensors[input_tensor->name()]->free(); +// // } +// // } +// // } +// // } + +// #ifdef DEBUGOPTIME +// if (Tensor::tensor_status == TENSOR_STATIC_READY) { +// auto end_t = mllm_time_us(); +// std::cout << (out_names.empty() ? 
"" : out_names[0]) << " | " +// << Tensor::tensor_status << " time: " +// << (end_t - start_t) / 1000.0F << "ms" << std::endl; +// } +// #endif + +// #ifdef DEBUGSAVETENSOR +// for (auto &out_name : out_names) { +// activation_tensors[out_name]->saveNData(); +// } +// #endif + +// std::vector results; +// for (auto &out_tensor : output_ptrs) { +// results.emplace_back(*activation_tensors[out_tensor->name()]); +// } +// return results; +// } +std::string name_num_to_X(const std::string &input_string) { + std::regex pattern(R"(\.\d{1,3}\.)"); // Matches any number between 1 and 100 between two dots + std::string replacement = ".X."; // The string to replace the matched pattern with + std::string output_string = std::regex_replace(input_string, pattern, replacement); + return output_string; +} +std::string name_X_to_num(const std::string &input_string, int in_idx) { + std::regex pattern(".X."); // Matches any number between 1 and 100 between two dots + std::string replacement = "." + std::to_string(in_idx) + "."; // The string to replace the matched pattern with + std::string output_string = std::regex_replace(input_string, pattern, replacement); + return output_string; +} +void init_reset_KVCache(string input_name, Module *module, int saved_list_idx, map layername_2_tensorname, Backend *backend_) { + map> &activation_tensors = module->activation_tensors; + vector renameX_names; + renameX_names.push_back(input_name); + const vector suffixs = {"-view", ".split-0", ".split-1", ".split-2", "-cat", "-split-0-48"}; + vector new_names; + bool can_break = true; + auto in_x_name = renameX_names[0]; + while (can_break) { + can_break = false; + for (const auto &suffix : suffixs) { + if (in_x_name.rfind(suffix) == (in_x_name.size() - suffix.size())) { + const auto r_name = in_x_name.substr(0, in_x_name.size() - suffix.size()); + if (std::find(renameX_names.begin(), renameX_names.end(), r_name) == renameX_names.end() && std::find(new_names.begin(), new_names.end(), r_name) == 
new_names.end()) { + new_names.push_back(r_name); + in_x_name = r_name; + can_break = true; + } + break; + } + } + } + renameX_names.insert(renameX_names.end(), new_names.begin(), new_names.end()); + for (const auto x_name : renameX_names) { + auto name = name_X_to_num(x_name, saved_list_idx); + layername_2_tensorname[name] = name; + activation_tensors[name] = std::make_shared(backend_); + activation_tensors[name]->initFrom(*activation_tensors[x_name]); + activation_tensors[name]->setName(name); + activation_tensors[name]->setModule(module); + } +} + +std::vector XnnpackBackend::runLayer(Layer *layer, std::vector inputs, int N) { + Module *module = inputs.empty() ? Module::llm_model_ptr : inputs[0].module(); + map> &activation_tensors = module->activation_tensors; + auto &activation_tensors_num = module->activation_tensors_num; + // Module::runlistIdx = saved_list_idx; + bool do_init = false; + + if (module->doLoad || !layer->inited_loaded) { + // set backend to current module device and try to create op + // use Module::tmp_device only when creating the op as the recersive module backend only handled in load and init stage + layer->backend_ = Backend::global_backends[Module::tmp_device]; + do_init = !layer->inited_loaded; + // if (layer->op_ == nullptr) { + // layer->op_ = layer->backend_->opCreate(layer->param_, layer->name_); + // } + if (layer->param_["type"] == SUBGRAPHFINALIZE) { + for (auto &input : inputs) { + activation_tensors[input.name()]->setTtype(GRAPH_OUTPUT); + } + } + // if (module->doLoad) { + // layer->op_->load(*module->loader); + // layer->inited_loaded = true; + // } else if (layer->loaded_param) { + // layer->inited_loaded = layer->loaded_param; + // } else { + // if (!layer->inited_loaded) { + // // module->loader = new ParamLoader(""); + // // op_->load(*module->loader); + // auto empty_loader = new ParamLoader(""); + // layer->op_->load(*empty_loader); + // layer->inited_loaded = true; + // } + // } + vector layer_next_names = {}; + if (N 
> 1) { + for (int i = 0; i < N; ++i) { + layer_next_names.push_back("out-" + layer->op_->name() + "-" + std::to_string(i)); + } + } else { + layer_next_names = {"out-" + layer->op_->name()}; + } + for (const auto &layer_next_name : layer_next_names) { + string next_name; + if (Layer::use_layername_2_tensorname) { + if (Layer::layername_2_tensorname.find(layer_next_name) == Layer::layername_2_tensorname.end()) { + if (layer->param_["type"] == KVCACHE) { + Layer::layername_2_tensorname[layer_next_name] = layer_next_name; + init_reset_KVCache(inputs[0].name(), module, layer->saved_list_idx, Layer::layername_2_tensorname, layer->backend_); + } else { + Layer::layername_2_tensorname[layer_next_name] = name_num_to_X(layer_next_name); + } + } + next_name = Layer::layername_2_tensorname[layer_next_name]; + } else if (layer_next_name.find("visual") != string::npos) { + // QNN VLM trick: visual model use act tensor sharing + if (Layer::layername_2_tensorname.find(layer_next_name) == Layer::layername_2_tensorname.end()) { + if (layer->param_["type"] == KVCACHE) { + Layer::layername_2_tensorname[layer_next_name] = layer_next_name; + init_reset_KVCache(inputs[0].name(), module, layer->saved_list_idx, Layer::layername_2_tensorname, layer->backend_); + } else { + Layer::layername_2_tensorname[layer_next_name] = name_num_to_X(layer_next_name); + } + } + next_name = Layer::layername_2_tensorname[layer_next_name]; + } else { + next_name = layer_next_name; + } + if (activation_tensors.find(next_name) == activation_tensors.end()) { + activation_tensors[next_name] = std::make_shared(layer->backend_); + activation_tensors[next_name]->setName(next_name); + activation_tensors[next_name]->setModule(module); + activation_tensors_num[next_name] = 0; + } + } + if (module->doLoad) { + vector output_result = {}; + for (const auto &layer_next_name : layer_next_names) { + string next_name = Layer::use_layername_2_tensorname ? 
Layer::layername_2_tensorname[layer_next_name] : (layer_next_name.find("visual") != string::npos ? Layer::layername_2_tensorname[layer_next_name] : layer_next_name); + output_result.push_back(*activation_tensors[next_name]); + } + return output_result; + } + } + // input_tensors + vector> input_tensors; + for (auto &input : inputs) { + if (input.shouldInGraphs()) { + auto input_name = input.name(); + if (layer->param_["type"] == KVCACHE && do_init && Layer::use_layername_2_tensorname) { + input_name = name_X_to_num(input_name, layer->saved_list_idx); + } + input_tensors.push_back(activation_tensors[input_name]); + } else { + input_tensors.push_back(std::shared_ptr(&input, [](Tensor *) {})); + } + } + // output_tensors + vector layer_next_names = {}; + if (N > 1) { + for (int i = 0; i < N; ++i) { + layer_next_names.push_back("out-" + layer->op_->name() + "-" + std::to_string(i)); + } + } else { + layer_next_names = {"out-" + layer->op_->name()}; + } + vector> output_tensors = {}; + for (const auto &layer_next_name : layer_next_names) { + string next_name = Layer::use_layername_2_tensorname ? Layer::layername_2_tensorname[layer_next_name] : (layer_next_name.find("visual") != string::npos ? 
Layer::layername_2_tensorname[layer_next_name] : layer_next_name); + output_tensors.push_back(activation_tensors[next_name]); + } +#ifdef DEBUGOPTIME + auto start_t = mllm_time_us(); +#endif + switch (Tensor::tensor_status) { + case TENSOR_STATIC_INIT: { + if (!Module::isFirstChunk && layer->backend_->type() == MLLM_QNN) { + } else { + layer->op_->reshape(input_tensors, output_tensors); + layer->op_->setUp(input_tensors, output_tensors); + } + break; + } + case TENSOR_STATIC_READY: { + if (!Module::isFirstChunk && layer->backend_->type() == MLLM_QNN && layer->param_["type"] != SUBGRAPHSTART) { + } else { + layer->op_->execute(input_tensors, output_tensors); + } + break; + } + case TENSOR_STATIC_TRACE: { + if (layer->backend_->type() == BackendType::MLLM_CPU) { + Tracer::addOp(layer->op_, input_tensors, output_tensors); + } else if (layer->param_["type"] == SUBGRAPHSTART) { // begin of QNN graph + Tracer::addModule(input_tensors, {}, layer->op_->name()); + } + break; + } + default: { + break; + } + } +// if (Backend::global_backends.size() == 1) { +// for (auto input_tensor : input_tensors) { +// if ((activation_tensors_num.find(input_tensor->name()) != activation_tensors_num.end())) { +// switch (Tensor::tensor_status) { +// case TENSOR_STATIC_INIT: { +// activation_tensors_num[input_tensor->name()] += 1; +// break; +// } +// case TENSOR_STATIC_READY: { +// activation_tensors_num[input_tensor->name()] -= 1; +// break; +// } +// default: { +// } +// } +// if (activation_tensors_num[input_tensor->name()] == 0 && activation_tensors[input_tensor->name()]->sequence() > 1 +// && activation_tensors[input_tensor->name()]->ttype() != GRAPH_OUTPUT) { +// activation_tensors[input_tensor->name()]->free(); +// // std::cout << input_tensor->name() << "|" << std::endl; +// } +// } +// } +// } +#ifdef DEBUGOPTIME + if (Tensor::tensor_status == TENSOR_STATIC_READY) { + auto end_t = mllm_time_us(); + std::cout << layer->op_->name() << " | " << Tensor::tensor_status << " time: " << 
(end_t - start_t) / 1000.0F << "ms" << std::endl; + } +#endif + vector output_result = {}; + for (const auto &layer_next_name : layer_next_names) { + string next_name = Layer::use_layername_2_tensorname ? Layer::layername_2_tensorname[layer_next_name] : (layer_next_name.find("visual") != string::npos ? Layer::layername_2_tensorname[layer_next_name] : layer_next_name); +#ifdef DEBUGSAVETENSOR + activation_tensors[next_name]->saveNData(layer_next_name); +#endif + output_result.push_back(*activation_tensors[next_name]); + } + return output_result; +} + +std::vector XnnpackBackend::runOp(Op *op, std::vector inputs, std::vector out_names, bool in_place) { + Module *module = inputs.empty() ? Module::llm_model_ptr : inputs[0].module(); + map> &activation_tensors = module->activation_tensors; + auto &activation_tensors_num = module->activation_tensors_num; + // Module::runlistIdx = saved_list_idx; + bool do_init = false; + + if (module->doTrace) { + // set backend to current module device and try to create op + // use Module::tmp_device only when creating the op as the recersive module backend only handled in load and init stage + op->backend() = Backend::global_backends[Module::tmp_device]; + // do_init = !layer->inited_loaded; + // if (layer->op_ == nullptr) { + // layer->op_ = layer->backend_->opCreate(layer->param_, layer->name_); + // } + if (op->type() == SUBGRAPHFINALIZE) { + for (auto &input : inputs) { + activation_tensors[input.name()]->setTtype(GRAPH_OUTPUT); + } + } + // if (module->doLoad) { + // layer->op_->load(*module->loader); + // layer->inited_loaded = true; + // } else if (layer->loaded_param) { + // layer->inited_loaded = layer->loaded_param; + // } else { + // if (!layer->inited_loaded) { + // // module->loader = new ParamLoader(""); + // // op_->load(*module->loader); + // auto empty_loader = new ParamLoader(""); + // layer->op_->load(*empty_loader); + // layer->inited_loaded = true; + // } + // } + vector layer_next_names = {}; + if (N > 1) { + for 
(int i = 0; i < N; ++i) { + layer_next_names.push_back("out-" + op->name() + "-" + std::to_string(i)); + } + } else { + layer_next_names = {"out-" + op->name()}; + } + for (const auto &layer_next_name : layer_next_names) { + string next_name; + if (Layer::use_layername_2_tensorname) { + if (Layer::layername_2_tensorname.find(layer_next_name) == Layer::layername_2_tensorname.end()) { + if (layer->param_["type"] == KVCACHE) { + Layer::layername_2_tensorname[layer_next_name] = layer_next_name; + init_reset_KVCache(inputs[0].name(), module, layer->saved_list_idx, Layer::layername_2_tensorname, layer->backend_); + } else { + Layer::layername_2_tensorname[layer_next_name] = name_num_to_X(layer_next_name); + } + } + next_name = Layer::layername_2_tensorname[layer_next_name]; + } else if (layer_next_name.find("visual") != string::npos) { + // QNN VLM trick: visual model use act tensor sharing + if (Layer::layername_2_tensorname.find(layer_next_name) == Layer::layername_2_tensorname.end()) { + if (layer->param_["type"] == KVCACHE) { + Layer::layername_2_tensorname[layer_next_name] = layer_next_name; + init_reset_KVCache(inputs[0].name(), module, layer->saved_list_idx, Layer::layername_2_tensorname, layer->backend_); + } else { + Layer::layername_2_tensorname[layer_next_name] = name_num_to_X(layer_next_name); + } + } + next_name = Layer::layername_2_tensorname[layer_next_name]; + } else { + next_name = layer_next_name; + } + if (activation_tensors.find(next_name) == activation_tensors.end()) { + activation_tensors[next_name] = std::make_shared(op->backend()); + activation_tensors[next_name]->setName(next_name); + activation_tensors[next_name]->setModule(module); + activation_tensors_num[next_name] = 0; + } + } + vector output_result = {}; + for (const auto &layer_next_name : layer_next_names) { + string next_name = Layer::use_layername_2_tensorname ? Layer::layername_2_tensorname[layer_next_name] : (layer_next_name.find("visual") != string::npos ? 
Layer::layername_2_tensorname[layer_next_name] : layer_next_name); + output_result.push_back(*activation_tensors[next_name]); + } + return output_result; + } + // input_tensors + vector> input_tensors; + for (auto &input : inputs) { + if (input.shouldInGraphs()) { + auto input_name = input.name(); + if (layer->param_["type"] == KVCACHE && do_init && Layer::use_layername_2_tensorname) { + input_name = name_X_to_num(input_name, layer->saved_list_idx); + } + input_tensors.push_back(activation_tensors[input_name]); + } else { + input_tensors.push_back(std::shared_ptr(&input, [](Tensor *) {})); + } + } + // output_tensors + vector layer_next_names = {}; + if (N > 1) { + for (int i = 0; i < N; ++i) { + layer_next_names.push_back("out-" + op->name() + "-" + std::to_string(i)); + } + } else { + layer_next_names = {"out-" + op->name()}; + } + vector> output_tensors = {}; + for (const auto &layer_next_name : layer_next_names) { + string next_name = Layer::use_layername_2_tensorname ? Layer::layername_2_tensorname[layer_next_name] : (layer_next_name.find("visual") != string::npos ? 
Layer::layername_2_tensorname[layer_next_name] : layer_next_name); + output_tensors.push_back(activation_tensors[next_name]); + } +#ifdef DEBUGOPTIME + auto start_t = mllm_time_us(); +#endif + switch (Tensor::tensor_status) { + case TENSOR_STATIC_INIT: { + op->reshape(input_tensors, output_tensors); + op->setUp(input_tensors, output_tensors); + break; + } + case TENSOR_STATIC_READY: { + op->execute(input_tensors, output_tensors); + break; + } + case TENSOR_STATIC_TRACE: { + if (op->backend()->type() == BackendType::MLLM_CPU) { + Tracer::addOp(op, input_tensors, output_tensors); + } else if (op->type() == SUBGRAPHSTART) { // begin of QNN graph + Tracer::addModule(input_tensors, {}, op->name()); + } + break; + } + default: { + break; + } + } + if (Backend::global_backends.size() == 1) { + for (auto input_tensor : input_tensors) { + if ((activation_tensors_num.find(input_tensor->name()) != activation_tensors_num.end())) { + switch (Tensor::tensor_status) { + case TENSOR_STATIC_INIT: { + activation_tensors_num[input_tensor->name()] += 1; + break; + } + case TENSOR_STATIC_READY: { + activation_tensors_num[input_tensor->name()] -= 1; + break; + } + default: { + } + } + if (activation_tensors_num[input_tensor->name()] == 0 && activation_tensors[input_tensor->name()]->sequence() > 1 + && activation_tensors[input_tensor->name()]->ttype() != GRAPH_OUTPUT) { + activation_tensors[input_tensor->name()]->free(); + // std::cout << input_tensor->name() << "|" << std::endl; + } + } + } + } +#ifdef DEBUGOPTIME + if (Tensor::tensor_status == TENSOR_STATIC_READY) { + auto end_t = mllm_time_us(); + std::cout << op->name() << " | " << Tensor::tensor_status << " time: " << (end_t - start_t) / 1000.0F << "ms" << std::endl; + } +#endif + vector output_result = {}; + for (const auto &layer_next_name : layer_next_names) { + string next_name = Layer::use_layername_2_tensorname ? Layer::layername_2_tensorname[layer_next_name] : (layer_next_name.find("visual") != string::npos ? 
Layer::layername_2_tensorname[layer_next_name] : layer_next_name); +#ifdef DEBUGSAVETENSOR + activation_tensors[next_name]->saveNData(layer_next_name); +#endif + output_result.push_back(*activation_tensors[next_name]); + } + return output_result; +} +std::vector XnnpackBackend::runForward(Module *module, std::vector inputs, std::vector args) { + // set static tmp_device to device_ to init layers' op + // auto previoud_device = Module::tmp_device; + // Module::tmp_device = module->device_; + // Module Loading + if (Module::llm_model_ptr && Module::llm_model_ptr->doLoad) { + auto outputs = module->Forward(inputs, args); + // for inner module, set output tensors to GRAPH_OUTPUT + // if (inputs[0].ttype() != TensorType::INPUT_TENSOR) { // XPUs' module should not be the outermost input tensor + // for (auto &output : outputs) { + // inputs[0].module()->activation_tensors[output.name()]->setTtype(GRAPH_OUTPUT); + // } + // } + // // set Module::tmp_device to previous device + // Module::tmp_device = previoud_device; + return outputs; + } + // if (false) { + // inputs[0].setTtype(TensorType::INPUT_TENSOR); + // } + // Module setUp & execute + if (inputs[0].ttype() == TensorType::INPUT_TENSOR) { + if (module->prefilling_token_size_ == 0) { // first time init + module->prefilling_token_size_ = inputs[0].sequence() * inputs[0].batch(); + } else if (module->decoding_token_size_ == 0) { + module->decoding_token_size_ = inputs[0].sequence() * inputs[0].batch(); + } + for (int i = 0; i < inputs.size(); i++) { + auto &input = inputs[i]; + input.setName("input" + std::to_string(i)); + input.setTtype(TensorType::NORMAL_TENSOR); + module->activation_tensors[input.name()] = std::shared_ptr(&input, [](Tensor *) {}); + module->activation_tensors[input.name()]->setName(input.name()); + module->activation_tensors[input.name()]->setModule(module); + } + Module::llm_model_ptr = module; + Tensor::tensor_status = TENSOR_STATIC_INIT; + + uint64_t time_start = mllm_time_us(); + 
module->Forward(inputs, args); + Tensor::tensor_status = TENSOR_STATIC_READY; // change to EAGER + + auto output = module->Forward(inputs, args); + uint64_t time_end = mllm_time_us(); + + double inference_time_ = (time_end - time_start) / 1000.0F; // ms + module->inference_times_.push_back(inference_time_); + + Module::llm_model_ptr->op_transposed_flag = true; + return output; + } else { // inner Modules + // offload according to the backends' info inited during loading + if (Tensor::tensor_status == TENSOR_STATIC_INIT && module->device_ != MLLM_CPU) { // backend specific module reshape & setup + if (Module::isMultiChunkPrefilling && !Module::isFirstChunk) { // set to TENSOR_UNDEFINED and SKIP executing qnn layers + Tensor::tensor_status = TENSOR_UNDEFINED; + auto outputs = module->Forward(inputs, args); + Tensor::tensor_status = TENSOR_STATIC_INIT; + return outputs; + } + auto inputs_vec = vector>(); + auto outputs_vec = vector>(); + for (auto &i : inputs) { + inputs_vec.push_back(inputs[0].module()->activation_tensors[i.name()]); + } + + Backend::global_backends[module->device_]->onSetUpStart(inputs_vec, outputs_vec, module->getUniqueName()); + + // for xnnpack currently + for (auto &i : inputs) { + i.uuid() = inputs[0].module()->activation_tensors[i.name()]->uuid(); + } + + auto outputs = module->Forward(inputs, args); + for (auto &output : outputs) { + outputs_vec.push_back(inputs[0].module()->activation_tensors[output.name()]); + } + Backend::global_backends[module->device_]->onSetUpEnd(inputs_vec, outputs_vec, module->getUniqueName()); + + // for xnnpack currently + for (auto &o : outputs) { + o.uuid() = outputs[0].module()->activation_tensors[o.name()]->uuid(); + } + + return outputs; + } else if (Tensor::tensor_status == TENSOR_STATIC_READY && module->device_ != MLLM_CPU) { // backend specific module execute + auto inputs_vec = vector>(); + auto outputs_vec = vector>(); + for (auto &i : inputs) { + 
inputs_vec.push_back(inputs[0].module()->activation_tensors[i.name()]); + } + + auto outputs = module->Forward(inputs, args); + + for (auto &output : outputs) { + outputs_vec.push_back(inputs[0].module()->activation_tensors[output.name()]); + } + Backend::global_backends[module->device_]->onExecuteStart(inputs_vec, outputs_vec, module->getUniqueName()); + + Backend::global_backends[module->device_]->onExecuteEnd(outputs_vec, module->getUniqueName()); + + // for xnnpack currently + for (auto &o : outputs) { + o.uuid() = outputs[0].module()->activation_tensors[o.name()]->uuid(); + o.forceResetHostPointer(outputs[0].module()->activation_tensors[o.name()]->rawHostPtr()); + } + + return outputs; + } else if (Tensor::tensor_status == TENSOR_STATIC_TRACE && module->device_ != MLLM_CPU) { + auto inputs_vec = vector>(); + auto outputs_vec = vector>(); + for (auto &i : inputs) { + inputs_vec.push_back(inputs[0].module()->activation_tensors[i.name()]); + } + + auto outputs = module->Forward(inputs, args); + + for (auto &output : outputs) { + outputs_vec.push_back(inputs[0].module()->activation_tensors[output.name()]); + } + Tracer::addModule(inputs_vec, outputs_vec, module->getUniqueName()); + return outputs; + } + return module->Forward(inputs, args); + } +} + +} // namespace mllm::xnnpack \ No newline at end of file diff --git a/src/backends/xnnpack/XnnpackBackend.hpp b/mllm/backends/xnnpack/XnnpackBackend.hpp similarity index 89% rename from src/backends/xnnpack/XnnpackBackend.hpp rename to mllm/backends/xnnpack/XnnpackBackend.hpp index 3f39d3297..14fa0eba5 100644 --- a/src/backends/xnnpack/XnnpackBackend.hpp +++ b/mllm/backends/xnnpack/XnnpackBackend.hpp @@ -18,7 +18,10 @@ #include "Types.hpp" #include "pthreadpool.h" #include "xnnpack.h" - +namespace mllm { +class Module; +class Layer; +} // namespace mllm namespace mllm::xnnpack { class XnnpackModelRuntime { @@ -169,6 +172,17 @@ class XnnpackBackend : public Backend { void onExecuteEnd(std::vector> &outputs, const 
string &graph_name) override; + std::vector runFunc( + std::vector out_names, + TensorFuncType type, + std::vector float_args, + std::vector input_tensors, + bool in_place) override; + std::vector runLayer(Layer *layer, std::vector inputs, int N) override; + + std::vector runOp(Op *op, std::vector input, std::vector out_names, bool in_place) override; + std::vector runForward(Module *module, std::vector inputs, std::vector args) override; + XnnpackCargo *getCurProcessingGraph(); static int xnn_threads; diff --git a/src/backends/xnnpack/XpInterface.cpp b/mllm/backends/xnnpack/XpInterface.cpp similarity index 100% rename from src/backends/xnnpack/XpInterface.cpp rename to mllm/backends/xnnpack/XpInterface.cpp diff --git a/src/backends/xnnpack/XpInterface.hpp b/mllm/backends/xnnpack/XpInterface.hpp similarity index 100% rename from src/backends/xnnpack/XpInterface.hpp rename to mllm/backends/xnnpack/XpInterface.hpp diff --git a/src/backends/xnnpack/XpMemoryManager.cpp b/mllm/backends/xnnpack/XpMemoryManager.cpp similarity index 100% rename from src/backends/xnnpack/XpMemoryManager.cpp rename to mllm/backends/xnnpack/XpMemoryManager.cpp diff --git a/src/backends/xnnpack/XpMemoryManager.hpp b/mllm/backends/xnnpack/XpMemoryManager.hpp similarity index 100% rename from src/backends/xnnpack/XpMemoryManager.hpp rename to mllm/backends/xnnpack/XpMemoryManager.hpp diff --git a/src/backends/xnnpack/XpWrapper.cpp b/mllm/backends/xnnpack/XpWrapper.cpp similarity index 100% rename from src/backends/xnnpack/XpWrapper.cpp rename to mllm/backends/xnnpack/XpWrapper.cpp diff --git a/src/backends/xnnpack/XpWrapper.hpp b/mllm/backends/xnnpack/XpWrapper.hpp similarity index 100% rename from src/backends/xnnpack/XpWrapper.hpp rename to mllm/backends/xnnpack/XpWrapper.hpp diff --git a/src/backends/xnnpack/third_party/XNNPACK b/mllm/backends/xnnpack/third_party/XNNPACK similarity index 100% rename from src/backends/xnnpack/third_party/XNNPACK rename to 
mllm/backends/xnnpack/third_party/XNNPACK diff --git a/src/memory/MemInspect.cpp b/mllm/memory/MemInspect.cpp similarity index 100% rename from src/memory/MemInspect.cpp rename to mllm/memory/MemInspect.cpp diff --git a/src/memory/MemInspect.hpp b/mllm/memory/MemInspect.hpp similarity index 100% rename from src/memory/MemInspect.hpp rename to mllm/memory/MemInspect.hpp diff --git a/src/memory/MemoryPoolManager.cpp b/mllm/memory/MemoryPoolManager.cpp similarity index 100% rename from src/memory/MemoryPoolManager.cpp rename to mllm/memory/MemoryPoolManager.cpp diff --git a/src/memory/MemoryPoolManager.hpp b/mllm/memory/MemoryPoolManager.hpp similarity index 75% rename from src/memory/MemoryPoolManager.hpp rename to mllm/memory/MemoryPoolManager.hpp index 17d3a25a6..e44871f58 100644 --- a/src/memory/MemoryPoolManager.hpp +++ b/mllm/memory/MemoryPoolManager.hpp @@ -7,15 +7,18 @@ #include #include #include +#include namespace mllm { // 高性能临时内存池,仅服务 activation 分配,模型权重/KV Cache 请使用系统分配 class MemoryPoolManager : public MemoryManager { private: + std::vector raw_blocks_allocated_; struct Header { - void *raw_ptr; // 新增原始指针 - size_t size; + void *raw_ptr; // 系统分配时的原始指针 + size_t size; // 用户请求的大小 + size_t padding; // 为对齐产生的填充大小 bool is_sys; }; @@ -50,18 +53,19 @@ class MemoryPoolManager : public MemoryManager { uintptr_t base = reinterpret_cast(raw) + sizeof(Header); uintptr_t aligned = (base + alignment - 1) & ~(alignment - 1); Header *hdr = reinterpret_cast
(aligned - sizeof(Header)); - hdr->raw_ptr = raw; // 记录原始指针 + hdr->raw_ptr = raw; hdr->size = size; hdr->is_sys = true; + hdr->padding = 0; *ptr = reinterpret_cast(aligned); } void sys_free(void *ptr) { if (!ptr) return; uintptr_t user_ptr = reinterpret_cast(ptr); - Header *hdr = reinterpret_cast
(user_ptr - sizeof(Header)); // 找到Header + Header *hdr = reinterpret_cast
(user_ptr - sizeof(Header)); #if defined(_WIN32) - _aligned_free(hdr->raw_ptr); // 释放原始指针 + _aligned_free(hdr->raw_ptr); #else std::free(hdr->raw_ptr); #endif @@ -77,6 +81,7 @@ class MemoryPoolManager : public MemoryManager { posix_memalign(&raw, alignment_, alloc_size); #endif assert(raw); + raw_blocks_allocated_.push_back(raw); auto *blk = new Block{reinterpret_cast(raw), alloc_size, nullptr, free_head_}; if (free_head_) free_head_->prev = blk; free_head_ = blk; @@ -107,14 +112,19 @@ class MemoryPoolManager : public MemoryManager { else free_head_ = blk; if (cur) cur->prev = blk; + // 向前合并 if (blk->prev && blk->prev->addr + blk->prev->size == blk->addr) { - blk->prev->size += blk->size; - blk->prev->next = blk->next; - if (blk->next) blk->next->prev = blk->prev; + Block *prev_block = blk->prev; // 在delete之前,安全地缓存 prev 指针 + prev_block->size += blk->size; + prev_block->next = blk->next; + if (blk->next) { + blk->next->prev = prev_block; + } delete blk; - blk = blk->prev; + blk = prev_block; // 使用缓存的、安全的指针进行赋值 } + // 向后合并 if (blk->next && blk->addr + blk->size == blk->next->addr) { blk->size += blk->next->size; @@ -130,51 +140,62 @@ class MemoryPoolManager : public MemoryManager { pool_size_(0), alignment_(align) { expand(init); } - ~MemoryPoolManager() override { std::lock_guard lg(mutex_); + for (auto *b = free_head_; b;) { auto *next = b->next; + delete b; + b = next; + } + free_head_ = nullptr; + + for (void *raw_block : raw_blocks_allocated_) { #if defined(_WIN32) - _aligned_free(reinterpret_cast(b->addr)); + _aligned_free(raw_block); #else - std::free(reinterpret_cast(b->addr)); + std::free(raw_block); #endif - delete b; // 释放 Block 对象 - b = next; } + raw_blocks_allocated_.clear(); } void alloc(void **ptr, size_t size, size_t alignment) override { assert(size > 0); std::lock_guard lg(mutex_); - size_t req = size + sizeof(Header); - // 大块走系统 - if (req > pool_size_ * LARGE_RATIO) { + + size_t req_total = size + sizeof(Header); + + if (req_total > pool_size_ * 
LARGE_RATIO) { sys_alloc(ptr, size, alignment); return; } - // 小块服务,需要空间时线性扩容 - if (total_free() < req || total_free() < pool_size_ * POOL_THRESHOLD) { - expand(req); + if (total_free() < req_total || total_free() < pool_size_ * POOL_THRESHOLD) { + expand(req_total); } - // 首适应分配 + for (auto *b = free_head_; b; b = b->next) { uintptr_t start = b->addr; uintptr_t base = start + sizeof(Header); uintptr_t aligned = (base + alignment - 1) & ~(alignment - 1); + size_t padding = aligned - start - sizeof(Header); - if (b->size >= padding + req) { + size_t total_consumed = req_total + padding; + + if (b->size >= total_consumed) { uintptr_t user = aligned; auto *hdr = reinterpret_cast
(user - sizeof(Header)); + + hdr->raw_ptr = nullptr; hdr->size = size; hdr->is_sys = false; + hdr->padding = padding; + *ptr = reinterpret_cast(user); - // 更新块 - uintptr_t next = user + size; - size_t remain = b->size - (padding + req); + + size_t remain = b->size - total_consumed; if (remain > sizeof(Header)) { - b->addr = next; + b->addr = start + total_consumed; b->size = remain; } else { if (b->prev) @@ -187,16 +208,19 @@ class MemoryPoolManager : public MemoryManager { return; } } - // 再回退系统 sys_alloc(ptr, size, alignment); } void free(void *ptr) override { if (!ptr) return; std::lock_guard lg(mutex_); + auto *hdr = hdr_of(ptr); + if (!hdr->is_sys) { - insert_block(reinterpret_cast(hdr), hdr->size + sizeof(Header)); + uintptr_t block_start = reinterpret_cast(ptr) - sizeof(Header) - hdr->padding; + size_t block_size = hdr->size + sizeof(Header) + hdr->padding; + insert_block(block_start, block_size); } else { sys_free(ptr); } @@ -205,4 +229,4 @@ class MemoryPoolManager : public MemoryManager { } // namespace mllm -#endif // MLLM_MEMORY_POOL_H +#endif // MLLM_MEMORY_POOL_H \ No newline at end of file diff --git a/src/memory/SystemMemoryManager.cpp b/mllm/memory/SystemMemoryManager.cpp similarity index 67% rename from src/memory/SystemMemoryManager.cpp rename to mllm/memory/SystemMemoryManager.cpp index 04929edb8..f2f94ac64 100644 --- a/src/memory/SystemMemoryManager.cpp +++ b/mllm/memory/SystemMemoryManager.cpp @@ -3,7 +3,13 @@ #include #include #include + +// macOS 特定的内存大小查询头文件 +#ifdef __APPLE__ +#include +#else #include +#endif namespace mllm { @@ -28,13 +34,18 @@ void SystemMemoryManager::alloc(void **ptr, size_t size, size_t alignment) { void SystemMemoryManager::free(void *ptr) { if (ptr != nullptr) { -#ifdef _WIN32 - if (_msize(((void **)ptr)[-1]) > 0) { - ::free(((void **)ptr)[-1]); + void *origin = ((void **)ptr)[-1]; // 取出原始指针 +#if defined(_WIN32) + if (_msize(origin) > 0) { + ::free(origin); } -#else - if (malloc_usable_size(((void **)ptr)[-1]) > 0) { 
- ::free(((void **)ptr)[-1]); +#elif defined(__APPLE__) + if (malloc_size(origin) > 0) { // macOS 平台使用 malloc_size + ::free(origin); + } +#else // Linux 和其他类 Unix 系统 + if (malloc_usable_size(origin) > 0) { + ::free(origin); } #endif } diff --git a/src/memory/SystemMemoryManager.hpp b/mllm/memory/SystemMemoryManager.hpp similarity index 100% rename from src/memory/SystemMemoryManager.hpp rename to mllm/memory/SystemMemoryManager.hpp diff --git a/src/models/bert/configuration_bert.hpp b/mllm/models/bert/configuration_bert.hpp similarity index 100% rename from src/models/bert/configuration_bert.hpp rename to mllm/models/bert/configuration_bert.hpp diff --git a/src/models/bert/modeling_bert.hpp b/mllm/models/bert/modeling_bert.hpp similarity index 95% rename from src/models/bert/modeling_bert.hpp rename to mllm/models/bert/modeling_bert.hpp index adb344372..02793acd9 100644 --- a/src/models/bert/modeling_bert.hpp +++ b/mllm/models/bert/modeling_bert.hpp @@ -39,7 +39,7 @@ class BertLayer : public Module { BertLayer() = default; BertLayer(const BertConfig &config, const string &base_name) { // base_name: encoder.layer.n. 
- attention = MultiHeadAttention(config.hidden_size, config.num_attention_heads, config.num_attention_heads, config.hidden_size / config.num_attention_heads, SPLIT_NONE, false, false, RoPEType::NONE, -1, -1, 0, false, true, config.names_config, base_name + config.names_config._attn_base_name); + attention = MultiHeadAttention(config.hidden_size, config.num_attention_heads, config.num_attention_heads, config.hidden_size / config.num_attention_heads, SPLIT_NONE, PostQkv_NONE, false, RoPEType::NONE, -1, -1, 0, false, true, true, config.attn_implementation, config.names_config, base_name + config.names_config._attn_base_name); feed_forward = FeedForward(config.hidden_size, config.intermediate_size, config.hidden_act, true, config.names_config, base_name); diff --git a/src/models/bert/tokenization_bert.hpp b/mllm/models/bert/tokenization_bert.hpp similarity index 100% rename from src/models/bert/tokenization_bert.hpp rename to mllm/models/bert/tokenization_bert.hpp diff --git a/src/models/clip/configuration_clip.hpp b/mllm/models/clip/configuration_clip.hpp similarity index 100% rename from src/models/clip/configuration_clip.hpp rename to mllm/models/clip/configuration_clip.hpp diff --git a/src/models/clip/modeling_clip.hpp b/mllm/models/clip/modeling_clip.hpp similarity index 85% rename from src/models/clip/modeling_clip.hpp rename to mllm/models/clip/modeling_clip.hpp index c2cb383b0..0feb8dd7e 100644 --- a/src/models/clip/modeling_clip.hpp +++ b/mllm/models/clip/modeling_clip.hpp @@ -23,7 +23,7 @@ class ClipVisionEmbedding final : public Module { position_ids = Parameter(1, std::ceil(img_hw / patch) * std::ceil(img_hw / patch) + 1, 1, 1, base_name + names._position_ids_name); position_embedding = Embedding(std::ceil(img_hw / patch) * std::ceil(img_hw / patch) + 1, hidden_dim, base_name + names._position_embeddings_name); } - vector Forward(vector inputs, vector args) override { + vector Forward(vector inputs, vector args) override { auto embd = 
patch_embedding(inputs[0]); embd = embd.transpose({{SEQUENCE, DIMENSION}, {HEAD, SEQUENCE}}); // BSHD->BDHS->BDSH embd = embd.flatten(HEAD, SEQUENCE); @@ -42,13 +42,14 @@ class CLipVisionModel final : public Module { public: CLipVisionModel() = default; CLipVisionModel(int hidden_dim, int head_size, int ffn_hidden, const string &act_fn_type, int patch, int img_hw, int block_num, + string attn_implementation, const ViTNameConfig &names, const string &base_name) { embedding = ClipVisionEmbedding(hidden_dim, patch, img_hw, names, base_name + names._embd_name); pre_layrnorm = LayerNorm(hidden_dim, true, 1e-6, base_name + names._vision_pre_layrnorm_name); - blocks = List(block_num, hidden_dim, head_size, ffn_hidden, act_fn_type, names, base_name + names._layer_name); + blocks = List(block_num, hidden_dim, head_size, ffn_hidden, act_fn_type, attn_implementation, names, base_name + names._layer_name); norm = LayerNorm(hidden_dim, true, 1e-6, base_name + names._post_norm_name); } - vector Forward(vector inputs, vector args) override { + vector Forward(vector inputs, vector args) override { auto x = embedding(inputs)[0]; x = pre_layrnorm(x); for (auto &block : blocks) { @@ -70,7 +71,7 @@ class ClipTextMLP final : public Module { up_proj = Linear(hidden_dim, ffn_hidden, true, base_name + names._up_proj_name); act = ACT_FN[act_fn_type](base_name + names._ffn_base_name + "act"); } - vector Forward(vector inputs, vector args) override { + vector Forward(vector inputs, vector args) override { auto x = up_proj(inputs[0]); x = act(x); return {x}; @@ -86,15 +87,18 @@ class ClipTextBlock final : public Module { public: ClipTextBlock() = default; - ClipTextBlock(int hidden_dim, int head_size, int ffn_hidden, const string &act_fn_type, const ClipTextNameConfig &names, const string &base_name) { - attention = MultiHeadAttention(hidden_dim, head_size, head_size, hidden_dim / head_size, SPLIT_NONE, false, false, - RoPEType::NONE, -1,-1, 0, true, true, names, base_name + 
names._attn_base_name); + ClipTextBlock(int hidden_dim, int head_size, int ffn_hidden, const string &act_fn_type, + string attn_implementation, const ClipTextNameConfig &names, const string &base_name) { + attention = MultiHeadAttention(hidden_dim, head_size, head_size, + hidden_dim / head_size, SPLIT_NONE, PostQkv_NONE, false, + RoPEType::NONE, -1, -1, 0, true, true, true, attn_implementation, + names, base_name + names._attn_base_name); mlp = ClipTextMLP(hidden_dim, ffn_hidden, act_fn_type, names, base_name + names._ffn_base_name); down_proj = Linear(ffn_hidden, hidden_dim, true, base_name + names._down_proj_name); norm1 = LayerNorm(hidden_dim, true, 1e-6, base_name + names._attn_norm_name); norm2 = LayerNorm(hidden_dim, true, 1e-6, base_name + names._ffn_norm_name); } - vector Forward(vector inputs, vector args) override { + vector Forward(vector inputs, vector args) override { auto x = norm1(inputs[0]); x = attention({x, x, x})[0]; auto tmp = x + inputs[0]; @@ -118,7 +122,7 @@ class ClipTextEmbedding final : public Module { position_ids = Parameter(1, max_position_embeddings, 1, 1, base_name + names._position_ids_name); position_embedding = Embedding(max_position_embeddings, hidden_dim, base_name + names._position_embeddings_name); } - vector Forward(vector inputs, vector args) override { + vector Forward(vector inputs, vector args) override { auto embd = token_embedding(inputs[0]); auto pos_embd = position_ids().clip({}, {}, {0, embd.sequence()}, {}); auto p_embd = position_embedding(pos_embd); @@ -134,14 +138,17 @@ class CLipTextModel final : public Module { public: CLipTextModel() = default; - CLipTextModel(int hidden_dim, int head_size, int ffn_hidden, const string &act_fn_type, int max_position_embeddings, int vocab_size, int block_num, + CLipTextModel(int hidden_dim, int head_size, int ffn_hidden, const string &act_fn_type, + int max_position_embeddings, int vocab_size, int block_num, + string attn_implementation, const ClipTextNameConfig &names, const 
string &base_name) { embedding = ClipTextEmbedding(vocab_size, hidden_dim, max_position_embeddings, names, base_name + names._embd_name); - blocks = List(block_num, hidden_dim, head_size, ffn_hidden, act_fn_type, names, base_name + names._layer_name); + blocks = List(block_num, hidden_dim, head_size, ffn_hidden, act_fn_type, + attn_implementation, names, base_name + names._layer_name); norm = LayerNorm(hidden_dim, true, 1e-6, base_name + names._post_norm_name); } - vector Forward(vector inputs, vector args) override { + vector Forward(vector inputs, vector args) override { auto x = embedding(inputs)[0]; for (auto &block : blocks) { x = block({x})[0]; @@ -164,22 +171,27 @@ class CLipModel final : public Module { config.hidden_dim, config.head_size, config.ffn_hidden, config.act_fn_type, config.max_position_embeddings, config.text_vocab_size, config.text_block_num, config.patch, config.img_hw, config.block_num, + config.attn_implementation, config.text_names_config, "text_model", config.names_config, "vision_model"){}; CLipModel(int text_hidden_dim, int text_head_size, int text_ffn_hidden, int vision_hidden_dim, int vision_head_size, int vision_ffn_hidden, const string &act_fn_type, int max_position_embeddings, int vocab_size, int text_block_num, int patch, int img_hw, int vision_block_num, + string attn_implementation, const ClipTextNameConfig &text_names, const string &text_base_name, const ViTNameConfig &vit_names, const string &vision_base_name) { - text_model = CLipTextModel(text_hidden_dim, text_head_size, text_ffn_hidden, act_fn_type, max_position_embeddings, vocab_size, text_block_num, + text_model = CLipTextModel(text_hidden_dim, text_head_size, text_ffn_hidden, + act_fn_type, max_position_embeddings, + vocab_size, text_block_num, + attn_implementation, text_names, text_base_name); text_projection = Linear(text_hidden_dim, text_hidden_dim, false, "text_projection"); vision_model = CLipVisionModel(vision_hidden_dim, vision_head_size, vision_ffn_hidden, 
act_fn_type, patch, img_hw, vision_block_num, - vit_names, vision_base_name); + attn_implementation, vit_names, vision_base_name); visual_projection = Linear(vision_hidden_dim, text_hidden_dim, false, "visual_projection"); } - vector Forward(vector inputs, vector args) override { + vector Forward(vector inputs, vector args) override { auto text = text_model({inputs[0]})[0]; text = text_projection(text); text = text / text.norm(2); diff --git a/src/models/clip/processing_clip.hpp b/mllm/models/clip/processing_clip.hpp similarity index 99% rename from src/models/clip/processing_clip.hpp rename to mllm/models/clip/processing_clip.hpp index 690f80f65..c738e94d5 100644 --- a/src/models/clip/processing_clip.hpp +++ b/mllm/models/clip/processing_clip.hpp @@ -29,7 +29,7 @@ class ClipProcessor : public PreProcessor { int channel = img.size(); int height = img[0].size(); int width = img[0][0].size(); - Tensor tensor1(1, height, channel, width, Backend::global_backends[type], true); + Tensor tensor1(1, height, channel, width, Backend::global_backends[type].get(), true); tensor1.setName(std::move(name)); Tensor::tensor_status = TENSOR_STATIC_INIT; tensor1.setTtype(INPUT_TENSOR); diff --git a/src/models/dclm/configuration_dclm.hpp b/mllm/models/dclm/configuration_dclm.hpp similarity index 100% rename from src/models/dclm/configuration_dclm.hpp rename to mllm/models/dclm/configuration_dclm.hpp diff --git a/src/models/dclm/modeling_dclm.hpp b/mllm/models/dclm/modeling_dclm.hpp similarity index 85% rename from src/models/dclm/modeling_dclm.hpp rename to mllm/models/dclm/modeling_dclm.hpp index 8aa7cca4a..efd7a90f4 100644 --- a/src/models/dclm/modeling_dclm.hpp +++ b/mllm/models/dclm/modeling_dclm.hpp @@ -55,31 +55,33 @@ class DCLMAttention final : public Module { KVCache v_cache; Softmax softmax; - int attn_hidden_dim_; + int hidden_dim_; int head_dim_; int n_heads_; + string attn_implementation_; public: DCLMAttention() = default; DCLMAttention(const DCLMConfig &cfg, const 
std::string &base_name) { int head_dim = cfg.dim / cfg.n_heads; - attn_hidden_dim_ = cfg.n_heads * head_dim; + hidden_dim_ = cfg.n_heads * head_dim; head_dim_ = head_dim; n_heads_ = cfg.n_heads; + attn_implementation_ = cfg.attn_implementation; in_proj = Linear(cfg.dim, 3 * cfg.n_heads * head_dim, false, base_name + "in_proj"); out_proj = Linear(cfg.n_heads * head_dim, cfg.dim, false, base_name + "out_proj"); q_norm = LayerNorm(cfg.n_heads * head_dim, false, cfg.norm_eps, base_name + "q_norm"); k_norm = LayerNorm(cfg.n_heads * head_dim, false, cfg.norm_eps, base_name + "k_norm"); q_rope = RoPE(cfg.RoPE_type, 10000, cfg.seq_len, base_name + "q_rope"); k_rope = RoPE(cfg.RoPE_type, 10000, cfg.seq_len, base_name + "k_rope"); - k_cache = KVCache(cfg.n_heads, head_dim, 1, cfg.cache_limit, base_name + "k_cache"); - v_cache = KVCache(cfg.n_heads, head_dim, 1, cfg.cache_limit, base_name + "v_cache"); + k_cache = KVCache(cfg.n_heads, head_dim, 1, cfg.cache_limit, cfg.attn_implementation, base_name + "k_cache"); + v_cache = KVCache(cfg.n_heads, head_dim, 1, cfg.cache_limit, cfg.attn_implementation, base_name + "v_cache"); softmax = Softmax(DIMENSION, true, base_name + "softmax"); } std::vector Forward(std::vector inputs, std::vector args) override { auto qkv = in_proj(inputs[0]); - auto qkv_sp = qkv.split({attn_hidden_dim_, attn_hidden_dim_, attn_hidden_dim_}, DIMENSION); + auto qkv_sp = qkv.split({hidden_dim_, hidden_dim_, hidden_dim_}, DIMENSION); Tensor q, k, v; q = qkv_sp[0]; @@ -98,13 +100,19 @@ class DCLMAttention final : public Module { k = k_cache(k); v = v_cache(v); - k = k.transpose(SEQUENCE, DIMENSION); - auto qk = Tensor::mm(q, k); - qk = qk / std::sqrt(head_dim_); - - qk = softmax(qk, k_cache.getCacheSeqLen()); - - auto o = Tensor::mm(qk, v); + Tensor o; + if (attn_implementation_ == "flash_attention_2") { + o = Tensor::flash_attention2_forward(q, k, v, true); + } else if (attn_implementation_ == "sage_attention") { + o = Tensor::sage_attention_forward(q, k, v, 
true); + } else { // eager implementation + k = k.transpose(SEQUENCE, DIMENSION); + auto qk = Tensor::mm(q, k); + qk = qk / std::sqrt(head_dim_); + + qk = softmax(qk, k_cache.getCacheSeqLen()); + o = Tensor::mm(qk, v); + } o = o.view(-1, 1, -1, n_heads_ * head_dim_); o = out_proj(o); return {o}; diff --git a/src/models/dclm/tokenization_dclm.hpp b/mllm/models/dclm/tokenization_dclm.hpp similarity index 100% rename from src/models/dclm/tokenization_dclm.hpp rename to mllm/models/dclm/tokenization_dclm.hpp diff --git a/src/models/ds_qwen2/tokenization_ds_qwen2.hpp b/mllm/models/ds_qwen2/tokenization_ds_qwen2.hpp similarity index 100% rename from src/models/ds_qwen2/tokenization_ds_qwen2.hpp rename to mllm/models/ds_qwen2/tokenization_ds_qwen2.hpp diff --git a/src/models/fuyu/configuration_fuyu.hpp b/mllm/models/fuyu/configuration_fuyu.hpp similarity index 93% rename from src/models/fuyu/configuration_fuyu.hpp rename to mllm/models/fuyu/configuration_fuyu.hpp index e240d2c4e..95be8228e 100644 --- a/src/models/fuyu/configuration_fuyu.hpp +++ b/mllm/models/fuyu/configuration_fuyu.hpp @@ -55,13 +55,14 @@ class FuyuConfig { block_num = 36; patch_size = 30; chl_size = 3; - max_position_embeddings= 16384; + max_position_embeddings = 16384; rope_theta = 25000; } else { throw std::runtime_error("Unsupported model size"); } cache_limit = token_limit; } + string attn_implementation = "flash_attention_2"; // Options: "flash_attention_2", "eager" }; #endif // CONFIG_FUYU_HPP diff --git a/src/models/fuyu/modeling_fuyu.hpp b/mllm/models/fuyu/modeling_fuyu.hpp similarity index 79% rename from src/models/fuyu/modeling_fuyu.hpp rename to mllm/models/fuyu/modeling_fuyu.hpp index e459fcd95..ec36fc324 100644 --- a/src/models/fuyu/modeling_fuyu.hpp +++ b/mllm/models/fuyu/modeling_fuyu.hpp @@ -8,6 +8,7 @@ #include "Backend.hpp" #include "Layer.hpp" #include "Module.hpp" +#include "Types.hpp" #include "configuration_fuyu.hpp" #include @@ -22,9 +23,15 @@ class PersimmonBlock final : public 
Module { public: PersimmonBlock() = default; - PersimmonBlock(int hidden_dim, int head_size, int ffn_hidden, float rope_theta, int max_position_embeddings, int cache_limit, const FuyuNameConfig &names, const string &base_name) { - attention = MultiHeadAttention(hidden_dim, head_size, head_size, hidden_dim / head_size, SPLIT_D_HD, true, false, - PERSIMMONROPE, rope_theta, max_position_embeddings, cache_limit, true, true, names, base_name + names._attn_base_name); + PersimmonBlock(int hidden_dim, int head_size, int ffn_hidden, float rope_theta, int max_position_embeddings, int cache_limit, + string attn_implementation, + const FuyuNameConfig &names, const string &base_name) { + attention = MultiHeadAttention(hidden_dim, head_size, head_size, hidden_dim / head_size, + SPLIT_D_HD, PostQkv_LayerNorm, false, + PERSIMMONROPE, rope_theta, max_position_embeddings, cache_limit, + true, true, true, + attn_implementation, + names, base_name + names._attn_base_name); mlp = FeedForward(hidden_dim, ffn_hidden, "ReLU2", true, names, base_name + names._ffn_base_name); norm1 = LayerNorm(hidden_dim, true, 1e-6, base_name + names._attn_norm_name); @@ -52,8 +59,14 @@ class Persimmon final : public Module { public: Persimmon() = default; - Persimmon(int hidden_dim, int head_size, int ffn_hidden, float rope_theta, int max_position_embeddings, int cache_limit, int block_num, int vocab_size, const FuyuNameConfig &names) { - blocks = List(block_num, hidden_dim, head_size, ffn_hidden, rope_theta, max_position_embeddings, cache_limit, names, names.blk_name); + Persimmon(int hidden_dim, int head_size, int ffn_hidden, float rope_theta, int max_position_embeddings, + int cache_limit, int block_num, int vocab_size, + string attn_implementation, + const FuyuNameConfig &names) { + blocks = List(block_num, hidden_dim, head_size, ffn_hidden, + rope_theta, max_position_embeddings, cache_limit, + attn_implementation, + names, names.blk_name); norm = LayerNorm(hidden_dim, true, 1e-6, 
names.post_norm_name); lm_head = Linear(hidden_dim, vocab_size, false, names.lm_head_name); } @@ -89,15 +102,17 @@ class FuyuModel final : public Module { FuyuModel(config.vocab_size, config.hidden_dim, config.head_size, config.ffn_hidden, config.block_num, config.rope_theta, config.max_position_embeddings, config.cache_limit, config.patch_size, config.chl_size, + config.attn_implementation, config.name_config) { } FuyuModel(int vocab_size, int hidden_dim, int head_size, int ffn_hidden, int block_num, float rope_theta, int max_position_embeddings, int cache_limit, int patch_size, int chl_size, + string attn_implementation, const FuyuNameConfig &names) { embed_tokens = Embedding(vocab_size, hidden_dim, names.token_embd_name); vision_embed_tokens = Linear(patch_size * patch_size * chl_size, hidden_dim, true, names.vision_embed_tokens_name); - persimmon = Persimmon(hidden_dim, head_size, ffn_hidden, rope_theta, max_position_embeddings, cache_limit, block_num, vocab_size, names); + persimmon = Persimmon(hidden_dim, head_size, ffn_hidden, rope_theta, max_position_embeddings, cache_limit, block_num, vocab_size, attn_implementation, names); } vector Forward(vector inputs, vector args) override { auto input_ids = embed_tokens(inputs[0]); diff --git a/src/models/fuyu/processing_fuyu.hpp b/mllm/models/fuyu/processing_fuyu.hpp similarity index 99% rename from src/models/fuyu/processing_fuyu.hpp rename to mllm/models/fuyu/processing_fuyu.hpp index eebc51a42..0b618ddf5 100644 --- a/src/models/fuyu/processing_fuyu.hpp +++ b/mllm/models/fuyu/processing_fuyu.hpp @@ -188,7 +188,7 @@ class FuyuProcessor final : public PreProcessor { seq = image_patches[0].size(); dims = image_patches[0][0].size(); } - Tensor tensor1(batch, 1, seq, dims, Backend::global_backends[type], true); + Tensor tensor1(batch, 1, seq, dims, Backend::global_backends[type].get(), true); tensor1.setName(name); Tensor::tensor_status = TENSOR_STATIC_INIT; tensor1.setTtype(INPUT_TENSOR); @@ -209,7 +209,7 @@ class 
FuyuProcessor final : public PreProcessor { batch = image_patches_indices.size(); seq = image_patches_indices[0].size(); } - Tensor tensor1(batch, 1, seq, 1, Backend::global_backends[type], true); + Tensor tensor1(batch, 1, seq, 1, Backend::global_backends[type].get(), true); tensor1.setName(name); Tensor::tensor_status = TENSOR_STATIC_INIT; tensor1.setTtype(INPUT_TENSOR); diff --git a/src/models/gemma/README.md b/mllm/models/gemma/README.md similarity index 100% rename from src/models/gemma/README.md rename to mllm/models/gemma/README.md diff --git a/src/models/gemma/configuration_gemma.hpp b/mllm/models/gemma/configuration_gemma.hpp similarity index 100% rename from src/models/gemma/configuration_gemma.hpp rename to mllm/models/gemma/configuration_gemma.hpp diff --git a/src/models/gemma/modeling_gemma.hpp b/mllm/models/gemma/modeling_gemma.hpp similarity index 91% rename from src/models/gemma/modeling_gemma.hpp rename to mllm/models/gemma/modeling_gemma.hpp index 08ed1788f..88d23cecd 100644 --- a/src/models/gemma/modeling_gemma.hpp +++ b/mllm/models/gemma/modeling_gemma.hpp @@ -53,9 +53,14 @@ class GemmaDecoder final : public Module { public: GemmaDecoder() = default; GemmaDecoder(const GemmaConfig &config, const GemmaNameConfig &names, const string &base_name) { - self_atten = MultiHeadAttention(config.hidden_size, config.num_attention_heads, config.num_key_value_heads, - config.hidden_size / config.num_attention_heads, SPLIT_NONE, false, false, - config.RoPE_type, config.rope_theta, config.max_position_embeddings, config.cache_limit, true, false, names, base_name + names._attn_base_name); + self_atten = MultiHeadAttention(config.hidden_size, config.num_attention_heads, + config.num_key_value_heads, + config.hidden_size / config.num_attention_heads, + SPLIT_NONE, PostQkv_NONE, false, + config.RoPE_type, config.rope_theta, config.max_position_embeddings, + config.cache_limit, true, false, false, + config.attn_implementation, + names, base_name + 
names._attn_base_name); mlp = GemmaMLP(config.hidden_size, config.intermediate_size, names, base_name + names._ffn_base_name); input_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, true, base_name + names._attn_norm_name); post_attention_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, true, base_name + names._ffn_norm_name); diff --git a/src/models/gemma/tokenization_gemma.hpp b/mllm/models/gemma/tokenization_gemma.hpp similarity index 100% rename from src/models/gemma/tokenization_gemma.hpp rename to mllm/models/gemma/tokenization_gemma.hpp diff --git a/src/models/gemma2/configuration_gemma2.hpp b/mllm/models/gemma2/configuration_gemma2.hpp similarity index 100% rename from src/models/gemma2/configuration_gemma2.hpp rename to mllm/models/gemma2/configuration_gemma2.hpp diff --git a/src/models/gemma2/modeling_gemma2.hpp b/mllm/models/gemma2/modeling_gemma2.hpp similarity index 89% rename from src/models/gemma2/modeling_gemma2.hpp rename to mllm/models/gemma2/modeling_gemma2.hpp index 5fb9984db..933bdb8bf 100644 --- a/src/models/gemma2/modeling_gemma2.hpp +++ b/mllm/models/gemma2/modeling_gemma2.hpp @@ -20,7 +20,7 @@ class Gemma2Attention final : public Module { head_dim = 2048 / num_heads; num_key_value_heads = config.num_key_value_heads; num_key_value_groups = num_heads / num_key_value_heads; - + attn_impl = config.attn_implementation; // init layers q_proj = Linear(hidden_size, head_dim * num_heads, false, base_name + names._q_proj_name); k_proj = Linear(hidden_size, head_dim * num_key_value_heads, false, @@ -32,8 +32,8 @@ class Gemma2Attention final : public Module { base_name + "q_rope"); k_rope = RoPE(config.RoPE_type, config.rope_theta, config.max_position_embeddings, base_name + "k_rope"); - k_cache = KVCache(num_key_value_heads, head_dim, num_key_value_groups, config.cache_limit, base_name + "k_cache"); - v_cache = KVCache(num_key_value_heads, head_dim, num_key_value_groups, config.cache_limit, base_name + "v_cache"); + k_cache = 
KVCache(num_key_value_heads, head_dim, num_key_value_groups, config.cache_limit, config.attn_implementation, base_name + "k_cache"); + v_cache = KVCache(num_key_value_heads, head_dim, num_key_value_groups, config.cache_limit, config.attn_implementation, base_name + "v_cache"); softmax = Softmax(DIMENSION, true, base_name + "softmax"); } @@ -56,15 +56,19 @@ class Gemma2Attention final : public Module { key_states = k_cache(key_states); value_states = v_cache(value_states); - // attention weight - auto atten_weight = - Tensor::mm(query_states, key_states.transpose(Chl::SEQUENCE, Chl::DIMENSION)) - / std::sqrt(head_dim); - - atten_weight = softmax(atten_weight, k_cache.getCacheSeqLen()); - - // attention output - auto atten_output = Tensor::mm(atten_weight, value_states); + Tensor atten_output; + if (attn_impl == "flash_attention_2") { + atten_output = Tensor::flash_attention2_forward(query_states, key_states, value_states, true); + } else if (attn_impl == "sage_attention") { + atten_output = Tensor::sage_attention_forward(query_states, key_states, value_states, true); + } else { // eager implementation + // attention weight + auto atten_weight = + Tensor::mm(query_states, key_states.transpose(Chl::SEQUENCE, Chl::DIMENSION)) + / std::sqrt(head_dim); + atten_weight = softmax(atten_weight, k_cache.getCacheSeqLen()); + atten_output = Tensor::mm(atten_weight, value_states); + } atten_output = atten_output.view(-1, 1, -1, head_dim * num_heads); atten_output = o_proj(atten_output); return {atten_output}; @@ -93,6 +97,7 @@ class Gemma2Attention final : public Module { KVCache k_cache; KVCache v_cache; Softmax softmax; + string attn_impl; }; class Gemma2MLP final : public Module { diff --git a/src/models/imagebind/configuration_imagebind.hpp b/mllm/models/imagebind/configuration_imagebind.hpp similarity index 100% rename from src/models/imagebind/configuration_imagebind.hpp rename to mllm/models/imagebind/configuration_imagebind.hpp diff --git 
a/src/models/imagebind/modeling_imagebind.hpp b/mllm/models/imagebind/modeling_imagebind.hpp similarity index 87% rename from src/models/imagebind/modeling_imagebind.hpp rename to mllm/models/imagebind/modeling_imagebind.hpp index 2fe620851..4f27e764b 100644 --- a/src/models/imagebind/modeling_imagebind.hpp +++ b/mllm/models/imagebind/modeling_imagebind.hpp @@ -7,6 +7,7 @@ #include "Layer.hpp" #include "Module.hpp" +#include "Types.hpp" #include "configuration_imagebind.hpp" #include "models/transformer/modeling_transformer.hpp" @@ -20,7 +21,9 @@ class EncoderBlock final : public Module { public: EncoderBlock() = default; - EncoderBlock(int hidden_dim, int head_size, int ffn_hidden, const string &model_type, const ImagebindNameConfig &names, const string &base_name) { + EncoderBlock(int hidden_dim, int head_size, int ffn_hidden, const string &model_type, + string attn_implementation, + const ImagebindNameConfig &names, const string &base_name) { bool do_mask = false; bool bias_kv_cat = false; if (model_type == "text") { @@ -29,8 +32,9 @@ class EncoderBlock final : public Module { bias_kv_cat = true; } attention = MultiHeadAttention(hidden_dim, head_size, head_size, hidden_dim / head_size, - SPLIT_HD, false, bias_kv_cat, - RoPEType::NONE, -1, -1, 0, do_mask, true, + SPLIT_HD, PostQkv_NONE, bias_kv_cat, + RoPEType::NONE, -1, -1, 0, do_mask, true, true, + attn_implementation, names, base_name + names._attn_base_name); ffn = FeedForward(hidden_dim, ffn_hidden, "GELU", true, names, base_name + names._ffn_base_name); @@ -83,13 +87,16 @@ class ImagebindVisionModel final : public Module { ImagebindVisionModel(const ImagebindConfig &config) : ImagebindVisionModel(config.vision_hidden_dim, config.vision_head_size, config.vision_ffn_hidden, config.head_hidden_dim, config.patch, config.patch_time, config.img_hw, config.vision_block_num, + config.attn_implementation, config.names_config){}; ImagebindVisionModel(int hidden_dim, int head_size, int ffn_hidden, int head_hidden_dim, 
int patch, int patch_time, int img_hw, int block_num, + string attn_implementation, const ImagebindNameConfig &names) { embedding = ImagebindVisionEmbedding(hidden_dim, patch, patch_time, img_hw, names, names._vision_embd_name); pre_transformer_layer = LayerNorm(hidden_dim, true, 1e-6, names.vision_pre_transformer_layer_name); - blocks = List(block_num, hidden_dim, head_size, ffn_hidden, "vision", names, names._vision_blocks_name); + blocks = List(block_num, hidden_dim, head_size, ffn_hidden, "vision", + attn_implementation, names, names._vision_blocks_name); norm = LayerNorm(hidden_dim, true, 1e-6, names.vision_post_norm_name); head = Linear(hidden_dim, head_hidden_dim, false, names.vision_head_name); } @@ -133,14 +140,17 @@ class ImagebindTextModel final : public Module { public: ImagebindTextModel() = default; ImagebindTextModel(const ImagebindConfig &config) : - ImagebindTextModel(config.text_hidden_dim, config.text_head_size, config.text_ffn_hidden, config.head_hidden_dim, + ImagebindTextModel(config.text_hidden_dim, config.text_head_size, + config.text_ffn_hidden, config.head_hidden_dim, config.vocab_size, config.max_position_embeddings, config.text_block_num, + config.attn_implementation, config.names_config){}; ImagebindTextModel(int hidden_dim, int head_size, int ffn_hidden, int head_hidden_dim, int vocab_size, int max_position_embeddings, int block_num, + string attn_implementation, const ImagebindNameConfig &names) { embedding = ImagebindTextEmbedding(vocab_size, hidden_dim, max_position_embeddings, names, names._text_embd_name); - blocks = List(block_num, hidden_dim, head_size, ffn_hidden, "text", names, names._text_blocks_name); + blocks = List(block_num, hidden_dim, head_size, ffn_hidden, "text", attn_implementation, names, names._text_blocks_name); norm = LayerNorm(hidden_dim, true, 1e-6, names.text_post_norm_name); head = Linear(hidden_dim, head_hidden_dim, false, names.text_head_name); } @@ -197,12 +207,15 @@ class ImagebindAudioModel final : 
public Module { ImagebindAudioModel(config.audio_hidden_dim, config.audio_head_size, config.audio_ffn_hidden, config.head_hidden_dim, config.audio_kernal, config.audio_stride, config.audio_h, config.audio_w, config.audio_block_num, + config.attn_implementation, config.names_config){}; ImagebindAudioModel(int hidden_dim, int head_size, int ffn_hidden, int head_hidden_dim, int patch, int stride, int img_h, int img_w, int block_num, + string attn_implementation, const ImagebindNameConfig &names) { embedding = ImagebindAudioEmbedding(hidden_dim, patch, stride, img_h, img_w, names, names._audio_embd_name); - blocks = List(block_num, hidden_dim, head_size, ffn_hidden, "audio", names, names._audio_blocks_name); + blocks = List(block_num, hidden_dim, head_size, ffn_hidden, "audio", + attn_implementation, names, names._audio_blocks_name); norm = LayerNorm(hidden_dim, true, 1e-6, names.audio_post_norm_name); head = Linear(hidden_dim, head_hidden_dim, false, names.audio_head_name); } @@ -235,18 +248,24 @@ class ImagebindModel final : public Module { config.text_hidden_dim, config.text_head_size, config.text_ffn_hidden, config.vocab_size, config.max_position_embeddings, config.text_block_num, config.audio_hidden_dim, config.audio_head_size, config.audio_ffn_hidden, config.audio_kernal, config.audio_stride, config.audio_h, config.audio_w, config.audio_block_num, config.head_hidden_dim, + config.attn_implementation, config.names_config){}; - ImagebindModel(int vision_hidden_dim, int vision_head_size, int vision_ffn_hidden, int patch, int patch_time, int img_hw, int vision_block_num, + ImagebindModel(int vision_hidden_dim, int vision_head_size, int vision_ffn_hidden, int patch, int patch_time, + int img_hw, int vision_block_num, int text_hidden_dim, int text_head_size, int text_ffn_hidden, int vocab_size, int max_position_embeddings, int text_block_num, int audio_hidden_dim, int audio_head_size, int audio_ffn_hidden, int audio_kernal, int audio_stride, int audio_h, int audio_w, 
int audio_block_num, int head_hidden_dim, + string attn_implementation, const ImagebindNameConfig &names) { - text_model = ImagebindTextModel(text_hidden_dim, text_head_size, text_ffn_hidden, head_hidden_dim, - vocab_size, max_position_embeddings, text_block_num, names); - vision_model = ImagebindVisionModel(vision_hidden_dim, vision_head_size, vision_ffn_hidden, head_hidden_dim, - patch, patch_time, img_hw, vision_block_num, names); - audio_model = ImagebindAudioModel(audio_hidden_dim, audio_head_size, audio_ffn_hidden, head_hidden_dim, - audio_kernal, audio_stride, audio_h, audio_w, audio_block_num, names); + text_model = ImagebindTextModel(text_hidden_dim, text_head_size, + text_ffn_hidden, head_hidden_dim, + vocab_size, max_position_embeddings, text_block_num, attn_implementation, names); + vision_model = ImagebindVisionModel(vision_hidden_dim, vision_head_size, + vision_ffn_hidden, head_hidden_dim, + patch, patch_time, img_hw, vision_block_num, attn_implementation, names); + audio_model = ImagebindAudioModel(audio_hidden_dim, audio_head_size, + audio_ffn_hidden, head_hidden_dim, + audio_kernal, audio_stride, audio_h, audio_w, audio_block_num, attn_implementation, names); softmax = Softmax(DIMENSION, "final.softmax1"); softmax2 = Softmax(DIMENSION, "final.softmax2"); } diff --git a/src/models/imagebind/processing_imagebind.hpp b/mllm/models/imagebind/processing_imagebind.hpp similarity index 97% rename from src/models/imagebind/processing_imagebind.hpp rename to mllm/models/imagebind/processing_imagebind.hpp index d2ba8bcc3..75b53e032 100644 --- a/src/models/imagebind/processing_imagebind.hpp +++ b/mllm/models/imagebind/processing_imagebind.hpp @@ -14,7 +14,7 @@ using namespace mllm; class ImagebindProcessor final : public ClipProcessor { static Tensor tokens2Input(vector> tokens, int max_pos, string name = "input", BackendType type = MLLM_CPU) { const auto bsize = static_cast(tokens.size()); - Tensor tensor1(bsize, 1, max_pos, 1, 
Backend::global_backends[type], true); + Tensor tensor1(bsize, 1, max_pos, 1, Backend::global_backends[type].get(), true); tensor1.setName(name); Tensor::tensor_status = TENSOR_STATIC_INIT; tensor1.setTtype(INPUT_TENSOR); @@ -33,7 +33,7 @@ class ImagebindProcessor final : public ClipProcessor { int channel = imgs[0].size(); int height = imgs[0][0].size(); int width = imgs[0][0][0].size(); - Tensor tensor1(Backend::global_backends[type]); + Tensor tensor1(Backend::global_backends[type].get()); tensor1.reshape(imgs.size(), channel, 2, height, width); tensor1.setDtype(MLLM_TYPE_F32); tensor1.alloc(); @@ -65,7 +65,7 @@ class ImagebindProcessor final : public ClipProcessor { int height = audio_new[0].size(); int width = audio_new[0][0].size(); - Tensor tensor1(batch, height, channel, width, Backend::global_backends[type], true); + Tensor tensor1(batch, height, channel, width, Backend::global_backends[type].get(), true); tensor1.setName(std::move(name)); Tensor::tensor_status = TENSOR_STATIC_INIT; tensor1.setTtype(INPUT_TENSOR); diff --git a/mllm/models/ling/configuration_bailing_moe.hpp b/mllm/models/ling/configuration_bailing_moe.hpp new file mode 100644 index 000000000..63dbf8903 --- /dev/null +++ b/mllm/models/ling/configuration_bailing_moe.hpp @@ -0,0 +1,67 @@ +#pragma once +#include "models/transformer/configuration_transformer.hpp" + +using namespace mllm; + +class BailingMoeNameConfig : public TransformerNameConfig { +public: + std::string blk_name; + std::string token_embd_name; + std::string post_norm_name; + std::string lm_head_name; + std::string _gate_proj_name; + + void init() { + blk_name = "model.layers."; + _attn_base_name = "attention."; + _ffn_base_name = "mlp."; + _qkv_proj_name = "query_key_value"; + _o_proj_name = "dense"; + _gate_proj_name = "gate_proj"; + _up_proj_name = "up_proj"; + _down_proj_name = "down_proj"; + _attn_norm_name = "input_layernorm"; + _ffn_norm_name = "post_attention_layernorm"; + token_embd_name = "model.word_embeddings"; + 
post_norm_name = "model.norm"; + lm_head_name = "lm_head"; + } +}; + +struct BailingMoeConfig : public TransformerConfig { + explicit BailingMoeConfig(int token_limit, string type = "A2.75B") : //"A1.3B" + cache_limit(token_limit) { + names_config.init(); + } + + int num_experts = 64; // 64 + int num_experts_per_tok = 6; // 6 + int num_shared_experts = 2; // 2 + bool norm_topk_prob = true; // true + bool use_cache = true; // true + bool use_bias = false; // false + bool use_qkv_bias = false; // false + bool tie_word_embeddings = false; // false + + float attention_dropout = 0.0; + int bos_token_id = 1; + int eos_token_id = 126081; // 126081 + std::string hidden_act = "silu"; + int hidden_size = 2048; // 2048 + float initializer_range = 0.006; // 0.006 + int intermediate_size = 1408; // 1408 + int moe_intermediate_size = 1408; // 1408 + int max_position_embeddings = 32768; // 32768 + std::string model_type = "ling_moe"; // "ling_moe" + int num_attention_heads = 16; // 16 + int num_hidden_layers = 28; // 28 + int num_key_value_heads = 4; // 4 + double rms_norm_eps = 1e-06; // 1e-06 + float rope_theta = 600000.0; // 600000 + int vocab_size = 126464; // 126464 + int head_dim = hidden_size / num_attention_heads; // 2048/16= 128 + + int cache_limit; + RoPEType RoPE_type = RoPEType::HFHUBROPE; + BailingMoeNameConfig names_config; +}; \ No newline at end of file diff --git a/mllm/models/ling/mbp/modeling_bailing_moe_mbp.hpp b/mllm/models/ling/mbp/modeling_bailing_moe_mbp.hpp new file mode 100644 index 000000000..414dc3d56 --- /dev/null +++ b/mllm/models/ling/mbp/modeling_bailing_moe_mbp.hpp @@ -0,0 +1,496 @@ +#pragma once +#include "Layer.hpp" +#include "Module.hpp" +#include "Tensor.hpp" +#include "Trace.hpp" +#include "Types.hpp" +#include "../configuration_bailing_moe.hpp" +#include "settings_bailing_moe_mbp.hpp" +#include "models/transformer/modeling_transformer.hpp" +#include +#include +#include +#include +#include + +#define MBP_THREAD + +using namespace mllm; + 
+class BailingMoeMLP final : public Module { +public: + BailingMoeMLP() = default; + BailingMoeMLP(int hidden_size, int intermediate_size, const BailingMoeNameConfig &names, const std::string &base_name) { + gate_proj = Linear(hidden_size, intermediate_size, false, base_name + names._gate_proj_name); + silu = SiLU(base_name + "act"); + up_proj = Linear(hidden_size, intermediate_size, false, base_name + names._up_proj_name); + down_proj = Linear(intermediate_size, hidden_size, false, base_name + names._down_proj_name); + } + + std::vector Forward(std::vector inputs, std::vector args) override { + auto x = gate_proj(inputs[0]); + x = silu(x); + auto y = up_proj(inputs[0]); + x = x * y; + x = down_proj(x); + return {x}; + } + + void load() { + gate_proj.load(); + up_proj.load(); + down_proj.load(); + } + bool loaded() { + return gate_proj.loaded() && up_proj.loaded() && down_proj.loaded(); + } + void free() { + gate_proj.free(); + up_proj.free(); + down_proj.free(); + } + +private: + Layer gate_proj; + Layer up_proj; + Layer down_proj; + Layer silu; +}; + +class BailingMoeGate final : public Module { +public: + BailingMoeGate() = default; + BailingMoeGate(const BailingMoeConfig &config, const BailingMoeNameConfig &names, const std::string &base_name) { + gate = Linear(config.hidden_size, config.num_experts, false, base_name + "gate"); + softmax = Softmax(DIMENSION, false, base_name + "softmax"); + num_experts_per_tok = config.num_experts_per_tok; + } + + std::vector Forward(std::vector inputs, std::vector args) override { + auto scores = softmax(gate(inputs[0])); + auto experts_w_i = Tensor::topk(scores, num_experts_per_tok, DIMENSION); + auto topk_weight = experts_w_i[0]; // 1, batch*seq, 1, k + auto topk_idx = experts_w_i[1]; // 1, batch*seq, 1, k + topk_idx = topk_idx.view(-1, 1, 1, -1); // 1, 1, 1, k* batch*seq + topk_weight = topk_weight / topk_weight.sum(DIMENSION); // 1, batch*seq, 1, k + return {scores, topk_weight, topk_idx}; + } + +private: + Layer gate; + 
Softmax softmax; + int num_experts_per_tok{}; +}; + +class BailingMoeSparseMoeBlock final : public Module { +public: + BailingMoeSparseMoeBlock() = default; + BailingMoeSparseMoeBlock(const BailingMoeConfig &config, const BailingMoeNameConfig &names, const string &base_name) { + experts = List(config.num_experts, config.hidden_size, config.moe_intermediate_size, names, base_name + "experts."); + gate = BailingMoeGate(config, names, base_name); + num_experts_per_tok = config.num_experts_per_tok; + num_shared_experts = config.num_shared_experts; + if (num_shared_experts > 0) { + shared_experts = BailingMoeMLP(config.hidden_size, + config.moe_intermediate_size * config.num_shared_experts, + names, base_name + "shared_experts."); + } + num_hidden_layers = config.num_hidden_layers; + } + // receive embeds + std::vector Forward(std::vector inputs, std::vector args) override { + int layer_idx = std::any_cast(args[0]); + auto hidden_states = inputs[0]; + auto identity = hidden_states; + if (hidden_states.batch() > 1) { + hidden_states = hidden_states.view(1, -1, ANYDIM, -1); // 1, batch*seq, 1, hidden + } + auto gates_t = gate({hidden_states}); // 1, batch*seq, 1, num_experts + auto scores = gates_t[0]; // 1, batch*seq, 1, num_experts + auto topk_weight = gates_t[1]; // 1, batch*seq, + auto topk_idx = gates_t[2]; // 1, batch*seq, 1, k + hidden_states = moe_infer(hidden_states, topk_weight, topk_idx, layer_idx); // 1, batch*seq, 1, hidden + if (num_shared_experts) { + hidden_states = hidden_states + shared_experts({identity})[0]; // add shared experts + } + if (hidden_states.batch() > 1) { + // expert_cache.view(ANYDIM, seq, -1, -1);//TODO + } + return {hidden_states}; + } + Tensor moe_infer(Tensor hidden_states, + Tensor &topk_weight, + Tensor &topk_idx, + int layer_idx) { + auto dtype = topk_idx.dtype(); + auto device = topk_idx.device(); + topk_idx = topk_idx.fp32().cpu(); + auto idxs = topk_idx.argsort(); // 1, 1, 1, k* batch*seq + auto tokens_per_expert = 
topk_idx.bincount(); // (1, 1, 1, 0) 1, 1, 1, k + idxs = idxs.to(device).to(dtype); + auto token_idxs = idxs / num_experts_per_tok; // 1, 1, 1, k* batch*seq + int start_idx = 0; + int end_idx = start_idx; + auto expert_cache = Tensor::zero_like(hidden_states); // 1, batch*seq, 1, hidden + map exp_token_idx_list, exp_idx_list; + std::vector sorted_keys; // 根据 exp_token_idx_list[i].dimension() 对键值排序 + for (int i = 0; i < experts.size(); ++i) { + if (i >= tokens_per_expert.dimension()) break; // 全部专家计算完 + int this_token_num = tokens_per_expert.dimension() ? tokens_per_expert.d(0, 0, 0, i) : 0; + if (!this_token_num) continue; + end_idx = start_idx + this_token_num; + // + auto exp_token_idx = token_idxs.clip({}, {}, {}, {start_idx, end_idx}); //(1, 1, 1, 0) 1, 1, 1, e-s + auto exp_idx = idxs.clip({}, {}, {}, {start_idx, end_idx}); //(1, 1, 1, 0) 1, 1, 1, e-s + if (topk_weight.dimension() != 1) { topk_weight = topk_weight.view(-1, -1, 1, 1); } // 1, k* batch*seq, 1, 1 + exp_token_idx_list[i] = exp_token_idx; + sorted_keys.push_back(i); + exp_idx_list[i] = exp_idx; + start_idx = end_idx; + } + // std::sort(sorted_keys.begin(), sorted_keys.end(), [&](int a, int b) { + // return exp_token_idx_list[a].dimension() > exp_token_idx_list[b].dimension(); + // }); + if (!sorted_keys.empty()) { + int mv_i = 0; + if (std::find(sorted_keys.begin(), sorted_keys.end(), mv_i) != sorted_keys.end()) { + sorted_keys.erase(std::remove(sorted_keys.begin(), sorted_keys.end(), mv_i), sorted_keys.end()); + sorted_keys.insert(sorted_keys.begin(), mv_i); + } + + if (!experts[sorted_keys[0]].loaded()) { + double time_start = (mllm_time_us() - start_time) / 1000.0F; // ms + + experts[sorted_keys[0]].load(); + + string expert_name = std::to_string(layer_idx) + "_" + std::to_string(sorted_keys[0]); + double time_end = (mllm_time_us() - start_time) / 1000.0F; // ms + load_times[expert_name] = {time_start, time_end}; + // std::cout << "load: " << layer_idx << " " << sorted_keys[0] << std::endl; + } + 
+ // std::cout << layer_idx << "_sorted_keys ["; + for (auto s : sorted_keys) { + // std::cout << s << " "; + } + // std::cout << "]" << std::endl; + } + for (int ii = 0; ii < sorted_keys.size(); ii++) { + int expert_id = sorted_keys[ii]; + if (exp_token_idx_list.find(expert_id) == exp_token_idx_list.end()) continue; // 退出 + if (Module::doLoad) continue; // 退出 + + // step.0 + if ((ii < sorted_keys.size() - 1 && exp_token_idx_list[sorted_keys[ii + 1]].dimension() > 0) + || (ii == sorted_keys.size() - 1 && layer_idx < num_hidden_layers - 1)) { +#ifdef MBP_THREAD + int q_layer_idx, q_expert_id; + if (ii == sorted_keys.size() - 1 && layer_idx < num_hidden_layers - 1) { + q_layer_idx = layer_idx + 1; + q_expert_id = 0; + } else { + q_layer_idx = layer_idx; + q_expert_id = sorted_keys[ii + 1]; + } + LoadRequest req{q_layer_idx, q_expert_id}; + { + lock_guard lk(queue_mutex); + load_requests.push(req); + // std::cout << "load_requests.push: " << q_layer_idx << " " << q_expert_id << std::endl; + } + queue_cv.notify_one(); // 通知加载线程 +#else + if (ii < sorted_keys.size() - 1 && exp_token_idx_list[sorted_keys[ii + 1]].dimension() > 0) { + auto time_start___ = (mllm_time_us()); // ms + double time_start = (time_start___ - start_time) / 1000.0F; // ms + + experts[sorted_keys[ii + 1]].load(); + + dones[layer_idx][sorted_keys[ii + 1]].store(true, std::memory_order_release); + string expert_name = std::to_string(layer_idx) + "_" + std::to_string(sorted_keys[ii + 1]); + + auto time_end__ = (mllm_time_us()); // ms + double time_end = (time_end__ - start_time) / 1000.0F; // ms + load_times[expert_name] = {time_start, time_end}; + double tt_t = (time_end__ - time_start___) / 1000.0F; + // std::cout << "load: " << layer_idx << " " << sorted_keys[ii + 1] << " " << tt_t << std::endl; + } +#endif + } +#if defined(MBP_THREAD) && defined(MBP_THREAD_PP) + } + for (int ii = 0; ii < sorted_keys.size(); ii++) { + int expert_id = sorted_keys[ii]; + if (exp_token_idx_list.find(expert_id) == 
exp_token_idx_list.end()) continue; // 退出 +#endif + + // step.1 + double time_start_ = (mllm_time_us() - start_time) / 1000.0F; // ms + + auto exp_token_idx = exp_token_idx_list[expert_id]; //(1, 1, 1, 0) 1, 1, 1, e-s + auto exp_idx = exp_idx_list[expert_id]; //(1, 1, 1, 0) 1, 1, 1, e-s + auto expert_tokens = hidden_states.clip(exp_token_idx, SEQUENCE); //(1, 0, 1, hidden) 1, e-s, 1, hidden + auto topk_weight_clip = topk_weight.clip(exp_idx, SEQUENCE); //(1, 0, 1, 1) 1, e-s, 1, 1 + + string expert_name_ = std::to_string(layer_idx) + "_" + std::to_string(expert_id); + double time_end_ = (mllm_time_us() - start_time) / 1000.0F; // ms + expert_clip_times[expert_name_] = {time_start_, time_end_}; + +#ifdef MBP_THREAD + // std::cout << "wait: " << layer_idx << " " << expert_id << std::endl; + double time_start_w = (mllm_time_us() - start_time) / 1000.0F; // ms + if (layer_idx + ii > 0 && !experts[expert_id].loaded()) { + // std::cout << "wait-: " << layer_idx << " " << expert_id << std::endl; + unique_lock lock(*mtxs[layer_idx][expert_id]); // 局部锁 + cvs[layer_idx][expert_id]->wait(lock, [&] { + return dones[layer_idx][expert_id].load(memory_order_acquire); + }); + assert(dones[layer_idx][expert_id]); + } + double time_end_w = (mllm_time_us() - start_time) / 1000.0F; // ms + expert_wait_times[expert_name_] = {time_start_w, time_end_w}; + // std::cout << "waited: " << layer_idx << " " << expert_id << std::endl; +#endif + auto time_start__ = (mllm_time_us()); // ms + double time_start = (time_start__ - start_time) / 1000.0F; // ms + + // step.2 + auto expert_out = experts[expert_id]({expert_tokens})[0]; //(1, 0, 1, hidden) 1, e-s, 1, + expert_out = expert_out * topk_weight_clip; //(1, 0, 1, hidden) 1, e-s, 1, hidden + expert_cache.scatter_add(expert_out, exp_token_idx); // 1, batch*seq, 1, hidden + experts[expert_id].free(); + // std::cout << "free: " << layer_idx << " " << expert_id << std::endl; + + string expert_name = std::to_string(layer_idx) + "_" + 
std::to_string(expert_id); + auto time_end__ = (mllm_time_us()); // ms + double time_end = (time_end__ - start_time) / 1000.0F; // ms + expert_cal_times[expert_name] = {time_start, time_end}; + // std::cout << "calc: " << layer_idx << " " << expert_id << " " << (time_end__ - time_start__) / (1000.0F * expert_tokens.sequence()) << std::endl; +#ifdef MBP_THREAD + // std::cout << "dones: " << layer_idx << " " << expert_id << std::endl; + dones[layer_idx][expert_id] = false; // 重置状态 +#endif + } + return expert_cache; // 1, batch*seq, 1, hidden + } + + void load_experts(int expert_idx) { + int result; + experts[expert_idx].load(); + } + +private: + BailingMoeMLP shared_experts; + std::vector experts; + BailingMoeGate gate; + int num_shared_experts{}; + int num_experts_per_tok{}; + int num_hidden_layers{}; +}; + +class BailingMoeDecoder final : public Module { +public: + BailingMoeDecoder() = default; + BailingMoeDecoder(const BailingMoeConfig &config, const BailingMoeNameConfig &names, const string &base_name) { + self_atten = MultiHeadAttention(config.hidden_size, config.num_attention_heads, + config.num_key_value_heads, + config.hidden_size / config.num_attention_heads, + SPLIT_HD, PostQkv_NONE, false, + config.RoPE_type, config.rope_theta, config.max_position_embeddings, + config.cache_limit, config.use_cache, config.use_qkv_bias, config.use_bias, + config.attn_implementation, names, base_name + names._attn_base_name); + moe = BailingMoeSparseMoeBlock(config, names, base_name + names._ffn_base_name); + input_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._attn_norm_name); + post_attention_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._ffn_norm_name); + num_hidden_layers = config.num_hidden_layers; + } + + std::vector Forward(std::vector inputs, std::vector args) override { + auto hidden_states = input_layernorm(inputs[0]); + int layer_idx = std::any_cast(args[0]); + hidden_states = 
self_atten({hidden_states, hidden_states, hidden_states})[0]; + auto tmp = hidden_states + inputs[0]; + hidden_states = post_attention_layernorm(tmp); + hidden_states = moe({hidden_states}, layer_idx)[0]; + hidden_states = hidden_states + tmp; + return {hidden_states}; + } + + void load_experts(int expert_idx) { + moe.load_experts(expert_idx); + } + + MultiHeadAttention &get_attention() { + return self_atten; + } + +private: + MultiHeadAttention self_atten; + BailingMoeSparseMoeBlock moe; + Layer input_layernorm; + Layer post_attention_layernorm; + int num_hidden_layers; +}; + +class BailingMoeModel final : public Module { +public: + BailingMoeModel() = default; + BailingMoeModel(const BailingMoeConfig &config, const BailingMoeNameConfig &names, const string &base_name) { + blocks = List(config.num_hidden_layers, config, names, base_name); + norm = RMSNorm(config.hidden_size, config.rms_norm_eps, names.post_norm_name); + } + std::vector Forward(std::vector inputs, std::vector args) override { + auto hidden_states = inputs[0]; + int layer_idx = 0; + for (auto &block : blocks) { + hidden_states = block({hidden_states}, layer_idx)[0]; + layer_idx++; + } + hidden_states = norm(hidden_states); + return {hidden_states}; + } + + void load_experts(int layer_idx, int expert_idx) { + blocks[layer_idx].load_experts(expert_idx); + } + void clear_kvcache() override { + for (auto &block : blocks) { + auto kvcache = block.get_attention().get_cache(); + for (auto &cache : kvcache) { cache->clearCache(); } + auto ropes = block.get_attention().get_rope(); + for (auto &rope : ropes) { rope->clearCache(); } + } + } + +private: + std::vector blocks; + Layer norm; +}; + +class BailingMoeForCausalLM final : public Module { +public: + CHAINABLE_MODULE_METHODS(BailingMoeForCausalLM) + BailingMoeForCausalLM(BailingMoeConfig &config) { + auto names = config.names_config; + hidden_size = config.hidden_size; + embedding = Embedding(config.vocab_size, config.hidden_size, names.token_embd_name); 
+ model = BailingMoeModel(config, names, names.blk_name); + lm_head = Linear(config.hidden_size, config.vocab_size, false, names.lm_head_name); + } + + std::vector Forward(std::vector inputs, std::vector args) override { + std::vector outputs; + clearMBPtimes(); +#ifdef MBP_THREAD + start_time = mllm_time_us(); + mbp_finish.store(false, std::memory_order_relaxed); + if (inputs[0].dimension() == 1) { + // OMP: Info #276: omp_set_nested routine deprecated, please use omp_set_max_active_levels instead. + // omp_set_nested(1); // 等价于设置环境变量 OMP_NESTED=TRUE + omp_set_max_active_levels(2); // Enable OpenMP nesting +#pragma omp parallel num_threads(2) + if (omp_get_thread_num() == 0) { // 根据线程ID决定执行哪个函数 +#if defined(__ARM_NEON) && !defined(__APPLE__) + { + struct sched_param param; + param.sched_priority = 20; // 范围 1–99,根据设备可酌情调整 + pthread_setschedparam(pthread_self(), SCHED_FIFO, ¶m); + } + // ─── 2. 绑定到大核(big cluster)以减少与小核的资源争用 ────────────── + { + cpu_set_t cpuset; + CPU_ZERO(&cpuset); + // 假设大核是 CPU 2–3,按实际设备改为合适的核号 + CPU_SET(2, &cpuset); + CPU_SET(3, &cpuset); + // CPU_SET(6, &cpuset); // 假设小核心是CPU 6 + sched_setaffinity(pthread_self(), sizeof(cpuset), &cpuset); + // sched_setaffinity(gettid(), sizeof(cpu_set_t), &cpuset); + } +#endif + mbp_load(); + } else { + outputs = do_Forward(inputs, args); + } + } else { +#endif + outputs = do_Forward(inputs, args); +#ifdef MBP_THREAD + } +#endif + return outputs; + } + void clear_kvcache() override { + model.clear_kvcache(); + } + + std::vector do_Forward(std::vector inputs, std::vector args) { + auto x = embedding(inputs[0]); + auto outputs = model({x})[0]; + if (outputs.sequence() > 1) { + outputs = outputs.clip({}, {}, {-1}, {}); + } + outputs = lm_head(outputs); + +#ifdef MBP_THREAD + // 设置 mbp_finish 为 true,结束 mbp_load 线程 + // 1. 设置内存序保证可见性 + mbp_finish.store(true, std::memory_order_release); // 改为 release 内存序 + // 2. 主动唤醒所有等待线程 + { + std::lock_guard lk(queue_mutex); + queue_cv.notify_all(); // 必须加锁后通知 + } + // 3. 
添加二次状态检查(可选) + std::atomic_thread_fence(std::memory_order_seq_cst); + // std::cout << "do_Forward finish " << load_requests.size() << std::endl; +#endif + return {outputs}; + } + void load_experts(int layer_idx, int expert_idx) { + model.load_experts(layer_idx, expert_idx); + } + void mbp_load() { + while (!mbp_finish.load(std::memory_order_acquire)) { + std::unique_lock lk(queue_mutex); + queue_cv.wait(lk, [this] { + return !load_requests.empty() || mbp_finish.load(std::memory_order_acquire); + }); + + if (mbp_finish.load(std::memory_order_acquire)) { + break; + } + + while (!load_requests.empty()) { + auto req = load_requests.front(); + load_requests.pop(); + lk.unlock(); // 释放锁以便其他线程入队 + { // 执行加载 + std::unique_lock expert_lk(*mtxs[req.layer][req.expert]); + if (!dones[req.layer][req.expert].load(std::memory_order_acquire)) { + double time_start = (mllm_time_us() - start_time) / 1000.0F; // ms + + // std::cout << "load_requests.load_: " << req.layer << " " << req.expert << std::endl; + load_experts(req.layer, req.expert); + // std::cout << "load_requests.load_d: " << req.layer << " " << req.expert << std::endl; + dones[req.layer][req.expert].store(true, std::memory_order_release); + + string expert_name = std::to_string(req.layer) + "_" + std::to_string(req.expert); + double time_end = (mllm_time_us() - start_time) / 1000.0F; // ms + load_times[expert_name] = {time_start, time_end}; + } + } + cvs[req.layer][req.expert]->notify_all(); + lk.lock(); // 重新获取锁处理下一个请求 + } + } + // std::cout << "mbp_load finish" << std::endl; + } + +private: + int hidden_size; + bool tie_embedding_words; + Layer embedding; + Layer lm_head; + BailingMoeModel model; +}; diff --git a/mllm/models/ling/mbp/modeling_bailing_moe_mbp_e.hpp b/mllm/models/ling/mbp/modeling_bailing_moe_mbp_e.hpp new file mode 100644 index 000000000..096dfc0c8 --- /dev/null +++ b/mllm/models/ling/mbp/modeling_bailing_moe_mbp_e.hpp @@ -0,0 +1,555 @@ +#pragma once +#include "DataType.hpp" +#include "Layer.hpp" 
+#include "Module.hpp" +#include "Tensor.hpp" +#include "Trace.hpp" +#include "Types.hpp" +#include "../configuration_bailing_moe.hpp" +#include "settings_bailing_moe_mbp_e.hpp" +#include "models/transformer/modeling_transformer.hpp" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef _OPENMP +#include +#endif +#if defined(__ARM_NEON) && !defined(__APPLE__) +#include +#include +#endif +#define MBP_THREAD + +using namespace mllm; + +class BailingMoeMLP final : public Module { +public: + BailingMoeMLP() = default; + BailingMoeMLP(int hidden_size, int intermediate_size, const BailingMoeNameConfig &names, const std::string &base_name) { + gate_proj = Linear(hidden_size, intermediate_size, false, base_name + names._gate_proj_name); + silu = SiLU(base_name + "act"); + up_proj = Linear(hidden_size, intermediate_size, false, base_name + names._up_proj_name); + down_proj = Linear(intermediate_size, hidden_size, false, base_name + names._down_proj_name); + } + + std::vector Forward(std::vector inputs, std::vector args) override { + // 检查是否为 MoE 专家调用(需要 layer_idx 和 expert_idx) + if (args.size() >= 2) { + // MoE 专家模式:使用异步加载 + int layer_idx = std::any_cast(args[0]); + int expert_idx = std::any_cast(args[1]); + int next_expert_idx = args.size() > 2 ? std::any_cast(args[2]) : -1; + int next_layer_idx = args.size() > 3 ? 
std::any_cast(args[3]) : -1; + + // 等待gate_proj加载完成 +#ifdef MBP_THREAD + { + double wait_start_time = (mllm_time_us() - start_time) / 1000.0F; // ms + std::unique_lock lock(*proj_mtxs[layer_idx][expert_idx][0]); // gate_proj + proj_cvs[layer_idx][expert_idx][0]->wait(lock, [&] { + return proj_dones[layer_idx][expert_idx][0].load(std::memory_order_acquire) || gate_proj.loaded(); + }); + double wait_end_time = (mllm_time_us() - start_time) / 1000.0F; // ms + std::string wait_key = std::to_string(layer_idx) + "_" + std::to_string(expert_idx) + "_gate_wait"; + expert_wait_times[wait_key] = {wait_start_time, wait_end_time}; + } + // 计算gate_proj时异步加载up_proj (只有当前专家需要时才加载) + if (!proj_dones[layer_idx][expert_idx][1].load(std::memory_order_acquire) && !up_proj.loaded()) { + ProjectionLoadRequest req{layer_idx, expert_idx, 1}; // 1: up_proj + { + std::lock_guard lk(projection_queue_mutex); + projection_load_requests.push(req); + } + projection_queue_cv.notify_one(); + } +#endif + assert(gate_proj.loaded() && "gate_proj should be loaded"); + double gate_start_time = (mllm_time_us() - start_time) / 1000.0F; // ms + auto x = gate_proj(inputs[0]); + x = silu(x); + double gate_end_time = (mllm_time_us() - start_time) / 1000.0F; // ms + std::string gate_key = std::to_string(layer_idx) + "_" + std::to_string(expert_idx) + "_gate"; + expert_cal_times[gate_key] = {gate_start_time, gate_end_time}; + + // gate_proj计算完成后立即释放,然后请求down_proj +#ifdef MBP_THREAD + gate_proj.free(); + if (!proj_dones[layer_idx][expert_idx][2].load(std::memory_order_acquire) && !down_proj.loaded()) { + ProjectionLoadRequest req{layer_idx, expert_idx, 2}; // 2: down_proj + { + std::lock_guard lk(projection_queue_mutex); + projection_load_requests.push(req); + } + projection_queue_cv.notify_one(); + } +#endif + + // 等待up_proj加载完成并计算 +#ifdef MBP_THREAD + { + double wait_start_time = (mllm_time_us() - start_time) / 1000.0F; // ms + std::unique_lock lock(*proj_mtxs[layer_idx][expert_idx][1]); // up_proj + 
proj_cvs[layer_idx][expert_idx][1]->wait(lock, [&] { + return proj_dones[layer_idx][expert_idx][1].load(std::memory_order_acquire) || up_proj.loaded(); + }); + double wait_end_time = (mllm_time_us() - start_time) / 1000.0F; // ms + std::string wait_key = std::to_string(layer_idx) + "_" + std::to_string(expert_idx) + "_up_wait"; + expert_wait_times[wait_key] = {wait_start_time, wait_end_time}; + } +#endif + assert(up_proj.loaded() && "up_proj should be loaded"); + double up_start_time = (mllm_time_us() - start_time) / 1000.0F; // ms + auto y = up_proj(inputs[0]); + x = x * y; + double up_end_time = (mllm_time_us() - start_time) / 1000.0F; // ms + std::string up_key = std::to_string(layer_idx) + "_" + std::to_string(expert_idx) + "_up"; + expert_cal_times[up_key] = {up_start_time, up_end_time}; + + // up_proj计算完成后立即释放,然后请求下一个专家的gate_proj +#ifdef MBP_THREAD + up_proj.free(); + // 请求下一个专家的gate_proj (只有确定需要时才预加载) + if (next_expert_idx >= 0) { + // 检查下一个专家的gate_proj是否需要预加载 + if (!proj_dones[layer_idx][next_expert_idx][0].load(std::memory_order_acquire)) { + ProjectionLoadRequest req{layer_idx, next_expert_idx, 0}; // 0: gate_proj + { + std::lock_guard lk(projection_queue_mutex); + projection_load_requests.push(req); + } + projection_queue_cv.notify_one(); + } + } else if (next_layer_idx >= 0) { + // 检查下一层第一个专家的gate_proj是否需要预加载 + if (!proj_dones[next_layer_idx][0][0].load(std::memory_order_acquire)) { + ProjectionLoadRequest req{next_layer_idx, 0, 0}; // 下一层第一个专家的gate_proj + { + std::lock_guard lk(projection_queue_mutex); + projection_load_requests.push(req); + } + projection_queue_cv.notify_one(); + } + } +#endif + + // 等待down_proj加载完成并计算 +#ifdef MBP_THREAD + { + double wait_start_time = (mllm_time_us() - start_time) / 1000.0F; // ms + std::unique_lock lock(*proj_mtxs[layer_idx][expert_idx][2]); // down_proj + proj_cvs[layer_idx][expert_idx][2]->wait(lock, [&] { + return proj_dones[layer_idx][expert_idx][2].load(std::memory_order_acquire) || down_proj.loaded(); + }); + 
double wait_end_time = (mllm_time_us() - start_time) / 1000.0F; // ms + std::string wait_key = std::to_string(layer_idx) + "_" + std::to_string(expert_idx) + "_down_wait"; + expert_wait_times[wait_key] = {wait_start_time, wait_end_time}; + } +#endif + assert(down_proj.loaded() && "down_proj should be loaded"); + double down_start_time = (mllm_time_us() - start_time) / 1000.0F; // ms + x = down_proj(x); + double down_end_time = (mllm_time_us() - start_time) / 1000.0F; // ms + std::string down_key = std::to_string(layer_idx) + "_" + std::to_string(expert_idx) + "_down"; + expert_cal_times[down_key] = {down_start_time, down_end_time}; + + // down_proj计算完成后立即释放 +#ifdef MBP_THREAD + down_proj.free(); +#endif + return {x}; + } else { + // 普通 MLP 模式:直接计算,不使用异步加载 + auto x = gate_proj(inputs[0]); + x = silu(x); + auto y = up_proj(inputs[0]); + x = x * y; + x = down_proj(x); + return {x}; + } + } + + void load() { + gate_proj.load(); + up_proj.load(); + down_proj.load(); + } + + bool loaded() { + return gate_proj.loaded() && up_proj.loaded() && down_proj.loaded(); + } + + void free() { + gate_proj.free(); + up_proj.free(); + down_proj.free(); + } + + // 将成员变量改为公有,以便异步加载时访问 + Layer gate_proj; + Layer up_proj; + Layer down_proj; + +private: + Layer silu; +}; + +class BailingMoeGate final : public Module { +public: + BailingMoeGate() = default; + BailingMoeGate(const BailingMoeConfig &config, const BailingMoeNameConfig &names, const std::string &base_name) { + gate = Linear(config.hidden_size, config.num_experts, false, base_name + "gate"); + softmax = Softmax(DIMENSION, false, base_name + "softmax"); + num_experts_per_tok = config.num_experts_per_tok; + } + + std::vector Forward(std::vector inputs, std::vector args) override { + auto scores = softmax(gate(inputs[0])); + auto experts_w_i = Tensor::topk(scores, num_experts_per_tok, DIMENSION); + auto topk_weight = experts_w_i[0]; // 1, batch*seq, 1, k + auto topk_idx = experts_w_i[1]; // 1, batch*seq, 1, k + topk_idx = 
topk_idx.view(-1, 1, 1, -1); // 1, 1, 1, k* batch*seq + topk_weight = topk_weight / topk_weight.sum(DIMENSION); // 1, batch*seq, 1, k + return {scores, topk_weight, topk_idx}; + } + +private: + Layer gate; + Softmax softmax; + int num_experts_per_tok{}; +}; + +class BailingMoeSparseMoeBlock final : public Module { +public: + BailingMoeSparseMoeBlock() = default; + BailingMoeSparseMoeBlock(const BailingMoeConfig &config, const BailingMoeNameConfig &names, const string &base_name) { + experts = List(config.num_experts, config.hidden_size, config.moe_intermediate_size, names, base_name + "experts."); + gate = BailingMoeGate(config, names, base_name); + num_experts_per_tok = config.num_experts_per_tok; + num_shared_experts = config.num_shared_experts; + num_hidden_layers = config.num_hidden_layers; // 添加层数信息 + if (num_shared_experts > 0) { + shared_experts = BailingMoeMLP(config.hidden_size, + config.moe_intermediate_size * config.num_shared_experts, + names, base_name + "shared_experts."); + } + } + + // receive embeds + std::vector Forward(std::vector inputs, std::vector args) override { + auto hidden_states = inputs[0]; + auto identity = hidden_states; + if (hidden_states.batch() > 1) { + hidden_states = hidden_states.view(1, -1, ANYDIM, -1); // 1, batch*seq, 1, hidden + } + auto gates_t = gate({hidden_states}); // 1, batch*seq, 1, num_experts + auto scores = gates_t[0]; // 1, batch*seq, 1, num_experts + auto topk_weight = gates_t[1]; // 1, batch*seq, + auto topk_idx = gates_t[2]; // 1, batch*seq, 1, k + + // 获取层索引用于异步加载 + int layer_idx = args.size() > 0 ? 
std::any_cast(args[0]) : 0; + hidden_states = moe_infer(hidden_states, topk_weight, topk_idx, layer_idx); // 1, batch*seq, 1, hidden + + if (num_shared_experts) { + hidden_states = hidden_states + shared_experts({identity})[0]; // add shared experts + } + if (hidden_states.batch() > 1) { + // expert_cache.view(ANYDIM, seq, -1, -1);//TODO + } + return {hidden_states}; + } + + Tensor moe_infer(Tensor hidden_states, Tensor &topk_weight, Tensor &topk_idx, int layer_idx = 0) { + auto dtype = topk_idx.dtype(); + auto device = topk_idx.device(); + topk_idx = topk_idx.fp32().cpu(); + auto idxs = topk_idx.argsort(); // 1, 1, 1, k* batch*seq + auto tokens_per_expert = topk_idx.bincount(); // (1, 1, 1, 0) 1, 1, 1, k + idxs = idxs.to(device).to(dtype); + auto token_idxs = idxs / num_experts_per_tok; // 1, 1, 1, k* batch*seq + int start_idx = 0; + int end_idx = start_idx; + auto expert_cache = Tensor::zero_like(hidden_states); // 1, batch*seq, 1, hidden + + // 收集要处理的专家,并存储相关数据 + std::map exp_token_idx_list; + std::map exp_idx_list; + std::vector sorted_keys; + + start_idx = 0; + for (int i = 0; i < experts.size(); ++i) { + if (tokens_per_expert.dimension() != 0 && i >= tokens_per_expert.dimension()) + break; + int this_token_num = tokens_per_expert.dimension() ? 
tokens_per_expert.d(0, 0, 0, i) : 0; + if (!this_token_num) continue; + end_idx = start_idx + this_token_num; + auto exp_token_idx = token_idxs.clip({}, {}, {}, {start_idx, end_idx}); //(1, 1, 1, 0) 1, 1, 1, e-s + auto exp_idx = idxs.clip({}, {}, {}, {start_idx, end_idx}); //(1, 1, 1, 0) 1, 1, 1, e-s + if (topk_weight.dimension() != 1) { topk_weight = topk_weight.view(-1, -1, 1, 1); } // 1, k* batch*seq, 1, 1 + exp_token_idx_list[i] = exp_token_idx; + sorted_keys.push_back(i); + exp_idx_list[i] = exp_idx; + start_idx = end_idx; + } + + if (!sorted_keys.empty()) { + // 为第一个专家预加载gate_proj + if (!experts[sorted_keys[0]].gate_proj.loaded()) { + double time_start = (mllm_time_us() - start_time) / 1000.0F; // ms + experts[sorted_keys[0]].gate_proj.load(); + std::string expert_name = std::to_string(layer_idx) + "_" + std::to_string(sorted_keys[0]) + "_gate"; + double time_end = (mllm_time_us() - start_time) / 1000.0F; // ms + proj_load_times[expert_name] = {time_start, time_end}; + } +#ifdef MBP_THREAD + // 标记第一个专家的gate_proj为已加载 + proj_dones[layer_idx][sorted_keys[0]][0].store(true, std::memory_order_release); + proj_cvs[layer_idx][sorted_keys[0]][0]->notify_all(); +#endif + } + + for (int ii = 0; ii < sorted_keys.size(); ii++) { + int expert_id = sorted_keys[ii]; + if (exp_token_idx_list.find(expert_id) == exp_token_idx_list.end()) continue; // 退出 + if (Module::doLoad) continue; // 退出 + + // step.1 - 准备输入数据 + double time_start_ = (mllm_time_us() - start_time) / 1000.0F; // ms + + auto exp_token_idx = exp_token_idx_list[expert_id]; //(1, 1, 1, 0) 1, 1, 1, e-s + auto exp_idx = exp_idx_list[expert_id]; //(1, 1, 1, 0) 1, 1, 1, e-s + auto expert_tokens = hidden_states.clip(exp_token_idx, SEQUENCE); //(1, 0, 1, hidden) 1, e-s, 1, hidden + auto topk_weight_clip = topk_weight.clip(exp_idx, SEQUENCE); //(1, 0, 1, 1) 1, e-s, 1, 1 + + std::string expert_name_ = std::to_string(layer_idx) + "_" + std::to_string(expert_id); + double time_end_ = (mllm_time_us() - start_time) / 1000.0F; 
// ms + expert_clip_times[expert_name_] = {time_start_, time_end_}; + + auto time_start__ = (mllm_time_us()); // ms + double time_start = (time_start__ - start_time) / 1000.0F; // ms + + // step.2 - 执行专家计算(包含投影层级异步加载) + // 准备下一个专家信息 + std::vector mlp_args = {layer_idx, expert_id}; + if (ii < sorted_keys.size() - 1 && exp_token_idx_list[sorted_keys[ii + 1]].dimension() > 0) { + mlp_args.push_back(sorted_keys[ii + 1]); // next_expert_idx + mlp_args.push_back(-1); // next_layer_idx + } else if (ii == sorted_keys.size() - 1 && layer_idx < num_hidden_layers - 1) { + mlp_args.push_back(-1); // next_expert_idx + mlp_args.push_back(layer_idx + 1); // next_layer_idx + } else { + mlp_args.push_back(-1); // next_expert_idx + mlp_args.push_back(-1); // next_layer_idx + } + + auto expert_out = experts[expert_id]({expert_tokens}, mlp_args)[0]; //(1, 0, 1, hidden) 1, e-s, 1, + expert_out = expert_out * topk_weight_clip; //(1, 0, 1, hidden) 1, e-s, 1, hidden + expert_cache.scatter_add(expert_out, exp_token_idx); // 1, batch*seq, 1, hidden + experts[expert_id].free(); + + std::string expert_name = std::to_string(layer_idx) + "_" + std::to_string(expert_id); + auto time_end__ = (mllm_time_us()); // ms + double time_end = (time_end__ - start_time) / 1000.0F; // ms + +#ifdef MBP_THREAD + // 重置投影层状态 + for (int proj_type = 0; proj_type < 3; ++proj_type) { + proj_dones[layer_idx][expert_id][proj_type].store(false, std::memory_order_relaxed); + } +#endif + } + return expert_cache; // 1, batch*seq, 1, hidden + } + + void load_experts(int expert_idx, int flag = -1) { + switch (flag) { + case -1: { + experts[expert_idx].gate_proj.load(); + experts[expert_idx].up_proj.load(); + experts[expert_idx].down_proj.load(); + break; + } + case 0: { + experts[expert_idx].gate_proj.load(); + break; + } + case 1: { + experts[expert_idx].up_proj.load(); + break; + } + case 2: { + experts[expert_idx].down_proj.load(); + break; + } + default: + break; + } + } + +private: + BailingMoeMLP shared_experts; + 
std::vector experts; + BailingMoeGate gate; + int num_shared_experts{}; + int num_experts_per_tok{}; + int num_hidden_layers{}; // 添加层数信息 +}; + +class BailingMoeDecoder final : public Module { +public: + BailingMoeDecoder() = default; + BailingMoeDecoder(const BailingMoeConfig &config, const BailingMoeNameConfig &names, const string &base_name) { + self_atten = MultiHeadAttention(config.hidden_size, config.num_attention_heads, + config.num_key_value_heads, + config.hidden_size / config.num_attention_heads, + SPLIT_HD, PostQkv_NONE, false, + config.RoPE_type, config.rope_theta, + config.max_position_embeddings, + config.cache_limit, config.use_cache, config.use_qkv_bias, config.use_bias, + config.attn_implementation, names, base_name + names._attn_base_name); + moe = BailingMoeSparseMoeBlock(config, names, base_name + names._ffn_base_name); + input_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._attn_norm_name); + post_attention_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._ffn_norm_name); + num_hidden_layers = config.num_hidden_layers; + } + + std::vector Forward(std::vector inputs, std::vector args) override { + // 获取层索引,用于传递给 MoE + int layer_idx = args.size() > 0 ? 
std::any_cast(args[0]) : 0; + + auto hidden_states = input_layernorm(inputs[0]); + hidden_states = self_atten({hidden_states, hidden_states, hidden_states})[0]; + auto tmp = hidden_states + inputs[0]; + hidden_states = post_attention_layernorm(tmp); + + // 传递层索引给 MoE + std::vector moe_args = {layer_idx}; + hidden_states = moe({hidden_states}, moe_args)[0]; + hidden_states = hidden_states + tmp; + return {hidden_states}; + } + + MultiHeadAttention &get_attention() { + return self_atten; + } + + void load_experts(int expert_idx, int projection_type) { + moe.load_experts(expert_idx, projection_type); + } + +private: + MultiHeadAttention self_atten; + BailingMoeSparseMoeBlock moe; + Layer input_layernorm; + Layer post_attention_layernorm; + int num_hidden_layers; +}; + +class BailingMoeModel final : public Module { +public: + BailingMoeModel() = default; + BailingMoeModel(const BailingMoeConfig &config, const BailingMoeNameConfig &names, const string &base_name) { + blocks = List(config.num_hidden_layers, config, names, base_name); + norm = RMSNorm(config.hidden_size, config.rms_norm_eps, names.post_norm_name); + } + std::vector Forward(std::vector inputs, std::vector args) override { + auto hidden_states = inputs[0]; + for (int i = 0; i < blocks.size(); ++i) { + // 传递层索引给每个decoder block + std::vector block_args = {i}; + hidden_states = blocks[i]({hidden_states}, block_args)[0]; + } + hidden_states = norm(hidden_states); + return {hidden_states}; + } + + void load_experts(int layer_idx, int expert_idx, int projection_type) { + if (layer_idx >= 0 && layer_idx < blocks.size()) { + blocks[layer_idx].load_experts(expert_idx, projection_type); + } + } + + void clear_kvcache() override { + for (auto &block : blocks) { + auto kvcache = block.get_attention().get_cache(); + for (auto &cache : kvcache) { cache->clearCache(); } + auto ropes = block.get_attention().get_rope(); + for (auto &rope : ropes) { rope->clearCache(); } + } + } + +private: + std::vector blocks; + Layer 
norm; +}; + +class BailingMoeForCausalLM final : public Module { +public: + CHAINABLE_MODULE_METHODS(BailingMoeForCausalLM) + BailingMoeForCausalLM(BailingMoeConfig &config) { + dtype = config.dtype; + auto names = config.names_config; + hidden_size = config.hidden_size; + embedding = Embedding(config.vocab_size, config.hidden_size, names.token_embd_name); + model = BailingMoeModel(config, names, names.blk_name); + lm_head = Linear(config.hidden_size, config.vocab_size, false, names.lm_head_name); + + // 初始化异步加载相关设置 + num_layers = config.num_hidden_layers; + num_experts = config.num_experts; + ling_mbp_init(num_layers, num_experts); + } + + std::vector Forward(std::vector inputs, std::vector args) override { + clearMBPtimes(); + start_time = mllm_time_us(); + + auto x = embedding(inputs[0]).to(dtype); + std::vector empty_args; // 为 model 创建空的参数 + auto outputs = model({x}, empty_args)[0]; + if (outputs.sequence() > 1) { + outputs = outputs.clip({}, {}, {-1}, {}); + } + outputs = lm_head(outputs); + return {outputs}; + } + + void load_projection(int layer_idx, int expert_idx, int projection_type) { + switch (projection_type) { + case 0: // gate_proj + model.load_experts(layer_idx, expert_idx, 0); + break; + case 1: // up_proj + model.load_experts(layer_idx, expert_idx, 1); + break; + case 2: // down_proj + model.load_experts(layer_idx, expert_idx, 2); + break; + default: + model.load_experts(layer_idx, expert_idx, -1); + break; + } + } + + void clear_kvcache() override { + model.clear_kvcache(); + } + +private: + int hidden_size; + bool tie_embedding_words; + Layer embedding; + Layer lm_head; + BailingMoeModel model; + DataType dtype; + int num_layers{}; + int num_experts{}; +}; diff --git a/mllm/models/ling/mbp/modeling_bailing_moe_mbppip.hpp b/mllm/models/ling/mbp/modeling_bailing_moe_mbppip.hpp new file mode 100644 index 000000000..13b1e2d83 --- /dev/null +++ b/mllm/models/ling/mbp/modeling_bailing_moe_mbppip.hpp @@ -0,0 +1,566 @@ +#pragma once +#include 
"Layer.hpp" +#include "Module.hpp" +#include "Tensor.hpp" +#include "Trace.hpp" +#include "Types.hpp" +#include "../configuration_bailing_moe.hpp" +#include "settings_bailing_moe_mbp.hpp" +#include "models/transformer/modeling_transformer.hpp" +#include +#include +#include +#include +#include + +#define MBP_THREAD + +using namespace mllm; + +class BailingMoeMLP final : public Module { +public: + BailingMoeMLP() = default; + BailingMoeMLP(int hidden_size, int intermediate_size, const BailingMoeNameConfig &names, const std::string &base_name) { + gate_proj = Linear(hidden_size, intermediate_size, false, base_name + names._gate_proj_name); + silu = SiLU(base_name + "act"); + up_proj = Linear(hidden_size, intermediate_size, false, base_name + names._up_proj_name); + down_proj = Linear(intermediate_size, hidden_size, false, base_name + names._down_proj_name); + } + + std::vector Forward(std::vector inputs, std::vector args) override { + auto x = gate_proj(inputs[0]); + x = silu(x); + auto y = up_proj(inputs[0]); + x = x * y; + x = down_proj(x); + return {x}; + } + + void load() { + gate_proj.load(); + up_proj.load(); + down_proj.load(); + } + bool loaded() { + return gate_proj.loaded() && up_proj.loaded() && down_proj.loaded(); + } + void free() { + gate_proj.free(); + up_proj.free(); + down_proj.free(); + } + +private: + Layer gate_proj; + Layer up_proj; + Layer down_proj; + Layer silu; +}; + +class BailingMoeGate final : public Module { +public: + BailingMoeGate() = default; + BailingMoeGate(const BailingMoeConfig &config, const BailingMoeNameConfig &names, const std::string &base_name) { + gate = Linear(config.hidden_size, config.num_experts, false, base_name + "gate"); + softmax = Softmax(DIMENSION, false, base_name + "softmax"); + num_experts_per_tok = config.num_experts_per_tok; + } + + std::vector Forward(std::vector inputs, std::vector args) override { + auto scores = softmax(gate(inputs[0])); + auto experts_w_i = Tensor::topk(scores, num_experts_per_tok, 
DIMENSION); + auto topk_weight = experts_w_i[0]; // 1, batch*seq, 1, k + auto topk_idx = experts_w_i[1]; // 1, batch*seq, 1, k + topk_idx = topk_idx.view(-1, 1, 1, -1); // 1, 1, 1, k* batch*seq + topk_weight = topk_weight / topk_weight.sum(DIMENSION); // 1, batch*seq, 1, k + return {scores, topk_weight, topk_idx}; + } + +private: + Layer gate; + Softmax softmax; + int num_experts_per_tok{}; +}; + +class BailingMoeSparseMoeBlock final : public Module { +public: + BailingMoeSparseMoeBlock() = default; + BailingMoeSparseMoeBlock(const BailingMoeConfig &config, const BailingMoeNameConfig &names, const string &base_name) { + experts = List(config.num_experts, config.hidden_size, config.moe_intermediate_size, names, base_name + "experts."); + gate = BailingMoeGate(config, names, base_name); + num_experts_per_tok = config.num_experts_per_tok; + num_shared_experts = config.num_shared_experts; + if (num_shared_experts > 0) { + shared_experts = BailingMoeMLP(config.hidden_size, + config.moe_intermediate_size * config.num_shared_experts, + names, base_name + "shared_experts."); + } + num_hidden_layers = config.num_hidden_layers; + } + // receive embeds + std::vector Forward(std::vector inputs, std::vector args) override { + int layer_idx = std::any_cast(args[0]); + auto hidden_states = inputs[0]; + auto identity = hidden_states; + if (hidden_states.batch() > 1) { + hidden_states = hidden_states.view(1, -1, ANYDIM, -1); // 1, batch*seq, 1, hidden + } + auto gates_t = gate({hidden_states}); // 1, batch*seq, 1, num_experts + auto scores = gates_t[0]; // 1, batch*seq, 1, num_experts + auto topk_weight = gates_t[1]; // 1, batch*seq, + auto topk_idx = gates_t[2]; // 1, batch*seq, 1, k + hidden_states = moe_infer(hidden_states, topk_weight, topk_idx, layer_idx); // 1, batch*seq, 1, hidden + if (num_shared_experts) { + hidden_states = hidden_states + shared_experts({identity})[0]; // add shared experts + } + if (hidden_states.batch() > 1) { + // expert_cache.view(ANYDIM, seq, -1, 
-1);//TODO + } + return {hidden_states}; + } + Tensor moe_infer(Tensor hidden_states, + Tensor &topk_weight, + Tensor &topk_idx, + int layer_idx) { + auto dtype = topk_idx.dtype(); + auto device = topk_idx.device(); + topk_idx = topk_idx.fp32().cpu(); + auto idxs = topk_idx.argsort(); // 1, 1, 1, k* batch*seq + auto tokens_per_expert = topk_idx.bincount(); // (1, 1, 1, 0) 1, 1, 1, k + idxs = idxs.to(device).to(dtype); + auto token_idxs = idxs / num_experts_per_tok; + int start_idx = 0; + int end_idx = start_idx; + auto expert_cache = Tensor::zero_like(hidden_states); + map exp_token_idx_list, exp_idx_list; + std::vector sorted_keys; + for (int i = 0; i < experts.size(); ++i) { + if (i >= tokens_per_expert.dimension()) break; + int this_token_num = tokens_per_expert.dimension() ? tokens_per_expert.d(0, 0, 0, i) : 0; + if (!this_token_num) continue; + end_idx = start_idx + this_token_num; + auto exp_token_idx = token_idxs.clip({}, {}, {}, {start_idx, end_idx}); + auto exp_idx = idxs.clip({}, {}, {}, {start_idx, end_idx}); + if (topk_weight.dimension() != 1) { topk_weight = topk_weight.view(-1, -1, 1, 1); } // 1, k* batch*seq, 1, 1 + exp_token_idx_list[i] = exp_token_idx; + sorted_keys.push_back(i); + exp_idx_list[i] = exp_idx; + start_idx = end_idx; + } + if (!sorted_keys.empty()) { + int mv_i = 0; + if (std::find(sorted_keys.begin(), sorted_keys.end(), mv_i) != sorted_keys.end()) { + sorted_keys.erase(std::remove(sorted_keys.begin(), sorted_keys.end(), mv_i), sorted_keys.end()); + sorted_keys.insert(sorted_keys.begin(), mv_i); + } + } + + if (sorted_keys.empty() || Module::doLoad) { + return expert_cache; + } + +#ifdef MBP_THREAD + // 步骤 1: 启动流水线 - 预先为第一个专家派发任务 + { + int first_expert_id = sorted_keys[0]; + // 派发加载任务 + if (!experts[first_expert_id].loaded()) { + LoadRequest req{layer_idx, first_expert_id}; + lock_guard lk(queue_mutex); + load_requests.push(req); + queue_cv.notify_one(); + } + // 派发裁剪任务 + ClipRequest req{ + layer_idx, first_expert_id, hidden_states, + 
exp_token_idx_list[first_expert_id], topk_weight, exp_idx_list[first_expert_id]}; + lock_guard lk(clip_queue_mutex); + clip_requests.push(req); + clip_queue_cv.notify_one(); + } +#endif + + // 步骤 2: 循环处理 + for (int ii = 0; ii < sorted_keys.size(); ii++) { + int expert_id = sorted_keys[ii]; + string expert_name = std::to_string(layer_idx) + "_" + std::to_string(expert_id); + +#ifdef MBP_THREAD + // A. [预取] 为下一个专家 (ii+1) 派发任务 + bool is_last_expert_in_layer = (ii == sorted_keys.size() - 1); + if (!is_last_expert_in_layer || (is_last_expert_in_layer && layer_idx < num_hidden_layers - 1)) { + int q_layer_idx, q_expert_id; + bool should_dispatch_clip = false; + + if (is_last_expert_in_layer) { // 如果是本层最后一个,预取下一层的 expert 0 + q_layer_idx = layer_idx + 1; + q_expert_id = 0; + // 对于下一层的专家,我们无法知道它是否有token,因此不派发裁剪任务 + should_dispatch_clip = false; + } else { // 否则,预取本层的下一个专家 + q_layer_idx = layer_idx; + q_expert_id = sorted_keys[ii + 1]; + // 仅当该专家确实需要处理时,才派发裁剪任务 + should_dispatch_clip = exp_token_idx_list.count(q_expert_id) > 0; + } + + // 派发加载任务 + LoadRequest load_req{q_layer_idx, q_expert_id}; + lock_guard load_lk(queue_mutex); + load_requests.push(load_req); + queue_cv.notify_one(); + + // 根据判断条件派发裁剪任务 + if (should_dispatch_clip) { + ClipRequest clip_req{ + q_layer_idx, q_expert_id, hidden_states, + exp_token_idx_list[q_expert_id], topk_weight, exp_idx_list[q_expert_id]}; + lock_guard clip_lk(clip_queue_mutex); + clip_requests.push(clip_req); + clip_queue_cv.notify_one(); + } + } + + // B. 
[等待] 等待当前专家 (ii) 的任务完成 + // 等待加载 + double time_start_w = (mllm_time_us() - start_time) / 1000.0F; + if (!experts[expert_id].loaded()) { + unique_lock lock(*mtxs[layer_idx][expert_id]); + cvs[layer_idx][expert_id]->wait(lock, [&] { return dones[layer_idx][expert_id].load(memory_order_acquire); }); + } + double time_end_w = (mllm_time_us() - start_time) / 1000.0F; + expert_wait_times[expert_name] = {time_start_w, time_end_w}; + + // 等待裁剪并获取结果 + Tensor expert_tokens, topk_weight_clip; + { + unique_lock lock(*clip_mtxs[layer_idx][expert_id]); + clip_cvs[layer_idx][expert_id]->wait(lock, [&] { return clip_dones[layer_idx][expert_id].load(memory_order_acquire); }); + + std::lock_guard result_lk(clip_results_mutex); + auto &clipped_pair = clipped_data.at(expert_name); + expert_tokens = clipped_pair.first; + topk_weight_clip = clipped_pair.second; + } +#else + // 非多线程模式,直接加载和裁剪 + if (!experts[expert_id].loaded()) experts[expert_id].load(); + auto expert_tokens = hidden_states.clip(exp_token_idx_list[expert_id], SEQUENCE); + auto topk_weight_clip = topk_weight.clip(exp_idx_list[expert_id], SEQUENCE); +#endif + + // C. [计算] 使用准备好的数据进行计算 + double time_start_cal = (mllm_time_us() - start_time) / 1000.0F; + auto expert_out = experts[expert_id]({expert_tokens})[0]; + expert_out = expert_out * topk_weight_clip; + expert_cache.scatter_add(expert_out, exp_token_idx_list[expert_id]); + double time_end_cal = (mllm_time_us() - start_time) / 1000.0F; + expert_cal_times[expert_name] = {time_start_cal, time_end_cal}; + + // D. 
[清理] 清理当前专家的资源 + experts[expert_id].free(); +#ifdef MBP_THREAD + { + std::lock_guard result_lk(clip_results_mutex); + clipped_data.erase(expert_name); + } + // std::cout << clipped_data.size() << std::endl; + clip_dones[layer_idx][expert_id] = false; + dones[layer_idx][expert_id] = false; +#endif + } + return expert_cache; + } + + void load_experts(int expert_idx) { + int result; + experts[expert_idx].load(); + } + +private: + BailingMoeMLP shared_experts; + std::vector experts; + BailingMoeGate gate; + int num_shared_experts{}; + int num_experts_per_tok{}; + int num_hidden_layers{}; +}; + +class BailingMoeDecoder final : public Module { +public: + BailingMoeDecoder() = default; + BailingMoeDecoder(const BailingMoeConfig &config, const BailingMoeNameConfig &names, const string &base_name) { + self_atten = MultiHeadAttention(config.hidden_size, config.num_attention_heads, + config.num_key_value_heads, + config.hidden_size / config.num_attention_heads, + SPLIT_HD, PostQkv_NONE, false, + config.RoPE_type, config.rope_theta, config.max_position_embeddings, + config.cache_limit, config.use_cache, config.use_qkv_bias, config.use_bias, + config.attn_implementation, names, base_name + names._attn_base_name); + moe = BailingMoeSparseMoeBlock(config, names, base_name + names._ffn_base_name); + input_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._attn_norm_name); + post_attention_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._ffn_norm_name); + num_hidden_layers = config.num_hidden_layers; + } + + std::vector Forward(std::vector inputs, std::vector args) override { + auto hidden_states = input_layernorm(inputs[0]); + int layer_idx = std::any_cast(args[0]); + hidden_states = self_atten({hidden_states, hidden_states, hidden_states})[0]; + auto tmp = hidden_states + inputs[0]; + hidden_states = post_attention_layernorm(tmp); + hidden_states = moe({hidden_states}, layer_idx)[0]; + hidden_states = hidden_states + 
tmp; + return {hidden_states}; + } + + void load_experts(int expert_idx) { + moe.load_experts(expert_idx); + } + + MultiHeadAttention &get_attention() { + return self_atten; + } + +private: + MultiHeadAttention self_atten; + BailingMoeSparseMoeBlock moe; + Layer input_layernorm; + Layer post_attention_layernorm; + int num_hidden_layers; +}; + +class BailingMoeModel final : public Module { +public: + BailingMoeModel() = default; + BailingMoeModel(const BailingMoeConfig &config, const BailingMoeNameConfig &names, const string &base_name) { + blocks = List(config.num_hidden_layers, config, names, base_name); + norm = RMSNorm(config.hidden_size, config.rms_norm_eps, names.post_norm_name); + } + std::vector Forward(std::vector inputs, std::vector args) override { + auto hidden_states = inputs[0]; + int layer_idx = 0; + for (auto &block : blocks) { + hidden_states = block({hidden_states}, layer_idx)[0]; + layer_idx++; + } + hidden_states = norm(hidden_states); + return {hidden_states}; + } + + void load_experts(int layer_idx, int expert_idx) { + blocks[layer_idx].load_experts(expert_idx); + } + void clear_kvcache() override { + for (auto &block : blocks) { + auto kvcache = block.get_attention().get_cache(); + for (auto &cache : kvcache) { cache->clearCache(); } + auto ropes = block.get_attention().get_rope(); + for (auto &rope : ropes) { rope->clearCache(); } + } + } + +private: + std::vector blocks; + Layer norm; +}; + +class BailingMoeForCausalLM final : public Module { +public: + CHAINABLE_MODULE_METHODS(BailingMoeForCausalLM) + BailingMoeForCausalLM(BailingMoeConfig &config) { + auto names = config.names_config; + hidden_size = config.hidden_size; + embedding = Embedding(config.vocab_size, config.hidden_size, names.token_embd_name); + model = BailingMoeModel(config, names, names.blk_name); + lm_head = Linear(config.hidden_size, config.vocab_size, false, names.lm_head_name); + } + + std::vector Forward(std::vector inputs, std::vector args) override { + std::vector 
outputs; + clearMBPtimes(); +#ifdef MBP_THREAD + start_time = mllm_time_us(); + mbp_finish.store(false, std::memory_order_relaxed); + if (inputs[0].dimension() == 1) { + omp_set_max_active_levels(2); // Enable OpenMP nesting +#pragma omp parallel num_threads(3) + if (omp_get_thread_num() == 0) { // 根据线程ID决定执行哪个函数 +#if defined(__ARM_NEON) && !defined(__APPLE__) + { + struct sched_param param; + param.sched_priority = 21; // 范围 1–99,根据设备可酌情调整 + pthread_setschedparam(pthread_self(), SCHED_FIFO, ¶m); + } + // ─── 2. 绑定到大核(big cluster)以减少与小核的资源争用 ────────────── + { + cpu_set_t cpuset; + CPU_ZERO(&cpuset); + CPU_SET(2, &cpuset); + sched_setaffinity(pthread_self(), sizeof(cpuset), &cpuset); + } +#endif + mbp_load(); + } else if (omp_get_thread_num() == 1) { // 线程1: 裁剪 (新增) + +#if defined(__ARM_NEON) && !defined(__APPLE__) + { + struct sched_param param; + param.sched_priority = 20; // 范围 1–99,根据设备可酌情调整 + pthread_setschedparam(pthread_self(), SCHED_FIFO, ¶m); + } + // ─── 2. 绑定到大核(big cluster)以减少与小核的资源争用 ────────────── + { + cpu_set_t cpuset; + CPU_ZERO(&cpuset); + CPU_SET(3, &cpuset); + sched_setaffinity(pthread_self(), sizeof(cpuset), &cpuset); + } +#endif + mbp_clip(); + } else { + // #if defined(__ARM_NEON) && !defined(__APPLE__) + // { + // struct sched_param param; + // param.sched_priority = 22; // 范围 1–99,根据设备可酌情调整 + // pthread_setschedparam(pthread_self(), SCHED_FIFO, ¶m); + // } + // // ─── 2. 
绑定到大核(big cluster)以减少与小核的资源争用 ────────────── + // { + // cpu_set_t cpuset; + // CPU_ZERO(&cpuset); + // CPU_SET(7, &cpuset); + // sched_setaffinity(pthread_self(), sizeof(cpuset), &cpuset); + // } + // #endif + outputs = do_Forward(inputs, args); + } + } else { +#endif + outputs = do_Forward(inputs, args); +#ifdef MBP_THREAD + } +#endif + return outputs; + } + void clear_kvcache() override { + model.clear_kvcache(); + } + + std::vector do_Forward(std::vector inputs, std::vector args) { + auto x = embedding(inputs[0]); + auto outputs = model({x})[0]; + if (outputs.sequence() > 1) { + outputs = outputs.clip({}, {}, {-1}, {}); + } + outputs = lm_head(outputs); + +#ifdef MBP_THREAD + // 设置 mbp_finish 为 true,结束 mbp_load 线程 + // 1. 设置内存序保证可见性 + mbp_finish.store(true, std::memory_order_release); // 改为 release 内存序 + // 2. 主动唤醒所有等待线程 + { + std::lock_guard lk(queue_mutex); + queue_cv.notify_all(); // 必须加锁后通知 + } + { + std::lock_guard lk(clip_queue_mutex); + clip_queue_cv.notify_all(); // [新增] 唤醒 clip 线程 + } + // 3. 
添加二次状态检查 + std::atomic_thread_fence(std::memory_order_seq_cst); + // std::cout << "do_Forward finish " << load_requests.size() << std::endl; +#endif + return {outputs}; + } + void load_experts(int layer_idx, int expert_idx) { + model.load_experts(layer_idx, expert_idx); + } + void mbp_load() { + while (!mbp_finish.load(std::memory_order_acquire)) { + std::unique_lock lk(queue_mutex); + queue_cv.wait(lk, [this] { + return !load_requests.empty() || mbp_finish.load(std::memory_order_acquire); + }); + + if (mbp_finish.load(std::memory_order_acquire)) { + break; + } + + while (!load_requests.empty()) { + auto req = load_requests.front(); + load_requests.pop(); + lk.unlock(); // 释放锁以便其他线程入队 + { // 执行加载 + std::unique_lock expert_lk(*mtxs[req.layer][req.expert]); + if (!dones[req.layer][req.expert].load(std::memory_order_acquire)) { + double time_start = (mllm_time_us() - start_time) / 1000.0F; // ms + + // std::cout << "load_requests.load_: " << req.layer << " " << req.expert << std::endl; + load_experts(req.layer, req.expert); + // std::cout << "load_requests.load_d: " << req.layer << " " << req.expert << std::endl; + dones[req.layer][req.expert].store(true, std::memory_order_release); + + string expert_name = std::to_string(req.layer) + "_" + std::to_string(req.expert); + double time_end = (mllm_time_us() - start_time) / 1000.0F; // ms + load_times[expert_name] = {time_start, time_end}; + } + } + cvs[req.layer][req.expert]->notify_all(); + lk.lock(); // 重新获取锁处理下一个请求 + } + } + // std::cout << "mbp_load finish" << std::endl; + } + void mbp_clip() { + while (!mbp_finish.load(std::memory_order_acquire)) { + std::unique_lock lk(clip_queue_mutex); + clip_queue_cv.wait(lk, [this] { + return !clip_requests.empty() || mbp_finish.load(std::memory_order_acquire); + }); + + if (mbp_finish.load(std::memory_order_acquire)) { + break; + } + + while (!clip_requests.empty()) { + auto req = clip_requests.front(); + clip_requests.pop(); + lk.unlock(); + + string expert_name = 
std::to_string(req.layer) + "_" + std::to_string(req.expert); + + // --- 执行裁剪 --- + double time_start_ = (mllm_time_us() - start_time) / 1000.0F; + auto expert_tokens = req.hidden_states.clip(req.exp_token_idx, SEQUENCE); + auto topk_weight_clip = req.topk_weight.clip(req.exp_idx, SEQUENCE); + double time_end_ = (mllm_time_us() - start_time) / 1000.0F; + expert_clip_times[expert_name] = {time_start_, time_end_}; + + // --- 存储结果 --- + { + std::lock_guard result_lk(clip_results_mutex); + clipped_data[expert_name] = {expert_tokens, topk_weight_clip}; + } + + // --- 发送完成信号 --- + { + std::unique_lock done_lk(*clip_mtxs[req.layer][req.expert]); + clip_dones[req.layer][req.expert].store(true, std::memory_order_release); + } + clip_cvs[req.layer][req.expert]->notify_all(); + + lk.lock(); + } + } + // std::cout << "mbp_clip finish" << std::endl; + } + +private: + int hidden_size; + bool tie_embedding_words; + Layer embedding; + Layer lm_head; + BailingMoeModel model; +}; diff --git a/mllm/models/ling/mbp/projection_loader.cpp b/mllm/models/ling/mbp/projection_loader.cpp new file mode 100644 index 000000000..bef508778 --- /dev/null +++ b/mllm/models/ling/mbp/projection_loader.cpp @@ -0,0 +1,104 @@ +#include "settings_bailing_moe_mbp_e.hpp" +#include "Timing.hpp" + +using namespace std; +using namespace mllm; + +// 投影层加载线程函数的实现 +void projection_loading_thread_func() { + while (!mbp_finish.load(std::memory_order_acquire)) { + std::unique_lock lk(projection_queue_mutex); + projection_queue_cv.wait(lk, [] { + return !projection_load_requests.empty() || mbp_finish.load(std::memory_order_acquire); + }); + + if (mbp_finish.load(std::memory_order_acquire)) { + break; + } + + while (!projection_load_requests.empty()) { + auto req = projection_load_requests.top(); // 从优先队列的顶部取出最高优先级的请求 + projection_load_requests.pop(); + lk.unlock(); + + // 执行投影层加载 + { + std::unique_lock proj_lk(*proj_mtxs[req.layer][req.expert][req.projection_type]); + if 
(!proj_dones[req.layer][req.expert][req.projection_type].load(std::memory_order_acquire)) { + double time_start = (mllm_time_us() - start_time) / 1000.0F; + + // 使用外部加载函数 + if (load_projection_impl) { + load_projection_impl(req.layer, req.expert, req.projection_type); + } + + proj_dones[req.layer][req.expert][req.projection_type].store(true, std::memory_order_release); + + // 统一key命名格式:{layer}_{expert}_{proj_type} + string proj_type_name; + switch (req.projection_type) { + case 0: proj_type_name = "gate"; break; + case 1: proj_type_name = "up"; break; + case 2: proj_type_name = "down"; break; + default: proj_type_name = "unknown"; break; + } + string proj_name = std::to_string(req.layer) + "_" + std::to_string(req.expert) + "_" + proj_type_name; + double time_end = (mllm_time_us() - start_time) / 1000.0F; + proj_load_times[proj_name] = {time_start, time_end}; + // std::cout << "Projection loaded: " << proj_name << std::endl; + + // 刷新输出缓冲区,确保日志立即显示 + std::cout.flush(); + } + } + proj_cvs[req.layer][req.expert][req.projection_type]->notify_all(); + lk.lock(); + } + } +} + +// clip线程函数的实现 +void clip_thread_func() { + while (!mbp_finish.load(std::memory_order_acquire)) { + std::unique_lock lk(clip_queue_mutex); + clip_queue_cv.wait(lk, [] { + return !clip_requests.empty() || mbp_finish.load(std::memory_order_acquire); + }); + + if (mbp_finish.load(std::memory_order_acquire)) { + break; + } + + while (!clip_requests.empty()) { + auto req = clip_requests.front(); + clip_requests.pop(); + lk.unlock(); + + // 执行clip操作 + { + std::unique_lock clip_lk(*clip_mtxs[req.layer][req.expert]); + if (!clip_dones[req.layer][req.expert].load(std::memory_order_acquire)) { + double time_start = (mllm_time_us() - start_time) / 1000.0F; + + // 执行实际的clip操作 + auto expert_tokens = req.hidden_states.clip(req.exp_token_idx, SEQUENCE); + auto topk_weight_clip = req.topk_weight.clip(req.exp_idx, SEQUENCE); + + // 存储clip结果 + string key = std::to_string(req.layer) + "_" + 
std::to_string(req.expert); + { + std::lock_guard results_lk(clip_results_mutex); + clipped_data[key] = {expert_tokens, topk_weight_clip}; + } + + clip_dones[req.layer][req.expert].store(true, std::memory_order_release); + + double time_end = (mllm_time_us() - start_time) / 1000.0F; + expert_clip_times[key] = {time_start, time_end}; + } + } + clip_cvs[req.layer][req.expert]->notify_all(); + lk.lock(); + } + } +} diff --git a/mllm/models/ling/mbp/settings_bailing_moe_mbp.hpp b/mllm/models/ling/mbp/settings_bailing_moe_mbp.hpp new file mode 100644 index 000000000..c5c1047cd --- /dev/null +++ b/mllm/models/ling/mbp/settings_bailing_moe_mbp.hpp @@ -0,0 +1,139 @@ +#ifndef MAP_MINICPMMOE_MBP_HPP +#define MAP_MINICPMMOE_MBP_HPP +// #include +// #include +#include +#include +// #include +#include +#include +#include +#include +#include +#include +#include +#include +#include "Tensor.hpp" + +using namespace std; +using namespace mllm; + +int mbp_load_layer_idx; +int mbp_load_expert_idx; + +struct LoadRequest { + int layer; + int expert; +}; +queue load_requests; // 替换原do_mbp_load相关变量 +mutex queue_mutex; // 队列互斥锁 +condition_variable queue_cv; // 队列条件变量 + +// ========= Clip Thread Globals (NEW) ========= +// 1. 新增 ClipRequest 结构体 +struct ClipRequest { + int layer; + int expert; + Tensor hidden_states; + Tensor exp_token_idx; + Tensor topk_weight; + Tensor exp_idx; +}; +// 2. 新增 clip 线程的任务队列、锁和条件变量 +queue clip_requests; +mutex clip_queue_mutex; +condition_variable clip_queue_cv; +// 3. 新增用于存储 clip 结果的 map 和其互斥锁 +map> clipped_data; +mutex clip_results_mutex; +//============ End Clip Thread Globals ============ + +atomic mbp_finish{false}; // 改为原子布尔 + +vector>> mtxs; // 每个层和专家一个互斥锁 +vector>> cvs; // 每个层和专家一个条件变量 +vector>> dones; // 原子布尔保证可见性 + +// --- Clipping Primitives (NEW) --- +// 4. 
新增 clip 线程的同步对象 +vector>> clip_mtxs; +vector>> clip_cvs; +vector>> clip_dones; + +// 修改 MAP_MINICPMMOE_MBP_HPP 中的相关部分 + +inline void reset_syntax_mbm(int layer_idx, int expert_idx) { + // 使用原子操作重置状态 + dones[layer_idx][expert_idx].store(false, std::memory_order_release); +} + +inline void ling_mbp_init(int num_layers, int num_experts) { + // 初始化 loading 相关的变量 + mtxs.resize(num_layers); + cvs.resize(num_layers); + dones.resize(num_layers); + for (int i = 0; i < num_layers; ++i) { + mtxs[i].resize(num_experts); + cvs[i].resize(num_experts); + dones[i] = std::vector>(num_experts); + for (int j = 0; j < num_experts; ++j) { + mtxs[i][j] = make_unique(); + cvs[i][j] = make_unique(); + dones[i][j].store(false, std::memory_order_relaxed); + } + } + // 初始化 clipping 相关的变量 + clip_mtxs.resize(num_layers); + clip_cvs.resize(num_layers); + clip_dones.resize(num_layers); + for (int i = 0; i < num_layers; ++i) { + clip_mtxs[i].resize(num_experts); + clip_cvs[i].resize(num_experts); + clip_dones[i] = std::vector>(num_experts); + for (int j = 0; j < num_experts; ++j) { + clip_mtxs[i][j] = make_unique(); + clip_cvs[i][j] = make_unique(); + clip_dones[i][j].store(false, std::memory_order_relaxed); + } + } +} + +map> load_times; +map> expert_cal_times; +map> expert_clip_times; +map> expert_wait_times; +uint64_t start_time; +void clearMBPtimes() { + load_times.clear(); + expert_cal_times.clear(); + expert_clip_times.clear(); + expert_wait_times.clear(); + clipped_data.clear(); // [请确认已添加] 清理裁剪结果,防止内存泄漏 + start_time = 0; +} +void prinMBPtimes() { + double load_times_cal = 0; + cout << "load_times = [" << endl; + for (const auto &entry : load_times) { + cout << "(\"" << entry.first << "\" , " << entry.second.first << ", " << entry.second.second << ")," << endl; + load_times_cal += entry.second.second - entry.second.first; + } + cout << "]" << endl; + cout << "calc_times = [" << endl; + for (const auto &entry : expert_cal_times) { + cout << "(\"" << entry.first << "\" , " << 
entry.second.first << ", " << entry.second.second << ")," << endl; + } + cout << "]" << endl; + cout << "clip_times = [" << endl; + for (const auto &entry : expert_clip_times) { + cout << "(\"" << entry.first << "\" , " << entry.second.first << ", " << entry.second.second << ")," << endl; + } + cout << "]" << endl; + cout << "wait_times = [" << endl; + for (const auto &entry : expert_wait_times) { + cout << "(\"" << entry.first << "\" , " << entry.second.first << ", " << entry.second.second << ")," << endl; + } + cout << "]" << endl; + std::cout << "load_times_cal = " << load_times_cal << "ms" << endl; +} +#endif // MAP_MINICPMMOE_MBP_HPP \ No newline at end of file diff --git a/mllm/models/ling/mbp/settings_bailing_moe_mbp_e.hpp b/mllm/models/ling/mbp/settings_bailing_moe_mbp_e.hpp new file mode 100644 index 000000000..7a01dfe52 --- /dev/null +++ b/mllm/models/ling/mbp/settings_bailing_moe_mbp_e.hpp @@ -0,0 +1,178 @@ +#pragma once +// #include +// #include +#include +// #include +#include +#include +#include +#include +#include +#include +#include +#include +#include "Tensor.hpp" + +using namespace std; +using namespace mllm; + +int mbp_load_layer_idx; +int mbp_load_expert_idx; + +// ========= Fine-grained Loading Globals ========= +// 投影层加载请求结构体,用于细粒度加载 +struct ProjectionLoadRequest { + int layer; + int expert; + int projection_type; // 0: gate_proj, 1: up_proj, 2: down_proj + + // 用于优先队列排序:优先按expert分组,再按projection_type排序 + bool operator<(const ProjectionLoadRequest &other) const { + if (layer != other.layer) return layer > other.layer; // 层号小的优先 + if (expert != other.expert) return expert > other.expert; // 专家号小的优先 + return projection_type > other.projection_type; // gate(0) > up(1) > down(2) + } +}; + +// 投影层加载请求队列和同步对象 +priority_queue projection_load_requests; +mutex projection_queue_mutex; +condition_variable projection_queue_cv; + +// 投影层同步对象 - [layer][expert][projection_type] +vector>>> proj_mtxs; +vector>>> proj_cvs; +vector>>> proj_dones; + +// ========= 
Clip Thread Globals (NEW) ========= +// 4. 新增 ClipRequest 结构体 +struct ClipRequest { + int layer; + int expert; + Tensor hidden_states; + Tensor exp_token_idx; + Tensor topk_weight; + Tensor exp_idx; +}; +// 5. 新增 clip 线程的任务队列、锁和条件变量 +queue clip_requests; +mutex clip_queue_mutex; +condition_variable clip_queue_cv; +// 6. 新增用于存储 clip 结果的 map 和其互斥锁 +map> clipped_data; +mutex clip_results_mutex; +//============ End Clip Thread Globals ============ + +atomic mbp_finish{false}; // 改为原子布尔 + +// --- Clipping Primitives (NEW) --- +// 新增 clip 线程的同步对象 +vector>> clip_mtxs; +vector>> clip_cvs; +vector>> clip_dones; + +// 修改 MAP_MINICPMMOE_MBP_HPP 中的相关部分 + +inline void reset_syntax_mbm(int layer_idx, int expert_idx) { + // 已移除专家级同步对象,不需要重置 +} + +inline void ling_mbp_init(int num_layers, int num_experts) { + // 初始化细粒度投影层相关的变量 + proj_mtxs.resize(num_layers); + proj_cvs.resize(num_layers); + proj_dones.resize(num_layers); + for (int i = 0; i < num_layers; ++i) { + proj_mtxs[i].resize(num_experts); + proj_cvs[i].resize(num_experts); + proj_dones[i].resize(num_experts); + for (int j = 0; j < num_experts; ++j) { + proj_mtxs[i][j].resize(3); // 3 projection types + proj_cvs[i][j].resize(3); + proj_dones[i][j] = std::vector>(3); + for (int k = 0; k < 3; ++k) { + proj_mtxs[i][j][k] = make_unique(); + proj_cvs[i][j][k] = make_unique(); + proj_dones[i][j][k].store(false, std::memory_order_relaxed); + } + } + } + + // 初始化 clipping 相关的变量 + clip_mtxs.resize(num_layers); + clip_cvs.resize(num_layers); + clip_dones.resize(num_layers); + for (int i = 0; i < num_layers; ++i) { + clip_mtxs[i].resize(num_experts); + clip_cvs[i].resize(num_experts); + clip_dones[i] = std::vector>(num_experts); + for (int j = 0; j < num_experts; ++j) { + clip_mtxs[i][j] = make_unique(); + clip_cvs[i][j] = make_unique(); + clip_dones[i][j].store(false, std::memory_order_relaxed); + } + } +} + +map> load_times; +map> expert_cal_times; +map> expert_clip_times; +map> expert_wait_times; +map> proj_load_times; // 
新增:投影层加载时间 +uint64_t start_time; + +// 全局模型指针,用于投影层加载线程访问 +class BailingMoeV2ForCausalLM; // 前向声明 +BailingMoeV2ForCausalLM *global_model_ptr = nullptr; + +// 投影层加载函数指针 +typedef void (*LoadProjectionFunc)(int layer_idx, int expert_idx, int projection_type); +LoadProjectionFunc load_projection_impl = nullptr; + +// 新增:投影层加载线程函数 +extern void projection_loading_thread_func(); +// 新增:clip线程函数 +extern void clip_thread_func(); + +void clearMBPtimes() { + load_times.clear(); + expert_cal_times.clear(); + expert_clip_times.clear(); + expert_wait_times.clear(); + proj_load_times.clear(); // 新增:清理投影层加载时间 + clipped_data.clear(); // [请确认已添加] 清理裁剪结果,防止内存泄漏 + start_time = 0; +} +void prinMBPtimes(string start_word = "") { + double load_times_cal = 0; + cout << "load_times = [" << endl; + for (const auto &entry : proj_load_times) { + if (start_word.empty() || entry.first.substr(0, start_word.length()) == start_word) { + cout << "(\"" << entry.first << "\" , " << entry.second.first << ", " << entry.second.second << ")," << endl; + } + load_times_cal += entry.second.second - entry.second.first; + } + cout << "]" << endl; + cout << "calc_times = [" << endl; + for (const auto &entry : expert_cal_times) { + if (start_word.empty() || entry.first.substr(0, start_word.length()) == start_word) { + cout << "(\"" << entry.first << "\" , " << entry.second.first << ", " << entry.second.second << ")," << endl; + } + } + cout << "]" << endl; + cout << "clip_times = [" << endl; + for (const auto &entry : expert_clip_times) { + if (start_word.empty() || entry.first.substr(0, start_word.length()) == start_word) { + cout << "(\"" << entry.first << "\" , " << entry.second.first << ", " << entry.second.second << ")," << endl; + } + } + cout << "]" << endl; + cout << "wait_times = [" << endl; + for (const auto &entry : expert_wait_times) { + if (start_word.empty() || entry.first.substr(0, start_word.length()) == start_word) { + cout << "(\"" << entry.first << "\" , " << entry.second.first << ", " << 
entry.second.second << ")," << endl; + } + } + cout << "]" << endl; + std::cout << "load_times_cal = " << load_times_cal << "ms" << endl; +} \ No newline at end of file diff --git a/mllm/models/ling/modeling_bailing_moe.hpp b/mllm/models/ling/modeling_bailing_moe.hpp new file mode 100644 index 000000000..3b52bac16 --- /dev/null +++ b/mllm/models/ling/modeling_bailing_moe.hpp @@ -0,0 +1,250 @@ +#pragma once +#include "DataType.hpp" +#include "Layer.hpp" +#include "Module.hpp" +#include "Tensor.hpp" +#include "Trace.hpp" +#include "Types.hpp" +#include "configuration_bailing_moe.hpp" +#include "models/transformer/modeling_transformer.hpp" +#include +#include +#include +#include +#include + +using namespace mllm; + +class BailingMoeMLP final : public Module { +public: + BailingMoeMLP() = default; + BailingMoeMLP(int hidden_size, int intermediate_size, const BailingMoeNameConfig &names, const std::string &base_name) { + gate_proj = Linear(hidden_size, intermediate_size, false, base_name + names._gate_proj_name); + silu = SiLU(base_name + "act"); + up_proj = Linear(hidden_size, intermediate_size, false, base_name + names._up_proj_name); + down_proj = Linear(intermediate_size, hidden_size, false, base_name + names._down_proj_name); + } + + std::vector Forward(std::vector inputs, std::vector args) override { + auto x = gate_proj(inputs[0]); + x = silu(x); + auto y = up_proj(inputs[0]); + x = x * y; + x = down_proj(x); + return {x}; + } + +private: + Layer gate_proj; + Layer up_proj; + Layer down_proj; + Layer silu; +}; + +class BailingMoeGate final : public Module { +public: + BailingMoeGate() = default; + BailingMoeGate(const BailingMoeConfig &config, const BailingMoeNameConfig &names, const std::string &base_name) { + gate = Linear(config.hidden_size, config.num_experts, false, base_name + "gate"); + softmax = Softmax(DIMENSION, false, base_name + "softmax"); + num_experts_per_tok = config.num_experts_per_tok; + } + + std::vector Forward(std::vector inputs, std::vector 
args) override { + auto scores = softmax(gate(inputs[0])); + auto experts_w_i = Tensor::topk(scores, num_experts_per_tok, DIMENSION); + auto topk_weight = experts_w_i[0]; // 1, batch*seq, 1, k + auto topk_idx = experts_w_i[1]; // 1, batch*seq, 1, k + topk_idx = topk_idx.view(-1, 1, 1, -1); // 1, 1, 1, k* batch*seq + topk_weight = topk_weight / topk_weight.sum(DIMENSION); // 1, batch*seq, 1, k + return {scores, topk_weight, topk_idx}; + } + +private: + Layer gate; + Softmax softmax; + int num_experts_per_tok{}; +}; + +class BailingMoeSparseMoeBlock final : public Module { +public: + BailingMoeSparseMoeBlock() = default; + BailingMoeSparseMoeBlock(const BailingMoeConfig &config, const BailingMoeNameConfig &names, const string &base_name) { + experts = List(config.num_experts, config.hidden_size, config.moe_intermediate_size, names, base_name + "experts."); + gate = BailingMoeGate(config, names, base_name); + num_experts_per_tok = config.num_experts_per_tok; + num_shared_experts = config.num_shared_experts; + if (num_shared_experts > 0) { + shared_experts = BailingMoeMLP(config.hidden_size, + config.moe_intermediate_size * config.num_shared_experts, + names, base_name + "shared_experts."); + } + } + // receive embeds + std::vector Forward(std::vector inputs, std::vector args) override { + auto hidden_states = inputs[0]; + auto identity = hidden_states; + if (hidden_states.batch() > 1) { + hidden_states = hidden_states.view(1, -1, ANYDIM, -1); // 1, batch*seq, 1, hidden + } + auto gates_t = gate({hidden_states}); // 1, batch*seq, 1, num_experts + auto scores = gates_t[0]; // 1, batch*seq, 1, num_experts + auto topk_weight = gates_t[1]; // 1, batch*seq, + auto topk_idx = gates_t[2]; // 1, batch*seq, 1, k + hidden_states = moe_infer(hidden_states, topk_weight, topk_idx); // 1, batch*seq, 1, hidden + if (num_shared_experts) { + hidden_states = hidden_states + shared_experts({identity})[0]; // add shared experts + } + if (hidden_states.batch() > 1) { + // 
expert_cache.view(ANYDIM, seq, -1, -1);//TODO + } + return {hidden_states}; + } + Tensor moe_infer(Tensor hidden_states, + Tensor &topk_weight, + Tensor &topk_idx) { + auto dtype = topk_idx.dtype(); + auto device = topk_idx.device(); + topk_idx = topk_idx.fp32().cpu(); + auto idxs = topk_idx.argsort(); // 1, 1, 1, k* batch*seq + auto tokens_per_expert = topk_idx.bincount(); // (1, 1, 1, 0) 1, 1, 1, k + idxs = idxs.to(device).to(dtype); + auto token_idxs = idxs / num_experts_per_tok; // 1, 1, 1, k* batch*seq + int start_idx = 0; + int end_idx = start_idx; + auto expert_cache = Tensor::zero_like(hidden_states); // 1, batch*seq, 1, hidden + for (int i = 0; i < experts.size(); ++i) { + if (tokens_per_expert.dimension() != 0 && i >= tokens_per_expert.dimension()) + break; + int this_token_num = tokens_per_expert.dimension() == 0 ? + 0 : + tokens_per_expert.d(0, 0, 0, i); + if (tokens_per_expert.dimension() != 0 && this_token_num == 0) + continue; + end_idx = start_idx + this_token_num; + // + auto exp_token_idx = token_idxs.clip({}, {}, {}, {start_idx, end_idx}); //(1, 1, 1, 0) 1, 1, 1, e-s + auto exp_idx = idxs.clip({}, {}, {}, {start_idx, end_idx}); //(1, 1, 1, 0) 1, 1, 1, e-s + auto expert_tokens = hidden_states.clip(exp_token_idx, SEQUENCE); //(1, 0, 1, hidden) 1, e-s, 1, hidden + auto expert_out = experts[i]({expert_tokens})[0]; //(1, 0, 1, hidden) 1, e-s, 1, + if (topk_weight.dimension() != 1) { topk_weight = topk_weight.view(-1, -1, 1, 1); } // 1, k* batch*seq, 1, 1 + auto expert_weights_clip = topk_weight.clip(exp_idx, SEQUENCE); //(1, 0, 1, 1) 1, e-s, 1, 1 + expert_out = expert_out * expert_weights_clip; //(1, 0, 1, hidden) 1, e-s, 1, hidden + expert_cache.scatter_add(expert_out, exp_token_idx); // 1, batch*seq, 1, hidden + // + start_idx = end_idx; + } + return expert_cache; // 1, batch*seq, 1, hidden + } + +private: + BailingMoeMLP shared_experts; + std::vector experts; + BailingMoeGate gate; + int num_shared_experts{}; + int num_experts_per_tok{}; +}; + 
+class BailingMoeDecoder final : public Module { +public: + BailingMoeDecoder() = default; + BailingMoeDecoder(const BailingMoeConfig &config, const BailingMoeNameConfig &names, const string &base_name) { + self_atten = MultiHeadAttention(config.hidden_size, config.num_attention_heads, + config.num_key_value_heads, + config.hidden_size / config.num_attention_heads, + SPLIT_HD, PostQkv_NONE, false, + config.RoPE_type, config.rope_theta, + config.max_position_embeddings, + config.cache_limit, config.use_cache, config.use_qkv_bias, config.use_bias, + config.attn_implementation, names, base_name + names._attn_base_name); + moe = BailingMoeSparseMoeBlock(config, names, base_name + names._ffn_base_name); + input_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._attn_norm_name); + post_attention_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._ffn_norm_name); + num_hidden_layers = config.num_hidden_layers; + } + + std::vector Forward(std::vector inputs, std::vector args) override { + auto hidden_states = input_layernorm(inputs[0]); + hidden_states = self_atten({hidden_states, hidden_states, hidden_states})[0]; + auto tmp = hidden_states + inputs[0]; + hidden_states = post_attention_layernorm(tmp); + hidden_states = moe({hidden_states})[0]; + hidden_states = hidden_states + tmp; + return {hidden_states}; + } + + MultiHeadAttention &get_attention() { + return self_atten; + } + +private: + MultiHeadAttention self_atten; + BailingMoeSparseMoeBlock moe; + Layer input_layernorm; + Layer post_attention_layernorm; + int num_hidden_layers; +}; + +class BailingMoeModel final : public Module { +public: + BailingMoeModel() = default; + BailingMoeModel(const BailingMoeConfig &config, const BailingMoeNameConfig &names, const string &base_name) { + blocks = List(config.num_hidden_layers, config, names, base_name); + norm = RMSNorm(config.hidden_size, config.rms_norm_eps, names.post_norm_name); + } + std::vector 
Forward(std::vector inputs, std::vector args) override { + auto hidden_states = inputs[0]; + for (auto &block : blocks) { + hidden_states = block({hidden_states})[0]; + } + hidden_states = norm(hidden_states); + return {hidden_states}; + } + + void clear_kvcache() override { + for (auto &block : blocks) { + auto kvcache = block.get_attention().get_cache(); + for (auto &cache : kvcache) { cache->clearCache(); } + auto ropes = block.get_attention().get_rope(); + for (auto &rope : ropes) { rope->clearCache(); } + } + } + +private: + std::vector blocks; + Layer norm; +}; + +class BailingMoeForCausalLM final : public Module { +public: + CHAINABLE_MODULE_METHODS(BailingMoeForCausalLM) + BailingMoeForCausalLM(BailingMoeConfig &config) { + dtype = config.dtype; + auto names = config.names_config; + hidden_size = config.hidden_size; + embedding = Embedding(config.vocab_size, config.hidden_size, names.token_embd_name); + model = BailingMoeModel(config, names, names.blk_name); + lm_head = Linear(config.hidden_size, config.vocab_size, false, names.lm_head_name); + } + + std::vector Forward(std::vector inputs, std::vector args) override { + auto x = embedding(inputs[0]).to(dtype); + auto outputs = model({x})[0]; + if (outputs.sequence() > 1) { + outputs = outputs.clip({}, {}, {-1}, {}); + } + outputs = lm_head(outputs); + return {outputs}; + } + void clear_kvcache() override { + model.clear_kvcache(); + } + +private: + int hidden_size; + bool tie_embedding_words; + Layer embedding; + Layer lm_head; + BailingMoeModel model; + DataType dtype; +}; diff --git a/mllm/models/ling/tokenization_bailing.hpp b/mllm/models/ling/tokenization_bailing.hpp new file mode 100644 index 000000000..5ed0cb5bc --- /dev/null +++ b/mllm/models/ling/tokenization_bailing.hpp @@ -0,0 +1,194 @@ +#ifndef TOKENIZATION_BAILING_LITE_HPP +#define TOKENIZATION_BAILING_LITE_HPP + +#include "tokenizers/BPE/Bpe.hpp" +#include +#include +#include +#include +#include +#include +#include + +using namespace mllm; + 
+class BaiLingTokenizer final : public BPETokenizer { +public: + explicit BaiLingTokenizer(const std::string &vocab_file, const std::string &merge_file) : + BPETokenizer(vocab_file) { + initialize_byte_to_char_map(); + for (const auto &pair : byte_to_char_map_) { + char_to_byte_map_[pair.second] = pair.first; + } + id_to_token_string_.resize(vocab_map_.size() + 1); + for (const auto &pair : vocab_map_) { + if (pair.second < id_to_token_string_.size()) { + id_to_token_string_[pair.second] = pair.first; + } + } + auto merge_file_stream = std::ifstream(merge_file); + if (!merge_file_stream.good()) { + std::cout << "merge file is broken\n"; + exit(0); + } + std::string line; + unsigned rank = 0; + std::unordered_map bpe_ranks_; + std::getline(merge_file_stream, line); + while (std::getline(merge_file_stream, line)) { + if (line.empty() || line[0] == '#') { + continue; + } + bpe_ranks_[line] = rank; + rank++; + } + BPETokenizer::setMergeRank(bpe_ranks_); + chat_template_pre = "SYSTEMYou are Ling, an assistant created by inclusionAIHUMAN"; + chat_template_end = "ASSISTANT"; + + special_tokens_ = { + bos_token_string_, eos_token_string_, "[CLS]", "[gMASK]", + "", "", + "<|arithmetic_start|>", "<|arithmetic_end|>", + "<|number_start|>", "<|number_end|>"}; + for (int i = 0; i <= 100; ++i) { + special_tokens_.push_back("<|reserved_token_" + std::to_string(i) + "|>"); + } + special_tokens_.push_back("SYSTEM"); + special_tokens_.push_back("HUMAN"); + special_tokens_.push_back("BOT"); + } + + Tensor tokenize(const std::string &text, string name = "input_ids", BackendType type = MLLM_CPU) override { + std::vector tokens_id; + auto parts = _splitWithDelimiters(text, special_tokens_); + + for (const auto &part : parts) { + if (part.empty()) continue; + + auto it = vocab_map_.find(part); + if (it != vocab_map_.end()) { + tokens_id.push_back(it->second); + } else { + std::string byte_level_string; + for (unsigned char byte : part) { + byte_level_string += 
u32string_to_utf8({byte_to_char_map_[byte]}); + } + std::vector bpe_pieces = BPETokenizer::bpe(byte_level_string, ""); + for (const auto &piece : bpe_pieces) { + auto vocab_it = vocab_map_.find(piece); + if (vocab_it != vocab_map_.end()) { + tokens_id.push_back(vocab_it->second); + } else { + std::cerr << "Fatal Error: BPE piece not found in vocab_map_: " << piece << std::endl; + } + } + } + } + return Tokenizer::tokens2Input(tokens_id, name, type); + } + + std::string detokenize(const std::vector &tokens) override { + std::string byte_chars_str; + for (token_id_t token_id : tokens) { + if (token_id < id_to_token_string_.size()) { + byte_chars_str += id_to_token_string_[token_id]; + } + } + std::u32string u32_byte_chars_str = utf8_to_u32string(byte_chars_str); + std::vector byte_buffer; + for (char32_t c : u32_byte_chars_str) { + auto it = char_to_byte_map_.find(c); + if (it != char_to_byte_map_.end()) { + byte_buffer.push_back(static_cast(it->second)); + } + } + return std::string(byte_buffer.begin(), byte_buffer.end()); + } + + std::pair detokenize(Tensor &result) override { + assert(result.batch() == 1); + assert(result.head() == 1); + vector scores; + for (int i = 0; i < result.dimension(); ++i) { + scores.push_back(result.dataAt(0, 0, result.sequence() - 1, i)); + } + auto token_idx = this->argmax(scores); + return {this->detokenize({token_idx}), token_idx}; + } + + std::pair postprocess(std::string &text) override { + if (text == this->eos_token_string_) return {false, ""}; + if (text == "" || text.rfind("", 0) == 0) { + return {false, ""}; + } + if (text == this->bos_token_string_ || text == "" || text == "" || text.rfind("<|reserved_token_", 0) == 0 || text.rfind("", 0) == 0) return {true, ""}; + return {true, text}; + } + +private: + const std::string bos_token_string_ = "<|startoftext|>"; + const std::string eos_token_string_ = "<|endoftext|>"; + std::vector special_tokens_; + + std::unordered_map byte_to_char_map_; + std::unordered_map char_to_byte_map_; 
+ std::vector id_to_token_string_; + + std::vector _splitWithDelimiters(const std::string &str, const std::vector &delimiters) const { + std::vector result; + size_t last = 0; + while (last < str.size()) { + size_t min_pos = std::string::npos; + std::string best_delim; + for (const auto &delim : delimiters) { + if (!delim.empty()) { + size_t found_pos = str.find(delim, last); + if (found_pos != std::string::npos && (min_pos == std::string::npos || found_pos < min_pos)) { + min_pos = found_pos; + best_delim = delim; + } + } + } + if (min_pos != std::string::npos) { + if (min_pos > last) result.push_back(str.substr(last, min_pos - last)); + result.push_back(best_delim); + last = min_pos + best_delim.length(); + } else { + result.push_back(str.substr(last)); + break; + } + } + return result; + } + + static std::u32string utf8_to_u32string(const std::string &s) { + try { + std::wstring_convert, char32_t> conv; + return conv.from_bytes(s); + } catch (const std::range_error &) { return {}; } + } + static std::string u32string_to_utf8(const std::u32string &s) { + try { + std::wstring_convert, char32_t> conv; + return conv.to_bytes(s); + } catch (const std::range_error &) { return ""; } + } + + void initialize_byte_to_char_map() { + std::vector chars; + for (int i = 0; i < 256; ++i) { chars.push_back(static_cast(i)); } + int n = 0; + for (int i = 0; i < 256; ++i) { + if (!((i >= 33 && i <= 126) || (i >= 161 && i <= 172) || (i >= 174 && i <= 255))) { + chars[i] = 256 + n; + n++; + } + } + for (int i = 0; i < 256; ++i) { + byte_to_char_map_[static_cast(i)] = chars[i]; + } + } +}; + +#endif // TOKENIZATION_BAILING_LITE_HPP \ No newline at end of file diff --git a/src/models/llama/configuration_llama.hpp b/mllm/models/llama/configuration_llama.hpp similarity index 94% rename from src/models/llama/configuration_llama.hpp rename to mllm/models/llama/configuration_llama.hpp index f97ede2be..46256e6a6 100644 --- a/src/models/llama/configuration_llama.hpp +++ 
b/mllm/models/llama/configuration_llama.hpp @@ -75,9 +75,11 @@ class LLaMAConfig : public TransformerConfig { float rope_theta; int max_position_embeddings; - explicit LLaMAConfig(int token_limit, string billions = "7B", RoPEType type = LLAMAROPE, int vocab = 32000) { + explicit LLaMAConfig(int token_limit, string billions = "7B", RoPEType type = LLAMAROPE, int vocab = 32000, + string attn_implementation_ = "flash_attention_2") { names_config.init(type); vocab_size = vocab; + attn_implementation = attn_implementation_; if (billions == "7B" || billions == "7b") { hidden_dim = 4096; head_size = 32; diff --git a/src/models/llama/modeling_elastic_llama.hpp b/mllm/models/llama/modeling_elastic_llama.hpp similarity index 82% rename from src/models/llama/modeling_elastic_llama.hpp rename to mllm/models/llama/modeling_elastic_llama.hpp index a4211d454..1be63ad7b 100644 --- a/src/models/llama/modeling_elastic_llama.hpp +++ b/mllm/models/llama/modeling_elastic_llama.hpp @@ -25,43 +25,43 @@ class ElasticMultiHeadAttention final : public Module { ElasticLinear o_proj; int head_size_{}; int kv_head_size_{}; - int attn_hidden_dim_{}; + int head_dim_{}; public: ElasticMultiHeadAttention() = default; - ElasticMultiHeadAttention(int hidden_dim, int head_size, int kv_head_size, int attn_hidden_dim, + ElasticMultiHeadAttention(int hidden_dim, int head_size, int kv_head_size, int head_dim, RoPEType RoPE_type, int cache_limit, bool do_mask, bool bias, const TransformerNameConfig &names, const string &base_name) { assert(kv_head_size_ == head_size_); - attn_hidden_dim_ = attn_hidden_dim; + head_dim_ = head_dim; head_size_ = head_size; kv_head_size_ = kv_head_size; - q_proj = ElasticLinear(hidden_dim, head_size * attn_hidden_dim, bias, base_name + names._q_proj_name); - k_proj = ElasticLinear(hidden_dim, kv_head_size * attn_hidden_dim, bias, base_name + names._k_proj_name); - v_proj = ElasticLinear(hidden_dim, kv_head_size * attn_hidden_dim, bias, base_name + names._v_proj_name); + 
q_proj = ElasticLinear(hidden_dim, head_size * head_dim, bias, base_name + names._q_proj_name); + k_proj = ElasticLinear(hidden_dim, kv_head_size * head_dim, bias, base_name + names._k_proj_name); + v_proj = ElasticLinear(hidden_dim, kv_head_size * head_dim, bias, base_name + names._v_proj_name); if (RoPE_type > 0) { q_rope = RoPE(RoPE_type, base_name + "q_rope"); k_rope = RoPE(RoPE_type, base_name + "k_rope"); } if (cache_limit > 0) { - k_cache = KVCache(kv_head_size, attn_hidden_dim, head_size / kv_head_size, cache_limit, base_name + "k_cache"); - v_cache = KVCache(kv_head_size, attn_hidden_dim, head_size / kv_head_size, cache_limit, base_name + "v_cache"); + k_cache = KVCache(kv_head_size, head_dim, head_size / kv_head_size, cache_limit, base_name + "k_cache"); + v_cache = KVCache(kv_head_size, head_dim, head_size / kv_head_size, cache_limit, base_name + "v_cache"); } softmax = Softmax(DIMENSION, do_mask, base_name + "softmax"); - o_proj = ElasticLinear(head_size * attn_hidden_dim, hidden_dim, bias, base_name + names._o_proj_name); + o_proj = ElasticLinear(head_size * head_dim, hidden_dim, bias, base_name + names._o_proj_name); } vector Forward(vector inputs, vector args) override { vector activate_head_dims = std::any_cast>(args[0]); int activate_head_dim = activate_head_dims[0]; activate_head_dim = (activate_head_dim == -1) ? 
kv_head_size_ : (activate_head_dim); Tensor q, k, v; - q = q_proj(inputs[0], -1, activate_head_dim * attn_hidden_dim_); - k = k_proj(inputs[1], -1, activate_head_dim * attn_hidden_dim_); - v = v_proj(inputs[2], -1, activate_head_dim * attn_hidden_dim_); - q = q.view(-1, activate_head_dim, -1, attn_hidden_dim_); - k = k.view(-1, activate_head_dim, -1, attn_hidden_dim_); - v = v.view(-1, activate_head_dim, -1, attn_hidden_dim_); + q = q_proj(inputs[0], -1, activate_head_dim * head_dim_); + k = k_proj(inputs[1], -1, activate_head_dim * head_dim_); + v = v_proj(inputs[2], -1, activate_head_dim * head_dim_); + q = q.view(-1, activate_head_dim, -1, head_dim_); + k = k.view(-1, activate_head_dim, -1, head_dim_); + v = v.view(-1, activate_head_dim, -1, head_dim_); if (q_rope.ready() && k_rope.ready()) { q = q_rope(q); k = k_rope(k); @@ -72,15 +72,15 @@ class ElasticMultiHeadAttention final : public Module { } k = k.transpose(SEQUENCE, DIMENSION); auto qk = Tensor::mm(q, k); - qk = qk / std::sqrt(attn_hidden_dim_); // attn_hidden_dim_ + qk = qk / std::sqrt(head_dim_); // head_dim_ if (k_cache.ready() && v_cache.ready()) { qk = softmax(qk, k_cache.getCacheSeqLen()); } else { qk = softmax(qk); } auto o = Tensor::mm(qk, v); - o = o.view(-1, 1, -1, attn_hidden_dim_ * activate_head_dim); - o = o_proj(o, activate_head_dim * attn_hidden_dim_, -1); + o = o.view(-1, 1, -1, head_dim_ * activate_head_dim); + o = o_proj(o, activate_head_dim * head_dim_, -1); return {o}; } vector get_cache() { diff --git a/src/models/llama/modeling_llama.hpp b/mllm/models/llama/modeling_llama.hpp similarity index 72% rename from src/models/llama/modeling_llama.hpp rename to mllm/models/llama/modeling_llama.hpp index 5e559ca59..58883581c 100644 --- a/src/models/llama/modeling_llama.hpp +++ b/mllm/models/llama/modeling_llama.hpp @@ -44,9 +44,15 @@ class LLaMABlock final : public Module { public: LLaMABlock() = default; - LLaMABlock(int hidden_dim, int head_size, int kv_head_size, int ffn_hidden, RoPEType 
RoPE_type, float rope_theta, int max_position_embeddings, int cache_limit, const LLaMANameConfig &names, const string &base_name) { - attention = MultiHeadAttention(hidden_dim, head_size, kv_head_size, hidden_dim / head_size, SPLIT_NONE, false, false, - RoPE_type, rope_theta, max_position_embeddings, cache_limit, true, false, names, base_name + names._attn_base_name); + LLaMABlock(int hidden_dim, int head_size, int kv_head_size, int ffn_hidden, + RoPEType RoPE_type, float rope_theta, int max_position_embeddings, int cache_limit, + string attn_implementation, + const LLaMANameConfig &names, const string &base_name) { + attention = MultiHeadAttention(hidden_dim, head_size, kv_head_size, hidden_dim / head_size, + SPLIT_NONE, PostQkv_NONE, false, + RoPE_type, rope_theta, max_position_embeddings, cache_limit, true, false, false, + attn_implementation, + names, base_name + names._attn_base_name); mlp = LLaMAMLP(hidden_dim, ffn_hidden, names, base_name + names._ffn_base_name); norm1 = RMSNorm(hidden_dim, 1e-6, base_name + names._attn_norm_name); norm2 = RMSNorm(hidden_dim, 1e-6, base_name + names._ffn_norm_name); @@ -74,14 +80,24 @@ class LLaMAModel final : public Module { public: explicit LLaMAModel(const LLaMAConfig &config) : - LLaMAModel(config.vocab_size, config.hidden_dim, config.head_size, config.num_key_value_heads, config.ffn_hidden, config.block_num, - config.RoPE_type, config.rope_theta, config.max_position_embeddings, config.cache_limit, + LLaMAModel(config.vocab_size, config.hidden_dim, config.head_size, + config.num_key_value_heads, config.ffn_hidden, config.block_num, + config.RoPE_type, config.rope_theta, + config.max_position_embeddings, config.cache_limit, + config.attn_implementation, config.names_config, config.names_config.blk_name) { } - LLaMAModel(int vocab_size, int hidden_dim, int head_size, int kv_head_size, int ffn_hidden, int block_num, RoPEType RoPE_type, float rope_theta, int max_position_embeddings, int cache_limit, + LLaMAModel(int 
vocab_size, int hidden_dim, int head_size, + int kv_head_size, int ffn_hidden, int block_num, + RoPEType RoPE_type, float rope_theta, int max_position_embeddings, int cache_limit, + string attn_implementation, const LLaMANameConfig &names, const string &base_name) { embedding = Embedding(vocab_size, hidden_dim, names.token_embd_name); - blocks = List(block_num, hidden_dim, head_size, kv_head_size, ffn_hidden, RoPE_type, rope_theta, max_position_embeddings, cache_limit, names, base_name); + blocks = List(block_num, hidden_dim, head_size, + kv_head_size, ffn_hidden, RoPE_type, + rope_theta, max_position_embeddings, cache_limit, + attn_implementation, + names, base_name); norm = RMSNorm(hidden_dim, 1e-6, names.post_norm_name); lm_head = Linear(hidden_dim, vocab_size, false, names.lm_head_name); } diff --git a/src/models/llama/modeling_sparse_llama.hpp b/mllm/models/llama/modeling_sparse_llama.hpp similarity index 72% rename from src/models/llama/modeling_sparse_llama.hpp rename to mllm/models/llama/modeling_sparse_llama.hpp index c496eb471..17616bc5c 100644 --- a/src/models/llama/modeling_sparse_llama.hpp +++ b/mllm/models/llama/modeling_sparse_llama.hpp @@ -24,13 +24,13 @@ class SparseLLaMAMLP final : public Module { gate_proj = Linear(hidden_dim, ffn_hidden, false, base_name + names._gate_proj_name); relu = ReLU(base_name + "act"); up_proj = SparseIdLinear(hidden_dim, ffn_hidden, base_name + names._up_proj_name); - if(is_down_sparse) { + if (is_down_sparse) { down_proj = SparseLinear(ffn_hidden, hidden_dim, base_name + names._down_proj_name); - }else{ + } else { down_proj = Linear(ffn_hidden, hidden_dim, false, base_name + names._down_proj_name); } } - vector Forward(vector inputs, vector args) override { + vector Forward(vector inputs, vector args) override { auto x = inputs[0]; auto id = gate_proj(inputs[0]); auto gate = relu(id); @@ -49,14 +49,17 @@ class SparseLLaMABlock final : public Module { public: SparseLLaMABlock() = default; - SparseLLaMABlock(bool 
is_down_sparse, int hidden_dim, int head_size, int ffn_hidden, RoPEType RoPE_type, float rope_theta, int max_position_embeddings, int cache_limit, const LLaMANameConfig &names, const string &base_name) { - attention = MultiHeadAttention(hidden_dim, head_size, head_size, hidden_dim / head_size, SPLIT_NONE, false, false, - RoPE_type, rope_theta, max_position_embeddings, cache_limit, true, false, names, base_name + names._attn_base_name); + SparseLLaMABlock(bool is_down_sparse, int hidden_dim, int head_size, int ffn_hidden, RoPEType RoPE_type, float rope_theta, int max_position_embeddings, int cache_limit, string attn_implementation, const LLaMANameConfig &names, const string &base_name) { + attention = MultiHeadAttention(hidden_dim, head_size, head_size, hidden_dim / head_size, + SPLIT_NONE, PostQkv_NONE, false, + RoPE_type, rope_theta, max_position_embeddings, cache_limit, true, false, false, + attn_implementation, + names, base_name + names._attn_base_name); mlp = SparseLLaMAMLP(hidden_dim, ffn_hidden, names, base_name + names._ffn_base_name, is_down_sparse); norm1 = RMSNorm(hidden_dim, 1e-6, base_name + names._attn_norm_name); norm2 = RMSNorm(hidden_dim, 1e-6, base_name + names._ffn_norm_name); } - vector Forward(vector inputs, vector args) override { + vector Forward(vector inputs, vector args) override { auto x = norm1(inputs[0]); x = attention({x, x, x})[0]; auto tmp = x + inputs[0]; @@ -75,19 +78,22 @@ class SparseLLaMAModel final : public Module { public: explicit SparseLLaMAModel(const LLaMAConfig &config, bool is_down_sparse = false) : - SparseLLaMAModel(config.vocab_size, config.hidden_dim, config.head_size, config.ffn_hidden, config.block_num, config.RoPE_type, - config.rope_theta, config.max_position_embeddings, config.cache_limit, - config.names_config, config.names_config.blk_name, is_down_sparse) { + SparseLLaMAModel(config.vocab_size, config.hidden_dim, config.head_size, + config.ffn_hidden, config.block_num, config.RoPE_type, + config.rope_theta, 
config.max_position_embeddings, config.cache_limit, + config.attn_implementation, + config.names_config, config.names_config.blk_name, is_down_sparse) { } - SparseLLaMAModel(int vocab_size, int hidden_dim, int head_size, int ffn_hidden, int block_num, RoPEType RoPE_type, - float rope_theta, int max_position_embeddings, int cache_limit, - const LLaMANameConfig &names, const string &base_name, bool is_down_sparse) { + SparseLLaMAModel(int vocab_size, int hidden_dim, int head_size, int ffn_hidden, int block_num, RoPEType RoPE_type, + float rope_theta, int max_position_embeddings, int cache_limit, + string attn_implementation, + const LLaMANameConfig &names, const string &base_name, bool is_down_sparse) { embedding = Embedding(vocab_size, hidden_dim, names.token_embd_name); - blocks = List(block_num, is_down_sparse, hidden_dim, head_size, ffn_hidden, RoPE_type, rope_theta, max_position_embeddings, cache_limit, names, base_name); + blocks = List(block_num, is_down_sparse, hidden_dim, head_size, ffn_hidden, RoPE_type, rope_theta, max_position_embeddings, cache_limit, attn_implementation, names, base_name); norm = RMSNorm(hidden_dim, 1e-6, names.post_norm_name); lm_head = Linear(hidden_dim, vocab_size, false, names.lm_head_name); } - vector Forward(vector inputs, vector args) override { + vector Forward(vector inputs, vector args) override { auto x = embedding(inputs[0]); for (auto &block : blocks) { x = block({x})[0]; diff --git a/src/models/llama/tokenization_llama.hpp b/mllm/models/llama/tokenization_llama.hpp similarity index 100% rename from src/models/llama/tokenization_llama.hpp rename to mllm/models/llama/tokenization_llama.hpp diff --git a/src/models/llama3/configuration_llama3.hpp b/mllm/models/llama3/configuration_llama3.hpp similarity index 85% rename from src/models/llama3/configuration_llama3.hpp rename to mllm/models/llama3/configuration_llama3.hpp index acf0166d9..35e2a4dc2 100644 --- a/src/models/llama3/configuration_llama3.hpp +++ 
b/mllm/models/llama3/configuration_llama3.hpp @@ -95,6 +95,23 @@ class Llama3Config : public TransformerConfig { rope_theta = 500000.0; tie_word_embeddings = true; + rope_scaling = { + {"factor", 32.0f}, + {"high_freq_factor", 4.0f}, + {"low_freq_factor", 1.0f}, + {"original_max_position_embeddings", 8192}, + {"rope_type", std::string("llama3")}}; + } else if (billions == "1B-lm" || billions == "1b-lm") { + vocab_size = 128256; + hidden_dim = 2048; + head_size = 32; + num_key_value_heads = 8; + ffn_hidden = 8192; + block_num = 16; + max_position_embeddings = 131072; + rope_theta = 500000.0; + tie_word_embeddings = false; + rope_scaling = { {"factor", 32.0f}, {"high_freq_factor", 4.0f}, diff --git a/src/models/llama3/modeling_llama3.hpp b/mllm/models/llama3/modeling_llama3.hpp similarity index 77% rename from src/models/llama3/modeling_llama3.hpp rename to mllm/models/llama3/modeling_llama3.hpp index 66020eb7f..8c0404d87 100644 --- a/src/models/llama3/modeling_llama3.hpp +++ b/mllm/models/llama3/modeling_llama3.hpp @@ -45,16 +45,18 @@ class Llama3Attention final : public Module { int head_size_; // Size of each attention head int kv_head_size_; // Size of each key/value head int hidden_dim_; // Hidden dimension size + string attn_impl; public: Llama3Attention() = default; Llama3Attention(int hidden_dim, int head_size, int kv_head_size, RoPEType RoPE_type, float rope_theta, - int max_position_embeddings, int cache_limit, const TransformerNameConfig &names, - const string &base_name, const RoPEConfig &rope_config = {}) { + int max_position_embeddings, int cache_limit, string attn_implementation, + const TransformerNameConfig &names, const string &base_name, const RoPEConfig &rope_config = {}) { hidden_dim_ = hidden_dim; head_size_ = head_size; kv_head_size_ = kv_head_size; + attn_impl = attn_implementation; // Initialize projections q_proj = Linear(hidden_dim, head_size * (hidden_dim / head_size), false, base_name + names._q_proj_name); @@ -73,8 +75,8 @@ class 
Llama3Attention final : public Module { // Initialize KV cache if (cache_limit > 0) { - k_cache = KVCache(kv_head_size, hidden_dim / head_size, head_size / kv_head_size, cache_limit, base_name + "k_cache"); - v_cache = KVCache(kv_head_size, hidden_dim / head_size, head_size / kv_head_size, cache_limit, base_name + "v_cache"); + k_cache = KVCache(kv_head_size, hidden_dim / head_size, head_size / kv_head_size, cache_limit, attn_impl, base_name + "k_cache"); + v_cache = KVCache(kv_head_size, hidden_dim / head_size, head_size / kv_head_size, cache_limit, attn_impl, base_name + "v_cache"); } // Initialize softmax @@ -102,23 +104,27 @@ class Llama3Attention final : public Module { k = k_cache(k); v = v_cache(v); } - - // Transpose keys for dot product - k = k.transpose(SEQUENCE, DIMENSION); - - // Compute attention scores - Tensor qk = Tensor::mm(q, k); // Dot product of queries and keys - qk = qk / std::sqrt(hidden_dim_ / head_size_); // Scale by sqrt(d_k) - - // Apply softmax - if (k_cache.ready() && v_cache.ready()) { - qk = softmax(qk, k_cache.getCacheSeqLen()); // Masked softmax if cache is used - } else { - qk = softmax(qk); // Regular softmax + Tensor o; + if (attn_impl == "flash_attention_2") { + o = Tensor::flash_attention2_forward(q, k, v, true); + } else { // eager implementation + // Transpose keys for dot product + k = k.transpose(SEQUENCE, DIMENSION); + + // Compute attention scores + Tensor qk = Tensor::mm(q, k); // Dot product of queries and keys + qk = qk / std::sqrt(hidden_dim_ / head_size_); // Scale by sqrt(d_k) + + // Apply softmax + if (k_cache.ready() && v_cache.ready()) { + qk = softmax(qk, k_cache.getCacheSeqLen()); // Masked softmax if cache is used + } else { + qk = softmax(qk); // Regular softmax + } + + // Compute attention output + o = Tensor::mm(qk, v); // Weighted sum of values } - - // Compute attention output - Tensor o = Tensor::mm(qk, v); // Weighted sum of values o = o.view(-1, 1, -1, hidden_dim_); // Reshape to original dimensions o 
= o_proj(o); // Output projection @@ -154,7 +160,8 @@ class Llama3Block final : public Module { } attention = Llama3Attention(hidden_dim, head_size, kv_head_size, RoPE_type, rope_theta, - max_position_embeddings, cache_limit, names, base_name + names._attn_base_name, rope_config); + max_position_embeddings, cache_limit, config.attn_implementation, + names, base_name + names._attn_base_name, rope_config); mlp = Llama3MLP(hidden_dim, ffn_hidden, names, base_name + names._ffn_base_name); norm1 = RMSNorm(hidden_dim, 1e-6, base_name + names._attn_norm_name); norm2 = RMSNorm(hidden_dim, 1e-6, base_name + names._ffn_norm_name); @@ -179,14 +186,17 @@ class Llama3Model final : public Module { vector blocks; Layer norm; Parameter lm_head; + Layer lm_head_layer; + bool tie_embedding_words_; public: explicit Llama3Model(const Llama3Config &config) : Llama3Model(config.vocab_size, config.hidden_dim, config.head_size, config.num_key_value_heads, config.ffn_hidden, config.block_num, - config.RoPE_type, config.rope_theta, config.max_position_embeddings, config.cache_limit, + config.RoPE_type, config.rope_theta, config.max_position_embeddings, config.cache_limit, config.tie_word_embeddings, config.names_config, config, config.names_config.blk_name) { } Llama3Model(int vocab_size, int hidden_dim, int head_size, int kv_head_size, int ffn_hidden, int block_num, RoPEType RoPE_type, float rope_theta, int max_position_embeddings, int cache_limit, + bool tie_embedding_words, const Llama3NameConfig &names, const Llama3Config &config, const string &base_name) { @@ -194,13 +204,21 @@ class Llama3Model final : public Module { blocks = List(block_num, hidden_dim, head_size, kv_head_size, ffn_hidden, RoPE_type, rope_theta, max_position_embeddings, cache_limit, names, config, base_name); norm = RMSNorm(hidden_dim, 1e-6, names.post_norm_name); // TODO: tie_word_embeddings + tie_embedding_words_ = tie_embedding_words; // this is a workaround // we just simply use the token embedding as the lm_head 
// but now we are not really tying the word embeddings auto lm_head_name = names.lm_head_name; - if (config.tie_word_embeddings) - lm_head_name = names.token_embd_name; - lm_head = Parameter(1, vocab_size, 1, hidden_dim, lm_head_name + ".weight"); + // assert(config.tie_word_embeddings); + if (tie_embedding_words) { + lm_head = Parameter(1, vocab_size, 1, hidden_dim, + names.token_embd_name + ".weight"); + } else { + lm_head_layer = + Linear(hidden_dim, vocab_size, false, names.lm_head_name); + } + // lm_head_name = names.token_embd_name; + // lm_head = Parameter(1, vocab_size, 1, hidden_dim, lm_head_name + ".weight"); } vector Forward(vector inputs, vector args) override { auto x = embedding(inputs[0]); @@ -208,7 +226,12 @@ class Llama3Model final : public Module { x = block({x})[0]; } x = norm(x); - x = Tensor::mm(x, lm_head().transpose(Chl::SEQUENCE, Chl::DIMENSION)); + if (tie_embedding_words_) { + x = Tensor::mm(x, lm_head().transpose(Chl::SEQUENCE, Chl::DIMENSION)); + } else { + x = lm_head_layer(x); + } + // x = Tensor::mm(x, lm_head().transpose(Chl::SEQUENCE, Chl::DIMENSION)); return {x}; } diff --git a/src/models/llama3/tokenization_llama3.hpp b/mllm/models/llama3/tokenization_llama3.hpp similarity index 99% rename from src/models/llama3/tokenization_llama3.hpp rename to mllm/models/llama3/tokenization_llama3.hpp index 2825b0401..a92fb01f0 100644 --- a/src/models/llama3/tokenization_llama3.hpp +++ b/mllm/models/llama3/tokenization_llama3.hpp @@ -109,4 +109,4 @@ class LLama3Tokenizer final : public TiktokenTokenizer { } }; -#endif // TOKENIZATION_LLAMA3_HPP +#endif // TOK ENIZATION_LLAMA3_HPP diff --git a/src/models/llava/configuration_llava.hpp b/mllm/models/llava/configuration_llava.hpp similarity index 100% rename from src/models/llava/configuration_llava.hpp rename to mllm/models/llava/configuration_llava.hpp diff --git a/src/models/llava/modeling_llava.hpp b/mllm/models/llava/modeling_llava.hpp similarity index 91% rename from 
src/models/llava/modeling_llava.hpp rename to mllm/models/llava/modeling_llava.hpp index eafa26b92..8aca0088f 100644 --- a/src/models/llava/modeling_llava.hpp +++ b/mllm/models/llava/modeling_llava.hpp @@ -20,9 +20,8 @@ class LLaMABodyModel final : public Module { public: LLaMABodyModel() = default; - LLaMABodyModel(int vocab_size, int hidden_dim, int head_size, int ffn_hidden, int block_num, RoPEType RoPE_type, float rope_theta, int max_position_embeddings, int cache_limit, - const LLaMANameConfig &names, const string &base_name) { - blocks = List(block_num, hidden_dim, head_size, head_size, ffn_hidden, RoPE_type, rope_theta, max_position_embeddings, cache_limit, names, base_name); + LLaMABodyModel(int vocab_size, int hidden_dim, int head_size, int ffn_hidden, int block_num, RoPEType RoPE_type, float rope_theta, int max_position_embeddings, int cache_limit, string attn_implementation, const LLaMANameConfig &names, const string &base_name) { + blocks = List(block_num, hidden_dim, head_size, head_size, ffn_hidden, RoPE_type, rope_theta, max_position_embeddings, cache_limit, attn_implementation, names, base_name); norm = RMSNorm(hidden_dim, 1e-6, names.post_norm_name); lm_head = Linear(hidden_dim, vocab_size, false, names.lm_head_name); } @@ -74,10 +73,11 @@ class LLaVAVisionModel final : public Module { public: LLaVAVisionModel() = default; LLaVAVisionModel(int hidden_dim, int head_size, int ffn_hidden, int patch, int img_hw, int block_num, + string attn_implementation, const ViTNameConfig &names, const string &base_name) { embedding = LLaVAVisionEmbedding(hidden_dim, patch, img_hw, names, base_name + names._embd_name); pre_layrnorm = LayerNorm(hidden_dim, true, 1e-6, base_name + names._vision_pre_layrnorm_name); - blocks = List(block_num, hidden_dim, head_size, ffn_hidden, "QuickGELU", names, base_name + names._layer_name); + blocks = List(block_num, hidden_dim, head_size, ffn_hidden, "QuickGELU", attn_implementation, names, base_name + names._layer_name); 
clip_len_ = std::ceil(img_hw / patch) * std::ceil(img_hw / patch) + 1; linear_1 = Linear(hidden_dim, ffn_hidden, true, "multi_modal_projector.linear_1"); gelu = GELU("multi_modal_projector.act"); @@ -106,19 +106,21 @@ class LLaVAModel final : public Module { LLaVAModel(config.vocab_size, config.hidden_dim, config.head_size, config.ffn_hidden, config.block_num, config.RoPE_type, config.rope_theta, config.max_position_embeddings, config.cache_limit, config.names_config, - config.vision_hidden_dim, config.vision_head_size, config.vision_ffn_hidden, config.patch, config.img_hw, config.vision_block_num, + config.vision_hidden_dim, config.vision_head_size, config.vision_ffn_hidden, config.patch, config.img_hw, config.vision_block_num, config.attn_implementation, config.vit_names_config) { } LLaVAModel(int vocab_size, int hidden_dim, int head_size, int ffn_hidden, int block_num, RoPEType RoPE_type, float rope_theta, int max_position_embeddings, int cache_limit, const LLaMANameConfig &names_config, int vision_hidden_dim, int vision_head_size, int vision_ffn_hidden, int patch, int img_hw, int vision_block_num, + string attn_implementation, const ViTNameConfig &vit_names_config) { text_embedding = Embedding(vocab_size, hidden_dim, names_config.token_embd_name); llama_body = LLaMABodyModel(vocab_size, hidden_dim, head_size, ffn_hidden, block_num, - RoPE_type, rope_theta, max_position_embeddings, cache_limit, + RoPE_type, rope_theta, max_position_embeddings, cache_limit, attn_implementation, names_config, names_config.blk_name); vision_tower = LLaVAVisionModel(vision_hidden_dim, vision_head_size, vision_ffn_hidden, patch, img_hw, vision_block_num, + attn_implementation, vit_names_config, vit_names_config.vison_model_name); } vector Forward(vector inputs, vector args) override { diff --git a/src/models/llava/processing_llava.hpp b/mllm/models/llava/processing_llava.hpp similarity index 100% rename from src/models/llava/processing_llava.hpp rename to 
mllm/models/llava/processing_llava.hpp diff --git a/src/models/minicpm/configuration_minicpm.hpp b/mllm/models/minicpm/configuration_minicpm.hpp similarity index 100% rename from src/models/minicpm/configuration_minicpm.hpp rename to mllm/models/minicpm/configuration_minicpm.hpp diff --git a/src/models/minicpm/modeling_minicpm.hpp b/mllm/models/minicpm/modeling_minicpm.hpp similarity index 92% rename from src/models/minicpm/modeling_minicpm.hpp rename to mllm/models/minicpm/modeling_minicpm.hpp index 163bc0b47..3b3d977d4 100644 --- a/src/models/minicpm/modeling_minicpm.hpp +++ b/mllm/models/minicpm/modeling_minicpm.hpp @@ -23,7 +23,7 @@ class MiniCPMMLP final : public Module { std::vector Forward(std::vector inputs, std::vector args) override { auto x = gate_proj(inputs[0]); x = silu(x); - auto y = up_proj(inputs[0]); // ERROR + auto y = up_proj(inputs[0]); x = x * y; x = down_proj(x); return {x}; @@ -41,10 +41,14 @@ class MiniCPMDecoder final : public Module { public: MiniCPMDecoder() = default; MiniCPMDecoder(const MiniCPMConfig &config, const MiniCPMNameConfig &names, const string &base_name) { - self_atten = MultiHeadAttention(config.hidden_size, config.num_attention_heads, config.num_key_value_heads, - config.hidden_size / config.num_attention_heads, SPLIT_NONE, false, false, + self_atten = MultiHeadAttention(config.hidden_size, config.num_attention_heads, + config.num_key_value_heads, + config.hidden_size / config.num_attention_heads, + SPLIT_NONE, PostQkv_NONE, false, config.RoPE_type, config.rope_theta, config.max_position_embeddings, config.cache_limit, - true, false, names, base_name + names._attn_base_name); + true, false, false, + config.attn_implementation, + names, base_name + names._attn_base_name); mlp = MiniCPMMLP(config.hidden_size, config.intermediate_size, names, base_name + names._ffn_base_name); input_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._attn_norm_name); post_attention_layernorm = 
RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._ffn_norm_name); diff --git a/src/models/minicpm/tokenization_minicpm.hpp b/mllm/models/minicpm/tokenization_minicpm.hpp similarity index 100% rename from src/models/minicpm/tokenization_minicpm.hpp rename to mllm/models/minicpm/tokenization_minicpm.hpp diff --git a/src/models/minicpm3/configuration_minicpm3.hpp b/mllm/models/minicpm3/configuration_minicpm3.hpp similarity index 100% rename from src/models/minicpm3/configuration_minicpm3.hpp rename to mllm/models/minicpm3/configuration_minicpm3.hpp diff --git a/src/models/minicpm3/modeling_minicpm3.hpp b/mllm/models/minicpm3/modeling_minicpm3.hpp similarity index 100% rename from src/models/minicpm3/modeling_minicpm3.hpp rename to mllm/models/minicpm3/modeling_minicpm3.hpp diff --git a/src/models/minicpm3/tokenization_minicpm3.hpp b/mllm/models/minicpm3/tokenization_minicpm3.hpp similarity index 100% rename from src/models/minicpm3/tokenization_minicpm3.hpp rename to mllm/models/minicpm3/tokenization_minicpm3.hpp diff --git a/src/models/minicpm_moe/configuration_minicpm_moe.hpp b/mllm/models/minicpm_moe/configuration_minicpm_moe.hpp similarity index 100% rename from src/models/minicpm_moe/configuration_minicpm_moe.hpp rename to mllm/models/minicpm_moe/configuration_minicpm_moe.hpp diff --git a/src/models/minicpm_moe/mbm/modeling_minicpm_moe_mbm.hpp b/mllm/models/minicpm_moe/mbm/modeling_minicpm_moe_mbm.hpp similarity index 71% rename from src/models/minicpm_moe/mbm/modeling_minicpm_moe_mbm.hpp rename to mllm/models/minicpm_moe/mbm/modeling_minicpm_moe_mbm.hpp index bdb8f7865..64b022dad 100644 --- a/src/models/minicpm_moe/mbm/modeling_minicpm_moe_mbm.hpp +++ b/mllm/models/minicpm_moe/mbm/modeling_minicpm_moe_mbm.hpp @@ -14,6 +14,7 @@ // #include #include // #include +#include #include #include #include @@ -41,7 +42,7 @@ class MiniCPMMLP final : public Module { std::vector Forward(std::vector inputs, std::vector args) override { auto x = 
gate_proj(inputs[0]); x = silu(x); - auto y = up_proj(inputs[0]); // ERROR + auto y = up_proj(inputs[0]); x = x * y; x = down_proj(x); return {x}; @@ -98,116 +99,99 @@ class MiniCPMMoE final : public Module { expert_weights = expert_weights.view(-1, -1, 1, 1); // 1, k* batch*seq, 1, 1 auto idxs = expert_indices.argsort(); // 1, 1, 1, k* batch*seq auto tokens_per_expert = expert_indices.bincount(); // (1, 1, 1, 0) 1, 1, 1, k - /* - load_experts_1th(tokens_per_expert); - auto expert_cache = moe_infer(hidden_states, tokens_per_expert, expert_weights, idxs); - */ + Tensor expert_cache; #ifdef MTIME - if (Tensor::tensor_status == TENSOR_STATIC_READY && hidden_states.sequence() == 1) { - std::cout << "attn || exe time: " << (mllm_time_us() - end_infer_last) / 1000.0F << "ms" << std::endl; - } + std::cout << "attn || exe time: " << (mllm_time_us() - end_infer_last) / 1000.0F << "ms" << std::endl; #endif - if (Tensor::tensor_status == TENSOR_STATIC_READY) { - vector tokens_per_expert_vector; - for (int i = 0; i < tokens_per_expert.dimension(); ++i) { - if (tokens_per_expert.d(0, 0, 0, i)) { - tokens_per_expert_vector.push_back(i); - } + vector tokens_per_expert_vector; + for (int i = 0; i < tokens_per_expert.dimension(); ++i) { + if (tokens_per_expert.d(0, 0, 0, i)) { + tokens_per_expert_vector.push_back(i); } - // - if (layer_idx < 39 && tokens_per_expert_vector.size() == 2) { - if (mbm_maps[layer_idx].find(tokens_per_expert_vector) != mbm_maps[layer_idx].end()) { - mbm_load_expert_idxs.clear(); - auto c = mbm_maps[layer_idx][tokens_per_expert_vector]; - mbm_load_expert_idxs = c[0]; - mbm_load_layer_idx = layer_idx + 1; - do_mbm_load = true; - } - } else if (layer_idx == 39 && tokens_per_expert_vector.size() == 2) { - if (mbm_maps[layer_idx].find(tokens_per_expert_vector) != mbm_maps[layer_idx].end()) { - mbm_load_expert_idxs.clear(); - auto c = mbm_maps[layer_idx][tokens_per_expert_vector]; - mbm_load_expert_idxs = c[0]; - mbm_load_layer_idx = 0; - do_mbm_load = true; - 
} - } - /* - mbm_load_expert_idxs = mbm_idxs; - mbm_load_layer_idx = layer_idx; - do_mbm_load = true; - */ - if (mbm_idxs_size == 2 && tokens_per_expert_vector.size() == 2) { // layer_idx > 0 && && layer_idx < 39 - int &done = dones[layer_idx]; // 标志变量,用于表示数据是否已被修改 - cvs[layer_idx]->wait(locks[layer_idx], [&done] { return done; }); // 等待条件满足 - assert(dones[layer_idx]); - } - if (!experts_loaded(tokens_per_expert_vector)) { - load_experts(tokens_per_expert_vector); + } + // + if (layer_idx < 39 && tokens_per_expert_vector.size() == 2) { + if (mbm_maps[layer_idx].find(tokens_per_expert_vector) != mbm_maps[layer_idx].end()) { + mbm_load_expert_idxs.clear(); + auto c = mbm_maps[layer_idx][tokens_per_expert_vector]; + mbm_load_expert_idxs = c[0]; + mbm_load_layer_idx = layer_idx + 1; + do_mbm_load = true; } - assert(experts_loaded(tokens_per_expert_vector)); - expert_cache = moe_infer(hidden_states, tokens_per_expert, expert_weights, idxs); - if (mbm_idxs_size == 2 && tokens_per_expert_vector.size() == 2) { // layer_idx > 0 && && layer_idx < 39 - reset_syntax_mbm(layer_idx); + } else if (layer_idx == 39 && tokens_per_expert_vector.size() == 2) { + if (mbm_maps[layer_idx].find(tokens_per_expert_vector) != mbm_maps[layer_idx].end()) { + mbm_load_expert_idxs.clear(); + auto c = mbm_maps[layer_idx][tokens_per_expert_vector]; + mbm_load_expert_idxs = c[0]; + mbm_load_layer_idx = 0; + do_mbm_load = true; } - if (layer_idx == 0) - mbm_idxs_size = tokens_per_expert_vector.size(); - } else { - expert_cache = moe_infer(hidden_states, tokens_per_expert, expert_weights, idxs); } -#ifdef MTIME - if (Tensor::tensor_status == TENSOR_STATIC_READY && hidden_states.sequence() == 1) { - end_infer_last = mllm_time_us(); + if (mbm_idxs_size == 2 && tokens_per_expert_vector.size() == 2) { // layer_idx > 0 && && layer_idx < 39 + int &done = dones[layer_idx]; // 标志变量,用于表示数据是否已被修改 + cvs[layer_idx]->wait(locks[layer_idx], [&done] { return done; }); // 等待条件满足 + assert(dones[layer_idx]); + } + if 
(!experts_loaded(tokens_per_expert_vector)) { + load_experts(tokens_per_expert_vector); } + assert(experts_loaded(tokens_per_expert_vector)); + expert_cache = moe_infer(hidden_states, tokens_per_expert, expert_weights, idxs); + if (mbm_idxs_size == 2 && tokens_per_expert_vector.size() == 2) { // layer_idx > 0 && && layer_idx < 39 + reset_syntax_mbm(layer_idx); + } + if (layer_idx == 0) + mbm_idxs_size = tokens_per_expert_vector.size(); +#ifdef MTIME + end_infer_last = mllm_time_us(); #endif return {expert_cache}; } void load_experts(vector expert_idxs) { - if (Tensor::tensor_status == TENSOR_STATIC_READY) { #ifdef MTIME - auto start_infer = mllm_time_us(); + auto start_infer = mllm_time_us(); #endif - int result; - // #pragma omp parallel for num_threads(CPUBackend::cpu_threads) - for (int i = 0; i < expert_idxs.size(); ++i) { - if (expert_idxs.size() == 2) { - if (std::find(mbm_v[layer_idx].begin(), mbm_v[layer_idx].end(), expert_idxs[i]) != mbm_v[layer_idx].end()) { - // 在 mbm_v[layer_idx] 中找到了 expert_idxs[i] - if (experts[expert_idxs[i]].loaded()) { - continue; - } else { - std::cout << "[ERROR] experts load." 
<< std::endl; - experts[expert_idxs[i]].load(); - continue; - } - } - if (mbm_v[layer_idx].size() >= mbm_num_max_experts) { - result = mbm_queue_remove(mbm_v[layer_idx], expert_idxs); - if (result != -1) { // mbm_v[layer_idx]不全是expert_idxs - experts[result].free(); - mbm_v[layer_idx].push_back(expert_idxs[i]); - // if (mbm_load_layer_idx != layer_idx) - // std::cout << layer_idx << " " << mbm_load_layer_idx << " : " << expert_idxs[i] << std::endl; - experts[expert_idxs[i]].load(); - } + int result; + // #pragma omp parallel for num_threads(CPUBackend::cpu_threads) + for (int i = 0; i < expert_idxs.size(); ++i) { + if (expert_idxs.size() == 2) { + if (std::find(mbm_v[layer_idx].begin(), mbm_v[layer_idx].end(), expert_idxs[i]) != mbm_v[layer_idx].end()) { + // 在 mbm_v[layer_idx] 中找到了 expert_idxs[i] + if (experts[expert_idxs[i]].loaded()) { + continue; } else { + std::cout << "[ERROR] experts load." << std::endl; + experts[expert_idxs[i]].load(); + continue; + } + } + if (mbm_v[layer_idx].size() >= mbm_num_max_experts) { + result = mbm_queue_remove(mbm_v[layer_idx], expert_idxs); + if (result != -1) { // mbm_v[layer_idx]不全是expert_idxs + experts[result].free(); mbm_v[layer_idx].push_back(expert_idxs[i]); + // if (mbm_load_layer_idx != layer_idx) + // std::cout << layer_idx << " " << mbm_load_layer_idx << " : " << expert_idxs[i] << std::endl; experts[expert_idxs[i]].load(); } - assert(experts[expert_idxs[i]].loaded()); } else { + mbm_v[layer_idx].push_back(expert_idxs[i]); experts[expert_idxs[i]].load(); } + assert(experts[expert_idxs[i]].loaded()); + } else { + experts[expert_idxs[i]].load(); } + } #ifdef MTIME - if (expert_idxs.size() == 2) { - auto end_infer = mllm_time_us(); - std::cout << "expert|| load time: " << (end_infer - start_infer) / 1000.0F << "ms" << std::endl; - } -#endif + if (expert_idxs.size() == 2) { + auto end_infer = mllm_time_us(); + std::cout << "expert|| load time: " << (end_infer - start_infer) / 1000.0F << "ms" << std::endl; } +#endif + // } } 
private: @@ -245,10 +229,8 @@ class MiniCPMMoE final : public Module { } } void free_experts(vector expert_idxs) { - if (Tensor::tensor_status == TENSOR_STATIC_READY) { - for (int i = 0; i < expert_idxs.size(); ++i) { - experts[expert_idxs[i]].free(); - } + for (int i = 0; i < expert_idxs.size(); ++i) { + experts[expert_idxs[i]].free(); } } Tensor moe_infer(Tensor &hidden_states, Tensor &tokens_per_expert, Tensor &expert_weights, Tensor &idxs) { @@ -280,17 +262,15 @@ class MiniCPMMoE final : public Module { auto expert_weights_clip = expert_weights.clip(exp_idx, SEQUENCE); //(1, 0, 1, 1) 1, e-s, 1, 1 expert_out = expert_out * expert_weights_clip; //(1, 0, 1, hidden) 1, e-s, 1, hidden - expert_cache.scatter_reduce(expert_out, exp_token_idx); // 1, batch*seq, 1, hidden + expert_cache.scatter_add(expert_out, exp_token_idx); // 1, batch*seq, 1, hidden start_idx = end_idx; } if (hidden_states.batch() > 1) { // expert_cache.view(ANYDIM, seq, -1, -1); } #ifdef MTIME - if (Tensor::tensor_status == TENSOR_STATIC_READY && hidden_states.sequence() == 1) { - auto end_infer = mllm_time_us(); - std::cout << "expert|| exe time: " << (end_infer - start_infer) / 1000.0F << "ms" << std::endl; - } + auto end_infer = mllm_time_us(); + std::cout << "expert|| exe time: " << (end_infer - start_infer) / 1000.0F << "ms" << std::endl; #endif return expert_cache; } @@ -306,10 +286,13 @@ class MiniCPMDecoder final : public Module { public: MiniCPMDecoder() = default; MiniCPMDecoder(const MiniCPMConfig &config, const MiniCPMNameConfig &names, const string &base_name) { - self_atten = MultiHeadAttention(config.hidden_size, config.num_attention_heads, config.num_key_value_heads, - config.hidden_size / config.num_attention_heads, SPLIT_NONE, false, false, + self_atten = MultiHeadAttention(config.hidden_size, config.num_attention_heads, + config.num_key_value_heads, + config.hidden_size / config.num_attention_heads, + SPLIT_NONE, PostQkv_NONE, false, config.RoPE_type, config.rope_theta, 
config.max_position_embeddings, config.cache_limit, - true, false, names, base_name + names._attn_base_name); + true, false, false, + config.attn_implementation, names, base_name + names._attn_base_name); moe = MiniCPMMoE(config, names, base_name + names._ffn_base_name); input_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._attn_norm_name); post_attention_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._ffn_norm_name); @@ -389,11 +372,11 @@ class MiniCPMForCausalLM final : public Module { } std::vector Forward(std::vector inputs, std::vector args) override { std::vector outputs; - if (Tensor::tensor_status == TENSOR_STATIC_READY && inputs[0].sequence() == 1) { + if (inputs[0].dimension() == 1) { omp_set_max_active_levels(2); // Enable OpenMP nesting #pragma omp parallel num_threads(2) if (omp_get_thread_num() == 0) { // 根据线程ID决定执行哪个函数 -#if defined(__ARM_NEON) +#if defined(__ARM_NEON) && !defined(__APPLE__) // 绑定线程到特定的CPU核心 cpu_set_t cpuset; CPU_ZERO(&cpuset); diff --git a/src/models/minicpm_moe/mbm/settings_minicpm_moe_mbm.hpp b/mllm/models/minicpm_moe/mbm/settings_minicpm_moe_mbm.hpp similarity index 100% rename from src/models/minicpm_moe/mbm/settings_minicpm_moe_mbm.hpp rename to mllm/models/minicpm_moe/mbm/settings_minicpm_moe_mbm.hpp diff --git a/src/models/minicpm_moe/mbp/modeling_minicpm_moe_mbp.hpp b/mllm/models/minicpm_moe/mbp/modeling_minicpm_moe_mbp.hpp similarity index 97% rename from src/models/minicpm_moe/mbp/modeling_minicpm_moe_mbp.hpp rename to mllm/models/minicpm_moe/mbp/modeling_minicpm_moe_mbp.hpp index 7739090b5..ae3a6fb3b 100644 --- a/src/models/minicpm_moe/mbp/modeling_minicpm_moe_mbp.hpp +++ b/mllm/models/minicpm_moe/mbp/modeling_minicpm_moe_mbp.hpp @@ -12,6 +12,7 @@ #include #include // #include +#include #include #include // #include @@ -38,7 +39,7 @@ class MiniCPMMLP final : public Module { std::vector Forward(std::vector inputs, std::vector args) override { auto x = 
gate_proj(inputs[0]); x = silu(x); - auto y = up_proj(inputs[0]); // ERROR + auto y = up_proj(inputs[0]); x = x * y; x = down_proj(x); return {x}; @@ -216,7 +217,7 @@ class MiniCPMMoE final : public Module { // step.2 auto expert_out = experts[expert_id]({expert_tokens})[0]; //(1, 0, 1, hidden) 1, e-s, 1, expert_out = expert_out * expert_weights_clip; //(1, 0, 1, hidden) 1, e-s, 1, hidden - expert_cache.scatter_reduce(expert_out, exp_token_idx); // 1, batch*seq, 1, hidden + expert_cache.scatter_add(expert_out, exp_token_idx); // 1, batch*seq, 1, hidden experts[expert_id].free(); // std::cout << "free: " << layer_idx << " " << expert_id << std::endl; @@ -292,10 +293,13 @@ class MiniCPMDecoder final : public Module { public: MiniCPMDecoder() = default; MiniCPMDecoder(const MiniCPMConfig &config, const MiniCPMNameConfig &names, const string &base_name) { - self_atten = MultiHeadAttention(config.hidden_size, config.num_attention_heads, config.num_key_value_heads, - config.hidden_size / config.num_attention_heads, SPLIT_NONE, false, false, + self_atten = MultiHeadAttention(config.hidden_size, config.num_attention_heads, + config.num_key_value_heads, + config.hidden_size / config.num_attention_heads, + SPLIT_NONE, PostQkv_NONE, false, config.RoPE_type, config.rope_theta, config.max_position_embeddings, config.cache_limit, - true, false, names, base_name + names._attn_base_name); + true, false, false, + config.attn_implementation, names, base_name + names._attn_base_name); moe = MiniCPMMoE(config, names, base_name + names._ffn_base_name); input_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._attn_norm_name); post_attention_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._ffn_norm_name); @@ -373,7 +377,6 @@ class MiniCPMForCausalLM final : public Module { MiniCPMForCausalLM(MiniCPMConfig &config) { num_hidden_layers = config.num_hidden_layers; num_experts = config.num_experts; - // KVCache_TYPE = 32; auto names 
= config.names_config; scale_emb = config.scale_emb; dim_model_base = config.dim_model_base; @@ -394,7 +397,7 @@ class MiniCPMForCausalLM final : public Module { omp_set_max_active_levels(2); // Enable OpenMP nesting #pragma omp parallel num_threads(2) if (omp_get_thread_num() == 0) { // 根据线程ID决定执行哪个函数 -#if defined(__ARM_NEON) +#if defined(__ARM_NEON) && !defined(__APPLE__) { struct sched_param param; param.sched_priority = 20; // 范围 1–99,根据设备可酌情调整 diff --git a/src/models/minicpm_moe/mbp/settings_minicpm_moe_mbp.hpp b/mllm/models/minicpm_moe/mbp/settings_minicpm_moe_mbp.hpp similarity index 100% rename from src/models/minicpm_moe/mbp/settings_minicpm_moe_mbp.hpp rename to mllm/models/minicpm_moe/mbp/settings_minicpm_moe_mbp.hpp diff --git a/src/models/minicpm_moe/modeling_minicpm_moe.hpp b/mllm/models/minicpm_moe/modeling_minicpm_moe.hpp similarity index 94% rename from src/models/minicpm_moe/modeling_minicpm_moe.hpp rename to mllm/models/minicpm_moe/modeling_minicpm_moe.hpp index 6ba25aa79..7212ffb28 100644 --- a/src/models/minicpm_moe/modeling_minicpm_moe.hpp +++ b/mllm/models/minicpm_moe/modeling_minicpm_moe.hpp @@ -25,7 +25,7 @@ class MiniCPMMLP final : public Module { std::vector Forward(std::vector inputs, std::vector args) override { auto x = gate_proj(inputs[0]); x = silu(x); - auto y = up_proj(inputs[0]); // ERROR + auto y = up_proj(inputs[0]); x = x * y; x = down_proj(x); return {x}; @@ -84,7 +84,7 @@ class MiniCPMMoE final : public Module { auto expert_out = experts[i]({expert_tokens})[0]; //(1, 0, 1, hidden) 1, e-s, 1, auto expert_weights_clip = expert_weights.clip(exp_idx, SEQUENCE); //(1, 0, 1, 1) 1, e-s, 1, 1 expert_out = expert_out * expert_weights_clip; //(1, 0, 1, hidden) 1, e-s, 1, hidden - expert_cache.scatter_reduce(expert_out, exp_token_idx); // 1, batch*seq, 1, hidden + expert_cache.scatter_add(expert_out, exp_token_idx); // 1, batch*seq, 1, hidden // start_idx = end_idx; } @@ -106,10 +106,13 @@ class MiniCPMDecoder final : public Module { 
public: MiniCPMDecoder() = default; MiniCPMDecoder(const MiniCPMConfig &config, const MiniCPMNameConfig &names, const string &base_name) { - self_atten = MultiHeadAttention(config.hidden_size, config.num_attention_heads, config.num_key_value_heads, - config.hidden_size / config.num_attention_heads, SPLIT_NONE, false, false, + self_atten = MultiHeadAttention(config.hidden_size, config.num_attention_heads, + config.num_key_value_heads, + config.hidden_size / config.num_attention_heads, + SPLIT_NONE, PostQkv_NONE, false, config.RoPE_type, config.rope_theta, config.max_position_embeddings, config.cache_limit, - true, false, names, base_name + names._attn_base_name); + true, false, false, + config.attn_implementation, names, base_name + names._attn_base_name); moe = MiniCPMMoE(config, names, base_name + names._ffn_base_name); input_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._attn_norm_name); post_attention_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._ffn_norm_name); @@ -174,7 +177,6 @@ class MiniCPMModel final : public Module { class MiniCPMForCausalLM final : public Module { public: MiniCPMForCausalLM(MiniCPMConfig &config) { - // KVCache_TYPE = 32; auto names = config.names_config; scale_emb = config.scale_emb; dim_model_base = config.dim_model_base; diff --git a/src/models/mistral/configuration_mistral.hpp b/mllm/models/mistral/configuration_mistral.hpp similarity index 100% rename from src/models/mistral/configuration_mistral.hpp rename to mllm/models/mistral/configuration_mistral.hpp diff --git a/src/models/mistral/modeling_mistral.hpp b/mllm/models/mistral/modeling_mistral.hpp similarity index 92% rename from src/models/mistral/modeling_mistral.hpp rename to mllm/models/mistral/modeling_mistral.hpp index 37de46474..c7741a063 100644 --- a/src/models/mistral/modeling_mistral.hpp +++ b/mllm/models/mistral/modeling_mistral.hpp @@ -51,10 +51,13 @@ class MistralDecoder final : public Module { public: 
MistralDecoder() = default; MistralDecoder(const MistralConfig &config, const MistralNameConfig &names, const string &base_name) { - self_atten = MultiHeadAttention(config.hidden_size, config.num_attention_heads, config.num_key_value_heads, - config.hidden_size / config.num_attention_heads, SPLIT_NONE, false, false, + self_atten = MultiHeadAttention(config.hidden_size, config.num_attention_heads, + config.num_key_value_heads, + config.hidden_size / config.num_attention_heads, + SPLIT_NONE, PostQkv_NONE, false, config.RoPE_type, config.rope_theta, config.max_position_embeddings, config.cache_limit, - true, false, names, base_name + names._attn_base_name); + true, false,false, + config.attn_implementation, names, base_name + names._attn_base_name); mlp = MistralMLP(config.hidden_size, config.intermediate_size, names, base_name + names._ffn_base_name); input_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._attn_norm_name); post_attention_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._ffn_norm_name); diff --git a/src/models/mistral/tokenization_mistral.hpp b/mllm/models/mistral/tokenization_mistral.hpp similarity index 100% rename from src/models/mistral/tokenization_mistral.hpp rename to mllm/models/mistral/tokenization_mistral.hpp diff --git a/src/models/openelm/configuration_openelm.hpp b/mllm/models/openelm/configuration_openelm.hpp similarity index 100% rename from src/models/openelm/configuration_openelm.hpp rename to mllm/models/openelm/configuration_openelm.hpp diff --git a/src/models/openelm/modeling_openelm.hpp b/mllm/models/openelm/modeling_openelm.hpp similarity index 92% rename from src/models/openelm/modeling_openelm.hpp rename to mllm/models/openelm/modeling_openelm.hpp index 8700729d1..12168fc76 100644 --- a/src/models/openelm/modeling_openelm.hpp +++ b/mllm/models/openelm/modeling_openelm.hpp @@ -51,6 +51,7 @@ class OpenELMMultiHeadCausalAttention final : public Module { Softmax softmax; 
int iter = 0; + string attn_impl; public: OpenELMMultiHeadCausalAttention() = default; @@ -60,6 +61,7 @@ class OpenELMMultiHeadCausalAttention final : public Module { q_heads_ = cfg.num_query_heads[layer_idx]; k_heads_ = cfg.num_kv_heads[layer_idx]; v_heads_ = cfg.num_kv_heads[layer_idx]; + attn_impl = cfg.attn_implementation; qkv_proj = Linear(cfg.model_dim, (q_heads_ + k_heads_ + v_heads_) * head_dim_, false, base_name + "qkv_proj"); q_rope = RoPE(cfg.RoPE_type, cfg.rope_freq_constant, cfg.rope_max_length, base_name + "q_rope"); @@ -70,8 +72,8 @@ class OpenELMMultiHeadCausalAttention final : public Module { out_proj = Linear(q_heads_ * head_dim_, cfg.model_dim, false, base_name + "out_proj"); - k_cache = KVCache(k_heads_, head_dim_, q_heads_ / k_heads_, cfg.cache_limit, base_name + "k_cache"); - v_cache = KVCache(v_heads_, head_dim_, q_heads_ / v_heads_, cfg.cache_limit, base_name + "v_cache"); + k_cache = KVCache(k_heads_, head_dim_, q_heads_ / k_heads_, cfg.cache_limit, attn_impl, base_name + "k_cache"); + v_cache = KVCache(v_heads_, head_dim_, q_heads_ / v_heads_, cfg.cache_limit, attn_impl, base_name + "v_cache"); softmax = Softmax(DIMENSION, true, base_name + "softmax"); } @@ -98,13 +100,16 @@ class OpenELMMultiHeadCausalAttention final : public Module { k = k_cache(k); v = v_cache(v); - k = k.transpose(SEQUENCE, DIMENSION); - auto qk = Tensor::mm(q, k); - - qk = qk / std::sqrt(head_dim_); - - qk = softmax(qk, k_cache.getCacheSeqLen()); - auto o = Tensor::mm(qk, v); + Tensor o; + if (attn_impl == "flash_attention_2") { + o = Tensor::flash_attention2_forward(q, k, v, true); + } else { // eager implementation + k = k.transpose(SEQUENCE, DIMENSION); + auto qk = Tensor::mm(q, k); + qk = qk / std::sqrt(head_dim_); + qk = softmax(qk, k_cache.getCacheSeqLen()); + o = Tensor::mm(qk, v); + } o = o.view(-1, 1, -1, q_heads_ * head_dim_); o = out_proj(o); diff --git a/src/models/opt/configuration_opt.hpp b/mllm/models/opt/configuration_opt.hpp similarity index 100% 
rename from src/models/opt/configuration_opt.hpp rename to mllm/models/opt/configuration_opt.hpp diff --git a/src/models/opt/configuration_opt_qnn.hpp b/mllm/models/opt/configuration_opt_qnn.hpp similarity index 100% rename from src/models/opt/configuration_opt_qnn.hpp rename to mllm/models/opt/configuration_opt_qnn.hpp diff --git a/src/models/opt/modeling_opt.hpp b/mllm/models/opt/modeling_opt.hpp similarity index 72% rename from src/models/opt/modeling_opt.hpp rename to mllm/models/opt/modeling_opt.hpp index b40270924..b83f879be 100644 --- a/src/models/opt/modeling_opt.hpp +++ b/mllm/models/opt/modeling_opt.hpp @@ -16,9 +16,13 @@ class OPTBlock final : public Module { public: OPTBlock() = default; - OPTBlock(int hidden_dim, int head_size, int ffn_hidden, int cache_limit, const optNameConfig &names, const string &base_name) { - attention = MultiHeadAttention(hidden_dim, head_size, head_size, hidden_dim / head_size, SPLIT_NONE, false, false, - NONE, -1, -1, cache_limit, true, true, names, base_name + names._attn_base_name); + OPTBlock(int hidden_dim, int head_size, int ffn_hidden, int cache_limit, + string attn_implementation, const optNameConfig &names, const string &base_name) { + attention = MultiHeadAttention(hidden_dim, head_size, head_size, + hidden_dim / head_size, SPLIT_NONE, PostQkv_NONE, false, + NONE, -1, -1, cache_limit, true, true, true, + attn_implementation, + names, base_name + names._attn_base_name); mlp = FeedForward(hidden_dim, ffn_hidden, "ReLU", true, names, base_name + names._ffn_base_name); norm1 = LayerNorm(hidden_dim, true, 1e-05, base_name + names._attn_norm_name); @@ -47,13 +51,18 @@ class OPTModel final : public Module { public: explicit OPTModel(const OPTConfig &config) : - OPTModel(config.vocab_size, config.hidden_dim, config.head_size, config.ffn_hidden, config.block_num, config.cache_limit, config.names_config, config.names_config.blk_name) { + OPTModel(config.vocab_size, config.hidden_dim, + config.head_size, config.ffn_hidden, 
config.block_num, config.cache_limit, + config.attn_implementation, + config.names_config, config.names_config.blk_name) { } - OPTModel(int vocab_size, int hidden_dim, int head_size, int ffn_hidden, int block_num, int cache_limit, const optNameConfig &names, const string &base_name) { + OPTModel(int vocab_size, int hidden_dim, int head_size, int ffn_hidden, int block_num, int cache_limit, + string attn_implementation, + const optNameConfig &names, const string &base_name) { embedding = Embedding(vocab_size, hidden_dim, names.token_embd_name); pos_embedding = Embedding(2050, hidden_dim, names.pos_name); pos = Position("pos"); - blocks = List(block_num, hidden_dim, head_size, ffn_hidden, cache_limit, names, base_name); + blocks = List(block_num, hidden_dim, head_size, ffn_hidden, cache_limit, attn_implementation, names, base_name); norm = LayerNorm(hidden_dim, true, 1e-05, names.post_norm_name); lm_head = Linear(hidden_dim, vocab_size, false, names.lm_head_name); } diff --git a/src/models/opt/modeling_opt_qnn.hpp b/mllm/models/opt/modeling_opt_qnn.hpp similarity index 100% rename from src/models/opt/modeling_opt_qnn.hpp rename to mllm/models/opt/modeling_opt_qnn.hpp diff --git a/src/models/opt/tokenization_opt.hpp b/mllm/models/opt/tokenization_opt.hpp similarity index 100% rename from src/models/opt/tokenization_opt.hpp rename to mllm/models/opt/tokenization_opt.hpp diff --git a/src/models/phi3/configuration_phi3.hpp b/mllm/models/phi3/configuration_phi3.hpp similarity index 100% rename from src/models/phi3/configuration_phi3.hpp rename to mllm/models/phi3/configuration_phi3.hpp diff --git a/src/models/phi3/modeling_phi3.hpp b/mllm/models/phi3/modeling_phi3.hpp similarity index 88% rename from src/models/phi3/modeling_phi3.hpp rename to mllm/models/phi3/modeling_phi3.hpp index b88aaca46..fc0899156 100644 --- a/src/models/phi3/modeling_phi3.hpp +++ b/mllm/models/phi3/modeling_phi3.hpp @@ -45,9 +45,8 @@ class Phi3Block final : public Module { public: Phi3Block() = 
default; - Phi3Block(int hidden_dim, int head_size, int kv_head_size, int ffn_hidden, RoPEType RoPE_type, float rope_theta, int max_position_embeddings, int cache_limit, const Phi3NameConfig &names, const string &base_name) { - attention = MultiHeadAttention(hidden_dim, head_size, kv_head_size, hidden_dim / head_size, SPLIT_HD, false, false, - RoPE_type, rope_theta, max_position_embeddings, cache_limit, true, false, names, base_name + names._attn_base_name); + Phi3Block(int hidden_dim, int head_size, int kv_head_size, int ffn_hidden, RoPEType RoPE_type, float rope_theta, int max_position_embeddings, int cache_limit, string attn_implementation, const Phi3NameConfig &names, const string &base_name) { + attention = MultiHeadAttention(hidden_dim, head_size, kv_head_size, hidden_dim / head_size, SPLIT_HD, PostQkv_NONE, false, RoPE_type, rope_theta, max_position_embeddings, cache_limit, true, false, false,attn_implementation, names, base_name + names._attn_base_name); mlp = Phi3MLP(hidden_dim, ffn_hidden, names, base_name + names._ffn_base_name); norm1 = RMSNorm(hidden_dim, 1e-6, base_name + names._attn_norm_name); norm2 = RMSNorm(hidden_dim, 1e-6, base_name + names._ffn_norm_name); @@ -76,13 +75,13 @@ class Phi3Model final : public Module { public: explicit Phi3Model(const Phi3Config &config) : Phi3Model(config.vocab_size, config.hidden_dim, config.head_size, config.num_key_value_heads, config.ffn_hidden, config.block_num, - config.RoPE_type, config.rope_theta, config.max_position_embeddings, config.cache_limit, + config.RoPE_type, config.rope_theta, config.max_position_embeddings, config.cache_limit, config.attn_implementation, config.names_config, config.names_config.blk_name) { } - Phi3Model(int vocab_size, int hidden_dim, int head_size, int kv_head_size, int ffn_hidden, int block_num, RoPEType RoPE_type, float rope_theta, int max_position_embeddings, int cache_limit, + Phi3Model(int vocab_size, int hidden_dim, int head_size, int kv_head_size, int ffn_hidden, int 
block_num, RoPEType RoPE_type, float rope_theta, int max_position_embeddings, int cache_limit, string attn_implementation, const Phi3NameConfig &names, const string &base_name) { embedding = Embedding(vocab_size, hidden_dim, names.token_embd_name); - blocks = List(block_num, hidden_dim, head_size, kv_head_size, ffn_hidden, RoPE_type, rope_theta, max_position_embeddings, cache_limit, names, base_name); + blocks = List(block_num, hidden_dim, head_size, kv_head_size, ffn_hidden, RoPE_type, rope_theta, max_position_embeddings, cache_limit, attn_implementation, names, base_name); norm = RMSNorm(hidden_dim, 1e-6, names.post_norm_name); lm_head = Linear(hidden_dim, vocab_size, false, names.lm_head_name); } diff --git a/src/models/phi3/tokenization_phi3.hpp b/mllm/models/phi3/tokenization_phi3.hpp similarity index 100% rename from src/models/phi3/tokenization_phi3.hpp rename to mllm/models/phi3/tokenization_phi3.hpp diff --git a/src/models/phi3v/configuration_phi3v.hpp b/mllm/models/phi3v/configuration_phi3v.hpp similarity index 100% rename from src/models/phi3v/configuration_phi3v.hpp rename to mllm/models/phi3v/configuration_phi3v.hpp diff --git a/src/models/phi3v/modeling_phi3v.hpp b/mllm/models/phi3v/modeling_phi3v.hpp similarity index 90% rename from src/models/phi3v/modeling_phi3v.hpp rename to mllm/models/phi3v/modeling_phi3v.hpp index 9efb8c22e..7502a9a9b 100644 --- a/src/models/phi3v/modeling_phi3v.hpp +++ b/mllm/models/phi3v/modeling_phi3v.hpp @@ -50,11 +50,12 @@ class Phi3VisionModel final : public Module { public: Phi3VisionModel() = default; Phi3VisionModel(int hidden_dim, int head_size, int ffn_hidden, const string &act_fn_type, int patch, int img_hw, int block_num, + string attn_implementation, const Phi3VNameConfig &names, const string &base_name) { embedding = Phi3VisionEmbedding(hidden_dim, patch, img_hw, names, base_name + names._embd_name); pre_layrnorm = LayerNorm(hidden_dim, true, 1e-5, base_name + names._vision_pre_layrnorm_name); clip_len_ = 
std::ceil(img_hw / patch) * std::ceil(img_hw / patch) + 1; - blocks = List(block_num, hidden_dim, head_size, ffn_hidden, act_fn_type, names, base_name + names._layer_name); + blocks = List(block_num, hidden_dim, head_size, ffn_hidden, act_fn_type, attn_implementation, names, base_name + names._layer_name); } vector Forward(vector inputs, vector args) override { auto x = embedding(inputs)[0]; @@ -81,9 +82,9 @@ class Phi3Embedding final : public Module { public: Phi3Embedding() = default; - explicit Phi3Embedding(int vocab_size, int hidden_dim, int head_size, int ffn, int vision_hidden_dim, string &projection_cls, const Phi3VNameConfig &nameconfig, const string &base_name, const string &embd_name) { + explicit Phi3Embedding(int vocab_size, int hidden_dim, int head_size, int ffn, int vision_hidden_dim, string &projection_cls, const Phi3VNameConfig &nameconfig, string attn_implementation, const string &base_name, const string &embd_name) { embed_tokens = Embedding(vocab_size, hidden_dim, embd_name); - img_processor = Phi3VisionModel(vision_hidden_dim, 16, vision_hidden_dim * 4, "QuickGELU", 14, 336, 23, nameconfig, nameconfig.vison_model_name); + img_processor = Phi3VisionModel(vision_hidden_dim, 16, vision_hidden_dim * 4, "QuickGELU", 14, 336, 23, attn_implementation, nameconfig, nameconfig.vison_model_name); glb_GN = Parameter(1, 1, 1, vision_hidden_dim * 4, nameconfig._vision_model_prefix + nameconfig._glb_GN); sub_GN = Parameter(1, 1, 1, vision_hidden_dim * 4, nameconfig._vision_model_prefix + nameconfig._sub_GN); project_cls = projection_cls; @@ -150,15 +151,15 @@ class Phi3VModel final : public Module { public: explicit Phi3VModel(const Phi3VConfig &config) : - Phi3VModel(config.vocab_size, config.hidden_dim, config.head_size, config.num_key_value_heads, config.ffn_hidden, config.block_num, - config.RoPE_type, config.rope_theta, config.max_position_embeddings, config.cache_limit, config.vision_hidden_dim, config.projection_cls, config.name_config, + 
Phi3VModel(config.vocab_size, config.hidden_dim, config.head_size, + config.num_key_value_heads, config.ffn_hidden, config.block_num, + config.RoPE_type, config.rope_theta, config.max_position_embeddings, config.cache_limit, config.vision_hidden_dim, config.projection_cls, config.attn_implementation, config.name_config, config.names_config, config.names_config.blk_name) { } - Phi3VModel(int vocab_size, int hidden_dim, int head_size, int kv_head_size, int ffn_hidden, int block_num, RoPEType RoPE_type, float rope_theta, int max_position_embeddings, int cache_limit, int vision_hidden_dim, string projection_cls, const Phi3VNameConfig &visionconfig, - const Phi3NameConfig &names, const string &base_name) { + Phi3VModel(int vocab_size, int hidden_dim, int head_size, int kv_head_size, int ffn_hidden, int block_num, RoPEType RoPE_type, float rope_theta, int max_position_embeddings, int cache_limit, int vision_hidden_dim, string projection_cls, string attn_implementation, const Phi3VNameConfig &visionconfig, const Phi3NameConfig &names, const string &base_name) { norm = RMSNorm(hidden_dim, 1e-6, names.post_norm_name); - vision_embed_tokens = Phi3Embedding(vocab_size, hidden_dim, head_size, ffn_hidden, vision_hidden_dim, projection_cls, visionconfig, base_name, names.token_embd_name); - blocks = List(block_num, hidden_dim, head_size, kv_head_size, ffn_hidden, RoPE_type, rope_theta, max_position_embeddings, cache_limit, names, base_name); + vision_embed_tokens = Phi3Embedding(vocab_size, hidden_dim, head_size, ffn_hidden, vision_hidden_dim, projection_cls, visionconfig, attn_implementation, base_name, names.token_embd_name); + blocks = List(block_num, hidden_dim, head_size, kv_head_size, ffn_hidden, RoPE_type, rope_theta, max_position_embeddings, cache_limit, attn_implementation, names, base_name); norm = RMSNorm(hidden_dim, 1e-6, names.post_norm_name); lm_head = Linear(hidden_dim, vocab_size, false, names.lm_head_name); } diff --git a/src/models/phi3v/processing_phi3v.hpp 
b/mllm/models/phi3v/processing_phi3v.hpp similarity index 99% rename from src/models/phi3v/processing_phi3v.hpp rename to mllm/models/phi3v/processing_phi3v.hpp index 9e1bb6d02..bbcf2d024 100644 --- a/src/models/phi3v/processing_phi3v.hpp +++ b/mllm/models/phi3v/processing_phi3v.hpp @@ -162,7 +162,7 @@ class Phi3VImageProcessor { time_all = times; } } - Tensor tensor1(Backend::global_backends[type]); + Tensor tensor1(Backend::global_backends[type].get()); tensor1.reshape(batch_size, imgs[0].size(), time_all, 336, 336); tensor1.alloc(); memset(tensor1.hostPtr(), 0, tensor1.count() * sizeof(float)); @@ -222,7 +222,7 @@ class Phi3VImageProcessor { time_all = times; } } - Tensor tensor1(Backend::global_backends[type]); + Tensor tensor1(Backend::global_backends[type].get()); tensor1.reshape(batch_size * time_all, 336, imgs[0].size(), 336); tensor1.alloc(); memset(tensor1.hostPtr(), 0, tensor1.count() * sizeof(float)); @@ -344,7 +344,7 @@ class Phi3VProcessor final { //} : public PreProcessor { static Tensor tokens2Input(vector> tokens, string name = "input", BackendType type = MLLM_CPU) { const auto bsize = static_cast(tokens.size()); - Tensor tensor1(bsize, 1, static_cast(tokens[0].size()), 1, Backend::global_backends[type], true); + Tensor tensor1(bsize, 1, static_cast(tokens[0].size()), 1, Backend::global_backends[type].get(), true); tensor1.setName(name); Tensor::tensor_status = TENSOR_STATIC_INIT; tensor1.setTtype(INPUT_TENSOR); diff --git a/src/models/phonelm/configuration_phonelm.hpp b/mllm/models/phonelm/configuration_phonelm.hpp similarity index 100% rename from src/models/phonelm/configuration_phonelm.hpp rename to mllm/models/phonelm/configuration_phonelm.hpp diff --git a/src/models/phonelm/modeling_phonelm.hpp b/mllm/models/phonelm/modeling_phonelm.hpp similarity index 91% rename from src/models/phonelm/modeling_phonelm.hpp rename to mllm/models/phonelm/modeling_phonelm.hpp index 31a8dfefa..8da748dd8 100644 --- a/src/models/phonelm/modeling_phonelm.hpp +++ 
b/mllm/models/phonelm/modeling_phonelm.hpp @@ -50,6 +50,7 @@ class PhoneLMAttention final : public Module { head_dim = config.hidden_size / num_heads; num_key_value_heads = config.num_key_value_heads; num_key_value_groups = num_heads / num_key_value_heads; + attn_impl = config.attn_implementation; // init layers q_proj = Linear(hidden_size, num_heads * head_dim, false, base_name + names._q_proj_name); @@ -62,8 +63,8 @@ class PhoneLMAttention final : public Module { base_name + "q_rope"); k_rope = IRoPE(config.RoPE_type, config.rope_theta, config.max_position_embeddings, base_name + "k_rope"); - k_cache = KVCache(num_key_value_heads, head_dim, num_key_value_groups, config.cache_limit, base_name + "k_cache"); - v_cache = KVCache(num_key_value_heads, head_dim, num_key_value_groups, config.cache_limit, base_name + "v_cache"); + k_cache = KVCache(num_key_value_heads, head_dim, num_key_value_groups, config.cache_limit, attn_impl, base_name + "k_cache"); + v_cache = KVCache(num_key_value_heads, head_dim, num_key_value_groups, config.cache_limit, attn_impl, base_name + "v_cache"); softmax = Softmax(DIMENSION, true, base_name + "softmax"); } @@ -84,15 +85,20 @@ class PhoneLMAttention final : public Module { k = k_cache(k); v = v_cache(v); } - k = k.transpose(SEQUENCE, DIMENSION); - auto qk = Tensor::mm(q, k); - qk = qk / std::sqrt(head_dim); - if (k_cache.ready() && v_cache.ready()) { - qk = softmax(qk, k_cache.getCacheSeqLen()); - } else { - qk = softmax(qk); + Tensor o; + if (attn_impl == "flash_attention_2") { + o = Tensor::flash_attention2_forward(q, k, v, true); + } else { // eager implementation + k = k.transpose(SEQUENCE, DIMENSION); + auto qk = Tensor::mm(q, k); + qk = qk / std::sqrt(head_dim); + if (k_cache.ready() && v_cache.ready()) { + qk = softmax(qk, k_cache.getCacheSeqLen()); + } else { + qk = softmax(qk); + } + auto o = Tensor::mm(qk, v); } - auto o = Tensor::mm(qk, v); o = o.view(-1, 1, -1, head_dim * num_heads); o = o_proj(o); return {o}; @@ -120,6 +126,7 
@@ class PhoneLMAttention final : public Module { KVCache k_cache; KVCache v_cache; Softmax softmax; + string attn_impl; }; class PhoneLMDecoder final : public Module { diff --git a/src/models/phonelm/modeling_phonelm_npu.hpp b/mllm/models/phonelm/modeling_phonelm_npu.hpp similarity index 72% rename from src/models/phonelm/modeling_phonelm_npu.hpp rename to mllm/models/phonelm/modeling_phonelm_npu.hpp index c13d0906f..e47f8b76b 100644 --- a/src/models/phonelm/modeling_phonelm_npu.hpp +++ b/mllm/models/phonelm/modeling_phonelm_npu.hpp @@ -10,8 +10,11 @@ using namespace mllm; +std::set phonelmShadowLayers = {0, 1, 3, 4}; + // NPU QKV part -class PhoneLMDecoderNPUPart1 final : public Module { +class PhoneLMDecoderNPUPart1 : public Module { +protected: int hidden_size; int num_heads; int head_dim; @@ -81,6 +84,64 @@ class PhoneLMDecoderNPUPart1 final : public Module { } }; +class PhoneLMDecoderNPUPart1WithRes final : public PhoneLMDecoderNPUPart1 { + Layer input_layernorm; + Layer pre_attn_quantize; + +public: + PhoneLMDecoderNPUPart1WithRes() = default; + + PhoneLMDecoderNPUPart1WithRes(const PhoneLMConfig &config, const PhoneLMNameConfig &names, int chunk_size, const string &base_name) { + hidden_size = config.hidden_size; + num_heads = config.num_attention_heads; + head_dim = config.hidden_size / num_heads; + num_key_value_heads = config.num_key_value_heads; + num_key_value_groups = num_heads / num_key_value_heads; + + auto layer_base_name = base_name.substr(0, base_name.size() - 10); + input_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, layer_base_name + names._attn_norm_name); + pre_attn_quantize = Quantize(true, layer_base_name + names._attn_base_name + names._q_proj_name + ".quantize"); + + pre_attn_view = View(-1, 1, -1, num_heads * head_dim, base_name + "ires_split-00_view_"); + + q_proj = Linear(hidden_size, num_heads * head_dim, false, base_name + names._q_proj_name); + k_proj = Linear(hidden_size, num_key_value_heads * head_dim, false, 
base_name + names._k_proj_name); + v_proj = Linear(hidden_size, num_key_value_heads * head_dim, false, base_name + names._v_proj_name); + + q_view = View(-1, num_heads, -1, head_dim, base_name + names._q_proj_name + "-00_view_"); + k_view = View(-1, num_heads, -1, head_dim, base_name + names._k_proj_name + "-00_view_"); + v_view = View(-1, num_heads, -1, head_dim, base_name + names._v_proj_name + "-00_view_"); + + q_dequant = Dequantize(true, base_name + names._q_proj_name + ".dequantize"); + k_dequant = Dequantize(true, base_name + names._k_proj_name + ".dequantize", false); + v_dequant = Dequantize(true, base_name + names._v_proj_name + ".dequantize", false); + + v_transpose = Transpose({0, 2, 3, 1}, base_name + names._v_proj_name + ".transpose"); + } + + vector Forward(vector inputs, vector args) override { + auto x = input_layernorm(inputs[0]); + x = pre_attn_quantize(x); + + x = pre_attn_view(x); + + auto query_states = q_proj(x); + auto key_states = k_proj(x); + auto value_states = v_proj(x); + + query_states = q_view(query_states); + key_states = k_view(key_states); + value_states = v_view(value_states); + + query_states = q_dequant(query_states); + key_states = k_dequant(key_states); + value_states = v_dequant(value_states); + + value_states = v_transpose(value_states); + return {query_states, key_states, value_states, inputs[0]}; + } +}; + // CPU QKV MM part class PhoneLMQKVmm final : public Module { IRoPE q_rope; @@ -140,7 +201,8 @@ class PhoneLMQKVmm final : public Module { }; // QNN mlp part -class PhoneLMDecoderNPUPart2 final : public Module { +class PhoneLMDecoderNPUPart2 : public Module { +protected: int hidden_size; int num_heads; int head_dim; @@ -250,39 +312,7 @@ class PhoneLMDecoderNPUPart2 final : public Module { } }; -class PhoneLMDecoderNPUPart2WithShadow final : public Module { - int hidden_size; - int num_heads; - int head_dim; - int num_key_value_heads; - int num_key_value_groups; - int intermediate_size; - - // NPU part2 of attention - 
Layer pre_oproj_view; - Layer out_proj; - Layer post_oproj_view; - Layer post_oproj_dequantize; - - // NPU mlp - Layer pre_mlp_quantize; - Layer pre_mlp_view; - Layer gate_proj; - Layer up_proj; - Layer post_up_proj_dequantize; - Layer post_gate_proj_dequantize; - Layer relu; - Layer post_attn_layernorm; - - Layer down_proj; - Layer pre_down_proj_quantize; - Layer post_down_proj_dequantize; - Layer post_mlp_view; - - Layer post_atten_res_add; - Layer post_mlp_res_add; - Layer mlp_mul; - +class PhoneLMDecoderNPUPart2WithShadow final : public PhoneLMDecoderNPUPart2 { public: PhoneLMDecoderNPUPart2WithShadow() = default; @@ -370,11 +400,17 @@ class PhoneLMNPU_CPUDecoder final : public Module { int num_key_value_heads; int num_key_value_groups; + int layer_idx; + int num_layers; + + SubgraphStart _SubgraphStart_1, _SubgraphStart_2; + SubgraphFinalize _SubgraphEnd_1, _SubgraphEnd_2; + Layer input_layernorm; Layer pre_attn_quantize; - PhoneLMDecoderNPUPart1 part1; + unique_ptr part1; PhoneLMQKVmm qkv_mm; - PhoneLMDecoderNPUPart2 part2; + unique_ptr part2; public: PhoneLMNPU_CPUDecoder() = default; @@ -386,39 +422,65 @@ class PhoneLMNPU_CPUDecoder final : public Module { num_key_value_heads = config.num_key_value_heads; num_key_value_groups = num_heads / num_key_value_heads; - input_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._attn_norm_name); - pre_attn_quantize = Quantize(true, base_name + names._attn_base_name + names._q_proj_name + ".quantize"); - - part1 = PhoneLMDecoderNPUPart1(config, names, chunk_size, base_name + names._attn_base_name); - part1.to(MLLM_QNN); + // extract layer index from base_name like "model.layers.10." 
+ std::regex re(R"(\d+)"); + std::smatch match; + std::regex_search(base_name, match, re); + layer_idx = std::stoi(match[0]); + num_layers = config.num_hidden_layers; + + if (layer_idx == 0 || phonelmShadowLayers.find(layer_idx - 1) != phonelmShadowLayers.end()) { + input_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._attn_norm_name); + pre_attn_quantize = Quantize(true, base_name + names._attn_base_name + names._q_proj_name + ".quantize"); + part1 = make_unique(config, names, chunk_size, base_name + names._attn_base_name); + } else { + part1 = make_unique(config, names, chunk_size, base_name + names._attn_base_name); + } qkv_mm = PhoneLMQKVmm(config, names, chunk_size, base_name + names._attn_base_name); - qkv_mm.to(MLLM_CPU); - part2 = PhoneLMDecoderNPUPart2(config, names, chunk_size, base_name); - part2.to(MLLM_QNN); + part2 = make_unique(config, names, chunk_size, base_name); + + _SubgraphStart_1 = SubgraphStart(base_name + "subgraph_start1"); + _SubgraphEnd_1 = SubgraphFinalize(base_name + "subgraph_end1"); + _SubgraphStart_2 = SubgraphStart(base_name + "subgraph_start2"); + _SubgraphEnd_2 = SubgraphFinalize(base_name + "subgraph_end2"); } vector Forward(vector inputs, vector args) override { - auto x = input_layernorm(inputs[0]); - x = pre_attn_quantize(x); - - if (x.device() != MLLM_QNN) { - x = Tensor::toQNN({x})[0]; + Tensor x, q, k, v, res; + if (layer_idx == 0 || phonelmShadowLayers.find(layer_idx - 1) != phonelmShadowLayers.end()) { + x = input_layernorm(inputs[0]); + x = pre_attn_quantize(x); + + _SubgraphStart_1({x}); + + auto q_k_v = (*part1)({x}); // q,k,v + q = q_k_v[0]; + k = q_k_v[1]; + v = q_k_v[2]; + res = inputs[0]; + _SubgraphEnd_1(q_k_v); + } else { + auto q_k_v_res = (*part1)(inputs); // q,k,v,res + q = q_k_v_res[0]; + k = q_k_v_res[1]; + v = q_k_v_res[2]; + res = q_k_v_res[3]; + _SubgraphEnd_1(q_k_v_res); } - auto q_k_v = part1({x}); // q,k,v - auto o_x = qkv_mm(q_k_v)[0]; + auto o_x = qkv_mm({q, k, v})[0]; 
- if (o_x.device() != MLLM_QNN) { - o_x = Tensor::toQNN({o_x})[0]; - } - if (inputs[0].device() != MLLM_QNN) { - inputs[0] = Tensor::toQNN({inputs[0]})[0]; + _SubgraphStart_2({o_x, res}); + + auto out_part2 = (*part2)({o_x, res}); + + if (layer_idx == num_layers - 1) { + _SubgraphEnd_2(out_part2); } - x = part2({o_x, inputs[0]})[0]; - return {x}; + return out_part2; } }; @@ -432,9 +494,15 @@ class PhoneLMNPU_CPUDecoderWithShadow final : public Module { Layer input_layernorm; Layer pre_attn_quantize; Layer shadow_linear; - PhoneLMDecoderNPUPart1 part1; + unique_ptr part1; PhoneLMQKVmm qkv_mm; - PhoneLMDecoderNPUPart2WithShadow part2; + unique_ptr part2; + + int layer_idx; + int num_layers; + + SubgraphStart _SubgraphStart_1, _SubgraphStart_2; + SubgraphFinalize _SubgraphEnd_1, _SubgraphEnd_2; public: PhoneLMNPU_CPUDecoderWithShadow() = default; @@ -446,45 +514,69 @@ class PhoneLMNPU_CPUDecoderWithShadow final : public Module { num_key_value_heads = config.num_key_value_heads; num_key_value_groups = num_heads / num_key_value_heads; - input_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._attn_norm_name); - pre_attn_quantize = Quantize(true, base_name + names._attn_base_name + names._q_proj_name + ".quantize"); - - part1 = PhoneLMDecoderNPUPart1(config, names, chunk_size, base_name + names._attn_base_name); - part1.to(MLLM_QNN); + // extract layer index from base_name like "model.layers.10." 
+ std::regex re(R"(\d+)"); + std::smatch match; + std::regex_search(base_name, match, re); + layer_idx = std::stoi(match[0]); + num_layers = config.num_hidden_layers; + + if (layer_idx == 0 || phonelmShadowLayers.find(layer_idx - 1) != phonelmShadowLayers.end()) { + input_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._attn_norm_name); + pre_attn_quantize = Quantize(true, base_name + names._attn_base_name + names._q_proj_name + ".quantize"); + part1 = make_unique(config, names, chunk_size, base_name + names._attn_base_name); + } else { + part1 = make_unique(config, names, chunk_size, base_name + names._attn_base_name); + } qkv_mm = PhoneLMQKVmm(config, names, chunk_size, base_name + names._attn_base_name); - qkv_mm.to(MLLM_CPU); - part2 = PhoneLMDecoderNPUPart2WithShadow(config, names, chunk_size, base_name); - part2.to(MLLM_QNN); + part2 = make_unique(config, names, chunk_size, base_name); shadow_linear = ShadowLinear(config.intermediate_size, hidden_size, 1024, false, base_name + names._ffn_base_name + names._down_proj_name + ".shadow"); + + _SubgraphStart_1 = SubgraphStart(base_name + "subgraph_start1"); + _SubgraphEnd_1 = SubgraphFinalize(base_name + "subgraph_end1"); + _SubgraphStart_2 = SubgraphStart(base_name + "subgraph_start2"); + _SubgraphEnd_2 = SubgraphFinalize(base_name + "subgraph_end2"); } vector Forward(vector inputs, vector args) override { - auto x = input_layernorm(inputs[0]); - x = pre_attn_quantize(x); - - if (x.device() != MLLM_QNN) { - x = Tensor::toQNN({x})[0]; + Tensor x, q, k, v, res; + if (layer_idx == 0 || phonelmShadowLayers.find(layer_idx - 1) != phonelmShadowLayers.end()) { + x = input_layernorm(inputs[0]); + x = pre_attn_quantize(x); + + _SubgraphStart_1({x}); + + auto q_k_v = (*part1)({x}); // q,k,v + q = q_k_v[0]; + k = q_k_v[1]; + v = q_k_v[2]; + res = inputs[0]; + _SubgraphEnd_1(q_k_v); + } else { + auto q_k_v_res = (*part1)(inputs); // q,k,v,res + q = q_k_v_res[0]; + k = q_k_v_res[1]; + v = 
q_k_v_res[2]; + res = q_k_v_res[3]; + _SubgraphEnd_1(q_k_v_res); } - auto q_k_v = part1({x}); // q,k,v - auto o_x = qkv_mm(q_k_v)[0]; + auto o_x = qkv_mm({q, k, v})[0]; + + _SubgraphStart_2({o_x, res}); + + auto decoder_out = (*part2)({o_x, res}); + decoder_out = Tensor::toCPU(decoder_out); + + _SubgraphEnd_2(decoder_out); - if (o_x.device() != MLLM_QNN) { - o_x = Tensor::toQNN({o_x})[0]; - } - if (inputs[0].device() != MLLM_QNN) { - inputs[0] = Tensor::toQNN({inputs[0]})[0]; - } - auto decoder_out = part2({o_x, inputs[0]}); - if (decoder_out[0].device() != MLLM_CPU) { - decoder_out = Tensor::toCPU(decoder_out); - } auto shadow_input_1 = decoder_out[0]; auto shadow_input_2 = decoder_out[1]; x = decoder_out[2]; + x = shadow_linear(shadow_input_1, shadow_input_2, x); return {x}; @@ -499,11 +591,10 @@ class PhoneLMModel_NPU final : public Module { static_assert(std::is_base_of::value, "SHADOW must be a subclass of Module"); listIdx = 0; vector> modules; - std::set shadowLayers = {0, 1, 3, 4}; - // for index in shadowLayers, create shadow decoder, for others, create normal decoder + // for index in phonelmShadowLayers, create shadow decoder, for others, create normal decoder for (int i = 0; i < n; i++) { auto new_args = change_last(args...); // 创建新的参数包,最后一个参数被修改为原来的值+ std::to_string(listIdx)+ "." 
- if (shadowLayers.find(listIdx) != shadowLayers.end()) { + if (phonelmShadowLayers.find(listIdx) != phonelmShadowLayers.end()) { modules.push_back(std::make_unique(std::apply([&](auto &&...args) { return SHADOW(std::forward(args)...); }, new_args))); } else { modules.push_back(std::make_unique(std::apply([&](auto &&...args) { return T1(std::forward(args)...); }, new_args))); diff --git a/src/models/qwen/README.md b/mllm/models/qwen/README.md similarity index 100% rename from src/models/qwen/README.md rename to mllm/models/qwen/README.md diff --git a/src/models/qwen/configuration_qwen.hpp b/mllm/models/qwen/configuration_qwen.hpp similarity index 59% rename from src/models/qwen/configuration_qwen.hpp rename to mllm/models/qwen/configuration_qwen.hpp index 446984385..6dfb29040 100644 --- a/src/models/qwen/configuration_qwen.hpp +++ b/mllm/models/qwen/configuration_qwen.hpp @@ -102,6 +102,25 @@ struct QWenConfig : public TransformerConfig { sliding_window = 32768; vocab_size = 151936; tie_embedding_words = true; + } else if (billionsType == "0.5b-lm") { + attention_dropout = 0.0; + bos_token_id = 151643; + eos_token_id = 151645; + std::string hidden_act = "silu"; + hidden_size = 1024; + initializer_range = 0.02; + intermediate_size = 2816; + max_position_embeddings = 32768; + max_window_layers = 21; + model_type = "qwen2"; + num_attention_heads = 16; + num_hidden_layers = 24; + num_key_value_heads = 16; + rms_norm_eps = 1e-6; + rope_theta = 1000000.0; + sliding_window = 32768; + vocab_size = 151936; + tie_embedding_words = false; } else if (billionsType == "1.8b") { attention_dropout = 0.0; std::string hidden_act = "silu"; @@ -116,6 +135,20 @@ struct QWenConfig : public TransformerConfig { sliding_window = 32768; vocab_size = 151936; tie_embedding_words = false; + } else if (billionsType == "1.8b-rotated") { + attention_dropout = 0.0; + std::string hidden_act = "silu"; + hidden_size = 2048; + intermediate_size = 5504; + max_position_embeddings = 32768; + 
num_attention_heads = 16; + num_hidden_layers = 24; + num_key_value_heads = 16; + rms_norm_eps = 1e-6; + rope_theta = 1000000.0; + sliding_window = 32768; + vocab_size = 151936; + tie_embedding_words = false; } else if (billionsType == "1.5b") { attention_dropout = 0.0; std::string hidden_act = "silu"; @@ -131,6 +164,21 @@ struct QWenConfig : public TransformerConfig { sliding_window = 32768; vocab_size = 151936; tie_embedding_words = true; + } else if ((billionsType == "1.5b-rotated") || (billionsType == "1.5b-lm") || (billionsType == "1.5b-vl") || (billionsType == "1.5b-vl-rotated")) { + attention_dropout = 0.0; + std::string hidden_act = "silu"; + hidden_size = 1536; + intermediate_size = 8960; + max_position_embeddings = 32768; + max_window_layers = 28; + num_attention_heads = 12; + num_hidden_layers = 28; + num_key_value_heads = 2; + rms_norm_eps = 1e-6; + rope_theta = 1000000.0; + sliding_window = 32768; + vocab_size = 151936; + tie_embedding_words = false; } else if (billionsType == "3b") { attention_dropout = 0.0; std::string hidden_act = "silu"; @@ -161,8 +209,23 @@ struct QWenConfig : public TransformerConfig { sliding_window = 131072; vocab_size = 151936; tie_embedding_words = false; + } else if (billionsType == "7b") { + attention_dropout = 0.0; + std::string hidden_act = "silu"; + hidden_size = 3584; + intermediate_size = 18944; + max_position_embeddings = 32768; + max_window_layers = 28; + num_attention_heads = 28; + num_hidden_layers = 28; + num_key_value_heads = 4; + rms_norm_eps = 1e-6; + rope_theta = 1000000.0; + sliding_window = 32768; + vocab_size = 152064; + tie_embedding_words = false; } else { - throw std::runtime_error("Unsupported model size"); + throw std::runtime_error("QWenConfig Unsupported model size"); } RoPE_type = type; }; @@ -191,4 +254,44 @@ struct QWenConfig : public TransformerConfig { QWenNameConfig names_config; }; +struct QWenNPUConfig : virtual public QWenConfig { + explicit QWenNPUConfig(int token_limit, string billions = 
"1.8B", RoPEType type = RoPEType::HFHUBROPE) : + QWenConfig(token_limit, billions, type) { + string billionsType; + std::transform(billions.begin(), billions.end(), std::back_inserter(billionsType), + ::tolower); + if (billionsType == "1.8b") { + shadow_layers = {1, 2, 26}; + } else if (billionsType == "1.8b-rotated") { + shadow_layers = {}; + use_i32_bias = false; + } else if (billionsType == "1.5b") { // qwen2.5 1.5B + shadow_layers = {1, 2, 4, 5, 26}; + use_high_precision_silu = true; + } else if (billionsType == "1.5b-vl") { // qwen-2-vl + shadow_layers = {1, 26}; + use_high_precision_silu = false; + tie_embedding_words = false; + } else if (billionsType == "1.5b-rotated") { // qwen2.5 1.5B rotated model + shadow_layers = {}; + use_i32_bias = false; + use_high_precision_silu = true; + } else if (billionsType == "1.5b-vl-rotated") { // qwen-2-vl rotated model + shadow_layers = {}; + use_high_precision_silu = true; + use_i32_bias = false; + tie_embedding_words = false; + } else { + throw std::runtime_error("QWenNPUConfig Unsupported model size"); + } + } + + std::set shadow_layers; + // use i32/fp32 bias for Linear in QNN, when using fp32 bias, bias will be added by DequantizeAdd + bool use_i32_bias = true; + // there are two types of QNNSiLU, a approximate int version and a (sigmoid * x) version + // for qwen2.5, input of silu act has a large range, config here to use the (sigmoid * x) + bool use_high_precision_silu = false; +}; + #endif //! 
CONFIG_QWEN_HPP diff --git a/src/models/qwen/modeling_qwen.hpp b/mllm/models/qwen/modeling_qwen.hpp similarity index 56% rename from src/models/qwen/modeling_qwen.hpp rename to mllm/models/qwen/modeling_qwen.hpp index e73c50a8c..73985b6c0 100644 --- a/src/models/qwen/modeling_qwen.hpp +++ b/mllm/models/qwen/modeling_qwen.hpp @@ -13,10 +13,13 @@ #define MODELING_QWEN_HPP #include "Backend.hpp" +#include "DataType.hpp" #include "Layer.hpp" #include "Module.hpp" #include "Tensor.hpp" +#include "Types.hpp" #include "configuration_qwen.hpp" +#include "models/transformer/modeling_transformer.hpp" #include using namespace mllm; @@ -51,98 +54,19 @@ class QWenMLP final : public Module { Layer silu; }; -// Copied from GemmaAttention with Gemma->Qwen and using SWA -class QWenAttention final : public Module { -public: - QWenAttention() = default; - QWenAttention(const QWenConfig &config, const QWenNameConfig &names, const string &base_name) { - hidden_size = config.hidden_size; - num_heads = config.num_attention_heads; - head_dim = config.hidden_size / num_heads; - num_key_value_heads = config.num_key_value_heads; - num_key_value_groups = num_heads / num_key_value_heads; - - // init layers - q_proj = Linear(hidden_size, num_heads * head_dim, true, base_name + names._q_proj_name); - k_proj = Linear(hidden_size, num_key_value_heads * head_dim, true, - base_name + names._k_proj_name); - v_proj = Linear(hidden_size, num_key_value_heads * head_dim, true, - base_name + names._v_proj_name); - o_proj = Linear(num_heads * head_dim, hidden_size, false, base_name + names._o_proj_name); - q_rope = RoPE(config.RoPE_type, config.rope_theta, config.max_position_embeddings, - base_name + "q_rope"); - k_rope = RoPE(config.RoPE_type, config.rope_theta, config.max_position_embeddings, - base_name + "k_rope"); - k_cache = KVCache(num_key_value_heads, head_dim, num_key_value_groups, config.cache_limit, base_name + "k_cache"); - v_cache = KVCache(num_key_value_heads, head_dim, num_key_value_groups, 
config.cache_limit, base_name + "v_cache"); - // mask = SlidingWindowMask(config.sliding_window, base_name + "mask"); - // mask = Causalmask(base_name + "mask"); - softmax = Softmax(DIMENSION, true, base_name + "softmax"); - } - - std::vector Forward(std::vector inputs, std::vector args) override { - auto query_states = q_proj(inputs[0]); - auto key_states = k_proj(inputs[1]); - auto value_states = v_proj(inputs[2]); - - // [batch, heads, sequence, dims] - query_states = query_states.view(-1, num_heads, -1, head_dim); - key_states = key_states.view(-1, num_key_value_heads, -1, head_dim); - value_states = value_states.view(-1, num_key_value_heads, -1, head_dim); - - // embedding - query_states = q_rope(query_states); - key_states = k_rope(key_states); - - // kv cache - key_states = k_cache(key_states); - value_states = v_cache(value_states); - - // attention weight - auto atten_weight = - Tensor::mm(query_states, key_states.transpose(Chl::SEQUENCE, Chl::DIMENSION)) - / std::sqrt(head_dim); - // atten_weight = mask(atten_weight, k_cache.getCacheSeqLen()); - atten_weight = softmax(atten_weight, k_cache.getCacheSeqLen()); - - // attention output - auto atten_output = Tensor::mm(atten_weight, value_states); - atten_output = atten_output.view(-1, 1, -1, head_dim * num_heads); - atten_output = o_proj(atten_output); - return {atten_output}; - } - - vector get_cache() { - return {&k_cache, &v_cache}; - } - vector get_rope() { - return {&q_rope, &k_rope}; - } - -private: - int hidden_size; - int num_heads; - int head_dim; - int num_key_value_heads; - int num_key_value_groups; - Layer q_proj; - Layer k_proj; - Layer v_proj; - Layer o_proj; - RoPE q_rope; - RoPE k_rope; - KVCache k_cache; - KVCache v_cache; - // Causalmask mask; - Softmax softmax; -}; - -// Copied from GemmaDecoder with Gemma->Qwen and set RmsNorm(without add_unit_offset) class QWenDecoder final : public Module { public: QWenDecoder() = default; QWenDecoder(const QWenConfig &config, const QWenNameConfig 
&names, const string &base_name) { - self_atten = QWenAttention(config, names, base_name + names._attn_base_name); + self_atten = MultiHeadAttention(config.hidden_size, config.num_attention_heads, + config.num_key_value_heads, config.hidden_size / config.num_attention_heads, + SPLIT_NONE, PostQkv_NONE, false, + config.RoPE_type, config.rope_theta, + config.max_position_embeddings, + config.cache_limit, + true, true, false, + config.attn_implementation, names, + base_name + names._attn_base_name); mlp = QWenMLP(config.hidden_size, config.intermediate_size, names, base_name + names._ffn_base_name); input_layernorm = @@ -161,12 +85,12 @@ class QWenDecoder final : public Module { return {x}; } - QWenAttention &get_attention() { + MultiHeadAttention &get_attention() { return self_atten; } private: - QWenAttention self_atten; + MultiHeadAttention self_atten; QWenMLP mlp; Layer input_layernorm; Layer post_attention_layernorm; @@ -183,7 +107,9 @@ class QWenModel final : public Module { std::vector Forward(std::vector inputs, std::vector args) override { auto x = inputs[0]; - for (auto &block : blocks) { x = block({x})[0]; } + for (auto &block : blocks) { + x = block({x})[0]; + } x = norm(x); return {x}; } @@ -204,7 +130,9 @@ class QWenModel final : public Module { class QWenForCausalLM final : public Module { public: + CHAINABLE_MODULE_METHODS(QWenForCausalLM) QWenForCausalLM(QWenConfig &config) { + dtype = config.dtype; auto names = config.names_config; hidden_size = config.hidden_size; tie_embedding_words = config.tie_embedding_words; @@ -223,8 +151,7 @@ class QWenForCausalLM final : public Module { } std::vector Forward(std::vector inputs, std::vector args) override { - auto x = embedding(inputs[0]); - + auto x = embedding(inputs[0]).to(dtype); // go through model auto outputs = model({x})[0]; if (outputs.sequence() > 1) { @@ -248,6 +175,7 @@ class QWenForCausalLM final : public Module { Parameter lm_head; Layer lm_head_layer; QWenModel model; + DataType dtype; }; 
#endif //! MODELING_QWEN_HPP \ No newline at end of file diff --git a/src/models/qwen/modeling_qwen_npu.hpp b/mllm/models/qwen/modeling_qwen_npu_v2.hpp similarity index 61% rename from src/models/qwen/modeling_qwen_npu.hpp rename to mllm/models/qwen/modeling_qwen_npu_v2.hpp index 30ae8e1a3..a62b653e2 100644 --- a/src/models/qwen/modeling_qwen_npu.hpp +++ b/mllm/models/qwen/modeling_qwen_npu_v2.hpp @@ -1,5 +1,5 @@ -#ifndef MODELING_QWENNPU_HPP -#define MODELING_QWENNPU_HPP +#ifndef MODELING_QWENNPU_V2_HPP +#define MODELING_QWENNPU_V2_HPP #include "Backend.hpp" #include "Layer.hpp" @@ -7,11 +7,15 @@ #include "Tensor.hpp" #include "Types.hpp" #include "configuration_qwen.hpp" +#include using namespace mllm; +namespace v2 { + // NPU QKV part -class QwenDecoderNPUPart1 final : public Module { +class QwenDecoderNPUPart1 : public Module { +protected: int hidden_size; int num_heads; int head_dim; @@ -36,7 +40,7 @@ class QwenDecoderNPUPart1 final : public Module { public: QwenDecoderNPUPart1() = default; - QwenDecoderNPUPart1(const QWenConfig &config, const QWenNameConfig &names, int chunk_size, const string &base_name) { + QwenDecoderNPUPart1(const QWenNPUConfig &config, const QWenNameConfig &names, int chunk_size, const string &base_name) { hidden_size = config.hidden_size; num_heads = config.num_attention_heads; head_dim = config.hidden_size / num_heads; @@ -45,17 +49,23 @@ class QwenDecoderNPUPart1 final : public Module { pre_attn_view = View(-1, 1, -1, num_heads * head_dim, base_name + "ires_split-00_view_"); - q_proj = Linear(hidden_size, num_heads * head_dim, true, base_name + names._q_proj_name); - k_proj = Linear(hidden_size, num_key_value_heads * head_dim, true, base_name + names._k_proj_name); - v_proj = Linear(hidden_size, num_key_value_heads * head_dim, true, base_name + names._v_proj_name); + q_proj = Linear(hidden_size, num_heads * head_dim, config.use_i32_bias, base_name + names._q_proj_name); + k_proj = Linear(hidden_size, num_key_value_heads * head_dim, 
config.use_i32_bias, base_name + names._k_proj_name); + v_proj = Linear(hidden_size, num_key_value_heads * head_dim, config.use_i32_bias, base_name + names._v_proj_name); q_view = View(-1, num_heads, -1, head_dim, base_name + names._q_proj_name + "-00_view_"); k_view = View(-1, num_key_value_heads, -1, head_dim, base_name + names._k_proj_name + "-00_view_"); v_view = View(-1, num_key_value_heads, -1, head_dim, base_name + names._v_proj_name + "-00_view_"); - q_dequant = Dequantize(true, base_name + names._q_proj_name + ".dequantize"); - k_dequant = Dequantize(true, base_name + names._k_proj_name + ".dequantize", false); - v_dequant = Dequantize(true, base_name + names._v_proj_name + ".dequantize", false); + if (config.use_i32_bias) { + q_dequant = Dequantize(true, base_name + names._q_proj_name + ".dequantize", true, MLLM_TYPE_I16); + k_dequant = Dequantize(true, base_name + names._k_proj_name + ".dequantize", false, MLLM_TYPE_I16); + v_dequant = Dequantize(true, base_name + names._v_proj_name + ".dequantize", false, MLLM_TYPE_I16); + } else { + q_dequant = DequantizeAdd(true, num_heads * head_dim, base_name + names._q_proj_name + ".dequantize", true, MLLM_TYPE_I16); + k_dequant = DequantizeAdd(true, num_key_value_heads * head_dim, base_name + names._k_proj_name + ".dequantize", false, MLLM_TYPE_I16); + v_dequant = DequantizeAdd(true, num_key_value_heads * head_dim, base_name + names._v_proj_name + ".dequantize", false, MLLM_TYPE_I16); + } v_transpose = Transpose({0, 2, 3, 1}, base_name + names._v_proj_name + ".transpose"); } @@ -80,6 +90,70 @@ class QwenDecoderNPUPart1 final : public Module { } }; +class QwenDecoderNPUPart1WithRes final : public QwenDecoderNPUPart1 { + Layer input_layernorm; + Layer pre_attn_quantize; + +public: + QwenDecoderNPUPart1WithRes() = default; + QwenDecoderNPUPart1WithRes(const QWenNPUConfig &config, const QWenNameConfig &names, int chunk_size, const string &base_name) { + hidden_size = config.hidden_size; + num_heads = 
config.num_attention_heads; + head_dim = config.hidden_size / num_heads; + num_key_value_heads = config.num_key_value_heads; + num_key_value_groups = num_heads / num_key_value_heads; + + // remove "self_attn." in base_name + auto layer_base_name = base_name.substr(0, base_name.size() - 10); + input_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, layer_base_name + names._attn_norm_name); + pre_attn_quantize = Quantize(true, layer_base_name + names._attn_base_name + names._q_proj_name + ".quantize", MLLM_TYPE_I16); + + pre_attn_view = View(-1, 1, -1, num_heads * head_dim, base_name + "ires_split-00_view_"); + + q_proj = Linear(hidden_size, num_heads * head_dim, config.use_i32_bias, base_name + names._q_proj_name); + k_proj = Linear(hidden_size, num_key_value_heads * head_dim, config.use_i32_bias, base_name + names._k_proj_name); + v_proj = Linear(hidden_size, num_key_value_heads * head_dim, config.use_i32_bias, base_name + names._v_proj_name); + + q_view = View(-1, num_heads, -1, head_dim, base_name + names._q_proj_name + "-00_view_"); + k_view = View(-1, num_key_value_heads, -1, head_dim, base_name + names._k_proj_name + "-00_view_"); + v_view = View(-1, num_key_value_heads, -1, head_dim, base_name + names._v_proj_name + "-00_view_"); + + if (config.use_i32_bias) { + q_dequant = Dequantize(true, base_name + names._q_proj_name + ".dequantize", true, MLLM_TYPE_I16); + k_dequant = Dequantize(true, base_name + names._k_proj_name + ".dequantize", false, MLLM_TYPE_I16); + v_dequant = Dequantize(true, base_name + names._v_proj_name + ".dequantize", false, MLLM_TYPE_I16); + } else { + q_dequant = DequantizeAdd(true, num_heads * head_dim, base_name + names._q_proj_name + ".dequantize", true, MLLM_TYPE_I16); + k_dequant = DequantizeAdd(true, num_key_value_heads * head_dim, base_name + names._k_proj_name + ".dequantize", false, MLLM_TYPE_I16); + v_dequant = DequantizeAdd(true, num_key_value_heads * head_dim, base_name + names._v_proj_name + ".dequantize", false, 
MLLM_TYPE_I16); + } + + v_transpose = Transpose({0, 2, 3, 1}, base_name + names._v_proj_name + ".transpose"); + } + + vector Forward(vector inputs, vector args) override { + auto x = input_layernorm(inputs[0]); + x = pre_attn_quantize(x); + + x = pre_attn_view(x); + + auto query_states = q_proj(x); + auto key_states = k_proj(x); + auto value_states = v_proj(x); + + query_states = q_view(query_states); + key_states = k_view(key_states); + value_states = v_view(value_states); + + query_states = q_dequant(query_states); + key_states = k_dequant(key_states); + value_states = v_dequant(value_states); + + value_states = v_transpose(value_states); + return {query_states, key_states, value_states, inputs[0]}; + } +}; + // CPU QKV MM part class QwenQKVmm final : public Module { RoPE q_rope; @@ -95,11 +169,14 @@ class QwenQKVmm final : public Module { int num_key_value_heads; int num_key_value_groups; + bool isScale = false; + public: QwenQKVmm() = default; - QwenQKVmm(const QWenConfig &config, const QWenNameConfig &names, int chunk_size, const string &base_name) { + QwenQKVmm(const QWenNPUConfig &config, const QWenNameConfig &names, int chunk_size, const string &base_name) { hidden_size = config.hidden_size; - num_heads = config.num_attention_heads * config.hidden_size / config.num_attention_heads; + num_heads = config.num_attention_heads; + head_dim = config.hidden_size / num_heads; q_rope = RoPE(config.RoPE_type, config.rope_theta, config.max_position_embeddings, base_name + "q_rope"); k_rope = RoPE(config.RoPE_type, config.rope_theta, config.max_position_embeddings, base_name + "k_rope"); @@ -107,9 +184,15 @@ class QwenQKVmm final : public Module { k_cache = KVCache(config.num_attention_heads / config.num_key_value_heads, config.cache_limit, base_name + "k_cache", true); v_cache = KVCache(config.num_attention_heads / config.num_key_value_heads, config.cache_limit, base_name + "v_cache", true); + // k_cache = KVCache(config.num_key_value_heads, head_dim, 
config.num_attention_heads / config.num_key_value_heads, config.cache_limit, base_name + "k_cache", true); + // v_cache = KVCache(config.num_key_value_heads, head_dim, config.num_attention_heads / config.num_key_value_heads, config.cache_limit, base_name + "v_cache", true); + softmax = Softmax(DIMENSION, true, base_name + "softmax"); o_quantize = Quantize(true, base_name + names._o_proj_name + ".quantize"); + + if (!config.use_i32_bias) + isScale = true; } vector Forward(vector inputs, vector args) override { @@ -124,6 +207,7 @@ class QwenQKVmm final : public Module { v = v_cache(v); auto qk = Tensor::mm(q, k.transpose(Chl::SEQUENCE, Chl::DIMENSION)); + qk = qk / sqrt(head_dim); qk = softmax(qk); auto o = Tensor::mm(qk, v); @@ -134,7 +218,8 @@ class QwenQKVmm final : public Module { }; // QNN mlp part -class QwenDecoderNPUPart2 final : public Module { +class QwenDecoderNPUPart2 : public Module { +protected: int hidden_size; int num_heads; int head_dim; @@ -169,7 +254,7 @@ class QwenDecoderNPUPart2 final : public Module { public: QwenDecoderNPUPart2() = default; - QwenDecoderNPUPart2(const QWenConfig &config, const QWenNameConfig &names, int chunk_size, const string &base_name) { + QwenDecoderNPUPart2(const QWenNPUConfig &config, const QWenNameConfig &names, int chunk_size, const string &base_name) { hidden_size = config.hidden_size; num_heads = config.num_attention_heads; head_dim = config.hidden_size / num_heads; @@ -191,14 +276,20 @@ class QwenDecoderNPUPart2 final : public Module { pre_mlp_quantize = Quantize(true, mlp_base_name + names._up_proj_name + ".quantize"); pre_mlp_view = View(1, utils::closestFactors(chunk_size).first, utils::closestFactors(chunk_size).second, hidden_size, mlp_base_name + names._up_proj_name + ".quantize-00_view_"); gate_proj = Linear(hidden_size, intermediate_size, false, mlp_base_name + names._gate_proj_name); - silu = SiLU(mlp_base_name + "act"); + + if (config.use_high_precision_silu) { + silu = SiLU_Full_Precision(mlp_base_name + 
"act"); + } else { + silu = SiLU(mlp_base_name + "act"); + } + up_proj = Linear(hidden_size, intermediate_size, false, mlp_base_name + names._up_proj_name); post_up_proj_dequantize = Dequantize(true, mlp_base_name + names._up_proj_name + ".dequantize", false); post_gate_proj_dequantize = Dequantize(true, mlp_base_name + names._gate_proj_name + ".dequantize", false); down_proj = Linear(intermediate_size, hidden_size, false, mlp_base_name + names._down_proj_name); - pre_down_proj_quantize = Quantize(true, mlp_base_name + names._down_proj_name + ".quantize"); - post_down_proj_dequantize = Dequantize(true, mlp_base_name + names._down_proj_name + ".dequantize"); + pre_down_proj_quantize = Quantize(true, mlp_base_name + names._down_proj_name + ".quantize", MLLM_TYPE_I16); + post_down_proj_dequantize = Dequantize(true, mlp_base_name + names._down_proj_name + ".dequantize", true, MLLM_TYPE_I16); post_mlp_view = View(1, 1, chunk_size, hidden_size, mlp_base_name + names._down_proj_name + ".dequantize-00_view_"); mlp_mul = Mul(mlp_base_name + "mul"); @@ -243,42 +334,10 @@ class QwenDecoderNPUPart2 final : public Module { } }; -class QwenDecoderNPUPart2WithShadow final : public Module { - int hidden_size; - int num_heads; - int head_dim; - int num_key_value_heads; - int num_key_value_groups; - int intermediate_size; - - // NPU part2 of attention - Layer pre_oproj_view; - Layer out_proj; - Layer post_oproj_view; - Layer post_oproj_dequantize; - - // NPU mlp - Layer pre_mlp_quantize; - Layer pre_mlp_view; - Layer gate_proj; - Layer up_proj; - Layer post_up_proj_dequantize; - Layer post_gate_proj_dequantize; - Layer silu; - Layer post_attn_layernorm; - - Layer down_proj; - Layer pre_down_proj_quantize; - Layer post_down_proj_dequantize; - Layer post_mlp_view; - - Layer post_atten_res_add; - Layer post_mlp_res_add; - Layer mlp_mul; - +class QwenDecoderNPUPart2WithShadow final : public QwenDecoderNPUPart2 { public: QwenDecoderNPUPart2WithShadow() = default; - 
QwenDecoderNPUPart2WithShadow(const QWenConfig &config, const QWenNameConfig &names, int chunk_size, const string &base_name) { + QwenDecoderNPUPart2WithShadow(const QWenNPUConfig &config, const QWenNameConfig &names, int chunk_size, const string &base_name) { hidden_size = config.hidden_size; num_heads = config.num_attention_heads; head_dim = config.hidden_size / num_heads; @@ -300,7 +359,13 @@ class QwenDecoderNPUPart2WithShadow final : public Module { pre_mlp_quantize = Quantize(true, mlp_base_name + names._up_proj_name + ".quantize"); pre_mlp_view = View(1, utils::closestFactors(chunk_size).first, utils::closestFactors(chunk_size).second, hidden_size, mlp_base_name + names._up_proj_name + ".quantize-00_view_"); gate_proj = Linear(hidden_size, intermediate_size, false, mlp_base_name + names._gate_proj_name); - silu = SiLU(mlp_base_name + "act"); + + if (config.use_high_precision_silu) { + silu = SiLU_Full_Precision(mlp_base_name + "act"); + } else { + silu = SiLU(mlp_base_name + "act"); + } + up_proj = Linear(hidden_size, intermediate_size, false, mlp_base_name + names._up_proj_name); post_up_proj_dequantize = Dequantize(true, mlp_base_name + names._up_proj_name + ".dequantize"); post_gate_proj_dequantize = Dequantize(true, mlp_base_name + names._gate_proj_name + ".dequantize"); @@ -351,6 +416,7 @@ class QwenDecoderNPUPart2WithShadow final : public Module { gate_out = post_mlp_view(gate_out); gate_out = post_mlp_res_add(gate_out, tmp); + return {shadow_input_1, shadow_input_2, gate_out}; } }; @@ -362,51 +428,89 @@ class QwenNPU_CPUDecoder final : public Module { int num_key_value_heads; int num_key_value_groups; + int layer_idx; + int num_layers; + + SubgraphStart _SubgraphStart_1, _SubgraphStart_2; + SubgraphFinalize _SubgraphEnd_1, _SubgraphEnd_2; + Layer input_layernorm; Layer pre_attn_quantize; - QwenDecoderNPUPart1 part1; + unique_ptr part1; QwenQKVmm qkv_mm; - QwenDecoderNPUPart2 part2; + unique_ptr part2; + + std::set shadowLayer; public: 
QwenNPU_CPUDecoder() = default; - QwenNPU_CPUDecoder(const QWenConfig &config, const QWenNameConfig &names, int chunk_size, const string &base_name) { + QwenNPU_CPUDecoder(const QWenNPUConfig &config, const QWenNameConfig &names, int chunk_size, const string &base_name) : + shadowLayer(config.shadow_layers) { hidden_size = config.hidden_size; num_heads = config.num_attention_heads; head_dim = config.hidden_size / num_heads; num_key_value_heads = config.num_key_value_heads; num_key_value_groups = num_heads / num_key_value_heads; - input_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._attn_norm_name); - pre_attn_quantize = Quantize(true, base_name + names._attn_base_name + names._q_proj_name + ".quantize"); - - part1 = QwenDecoderNPUPart1(config, names, chunk_size, base_name + names._attn_base_name); - part1.to(MLLM_QNN); + // extract layer index from base_name like "model.layers.10." + std::regex re(R"(\d+)"); + std::smatch match; + std::regex_search(base_name, match, re); + layer_idx = std::stoi(match[0]); + num_layers = config.num_hidden_layers; + + if (layer_idx == 0 || shadowLayer.find(layer_idx - 1) != shadowLayer.end()) { + input_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._attn_norm_name); + pre_attn_quantize = Quantize(true, base_name + names._attn_base_name + names._q_proj_name + ".quantize", MLLM_TYPE_I16); + part1 = make_unique(config, names, chunk_size, base_name + names._attn_base_name); + } else { + part1 = make_unique(config, names, chunk_size, base_name + names._attn_base_name); + } qkv_mm = QwenQKVmm(config, names, chunk_size, base_name + names._attn_base_name); - qkv_mm.to(MLLM_CPU); - part2 = QwenDecoderNPUPart2(config, names, chunk_size, base_name); - part2.to(MLLM_QNN); + part2 = make_unique(config, names, chunk_size, base_name); + + _SubgraphStart_1 = SubgraphStart(base_name + "subgraph_start1"); + _SubgraphEnd_1 = SubgraphFinalize(base_name + "subgraph_end1"); + 
_SubgraphStart_2 = SubgraphStart(base_name + "subgraph_start2"); + _SubgraphEnd_2 = SubgraphFinalize(base_name + "subgraph_end2"); } vector Forward(vector inputs, vector args) override { - auto x = input_layernorm(inputs[0]); - x = pre_attn_quantize(x); + Tensor x, q, k, v, res; + if (layer_idx == 0 || shadowLayer.find(layer_idx - 1) != shadowLayer.end()) { + x = input_layernorm(inputs[0]); + x = pre_attn_quantize(x); + + _SubgraphStart_1({x}); + + auto q_k_v = (*part1)({x}); // q,k,v + q = q_k_v[0]; + k = q_k_v[1]; + v = q_k_v[2]; + res = inputs[0]; + _SubgraphEnd_1(q_k_v); + } else { + auto q_k_v_res = (*part1)(inputs); // q,k,v,res + q = q_k_v_res[0]; + k = q_k_v_res[1]; + v = q_k_v_res[2]; + res = q_k_v_res[3]; + _SubgraphEnd_1(q_k_v_res); + } - x = Tensor::toQNN({x})[0]; - auto q_k_v = part1({x}); // q,k,v - q_k_v = Tensor::toCPU(q_k_v); + auto o_x = qkv_mm({q, k, v})[0]; - auto o_x = qkv_mm(q_k_v)[0]; + _SubgraphStart_2({o_x, res}); - auto qnn_tensor = Tensor::toQNN({o_x, inputs[0]}); - o_x = qnn_tensor[0]; - inputs[0] = qnn_tensor[1]; - x = part2({o_x, inputs[0]})[0]; - x = Tensor::toCPU({x})[0]; + auto out_part2 = (*part2)({o_x, res}); - return {x}; + if (layer_idx == num_layers - 1) { + _SubgraphEnd_2(out_part2); + } + + return out_part2; } }; @@ -420,53 +524,91 @@ class QwenNPU_CPUDecoderWithShadow final : public Module { Layer input_layernorm; Layer pre_attn_quantize; Layer shadow_linear; - QwenDecoderNPUPart1 part1; + unique_ptr part1; QwenQKVmm qkv_mm; - QwenDecoderNPUPart2WithShadow part2; + unique_ptr part2; + + int layer_idx; + int num_layers; + + SubgraphStart _SubgraphStart_1, _SubgraphStart_2; + SubgraphFinalize _SubgraphEnd_1, _SubgraphEnd_2; + + std::set shadowLayer; public: QwenNPU_CPUDecoderWithShadow() = default; - QwenNPU_CPUDecoderWithShadow(const QWenConfig &config, const QWenNameConfig &names, int chunk_size, const string &base_name) { + QwenNPU_CPUDecoderWithShadow(const QWenNPUConfig &config, const QWenNameConfig &names, int 
chunk_size, const string &base_name) : + shadowLayer(config.shadow_layers) { hidden_size = config.hidden_size; num_heads = config.num_attention_heads; head_dim = config.hidden_size / num_heads; num_key_value_heads = config.num_key_value_heads; num_key_value_groups = num_heads / num_key_value_heads; - input_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._attn_norm_name); - pre_attn_quantize = Quantize(true, base_name + names._attn_base_name + names._q_proj_name + ".quantize"); - - part1 = QwenDecoderNPUPart1(config, names, chunk_size, base_name + names._attn_base_name); - part1.to(MLLM_QNN); + // extract layer index from base_name like "model.layers.10." + std::regex re(R"(\d+)"); + std::smatch match; + std::regex_search(base_name, match, re); + layer_idx = std::stoi(match[0]); + num_layers = config.num_hidden_layers; + + if (layer_idx == 0 || shadowLayer.find(layer_idx - 1) != shadowLayer.end()) { + input_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._attn_norm_name); + pre_attn_quantize = Quantize(true, base_name + names._attn_base_name + names._q_proj_name + ".quantize", MLLM_TYPE_I16); + part1 = make_unique(config, names, chunk_size, base_name + names._attn_base_name); + } else { + part1 = make_unique(config, names, chunk_size, base_name + names._attn_base_name); + } qkv_mm = QwenQKVmm(config, names, chunk_size, base_name + names._attn_base_name); - qkv_mm.to(MLLM_CPU); - part2 = QwenDecoderNPUPart2WithShadow(config, names, chunk_size, base_name); - part2.to(MLLM_QNN); + part2 = make_unique(config, names, chunk_size, base_name); shadow_linear = ShadowLinear(config.intermediate_size, hidden_size, 1024, false, base_name + names._ffn_base_name + names._down_proj_name + ".shadow"); + + _SubgraphStart_1 = SubgraphStart(base_name + "subgraph_start1"); + _SubgraphEnd_1 = SubgraphFinalize(base_name + "subgraph_end1"); + _SubgraphStart_2 = SubgraphStart(base_name + "subgraph_start2"); + _SubgraphEnd_2 = 
SubgraphFinalize(base_name + "subgraph_end2"); } vector Forward(vector inputs, vector args) override { - auto x = input_layernorm(inputs[0]); - x = pre_attn_quantize(x); + Tensor x, q, k, v, res; + if (layer_idx == 0 || shadowLayer.find(layer_idx - 1) != shadowLayer.end()) { + x = input_layernorm(inputs[0]); + x = pre_attn_quantize(x); + + _SubgraphStart_1({x}); + + auto q_k_v = (*part1)({x}); // q,k,v + q = q_k_v[0]; + k = q_k_v[1]; + v = q_k_v[2]; + res = inputs[0]; + _SubgraphEnd_1(q_k_v); + } else { + auto q_k_v_res = (*part1)(inputs); // q,k,v,res + q = q_k_v_res[0]; + k = q_k_v_res[1]; + v = q_k_v_res[2]; + res = q_k_v_res[3]; + _SubgraphEnd_1(q_k_v_res); + } - x = Tensor::toQNN({x})[0]; - auto q_k_v = part1({x}); // q,k,v - q_k_v = Tensor::toCPU(q_k_v); + auto o_x = qkv_mm({q, k, v})[0]; - auto o_x = qkv_mm(q_k_v)[0]; + _SubgraphStart_2({o_x, res}); - auto qnn_tensor = Tensor::toQNN({o_x, inputs[0]}); - o_x = qnn_tensor[0]; - inputs[0] = qnn_tensor[1]; - auto decoder_out = part2({o_x, inputs[0]}); + auto decoder_out = (*part2)({o_x, res}); decoder_out = Tensor::toCPU(decoder_out); + _SubgraphEnd_2(decoder_out); + auto shadow_input_1 = decoder_out[0]; auto shadow_input_2 = decoder_out[1]; x = decoder_out[2]; + x = shadow_linear(shadow_input_1, shadow_input_2, x); return {x}; @@ -476,16 +618,16 @@ class QwenNPU_CPUDecoderWithShadow final : public Module { // Copied from GemmaModel with Gemma->Qwen and set RmsNorm(without add_unit_offset) class QWenModel_NPU final : public Module { template - static vector> ListWithShadow(int n, Args &&...args) { + static vector> ListWithShadow(int n, std::set &shadowLayer, Args &&...args) { static_assert(std::is_base_of::value, "T1 must be a subclass of Module"); static_assert(std::is_base_of::value, "SHADOW must be a subclass of Module"); listIdx = 0; vector> modules; - std::set shadowLayers = {1, 2, 26}; + // for index in shadowLayers, create shadow decoder, for others, create normal decoder for (int i = 0; i < n; i++) { 
auto new_args = change_last(args...); // 创建新的参数包,最后一个参数被修改为原来的值+ std::to_string(listIdx)+ "." - if (shadowLayers.find(listIdx) != shadowLayers.end()) { + if (shadowLayer.find(listIdx) != shadowLayer.end()) { modules.push_back(std::make_unique(std::apply([&](auto &&...args) { return SHADOW(std::forward(args)...); }, new_args))); } else { modules.push_back(std::make_unique(std::apply([&](auto &&...args) { return T1(std::forward(args)...); }, new_args))); @@ -498,9 +640,9 @@ class QWenModel_NPU final : public Module { public: QWenModel_NPU() = default; - QWenModel_NPU(const QWenConfig &config, const QWenNameConfig &names, int chunk_size, const string &base_name) { + QWenModel_NPU(const QWenNPUConfig &config, const QWenNameConfig &names, int chunk_size, const string &base_name, std::set &shadowLayer) { // blocks = List(1, config, names, base_name); - blocks = ListWithShadow(24, config, names, chunk_size, base_name); + blocks = ListWithShadow(config.num_hidden_layers, shadowLayer, config, names, chunk_size, base_name); norm = RMSNorm(config.hidden_size, config.rms_norm_eps, names.post_norm_name); } @@ -519,13 +661,16 @@ class QWenModel_NPU final : public Module { }; class QWenForCausalLM_NPU final : public Module { + std::set &shadowLayer; + public: - QWenForCausalLM_NPU(QWenConfig &config, int chunk_size = 64) { + QWenForCausalLM_NPU(QWenNPUConfig &config, int chunk_size = 64) : + shadowLayer(config.shadow_layers) { auto names = config.names_config; hidden_size = config.hidden_size; tie_embedding_words = config.tie_embedding_words; embedding = Embedding(config.vocab_size, config.hidden_size, names.token_embd_name); - model = QWenModel_NPU(config, names, chunk_size, names.blk_name); + model = QWenModel_NPU(config, names, chunk_size, names.blk_name, shadowLayer); // Qwen-0.5 use tied embedding // Others use nn.Linear() @@ -593,5 +738,6 @@ class QWenForCausalLM_NPU final : public Module { Layer lm_head_layer; QWenModel_NPU model; }; +} // namespace v2 -#endif //! 
MODELING_QWENNPU_HPP chunk_size, \ No newline at end of file +#endif //! MODELING_QWENNPU_V2_HPP chunk_size, \ No newline at end of file diff --git a/src/models/qwen/modeling_qwen_sd.hpp b/mllm/models/qwen/modeling_qwen_sd.hpp similarity index 97% rename from src/models/qwen/modeling_qwen_sd.hpp rename to mllm/models/qwen/modeling_qwen_sd.hpp index d4d15b30f..9a6b9698c 100644 --- a/src/models/qwen/modeling_qwen_sd.hpp +++ b/mllm/models/qwen/modeling_qwen_sd.hpp @@ -307,9 +307,9 @@ class QWenForCausalLM final : public Module { tree_ancestors.setName("tree_ancestors"); tree_ancestors.setDtype(MLLM_TYPE_I32); tp.is_decoding = false; - static_cast(Backend::global_backends[MLLM_CPU])->setUsingDraft(false); // prefill时不使用 - static_cast(Backend::global_backends[MLLM_CPU])->setLastDraftLength(0); - static_cast(Backend::global_backends[MLLM_CPU])->setLastVerifiedPositionIds({}); + static_cast(Backend::global_backends[MLLM_CPU].get())->setUsingDraft(false); // prefill时不使用 + static_cast(Backend::global_backends[MLLM_CPU].get())->setLastDraftLength(0); + static_cast(Backend::global_backends[MLLM_CPU].get())->setLastVerifiedPositionIds({}); unsigned int cur_seq_length = input_ids.sequence(); std::vector predicted_token_ids; @@ -354,10 +354,10 @@ class QWenForCausalLM final : public Module { post_processing_for_SD(new_token_ids, tree_anc, draft_len + 1, input_ids, tree_ancestors, {}); if (step == 0) { - static_cast(Backend::global_backends[MLLM_CPU])->setUsingDraft(true); + static_cast(Backend::global_backends[MLLM_CPU].get())->setUsingDraft(true); } - static_cast(Backend::global_backends[MLLM_CPU])->setLastDraftLength(tp.last_draft_length); - static_cast(Backend::global_backends[MLLM_CPU])->setLastVerifiedPositionIds(tp.last_accept_position_ids); + static_cast(Backend::global_backends[MLLM_CPU].get())->setLastDraftLength(tp.last_draft_length); + static_cast(Backend::global_backends[MLLM_CPU].get())->setLastVerifiedPositionIds(tp.last_accept_position_ids); } tp.reset(); 
sa.reset(); diff --git a/src/models/qwen/modeling_qwen_xp_sdpa.hpp b/mllm/models/qwen/modeling_qwen_xp_sdpa.hpp similarity index 100% rename from src/models/qwen/modeling_qwen_xp_sdpa.hpp rename to mllm/models/qwen/modeling_qwen_xp_sdpa.hpp diff --git a/src/models/qwen/tokenization_qwen.hpp b/mllm/models/qwen/tokenization_qwen.hpp similarity index 85% rename from src/models/qwen/tokenization_qwen.hpp rename to mllm/models/qwen/tokenization_qwen.hpp index 4a803a0cf..5976d6f2b 100644 --- a/src/models/qwen/tokenization_qwen.hpp +++ b/mllm/models/qwen/tokenization_qwen.hpp @@ -15,6 +15,7 @@ #include "tokenizers/Unicode.hpp" #include #include +#include using namespace mllm; @@ -177,6 +178,54 @@ class QWenTokenizer final : public BPETokenizer { return Tokenizer::tokens2Input(ret); } + Tensor tokenize(vector texts, string name = "input", BackendType type = MLLM_CPU) { + std::vector> rets; + for (auto &text : texts) { + std::vector ret; + if (split_special_tokens_) { + const auto word_collection = unicode_regex_split(text, FIXED_PAT_STRS); + for (auto &piece : word_collection) { + std::vector tmp; + BPETokenizer::tokenize(piece, tmp, false, true, ""); + ret.insert(ret.end(), tmp.begin(), tmp.end() - 1); + } + } else { + auto parts = _splitWithDelimiters(text, special_tokens); + for (auto &p : parts) { + if (std::find(special_tokens.begin(), special_tokens.end(), p) != special_tokens.end()) { + std::string token; + for (auto b : UTF8(p)) token += byte_encoder_[b]; + + std::vector tmp; + BPETokenizer::tokenize(token, tmp, false, special_tokens, true); + ret.insert(ret.end(), tmp.begin(), tmp.end() - 1); + } else { + const auto word_collection = unicode_regex_split(p, FIXED_PAT_STRS); + for (auto &piece : word_collection) { + std::vector tmp; + BPETokenizer::tokenize(piece, tmp, false, true, ""); + assert(!tmp.empty()); + ret.insert(ret.end(), tmp.begin(), tmp.end() - 1); + } + } + } + } + rets.push_back(ret); + } + size_t max_len = 0; + for (const auto &vec : rets) { + if 
(vec.size() > max_len) { + max_len = vec.size(); + } + } + for (auto &vec : rets) { + if (vec.size() < max_len) { + vec.insert(vec.begin(), max_len - vec.size(), bos_id_); + } + } + return Tokenizer::tokens2Input(rets); + } + std::pair tokenizeWithPadding(std::string &text, int seqLength, int vocab_size) { std::vector ret; diff --git a/mllm/models/qwen2_5_vl/configuration_qwen2_5_vl.hpp b/mllm/models/qwen2_5_vl/configuration_qwen2_5_vl.hpp new file mode 100644 index 000000000..cc6700700 --- /dev/null +++ b/mllm/models/qwen2_5_vl/configuration_qwen2_5_vl.hpp @@ -0,0 +1,68 @@ +// +// Created by Rongjie Yi on 25-2-9. +// +#ifndef CONFIG_PHI3V_HPP +#define CONFIG_PHI3V_HPP +#include "models/qwen/configuration_qwen.hpp" +#include "models/vit/configuration_vit.hpp" +#include +// #include + +using namespace mllm; + +class Qwen2VLNameConfig : public ViTNameConfig { +public: + // string token_embd_name = "model.embed_tokens"; + string patch_embed_name = ".patch_embed"; // + string _merger_name = ".merger"; // + string _ln_q_name = ".ln_q"; // + string _m_mlp_0_name = ".mlp.0"; // + string _m_mlp_2_name = ".mlp.2"; // + string _gate_proj_name = "gate_proj"; + void init_qwen2vl() { + vison_model_name = "visual"; // + _patch_embedding_name = ".proj"; // + _layer_name = ".blocks."; // + _attn_base_name = "attn."; // + _ffn_base_name = "mlp."; // + _qkv_proj_name = "qkv"; // + _o_proj_name = "proj"; // + _up_proj_name = "up_proj"; // + _gate_proj_name = "gate_proj"; // + _down_proj_name = "down_proj"; // + _attn_norm_name = "norm1"; // + _ffn_norm_name = "norm2"; // + } +}; + +class Qwen2VLConfig : public QWenConfig { +public: + int vision_embed_dim; + int visiion_intermediate_size; + int spatial_merge_size = 2; + int spatial_patch_size = 14; + int window_size = 112; + string projection_cls; + + int bos_token_id = 151643; + int eos_token_id = 151645; + int vision_start_token_id = 151652; + int vision_end_token_id = 151653; + int vision_token_id = 151654; + int image_token_id = 
151655; + int video_token_id = 151656; + vector mrope_section = {16, 24, 24}; + vector fullatt_block_indexes = {7, 15, 23, 31}; + + Qwen2VLNameConfig vision_names_config; + Qwen2VLConfig(int token_limit, string billions = "3b", RoPEType type = HFHUBROPE, int vocab = 32064, string project_cls = "MLP") : + QWenConfig(token_limit, billions, type) { + // names_config.init(type); + projection_cls = project_cls; + vision_embed_dim = 1280; + visiion_intermediate_size = 3420; + vision_names_config.init_qwen2vl(); + } +}; + +#endif // CONFIG_PHI3V_HPP diff --git a/mllm/models/qwen2_5_vl/modeling_qwen2_5_vl.hpp b/mllm/models/qwen2_5_vl/modeling_qwen2_5_vl.hpp new file mode 100644 index 000000000..e4d917b69 --- /dev/null +++ b/mllm/models/qwen2_5_vl/modeling_qwen2_5_vl.hpp @@ -0,0 +1,769 @@ +// +// Created by Rongjie Yi on 25-2-9. +// +#ifndef MODELING_QWEN2VL_HPP +#define MODELING_QWEN2VL_HPP + +#include "Layer.hpp" +#include "Module.hpp" +#include "Tensor.hpp" +#include "Types.hpp" +#include "configuration_qwen2_5_vl.hpp" +// #include "models/qwen/modeling_qwen.hpp" +#include +#include +#include +#include + +using namespace mllm; + +class Qwen2PatchEmbed final : public Module { + Layer proj; + int embed_dim{}; + +public: + Qwen2PatchEmbed() = default; + Qwen2PatchEmbed(int vision_embed_dim, int patch, int img_hw, const Qwen2VLNameConfig &names, const string &base_name) { + proj = Convolution3D(3, vision_embed_dim, {2, patch, patch}, {2, patch, patch}, VALID, false, base_name + names._patch_embedding_name); + embed_dim = vision_embed_dim; + } + vector Forward(vector inputs, vector args) override { + auto embd = proj(inputs[0]); + embd = embd.view(1, 1, -1, embed_dim); + return {embd}; + } +}; + +class VisionAttention final : public Module { + Layer qkv_proj; + Softmax softmax; + Layer o_proj; + int head_size_{}; + int kv_head_size_{}; + int head_dim_{}; + string attn_impl; + +public: + VisionAttention() = default; + VisionAttention(int hidden_dim, int head_size, int 
kv_head_size, int head_dim, bool bias, string attn_implementation, + const TransformerNameConfig &names, const string &base_name) { + head_dim_ = head_dim; + head_size_ = head_size; + kv_head_size_ = kv_head_size; + attn_impl = attn_implementation; + + qkv_proj = Linear(hidden_dim, head_size * head_dim * 3, bias, base_name + names._qkv_proj_name); + softmax = Softmax(DIMENSION, false, base_name + "softmax"); + o_proj = Linear(head_size * head_dim, hidden_dim, bias, base_name + names._o_proj_name); + } + vector Forward(vector inputs, vector args) override { + auto cu_seqlens = inputs[1]; + auto rotary_pos_emb = inputs[2]; + auto seq_length = inputs[0].sequence(); + Tensor q, k, v; + auto qkv = qkv_proj(inputs[0]); + auto qkv_sp = qkv.split({head_dim_ * head_size_, head_dim_ * head_size_, head_dim_ * head_size_}, DIMENSION); + q = qkv_sp[0]; + k = qkv_sp[1]; + v = qkv_sp[2]; + q = q.view(-1, head_size_, -1, head_dim_); + k = k.view(-1, head_size_, -1, head_dim_); + v = v.view(-1, head_size_, -1, head_dim_); + q = Tensor::apply_rotary_pos_emb_vision(q, rotary_pos_emb); + k = Tensor::apply_rotary_pos_emb_vision(k, rotary_pos_emb); + Tensor o; + // if (attn_impl == "flash_attention_2") { + // o = Tensor::flash_attention2_forward(q, k, v, false); + // } else + { // eager implementation + k = k.transpose(SEQUENCE, DIMENSION); + auto qk = Tensor::mm(q, k); + qk = qk / std::sqrt(head_dim_); + auto attn_mask = Tensor::full(1, 1, seq_length, seq_length, + -INFINITY, qk.backend()->type()); + for (int sid = 1; sid < cu_seqlens.dimension(); ++sid) { + auto cu_end = cu_seqlens.dataAt(0, 0, 0, sid); + auto cu_start = cu_seqlens.dataAt(0, 0, 0, sid - 1); + for (int sx = cu_start; sx < cu_end; ++sx) { + for (int sy = cu_start; sy < cu_end; ++sy) { + attn_mask.setDataAt(0, 0, sx, sy, 0.0f); + } + } + } + // + // static int Layer_idx = 0; + // attn_mask.saveData("mask" + std::to_string(Layer_idx)); + // if (seq_length > 0) { + // Layer_idx++; + // } + // + qk = qk + attn_mask; + qk = 
softmax(qk); + o = Tensor::mm(qk, v); + } + o = o.view(-1, 1, -1, head_dim_ * head_size_); + o = o_proj(o); + return {o}; + } +}; + +class VisionMLP final : public Module { + Layer up_proj; + Layer gate_proj; + Layer act; + Layer down_proj; + +public: + VisionMLP() = default; + VisionMLP(int hidden_dim, int ffn_hidden, const string &act_fn_type, const Qwen2VLNameConfig &names, const string &base_name) { + up_proj = Linear(hidden_dim, ffn_hidden, true, base_name + names._up_proj_name); + gate_proj = Linear(hidden_dim, ffn_hidden, true, base_name + names._gate_proj_name); + act = ACT_FN[act_fn_type](base_name + names._ffn_base_name + "act"); + down_proj = Linear(ffn_hidden, hidden_dim, true, base_name + names._down_proj_name); + } + vector Forward(vector inputs, vector args) override { + auto x = gate_proj(inputs[0]); + x = act(x); + auto y = up_proj(inputs[0]); + x = x * y; + x = down_proj(x); + return {x}; + } +}; + +class VisionBlock final : public Module { + VisionAttention attention; + VisionMLP mlp; + Layer norm1; + Layer norm2; + +public: + VisionBlock() = default; + VisionBlock(int hidden_dim, int head_size, int ffn_hidden, const string &act_fn_type, string attn_implementation, const Qwen2VLNameConfig &names, const string &base_name) { + attention = VisionAttention(hidden_dim, head_size, head_size, hidden_dim / head_size, true, attn_implementation, names, base_name + names._attn_base_name); + mlp = VisionMLP(hidden_dim, ffn_hidden, act_fn_type, names, base_name + names._ffn_base_name); + norm1 = RMSNorm(hidden_dim, 1e-6, base_name + names._attn_norm_name); + norm2 = RMSNorm(hidden_dim, 1e-6, base_name + names._ffn_norm_name); + } + vector Forward(vector inputs, vector args) override { + auto cu_seqlens = inputs[1]; + auto rotary_pos_emb = inputs[2]; + auto hidden_states = norm1(inputs[0]); + hidden_states = attention({hidden_states, cu_seqlens, rotary_pos_emb})[0]; + auto residual = hidden_states + inputs[0]; + hidden_states = norm2(residual); + hidden_states 
= mlp({hidden_states})[0]; + hidden_states = hidden_states + residual; + return {hidden_states}; + } +}; + +class PatchMerger final : public Module { + int hidden_size; + Layer ln_q; + Layer mlp0; + Layer gelu; + Layer mlp2; + +public: + PatchMerger() = default; + PatchMerger(int dim, int context_dim, int spatial_merge_size, const Qwen2VLNameConfig &names, const string &base_name) { + hidden_size = context_dim * (spatial_merge_size * spatial_merge_size); + ln_q = RMSNorm(context_dim, 1e-6, base_name + names._ln_q_name); + mlp0 = Linear(hidden_size, hidden_size, true, base_name + names._m_mlp_0_name); + gelu = GELU(base_name + ".gelu"); + mlp2 = Linear(hidden_size, dim, true, base_name + names._m_mlp_2_name); + } + vector Forward(vector inputs, vector args) override { + auto x = inputs[0]; + x = mlp2(gelu(mlp0(ln_q(x).view(1, 1, -1, hidden_size)))); + return {x}; + } +}; + +class Qwen2VisionModel final : public Module { + Qwen2PatchEmbed patch_embed; + Layer rot_pos_emb; + Layer pre_layrnorm; + vector blocks; + PatchMerger patch_merger; + vector fullatt_block_indexes; + int spatial_merge_size; + int spatial_merge_unit; + int spatial_patch_size; + int window_size; + +public: + Qwen2VisionModel() = default; + Qwen2VisionModel(int hidden_dim, int vision_embed_dim, int head_size, int mlp_hidden_dim, const string &act_fn_type, int patch, int img_hw, int block_num, string attn_implementation, const Qwen2VLConfig &config, const Qwen2VLNameConfig &names, const string &base_name) { + fullatt_block_indexes = config.fullatt_block_indexes; + spatial_merge_size = config.spatial_merge_size; + spatial_merge_unit = spatial_merge_size * spatial_merge_size; + spatial_patch_size = config.spatial_patch_size; + window_size = config.window_size; + patch_embed = Qwen2PatchEmbed(vision_embed_dim, patch, img_hw, names, base_name + names.patch_embed_name); + rot_pos_emb = VisionRoPE((vision_embed_dim / head_size) / 2, spatial_merge_size, base_name + ".rot_pos_emb"); + blocks = 
List(block_num, vision_embed_dim, head_size, mlp_hidden_dim, act_fn_type, attn_implementation, names, base_name + names._layer_name); + patch_merger = PatchMerger(hidden_dim, vision_embed_dim, spatial_merge_size, names, base_name + names._merger_name); + } + vector Forward(vector inputs, vector args) override { + auto hidden_states = patch_embed({inputs[0]})[0]; + auto rotary_pos_emb = rot_pos_emb(inputs[1]); + auto grid_twh = inputs[1]; + auto grid_t = grid_twh.dataAt(0, 0, 0, 0); + auto grid_h = grid_twh.dataAt(0, 0, 0, 1); + auto grid_w = grid_twh.dataAt(0, 0, 0, 2); + vector cu_seqlens_v = {0.0F, grid_t * grid_h * grid_w}; + Tensor cu_seqlens = Tensor(cu_seqlens_v); + auto window_lens = get_window_index(grid_twh, + window_size, spatial_merge_size, + spatial_patch_size, spatial_merge_unit); + auto window_index = window_lens[0]; + auto cu_window_seqlens = window_lens[1]; + Tensor cu_seqlens_new; + auto seq_len = hidden_states.sequence(); + hidden_states = hidden_states.view(-1, spatial_merge_unit, seq_len / spatial_merge_unit, -1); + hidden_states = hidden_states.clip(window_index, SEQUENCE); + hidden_states = hidden_states.view(-1, 1, seq_len, -1); + rotary_pos_emb = rotary_pos_emb.view(-1, spatial_merge_unit, seq_len / spatial_merge_unit, -1); + rotary_pos_emb = rotary_pos_emb.clip(window_index, SEQUENCE); + rotary_pos_emb = rotary_pos_emb.view(-1, 1, seq_len, -1); + for (int layer_num = 0; layer_num < blocks.size(); ++layer_num) { + if (std::find(fullatt_block_indexes.begin(), fullatt_block_indexes.end(), layer_num) != fullatt_block_indexes.end()) { + cu_seqlens_new = cu_seqlens; + } else { + cu_seqlens_new = cu_window_seqlens; + } + hidden_states = blocks[layer_num]({hidden_states, cu_seqlens_new, rotary_pos_emb})[0]; + } + hidden_states = patch_merger({hidden_states})[0]; + auto reverse_indices = window_index.argsort(); + hidden_states = hidden_states.clip(reverse_indices, SEQUENCE); + return {hidden_states}; + } + vector get_window_index( + Tensor grid_twh, 
+ int window_size, + int spatial_merge_size, + int spatial_patch_size, + int spatial_merge_unit) { + std::vector> window_index_parts; + std::vector cu_window_seqlens = {0.0f}; + long long window_index_id = 0; + int vit_merger_window_size = window_size / spatial_merge_size / spatial_patch_size; + for (int id = 0; id < grid_twh.batch(); id++) { + int grid_t = grid_twh.dataAt(id, 0, 0, 0); + int grid_h = grid_twh.dataAt(id, 0, 0, 1); + int grid_w = grid_twh.dataAt(id, 0, 0, 2); + int llm_grid_h = grid_h / spatial_merge_size; + int llm_grid_w = grid_w / spatial_merge_size; + long long total_elements = static_cast(grid_t) * llm_grid_h * llm_grid_w; + std::vector index_flat(total_elements); + std::iota(index_flat.begin(), index_flat.end(), 0.0f); + std::vector>> index( + grid_t, std::vector>( + llm_grid_h, std::vector(llm_grid_w))); + for (int t = 0; t < grid_t; ++t) { + for (int h = 0; h < llm_grid_h; ++h) { + for (int w = 0; w < llm_grid_w; ++w) { + index[t][h][w] = index_flat[t * llm_grid_h * llm_grid_w + h * llm_grid_w + w]; + } + } + } + int pad_h = (vit_merger_window_size - (llm_grid_h % vit_merger_window_size)) % vit_merger_window_size; + int pad_w = (vit_merger_window_size - (llm_grid_w % vit_merger_window_size)) % vit_merger_window_size; + int padded_h = llm_grid_h + pad_h; + int padded_w = llm_grid_w + pad_w; + std::vector>> index_padded( + grid_t, std::vector>( + padded_h, std::vector(padded_w, -100.0f))); + for (int t = 0; t < grid_t; ++t) { + for (int h = 0; h < llm_grid_h; ++h) { + for (int w = 0; w < llm_grid_w; ++w) { + index_padded[t][h][w] = index[t][h][w]; + } + } + } + int num_windows_h = padded_h / vit_merger_window_size; + int num_windows_w = padded_w / vit_merger_window_size; + std::vector>>> permuted_windows( + grid_t, std::vector>>( + num_windows_h * num_windows_w, std::vector>( + vit_merger_window_size, std::vector(vit_merger_window_size)))); + for (int t = 0; t < grid_t; ++t) { + for (int wh = 0; wh < num_windows_h; ++wh) { + for (int ww = 0; 
ww < num_windows_w; ++ww) { + for (int h_in_win = 0; h_in_win < vit_merger_window_size; ++h_in_win) { + for (int w_in_win = 0; w_in_win < vit_merger_window_size; ++w_in_win) { + int original_h = wh * vit_merger_window_size + h_in_win; + int original_w = ww * vit_merger_window_size + w_in_win; + permuted_windows[t][wh * num_windows_w + ww][h_in_win][w_in_win] = index_padded[t][original_h][original_w]; + } + } + } + } + } + std::vector seqlens; + for (int t = 0; t < grid_t; ++t) { + for (int win_idx = 0; win_idx < num_windows_h * num_windows_w; ++win_idx) { + int count = 0; + for (int h = 0; h < vit_merger_window_size; ++h) { + for (int w = 0; w < vit_merger_window_size; ++w) { + if (permuted_windows[t][win_idx][h][w] != -100.0f) { + count++; + } + } + } + seqlens.push_back(static_cast(count)); + } + } + std::vector index_new; + for (int t = 0; t < grid_t; ++t) { + for (int win_idx = 0; win_idx < num_windows_h * num_windows_w; ++win_idx) { + for (int h = 0; h < vit_merger_window_size; ++h) { + for (int w = 0; w < vit_merger_window_size; ++w) { + float val = permuted_windows[t][win_idx][h][w]; + if (val != -100.0f) { + index_new.push_back(val); + } + } + } + } + } + for (float &val : index_new) { + val += window_index_id; + } + window_index_parts.push_back(index_new); + std::vector cu_seqlens_tmp(seqlens.size()); + if (!seqlens.empty()) { + cu_seqlens_tmp[0] = seqlens[0]; + for (size_t i = 1; i < seqlens.size(); ++i) { + cu_seqlens_tmp[i] = cu_seqlens_tmp[i - 1] + seqlens[i]; + } + } + for (float &val : cu_seqlens_tmp) { + val = val * spatial_merge_unit + cu_window_seqlens.back(); + } + cu_window_seqlens.insert(cu_window_seqlens.end(), cu_seqlens_tmp.begin(), cu_seqlens_tmp.end()); + window_index_id += total_elements; + } + std::vector final_window_index; + for (const auto &part : window_index_parts) { + final_window_index.insert(final_window_index.end(), part.begin(), part.end()); + } + // cu_window_seqlens去除重复元素 + 
cu_window_seqlens.erase(std::unique(cu_window_seqlens.begin(), cu_window_seqlens.end()), cu_window_seqlens.end()); + // 转为Tensor类型输出 + Tensor window_index_tensor(final_window_index, MLLM_CPU); + Tensor cu_window_seqlens_tensor(cu_window_seqlens, MLLM_CPU); + // window_index_tensor.setName("window_index"); + // cu_window_seqlens_tensor.setName("cu_window_seqlens"); + return {window_index_tensor, cu_window_seqlens_tensor}; + } +}; + +class QWen2MLP final : public Module { +public: + QWen2MLP() = default; + QWen2MLP(int hidden_size, int intermediate_size, const QWenNameConfig &names, + const std::string &base_name) { + gate_proj = Linear(hidden_size, intermediate_size, false, base_name + names._gate_proj_name); + silu = SiLU(base_name + "act"); + up_proj = Linear(hidden_size, intermediate_size, false, base_name + names._up_proj_name); + down_proj = + Linear(intermediate_size, hidden_size, false, base_name + names._down_proj_name); + } + std::vector Forward(std::vector inputs, std::vector args) override { + auto x = gate_proj(inputs[0]); + x = silu(x); + auto y = up_proj(inputs[0]); + x = x * y; + x = down_proj(x); + return {x}; + } + +private: + Layer gate_proj; + Layer up_proj; + Layer down_proj; + Layer silu; +}; + +// Copied from GemmaAttention with Gemma->Qwen and using SWA +class QWen2Attention final : public Module { +public: + QWen2Attention() = default; + QWen2Attention(const Qwen2VLConfig &config, const QWenNameConfig &names, const string &base_name) { + hidden_size = config.hidden_size; + num_heads = config.num_attention_heads; + head_dim = config.hidden_size / num_heads; + num_key_value_heads = config.num_key_value_heads; + num_key_value_groups = num_heads / num_key_value_heads; + name = base_name; + attn_impl = config.attn_implementation; + + // init layers + q_proj = Linear(hidden_size, num_heads * head_dim, true, base_name + names._q_proj_name); + k_proj = Linear(hidden_size, num_key_value_heads * head_dim, true, + base_name + names._k_proj_name); + 
v_proj = Linear(hidden_size, num_key_value_heads * head_dim, true, + base_name + names._v_proj_name); + o_proj = Linear(num_heads * head_dim, hidden_size, false, base_name + names._o_proj_name); + q_rope = MultimodalRoPE(config.rope_theta, config.max_position_embeddings, config.mrope_section, base_name + "q_rope"); + k_rope = MultimodalRoPE(config.rope_theta, config.max_position_embeddings, config.mrope_section, base_name + "k_rope"); + k_cache = KVCache(num_key_value_heads, head_dim, num_key_value_groups, config.cache_limit, config.attn_implementation, base_name + "k_cache"); + v_cache = KVCache(num_key_value_heads, head_dim, num_key_value_groups, config.cache_limit, config.attn_implementation, base_name + "v_cache"); + softmax = Softmax(DIMENSION, true, base_name + "softmax"); + } + + std::vector Forward(std::vector inputs, std::vector args) override { + auto position_ids = inputs[1]; + auto query_states = q_proj(inputs[0]); + auto key_states = k_proj(inputs[0]); + auto value_states = v_proj(inputs[0]); + query_states = query_states.view(-1, num_heads, -1, head_dim); + key_states = key_states.view(-1, num_key_value_heads, -1, head_dim); + value_states = value_states.view(-1, num_key_value_heads, -1, head_dim); + query_states = q_rope(query_states, position_ids); + key_states = k_rope(key_states, position_ids); + key_states = k_cache(key_states); + value_states = v_cache(value_states); + + Tensor atten_output; + if (attn_impl == "flash_attention_2") { + atten_output = Tensor::flash_attention2_forward(query_states, key_states, value_states, true); + } else if (attn_impl == "sage_attention") { + atten_output = Tensor::sage_attention_forward(query_states, key_states, value_states, true); + } else { // eager implementation + auto atten_weight = + Tensor::mm(query_states, key_states.transpose(Chl::SEQUENCE, Chl::DIMENSION)) + / std::sqrt(head_dim); + atten_weight = softmax(atten_weight, k_cache.getCacheSeqLen()); + atten_output = Tensor::mm(atten_weight, value_states); 
+ } + atten_output = atten_output.view(-1, 1, -1, head_dim * num_heads); + atten_output = o_proj(atten_output); + return {atten_output}; + } + + vector get_cache() { + return {&k_cache, &v_cache}; + } + vector get_rope() { + return {&q_rope, &k_rope}; + } + +private: + int hidden_size; + int num_heads; + int head_dim; + int num_key_value_heads; + int num_key_value_groups; + Layer q_proj; + Layer k_proj; + Layer v_proj; + Layer o_proj; + MultimodalRoPE q_rope; + MultimodalRoPE k_rope; + KVCache k_cache; + KVCache v_cache; + Softmax softmax; + string name; + string attn_impl; +}; + +// Copied from GemmaDecoder with Gemma->Qwen and set RmsNorm(without add_unit_offset) +class QWen2Decoder final : public Module { +public: + QWen2Decoder() = default; + QWen2Decoder(const Qwen2VLConfig &config, const QWenNameConfig &names, const string &base_name) { + self_atten = QWen2Attention(config, names, base_name + names._attn_base_name); + mlp = QWen2MLP(config.hidden_size, config.intermediate_size, names, + base_name + names._ffn_base_name); + input_layernorm = + RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._attn_norm_name); + post_attention_layernorm = + RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._ffn_norm_name); + } + std::vector Forward(std::vector inputs, std::vector args) override { + auto position_ids = inputs[1]; + auto x = input_layernorm(inputs[0]); + x = self_atten({x, position_ids})[0]; + auto tmp = x + inputs[0]; + x = post_attention_layernorm(tmp); + x = mlp({x})[0]; + x = x + tmp; + return {x}; + } + QWen2Attention &get_attention() { + return self_atten; + } + +private: + QWen2Attention self_atten; + QWen2MLP mlp; + Layer input_layernorm; + Layer post_attention_layernorm; +}; + +class Qwen2VLModel final : public Module { + Qwen2VisionModel visual; + Layer embed_tokens; + + vector blocks; + Layer norm; + Parameter lm_head; + Layer lm_head_layer; + + bool tie_embedding_words; + + int64_t spatial_merge_size; + int64_t 
image_token_id; + int64_t video_token_id; + int64_t vision_start_token_id; + +public: + explicit Qwen2VLModel(const Qwen2VLConfig &config) { + auto vocab_size = config.vocab_size; + auto hidden_dim = config.hidden_size; + auto head_size = config.num_attention_heads; + auto ffn_hidden = config.intermediate_size; + auto projection_cls = config.projection_cls; + auto vision_embed_dim = config.vision_embed_dim; + auto visiion_intermediate_size = config.visiion_intermediate_size; + image_token_id = config.image_token_id; + auto vision_names = config.vision_names_config; + auto qwen_names = config.names_config; + tie_embedding_words = config.tie_embedding_words; + image_token_id = config.image_token_id; + video_token_id = config.video_token_id; + vision_start_token_id = config.vision_start_token_id; + + embed_tokens = Embedding(vocab_size, hidden_dim, qwen_names.token_embd_name); + visual = Qwen2VisionModel(hidden_dim, vision_embed_dim, 16, visiion_intermediate_size, "SiLU", 14, 336, 32, config.attn_implementation, config, vision_names, vision_names.vison_model_name); + + blocks = List(config.num_hidden_layers, config, qwen_names, qwen_names.blk_name); + norm = RMSNorm(hidden_dim, 1e-6, qwen_names.post_norm_name); + if (tie_embedding_words) { + lm_head = Parameter(1, config.vocab_size, 1, config.hidden_size, qwen_names.token_embd_name + ".weight"); + } else { + lm_head_layer = Linear(config.hidden_size, config.vocab_size, false, qwen_names.lm_head_name); + } + } + vector Forward(vector inputs, vector args) override { + auto position_ids = inputs[3]; + bool have_img = inputs[1].batch() > 0; + auto hidden_states = embed_tokens({inputs[0]}); + if (have_img) { + auto image_embeds = visual({inputs[1], inputs[2]})[0]; + auto n_image_features = image_embeds.sequence(); + auto where_idx = inputs[0].where(image_token_id, SEQUENCE); + hidden_states = hidden_states.index_put(image_embeds, where_idx, false); + } + for (auto &block : blocks) { + hidden_states = block({hidden_states, 
position_ids})[0]; + } + hidden_states = norm(hidden_states); + if (hidden_states.sequence() > 1) { + hidden_states = hidden_states.clip({}, {}, {-1}, {}); + } + if (tie_embedding_words) { + hidden_states = Tensor::mm(hidden_states, lm_head().transpose(Chl::SEQUENCE, Chl::DIMENSION)); + } else { + hidden_states = lm_head_layer(hidden_states); + } + return {hidden_states}; + } + void clear_kvcache() override { + for (auto &block : blocks) { + auto kvcahce = block.get_attention().get_cache(); + for (auto &cache : kvcahce) { + cache->clearCache(); + } + } + } + void get_position_ids(vector &inputs) { + if (inputs[0].sequence() > 1) { + Tensor video_grid_thw(0, 0, 0, 0, MLLM_CPU, true); + auto rope_indices = get_rope_index(inputs[0], inputs[2], video_grid_thw); + auto position = rope_indices[0]; + if (inputs.size() == 4) { + inputs[3] = position; + } else { + inputs.push_back(position); + } + } else { + auto &position_ids = inputs[3]; + auto last_pos = position_ids.dataAt(0, 0, 0, position_ids.dimension() - 1); + position_ids.reshape(position_ids.batch(), 1, position_ids.sequence(), 1); + for (int b = 0; b < position_ids.batch(); b++) { + for (int s = 0; s < position_ids.sequence(); s++) { + position_ids.setDataAt(b, 0, s, 0, last_pos + 1); + } + } + } + } + +private: + vector get_rope_index( + Tensor input_ids, + Tensor image_grid_thw, + Tensor video_grid_thw) { + vector> attention_mask; + auto attention_mask_shape = input_ids.sequence(); + for (int b = 0; b < input_ids.batch(); b++) { + attention_mask.emplace_back(attention_mask_shape, 1); + } + const size_t batch_size = input_ids.batch(); // input_ids.size(); + const size_t seq_len = batch_size > 0 ? input_ids.sequence() : 0; // batch_size > 0 ? 
input_ids[0].size() : 0; + Tensor position_ids(3, 1, batch_size, seq_len, Backend::global_backends[MLLM_CPU].get(), true); + Tensor mrope_position_deltas(1, 1, 1, batch_size, Backend::global_backends[MLLM_CPU].get(), true); + bool has_vision = (image_grid_thw.sequence() > 0) || (video_grid_thw.sequence() > 0); // image_grid_thw || video_grid_thw; + if (!has_vision) { + // Pure text case + for (size_t i = 0; i < batch_size; ++i) { + const auto &mask = !attention_mask.empty() ? attention_mask[i] : vector(seq_len, 1); + vector positions; + int64_t pos = 0; + for (size_t j = 0; j < seq_len; ++j) { + if (mask[j] == 1) { + positions.push_back(pos++); + } else { + positions.push_back(1); // Will be overwritten by mask + } + } + for (int dim = 0; dim < 3; ++dim) { + for (size_t j = 0; j < seq_len; ++j) { + position_ids.setDataAt(dim, 0, i, j, (float)(mask[j] == 1 ? positions[j] : 1)); + } + } + int64_t max_pos = pos - 1; + mrope_position_deltas.setDataAt(0, 0, 0, i, (float)((max_pos + 1) - static_cast(input_ids.sequence()))); + } + position_ids.setName("position_ids"); + mrope_position_deltas.setName("mrope_position_deltas"); + return {position_ids, mrope_position_deltas}; + } + // Process vision cases + size_t image_idx = 0, video_idx = 0; + for (size_t i = 0; i < batch_size; ++i) { + const auto &mask = !attention_mask.empty() ? 
attention_mask[i] : vector(seq_len, 1); + // Extract valid tokens + vector valid_tokens; + for (size_t j = 0; j < input_ids.sequence(); ++j) { + if (mask[j] == 1) valid_tokens.push_back((int)input_ids.dataAt(i, 0, j, 0)); + } + // Find vision start positions + vector vision_starts; + vector vision_types; + for (size_t j = 0; j < valid_tokens.size(); ++j) { + if (valid_tokens[j] == vision_start_token_id && j + 1 < valid_tokens.size()) { + vision_starts.push_back(j); + vision_types.push_back(valid_tokens[j + 1]); + } + } + int64_t image_count = count(vision_types.begin(), vision_types.end(), image_token_id); + int64_t video_count = vision_types.size() - image_count; + vector> llm_positions(3); + size_t st = 0; + int64_t current_max = 0; + int64_t remain_images = image_count; + int64_t remain_videos = video_count; + // Process each vision segment + for (size_t vs = 0; vs < vision_starts.size(); ++vs) { + // Find next vision token + size_t ed_image = valid_tokens.size(); + size_t ed_video = valid_tokens.size(); + if (remain_images > 0) { + auto it = find(valid_tokens.begin() + st, valid_tokens.end(), image_token_id); + if (it != valid_tokens.end()) ed_image = it - valid_tokens.begin(); + } + if (remain_videos > 0) { + auto it = find(valid_tokens.begin() + st, valid_tokens.end(), video_token_id); + if (it != valid_tokens.end()) ed_video = it - valid_tokens.begin(); + } + size_t ed = min(ed_image, ed_video); + if (ed == valid_tokens.size()) break; + // Get grid parameters + int64_t t, h, w; + bool is_image = (ed == ed_image); + if (is_image) { + t = (int64_t)image_grid_thw.dataAt(0, 0, image_idx, 0); + h = (int64_t)image_grid_thw.dataAt(0, 0, image_idx, 1); + w = (int64_t)image_grid_thw.dataAt(0, 0, image_idx, 2); + image_idx++; + remain_images--; + } else { + t = (int64_t)video_grid_thw.dataAt(0, 0, video_idx, 0); + h = (int64_t)video_grid_thw.dataAt(0, 0, video_idx, 1); + w = (int64_t)video_grid_thw.dataAt(0, 0, video_idx, 2); + video_idx++; + remain_videos--; + } + // 
Calculate grid dimensions + int64_t llm_grid_t = t; + int64_t llm_grid_h = h / spatial_merge_size; + int64_t llm_grid_w = w / spatial_merge_size; + // Process text segment + size_t text_len = ed - st; + if (text_len > 0) { + int64_t start_idx = current_max; + for (int64_t k = 0; k < text_len; ++k) { + for (int dim = 0; dim < 3; ++dim) { + llm_positions[dim].push_back(start_idx + k); + } + } + current_max += text_len; + } + for (int64_t ti = 0; ti < llm_grid_t; ++ti) { + for (int64_t hi = 0; hi < llm_grid_h; ++hi) { + for (int64_t wi = 0; wi < llm_grid_w; ++wi) { + llm_positions[0].push_back(current_max + ti); + llm_positions[1].push_back(current_max + hi); + llm_positions[2].push_back(current_max + wi); + } + } + } + current_max = std::max({llm_positions[0][llm_positions[0].size() - 1], + llm_positions[1][llm_positions[1].size() - 1], + llm_positions[2][llm_positions[2].size() - 1]}); + st = ed + llm_grid_t * llm_grid_h * llm_grid_w; + } + // Process remaining text + if (st < valid_tokens.size()) { + size_t text_len = valid_tokens.size() - st; + int64_t st_idx = current_max + 1; + for (int64_t k = 0; k < text_len; ++k) { + for (int dim = 0; dim < 3; ++dim) { + llm_positions[dim].push_back(st_idx + k); + } + } + current_max += text_len; + } + // Fill position_ids with valid positions + size_t valid_idx = 0; + for (size_t j = 0; j < seq_len; ++j) { + if (mask[j] == 1) { + if (valid_idx < llm_positions[0].size()) { + position_ids.setDataAt(0, 0, i, j, (float)llm_positions[0][valid_idx]); + position_ids.setDataAt(1, 0, i, j, (float)llm_positions[1][valid_idx]); + position_ids.setDataAt(2, 0, i, j, (float)llm_positions[2][valid_idx]); + valid_idx++; + } + } + } + // Calculate delta + int64_t max_pos = 0; + for (const auto &dim : llm_positions) { + for (auto val : dim) { + max_pos = max(max_pos, val); + } + } + mrope_position_deltas.setDataAt(0, 0, 0, i, (float)((max_pos + 1) - static_cast(input_ids.sequence()))); + } + position_ids.setName("position_ids"); + 
mrope_position_deltas.setName("mrope_position_deltas"); + return {position_ids, mrope_position_deltas}; + } +}; +#endif // MODELING_QWEN2VL_HPP \ No newline at end of file diff --git a/mllm/models/qwen2_vl/configuration_qwen2_vl.hpp b/mllm/models/qwen2_vl/configuration_qwen2_vl.hpp new file mode 100644 index 000000000..1545b80c5 --- /dev/null +++ b/mllm/models/qwen2_vl/configuration_qwen2_vl.hpp @@ -0,0 +1,101 @@ +// +// Created by Rongjie Yi on 25-2-9. +// +#ifndef CONFIG_PHI3V_HPP +#define CONFIG_PHI3V_HPP +#include "models/qwen/configuration_qwen.hpp" +#include "models/vit/configuration_vit.hpp" +// #include + +using namespace mllm; + +class Qwen2VLNameConfig : public ViTNameConfig { +public: + // string token_embd_name = "model.embed_tokens"; + string patch_embed_name = ".patch_embed"; // + string _merger_name = ".merger"; // + string _ln_q_name = ".ln_q"; // + string _m_mlp_0_name = ".mlp.0"; // + string _m_mlp_2_name = ".mlp.2"; // + void init_qwen2vl() { + vison_model_name = "visual"; // + _patch_embedding_name = ".proj"; // + _layer_name = ".blocks."; // + _attn_base_name = "attn."; // + _ffn_base_name = "mlp."; // + _qkv_proj_name = "qkv"; // + _o_proj_name = "proj"; // + _up_proj_name = "fc1"; // + _down_proj_name = "fc2"; // + _attn_norm_name = "norm1"; // + _ffn_norm_name = "norm2"; // + } +}; + +class Qwen2VLConfig : virtual public QWenConfig { +public: + int vision_embed_dim; + int spatial_merge_size = 2; + string projection_cls; + + int bos_token_id = 151643; + int eos_token_id = 151645; + int vision_start_token_id = 151652; + int vision_end_token_id = 151653; + int vision_token_id = 151654; + int image_token_id = 151655; + int video_token_id = 151656; + vector mrope_section = {16, 24, 24}; + + Qwen2VLNameConfig vision_names_config; + + Qwen2VLConfig(int token_limit, string billions = "1.5b", RoPEType type = HFHUBROPE, int vocab = 32064, string project_cls = "MLP") : + QWenConfig(token_limit, billions, type) { + // names_config.init(type); + 
projection_cls = project_cls; + vision_embed_dim = 1280; + vision_names_config.init_qwen2vl(); + } + + Qwen2VLConfig(const Qwen2VLConfig &other) = default; + Qwen2VLConfig(Qwen2VLConfig &&other) noexcept = default; + Qwen2VLConfig &operator=(const Qwen2VLConfig &other) = default; + Qwen2VLConfig &operator=(Qwen2VLConfig &&other) noexcept { + if (this != &other) { + QWenConfig::operator=(std::move(other)); + vision_embed_dim = other.vision_embed_dim; + spatial_merge_size = other.spatial_merge_size; + projection_cls = std::move(other.projection_cls); + + bos_token_id = other.bos_token_id; + eos_token_id = other.eos_token_id; + vision_start_token_id = other.vision_start_token_id; + vision_end_token_id = other.vision_end_token_id; + vision_token_id = other.vision_token_id; + image_token_id = other.image_token_id; + video_token_id = other.video_token_id; + + mrope_section = std::move(other.mrope_section); + vision_names_config = std::move(other.vision_names_config); + } + return *this; + } +}; + +class Qwen2VLNPUConfig : public Qwen2VLConfig, public QWenNPUConfig { +public: + Qwen2VLNPUConfig(int token_limit, string billions = "1.5b", RoPEType type = HFHUBROPE, int vocab = 32064, string project_cls = "MLP") : + QWenConfig(token_limit, billions, type), + Qwen2VLConfig(token_limit, billions, type, vocab, project_cls), + QWenNPUConfig(token_limit, billions, type) { + std::cout << "use i32 bias: " << use_i32_bias << std::endl; + std::cout << "use high silu: " << use_high_precision_silu << std::endl; + std::cout << "shadow layers: "; + for (auto i : shadow_layers) { + std::cout << i << " "; + } + std::cout << std::endl; + } +}; + +#endif // CONFIG_PHI3V_HPP diff --git a/src/models/qwen2_vl/modeling_qwen2_vl.hpp b/mllm/models/qwen2_vl/modeling_qwen2_vl.hpp similarity index 88% rename from src/models/qwen2_vl/modeling_qwen2_vl.hpp rename to mllm/models/qwen2_vl/modeling_qwen2_vl.hpp index bee84ccae..0676dea6b 100644 --- a/src/models/qwen2_vl/modeling_qwen2_vl.hpp +++ 
b/mllm/models/qwen2_vl/modeling_qwen2_vl.hpp @@ -4,6 +4,7 @@ #ifndef MODELING_QWEN2VL_HPP #define MODELING_QWEN2VL_HPP +#include "DataType.hpp" #include "Layer.hpp" #include "Module.hpp" #include "Tensor.hpp" @@ -11,6 +12,7 @@ #include "configuration_qwen2_vl.hpp" // #include "models/qwen/modeling_qwen.hpp" #include +#include #include #include @@ -39,19 +41,21 @@ class VisionAttention final : public Module { Layer o_proj; int head_size_{}; int kv_head_size_{}; - int attn_hidden_dim_{}; + int head_dim_{}; + string attn_impl; public: VisionAttention() = default; - VisionAttention(int hidden_dim, int head_size, int kv_head_size, int attn_hidden_dim, bool bias, + VisionAttention(int hidden_dim, int head_size, int kv_head_size, int head_dim, bool bias, string attn_implementation, const TransformerNameConfig &names, const string &base_name) { - attn_hidden_dim_ = attn_hidden_dim; + head_dim_ = head_dim; head_size_ = head_size; kv_head_size_ = kv_head_size; + attn_impl = attn_implementation; - qkv_proj = Linear(hidden_dim, head_size * attn_hidden_dim * 3, bias, base_name + names._qkv_proj_name); + qkv_proj = Linear(hidden_dim, head_size * head_dim * 3, bias, base_name + names._qkv_proj_name); softmax = Softmax(DIMENSION, false, base_name + "softmax"); - o_proj = Linear(head_size * attn_hidden_dim, hidden_dim, bias, base_name + names._o_proj_name); + o_proj = Linear(head_size * head_dim, hidden_dim, bias, base_name + names._o_proj_name); } vector Forward(vector inputs, vector args) override { auto cu_seqlens = inputs[1]; @@ -59,23 +63,27 @@ class VisionAttention final : public Module { auto seq_length = inputs[0].sequence(); Tensor q, k, v; auto qkv = qkv_proj(inputs[0]); - // auto qkv_sp = qkv.split({attn_hidden_dim_, attn_hidden_dim_, attn_hidden_dim_}, HD, head_size_); - auto qkv_sp = qkv.split({attn_hidden_dim_ * head_size_, attn_hidden_dim_ * head_size_, attn_hidden_dim_ * head_size_}, DIMENSION); + auto qkv_sp = qkv.split({head_dim_ * head_size_, head_dim_ * 
head_size_, head_dim_ * head_size_}, DIMENSION); q = qkv_sp[0]; k = qkv_sp[1]; v = qkv_sp[2]; - q = q.view(-1, head_size_, -1, attn_hidden_dim_); - k = k.view(-1, head_size_, -1, attn_hidden_dim_); - v = v.view(-1, head_size_, -1, attn_hidden_dim_); + q = q.view(-1, head_size_, -1, head_dim_); + k = k.view(-1, head_size_, -1, head_dim_); + v = v.view(-1, head_size_, -1, head_dim_); q = Tensor::apply_rotary_pos_emb_vision(q, rotary_pos_emb); k = Tensor::apply_rotary_pos_emb_vision(k, rotary_pos_emb); - k = k.transpose(SEQUENCE, DIMENSION); - auto qk = Tensor::mm(q, k); - qk = qk / std::sqrt(attn_hidden_dim_); - // mask - qk = softmax(qk); - auto o = Tensor::mm(qk, v); - o = o.view(-1, 1, -1, attn_hidden_dim_ * head_size_); + Tensor o; + if (attn_impl == "flash_attention_2") { + o = Tensor::flash_attention2_forward(q, k, v, false); + } else { // eager implementation + k = k.transpose(SEQUENCE, DIMENSION); + auto qk = Tensor::mm(q, k); + qk = qk / std::sqrt(head_dim_); + // mask + qk = softmax(qk); + o = Tensor::mm(qk, v); + } + o = o.view(-1, 1, -1, head_dim_ * head_size_); o = o_proj(o); return {o}; } @@ -109,8 +117,8 @@ class VisionBlock final : public Module { public: VisionBlock() = default; - VisionBlock(int hidden_dim, int head_size, int ffn_hidden, const string &act_fn_type, const ViTNameConfig &names, const string &base_name) { - attention = VisionAttention(hidden_dim, head_size, head_size, hidden_dim / head_size, true, names, base_name + names._attn_base_name); + VisionBlock(int hidden_dim, int head_size, int ffn_hidden, const string &act_fn_type, string attn_implementation, const ViTNameConfig &names, const string &base_name) { + attention = VisionAttention(hidden_dim, head_size, head_size, hidden_dim / head_size, true, attn_implementation, names, base_name + names._attn_base_name); mlp = VisionMLP(hidden_dim, ffn_hidden, act_fn_type, names, base_name + names._ffn_base_name); norm1 = LayerNorm(hidden_dim, true, 1e-6, base_name + names._attn_norm_name); 
norm2 = LayerNorm(hidden_dim, true, 1e-6, base_name + names._ffn_norm_name); @@ -160,18 +168,18 @@ class Qwen2VisionModel final : public Module { public: Qwen2VisionModel() = default; - Qwen2VisionModel(int hidden_dim, int vision_embed_dim, int head_size, int mlp_hidden_dim, const string &act_fn_type, int patch, int img_hw, int block_num, int spatial_merge_size, const Qwen2VLNameConfig &names, const string &base_name) { + Qwen2VisionModel(int hidden_dim, int vision_embed_dim, int head_size, int mlp_hidden_dim, const string &act_fn_type, int patch, int img_hw, int block_num, int spatial_merge_size, string attn_implementation, const Qwen2VLNameConfig &names, const string &base_name) { patch_embed = Qwen2PatchEmbed(vision_embed_dim, patch, img_hw, names, base_name + names.patch_embed_name); rot_pos_emb = VisionRoPE((vision_embed_dim / head_size) / 2, spatial_merge_size, base_name + ".rot_pos_emb"); - blocks = List(block_num, vision_embed_dim, head_size, mlp_hidden_dim, act_fn_type, names, base_name + names._layer_name); + blocks = List(block_num, vision_embed_dim, head_size, mlp_hidden_dim, act_fn_type, attn_implementation, names, base_name + names._layer_name); patch_merger = PatchMerger(hidden_dim, vision_embed_dim, spatial_merge_size, names, base_name + names._merger_name); } vector Forward(vector inputs, vector args) override { auto hidden_states = patch_embed({inputs[0]})[0]; auto rotary_pos_emb = rot_pos_emb(inputs[1]); - auto grid_t = inputs[0].dataAt(0, 0, 0, 0); - auto grid_h = inputs[0].dataAt(0, 0, 0, 1); - auto grid_w = inputs[0].dataAt(0, 0, 0, 2); + auto grid_t = inputs[1].dataAt(0, 0, 0, 0); + auto grid_h = inputs[1].dataAt(0, 0, 0, 1); + auto grid_w = inputs[1].dataAt(0, 0, 0, 2); vector cu_seqlens_v = {0.0F, grid_t * grid_h * grid_w}; auto cu_seqlens = Tensor(cu_seqlens_v); for (auto &block : blocks) { @@ -219,6 +227,8 @@ class QWen2Attention final : public Module { head_dim = config.hidden_size / num_heads; num_key_value_heads = 
config.num_key_value_heads; num_key_value_groups = num_heads / num_key_value_heads; + name = base_name; + attn_impl = config.attn_implementation; // init layers q_proj = Linear(hidden_size, num_heads * head_dim, true, base_name + names._q_proj_name); @@ -229,14 +239,13 @@ class QWen2Attention final : public Module { o_proj = Linear(num_heads * head_dim, hidden_size, false, base_name + names._o_proj_name); q_rope = MultimodalRoPE(config.rope_theta, config.max_position_embeddings, config.mrope_section, base_name + "q_rope"); k_rope = MultimodalRoPE(config.rope_theta, config.max_position_embeddings, config.mrope_section, base_name + "k_rope"); - k_cache = KVCache(num_key_value_heads, head_dim, num_key_value_groups, config.cache_limit, base_name + "k_cache"); - v_cache = KVCache(num_key_value_heads, head_dim, num_key_value_groups, config.cache_limit, base_name + "v_cache"); + k_cache = KVCache(num_key_value_heads, head_dim, num_key_value_groups, config.cache_limit, attn_impl, base_name + "k_cache"); + v_cache = KVCache(num_key_value_heads, head_dim, num_key_value_groups, config.cache_limit, attn_impl, base_name + "v_cache"); softmax = Softmax(DIMENSION, true, base_name + "softmax"); } std::vector Forward(std::vector inputs, std::vector args) override { auto position_ids = inputs[1]; - auto query_states = q_proj(inputs[0]); auto key_states = k_proj(inputs[0]); auto value_states = v_proj(inputs[0]); @@ -247,11 +256,19 @@ class QWen2Attention final : public Module { key_states = k_rope(key_states, position_ids); key_states = k_cache(key_states); value_states = v_cache(value_states); - auto atten_weight = - Tensor::mm(query_states, key_states.transpose(Chl::SEQUENCE, Chl::DIMENSION)) - / std::sqrt(head_dim); - atten_weight = softmax(atten_weight, k_cache.getCacheSeqLen()); - auto atten_output = Tensor::mm(atten_weight, value_states); + + Tensor atten_output; + if (attn_impl == "flash_attention_2") { + atten_output = Tensor::flash_attention2_forward(query_states, 
key_states, value_states, true); + } else if (attn_impl == "sage_attention") { + atten_output = Tensor::sage_attention_forward(query_states, key_states, value_states, true); + } else { // eager implementation + auto atten_weight = + Tensor::mm(query_states, key_states.transpose(Chl::SEQUENCE, Chl::DIMENSION)) + / std::sqrt(head_dim); + atten_weight = softmax(atten_weight, k_cache.getCacheSeqLen()); + atten_output = Tensor::mm(atten_weight, value_states); + } atten_output = atten_output.view(-1, 1, -1, head_dim * num_heads); atten_output = o_proj(atten_output); return {atten_output}; @@ -279,6 +296,8 @@ class QWen2Attention final : public Module { KVCache k_cache; KVCache v_cache; Softmax softmax; + string name; + string attn_impl; }; // Copied from GemmaDecoder with Gemma->Qwen and set RmsNorm(without add_unit_offset) @@ -349,7 +368,7 @@ class Qwen2VLModel final : public Module { vision_start_token_id = config.vision_start_token_id; embed_tokens = Embedding(vocab_size, hidden_dim, qwen_names.token_embd_name); - visual = Qwen2VisionModel(hidden_dim, vision_embed_dim, 16, vision_embed_dim * 4, "QuickGELU", 14, 336, 32, spatial_merge_size, vision_names, vision_names.vison_model_name); + visual = Qwen2VisionModel(hidden_dim, vision_embed_dim, 16, vision_embed_dim * 4, "QuickGELU", 14, 336, 32, spatial_merge_size, config.attn_implementation, vision_names, vision_names.vison_model_name); blocks = List(config.num_hidden_layers, config, qwen_names, qwen_names.blk_name); norm = RMSNorm(hidden_dim, 1e-6, qwen_names.post_norm_name); @@ -425,8 +444,8 @@ class Qwen2VLModel final : public Module { } const size_t batch_size = input_ids.batch(); // input_ids.size(); const size_t seq_len = batch_size > 0 ? input_ids.sequence() : 0; // batch_size > 0 ? 
input_ids[0].size() : 0; - Tensor position_ids(3, 1, batch_size, seq_len, Backend::global_backends[MLLM_CPU], true); - Tensor mrope_position_deltas(1, 1, 1, batch_size, Backend::global_backends[MLLM_CPU], true); + Tensor position_ids(3, 1, batch_size, seq_len, Backend::global_backends[MLLM_CPU].get(), true); + Tensor mrope_position_deltas(1, 1, 1, batch_size, Backend::global_backends[MLLM_CPU].get(), true); bool has_vision = (image_grid_thw.sequence() > 0) || (video_grid_thw.sequence() > 0); // image_grid_thw || video_grid_thw; if (!has_vision) { // Pure text case @@ -575,4 +594,4 @@ class Qwen2VLModel final : public Module { return {position_ids, mrope_position_deltas}; } }; -#endif // MODELING_PHI3_HPP \ No newline at end of file +#endif // MODELING_QWEN2VL_HPP \ No newline at end of file diff --git a/mllm/models/qwen2_vl/modeling_qwen2_vl_npu.hpp b/mllm/models/qwen2_vl/modeling_qwen2_vl_npu.hpp new file mode 100644 index 000000000..e7e9cb283 --- /dev/null +++ b/mllm/models/qwen2_vl/modeling_qwen2_vl_npu.hpp @@ -0,0 +1,1002 @@ +#ifndef MODELING_QWEN2VL_NPU_HPP +#define MODELING_QWEN2VL_NPU_HPP + +#include "Layer.hpp" +#include "Module.hpp" +#include "Tensor.hpp" +#include "Timing.hpp" +#include "Types.hpp" +#include "configuration_qwen2_vl.hpp" +#include "models/qwen2_vl/modeling_qwen2_vl.hpp" +#include +#include +#include +#include + +using namespace mllm; + +// NPU QKV part +class QwenDecoderNPUPart1 : public Module { +protected: + int hidden_size; + int num_heads; + int head_dim; + int num_key_value_heads; + int num_key_value_groups; + + // it is for speed up the QNN linear implemented by conv, TODO: should integrate into QNNLinear + Layer pre_attn_view; + + Layer q_proj; + Layer k_proj; + Layer v_proj; + + Layer q_view; + Layer k_view; + Layer v_view; + + Layer q_dequant; + Layer k_dequant; + Layer v_dequant; + Layer v_transpose; + +public: + QwenDecoderNPUPart1() = default; + QwenDecoderNPUPart1(const Qwen2VLNPUConfig &config, const QWenNameConfig &names, 
int chunk_size, const string &base_name) { + hidden_size = config.hidden_size; + num_heads = config.num_attention_heads; + head_dim = config.hidden_size / num_heads; + num_key_value_heads = config.num_key_value_heads; + num_key_value_groups = num_heads / num_key_value_heads; + + pre_attn_view = View(1, utils::closestFactors(chunk_size).first, utils::closestFactors(chunk_size).second, num_heads * head_dim, base_name + "ires_split-00_view_"); + + q_proj = Linear(hidden_size, num_heads * head_dim, config.use_i32_bias, base_name + names._q_proj_name); + k_proj = Linear(hidden_size, num_key_value_heads * head_dim, config.use_i32_bias, base_name + names._k_proj_name); + v_proj = Linear(hidden_size, num_key_value_heads * head_dim, config.use_i32_bias, base_name + names._v_proj_name); + + q_view = View(1, num_heads, chunk_size, head_dim, base_name + names._q_proj_name + "-00_view_"); + k_view = View(1, num_key_value_heads, chunk_size, head_dim, base_name + names._k_proj_name + "-00_view_"); + v_view = View(1, num_key_value_heads, chunk_size, head_dim, base_name + names._v_proj_name + "-00_view_"); + + if (config.use_i32_bias) { + q_dequant = Dequantize(true, base_name + names._q_proj_name + ".dequantize", true, MLLM_TYPE_I16); + k_dequant = Dequantize(true, base_name + names._k_proj_name + ".dequantize", false, MLLM_TYPE_I16); + v_dequant = Dequantize(true, base_name + names._v_proj_name + ".dequantize", false, MLLM_TYPE_I16); + } else { + q_dequant = DequantizeAdd(true, num_heads * head_dim, base_name + names._q_proj_name + ".dequantize", true, MLLM_TYPE_I16); + k_dequant = DequantizeAdd(true, num_key_value_heads * head_dim, base_name + names._k_proj_name + ".dequantize", false, MLLM_TYPE_I16); + v_dequant = DequantizeAdd(true, num_key_value_heads * head_dim, base_name + names._v_proj_name + ".dequantize", false, MLLM_TYPE_I16); + } + + v_transpose = Transpose({0, 2, 3, 1}, base_name + names._v_proj_name + ".transpose"); + } + + vector Forward(vector inputs, vector args) 
override { + auto x = pre_attn_view(inputs[0]); + + auto query_states = q_proj(x); + auto key_states = k_proj(x); + auto value_states = v_proj(x); + + query_states = q_view(query_states); + key_states = k_view(key_states); + value_states = v_view(value_states); + + // return {query_states, key_states, value_states}; + + query_states = q_dequant(query_states); + key_states = k_dequant(key_states); + value_states = v_dequant(value_states); + + value_states = v_transpose(value_states); + return {query_states, key_states, value_states}; + } +}; + +class QwenDecoderNPUPart1WithRes final : public QwenDecoderNPUPart1 { + Layer input_layernorm; + Layer pre_attn_quantize; + +public: + QwenDecoderNPUPart1WithRes() = default; + QwenDecoderNPUPart1WithRes(const Qwen2VLNPUConfig &config, const QWenNameConfig &names, int chunk_size, const string &base_name) { + hidden_size = config.hidden_size; + num_heads = config.num_attention_heads; + head_dim = config.hidden_size / num_heads; + num_key_value_heads = config.num_key_value_heads; + num_key_value_groups = num_heads / num_key_value_heads; + + // remove "self_attn." 
in base_name + auto layer_base_name = base_name.substr(0, base_name.size() - 10); + input_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, layer_base_name + names._attn_norm_name); + pre_attn_quantize = Quantize(true, layer_base_name + names._attn_base_name + names._q_proj_name + ".quantize", MLLM_TYPE_I16); + + pre_attn_view = View(1, utils::closestFactors(chunk_size).first, utils::closestFactors(chunk_size).second, num_heads * head_dim, base_name + "ires_split-00_view_"); + + q_proj = Linear(hidden_size, num_heads * head_dim, config.use_i32_bias, base_name + names._q_proj_name); + k_proj = Linear(hidden_size, num_key_value_heads * head_dim, config.use_i32_bias, base_name + names._k_proj_name); + v_proj = Linear(hidden_size, num_key_value_heads * head_dim, config.use_i32_bias, base_name + names._v_proj_name); + + q_view = View(1, num_heads, chunk_size, head_dim, base_name + names._q_proj_name + "-00_view_"); + k_view = View(1, num_key_value_heads, chunk_size, head_dim, base_name + names._k_proj_name + "-00_view_"); + v_view = View(1, num_key_value_heads, chunk_size, head_dim, base_name + names._v_proj_name + "-00_view_"); + + if (config.use_i32_bias) { + q_dequant = Dequantize(true, base_name + names._q_proj_name + ".dequantize", true, MLLM_TYPE_I16); + k_dequant = Dequantize(true, base_name + names._k_proj_name + ".dequantize", false, MLLM_TYPE_I16); + v_dequant = Dequantize(true, base_name + names._v_proj_name + ".dequantize", false, MLLM_TYPE_I16); + } else { + q_dequant = DequantizeAdd(true, num_heads * head_dim, base_name + names._q_proj_name + ".dequantize", true, MLLM_TYPE_I16); + k_dequant = DequantizeAdd(true, num_key_value_heads * head_dim, base_name + names._k_proj_name + ".dequantize", false, MLLM_TYPE_I16); + v_dequant = DequantizeAdd(true, num_key_value_heads * head_dim, base_name + names._v_proj_name + ".dequantize", false, MLLM_TYPE_I16); + } + + v_transpose = Transpose({0, 2, 3, 1}, base_name + names._v_proj_name + ".transpose"); + } + 
+ vector Forward(vector inputs, vector args) override { + auto x = input_layernorm(inputs[0]); + x = pre_attn_quantize(x); + + x = pre_attn_view(x); + + auto query_states = q_proj(x); + auto key_states = k_proj(x); + auto value_states = v_proj(x); + + query_states = q_view(query_states); + key_states = k_view(key_states); + value_states = v_view(value_states); + + query_states = q_dequant(query_states); + key_states = k_dequant(key_states); + value_states = v_dequant(value_states); + + value_states = v_transpose(value_states); + return {query_states, key_states, value_states, inputs[0]}; + } +}; + +// CPU QKV MM part +class QwenQKVmm final : public Module { + MultimodalRoPE q_rope; + MultimodalRoPE k_rope; + KVCache k_cache; + KVCache v_cache; + Softmax softmax; + Layer o_quantize; + + int hidden_size; + int num_heads; + int head_dim; + int num_key_value_heads; + int num_key_value_groups; + +public: + QwenQKVmm() = default; + QwenQKVmm(const Qwen2VLNPUConfig &config, const QWenNameConfig &names, int chunk_size, const string &base_name) { + hidden_size = config.hidden_size; + num_heads = config.num_attention_heads; + head_dim = config.hidden_size / num_heads; + + q_rope = MultimodalRoPE(config.rope_theta, config.max_position_embeddings, config.mrope_section, base_name + "q_rope"); + k_rope = MultimodalRoPE(config.rope_theta, config.max_position_embeddings, config.mrope_section, base_name + "k_rope"); + + k_cache = KVCache(config.num_attention_heads / config.num_key_value_heads, config.cache_limit, base_name + "k_cache", true); + v_cache = KVCache(config.num_attention_heads / config.num_key_value_heads, config.cache_limit, base_name + "v_cache", true); + + softmax = Softmax(DIMENSION, true, base_name + "softmax"); + + o_quantize = Quantize(true, base_name + names._o_proj_name + ".quantize"); + } + + vector Forward(vector inputs, vector args) override { + auto position_ids = inputs[3]; + + auto q = inputs[0]; + auto k = inputs[1]; + auto v = inputs[2]; + + q = 
q_rope(q, position_ids); + k = k_rope(k, position_ids); + + k = k_cache(k); + v = v_cache(v); + + auto qk = Tensor::mm(q, k.transpose(Chl::SEQUENCE, Chl::DIMENSION)); + qk = qk / std::sqrt(head_dim); + qk = softmax(qk); + auto o = Tensor::mm(qk, v); + + o = o_quantize(o); + + return {o}; + } +}; + +// QNN mlp part +class QwenDecoderNPUPart2 : public Module { +protected: + int hidden_size; + int num_heads; + int head_dim; + int num_key_value_heads; + int num_key_value_groups; + int intermediate_size; + + // NPU part2 of attention + Layer pre_oproj_view; + Layer out_proj; + Layer post_oproj_view; + Layer post_oproj_dequantize; + + // NPU mlp + Layer pre_mlp_quantize; + Layer pre_mlp_view; + Layer gate_proj; + Layer up_proj; + Layer post_up_proj_dequantize; + Layer post_gate_proj_dequantize; + Layer silu; + Layer post_attn_layernorm; + + Layer down_proj; + Layer pre_down_proj_quantize; + Layer post_down_proj_dequantize; + Layer post_mlp_view; + + Layer post_atten_res_add; + Layer post_mlp_res_add; + Layer mlp_mul; + +public: + QwenDecoderNPUPart2() = default; + QwenDecoderNPUPart2(const Qwen2VLNPUConfig &config, const QWenNameConfig &names, int chunk_size, const string &base_name) { + hidden_size = config.hidden_size; + num_heads = config.num_attention_heads; + head_dim = config.hidden_size / num_heads; + intermediate_size = config.intermediate_size; + num_key_value_heads = config.num_key_value_heads; + num_key_value_groups = num_heads / num_key_value_heads; + + // for QNN linear speed up + pre_oproj_view = View(1, utils::closestFactors(chunk_size).first, utils::closestFactors(chunk_size).second, head_dim * num_heads, base_name + names._attn_base_name + "or_split-00_view_"); + out_proj = Linear(hidden_size, hidden_size, false, base_name + names._attn_base_name + names._o_proj_name); + post_oproj_dequantize = Dequantize(true, base_name + names._attn_base_name + names._o_proj_name + ".dequantize"); + post_oproj_view = View(1, 1, chunk_size, hidden_size, base_name + 
names._attn_base_name + names._o_proj_name + ".dequantize-00_view_"); + post_atten_res_add = Add(base_name + names._attn_base_name + "post_atten_add"); + + post_attn_layernorm = + RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._ffn_norm_name); + + auto mlp_base_name = base_name + names._ffn_base_name; + pre_mlp_quantize = Quantize(true, mlp_base_name + names._up_proj_name + ".quantize"); + pre_mlp_view = View(1, utils::closestFactors(chunk_size).first, utils::closestFactors(chunk_size).second, hidden_size, mlp_base_name + names._up_proj_name + ".quantize-00_view_"); + gate_proj = Linear(hidden_size, intermediate_size, false, mlp_base_name + names._gate_proj_name); + + if (config.use_high_precision_silu) { + silu = SiLU_Full_Precision(mlp_base_name + "act"); + } else { + silu = SiLU(mlp_base_name + "act"); + } + + up_proj = Linear(hidden_size, intermediate_size, false, mlp_base_name + names._up_proj_name); + post_up_proj_dequantize = Dequantize(true, mlp_base_name + names._up_proj_name + ".dequantize"); + post_gate_proj_dequantize = Dequantize(true, mlp_base_name + names._gate_proj_name + ".dequantize"); + + down_proj = Linear(intermediate_size, hidden_size, false, mlp_base_name + names._down_proj_name); + pre_down_proj_quantize = Quantize(true, mlp_base_name + names._down_proj_name + ".quantize"); + post_down_proj_dequantize = Dequantize(true, mlp_base_name + names._down_proj_name + ".dequantize"); + post_mlp_view = View(1, 1, chunk_size, hidden_size, mlp_base_name + names._down_proj_name + ".dequantize-00_view_"); + + mlp_mul = Mul(mlp_base_name + "mul"); + post_mlp_res_add = Add(mlp_base_name + "res_add"); + } + + std::vector Forward(std::vector inputs, std::vector args) override { + auto atten_output = inputs[0]; + auto res = inputs[1]; + + atten_output = pre_oproj_view(atten_output); + atten_output = out_proj(atten_output); + atten_output = post_oproj_dequantize(atten_output); + auto float_oproj = post_oproj_view(atten_output); + + auto tmp 
= post_atten_res_add(float_oproj, res); + + auto x = post_attn_layernorm(tmp); + + x = pre_mlp_quantize(x); + // reshape to 32,2 + x = pre_mlp_view(x); + + auto gate_out = gate_proj(x); + auto up_out = up_proj(x); + + gate_out = post_gate_proj_dequantize(gate_out); + auto silu_out = silu(gate_out); + + up_out = post_up_proj_dequantize(up_out); + gate_out = mlp_mul(silu_out, up_out); + + gate_out = pre_down_proj_quantize(gate_out); + gate_out = down_proj(gate_out); + gate_out = post_down_proj_dequantize(gate_out); + + // reshape to 64,1 + auto float_gate_out = post_mlp_view(gate_out); + + gate_out = post_mlp_res_add(float_gate_out, tmp); + return {gate_out, float_oproj, silu_out, float_gate_out}; + } +}; + +class QwenDecoderNPUPart2WithShadow final : public QwenDecoderNPUPart2 { +public: + QwenDecoderNPUPart2WithShadow() = default; + QwenDecoderNPUPart2WithShadow(const Qwen2VLNPUConfig &config, const QWenNameConfig &names, int chunk_size, const string &base_name) { + hidden_size = config.hidden_size; + num_heads = config.num_attention_heads; + head_dim = config.hidden_size / num_heads; + intermediate_size = config.intermediate_size; + num_key_value_heads = config.num_key_value_heads; + num_key_value_groups = num_heads / num_key_value_heads; + + // for QNN linear speed up + pre_oproj_view = View(1, utils::closestFactors(chunk_size).first, utils::closestFactors(chunk_size).second, head_dim * num_heads, base_name + names._attn_base_name + "or_split-00_view_"); + out_proj = Linear(hidden_size, hidden_size, false, base_name + names._attn_base_name + names._o_proj_name); + post_oproj_dequantize = Dequantize(true, base_name + names._attn_base_name + names._o_proj_name + ".dequantize"); + post_oproj_view = View(1, 1, chunk_size, hidden_size, base_name + names._attn_base_name + names._o_proj_name + ".dequantize-00_view_"); + post_atten_res_add = Add(base_name + names._attn_base_name + "post_atten_add"); + + post_attn_layernorm = + RMSNorm(config.hidden_size, 
config.rms_norm_eps, base_name + names._ffn_norm_name); + + auto mlp_base_name = base_name + names._ffn_base_name; + pre_mlp_quantize = Quantize(true, mlp_base_name + names._up_proj_name + ".quantize"); + pre_mlp_view = View(1, utils::closestFactors(chunk_size).first, utils::closestFactors(chunk_size).second, hidden_size, mlp_base_name + names._up_proj_name + ".quantize-00_view_"); + gate_proj = Linear(hidden_size, intermediate_size, false, mlp_base_name + names._gate_proj_name); + + if (config.use_high_precision_silu) { + silu = SiLU_Full_Precision(mlp_base_name + "act"); + } else { + silu = SiLU(mlp_base_name + "act"); + } + + up_proj = Linear(hidden_size, intermediate_size, false, mlp_base_name + names._up_proj_name); + post_up_proj_dequantize = Dequantize(true, mlp_base_name + names._up_proj_name + ".dequantize"); + post_gate_proj_dequantize = Dequantize(true, mlp_base_name + names._gate_proj_name + ".dequantize"); + + down_proj = Linear(intermediate_size, hidden_size, false, mlp_base_name + names._down_proj_name); + pre_down_proj_quantize = Quantize(true, mlp_base_name + names._down_proj_name + ".quantize"); + post_down_proj_dequantize = Dequantize(true, mlp_base_name + names._down_proj_name + ".dequantize"); + post_mlp_view = View(1, 1, chunk_size, hidden_size, mlp_base_name + names._down_proj_name + ".dequantize-00_view_"); + + mlp_mul = Mul(mlp_base_name + "mul"); + post_mlp_res_add = Add(mlp_base_name + "res_add"); + } + + std::vector Forward(std::vector inputs, std::vector args) override { + auto atten_output = inputs[0]; + auto res = inputs[1]; + + atten_output = pre_oproj_view(atten_output); + atten_output = out_proj(atten_output); + atten_output = post_oproj_dequantize(atten_output); + atten_output = post_oproj_view(atten_output); + + auto tmp = post_atten_res_add(atten_output, res); + + auto x = post_attn_layernorm(tmp); + + x = pre_mlp_quantize(x); + // reshape to 32,2 + x = pre_mlp_view(x); + + auto gate_out = gate_proj(x); + auto up_out = 
up_proj(x); + + gate_out = post_gate_proj_dequantize(gate_out); + gate_out = silu(gate_out); + + up_out = post_up_proj_dequantize(up_out); + gate_out = mlp_mul(gate_out, up_out); + + auto shadow_input_1 = gate_out; + + gate_out = pre_down_proj_quantize(gate_out); + gate_out = down_proj(gate_out); + auto shadow_input_2 = gate_out; + gate_out = post_down_proj_dequantize(gate_out); + + // reshape to 64,1 + gate_out = post_mlp_view(gate_out); + + gate_out = post_mlp_res_add(gate_out, tmp); + + return {shadow_input_1, shadow_input_2, gate_out}; + } +}; + +class QwenNPU_CPUDecoder final : public Module { + int hidden_size; + int num_heads; + int head_dim; + int num_key_value_heads; + int num_key_value_groups; + + int layer_idx; + int num_layers; + + SubgraphStart _SubgraphStart_1, _SubgraphStart_2; + SubgraphFinalize _SubgraphEnd_1, _SubgraphEnd_2; + + Layer input_layernorm; + Layer pre_attn_quantize; + unique_ptr part1; + QwenQKVmm qkv_mm; + unique_ptr part2; + + std::set shadowLayer; + +public: + QwenNPU_CPUDecoder() = default; + QwenNPU_CPUDecoder(const Qwen2VLNPUConfig &config, const QWenNameConfig &names, int chunk_size, const string &base_name) : + shadowLayer(config.shadow_layers) { + hidden_size = config.hidden_size; + num_heads = config.num_attention_heads; + head_dim = config.hidden_size / num_heads; + num_key_value_heads = config.num_key_value_heads; + num_key_value_groups = num_heads / num_key_value_heads; + + // extract layer index from base_name like "model.layers.10." 
+ std::regex re(R"(\d+)"); + std::smatch match; + std::regex_search(base_name, match, re); + layer_idx = std::stoi(match[0]); + num_layers = config.num_hidden_layers; + + if (layer_idx == 0 || shadowLayer.find(layer_idx - 1) != shadowLayer.end()) { + input_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._attn_norm_name); + pre_attn_quantize = Quantize(true, base_name + names._attn_base_name + names._q_proj_name + ".quantize", MLLM_TYPE_I16); + part1 = make_unique(config, names, chunk_size, base_name + names._attn_base_name); + } else { + part1 = make_unique(config, names, chunk_size, base_name + names._attn_base_name); + } + + qkv_mm = QwenQKVmm(config, names, chunk_size, base_name + names._attn_base_name); + + part2 = make_unique(config, names, chunk_size, base_name); + + _SubgraphStart_1 = SubgraphStart(base_name + "subgraph_start1"); + _SubgraphEnd_1 = SubgraphFinalize(base_name + "subgraph_end1"); + _SubgraphStart_2 = SubgraphStart(base_name + "subgraph_start2"); + _SubgraphEnd_2 = SubgraphFinalize(base_name + "subgraph_end2"); + } + + vector Forward(vector inputs, vector args) override { + auto position_ids = inputs[1]; + + Tensor x, q, k, v, res; + if (layer_idx == 0 || shadowLayer.find(layer_idx - 1) != shadowLayer.end()) { + x = input_layernorm(inputs[0]); + + x = pre_attn_quantize(x); + + _SubgraphStart_1({x}); + + auto q_k_v = (*part1)({x}); // q,k,v + q = q_k_v[0]; + k = q_k_v[1]; + v = q_k_v[2]; + res = inputs[0]; + _SubgraphEnd_1(q_k_v); + + } else { + auto q_k_v_res = (*part1)(inputs); // q,k,v,res + q = q_k_v_res[0]; + k = q_k_v_res[1]; + v = q_k_v_res[2]; + res = q_k_v_res[3]; + _SubgraphEnd_1(q_k_v_res); + } + + auto o_x = qkv_mm({q, k, v, position_ids})[0]; + + _SubgraphStart_2({o_x, res}); + + auto out_part2 = (*part2)({o_x, res}); + + if (layer_idx == num_layers - 1) { + _SubgraphEnd_2(out_part2); + } + + return out_part2; + } +}; + +class QwenNPU_CPUDecoderWithShadow final : public Module { + int hidden_size; + 
int num_heads; + int head_dim; + int num_key_value_heads; + int num_key_value_groups; + + Layer input_layernorm; + Layer pre_attn_quantize; + Layer shadow_linear; + unique_ptr part1; + QwenQKVmm qkv_mm; + unique_ptr part2; + + int layer_idx; + int num_layers; + + SubgraphStart _SubgraphStart_1, _SubgraphStart_2; + SubgraphFinalize _SubgraphEnd_1, _SubgraphEnd_2; + + std::set shadowLayer; + +public: + QwenNPU_CPUDecoderWithShadow() = default; + QwenNPU_CPUDecoderWithShadow(const Qwen2VLNPUConfig &config, const QWenNameConfig &names, int chunk_size, const string &base_name) : + shadowLayer(config.shadow_layers) { + hidden_size = config.hidden_size; + num_heads = config.num_attention_heads; + head_dim = config.hidden_size / num_heads; + num_key_value_heads = config.num_key_value_heads; + num_key_value_groups = num_heads / num_key_value_heads; + + // extract layer index from base_name like "model.layers.10." + std::regex re(R"(\d+)"); + std::smatch match; + std::regex_search(base_name, match, re); + layer_idx = std::stoi(match[0]); + num_layers = config.num_hidden_layers; + + if (layer_idx == 0 || shadowLayer.find(layer_idx - 1) != shadowLayer.end()) { + input_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._attn_norm_name); + pre_attn_quantize = Quantize(true, base_name + names._attn_base_name + names._q_proj_name + ".quantize", MLLM_TYPE_I16); + part1 = make_unique(config, names, chunk_size, base_name + names._attn_base_name); + } else { + part1 = make_unique(config, names, chunk_size, base_name + names._attn_base_name); + } + + qkv_mm = QwenQKVmm(config, names, chunk_size, base_name + names._attn_base_name); + + part2 = make_unique(config, names, chunk_size, base_name); + + shadow_linear = ShadowLinear(config.intermediate_size, hidden_size, 1024, false, base_name + names._ffn_base_name + names._down_proj_name + ".shadow"); + + _SubgraphStart_1 = SubgraphStart(base_name + "subgraph_start1"); + _SubgraphEnd_1 = SubgraphFinalize(base_name 
+ "subgraph_end1"); + _SubgraphStart_2 = SubgraphStart(base_name + "subgraph_start2"); + _SubgraphEnd_2 = SubgraphFinalize(base_name + "subgraph_end2"); + } + + vector Forward(vector inputs, vector args) override { + auto position_ids = inputs[1]; + + Tensor x, q, k, v, res; + if (layer_idx == 0 || shadowLayer.find(layer_idx - 1) != shadowLayer.end()) { + x = input_layernorm(inputs[0]); + x = pre_attn_quantize(x); + + _SubgraphStart_1({x}); + + auto q_k_v = (*part1)({x}); // q,k,v + q = q_k_v[0]; + k = q_k_v[1]; + v = q_k_v[2]; + res = inputs[0]; + _SubgraphEnd_1(q_k_v); + } else { + auto q_k_v_res = (*part1)(inputs); // q,k,v,res + q = q_k_v_res[0]; + k = q_k_v_res[1]; + v = q_k_v_res[2]; + res = q_k_v_res[3]; + _SubgraphEnd_1(q_k_v_res); + } + + auto o_x = qkv_mm({q, k, v, position_ids})[0]; + + _SubgraphStart_2({o_x, res}); + + auto decoder_out = (*part2)({o_x, res}); + decoder_out = Tensor::toCPU(decoder_out); + + _SubgraphEnd_2(decoder_out); + + auto shadow_input_1 = decoder_out[0]; + auto shadow_input_2 = decoder_out[1]; + x = decoder_out[2]; + + x = shadow_linear(shadow_input_1, shadow_input_2, x); + + return {x}; + } +}; + +class Qwen2VL_ImagePatchAndEmbedding final : public Module { + Qwen2VisionModel visual; + Layer embed_tokens; + + Layer norm; + Parameter lm_head; + Layer lm_head_layer; + + bool tie_embedding_words; + + int64_t spatial_merge_size; + int64_t image_token_id; + int64_t video_token_id; + int64_t vision_start_token_id; + +public: + explicit Qwen2VL_ImagePatchAndEmbedding(const Qwen2VLNPUConfig &config) { + auto vocab_size = config.vocab_size; + auto hidden_dim = config.hidden_size; + auto head_size = config.num_attention_heads; + auto ffn_hidden = config.intermediate_size; + auto projection_cls = config.projection_cls; + auto vision_embed_dim = config.vision_embed_dim; + image_token_id = config.image_token_id; + auto vision_names = config.vision_names_config; + auto qwen_names = config.names_config; + tie_embedding_words = 
config.tie_embedding_words; + spatial_merge_size = config.spatial_merge_size; + image_token_id = config.image_token_id; + video_token_id = config.video_token_id; + vision_start_token_id = config.vision_start_token_id; + + embed_tokens = Embedding(vocab_size, hidden_dim, qwen_names.token_embd_name); + // visual = Qwen2VisionModel(hidden_dim, vision_embed_dim, 16, vision_embed_dim * 4, "QuickGELU", 14, 336, 32, spatial_merge_size, vision_names, vision_names.vison_model_name); + visual = Qwen2VisionModel(hidden_dim, vision_embed_dim, 16, vision_embed_dim * 4, "QuickGELU", 14, 336, 32, spatial_merge_size, config.attn_implementation, vision_names, vision_names.vison_model_name); + } + + vector Forward(vector inputs, vector args) override { + auto hidden_states = embed_tokens({inputs[0]}); + + auto image_embeds = visual({inputs[1], inputs[2]})[0]; + auto n_image_features = image_embeds.sequence(); + auto where_idx = inputs[0].where(image_token_id, SEQUENCE); + hidden_states = hidden_states.index_put(image_embeds, where_idx, false); + + return {hidden_states}; + } + + // changed from get_position_ids in CPU Qwen2VL, enable padding + // when prefilling, padding_to should be the max length of the input + // when decoding, real_seq should be the real length of the input, thus get the correct position_ids for decoding + void get_position_ids(vector &inputs, int padding_to = 0, int real_seq = 0) { + if (inputs[0].sequence() > 1) { + Tensor video_grid_thw(0, 0, 0, 0, MLLM_CPU, true); + auto rope_indices = get_rope_index_cpp(inputs[0], inputs[2], video_grid_thw, padding_to); + auto position = rope_indices[0]; + if (inputs.size() == 4) { + inputs[3] = position; + } else { + inputs.push_back(position); + } + } else { + auto &position_ids = inputs[3]; + auto last_pos = real_seq == 0 ? 
position_ids.dataAt(0, 0, 0, position_ids.dimension() - 1) : real_seq - 1; + position_ids.reshape(position_ids.batch(), 1, position_ids.sequence(), 1); + for (int b = 0; b < position_ids.batch(); b++) { + for (int s = 0; s < position_ids.sequence(); s++) { + position_ids.setDataAt(b, 0, s, 0, last_pos + 1); + } + } + } + } + +private: + vector get_rope_index_cpp( + Tensor input_ids, + Tensor image_grid_thw, + Tensor video_grid_thw, + int padding_to = 0) { + vector> attention_mask; + auto attention_mask_shape = input_ids.sequence(); + for (int b = 0; b < input_ids.batch(); b++) { + attention_mask.emplace_back(attention_mask_shape, 1); + } + const size_t batch_size = input_ids.batch(); // input_ids.size(); + + // NOTE: changed from original + const size_t seq_len = batch_size > 0 ? (padding_to > input_ids.sequence() ? padding_to : input_ids.sequence()) : 0; // batch_size > 0 ? input_ids[0].size() : 0; + + // Tensor position_ids(3, 1, batch_size, seq_len, Backend::global_backends[MLLM_CPU].get()), true); + // Tensor mrope_position_deltas(1, 1, 1, batch_size, Backend::global_backends[MLLM_CPU].get()), true); + Tensor position_ids(3, 1, batch_size, seq_len, Backend::global_backends[MLLM_CPU].get(), true); + Tensor mrope_position_deltas(1, 1, 1, batch_size, Backend::global_backends[MLLM_CPU].get(), true); + bool has_vision = (image_grid_thw.sequence() > 0) || (video_grid_thw.sequence() > 0); // image_grid_thw || video_grid_thw; + if (!has_vision) { + // Pure text case + for (size_t i = 0; i < batch_size; ++i) { + const auto &mask = !attention_mask.empty() ? attention_mask[i] : vector(seq_len, 1); + vector positions; + int64_t pos = 0; + for (size_t j = 0; j < seq_len; ++j) { + if (mask[j] == 1) { + positions.push_back(pos++); + } else { + positions.push_back(1); // Will be overwritten by mask + } + } + for (int dim = 0; dim < 3; ++dim) { + for (size_t j = 0; j < seq_len; ++j) { + position_ids.setDataAt(dim, 0, i, j, (float)(mask[j] == 1 ? 
positions[j] : 1)); + } + } + int64_t max_pos = pos - 1; + mrope_position_deltas.setDataAt(0, 0, 0, i, (float)((max_pos + 1) - static_cast(input_ids.sequence()))); + } + position_ids.setName("position_ids"); + mrope_position_deltas.setName("mrope_position_deltas"); + return {position_ids, mrope_position_deltas}; + } + // Process vision cases + size_t image_idx = 0, video_idx = 0; + for (size_t i = 0; i < batch_size; ++i) { + const auto &mask = !attention_mask.empty() ? attention_mask[i] : vector(seq_len, 1); + // Extract valid tokens + vector valid_tokens; + for (size_t j = 0; j < input_ids.sequence(); ++j) { + if (mask[j] == 1) valid_tokens.push_back((int)input_ids.dataAt(i, 0, j, 0)); + } + // Find vision start positions + vector vision_starts; + vector vision_types; + for (size_t j = 0; j < valid_tokens.size(); ++j) { + if (valid_tokens[j] == vision_start_token_id && j + 1 < valid_tokens.size()) { + vision_starts.push_back(j); + vision_types.push_back(valid_tokens[j + 1]); + } + } + int64_t image_count = count(vision_types.begin(), vision_types.end(), image_token_id); + int64_t video_count = vision_types.size() - image_count; + vector> llm_positions(3); + size_t st = 0; + int64_t current_max = 0; + int64_t remain_images = image_count; + int64_t remain_videos = video_count; + // Process each vision segment + for (size_t vs = 0; vs < vision_starts.size(); ++vs) { + // Find next vision token + size_t ed_image = valid_tokens.size(); + size_t ed_video = valid_tokens.size(); + if (remain_images > 0) { + auto it = find(valid_tokens.begin() + st, valid_tokens.end(), image_token_id); + if (it != valid_tokens.end()) ed_image = it - valid_tokens.begin(); + } + if (remain_videos > 0) { + auto it = find(valid_tokens.begin() + st, valid_tokens.end(), video_token_id); + if (it != valid_tokens.end()) ed_video = it - valid_tokens.begin(); + } + size_t ed = min(ed_image, ed_video); + if (ed == valid_tokens.size()) break; + // Get grid parameters + int64_t t, h, w; + bool is_image 
= (ed == ed_image); + if (is_image) { + t = (int64_t)image_grid_thw.dataAt(0, 0, image_idx, 0); + h = (int64_t)image_grid_thw.dataAt(0, 0, image_idx, 1); + w = (int64_t)image_grid_thw.dataAt(0, 0, image_idx, 2); + image_idx++; + remain_images--; + } else { + t = (int64_t)video_grid_thw.dataAt(0, 0, video_idx, 0); + h = (int64_t)video_grid_thw.dataAt(0, 0, video_idx, 1); + w = (int64_t)video_grid_thw.dataAt(0, 0, video_idx, 2); + video_idx++; + remain_videos--; + } + // Calculate grid dimensions + int64_t llm_grid_t = t; + int64_t llm_grid_h = h / spatial_merge_size; + int64_t llm_grid_w = w / spatial_merge_size; + // Process text segment + size_t text_len = ed - st; + if (text_len > 0) { + int64_t start_idx = current_max; + for (int64_t k = 0; k < text_len; ++k) { + for (int dim = 0; dim < 3; ++dim) { + llm_positions[dim].push_back(start_idx + k); + } + } + current_max += text_len; + } + for (int64_t ti = 0; ti < llm_grid_t; ++ti) { + for (int64_t hi = 0; hi < llm_grid_h; ++hi) { + for (int64_t wi = 0; wi < llm_grid_w; ++wi) { + llm_positions[0].push_back(current_max + ti); + llm_positions[1].push_back(current_max + hi); + llm_positions[2].push_back(current_max + wi); + } + } + } + current_max = std::max({llm_positions[0][llm_positions[0].size() - 1], + llm_positions[1][llm_positions[1].size() - 1], + llm_positions[2][llm_positions[2].size() - 1]}); + st = ed + llm_grid_t * llm_grid_h * llm_grid_w; + } + // Process remaining text + if (st < valid_tokens.size()) { + size_t text_len = valid_tokens.size() - st; + int64_t st_idx = current_max + 1; + for (int64_t k = 0; k < text_len; ++k) { + for (int dim = 0; dim < 3; ++dim) { + llm_positions[dim].push_back(st_idx + k); + } + } + current_max += text_len; + } + // Fill position_ids with valid positions + size_t valid_idx = 0; + for (size_t j = 0; j < seq_len; ++j) { + if (mask[j] == 1) { + if (valid_idx < llm_positions[0].size()) { + position_ids.setDataAt(0, 0, i, j, (float)llm_positions[0][valid_idx]); + 
position_ids.setDataAt(1, 0, i, j, (float)llm_positions[1][valid_idx]); + position_ids.setDataAt(2, 0, i, j, (float)llm_positions[2][valid_idx]); + valid_idx++; + } + } + } + // Calculate delta + int64_t max_pos = 0; + for (const auto &dim : llm_positions) { + for (auto val : dim) { + max_pos = max(max_pos, val); + } + } + mrope_position_deltas.setDataAt(0, 0, 0, i, (float)((max_pos + 1) - static_cast(input_ids.sequence()))); + } + position_ids.setName("position_ids"); + mrope_position_deltas.setName("mrope_position_deltas"); + return {position_ids, mrope_position_deltas}; + } +}; + +class Qwen2VL_PrefillBody final : public Module { + std::vector> blocks; + Layer norm; + Parameter lm_head; + Layer lm_head_layer; + int num_layer; + + bool tie_embedding_words; + + template + static vector> ListWithShadow(int n, std::set &shadowLayer, Args &&...args) { + static_assert(std::is_base_of::value, "T1 must be a subclass of Module"); + static_assert(std::is_base_of::value, "SHADOW must be a subclass of Module"); + listIdx = 0; + vector> modules; + + // for index in shadowLayers, create shadow decoder, for others, create normal decoder + for (int i = 0; i < n; i++) { + auto new_args = change_last(args...); // 创建新的参数包,最后一个参数被修改为原来的值+ std::to_string(listIdx)+ "." 
+ if (shadowLayer.find(listIdx) != shadowLayer.end()) { + modules.push_back(std::make_unique(std::apply([&](auto &&...args) { return SHADOW(std::forward(args)...); }, new_args))); + } else { + modules.push_back(std::make_unique(std::apply([&](auto &&...args) { return T1(std::forward(args)...); }, new_args))); + } + listIdx++; + } + listIdx = 0; + return modules; + } + +public: + explicit Qwen2VL_PrefillBody(const Qwen2VLNPUConfig &config, int chunk_size, std::set &shadowLayer) { + auto vocab_size = config.vocab_size; + auto hidden_dim = config.hidden_size; + auto head_size = config.num_attention_heads; + auto qwen_names = config.names_config; + tie_embedding_words = config.tie_embedding_words; + + num_layer = config.num_hidden_layers; + + blocks = ListWithShadow(config.num_hidden_layers, shadowLayer, config, qwen_names, chunk_size, qwen_names.blk_name); + norm = RMSNorm(hidden_dim, 1e-6, qwen_names.post_norm_name); + if (tie_embedding_words) { + lm_head = Parameter(1, config.vocab_size, 1, config.hidden_size, qwen_names.token_embd_name + ".weight"); + } else { + lm_head_layer = HeadLinear(config.hidden_size, config.vocab_size, false, qwen_names.token_embd_name); + } + } + + vector Forward(vector inputs, vector args) override { + auto hidden_states = inputs[0]; + auto position_ids = inputs[1]; + + for (auto i = 0; i < blocks.size(); ++i) { + hidden_states = (*blocks[i])({hidden_states, position_ids})[0]; + } + + hidden_states = norm(hidden_states); + + if (tie_embedding_words) { + hidden_states = Tensor::mm(hidden_states, lm_head().transpose(Chl::SEQUENCE, Chl::DIMENSION)); + } else { + hidden_states = lm_head_layer(hidden_states); + } + + return {hidden_states}; + } +}; + +// CPU decoding model with only the LLM backbone +class Qwen2VL_Decoding_Model final : public Module { + Layer embed_tokens; + + vector blocks; + Layer norm; + Parameter lm_head; + Layer lm_head_layer; + + bool tie_embedding_words; + + int64_t spatial_merge_size; + int64_t image_token_id; + 
int64_t video_token_id; + int64_t vision_start_token_id; + +public: + explicit Qwen2VL_Decoding_Model(const Qwen2VLConfig &config) { + auto vocab_size = config.vocab_size; + auto hidden_dim = config.hidden_size; + auto head_size = config.num_attention_heads; + auto ffn_hidden = config.intermediate_size; + auto projection_cls = config.projection_cls; + auto vision_embed_dim = config.vision_embed_dim; + image_token_id = config.image_token_id; + auto vision_names = config.vision_names_config; + auto qwen_names = config.names_config; + tie_embedding_words = config.tie_embedding_words; + spatial_merge_size = config.spatial_merge_size; + image_token_id = config.image_token_id; + video_token_id = config.video_token_id; + vision_start_token_id = config.vision_start_token_id; + + embed_tokens = Embedding(vocab_size, hidden_dim, qwen_names.token_embd_name); + + blocks = List(config.num_hidden_layers, config, qwen_names, qwen_names.blk_name); + norm = RMSNorm(hidden_dim, 1e-6, qwen_names.post_norm_name); + if (tie_embedding_words) { + lm_head = Parameter(1, config.vocab_size, 1, config.hidden_size, qwen_names.token_embd_name + ".weight"); + } else { + lm_head_layer = Linear(config.hidden_size, config.vocab_size, false, qwen_names.lm_head_name); + } + } + vector Forward(vector inputs, vector args) override { + auto position_ids = inputs[3]; + + auto hidden_states = embed_tokens({inputs[0]}); + + for (auto &block : blocks) { + hidden_states = block({hidden_states, position_ids})[0]; + } + hidden_states = norm(hidden_states); + if (tie_embedding_words) { + hidden_states = Tensor::mm(hidden_states, lm_head().transpose(Chl::SEQUENCE, Chl::DIMENSION)); + } else { + hidden_states = lm_head_layer(hidden_states); + } + return {hidden_states}; + } + void clear_kvcache() override { + for (auto &block : blocks) { + auto kvcahce = block.get_attention().get_cache(); + for (auto &cache : kvcahce) { + cache->clearCache(); + } + } + } +}; + +#endif // MODELING_QWEN2VL_NPU_HPP \ No newline at 
end of file diff --git a/mllm/models/qwen2_vl/modeling_qwen2_vl_npuvit.hpp b/mllm/models/qwen2_vl/modeling_qwen2_vl_npuvit.hpp new file mode 100644 index 000000000..31b0da1d3 --- /dev/null +++ b/mllm/models/qwen2_vl/modeling_qwen2_vl_npuvit.hpp @@ -0,0 +1,492 @@ +#ifndef MODELING_NPU_VIT_HPP +#define MODELING_NPU_VIT_HPP + +#include "Layer.hpp" +#include "Module.hpp" +#include "Tensor.hpp" +#include "Timing.hpp" +#include "Types.hpp" +#include "configuration_qwen2_vl.hpp" +#include +#include + +using namespace mllm; +namespace npu { + +class VisionBlock_NPU final : public Module { + Layer input_quantize; + Layer qkv_dequant, q_view; + Layer k_dequant, k_view; + Layer v_dequant, v_view; + + Layer qkv_proj; + Split qkv_split; + Layer q_rope, k_rope; + Layer pre_oproj_view; + Layer o_proj; + Layer o_quantize, post_oproj_dequantize; + Layer qk_mm, qkv_mm; + Softmax softmax; + Layer scale; + int head_size_{}; + int kv_head_size_{}; + int attn_hidden_dim_{}; + + Layer post_atten_res_add; + + Layer pre_mlp_quantize; + Layer up_proj; + Layer post_up_proj_dequantize; + Layer act; + Layer pre_down_proj_quantize; + Layer down_proj; + Layer post_down_proj_dequantize; + + Layer post_mlp_res_add; + + Layer norm1; + Layer norm2; + +public: + VisionBlock_NPU() = default; + VisionBlock_NPU(int hidden_dim, int head_size, int ffn_hidden, const string &act_fn_type, const ViTNameConfig &names, const string &base_name) { + attn_hidden_dim_ = hidden_dim / head_size; + head_size_ = head_size; + kv_head_size_ = head_size; + + norm1 = RMSNorm(hidden_dim, 1e-6, base_name + names._attn_norm_name, true); + + // attention + auto attn_base_name = base_name + names._attn_base_name; + input_quantize = Quantize(true, attn_base_name + names._qkv_proj_name + ".quantize", MLLM_TYPE_I16); + qkv_proj = Linear(hidden_dim, head_size * attn_hidden_dim_ * 3, false, attn_base_name + names._qkv_proj_name); + // use FP16 for attention matmul + qkv_dequant = DequantizeAdd(true, head_size * attn_hidden_dim_ * 3, 
attn_base_name + names._qkv_proj_name + ".dequantize", false, MLLM_TYPE_I16); + + qkv_split = Split(3, DIMENSION, head_size * attn_hidden_dim_, attn_base_name + names._qkv_proj_name + ".split"); + + q_view = View(-1, 16, -1, attn_hidden_dim_, attn_base_name + names._qkv_proj_name + ".q-00_view_"); + k_view = View(-1, 16, -1, attn_hidden_dim_, attn_base_name + names._qkv_proj_name + ".k-00_view_"); + v_view = View(-1, 16, -1, attn_hidden_dim_, attn_base_name + names._qkv_proj_name + ".v-00_view_"); + + q_rope = RoPESimple(-1, attn_base_name + "q_rope"); + k_rope = RoPESimple(-1, attn_base_name + "k_rope"); + + softmax = Softmax(DIMENSION, false, attn_base_name + "softmax"); + + qk_mm = Matmul(false, true, attn_base_name + "qk_mm"); + qkv_mm = Matmul(false, false, attn_base_name + "qkv_mm"); + scale = Scale(1 / std::sqrt(attn_hidden_dim_), 0, false, attn_base_name + "scale"); + + pre_oproj_view = View(-1, 1, -1, attn_hidden_dim_ * head_size_, attn_base_name + "or_split-00_view_"); + + o_quantize = Quantize(true, attn_base_name + names._o_proj_name + ".quantize", MLLM_TYPE_I16); + o_proj = Linear(head_size * attn_hidden_dim_, hidden_dim, false, attn_base_name + names._o_proj_name); + post_oproj_dequantize = DequantizeAdd(true, hidden_dim, attn_base_name + names._o_proj_name + ".dequantize", true, MLLM_TYPE_I16); + + post_atten_res_add = Add(attn_base_name + "post_atten_add"); + + norm2 = RMSNorm(hidden_dim, 1e-6, base_name + names._ffn_norm_name, true); + + // mlp + auto mlp_base_name = base_name + names._ffn_base_name; + pre_mlp_quantize = Quantize(true, mlp_base_name + names._up_proj_name + ".quantize", MLLM_TYPE_I16); + up_proj = Linear(hidden_dim, ffn_hidden, false, mlp_base_name + names._up_proj_name); + // NOTE: QNN GeLU doesn't support FP32, use FP16 + post_up_proj_dequantize = DequantizeAdd(true, ffn_hidden, mlp_base_name + names._up_proj_name + ".dequantize", false, MLLM_TYPE_I16); + + act = ACT_FN[act_fn_type](mlp_base_name + "act"); + + 
pre_down_proj_quantize = Quantize(true, mlp_base_name + names._down_proj_name + ".quantize", MLLM_TYPE_I16); + down_proj = Linear(ffn_hidden, hidden_dim, false, mlp_base_name + names._down_proj_name); + post_down_proj_dequantize = DequantizeAdd(true, hidden_dim, mlp_base_name + names._down_proj_name + ".dequantize", true, MLLM_TYPE_I16); + + post_mlp_res_add = Add(mlp_base_name + "res_add"); + } + vector Forward(vector inputs, vector args) override { + auto after_norm1 = norm1(inputs[0]); + auto hidden_states = after_norm1; + + // attention + auto rotary_pos_emb_sin = inputs[1]; + auto rotary_pos_emb_cos = inputs[2]; + + Tensor q, k, v; + hidden_states = input_quantize(hidden_states); + auto int_qkv = qkv_proj(hidden_states); + auto qkv = int_qkv; + + qkv = qkv_dequant(qkv); + + auto qkv_sp = qkv_split(qkv); + + q = qkv_sp[0]; + k = qkv_sp[1]; + v = qkv_sp[2]; + + q = q_view(q); + k = k_view(k); + v = v_view(v); + + q = q_rope(q, rotary_pos_emb_sin, rotary_pos_emb_cos); + k = k_rope(k, rotary_pos_emb_sin, rotary_pos_emb_cos); + + auto qk = qk_mm(q, k); + qk = scale(qk); + qk = softmax(qk); + auto o = qkv_mm(qk, v); + + o = pre_oproj_view(o); + + o = o_quantize(o); + hidden_states = o_proj(o); + hidden_states = post_oproj_dequantize(hidden_states); + + auto residual = post_atten_res_add(hidden_states, inputs[0]); + + hidden_states = norm2(residual); + + // mlp + hidden_states = pre_mlp_quantize(hidden_states); + hidden_states = up_proj(hidden_states); + hidden_states = post_up_proj_dequantize(hidden_states); + + hidden_states = act(hidden_states); + + hidden_states = pre_down_proj_quantize(hidden_states); + hidden_states = down_proj(hidden_states); + hidden_states = post_down_proj_dequantize(hidden_states); + + hidden_states = post_mlp_res_add(hidden_states, residual); + + return {hidden_states}; + } +}; + +class Qwen2PatchEmbedForNPU final : public Module { + Layer proj; + int embed_dim{}; + +public: + Qwen2PatchEmbedForNPU() = default; + Qwen2PatchEmbedForNPU(int 
vision_embed_dim, int patch, int img_hw, const Qwen2VLNameConfig &names, const string &base_name) { + proj = Convolution3D(3, vision_embed_dim, {2, patch, patch}, {2, patch, patch}, VALID, false, base_name + names._patch_embedding_name); + embed_dim = vision_embed_dim; + } + vector Forward(vector inputs, vector args) override { + auto embd = proj(inputs[0]); + embd = embd.view(1, 1, -1, embed_dim); + return {embd}; + } +}; + +class RotationPatchMerger final : public Module { + int hidden_size; + Layer ln_q; + Layer mlp0; + Layer gelu; + Layer mlp2; + +public: + RotationPatchMerger() = default; + RotationPatchMerger(int dim, int context_dim, int spatial_merge_size, const Qwen2VLNameConfig &names, const string &base_name) { + hidden_size = context_dim * (spatial_merge_size * spatial_merge_size); + ln_q = RMSNorm(context_dim, 1e-6, base_name + names._ln_q_name, true); + mlp0 = Linear(hidden_size, hidden_size, true, base_name + names._m_mlp_0_name); + gelu = GELU(base_name + ".gelu"); + mlp2 = Linear(hidden_size, dim, true, base_name + names._m_mlp_2_name); + } + vector Forward(vector inputs, vector args) override { + auto x = inputs[0]; + x = mlp2(gelu(mlp0(ln_q(x).view(1, 1, -1, hidden_size)))); + return {x}; + } +}; + +class Qwen2VisionModel_NPU : public Module { + Qwen2PatchEmbedForNPU patch_embed; + + Layer rot_pos_emb, rot_pos_emb_sin, rot_pos_emb_cos; + Layer pre_layrnorm; + vector blocks; + RotationPatchMerger patch_merger; + + SubgraphStart _SubgraphStart; + SubgraphFinalize _SubgraphEnd; + +public: + Qwen2VisionModel_NPU() = default; + Qwen2VisionModel_NPU(int hidden_dim, int vision_embed_dim, int head_size, int mlp_hidden_dim, const string &act_fn_type, int patch, int img_hw, int block_num, int spatial_merge_size, const Qwen2VLNameConfig &names, const string &base_name) { + patch_embed = Qwen2PatchEmbedForNPU(vision_embed_dim, patch, img_hw, names, base_name + names.patch_embed_name); + rot_pos_emb = VisionRoPE((vision_embed_dim / head_size) / 2, 
spatial_merge_size, base_name + ".rot_pos_emb"); + rot_pos_emb_sin = VisionRoPESin((vision_embed_dim / head_size) / 2, spatial_merge_size, base_name + ".rot_pos_emb_sin"); + rot_pos_emb_cos = VisionRoPECos((vision_embed_dim / head_size) / 2, spatial_merge_size, base_name + ".rot_pos_emb_cos"); + + blocks = List(block_num, vision_embed_dim, head_size, mlp_hidden_dim, act_fn_type, names, base_name + names._layer_name); + patch_merger = RotationPatchMerger(hidden_dim, vision_embed_dim, spatial_merge_size, names, base_name + names._merger_name); + + _SubgraphStart = SubgraphStart(base_name + "subgraph_start"); + _SubgraphEnd = SubgraphFinalize(base_name + "subgraph_end"); + } + vector Forward(vector inputs, vector args) override { + auto hidden_states = patch_embed({inputs[0]})[0]; + + auto rotary_pos_emb_sin = rot_pos_emb_sin(inputs[1]); + auto rotary_pos_emb_cos = rot_pos_emb_cos(inputs[1]); + + _SubgraphStart({hidden_states, rotary_pos_emb_sin, rotary_pos_emb_cos}); + + for (int i = 0; i < blocks.size(); i++) { + hidden_states = blocks[i]({hidden_states, rotary_pos_emb_sin, rotary_pos_emb_cos})[0]; + } + + _SubgraphEnd({hidden_states}); + + hidden_states = patch_merger({hidden_states})[0]; + + return {hidden_states}; + } +}; + +class Qwen2VL_ImagePatchAndEmbedding final : public Module { + Qwen2VisionModel_NPU visual; + Layer embed_tokens; + + Layer norm; + Parameter lm_head; + Layer lm_head_layer; + + bool tie_embedding_words; + + int64_t spatial_merge_size; + int64_t image_token_id; + int64_t video_token_id; + int64_t vision_start_token_id; + +public: + explicit Qwen2VL_ImagePatchAndEmbedding(const Qwen2VLConfig &config) { + auto vocab_size = config.vocab_size; + auto hidden_dim = config.hidden_size; + auto head_size = config.num_attention_heads; + auto ffn_hidden = config.intermediate_size; + auto projection_cls = config.projection_cls; + auto vision_embed_dim = config.vision_embed_dim; + image_token_id = config.image_token_id; + auto vision_names = 
config.vision_names_config; + auto qwen_names = config.names_config; + tie_embedding_words = config.tie_embedding_words; + spatial_merge_size = config.spatial_merge_size; + image_token_id = config.image_token_id; + video_token_id = config.video_token_id; + vision_start_token_id = config.vision_start_token_id; + + embed_tokens = Embedding(vocab_size, hidden_dim, qwen_names.token_embd_name); + // NOTE: Use GELU for NPU Qwen2VL ViT. the QuickGELU is implemented using QNN 1.702*x*sigmoid(x), which is slow + visual = Qwen2VisionModel_NPU(hidden_dim, vision_embed_dim, 16, vision_embed_dim * 4, "GELU", 14, 336, 32, spatial_merge_size, vision_names, vision_names.vison_model_name); + } + + vector Forward(vector inputs, vector args) override { + auto hidden_states = embed_tokens({inputs[0]}); + + auto image_embeds = visual({inputs[1], inputs[2]})[0]; + auto n_image_features = image_embeds.sequence(); + auto where_idx = inputs[0].where(image_token_id, SEQUENCE); + hidden_states = hidden_states.index_put(image_embeds, where_idx, false); + + return {hidden_states}; + } + + // changed from get_position_ids in CPU Qwen2VL, enable padding + // when prefilling, padding_to should be the max length of the input + // when decoding, real_seq should be the real length of the input, thus get the correct position_ids for decoding + void get_position_ids(vector &inputs, int padding_to = 0, int real_seq = 0) { + if (inputs[0].sequence() > 1) { + Tensor video_grid_thw(0, 0, 0, 0, MLLM_CPU, true); + auto rope_indices = get_rope_index_cpp(inputs[0], inputs[2], video_grid_thw, padding_to); + auto position = rope_indices[0]; + if (inputs.size() == 4) { + inputs[3] = position; + } else { + inputs.push_back(position); + } + } else { + auto &position_ids = inputs[3]; + auto last_pos = real_seq == 0 ? 
position_ids.dataAt(0, 0, 0, position_ids.dimension() - 1) : real_seq - 1; + position_ids.reshape(position_ids.batch(), 1, position_ids.sequence(), 1); + for (int b = 0; b < position_ids.batch(); b++) { + for (int s = 0; s < position_ids.sequence(); s++) { + position_ids.setDataAt(b, 0, s, 0, last_pos + 1); + } + } + } + } + +private: + vector get_rope_index_cpp( + Tensor input_ids, + Tensor image_grid_thw, + Tensor video_grid_thw, + int padding_to = 0) { + vector> attention_mask; + auto attention_mask_shape = input_ids.sequence(); + for (int b = 0; b < input_ids.batch(); b++) { + attention_mask.emplace_back(attention_mask_shape, 1); + } + const size_t batch_size = input_ids.batch(); // input_ids.size(); + + // NOTE: changed from original + const size_t seq_len = batch_size > 0 ? (padding_to > input_ids.sequence() ? padding_to : input_ids.sequence()) : 0; // batch_size > 0 ? input_ids[0].size() : 0; + + Tensor position_ids(3, 1, batch_size, seq_len, Backend::global_backends[MLLM_CPU].get(), true); + Tensor mrope_position_deltas(1, 1, 1, batch_size, Backend::global_backends[MLLM_CPU].get(), true); + bool has_vision = (image_grid_thw.sequence() > 0) || (video_grid_thw.sequence() > 0); // image_grid_thw || video_grid_thw; + if (!has_vision) { + // Pure text case + for (size_t i = 0; i < batch_size; ++i) { + const auto &mask = !attention_mask.empty() ? attention_mask[i] : vector(seq_len, 1); + vector positions; + int64_t pos = 0; + for (size_t j = 0; j < seq_len; ++j) { + if (mask[j] == 1) { + positions.push_back(pos++); + } else { + positions.push_back(1); // Will be overwritten by mask + } + } + for (int dim = 0; dim < 3; ++dim) { + for (size_t j = 0; j < seq_len; ++j) { + position_ids.setDataAt(dim, 0, i, j, (float)(mask[j] == 1 ? 
positions[j] : 1)); + } + } + int64_t max_pos = pos - 1; + mrope_position_deltas.setDataAt(0, 0, 0, i, (float)((max_pos + 1) - static_cast(input_ids.sequence()))); + } + position_ids.setName("position_ids"); + mrope_position_deltas.setName("mrope_position_deltas"); + return {position_ids, mrope_position_deltas}; + } + // Process vision cases + size_t image_idx = 0, video_idx = 0; + for (size_t i = 0; i < batch_size; ++i) { + const auto &mask = !attention_mask.empty() ? attention_mask[i] : vector(seq_len, 1); + // Extract valid tokens + vector valid_tokens; + for (size_t j = 0; j < input_ids.sequence(); ++j) { + if (mask[j] == 1) valid_tokens.push_back((int)input_ids.dataAt(i, 0, j, 0)); + } + // Find vision start positions + vector vision_starts; + vector vision_types; + for (size_t j = 0; j < valid_tokens.size(); ++j) { + if (valid_tokens[j] == vision_start_token_id && j + 1 < valid_tokens.size()) { + vision_starts.push_back(j); + vision_types.push_back(valid_tokens[j + 1]); + } + } + int64_t image_count = count(vision_types.begin(), vision_types.end(), image_token_id); + int64_t video_count = vision_types.size() - image_count; + vector> llm_positions(3); + size_t st = 0; + int64_t current_max = 0; + int64_t remain_images = image_count; + int64_t remain_videos = video_count; + // Process each vision segment + for (size_t vs = 0; vs < vision_starts.size(); ++vs) { + // Find next vision token + size_t ed_image = valid_tokens.size(); + size_t ed_video = valid_tokens.size(); + if (remain_images > 0) { + auto it = find(valid_tokens.begin() + st, valid_tokens.end(), image_token_id); + if (it != valid_tokens.end()) ed_image = it - valid_tokens.begin(); + } + if (remain_videos > 0) { + auto it = find(valid_tokens.begin() + st, valid_tokens.end(), video_token_id); + if (it != valid_tokens.end()) ed_video = it - valid_tokens.begin(); + } + size_t ed = min(ed_image, ed_video); + if (ed == valid_tokens.size()) break; + // Get grid parameters + int64_t t, h, w; + bool is_image 
= (ed == ed_image); + if (is_image) { + t = (int64_t)image_grid_thw.dataAt(0, 0, image_idx, 0); + h = (int64_t)image_grid_thw.dataAt(0, 0, image_idx, 1); + w = (int64_t)image_grid_thw.dataAt(0, 0, image_idx, 2); + image_idx++; + remain_images--; + } else { + t = (int64_t)video_grid_thw.dataAt(0, 0, video_idx, 0); + h = (int64_t)video_grid_thw.dataAt(0, 0, video_idx, 1); + w = (int64_t)video_grid_thw.dataAt(0, 0, video_idx, 2); + video_idx++; + remain_videos--; + } + // Calculate grid dimensions + int64_t llm_grid_t = t; + int64_t llm_grid_h = h / spatial_merge_size; + int64_t llm_grid_w = w / spatial_merge_size; + // Process text segment + size_t text_len = ed - st; + if (text_len > 0) { + int64_t start_idx = current_max; + for (int64_t k = 0; k < text_len; ++k) { + for (int dim = 0; dim < 3; ++dim) { + llm_positions[dim].push_back(start_idx + k); + } + } + current_max += text_len; + } + for (int64_t ti = 0; ti < llm_grid_t; ++ti) { + for (int64_t hi = 0; hi < llm_grid_h; ++hi) { + for (int64_t wi = 0; wi < llm_grid_w; ++wi) { + llm_positions[0].push_back(current_max + ti); + llm_positions[1].push_back(current_max + hi); + llm_positions[2].push_back(current_max + wi); + } + } + } + current_max = std::max({llm_positions[0][llm_positions[0].size() - 1], + llm_positions[1][llm_positions[1].size() - 1], + llm_positions[2][llm_positions[2].size() - 1]}); + st = ed + llm_grid_t * llm_grid_h * llm_grid_w; + } + // Process remaining text + if (st < valid_tokens.size()) { + size_t text_len = valid_tokens.size() - st; + int64_t st_idx = current_max + 1; + for (int64_t k = 0; k < text_len; ++k) { + for (int dim = 0; dim < 3; ++dim) { + llm_positions[dim].push_back(st_idx + k); + } + } + current_max += text_len; + } + // Fill position_ids with valid positions + size_t valid_idx = 0; + for (size_t j = 0; j < seq_len; ++j) { + if (mask[j] == 1) { + if (valid_idx < llm_positions[0].size()) { + position_ids.setDataAt(0, 0, i, j, (float)llm_positions[0][valid_idx]); + 
position_ids.setDataAt(1, 0, i, j, (float)llm_positions[1][valid_idx]); + position_ids.setDataAt(2, 0, i, j, (float)llm_positions[2][valid_idx]); + valid_idx++; + } + } + } + // Calculate delta + int64_t max_pos = 0; + for (const auto &dim : llm_positions) { + for (auto val : dim) { + max_pos = max(max_pos, val); + } + } + mrope_position_deltas.setDataAt(0, 0, 0, i, (float)((max_pos + 1) - static_cast(input_ids.sequence()))); + } + position_ids.setName("position_ids"); + mrope_position_deltas.setName("mrope_position_deltas"); + return {position_ids, mrope_position_deltas}; + } +}; +} // namespace npu + +#endif // MODELING_QWEN2VL_NPU_HPP \ No newline at end of file diff --git a/src/models/qwen2_vl/processing_qwen2_vl.hpp b/mllm/models/qwen2_vl/processing_qwen2_vl.hpp similarity index 96% rename from src/models/qwen2_vl/processing_qwen2_vl.hpp rename to mllm/models/qwen2_vl/processing_qwen2_vl.hpp index 3a70e972d..398d22deb 100644 --- a/src/models/qwen2_vl/processing_qwen2_vl.hpp +++ b/mllm/models/qwen2_vl/processing_qwen2_vl.hpp @@ -31,7 +31,7 @@ Tensor vector3d2Tensor(vector>> img, string name = "input", int channel = img.size(); int height = img[0].size(); int width = img[0][0].size(); - Tensor tensor1(1, height, channel, width, Backend::global_backends[type], true); + Tensor tensor1(1, height, channel, width, Backend::global_backends[type].get(), true); tensor1.setName(std::move(name)); Tensor::tensor_status = TENSOR_STATIC_INIT; tensor1.setTtype(INPUT_TENSOR); @@ -48,7 +48,7 @@ Tensor vector3d2Tensor(vector>> img, string name = "input", B int channel = img.size(); int height = img[0].size(); int width = img[0][0].size(); - Tensor tensor1(1, height, channel, width, Backend::global_backends[type], true); + Tensor tensor1(1, height, channel, width, Backend::global_backends[type].get(), true); tensor1.setName(std::move(name)); Tensor::tensor_status = TENSOR_STATIC_INIT; tensor1.setTtype(INPUT_TENSOR); @@ -112,9 +112,7 @@ class Qwen2VLImageProcessor { int old_width 
= image.width; auto [new_height, new_width] = smart_resize(old_height, old_width, IMAGE_FACTOR, MIN_PIXELS, MAX_PIXELS); std::vector temp_image_info = {image}; - auto image_n = PreProcessor::ResizeImages({temp_image_info}, new_height, new_width, true)[0]; - // delete[] image.data; - // image.data = nullptr; + auto image_n = PreProcessor::ResizeImages({temp_image_info}, new_height, new_width, true, false, ResizeFitEdge::none, ResampleType::BICUBIC)[0]; return image_n; } @@ -191,6 +189,12 @@ class Qwen2VLImageProcessor { vector> input_ids_; pair>, vector> preprocess_images(const uint8_t *image, const size_t &image_length) { auto imageinfos = vector(); + // int width, height, channels; + // auto data = stbi_load_from_memory(image, image_length, &width, &height, &channels, 3); + // if (data == nullptr) { + // MLLM_LOG_ERROR_STREAM << "Error: Failed to load image from memory." << std::endl; + // exit(-1); + // } int width, height, channels; auto data = stbi_load_from_memory(image, image_length, &width, &height, &channels, 0); if (data == nullptr) { @@ -423,20 +427,20 @@ class Qwen2VLProcessor final : public PreProcessor { } } - void Process(const std::string &text) override{}; - void PreProcessImages(const std::vector &images, const std::vector &image_length) override{}; - void PreProcessImages(const std::vector &images_path) override{}; + void Process(const std::string &text) override {}; + void PreProcessImages(const std::vector &images, const std::vector &image_length) override {}; + void PreProcessImages(const std::vector &images_path) override {}; std::string detokenize(const vector &tokens) { return tokenizer->detokenize(tokens); } - std::pair detokenize(Tensor &result) { + std::pair detokenize(Tensor &result, int seq = 0) { assert(result.batch() == 1 && "Batch size of result is not 1. Which is not supported for now."); assert(result.head() == 1 && "The 3rd dim of result should be one. 
e.g.:[1, 1, seq, hidden]"); vector scores; int _dims = result.dimension(); - int _seq = result.sequence() - 1; + int _seq = seq == 0 ? result.sequence() - 1 : seq - 1; for (int i = 0; i < _dims; ++i) { auto value = result.dataAt(0, 0, _seq, i); scores.push_back(value); diff --git a/src/models/qwen2_vl/vtp/modeling_qwen2_vl.hpp b/mllm/models/qwen2_vl/vtp/modeling_qwen2_vl.hpp similarity index 80% rename from src/models/qwen2_vl/vtp/modeling_qwen2_vl.hpp rename to mllm/models/qwen2_vl/vtp/modeling_qwen2_vl.hpp index 7243ca144..b6444a22e 100644 --- a/src/models/qwen2_vl/vtp/modeling_qwen2_vl.hpp +++ b/mllm/models/qwen2_vl/vtp/modeling_qwen2_vl.hpp @@ -4,16 +4,23 @@ #ifndef MODELING_QWEN2VL_HPP #define MODELING_QWEN2VL_HPP +// #define VTP +#define NDC + +#include +#include +#include #include "Layer.hpp" #include "Module.hpp" #include "Tensor.hpp" #include "Types.hpp" #include "../configuration_qwen2_vl.hpp" -// #include "models/qwen/modeling_qwen.hpp" -// #include +#if defined(VTP) #include "vtp_tools.hpp" -#include -#include +#elif defined(NDC) +#include "ndc_tools.hpp" +#endif +#include "ui_tools.hpp" using namespace mllm; @@ -40,19 +47,21 @@ class VisionAttention final : public Module { Layer o_proj; int head_size_{}; int kv_head_size_{}; - int attn_hidden_dim_{}; + int head_dim_{}; + string attn_impl; public: VisionAttention() = default; - VisionAttention(int hidden_dim, int head_size, int kv_head_size, int attn_hidden_dim, bool bias, + VisionAttention(int hidden_dim, int head_size, int kv_head_size, int head_dim, bool bias, string attn_implementation, const TransformerNameConfig &names, const string &base_name) { - attn_hidden_dim_ = attn_hidden_dim; + head_dim_ = head_dim; head_size_ = head_size; kv_head_size_ = kv_head_size; + attn_impl = attn_implementation; - qkv_proj = Linear(hidden_dim, head_size * attn_hidden_dim * 3, bias, base_name + names._qkv_proj_name); + qkv_proj = Linear(hidden_dim, head_size * head_dim * 3, bias, base_name + names._qkv_proj_name); 
softmax = Softmax(DIMENSION, false, base_name + "softmax"); - o_proj = Linear(head_size * attn_hidden_dim, hidden_dim, bias, base_name + names._o_proj_name); + o_proj = Linear(head_size * head_dim, hidden_dim, bias, base_name + names._o_proj_name); } vector Forward(vector inputs, vector args) override { auto cu_seqlens = inputs[1]; @@ -60,22 +69,27 @@ class VisionAttention final : public Module { auto seq_length = inputs[0].sequence(); Tensor q, k, v; auto qkv = qkv_proj(inputs[0]); - auto qkv_sp = qkv.split({attn_hidden_dim_ * head_size_, attn_hidden_dim_ * head_size_, attn_hidden_dim_ * head_size_}, DIMENSION); + auto qkv_sp = qkv.split({head_dim_ * head_size_, head_dim_ * head_size_, head_dim_ * head_size_}, DIMENSION); q = qkv_sp[0]; k = qkv_sp[1]; v = qkv_sp[2]; - q = q.view(-1, head_size_, -1, attn_hidden_dim_); - k = k.view(-1, head_size_, -1, attn_hidden_dim_); - v = v.view(-1, head_size_, -1, attn_hidden_dim_); + q = q.view(-1, head_size_, -1, head_dim_); + k = k.view(-1, head_size_, -1, head_dim_); + v = v.view(-1, head_size_, -1, head_dim_); q = Tensor::apply_rotary_pos_emb_vision(q, rotary_pos_emb); k = Tensor::apply_rotary_pos_emb_vision(k, rotary_pos_emb); - k = k.transpose(SEQUENCE, DIMENSION); - auto qk = Tensor::mm(q, k); - qk = qk / std::sqrt(attn_hidden_dim_); - // mask - qk = softmax(qk); - auto o = Tensor::mm(qk, v); - o = o.view(-1, 1, -1, attn_hidden_dim_ * head_size_); + Tensor o; + if (attn_impl == "flash_attention_2") { + o = Tensor::flash_attention2_forward(q, k, v, false); + } else { // eager implementation + k = k.transpose(SEQUENCE, DIMENSION); + auto qk = Tensor::mm(q, k); + qk = qk / std::sqrt(head_dim_); + // mask + qk = softmax(qk); + o = Tensor::mm(qk, v); + } + o = o.view(-1, 1, -1, head_dim_ * head_size_); o = o_proj(o); return {o}; } @@ -109,8 +123,8 @@ class VisionBlock final : public Module { public: VisionBlock() = default; - VisionBlock(int hidden_dim, int head_size, int ffn_hidden, const string &act_fn_type, const 
ViTNameConfig &names, const string &base_name) { - attention = VisionAttention(hidden_dim, head_size, head_size, hidden_dim / head_size, true, names, base_name + names._attn_base_name); + VisionBlock(int hidden_dim, int head_size, int ffn_hidden, const string &act_fn_type, string attn_implementation, const ViTNameConfig &names, const string &base_name) { + attention = VisionAttention(hidden_dim, head_size, head_size, hidden_dim / head_size, true, attn_implementation, names, base_name + names._attn_base_name); mlp = VisionMLP(hidden_dim, ffn_hidden, act_fn_type, names, base_name + names._ffn_base_name); norm1 = LayerNorm(hidden_dim, true, 1e-6, base_name + names._attn_norm_name); norm2 = LayerNorm(hidden_dim, true, 1e-6, base_name + names._ffn_norm_name); @@ -160,19 +174,31 @@ class Qwen2VisionModel final : public Module { public: Qwen2VisionModel() = default; - Qwen2VisionModel(int hidden_dim, int vision_embed_dim, int head_size, int mlp_hidden_dim, const string &act_fn_type, int patch, int img_hw, int block_num, int spatial_merge_size, const Qwen2VLNameConfig &names, const string &base_name) { + Qwen2VisionModel(int hidden_dim, int vision_embed_dim, int head_size, int mlp_hidden_dim, const string &act_fn_type, int patch, int img_hw, int block_num, int spatial_merge_size, string attn_implementation, const Qwen2VLNameConfig &names, const string &base_name) { patch_embed = Qwen2PatchEmbed(vision_embed_dim, patch, img_hw, names, base_name + names.patch_embed_name); rot_pos_emb = VisionRoPE((vision_embed_dim / head_size) / 2, spatial_merge_size, base_name + ".rot_pos_emb"); - blocks = List(block_num, vision_embed_dim, head_size, mlp_hidden_dim, act_fn_type, names, base_name + names._layer_name); + blocks = List(block_num, vision_embed_dim, head_size, mlp_hidden_dim, act_fn_type, attn_implementation, names, base_name + names._layer_name); patch_merger = PatchMerger(hidden_dim, vision_embed_dim, spatial_merge_size, names, base_name + names._merger_name); } vector 
Forward(vector inputs, vector args) override { auto hidden_states = patch_embed({inputs[0]})[0]; auto rotary_pos_emb = rot_pos_emb(inputs[1]); - auto grid_t = inputs[0].dataAt(0, 0, 0, 0); - auto grid_h = inputs[0].dataAt(0, 0, 0, 1); - auto grid_w = inputs[0].dataAt(0, 0, 0, 2); + auto grid_t = inputs[1].dataAt(0, 0, 0, 0); + auto grid_h = inputs[1].dataAt(0, 0, 0, 1); + auto grid_w = inputs[1].dataAt(0, 0, 0, 2); vector cu_seqlens_v = {0.0F, grid_t * grid_h * grid_w}; + + if (use_pre_vit_merge) { + std::vector> region_masks = {UIRegionMask}; + auto selected_indices = process_region_mask(region_masks); + // std::cout << selected_indices.size() << " " << cu_seqlens_v[1] << std::endl; + if (selected_indices.size() != cu_seqlens_v[1]) { + cu_seqlens_v[1] = float(selected_indices.size()); + rotary_pos_emb = rotary_pos_emb.clip(selected_indices, SEQUENCE); + hidden_states = hidden_states.clip(selected_indices, SEQUENCE); + } + } + auto cu_seqlens = Tensor(cu_seqlens_v); for (auto &block : blocks) { hidden_states = block({hidden_states, cu_seqlens, rotary_pos_emb})[0]; @@ -231,7 +257,8 @@ class QWen2Attention final : public Module { k_rope = MultimodalRoPE(config.rope_theta, config.max_position_embeddings, config.mrope_section, base_name + "k_rope"); k_cache = KVCache(num_key_value_heads, head_dim, num_key_value_groups, config.cache_limit, base_name + "k_cache"); v_cache = KVCache(num_key_value_heads, head_dim, num_key_value_groups, config.cache_limit, base_name + "v_cache"); - softmax = Softmax(DIMENSION, true, base_name + "softmax"); + mask = Causalmask(base_name + "mask"); + softmax = Softmax(DIMENSION, false, base_name + "softmax"); } std::vector Forward(std::vector inputs, std::vector args) override { @@ -246,24 +273,43 @@ class QWen2Attention final : public Module { value_states = value_states.view(-1, num_key_value_heads, -1, head_dim); query_states = q_rope(query_states, position_ids); key_states = k_rope(key_states, position_ids); - key_states = 
k_cache(key_states); - value_states = v_cache(value_states); + auto key_cache_states = k_cache(key_states); + auto value_cache_states = v_cache(value_states); +#if defined(NDC) + // ====================================================================================== + WHERE_TOKEN_PRUNING.get_kvcache(key_cache_states, value_cache_states, key_states, value_states, + layer_index, k_cache.getCacheSeqLen()); + //====================================================================================== +#endif auto atten_weight = - Tensor::mm(query_states, key_states.transpose(Chl::SEQUENCE, Chl::DIMENSION)) + Tensor::mm(query_states, key_cache_states.transpose(Chl::SEQUENCE, Chl::DIMENSION)) / std::sqrt(head_dim); +#if defined(NDC) + if (WHERE_TOKEN_PRUNING.causal_masks.find(layer_index) != WHERE_TOKEN_PRUNING.causal_masks.end() + && atten_weight.sequence() > 1) { + atten_weight = atten_weight + WHERE_TOKEN_PRUNING.causal_masks[layer_index]; + WHERE_TOKEN_PRUNING.causal_masks.erase(layer_index); + } else { +#endif + atten_weight = mask(atten_weight, k_cache.getCacheSeqLen()); +#if defined(NDC) + } +#endif atten_weight = softmax(atten_weight, k_cache.getCacheSeqLen()); - auto atten_output = Tensor::mm(atten_weight, value_states); + auto atten_output = Tensor::mm(atten_weight, value_cache_states); atten_output = atten_output.view(-1, 1, -1, head_dim * num_heads); atten_output = o_proj(atten_output); +#if defined(VTP) //====================================================================================== // pruning stage if (WHERE_TOKEN_PRUNING.is_prefill()) { WHERE_TOKEN_PRUNING.set_prefill_layer(layer_index); WHERE_TOKEN_PRUNING.update_attn_acc_score(atten_weight); - atten_output = WHERE_TOKEN_PRUNING.prunning_attn_output(atten_output); + atten_output = WHERE_TOKEN_PRUNING.prunning_attn_output(atten_output, layer_index); } //====================================================================================== - return {atten_output}; +#endif + return {atten_output, 
atten_weight}; } vector get_cache() { @@ -288,6 +334,7 @@ class QWen2Attention final : public Module { KVCache k_cache; KVCache v_cache; Softmax softmax; + Causalmask mask; }; // Copied from GemmaDecoder with Gemma->Qwen and set RmsNorm(without add_unit_offset) @@ -308,17 +355,26 @@ class QWen2Decoder final : public Module { auto position_ids = inputs[1]; auto residual = inputs[0]; auto x = input_layernorm(residual); - x = self_atten({x, position_ids}, layer_index)[0]; + auto xs = self_atten({x, position_ids}, layer_index); + x = xs[0]; + auto atten_weight = xs[1]; +#if defined(VTP) //====================================================================================== // pruning stage if (WHERE_TOKEN_PRUNING.is_prefill()) { residual = WHERE_TOKEN_PRUNING.pruning_(residual); } //====================================================================================== +#endif auto tmp = x + residual; x = post_attention_layernorm(tmp); x = mlp({x})[0]; x = x + tmp; +#if defined(NDC) + //====================================================================================== + WHERE_TOKEN_PRUNING.update_hidden_pos(x, atten_weight, layer_index); + //====================================================================================== +#endif return {x}; } QWen2Attention &get_attention() { @@ -342,6 +398,8 @@ class Qwen2VLModel final : public Module { Layer lm_head_layer; bool tie_embedding_words; + int num_hidden_layers; + int num_attention_heads; int64_t spatial_merge_size; int64_t image_token_id; @@ -361,12 +419,14 @@ class Qwen2VLModel final : public Module { auto qwen_names = config.names_config; tie_embedding_words = config.tie_embedding_words; spatial_merge_size = config.spatial_merge_size; + num_hidden_layers = config.num_hidden_layers; + num_attention_heads = config.num_attention_heads; image_token_id = config.image_token_id; video_token_id = config.video_token_id; vision_start_token_id = config.vision_start_token_id; embed_tokens = Embedding(vocab_size, 
hidden_dim, qwen_names.token_embd_name); - visual = Qwen2VisionModel(hidden_dim, vision_embed_dim, 16, vision_embed_dim * 4, "QuickGELU", 14, 336, 32, spatial_merge_size, vision_names, vision_names.vison_model_name); + visual = Qwen2VisionModel(hidden_dim, vision_embed_dim, 16, vision_embed_dim * 4, "QuickGELU", 14, 336, 32, spatial_merge_size, config.attn_implementation, vision_names, vision_names.vison_model_name); blocks = List(config.num_hidden_layers, config, qwen_names, qwen_names.blk_name); norm = RMSNorm(hidden_dim, 1e-6, qwen_names.post_norm_name); @@ -377,12 +437,11 @@ class Qwen2VLModel final : public Module { } } vector Forward(vector inputs, vector args) override { - WHERE_TOKEN_PRUNING.init(); - if (inputs[0].sequence() <= 1) { - WHERE_TOKEN_PRUNING.prefill_stage = false; - } else { - WHERE_TOKEN_PRUNING.prefill_stage = true; - } +#if defined(VTP) || defined(NDC) + // ====================================================================================== + WHERE_TOKEN_PRUNING.init(inputs[0], num_hidden_layers, num_attention_heads); + // ====================================================================================== +#endif auto position_ids = inputs[3]; bool have_img = inputs[1].batch() > 0; auto hidden_states = embed_tokens({inputs[0]}); @@ -390,16 +449,27 @@ class Qwen2VLModel final : public Module { auto image_embeds = visual({inputs[1], inputs[2]})[0]; auto n_image_features = image_embeds.sequence(); auto where_idx = inputs[0].where(image_token_id, SEQUENCE); - // ======================================================================================================== +#if defined(VTP) || defined(NDC) + // ====================================================================================== // Pruning Stage 1 Start - if (WHERE_TOKEN_PRUNING.is_prefill()) { - WHERE_TOKEN_PRUNING.set_vision_token(where_idx, hidden_states, image_embeds); - } - // 
======================================================================================================== + WHERE_TOKEN_PRUNING.set_vision_token(where_idx, hidden_states, image_embeds); + // ====================================================================================== +#endif hidden_states = hidden_states.index_put(image_embeds, where_idx, false); } +#if defined(NDC) + // ====================================================================================== + // if (WHERE_TOKEN_PRUNING.is_prefill()) { + auto past_kv_seq_len = blocks[0].get_attention().get_cache()[0]->getCacheSeqLen(); + if (past_kv_seq_len != -1) { + WHERE_TOKEN_PRUNING.ndc_prepare(hidden_states, position_ids, past_kv_seq_len); + } + // } + // ====================================================================================== +#endif int layer_index = 0; for (auto &block : blocks) { +#if defined(VTP) //====================================================================================== // pruning stage if (WHERE_TOKEN_PRUNING.is_prefill()) { @@ -407,7 +477,17 @@ class Qwen2VLModel final : public Module { position_ids = WHERE_TOKEN_PRUNING.pruning_pos(position_ids, DIMENSION); } //====================================================================================== +#endif hidden_states = block({hidden_states, position_ids}, layer_index)[0]; +#if defined(NDC) + // ====================================================================================== + // change position_ids + auto kv_seq_len = blocks[layer_index + 1].get_attention().get_cache()[0]->getCacheSeqLen(); + if (kv_seq_len != -1) { + hidden_states = WHERE_TOKEN_PRUNING.prepare_next_layer(layer_index, position_ids, hidden_states, kv_seq_len); + } + // ====================================================================================== +#endif layer_index++; } hidden_states = norm(hidden_states); @@ -419,12 +499,14 @@ class Qwen2VLModel final : public Module { } else { hidden_states = lm_head_layer(hidden_states); } +#if 
defined(VTP) //====================================================================================== // pruning stage if (WHERE_TOKEN_PRUNING.is_prefill() && (Tensor::tensor_status == TENSOR_STATIC_READY)) { WHERE_TOKEN_PRUNING.prefill_stage = false; } //====================================================================================== +#endif return {hidden_states}; } void clear_kvcache() override { @@ -469,8 +551,8 @@ class Qwen2VLModel final : public Module { } const size_t batch_size = input_ids.batch(); // input_ids.size(); const size_t seq_len = batch_size > 0 ? input_ids.sequence() : 0; // batch_size > 0 ? input_ids[0].size() : 0; - Tensor position_ids(3, 1, batch_size, seq_len, Backend::global_backends[MLLM_CPU], true); - Tensor mrope_position_deltas(1, 1, 1, batch_size, Backend::global_backends[MLLM_CPU], true); + Tensor position_ids(3, 1, batch_size, seq_len, Backend::global_backends[MLLM_CPU].get(), true); + Tensor mrope_position_deltas(1, 1, 1, batch_size, Backend::global_backends[MLLM_CPU].get(), true); bool has_vision = (image_grid_thw.sequence() > 0) || (video_grid_thw.sequence() > 0); // image_grid_thw || video_grid_thw; if (!has_vision) { // Pure text case diff --git a/mllm/models/qwen2_vl/vtp/ndc_tools.hpp b/mllm/models/qwen2_vl/vtp/ndc_tools.hpp new file mode 100644 index 000000000..aa8d8bc1c --- /dev/null +++ b/mllm/models/qwen2_vl/vtp/ndc_tools.hpp @@ -0,0 +1,545 @@ +// +// Created by Rongjie Yi on 25-5-29. 
+// +#ifndef NDC_TOOLS_HPP +#define NDC_TOOLS_HPP + +#include "Module.hpp" +#include "Tensor.hpp" +#include "Types.hpp" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace mllm; + +class DelayComputeKVCache { +public: + vector> kv_true_token_appds; + vector hidden_states_cache; + vector hidden_states_filled; + DelayComputeKVCache() { + } + void init_cache_list(int layers) { + hidden_states_cache.resize(layers); + hidden_states_filled.resize(layers); + kv_true_token_appds.resize(layers); + } + void update_hidden_states(Tensor hidden_states, int layer_idx, int original_hs_length, vector pos, bool is_prefill) { + auto b = hidden_states.batch(); + auto d = hidden_states.dimension(); + if (hidden_states_cache[layer_idx].name().empty()) { //=如果hidden_states_cache[layer_idx]为空Tensor + hidden_states_cache[layer_idx] = Tensor(b, 1, original_hs_length, d, MLLM_CPU, true); + hidden_states_cache[layer_idx].setName("hidden_states_cache_" + std::to_string(layer_idx)); + hidden_states_filled[layer_idx] = Tensor(b, 1, original_hs_length, 1, MLLM_CPU, true); + hidden_states_filled[layer_idx].setName("hidden_states_fille_" + std::to_string(layer_idx)); + } + for (int bb = 0; bb < b; ++bb) { + for (int i = 0; i < pos.size(); ++i) { + auto p = pos[i]; + memcpy(hidden_states_cache[layer_idx].ptrAt(bb, 0, p, 0), + hidden_states.ptrAt(bb, 0, i, 0), + sizeof(float) * d); + hidden_states_filled[layer_idx].setDataAt(bb, 0, p, 0, 1.0f); + } + } + } + Tensor get_hidden_states(int layer_idx, vector pos) { + return hidden_states_cache[layer_idx].clip(pos, SEQUENCE); + } + Tensor reset_hidden_states(Tensor hidden_states, int layer_idx, vector pos) { + assert(hidden_states.batch() == 1); + vector hidden_states_last; + hidden_states_last.resize(hidden_states.dimension()); + memcpy(hidden_states_last.data(), + hidden_states.ptrAt(0, 0, hidden_states.sequence() - 1, 0), + sizeof(float) * hidden_states.dimension()); + hidden_states.reshape(1, 
1, pos.size() + 1, hidden_states.dimension()); + hidden_states.alloc(); + for (int i = 0; i < pos.size(); ++i) { + int p = pos[i]; + memcpy(hidden_states.ptrAt(0, 0, i, 0), + hidden_states_cache[layer_idx].ptrAt(0, 0, p, 0), + sizeof(float) * hidden_states.dimension()); + } + memcpy(hidden_states.ptrAt(0, 0, hidden_states.sequence() - 1, 0), + hidden_states_last.data(), + sizeof(float) * hidden_states.dimension()); + return hidden_states; + } + vector kv_not_filled_pos(int layer_idx, int original_kv_length) { + auto filled_token = kv_true_token_appds[layer_idx]; + vector not_filled_pos; + for (int i = 0; i < original_kv_length; ++i) { + if (std::find(filled_token.begin(), filled_token.end(), i) == filled_token.end()) { + not_filled_pos.push_back(i); + } + } + return not_filled_pos; + } + template + static void reorder_cache(Tensor &k_cache, Tensor &v_cache, + const vector &indices, + int pos_first, int cache_sequence) { + const int num_heads = v_cache.head(); + const int k_per_head = k_cache.dimension(); + const int v_per_head = v_cache.dimension(); + const int k_size = num_heads * k_per_head; + const int v_size = v_per_head; + // 1. 分配临时内存 + if (cache_sequence <= pos_first) { + pos_first = 0; + } + vector> k_cache_data(cache_sequence - pos_first); + vector>> v_cache_data(num_heads); + for (int i = pos_first; i < cache_sequence; i++) { + k_cache_data[i - pos_first].resize(k_size); + } + for (int h = 0; h < num_heads; ++h) { + v_cache_data[h].resize(cache_sequence - pos_first); + for (int i = pos_first; i < cache_sequence; i++) { + v_cache_data[h][i - pos_first].resize(v_size); + } + } + // 2. 
拷贝数据到临时内存 + for (int i = pos_first; i < cache_sequence; i++) { + // K_cache拷贝(全部heads) + memcpy(k_cache_data[i - pos_first].data(), + k_cache.ptrAt(0, 0, i, 0), + sizeof(T) * k_size); + // V_cache拷贝(每个head分开) +#pragma omp parallel for num_threads(CPUBackend::cpu_threads) + for (int h = 0; h < num_heads; ++h) { + memcpy(v_cache_data[h][i - pos_first].data(), + v_cache.ptrAt(0, h, i, 0), + sizeof(T) * v_size); + } + } + // 3. 根据索引重新排序 + for (size_t idx : indices) { + if (idx >= (size_t)pos_first && idx < (size_t)cache_sequence) { + const int temp_idx = idx - pos_first; + // 写回K_cache + memcpy(k_cache.ptrAt(0, 0, idx, 0), + k_cache_data[temp_idx].data(), + sizeof(T) * k_size); + // 写回V_cache +#pragma omp parallel for num_threads(CPUBackend::cpu_threads) + for (int h = 0; h < num_heads; ++h) { + memcpy(v_cache.ptrAt(0, h, idx, 0), + v_cache_data[h][temp_idx].data(), + sizeof(T) * v_size); + } + } + } + } + + void update_kv_cache(Tensor &k_cache, Tensor &v_cache, Tensor &k_state, Tensor &v_state, int cache_sequence, int layer_idx, + bool is_prefill, string update_mode, vector pos = {}, int original_kv_length = -1) { + if (update_mode == "insert") { + assert(k_cache.masterTensor() == k_state.masterTensor()); + assert(v_cache.masterTensor() == v_state.masterTensor()); + // pos代表现在的token列表:{0,1,2,3,5,6,8,9}, 8个token及其列表 + if (is_prefill) { + kv_true_token_appds[layer_idx] = pos; // 记录当前token的列表 + assert(kv_true_token_appds[layer_idx].size() == cache_sequence); + } else { + auto new_token_pos = kv_true_token_appds[layer_idx][kv_true_token_appds[layer_idx].size() - 1] + 1; + assert(kv_true_token_appds[layer_idx].size() + 1 + pos.size() == cache_sequence); + kv_true_token_appds[layer_idx].insert(kv_true_token_appds[layer_idx].end(), pos.begin(), pos.end()); + kv_true_token_appds[layer_idx].push_back(new_token_pos); // 添加新的token位置 + auto &cur_pos = kv_true_token_appds[layer_idx]; + // for k_cache; + if (pos.size() > 1) { + auto pos_first = pos[0]; + assert(v_cache.ctype() == 
BHDS); + // 创建并初始化索引数组 + std::vector indices(cur_pos.size()); + std::iota(indices.begin(), indices.end(), 0); + std::sort(indices.begin(), indices.end(), [&](size_t a, size_t b) { + return cur_pos[a] < cur_pos[b]; + }); + assert(k_cache.batch() == 1); + if (k_cache.dtype() == MLLM_TYPE_F16) { + reorder_cache(k_cache, v_cache, indices, pos_first, cache_sequence); + } else { + reorder_cache(k_cache, v_cache, indices, pos_first, cache_sequence); + } + std::sort(kv_true_token_appds[layer_idx].begin(), kv_true_token_appds[layer_idx].end()); + } + } + } + } +}; + +class NdcContext { + int first_img_token_pos = 0; + int last_img_token_pos = 0; + int last_img_token_pos_l = 0; + DelayComputeKVCache kvcache_ctx; + int cur_step = -1; + vector> chosen_pos_in_each; + vector> chosen_pos_to_delay_compute; + Tensor global_position_ids; + int num_hidden_layers = 0; + int num_head = 0; + int original_kv_length = 0; + int chunk_size = 4; + +public: + // map pruning_place_cfg = {{3, 0.2}, {9, 0.2}, {12, 0.6}, {15, 0.6}, {18, 0.8}, {26, 0.8}}; + // map pruning_place_cfg = {{3, 0.2}, {6, 0.8}, {12, 0.8}, {15, 0.8}, {18, 0.8}, {26, 0.8}}; + // map pruning_place_cfg = {{3, 0.8}, {6, 0.8}, {12, 0.8}, {15, 0.8}, {18, 0.8}, {26, 0.8}}; + map pruning_place_cfg = {{6, 0.8}, {12, 0.8}, {18, 0.8}}; + // map pruning_place_cfg = {{3, 0.2}, {9, 0.2}}; + +public: + map causal_masks; // layer_idx -> causal_mask + bool prefill_stage = true; + + /** + * @brief Resets the context to its initial state. + * This function should be called to clear the state between different generation requests. 
+ */ + void reset() { + first_img_token_pos = 0; + last_img_token_pos = 0; + last_img_token_pos_l = 0; + kvcache_ctx = DelayComputeKVCache(); // Re-initialize the KV cache context + cur_step = -1; + chosen_pos_in_each.clear(); + chosen_pos_to_delay_compute.clear(); + global_position_ids = Tensor(); // Reset to an empty tensor + num_hidden_layers = 0; + num_head = 0; + original_kv_length = 0; + causal_masks.clear(); + prefill_stage = true; + } + + void init(Tensor input_ids, int num_layers, int num_attention_heads) { + if (Module::llm_model_ptr->doLoad) { return; } + // reset(); + num_hidden_layers = num_layers; + num_head = num_attention_heads; + if (kvcache_ctx.hidden_states_cache.empty()) { + chosen_pos_in_each.resize(num_hidden_layers, {}); + kvcache_ctx.init_cache_list(num_hidden_layers); // Initialize with 1 layer, can be adjusted as needed + } + + if (input_ids.sequence() <= 1) { + prefill_stage = false; + } else { + prefill_stage = true; + } + } + bool is_prefill() { + return prefill_stage && cur_step == 0; + } + void set_vision_token(Tensor where_idx, Tensor hidden_states, Tensor image_embeds) { + if (Module::llm_model_ptr->doLoad) { return; } + first_img_token_pos = int(where_idx.dataAt(0, 0, 0, 0)); + last_img_token_pos = int(where_idx.dataAt(0, 0, 0, where_idx.dimension() - 1)) + 1; + } + + void ndc_prepare(Tensor hidden_states, Tensor position_ids, int past_kv_seq_len) { + if (Module::llm_model_ptr->doLoad) { return; } + cur_step += 1; + if (cur_step == 0) { + global_position_ids = position_ids; + original_kv_length = hidden_states.sequence(); + } + chosen_pos_in_each.resize(num_hidden_layers, {}); + int new_seq_len = hidden_states.sequence() + past_kv_seq_len; + chosen_pos_in_each[0].clear(); + for (int i = 0; i < new_seq_len; ++i) { + chosen_pos_in_each[0].push_back(i); + } + if (!is_prefill()) { + chosen_pos_to_delay_compute.resize(num_hidden_layers, {}); + } + } + void get_kvcache(Tensor &k_cache, Tensor &v_cache, Tensor &k_state, Tensor &v_state, 
int layer_idx, int cache_sequence) { + if (Module::llm_model_ptr->doLoad) { return; } + if (is_prefill()) { + auto chosen_pos = chosen_pos_in_each[layer_idx]; + kvcache_ctx.update_kv_cache(k_cache, v_cache, k_state, v_state, cache_sequence, layer_idx, + is_prefill(), "insert", + chosen_pos, original_kv_length); + } else { + auto chosen_pos = chosen_pos_in_each[layer_idx]; + kvcache_ctx.update_kv_cache(k_cache, v_cache, k_state, v_state, cache_sequence, layer_idx, + is_prefill(), "insert", + chosen_pos_to_delay_compute[layer_idx], original_kv_length); + } + } + + void topk_partial_sort(const vector &scores, int k, + vector &topk_values, vector &topk_indices) { + if (k <= 0 || scores.empty()) { + topk_values.clear(); + topk_indices.clear(); + return; + } + k = std::min(k, static_cast(scores.size())); + // 创建索引向量 + vector indices(scores.size()); + for (int i = 0; i < scores.size(); i++) { + indices[i] = i; + } + // 部分排序 - 将前k个最大的元素移动到前部 + std::partial_sort(indices.begin(), indices.begin() + k, indices.end(), + [&scores](int a, int b) { + return scores[a] > scores[b]; // 降序排序 + }); + // 提取结果 + topk_values.resize(k); + topk_indices.resize(k); + for (int i = 0; i < k; i++) { + topk_indices[i] = indices[i]; + topk_values[i] = scores[indices[i]]; + } + } + + vector select_high_score_visual_token_prefill(Tensor attn, int layer_idx, int chunk_size = 4) { + auto cur_chosen_pos = chosen_pos_in_each[layer_idx]; + // attention_score_analyze_prefill start + attn = attn.mean(HEAD); // 1,t,1,t + int visual_start_in_selected = -1; + int visual_end_in_selected = -1; + for (int i = 0; i < cur_chosen_pos.size(); ++i) { + auto pos = cur_chosen_pos[i]; + if (pos == first_img_token_pos - 1) { // 0 && visual_end_in_selected > 0) { + break; + } + } + int attn_seq_start = visual_end_in_selected + 1; // +1 for the end image token + int attn_seq_end = attn.sequence(); // exclusive + int attn_dim_start = visual_start_in_selected + 1; // +1 for the first image token + int attn_dim_end = 
visual_end_in_selected; // exclusive + vector attn_score; // 1,1,1,visual_end_in_selected - visual_start_in_selected + 1 + for (int j = attn_dim_start; j < attn_dim_end; ++j) { + float data = 0.0f; + for (int i = attn_seq_start; i < attn_seq_end; ++i) { + data += attn.dataAt(0, 0, i, j); + } + // data /= (attn_seq_end - attn_seq_start); + attn_score.push_back(data); + } + auto v_s = visual_start_in_selected; + auto v_e = visual_end_in_selected; + // attention_score_analyze_prefill end + auto pruning_rate = pruning_place_cfg[layer_idx]; + auto cur_visual_token_length = attn_score.size(); + auto keep_ratio = 1 - pruning_rate; + int k_initial = static_cast(std::ceil(cur_visual_token_length * keep_ratio)); + int k_final = (k_initial / chunk_size) * chunk_size; + k_final = std::min(k_final, static_cast(cur_visual_token_length)); // 确保不超过当前实际长度 + vector topk_vals; + vector topk_indices; + topk_partial_sort(attn_score, k_final, topk_vals, topk_indices); // torch.topk(attn_score, k_final) + vector final_token_chosen; + vector cur_chosen_pos_p1(cur_chosen_pos.begin(), cur_chosen_pos.begin() + v_s + 1); + vector cur_chosen_pos_p2(cur_chosen_pos.begin() + v_s + 1, cur_chosen_pos.begin() + v_e); + vector cur_chosen_pos_p3(cur_chosen_pos.begin() + v_e, cur_chosen_pos.end()); + final_token_chosen = cur_chosen_pos_p1; + for (auto item : topk_indices) { + final_token_chosen.push_back(cur_chosen_pos_p2[item]); + } + final_token_chosen.insert(final_token_chosen.end(), cur_chosen_pos_p3.begin(), cur_chosen_pos_p3.end()); + std::sort(final_token_chosen.begin(), final_token_chosen.end()); + return final_token_chosen; + } + vector select_high_score_visual_token_decode(Tensor attn, int layer_idx, int chunk_size = 4) { + auto cur_chosen_pos = chosen_pos_in_each[layer_idx]; + // attention_score_analyze_decode start + attn = attn.mean(HEAD); // 1,t,1,t TODO + if (attn.sequence() != 1) { + attn = attn.clip({}, {}, {-1}, {}); // 1,1,1,t + } + auto cur_chosen_tokens = 
chosen_pos_in_each[layer_idx]; + int visual_start_in_selected = -1; + int visual_end_in_selected = -1; + for (int i = 0; i < cur_chosen_pos.size(); ++i) { + auto pos = cur_chosen_pos[i]; + if (pos == first_img_token_pos - 1) { // 0 && visual_end_in_selected > 0) { + break; + } + } + int attn_dim_start = visual_start_in_selected + 1; // +1 for the first image token + int attn_dim_end = visual_end_in_selected; // exclusive + vector attn_score; // 1,1,1,visual_end_in_selected - visual_start_in_selected + 1 + for (int j = attn_dim_start; j < attn_dim_end; ++j) { + float data = attn.dataAt(0, 0, 0, j); + attn_score.push_back(data); + } + auto v_s = visual_start_in_selected; + auto v_e = visual_end_in_selected; + // attention_score_analyze_decode end + auto pruning_rate = pruning_place_cfg[layer_idx]; + auto cur_visual_token_length = attn_score.size(); + auto keep_ratio = 1 - pruning_rate; + int k_initial = static_cast(std::ceil(cur_visual_token_length * keep_ratio)); + int k_final = (k_initial / chunk_size) * chunk_size; + k_final = std::min(k_final, static_cast(cur_visual_token_length)); // 确保不超过当前实际长度 + vector topk_vals; + vector topk_indices; + topk_partial_sort(attn_score, k_final, topk_vals, topk_indices); // torch.topk(attn_score, k_final) + vector final_token_chosen; + vector cur_chosen_pos_p1(cur_chosen_pos.begin(), cur_chosen_pos.begin() + v_s + 1); + vector cur_chosen_pos_p2(cur_chosen_pos.begin() + v_s + 1, cur_chosen_pos.begin() + v_e); + vector cur_chosen_pos_p3(cur_chosen_pos.begin() + v_e, cur_chosen_pos.end()); + final_token_chosen = cur_chosen_pos_p1; + for (auto item : topk_indices) { + final_token_chosen.push_back(cur_chosen_pos_p2[item]); + } + final_token_chosen.insert(final_token_chosen.end(), cur_chosen_pos_p3.begin(), cur_chosen_pos_p3.end()); + std::sort(final_token_chosen.begin(), final_token_chosen.end()); + return final_token_chosen; + } + + void update_hidden_pos(Tensor hidden_states, Tensor attn_weight, int layer_idx) { + if 
(Module::llm_model_ptr->doLoad) { return; } + if (is_prefill()) { + auto chs_pos = chosen_pos_in_each[layer_idx]; + if (pruning_place_cfg.find(layer_idx) != pruning_place_cfg.end()) { + kvcache_ctx.update_hidden_states(hidden_states, layer_idx, original_kv_length, chs_pos, is_prefill()); + chosen_pos_in_each[layer_idx + 1] = select_high_score_visual_token_prefill(attn_weight, layer_idx, chunk_size); + } else { + if (layer_idx + 1 < num_hidden_layers) { + chosen_pos_in_each[layer_idx + 1] = chosen_pos_in_each[layer_idx]; + } + } + } else { + auto chs_pos = chosen_pos_to_delay_compute[layer_idx]; + if (pruning_place_cfg.find(layer_idx) != pruning_place_cfg.end()) { + kvcache_ctx.update_hidden_states(hidden_states, layer_idx, original_kv_length, chs_pos, is_prefill()); + chosen_pos_in_each[layer_idx + 1] = select_high_score_visual_token_decode(attn_weight, layer_idx, chunk_size); + } else { + if (layer_idx + 1 < num_hidden_layers) { + chosen_pos_in_each[layer_idx + 1] = chosen_pos_in_each[layer_idx]; + } + } + } + } + + Tensor prepare_next_layer(int layer_idx, Tensor &position_ids, Tensor &hidden_states, int kv_seq_len) { + if (Module::llm_model_ptr->doLoad) { return hidden_states; } + if (is_prefill()) { + if (pruning_place_cfg.find(layer_idx) != pruning_place_cfg.end()) { + auto this_layer_pos = chosen_pos_in_each[layer_idx]; + auto next_layer_pos = chosen_pos_in_each[layer_idx + 1]; + position_ids = global_position_ids.clip(next_layer_pos, DIMENSION); + std::vector mapping_this_2_next_pos; + for (size_t idx = 0; idx < this_layer_pos.size(); ++idx) { + int value = this_layer_pos[idx]; + if (std::find(next_layer_pos.begin(), next_layer_pos.end(), value) != next_layer_pos.end()) { + mapping_this_2_next_pos.push_back(idx); + } + } + assert(mapping_this_2_next_pos.size() == next_layer_pos.size()); + hidden_states = hidden_states.clip(mapping_this_2_next_pos, SEQUENCE); + } else { + if (layer_idx + 1 < num_hidden_layers) { + auto this_layer_pos = 
chosen_pos_in_each[layer_idx]; + auto next_layer_pos = chosen_pos_in_each[layer_idx + 1]; + assert(this_layer_pos.size() == next_layer_pos.size()); + assert(std::equal(this_layer_pos.begin(), this_layer_pos.end(), next_layer_pos.begin())); + } + } + } else { + if (pruning_place_cfg.find(layer_idx) != pruning_place_cfg.end()) { + auto this_layer_pos = chosen_pos_in_each[layer_idx]; + auto next_layer_pos = chosen_pos_in_each[layer_idx + 1]; + auto next_layer_kv_cache_not_filled_pos = kvcache_ctx.kv_not_filled_pos(layer_idx + 1, original_kv_length); + std::vector need_to_delay_compute_in_next_layer_pos; + for (int item : next_layer_pos) { + if (std::find(next_layer_kv_cache_not_filled_pos.begin(), + next_layer_kv_cache_not_filled_pos.end(), + item) + != next_layer_kv_cache_not_filled_pos.end()) { + need_to_delay_compute_in_next_layer_pos.push_back(item); + } + } + std::sort(need_to_delay_compute_in_next_layer_pos.begin(), + need_to_delay_compute_in_next_layer_pos.end()); + chosen_pos_to_delay_compute[layer_idx + 1] = need_to_delay_compute_in_next_layer_pos; + if (!need_to_delay_compute_in_next_layer_pos.empty()) { + position_ids = Tensor::cat( + {global_position_ids.clip(need_to_delay_compute_in_next_layer_pos, DIMENSION), + position_ids.clip({}, {}, {}, {-1})}, + DIMENSION); + hidden_states = kvcache_ctx.reset_hidden_states(hidden_states, layer_idx, need_to_delay_compute_in_next_layer_pos); + // mask + int seq = chosen_pos_to_delay_compute[layer_idx + 1].size(); + int dim = kv_seq_len + hidden_states.sequence(); + auto &delay_compute_vec = chosen_pos_to_delay_compute[layer_idx + 1]; + auto &in_each_vec = chosen_pos_in_each[layer_idx + 1]; + Tensor causal_mask(1, num_head, seq + 1, dim, MLLM_CPU, true); + causal_mask.setName("causal_mask_" + std::to_string(layer_idx + 1)); + float min_val = std::numeric_limits::lowest(); + for (int q_side_idx = 0; q_side_idx < seq; ++q_side_idx) { + // 获取当前查询位置对应的值 + int target_value = delay_compute_vec[q_side_idx]; + // 
在in_each_vec中查找target_value的位置 + auto it = std::find(in_each_vec.begin(), in_each_vec.end(), target_value); + // 确保找到目标值 + if (it == in_each_vec.end()) { + // 处理未找到的情况 - 可选择报错或跳过 + std::cerr << "Error: target_value not found in chosen_pos_in_each" << std::endl; + continue; // 跳过当前迭代 + } + // 计算在向量中的索引位置 + int start_index = std::distance(in_each_vec.begin(), it) + 1; + // 设置从start_index到末尾的所有元素为min_val + for (int h = 0; h < num_head; h++) { + for (int j = 0; j < start_index; ++j) { + causal_mask.setDataAt(0, h, q_side_idx, j, 0); + } + for (int j = start_index; j < dim; ++j) { + causal_mask.setDataAt(0, h, q_side_idx, j, -INFINITY); + } + } + } + + for (int h = 0; h < num_head; h++) { + memset(causal_mask.ptrAt(0, h, causal_mask.sequence() - 1, 0), + 0, causal_mask.dimension() * sizeof(float)); + } + causal_masks[layer_idx + 1] = causal_mask; + } else { + hidden_states = hidden_states.clip({}, {}, {-1}, {}); + position_ids = position_ids.clip({}, {}, {}, {-1}); + } + } else { + if (layer_idx + 1 < num_hidden_layers) { + auto this_layer_pos = chosen_pos_in_each[layer_idx]; + auto next_layer_pos = chosen_pos_in_each[layer_idx + 1]; + chosen_pos_to_delay_compute[layer_idx + 1] = chosen_pos_to_delay_compute[layer_idx]; + } + } + } + return hidden_states; + } +}; +NdcContext WHERE_TOKEN_PRUNING; + +#endif // NDC_TOOLS_HPP \ No newline at end of file diff --git a/mllm/models/qwen2_vl/vtp/processing_qwen2_vl.hpp b/mllm/models/qwen2_vl/vtp/processing_qwen2_vl.hpp new file mode 100644 index 000000000..1d3bec083 --- /dev/null +++ b/mllm/models/qwen2_vl/vtp/processing_qwen2_vl.hpp @@ -0,0 +1,640 @@ +// +// Created by Rongjie Yi on 25-2-9. 
+// + +#ifndef PROCESSING_Qwen2VL_HPP +#define PROCESSING_Qwen2VL_HPP +#include +#include "OpDefined.hpp" +#include "processor/PreProcess.hpp" +#include "tokenizers/Tokenizer.hpp" +#include "models/qwen/tokenization_qwen.hpp" +#include +#include +#include +#include +#include +#include +#ifndef STB_IMAGE_IMPLEMENTATION +#define STB_IMAGE_STATIC +#define STB_IMAGE_IMPLEMENTATION +#endif +#include "stb/stb_image.h" +#ifndef STB_IMAGE_RESIZE_IMPLEMENTATION +#define STB_IMAGE_RESIZE_STATIC +#define STB_IMAGE_RESIZE_IMPLEMENTATION +#endif +#include "stb/stb_image_resize2.h" +#include "ui_tools.hpp" +#include +#include + +using namespace std; +using namespace mllm; +// 调整图像尺寸使其成为28的倍数 +std::pair smart_resize(int height, int width, int factor = 28, + int min_pixels = 3136, int max_pixels = 12845056) { + // Check aspect ratio condition + int MAX_RATIO = 200; + if (std::max(height, width) / static_cast(std::min(height, width)) > MAX_RATIO) { + throw std::invalid_argument("Absolute aspect ratio must be smaller than " + std::to_string(MAX_RATIO)); + } + + auto round_by_factor = [](int value, int f) { return ((value + f / 2) / f) * f; }; + auto floor_by_factor = [](float value, int f) { return static_cast(std::floor(value / f)) * f; }; + auto ceil_by_factor = [](float value, int f) { return static_cast(std::ceil(value / f)) * f; }; + + int h_bar = std::max(factor, round_by_factor(height, factor)); + int w_bar = std::max(factor, round_by_factor(width, factor)); + + if (h_bar * w_bar > max_pixels) { + float beta = std::sqrt((height * width) / static_cast(max_pixels)); + h_bar = floor_by_factor(height / beta, factor); + w_bar = floor_by_factor(width / beta, factor); + } else if (h_bar * w_bar < min_pixels) { + float beta = std::sqrt(min_pixels / static_cast(height * width)); + h_bar = ceil_by_factor(height * beta, factor); + w_bar = ceil_by_factor(width * beta, factor); + } + return {h_bar, w_bar}; +} +stbir_pixel_layout get_pixel_layout(int channels) { + switch (channels) { + 
case 1: return STBIR_1CHANNEL; + case 2: return STBIR_2CHANNEL; + case 3: return STBIR_RGB; + case 4: return STBIR_RGBA; + default: + throw std::invalid_argument("Unsupported number of channels: " + std::to_string(channels)); + } +} + +double compute_mse(const uint8_t *patch1, const uint8_t *patch2, int patch_size, int channels, int image_width_pixels) { + long long sum_sq_diff = 0; + + // 图像一行的字节数,即步长(stride) + const int stride_bytes = image_width_pixels * channels; + // patch一行的字节数 + const int patch_row_bytes = patch_size * channels; + + // 逐行遍历 patch + for (int r = 0; r < patch_size; ++r) { + // 计算当前行在 patch1 和 patch2 中的起始地址 + const uint8_t *p1_row_start = patch1 + r * stride_bytes; + const uint8_t *p2_row_start = patch2 + r * stride_bytes; + + // 比较当前行中的所有字节 + for (int c = 0; c < patch_row_bytes; ++c) { + int diff = static_cast(p1_row_start[c]) - static_cast(p2_row_start[c]); + sum_sq_diff += diff * diff; + } + } + + int num_values = patch_size * patch_size * channels; + if (num_values == 0) return 0.0; + + // 使用 double 来保证精度 + return static_cast(sum_sq_diff) / num_values; +} + +// 对应 Python 的 gen_region_masks 函数,生成一个 H x W 的像素级掩码 +// 这个函数在你最初的代码里是正确的,我们现在必须使用它。 +std::vector gen_pixel_level_region_masks(int H, int W, + const std::vector>> &rows_regions, + int patch_size) { + std::vector ret(H * W, 0); + uint32_t cnt = 0; + for (int i = 0; i < rows_regions.size(); ++i) { // i 是 patch 的行索引 + const auto ®ions = rows_regions[i]; + for (const auto ®ion : regions) { + int start_col = region.first; + int end_col = region.second; + + int y_start = i * patch_size; + int y_end = std::min((i + 1) * patch_size, H); + int x_start = start_col * patch_size; + int x_end = std::min((end_col + 1) * patch_size, W); + + for (int y = y_start; y < y_end; ++y) { + for (int x = x_start; x < x_end; ++x) { + ret[y * W + x] = cnt; + } + } + cnt++; // 每个区域使用一个独一无二的ID + } + } + return ret; +} + +// 主函数:process_image_region 的最终正确版本 +std::vector process_image_region(uint8_t *image_data, int 
width, int height, int channels, float threshold) { + const int patch_size = 28; + + // 步骤 1: 调整图像尺寸 (使用和 Python 一致的参数) + // 修正致命错误: max_pixels 必须与 Python 脚本中保持一致。 + const int min_pixels_val = 4 * 28 * 28; // + const int max_pixels_val = 16384 * 28 * 28; // + auto [new_height, new_width] = smart_resize(height, width, patch_size, min_pixels_val, max_pixels_val); + + // --- 图像缩放逻辑 (保持不变) --- + uint8_t *resized_data = nullptr; + uint8_t *data_ptr = image_data; + bool resized = false; + if (new_width != width || new_height != height) { + resized_data = new uint8_t[new_width * new_height * channels]; + resized = true; + stbir_pixel_layout layout = get_pixel_layout(channels); + stbir_resize(image_data, width, height, 0, resized_data, new_width, new_height, 0, layout, STBIR_TYPE_UINT8, STBIR_EDGE_CLAMP, STBIR_FILTER_DEFAULT); + data_ptr = resized_data; + width = new_width; + height = new_height; + } + + const int num_patch_rows = height / patch_size; + const int num_patch_cols = width / patch_size; + + // 步骤 2: 按行查找区域 (与之前相同,逻辑正确) + std::vector> patches(num_patch_rows, std::vector(num_patch_cols)); + for (int i = 0; i < num_patch_rows; ++i) { + for (int j = 0; j < num_patch_cols; ++j) { + patches[i][j] = data_ptr + (i * patch_size * width * channels) + (j * patch_size * channels); + } + } + + std::vector>> rows_regions; + rows_regions.reserve(num_patch_rows); + int row_index = 0; + for (const auto row_of_patches : patches) { + if (row_of_patches.empty()) { + row_index++; + continue; + } + std::vector> regions; + int start_col = 0; + for (int j = 0; j < num_patch_cols - 1; ++j) { + double mse = compute_mse(row_of_patches[j], row_of_patches[j + 1], patch_size, channels, width); + // 在这里打印关键信息 + // printf("[C++] row: %d, j: %d, mse: %.10f, mse >= threshold: %s\n", + // row_index, j, mse, (mse >= threshold ? 
"true" : "false")); + if (mse >= threshold) { + regions.emplace_back(start_col, j); + start_col = j + 1; + } + } + regions.emplace_back(start_col, num_patch_cols - 1); + rows_regions.push_back(regions); + } + + // 步骤 3: (必须执行) 生成与图像一样大的像素级掩码,完全复现Python行为 + std::vector pixel_mask = gen_pixel_level_region_masks(height, width, rows_regions, patch_size); + + // 步骤 4: (必须执行) 通过对像素级掩码进行采样,生成最终的块级掩码 + // 这将保证最终输出的 vector 大小是 num_patch_rows * num_patch_cols + std::vector patched_region_mask; + patched_region_mask.reserve(num_patch_rows * num_patch_cols); + + for (int i = 0; i < num_patch_rows; ++i) { + for (int j = 0; j < num_patch_cols; ++j) { + // 获取每个 patch 左上角像素的坐标 + int y_pixel = i * patch_size; + int x_pixel = j * patch_size; + // 从像素掩码中采样该点的ID,作为这个 patch 的ID + // 这等效于 Python 中的 .max(),因为同一个区域内的像素ID都相同 + patched_region_mask.push_back(pixel_mask[y_pixel * width + x_pixel]); + } + } + + if (resized) { + delete[] resized_data; + } + + return patched_region_mask; +} +// // 全局区域掩码 +// std::vector UIRegionMask; +// 定义二维点结构 + +Tensor vector3d2Tensor(vector>> img, string name = "input", BackendType type = MLLM_CPU) { + int channel = img.size(); + int height = img[0].size(); + int width = img[0][0].size(); + Tensor tensor1(1, height, channel, width, Backend::global_backends[type].get(), true); + tensor1.setName(std::move(name)); + Tensor::tensor_status = TENSOR_STATIC_INIT; + tensor1.setTtype(INPUT_TENSOR); + for (int h = 0; h < height; ++h) { + for (int c = 0; c < channel; ++c) { + for (int w = 0; w < width; ++w) { + tensor1.setDataAt(0, h, c, w, img[c][h][w]); + } + } + } + return tensor1; +} +Tensor vector3d2Tensor(vector>> img, string name = "input", BackendType type = MLLM_CPU) { + int channel = img.size(); + int height = img[0].size(); + int width = img[0][0].size(); + Tensor tensor1(1, height, channel, width, Backend::global_backends[type].get(), true); + tensor1.setName(std::move(name)); + Tensor::tensor_status = TENSOR_STATIC_INIT; + tensor1.setTtype(INPUT_TENSOR); + 
for (int h = 0; h < height; ++h) { + for (int c = 0; c < channel; ++c) { + for (int w = 0; w < width; ++w) { + tensor1.setDataAt(0, h, c, w, (float)img[c][h][w]); + } + } + } + return tensor1; +} + +class Qwen2VLImageProcessor { +public: + int merge_size = 2; + +private: + std::vector mean_ = {0.48145466, 0.4578275, 0.40821073}; + std::vector std_ = {0.26862954, 0.26130258, 0.27577711}; + int IMAGE_FACTOR = 28; + int MIN_PIXELS = 4 * 28 * 28; + int MAX_PIXELS = 16384 * 28 * 28; + int MAX_RATIO = 200; + int temporal_patch_size = 2; + int patch_size = 14; + + void viewTensor(Tensor &tensor1) { + assert(3 * 2 * 14 * 14 == tensor1.dimension()); + tensor1.reshape(tensor1.sequence(), 3, 2, 14, 14); + } + + std::pair smart_resize(int height, int width, int factor = 28, + int min_pixels = 3136, int max_pixels = 12845056) { + // Check aspect ratio condition + if (std::max(height, width) / static_cast(std::min(height, width)) > MAX_RATIO) { + throw std::invalid_argument("Absolute aspect ratio must be smaller than " + std::to_string(MAX_RATIO)); + } + + auto round_by_factor = [](int value, int f) { return ((value + f / 2) / f) * f; }; + auto floor_by_factor = [](float value, int f) { return static_cast(std::floor(value / f)) * f; }; + auto ceil_by_factor = [](float value, int f) { return static_cast(std::ceil(value / f)) * f; }; + + int h_bar = std::max(factor, round_by_factor(height, factor)); + int w_bar = std::max(factor, round_by_factor(width, factor)); + + if (h_bar * w_bar > max_pixels) { + float beta = std::sqrt((height * width) / static_cast(max_pixels)); + h_bar = floor_by_factor(height / beta, factor); + w_bar = floor_by_factor(width / beta, factor); + } else if (h_bar * w_bar < min_pixels) { + float beta = std::sqrt(min_pixels / static_cast(height * width)); + h_bar = ceil_by_factor(height * beta, factor); + w_bar = ceil_by_factor(width * beta, factor); + } + return {h_bar, w_bar}; + } + + ImageInfo fetch_image(ImageInfo &image) { + int old_height = image.height; + 
int old_width = image.width; + auto [new_height, new_width] = smart_resize(old_height, old_width, IMAGE_FACTOR, MIN_PIXELS, MAX_PIXELS); + std::vector temp_image_info = {image}; + auto image_n = PreProcessor::ResizeImages({temp_image_info}, new_height, new_width, true)[0]; + // delete[] image.data; + // image.data = nullptr; + return image_n; + } + + pair>, vector> convertPatches( + const vector>>> &imgs, + int temporal_patch_size, + int patch_size, + int merge_size, + int resized_height, + int resized_width) { + int batch = imgs.size(); + int channel = (batch == 0) ? 0 : imgs[0].size(); + vector shape = {0, 0, 0}; + // 检查输入有效性 + if (batch == 0 || channel == 0 || batch % temporal_patch_size != 0 || resized_height % patch_size != 0 || resized_width % patch_size != 0 || (resized_height / patch_size) % merge_size != 0 || (resized_width / patch_size) % merge_size != 0) { + return make_pair(vector>(), shape); + } + // 计算网格维度 + int grid_t = batch / temporal_patch_size; + int grid_h = resized_height / patch_size; + int grid_w = resized_width / patch_size; + shape = {grid_t, grid_h, grid_w}; + // 计算最终矩阵维度 + int rows = grid_t * grid_h * grid_w; + int cols = channel * temporal_patch_size * patch_size * patch_size; + vector> flatten_patches(rows, vector(cols, 0.0f)); + // 预处理常用值 + int ghm = grid_h / merge_size; + int gwm = grid_w / merge_size; + int ms = merge_size; + int area_per_row = ghm * gwm * ms * ms; + // 遍历所有输出元素 + for (int i = 0; i < rows; ++i) { + // 计算时空块坐标 + int d0 = i / area_per_row; + int remaining = i % area_per_row; + int d1 = remaining / (gwm * ms * ms); + remaining %= (gwm * ms * ms); + int d2 = remaining / (ms * ms); + remaining %= (ms * ms); + int d3 = remaining / ms; + int d4 = remaining % ms; + for (int j = 0; j < cols; ++j) { + // 解析通道和时间信息 + int d5 = j / (temporal_patch_size * patch_size * patch_size); + int remaining_j = j % (temporal_patch_size * patch_size * patch_size); + int d6 = remaining_j / (patch_size * patch_size); + remaining_j %= 
(patch_size * patch_size); + int d7 = remaining_j / patch_size; + int d8 = remaining_j % patch_size; + // 计算原始坐标 + int b = d0 * temporal_patch_size + d6; + int c = d5; + int h = ((d1 * ms + d3) * patch_size) + d7; + int w = ((d2 * ms + d4) * patch_size) + d8; + // 边界检查并赋值 + if (b < batch && c < channel && h < resized_height && w < resized_width && imgs[b].size() > c && imgs[b][c].size() > h && imgs[b][c][h].size() > w) { + flatten_patches[i][j] = imgs[b][c][h][w]; + } + } + } + + return make_pair(flatten_patches, shape); + } + +public: + explicit Qwen2VLImageProcessor() { + } + + void set_pixels(int min_pixelS = 4 * 28 * 28, int max_pixels = 16384 * 28 * 28) { + MIN_PIXELS = min_pixelS; + MAX_PIXELS = max_pixels; + } + vector> input_ids_; + pair>, vector> preprocess_images(const uint8_t *image, const size_t &image_length) { + auto imageinfos = vector(); + int width, height, channels; + auto data = stbi_load_from_memory(image, image_length, &width, &height, &channels, 0); + if (data == nullptr) { + MLLM_LOG_ERROR_STREAM << "Error: Failed to load image from memory." 
<< std::endl; + exit(-1); + } + + // 如果是 ARGB 四通道,转换为 RGB 三通道 + if (channels == 4) { + uint8_t *rgb_data = new uint8_t[width * height * 3]; + for (int i = 0; i < width * height; ++i) { + rgb_data[i * 3 + 0] = data[i * 4 + 1]; // R + rgb_data[i * 3 + 1] = data[i * 4 + 2]; // G + rgb_data[i * 3 + 2] = data[i * 4 + 3]; // B + } + stbi_image_free(data); // 释放原始 ARGB 数据 + data = rgb_data; // 替换为 RGB 数据 + channels = 3; // 更新通道数 + } + float threshold = 10.0f; + UIRegionMask = process_image_region(data, width, height, channels, threshold); + + float *f32_data = nullptr; + f32_data = PreProcessor::RescaleImage(data, 255, width * height * channels); + stbi_image_free(data); + auto image_info = ImageInfo(f32_data, width, height, channels); + image_info = fetch_image(image_info); + imageinfos.emplace_back(image_info); + imageinfos = PreProcessor::NormalizeImages(imageinfos, mean_, std_); + imageinfos.emplace_back(imageinfos[0]); + vector>>> pixel_v; + PreProcessor::ImageInfos2Pixels(imageinfos, pixel_v); + auto result_patches = convertPatches(pixel_v, + temporal_patch_size, + patch_size, + merge_size, + imageinfos[0].height, // resized_height + imageinfos[0].width // resized_width + ); + return result_patches; + } + + pair>> process(const std::vector &image, const std::vector &image_length, bool view_img = true) { + vector>> pixel_values; + vector> vision_grid_thws; + for (int i = 0; i < image.size(); i++) { + auto data = image[i]; + auto size = image_length[i]; + auto result_patches = preprocess_images(data, size); + auto flatten_patches = result_patches.first; + auto grid_thw = result_patches.second; + pixel_values.push_back(flatten_patches); + vision_grid_thws.push_back(grid_thw); + } + auto pixel_values_tensor = vector3d2Tensor(pixel_values, "pixel_values"); + if (view_img) { + assert(3 * 2 * 14 * 14 == pixel_values_tensor.dimension()); + pixel_values_tensor.reshape(pixel_values_tensor.head(), 3, 2, 14, 14); + } + return {pixel_values_tensor, vision_grid_thws}; + } + + 
pair>> process(const std::vector &images_path, bool view_img = true) { + vector>> pixel_values; + vector> vision_grid_thws; + for (const auto &i : images_path) { + // read all file contents + std::ifstream file(i, std::ios::binary | std::ios::ate); + if (!file.is_open()) { + MLLM_LOG_ERROR_STREAM << "Cannot open file: " << i << std::endl; + exit(-1); + } + auto size = file.tellg(); + auto data = new uint8_t[size]; + file.seekg(0, std::ios::beg); + file.read(reinterpret_cast(data), size); + file.close(); + auto result_patches = preprocess_images(data, size); + auto flatten_patches = result_patches.first; + auto grid_thw = result_patches.second; + pixel_values.push_back(flatten_patches); + vision_grid_thws.push_back(grid_thw); + } + auto pixel_values_tensor = vector3d2Tensor(pixel_values, "pixel_values"); + if (view_img) { + assert(3 * 2 * 14 * 14 == pixel_values_tensor.dimension()); + pixel_values_tensor.reshape(pixel_values_tensor.head(), 3, 2, 14, 14); + } + return {pixel_values_tensor, vision_grid_thws}; + } +}; + +class Qwen2VLProcessor final : public PreProcessor { + unsigned int argmax(const vector &scores) { + if (scores.empty()) { + throw std::invalid_argument("Input vector is empty"); + } + return std::max_element(scores.begin(), scores.end()) - scores.begin(); + } + // 预定义需要替换的标记 + const string IMAGE_PAD = "<|image_pad|>"; + const string PLACEHOLDER = "<|placeholder|>"; + +public: + Qwen2VLImageProcessor image_processor; + QWenTokenizer *tokenizer; + + explicit Qwen2VLProcessor(const string &vocab_path, const string &merge_path = "", + int min_pixels = 4 * 28 * 28, int max_pixels = 16384 * 28 * 28) : + PreProcessor(224, 224, true, true, true, true, {0.5}, {0.5}) { + Module::initBackend(MLLM_CPU); + tokenizer = new QWenTokenizer(vocab_path, merge_path); + tokenizer->special_tokens = { + "<|endoftext|>", + "<|im_start|>", + "<|im_end|>", + "<|object_ref_start|>", + "<|object_ref_end|>", + "<|box_start|>", + "<|box_end|>", + "<|quad_start|>", + 
"<|quad_end|>", + "<|vision_start|>", + "<|vision_end|>", + "<|vision_pad|>", + "<|image_pad|>", + "<|video_pad|>", + }; + tokenizer->setSpecialTokenMap({{"<|image_pad|>", 151655}, {"<|video_pad|>", 151656}}); + string system_prompt_start = "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n"; + string system_prompt_end = "<|im_end|>\n<|im_start|>assistant\n"; + tokenizer->set_chat_template(system_prompt_start, system_prompt_end); + image_processor.set_pixels(min_pixels, max_pixels); + } + + vector process(const string text_, string img_path, bool flatten_img = true, BackendType type = MLLM_CPU) { + string new_text = text_; + if (!img_path.empty()) { + auto image_inputs = image_processor.process({std::move(img_path)}, flatten_img); + auto pixel_values = image_inputs.first; + auto image_grid_thw = image_inputs.second; + auto merge_length = image_processor.merge_size * image_processor.merge_size; + + int index = 0; // 跟踪当前使用的网格配置索引 + const int PAD_LEN = IMAGE_PAD.length(); + const int HOLDER_LEN = PLACEHOLDER.length(); + size_t pos = 0; + // 第一阶段:替换image_pad为placeholder序列 + while (true) { + // 查找下一个需要替换的位置 + size_t found = new_text.find(IMAGE_PAD, pos); + if (found == string::npos || index >= image_grid_thw.size()) break; + // 计算需要插入的placeholder数量 + int product = 1; + for (int dim : image_grid_thw[index]) { + product *= dim; + } + int replace_num = product / merge_length; + // 构建替换字符串 + string replacement; + replacement.reserve(HOLDER_LEN * replace_num); + for (int i = 0; i < replace_num; ++i) { + replacement += PLACEHOLDER; + } + // 执行替换并更新扫描位置 + new_text.replace(found, PAD_LEN, replacement); + pos = found + replacement.length(); // 跳过已处理部分 + index++; + } + // 第二阶段:将placeholder恢复为image_pad + size_t ph_pos = 0; + while ((ph_pos = new_text.find(PLACEHOLDER, ph_pos))) { + if (ph_pos == string::npos) break; + new_text.replace(ph_pos, HOLDER_LEN, IMAGE_PAD); + ph_pos += PAD_LEN; // 跳过已替换部分 + } + auto input_tensor = 
tokenizer->tokenize(new_text); + auto image_grid_thw_tensor = vector3d2Tensor({image_grid_thw}, "image_grid_thw"); + return {input_tensor, pixel_values, image_grid_thw_tensor}; + } else { + auto input_tensor = tokenizer->tokenize(new_text); + return {input_tensor}; + } + } + + vector process(const std::string &text_, const std::vector &images, const std::vector &image_length, bool flatten_img = true, BackendType type = MLLM_CPU) { + string new_text = text_; + if (!images.empty()) { + auto image_inputs = image_processor.process(images, image_length, flatten_img); + auto pixel_values = image_inputs.first; + auto image_grid_thw = image_inputs.second; + auto merge_length = image_processor.merge_size * image_processor.merge_size; + + int index = 0; // 跟踪当前使用的网格配置索引 + const int PAD_LEN = IMAGE_PAD.length(); + const int HOLDER_LEN = PLACEHOLDER.length(); + size_t pos = 0; + // 第一阶段:替换image_pad为placeholder序列 + while (true) { + // 查找下一个需要替换的位置 + size_t found = new_text.find(IMAGE_PAD, pos); + if (found == string::npos || index >= image_grid_thw.size()) break; + // 计算需要插入的placeholder数量 + int product = 1; + for (int dim : image_grid_thw[index]) { + product *= dim; + } + int replace_num = product / merge_length; + // 构建替换字符串 + string replacement; + replacement.reserve(HOLDER_LEN * replace_num); + for (int i = 0; i < replace_num; ++i) { + replacement += PLACEHOLDER; + } + // 执行替换并更新扫描位置 + new_text.replace(found, PAD_LEN, replacement); + pos = found + replacement.length(); // 跳过已处理部分 + index++; + } + // 第二阶段:将placeholder恢复为image_pad + size_t ph_pos = 0; + while ((ph_pos = new_text.find(PLACEHOLDER, ph_pos))) { + if (ph_pos == string::npos) break; + new_text.replace(ph_pos, HOLDER_LEN, IMAGE_PAD); + ph_pos += PAD_LEN; // 跳过已替换部分 + } + auto input_tensor = tokenizer->tokenize(new_text); + auto image_grid_thw_tensor = vector3d2Tensor({image_grid_thw}, "image_grid_thw"); + return {input_tensor, pixel_values, image_grid_thw_tensor}; + } else { + auto input_tensor = 
tokenizer->tokenize(new_text); + return {input_tensor}; + } + } + + void Process(const std::string &text) override {}; + void PreProcessImages(const std::vector &images, const std::vector &image_length) override {}; + void PreProcessImages(const std::vector &images_path) override {}; + + std::string detokenize(const vector &tokens) { + return tokenizer->detokenize(tokens); + } + + std::pair detokenize(Tensor &result, int seq = 0) { + assert(result.batch() == 1 && "Batch size of result is not 1. Which is not supported for now."); + assert(result.head() == 1 && "The 3rd dim of result should be one. e.g.:[1, 1, seq, hidden]"); + vector scores; + int _dims = result.dimension(); + int _seq = seq == 0 ? result.sequence() - 1 : seq - 1; + for (int i = 0; i < _dims; ++i) { + auto value = result.dataAt(0, 0, _seq, i); + scores.push_back(value); + } + auto token_idx = this->argmax(scores); + auto text = tokenizer->detokenize({token_idx}); + text = std::regex_replace(text, std::regex("▁"), " "); + return make_pair(text, token_idx); + } +}; +#endif // PROCESSING_Qwen2VL_HPP diff --git a/mllm/models/qwen2_vl/vtp/ui_tools.hpp b/mllm/models/qwen2_vl/vtp/ui_tools.hpp new file mode 100644 index 000000000..d9e367aa4 --- /dev/null +++ b/mllm/models/qwen2_vl/vtp/ui_tools.hpp @@ -0,0 +1,88 @@ + +#ifndef UI_TOOLS_HPP +#define UI_TOOLS_HPP + +// 全局区域掩码 +#include +#include +#include +#include +#include // 需要包含 用于 std::sort 和 std::unique +#include // 需要包含 用于 uint32_t +bool use_pre_vit_merge = false; + +std::vector UIRegionMask; +// 输入类型为二维向量 [batch][patch_size] +std::vector process_region_mask(const std::vector> ®ion_masks) { + // 1. 验证批次大小是否为1 + const int batch_size = region_masks.size(); + if (batch_size != 1) { + throw std::runtime_error("Batch size must be 1"); + } + + // 存储每个唯一标签选中的索引 + std::vector selected_indices; + + // 随机数引擎 + std::random_device rd; + std::mt19937 rng(rd()); + + // 2. 
处理批次 (循环只会执行一次) + for (int batch_idx = 0; batch_idx < batch_size; ++batch_idx) { + const auto ®ion_mask = region_masks[batch_idx]; + const int patch_size = region_mask.size(); + + // 3. 获取当前批次的唯一且已排序的标签 (这是关键的修改点) + //--------------------------------------------------------- + // 旧的、基于 unordered_set 的错误方法: + // std::unordered_set unique_labels; + // for (int i = 0; i < patch_size; ++i) { + // unique_labels.insert(region_mask[i]); + // } + //--------------------------------------------------------- + + // 新的、正确的、模仿 torch.unique() 的方法: + std::vector unique_labels = region_mask; // 复制一份 + std::sort(unique_labels.begin(), unique_labels.end()); // 排序 + // 移除相邻的重复元素,并调整 vector 大小 + unique_labels.erase(std::unique(unique_labels.begin(), unique_labels.end()), unique_labels.end()); + + // 4. 为每个标签随机选择一个索引 + for (uint32_t label : unique_labels) { + // 收集所有等于当前标签的索引位置 + std::vector indices; + for (int i = 0; i < patch_size; ++i) { + if (region_mask[i] == label) { + indices.push_back(i); + } + } + + // 验证是否有区域存在 (理论上不会触发,因为标签来自 region_mask 本身) + if (indices.empty()) { + throw std::runtime_error("No region mask found for a label that should exist."); + } + + // 随机选择一个索引 + std::uniform_int_distribution dist(0, indices.size() - 1); + int selected_idx = indices[dist(rng)]; + selected_indices.push_back(selected_idx); + } + } + + // 5. 
扩展索引 (这部分逻辑你的实现是完全正确的) + // 对应 PyTorch 的: selected_indices.unsqueeze(1) * 4 + torch.arange(4) + // 和 .flatten() + std::vector final_indices; + // 预分配内存以提高效率 + final_indices.reserve(selected_indices.size() * 4); + + for (int idx : selected_indices) { + for (int ch = 0; ch < 4; ++ch) { + final_indices.push_back(idx * 4 + ch); + } + } + + return final_indices; +} + +#endif \ No newline at end of file diff --git a/src/models/qwen2_vl/vtp/vtp_tools.hpp b/mllm/models/qwen2_vl/vtp/vtp_tools.hpp similarity index 91% rename from src/models/qwen2_vl/vtp/vtp_tools.hpp rename to mllm/models/qwen2_vl/vtp/vtp_tools.hpp index 168a9cdf0..dde6c315b 100644 --- a/src/models/qwen2_vl/vtp/vtp_tools.hpp +++ b/mllm/models/qwen2_vl/vtp/vtp_tools.hpp @@ -23,14 +23,16 @@ using namespace mllm; class VtpContext { public: - void init() { + void init(Tensor input_ids, int num_hidden_layers) { + if (input_ids.sequence() <= 1) { + prefill_stage = false; + } else { + prefill_stage = true; + } if (global_selected.backend() == nullptr) global_selected = Tensor(1, 1, 1, 1, MLLM_CPU); } void set_vision_token(Tensor where_idx, Tensor hidden_states, Tensor image_embeds) { - // if (Module::llm_model_ptr->doLoad) { - // return; - // } no_visual_token_len = hidden_states.sequence() - image_embeds.sequence(); global_selected.reshape(1, 1, 1, hidden_states.sequence()); // pre_visual_token_len); global_selected.alloc(); @@ -46,9 +48,6 @@ class VtpContext { no_visual_token_len = hidden_states.sequence() - pre_visual_token_len; } bool is_prefill() { - // if (Module::llm_model_ptr->doLoad) { - // return false; - // } return prefill_stage; } void set_prefill_layer(int layer_idx_) { @@ -142,7 +141,7 @@ class VtpContext { } } } - Tensor prunning_attn_output(Tensor attn_output) { + Tensor prunning_attn_output(Tensor attn_output, int layer_idx) { if (layer_idx == 0) { return attn_output; } @@ -198,7 +197,15 @@ class VtpContext { int HEAD_TOP_K = 3; float ATTN_ACC_ALPHA = 0.2; - map pruning_setting = {{3, 0.5}}; 
//{{3, 0.5}}; + map pruning_setting = {{3, 0.5}, {8, 0.8}}; + // map pruning_setting = {{3, 0.2}, {9, 0.2}, {12, 0.4}, {18, 0.4}, {21, 0.8}, {26, 0.8}}; + // map pruning_setting = {{3, 0.2}, {9, 0.2}, {12, 0.2}, {18, 0.5}, {21, 0.5}, {26, 0.5}}; + // map pruning_setting = {{3, 0.8}, {9, 0.8}, {12, 0.8}, {18, 0.8}, {21, 0.8}, {26, 0.8}}; + // map pruning_setting = {{3, 0.5}}; + // map pruning_setting = {{3, 0.8}}; + + // 3, 9, 12, 18, 21, 26 + // 0.2, 0.2, 0.4, 0.4, 0.8, 0.8 private: // 实现 topk 功能 diff --git a/src/models/qwen3/configuration_qwen3.hpp b/mllm/models/qwen3/configuration_qwen3.hpp similarity index 82% rename from src/models/qwen3/configuration_qwen3.hpp rename to mllm/models/qwen3/configuration_qwen3.hpp index 138c758c6..64e227edc 100644 --- a/src/models/qwen3/configuration_qwen3.hpp +++ b/mllm/models/qwen3/configuration_qwen3.hpp @@ -67,6 +67,8 @@ class QWen3NameConfig : public TransformerNameConfig { throw std::runtime_error("Unsupported gemma RoPE type"); } } + _q_norm_name = "q_norm"; + _k_norm_name = "k_norm"; } std::string blk_name; @@ -103,8 +105,27 @@ struct QWen3Config : public TransformerConfig { rope_theta = 1000000.0; vocab_size = 151936; tie_embedding_words = true; - } - else if(billionsType == "4b"){ + } else if (billionsType == "0.6b-lm") { + attention_bias = false; + attention_dropout = 0.0; + bos_token_id = 151643; + eos_token_id = 151645; + head_dim = 128; + hidden_act = "silu"; + hidden_size = 1024; + initializer_range = 0.02; + intermediate_size = 3072; + max_position_embeddings = 40960; + max_window_layers = 28; + model_type = "qwen3"; + num_attention_heads = 16; + num_hidden_layers = 28; + num_key_value_heads = 8; + rms_norm_eps = 1e-6; + rope_theta = 1000000.0; + vocab_size = 151936; + tie_embedding_words = false; + } else if (billionsType == "4b") { attention_bias = false; attention_dropout = 0.0; bos_token_id = 151643; @@ -124,15 +145,14 @@ struct QWen3Config : public TransformerConfig { rope_theta = 1000000.0; vocab_size = 
151936; tie_embedding_words = true; - } - else { + } else { throw std::runtime_error("Unsupported model size"); } RoPE_type = type; }; - //这下面是赋初始默认值,上面是构造函数,构造函数中的值会覆盖掉初始默认值 - + // 这下面是赋初始默认值,上面是构造函数,构造函数中的值会覆盖掉初始默认值 + bool attention_bias = false; float attention_dropout = 0.0; int bos_token_id = 151643; @@ -151,12 +171,11 @@ struct QWen3Config : public TransformerConfig { double rms_norm_eps = 1e-6; float rope_theta = 1000000.0; int vocab_size = 151936; - bool tie_embedding_words = true; - + bool tie_embedding_words = true; int cache_limit; RoPEType RoPE_type = RoPEType::HFHUBROPE; QWen3NameConfig names_config; }; -#endif +#endif diff --git a/src/models/qwen3/modeling_qwen3.hpp b/mllm/models/qwen3/modeling_qwen3.hpp similarity index 55% rename from src/models/qwen3/modeling_qwen3.hpp rename to mllm/models/qwen3/modeling_qwen3.hpp index 83e304518..1ed6a27a1 100644 --- a/src/models/qwen3/modeling_qwen3.hpp +++ b/mllm/models/qwen3/modeling_qwen3.hpp @@ -16,8 +16,10 @@ #include "Layer.hpp" #include "Module.hpp" #include "Tensor.hpp" +#include "Types.hpp" #include "configuration_qwen3.hpp" #include +#include "models/transformer/modeling_transformer.hpp" using namespace mllm; class QWen3MLP final : public Module { @@ -50,106 +52,20 @@ class QWen3MLP final : public Module { Layer silu; }; -class QWen3Attention final : public Module { -public: - QWen3Attention() = default; - QWen3Attention(const QWen3Config &config, const QWen3NameConfig &names, const string &base_name) { - hidden_size = config.hidden_size; - num_heads = config.num_attention_heads; - head_dim = config.head_dim; // 这里config中有head_dim,不等于相除的结果 - num_key_value_heads = config.num_key_value_heads; - num_key_value_groups = num_heads / num_key_value_heads; - rms_norm_eps = config.rms_norm_eps; - // init layers - q_proj = Linear(hidden_size, num_heads * head_dim, config.attention_bias, base_name + names._q_proj_name); - k_proj = Linear(hidden_size, num_key_value_heads * head_dim, config.attention_bias, - 
base_name + names._k_proj_name); - v_proj = Linear(hidden_size, num_key_value_heads * head_dim, config.attention_bias, - base_name + names._v_proj_name); - o_proj = Linear(num_heads * head_dim, hidden_size, false, base_name + names._o_proj_name); - - // 增加了RMSNorm - q_norm = RMSNorm(head_dim, rms_norm_eps, base_name + "q_norm"); - k_norm = RMSNorm(head_dim, rms_norm_eps, base_name + "k_norm"); - // 滑动窗口禁用 - - q_rope = RoPE(config.RoPE_type, config.rope_theta, config.max_position_embeddings, - base_name + "q_rope"); - k_rope = RoPE(config.RoPE_type, config.rope_theta, config.max_position_embeddings, - base_name + "k_rope"); - k_cache = KVCache(num_key_value_heads, head_dim, num_key_value_groups, config.cache_limit, base_name + "k_cache"); - v_cache = KVCache(num_key_value_heads, head_dim, num_key_value_groups, config.cache_limit, base_name + "v_cache"); - softmax = Softmax(DIMENSION, true, base_name + "softmax"); - } - - std::vector Forward(std::vector inputs, std::vector args) override { - auto query_states = q_proj(inputs[0]); - auto key_states = k_proj(inputs[1]); - auto value_states = v_proj(inputs[2]); - - // [batch, heads, sequence, dims] - query_states = query_states.view(-1, num_heads, -1, head_dim); - key_states = key_states.view(-1, num_key_value_heads, -1, head_dim); - value_states = value_states.view(-1, num_key_value_heads, -1, head_dim); - - // 加正则化 - query_states = q_norm(query_states); - key_states = k_norm(key_states); - - // embedding - query_states = q_rope(query_states); - key_states = k_rope(key_states); - - // kv cache - key_states = k_cache(key_states); - value_states = v_cache(value_states); - - // attention weight - auto atten_weight = - Tensor::mm(query_states, key_states.transpose(Chl::SEQUENCE, Chl::DIMENSION)) - / std::sqrt(head_dim); - atten_weight = softmax(atten_weight, k_cache.getCacheSeqLen()); - - // attention output - auto atten_output = Tensor::mm(atten_weight, value_states); - atten_output = atten_output.view(-1, 1, -1, head_dim 
* num_heads); - atten_output = o_proj(atten_output); - return {atten_output}; - } - - vector get_cache() { - return {&k_cache, &v_cache}; - } - vector get_rope() { - return {&q_rope, &k_rope}; - } - -private: - int hidden_size; - int num_heads; - int head_dim; - int num_key_value_heads; - int num_key_value_groups; - double rms_norm_eps; - Layer q_proj; - Layer k_proj; - Layer v_proj; - Layer o_proj; - Layer q_norm; - Layer k_norm; - RoPE q_rope; - RoPE k_rope; - KVCache k_cache; - KVCache v_cache; - // Causalmask mask; - Softmax softmax; -}; - class QWen3Decoder final : public Module { public: QWen3Decoder() = default; QWen3Decoder(const QWen3Config &config, const QWen3NameConfig &names, const string &base_name) { - self_atten = QWen3Attention(config, names, base_name + names._attn_base_name); + // 这里config中有head_dim,不等于相除的结果 + self_atten = MultiHeadAttention(config.hidden_size, config.num_attention_heads, + config.num_key_value_heads, config.head_dim, + SPLIT_NONE, PostQkv_RMSNorm, false, + config.RoPE_type, config.rope_theta, + config.max_position_embeddings, + config.cache_limit, + true, config.attention_bias, false, + config.attn_implementation, names, + base_name + names._attn_base_name); mlp = QWen3MLP(config.hidden_size, config.intermediate_size, names, base_name + names._ffn_base_name); input_layernorm = @@ -168,12 +84,12 @@ class QWen3Decoder final : public Module { return {x}; } - QWen3Attention &get_attention() { + MultiHeadAttention &get_attention() { return self_atten; } private: - QWen3Attention self_atten; + MultiHeadAttention self_atten; QWen3MLP mlp; Layer input_layernorm; Layer post_attention_layernorm; diff --git a/mllm/models/smallthinker/configuration_smallthinker.hpp b/mllm/models/smallthinker/configuration_smallthinker.hpp new file mode 100644 index 000000000..4493202aa --- /dev/null +++ b/mllm/models/smallthinker/configuration_smallthinker.hpp @@ -0,0 +1,71 @@ +#ifndef CONFIG_SMOLTHINKER_HPP +#define CONFIG_SMOLTHINKER_HPP +#include 
"models/transformer/configuration_transformer.hpp" + +using namespace mllm; + +class SmallThinkerNameConfig : public TransformerNameConfig { +public: + std::string blk_name; + std::string token_embd_name; + std::string post_norm_name; + std::string lm_head_name; + std::string _gate_proj_name; + + void init() { + blk_name = "model.layers."; + _attn_base_name = "self_attn."; + _ffn_base_name = "block_sparse_moe."; + _q_proj_name = "q_proj"; + _k_proj_name = "k_proj"; + _v_proj_name = "v_proj"; + _o_proj_name = "o_proj"; + _gate_proj_name = "gate"; + _up_proj_name = "up"; + _down_proj_name = "down"; + _attn_norm_name = "input_layernorm"; + _ffn_norm_name = "post_attention_layernorm"; + token_embd_name = "model.embed_tokens"; + post_norm_name = "model.norm"; + lm_head_name = "lm_head"; + } +}; + +struct SmallThinkerConfig : public TransformerConfig { + explicit SmallThinkerConfig(int token_limit, string billions = "4BA0.6B") : + cache_limit(token_limit) { + names_config.init(); + string billionsType; + std::transform(billions.begin(), billions.end(), std::back_inserter(billionsType), + ::tolower); + if (billionsType == "4ba0.6b") { + } + if (billionsType == "4ba0.6b-lm") { + tie_embedding_words = false; + } else { + throw std::runtime_error("Unsupported model size"); + } + } + + int num_experts = 32; + int num_experts_per_tok = 4; + + // std::string hidden_act = "relu"; + int hidden_size = 1536; + int intermediate_size = 768; + int max_position_embeddings = 32768; + int num_hidden_layers = 32; + int num_attention_heads = 12; + int num_key_value_heads = 2; + double rms_norm_eps = 1e-06; + float rope_theta = 1.5e6; + int vocab_size = 151936; + int head_dim = 128; // hidden_size/num_attention_heads + + int cache_limit; + RoPEType RoPE_type = RoPEType::HFHUBROPE; + SmallThinkerNameConfig names_config; + bool tie_embedding_words = true; // false; +}; + +#endif // CONFIG_SMOLTHINKER_HPP diff --git a/mllm/models/smallthinker/mbp/modeling_smallthinker_mbp.hpp 
b/mllm/models/smallthinker/mbp/modeling_smallthinker_mbp.hpp new file mode 100644 index 000000000..581188246 --- /dev/null +++ b/mllm/models/smallthinker/mbp/modeling_smallthinker_mbp.hpp @@ -0,0 +1,433 @@ +#ifndef MODELING_SMOLTHINKER_HPP +#define MODELING_SMOLTHINKER_HPP + +#include "Layer.hpp" +#include "Module.hpp" +#include "Tensor.hpp" +#include "Types.hpp" +#include "../configuration_smallthinker.hpp" +#include "settings_smallthinker_mbp.hpp" +#include "models/transformer/modeling_transformer.hpp" +#include +#include +#include +#include +// #include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(__ARM_NEON) && !defined(__APPLE__) +#include +#include +#include +#include +#endif + +#define MBP_THREAD + +using namespace mllm; + +class SmallThinkerMLP final : public Module { +public: + SmallThinkerMLP() = default; + SmallThinkerMLP(int hidden_size, int intermediate_size, const SmallThinkerNameConfig &names, const std::string &base_name) { + gate_proj = Linear(hidden_size, intermediate_size, false, base_name + names._gate_proj_name); + relu = ReLU(base_name + "relu"); + up_proj = Linear(hidden_size, intermediate_size, false, base_name + names._up_proj_name); + down_proj = Linear(intermediate_size, hidden_size, false, base_name + names._down_proj_name); + } + + std::vector Forward(std::vector inputs, std::vector args) override { + auto x = gate_proj(inputs[0]); + x = relu(x); + auto y = up_proj(inputs[0]); + x = x * y; + x = down_proj(x); + return {x}; + } + + void load() { + gate_proj.load(); + up_proj.load(); + down_proj.load(); + } + bool loaded() { + return gate_proj.loaded() && up_proj.loaded() && down_proj.loaded(); + } + void free() { + gate_proj.free(); + up_proj.free(); + down_proj.free(); + } + +private: + Layer gate_proj; + Layer up_proj; + Layer down_proj; + Layer relu; +}; + +class SmallThinkerMoeBlock final : public Module { +public: + SmallThinkerMoeBlock() = default; + SmallThinkerMoeBlock(const 
SmallThinkerConfig &config, const SmallThinkerNameConfig &names, const string &base_name) { + experts = List(config.num_experts, config.hidden_size, config.intermediate_size, names, base_name + "experts."); + // primary_router = Linear(config.hidden_size, config.num_experts, false, base_name + "primary_router"); + sigmoid = Sigmoid(base_name + "sigmoid"); + num_experts_per_tok = config.num_experts_per_tok; + num_hidden_layers = config.num_hidden_layers; + } + std::vector Forward(std::vector inputs, std::vector args) override { + auto hidden_states = inputs[0]; + int layer_idx = std::any_cast(args[0]); + if (hidden_states.batch() > 1) hidden_states = hidden_states.view(1, -1, ANYDIM, -1); // 1, batch*seq, 1, hidden + auto router_logits = inputs[1]; + auto expert_indices = inputs[2]; + auto expert_weights = sigmoid(router_logits); + expert_weights = expert_weights / expert_weights.sum(DIMENSION); // 1, batch*seq, 1, k + expert_weights = expert_weights.view(-1, -1, 1, 1); // 1, k* batch*seq, 1, 1 + // moe_infer + auto idxs = expert_indices.argsort(); // 1, 1, 1, k* batch*seq + auto tokens_per_expert = expert_indices.bincount(); // (1, 1, 1, 0) 1, 1, 1, k + auto token_idxs = idxs / num_experts_per_tok; // 1, 1, 1, k* batch*seq + int start_idx = 0; + int end_idx = start_idx; + auto expert_cache = Tensor::zero_like(hidden_states); // 1, batch*seq, 1, hidden + for (int i = 0; i < experts.size(); ++i) { + if (Module::llm_model_ptr->doTrace || (tokens_per_expert.dimension() != 0 && i >= tokens_per_expert.dimension())) { + break; + } + int this_token_num = tokens_per_expert.dimension() == 0 ? 
+ 0 : + tokens_per_expert.d(0, 0, 0, i); + if (tokens_per_expert.dimension() != 0 && this_token_num == 0) + continue; + end_idx = start_idx + this_token_num; + // + auto exp_token_idx = token_idxs.clip({}, {}, {}, {start_idx, end_idx}); //(1, 1, 1, 0) 1, 1, 1, e-s + auto exp_idx = idxs.clip({}, {}, {}, {start_idx, end_idx}); //(1, 1, 1, 0) 1, 1, 1, e-s + + // step.1 - 裁剪数据 + double time_start_ = (mllm_time_us() - start_time) / 1000.0F; // ms + auto expert_tokens = hidden_states.clip(exp_token_idx, SEQUENCE); //(1, 0, 1, hidden) 1, e-s, 1, hidden + auto expert_weights_clip = expert_weights.clip(exp_idx, SEQUENCE); //(1, 0, 1, 1) 1, e-s, 1, 1 + + string expert_name_ = std::to_string(layer_idx) + "_" + std::to_string(i); + double time_end_ = (mllm_time_us() - start_time) / 1000.0F; // ms + expert_clip_times[expert_name_] = {time_start_, time_end_}; + +#ifdef MBP_THREAD + // step.2 - 等待加载完成 + double time_start_w = (mllm_time_us() - start_time) / 1000.0F; // ms + if (!experts[i].loaded()) { + unique_lock lock(*mtxs[layer_idx][i]); // 局部锁 + cvs[layer_idx][i]->wait(lock, [&] { + return dones[layer_idx][i].load(memory_order_acquire); + }); + assert(dones[layer_idx][i]); + } + double time_end_w = (mllm_time_us() - start_time) / 1000.0F; // ms + expert_wait_times[expert_name_] = {time_start_w, time_end_w}; +#endif + auto time_start__ = (mllm_time_us()); // ms + double time_start = (time_start__ - start_time) / 1000.0F; // ms + + // step.3 - 专家计算 + auto expert_out = experts[i]({expert_tokens})[0]; //(1, 0, 1, hidden) 1, e-s, 1, + expert_out = expert_out * expert_weights_clip; //(1, 0, 1, hidden) 1, e-s, 1, hidden + expert_cache.scatter_add(expert_out, exp_token_idx); // 1, batch*seq, 1, hidden + + // step.4 - 释放专家内存 + experts[i].free(); + + string expert_name = std::to_string(layer_idx) + "_" + std::to_string(i); + auto time_end__ = (mllm_time_us()); // ms + double time_end = (time_end__ - start_time) / 1000.0F; // ms + expert_cal_times[expert_name] = {time_start, time_end}; 
+ +#ifdef MBP_THREAD + dones[layer_idx][i] = false; // 重置状态 +#endif + + start_idx = end_idx; + } + + if (hidden_states.batch() > 1) { + // expert_cache.view(ANYDIM, seq, -1, -1);//TODO + } + return {expert_cache}; + } + + void load_experts(int expert_idx) { + experts[expert_idx].load(); + } + +private: + std::vector experts; + // Layer primary_router; + Layer sigmoid; + int num_experts_per_tok{}; + int num_hidden_layers{}; +}; + +class SmallThinkerDecoder final : public Module { +public: + SmallThinkerDecoder() = default; + SmallThinkerDecoder(const SmallThinkerConfig &config, const SmallThinkerNameConfig &names, const string &base_name) { + self_atten = MultiHeadAttention(config.hidden_size, config.num_attention_heads, + config.num_key_value_heads, + config.hidden_size / config.num_attention_heads, + SPLIT_NONE, PostQkv_NONE, false, + config.RoPE_type, config.rope_theta, config.max_position_embeddings, config.cache_limit, + true, false, false, + config.attn_implementation, names, base_name + names._attn_base_name); + block_sparse_moe = SmallThinkerMoeBlock(config, names, base_name + names._ffn_base_name); + input_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._attn_norm_name); + post_attention_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._ffn_norm_name); + num_hidden_layers = config.num_hidden_layers; + primary_router = Linear(config.hidden_size, config.num_experts, false, base_name + names._ffn_base_name + "primary_router"); + num_experts_per_tok = config.num_experts_per_tok; + } + + std::vector Forward(std::vector inputs, std::vector args) override { + auto router_input = inputs[0]; + int layer_idx = std::any_cast(args[0]); + if (router_input.batch() > 1) router_input = router_input.view(1, -1, 1, -1); // 1, batch*seq, 1, hidden + auto router_logits = primary_router(router_input); // 1, batch*seq, 1, num_experts + auto experts_w_i = Tensor::topk(router_logits, num_experts_per_tok, DIMENSION); + 
router_logits = experts_w_i[0]; // 1, batch*seq, 1, k + auto expert_indices = experts_w_i[1]; // 1, batch*seq, 1, k + expert_indices = expert_indices.view(-1, 1, 1, -1); // 1, 1, 1, k* batch*seq + if (expert_indices.dimension()) { + auto start_ptr = expert_indices.ptrAt(0, 0, 0, 0); + auto ptr_len = expert_indices.dimension(); + std::vector unique_experts; + std::set seen_experts; + for (int i = 0; i < ptr_len; ++i) { + float expert_id = start_ptr[i]; + if (seen_experts.find(expert_id) == seen_experts.end()) { + seen_experts.insert(expert_id); + unique_experts.push_back((int)expert_id); + } + } + std::sort(unique_experts.begin(), unique_experts.end()); + for (int e_i = 0; e_i < unique_experts.size(); ++e_i) { + auto expert_id = unique_experts[e_i]; + // 向加载队列申请加载 layer_idx的 expert_id专家 +#ifdef MBP_THREAD + LoadRequest req{layer_idx, expert_id}; + { + lock_guard lk(queue_mutex); + load_requests.push(req); + } + queue_cv.notify_one(); // 通知加载线程 +#endif + // std::cout << "layer " << layer_idx << " Request loading expert id: " << expert_id << std::endl; + } + } + auto hidden_states = input_layernorm(router_input); + hidden_states = self_atten({hidden_states, hidden_states, hidden_states})[0]; + auto residual = hidden_states + inputs[0]; + hidden_states = post_attention_layernorm(residual); + hidden_states = block_sparse_moe({hidden_states, router_logits, expert_indices}, layer_idx)[0]; + hidden_states = hidden_states + residual; + return {hidden_states}; + } + + MultiHeadAttention &get_attention() { + return self_atten; + } + + void load_experts(int expert_idx) { + block_sparse_moe.load_experts(expert_idx); + } + +private: + MultiHeadAttention self_atten; + SmallThinkerMoeBlock block_sparse_moe; + Layer input_layernorm; + Layer post_attention_layernorm; + Layer primary_router; + int num_hidden_layers; + int num_experts_per_tok{}; +}; + +class SmallThinkerModel final : public Module { +public: + SmallThinkerModel() = default; + SmallThinkerModel(const SmallThinkerConfig 
&config, const SmallThinkerNameConfig &names, const string &base_name) { + blocks = List(config.num_hidden_layers, config, names, base_name); + norm = RMSNorm(config.hidden_size, config.rms_norm_eps, names.post_norm_name); + } + std::vector Forward(std::vector inputs, std::vector args) override { + auto hidden_states = inputs[0]; + int layer_idx = 0; + for (auto &block : blocks) { + hidden_states = block({hidden_states}, layer_idx)[0]; + layer_idx++; + } + hidden_states = norm(hidden_states); + return {hidden_states}; + } + + void clear_kvcache() override { + for (auto &block : blocks) { + auto kvcache = block.get_attention().get_cache(); + for (auto &cache : kvcache) { cache->clearCache(); } + auto ropes = block.get_attention().get_rope(); + for (auto &rope : ropes) { rope->clearCache(); } + } + } + + void load_experts(int layer_idx, int expert_idx) { + blocks[layer_idx].load_experts(expert_idx); + } + +private: + std::vector blocks; + Layer norm; +}; + +class SmallThinkerForCausalLM final : public Module { +public: + CHAINABLE_MODULE_METHODS(SmallThinkerForCausalLM) + SmallThinkerForCausalLM(SmallThinkerConfig &config) { + auto names = config.names_config; + hidden_size = config.hidden_size; + tie_embedding_words = config.tie_embedding_words; + embedding = Embedding(config.vocab_size, config.hidden_size, names.token_embd_name); + model = SmallThinkerModel(config, names, names.blk_name); + if (tie_embedding_words) { + lm_head = Parameter(1, config.vocab_size, 1, config.hidden_size, names.token_embd_name + ".weight"); + } else { + lm_head_layer = Linear(config.hidden_size, config.vocab_size, false, names.lm_head_name); + } + + // 初始化 mbp 相关变量 + // mbp_init(config.num_hidden_layers, config.num_experts); + } + + std::vector Forward(std::vector inputs, std::vector args) override { + std::vector outputs; + clearMBPtimes(); +#ifdef MBP_THREAD + start_time = mllm_time_us(); + mbp_finish.store(false, std::memory_order_relaxed); + if (inputs[0].dimension() == 1) { + 
omp_set_max_active_levels(2); // Enable OpenMP nesting +#pragma omp parallel num_threads(2) + if (omp_get_thread_num() == 0) { // 根据线程ID决定执行哪个函数 +#if defined(__ARM_NEON) && !defined(__APPLE__) + { + struct sched_param param; + param.sched_priority = 20; // 范围 1–99,根据设备可酌情调整 + pthread_setschedparam(pthread_self(), SCHED_FIFO, ¶m); + } + // ─── 2. 绑定到大核(big cluster)以减少与小核的资源争用 ────────────── + { + cpu_set_t cpuset; + CPU_ZERO(&cpuset); + // 假设大核是 CPU 2–3,按实际设备改为合适的核号 + CPU_SET(2, &cpuset); + CPU_SET(3, &cpuset); + // CPU_SET(6, &cpuset); // 假设小核心是CPU 6 + sched_setaffinity(pthread_self(), sizeof(cpuset), &cpuset); + // sched_setaffinity(gettid(), sizeof(cpu_set_t), &cpuset); + } +#endif + mbp_load(); + } else { + outputs = do_Forward(inputs, args); + } + } else { +#endif + outputs = do_Forward(inputs, args); +#ifdef MBP_THREAD + } +#endif + return outputs; + } + void clear_kvcache() override { + model.clear_kvcache(); + } + + std::vector do_Forward(std::vector inputs, std::vector args) { + auto x = embedding(inputs[0]); + auto outputs = model({x})[0]; + if (outputs.sequence() > 1) { + outputs = outputs.clip({}, {}, {-1}, {}); + } + if (tie_embedding_words) { + outputs = Tensor::mm(outputs, lm_head().transpose(Chl::SEQUENCE, Chl::DIMENSION)); + } else { + outputs = lm_head_layer(outputs); + } + +#ifdef MBP_THREAD + // 设置 mbp_finish 为 true,结束 mbp_load 线程 + // 1. 设置内存序保证可见性 + mbp_finish.store(true, std::memory_order_release); // 改为 release 内存序 + // 2. 主动唤醒所有等待线程 + { + std::lock_guard lk(queue_mutex); + queue_cv.notify_all(); // 必须加锁后通知 + } + // 3. 
添加二次状态检查(可选) + std::atomic_thread_fence(std::memory_order_seq_cst); +#endif + return {outputs}; + } + + void load_experts(int layer_idx, int expert_idx) { + model.load_experts(layer_idx, expert_idx); + } + + void mbp_load() { + while (!mbp_finish.load(std::memory_order_acquire)) { + std::unique_lock lk(queue_mutex); + queue_cv.wait(lk, [this] { + return !load_requests.empty() || mbp_finish.load(std::memory_order_acquire); + }); + + if (mbp_finish.load(std::memory_order_acquire)) { + break; + } + + while (!load_requests.empty()) { + auto req = load_requests.front(); + load_requests.pop(); + lk.unlock(); // 释放锁以便其他线程入队 + { // 执行加载 + std::unique_lock expert_lk(*mtxs[req.layer][req.expert]); + if (!dones[req.layer][req.expert].load(std::memory_order_acquire)) { + double time_start = (mllm_time_us() - start_time) / 1000.0F; // ms + + load_experts(req.layer, req.expert); + dones[req.layer][req.expert].store(true, std::memory_order_release); + + string expert_name = std::to_string(req.layer) + "_" + std::to_string(req.expert); + double time_end = (mllm_time_us() - start_time) / 1000.0F; // ms + load_times[expert_name] = {time_start, time_end}; + } + } + cvs[req.layer][req.expert]->notify_all(); + lk.lock(); // 重新获取锁处理下一个请求 + } + } + } + +private: + int hidden_size; + bool tie_embedding_words; + Layer embedding; + Parameter lm_head; + Layer lm_head_layer; + SmallThinkerModel model; +}; + +#endif // MODELING_SMOLTHINKER_HPP \ No newline at end of file diff --git a/mllm/models/smallthinker/mbp/settings_smallthinker_mbp.hpp b/mllm/models/smallthinker/mbp/settings_smallthinker_mbp.hpp new file mode 100644 index 000000000..cb6f38018 --- /dev/null +++ b/mllm/models/smallthinker/mbp/settings_smallthinker_mbp.hpp @@ -0,0 +1,145 @@ +#pragma once +// #include +// #include +#include +// #include +// #include +#include +#include +#include +#include +#include +#include +#include +#include +#include "Tensor.hpp" + +using namespace std; +using namespace mllm; + +int mbp_load_layer_idx; 
+int mbp_load_expert_idx; + +struct LoadRequest { + int layer; + int expert; +}; +queue load_requests; // 替换原do_mbp_load相关变量 +mutex queue_mutex; // 队列互斥锁 +condition_variable queue_cv; // 队列条件变量 + +// ========= Clip Thread Globals (NEW) ========= +// 1. 新增 ClipRequest 结构体 +struct ClipRequest { + int layer; + int expert; + Tensor hidden_states; + Tensor exp_token_idx; + Tensor topk_weight; + Tensor exp_idx; +}; +// 2. 新增 clip 线程的任务队列、锁和条件变量 +queue clip_requests; +mutex clip_queue_mutex; +condition_variable clip_queue_cv; +// 3. 新增用于存储 clip 结果的 map 和其互斥锁 +map> clipped_data; +mutex clip_results_mutex; +//============ End Clip Thread Globals ============ + +atomic mbp_finish{false}; // 改为原子布尔 + +vector>> mtxs; // 每个层和专家一个互斥锁 +vector>> cvs; // 每个层和专家一个条件变量 +vector>> dones; // 原子布尔保证可见性 + +// --- Clipping Primitives (NEW) --- +// 4. 新增 clip 线程的同步对象 +vector>> clip_mtxs; +vector>> clip_cvs; +vector>> clip_dones; + +// 修改 MAP_MINICPMMOE_MBP_HPP 中的相关部分 + +inline void reset_syntax_mbm(int layer_idx, int expert_idx) { + // 使用原子操作重置状态 + dones[layer_idx][expert_idx].store(false, std::memory_order_release); +} + +inline void mbp_init(int num_layers, int num_experts) { + // 初始化 loading 相关的变量 + mtxs.resize(num_layers); + cvs.resize(num_layers); + dones.resize(num_layers); + for (int i = 0; i < num_layers; ++i) { + mtxs[i].resize(num_experts); + cvs[i].resize(num_experts); + dones[i] = std::vector>(num_experts); + for (int j = 0; j < num_experts; ++j) { + mtxs[i][j] = make_unique(); + cvs[i][j] = make_unique(); + dones[i][j].store(false, std::memory_order_relaxed); + } + } + // 初始化 clipping 相关的变量 + clip_mtxs.resize(num_layers); + clip_cvs.resize(num_layers); + clip_dones.resize(num_layers); + for (int i = 0; i < num_layers; ++i) { + clip_mtxs[i].resize(num_experts); + clip_cvs[i].resize(num_experts); + clip_dones[i] = std::vector>(num_experts); + for (int j = 0; j < num_experts; ++j) { + clip_mtxs[i][j] = make_unique(); + clip_cvs[i][j] = make_unique(); + 
clip_dones[i][j].store(false, std::memory_order_relaxed); + } + } +} + +map> load_times; +map> expert_cal_times; +map> expert_clip_times; +map> expert_wait_times; +uint64_t start_time; +void clearMBPtimes() { + load_times.clear(); + expert_cal_times.clear(); + expert_clip_times.clear(); + expert_wait_times.clear(); + clipped_data.clear(); + start_time = 0; +} +void prinMBPtimes(string start_word = "") { + double load_times_cal = 0; + cout << "load_times = [" << endl; + for (const auto &entry : load_times) { + if (start_word.empty() || entry.first.substr(0, start_word.length()) == start_word) { + cout << "(\"" << entry.first << "\" , " << entry.second.first << ", " << entry.second.second << ")," << endl; + } + load_times_cal += entry.second.second - entry.second.first; + } + cout << "]" << endl; + cout << "calc_times = [" << endl; + for (const auto &entry : expert_cal_times) { + if (start_word.empty() || entry.first.substr(0, start_word.length()) == start_word) { + cout << "(\"" << entry.first << "\" , " << entry.second.first << ", " << entry.second.second << ")," << endl; + } + } + cout << "]" << endl; + cout << "clip_times = [" << endl; + for (const auto &entry : expert_clip_times) { + if (start_word.empty() || entry.first.substr(0, start_word.length()) == start_word) { + cout << "(\"" << entry.first << "\" , " << entry.second.first << ", " << entry.second.second << ")," << endl; + } + } + cout << "]" << endl; + cout << "wait_times = [" << endl; + for (const auto &entry : expert_wait_times) { + if (start_word.empty() || entry.first.substr(0, start_word.length()) == start_word) { + cout << "(\"" << entry.first << "\" , " << entry.second.first << ", " << entry.second.second << ")," << endl; + } + } + cout << "]" << endl; + std::cout << "load_times_cal = " << load_times_cal << "ms" << endl; +} \ No newline at end of file diff --git a/mllm/models/smallthinker/modeling_smallthinker.hpp b/mllm/models/smallthinker/modeling_smallthinker.hpp new file mode 100644 index 
000000000..85e16e814 --- /dev/null +++ b/mllm/models/smallthinker/modeling_smallthinker.hpp @@ -0,0 +1,218 @@ +#ifndef MODELING_SMOLTHINKER_HPP +#define MODELING_SMOLTHINKER_HPP + +#include "Layer.hpp" +#include "Module.hpp" +#include "Tensor.hpp" +#include "Types.hpp" +#include "configuration_smallthinker.hpp" +#include "models/transformer/modeling_transformer.hpp" +#include +using namespace mllm; + +class SmallThinkerMLP final : public Module { +public: + SmallThinkerMLP() = default; + SmallThinkerMLP(int hidden_size, int intermediate_size, const SmallThinkerNameConfig &names, const std::string &base_name) { + gate_proj = Linear(hidden_size, intermediate_size, false, base_name + names._gate_proj_name); + relu = ReLU(base_name + "relu"); + up_proj = Linear(hidden_size, intermediate_size, false, base_name + names._up_proj_name); + down_proj = Linear(intermediate_size, hidden_size, false, base_name + names._down_proj_name); + } + + std::vector Forward(std::vector inputs, std::vector args) override { + auto x = gate_proj(inputs[0]); + x = relu(x); + auto y = up_proj(inputs[0]); + x = x * y; + x = down_proj(x); + return {x}; + } + +private: + Layer gate_proj; + Layer up_proj; + Layer down_proj; + Layer relu; +}; + +class SmallThinkerMoeBlock final : public Module { +public: + SmallThinkerMoeBlock() = default; + SmallThinkerMoeBlock(const SmallThinkerConfig &config, const SmallThinkerNameConfig &names, const string &base_name) { + experts = List(config.num_experts, config.hidden_size, config.intermediate_size, names, base_name + "experts."); + // primary_router = Linear(config.hidden_size, config.num_experts, false, base_name + "primary_router"); + sigmoid = Sigmoid(base_name + "sigmoid"); + num_experts_per_tok = config.num_experts_per_tok; + } + std::vector Forward(std::vector inputs, std::vector args) override { + auto hidden_states = inputs[0]; + if (hidden_states.batch() > 1) hidden_states = hidden_states.view(1, -1, ANYDIM, -1); // 1, batch*seq, 1, hidden + auto 
router_logits = inputs[1]; + auto expert_indices = inputs[2]; + auto expert_weights = sigmoid(router_logits); + expert_weights = expert_weights / expert_weights.sum(DIMENSION); // 1, batch*seq, 1, k + expert_weights = expert_weights.view(-1, -1, 1, 1); // 1, k* batch*seq, 1, 1 + // moe_infer + auto idxs = expert_indices.argsort(); // 1, 1, 1, k* batch*seq + auto tokens_per_expert = expert_indices.bincount(); // (1, 1, 1, 0) 1, 1, 1, k + auto token_idxs = idxs / num_experts_per_tok; // 1, 1, 1, k* batch*seq + int start_idx = 0; + int end_idx = start_idx; + auto expert_cache = Tensor::zero_like(hidden_states); // 1, batch*seq, 1, hidden + for (int i = 0; i < experts.size(); ++i) { + if (tokens_per_expert.dimension() != 0 && i >= tokens_per_expert.dimension()) + break; + int this_token_num = tokens_per_expert.dimension() == 0 ? + 0 : + tokens_per_expert.d(0, 0, 0, i); + if (tokens_per_expert.dimension() != 0 && this_token_num == 0) + continue; + end_idx = start_idx + this_token_num; + // + auto exp_token_idx = token_idxs.clip({}, {}, {}, {start_idx, end_idx}); //(1, 1, 1, 0) 1, 1, 1, e-s + auto exp_idx = idxs.clip({}, {}, {}, {start_idx, end_idx}); //(1, 1, 1, 0) 1, 1, 1, e-s + auto expert_tokens = hidden_states.clip(exp_token_idx, SEQUENCE); //(1, 0, 1, hidden) 1, e-s, 1, hidden + auto expert_out = experts[i]({expert_tokens})[0]; //(1, 0, 1, hidden) 1, e-s, 1, + auto expert_weights_clip = expert_weights.clip(exp_idx, SEQUENCE); //(1, 0, 1, 1) 1, e-s, 1, 1 + expert_out = expert_out * expert_weights_clip; //(1, 0, 1, hidden) 1, e-s, 1, hidden + expert_cache.scatter_add(expert_out, exp_token_idx); // 1, batch*seq, 1, hidden + // + start_idx = end_idx; + } + if (hidden_states.batch() > 1) { + // expert_cache.view(ANYDIM, seq, -1, -1);//TODO + } + return {expert_cache}; + } + +private: + std::vector experts; + // Layer primary_router; + Layer sigmoid; + int num_experts_per_tok{}; +}; + +class SmallThinkerDecoder final : public Module { +public: + SmallThinkerDecoder() = 
default; + SmallThinkerDecoder(const SmallThinkerConfig &config, const SmallThinkerNameConfig &names, const string &base_name) { + self_atten = MultiHeadAttention(config.hidden_size, config.num_attention_heads, + config.num_key_value_heads, + config.hidden_size / config.num_attention_heads, + SPLIT_NONE, PostQkv_NONE, false, + config.RoPE_type, config.rope_theta, config.max_position_embeddings, config.cache_limit, + true, false, false, + config.attn_implementation, names, base_name + names._attn_base_name); + block_sparse_moe = SmallThinkerMoeBlock(config, names, base_name + names._ffn_base_name); + input_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._attn_norm_name); + post_attention_layernorm = RMSNorm(config.hidden_size, config.rms_norm_eps, base_name + names._ffn_norm_name); + num_hidden_layers = config.num_hidden_layers; + primary_router = Linear(config.hidden_size, config.num_experts, false, base_name + names._ffn_base_name + "primary_router"); + num_experts_per_tok = config.num_experts_per_tok; + } + + std::vector Forward(std::vector inputs, std::vector args) override { + auto router_input = inputs[0]; + if (router_input.batch() > 1) router_input = router_input.view(1, -1, 1, -1); // 1, batch*seq, 1, hidden + auto router_logits = primary_router(router_input); // 1, batch*seq, 1, num_experts + auto experts_w_i = Tensor::topk(router_logits, num_experts_per_tok, DIMENSION); + router_logits = experts_w_i[0]; // 1, batch*seq, 1, k + auto expert_indices = experts_w_i[1]; // 1, batch*seq, 1, k + expert_indices = expert_indices.view(-1, 1, 1, -1); // 1, 1, 1, k* batch*seq + auto hidden_states = input_layernorm(router_input); + hidden_states = self_atten({hidden_states, hidden_states, hidden_states})[0]; + auto residual = hidden_states + inputs[0]; + hidden_states = post_attention_layernorm(residual); + hidden_states = block_sparse_moe({hidden_states, router_logits, expert_indices})[0]; + hidden_states = hidden_states + residual; + 
return {hidden_states}; + } + + MultiHeadAttention &get_attention() { + return self_atten; + } + +private: + MultiHeadAttention self_atten; + SmallThinkerMoeBlock block_sparse_moe; + Layer input_layernorm; + Layer post_attention_layernorm; + Layer primary_router; + int num_hidden_layers; + int num_experts_per_tok{}; +}; + +class SmallThinkerModel final : public Module { +public: + SmallThinkerModel() = default; + SmallThinkerModel(const SmallThinkerConfig &config, const SmallThinkerNameConfig &names, const string &base_name) { + blocks = List(config.num_hidden_layers, config, names, base_name); + norm = RMSNorm(config.hidden_size, config.rms_norm_eps, names.post_norm_name); + } + std::vector Forward(std::vector inputs, std::vector args) override { + auto hidden_states = inputs[0]; + for (auto &block : blocks) { + hidden_states = block({hidden_states})[0]; + } + hidden_states = norm(hidden_states); + return {hidden_states}; + } + + void clear_kvcache() override { + for (auto &block : blocks) { + auto kvcache = block.get_attention().get_cache(); + for (auto &cache : kvcache) { cache->clearCache(); } + auto ropes = block.get_attention().get_rope(); + for (auto &rope : ropes) { rope->clearCache(); } + } + } + +private: + std::vector blocks; + Layer norm; +}; + +class SmallThinkerForCausalLM final : public Module { +public: + CHAINABLE_MODULE_METHODS(SmallThinkerForCausalLM) + SmallThinkerForCausalLM(SmallThinkerConfig &config) { + auto names = config.names_config; + hidden_size = config.hidden_size; + embedding = Embedding(config.vocab_size, config.hidden_size, names.token_embd_name); + model = SmallThinkerModel(config, names, names.blk_name); + tie_embedding_words = config.tie_embedding_words; + if (tie_embedding_words) { + lm_head = Parameter(1, config.vocab_size, 1, config.hidden_size, names.token_embd_name + ".weight"); + } else { + lm_head_layer = Linear(config.hidden_size, config.vocab_size, false, names.lm_head_name); + } + } + + std::vector Forward(std::vector 
inputs, std::vector args) override { + auto x = embedding(inputs[0]); + auto outputs = model({x})[0]; + if (outputs.sequence() > 1) { + outputs = outputs.clip({}, {}, {-1}, {}); + } + if (tie_embedding_words) { + outputs = Tensor::mm(outputs, lm_head().transpose(Chl::SEQUENCE, Chl::DIMENSION)); + } else { + outputs = lm_head_layer(outputs); + } + return {outputs}; + } + void clear_kvcache() override { + model.clear_kvcache(); + } + +private: + int hidden_size; + bool tie_embedding_words; + Layer embedding; + Parameter lm_head; + Layer lm_head_layer; + SmallThinkerModel model; +}; + +#endif // MODELING_SMOLTHINKER_HPP \ No newline at end of file diff --git a/src/models/smollm/configuration_smollm.hpp b/mllm/models/smollm/configuration_smollm.hpp similarity index 100% rename from src/models/smollm/configuration_smollm.hpp rename to mllm/models/smollm/configuration_smollm.hpp diff --git a/src/models/smollm/modeling_smollm.hpp b/mllm/models/smollm/modeling_smollm.hpp similarity index 88% rename from src/models/smollm/modeling_smollm.hpp rename to mllm/models/smollm/modeling_smollm.hpp index 86d8d6ca8..2526d57a1 100644 --- a/src/models/smollm/modeling_smollm.hpp +++ b/mllm/models/smollm/modeling_smollm.hpp @@ -50,9 +50,8 @@ class SmolLMBlock final : public Module { public: SmolLMBlock() = default; - SmolLMBlock(int hidden_dim, int head_size, int kv_head_size, int ffn_hidden, RoPEType RoPE_type, float rope_theta, int max_position_embeddings, int cache_limit, const SmolLMNameConfig &names, const string &base_name) { - attention = MultiHeadAttention(hidden_dim, head_size, kv_head_size, hidden_dim / head_size, SPLIT_NONE, false, false, - RoPE_type, rope_theta, max_position_embeddings, cache_limit, true, false, names, base_name + names._attn_base_name); + SmolLMBlock(int hidden_dim, int head_size, int kv_head_size, int ffn_hidden, RoPEType RoPE_type, float rope_theta, int max_position_embeddings, int cache_limit, string attn_implementation, const SmolLMNameConfig &names, 
const string &base_name) { + attention = MultiHeadAttention(hidden_dim, head_size, kv_head_size, hidden_dim / head_size, SPLIT_NONE, PostQkv_NONE, false, RoPE_type, rope_theta, max_position_embeddings, cache_limit, true, false, false, attn_implementation, names, base_name + names._attn_base_name); mlp = SmolLMMLP(hidden_dim, ffn_hidden, names, base_name + names._ffn_base_name); norm1 = RMSNorm(hidden_dim, 1e-6, base_name + names._attn_norm_name); norm2 = RMSNorm(hidden_dim, 1e-6, base_name + names._ffn_norm_name); @@ -81,13 +80,13 @@ class SmolLMModel final : public Module { public: explicit SmolLMModel(const SmolLMConfig &config) : SmolLMModel(config.vocab_size, config.hidden_dim, config.head_size, config.num_key_value_heads, config.ffn_hidden, config.block_num, - config.RoPE_type, config.rope_theta, config.max_position_embeddings, config.cache_limit, + config.RoPE_type, config.rope_theta, config.max_position_embeddings, config.cache_limit, config.attn_implementation, config.names_config, config.names_config.blk_name) { } - SmolLMModel(int vocab_size, int hidden_dim, int head_size, int kv_head_size, int ffn_hidden, int block_num, RoPEType RoPE_type, float rope_theta, int max_position_embeddings, int cache_limit, + SmolLMModel(int vocab_size, int hidden_dim, int head_size, int kv_head_size, int ffn_hidden, int block_num, RoPEType RoPE_type, float rope_theta, int max_position_embeddings, int cache_limit, string attn_implementation, const SmolLMNameConfig &names, const string &base_name) { embedding = Embedding(vocab_size, hidden_dim, names.token_embd_name); - blocks = List(block_num, hidden_dim, head_size, kv_head_size, ffn_hidden, RoPE_type, rope_theta, max_position_embeddings, cache_limit, names, base_name); + blocks = List(block_num, hidden_dim, head_size, kv_head_size, ffn_hidden, RoPE_type, rope_theta, max_position_embeddings, cache_limit, attn_implementation, names, base_name); norm = RMSNorm(hidden_dim, 1e-6, names.post_norm_name); lm_head = Parameter(1, 
vocab_size, 1, hidden_dim, names.token_embd_name + ".weight"); diff --git a/src/models/smollm/tokenization_smollm.hpp b/mllm/models/smollm/tokenization_smollm.hpp similarity index 100% rename from src/models/smollm/tokenization_smollm.hpp rename to mllm/models/smollm/tokenization_smollm.hpp diff --git a/src/models/stablelm/configuration_stablelm.hpp b/mllm/models/stablelm/configuration_stablelm.hpp similarity index 100% rename from src/models/stablelm/configuration_stablelm.hpp rename to mllm/models/stablelm/configuration_stablelm.hpp diff --git a/src/models/stablelm/modeling_stablelm.hpp b/mllm/models/stablelm/modeling_stablelm.hpp similarity index 69% rename from src/models/stablelm/modeling_stablelm.hpp rename to mllm/models/stablelm/modeling_stablelm.hpp index 8033689e8..a0c652a83 100644 --- a/src/models/stablelm/modeling_stablelm.hpp +++ b/mllm/models/stablelm/modeling_stablelm.hpp @@ -20,39 +20,41 @@ class StableLMMultiHeadAttention final : public Module { Layer o_proj; int head_size_{}; int kv_head_size_{}; - int attn_hidden_dim_{}; + int head_dim_{}; Chl split_chl_{}; + string attn_impl; public: StableLMMultiHeadAttention() = default; - StableLMMultiHeadAttention(int hidden_dim, int head_size, int kv_head_size, int attn_hidden_dim, - RoPEType RoPE_type, int cache_limit, bool do_mask, bool bias, + StableLMMultiHeadAttention(int hidden_dim, int head_size, int kv_head_size, int head_dim, + RoPEType RoPE_type, int cache_limit, bool do_mask, bool bias, string attn_implementation, const TransformerNameConfig &names, const string &base_name) { - attn_hidden_dim_ = attn_hidden_dim; + head_dim_ = head_dim; head_size_ = head_size; kv_head_size_ = kv_head_size; - q_proj = Linear(hidden_dim, head_size * attn_hidden_dim, bias, base_name + names._q_proj_name); - k_proj = Linear(hidden_dim, kv_head_size * attn_hidden_dim, bias, base_name + names._k_proj_name); - v_proj = Linear(hidden_dim, kv_head_size * attn_hidden_dim, bias, base_name + names._v_proj_name); + attn_impl 
= attn_implementation; + q_proj = Linear(hidden_dim, head_size * head_dim, bias, base_name + names._q_proj_name); + k_proj = Linear(hidden_dim, kv_head_size * head_dim, bias, base_name + names._k_proj_name); + v_proj = Linear(hidden_dim, kv_head_size * head_dim, bias, base_name + names._v_proj_name); if (RoPE_type > 0) { q_rope = RoPE(RoPE_type, 10000, 0.25, 4096, base_name + "q_rope"); k_rope = RoPE(RoPE_type, 10000, 0.25, 4096, base_name + "k_rope"); } if (cache_limit > 0) { - k_cache = KVCache(kv_head_size, attn_hidden_dim, head_size / kv_head_size, cache_limit, base_name + "k_cache"); - v_cache = KVCache(kv_head_size, attn_hidden_dim, head_size / kv_head_size, cache_limit, base_name + "v_cache"); + k_cache = KVCache(kv_head_size, head_dim, head_size / kv_head_size, cache_limit, attn_impl, base_name + "k_cache"); + v_cache = KVCache(kv_head_size, head_dim, head_size / kv_head_size, cache_limit, attn_impl, base_name + "v_cache"); } softmax = Softmax(DIMENSION, do_mask, base_name + "softmax"); - o_proj = Linear(head_size * attn_hidden_dim, hidden_dim, false, base_name + names._o_proj_name); + o_proj = Linear(head_size * head_dim, hidden_dim, false, base_name + names._o_proj_name); } vector Forward(vector inputs, vector args) override { Tensor q, k, v; q = q_proj(inputs[0]); k = k_proj(inputs[1]); v = v_proj(inputs[2]); - q = q.view(-1, head_size_, -1, attn_hidden_dim_); - k = k.view(-1, kv_head_size_, -1, attn_hidden_dim_); - v = v.view(-1, kv_head_size_, -1, attn_hidden_dim_); + q = q.view(-1, head_size_, -1, head_dim_); + k = k.view(-1, kv_head_size_, -1, head_dim_); + v = v.view(-1, kv_head_size_, -1, head_dim_); if (q_rope.ready() && k_rope.ready()) { q = q_rope(q); k = k_rope(k); @@ -61,12 +63,18 @@ class StableLMMultiHeadAttention final : public Module { k = k_cache(k); v = v_cache(v); } - k = k.transpose(SEQUENCE, DIMENSION); - auto qk = Tensor::mm(q, k); - qk = qk / std::sqrt(attn_hidden_dim_); - qk = softmax(qk, k_cache.getCacheSeqLen()); - auto o = 
Tensor::mm(qk, v); - o = o.view(-1, 1, -1, attn_hidden_dim_ * head_size_); + + Tensor o; + if (attn_impl == "flash_attention_2") { + o = Tensor::flash_attention2_forward(q, k, v, true); + } else { // eager implementation + k = k.transpose(SEQUENCE, DIMENSION); + auto qk = Tensor::mm(q, k); + qk = qk / std::sqrt(head_dim_); + qk = softmax(qk, k_cache.getCacheSeqLen()); + o = Tensor::mm(qk, v); + } + o = o.view(-1, 1, -1, head_dim_ * head_size_); o = o_proj(o); return {o}; } @@ -104,9 +112,10 @@ class StableLMBlock final : public Module { public: StableLMBlock() = default; - StableLMBlock(int hidden_dim, int head_size, int ffn_hidden, RoPEType RoPE_type, int cache_limit, const stablelmNameConfig &names, const string &base_name) { + StableLMBlock(int hidden_dim, int head_size, int ffn_hidden, RoPEType RoPE_type, int cache_limit, string attn_implementation, const stablelmNameConfig &names, const string &base_name) { attention = StableLMMultiHeadAttention(hidden_dim, head_size, head_size, hidden_dim / head_size, - RoPE_type, cache_limit, true, true, names, base_name + names._attn_base_name); + RoPE_type, cache_limit, true, true, attn_implementation, + names, base_name + names._attn_base_name); mlp = StableLMMLP(hidden_dim, ffn_hidden, names, base_name + names._ffn_base_name); norm1 = LayerNorm(hidden_dim, true, 1e-5, base_name + names._attn_norm_name); norm2 = LayerNorm(hidden_dim, true, 1e-5, base_name + names._ffn_norm_name); @@ -130,13 +139,11 @@ class StableLMModel final : public Module { public: explicit StableLMModel(const StableLMConfig &config) : - StableLMModel(config.vocab_size, config.hidden_dim, config.head_size, config.ffn_hidden, config.block_num, config.RoPE_type, config.cache_limit, - config.names_config, config.names_config.blk_name) { + StableLMModel(config.vocab_size, config.hidden_dim, config.head_size, config.ffn_hidden, config.block_num, config.RoPE_type, config.cache_limit, config.attn_implementation, config.names_config, 
config.names_config.blk_name) { } - StableLMModel(int vocab_size, int hidden_dim, int head_size, int ffn_hidden, int block_num, RoPEType RoPE_type, int cache_limit, - const stablelmNameConfig &names, const string &base_name) { + StableLMModel(int vocab_size, int hidden_dim, int head_size, int ffn_hidden, int block_num, RoPEType RoPE_type, int cache_limit, string attn_implementation, const stablelmNameConfig &names, const string &base_name) { embedding = Embedding(vocab_size, hidden_dim, names.token_embd_name); - blocks = List(block_num, hidden_dim, head_size, ffn_hidden, RoPE_type, cache_limit, names, base_name); + blocks = List(block_num, hidden_dim, head_size, ffn_hidden, RoPE_type, cache_limit, attn_implementation, names, base_name); norm = LayerNorm(hidden_dim, true, 1e-5, names.post_norm_name); lm_head = Linear(hidden_dim, vocab_size, false, names.lm_head_name); } diff --git a/src/models/stablelm/tokenization_stablelm.hpp b/mllm/models/stablelm/tokenization_stablelm.hpp similarity index 100% rename from src/models/stablelm/tokenization_stablelm.hpp rename to mllm/models/stablelm/tokenization_stablelm.hpp diff --git a/src/models/tinyllama/configuration_tinyllama.hpp b/mllm/models/tinyllama/configuration_tinyllama.hpp similarity index 100% rename from src/models/tinyllama/configuration_tinyllama.hpp rename to mllm/models/tinyllama/configuration_tinyllama.hpp diff --git a/src/models/tinyllama/modeling_tinyllama.hpp b/mllm/models/tinyllama/modeling_tinyllama.hpp similarity index 66% rename from src/models/tinyllama/modeling_tinyllama.hpp rename to mllm/models/tinyllama/modeling_tinyllama.hpp index 3d54d6cf4..eb7b7e0da 100644 --- a/src/models/tinyllama/modeling_tinyllama.hpp +++ b/mllm/models/tinyllama/modeling_tinyllama.hpp @@ -20,9 +20,12 @@ class TinyLLaMABlock final : public Module { public: TinyLLaMABlock() = default; - TinyLLaMABlock(int hidden_dim, int head_size, int kv_head_size, int ffn_hidden, RoPEType RoPE_type, float rope_theta, int 
max_position_embeddings, int cache_limit, const LLaMANameConfig &names, const string &base_name) { - attention = MultiHeadAttention(hidden_dim, head_size, kv_head_size, hidden_dim / head_size, SPLIT_NONE, false, false, - RoPE_type, rope_theta, max_position_embeddings, cache_limit, true, false, names, base_name + names._attn_base_name); + TinyLLaMABlock(int hidden_dim, int head_size, int kv_head_size, int ffn_hidden, RoPEType RoPE_type, float rope_theta, int max_position_embeddings, int cache_limit, string attn_implementation, const LLaMANameConfig &names, const string &base_name) { + attention = MultiHeadAttention(hidden_dim, head_size, kv_head_size, + hidden_dim / head_size, SPLIT_NONE, PostQkv_NONE, false, + RoPE_type, rope_theta, max_position_embeddings, + cache_limit, true, false, false, + attn_implementation, names, base_name + names._attn_base_name); mlp = LLaMAMLP(hidden_dim, ffn_hidden, names, base_name + names._ffn_base_name); norm1 = RMSNorm(hidden_dim, 1e-6, base_name + names._attn_norm_name); norm2 = RMSNorm(hidden_dim, 1e-6, base_name + names._ffn_norm_name); @@ -36,6 +39,9 @@ class TinyLLaMABlock final : public Module { x = x + tmp; return {x}; } + MultiHeadAttention &get_attention() { + return attention; + } }; class TinyLLaMAModel final : public Module { @@ -46,15 +52,15 @@ class TinyLLaMAModel final : public Module { public: explicit TinyLLaMAModel(const TinyLLaMAConfig &config) : - TinyLLaMAModel(config.vocab_size, config.hidden_dim, config.head_size, config.kv_head_size, config.ffn_hidden, config.block_num, - config.RoPE_type, config.rope_theta, config.max_position_embeddings, config.cache_limit, + TinyLLaMAModel(config.vocab_size, config.hidden_dim, config.head_size, config.kv_head_size, config.ffn_hidden, config.block_num, + config.RoPE_type, config.rope_theta, config.max_position_embeddings, config.cache_limit, config.attn_implementation, config.names_config, config.names_config.blk_name) { } - TinyLLaMAModel(int vocab_size, int hidden_dim, 
int head_size, int kv_head_size, int ffn_hidden, int block_num, - RoPEType RoPE_type, float rope_theta, int max_position_embeddings, int cache_limit, + TinyLLaMAModel(int vocab_size, int hidden_dim, int head_size, int kv_head_size, int ffn_hidden, int block_num, + RoPEType RoPE_type, float rope_theta, int max_position_embeddings, int cache_limit, string attn_implementation, const LLaMANameConfig &names, const string &base_name) { embedding = Embedding(vocab_size, hidden_dim, names.token_embd_name); - blocks = List(block_num, hidden_dim, head_size, kv_head_size, ffn_hidden, RoPE_type, rope_theta, max_position_embeddings, cache_limit, names, base_name); + blocks = List(block_num, hidden_dim, head_size, kv_head_size, ffn_hidden, RoPE_type, rope_theta, max_position_embeddings, cache_limit, attn_implementation, names, base_name); norm = RMSNorm(hidden_dim, 1e-6, names.post_norm_name); lm_head = Linear(hidden_dim, vocab_size, false, names.lm_head_name); } @@ -67,6 +73,14 @@ class TinyLLaMAModel final : public Module { x = lm_head(x); return {x}; } + void clear_kvcache() override { + for (auto &block : blocks) { + auto kvcache = block.get_attention().get_cache(); + for (auto &cache : kvcache) { cache->clearCache(); } + auto ropes = block.get_attention().get_rope(); + for (auto &rope : ropes) { rope->clearCache(); } + } + } }; #endif // MODELING_TINYLLAMA_HPP diff --git a/src/models/transformer/configuration_transformer.hpp b/mllm/models/transformer/configuration_transformer.hpp similarity index 85% rename from src/models/transformer/configuration_transformer.hpp rename to mllm/models/transformer/configuration_transformer.hpp index 042e0f97f..3a3dfc711 100644 --- a/src/models/transformer/configuration_transformer.hpp +++ b/mllm/models/transformer/configuration_transformer.hpp @@ -37,5 +37,7 @@ class TransformerConfig { public: TransformerConfig() { } + string attn_implementation = "flash_attention_2"; // Options: "flash_attention_2", "eager" + DataType dtype = 
MLLM_TYPE_F32; }; #endif // CONFIGURATION_TRANSFORMER_HPP diff --git a/mllm/models/transformer/modeling_transformer.hpp b/mllm/models/transformer/modeling_transformer.hpp new file mode 100644 index 000000000..1fbe7e175 --- /dev/null +++ b/mllm/models/transformer/modeling_transformer.hpp @@ -0,0 +1,241 @@ +// +// Created by Rongjie Yi on 24-2-29. +// + +#ifndef MODELING_TRANSFORMER_HPP +#define MODELING_TRANSFORMER_HPP + +#include "DataType.hpp" +#include "Layer.hpp" +#include "Types.hpp" +#include "configuration_transformer.hpp" +#include + +using namespace mllm; + +struct MultiHeadAttentionConfig { + int hidden_dim; + int num_heads; + int num_key_value_heads; + int head_dim; + AttnQKVSplitType do_qkv_proj = SPLIT_NONE; // Options: SPLIT_NONE, SPLIT_HD, SPLIT_D_HD + AttnPostQkvNormType post_qkv_norm = PostQkv_NONE; + bool bias_kv_cat = false; // Only used when do_qkv_proj > 0 + RoPEType RoPE_type = RoPEType::NONE; // Options: NONE, ALIBI, ROPE, PERSIMMONROPE + float rope_theta; + int max_position_embeddings; + float partial_rotary_factor = 1.0f; // Used for PERSIMMONROPE + int cache_limit; + bool is_causal; + bool qkv_bias; + bool o_bias; + string attn_implementation = "flash_attention_2"; // Options: "flash_attention_2", "eager" +}; + +class MultiHeadAttention final : public Module { + Layer qkv_proj; + Layer q_proj; + Layer k_proj; + Layer v_proj; + RoPE q_rope; + RoPE k_rope; + Layer q_norm; + Layer k_norm; + KVCache k_cache; + KVCache v_cache; + Softmax softmax; + Layer o_proj; + Parameter bias_k; + Parameter bias_v; + int num_heads_{}; + int num_key_value_heads_{}; + int head_dim_{}; + Chl split_chl_{}; + bool causal_mask = true; + string attn_implementation_ = "flash_attention_2"; // Options: "flash_attention_2", "eager" + bool head_first_attn = false; // 是否是head-first的注意力排布实现 + +public: + MultiHeadAttention() = default; + MultiHeadAttention(MultiHeadAttentionConfig config, + const TransformerNameConfig &names, const string &base_name) : + 
MultiHeadAttention(config.hidden_dim, config.num_heads, + config.num_key_value_heads, config.head_dim, + config.do_qkv_proj, config.post_qkv_norm, config.bias_kv_cat, + config.RoPE_type, config.rope_theta, config.partial_rotary_factor, + config.max_position_embeddings, + config.cache_limit, config.is_causal, + config.qkv_bias, config.o_bias, + config.attn_implementation, names, base_name) { + } + MultiHeadAttention(int hidden_dim, int num_heads, int num_key_value_heads, int head_dim, + AttnQKVSplitType do_qkv_proj, AttnPostQkvNormType post_qkv_norm, bool bias_kv_cat, + RoPEType RoPE_type, float rope_theta, int max_position_embeddings, + int cache_limit, bool is_causal, bool qkv_bias, bool o_bias, + string attn_implementation, + const TransformerNameConfig &names, const string &base_name) : + MultiHeadAttention(hidden_dim, num_heads, num_key_value_heads, head_dim, + do_qkv_proj, post_qkv_norm, bias_kv_cat, + RoPE_type, rope_theta, 1.0f, max_position_embeddings, + cache_limit, is_causal, qkv_bias, o_bias, + attn_implementation, names, base_name) { + } + MultiHeadAttention(int hidden_dim, int num_heads, int num_key_value_heads, int head_dim, + AttnQKVSplitType do_qkv_proj, AttnPostQkvNormType post_qkv_norm, bool bias_kv_cat, + RoPEType RoPE_type, float rope_theta, float partial_rotary_factor, int max_position_embeddings, + int cache_limit, bool is_causal, bool qkv_bias, bool o_bias, + string attn_implementation, + const TransformerNameConfig &names, const string &base_name) { + head_dim_ = head_dim; + num_heads_ = num_heads; + num_key_value_heads_ = num_key_value_heads; + causal_mask = is_causal; + attn_implementation_ = attn_implementation; + if (do_qkv_proj > 0) { + split_chl_ = (Chl)do_qkv_proj; + if (do_qkv_proj == SPLIT_HD) { + qkv_proj = Linear(hidden_dim, (num_heads_ + num_key_value_heads_ + num_key_value_heads_) * head_dim, qkv_bias, base_name + names._qkv_proj_name); + } else { + qkv_proj = Linear(hidden_dim, num_heads * head_dim * 3, qkv_bias, base_name + 
names._qkv_proj_name); + } + } else { + q_proj = Linear(hidden_dim, num_heads * head_dim, qkv_bias, base_name + names._q_proj_name); + k_proj = Linear(hidden_dim, num_key_value_heads * head_dim, qkv_bias, base_name + names._k_proj_name); + v_proj = Linear(hidden_dim, num_key_value_heads * head_dim, qkv_bias, base_name + names._v_proj_name); + } + if (post_qkv_norm == PostQkv_LayerNorm) { + q_norm = LayerNorm(head_dim, true, 1e-6, base_name + names._q_norm_name); + k_norm = LayerNorm(head_dim, true, 1e-6, base_name + names._k_norm_name); + } else if (post_qkv_norm == PostQkv_RMSNorm) { + q_norm = RMSNorm(head_dim, 1e-6, base_name + names._q_norm_name); + k_norm = RMSNorm(head_dim, 1e-6, base_name + names._k_norm_name); + } + if (RoPE_type > 0) { + q_rope = RoPE(RoPE_type, rope_theta, partial_rotary_factor, max_position_embeddings, base_name + "q_rope"); + k_rope = RoPE(RoPE_type, rope_theta, partial_rotary_factor, max_position_embeddings, base_name + "k_rope"); + } + if (cache_limit > 0) { + k_cache = KVCache(num_key_value_heads, head_dim, + num_heads / num_key_value_heads, cache_limit, + attn_implementation_, base_name + "k_cache"); + v_cache = KVCache(num_key_value_heads, head_dim, + num_heads / num_key_value_heads, cache_limit, + attn_implementation_, base_name + "v_cache"); + } + softmax = Softmax(DIMENSION, is_causal, base_name + "softmax"); + o_proj = Linear(num_heads * head_dim, hidden_dim, o_bias, base_name + names._o_proj_name); + if (bias_kv_cat) { + bias_k = Parameter(1, 1, num_heads, head_dim, base_name + "bias_k"); + bias_v = Parameter(1, 1, num_heads, head_dim, base_name + "bias_v"); + } + } + vector Forward(vector inputs, vector args) override { + Tensor q, k, v; + if (qkv_proj.ready()) { + auto qkv = qkv_proj(inputs[0]); + if (split_chl_ == HD) { + auto qkv_sp = qkv.split({head_dim_ * num_heads_, + head_dim_ * num_key_value_heads_, + head_dim_ * num_key_value_heads_}, + DIMENSION); + q = qkv_sp[0]; + k = qkv_sp[1]; + v = qkv_sp[2]; + q = q.view(-1, 
num_heads_, -1, head_dim_); + k = k.view(-1, num_key_value_heads_, -1, head_dim_); + v = v.view(-1, num_key_value_heads_, -1, head_dim_); + } else { + auto qkv_sp = qkv.split({head_dim_, head_dim_, head_dim_}, split_chl_, num_heads_); + q = qkv_sp[0]; + k = qkv_sp[1]; + v = qkv_sp[2]; + } + } else { + q = q_proj(inputs[0]); + k = k_proj(inputs[1]); + v = v_proj(inputs[2]); + q = q.view(-1, num_heads_, -1, head_dim_); + k = k.view(-1, num_key_value_heads_, -1, head_dim_); + v = v.view(-1, num_key_value_heads_, -1, head_dim_); + } + if (q_norm.ready() && k_norm.ready()) { + q = q_norm(q); + k = k_norm(k); + } + if (bias_k.ready() && bias_v.ready()) { + k = Tensor::cat({k, bias_k()}, SEQUENCE); + v = Tensor::cat({v, bias_v()}, SEQUENCE); + } + if (q_rope.ready() && k_rope.ready()) { + q = q_rope(q); + k = k_rope(k); + } + if (attn_implementation_ == "eager") { + q = q.transpose(HEAD, SEQUENCE); + k = k.transpose(HEAD, SEQUENCE); + v = v.transpose(HEAD, SEQUENCE); + } + if (k_cache.ready() && v_cache.ready()) { + k = k_cache(k); + v = v_cache(v); + } + Tensor o; + if (attn_implementation_ == "flash_attention_2") { + o = Tensor::flash_attention2_forward(q, k, v, causal_mask); + } else if (attn_implementation_ == "sage_attention") { + o = Tensor::sage_attention_forward(q, k, v, causal_mask); + } else if (attn_implementation_ == "eager") { // eager implementation + q = q / std::sqrt(head_dim_); + k = k.transpose(SEQUENCE, DIMENSION); + auto qk = Tensor::mm(q, k); + if (k_cache.ready() && v_cache.ready() && k_cache.getCacheSeqLen() != qk.sequence() && qk.sequence() > 1) { + qk = softmax(qk, k_cache.getCacheSeqLen()); + } else { + qk = softmax(qk); + } + o = Tensor::mm(qk, v); + o = o.transpose(HEAD, SEQUENCE); + } else if (attn_implementation_ == "eager_notrans") { // eager no transpose mplementation + q = q / std::sqrt(head_dim_); + k = k.transpose(SEQUENCE, DIMENSION); + auto qk = Tensor::mm(q, k); + if (k_cache.ready() && v_cache.ready() && k_cache.getCacheSeqLen() != 
qk.sequence() && qk.sequence() > 1) { + qk = softmax(qk, k_cache.getCacheSeqLen()); + } else { + qk = softmax(qk); + } + o = Tensor::mm(qk, v); + } + o = o.view(-1, 1, -1, head_dim_ * num_heads_); + o = o_proj(o); + return {o}; + } + vector get_cache() { + return {&k_cache, &v_cache}; + } + vector get_rope() { + return {&q_rope, &k_rope}; + } +}; + +class FeedForward final : public Module { + Layer up_proj; + Layer act; + Layer down_proj; + +public: + FeedForward() = default; + FeedForward(int hidden_dim, int ffn_hidden, const string &act_fn_type, bool bias, const TransformerNameConfig &names, const string &base_name) { + up_proj = Linear(hidden_dim, ffn_hidden, bias, base_name + names._up_proj_name); + act = ACT_FN[act_fn_type](base_name + "act"); + down_proj = Linear(ffn_hidden, hidden_dim, bias, base_name + names._down_proj_name); + } + vector Forward(vector inputs, vector args) override { + auto x = up_proj(inputs[0]); + x = act(x); + x = down_proj(x); + return {x}; + } +}; + +#endif // MODELING_TRANSFORMER_HPP diff --git a/src/models/vit/configuration_vit.hpp b/mllm/models/vit/configuration_vit.hpp similarity index 100% rename from src/models/vit/configuration_vit.hpp rename to mllm/models/vit/configuration_vit.hpp diff --git a/src/models/vit/labels_vit.hpp b/mllm/models/vit/labels_vit.hpp similarity index 100% rename from src/models/vit/labels_vit.hpp rename to mllm/models/vit/labels_vit.hpp diff --git a/src/models/vit/modeling_vit.hpp b/mllm/models/vit/modeling_vit.hpp similarity index 86% rename from src/models/vit/modeling_vit.hpp rename to mllm/models/vit/modeling_vit.hpp index a2a97915d..ec78678ce 100644 --- a/src/models/vit/modeling_vit.hpp +++ b/mllm/models/vit/modeling_vit.hpp @@ -37,10 +37,11 @@ class ViTBlock final : public Module { public: ViTBlock() = default; - ViTBlock(int hidden_dim, int head_size, int ffn_hidden, const string &act_fn_type, const ViTNameConfig &names, const string &base_name) { - attention = MultiHeadAttention(hidden_dim, 
head_size, head_size, hidden_dim / head_size, SPLIT_NONE, - false, false, RoPEType::NONE, - -1, -1, 0, false, true, + ViTBlock(int hidden_dim, int head_size, int ffn_hidden, const string &act_fn_type, + string attn_implementation, const ViTNameConfig &names, const string &base_name) { + attention = MultiHeadAttention(hidden_dim, head_size, head_size, hidden_dim / head_size, + SPLIT_NONE,PostQkv_NONE, false, RoPEType::NONE, + -1, -1, 0, false, true, true, attn_implementation, names, base_name + names._attn_base_name); mlp = ViTMLP(hidden_dim, ffn_hidden, act_fn_type, names, base_name + names._ffn_base_name); down_proj = Linear(ffn_hidden, hidden_dim, true, base_name + names._down_proj_name); @@ -89,13 +90,11 @@ class ViTModel final : public Module { public: explicit ViTModel(const ViTConfig &config) : - ViTModel(config.hidden_dim, config.head_size, config.ffn_hidden, config.act_fn_type, config.patch, config.img_hw, config.block_num, config.class_size, - config.names_config, config.names_config.vison_model_name) { + ViTModel(config.hidden_dim, config.head_size, config.ffn_hidden, config.act_fn_type, config.patch, config.img_hw, config.block_num, config.class_size, config.attn_implementation, config.names_config, config.names_config.vison_model_name) { } - ViTModel(int hidden_dim, int head_size, int ffn_hidden, const string &act_fn_type, int patch, int img_hw, int block_num, int class_size, - const ViTNameConfig &names, const string &base_name) { + ViTModel(int hidden_dim, int head_size, int ffn_hidden, const string &act_fn_type, int patch, int img_hw, int block_num, int class_size, string attn_implementation, const ViTNameConfig &names, const string &base_name) { embedding = ViTEmbedding(hidden_dim, patch, img_hw, names, base_name + names._embd_name); - blocks = List(block_num, hidden_dim, head_size, ffn_hidden, act_fn_type, names, base_name + names._layer_name); + blocks = List(block_num, hidden_dim, head_size, ffn_hidden, act_fn_type, attn_implementation, names, 
base_name + names._layer_name); norm = LayerNorm(hidden_dim, true, 1e-6, base_name + names._post_norm_name); lm_head = Linear(hidden_dim, class_size, false, names.lm_head_name); } diff --git a/src/models/vit/processing_vit.hpp b/mllm/models/vit/processing_vit.hpp similarity index 95% rename from src/models/vit/processing_vit.hpp rename to mllm/models/vit/processing_vit.hpp index acf0d27f6..2ed67704b 100644 --- a/src/models/vit/processing_vit.hpp +++ b/mllm/models/vit/processing_vit.hpp @@ -16,7 +16,7 @@ using namespace mllm; class ViTProcessor final : public PreProcessor { Tensor img2Tensor(float *img, int height, int width, int channel, string name = "input", BackendType type = MLLM_CPU) { - Tensor tensor1(1, height, channel, width, Backend::global_backends[type], true); + Tensor tensor1(1, height, channel, width, Backend::global_backends[type].get(), true); tensor1.setName(std::move(name)); Tensor::tensor_status = TENSOR_STATIC_INIT; tensor1.setTtype(INPUT_TENSOR); @@ -78,8 +78,8 @@ class ViTProcessor final : public PreProcessor { return token_idx; } - void PreProcessImages(const std::vector &images, const std::vector &image_length) override{}; - void Process(const std::string &text) override{}; + void PreProcessImages(const std::vector &images, const std::vector &image_length) override {}; + void Process(const std::string &text) override {}; }; #endif // TOKENIZATION_VIT_HPP diff --git a/src/processor/AudioProcess.cpp b/mllm/processor/AudioProcess.cpp similarity index 99% rename from src/processor/AudioProcess.cpp rename to mllm/processor/AudioProcess.cpp index 9d1619cc2..520948a07 100644 --- a/src/processor/AudioProcess.cpp +++ b/mllm/processor/AudioProcess.cpp @@ -13,7 +13,7 @@ #include "wenet_audio/params.h" #include "wenet_audio/wav.h" #include "wenet_audio/feature_pipeline.h" -#include "backends/cpu/compute/VecDot.hpp" +#include "backends/cpu/third_party/ggml/VecDotFP32.hpp" class Fraction { public: diff --git a/src/processor/AudioProcess.hpp 
b/mllm/processor/AudioProcess.hpp similarity index 100% rename from src/processor/AudioProcess.hpp rename to mllm/processor/AudioProcess.hpp diff --git a/src/processor/ClipPreProcess.cpp b/mllm/processor/ClipPreProcess.cpp similarity index 100% rename from src/processor/ClipPreProcess.cpp rename to mllm/processor/ClipPreProcess.cpp diff --git a/src/processor/ClipPreProcess.hpp b/mllm/processor/ClipPreProcess.hpp similarity index 100% rename from src/processor/ClipPreProcess.hpp rename to mllm/processor/ClipPreProcess.hpp diff --git a/src/processor/FuyuPreProcess.cpp b/mllm/processor/FuyuPreProcess.cpp similarity index 99% rename from src/processor/FuyuPreProcess.cpp rename to mllm/processor/FuyuPreProcess.cpp index a99b52e07..ddae928dc 100644 --- a/src/processor/FuyuPreProcess.cpp +++ b/mllm/processor/FuyuPreProcess.cpp @@ -263,7 +263,7 @@ Tensor FuyuPreProcess::vector3d2Tensor(vector>> image_patch seq = image_patches[0].size(); dims = image_patches[0][0].size(); } - Tensor tensor1(batch, 1, seq, dims, Backend::global_backends[type], true); + Tensor tensor1(batch, 1, seq, dims, Backend::global_backends[type].get(), true); tensor1.setName(name); Tensor::tensor_status = TENSOR_STATIC_INIT; tensor1.setTtype(INPUT_TENSOR); @@ -284,7 +284,7 @@ Tensor FuyuPreProcess::vector2d2Tensor(vector> image_patches_indices batch = image_patches_indices.size(); seq = image_patches_indices[0].size(); } - Tensor tensor1(batch, 1, seq, 1, Backend::global_backends[type], true); + Tensor tensor1(batch, 1, seq, 1, Backend::global_backends[type].get(), true); tensor1.setName(name); Tensor::tensor_status = TENSOR_STATIC_INIT; tensor1.setTtype(INPUT_TENSOR); diff --git a/src/processor/FuyuPreProcess.hpp b/mllm/processor/FuyuPreProcess.hpp similarity index 100% rename from src/processor/FuyuPreProcess.hpp rename to mllm/processor/FuyuPreProcess.hpp diff --git a/src/processor/PostProcess.cpp b/mllm/processor/PostProcess.cpp similarity index 100% rename from src/processor/PostProcess.cpp rename 
to mllm/processor/PostProcess.cpp diff --git a/src/processor/PostProcess.hpp b/mllm/processor/PostProcess.hpp similarity index 100% rename from src/processor/PostProcess.hpp rename to mllm/processor/PostProcess.hpp diff --git a/src/processor/PreProcess.cpp b/mllm/processor/PreProcess.cpp similarity index 100% rename from src/processor/PreProcess.cpp rename to mllm/processor/PreProcess.cpp diff --git a/src/processor/PreProcess.hpp b/mllm/processor/PreProcess.hpp similarity index 100% rename from src/processor/PreProcess.hpp rename to mllm/processor/PreProcess.hpp diff --git a/src/tokenizers/BPE/Bpe.cpp b/mllm/tokenizers/BPE/Bpe.cpp similarity index 80% rename from src/tokenizers/BPE/Bpe.cpp rename to mllm/tokenizers/BPE/Bpe.cpp index fd1cf72a9..8a066631c 100644 --- a/src/tokenizers/BPE/Bpe.cpp +++ b/mllm/tokenizers/BPE/Bpe.cpp @@ -132,22 +132,10 @@ void mllm::BPETokenizer::tokenize(const std::string &text, std::vectorvocab_map_.find(word_split); result != this->vocab_map_.end()) { - auto token_idx = result->second; - tokens.emplace_back(id_token_[token_idx].score); - } else { - if (!byte_fallback) { - tokens.emplace_back(mllm::BPETokenizer::TokenUnk); - } else { - for (const char j : word_split) { - token_id_t token_id = static_cast(j) + 3; - tokens.emplace_back(token_id); - } - } - } - } + // Use the same algorithm as the else branch for vocab-based tokenization + std::vector word_tokens; + tokenizeWordWithVocab(word, word_tokens, byte_fallback); + tokens.insert(tokens.end(), word_tokens.begin(), word_tokens.end()); } if (TokenEos > 0) { tokens.push_back(TokenEos); @@ -202,7 +190,7 @@ void mllm::BPETokenizer::tokenize(const std::string &text, std::vector(symbols_[i].ch[j]) + 3; tokens.emplace_back(token_id); } @@ -232,6 +220,63 @@ void mllm::BPETokenizer::tryMergeSymbol(size_t start, size_t end) { queue_.emplace(item); } } + +void mllm::BPETokenizer::tokenizeWordWithVocab(const std::string &word, std::vector &tokens, bool byte_fallback) { + if (auto result = 
this->vocab_map_.find(word); result != this->vocab_map_.end()) { + tokens.emplace_back(result->second); + // std::cout << "Word: \"" << word << "\" -> [\"" << word << "\"]" << std::endl; + return; + } + // Use greedy longest-match algorithm + size_t pos = 0; + std::vector token_strings; // For debug output + while (pos < word.size()) { + int best_len = 0; + token_id_t best_token = TokenUnk; + std::string best_substr; + // Try all possible substrings starting from current position + for (size_t len = 1; len <= word.size() - pos; ++len) { + std::string substr = word.substr(pos, len); + auto result = this->vocab_map_.find(substr); + if (result != this->vocab_map_.end()) { + // Found a match, update best if this is longer + if (len > best_len) { + best_len = len; + best_token = result->second; + best_substr = substr; + } + } + } + if (best_len > 0) { + // Found a match, add the token + tokens.emplace_back(best_token); + token_strings.push_back(best_substr); + pos += best_len; + } else { + // No match found, handle the single character + if (!byte_fallback) { + tokens.emplace_back(TokenUnk); + token_strings.push_back(""); + pos += utf8_len(word[pos]); // Skip one UTF-8 character + } else { + // Byte fallback + token_id_t token_id = static_cast(word[pos]) + 3; + tokens.emplace_back(token_id); + token_strings.push_back(std::string(1, word[pos])); + pos += 1; + } + } + } + + // std::cout << "Word: \"" << word << "\" -> ["; + // for (size_t i = 0; i < token_strings.size(); ++i) { + // std::cout << "\"" << token_strings[i] << "\""; + // if (i < token_strings.size() - 1) { + // std::cout << ", "; + // } + // } + // std::cout << "]" << std::endl; +} void mllm::BPETokenizer::tokenize(const std::string &text, std::vector &tokens, bool bos) { this->tokenize(std::move(text), tokens, bos, true); } diff --git a/src/tokenizers/BPE/Bpe.hpp b/mllm/tokenizers/BPE/Bpe.hpp similarity index 94% rename from src/tokenizers/BPE/Bpe.hpp rename to mllm/tokenizers/BPE/Bpe.hpp index 
dbe39faeb..5c1a44f83 100644 --- a/src/tokenizers/BPE/Bpe.hpp +++ b/mllm/tokenizers/BPE/Bpe.hpp @@ -30,6 +30,7 @@ class BPETokenizer : public Tokenizer { std::vector symbols_; std::priority_queue, TokenItem::Compare> queue_; void tryMergeSymbol(size_t start, size_t end); + void tokenizeWordWithVocab(const std::string &word, std::vector &tokens, bool byte_fallback); std::unordered_map bytes_to_unicode_; public: diff --git a/src/tokenizers/Tiktoken/tiktoken.cpp b/mllm/tokenizers/Tiktoken/tiktoken.cpp similarity index 100% rename from src/tokenizers/Tiktoken/tiktoken.cpp rename to mllm/tokenizers/Tiktoken/tiktoken.cpp diff --git a/src/tokenizers/Tiktoken/tiktoken.hpp b/mllm/tokenizers/Tiktoken/tiktoken.hpp similarity index 100% rename from src/tokenizers/Tiktoken/tiktoken.hpp rename to mllm/tokenizers/Tiktoken/tiktoken.hpp diff --git a/src/tokenizers/Tokenizer.cpp b/mllm/tokenizers/Tokenizer.cpp similarity index 97% rename from src/tokenizers/Tokenizer.cpp rename to mllm/tokenizers/Tokenizer.cpp index 6c5007215..45c2d98f8 100644 --- a/src/tokenizers/Tokenizer.cpp +++ b/mllm/tokenizers/Tokenizer.cpp @@ -1,9 +1,9 @@ // // Created by Xiang Li on 23-10-7. 
// +#include "Backend.hpp" #include "ParamLoader.hpp" #include "Tokenizer.hpp" -#include /* Vocab Structure * ┌──────┬──────┬─────┬────────┬──────┬──────┬───────┐ * │ │ │ │ │ │ │ │ @@ -99,7 +99,7 @@ bool Tokenizer::getTokenId(const token_t &token, token_id_t &id) { void Tokenizer::token2Tensor(Net *net, vector tokens, shared_ptr input_tensor) { // auto input_tensor = std::make_shared(); - input_tensor->setBackend(net->backends()[BackendType::MLLM_CPU].get()); + input_tensor->setBackend(Backend::global_backends[BackendType::MLLM_CPU].get()); input_tensor->reshape(1, 1, static_cast(tokens.size()), 1); input_tensor->alloc(); // input_tensor->fullData(1); @@ -110,7 +110,7 @@ void Tokenizer::token2Tensor(Net *net, vector tokens, shared_ptr> tokens, shared_ptr input_tensor) { // auto input_tensor = std::make_shared(); - input_tensor->setBackend(net->backends()[BackendType::MLLM_CPU].get()); + input_tensor->setBackend(Backend::global_backends[BackendType::MLLM_CPU].get()); const auto bsize = static_cast(tokens.size()); input_tensor->reshape(bsize, 1, static_cast(tokens[0].size()), 1); input_tensor->alloc(); diff --git a/src/tokenizers/Tokenizer.hpp b/mllm/tokenizers/Tokenizer.hpp similarity index 98% rename from src/tokenizers/Tokenizer.hpp rename to mllm/tokenizers/Tokenizer.hpp index cb0a85e32..1ffe3b441 100644 --- a/src/tokenizers/Tokenizer.hpp +++ b/mllm/tokenizers/Tokenizer.hpp @@ -51,7 +51,7 @@ class Tokenizer { std::string chat_template_end; public: - Tokenizer(){ + Tokenizer() { // do nothing } explicit Tokenizer(const std::string &vocab_file); @@ -76,7 +76,8 @@ class Tokenizer { static void token2Tensor(Net *net, vector tokens, shared_ptr input_tensor); static void tokens2Tensor(Net *net, vector> tokens, shared_ptr input_tensor); static Tensor tokens2Input(vector tokens_id, string name = "input", BackendType type = MLLM_CPU) { - Tensor tensor1(1, 1, tokens_id.size(), 1, Backend::global_backends[type], true); + Module::initBackend(type); + Tensor tensor1(1, 1, 
tokens_id.size(), 1, Backend::global_backends[type].get(), true); tensor1.setName(name); Tensor::tensor_status = TENSOR_STATIC_INIT; tensor1.setTtype(INPUT_TENSOR); @@ -87,7 +88,7 @@ class Tokenizer { } static Tensor tokens2Input(vector> tokens, string name = "input", BackendType type = MLLM_CPU) { const auto bsize = static_cast(tokens.size()); - Tensor tensor1(bsize, 1, static_cast(tokens[0].size()), 1, Backend::global_backends[type], true); + Tensor tensor1(bsize, 1, static_cast(tokens[0].size()), 1, Backend::global_backends[type].get(), true); tensor1.setName(name); Tensor::tensor_status = TENSOR_STATIC_INIT; tensor1.setTtype(INPUT_TENSOR); diff --git a/src/tokenizers/Unicode.cpp b/mllm/tokenizers/Unicode.cpp similarity index 100% rename from src/tokenizers/Unicode.cpp rename to mllm/tokenizers/Unicode.cpp diff --git a/src/tokenizers/Unicode.hpp b/mllm/tokenizers/Unicode.hpp similarity index 100% rename from src/tokenizers/Unicode.hpp rename to mllm/tokenizers/Unicode.hpp diff --git a/src/tokenizers/UnicodeData.cpp b/mllm/tokenizers/UnicodeData.cpp similarity index 100% rename from src/tokenizers/UnicodeData.cpp rename to mllm/tokenizers/UnicodeData.cpp diff --git a/src/tokenizers/UnicodeData.hpp b/mllm/tokenizers/UnicodeData.hpp similarity index 100% rename from src/tokenizers/UnicodeData.hpp rename to mllm/tokenizers/UnicodeData.hpp diff --git a/src/tokenizers/Unigram/Unigram.cpp b/mllm/tokenizers/Unigram/Unigram.cpp similarity index 100% rename from src/tokenizers/Unigram/Unigram.cpp rename to mllm/tokenizers/Unigram/Unigram.cpp diff --git a/src/tokenizers/Unigram/Unigram.hpp b/mllm/tokenizers/Unigram/Unigram.hpp similarity index 100% rename from src/tokenizers/Unigram/Unigram.hpp rename to mllm/tokenizers/Unigram/Unigram.hpp diff --git a/src/tokenizers/Unigram/trie.hpp b/mllm/tokenizers/Unigram/trie.hpp similarity index 100% rename from src/tokenizers/Unigram/trie.hpp rename to mllm/tokenizers/Unigram/trie.hpp diff --git 
a/src/tokenizers/WordPiece/WordPiece.cpp b/mllm/tokenizers/WordPiece/WordPiece.cpp similarity index 100% rename from src/tokenizers/WordPiece/WordPiece.cpp rename to mllm/tokenizers/WordPiece/WordPiece.cpp diff --git a/src/tokenizers/WordPiece/WordPiece.hpp b/mllm/tokenizers/WordPiece/WordPiece.hpp similarity index 100% rename from src/tokenizers/WordPiece/WordPiece.hpp rename to mllm/tokenizers/WordPiece/WordPiece.hpp diff --git a/scripts/build_android.sh b/scripts/build_android.sh index 8268543a3..4e4a5391d 100755 --- a/scripts/build_android.sh +++ b/scripts/build_android.sh @@ -9,7 +9,7 @@ cmake .. \ -DANDROID_ABI="arm64-v8a" \ -DNATIVE_LIBRARY_OUTPUT=. -DNATIVE_INCLUDE_OUTPUT=. $1 $2 $3 \ -DANDROID_PLATFORM=android-34 \ --DCMAKE_CXX_FLAGS="-march=armv8.6-a+dotprod+i8mm" \ +-DCMAKE_CXX_FLAGS="-march=armv8.2-a+fp16+fp16fml+dotprod+i8mm" \ -DDEBUG=OFF \ -DTEST=OFF \ -DARM=ON \ diff --git a/scripts/push_qnn_lib.sh b/scripts/push_qnn_lib.sh new file mode 100644 index 000000000..96d20cdf5 --- /dev/null +++ b/scripts/push_qnn_lib.sh @@ -0,0 +1,17 @@ +adb shell mkdir -p /data/local/tmp/mllm/qnn-lib + +ANDR_LIB=$QNN_SDK_ROOT/lib/aarch64-android +OP_PATH=../mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/build +DEST=/data/local/tmp/mllm/qnn-lib + +adb push $ANDR_LIB/libQnnHtp.so $DEST +adb push $ANDR_LIB/libQnnHtpV75Stub.so $DEST +adb push $ANDR_LIB/libQnnHtpPrepare.so $DEST +adb push $ANDR_LIB/libQnnHtpProfilingReader.so $DEST +adb push $ANDR_LIB/libQnnHtpOptraceProfilingReader.so $DEST +adb push $ANDR_LIB/libQnnHtpV75CalculatorStub.so $DEST +adb push $QNN_SDK_ROOT/lib/hexagon-v75/unsigned/libQnnHtpV75Skel.so $DEST +adb push $QNN_SDK_ROOT/lib/aarch64-android/libQnnSystem.so $DEST + +adb push $OP_PATH/aarch64-android/libQnnLLaMAPackage.so $DEST/libQnnLLaMAPackage_CPU.so +adb push $OP_PATH/hexagon-v75/libQnnLLaMAPackage.so $DEST/libQnnLLaMAPackage_HTP.so diff --git a/scripts/run_ling.sh b/scripts/run_ling.sh new file mode 100755 index 000000000..ba34d7f80 --- /dev/null 
+++ b/scripts/run_ling.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +adb shell mkdir /data/local/tmp/mllm +adb shell mkdir /data/local/tmp/mllm/bin +adb shell mkdir /data/local/tmp/mllm/models +adb shell mkdir /data/local/tmp/mllm/vocab +adb push ../vocab/ling_vocab.mllm /data/local/tmp/mllm/vocab/ +adb push ../vocab/ling_merges.txt /data/local/tmp/mllm/vocab/ +adb push ../bin-arm/demo_bailing_moe /data/local/tmp/mllm/bin/ +adb push ../bin-arm/demo_bailing_moe_mbp /data/local/tmp/mllm/bin/ +adb push ../models/ling-lite-1.5-kai_q4_0.mllm /data/local/tmp/mllm/models/ +# adb push ../models/ling-lite-1.5-kai_q4_0_e2.mllm /data/local/tmp/mllm/models/ +# if push failed, exit +if [ $? -ne 0 ]; then + echo "adb push failed" + exit 1 +fi +adb shell "cd /data/local/tmp/mllm/bin && ./demo_bailing_moe -m ../models/ling-lite-1.5-kai_q4_0.mllm" +adb shell "cd /data/local/tmp/mllm/bin && ./demo_bailing_moe_mbp -m ../models/ling-lite-1.5-kai_q4_0.mllm" +# adb shell "cd /data/local/tmp/mllm/bin && ./demo_bailing_moe -d 1 -m ../models/ling-lite-1.5-q4_0.mllm" +# adb shell "cd /data/local/tmp/mllm/bin && ./demo_bailing_moe -m ../models/ling-lite-1.5-kai_q4_0_e2.mllm" +# adb shell "cd /data/local/tmp/mllm/bin && ./demo_bailing_moe_mbp -m ../models/ling-lite-1.5-kai_q4_0_e2.mllm" \ No newline at end of file diff --git a/scripts/run_phonelm_qnn.sh b/scripts/run_phonelm_qnn.sh index 818fb2ec2..9823945c6 100755 --- a/scripts/run_phonelm_qnn.sh +++ b/scripts/run_phonelm_qnn.sh @@ -20,14 +20,14 @@ else fi if [ -z "$QNN_SDK_ROOT" ]; then - export QNN_SDK_ROOT=/root/research/dev/mllm/src/backends/qnn/sdk + export QNN_SDK_ROOT=/root/research/dev/mllm/mllm/backends/qnn/sdk echo "QNN_SDK_ROOT is set to $QNN_SDK_ROOT" else echo "QNN_SDK_ROOT is set to $QNN_SDK_ROOT" fi ANDR_LIB=$QNN_SDK_ROOT/lib/aarch64-android -OP_PATH=../src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/build +OP_PATH=../mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/build DEST=/data/local/tmp/mllm/qnn-lib adb push $ANDR_LIB/libQnnHtp.so 
$DEST @@ -46,5 +46,5 @@ if [ $? -ne 0 ]; then exit 1 fi -adb push ../bin-arm/demo_phonelm_npu /data/local/tmp/mllm/bin/ +adb push ../bin-arm-qnn/demo_phonelm_npu /data/local/tmp/mllm/bin/ adb shell "cd /data/local/tmp/mllm/bin && export LD_LIBRARY_PATH=/data/local/tmp/mllm/qnn-lib && export ADSP_LIBRARY_PATH=/data/local/tmp/mllm/qnn-lib && ./demo_phonelm_npu" \ No newline at end of file diff --git a/scripts/run_qwen2_vl.sh b/scripts/run_qwen2_vl.sh index 3fd3070c5..4f20b0317 100755 --- a/scripts/run_qwen2_vl.sh +++ b/scripts/run_qwen2_vl.sh @@ -4,6 +4,7 @@ adb shell mkdir /data/local/tmp/mllm adb shell mkdir /data/local/tmp/mllm/bin adb shell mkdir /data/local/tmp/mllm/models adb shell mkdir /data/local/tmp/mllm/vocab +adb shell mkdir /data/local/tmp/mllm/assets adb push ../vocab/* /data/local/tmp/mllm/vocab/ adb push ../assets/* /data/local/tmp/mllm/assets/ adb push ../bin-arm/demo_qwen2_vl /data/local/tmp/mllm/bin/ @@ -13,4 +14,6 @@ if [ $? -ne 0 ]; then echo "adb push failed" exit 1 fi -adb shell "cd /data/local/tmp/mllm/bin && ./demo_qwen2_vl" \ No newline at end of file +adb shell "cd /data/local/tmp/mllm/bin && ./demo_qwen2_vl" +# adb shell "cd /data/local/tmp/mllm/bin && ./demo_qwen2_vl -m ../models/qwen-2-vl-7b-instruct-kai_q4_0.mllm -b 7B " +# adb shell "cd /data/local/tmp/mllm/bin && ./demo_qwen2_vl_vtp -m ../models/qwen-2-vl-7b-instruct-kai_q4_0_eager.mllm -b 7B " \ No newline at end of file diff --git a/scripts/run_qwen_qnn.sh b/scripts/run_qwen_qnn.sh index 81a572b2f..7bc9a7a5d 100755 --- a/scripts/run_qwen_qnn.sh +++ b/scripts/run_qwen_qnn.sh @@ -20,7 +20,7 @@ else fi if [ -z "$QNN_SDK_ROOT" ]; then - export QNN_SDK_ROOT=/root/research/dev/mllm/src/backends/qnn/sdk + export QNN_SDK_ROOT=/root/research/dev/mllm/mllm/backends/qnn/sdk echo "QNN_SDK_ROOT is set to $QNN_SDK_ROOT" # exit 1 else @@ -28,7 +28,7 @@ else fi ANDR_LIB=$QNN_SDK_ROOT/lib/aarch64-android -OP_PATH=../src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/build 
+OP_PATH=../mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/build DEST=/data/local/tmp/mllm/qnn-lib adb push $ANDR_LIB/libQnnHtp.so $DEST @@ -38,6 +38,8 @@ adb push $ANDR_LIB/libQnnHtpProfilingReader.so $DEST adb push $ANDR_LIB/libQnnHtpOptraceProfilingReader.so $DEST adb push $ANDR_LIB/libQnnHtpV75CalculatorStub.so $DEST adb push $QNN_SDK_ROOT/lib/hexagon-v75/unsigned/libQnnHtpV75Skel.so $DEST +adb push $QNN_SDK_ROOT/lib/aarch64-android/libQnnSystem.so $DEST + adb push $OP_PATH/aarch64-android/libQnnLLaMAPackage.so $DEST/libQnnLLaMAPackage_CPU.so adb push $OP_PATH/hexagon-v75/libQnnLLaMAPackage.so $DEST/libQnnLLaMAPackage_HTP.so @@ -47,5 +49,6 @@ if [ $? -ne 0 ]; then exit 1 fi -adb push ../bin-arm/demo_qwen_npu /data/local/tmp/mllm/bin/ +# adb shell "rm /data/local/tmp/mllm/bin/qnn_context.bin" +adb push ../bin-arm-qnn/demo_qwen_npu /data/local/tmp/mllm/bin/ adb shell "cd /data/local/tmp/mllm/bin && export LD_LIBRARY_PATH=/data/local/tmp/mllm/qnn-lib && export ADSP_LIBRARY_PATH=/data/local/tmp/mllm/qnn-lib && ./demo_qwen_npu" \ No newline at end of file diff --git a/scripts/run_showui_qnn.sh b/scripts/run_showui_qnn.sh new file mode 100755 index 000000000..705aef989 --- /dev/null +++ b/scripts/run_showui_qnn.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +adb shell mkdir -p /data/local/tmp/mllm/vocab +adb shell mkdir -p /data/local/tmp/mllm/qnn-lib + +adb push ../vocab/qwen_vocab.mllm /data/local/tmp/mllm/vocab/ + + +if ! adb shell [ -f "/data/local/tmp/mllm/models/showui-w8-fpbias-noshadow-xdl-test.mllm" ]; then + adb push ../models/showui-w8-fpbias-noshadow-xdl-test.mllm "/data/local/tmp/mllm/models/showui-w8-fpbias-noshadow-xdl-test.mllm" +else + echo "showui-w8-fpbias-noshadow-xdl-test file already exists" +fi + + +if ! 
adb shell [ -f "/data/local/tmp/mllm/models/showui-2B-rotated-q40.mllm" ]; then + adb push ../models/showui-2B-rotated-q40.mllm "/data/local/tmp/mllm/models/showui-2B-rotated-q40.mllm" +else + echo "showui-2B-rotated-q40.mllm file already exists" +fi + +if [ -z "$QNN_SDK_ROOT" ]; then + export QNN_SDK_ROOT=/root/research/dev/mllm/mllm/backends/qnn/sdk + # export HEXAGON_SDK_ROOT=/root/research/dev/mllm/mllm/backends/qnn/HexagonSDK/5.4.0 + echo "QNN_SDK_ROOT is set to $QNN_SDK_ROOT" + # exit 1 +else + echo "QNN_SDK_ROOT is set to $QNN_SDK_ROOT" +fi + +ANDR_LIB=$QNN_SDK_ROOT/lib/aarch64-android +OP_PATH=../mllm/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/build +DEST=/data/local/tmp/mllm/qnn-lib + +adb push $ANDR_LIB/libQnnHtp.so $DEST +adb push $ANDR_LIB/libQnnHtpV75Stub.so $DEST +adb push $ANDR_LIB/libQnnHtpPrepare.so $DEST +adb push $ANDR_LIB/libQnnHtpProfilingReader.so $DEST +adb push $ANDR_LIB/libQnnHtpOptraceProfilingReader.so $DEST +adb push $ANDR_LIB/libQnnHtpV75CalculatorStub.so $DEST +adb push $QNN_SDK_ROOT/lib/hexagon-v75/unsigned/libQnnHtpV75Skel.so $DEST +adb push $OP_PATH/aarch64-android/libQnnLLaMAPackage.so $DEST/libQnnLLaMAPackage_CPU.so +adb push $OP_PATH/hexagon-v75/libQnnLLaMAPackage.so $DEST/libQnnLLaMAPackage_HTP.so + + +if [ $? 
-ne 0 ]; then + echo "adb push failed" + exit 1 +fi +# adb shell "rm /data/local/tmp/mllm/bin/qnn_context.bin" +adb push ../bin-arm-qnn/demo_showui_npu /data/local/tmp/mllm/bin/ +adb shell "cd /data/local/tmp/mllm/bin && export LD_LIBRARY_PATH=/data/local/tmp/mllm/qnn-lib && export ADSP_LIBRARY_PATH=/data/local/tmp/mllm/qnn-lib && ./demo_showui_npu" \ No newline at end of file diff --git a/src/Executor.cpp b/src/Executor.cpp deleted file mode 100644 index f92b3a555..000000000 --- a/src/Executor.cpp +++ /dev/null @@ -1,151 +0,0 @@ -#include -#include "Timing.hpp" -#include "Executor.hpp" - -namespace mllm { -void Executor::setup(Net *net) { - mllm_time_init(); - - uint64_t time_start = mllm_time_us(); - uint64_t time_end; - - for (int i = 0; i < (int)net->subGraph().size(); ++i) { - string name = "G" + std::to_string(i); - auto &g = net->subGraph()[name]; - - g->setUpOps(*data_loader_); - } - time_end = mllm_time_us(); - if (load_time_ == 0) { - load_time_ = (time_end - time_start) / 1000.0F; - std::cout << "Load model: " << load_time_ / 1000.0F << " s" << std::endl; - } -} - -void Executor::run(Net *net, vector> input_tensors) { - bool init = false; - bool reshape = false; - - checkReshape(init, reshape, input_tensors); - - // set Input tensor - vector flashGid = {}; - for (int tid = 0; tid < net->inputNames().size(); ++tid) { - auto input_name = net->inputNames()[tid]; - auto input_tensor = input_tensors[tid]; - input_tensor->setName(input_name); - net->tensors()[input_name] = input_tensor; - if (std::find(flashGid.begin(), flashGid.end(), net->inGmap()[input_name]) == flashGid.end()) { - flashGid.push_back(net->inGmap()[input_name]); - } - } - for (auto Gid : flashGid) { - net->subGraph()["G" + std::to_string(Gid)]->reflashInput(net->tensors()); - } - - auto ex_time_start = mllm_time_us(); - - for (int i = 0; i < (int)net->subGraph().size(); ++i) { - string name = "G" + std::to_string(i); - auto &g = net->subGraph()[name]; - - g->reshape(); - g->setUpTensors(); - 
- result_ = g->forward(); - - // free - if (false) { - if (i < (int)net->subGraph().size() - 1) { - g->freeTensors(); - } - net->freeTensors(i); - } - } - - auto ex_time_end = mllm_time_us(); - if (input_tensors[0]->sequence() == 1) { - auto token_run_time = (ex_time_end - ex_time_start) / 1000.0F; - run_time_.push_back(token_run_time); - } - auto token_run_time = (ex_time_end - ex_time_start) / 1000.0F; - run_time_.push_back(token_run_time); -} - -// #define DYNAMIC -void Executor::execute(Net *net, vector> input_tensors) { - bool init = false; - bool reshape = false; - // TODO: when reshape begin - checkReshape(init, reshape, input_tensors); - // set Input tensor - - uint64_t time_start = mllm_time_us(); - uint64_t time_end; - - // Init inputs - vector flashGid = {}; - for (int tid = 0; tid < net->inputNames().size(); ++tid) { - auto input_name = net->inputNames()[tid]; - auto input_tensor = input_tensors[tid]; - input_tensor->setName(input_name); - net->tensors()[input_name] = input_tensor; - if (std::find(flashGid.begin(), flashGid.end(), net->inGmap()[input_name]) == flashGid.end()) { - flashGid.push_back(net->inGmap()[input_name]); - } - } - for (auto Gid : flashGid) { - net->subGraph()["G" + std::to_string(Gid)]->reflashInput(net->tensors()); - } - - for (int i = 0; i < (int)net->subGraph().size(); ++i) { - string name = "G" + std::to_string(i); - auto &g = net->subGraph()[name]; - if (init || reshape) { - g->reshape(); - } - // load params - if (!paramloaded) { - g->setUpOps(*data_loader_); - } -#ifndef DYNAMIC - } - paramloaded = true; - time_end = mllm_time_us(); - if (load_time_ == 0) { - load_time_ = (time_end - time_start) / 1000.0F; - } - - auto ex_time_start = mllm_time_us(); - float exe_time = 0; - - for (int i = 0; i < (int)net->subGraph().size(); ++i) { - string name = "G" + std::to_string(i); - auto &g = net->subGraph()[name]; -#endif - - g->reshape(); - g->setUpTensors(); - - result_ = g->forward(); - - // free - if (freeGraph) { -#ifdef DYNAMIC 
- g->freeOps(); - paramloaded = false; -#endif - if (i < (int)net->subGraph().size() - 1) { - g->freeTensors(); - } - net->freeTensors(i); - } - } - auto ex_time_end = mllm_time_us(); - if (input_tensors[0]->sequence() == 1) { - auto token_run_time = (ex_time_end - ex_time_start) / 1000.0F; - run_time_.push_back(token_run_time); - } -} - -} // namespace mllm diff --git a/src/Executor.hpp b/src/Executor.hpp deleted file mode 100644 index 8c487c24f..000000000 --- a/src/Executor.hpp +++ /dev/null @@ -1,101 +0,0 @@ -#ifndef MLLM_EXECUTOR_H -#define MLLM_EXECUTOR_H -#include "Net.hpp" -#include - -namespace mllm { -class Executor { -public: - Executor(AbstructLoader *data_loader) : - data_loader_(data_loader) { - } - ~Executor() = default; - - /** - * \brief Setup graphs in net - * \param net An instance of the Net class - */ - virtual void setup(Net *net); - - /** - * \brief Executes the foreword propagation of provided network - * \param net An instance of the Net class representing the network to be run - * \param input_tensors A vector of input tensors to be processed by the network - */ - virtual void run(Net *net, vector> input_tensors); - - /** - * \brief Setup&Executes the foreword propagation of provided network - * \param net An instance of the Net class representing the network to be run - * \param input_tensors A vector of input tensors to be processed by the network - * - * execute(net, input_tensors) is equivalent to setup(net) + run(net, input_tensors) - */ - virtual void execute(Net *net, vector> input_tensor); - - bool checkSame(vector> input_tensor) { - if (input_tensor.size() != input_sizes_.size()) { - return false; - } - bool same = true; - for (int i = 0; i < input_tensor.size(); ++i) { - if (input_tensor[i]->shape() != input_sizes_[i]) { - same = false; - break; - } - } - return same; - } - /** - * \brief Checks whether the input tensors have the same shape as the previous input tensors. - * Change init & reshape flags accordingly. 
- * \param init whether to initialize the input_sizes_ vector - * \param reshape whether to reshape the input tensors - * \param input_tensor A vector of input tensors to be processed by the network - * \return - */ - bool checkReshape(bool &init, bool &reshape, vector> input_tensor) { - if (input_sizes_.empty()) { - for (auto &t : input_tensor) { - input_sizes_.push_back(t->shape()); - } - init = true; - } else if (checkSame(input_tensor)) { - reshape = false; - } else { - input_sizes_.clear(); - for (auto &t : input_tensor) { - input_sizes_.push_back(t->shape()); - } - reshape = true; - } - return init || reshape; - } - - vector> &result() { - return result_; - } - - void perf() const { - std::cout << "load time: " << load_time_ << " ms" << std::endl; - double sum_time = std::accumulate(std::begin(run_time_), std::end(run_time_), 0.0); - double mean_time = sum_time / run_time_.size(); - std::cout << "token time: " << mean_time << " ms" << std::endl; - std::cout << "inference speed: " << 1000 / mean_time << " tokens/s" << std::endl; - } - -protected: - vector> input_sizes_; - vector> result_; - AbstructLoader *data_loader_; - - double load_time_ = 0; - vector run_time_; - - bool paramloaded = false; - bool freeGraph = false; -}; - -} // namespace mllm - -#endif // MLLM_EXECUTOR_H diff --git a/src/Graph.cpp b/src/Graph.cpp deleted file mode 100644 index ac4b72e5f..000000000 --- a/src/Graph.cpp +++ /dev/null @@ -1,236 +0,0 @@ -// -// Created by Rongjie Yi. 
-// -#include "Graph.hpp" -#include "memory/MemInspect.hpp" -#include "OpDefined.hpp" -#include "Types.hpp" -#ifdef DEBUGPRINT -#include "Timing.hpp" -#endif - -std::string intToStringWithLeadingZero(int num) { - if (num < 10) { - return "0" + std::to_string(num); - } - return std::to_string(num); -} - -namespace mllm { - -Graph::Graph(const NetParameter ¶m, Backend *bn, - unordered_map> &external_tensors, - int threadCount) { - backend_ = bn; - - for (auto net_tensor : param.net_tensors) { - auto it = external_tensors.find(net_tensor->name); - if (it == external_tensors.end()) { // not in external_tensors - tensors_[net_tensor->name] = std::make_shared(backend_); - tensors_[net_tensor->name]->setName(net_tensor->name); - tensors_[net_tensor->name]->setDtype(net_tensor->type); - } - } - for (auto net_op : param.net_ops) { - // for QNN prefill & CPU decoding execution, KVCache should be shared for each block -#ifdef USE_QNN - if (net_op->type == KVCACHE || net_op->type == KVCACHENPU) { -#ifdef DEBUGPRINT - std::cout << net_op->name << " is KVCache" << std::endl; -#endif - shared_ptr my_op(nullptr); - if (kv_cache_map.find(net_op->name) == kv_cache_map.end()) { - // for the prefill part, we need to create a new op - auto *new_op = backend_->opCreate(net_op->param, net_op->name, threadCount); - my_op.reset(new_op); - my_op->setOpType(net_op->type); - kv_cache_map[net_op->name] = new_op; - } else { -#ifdef DEBUGPRINT - std::cout << net_op->name << " is shared used" << std::endl; -#endif - // for the decoding part, we need to get created op from global container - my_op.reset(kv_cache_map[net_op->name]); - } - ops_[net_op->name] = my_op; - continue; - } -#endif - shared_ptr my_op(nullptr); - auto *new_op = backend_->opCreate(net_op->param, net_op->name, threadCount); - my_op.reset(new_op); - my_op->setOpType(net_op->type); - ops_[net_op->name] = my_op; - } - for (auto net_op : param.net_ops) { - bool connect_input = false; - string op_name = net_op->name; - 
op_names_.push_back(op_name); - auto in_tensors = net_op->in; - vector> inTensors; - for (auto *in_t : in_tensors) { - if (in_t->in == NULL) { - connect_input = true; - } - auto in_t_name = in_t->name; - auto it = tensors_.find(in_t_name); - if (it != tensors_.end()) { - inTensors.push_back(tensors_[in_t_name]); - } else { - inTensors.push_back(external_tensors[in_t_name]); - } - } - vector> outTensors; - for (int oz = 0; oz < net_op->out_size; oz++) { - auto out_t_name = "outtensor-" + op_name + "-" + intToStringWithLeadingZero(oz); - auto it = tensors_.find(out_t_name); - if (it != tensors_.end()) { - outTensors.push_back(tensors_[out_t_name]); - } else { - outTensors.push_back(external_tensors[out_t_name]); - } - } - ops_input_tensors_[op_name] = inTensors; - ops_output_tensors_[op_name] = outTensors; - if (connect_input) { ops_connect_input_.push_back(op_name); } - } -} - -void Graph::reflashInput( - unordered_map> &external_tensors) { - for (auto op : ops_connect_input_) { - vector tmp_name; - for (auto in_t : ops_input_tensors_[op]) { - tmp_name.push_back(in_t->name()); - } - ops_input_tensors_[op].clear(); - for (auto input_tensor_name : tmp_name) { - if (tensors_.find(input_tensor_name) != tensors_.end()) { - ops_input_tensors_[op].push_back(tensors_[input_tensor_name]); - } else { - ops_input_tensors_[op].push_back( - external_tensors[input_tensor_name]); - } - } - } -} -void Graph::reshape() { - for (const auto &op_name : op_names_) { - bool do_ = true; - if (ops_[op_name]->type() == PARAMETER || ops_[op_name]->type() == RANGE || ops_[op_name]->type() == GATHER || ops_[op_name]->type() == REPLACE) { - do_ = true; - } else { - for (auto &input_tensor : ops_input_tensors_[op_name]) { - if (input_tensor->count() == 0) { - do_ = false; - } - } - } - ops_not_inputs_empty_[op_name] = do_; - if (do_) { - ops_[op_name]->reshape( - ops_input_tensors_[op_name], - ops_output_tensors_[op_name]); // tensors_[op_name]:1.reshape - } else { - // 
std::cout<<"op_name:"<reshape(0, 0, 0, 0); - } - } - } -} - -void Graph::setUpTensors() { - auto &graph_in_tensors = ops_input_tensors_[op_names_[0]]; - // set graph out tensor TensorType - auto &graph_out_tensors = ops_output_tensors_[op_names_[op_names_.size() - 1]]; - for (auto &t : graph_out_tensors) { - t->setTtype(GRAPH_OUTPUT); - } - - this->backend_->onSetUpStart(graph_in_tensors, graph_out_tensors); - - for (auto &t : graph_in_tensors) { t->alloc(); } - - // set up tensors of ops - for (const auto &op_name : op_names_) { - if (ops_not_inputs_empty_[op_name]) { - ops_[op_name]->setUp(ops_input_tensors_[op_name], - ops_output_tensors_[op_name]); - // PRINT_MEMORY_USAGE((op_name + " setUp").c_str()); - } else { - // std::cout<<"op_name:"<load(loader); -#ifdef DEBUGPRINT - PRINT_MEMORY_USAGE((op_name + " load").c_str()); -#endif - } -} -// #define SAVECHECK -const vector> &Graph::forward(bool autofree) { - // backend event hook - this->backend_->onExecuteStart(ops_input_tensors_[op_names_[0]], ops_output_tensors_[op_names_[op_names_.size() - 1]]); - - for (const auto &op_name : op_names_) { - if (ops_not_inputs_empty_[op_name]) { -#ifdef SAVECHECK - for (auto &t : ops_input_tensors_[op_name]) { - t->checkData(); - t->saveData(); - } -#endif -#ifdef DEBUGPRINT - uint64_t t_start = mllm_time_us(); -#endif - ops_[op_name]->execute(ops_input_tensors_[op_name], - ops_output_tensors_[op_name]); - -#ifdef SAVECHECK - for (auto &t : ops_output_tensors_[op_name]) { - t->checkData(); - t->saveData(); - } -#endif - -#ifdef DEBUGPRINT - uint64_t t_end = mllm_time_us(); - std::cout << "" << op_name - << " exe_time:" << (t_end - t_start) / 1000.0F << " ms" - << std::endl; -#endif - if (autofree) { - ops_[op_name]->free(ops_input_tensors_[op_name], - ops_output_tensors_[op_name]); - } - } else { - // std::cout<<"op_name:"<backend_->onExecuteEnd(_, ""); - return ops_output_tensors_[op_names_[op_names_.size() - 1]]; -} - -void Graph::freeOps() { - for (const auto &op_name : 
op_names_) { - ops_[op_name]->free(ops_input_tensors_[op_name], - ops_output_tensors_[op_name]); - } -} -void Graph::freeTensors() { - for (auto &t : tensors_) { - t.second->free(); - } -} -void Graph::free() { - freeOps(); - freeTensors(); -} -} // namespace mllm diff --git a/src/Graph.hpp b/src/Graph.hpp deleted file mode 100644 index 906c69402..000000000 --- a/src/Graph.hpp +++ /dev/null @@ -1,109 +0,0 @@ -// -// Created by Rongjie Yi. -// - -#ifndef MLLM_GRAPH_H -#define MLLM_GRAPH_H -#include "Tensor.hpp" -#include "Op.hpp" -#include "ParamLoader.hpp" -#include "Backend.hpp" -#include "Types.hpp" -#include "express/ExpressBase.hpp" -#include -using std::unordered_map; - -namespace mllm { - -class Graph { -public: - /** - * \brief Graph - * \param param NetParameter contains the structure of this graph - * \param bn Backend like CPU/QNN etc - * \param external_tensors external tensors from other graph and inter graphs. - * \param threadCount number of Threads - */ - explicit Graph(const NetParameter ¶m, Backend *bn, unordered_map> &external_tensors, int threadCount); - virtual ~Graph() = default; - - /** - * \brief set the output tensors' shape of Ops in this graph. - */ - virtual void reshape(); - - /** - * \brief alloc the memory of output tensors of Ops in this graph. - */ - virtual void setUpTensors(); - - /** - * \brief load the weights/bias of Ops in this graph. - * \param loader A Paramloader - */ - void setUpOps(AbstructLoader &loader); - - /** - * \brief forward propagation - * \param autofree Whether to release the memory of weights. Set to false - * \return The last output tensor - */ - virtual const vector> &forward(bool autofree = false); - - /** - * \brief free the memory of Ops' weights in this graph. - */ - void freeOps(); - /** - * \brief free the memory of output tensors of Ops in this graph. 
- */ - void freeTensors(); - /** - * \brief free output tensors & Ops' weights - */ - void free(); - - /** - * \brief backward propagation [Not Used] - */ - void backward(); - - /** - * \brief reflash 'ops_input_tensors_'. - * \param external_tensors external tensors from other graph and inter graphs. - */ - void reflashInput(unordered_map> &external_tensors); - - /** - * \brief get the backend type of this graph. - */ - BackendType device() const { - return backend_->type(); - } - -protected: - Backend *backend_; - string name_; - - vector layer_names_; - - // tensor indices for the input and the output of the net - vector input_tensor_indices_; - vector output_tensor_indices_; - vector input_tensors_; - vector output_tensors_; - - unordered_map>> ops_input_tensors_; // opname: op's output Tensors - unordered_map>> ops_output_tensors_; // opname: op's output Tensors - unordered_map> tensors_; // opname: Tensors - unordered_map> ops_; // opname: op - unordered_map ops_not_inputs_empty_; // opname: ops_not_inputs_empty - - vector op_names_; - - vector ops_connect_input_; -}; - -} // namespace mllm - -#endif // MLLM_GRAPH_H diff --git a/src/Net.cpp b/src/Net.cpp deleted file mode 100644 index bcfefc864..000000000 --- a/src/Net.cpp +++ /dev/null @@ -1,54 +0,0 @@ -#include "Net.hpp" -#include "Op.hpp" -#include "Types.hpp" -#include "Backend.hpp" -#include - -namespace mllm { - -Net::Net(BackendConfig config) { - backends_.emplace(MLLM_CPU, GetBackendCreator(MLLM_CPU)->create(config)); -} - -void Net::convert(vector ¶m, BackendType backend_type, int threadCount) { - for (int ii = 0; ii < (int)param.size(); ++ii) { - auto &sub_param = param[ii]; - vector names = {}; - auto net_in_tensor = sub_param.net_inputs; - for (const auto &out_t : net_in_tensor) { - tensors_[out_t->name] = std::make_shared(backends_[backend_type].get()); - tensors_[out_t->name]->setName(out_t->name); - for (auto &tensor_name : tensor_names_) { - tensor_name.erase(std::remove(tensor_name.begin(), 
tensor_name.end(), out_t->name), tensor_name.end()); - } - names.push_back(out_t->name); - } - - for (auto *t : sub_param.net_tensors) { - if (t->in == NULL) { - auto *in_tensor = t; - tensors_[in_tensor->name] = std::make_shared(backends_[backend_type].get()); - tensors_[in_tensor->name]->setName(in_tensor->name); - input_names_.push_back(in_tensor->name); - inputname_graphidx_[in_tensor->name] = ii; - names.push_back(in_tensor->name); - } - } - tensor_names_.push_back(names); - } - - for (int i = 0; i < (int)param.size(); ++i) { - param[i].topologySort(); - shared_ptr subg_1; - subg_1.reset(new Graph(param[i], backends_[backend_type].get(), tensors_, threadCount)); - subGraphs_["G" + std::to_string(i)] = subg_1; - } -} - -void Net::freeTensors(int graph_idx) { - auto &graph_ex_tensor = tensor_names_[graph_idx]; - for (auto &name : graph_ex_tensor) { - tensors_[name]->free(); - } -} -} // namespace mllm diff --git a/src/Net.hpp b/src/Net.hpp deleted file mode 100644 index 8cfc68d3f..000000000 --- a/src/Net.hpp +++ /dev/null @@ -1,50 +0,0 @@ -#ifndef MLLM_NET_H -#define MLLM_NET_H - -#include "Op.hpp" -#include "ParamLoader.hpp" -#include "Graph.hpp" -#include "Tensor.hpp" -#include "Types.hpp" -namespace mllm { -class Net { -public: - explicit Net(BackendConfig config); - virtual ~Net() = default; - - virtual void convert(vector ¶m, BackendType backend_type = BackendType::MLLM_CPU, int threadCount = 4); - - unordered_map> &subGraph() { - return subGraphs_; - } - unordered_map> &tensors() { - return tensors_; - } - - unordered_map> &backends() { - return backends_; - } - vector> &tensorNames() { - return tensor_names_; - } - void freeTensors(int graph_idx); - vector inputNames() const { - return input_names_; - } - map inGmap() const { - return inputname_graphidx_; - } - -protected: - unordered_map> subGraphs_; - unordered_map> tensors_; - vector> tensor_names_; - vector ops_; - unordered_map> backends_; - vector input_names_; - map inputname_graphidx_; -}; - -} // 
namespace mllm - -#endif // MLLM_NET_H \ No newline at end of file diff --git a/src/Tensor.cpp b/src/Tensor.cpp deleted file mode 100644 index d4104528c..000000000 --- a/src/Tensor.cpp +++ /dev/null @@ -1,666 +0,0 @@ -#include "Tensor.hpp" - -#include -#include -#include -#include -#include "Backend.hpp" -#include "OpDefined.hpp" -#include "Timing.hpp" -#include "Types.hpp" -#include -#include -#include -#include -#include - -namespace mllm { - -/* Tensor类构造函数实现(对应头文件中的声明)*/ -Tensor::Tensor(const int batch, const int head, const int sequence, const int dimension) : - impl_(std::make_shared()) { // 初始化impl_ - reshape(batch, head, sequence, dimension); -} - -Tensor::Tensor(int batch, int head, int sequence, int dimension, Backend *bn, bool do_alloc) : - impl_(std::make_shared(bn)) { // 使用带Backend的TensorImpl构造函数 - impl_->dtype_ = MLLM_TYPE_F32; - reshape(batch, head, sequence, dimension); - if (do_alloc) { - alloc(); - } -} - -Tensor::Tensor(int batch, int head, int sequence, int dimension, BackendType bn_type, bool do_alloc) : - impl_(std::make_shared()) { - impl_->dtype_ = MLLM_TYPE_F32; - impl_->backend_ = Backend::global_backends[bn_type]; - reshape(batch, head, sequence, dimension); - if (do_alloc) { - alloc(); - } -} - -Tensor::Tensor(const std::vector &shape) : - impl_(std::make_shared()) { - impl_->private_reshape(shape); -} - -Tensor::Tensor(int value, Backend *bn) : - impl_(std::make_shared()) { - impl_->dtype_ = MLLM_TYPE_F32; - impl_->backend_ = bn; - reshape(1, 1, 1, 1); - alloc(); - impl_->should_in_graphs_ = false; - setDataAt(0, 0, 0, 0, static_cast(value)); -} - -Tensor::Tensor(int value, BackendType bn_type) : - impl_(std::make_shared()) { - impl_->dtype_ = MLLM_TYPE_F32; - impl_->backend_ = Backend::global_backends[bn_type]; - reshape(1, 1, 1, 1); - alloc(); - impl_->should_in_graphs_ = false; - setDataAt(0, 0, 0, 0, static_cast(value)); -} - -Tensor::Tensor(std::vector values, BackendType bn_type) : - impl_(std::make_shared()) { - impl_->dtype_ = 
MLLM_TYPE_F32; - impl_->backend_ = Backend::global_backends[bn_type]; - reshape(1, 1, 1, values.size()); - alloc(); - impl_->should_in_graphs_ = false; - for (size_t i = 0; i < values.size(); ++i) { - setDataAt(0, 0, 0, i, values[i]); - } -} - -bool Tensor::reshape(const int batch, const int head, const int sequence, const int dimension) { - return impl_->reshape(batch, head, sequence, dimension); - // vector shape(4); - // shape[chls()[BATCH]] = batch; - // shape[chls()[HEAD]] = head; - // shape[chls()[SEQUENCE]] = sequence; - // shape[chls()[DIMENSION]] = dimension; - // return reshape(shape); -} - -// Tensor.cpp -void Tensor::alloc() { - // if ("out-model.embed_tokens" == name()) - // std::cout << "alloc " << name() << std::endl; - if (aggregated_) return; - assert(impl_->backend_ != nullptr); - if (master_tensor_ != nullptr) return; - if (!shape_offset_.empty() && !shape_master_.empty()) return; - - impl_->alloc(); -} - -bool Tensor::reshape(int batch, int channel, int time, int height, int width) { - if (impl_->ctype_ != BTHWC) { - impl_->ctype_ = BCTHW; - impl_->chls_[BATCH] = 0; - impl_->chls_[CHANNLE] = 1; - impl_->chls_[TIME] = 2; - impl_->chls_[HEIGHT] = 3; - impl_->chls_[WIDTH] = 4; - } else { - impl_->chls_[BATCH] = 0; - impl_->chls_[TIME] = 1; - impl_->chls_[HEIGHT] = 2; - impl_->chls_[WIDTH] = 3; - impl_->chls_[CHANNLE] = 4; - } - - std::vector shape(5); - const auto &chls = impl_->chls_; // 从TensorImpl获取维度映射 - - shape[chls.at(BATCH)] = batch; - shape[chls.at(CHANNLE)] = channel; - shape[chls.at(TIME)] = time; - shape[chls.at(HEIGHT)] = height; - shape[chls.at(WIDTH)] = width; - - return impl_->private_reshape(shape); -} - -TensorStatus Tensor::tensor_status; - -uint32_t &Tensor::uuid() { - return uuid_; -} - -TensorType &Tensor::xnnTensorType() { - return xnn_tensor_type_; -} - -void Tensor::forceResetHostPointer(void *ptr) { - impl_->host_ptr_ = ptr; -} - -Tensor &Tensor::to(BackendType backend_type) { - // TODO: check if the data is shared between 
devices - // if so, return the origin tensor - // if not, return the new tensor - // TODO: if need copy, should implement copyDataCrossBn and do copy when Tensor::TENSOR_STATIC_READY - - /** - * Currently, there are following cases: - * CPU -> QNN, QNN -> CPU - * if it is CPU -> QNN, the buffer should be realloced - * (NOTE: not handling data copy as the tensor.to() shoudld be called before the data is set and tensor.device() should be checked in frontend) - * if it is QNN -> CPU, the data is sharable between CPU and QNN, no need to copy or realloc - */ - if (device() == backend_type) { - return *this; - } - if (backend_type == MLLM_CPU && device() == MLLM_QNN) { - // data is sharable between CPU and QNN - return *this; - } - // realloc the tensor - if (backend_type == MLLM_QNN && device() == MLLM_CPU) { - this->free(); - } - if (backend_type == MLLM_CPU && device() == MLLM_XNNPACK) { - module()->activation_tensors[name()]->setBackend(Backend::global_backends[backend_type]); - this->setBackend(Backend::global_backends[backend_type]); - return *this; - } - if (backend_type == MLLM_XNNPACK && device() == MLLM_CPU) { - module()->activation_tensors[name()]->setBackend(Backend::global_backends[backend_type]); - this->setBackend(Backend::global_backends[backend_type]); - return *this; - } - module()->activation_tensors[name()]->setBackend(Backend::global_backends[backend_type]); - this->alloc(); - return *this; -}; - -// TensorFuctions -std::vector Tensor::runFunc(std::vector out_names, - TensorFuncType type, - std::vector float_args, - std::vector> input_tensors, - bool in_place) { - auto backend = input_tensors.empty() ? 
Backend::global_backends[MLLM_CPU] : input_tensors[0]->backend(); - if (Backend::global_backends.size() == 2 && Backend::global_backends.find(MLLM_QNN) != Backend::global_backends.end()) { - backend = Backend::global_backends[MLLM_QNN]; - } - return backend->runFunc(out_names, type, float_args, input_tensors, in_place); -} - -/* -Tensor &Tensor::getFunc(const std::string &suffix, const TensorFuncType type, - vector float_args, vector other_tensors) { - assert(module() != nullptr); - auto &module_tensors = module()->activation_tensors; - auto &activation_tensors_num = module()->activation_tensors_num; - const std::string next_name = impl_->name_ + "-" + suffix; - // if (module_tensors.find(name_) == module_tensors.end()) { - // module_tensors[name_] = std::shared_ptr(this, [](Tensor *) {}); - // } - if (module_tensors.find(next_name) == module_tensors.end()) { - module_tensors[next_name] = std::make_shared(impl_->backend_); - module_tensors[next_name]->setName(next_name); - module_tensors[next_name]->setModule(module()); - activation_tensors_num[next_name] = 0; - } - if (module()->doLoad) { return *module_tensors[next_name]; } - TensorFunction *func = impl_->backend_->funcCreate(type); - std::vector tensorPtrs = {module_tensors[impl_->name_].get()}; - for (auto &other_tensor : other_tensors) { tensorPtrs.push_back(other_tensor); } -#ifdef DEBUGOPTIME - auto start_t = mllm_time_us(); -#endif - switch (Tensor::tensor_status) { - case TENSOR_STATIC_INIT: { - func->setup({module_tensors[next_name].get()}, tensorPtrs, float_args); - break; - } - case TENSOR_STATIC_READY: { - func->execute({module_tensors[next_name].get()}, tensorPtrs, float_args); - break; - } - case TENSOR_STATIC_TRACE: { - if (impl_->backend_->type() == BackendType::MLLM_CPU) { - Tracer::addTensorFunction(func, tensorPtrs, {module_tensors[next_name].get()}, float_args); - } - break; - } - default: { - } - } - if (Backend::global_backends.size() == 1) { - for (auto input_tensor : tensorPtrs) { - if 
(activation_tensors_num.find(input_tensor->name()) != activation_tensors_num.end()) { - switch (Tensor::tensor_status) { - case TENSOR_STATIC_INIT: { - activation_tensors_num[input_tensor->name()] += 1; - break; - } - case TENSOR_STATIC_READY: { - activation_tensors_num[input_tensor->name()] -= 1; - break; - } - default: { - } - } - if (activation_tensors_num[input_tensor->name()] == 0 && module_tensors[input_tensor->name()]->sequence() > 1 - && module_tensors[input_tensor->name()]->ttype() != GRAPH_OUTPUT) { - module_tensors[input_tensor->name()]->free(); - // std::cout << input_tensor->name() << " |F" << std::endl; - } - } - } - } -#ifdef DEBUGOPTIME - if (Tensor::tensor_status == TENSOR_STATIC_READY) { - auto end_t = mllm_time_us(); - std::cout << next_name << " | " << Tensor::tensor_status - << " time: " << (end_t - start_t) / 1000.0F << "ms" << std::endl; - } -#endif -#ifdef DEBUGSAVETENSOR - module_tensors[next_name]->saveNData(); -#endif - return *module_tensors[next_name]; -} - -void Tensor::getFunc(const TensorFuncType type, - vector float_args, vector other_tensors) { - assert(module() != nullptr); - auto &module_tensors = module()->activation_tensors; - auto &activation_tensors_num = module()->activation_tensors_num; - if (module()->doLoad) { return; } - TensorFunction *func = impl_->backend_->funcCreate(type); - std::vector tensorPtrs = {module_tensors[impl_->name_].get()}; - for (auto &other_tensor : other_tensors) { tensorPtrs.push_back(other_tensor); } -#ifdef DEBUGOPTIME - auto start_t = mllm_time_us(); -#endif - switch (Tensor::tensor_status) { - case TENSOR_STATIC_INIT: { - func->setup({}, tensorPtrs, float_args); - break; - } - case TENSOR_STATIC_READY: { - func->execute({}, tensorPtrs, float_args); - break; - } - default: { - } - } - if (Backend::global_backends.size() == 1) { - for (auto input_tensor : tensorPtrs) { - if (activation_tensors_num.find(input_tensor->name()) != activation_tensors_num.end() - // && input_tensor->dimension() * 
input_tensor->sequence() > 0 - ) { - switch (Tensor::tensor_status) { - case TENSOR_STATIC_INIT: { - activation_tensors_num[input_tensor->name()] += 1; - break; - } - case TENSOR_STATIC_READY: { - activation_tensors_num[input_tensor->name()] -= 1; - break; - } - default: { - } - } - if (activation_tensors_num[input_tensor->name()] == 0 && module_tensors[input_tensor->name()]->sequence() > 1 - && module_tensors[input_tensor->name()]->ttype() != GRAPH_OUTPUT) { - module_tensors[input_tensor->name()]->free(); - // std::cout << input_tensor->name() << " |F" << std::endl; - } - } - } - } -#ifdef DEBUGOPTIME - if (Tensor::tensor_status == TENSOR_STATIC_READY) { - auto end_t = mllm_time_us(); - std::cout << " | " << Tensor::tensor_status - << " time: " << (end_t - start_t) / 1000.0F << "ms" << std::endl; - } -#endif -} - -std::vector> Tensor::getStaticFunc(vector out_names, - const TensorFuncType type, - vector float_args, - vector input_tensors) { - Module *module; - if (!input_tensors.empty()) { - module = input_tensors[0]->module(); - } else { - module = Module::llm_model_ptr; - } - assert(module != nullptr); - auto &module_tensors = module->activation_tensors; - auto &activation_tensors_num = module->activation_tensors_num; - auto *backend_h = Backend::global_backends[MLLM_CPU]; - if (!input_tensors.empty() && input_tensors[0]->impl_->backend_ != nullptr) { - backend_h = input_tensors[0]->backend(); - } - for (auto out_name : out_names) { - if (module_tensors.find(out_name) == module_tensors.end()) { - module_tensors[out_name] = std::make_shared(backend_h); - module_tensors[out_name]->setName(out_name); - module_tensors[out_name]->setModule(module); - activation_tensors_num[out_name] = 0; - } - } - if (module->doLoad) { - std::vector> results; - for (auto out_name : out_names) { results.push_back(*module_tensors[out_name]); } - return results; - } - TensorFunction *func = backend_h->funcCreate(type); - // std::vector tensorPtrs; - // for (auto input_tensor : 
input_tensors){ tensorPtrs.push_back(module_tensors[input_tensor->name()].get()); } - std::vector outPtrs; - for (auto out_name : out_names) { outPtrs.push_back(module_tensors[out_name].get()); } -#ifdef DEBUGOPTIME - auto start_t = mllm_time_us(); -#endif - switch (Tensor::tensor_status) { - case TENSOR_STATIC_INIT: { - func->setup(outPtrs, input_tensors, float_args); - break; - } - case TENSOR_STATIC_READY: { - func->execute(outPtrs, input_tensors, float_args); - break; - } - case TENSOR_STATIC_TRACE: { - if (backend_h->type() == BackendType::MLLM_CPU) { - Tracer::addTensorFunction(func, input_tensors, outPtrs, float_args); - } - break; - } - default: { - } - } - if (Backend::global_backends.size() == 1) { - for (auto input_tensor : input_tensors) { - if (activation_tensors_num.find(input_tensor->name()) != activation_tensors_num.end()) { - switch (Tensor::tensor_status) { - case TENSOR_STATIC_INIT: { - activation_tensors_num[input_tensor->name()] += 1; - break; - } - case TENSOR_STATIC_READY: { - activation_tensors_num[input_tensor->name()] -= 1; - break; - } - default: { - } - } - if (activation_tensors_num[input_tensor->name()] == 0 && module_tensors[input_tensor->name()]->sequence() > 1 - && module_tensors[input_tensor->name()]->ttype() != GRAPH_OUTPUT) { - module_tensors[input_tensor->name()]->free(); - // std::cout << input_tensor->name() << " |S "<< std::endl;// << out_names[0] << std::endl; - } - } - } - } -#ifdef DEBUGOPTIME - if (Tensor::tensor_status == TENSOR_STATIC_READY) { - auto end_t = mllm_time_us(); - std::cout << out_names[0] << " | " << Tensor::tensor_status - << " time: " << (end_t - start_t) / 1000.0F << "ms" << std::endl; - } -#endif -#ifdef DEBUGSAVETENSOR - for (auto out_name : out_names) { module_tensors[out_name]->saveNData(); } -#endif - std::vector> results; - for (auto out_name : out_names) { results.push_back(*module_tensors[out_name]); } - return results; -} -*/ - -Tensor Tensor::operator+(float data) { - return runFunc({name() + 
"-add"}, FUNC_ADD, {data}, - {std::shared_ptr(this, [](Tensor *) {})})[0]; -} - -Tensor Tensor::operator-(float data) { - return runFunc({name() + "-sub"}, FUNC_SUB, {data}, - {std::shared_ptr(this, [](Tensor *) {})})[0]; -} - -Tensor Tensor::operator*(float data) { - return runFunc({name() + "-mul"}, FUNC_MUL, {data}, - {std::shared_ptr(this, [](Tensor *) {})})[0]; -} - -Tensor Tensor::operator/(float data) { - return runFunc({name() + "-div"}, FUNC_DIV, {data}, - {std::shared_ptr(this, [](Tensor *) {})})[0]; -} - -Tensor Tensor::operator/(double data) { - return runFunc({name() + "-div"}, FUNC_DIV, {static_cast(data)}, - {std::shared_ptr(this, [](Tensor *) {})})[0]; -} - -Tensor Tensor::operator/(int data) { - return runFunc({name() + "-div"}, FUNC_DIVINT, {static_cast(data)}, - {std::shared_ptr(this, [](Tensor *) {})})[0]; -} - -Tensor Tensor::operator+(Tensor other) { - return runFunc({name() + "-TTadd"}, FUNC_TTADD, {}, - {std::shared_ptr(this, [](Tensor *) {}), - std::shared_ptr(&other, [](Tensor *) {})})[0]; -} - -Tensor Tensor::operator-(Tensor other) { - return runFunc({name() + "-TTsub"}, FUNC_TTSUB, {}, - {std::shared_ptr(this, [](Tensor *) {}), - std::shared_ptr(&other, [](Tensor *) {})})[0]; -} - -Tensor Tensor::operator*(Tensor other) { - return runFunc({name() + "-TTmul"}, FUNC_TTMUL, {}, - {std::shared_ptr(this, [](Tensor *) {}), - std::shared_ptr(&other, [](Tensor *) {})})[0]; -} - -Tensor Tensor::operator/(Tensor other) { - return runFunc({name() + "-TTdiv"}, FUNC_TTDIV, {}, - {std::shared_ptr(this, [](Tensor *) {}), - std::shared_ptr(&other, [](Tensor *) {})})[0]; -} - -Tensor Tensor::mean(Chl axis) { - return runFunc({name() + "-mean"}, FUNC_MEAN, {(float)axis}, - {std::shared_ptr(this, [](Tensor *) {})})[0]; -} - -Tensor Tensor::view(int b, int h, int s, int d) { - return runFunc({name() + "-view"}, FUNC_VIEW, {(float)b, (float)h, (float)s, (float)d}, - {std::shared_ptr(this, [](Tensor *) {})}, true)[0]; -} - -Tensor Tensor::flatten(Chl 
axis_start, Chl axis_end) { - return runFunc({name() + "-flatten"}, FUNC_FLATTEN, {(float)axis_start, (float)axis_end}, - {std::shared_ptr(this, [](Tensor *) {})}, true)[0]; -} - -Tensor Tensor::transpose(vector> axiss) { - vector axis_s; - for (auto &axis : axiss) { - axis_s.push_back((float)axis.first); - axis_s.push_back((float)axis.second); - } - return runFunc({name() + "-transpose"}, FUNC_TRANPOSE, axis_s, - {std::shared_ptr(this, [](Tensor *) {})}, master_tensor_ == nullptr)[0]; -} - -Tensor Tensor::clip(vector b, vector h, vector s, vector d) { - vector axis_s; - axis_s.push_back(b.size()); - axis_s.push_back(h.size()); - axis_s.push_back(s.size()); - axis_s.push_back(d.size()); - for (auto &axis : b) { axis_s.push_back((float)axis); } - for (auto &axis : h) { axis_s.push_back((float)axis); } - for (auto &axis : s) { axis_s.push_back((float)axis); } - for (auto &axis : d) { axis_s.push_back((float)axis); } - string name_su = "clip-"; - if (!(d.size() == 2 && b.empty() && h.empty() && s.empty())) { - for (auto as : axis_s) { - name_su += std::to_string(int(as)) + "_"; - } - } - return runFunc({name() + name_su}, FUNC_CLIP, axis_s, - {std::shared_ptr(this, [](Tensor *) {})})[0]; -} - -Tensor Tensor::clip(Chl keep_axis, vector b, vector h, vector s, vector d) { - vector axis_s = {(float)keep_axis}; - axis_s.push_back(b.size()); - axis_s.push_back(h.size()); - axis_s.push_back(s.size()); - axis_s.push_back(d.size()); - for (auto &axis : b) { axis_s.push_back((float)axis); } - for (auto &axis : h) { axis_s.push_back((float)axis); } - for (auto &axis : s) { axis_s.push_back((float)axis); } - for (auto &axis : d) { axis_s.push_back((float)axis); } - return runFunc({name() + "-clipaxis"}, FUNC_CLIPAXIS, axis_s, - {std::shared_ptr(this, [](Tensor *) {})})[0]; -} - -Tensor Tensor::clip(Tensor index, Chl dim) { - return runFunc({name() + "-cliptensor"}, FUNC_CLIPTENSOR, {(float)dim}, - {std::shared_ptr(this, [](Tensor *) {}), - std::shared_ptr(&index, [](Tensor *) 
{})})[0]; -} -Tensor Tensor::expand(int b, int h, int s, int d) { - return runFunc({name() + "-expand"}, FUNC_EXPPAND, {(float)b, (float)h, (float)s, (float)d}, - {std::shared_ptr(this, [](Tensor *) {})})[0]; -} - -Tensor Tensor::norm(int L_n) { - return runFunc({name() + "-norm"}, FUNC_NORM, {(float)L_n}, - {std::shared_ptr(this, [](Tensor *) {})})[0]; -} - -Tensor Tensor::where(float value, Chl axis) { - return runFunc({name() + "-where"}, FUNC_WHERE, {(float)value, (float)axis}, - {std::shared_ptr(this, [](Tensor *) {})})[0]; -} - -Tensor Tensor::index_put(Tensor value, Tensor indices, bool accumulate) { - return runFunc({name() + "-index_put"}, FUNC_INDEX_PUT, {(float)accumulate}, - {std::shared_ptr(this, [](Tensor *) {}), - std::shared_ptr(&value, [](Tensor *) {}), - std::shared_ptr(&indices, [](Tensor *) {})}, - !accumulate)[0]; -} -void Tensor::scatter_reduce(Tensor value, Tensor indices) { - runFunc({name()}, FUNC_SCATTERREDUCE, {}, - {std::shared_ptr(this, [](Tensor *) {}), - std::shared_ptr(&value, [](Tensor *) {}), - std::shared_ptr(&indices, [](Tensor *) {})})[0]; -} - -Tensor Tensor::cat(vector input_tensors, Chl axis) { - Module *module = input_tensors[0].module(); - vector> inputs = {}; - for (auto &input_tensor : input_tensors) { - inputs.push_back(std::shared_ptr(&input_tensor, [](Tensor *) {})); - } - return runFunc({input_tensors[0].name() + "-cat"}, FUNC_CAT, {(float)axis}, inputs)[0]; -} - -Tensor Tensor::mm(Tensor input0, Tensor input1) { - Module *module = input0.module(); - string nname = input0.name() + "-mm-" + input1.name(); - return runFunc( - {nname}, FUNC_MM, {}, - {std::shared_ptr(&input0, [](Tensor *) {}), - std::shared_ptr(&input1, [](Tensor *) {})})[0]; -} - -Tensor Tensor::range(int start, int end) { - return runFunc({"range-" + std::to_string(start) + "-" + std::to_string(end)}, FUNC_RANGE, - {(float)start, (float)end}, {})[0]; -} - -vector Tensor::split(Tensor input, std::vector each_dims, - Chl split_dim, int same_dim_size) { - 
vector next_names; - std::vector args; - for (int i = 0; i < each_dims.size(); ++i) { - args.push_back(each_dims[i]); - next_names.push_back(input.name() + ".split-" + std::to_string(i)); - } - args.push_back(split_dim); - args.push_back(same_dim_size); - Module *module = input.module(); - return runFunc(next_names, FUNC_SPLIT, args, - {std::shared_ptr(&input, [](Tensor *) {})}); -} - -vector Tensor::topk(Tensor input, int k, Chl dim) { - Module *module = input.module(); - return runFunc({input.name() + "-top" + std::to_string(k) + "-value", - input.name() + "-top" + std::to_string(k) + "-idx"}, - FUNC_TOPK, - {(float)k, (float)dim}, - {std::shared_ptr(&input, [](Tensor *) {})}); -} -Tensor Tensor::sum(Chl dim) { - return runFunc({name() + "sum"}, FUNC_SUM, {(float)dim}, - {std::shared_ptr(this, [](Tensor *) {})})[0]; -} -Tensor Tensor::argsort() { - return runFunc({name() + "argsort"}, FUNC_ARGSORT, {}, - {std::shared_ptr(this, [](Tensor *) {})})[0]; -} -Tensor Tensor::bincount() { - return runFunc({name() + "bincount"}, FUNC_BINCOUNT, {}, - {std::shared_ptr(this, [](Tensor *) {})})[0]; -} -Tensor Tensor::repeat(Chl dim, int dim_size) { - return runFunc({name() + "repeat"}, FUNC_REPEAT, {(float)dim, (float)dim_size}, - {std::shared_ptr(this, [](Tensor *) {})})[0]; -} -Tensor Tensor::zero_like(Tensor input) { - Module *module = input.module(); - return runFunc({input.name() + "-zero_like"}, FUNC_LIKE, {0.0}, - {std::shared_ptr(&input, [](Tensor *) {})})[0]; -} -Tensor Tensor::apply_rotary_pos_emb_vision(Tensor input, Tensor rotary_pos_emb) { - Module *module = input.module(); - return runFunc({input.name() + "-apply_rotary_pos_emb"}, FUNC_APPLY_VISIOROPE, - {}, - {std::shared_ptr(&input, [](Tensor *) {}), - std::shared_ptr(&rotary_pos_emb, [](Tensor *) {})})[0]; -} - -Tensor Tensor::fuyu_gather_embd(Tensor word, Tensor image_patches, Tensor image_patches_indices) { - Module *module = word.module(); - return runFunc({word.name() + ".fuyu_gather_embd"}, 
FUNC_FUYU_GATHER_EMBD, - {}, - {std::shared_ptr(&word, [](Tensor *) {}), - std::shared_ptr(&image_patches, [](Tensor *) {}), - std::shared_ptr(&image_patches_indices, [](Tensor *) {})}, - true)[0]; -} - -Tensor Tensor::phi3v_hd_merge(Tensor input, int h_crop, int w_crop) { - Module *module = input.module(); - return runFunc({input.name() + ".phi3v_hd_merge"}, FUNC_PHI3V_HD_MERGE, - {(float)h_crop, (float)w_crop}, - {std::shared_ptr(&input, [](Tensor *) {})})[0]; -} - -} // namespace mllm \ No newline at end of file diff --git a/src/backends/cpu/CMakeLists.txt b/src/backends/cpu/CMakeLists.txt deleted file mode 100644 index 1aa021689..000000000 --- a/src/backends/cpu/CMakeLists.txt +++ /dev/null @@ -1,71 +0,0 @@ -# CPU Backend -file(GLOB MLLM_CPU_SRC - ${CMAKE_CURRENT_LIST_DIR}/*.cpp - ${CMAKE_CURRENT_LIST_DIR}/*.hpp - ${CMAKE_CURRENT_LIST_DIR}/compute/*.cpp - ${CMAKE_CURRENT_LIST_DIR}/compute/*.hpp - ${CMAKE_CURRENT_LIST_DIR}/function/*.hpp - ${CMAKE_CURRENT_LIST_DIR}/op/*.hpp - ${CMAKE_CURRENT_LIST_DIR}/op/*.cpp - ${CMAKE_CURRENT_LIST_DIR}/quantize/*.hpp - ${CMAKE_CURRENT_LIST_DIR}/quantize/*.cpp - ${CMAKE_CURRENT_LIST_DIR}/type/*.cpp - ${CMAKE_CURRENT_LIST_DIR}/type/*.hpp -) - -if (MLLM_OPENMP) -find_package(OpenMP REQUIRED) -if(OpenMP_FOUND) - message(STATUS "found openmp") - set(CMAKE_C_FLAGS ${CMAKE_C_FLAGS} ${OPENMP_C_FLAGS}) - set(CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS} ${OPENMP_CXX_FLAGS}) -else() - message(FATAL_ERROR "openmp not found!") -endif() -endif() -if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64") - message(STATUS "ARM detected") - add_compile_options(-march=armv8.2-a+dotprod+fp16+fp16fml) -elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "^(x86_64|i686|AMD64)$") - message(STATUS "x86_64 detected") -add_compile_options(-mavx2) -add_compile_options(-march=native) -endif() - -if(${MLLM_ENABLE_PYTHON}) -add_library( - MLLM_CPU - SHARED - ${MLLM_CPU_SRC} -) -else() -add_library( - MLLM_CPU - OBJECT - ${MLLM_CPU_SRC} -) 
-endif() - -target_include_directories( - MLLM_CPU - PRIVATE - ${CMAKE_CURRENT_LIST_DIR} -) - -if(OpenMP_FOUND) - message(STATUS "found openmp") - if(ARM AND NOT APK) - message(STATUS "[ARM] found openmp") - target_compile_options(MLLM_CPU PUBLIC -fopenmp) - target_link_libraries(MLLM_CPU PUBLIC -fopenmp -static-openmp) - else() - target_link_libraries(MLLM_CPU - PUBLIC - OpenMP::OpenMP_CXX - ) - endif() -endif() - -target_link_libraries(MLLM_CPU PUBLIC fmt::fmt-header-only) - -set_target_properties(MLLM_CPU PROPERTIES WINDOWS_EXPORT_ALL_SYMBOLS TRUE) \ No newline at end of file diff --git a/src/backends/cpu/CPUBackend.cpp b/src/backends/cpu/CPUBackend.cpp deleted file mode 100644 index df4e81ed6..000000000 --- a/src/backends/cpu/CPUBackend.cpp +++ /dev/null @@ -1,594 +0,0 @@ -#include "CPUBackend.hpp" -#include -#include -#include -#include "Backend.hpp" -#include "OpDefined.hpp" -#include "Types.hpp" -#include "memory/SystemMemoryManager.hpp" -#include -#include "Layer.hpp" - -#include "op/CPUHeadLinear.hpp" -#include "op/CPULinearInt8.hpp" -#include "op/CPUNTKRoPE.hpp" -#include "op/CPUPoEmbedding.hpp" -#include "op/CPUSplitInput.hpp" -#include "op/CPUView.hpp" -#include "op/CPUAdd.hpp" -#include "op/CPUCausalMask.hpp" -#include "op/CPUCausalTreeMask.hpp" -#include "op/CPUSlidingWindowMask.hpp" -#include "op/CPUMatmul.hpp" -#include "op/CPURMSNorm.hpp" -#include "op/CPURoPE.hpp" -#include "op/CPURoPETree.hpp" -#include "op/CPUScale.hpp" -#include "op/CPUSiLU.hpp" -#include "op/CPUSoftMax.hpp" -#include "op/CPULinear.hpp" -#include "op/CPUEmbedding.hpp" -#include "op/CPUMul.hpp" -#include "op/CPUKVCache.hpp" -#include "op/CPUReLU.hpp" -#include "op/CPUReLU2.hpp" -#include "op/CPUGELU.hpp" -#include "op/CPUSplit.hpp" -#include "op/CPULayerNorm.hpp" -#include "op/CPUGather.hpp" -#include "op/CPUConvolution2D.hpp" -#include "op/CPUAvgPool2D.hpp" -#include "op/CPUMaxPool2D.hpp" -#include "op/CPUConvolution3D.hpp" -#include "op/CPUVisionRoPE.hpp" -#include 
"op/CPUMultimodalRoPE.hpp" -#include "op/CPUParameter.hpp" -#include "op/CPUCat.hpp" -#include "op/CPUSubDim.hpp" -#include "op/CPUQuickGELU.hpp" -#include "op/CPUDivision.hpp" -#include "op/CPUNorm.hpp" -#include "op/CPUShape.hpp" -#include "op/CPUTranspose.hpp" -#include "op/CPUMean.hpp" -#include "op/CPURange.hpp" -#include "op/CPUWhere.hpp" -#include "op/CPUReplace.hpp" -#include "op/CPUPredictor.hpp" -#include "op/CPUSparseIdLinear.hpp" -#include "op/CPUSparseLinear.hpp" -#include "op/CPUElasticLinear.hpp" -#include "op/CPUQuantize.hpp" -#include "op/CPUMergeOutput.hpp" -#include "op/CPULinearINT8Shadow.hpp" -#include "op/CPUIRoPE.hpp" -#include "op/CPUPosition.hpp" - -#include "op/CPUKVCacheNPU.hpp" -#include "op/CPUKVCacheXp.hpp" - -#include "function/CPUBinaryFunc.hpp" -#include "function/CPUCatFunc.hpp" -#include "function/CPUClipFunc.hpp" -#include "function/CPUExpandFunc.hpp" -#include "function/CPUFlattenFunc.hpp" -#include "function/CPUMatmulFunc.hpp" -#include "function/CPUMeanFunc.hpp" -#include "function/CPUNormFunc.hpp" -#include "function/CPURangeFunc.hpp" -#include "function/CPUSplitFunc.hpp" -#include "function/CPUSumFunc.hpp" -#include "function/CPUTopkFunc.hpp" -#include "function/CPUTransposeFunc.hpp" -#include "function/CPUViewFunc.hpp" -#include "function/CPUWhereFunc.hpp" -#include "function/CPUIndexPutFunc.hpp" -#include "function/CPUArgSortFunc.hpp" -#include "function/CPUBinCountFunc.hpp" -#include "function/CPURepeatFunc.hpp" -#include "function/CPULikeFunc.hpp" -#include "function/CPUScatterReduceFunc.hpp" -#include "function/CPUApplyVisionRoPE.hpp" - -#include "function/CPUFuyuGatherEmbdFunc.hpp" -#include "function/CPUPhi3VhdmergeFunc.hpp" - -namespace mllm { -class CPUBackendCreator : public BackendCreator { - Backend *create(BackendConfig config) { - shared_ptr mm = nullptr; - switch (config.memory) { - case BackendConfig::Memory_High: - // mm = std::make_shared(); - mm = std::make_shared(); // todomm - break; - default: - // mm = 
std::make_shared(); - mm = std::make_shared(); // todomm - break; - } - return new CPUBackend(mm); - }; -}; - -void registerCPUBackendCreator() { - InsertBackendCreatorMap(MLLM_CPU, std::make_shared()); -} - -CPUBackend::CPUBackend(shared_ptr &mm) : - Backend(mm) { - type_ = BackendType::MLLM_CPU; - registerOps(); - registerFuncs(); -} - -Op *CPUBackend::opCreate(const OpParam &op_param, string name, int threadCount) { - OpType optype = OpType(op_param.find("type")->second); - auto iter = map_creator_.find(optype); - if (iter == map_creator_.end()) { - std::cout << "CPU Op Don't support type : " << name << std::endl; - return nullptr; - } - Op *exe = iter->second->create(op_param, this, name, cpu_threads); - return exe; -} -void CPUBackend::registerOps() { - addCreator(PARAMETER, (CPUBackend::Creator *)(new CPUParameterCreator())); - addCreator(ADD, (CPUBackend::Creator *)(new CPUAddCreator())); - addCreator(CAUSALMASK, (CPUBackend::Creator *)(new CPUCausalMaskCreator())); - addCreator(CAUSALTREEMASK, (CPUBackend::Creator *)(new CPUCausalTreeMaskCreator())); - addCreator(SLIDINGWINDOWMASK, (CPUBackend::Creator *)(new CPUSlidingWindowMaskCreator())); - addCreator(MATMUL, (CPUBackend::Creator *)(new CPUMatmulCreator())); - addCreator(RMSNORM, (CPUBackend::Creator *)(new CPURMSNormCreator())); - addCreator(ROPE, (CPUBackend::Creator *)(new CPURoPECreator())); - addCreator(ROPETREE, (CPUBackend::Creator *)(new CPURoPETreeCreator())); - addCreator(SCALE, (CPUBackend::Creator *)(new CPUScaleCreator())); - addCreator(SILU, (CPUBackend::Creator *)(new CPUSiLUCreator())); - addCreator(SOFTMAX, (CPUBackend::Creator *)(new CPUSoftMaxCreator())); - addCreator(LINEAR, (CPUBackend::Creator *)(new CPULinearCreator())); - addCreator(LINEARINT8, (CPUBackend::Creator *)(new CPULinearInt8Creator())); - addCreator(EMBEDDING, (CPUBackend::Creator *)(new CPUEmbeddingCreator())); - addCreator(MUL, (CPUBackend::Creator *)(new CPUMulCreator())); - addCreator(VIEW, (CPUBackend::Creator 
*)(new CPUViewCreator())); - addCreator(KVCACHE, (CPUBackend::Creator *)(new CPUKVCacheCreator())); - addCreator(KVCACHENPU, (CPUBackend::Creator *)(new CPUKVCacheNPUCreator())); - addCreator(RELU, (CPUBackend::Creator *)(new CPUReLUCreator())); - addCreator(RELU2, (CPUBackend::Creator *)(new CPUReLU2Creator())); - addCreator(OP_GELU, (CPUBackend::Creator *)(new CPUGELUCreator())); - addCreator(QUICKGLUE, (CPUBackend::Creator *)(new CPUQuickGELUCreator())); - addCreator(LAYERNORM, (CPUBackend::Creator *)(new CPULayerNormCreator())); - addCreator(SPLIT, (CPUBackend::Creator *)(new CPUSplitCreator())); - addCreator(GATHER, (CPUBackend::Creator *)(new CPUGatherCreator())); - addCreator(CONVOLUTION2D, (CPUBackend::Creator *)(new CPUConvolution2DCreator())); - addCreator(AVGPOOL2D, (CPUBackend::Creator *)(new CPUAvgPoolCreator())); - addCreator(MAXPOOL2D, (CPUBackend::Creator *)(new CPUMaxPoolCreator())); - addCreator(CONVOLUTION3D, (CPUBackend::Creator *)(new CPUConvolution3DCreator())); - addCreator(VISIONROPE, (CPUBackend::Creator *)(new CPUVisionRoPECreator())); - addCreator(MULTIMODALROPE, (CPUBackend::Creator *)(new CPUMultimodalRoPECreator())); - // addCreator(CAT, (CPUBackend::Creator *)(new CPUCatCreator())); - addCreator(TRANSPOSE, (CPUBackend::Creator *)(new CPUTransposeCreator())); - addCreator(SUBDIM, (CPUBackend::Creator *)(new CPUSubDimCreator())); - addCreator(DIVISION, (CPUBackend::Creator *)(new CPUDivisionCreator())); - addCreator(NORM, (CPUBackend::Creator *)(new CPUNormCreator())); - addCreator(SHAPE, (CPUBackend::Creator *)(new CPUShapeCreator())); - addCreator(MEAN, (CPUBackend::Creator *)(new CPUMeanCreator())); - addCreator(RANGE, (CPUBackend::Creator *)(new CPURangeCreator())); - addCreator(WHERE, (CPUBackend::Creator *)(new CPUWhereCreator())); - addCreator(REPLACE, (CPUBackend::Creator *)(new CPUReplaceCreator())); - addCreator(PREDICTOR, (CPUBackend::Creator *)(new CPUPredictorCreator())); - addCreator(SPARSELINEAR, (CPUBackend::Creator 
*)(new CPUSparseLinearCreator())); - addCreator(SPARSEIDLINEAR, (CPUBackend::Creator *)(new CPUSparseIdLinearCreator())); - addCreator(ELASTICLINEAR, (CPUBackend::Creator *)(new CPUElasticLinearCreator())); - addCreator(POSITION, (CPUBackend::Creator *)(new CPUPositionCreator())); - addCreator(QUANTIZE, (CPUBackend::Creator *)(new CPUQuantizeCreator())); - addCreator(MERGEOUTPUT, (CPUBackend::Creator *)(new CPUMergeOutputCreator())); - addCreator(SPLITINPUT, (CPUBackend::Creator *)(new CPUSplitInputCreator())); - addCreator(LINEARINT8SHADOW, (CPUBackend::Creator *)(new CPULinearINT8ShadowCreator())); - addCreator(IROPE, (CPUBackend::Creator *)(new CPUIRoPECreator())); - addCreator(XP_KVCACHE, (CPUBackend::Creator *)(new CPUKVCacheXpCreator())); - addCreator(NTKROPE, (CPUBackend::Creator *)(new CPUNTKRoPECreator())); - addCreator(HEADLINEAR, (CPUBackend::Creator *)(new CPUHeadLinearCreator())); -} -TensorFunction *CPUBackend::funcCreate(const TensorFuncType type) { - auto iter = map_function_.find(type); - if (iter == map_function_.end()) { - std::cout << "CPU funcCreate Don't support type : " << type << std::endl; - return nullptr; - } - return iter->second; -} - -void CPUBackend::registerFuncs() { - map_function_[TensorFuncType::FUNC_ADD] = new CPUaddFunction(); - map_function_[TensorFuncType::FUNC_SUB] = new CPUsubFunction(); - map_function_[TensorFuncType::FUNC_MUL] = new CPUmulFunction(); - map_function_[TensorFuncType::FUNC_DIV] = new CPUdivFunction(); - map_function_[TensorFuncType::FUNC_DIVINT] = new CPUdivintFunction(); - map_function_[TensorFuncType::FUNC_TTADD] = new CPUaddTwoFunction(); - map_function_[TensorFuncType::FUNC_TTSUB] = new CPUsubTwoFunction(); - map_function_[TensorFuncType::FUNC_TTMUL] = new CPUmulTwoFunction(); - map_function_[TensorFuncType::FUNC_TTDIV] = new CPUdivTwoFunction(); - map_function_[TensorFuncType::FUNC_MM] = new CPUmmFunction(); - map_function_[TensorFuncType::FUNC_NORM] = new CPUnormFunction(); - 
map_function_[TensorFuncType::FUNC_MEAN] = new CPUmeanFunction(); - map_function_[TensorFuncType::FUNC_CAT] = new CPUcatFunction(); - map_function_[TensorFuncType::FUNC_VIEW] = new CPUviewFunction(); - map_function_[TensorFuncType::FUNC_TRANPOSE] = new CPUtransposeFunction(); - map_function_[TensorFuncType::FUNC_FLATTEN] = new CPUflattenFunction(); - map_function_[TensorFuncType::FUNC_CLIP] = new CPUclipFunction(); - map_function_[TensorFuncType::FUNC_CLIPAXIS] = new CPUclipaxisFunction(); - map_function_[TensorFuncType::FUNC_CLIPTENSOR] = new CPUcliptensorFunction(); - map_function_[TensorFuncType::FUNC_RANGE] = new CPURangeFunction(); - map_function_[TensorFuncType::FUNC_WHERE] = new CPUwhereFunction(); - map_function_[TensorFuncType::FUNC_INDEX_PUT] = new CPUIndexPutFunction(); - map_function_[TensorFuncType::FUNC_SPLIT] = new CPUsplitFunction(); - map_function_[TensorFuncType::FUNC_SUM] = new CPUsumFunction(); - map_function_[TensorFuncType::FUNC_TOPK] = new CPUtopkFunction(); - map_function_[TensorFuncType::FUNC_EXPPAND] = new CPUexpandFunction(); - map_function_[TensorFuncType::FUNC_ARGSORT] = new CPUargsortFunction(); - map_function_[TensorFuncType::FUNC_BINCOUNT] = new CPUbincountFunction(); - map_function_[TensorFuncType::FUNC_REPEAT] = new CPUrepeatFunction(); - map_function_[TensorFuncType::FUNC_LIKE] = new CPUlikeFunction(); - map_function_[TensorFuncType::FUNC_SCATTERREDUCE] = new CPUScatterReduceFunction(); - map_function_[TensorFuncType::FUNC_APPLY_VISIOROPE] = new CPUApplyVisionRoPEFunction(); - // models use only - map_function_[TensorFuncType::FUNC_FUYU_GATHER_EMBD] = new CPUFuyuGatherEmbdFunc(); - map_function_[TensorFuncType::FUNC_PHI3V_HD_MERGE] = new CPUPhi3VhdmergeFunction(); -}; - -int CPUBackend::cpu_threads = 4; - -std::vector CPUBackend::runFunc( - std::vector out_names, - TensorFuncType type, - std::vector float_args, - std::vector> input_tensors, - bool in_place) { - Module *module = input_tensors.empty() ? 
Module::llm_model_ptr : input_tensors[0]->module(); - auto &activation_tensors = module->activation_tensors; - assert(module != nullptr); - Backend *backend = input_tensors.empty() ? Backend::global_backends[MLLM_CPU] : input_tensors[0]->backend(); - TensorFunction *func = backend->funcCreate(type); - - if (module->doLoad) { - auto &activation_tensors_num = module->activation_tensors_num; - for (const auto &out_name : out_names) { - if (activation_tensors.find(out_name) == activation_tensors.end()) { - activation_tensors[out_name] = std::make_shared(backend); - activation_tensors[out_name]->setName(out_name); - activation_tensors[out_name]->setModule(module); - activation_tensors_num[out_name] = 0; - } - } - std::vector> inPtrs; - for (auto &t : input_tensors) inPtrs.push_back(activation_tensors[t->name()]); - std::vector> outPtrs; - for (auto &name : out_names) outPtrs.push_back(activation_tensors[name]); - func->setUp(outPtrs, inPtrs, float_args); - std::vector results; - for (auto &name : out_names) results.push_back(*activation_tensors[name]); - return results; - } - -#ifdef DEBUGOPTIME - auto start_t = mllm_time_us(); -#endif - std::vector> out_tensors; - if (in_place) { - for (size_t i = 0; i < input_tensors.size() && i < out_names.size(); ++i) { - input_tensors[i]->setName(out_names[i]); - out_tensors.push_back(input_tensors[i]); - } - } else { - if (input_tensors.size() == 1 && !input_tensors[0]->aggregatedTensors().empty()) { - auto aggregatedTensorsSize = input_tensors[0]->aggregatedTensors().size(); - for (int i = 0; i < aggregatedTensorsSize; i++) { - out_tensors.push_back(input_tensors[0]->aggregatedTensors()[i]); - } - } else { - for (auto out_name : out_names) { - auto out_tensor = std::make_shared(backend); - out_tensor->setName(out_name); - out_tensor->setModule(module); - auto it = activation_tensors.find(out_name); - if (it != activation_tensors.end() && out_tensor->name().find("-transpose") == std::string::npos - && out_tensor->ctype() != 
it->second->ctype()) { - out_tensor->chls() = it->second->chls(); - out_tensor->setCtype(it->second->ctype()); - } - out_tensors.push_back(out_tensor); - } - } - } - func->reshape(out_tensors, input_tensors, float_args); - for (auto &out_tensor : out_tensors) { - if (activation_tensors.find(out_tensor->name()) != activation_tensors.end() - && !activation_tensors[out_tensor->name()]->aggregatedTensors().empty()) { - // 存在aggregatedTensors - vector> shared_outputs = {}; - auto split_dim = activation_tensors[out_tensor->name()]->aggregatedDim(); - for (int id = 0; id < activation_tensors[out_tensor->name()]->aggregatedTensors().size(); id++) { - auto shared_ot = std::make_shared(backend); - shared_ot->setName(out_tensor->name() + ".split-" + std::to_string(id)); - shared_ot->setModule(module); - auto ot = activation_tensors[out_tensor->name()]->aggregatedTensors()[id]; - shared_ot->setCtype(ot->ctype()); - switch (split_dim) { - case Chl::HEAD: { - shared_ot->reshape(out_tensor->batch(), ot->head(), out_tensor->sequence(), out_tensor->dimension()); - break; - } - case Chl::SEQUENCE: { - shared_ot->reshape(out_tensor->batch(), out_tensor->head(), ot->sequence(), out_tensor->dimension()); - break; - } - case Chl::DIMENSION: { - shared_ot->reshape(out_tensor->batch(), out_tensor->head(), out_tensor->sequence(), ot->dimension()); - break; - } - case Chl::D_HD: - case Chl::HD: { - shared_ot->reshape(out_tensor->batch(), ot->head(), out_tensor->sequence(), ot->dimension()); - break; - } - default: { - break; - } - } - if (activation_tensors[shared_ot->name()]->masterTensor() != nullptr - && activation_tensors[shared_ot->name()]->masterTensor()->name().find("Cache") != std::string::npos) { - auto cache_seq_len_ = activation_tensors[shared_ot->name()]->shapeOffset()[2]; - if (shared_ot->name().find("cache") == std::string::npos) { // KVcahe的输出不设置,只有输入设置 - cache_seq_len_ = activation_tensors[shared_ot->name()]->masterTensor()->cache_seq_len_; - auto cpuBackend = 
dynamic_cast(backend); - if (cpuBackend->isUsingDraft()) { - unsigned int last_draft_length = cpuBackend->getLastDraftLength(); - const std::vector &last_verified_position_ids = cpuBackend->getLastVerifiedPositionIds(); - cache_seq_len_ = cache_seq_len_ - (last_draft_length) + last_verified_position_ids.size(); - } - } - shared_ot->setDtype(activation_tensors[shared_ot->name()]->masterTensor()->dtype()); - // masterTensor() 是Cache所以shape没有问题 - shared_ot->shallowCopyFrom(activation_tensors[shared_ot->name()]->masterTensor(), false, {0, 0, cache_seq_len_, 0}); - } else { - shared_ot->alloc(); - } - shared_outputs.push_back(shared_ot); - } - out_tensor->addTensors(shared_outputs, split_dim); - } else if (activation_tensors.find(out_tensor->name()) != activation_tensors.end() - && activation_tensors[out_tensor->name()]->masterTensor() != nullptr - && activation_tensors[out_tensor->name()]->masterTensor()->name().find("Cache") != std::string::npos) { - // output_tensor的master是KVCache - auto cache_seq_len_ = activation_tensors[out_tensor->name()]->shapeOffset()[2]; - if (out_tensor->name().find("cache") == std::string::npos) { // KVcahe的输出不设置,只有输入设置 - cache_seq_len_ = activation_tensors[out_tensor->name()]->masterTensor()->cache_seq_len_; - auto cpuBackend = dynamic_cast(backend); - if (cpuBackend->isUsingDraft()) { - unsigned int last_draft_length = cpuBackend->getLastDraftLength(); - const std::vector &last_verified_position_ids = cpuBackend->getLastVerifiedPositionIds(); - cache_seq_len_ = cache_seq_len_ - (last_draft_length) + last_verified_position_ids.size(); - } - } - out_tensor->setDtype(activation_tensors[out_tensor->name()]->masterTensor()->dtype()); - out_tensor->shallowCopyFrom(activation_tensors[out_tensor->name()]->masterTensor(), false, {0, 0, cache_seq_len_, 0}); - } else { - out_tensor->alloc(); - } - } - func->execute(out_tensors, input_tensors, float_args); -#ifdef DEBUGOPTIME - auto end_t = mllm_time_us(); - std::cout << out_names[0] << " | time: " << 
(end_t - start_t) / 1000.0F << "ms" << std::endl; -#endif - vector results; - for (const auto &out_tensor : out_tensors) { results.push_back(*out_tensor); } - return results; -} - -std::vector CPUBackend::runLayer(Layer *layer, std::vector inputs, int N) { - Module *module = inputs.empty() ? Module::llm_model_ptr : inputs[0].module(); - map> &activation_tensors = module->activation_tensors; - bool do_init = false; - if (module->doLoad || !layer->inited_loaded) { - // set backend to current module device and try to create op - // use Module::tmp_device only when creating the op as the recersive module backend only handled in load and init stage - layer->backend_ = Backend::global_backends[MLLM_CPU]; - do_init = !layer->inited_loaded; - if (layer->op_ == nullptr) { - layer->op_ = layer->backend_->opCreate(layer->param_, layer->name_); - } - if (module->doLoad) { - layer->op_->load(*module->loader); - layer->inited_loaded = true; - } else if (layer->loaded_param) { - layer->inited_loaded = layer->loaded_param; - } else { - if (!layer->inited_loaded) { - auto empty_loader = new ParamLoader(""); - layer->op_->load(*empty_loader); - layer->inited_loaded = true; - } - } - vector out_names = {}; - int count = (N > 1) ? N : 1; - for (int i = 0; i < count; ++i) { - std::string out_name = (N > 1) ? "out-" + layer->op_->name() + "-" + std::to_string(i) : "out-" + layer->op_->name(); - out_names.push_back(out_name); - if (activation_tensors.find(out_name) == activation_tensors.end()) { - activation_tensors[out_name] = std::make_shared(layer->backend_); - activation_tensors[out_name]->setName(out_name); - activation_tensors[out_name]->setModule(module); - } - } - if (module->doLoad) { - // input_tensors - vector> inPtrs; - for (auto &input : inputs) { - inPtrs.push_back(input.shouldInGraphs() ? 
activation_tensors[input.name()] : std::shared_ptr(&input, [](Tensor *) {})); - } - // output_tensors - vector> outPtrs = {}; - for (auto &name : out_names) outPtrs.push_back(activation_tensors[name]); - layer->op_->setUp(inPtrs, outPtrs); - vector results = {}; - for (auto &name : out_names) results.push_back(*activation_tensors[name]); - return results; - } - } - // NEW START - -#ifdef DEBUGOPTIME - uint64_t time_start = mllm_time_us(); -#endif - vector> input_tensors; - for (auto &input : inputs) { - input_tensors.push_back(std::shared_ptr(&input, [](Tensor *) {})); - } - vector> out_tensors; - if (input_tensors.size() == 1 && !input_tensors[0]->aggregatedTensors().empty()) { - auto aggregatedTensorsSize = input_tensors[0]->aggregatedTensors().size(); - for (int i = 0; i < aggregatedTensorsSize; i++) { - out_tensors.push_back(input_tensors[0]->aggregatedTensors()[i]); - } - } else { - int count = (N > 1) ? N : 1; - for (int i = 0; i < count; ++i) { - std::string tensor_name = (N > 1) ? 
"out-" + layer->op_->name() + "-" + std::to_string(i) : "out-" + layer->op_->name(); - auto out_tensor = std::make_shared(layer->backend_); - out_tensor->setName(tensor_name); - out_tensor->setModule(module); - if (out_tensor->name().find("-transpose") == std::string::npos - && out_tensor->ctype() != activation_tensors.at(out_tensor->name())->ctype()) { - out_tensor->chls() = activation_tensors.at(out_tensor->name())->chls(); - out_tensor->setCtype(activation_tensors.at(out_tensor->name())->ctype()); - } - out_tensors.push_back(out_tensor); - } - } - // 直接使用 out_tensors 进行 reshape - layer->op_->reshape(input_tensors, out_tensors); - // 直接使用 out_tensors 进行 alloc - for (auto &out_tensor : out_tensors) { - if (activation_tensors.find(out_tensor->name()) != activation_tensors.end() - && !activation_tensors[out_tensor->name()]->aggregatedTensors().empty()) { - // 存在aggregatedTensors - vector> shared_outputs = {}; - auto split_dim = activation_tensors[out_tensor->name()]->aggregatedDim(); - for (int id = 0; id < activation_tensors[out_tensor->name()]->aggregatedTensors().size(); id++) { - auto shared_ot = std::make_shared(layer->backend_); - shared_ot->setName(out_tensor->name() + ".split-" + std::to_string(id)); - shared_ot->setModule(module); - auto ot = activation_tensors[out_tensor->name()]->aggregatedTensors()[id]; - shared_ot->setCtype(ot->ctype()); - switch (split_dim) { - case Chl::HEAD: { - shared_ot->reshape(out_tensor->batch(), ot->head(), out_tensor->sequence(), out_tensor->dimension()); - break; - } - case Chl::SEQUENCE: { - shared_ot->reshape(out_tensor->batch(), out_tensor->head(), ot->sequence(), out_tensor->dimension()); - break; - } - case Chl::DIMENSION: { - shared_ot->reshape(out_tensor->batch(), out_tensor->head(), out_tensor->sequence(), ot->dimension()); - break; - } - case Chl::D_HD: - case Chl::HD: { - shared_ot->reshape(out_tensor->batch(), ot->head(), out_tensor->sequence(), ot->dimension()); - break; - } - default: { - break; - } - } - if 
(activation_tensors[shared_ot->name()]->masterTensor() != nullptr - && activation_tensors[shared_ot->name()]->masterTensor()->name().find("Cache") != std::string::npos) { - auto cache_seq_len_ = activation_tensors[shared_ot->name()]->shapeOffset()[2]; - if (shared_ot->name().find("cache") == std::string::npos) { // KVcahe的输出不设置,只有输入设置 - cache_seq_len_ = activation_tensors[shared_ot->name()]->masterTensor()->cache_seq_len_; - auto cpuBackend = dynamic_cast(layer->backend_); - if (cpuBackend->isUsingDraft()) { - unsigned int last_draft_length = cpuBackend->getLastDraftLength(); - const std::vector &last_verified_position_ids = cpuBackend->getLastVerifiedPositionIds(); - cache_seq_len_ = cache_seq_len_ - (last_draft_length) + last_verified_position_ids.size(); - } - } - shared_ot->setDtype(activation_tensors[shared_ot->name()]->masterTensor()->dtype()); - // masterTensor() 是Cache所以shape没有问题 - shared_ot->shallowCopyFrom(activation_tensors[shared_ot->name()]->masterTensor(), false, {0, 0, cache_seq_len_, 0}); - } else { - shared_ot->alloc(); - } - shared_outputs.push_back(shared_ot); - } - out_tensor->addTensors(shared_outputs, split_dim); - } else if (activation_tensors.find(out_tensor->name()) != activation_tensors.end() - && activation_tensors[out_tensor->name()]->masterTensor() != nullptr - && activation_tensors[out_tensor->name()]->masterTensor()->name().find("Cache") != std::string::npos) { - // output_tensor的master是KVCache - auto cache_seq_len_ = activation_tensors[out_tensor->name()]->shapeOffset()[2]; - if (out_tensor->name().find("cache") == std::string::npos) { // KVcahe的输出不设置,只有输入设置 - cache_seq_len_ = activation_tensors[out_tensor->name()]->masterTensor()->cache_seq_len_; - auto cpuBackend = dynamic_cast(layer->backend_); - if (cpuBackend->isUsingDraft()) { - unsigned int last_draft_length = cpuBackend->getLastDraftLength(); - const std::vector &last_verified_position_ids = cpuBackend->getLastVerifiedPositionIds(); - cache_seq_len_ = cache_seq_len_ - 
(last_draft_length) + last_verified_position_ids.size(); - } - } - out_tensor->setDtype(activation_tensors[out_tensor->name()]->masterTensor()->dtype()); - out_tensor->shallowCopyFrom(activation_tensors[out_tensors[0]->name()]->masterTensor(), false, {0, 0, cache_seq_len_, 0}); - } else { - out_tensor->setDtype(MLLM_TYPE_F32); - out_tensor->alloc(); - } - } - // 直接使用 out_tensors 进行 execute - layer->op_->execute(input_tensors, out_tensors); - -#ifdef DEBUGOPTIME - uint64_t time_end = mllm_time_us(); - double inference_time_ = (time_end - time_start) / 1000.0F; // ms - std::cout << op_->name() << " | time: " << inference_time_ << "ms" << std::endl; -#endif - // 将 shared_ptr 转换为 Tensor 返回 - vector results; - for (const auto &out_tensor : out_tensors) { results.push_back(*out_tensor); } - return results; -} -std::vector CPUBackend::runForward(Module *module, std::vector inputs, std::vector args) { - if (mllm::Module::llm_model_ptr && mllm::Module::llm_model_ptr->doLoad) { - auto outputs = module->Forward(inputs, args); - return outputs; - } - uint64_t time_start, time_end; - bool ouilter_flag = (inputs[0].ttype() == TensorType::INPUT_TENSOR); - if (ouilter_flag) { - for (int i = 0; i < inputs.size(); i++) { - auto &input = inputs[i]; - input.setModule(module); - input.setTtype(TensorType::NORMAL_TENSOR); - } - mllm::Module::llm_model_ptr = module; - if (module->prefilling_token_size_ == 0) { // first time init - module->prefilling_token_size_ = inputs[0].sequence(); - } else if (module->decoding_token_size_ == 0) { - module->decoding_token_size_ = inputs[0].sequence(); - } - time_start = mllm_time_us(); - } - - // Module setUp & execute - auto output = module->Forward(inputs, args); - - if (ouilter_flag) { - time_end = mllm_time_us(); - double inference_time_ = (time_end - time_start) / 1000.0F; // ms - module->inference_times_.push_back(inference_time_); - mllm::Module::llm_model_ptr->op_transposed_flag = true; - } - return output; -} -} // namespace mllm diff --git 
a/src/backends/cpu/compute/Convolution.hpp b/src/backends/cpu/compute/Convolution.hpp deleted file mode 100644 index c89418f6b..000000000 --- a/src/backends/cpu/compute/Convolution.hpp +++ /dev/null @@ -1,23 +0,0 @@ -// -// Created by Rongjie Yi on 23-12-18. -// - -#ifndef CONVOLUTION2D_HPP -#define CONVOLUTION2D_HPP - - - -#include "VecDot.hpp" -using namespace mllm; - -float ** reshape_conv2d_kernal_fp32(Tensor* kernel); - -void conv2d_fp32_VALID(Tensor* input, Tensor* output, float** k_new, int kernel_h, int kernel_w, bool support_bias, Tensor* bias, int stride_h, int stride_w, int thread_count=4); -void conv2d_fp32_SAME(Tensor* input, Tensor* output, float** k_new, int kernel_h, int kernel_w, bool support_bias, Tensor* bias, int stride_h, int stride_w, int padding_h, int padding_w, int thread_count=4); - - -float **reshape_conv3d_kernal_fp32(Tensor *kernel); - -void conv3d_fp32_VALID(Tensor* input, Tensor *output, float** k_new, int kernel_t, int kernel_h, int kernel_w, bool support_bias, Tensor* bias, int stride_t, int stride_h, int stride_w, int thread_count=4); - -#endif //CONVOLUTION2D_HPP diff --git a/src/backends/cpu/compute/GEMM_AArch64.hpp b/src/backends/cpu/compute/GEMM_AArch64.hpp deleted file mode 100644 index cb829cec6..000000000 --- a/src/backends/cpu/compute/GEMM_AArch64.hpp +++ /dev/null @@ -1,43 +0,0 @@ -#ifndef MLLM_GEMM_AARCH64_HPP -#define MLLM_GEMM_AARCH64_HPP - -#include "VecDot.hpp" -using namespace mllm; - -// Quantization -void quantize_q8_0_4x4(const float *__restrict x, void *__restrict y, int64_t k); -void quantize_q8_0_4x8(const float *__restrict x, void *__restrict y, int64_t k); - -void quantize_mat_q8_0(const float *__restrict x, void *__restrict y, int64_t nrows, int64_t n_per_row, int64_t blck_size_interleave); - -// Quantization utilizing an importance matrix (a.k.a. 
"Activation aWare Quantization") -size_t quantize_q4_0_4x4(const float *__restrict src, void *__restrict dst, int64_t nrows, int64_t n_per_row, const float *imatrix); -size_t quantize_q4_0_4x8(const float *__restrict src, void *__restrict dst, int64_t nrows, int64_t n_per_row, const float *imatrix); -size_t quantize_q4_0_8x8(const float *__restrict src, void *__restrict dst, int64_t nrows, int64_t n_per_row, const float *imatrix); - -//===----------------------------------------------------------------------===// -// GEMV -//===----------------------------------------------------------------------===// -void mllm_gemv_q4_0_4x4_q8_0(int n, float *__restrict s, size_t bs, const void *__restrict vx, const void *__restrict vy, int nr, int nc, const void *__restrict bias = nullptr); -void mllm_gemv_q4_0_4x8_q8_0(int n, float *__restrict s, size_t bs, const void *__restrict vx, const void *__restrict vy, int nr, int nc, const void *__restrict bias = nullptr); -void mllm_gemv_q4_0_8x8_q8_0(int n, float *__restrict s, size_t bs, const void *__restrict vx, const void *__restrict vy, int nr, int nc, const void *__restrict bias = nullptr); - -// NOTE: Do not add a bias flag in mllm_gemv_q4_0_4x4_q8_0. It may cause branch miss hit problem. 
-void _mllm_gemv_q4_0_4x4_q8_0_bias(int n, float *__restrict s, size_t bs, const void *__restrict vx, const void *__restrict vy, int nr, int nc, const void *__restrict bias); -void _mllm_gemv_q4_0_4x8_q8_0_bias(int n, float *__restrict s, size_t bs, const void *__restrict vx, const void *__restrict vy, int nr, int nc, const void *__restrict bias); -void _mllm_gemv_q4_0_8x8_q8_0_bias(int n, float *__restrict s, size_t bs, const void *__restrict vx, const void *__restrict vy, int nr, int nc, const void *__restrict bias); - -//===----------------------------------------------------------------------===// -// GEMM -//===----------------------------------------------------------------------===// -void mllm_gemm_q4_0_4x4_q8_0(int n, float *__restrict s, size_t bs, const void *__restrict vx, const void *__restrict vy, int nr, int nc, const void *__restrict bias = nullptr); -void mllm_gemm_q4_0_4x8_q8_0(int n, float *__restrict s, size_t bs, const void *__restrict vx, const void *__restrict vy, int nr, int nc, const void *__restrict bias = nullptr); -void mllm_gemm_q4_0_8x8_q8_0(int n, float *__restrict s, size_t bs, const void *__restrict vx, const void *__restrict vy, int nr, int nc, const void *__restrict bias = nullptr); -void _mllm_gemm_q4_0_4x4_q8_0_bias(int n, float *__restrict s, size_t bs, const void *__restrict vx, const void *__restrict vy, int nr, int nc, const void *__restrict bias); -void _mllm_gemm_q4_0_4x8_q8_0_bias(int n, float *__restrict s, size_t bs, const void *__restrict vx, const void *__restrict vy, int nr, int nc, const void *__restrict bias); -void _mllm_gemm_q4_0_8x8_q8_0_bias(int n, float *__restrict s, size_t bs, const void *__restrict vx, const void *__restrict vy, int nr, int nc, const void *__restrict bias); - -void quantize_row_q4_0_4x4(const float *__restrict x, void *__restrict y, int k); -void quantize_row_q4_0_4x4(const float *__restrict x, void *__restrict y, int k, int raw); - -#endif // MLLM_GEMM_HPP \ No newline at end of file diff 
--git a/src/backends/cpu/compute/VecDot.cpp b/src/backends/cpu/compute/VecDot.cpp deleted file mode 100644 index 9b55b71da..000000000 --- a/src/backends/cpu/compute/VecDot.cpp +++ /dev/null @@ -1,4422 +0,0 @@ -/* - * This code is based on ggml(https://github.com/ggerganov/ggml), - * please see https://github.com/ggerganov/ggml/blob/master/src/ggml.c - * ggml is licensed under MIT Copyright (c) 2022 Georgi Gerganov: - * - * MIT License - * Copyright (c) 2022 Georgi Gerganov - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "VecDot.hpp" - -#ifdef __AVX2__ -static void vec_dot_fp32_avx2(const int n, float *__restrict s, const float *__restrict x, const float *__restrict y) { - float sumf = 0.0F; - const int np = (n & ~(MLLM_F32_STEP - 1)); - - MLLM_F32_VEC sum[MLLM_F32_ARR] = {MLLM_F32_VEC_ZERO}; - - MLLM_F32_VEC ax[MLLM_F32_ARR]; - MLLM_F32_VEC ay[MLLM_F32_ARR]; - - for (int i = 0; i < np; i += MLLM_F32_STEP) { - for (int j = 0; j < MLLM_F32_ARR; j++) { - ax[j] = MLLM_F32_VEC_LOAD(x + i + j * MLLM_F32_EPR); - ay[j] = MLLM_F32_VEC_LOAD(y + i + j * MLLM_F32_EPR); - - sum[j] = MLLM_F32_VEC_FMA(sum[j], ax[j], ay[j]); - } - } - - // reduce sum0..sum3 to sum0 - MLLM_F32_VEC_REDUCE(sumf, sum); - - // leftovers - for (int i = np; i < n; ++i) { - sumf += x[i] * y[i]; - } - - *s = sumf; -} -#endif - -#ifdef __ARM_NEON -static void vec_dot_fp32_arm(const int n, float *__restrict s, const float *__restrict x, const float *__restrict y) { - float sumf = 0.0F; - const int np = (n & ~(16 - 1)); - - F32_VEC sum[4] = {vdupq_n_f32(0.0F)}; - - F32_VEC ax[F32_ARR]; - F32_VEC ay[F32_ARR]; - - for (int i = 0; i < np; i += F32_STEP) { - for (int j = 0; j < F32_ARR; j++) { - ax[j] = vld1q_f32(x + i + j * F32_REG); - ay[j] = vld1q_f32(y + i + j * F32_REG); - sum[j] = vfmaq_f32(sum[j], ax[j], ay[j]); - // sum[j] = vmlaq_lane_f32(sum[j], ax[j], ay[0], - } - } - - // reduce sum0..sum3 to sum0 - F32_VEC_REDUCE(sumf, sum); - - // leftovers - for (int i = np; i < n; ++i) { - sumf += x[i] * y[i]; - } - - *s = sumf; -} -#endif - -void vec_dot_fp32(const int n, float *__restrict s, const float *__restrict vx, const float *__restrict vy) { -#ifdef __AVX2__ - vec_dot_fp32_avx2(n, s, vx, vy); -#elif defined(__ARM_NEON) - vec_dot_fp32_arm(n, s, vx, vy); -#endif -} - -void vec_dot_fp16(const int n, float *__restrict s, const mllm_fp16_t *__restrict vx, const mllm_fp16_t *__restrict vy) { - float sumf = 0.0; - -#if defined(__AVX2__) || defined(__ARM_NEON) - const int np = (n & ~(MLLM_F16_STEP - 1)); - - 
MLLM_F16_VEC sum[MLLM_F16_ARR] = {MLLM_F16_VEC_ZERO}; - - MLLM_F16_VEC ax[MLLM_F16_ARR]; - MLLM_F16_VEC ay[MLLM_F16_ARR]; - - for (int i = 0; i < np; i += MLLM_F16_STEP) { - for (int j = 0; j < MLLM_F16_ARR; j++) { - ax[j] = MLLM_F16_VEC_LOAD(vx + i + j * MLLM_F16_EPR, j); - ay[j] = MLLM_F16_VEC_LOAD(vy + i + j * MLLM_F16_EPR, j); - - sum[j] = MLLM_F16_VEC_FMA(sum[j], ax[j], ay[j]); - } - } - - // reduce sum0..sum3 to sum0 - MLLM_F16_VEC_REDUCE(sumf, sum); - - // leftovers - for (int i = np; i < n; ++i) { - sumf += (float)(MLLM_FP16_TO_FP32(vx[i]) * MLLM_FP16_TO_FP32(vy[i])); - } -#else - for (int i = 0; i < n; ++i) { - sumf += (float)(MLLM_FP16_TO_FP32(vx[i]) * MLLM_FP16_TO_FP32(vy[i])); - } -#endif - - *s = sumf; -} - -#ifdef __AVX2__ -static void vec_dot_q4_0_q8_0_avx(const int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy) { - const int qk = QK8_0; - const int nb = n / qk; - - assert(n % qk == 0); - - const block_q4_0 *__restrict x = (block_q4_0 *)vx; - const block_q8_0 *__restrict y = (block_q8_0 *)vy; - // Initialize accumulator with zeros - __m256 acc = _mm256_setzero_ps(); - - // Main loop - for (int i = 0; i < nb; ++i) { - /* Compute combined scale for the block */ - const __m256 d = _mm256_set1_ps(MLLM_FP16_TO_FP32(x[i].d) * MLLM_FP16_TO_FP32(y[i].d)); - - __m256i bx = bytes_from_nibbles_32(x[i].qs); - - // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval. 
- const __m256i off = _mm256_set1_epi8(8); - bx = _mm256_sub_epi8(bx, off); - - __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs); - - const __m256 q = mul_sum_i8_pairs_float(bx, by); - - /* Multiply q with scale and accumulate */ - acc = _mm256_fmadd_ps(d, q, acc); - } - *s = hsum_float_8(acc); -} -#endif - -#ifdef __ARM_NEON -// COPY FROMN -static void vec_dot_q4_0_q8_0_arm(const int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy) { - const int qk = QK8_0; - const int nb = n / qk; - - assert(n % qk == 0); - - const block_q4_0 *__restrict x = (block_q4_0 *)vx; - const block_q8_0 *__restrict y = (block_q8_0 *)vy; - -#if defined(__ARM_FEATURE_MATMUL_INT8) - { - size_t bs = 0; - size_t bx = 0; - size_t by = 0; - const block_q4_0 *__restrict vx0 = (const block_q4_0 *)vx; - const block_q4_0 *__restrict vx1 = (const block_q4_0 *)((const uint8_t *)vx + bx); - const block_q8_0 *__restrict vy0 = (const block_q8_0 *)vy; - const block_q8_0 *__restrict vy1 = (const block_q8_0 *)((const uint8_t *)vy + by); - - float32x4_t sumv0 = vdupq_n_f32(0.0f); - - for (int i = 0; i < nb; i++) { - const block_q4_0 *__restrict b_x0 = &vx0[i]; - const block_q4_0 *__restrict b_x1 = &vx1[i]; - const block_q8_0 *__restrict b_y0 = &vy0[i]; - const block_q8_0 *__restrict b_y1 = &vy1[i]; - - const uint8x16_t m4b = vdupq_n_u8(0x0F); - const int8x16_t s8b = vdupq_n_s8(0x8); - - const uint8x16_t v0_0 = vld1q_u8(b_x0->qs); - const uint8x16_t v0_1 = vld1q_u8(b_x1->qs); - - // 4-bit -> 8-bit - const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8(v0_0, m4b)); - const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); - const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8(v0_1, m4b)); - const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); - - // sub 8 - const int8x16_t x0_l = vsubq_s8(v0_0l, s8b); - const int8x16_t x0_h = vsubq_s8(v0_0h, s8b); - const int8x16_t x1_l = vsubq_s8(v0_1l, s8b); - const int8x16_t x1_h = vsubq_s8(v0_1h, s8b); - - // 
load y - const int8x16_t y0_l = vld1q_s8(b_y0->qs); - const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16); - const int8x16_t y1_l = vld1q_s8(b_y1->qs); - const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16); - - float32_t _scale[4] = { - MLLM_FP16_TO_FP32(b_x0->d) * MLLM_FP16_TO_FP32(b_y0->d), - MLLM_FP16_TO_FP32(b_x0->d) * MLLM_FP16_TO_FP32(b_y1->d), - MLLM_FP16_TO_FP32(b_x1->d) * MLLM_FP16_TO_FP32(b_y0->d), - MLLM_FP16_TO_FP32(b_x1->d) * MLLM_FP16_TO_FP32(b_y1->d)}; - float32x4_t scale = vld1q_f32(_scale); - - int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); - int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); - - int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); - int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); - - int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); - int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); - - int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); - int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); - - sumv0 = vmlaq_f32(sumv0, (vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)), l1, r1)), l2, r2)), l3, r3))), scale); - } - - float32x4_t sumv1 = vextq_f32(sumv0, sumv0, 2); - float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1); - - vst1_f32(s, vget_low_f32(sumv2)); - vst1_f32(s + bs, vget_high_f32(sumv2)); - - return; - } -#endif - - float32x4_t sumv0 = vdupq_n_f32(0.0F); - float32x4_t sumv1 = vdupq_n_f32(0.0F); - - assert(nb % 2 == 0); // TODO: handle odd nb - for (int i = 0; i < nb; i += 2) { - const block_q4_0 *__restrict x0 = &x[i + 0]; - const block_q4_0 *__restrict x1 = &x[i + 1]; - 
const block_q8_0 *__restrict y0 = &y[i + 0]; - const block_q8_0 *__restrict y1 = &y[i + 1]; - - const uint8x16_t m4b = vdupq_n_u8(0x0F); - const int8x16_t s8b = vdupq_n_s8(0x8); - - const uint8x16_t v0_0 = vld1q_u8(x0->qs); - const uint8x16_t v0_1 = vld1q_u8(x1->qs); - - // 4-bit -> 8-bit - const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8(v0_0, m4b)); - const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); - const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8(v0_1, m4b)); - const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); - - // sub 8 - const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b); - const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b); - const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b); - const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b); - - // load y - const int8x16_t v1_0l = vld1q_s8(y0->qs); - const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); - const int8x16_t v1_1l = vld1q_s8(y1->qs); - const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); - -#if defined(__ARM_FEATURE_DOTPROD) - // dot product into int32x4_t - const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0l), v0_0hs, v1_0h); - const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1l), v0_1hs, v1_1h); - - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), MLLM_FP16_TO_FP32(x0->d) * MLLM_FP16_TO_FP32(y0->d)); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), MLLM_FP16_TO_FP32(x1->d) * MLLM_FP16_TO_FP32(y1->d)); -#else - const int16x8_t pl0l = vmull_s8(vget_low_s8(v0_0ls), vget_low_s8(v1_0l)); - const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0ls), vget_high_s8(v1_0l)); - const int16x8_t ph0l = vmull_s8(vget_low_s8(v0_0hs), vget_low_s8(v1_0h)); - const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hs), vget_high_s8(v1_0h)); - - const int16x8_t pl1l = vmull_s8(vget_low_s8(v0_1ls), vget_low_s8(v1_1l)); - const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1ls), vget_high_s8(v1_1l)); - const int16x8_t ph1l = vmull_s8(vget_low_s8(v0_1hs), vget_low_s8(v1_1h)); - const int16x8_t 
ph1h = vmull_s8(vget_high_s8(v0_1hs), vget_high_s8(v1_1h)); - - const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h)); - const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h)); - const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h)); - const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h)); - - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), MLLM_FP16_TO_FP32(x0->d) * MLLM_FP16_TO_FP32(y0->d)); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), MLLM_FP16_TO_FP32(x1->d) * MLLM_FP16_TO_FP32(y1->d)); -#endif - } - - *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); -} -#endif - -void vec_dot_q4_0_q8_0(const int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy) { -#ifdef __AVX2__ - vec_dot_q4_0_q8_0_avx(n, s, vx, vy); -#elif defined(__ARM_NEON) - vec_dot_q4_0_q8_0_arm(n, s, vx, vy); -#endif -} -void vec_dot_q4_0_q8_0(const void *__restrict src0, const void *__restrict src1, Tensor *dst, bool support_bias, Tensor *bias, int hid_len, int batch, int head, int src0_inf, int sec1_outf) { - float value = 0; -#ifdef __AVX2__ - vec_dot_q4_0_q8_0_avx(hid_len, &value, src1, src0); -#elif defined(__ARM_NEON) - vec_dot_q4_0_q8_0_arm(hid_len, &value, src1, src0); -#endif - if (support_bias) { - value += bias->dataAt(0, head, 0, sec1_outf); - } - dst->setDataAt({batch, head, src0_inf, sec1_outf}, value); -} - -#if QK_K == 256 -void vec_dot_q4_K_q8_K(const int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy) { - assert(n % QK_K == 0); - - const block_q4_K *__restrict x = (block_q4_K *)vx; - const block_q8_K *__restrict y = (block_q8_K *)vy; - - const int nb = n / QK_K; - - static const uint32_t kmask1 = 0x3f3f3f3f; - static const uint32_t kmask2 = 0x0f0f0f0f; - static const uint32_t kmask3 = 0x03030303; - - uint32_t utmp[4]; - -#ifdef __ARM_FEATURE_SVE - float sumf = 0; - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * 
MLLM_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * MLLM_FP16_TO_FP32(x[i].dmin); - - const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8)); - - memcpy(utmp, x[i].scales, K_SCALE_SIZE); - - uint32x2_t mins8 = {0}; - mins8 = vset_lane_u32(utmp[1] & kmask1, mins8, 0); - mins8 = vset_lane_u32(((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4), mins8, 1); - - utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); - utmp[0] &= kmask1; - - const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins8))); - const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16(q8sums), vget_low_s16(mins)), - vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins))); - sumf -= dmin * vaddvq_s32(prod); - - const uint8_t *scales = (const uint8_t *)utmp; - - const uint8_t *__restrict q4 = (const uint8_t *)x[i].qs; - const int8_t *__restrict q8 = (const int8_t *)y[i].qs; - - const int vector_length = mllm_cpu_get_sve_cnt() * 8; - const svuint8_t m4b = svdup_n_u8(0xf); - const svint32_t mzero = svdup_n_s32(0); - svint32_t sumi1 = svdup_n_s32(0); - svint32_t sumi1_1 = svdup_n_s32(0); - svint32_t sumi1_2 = svdup_n_s32(0); - svint32_t sumi2 = svdup_n_s32(0); - svint32_t sumi2_1 = svdup_n_s32(0); - svint32_t sumi2_2 = svdup_n_s32(0); - switch (vector_length) { - case 128: { - for (int j = 0; j < QK_K / 64; ++j) { - svint8_t q4bytes = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4), m4b)); - svint8_t q8bytes = svld1_s8(svptrue_b8(), q8); - q8 += 16; - sumi1_1 = svmla_n_s32_x(svptrue_b32(), sumi1_1, svdot_s32(mzero, q4bytes, q8bytes), scales[2 * j + 0]); - q4bytes = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4 + 16), m4b)); - q8bytes = svld1_s8(svptrue_b8(), q8); - q8 += 16; - sumi1_2 = svmla_n_s32_x(svptrue_b32(), sumi1_2, svdot_s32(mzero, q4bytes, q8bytes), scales[2 * j + 0]); - - q4bytes = svreinterpret_s8_u8(svlsr_n_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4), 4)); - 
q8bytes = svld1_s8(svptrue_b8(), q8); - q8 += 16; - sumi2_1 = svmla_n_s32_x(svptrue_b32(), sumi2_1, svdot_s32(mzero, q4bytes, q8bytes), scales[2 * j + 1]); - q4bytes = svreinterpret_s8_u8(svlsr_n_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4 + 16), 4)); - q8bytes = svld1_s8(svptrue_b8(), q8); - q8 += 16; - sumi2_2 = svmla_n_s32_x(svptrue_b32(), sumi2_2, svdot_s32(mzero, q4bytes, q8bytes), scales[2 * j + 1]); - q4 += 32; - } - sumi1 = svadd_s32_x(svptrue_b32(), sumi1_1, sumi1_2); - sumi2 = svadd_s32_x(svptrue_b32(), sumi2_1, sumi2_2); - sumf += d * (svaddv_s32(svptrue_b32(), svadd_s32_x(svptrue_b32(), sumi1, sumi2))); - } break; - case 256: - case 512: { - for (int j = 0; j < QK_K / 64; ++j) { - const svuint8_t q4bits = svld1_u8(svptrue_pat_b8(SV_VL32), q4); - q4 += 32; - svint8_t q4bytes = svreinterpret_s8_u8(svand_u8_x(svptrue_pat_b8(SV_VL32), q4bits, m4b)); - svint8_t q8bytes = svld1_s8(svptrue_pat_b8(SV_VL32), q8); - q8 += 32; - sumi1 = svmla_n_s32_x(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(mzero, q4bytes, q8bytes), scales[2 * j + 0]); - - q4bytes = svreinterpret_s8_u8(svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q4bits, 4)); - q8bytes = svld1_s8(svptrue_pat_b8(SV_VL32), q8); - q8 += 32; - sumi2 = svmla_n_s32_x(svptrue_pat_b32(SV_VL8), sumi2, svdot_s32(mzero, q4bytes, q8bytes), scales[2 * j + 1]); - } - sumf += d * (svaddv_s32(svptrue_pat_b32(SV_VL8), svadd_s32_x(svptrue_pat_b32(SV_VL8), sumi1, sumi2))); - } break; - default: - assert(false && "Unsupported vector length"); - break; - } - } - *s = sumf; -#elif defined __ARM_NEON - - const uint8x16_t m4b = vdupq_n_u8(0xf); -#ifdef __ARM_FEATURE_DOTPROD - const int32x4_t mzero = vdupq_n_s32(0); -#endif - - int8x16x2_t q4bytes; - int8x16x2_t q8bytes; - - float sumf = 0; - - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * MLLM_FP16_TO_FP32(x[i].dmin); - - const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8)); - - memcpy(utmp, 
x[i].scales, 12); - - const uint32x2_t mins8 = {utmp[1] & kmask1, ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4)}; - utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); - utmp[0] &= kmask1; - - const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins8))); - const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16(q8sums), vget_low_s16(mins)), - vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins))); - sumf -= dmin * vaddvq_s32(prod); - - const uint8_t *scales = (const uint8_t *)utmp; - - const uint8_t *__restrict q4 = x[i].qs; - const int8_t *__restrict q8 = y[i].qs; - - // int32x4_t isum = mzero; - - int32_t sumi1 = 0; - int32_t sumi2 = 0; - - for (int j = 0; j < QK_K / 64; ++j) { - const uint8x16x2_t q4bits = vld1q_u8_x2(q4); - q4 += 32; - -#ifdef __ARM_FEATURE_DOTPROD - q8bytes = vld1q_s8_x2(q8); - q8 += 32; - q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q4bits.val[0], m4b)); - q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(q4bits.val[1], m4b)); - - const int32x4_t p1 = vdotq_s32(vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]); - sumi1 += vaddvq_s32(p1) * scales[2 * j + 0]; - - q8bytes = vld1q_s8_x2(q8); - q8 += 32; - q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4)); - q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4)); - - const int32x4_t p2 = vdotq_s32(vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]); - - sumi2 += vaddvq_s32(p2) * scales[2 * j + 1]; -#else - q8bytes = vld1q_s8_x2(q8); - q8 += 32; - q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q4bits.val[0], m4b)); - q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(q4bits.val[1], m4b)); - const int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8(q4bytes.val[0]), vget_low_s8(q8bytes.val[0])), - vmull_s8(vget_high_s8(q4bytes.val[0]), vget_high_s8(q8bytes.val[0]))); - const int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8(q4bytes.val[1]), vget_low_s8(q8bytes.val[1])), - 
vmull_s8(vget_high_s8(q4bytes.val[1]), vget_high_s8(q8bytes.val[1]))); - sumi1 += vaddvq_s16(vaddq_s16(p0, p1)) * scales[2 * j + 0]; - - q8bytes = vld1q_s8_x2(q8); - q8 += 32; - q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4)); - q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4)); - const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8(q4bytes.val[0]), vget_low_s8(q8bytes.val[0])), - vmull_s8(vget_high_s8(q4bytes.val[0]), vget_high_s8(q8bytes.val[0]))); - const int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8(q4bytes.val[1]), vget_low_s8(q8bytes.val[1])), - vmull_s8(vget_high_s8(q4bytes.val[1]), vget_high_s8(q8bytes.val[1]))); - sumi2 += vaddvq_s16(vaddq_s16(p2, p3)) * scales[2 * j + 1]; - -#endif - } - - sumf += d * (sumi1 + sumi2); - } - - *s = sumf; - -#elif defined __AVX2__ - - const __m256i m4 = _mm256_set1_epi8(0xF); - - __m256 acc = _mm256_setzero_ps(); - __m128 acc_m = _mm_setzero_ps(); - - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * MLLM_FP16_TO_FP32(x[i].dmin); - - memcpy(utmp, x[i].scales, 12); - utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); - const uint32_t uaux = utmp[1] & kmask1; - utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); - utmp[2] = uaux; - utmp[0] &= kmask1; - - const uint8_t *__restrict q4 = x[i].qs; - const int8_t *__restrict q8 = y[i].qs; - - const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0])); - - const __m256i q8sums = _mm256_loadu_si256((const __m256i *)y[i].bsums); - const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1)); - const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s); - acc_m = _mm_fmadd_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod), acc_m); - - const __m128i sc128 = _mm256_extracti128_si256(mins_and_scales, 0); - const __m256i scales = 
MM256_SET_M128I(sc128, sc128); - - __m256i sumi = _mm256_setzero_si256(); - - for (int j = 0; j < QK_K / 64; ++j) { - const __m256i scale_l = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2 * j + 0)); - const __m256i scale_h = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2 * j + 1)); - - const __m256i q4bits = _mm256_loadu_si256((const __m256i *)q4); - q4 += 32; - const __m256i q4l = _mm256_and_si256(q4bits, m4); - const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), m4); - - const __m256i q8l = _mm256_loadu_si256((const __m256i *)q8); - q8 += 32; - __m256i p16l = _mm256_maddubs_epi16(q4l, q8l); - p16l = _mm256_madd_epi16(scale_l, p16l); - - const __m256i q8h = _mm256_loadu_si256((const __m256i *)q8); - q8 += 32; - __m256i p16h = _mm256_maddubs_epi16(q4h, q8h); - p16h = _mm256_madd_epi16(scale_h, p16h); - const __m256i sumj = _mm256_add_epi32(p16l, p16h); - - sumi = _mm256_add_epi32(sumi, sumj); - } - - __m256 vd = _mm256_set1_ps(d); - acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc); - } - - acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m)); - acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m)); - - *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m); - -#else - const uint8_t *scales = (const uint8_t *)&utmp[0]; - const uint8_t *mins = (const uint8_t *)&utmp[2]; - - int8_t aux8[QK_K]; - int16_t aux16[8]; - float sums[8]; - int32_t aux32[8]; - memset(sums, 0, 8 * sizeof(float)); - - float sumf = 0; - for (int i = 0; i < nb; ++i) { - const uint8_t *__restrict q4 = x[i].qs; - const int8_t *__restrict q8 = y[i].qs; - memset(aux32, 0, 8 * sizeof(int32_t)); - int8_t *__restrict a = aux8; - for (int j = 0; j < QK_K / 64; ++j) { - for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF); - a += 32; - for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4); - a += 32; - q4 += 32; - } - memcpy(utmp, x[i].scales, 12); - utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); - const uint32_t uaux = utmp[1] & kmask1; - utmp[1] = 
(utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); - utmp[2] = uaux; - utmp[0] &= kmask1; - - int sumi = 0; - for (int j = 0; j < QK_K / 16; ++j) sumi += y[i].bsums[j] * mins[j / 2]; - a = aux8; - int is = 0; - for (int j = 0; j < QK_K / 32; ++j) { - int32_t scale = scales[is++]; - for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; - for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; - q8 += 8; - a += 8; - for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; - for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; - q8 += 8; - a += 8; - for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; - for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; - q8 += 8; - a += 8; - for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; - for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; - q8 += 8; - a += 8; - } - const float d = MLLM_FP16_TO_FP32(x[i].d) * y[i].d; - for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; - const float dmin = MLLM_FP16_TO_FP32(x[i].dmin) * y[i].d; - sumf -= dmin * sumi; - } - for (int l = 0; l < 8; ++l) sumf += sums[l]; - *s = sumf; -#endif -} -#else -void vec_dot_q4_K_q8_K(const int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy) { - assert(n % QK_K == 0); - - const block_q4_K *__restrict x = (block_q4_K *)vx; - const block_q8_K *__restrict y = (block_q8_K *)vy; - - const int nb = n / QK_K; - -#ifdef __ARM_NEON - - const uint8x16_t m4b = vdupq_n_u8(0xf); - -#ifdef __ARM_FEATURE_DOTPROD - const int32x4_t mzero = vdupq_n_s32(0); -#endif - - float sumf = 0; - - int8x16x2_t q4bytes; - int8x16x4_t q8bytes; - - float sum_mins = 0.f; - - uint16_t aux16[2]; - const uint8_t *__restrict scales = (const uint8_t *)aux16; - - for (int i = 0; i < nb; ++i) { - const uint8_t *__restrict q4 = x[i].qs; - const int8_t *__restrict q8 = y[i].qs; - - const uint16_t *__restrict a = (const uint16_t *)x[i].scales; - aux16[0] = a[0] & 0x0f0f; - aux16[1] = (a[0] >> 4) & 0x0f0f; - - const int32_t summi = scales[2] * 
(y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]); - sum_mins += y[i].d * (float)x[i].d[1] * summi; - - const float d = y[i].d * (float)x[i].d[0]; - - const uint8x16x2_t q4bits = vld1q_u8_x2(q4); - -#ifdef __ARM_FEATURE_DOTPROD - q8bytes = vld1q_s8_x4(q8); - q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q4bits.val[0], m4b)); - q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(q4bits.val[1], m4b)); - - const int32x4_t p1 = vdotq_s32(vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]); - const int32_t sumi1 = vaddvq_s32(p1) * scales[0]; - - q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4)); - q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4)); - - const int32x4_t p2 = vdotq_s32(vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[2]), q4bytes.val[1], q8bytes.val[3]); - const int32_t sumi2 = vaddvq_s32(p2) * scales[1]; - -#else - q8bytes = vld1q_s8_x4(q8); - q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q4bits.val[0], m4b)); - q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(q4bits.val[1], m4b)); - const int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8(q4bytes.val[0]), vget_low_s8(q8bytes.val[0])), - vmull_s8(vget_high_s8(q4bytes.val[0]), vget_high_s8(q8bytes.val[0]))); - const int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8(q4bytes.val[1]), vget_low_s8(q8bytes.val[1])), - vmull_s8(vget_high_s8(q4bytes.val[1]), vget_high_s8(q8bytes.val[1]))); - int32_t sumi1 = vaddvq_s16(vaddq_s16(p0, p1)) * scales[0]; - - q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4)); - q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4)); - const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8(q4bytes.val[0]), vget_low_s8(q8bytes.val[2])), - vmull_s8(vget_high_s8(q4bytes.val[0]), vget_high_s8(q8bytes.val[2]))); - const int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8(q4bytes.val[1]), vget_low_s8(q8bytes.val[3])), - vmull_s8(vget_high_s8(q4bytes.val[1]), vget_high_s8(q8bytes.val[3]))); - int32_t sumi2 
= vaddvq_s16(vaddq_s16(p2, p3)) * scales[1]; - -#endif - sumf += d * (sumi1 + sumi2); - } - - *s = sumf - sum_mins; - -#elif defined __AVX2__ - - const __m256i m4 = _mm256_set1_epi8(0xF); - - __m256 acc = _mm256_setzero_ps(); - - float summs = 0; - - uint16_t aux16[2]; - const uint8_t *scales = (const uint8_t *)aux16; - - for (int i = 0; i < nb; ++i) { - const float d = MLLM_FP16_TO_FP32(x[i].d[0]) * y[i].d; - const float m = MLLM_FP16_TO_FP32(x[i].d[1]) * y[i].d; - const __m256 vd = _mm256_set1_ps(d); - - const uint16_t *a = (const uint16_t *)x[i].scales; - aux16[0] = a[0] & 0x0f0f; - aux16[1] = (a[0] >> 4) & 0x0f0f; - - summs += m * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3])); - - const uint8_t *__restrict q4 = x[i].qs; - const int8_t *__restrict q8 = y[i].qs; - - const __m256i q4bits = _mm256_loadu_si256((const __m256i *)q4); - const __m256i q4l = _mm256_and_si256(q4bits, m4); - const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), m4); - - const __m256i q8l = _mm256_loadu_si256((const __m256i *)(q8 + 0)); - const __m256i q8h = _mm256_loadu_si256((const __m256i *)(q8 + 32)); - - const __m256i p16l = _mm256_maddubs_epi16(q4l, q8l); - const __m256i p16h = _mm256_maddubs_epi16(q4h, q8h); - - const __m256i p32l = _mm256_madd_epi16(_mm256_set1_epi16(scales[0]), p16l); - acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(p32l), acc); - - const __m256i p32h = _mm256_madd_epi16(_mm256_set1_epi16(scales[1]), p16h); - acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(p32h), acc); - } - - *s = hsum_float_8(acc) - summs; - -#else - - uint8_t aux8[QK_K]; - int16_t aux16[16]; - float sums[8]; - memset(sums, 0, 8 * sizeof(float)); - - uint16_t s16[2]; - const uint8_t *__restrict scales = (const uint8_t *)s16; - - float sumf = 0; - for (int i = 0; i < nb; ++i) { - const uint8_t *__restrict q4 = x[i].qs; - const int8_t *__restrict q8 = y[i].qs; - uint8_t *__restrict a = aux8; - for (int l = 0; l < 32; ++l) a[l + 0] = q4[l] & 0xF; 
- for (int l = 0; l < 32; ++l) a[l + 32] = q4[l] >> 4; - - const uint16_t *__restrict b = (const uint16_t *)x[i].scales; - s16[0] = b[0] & 0x0f0f; - s16[1] = (b[0] >> 4) & 0x0f0f; - - sumf -= y[i].d * MLLM_FP16_TO_FP32(x[i].d[1]) * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3])); - - const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d[0]); - - for (int j = 0; j < QK_K / 32; ++j) { - for (int l = 0; l < 16; ++l) aux16[l] = q8[l] * a[l]; - q8 += 16; - a += 16; - for (int l = 0; l < 16; ++l) aux16[l] += q8[l] * a[l]; - q8 += 16; - a += 16; - const float dl = d * scales[j]; - for (int l = 0; l < 8; ++l) sums[l] += dl * (aux16[l] + aux16[l + 8]); - } - } - for (int l = 0; l < 8; ++l) sumf += sums[l]; - *s = sumf; -#endif -} -#endif - -void vec_dot_q4_K_q8_K(const void *__restrict src0, const void *__restrict src1, Tensor *dst, bool support_bias, Tensor *bias, int hid_len, int batch, int head, int src0_inf, int sec1_outf) { - float value = 0; - vec_dot_q4_K_q8_K(hid_len, dst->ptrAt(batch, head, src0_inf, sec1_outf), src1, src0); - if (support_bias) { - dst->setDataAt({batch, head, src0_inf, sec1_outf}, dst->dataAt(batch, head, src0_inf, sec1_outf) + bias->dataAt(0, head, 0, sec1_outf)); - } -} - -#if QK_K == 256 -void vec_dot_q6_K_q8_K(const int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy) { - assert(n % QK_K == 0); - - const block_q6_K *__restrict x = (block_q6_K *)vx; - const block_q8_K *__restrict y = (block_q8_K *)vy; - - const int nb = n / QK_K; - -#ifdef __ARM_NEON - - float sum = 0; - - const uint8x16_t m4b = vdupq_n_u8(0xF); -#if defined(__ARM_FEATURE_DOTPROD) - const int32x4_t vzero = vdupq_n_s32(0); -#endif - // const int8x16_t m32s = vdupq_n_s8(32); - - const uint8x16_t mone = vdupq_n_u8(3); - - int8x16x4_t q6bytes; - uint8x16x4_t q6h; - - for (int i = 0; i < nb; ++i) { - const float d_all = MLLM_FP16_TO_FP32(x[i].d); - - const uint8_t *__restrict q6 = x[i].ql; - const uint8_t 
*__restrict qh = x[i].qh; - const int8_t *__restrict q8 = y[i].qs; - - const int8_t *__restrict scale = x[i].scales; - - const int16x8x2_t q8sums = vld1q_s16_x2(y[i].bsums); - const int8x16_t scales = vld1q_s8(scale); - const int16x8x2_t q6scales = {vmovl_s8(vget_low_s8(scales)), vmovl_s8(vget_high_s8(scales))}; - - const int32x4_t prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16(q8sums.val[0]), vget_low_s16(q6scales.val[0])), - vmull_s16(vget_high_s16(q8sums.val[0]), vget_high_s16(q6scales.val[0]))), - vaddq_s32(vmull_s16(vget_low_s16(q8sums.val[1]), vget_low_s16(q6scales.val[1])), - vmull_s16(vget_high_s16(q8sums.val[1]), vget_high_s16(q6scales.val[1])))); - int32_t isum_mins = vaddvq_s32(prod); - - int32_t isum = 0; - - for (int j = 0; j < QK_K / 128; ++j) { - uint8x16x2_t qhbits = vld1q_u8_x2(qh); - qh += 32; - uint8x16x4_t q6bits = vld1q_u8_x4(q6); - q6 += 64; - int8x16x4_t q8bytes = vld1q_s8_x4(q8); - q8 += 64; - - q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4); - q6h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4); - uint8x16_t shifted = vshrq_n_u8(qhbits.val[0], 2); - q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4); - shifted = vshrq_n_u8(qhbits.val[1], 2); - q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4); - - // q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])), m32s); - // q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])), m32s); - // q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2])), m32s); - // q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3])), m32s); - q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])); - q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])); - q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2])); - q6bytes.val[3] 
= vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3])); - -#if defined(__ARM_FEATURE_DOTPROD) - - isum += vaddvq_s32(vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] + vaddvq_s32(vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] + vaddvq_s32(vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] + vaddvq_s32(vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3]; - scale += 4; - -#else - - int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8(q6bytes.val[0]), vget_low_s8(q8bytes.val[0])), - vmull_s8(vget_high_s8(q6bytes.val[0]), vget_high_s8(q8bytes.val[0]))); - int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8(q6bytes.val[1]), vget_low_s8(q8bytes.val[1])), - vmull_s8(vget_high_s8(q6bytes.val[1]), vget_high_s8(q8bytes.val[1]))); - isum += vaddvq_s16(p0) * scale[0] + vaddvq_s16(p1) * scale[1]; - scale += 2; - - int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8(q6bytes.val[2]), vget_low_s8(q8bytes.val[2])), - vmull_s8(vget_high_s8(q6bytes.val[2]), vget_high_s8(q8bytes.val[2]))); - int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8(q6bytes.val[3]), vget_low_s8(q8bytes.val[3])), - vmull_s8(vget_high_s8(q6bytes.val[3]), vget_high_s8(q8bytes.val[3]))); - isum += vaddvq_s16(p2) * scale[0] + vaddvq_s16(p3) * scale[1]; - scale += 2; -#endif - - q8bytes = vld1q_s8_x4(q8); - q8 += 64; - - shifted = vshrq_n_u8(qhbits.val[0], 4); - q6h.val[0] = vshlq_n_u8(vandq_u8(mone, shifted), 4); - shifted = vshrq_n_u8(qhbits.val[1], 4); - q6h.val[1] = vshlq_n_u8(vandq_u8(mone, shifted), 4); - shifted = vshrq_n_u8(qhbits.val[0], 6); - q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4); - shifted = vshrq_n_u8(qhbits.val[1], 6); - q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4); - - // q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0])), m32s); - // q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1])), m32s); - // q6bytes.val[2] = 
vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2])), m32s); - // q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3])), m32s); - q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0])); - q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1])); - q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2])); - q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3])); - -#if defined(__ARM_FEATURE_DOTPROD) - - isum += vaddvq_s32(vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] + vaddvq_s32(vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] + vaddvq_s32(vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] + vaddvq_s32(vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3]; - scale += 4; - - // for (int l = 0; l < 4; ++l) { - // const int32x4_t p = vdotq_s32(vzero, q6bytes.val[l], q8bytes.val[l]); - // isum += vaddvq_s32(p) * *scale++; - // } -#else - p0 = vaddq_s16(vmull_s8(vget_low_s8(q6bytes.val[0]), vget_low_s8(q8bytes.val[0])), - vmull_s8(vget_high_s8(q6bytes.val[0]), vget_high_s8(q8bytes.val[0]))); - p1 = vaddq_s16(vmull_s8(vget_low_s8(q6bytes.val[1]), vget_low_s8(q8bytes.val[1])), - vmull_s8(vget_high_s8(q6bytes.val[1]), vget_high_s8(q8bytes.val[1]))); - isum += vaddvq_s16(p0) * scale[0] + vaddvq_s16(p1) * scale[1]; - scale += 2; - - p2 = vaddq_s16(vmull_s8(vget_low_s8(q6bytes.val[2]), vget_low_s8(q8bytes.val[2])), - vmull_s8(vget_high_s8(q6bytes.val[2]), vget_high_s8(q8bytes.val[2]))); - p3 = vaddq_s16(vmull_s8(vget_low_s8(q6bytes.val[3]), vget_low_s8(q8bytes.val[3])), - vmull_s8(vget_high_s8(q6bytes.val[3]), vget_high_s8(q8bytes.val[3]))); - isum += vaddvq_s16(p2) * scale[0] + vaddvq_s16(p3) * scale[1]; - scale += 2; -#endif - } - // sum += isum * d_all * y[i].d; - sum += d_all * y[i].d * (isum - 32 * isum_mins); - } - *s = sum; - 
-#elif defined __AVX2__ - - const __m256i m4 = _mm256_set1_epi8(0xF); - const __m256i m2 = _mm256_set1_epi8(3); - const __m256i m32s = _mm256_set1_epi8(32); - - __m256 acc = _mm256_setzero_ps(); - - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); - - const uint8_t *__restrict q4 = x[i].ql; - const uint8_t *__restrict qh = x[i].qh; - const int8_t *__restrict q8 = y[i].qs; - - const __m128i scales = _mm_loadu_si128((const __m128i *)x[i].scales); - - __m256i sumi = _mm256_setzero_si256(); - - int is = 0; - - for (int j = 0; j < QK_K / 128; ++j) { - const __m128i scale_0 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 0)); - const __m128i scale_1 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 1)); - const __m128i scale_2 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 2)); - const __m128i scale_3 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 3)); - is += 4; - - const __m256i q4bits1 = _mm256_loadu_si256((const __m256i *)q4); - q4 += 32; - const __m256i q4bits2 = _mm256_loadu_si256((const __m256i *)q4); - q4 += 32; - const __m256i q4bitsH = _mm256_loadu_si256((const __m256i *)qh); - qh += 32; - - const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(q4bitsH, m2), 4); - const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 2), m2), 4); - const __m256i q4h_2 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 4), m2), 4); - const __m256i q4h_3 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 6), m2), 4); - - const __m256i q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0); - const __m256i q4_1 = _mm256_or_si256(_mm256_and_si256(q4bits2, m4), q4h_1); - const __m256i q4_2 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_2); - const __m256i q4_3 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits2, 4), m4), q4h_3); - - const __m256i q8_0 = _mm256_loadu_si256((const __m256i *)q8); - q8 += 32; - const __m256i q8_1 = 
_mm256_loadu_si256((const __m256i *)q8); - q8 += 32; - const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); - q8 += 32; - const __m256i q8_3 = _mm256_loadu_si256((const __m256i *)q8); - q8 += 32; - - __m256i q8s_0 = _mm256_maddubs_epi16(m32s, q8_0); - __m256i q8s_1 = _mm256_maddubs_epi16(m32s, q8_1); - __m256i q8s_2 = _mm256_maddubs_epi16(m32s, q8_2); - __m256i q8s_3 = _mm256_maddubs_epi16(m32s, q8_3); - - __m256i p16_0 = _mm256_maddubs_epi16(q4_0, q8_0); - __m256i p16_1 = _mm256_maddubs_epi16(q4_1, q8_1); - __m256i p16_2 = _mm256_maddubs_epi16(q4_2, q8_2); - __m256i p16_3 = _mm256_maddubs_epi16(q4_3, q8_3); - - p16_0 = _mm256_sub_epi16(p16_0, q8s_0); - p16_1 = _mm256_sub_epi16(p16_1, q8s_1); - p16_2 = _mm256_sub_epi16(p16_2, q8s_2); - p16_3 = _mm256_sub_epi16(p16_3, q8s_3); - - p16_0 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_0), p16_0); - p16_1 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_1), p16_1); - p16_2 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_2), p16_2); - p16_3 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_3), p16_3); - - sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1)); - sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_2, p16_3)); - } - - acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc); - } - -#if defined(_MSC_VER) || defined(__MINGW32__) - float arr[8]; - _mm256_storeu_ps(arr, acc); - - // for(float i : arr) { - // printf("%f ", i); - // } - // printf("\n"); -#endif - - *s = hsum_float_8(acc); - -#elif defined __AVX__ - const __m128i m4 = _mm_set1_epi8(0xF); - const __m128i m3 = _mm_set1_epi8(3); - const __m128i m32s = _mm_set1_epi8(32); - const __m128i m2 = _mm_set1_epi8(2); - - __m256 acc = _mm256_setzero_ps(); - - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); - - const uint8_t *__restrict q4 = x[i].ql; - const uint8_t *__restrict qh = x[i].qh; - const int8_t *__restrict q8 = y[i].qs; - - const __m128i scales = _mm_loadu_si128((const __m128i 
*)x[i].scales); - - __m128i sumi_0 = _mm_setzero_si128(); - __m128i sumi_1 = _mm_setzero_si128(); - - __m128i shuffle = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000); - for (int j = 0; j < QK_K / 128; ++j) { - const __m128i q4bitsH_0 = _mm_loadu_si128((const __m128i *)qh); - qh += 16; - const __m128i q4bitsH_1 = _mm_loadu_si128((const __m128i *)qh); - qh += 16; - - const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH_0, m3), 4); - const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(q4bitsH_1, m3), 4); - const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 2), m3), 4); - const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 2), m3), 4); - const __m128i q4h_4 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 4), m3), 4); - const __m128i q4h_5 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 4), m3), 4); - const __m128i q4h_6 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 6), m3), 4); - const __m128i q4h_7 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 6), m3), 4); - - const __m128i q4bits1_0 = _mm_loadu_si128((const __m128i *)q4); - q4 += 16; - const __m128i q4bits1_1 = _mm_loadu_si128((const __m128i *)q4); - q4 += 16; - const __m128i q4bits2_0 = _mm_loadu_si128((const __m128i *)q4); - q4 += 16; - const __m128i q4bits2_1 = _mm_loadu_si128((const __m128i *)q4); - q4 += 16; - - const __m128i q4_0 = _mm_or_si128(_mm_and_si128(q4bits1_0, m4), q4h_0); - const __m128i q4_1 = _mm_or_si128(_mm_and_si128(q4bits1_1, m4), q4h_1); - const __m128i q4_2 = _mm_or_si128(_mm_and_si128(q4bits2_0, m4), q4h_2); - const __m128i q4_3 = _mm_or_si128(_mm_and_si128(q4bits2_1, m4), q4h_3); - const __m128i q4_4 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_0, 4), m4), q4h_4); - const __m128i q4_5 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_1, 4), m4), q4h_5); - const __m128i q4_6 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_0, 4), m4), q4h_6); - const __m128i q4_7 = 
_mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_1, 4), m4), q4h_7); - - const __m128i q8_0 = _mm_loadu_si128((const __m128i *)q8); - q8 += 16; - const __m128i q8_1 = _mm_loadu_si128((const __m128i *)q8); - q8 += 16; - const __m128i q8_2 = _mm_loadu_si128((const __m128i *)q8); - q8 += 16; - const __m128i q8_3 = _mm_loadu_si128((const __m128i *)q8); - q8 += 16; - const __m128i q8_4 = _mm_loadu_si128((const __m128i *)q8); - q8 += 16; - const __m128i q8_5 = _mm_loadu_si128((const __m128i *)q8); - q8 += 16; - const __m128i q8_6 = _mm_loadu_si128((const __m128i *)q8); - q8 += 16; - const __m128i q8_7 = _mm_loadu_si128((const __m128i *)q8); - q8 += 16; - - __m128i q8s_0 = _mm_maddubs_epi16(m32s, q8_0); - __m128i q8s_1 = _mm_maddubs_epi16(m32s, q8_1); - __m128i q8s_2 = _mm_maddubs_epi16(m32s, q8_2); - __m128i q8s_3 = _mm_maddubs_epi16(m32s, q8_3); - __m128i q8s_4 = _mm_maddubs_epi16(m32s, q8_4); - __m128i q8s_5 = _mm_maddubs_epi16(m32s, q8_5); - __m128i q8s_6 = _mm_maddubs_epi16(m32s, q8_6); - __m128i q8s_7 = _mm_maddubs_epi16(m32s, q8_7); - - __m128i p16_0 = _mm_maddubs_epi16(q4_0, q8_0); - __m128i p16_1 = _mm_maddubs_epi16(q4_1, q8_1); - __m128i p16_2 = _mm_maddubs_epi16(q4_2, q8_2); - __m128i p16_3 = _mm_maddubs_epi16(q4_3, q8_3); - __m128i p16_4 = _mm_maddubs_epi16(q4_4, q8_4); - __m128i p16_5 = _mm_maddubs_epi16(q4_5, q8_5); - __m128i p16_6 = _mm_maddubs_epi16(q4_6, q8_6); - __m128i p16_7 = _mm_maddubs_epi16(q4_7, q8_7); - - p16_0 = _mm_sub_epi16(p16_0, q8s_0); - p16_1 = _mm_sub_epi16(p16_1, q8s_1); - p16_2 = _mm_sub_epi16(p16_2, q8s_2); - p16_3 = _mm_sub_epi16(p16_3, q8s_3); - p16_4 = _mm_sub_epi16(p16_4, q8s_4); - p16_5 = _mm_sub_epi16(p16_5, q8s_5); - p16_6 = _mm_sub_epi16(p16_6, q8s_6); - p16_7 = _mm_sub_epi16(p16_7, q8s_7); - - const __m128i scale_0 = _mm_shuffle_epi8(scales, shuffle); - shuffle = _mm_add_epi8(shuffle, m2); - const __m128i scale_1 = _mm_shuffle_epi8(scales, shuffle); - shuffle = _mm_add_epi8(shuffle, m2); - const __m128i scale_2 = 
_mm_shuffle_epi8(scales, shuffle); - shuffle = _mm_add_epi8(shuffle, m2); - const __m128i scale_3 = _mm_shuffle_epi8(scales, shuffle); - shuffle = _mm_add_epi8(shuffle, m2); - - p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0); - p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_0, scale_0)), p16_1); - p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2); - p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_1, scale_1)), p16_3); - p16_4 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_2), p16_4); - p16_5 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_2, scale_2)), p16_5); - p16_6 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_3), p16_6); - p16_7 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_3, scale_3)), p16_7); - - sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2)); - sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3)); - sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_4, p16_6)); - sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_5, p16_7)); - } - - __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); - acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc); - } - - *s = hsum_float_8(acc); - -#else - - int8_t aux8[QK_K]; - int16_t aux16[8]; - float sums[8]; - int32_t aux32[8]; - memset(sums, 0, 8 * sizeof(float)); - - float sumf = 0; - for (int i = 0; i < nb; ++i) { - const uint8_t *__restrict q4 = x[i].ql; - const uint8_t *__restrict qh = x[i].qh; - const int8_t *__restrict q8 = y[i].qs; - memset(aux32, 0, 8 * sizeof(int32_t)); - int8_t *__restrict a = aux8; - for (int j = 0; j < QK_K; j += 128) { - for (int l = 0; l < 32; ++l) { - a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32; - a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32; - a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32; - a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32; - } - a += 128; - q4 += 64; - 
qh += 32; - } - a = aux8; - int is = 0; - for (int j = 0; j < QK_K / 16; ++j) { - int scale = x[i].scales[is++]; - for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; - for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; - q8 += 8; - a += 8; - for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; - for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; - q8 += 8; - a += 8; - } - const float d = MLLM_FP16_TO_FP32(x[i].d) * y[i].d; - for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; - } - for (int l = 0; l < 8; ++l) sumf += sums[l]; - *s = sumf; -#endif -} - -#else - -void vec_dot_q6_K_q8_K(const int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy) { - assert(n % QK_K == 0); - - const block_q6_K *__restrict x = vx; - const block_q8_K *__restrict y = vy; - - const int nb = n / QK_K; - -#ifdef __ARM_NEON - - float sum = 0; - - const uint8x16_t m4b = vdupq_n_u8(0xF); - const int8x16_t m32s = vdupq_n_s8(32); -#if defined(__ARM_FEATURE_DOTPROD) - const int32x4_t vzero = vdupq_n_s32(0); -#endif - - const uint8x16_t mone = vdupq_n_u8(3); - - int8x16x4_t q6bytes; - uint8x16x4_t q6h; - - for (int i = 0; i < nb; ++i) { - const float d_all = (float)x[i].d; - - const uint8_t *__restrict q6 = x[i].ql; - const uint8_t *__restrict qh = x[i].qh; - const int8_t *__restrict q8 = y[i].qs; - - const int8_t *__restrict scale = x[i].scales; - - int32_t isum = 0; - - uint8x16_t qhbits = vld1q_u8(qh); - uint8x16x2_t q6bits = vld1q_u8_x2(q6); - int8x16x4_t q8bytes = vld1q_s8_x4(q8); - - q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits), 4); - uint8x16_t shifted = vshrq_n_u8(qhbits, 2); - q6h.val[1] = vshlq_n_u8(vandq_u8(mone, shifted), 4); - shifted = vshrq_n_u8(qhbits, 4); - q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4); - shifted = vshrq_n_u8(qhbits, 6); - q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4); - - q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])), m32s); - q6bytes.val[1] = 
vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])), m32s); - q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[2])), m32s); - q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[3])), m32s); - -#if defined(__ARM_FEATURE_DOTPROD) - - isum += vaddvq_s32(vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] + vaddvq_s32(vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] + vaddvq_s32(vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] + vaddvq_s32(vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3]; -#else - - int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8(q6bytes.val[0]), vget_low_s8(q8bytes.val[0])), - vmull_s8(vget_high_s8(q6bytes.val[0]), vget_high_s8(q8bytes.val[0]))); - int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8(q6bytes.val[1]), vget_low_s8(q8bytes.val[1])), - vmull_s8(vget_high_s8(q6bytes.val[1]), vget_high_s8(q8bytes.val[1]))); - isum += vaddvq_s16(p0) * scale[0] + vaddvq_s16(p1) * scale[1]; - - int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8(q6bytes.val[2]), vget_low_s8(q8bytes.val[2])), - vmull_s8(vget_high_s8(q6bytes.val[2]), vget_high_s8(q8bytes.val[2]))); - int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8(q6bytes.val[3]), vget_low_s8(q8bytes.val[3])), - vmull_s8(vget_high_s8(q6bytes.val[3]), vget_high_s8(q8bytes.val[3]))); - isum += vaddvq_s16(p2) * scale[2] + vaddvq_s16(p3) * scale[3]; -#endif - - sum += isum * d_all * y[i].d; - } - *s = sum; - -#elif defined __AVX2__ - - const __m256i m4 = _mm256_set1_epi8(0xF); - const __m256i m2 = _mm256_set1_epi8(3); - const __m256i m32s = _mm256_set1_epi8(32); - - __m256 acc = _mm256_setzero_ps(); - - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); - - const uint8_t *__restrict q4 = x[i].ql; - const uint8_t *__restrict qh = x[i].qh; - const int8_t *__restrict q8 = y[i].qs; - - const __m64 scales_1 = _mm_set1_pi8(x[i].scales[0]); - const 
__m64 scales_2 = _mm_set1_pi8(x[i].scales[1]); - const __m64 scales_3 = _mm_set1_pi8(x[i].scales[2]); - const __m64 scales_4 = _mm_set1_pi8(x[i].scales[3]); - - __m256i sumi = _mm256_setzero_si256(); - - const __m128i scale_0 = _mm_set_epi64(scales_2, scales_1); - const __m128i scale_1 = _mm_set_epi64(scales_4, scales_3); - - const __m256i q4bits1 = _mm256_loadu_si256((const __m256i *)q4); - const __m128i q4bitsH = _mm_loadu_si128((const __m128i *)qh); - - const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q4bitsH, 2), q4bitsH), m2), 4); - const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q4bitsH, 6), _mm_srli_epi16(q4bitsH, 4)), m2), 4); - - const __m256i q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0); - const __m256i q4_1 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_1); - - const __m256i q8_0 = _mm256_loadu_si256((const __m256i *)(q8 + 0)); - const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)(q8 + 32)); - - __m256i q8s_0 = _mm256_maddubs_epi16(m32s, q8_0); - __m256i q8s_1 = _mm256_maddubs_epi16(m32s, q8_1); - - __m256i p16_0 = _mm256_maddubs_epi16(q4_0, q8_0); - __m256i p16_1 = _mm256_maddubs_epi16(q4_1, q8_1); - - p16_0 = _mm256_sub_epi16(p16_0, q8s_0); - p16_1 = _mm256_sub_epi16(p16_1, q8s_1); - - p16_0 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_0), p16_0); - p16_1 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_1), p16_1); - - sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1)); - - acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc); - } - - *s = hsum_float_8(acc); - -#elif defined __AVX__ - - const __m128i m4 = _mm_set1_epi8(0xF); - const __m128i m2 = _mm_set1_epi8(3); - const __m128i m32s = _mm_set1_epi8(32); - - __m256 acc = _mm256_setzero_ps(); - - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); - - const uint8_t *__restrict q4 = x[i].ql; - const uint8_t 
*__restrict qh = x[i].qh; - const int8_t *__restrict q8 = y[i].qs; - - const __m64 scales_1 = _mm_set1_pi8(x[i].scales[0]); - const __m64 scales_2 = _mm_set1_pi8(x[i].scales[1]); - const __m64 scales_3 = _mm_set1_pi8(x[i].scales[2]); - const __m64 scales_4 = _mm_set1_pi8(x[i].scales[3]); - - __m128i sumi_0 = _mm_setzero_si128(); - __m128i sumi_1 = _mm_setzero_si128(); - - const __m128i scale_0 = _mm_set_epi64(scales_2, scales_1); - const __m128i scale_1 = _mm_set_epi64(scales_4, scales_3); - - const __m256i q4bits1 = _mm256_loadu_si256((const __m256i *)q4); - const __m128i q4bitsH = _mm_loadu_si128((const __m128i *)qh); - - const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH, m2), 4); - const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 2), m2), 4); - const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 4), m2), 4); - const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 6), m2), 4); - - const __m128i q4_0 = _mm_or_si128(_mm_and_si128(_mm256_extractf128_si256(q4bits1, 0), m4), q4h_0); - const __m128i q4_1 = _mm_or_si128(_mm_and_si128(_mm256_extractf128_si256(q4bits1, 1), m4), q4h_1); - const __m128i q4_2 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q4bits1, 0), 4), m4), q4h_2); - const __m128i q4_3 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q4bits1, 1), 4), m4), q4h_3); - - const __m256i q8_0 = _mm256_loadu_si256((const __m256i *)(q8 + 0)); - const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)(q8 + 32)); - - __m128i q8s_0 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_0, 0)); - __m128i q8s_1 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_0, 1)); - __m128i q8s_2 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_1, 0)); - __m128i q8s_3 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_1, 1)); - - __m128i p16_0 = _mm_maddubs_epi16(q4_0, _mm256_extractf128_si256(q8_0, 0)); - __m128i p16_1 = _mm_maddubs_epi16(q4_1, 
_mm256_extractf128_si256(q8_0, 1)); - __m128i p16_2 = _mm_maddubs_epi16(q4_2, _mm256_extractf128_si256(q8_1, 0)); - __m128i p16_3 = _mm_maddubs_epi16(q4_3, _mm256_extractf128_si256(q8_1, 1)); - - p16_0 = _mm_sub_epi16(p16_0, q8s_0); - p16_1 = _mm_sub_epi16(p16_1, q8s_1); - p16_2 = _mm_sub_epi16(p16_2, q8s_2); - p16_3 = _mm_sub_epi16(p16_3, q8s_3); - - p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0); - p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_0, scale_0)), p16_1); - p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2); - p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_1, scale_1)), p16_3); - - sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2)); - sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3)); - - acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(MM256_SET_M128I(sumi_1, sumi_0))), acc); - } - - *s = hsum_float_8(acc); - -#else - - int8_t aux8[QK_K]; - int16_t aux16[8]; - float sums[8]; - int32_t aux32[8]; - memset(sums, 0, 8 * sizeof(float)); - - float sumf = 0; - for (int i = 0; i < nb; ++i) { - const uint8_t *__restrict q4 = x[i].ql; - const uint8_t *__restrict qh = x[i].qh; - const int8_t *__restrict q8 = y[i].qs; - memset(aux32, 0, 8 * sizeof(int32_t)); - int8_t *__restrict a = aux8; - for (int l = 0; l < 16; ++l) { - a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32; - a[l + 16] = (int8_t)((q4[l + 16] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32; - a[l + 32] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32; - a[l + 48] = (int8_t)((q4[l + 16] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32; - } - int is = 0; - for (int j = 0; j < QK_K / 16; ++j) { - int scale = x[i].scales[is++]; - for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; - for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; - q8 += 8; - a += 8; - for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; - for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; - q8 += 
8; - a += 8; - } - const float d = MLLM_FP16_TO_FP32(x[i].d) * y[i].d; - for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; - } - for (int l = 0; l < 8; ++l) sumf += sums[l]; - *s = sumf; -#endif -} -#endif - -void vec_dot_q6_K_q8_K(const void *__restrict src0, const void *__restrict src1, Tensor *dst, bool support_bias, Tensor *bias, int hid_len, int batch, int head, int src0_inf, int sec1_outf) { - float value = 0; - - vec_dot_q6_K_q8_K(hid_len, &value, src1, src0); - - if (support_bias) { - value += bias->dataAt(0, head, 0, sec1_outf); - } - dst->setDataAt({batch, head, src0_inf, sec1_outf}, value); -} - -void vec_dot_q8_0_q8_0(int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy, size_t bs, size_t bx, size_t by) { - const int qk = QK8_0; - const int nb = n / qk; // number of blocks - - assert(n % qk == 0); - - const auto *__restrict x = static_cast(vx); - const auto *__restrict y = static_cast(vy); - -#if defined(__ARM_FEATURE_MATMUL_INT8) - // if (nrc == 2) - { - const block_q8_0 *__restrict vx0 = (const block_q8_0 *)vx; - const block_q8_0 *__restrict vx1 = (const block_q8_0 *)((const uint8_t *)vx + bx); - const block_q8_0 *__restrict vy0 = (const block_q8_0 *)vy; - const block_q8_0 *__restrict vy1 = (const block_q8_0 *)((const uint8_t *)vy + by); - - float32x4_t sumv0 = vdupq_n_f32(0.0f); - - for (int i = 0; i < nb; i++) { - const block_q8_0 *__restrict b_x0 = &vx0[i]; - const block_q8_0 *__restrict b_y0 = &vy0[i]; - - const block_q8_0 *__restrict b_x1 = &vx1[i]; - const block_q8_0 *__restrict b_y1 = &vy1[i]; - - const int8x16_t x0_l = vld1q_s8(b_x0->qs); - const int8x16_t x0_h = vld1q_s8(b_x0->qs + 16); - const int8x16_t x1_l = vld1q_s8(b_x1->qs); - const int8x16_t x1_h = vld1q_s8(b_x1->qs + 16); - - // load y - const int8x16_t y0_l = vld1q_s8(b_y0->qs); - const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16); - const int8x16_t y1_l = vld1q_s8(b_y1->qs); - const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16); - - float32_t _scale[4] = { - 
MLLM_FP16_TO_FP32(b_x0->d) * MLLM_FP16_TO_FP32(b_y0->d), - MLLM_FP16_TO_FP32(b_x0->d) * MLLM_FP16_TO_FP32(b_y1->d), - MLLM_FP16_TO_FP32(b_x1->d) * MLLM_FP16_TO_FP32(b_y0->d), - MLLM_FP16_TO_FP32(b_x1->d) * MLLM_FP16_TO_FP32(b_y1->d)}; - float32x4_t scale = vld1q_f32(_scale); - - int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); - int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); - - int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); - int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); - - int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); - int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); - - int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); - int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); - - sumv0 = vmlaq_f32(sumv0, (vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)), l1, r1)), l2, r2)), l3, r3))), scale); - } - - float32x4_t sumv1 = vextq_f32(sumv0, sumv0, 2); - float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1); - - vst1_f32(s, vget_low_f32(sumv2)); - vst1_f32(s + bs, vget_high_f32(sumv2)); - - return; - } -#elif defined(__ARM_NEON) - float32x4_t sumv0 = vdupq_n_f32(0.0f); - float32x4_t sumv1 = vdupq_n_f32(0.0f); - - assert(nb % 2 == 0); // TODO: handle odd nb - - for (int i = 0; i < nb; i += 2) { - const block_q8_0 *x0 = &x[i + 0]; - const block_q8_0 *x1 = &x[i + 1]; - const block_q8_0 *y0 = &y[i + 0]; - const block_q8_0 *y1 = &y[i + 1]; - - const int8x16_t x0_0 = vld1q_s8(x0->qs); - const int8x16_t x0_1 = vld1q_s8(x0->qs + 16); - const int8x16_t x1_0 = vld1q_s8(x1->qs); - const int8x16_t x1_1 = 
vld1q_s8(x1->qs + 16); - - // load y - const int8x16_t y0_0 = vld1q_s8(y0->qs); - const int8x16_t y0_1 = vld1q_s8(y0->qs + 16); - const int8x16_t y1_0 = vld1q_s8(y1->qs); - const int8x16_t y1_1 = vld1q_s8(y1->qs + 16); - - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(mllm_vdotq_s32(vdupq_n_s32(0), x0_0, y0_0), mllm_vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), MLLM_FP16_TO_FP32(x0->d) * MLLM_FP16_TO_FP32(y0->d)); - - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(mllm_vdotq_s32(vdupq_n_s32(0), x1_0, y1_0), mllm_vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), MLLM_FP16_TO_FP32(x1->d) * MLLM_FP16_TO_FP32(y1->d)); - } - - *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); -#elif defined(__AVX2__) || defined(__AVX__) - // Initialize accumulator with zeros - __m256 acc = _mm256_setzero_ps(); - - // Main loop - for (int i = 0; i < nb; ++i) { - // Compute combined scale for the block - const __m256 d = _mm256_set1_ps(MLLM_FP16_TO_FP32(x[i].d) * MLLM_FP16_TO_FP32(y[i].d)); - __m256i bx = _mm256_loadu_si256((const __m256i *)x[i].qs); - __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs); - - const __m256 q = mul_sum_i8_pairs_float(bx, by); - - // Multiply q with scale and accumulate -#if defined(__AVX2__) - acc = _mm256_fmadd_ps(d, q, acc); -#else - acc = _mm256_add_ps(_mm256_mul_ps(d, q), acc); -#endif - } - - *s = hsum_float_8(acc); -#endif -} - -void vec_dot_i8_i8(const int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy, float scale1, float scale2) { - const int qk = QK8_0; - const int nb = n / qk; - - const float scale = scale1 * scale2; - - assert(n % qk == 0); - - const block_q8_per_tensor *__restrict x = (block_q8_per_tensor *)vx; - const block_q8_per_tensor *__restrict y = (block_q8_per_tensor *)vy; - -#if defined(__ARM_NEON) - float32x4_t sumv0 = vdupq_n_f32(0.0f); - float32x4_t sumv1 = vdupq_n_f32(0.0f); - - assert(nb % 2 == 0); // TODO: handle odd nb - - for (int i = 0; i < nb; i += 2) { - const block_q8_per_tensor *__restrict x0 = &x[i 
+ 0]; - const block_q8_per_tensor *__restrict x1 = &x[i + 1]; - const block_q8_per_tensor *__restrict y0 = &y[i + 0]; - const block_q8_per_tensor *__restrict y1 = &y[i + 1]; - - const int8x16_t x0_0 = vld1q_s8(x0->qs); - const int8x16_t x0_1 = vld1q_s8(x0->qs + 16); - const int8x16_t x1_0 = vld1q_s8(x1->qs); - const int8x16_t x1_1 = vld1q_s8(x1->qs + 16); - - // load y - const int8x16_t y0_0 = vld1q_s8(y0->qs); - const int8x16_t y0_1 = vld1q_s8(y0->qs + 16); - const int8x16_t y1_0 = vld1q_s8(y1->qs); - const int8x16_t y1_1 = vld1q_s8(y1->qs + 16); - - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(mllm_vdotq_s32(vdupq_n_s32(0), x0_0, y0_0), mllm_vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), scale); - - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(mllm_vdotq_s32(vdupq_n_s32(0), x1_0, y1_0), mllm_vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), scale); - } - - *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); -#elif defined(__AVX2__) || defined(__AVX__) - // Initialize accumulator with zeros - __m256 acc = _mm256_setzero_ps(); - - // Main loop - for (int i = 0; i < nb; ++i) { - // Compute combined scale for the block - const __m256 d = _mm256_set1_ps(scale); - __m256i qx = _mm256_loadu_si256((const __m256i *)x[i].qs); - __m256i qy = _mm256_loadu_si256((const __m256i *)y[i].qs); - - const __m256 q = mul_sum_i8_pairs_float(qx, qy); - - // Multiply q with scale and accumulate -#if defined(__AVX2__) - acc = _mm256_fmadd_ps(d, q, acc); -#else - acc = _mm256_add_ps(_mm256_mul_ps(d, q), acc); -#endif - } - - *s = hsum_float_8(acc); -#else - // scalar - float sumf = 0.0; - - for (int i = 0; i < nb; i++) { - int sumi = 0; - - for (int j = 0; j < qk; j++) { - sumi += x[i].qs[j] * y[i].qs[j]; - } - - sumf += sumi * scale; - } - - *s = sumf; -#endif -} - -#ifdef __AVX2__ -static void vec_value_dot_fp32_avx2(const int n, float *__restrict s, const float *__restrict x, const float *__restrict y, bool addition) { - float sumf = 0.0F; - const int np = (n & ~(MLLM_F32_STEP - 1)); - - 
MLLM_F32_VEC sum[MLLM_F32_ARR] = {MLLM_F32_VEC_ZERO}; - - MLLM_F32_VEC ax[MLLM_F32_ARR]; - MLLM_F32_VEC ay[MLLM_F32_ARR]; - - for (int i = 0; i < np; i += MLLM_F32_STEP) { - for (int j = 0; j < MLLM_F32_ARR; j++) { - ax[j] = MLLM_F32_VEC_LOAD(x + i + j * MLLM_F32_EPR); - ay[j] = MLLM_F32_VEC_LOAD(y + i + j * MLLM_F32_EPR); - - sum[j] = MLLM_F32_VEC_FMA(sum[j], ax[j], ay[j]); - } - } - - // reduce sum0..sum3 to sum0 - MLLM_F32_VEC_REDUCE(sumf, sum); - - // leftovers - for (int i = np; i < n; ++i) { - sumf += x[i] * y[i]; - } - - *s = sumf; -} -#endif - -#ifdef __ARM_NEON -// s:vector k -// x:value -// y:vector k -static void vec_value_dot_fp32_arm(const int n, float *__restrict s, const float x, const float *__restrict y, bool addition) { - int i; - float32x4_t vec_x; - float32x4_t vec_y; - float32x4_t vec_s; - - vec_x = vdupq_n_f32(x); - - int n_aligned = n & -4; - - if (addition) { - for (i = 0; i < n_aligned; i += 4) { - vec_y = vld1q_f32(y + i); - vec_s = vmulq_f32(vec_x, vec_y); - vec_s = vaddq_f32(vec_s, vld1q_f32(s + i)); - vst1q_f32(s + i, vec_s); - } - } else { - for (i = 0; i < n_aligned; i += 4) { - vec_y = vld1q_f32(y + i); - vec_s = vmulq_f32(vec_x, vec_y); - vst1q_f32(s + i, vec_s); - } - } - for (; i < n; ++i) { - if (addition) - s[i] += x * y[i]; - else { - s[i] = x * y[i]; - } - } -} -#endif - -#ifdef __AVX2__ -void vec_value_dot_fp32(const int n, float *__restrict s, const float *x, const float *__restrict vy, bool addition) { - vec_value_dot_fp32_avx2(n, s, x, vy, addition); -} -#elif defined(__ARM_NEON) -void vec_value_dot_fp32(const int n, float *__restrict s, const float x, const float *__restrict vy, bool addition) { - vec_value_dot_fp32_arm(n, s, x, vy, addition); -} -#endif - -void vec_dot_q2_K_q8_K(int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy) { - const block_q2_K *__restrict x = (block_q2_K *)vx; - const block_q8_K *__restrict y = (block_q8_K *)vy; - - const int nb = n / QK_K; - -#ifdef __ARM_FEATURE_SVE 
- const int vector_length = svcntb() * 8; - const svuint8_t m3s = svdup_n_u8(0x3); - const svuint32_t m4s = svdup_n_u32(0xF); - const svint32_t vzero_sv = svdup_n_s32(0); - svfloat32_t acc_sum = svdup_n_f32(0); - svbool_t pred_s32 = svptrue_pat_b32(SV_VL4); - - switch (vector_length) { - case 128: - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); - svfloat32_t d_broad = svdup_n_f32((float32_t)d); - const float dmin = -y[i].d * MLLM_FP16_TO_FP32(x[i].dmin); - svfloat32_t dmin_broad = svdup_n_f32((float32_t)dmin); - - const uint8_t *__restrict q2 = x[i].qs; - const int8_t *__restrict q8_sv = y[i].qs; - const uint8_t *__restrict sc = x[i].scales; - - svuint32_t mins_and_scales_sve = svld1ub_u32(svptrue_b32(), sc); - const svint32_t mins_sv_1 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), mins_and_scales_sve, 4)); - - mins_and_scales_sve = svld1ub_u32(svptrue_b32(), sc + 4); - const svint32_t mins_sv_2 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), mins_and_scales_sve, 4)); - - svint32_t q8sums_sv_1 = svld1sh_s32(svptrue_b32(), y[i].bsums); - svint32_t q8sums_sv_2 = svld1sh_s32(svptrue_b32(), y[i].bsums + 4); - - const svint32_t s0 = svadd_s32_x(svptrue_b32(), svmul_s32_x(svptrue_b32(), mins_sv_1, q8sums_sv_1), svmul_s32_x(svptrue_b32(), mins_sv_2, q8sums_sv_2)); - - mins_and_scales_sve = svld1ub_u32(svptrue_b32(), sc + 8); - const svint32_t mins_sv_3 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), mins_and_scales_sve, 4)); - - mins_and_scales_sve = svld1ub_u32(svptrue_b32(), sc + 12); - const svint32_t mins_sv_4 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), mins_and_scales_sve, 4)); - - q8sums_sv_1 = svld1sh_s32(svptrue_b32(), y[i].bsums + 8); - q8sums_sv_2 = svld1sh_s32(svptrue_b32(), y[i].bsums + 12); - - svint32_t s1 = svadd_s32_x(svptrue_b32(), svmul_s32_x(svptrue_b32(), mins_sv_3, q8sums_sv_1), svmul_s32_x(svptrue_b32(), mins_sv_4, q8sums_sv_2)); - - svfloat32_t temp = svcvt_f32_s32_x(svptrue_b32(), 
svadd_s32_x(svptrue_b32(), s0, s1)); - - acc_sum = svmla_f32_m(svptrue_b32(), acc_sum, temp, dmin_broad); - - svint32_t sumi1 = svdup_n_s32(0); - - { - const svuint8_t q2bits_1 = svld1_u8(svptrue_b8(), q2); - svint8_t q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_1, m3s)); - svint8_t q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); - q8_sv += 16; - const svint32_t scales_sv = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), svld1ub_u32(svptrue_b32(), sc), m4s)); - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 0)); - - const svuint8_t q2bits_3 = svld1_u8(svptrue_b8(), q2 + 16); - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_3, m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); - q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 1)); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_1, 2), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); - q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 2)); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_3, 2), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); - q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 3)); - - const svint32_t scales_sv_1 = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), svld1ub_u32(svptrue_b32(), sc + 4), m4s)); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_1, 4), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); - q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 0)); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), 
svlsr_n_u8_x(svptrue_b8(), q2bits_3, 4), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); - q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 1)); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_1, 6), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); - q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 2)); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_3, 6), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); - q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 3)); - - //------------------------------- - - q2 += 32; - const svint32_t scales_sv_2 = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), svld1ub_u32(svptrue_b32(), sc + 8), m4s)); - const svuint8_t q2bits_2 = svld1_u8(svptrue_b8(), q2); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_2, m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); - q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 0)); - - const svuint8_t q2bits_4 = svld1_u8(svptrue_b8(), q2 + 16); - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_4, m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); - q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 1)); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_2, 2), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); - q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 2)); - - q2bytes_sv = 
svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_4, 2), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); - q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 3)); - - const svint32_t scales_sv_3 = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), svld1ub_u32(svptrue_b32(), sc + 12), m4s)); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_2, 4), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); - q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 0)); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_4, 4), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); - q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 1)); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_2, 6), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); - q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 2)); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_4, 6), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); - q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 3)); - } - acc_sum = svmla_f32_m(svptrue_b32(), acc_sum, svcvt_f32_s32_x(svptrue_b32(), sumi1), d_broad); - } - *s = svaddv_f32(svptrue_b32(), acc_sum); - break; - - case 256: - case 512: - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); - svfloat32_t d_broad = svdup_n_f32((float32_t)d); - const float dmin = -y[i].d * MLLM_FP16_TO_FP32(x[i].dmin); - svfloat32_t dmin_broad = 
svdup_n_f32((float32_t)dmin); - - const uint8_t *__restrict q2 = x[i].qs; - const int8_t *__restrict q8_sv = y[i].qs; - const uint8_t *__restrict sc = x[i].scales; - - const svuint32_t mins_and_scales_sve = svld1ub_u32(svptrue_pat_b32(SV_VL8), sc); - sc += 8; - const svint32_t scales_sv = svreinterpret_s32_u32(svand_u32_m(svptrue_pat_b32(SV_VL8), mins_and_scales_sve, m4s)); - const svint32_t mins_sv_1 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_pat_b32(SV_VL8), mins_and_scales_sve, 4)); - svint32_t q8sums_sv_1 = svld1sh_s32(svptrue_pat_b32(SV_VL8), y[i].bsums); - - const svuint32_t mins_and_scales_sve_1 = svld1ub_u32(svptrue_pat_b32(SV_VL8), sc); - const svint32_t scales_sv_1 = svreinterpret_s32_u32(svand_u32_m(svptrue_pat_b32(SV_VL8), mins_and_scales_sve_1, m4s)); - const svint32_t mins_sv_2 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_pat_b32(SV_VL8), mins_and_scales_sve_1, 4)); - - svint32_t q8sums_sv_2 = svld1sh_s32(svptrue_pat_b32(SV_VL8), y[i].bsums + 8); - - svfloat32_t temp = svcvt_f32_s32_x(svptrue_pat_b32(SV_VL8), svadd_s32_x(svptrue_pat_b32(SV_VL8), svmul_s32_x(svptrue_pat_b32(SV_VL8), mins_sv_1, q8sums_sv_1), svmul_s32_x(svptrue_pat_b32(SV_VL8), mins_sv_2, q8sums_sv_2))); - - acc_sum = svmla_f32_m(svptrue_pat_b32(SV_VL8), acc_sum, temp, dmin_broad); - - svint32_t sumi1 = svdup_n_s32(0); - - { - const svuint8_t q2bits_1 = svld1_u8(svptrue_pat_b8(SV_VL32), q2); - svint8_t q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), q2bits_1, m3s)); - svint8_t q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); - q8_sv += 32; - - svint32_t scale_1 = svsel(pred_s32, svdup_lane_s32(scales_sv, 0), svdup_lane_s32(scales_sv, 1)); - sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_1, 2), m3s)); - q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); - q8_sv += 32; - - 
svint32_t scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv, 2), svdup_lane_s32(scales_sv, 3)); - sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(svdup_n_s32(0), q2bytes_sv, q8bytes_sv), scale_2); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_1, 4), m3s)); - q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); - q8_sv += 32; - - scale_1 = svsel(pred_s32, svdup_lane_s32(scales_sv, 4), svdup_lane_s32(scales_sv, 5)); - sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_1, 6), m3s)); - q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); - q8_sv += 32; - - scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv, 6), svdup_lane_s32(scales_sv, 7)); - sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_2); - - q2 += 32; - - const svuint8_t q2bits_2 = svld1_u8(svptrue_pat_b8(SV_VL32), q2); - q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), q2bits_2, m3s)); - q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); - q8_sv += 32; - - scale_1 = svsel(pred_s32, svdup_lane_s32(scales_sv_1, 0), svdup_lane_s32(scales_sv_1, 1)); - sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_2, 2), m3s)); - q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); - q8_sv += 32; - - scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv_1, 2), svdup_lane_s32(scales_sv_1, 3)); - sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_2); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_2, 
4), m3s)); - q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); - q8_sv += 32; - - scale_1 = svsel(pred_s32, svdup_lane_s32(scales_sv_1, 4), svdup_lane_s32(scales_sv_1, 5)); - sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_2, 6), m3s)); - q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); - q8_sv += 32; - - scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv_1, 6), svdup_lane_s32(scales_sv_1, 7)); - sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_2); - } - acc_sum = svmla_f32_m(svptrue_pat_b32(SV_VL8), acc_sum, svcvt_f32_s32_x(svptrue_pat_b32(SV_VL8), sumi1), d_broad); - } - *s = svaddv_f32(svptrue_pat_b32(SV_VL8), acc_sum); - break; - - default: - assert(false && "Unsupported vector length"); - break; - } - -#elif __ARM_NEON - const uint8x16_t m3 = vdupq_n_u8(0x3); - const uint8x16_t m4 = vdupq_n_u8(0xF); - - const int32x4_t vzero = vdupq_n_s32(0); - - mllm_int8x16x2_t q2bytes; - uint8_t aux[16]; - - float sum = 0; - - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * MLLM_FP16_TO_FP32(x[i].dmin); - - const uint8_t *__restrict q2 = x[i].qs; - const int8_t *__restrict q8 = y[i].qs; - const uint8_t *__restrict sc = x[i].scales; - - const uint8x16_t mins_and_scales = vld1q_u8(sc); - const uint8x16_t scales = vandq_u8(mins_and_scales, m4); - vst1q_u8(aux, scales); - - const uint8x16_t mins = vshrq_n_u8(mins_and_scales, 4); - const mllm_int16x8x2_t q8sums = mllm_vld1q_s16_x2(y[i].bsums); - const mllm_int16x8x2_t mins16 = {{vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(mins))), vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(mins)))}}; - const int32x4_t s0 = vaddq_s32(vmull_s16(vget_low_s16(mins16.val[0]), vget_low_s16(q8sums.val[0])), - vmull_s16(vget_high_s16(mins16.val[0]), 
vget_high_s16(q8sums.val[0]))); - const int32x4_t s1 = vaddq_s32(vmull_s16(vget_low_s16(mins16.val[1]), vget_low_s16(q8sums.val[1])), - vmull_s16(vget_high_s16(mins16.val[1]), vget_high_s16(q8sums.val[1]))); - sum += dmin * vaddvq_s32(vaddq_s32(s0, s1)); - - int isum = 0; - int is = 0; - - // We use this macro instead of a function call because for some reason - // the code runs 2-3% slower, even if the function is declared inline -#define MULTIPLY_ACCUM_WITH_SCALE(index) \ - isum += vaddvq_s32(mllm_vdotq_s32(vzero, q2bytes.val[0], q8bytes.val[0])) * aux[is + (index)]; \ - isum += vaddvq_s32(mllm_vdotq_s32(vzero, q2bytes.val[1], q8bytes.val[1])) * aux[is + 1 + (index)]; - -#define SHIFT_MULTIPLY_ACCUM_WITH_SCALE(shift, index) \ - q8bytes = mllm_vld1q_s8_x2(q8); \ - q8 += 32; \ - q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[0], (shift)), m3)); \ - q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[1], (shift)), m3)); \ - MULTIPLY_ACCUM_WITH_SCALE((index)); - - for (int j = 0; j < QK_K / 128; ++j) { - const mllm_uint8x16x2_t q2bits = mllm_vld1q_u8_x2(q2); - q2 += 32; - - mllm_int8x16x2_t q8bytes = mllm_vld1q_s8_x2(q8); - q8 += 32; - q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[0], m3)); - q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[1], m3)); - - MULTIPLY_ACCUM_WITH_SCALE(0); - - SHIFT_MULTIPLY_ACCUM_WITH_SCALE(2, 2); - SHIFT_MULTIPLY_ACCUM_WITH_SCALE(4, 4); - SHIFT_MULTIPLY_ACCUM_WITH_SCALE(6, 6); - - is += 8; - } - - sum += d * isum; - } - - *s = sum; - -#elif defined __AVX2__ - - const __m256i m3 = _mm256_set1_epi8(3); - const __m128i m4 = _mm_set1_epi8(0xF); - - __m256 acc = _mm256_setzero_ps(); - - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * MLLM_FP16_TO_FP32(x[i].dmin); - - const uint8_t *__restrict q2 = x[i].qs; - const int8_t *__restrict q8 = y[i].qs; - - const __m128i mins_and_scales = _mm_loadu_si128((const __m128i 
*)x[i].scales); - const __m128i scales8 = _mm_and_si128(mins_and_scales, m4); - const __m128i mins8 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4); - const __m256i mins = _mm256_cvtepi8_epi16(mins8); - const __m256i prod = _mm256_madd_epi16(mins, _mm256_loadu_si256((const __m256i *)y[i].bsums)); - - acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(prod), acc); - - const __m256i all_scales = _mm256_cvtepi8_epi16(scales8); - const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0); - const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1); - const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)}; - - __m256i sumi = _mm256_setzero_si256(); - - for (int j = 0; j < QK_K / 128; ++j) { - const __m256i q2bits = _mm256_loadu_si256((const __m256i *)q2); - q2 += 32; - - const __m256i q8_0 = _mm256_loadu_si256((const __m256i *)q8); - q8 += 32; - const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); - q8 += 32; - const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); - q8 += 32; - const __m256i q8_3 = _mm256_loadu_si256((const __m256i *)q8); - q8 += 32; - - const __m256i q2_0 = _mm256_and_si256(q2bits, m3); - const __m256i q2_1 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 2), m3); - const __m256i q2_2 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 4), m3); - const __m256i q2_3 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 6), m3); - - __m256i p0 = _mm256_maddubs_epi16(q2_0, q8_0); - __m256i p1 = _mm256_maddubs_epi16(q2_1, q8_1); - __m256i p2 = _mm256_maddubs_epi16(q2_2, q8_2); - __m256i p3 = _mm256_maddubs_epi16(q2_3, q8_3); - - p0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(0)), p0); - p1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(1)), p1); - p2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(2)), p2); - p3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(3)), p3); - - 
p0 = _mm256_add_epi32(p0, p1); - p2 = _mm256_add_epi32(p2, p3); - - sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p0, p2)); - } - - acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc); - } - - *s = hsum_float_8(acc); - -#elif defined __AVX__ - - const __m128i m3 = _mm_set1_epi8(0x3); - const __m128i m4 = _mm_set1_epi8(0xF); - const __m128i m2 = _mm_set1_epi8(0x2); - - __m256 acc = _mm256_setzero_ps(); - - for (int i = 0; i < nb; ++i) { - const float dall = y[i].d * MLLM_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * MLLM_FP16_TO_FP32(x[i].dmin); - - const uint8_t *__restrict q2 = x[i].qs; - const int8_t *__restrict q8 = y[i].qs; - - // load mins and scales from block_q2_K.scales[QK_K/16] - const __m128i mins_and_scales = _mm_loadu_si128((const __m128i *)x[i].scales); - const __m128i scales16 = _mm_and_si128(mins_and_scales, m4); - const __m128i mins16 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4); - const __m128i mins_0 = _mm_cvtepi8_epi16(mins16); - const __m128i mins_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(mins16, mins16)); - - // summs = y[i].bsums * (x[i].scales >> 4) in 16bits*8*2 to 32bits*4*2 - const __m128i summs_0 = _mm_madd_epi16(mins_0, _mm_loadu_si128((const __m128i *)&y[i].bsums[0])); - const __m128i summs_1 = _mm_madd_epi16(mins_1, _mm_loadu_si128((const __m128i *)&y[i].bsums[8])); - - // sumf += -dmin * summs in 32bits*8 - acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(MM256_SET_M128I(summs_1, summs_0))), acc); - - const __m128i scales_0 = _mm_cvtepi8_epi16(scales16); - const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales16, scales16)); - const __m128i scales[2] = {scales_0, scales_1}; - - __m128i sumi_0 = _mm_setzero_si128(); - __m128i sumi_1 = _mm_setzero_si128(); - - for (int j = 0; j < QK_K / 128; ++j) { - // load Q8 quants int8*16*8 from block_q8_K.qs[QK_K] - const __m128i q8_0 = _mm_loadu_si128((const __m128i *)q8); - q8 += 16; - const __m128i q8_1 = 
_mm_loadu_si128((const __m128i *)q8); - q8 += 16; - const __m128i q8_2 = _mm_loadu_si128((const __m128i *)q8); - q8 += 16; - const __m128i q8_3 = _mm_loadu_si128((const __m128i *)q8); - q8 += 16; - const __m128i q8_4 = _mm_loadu_si128((const __m128i *)q8); - q8 += 16; - const __m128i q8_5 = _mm_loadu_si128((const __m128i *)q8); - q8 += 16; - const __m128i q8_6 = _mm_loadu_si128((const __m128i *)q8); - q8 += 16; - const __m128i q8_7 = _mm_loadu_si128((const __m128i *)q8); - q8 += 16; - - // load 2bits*16*8 from block_q2_K.qs[QK_K/4] - __m128i q2bits = _mm_loadu_si128((const __m128i *)q2); - q2 += 16; - const __m128i q2_0 = _mm_and_si128(q2bits, m3); - const __m128i q2_2 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3); - const __m128i q2_4 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3); - const __m128i q2_6 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3); - q2bits = _mm_loadu_si128((const __m128i *)q2); - q2 += 16; - const __m128i q2_1 = _mm_and_si128(q2bits, m3); - const __m128i q2_3 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3); - const __m128i q2_5 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3); - const __m128i q2_7 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3); - - // isuml = q8[l] * ((q2[l] >> shift) & 3) in 8bits*16*8 to 16bits*8*8 - __m128i p0 = _mm_maddubs_epi16(q2_0, q8_0); - __m128i p1 = _mm_maddubs_epi16(q2_1, q8_1); - __m128i p2 = _mm_maddubs_epi16(q2_2, q8_2); - __m128i p3 = _mm_maddubs_epi16(q2_3, q8_3); - __m128i p4 = _mm_maddubs_epi16(q2_4, q8_4); - __m128i p5 = _mm_maddubs_epi16(q2_5, q8_5); - __m128i p6 = _mm_maddubs_epi16(q2_6, q8_6); - __m128i p7 = _mm_maddubs_epi16(q2_7, q8_7); - - // isum += (x[i].scales[is++] & 0xF) * isuml in 16bits*8*8 to 32bits*4*8 - __m128i shuffle = _mm_set1_epi16(0x0100); - p0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p0); - shuffle = _mm_add_epi16(shuffle, m2); - p1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p1); - shuffle = _mm_add_epi16(shuffle, m2); - p2 = 
_mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p2); - shuffle = _mm_add_epi16(shuffle, m2); - p3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p3); - shuffle = _mm_add_epi16(shuffle, m2); - p4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p4); - shuffle = _mm_add_epi16(shuffle, m2); - p5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p5); - shuffle = _mm_add_epi16(shuffle, m2); - p6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p6); - shuffle = _mm_add_epi16(shuffle, m2); - p7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p7); - - p0 = _mm_add_epi32(p0, p1); - p2 = _mm_add_epi32(p2, p3); - p4 = _mm_add_epi32(p4, p5); - p6 = _mm_add_epi32(p6, p7); - - // isum in 32bits*4*2 - sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p0, p2)); - sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p4, p6)); - } - - // sumf += dall * isum - dmin * summs in 32bits - __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); - acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dall), _mm256_cvtepi32_ps(sumi)), acc); - } - - *s = hsum_float_8(acc); - -#elif defined __wasm_simd128__ - float sumf = 0; - - for (int i = 0; i < nb; ++i) { - const uint8_t *q2 = x[i].qs; - const int8_t *q8 = y[i].qs; - const uint8_t *sc = x[i].scales; - - // Vectorized summs calculation - v128_t summs_vec = wasm_i32x4_splat(0); - { - v128_t sc_vec = wasm_v128_load(sc); - v128_t sc_upper = wasm_u8x16_shr(sc_vec, 4); - - v128_t sc_low = wasm_u16x8_extend_low_u8x16(sc_upper); - v128_t sc_high = wasm_u16x8_extend_high_u8x16(sc_upper); - - v128_t bsums1 = wasm_v128_load(&y[i].bsums[0]); - v128_t bsums2 = wasm_v128_load(&y[i].bsums[8]); - - summs_vec = wasm_i32x4_add( - wasm_i32x4_add(wasm_i32x4_dot_i16x8(sc_low, bsums1), - wasm_i32x4_dot_i16x8(sc_high, bsums2)), - summs_vec); - - summs_vec = wasm_i32x4_add(summs_vec, wasm_i32x4_shuffle(summs_vec, summs_vec, 2, 3, 0, 1)); - summs_vec = wasm_i32x4_add(summs_vec, wasm_i32x4_shuffle(summs_vec, summs_vec, 1, 0, 3, 2)); - } - int32_t 
summs = wasm_i32x4_extract_lane(summs_vec, 0); - - // Vectorized isum calculation - int32_t isum = 0; - const uint8_t *sc_ptr = sc; - const int k_iters = QK_K / 128; - - for (int k = 0; k < k_iters; ++k) { - v128_t isum_vec = wasm_i32x4_splat(0); - int shift = 0; - - for (int j = 0; j < 4; ++j) { - const int d0 = (sc_ptr[0] & 0xF); - const int d1 = (sc_ptr[1] & 0xF); - sc_ptr += 2; - - // Process first 16 elements - v128_t q2_0 = wasm_v128_load(q2); - v128_t q8_0 = wasm_v128_load(q8); - v128_t q2_shift_0 = wasm_u8x16_shr(q2_0, shift); - v128_t q2_bits_0 = wasm_v128_and(q2_shift_0, wasm_i8x16_splat(0x03)); - - // Process next 16 elements - v128_t q2_1 = wasm_v128_load(q2 + 16); - v128_t q8_1 = wasm_v128_load(q8 + 16); - v128_t q2_shift_1 = wasm_u8x16_shr(q2_1, shift); - v128_t q2_bits_1 = wasm_v128_and(q2_shift_1, wasm_i8x16_splat(0x03)); - - // Calculate dot products - v128_t p0 = wasm_i32x4_dot_i16x8( - wasm_i16x8_extend_low_i8x16(q8_0), - wasm_i16x8_extend_low_i8x16(q2_bits_0)); - v128_t p1 = wasm_i32x4_dot_i16x8( - wasm_i16x8_extend_high_i8x16(q8_0), - wasm_i16x8_extend_high_i8x16(q2_bits_0)); - v128_t p2 = wasm_i32x4_dot_i16x8( - wasm_i16x8_extend_low_i8x16(q8_1), - wasm_i16x8_extend_low_i8x16(q2_bits_1)); - v128_t p3 = wasm_i32x4_dot_i16x8( - wasm_i16x8_extend_high_i8x16(q8_1), - wasm_i16x8_extend_high_i8x16(q2_bits_1)); - - // Accumulate scaled results - v128_t scaled = wasm_i32x4_add( - wasm_i32x4_mul(wasm_i32x4_add(p0, p1), wasm_i32x4_splat(d0)), - wasm_i32x4_mul(wasm_i32x4_add(p2, p3), wasm_i32x4_splat(d1))); - - isum_vec = wasm_i32x4_add(isum_vec, scaled); - q8 += 32; - shift += 2; - } - q2 += 32; - - // Horizontal sum of isum_vec - isum_vec = wasm_i32x4_add(isum_vec, wasm_i32x4_shuffle(isum_vec, isum_vec, 2, 3, 0, 1)); - isum_vec = wasm_i32x4_add(isum_vec, wasm_i32x4_shuffle(isum_vec, isum_vec, 1, 0, 3, 2)); - isum += wasm_i32x4_extract_lane(isum_vec, 0); - } - - const float dall = MLLM_FP16_TO_FP32(x[i].d) * y[i].d; - const float dmin = 
MLLM_FP16_TO_FP32(x[i].dmin) * y[i].d; - sumf += dall * isum - dmin * summs; - } - - *s = sumf; - -#elif defined __riscv_v_intrinsic - - const int vector_length = __riscv_vlenb() * 8; - float sumf = 0; - - uint8_t temp_01[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; - uint8_t atmp[16]; - - switch (vector_length) { - case 256: - for (int i = 0; i < nb; ++i) { - const uint8_t *q2 = x[i].qs; - const int8_t *q8 = y[i].qs; - const uint8_t *sc = x[i].scales; - - const float dall = y[i].d * MLLM_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * MLLM_FP16_TO_FP32(x[i].dmin); - - size_t vl = 16; - - vuint8m1_t scales = __riscv_vle8_v_u8m1(sc, vl); - vuint8m1_t aux = __riscv_vand_vx_u8m1(scales, 0x0F, vl); - - vint16m1_t q8sums = __riscv_vle16_v_i16m1(y[i].bsums, vl); - - vuint8mf2_t scales_2 = __riscv_vle8_v_u8mf2(sc, vl); - vuint8mf2_t mins8 = __riscv_vsrl_vx_u8mf2(scales_2, 0x4, vl); - vint16m1_t mins = __riscv_vreinterpret_v_u16m1_i16m1(__riscv_vzext_vf2_u16m1(mins8, vl)); - vint32m2_t prod = __riscv_vwmul_vv_i32m2(q8sums, mins, vl); - vint32m1_t vsums = __riscv_vredsum_vs_i32m2_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl); - - sumf += dmin * __riscv_vmv_x_s_i32m1_i32(vsums); - - vl = 32; - - vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1); - vuint8m1_t v_b = __riscv_vle8_v_u8m1(temp_01, vl); - - uint8_t is = 0; - int isum = 0; - - for (int j = 0; j < QK_K / 128; ++j) { - // load Q2 - vuint8m1_t q2_x = __riscv_vle8_v_u8m1(q2, vl); - - vuint8m1_t q2_0 = __riscv_vand_vx_u8m1(q2_x, 0x03, vl); - vuint8m1_t q2_1 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x2, vl), 0x03, vl); - vuint8m1_t q2_2 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x4, vl), 0x03, vl); - vuint8m1_t q2_3 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x6, vl), 0x03, vl); - - // duplicate scale elements for product - vuint8m1_t sc0 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 0 + is, vl), vl); - vuint8m1_t sc1 = 
__riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 2 + is, vl), vl); - vuint8m1_t sc2 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 4 + is, vl), vl); - vuint8m1_t sc3 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 6 + is, vl), vl); - - vint16m2_t p0 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_0, sc0, vl)); - vint16m2_t p1 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_1, sc1, vl)); - vint16m2_t p2 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_2, sc2, vl)); - vint16m2_t p3 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_3, sc3, vl)); - - // load Q8 - vint8m1_t q8_0 = __riscv_vle8_v_i8m1(q8, vl); - vint8m1_t q8_1 = __riscv_vle8_v_i8m1(q8 + 32, vl); - vint8m1_t q8_2 = __riscv_vle8_v_i8m1(q8 + 64, vl); - vint8m1_t q8_3 = __riscv_vle8_v_i8m1(q8 + 96, vl); - - vint32m4_t s0 = __riscv_vwmul_vv_i32m4(p0, __riscv_vwcvt_x_x_v_i16m2(q8_0, vl), vl); - vint32m4_t s1 = __riscv_vwmul_vv_i32m4(p1, __riscv_vwcvt_x_x_v_i16m2(q8_1, vl), vl); - vint32m4_t s2 = __riscv_vwmul_vv_i32m4(p2, __riscv_vwcvt_x_x_v_i16m2(q8_2, vl), vl); - vint32m4_t s3 = __riscv_vwmul_vv_i32m4(p3, __riscv_vwcvt_x_x_v_i16m2(q8_3, vl), vl); - - vint32m1_t isum0 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s0, s1, vl), vzero, vl); - vint32m1_t isum1 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s2, s3, vl), isum0, vl); - - isum += __riscv_vmv_x_s_i32m1_i32(isum1); - - q2 += 32; - q8 += 128; - is = 8; - } - - sumf += dall * isum; - } - break; - case 128: - for (int i = 0; i < nb; ++i) { - const uint8_t *q2 = x[i].qs; - const int8_t *q8 = y[i].qs; - const uint8_t *sc = x[i].scales; - const float dall = y[i].d * MLLM_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * MLLM_FP16_TO_FP32(x[i].dmin); - uint8_t *patmp = atmp; - int vsums; - int tmp; - __asm__ __volatile__( - "vsetivli zero, 16, e8, m1\n\t" - "vmv.v.x v8, zero\n\t" - "vle8.v v1, (%[sc])\n\t" - "vand.vi v0, v1, 0xF\n\t" - "vsrl.vi v1, v1, 
4\n\t" - "vse8.v v0, (%[scale])\n\t" - "vsetivli zero, 16, e16, m2\n\t" - "vle16.v v2, (%[bsums])\n\t" - "vzext.vf2 v0, v1\n\t" - "vwmul.vv v4, v0, v2\n\t" - "vsetivli zero, 16, e32, m4\n\t" - "vredsum.vs v8, v4, v8\n\t" - "vmv.x.s %[vsums], v8" - : [tmp] "=&r"(tmp), [vsums] "=&r"(vsums) - : [sc] "r"(sc), [scale] "r"(atmp), [bsums] "r"(y[i].bsums) - : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); - sumf += dmin * vsums; - int isum = 0; - - for (int j = 0; j < QK_K / 128; ++j) { - __asm__ __volatile__( - "vsetvli zero, %[vl32], e8, m2\n\t" - "vle8.v v0, (%[q2])\n\t" - "vsrl.vi v2, v0, 2\n\t" - "vsrl.vi v4, v0, 4\n\t" - "vsrl.vi v6, v0, 6\n\t" - "vand.vi v0, v0, 0x3\n\t" - "vand.vi v2, v2, 0x3\n\t" - "vand.vi v4, v4, 0x3\n\t" - "vsetvli zero, %[vl128], e8, m8\n\t" - "vle8.v v8, (%[q8])\n\t" - "vsetvli zero, %[vl64], e8, m4\n\t" - "vwmul.vv v16, v0, v8\n\t" - "vwmul.vv v24, v4, v12\n\t" - "vsetivli zero, 16, e16, m2\n\t" - "vmv.v.x v0, zero\n\t" - "vwredsum.vs v10, v16, v0\n\t" - "vwredsum.vs v9, v18, v0\n\t" - "vwredsum.vs v8, v20, v0\n\t" - "vwredsum.vs v7, v22, v0\n\t" - "vwredsum.vs v11, v24, v0\n\t" - "vwredsum.vs v12, v26, v0\n\t" - "vwredsum.vs v13, v28, v0\n\t" - "vwredsum.vs v14, v30, v0\n\t" - "vsetivli zero, 4, e32, m1\n\t" - "vslideup.vi v10, v9, 1\n\t" - "vslideup.vi v8, v7, 1\n\t" - "vslideup.vi v11, v12, 1\n\t" - "vslideup.vi v13, v14, 1\n\t" - "vslideup.vi v10, v8, 2\n\t" - "vslideup.vi v11, v13, 2\n\t" - "vsetivli zero, 8, e32, m2\n\t" - "vle8.v v15, (%[scale])\n\t" - "vzext.vf4 v12, v15\n\t" - "vmul.vv v10, v10, v12\n\t" - "vredsum.vs v0, v10, v0\n\t" - "vmv.x.s %[tmp], v0\n\t" - "add %[isum], %[isum], %[tmp]" - : [tmp] "=&r"(tmp), [isum] "+&r"(isum) - : [q2] "r"(q2), [scale] "r"(patmp), [q8] "r"(q8), [vl32] "r"(32), [vl64] "r"(64), [vl128] "r"(128) - : "memory", "v0", "v1", 
"v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); - q2 += 32; - q8 += 128; - patmp += 8; - } - - sumf += dall * isum; - } - break; - default: - assert(false && "Unsupported vector length"); - break; - } - - *s = sumf; - -#elif defined(__POWER9_VECTOR__) - const vector signed char lowMask = vec_splats((signed char)0x3); - const vector signed char lowScaleMask = vec_splats((signed char)0xF); - const vector int v0 = vec_splats((int32_t)0); - const vector unsigned char v2 = vec_splats((unsigned char)0x2); - const vector unsigned char v6 = vec_splats((unsigned char)0x6); - const vector unsigned char v4 = vec_splats((unsigned char)0x4); - - vector float vsumf0 = vec_splats(0.0f); - vector float vsumf1 = vec_splats(0.0f); - vector float vsumf2 = vec_splats(0.0f); - vector float vsumf3 = vec_splats(0.0f); - - for (int i = 0; i < nb; ++i) { - vector float vxd = vec_splats(MLLM_FP16_TO_FP32(x[i].d)); - vector float vyd = vec_splats(y[i].d); - vector float vd = vec_mul(vxd, vyd); - - vector float vxmin = vec_splats(MLLM_FP16_TO_FP32(x[i].dmin)); - vector float vdmin = vec_mul(vxmin, vyd); - - vector signed short q8ysums0 = vec_xl(0, y[i].bsums); - vector signed short q8ysums1 = vec_xl(16, y[i].bsums); - - vector signed char q2xmins = (vector signed char)vec_xl(0, x[i].scales); - vector signed char vscales = vec_and(q2xmins, lowScaleMask); - - q2xmins = vec_sr(q2xmins, v4); - vector signed short q2xmins0 = vec_unpackh(q2xmins); - vector signed short q2xmins1 = vec_unpackl(q2xmins); - - vector signed int prod0 = vec_mule(q2xmins0, q8ysums0); - vector signed int prod1 = vec_mulo(q2xmins0, q8ysums0); - vector signed int prod2 = vec_mule(q2xmins1, q8ysums1); - vector signed int prod3 = vec_mulo(q2xmins1, q8ysums1); - - vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0); - vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1); - 
vsumf2 = vec_nmsub(vec_ctf(prod2, 0), vdmin, vsumf2); - vsumf3 = vec_nmsub(vec_ctf(prod3, 0), vdmin, vsumf3); - - vector signed int vsumi0 = v0; - vector signed int vsumi1 = v0; - vector signed int vsumi2 = v0; - vector signed int vsumi3 = v0; - vector signed int vsumi4 = v0; - vector signed int vsumi5 = v0; - vector signed int vsumi6 = v0; - vector signed int vsumi7 = v0; - - const uint8_t *__restrict q2 = x[i].qs; - const int8_t *__restrict q8 = y[i].qs; - - for (int j = 0; j < QK_K / 128; ++j) { - __builtin_prefetch(q2, 0, 1); - __builtin_prefetch(q8, 0, 1); - - vector signed char qxs0 = (vector signed char)vec_xl(0, q2); - vector signed char qxs1 = (vector signed char)vec_xl(16, q2); - q2 += 32; - - vector unsigned char q2x00 = (vector unsigned char)vec_and(qxs0, lowMask); - vector unsigned char q2x01 = (vector unsigned char)vec_and(vec_sr(qxs0, v2), lowMask); - vector unsigned char q2x02 = (vector unsigned char)vec_and(vec_sr(qxs0, v4), lowMask); - vector unsigned char q2x03 = (vector unsigned char)vec_and(vec_sr(qxs0, v6), lowMask); - vector unsigned char q2x10 = (vector unsigned char)vec_and(qxs1, lowMask); - vector unsigned char q2x11 = (vector unsigned char)vec_and(vec_sr(qxs1, v2), lowMask); - vector unsigned char q2x12 = (vector unsigned char)vec_and(vec_sr(qxs1, v4), lowMask); - vector unsigned char q2x13 = (vector unsigned char)vec_and(vec_sr(qxs1, v6), lowMask); - - vector signed char q8y00 = vec_xl(0, q8); - vector signed char q8y10 = vec_xl(16, q8); - vector signed char q8y01 = vec_xl(32, q8); - vector signed char q8y11 = vec_xl(48, q8); - vector signed char q8y02 = vec_xl(64, q8); - vector signed char q8y12 = vec_xl(80, q8); - vector signed char q8y03 = vec_xl(96, q8); - vector signed char q8y13 = vec_xl(112, q8); - q8 += 128; - - vector signed int qv0 = vec_msum(q8y00, q2x00, v0); - vector signed int qv1 = vec_msum(q8y01, q2x01, v0); - vector signed int qv2 = vec_msum(q8y02, q2x02, v0); - vector signed int qv3 = vec_msum(q8y03, q2x03, v0); - 
vector signed int qv4 = vec_msum(q8y10, q2x10, v0); - vector signed int qv5 = vec_msum(q8y11, q2x11, v0); - vector signed int qv6 = vec_msum(q8y12, q2x12, v0); - vector signed int qv7 = vec_msum(q8y13, q2x13, v0); - - vector signed short vscales_07 = vec_unpackh(vscales); - vector signed int vscales_03 = vec_unpackh(vscales_07); - vector signed int vscales_47 = vec_unpackl(vscales_07); - vector signed int vs0 = vec_splat(vscales_03, 0); - vector signed int vs1 = vec_splat(vscales_03, 1); - vector signed int vs2 = vec_splat(vscales_03, 2); - vector signed int vs3 = vec_splat(vscales_03, 3); - vector signed int vs4 = vec_splat(vscales_47, 0); - vector signed int vs5 = vec_splat(vscales_47, 1); - vector signed int vs6 = vec_splat(vscales_47, 2); - vector signed int vs7 = vec_splat(vscales_47, 3); - vscales = vec_sld(vscales, vscales, 8); - - vsumi0 = vec_add(vec_mul(qv0, vs0), vsumi0); - vsumi1 = vec_add(vec_mul(qv1, vs2), vsumi1); - vsumi2 = vec_add(vec_mul(qv2, vs4), vsumi2); - vsumi3 = vec_add(vec_mul(qv3, vs6), vsumi3); - vsumi4 = vec_add(vec_mul(qv4, vs1), vsumi4); - vsumi5 = vec_add(vec_mul(qv5, vs3), vsumi5); - vsumi6 = vec_add(vec_mul(qv6, vs5), vsumi6); - vsumi7 = vec_add(vec_mul(qv7, vs7), vsumi7); - } - - vsumi0 = vec_add(vsumi0, vsumi4); - vsumi1 = vec_add(vsumi1, vsumi5); - vsumi2 = vec_add(vsumi2, vsumi6); - vsumi3 = vec_add(vsumi3, vsumi7); - - vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); - vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); - vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); - } - - vsumf0 = vec_add(vsumf0, vsumf2); - vsumf1 = vec_add(vsumf1, vsumf3); - - vsumf0 = vec_add(vsumf0, vsumf1); - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - *s = vec_extract(vsumf0, 0); - -#elif defined __loongarch_asx - - __m256 acc = (__m256)__lasx_xvldi(0); - - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * 
MLLM_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * MLLM_FP16_TO_FP32(x[i].dmin); - - const uint8_t *__restrict q2 = x[i].qs; - const int8_t *__restrict q8 = y[i].qs; - - const __m128i mins_and_scales128 = __lsx_vld((const __m128i *)x[i].scales, 0); - const __m128i scales128 = __lsx_vandi_b(mins_and_scales128, 0xf); - const __m256i mins = lasx_ext8_16(__lsx_vsrli_b(mins_and_scales128, 4)); - const __m256i prod = lasx_madd_h(mins, __lasx_xvld((const __m256i *)y[i].bsums, 0)); - - acc = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(dmin), __lasx_xvffint_s_w(prod), acc); - - const v16i8 shuffle_mask = {0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15}; - const __m256i scales_shuffled = lasx_ext8_16(__lsx_vshuf_b(scales128, scales128, (__m128i)shuffle_mask)); - - __m256i sumi = __lasx_xvldi(0); - - for (int j = 0; j < QK_K / 128; ++j) { - const __m256i q2bits = __lasx_xvld((const __m256i *)q2, 0); - q2 += 32; - - const __m256i q8_0 = __lasx_xvld((const __m256i *)q8, 0); - q8 += 32; - const __m256i q8_1 = __lasx_xvld((const __m256i *)q8, 0); - q8 += 32; - const __m256i q8_2 = __lasx_xvld((const __m256i *)q8, 0); - q8 += 32; - const __m256i q8_3 = __lasx_xvld((const __m256i *)q8, 0); - q8 += 32; - - const __m256i q2_0 = __lasx_xvandi_b(q2bits, 3); - const __m256i q2_1 = __lasx_xvandi_b(__lasx_xvsrli_b(q2bits, 2), 3); - const __m256i q2_2 = __lasx_xvandi_b(__lasx_xvsrli_b(q2bits, 4), 3); - const __m256i q2_3 = __lasx_xvsrli_b(q2bits, 6); - - __m256i p0 = lasx_madd_h_b(q2_0, q8_0); - __m256i p1 = lasx_madd_h_b(q2_1, q8_1); - __m256i p2 = lasx_madd_h_b(q2_2, q8_2); - __m256i p3 = lasx_madd_h_b(q2_3, q8_3); - - p0 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 0), p0); - p1 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 1), p1); - p2 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 2), p2); - p3 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 3), p3); - - p0 = __lasx_xvadd_w(p0, p1); - p2 = __lasx_xvadd_w(p2, p3); - - sumi = 
__lasx_xvadd_w(sumi, __lasx_xvadd_w(p0, p2)); - } - - acc = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(sumi), acc); - } - - *s = hsum_float_8(acc); - -#else - - float sumf = 0; - - for (int i = 0; i < nb; ++i) { - const uint8_t *q2 = x[i].qs; - const int8_t *q8 = y[i].qs; - const uint8_t *sc = x[i].scales; - - int summs = 0; - for (int j = 0; j < 16; ++j) { - summs += y[i].bsums[j] * (sc[j] >> 4); - } - - const float dall = y[i].d * MLLM_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * MLLM_FP16_TO_FP32(x[i].dmin); - - int isum = 0; - int is = 0; - int d; - for (int k = 0; k < QK_K / 128; ++k) { - int shift = 0; - for (int j = 0; j < 4; ++j) { - d = sc[is++] & 0xF; - int isuml = 0; - for (int l = 0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3); - isum += d * isuml; - d = sc[is++] & 0xF; - isuml = 0; - for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3); - isum += d * isuml; - shift += 2; - q8 += 32; - } - q2 += 32; - } - sumf += dall * isum - dmin * summs; - } - *s = sumf; -#endif -} - -void vec_dot_q3_K_q8_K(int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy) { - assert(n % QK_K == 0); - - const uint32_t kmask1 = 0x03030303; - const uint32_t kmask2 = 0x0f0f0f0f; - - const block_q3_K *__restrict x = (block_q3_K *)vx; - const block_q8_K *__restrict y = (block_q8_K *)vy; - - const int nb = n / QK_K; - -#if defined(__ARM_FEATURE_SVE) - - uint32_t aux[3]; - uint32_t utmp[4]; - - const int8_t m32 = 32; - const int vector_length = svcntb() * 8; - const svuint8_t m3b_sv = svdup_n_u8(0x3); - const svint32_t vzero_sv = svdup_n_s32(0); - - const svuint8_t m0_sv = svdup_n_u8(1); - const svuint8_t m1_sv = svlsl_n_u8_x(svptrue_b8(), m0_sv, 1); - const svuint8_t m2_sv = svlsl_n_u8_x(svptrue_b8(), m0_sv, 2); - const svuint8_t m3_sv = svlsl_n_u8_x(svptrue_b8(), m0_sv, 3); - - float sum = 0; - - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); - - const uint8_t 
*__restrict q3_sv = x[i].qs; - const uint8_t *__restrict qh_sv = x[i].hmask; - const int8_t *__restrict q8_sv = y[i].qs; - - // Set up scales - memcpy(aux, x[i].scales, 12); - utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4); - utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4); - utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4); - utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4); - - int8_t *scale = (int8_t *)utmp; - - for (int j = 0; j < 16; ++j) scale[j] -= m32; - - switch (vector_length) { - case 128: { - svuint8_t qhbits_sv_1 = svld1_u8(svptrue_b8(), qh_sv); - svuint8_t qhbits_sv_2 = svld1_u8(svptrue_b8(), qh_sv + 16); - svuint8_t q3h_sv; - - svint32_t sumi1_1 = svdup_n_s32(0); - svint8_t q3bytes_sv; - - for (int j = 0; j < QK_K / 128; ++j) { - const svuint8_t q3bits_sv = svld1_u8(svptrue_b8(), q3_sv); - q3_sv += 16; - const svuint8_t q3bits_sv_1 = svld1_u8(svptrue_b8(), q3_sv); - q3_sv += 16; - svint8_t q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); - q8_sv += 16; - svint8_t q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); - q8_sv += 16; - - q3h_sv = svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m0_sv, qhbits_sv_1), 2); - q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), q3bits_sv, m3b_sv)), svreinterpret_s8_u8(q3h_sv)); - - sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[0])); - - q3h_sv = svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m0_sv, qhbits_sv_2), 2); - q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), q3bits_sv_1, m3b_sv)), svreinterpret_s8_u8(q3h_sv)); - - sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[1])); - - q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); - q8_sv += 16; - q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); - q8_sv += 16; - - q3h_sv = 
svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m1_sv, qhbits_sv_1), 1); - q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv, 2), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); - - sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[2])); - - q3h_sv = svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m1_sv, qhbits_sv_2), 1); - q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv_1, 2), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); - - sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[3])); - - scale += 4; - q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); - q8_sv += 16; - q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); - q8_sv += 16; - - q3h_sv = svbic_u8_x(svptrue_b8(), m2_sv, qhbits_sv_1); - q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv, 4), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); - - sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[0])); - - q3h_sv = svbic_u8_x(svptrue_b8(), m2_sv, qhbits_sv_2); - q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv_1, 4), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); - - sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[1])); - - q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); - q8_sv += 16; - q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); - q8_sv += 16; - - q3h_sv = svlsr_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m3_sv, qhbits_sv_1), 1); - q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv, 6), m3b_sv)), 
svreinterpret_s8_u8(q3h_sv)); - - sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[2])); - - q3h_sv = svlsr_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m3_sv, qhbits_sv_2), 1); - q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv_1, 6), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); - - sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[3])); - - if (j == 0) { - qhbits_sv_1 = svlsr_n_u8_x(svptrue_b8(), qhbits_sv_1, 4); - qhbits_sv_2 = svlsr_n_u8_x(svptrue_b8(), qhbits_sv_2, 4); - } - - scale += 4; - } - - sum += d * (svaddv_s32(svptrue_b32(), sumi1_1)); - } break; - case 256: - case 512: { - svuint8_t qhbits_sv = svld1_u8(svptrue_pat_b8(SV_VL32), qh_sv); - svuint8_t q3h_sv; - - svint32_t sumi1_1 = svdup_n_s32(0); - svint8_t q3bytes_sv; - - for (int j = 0; j < QK_K / 128; ++j) { - const svuint8_t q3bits_sv = svld1_u8(svptrue_pat_b8(SV_VL32), q3_sv); - q3_sv += 32; - svint8_t q8bytes_1_sv_1 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); - q8_sv += 32; - svint8_t q8bytes_1_sv_2 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); - q8_sv += 32; - - q3h_sv = svlsl_n_u8_x(svptrue_pat_b8(SV_VL32), svbic_u8_x(svptrue_pat_b8(SV_VL32), m0_sv, qhbits_sv), 2); - q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), q3bits_sv, m3b_sv)), svreinterpret_s8_u8(q3h_sv)); - - svint32_t scale_1 = svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[0]), svdup_n_s32((int32_t)scale[1])); - sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), scale_1); - - q3h_sv = svlsl_n_u8_x(svptrue_pat_b8(SV_VL32), svbic_u8_x(svptrue_pat_b8(SV_VL32), m1_sv, qhbits_sv), 1); - q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), 
svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q3bits_sv, 2), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); - - scale_1 = svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[2]), svdup_n_s32((int32_t)scale[3])); - sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), scale_1); - - scale += 4; - q8bytes_1_sv_1 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); - q8_sv += 32; - q8bytes_1_sv_2 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); - q8_sv += 32; - - q3h_sv = svbic_u8_x(svptrue_pat_b8(SV_VL32), m2_sv, qhbits_sv); - q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q3bits_sv, 4), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); - - scale_1 = svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[0]), svdup_n_s32((int32_t)scale[1])); - sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), scale_1); - - q3h_sv = svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), svbic_u8_x(svptrue_pat_b8(SV_VL32), m3_sv, qhbits_sv), 1); - q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q3bits_sv, 6), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); - - scale_1 = svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[2]), svdup_n_s32((int32_t)scale[3])); - sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), scale_1); - - if (j == 0) { - qhbits_sv = svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), qhbits_sv, 4); - } - - scale += 4; - } - - sum += d * (svaddv_s32(svptrue_pat_b32(SV_VL8), sumi1_1)); - } break; - default: - assert(false && "Unsupported vector length"); - break; - } - } - *s = sum; - -#elif __ARM_NEON - - uint32_t aux[3]; - uint32_t utmp[4]; - - const uint8x16_t m3b = vdupq_n_u8(0x3); - const int32x4_t vzero = vdupq_n_s32(0); - - const uint8x16_t m0 = vdupq_n_u8(1); - const 
uint8x16_t m1 = vshlq_n_u8(m0, 1); - const uint8x16_t m2 = vshlq_n_u8(m0, 2); - const uint8x16_t m3 = vshlq_n_u8(m0, 3); - const int8_t m32 = 32; - - mllm_int8x16x4_t q3bytes; - - float sum = 0; - - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); - - const uint8_t *__restrict q3 = x[i].qs; - const uint8_t *__restrict qh = x[i].hmask; - const int8_t *__restrict q8 = y[i].qs; - - mllm_uint8x16x2_t qhbits = mllm_vld1q_u8_x2(qh); - - mllm_uint8x16x4_t q3h; - - int32_t isum = 0; - - // Set up scales - memcpy(aux, x[i].scales, 12); - utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4); - utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4); - utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4); - utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4); - - int8_t *scale = (int8_t *)utmp; - for (int j = 0; j < 16; ++j) scale[j] -= m32; - - for (int j = 0; j < QK_K / 128; ++j) { - const mllm_uint8x16x2_t q3bits = mllm_vld1q_u8_x2(q3); - q3 += 32; - const mllm_int8x16x4_t q8bytes_1 = mllm_vld1q_s8_x4(q8); - q8 += 64; - const mllm_int8x16x4_t q8bytes_2 = mllm_vld1q_s8_x4(q8); - q8 += 64; - - q3h.val[0] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[0]), 2); - q3h.val[1] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[1]), 2); - q3h.val[2] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[0]), 1); - q3h.val[3] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[1]), 1); - - q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[0], m3b)), vreinterpretq_s8_u8(q3h.val[0])); - q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[1], m3b)), vreinterpretq_s8_u8(q3h.val[1])); - q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 2), m3b)), vreinterpretq_s8_u8(q3h.val[2])); - q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 2), m3b)), vreinterpretq_s8_u8(q3h.val[3])); - - isum += vaddvq_s32(mllm_vdotq_s32(vzero, q3bytes.val[0], q8bytes_1.val[0])) * scale[0]; - 
isum += vaddvq_s32(mllm_vdotq_s32(vzero, q3bytes.val[1], q8bytes_1.val[1])) * scale[1]; - isum += vaddvq_s32(mllm_vdotq_s32(vzero, q3bytes.val[2], q8bytes_1.val[2])) * scale[2]; - isum += vaddvq_s32(mllm_vdotq_s32(vzero, q3bytes.val[3], q8bytes_1.val[3])) * scale[3]; - - scale += 4; - - q3h.val[0] = vbicq_u8(m2, qhbits.val[0]); - q3h.val[1] = vbicq_u8(m2, qhbits.val[1]); - q3h.val[2] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[0]), 1); - q3h.val[3] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[1]), 1); - - q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 4), m3b)), vreinterpretq_s8_u8(q3h.val[0])); - q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 4), m3b)), vreinterpretq_s8_u8(q3h.val[1])); - q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 6), m3b)), vreinterpretq_s8_u8(q3h.val[2])); - q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 6), m3b)), vreinterpretq_s8_u8(q3h.val[3])); - - isum += vaddvq_s32(mllm_vdotq_s32(vzero, q3bytes.val[0], q8bytes_2.val[0])) * scale[0]; - isum += vaddvq_s32(mllm_vdotq_s32(vzero, q3bytes.val[1], q8bytes_2.val[1])) * scale[1]; - isum += vaddvq_s32(mllm_vdotq_s32(vzero, q3bytes.val[2], q8bytes_2.val[2])) * scale[2]; - isum += vaddvq_s32(mllm_vdotq_s32(vzero, q3bytes.val[3], q8bytes_2.val[3])) * scale[3]; - - scale += 4; - - if (j == 0) { - qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 4); - qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 4); - } - } - sum += d * isum; - } - - *s = sum; - -#elif defined __AVX2__ - - const __m256i m3 = _mm256_set1_epi8(3); - const __m256i mone = _mm256_set1_epi8(1); - const __m128i m32 = _mm_set1_epi8(32); - - __m256 acc = _mm256_setzero_ps(); - - uint32_t aux[3]; - - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); - - const uint8_t *__restrict q3 = x[i].qs; - const int8_t *__restrict q8 = y[i].qs; - - // Set up scales - memcpy(aux, x[i].scales, 12); - __m128i 
scales128 = _mm_set_epi32( - ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4), - ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4), - (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4), - (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4)); - scales128 = _mm_sub_epi8(scales128, m32); - const __m256i all_scales = _mm256_cvtepi8_epi16(scales128); - const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0); - const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1); - const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)}; - - // high bit - const __m256i hbits = _mm256_loadu_si256((const __m256i *)x[i].hmask); - - // integer accumulator - __m256i sumi = _mm256_setzero_si256(); - - int bit = 0; - int is = 0; - - for (int j = 0; j < QK_K / 128; ++j) { - // load low 2 bits - const __m256i q3bits = _mm256_loadu_si256((const __m256i *)q3); - q3 += 32; - - // prepare low and high bits - const __m256i q3l_0 = _mm256_and_si256(q3bits, m3); - const __m256i q3h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); - ++bit; - - const __m256i q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 2), m3); - const __m256i q3h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); - ++bit; - - const __m256i q3l_2 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 4), m3); - const __m256i q3h_2 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); - ++bit; - - const __m256i q3l_3 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 6), m3); - const __m256i q3h_3 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); - ++bit; - - // load Q8 quants - const __m256i q8_0 = _mm256_loadu_si256((const __m256i *)q8); - q8 += 32; - const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); - q8 += 32; - const __m256i 
q8_2 = _mm256_loadu_si256((const __m256i *)q8); - q8 += 32; - const __m256i q8_3 = _mm256_loadu_si256((const __m256i *)q8); - q8 += 32; - - // Dot product: we multiply the 2 low bits and 1 high bit part separately, so we can use _mm256_maddubs_epi16, - // and then subtract. The high bit part has the 2 already subtracted (and so, it is zero if the high bit was not set, - // and 2 if the high bit was set) - __m256i q8s_0 = _mm256_maddubs_epi16(q3h_0, q8_0); - __m256i q8s_1 = _mm256_maddubs_epi16(q3h_1, q8_1); - __m256i q8s_2 = _mm256_maddubs_epi16(q3h_2, q8_2); - __m256i q8s_3 = _mm256_maddubs_epi16(q3h_3, q8_3); - - __m256i p16_0 = _mm256_maddubs_epi16(q3l_0, q8_0); - __m256i p16_1 = _mm256_maddubs_epi16(q3l_1, q8_1); - __m256i p16_2 = _mm256_maddubs_epi16(q3l_2, q8_2); - __m256i p16_3 = _mm256_maddubs_epi16(q3l_3, q8_3); - - p16_0 = _mm256_sub_epi16(p16_0, q8s_0); - p16_1 = _mm256_sub_epi16(p16_1, q8s_1); - p16_2 = _mm256_sub_epi16(p16_2, q8s_2); - p16_3 = _mm256_sub_epi16(p16_3, q8s_3); - - // multiply with scales - p16_0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 0)), p16_0); - p16_1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 1)), p16_1); - p16_2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 2)), p16_2); - p16_3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 3)), p16_3); - - // accumulate - p16_0 = _mm256_add_epi32(p16_0, p16_1); - p16_2 = _mm256_add_epi32(p16_2, p16_3); - sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_2)); - } - - // multiply with block scale and accumulate - acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc); - } - - *s = hsum_float_8(acc); - -#elif defined __AVX__ - - const __m128i m3 = _mm_set1_epi8(3); - const __m128i mone = _mm_set1_epi8(1); - const __m128i m32 = _mm_set1_epi8(32); - const __m128i m2 = _mm_set1_epi8(2); - - __m256 acc = _mm256_setzero_ps(); - - const 
uint32_t *aux; - - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); - - const uint8_t *__restrict q3 = x[i].qs; - const int8_t *__restrict q8 = y[i].qs; - - // Set up scales - aux = (const uint32_t *)x[i].scales; - __m128i scales128 = _mm_set_epi32( - ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4), - ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4), - (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4), - (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4)); - scales128 = _mm_sub_epi8(scales128, m32); - const __m128i scales_0 = _mm_cvtepi8_epi16(scales128); - const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales128, scales128)); - const __m128i scales[2] = {scales_0, scales_1}; - - // high bit *128*2 from block_q3_K.hmask[QK_K/8] - const __m128i hbits_0 = _mm_loadu_si128((const __m128i *)&x[i].hmask[0]); - const __m128i hbits_1 = _mm_loadu_si128((const __m128i *)&x[i].hmask[16]); - - // integer accumulator - __m128i sumi_0 = _mm_setzero_si128(); - __m128i sumi_1 = _mm_setzero_si128(); - - for (int j = 0; j < QK_K / 128; ++j) { - // load low 2 bits *64*2 from block_q3_K.qs[QK_K/4] - const __m128i q3bits_0 = _mm_loadu_si128((const __m128i *)q3); - q3 += 16; - const __m128i q3bits_1 = _mm_loadu_si128((const __m128i *)q3); - q3 += 16; - - // prepare low and high bits - const int bit = j << 2; - - const __m128i q3l_0 = _mm_and_si128(q3bits_0, m3); - const __m128i q3l_1 = _mm_and_si128(q3bits_1, m3); - const __m128i q3h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit)), bit), 2); - const __m128i q3h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit)), bit), 2); - - const __m128i q3l_2 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 2), m3); - const __m128i q3l_3 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 2), m3); - const __m128i q3h_2 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit + 1)), bit + 1), 2); 
- const __m128i q3h_3 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit + 1)), bit + 1), 2); - - const __m128i q3l_4 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 4), m3); - const __m128i q3l_5 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 4), m3); - const __m128i q3h_4 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit + 2)), bit + 2), 2); - const __m128i q3h_5 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit + 2)), bit + 2), 2); - - const __m128i q3l_6 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 6), m3); - const __m128i q3l_7 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 6), m3); - const __m128i q3h_6 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit + 3)), bit + 3), 2); - const __m128i q3h_7 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit + 3)), bit + 3), 2); - - // load Q8 quants from block_q8_K.qs[QK_K] - const __m128i q8_0 = _mm_loadu_si128((const __m128i *)q8); - q8 += 16; - const __m128i q8_1 = _mm_loadu_si128((const __m128i *)q8); - q8 += 16; - const __m128i q8_2 = _mm_loadu_si128((const __m128i *)q8); - q8 += 16; - const __m128i q8_3 = _mm_loadu_si128((const __m128i *)q8); - q8 += 16; - const __m128i q8_4 = _mm_loadu_si128((const __m128i *)q8); - q8 += 16; - const __m128i q8_5 = _mm_loadu_si128((const __m128i *)q8); - q8 += 16; - const __m128i q8_6 = _mm_loadu_si128((const __m128i *)q8); - q8 += 16; - const __m128i q8_7 = _mm_loadu_si128((const __m128i *)q8); - q8 += 16; - - // Dot product: we multiply the 2 low bits and 1 high bit part separately, so we can use _mm256_maddubs_epi16, - // and then subtract. 
The high bit part has the 2 already subtracted (and so, it is zero if the high bit was not set, - // and 2 if the high bit was set) - __m128i q8s_0 = _mm_maddubs_epi16(q3h_0, q8_0); - __m128i q8s_1 = _mm_maddubs_epi16(q3h_1, q8_1); - __m128i q8s_2 = _mm_maddubs_epi16(q3h_2, q8_2); - __m128i q8s_3 = _mm_maddubs_epi16(q3h_3, q8_3); - __m128i q8s_4 = _mm_maddubs_epi16(q3h_4, q8_4); - __m128i q8s_5 = _mm_maddubs_epi16(q3h_5, q8_5); - __m128i q8s_6 = _mm_maddubs_epi16(q3h_6, q8_6); - __m128i q8s_7 = _mm_maddubs_epi16(q3h_7, q8_7); - - __m128i p16_0 = _mm_maddubs_epi16(q3l_0, q8_0); - __m128i p16_1 = _mm_maddubs_epi16(q3l_1, q8_1); - __m128i p16_2 = _mm_maddubs_epi16(q3l_2, q8_2); - __m128i p16_3 = _mm_maddubs_epi16(q3l_3, q8_3); - __m128i p16_4 = _mm_maddubs_epi16(q3l_4, q8_4); - __m128i p16_5 = _mm_maddubs_epi16(q3l_5, q8_5); - __m128i p16_6 = _mm_maddubs_epi16(q3l_6, q8_6); - __m128i p16_7 = _mm_maddubs_epi16(q3l_7, q8_7); - - p16_0 = _mm_sub_epi16(p16_0, q8s_0); - p16_1 = _mm_sub_epi16(p16_1, q8s_1); - p16_2 = _mm_sub_epi16(p16_2, q8s_2); - p16_3 = _mm_sub_epi16(p16_3, q8s_3); - p16_4 = _mm_sub_epi16(p16_4, q8s_4); - p16_5 = _mm_sub_epi16(p16_5, q8s_5); - p16_6 = _mm_sub_epi16(p16_6, q8s_6); - p16_7 = _mm_sub_epi16(p16_7, q8s_7); - - // multiply with scales - __m128i shuffle = _mm_set1_epi16(0x0100); - p16_0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_0); - shuffle = _mm_add_epi16(shuffle, m2); - p16_1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_1); - shuffle = _mm_add_epi16(shuffle, m2); - p16_2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_2); - shuffle = _mm_add_epi16(shuffle, m2); - p16_3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_3); - shuffle = _mm_add_epi16(shuffle, m2); - p16_4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_4); - shuffle = _mm_add_epi16(shuffle, m2); - p16_5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_5); - shuffle = _mm_add_epi16(shuffle, m2); - p16_6 = 
_mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_6); - shuffle = _mm_add_epi16(shuffle, m2); - p16_7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_7); - - // accumulate - p16_0 = _mm_add_epi32(p16_0, p16_1); - p16_2 = _mm_add_epi32(p16_2, p16_3); - p16_4 = _mm_add_epi32(p16_4, p16_5); - p16_6 = _mm_add_epi32(p16_6, p16_7); - sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2)); - sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_4, p16_6)); - } - - // multiply with block scale and accumulate - __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); - acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc); - } - - *s = hsum_float_8(acc); - -#elif defined __wasm_simd128__ - int8_t aux8[QK_K]; - float sums[8] = {0}; - uint32_t auxs[4]; - - float sumf = 0; - for (int i = 0; i < nb; ++i) { - const uint8_t *__restrict q3 = x[i].qs; - const uint8_t *__restrict hm = x[i].hmask; - const int8_t *__restrict q8 = y[i].qs; - - // Process blocks with SIMD - int8_t *a = aux8; - uint8_t m = 1; - for (int j = 0; j < QK_K; j += 128) { - for (int shift = 0; shift <= 6; shift += 2) { - v128_t v_m = wasm_i8x16_splat(m); - for (int l = 0; l < 32; l += 16) { - v128_t v_q3 = wasm_v128_load(q3 + l); - v128_t v_shift = wasm_i8x16_shr(v_q3, shift); - v128_t v_low2 = wasm_v128_and(v_shift, wasm_i8x16_splat(0x03)); - - v128_t v_hm = wasm_v128_load(hm + l); - v128_t v_mask = wasm_v128_and(v_hm, v_m); - v_mask = wasm_i8x16_ne(v_mask, wasm_i8x16_splat(0)); - - v_low2 = wasm_i8x16_sub(v_low2, wasm_v128_and(wasm_i8x16_splat(4), wasm_v128_not(v_mask))); - wasm_v128_store(a + l, v_low2); - } - a += 32; - m <<= 1; - } - q3 += 32; - } - - // Extract scales - memcpy(auxs, x[i].scales, 12); - uint32_t tmp = auxs[2]; - auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4); - auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4); - auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4); - auxs[1] = (auxs[1] & kmask2) | 
(((tmp >> 2) & kmask1) << 4); - const int8_t *scales = (const int8_t *)auxs; - - // SIMD dot product with register accumulators - v128_t v_acc0 = wasm_i32x4_splat(0); - v128_t v_acc1 = wasm_i32x4_splat(0); - a = aux8; - for (int j = 0; j < QK_K / 16; ++j) { - const v128_t v_scale = wasm_i16x8_splat(scales[j] - 32); - - // Process 16 elements per iteration - for (int k = 0; k < 2; ++k) { - const v128_t v_q8 = wasm_i16x8_load8x8(q8); - const v128_t v_a = wasm_i16x8_load8x8(a); - - v128_t v_prod = wasm_i16x8_mul(v_q8, v_a); - v_prod = wasm_i16x8_mul(v_prod, v_scale); - - v_acc0 = wasm_i32x4_add(v_acc0, wasm_i32x4_extend_low_i16x8(v_prod)); - v_acc1 = wasm_i32x4_add(v_acc1, wasm_i32x4_extend_high_i16x8(v_prod)); - - q8 += 8; - a += 8; - } - } - - // Accumulate results - const float d = MLLM_FP16_TO_FP32(x[i].d) * y[i].d; - const v128_t v_d = wasm_f32x4_splat(d); - v128_t v_sum = wasm_f32x4_add( - wasm_f32x4_mul(wasm_f32x4_convert_i32x4(v_acc0), v_d), - wasm_f32x4_mul(wasm_f32x4_convert_i32x4(v_acc1), v_d)); - - // Accumulate into sums vector - wasm_v128_store(sums, wasm_f32x4_add(wasm_v128_load(sums), v_sum)); - } - - // Horizontal sum - v128_t v_sum = wasm_f32x4_add(wasm_v128_load(sums), wasm_v128_load(sums + 4)); - sumf = wasm_f32x4_extract_lane(v_sum, 0) + wasm_f32x4_extract_lane(v_sum, 1) + wasm_f32x4_extract_lane(v_sum, 2) + wasm_f32x4_extract_lane(v_sum, 3); - - *s = sumf; - -#elif defined __riscv_v_intrinsic - - uint32_t aux[3]; - uint32_t utmp[4]; - - const int vector_length = __riscv_vlenb() * 8; - float sumf = 0; - - switch (vector_length) { - case 256: - for (int i = 0; i < nb; ++i) { - const uint8_t *__restrict q3 = x[i].qs; - const uint8_t *__restrict qh = x[i].hmask; - const int8_t *__restrict q8 = y[i].qs; - - memcpy(aux, x[i].scales, 12); - utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4); - utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4); - utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4); - utmp[0] 
= (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4); - - int8_t *scale = (int8_t *)utmp; - for (int j = 0; j < 16; ++j) scale[j] -= 32; - - size_t vl = 32; - uint8_t m = 1; - - vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1); - vuint8m1_t vqh = __riscv_vle8_v_u8m1(qh, vl); - - int sum_t = 0; - - for (int j = 0; j < QK_K; j += 128) { - vl = 32; - - // load Q3 - vuint8m1_t q3_x = __riscv_vle8_v_u8m1(q3, vl); - - vint8m1_t q3_0 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q3_x, 0x03, vl)); - vint8m1_t q3_1 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x2, vl), 0x03, vl)); - vint8m1_t q3_2 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x4, vl), 0x03, vl)); - vint8m1_t q3_3 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x6, vl), 0x03, vl)); - - // compute mask for subtraction - vuint8m1_t qh_m0 = __riscv_vand_vx_u8m1(vqh, m, vl); - vbool8_t vmask_0 = __riscv_vmseq_vx_u8m1_b8(qh_m0, 0, vl); - vint8m1_t q3_m0 = __riscv_vsub_vx_i8m1_mu(vmask_0, q3_0, q3_0, 0x4, vl); - m <<= 1; - - vuint8m1_t qh_m1 = __riscv_vand_vx_u8m1(vqh, m, vl); - vbool8_t vmask_1 = __riscv_vmseq_vx_u8m1_b8(qh_m1, 0, vl); - vint8m1_t q3_m1 = __riscv_vsub_vx_i8m1_mu(vmask_1, q3_1, q3_1, 0x4, vl); - m <<= 1; - - vuint8m1_t qh_m2 = __riscv_vand_vx_u8m1(vqh, m, vl); - vbool8_t vmask_2 = __riscv_vmseq_vx_u8m1_b8(qh_m2, 0, vl); - vint8m1_t q3_m2 = __riscv_vsub_vx_i8m1_mu(vmask_2, q3_2, q3_2, 0x4, vl); - m <<= 1; - - vuint8m1_t qh_m3 = __riscv_vand_vx_u8m1(vqh, m, vl); - vbool8_t vmask_3 = __riscv_vmseq_vx_u8m1_b8(qh_m3, 0, vl); - vint8m1_t q3_m3 = __riscv_vsub_vx_i8m1_mu(vmask_3, q3_3, q3_3, 0x4, vl); - m <<= 1; - - // load Q8 and take product with Q3 - vint16m2_t a0 = __riscv_vwmul_vv_i16m2(q3_m0, __riscv_vle8_v_i8m1(q8, vl), vl); - vint16m2_t a1 = __riscv_vwmul_vv_i16m2(q3_m1, __riscv_vle8_v_i8m1(q8 + 32, vl), vl); - vint16m2_t a2 = __riscv_vwmul_vv_i16m2(q3_m2, __riscv_vle8_v_i8m1(q8 
+ 64, vl), vl); - vint16m2_t a3 = __riscv_vwmul_vv_i16m2(q3_m3, __riscv_vle8_v_i8m1(q8 + 96, vl), vl); - - vl = 16; - - // retrieve lane to multiply with scale - vint32m2_t aux0_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 0), (scale[0]), vl); - vint32m2_t aux0_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 1), (scale[1]), vl); - vint32m2_t aux1_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 0), (scale[2]), vl); - vint32m2_t aux1_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 1), (scale[3]), vl); - vint32m2_t aux2_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 0), (scale[4]), vl); - vint32m2_t aux2_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 1), (scale[5]), vl); - vint32m2_t aux3_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 0), (scale[6]), vl); - vint32m2_t aux3_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 1), (scale[7]), vl); - - vint32m1_t isum0 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux0_0, aux0_1, vl), vzero, vl); - vint32m1_t isum1 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux1_0, aux1_1, vl), isum0, vl); - vint32m1_t isum2 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux2_0, aux2_1, vl), isum1, vl); - vint32m1_t isum3 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux3_0, aux3_1, vl), isum2, vl); - - sum_t += __riscv_vmv_x_s_i32m1_i32(isum3); - - q3 += 32; - q8 += 128; - scale += 8; - } - - const float d = MLLM_FP16_TO_FP32(x[i].d) * y[i].d; - - sumf += d * sum_t; - } - break; - case 128: - for (int i = 0; i < nb; ++i) { - const uint8_t *restrict q3 = x[i].qs; - const uint8_t *restrict qh = x[i].hmask; - const int8_t *restrict q8 = y[i].qs; - - int8_t *scale = (int8_t *)utmp; - int tmp; - __asm__ __volatile__( - "vsetivli zero, 12, e8, m1\n\t" - "vle8.v v0, (%[s6b])\n\t" - "vmv1r.v v2, v0\n\t" - "vsetivli zero, 2, e64, m1\n\t" - "vmv.v.x v9, %[sh]\n\t" - "vslidedown.vi v1, v0, 1\n\t" - "vslide1up.vx v8, v9, zero\n\t" 
// {0, 0, 4, 4} - "vslideup.vi v0, v2, 1\n\t" // {aux[0], aux[1], aux[0], aux[1]} - "vsetivli zero, 4, e32, m1\n\t" - "vid.v v9\n\t" - "vmv.x.s %[tmp], v1\n\t" - "vsll.vi v9, v9, 1\n\t" // {0, 2, 4, 6} - "vmv.v.x v1, %[tmp]\n\t" // {aux[2], aux[2], aux[2], aux[2]} - "vsrl.vv v4, v1, v9\n\t" - "vsrl.vv v2, v0, v8\n\t" - "vand.vx v5, v4, %[kmask1]\n\t" - "vand.vx v3, v2, %[kmask2]\n\t" - "vsll.vi v6, v5, 4\n\t" - "vor.vv v7, v6, v3\n\t" - "vsetivli zero, 16, e8, m1\n\t" - "vsub.vx v0, v7, %[c]\n\t" - "vse8.v v0, (%[scale])" - : [tmp] "=&r"(tmp) - : [sh] "r"(0x0000000400000004), [s6b] "r"(x[i].scales), [c] "r"(32), [scale] "r"(scale), [kmask1] "r"(kmask1), [kmask2] "r"(kmask2) - : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); - - uint8_t m = 1; - int isum = 0; - for (int j = 0; j < QK_K; j += 128) { - __asm__ __volatile__( - "vsetvli zero, %[vl32], e8, m2, ta, mu\n\t" - "vle8.v v8, (%[q3])\n\t" - "vsrl.vi v10, v8, 2\n\t" - "vsrl.vi v12, v8, 4\n\t" - "vsrl.vi v14, v8, 6\n\t" - "vand.vi v8, v8, 3\n\t" - "vand.vi v10, v10, 3\n\t" - "vand.vi v12, v12, 3\n\t" - "vle8.v v2, (%[qh])\n\t" - "vand.vx v4, v2, %[m]\n\t" - "slli %[m], %[m], 1\n\t" - "vmseq.vx v0, v4, zero\n\t" - "vadd.vi v8, v8, -4, v0.t\n\t" - "vand.vx v4, v2, %[m]\n\t" - "slli %[m], %[m], 1\n\t" - "vmseq.vx v0, v4, zero\n\t" - "vadd.vi v10, v10, -4, v0.t\n\t" - "vand.vx v4, v2, %[m]\n\t" - "slli %[m], %[m], 1\n\t" - "vmseq.vx v0, v4, zero\n\t" - "vadd.vi v12, v12, -4, v0.t\n\t" - "vand.vx v4, v2, %[m]\n\t" - "slli %[m], %[m], 1\n\t" - "vmseq.vx v0, v4, zero\n\t" - "vadd.vi v14, v14, -4, v0.t\n\t" - "vsetvli zero, %[vl128], e8, m8\n\t" - "vle8.v v0, (%[q8])\n\t" - "vsetvli zero, %[vl64], e8, m4\n\t" - "vwmul.vv v16, v0, v8\n\t" - "vwmul.vv v24, v4, v12\n\t" - "vsetivli zero, 16, e16, m2\n\t" - "vmv.v.x v0, zero\n\t" - "vwredsum.vs v10, 
v16, v0\n\t" - "vwredsum.vs v9, v18, v0\n\t" - "vwredsum.vs v8, v20, v0\n\t" - "vwredsum.vs v7, v22, v0\n\t" - "vwredsum.vs v11, v24, v0\n\t" - "vwredsum.vs v12, v26, v0\n\t" - "vwredsum.vs v13, v28, v0\n\t" - "vwredsum.vs v14, v30, v0\n\t" - "vsetivli zero, 4, e32, m1\n\t" - "vslideup.vi v10, v9, 1\n\t" - "vslideup.vi v8, v7, 1\n\t" - "vslideup.vi v11, v12, 1\n\t" - "vslideup.vi v13, v14, 1\n\t" - "vslideup.vi v10, v8, 2\n\t" - "vslideup.vi v11, v13, 2\n\t" - "vsetivli zero, 8, e32, m2\n\t" - "vle8.v v15, (%[scale])\n\t" - "vsext.vf4 v12, v15\n\t" - "vmul.vv v10, v10, v12\n\t" - "vredsum.vs v0, v10, v0\n\t" - "vmv.x.s %[tmp], v0\n\t" - "add %[isum], %[isum], %[tmp]" - : [tmp] "=&r"(tmp), [m] "+&r"(m), [isum] "+&r"(isum) - : [vl128] "r"(128), [vl64] "r"(64), [vl32] "r"(32), [q3] "r"(q3), [qh] "r"(qh), [scale] "r"(scale), [q8] "r"(q8) - : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); - q3 += 32; - q8 += 128; - scale += 8; - } - - const float d = MLLM_FP16_TO_FP32(x[i].d) * y[i].d; - sumf += d * isum; - } - break; - default: - assert(false && "Unsupported vector length"); - break; - } - - *s = sumf; - -#elif defined(__POWER9_VECTOR__) - const vector signed char lowMask = vec_splats((signed char)0x3); - const vector signed char lowMask1 = vec_splats((int8_t)0xf); - const vector signed char lowMask2 = vec_splats((int8_t)0x30); - const vector int v0 = vec_splats((int32_t)0); - const vector signed char v1 = vec_splats((signed char)0x1); - const vector unsigned char v2 = vec_splats((unsigned char)0x2); - const vector unsigned char v3 = vec_splats((unsigned char)0x3); - const vector unsigned char v4 = vec_splats((unsigned char)0x4); - const vector unsigned char v6 = vec_splats((unsigned char)0x6); - const vector signed char off = vec_splats((signed char)0x20); - - vector float vsumf0 = 
vec_splats(0.0f); - vector float vsumf1 = vec_splats(0.0f); - vector float vsumf2 = vec_splats(0.0f); - vector float vsumf3 = vec_splats(0.0f); - - for (int i = 0; i < nb; ++i) { - vector float vxd = vec_splats(MLLM_FP16_TO_FP32(x[i].d)); - vector float vyd = vec_splats(y[i].d); - vector float vd = vec_mul(vxd, vyd); - - UNUSED(kmask1); - UNUSED(kmask2); - - vector signed char u0 = (vector signed char)vec_xl_len(x[i].scales, 8); - vector signed char u1 = vec_and(u0, lowMask1); - vector signed char u2 = (vector signed char)vec_xl_len(x[i].scales + 8, 4); - vector signed char u3 = (vector signed char)vec_mergeh((vector signed int)u2, (vector signed int)vec_sr(u2, v2)); - vector signed char u30 = vec_sl(vec_and(u3, lowMask), v4); - vector signed char u31 = vec_and(u3, lowMask2); - - u1 = vec_or(u1, u30); - u2 = vec_or(vec_sr(u0, v4), u31); - - vector signed char vscales = (vector signed char)vec_mergeh((vector signed long long)u1, (vector signed long long)u2); - vector signed char qxhs0 = (vector signed char)vec_xl(0, x[i].hmask); - vector signed char qxhs1 = (vector signed char)vec_xl(16, x[i].hmask); - - vscales = vec_sub(vscales, off); - - vector signed int vsumi0 = v0; - vector signed int vsumi1 = v0; - vector signed int vsumi2 = v0; - vector signed int vsumi3 = v0; - vector signed int vsumi4 = v0; - vector signed int vsumi5 = v0; - vector signed int vsumi6 = v0; - vector signed int vsumi7 = v0; - - const uint8_t *__restrict q3 = x[i].qs; - const int8_t *__restrict q8 = y[i].qs; - - for (int j = 0; j < QK_K / 128; ++j) { - __builtin_prefetch(q3, 0, 1); - __builtin_prefetch(q8, 0, 1); - - vector signed char qxs0 = (vector signed char)vec_xl(0, q3); - vector signed char qxs1 = (vector signed char)vec_xl(16, q3); - q3 += 32; - - // the low 2 bits - vector signed char qxs00 = vec_and(qxs0, lowMask); - vector signed char qxs01 = vec_and(vec_sr(qxs0, v2), lowMask); - vector signed char qxs02 = vec_and(vec_sr(qxs0, v4), lowMask); - vector signed char qxs03 = 
vec_and(vec_sr(qxs0, v6), lowMask); - vector signed char qxs10 = vec_and(qxs1, lowMask); - vector signed char qxs11 = vec_and(vec_sr(qxs1, v2), lowMask); - vector signed char qxs12 = vec_and(vec_sr(qxs1, v4), lowMask); - vector signed char qxs13 = vec_and(vec_sr(qxs1, v6), lowMask); - - // the 3rd bit - vector signed char qxh00 = vec_sl(vec_andc(v1, qxhs0), v2); - vector signed char qxh01 = vec_sl(vec_andc(v1, vec_sr(qxhs0, (vector unsigned char)v1)), v2); - vector signed char qxh02 = vec_sl(vec_andc(v1, vec_sr(qxhs0, v2)), v2); - vector signed char qxh03 = vec_sl(vec_andc(v1, vec_sr(qxhs0, v3)), v2); - vector signed char qxh10 = vec_sl(vec_andc(v1, qxhs1), v2); - vector signed char qxh11 = vec_sl(vec_andc(v1, vec_sr(qxhs1, (vector unsigned char)v1)), v2); - vector signed char qxh12 = vec_sl(vec_andc(v1, vec_sr(qxhs1, v2)), v2); - vector signed char qxh13 = vec_sl(vec_andc(v1, vec_sr(qxhs1, v3)), v2); - qxhs0 = vec_sr(qxhs0, v4); - qxhs1 = vec_sr(qxhs1, v4); - - vector signed char q3x00 = vec_sub(qxs00, qxh00); - vector signed char q3x01 = vec_sub(qxs01, qxh01); - vector signed char q3x02 = vec_sub(qxs02, qxh02); - vector signed char q3x03 = vec_sub(qxs03, qxh03); - vector signed char q3x10 = vec_sub(qxs10, qxh10); - vector signed char q3x11 = vec_sub(qxs11, qxh11); - vector signed char q3x12 = vec_sub(qxs12, qxh12); - vector signed char q3x13 = vec_sub(qxs13, qxh13); - - vector signed char q8y00 = vec_xl(0, q8); - vector signed char q8y10 = vec_xl(16, q8); - vector signed char q8y01 = vec_xl(32, q8); - vector signed char q8y11 = vec_xl(48, q8); - vector signed char q8y02 = vec_xl(64, q8); - vector signed char q8y12 = vec_xl(80, q8); - vector signed char q8y03 = vec_xl(96, q8); - vector signed char q8y13 = vec_xl(112, q8); - q8 += 128; - - vector signed short vscales_h = vec_unpackh(vscales); - vector signed short vs0 = vec_splat(vscales_h, 0); - vector signed short vs1 = vec_splat(vscales_h, 1); - vector signed short vs2 = vec_splat(vscales_h, 2); - vector signed 
short vs3 = vec_splat(vscales_h, 3); - vector signed short vs4 = vec_splat(vscales_h, 4); - vector signed short vs5 = vec_splat(vscales_h, 5); - vector signed short vs6 = vec_splat(vscales_h, 6); - vector signed short vs7 = vec_splat(vscales_h, 7); - vscales = vec_sld(vscales, vscales, 8); - - vector signed short qv00 = vec_add(vec_mule(q3x00, q8y00), vec_mulo(q3x00, q8y00)); - vector signed short qv01 = vec_add(vec_mule(q3x01, q8y01), vec_mulo(q3x01, q8y01)); - vector signed short qv02 = vec_add(vec_mule(q3x02, q8y02), vec_mulo(q3x02, q8y02)); - vector signed short qv03 = vec_add(vec_mule(q3x03, q8y03), vec_mulo(q3x03, q8y03)); - vector signed short qv10 = vec_add(vec_mule(q3x10, q8y10), vec_mulo(q3x10, q8y10)); - vector signed short qv11 = vec_add(vec_mule(q3x11, q8y11), vec_mulo(q3x11, q8y11)); - vector signed short qv12 = vec_add(vec_mule(q3x12, q8y12), vec_mulo(q3x12, q8y12)); - vector signed short qv13 = vec_add(vec_mule(q3x13, q8y13), vec_mulo(q3x13, q8y13)); - - vsumi0 = vec_msum(qv00, vs0, vsumi0); - vsumi1 = vec_msum(qv01, vs2, vsumi1); - vsumi2 = vec_msum(qv02, vs4, vsumi2); - vsumi3 = vec_msum(qv03, vs6, vsumi3); - vsumi4 = vec_msum(qv10, vs1, vsumi4); - vsumi5 = vec_msum(qv11, vs3, vsumi5); - vsumi6 = vec_msum(qv12, vs5, vsumi6); - vsumi7 = vec_msum(qv13, vs7, vsumi7); - } - - vsumi0 = vec_add(vsumi0, vsumi4); - vsumi1 = vec_add(vsumi1, vsumi5); - vsumi2 = vec_add(vsumi2, vsumi6); - vsumi3 = vec_add(vsumi3, vsumi7); - - vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); - vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); - vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); - } - - vsumf0 = vec_add(vsumf0, vsumf2); - vsumf1 = vec_add(vsumf1, vsumf3); - - vsumf0 = vec_add(vsumf0, vsumf1); - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - *s = vec_extract(vsumf0, 0); - -#elif defined __loongarch_asx - - const __m128i m32 = 
__lsx_vreplgr2vr_b(32); - - __m256 acc = (__m256)__lasx_xvldi(0); - - uint32_t aux[3]; - - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); - const uint8_t *__restrict q3 = x[i].qs; - const int8_t *__restrict q8 = y[i].qs; - // Set up scales - memcpy(aux, x[i].scales, 12); - __m128i scales128 = lsx_set_w( - ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4), - ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4), - (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4), - (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4)); - scales128 = __lsx_vsub_b(scales128, m32); - - const v16i8 shuffle_mask = {0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15}; - const __m256i scales_shuffled = lasx_ext8_16(__lsx_vshuf_b(scales128, scales128, (__m128i)shuffle_mask)); - - // high bit - const __m256i hbits = __lasx_xvld((const __m256i *)x[i].hmask, 0); - - // integer accumulator - __m256i sumi = __lasx_xvldi(0); - - for (int j = 0; j < QK_K / 128; ++j) { - // load low 2 bits - const __m256i q3bits = __lasx_xvld((const __m256i *)q3, 0); - q3 += 32; - - // prepare low and high bits - const __m256i q3l_0 = __lasx_xvandi_b(q3bits, 3); - const __m256i q3l_1 = __lasx_xvandi_b(__lasx_xvsrli_b(q3bits, 2), 3); - const __m256i q3l_2 = __lasx_xvandi_b(__lasx_xvsrli_b(q3bits, 4), 3); - const __m256i q3l_3 = __lasx_xvsrli_b(q3bits, 6); - const __m256i q3h_0 = __lasx_xvslli_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 4 * j + 0), 0), 2); - const __m256i q3h_1 = __lasx_xvslli_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 4 * j + 1), 0), 2); - const __m256i q3h_2 = __lasx_xvslli_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 4 * j + 2), 0), 2); - const __m256i q3h_3 = __lasx_xvslli_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 4 * j + 3), 0), 2); - const __m256i q3_0 = __lasx_xvor_v(q3h_0, q3l_0); - const __m256i q3_1 = __lasx_xvor_v(q3h_1, q3l_1); - const __m256i q3_2 = __lasx_xvor_v(q3h_2, q3l_2); - const __m256i q3_3 = __lasx_xvor_v(q3h_3, q3l_3); - - 
// load Q8 quants - const __m256i q8_0 = __lasx_xvld((const __m256i *)q8, 0); - q8 += 32; - const __m256i q8_1 = __lasx_xvld((const __m256i *)q8, 0); - q8 += 32; - const __m256i q8_2 = __lasx_xvld((const __m256i *)q8, 0); - q8 += 32; - const __m256i q8_3 = __lasx_xvld((const __m256i *)q8, 0); - q8 += 32; - - __m256i p16_0 = lasx_madd_h_b(q8_0, q3_0); - __m256i p16_1 = lasx_madd_h_b(q8_1, q3_1); - __m256i p16_2 = lasx_madd_h_b(q8_2, q3_2); - __m256i p16_3 = lasx_madd_h_b(q8_3, q3_3); - - // multiply with scales - p16_0 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 0), p16_0); - p16_1 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 1), p16_1); - p16_2 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 2), p16_2); - p16_3 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 3), p16_3); - - // accumulate - p16_0 = __lasx_xvadd_w(p16_0, p16_1); - p16_2 = __lasx_xvadd_w(p16_2, p16_3); - sumi = __lasx_xvadd_w(sumi, __lasx_xvadd_w(p16_0, p16_2)); - } - // multiply with block scale and accumulate - acc = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(sumi), acc); - } - - *s = hsum_float_8(acc); -#elif defined(__VXE__) || defined(__VXE2__) - uint32_t aux[3]; - uint32_t utmp[4]; - - const int32x4_t v_z = vec_splat_s32(0); - const uint8x16_t v_3m = vec_splat_u8(0x03); - - const uint8x16_t v_0c = vec_splat_u8(1); - const uint8x16_t v_1c = vec_sl(v_0c, 1); - const uint8x16_t v_2c = vec_sl(v_0c, 2); - const uint8x16_t v_3c = vec_sl(v_0c, 3); - - uint8x16_t q3h[4]; - uint8x16_t q3b[2]; - int8x16_t q3bytes[4]; - int8x16_t q8bytes[4]; - uint8x16_t qhbits[2]; - - float sum = 0; - - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * MLLM_FP16_TO_FP32(x[i].d); - - const uint8_t *restrict x0l = x[i].qs; - const uint8_t *restrict x0h = x[i].hmask; - const int8_t *restrict y0 = y[i].qs; - - qhbits[0] = vec_xl(0, x0h); - qhbits[1] = vec_xl(16, x0h); - - int32_t isum = 0; - - memcpy(aux, x[i].scales, 12); - utmp[3] = ((aux[1] >> 4) 
& kmask2) | (((aux[2] >> 6) & kmask1) << 4); - utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4); - utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4); - utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4); - - int8_t *scale = (int8_t *)utmp; - for (int j = 0; j < 16; ++j) scale[j] -= 32; - - for (int j = 0; j < QK_K / 128; ++j) { - int32x4_t isum0, isum1, isum2, isum3; - - q3b[0] = vec_xl(0, x0l); - q3b[1] = vec_xl(16, x0l); - x0l += 32; - - q8bytes[0] = vec_xl(0, y0); - q8bytes[1] = vec_xl(16, y0); - q8bytes[2] = vec_xl(32, y0); - q8bytes[3] = vec_xl(48, y0); - q8bytes[4] = vec_xl(64, y0); - q8bytes[5] = vec_xl(80, y0); - q8bytes[6] = vec_xl(96, y0); - q8bytes[7] = vec_xl(112, y0); - y0 += 128; - - q3h[0] = vec_sl(vec_andc(v_0c, qhbits[0]), 2); - q3h[1] = vec_sl(vec_andc(v_0c, qhbits[1]), 2); - q3h[2] = vec_sl(vec_andc(v_1c, qhbits[0]), 1); - q3h[3] = vec_sl(vec_andc(v_1c, qhbits[1]), 1); - - q3bytes[0] = vec_sub((int8x16_t)vec_and(q3b[0], v_3m), (int8x16_t)q3h[0]); - q3bytes[1] = vec_sub((int8x16_t)vec_and(q3b[1], v_3m), (int8x16_t)q3h[1]); - q3bytes[2] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[0], 2), v_3m), (int8x16_t)q3h[2]); - q3bytes[3] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[1], 2), v_3m), (int8x16_t)q3h[3]); - - isum0 = mllm_vec_dot(v_z, q3bytes[0], q8bytes[0]); - isum1 = mllm_vec_dot(v_z, q3bytes[1], q8bytes[1]); - isum2 = mllm_vec_dot(v_z, q3bytes[2], q8bytes[2]); - isum3 = mllm_vec_dot(v_z, q3bytes[3], q8bytes[3]); - - isum += (isum0[0] + isum0[1] + isum0[2] + isum0[3]) * scale[0]; - isum += (isum1[0] + isum1[1] + isum1[2] + isum1[3]) * scale[1]; - isum += (isum2[0] + isum2[1] + isum2[2] + isum2[3]) * scale[2]; - isum += (isum3[0] + isum3[1] + isum3[2] + isum3[3]) * scale[3]; - - scale += 4; - - q3h[0] = vec_andc(v_2c, qhbits[0]); - q3h[1] = vec_andc(v_2c, qhbits[1]); - q3h[2] = vec_sr(vec_andc(v_3c, qhbits[0]), 1); - q3h[3] = vec_sr(vec_andc(v_3c, qhbits[1]), 1); - - q3bytes[0] = 
vec_sub((int8x16_t)vec_and(vec_sr(q3b[0], 4), v_3m), (int8x16_t)q3h[0]); - q3bytes[1] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[1], 4), v_3m), (int8x16_t)q3h[1]); - q3bytes[2] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[0], 6), v_3m), (int8x16_t)q3h[2]); - q3bytes[3] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[1], 6), v_3m), (int8x16_t)q3h[3]); - - isum0 = mllm_vec_dot(v_z, q3bytes[0], q8bytes[4]); - isum1 = mllm_vec_dot(v_z, q3bytes[1], q8bytes[5]); - isum2 = mllm_vec_dot(v_z, q3bytes[2], q8bytes[6]); - isum3 = mllm_vec_dot(v_z, q3bytes[3], q8bytes[7]); - - isum += (isum0[0] + isum0[1] + isum0[2] + isum0[3]) * scale[0]; - isum += (isum1[0] + isum1[1] + isum1[2] + isum1[3]) * scale[1]; - isum += (isum2[0] + isum2[1] + isum2[2] + isum2[3]) * scale[2]; - isum += (isum3[0] + isum3[1] + isum3[2] + isum3[3]) * scale[3]; - - scale += 4; - - if (j == 0) { - qhbits[0] = vec_sr(qhbits[0], 4); - qhbits[1] = vec_sr(qhbits[1], 4); - } - } - - sum += d * isum; - } - - *s = sum; -#else - // scalar version - // This function is written like this so the compiler can manage to vectorize most of it - // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the - // manually vectorized version above. Every other version I tried would run at least 4 times slower. - // The ideal situation would be if we could just write the code once, and the compiler would - // automatically produce the best possible set of machine instructions, instead of us having to manually - // write vectorized versions for AVX, ARM_NEON, etc. 
- - int8_t aux8[QK_K]; - int16_t aux16[8]; - float sums[8]; - int32_t aux32[8]; - memset(sums, 0, 8 * sizeof(float)); - - uint32_t auxs[4]; - const int8_t *scales = (const int8_t *)auxs; - - float sumf = 0; - for (int i = 0; i < nb; ++i) { - const uint8_t *__restrict q3 = x[i].qs; - const uint8_t *__restrict hm = x[i].hmask; - const int8_t *__restrict q8 = y[i].qs; - memset(aux32, 0, 8 * sizeof(int32_t)); - int8_t *__restrict a = aux8; - uint8_t m = 1; - for (int j = 0; j < QK_K; j += 128) { - for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3; - for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); - a += 32; - m <<= 1; - for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3; - for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); - a += 32; - m <<= 1; - for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3; - for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); - a += 32; - m <<= 1; - for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3; - for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 
0 : 4); - a += 32; - m <<= 1; - q3 += 32; - } - a = aux8; - - memcpy(auxs, x[i].scales, 12); - uint32_t tmp = auxs[2]; - auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4); - auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4); - auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4); - auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4); - for (int j = 0; j < QK_K / 16; ++j) { - for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; - for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; - q8 += 8; - a += 8; - for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; - for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; - q8 += 8; - a += 8; - } - const float d = MLLM_FP16_TO_FP32(x[i].d) * y[i].d; - for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; - } - for (int l = 0; l < 8; ++l) sumf += sums[l]; - *s = sumf; - -#endif -} - -void vec_dot_iq2_xxs_q8_K(int n, float *__restrict s, const void *__restrict vx, const void *__restrict vy) { - assert(n % QK_K == 0); - - const block_iq2_xxs *__restrict x = (block_iq2_xxs *)vx; - const block_q8_K *__restrict y = (block_q8_K *)vy; - - const int nb = n / QK_K; - -#if defined(__ARM_NEON) - - const uint64_t *signs64 = (const uint64_t *)keven_signs_q2xs; - - uint32_t aux32[4]; - const uint8_t *aux8 = (const uint8_t *)aux32; - - mllm_int8x16x4_t q2u; - mllm_int8x16x4_t q2s; - mllm_int8x16x4_t q8b; - - float sumf = 0; - for (int i = 0; i < nb; ++i) { - const float d = MLLM_FP16_TO_FP32(x[i].d) * y[i].d; - const uint16_t *__restrict q2 = x[i].qs; - const int8_t *__restrict q8 = y[i].qs; - float sumf1 = 0, sumf2 = 0; - for (int ib32 = 0; ib32 < QK_K / 32; ib32 += 2) { - q8b = mllm_vld1q_s8_x4(q8); - q8 += 64; - memcpy(aux32, q2, 4 * sizeof(uint32_t)); - q2 += 8; - q2u.val[0] = vcombine_s8(vld1_s8((const int8_t *)(iq2xxs_grid + aux8[0])), vld1_s8((const int8_t *)(iq2xxs_grid + aux8[1]))); - q2u.val[1] = vcombine_s8(vld1_s8((const int8_t *)(iq2xxs_grid + 
aux8[2])), vld1_s8((const int8_t *)(iq2xxs_grid + aux8[3]))); - q2u.val[2] = vcombine_s8(vld1_s8((const int8_t *)(iq2xxs_grid + aux8[8])), vld1_s8((const int8_t *)(iq2xxs_grid + aux8[9]))); - q2u.val[3] = vcombine_s8(vld1_s8((const int8_t *)(iq2xxs_grid + aux8[10])), vld1_s8((const int8_t *)(iq2xxs_grid + aux8[11]))); - q2s.val[0] = vcombine_s8(vld1_s8((const int8_t *)(signs64 + ((aux32[1] >> 0) & 127))), vld1_s8((const int8_t *)(signs64 + ((aux32[1] >> 7) & 127)))); - q2s.val[1] = vcombine_s8(vld1_s8((const int8_t *)(signs64 + ((aux32[1] >> 14) & 127))), vld1_s8((const int8_t *)(signs64 + ((aux32[1] >> 21) & 127)))); - q2s.val[2] = vcombine_s8(vld1_s8((const int8_t *)(signs64 + ((aux32[3] >> 0) & 127))), vld1_s8((const int8_t *)(signs64 + ((aux32[3] >> 7) & 127)))); - q2s.val[3] = vcombine_s8(vld1_s8((const int8_t *)(signs64 + ((aux32[3] >> 14) & 127))), vld1_s8((const int8_t *)(signs64 + ((aux32[3] >> 21) & 127)))); - q2u.val[0] = vmulq_s8(q2u.val[0], q2s.val[0]); - q2u.val[1] = vmulq_s8(q2u.val[1], q2s.val[1]); - q2u.val[2] = vmulq_s8(q2u.val[2], q2s.val[2]); - q2u.val[3] = vmulq_s8(q2u.val[3], q2s.val[3]); - const int32x4_t p1 = mllm_vdotq_s32(mllm_vdotq_s32(vdupq_n_s32(0), q2u.val[0], q8b.val[0]), q2u.val[1], q8b.val[1]); - const int32x4_t p2 = mllm_vdotq_s32(mllm_vdotq_s32(vdupq_n_s32(0), q2u.val[2], q8b.val[2]), q2u.val[3], q8b.val[3]); - sumf1 += vaddvq_s32(p1) * (0.5f + (aux32[1] >> 28)); - sumf2 += vaddvq_s32(p2) * (0.5f + (aux32[3] >> 28)); - } - sumf += d * (sumf1 + sumf2); - } - *s = 0.25f * sumf; - -#elif defined(__AVX2__) - - const uint64_t *signs64 = (const uint64_t *)keven_signs_q2xs; - - uint32_t aux32[4]; - const uint8_t *aux8 = (const uint8_t *)aux32; - - __m256 accumf = _mm256_setzero_ps(); - for (int i = 0; i < nb; ++i) { - const float d = MLLM_FP16_TO_FP32(x[i].d) * y[i].d; - const uint16_t *__restrict q2 = x[i].qs; - const int8_t *__restrict q8 = y[i].qs; - __m256i sumi1 = _mm256_setzero_si256(); - __m256i sumi2 = _mm256_setzero_si256(); - 
for (int ib32 = 0; ib32 < QK_K / 32; ib32 += 2) { - const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); - q8 += 32; - const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); - q8 += 32; - memcpy(aux32, q2, 4 * sizeof(uint32_t)); - q2 += 8; - const __m256i q2_1 = _mm256_set_epi64x(iq2xxs_grid[aux8[3]], iq2xxs_grid[aux8[2]], iq2xxs_grid[aux8[1]], iq2xxs_grid[aux8[0]]); - const __m256i q2_2 = _mm256_set_epi64x(iq2xxs_grid[aux8[11]], iq2xxs_grid[aux8[10]], iq2xxs_grid[aux8[9]], iq2xxs_grid[aux8[8]]); - const __m256i s2_1 = _mm256_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127], - signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); - const __m256i s2_2 = _mm256_set_epi64x(signs64[(aux32[3] >> 21) & 127], signs64[(aux32[3] >> 14) & 127], - signs64[(aux32[3] >> 7) & 127], signs64[(aux32[3] >> 0) & 127]); - const __m256i q8s_1 = _mm256_sign_epi8(q8_1, s2_1); - const __m256i q8s_2 = _mm256_sign_epi8(q8_2, s2_2); - const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); - const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); - const uint16_t ls1 = aux32[1] >> 28; - const uint16_t ls2 = aux32[3] >> 28; - const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2 * ls1 + 1)); - const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2 * ls2 + 1)); - sumi1 = _mm256_add_epi32(sumi1, p1); - sumi2 = _mm256_add_epi32(sumi2, p2); - } - - accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf); - } - - *s = 0.125f * hsum_float_8(accumf); - -#elif defined(__AVX__) - const uint64_t *signs64 = (const uint64_t *)keven_signs_q2xs; - - uint32_t aux32[4]; - const uint8_t *aux8 = (const uint8_t *)aux32; - - __m256 accumf = _mm256_setzero_ps(); - for (int i = 0; i < nb; ++i) { - const float d = MLLM_FP16_TO_FP32(x[i].d) * y[i].d; - const uint16_t *__restrict q2 = x[i].qs; - const int8_t *__restrict q8 = y[i].qs; - __m128i sumi1_0 = _mm_setzero_si128(); - __m128i sumi1_1 = 
_mm_setzero_si128(); - __m128i sumi2_0 = _mm_setzero_si128(); - __m128i sumi2_1 = _mm_setzero_si128(); - for (int ib32 = 0; ib32 < QK_K / 32; ib32 += 2) { - const __m128i q8_1_0 = _mm_loadu_si128((const __m128i *)q8); - q8 += 16; - const __m128i q8_1_1 = _mm_loadu_si128((const __m128i *)q8); - q8 += 16; - const __m128i q8_2_0 = _mm_loadu_si128((const __m128i *)q8); - q8 += 16; - const __m128i q8_2_1 = _mm_loadu_si128((const __m128i *)q8); - q8 += 16; - memcpy(aux32, q2, 4 * sizeof(uint32_t)); - q2 += 8; - const __m128i q2_1_0 = _mm_set_epi64x(iq2xxs_grid[aux8[1]], iq2xxs_grid[aux8[0]]); - const __m128i q2_1_1 = _mm_set_epi64x(iq2xxs_grid[aux8[3]], iq2xxs_grid[aux8[2]]); - const __m128i q2_2_0 = _mm_set_epi64x(iq2xxs_grid[aux8[9]], iq2xxs_grid[aux8[8]]); - const __m128i q2_2_1 = _mm_set_epi64x(iq2xxs_grid[aux8[11]], iq2xxs_grid[aux8[10]]); - const __m128i s2_1_0 = _mm_set_epi64x(signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); - const __m128i s2_1_1 = _mm_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127]); - const __m128i s2_2_0 = _mm_set_epi64x(signs64[(aux32[3] >> 7) & 127], signs64[(aux32[3] >> 0) & 127]); - const __m128i s2_2_1 = _mm_set_epi64x(signs64[(aux32[3] >> 21) & 127], signs64[(aux32[3] >> 14) & 127]); - const __m128i q8s_1_0 = _mm_sign_epi8(q8_1_0, s2_1_0); - const __m128i q8s_1_1 = _mm_sign_epi8(q8_1_1, s2_1_1); - const __m128i q8s_2_0 = _mm_sign_epi8(q8_2_0, s2_2_0); - const __m128i q8s_2_1 = _mm_sign_epi8(q8_2_1, s2_2_1); - const __m128i dot1_0 = _mm_maddubs_epi16(q2_1_0, q8s_1_0); - const __m128i dot1_1 = _mm_maddubs_epi16(q2_1_1, q8s_1_1); - const __m128i dot2_0 = _mm_maddubs_epi16(q2_2_0, q8s_2_0); - const __m128i dot2_1 = _mm_maddubs_epi16(q2_2_1, q8s_2_1); - const uint16_t ls1 = aux32[1] >> 28; - const uint16_t ls2 = aux32[3] >> 28; - const __m128i p1_0 = _mm_madd_epi16(dot1_0, _mm_set1_epi16(2 * ls1 + 1)); - const __m128i p1_1 = _mm_madd_epi16(dot1_1, _mm_set1_epi16(2 * ls1 + 1)); - const __m128i p2_0 = 
_mm_madd_epi16(dot2_0, _mm_set1_epi16(2 * ls2 + 1)); - const __m128i p2_1 = _mm_madd_epi16(dot2_1, _mm_set1_epi16(2 * ls2 + 1)); - sumi1_0 = _mm_add_epi32(sumi1_0, p1_0); - sumi1_1 = _mm_add_epi32(sumi1_1, p1_1); - sumi2_0 = _mm_add_epi32(sumi2_0, p2_0); - sumi2_1 = _mm_add_epi32(sumi2_1, p2_1); - } - - accumf = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_add_epi32(sumi1_1, sumi2_1), _mm_add_epi32(sumi1_0, sumi2_0)))), accumf); - } - - *s = 0.125f * hsum_float_8(accumf); - -#elif defined(__POWER9_VECTOR__) - const vector int v0 = vec_splats((int32_t)0); - vector float vsumf0 = vec_splats(0.0f); - vector float vsumf1 = vec_splats(0.0f); - vector float vsumf2 = vec_splats(0.0f); - vector float vsumf3 = vec_splats(0.0f); - - const uint64_t *signs64 = (const uint64_t *)keven_signs_q2xs; - - for (int i = 0; i < nb; ++i) { - vector float vxd = vec_splats(MLLM_FP16_TO_FP32(x[i].d)); - vector float vyd = vec_splats(y[i].d); - vector float vd = vec_mul(vxd, vyd); - - vector signed int vsumi0 = v0; - vector signed int vsumi1 = v0; - vector signed int vsumi2 = v0; - vector signed int vsumi3 = v0; - - const uint16_t *__restrict q2 = x[i].qs; - const int8_t *__restrict q8 = y[i].qs; - - for (int j = 0; j < QK_K / 32; j += 2) { - __builtin_prefetch(q2, 0, 1); - __builtin_prefetch(q8, 0, 1); - - uint32_t aux32[4]; - const uint8_t *aux8 = (const uint8_t *)aux32; - - memcpy(aux32, q2, 4 * sizeof(uint32_t)); - q2 += 8; - - vector signed long long aux64x2_0 = {*(const int64_t *)(iq2xxs_grid + aux8[0]), *(const int64_t *)(iq2xxs_grid + aux8[1])}; - vector signed long long aux64x2_1 = {*(const int64_t *)(iq2xxs_grid + aux8[2]), *(const int64_t *)(iq2xxs_grid + aux8[3])}; - vector signed long long aux64x2_2 = {*(const int64_t *)(iq2xxs_grid + aux8[8]), *(const int64_t *)(iq2xxs_grid + aux8[9])}; - vector signed long long aux64x2_3 = {*(const int64_t *)(iq2xxs_grid + aux8[10]), *(const int64_t *)(iq2xxs_grid + aux8[11])}; - - vector signed long 
long vsigns0 = {*(const int64_t *)(signs64 + ((aux32[1] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 7) & 127))}; - vector signed long long vsigns1 = {*(const int64_t *)(signs64 + ((aux32[1] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 21) & 127))}; - vector signed long long vsigns2 = {*(const int64_t *)(signs64 + ((aux32[3] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 7) & 127))}; - vector signed long long vsigns3 = {*(const int64_t *)(signs64 + ((aux32[3] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 21) & 127))}; - - vector signed char q2x0 = (vector signed char)vec_mul((vector signed char)vsigns0, (vector signed char)aux64x2_0); - vector signed char q2x1 = (vector signed char)vec_mul((vector signed char)vsigns1, (vector signed char)aux64x2_1); - vector signed char q2x2 = (vector signed char)vec_mul((vector signed char)vsigns2, (vector signed char)aux64x2_2); - vector signed char q2x3 = (vector signed char)vec_mul((vector signed char)vsigns3, (vector signed char)aux64x2_3); - - vector signed char q8y0 = vec_xl(0, q8); - vector signed char q8y1 = vec_xl(16, q8); - vector signed char q8y2 = vec_xl(32, q8); - vector signed char q8y3 = vec_xl(48, q8); - q8 += 64; - - vector signed short qv0 = vec_add(vec_mule(q2x0, q8y0), vec_mulo(q2x0, q8y0)); - vector signed short qv1 = vec_add(vec_mule(q2x1, q8y1), vec_mulo(q2x1, q8y1)); - vector signed short qv2 = vec_add(vec_mule(q2x2, q8y2), vec_mulo(q2x2, q8y2)); - vector signed short qv3 = vec_add(vec_mule(q2x3, q8y3), vec_mulo(q2x3, q8y3)); - - const uint16_t ls0 = aux32[1] >> 28; - const uint16_t ls1 = aux32[3] >> 28; - - vector signed short vscales01 = vec_splats((int16_t)(2 * ls0 + 1)); - vector signed short vscales23 = vec_splats((int16_t)(2 * ls1 + 1)); - - vsumi0 = vec_msum(qv0, vscales01, vsumi0); - vsumi1 = vec_msum(qv1, vscales01, vsumi1); - vsumi2 = vec_msum(qv2, vscales23, vsumi2); - vsumi3 = vec_msum(qv3, vscales23, vsumi3); - } - - vsumf0 = 
vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); - vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); - vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); - } - - vsumf0 = vec_add(vsumf0, vsumf2); - vsumf1 = vec_add(vsumf1, vsumf3); - - vsumf0 = vec_add(vsumf0, vsumf1); - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - *s = 0.125f * vec_extract(vsumf0, 0); - -#elif defined(__loongarch_asx) - - const uint64_t *signs64 = (const uint64_t *)keven_signs_q2xs; - - uint32_t aux32[4]; - const uint8_t *aux8 = (const uint8_t *)aux32; - - __m256 accumf = (__m256)__lasx_xvldi(0); - for (int i = 0; i < nb; ++i) { - const float d = MLLM_FP16_TO_FP32(x[i].d) * y[i].d; - const uint16_t *__restrict q2 = x[i].qs; - const int8_t *__restrict q8 = y[i].qs; - __m256i sumi1 = __lasx_xvldi(0); - __m256i sumi2 = __lasx_xvldi(0); - for (int ib32 = 0; ib32 < QK_K / 32; ib32 += 2) { - const __m256i q8_1 = __lasx_xvld((const __m256i *)q8, 0); - q8 += 32; - const __m256i q8_2 = __lasx_xvld((const __m256i *)q8, 0); - q8 += 32; - memcpy(aux32, q2, 4 * sizeof(uint32_t)); - q2 += 8; - - const __m256i q2_1 = lasx_set_d(iq2xxs_grid[aux8[3]], iq2xxs_grid[aux8[2]], iq2xxs_grid[aux8[1]], iq2xxs_grid[aux8[0]]); - const __m256i q2_2 = lasx_set_d(iq2xxs_grid[aux8[11]], iq2xxs_grid[aux8[10]], iq2xxs_grid[aux8[9]], iq2xxs_grid[aux8[8]]); - const __m256i s2_1 = lasx_set_d(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127], - signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); - const __m256i s2_2 = lasx_set_d(signs64[(aux32[3] >> 21) & 127], signs64[(aux32[3] >> 14) & 127], - signs64[(aux32[3] >> 7) & 127], signs64[(aux32[3] >> 0) & 127]); - const __m256i q8s_1 = __lasx_xvsigncov_b(s2_1, q8_1); - const __m256i q8s_2 = __lasx_xvsigncov_b(s2_2, q8_2); - const __m256i dot1 = lasx_maddubs_h(q2_1, q8s_1); - const __m256i dot2 = lasx_maddubs_h(q2_2, q8s_2); - const uint16_t 
ls1 = aux32[1] >> 28; - const uint16_t ls2 = aux32[3] >> 28; - const __m256i p1 = lasx_madd_h(dot1, __lasx_xvreplgr2vr_h(2 * ls1 + 1)); - const __m256i p2 = lasx_madd_h(dot2, __lasx_xvreplgr2vr_h(2 * ls2 + 1)); - sumi1 = __lasx_xvadd_w(sumi1, p1); - sumi2 = __lasx_xvadd_w(sumi2, p2); - } - - accumf = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(__lasx_xvadd_w(sumi1, sumi2)), accumf); - } - - *s = 0.125f * hsum_float_8(accumf); -// #elif defined(__VXE__) || defined(__VXE2__) -// const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; -// -// uint32_t aux32[4]; -// const uint8_t * aux8 = (const uint8_t *)aux32; -// -// float sumf = 0; -// -// for (int i = 0; i < nb; ++i) { -// const float d = MLLM_FP16_TO_FP32(x[i].d) * y[i].d; -// const uint16_t * __restrict q2 = x[i].qs; -// const int8_t * __restrict q8 = y[i].qs; -// -// float sumf1 = 0, sumf2 = 0; -// -// for (int ib32 = 0; ib32 < QK_K/32; ib += 2) { -// int8x16_t q8b0 = vec_xl( 0, q8); -// int8x16_t qb81 = vec_xl(16, q8); -// int8x16_t q8b2 = vec_xl(32, q8); -// int8x16_t q8b3 = vec_xl(48, q8); -// q8 += 64; -// -// memcpy(aux32, q2, 4 * sizeof(uint32_t)); -// q2 += 8; -// -// int8x16_t q2u0 = { *(const int64_t *)(iq2xxs_grid + aux8[ 0]), *(const int64_t *)(iq2xxs_grid + aux8[ 1]) }; -// int8x16_t q2u1 = { *(const int64_t *)(iq2xxs_grid + aux8[ 2]), *(const int64_t *)(iq2xxs_grid + aux8[ 3]) }; -// int8x16_t q2u2 = { *(const int64_t *)(iq2xxs_grid + aux8[ 8]), *(const int64_t *)(iq2xxs_grid + aux8[ 9]) }; -// int8x16_t q2u3 = { *(const int64_t *)(iq2xxs_grid + aux8[10]), *(const int64_t *)(iq2xxs_grid + aux8[11]) }; -// -// int8x16_t q2s0 = { *(const int64_t *)(signs64 + ((aux32[1] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 7) & 127)) }; -// int8x16_t q2s1 = { *(const int64_t *)(signs64 + ((aux32[1] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 21) & 127)) }; -// int8x16_t q2s2 = { *(const int64_t *)(signs64 + ((aux32[3] >> 0) & 127)), *(const int64_t *)(signs64 
+ ((aux32[3] >> 7) & 127)) }; -// int8x16_t q2s3 = { *(const int64_t *)(signs64 + ((aux32[3] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 21) & 127)) }; -// -// q2u0 = vec_mul(q2u0, q2s0); -// q2u1 = vec_mul(q2u1, q2s1); -// q2u2 = vec_mul(q2u2, q2s2); -// q2u3 = vec_mul(q2u3, q2s3); -// -// const int32x4_t p1 = mllm_vec_dot(mllm_vec_dot(vec_splat_s32(0), q2u0, q8b0), q2u1, q8b1); -// const int32x4_t p2 = mllm_vec_dot(mllm_vec_dot(vec_splat_s32(0), q2u2, q8b2), q2u3, q8b3); -// -// sumf1 += (p1[0] + p1[1] + p1[2] + p1[3]) * (0.5f + (aux32[1] >> 28)); -// sumf2 += (p2[0] + p2[1] + p2[2] + p2[3]) * (0.5f + (aux32[3] >> 28)); -// } -// -// sumf += d * (sumf1 + sumf2); -// } -// -// *s = 0.25f * sumf; -#else - - uint32_t aux32[2]; - const uint8_t *aux8 = (const uint8_t *)aux32; - - float sumf = 0.f; - for (int i = 0; i < nb; ++i) { - const float d = MLLM_FP16_TO_FP32(x[i].d) * y[i].d; - const uint16_t *__restrict q2 = x[i].qs; - const int8_t *__restrict q8 = y[i].qs; - int32_t bsum = 0; - for (int ib32 = 0; ib32 < QK_K / 32; ++ib32) { - memcpy(aux32, q2, 2 * sizeof(uint32_t)); - q2 += 4; - const uint32_t ls = 2 * (aux32[1] >> 28) + 1; - int32_t sumi = 0; - for (int l = 0; l < 4; ++l) { - const uint8_t *grid = (const uint8_t *)(iq2xxs_grid + aux8[l]); - const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7 * l) & 127]; - for (int j = 0; j < 8; ++j) { - sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1); - } - q8 += 8; - } - bsum += sumi * ls; - } - sumf += d * bsum; - } - *s = 0.125f * sumf; -#endif -} diff --git a/src/backends/cpu/function/CPUClipFunc.hpp b/src/backends/cpu/function/CPUClipFunc.hpp deleted file mode 100644 index 08e6d9e94..000000000 --- a/src/backends/cpu/function/CPUClipFunc.hpp +++ /dev/null @@ -1,307 +0,0 @@ -// -// Created by Rongjie Yi on 24-2-26. 
-// - -#ifndef CPUCLIPFUNC_HPP -#define CPUCLIPFUNC_HPP -#include "Tensor.hpp" -#include "Types.hpp" -#include "CPUBackend.hpp" - -namespace mllm { -class Tensor; - -class CPUclipFunction : public TensorFunction { -public: - void reshape(vector> outputs, vector> inputs, vector args) override { - int b_size = args[0]; - int h_size = args[1]; - int s_size = args[2]; - int d_size = args[3]; - vector b = {}; - vector h = {}; - vector s = {}; - vector d = {}; - for (int i = 0; i < b_size; i++) { - b.push_back(args[4 + i]); - } - for (int i = 0; i < h_size; i++) { - h.push_back(args[4 + b_size + i]); - } - for (int i = 0; i < s_size; i++) { - s.push_back(args[4 + b_size + h_size + i]); - } - for (int i = 0; i < d_size; i++) { - d.push_back(args[4 + b_size + h_size + s_size + i]); - } - int dim_b = inputs[0]->batch(); - int dim_h = inputs[0]->head(); - int dim_s = inputs[0]->sequence(); - int dim_d = inputs[0]->dimension(); - std::vector, int *>> data = {{b, &dim_b}, {h, &dim_h}, {s, &dim_s}, {d, &dim_d}}; - for (auto &pair : data) { - if (pair.first.size() == 2) { - *pair.second = pair.first[1] - pair.first[0]; - } else if (pair.first.size() == 1) { - *pair.second = 1; - } - } - outputs[0]->reshape(dim_b, dim_h, dim_s, dim_d); - outputs[0]->setDtype(inputs[0]->dtype()); - outputs[0]->alloc(); - } - void execute(vector> outputs, vector> inputs, vector args) override { - int b_size = args[0]; - int h_size = args[1]; - int s_size = args[2]; - int d_size = args[3]; - vector b = {}; - vector h = {}; - vector s = {}; - vector d = {}; - for (int i = 0; i < b_size; i++) { - b.push_back(args[4 + i]); - } - for (int i = 0; i < h_size; i++) { - h.push_back(args[4 + b_size + i]); - } - for (int i = 0; i < s_size; i++) { - s.push_back(args[4 + b_size + h_size + i]); - } - for (int i = 0; i < d_size; i++) { - d.push_back(args[4 + b_size + h_size + s_size + i]); - } - int dim_b = inputs[0]->batch(); - int dim_h = inputs[0]->head(); - int dim_s = inputs[0]->sequence(); - int dim_d = 
inputs[0]->dimension(); - std::vector, int *>> data = {{b, &dim_b}, {h, &dim_h}, {s, &dim_s}, {d, &dim_d}}; - for (auto &pair : data) { - if (pair.first.size() == 2) { - *pair.second = pair.first[1] - pair.first[0]; - } else if (pair.first.size() == 1) { - *pair.second = 1; - } - } - if (outputs[0]->dimension() * outputs[0]->sequence() * outputs[0]->head() * outputs[0]->batch() == 0 - || outputs[0]->shape().empty() - || dim_d != outputs[0]->dimension()) { - outputs[0]->reshape(dim_b, dim_h, dim_s, dim_d); - outputs[0]->alloc(); - } - - if (s.size() == 2) { -#pragma omp parallel for collapse(1) num_threads(CPUBackend::cpu_threads) - for (int b = 0; b < inputs[0]->batch(); ++b) { - memcpy(outputs[0]->hostPtr() + outputs[0]->offset(b, 0, 0, 0), - inputs[0]->hostPtr() + inputs[0]->offset(b, 0, s[0], 0), - inputs[0]->head() * (s[1] - s[0]) * inputs[0]->dimension() * sizeof(float)); - } - } else if (s.size() == 1) { - int seq_idx = s[0]; - if (seq_idx < 0) { - seq_idx = inputs[0]->sequence() + seq_idx; - } -#pragma omp parallel for collapse(1) num_threads(CPUBackend::cpu_threads) - for (int b = 0; b < inputs[0]->batch(); ++b) { - memcpy(outputs[0]->hostPtr() + outputs[0]->offset(b, 0, 0, 0), - inputs[0]->hostPtr() + inputs[0]->offset(b, 0, seq_idx, 0), - inputs[0]->head() * 1 * inputs[0]->dimension() * sizeof(float)); - } - } else if (b.size() == 1) { - int bth_idx = b[0]; - if (bth_idx < 0) { - bth_idx = inputs[0]->batch() + bth_idx; - } - memcpy(outputs[0]->hostPtr(), - inputs[0]->hostPtr() + inputs[0]->offset(bth_idx, 0, 0, 0), - inputs[0]->head() * inputs[0]->sequence() * inputs[0]->dimension() * sizeof(float)); - } else if (b.size() == 2) { - assert(b[1] - b[0] > 0); - memcpy(outputs[0]->hostPtr(), - inputs[0]->hostPtr() + inputs[0]->offset(b[0], 0, 0, 0), - (b[1] - b[0]) * inputs[0]->head() * inputs[0]->sequence() * inputs[0]->dimension() * sizeof(float)); - } else if (d.size() == 2) { -#pragma omp parallel for collapse(1) num_threads(CPUBackend::cpu_threads) - for 
(int b = 0; b < inputs[0]->batch(); ++b) { - for (int s = 0; s < inputs[0]->sequence(); ++s) { - for (int h = 0; h < inputs[0]->head(); ++h) { - memcpy(outputs[0]->hostPtr() + outputs[0]->offset(b, h, s, 0), - inputs[0]->hostPtr() + inputs[0]->offset(b, h, s, d[0]), - (d[1] - d[0]) * sizeof(float)); - } - } - } - } else { - std::cout << "[TODO]Tensor.CLip not support!!!!" << std::endl; - } - } -}; - -class CPUclipaxisFunction : public TensorFunction { -public: - void reshape(vector> outputs, vector> inputs, vector args) override { - Chl axis = (Chl)args[0]; - int b_size = args[1]; - int h_size = args[2]; - int s_size = args[3]; - int d_size = args[4]; - vector b = {}; - vector h = {}; - vector s = {}; - vector d = {}; - for (int i = 0; i < b_size; i++) { - b.push_back(args[5 + i]); - } - for (int i = 0; i < h_size; i++) { - h.push_back(args[5 + b_size + i]); - } - for (int i = 0; i < s_size; i++) { - s.push_back(args[5 + b_size + h_size + i]); - } - for (int i = 0; i < d_size; i++) { - d.push_back(args[5 + b_size + h_size + s_size + i]); - } - int dim_b = inputs[0]->batch(); - int dim_h = inputs[0]->head(); - int dim_s = inputs[0]->sequence(); - int dim_d = inputs[0]->dimension(); - switch (axis) { - case BATCH: { - std::vector, int *>> data = {{h, &dim_h}, {s, &dim_s}, {d, &dim_d}}; - for (auto &pair : data) { - if (!pair.first.empty()) { - *pair.second = 1; - } - } - break; - } - case HEAD: { - std::vector, int *>> data = {{b, &dim_b}, {s, &dim_s}, {d, &dim_d}}; - for (auto &pair : data) { - if (!pair.first.empty()) { - *pair.second = 1; - } - } - break; - } - case SEQUENCE: { - std::vector, int *>> data = {{b, &dim_b}, {h, &dim_h}, {d, &dim_d}}; - for (auto &pair : data) { - if (!pair.first.empty()) { - *pair.second = 1; - } - } - break; - } - case DIMENSION: { - std::vector, int *>> data = {{b, &dim_b}, {h, &dim_h}, {s, &dim_s}}; - for (auto &pair : data) { - if (!pair.first.empty()) { - *pair.second = 1; - } - } - break; - } - default: - break; - } - 
outputs[0]->reshape(dim_b, dim_h, dim_s, dim_d); - outputs[0]->setDtype(inputs[0]->dtype()); - outputs[0]->alloc(); - } - void execute(vector> outputs, vector> inputs, vector args) override { - Chl axis = (Chl)args[0]; - int b_size = args[1]; - int h_size = args[2]; - int s_size = args[3]; - int d_size = args[4]; - vector b = {}; - vector h = {}; - vector s = {}; - vector d = {}; - for (int i = 0; i < b_size; i++) { - b.push_back(args[5 + i]); - } - for (int i = 0; i < h_size; i++) { - h.push_back(args[5 + b_size + i]); - } - for (int i = 0; i < s_size; i++) { - s.push_back(args[5 + b_size + h_size + i]); - } - for (int i = 0; i < d_size; i++) { - d.push_back(args[5 + b_size + h_size + s_size + i]); - } - if (axis == BATCH) { - if (!s.empty()) { - for (int i = 0; i < s.size(); ++i) { - auto seq_idx = s[i]; - memcpy(outputs[0]->hostPtr() + outputs[0]->offset(i, 0, 0, 0), - inputs[0]->hostPtr() + inputs[0]->offset(i, 0, seq_idx, 0), - inputs[0]->head() * 1 * inputs[0]->dimension() * sizeof(float)); - } - } - } else { - std::cout << "[TODO]Tensor.CLip not support!!!!" << std::endl; - } - } -}; - -class CPUcliptensorFunction : public TensorFunction { -public: - void reshape(vector> outputs, vector> inputs, vector args) override { - Chl dim = (Chl)args[0]; - if (dim == SEQUENCE) { - int new_seq = inputs[1]->dimension(); - outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), new_seq, inputs[0]->dimension()); - } else if (dim == DIMENSION) { - int new_seq = inputs[1]->dimension(); - outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), new_seq); - } else { - std::cout << "[TODO]Tensor.CLip not support!!!!" 
<< std::endl; - } - outputs[0]->setDtype(inputs[0]->dtype()); - outputs[0]->alloc(); - } - void execute(vector> outputs, vector> inputs, vector args) override { - Chl dim = (Chl)args[0]; - if (dim == SEQUENCE) { - int new_seq = inputs[1]->dimension(); - if (outputs[0]->sequence() == 0 || outputs[0]->shape().empty() - || new_seq != outputs[0]->sequence()) { - outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), new_seq, inputs[0]->dimension()); - outputs[0]->alloc(); - } -#pragma omp parallel for collapse(2) num_threads(CPUBackend::cpu_threads) - for (int b = 0; b < inputs[0]->batch(); ++b) { - for (int s = 0; s < inputs[1]->dimension(); ++s) { - auto selected_idx = (int)inputs[1]->dataAt(0, 0, 0, s); - memcpy(outputs[0]->ptrAt(b, 0, s, 0), - inputs[0]->ptrAt(b, 0, selected_idx, 0), - inputs[0]->head() * inputs[0]->dimension() * sizeof(float)); - } - } - } else if (dim == DIMENSION) { - int new_seq = inputs[1]->dimension(); - if (outputs[0]->sequence() == 0 || outputs[0]->shape().empty() - || new_seq != outputs[0]->sequence()) { - outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), new_seq); - outputs[0]->alloc(); - } -#pragma omp parallel for collapse(3) num_threads(CPUBackend::cpu_threads) - for (int b = 0; b < inputs[0]->batch(); ++b) { - for (int s = 0; s < inputs[0]->sequence(); ++s) { - for (int d = 0; d < inputs[1]->dimension(); ++d) { - auto selected_idx = (int)inputs[1]->dataAt(0, 0, 0, d); - outputs[0]->setDataAt(b, 0, s, d, - inputs[0]->dataAt(b, 0, s, selected_idx)); - } - } - } - } else { - std::cout << "[TODO]Tensor.CLip not support!!!!" << std::endl; - } - } -}; -} // namespace mllm -#endif // CPUCLIPFUNC_HPP \ No newline at end of file diff --git a/src/backends/cpu/function/CPUExpandFunc.hpp b/src/backends/cpu/function/CPUExpandFunc.hpp deleted file mode 100644 index 3a86bf953..000000000 --- a/src/backends/cpu/function/CPUExpandFunc.hpp +++ /dev/null @@ -1,80 +0,0 @@ -// -// Created by Rongjie Yi on 24-2-26. 
-// - -#ifndef CPUEXPANDFUNC_HPP -#define CPUEXPANDFUNC_HPP -#include "Tensor.hpp" -#include "Types.hpp" -#include "CPUBackend.hpp" - -namespace mllm { -class Tensor; - -class CPUexpandFunction : public TensorFunction { -public: - void reshape(vector> outputs, vector> inputs, vector args) override { - int b = (int)args[0]; - int h = (int)args[1]; - int s = (int)args[2]; - int d = (int)args[3]; - assert(b * h * d * s < 0); - int dim_b = inputs[0]->batch(); - int dim_h = inputs[0]->head(); - int dim_s = inputs[0]->sequence(); - int dim_d = inputs[0]->dimension(); - if (b != -1) { - assert(dim_b == 1); - dim_b = b; - } else if (s != -1) { - assert(dim_s == 1); - dim_s = s; - } else if (h != -1) { - assert(dim_h == 1); - dim_h = h; - } else if (d != -1) { - assert(dim_d == 1); - dim_d = d; - } - outputs[0]->reshape(dim_b, dim_h, dim_s, dim_d); - outputs[0]->alloc(); - } - void execute(vector> outputs, vector> inputs, vector args) override { - int b = (int)args[0]; - int h = (int)args[1]; - int s = (int)args[2]; - int d = (int)args[3]; - int dim_b = inputs[0]->batch(); - int dim_s = inputs[0]->sequence(); - int dim_h = inputs[0]->head(); - int dim_d = inputs[0]->dimension(); - if (b != -1) { - std::cerr << "expand tp support" << std::endl; - } else if (s != -1) { -#pragma omp parallel for collapse(2) num_threads(CPUBackend::cpu_threads) - for (int b_ = 0; b_ < dim_b; ++b_) { - for (int s_ = 0; s_ < s; ++s_) { - memcpy(outputs[0]->ptrAt(b_, 0, s_, 0), - inputs[0]->ptrAt(b_, 0, 0, 0), - dim_d * dim_h * sizeof(float)); - } - } - } else if (h != -1) { -#pragma omp parallel for collapse(3) num_threads(CPUBackend::cpu_threads) - for (int b_ = 0; b_ < dim_b; ++b_) { - for (int s_ = 0; s_ < dim_s; ++s_) { - for (int h_ = 0; h_ < h; ++h_) { - memcpy(outputs[0]->ptrAt(b_, h_, s_, 0), - inputs[0]->ptrAt(b_, h_, 0, 0), - dim_d * sizeof(float)); - } - } - } - } else if (d != -1) { - std::cerr << "expand tp support" << std::endl; - } - } -}; - -} // namespace mllm -#endif // 
CPUEXPANDFUNC_HPP \ No newline at end of file diff --git a/src/backends/cpu/function/CPUFlattenFunc.hpp b/src/backends/cpu/function/CPUFlattenFunc.hpp deleted file mode 100644 index 900eecff8..000000000 --- a/src/backends/cpu/function/CPUFlattenFunc.hpp +++ /dev/null @@ -1,128 +0,0 @@ -// -// Created by Rongjie Yi on 24-2-26. -// - -#ifndef CPUFLATTENFUNC_HPP -#define CPUFLATTENFUNC_HPP -#include "Tensor.hpp" -#include "Types.hpp" -#include "Module.hpp" - -namespace mllm { -class Tensor; - -class CPUflattenFunction : public TensorFunction { -public: - void setUp(vector> outputs, vector> inputs, vector args) override { - // inputs[0]->shallowCopyFrom(outputs[0].get(), false); - Chl axis_start = (Chl)args[0]; - Chl axis_end = (Chl)args[1]; - if ((axis_start == TIME & axis_end == WIDTH && inputs[0]->ctype() == BCTHW) - || (axis_start == CHANNLE & axis_end == HEIGHT && inputs[0]->ctype() == BWCTH) - || (axis_start == HEIGHT & axis_end == CHANNLE && inputs[0]->ctype() == BTHWC) - || (axis_start == BATCH & axis_end == SEQUENCE && inputs[0]->ctype() != BCTHW) - || (axis_start == HEAD & axis_end == SEQUENCE && inputs[0]->ctype() == BSHD) - || (axis_start == HEAD & axis_end == SEQUENCE && inputs[0]->ctype() == BHDS) - || (axis_start == HEAD & axis_end == SEQUENCE && inputs[0]->ctype() == BDHS) - || (axis_start == HEAD & axis_end == DIMENSION && inputs[0]->ctype() == BSHD) - || (axis_start == HEAD & axis_end == DIMENSION && inputs[0]->ctype() == BHDS) - || (axis_start == HEAD & axis_end == SEQUENCE && inputs[0]->ctype() == BDSH)) { - if (inputs[0]->masterTensor() == nullptr) { - inputs[0]->free(); - } - outputs[0]->setDtype(inputs[0]->dtype()); - outputs[0]->alloc(); - inputs[0]->shallowCopyFrom(outputs[0].get(), false); - } else if (Module::llm_model_ptr->op_transposed_flag) { - if (inputs[0]->masterTensor() == nullptr) { - inputs[0]->free(); - } - outputs[0]->setDtype(inputs[0]->dtype()); - outputs[0]->alloc(); - inputs[0]->shallowCopyFrom(outputs[0].get(), false); - } 
else { - std::cout << "[TODO]Tensor.Flatten not support!!!!" << std::endl; - } - } - - void reshape(vector> outputs, vector> inputs, vector args) override { - Chl axis_start = (Chl)args[0]; - Chl axis_end = (Chl)args[1]; - int dim_b = inputs[0]->batch(); - int dim_h = 0; - int dim_s = 0; - int dim_d = 0; - if (inputs[0]->shape().size() == 4) { - dim_h = inputs[0]->head(); - dim_s = inputs[0]->sequence(); - dim_d = inputs[0]->dimension(); - if (axis_start == BATCH & axis_end == SEQUENCE) { - dim_b = 1; - dim_s = inputs[0]->sequence() * inputs[0]->batch(); - } else if (axis_start == HEAD & axis_end == SEQUENCE) { - dim_h = 1; - dim_s = inputs[0]->sequence() * inputs[0]->head(); - } else if (axis_start == HEAD & axis_end == DIMENSION) { - dim_h = 1; - dim_d = inputs[0]->dimension() * inputs[0]->head(); - } else { - std::cout << "ERROR: flatten " << axis_start << "&" << axis_end << std::endl; - } - } else if (inputs[0]->shape().size() == 5) { - if (axis_start == CHANNLE & axis_end == HEIGHT) { - dim_h = 1; - dim_s = inputs[0]->channel() * inputs[0]->height() * inputs[0]->time(); - dim_d = inputs[0]->width(); - } else if (axis_start == HEIGHT & axis_end == CHANNLE) { - dim_h = 1; - dim_s = inputs[0]->channel() * inputs[0]->height() * inputs[0]->width(); - dim_d = inputs[0]->time(); - } - } - assert(dim_d + dim_s + dim_h > 0); - if (inputs[0]->ctype() == BCTHW) { // TODOTMPA - outputs[0]->chls()[BATCH] = 0; - outputs[0]->chls()[SEQUENCE] = 1; - outputs[0]->chls()[HEAD] = 2; - outputs[0]->chls()[DIMENSION] = 3; - outputs[0]->setCtype(BSHD); - } - outputs[0]->reshape(dim_b, dim_h, dim_s, dim_d); - - /* - if ((axis_start == TIME & axis_end == WIDTH && inputs[0]->ctype() == BCTHW) - || (axis_start == CHANNLE & axis_end == HEIGHT && inputs[0]->ctype() == BWCTH) - || (axis_start == HEIGHT & axis_end == CHANNLE && inputs[0]->ctype() == BTHWC) - || (axis_start == BATCH & axis_end == SEQUENCE && inputs[0]->ctype() != BCTHW) - || (axis_start == HEAD & axis_end == SEQUENCE && 
inputs[0]->ctype() == BSHD) - || (axis_start == HEAD & axis_end == SEQUENCE && inputs[0]->ctype() == BHDS) - || (axis_start == HEAD & axis_end == SEQUENCE && inputs[0]->ctype() == BDHS) - || (axis_start == HEAD & axis_end == DIMENSION && inputs[0]->ctype() == BSHD) - || (axis_start == HEAD & axis_end == DIMENSION && inputs[0]->ctype() == BHDS) - || (axis_start == HEAD & axis_end == SEQUENCE && inputs[0]->ctype() == BDSH)) { - if (inputs[0]->masterTensor() == nullptr) { - inputs[0]->free(); - } - outputs[0]->setDtype(inputs[0]->dtype()); - outputs[0]->alloc(); - inputs[0]->shallowCopyFrom(outputs[0].get(), false); - } else if (Module::llm_model_ptr->op_transposed_flag) { - if (inputs[0]->masterTensor() == nullptr) { - inputs[0]->free(); - } - outputs[0]->setDtype(inputs[0]->dtype()); - outputs[0]->alloc(); - inputs[0]->shallowCopyFrom(outputs[0].get(), false); - return; - } else { - std::cout << "[TODO]Tensor.Flatten not support!!!!" << std::endl; - } - */ - } - - void execute(vector> outputs, vector> inputs, vector args) override { - } -}; - -} // namespace mllm -#endif // CPUFLATTENFUNC_HPP \ No newline at end of file diff --git a/src/backends/cpu/function/CPULikeFunc.hpp b/src/backends/cpu/function/CPULikeFunc.hpp deleted file mode 100644 index 25d4af51d..000000000 --- a/src/backends/cpu/function/CPULikeFunc.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// -// Created by Rongjie Yi on 24-12-16. 
-// - -#ifndef CPULIKEFUNC_HPP -#define CPULIKEFUNC_HPP -#include "Tensor.hpp" -#include "Types.hpp" -namespace mllm { -class Tensor; - -class CPUlikeFunction : public TensorFunction { -public: - void reshape(vector> outputs, vector> inputs, vector args) override { - float like_value = args[0]; - outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); - outputs[0]->setDtype(inputs[0]->dtype()); // like_values - outputs[0]->alloc(); - memset(outputs[0]->hostPtr(), like_value, outputs[0]->count() * sizeof(float)); - } - void execute(vector> outputs, vector> inputs, vector args) override { - float like_value = args[0]; - memset(outputs[0]->hostPtr(), like_value, outputs[0]->count() * sizeof(float)); - } -}; -} // namespace mllm -#endif // CPULIKEFUNC_HPP \ No newline at end of file diff --git a/src/backends/cpu/function/CPUMatmulFunc.hpp b/src/backends/cpu/function/CPUMatmulFunc.hpp deleted file mode 100644 index 647d20b9e..000000000 --- a/src/backends/cpu/function/CPUMatmulFunc.hpp +++ /dev/null @@ -1,90 +0,0 @@ -// -// Created by Rongjie Yi on 24-2-26. 
-// - -#ifndef CPUMATMULFUNC_HPP -#define CPUMATMULFUNC_HPP -#include "CPUBackend.hpp" -#include "Tensor.hpp" -#include "Types.hpp" -#include "../compute/Matmul.hpp" -#include - -namespace mllm { -class Tensor; - -class CPUmmFunction : public TensorFunction { - static void tranTensorChl(Tensor &input) { - assert(input.ctype() == BSHD); - auto b = input.batch(); - auto h = input.head(); - auto d = input.dimension(); - auto s = input.sequence(); - auto ori_seq_idx = input.chls()[SEQUENCE]; - auto ori_head_idx = input.chls()[HEAD]; - auto ori_dim_idx = input.chls()[DIMENSION]; - input.chls()[HEAD] = ori_seq_idx; - input.chls()[DIMENSION] = ori_head_idx; - input.chls()[SEQUENCE] = ori_dim_idx; - input.changeCtype(); - input.reshape(b, h, s, d); - input.transed() = true; - input.undiffusion() = false; - // if no TENSOR_STATIC_SHAPED - if (input.masterTensor() != nullptr) { - auto b = input.masterTensor()->batch(); - auto h = input.masterTensor()->head(); - auto d = input.masterTensor()->dimension(); - auto s = input.masterTensor()->sequence(); - input.masterTensor()->chls() = input.chls(); - input.masterTensor()->changeCtype(); - input.masterTensor()->reshape(b, h, s, d); - for (auto child : input.masterTensor()->childTensors()) { - auto b = child->batch(); - auto h = child->head(); - auto d = child->dimension(); - auto s = child->sequence(); - child->chls() = input.chls(); - child->changeCtype(); - child->reshape(b, h, s, d); - } - } else { - for (auto child : input.childTensors()) { - auto b = child->batch(); - auto h = child->head(); - auto d = child->dimension(); - auto s = child->sequence(); - child->chls() = input.chls(); - child->changeCtype(); - child->reshape(b, h, s, d); - } - } - } - -public: - void setUp(vector> outputs, vector> inputs, vector args) override { - if (inputs[1]->chls()[SEQUENCE] != 3) { - tranTensorChl(*inputs[1]); - } - if (!inputs[1]->shape().empty() && !inputs[0]->shape().empty()) { - assert(inputs[0]->dimension() == inputs[1]->sequence()); 
- } - } - void reshape(vector> outputs, vector> inputs, vector args) override { - if (inputs[1]->chls()[SEQUENCE] != 3) { - tranTensorChl(*inputs[1]); - assert(inputs[1]->chls()[SEQUENCE] == 3); - } - assert(inputs[0]->dimension() == inputs[1]->sequence()); - outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[1]->dimension()); - // outputs[0]->setDtype(inputs[0]->dtype()); - outputs[0]->alloc(); - } - void execute(vector> outputs, vector> inputs, vector args) override { - bool isSame = std::equal(inputs[0]->chls().begin(), inputs[0]->chls().end(), inputs[1]->chls().begin()); - assert(inputs[0]->dtype() == MLLM_TYPE_F32); - mat_mul(inputs[0].get(), inputs[1].get(), outputs[0].get(), false, nullptr, false, isSame, CPUBackend::cpu_threads); - } -}; -} // namespace mllm -#endif // CPUMATMULFUNC_HPP \ No newline at end of file diff --git a/src/backends/cpu/function/CPUNormFunc.hpp b/src/backends/cpu/function/CPUNormFunc.hpp deleted file mode 100644 index 7a97b1b1d..000000000 --- a/src/backends/cpu/function/CPUNormFunc.hpp +++ /dev/null @@ -1,53 +0,0 @@ -// -// Created by Rongjie Yi on 24-2-26. 
-// - -#ifndef CPUNORMFUNC_HPP -#define CPUNORMFUNC_HPP -#include "CPUBackend.hpp" -#include "Tensor.hpp" -#include "Types.hpp" - -namespace mllm { -class Tensor; - -class CPUnormFunction : public TensorFunction { -public: - void reshape(vector> outputs, vector> inputs, vector args) override { - int L_n = (int)args[0]; - outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); - outputs[0]->setDtype(inputs[0]->dtype()); - outputs[0]->alloc(); - } - void execute(vector> outputs, vector> inputs, vector args) override { - int L_n = (int)args[0]; - for (int h = 0; h < inputs[0]->head(); h++) { - for (int n = 0; n < inputs[0]->batch(); n++) { - for (int s = 0; s < inputs[0]->sequence(); s++) { - if (L_n == 2) { - float sum_of_squares = 0.0f; - for (int d = 0; d < inputs[0]->dimension(); ++d) { - sum_of_squares += inputs[0]->dataAt(n, h, s, d) * inputs[0]->dataAt(n, h, s, d); - } - float l2_norm = std::sqrt(sum_of_squares); -#pragma omp parallel for num_threads(CPUBackend::cpu_threads) - for (int d = 0; d < inputs[0]->dimension(); d++) { - outputs[0]->setDataAt(n, h, s, d, l2_norm); - } - } else { - float sum_of_abs_values = 0.0f; - for (int d = 0; d < inputs[0]->dimension(); ++d) { - sum_of_abs_values += std::abs(inputs[0]->dataAt(n, h, s, d)); - } -#pragma omp parallel for num_threads(CPUBackend::cpu_threads) - for (int d = 0; d < inputs[0]->dimension(); d++) { - outputs[0]->setDataAt(n, h, s, d, sum_of_abs_values); - } - } - } - } - } - } -}; -} // namespace mllm -#endif // CPUNORMFUNC_HPP \ No newline at end of file diff --git a/src/backends/cpu/function/CPUPhi3VhdmergeFunc.hpp b/src/backends/cpu/function/CPUPhi3VhdmergeFunc.hpp deleted file mode 100644 index f27370bd5..000000000 --- a/src/backends/cpu/function/CPUPhi3VhdmergeFunc.hpp +++ /dev/null @@ -1,78 +0,0 @@ -// -// Created by Rongjie Yi on 24-2-26. 
-// - -#ifndef CPUPHI3VHDMERGEEFUNC_HPP -#define CPUPHI3VHDMERGEEFUNC_HPP -#include "Tensor.hpp" -#include "Types.hpp" -#include "CPUBackend.hpp" - -namespace mllm { -class Tensor; - -class CPUPhi3VhdmergeFunction : public TensorFunction { -public: - void reshape(vector> outputs, vector> inputs, vector args) override { - assert(args.size() == 2); - int h_crop = (int)args[0]; - int w_crop = (int)args[1]; - int N = inputs[0]->batch(); - int L = inputs[0]->sequence(); - int C = inputs[0]->dimension(); - assert(L == 24 * 24); - assert(C == 1024); - assert(N % (h_crop * w_crop) == 0); - int num_images = N / (h_crop * w_crop); - int H = static_cast(std::sqrt(L)); - - int b = num_images; - int s = h_crop * H / 2; - int h = w_crop * H / 2; - int d = 4 * C; - - outputs[0]->reshape(b, h, s, d); - outputs[0]->setDtype(inputs[0]->dtype()); - outputs[0]->alloc(); - } - void execute(vector> outputs, vector> inputs, vector args) override { - int h_crop = (int)args[0]; - int w_crop = (int)args[1]; - int N = inputs[0]->batch(); - int L = inputs[0]->sequence(); - int C = inputs[0]->dimension(); - int num_images = N / (h_crop * w_crop); - int H = static_cast(std::sqrt(L)); - - int b = num_images; - int s = h_crop * H / 2; - int h = w_crop * H / 2; - int d = 4 * C; - -#pragma omp parallel for collapse(3) num_threads(CPUBackend::cpu_threads) - for (int ob = 0; ob < b; ob++) { - for (int os = 0; os < s; os++) { - for (int oh = 0; oh < h; oh++) { - int base_s = int(oh / 12) * (24 * 24) + os * 48 + 2 * (oh % 12); - int hed = base_s % L; - int btch = int(base_s / L); - auto i_ptr_0 = inputs[0]->ptrAt(btch, hed, 0, 0); - auto i_ptr_1 = inputs[0]->ptrAt(btch, hed + 1, 0, 0); - auto i_ptr_2 = inputs[0]->ptrAt(btch, hed + 24, 0, 0); - auto i_ptr_3 = inputs[0]->ptrAt(btch, hed + 25, 0, 0); - memcpy(outputs[0]->ptrAt(ob, oh, os, 0), - i_ptr_0, C * sizeof(float)); - memcpy(outputs[0]->ptrAt(ob, oh, os, C), - i_ptr_1, C * sizeof(float)); - memcpy(outputs[0]->ptrAt(ob, oh, os, C * 2), - i_ptr_2, C 
* sizeof(float)); - memcpy(outputs[0]->ptrAt(ob, oh, os, C * 3), - i_ptr_3, C * sizeof(float)); - } - } - } - } -}; - -} // namespace mllm -#endif // CPUPHI3VHDMERGEEFUNC_HPP \ No newline at end of file diff --git a/src/backends/cpu/function/CPURangeFunc.hpp b/src/backends/cpu/function/CPURangeFunc.hpp deleted file mode 100644 index fa0bc7f84..000000000 --- a/src/backends/cpu/function/CPURangeFunc.hpp +++ /dev/null @@ -1,34 +0,0 @@ -// -// Created by Rongjie Yi on 24-2-26. -// - -#ifndef CPURANGEFUNC_HPP -#define CPURANGEFUNC_HPP -#include "Tensor.hpp" -#include "Types.hpp" -#include "CPUBackend.hpp" - -namespace mllm { -class Tensor; - -class CPURangeFunction : public TensorFunction { -public: - void reshape(vector> outputs, vector> inputs, vector args) override { - int start = (int)args[0]; - int end = (int)args[1]; - outputs[0]->reshape(1, 1, end - start, 1); - outputs[0]->setDtype(MLLM_TYPE_F32); - outputs[0]->alloc(); - } - void execute(vector> outputs, vector> inputs, vector args) override { - int start = (int)args[0]; - int end = (int)args[1]; -#pragma omp parallel for collapse(1) num_threads(CPUBackend::cpu_threads) - for (int i = 0; i < end - start; ++i) { - outputs[0]->setDataAt(0, 0, i + start, 0, (float)i); - } - } -}; - -} // namespace mllm -#endif // CPURANGEFUNC_HPP \ No newline at end of file diff --git a/src/backends/cpu/function/CPURepeatFunc.hpp b/src/backends/cpu/function/CPURepeatFunc.hpp deleted file mode 100644 index 319d66a0a..000000000 --- a/src/backends/cpu/function/CPURepeatFunc.hpp +++ /dev/null @@ -1,87 +0,0 @@ -// -// Created by Rongjie Yi on 24-12-16. 
-// - -#ifndef CPUREPEATEFUNC_HPP -#define CPUREPEATEFUNC_HPP -#include "Tensor.hpp" -#include "Types.hpp" -#include -#include -#include "CPUBackend.hpp" - -namespace mllm { -class Tensor; - -class CPUrepeatFunction : public TensorFunction { -public: - void reshape(vector> outputs, vector> inputs, vector args) override { - assert(args.size() == 2); - Chl dim = (Chl)args[0]; - int size = (int)args[1]; - int batch = inputs[0]->batch(); - int head = inputs[0]->head(); - int sequence = inputs[0]->sequence(); - int dimension = inputs[0]->dimension(); - switch (dim) { - case Chl::BATCH: { - batch = size; - break; - } - case Chl::HEAD: { - head = size; - break; - } - case Chl::SEQUENCE: { - sequence = size; - break; - } - case Chl::DIMENSION: { - dimension = size; - break; - } - default: - break; - } - outputs[0]->reshape(batch, head, sequence, dimension); - outputs[0]->setDtype(inputs[0]->dtype()); - outputs[0]->alloc(); - } - void execute(vector> outputs, vector> inputs, vector args) override { - assert(args.size() == 2); - Chl dim = (Chl)args[0]; - int size = (int)args[1]; - switch (dim) { - case Chl::BATCH: { - std::cerr << "Repeat Not implemented" << std::endl; - break; - } - case Chl::HEAD: { - std::cerr << "Repeat Not implemented" << std::endl; - break; - } - case Chl::SEQUENCE: { - std::cerr << "Repeat Not implemented" << std::endl; - break; - } - case Chl::DIMENSION: { -#pragma omp parallel for collapse(4) num_threads(CPUBackend::cpu_threads) - for (int b = 0; b < inputs[0]->batch(); b++) { - for (int s = 0; s < inputs[0]->sequence(); s++) { - for (int h = 0; h < inputs[0]->head(); h++) { - for (int d = 0; d < size; d++) { - float data = inputs[0]->dataAt(b, h, s, 0); - outputs[0]->setDataAt(b, h, s, d, data); - } - } - } - } - break; - } - default: - break; - } - } -}; -} // namespace mllm -#endif // CPUREPEATEFUNC_HPP \ No newline at end of file diff --git a/src/backends/cpu/function/CPUScatterReduceFunc.hpp b/src/backends/cpu/function/CPUScatterReduceFunc.hpp 
deleted file mode 100644 index 1b04e8136..000000000 --- a/src/backends/cpu/function/CPUScatterReduceFunc.hpp +++ /dev/null @@ -1,48 +0,0 @@ -// -// Created by Rongjie Yi on 24-12-26. -// - -#ifndef CPUSCATTERREDUCEFUNC_HPP -#define CPUSCATTERREDUCEFUNC_HPP -#include "Tensor.hpp" -#include "Types.hpp" -// #include "CPUBackend.hpp" -#include "../compute/Arithmetic.hpp" - -namespace mllm { -class Tensor; - -class CPUScatterReduceFunction : public TensorFunction { -public: - void reshape(vector> outputs, vector> inputs, vector args) override { - } - void execute(vector> outputs, vector> inputs, vector args) override { - if (inputs[1]->batch() == 0) { - return; - } - assert(inputs.size() == 3); - assert(inputs[0]->batch() == 1); - assert(inputs[0]->head() == 1); - auto dest_input = inputs[0]; - auto src_input = inputs[1]; - auto replace_idx = inputs[2]; - assert(replace_idx->batch() == 1); - assert(replace_idx->sequence() == 1); - assert(replace_idx->head() == 1); - // #pragma omp parallel for num_threads(CPUBackend::cpu_threads) - for (int r_idx = 0; r_idx < replace_idx->dimension(); r_idx++) { - auto replace_seq = (int)replace_idx->dataAt(0, 0, 0, r_idx); - auto dst_ptr = dest_input->ptrAt(0, 0, replace_seq, 0); - auto src_ptr = src_input->ptrAt(0, 0, r_idx, 0); - // memcpy(dst_ptr, src_ptr, sizeof(float) * src_input->dimension()); - float tmp[src_input->dimension()]; - memcpy(tmp, dst_ptr, sizeof(float) * dest_input->dimension()); - mllm_add_fp32(tmp, - src_ptr, - dst_ptr, dest_input->dimension()); - } - } -}; - -} // namespace mllm -#endif // CPUSCATTERREDUCEFUNC_HPP \ No newline at end of file diff --git a/src/backends/cpu/function/CPUSplitFunc.hpp b/src/backends/cpu/function/CPUSplitFunc.hpp deleted file mode 100644 index 7b7ac0131..000000000 --- a/src/backends/cpu/function/CPUSplitFunc.hpp +++ /dev/null @@ -1,161 +0,0 @@ -// -// Created by Rongjie Yi on 24-2-26. 
-// - -#ifndef CPUSPLITFUNC_HPP -#define CPUSPLITFUNC_HPP -#include "Tensor.hpp" -#include "Types.hpp" - -namespace mllm { -class Tensor; - -class CPUsplitFunction : public TensorFunction { -public: - void setUp(vector> outputs, vector> inputs, vector args) override { - // inputs[0]->shallowCopyFrom(outputs[0], false); - int size = args.size(); - std::vector each_dims; - for (int i = 0; i < size - 2; i++) { - each_dims.push_back(args[i]); - } - Chl split_dim = (Chl)args[size - 2]; - int head_size = (int)args[size - 1]; - int split_num_ = each_dims.size(); - // store each dims - int split_dim_size_ = 0; - std::vector each_dims_; - for (size_t i = 0; i < each_dims.size(); ++i) { - each_dims_.push_back((float)each_dims[i]); - split_dim_size_ += each_dims[i]; - } - assert(split_num_ == outputs.size()); - switch (split_dim) { - case Chl::HEAD: { - // assert(inputs[0]->head() == split_dim_size_); - for (int i = 0; i < split_num_; i++) { - outputs[i]->reshape(inputs[0]->batch(), each_dims_[i], inputs[0]->sequence(), inputs[0]->dimension()); - } - break; - } - case Chl::SEQUENCE: { - // assert(inputs[0]->sequence() == split_dim_size_); - for (int i = 0; i < split_num_; i++) { - outputs[i]->reshape(inputs[0]->batch(), inputs[0]->head(), each_dims_[i], inputs[0]->dimension()); - } - break; - } - case Chl::DIMENSION: { - // assert(inputs[0]->dimension() == split_dim_size_); - for (int i = 0; i < split_num_; i++) { - outputs[i]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), each_dims_[i]); - } - break; - } - case Chl::D_HD: { - // assert(inputs[0]->dimension() == split_dim_size_ * head_size); - for (int i = 0; i < split_num_; i++) { - outputs[i]->reshape(inputs[0]->batch(), head_size, inputs[0]->sequence(), each_dims_[i]); - } - break; - } - case Chl::HD: { - // assert(inputs[0]->dimension() == split_dim_size_ * head_size); - for (int i = 0; i < split_num_; i++) { - outputs[i]->reshape(inputs[0]->batch(), head_size, inputs[0]->sequence(), 
each_dims_[i]); - } - break; - } - default: { - break; - } - } - vector> shared_outputs = {}; - for (const auto &output : outputs) { - output->alloc(); - shared_outputs.push_back(output); - } - if (inputs[0]->masterTensor() == nullptr && !inputs[0]->childTensors().empty()) { - inputs[0]->free(); - } - inputs[0]->addTensors(shared_outputs, split_dim); - } - void reshape(vector> outputs, vector> inputs, vector args) override { - // auto aggregatedTensorsSize = inputs[0]->aggregatedTensors().size(); - // for (int i = 0; i < aggregatedTensorsSize; i++) { - // outputs[i] = inputs[0]->aggregatedTensors()[i].get(); - // } - - // int size = args.size(); - // std::vector each_dims; - // for (int i = 0; i < size - 2; i++) { - // each_dims.push_back(args[i]); - // } - // Chl split_dim = (Chl)args[size - 2]; - // int head_size = (int)args[size - 1]; - // int split_num_ = each_dims.size(); - // // store each dims - // int split_dim_size_ = 0; - // std::vector each_dims_; - // for (size_t i = 0; i < each_dims.size(); ++i) { - // each_dims_.push_back((float)each_dims[i]); - // split_dim_size_ += each_dims[i]; - // } - // assert(split_num_ == outputs.size()); - // return; - // switch (split_dim) { - // case Chl::HEAD: { - // assert(inputs[0]->head() == split_dim_size_); - // for (int i = 0; i < split_num_; i++) { - // outputs[i]->reshape(inputs[0]->batch(), each_dims_[i], inputs[0]->sequence(), inputs[0]->dimension()); - // } - // break; - // } - // case Chl::SEQUENCE: { - // assert(inputs[0]->sequence() == split_dim_size_); - // for (int i = 0; i < split_num_; i++) { - // outputs[i]->reshape(inputs[0]->batch(), inputs[0]->head(), each_dims_[i], inputs[0]->dimension()); - // } - // break; - // } - // case Chl::DIMENSION: { - // assert(inputs[0]->dimension() == split_dim_size_); - // for (int i = 0; i < split_num_; i++) { - // outputs[i]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), each_dims_[i]); - // } - // break; - // } - // case Chl::D_HD: { - // 
assert(inputs[0]->dimension() == split_dim_size_ * head_size); - // for (int i = 0; i < split_num_; i++) { - // outputs[i]->reshape(inputs[0]->batch(), head_size, inputs[0]->sequence(), each_dims_[i]); - // } - // break; - // } - // case Chl::HD: { - // assert(inputs[0]->dimension() == split_dim_size_ * head_size); - // for (int i = 0; i < split_num_; i++) { - // outputs[i]->reshape(inputs[0]->batch(), head_size, inputs[0]->sequence(), each_dims_[i]); - // } - // break; - // } - // default: { - // break; - // } - // } - // vector> shared_outputs = {}; - // for (const auto &output : outputs) { - // output->alloc(); - // shared_outputs.push_back(std::shared_ptr(output, [](Tensor *) {})); - // } - // if (inputs[0]->masterTensor() == nullptr && !inputs[0]->childTensors().empty()) { - // inputs[0]->free(); - // } - // inputs[0]->addTensors(shared_outputs, split_dim); - } - void execute(vector> outputs, vector> inputs, vector args) override { - } -}; - -} // namespace mllm -#endif // CPUSPLITFUNC_HPP \ No newline at end of file diff --git a/src/backends/cpu/function/CPUTopkFunc.hpp b/src/backends/cpu/function/CPUTopkFunc.hpp deleted file mode 100644 index 0e01ace3c..000000000 --- a/src/backends/cpu/function/CPUTopkFunc.hpp +++ /dev/null @@ -1,60 +0,0 @@ -// -// Created by Rongjie Yi on 24-12-16. 
-// - -#ifndef CPUTOPKFUNC_HPP -#define CPUTOPKFUNC_HPP -#include "CPUBackend.hpp" -#include "Tensor.hpp" -#include "Types.hpp" -#include -#include - -namespace mllm { -class Tensor; - -class CPUtopkFunction : public TensorFunction { -public: - void reshape(vector> outputs, vector> inputs, vector args) override { - assert(args.size() == 2); - int k = (int)args[0]; - Chl dim = (Chl)args[1]; - if (dim == DIMENSION) { - outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), k); - outputs[1]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), k); - } - outputs[0]->setDtype(inputs[0]->dtype()); // topk_values - outputs[0]->alloc(); - outputs[1]->setDtype(inputs[0]->dtype()); // topk_indices - outputs[1]->alloc(); - } - void execute(vector> outputs, vector> inputs, vector args) override { - int k = (int)args[0]; - Chl dim = (Chl)args[1]; - if (dim == DIMENSION) { -#pragma omp parallel for collapse(3) num_threads(CPUBackend::cpu_threads) - for (int n = 0; n < inputs[0]->batch(); n++) { - for (int h = 0; h < inputs[0]->head(); h++) { - for (int s = 0; s < inputs[0]->sequence(); s++) { - std::priority_queue, std::vector>, std::greater<>> topk_value_indices; - for (int d = 0; d < inputs[0]->dimension(); ++d) { - float value = inputs[0]->dataAt(n, h, s, d); - topk_value_indices.push({value, d}); - if (topk_value_indices.size() > k) { - topk_value_indices.pop(); - } - } - for (int d = k - 1; d >= 0; --d) { - auto top = topk_value_indices.top(); - topk_value_indices.pop(); - outputs[0]->setDataAt(n, h, s, d, top.first); - outputs[1]->setDataAt(n, h, s, d, top.second); - } - } - } - } - } - } -}; -} // namespace mllm -#endif // CPUTOPKFUNC_HPP \ No newline at end of file diff --git a/src/backends/cpu/function/CPUTransposeFunc.hpp b/src/backends/cpu/function/CPUTransposeFunc.hpp deleted file mode 100644 index 0906a5f66..000000000 --- a/src/backends/cpu/function/CPUTransposeFunc.hpp +++ /dev/null @@ -1,90 +0,0 @@ -// -// Created by 
Rongjie Yi on 24-2-26. -// - -#ifndef CPUTRANSPOSEFUNC_HPP -#define CPUTRANSPOSEFUNC_HPP -#include "Tensor.hpp" -#include "Types.hpp" -#include "Module.hpp" -#include - -namespace mllm { -class Tensor; - -class CPUtransposeFunction : public TensorFunction { -public: - void setUp(vector> outputs, vector> inputs, vector args) override { - vector> axiss; - for (int i = 0; i < args.size(); i += 2) { - axiss.push_back({(Chl)args[i], (Chl)args[i + 1]}); - } - if (!outputs[0]->undiffusion()) { - outputs[0]->transCopyShape(inputs[0]->shape()); - std::map origin_chls = {{BATCH, 0}, {SEQUENCE, 1}, {HEAD, 2}, {DIMENSION, 3}, {CHANNLE, 1}, {TIME, 2}, {HEIGHT, 3}, {WIDTH, 4}}; - if (std::equal(outputs[0]->chls().begin(), outputs[0]->chls().end(), origin_chls.begin())) { - outputs[0]->chls() = inputs[0]->chls(); - for (auto axis : axiss) { - auto axis0 = axis.first; - auto axis1 = axis.second; - auto ori_0_idx = outputs[0]->chls()[axis0]; - auto ori_1_idx = outputs[0]->chls()[axis1]; - outputs[0]->chls()[axis0] = ori_1_idx; - outputs[0]->chls()[axis1] = ori_0_idx; - } - outputs[0]->changeCtype(inputs[0]->shape().size()); - outputs[0]->undiffusion() = true; - } - } - if (inputs[0]->masterTensor() != nullptr && (inputs[0]->masterTensor()->name().find("Cache") != std::string::npos || inputs[0]->masterTensor()->name().find("weight") != std::string::npos)) { - if (outputs[0]->masterTensor() == nullptr) { - outputs[0]->setDtype(inputs[0]->dtype()); - outputs[0]->shallowCopyFrom(inputs[0].get(), false); - } - } else { - if (inputs[0]->masterTensor() == nullptr) { - inputs[0]->free(); - } - outputs[0]->setDtype(inputs[0]->dtype()); - outputs[0]->alloc(); - inputs[0]->setUndiffusion(true); - inputs[0]->shallowCopyFrom(outputs[0].get(), false); - outputs[0]->transFrom() = axiss; - } - } - void reshape(vector> outputs, vector> inputs, vector args) override { - vector> axiss; - for (int i = 0; i < args.size(); i += 2) { - axiss.push_back({(Chl)args[i], (Chl)args[i + 1]}); - } - std::map 
origin_chls = {{BATCH, 0}, {SEQUENCE, 1}, {HEAD, 2}, {DIMENSION, 3}, {CHANNLE, 1}, {TIME, 2}, {HEIGHT, 3}, {WIDTH, 4}}; - auto origin_s = inputs[0]->shape().size(); - outputs[0]->transCopyShape(inputs[0]->shape()); - if (inputs[0]->masterTensor() == nullptr - || std::equal(outputs[0]->chls().begin(), outputs[0]->chls().end(), origin_chls.begin())) { - outputs[0]->chls() = inputs[0]->chls(); - for (auto axis : axiss) { - auto axis0 = axis.first; - auto axis1 = axis.second; - auto ori_0_idx = outputs[0]->chls()[axis0]; - auto ori_1_idx = outputs[0]->chls()[axis1]; - outputs[0]->chls()[axis0] = ori_1_idx; - outputs[0]->chls()[axis1] = ori_0_idx; - } - outputs[0]->changeCtype(origin_s); - outputs[0]->undiffusion() = true; - } - if (inputs[0]->masterTensor() != nullptr - && (inputs[0]->masterTensor()->name().find("Cache") != std::string::npos || inputs[0]->masterTensor()->name().find("weight") != std::string::npos)) { - // outputs[0]->shallowCopyFrom(inputs[0]->masterTensor(), false, inputs[0]->shapeOffset()); - if (outputs[0]->masterTensor() == nullptr) { - outputs[0]->setDtype(inputs[0]->dtype()); - outputs[0]->shallowCopyFrom(inputs[0].get(), false); - } - } - } - void execute(vector> outputs, vector> inputs, vector args) override { - } -}; -} // namespace mllm -#endif // CPUTRANSPOSEFUNC_HPP \ No newline at end of file diff --git a/src/backends/cpu/function/CPUWhereFunc.hpp b/src/backends/cpu/function/CPUWhereFunc.hpp deleted file mode 100644 index bb10397b6..000000000 --- a/src/backends/cpu/function/CPUWhereFunc.hpp +++ /dev/null @@ -1,107 +0,0 @@ -// -// Created by Rongjie Yi on 24-2-26. 
-// - -#ifndef CPUWHEREFUNC_HPP -#define CPUWHEREFUNC_HPP -#include "Tensor.hpp" -#include "Types.hpp" -#include "CPUBackend.hpp" - -namespace mllm { -class Tensor; - -class CPUwhereFunction : public TensorFunction { -public: - void reshape(vector> outputs, vector> inputs, vector args) override { - } - void execute(vector> outputs, vector> inputs, vector args) override { - float value = args[0]; - Chl axis = (Chl)args[1]; - vector b_vec = {}; - vector s_vec = {}; - vector h_vec = {}; - vector d_vec = {}; - if (inputs[0]->count() % CPUBackend::cpu_threads == 0) { -#pragma omp parallel for collapse(4) num_threads(CPUBackend::cpu_threads) - for (int b = 0; b < inputs[0]->batch(); b++) { - for (auto s = 0; s < inputs[0]->sequence(); s++) { - for (auto h = 0; h < inputs[0]->head(); h++) { - for (auto d = 0; d < inputs[0]->dimension(); d++) { - if (inputs[0]->dataAt(b, h, s, d) == value) { - b_vec.push_back(b); - s_vec.push_back(s); - h_vec.push_back(h); - d_vec.push_back(d); - } - } - } - } - } - } else { - for (int b = 0; b < inputs[0]->batch(); b++) { - for (auto s = 0; s < inputs[0]->sequence(); s++) { - for (auto h = 0; h < inputs[0]->head(); h++) { - for (auto d = 0; d < inputs[0]->dimension(); d++) { - if (inputs[0]->dataAt(b, h, s, d) == value) { - b_vec.push_back(b); - s_vec.push_back(s); - h_vec.push_back(h); - d_vec.push_back(d); - } - } - } - } - } - } - int num = b_vec.size(); - if ((int)axis == -1) { - outputs[0]->reshape(1, 1, 4, num); - outputs[0]->setDtype(inputs[0]->dtype()); - outputs[0]->alloc(); - for (int i = 0; i < 4; ++i) { - auto dest_ptr = outputs[0]->hostPtr() + outputs[0]->offset(0, 0, i, 0); - switch (i) { - case 0: - memcpy(dest_ptr, b_vec.data(), num * sizeof(float)); - break; - case 1: - memcpy(dest_ptr, h_vec.data(), num * sizeof(float)); - break; - case 2: - memcpy(dest_ptr, s_vec.data(), num * sizeof(float)); - break; - case 3: - memcpy(dest_ptr, d_vec.data(), num * sizeof(float)); - break; - default: - break; - } - } - } else { - 
outputs[0]->reshape(1, 1, 1, num); - outputs[0]->setDtype(inputs[0]->dtype()); - outputs[0]->alloc(); - auto dest_ptr = outputs[0]->hostPtr(); - switch (axis) { - case BATCH: - memcpy(dest_ptr, b_vec.data(), num * sizeof(float)); - break; - case HEAD: - memcpy(dest_ptr, h_vec.data(), num * sizeof(float)); - break; - case SEQUENCE: - memcpy(dest_ptr, s_vec.data(), num * sizeof(float)); - break; - case DIMENSION: - memcpy(dest_ptr, d_vec.data(), num * sizeof(float)); - break; - default: - break; - } - } - } -}; - -} // namespace mllm -#endif // CPUWHEREFUNC_HPP \ No newline at end of file diff --git a/src/backends/cpu/op/CPUGather.cpp b/src/backends/cpu/op/CPUGather.cpp deleted file mode 100644 index e79ba9040..000000000 --- a/src/backends/cpu/op/CPUGather.cpp +++ /dev/null @@ -1,60 +0,0 @@ -#include "CPUGather.hpp" -#include - -namespace mllm { - -CPUGather::CPUGather(Backend *bn, string opName, int threadCount) : thread_count(threadCount), - Op(bn, opName) { -} - -ErrorCode CPUGather::reshape(vector> inputs, vector> outputs) { - - assert(inputs.size() == 3); - assert(outputs.size() == 1); - if(inputs[1]->batch() == 0) { - outputs[0]->reshape(inputs[0]->batch(), 1, inputs[0]->sequence(), inputs[0]->dimension()); - return Op::reshape(inputs, outputs); - } - assert(inputs[0]->batch() == inputs[1]->batch()); - assert(inputs[0]->head() == inputs[1]->head()); - assert(inputs[0]->head() == 1); - assert(inputs[0]->dimension() == inputs[1]->dimension()); - assert(inputs[2]->dimension() == 1); - outputs[0]->reshape(inputs[0]->batch(), 1, inputs[0]->sequence(), inputs[0]->dimension()); - return Op::reshape(inputs, outputs); -} - -ErrorCode CPUGather::execute(vector> inputs, vector> outputs) { - if(inputs[1]->batch() == 0) { - return Op::execute(inputs, outputs); - } - - assert(inputs[0]->ctype() == BSHD); - assert(inputs[1]->ctype() == BSHD); - assert(outputs[0]->ctype() == BSHD); - auto input_indices = inputs[2]; - int hiddenSize = inputs[0]->dimension(); - for (int batch = 
0; batch < inputs[0]->batch(); ++batch) { - for (int seq = 0; seq < inputs[0]->sequence(); ++seq) { - if(input_indices->dataAt(batch, 0, seq, 0) >= 0) { - memcpy(outputs[0]->hostPtr() + outputs[0]->offset(batch, 0, seq, 0), - inputs[1]->hostPtr() + (int)inputs[1]->offset(batch, 0, input_indices->dataAt(batch, 0, seq, 0), 0), - inputs[1]->dtypeSize() * hiddenSize); - } - } - } - return Op::execute(inputs, outputs); -} - -ErrorCode CPUGather::setUp(vector> inputs, vector> outputs) { - - if(inputs[0]->masterTensor() == nullptr) { - inputs[0]->free(); - } - outputs[0]->setDtype(activation_dtype()); - outputs[0]->alloc(); - inputs[0]->shallowCopyFrom(outputs[0].get(), false); - return MLLM_NO_ERROR; -} -} // namespace mllm - diff --git a/src/backends/cpu/op/CPUKVCache.cpp b/src/backends/cpu/op/CPUKVCache.cpp deleted file mode 100644 index 17a8a90e0..000000000 --- a/src/backends/cpu/op/CPUKVCache.cpp +++ /dev/null @@ -1,345 +0,0 @@ - - -#include "CPUKVCache.hpp" -#include "ParamLoader.hpp" -#include "Types.hpp" - -int n_pack = 16; -namespace mllm { -CPUKVCache::CPUKVCache(Backend *bn, string opName, int hidden, int head, int n_rep, int cache_max, int threadCount) : - thread_count(threadCount), Op(bn, opName) { - cache_.setBackend(bn); - switch (KVCache_TYPE) { - case 16: { - cache_.setDtype(MLLM_TYPE_F16); - break; - } - case 8: { - if (opName.find("k_cache") != std::string::npos) { - cache_.setDtype(MLLM_TYPE_Q8_0); - n_pack = QK8_0; - } else { - cache_.setDtype(MLLM_TYPE_F16); - } - break; - } - case 32: { - cache_.setDtype(MLLM_TYPE_F32); - break; - } - default: { - cache_.setDtype(MLLM_TYPE_F32); - break; - } - } -// #endif -#ifdef LLAMAFILE_SGEMM - cache_max = ((cache_max + (n_pack - 1)) / n_pack) * n_pack; -#endif - cache_limit_ = cache_max; - n_rep_ = n_rep; - if (head > 0) { - if (for_xnn_) cache_.setDtype(MLLM_TYPE_F32); - - cache_.reshape(1, head * n_rep_, cache_limit_, hidden); - cache_.setName(name() + ".Cache"); - cache_.alloc(); - - switch (cache_.dtype()) 
{ - case MLLM_TYPE_F32: - memset(cache_.hostPtr(), 0, cache_.count() * sizeof(float)); - break; - case MLLM_TYPE_F16: - memset(cache_.hostPtr(), 0, cache_.count() * sizeof(mllm_fp16_t)); - break; - case MLLM_TYPE_Q8_0: - memset((char *)cache_.rawHostPtr(), 0, cache_.count() * sizeof(block_q8_0) / QK8_0); - break; - default: - break; - }; - cache_seq_len_ = 0; - cache_.cache_seq_len_ = cache_seq_len_; - } -} - -ErrorCode CPUKVCache::reshape(vector> inputs, - vector> outputs) { - assert(inputs.size() == 1); - assert(outputs.size() == 1); - if (cache_seq_len_ < 0) { - if (for_xnn_) cache_.setDtype(MLLM_TYPE_F32); - - cache_.reshape(inputs[0]->batch(), inputs[0]->head() * n_rep_, cache_limit_, - inputs[0]->dimension()); - cache_.setName(name() + ".Cache"); - cache_.alloc(); - - switch (cache_.dtype()) { - case MLLM_TYPE_F32: - memset(cache_.hostPtr(), 0, cache_.count() * sizeof(float)); - break; - case MLLM_TYPE_F16: - memset(cache_.hostPtr(), 0, cache_.count() * sizeof(mllm_fp16_t)); - break; - case MLLM_TYPE_Q8_0: - memset((char *)cache_.rawHostPtr(), 0, cache_.count() * sizeof(block_q8_0) / QK8_0); - break; - default: - break; - }; - cache_seq_len_ = 0; - cache_.cache_seq_len_ = cache_seq_len_; - } - - // for sd - auto cpuBackend = dynamic_cast(backend_); - if (cpuBackend->isUsingDraft()) { - unsigned int last_draft_length = cpuBackend->getLastDraftLength(); - const std::vector &last_verified_position_ids = cpuBackend->getLastVerifiedPositionIds(); - cache_seq_len_ = cache_seq_len_ - (last_draft_length) + last_verified_position_ids.size(); - cache_.cache_seq_len_ = cache_seq_len_; - } - - int sequence = inputs[0]->sequence() + cache_seq_len_; -#ifdef LLAMAFILE_SGEMM - if (!for_xnn_ && sequence % n_pack != 0) sequence = ((sequence + (n_pack - 1)) / n_pack) * n_pack; -#endif - outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head() * n_rep_, sequence, - inputs[0]->dimension()); - if (sequence > cache_limit_) { - MLLM_LOG_ERROR_STREAM << "\n[ERROR]: Current tokens 
exceed cache limit: " << sequence << ">" - << cache_limit_ << ";" - << "\n Please set args `--limits` >" << cache_limit_ << std::endl; - - exit(1); - outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head() * n_rep_, cache_limit_, - inputs[0]->dimension()); - } - return Op::reshape(inputs, outputs); -} - -ErrorCode CPUKVCache::load(AbstructLoader &loader) { - return Op::load(loader); -} - -ErrorCode CPUKVCache::execute(vector> inputs, - vector> outputs) { - // for sd - auto cpuBackend = dynamic_cast(backend_); - if (cpuBackend->isUsingDraft()) { - const std::vector &last_verified_position_ids = cpuBackend->getLastVerifiedPositionIds(); - if (!last_verified_position_ids.empty()) { - this->updateVerifiedKVCache(last_verified_position_ids); - } - } - - int cache_seq_len_old = cache_seq_len_; - cache_seq_len_ += inputs[0]->sequence(); - cache_.cache_seq_len_ = cache_seq_len_; - if (n_rep_ > 1) { - if (cache_.ctype() == BSHD) { - for (int b = 0; b < cache_.batch(); ++b) { - for (int h = inputs[0]->head() - 1; h >= 0; --h) { -#pragma omp parallel for collapse(2) num_threads(thread_count) - for (int seq = cache_seq_len_old; seq < cache_seq_len_; ++seq) { - for (int i_rep = 0; i_rep < n_rep_; ++i_rep) { - auto cache_head = h * n_rep_ + i_rep; - if (cache_.dtype() == MLLM_TYPE_F32) { - auto src_ptr = - inputs[0]->ptrAt(b, h, seq - cache_seq_len_old, 0); - auto dest_ptr = cache_.ptrAt(b, cache_head, seq, 0); - int copy_size = cache_.dimension(); - memcpy(dest_ptr, src_ptr, copy_size * sizeof(float)); - } else if (cache_.dtype() == MLLM_TYPE_F16) { - auto src_ptr = - inputs[0]->ptrAt(b, h, seq - cache_seq_len_old, 0); - auto dest_ptr = cache_.ptrAt(b, cache_head, seq, 0); - int copy_size = cache_.dimension(); - memcpy(dest_ptr, src_ptr, copy_size * sizeof(mllm_fp16_t)); - } else if (cache_.dtype() == MLLM_TYPE_Q8_0) { - auto src_ptr = - (char *)inputs[0]->rawHostPtr() + inputs[0]->offset(b, h, seq - cache_seq_len_old, 0) * sizeof(block_q8_0) / QK8_0; - auto dest_ptr = (char 
*)cache_.rawHostPtr() + cache_.offset(b, cache_head, seq, 0) * sizeof(block_q8_0) / QK8_0; - int copy_size = cache_.dimension(); - memcpy(dest_ptr, src_ptr, copy_size * sizeof(block_q8_0) / QK8_0); - } - } - } - } - } - } else if (cache_.ctype() == BHDS) { - for (int b = 0; b < cache_.batch(); ++b) { - for (int h = inputs[0]->head() - 1; h >= 0; --h) { -#pragma omp parallel for collapse(2) num_threads(thread_count) - for (int d = 0; d < inputs[0]->dimension(); ++d) { - for (int i_rep = 0; i_rep < n_rep_; ++i_rep) { - auto cache_head = h * n_rep_ + i_rep; - if (cache_.dtype() == MLLM_TYPE_F32) { - auto src_ptr = inputs[0]->ptrAt(b, h, 0, d); - auto dest_ptr = - cache_.ptrAt(b, cache_head, cache_seq_len_old, d); - int copy_size = cache_seq_len_ - cache_seq_len_old; - memcpy(dest_ptr, src_ptr, copy_size * sizeof(float)); - } else if (cache_.dtype() == MLLM_TYPE_F16) { - auto src_ptr = inputs[0]->ptrAt(b, h, 0, d); - auto dest_ptr = - cache_.ptrAt(b, cache_head, cache_seq_len_old, d); - int copy_size = cache_seq_len_ - cache_seq_len_old; - memcpy(dest_ptr, src_ptr, copy_size * sizeof(mllm_fp16_t)); - } else if (cache_.dtype() == MLLM_TYPE_Q8_0) { - auto src_ptr = - (char *)inputs[0]->rawHostPtr() + inputs[0]->offset(b, h, 0, d) * sizeof(block_q8_0) / QK8_0; - auto dest_ptr = (char *)cache_.rawHostPtr() + cache_.offset(b, cache_head, cache_seq_len_old, d) * sizeof(block_q8_0) / QK8_0; - int copy_size = cache_.dimension(); - memcpy(dest_ptr, src_ptr, copy_size * sizeof(block_q8_0) / QK8_0); - } - } - } - } - } - } else { - std::cout << "ERROR Ctype in KVCcache;" << std::endl; - } - } - return Op::execute(inputs, outputs); -} - -ErrorCode CPUKVCache::free(vector> inputs, vector> outputs) { - return Op::free(inputs, outputs); -} - -ErrorCode CPUKVCache::setUp(vector> inputs, vector> outputs) { - assert(inputs.size() == 1); - assert(outputs.size() == 1); - outputs[0]->setDtype(cache_.dtype()); - outputs[0]->shallowCopyFrom(cache_, false, {0, 0, cache_seq_len_ / 
cache_limit_, 0}); - if (inputs[0]->sequence() + cache_seq_len_ > cache_limit_) { - outputs[0]->shallowCopyFrom(cache_, false, {0, 0, cache_seq_len_ % cache_limit_ + 1, 0}); - } - if (inputs[0]->masterTensor() == nullptr) { inputs[0]->free(); } - inputs[0]->shallowCopyFrom(cache_, false, {0, 0, cache_seq_len_ % cache_limit_, 0}); - return MLLM_NO_ERROR; -} - -ErrorCode CPUKVCache::updateVerifiedKVCache(const std::vector &verified_position_ids) { - if (cache_.ctype() == BSHD) { - unsigned int dest_pid = cache_seq_len_ - verified_position_ids.size(); - for (unsigned int src_pid : verified_position_ids) { - if (src_pid == dest_pid) { - dest_pid += 1; - continue; - } - // #pragma omp parallel for collapse(1) num_threads(thread_count) - for (int b = 0; b < cache_.batch(); ++b) { - if (cache_.dtype() == MLLM_TYPE_F32) { - auto src_ptr = cache_.ptrAt(b, 0, src_pid, 0); - auto dest_ptr = cache_.ptrAt(b, 0, dest_pid, 0); - int copy_size = cache_.dimension() * cache_.head(); - memcpy(dest_ptr, src_ptr, copy_size * sizeof(float)); - } else if (cache_.dtype() == MLLM_TYPE_F16) { - auto src_ptr = cache_.ptrAt(b, 0, src_pid, 0); - auto dest_ptr = cache_.ptrAt(b, 0, dest_pid, 0); - int copy_size = cache_.dimension() * cache_.head(); - memcpy(dest_ptr, src_ptr, copy_size * sizeof(mllm_fp16_t)); - } else if (cache_.dtype() == MLLM_TYPE_Q8_0) { - // TODO: Q8 Check - auto src_ptr = - (char *)cache_.rawHostPtr() + cache_.offset(b, 0, src_pid, 0) * sizeof(block_q8_0) / QK8_0; - auto dest_ptr = (char *)cache_.rawHostPtr() + cache_.offset(b, 0, dest_pid, 0) * sizeof(block_q8_0) / QK8_0; - int copy_size = cache_.dimension() * cache_.head(); - memcpy(dest_ptr, src_ptr, copy_size * sizeof(block_q8_0) / QK8_0); - } - } - dest_pid += 1; - } - } else if (cache_.ctype() == BHDS) { - unsigned int dest_pid = cache_seq_len_ - verified_position_ids.size(); - for (unsigned int src_pid : verified_position_ids) { - if (src_pid == dest_pid) { - dest_pid += 1; - continue; - } -#pragma omp parallel for 
collapse(3) num_threads(thread_count) - for (int b = 0; b < cache_.batch(); ++b) { - for (int h = 0; h < cache_.head(); ++h) { - for (int d = 0; d < cache_.dimension(); ++d) { - if (cache_.dtype() == MLLM_TYPE_F32) { - auto src_data = cache_.dataAt(b, h, src_pid, d); - cache_.setDataAt(b, h, dest_pid, d, src_data); - } else if (cache_.dtype() == MLLM_TYPE_F16) { - auto src_data = cache_.dataAt(b, h, src_pid, d); - cache_.setDataAt(b, h, dest_pid, d, src_data); - } else if (cache_.dtype() == MLLM_TYPE_Q8_0) { - // TODO: Q8 Check 不知道q8能不能直接setDataAt - // auto src_data = cache_.dataAt(b, h, src_pid, d); - // cache_.setDataAt(b, h, dest_pid, d, src_data); - auto src_ptr = - (char *)cache_.rawHostPtr() + cache_.offset(b, h, src_pid, d) * sizeof(block_q8_0) / QK8_0; - auto dest_ptr = (char *)cache_.rawHostPtr() + cache_.offset(b, h, dest_pid, d) * sizeof(block_q8_0) / QK8_0; - int copy_size = 1; - memcpy(dest_ptr, src_ptr, copy_size * sizeof(block_q8_0) / QK8_0); - } - } - } - } - dest_pid += 1; - } - } else { - std::cout << "ERROR Ctype in KVCcache;" << std::endl; - } - - // clear kv cache - // if (cache_seq_len_ < cache_seq_len_old) { - // if (n_rep_ > 1) { - // if (cache_.ctype() == BSHD) { - // for (int b = 0; b < cache_.batch(); ++b) { - // for (int h = cache_.head() - 1; h >= 0; --h) { - // // #pragma omp parallel for collapse(2) num_threads(thread_count) - // for (int seq = cache_seq_len_; seq < cache_seq_len_old; ++seq) { - // for (int i_rep = 0; i_rep < n_rep_; ++i_rep) { - // auto cache_head = h * n_rep_ + i_rep; - // if (cache_.dtype() == MLLM_TYPE_F32) { - // auto dest_ptr = cache_.ptrAt(b, cache_head, seq, 0); - // int copy_size = cache_.dimension(); - // memset(dest_ptr, 0, copy_size * sizeof(float)); - // } else if (cache_.dtype() == MLLM_TYPE_F16) { - // auto dest_ptr = cache_.ptrAt(b, cache_head, seq, 0); - // int copy_size = cache_.dimension(); - // memset(dest_ptr, 0, copy_size * sizeof(mllm_fp16_t)); - // } - // } - // } - // } - // } - // } else if 
(cache_.ctype() == BHDS) { - // for (int b = 0; b < cache_.batch(); ++b) { - // for (int h = cache_.head() - 1; h >= 0; --h) { - // // #pragma omp parallel for collapse(2) num_threads(thread_count) - // for (int d = 0; d < cache_.dimension(); ++d) { - // for (int i_rep = 0; i_rep < n_rep_; ++i_rep) { - // auto cache_head = h * n_rep_ + i_rep; - // if (cache_.dtype() == MLLM_TYPE_F32) { - // auto dest_ptr = - // cache_.ptrAt(b, cache_head, cache_seq_len_, d); - // int copy_size = cache_seq_len_old - cache_seq_len_; - // memset(dest_ptr, 0, copy_size * sizeof(float)); - // } else if (cache_.dtype() == MLLM_TYPE_F16) { - // auto dest_ptr = - // cache_.ptrAt(b, cache_head, cache_seq_len_, d); - // int copy_size = cache_seq_len_old - cache_seq_len_; - // memset(dest_ptr, 0, copy_size * sizeof(mllm_fp16_t)); - // } - // } - // } - // } - // } - // } else { - // std::cout << "ERROR Ctype in KVCcache;" << std::endl; - // } - // } - // } - return MLLM_NO_ERROR; -} - -} // namespace mllm \ No newline at end of file diff --git a/src/backends/cpu/op/CPULinear.cpp b/src/backends/cpu/op/CPULinear.cpp deleted file mode 100644 index 916484f51..000000000 --- a/src/backends/cpu/op/CPULinear.cpp +++ /dev/null @@ -1,164 +0,0 @@ - -#include "CPULinear.hpp" -#include "Types.hpp" -#include -#include - -namespace mllm { - -CPULinear::CPULinear(Backend *bn, string opName, int in_features, int out_features, bool bias, int threadCount) : - thread_count(threadCount), - Op(bn, opName) { - in_features_ = in_features; - out_features_ = out_features; - support_bias_ = bias; - thread_count = threadCount; - weight_.setBackend(bn); - bias_.setBackend(bn); -} - -ErrorCode CPULinear::reshape(vector> inputs, vector> outputs) { - // std::cout << name() << " CPULinear reshape" << std::endl; - assert(inputs.size() == 1); - assert(outputs.size() == 1); - if (inputs[0]->count() == 0 && inputs[0]->sequence() != 0) { - outputs[0]->reshape(0, 0, 0, 0); - return Op::reshape(inputs, outputs); - } - // N | C | H 
| W - // ----------------------------------------------- - // 1 |out_channel | in_channel | 1 - // |out_features| in_features | - // ----------------------------------------------- - // batch |in_channel | seq_len | 1 - // |in_features | inputs[0]->sequence() | - // ----------------------------------------------- - // batch |out_channel | seq_len | 1 - // |out_features| inputs[0]->sequence() | - assert(inputs[0]->head() == 1); - assert(in_features_ == inputs[0]->dimension()); - outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), out_features_); - // outputs[0]->setDtype(activationDtype()); - return Op::reshape(inputs, outputs); -} - -ErrorCode CPULinear::load(AbstructLoader &loader) { - // std::cout << name() << " CPULinear load" << std::endl; - weight_.setName(name() + ".weight"); - weight_.reshape(1, 1, out_features_, in_features_); - if (loader.getDataType(weight_.name()) != MLLM_TYPE_COUNT) { - weight_.setDtype(loader.getDataType(weight_.name())); - weight_.alloc(); - loader.load(&weight_); - } else { - if (weight_.name().find('v') != std::string::npos && Op::noLoadWeightsDtype() == MLLM_TYPE_Q4_0_4_4) { - weight_.setDtype(MLLM_TYPE_Q4_0); - } else { - weight_.setDtype(Op::noLoadWeightsDtype()); - } - weight_.alloc(); - } - if (support_bias_) { - bias_.setName(name() + ".bias"); - bias_.reshape(1, 1, 1, out_features_); - if (loader.getDataType(bias_.name()) != MLLM_TYPE_COUNT) { - bias_.setDtype(loader.getDataType(bias_.name())); - bias_.alloc(); - loader.load(&bias_); - } else { - bias_.setDtype(MLLM_TYPE_F32); - bias_.alloc(); - } - } - return Op::load(loader); -} - -ErrorCode CPULinear::execute(vector> inputs, vector> outputs) { - // auto start = mllm::mllm_time_us(); - if (inputs[0]->count() == 0) { - return Op::execute(inputs, outputs); - } - if (inputs[0]->sequence() != outputs[0]->sequence() && outputs[0]->masterTensor() == nullptr) { - outputs[0]->reshape(outputs[0]->batch(), outputs[0]->head(), inputs[0]->sequence(), 
outputs[0]->dimension()); - // outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), out_features_); - outputs[0]->alloc(); - } - // TODO: Q8_0 KVCache can not use!! - if (outputs[0]->dtype() == MLLM_TYPE_Q8_0) { - auto tmp_out = std::make_shared(outputs[0]->backend()); - // tmp_out->setBackend(outputs[0]->backend()); - auto b = outputs[0]->batch(); - auto h = outputs[0]->head(); - auto d = outputs[0]->dimension(); - auto s = outputs[0]->sequence(); - tmp_out->chls() = outputs[0]->chls(); - tmp_out->setCtype(outputs[0]->ctype()); - tmp_out->reshape(b, h, s, d); - tmp_out->setDtype(MLLM_TYPE_F32); - tmp_out->alloc(); - mat_mul(inputs[0].get(), &weight_, tmp_out.get(), support_bias_, &bias_, false, true, thread_count); - if (tmp_out->ctype() == BSHD) { -#pragma omp parallel for collapse(3) num_threads(thread_count) - for (int b = 0; b < tmp_out->batch(); b++) { - for (int h = 0; h < tmp_out->head(); h++) { - for (int s = 0; s < tmp_out->sequence(); s++) { - quantize_row_q8_0(tmp_out->hostPtr() + tmp_out->offset(b, h, s, 0), - (char *)outputs[0]->rawHostPtr() - + outputs[0]->offset(b, h, s, 0) * sizeof(block_q8_0) / QK8_0, - tmp_out->dimension()); - } - } - } - } else { // BHDS -#pragma omp parallel for collapse(3) num_threads(thread_count) - for (int b = 0; b < tmp_out->batch(); b++) { - for (int h = 0; h < tmp_out->head(); h++) { - for (int d = 0; d < tmp_out->dimension(); d++) { - quantize_row_q8_0(tmp_out->hostPtr() + tmp_out->offset(b, h, 0, d), - (char *)outputs[0]->rawHostPtr() - + outputs[0]->offset(b, h, 0, d) * sizeof(block_q8_0) / QK8_0, - outputs[0]->sequence()); - } - } - } - } - } else { - mat_mul(inputs[0].get(), &weight_, outputs[0].get(), support_bias_, &bias_, false, true, thread_count); - } - // std::cout << name() << " CPULinear()" << std::endl; - /* - switch (weight_.dtype()) { - case MLLM_TYPE_F32: { - mat_mul_fp32(inputs[0].get(), &weight_, outputs[0].get(), support_bias_, &bias_, false, true, thread_count); - break; 
- } - case MLLM_TYPE_F16: break; - case MLLM_TYPE_Q4_0: { - mat_mul_fp32_q4_0(inputs[0].get(), &weight_, outputs[0].get(), support_bias_, &bias_, thread_count); - break; - } - case MLLM_TYPE_Q4_K: { - mat_mul_fp32_q4_K(inputs[0].get(), &weight_, outputs[0].get(), support_bias_, &bias_, thread_count); - break; - } - case MLLM_TYPE_Q6_K: { - mat_mul_fp32_q6_K(inputs[0].get(), &weight_, outputs[0].get(), support_bias_, &bias_, thread_count); - break; - } - default: - break; - } - */ - // auto end = mllm::mllm_time_us(); - // printf("exec time: %ld us\n", end - start); - return Op::execute(inputs, outputs); -} -ErrorCode CPULinear::free(vector> inputs, vector> outputs) { - weight_.free(); - if (support_bias_) { - bias_.free(); - } - return Op::free(inputs, outputs); -} - -} // namespace mllm diff --git a/src/backends/cpu/op/CPUQuantize.cpp b/src/backends/cpu/op/CPUQuantize.cpp deleted file mode 100644 index fa4060f8f..000000000 --- a/src/backends/cpu/op/CPUQuantize.cpp +++ /dev/null @@ -1,97 +0,0 @@ -// -// Created by Daliang Xu on 2024/04/18. 
-// - -#include "CPUQuantize.hpp" -#include "Types.hpp" -#include "backends/cpu/quantize/QuantizeQ8.hpp" - -#include - -namespace mllm { -CPUQuantize::CPUQuantize(Backend *bn, string opName, int threadCount):thread_count(threadCount), Op(bn, std::move(opName)) { - activation_dtype_ = MLLM_TYPE_I8; - scale_.setBackend(bn); -} - -ErrorCode CPUQuantize::reshape(vector> inputs, vector> outputs) { - assert(inputs.size() == 1); - assert(outputs.size() == 1); - outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); - return Op::reshape(inputs, outputs); -} - -ErrorCode CPUQuantize::execute(vector> inputs, vector> outputs) { - auto input = inputs[0]; - auto output = outputs[0]; - int batch = input->batch(); - int head = input->head(); - int seq = input->sequence(); - int dim = input->dimension(); - - float quantScale = 0; - quantScale = scale_.hostPtr()[0] / 127.0; - quantScale = roundf(quantScale * 100000) / 100000; - - auto src0 = inputs[0]; - auto src0_i8 = outputs[0]; - -// #pragma omp parallel for collapse(4) - // for (int b = 0; b dataAt(b, h, s, d); - // int32_t v = static_cast(Round(value / quantScale)); - // v = std::max (std::min(v, 127), -128); - // output->setDataAt(b, h, s, d, static_cast(v)); - // } - // } - // std::cout << std::endl; - // } - // } - -#pragma omp parallel for collapse(3) num_threads(thread_count) - for (int b = 0; b < batch; b++) { - for (int h = 0; h hostPtr() + src0->offset(b, h, s, 0), - src0_i8->hostPtr() + src0_i8->offset(b, h, s, 0), - dim, quantScale); - } - } - } - - // outputs[0]->printData(); - - - return Op::execute(inputs, outputs); -} - -ErrorCode CPUQuantize::setUp(vector> inputs, vector> outputs) { - activation_dtype_ = MLLM_TYPE_I8; - return Op::setUp(inputs, outputs); -} - -ErrorCode CPUQuantize::free(vector> inputs, vector> outputs) { - return Op::free(inputs, outputs); -} - -ErrorCode CPUQuantize::load(AbstructLoader &loader) { - string scaleName = name(); - - std::string 
wordToRemove = "quantize"; - int pos = scaleName.find(wordToRemove); - if (pos != -1) { - scaleName.erase(pos, wordToRemove.length()); - } - - scale_.setName(scaleName + "input_scale"); - scale_.reshape(1, 1, 1, 1); - scale_.setDtype(MLLM_TYPE_F32); - scale_.alloc(); - loader.load(&scale_); - - return Op::load(loader); -} -} // namespace mllm \ No newline at end of file diff --git a/src/backends/cpu/op/CPURMSNorm.cpp b/src/backends/cpu/op/CPURMSNorm.cpp deleted file mode 100644 index 6ea3aa693..000000000 --- a/src/backends/cpu/op/CPURMSNorm.cpp +++ /dev/null @@ -1,92 +0,0 @@ -#include -#include "CPURMSNorm.hpp" -#include "Tensor.hpp" -#include "Timing.hpp" -#include "../compute/VecDot.hpp" - -namespace mllm { - -// int32_t opp = 897988541; - -// int32_t op_params[1]; -CPURMSNorm::CPURMSNorm(Backend *bn, string opName, int normSize, float epsilon, bool add_unit_offset_, int threadCount) : - thread_count(threadCount), add_unit_offset_(add_unit_offset_), - Op(bn, opName), epsilon_(epsilon) { - // op_params[0] = 897988541;s, sizeof(float)); - // memcpy(&epsilon_, op_param) - normSize_ = normSize; - weight_.setBackend(bn); -} - -ErrorCode CPURMSNorm::reshape(vector> inputs, vector> outputs) { - // RMSNorm is similar to LayerNorm which operates on the channel dimension. 
- assert(normSize_ == inputs[0]->dimension()); - outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); - // outputs[0]->setDtype(activationDtype()); - // std::cout << name() << " CPURMSNorm reshape" << std::endl; - return Op::reshape(inputs, outputs); -} - -ErrorCode CPURMSNorm::execute(vector> inputs, vector> outputs) { - auto input = inputs[0]; - int batch = input->batch(); - int dim = input->dimension(); - int seq = input->sequence(); - int head = input->head(); -#pragma omp parallel for collapse(3) num_threads(thread_count) - for (int h = 0; h < head; h++) { - for (int n = 0; n < batch; n++) { - for (int s = 0; s < seq; s++) { - double sum_squares = 0.0F; - // sum - for (int d = 0; d < dim; d++) { - float value = input->dataAt(n, h, s, d); - sum_squares += (double)value * value; - } - const float mean = sum_squares / dim; - const float rms = 1.0f / sqrtf(mean + epsilon_); - - memcpy(outputs[0]->ptrAt(n, h, s, 0), - inputs[0]->ptrAt(n, h, s, 0), - dim * sizeof(float)); - vec_scale_f32(dim, outputs[0]->ptrAt(n, h, s, 0), rms); - } - } - } - -#pragma omp parallel for collapse(4) num_threads(thread_count) - for (int h = 0; h < head; h++) { - for (int n = 0; n < batch; n++) { - for (int s = 0; s < seq; s++) { - for (int d = 0; d < dim; d++) { - float weight = weight_.dataAt(0, 0, 0, d); - if (add_unit_offset_) { - *outputs[0]->ptrAt(n, h, s, d) *= (1 + weight); - } else { - *outputs[0]->ptrAt(n, h, s, d) *= (weight); - } - } - } - } - } - return Op::execute(inputs, outputs); -} -ErrorCode CPURMSNorm::load(AbstructLoader &loader) { - weight_.setName(name() + ".weight"); - weight_.reshape(1, 1, 1, normSize_); // - if (loader.getDataType(weight_.name()) != MLLM_TYPE_COUNT) { - weight_.setDtype(loader.getDataType(weight_.name())); - weight_.alloc(); - // auto l = loader.length(weight_.name()); - loader.load(&weight_); - } else { - weight_.setDtype(MLLM_TYPE_F32); - weight_.alloc(); - } - return Op::load(loader); -} 
-ErrorCode CPURMSNorm::free(vector> inputs, vector> outputs) { - weight_.free(); - return Op::free(inputs, outputs); -} -} // namespace mllm \ No newline at end of file diff --git a/src/backends/cpu/op/CPUView.cpp b/src/backends/cpu/op/CPUView.cpp deleted file mode 100644 index 203ee4a09..000000000 --- a/src/backends/cpu/op/CPUView.cpp +++ /dev/null @@ -1,127 +0,0 @@ - - -#include "CPUView.hpp" - -namespace mllm { - -CPUView::CPUView(Backend *bn, string opName,vector dims, vectordata_dims, int threadCount) : thread_count(threadCount), - Op(bn, opName) { - dim0_ = dims[0]; - dim1_ = dims[1]; - dim2_ = dims[2]; - dim3_ = dims[3]; - // if(dims.size() == 5) { - // dim4_ = dims[4]; - // } - data_dim0_ = data_dims[0]; - data_dim1_ = data_dims[1]; - data_dim2_ = data_dims[2]; - data_dim3_ = data_dims[3]; - // if(data_dims.size() == 5) { - // data_dim4_ = data_dims[4]; - // } -} - -ErrorCode CPUView::reshape(vector> inputs, vector> outputs) { - - // if(data_dim4_ != -999) { - // int dim0 = inputs[0]->batch(); - // int dim1 = inputs[0]->channel(); - // int dim2 = inputs[0]->height(); - // int dim3 = inputs[0]->width(); - // int dim4 = inputs[0]->dimension(); - // assert(inputs[0]->ctype() == BCTHW); - // - // outputs[0]->reshape(dim0, dim1, dim2, dim3, dim4); - // } else { - int dim0 = inputs[0]->batch(); - int dim1 = inputs[0]->head(); - int dim2 = inputs[0]->sequence(); - int dim3 = inputs[0]->dimension(); - if(data_dim0_ == BATCH && data_dim1_ == DIMENSION && data_dim2_ == SEQUENCE && data_dim3_ == DIMENSION) { - dim1 = dim1_; - dim3 = inputs[0]->dimension()/ dim1_; - } else if(data_dim0_ == BATCH && data_dim1_ == -1 && data_dim2_ == SEQUENCE && data_dim3_ == HEAD+DIMENSION){ - dim1 = 1; - dim3 = inputs[0]->dimension() * inputs[0]->head(); - } else if(data_dim0_ == BATCH && data_dim1_ == -1 && data_dim2_ == SEQUENCE+HEAD && data_dim3_ == DIMENSION){ - dim1 = 1; - dim2 = inputs[0]->sequence()* inputs[0]->head(); - } else if (data_dim0_ == BATCH && data_dim1_ == -1 && 
data_dim2_ == CHANNLE && data_dim3_ == TIME + HEIGHT + WIDTH) { - // assert(inputs[0]->ctype() == BCTHW); - dim1 = 1; - dim2 = inputs[0]->channel(); - dim3 = inputs[0]->time() * inputs[0]->height() * inputs[0]->width(); - } else if (data_dim0_ == BATCH && data_dim1_ == -1 && data_dim2_ == TIME + HEIGHT + WIDTH && data_dim3_ == CHANNLE ) { - if(inputs[0]->ctype() == BTHWC) { - dim1 = 1; - dim2 = inputs[0]->time() * inputs[0]->height() * inputs[0]->width(); - dim3 = inputs[0]->channel(); - }else { - dim1 = 1; - dim2 = inputs[0]->time() * inputs[0]->height() * inputs[0]->channel(); - dim3 = inputs[0]->width(); - } - } else if (data_dim0_ == SEQUENCE && data_dim1_ == HEAD && data_dim2_ == BATCH && data_dim3_ ==DIMENSION) { - dim0 = inputs[0]->sequence(); - dim1 = inputs[0]->head(); - dim2 = inputs[0]->batch(); - dim3 = inputs[0]->dimension(); - } else if (data_dim0_ == BATCH && data_dim1_ == HEAD && data_dim2_ == BATCH && data_dim3_ ==DIMENSION) { - dim0 = inputs[0]->batch()/dim2_; - dim1 = inputs[0]->head(); - dim2 = dim2_; - dim3 = inputs[0]->dimension(); - } else if (data_dim0_ == BATCH && data_dim1_ == HEAD && data_dim2_ == SEQUENCE && data_dim3_ ==DIMENSION) { - dim0 = dim0_; - dim1 = dim1_; - dim2 = dim2_; - dim3 = dim3_; - } else { - std::cout<<"CPUView not support!!!!"<reshape(dim0, dim1, dim2, dim3); - // } - return Op::reshape(inputs, outputs); -} - -ErrorCode CPUView::execute(vector> inputs, vector> outputs) { - if(noNeedEx_){ - return Op::execute(inputs, outputs); - } else { - std::cout<<"CPUView not support!!!!"<> inputs, vector> outputs) { - assert(inputs.size() == 1); - assert(outputs.size() == 1); - - activation_dtype_ = inputs[0]->dtype(); - - if ( (data_dim0_ == BATCH && data_dim2_ ==SEQUENCE && inputs[0]->ctype()!=BCTHW) // head & dimension - || (data_dim0_ == BATCH && data_dim3_ ==DIMENSION && inputs[0]->ctype()==BSHD) // head & sequence - || (data_dim0_ == SEQUENCE && data_dim1_ == HEAD && data_dim2_ == BATCH && data_dim3_ ==DIMENSION && 
inputs[0]->ctype()==BSHD) // head & sequence - || (data_dim0_ == BATCH && inputs[0]->ctype()==BCTHW) // - || (data_dim1_ == HEAD && data_dim3_ ==DIMENSION && inputs[0]->ctype()==BSHD // batch & sequence - || (data_dim0_ == BATCH && data_dim1_ == HEAD && data_dim2_ == SEQUENCE && data_dim3_ ==DIMENSION)) // batch & sequence & head & dimension - // || (data_dim0_ == BATCH && data_dim3_ == CHANNLE && inputs[0]->ctype()==BTHWC) // - ){ - noNeedEx_ = true; - if (inputs[0]->masterTensor() == nullptr) { - inputs[0]->free(); - } - outputs[0]->setDtype(activation_dtype()); - outputs[0]->alloc(); - inputs[0]->shallowCopyFrom(outputs[0].get(), false); - return MLLM_NO_ERROR; - } - else { - std::cout<<"CPUView not support!!!!"<= v75 and hexagon sdk >= 5.4.0) -endif - -# Users should note that the tools version may change between hexagon sdk versions -# Following combination of SDK and Tool version is supported -# HEXAGON_SDK_ROOT_V68 := $(HEXAGON_SDK_BASE)/hexagon-sdk-4.2.0 -# HEXAGON_SDK_ROOT_V69 := $(HEXAGON_SDK_BASE)/hexagon-sdk-4.3.0 -# HEXAGON_SDK_ROOT_V73 := $(HEXAGON_SDK_BASE)/hexagon-sdk-5.4.0 -# HEXAGON_SDK_ROOT_V75 := $(HEXAGON_SDK_BASE)/hexagon-sdk-5.4.0 -HEXAGON_SDK_ROOT_V68 := $(HEXAGON_SDK_ROOT) -HEXAGON_SDK_ROOT_V69 := $(HEXAGON_SDK_ROOT) -HEXAGON_SDK_ROOT_V73 := $(HEXAGON_SDK_ROOT) -HEXAGON_SDK_ROOT_V75 := $(HEXAGON_SDK_ROOT) -#Updated to point to latest sdk to match with libQnnHtp.so -# HEXAGON_SDK_ROOT_X86 := $(HEXAGON_SDK_BASE)/hexagon-sdk-5.4.0 -HEXAGON_SDK_ROOT_X86 := $(HEXAGON_SDK_ROOT) - -HEXAGON_TOOLS_VERSION_V68 := 8.4.09 -HEXAGON_TOOLS_VERSION_V69 := 8.5.03 -HEXAGON_TOOLS_VERSION_V73 := 8.7.06 -HEXAGON_TOOLS_VERSION_V75 := 8.7.06 -#Updated to point to latest sdk to match with libQnnHtp.so -HEXAGON_TOOLS_VERSION_X86 := 8.7.06 - -ifndef ANDROID_NDK_ROOT -ifeq ($(MAKECMDGOALS),htp_aarch64) -$(error "ERROR: ANDROID_NDK_ROOT is not set. 
Android NDK path must be set to compile package for aarch64") -else ifeq ($(MAKECMDGOALS),all) -$(info "WARNING: ANDROID_NDK_ROOT is not set. Android NDK path must be set to compile package for aarch64") -endif -endif - -ifndef PACKAGE_NAME -export -PACKAGE_NAME := $(notdir $(shell pwd)) -$(info "INFO: No package name defined. Using current directory name: $(PACKAGE_NAME) as the package name") -endif - -WORK := build -SRC_DIR := src -OP_SRC_DIR := src/ops -OP_INCLUDE_DIR := ./include -OP_INCLUDES = #$(wildcard $(OP_INCLUDE_DIR)/*.h) user defined if any op specific headers are needed, add -I to common flags -LIBRARY_NAME := libQnn$(PACKAGE_NAME).so -SUPPORTED_TARGETS = x86_64-linux-clang hexagon-v68 hexagon-v69 hexagon-v73 hexagon-v75 aarch64-android - -INCLUDES = $(addprefix -I,$(QHL_INC_DIRS)) $(addprefix -I,$(QHL_HVX_INC_DIRS)) $(addprefix -I,$(WORKER_POOL_INC)) -I$(HEXAGON_SDK_ROOT)/libs/qhl_hvx/inc/qhmath_hvx/ -I$(HEXAGON_SDK_ROOT)/libs/qhl_hvx/inc/internal - - -COMMON_CXX_FLAGS = -std=c++17 -I$(QNN_INCLUDE) -fPIC -Wall -Wreorder -Wno-missing-braces -fno-builtin -Wno-unused-function -COMMON_CXX_FLAGS += -Werror -Wno-format -Wno-unused-command-line-argument -fvisibility=default -stdlib=libc++ -COMMON_CXX_FLAGS += -DQNN_API="__attribute__((visibility(\"default\")))" -D__QAIC_HEADER_EXPORT="__attribute__((visibility(\"default\")))" - -X86_LIBNATIVE_RELEASE_DIR := $(HEXAGON_SDK_ROOT_X86)/tools/HEXAGON_Tools/$(HEXAGON_TOOLS_VERSION_X86)/Tools - -# Ensure hexagon sdk tool version can be retrieved -ifeq ($(wildcard $(X86_LIBNATIVE_RELEASE_DIR)/.),) -$(error "Cannot retrieve hexagon tools from: $(X86_LIBNATIVE_RELEASE_DIR). \ - \ - Please check that hexagon tools version is correct. Expected: $(HEXAGON_TOOLS_VERSION_X86)") -endif - -#Check tools for hexagon_v75 are present. -ifeq ($(MAKECMDGOALS),htp_v75) -ifeq ($(wildcard $(HEXAGON_SDK_ROOT_V75)),) -$(error "ERROR: HEXAGON_SDK_ROOT_V75 is set incorrectly. 
Cannot retrieve $(HEXAGON_SDK_ROOT_V75)") -endif -endif - -#Check tools for hexagon_v68 are present. -ifeq ($(MAKECMDGOALS),htp_v68) -ifeq ($(wildcard $(HEXAGON_SDK_ROOT_V68)),) -$(error "ERROR: HEXAGON_SDK_ROOT_V68 is set incorrectly. Cannot retrieve $(HEXAGON_SDK_ROOT_V68)") -endif -endif - -ifeq ($(MAKECMDGOALS),htp_v69) -ifeq ($(wildcard $(HEXAGON_SDK_ROOT_V69)),) -$(error "ERROR: HEXAGON_SDK_ROOT_V69 is set incorrectly. Cannot retrieve $(HEXAGON_SDK_ROOT_V69)") -endif -endif - -ifeq ($(MAKECMDGOALS),htp_v73) -ifeq ($(wildcard $(HEXAGON_SDK_ROOT_V73)),) -$(error "ERROR: HEXAGON_SDK_ROOT_V73 is set incorrectly. Cannot retrieve $(HEXAGON_SDK_ROOT_V73)") -endif -endif - -endif -OP_SOURCES = $(wildcard $(OP_SRC_DIR)/*.cpp) -OTHER_SOURCES = $(wildcard $(SRC_DIR)/*.cpp) -HFILES = $(wildcard $(QNN_INCLUDE)/*.h) -HFILES += $(wildcard $(QNN_INCLUDE)/HTP/*.h) -HFILES += $(wildcard $(QNN_INCLUDE)/HTP/core/*.h) -OP_OBJS = $(patsubst $(SRC_DIR)/%,%,$(patsubst %.cpp,%.o,$(OP_SOURCES))) -OTHER_OBJS = $(patsubst $(SRC_DIR)/%,%,$(patsubst %.cpp,%.o,$(OTHER_SOURCES))) - -#======= Assembly ======== -OP_SOURCES_ASM_X86 += $(wildcard $(OP_SRC_DIR)/x86_asm/*.S) -OP_OBJS_ASM_X86 += $(subst /x86_asm/,/,$(patsubst $(SRC_DIR)/%,%,$(patsubst %.S,%.o,$(OP_SOURCES_ASM_X86)))) -OP_SOURCES_ASM_V68 += $(wildcard $(OP_SRC_DIR)/v68_asm/*.S) -OP_OBJS_ASM_V68 += $(subst /v68_asm/,/,$(patsubst $(SRC_DIR)/%,%,$(patsubst %.S,%.o,$(OP_SOURCES_ASM_V68)))) -OP_SOURCES_ASM_V69 += $(wildcard $(OP_SRC_DIR)/v69_asm/*.S) -OP_OBJS_ASM_V69 += $(subst /v69_asm/,/,$(patsubst $(SRC_DIR)/%,%,$(patsubst %.S,%.o,$(OP_SOURCES_ASM_V69)))) -OP_SOURCES_ASM_V73 += $(wildcard $(OP_SRC_DIR)/v73_asm/*.S) -OP_OBJS_ASM_V73 += $(subst /v73_asm/,/,$(patsubst $(SRC_DIR)/%,%,$(patsubst %.S,%.o,$(OP_SOURCES_ASM_V73)))) -OP_SOURCES_ASM_V75 += $(wildcard $(OP_SRC_DIR)/v75_asm/*.S) -OP_OBJS_ASM_V75 += $(subst /v75_asm/,/,$(patsubst $(SRC_DIR)/%,%,$(patsubst %.S,%.o,$(OP_SOURCES_ASM_V75)))) -OP_SOURCES_ASM_ANDROID += $(wildcard 
$(OP_SRC_DIR)/android_asm/*.S) -OP_OBJS_ASM_ANDROID += $(subst /android_asm/,/,$(patsubst $(SRC_DIR)/%,%,$(patsubst %.S,%.o,$(OP_SOURCES_ASM_ANDROID)))) - -all: htp_v68 htp_x86 htp_aarch64 - -#============================================================================================================ -# Setup compiler, compiler instructions and linker for x86 -X86_CXX ?= clang++ -X86_LDFLAGS:= -Wl,--whole-archive -L$(X86_LIBNATIVE_RELEASE_DIR)/libnative/lib -lnative -Wl,--no-whole-archive -lpthread -X86_C_FLAGS := -D__HVXDBL__ -I$(X86_LIBNATIVE_RELEASE_DIR)/libnative/include -ffast-math -DUSE_OS_LINUX -X86_CXX_FLAGS = $(COMMON_CXX_FLAGS) $(X86_C_FLAGS) -fomit-frame-pointer -Wno-invalid-offsetof -linux_objs = -#============================================================================================================ -# Setup compiler, compiler instructions and linker for hexagon -HEXAGON_CXX_FLAGS := $(COMMON_CXX_FLAGS) -mhvx -mhvx-length=128B -mhmx -DUSE_OS_QURT -O2 -Wno-reorder -DPREPARE_DISABLED -HEXAGON_CXX_FLAGS += -I$(HEXAGON_SDK_ROOT_V75)/incs -I$(HEXAGON_SDK_ROOT_V75)/incs/stddef - -HEXAGON_CXX_FLAGS_V68 := $(HEXAGON_CXX_FLAGS) -mv68 -I$(HEXAGON_SDK_ROOT_V68)/rtos/qurt/computev68/include/qurt -I$(HEXAGON_SDK_ROOT_V68)/rtos/qurt/computev68/include/posix -HEXAGON_CXX_FLAGS_V69 := $(HEXAGON_CXX_FLAGS) -mv69 -I$(HEXAGON_SDK_ROOT_V69)/rtos/qurt/computev69/include/qurt -I$(HEXAGON_SDK_ROOT_V69)/rtos/qurt/computev69/include/posix -HEXAGON_CXX_FLAGS_V73 := $(HEXAGON_CXX_FLAGS) -mv73 -I$(HEXAGON_SDK_ROOT_V73)/rtos/qurt/computev73/include/qurt -I$(HEXAGON_SDK_ROOT_V73)/rtos/qurt/computev73/include/posix -HEXAGON_CXX_FLAGS_V75 := $(HEXAGON_CXX_FLAGS) -mv75 -I$(HEXAGON_SDK_ROOT_V75)/rtos/qurt/computev75/include/qurt -I$(HEXAGON_SDK_ROOT_V75)/rtos/qurt/computev75/include/posix - -HEXAGON_CXX_V68 := $(HEXAGON_SDK_ROOT_V68)/tools/HEXAGON_Tools/$(HEXAGON_TOOLS_VERSION_V68)/Tools/bin/hexagon-clang++ -HEXAGON_CXX_V69 := 
$(HEXAGON_SDK_ROOT_V69)/tools/HEXAGON_Tools/$(HEXAGON_TOOLS_VERSION_V69)/Tools/bin/hexagon-clang++ -HEXAGON_CXX_V73 := $(HEXAGON_SDK_ROOT_V73)/tools/HEXAGON_Tools/$(HEXAGON_TOOLS_VERSION_V73)/Tools/bin/hexagon-clang++ -HEXAGON_CXX_V75 := $(HEXAGON_SDK_ROOT_V75)/tools/HEXAGON_Tools/$(HEXAGON_TOOLS_VERSION_V75)/Tools/bin/hexagon-clang++ - -HEX_LDFLAGS = -hexagon_objs = -#============================================================================================================ -# Setup compiler, compiler instructions and linker for aarch64 -AARCH64_C__FLAGS = -D__HVXDBL__ -I$(X86_LIBNATIVE_RELEASE_DIR)/libnative/include -ffast-math -DUSE_OS_LINUX -DANDROID -AARCH64_CXX_FLAGS = $(COMMON_CXX_FLAGS) $(AARCH64_C__FLAGS) -fomit-frame-pointer -Wno-invalid-offsetof -Wno-unused-variable -Wno-unused-parameter -Wno-missing-braces -Wno-sign-compare -Wno-unused-private-field -Wno-unused-variable -Wno-ignored-qualifiers -Wno-missing-field-initializers -ARM_CLANG_OPTS =--target=aarch64-none-linux-android21 --sysroot=$(ANDROID_NDK_ROOT)/toolchains/llvm/prebuilt/linux-x86_64/sysroot -stdlib=libc++ -static-libstdc++ -AARCH64_CXX = $(ANDROID_NDK_ROOT)/toolchains/llvm/prebuilt/linux-x86_64/bin/clang++ $(ARM_CLANG_OPTS) -AARCH64_LDFLAGS = -L$(QNN_TARGET_LIB) -lQnnHtp -lQnnHtpPrepare -aarch64_objs = -#============================================================================================================ -# Setup targets and goals - -htp_x86: X86_BUILD - -htp_v68: HEXAGON_BUILD_V68 - -htp_v69: HEXAGON_BUILD_V69 - -htp_v73: HEXAGON_BUILD_V73 - -htp_v75: HEXAGON_BUILD_V75 - -htp_aarch64: AARCH64_BUILD - -AARCH64_BUILD: $(WORK)/aarch64-android/$(LIBRARY_NAME) - -HEXAGON_BUILD_V68: $(WORK)/hexagon-v68/$(LIBRARY_NAME) - -HEXAGON_BUILD_V69: $(WORK)/hexagon-v69/$(LIBRARY_NAME) - -HEXAGON_BUILD_V73: $(WORK)/hexagon-v73/$(LIBRARY_NAME) - -HEXAGON_BUILD_V75: $(WORK)/hexagon-v75/$(LIBRARY_NAME) - -X86_BUILD: $(WORK)/x86_64-linux-clang/$(LIBRARY_NAME) - - -define build_objs = -ifneq ($(filter 
$(2),$(SUPPORTED_TARGETS)),) -$(2)_objs += $(foreach x,$(1),$(WORK)/$(2)/$(x)) -else -$$(error "Unknown target option provided: $(2): Supported targets are: $(SUPPORTED_TARGETS)") -endif -endef - -$(eval $(call build_objs,$(OTHER_OBJS),x86_64-linux-clang)) -$(eval $(call build_objs,$(OP_OBJS),x86_64-linux-clang)) -$(eval $(call build_objs,$(OP_OBJS_ASM_X86),x86_64-linux-clang)) -$(eval $(call build_objs,$(OTHER_OBJS),hexagon-v68)) -$(eval $(call build_objs,$(OP_OBJS),hexagon-v68)) -$(eval $(call build_objs,$(OP_OBJS_ASM_V68),hexagon-v68)) -$(eval $(call build_objs,$(OTHER_OBJS),hexagon-v69)) -$(eval $(call build_objs,$(OP_OBJS),hexagon-v69)) -$(eval $(call build_objs,$(OP_OBJS_ASM_V69),hexagon-v69)) -$(eval $(call build_objs,$(OTHER_OBJS),hexagon-v73)) -$(eval $(call build_objs,$(OP_OBJS),hexagon-v73)) -$(eval $(call build_objs,$(OP_OBJS_ASM_V73),hexagon-v73)) -$(eval $(call build_objs,$(OTHER_OBJS),hexagon-v75)) -$(eval $(call build_objs,$(OP_OBJS),hexagon-v75)) -$(eval $(call build_objs,$(OP_OBJS_ASM_V75),hexagon-v75)) -$(eval $(call build_objs,$(OTHER_OBJS),aarch64-android)) -$(eval $(call build_objs,$(OP_OBJS),aarch64-android)) -$(eval $(call build_objs,$(OP_OBJS_ASM_ANDROID),aarch64-android)) - -# x86 -$(WORK)/x86_64-linux-clang $(WORK)/hexagon-v68 $(WORK)/hexagon-v69 $(WORK)/hexagon-v73 $(WORK)/hexagon-v75 $(WORK)/aarch64-android: - @mkdir -p $@/ops - -$(WORK)/x86_64-linux-clang/%.o: $(SRC_DIR)/%.cpp | $(WORK)/x86_64-linux-clang - $(X86_CXX) $(X86_CXX_FLAGS) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ - -$(WORK)/x86_64-linux-clang/ops/%.o: $(OP_SRC_DIR)/%.cpp | $(WORK)/x86_64-linux-clang - $(X86_CXX) $(X86_CXX_FLAGS) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -DREFERENCE_OP -c $< -o $@ - -$(WORK)/x86_64-linux-clang/ops/%.o: $(OP_SRC_DIR)/x86_asm/%.S | $(WORK)/x86_64-linux-clang - $(X86_CXX) $(X86_CXX_FLAGS) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ - -$(WORK)/x86_64-linux-clang/$(LIBRARY_NAME): $(x86_64-linux-clang_objs) | $(HFILES) - $(X86_CXX) -fPIC 
-std=c++17 -g -shared -o $@ $^ $(X86_LDFLAGS) - -# v68 -$(WORK)/hexagon-v68/%.o: $(SRC_DIR)/%.cpp | $(WORK)/hexagon-v68 - $(HEXAGON_CXX_V68) $(HEXAGON_CXX_FLAGS_V68) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ - -$(WORK)/hexagon-v68/ops/%.o: $(OP_SRC_DIR)/%.cpp | $(WORK)/hexagon-v68 - $(HEXAGON_CXX_V68) $(HEXAGON_CXX_FLAGS_V68) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ - -$(WORK)/hexagon-v68/ops/%.o: $(OP_SRC_DIR)/v68_asm/%.S | $(WORK)/hexagon-v68 - $(HEXAGON_CXX_V68) $(HEXAGON_CXX_FLAGS_V68) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ - -$(WORK)/hexagon-v68/$(LIBRARY_NAME): $(hexagon-v68_objs) | $(HFILES) - $(HEXAGON_CXX_V68) -fPIC -std=c++17 -g -shared -o $@ $^ $(HEX_LDFLAGS) - -# v69 -$(WORK)/hexagon-v69/%.o: $(SRC_DIR)/%.cpp | $(WORK)/hexagon-v69 - $(HEXAGON_CXX_V69) $(HEXAGON_CXX_FLAGS_V69) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ - -$(WORK)/hexagon-v69/ops/%.o: $(OP_SRC_DIR)/%.cpp | $(WORK)/hexagon-v69 - $(HEXAGON_CXX_V69) $(HEXAGON_CXX_FLAGS_V69) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ - -$(WORK)/hexagon-v69/ops/%.o: $(OP_SRC_DIR)/v69_asm/%.S | $(WORK)/hexagon-v69 - $(HEXAGON_CXX_V69) $(HEXAGON_CXX_FLAGS_V69) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ - -$(WORK)/hexagon-v69/$(LIBRARY_NAME): $(hexagon-v69_objs) | $(HFILES) - $(HEXAGON_CXX_V69) -fPIC -std=c++17 -g -shared -o $@ $^ $(HEX_LDFLAGS) - -# v73 -$(WORK)/hexagon-v73/%.o: $(SRC_DIR)/%.cpp | $(WORK)/hexagon-v73 - $(HEXAGON_CXX_V73) $(HEXAGON_CXX_FLAGS_V73) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ - -$(WORK)/hexagon-v73/ops/%.o: $(OP_SRC_DIR)/%.cpp | $(WORK)/hexagon-v73 - $(HEXAGON_CXX_V73) $(HEXAGON_CXX_FLAGS_V73) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD $(INCLUDES) -DHVX_OP -c $< -o $@ - -$(WORK)/hexagon-v73/ops/%.o: $(OP_SRC_DIR)/v73_asm/%.S | $(WORK)/hexagon-v73 - $(HEXAGON_CXX_V73) $(HEXAGON_CXX_FLAGS_V73) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ - -$(WORK)/hexagon-v73/$(LIBRARY_NAME): $(hexagon-v73_objs) | $(HFILES) - $(HEXAGON_CXX_V73) -fPIC 
-std=c++17 -g -shared -o $@ $^ $(HEX_LDFLAGS) $(WORKER_POOL_LIB) - -#v75 -$(WORK)/hexagon-v75/%.o: $(SRC_DIR)/%.cpp | $(WORK)/hexagon-v75 - $(HEXAGON_CXX_V75) $(HEXAGON_CXX_FLAGS_V75) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ - -$(WORK)/hexagon-v75/ops/%.o: $(OP_SRC_DIR)/%.cpp | $(WORK)/hexagon-v75 - $(HEXAGON_CXX_V75) $(HEXAGON_CXX_FLAGS_V75) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD $(INCLUDES) -DHVX_OP -c $< -o $@ - -$(WORK)/hexagon-v75/ops/%.o: $(OP_SRC_DIR)/v75_asm/%.S | $(WORK)/hexagon-v75 - $(HEXAGON_CXX_V75) $(HEXAGON_CXX_FLAGS_V75) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ - -$(WORK)/hexagon-v75/$(LIBRARY_NAME): $(hexagon-v75_objs) | $(HFILES) - $(HEXAGON_CXX_V75) -fPIC -std=c++17 -g -shared -o $@ $^ $(HEX_LDFLAGS) $(WORKER_POOL_LIB) - -# aarch64 -$(WORK)/aarch64-android/%.o: $(SRC_DIR)/%.cpp | $(WORK)/aarch64-android - $(AARCH64_CXX) $(AARCH64_CXX_FLAGS) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ - -$(WORK)/aarch64-android/ops/%.o: $(OP_SRC_DIR)/%.cpp | $(WORK)/aarch64-android - $(AARCH64_CXX) $(AARCH64_CXX_FLAGS) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -DREFERENCE_OP -c $< -o $@ - -$(WORK)/aarch64-android/ops/%.o: $(OP_SRC_DIR)/android_asm/%.S | $(WORK)/aarch64-android - $(AARCH64_CXX) $(AARCH64_CXX_FLAGS) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ - -$(WORK)/aarch64-android/$(LIBRARY_NAME): $(aarch64-android_objs) | $(HFILES) - $(AARCH64_CXX) -fPIC -std=c++17 -g -shared -o $@ $^ $(AARCH64_LDFLAGS) - -clean: - -rm -rf $(WORK) - -.PHONY: all clean - diff --git a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/IRoPE.cpp b/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/IRoPE.cpp deleted file mode 100755 index b237b70af..000000000 --- a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/IRoPE.cpp +++ /dev/null @@ -1,224 +0,0 @@ -//============================================================================== -// Auto Generated Code for LLaMAPackage 
-//============================================================================== - -#include "HTP/core/constraints.h" -#include "HTP/core/op_package_feature_support.h" -#include "HTP/core/op_register_ext.h" -#include "HTP/core/optimize.h" -#include "QnnOpPackage.h" -#include "HTP/core/simple_reg.h" - - -BEGIN_PKG_OP_DEFINITION(PKG_IRoPE); - - -// op execute function declarations -template -GraphStatus iropeImpl(TensorType& out_0, - const TensorType& in_0, - const TensorType& in_1, - const TensorType& cos, - const TensorType1 &h_cnt, - const Tensor& pose_type); - -// forward declaration of sample cost function -static float iropeCostFunc(const Op *op); - -/* - * method 1 for defining op, using default cost value (i.e. GLACIAL) and default flag (Flags::RESOURCE_HVX) - * syntax: DEF_PACKAGE_OP(F,OP) - * e.g. DEF_PACKAGE_OP((iropeImpl), "IRoPE") - */ -DEF_PACKAGE_OP((iropeImpl), "IRoPE") - -/* - * method 2 for defining op with specified cost value (one of GLACIAL, SNAIL, FAST, FREE) - * and provided flags - * syntax: DEF_PACKAGE_OP_AND_COST_AND_FLAGS(F,OP,COST,...) - * can use zero or more flags, FLAG options are IS_CONST, INHIBIT_CONST_PROP, - * RESOURCE_HVX, RESOURCE_HMX(not supported in external op packages) - * e.g. DEF_PACKAGE_OP_AND_COST_AND_FLAGS((iropeImpl), "IRoPE", SNAIL) - */ - -/* - * method 3 for defining op with cost function pointer and provided flags - * cost function pointer type: typedef float (*cost_function) (const Op * op); - * syntax: DEF_PACKAGE_OP_AND_COST_F_AND_FLAGS(F,OP,COST_F,...) - * e.g. 
DEF_PACKAGE_OP_AND_COST_F_AND_FLAGS((iropeImpl), - * "IRoPE", iropeCostFunc, Flags::RESOURCE_HVX) - */ - -/* - * optimization definitions - * need to be global in the package - * one definition per optimization - * syntax: DEF_PACKAGE_OPTIMIZATION(PRIORITY,MATCHCODE,CONSTRAINTCODE,REPLACECODE) - * PRIORITY predefined values include EARLY(2000), MIDDLE(3000), LATE(4000) - * HTP core provides some replacement functions for op package to use - * for more information about optimization rules, please refer to HTP core documentations - */ - -/* - * op parameter order definitions - * need to be global in the package - * one definition per op, and this is optional - * syntax: DEF_PACKAGE_PARAM_ORDER(OP,PARAM1,MANDATORY1,DEFAULT1,PARAM2,MANDATORY2,DEFAULT2...) - * one or more parameters can be specified for each op - * order of parameters listed determines the order of parameters passed into op execution functions - * if an op does not have a parameter order definition, parameter order passed into Qnn_addNode - * will be passed into op execution functions - * if an op has a parameter order definition, any parameter passed into Qnn_addNode with unlisted - * name will be abandoned - * if two or more op packages with the same package name will be registered, they cannot list - * conflicting parameter orders - * PARAM refers to parameter name as a string literal - * MANDATORY refers to whether this parameter is required to be provided at Qnn_addNode - * DEFAULT is used when MANDATORY is false - * if provided as Qnn_Param_t*, - * DEFAULT will be used for graph construction when this parameter is not provided at - * Qnn_addNode - * if provided as nullptr, - * graph construction will skip this parameter when this parameter is not provided at - * Qnn_addNode - */ -DEF_PACKAGE_PARAM_ORDER("IRoPE", - "pose_type", - true, - nullptr) - - -/* execute functions for ops */ - -template -GraphStatus iropeImpl(TensorType& out_0, - const TensorType& in_0, - const TensorType& sin, - const 
TensorType& cos, - const TensorType1 &h_cnt, - const Tensor& pose_type) - -{ - /* - * add code here - * */ - /* - * To have good performance and stability, it is required to avoid heap memory - * allocation in this function. The heap memory allocation includes but not - * limited to calling malloc, operator new, constructing STL container objects - * like std::vector with default allocator, and adding items like calling - * std::vector::push_back to STL container objects with default allocator. - * - * Please check in SDK documentation for more information. - */ - auto pose_type_ = pose_type(0,0,0,0); - auto h_cnt_ = static_cast(h_cnt(0,0,0,0)); - - out_0.set_dims(in_0); - auto [b_in, h_in, w_in, d_in] = in_0.dims(); - - uint32_t half_dimension = d_in / 2; - - auto sin_ptr = (uint8_t*)sin.raw_data_const(); - auto cos_ptr = (uint8_t*)cos.raw_data_const(); - - auto in_ptr = (uint8_t*)in_0.raw_data_const(); - - sin_ptr += half_dimension * h_cnt_; - cos_ptr += half_dimension * h_cnt_; - - // float scale_ = in_0.get_interface_scale() * sin.get_interface_scale() * cos.get_interface_scale(); - - if (pose_type_ == 4) { - DType dtype = out_0.get_dtype(); - - if (dtype == DType::Float32) { - - auto out_ptr = (float*)out_0.raw_data(); - for (Idx b = 0; b < b_in; b++) { - for (Idx h = 0; h < h_in; h++) { - for (Idx w = 0; w < w_in; w++) { - - int partial_dimension = d_in; - for (Idx d = 0; d < partial_dimension / 2; ++d) { - int in_value = *in_ptr; - int in_value_2 = *(in_ptr + half_dimension); - - int sin_value = *(sin_ptr+d); - int cos_value = *(cos_ptr+d); - float value = (in_value-128) * (cos_value-128) * cos.get_interface_scale() - (in_value_2-128) * (sin_value-128) * sin.get_interface_scale(); - float value2 = (in_value-128) * (sin_value-128) * sin.get_interface_scale() + (in_value_2-128) * (cos_value-128) * cos.get_interface_scale(); - - *out_ptr = value; - *(out_ptr + half_dimension) = value2; - - out_ptr++; - in_ptr++; - } - - in_ptr += half_dimension; - out_ptr += 
half_dimension; - } - - sin_ptr += half_dimension; - cos_ptr += half_dimension; - - } - } - } else if (dtype == DType::Float16) { - - auto out_ptr = (__fp16*)out_0.raw_data(); - for (Idx b = 0; b < b_in; b++) { - for (Idx h = 0; h < h_in; h++) { - for (Idx w = 0; w < w_in; w++) { - - int partial_dimension = d_in; - for (Idx d = 0; d < partial_dimension / 2; ++d) { - int in_value = *in_ptr; - int in_value_2 = *(in_ptr + half_dimension); - - int sin_value = *(sin_ptr+d); - int cos_value = *(cos_ptr+d); - float value = (in_value-128) * (cos_value-128) * cos.get_interface_scale() - (in_value_2-128) * (sin_value-128) * sin.get_interface_scale(); - float value2 = (in_value-128) * (sin_value-128) * sin.get_interface_scale() + (in_value_2-128) * (cos_value-128) * cos.get_interface_scale(); - - *out_ptr = static_cast<__fp16>(value); - *(out_ptr + half_dimension) = static_cast<__fp16>(value2); - - out_ptr++; - in_ptr++; - } - - in_ptr += half_dimension; - out_ptr += half_dimension; - } - - sin_ptr += half_dimension; - cos_ptr += half_dimension; - - } - } - } - } - - return GraphStatus::Success; -} - -__attribute__((unused)) static float iropeCostFunc(const Op *op) -{ - /* - * add code here - * */ - - float cost = 0.0; // add cost computation here - return cost; -} - - - - - -/* At the bottom of the op file, call END_PKG_OP_DEFINITION(), - where is as BEGIN_PKG_OP_DEFINITION -*/ -END_PKG_OP_DEFINITION(PKG_IRoPE); \ No newline at end of file diff --git a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMADequantize.cpp b/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMADequantize.cpp deleted file mode 100755 index 6afb884f2..000000000 --- a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMADequantize.cpp +++ /dev/null @@ -1,420 +0,0 @@ -//============================================================================== -// Auto Generated Code for LLaMAPackage -//============================================================================== - 
-#include "HTP/core/constraints.h" -#include "HTP/core/op_package_feature_support.h" -#include "HTP/core/op_register_ext.h" -#include "HTP/core/optimize.h" -#include "QnnOpPackage.h" -#include "HTP/core/simple_reg.h" - - -BEGIN_PKG_OP_DEFINITION(PKG_LLaMADequantize); - - -// op execute function declarations -template -GraphStatus llamadequantizeImpl(TensorType1 &out_0, - const TensorType1 &in_0, - const PlainFloatTensor& scale); - -// forward declaration of sample cost function -static float llamadequantizeCostFunc(const Op *op); - -/* - * method 1 for defining op, using default cost value (i.e. GLACIAL) and default flag (Flags::RESOURCE_HVX) - * syntax: DEF_PACKAGE_OP(F,OP) - * e.g. DEF_PACKAGE_OP((llamadequantizeImpl), "LLaMADequantize") - */ -DEF_PACKAGE_OP((llamadequantizeImpl), "LLaMADequantize") - -/* - * method 2 for defining op with specified cost value (one of GLACIAL, SNAIL, FAST, FREE) - * and provided flags - * syntax: DEF_PACKAGE_OP_AND_COST_AND_FLAGS(F,OP,COST,...) - * can use zero or more flags, FLAG options are IS_CONST, INHIBIT_CONST_PROP, - * RESOURCE_HVX, RESOURCE_HMX(not supported in external op packages) - * e.g. DEF_PACKAGE_OP_AND_COST_AND_FLAGS((llamadequantizeImpl), "LLaMADequantize", SNAIL) - */ - -/* - * method 3 for defining op with cost function pointer and provided flags - * cost function pointer type: typedef float (*cost_function) (const Op * op); - * syntax: DEF_PACKAGE_OP_AND_COST_F_AND_FLAGS(F,OP,COST_F,...) - * e.g. 
DEF_PACKAGE_OP_AND_COST_F_AND_FLAGS((llamadequantizeImpl), - * "LLaMADequantize", llamadequantizeCostFunc, Flags::RESOURCE_HVX) - */ - -/* - * optimization definitions - * need to be global in the package - * one definition per optimization - * syntax: DEF_PACKAGE_OPTIMIZATION(PRIORITY,MATCHCODE,CONSTRAINTCODE,REPLACECODE) - * PRIORITY predefined values include EARLY(2000), MIDDLE(3000), LATE(4000) - * HTP core provides some replacement functions for op package to use - * for more information about optimization rules, please refer to HTP core documentations - */ - -/* - * op parameter order definitions - * need to be global in the package - * one definition per op, and this is optional - * syntax: DEF_PACKAGE_PARAM_ORDER(OP,PARAM1,MANDATORY1,DEFAULT1,PARAM2,MANDATORY2,DEFAULT2...) - * one or more parameters can be specified for each op - * order of parameters listed determines the order of parameters passed into op execution functions - * if an op does not have a parameter order definition, parameter order passed into Qnn_addNode - * will be passed into op execution functions - * if an op has a parameter order definition, any parameter passed into Qnn_addNode with unlisted - * name will be abandoned - * if two or more op packages with the same package name will be registered, they cannot list - * conflicting parameter orders - * PARAM refers to parameter name as a string literal - * MANDATORY refers to whether this parameter is required to be provided at Qnn_addNode - * DEFAULT is used when MANDATORY is false - * if provided as Qnn_Param_t*, - * DEFAULT will be used for graph construction when this parameter is not provided at - * Qnn_addNode - * if provided as nullptr, - * graph construction will skip this parameter when this parameter is not provided at - * Qnn_addNode - */ -DEF_PACKAGE_PARAM_ORDER("LLaMADequantize", - "scale", - true, - nullptr) - -#ifndef REFERENCE_OP -/* execute functions for ops */ -#include "qhmath_hvx.h" -#include "hvx_internal.h" -#include 
-#include - -#define BLOCK_SIZE (8*1024/VLEN) /* vector chunks */ -#define L2FETCH_AHEAD (BLOCK_SIZE) - -static inline int32_t float_to_fp16s(float input) -{ - union { - int32_t i; - __fp16 f[2]; - } fp32 = {.f = {(__fp16)input, (__fp16)input}}; - return fp32.i; -} - -static HVX_INLINE_ALWAYS uint32_t float_to_bits(float x) -{ - union { float f; uint32_t i; } fp32 = { .f = x }; - return fp32.i; -} - - - -/* execute functions for ops */ -int32_t qhmath_hvx_dequantize_ahf( - int8_t *restrict input, - int8_t *restrict output, - uint32_t size, - float scale) -{ - if ((input == NULL) || (output == NULL) || (size == 0)) - { - return -1; - } - - HVX_Vector *iptr = (HVX_Vector *) input; - HVX_UVector *optr = (HVX_UVector *) output; - - HVX_Vector sline1p, sline1c, sline1; - HVX_Vector scale_vec; - - int32_t block, l2fetch_block; - int32_t leftover = size & 63; - int32_t vectors_in_rounddown = size / 128; // element number! - // int32_t leftover_size = leftover * sizeof(float); - - sline1p = *iptr++; - - uint32_t convert = 0x00800080; - HVX_Vector convert_vector = Q6_V_vsplat_R(convert); - - - scale_vec = Q6_V_vsplat_R(float_to_fp16s(scale)); - HVX_Vector zero_v_sf = Q6_V_vzero(); - - for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) - { - block = Q6_R_min_RR(i, BLOCK_SIZE); - l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); - - if (l2fetch_block > 0) - { - l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); - } - - for (int32_t j = 0; j < block; ++j) - { - sline1c = *iptr++; - sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t) input); - HVX_VectorPair temp = Q6_Wh_vadd_VubVub(sline1, zero_v_sf); - - temp = Q6_W_vshuff_VVR(Q6_V_hi_W(temp), Q6_V_lo_W(temp), -2); - HVX_Vector sout1 = Q6_Vh_vsub_VhVh(Q6_V_lo_W(temp), convert_vector); - HVX_Vector sout2 = Q6_Vh_vsub_VhVh(Q6_V_hi_W(temp), convert_vector); - - - *optr++ = Q6_Vhf_equals_Vqf16(Q6_Vqf16_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout1), scale_vec)); - *optr++ = 
Q6_Vhf_equals_Vqf16(Q6_Vqf16_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout2), scale_vec)); - - sline1p = sline1c; - } - } - - if (vectors_in_rounddown > 0) { - - sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? sline1p : *iptr++; - sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t) input); - - HVX_VectorPair temp = Q6_Wh_vadd_VubVub(sline1, zero_v_sf); - - temp = Q6_W_vshuff_VVR(Q6_V_hi_W(temp), Q6_V_lo_W(temp), -2); - HVX_Vector sout1 = Q6_Vh_vsub_VhVh(Q6_V_lo_W(temp), convert_vector); - HVX_Vector sout2 = Q6_Vh_vsub_VhVh(Q6_V_hi_W(temp), convert_vector); - - - *optr++ = Q6_Vhf_equals_Vqf16(Q6_Vqf16_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout1), scale_vec)); - *optr++ = Q6_Vhf_equals_Vqf16(Q6_Vqf16_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout2), scale_vec)); - - } - - - return 0; -} - -// Only support 128x dimension -int32_t qhmath_hvx_dequantize_af( - int8_t *restrict input, - int8_t *restrict output, - uint32_t size, - float scale) -{ - if ((input == NULL) || (output == NULL) || (size == 0)) - { - return -1; - } - - HVX_Vector *iptr = (HVX_Vector *) input; - HVX_UVector *optr = (HVX_UVector *) output; - - HVX_Vector sline1p, sline1c, sline1; - HVX_Vector scale_vec; - HVX_Vector one_vec; - - int32_t block, l2fetch_block; - int32_t leftover = size & 127; - int32_t vectors_in_rounddown = size / 128; - // int32_t leftover_size = leftover * sizeof(float); - - sline1p = *iptr++; - - uint32_t convert = 0x00800080; - HVX_Vector convert_vector = Q6_V_vsplat_R(convert); - - - scale_vec = Q6_V_vsplat_R(float_to_bits(scale)); - one_vec = Q6_V_vsplat_R(float_to_fp16s(1.0)); - HVX_Vector zero_v_sf = Q6_V_vzero(); - scale_vec = Q6_Vqf32_vadd_VsfVsf(scale_vec, Q6_V_vzero()); - - for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) - { - block = Q6_R_min_RR(i, BLOCK_SIZE); - l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); - - if (l2fetch_block > 0) - { - l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); - } - - for (int32_t j = 0; j < block; ++j) - { - sline1c = *iptr++; 
- sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t) input); - HVX_VectorPair temp = Q6_Wh_vadd_VubVub(sline1, zero_v_sf); - - temp = Q6_W_vshuff_VVR(Q6_V_hi_W(temp), Q6_V_lo_W(temp), -2); - HVX_Vector sout1 = Q6_Vh_vsub_VhVh(Q6_V_lo_W(temp), convert_vector); - HVX_Vector sout2 = Q6_Vh_vsub_VhVh(Q6_V_hi_W(temp), convert_vector); - - HVX_VectorPair result1 = Q6_Wqf32_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout1), one_vec); - result1 = Q6_W_vshuff_VVR(Q6_V_hi_W(result1), Q6_V_lo_W(result1), -4); - - HVX_VectorPair result2 = Q6_Wqf32_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout2), one_vec); - result2 = Q6_W_vshuff_VVR(Q6_V_hi_W(result2), Q6_V_lo_W(result2), -4); - - *optr++ = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result1), scale_vec)); - *optr++ = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result1), scale_vec)); - *optr++ = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result2), scale_vec)); - *optr++ = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result2), scale_vec)); - - sline1p = sline1c; - } - } - - if (vectors_in_rounddown > 0) { - - sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? 
sline1p : *iptr++; - sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t) input); - - HVX_VectorPair temp = Q6_Wh_vadd_VubVub(sline1, zero_v_sf); - - temp = Q6_W_vshuff_VVR(Q6_V_hi_W(temp), Q6_V_lo_W(temp), -2); - HVX_Vector sout1 = Q6_Vh_vsub_VhVh(Q6_V_lo_W(temp), convert_vector); - HVX_Vector sout2 = Q6_Vh_vsub_VhVh(Q6_V_hi_W(temp), convert_vector); - - HVX_VectorPair result1 = Q6_Wqf32_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout1), one_vec); - result1 = Q6_W_vshuff_VVR(Q6_V_hi_W(result1), Q6_V_lo_W(result1), -4); - - HVX_VectorPair result2 = Q6_Wqf32_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout2), one_vec); - result2 = Q6_W_vshuff_VVR(Q6_V_hi_W(result2), Q6_V_lo_W(result2), -4); - - *optr++ = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result1), scale_vec)); - *optr++ = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result1), scale_vec)); - *optr++ = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result2), scale_vec)); - *optr++ = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result2), scale_vec)); - - } - - return 0; -} - -template -GraphStatus llamadequantizeImpl(TensorType1 &out_0, - const TensorType1 &in_0, - const PlainFloatTensor& scale) - -{ - /* - * add code here - * */ - /* - * To have good performance and stability, it is required to avoid heap memory - * allocation in this function. The heap memory allocation includes but not - * limited to calling malloc, operator new, constructing STL container objects - * like std::vector with default allocator, and adding items like calling - * std::vector::push_back to STL container objects with default allocator. - * - * Please check in SDK documentation for more information. 
- */ - - // HVX Method -- FP32 Version - out_0.set_dims(in_0); - - // NHWC - auto in_ptr = (int8_t*)in_0.raw_data_const(); - auto out_ptr = (int8_t*)out_0.raw_data(); - auto [b_in, h_in, w_in, d_in] = in_0.dims(); - - float scale_ = scale(0,0,0,0); - - - size_t size = b_in*h_in*w_in*d_in; - - if (in_0.get_dtype() == DType::QUInt8 && out_0.get_dtype() == DType::Float16) { - qhmath_hvx_dequantize_ahf(in_ptr, out_ptr, size, scale_); - } - else { - qhmath_hvx_dequantize_af(in_ptr, out_ptr, size, scale_); - } - - - - return GraphStatus::Success; -} -#else -template -GraphStatus llamadequantizeImpl(TensorType1 &out_0, - const TensorType1 &in_0, - const PlainFloatTensor& scale) - -{ - /* - * add code here - * */ - /* - * To have good performance and stability, it is required to avoid heap memory - * allocation in this function. The heap memory allocation includes but not - * limited to calling malloc, operator new, constructing STL container objects - * like std::vector with default allocator, and adding items like calling - * std::vector::push_back to STL container objects with default allocator. - * - * Please check in SDK documentation for more information. 
- */ - - // HVX Method -- FP32 Version - out_0.set_dims(in_0); - - float scale_ = scale(0,0,0,0); - - auto in_ptr = (uint8_t*)in_0.raw_data_const(); - auto [b_in, h_in, w_in, d_in] = in_0.dims(); - - - if (out_0.get_dtype() == DType::Float32) { - auto out_ptr = (float*)out_0.raw_data(); - - for (Idx b = 0; b < b_in; b++) { - for (Idx h = 0; h < h_in; h++) { - for (Idx w = 0; w < w_in; w++) { - for (Idx d = 0; d < d_in; d++) { - - int32_t inval = static_cast(*in_ptr++); - *out_ptr++ = (inval-128) * scale_; - - } - } - } - } - } else if (out_0.get_dtype() == DType::Float16) { - - auto out_ptr = (__fp16*)out_0.raw_data(); - - for (Idx b = 0; b < b_in; b++) { - for (Idx h = 0; h < h_in; h++) { - for (Idx w = 0; w < w_in; w++) { - for (Idx d = 0; d < d_in; d++) { - - int32_t inval = static_cast(*in_ptr++); - *out_ptr++ = (__fp16)((inval-128) * scale_); - - } - } - } - } - } - - return GraphStatus::Success; -} - -#endif - - -__attribute__((unused)) static float llamadequantizeCostFunc(const Op *op) -{ - /* - * add code here - * */ - - float cost = 0.0; // add cost computation here - return cost; -} - - - - - -/* At the bottom of the op file, call END_PKG_OP_DEFINITION(), - where is as BEGIN_PKG_OP_DEFINITION -*/ -END_PKG_OP_DEFINITION(PKG_LLaMADequantize); \ No newline at end of file diff --git a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMASuperSiLU.cpp b/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMASuperSiLU.cpp deleted file mode 100755 index 0a849ca11..000000000 --- a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/LLaMASuperSiLU.cpp +++ /dev/null @@ -1,1171 +0,0 @@ -//============================================================================== -// Auto Generated Code for LLaMAPackage -//============================================================================== - -#include "HTP/core/constraints.h" -#include "HTP/core/op_package_feature_support.h" -#include "HTP/core/op_register_ext.h" -#include "HTP/core/optimize.h" 
-#include "QnnOpPackage.h" -#include "HTP/core/simple_reg.h" - - -BEGIN_PKG_OP_DEFINITION(PKG_LLaMASuperSiLU); - - -// op execute function declarations -template -GraphStatus llamasupersiluImpl(TensorType& out_0, - const TensorType& in_0, - const TensorType& in_1, - const PlainFloatTensor& a_scale, - const PlainFloatTensor& b_scale, - const PlainFloatTensor& o_scale); - -// forward declaration of sample cost function -static float llamasupersiluCostFunc(const Op *op); - -/* - * method 1 for defining op, using default cost value (i.e. GLACIAL) and default flag (Flags::RESOURCE_HVX) - * syntax: DEF_PACKAGE_OP(F,OP) - * e.g. DEF_PACKAGE_OP((llamasupersiluImpl), "LLaMASuperSiLU") - */ -DEF_PACKAGE_OP((llamasupersiluImpl), "LLaMASuperSiLU") - -/* - * method 2 for defining op with specified cost value (one of GLACIAL, SNAIL, FAST, FREE) - * and provided flags - * syntax: DEF_PACKAGE_OP_AND_COST_AND_FLAGS(F,OP,COST,...) - * can use zero or more flags, FLAG options are IS_CONST, INHIBIT_CONST_PROP, - * RESOURCE_HVX, RESOURCE_HMX(not supported in external op packages) - * e.g. DEF_PACKAGE_OP_AND_COST_AND_FLAGS((llamasupersiluImpl), "LLaMASuperSiLU", SNAIL) - */ - -/* - * method 3 for defining op with cost function pointer and provided flags - * cost function pointer type: typedef float (*cost_function) (const Op * op); - * syntax: DEF_PACKAGE_OP_AND_COST_F_AND_FLAGS(F,OP,COST_F,...) - * e.g. 
DEF_PACKAGE_OP_AND_COST_F_AND_FLAGS((llamasupersiluImpl), - * "LLaMASuperSiLU", llamasupersiluCostFunc, Flags::RESOURCE_HVX) - */ - -/* - * optimization definitions - * need to be global in the package - * one definition per optimization - * syntax: DEF_PACKAGE_OPTIMIZATION(PRIORITY,MATCHCODE,CONSTRAINTCODE,REPLACECODE) - * PRIORITY predefined values include EARLY(2000), MIDDLE(3000), LATE(4000) - * HTP core provides some replacement functions for op package to use - * for more information about optimization rules, please refer to HTP core documentations - */ - -/* - * op parameter order definitions - * need to be global in the package - * one definition per op, and this is optional - * syntax: DEF_PACKAGE_PARAM_ORDER(OP,PARAM1,MANDATORY1,DEFAULT1,PARAM2,MANDATORY2,DEFAULT2...) - * one or more parameters can be specified for each op - * order of parameters listed determines the order of parameters passed into op execution functions - * if an op does not have a parameter order definition, parameter order passed into Qnn_addNode - * will be passed into op execution functions - * if an op has a parameter order definition, any parameter passed into Qnn_addNode with unlisted - * name will be abandoned - * if two or more op packages with the same package name will be registered, they cannot list - * conflicting parameter orders - * PARAM refers to parameter name as a string literal - * MANDATORY refers to whether this parameter is required to be provided at Qnn_addNode - * DEFAULT is used when MANDATORY is false - * if provided as Qnn_Param_t*, - * DEFAULT will be used for graph construction when this parameter is not provided at - * Qnn_addNode - * if provided as nullptr, - * graph construction will skip this parameter when this parameter is not provided at - * Qnn_addNode - */ -DEF_PACKAGE_PARAM_ORDER("LLaMASuperSiLU", - "a_scale", - true, - nullptr, - "b_scale", - true, - nullptr, - "o_scale", - true, - nullptr) - - -/* execute functions for ops */ - -#ifndef 
REFERENCE_OP - -#include "qhmath_hvx.h" -#include "hvx_internal.h" -#include -#include - -#define BLOCK_SIZE (8*1024/VLEN) /* vector chunks */ -#define L2FETCH_AHEAD (BLOCK_SIZE) - -#define FP16_MANTISA 10 -#define FP16_EXPONENT_MASK 0x1f -#define FP16_EXPONENT_BIAS 0xf -#define FP16_MANTISA_MASK 0x000003ff -#define FP16_SIGN 15 -#define FP16_NEG_1 0xbc00 -#define ROUND_2_SCALE 22 -#define ROUND_SCALSE ((1 << ROUND_2_SCALE) * 1.0f) - -static inline int32_t float_to_fp16s(float input) -{ - union { - int32_t i; - __fp16 f[2]; - } fp32 = {.f = {(__fp16)input, (__fp16)input}}; - return fp32.i; -} - -static HVX_INLINE_ALWAYS uint32_t float_to_bits(float x) -{ - union { float f; uint32_t i; } fp32 = { .f = x }; - return fp32.i; -} - - -static const float fp16_c0_coeffs[32] __attribute__((aligned(VLEN))) = -{ - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.13239719960243818,0.2216255210749415,0.3447664743728659,0.48137452032585476,0.5716299228719798,0.5547323231605259,0.5046287748870234,0.4999985574626892, -0.5000036514755082,0.49475652448004626,0.4441393352532763,0.428500379952032,0.5173297285470642,0.6541461039833616,0.7783931007462818,0.8678015179911097, -}; -static const float fp16_c1_coeffs[32] __attribute__((aligned(VLEN))) = -{ - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.05928005756790343,0.11063222460270064,0.1932879057003057,0.30302440212086995,0.3922924462181049,0.36546332659415875,0.2644148210990377,0.24989020912329707, -0.2498532691910313,0.2661055781198988,0.36728015359480604,0.39215270010450015,0.3041825601732039,0.1940762094668647,0.11061794856987572,0.059174800917353595, -}; -static const float fp16_c2_coeffs[32] __attribute__((aligned(VLEN))) = -{ - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
-0.010145494303219278,0.02123968384425681,0.04207468332514667,0.07519946712591977,0.10840620196267145,0.09270738184406795,0.015322371881818012,-0.0009948273994921822, -0.0011544907060402412,-0.017040517565094934,-0.09379878876657094,-0.10835043868732394,-0.07558705272699548,-0.04228875316413285,-0.021235740718738055,-0.010124599879590107, -}; -static const float fp16_c3_coeffs[32] __attribute__((aligned(VLEN))) = -{ - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0007841223015974933,0.001850453397354219,0.004187899308371771,0.008640952434084206,0.01414741414964877,0.010117749275618,-0.01654848996354919,-0.02395108399453624, --0.024199111971064446,-0.015783556879607072,0.010407672131558174,0.014137608186323335,0.008698510795258909,0.004213708431213342,0.0018499827774393985,0.0007822799742289481, -}; -static const float fp16_c4_coeffs[32] __attribute__((aligned(VLEN))) = -{ - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -2.3031641204975905e-05,6.150442488966733e-05,0.00015997783736818624,0.00038491646239693526,0.0007283649599237781,0.00034439150914392054,-0.003142246198646662,-0.004120389580321761, -0.004246050162553198,0.0030162727520777893,-0.00037312974308425725,-0.0007277242855014247,-0.00038811687679772674,-0.0001611434776868886,-6.14837984586862e-05,-2.297076123375133e-05, -}; - -int32_t hvx_supersilu_ahf( - uint8_t *restrict input, - uint8_t *restrict input2, - uint8_t *restrict output, - float a_scale, - float b_scale, - float o_scale, - uint32_t size) -{ - if ((input == NULL) || (output == NULL) || (size == 0)) - { - return -1; - } - - HVX_Vector *iptr = (HVX_Vector *)input; - HVX_Vector *iptr2 = (HVX_Vector *)input2; - HVX_UVector *optr = (HVX_UVector *)output; - HVX_Vector sline1p, sline1c, sline1; - HVX_Vector sline2p, sline2c, sline2; - - int32_t block, l2fetch_block; - int32_t leftover = size & 128; - int32_t vectors_in_rounddown = size / 128; - // int32_t leftover_size = 
leftover * sizeof(__fp16); - - sline1p = *iptr++; - sline2p = *iptr2++; - - - // dequantize - uint32_t convert = 0x00800080; - HVX_Vector convert_vector = Q6_V_vsplat_R(convert); - - - HVX_Vector a_scale_vec = Q6_V_vsplat_R(float_to_fp16s(a_scale)); - HVX_Vector b_scale_vec = Q6_V_vsplat_R(float_to_fp16s(b_scale)); - HVX_Vector zero_v_sf = Q6_V_vzero(); - - - //silu - HVX_Vector input_min_v_hf; - HVX_Vector input_shifted_v_hf; - HVX_Vector input_scaled_v; - HVX_VectorPair input_vp_qf32; - // HVX_Vector input_v_qf16; - HVX_Vector mask_idx1_v, mask_idx2_v; - HVX_Vector const16_0_v_hf; - HVX_Vector zero_v_hf, one_v_hf; - HVX_Vector tmp_v; - HVX_Vector idx1_v, idx2_v; - HVX_Vector scale_v; - HVX_DV output_dv; - HVX_DV c0_coeff_dv; - HVX_VectorPair c0_coeff_vp; - HVX_Vector c0_coeff_v; - HVX_DV c1_coeff_dv; - HVX_VectorPair c1_coeff_vp; - HVX_Vector c1_coeff_v; - HVX_DV c2_coeff_dv; - HVX_VectorPair c2_coeff_vp; - HVX_Vector c2_coeff_v; - HVX_DV c3_coeff_dv; - HVX_VectorPair c3_coeff_vp; - HVX_Vector c3_coeff_v; - HVX_DV c4_coeff_dv; - HVX_VectorPair c4_coeff_vp; - HVX_Vector c4_coeff_v; - - scale_v = Q6_Vh_vsplat_R(0x3bfe); - - /* Vector of ones used as mpy neutral element in conversions from hf vector to qf32 vector pair */ - one_v_hf = Q6_Vh_vsplat_R(0x3c00); - - /* - * Vector of zeroes used as neutral element in hf to qf16 conversions. - * NOTE: Some of conversions (i.e conversion of scale factor and coefficients) - * can be avoided in real-time, but this is not done in order to don't - * sacrify code readibility in expense of insignificant performance improvement. - */ - zero_v_hf = Q6_V_vzero(); - - /* Mask for extracting only 4 bits of mantissa */ - mask_idx1_v = Q6_Vh_vsplat_R(0x000F); - - mask_idx2_v = Q6_V_vsplat_R(0x00001010); - - /* 16.0 in IEEE 16-bit floating-point representation */ - const16_0_v_hf = Q6_Vh_vsplat_R(0x4c00); - - /* - * Prepare vector of input_min values, that is used later in shifting input range. 
- * input_min is low boundary of specified input range. - */ - input_min_v_hf = Q6_Vh_vsplat_R(0xc800); - - /* Convert scale factor from hf to q16. Use the same vector for both formats */ - scale_v = Q6_Vqf16_vadd_VhfVhf(scale_v, zero_v_hf); - - /* Load coefficients */ - c0_coeff_v = *((HVX_Vector *)(fp16_c0_coeffs)); - c1_coeff_v = *((HVX_Vector *)(fp16_c1_coeffs)); - c2_coeff_v = *((HVX_Vector *)(fp16_c2_coeffs)); - c3_coeff_v = *((HVX_Vector *)(fp16_c3_coeffs)); - c4_coeff_v = *((HVX_Vector *)(fp16_c4_coeffs)); - - /* Convert coefficients from hf to qf32 format. Use the same vector for both representations */ - c0_coeff_v = Q6_Vqf32_vadd_VsfVsf(c0_coeff_v, zero_v_hf); - c1_coeff_v = Q6_Vqf32_vadd_VsfVsf(c1_coeff_v, zero_v_hf); - c2_coeff_v = Q6_Vqf32_vadd_VsfVsf(c2_coeff_v, zero_v_hf); - c3_coeff_v = Q6_Vqf32_vadd_VsfVsf(c3_coeff_v, zero_v_hf); - c4_coeff_v = Q6_Vqf32_vadd_VsfVsf(c4_coeff_v, zero_v_hf); - - /* Split 32-bit coefficients to lower and upper part in order to obtain them later with VLUT16. */ - c0_coeff_dv.VV = Q6_Wuw_vzxt_Vuh(c0_coeff_v); - c1_coeff_dv.VV = Q6_Wuw_vzxt_Vuh(c1_coeff_v); - c2_coeff_dv.VV = Q6_Wuw_vzxt_Vuh(c2_coeff_v); - c3_coeff_dv.VV = Q6_Wuw_vzxt_Vuh(c3_coeff_v); - c4_coeff_dv.VV = Q6_Wuw_vzxt_Vuh(c4_coeff_v); - - - // quantize - HVX_Vector low_level_vec, high_level_vec, o_scale_vec, es_vec, round_scale_vec; - HVX_Vector uintconvert = Q6_V_vsplat_R(0x80808080); - HVX_Vector vmb = Q6_V_vsplat_R(0x40004000); - - - float post_scale_flt = a_scale * b_scale * o_scale; - int scexp = flt_getexp( post_scale_flt); - int rsh = min_i32( -scexp,7); // e.g. 
0.11 -> 0.88, rsh = 3 - float rsh_fac = flt_power2(rsh); - - int adj_bias = roundf_i32(128 * rsh_fac); - adj_bias = Q6_R_combine_RlRl( adj_bias, adj_bias); - - HVX_Vector vadj = Q6_V_vsplat_R(adj_bias); - - float es = 0.5; - low_level_vec = Q6_V_vsplat_R(float_to_fp16s(-128.0f)); - high_level_vec = Q6_V_vsplat_R(float_to_fp16s(127.0f)); - o_scale_vec = Q6_V_vsplat_R(float_to_fp16s(post_scale_flt * rsh_fac * (1<<15))); - // one_vec = Q6_V_vsplat_R(float_to_fp16s(1.0f)); - // o_scale_vec = Q6_Vqf16_vadd_VhfVhf(o_scale_vec, zero_v_hf); - es_vec = Q6_V_vsplat_R(float_to_fp16s(es)); - round_scale_vec = Q6_V_vsplat_R(float_to_bits(ROUND_SCALSE)); - - es_vec = Q6_Vqf16_vadd_VhfVhf(es_vec, zero_v_sf); - round_scale_vec = Q6_Vqf32_vadd_VsfVsf(round_scale_vec, zero_v_sf); - - HVX_Vector expmask = Q6_Vh_vsplat_R(FP16_EXPONENT_MASK); - HVX_Vector expbias = Q6_Vh_vsplat_R(FP16_EXPONENT_BIAS); - HVX_Vector manmask = Q6_Vh_vsplat_R(FP16_MANTISA_MASK); - HVX_Vector exp23 = Q6_Vh_vsplat_R(23 - 1); - HVX_Vector exp0 = Q6_Vh_vsplat_R(0 - 1); - HVX_Vector negone = Q6_Vh_vsplat_R(FP16_NEG_1); - HVX_Vector zero = Q6_V_vzero(); - - for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) - { - block = Q6_R_min_RR(i, BLOCK_SIZE); - l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); - - if (l2fetch_block > 0) - { - l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); - l2fetch(iptr2 + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); - } - - for (int32_t j = 0; j < block; ++j) - { - sline1c = *iptr++; - sline2c = *iptr2++; - sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); - sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t)input2); - - - HVX_Vector sline1_high; - HVX_Vector sline1_low; - // HVX_Vector sline2_high; - // HVX_Vector sline2_low; - - { - // dequantize sline1 qf16 - HVX_VectorPair temp = Q6_Wh_vadd_VubVub(sline1, zero_v_sf); - - temp = Q6_W_vshuff_VVR(Q6_V_hi_W(temp), Q6_V_lo_W(temp), -2); - HVX_Vector sout1 = Q6_Vh_vsub_VhVh(Q6_V_lo_W(temp), 
convert_vector); - HVX_Vector sout2 = Q6_Vh_vsub_VhVh(Q6_V_hi_W(temp), convert_vector); - - sline1_low = Q6_Vqf16_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout1), a_scale_vec); - sline1_low = Q6_Vhf_equals_Vqf16(sline1_low); - sline1_high = Q6_Vqf16_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout2), a_scale_vec); - sline1_high = Q6_Vhf_equals_Vqf16(sline1_high); - } - - - // { - // // dequantize sline2 qf16 - // HVX_VectorPair temp = Q6_Wh_vadd_VubVub(sline2, zero_v_sf); - - // temp = Q6_W_vshuff_VVR(Q6_V_hi_W(temp), Q6_V_lo_W(temp), -2); - // HVX_Vector sout1 = Q6_Vh_vsub_VhVh(Q6_V_lo_W(temp), convert_vector); - // HVX_Vector sout2 = Q6_Vh_vsub_VhVh(Q6_V_hi_W(temp), convert_vector); - - // sline2_low = Q6_Vqf16_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout1), b_scale_vec); - // sline2_low = Q6_Vhf_equals_Vqf16(sline2_low); - // sline2_high = Q6_Vqf16_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout2), b_scale_vec); - // sline2_high = Q6_Vhf_equals_Vqf16(sline2_high); - // } - - { - // silu sline1_low - tmp_v = Q6_Vh_vdeal_Vh(sline1_low); - - /* Shift input range from [input_min, input_max] to [0, input_max - input_min] */ - input_shifted_v_hf = Q6_Vqf16_vsub_VhfVhf(tmp_v, input_min_v_hf); - - /* - * Scale shifted input range from [0, input_max - input_min] to [0,16.0) - * in order to get corresponding coefficient indexes - */ - input_scaled_v = Q6_Vqf16_vmpy_Vqf16Vqf16(input_shifted_v_hf, scale_v); - - /* - * VLUT 16 requires integer indexes. Shift scaled input range from [0,16.0) - * to [16.0,32.0) in order to convert float indexes to integer values. - * Float values, represented in IEEE 754, in range [16.0,32.0] have the - * same exponent, which means 4 MSB of mantissa carry information about - * integer index. 
- * Use the same input_scaled_v vector for hf and qf16 representation - */ - input_scaled_v = Q6_Vqf16_vadd_Vqf16Vhf(input_scaled_v, const16_0_v_hf); - - /* Convert back from qf16 to hf in order to extract integer index */ - tmp_v = Q6_Vhf_equals_Vqf16(input_scaled_v); - - /* Only 4 MSB bits of mantissa represent segment index */ - idx1_v = Q6_Vuh_vlsr_VuhR(tmp_v, 6); - - /* Ensure only 4 MSB bits of mantissa are used as indexes */ - idx1_v = Q6_V_vand_VV(idx1_v, mask_idx1_v); - - idx1_v = Q6_Vb_vshuff_Vb(idx1_v); - idx1_v = Q6_V_vor_VV(idx1_v, mask_idx2_v); - idx2_v = Q6_Vw_vasl_VwR(idx1_v, 16); - - /* Obtain the polynomial coefficients from lookup table */ - c0_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c0_coeff_dv.VV), 1); - c0_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c0_coeff_vp, idx2_v, Q6_V_hi_W(c0_coeff_dv.VV), 1); - c1_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c1_coeff_dv.VV), 1); - c1_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c1_coeff_vp, idx2_v, Q6_V_hi_W(c1_coeff_dv.VV), 1); - c2_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c2_coeff_dv.VV), 1); - c2_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c2_coeff_vp, idx2_v, Q6_V_hi_W(c2_coeff_dv.VV), 1); - c3_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c3_coeff_dv.VV), 1); - c3_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c3_coeff_vp, idx2_v, Q6_V_hi_W(c3_coeff_dv.VV), 1); - c4_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c4_coeff_dv.VV), 1); - c4_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c4_coeff_vp, idx2_v, Q6_V_hi_W(c4_coeff_dv.VV), 1); - - /* Convert input from hf vector to qf32 vector pair for Horner's method*/ - input_vp_qf32 = Q6_Wqf32_vmpy_VhfVhf(sline1_low, one_v_hf); - - /* Perform evaluation of polynomial using Horner's method */ - output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(c4_coeff_vp), Q6_V_lo_W(input_vp_qf32)); - output_dv.V.lo = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c3_coeff_vp)); - output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(input_vp_qf32)); - output_dv.V.lo = 
Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c2_coeff_vp)); - output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(input_vp_qf32)); - output_dv.V.lo = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c1_coeff_vp)); - output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(input_vp_qf32)); - output_dv.V.lo = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c0_coeff_vp)); - - output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(c4_coeff_vp), Q6_V_hi_W(input_vp_qf32)); - output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c3_coeff_vp)); - output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(input_vp_qf32)); - output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c2_coeff_vp)); - output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(input_vp_qf32)); - output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c1_coeff_vp)); - output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(input_vp_qf32)); - output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c0_coeff_vp)); - - // x * sigmod - // output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(input_vp_qf32), output_dv.V.lo); - // output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(input_vp_qf32), output_dv.V.hi); - - sline1_low = Q6_Vhf_equals_Wqf32(output_dv.VV); - } - - - { - // silu sline1_high - tmp_v = Q6_Vh_vdeal_Vh(sline1_high); - - /* Shift input range from [input_min, input_max] to [0, input_max - input_min] */ - input_shifted_v_hf = Q6_Vqf16_vsub_VhfVhf(tmp_v, input_min_v_hf); - - /* - * Scale shifted input range from [0, input_max - input_min] to [0,16.0) - * in order to get corresponding coefficient indexes - */ - input_scaled_v = Q6_Vqf16_vmpy_Vqf16Vqf16(input_shifted_v_hf, scale_v); - - /* - * VLUT 16 requires integer indexes. Shift scaled input range from [0,16.0) - * to [16.0,32.0) in order to convert float indexes to integer values. 
- * Float values, represented in IEEE 754, in range [16.0,32.0] have the - * same exponent, which means 4 MSB of mantissa carry information about - * integer index. - * Use the same input_scaled_v vector for hf and qf16 representation - */ - input_scaled_v = Q6_Vqf16_vadd_Vqf16Vhf(input_scaled_v, const16_0_v_hf); - - /* Convert back from qf16 to hf in order to extract integer index */ - tmp_v = Q6_Vhf_equals_Vqf16(input_scaled_v); - - /* Only 4 MSB bits of mantissa represent segment index */ - idx1_v = Q6_Vuh_vlsr_VuhR(tmp_v, 6); - - /* Ensure only 4 MSB bits of mantissa are used as indexes */ - idx1_v = Q6_V_vand_VV(idx1_v, mask_idx1_v); - - idx1_v = Q6_Vb_vshuff_Vb(idx1_v); - idx1_v = Q6_V_vor_VV(idx1_v, mask_idx2_v); - idx2_v = Q6_Vw_vasl_VwR(idx1_v, 16); - - /* Obtain the polynomial coefficients from lookup table */ - c0_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c0_coeff_dv.VV), 1); - c0_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c0_coeff_vp, idx2_v, Q6_V_hi_W(c0_coeff_dv.VV), 1); - c1_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c1_coeff_dv.VV), 1); - c1_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c1_coeff_vp, idx2_v, Q6_V_hi_W(c1_coeff_dv.VV), 1); - c2_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c2_coeff_dv.VV), 1); - c2_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c2_coeff_vp, idx2_v, Q6_V_hi_W(c2_coeff_dv.VV), 1); - c3_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c3_coeff_dv.VV), 1); - c3_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c3_coeff_vp, idx2_v, Q6_V_hi_W(c3_coeff_dv.VV), 1); - c4_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c4_coeff_dv.VV), 1); - c4_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c4_coeff_vp, idx2_v, Q6_V_hi_W(c4_coeff_dv.VV), 1); - - /* Convert input from hf vector to qf32 vector pair for Horner's method*/ - input_vp_qf32 = Q6_Wqf32_vmpy_VhfVhf(sline1_high, one_v_hf); - - /* Perform evaluation of polynomial using Horner's method */ - output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(c4_coeff_vp), Q6_V_lo_W(input_vp_qf32)); - output_dv.V.lo = 
Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c3_coeff_vp)); - output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(input_vp_qf32)); - output_dv.V.lo = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c2_coeff_vp)); - output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(input_vp_qf32)); - output_dv.V.lo = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c1_coeff_vp)); - output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(input_vp_qf32)); - output_dv.V.lo = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c0_coeff_vp)); - - output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(c4_coeff_vp), Q6_V_hi_W(input_vp_qf32)); - output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c3_coeff_vp)); - output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(input_vp_qf32)); - output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c2_coeff_vp)); - output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(input_vp_qf32)); - output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c1_coeff_vp)); - output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(input_vp_qf32)); - output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c0_coeff_vp)); - - // x * sigmod - // output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(input_vp_qf32), output_dv.V.lo); - // output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(input_vp_qf32), output_dv.V.hi); - - sline1_high = Q6_Vhf_equals_Wqf32(output_dv.VV); - } - - - HVX_Vector sline_high; - HVX_Vector sline_low; - - // { - // // mul - // sline_high = Q6_Vqf16_vmpy_VhfVhf(sline1_high, sline2_high); - // sline_low = Q6_Vqf16_vmpy_VhfVhf(sline1_low, sline2_low); - - // sline_high = Q6_Vhf_equals_Vqf16(sline_high); - // sline_low = Q6_Vhf_equals_Vqf16(sline_low); - // } - - HVX_VectorPair mul_output; - { - // uint8 mul - // (a-128)*(b-128) = a*b - 128 (a+b) + 128*128 - HVX_VectorPair prod1 = 
Q6_Wuh_vmpyacc_WuhVubVub(Q6_W_vcombine_VV(vmb,vmb), sline1, sline2); - HVX_VectorPair prod2 = Q6_Wh_vmpa_WubRub( Q6_W_vcombine_VV(sline2, sline1), 0x80808080); - mul_output = Q6_Wh_vsub_WhWh(prod1, prod2); - - mul_output = Q6_W_vshuff_VVR(Q6_V_hi_W(mul_output), Q6_V_lo_W(mul_output), -2); - - // sline_low = Q6_Vqf16_vmpy_VhfVhf(sline1_low, Q6_Vhf_equals_Vh(Q6_V_lo_W(mul_output))); - // sline_high = Q6_Vqf16_vmpy_VhfVhf(sline1_high, Q6_Vhf_equals_Vh(Q6_V_hi_W(mul_output))); - - } - - { - // scaling quantize - sline_low = Q6_Vqf16_vmpy_VhfVhf(sline1_low, o_scale_vec); - sline_low = Q6_Vh_equals_Vhf(Q6_Vhf_equals_Vqf16(sline_low)); - sline_low = Q6_Vh_vadd_VhVh_sat(Q6_Vh_vmpy_VhVh_s1_rnd_sat(Q6_V_lo_W(mul_output), sline_low), vadj); - - sline_high = Q6_Vqf16_vmpy_VhfVhf(sline1_high, o_scale_vec); - sline_high = Q6_Vh_equals_Vhf(Q6_Vhf_equals_Vqf16(sline_high)); - sline_high = Q6_Vh_vadd_VhVh_sat(Q6_Vh_vmpy_VhVh_s1_rnd_sat(sline_high, Q6_V_hi_W(mul_output)), vadj); - - HVX_Vector sout = Q6_Vub_vasr_VhVhR_rnd_sat( sline_high, sline_low, rsh); - sout = Q6_Vb_vdeal_Vb(sout); - *optr++ = sout; - } - - // { - // // quantize - // HVX_Vector sout1 = Q6_Vqf16_vmpy_Vqf16Vhf(sline_low, o_scale_vec); - // sout1 = Q6_Vqf16_vadd_Vqf16Vqf16(sout1, es_vec); - // sout1 = Q6_Vhf_equals_Vqf16(sout1); - // sout1 = Q6_Vhf_vmin_VhfVhf(sout1, high_level_vec); - // sout1 = Q6_Vhf_vmax_VhfVhf(sout1, low_level_vec); - // HVX_VectorPair sout1_pair = Q6_Wqf32_vmpy_VhfVhf(sout1, one_vec); - // HVX_Vector sout1_low = Q6_Vsf_equals_Vqf32( Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(sout1_pair), round_scale_vec)); - // HVX_Vector sout1_high = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(sout1_pair), round_scale_vec)); - - // sout1_pair = Q6_W_vshuff_VVR(sout1_high, sout1_low, -4); - // sout1_low = Q6_V_lo_W(sout1_pair); - // sout1_high = Q6_V_hi_W(sout1_pair); - - - // // { - // // HVX_Vector exp = Q6_Vh_vasr_VhR(sout1, FP16_MANTISA); - // // exp = Q6_V_vand_VV(exp, expmask); - // // exp = 
Q6_Vh_vsub_VhVh(exp, expbias); - - // // HVX_Vector man = Q6_Vh_vasr_VhVh(manmask, exp); - // // HVX_Vector manzero = Q6_V_vand_VV(sout1, man); - - // // HVX_Vector sign = Q6_Vh_vasr_VhR(sout1, FP16_SIGN); - // // HVX_Vector issignpos = Q6_Q_vcmp_eq_VhVh(sign, zero); - - // // HVX_Vector expgte23 = Q6_Q_vcmp_gt_VhVh(exp, exp23); - // // HVX_Vector expgte0 = Q6_Q_vcmp_gt_VhVh(exp, exp0); - // // HVX_Vector maneqzero = Q6_Q_vcmp_eq_VhVh(manzero, zero); - - // // HVX_Vector exppos_signneg = Q6_Vh_vadd_VhVh(sout1, man); - // // man = Q6_V_vnot_V(man); - // // HVX_Vector exppos_signpos = Q6_V_vand_VV(sout1, man); - // // exppos_signneg = Q6_V_vand_VV(exppos_signneg, man); - // // HVX_Vector shift1 = Q6_Vh_vasl_VhR(sout1, 1); - // // HVX_Vector iszero = Q6_Q_vcmp_eq_VhVh(shift1, zero); - - // // // exp >= 0 - // // HVX_Vector tsout1 = Q6_V_vmux_QVV(issignpos, exppos_signpos, exppos_signneg); - // // tsout1 = Q6_V_vmux_QVV(maneqzero, sout1, tsout1); - - // // // exp < 0 (-1, 1) - // // HVX_Vector tsout2 = Q6_V_vmux_QVV(iszero, sout1, negone); - // // tsout2 = Q6_V_vmux_QVV(issignpos, zero, tsout2); - - // // tsout1 = Q6_V_vmux_QVV(expgte0, tsout1, tsout2); - // // sout1 = Q6_V_vmux_QVV(expgte23, sout1, tsout1); - // // } - - // sout1_low = Q6_Vw_equals_Vsf(sout1_low); - // sout1_low = Q6_Vw_vasr_VwR(sout1_low, ROUND_2_SCALE); - // sout1_high = Q6_Vw_equals_Vsf(sout1_high); - // sout1_high = Q6_Vw_vasr_VwR(sout1_high, ROUND_2_SCALE); - - - // HVX_Vector sout2 = Q6_Vqf16_vmpy_Vqf16Vhf(sline_high, o_scale_vec); - // sout2 = Q6_Vqf16_vadd_Vqf16Vqf16(sout2, es_vec); - // sout2 = Q6_Vhf_equals_Vqf16(sout2); - // sout2 = Q6_Vhf_vmin_VhfVhf(sout2, high_level_vec); - // sout2 = Q6_Vhf_vmax_VhfVhf(sout2, low_level_vec); - // HVX_VectorPair sout2_pair = Q6_Wqf32_vmpy_VhfVhf(sout2, one_vec); - // HVX_Vector sout2_low = Q6_Vsf_equals_Vqf32( Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(sout2_pair), round_scale_vec)); - // HVX_Vector sout2_high = 
Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(sout2_pair), round_scale_vec)); - - // sout2_pair = Q6_W_vshuff_VVR(sout2_high, sout2_low, -4); - // sout2_low = Q6_V_lo_W(sout2_pair); - // sout2_high = Q6_V_hi_W(sout2_pair); - - // // { - // // HVX_Vector exp = Q6_Vh_vasr_VhR(sout2, FP16_MANTISA); - // // exp = Q6_V_vand_VV(exp, expmask); - // // exp = Q6_Vh_vsub_VhVh(exp, expbias); - - // // HVX_Vector man = Q6_Vh_vasr_VhVh(manmask, exp); - // // HVX_Vector manzero = Q6_V_vand_VV(sout2, man); - - // // HVX_Vector sign = Q6_Vh_vasr_VhR(sout2, FP16_SIGN); - // // HVX_Vector issignpos = Q6_Q_vcmp_eq_VhVh(sign, zero); - - // // HVX_Vector expgte23 = Q6_Q_vcmp_gt_VhVh(exp, exp23); - // // HVX_Vector expgte0 = Q6_Q_vcmp_gt_VhVh(exp, exp0); - // // HVX_Vector maneqzero = Q6_Q_vcmp_eq_VhVh(manzero, zero); - - // // HVX_Vector exppos_signneg = Q6_Vh_vadd_VhVh(sout2, man); - // // man = Q6_V_vnot_V(man); - // // HVX_Vector exppos_signpos = Q6_V_vand_VV(sout2, man); - // // exppos_signneg = Q6_V_vand_VV(exppos_signneg, man); - // // HVX_Vector shift1 = Q6_Vh_vasl_VhR(sout2, 1); - // // HVX_Vector iszero = Q6_Q_vcmp_eq_VhVh(shift1, zero); - - // // // exp >= 0 - // // HVX_Vector tsout1 = Q6_V_vmux_QVV(issignpos, exppos_signpos, exppos_signneg); - // // tsout1 = Q6_V_vmux_QVV(maneqzero, sout2, tsout1); - - // // // exp < 0 (-1, 1) - // // HVX_Vector tsout2 = Q6_V_vmux_QVV(iszero, sout2, negone); - // // tsout2 = Q6_V_vmux_QVV(issignpos, zero, tsout2); - - // // tsout1 = Q6_V_vmux_QVV(expgte0, tsout1, tsout2); - // // sout2 = Q6_V_vmux_QVV(expgte23, sout2, tsout1); - // // } - - // sout2_low = Q6_Vw_equals_Vsf(sout2_low); - // sout2_low = Q6_Vw_vasr_VwR(sout2_low, ROUND_2_SCALE); - // sout2_high = Q6_Vw_equals_Vsf(sout2_high); - // sout2_high = Q6_Vw_vasr_VwR(sout2_high, ROUND_2_SCALE); - - // HVX_Vector reql_h = Q6_Vh_vpack_VwVw_sat(sout1_high, sout1_low); - // HVX_Vector reqh_h = Q6_Vh_vpack_VwVw_sat(sout2_high, sout2_low); - // HVX_Vector req_b = 
Q6_Vb_vpack_VhVh_sat(reqh_h, reql_h); - - // *optr++ = Q6_Vb_vadd_VbVb(req_b, uintconvert); - // } - - - - - - sline1p = sline1c; - sline2p = sline2c; - } - } - - if (vectors_in_rounddown > 0) { - - o_scale_vec = Q6_V_vsplat_R(float_to_fp16s(o_scale)); - - sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? sline1p : *iptr++; - sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t) input); - - sline2c = is_aligned(iptr2, VLEN) && leftover == 0 ? sline2p : *iptr2++; - sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t) input2); - - - HVX_Vector sline1_high; - HVX_Vector sline1_low; - HVX_Vector sline2_high; - HVX_Vector sline2_low; - - { - // dequantize sline1 qf16 - HVX_VectorPair temp = Q6_Wh_vadd_VubVub(sline1, zero_v_sf); - - temp = Q6_W_vshuff_VVR(Q6_V_hi_W(temp), Q6_V_lo_W(temp), -2); - HVX_Vector sout1 = Q6_Vh_vsub_VhVh(Q6_V_lo_W(temp), convert_vector); - HVX_Vector sout2 = Q6_Vh_vsub_VhVh(Q6_V_hi_W(temp), convert_vector); - - sline1_low = Q6_Vqf16_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout1), a_scale_vec); - sline1_low = Q6_Vhf_equals_Vqf16(sline1_low); - sline1_high = Q6_Vqf16_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout2), a_scale_vec); - sline1_high = Q6_Vhf_equals_Vqf16(sline1_high); - } - - - { - // dequantize sline2 qf16 - HVX_VectorPair temp = Q6_Wh_vadd_VubVub(sline2, zero_v_sf); - - temp = Q6_W_vshuff_VVR(Q6_V_hi_W(temp), Q6_V_lo_W(temp), -2); - HVX_Vector sout1 = Q6_Vh_vsub_VhVh(Q6_V_lo_W(temp), convert_vector); - HVX_Vector sout2 = Q6_Vh_vsub_VhVh(Q6_V_hi_W(temp), convert_vector); - - sline2_low = Q6_Vqf16_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout1), b_scale_vec); - sline2_low = Q6_Vhf_equals_Vqf16(sline2_low); - sline2_high = Q6_Vqf16_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout2), b_scale_vec); - sline2_high = Q6_Vhf_equals_Vqf16(sline2_high); - } - - { - // silu sline1_low - tmp_v = Q6_Vh_vdeal_Vh(sline1_low); - - /* Shift input range from [input_min, input_max] to [0, input_max - input_min] */ - input_shifted_v_hf = Q6_Vqf16_vsub_VhfVhf(tmp_v, input_min_v_hf); - - /* - * Scale shifted 
input range from [0, input_max - input_min] to [0,16.0) - * in order to get corresponding coefficient indexes - */ - input_scaled_v = Q6_Vqf16_vmpy_Vqf16Vqf16(input_shifted_v_hf, scale_v); - - /* - * VLUT 16 requires integer indexes. Shift scaled input range from [0,16.0) - * to [16.0,32.0) in order to convert float indexes to integer values. - * Float values, represented in IEEE 754, in range [16.0,32.0] have the - * same exponent, which means 4 MSB of mantissa carry information about - * integer index. - * Use the same input_scaled_v vector for hf and qf16 representation - */ - input_scaled_v = Q6_Vqf16_vadd_Vqf16Vhf(input_scaled_v, const16_0_v_hf); - - /* Convert back from qf16 to hf in order to extract integer index */ - tmp_v = Q6_Vhf_equals_Vqf16(input_scaled_v); - - /* Only 4 MSB bits of mantissa represent segment index */ - idx1_v = Q6_Vuh_vlsr_VuhR(tmp_v, 6); - - /* Ensure only 4 MSB bits of mantissa are used as indexes */ - idx1_v = Q6_V_vand_VV(idx1_v, mask_idx1_v); - - idx1_v = Q6_Vb_vshuff_Vb(idx1_v); - idx1_v = Q6_V_vor_VV(idx1_v, mask_idx2_v); - idx2_v = Q6_Vw_vasl_VwR(idx1_v, 16); - - /* Obtain the polynomial coefficients from lookup table */ - c0_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c0_coeff_dv.VV), 1); - c0_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c0_coeff_vp, idx2_v, Q6_V_hi_W(c0_coeff_dv.VV), 1); - c1_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c1_coeff_dv.VV), 1); - c1_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c1_coeff_vp, idx2_v, Q6_V_hi_W(c1_coeff_dv.VV), 1); - c2_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c2_coeff_dv.VV), 1); - c2_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c2_coeff_vp, idx2_v, Q6_V_hi_W(c2_coeff_dv.VV), 1); - c3_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c3_coeff_dv.VV), 1); - c3_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c3_coeff_vp, idx2_v, Q6_V_hi_W(c3_coeff_dv.VV), 1); - c4_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c4_coeff_dv.VV), 1); - c4_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c4_coeff_vp, idx2_v, Q6_V_hi_W(c4_coeff_dv.VV), 
1); - - /* Convert input from hf vector to qf32 vector pair for Horner's method*/ - input_vp_qf32 = Q6_Wqf32_vmpy_VhfVhf(sline1_low, one_v_hf); - - /* Perform evaluation of polynomial using Horner's method */ - output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(c4_coeff_vp), Q6_V_lo_W(input_vp_qf32)); - output_dv.V.lo = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c3_coeff_vp)); - output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(input_vp_qf32)); - output_dv.V.lo = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c2_coeff_vp)); - output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(input_vp_qf32)); - output_dv.V.lo = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c1_coeff_vp)); - output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(input_vp_qf32)); - output_dv.V.lo = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c0_coeff_vp)); - - output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(c4_coeff_vp), Q6_V_hi_W(input_vp_qf32)); - output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c3_coeff_vp)); - output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(input_vp_qf32)); - output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c2_coeff_vp)); - output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(input_vp_qf32)); - output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c1_coeff_vp)); - output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(input_vp_qf32)); - output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c0_coeff_vp)); - - // x * sigmod - output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(input_vp_qf32), output_dv.V.lo); - output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(input_vp_qf32), output_dv.V.hi); - - sline1_low = Q6_Vhf_equals_Wqf32(output_dv.VV); - } - - - { - // silu sline1_high - tmp_v = Q6_Vh_vdeal_Vh(sline1_high); - - /* Shift input range from [input_min, input_max] to [0, input_max - 
input_min] */ - input_shifted_v_hf = Q6_Vqf16_vsub_VhfVhf(tmp_v, input_min_v_hf); - - /* - * Scale shifted input range from [0, input_max - input_min] to [0,16.0) - * in order to get corresponding coefficient indexes - */ - input_scaled_v = Q6_Vqf16_vmpy_Vqf16Vqf16(input_shifted_v_hf, scale_v); - - /* - * VLUT 16 requires integer indexes. Shift scaled input range from [0,16.0) - * to [16.0,32.0) in order to convert float indexes to integer values. - * Float values, represented in IEEE 754, in range [16.0,32.0] have the - * same exponent, which means 4 MSB of mantissa carry information about - * integer index. - * Use the same input_scaled_v vector for hf and qf16 representation - */ - input_scaled_v = Q6_Vqf16_vadd_Vqf16Vhf(input_scaled_v, const16_0_v_hf); - - /* Convert back from qf16 to hf in order to extract integer index */ - tmp_v = Q6_Vhf_equals_Vqf16(input_scaled_v); - - /* Only 4 MSB bits of mantissa represent segment index */ - idx1_v = Q6_Vuh_vlsr_VuhR(tmp_v, 6); - - /* Ensure only 4 MSB bits of mantissa are used as indexes */ - idx1_v = Q6_V_vand_VV(idx1_v, mask_idx1_v); - - idx1_v = Q6_Vb_vshuff_Vb(idx1_v); - idx1_v = Q6_V_vor_VV(idx1_v, mask_idx2_v); - idx2_v = Q6_Vw_vasl_VwR(idx1_v, 16); - - /* Obtain the polynomial coefficients from lookup table */ - c0_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c0_coeff_dv.VV), 1); - c0_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c0_coeff_vp, idx2_v, Q6_V_hi_W(c0_coeff_dv.VV), 1); - c1_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c1_coeff_dv.VV), 1); - c1_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c1_coeff_vp, idx2_v, Q6_V_hi_W(c1_coeff_dv.VV), 1); - c2_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c2_coeff_dv.VV), 1); - c2_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c2_coeff_vp, idx2_v, Q6_V_hi_W(c2_coeff_dv.VV), 1); - c3_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, Q6_V_lo_W(c3_coeff_dv.VV), 1); - c3_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c3_coeff_vp, idx2_v, Q6_V_hi_W(c3_coeff_dv.VV), 1); - c4_coeff_vp = Q6_Wh_vlut16_VbVhR(idx1_v, 
Q6_V_lo_W(c4_coeff_dv.VV), 1); - c4_coeff_vp = Q6_Wh_vlut16or_WhVbVhR(c4_coeff_vp, idx2_v, Q6_V_hi_W(c4_coeff_dv.VV), 1); - - /* Convert input from hf vector to qf32 vector pair for Horner's method*/ - input_vp_qf32 = Q6_Wqf32_vmpy_VhfVhf(sline1_high, one_v_hf); - - /* Perform evaluation of polynomial using Horner's method */ - output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(c4_coeff_vp), Q6_V_lo_W(input_vp_qf32)); - output_dv.V.lo = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c3_coeff_vp)); - output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(input_vp_qf32)); - output_dv.V.lo = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c2_coeff_vp)); - output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(input_vp_qf32)); - output_dv.V.lo = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c1_coeff_vp)); - output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(input_vp_qf32)); - output_dv.V.lo = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.lo, Q6_V_lo_W(c0_coeff_vp)); - - output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(c4_coeff_vp), Q6_V_hi_W(input_vp_qf32)); - output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c3_coeff_vp)); - output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(input_vp_qf32)); - output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c2_coeff_vp)); - output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(input_vp_qf32)); - output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c1_coeff_vp)); - output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(input_vp_qf32)); - output_dv.V.hi = Q6_Vqf32_vadd_Vqf32Vqf32(output_dv.V.hi, Q6_V_hi_W(c0_coeff_vp)); - - // x * sigmod - output_dv.V.lo = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(input_vp_qf32), output_dv.V.lo); - output_dv.V.hi = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(input_vp_qf32), output_dv.V.hi); - - sline1_high = Q6_Vhf_equals_Wqf32(output_dv.VV); - } - - - HVX_Vector sline_high; 
- HVX_Vector sline_low; - - { - // mul - sline_high = Q6_Vqf16_vmpy_VhfVhf(sline1_high, sline2_high); - sline_low = Q6_Vqf16_vmpy_VhfVhf(sline1_low, sline2_low); - - sline_high = Q6_Vhf_equals_Vqf16(sline_high); - sline_low = Q6_Vhf_equals_Vqf16(sline_low); - } - - - { - // quantize - HVX_Vector sout1 = Q6_Vqf16_vmpy_VhfVhf(sline_low, o_scale_vec); - sout1 = Q6_Vqf16_vadd_Vqf16Vqf16(sout1, es_vec); - sout1 = Q6_Vhf_equals_Vqf16(sout1); - sout1 = Q6_Vhf_vmin_VhfVhf(sout1, high_level_vec); - sout1 = Q6_Vhf_vmax_VhfVhf(sout1, low_level_vec); - - { - HVX_Vector exp = Q6_Vh_vasr_VhR(sout1, FP16_MANTISA); - exp = Q6_V_vand_VV(exp, expmask); - exp = Q6_Vh_vsub_VhVh(exp, expbias); - - HVX_Vector man = Q6_Vh_vasr_VhVh(manmask, exp); - HVX_Vector manzero = Q6_V_vand_VV(sout1, man); - - HVX_Vector sign = Q6_Vh_vasr_VhR(sout1, FP16_SIGN); - HVX_Vector issignpos = Q6_Q_vcmp_eq_VhVh(sign, zero); - - HVX_Vector expgte23 = Q6_Q_vcmp_gt_VhVh(exp, exp23); - HVX_Vector expgte0 = Q6_Q_vcmp_gt_VhVh(exp, exp0); - HVX_Vector maneqzero = Q6_Q_vcmp_eq_VhVh(manzero, zero); - - HVX_Vector exppos_signneg = Q6_Vh_vadd_VhVh(sout1, man); - man = Q6_V_vnot_V(man); - HVX_Vector exppos_signpos = Q6_V_vand_VV(sout1, man); - exppos_signneg = Q6_V_vand_VV(exppos_signneg, man); - HVX_Vector shift1 = Q6_Vh_vasl_VhR(sout1, 1); - HVX_Vector iszero = Q6_Q_vcmp_eq_VhVh(shift1, zero); - - // exp >= 0 - HVX_Vector tsout1 = Q6_V_vmux_QVV(issignpos, exppos_signpos, exppos_signneg); - tsout1 = Q6_V_vmux_QVV(maneqzero, sout1, tsout1); - - // exp < 0 (-1, 1) - HVX_Vector tsout2 = Q6_V_vmux_QVV(iszero, sout1, negone); - tsout2 = Q6_V_vmux_QVV(issignpos, zero, tsout2); - - tsout1 = Q6_V_vmux_QVV(expgte0, tsout1, tsout2); - sout1 = Q6_V_vmux_QVV(expgte23, sout1, tsout1); - } - - sout1 = Q6_Vh_equals_Vhf(sout1); - - - HVX_Vector sout2 = Q6_Vqf16_vmpy_VhfVhf(sline_high, o_scale_vec); - sout2 = Q6_Vqf16_vadd_Vqf16Vqf16(sout2, es_vec); - sout2 = Q6_Vhf_equals_Vqf16(sout2); - sout2 = Q6_Vhf_vmin_VhfVhf(sout2, 
high_level_vec); - sout2 = Q6_Vhf_vmax_VhfVhf(sout2, low_level_vec); - - { - HVX_Vector exp = Q6_Vh_vasr_VhR(sout2, FP16_MANTISA); - exp = Q6_V_vand_VV(exp, expmask); - exp = Q6_Vh_vsub_VhVh(exp, expbias); - - HVX_Vector man = Q6_Vh_vasr_VhVh(manmask, exp); - HVX_Vector manzero = Q6_V_vand_VV(sout2, man); - - HVX_Vector sign = Q6_Vh_vasr_VhR(sout2, FP16_SIGN); - HVX_Vector issignpos = Q6_Q_vcmp_eq_VhVh(sign, zero); - - HVX_Vector expgte23 = Q6_Q_vcmp_gt_VhVh(exp, exp23); - HVX_Vector expgte0 = Q6_Q_vcmp_gt_VhVh(exp, exp0); - HVX_Vector maneqzero = Q6_Q_vcmp_eq_VhVh(manzero, zero); - - HVX_Vector exppos_signneg = Q6_Vh_vadd_VhVh(sout2, man); - man = Q6_V_vnot_V(man); - HVX_Vector exppos_signpos = Q6_V_vand_VV(sout2, man); - exppos_signneg = Q6_V_vand_VV(exppos_signneg, man); - HVX_Vector shift1 = Q6_Vh_vasl_VhR(sout2, 1); - HVX_Vector iszero = Q6_Q_vcmp_eq_VhVh(shift1, zero); - - // exp >= 0 - HVX_Vector tsout1 = Q6_V_vmux_QVV(issignpos, exppos_signpos, exppos_signneg); - tsout1 = Q6_V_vmux_QVV(maneqzero, sout2, tsout1); - - // exp < 0 (-1, 1) - HVX_Vector tsout2 = Q6_V_vmux_QVV(iszero, sout2, negone); - tsout2 = Q6_V_vmux_QVV(issignpos, zero, tsout2); - - tsout1 = Q6_V_vmux_QVV(expgte0, tsout1, tsout2); - sout2 = Q6_V_vmux_QVV(expgte23, sout2, tsout1); - } - - sout2 = Q6_Vh_equals_Vhf(sout2); - - HVX_Vector reql_h = Q6_Vb_vpack_VhVh_sat(sout2, sout1); - *optr++ = Q6_Vb_vadd_VbVb(reql_h, uintconvert); - - } - - } - - // // Handle leftover elements. - // if (leftover_size > 0) { - // sline1c = (is_in_one_chunk(iptr, leftover_size, VLEN) - // ? sline1p - // : *iptr++); - // sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t)input); - - - // sline2c = (is_in_one_chunk(iptr2, leftover_size, VLEN) - // ? 
sline2p - // : *iptr2++); - // sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t)input2); - - // vstu_variable(optr, leftover_size, Q6_Vhf_equals_Vqf16(Q6_Vqf16_vmpy_VhfVhf(sline1, sline2))); - // } - - return 0; -} - - -template -GraphStatus llamasupersiluImpl(TensorType& out_0, - const TensorType& in_0, - const TensorType& in_1, - const PlainFloatTensor& a_scale, - const PlainFloatTensor& b_scale, - const PlainFloatTensor& o_scale) - -{ - /* - * add code here - * */ - /* - * To have good performance and stability, it is required to avoid heap memory - * allocation in this function. The heap memory allocation includes but not - * limited to calling malloc, operator new, constructing STL container objects - * like std::vector with default allocator, and adding items like calling - * std::vector::push_back to STL container objects with default allocator. - * - * Please check in SDK documentation for more information. - */ - out_0.set_dims(in_0); - - auto [b_in, h_in, w_in, d_in] = in_0.dims(); - size_t size = b_in*h_in*w_in*d_in; - - - float a_scale_ = a_scale(0,0,0,0); - float b_scale_ = b_scale(0,0,0,0); - float o_scale_ = o_scale(0,0,0,0); - - auto in_ptr = (uint8_t*)in_0.raw_data_const(); - auto in_ptr2 = (uint8_t*)in_1.raw_data_const(); - - auto out_ptr = (uint8_t*)out_0.raw_data(); - - - DType dtype = in_0.get_dtype(); - - if (dtype == DType::QUInt8 && out_0.get_dtype() == DType::QUInt8) { - hvx_supersilu_ahf(in_ptr, in_ptr2, out_ptr, a_scale_, b_scale_, 1.0f/o_scale_, size); - } - - return GraphStatus::Success; -} - -#else - -template -GraphStatus llamasupersiluImpl(TensorType& out_0, - const TensorType& in_0, - const TensorType& in_1, - const PlainFloatTensor& a_scale, - const PlainFloatTensor& b_scale, - const PlainFloatTensor& o_scale) - -{ - /* - * add code here - * */ - /* - * To have good performance and stability, it is required to avoid heap memory - * allocation in this function. 
The heap memory allocation includes but not - * limited to calling malloc, operator new, constructing STL container objects - * like std::vector with default allocator, and adding items like calling - * std::vector::push_back to STL container objects with default allocator. - * - * Please check in SDK documentation for more information. - */ - - out_0.set_dims(in_0); - - float a_scale_ = a_scale(0,0,0,0); - float b_scale_ = b_scale(0,0,0,0); - float o_scale_ = o_scale(0,0,0,0); - - auto in_ptr = (uint8_t*)in_0.raw_data_const(); - auto in_ptr2 = (uint8_t*)in_1.raw_data_const(); - - auto out_ptr = (uint8_t*)out_0.raw_data(); - - - auto [b_in, h_in, w_in, d_in] = in_0.dims(); - for (Idx b = 0; b < b_in; b++) { - for (Idx h = 0; h < h_in; h++) { - for (Idx w = 0; w < w_in; w++) { - // mul - for (Idx d = 0; d < d_in; d++) { - - - int32_t a_inval = static_cast(*in_ptr++); - float a_inval_fp16 = (a_inval-128) * a_scale_; - - - int32_t b_inval = static_cast(*in_ptr2++); - float b_inval_fp16 = (b_inval-128) * b_scale_; - - - a_inval_fp16 = a_inval_fp16 * (1 / (1 + expf(-a_inval_fp16))); - - float inval = a_inval_fp16 * b_inval_fp16; - - long v = lroundf(inval / o_scale_); - - if (v > 127) - v = 127; - - if (v < -128) - v = -128; - - v += 128; - - *out_ptr++ = static_cast(v); - - } - } - } - } - - - return GraphStatus::Success; -} - -#endif - -__attribute__((unused)) static float llamasupersiluCostFunc(const Op *op) -{ - /* - * add code here - * */ - - float cost = 0.0; // add cost computation here - return cost; -} - - - - - -/* At the bottom of the op file, call END_PKG_OP_DEFINITION(), - where is as BEGIN_PKG_OP_DEFINITION -*/ -END_PKG_OP_DEFINITION(PKG_LLaMASuperSiLU); \ No newline at end of file diff --git a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/RoPE.cpp b/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/RoPE.cpp deleted file mode 100755 index 3aaeccf00..000000000 --- a/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/src/ops/RoPE.cpp +++ 
/dev/null @@ -1,1079 +0,0 @@ -//============================================================================== -// Auto Generated Code for LLaMAPackage -//============================================================================== - -#include "HTP/core/constraints.h" -#include "HTP/core/op_package_feature_support.h" -#include "HTP/core/op_register_ext.h" -#include "HTP/core/optimize.h" -#include "QnnOpPackage.h" -#include "HTP/core/simple_reg.h" - - -BEGIN_PKG_OP_DEFINITION(PKG_RoPE); - - -// op execute function declarations -template -GraphStatus ropeImpl(TensorType& out_0, - const TensorType& in_0, - const TensorType& sin, - const TensorType& cos, - const TensorType1 &h_cnt, - const Tensor& pose_type); - - -// forward declaration of sample cost function -static float ropeCostFunc(const Op *op); - -/* - * method 1 for defining op, using default cost value (i.e. GLACIAL) and default flag (Flags::RESOURCE_HVX) - * syntax: DEF_PACKAGE_OP(F,OP) - * e.g. DEF_PACKAGE_OP((ropeImpl), "RoPE") - */ -DEF_PACKAGE_OP((ropeImpl), "RoPE") - -/* - * method 2 for defining op with specified cost value (one of GLACIAL, SNAIL, FAST, FREE) - * and provided flags - * syntax: DEF_PACKAGE_OP_AND_COST_AND_FLAGS(F,OP,COST,...) - * can use zero or more flags, FLAG options are IS_CONST, INHIBIT_CONST_PROP, - * RESOURCE_HVX, RESOURCE_HMX(not supported in external op packages) - * e.g. DEF_PACKAGE_OP_AND_COST_AND_FLAGS((ropeImpl), "RoPE", SNAIL) - */ - -/* - * method 3 for defining op with cost function pointer and provided flags - * cost function pointer type: typedef float (*cost_function) (const Op * op); - * syntax: DEF_PACKAGE_OP_AND_COST_F_AND_FLAGS(F,OP,COST_F,...) - * e.g. 
DEF_PACKAGE_OP_AND_COST_F_AND_FLAGS((ropeImpl), - * "RoPE", ropeCostFunc, Flags::RESOURCE_HVX) - */ - -/* - * optimization definitions - * need to be global in the package - * one definition per optimization - * syntax: DEF_PACKAGE_OPTIMIZATION(PRIORITY,MATCHCODE,CONSTRAINTCODE,REPLACECODE) - * PRIORITY predefined values include EARLY(2000), MIDDLE(3000), LATE(4000) - * HTP core provides some replacement functions for op package to use - * for more information about optimization rules, please refer to HTP core documentations - */ - -/* - * op parameter order definitions - * need to be global in the package - * one definition per op, and this is optional - * syntax: DEF_PACKAGE_PARAM_ORDER(OP,PARAM1,MANDATORY1,DEFAULT1,PARAM2,MANDATORY2,DEFAULT2...) - * one or more parameters can be specified for each op - * order of parameters listed determines the order of parameters passed into op execution functions - * if an op does not have a parameter order definition, parameter order passed into Qnn_addNode - * will be passed into op execution functions - * if an op has a parameter order definition, any parameter passed into Qnn_addNode with unlisted - * name will be abandoned - * if two or more op packages with the same package name will be registered, they cannot list - * conflicting parameter orders - * PARAM refers to parameter name as a string literal - * MANDATORY refers to whether this parameter is required to be provided at Qnn_addNode - * DEFAULT is used when MANDATORY is false - * if provided as Qnn_Param_t*, - * DEFAULT will be used for graph construction when this parameter is not provided at - * Qnn_addNode - * if provided as nullptr, - * graph construction will skip this parameter when this parameter is not provided at - * Qnn_addNode - */ -DEF_PACKAGE_PARAM_ORDER("RoPE", - "pose_type", - true, - nullptr) - - -/* execute functions for ops */ - -#ifndef REFERENCE_OP - -#include "qhmath_hvx.h" -#include "hvx_internal.h" -#include -#include - -#define BLOCK_SIZE 
(8*1024/VLEN) /* vector chunks */ -#define L2FETCH_AHEAD (BLOCK_SIZE) -#define ONE 0x3F800000 -#define M_ONE 0xAF800000 - -int32_t hvx_rope_af( - float *restrict input, - float *restrict sin, - float *restrict cos, - float *restrict output, - uint32_t size, - uint32_t partial_dimension) -{ - if ((input == NULL) || (output == NULL) || (size == 0)) - { - return -1; - } - - HVX_Vector *iptr = (HVX_Vector *)input; - HVX_Vector *iptr_half = (HVX_Vector *)(input + partial_dimension/2); - HVX_Vector *iptr2 = (HVX_Vector *)sin; - HVX_Vector *iptr3 = (HVX_Vector *)cos; - HVX_UVector *optr = (HVX_UVector *)output; - HVX_UVector *optr_half = (HVX_UVector *)(output + partial_dimension/2);; - HVX_Vector sline1; - HVX_Vector sline1_half; - HVX_Vector sinline1p, sinline1c, sinline1; - HVX_Vector cosline1p, cosline1c, cosline1; - - int32_t l2fetch_block; - int32_t leftover = size & 31; - int32_t vectors_in_rounddown = size / 32; - int32_t leftover_size = leftover * sizeof(float); - - sinline1p = *iptr2++; - cosline1p = *iptr3++; - - for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) - { - l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); - - if (l2fetch_block > 0) - { - l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); - l2fetch(iptr2 + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); - l2fetch(iptr3 + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); - } - - for (int32_t d = 0; d < partial_dimension/2; d+=32) { - cosline1c = *iptr3++; - cosline1 = Q6_V_valign_VVR(cosline1c, cosline1p, (size_t)cos); - cosline1p = cosline1c; - - sinline1c = *iptr2++; - sinline1 = Q6_V_valign_VVR(sinline1c, sinline1p, (size_t)sin); - sinline1p = sinline1c; - - - HVX_Vector *jiptr = iptr + d/32; - HVX_Vector *jiptr_half = iptr_half + d/32; - HVX_Vector *joptr = optr + d/32; - HVX_Vector *joptr_half = optr_half + d/32; - - for (int32_t j = 0; j < size/partial_dimension; j++) - { - sline1 = *jiptr; - sline1_half = *jiptr_half; - - // auto value = in_value * cos_value - 
in_value_2 * sin_value; - { - HVX_Vector cos_middle_value_qf32 = Q6_Vqf32_vmpy_VsfVsf(sline1, cosline1); - HVX_Vector sin_middle_value_qf32 = Q6_Vqf32_vmpy_VsfVsf(sline1_half, sinline1); - *joptr = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vsub_Vqf32Vqf32(cos_middle_value_qf32, sin_middle_value_qf32)); - } - - - - // auto value2 = in_value * sin_value + in_value_2 * cos_value; - { - HVX_Vector cos_middle_value_qf32 = Q6_Vqf32_vmpy_VsfVsf(sline1_half, cosline1); - HVX_Vector sin_middle_value_qf32 = Q6_Vqf32_vmpy_VsfVsf(sline1, sinline1); - *joptr_half = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vqf32(cos_middle_value_qf32, sin_middle_value_qf32)); - } - - jiptr += partial_dimension/32; - jiptr_half += partial_dimension/32; - joptr += partial_dimension/32; - joptr_half += partial_dimension/32; - - } - - - } - - } - - // if (vectors_in_rounddown > 0) { - - // sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? sline1p : *iptr++; - // sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t) input); - // sum = Q6_Vqf32_vadd_Vqf32Vqf32(sum, Q6_Vqf32_vmpy_VsfVsf(sline1, sline1)); - - // } - - - if (leftover_size > 0) - return -1; - - return 0; -} - -static inline int32_t float_to_fp16s(float input) -{ - union { - int32_t i; - __fp16 f[2]; - } fp32 = {.f = {(__fp16)input, (__fp16)input}}; - return fp32.i; -} - -int32_t hvx_rope_uint8_af( - uint8_t *restrict input, - float *restrict sin, - float *restrict cos, - float *restrict output, - uint32_t size, - uint32_t partial_dimension) -{ - if ((input == NULL) || (output == NULL) || (size == 0)) - { - return -1; - } - - HVX_Vector *iptr = (HVX_Vector *)input; - HVX_Vector *iptr2 = (HVX_Vector *)sin; - HVX_Vector *iptr3 = (HVX_Vector *)cos; - HVX_UVector *optr = (HVX_UVector *)output; - - int32_t l2fetch_block; - int32_t leftover = size & 127; - int32_t vectors_in_rounddown = size / 128; - int32_t leftover_size = leftover * sizeof(float); - - HVX_Vector zero_v_sf = Q6_V_vzero(); - uint32_t convert = 0x00800080; - HVX_Vector convert_vector = 
Q6_V_vsplat_R(convert); - HVX_Vector one_vec = Q6_V_vsplat_R(float_to_fp16s(1.0)); - - // - for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) - { - l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); - - if (l2fetch_block > 0) - { - l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); - } - - // - HVX_Vector sinline1_low = *iptr2; - HVX_Vector cosline1_low = *iptr3; - sinline1_low = Q6_Vqf32_vadd_VsfVsf(sinline1_low, Q6_V_vzero()); - cosline1_low = Q6_Vqf32_vadd_VsfVsf(cosline1_low, Q6_V_vzero()); - - - HVX_Vector sinline1_high = *(iptr2+1); - HVX_Vector cosline1_high = *(iptr3+1); - sinline1_high = Q6_Vqf32_vadd_VsfVsf(sinline1_high, Q6_V_vzero()); - cosline1_high = Q6_Vqf32_vadd_VsfVsf(cosline1_high, Q6_V_vzero()); - - for (int32_t j = 0; j < size/partial_dimension; j++) { - - HVX_Vector sline1 = *iptr++; - - HVX_VectorPair temp = Q6_Wh_vadd_VubVub(sline1, zero_v_sf); - - temp = Q6_W_vshuff_VVR(Q6_V_hi_W(temp), Q6_V_lo_W(temp), -2); - HVX_Vector sout1 = Q6_Vh_vsub_VhVh(Q6_V_lo_W(temp), convert_vector); - HVX_Vector sout2 = Q6_Vh_vsub_VhVh(Q6_V_hi_W(temp), convert_vector); - - HVX_VectorPair result1 = Q6_Wqf32_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout1), one_vec); - result1 = Q6_W_vshuff_VVR(Q6_V_hi_W(result1), Q6_V_lo_W(result1), -4); - - HVX_VectorPair result2 = Q6_Wqf32_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout2), one_vec); - result2 = Q6_W_vshuff_VVR(Q6_V_hi_W(result2), Q6_V_lo_W(result2), -4); - - - - // auto value = in_value * cos_value - in_value_2 * sin_value; - { - HVX_Vector cos_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result1), cosline1_low); - HVX_Vector sin_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result2), sinline1_low); - *optr = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vsub_Vqf32Vqf32(cos_middle_value_qf32, sin_middle_value_qf32)); - } - - - - // auto value2 = in_value * sin_value + in_value_2 * cos_value; - { - HVX_Vector cos_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result2), cosline1_low); - 
HVX_Vector sin_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result1), sinline1_low); - *(optr+2) = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vqf32(cos_middle_value_qf32, sin_middle_value_qf32)); - } - - - // auto value = in_value * cos_value - in_value_2 * sin_value; - { - HVX_Vector cos_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result1), cosline1_high); - HVX_Vector sin_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result2), sinline1_high); - *(optr+1) = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vsub_Vqf32Vqf32(cos_middle_value_qf32, sin_middle_value_qf32)); - } - - - - // auto value2 = in_value * sin_value + in_value_2 * cos_value; - { - HVX_Vector cos_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result2), cosline1_high); - HVX_Vector sin_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result1), sinline1_high); - *(optr+3) = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vqf32(cos_middle_value_qf32, sin_middle_value_qf32)); - } - - optr+=4; - - } - - } - - // if (vectors_in_rounddown > 0) { - - // sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? 
sline1p : *iptr++; - // sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t) input); - // sum = Q6_Vqf32_vadd_Vqf32Vqf32(sum, Q6_Vqf32_vmpy_VsfVsf(sline1, sline1)); - - // } - - - if (leftover_size > 0) - return -1; - - return 0; -} - -int32_t hvx_rope_uint8_ahf( - uint8_t *restrict input, - float *restrict sin, - float *restrict cos, - __fp16 *restrict output, - uint32_t size, - uint32_t partial_dimension, - float scale) -{ - if ((input == NULL) || (output == NULL) || (size == 0)) - { - return -1; - } - - HVX_Vector *iptr = (HVX_Vector *)input; - HVX_Vector *iptr2 = (HVX_Vector *)sin; - HVX_Vector *iptr3 = (HVX_Vector *)cos; - HVX_UVector *optr = (HVX_UVector *)output; - - int32_t l2fetch_block; - int32_t leftover = size & 127; - int32_t vectors_in_rounddown = size / 128; - int32_t leftover_size = leftover * sizeof(float); - - HVX_Vector zero_v_sf = Q6_V_vzero(); - uint32_t convert = 0x00800080; - HVX_Vector convert_vector = Q6_V_vsplat_R(convert); - - HVX_Vector scale_vec = Q6_V_vsplat_R(float_to_fp16s(scale)); - - // - for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) - { - l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); - - if (l2fetch_block > 0) - { - l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); - } - - // - HVX_Vector sinline1_low = *iptr2; - HVX_Vector cosline1_low = *iptr3; - sinline1_low = Q6_Vqf32_vadd_VsfVsf(sinline1_low, Q6_V_vzero()); - cosline1_low = Q6_Vqf32_vadd_VsfVsf(cosline1_low, Q6_V_vzero()); - - - HVX_Vector sinline1_high = *(iptr2+1); - HVX_Vector cosline1_high = *(iptr3+1); - sinline1_high = Q6_Vqf32_vadd_VsfVsf(sinline1_high, Q6_V_vzero()); - cosline1_high = Q6_Vqf32_vadd_VsfVsf(cosline1_high, Q6_V_vzero()); - - for (int32_t j = 0; j < size/partial_dimension; j++) { - - HVX_Vector sline1 = *iptr++; - - HVX_VectorPair temp = Q6_Wh_vadd_VubVub(sline1, zero_v_sf); - - temp = Q6_W_vshuff_VVR(Q6_V_hi_W(temp), Q6_V_lo_W(temp), -2); - HVX_Vector sout1 = Q6_Vh_vsub_VhVh(Q6_V_lo_W(temp), convert_vector); - 
HVX_Vector sout2 = Q6_Vh_vsub_VhVh(Q6_V_hi_W(temp), convert_vector); - - HVX_VectorPair result1 = Q6_Wqf32_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout1), scale_vec); - result1 = Q6_W_vshuff_VVR(Q6_V_hi_W(result1), Q6_V_lo_W(result1), -4); - - HVX_VectorPair result2 = Q6_Wqf32_vmpy_VhfVhf(Q6_Vhf_equals_Vh(sout2), scale_vec); - result2 = Q6_W_vshuff_VVR(Q6_V_hi_W(result2), Q6_V_lo_W(result2), -4); - - - - - { - HVX_Vector first; - HVX_Vector second; - // auto value = in_value * cos_value - in_value_2 * sin_value; - { - HVX_Vector cos_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result1), cosline1_low); - HVX_Vector sin_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result2), sinline1_low); - first = Q6_Vqf32_vsub_Vqf32Vqf32(cos_middle_value_qf32, sin_middle_value_qf32); - } - - // auto value = in_value * cos_value - in_value_2 * sin_value; - { - HVX_Vector cos_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result1), cosline1_high); - HVX_Vector sin_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result2), sinline1_high); - second = Q6_Vqf32_vsub_Vqf32Vqf32(cos_middle_value_qf32, sin_middle_value_qf32); - } - - HVX_Vector r = Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(second, first)); - r = Q6_Vh_vdeal_Vh(r); - *optr = r; - } - - { - HVX_Vector first; - HVX_Vector second; - // auto value2 = in_value * sin_value + in_value_2 * cos_value; - { - HVX_Vector cos_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result2), cosline1_low); - HVX_Vector sin_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(result1), sinline1_low); - first = Q6_Vqf32_vadd_Vqf32Vqf32(cos_middle_value_qf32, sin_middle_value_qf32); - } - - - // auto value2 = in_value * sin_value + in_value_2 * cos_value; - { - HVX_Vector cos_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result2), cosline1_high); - HVX_Vector sin_middle_value_qf32 = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(result1), sinline1_high); - second = Q6_Vqf32_vadd_Vqf32Vqf32(cos_middle_value_qf32, 
sin_middle_value_qf32); - } - HVX_Vector r = Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(second, first)); - r = Q6_Vh_vdeal_Vh(r); - *(optr+1) = r; - } - - - optr+=2; - - } - - } - - // if (vectors_in_rounddown > 0) { - - // sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? sline1p : *iptr++; - // sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t) input); - // sum = Q6_Vqf32_vadd_Vqf32Vqf32(sum, Q6_Vqf32_vmpy_VsfVsf(sline1, sline1)); - - // } - - - if (leftover_size > 0) - return -1; - - return 0; -} - - -int32_t hvx_rope_ahf( - __fp16 *restrict input, - float *restrict sin, - float *restrict cos, - __fp16 *restrict output, - uint32_t size, - uint32_t partial_dimension) -{ - if ((input == NULL) || (output == NULL) || (size == 0)) - { - return -1; - } - - HVX_Vector *iptr = (HVX_Vector *)input; - HVX_Vector *iptr_half = (HVX_Vector *)(input + partial_dimension/2); - HVX_Vector *iptr2 = (HVX_Vector *)sin; - HVX_Vector *iptr3 = (HVX_Vector *)cos; - HVX_UVector *optr = (HVX_UVector *)output; - HVX_UVector *optr_half = (HVX_UVector *)(output + partial_dimension/2);; - HVX_Vector sline1; - HVX_Vector sline1_half; - - int32_t l2fetch_block; - int32_t leftover = size & 63; - int32_t vectors_in_rounddown = size / 64; - int32_t leftover_size = leftover * sizeof(float); - - HVX_Vector one_vsf = Q6_V_vsplat_R(ONE); - HVX_Vector m_one_vqf32 = Q6_Vqf32_vsub_VsfVsf(Q6_V_vzero(), one_vsf); - - HVX_Vector one_vhf = Q6_V_vsplat_R(float_to_fp16s(1.0)); - // HVX_Vector m_one_vqf16 = Q6_Vqf32_vsub_VsfVsf(Q6_V_vzero(), one_vhf); - - for (int32_t i = vectors_in_rounddown - 1; i > 0; i -= BLOCK_SIZE) - { - l2fetch_block = Q6_R_min_RR(i - L2FETCH_AHEAD, BLOCK_SIZE); - - if (l2fetch_block > 0) - { - l2fetch(iptr + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); - l2fetch(iptr2 + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); - l2fetch(iptr3 + L2FETCH_AHEAD, VLEN, VLEN, l2fetch_block, 0); - } - - for (int32_t d = 0; d < partial_dimension/2; d+=64) { - - HVX_Vector sinline1_low = *iptr2++; - 
HVX_Vector cosline1_low = *iptr3++; - - HVX_Vector sinline1_high = *iptr2++; - HVX_Vector cosline1_high = *iptr3++; - - - HVX_Vector *jiptr = iptr + d/64; - HVX_Vector *jiptr_half = iptr_half + d/64; - HVX_Vector *joptr = optr + d/64; - HVX_Vector *joptr_half = optr_half + d/64; - - - for (int32_t j = 0; j < size/partial_dimension; j++) - { - sline1 = *jiptr; - sline1_half = *jiptr_half; - - HVX_VectorPair sline1_half_pair = Q6_Wqf32_vmpy_VhfVhf(sline1_half, one_vhf); - HVX_VectorPair sline1_pair = Q6_Wqf32_vmpy_VhfVhf(sline1, one_vhf); - - sline1_half_pair = Q6_W_vshuff_VVR(Q6_V_hi_W(sline1_half_pair), Q6_V_lo_W(sline1_half_pair), -4); - sline1_pair = Q6_W_vshuff_VVR(Q6_V_hi_W(sline1_pair), Q6_V_lo_W(sline1_pair), -4); - - HVX_Vector m_sline1_half_low = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(sline1_half_pair), m_one_vqf32); - HVX_Vector m_sline1_half_hi = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(sline1_half_pair), m_one_vqf32); - - - // auto value = in_value * cos_value - in_value_2 * sin_value; - HVX_Vector middle_value_low; - { - HVX_Vector cosline1_vqf32_low = Q6_Vqf32_vadd_VsfVsf(cosline1_low, Q6_V_vzero()); - HVX_Vector cos_middle_value_qf32_low = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(sline1_pair), cosline1_vqf32_low); - - HVX_Vector sinline1_vqf32_low = Q6_Vqf32_vadd_VsfVsf(sinline1_low, Q6_V_vzero()); - - HVX_Vector sin_middle_value_qf32_low = Q6_Vqf32_vmpy_Vqf32Vqf32(m_sline1_half_low, sinline1_vqf32_low); - middle_value_low = Q6_Vqf32_vadd_Vqf32Vqf32(cos_middle_value_qf32_low, sin_middle_value_qf32_low); - } - - - - // auto value2 = in_value * sin_value + in_value_2 * cos_value; - - HVX_Vector middle_value_half_low; - { - HVX_Vector cosline1_vqf32_low = Q6_Vqf32_vadd_VsfVsf(cosline1_low, Q6_V_vzero()); - HVX_Vector cos_middle_value_qf32_low = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(sline1_half_pair), cosline1_vqf32_low); - - HVX_Vector sinline1_vqf32_low = Q6_Vqf32_vadd_VsfVsf(sinline1_low, Q6_V_vzero()); - HVX_Vector sin_middle_value_qf32_low = 
Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_lo_W(sline1_pair), sinline1_vqf32_low); - - middle_value_half_low = Q6_Vqf32_vadd_Vqf32Vqf32(cos_middle_value_qf32_low, sin_middle_value_qf32_low); - } - - // second qf16 vector - HVX_Vector middle_value_high; - { - HVX_Vector cosline1_vqf32_high= Q6_Vqf32_vadd_VsfVsf(cosline1_high, Q6_V_vzero()); - HVX_Vector cos_middle_value_qf32_high = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(sline1_pair), cosline1_vqf32_high); - - HVX_Vector sinline1_vqf32_high = Q6_Vqf32_vadd_VsfVsf(sinline1_high, Q6_V_vzero()); - - HVX_Vector sin_middle_value_qf32_high = Q6_Vqf32_vmpy_Vqf32Vqf32(m_sline1_half_hi, sinline1_vqf32_high); - middle_value_high = Q6_Vqf32_vadd_Vqf32Vqf32(cos_middle_value_qf32_high, sin_middle_value_qf32_high); - } - - - - // auto value2 = in_value * sin_value + in_value_2 * cos_value; - - HVX_Vector middle_value_half_high; - { - HVX_Vector cosline1_vqf32_high = Q6_Vqf32_vadd_VsfVsf(cosline1_high, Q6_V_vzero()); - HVX_Vector cos_middle_value_qf32_high = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(sline1_half_pair), cosline1_vqf32_high); - - HVX_Vector sinline1_vqf32_high = Q6_Vqf32_vadd_VsfVsf(sinline1_high, Q6_V_vzero()); - HVX_Vector sin_middle_value_qf32_high = Q6_Vqf32_vmpy_Vqf32Vqf32(Q6_V_hi_W(sline1_pair), sinline1_vqf32_high); - - middle_value_half_high = Q6_Vqf32_vadd_Vqf32Vqf32(cos_middle_value_qf32_high, sin_middle_value_qf32_high); - } - - HVX_Vector sline = Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(middle_value_high, middle_value_low)); - sline = Q6_Vh_vdeal_Vh(sline); - - HVX_Vector sline_half = Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(middle_value_half_high, middle_value_half_low)); - sline_half = Q6_Vh_vdeal_Vh(sline_half); - - *joptr = sline; - *joptr_half = sline_half; - - jiptr += partial_dimension/64; - jiptr_half += partial_dimension/64; - joptr += partial_dimension/64; - joptr_half += partial_dimension/64; - } - } - } - - // if (vectors_in_rounddown > 0) { - - // sline1c = is_aligned(iptr, VLEN) && leftover == 0 ? 
sline1p : *iptr++; - // sline1 = Q6_V_valign_VVR(sline1c, sline1p, (size_t) input); - // sum = Q6_Vqf32_vadd_Vqf32Vqf32(sum, Q6_Vqf32_vmpy_VsfVsf(sline1, sline1)); - - // } - - - if (leftover_size > 0) - return -1; - - return 0; -} - - -template -GraphStatus ropeImpl(TensorType& out_0, - const TensorType& in_0, - const TensorType& sin, - const TensorType& cos, - const TensorType1 &h_cnt, - const Tensor& pose_type) -{ - - out_0.set_dims(in_0); - - auto pose_type_ = pose_type(0,0,0,0); - auto h_cnt_ = static_cast(h_cnt(0,0,0,0)); - - if (pose_type_ == 4) { - - DType dtype = out_0.get_dtype(); - - if (in_0.get_dtype() == DType::Float32 && dtype == DType::Float32) { - auto in_ptr = (float*)in_0.raw_data_const(); - auto sin_ptr = (float*)sin.raw_data_const(); - auto cos_ptr = (float*)cos.raw_data_const(); - auto out_ptr = (float*)out_0.raw_data(); - - - auto [b_in, h_in, w_in, d_in] = in_0.dims(); - - uint32_t half_dimension = d_in / 2; - sin_ptr += half_dimension * h_cnt_; - cos_ptr += half_dimension * h_cnt_; - - int partial_dimension = d_in; - - // NSHD - for (Idx b = 0; b < b_in; b++) { - for (Idx h = 0; h < h_in; h++) { - - // for (Idx w = 0; w < w_in; w++) { - hvx_rope_af(in_ptr, sin_ptr, cos_ptr, out_ptr, w_in * d_in, partial_dimension); - - in_ptr += w_in * d_in; - out_ptr += w_in * d_in; - // } - - sin_ptr += half_dimension; - cos_ptr += half_dimension; - } - } - } else if (in_0.get_dtype() == DType::Float16 && dtype == DType::Float16) { - - auto in_ptr = (__fp16*)in_0.raw_data_const(); - auto sin_ptr = (float*)sin.raw_data_const(); - auto cos_ptr = (float*)cos.raw_data_const(); - auto out_ptr = (__fp16*)out_0.raw_data(); - - - auto [b_in, h_in, w_in, d_in] = in_0.dims(); - - uint32_t half_dimension = d_in / 2; - sin_ptr += half_dimension * h_cnt_; - cos_ptr += half_dimension * h_cnt_; - - int partial_dimension = d_in; - - // NSHD - for (Idx b = 0; b < b_in; b++) { - for (Idx h = 0; h < h_in; h++) { - - // for (Idx w = 0; w < w_in; w++) { - hvx_rope_ahf(in_ptr, 
sin_ptr, cos_ptr, out_ptr, w_in * d_in, partial_dimension); - - in_ptr += w_in * d_in; - out_ptr += w_in * d_in; - // } - - sin_ptr += half_dimension; - cos_ptr += half_dimension; - } - } - } else if (in_0.get_dtype() == DType::QUInt8 && dtype == DType::Float32) { - auto in_ptr = (uint8_t*)in_0.raw_data_const(); - auto sin_ptr = (float*)sin.raw_data_const(); - auto cos_ptr = (float*)cos.raw_data_const(); - auto out_ptr = (float*)out_0.raw_data(); - - - auto [b_in, h_in, w_in, d_in] = in_0.dims(); - - uint32_t half_dimension = d_in / 2; - sin_ptr += half_dimension * h_cnt_; - cos_ptr += half_dimension * h_cnt_; - - int partial_dimension = d_in; - - // NSHD - for (Idx b = 0; b < b_in; b++) { - for (Idx h = 0; h < h_in; h++) { - - // for (Idx w = 0; w < w_in; w++) { - hvx_rope_uint8_af(in_ptr, sin_ptr, cos_ptr, out_ptr, w_in * d_in, partial_dimension); - - in_ptr += w_in * d_in; - out_ptr += w_in * d_in; - // } - - sin_ptr += half_dimension; - cos_ptr += half_dimension; - } - } - } else if (in_0.get_dtype() == DType::QUInt8 && dtype == DType::Float16) { - - auto in_ptr = (uint8_t*)in_0.raw_data_const(); - auto sin_ptr = (float*)sin.raw_data_const(); - auto cos_ptr = (float*)cos.raw_data_const(); - auto out_ptr = (__fp16*)out_0.raw_data(); - - float scale_ = in_0.get_interface_scale(); - - auto [b_in, h_in, w_in, d_in] = in_0.dims(); - - uint32_t half_dimension = d_in / 2; - sin_ptr += half_dimension * h_cnt_; - cos_ptr += half_dimension * h_cnt_; - - int partial_dimension = d_in; - - // NSHD - for (Idx b = 0; b < b_in; b++) { - for (Idx h = 0; h < h_in; h++) { - - // for (Idx w = 0; w < w_in; w++) { - hvx_rope_uint8_ahf(in_ptr, sin_ptr, cos_ptr, out_ptr, w_in * d_in, partial_dimension, scale_); - - in_ptr += w_in * d_in; - out_ptr += w_in * d_in; - // } - - sin_ptr += half_dimension; - cos_ptr += half_dimension; - } - } - } - - - } else { - - // only support pose_type == 2 (LLaMA) now - return GraphStatus::ErrorFatal; - - } - - - - - - return GraphStatus::Success; - 
-} - - - -#else - - -template -GraphStatus ropeImpl(TensorType& out_0, - const TensorType& in_0, - const TensorType& sin, - const TensorType& cos, - const TensorType1 &h_cnt, - const Tensor& pose_type) - -{ - /* - * add code here - * */ - /* - * To have good performance and stability, it is required to avoid heap memory - * allocation in this function. The heap memory allocation includes but not - * limited to calling malloc, operator new, constructing STL container objects - * like std::vector with default allocator, and adding items like calling - * std::vector::push_back to STL container objects with default allocator. - * - * Please check in SDK documentation for more information. - */ - - debuglog("RoPE execute... dims=(%zdx%zdx%zdx%zd)", in_0.dim(0), in_0.dim(1), in_0.dim(2), in_0.dim(3)); - debuglog("RoPE execute... dims=(%zdx%zdx%zdx%zd)", sin.dim(0), sin.dim(1), sin.dim(2), sin.dim(3)); - debuglog("RoPE execute... dims=(%zdx%zdx%zdx%zd)", cos.dim(0), cos.dim(1), cos.dim(2), cos.dim(3)); - - // BSHD => NHWC - - // Todo: We need consider to store the sequence position if we have KV Cache - - auto pose_type_ = pose_type(0,0,0,0); - auto h_cnt_ = static_cast(h_cnt(0,0,0,0)); - - out_0.set_dims(in_0); - auto [b_in, h_in, w_in, d_in] = in_0.dims(); - if (pose_type_ == 4) { - DType dtype = out_0.get_dtype(); - - if (dtype == DType::Float32) { - for (Idx b = 0; b < b_in; b++) { - for (Idx h = 0; h < h_in; h++) { - for (Idx w = 0; w < w_in; w++) { - - int s = h; // BSHD order - int partial_dimension = d_in; - int half = (int)(partial_dimension / 2); - for (Idx d = 0; d < partial_dimension / 2; ++d) { - float in_value = in_0(b, h, w, d); - float in_value_2 = in_0(b, h, w, d + half); - float sin_value = sin(0, 0, s + h_cnt_, d); - float cos_value = cos(0, 0, s + h_cnt_, d); - auto value = in_value * cos_value - in_value_2 * sin_value; - auto value2 = in_value * sin_value + in_value_2 * cos_value; - out_0(b, h, w, d) = value; - out_0(b, h, w, d + half) = value2; - } - 
} - } - } - } else if (dtype == DType::Float16) { - - auto in_ptr = (__fp16*)in_0.raw_data_const(); - // auto sin_ptr = (__fp16*)sin.raw_data_const(); - // auto cos_ptr = (__fp16*)cos.raw_data_const(); - auto out_ptr = (__fp16*)out_0.raw_data(); - - for (Idx b = 0; b < b_in; b++) { - for (Idx h = 0; h < h_in; h++) { - for (Idx w = 0; w < w_in; w++) { - - int s = h; // BSHD order - int partial_dimension = d_in; - int half = (int)(partial_dimension / 2); - for (Idx d = 0; d < partial_dimension / 2; ++d) { - __fp16 in_value = *in_ptr; - __fp16 in_value_2 = *(in_ptr + half); - float sin_value = sin(0, 0, s + h_cnt_, d); - float cos_value = cos(0, 0, s + h_cnt_, d); - auto value = in_value * cos_value - in_value_2 * sin_value; - auto value2 = in_value * sin_value + in_value_2 * cos_value; - *out_ptr = static_cast<__fp16>(value); - *(out_ptr + half) = static_cast<__fp16>(value2); - - out_ptr++; - in_ptr++; - } - - out_ptr += half; - in_ptr += half; - } - } - } - } - } - - // for (Idx b = 0; b < b_in; b++) { - // for (Idx h = 0; h < h_in; h++) { - // for (Idx w = 0; w < w_in; w++) { - // // RoPE - // for (Idx d = 0; d < d_in; d++) { - - - // int s = h; // BSHD order - // if (pose_type_ == 1) { - // float in_value = in_0(b, h, w, d); - // float in_value_2; - // if (d < d_in / 2) { // 偶數 0,2,4 - // in_value_2 = -in_0(b, h, w, d + d_in / 2); - // } else { - // in_value_2 = in_0(b, h, w, d - d_in / 2); - // } - // float sin_value = sin(0, 0, s +h_cnt_, d); - // float cos_value = cos(0, 0, s +h_cnt_, d); - // auto value = in_value * cos_value + in_value_2 * sin_value; - // out_0(b, h, w, d) = value; - // } - // else if (pose_type_ == 2) { - // float in_value = in_0(b, h, w, d); - // debuglog("rope execute... in_value=(%f)", in_value); - // float in_value_2; - // if (d % 2 == 0) { // 偶數 0,2,4 - // in_value_2 = -in_0(b, h, w, d + 1); - // } else { - // in_value_2 = in_0(b, h, w, d - 1); - // } - // debuglog("rope execute... 
in_value_2=(%f)", in_value_2); - // float sin_value = sin(0, 0, s +h_cnt_, d); - // float cos_value = cos(0, 0, s +h_cnt_, d); - // auto value = in_value * cos_value + in_value_2 * sin_value; - - // debuglog("rope execute... sin_value=(%f)", sin_value); - // debuglog("rope execute... cos_value=(%f)", cos_value); - - // debuglog("rope execute... value=(%f)", value); - // out_0(b, h, w, d) = value; - // } else if (pose_type_ == 4) { - // } else { - // float in_value = in_0(b, h, w, d); - // float in_value_2; - // float sin_value = sin(0, 0, s +h_cnt_, d); - // float cos_value = cos(0, 0, s +h_cnt_, d); - // if (d < d_in / 4) { - // in_value_2 = -in_0(b, h, w, d + d_in / 4); - // auto value = in_value * cos_value + in_value_2 * sin_value; - - // out_0(b ,h , w, d) = value; - // } else if(d < d_in / 2){ - // in_value_2 = in_0(b, h, w, d - d_in / 4); - // auto value = in_value * cos_value + in_value_2 * sin_value; - - // out_0(b ,h , w, d) = value; - // }else { - - // out_0(b ,h , w, d) = in_value; - // } - // } - - // } - // } - // } - // } - - -// auto &input = inputs[0]; -// auto &output = outputs[0]; -// for (int n = 0; n < input->batch(); ++n) { -// for (int h = 0; h < input->head(); ++h) { -// for (int s = 0; s < input->sequence(); ++s) {//sequance -// #pragma omp parallel for num_threads(4) -// for (int d = 0; d < input->dimension(); ++d) { -// if (pose_type_== 1) { -// float in_value = input->dataAt(n, h, s, d); -// float in_value_2; -// if (d < input->dimension() / 2) { // 偶數 0,2,4 -// in_value_2 = -input->dataAt(n, h, s, d + input->dimension() / 2); -// } else { -// in_value_2 = input->dataAt(n, h, s, d - input->dimension() / 2); -// } -// float sin_value = sin_.dataAt(0, 0, s +h_cnt_, d); -// float cos_value = cos_.dataAt(0, 0, s +h_cnt_, d); -// auto value = in_value * cos_value + in_value_2 * sin_value; -// if(output->dtypeAt(n,h,s, d) == MLLM_TYPE_F32) { -// output->setDataAt(n, h, s, d, value); -// } -// else if(output->dtypeAt(n,h,s, d) == MLLM_TYPE_F16) 
{ -// output->setDataAt(n, h, s, d, MLLM_FP32_TO_FP16(value)); -// } -// } -// else if (pose_type_== 2) { -// float in_value = input->dataAt(n, h, s, d); -// float in_value_2; -// if (d % 2 == 0) { // 偶數 0,2,4 -// in_value_2 = -input->dataAt(n, h, s, d + 1); -// } else { -// in_value_2 = input->dataAt(n, h, s, d - 1); -// } -// float sin_value = sin_.dataAt(0, 0, s +h_cnt_, d); -// float cos_value = cos_.dataAt(0, 0, s +h_cnt_, d); -// auto value = in_value * cos_value + in_value_2 * sin_value; -// if(output->dtypeAt(n,h,s, d) == MLLM_TYPE_F32) { -// output->setDataAt(n, h, s, d, value); -// } -// else if(output->dtypeAt(n,h,s, d) == MLLM_TYPE_F16) { -// output->setDataAt(n, h, s, d, MLLM_FP32_TO_FP16(value)); -// } -// }else{ -// float in_value = input->dataAt(n, h, s, d); -// float in_value_2; -// float sin_value = sin_.dataAt(0, 0, s +h_cnt_, d); -// float cos_value = cos_.dataAt(0, 0, s +h_cnt_, d); -// if (d < input->dimension() / 4) { -// in_value_2 = - input->dataAt(n, h, s, d + input->dimension() / 4); -// auto value = in_value * cos_value + in_value_2 * sin_value; -// if(output->dtypeAt(n,h,s, d) == MLLM_TYPE_F32) { -// output->setDataAt(n, h, s, d, value); -// } -// else if(output->dtypeAt(n,h,s, d) == MLLM_TYPE_F16) { -// output->setDataAt(n, h, s, d, MLLM_FP32_TO_FP16(value)); -// } -// } else if(d < input->dimension() / 2){ -// in_value_2 = input->dataAt(n, h, s, d - input->dimension() / 4); -// auto value = in_value * cos_value + in_value_2 * sin_value; -// if(output->dtypeAt(n,h,s, d) == MLLM_TYPE_F32) { -// output->setDataAt(n, h, s, d, value); -// } -// else if(output->dtypeAt(n,h,s, d) == MLLM_TYPE_F16) { -// output->setDataAt(n, h, s, d, MLLM_FP32_TO_FP16(value)); -// } -// }else { -// if(output->dtypeAt(n,h,s, d) == MLLM_TYPE_F32) { -// output->setDataAt(n, h, s, d, in_value); -// } -// else if(output->dtypeAt(n,h,s, d) == MLLM_TYPE_F16) { -// output->setDataAt(n, h, s, d, MLLM_FP32_TO_FP16(in_value)); -// } -// } -// } -// } -// } -// } -// } - 
- -// Todo store history position -// h_cnt_ += input->sequence(); -// if(h_cnt_ >pos_max_){ -// h_cnt_ = 0; -// } - - - return GraphStatus::Success; -} - -#endif - - -__attribute__((unused)) static float ropeCostFunc(const Op *op) -{ - /* - * add code here - * */ - - float cost = 0.0; // add cost computation here - return cost; -} - - - - - -/* At the bottom of the op file, call END_PKG_OP_DEFINITION(), - where is as BEGIN_PKG_OP_DEFINITION -*/ -END_PKG_OP_DEFINITION(PKG_RoPE); \ No newline at end of file diff --git a/src/backends/qnn/Log/LogUtils.cpp b/src/backends/qnn/Log/LogUtils.cpp deleted file mode 100644 index 1709d25d1..000000000 --- a/src/backends/qnn/Log/LogUtils.cpp +++ /dev/null @@ -1,45 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2020, 2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -#include "LogUtils.hpp" - -void qnn::log::utils::logStdoutCallback(const char* fmt, - QnnLog_Level_t level, - uint64_t timestamp, - va_list argp) { - const char* levelStr = ""; - switch (level) { - case QNN_LOG_LEVEL_ERROR: - levelStr = " ERROR "; - break; - case QNN_LOG_LEVEL_WARN: - levelStr = "WARNING"; - break; - case QNN_LOG_LEVEL_INFO: - levelStr = " INFO "; - break; - case QNN_LOG_LEVEL_DEBUG: - levelStr = " DEBUG "; - break; - case QNN_LOG_LEVEL_VERBOSE: - levelStr = "VERBOSE"; - break; - case QNN_LOG_LEVEL_MAX: - levelStr = "UNKNOWN"; - break; - } - - double ms = (double)timestamp / 1000000.0; - // To avoid interleaved messages - { - std::lock_guard lock(sg_logUtilMutex); - fprintf(stdout, "%8.1fms [%-7s] ", ms, levelStr); - vfprintf(stdout, fmt, argp); - fprintf(stdout, "\n"); - } -} diff --git a/src/backends/qnn/Log/LogUtils.hpp b/src/backends/qnn/Log/LogUtils.hpp deleted file mode 100644 index 6ffe08c02..000000000 --- 
a/src/backends/qnn/Log/LogUtils.hpp +++ /dev/null @@ -1,27 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2020, 2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -#pragma once - -#include -#include -#include -#include - -#include "QnnLog.h" - -namespace qnn { -namespace log { -namespace utils { - -void logStdoutCallback(const char* fmt, QnnLog_Level_t level, uint64_t timestamp, va_list argp); -static std::mutex sg_logUtilMutex; - -} // namespace utils -} // namespace log -} // namespace qnn diff --git a/src/backends/qnn/Log/Logger.cpp b/src/backends/qnn/Log/Logger.cpp deleted file mode 100644 index cabb8223c..000000000 --- a/src/backends/qnn/Log/Logger.cpp +++ /dev/null @@ -1,105 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2020-2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================= - -#include -#include -#include -#include - -#include "LogUtils.hpp" -#include "Logger.hpp" - -using namespace qnn::log; - -std::shared_ptr Logger::s_logger = nullptr; - -std::mutex Logger::s_logMutex; - -std::shared_ptr Logger::createLogger(QnnLog_Callback_t callback, - QnnLog_Level_t maxLevel, - QnnLog_Error_t* status) { - std::lock_guard lock(s_logMutex); - if ((maxLevel > QNN_LOG_LEVEL_VERBOSE) || (maxLevel == 0)) { - if (status) { - *status = QNN_LOG_ERROR_INVALID_ARGUMENT; - } - return nullptr; - } - if (!s_logger) { - s_logger = std::shared_ptr(new (std::nothrow) Logger(callback, maxLevel, status)); - } - *status = QNN_LOG_NO_ERROR; - return s_logger; -} - -Logger::Logger(QnnLog_Callback_t callback, QnnLog_Level_t maxLevel, QnnLog_Error_t* status) - : m_callback(callback), m_maxLevel(maxLevel), m_epoch(getTimestamp()) { - if (!callback) { - m_callback = utils::logStdoutCallback; - } -} - -void Logger::log(QnnLog_Level_t level, const char* file, long line, const char* fmt, ...) 
{ - if (m_callback) { - if (level > m_maxLevel.load(std::memory_order_seq_cst)) { - return; - } - va_list argp; - va_start(argp, fmt); - std::string logString(fmt); - std::ignore = file; - std::ignore = line; - (*m_callback)(logString.c_str(), level, getTimestamp() - m_epoch, argp); - va_end(argp); - } -} - -uint64_t Logger::getTimestamp() const { - return std::chrono::duration_cast( - std::chrono::system_clock::now().time_since_epoch()) - .count(); -} - -std::shared_ptr<::qnn::log::Logger> g_logger{nullptr}; - -bool qnn::log::initializeLogging() { - QnnLog_Level_t logLevel; - QnnLog_Error_t status; -#ifdef QNN_ENABLE_DEBUG - logLevel = QNN_LOG_LEVEL_DEBUG; -#else - logLevel = QNN_LOG_LEVEL_INFO; -#endif - // Default log stream is enabled in Core/Logger component - g_logger = ::qnn::log::Logger::createLogger(nullptr, logLevel, &status); - if (QNN_LOG_NO_ERROR != status || !g_logger) { - return false; - } - return true; -} - -QnnLog_Callback_t qnn::log::getLogCallback() { return g_logger->getLogCallback(); } - -QnnLog_Level_t qnn::log::getLogLevel() { return g_logger->getMaxLevel(); } - -bool qnn::log::isLogInitialized() { - if (g_logger == nullptr) { - return false; - } - return true; -} - -bool qnn::log::setLogLevel(QnnLog_Level_t maxLevel) { - if (!::qnn::log::Logger::isValid() || - !(maxLevel >= QNN_LOG_LEVEL_ERROR && maxLevel <= QNN_LOG_LEVEL_DEBUG)) { - return false; - } - - g_logger->setMaxLevel(maxLevel); - return true; -} diff --git a/src/backends/qnn/Log/Logger.hpp b/src/backends/qnn/Log/Logger.hpp deleted file mode 100644 index f00c6be67..000000000 --- a/src/backends/qnn/Log/Logger.hpp +++ /dev/null @@ -1,107 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2020-2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================== - -#pragma once - -#include -#include -#include -#include -#include - -#include "QnnLog.h" - -#define __FILENAME__ (strrchr(__FILE__, '/') + 1) - -/** - * @brief Log something with the current logger. Always valid to call, though - * it won't do something if no logger has been set. - */ - -#define QNN_LOG_LEVEL(level, fmt, ...) \ - do { \ - auto logger = ::qnn::log::Logger::getLogger(); \ - if (logger) { \ - logger->log(level, __FILENAME__, __LINE__, fmt, ##__VA_ARGS__); \ - } \ - } while (0) - -#define QNN_ERROR(fmt, ...) QNN_LOG_LEVEL(QNN_LOG_LEVEL_ERROR, fmt, ##__VA_ARGS__) - -#define QNN_ERROR_EXIT(fmt, ...) \ - { \ - QNN_ERROR(fmt, ##__VA_ARGS__); \ - exit(EXIT_FAILURE); \ - } - -#define QNN_WARN(fmt, ...) QNN_LOG_LEVEL(QNN_LOG_LEVEL_WARN, fmt, ##__VA_ARGS__) - -#define QNN_INFO(fmt, ...) QNN_LOG_LEVEL(QNN_LOG_LEVEL_INFO, fmt, ##__VA_ARGS__) - -#define QNN_DEBUG(fmt, ...) QNN_LOG_LEVEL(QNN_LOG_LEVEL_DEBUG, fmt, ##__VA_ARGS__) - -#define QNN_VERBOSE(fmt, ...) 
QNN_LOG_LEVEL(QNN_LOG_LEVEL_VERBOSE, fmt, ##__VA_ARGS__) - -#define QNN_FUNCTION_ENTRY_LOG QNN_LOG_LEVEL(QNN_LOG_LEVEL_VERBOSE, "Entering %s", __func__) - -#define QNN_FUNCTION_EXIT_LOG QNN_LOG_LEVEL(QNN_LOG_LEVEL_VERBOSE, "Returning from %s", __func__) - -namespace qnn { -namespace log { - -bool initializeLogging(); - -QnnLog_Callback_t getLogCallback(); - -QnnLog_Level_t getLogLevel(); - -bool isLogInitialized(); - -bool setLogLevel(QnnLog_Level_t maxLevel); - -class Logger final { - public: - Logger(const Logger&) = delete; - Logger& operator=(const Logger&) = delete; - Logger(Logger&&) = delete; - Logger& operator=(Logger&&) = delete; - - void setMaxLevel(QnnLog_Level_t maxLevel) { - m_maxLevel.store(maxLevel, std::memory_order_seq_cst); - } - - QnnLog_Level_t getMaxLevel() { return m_maxLevel.load(std::memory_order_seq_cst); } - - QnnLog_Callback_t getLogCallback() { return m_callback; } - - void log(QnnLog_Level_t level, const char* file, long line, const char* fmt, ...); - - static std::shared_ptr createLogger(QnnLog_Callback_t callback, - QnnLog_Level_t maxLevel, - QnnLog_Error_t* status); - - static bool isValid() { return (s_logger != nullptr); } - - static std::shared_ptr getLogger() { return s_logger; } - - static void reset() { s_logger = nullptr; } - - private: - Logger(QnnLog_Callback_t callback, QnnLog_Level_t maxLevel, QnnLog_Error_t* status); - - uint64_t getTimestamp() const; - - QnnLog_Callback_t m_callback; - std::atomic m_maxLevel; - uint64_t m_epoch; - static std::shared_ptr s_logger; - static std::mutex s_logMutex; -}; - -} // namespace log -} // namespace qnn diff --git a/src/backends/qnn/Model/QnnModel.cpp b/src/backends/qnn/Model/QnnModel.cpp deleted file mode 100644 index 32adae185..000000000 --- a/src/backends/qnn/Model/QnnModel.cpp +++ /dev/null @@ -1,658 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2019-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. 
-// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -#include -#include -#include -#include - -#include "QnnModel.hpp" -#include "QnnModelPal.hpp" -#include "QnnTypeMacros.hpp" - -#define FREE_MEMORY(ptr1, ptr2, ptr3) \ - do { \ - free(ptr1); \ - free(ptr2); \ - free(ptr3); \ - } while (0) - -namespace qnn_wrapper_api { - -ModelError_t QnnModel::initialize(const Qnn_BackendHandle_t &backendHandle, - const QNN_INTERFACE_VER_TYPE &qnnInterface, - const Qnn_ContextHandle_t &context, - const char *graphName, - bool debug, - uint8_t doNodeValidations, - const QnnGraph_Config_t **graphConfigs) { - if (backendHandle == nullptr) { - PRINT_ERROR("QnnModel::initialize() nullptr passed as backend handle."); - return MODEL_CONTEXT_ERROR; - } - if (context == nullptr) { - PRINT_ERROR("QnnModel::initialize() nullptr passed as context handle."); - return MODEL_CONTEXT_ERROR; - } - if (graphName == nullptr) { - PRINT_ERROR("QnnModel::initialize() nullptr passed as graphName."); - return MODEL_GRAPH_ERROR; - } - - if (!m_graphName.empty()) { - // only one graph is allowed per QnnModel - PRINT_ERROR("QnnModel::initialize() model for graph %s already initialized.", graphName); - return MODEL_GRAPH_ERROR; - } - - if (!m_doNodeValidations) { - PRINT_WARNING( - "Node validation disabled. Backend will not perform op " - "validation prior to adding Node. 
\n"); - } - - m_qnnInterface = qnnInterface; - m_backendHandle = backendHandle; - m_graphName = graphName; - m_debug = debug; - m_doNodeValidations = doNodeValidations; - - if (m_qnnInterface.graphCreate(context, graphName, graphConfigs, &m_graph) != - QNN_GRAPH_NO_ERROR || - m_graph == nullptr) { - PRINT_ERROR("QnnModel::initialize() not able to create graph in given context."); - return MODEL_GRAPH_ERROR; - } - - return MODEL_NO_ERROR; -} - -ModelError_t QnnModel::addTensor(const char *nodeName, Qnn_Tensor_t *tensor, bool saveTensor) { - ModelError_t err; - if (!tensor) { - PRINT_ERROR("QnnModel::addTensor() NULL tensor pointer provided.\n"); - return MODEL_TENSOR_ERROR; - } - VALIDATE_TENSOR_VERSION((*tensor), err); - - // Verify tensor being added is not a duplicate - std::string mapEntry = std::string(QNN_TENSOR_GET_NAME(tensor)); - if (m_modelTensorsMap.find(mapEntry) != m_modelTensorsMap.end()) { - PRINT_ERROR("QnnModel::addTensor() creating tensor %s for node %s. Tensor already exists.\n", - mapEntry.c_str(), - nodeName); - - return MODEL_TENSOR_ERROR; - } - - const std::map dataTypeToSize = { - {QNN_DATATYPE_INT_8, 1}, - {QNN_DATATYPE_INT_16, 2}, - {QNN_DATATYPE_INT_32, 4}, - {QNN_DATATYPE_INT_64, 8}, - {QNN_DATATYPE_UINT_8, 1}, - {QNN_DATATYPE_UINT_16, 2}, - {QNN_DATATYPE_UINT_32, 4}, - {QNN_DATATYPE_UINT_64, 8}, - {QNN_DATATYPE_FLOAT_16, 2}, - {QNN_DATATYPE_FLOAT_32, 4}, - {QNN_DATATYPE_BOOL_8, 1}, - {QNN_DATATYPE_SFIXED_POINT_8, 1}, - {QNN_DATATYPE_SFIXED_POINT_16, 2}, - {QNN_DATATYPE_SFIXED_POINT_32, 4}, - {QNN_DATATYPE_UFIXED_POINT_8, 1}, - {QNN_DATATYPE_UFIXED_POINT_16, 2}, - {QNN_DATATYPE_UFIXED_POINT_32, 4}, - }; - - if (dataTypeToSize.find(QNN_TENSOR_GET_DATA_TYPE(tensor)) == dataTypeToSize.end()) { - PRINT_ERROR( - "QnnModel::addTensor() invalid QNN data type provided, %u, for tensor %s on node %s\n", - QNN_TENSOR_GET_DATA_TYPE(tensor), - QNN_TENSOR_GET_NAME(tensor), - nodeName); - return MODEL_TENSOR_ERROR; - } - - // sanity check tensor data if 
addTensor used for static tensor - if (QNN_TENSOR_GET_TYPE(tensor) == QNN_TENSOR_TYPE_STATIC) { - if (QNN_TENSOR_GET_MEM_TYPE(tensor) != QNN_TENSORMEMTYPE_RAW) { - PRINT_ERROR( - "QnnModel::addTensor(): Expected raw memType in provided static tensor %s for node %s", - mapEntry.c_str(), - nodeName); - return MODEL_TENSOR_ERROR; - } - // verify size expressed by the dims matches the raw tensor size - uint32_t qnnTensorSize = - std::accumulate(QNN_TENSOR_GET_DIMENSIONS(tensor), - QNN_TENSOR_GET_DIMENSIONS(tensor) + QNN_TENSOR_GET_RANK(tensor), - (uint32_t)dataTypeToSize.find(QNN_TENSOR_GET_DATA_TYPE(tensor))->second, - std::multiplies()); - if (qnnTensorSize != QNN_TENSOR_GET_CLIENT_BUF(tensor).dataSize) { - PRINT_ERROR( - "QnnModel::addTensor(): Adding STATIC tensor, length mismatch between clientBuf" - "size and tensor Dims(dim * rank * sizeof(datatype) for, nodeName: %s, tensorName: %s." - "Got tensorSize: %d, tensor.clientBuf.dataSize: %d.\n", - nodeName, - QNN_TENSOR_GET_NAME(tensor), - qnnTensorSize, - QNN_TENSOR_GET_CLIENT_BUF(tensor).dataSize); - return MODEL_TENSOR_ERROR; - } - } - - if (m_debug && QNN_TENSOR_GET_TYPE(tensor) == QNN_TENSOR_TYPE_NATIVE) { - // for debug, make all tensors accessible by client - QNN_TENSOR_SET_TYPE(tensor, QNN_TENSOR_TYPE_APP_READ); - } - - if (m_qnnInterface.tensorCreateGraphTensor(m_graph, tensor) != QNN_TENSOR_NO_ERROR) { - PRINT_ERROR("QnnModel::addTensor() Creating tensor for node: %s, tensorName: %s.\n", - nodeName, - QNN_TENSOR_GET_NAME(tensor)); - return MODEL_TENSOR_ERROR; - } - - if (saveTensor) { - Qnn_Tensor_t tensorCopy; - VALIDATE(deepCopyQnnTensors(*tensor, tensorCopy), err); - - // save network input/outputs tensors to use for setting the Qnn graph's input and output - // tensors for populating GraphInfo_t for caller - if (QNN_TENSOR_GET_TYPE(tensor) == QNN_TENSOR_TYPE_APP_WRITE) { - m_modelInputTensors.push_back(tensorCopy); - } else if (QNN_TENSOR_GET_TYPE(tensor) == QNN_TENSOR_TYPE_APP_READ) { - 
m_modelOutputTensors.push_back(tensorCopy); - } - - // save created tensors for later lookup to populate graph node construction - m_modelTensorsMap[mapEntry] = tensorCopy; - } - - return MODEL_NO_ERROR; -} - -ModelError_t QnnModel::addTensor(const char *nodeName, Qnn_Tensor_t tensor, bool saveTensor) { - return addTensor(nodeName, &tensor, saveTensor); -} - -ModelError_t QnnModel::getQnnTensor(const char *&nodeName, - const char *&tensorName, - Qnn_Tensor_t &tensor) { - std::string mapEntry = std::string(tensorName); - if (m_modelTensorsMap.find(tensorName) == m_modelTensorsMap.end()) { - PRINT_ERROR( - "QnnModel::getQnnTensor() tensor %s not found on node %s\n", mapEntry.c_str(), nodeName); - return MODEL_TENSOR_ERROR; - } - tensor = m_modelTensorsMap[mapEntry]; - - return MODEL_NO_ERROR; -} - -// overload for string tensorName -ModelError_t QnnModel::getQnnTensor(std::string nodeName, - std::string tensorName, - Qnn_Tensor_t &tensor) { - if (m_modelTensorsMap.find(tensorName) == m_modelTensorsMap.end()) { - PRINT_ERROR( - "QnnModel::getQnnTensor() tensor %s not found on node %s\n", tensorName.c_str(), nodeName.c_str()); - return MODEL_TENSOR_ERROR; - } - tensor = m_modelTensorsMap[tensorName]; - - return MODEL_NO_ERROR; -} - - -ModelError_t QnnModel::addNode(Qnn_OpConfigVersion_t version, - const char *name, - const char *packageName, - const char *type, - Qnn_Param_t *params, - uint32_t numOfParams, - const char **inputNames, - uint32_t numOfInputs, - Qnn_Tensor_t *outputTensors, - uint32_t numOfOutputs) { - ModelError_t nodeError; - Qnn_OpConfig_t opDefinition = QNN_OPCONFIG_INIT; - opDefinition.version = version; - VALIDATE_OP_CONFIG_VERSION((opDefinition), nodeError); - - // populate Qnn param for node - Qnn_Param_t *nodeParams = (Qnn_Param_t *)malloc(numOfParams * sizeof(Qnn_Param_t)); - - // populate input tensors for node - Qnn_Tensor_t *inputs = (Qnn_Tensor_t *)malloc(numOfInputs * sizeof(Qnn_Tensor_t)); - - // populate output tensors of node - 
Qnn_Tensor_t *outputs = (Qnn_Tensor_t *)malloc(numOfOutputs * sizeof(Qnn_Tensor_t)); - - if (nodeParams == nullptr || inputs == nullptr || outputs == nullptr) { - PRINT_ERROR( - "QnnModel::addNode() failed for allocate memory for creating QNN OpConfig for node %s.\n", - name); - FREE_MEMORY(nodeParams, inputs, outputs); - return MODEL_MEMORY_ALLOCATE_ERROR; - } - uint32_t nodeParamsCounter = 0; - for (size_t i = 0; i < numOfParams; i++) { - switch (params[i].paramType) { - case QNN_PARAMTYPE_TENSOR: { - Qnn_Tensor_t &tensor = params[i].tensorParam; - // Note: set saveTensor to false as no need to save tensor beyond this - // function call for params - nodeError = addTensor(name, &tensor, false); - if (nodeError != MODEL_NO_ERROR) { - PRINT_ERROR("QnnModel::addNode() addTensor() failed for tensor param %s on node %s.\n", - QNN_TENSOR_GET_NAME(tensor), - name); - FREE_MEMORY(nodeParams, inputs, outputs); - return nodeError; - } - nodeParams[nodeParamsCounter].paramType = QNN_PARAMTYPE_TENSOR; - nodeParams[nodeParamsCounter].name = params[i].name; - nodeParams[nodeParamsCounter++].tensorParam = tensor; - break; - } - case QNN_PARAMTYPE_SCALAR: { - nodeParams[nodeParamsCounter].paramType = QNN_PARAMTYPE_SCALAR; - nodeParams[nodeParamsCounter].name = params[i].name; - nodeParams[nodeParamsCounter++].scalarParam = params[i].scalarParam; - break; - } - default: { - PRINT_ERROR("QnnModel::addNode() unknown param type passed for param %s on node %s.\n", - params[i].name, - name); - FREE_MEMORY(nodeParams, inputs, outputs); - return MODEL_PARAMS_ERROR; - } - } - } - - size_t inputsCounter = 0; - for (size_t j = 0; j < numOfInputs; j++) { - nodeError = getQnnTensor(name, inputNames[j], inputs[inputsCounter++]); - if (nodeError != MODEL_NO_ERROR) { - PRINT_ERROR("QnnModel::addNode() getQnnTensor() failed for tensor %s on node %s.\n", - inputNames[j], - name); - FREE_MEMORY(nodeParams, inputs, outputs); - return nodeError; - } - } - - size_t outputsCounter = 0; - 
m_modelOutputTensorMap[name] = {}; - for (size_t k = 0; k < numOfOutputs; k++) { - // create node output tensors first - nodeError = addTensor(name, outputTensors[k]); - if (nodeError != MODEL_NO_ERROR) { - PRINT_ERROR("QnnModel::addNode() addTensor() failed for tensor %s on node %s\n", - QNN_TENSOR_GET_NAME(outputTensors[k]), - name); - FREE_MEMORY(nodeParams, inputs, outputs); - return nodeError; - } - const char *outTensorName = QNN_TENSOR_GET_NAME(outputTensors[k]); - m_modelOutputTensorMap[name].push_back(outTensorName); - nodeError = getQnnTensor(name, outTensorName, outputs[outputsCounter++]); - if (nodeError != MODEL_NO_ERROR) { - PRINT_ERROR("QnnModel::addNode() getQnnTensor() failed for tensor %s on node %s.\n", - outTensorName, - name); - FREE_MEMORY(nodeParams, inputs, outputs); - return nodeError; - } - } - - // define and add node to graph - QNN_OP_CFG_SET_NAME(opDefinition, name); - QNN_OP_CFG_SET_PACKAGE_NAME(opDefinition, packageName); - QNN_OP_CFG_SET_TYPE_NAME(opDefinition, type); - QNN_OP_CFG_SET_PARAMS(opDefinition, numOfParams, nodeParams); - QNN_OP_CFG_SET_INPUTS(opDefinition, numOfInputs, inputs); - QNN_OP_CFG_SET_OUTPUTS(opDefinition, numOfOutputs, outputs); - - if (m_doNodeValidations) { - auto validationStatus = m_qnnInterface.backendValidateOpConfig(m_backendHandle, opDefinition); - if (validationStatus == QNN_BACKEND_ERROR_NOT_SUPPORTED) { - PRINT_DEBUG("QnnModel::addNode() validation API not supported.\n"); - } else if (validationStatus != QNN_SUCCESS) { - PRINT_ERROR("QnnModel::addNode() validating node %s failed.\n", name); - FREE_MEMORY(nodeParams, inputs, outputs); - return MODEL_GRAPH_ERROR; - } - } - - if (m_qnnInterface.graphAddNode(m_graph, opDefinition) != QNN_GRAPH_NO_ERROR) { - PRINT_ERROR("QnnModel::addNode() adding node %s failed.\n", name); - FREE_MEMORY(nodeParams, inputs, outputs); - return MODEL_GRAPH_ERROR; - } - - FREE_MEMORY(nodeParams, inputs, outputs); - return MODEL_NO_ERROR; -} - -// overload for string 
tensorName -ModelError_t QnnModel::addNode(Qnn_OpConfigVersion_t version, - const char *name, - const char *packageName, - const char *type, - Qnn_Param_t *params, - uint32_t numOfParams, - std::vector inputNames, - uint32_t numOfInputs, - Qnn_Tensor_t *outputTensors, - uint32_t numOfOutputs) { - ModelError_t nodeError; - Qnn_OpConfig_t opDefinition = QNN_OPCONFIG_INIT; - opDefinition.version = version; - VALIDATE_OP_CONFIG_VERSION((opDefinition), nodeError); - - // populate Qnn param for node - Qnn_Param_t *nodeParams = (Qnn_Param_t *)malloc(numOfParams * sizeof(Qnn_Param_t)); - - // populate input tensors for node - Qnn_Tensor_t *inputs = (Qnn_Tensor_t *)malloc(numOfInputs * sizeof(Qnn_Tensor_t)); - - // populate output tensors of node - Qnn_Tensor_t *outputs = (Qnn_Tensor_t *)malloc(numOfOutputs * sizeof(Qnn_Tensor_t)); - - if (nodeParams == nullptr || inputs == nullptr || outputs == nullptr) { - PRINT_ERROR( - "QnnModel::addNode() failed for allocate memory for creating QNN OpConfig for node %s.\n", - name); - FREE_MEMORY(nodeParams, inputs, outputs); - return MODEL_MEMORY_ALLOCATE_ERROR; - } - uint32_t nodeParamsCounter = 0; - for (size_t i = 0; i < numOfParams; i++) { - switch (params[i].paramType) { - case QNN_PARAMTYPE_TENSOR: { - Qnn_Tensor_t &tensor = params[i].tensorParam; - // Note: set saveTensor to false as no need to save tensor beyond this - // function call for params - nodeError = addTensor(name, &tensor, false); - if (nodeError != MODEL_NO_ERROR) { - PRINT_ERROR("QnnModel::addNode() addTensor() failed for tensor param %s on node %s.\n", - QNN_TENSOR_GET_NAME(tensor), - name); - FREE_MEMORY(nodeParams, inputs, outputs); - return nodeError; - } - nodeParams[nodeParamsCounter].paramType = QNN_PARAMTYPE_TENSOR; - nodeParams[nodeParamsCounter].name = params[i].name; - nodeParams[nodeParamsCounter++].tensorParam = tensor; - break; - } - case QNN_PARAMTYPE_SCALAR: { - nodeParams[nodeParamsCounter].paramType = QNN_PARAMTYPE_SCALAR; - 
nodeParams[nodeParamsCounter].name = params[i].name; - nodeParams[nodeParamsCounter++].scalarParam = params[i].scalarParam; - break; - } - default: { - PRINT_ERROR("QnnModel::addNode() unknown param type passed for param %s on node %s.\n", - params[i].name, - name); - FREE_MEMORY(nodeParams, inputs, outputs); - return MODEL_PARAMS_ERROR; - } - } - } - - size_t inputsCounter = 0; - for (size_t j = 0; j < numOfInputs; j++) { - nodeError = getQnnTensor(name, inputNames[j], inputs[inputsCounter++]); - if (nodeError != MODEL_NO_ERROR) { - PRINT_ERROR("QnnModel::addNode() getQnnTensor() failed for tensor %s on node %s.\n", - inputNames[j].c_str(), - name); - FREE_MEMORY(nodeParams, inputs, outputs); - return nodeError; - } - } - - size_t outputsCounter = 0; - m_modelOutputTensorMap[name] = {}; - for (size_t k = 0; k < numOfOutputs; k++) { - // create node output tensors first - nodeError = addTensor(name, outputTensors[k]); - if (nodeError != MODEL_NO_ERROR) { - PRINT_ERROR("QnnModel::addNode() addTensor() failed for tensor %s on node %s\n", - QNN_TENSOR_GET_NAME(outputTensors[k]), - name); - FREE_MEMORY(nodeParams, inputs, outputs); - return nodeError; - } - const char *outTensorName = QNN_TENSOR_GET_NAME(outputTensors[k]); - m_modelOutputTensorMap[name].push_back(outTensorName); - nodeError = getQnnTensor(name, outTensorName, outputs[outputsCounter++]); - if (nodeError != MODEL_NO_ERROR) { - PRINT_ERROR("QnnModel::addNode() getQnnTensor() failed for tensor %s on node %s.\n", - outTensorName, - name); - FREE_MEMORY(nodeParams, inputs, outputs); - return nodeError; - } - } - - // define and add node to graph - QNN_OP_CFG_SET_NAME(opDefinition, name); - QNN_OP_CFG_SET_PACKAGE_NAME(opDefinition, packageName); - QNN_OP_CFG_SET_TYPE_NAME(opDefinition, type); - QNN_OP_CFG_SET_PARAMS(opDefinition, numOfParams, nodeParams); - QNN_OP_CFG_SET_INPUTS(opDefinition, numOfInputs, inputs); - QNN_OP_CFG_SET_OUTPUTS(opDefinition, numOfOutputs, outputs); - - if (m_doNodeValidations) { - 
auto validationStatus = m_qnnInterface.backendValidateOpConfig(m_backendHandle, opDefinition); - if (validationStatus == QNN_BACKEND_ERROR_NOT_SUPPORTED) { - PRINT_DEBUG("QnnModel::addNode() validation API not supported.\n"); - } else if (validationStatus != QNN_SUCCESS) { - PRINT_ERROR("QnnModel::addNode() validating node %s failed.\n", name); - FREE_MEMORY(nodeParams, inputs, outputs); - return MODEL_GRAPH_ERROR; - } - } - - if (m_qnnInterface.graphAddNode(m_graph, opDefinition) != QNN_GRAPH_NO_ERROR) { - PRINT_ERROR("QnnModel::addNode() adding node %s failed.\n", name); - FREE_MEMORY(nodeParams, inputs, outputs); - return MODEL_GRAPH_ERROR; - } - - FREE_MEMORY(nodeParams, inputs, outputs); - return MODEL_NO_ERROR; -} - -ModelError_t QnnModel::freeCachedTensors() { - ModelError_t err = MODEL_NO_ERROR; - - // cleanup cached tensors - for (std::map::iterator tensorIt = m_modelTensorsMap.begin(); - tensorIt != m_modelTensorsMap.end();) { - Qnn_Tensor_t &tensor = tensorIt->second; - if (QNN_TENSOR_GET_TYPE(tensor) != QNN_TENSOR_TYPE_APP_WRITE && - QNN_TENSOR_GET_TYPE(tensor) != QNN_TENSOR_TYPE_APP_READ) { - VALIDATE(freeQnnTensor(tensor), err); - tensorIt = m_modelTensorsMap.erase(tensorIt); - } else { - tensorIt++; - } - } - - return err; -} - -ModelError_t QnnModel::finalize(Qnn_ProfileHandle_t profile, Qnn_SignalHandle_t signal) { - ModelError_t err; - - // finalize the graph - if (m_qnnInterface.graphFinalize(m_graph, profile, signal) != QNN_GRAPH_NO_ERROR) { - PRINT_ERROR("QnnModel::finalize() finalizing graph failed.\n"); - return MODEL_GRAPH_ERROR; - } - - VALIDATE(freeCachedTensors(), err); - - return err; -} - -ModelError_t getGraphInfoFromModels(QnnModel *models, - uint32_t numModels, - GraphInfoPtr_t **graphsInfo) { - ModelError_t err = MODEL_NO_ERROR; - if (models == nullptr || graphsInfo == nullptr || numModels <= 0) { - PRINT_ERROR( - "getGraphInfoFromModels() models and graphsInfo uninitialized or number of models is " - "<= 0.\n"); - return 
MODEL_GRAPH_ERROR; - } - - *graphsInfo = (GraphInfo_t **)malloc(numModels * sizeof(GraphInfo_t *)); - if (*graphsInfo == nullptr) { - PRINT_ERROR("getGraphInfoFromModels() graphsInfo malloc returned nullptr.\n"); - return MODEL_GRAPH_ERROR; - } - - GraphInfo_t *graphArr = (GraphInfo_t *)malloc(numModels * sizeof(GraphInfo_t)); - if (graphArr == nullptr) { - PRINT_ERROR("getGraphInfoFromModels() graphArr malloc returned nullptr.\n"); - return MODEL_GRAPH_ERROR; - } - - for (uint32_t i = 0; i < numModels; i++) { - QnnModel &model = models[i]; - graphArr[i].graph = model.getQnnGraph(); - graphArr[i].graphName = - strnDup(model.getQnnGraphName().c_str(), model.getQnnGraphName().size()); - if (graphArr[i].graphName == nullptr) { - PRINT_ERROR("getGraphInfoFromModels() failed to construct graphName. Received nullptr.\n"); - return MODEL_GRAPH_ERROR; - } - - // allocate and add graph input/output TensorsWrapper. Note: no need to make deep copies of - // the tensor's pointer members as they are already allocated on heap in the addTensor - // function call. 
- std::vector graphInputTensors = model.getGraphInputTensors(); - size_t numInputTensors = graphInputTensors.size(); - size_t inputTensorsSize = numInputTensors * sizeof(Qnn_Tensor_t); - graphArr[i].inputTensors = (Qnn_Tensor_t *)malloc(inputTensorsSize); - memscpy(graphArr[i].inputTensors, inputTensorsSize, graphInputTensors.data(), inputTensorsSize); - graphArr[i].numInputTensors = (uint32_t)numInputTensors; - // allocate and add graph outputTensors - std::vector graphOutputTensors = model.getGraphOutputTensors(); - size_t numOutputTensors = graphOutputTensors.size(); - size_t outputTensorsSize = numOutputTensors * sizeof(Qnn_Tensor_t); - graphArr[i].outputTensors = (Qnn_Tensor_t *)malloc(outputTensorsSize); - memscpy( - graphArr[i].outputTensors, outputTensorsSize, graphOutputTensors.data(), outputTensorsSize); - graphArr[i].numOutputTensors = (uint32_t)numOutputTensors; - - // have return object point to the populated graph struct - (*graphsInfo)[i] = graphArr + i; - - // graph composition is complete by this stage, free if any cached tensors remaining - VALIDATE(model.freeCachedTensors(), err); - } - - return err; -} - -ModelError_t getSingleGraphInfoFromModel(QnnModel &model, GraphInfoPtr_t* graphInfoPtr) { - ModelError_t err = MODEL_NO_ERROR; - - *graphInfoPtr = (GraphInfo_t *)malloc(sizeof(GraphInfo_t)); - auto graphInfo = *graphInfoPtr; - if (graphInfo == nullptr) { - PRINT_ERROR("getGraphInfoFromModels() graphsInfo malloc returned nullptr.\n"); - return MODEL_GRAPH_ERROR; - } - - graphInfo->graph = model.getQnnGraph(); - graphInfo->graphName = - strnDup(model.getQnnGraphName().c_str(), model.getQnnGraphName().size()); - if (graphInfo->graphName == nullptr) { - PRINT_ERROR("getGraphInfoFromModels() failed to construct graphName. Received nullptr.\n"); - return MODEL_GRAPH_ERROR; - } - - // allocate and add graph input/output TensorsWrapper. 
Note: no need to make deep copies of - // the tensor's pointer members as they are already allocated on heap in the addTensor - // function call. - std::vector graphInputTensors = model.getGraphInputTensors(); - size_t numInputTensors = graphInputTensors.size(); - size_t inputTensorsSize = numInputTensors * sizeof(Qnn_Tensor_t); - graphInfo->inputTensors = (Qnn_Tensor_t *)malloc(inputTensorsSize); - memscpy(graphInfo->inputTensors, inputTensorsSize, graphInputTensors.data(), inputTensorsSize); - graphInfo->numInputTensors = (uint32_t)numInputTensors; - // allocate and add graph outputTensors - std::vector graphOutputTensors = model.getGraphOutputTensors(); - size_t numOutputTensors = graphOutputTensors.size(); - size_t outputTensorsSize = numOutputTensors * sizeof(Qnn_Tensor_t); - graphInfo->outputTensors = (Qnn_Tensor_t *)malloc(outputTensorsSize); - memscpy( - graphInfo->outputTensors, outputTensorsSize, graphOutputTensors.data(), outputTensorsSize); - graphInfo->numOutputTensors = (uint32_t)numOutputTensors; - - // graph composition is complete by this stage, free if any cached tensors remaining - VALIDATE(model.freeCachedTensors(), err); - return err; -} - -ModelError_t freeGraphsInfo(GraphInfoPtr_t **graphsInfo, uint32_t numGraphs) { - if (graphsInfo == nullptr || *graphsInfo == nullptr) { - PRINT_ERROR("freeGraphsInfo() invalid graphsInfo."); - return MODEL_TENSOR_ERROR; - } - for (uint32_t i = 0; i < numGraphs; i++) { - PRINT_INFO("Freeing graph in freeGraphInfo"); - free((*graphsInfo)[i]->graphName); - freeQnnTensors((*graphsInfo)[i]->inputTensors, (*graphsInfo)[i]->numInputTensors); - freeQnnTensors((*graphsInfo)[i]->outputTensors, (*graphsInfo)[i]->numOutputTensors); - } - - free(**graphsInfo); - free(*graphsInfo); - *graphsInfo = nullptr; - - return MODEL_NO_ERROR; -} - -ModelError_t QnnModel::freeTensors() { - - for (std::map::iterator tensorIt = m_modelTensorsMap.begin(); - tensorIt != m_modelTensorsMap.end();) { - Qnn_Tensor_t &tensor = 
tensorIt->second; - - tensorIt = m_modelTensorsMap.erase(tensorIt++); - } - - return MODEL_NO_ERROR; -} - -ModelError_t QnnModel::clearGraph() { - - m_modelInputTensors.resize(0); - m_modelOutputTensors.resize(0); - - m_modelOutputTensorMap.clear(); - m_graphName.clear(); - - - - return MODEL_NO_ERROR; -} - -} // namespace qnn_wrapper_api \ No newline at end of file diff --git a/src/backends/qnn/Model/QnnModel.hpp b/src/backends/qnn/Model/QnnModel.hpp deleted file mode 100644 index 6521d76e5..000000000 --- a/src/backends/qnn/Model/QnnModel.hpp +++ /dev/null @@ -1,270 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2019-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -#pragma once - -#include -#include -#include -#include - -#include "QnnInterface.h" -#include "QnnLog.h" -#include "QnnModelPal.hpp" -#include "../WrapperUtils/QnnWrapperUtils.hpp" - -namespace qnn_wrapper_api { - -class QnnModel { - public: - ~QnnModel() = default; - - /** - * @brief Creates a Qnn Graph within given context. - * - * @param[in] backendHandle A handle to the QNN backend handle which will be used to query the API - * symbols - * - * @param[in] qnnInterface the QNN backend interface to use - * - * @param[in] context A handler to the context where the model's graph would be created. - * - * @param[in] graphName The name to use for creating a graph in the context provided. - * - * @param[in] debug If flag is true, sets all tensors created in model to be - * QNN_TENSOR_TYPE_APP_READ, essentially overwriting what is set - * in Qnn_TensorType. - * - * @param[in] doNodeValidations If flag is set, all nodes added with addNode call - * will be validated by Backend - * - * @param[in] graphConfigs Array of graph configurations to use for creating the QNN Graph. 
- * Default: nullptr - * - */ - ModelError_t initialize(const Qnn_BackendHandle_t& backendHandle, - const QNN_INTERFACE_VER_TYPE& qnnInterface, - const Qnn_ContextHandle_t& context, - const char* graphName, - bool debug, - uint8_t doNodeValidations = 1, - const QnnGraph_Config_t** graphConfigs = nullptr); - - /** - * @brief A wrapper function to create a tensor inside class's context graph. - * - * @param[in] nodeName Lookup name for node/layer - * - * @param[in] tensor A pointer to a struct containing information on the tensor - * - * @param[in] saveTensor Flag to indicate if tensor should be saved in object for later retrieval - * with class getter functions. - * - * @return Error code - * - */ - ModelError_t addTensor(const char* nodeName, Qnn_Tensor_t* tensor, bool saveTensor = true); - - /** - * @brief A wrapper function to create a tensor inside class's context graph. - * - * @param[in] nodeName Lookup name for node/layer - * - * @param[in] tensor A struct containing information on the tensor - * - * @param[in] saveTensor Flag to indicate if tensor should be saved in object for later retrieval - * with class getter functions. - * - * @return Error code - * - */ - ModelError_t addTensor(const char* nodeName, Qnn_Tensor_t tensor, bool saveTensor = true); - - /** - * @brief function to be used to query tensors created within this QnnModel instance - * - * @param[in] nodeName Lookup name for node/layer - * - * @param[in] tensorName Lookup name for tensor - * - * @param[out] tensor The corresponding Qnn_Tensor_t object for given tensor name. - * - * @return Error code - * - */ - ModelError_t getQnnTensor(const char*& nodeName, const char*& tensorName, Qnn_Tensor_t& tensor); - ModelError_t getQnnTensor(std::string nodeName, std::string tensorName, Qnn_Tensor_t& tensor); - - /** - * @brief A wrapper function to create a node in class's graph. - * - * @param[in] version The QNN version for Op_Config_t structure to use (e.g. 
- * QNN_OPCONFIG_VERSION_1) - * - * @param[in] name The node name to use (e.g. my_graph_conv_1) - * - * @param[in] packageName The node package name (e.g. qti.aisw) - * - * @param[in] type The QNN_OP_QNN_OP_H node type (e.g. QNN_OP_ARGMAX) - * - * @param[in] params A struct object containing all the params for the node to be added. For - * tensorParam case. The tensor will be created within the function and the data will be retrieved - * from the binary blob to set the tensor data. - * - * @param[in] numOfParams The number of elements in above params object - * - * @param[in] inputNames List of tensor names for inputs to node. Note: the corresponding qnn - * tensor objects must be created within this instance prior to being listed as input to a node - * - * @param[in] numOfInputs The number of elements in above inputNames object - * - * @param[in] outputTensors List of Qnn_Tensor_t objects for outputs from node. - * Note1: the corresponding qnn tensor objects will be created in - * function and must not already exist. 
Note2: the output names must be unique per graph - * - * @param[in] numOfOutputs The number of elements in above outputs object - * - * @return Error code - * - */ - ModelError_t addNode(Qnn_OpConfigVersion_t version, - const char* name, - const char* packageName, - const char* type, - Qnn_Param_t* params, - uint32_t numOfParams, - const char** inputNames, - uint32_t numOfInputs, - Qnn_Tensor_t* outputTensors, - uint32_t numOfOutputs); - // overload for vector of inputNames - ModelError_t addNode(Qnn_OpConfigVersion_t version, - const char* name, - const char* packageName, - const char* type, - Qnn_Param_t* params, - uint32_t numOfParams, - std::vector inputNames, - uint32_t numOfInputs, - Qnn_Tensor_t* outputTensors, - uint32_t numOfOutputs); - - /** - * @brief A wrapper function to return model's graph - * - * @return The Qnn graph object - * - */ - Qnn_GraphHandle_t getQnnGraph() { return m_graph; } - - /** - * @brief A wrapper function to return model's graphName - * - * @return The Qnn graph object's name - * - */ - std::string getQnnGraphName() { return m_graphName; } - - /** - * @brief A wrapper function to return model's graph input tensors - * - * @return vector of Qnn_Tensor_t objects - * - */ - std::vector getGraphInputTensors() { return m_modelInputTensors; } - - /** - * @brief A wrapper function to return model's graph output tensors - * - * @return vector of Qnn_Tensor_t objects - * - */ - std::vector getGraphOutputTensors() { return m_modelOutputTensors; } - - /** - * @brief A wrapper function to return graph's output tensors->op mapping - * - * @return map of std::string, std::vector - * - */ - std::map> getOutputTensorMap() { - return m_modelOutputTensorMap; - } - - /** - * @brief A wrapper function to finalize model's graph which includes calling backend finalize on - * graph. 
- * - * @return Error code - * - */ - ModelError_t finalize(Qnn_ProfileHandle_t profile = nullptr, Qnn_SignalHandle_t signal = nullptr); - - /** - * @brief Removes saved Qnn_Tensor_t objects and frees memory - * Note: Cleanup doesnt apply to input/output tensors as they are needed - * beyond this class finishes graph construction for the execute call. User of this API is - * expected to free those. - * - * @return Error code - */ - ModelError_t freeCachedTensors(); - - - ModelError_t freeTensors(); - - ModelError_t clearGraph(); - - private: - Qnn_GraphHandle_t m_graph = nullptr; - std::string m_graphName; - bool m_debug = false; // flag to indicate if requested graph is to be run in debug mode - // (i.e. all intermediate tensors will be accessible to client) - // flag to indicate whether all addNode calls need to be validated - bool m_doNodeValidations = true; - - std::vector m_modelInputTensors; - std::vector m_modelOutputTensors; - // keeps track of graph tensors to enable creating Qnn nodes from tensor names - std::map m_modelTensorsMap; - std::map> m_modelOutputTensorMap; - - // Qnn Backend Interface Api - QNN_INTERFACE_VER_TYPE m_qnnInterface; - Qnn_BackendHandle_t m_backendHandle; - -}; // QNN_MODEL_CLASS - -/** - * @brief A helper function to convert QnnModel objects to Graph struct for qnn_model c - * interface - * @param[in] models List of QnnModel objects - * @param[in] numModels The number of elements in above models object - * - * @param[out] graphsInfo The corresponding array of Graph object for each of the above model - * objects(note: this function will malloc memory needed to store the struct objects. Following free - * shall be invoked when objects are no longer needed. 
- * - * @return Error code - * - */ -ModelError_t getGraphInfoFromModels(QnnModel* models, - uint32_t numModels, - GraphInfoPtr_t** graphsInfo); -ModelError_t getSingleGraphInfoFromModel(QnnModel &model, GraphInfoPtr_t* graphInfoPtr); - -/** - * @brief A helper function to free memory malloced for communicating the Graph for a model(s) - * @param[in] graphsInfo Pointer pointing to location of graph objects - * @param[in] numGraphs The number of graph objects the above pointer is pointing to - * - * @return Error code - * - */ -ModelError_t freeGraphsInfo(GraphInfoPtr_t** graphsInfo, uint32_t numGraphs); -} // namespace qnn_wrapper_api diff --git a/src/backends/qnn/Model/QnnModelPal.cpp b/src/backends/qnn/Model/QnnModelPal.cpp deleted file mode 100644 index ab4d556d6..000000000 --- a/src/backends/qnn/Model/QnnModelPal.cpp +++ /dev/null @@ -1,27 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2021-2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -#include -#include -#include - -#include "QnnModelPal.hpp" - -namespace qnn_wrapper_api { -void *dlSym(void *handle, const char *symbol) { - if (handle == DL_DEFAULT) { - return ::dlsym(RTLD_DEFAULT, symbol); - } - - return ::dlsym(handle, symbol); -} - -char *dlError(void) { return ::dlerror(); } - -char *strnDup(const char *source, size_t maxlen) { return ::strndup(source, maxlen); } -} // namespace qnn_wrapper_api \ No newline at end of file diff --git a/src/backends/qnn/Model/QnnModelPal.hpp b/src/backends/qnn/Model/QnnModelPal.hpp deleted file mode 100644 index 00bf45cac..000000000 --- a/src/backends/qnn/Model/QnnModelPal.hpp +++ /dev/null @@ -1,54 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2021-2022 Qualcomm Technologies, Inc. 
-// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -namespace qnn_wrapper_api { - -// specify this address to distingiush from NULL pointer -#define DL_DEFAULT (void *)(0x4) - -//--------------------------------------------------------------------------- -/// @brief -/// obtain address of a symbol in a shared object or executable -/// @handle -/// a handle of a dynamic loaded shared object returned by dlopen -/// @symbol -/// a null-terminated symbol name -/// @return -/// On success, return the address associated with symbol -/// On error, NULL -//--------------------------------------------------------------------------- -void *dlSym(void *handle, const char *symbol); - -//--------------------------------------------------------------------------- -/// @brief -/// obtain error diagnostic for functions in the dl-family APIs. -/// @return -/// returns a human-readable, null-terminated string describing the most -/// recent error that occurred from a call to one of the functions in the -/// dl-family APIs. -/// -//--------------------------------------------------------------------------- -char *dlError(void); - -//--------------------------------------------------------------------------- -/// @brief -/// Returns a pointer to a null-terminated byte string, which contains copies -/// of at most maxlen bytes from the string pointed to by str. If the null -/// terminator is not encountered in the first maxlen bytes, it is added to -/// the duplicated string. -/// @source -/// Null-terminated source string. -/// @maxlen -/// Max number of bytes to copy from str -/// @return -/// A pointer to the newly allocated string, or a null pointer if an error -/// occurred. 
-/// -//--------------------------------------------------------------------------- -char *strnDup(const char *source, size_t maxlen); -} // namespace qnn_wrapper_api \ No newline at end of file diff --git a/src/backends/qnn/PAL/include/PAL/Debug.hpp b/src/backends/qnn/PAL/include/PAL/Debug.hpp deleted file mode 100644 index d03331c26..000000000 --- a/src/backends/qnn/PAL/include/PAL/Debug.hpp +++ /dev/null @@ -1,21 +0,0 @@ -//============================================================================ -// -// Copyright (c) 2020-2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================ - -#pragma once - -#define DEBUG_ON 0 - -#if DEBUG_ON -#define DEBUG_MSG(...) \ - { \ - fprintf(stderr, __VA_ARGS__); \ - fprintf(stderr, "\n"); \ - } -#else -#define DEBUG_MSG(...) -#endif diff --git a/src/backends/qnn/PAL/include/PAL/Directory.hpp b/src/backends/qnn/PAL/include/PAL/Directory.hpp deleted file mode 100644 index 435d5ee7e..000000000 --- a/src/backends/qnn/PAL/include/PAL/Directory.hpp +++ /dev/null @@ -1,80 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2008-2014, 2020-2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================== - -//--------------------------------------------------------------------------- -/// @file -/// This file includes APIs for directory operations on supported platforms -//--------------------------------------------------------------------------- - -#pragma once - -#include - -#include "PAL/FileOp.hpp" - -namespace pal { -class Directory; -} - -class pal::Directory { - public: - using DirMode = pal::FileOp::FileMode; - //--------------------------------------------------------------------------- - /// @brief - /// Creates a directory in the file system. - /// @param path - /// Name of directory to create. - /// @param dirmode - /// Directory mode - /// @return - /// True if - /// 1. create a directory successfully - /// 2. or directory exist already - /// False otherwise - /// - /// For example: - /// - /// - Create a directory in default. - /// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - /// pal::Directory::Create(path, pal::Directory::DirMode::S_DEFAULT_); - /// pal::Directory::Create(path); - /// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - /// - /// - Create a directory with specific permission. - /// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - /// pal::Directory::Create(path, pal::Directory::DirMode::S_IRWXU_| - /// pal::Directory::DirMode::S_IRWXG_| - /// pal::Directory::DirMode::S_IRWXO_); - /// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - /// - /// @note For windows, dirmode is not used. - /// @note For linux, dirmode is used to set the permission of the folder. 
- //--------------------------------------------------------------------------- - static bool create(const std::string &path, - pal::Directory::DirMode dirmode = pal::Directory::DirMode::S_DEFAULT_); - - //--------------------------------------------------------------------------- - /// @brief - /// Removes the entire directory whether it's empty or not. - /// @param path - /// Name of directory to delete. - /// @return - /// True if the directory was successfully deleted, false otherwise. - //--------------------------------------------------------------------------- - static bool remove(const std::string &path); - - //--------------------------------------------------------------------------- - /// @brief - /// Creates a directory and all parent directories required. - /// @param path - /// Path of directory to create. - /// @return - /// True if the directory was successfully created, false otherwise. - //--------------------------------------------------------------------------- - static bool makePath(const std::string &path); -}; diff --git a/src/backends/qnn/PAL/include/PAL/DynamicLoading.hpp b/src/backends/qnn/PAL/include/PAL/DynamicLoading.hpp deleted file mode 100644 index 8a2e18cee..000000000 --- a/src/backends/qnn/PAL/include/PAL/DynamicLoading.hpp +++ /dev/null @@ -1,99 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2020-2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================== - -//--------------------------------------------------------------------------- -/// @file -/// This file includes APIs for dynamic loading on supported platforms -//--------------------------------------------------------------------------- - -#pragma once - -#include - -namespace pal { -namespace dynamicloading { -// we only support subset of POSIX of dlopen/dlsym/dladdr/dlerror/dlclose -// except the following flags for dlopen, others should be done only -// when we really need them -// DL_NOW is MUST -// DL_LOCAL is enabled if not specified -enum { - DL_NOW = 0x0001, - DL_LOCAL = 0x0002, - DL_GLOBAL = 0x0004, -}; - -// specify this address to distingiush from NULL pointer -#define DL_DEFAULT (void *)(0x4) - -//--------------------------------------------------------------------------- -/// @brief -/// Loads the dynamic shared object -/// @param filename -/// If contains path separators, treat it as relative or absolute pathname -/// or search it for the rule of dynamic linker -/// @param flags -/// - DL_NOW: resolve undefined symbols before return. MUST be specified. -/// - DL_LOCAL: optional, but the default specified. Symbols defined in this -/// shared object are not made available to resolve references in subsequently -/// loaded shared objects -/// - DL_GLOBAL: optional, resolve symbol globally -/// @return -/// On success, a non-NULL handle for the loaded library. 
-/// On error, NULL -//--------------------------------------------------------------------------- -void *dlOpen(const char *filename, int flags); - -//--------------------------------------------------------------------------- -/// @brief -/// Obtain address of a symbol in a shared object or executable -/// @param handle -/// A handle of a dynamic loaded shared object returned by dlopen -/// @param symbol -/// A null-terminated symbol name -/// @return -/// On success, return the address associated with symbol -/// On error, NULL -//--------------------------------------------------------------------------- -void *dlSym(void *handle, const char *symbol); - -//--------------------------------------------------------------------------- -/// @brief -/// Translate the address of a symbol to the path of the belonging shared object -/// @param addr -/// Address of symbol in a shared object -/// @param path -/// Full name of shared object that contains address, usually it is an absolute path -/// @return -/// On success, return a non-zero value -/// On error, return 0 -//--------------------------------------------------------------------------- -int dlAddrToLibName(void *addr, std::string &name); - -//--------------------------------------------------------------------------- -/// @brief -/// Decrements the reference count on the dynamically loaded shared object -/// referred to by handle. If the reference count drops to 0, then the -/// object is unloaded. -/// @return -/// On success, 0; on error, a nonzero value -//--------------------------------------------------------------------------- -int dlClose(void *handle); - -//--------------------------------------------------------------------------- -/// @brief -/// Obtain error diagnostic for functions in the dl-family APIs. -/// @return -/// Returns a human-readable, null-terminated string describing the most -/// recent error that occurred from a call to one of the functions in the -/// dl-family APIs. 
-//--------------------------------------------------------------------------- -char *dlError(void); - -} // namespace dynamicloading -} // namespace pal diff --git a/src/backends/qnn/PAL/include/PAL/FileOp.hpp b/src/backends/qnn/PAL/include/PAL/FileOp.hpp deleted file mode 100644 index 3218719c1..000000000 --- a/src/backends/qnn/PAL/include/PAL/FileOp.hpp +++ /dev/null @@ -1,238 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2008-2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -//------------------------------------------------------------------------------ -/// @file -/// This file includes APIs for file operations on the supported platforms -//------------------------------------------------------------------------------ - -#pragma once - -#include - -#include -#include - -namespace pal { -class FileOp; -} - -//------------------------------------------------------------------------------ -/// @brief -/// FileOp contains OS Specific file system functionality. -//------------------------------------------------------------------------------ -class pal::FileOp { - public: - // enum for symbolic constants mode, strictly follow linux usage - // windows or another OS user should transfer the usage - // ref : http://man7.org/linux/man-pages/man2/open.2.html - enum class FileMode : uint32_t { - S_DEFAULT_ = 0777, - S_IRWXU_ = 0700, - S_IRUSR_ = 0400, - S_IWUSR_ = 0200, - S_IXUSR_ = 0100, - S_IRWXG_ = 0070, - S_IRGRP_ = 0040, - S_IWGRP_ = 0020, - S_IXGRP_ = 0010, - S_IRWXO_ = 0007, - S_IROTH_ = 0004, - S_IWOTH_ = 0002, - S_IXOTH_ = 0001 - }; - - //--------------------------------------------------------------------------- - /// @brief - /// Copies a file from one location to another, overwrites if the - /// destination already exists. 
- /// @param source - /// File name of the source file. - /// @param target - /// File name of the target file. - /// @return - /// True on success, otherwise false. - //--------------------------------------------------------------------------- - static bool copyOverFile(const std::string &source, const std::string &target); - - //--------------------------------------------------------------------------- - /// @brief - /// Checks whether the file exists or not. - /// @param fileName - /// File name of the source file, including its complete path. - /// @return - /// True on success, otherwise false. - //--------------------------------------------------------------------------- - static bool checkFileExists(const std::string &fileName); - - //--------------------------------------------------------------------------- - /// @brief - /// Renames an existing file. If the file with target name exists, this call - /// overwrites it with the file with source name. - /// @param source - /// Current File name. - /// @param target - /// New name of the file. - /// @param overwrite - /// Flag indicating to overwrite existing file with newName - /// @return - /// True if successful, otherwise false. - /// @warning - /// Does not work if source and target are on different filesystems. - //--------------------------------------------------------------------------- - static bool move(const std::string &source, const std::string &target, bool overwrite); - - //--------------------------------------------------------------------------- - /// @brief - /// Delete an existing file - /// @param fileName - /// File name of the file to be deleted. - /// @return - /// True if successful, otherwise false. 
- //--------------------------------------------------------------------------- - static bool deleteFile(const std::string &fileName); - - //--------------------------------------------------------------------------- - /// @brief - /// Check if path is a directory or not - /// @param path - /// Path to check - /// @return - /// True if successful, otherwise false. - //--------------------------------------------------------------------------- - static bool checkIsDir(const std::string &path); - - //--------------------------------------------------------------------------- - /// @brief Data type representing parts of a filename - //--------------------------------------------------------------------------- - typedef struct { - //--------------------------------------------------------------------------- - /// @brief Name of the file without the extension (i.e., basename) - //--------------------------------------------------------------------------- - std::string basename; - - //--------------------------------------------------------------------------- - /// @brief Name of the file extension (i.e., .txt or .hlnd, .html) - //--------------------------------------------------------------------------- - std::string extension; - - //--------------------------------------------------------------------------- - /// @brief - /// Location of the file (i.e., /abc/xyz/foo.bar <-- /abc/xyz/). - /// If the file name has no location then the Directory points to - /// empty string - //--------------------------------------------------------------------------- - std::string directory; - } FilenamePartsType_t; - - //--------------------------------------------------------------------------- - /// @brief - /// Determines the components of a given filename, being the directory, - /// basename and extension. 
If the file has no location or extension, these - /// components remain empty - /// @param filename - /// Path of the file for which the components are to be determined - /// @param filenameParts - /// Will contain the file name components when this function returns - /// @return - /// True if successful, false otherwise - //--------------------------------------------------------------------------- - static bool getFileInfo(const std::string &filename, FilenamePartsType_t &filenameParts); - - //--------------------------------------------------------------------------- - /// @brief - /// Typedef for a vector of FilenamePartsType_t - //--------------------------------------------------------------------------- - typedef std::vector FilenamePartsListType_t; - - //--------------------------------------------------------------------------- - /// @brief - /// Typedef for a vector of FilenamePartsType_t const iterator - //--------------------------------------------------------------------------- - typedef std::vector::const_iterator FilenamePartsListTypeIter_t; - - //--------------------------------------------------------------------------- - /// @brief - /// Returns a vector of FilenamePartsType_t objects for a given directory - /// @param path - /// Path to scan for files - /// @return - /// True if successful, false otherwise - //--------------------------------------------------------------------------- - static bool getFileInfoList(const std::string &path, FilenamePartsListType_t &filenamePartsList); - - //--------------------------------------------------------------------------- - /// @brief - /// Returns a vector of FilenamePartsType_t objects for a given directory - /// and the child directories inside. 
- /// @param path - /// Path to directory to scan for files for - /// @note if path is not a directory - the function will return false - /// @param filenamePartList - /// List to append to - /// @param ignoreDirs - /// If this flag is set to true, directories (and symbolic links to directories) - /// are not included in the list. Only actual files below the specified - /// directory path will be appended. - /// @return True if successful, false otherwise - /// @note Directories in list only populate Directory member variable of the struct. - /// That is Basename and Extension will be empty strings. - /// @note Symbolic links to directories are not followed. This is to avoid possible - /// infinite recursion. However the initial call to this method can have - /// path to be a symbolic link to a directory. If ignoreDirs is true, - /// symbolic links to directories are also ignored. - /// @note The order in which the files/directories are listed is platform - /// dependent. However files inside a directory always come before the - /// directory itself. 
- //--------------------------------------------------------------------------- - static bool getFileInfoListRecursive(const std::string &path, - FilenamePartsListType_t &filenamePartsList, - const bool ignoreDirs); - - //--------------------------------------------------------------------------- - /// @brief - /// Create an absolute path from the supplied path - /// @param path - /// Path should not contain trailing '/' or '\\' - /// @return - /// Return absolute path without trailing '/' or '\\' - //--------------------------------------------------------------------------- - static std::string getAbsolutePath(const std::string &path); - - //--------------------------------------------------------------------------- - /// @brief Get the file name from a path - //--------------------------------------------------------------------------- - static std::string getFileName(const std::string &file); - - //--------------------------------------------------------------------------- - /// @brief Get the directory path to a file - //--------------------------------------------------------------------------- - static std::string getDirectory(const std::string &file); - - //--------------------------------------------------------------------------- - /// @brief Get the current working directory. - /// @returns The absolute CWD or empty string if the path could not be - /// retrieved (because it was too long or deleted for example). 
- //--------------------------------------------------------------------------- - static std::string getCurrentWorkingDirectory(); - - //--------------------------------------------------------------------------- - /// @brief Set the current working directory - //--------------------------------------------------------------------------- - static bool setCurrentWorkingDirectory(const std::string &workingDir); - - //--------------------------------------------------------------------------- - /// @brief Returns true if the file contains any extension or false. - //--------------------------------------------------------------------------- - static bool hasFileExtension(const std::string &file); - - //--------------------------------------------------------------------------- - /// @brief Returns full path of file, Directory/Basename(.Extension, if any) - //--------------------------------------------------------------------------- - static std::string partsToString(const FilenamePartsType_t &filenameParts); -}; diff --git a/src/backends/qnn/PAL/include/PAL/GetOpt.hpp b/src/backends/qnn/PAL/include/PAL/GetOpt.hpp deleted file mode 100644 index 23a3d10ed..000000000 --- a/src/backends/qnn/PAL/include/PAL/GetOpt.hpp +++ /dev/null @@ -1,93 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2020-2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================== - -//-------------------------------------------------------------------------------- -/// @file -/// This file includes APIs for the command line parsing on supported platforms -//-------------------------------------------------------------------------------- - -#pragma once - -namespace pal { -// we implement a similar API for POSIX.2 -// so that some global var are necessary - -extern const char *g_optArg; -extern int g_optInd; - -enum { - no_argument = 0, - required_argument = 1, - optional_argument = 2, -}; - -//-------------------------------------------------------------------------------------------------- -/// @brief -/// This structure describes a single long option name for the sake of getopt_long. The argument -/// longopts must be an array of these structures, one for each long option. Terminate the array -/// with an element containing all zeros. -//-------------------------------------------------------------------------------------------------- -struct Option { - //-------------------------------------------------------------------------------------------------- - /// @brief The name of the long option. - //-------------------------------------------------------------------------------------------------- - const char *name; - - //-------------------------------------------------------------------------------------------------- - /// @brief - /// If the option does not take an argument, no_argument (or 0). - /// If the option requires an argument, required_argument (or 1). - //-------------------------------------------------------------------------------------------------- - int hasArg; - - //-------------------------------------------------------------------------------------------------- - /// @brief - /// Specifies how results are returned for a long option. - /// If flag is NULL, then GetOptLongOnly() returns val. 
Otherwise, it returns 0, and flag - /// points to a variable which is set to val if the option is found, but - /// left unchanged if the option is not found. - //-------------------------------------------------------------------------------------------------- - int *flag; - - //-------------------------------------------------------------------------------------------------- - /// @brief - /// The value to return, or to load into the variable pointed to by flag. - /// The last element of the array has to be filled with zeros. - //-------------------------------------------------------------------------------------------------- - int val; -}; - -//-------------------------------------------------------------------------------------------------- -/// @brief -/// This parses command-line options as POSIX getopt_long_only() -/// but we don't support optstring and optonal_argument now -/// @param argc -/// Argument count -/// @param argv -/// Argument array -/// @param optstring -/// Legitimate option characters, short options, don't support now -/// @param longopts -/// A pointer to the first element of an array of struct option, -/// has_arg field in the struct option indicates 3 possibilities, -/// no_argument, required_argument or optional_argument. we don't -/// support optional_argument now -/// @param longindex -/// If longindex is not NULL, it points to a variable which is set -/// to the index of the long option relative to longopts -/// @return -/// -1 for parsing done, '?' 
for non-recognized arguments, 0 for -/// flag in longopts is not NULL and saved the val to it -//-------------------------------------------------------------------------------------------------- -int getOptLongOnly(int argc, - const char *const argv[], - const char *optstring, - const struct Option *longopts, - int *longindex); - -} // namespace pal diff --git a/src/backends/qnn/PAL/include/PAL/Path.hpp b/src/backends/qnn/PAL/include/PAL/Path.hpp deleted file mode 100644 index 60b10fe5d..000000000 --- a/src/backends/qnn/PAL/include/PAL/Path.hpp +++ /dev/null @@ -1,50 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2008-2014, 2020-2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -//============================================================================== - -//------------------------------------------------------------------------------ -/// @file -/// The file includes APIs for path related operations on supported platforms -//------------------------------------------------------------------------------ - -#pragma once - -#include -#include - -namespace pal { -class Path; -} - -class pal::Path { - public: - //--------------------------------------------------------------------------- - /// @brief Returns path separator for the system - //--------------------------------------------------------------------------- - static char getSeparator(); - - //--------------------------------------------------------------------------- - /// @brief Concatenate s1 and s2 - //--------------------------------------------------------------------------- - static std::string combine(const std::string &s1, const std::string &s2); - - //--------------------------------------------------------------------------- - /// @brief Get the directory name - //--------------------------------------------------------------------------- - static std::string 
getDirectoryName(const std::string &path); - - //--------------------------------------------------------------------------- - /// @brief Get absolute path - //--------------------------------------------------------------------------- - static std::string getAbsolute(const std::string &path); - - //--------------------------------------------------------------------------- - /// @brief Check if the input path is absolute path - //--------------------------------------------------------------------------- - static bool isAbsolute(const std::string &path); - - private: -}; diff --git a/src/backends/qnn/PAL/include/PAL/StringOp.hpp b/src/backends/qnn/PAL/include/PAL/StringOp.hpp deleted file mode 100644 index 8794cda9f..000000000 --- a/src/backends/qnn/PAL/include/PAL/StringOp.hpp +++ /dev/null @@ -1,60 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2018-2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -//----------------------------------------------------------------------------- -/// @file -/// The file inludes APIs for string operations on supported platforms -//----------------------------------------------------------------------------- - -#pragma once - -#include - -namespace pal { -class StringOp; -} - -//------------------------------------------------------------------------------ -/// @brief -/// FileOp contains OS Specific file system functionality. -//------------------------------------------------------------------------------ -class pal::StringOp { - public: - //--------------------------------------------------------------------------- - /// @brief - /// Copy copy_size bytes from buffer src to buffer dst. Behaviour of the - /// function is undefined if src and dst overlap. 
- /// @param dst - /// Destination buffer - /// @param dst_size - /// Size of destination buffer - /// @param src - /// Source buffer - /// @param copy_size - /// Number of bytes to copy - /// @return - /// Number of bytes copied - //--------------------------------------------------------------------------- - static size_t memscpy(void *dst, size_t dstSize, const void *src, size_t copySize); - - //--------------------------------------------------------------------------- - /// @brief - /// Returns a pointer to a null-terminated byte string, which contains copies - /// of at most size bytes from the string pointed to by str. If the null - /// terminator is not encountered in the first size bytes, it is added to the - /// duplicated string. - /// @param source - /// Source string - /// @param maxlen - /// Max number of bytes to copy from str - /// @return - /// A pointer to the newly allocated string, or a null pointer if an error - /// occurred. - //--------------------------------------------------------------------------- - static char *strndup(const char *source, size_t maxlen); -}; diff --git a/src/backends/qnn/PAL/src/common/GetOpt.cpp b/src/backends/qnn/PAL/src/common/GetOpt.cpp deleted file mode 100644 index 700bcdf55..000000000 --- a/src/backends/qnn/PAL/src/common/GetOpt.cpp +++ /dev/null @@ -1,154 +0,0 @@ -//============================================================================= -// -// Copyright (c) 2020-2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================= - -#include - -#include - -#include "PAL/GetOpt.hpp" - -using namespace std; - -namespace pal { - -const char *g_optArg = nullptr; -int g_optInd = 1; - -static const struct Option *findOpt(const string str, - const struct Option *longopts, - int *longindex) { - const struct Option *opt = nullptr; - int idx = 0; - size_t searchEnd = str.find_first_of("="); - - for (opt = longopts; opt->name && strlen(opt->name) > 0; opt++, idx++) { - if (str.substr(0, searchEnd) == opt->name) { - if (longindex) { - *longindex = idx; - } - break; - } - } - // if not found, opt would point to the last element of longopts - // whose name MUST be empty - return opt->name ? opt : nullptr; -} - -int getOptLongOnly(int argc, - const char *const argv[], - const char *, - const struct Option *longopts, - int *longindex) { - const struct Option *opt; - int argLen = 0; - bool isShort = false; - const char *arg = ""; - - g_optArg = nullptr; - // no arg, means the end of command - if (g_optInd >= argc) { - return -1; - } - - arg = argv[g_optInd]; - - if (arg[0] != '-') { - g_optInd += 1; - return '?'; - } - - argLen = strlen(arg); - - if (argLen < 2) { - g_optInd += 1; - return '?'; - } - - if (!longopts) { - g_optInd += 1; - return '?'; - } - - // check short options with this form, -a arg - if (argLen == 2) { - isShort = true; - // check short options with this form, -a=arg - } else if (argLen > 3 && arg[2] == '=') { - isShort = true; - // check for long options, can be used for both forms - } else if (argLen > 2 && arg[1] != '=') { - if (arg[1] != '-') { - g_optInd += 1; - return '?'; - } - isShort = false; - } - - // start after -- to find the option - const char *const optStr = isShort ? 
&arg[1] : &arg[2]; - opt = findOpt(optStr, longopts, longindex); - if (!opt) { - g_optInd += 1; - return '?'; - } - - if (opt->hasArg == no_argument) { - g_optInd += 1; - - if (!opt->flag) { - return opt->val; - } else { - *(opt->flag) = opt->val; - return 0; - } - } - - if (opt->hasArg == required_argument) { - string optStr = argv[g_optInd]; - size_t assignIdx = optStr.find_first_of("="); - bool advance = (assignIdx == string::npos); - - // if it is --opt arg form, this will be true, - // so we need to advance one step to get arg - // otherwise, need to stop advance step & extract arg from argv[g_optInd] - if (advance) { - g_optInd += 1; - } - - if (g_optInd >= argc) { - return '?'; - } else { - // if advance, means it is the form --opt arg - // otherwise, the form, --opt=arg - if (advance) { - // since g_optInd is advanced, g_optArg can be assigned directly - g_optArg = argv[g_optInd]; - } else { - if (assignIdx == optStr.size()) { - return '?'; - } - // for not advanced form, - // g_optArg should point to the address right after "=" - g_optArg = &argv[g_optInd][assignIdx + 1]; - } - // OK, now we are ready to handle the next pair - g_optInd += 1; - - if (!opt->flag) { - return opt->val; - } else { - *(opt->flag) = opt->val; - return 0; - } - } - } - - return '?'; -} // end of getOptLongOnly - -} // namespace pal diff --git a/src/backends/qnn/PAL/src/common/StringOp.cpp b/src/backends/qnn/PAL/src/common/StringOp.cpp deleted file mode 100644 index d3ec614b2..000000000 --- a/src/backends/qnn/PAL/src/common/StringOp.cpp +++ /dev/null @@ -1,45 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2018-2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================== - -#include -#include - -#include "PAL/StringOp.hpp" - -//--------------------------------------------------------------------------- -// pal::StringOp::memscpy -//--------------------------------------------------------------------------- -size_t pal::StringOp::memscpy(void *dst, size_t dstSize, const void *src, size_t copySize) { - if (!dst || !src || !dstSize || !copySize) return 0; - - size_t minSize = dstSize < copySize ? dstSize : copySize; - - memcpy(dst, src, minSize); - - return minSize; -} - -//--------------------------------------------------------------------------- -// pal::StringOp::strndup -//--------------------------------------------------------------------------- -char *pal::StringOp::strndup(const char *source, size_t maxlen) { -#ifdef _WIN32 - size_t length = ::strnlen(source, maxlen); - - char *destination = (char *)malloc((length + 1) * sizeof(char)); - if (destination == nullptr) return nullptr; - - // copy length bytes to destination and leave destination[length] to be - // null terminator - strncpy_s(destination, length + 1, source, length); - - return destination; -#else - return ::strndup(source, maxlen); -#endif -} diff --git a/src/backends/qnn/PAL/src/linux/Directory.cpp b/src/backends/qnn/PAL/src/linux/Directory.cpp deleted file mode 100644 index 9f6bd2675..000000000 --- a/src/backends/qnn/PAL/src/linux/Directory.cpp +++ /dev/null @@ -1,153 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2008-2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================== - -#include -#include -#include -#ifndef __QNXNTO__ -#include -#endif -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "PAL/Directory.hpp" -#include "PAL/FileOp.hpp" -#include "PAL/Path.hpp" - -//------------------------------------------------------------------------------ -//------------------------------------------------------------------------------ -#ifdef __QNXNTO__ -static bool is_qnx_dir(const struct dirent *ep) { - struct dirent_extra *exp; - bool is_dir = false; - - for (exp = _DEXTRA_FIRST(ep); _DEXTRA_VALID(exp, ep); exp = _DEXTRA_NEXT(exp)) { - if (exp->d_type == _DTYPE_STAT || exp->d_type == _DTYPE_LSTAT) { - struct stat *statbuff = &((dirent_extra_stat *)exp)->d_stat; - if (statbuff && S_ISDIR(statbuff->st_mode)) { - is_dir = true; - break; - } - } - } - return is_dir; -} -#endif - -// ------------------------------------------------------------------------------ -// pal::Directory::create -// ------------------------------------------------------------------------------ -bool pal::Directory::create(const std::string &path, pal::Directory::DirMode dirmode) { - struct stat st; - int status = 0; - if (stat(path.c_str(), &st) != 0) { - // Directory does not exist - status = mkdir(path.c_str(), static_cast(dirmode)); - } else if (!S_ISDIR(st.st_mode)) { - errno = ENOTDIR; - status = -1; - } - return (status == 0); -} - -//------------------------------------------------------------------------------ -//------------------------------------------------------------------------------ -bool pal::Directory::remove(const std::string &dirName) { - DIR *dir; - struct dirent *entry; - - dir = opendir(dirName.c_str()); - if (dir == nullptr) { - // If the directory doesn't exist then just return true. 
- if (errno == ENOENT) { - return true; - } - return false; - } - -#ifdef __QNXNTO__ - if (dircntl(dir, D_SETFLAG, D_FLAG_STAT) == -1) { - return false; - } -#endif - - // Recursively traverse the directory tree. - while ((entry = readdir(dir)) != nullptr) { - if (strcmp(entry->d_name, ".") && strcmp(entry->d_name, "..")) { - std::stringstream ss; - ss << dirName << Path::getSeparator() << entry->d_name; - std::string path = ss.str(); -#ifdef __QNXNTO__ - if (is_qnx_dir(entry)) -#else - if (entry->d_type == DT_DIR) -#endif - { - // It's a directory so we need to drill down into it and delete - // its contents. - if (!remove(path)) { - return false; - } - } else { - if (::remove(path.c_str())) { - return false; - } - } - } - } - - closedir(dir); - - if (::remove(dirName.c_str())) { - return false; - } - - return true; -} - -bool pal::Directory::makePath(const std::string &path) { - struct stat st; - bool rc = false; - - if (path == ".") { - rc = true; - } else if (stat(path.c_str(), &st) == 0) { - if (st.st_mode & S_IFDIR) { - rc = true; - } - } else { - size_t offset = path.find_last_of(Path::getSeparator()); - if (offset != std::string::npos) { - std::string newPath = path.substr(0, offset); - if (!makePath(newPath)) { - return false; - } - } - - // There is a possible race condition, where a file/directory can be - // created in between the stat() above, and the mkdir() call here. - // So, ignore the return code from the mkdir() call, and then re-check - // for existence of the directory after it. Ensure both that it exists - // and that it is a directory - just like above. 
- mkdir(path.c_str(), 0777); - - if ((stat(path.c_str(), &st) == 0) && (st.st_mode & S_IFDIR)) { - rc = true; - } - } - - return rc; -} diff --git a/src/backends/qnn/PAL/src/linux/DynamicLoading.cpp b/src/backends/qnn/PAL/src/linux/DynamicLoading.cpp deleted file mode 100644 index 4af1d1f63..000000000 --- a/src/backends/qnn/PAL/src/linux/DynamicLoading.cpp +++ /dev/null @@ -1,88 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2020-2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -#include -#include -#include - -#include "Log.h" -#include "PAL/Debug.hpp" -#include "PAL/DynamicLoading.hpp" -const std::vector LIB_PREFIX = {"/system/lib64/", "/odm/lib64/", "/vendor/lib64/", "/data/local/tmp/mllm/qnn-lib/", "/system_ext/lib64/"}; -void *pal::dynamicloading::dlOpen(const char *filename, int flags) { - int realFlags = 0; - - if (flags & DL_NOW) { - realFlags |= RTLD_NOW; - } - - if (flags & DL_LOCAL) { - realFlags |= RTLD_LOCAL; - } - - if (flags & DL_GLOBAL) { - realFlags |= RTLD_GLOBAL; - } - - auto res = ::dlopen(filename, realFlags); - if (!res) { - for (auto prefix_ : LIB_PREFIX) { - std::string prefix = prefix_ + filename; - res = ::dlopen(prefix.c_str(), realFlags); - if (res) { - break; - } - MLLM_LOG_ERROR("{} not found", prefix); - } - } - return res; -} - -void *pal::dynamicloading::dlSym(void *handle, const char *symbol) { - if (handle == DL_DEFAULT) { - handle = RTLD_DEFAULT; - } - - return ::dlsym(handle, symbol); -} - -int pal::dynamicloading::dlAddrToLibName(void *addr, std::string &name) { - // Clean the output buffer - name = std::string(); - - // If the address is empty, return zero as treating failure - if (!addr) { - DEBUG_MSG("Input address is nullptr."); - return 0; - } - - // Dl_info do not maintain the lifetime of its string 
members, - // it would be maintained by dlopen() and dlclose(), - // so we do not need to release it manually - Dl_info info; - int result = ::dladdr(addr, &info); - - // If dladdr() successes, set name to the library name - if (result) { - name = std::string(info.dli_fname); - } else { - DEBUG_MSG("Input address could not be matched to a shared object."); - } - - return result; -} - -int pal::dynamicloading::dlClose(void *handle) { - if (!handle) { - return 0; - } - - return ::dlclose(handle); -} - -char *pal::dynamicloading::dlError(void) { return ::dlerror(); } diff --git a/src/backends/qnn/PAL/src/linux/FileOp.cpp b/src/backends/qnn/PAL/src/linux/FileOp.cpp deleted file mode 100644 index 908fe470a..000000000 --- a/src/backends/qnn/PAL/src/linux/FileOp.cpp +++ /dev/null @@ -1,356 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2008-2013,2015,2019-2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================== - -#include -#include -#include -#ifndef __QNXNTO__ -#include -#endif -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "PAL/Debug.hpp" -#include "PAL/FileOp.hpp" -#include "PAL/Path.hpp" - -typedef struct stat Stat_t; - -//--------------------------------------------------------------------------- -// pal::FileOp::HasFileExtension -//--------------------------------------------------------------------------- -bool pal::FileOp::checkFileExists(const std::string& fileName) { - Stat_t sb; - - if (stat(fileName.c_str(), &sb) == -1) { - return false; - } else { - return true; - } -} - -//--------------------------------------------------------------------------- -// pal::FileOp::move -//--------------------------------------------------------------------------- -bool pal::FileOp::move(const std::string& currentName, const std::string& newName, bool overwrite) { - if (overwrite) { - remove(newName.c_str()); - } - return (rename(currentName.c_str(), newName.c_str()) == 0); -} - -//--------------------------------------------------------------------------- -// pal::FileOp::deleteFile -//--------------------------------------------------------------------------- -bool pal::FileOp::deleteFile(const std::string& fileName) { - return (remove(fileName.c_str()) == 0); -} - -//------------------------------------------------------------------------------ -// pal::FileOp::checkIsDir -//------------------------------------------------------------------------------ -bool pal::FileOp::checkIsDir(const std::string& fileName) { - bool retVal = false; - Stat_t sb; - if (stat(fileName.c_str(), &sb) == 0) { - if (sb.st_mode & S_IFDIR) { - retVal = true; - } - } - return retVal; -} - -//------------------------------------------------------------------------------ -// pal::FileOp::getFileInfo 
-//------------------------------------------------------------------------------ -bool pal::FileOp::getFileInfo(const std::string& filename, - pal::FileOp::FilenamePartsType_t& filenameParts) { - std::string name; - - // Clear the result - filenameParts.basename.clear(); - filenameParts.extension.clear(); - filenameParts.directory.clear(); - - size_t lastPathSeparator = filename.find_last_of(Path::getSeparator()); - if (lastPathSeparator == std::string::npos) { - // No directory - name = filename; - } else { - // has a directory part - filenameParts.directory = filename.substr(0, lastPathSeparator); - name = filename.substr(lastPathSeparator + 1); - } - - size_t ext = name.find_last_of("."); - if (ext == std::string::npos) { - // no extension - filenameParts.basename = name; - } else { - // has extension - filenameParts.basename = name.substr(0, ext); - filenameParts.extension = name.substr(ext + 1); - } - - return true; -} - -//--------------------------------------------------------------------------- -// pal::FileOp::copyOverFile -//--------------------------------------------------------------------------- -bool pal::FileOp::copyOverFile(const std::string& fromFile, const std::string& toFile) { - bool rc = false; - int readFd; - int writeFd; - struct stat statBuf; - - // Open the input file. - readFd = ::open(fromFile.c_str(), O_RDONLY); - if (readFd == -1) { - close(readFd); - return false; - } - - // Stat the input file to obtain its size. 
*/ - if (fstat(readFd, &statBuf) != 0) { - close(readFd); - return false; - } - - // Open the output file for writing, with the same permissions as the input - writeFd = ::open(toFile.c_str(), O_WRONLY | O_CREAT | O_TRUNC, statBuf.st_mode); - if (writeFd == -1) { - close(readFd); - return false; - } - - // Copy the file in a non-kernel specific way */ - char fileBuf[8192]; - ssize_t rBytes, wBytes; - while (true) { - rBytes = read(readFd, fileBuf, sizeof(fileBuf)); - - if (!rBytes) { - rc = true; - break; - } - - if (rBytes < 0) { - rc = false; - break; - } - - wBytes = write(writeFd, fileBuf, (size_t)rBytes); - - if (!wBytes) { - rc = true; - break; - } - - if (wBytes < 0) { - rc = false; - break; - } - } - - /* Close up. */ - close(readFd); - close(writeFd); - return rc; -} - -static bool getFileInfoListRecursiveImpl(const std::string& path, - pal::FileOp::FilenamePartsListType_t& filenamePartsList, - const bool ignoreDirs, - size_t maxDepth) { - struct dirent** namelist = nullptr; - int entryCount = 0; - - // Base case - if (maxDepth == 0) { - return true; - } - -#ifdef __ANDROID__ - // android dirent.h has the wrong signature for alphasort so it had to be disabled or fixed - entryCount = scandir(path.c_str(), &namelist, 0, 0); -#else - entryCount = scandir(path.c_str(), &namelist, 0, alphasort); -#endif - if (entryCount < 0) { - return false; - } else { - while (entryCount--) { - const std::string dName(namelist[entryCount]->d_name); - free(namelist[entryCount]); - - // skip current directory, prev directory and empty string - if (dName.empty() || dName == "." 
|| dName == "..") { - continue; - } - - std::string curPath = path; - curPath += pal::Path::getSeparator(); - curPath += dName; - - // recurse if directory but avoid symbolic links to directories - if (pal::FileOp::checkIsDir(curPath)) { - Stat_t sb; - if (lstat(curPath.c_str(), &sb) == 0 && S_ISDIR(sb.st_mode)) { - if (!getFileInfoListRecursiveImpl(curPath, filenamePartsList, ignoreDirs, maxDepth - 1)) { - return false; - } - } - - if (ignoreDirs) { - continue; - } - - // Append training / to make this path look like a directory for - // getFileInfo() - if (curPath.back() != pal::Path::getSeparator()) { - curPath += pal::Path::getSeparator(); - } - } - - // add to vector - pal::FileOp::FilenamePartsType_t filenameParts; - if (pal::FileOp::getFileInfo(curPath, filenameParts)) { - filenamePartsList.push_back(filenameParts); - } - } - - free(namelist); - } - - return true; -} - -//--------------------------------------------------------------------------- -// pal::FileOp::getFileInfoList -//--------------------------------------------------------------------------- -bool pal::FileOp::getFileInfoList(const std::string& path, - FilenamePartsListType_t& filenamePartsList) { - return getFileInfoListRecursiveImpl(path, filenamePartsList, false, 1); -} - -//--------------------------------------------------------------------------- -// pal::FileOp::getFileInfoListRecursive -//--------------------------------------------------------------------------- -bool pal::FileOp::getFileInfoListRecursive(const std::string& path, - FilenamePartsListType_t& filenamePartsList, - const bool ignoreDirs) { - return getFileInfoListRecursiveImpl( - path, filenamePartsList, ignoreDirs, std::numeric_limits::max()); -} - -//--------------------------------------------------------------------------- -// pal::FileOp::getAbsolutePath -//--------------------------------------------------------------------------- -std::string pal::FileOp::getAbsolutePath(const std::string& path) { - // NOTE: This 
implementation is broken currently when a path with - // non-existant components is passed! NEO-19723 was created to address. - char absPath[PATH_MAX + 1] = {0}; - - if (realpath(path.c_str(), absPath) == NULL) { - DEBUG_MSG("GetAbsolute path fail! Error code : %d", errno); - return std::string(); - } - return std::string(absPath); -} - -//--------------------------------------------------------------------------- -// pal::FileOp::setCWD -//--------------------------------------------------------------------------- -bool pal::FileOp::setCurrentWorkingDirectory(const std::string& workingDir) { - return chdir(workingDir.c_str()) == 0; -} - -//--------------------------------------------------------------------------- -// pal::FileOp::getDirectory -//--------------------------------------------------------------------------- -std::string pal::FileOp::getDirectory(const std::string& file) { - std::string rc = file; - size_t offset = file.find_last_of(Path::getSeparator()); - if (offset != std::string::npos) { - rc = file.substr(0, offset); - } - return rc; -} - -//--------------------------------------------------------------------------- -// pal::FileOp::getFileName -//--------------------------------------------------------------------------- -std::string pal::FileOp::getFileName(const std::string& file) { - std::string rc = file; - size_t offset = file.find_last_of(Path::getSeparator()); - if (offset != std::string::npos) { - rc = file.substr(offset + 1); // +1 to skip path separator - } - return rc; -} - -//--------------------------------------------------------------------------- -// pal::FileOp::hasFileExtension -//--------------------------------------------------------------------------- -bool pal::FileOp::hasFileExtension(const std::string& file) { - FilenamePartsType_t parts; - getFileInfo(file, parts); - - return !parts.extension.empty(); -} - -//--------------------------------------------------------------------------- -// pal::FileOp::getCWD 
-//--------------------------------------------------------------------------- -std::string pal::FileOp::getCurrentWorkingDirectory() { - char buffer[PATH_MAX + 1]; - buffer[0] = '\0'; - - // If there is any failure return empty string. It is technically possible - // to handle paths exceeding PATH_MAX on some flavors of *nix but platforms - // like Android (Bionic) do no provide such capability. For consistency we - // will not handle extra long path names. - if (nullptr == getcwd(buffer, PATH_MAX)) { - return std::string(); - } else { - return std::string(buffer); - } -} - -//--------------------------------------------------------------------------- -// pal::FileOp::partsToString -//--------------------------------------------------------------------------- -std::string pal::FileOp::partsToString(const FilenamePartsType_t& filenameParts) { - std::string path; - - if (!filenameParts.directory.empty()) { - path += filenameParts.directory; - path += Path::getSeparator(); - } - if (!filenameParts.basename.empty()) { - path += filenameParts.basename; - } - if (!filenameParts.extension.empty()) { - path += "."; - path += filenameParts.extension; - } - return path; -} diff --git a/src/backends/qnn/PAL/src/linux/Path.cpp b/src/backends/qnn/PAL/src/linux/Path.cpp deleted file mode 100644 index 1a46b7a6b..000000000 --- a/src/backends/qnn/PAL/src/linux/Path.cpp +++ /dev/null @@ -1,48 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2008-2014, 2015, 2020-2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================== - -#include - -#include -#ifndef PATH_MAX -#include -#endif - -#include "PAL/FileOp.hpp" -#include "PAL/Path.hpp" - -char pal::Path::getSeparator() { return '/'; } - -std::string pal::Path::combine(const std::string &s1, const std::string &s2) { - std::stringstream ss; - ss << s1; - if (s1.size() > 0 && s1[s1.size() - 1] != getSeparator()) { - ss << getSeparator(); - } - ss << s2; - return ss.str(); -} - -std::string pal::Path::getDirectoryName(const std::string &path) { - std::string rc = path; - size_t index = path.find_last_of(pal::Path::getSeparator()); - if (index != std::string::npos) { - rc = path.substr(0, index); - } - return rc; -} - -std::string pal::Path::getAbsolute(const std::string &path) { - // Functionality was duplicated of function in FileOp - // Just call that function directly instead - return pal::FileOp::getAbsolutePath(path); -} - -bool pal::Path::isAbsolute(const std::string &path) { - return path.size() > 0 && path[0] == getSeparator(); -} diff --git a/src/backends/qnn/QNN.hpp b/src/backends/qnn/QNN.hpp deleted file mode 100644 index 49b217fbb..000000000 --- a/src/backends/qnn/QNN.hpp +++ /dev/null @@ -1,46 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2020-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================== - -#pragma once - -#include "QnnInterface.h" -#include "WrapperUtils/QnnWrapperUtils.hpp" -#include "System/QnnSystemInterface.h" - -namespace qnn { -namespace tools { -namespace sample_app { - -// Graph Related Function Handle Types -typedef qnn_wrapper_api::ModelError_t (*ComposeGraphsFnHandleType_t)( - Qnn_BackendHandle_t, - QNN_INTERFACE_VER_TYPE, - Qnn_ContextHandle_t, - const qnn_wrapper_api::GraphConfigInfo_t **, - const uint32_t, - qnn_wrapper_api::GraphInfo_t ***, - uint32_t *, - bool, - QnnLog_Callback_t, - QnnLog_Level_t); -typedef qnn_wrapper_api::ModelError_t (*FreeGraphInfoFnHandleType_t)( - qnn_wrapper_api::GraphInfo_t ***, uint32_t); - -typedef struct QnnFunctionPointers { - ComposeGraphsFnHandleType_t composeGraphsFnHandle; - // not used, only appears in DynamicLoadUtil.cpp for loading QNN model from shared library(QNN sample app demo) - FreeGraphInfoFnHandleType_t freeGraphInfoFnHandle; - QNN_INTERFACE_VER_TYPE qnnInterface; - QNN_SYSTEM_INTERFACE_VER_TYPE qnnSystemInterface; -} QnnFunctionPointers; - -} // namespace sample_app -} // namespace tools -} // namespace qnn - - diff --git a/src/backends/qnn/QNNBackend.cpp b/src/backends/qnn/QNNBackend.cpp deleted file mode 100755 index 69b112831..000000000 --- a/src/backends/qnn/QNNBackend.cpp +++ /dev/null @@ -1,1229 +0,0 @@ -#include -#include - -#include -#include -#include - -#include "Log.h" -#include "Module.hpp" -#include "OpDefined.hpp" -#include "QNNBackend.hpp" -#include "ParamLoader.hpp" -#include "QnnModel.hpp" -#include "Utils/QnnSampleAppUtils.hpp" -#include "Utils/IOTensor.hpp" -#include "Utils/DynamicLoadUtil.hpp" -#include "Log/Logger.hpp" -#include "WrapperUtils/QnnWrapperUtils.hpp" -#include "QNNMemoryManager.hpp" -#include "QnnTypes.h" -#include "HTP/QnnHtpGraph.h" -#include "Layer.hpp" - -#include "Types.hpp" -#include "op/QNNAdd.hpp" -#include "op/QNNCausalMask.hpp" -#include "op/QNNGELU.hpp" 
-#include "op/QNNLinearINT8.hpp" -#include "op/QNNMatmul.hpp" -#include "op/QNNMul.hpp" -#include "op/QNNLayerNorm.hpp" -#include "op/QNNRMSNorm.hpp" -#include "op/QNNRoPE.hpp" -#include "op/QNNScale.hpp" -#include "op/QNNSiLU.hpp" -#include "op/QNNSoftMax.hpp" -#include "op/QNNView.hpp" -#include "op/QNNReLU.hpp" -#include "op/QNNQuantize.hpp" -#include "op/QNNDequantize.hpp" -#include "op/QNNMergeOutput.hpp" -#include "op/QNNSplitInput.hpp" -#include "op/QNNTranspose.hpp" -#include "op/QNNSuperSiLU.hpp" -#include "op/QNNIRoPE.hpp" - -#include "memory/MemInspect.hpp" - -#ifdef DEBUGPRINT -#include "Timing.hpp" -#endif - -using namespace qnn; -using namespace qnn::tools; -using namespace qnn::tools::sample_app; - -// Flag to determine if Backend should node validation for each opNode added -#ifdef QNN_VALIDATE_NODE -#define DO_GRAPH_NODE_VALIDATIONS 1 -#else -#define DO_GRAPH_NODE_VALIDATIONS 0 -#endif - -namespace mllm { - -void QNNBackend::registerOps() { - addCreator(ADD, (QNNBackend::Creator *)new QNNAddCreator()); - addCreator(CAUSALMASK, (QNNBackend::Creator *)(new QNNCausalMaskCreator())); - addCreator(MATMUL, (QNNBackend::Creator *)(new QNNMatmulCreator())); - addCreator(RMSNORM, (QNNBackend::Creator *)(new QNNRMSNormCreator())); - addCreator(LAYERNORM, (QNNBackend::Creator *)(new QNNLayerNormCreator())); - addCreator(ROPE, (QNNBackend::Creator *)(new QNNRoPECreator())); - addCreator(IROPE, (QNNBackend::Creator *)(new QNNIRoPECreator())); - addCreator(SCALE, (QNNBackend::Creator *)(new QNNScaleCreator())); - addCreator(SILU, (QNNBackend::Creator *)(new QNNSiLUCreator())); - addCreator(SOFTMAX, (QNNBackend::Creator *)(new QNNSoftMaxCreator())); - addCreator(LINEAR, (QNNBackend::Creator *)(new QNNLinearINT8Creator())); - addCreator(LINEARINT8, (QNNBackend::Creator *)(new QNNLinearINT8Creator())); - addCreator(MUL, (QNNBackend::Creator *)(new QNNMulCreator())); - addCreator(VIEW, (QNNBackend::Creator *)(new QNNViewCreator())); - addCreator(RELU, 
(QNNBackend::Creator *)(new QNNReLUCreator())); - addCreator(OP_GELU, (QNNBackend::Creator *)(new QNNGELUCreator())); - addCreator(QUANTIZE, (QNNBackend::Creator *)(new QNNQuantizeCreator())); - addCreator(DEQUANTIZE, (QNNBackend::Creator *)(new QNNDequantizeCreator())); - addCreator(MERGEOUTPUT, (QNNBackend::Creator *)(new QNNMergeOutputCreator())); - addCreator(SPLITINPUT, (QNNBackend::Creator *)(new QNNSplitInputCreator())); - addCreator(TRANSPOSE, (QNNBackend::Creator *)(new QNNTransposeCreator())); - addCreator(SUPERSILU, (QNNBackend::Creator *)(new QNNSuperSiLUCreator())); -} - -QNNBackend::QNNBackend(shared_ptr mm) : - Backend(mm) { - type_ = BackendType::MLLM_QNN; // used in Tensor.device() - if (!log::initializeLogging()) { - MLLM_LOG_ERROR_STREAM << "ERROR: Unable to initialize logging!\n"; - return; - } - // TODO: make debug level configuable - log::setLogLevel(QnnLog_Level_t::QNN_LOG_LEVEL_ERROR); - - std::string backEndPath = "libQnnHtp.so"; - std::string opPackagePaths = "libQnnLLaMAPackage_CPU.so:LLaMAPackageInterfaceProvider:CPU,libQnnLLaMAPackage_HTP.so:LLaMAPackageInterfaceProvider:HTP"; - - // TODO: make these configuable - m_debug = false; // when set true, NATIVE tensor will be regared as APP_READ tensor - m_inputDataType = iotensor::InputDataType::NATIVE; - m_profilingLevel = ProfilingLevel::OFF; - - m_isBackendInitialized = false; - m_isContextCreated = false; - - // config path strings - split(m_opPackagePaths, opPackagePaths, ','); - - if (backEndPath.empty()) { - std::exit(EXIT_FAILURE); - } - MLLM_LOG_INFO_LEGACY("Backend: %s", backEndPath.c_str()); - - // Load backend and validate all the required function symbols are resolved - auto statusCode = dynamicloadutil::getQnnFunctionPointers(backEndPath, - "", - &m_qnnFunctionPointers, - &m_backendLibraryHandle, - false, - nullptr); - if (dynamicloadutil::StatusCode::SUCCESS != statusCode) { - if (dynamicloadutil::StatusCode::FAIL_LOAD_BACKEND == statusCode) { - exitWithMessage( - "Error 
initializing QNN Function Pointers: could not load backend: " + backEndPath, - EXIT_FAILURE); - } else if (dynamicloadutil::StatusCode::FAIL_LOAD_MODEL == statusCode) { - exitWithMessage( - "Error initializing QNN Function Pointers: could not load model: ", - EXIT_FAILURE); - } else { - exitWithMessage("Error initializing QNN Function Pointers", EXIT_FAILURE); - } - } - - // init qnn resources - { - MLLM_LOG_INFO_LEGACY("Backend build version: %s", getBackendBuildId().c_str()); - - // initialize logging in the backend - if (log::isLogInitialized()) { - auto logCallback = log::getLogCallback(); - auto logLevel = log::getLogLevel(); - // MLLM_LOG_INFO("Initializing logging in the backend. Callback: {}, Log Level: {}", - // logCallback, - // logLevel); - if (QNN_SUCCESS != m_qnnFunctionPointers.qnnInterface.logCreate(logCallback, logLevel, &m_logHandle)) { - MLLM_LOG_WARN_LEGACY("Unable to initialize logging in the backend."); - } - } else { - MLLM_LOG_WARN_LEGACY("Logging not available in the backend."); - } - - // initialize QnnBackend - auto qnnStatus = m_qnnFunctionPointers.qnnInterface.backendCreate( - m_logHandle, (const QnnBackend_Config_t **)m_backendConfig, &m_backendHandle); - if (QNN_BACKEND_NO_ERROR != qnnStatus) { - MLLM_LOG_ERROR("Could not initialize backend due to error = {}", (unsigned int)qnnStatus); - this->reportError("Backend Initialization failure"); - } - MLLM_LOG_INFO("Initialize Backend Returned Status = {}", (unsigned int)qnnStatus); - m_isBackendInitialized = true; - - auto devicePropertySupportStatus = this->isDevicePropertySupported(); - if (StatusCode::FAILURE != devicePropertySupportStatus) { - auto createDeviceStatus = this->createDevice(); - if (StatusCode::SUCCESS != createDeviceStatus) { - this->reportError("Device Creation failure"); - } - } - - if (StatusCode::SUCCESS != this->initializeProfiling()) { - this->reportError("Profiling Initialization failure"); - } - - if (StatusCode::SUCCESS != this->registerOpPackages()) { - 
this->reportError("Register Op Packages failure"); - } - } - - // register ops - this->registerOps(); -} - -QNNBackend::~QNNBackend() { - terminateBackend(); - // free creaters in map_creator_ - for (auto &iter : map_creator_) { - delete iter.second; - } - // free qnn backend resource - auto devicePropertySupportStatus = this->isDevicePropertySupported(); - if (StatusCode::FAILURE != devicePropertySupportStatus) { - auto freeDeviceStatus = this->freeDevice(); - if (StatusCode::SUCCESS != freeDeviceStatus) { - this->reportError("Device Free failure"); - } - } - // free dynamic library handle - if (m_backendLibraryHandle) { - pal::dynamicloading::dlClose(m_backendLibraryHandle); - } - QNN_INFO("Free handle"); -} - -void QNNBackend::onSetUpStart(vector> &inputs, vector> &outputs, string graphName) { - auto returnStatus = StatusCode::SUCCESS; - - // create a new graph - qnnModelIndex_ = qnnModels_.size(); - qnnModelIndexMap_.insert(std::make_pair(graphName, qnnModelIndex_)); - qnnModels_.push_back(qnn_wrapper_api::QnnModel()); - // create qnn context, assign context to qnn memory manager - if (StatusCode::SUCCESS != this->createContext()) { - this->reportError("Context Creation failure"); - } -#ifdef QNN_ARM - auto qnnMM = std::static_pointer_cast(mem_manager_); - qnnMM->setQnnInterfaceAndContext(m_context); -#endif - - // initialize qnn graph info, set graph info, graph count - // NOTE: currently not using it - QnnHtpGraph_CustomConfig_t customConfig; - // customConfig.option = QNN_HTP_GRAPH_CONFIG_OPTION_NUM_HVX_THREADS; - // customConfig.numHvxThreads = 4; // set a number. 
MAX = number of HVX HW blocks for that SoC - customConfig.option = QNN_HTP_GRAPH_CONFIG_OPTION_VTCM_SIZE; - customConfig.vtcmSizeInMB = 8; - - QnnGraph_Config_t graphConfig; - graphConfig.option = QNN_GRAPH_CONFIG_OPTION_CUSTOM; - graphConfig.customConfig = &customConfig; - - const QnnGraph_Config_t *pGraphConfig[] = {&graphConfig, NULL}; - - const QnnGraph_Config_t **graphConfigs = pGraphConfig; - - m_graphConfigsInfoCount = 1; - - qnn_wrapper_api::ModelError_t err = qnn_wrapper_api::getQnnGraphConfigFromInfo( - graphName.c_str(), (const qnn_wrapper_api::GraphConfigInfo_t **)m_graphConfigsInfo, m_graphConfigsInfoCount, graphConfigs); - if (err != qnn_wrapper_api::MODEL_NO_ERROR) { - this->reportError("Graph Config Info failure"); - } - - err = qnnModels_[qnnModelIndex_].initialize(m_backendHandle, - m_qnnFunctionPointers.qnnInterface, - m_context, - graphName.c_str(), - m_debug, - DO_GRAPH_NODE_VALIDATIONS, - graphConfigs); - if (err != qnn_wrapper_api::MODEL_NO_ERROR) { - this->reportError("Graph Initialization failure: " + graphName); - } - - // To avoid no input, we put inputs here. - // For splitinput op input, the seq will be divided as 5, and we add the input in split ops. 
- for (auto &input : inputs) { - Qnn_DataType_t data_type; - auto quantizeDefined = QNN_DEFINITION_UNDEFINED; - auto quantizeType = QNN_QUANTIZATION_ENCODING_UNDEFINED; - float scale = 0.0f; - AbstructLoader *loader = nullptr; - if (Module::llm_model_ptr == nullptr) { // old frontend - loader = dataLoader_; - } else { // new frontend - loader = Module::llm_model_ptr->loader; - } - Tensor scaleTensor(this); - scaleTensor.reshape(1, 1, 1, 1); - scaleTensor.setDtype(MLLM_TYPE_F32); - scaleTensor.alloc(); - - switch (input->dtype()) { - case MLLM_TYPE_F32: - data_type = QNN_DATATYPE_FLOAT_32; - break; - case MLLM_TYPE_I8: { - data_type = QNN_DATATYPE_SFIXED_POINT_8; - quantizeDefined = QNN_DEFINITION_DEFINED; - quantizeType = QNN_QUANTIZATION_ENCODING_SCALE_OFFSET; - - string scaleName = input->name(); - - std::string wordToRemove = "outtensor-"; - int pos = scaleName.find(wordToRemove); - if (pos != -1) { // old frontend merge/split generated tensor - scaleName = scaleName.substr(wordToRemove.length()); - wordToRemove = "or_split"; - if (scaleName.find(wordToRemove) != -1) { - pos = scaleName.find("or_split"); - // scaleName.erase(pos, wordToRemove.length()); - scaleName = scaleName.substr(0, pos); - // o - scaleName += "o_proj.input_scale"; - } else if (scaleName.find("ires_split") != -1) { - pos = scaleName.find("ires_split"); - wordToRemove = "ires_split"; - // scaleName.erase(pos, wordToRemove.length()); - scaleName = scaleName.substr(0, pos); - // q - scaleName += "q_proj.input_scale"; - } else if (scaleName.find("fres_split") != -1) { - pos = scaleName.find("fres_split"); - wordToRemove = "fres_split"; - // scaleName.erase(pos, wordToRemove.length()); - scaleName = scaleName.substr(0, pos); - // fc1 - scaleName += "up_proj.input_scale"; - } - } else { // new frontend no merge/split condition - std::string prefix = "out-", suffix = ".quantize"; - if (input->name().find(prefix) != std::string::npos) { - scaleName = input->name().substr(prefix.length()); - } - if 
(scaleName.find(suffix) != std::string::npos) { - scaleName = scaleName.substr(0, scaleName.length() - suffix.length()); - } - scaleName += ".input_scale"; - } - scaleTensor.setName(scaleName); - loader->load(&scaleTensor); - scale = roundf(scaleTensor.hostPtr()[0] / 127.0 * 100000) / 100000; - scaleTensor.free(); - - break; - } - default: - MLLM_LOG_ERROR_STREAM << "[ERROR] QNNBackend not support dtype: " << input->dtype() << std::endl; - data_type = QNN_DATATYPE_FLOAT_32; - } - - uint32_t dimensionsInput[4] = { - static_cast(input->batch()), - static_cast(input->sequence()), - static_cast(input->head()), - static_cast(input->dimension()), - }; - - qnnModels_[qnnModelIndex_].addTensor(input->name().c_str(), - (Qnn_Tensor_t){ - .version = QNN_TENSOR_VERSION_1, - .v1 = { - .id = 0, - .name = input->name().c_str(), - .type = QNN_TENSOR_TYPE_APP_WRITE, - .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, - .dataType = data_type, - .quantizeParams = {quantizeDefined, - quantizeType, - {.scaleOffsetEncoding = {.scale = scale, .offset = 0}}}, - .rank = 4, - .dimensions = dimensionsInput, - .memType = QNN_TENSORMEMTYPE_RAW, - .clientBuf = {.data = nullptr, - .dataSize = 0}}}); - } - - // create a new inputBuffer and outputBuffer for the graph - inputBufferMap.insert(std::make_pair(graphName, std::vector(inputs.size()))); - outputBufferMap.insert(std::make_pair(graphName, std::vector())); - - currentInputBuffers = &inputBufferMap[graphName]; - currentOutputBuffers = &outputBufferMap[graphName]; - - // push input tensors to the buffer list - for (int i = 0; i < inputs.size(); i++) { - (*currentInputBuffers)[i] = inputs[i]->hostPtr(); - } -} - -void QNNBackend::onSetUpEnd(vector> &inputs, vector> &outputs, string graphName) { - currentInputBuffers = &inputBufferMap[graphName]; - currentOutputBuffers = &outputBufferMap[graphName]; - qnnModelIndex_ = qnnModelIndexMap_[graphName]; - PRINT_MEMORY_USAGE("before graph finilize") - auto status = graphFinilize(); - 
PRINT_MEMORY_USAGE("after graph finilize") - if (qnn_wrapper_api::ModelError_t::MODEL_NO_ERROR != status) { - this->reportError("Graph Finalization failure"); - } - - auto returnStatus = StatusCode::SUCCESS; - - Qnn_Tensor_t *qnnInputs = nullptr; - Qnn_Tensor_t *qnnOutputs = nullptr; - - auto graphInfo = graphInfoMap_[qnnModelIndex_]; - - // directly get qnnInputs and qnnOutputs from graphInfo.outputTensors - if (iotensor::StatusCode::SUCCESS != m_ioTensor.setupInputAndOutputTensors(&qnnInputs, &qnnOutputs, *graphInfo)) { - MLLM_LOG_ERROR_LEGACY("Error in setting up Input and output Tensors for qnnModelIndex_: %d", qnnModelIndex_); - returnStatus = StatusCode::FAILURE; - } - - auto qnnMM = std::static_pointer_cast(mem_manager_); - - // register input and output tensor to qnn shared buffers - // must insure the inputs and outputs of mllm graph are the same as the qnn graph - // op created io tensors (kvcache, wnop...) should be solved -#ifdef DEBUGPRINT - std::cout << "input tensors num:" << graphInfo->numInputTensors << std::endl; - std::cout << "output tensors num:" << graphInfo->numOutputTensors << std::endl; -#endif - - for (int i = 0; i < graphInfo->numInputTensors; i++) { - qnnMM->registerQnnTensor((*currentInputBuffers)[i], qnnInputs[i]); -#ifdef DEBUGPRINT - if (i < inputs.size()) { - std::cout << "\nregistered input tensor: " << inputs[i]->hostPtr() << " backend staged ptr: " << (void *)(*currentInputBuffers)[i] << std::endl; - } else { - std::cout << "\n registered op added input" << std::endl; - } - std::cout << "qnn input tensor name: " << qnnInputs[i].v1.name << std::endl; - std::cout << "qnn input tensor scale: " << qnnInputs[i].v1.quantizeParams.scaleOffsetEncoding.scale << std::endl; -#endif - } - for (int i = 0; i < graphInfo->numOutputTensors; i++) { - qnnMM->registerQnnTensor((*currentOutputBuffers)[i], qnnOutputs[i]); -#ifdef DEBUGPRINT - if (i < outputs.size()) { - std::cout << "\nregistered output tensor: " << outputs[i]->hostPtr() << " backend 
staged ptr: " << (void *)(*currentOutputBuffers)[i] << std::endl; - } else { - std::cout << "\n registered op added output" << std::endl; - } - std::cout << "qnn output tensor name: " << qnnOutputs[i].v1.name << std::endl; - std::cout << "qnn output tensor scale: " << qnnOutputs[i].v1.quantizeParams.scaleOffsetEncoding.scale << std::endl; -#endif - } - - inputsMap_[qnnModelIndex_] = qnnInputs; - outputsMap_[qnnModelIndex_] = qnnOutputs; -} - -void QNNBackend::onExecuteStart(vector> &inputs, vector> &outputs, string graphName) { - // to support multi-thread, we need local variable. - // update currentInputBuffers, currentOutputBuffers, qnnModelIndex_ - auto t_qnnModelIndex_ = qnnModelIndexMap_[graphName]; - - qnn_wrapper_api::GraphInfo_t *graphInfo = graphInfoMap_[t_qnnModelIndex_]; - - Qnn_Tensor_t *inputs_ = inputsMap_[t_qnnModelIndex_]; - Qnn_Tensor_t *outputs_ = outputsMap_[t_qnnModelIndex_]; - - Qnn_ErrorHandle_t executeStatus = QNN_GRAPH_NO_ERROR; -#ifdef DEBUGPRINT - uint64_t t_start = mllm_time_us(); -#endif - executeStatus = - m_qnnFunctionPointers.qnnInterface.graphExecute(graphInfo->graph, - inputs_, - graphInfo->numInputTensors, - outputs_, - graphInfo->numOutputTensors, - m_profileBackendHandle, - nullptr); -#ifdef DEBUGPRINT - uint64_t t_end = mllm_time_us(); - std::cout << "QNN execution time " << (t_end - t_start) / 1000.0F << " ms" << std::endl; -#endif - - if (QNN_GRAPH_NO_ERROR != executeStatus) { - MLLM_LOG_ERROR_STREAM << "Error in executing graph: " << graphName << std::endl; - } - - if (ProfilingLevel::OFF != m_profilingLevel) { - extractBackendProfilingInfo(m_profileBackendHandle); - } -} - -void QNNBackend::onExecuteEnd(std::vector> &outputs, const string &graph_name) { -} - -void QNNBackend::freeGraphDataStructure(string graphName) { - auto it = qnnModelIndexMap_.find(graphName); - if (it != qnnModelIndexMap_.end()) { - qnnModelIndex_ = it->second; - - qnnModels_[qnnModelIndex_].freeTensors(); - qnnModels_[qnnModelIndex_].clearGraph(); - } 
- - inputBufferMap[graphName].resize(0); - outputBufferMap[graphName].resize(0); -} - -void QNNBackend::afterAllGraphsExecute() { - // clear old models. - qnnModelIndexMap_.clear(); - - auto qnnMM = std::static_pointer_cast(mem_manager_); - qnnMM->deRegisterQnnTensor(); - - this->freeContext(); - - inputBufferMap.clear(); - outputBufferMap.clear(); - - graphInfoMap_.clear(); - inputsMap_.clear(); - outputsMap_.clear(); -} - -std::string QNNBackend::getBackendBuildId() { - char *backendBuildId{nullptr}; - if (QNN_SUCCESS != m_qnnFunctionPointers.qnnInterface.backendGetBuildId((const char **)&backendBuildId)) { - MLLM_LOG_ERROR_LEGACY("Unable to get build Id from the backend."); - } - return (backendBuildId == nullptr ? std::string("") : std::string(backendBuildId)); -} - -qnn_wrapper_api::ModelError_t QNNBackend::graphAddNode(string name, - string nodeType, - std::vector inputTensorNames, - std::vector outputTensors, - std::vector params, - string packageName) { - qnn_wrapper_api::ModelError_t err = qnn_wrapper_api::ModelError_t::MODEL_NO_ERROR; - Qnn_Param_t *paramsPtr = nullptr; - if (!params.empty()) { - paramsPtr = params.data(); - } - VALIDATE(qnnModels_[qnnModelIndex_].addNode( - QNN_OPCONFIG_VERSION_1, // Op_Config_t Version - name.c_str(), // Node Name - packageName.c_str(), // Package Name - nodeType.c_str(), // Qnn Node Type - paramsPtr, // Node Params - params.size(), // Num Node Params - inputTensorNames, // Input Tensor Names - inputTensorNames.size(), // Num Input Tensor Names - outputTensors.data(), // Output Tensors - outputTensors.size() // Num Output Tensors - ), - err); - return err; -} - -qnn_wrapper_api::ModelError_t QNNBackend::graphFinilize() { - // Populate the constructed graphs in provided output variables - qnn_wrapper_api::ModelError_t err = qnn_wrapper_api::MODEL_NO_ERROR; - qnn_wrapper_api::GraphInfo_t *graphInfo = nullptr; - - VALIDATE(getSingleGraphInfoFromModel(qnnModels_[qnnModelIndex_], &graphInfo), err); - - // Graph finalize - if 
(QNN_GRAPH_NO_ERROR != m_qnnFunctionPointers.qnnInterface.graphFinalize(graphInfo->graph, m_profileBackendHandle, nullptr)) { - return qnn_wrapper_api::ModelError_t::MODEL_GRAPH_ERROR; - } - if (ProfilingLevel::OFF != m_profilingLevel) { - extractBackendProfilingInfo(m_profileBackendHandle); - } - - graphInfoMap_[qnnModelIndex_] = graphInfo; - - return qnn_wrapper_api::ModelError_t::MODEL_NO_ERROR; -} - -qnn_wrapper_api::ModelError_t QNNBackend::modelAddTensor(std::string nodeName, Qnn_Tensor_t tensor) { - return qnnModels_[qnnModelIndex_].addTensor(nodeName.c_str(), tensor); -} - -StatusCode QNNBackend::initializeProfiling() { - if (ProfilingLevel::OFF != m_profilingLevel) { - MLLM_LOG_INFO_LEGACY("Profiling turned on; level = %d", (int)m_profilingLevel); - if (ProfilingLevel::BASIC == m_profilingLevel) { - MLLM_LOG_INFO_LEGACY("Basic profiling requested. Creating Qnn Profile object."); - if (QNN_PROFILE_NO_ERROR != m_qnnFunctionPointers.qnnInterface.profileCreate(m_backendHandle, QNN_PROFILE_LEVEL_BASIC, &m_profileBackendHandle)) { - MLLM_LOG_WARN_LEGACY("Unable to create profile handle in the backend."); - return StatusCode::FAILURE; - } - } else if (ProfilingLevel::DETAILED == m_profilingLevel) { - MLLM_LOG_INFO_LEGACY("Detailed profiling requested. Creating Qnn Profile object."); - if (QNN_PROFILE_NO_ERROR != m_qnnFunctionPointers.qnnInterface.profileCreate(m_backendHandle, QNN_PROFILE_LEVEL_DETAILED, &m_profileBackendHandle)) { - MLLM_LOG_ERROR_LEGACY("Unable to create profile handle in the backend."); - return StatusCode::FAILURE; - } - } - } - return StatusCode::SUCCESS; -} - -// Simple method to report error from app to lib. -void QNNBackend::reportError(const std::string &err) { - MLLM_LOG_ERROR_LEGACY("%s", err.c_str()); - exit(1); -} - -// Terminate the backend after done. 
-StatusCode QNNBackend::terminateBackend() { - if ((m_isBackendInitialized && nullptr != m_qnnFunctionPointers.qnnInterface.backendFree) && QNN_BACKEND_NO_ERROR != m_qnnFunctionPointers.qnnInterface.backendFree(m_backendHandle)) { - MLLM_LOG_ERROR_LEGACY("Could not terminate backend"); - return StatusCode::FAILURE; - } - m_isBackendInitialized = false; - return StatusCode::SUCCESS; -} - -// Register op packages and interface providers supplied during -// object creation. If there are multiple op packages, register -// them sequentially in the order provided. -StatusCode QNNBackend::registerOpPackages() { - const size_t pathIdx = 0; - const size_t interfaceProviderIdx = 1; - for (auto const &opPackagePath : m_opPackagePaths) { - std::vector opPackage; - split(opPackage, opPackagePath, ':'); - QNN_DEBUG("opPackagePath: %s", opPackagePath.c_str()); - const char *target = nullptr; - const size_t targetIdx = 2; - if (opPackage.size() != 2 && opPackage.size() != 3) { - MLLM_LOG_ERROR_LEGACY("Malformed opPackageString provided: %s", opPackagePath.c_str()); - return StatusCode::FAILURE; - } - if (opPackage.size() == 3) { - target = (char *)opPackage[targetIdx].c_str(); - } - if (nullptr == m_qnnFunctionPointers.qnnInterface.backendRegisterOpPackage) { - MLLM_LOG_ERROR_LEGACY("backendRegisterOpPackageFnHandle is nullptr."); - return StatusCode::FAILURE; - } - if (QNN_BACKEND_NO_ERROR != m_qnnFunctionPointers.qnnInterface.backendRegisterOpPackage(m_backendHandle, (char *)opPackage[pathIdx].c_str(), (char *)opPackage[interfaceProviderIdx].c_str(), target)) { - MLLM_LOG_ERROR_LEGACY("Could not register Op Package: %s and interface provider: %s", - opPackage[pathIdx].c_str(), - opPackage[interfaceProviderIdx].c_str()); - return StatusCode::FAILURE; - } - MLLM_LOG_INFO_LEGACY("Registered Op Package: %s and interface provider: %s", - opPackage[pathIdx].c_str(), - opPackage[interfaceProviderIdx].c_str()); - } - return StatusCode::SUCCESS; -} - -// Create a Context in a backend. 
-StatusCode QNNBackend::createContext() { - if (QNN_CONTEXT_NO_ERROR != m_qnnFunctionPointers.qnnInterface.contextCreate(m_backendHandle, m_deviceHandle, (const QnnContext_Config_t **)&m_contextConfig, &m_context)) { - MLLM_LOG_ERROR_LEGACY("Could not create context"); - return StatusCode::FAILURE; - } - m_isContextCreated = true; - return StatusCode::SUCCESS; -} - -// Free context after done. -StatusCode QNNBackend::freeContext() { - if (m_isContextCreated && QNN_CONTEXT_NO_ERROR != m_qnnFunctionPointers.qnnInterface.contextFree(m_context, m_profileBackendHandle)) { - MLLM_LOG_ERROR_LEGACY("Could not free context"); - return StatusCode::FAILURE; - } - m_isContextCreated = false; - return StatusCode::SUCCESS; -} - -StatusCode QNNBackend::extractBackendProfilingInfo( - Qnn_ProfileHandle_t profileHandle) { - if (nullptr == m_profileBackendHandle) { - MLLM_LOG_ERROR_LEGACY("Backend Profile handle is nullptr; may not be initialized."); - return StatusCode::FAILURE; - } - const QnnProfile_EventId_t *profileEvents{nullptr}; - uint32_t numEvents{0}; - if (QNN_PROFILE_NO_ERROR != m_qnnFunctionPointers.qnnInterface.profileGetEvents(profileHandle, &profileEvents, &numEvents)) { - MLLM_LOG_ERROR_LEGACY("Failure in profile get events."); - return StatusCode::FAILURE; - } - QNN_DEBUG("ProfileEvents: [%p], numEvents: [%d]", profileEvents, numEvents); - for (size_t event = 0; event < numEvents; event++) { - extractProfilingEvent(*(profileEvents + event)); - extractProfilingSubEvents(*(profileEvents + event)); - } - return StatusCode::SUCCESS; -} - -StatusCode QNNBackend::extractProfilingSubEvents( - QnnProfile_EventId_t profileEventId) { - const QnnProfile_EventId_t *profileSubEvents{nullptr}; - uint32_t numSubEvents{0}; - if (QNN_PROFILE_NO_ERROR != m_qnnFunctionPointers.qnnInterface.profileGetSubEvents(profileEventId, &profileSubEvents, &numSubEvents)) { - MLLM_LOG_ERROR_LEGACY("Failure in profile get sub events."); - return StatusCode::FAILURE; - } - 
QNN_DEBUG("ProfileSubEvents: [%p], numSubEvents: [%d]", profileSubEvents, numSubEvents); - for (size_t subEvent = 0; subEvent < numSubEvents; subEvent++) { - extractProfilingEvent(*(profileSubEvents + subEvent)); - extractProfilingSubEvents(*(profileSubEvents + subEvent)); - } - return StatusCode::SUCCESS; -} - -StatusCode QNNBackend::extractProfilingEvent( - QnnProfile_EventId_t profileEventId) { - QnnProfile_EventData_t eventData; - if (QNN_PROFILE_NO_ERROR != m_qnnFunctionPointers.qnnInterface.profileGetEventData(profileEventId, &eventData)) { - MLLM_LOG_ERROR_LEGACY("Failure in profile get event type."); - return StatusCode::FAILURE; - } - QNN_DEBUG("Printing Event Info - Event Type: [%d], Event Value: [%" PRIu64 - "], Event Identifier: [%s], Event Unit: [%d]", - eventData.type, - eventData.value, - eventData.identifier, - eventData.unit); - return StatusCode::SUCCESS; -} - -StatusCode QNNBackend::verifyFailReturnStatus(Qnn_ErrorHandle_t errCode) { - auto returnStatus = StatusCode::FAILURE; - switch (errCode) { - case QNN_COMMON_ERROR_SYSTEM_COMMUNICATION: - returnStatus = StatusCode::FAILURE_SYSTEM_COMMUNICATION_ERROR; - break; - case QNN_COMMON_ERROR_SYSTEM: - returnStatus = StatusCode::FAILURE_SYSTEM_ERROR; - break; - case QNN_COMMON_ERROR_NOT_SUPPORTED: - returnStatus = StatusCode::QNN_FEATURE_UNSUPPORTED; - break; - default: - break; - } - return returnStatus; -} - -StatusCode QNNBackend::isDevicePropertySupported() { - if (nullptr != m_qnnFunctionPointers.qnnInterface.propertyHasCapability) { - auto qnnStatus = - m_qnnFunctionPointers.qnnInterface.propertyHasCapability(QNN_PROPERTY_GROUP_DEVICE); - if (QNN_PROPERTY_NOT_SUPPORTED == qnnStatus) { - MLLM_LOG_WARN_LEGACY("Device property is not supported"); - } - if (QNN_PROPERTY_ERROR_UNKNOWN_KEY == qnnStatus) { - MLLM_LOG_ERROR_LEGACY("Device property is not known to backend"); - return StatusCode::FAILURE; - } - } - return StatusCode::SUCCESS; -} - -StatusCode QNNBackend::createDevice() { - if (nullptr != 
m_qnnFunctionPointers.qnnInterface.deviceCreate) { - auto qnnStatus = - m_qnnFunctionPointers.qnnInterface.deviceCreate(m_logHandle, nullptr, &m_deviceHandle); - if (QNN_SUCCESS != qnnStatus && QNN_DEVICE_ERROR_UNSUPPORTED_FEATURE != qnnStatus) { - MLLM_LOG_ERROR_LEGACY("Failed to create device"); - return verifyFailReturnStatus(qnnStatus); - } - } - return StatusCode::SUCCESS; -} - -StatusCode QNNBackend::freeDevice() { - if (nullptr != m_qnnFunctionPointers.qnnInterface.deviceFree) { - auto qnnStatus = m_qnnFunctionPointers.qnnInterface.deviceFree(m_deviceHandle); - if (QNN_SUCCESS != qnnStatus && QNN_DEVICE_ERROR_UNSUPPORTED_FEATURE != qnnStatus) { - MLLM_LOG_ERROR_LEGACY("Failed to free device"); - return verifyFailReturnStatus(qnnStatus); - } - } - return StatusCode::SUCCESS; -} - -std::vector QNNBackend::runFunc(std::vector out_names, - TensorFuncType type, - std::vector float_args, - std::vector> input_tensors, - bool in_place) { - Module *module = input_tensors.empty() ? Module::llm_model_ptr : input_tensors[0]->module(); - assert(module != nullptr); - auto &activation_tensors = module->activation_tensors; - auto &activation_tensors_num = module->activation_tensors_num; - - std::vector> output_ptrs; - for (const auto &out_name : out_names) { - if (activation_tensors.find(out_name) == activation_tensors.end()) { - Backend *backend_h = Backend::global_backends[MLLM_CPU]; - if (!input_tensors.empty()) { - backend_h = input_tensors[0]->backend(); - } - activation_tensors[out_name] = std::make_shared(backend_h); - activation_tensors[out_name]->setName(out_name); - activation_tensors[out_name]->setModule(module); - activation_tensors_num[out_name] = 0; - } - output_ptrs.push_back(activation_tensors[out_name]); - } - - if (module->doLoad) { - std::vector results; - for (auto &out_tensor : output_ptrs) { - results.push_back(*activation_tensors[out_tensor->name()]); - } - return results; - } - - Backend *backend_h = Backend::global_backends[MLLM_CPU]; - if 
(!input_tensors.empty()) { - backend_h = input_tensors[0]->backend(); - } - TensorFunction *func = backend_h->funcCreate(type); - - std::vector> input_ptrs; - for (auto &tensor : input_tensors) { - input_ptrs.push_back(activation_tensors[tensor->name()]); - } - // if (in_place) { - // for (size_t i = 0; i < input_tensors.size() && i < out_names.size(); ++i) { - // input_tensors[i]->setName(out_names[i]); - // output_ptrs.push_back(input_tensors[i]); - // } - // } - -#ifdef DEBUGOPTIME - auto start_t = mllm_time_us(); -#endif - - switch (Tensor::tensor_status) { - case TENSOR_STATIC_INIT: - func->reshape(output_ptrs, input_ptrs, float_args); - func->setUp(output_ptrs, input_ptrs, float_args); - break; - case TENSOR_STATIC_READY: - func->execute(output_ptrs, input_ptrs, float_args); - break; - case TENSOR_STATIC_TRACE: - if (backend_h->type() == BackendType::MLLM_CPU) { - Tracer::addTensorFunction(func, input_ptrs, output_ptrs, float_args); - } - break; - default: - break; - } - - // if (Backend::global_backends.size() == 1) { - // for (auto input_tensor : input_ptrs) { - // auto it = activation_tensors_num.find(input_tensor->name()); - // if (it != activation_tensors_num.end()) { - // switch (Tensor::tensor_status) { - // case TENSOR_STATIC_INIT: - // it->second += 1; - // break; - // case TENSOR_STATIC_READY: - // it->second -= 1; - // break; - // default: - // break; - // } - // if (it->second == 0 && module_tensors[input_tensor->name()]->sequence() > 1 && module_tensors[input_tensor->name()]->ttype() != GRAPH_OUTPUT) { - // activation_tensors[input_tensor->name()]->free(); - // } - // } - // } - // } - -#ifdef DEBUGOPTIME - if (Tensor::tensor_status == TENSOR_STATIC_READY) { - auto end_t = mllm_time_us(); - std::cout << (out_names.empty() ? 
"" : out_names[0]) << " | " - << Tensor::tensor_status << " time: " - << (end_t - start_t) / 1000.0F << "ms" << std::endl; - } -#endif - -#ifdef DEBUGSAVETENSOR - for (auto &out_name : out_names) { - activation_tensors[out_name]->saveNData(); - } -#endif - - std::vector results; - for (auto &out_tensor : output_ptrs) { - results.emplace_back(*activation_tensors[out_tensor->name()]); - } - return results; -} -std::string name_num_to_X(const std::string &input_string) { - std::regex pattern(R"(\.\d{1,3}\.)"); // Matches any number between 1 and 100 between two dots - std::string replacement = ".X."; // The string to replace the matched pattern with - std::string output_string = std::regex_replace(input_string, pattern, replacement); - return output_string; -} -std::string name_X_to_num(const std::string &input_string, int in_idx) { - std::regex pattern(".X."); // Matches any number between 1 and 100 between two dots - std::string replacement = "." + std::to_string(in_idx) + "."; // The string to replace the matched pattern with - std::string output_string = std::regex_replace(input_string, pattern, replacement); - return output_string; -} -void init_reset_KVCache(string input_name, Module *module, int saved_list_idx, map layername_2_tensorname, Backend *backend_) { - map> &activation_tensors = module->activation_tensors; - vector renameX_names; - renameX_names.push_back(input_name); - const vector suffixs = {"-view", ".split-0", ".split-1", ".split-2", "-cat", "-split-0-48"}; - vector new_names; - bool can_break = true; - auto in_x_name = renameX_names[0]; - while (can_break) { - can_break = false; - for (const auto &suffix : suffixs) { - if (in_x_name.rfind(suffix) == (in_x_name.size() - suffix.size())) { - const auto r_name = in_x_name.substr(0, in_x_name.size() - suffix.size()); - if (std::find(renameX_names.begin(), renameX_names.end(), r_name) == renameX_names.end() && std::find(new_names.begin(), new_names.end(), r_name) == new_names.end()) { - 
new_names.push_back(r_name); - in_x_name = r_name; - can_break = true; - } - break; - } - } - } - renameX_names.insert(renameX_names.end(), new_names.begin(), new_names.end()); - for (const auto x_name : renameX_names) { - auto name = name_X_to_num(x_name, saved_list_idx); - layername_2_tensorname[name] = name; - activation_tensors[name] = std::make_shared(backend_); - activation_tensors[name]->initFrom(*activation_tensors[x_name]); - activation_tensors[name]->setName(name); - activation_tensors[name]->setModule(module); - } -} - -std::vector QNNBackend::runLayer(Layer *layer, std::vector inputs, int N) { - Module *module = inputs.empty() ? Module::llm_model_ptr : inputs[0].module(); - map> &activation_tensors = module->activation_tensors; - auto &activation_tensors_num = module->activation_tensors_num; - // Module::runlistIdx = saved_list_idx; - bool do_init = false; - - if (module->doLoad || !layer->inited_loaded) { - // set backend to current module device and try to create op - // use Module::tmp_device only when creating the op as the recersive module backend only handled in load and init stage - layer->backend_ = Backend::global_backends[Module::tmp_device]; - do_init = !layer->inited_loaded; - if (layer->op_ == nullptr) { -#ifdef USE_QNN - if ((layer->param_["type"] == KVCACHE || layer->param_["type"] == KVCACHENPU) && (Backend::global_backends.find(MLLM_QNN) != Backend::global_backends.end())) { - if (kv_cache_map.find(layer->name_) == kv_cache_map.end()) { - // for the prefill part, we need to create a new op - layer->param_["type"] = KVCACHENPU; - layer->op_ = layer->backend_->opCreate(layer->param_, layer->name_); - kv_cache_map[layer->name_] = layer->op_; - } else { -#ifdef DEBUGPRINT - std::cout << name_ << " is shared used" << std::endl; -#endif - // for the decoding part, we need to get created op from global container - layer->op_ = kv_cache_map[layer->name_]; - } - } else { - layer->op_ = layer->backend_->opCreate(layer->param_, layer->name_); - } 
-#else - layer->op_ = layer->backend_->opCreate(layer->param_, layer->name_); -#endif - } - if (module->doLoad) { - layer->op_->load(*module->loader); - layer->inited_loaded = true; - } else if (layer->loaded_param) { - layer->inited_loaded = layer->loaded_param; - } else { - if (!layer->inited_loaded) { - // module->loader = new ParamLoader(""); - // op_->load(*module->loader); - auto empty_loader = new ParamLoader(""); - layer->op_->load(*empty_loader); - layer->inited_loaded = true; - } - } - vector layer_next_names = {}; - if (N > 1) { - for (int i = 0; i < N; ++i) { - layer_next_names.push_back("out-" + layer->op_->name() + "-" + std::to_string(i)); - } - } else { - layer_next_names = {"out-" + layer->op_->name()}; - } - for (const auto &layer_next_name : layer_next_names) { - string next_name; - if (Layer::use_layername_2_tensorname) { - if (Layer::layername_2_tensorname.find(layer_next_name) == Layer::layername_2_tensorname.end()) { - if (layer->param_["type"] == KVCACHE) { - Layer::layername_2_tensorname[layer_next_name] = layer_next_name; - init_reset_KVCache(inputs[0].name(), module, layer->saved_list_idx, Layer::layername_2_tensorname, layer->backend_); - } else { - Layer::layername_2_tensorname[layer_next_name] = name_num_to_X(layer_next_name); - } - } - next_name = Layer::layername_2_tensorname[layer_next_name]; - } else { - next_name = layer_next_name; - } - if (activation_tensors.find(next_name) == activation_tensors.end()) { - activation_tensors[next_name] = std::make_shared(layer->backend_); - activation_tensors[next_name]->setName(next_name); - activation_tensors[next_name]->setModule(module); - activation_tensors_num[next_name] = 0; - } - } - if (module->doLoad) { - vector output_result = {}; - for (const auto &layer_next_name : layer_next_names) { - string next_name = Layer::use_layername_2_tensorname ? 
Layer::layername_2_tensorname[layer_next_name] : layer_next_name; - output_result.push_back(*activation_tensors[next_name]); - } - return output_result; - } - } - // input_tensors - vector> input_tensors; - for (auto &input : inputs) { - if (input.shouldInGraphs()) { - auto input_name = input.name(); - if (layer->param_["type"] == KVCACHE && do_init && Layer::use_layername_2_tensorname) { - input_name = name_X_to_num(input_name, layer->saved_list_idx); - } - input_tensors.push_back(activation_tensors[input_name]); - } else { - input_tensors.push_back(std::shared_ptr(&input, [](Tensor *) {})); - } - } - // output_tensors - vector layer_next_names = {}; - if (N > 1) { - for (int i = 0; i < N; ++i) { - layer_next_names.push_back("out-" + layer->op_->name() + "-" + std::to_string(i)); - } - } else { - layer_next_names = {"out-" + layer->op_->name()}; - } - vector> output_tensors = {}; - for (const auto &layer_next_name : layer_next_names) { - string next_name = Layer::use_layername_2_tensorname ? 
Layer::layername_2_tensorname[layer_next_name] : layer_next_name; - output_tensors.push_back(activation_tensors[next_name]); - } -#ifdef DEBUGOPTIME - auto start_t = mllm_time_us(); -#endif - switch (Tensor::tensor_status) { - case TENSOR_STATIC_INIT: { - layer->op_->reshape(input_tensors, output_tensors); - layer->op_->setUp(input_tensors, output_tensors); - break; - } - case TENSOR_STATIC_READY: { - layer->op_->execute(input_tensors, output_tensors); - break; - } - case TENSOR_STATIC_TRACE: { - if (layer->backend_->type() == BackendType::MLLM_CPU) { - Tracer::addOp(layer->op_, input_tensors, output_tensors); - } - break; - } - default: { - break; - } - } - // if (Backend::global_backends.size() == 1) { - // for (auto input_tensor : input_tensors) { - // if ((activation_tensors_num.find(input_tensor->name()) != activation_tensors_num.end())) { - // switch (Tensor::tensor_status) { - // case TENSOR_STATIC_INIT: { - // activation_tensors_num[input_tensor->name()] += 1; - // break; - // } - // case TENSOR_STATIC_READY: { - // activation_tensors_num[input_tensor->name()] -= 1; - // break; - // } - // default: { - // } - // } - // if (activation_tensors_num[input_tensor->name()] == 0 && activation_tensors[input_tensor->name()]->sequence() > 1 - // && activation_tensors[input_tensor->name()]->ttype() != GRAPH_OUTPUT) { - // activation_tensors[input_tensor->name()]->free(); - // // std::cout << input_tensor->name() << "|" << std::endl; - // } - // } - // } - // } -#ifdef DEBUGOPTIME - if (Tensor::tensor_status == TENSOR_STATIC_READY) { - auto end_t = mllm_time_us(); - std::cout << layer->op_->name() << " | " << Tensor::tensor_status << " time: " << (end_t - start_t) / 1000.0F << "ms" << std::endl; - } -#endif - vector output_result = {}; - for (const auto &layer_next_name : layer_next_names) { - string next_name = Layer::use_layername_2_tensorname ? 
Layer::layername_2_tensorname[layer_next_name] : layer_next_name; -#ifdef DEBUGSAVETENSOR - activation_tensors[next_name]->saveNData(layer_next_name); -#endif - output_result.push_back(*activation_tensors[next_name]); - } - return output_result; -} -std::vector QNNBackend::runForward(Module *module, std::vector inputs, std::vector args) { - // set static tmp_device to device_ to init layers' op - auto previoud_device = Module::tmp_device; - Module::tmp_device = module->device_; - // Module Loading - if (Module::llm_model_ptr && Module::llm_model_ptr->doLoad) { - auto outputs = module->Forward(inputs, args); - // for inner module, set output tensors to GRAPH_OUTPUT - if (inputs[0].ttype() != TensorType::INPUT_TENSOR) { // XPUs' module should not be the outermost input tensor - for (auto &output : outputs) { - inputs[0].module()->activation_tensors[output.name()]->setTtype(GRAPH_OUTPUT); - } - } - // set Module::tmp_device to previous device - Module::tmp_device = previoud_device; - return outputs; - } - // if (false) { - // inputs[0].setTtype(TensorType::INPUT_TENSOR); - // } - // Module setUp & execute - if (inputs[0].ttype() == TensorType::INPUT_TENSOR) { - if (module->prefilling_token_size_ == 0) { // first time init - module->prefilling_token_size_ = inputs[0].sequence(); - } else if (module->decoding_token_size_ == 0) { - module->decoding_token_size_ = inputs[0].sequence(); - } - for (int i = 0; i < inputs.size(); i++) { - auto &input = inputs[i]; - input.setName("input" + std::to_string(i)); - input.setTtype(TensorType::NORMAL_TENSOR); - module->activation_tensors[input.name()] = std::shared_ptr(&input, [](Tensor *) {}); - module->activation_tensors[input.name()]->setName(input.name()); - module->activation_tensors[input.name()]->setModule(module); - } - Module::llm_model_ptr = module; - Tensor::tensor_status = TENSOR_STATIC_INIT; - - uint64_t time_start = mllm_time_us(); - module->Forward(inputs, args); - Tensor::tensor_status = TENSOR_STATIC_READY; // change 
to EAGER - - auto output = module->Forward(inputs, args); - uint64_t time_end = mllm_time_us(); - - double inference_time_ = (time_end - time_start) / 1000.0F; // ms - module->inference_times_.push_back(inference_time_); - - Module::llm_model_ptr->op_transposed_flag = true; - return output; - } else { // inner Modules - // offload according to the backends' info inited during loading - if (Tensor::tensor_status == TENSOR_STATIC_INIT && module->device_ != MLLM_CPU) { // backend specific module reshape & setup - if (Module::isMultiChunkPrefilling && !Module::isFirstChunk) { // set to TENSOR_UNDEFINED and SKIP executing qnn layers - Tensor::tensor_status = TENSOR_UNDEFINED; - auto outputs = module->Forward(inputs, args); - Tensor::tensor_status = TENSOR_STATIC_INIT; - return outputs; - } - auto inputs_vec = vector>(); - auto outputs_vec = vector>(); - for (auto &i : inputs) { - inputs_vec.push_back(inputs[0].module()->activation_tensors[i.name()]); - } - - Backend::global_backends[module->device_]->onSetUpStart(inputs_vec, outputs_vec, module->getUniqueName()); - - // for xnnpack currently - for (auto &i : inputs) { - i.uuid() = inputs[0].module()->activation_tensors[i.name()]->uuid(); - } - - auto outputs = module->Forward(inputs, args); - for (auto &output : outputs) { - outputs_vec.push_back(inputs[0].module()->activation_tensors[output.name()]); - } - Backend::global_backends[module->device_]->onSetUpEnd(inputs_vec, outputs_vec, module->getUniqueName()); - - // for xnnpack currently - for (auto &o : outputs) { - o.uuid() = outputs[0].module()->activation_tensors[o.name()]->uuid(); - } - - return outputs; - } else if (Tensor::tensor_status == TENSOR_STATIC_READY && module->device_ != MLLM_CPU) { // backend specific module execute - auto inputs_vec = vector>(); - auto outputs_vec = vector>(); - for (auto &i : inputs) { - inputs_vec.push_back(inputs[0].module()->activation_tensors[i.name()]); - } - - auto outputs = module->Forward(inputs, args); - - for (auto &output 
: outputs) { - outputs_vec.push_back(inputs[0].module()->activation_tensors[output.name()]); - } - Backend::global_backends[module->device_]->onExecuteStart(inputs_vec, outputs_vec, module->getUniqueName()); - - Backend::global_backends[module->device_]->onExecuteEnd(outputs_vec, module->getUniqueName()); - - // for xnnpack currently - for (auto &o : outputs) { - o.uuid() = outputs[0].module()->activation_tensors[o.name()]->uuid(); - o.forceResetHostPointer(outputs[0].module()->activation_tensors[o.name()]->rawHostPtr()); - } - - return outputs; - } else if (Tensor::tensor_status == TENSOR_STATIC_TRACE && module->device_ != MLLM_CPU) { - auto inputs_vec = vector>(); - auto outputs_vec = vector>(); - for (auto &i : inputs) { - inputs_vec.push_back(inputs[0].module()->activation_tensors[i.name()]); - } - - auto outputs = module->Forward(inputs, args); - - for (auto &output : outputs) { - outputs_vec.push_back(inputs[0].module()->activation_tensors[output.name()]); - } - Tracer::addModule(inputs_vec, outputs_vec, module->getUniqueName()); - return outputs; - } - return module->Forward(inputs, args); - } -} - -} // namespace mllm \ No newline at end of file diff --git a/src/backends/qnn/QNNBackend.hpp b/src/backends/qnn/QNNBackend.hpp deleted file mode 100644 index 0064ff753..000000000 --- a/src/backends/qnn/QNNBackend.hpp +++ /dev/null @@ -1,196 +0,0 @@ -#ifndef MLLM_QNNBACKEND_H -#define MLLM_QNNBACKEND_H - -#include "Backend.hpp" -#include "Op.hpp" -#include "OpDefined.hpp" -#include "ParamLoader.hpp" -#include "QnnTypes.h" -#include "Types.hpp" -#include "MemoryManager.hpp" -#include - -#include "Utils/IOTensor.hpp" -#include "PAL/DynamicLoading.hpp" -#include "Model/QnnModel.hpp" -#include "QNN.hpp" -#include "Log/Logger.hpp" - -using std::shared_ptr; - -using namespace qnn; -using namespace qnn::tools; - -namespace mllm { -class Module; -class Layer; - -enum class StatusCode { - SUCCESS, - FAILURE, - FAILURE_INPUT_LIST_EXHAUSTED, - FAILURE_SYSTEM_ERROR, - 
FAILURE_SYSTEM_COMMUNICATION_ERROR, - QNN_FEATURE_UNSUPPORTED -}; - -class Op; - -class Tensor; -class Backend; -class QNNBackend : public Backend { -public: - QNNBackend(shared_ptr mm); - ~QNNBackend(); - - Op *opCreate(const OpParam &op_param, string name = "", int threadCount = 4) override { - OpType optype = OpType(op_param.find("type")->second); - auto iter = map_creator_.find(optype); - if (iter == map_creator_.end()) { - std::cout << "NPU Op Don't support type : " << name << std::endl; - return nullptr; - } - Op *exe = nullptr; - exe = iter->second->create(op_param, this, name); - return exe; - } - - // currently, qnn don't support tensor function - TensorFunction *funcCreate(const TensorFuncType type) override { - return nullptr; - } - - class Creator { - public: - virtual ~Creator() = default; - virtual Op *create(OpParam op_param, Backend *bn, string name) const = 0; - }; - bool addCreator(OpType t, Creator *c) { - if (map_creator_.find(t) != map_creator_.end()) { - printf("Error: %d type has be added\n", t); - return false; - } - map_creator_.insert(std::make_pair(t, c)); - return true; - } - - qnn_wrapper_api::ModelError_t graphAddNode(string name, string nodeType, - std::vector inputTensorNames, std::vector outputTensors, - std::vector params, - string packageName); - - qnn_wrapper_api::ModelError_t modelAddTensor(std::string nodeName, Qnn_Tensor_t tensor); - - virtual void onSetUpStart(vector> &inputs, vector> &outputs, string graphName) override; - virtual void onSetUpEnd(vector> &inputs, vector> &outputs, string graphName) override; - virtual void onExecuteStart(vector> &inputs, vector> &outputs, string graphName = "") override; - virtual void onExecuteEnd(std::vector> &outputs, const string &graph_name) override; - - std::vector runFunc( - std::vector out_names, - TensorFuncType type, - std::vector float_args, - std::vector> input_tensors, - bool in_place) override; - std::vector runLayer(Layer *layer, std::vector inputs, int N) override; - 
std::vector runForward(Module *module, std::vector inputs, std::vector args) override; - - void freeGraphDataStructure(string graphName); - - void afterAllGraphsExecute(); - - void pushInputBuffers(uint8_t *ptr) { - currentInputBuffers->push_back(ptr); - } - void pushOutputBuffers(uint8_t *ptr) { - currentOutputBuffers->push_back(ptr); - } - - void setDataLoader(AbstructLoader *dataLoader) { - dataLoader_ = dataLoader; - } - -private: - qnn_wrapper_api::ModelError_t graphFinilize(); - qnn_wrapper_api::ModelError_t graphConfig(); - - void registerOps() override; - void registerFuncs() override{}; - - // @brief Print a message to STDERR then exit with a non-zero - void reportError(const std::string &err); - - StatusCode createContext(); - - StatusCode registerOpPackages(); - - StatusCode freeContext(); - - StatusCode terminateBackend(); - - StatusCode initializeProfiling(); - - std::string getBackendBuildId(); - - StatusCode isDevicePropertySupported(); - - StatusCode createDevice(); - - StatusCode freeDevice(); - - StatusCode verifyFailReturnStatus(Qnn_ErrorHandle_t errCode); - - StatusCode extractBackendProfilingInfo(Qnn_ProfileHandle_t profileHandle); - - StatusCode extractProfilingSubEvents(QnnProfile_EventId_t profileEventId); - - StatusCode extractProfilingEvent(QnnProfile_EventId_t profileEventId); - - AbstructLoader *dataLoader_; - - std::map> inputBufferMap; - std::vector *currentInputBuffers; - std::map> outputBufferMap; - std::vector *currentOutputBuffers; - - std::map map_creator_; - - std::map qnnModelIndexMap_; - std::vector qnnModels_; - int qnnModelIndex_; - - sample_app::QnnFunctionPointers m_qnnFunctionPointers; - - std::vector m_opPackagePaths; - - QnnBackend_Config_t **m_backendConfig = nullptr; - Qnn_ContextHandle_t m_context = nullptr; - QnnContext_Config_t **m_contextConfig = nullptr; - bool m_debug; - - iotensor::InputDataType m_inputDataType; - sample_app::ProfilingLevel m_profilingLevel; - - std::map graphInfoMap_; - - const 
QnnGraph_Config_t **graphConfigs = nullptr; - // these two pointers is .so library handle - void *m_backendLibraryHandle = nullptr; - - iotensor::IOTensor m_ioTensor; - bool m_isBackendInitialized; - bool m_isContextCreated; - Qnn_ProfileHandle_t m_profileBackendHandle = nullptr; - qnn_wrapper_api::GraphConfigInfo_t **m_graphConfigsInfo = nullptr; - uint32_t m_graphConfigsInfoCount; - Qnn_LogHandle_t m_logHandle = nullptr; - Qnn_BackendHandle_t m_backendHandle = nullptr; - Qnn_DeviceHandle_t m_deviceHandle = nullptr; - - std::map inputsMap_; - std::map outputsMap_; -}; - -} // namespace mllm - -#endif // MLLM_QNNBACKEND_H \ No newline at end of file diff --git a/src/backends/qnn/QNNExecutor.cpp b/src/backends/qnn/QNNExecutor.cpp deleted file mode 100644 index 2b3a0eb84..000000000 --- a/src/backends/qnn/QNNExecutor.cpp +++ /dev/null @@ -1,341 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "QNNBackend.hpp" -#include "QNNGraph.hpp" -#include "Timing.hpp" -#include "QNNExecutor.hpp" -#include "memory/MemInspect.hpp" -#include "Types.hpp" -#include "express/ExpressBase.hpp" - -namespace mllm { -// for print graph execute time -#define QNN_EXECUTE_TIME 1 - -BackendType QNNExecutor::graphOffloadRule(BackendType expectedBackend, int graphIndex) { - if (expectedBackend != MLLM_CPU && expectedBackend != MLLM_QNN) { - return MLLM_CPU; - } else { - return expectedBackend; - } -} - -void QNNExecutor::setup(Net *net) { - mllm_time_init(); - - uint64_t time_start = mllm_time_us(); - uint64_t time_end; - - for (int i = 0; i < (int)net->subGraph().size(); ++i) { - auto &g = net->subGraph()[graphNamingRule(i)]; - g->setUpOps(*data_loader_); - } - time_end = mllm_time_us(); - if (load_time_ == 0) { - load_time_ = (time_end - time_start) / 1000.0F; - std::cout << "Load model: " << load_time_ / 1000.0F << " s" << std::endl; - } -} - -void QNNExecutor::run(Context *ctx, Net *net, vector> input_tensors) { - 
bool init = false; - bool reshape = false; - - checkReshape(init, reshape, input_tensors); - - // set Input tensor - vector flashGid = {}; - for (int tid = 0; tid < net->inputNames().size(); ++tid) { - auto input_name = net->inputNames()[tid]; - auto input_tensor = input_tensors[tid]; - input_tensor->setName(input_name); - net->tensors()[input_name] = input_tensor; - if (std::find(flashGid.begin(), flashGid.end(), net->inGmap()[input_name]) == flashGid.end()) { - flashGid.push_back(net->inGmap()[input_name]); - } - } - - for (auto Gid : flashGid) { - net->subGraph()[graphNamingRule(Gid)]->reflashInput(net->tensors()); - } - - auto ex_time_start = mllm_time_us(); - PRINT_MEMORY_USAGE("before setup all graph"); - - static_cast(net->backends()[MLLM_QNN].get())->setDataLoader(data_loader_); - - for (int i = 0; i < (int)net->subGraph().size(); ++i) { - string name = graphNamingRule(i); - auto &g = net->subGraph()[name]; - - g->reshape(); - g->setUpTensors(); - } - auto ex_time_end = mllm_time_us(); - - ex_time_start = mllm_time_us(); - - // execute all graphs here - for (int i = 0; i < (int)net->subGraph().size(); ++i) { - uint64_t t_start = mllm_time_us(); - - auto &g = net->subGraph()[graphNamingRule(i)]; - result_ = g->forward(); - - uint64_t t_end = mllm_time_us(); -#ifdef QNN_EXECUTE_TIME - if (g->device() == MLLM_CPU) { - std::cout << " TIME of CPU Graph " << i << ": " << (t_end - t_start) / 1000.0F << "ms, End at " << (t_end - ex_time_start) / 1000.f << std::endl; - } else { - std::cout << " TIME of QNN Graph " << i << ": " << (t_end - t_start) / 1000.0F << "ms, End at " << (t_end - ex_time_start) / 1000.f << std::endl; - } -#endif - } - - ex_time_end = mllm_time_us(); - - // free all graphs here - for (int i = 0; i < (int)net->subGraph().size(); ++i) { - auto &g = net->subGraph()[graphNamingRule(i)]; - if (g->device() != MLLM_QNN) { - continue; - } - auto *qnn_graph = dynamic_cast(g.get()); - qnn_graph->free(); - } - // use the second graph to free all context 
is OK. - { - auto &g = net->subGraph()[graphNamingRule(1)]; - auto *qnn_graph = dynamic_cast(g.get()); - qnn_graph->allFree(); - } - - if (input_tensors[0]->sequence() == 1) { - auto token_run_time = (ex_time_end - ex_time_start) / 1000.0F; - run_time_.push_back(token_run_time); - } - std::cout << "prefill time: " << (ex_time_end - ex_time_start) / 1000.0F << "ms" << std::endl; -} - -void QNNPipelineExecutor::warmup(Context *ctx, Net *net, vector> input_tensors) { - auto ex_time_start = mllm_time_us(); - // input will be split into chunks and execute in pipeline - int chunk_num = (input_tensors[0]->sequence() + chunk_size_ - 1) / chunk_size_; - // we suppose the tensor(s) of input_tensors is the only one or all have the same seq length - for (int i = 0; i < input_tensors.size(); ++i) { - if (i != 0) { - assert(input_tensors[i]->sequence() == input_tensors[i - 1]->sequence()); - } - } - - // create a new tensor for each chunk - // (chunk_num, vector>(input_tensors.size())); - chunked_tensors_list.resize(chunk_num, vector>(input_tensors.size())); - - if (!isSetup_) { - bool init = false; - bool reshape = false; - // split the tensor in chunks - for (int i = 0; i < chunk_num; ++i) { - // for all inputs in input_tensors - auto &chunked_tensors = chunked_tensors_list[i]; - for (int j = 0; j < input_tensors.size(); ++j) { - chunked_tensors[j] = std::make_shared(); - chunked_tensors[j]->setBackend(net->backends()[BackendType::MLLM_CPU].get()); - chunked_tensors[j]->reshape(1, 1, chunk_size_, 1); - chunked_tensors[j]->setName(net->inputNames()[j]); - // use shallowCopyFrom for each chunk to avoid memcpy - chunked_tensors[j]->shallowCopyFrom(input_tensors[j].get(), false, {0, 0, i * chunk_size_, 0}); - } - } - - checkReshape(init, reshape, chunked_tensors_list[0]); - - // set Input tensor - vector flashGid = {}; - for (int tid = 0; tid < net->inputNames().size(); ++tid) { - auto input_name = net->inputNames()[tid]; - auto input_tensor = chunked_tensors_list[0][tid]; - 
input_tensor->setName(input_name); - net->tensors()[input_name] = input_tensor; - if (std::find(flashGid.begin(), flashGid.end(), net->inGmap()[input_name]) == flashGid.end()) { - flashGid.push_back(net->inGmap()[input_name]); - } - } - - for (auto Gid : flashGid) { - net->subGraph()[graphNamingRule(Gid)]->reflashInput(net->tensors()); - } - - PRINT_MEMORY_USAGE("before setup all graph"); - - static_cast(net->backends()[MLLM_QNN].get())->setDataLoader(data_loader_); - - for (int i = 0; i < (int)net->subGraph().size(); ++i) { - string name = graphNamingRule(i); - auto &g = net->subGraph()[name]; - - g->reshape(); - g->setUpTensors(); - } - isSetup_ = true; - } - auto ex_time_end = mllm_time_us(); - std::cout << "warmup done for " << (ex_time_end - ex_time_start) / 1000000.0 << "s" << std::endl; -} - -void QNNPipelineExecutor::run(Context *ctx, Net *net, vector> input_tensors) { - auto ex_time_start = mllm_time_us(); - - // input will be split into chunks and execute in pipeline - int chunk_num = (input_tensors[0]->sequence() + chunk_size_ - 1) / chunk_size_; - // we suppose the tensor(s) of input_tensors is the only one or all have the same seq length - for (int i = 0; i < input_tensors.size(); ++i) { - if (i != 0) { - assert(input_tensors[i]->sequence() == input_tensors[i - 1]->sequence()); - } - } - - if (!isSetup_) { - warmup(ctx, net, input_tensors); - } - - auto ex_time_end = mllm_time_us(); - - ex_time_start = mllm_time_us(); - - // execute all graphs here - vector> chunked_result_list; - - std::function executeFunc = [&](int chunk_id, int graphIdx) { - int i = graphIdx - chunk_id; - if (i < 0 || i >= (int)net->subGraph().size()) { - return; - } - - if (i == 0) { - // update the input tensor for each chunk - for (int tid = 0; tid < net->inputNames().size(); ++tid) { - auto input_name = net->inputNames()[tid]; - auto input_tensor = chunked_tensors_list[chunk_id][tid]; - unordered_map> map; - map[input_name] = input_tensor; - string graphName = 
graphNamingRule(i); - net->subGraph()[graphName]->reflashInput(map); - } - } - - auto expectedBackend = ctx->sub_backend_[i]; - string name = graphNamingRule(i); - auto t_start = mllm_time_us(); - - auto &g = net->subGraph()[name]; - if (chunk_id != 0 && g->device() == MLLM_CPU) { - // cpu graph should reshape and setup for every chunk forward for KVCache op - g->reshape(); - g->setUpTensors(); - } - // only get the result at the last graph - if (i == net->subGraph().size() - 1) { - chunked_result_list = g->forward(); - } else { - g->forward(); - } - - auto t_end = mllm_time_us(); - -#ifdef QNN_EXECUTE_TIME - if (g->device() == MLLM_CPU) { - std::cout << " TIME of CPU Graph " << i << ": " << (t_end - t_start) / 1000.0F << "ms, End at " << (t_end - ex_time_start) / 1000.f << std::endl; - } else { - std::cout << " TIME of QNN Graph " << i << ": " << (t_end - t_start) / 1000.0F << "ms, End at " << (t_end - ex_time_start) / 1000.f << std::endl; - } -#endif - - PRINT_MEMORY_USAGE((string("execute graph: ") + std::to_string(i)).c_str()); - - // if it is the last graph, move the result to the final result - if (i == (int)net->subGraph().size() - 1) { - result_.resize(chunked_result_list.size()); - if (chunk_id == 0) { // reshape the result tensor when first chunk is executed - for (int tid = 0; tid < chunked_result_list.size(); ++tid) { - result_[tid] = std::make_shared(); - result_[tid]->setBackend(net->backends()[BackendType::MLLM_CPU].get()); - result_[tid]->reshape(chunked_result_list[tid]->batch(), - chunked_result_list[tid]->head(), - chunk_size_ * chunk_num, - chunked_result_list[tid]->dimension()); - result_[tid]->alloc(); - } - } - - // move the result to the final result - for (int tid = 0; tid < chunked_result_list.size(); ++tid) { - auto &result_tensor = chunked_result_list[tid]; - - memcpy(result_[tid]->ptrAt(0, 0, chunk_size_ * chunk_id, 0), result_tensor->hostPtr(), result_tensor->count() * sizeof(float)); - } - } - }; - - omp_set_max_active_levels(3); - // 
based on chunk_num, execute it every 2 chunk in pipeline - for (int chunk_id = 0; chunk_id < chunk_num / 2; ++chunk_id) { - // for every two chunk, start at chunk_id * 2 to avoid no execute for - for (int i = chunk_id * 2; i < (int)net->subGraph().size() + chunk_id * 2 + 5; ++i) { -#pragma omp parallel for num_threads(2) - for (int pair_idx = 0; pair_idx < 2; ++pair_idx) { - executeFunc(chunk_id * 2 + pair_idx, i - pair_idx * 4); - } -#pragma omp barrier -#ifdef QNN_EXECUTE_TIME - std::cout << "---------------------------" << std::endl; -#endif - } - } - // the last chunk if there is odd chunks - if (chunk_num % 2 == 1) { - for (int i = 0; i < (int)net->subGraph().size(); ++i) { - executeFunc(chunk_num - 1, i); - } - } - - ex_time_end = mllm_time_us(); - - // free all graphs here - for (int i = 0; i < (int)net->subGraph().size(); ++i) { - auto expectedBackend = ctx->sub_backend_[i]; - if (expectedBackend == MLLM_CPU || i == 0) { // use CPU graph and CPU backend for embedding, based on specific subgraph split - continue; - } - - string name = graphNamingRule(i); - auto &g = net->subGraph()[name]; - auto *qnn_graph = dynamic_cast(g.get()); - qnn_graph->free(); - } - // use the second graph to free all context is OK. 
- { - string name = graphNamingRule(1); - auto &g = net->subGraph()[name]; - auto *qnn_graph = dynamic_cast(g.get()); - qnn_graph->allFree(); - } - - if (input_tensors[0]->sequence() == 1) { - auto token_run_time = (ex_time_end - ex_time_start) / 1000.0F; - run_time_.push_back(token_run_time); - } - std::cout << "prefill time: " << (ex_time_end - ex_time_start) / 1000.0F << "ms" << std::endl; -} - -} // namespace mllm diff --git a/src/backends/qnn/QNNExecutor.hpp b/src/backends/qnn/QNNExecutor.hpp deleted file mode 100644 index 0b0cb532b..000000000 --- a/src/backends/qnn/QNNExecutor.hpp +++ /dev/null @@ -1,85 +0,0 @@ -#ifndef MLLM_QNNEXECUTOR_H -#define MLLM_QNNEXECUTOR_H -#include "Net.hpp" -#include "Executor.hpp" -#include "Types.hpp" -#include "express/ExpressBase.hpp" -#include -#include -#include -#include -#include - -namespace mllm { -class QNNExecutor : public Executor { -public: - QNNExecutor(ParamLoader *data_loader) : - Executor(data_loader) { - } - ~QNNExecutor() = default; - - /** - * \brief Setup graphs in net - * \param net An instance of the Net class - */ - void setup(Net *net) override; - - /** - * \brief Executes the foreword propagation of provided network - * \param net An instance of the Net class representing the network to be run - * \param input_tensors A vector of input tensors to be processed by the network - */ - void run(Net *net, vector> input_tensors) override { - MLLM_LOG_ERROR_STREAM << "QNN Executor do not support this method" << std::endl; - exit(1); - }; - - // used for assigning graph backends execuation - virtual void run(Context *ctx, Net *net, vector> input_tensor); - virtual void warmup(Context *ctx, Net *net, vector> input_tensor) {}; - - /** - * \brief Setup&Executes the foreword propagation of provided network - * \param net An instance of the Net class representing the network to be run - * \param input_tensors A vector of input tensors to be processed by the network - * - * execute(net, input_tensors) is equivalent to 
setup(net) + run(net, input_tensors) - */ - void execute(Net *net, vector> input_tensor) override { - MLLM_LOG_ERROR_STREAM << "QNNExecutor::execute Not implemented" << std::endl; - }; - - // graph offload rule for qnn execution, used in setup and execution - static BackendType graphOffloadRule(BackendType expectedBackend, int graphIndex); - - // graph naming rule for qnn execution - string graphNamingRule(int graphIndex) { - switch (executionType_) { - case PROMPT: - return "Prompt_Graph." + std::to_string(graphIndex); - case AUTOREGRESSIVE: - return "Autoregressive_Graph." + std::to_string(graphIndex); - } - }; - -protected: - bool isSetup_ = false; - ExecutionType executionType_ = PROMPT; -}; - -class QNNPipelineExecutor : public QNNExecutor { - vector>> chunked_tensors_list; - int chunk_size_; - -public: - QNNPipelineExecutor(ParamLoader *data_loader, int chunk_size = 128) : - QNNExecutor(data_loader), chunk_size_(chunk_size) { - } - - virtual void run(Context *ctx, Net *net, vector> input_tensor) override; - virtual void warmup(Context *ctx, Net *net, vector> input_tensor) override; -}; - -} // namespace mllm - -#endif // MLLM_EXECUTOR_H diff --git a/src/backends/qnn/QNNGraph.cpp b/src/backends/qnn/QNNGraph.cpp deleted file mode 100644 index a2c266bb4..000000000 --- a/src/backends/qnn/QNNGraph.cpp +++ /dev/null @@ -1,175 +0,0 @@ -#include "QNNGraph.hpp" -#include "OpDefined.hpp" -#include "Types.hpp" -#include -#include -#ifdef DEBUGPRINT -#include "Timing.hpp" -#endif - -#include "QNNBackend.hpp" - -namespace mllm { - -QNNGraph::QNNGraph(const NetParameter ¶m, Backend *bn, - unordered_map> &external_tensors, - int threadCount, string graphName) : - Graph(param, bn, external_tensors, threadCount), graphName_(graphName) { -} - -// TODO: deprecated, remove -void QNNGraph::setUpTensors(std::string name) { - - // change to use merge op output as graph input tensor - vector> graph_in_tensors; - if (ops_[op_names_[0]]->type() == SPLITINPUT) { - graph_in_tensors = 
ops_output_tensors_[op_names_[0]]; - } else { - graph_in_tensors = ops_input_tensors_[op_names_[0]]; - } - - // set graph out tensor TensorType - auto &graph_out_tensors = ops_output_tensors_[op_names_[op_names_.size() - 1]]; - for (auto &t : graph_out_tensors) { - t->setTtype(GRAPH_OUTPUT); - t->alloc(); - } - - this->backend_->onSetUpStart(graph_in_tensors, graph_out_tensors, name); - - // set up tensors of ops - for (const auto &op_name : op_names_) { - if (ops_not_inputs_empty_[op_name]) { - ops_[op_name]->setUp(ops_input_tensors_[op_name], - ops_output_tensors_[op_name]); - } else { - // std::cout << "op_name:" << op_name << " is not do" << std::endl; - } - } - - this->backend_->onSetUpEnd(graph_in_tensors, graph_out_tensors, name); -} - -void QNNGraph::setUpTensors() { - // change to use merge op output as graph input tensor - vector> graph_in_tensors; - if (ops_[op_names_[0]]->type() == SPLITINPUT) { - graph_in_tensors = ops_output_tensors_[op_names_[0]]; - } else { - graph_in_tensors = ops_input_tensors_[op_names_[0]]; - } - - // set graph out tensor TensorType - auto &graph_out_tensors = ops_output_tensors_[op_names_[op_names_.size() - 1]]; - for (auto &t : graph_out_tensors) { - t->setTtype(GRAPH_OUTPUT); - t->alloc(); - } - - this->backend_->onSetUpStart(graph_in_tensors, graph_out_tensors, graphName_); - - // set up tensors of ops - for (const auto &op_name : op_names_) { - if (ops_not_inputs_empty_[op_name]) { - ops_[op_name]->setUp(ops_input_tensors_[op_name], - ops_output_tensors_[op_name]); - } else { - // std::cout << "op_name:" << op_name << " is not do" << std::endl; - } - } - - this->backend_->onSetUpEnd(graph_in_tensors, graph_out_tensors, graphName_); -} - -// WARNING: non virtual override function, all features should be merged into the origin function -const vector> &QNNGraph::forward(std::string graphName) { - for (const auto &op_name : op_names_) { - if (ops_not_inputs_empty_[op_name]) { -#ifdef SAVECHECK - for (auto &t : 
ops_input_tensors_[op_name]) { - t->checkData(); - t->saveData(); - } -#endif -#ifdef DEBUGPRINT - uint64_t t_start = mllm_time_us(); -#endif - if (ops_[op_name]->type() == LINEARINT8SHADOW || ops_[op_name]->type() == ROPE) - continue; - ops_[op_name]->execute(ops_input_tensors_[op_name], - ops_output_tensors_[op_name]); - -#ifdef SAVECHECK - for (auto &t : ops_output_tensors_[op_name]) { - t->checkData(); - t->saveData(); - } -#endif - -#ifdef DEBUGPRINT - uint64_t t_end = mllm_time_us(); - std::cout << "" << op_name - << " exe_time:" << (t_end - t_start) / 1000.0F << " ms" - << std::endl; -#endif - } else { - // std::cout<<"op_name:"<backend_->onExecuteStart(ops_input_tensors_[op_names_[0]], ops_output_tensors_[op_names_[op_names_.size() - 1]], graphName); - - return ops_output_tensors_[op_names_[op_names_.size() - 1]]; -} - -void QNNGraph::free() { - auto *qnn_backend = dynamic_cast(this->backend_); - qnn_backend->freeGraphDataStructure(graphName_); -} - -void QNNGraph::allFree() { - auto *qnn_backend = dynamic_cast(this->backend_); - qnn_backend->afterAllGraphsExecute(); -} - -const vector> &QNNGraph::forward(bool autofree) { - for (const auto &op_name : op_names_) { - if (ops_not_inputs_empty_[op_name]) { -#ifdef SAVECHECK - for (auto &t : ops_input_tensors_[op_name]) { - t->checkData(); - t->saveData(); - } -#endif -#ifdef DEBUGPRINT - uint64_t t_start = mllm_time_us(); -#endif - if (ops_[op_name]->type() == LINEARINT8SHADOW || ops_[op_name]->type() == ROPE) - continue; - ops_[op_name]->execute(ops_input_tensors_[op_name], - ops_output_tensors_[op_name]); - -#ifdef SAVECHECK - for (auto &t : ops_output_tensors_[op_name]) { - t->checkData(); - t->saveData(); - } -#endif - -#ifdef DEBUGPRINT - uint64_t t_end = mllm_time_us(); - std::cout << "" << op_name - << " exe_time:" << (t_end - t_start) / 1000.0F << " ms" - << std::endl; -#endif - } else { - // std::cout<<"op_name:"<backend_->onExecuteStart(ops_input_tensors_[op_names_[0]], 
ops_output_tensors_[op_names_[op_names_.size() - 1]], graphName_); - - return ops_output_tensors_[op_names_[op_names_.size() - 1]]; -} - -} // namespace mllm diff --git a/src/backends/qnn/QNNGraph.hpp b/src/backends/qnn/QNNGraph.hpp deleted file mode 100644 index 84f7feeae..000000000 --- a/src/backends/qnn/QNNGraph.hpp +++ /dev/null @@ -1,50 +0,0 @@ -#ifndef MLLM_QNNGRAPH_H -#define MLLM_QNNGRAPH_H -#include "Tensor.hpp" -#include "Op.hpp" -#include "ParamLoader.hpp" -#include "Backend.hpp" -#include "express/ExpressBase.hpp" -#include -#include -#include -#include -#include - -using std::unordered_map; - -namespace mllm { - -class QNNGraph : public Graph { -public: - /** - * \brief Graph - * \param param NetParameter contains the structure of this graph - * \param bn Backend like CPU/QNN etc - * \param external_tensors external tensors from other graph and inter graphs. - * \param threadCount number of Threads - */ - explicit QNNGraph(const NetParameter ¶m, Backend *bn, unordered_map> &external_tensors, int threadCount, string graphName = ""); - virtual ~QNNGraph() = default; - - /** - * \brief forward propagation - * \param autofree Whether to release the memory of weights. Set to false - * \return The last output tensor - */ - virtual const vector> &forward(bool autofree = false) override; - - // TODO: WARNING!!! 
non virtual forward - const vector> &forward(std::string graphName); - - void setUpTensors(std::string graphName); - void setUpTensors() override; - void free(); - void allFree(); -private: - std::string graphName_; -}; - -} // namespace mllm - -#endif // MLLM_GRAPH_H diff --git a/src/backends/qnn/QNNNet.cpp b/src/backends/qnn/QNNNet.cpp deleted file mode 100644 index e1e3ecf10..000000000 --- a/src/backends/qnn/QNNNet.cpp +++ /dev/null @@ -1,66 +0,0 @@ -#include "QNNNet.hpp" -#include "Op.hpp" -#include "QNNExecutor.hpp" -#include "Types.hpp" -#include "Backend.hpp" -#include -#include "backends/qnn/QNNGraph.hpp" -#include "express/ExpressBase.hpp" - -namespace mllm { - -QNNNet::QNNNet(BackendConfig config, Context *ctx) : - Net(config) { - backends_.emplace(MLLM_QNN, GetBackendCreator(MLLM_QNN)->create(config)); - ctx_ = ctx; -} - - -void QNNNet::convert(Context* ctx, BackendType backend_type, int threadCount) { - auto& param = ctx->sub_param_; - // tensors will all be converted to QNN shared buffer - for (int ii = 0; ii < (int)param.size(); ++ii) { - auto &sub_param = param[ii]; - vector names = {}; - auto net_in_tensor = sub_param.net_inputs; - for (const auto &out_t : net_in_tensor) { - tensors_[out_t->name] = std::make_shared(backends_[backend_type].get()); - tensors_[out_t->name]->setName(out_t->name); - tensors_[out_t->name]->setDtype(out_t->type); - for (auto &tensor_name : tensor_names_) { - tensor_name.erase(std::remove(tensor_name.begin(), tensor_name.end(), out_t->name), tensor_name.end()); - } - names.push_back(out_t->name); - } - - for (auto *t : sub_param.net_tensors) { - if (t->in == NULL) { - auto *in_tensor = t; - tensors_[in_tensor->name] = std::make_shared(backends_[backend_type].get()); - tensors_[in_tensor->name]->setName(in_tensor->name); - tensors_[in_tensor->name]->setDtype(in_tensor->type); - input_names_.push_back(in_tensor->name); - inputname_graphidx_[in_tensor->name] = ii; - names.push_back(in_tensor->name); - } - } - 
tensor_names_.push_back(names); - } - - for (int i = 0; i < (int)param.size(); ++i) { - auto expectedBackend = ctx->sub_backend_[i]; - - param[i].topologySort(); - shared_ptr subg_1; - - if(QNNExecutor::graphOffloadRule(expectedBackend, i) == MLLM_CPU){ - subg_1.reset(new Graph(param[i], backends_[MLLM_CPU].get(), tensors_, threadCount)); - } else if (QNNExecutor::graphOffloadRule(expectedBackend, i) == MLLM_QNN) { - subg_1.reset(new QNNGraph(param[i], backends_[backend_type].get(), tensors_, threadCount, "Prompt_Graph." + std::to_string(i))); - } - - subGraphs_["Prompt_Graph." + std::to_string(i)] = subg_1; - } -} - -} // namespace mllm diff --git a/src/backends/qnn/QNNNet.hpp b/src/backends/qnn/QNNNet.hpp deleted file mode 100644 index 77f5bc180..000000000 --- a/src/backends/qnn/QNNNet.hpp +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef MLLM_QNNNET_H -#define MLLM_QNNNET_H - -#include "Types.hpp" -#include "Net.hpp" -#include "express/ExpressBase.hpp" - -namespace mllm { -class QNNNet : public Net { -public: - explicit QNNNet(BackendConfig config, Context *ctx); - virtual ~QNNNet() = default; - - virtual void convert(vector ¶m, BackendType backend_type = BackendType::MLLM_QNN, int threadCount = 4) override {}; - virtual void convert(Context* ctx, BackendType backend_type = BackendType::MLLM_QNN, int threadCount = 4); - -private: - Context *ctx_; -}; - -} // namespace mllm - -#endif // MLLM_NET_H \ No newline at end of file diff --git a/src/backends/qnn/QnnTypeMacros.hpp b/src/backends/qnn/QnnTypeMacros.hpp deleted file mode 100644 index 99aecbecc..000000000 --- a/src/backends/qnn/QnnTypeMacros.hpp +++ /dev/null @@ -1,507 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================== - -#pragma once - -#include -#include -#include - -#include "QnnTypes.h" -#include "WrapperUtils/QnnWrapperUtils.hpp" - -namespace qnn_wrapper_api { - -/** - * @brief Verifies the tensor object passed is of supported Qnn_Tensor_t API version - * - * @param[in] tensor Qnn_Tensor_t object to validate - * - * @return Error code - */ -inline ModelError_t validateTensorVersion(Qnn_Tensor_t tensor) { - if (tensor.version != QNN_TENSOR_VERSION_1) { - PRINT_ERROR("validateTensorVersion() tensor %s, got unsupported version %d.", - tensor.v1.name, - tensor.version); - return MODEL_TENSOR_ERROR; - } - return MODEL_NO_ERROR; -} - -/** - * @brief Verifies the tensor object passed is of supported Qnn_OpConfig_t API version - * - * @param[in] tensor Qnn_OpConfig_t object to validate - * - * @return Error code - */ -inline ModelError_t validateOpConfigVersion(Qnn_OpConfig_t opConfig) { - if (opConfig.version != QNN_OPCONFIG_VERSION_1) { - PRINT_ERROR("validateOpConfigVersion() op %s, got unsupported version %d.", - opConfig.v1.name, - opConfig.version); - return MODEL_NODES_ERROR; - } - return MODEL_NO_ERROR; -} - -inline const char* getQnnOpConfigName(const Qnn_OpConfig_t& opConfig) { - if (opConfig.version == QNN_OPCONFIG_VERSION_1) { - return opConfig.v1.name; - } - return nullptr; -} - -inline const char* getQnnOpConfigName(const Qnn_OpConfig_t* opConfig) { - return getQnnOpConfigName(*opConfig); -} - -inline const char* getQnnOpConfigPackageName(const Qnn_OpConfig_t& opConfig) { - if (opConfig.version == QNN_OPCONFIG_VERSION_1) { - return opConfig.v1.packageName; - } - return nullptr; -} - -inline const char* getQnnOpConfigPackageName(const Qnn_OpConfig_t* opConfig) { - return getQnnOpConfigPackageName(*opConfig); -} - -inline const char* getQnnOpConfigTypeName(const Qnn_OpConfig_t& opConfig) { - if (opConfig.version == QNN_OPCONFIG_VERSION_1) { - return opConfig.v1.typeName; - } - return 
nullptr; -} - -inline const char* getQnnOpConfigTypeName(const Qnn_OpConfig_t* opConfig) { - return getQnnOpConfigTypeName(*opConfig); -} - -inline uint32_t getQnnOpConfigNumParams(const Qnn_OpConfig_t& opConfig) { - if (opConfig.version == QNN_OPCONFIG_VERSION_1) { - return opConfig.v1.numOfParams; - } - return 0u; -} - -inline uint32_t getQnnOpConfigNumParams(const Qnn_OpConfig_t* opConfig) { - return getQnnOpConfigNumParams(*opConfig); -} - -inline const Qnn_Param_t* getQnnOpConfigParams(const Qnn_OpConfig_t& opConfig) { - if (opConfig.version == QNN_OPCONFIG_VERSION_1) { - return opConfig.v1.params; - } - return nullptr; -} - -inline const Qnn_Param_t* getQnnOpConfigParams(const Qnn_OpConfig_t* opConfig) { - return getQnnOpConfigParams(*opConfig); -} - -inline uint32_t getQnnOpConfigNumInputs(const Qnn_OpConfig_t& opConfig) { - if (opConfig.version == QNN_OPCONFIG_VERSION_1) { - return opConfig.v1.numOfInputs; - } - return 0u; -} - -inline uint32_t getQnnOpConfigNumInputs(const Qnn_OpConfig_t* opConfig) { - return getQnnOpConfigNumInputs(*opConfig); -} - -inline const Qnn_Tensor_t* getQnnOpConfigInputs(const Qnn_OpConfig_t& opConfig) { - if (opConfig.version == QNN_OPCONFIG_VERSION_1) { - return opConfig.v1.inputTensors; - } - return nullptr; -} - -inline const Qnn_Tensor_t* getQnnOpConfigInputs(const Qnn_OpConfig_t* opConfig) { - return getQnnOpConfigInputs(*opConfig); -} - -inline uint32_t getQnnOpConfigNumOutputs(const Qnn_OpConfig_t& opConfig) { - if (opConfig.version == QNN_OPCONFIG_VERSION_1) { - return opConfig.v1.numOfOutputs; - } - return 0u; -} - -inline uint32_t getQnnOpConfigNumOutputs(const Qnn_OpConfig_t* opConfig) { - return getQnnOpConfigNumOutputs(*opConfig); -} - -inline const Qnn_Tensor_t* getQnnOpConfigOutputs(const Qnn_OpConfig_t& opConfig) { - if (opConfig.version == QNN_OPCONFIG_VERSION_1) { - return opConfig.v1.outputTensors; - } - return nullptr; -} - -inline const Qnn_Tensor_t* getQnnOpConfigOutputs(const Qnn_OpConfig_t* opConfig) { - 
return getQnnOpConfigOutputs(*opConfig); -} - -inline void setQnnOpConfigName(Qnn_OpConfig_t& opConfig, const char* name) { - if (opConfig.version == QNN_OPCONFIG_VERSION_1) { - opConfig.v1.name = name; - } -} - -inline void setQnnOpConfigName(Qnn_OpConfig_t* opConfig, const char* name) { - setQnnOpConfigName(*opConfig, name); -} - -inline void setQnnOpConfigPackageName(Qnn_OpConfig_t& opConfig, const char* packageName) { - if (opConfig.version == QNN_OPCONFIG_VERSION_1) { - opConfig.v1.packageName = packageName; - } -} - -inline void setQnnOpConfigPackageName(Qnn_OpConfig_t* opConfig, const char* packageName) { - setQnnOpConfigPackageName(*opConfig, packageName); -} - -inline void setQnnOpConfigTypeName(Qnn_OpConfig_t& opConfig, const char* typeName) { - if (opConfig.version == QNN_OPCONFIG_VERSION_1) { - opConfig.v1.typeName = typeName; - } -} - -inline void setQnnOpConfigTypeName(Qnn_OpConfig_t* opConfig, const char* typeName) { - setQnnOpConfigTypeName(*opConfig, typeName); -} - -inline void setQnnOpConfigParams(Qnn_OpConfig_t& opConfig, - uint32_t numOfParams, - Qnn_Param_t* params) { - if (opConfig.version == QNN_OPCONFIG_VERSION_1) { - opConfig.v1.numOfParams = numOfParams; - opConfig.v1.params = params; - } -} - -inline void setQnnOpConfigParams(Qnn_OpConfig_t* opConfig, - uint32_t numOfParams, - Qnn_Param_t* params) { - setQnnOpConfigParams(*opConfig, numOfParams, params); -} - -inline void setQnnOpConfigInputs(Qnn_OpConfig_t& opConfig, - uint32_t numOfInputs, - Qnn_Tensor_t* inputTensors) { - if (opConfig.version == QNN_OPCONFIG_VERSION_1) { - opConfig.v1.numOfInputs = numOfInputs; - opConfig.v1.inputTensors = inputTensors; - } -} - -inline void setQnnOpConfigInputs(Qnn_OpConfig_t* opConfig, - uint32_t numOfInputs, - Qnn_Tensor_t* inputTensors) { - setQnnOpConfigInputs(*opConfig, numOfInputs, inputTensors); -} - -inline void setQnnOpConfigOutputs(Qnn_OpConfig_t& opConfig, - uint32_t numOfOutputs, - Qnn_Tensor_t* outputTensors) { - if (opConfig.version == 
QNN_OPCONFIG_VERSION_1) { - opConfig.v1.numOfOutputs = numOfOutputs; - opConfig.v1.outputTensors = outputTensors; - } -} - -inline void setQnnOpConfigOutputs(Qnn_OpConfig_t* opConfig, - uint32_t numOfOutputs, - Qnn_Tensor_t* outputTensors) { - setQnnOpConfigOutputs(*opConfig, numOfOutputs, outputTensors); -} - -// inline Qnn_OpConfig_t - -inline uint32_t getQnnTensorId(const Qnn_Tensor_t& tensor) { - if (tensor.version == QNN_TENSOR_VERSION_1) { - return tensor.v1.id; - } - return 0u; -} - -inline uint32_t getQnnTensorId(const Qnn_Tensor_t* tensor) { return getQnnTensorId(*tensor); } - -inline const char* getQnnTensorName(const Qnn_Tensor_t& tensor) { - if (tensor.version == QNN_TENSOR_VERSION_1) { - return tensor.v1.name; - } - return 0u; -} - -inline const char* getQnnTensorName(const Qnn_Tensor_t* tensor) { - return getQnnTensorName(*tensor); -} - -inline Qnn_TensorType_t getQnnTensorType(const Qnn_Tensor_t& tensor) { - if (tensor.version == QNN_TENSOR_VERSION_1) { - return tensor.v1.type; - } - return QNN_TENSOR_TYPE_UNDEFINED; -} - -inline Qnn_TensorType_t getQnnTensorType(const Qnn_Tensor_t* tensor) { - return getQnnTensorType(*tensor); -} - -inline Qnn_TensorDataFormat_t getQnnTensorDataFormat(const Qnn_Tensor_t& tensor) { - if (tensor.version == QNN_TENSOR_VERSION_1) { - return tensor.v1.dataFormat; - } - return QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER; -} - -inline Qnn_TensorDataFormat_t getQnnTensorDataFormat(const Qnn_Tensor_t* tensor) { - return getQnnTensorDataFormat(*tensor); -} - -inline Qnn_DataType_t getQnnTensorDataType(const Qnn_Tensor_t& tensor) { - if (tensor.version == QNN_TENSOR_VERSION_1) { - return tensor.v1.dataType; - } - return QNN_DATATYPE_UNDEFINED; -} - -inline Qnn_DataType_t getQnnTensorDataType(const Qnn_Tensor_t* tensor) { - return getQnnTensorDataType(*tensor); -} - -inline Qnn_QuantizeParams_t getQnnTensorQuantParams(const Qnn_Tensor_t& tensor) { - if (tensor.version == QNN_TENSOR_VERSION_1) { - return tensor.v1.quantizeParams; - } - 
return QNN_QUANTIZE_PARAMS_INIT; -} - -inline Qnn_QuantizeParams_t getQnnTensorQuantParams(const Qnn_Tensor_t* tensor) { - return getQnnTensorQuantParams(*tensor); -} - -inline uint32_t getQnnTensorRank(const Qnn_Tensor_t& tensor) { - if (tensor.version == QNN_TENSOR_VERSION_1) { - return tensor.v1.rank; - } - return 0u; -} - -inline uint32_t getQnnTensorRank(const Qnn_Tensor_t* tensor) { return getQnnTensorRank(*tensor); } - -inline uint32_t* getQnnTensorDimensions(const Qnn_Tensor_t& tensor) { - if (tensor.version == QNN_TENSOR_VERSION_1) { - return tensor.v1.dimensions; - } - return nullptr; -} - -inline uint32_t* getQnnTensorDimensions(const Qnn_Tensor_t* tensor) { - return getQnnTensorDimensions(*tensor); -} - -inline Qnn_TensorMemType_t getQnnTensorMemType(const Qnn_Tensor_t& tensor) { - if (tensor.version == QNN_TENSOR_VERSION_1) { - return tensor.v1.memType; - } - return QNN_TENSORMEMTYPE_UNDEFINED; -} - -inline Qnn_TensorMemType_t getQnnTensorMemType(const Qnn_Tensor_t* tensor) { - return getQnnTensorMemType(*tensor); -} - -inline Qnn_ClientBuffer_t getQnnTensorClientBuf(const Qnn_Tensor_t& tensor) { - if (tensor.version == QNN_TENSOR_VERSION_1) { - return tensor.v1.clientBuf; - } - return QNN_CLIENT_BUFFER_INIT; -} - -inline Qnn_ClientBuffer_t getQnnTensorClientBuf(const Qnn_Tensor_t* tensor) { - return getQnnTensorClientBuf(*tensor); -} - -inline Qnn_MemHandle_t getQnnTensorMemHandle(const Qnn_Tensor_t& tensor) { - if (tensor.version == QNN_TENSOR_VERSION_1) { - return tensor.v1.memHandle; - } - return nullptr; -} - -inline Qnn_MemHandle_t getQnnTensorMemHandle(const Qnn_Tensor_t* tensor) { - return getQnnTensorMemHandle(*tensor); -} - -inline void setQnnTensorId(Qnn_Tensor_t& tensor, uint32_t id) { - if (tensor.version == QNN_TENSOR_VERSION_1) { - tensor.v1.id = id; - } -} - -inline void setQnnTensorId(Qnn_Tensor_t* tensor, uint32_t id) { setQnnTensorId(*tensor, id); } - -inline void setQnnTensorName(Qnn_Tensor_t& tensor, const char* name) { - if 
(tensor.version == QNN_TENSOR_VERSION_1) { - tensor.v1.name = name; - } -} - -inline void setQnnTensorName(Qnn_Tensor_t* tensor, const char* name) { - setQnnTensorName(*tensor, name); -} - -inline void setQnnTensorType(Qnn_Tensor_t& tensor, Qnn_TensorType_t type) { - if (tensor.version == QNN_TENSOR_VERSION_1) { - tensor.v1.type = type; - } -} - -inline void setQnnTensorType(Qnn_Tensor_t* tensor, Qnn_TensorType_t type) { - setQnnTensorType(*tensor, type); -} - -inline void setQnnTensorDataFormat(Qnn_Tensor_t& tensor, Qnn_TensorDataFormat_t format) { - if (tensor.version == QNN_TENSOR_VERSION_1) { - tensor.v1.dataFormat = format; - } -} - -inline void setQnnTensorDataFormat(Qnn_Tensor_t* tensor, Qnn_TensorDataFormat_t format) { - setQnnTensorDataFormat(*tensor, format); -} - -inline void setQnnTensorDataType(Qnn_Tensor_t& tensor, Qnn_DataType_t dataType) { - if (tensor.version == QNN_TENSOR_VERSION_1) { - tensor.v1.dataType = dataType; - } -} - -inline void setQnnTensorDataType(Qnn_Tensor_t* tensor, Qnn_DataType_t dataType) { - setQnnTensorDataType(*tensor, dataType); -} - -inline void setQnnTensorQuantParams(Qnn_Tensor_t& tensor, Qnn_QuantizeParams_t params) { - if (tensor.version == QNN_TENSOR_VERSION_1) { - tensor.v1.quantizeParams = params; - } -} - -inline void setQnnTensorQuantParams(Qnn_Tensor_t* tensor, Qnn_QuantizeParams_t params) { - setQnnTensorQuantParams(*tensor, params); -} - -inline void setQnnTensorRank(Qnn_Tensor_t& tensor, uint32_t rank) { - if (tensor.version == QNN_TENSOR_VERSION_1) { - tensor.v1.rank = rank; - } -} - -inline void setQnnTensorRank(Qnn_Tensor_t* tensor, uint32_t rank) { - setQnnTensorRank(*tensor, rank); -} - -inline void setQnnTensorDimensions(Qnn_Tensor_t& tensor, uint32_t* dims) { - if (tensor.version == QNN_TENSOR_VERSION_1) { - tensor.v1.dimensions = dims; - } -} - -inline void setQnnTensorDimensions(Qnn_Tensor_t* tensor, uint32_t* dims) { - setQnnTensorDimensions(*tensor, dims); -} - -inline void 
setQnnTensorMemType(Qnn_Tensor_t& tensor, Qnn_TensorMemType_t memType) { - if (tensor.version == QNN_TENSOR_VERSION_1) { - tensor.v1.memType = memType; - } -} - -inline void setQnnTensorMemType(Qnn_Tensor_t* tensor, Qnn_TensorMemType_t memType) { - setQnnTensorMemType(*tensor, memType); -} - -inline void setQnnTensorClientBuf(Qnn_Tensor_t& tensor, Qnn_ClientBuffer_t clientBuf) { - if (tensor.version == QNN_TENSOR_VERSION_1) { - tensor.v1.clientBuf = clientBuf; - } -} - -inline void setQnnTensorClientBuf(Qnn_Tensor_t* tensor, Qnn_ClientBuffer_t clientBuf) { - setQnnTensorClientBuf(*tensor, clientBuf); -} - -inline void setQnnTensorMemHandle(Qnn_Tensor_t& tensor, Qnn_MemHandle_t handle) { - if (tensor.version == QNN_TENSOR_VERSION_1) { - tensor.v1.memHandle = handle; - } -} - -inline void setQnnTensorMemHandle(Qnn_Tensor_t* tensor, Qnn_MemHandle_t handle) { - setQnnTensorMemHandle(*tensor, handle); -} - -// Validation -#define VALIDATE_TENSOR_VERSION(tensor, err) VALIDATE(validateTensorVersion(tensor), err) -#define VALIDATE_OP_CONFIG_VERSION(op, err) VALIDATE(validateOpConfigVersion(op), err) - -// Accessors for QNN Op Config -#define QNN_OP_CFG_GET_NAME(opConfig) getQnnOpConfigName(opConfig) -#define QNN_OP_CFG_GET_PACKAGE_NAME(opConfig) getQnnOpConfigPackageName(opConfig) -#define QNN_OP_CFG_GET_TYPE_NAME(opConfig) getQnnOpConfigTypeName(opConfig) -#define QNN_OP_CFG_GET_NUM_PARAMS(opConfig) getQnnOpConfigNumParams(opConfig) -#define QNN_OP_CFG_GET_PARAMS(opConfig) getQnnOpConfigParams(opConfig) -#define QNN_OP_CFG_GET_NUM_INPUTS(opConfig) getQnnOpConfigNumInputs(opConfig) -#define QNN_OP_CFG_GET_INPUTS(opConfig) getQnnOpConfigInputs(opConfig) -#define QNN_OP_CFG_GET_NUM_OUTPUTS(opConfig) getQnnOpConfigNumOutputs(opConfig) -#define QNN_OP_CFG_GET_OUTPUTS(opConfig) getQnnOpConfigOutputs(opConfig) - -// Modifiers for QNN Op Config -#define QNN_OP_CFG_SET_NAME(opConfig, value) setQnnOpConfigName(opConfig, value) -#define QNN_OP_CFG_SET_PACKAGE_NAME(opConfig, value) 
setQnnOpConfigPackageName(opConfig, value) -#define QNN_OP_CFG_SET_TYPE_NAME(opConfig, value) setQnnOpConfigTypeName(opConfig, value) -#define QNN_OP_CFG_SET_PARAMS(opConfig, numOfParams, params) \ - setQnnOpConfigParams(opConfig, numOfParams, params) -#define QNN_OP_CFG_SET_INPUTS(opConfig, numOfInputs, inputTensors) \ - setQnnOpConfigInputs(opConfig, numOfInputs, inputTensors) -#define QNN_OP_CFG_SET_OUTPUTS(opConfig, numOfOutputs, outputTensors) \ - setQnnOpConfigOutputs(opConfig, numOfOutputs, outputTensors) - -// Accessors for QNN Tensor -#define QNN_TENSOR_GET_ID(tensor) getQnnTensorId(tensor) -#define QNN_TENSOR_GET_NAME(tensor) getQnnTensorName(tensor) -#define QNN_TENSOR_GET_TYPE(tensor) getQnnTensorType(tensor) -#define QNN_TENSOR_GET_DATA_FORMAT(tensor) getQnnTensorDataFormat(tensor) -#define QNN_TENSOR_GET_DATA_TYPE(tensor) getQnnTensorDataType(tensor) -#define QNN_TENSOR_GET_QUANT_PARAMS(tensor) getQnnTensorQuantParams(tensor) -#define QNN_TENSOR_GET_RANK(tensor) getQnnTensorRank(tensor) -#define QNN_TENSOR_GET_DIMENSIONS(tensor) getQnnTensorDimensions(tensor) -#define QNN_TENSOR_GET_MEM_TYPE(tensor) getQnnTensorMemType(tensor) -#define QNN_TENSOR_GET_CLIENT_BUF(tensor) getQnnTensorClientBuf(tensor) -#define QNN_TENSOR_GET_MEM_HANDLE(tensor) getQnnTensorMemHandle(tensor) - -// Modifiers for QNN Tensor -#define QNN_TENSOR_SET_ID(tensor, value) setQnnTensorId(tensor, value) -#define QNN_TENSOR_SET_NAME(tensor, value) setQnnTensorName(tensor, value) -#define QNN_TENSOR_SET_TYPE(tensor, value) setQnnTensorType(tensor, value) -#define QNN_TENSOR_SET_DATA_FORMAT(tensor, value) setQnnTensorDataFormat(tensor, value) -#define QNN_TENSOR_SET_DATA_TYPE(tensor, value) setQnnTensorDataType(tensor, value) -#define QNN_TENSOR_SET_QUANT_PARAMS(tensor, value) setQnnTensorQuantParams(tensor, value) -#define QNN_TENSOR_SET_RANK(tensor, value) setQnnTensorRank(tensor, value) -#define QNN_TENSOR_SET_DIMENSIONS(tensor, value) setQnnTensorDimensions(tensor, value) -#define 
QNN_TENSOR_SET_MEM_TYPE(tensor, value) setQnnTensorMemType(tensor, value) -#define QNN_TENSOR_SET_CLIENT_BUF(tensor, value) setQnnTensorClientBuf(tensor, value) -#define QNN_TENSOR_SET_MEM_HANDLE(tensor, value) setQnnTensorMemHandle(tensor, value) - -} // namespace qnn_wrapper_api diff --git a/src/backends/qnn/README.md b/src/backends/qnn/README.md deleted file mode 100644 index b7f38d908..000000000 --- a/src/backends/qnn/README.md +++ /dev/null @@ -1,80 +0,0 @@ -# Qualcomm AI Engine Direct(QNN) backend - -Currently, this is only preliminary support and is under active development for better performance and more supported models. - -## QNN Environment Set Up -This section is basically following the QNN documentation, for more details, see: [QNN Linux Setup](https://docs.qualcomm.com/bundle/publicresource/topics/80-63442-50/linux_setup.html). -The QNN backend relies on the Qualcomm QNN SDK and Hexagon SDK to compile QNN Backends and LLM-specific operators. The QNN SDK can be downloaded [here](https://www.qualcomm.com/developer/software/qualcomm-ai-engine-direct-sdk). The Hexagon SDK can be downloaded using [QPM](https://qpm.qualcomm.com/). The compiling environment only supports Linux now. - -Version requirements: -* QNN: [Linux v2.20+](https://qpm.qualcomm.com/#/main/tools/details/qualcomm_neural_processing_sdk) -* Hexagon SDK: [Linux 5.x](https://qpm.qualcomm.com/#/main/tools/details/HexagonSDK5.x) (Some accounts may have no permission to access this SDK and may need to contact Qualcomm for support.) - -**NOTE:** After downloading the QNN SDK, unzip the file and move the folder name like `qairt/2.31.0.250130` to `src/backends/qnn/` and rename the version to 'sdk'. The folder structure should be like `src/backends/qnn/sdk`. 
- -After downloading and installing the two SDKs use "qpm-cli", set up the sdk environment by running the following commands: - -```bash -source /bin/envsetup.sh -source /setup_sdk_env.source -``` - -After setting up the environment, you will have following ENV variables: - -* QNN_SDK_ROOT=/path/to/your/qnn/sdk -* HEXAGON_SDK_ROOT=/path/to/your/hexagon/sdk - -## Op Package Compile - -To use QNN offload, the CPU & HTP QNN op package are needed, the following scripts will build QNN op package needed by the project. `QNN_SDK_ROOT`, `HEXAGON_SDK_ROOT` and `ANDROID_NDK_ROOT` should be set in the environment. - -```bash -cd mllm/src/backends/qnn/LLaMAOpPackageHtp/LLaMAPackage/ -make htp_aarch64 && make htp_v75 -``` - -## Model Conversion - -The model used by QNN prefilling is in int8 format, with static per-tensor quantization. And several 'shadow layer' weights are needed to be added to the model. The Profiling Activation Tools discription is in [tools/convertor/profiling_activation/README.md](../../../tools/convertor/profiling_activation/README.md), you can refer to it for more details. - -## Build & Run - -Build the target with QNN backend. - -```bash -cd ../script -./build_qnn_android.sh -``` - -Currently, there are two style of modeling, the Module API and the old implementation. The demo of the Module API is in `examples/demo_qwen_npu.cpp` which is in a **user friendly style**, and the old implementation is in `examples/main_qwen_npu.cpp` which supports **the chunk pipeline prefilling**. 
- -Download the model from [here](https://huggingface.co/mllmTeam/qwen-1.5-1.8b-chat-mllm/blob/main/), or using the following instructions - -```bash -mkdir ../models && cd ../models -# Download int8 model used by npu & q4k model used by cpu -wget https://huggingface.co/mllmTeam/qwen-1.5-1.8b-chat-mllm/resolve/main/qwen-1.5-1.8b-chat-int8.mllm?download=true -O qwen-1.5-1.8b-chat-int8.mllm -wget https://huggingface.co/mllmTeam/qwen-1.5-1.8b-chat-mllm/resolve/main/qwen-1.5-1.8b-chat-q4k.mllm?download=true -O qwen-1.5-1.8b-chat-q4k.mllm -``` - -Run on an android phone with at least 16GB of memory. - -```bash -cd ../script -./run_qwen_npu.sh -``` - -There are two arguments in the executable. `-s` is for the sequence length of prefilling, the default value is 64 in the demo we provided. `-c` for type of QNN prefilling options, when it is set to 1, the input will be splited into many chunks of sequence 256 and be executed in a pipeline. When it is set to 0, the input will be executed in one chunk. - -Result are as followed: - -``` -> ./main_qwen_npu -s 512 -c 1 -[Q] <|im_start|>system -You are a helpful assistant.<|im_end|> -<|im_start|>user -Give me a short introduction to large language model.<|im_end|> -<|im_start|>assistant - -[A] The large language model is a type of artificial intelligence that is designed to generate human-like text based on the input it receives It is typically trained on large datasets of text, such as books, articles, and web pages, and uses statistical models to learn patterns and relationships in the data The goal of a large language model is to generate text that is coherent -``` diff --git a/src/backends/qnn/Utils/DataUtil.cpp b/src/backends/qnn/Utils/DataUtil.cpp deleted file mode 100644 index 5dcc1616f..000000000 --- a/src/backends/qnn/Utils/DataUtil.cpp +++ /dev/null @@ -1,403 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2019-2022 Qualcomm Technologies, Inc. 
-// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== -#include -#include -#include -#include -#include - -#include "DataUtil.hpp" -#include "Log.h" -#include "Logger.hpp" -#include "PAL/Directory.hpp" -#include "PAL/FileOp.hpp" -#include "PAL/Path.hpp" - -using namespace qnn; -using namespace qnn::tools; - -std::tuple datautil::getDataTypeSizeInBytes(Qnn_DataType_t dataType) { - if (g_dataTypeToSize.find(dataType) == g_dataTypeToSize.end()) { - MLLM_LOG_ERROR_LEGACY("Invalid qnn data type provided"); - return std::make_tuple(StatusCode::INVALID_DATA_TYPE, 0); - } - return std::make_tuple(StatusCode::SUCCESS, g_dataTypeToSize.find(dataType)->second); -} - -size_t datautil::calculateElementCount(std::vector dims) { - if (dims.size() == 0) { - return 0; - } - return std::accumulate(dims.begin(), dims.end(), 1, std::multiplies()); -} - -std::tuple datautil::calculateLength(std::vector dims, - Qnn_DataType_t dataType) { - if (dims.size() == 0) { - MLLM_LOG_ERROR_LEGACY("dims.size() is zero"); - return std::make_tuple(StatusCode::INVALID_DIMENSIONS, 0); - } - StatusCode returnStatus{StatusCode::SUCCESS}; - size_t length{0}; - std::tie(returnStatus, length) = getDataTypeSizeInBytes(dataType); - if (StatusCode::SUCCESS != returnStatus) { - return std::make_tuple(returnStatus, 0); - } - length *= calculateElementCount(dims); - return std::make_tuple(StatusCode::SUCCESS, length); -} - -datautil::StatusCode datautil::readDataFromFile(std::string filePath, - std::vector dims, - Qnn_DataType_t dataType, - uint8_t* buffer) { - if (nullptr == buffer) { - MLLM_LOG_ERROR_LEGACY("buffer is nullptr"); - return StatusCode::INVALID_BUFFER; - } - std::ifstream in(filePath, std::ifstream::binary); - if (!in) { - MLLM_LOG_ERROR_LEGACY("Failed to open input file: %s", filePath.c_str()); - return StatusCode::FILE_OPEN_FAIL; - } - in.seekg(0, in.end); - const size_t length 
= in.tellg(); - in.seekg(0, in.beg); - StatusCode err{StatusCode::SUCCESS}; - size_t l{0}; - std::tie(err, l) = datautil::calculateLength(dims, dataType); - if (StatusCode::SUCCESS != err) { - return err; - } - if (length != l) { - MLLM_LOG_ERROR_LEGACY("Input file %s: file size in bytes (%d), should be equal to: %d", - filePath.c_str(), - length, - l); - return StatusCode::DATA_SIZE_MISMATCH; - } - - if (!in.read(reinterpret_cast(buffer), length)) { - MLLM_LOG_ERROR_LEGACY("Failed to read the contents of: %s", filePath.c_str()); - return StatusCode::DATA_READ_FAIL; - } - return StatusCode::SUCCESS; -} - -datautil::ReadBatchDataRetType_t datautil::readBatchDataAndUpdateQueue( - std::queue& filePaths, - std::vector dims, - Qnn_DataType_t dataType, - uint8_t* buffer) { - if (nullptr == buffer) { - MLLM_LOG_ERROR_LEGACY("buffer is nullptr"); - return std::make_tuple(StatusCode::INVALID_BUFFER, 0, 0); - } - StatusCode err{StatusCode::SUCCESS}; - size_t l{0}; - std::tie(err, l) = datautil::calculateLength(dims, dataType); - if (StatusCode::SUCCESS != err) { - return std::make_tuple(err, 0, 0); - } - size_t numInputsCopied = 0; - size_t numBatchSize = 0; - size_t totalLength = 0; - do { - if (filePaths.empty()) { - numBatchSize += (l - totalLength) / (totalLength / numBatchSize); - // pad the vector with zeros - memset(buffer + totalLength, 0, (l - totalLength) * sizeof(char)); - totalLength = l; - } else { - std::ifstream in(filePaths.front(), std::ifstream::binary); - if (!in) { - MLLM_LOG_ERROR_LEGACY("Failed to open input file: %s", filePaths.front().c_str()); - return std::make_tuple(StatusCode::FILE_OPEN_FAIL, numInputsCopied, numBatchSize); - } - in.seekg(0, in.end); - const size_t length = in.tellg(); - in.seekg(0, in.beg); - if ((l % length) != 0 || length > l || length == 0) { - MLLM_LOG_ERROR_LEGACY("Input file %s: file size in bytes (%d), should be multiples of: %d", - filePaths.front().c_str(), - length, - l); - return 
std::make_tuple(StatusCode::DATA_SIZE_MISMATCH, numInputsCopied, numBatchSize); - } - if (!in.read(reinterpret_cast(buffer + (numInputsCopied * length)), length)) { - MLLM_LOG_ERROR_LEGACY("Failed to read the contents of: %s", filePaths.front().c_str()); - return std::make_tuple(StatusCode::DATA_READ_FAIL, numInputsCopied, numBatchSize); - } - QNN_VERBOSE("Return from readDataFromFile()"); - totalLength += length; - numInputsCopied += 1; - numBatchSize += 1; - filePaths.pop(); - } - } while (totalLength < l); - return std::make_tuple(StatusCode::SUCCESS, numInputsCopied, numBatchSize); -} - -std::tuple datautil::getFileSize(std::string filePath) { - std::ifstream in(filePath, std::ifstream::binary); - if (!in) { - MLLM_LOG_ERROR_LEGACY("Failed to open input file: %s", filePath.c_str()); - return std::make_tuple(StatusCode::FILE_OPEN_FAIL, 0); - } - in.seekg(0, in.end); - const size_t length = in.tellg(); - in.seekg(0, in.beg); - return std::make_tuple(StatusCode::SUCCESS, length); -} - -datautil::StatusCode datautil::readBinaryFromFile(std::string filePath, - uint8_t* buffer, - size_t bufferSize) { - if (nullptr == buffer) { - MLLM_LOG_ERROR_LEGACY("buffer is nullptr"); - return StatusCode::INVALID_BUFFER; - } - std::ifstream in(filePath, std::ifstream::binary); - if (!in) { - MLLM_LOG_ERROR_LEGACY("Failed to open input file: %s", filePath.c_str()); - return StatusCode::FILE_OPEN_FAIL; - } - if (!in.read(reinterpret_cast(buffer), bufferSize)) { - MLLM_LOG_ERROR_LEGACY("Failed to read the contents of: %s", filePath.c_str()); - return StatusCode::DATA_READ_FAIL; - } - return StatusCode::SUCCESS; -} - -datautil::StatusCode datautil::writeDataToFile(std::string fileDir, - std::string fileName, - std::vector dims, - Qnn_DataType_t dataType, - uint8_t* buffer) { - if (nullptr == buffer) { - MLLM_LOG_ERROR_LEGACY("buffer is nullptr"); - return StatusCode::INVALID_BUFFER; - } - if (!pal::Directory::makePath(fileDir)) { - MLLM_LOG_ERROR_LEGACY("Failed to create output 
directory: %s", fileDir.c_str()); - return StatusCode::DIRECTORY_CREATE_FAIL; - } - const std::string outputPath(fileDir + pal::Path::getSeparator() + fileName); - std::ofstream os(outputPath, std::ofstream::binary); - if (!os) { - MLLM_LOG_ERROR_LEGACY("Failed to open output file for writing: %s", outputPath.c_str()); - return StatusCode::FILE_OPEN_FAIL; - } - StatusCode err{StatusCode::SUCCESS}; - size_t length{0}; - std::tie(err, length) = datautil::calculateLength(dims, dataType); - if (StatusCode::SUCCESS != err) { - return err; - } - for (size_t l = 0; l < length; l++) { - os.write(reinterpret_cast(&(*(buffer + l))), 1); - } - return StatusCode::SUCCESS; -} - -datautil::StatusCode datautil::writeBatchDataToFile(std::vector fileDirs, - std::string fileName, - std::vector dims, - Qnn_DataType_t dataType, - uint8_t* buffer, - const size_t batchSize) { - if (nullptr == buffer) { - MLLM_LOG_ERROR_LEGACY("buffer is nullptr"); - return StatusCode::INVALID_BUFFER; - } - StatusCode err{StatusCode::SUCCESS}; - size_t length{0}; - std::tie(err, length) = datautil::calculateLength(dims, dataType); - if (StatusCode::SUCCESS != err) { - return err; - } - auto outputSize = (length / batchSize); - for (size_t batchIndex = 0; batchIndex < fileDirs.size(); batchIndex++) { - std::string fileDir = fileDirs[batchIndex]; - if (!pal::Directory::makePath(fileDir)) { - MLLM_LOG_ERROR_LEGACY("Failed to create output directory: %s", fileDir.c_str()); - return StatusCode::DIRECTORY_CREATE_FAIL; - } - const std::string outputPath(fileDir + pal::Path::getSeparator() + fileName); - std::ofstream os(outputPath, std::ofstream::binary); - if (!os) { - MLLM_LOG_ERROR_LEGACY("Failed to open output file for writing: %s", outputPath.c_str()); - return StatusCode::FILE_OPEN_FAIL; - } - for (size_t l = 0; l < outputSize; l++) { - size_t bufferIndex = l + (batchIndex * outputSize); - os.write(reinterpret_cast(&(*(buffer + bufferIndex))), 1); - } - } - return StatusCode::SUCCESS; -} - 
-datautil::StatusCode datautil::writeBinaryToFile(std::string fileDir, - std::string fileName, - uint8_t* buffer, - size_t bufferSize) { - if (nullptr == buffer) { - MLLM_LOG_ERROR_LEGACY("buffer is nullptr"); - return StatusCode::INVALID_BUFFER; - } - if (!pal::Directory::makePath(fileDir)) { - MLLM_LOG_ERROR_LEGACY("Failed to create output directory: %s", fileDir.c_str()); - return StatusCode::DIRECTORY_CREATE_FAIL; - } - const std::string outputPath(fileDir + pal::Path::getSeparator() + fileName); - std::ofstream os(outputPath, std::ofstream::binary); - if (!os) { - MLLM_LOG_ERROR_LEGACY("Failed to open output file for writing: %s", outputPath.c_str()); - return StatusCode::FILE_OPEN_FAIL; - } - os.write(reinterpret_cast(buffer), bufferSize); - return StatusCode::SUCCESS; -} - -template -datautil::StatusCode datautil::floatToTfN( - T_QuantType* out, float* in, int32_t offset, float scale, size_t numElements) { - static_assert(std::is_unsigned::value, "floatToTfN supports unsigned only!"); - - if (nullptr == out || nullptr == in) { - MLLM_LOG_ERROR_LEGACY("Received a nullptr"); - return StatusCode::INVALID_BUFFER; - } - - size_t dataTypeSizeInBytes = sizeof(T_QuantType); - size_t bitWidth = dataTypeSizeInBytes * g_bitsPerByte; - double trueBitWidthMax = pow(2, bitWidth) - 1; - double encodingMin = offset * scale; - double encodingMax = (trueBitWidthMax + offset) * scale; - double encodingRange = encodingMax - encodingMin; - - for (size_t i = 0; i < numElements; ++i) { - int quantizedValue = round(trueBitWidthMax * (in[i] - encodingMin) / encodingRange); - if (quantizedValue < 0) - quantizedValue = 0; - else if (quantizedValue > (int)trueBitWidthMax) - quantizedValue = (int)trueBitWidthMax; - out[i] = static_cast(quantizedValue); - } - return StatusCode::SUCCESS; -} - -template datautil::StatusCode datautil::floatToTfN( - uint8_t* out, float* in, int32_t offset, float scale, size_t numElements); - -template datautil::StatusCode datautil::floatToTfN( - uint16_t* 
out, float* in, int32_t offset, float scale, size_t numElements); - -template -datautil::StatusCode datautil::tfNToFloat( - float* out, T_QuantType* in, int32_t offset, float scale, size_t numElements) { - static_assert(std::is_unsigned::value, "tfNToFloat supports unsigned only!"); - - if (nullptr == out || nullptr == in) { - MLLM_LOG_ERROR_LEGACY("Received a nullptr"); - return StatusCode::INVALID_BUFFER; - } - for (size_t i = 0; i < numElements; i++) { - double quantizedValue = static_cast(in[i]); - double offsetDouble = static_cast(offset); - out[i] = static_cast((quantizedValue + offsetDouble) * scale); - } - return StatusCode::SUCCESS; -} - -template datautil::StatusCode datautil::tfNToFloat( - float* out, uint8_t* in, int32_t offset, float scale, size_t numElements); - -template datautil::StatusCode datautil::tfNToFloat( - float* out, uint16_t* in, int32_t offset, float scale, size_t numElements); - -template -datautil::StatusCode datautil::castToFloat(float* out, T_QuantType* in, size_t numElements) { - if (nullptr == out || nullptr == in) { - MLLM_LOG_ERROR_LEGACY("Received a nullptr"); - return StatusCode::INVALID_BUFFER; - } - for (size_t i = 0; i < numElements; i++) { - out[i] = static_cast(in[i]); - } - return StatusCode::SUCCESS; -} - -template datautil::StatusCode datautil::castToFloat(float* out, - uint8_t* in, - size_t numElements); - -template datautil::StatusCode datautil::castToFloat(float* out, - uint16_t* in, - size_t numElements); - -template datautil::StatusCode datautil::castToFloat(float* out, - uint32_t* in, - size_t numElements); - -template datautil::StatusCode datautil::castToFloat(float* out, - int8_t* in, - size_t numElements); - -template datautil::StatusCode datautil::castToFloat(float* out, - int16_t* in, - size_t numElements); - -template datautil::StatusCode datautil::castToFloat(float* out, - int32_t* in, - size_t numElements); - -template datautil::StatusCode datautil::castToFloat<__fp16>(float* out, - __fp16* in, - size_t 
numElements); - - -template -datautil::StatusCode datautil::castFromFloat(T_QuantType* out, float* in, size_t numElements) { - if (nullptr == out || nullptr == in) { - MLLM_LOG_ERROR_LEGACY("Received a nullptr"); - return StatusCode::INVALID_BUFFER; - } - for (size_t i = 0; i < numElements; i++) { - out[i] = static_cast(in[i]); - } - return StatusCode::SUCCESS; -} - -template datautil::StatusCode datautil::castFromFloat(uint8_t* out, - float* in, - size_t numElements); - -template datautil::StatusCode datautil::castFromFloat(uint16_t* out, - float* in, - size_t numElements); - -template datautil::StatusCode datautil::castFromFloat(uint32_t* out, - float* in, - size_t numElements); - -template datautil::StatusCode datautil::castFromFloat(int8_t* out, - float* in, - size_t numElements); - -template datautil::StatusCode datautil::castFromFloat(int16_t* out, - float* in, - size_t numElements); - -template datautil::StatusCode datautil::castFromFloat(int32_t* out, - float* in, - size_t numElements); - -template datautil::StatusCode datautil::castFromFloat<__fp16>(__fp16* out, - float* in, - size_t numElements); \ No newline at end of file diff --git a/src/backends/qnn/Utils/DataUtil.hpp b/src/backends/qnn/Utils/DataUtil.hpp deleted file mode 100644 index 730abd883..000000000 --- a/src/backends/qnn/Utils/DataUtil.hpp +++ /dev/null @@ -1,121 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2019-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================== -#pragma once - -#include -#include -#include - -#include "QnnTypes.h" - -namespace qnn { -namespace tools { -namespace datautil { -enum class StatusCode { - SUCCESS, - DATA_READ_FAIL, - DATA_WRITE_FAIL, - FILE_OPEN_FAIL, - DIRECTORY_CREATE_FAIL, - INVALID_DIMENSIONS, - INVALID_DATA_TYPE, - DATA_SIZE_MISMATCH, - INVALID_BUFFER, -}; - -const size_t g_bitsPerByte = 8; - -using ReadBatchDataRetType_t = std::tuple; - -std::tuple getDataTypeSizeInBytes(Qnn_DataType_t dataType); - -std::tuple calculateLength(std::vector dims, Qnn_DataType_t dataType); - -size_t calculateElementCount(std::vector dims); - -std::tuple getFileSize(std::string filePath); - -StatusCode readDataFromFile(std::string filePath, - std::vector dims, - Qnn_DataType_t dataType, - uint8_t* buffer); - -/* - * Read data in batches from Queue and try to matches the model input's - * batches. If the queue is empty while matching the batch size of model, - * pad the remaining buffer with zeros - * @param filePathsQueue image paths queue - * @param dims model input dimensions - * @param dataType to create input buffer from file - * @param buffer to fill the input image data - * - * @return ReadBatchDataRetType_t returns numFilesCopied and batchSize along - * with status - */ -ReadBatchDataRetType_t readBatchDataAndUpdateQueue(std::queue& filePaths, - std::vector dims, - Qnn_DataType_t dataType, - uint8_t* buffer); - -StatusCode readBinaryFromFile(std::string filePath, uint8_t* buffer, size_t bufferSize); - -StatusCode writeDataToFile(std::string fileDir, - std::string fileName, - std::vector dims, - Qnn_DataType_t dataType, - uint8_t* buffer); - -StatusCode writeBatchDataToFile(std::vector fileDirs, - std::string fileName, - std::vector dims, - Qnn_DataType_t dataType, - uint8_t* buffer, - const size_t batchSize); - -StatusCode writeBinaryToFile(std::string fileDir, - std::string fileName, - uint8_t* buffer, - size_t 
bufferSize); - -template -datautil::StatusCode floatToTfN( - T_QuantType* out, float* in, int32_t offset, float scale, size_t numElements); - -template -datautil::StatusCode tfNToFloat( - float* out, T_QuantType* in, int32_t offset, float scale, size_t numElements); - -template -datautil::StatusCode castToFloat(float* out, T_QuantType* in, size_t numElements); - -template -datautil::StatusCode castFromFloat(T_QuantType* out, float* in, size_t numElements); - -const std::map g_dataTypeToSize = { - {QNN_DATATYPE_INT_8, 1}, - {QNN_DATATYPE_INT_16, 2}, - {QNN_DATATYPE_INT_32, 4}, - {QNN_DATATYPE_INT_64, 8}, - {QNN_DATATYPE_UINT_8, 1}, - {QNN_DATATYPE_UINT_16, 2}, - {QNN_DATATYPE_UINT_32, 4}, - {QNN_DATATYPE_UINT_64, 8}, - {QNN_DATATYPE_FLOAT_16, 2}, - {QNN_DATATYPE_FLOAT_32, 4}, - {QNN_DATATYPE_FLOAT_64, 8}, - {QNN_DATATYPE_SFIXED_POINT_8, 1}, - {QNN_DATATYPE_SFIXED_POINT_16, 2}, - {QNN_DATATYPE_SFIXED_POINT_32, 4}, - {QNN_DATATYPE_UFIXED_POINT_8, 1}, - {QNN_DATATYPE_UFIXED_POINT_16, 2}, - {QNN_DATATYPE_UFIXED_POINT_32, 4}, - {QNN_DATATYPE_BOOL_8, 1}, -}; -} // namespace datautil -} // namespace tools -} // namespace qnn diff --git a/src/backends/qnn/Utils/DynamicLoadUtil.cpp b/src/backends/qnn/Utils/DynamicLoadUtil.cpp deleted file mode 100644 index e8b89f67a..000000000 --- a/src/backends/qnn/Utils/DynamicLoadUtil.cpp +++ /dev/null @@ -1,175 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2019-2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================== - -#include - -#include "DynamicLoadUtil.hpp" -#include "Log.h" -#include "Logger.hpp" -#include "PAL/DynamicLoading.hpp" - -using namespace qnn; -using namespace qnn::tools; - -typedef Qnn_ErrorHandle_t (*QnnInterfaceGetProvidersFn_t)(const QnnInterface_t*** providerList, - uint32_t* numProviders); - -typedef Qnn_ErrorHandle_t (*QnnSystemInterfaceGetProvidersFn_t)( - const QnnSystemInterface_t*** providerList, uint32_t* numProviders); - -template -static inline T resolveSymbol(void* libHandle, const char* sym) { - T ptr = (T)pal::dynamicloading::dlSym(libHandle, sym); - if (ptr == nullptr) { - MLLM_LOG_ERROR_LEGACY("Unable to access symbol [%s]. pal::dynamicloading::dlError(): %s", - sym, - pal::dynamicloading::dlError()); - } - return ptr; -} - -dynamicloadutil::StatusCode dynamicloadutil::getQnnFunctionPointers( - std::string backendPath, - std::string modelPath, - sample_app::QnnFunctionPointers* qnnFunctionPointers, - void** backendHandleRtn, - bool loadModelLib, - void** modelHandleRtn) { - void* libBackendHandle = pal::dynamicloading::dlOpen( - backendPath.c_str(), pal::dynamicloading::DL_NOW | pal::dynamicloading::DL_GLOBAL); - if (nullptr == libBackendHandle) { - MLLM_LOG_ERROR_LEGACY("Unable to load backend. 
pal::dynamicloading::dlError(): %s", - pal::dynamicloading::dlError()); - return StatusCode::FAIL_LOAD_BACKEND; - } - if (nullptr != backendHandleRtn) { - *backendHandleRtn = libBackendHandle; - } - // Get QNN Interface - QnnInterfaceGetProvidersFn_t getInterfaceProviders{nullptr}; - getInterfaceProviders = - resolveSymbol(libBackendHandle, "QnnInterface_getProviders"); - if (nullptr == getInterfaceProviders) { - return StatusCode::FAIL_SYM_FUNCTION; - } - QnnInterface_t** interfaceProviders{nullptr}; - uint32_t numProviders{0}; - if (QNN_SUCCESS != - getInterfaceProviders((const QnnInterface_t***)&interfaceProviders, &numProviders)) { - MLLM_LOG_ERROR_LEGACY("Failed to get interface providers."); - return StatusCode::FAIL_GET_INTERFACE_PROVIDERS; - } - if (nullptr == interfaceProviders) { - MLLM_LOG_ERROR_LEGACY("Failed to get interface providers: null interface providers received."); - return StatusCode::FAIL_GET_INTERFACE_PROVIDERS; - } - if (0 == numProviders) { - MLLM_LOG_ERROR_LEGACY("Failed to get interface providers: 0 interface providers."); - return StatusCode::FAIL_GET_INTERFACE_PROVIDERS; - } - bool foundValidInterface{false}; - for (size_t pIdx = 0; pIdx < numProviders; pIdx++) { - if (QNN_API_VERSION_MAJOR == interfaceProviders[pIdx]->apiVersion.coreApiVersion.major && - QNN_API_VERSION_MINOR <= interfaceProviders[pIdx]->apiVersion.coreApiVersion.minor) { - foundValidInterface = true; - qnnFunctionPointers->qnnInterface = interfaceProviders[pIdx]->QNN_INTERFACE_VER_NAME; - break; - } - } - if (!foundValidInterface) { - MLLM_LOG_ERROR_LEGACY("Unable to find a valid interface."); - libBackendHandle = nullptr; - return StatusCode::FAIL_GET_INTERFACE_PROVIDERS; - } - - if (true == loadModelLib) { - QNN_INFO("Loading model shared library ([model].so)"); - void* libModelHandle = pal::dynamicloading::dlOpen( - modelPath.c_str(), pal::dynamicloading::DL_NOW | pal::dynamicloading::DL_LOCAL); - if (nullptr == libModelHandle) { - MLLM_LOG_ERROR_LEGACY("Unable to 
load model. pal::dynamicloading::dlError(): %s", - pal::dynamicloading::dlError()); - return StatusCode::FAIL_LOAD_MODEL; - } - if (nullptr != modelHandleRtn) { - *modelHandleRtn = libModelHandle; - } - - std::string modelPrepareFunc = "QnnModel_composeGraphs"; - qnnFunctionPointers->composeGraphsFnHandle = - resolveSymbol(libModelHandle, - modelPrepareFunc.c_str()); - if (nullptr == qnnFunctionPointers->composeGraphsFnHandle) { - return StatusCode::FAIL_SYM_FUNCTION; - } - - std::string modelFreeFunc = "QnnModel_freeGraphsInfo"; - qnnFunctionPointers->freeGraphInfoFnHandle = - resolveSymbol(libModelHandle, - modelFreeFunc.c_str()); - if (nullptr == qnnFunctionPointers->freeGraphInfoFnHandle) { - return StatusCode::FAIL_SYM_FUNCTION; - } - } else { - QNN_INFO("Model wasn't loaded from a shared library."); - } - return StatusCode::SUCCESS; -} - -dynamicloadutil::StatusCode dynamicloadutil::getQnnSystemFunctionPointers( - std::string systemLibraryPath, sample_app::QnnFunctionPointers* qnnFunctionPointers) { - QNN_FUNCTION_ENTRY_LOG; - if (!qnnFunctionPointers) { - MLLM_LOG_ERROR_LEGACY("nullptr provided for qnnFunctionPointers"); - return StatusCode::FAILURE; - } - void* systemLibraryHandle = pal::dynamicloading::dlOpen( - systemLibraryPath.c_str(), pal::dynamicloading::DL_NOW | pal::dynamicloading::DL_LOCAL); - if (nullptr == systemLibraryHandle) { - MLLM_LOG_ERROR_LEGACY("Unable to load system library. 
pal::dynamicloading::dlError(): %s", - pal::dynamicloading::dlError()); - return StatusCode::FAIL_LOAD_SYSTEM_LIB; - } - QnnSystemInterfaceGetProvidersFn_t getSystemInterfaceProviders{nullptr}; - getSystemInterfaceProviders = resolveSymbol( - systemLibraryHandle, "QnnSystemInterface_getProviders"); - if (nullptr == getSystemInterfaceProviders) { - return StatusCode::FAIL_SYM_FUNCTION; - } - QnnSystemInterface_t** systemInterfaceProviders{nullptr}; - uint32_t numProviders{0}; - if (QNN_SUCCESS != getSystemInterfaceProviders( - (const QnnSystemInterface_t***)&systemInterfaceProviders, &numProviders)) { - MLLM_LOG_ERROR_LEGACY("Failed to get system interface providers."); - return StatusCode::FAIL_GET_INTERFACE_PROVIDERS; - } - if (nullptr == systemInterfaceProviders) { - MLLM_LOG_ERROR_LEGACY("Failed to get system interface providers: null interface providers received."); - return StatusCode::FAIL_GET_INTERFACE_PROVIDERS; - } - if (0 == numProviders) { - MLLM_LOG_ERROR_LEGACY("Failed to get interface providers: 0 interface providers."); - return StatusCode::FAIL_GET_INTERFACE_PROVIDERS; - } - bool foundValidSystemInterface{false}; - for (size_t pIdx = 0; pIdx < numProviders; pIdx++) { - if (QNN_SYSTEM_API_VERSION_MAJOR == systemInterfaceProviders[pIdx]->systemApiVersion.major && - QNN_SYSTEM_API_VERSION_MINOR <= systemInterfaceProviders[pIdx]->systemApiVersion.minor) { - foundValidSystemInterface = true; - qnnFunctionPointers->qnnSystemInterface = - systemInterfaceProviders[pIdx]->QNN_SYSTEM_INTERFACE_VER_NAME; - break; - } - } - if (!foundValidSystemInterface) { - MLLM_LOG_ERROR_LEGACY("Unable to find a valid system interface."); - return StatusCode::FAIL_GET_INTERFACE_PROVIDERS; - } - QNN_FUNCTION_EXIT_LOG; - return StatusCode::SUCCESS; -} \ No newline at end of file diff --git a/src/backends/qnn/Utils/DynamicLoadUtil.hpp b/src/backends/qnn/Utils/DynamicLoadUtil.hpp deleted file mode 100644 index 04033abfb..000000000 --- a/src/backends/qnn/Utils/DynamicLoadUtil.hpp 
+++ /dev/null @@ -1,36 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2019-2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -#pragma once - -#include "QNN.hpp" - -namespace qnn { -namespace tools { -namespace dynamicloadutil { -enum class StatusCode { - SUCCESS, - FAILURE, - FAIL_LOAD_BACKEND, - FAIL_LOAD_MODEL, - FAIL_SYM_FUNCTION, - FAIL_GET_INTERFACE_PROVIDERS, - FAIL_LOAD_SYSTEM_LIB, -}; - -StatusCode getQnnFunctionPointers(std::string backendPath, - std::string modelPath, - sample_app::QnnFunctionPointers* qnnFunctionPointers, - void** backendHandle, - bool loadModelLib, - void** modelHandleRtn); -StatusCode getQnnSystemFunctionPointers(std::string systemLibraryPath, - sample_app::QnnFunctionPointers* qnnFunctionPointers); -} // namespace dynamicloadutil -} // namespace tools -} // namespace qnn diff --git a/src/backends/qnn/Utils/IOTensor.cpp b/src/backends/qnn/Utils/IOTensor.cpp deleted file mode 100644 index e6e9cb617..000000000 --- a/src/backends/qnn/Utils/IOTensor.cpp +++ /dev/null @@ -1,934 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2020-2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. 
-// -//============================================================================== -#include -#include -#include -#include -#include -#include - -#include "DataUtil.hpp" -#include "IOTensor.hpp" -#include "Log.h" -#include "Logger.hpp" -#include "PAL/Directory.hpp" -#include "PAL/FileOp.hpp" -#include "PAL/Path.hpp" -#include "PAL/StringOp.hpp" -#include "QnnTypeMacros.hpp" -#include "QnnTypes.h" - -using namespace qnn; -using namespace qnn::tools; -using namespace qnn_wrapper_api; - -// Helper method to read data from files to a buffer. -iotensor::StatusCode iotensor::IOTensor::readDataAndAllocateBuffer( - std::queue& filePaths, - std::vector dims, - Qnn_DataType_t dataType, - uint8_t** bufferToCopy) { - StatusCode returnStatus = StatusCode::SUCCESS; - *bufferToCopy = nullptr; - returnStatus = allocateBuffer(bufferToCopy, dims, dataType); - if (StatusCode::SUCCESS == returnStatus) { - datautil::StatusCode status; - std::tie(status, m_numFilesPopulated, m_batchSize) = datautil::readBatchDataAndUpdateQueue( - filePaths, dims, dataType, reinterpret_cast(*bufferToCopy)); - if (datautil::StatusCode::SUCCESS != status) { - QNN_DEBUG("Failure in datautil::readBatchDataAndUpdateQueue"); - returnStatus = StatusCode::FAILURE; - } - } - if (StatusCode::SUCCESS != returnStatus) { - if (nullptr != *bufferToCopy) { - free(*bufferToCopy); - *bufferToCopy = nullptr; - } - } - return returnStatus; -} - -// Helper method to copy a float buffer, quantize it, and copy -// it to a tensor (Qnn_Tensor_t) buffer. 
-iotensor::StatusCode iotensor::IOTensor::copyFromFloatToNative(float* floatBuffer, - Qnn_Tensor_t* tensor) { - if (nullptr == floatBuffer || nullptr == tensor) { - MLLM_LOG_ERROR_LEGACY("copyFromFloatToNative(): received a nullptr"); - return StatusCode::FAILURE; - } - - StatusCode returnStatus = StatusCode::SUCCESS; - std::vector dims; - fillDims(dims, QNN_TENSOR_GET_DIMENSIONS(tensor), QNN_TENSOR_GET_RANK(tensor)); - - switch (QNN_TENSOR_GET_DATA_TYPE(tensor)) { - case QNN_DATATYPE_UFIXED_POINT_8: - datautil::floatToTfN(static_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - floatBuffer, - QNN_TENSOR_GET_QUANT_PARAMS(tensor).scaleOffsetEncoding.offset, - QNN_TENSOR_GET_QUANT_PARAMS(tensor).scaleOffsetEncoding.scale, - datautil::calculateElementCount(dims)); - break; - - case QNN_DATATYPE_UFIXED_POINT_16: - datautil::floatToTfN(static_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - floatBuffer, - QNN_TENSOR_GET_QUANT_PARAMS(tensor).scaleOffsetEncoding.offset, - QNN_TENSOR_GET_QUANT_PARAMS(tensor).scaleOffsetEncoding.scale, - datautil::calculateElementCount(dims)); - break; - - case QNN_DATATYPE_FLOAT_16: - if (datautil::StatusCode::SUCCESS != - datautil::castFromFloat<__fp16>( - static_cast<__fp16*>(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - floatBuffer, - datautil::calculateElementCount(dims))) { - MLLM_LOG_ERROR_LEGACY("failure in castFromFloat<__fp16>"); - returnStatus = StatusCode::FAILURE; - } - break; - - case QNN_DATATYPE_UINT_8: - if (datautil::StatusCode::SUCCESS != - datautil::castFromFloat( - static_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - floatBuffer, - datautil::calculateElementCount(dims))) { - MLLM_LOG_ERROR_LEGACY("failure in castFromFloat"); - returnStatus = StatusCode::FAILURE; - } - break; - - case QNN_DATATYPE_UINT_16: - if (datautil::StatusCode::SUCCESS != - datautil::castFromFloat( - static_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - floatBuffer, - datautil::calculateElementCount(dims))) { - MLLM_LOG_ERROR_LEGACY("failure in 
castFromFloat"); - returnStatus = StatusCode::FAILURE; - } - break; - - case QNN_DATATYPE_UINT_32: - if (datautil::StatusCode::SUCCESS != - datautil::castFromFloat( - static_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - floatBuffer, - datautil::calculateElementCount(dims))) { - MLLM_LOG_ERROR_LEGACY("failure in castFromFloat"); - returnStatus = StatusCode::FAILURE; - } - break; - - case QNN_DATATYPE_INT_8: - if (datautil::StatusCode::SUCCESS != - datautil::castFromFloat( - static_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - floatBuffer, - datautil::calculateElementCount(dims))) { - MLLM_LOG_ERROR_LEGACY("failure in castFromFloat"); - returnStatus = StatusCode::FAILURE; - } - break; - - case QNN_DATATYPE_INT_16: - if (datautil::StatusCode::SUCCESS != - datautil::castFromFloat( - static_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - floatBuffer, - datautil::calculateElementCount(dims))) { - MLLM_LOG_ERROR_LEGACY("failure in castFromFloat"); - returnStatus = StatusCode::FAILURE; - } - break; - - case QNN_DATATYPE_INT_32: - if (datautil::StatusCode::SUCCESS != - datautil::castFromFloat( - static_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - floatBuffer, - datautil::calculateElementCount(dims))) { - MLLM_LOG_ERROR_LEGACY("failure in castFromFloat"); - returnStatus = StatusCode::FAILURE; - } - break; - - case QNN_DATATYPE_BOOL_8: - if (datautil::StatusCode::SUCCESS != - datautil::castFromFloat( - static_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - floatBuffer, - datautil::calculateElementCount(dims))) { - MLLM_LOG_ERROR_LEGACY("failure in castFromFloat"); - returnStatus = StatusCode::FAILURE; - } - break; - - default: - MLLM_LOG_ERROR_LEGACY("Datatype not supported yet!"); - returnStatus = StatusCode::FAILURE; - break; - } - return returnStatus; -} - -// Helper method to populate an input tensor in the graph during execution. -// It relies on reading data from files provided during app creation. 
-iotensor::StatusCode iotensor::IOTensor::populateInputTensor( - std::queue& filePaths, - Qnn_Tensor_t* input, - iotensor::InputDataType inputDataType) { - if (nullptr == input) { - MLLM_LOG_ERROR_LEGACY("input is nullptr"); - return StatusCode::FAILURE; - } - - auto returnStatus = StatusCode::SUCCESS; - std::vector dims; - fillDims(dims, QNN_TENSOR_GET_DIMENSIONS(input), QNN_TENSOR_GET_RANK(input)); - - if (inputDataType == InputDataType::FLOAT && - QNN_TENSOR_GET_DATA_TYPE(input) != QNN_DATATYPE_FLOAT_32) { - uint8_t* fileToBuffer = nullptr; - returnStatus = readDataAndAllocateBuffer(filePaths, dims, QNN_DATATYPE_FLOAT_32, &fileToBuffer); - if (StatusCode::SUCCESS == returnStatus) { - QNN_DEBUG("readDataFromFileToBuffer successful"); - returnStatus = copyFromFloatToNative(reinterpret_cast(fileToBuffer), input); - } - if (nullptr != fileToBuffer) { - free(fileToBuffer); - fileToBuffer = nullptr; - } - } else { - datautil::StatusCode status; - std::tie(status, m_numFilesPopulated, m_batchSize) = datautil::readBatchDataAndUpdateQueue( - filePaths, - dims, - QNN_TENSOR_GET_DATA_TYPE(input), - static_cast(QNN_TENSOR_GET_CLIENT_BUF(input).data)); - if (datautil::StatusCode::SUCCESS != status) { - QNN_DEBUG("Failure in datautil::readBatchDataAndUpdateQueue"); - returnStatus = StatusCode::FAILURE; - } - } - return returnStatus; -} - -// Helper method to populate all input tensors during execution. -iotensor::StatusCode iotensor::IOTensor::populateInputTensors( - uint32_t graphIdx, - std::vector>& filePathsQueue, - Qnn_Tensor_t* inputs, - qnn_wrapper_api::GraphInfo_t graphInfo, - iotensor::InputDataType inputDataType) { - QNN_DEBUG("populateInputTensors() graphIndx %d", graphIdx); - if (nullptr == inputs) { - MLLM_LOG_ERROR_LEGACY("inputs is nullptr"); - return StatusCode::FAILURE; - } - auto inputCount = graphInfo.numInputTensors; - if (filePathsQueue.size() != inputCount) { - MLLM_LOG_ERROR_LEGACY( - "Incorrect amount of Input files for graphIdx: %d. 
Expected: %d, " - "received: %d", - graphIdx, - inputCount, - filePathsQueue.size()); - return StatusCode::FAILURE; - } - - for (size_t inputIdx = 0; inputIdx < inputCount; inputIdx++) { - if (StatusCode::SUCCESS != - populateInputTensor(filePathsQueue[inputIdx], &(inputs[inputIdx]), inputDataType)) { - QNN_DEBUG("populateInputTensor() failure for input: %d", inputIdx); - return StatusCode::FAILURE; - } - } - return StatusCode::SUCCESS; -} - -// Helper method to populate an input tensor in the graph during execution. -// It relies on reading data from buffer provided during executeGraph() call. -iotensor::StatusCode iotensor::IOTensor::populateInputTensor( - uint8_t* buffer, Qnn_Tensor_t* input, iotensor::InputDataType inputDataType) { - if (nullptr == input) { - MLLM_LOG_ERROR_LEGACY("input is nullptr"); - return StatusCode::FAILURE; - } - std::vector dims; - fillDims(dims, QNN_TENSOR_GET_DIMENSIONS(input), QNN_TENSOR_GET_RANK(input)); - if (inputDataType == InputDataType::FLOAT && - QNN_TENSOR_GET_DATA_TYPE(input) != QNN_DATATYPE_FLOAT_32) { - QNN_DEBUG("Received FLOAT input, but model needs non-float input"); - if (StatusCode::SUCCESS != copyFromFloatToNative(reinterpret_cast(buffer), input)) { - QNN_DEBUG("copyFromFloatToNative failure"); - return StatusCode::FAILURE; - } - } else { - size_t length; - datautil::StatusCode returnStatus; - std::tie(returnStatus, length) = - datautil::calculateLength(dims, QNN_TENSOR_GET_DATA_TYPE(input)); - if (datautil::StatusCode::SUCCESS != returnStatus) { - return StatusCode::FAILURE; - } - pal::StringOp::memscpy( - reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(input).data), length, buffer, length); - } - return StatusCode::SUCCESS; -} - -// Helper method to populate all input tensors. 
-// copy data in inputBuffers to input.clientBuffer.data (not used in mllm shared buffer execution) -iotensor::StatusCode iotensor::IOTensor::populateInputTensors( - uint32_t graphIdx, - std::vector inputBuffers, - Qnn_Tensor_t* inputs, - qnn_wrapper_api::GraphInfo_t graphInfo, - iotensor::InputDataType inputDataType) { - if (nullptr == inputs) { - MLLM_LOG_ERROR_LEGACY("inputs is nullptr"); - return StatusCode::FAILURE; - } - auto inputCount = graphInfo.numInputTensors; - if (inputBuffers.size() != inputCount) { - MLLM_LOG_ERROR_LEGACY("Incorrect amount of Input Buffers for graphIdx: %d. Expected: %d, received: %d", - graphIdx, - inputCount, - inputBuffers.size()); - return StatusCode::FAILURE; - } - for (size_t inputIdx = 0; inputIdx < inputCount; inputIdx++) { - if (StatusCode::SUCCESS != - populateInputTensor(inputBuffers[inputIdx], &(inputs[inputIdx]), inputDataType)) { - QNN_DEBUG("populateInputTensor() failure for input: %d", inputIdx); - return StatusCode::FAILURE; - } - } - return StatusCode::SUCCESS; -} - -// Setup details for Qnn_Tensor_t for execution -// based on information in Qnn_TensorWrapper_t provided by model.so. -iotensor::StatusCode iotensor::IOTensor::setupTensors(Qnn_Tensor_t** tensors, - uint32_t tensorCount, - Qnn_Tensor_t* tensorWrappers) { - if (nullptr == tensorWrappers) { - MLLM_LOG_ERROR_LEGACY("tensorWrappers is nullptr"); - return StatusCode::FAILURE; - } - if (0 == tensorCount) { - QNN_INFO("tensor count is 0. 
Nothing to setup."); - return StatusCode::SUCCESS; - } - auto returnStatus = StatusCode::SUCCESS; - *tensors = (Qnn_Tensor_t*)calloc(1, tensorCount * sizeof(Qnn_Tensor_t)); - if (nullptr == *tensors) { - MLLM_LOG_ERROR_LEGACY("mem alloc failed for *tensors"); - returnStatus = StatusCode::FAILURE; - return returnStatus; - } - for (size_t tensorIdx = 0; tensorIdx < tensorCount; tensorIdx++) { - Qnn_Tensor_t wrapperTensor = tensorWrappers[tensorIdx]; - std::vector dims; - fillDims(dims, QNN_TENSOR_GET_DIMENSIONS(wrapperTensor), QNN_TENSOR_GET_RANK(wrapperTensor)); - if (StatusCode::SUCCESS == returnStatus) { - QNN_DEBUG("allocateBuffer successful"); - (*tensors)[tensorIdx] = QNN_TENSOR_INIT; - returnStatus = - (sample_app::deepCopyQnnTensorInfo(((*tensors) + tensorIdx), &wrapperTensor) == true - ? StatusCode::SUCCESS - : StatusCode::FAILURE); - } - if (StatusCode::SUCCESS == returnStatus) { - QNN_DEBUG("deepCopyQnnTensorInfo successful"); - QNN_TENSOR_SET_MEM_TYPE(((*tensors) + tensorIdx), QNN_TENSORMEMTYPE_RAW); - } - Qnn_ClientBuffer_t clientBuffer = QNN_CLIENT_BUFFER_INIT; - returnStatus = allocateBuffer(reinterpret_cast(&clientBuffer.data), - dims, - QNN_TENSOR_GET_DATA_TYPE((*tensors) + tensorIdx)); - datautil::StatusCode datautilStatus{datautil::StatusCode::SUCCESS}; - size_t length{0}; - std::tie(datautilStatus, length) = - datautil::calculateLength(dims, QNN_TENSOR_GET_DATA_TYPE((*tensors) + tensorIdx)); - if (datautilStatus != datautil::StatusCode::SUCCESS) { - returnStatus = StatusCode::FAILURE; - } - clientBuffer.dataSize = length; - QNN_TENSOR_SET_CLIENT_BUF(((*tensors) + tensorIdx), clientBuffer); - if (StatusCode::SUCCESS != returnStatus) { - MLLM_LOG_ERROR_LEGACY("Failure in setupTensors, cleaning up resources"); - if (nullptr != (QNN_TENSOR_GET_CLIENT_BUF((*tensors) + tensorIdx)).data) { - free(QNN_TENSOR_GET_CLIENT_BUF((*tensors) + tensorIdx).data); - } - tearDownTensors(*tensors, tensorIdx); - *tensors = nullptr; - returnStatus = StatusCode::FAILURE; 
- MLLM_LOG_ERROR_LEGACY("Failure in setupTensors, done cleaning up resources"); - return returnStatus; - } - } - return returnStatus; -} - -iotensor::StatusCode iotensor::IOTensor::setupTensorsNoCopy(Qnn_Tensor_t** tensors, - uint32_t tensorCount, - Qnn_Tensor_t* tensorWrappers){ - if (nullptr == tensorWrappers) { - MLLM_LOG_ERROR_LEGACY("tensorWrappers is nullptr"); - return StatusCode::FAILURE; - } - if (0 == tensorCount) { - QNN_INFO("tensor count is 0. Nothing to setup."); - return StatusCode::SUCCESS; - } - auto returnStatus = StatusCode::SUCCESS; - *tensors = (Qnn_Tensor_t *)calloc(1, tensorCount * sizeof(Qnn_Tensor_t)); - if (nullptr == *tensors) { - MLLM_LOG_ERROR_LEGACY("mem alloc failed for *tensors"); - returnStatus = StatusCode::FAILURE; - return returnStatus; - } - for (size_t tensorIdx = 0; tensorIdx < tensorCount; tensorIdx++) { - Qnn_Tensor_t wrapperTensor = tensorWrappers[tensorIdx]; - std::vector dims; - fillDims(dims, QNN_TENSOR_GET_DIMENSIONS(wrapperTensor), QNN_TENSOR_GET_RANK(wrapperTensor)); - if (StatusCode::SUCCESS == returnStatus) { - QNN_DEBUG("allocateBuffer successful"); - (*tensors)[tensorIdx] = QNN_TENSOR_INIT; - returnStatus = - (sample_app::deepCopyQnnTensorInfo(((*tensors) + tensorIdx), &wrapperTensor) == true ? StatusCode::SUCCESS : StatusCode::FAILURE); - } - if (StatusCode::SUCCESS == returnStatus) { - QNN_DEBUG("deepCopyQnnTensorInfo successful"); - QNN_TENSOR_SET_MEM_TYPE(((*tensors) + tensorIdx), QNN_TENSORMEMTYPE_MEMHANDLE); - } - } - return returnStatus; -} - -// Setup details for all input and output tensors for graph execution. 
-iotensor::StatusCode iotensor::IOTensor::setupInputAndOutputTensors( - Qnn_Tensor_t** inputs, Qnn_Tensor_t** outputs, qnn_wrapper_api::GraphInfo_t graphInfo) { - auto returnStatus = StatusCode::SUCCESS; -#ifdef QNN_ARM - if (StatusCode::SUCCESS != setupTensorsNoCopy(inputs, graphInfo.numInputTensors, (graphInfo.inputTensors))) { - MLLM_LOG_ERROR_LEGACY("Failure in setting up input tensors"); - returnStatus = StatusCode::FAILURE; - } - if (StatusCode::SUCCESS != setupTensorsNoCopy(outputs, graphInfo.numOutputTensors, (graphInfo.outputTensors))) { - MLLM_LOG_ERROR_LEGACY("Failure in setting up output tensors"); - returnStatus = StatusCode::FAILURE; - } -#else - if (StatusCode::SUCCESS != setupTensors(inputs, graphInfo.numInputTensors, (graphInfo.inputTensors))) { - MLLM_LOG_ERROR_LEGACY("Failure in setting up input tensors"); - returnStatus = StatusCode::FAILURE; - } - if (StatusCode::SUCCESS != setupTensors(outputs, graphInfo.numOutputTensors, (graphInfo.outputTensors))) { - MLLM_LOG_ERROR_LEGACY("Failure in setting up output tensors"); - returnStatus = StatusCode::FAILURE; - } -#endif - if (StatusCode::SUCCESS != returnStatus) { - MLLM_LOG_ERROR_LEGACY("Failure in setupInputAndOutputTensors, cleaning up resources"); - if (nullptr != *inputs) { - QNN_DEBUG("cleaning up input tensors"); - tearDownTensors(*inputs, graphInfo.numInputTensors); - *inputs = nullptr; - } - if (nullptr != *outputs) { - QNN_DEBUG("cleaning up output tensors"); - tearDownTensors(*outputs, graphInfo.numOutputTensors); - *outputs = nullptr; - } - MLLM_LOG_ERROR_LEGACY("Failure in setupInputAndOutputTensors, done cleaning up resources"); - } - return returnStatus; -} - -// Clean up all tensors related data after execution. 
-iotensor::StatusCode iotensor::IOTensor::tearDownTensors(Qnn_Tensor_t* tensors, - uint32_t tensorCount) { - for (size_t tensorIdx = 0; tensorIdx < tensorCount; tensorIdx++) { - QNN_DEBUG("freeing resources for tensor: %d", tensorIdx); - if (nullptr != QNN_TENSOR_GET_DIMENSIONS(tensors[tensorIdx])) { - QNN_DEBUG("freeing dimensions"); - free(QNN_TENSOR_GET_DIMENSIONS(tensors[tensorIdx])); - } - if (nullptr != QNN_TENSOR_GET_CLIENT_BUF(tensors[tensorIdx]).data) { - QNN_DEBUG("freeing clientBuf.data"); - free(QNN_TENSOR_GET_CLIENT_BUF(tensors[tensorIdx]).data); - } - } - free(tensors); - return StatusCode::SUCCESS; -} - -// Clean up all input and output tensors after execution. -iotensor::StatusCode iotensor::IOTensor::tearDownInputAndOutputTensors(Qnn_Tensor_t* inputs, - Qnn_Tensor_t* outputs, - size_t numInputTensors, - size_t numOutputTensors) { - if (nullptr != inputs) { - QNN_INFO("cleaning up resources for input tensors"); - tearDownTensors(inputs, numInputTensors); - inputs = nullptr; - } - if (nullptr != outputs) { - QNN_INFO("cleaning up resources for output tensors"); - tearDownTensors(outputs, numOutputTensors); - outputs = nullptr; - } - return StatusCode::SUCCESS; -} - -// Helper method to allocate a buffer. 
-iotensor::StatusCode iotensor::IOTensor::allocateBuffer(uint8_t** buffer, - std::vector dims, - Qnn_DataType_t dataType) { - size_t elementCount = datautil::calculateElementCount(dims); - auto returnStatus = StatusCode::SUCCESS; - switch (dataType) { - case QNN_DATATYPE_FLOAT_32: - QNN_DEBUG("allocating float buffer"); - returnStatus = allocateBuffer(reinterpret_cast(buffer), elementCount); - break; - - case QNN_DATATYPE_FLOAT_16: - QNN_DEBUG("allocating fp16 buffer"); - returnStatus = allocateBuffer<__fp16>(reinterpret_cast<__fp16**>(buffer), elementCount); - break; - - case QNN_DATATYPE_UINT_8: - case QNN_DATATYPE_UFIXED_POINT_8: - QNN_DEBUG("allocating uint8_t buffer"); - returnStatus = allocateBuffer(reinterpret_cast(buffer), elementCount); - break; - - case QNN_DATATYPE_UINT_16: - case QNN_DATATYPE_UFIXED_POINT_16: - QNN_DEBUG("allocating uint16_t buffer"); - returnStatus = allocateBuffer(reinterpret_cast(buffer), elementCount); - break; - - case QNN_DATATYPE_UINT_32: - QNN_DEBUG("allocating uint32_t buffer"); - returnStatus = allocateBuffer(reinterpret_cast(buffer), elementCount); - break; - - case QNN_DATATYPE_INT_8: - QNN_DEBUG("allocating int8_t buffer"); - returnStatus = allocateBuffer(reinterpret_cast(buffer), elementCount); - break; - - case QNN_DATATYPE_INT_16: - QNN_DEBUG("allocating int16_t buffer"); - returnStatus = allocateBuffer(reinterpret_cast(buffer), elementCount); - break; - - case QNN_DATATYPE_INT_32: - QNN_DEBUG("allocating int32_t buffer"); - returnStatus = allocateBuffer(reinterpret_cast(buffer), elementCount); - break; - - case QNN_DATATYPE_BOOL_8: - QNN_DEBUG("allocating bool buffer"); - returnStatus = allocateBuffer(reinterpret_cast(buffer), elementCount); - break; - - default: - MLLM_LOG_ERROR_LEGACY("Datatype not supported yet!"); - returnStatus = StatusCode::FAILURE; - break; - } - return returnStatus; -} - -// Helper method to allocate a buffer. 
-template -iotensor::StatusCode iotensor::IOTensor::allocateBuffer(T** buffer, size_t& elementCount) { - QNN_DEBUG("ElementCount: %d, sizeof(T): %d, total size: %d", - elementCount, - sizeof(T), - elementCount * sizeof(T)); - *buffer = (T*)malloc(elementCount * sizeof(T)); - if (nullptr == *buffer) { - MLLM_LOG_ERROR_LEGACY("mem alloc failed for *buffer"); - return StatusCode::FAILURE; - } - return StatusCode::SUCCESS; -} - -// Convert data to float or de-quantization. This is used when -// user requests for float output and the model produces -// non-float output. -iotensor::StatusCode iotensor::IOTensor::convertToFloat(float** out, Qnn_Tensor_t* tensor) { - if (nullptr == tensor) { - MLLM_LOG_ERROR_LEGACY("tensors is nullptr"); - return StatusCode::FAILURE; - } - std::vector dims; - fillDims(dims, QNN_TENSOR_GET_DIMENSIONS(tensor), QNN_TENSOR_GET_RANK(tensor)); - auto returnStatus = StatusCode::SUCCESS; - size_t elementCount = datautil::calculateElementCount(dims); - returnStatus = allocateBuffer(out, elementCount); - if (StatusCode::SUCCESS != returnStatus) { - MLLM_LOG_ERROR_LEGACY("failure in allocateBuffer"); - return returnStatus; - } - switch (QNN_TENSOR_GET_DATA_TYPE(tensor)) { - case QNN_DATATYPE_UFIXED_POINT_8: - if (datautil::StatusCode::SUCCESS != - datautil::tfNToFloat( - *out, - reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - QNN_TENSOR_GET_QUANT_PARAMS(tensor).scaleOffsetEncoding.offset, - QNN_TENSOR_GET_QUANT_PARAMS(tensor).scaleOffsetEncoding.scale, - elementCount)) { - MLLM_LOG_ERROR_LEGACY("failure in tfNToFloat"); - returnStatus = StatusCode::FAILURE; - } - break; - - case QNN_DATATYPE_UFIXED_POINT_16: - if (datautil::StatusCode::SUCCESS != - datautil::tfNToFloat( - *out, - reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - QNN_TENSOR_GET_QUANT_PARAMS(tensor).scaleOffsetEncoding.offset, - QNN_TENSOR_GET_QUANT_PARAMS(tensor).scaleOffsetEncoding.scale, - elementCount)) { - MLLM_LOG_ERROR_LEGACY("failure in tfNToFloat"); - 
returnStatus = StatusCode::FAILURE; - } - break; - - case QNN_DATATYPE_FLOAT_16: - if (datautil::StatusCode::SUCCESS != - datautil::castToFloat<__fp16>( - *out, - reinterpret_cast<__fp16*>(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - elementCount)) { - MLLM_LOG_ERROR_LEGACY("failure in castToFloat<__fp16>"); - returnStatus = StatusCode::FAILURE; - } - break; - - case QNN_DATATYPE_UINT_8: - if (datautil::StatusCode::SUCCESS != - datautil::castToFloat( - *out, - reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - elementCount)) { - MLLM_LOG_ERROR_LEGACY("failure in castToFloat"); - returnStatus = StatusCode::FAILURE; - } - break; - - case QNN_DATATYPE_UINT_16: - if (datautil::StatusCode::SUCCESS != - datautil::castToFloat( - *out, - reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - elementCount)) { - MLLM_LOG_ERROR_LEGACY("failure in castToFloat"); - returnStatus = StatusCode::FAILURE; - } - break; - - case QNN_DATATYPE_UINT_32: - if (datautil::StatusCode::SUCCESS != - datautil::castToFloat( - *out, - reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - elementCount)) { - MLLM_LOG_ERROR_LEGACY("failure in castToFloat"); - returnStatus = StatusCode::FAILURE; - } - break; - - case QNN_DATATYPE_INT_8: - if (datautil::StatusCode::SUCCESS != - datautil::castToFloat( - *out, - reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - elementCount)) { - MLLM_LOG_ERROR_LEGACY("failure in castToFloat"); - returnStatus = StatusCode::FAILURE; - } - break; - - case QNN_DATATYPE_INT_16: - if (datautil::StatusCode::SUCCESS != - datautil::castToFloat( - *out, - reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - elementCount)) { - MLLM_LOG_ERROR_LEGACY("failure in castToFloat"); - returnStatus = StatusCode::FAILURE; - } - break; - - case QNN_DATATYPE_INT_32: - if (datautil::StatusCode::SUCCESS != - datautil::castToFloat( - *out, - reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - elementCount)) { - MLLM_LOG_ERROR_LEGACY("failure in 
castToFloat"); - returnStatus = StatusCode::FAILURE; - } - break; - - case QNN_DATATYPE_BOOL_8: - if (datautil::StatusCode::SUCCESS != - datautil::castToFloat( - *out, - reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - elementCount)) { - MLLM_LOG_ERROR_LEGACY("failure in castToFloat"); - returnStatus = StatusCode::FAILURE; - } - break; - - default: - MLLM_LOG_ERROR_LEGACY("Datatype not supported yet!"); - returnStatus = StatusCode::FAILURE; - break; - } - if (StatusCode::SUCCESS != returnStatus) { - QNN_DEBUG("freeing *out"); - if (*out != nullptr) { - free(*out); - *out = nullptr; - } - } - return returnStatus; -} - -// Helper method to convert Output tensors to float and write them -// out to files. -iotensor::StatusCode iotensor::IOTensor::convertAndWriteOutputTensorInFloat( - Qnn_Tensor_t* output, std::vector outputPaths, std::string fileName) { - if (nullptr == output) { - MLLM_LOG_ERROR_LEGACY("output is nullptr"); - return StatusCode::FAILURE; - } - - auto returnStatus = StatusCode::SUCCESS; - std::vector dims; - fillDims(dims, QNN_TENSOR_GET_DIMENSIONS(output), QNN_TENSOR_GET_RANK(output)); - float* floatBuffer = nullptr; - returnStatus = convertToFloat(&floatBuffer, output); - if (StatusCode::SUCCESS != returnStatus) { - MLLM_LOG_ERROR_LEGACY("failure in convertToFloat"); - return StatusCode::FAILURE; - } - uint8_t* bufferToWrite = reinterpret_cast(floatBuffer); - if (datautil::StatusCode::SUCCESS != - datautil::writeBatchDataToFile( - outputPaths, fileName, dims, QNN_DATATYPE_FLOAT_32, bufferToWrite, m_batchSize)) { - MLLM_LOG_ERROR_LEGACY("failure in writeBatchDataToFile"); - returnStatus = StatusCode::FAILURE; - } - if (nullptr != floatBuffer) { - QNN_DEBUG("freeing floatBuffer"); - free(floatBuffer); - floatBuffer = nullptr; - } - return returnStatus; -} - -// Helper method to write out output. There is no de-quantization here. -// Just write output as is to files. 
-iotensor::StatusCode iotensor::IOTensor::writeOutputTensor(Qnn_Tensor_t* output, - std::vector outputPaths, - std::string fileName) { - if (nullptr == output) { - MLLM_LOG_ERROR_LEGACY("output is nullptr"); - return StatusCode::FAILURE; - } - auto returnStatus = StatusCode::SUCCESS; - std::vector dims; - fillDims(dims, QNN_TENSOR_GET_DIMENSIONS(output), QNN_TENSOR_GET_RANK(output)); - uint8_t* bufferToWrite = reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(output).data); - if (datautil::StatusCode::SUCCESS != - datautil::writeBatchDataToFile(outputPaths, - fileName, - dims, - QNN_TENSOR_GET_DATA_TYPE(output), - bufferToWrite, - m_batchSize)) { - MLLM_LOG_ERROR_LEGACY("failure in writeBatchDataToFile"); - returnStatus = StatusCode::FAILURE; - } - return returnStatus; -} - -// Helper method to write out output. There is no de-quantization here. -// Just write output as is to files. -iotensor::StatusCode iotensor::IOTensor::writeOutputTensor(Qnn_Tensor_t* output, uint8_t* output_buffer) { - if (nullptr == output) { - MLLM_LOG_ERROR_LEGACY("output is nullptr"); - return StatusCode::FAILURE; - } - auto returnStatus = StatusCode::SUCCESS; - std::vector dims; - fillDims(dims, QNN_TENSOR_GET_DIMENSIONS(output), QNN_TENSOR_GET_RANK(output)); - float* floatBuffer = nullptr; - returnStatus = convertToFloat(&floatBuffer, output); - if (StatusCode::SUCCESS != returnStatus) { - MLLM_LOG_ERROR_LEGACY("failure in convertToFloat"); - return StatusCode::FAILURE; - } - uint8_t* bufferToWrite = reinterpret_cast(floatBuffer); - - datautil::StatusCode err{datautil::StatusCode::SUCCESS}; - size_t length{0}; - std::tie(err, length) = datautil::calculateLength(dims, QNN_DATATYPE_FLOAT_32); - if (datautil::StatusCode::SUCCESS != err) { - return StatusCode::FAILURE; - } - - memcpy(output_buffer, bufferToWrite, length); - - return returnStatus; -} - -// Write out all output tensors to files. 
If output_data_type is float, -// then all outputs will be raw floats regardless of what the model outputs. -// If the output_data_type is native, then output is written as produced by the model. -// Also, for native option, a json with quantization parameters is written out. -// If output_data_type is float_and_native, both above are done. -// If the output in the graph is float, then output_data_type has no effect. -iotensor::StatusCode iotensor::IOTensor::writeOutputTensors(uint32_t graphIdx, - size_t startIdx, - char* graphName, - Qnn_Tensor_t* outputs, - uint32_t numOutputs, - iotensor::OutputDataType outputDatatype, - uint32_t graphsCount, - std::string outputPath) { - if (nullptr == outputs) { - MLLM_LOG_ERROR_LEGACY("Received nullptr"); - return StatusCode::FAILURE; - } - if (graphsCount > 1) { - if (nullptr != graphName && strlen(graphName) > 0) { - outputPath += (pal::Path::getSeparator() + std::string(graphName)); - } else { - outputPath += (pal::Path::getSeparator() + std::string("Graph_") + std::to_string(graphIdx)); - } - } - auto returnStatus = StatusCode::SUCCESS; - std::vector outputPaths; - for (size_t idx = 0; idx < m_numFilesPopulated; idx++) { - std::string output = outputPath + (pal::Path::getSeparator() + std::string("Result_") + - std::to_string(startIdx + idx)); - outputPaths.push_back(output); - } - for (size_t outputIdx = 0; outputIdx < numOutputs; outputIdx++) { - QNN_DEBUG("Writing output for outputIdx: %d", outputIdx); - std::string outputFilePrefix; - if (nullptr != QNN_TENSOR_GET_NAME(outputs[outputIdx]) && - strlen(QNN_TENSOR_GET_NAME(outputs[outputIdx])) > 0) { - outputFilePrefix = std::string(QNN_TENSOR_GET_NAME(outputs[outputIdx])); - } else { - outputFilePrefix = std::string("Output_") + std::to_string(outputIdx); - } - auto outputFile = outputFilePrefix + std::string(".raw"); - auto outputFileNative = outputFilePrefix + std::string("_native.raw"); - if (QNN_TENSOR_GET_DATA_TYPE(outputs[outputIdx]) == QNN_DATATYPE_FLOAT_32) { - 
QNN_DEBUG("Writing in output->dataType == QNN_DATATYPE_FLOAT_32"); - returnStatus = writeOutputTensor(&(outputs[outputIdx]), outputPaths, outputFile); - } else if (outputDatatype == OutputDataType::FLOAT_ONLY) { - QNN_DEBUG("Writing in output->dataType == OutputDataType::FLOAT_ONLY"); - returnStatus = - convertAndWriteOutputTensorInFloat(&(outputs[outputIdx]), outputPaths, outputFile); - } else if (outputDatatype == OutputDataType::NATIVE_ONLY) { - QNN_DEBUG("Writing in output->dataType == OutputDataType::NATIVE_ONLY"); - returnStatus = writeOutputTensor(&(outputs[outputIdx]), outputPaths, outputFileNative); - } else if (outputDatatype == OutputDataType::FLOAT_AND_NATIVE) { - QNN_DEBUG("Writing in output->dataType == OutputDataType::FLOAT_AND_NATIVE"); - returnStatus = - convertAndWriteOutputTensorInFloat(&(outputs[outputIdx]), outputPaths, outputFile); - if (StatusCode::SUCCESS == returnStatus) { - returnStatus = writeOutputTensor(&(outputs[outputIdx]), outputPaths, outputFileNative); - } - } - } - return returnStatus; -} - -// Helper method to allocate a buffer and copy data to it. 
-iotensor::StatusCode iotensor::IOTensor::allocateAndCopyBuffer(uint8_t** buffer, - Qnn_Tensor_t* tensor) { - if (nullptr == tensor) { - return StatusCode::FAILURE; - } - std::vector dims; - fillDims(dims, QNN_TENSOR_GET_DIMENSIONS(tensor), QNN_TENSOR_GET_RANK(tensor)); - datautil::StatusCode datautilStatus; - size_t length; - std::tie(datautilStatus, length) = - datautil::calculateLength(dims, QNN_TENSOR_GET_DATA_TYPE(tensor)); - if (datautilStatus != datautil::StatusCode::SUCCESS) { - return StatusCode::FAILURE; - } - if (StatusCode::SUCCESS != allocateBuffer(buffer, dims, QNN_TENSOR_GET_DATA_TYPE(tensor))) { - MLLM_LOG_ERROR_LEGACY("failure in allocateBuffer"); - return StatusCode::FAILURE; - } - pal::StringOp::memscpy(*buffer, - length * sizeof(uint8_t), - QNN_TENSOR_GET_CLIENT_BUF(tensor).data, - length * sizeof(uint8_t)); - return StatusCode::SUCCESS; -} - -iotensor::StatusCode iotensor::IOTensor::fillDims(std::vector& dims, - uint32_t* inDimensions, - uint32_t rank) { - if (nullptr == inDimensions) { - MLLM_LOG_ERROR_LEGACY("input dimensions is nullptr"); - return StatusCode::FAILURE; - } - for (size_t r = 0; r < rank; r++) { - dims.push_back(inDimensions[r]); - } - return StatusCode::SUCCESS; -} - -iotensor::OutputDataType iotensor::parseOutputDataType(std::string dataTypeString) { - std::transform(dataTypeString.begin(), dataTypeString.end(), dataTypeString.begin(), ::tolower); - OutputDataType parsedDataType = OutputDataType::INVALID; - if (dataTypeString == "float_only") { - parsedDataType = OutputDataType::FLOAT_ONLY; - } else if (dataTypeString == "native_only") { - parsedDataType = OutputDataType::NATIVE_ONLY; - } else if (dataTypeString == "float_and_native") { - parsedDataType = OutputDataType::FLOAT_AND_NATIVE; - } - return parsedDataType; -} - -iotensor::InputDataType iotensor::parseInputDataType(std::string dataTypeString) { - std::transform(dataTypeString.begin(), dataTypeString.end(), dataTypeString.begin(), ::tolower); - InputDataType 
parsedDataType = InputDataType::INVALID; - if (dataTypeString == "float") { - parsedDataType = InputDataType::FLOAT; - } else if (dataTypeString == "native") { - parsedDataType = InputDataType::NATIVE; - } - return parsedDataType; -} \ No newline at end of file diff --git a/src/backends/qnn/Utils/IOTensor.hpp b/src/backends/qnn/Utils/IOTensor.hpp deleted file mode 100644 index b9de43ea8..000000000 --- a/src/backends/qnn/Utils/IOTensor.hpp +++ /dev/null @@ -1,115 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2020, 2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== -#pragma once - -#include -#include - -#include "QnnBackend.h" -#include "QnnCommon.h" -#include "QnnContext.h" -#include "QnnGraph.h" -#include "QnnProperty.h" -#include "QnnSampleAppUtils.hpp" -#include "QnnTensor.h" -#include "QnnTypes.h" -#include "QnnWrapperUtils.hpp" - -namespace qnn { -namespace tools { -namespace iotensor { - -enum class StatusCode { SUCCESS, FAILURE }; -enum class OutputDataType { FLOAT_ONLY, NATIVE_ONLY, FLOAT_AND_NATIVE, INVALID }; -enum class InputDataType { FLOAT, NATIVE, INVALID }; - -OutputDataType parseOutputDataType(std::string dataTypeString); -InputDataType parseInputDataType(std::string dataTypeString); - -class IOTensor { - public: - IOTensor() : m_batchSize(1), m_numFilesPopulated(0) {} - - StatusCode setupInputAndOutputTensors(Qnn_Tensor_t **inputs, - Qnn_Tensor_t **outputs, - qnn_wrapper_api::GraphInfo_t graphInfo); - - StatusCode writeOutputTensors(uint32_t graphIdx, - size_t startIdx, - char *graphName, - Qnn_Tensor_t *outputs, - uint32_t numOutputs, - OutputDataType outputDatatype, - uint32_t graphsCount, - std::string outputPath); - - StatusCode populateInputTensors(uint32_t graphIdx, - std::vector> &filePathsQueue, - Qnn_Tensor_t *inputs, - 
qnn_wrapper_api::GraphInfo_t graphInfo, - iotensor::InputDataType inputDataType); - - StatusCode populateInputTensors(uint32_t graphIdx, - std::vector inputBuffers, - Qnn_Tensor_t *inputs, - qnn_wrapper_api::GraphInfo_t graphInfo, - InputDataType inputDataType); - - StatusCode tearDownInputAndOutputTensors(Qnn_Tensor_t *inputs, - Qnn_Tensor_t *outputs, - size_t numInputTensors, - size_t numOutputTensors); - - StatusCode writeOutputTensor(Qnn_Tensor_t *output, uint8_t* output_buffer); - - private: - size_t m_batchSize; - size_t m_numFilesPopulated; - - StatusCode populateInputTensor(std::queue &filePaths, - Qnn_Tensor_t *input, - InputDataType inputDataType); - - StatusCode populateInputTensor(uint8_t *buffer, Qnn_Tensor_t *input, InputDataType inputDataType); - - StatusCode readDataAndAllocateBuffer(std::queue &filePaths, - std::vector dims, - Qnn_DataType_t dataType, - uint8_t **bufferToCopy); - - template - StatusCode allocateBuffer(T **buffer, size_t &elementCount); - - StatusCode convertToFloat(float **out, Qnn_Tensor_t *output); - - StatusCode convertAndWriteOutputTensorInFloat(Qnn_Tensor_t *output, - std::vector outputPaths, - std::string fileName); - - StatusCode writeOutputTensor(Qnn_Tensor_t *output, - std::vector outputPaths, - std::string fileName); - - StatusCode allocateAndCopyBuffer(uint8_t **buffer, Qnn_Tensor_t *tensor); - - StatusCode tearDownTensors(Qnn_Tensor_t *tensors, uint32_t tensorCount); - - StatusCode allocateBuffer(uint8_t **buffer, std::vector dims, Qnn_DataType_t dataType); - - StatusCode copyFromFloatToNative(float *floatBuffer, Qnn_Tensor_t *tensor); - - StatusCode setupTensors(Qnn_Tensor_t **tensors, uint32_t tensorCount, Qnn_Tensor_t *tensorsInfo); - // just set the tensor info, no buffer allocation - // used when enable qnn shared buffer for input and output - StatusCode setupTensorsNoCopy(Qnn_Tensor_t **tensors, uint32_t tensorCount, Qnn_Tensor_t *tensorsInfo); - - StatusCode fillDims(std::vector &dims, uint32_t *inDimensions, 
uint32_t rank); -}; -} // namespace iotensor -} // namespace tools -} // namespace qnn \ No newline at end of file diff --git a/src/backends/qnn/Utils/QnnSampleAppUtils.cpp b/src/backends/qnn/Utils/QnnSampleAppUtils.cpp deleted file mode 100644 index d38bf9948..000000000 --- a/src/backends/qnn/Utils/QnnSampleAppUtils.cpp +++ /dev/null @@ -1,340 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2019-2023 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -#include -#include -#include -#include -#include -#include -#include - -#include "Log/Logger.hpp" -#include "PAL/Directory.hpp" -#include "PAL/FileOp.hpp" -#include "PAL/Path.hpp" -#include "PAL/StringOp.hpp" -#include "Utils/QnnSampleAppUtils.hpp" -#include "QnnTypeMacros.hpp" - -using namespace qnn; -using namespace qnn::tools; -using namespace qnn_wrapper_api; - -void sample_app::split(std::vector &splitString, - const std::string &tokenizedString, - const char separator) { - splitString.clear(); - std::istringstream tokenizedStringStream(tokenizedString); - while (!tokenizedStringStream.eof()) { - std::string value; - getline(tokenizedStringStream, value, separator); - if (!value.empty()) { - splitString.push_back(value); - } - } -} - -void sample_app::parseInputFilePaths(std::vector &inputFilePaths, - std::vector &paths, - std::string separator) { - for (auto &inputInfo : inputFilePaths) { - auto position = inputInfo.find(separator); - if (position != std::string::npos) { - auto path = inputInfo.substr(position + separator.size()); - paths.push_back(path); - } else { - paths.push_back(inputInfo); - } - } -} - -sample_app::ReadInputListsRetType_t sample_app::readInputLists( - std::vector inputFileListPaths) { - std::vector>> filePathsLists; - for (auto const &path : inputFileListPaths) { - bool 
readSuccess; - std::vector> filePathList; - std::tie(filePathList, readSuccess) = readInputList(path); - if (!readSuccess) { - filePathsLists.clear(); - return std::make_tuple(filePathsLists, false); - } - filePathsLists.push_back(filePathList); - } - return std::make_tuple(filePathsLists, true); -} - -sample_app::ReadInputListRetType_t sample_app::readInputList(const std::string inputFileListPath) { - std::queue lines; - std::ifstream fileListStream(inputFileListPath); - if (!fileListStream) { - QNN_ERROR("Failed to open input file: %s", inputFileListPath.c_str()); - std::vector> result; - return std::make_tuple(result, false); - } - std::string fileLine; - while (std::getline(fileListStream, fileLine)) { - if (fileLine.empty()) continue; - lines.push(fileLine); - } - if (!lines.empty() && lines.front().compare(0, 1, "#") == 0) { - lines.pop(); - } - std::string separator = ":="; - std::vector> filePathsList; - while (!lines.empty()) { - std::vector paths{}; - std::vector inputFilePaths; - split(inputFilePaths, lines.front(), ' '); - parseInputFilePaths(inputFilePaths, paths, separator); - // TODO: multi input support - filePathsList.reserve(paths.size()); - for (size_t idx = 0; idx < paths.size(); idx++) { - if (idx >= filePathsList.size()) { - filePathsList.push_back(std::queue()); - } - filePathsList.back().push(paths[idx]); - } - lines.pop(); - } - return std::make_tuple(filePathsList, true); -} - -sample_app::ProfilingLevel sample_app::parseProfilingLevel(std::string profilingLevelString) { - std::transform(profilingLevelString.begin(), - profilingLevelString.end(), - profilingLevelString.begin(), - ::tolower); - ProfilingLevel parsedProfilingLevel = ProfilingLevel::INVALID; - if (profilingLevelString == "off") { - parsedProfilingLevel = ProfilingLevel::OFF; - } else if (profilingLevelString == "basic") { - parsedProfilingLevel = ProfilingLevel::BASIC; - } else if (profilingLevelString == "detailed") { - parsedProfilingLevel = ProfilingLevel::DETAILED; - } - 
return parsedProfilingLevel; -} - -bool sample_app::deepCopyQnnTensorInfo(Qnn_Tensor_t *dst, const Qnn_Tensor_t *src) { - if (nullptr == dst || nullptr == src) { - QNN_ERROR("Received nullptr"); - return false; - } - // set tensor.version before using QNN_TENSOR_SET macros, as they require the version to be set - // to correctly assign values - dst->version = src->version; - const char *tensorName = QNN_TENSOR_GET_NAME(src); - if (!tensorName) { - QNN_TENSOR_SET_NAME(dst, nullptr); - } else { - QNN_TENSOR_SET_NAME(dst, pal::StringOp::strndup(tensorName, strlen(tensorName))); - } - QNN_TENSOR_SET_ID(dst, QNN_TENSOR_GET_ID(src)); - QNN_TENSOR_SET_TYPE(dst, QNN_TENSOR_GET_TYPE(src)); - QNN_TENSOR_SET_DATA_FORMAT(dst, QNN_TENSOR_GET_DATA_FORMAT(src)); - QNN_TENSOR_SET_DATA_TYPE(dst, QNN_TENSOR_GET_DATA_TYPE(src)); - Qnn_QuantizeParams_t qParams = QNN_QUANTIZE_PARAMS_INIT; - qParams.encodingDefinition = QNN_TENSOR_GET_QUANT_PARAMS(src).encodingDefinition; - qParams.quantizationEncoding = QNN_QUANTIZATION_ENCODING_UNDEFINED; - if (QNN_TENSOR_GET_QUANT_PARAMS(src).quantizationEncoding == - QNN_QUANTIZATION_ENCODING_SCALE_OFFSET) { - qParams.quantizationEncoding = QNN_TENSOR_GET_QUANT_PARAMS(src).quantizationEncoding; - qParams.scaleOffsetEncoding = QNN_TENSOR_GET_QUANT_PARAMS(src).scaleOffsetEncoding; - } else if (QNN_TENSOR_GET_QUANT_PARAMS(src).quantizationEncoding == - QNN_QUANTIZATION_ENCODING_AXIS_SCALE_OFFSET) { - qParams.quantizationEncoding = QNN_TENSOR_GET_QUANT_PARAMS(src).quantizationEncoding; - qParams.axisScaleOffsetEncoding.axis = - QNN_TENSOR_GET_QUANT_PARAMS(src).axisScaleOffsetEncoding.axis; - qParams.axisScaleOffsetEncoding.numScaleOffsets = - QNN_TENSOR_GET_QUANT_PARAMS(src).axisScaleOffsetEncoding.numScaleOffsets; - if (QNN_TENSOR_GET_QUANT_PARAMS(src).axisScaleOffsetEncoding.numScaleOffsets > 0) { - qParams.axisScaleOffsetEncoding.scaleOffset = (Qnn_ScaleOffset_t *)malloc( - QNN_TENSOR_GET_QUANT_PARAMS(src).axisScaleOffsetEncoding.numScaleOffsets * - 
sizeof(Qnn_ScaleOffset_t)); - if (qParams.axisScaleOffsetEncoding.scaleOffset) { - for (size_t idx = 0; - idx < QNN_TENSOR_GET_QUANT_PARAMS(src).axisScaleOffsetEncoding.numScaleOffsets; - idx++) { - qParams.axisScaleOffsetEncoding.scaleOffset[idx].scale = - QNN_TENSOR_GET_QUANT_PARAMS(src).axisScaleOffsetEncoding.scaleOffset[idx].scale; - qParams.axisScaleOffsetEncoding.scaleOffset[idx].offset = - QNN_TENSOR_GET_QUANT_PARAMS(src).axisScaleOffsetEncoding.scaleOffset[idx].offset; - } - } - } - } - QNN_TENSOR_SET_QUANT_PARAMS(dst, qParams); - QNN_TENSOR_SET_RANK(dst, QNN_TENSOR_GET_RANK(src)); - QNN_TENSOR_SET_DIMENSIONS(dst, nullptr); - if (QNN_TENSOR_GET_RANK(src) > 0) { - QNN_TENSOR_SET_DIMENSIONS(dst, (uint32_t *)malloc(QNN_TENSOR_GET_RANK(src) * sizeof(uint32_t))); - if (QNN_TENSOR_GET_DIMENSIONS(dst)) { - pal::StringOp::memscpy(QNN_TENSOR_GET_DIMENSIONS(dst), - QNN_TENSOR_GET_RANK(src) * sizeof(uint32_t), - QNN_TENSOR_GET_DIMENSIONS(src), - QNN_TENSOR_GET_RANK(src) * sizeof(uint32_t)); - } - } - return true; -} - -bool sample_app::copyTensorsInfo(const Qnn_Tensor_t *tensorsInfoSrc, - Qnn_Tensor_t *&tensorWrappers, - uint32_t tensorsCount) { - QNN_FUNCTION_ENTRY_LOG; - auto returnStatus = true; - tensorWrappers = (Qnn_Tensor_t *)calloc(tensorsCount, sizeof(Qnn_Tensor_t)); - if (nullptr == tensorWrappers) { - QNN_ERROR("Failed to allocate memory for tensorWrappers."); - return false; - } - if (returnStatus) { - for (size_t tIdx = 0; tIdx < tensorsCount; tIdx++) { - QNN_DEBUG("Extracting tensorInfo for tensor Idx: %d", tIdx); - tensorWrappers[tIdx] = QNN_TENSOR_INIT; - deepCopyQnnTensorInfo(&tensorWrappers[tIdx], &tensorsInfoSrc[tIdx]); - } - } - QNN_FUNCTION_EXIT_LOG; - return returnStatus; -} - -bool sample_app::copyGraphsInfoV1(const QnnSystemContext_GraphInfoV1_t *graphInfoSrc, - qnn_wrapper_api::GraphInfo_t *graphInfoDst) { - graphInfoDst->graphName = nullptr; - if (graphInfoSrc->graphName) { - graphInfoDst->graphName = - 
pal::StringOp::strndup(graphInfoSrc->graphName, strlen(graphInfoSrc->graphName)); - } - graphInfoDst->inputTensors = nullptr; - graphInfoDst->numInputTensors = 0; - if (graphInfoSrc->graphInputs) { - if (!copyTensorsInfo( - graphInfoSrc->graphInputs, graphInfoDst->inputTensors, graphInfoSrc->numGraphInputs)) { - return false; - } - graphInfoDst->numInputTensors = graphInfoSrc->numGraphInputs; - } - graphInfoDst->outputTensors = nullptr; - graphInfoDst->numOutputTensors = 0; - if (graphInfoSrc->graphOutputs) { - if (!copyTensorsInfo(graphInfoSrc->graphOutputs, - graphInfoDst->outputTensors, - graphInfoSrc->numGraphOutputs)) { - return false; - } - graphInfoDst->numOutputTensors = graphInfoSrc->numGraphOutputs; - } - return true; -} - -bool sample_app::copyGraphsInfo(const QnnSystemContext_GraphInfo_t *graphsInput, - const uint32_t numGraphs, - qnn_wrapper_api::GraphInfo_t **&graphsInfo) { - QNN_FUNCTION_ENTRY_LOG; - if (!graphsInput) { - QNN_ERROR("Received nullptr for graphsInput."); - return false; - } - auto returnStatus = true; - graphsInfo = - (qnn_wrapper_api::GraphInfo_t **)calloc(numGraphs, sizeof(qnn_wrapper_api::GraphInfo_t *)); - qnn_wrapper_api::GraphInfo_t *graphInfoArr = - (qnn_wrapper_api::GraphInfo_t *)calloc(numGraphs, sizeof(qnn_wrapper_api::GraphInfo_t)); - if (nullptr == graphsInfo || nullptr == graphInfoArr) { - QNN_ERROR("Failure to allocate memory for *graphInfo"); - returnStatus = false; - } - if (true == returnStatus) { - for (size_t gIdx = 0; gIdx < numGraphs; gIdx++) { - QNN_DEBUG("Extracting graphsInfo for graph Idx: %d", gIdx); - if (graphsInput[gIdx].version == QNN_SYSTEM_CONTEXT_GRAPH_INFO_VERSION_1) { - copyGraphsInfoV1(&graphsInput[gIdx].graphInfoV1, &graphInfoArr[gIdx]); - } - graphsInfo[gIdx] = graphInfoArr + gIdx; - } - } - if (true != returnStatus) { - QNN_ERROR("Received an ERROR during extractGraphsInfo. 
Freeing resources."); - if (graphsInfo) { - for (uint32_t gIdx = 0; gIdx < numGraphs; gIdx++) { - if (graphsInfo[gIdx]) { - if (nullptr != graphsInfo[gIdx]->graphName) { - free(graphsInfo[gIdx]->graphName); - graphsInfo[gIdx]->graphName = nullptr; - } - qnn_wrapper_api::freeQnnTensors(graphsInfo[gIdx]->inputTensors, - graphsInfo[gIdx]->numInputTensors); - qnn_wrapper_api::freeQnnTensors(graphsInfo[gIdx]->outputTensors, - graphsInfo[gIdx]->numOutputTensors); - } - } - free(*graphsInfo); - } - free(graphsInfo); - graphsInfo = nullptr; - } - QNN_FUNCTION_EXIT_LOG; - return true; -} - -bool sample_app::copyMetadataToGraphsInfo(const QnnSystemContext_BinaryInfo_t *binaryInfo, - qnn_wrapper_api::GraphInfo_t **&graphsInfo, - uint32_t &graphsCount) { - if (nullptr == binaryInfo) { - QNN_ERROR("binaryInfo is nullptr."); - return false; - } - graphsCount = 0; - if (binaryInfo->version == QNN_SYSTEM_CONTEXT_BINARY_INFO_VERSION_1) { - if (binaryInfo->contextBinaryInfoV1.graphs) { - if (!copyGraphsInfo(binaryInfo->contextBinaryInfoV1.graphs, - binaryInfo->contextBinaryInfoV1.numGraphs, - graphsInfo)) { - QNN_ERROR("Failed while copying graphs Info."); - return false; - } - graphsCount = binaryInfo->contextBinaryInfoV1.numGraphs; - return true; - } - } else if (binaryInfo->version == QNN_SYSTEM_CONTEXT_BINARY_INFO_VERSION_2) { - if (binaryInfo->contextBinaryInfoV2.graphs) { - if (!copyGraphsInfo(binaryInfo->contextBinaryInfoV2.graphs, - binaryInfo->contextBinaryInfoV2.numGraphs, - graphsInfo)) { - QNN_ERROR("Failed while copying graphs Info."); - return false; - } - graphsCount = binaryInfo->contextBinaryInfoV2.numGraphs; - return true; - } - } - QNN_ERROR("Unrecognized system context binary info version."); - return false; -} - -QnnLog_Level_t sample_app::parseLogLevel(std::string logLevelString) { - QNN_FUNCTION_ENTRY_LOG; - std::transform(logLevelString.begin(), logLevelString.end(), logLevelString.begin(), ::tolower); - QnnLog_Level_t parsedLogLevel = QNN_LOG_LEVEL_MAX; - if 
(logLevelString == "error") { - parsedLogLevel = QNN_LOG_LEVEL_ERROR; - } else if (logLevelString == "warn") { - parsedLogLevel = QNN_LOG_LEVEL_WARN; - } else if (logLevelString == "info") { - parsedLogLevel = QNN_LOG_LEVEL_INFO; - } else if (logLevelString == "verbose") { - parsedLogLevel = QNN_LOG_LEVEL_VERBOSE; - } else if (logLevelString == "debug") { - parsedLogLevel = QNN_LOG_LEVEL_DEBUG; - } - QNN_FUNCTION_EXIT_LOG; - return parsedLogLevel; -} diff --git a/src/backends/qnn/Utils/QnnSampleAppUtils.hpp b/src/backends/qnn/Utils/QnnSampleAppUtils.hpp deleted file mode 100644 index d9f223230..000000000 --- a/src/backends/qnn/Utils/QnnSampleAppUtils.hpp +++ /dev/null @@ -1,69 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2019-2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== -#pragma once - -#include -#include -#include -#include -#include -#include - -#include "../QNN.hpp" - -namespace qnn { -namespace tools { -namespace sample_app { - -enum class ProfilingLevel { OFF, BASIC, DETAILED, INVALID }; - -using ReadInputListRetType_t = std::tuple>, bool>; - -ReadInputListRetType_t readInputList(std::string inputFileListPath); - -using ReadInputListsRetType_t = std::tuple>>, bool>; - -ReadInputListsRetType_t readInputLists(std::vector inputFileListPath); - -ProfilingLevel parseProfilingLevel(std::string profilingLevelString); - -void parseInputFilePaths(std::vector &inputFilePaths, - std::vector &paths, - std::string separator); - -void split(std::vector &splitString, - const std::string &tokenizedString, - const char separator); - -bool copyMetadataToGraphsInfo(const QnnSystemContext_BinaryInfo_t *binaryInfo, - qnn_wrapper_api::GraphInfo_t **&graphsInfo, - uint32_t &graphsCount); - -bool copyGraphsInfo(const QnnSystemContext_GraphInfo_t *graphsInput, 
- const uint32_t numGraphs, - qnn_wrapper_api::GraphInfo_t **&graphsInfo); - -bool copyGraphsInfoV1(const QnnSystemContext_GraphInfoV1_t *graphInfoSrc, - qnn_wrapper_api::GraphInfo_t *graphInfoDst); - -bool copyTensorsInfo(const Qnn_Tensor_t *tensorsInfoSrc, - Qnn_Tensor_t *&tensorWrappers, - uint32_t tensorsCount); - -bool deepCopyQnnTensorInfo(Qnn_Tensor_t *dst, const Qnn_Tensor_t *src); - -QnnLog_Level_t parseLogLevel(std::string logLevelString); - -void inline exitWithMessage(std::string &&msg, int code) { - std::cerr << msg << std::endl; - std::exit(code); -} - -} // namespace sample_app -} // namespace tools -} // namespace qnn \ No newline at end of file diff --git a/src/backends/qnn/WrapperUtils/QnnWrapperUtils.cpp b/src/backends/qnn/WrapperUtils/QnnWrapperUtils.cpp deleted file mode 100644 index b70180308..000000000 --- a/src/backends/qnn/WrapperUtils/QnnWrapperUtils.cpp +++ /dev/null @@ -1,198 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2019-2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -#include -#include -#include - -#include "QnnModelPal.hpp" -#include "QnnTypeMacros.hpp" -#include "QnnWrapperUtils.hpp" - -namespace qnn_wrapper_api { -size_t memscpy(void *dst, size_t dstSize, const void *src, size_t copySize) { - if (!dst || !src || !dstSize || !copySize) return 0; - - size_t minSize = dstSize < copySize ? 
dstSize : copySize; - - memcpy(dst, src, minSize); - - return minSize; -} - -ModelError_t getQnnGraphConfigFromInfo(const char *graphName, - const GraphConfigInfo_t **graphsConfigInfo, - const uint32_t numGraphsConfigInfo, - const QnnGraph_Config_t **&graphConfigs) { - if (!graphsConfigInfo || numGraphsConfigInfo == 0) { - PRINT_DEBUG("getQnnGraphConfigFromInfo() no custom configs passed for graph:%s.\n", graphName); - return MODEL_NO_ERROR; - } - - size_t found = 0; - - for (uint32_t i = 0; i < numGraphsConfigInfo; i++) { - if (!graphsConfigInfo[i]) { - PRINT_ERROR( - "getQnnGraphConfigFromInfo() lookup error while trying to query graphName:%s. " - "numGraphsConfigInfo > num of element in graphsConfigInfo\n", - graphName); - return MODEL_INVALID_ARGUMENT_ERROR; - } - if (strcmp(graphsConfigInfo[i]->graphName, graphName) == 0) { - graphConfigs = graphsConfigInfo[i]->graphConfigs; - found++; - } - } - - if (!found) { - PRINT_ERROR( - "getQnnGraphConfigFromInfo() unable to find graphName:%s in provided " - "graphsConfigInfo object.\n", - graphName); - return MODEL_INVALID_ARGUMENT_ERROR; - } else if (found > 1) { - PRINT_ERROR( - "getQnnGraphConfigFromInfo() duplicate GraphConfigInfo entries found with " - "graphName:%s.\n", - graphName); - return MODEL_INVALID_ARGUMENT_ERROR; - } else { - return MODEL_NO_ERROR; - } -} - -ModelError_t deepCopyQnnTensors(Qnn_Tensor_t &src, Qnn_Tensor_t &dst) { - ModelError_t err; - VALIDATE_TENSOR_VERSION(src, err); - - dst.version = src.version; - QNN_TENSOR_SET_NAME( - dst, strnDup(QNN_TENSOR_GET_NAME(src), std::string(QNN_TENSOR_GET_NAME(src)).size())); - if (QNN_TENSOR_GET_NAME(dst) == nullptr) { - return MODEL_TENSOR_ERROR; - } - QNN_TENSOR_SET_ID(dst, QNN_TENSOR_GET_ID(src)); - QNN_TENSOR_SET_TYPE(dst, QNN_TENSOR_GET_TYPE(src)); - QNN_TENSOR_SET_DATA_FORMAT(dst, QNN_TENSOR_GET_DATA_FORMAT(src)); - QNN_TENSOR_SET_DATA_TYPE(dst, QNN_TENSOR_GET_DATA_TYPE(src)); - QNN_TENSOR_SET_MEM_TYPE(dst, QNN_TENSOR_GET_MEM_TYPE(src)); - - // 
Only metadata (i.e. non-static data) is copied from source to destination. The union still - // must be initialized so that the clientBuf/memHandle do not contain garbage data - if (QNN_TENSOR_GET_MEM_TYPE(src) == QNN_TENSORMEMTYPE_RAW) { - Qnn_ClientBuffer_t clientBuf = {nullptr, 0}; - QNN_TENSOR_SET_CLIENT_BUF(dst, clientBuf); - } else if (QNN_TENSOR_GET_MEM_TYPE(src) == QNN_TENSORMEMTYPE_MEMHANDLE) { - QNN_TENSOR_SET_MEM_HANDLE(dst, nullptr); - } else { - return MODEL_TENSOR_ERROR; - } - - Qnn_QuantizeParams_t srcQParam = QNN_TENSOR_GET_QUANT_PARAMS(src); - Qnn_QuantizationEncoding_t encoding = srcQParam.quantizationEncoding; - if (encoding == QNN_QUANTIZATION_ENCODING_AXIS_SCALE_OFFSET) { - // need to allocate and copy memory for scaleOffset as it is a pointer array - Qnn_QuantizeParams_t srcQParamCpy = srcQParam; - Qnn_AxisScaleOffset_t &axisScaleOffset = srcQParamCpy.axisScaleOffsetEncoding; - Qnn_ScaleOffset_t **scaleOffset = &axisScaleOffset.scaleOffset; - size_t scaleOffsetSize = axisScaleOffset.numScaleOffsets * sizeof(Qnn_ScaleOffset_t); - *scaleOffset = (Qnn_ScaleOffset_t *)malloc(scaleOffsetSize); - memscpy(*scaleOffset, - scaleOffsetSize, - srcQParam.axisScaleOffsetEncoding.scaleOffset, - scaleOffsetSize); - QNN_TENSOR_SET_QUANT_PARAMS(dst, srcQParamCpy); - } else if (encoding == QNN_QUANTIZATION_ENCODING_BW_AXIS_SCALE_OFFSET) { - // need to allocate and copy memory for scaleOffset as it is a pointer array - Qnn_QuantizeParams_t srcQParamCpy = srcQParam; - Qnn_BwAxisScaleOffset_t &bwAxisScaleOffset = srcQParamCpy.bwAxisScaleOffsetEncoding; - size_t scaleSize = bwAxisScaleOffset.numElements * sizeof(float); - float **scales = &bwAxisScaleOffset.scales; - int32_t **offsets = &bwAxisScaleOffset.offsets; - *scales = (float *)malloc(scaleSize); - memscpy(*scales, scaleSize, srcQParam.bwAxisScaleOffsetEncoding.scales, scaleSize); - - // Only copy offsets if present, nullptr implies all offsets are 0 - if (bwAxisScaleOffset.offsets != nullptr) { - size_t 
offsetSize = bwAxisScaleOffset.numElements * sizeof(int32_t); - *offsets = (int32_t *)malloc(offsetSize); - memscpy(*offsets, offsetSize, srcQParam.bwAxisScaleOffsetEncoding.offsets, offsetSize); - } - QNN_TENSOR_SET_QUANT_PARAMS(dst, srcQParamCpy); - } else { - QNN_TENSOR_SET_QUANT_PARAMS(dst, srcQParam); - } - - // need to allocate and copy memory for all the pointer members - uint32_t rank = QNN_TENSOR_GET_RANK(src); - QNN_TENSOR_SET_RANK(dst, rank); - size_t dimSize = rank * sizeof(uint32_t); - uint32_t *dimensions = (uint32_t *)malloc(dimSize); - if (dimensions == nullptr) { - PRINT_ERROR("deepCopyQnnTensors() Allocation error while copying tensor %s", - QNN_TENSOR_GET_NAME(src)); - return MODEL_TENSOR_ERROR; - } - memscpy(dimensions, dimSize, QNN_TENSOR_GET_DIMENSIONS(src), dimSize); - QNN_TENSOR_SET_DIMENSIONS(dst, dimensions); - - return err; -} - -ModelError_t freeQnnTensor(Qnn_Tensor_t &tensor) { - ModelError_t err; - VALIDATE_TENSOR_VERSION(tensor, err); - - // free all pointer allocations in struct - free((void *)QNN_TENSOR_GET_NAME(tensor)); - free(QNN_TENSOR_GET_DIMENSIONS(tensor)); - - return MODEL_NO_ERROR; -} - -ModelError_t freeQnnTensors(Qnn_Tensor_t *&tensors, uint32_t numTensors) { - // free all pointer allocations in struct - for (size_t i = 0; i < numTensors; i++) { - freeQnnTensor(tensors[i]); - } - free(tensors); - - return MODEL_NO_ERROR; -} - -std::string getModelErrorName(ModelError_t modelError) { - switch (modelError) { - case MODEL_NO_ERROR: - return "MODEL_NO_ERROR"; - case MODEL_TENSOR_ERROR: - return "MODEL_TENSOR_ERROR"; - case MODEL_PARAMS_ERROR: - return "MODEL_PARAMS_ERROR"; - case MODEL_NODES_ERROR: - return "MODEL_NODES_ERROR"; - case MODEL_GRAPH_ERROR: - return "MODEL_GRAPH_ERROR"; - case MODEL_CONTEXT_ERROR: - return "MODEL_CONTEXT_ERROR"; - case MODEL_GENERATION_ERROR: - return "MODEL_GENERATION_ERROR"; - case MODEL_SETUP_ERROR: - return "MODEL_SETUP_ERROR"; - case MODEL_UNKNOWN_ERROR: - return "MODEL_UNKNOWN_ERROR"; - 
case MODEL_INVALID_ARGUMENT_ERROR: - return "MODEL_INVALID_ARGUMENT_ERROR"; - case MODEL_FILE_ERROR: - return "MODEL_FILE_ERROR"; - default: - return "INVALID_ERROR_CODE"; - } -} - -} // namespace qnn_wrapper_api diff --git a/src/backends/qnn/WrapperUtils/QnnWrapperUtils.hpp b/src/backends/qnn/WrapperUtils/QnnWrapperUtils.hpp deleted file mode 100644 index a51e3f0e8..000000000 --- a/src/backends/qnn/WrapperUtils/QnnWrapperUtils.hpp +++ /dev/null @@ -1,200 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2019-2022 Qualcomm Technologies, Inc. -// All Rights Reserved. -// Confidential and Proprietary - Qualcomm Technologies, Inc. -// -//============================================================================== - -#pragma once - -#include "QnnContext.h" -#include "QnnGraph.h" -#include "QnnTensor.h" -#include "QnnTypes.h" -#include - -namespace qnn_wrapper_api { - -// macro utils - -// Enables FILE[LINE]: FMT for VALIDATE macro -#ifdef QNN_ENABLE_DEBUG - -#define PRINTF(fmt, ...) \ - do { \ - printf("%s[%d]: ", __FILE__, __LINE__); \ - printf((fmt), ##__VA_ARGS__); \ - } while (0) - -#else - -#define PRINTF(fmt, ...) \ - do { \ - printf((fmt), ##__VA_ARGS__); \ - } while (0) - -#endif - -#ifdef QNN_ENABLE_DEBUG -#define PRINT_DEBUG(fmt, ...) \ - do { \ - printf("[ DEBUG ] "); \ - PRINTF((fmt), ##__VA_ARGS__); \ - } while (0) -#else -#define PRINT_DEBUG(fmt, ...) -#endif - -// Enables ERROR tag for errors -#define PRINT_ERROR(fmt, ...) \ - do { \ - printf("[ ERROR ] "); \ - PRINTF((fmt), ##__VA_ARGS__); \ - } while (0) - -// Enables WARNING tag for errors -#define PRINT_WARNING(fmt, ...) \ - do { \ - printf("[ WARNING ] "); \ - PRINTF((fmt), ##__VA_ARGS__); \ - } while (0) - -// Enables INFO tag for errors -#define PRINT_INFO(fmt, ...) 
\ - do { \ - printf("[ INFO ] "); \ - PRINTF((fmt), ##__VA_ARGS__); \ - } while (0) - -#define STRINGFY(str) str -#define STRINGFYVALUE(str) STRINGFY(str) - -// Ensures ModelError_t returning functions return MODEL_NO_ERROR -// retStatus should be set to MODEL_NO_ERROR before passing to macro -#define VALIDATE(value, retStatus) \ - do { \ - retStatus = value; \ - if (retStatus != qnn_wrapper_api::MODEL_NO_ERROR) { \ - PRINT_ERROR( \ - "%s expected MODEL_NO_ERROR, got %s\n", #value, getModelErrorName(retStatus).c_str()); \ - return retStatus; \ - } \ - } while (0) - -// macros for retrieving binary data -#define BINVARSTART(NAME) \ - ({ \ - extern const uint8_t _binary_obj_binary_##NAME##_raw_start[]; \ - (void *)_binary_obj_binary_##NAME##_raw_start; \ - }) -#define BINVAREND(NAME) \ - ({ \ - extern const uint8_t _binary_obj_binary_##NAME##_raw_end[]; \ - (void *)_binary_obj_binary_##NAME##_raw_end; \ - }) -#define BINLEN(NAME) \ - ({ \ - extern const uint8_t _binary_obj_binary_##NAME##_raw_start[]; \ - extern const uint8_t _binary_obj_binary_##NAME##_raw_end[]; \ - (uint32_t)((_binary_obj_binary_##NAME##_raw_end) - (_binary_obj_binary_##NAME##_raw_start)); \ - }) - -typedef enum ModelError { - MODEL_NO_ERROR = 0, - MODEL_TENSOR_ERROR = 1, - MODEL_PARAMS_ERROR = 2, - MODEL_NODES_ERROR = 3, - MODEL_GRAPH_ERROR = 4, - MODEL_CONTEXT_ERROR = 5, - MODEL_GENERATION_ERROR = 6, - MODEL_SETUP_ERROR = 7, - MODEL_INVALID_ARGUMENT_ERROR = 8, - MODEL_FILE_ERROR = 9, - MODEL_MEMORY_ALLOCATE_ERROR = 10, - // Value selected to ensure 32 bits. 
- MODEL_UNKNOWN_ERROR = 0x7FFFFFFF -} ModelError_t; - -/** - * @brief Returns the error message associated with a given error code - * - * @param[in] modelError ModelError_t error code - * - * @return string message - */ -std::string getModelErrorName(ModelError_t modelError); - -typedef struct GraphInfo { - Qnn_GraphHandle_t graph; - char *graphName; - Qnn_Tensor_t *inputTensors; - uint32_t numInputTensors; - Qnn_Tensor_t *outputTensors; - uint32_t numOutputTensors; -} GraphInfo_t; -typedef GraphInfo_t *GraphInfoPtr_t; - -typedef struct GraphConfigInfo { - char *graphName; - const QnnGraph_Config_t **graphConfigs; -} GraphConfigInfo_t; - -/** - * @brief Helper function to get Qnn GraphConfig structure from provided GraphConfigInfo using - * graphName. - * - * @param[in] graphName the Qnn graphName to use for lookup - * - * @param[in] graphsConfigInfo array of GraphConfig_t objects - * - * @param[in] numGraphsConfigInfo the number of array elements in graphConfigInfo - * - * @param[out] graphConfigs the result of query of graphName from graphsConfigInfo if successful. - * - * @return Error code - * - */ -ModelError_t getQnnGraphConfigFromInfo(const char *graphName, - const GraphConfigInfo_t **graphsConfigInfo, - const uint32_t numGraphsConfigInfo, - const QnnGraph_Config_t **&graphConfigs); - -/** - * @brief Deep Copies QnnTensor_t structs to a pointer array destination location. - * Note: The copy will be stored on the heap and as such requires caller to make - * appropriate free call(s) using function below. - * Note 2: deepCopy is only done for metadata - * - * @param[in] source tensor object to copy from - * - * @param[in] destination tensor object to copy to - * - * @return Error code - */ -ModelError_t deepCopyQnnTensors(Qnn_Tensor_t &source, Qnn_Tensor_t &destination); - -/** - * @brief Frees all memory allocated tensor attributes. 
- * - * @param[in] tensor Qnn_Tensor_t object to free - * - * @return Error code - */ -ModelError_t freeQnnTensor(Qnn_Tensor_t &tensor); - -/** - * @brief Loops through and frees all memory allocated tensor attributes for each tensor - * object. - * - * @param[in] tensors array of tensor objects to free - * - * @param[in] numTensors length of the above tensors array - * - * @return Error code - */ -ModelError_t freeQnnTensors(Qnn_Tensor_t *&tensors, uint32_t numTensors); - -size_t memscpy(void *dst, size_t dstSize, const void *src, size_t copySize); - -} // namespace qnn_wrapper_api diff --git a/src/backends/qnn/op/QNNLinearINT8.cpp b/src/backends/qnn/op/QNNLinearINT8.cpp deleted file mode 100755 index a1510a943..000000000 --- a/src/backends/qnn/op/QNNLinearINT8.cpp +++ /dev/null @@ -1,268 +0,0 @@ - -#include "QNNLinearINT8.hpp" -#include "QnnTypes.h" -#include "Types.hpp" -#include "QNNCommonOp.hpp" -#include -#include - -namespace mllm { -QNNLinearINT8::QNNLinearINT8(Backend *bn, string opName, int in_features, int out_features, bool bias) : - QNNCommonOp(bn, opName), in_features_(in_features), out_features_(out_features), support_bias_(bias) { - weight_.setBackend(bn); - bias_.setBackend(bn); - - weightScale_.setBackend(bn); - biasScale_.setBackend(bn); - outputScale_.setBackend(bn); - inputScale_.setBackend(bn); -} - -ErrorCode QNNLinearINT8::reshape(vector> inputs, vector> outputs) { - assert(inputs.size() == 1); - assert(outputs.size() == 1); - // N | C | H | W - // ----------------------------------------------- - // 1 |out_channel | in_channel | 1 - // |out_features| in_features | - // ----------------------------------------------- - // batch |in_channel | seq_len | 1 - // |in_features | inputs[0]->sequence() | - // ----------------------------------------------- - // batch |out_channel | seq_len | 1 - // |out_features| inputs[0]->sequence() | - assert(inputs[0]->head() == 1); - assert(in_features_ == inputs[0]->dimension()); - 
outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), out_features_); - return Op::reshape(inputs, outputs); -} - -ErrorCode QNNLinearINT8::setUp(vector> inputs, vector> outputs) { - outputs[0]->setDtype(MLLM_TYPE_I8); - // add matmul param to qnn - vector paramsMatmul = { - {.paramType = QNN_PARAMTYPE_SCALAR, - .name = "transpose_in0", - .scalarParam = (Qnn_Scalar_t){QNN_DATATYPE_BOOL_8, {.bool8Value = 0}}}, - {.paramType = QNN_PARAMTYPE_SCALAR, - .name = "transpose_in1", - .scalarParam = (Qnn_Scalar_t){QNN_DATATYPE_BOOL_8, {.bool8Value = 1}}}}; - - uint32_t dimensions_InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D_dilation[] = {2}; - uint32_t InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D_dilation[] = {1, 1}; - uint32_t dimensions_InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D_pad_amount[] = {2, 2}; - uint32_t InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D_pad_amount[] = {0, 0, 0, 0}; - uint32_t dimensions_InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D_stride[] = {2}; - uint32_t InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D_stride[] = {1, 1}; - - string strideName = name() + ".stride"; - string padName = name() + ".pad"; - vector params_InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D = { - {.paramType = QNN_PARAMTYPE_TENSOR, - .name = "stride", - .tensorParam = - (Qnn_Tensor_t){ - .version = QNN_TENSOR_VERSION_1, - .v1 = {.id = 0, - .name = strideName.c_str(), - .type = QNN_TENSOR_TYPE_STATIC, - .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, - .dataType = QNN_DATATYPE_UINT_32, - .quantizeParams = {QNN_DEFINITION_UNDEFINED, - QNN_QUANTIZATION_ENCODING_UNDEFINED, - {.scaleOffsetEncoding = {.scale = 0.0000000000000000f, - .offset = 0}}}, - .rank = 1, - .dimensions = dimensions_InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D_stride, - .memType = QNN_TENSORMEMTYPE_RAW, - .clientBuf = - {.data = (uint8_t *)InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D_stride, - .dataSize = 8}}}}, - {.paramType = QNN_PARAMTYPE_TENSOR, - .name = "pad_amount", - .tensorParam 
= - (Qnn_Tensor_t){ - .version = QNN_TENSOR_VERSION_1, - .v1 = {.id = 0, - .name = padName.c_str(), - .type = QNN_TENSOR_TYPE_STATIC, - .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, - .dataType = QNN_DATATYPE_UINT_32, - .quantizeParams = {QNN_DEFINITION_UNDEFINED, - QNN_QUANTIZATION_ENCODING_UNDEFINED, - {.scaleOffsetEncoding = {.scale = 0.0000000000000000f, - .offset = 0}}}, - .rank = 2, - .dimensions = - dimensions_InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D_pad_amount, - .memType = QNN_TENSORMEMTYPE_RAW, - .clientBuf = - {.data = (uint8_t *) - InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D_pad_amount, - .dataSize = 16}}}}, - - }; - - // add weight tensor to qnn - uint32_t dimensionsWeight[4] = {1, 1, static_cast(weight_.sequence()), static_cast(weight_.dimension())}; - - auto qnnQuantDefined = QNN_DEFINITION_UNDEFINED; - float weightScale = 0; - - qnnQuantDefined = QNN_DEFINITION_DEFINED; - weightScale = weightScale_.hostPtr()[0]; - - qnnBackend_->modelAddTensor(weight_.name(), (Qnn_Tensor_t){ - .version = QNN_TENSOR_VERSION_1, - .v1 = { - .id = 0, - .name = weight_.name().c_str(), - .type = QNN_TENSOR_TYPE_STATIC, - .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, - .dataType = QNN_DATATYPE_SFIXED_POINT_8, - .quantizeParams = {qnnQuantDefined, - QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, - {.scaleOffsetEncoding = {.scale = weightScale, .offset = 0}}}, - .rank = 4, - .dimensions = dimensionsWeight, - .memType = QNN_TENSORMEMTYPE_RAW, - .clientBuf = {.data = weight_.hostPtr(), - .dataSize = (uint32_t)weight_.cntSize()}}}); - // free weight host memory - weight_.free(); - - // dimensions of matmul output and bias - uint32_t dimensionsOutput[4] = {static_cast(outputs[0]->batch()), - static_cast(outputs[0]->sequence()), - static_cast(outputs[0]->head()), - static_cast(outputs[0]->dimension())}; - - auto outName = outputs[0]->name(); - - // if don't support bias, just dequantize and write to tensor with name of outputs[0] - if (!support_bias_) { - float outputScale 
= 0; - outputScale = outputScale_.hostPtr()[0] / 127.0; - outputScale = roundf(outputScale * 100000) / 100000; - - vector matmulOut = {{QNN_TENSOR_VERSION_1, - {.v1 = { - .id = 0, - .name = outName.c_str(), - .type = getOutputTensorType(outputs[0]), - .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, - .dataType = QNN_DATATYPE_SFIXED_POINT_8, - .quantizeParams = {QNN_DEFINITION_DEFINED, - QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, - {.scaleOffsetEncoding = {.scale = outputScale, .offset = 0}}}, - .rank = 4, - .dimensions = dimensionsOutput, - .memType = QNN_TENSORMEMTYPE_RAW, - .clientBuf = {.data = nullptr, - .dataSize = 0}}}}}; - return graphAddNode(name() + ".linearint8", "Conv2d", {inputs[0]->name(), weight_.name()}, matmulOut, params_InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D); - } - - // add bias tensor to qnn - uint32_t dimensionsBias[1] = {(uint32_t)out_features_}; - float biasScale = 0; - - qnnQuantDefined = QNN_DEFINITION_DEFINED; - biasScale = biasScale_.hostPtr()[0]; - - qnnBackend_->modelAddTensor(bias_.name(), (Qnn_Tensor_t){ - .version = QNN_TENSOR_VERSION_1, - .v1 = { - .id = 0, - .name = bias_.name().c_str(), - .type = QNN_TENSOR_TYPE_STATIC, - .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, - .dataType = QNN_DATATYPE_UFIXED_POINT_8, - .quantizeParams = {qnnQuantDefined, - QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, - {.scaleOffsetEncoding = {.scale = biasScale, .offset = -128}}}, - .rank = 1, - .dimensions = dimensionsBias, - .memType = QNN_TENSORMEMTYPE_RAW, - .clientBuf = {.data = bias_.hostPtr(), - .dataSize = (uint32_t)bias_.cntSize()}}}); - // free bias host memory - bias_.free(); - - float outputScale = 0; - outputScale = outputScale_.hostPtr()[0] / 127.0; - outputScale = roundf(outputScale * 100000) / 100000; - - // final output - vector biasOutput = {{QNN_TENSOR_VERSION_1, - {.v1 = { - .id = 0, - .name = outName.c_str(), - .type = getOutputTensorType(outputs[0]), - .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, - .dataType = 
QNN_DATATYPE_SFIXED_POINT_8, - .quantizeParams = {QNN_DEFINITION_DEFINED, - QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, - {.scaleOffsetEncoding = {.scale = outputScale, .offset = 0}}}, - .rank = 4, - .dimensions = dimensionsOutput, - .memType = QNN_TENSORMEMTYPE_RAW, - .clientBuf = {.data = nullptr, - .dataSize = 0}}}}}; - return graphAddNode(name() + ".linearint8", "Conv2d", {inputs[0]->name(), weight_.name(), bias_.name()}, biasOutput, params_InceptionV3_InceptionV3_Conv2d_1a_3x3_Conv2D); -} - -ErrorCode QNNLinearINT8::load(AbstructLoader &loader) { - weight_.setName(name() + ".weight"); - weight_.reshape(1, 1, in_features_, out_features_); - weight_.setDtype(MLLM_TYPE_I8); - weight_.alloc(); - loader.load(&weight_); - - bias_.setName(name() + ".bias"); - bias_.reshape(1, 1, 1, out_features_); - bias_.setDtype(MLLM_TYPE_I8); - bias_.alloc(); - if (support_bias_) { - loader.load(&bias_); - // sign to unsign - for (int i = 0; i < out_features_; i++) { - int32_t val = bias_.dataAt(0, 0, 0, i); - val += 128; - bias_.setDataAt(0, 0, 0, i, (uint8_t)val); - } - } else { - memset(bias_.hostPtr(), 0, bias_.cntSize()); - } - - weightScale_.setName(name() + ".weight.scale"); - weightScale_.reshape(1, 1, 1, 1); - weightScale_.setDtype(MLLM_TYPE_F32); - weightScale_.alloc(); - loader.load(&weightScale_); - - biasScale_.setName(name() + ".bias.scale"); - biasScale_.reshape(1, 1, 1, 1); - biasScale_.setDtype(MLLM_TYPE_F32); - biasScale_.alloc(); - loader.load(&biasScale_); - - outputScale_.setName(name() + ".output_scale"); - outputScale_.reshape(1, 1, 1, 1); - outputScale_.setDtype(MLLM_TYPE_F32); - outputScale_.alloc(); - loader.load(&outputScale_); - - inputScale_.setName(name() + ".input_scale"); - inputScale_.reshape(1, 1, 1, 1); - inputScale_.setDtype(MLLM_TYPE_F32); - inputScale_.alloc(); - loader.load(&inputScale_); - - return Op::load(loader); -} - -ErrorCode QNNLinearINT8::free(vector> inputs, vector> outputs) { - return Op::free(inputs, outputs); -} -} // namespace mllm 
diff --git a/src/backends/qnn/op/QNNQuantize.cpp b/src/backends/qnn/op/QNNQuantize.cpp deleted file mode 100644 index cbf4937e8..000000000 --- a/src/backends/qnn/op/QNNQuantize.cpp +++ /dev/null @@ -1,101 +0,0 @@ - -#include "QNNQuantize.hpp" -#include "QnnTypes.h" -#include "Types.hpp" -#include "QNNCommonOp.hpp" -#include -#include - -namespace mllm { -QNNQuantize::QNNQuantize(Backend *bn, string opName, bool isNSHD) : - QNNCommonOp(bn, opName) { - isNSHD_ = isNSHD; - scale_.setBackend(bn); -} - -ErrorCode QNNQuantize::reshape(vector> inputs, vector> outputs) { - assert(outputs.size() == 1); - outputs[0]->reshape(inputs[0]->batch(), inputs[0]->head(), inputs[0]->sequence(), inputs[0]->dimension()); - return Op::reshape(inputs, outputs); -} - -ErrorCode QNNQuantize::setUp(vector> inputs, vector> outputs) { - outputs[0]->setDtype(MLLM_TYPE_I8); - auto outName = outputs[0]->name(); - - uint32_t dimensionsOutput[4]; - - if (isNSHD_) { - dimensionsOutput[0] = static_cast(outputs[0]->batch()); - dimensionsOutput[1] = static_cast(outputs[0]->sequence()); - dimensionsOutput[2] = static_cast(outputs[0]->head()); - dimensionsOutput[3] = static_cast(outputs[0]->dimension()); - } else { - dimensionsOutput[0] = static_cast(outputs[0]->batch()); - dimensionsOutput[1] = static_cast(outputs[0]->head()); - dimensionsOutput[2] = static_cast(outputs[0]->sequence()); - dimensionsOutput[3] = static_cast(outputs[0]->dimension()); - } - - float quantScale = 0; - quantScale = scale_.hostPtr()[0] / 127.0; - quantScale = roundf(quantScale * 100000) / 100000; - - uint32_t paramsQuantizeDimension[1] = {1}; - auto paramsQuantizeName = name() + "quantize_params"; - vector paramsQuantize = { - {.paramType = QNN_PARAMTYPE_TENSOR, - .name = "scale", - .tensorParam = - (Qnn_Tensor_t){.version = QNN_TENSOR_VERSION_1, - .v1 = { - .id = 0, - .name = paramsQuantizeName.c_str(), - .type = QNN_TENSOR_TYPE_STATIC, - .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, - .dataType = QNN_DATATYPE_FLOAT_32, - 
.quantizeParams = {QNN_DEFINITION_UNDEFINED, - QNN_QUANTIZATION_ENCODING_UNDEFINED, - {.scaleOffsetEncoding = {.scale = 0.0000000000000000f, - .offset = 0}}}, - .rank = 1, - .dimensions = paramsQuantizeDimension, - .memType = QNN_TENSORMEMTYPE_RAW, - .clientBuf = {.data = (uint8_t *)&quantScale, - .dataSize = sizeof(float)}}}}}; - - vector outputTensor = {{QNN_TENSOR_VERSION_1, - {.v1 = { - .id = 0, - .name = outName.c_str(), - .type = getOutputTensorType(outputs[0]), - .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER, - .dataType = QNN_DATATYPE_SFIXED_POINT_8, - .quantizeParams = {QNN_DEFINITION_DEFINED, - QNN_QUANTIZATION_ENCODING_SCALE_OFFSET, - {.scaleOffsetEncoding = {.scale = quantScale, .offset = 0}}}, - .rank = 4, - .dimensions = dimensionsOutput, - .memType = QNN_TENSORMEMTYPE_RAW, - .clientBuf = {.data = nullptr, - .dataSize = 0}}}}}; - return graphAddNode(name(), "LLaMAQuantize", {inputs[0]->name()}, outputTensor, paramsQuantize, "LLaMAPackage"); -} -ErrorCode QNNQuantize::load(AbstructLoader &loader) { - string scaleName = name(); - - std::string wordToRemove = "quantize"; - int pos = scaleName.find(wordToRemove); - if (pos != -1) { - scaleName.erase(pos, wordToRemove.length()); - } - - scale_.setName(scaleName + "input_scale"); - scale_.reshape(1, 1, 1, 1); - scale_.setDtype(MLLM_TYPE_F32); - scale_.alloc(); - loader.load(&scale_); - - return Op::load(loader); -} -} // namespace mllm diff --git a/src/backends/qnn/op/QNNView.cpp b/src/backends/qnn/op/QNNView.cpp deleted file mode 100644 index e383ce226..000000000 --- a/src/backends/qnn/op/QNNView.cpp +++ /dev/null @@ -1,205 +0,0 @@ - -#include "QNNView.hpp" -#include "Types.hpp" -#include "QNNCommonOp.hpp" -#include - -namespace mllm { -QNNView::QNNView(Backend *bn, string opName, vector dims, vector data_dims) : - QNNCommonOp(bn, opName) { - dim0_ = dims[0]; - dim1_ = dims[1]; - dim2_ = dims[2]; - dim3_ = dims[3]; - data_dim0_ = data_dims[0]; - data_dim1_ = data_dims[1]; - data_dim2_ = data_dims[2]; - 
data_dim3_ = data_dims[3]; - - scale_.setBackend(bn); -} - -ErrorCode QNNView::reshape(vector> inputs, vector> outputs) { - int dim0 = inputs[0]->batch(); - int dim1 = inputs[0]->sequence(); - int dim2 = inputs[0]->head(); - int dim3 = inputs[0]->dimension(); - - if (data_dim0_ == BATCH && data_dim1_ == DIMENSION && data_dim2_ == SEQUENCE && data_dim3_ == DIMENSION) { - dim1 = dim1_; - dim2 = inputs[0]->sequence(); - dim3 = inputs[0]->dimension() / dim1_; - } else if (data_dim0_ == BATCH && data_dim1_ == -1 && data_dim2_ == SEQUENCE && data_dim3_ == HEAD + DIMENSION) { - dim2 = dim1; - dim1 = 1; - dim3 = inputs[0]->dimension() * inputs[0]->head(); - } else if (data_dim0_ == BATCH && data_dim1_ == -1 && data_dim2_ == SEQUENCE + HEAD && data_dim3_ == DIMENSION) { - dim1 = 1; - dim2 = inputs[0]->sequence() * inputs[0]->head(); - } else if (data_dim0_ == BATCH && data_dim1_ == -1 && data_dim2_ == CHANNLE && data_dim3_ == TIME + HEIGHT + WIDTH) { - // assert(inputs[0]->ctype() == BCTHW); - dim1 = 1; - dim2 = inputs[0]->channel(); - dim3 = inputs[0]->time() * inputs[0]->height() * inputs[0]->width(); - } else if (data_dim0_ == BATCH && data_dim1_ == -1 && data_dim2_ == TIME + HEIGHT + WIDTH && data_dim3_ == CHANNLE) { - if (inputs[0]->ctype() == BTHWC) { - dim1 = 1; - dim2 = inputs[0]->time() * inputs[0]->height() * inputs[0]->width(); - dim3 = inputs[0]->channel(); - } else { - dim1 = 1; - dim2 = inputs[0]->time() * inputs[0]->height() * inputs[0]->channel(); - dim3 = inputs[0]->width(); - } - } else if (data_dim0_ == SEQUENCE && data_dim1_ == HEAD && data_dim2_ == BATCH && data_dim3_ == DIMENSION) { - dim0 = inputs[0]->sequence(); - dim1 = inputs[0]->head(); - dim2 = inputs[0]->batch(); - dim3 = inputs[0]->dimension(); - } else if (data_dim0_ == BATCH && data_dim1_ == HEAD && data_dim2_ == BATCH && data_dim3_ == DIMENSION) { - dim0 = inputs[0]->batch() / dim2_; - dim1 = inputs[0]->head(); - dim2 = dim2_; - dim3 = inputs[0]->dimension(); - } else if (data_dim0_ == BATCH 
&& data_dim1_ == SEQUENCE && data_dim2_ == SEQUENCE && data_dim3_ == DIMENSION) { - dim0 = inputs[0]->batch(); - dim1 = dim1_; - dim2 = dim1_; - dim3 = inputs[0]->dimension(); - } else if (data_dim0_ == BATCH && data_dim1_ == HEAD && data_dim2_ == SEQUENCE && data_dim3_ == DIMENSION) { - dim0 = dim0_; - dim1 = dim1_; - dim2 = dim2_; - dim3 = dim3_; - } else { - std::cout << "QNNView not support!!!!" << std::endl; - } - outputs[0]->reshape(dim0, dim1, dim2, dim3); - - return Op::reshape(inputs, outputs); -} - -ErrorCode QNNView::setUp(vector> inputs, vector> outputs) { - outputs[0]->setDtype(inputs[0]->dtype()); - - if (outputs[0]->dtype() == MLLM_TYPE_I8) - return graphAddNode(name(), "Reshape", inputs, outputs, {}, "qti.aisw", true, &scale_); - else { - return graphAddNode(name(), "Reshape", inputs, outputs, {}, "qti.aisw", true, nullptr); - } -} - -ErrorCode QNNView::load(AbstructLoader &loader) { - string scaleName = name(); - - std::string wordSplit = ".or_split"; - - int spos = scaleName.find(wordSplit); - if (spos != -1) { - // KVMerge - scaleName.erase(spos, wordSplit.length()); - - string scale_type_name = ".input_scale"; - std::string split_variable; - std::string wordToRemove = "-00_view_"; - - int pos = scaleName.find(wordToRemove); - if (pos != -1) { - scaleName.erase(pos, wordToRemove.length()); - split_variable = ".o_proj"; - } - - wordToRemove = "-01_view_"; - pos = scaleName.find(wordToRemove); - if (pos != -1) { - scaleName.erase(pos, wordToRemove.length()); - split_variable = ".q_proj"; - } - - scale_.setName(scaleName + split_variable + scale_type_name); - scale_.reshape(1, 1, 1, 1); - scale_.setDtype(MLLM_TYPE_F32); - scale_.alloc(); - loader.load(&scale_); - - } else if (scaleName.find(".ires_split") != -1) { - spos = scaleName.find(".ires_split"); - - wordSplit = ".ires_split"; - scaleName.erase(spos, wordSplit.length()); - - string scale_type_name = ".input_scale"; - std::string split_variable; - std::string wordToRemove = "-00_view_"; - - 
int pos = scaleName.find(wordToRemove); - if (pos != -1) { - scaleName.erase(pos, wordToRemove.length()); - split_variable = ".q_proj"; - } - - scale_.setName(scaleName + split_variable + scale_type_name); - scale_.reshape(1, 1, 1, 1); - scale_.setDtype(MLLM_TYPE_F32); - scale_.alloc(); - loader.load(&scale_); - - } else if (scaleName.find(".fres_split") != -1) { - spos = scaleName.find(".fres_split"); - - wordSplit = ".fres_split"; - scaleName.erase(spos, wordSplit.length()); - - string scale_type_name = ".input_scale"; - std::string split_variable; - std::string wordToRemove = "-00_view_"; - - int pos = scaleName.find(wordToRemove); - while (pos != -1) { - scaleName.erase(pos, wordToRemove.length()); - split_variable = ".up_proj"; - pos = scaleName.find(wordToRemove); - } - - scale_.setName(scaleName + split_variable + scale_type_name); - scale_.reshape(1, 1, 1, 1); - scale_.setDtype(MLLM_TYPE_F32); - scale_.alloc(); - loader.load(&scale_); - - } else { - // common view - std::string wordToRemove = "-00_view_"; - int pos = scaleName.find(wordToRemove); - if (pos != -1) { - scaleName.erase(pos, wordToRemove.length()); - } - - string scale_type_name = ".output_scale"; - - wordToRemove = ".quantize"; - pos = scaleName.find(wordToRemove); - if (pos != -1) { - scaleName.erase(pos, wordToRemove.length()); - scale_type_name = ".input_scale"; - } - - wordToRemove = ".post_attention_layernorm"; - pos = scaleName.find(wordToRemove); - if (pos != -1) { - scaleName.erase(pos, wordToRemove.length()); - scale_type_name = ".mlp.up_proj.input_scale"; - } - - scale_.setName(scaleName + scale_type_name); - scale_.reshape(1, 1, 1, 1); - scale_.setDtype(MLLM_TYPE_F32); - scale_.alloc(); - loader.load(&scale_); - } - - return Op::load(loader); -} - - -} // namespace mllm diff --git a/src/backends/xnnpack/XnnpackBackend.cpp b/src/backends/xnnpack/XnnpackBackend.cpp deleted file mode 100644 index c42514b1b..000000000 --- a/src/backends/xnnpack/XnnpackBackend.cpp +++ /dev/null @@ -1,677 
+0,0 @@ -#include "backends/xnnpack/XnnpackBackend.hpp" -#include "Backend.hpp" -#include "OpDefined.hpp" -#include "backends/xnnpack/Utils/Logger.hpp" -#include "pthreadpool.h" -#include "xnnpack.h" -#include "backends/xnnpack/Functions/XpBinaryFunc.hpp" -#include "backends/xnnpack/Ops/XpBinary.hpp" -#include "backends/xnnpack/XpMemoryManager.hpp" -#include "backends/xnnpack/Ops/XpDirect.hpp" -#include "backends/xnnpack/Ops/XpDispatch.hpp" -#include "backends/xnnpack/Ops/XpLinear.hpp" -#include "backends/xnnpack/Ops/XpMatmul.hpp" -#include "backends/xnnpack/Ops/XpRoPE.hpp" -#include "backends/xnnpack/Ops/XpSubGraphStart.hpp" -#include "backends/xnnpack/Ops/XpSubGraphFinalize.hpp" -#include "backends/xnnpack/Ops/XpD2H.hpp" -#include "backends/xnnpack/Ops/XpReLU.hpp" -#include "backends/xnnpack/Ops/XpSoftmax.hpp" -#include "backends/xnnpack/Ops/XpGeLU.hpp" -#include "backends/xnnpack/Ops/XpSiLU.hpp" -#include "backends/xnnpack/Ops/XpTranspose.hpp" -#include "backends/xnnpack/Functions/XpTransposeFunc.hpp" -#include "backends/xnnpack/Ops/XpRMSNorm.hpp" -#include "backends/xnnpack/Ops/XpKVCache.hpp" -#include "backends/xnnpack/Ops/XpCausalMask.hpp" -#include "backends/xnnpack/Ops/XpSDPA.hpp" -#include "backends/xnnpack/Functions/XpViewFunc.hpp" -#include "backends/xnnpack/Functions/XpMatmulFunc.hpp" -#include "backends/xnnpack/Ops/XpEmbedding.hpp" -#include "backends/xnnpack/Ops/XpParameter.hpp" -#include "xnnpack/allocator.h" -#include "xnnpack/memory.h" -#include "xnnpack/subgraph.h" -#include -#include - -namespace mllm { - -class XpBackendCreator : public BackendCreator { - Backend *create(BackendConfig config) override { - // initialize xnnpack - if (xnn_initialize(nullptr /* allocator */) != xnn_status_success) { - ::mllm::xnnpack::Log::error("failed to initialize XNNPACK"); - return nullptr; - } - - auto mm = std::make_shared<::mllm::xnnpack::XpMemoryManager>(); - return new ::mllm::xnnpack::XnnpackBackend(mm); - }; -}; - -void registerXNNBackendCreator() { - 
::mllm::xnnpack::Log::info("xnnpack backend registered"); - InsertBackendCreatorMap(MLLM_XNNPACK, std::make_shared()); -} -} // namespace mllm - -namespace mllm::xnnpack { - -XnnpackModelRuntime::XnnpackModelRuntime(pthreadpool_t threadpool) : - threadpool_(threadpool), model_(nullptr, xnn_delete_subgraph) { - num_threads_ = pthreadpool_get_threads_count(threadpool); -} - -XnnpackModelRuntime::~XnnpackModelRuntime() { - if (runtime_) { - xnn_delete_runtime(runtime_); - } - - // not release all - // FIXME: explicit memory leak. - // NOTE: explicit memory leak. - // NOTE: explicit memory leak. - // NOTE: explicit memory leak. - // NOTE: explicit memory leak. - // NOTE: explicit memory leak. - // NOTE: explicit memory leak. - // - // for (auto i = 0; i < external_values_.size(); ++i) { - // if ((model_->values[i].flags & ((uint32_t)XNN_VALUE_FLAG_EXTERNAL_INPUT)) == 1) { - // xnn_release_simd_memory(uuid_2_externals_v_[i].data); - // } - // } -} - -bool XnnpackModelRuntime::createModel(const xnn_subgraph_t &model_factory) { - model_.reset(model_factory); - if (!model_) { - Log::error("failed to create model"); - return false; - } - - for (uint32_t i = 0; i < model_->num_values; ++i) { - // if not external values. ignore alloc memory - if ((model_->values[i].flags & ((uint32_t)XNN_VALUE_FLAG_EXTERNAL_INPUT | (uint32_t)XNN_VALUE_FLAG_EXTERNAL_OUTPUT)) == 0) { - continue; - } - - // The prepared external_num > actually external_num, ignore redundant part. - if (uuid_2_externals_v_.count(i)) { - // if already alloced by user, ignore alloc memory - if (uuid_2_externals_v_[i].data) { - external_values_.push_back(xnn_external_value{i, uuid_2_externals_v_[i].data}); - continue; - } - - // Make a buffer for this external value. 
- size_t size = xnn_tensor_get_size(&model_->values[i]) + XNN_EXTRA_BYTES; - auto ev = xnn_external_value{i, xnn_allocate_zero_simd_memory(size)}; - uuid_2_externals_v_[i] = ev; - external_values_.push_back(ev); - } - } - - return model_ != nullptr; -} - -bool XnnpackModelRuntime::createRuntime(uint32_t flags) { - assert(!runtime_); - // flags |= XNN_FLAG_NO_OPERATOR_FUSION; - return xnn_status_success == xnn_create_runtime_v4(model_.get(), weight_cache_, nullptr, threadpool_, flags, &runtime_); -} - -bool XnnpackModelRuntime::reshapeRuntime() { - return xnn_status_success == xnn_reshape_runtime(runtime_); -} - -bool XnnpackModelRuntime::setupRuntime() { - return xnn_status_success == xnn_setup_runtime_v2(runtime_, external_values_.size(), external_values_.data()); -} - -bool XnnpackModelRuntime::invoke() { - return xnn_status_success == xnn_invoke_runtime(runtime_); -} - -void XnnpackModelRuntime::resetUuidExternalValuesMap(const std::unordered_map &ext_vals) { - uuid_2_externals_v_ = ext_vals; -} - -void XnnpackModelRuntime::setWeightCache(xnn_weights_cache_t weight_cache) { - weight_cache_ = weight_cache; -} - -xnn_runtime_t XnnpackModelRuntime::getXnnRt() { - return runtime_; -} - -std::unordered_map &XnnpackModelRuntime::__uuidToExternalsV() { - return uuid_2_externals_v_; -} - -XnnpackBackend::XnnpackBackend(std::shared_ptr mm, const XnnpackBackendOpts &opts) : - Backend(mm), opts_(opts) { - // init weight_cache_ - // xnn_create_weights_cache(&weight_cache_); - - // register ops - type_ = BackendType::MLLM_XNNPACK; - registerOps(); - registerFuncs(); -} - -XnnpackBackend::~XnnpackBackend() { -} - -bool XnnpackBackend::addCreator(OpType t, Creator *c) { - if (map_op_creator_.count(t)) { - Log::error("{} op has been added to this backend.", (int32_t)t); - return false; - } - map_op_creator_.insert({t, c}); - return true; -} - -Op *XnnpackBackend::opCreate(const OpParam &op_param, string name, int thread_count) { - auto op_type = 
OpType(op_param.find("type")->second); - auto iter = map_op_creator_.find(op_type); - - if (thread_count) { - Log::warn("Xnnpack use global thread pool. thread_count is set to {}, but not used.", thread_count); - } - - if (iter == map_op_creator_.end()) { - Log::error("OpType={}, Name={} is not supported yet.", int(op_param.find("type")->second), name); - return nullptr; - } - auto op = iter->second->create(op_param, this, name, thread_count); - return op; -} - -TensorFunction *XnnpackBackend::funcCreate(TensorFuncType type) { - auto iter = map_tensor_function_.find(type); - if (iter == map_tensor_function_.end()) { - Log::error("Xnnpack backend don't support func type {}", (int32_t)type); - return nullptr; - } - return iter->second; -} - -void XnnpackBackend::registerOps() { - addCreator(D2H, new XpD2HCreator()); - addCreator(ADD, new XpAddCreator()); - addCreator(DIRECT, new XpDirectCreator()); - addCreator(DISPATCH, new XpDispatchCreator()); - addCreator(SUBGRAPHSTART, new XpSubGraphStartCreator()); - addCreator(SUBGRAPHFINALIZE, new XpSubGraphFinalizeCreator()); - addCreator(LINEAR, new XpLinearCreator()); - addCreator(MATMUL, new XpMatMulCreator()); - addCreator(ROPE, new XpRoPECreator()); - addCreator(RELU, new XpReLUCreator()); - addCreator(SOFTMAX, new XpSoftmaxCreator()); - addCreator(OP_GELU, new XpGeLUCreator()); - addCreator(SILU, new XpSiLUCreator()); - addCreator(TRANSPOSE, new XpTransposeCreator()); - addCreator(RMSNORM, new XpRMSNormCreator()); - addCreator(XP_KVCACHE, new XpKVCacheCreator()); - addCreator(CAUSALMASK, new XpCausalMaskCreator()); - addCreator(SDPA, new XpSDPACreator()); - addCreator(EMBEDDING, new XpEmbeddingCreator()); - addCreator(PARAMETER, new XpParameterCreator()); -} - -void XnnpackBackend::registerFuncs() { - // broadcast element wise tensor func - map_tensor_function_[TensorFuncType::FUNC_ADD] = new XpBroadcastAddFunction(); - map_tensor_function_[TensorFuncType::FUNC_SUB] = new XpBroadcastSubFunction(); - 
map_tensor_function_[TensorFuncType::FUNC_MUL] = new XpBroadcastMulFunction(); - map_tensor_function_[TensorFuncType::FUNC_DIV] = new XpBroadcastDivFunction(); - - // element wise tensor func - map_tensor_function_[TensorFuncType::FUNC_TTADD] = new XpTTAddFunction(); - map_tensor_function_[TensorFuncType::FUNC_TTSUB] = new XpTTSubFunction(); - map_tensor_function_[TensorFuncType::FUNC_TTMUL] = new XpTTMulFunction(); - map_tensor_function_[TensorFuncType::FUNC_TTDIV] = new XpTTDivFunction(); - - // others - map_tensor_function_[TensorFuncType::FUNC_TRANPOSE] = new XpTransposeFunction(); - map_tensor_function_[TensorFuncType::FUNC_VIEW] = new XpViewFunction(); - map_tensor_function_[TensorFuncType::FUNC_MM] = new XpMatmulFunction(); -} - -uint32_t XnnpackCargo::getExecCnt() { - return exec_cnt_; -} - -uint32_t XnnpackCargo::incExecCnt() { - exec_cnt_++; - return exec_cnt_; -} - -void XnnpackCargo::setThreadPool(pthreadpool_t tp) { - threadpool_ = tp; -} - -std::shared_ptr XnnpackCargo::getModelRuntime() { - return model_runtime_; -} - -std::shared_ptr XnnpackCargo::recreateModelRuntime() { - model_runtime_ = std::make_shared(threadpool_); - - // set external values - model_runtime_->resetUuidExternalValuesMap(uuid_2_externals_v_); - model_runtime_->setWeightCache(weight_cache_); - - return model_runtime_; -} - -xnn_subgraph_t XnnpackCargo::getXnnSubgraph() { - return graph_; -} - -void XnnpackCargo::createSubgraph(int32_t external_nums) { - if (graph_) { - Log::error("The subgraph has already been created. 
Use recreateSubGraph instead."); - exit(-1); - } - - uuid_2_externals_v_.clear(); - uuid_2_mllm_tensor_.clear(); - uuid_2_mllm_weight_tensor_.clear(); - uuid_2_normal_tensor_.clear(); - activation_name_2_uuid_.clear(); - auto status = xnn_create_subgraph(external_nums, 0, &graph_); - if (status != xnn_status_success) { - Log::error("Failed to create subgrpah"); - exit(-1); - } -} - -void XnnpackCargo::recreateSubgraph(int32_t external_nums) { - if (graph_) { - // no need to delete this, the previous xnnpack runtime will manage it. - // xnn_delete_subgraph(subgraph_); - uuid_2_mllm_tensor_.clear(); - uuid_2_mllm_weight_tensor_.clear(); - uuid_2_externals_v_.clear(); - uuid_2_normal_tensor_.clear(); - activation_name_2_uuid_.clear(); - } - - auto status = xnn_create_subgraph(external_nums, 0, &graph_); - if (status != xnn_status_success) { - Log::error("Failed to create subgrpah"); - exit(-1); - } -} - -void XnnpackCargo::registerExternalValue(uint32_t uuid, const xnn_external_value &ext_v) { - if (uuid_2_externals_v_.count(uuid)) { - Log::error("when reigster a external value, found exists uuid: {}", uuid); - exit(-1); - } - - uuid_2_externals_v_.insert({uuid, ext_v}); -} - -void XnnpackCargo::updateExternalValue(uint32_t uuid, const xnn_external_value &ext_v) { - if (!uuid_2_externals_v_.count(uuid)) { - Log::error("when update a external value, found exists uuid: {}", uuid); - exit(-1); - } - uuid_2_externals_v_[uuid] = ext_v; -} - -void XnnpackCargo::registerNormalValue(uint32_t uuid) { - if (uuid_2_normal_tensor_.count(uuid)) { - Log::error("when reigster a normal value, found exists uuid: {}", uuid); - exit(-1); - } - - uuid_2_normal_tensor_.insert({uuid, true}); -} - -void XnnpackCargo::registerUuidTensor(uint32_t uuid, Tensor *t) { - if (uuid_2_mllm_tensor_.count(uuid)) { - Log::error("when reigster a tensor value, found exists uuid: {}", uuid); - exit(-1); - } - - uuid_2_mllm_tensor_.insert({uuid, t}); -} - -void XnnpackCargo::updateUuidTensor(uint32_t uuid, 
Tensor *t) { - if (!uuid_2_mllm_tensor_.count(uuid)) { - Log::error("XnnpackCargo::updateUuidTensor failed. {} is not exists", uuid); - exit(-1); - } - - uuid_2_mllm_tensor_[uuid] = t; -} - -void XnnpackCargo::registerUuidWeightTensor(uint32_t uuid, Tensor *t) { - if (uuid_2_mllm_weight_tensor_.count(uuid)) { - Log::error("when reigster a weight tensor value, found exists uuid: {}", uuid); - exit(-1); - } - - uuid_2_mllm_weight_tensor_.insert({uuid, t}); -} - -void *XnnpackCargo::getExternalValueptr(uint32_t uuid) { - if (uuid_2_externals_v_.count(uuid)) { - return uuid_2_externals_v_[uuid].data; - } - Log::error("getExternalValueptr return nullptr for uuid: {}", uuid); - return nullptr; -} - -bool XnnpackCargo::hasExternalValue(uint32_t uuid) { - return uuid_2_externals_v_.count(uuid); -} - -bool XnnpackCargo::hasNormalValue(uint32_t uuid) { - return uuid_2_normal_tensor_.count(uuid); -} - -bool XnnpackCargo::hasWeightValue(uint32_t uuid) { - return uuid_2_mllm_weight_tensor_.count(uuid); -} - -xnn_datatype XnnpackBackend::mllmDType2XnnDType(DataType mllm_dtype) { - switch (mllm_dtype) { - case MLLM_TYPE_F32: - return xnn_datatype_fp32; - case MLLM_TYPE_F16: - return xnn_datatype_fp16; - case MLLM_TYPE_I32: - return xnn_datatype_int32; - default: - return xnn_datatype_invalid; - } - return xnn_datatype_invalid; -} - -uint32_t XnnpackCargo::getNewEXternalId() { - return (uint32_t)uuid_2_externals_v_.size(); -} - -void XnnpackCargo::assignPtrToTensor() { - // update from runtime - uuid_2_externals_v_ = getModelRuntime()->__uuidToExternalsV(); - - // for (auto &iter : uuid_2_mllm_tensor_) { - // auto t = iter.second; - // auto uuid = iter.first; - // auto ext_v = uuid_2_externals_v_[uuid]; - // t->forceResetHostPointer(ext_v.data); - // } - - for (auto &iter : uuid_2_mllm_weight_tensor_) { - iter.second->uuid() = XNN_INVALID_VALUE_ID; - } -} - -void XnnpackCargo::setSubgraphDispatched(bool b) { - subgraph_dispatched_ = b; -} - -xnn_weights_cache_t 
XnnpackCargo::getWeightCache() { - return weight_cache_; -} - -bool XnnpackCargo::isWeightCacheFinalized() const { - return weight_cache_finalized_; -} - -void XnnpackCargo::setWeightCacheFinalized(bool b) { - weight_cache_finalized_ = b; -} - -bool XnnpackCargo::inActivationName(const std::string &name) { - return activation_name_2_uuid_.count(name); -} - -uint32_t XnnpackCargo::getUUIDByActivationName(const std::string &name) { - if (inActivationName(name)) return activation_name_2_uuid_[name]; - Log::error("XnnpackCargo::getUUIDByActivationName, {} not in activation name", name); - exit(-1); -} - -void XnnpackCargo::registerActivationNameAndUUID(const std::string &name, uint32_t uuid) { - if (inActivationName(name)) { - Log::error("XnnpackCargo::registerActivationNameAndUUID, {} already exists", name); - exit(-1); - } - activation_name_2_uuid_.insert({name, uuid}); -} - -void XnnpackBackend::createNewGraph(const std::string &name) { - if (graphs_.count(name)) { - if (enable_dynamic_shape) { - Log::error("XnnpackBackend::createNewGraph, {} graph already exists", name); - exit(-1); - } else { - graphs_.erase(graphs_.find(name)); - } - } - - graphs_.insert({name, std::make_shared()}); - graphs_[name]->setThreadPool(threadpool_); - graphs_[name]->createSubgraph(); -} - -std::shared_ptr XnnpackBackend::getGraph(const std::string &name) { - if (!graphs_.count(name)) { - Log::error("XnnpackBackend::getGraph, {} graph not exists"); - exit(-1); - } - return graphs_[name]; -} - -bool XnnpackBackend::hasGraph(const std::string &name) { - return graphs_.count(name); -} - -void XnnpackBackend::onSetUpStart(std::vector> &inputs, std::vector> &outputs, std::string graph_name) { - // 0. 
create graph - cur_processing_graph_name_ = graph_name; - - if (!hasGraph(graph_name) || !XnnpackBackend::enable_dynamic_shape) { - createNewGraph(graph_name); - auto cargo = getGraph(graph_name); - - if (XnnpackBackend::enable_legacy_wrapper) Backend::onSetUpStart(inputs, outputs, graph_name); - - // 1. register all inputs - for (auto &t : inputs) { - auto xp_dtype = XnnpackBackend::mllmDType2XnnDType(t->dtype()); - - xnn_status status; - std::vector dims; - for (auto d : t->shape()) dims.push_back(d); - - uint32_t flags = XNN_VALUE_FLAG_EXTERNAL_INPUT; - uint32_t external_id = cargo->getNewEXternalId(); - - switch (xp_dtype) { - case xnn_datatype_fp32: { - status = xnn_define_tensor_value( - cargo->getXnnSubgraph(), xp_dtype, - dims.size(), dims.data(), - /*data=*/nullptr, - external_id, flags, &t->uuid()); - break; - } - default: - Log::error("XnnpackBackend::onSetUpStart, Unsupported datatype."); - break; - } - - cargo->registerExternalValue(t->uuid(), xnn_external_value{.id = t->uuid(), .data = t->rawHostPtr()}); - cargo->registerUuidTensor(t->uuid(), t.get()); - cargo->registerActivationNameAndUUID(t->name(), t->uuid()); - - if (status != xnn_status_success) { - Log::error("xnnpack backend defineXpTensor Error"); - exit(-1); - } - } - } else { - // do not create a new graph. 
Reuse already exists runtime - auto cargo = getGraph(graph_name); - - for (auto &t : inputs) { - t->uuid() = cargo->getUUIDByActivationName(t->name()); - cargo->updateExternalValue(t->uuid(), xnn_external_value{.id = t->uuid(), .data = t->rawHostPtr()}); - cargo->updateUuidTensor(t->uuid(), t.get()); - std::vector dims; - for (auto d : t->shape()) dims.push_back(d); - xnn_reshape_external_value(cargo->getModelRuntime()->getXnnRt(), t->uuid(), dims.size(), dims.data()); - } - } -} - -void XnnpackBackend::onSetUpEnd(std::vector> &inputs, std::vector> &outputs, std::string graph_name) { - cur_processing_graph_name_ = graph_name; - - if (getGraph(graph_name)->getExecCnt() == 0 || !XnnpackBackend::enable_dynamic_shape) { - if (XnnpackBackend::enable_legacy_wrapper) Backend::onSetUpEnd(inputs, outputs, graph_name); - - // 0. get graph - auto cargo = getGraph(graph_name); - - // 1. register all outputs - for (auto &t : outputs) { - auto xp_dtype = XnnpackBackend::mllmDType2XnnDType(t->dtype()); - - xnn_status status; - std::vector dims; - for (auto d : t->shape()) dims.push_back(d); - - uint32_t flags = XNN_VALUE_FLAG_EXTERNAL_OUTPUT; - uint32_t external_id = cargo->getNewEXternalId(); - - switch (xp_dtype) { - case xnn_datatype_fp32: { - status = xnn_define_tensor_value( - cargo->getXnnSubgraph(), xp_dtype, - dims.size(), dims.data(), - /*data=*/nullptr, - external_id, flags, &t->uuid()); - break; - } - default: - break; - } - - cargo->registerExternalValue(t->uuid(), xnn_external_value{.id = t->uuid(), .data = t->rawHostPtr()}); - cargo->registerUuidTensor(t->uuid(), t.get()); - cargo->registerActivationNameAndUUID(t->name(), t->uuid()); - - if (status != xnn_status_success) { - Log::error("xnnpack backend defineXpTensor Error"); - exit(-1); - } - } - } else { - // do not create a new graph. 
Reuse already exists runtime - auto cargo = getGraph(graph_name); - - for (auto &t : outputs) { - t->uuid() = cargo->getUUIDByActivationName(t->name()); - cargo->updateExternalValue(t->uuid(), xnn_external_value{.id = t->uuid(), .data = t->rawHostPtr()}); - cargo->updateUuidTensor(t->uuid(), t.get()); - std::vector dims; - for (auto d : t->shape()) dims.push_back(d); - xnn_reshape_external_value(cargo->getModelRuntime()->getXnnRt(), t->uuid(), dims.size(), dims.data()); - } - } -} - -void XnnpackBackend::onExecuteStart(std::vector> &inputs, std::vector> &outputs, std::string graph_name) { - cur_processing_graph_name_ = graph_name; -} - -void XnnpackBackend::onExecuteEnd(std::vector> &outputs, const string &graph_name) { - cur_processing_graph_name_ = graph_name; - auto cargo = getCurProcessingGraph(); - - if (getGraph(graph_name)->getExecCnt() == 0 || !XnnpackBackend::enable_dynamic_shape) { - // recreate runtime - auto m_rt = cargo->recreateModelRuntime(); - - // create Model - m_rt->createModel(cargo->getXnnSubgraph()); - - // create runtime - m_rt->createRuntime(0); - - // auto wc = xnnbk->getWeightCache(); - // if (!xnnbk->isWeightCacheFinalized()) { - // xnn_finalize_weights_cache(wc, xnn_weights_cache_finalization_kind_hard); - // xnnbk->setWeightCacheFinalized(true); - // } - - // reshape - m_rt->reshapeRuntime(); - - // setup - m_rt->setupRuntime(); - - // run - if (!m_rt->invoke()) { - Log::error("XnnpackBackend::onExecuteStart xnn invoke failed"); - return; - } - - // update all output's ptr - cargo->assignPtrToTensor(); - - cargo->setSubgraphDispatched(true); - - cargo->incExecCnt(); - } else { - // recreate runtime - auto m_rt = cargo->getModelRuntime(); - - // setup - m_rt->setupRuntime(); - - // run - if (!m_rt->invoke()) { - Log::error("XnnpackBackend::onExecuteStart xnn invoke failed"); - return; - } - - // update all output's ptr - cargo->assignPtrToTensor(); - - cargo->setSubgraphDispatched(true); - - cargo->incExecCnt(); - } - - for (auto &o : 
outputs) { - o->forceResetHostPointer(getCurProcessingGraph()->getExternalValueptr(o->uuid())); - o->uuid() = XNN_INVALID_VALUE_ID; - } -} - -XnnpackCargo *XnnpackBackend::getCurProcessingGraph() { - if (!graphs_.count(cur_processing_graph_name_)) { - Log::error("XnnpackBackend::getCurProcessingGraph, {} graph not exists"); - exit(-1); - } - return graphs_[cur_processing_graph_name_].get(); -} - -int XnnpackBackend::xnn_threads = 4; - -bool XnnpackBackend::enable_dynamic_shape = true; - -bool XnnpackBackend::enable_legacy_wrapper = false; - -} // namespace mllm::xnnpack \ No newline at end of file diff --git a/src/express/Express.cpp b/src/express/Express.cpp deleted file mode 100644 index bbab86ab1..000000000 --- a/src/express/Express.cpp +++ /dev/null @@ -1,1182 +0,0 @@ -#include -#include -#include "OpDefined.hpp" -#include "Types.hpp" -#include "unordered_map" -#include "Express.hpp" -#include -#include - -using namespace mllm; - -// For Visualization and Debug -void displayExpress(NetParameter *net) { - std::cout << "===NetParameter===" << std::endl; - for (auto *op : net->net_ops) { - std::cout << "===NetOP===" << std::endl; - std::cout << "op->name:" << op->name << std::endl; - std::cout << "op->type:" << op->type << std::endl; - std::cout << "op input" << op->in.size() << std::endl; - for (auto *input : op->in) { - std::cout << "==Input==\ninput.name:" << input->name << std::endl; - if (input->in != nullptr) { - std::cout << "input op:" << input->in->name << std::endl; - } - std::cout << "input in subgraph:" << (input->subgraph == net) << std::endl; - std::cout << std::endl; - } - std::cout << "op output" << op->out.size() << std::endl; - for (auto *output : op->out) { - std::cout << "output.name:" << output->name << std::endl; - std::cout << "output op:" << output->out.size() << std::endl; - if (!output->out.empty()) { - std::cout << "output op:" << output->out[0]->name << std::endl; - } - } - std::cout << std::endl; - } -} -void displayExpress(Context *c) 
{ - for (auto sub : c->sub_param_) { - displayExpress(&sub); - } -} - -#define _STORE_OUT_TENSOR \ - ctx->net_tensors.insert(out_tensor); \ - auto sub_param = get_active_subgraph(ctx); \ - out_tensor->subgraph = sub_param; \ - sub_param->net_tensors.push_back(out_tensor); - -#define _NEW_OP(type_) \ - sub_param->net_ops.emplace_back(new NetOp()); \ - auto net_op_ = (sub_param->net_ops.back()); \ - if (name.empty()) { \ - name = #type_ + std::to_string(ctx->idx); \ - } \ - net_op_->name = name; \ - net_op_->type = type_; \ - net_op_->param = OpParam(); \ - net_op_->param["type"] = type_; \ - ctx->net_ops.push_back(net_op_); - -#define _UPDATE_INPUT_TENSORS \ - for (auto &input : inputs) { \ - net_op_->in.push_back(input); \ - input->out.push_back(net_op_); \ - if (std::find(sub_param->net_tensors.begin(), sub_param->net_tensors.end(), input) == sub_param->net_tensors.end()) { \ - sub_param->net_tensors.push_back(input); \ - if (input->subgraph != nullptr) { \ - sub_param->net_inputs.insert(input); \ - } \ - } \ - } -static void topology(const NetParameter *net, vector &result, NetOp *op, std::unordered_map &visited) { - if (visited[op]) { - return; - } - visited[op] = true; - for (auto *input : op->in) { - if (input->in != nullptr && std::find(net->net_inputs.begin(), net->net_inputs.end(), input) == net->net_inputs.end()) { - topology(net, result, input->in, visited); - } - } - result.push_back(op); -} -void NetParameter::topologySort() { - std::unique_ptr> result(new vector()); - std::unordered_map visited; - result->reserve(net_ops.size()); - visited.reserve(net_ops.size()); - for (auto *op : net_ops) { - topology(this, *result, op, visited); - } - net_ops = *result; -} - -/** - * \brief Creates an input tensor with the given parameters. - * \param ctx The context in which the tensor is created. - * \param dims The dimensions of the tensor. Default is an empty vector. - * \param name The name of the tensor. Default is an empty string. 
- * \param type The data type of the tensor. Default is MLLM_TYPE_F32. - * \return A pointer to the created tensor. - */ -NetTensor *_Input(Context *ctx, vector dims, string name, DataType type) { - NetTensor *net_tensor = new NetTensor(); - if (name.empty()) { - name = "input" + std::to_string(ctx->idx); - } - net_tensor->name = name + "-00"; - net_tensor->shape_ = dims; - net_tensor->type = type; - net_tensor->subgraph = get_active_subgraph(ctx); - ctx->idx++; - auto *sub_param = get_active_subgraph(ctx); - sub_param->net_tensors.push_back(net_tensor); - ctx->net_tensors.insert(net_tensor); - net_tensor->ctx = ctx; - return net_tensor; -} - -/** - * \brief Creates a parameter tensor with the given parameters. - * \param ctx The context in which the tensor is created. - * \param inputs {} - * \param batch The batch size. - * \param seq The sequence length. - * \param head The number of heads. - * \param dim The dimension size. - * \param name The name of the tensor. - * \param type The data type of the tensor. Default is MLLM_TYPE_F32. - * \return A pointer to the created tensor. - */ -NetTensor *_Parameter(Context *ctx, std::vector inputs, int batch, int seq, int head, int dim, string name, DataType type) { - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "Parameter" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::PARAMETER) - net_op_->param["batch"] = batch; - net_op_->param["seq"] = seq; - net_op_->param["head"] = head; - net_op_->param["dim"] = dim; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -/** - * \brief Creates a range tensor from start to end. [start, end) - * \param ctx The context in which the tensor is created. - * \param inputs {} - * \param start start number - * \param end end number - * \param name The name of the tensor. 
- * \param type The data type of the tensor. Default is MLLM_TYPE_F32. - * \return A pointer to the created tensor. - */ -NetTensor *_Range(Context *ctx, std::vector inputs, int start, int end, string name, DataType type) { - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "Range" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::RANGE) - net_op_->param["start"] = start; - net_op_->param["end"] = end; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -NetTensor *_Add(std::vector inputs, string name) { - Context *ctx = inputs[0]->ctx; - // TODO:Check - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "Add" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::ADD) - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -NetTensor *_Causalmask(std::vector inputs, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "Causalmask" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::CAUSALMASK) - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -NetTensor *_Transpose(std::vector inputs, std::vector perm, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "Transpose" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::TRANSPOSE) - net_op_->param["perm0"] = perm[0]; - net_op_->param["perm1"] = 
perm[1]; - net_op_->param["perm2"] = perm[2]; - net_op_->param["perm3"] = perm[3]; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} - -NetTensor *_SiLU(std::vector inputs, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "Silu" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::SILU) - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} - -NetTensor *_SuperSiLU(std::vector inputs, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "Silu" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::SUPERSILU) - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -NetTensor *_Quantize(std::vector inputs, bool isNSHD, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "Quantize" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = MLLM_TYPE_I8; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::QUANTIZE) - net_op_->param["isNSHD"] = isNSHD; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -NetTensor *_Dequantize(std::vector inputs, bool isNSHD, string name, bool isFP32) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "Dequantize" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - if (isFP32) - out_tensor->type = MLLM_TYPE_F32; - else - out_tensor->type = MLLM_TYPE_F16; - ctx->idx++; - 
_STORE_OUT_TENSOR - _NEW_OP(mllm::DEQUANTIZE) - net_op_->param["isNSHD"] = isNSHD; - net_op_->param["isFP32"] = isFP32; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -/** - * \param axis The axis along which the softmax is performed. e.g. DIMENSION. - */ -NetTensor *_Softmax(std::vector inputs, int axis, int do_causal_mask, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "Softmax" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::SOFTMAX) - net_op_->param["axis"] = axis; - net_op_->param["do_causal_mask"] = do_causal_mask; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -/** - * \param transpose0 Whether to transpose the first input tensor. - * \param transpose1 Whether to transpose the second input tensor. - */ -NetTensor *_Matmul(std::vector inputs, bool transpose0, bool transpose1, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "Matmul" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::MATMUL) - net_op_->param["transpose0"] = transpose0; - net_op_->param["transpose1"] = transpose1; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -/** - * \param norm_size The size of the normed dimension. - * \param epsilon Default is 1e-6. 
- */ -NetTensor *_RMSNorm(std::vector inputs, int norm_size, float epsilon, string name, bool isFP32) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "RMSNorm" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - if (isFP32) - out_tensor->type = inputs[0]->type; - else - out_tensor->type = MLLM_TYPE_I8; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::RMSNORM) - net_op_->param["norm_size"] = (float)norm_size; - net_op_->param["epsilon"] = (float)epsilon; - net_op_->param["isFP32"] = (float)isFP32; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -/** - * \param pose_type RoPR type, 4 for HuggingFace Hub, 2 for LLama, 3 for fuyu, NO_USE for 1. - * This RoPE function is ready for optimization in the future. - */ -NetTensor *_RoPE(std::vector inputs, int pose_type, string name, int rope_theta, int max_position_embeddings) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "RoPE" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::ROPE) - net_op_->param["pose_type"] = pose_type; - net_op_->param["rope_theta"] = rope_theta; - net_op_->param["max_position_embeddings"] = max_position_embeddings; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} - -NetTensor *_IRoPE(std::vector inputs, int pose_type, string name, int rope_theta, int max_position_embeddings) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "IRoPE" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::IROPE) - net_op_->param["pose_type"] = pose_type; - 
net_op_->param["rope_theta"] = rope_theta; - net_op_->param["max_position_embeddings"] = max_position_embeddings; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} - - -NetTensor *_QNNRoPE(std::vector inputs, int pose_type, string name, int rope_theta, int max_position_embeddings, bool isFP32) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "RoPE" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - if (isFP32) - out_tensor->type = MLLM_TYPE_F32; - else - out_tensor->type = MLLM_TYPE_F16; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::ROPE) - net_op_->param["pose_type"] = pose_type; - net_op_->param["rope_theta"] = rope_theta; - net_op_->param["max_position_embeddings"] = max_position_embeddings; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} - -NetTensor *_QNNIRoPE(std::vector inputs, int pose_type, string name, int rope_theta, int max_position_embeddings, bool isFP32) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "RoPE" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - if (isFP32) - out_tensor->type = MLLM_TYPE_F32; - else - out_tensor->type = MLLM_TYPE_F16; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::IROPE) - net_op_->param["pose_type"] = pose_type; - net_op_->param["rope_theta"] = rope_theta; - net_op_->param["max_position_embeddings"] = max_position_embeddings; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} - -/** - * \param max_num The maximum number of positions. 
- */ -NetTensor *_PositionalEmbedding(std::vector inputs, int max_num, int hidden_dim, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "PE" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::POSITIOANL_EMBEDDING) - net_op_->param["max_num"] = max_num; - net_op_->param["hidden_dim"] = hidden_dim; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -/** - * \param scale scale factor. - * \param bias default is 0. - * \param bias_after_scale whether to add bias after scale. - */ -NetTensor *_Scale(std::vector inputs, float scale, float bias, bool bias_after_scale, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "Scale" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::SCALE) - net_op_->param["scale"] = scale; - net_op_->param["bias"] = bias; - net_op_->param["bias_after_scale"] = (int)bias_after_scale; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -/** - * \param in_features The size of each input sample (i.e., input dimension). - * \param out_features The size of each output sample (i.e., output dimension). - * \param bias If set to false, the layer will not learn an additive bias. Default is true. 
- */ -NetTensor *_Linear(std::vector inputs, int in_features, int out_features, bool bias, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "Linear" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::LINEAR) - net_op_->param["in_features"] = in_features; - net_op_->param["out_features"] = out_features; - net_op_->param["bias"] = (int)bias; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -/** - * \param in_dim The size of each input sample (i.e., input dimension). - * \param out_dim The size of each output sample (i.e., output dimension). - */ -NetTensor *_SparseLinear(std::vector inputs, int in_dim, int out_dim, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "SPARSELINEAR" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::SPARSELINEAR) - net_op_->param["in_dim_"] = (float)in_dim; - net_op_->param["out_dim_"] = (float)out_dim; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -/** - * \param in_dim The size of each input sample (i.e., input dimension). - * \param out_dim The size of each output sample (i.e., output dimension). 
- */ -NetTensor *_SparseIdLinear(std::vector inputs, int in_dim, int out_dim, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "SPARSEIDLINEAR" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::SPARSEIDLINEAR) - net_op_->param["in_dim_"] = (float)in_dim; - net_op_->param["out_dim_"] = (float)out_dim; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -/** - * \param in_dim The size of each input sample (i.e., input dimension). - * \param out_dim The size of each output sample (i.e., output dimension). - */ -NetTensor *_Predictor(std::vector inputs, int in_dim, int out_dim, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "Predictor" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::PREDICTOR) - net_op_->param["in_dim"] = (float)in_dim; - net_op_->param["out_dim"] = (float)out_dim; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -/** - * \param in_features The size of each input sample (i.e., input dimension). - * \param out_features The size of each output sample (i.e., output dimension). - * \param bias If set to false, the layer will not learn an additive bias. Default is true. 
- */ -NetTensor *_LinearINT8(std::vector inputs, int in_features, int out_features, bool bias, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "LinearINT8" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::LINEARINT8) - net_op_->param["in_features"] = in_features; - net_op_->param["out_features"] = out_features; - net_op_->param["bias"] = (int)bias; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} - -/** - * \param in_features The size of each input sample (i.e., input dimension). - * \param out_features The size of each output sample (i.e., output dimension). - * \param bias If set to false, the layer will not learn an additive bias. Default is true. - */ -vector _LinearINT8ShadowMerge(std::vector inputs, int in_features, int out_features, bool bias, string name) { - Context *ctx = inputs[0]->ctx; - if (name.empty()) { - name = "LinearINT8SHADOW" + std::to_string(ctx->idx); - } - auto sub_param = get_active_subgraph(ctx); - _NEW_OP(mllm::LINEARINT8SHADOW) - net_op_->param["in_features"] = in_features; - net_op_->param["out_features"] = out_features; - net_op_->param["bias"] = (int)bias; - _UPDATE_INPUT_TENSORS - vector out_tensors; - net_op_->out_size = 3; - for (int i = 0; i < 3; ++i) { - NetTensor *out_tensor = new NetTensor(); - out_tensor->name = "outtensor-" + name + "-0" + std::to_string(i); - out_tensor->type = inputs[0]->type; - ctx->idx++; - ctx->net_tensors.insert(out_tensor); - out_tensor->subgraph = sub_param; - sub_param->net_tensors.push_back(out_tensor); - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - out_tensors.push_back(out_tensor); - } - return out_tensors; -} - -/** - * Shadow linear for cpu graph in qnn model and cpu models - * \param in_features The size of each input sample (i.e., input 
dimension). - * \param out_features The size of each output sample (i.e., output dimension). - * \param bias If set to false, the layer will not learn an additive bias. Default is true. - */ -NetTensor *_LinearINT8ShadowCPU(std::vector inputs, int in_features, int out_features, int max_position, bool bias, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "LinearINT8SHADOWCPU" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = MLLM_TYPE_F32; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::LINEARINT8SHADOW) - net_op_->param["in_features"] = in_features; - net_op_->param["out_features"] = out_features; - net_op_->param["max_position"] = max_position; - net_op_->param["bias"] = (int)bias; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} - -/** - * \param vocab_size The size of the vocabulary. - * \param hidden_size The size of the hidden layer. 
- */ -NetTensor *_Embedding(std::vector inputs, int vocab_size, int hidden_size, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "Embedding" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::EMBEDDING) - net_op_->param["hidden_size"] = hidden_size; - net_op_->param["vocab_size"] = vocab_size; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -NetTensor *_Mul(std::vector inputs, string name) { - Context *ctx = inputs[0]->ctx; - // TODO:Check - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "Mul" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::MUL) - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -NetTensor *_KVCache(std::vector inputs, int cache_max, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "KVCache" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::KVCACHE) - net_op_->param["n_rep"] = 1; - net_op_->param["cache_max"] = (int)cache_max; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -// kvcache for qnn chunk execute -NetTensor *_KVCache(std::vector inputs, int n_rep, bool share_input, int cache_max, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "KVCache" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - 
_NEW_OP(mllm::KVCACHE) - net_op_->param["n_rep"] = 1; - net_op_->param["share_input"] = share_input; - net_op_->param["cache_max"] = (int)cache_max; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -/** - * Only for Transformer-based models' Decoder. - * \param n_rep if head size of K/V is different with Q, set n_rep > 1, the output will be replicated n_rep times. - * e.g. n_rep = 8 in TinyLLama. - */ -NetTensor *_KVCache(std::vector inputs, int n_rep, int cache_max, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "KVCache" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::KVCACHE) - net_op_->param["n_rep"] = (int)n_rep; - net_op_->param["cache_max"] = (int)cache_max; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -NetTensor *_KVCacheNPU(std::vector inputs, int cache_max, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "KVCache" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::KVCACHENPU) - net_op_->param["cache_max"] = (int)cache_max; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -NetTensor *_ReLU(std::vector inputs, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "ReLU" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::RELU) - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} 
-NetTensor *_ReLUSquaredActivation(std::vector inputs, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "ReLUSquaredActivation" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::RELU2) - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -NetTensor *_GELU(std::vector inputs, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "GELU" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::OP_GELU) - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -NetTensor *_QuickGELU(std::vector inputs, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "_QuickGELU" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::QUICKGLUE) - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -/** - * \param norm_size The size of the normed dimension. - * \param bias If set to false, the layer will not learn an additive bias. - * \param epsilon Default is 1e-6. 
- */ -NetTensor *_LayerNorm(std::vector inputs, int norm_size, bool bias, float epsilon, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "LayerNorm" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::LAYERNORM) - net_op_->param["bias"] = (int)bias; - net_op_->param["norm_size"] = (int)norm_size; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -/** - * \param split_num The number of output tensors. - * \param split_dim The dimension along which to split. e.g. D_HD for fuyu. - * \param split_dim_size The size of the dimension along which to split. - * This _Split function is ready for optimization in the future. - */ -vector _Split(std::vector inputs, int split_num, Chl split_dim, int split_dim_size, string name) { - Context *ctx = inputs[0]->ctx; - if (name.empty()) { - name = "Split" + std::to_string(ctx->idx); - } - auto sub_param = get_active_subgraph(ctx); - _NEW_OP(mllm::SPLIT) - net_op_->param["split_num"] = (int)split_num; - net_op_->param["split_dim"] = (int)split_dim; - net_op_->param["split_dim_size"] = (int)split_dim_size; - _UPDATE_INPUT_TENSORS - vector out_tensors; - net_op_->out_size = split_num; - for (int i = 0; i < split_num; ++i) { - NetTensor *out_tensor = new NetTensor(); - out_tensor->name = "outtensor-" + name + "-0" + std::to_string(i); - out_tensor->type = inputs[0]->type; - ctx->idx++; - ctx->net_tensors.insert(out_tensor); - out_tensor->subgraph = sub_param; - sub_param->net_tensors.push_back(out_tensor); - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - out_tensors.push_back(out_tensor); - } - return out_tensors; -} -NetTensor *_Gather(std::vector inputs, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "Gather" + 
std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::GATHER) - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -/** - * \param in_channel The number of channels in the input image. - * \param out_channel The number of channels produced by the convolution. - * \param kernal The size of the convolving kernel. - * \param stride The stride of the convolution. - * \param padding The type of padding applied to the input. Default is VALID. - * \param bias If set to false, the layer will not learn an additive bias. Default is true. - */ -NetTensor *_Convolution2D(std::vector inputs, int in_channel, int out_channel, vector kernal, vector stride, PaddingType padding, bool bias, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "Convolution2D" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::CONVOLUTION2D) - net_op_->param["in_channel"] = (float)in_channel; - net_op_->param["out_channel"] = (float)out_channel; - net_op_->param["kernal_h"] = (float)kernal[0]; - net_op_->param["kernal_w"] = (float)kernal[1]; - net_op_->param["stride_h"] = (float)stride[0]; - net_op_->param["stride_w"] = (float)stride[1]; - net_op_->param["padding"] = (float)padding; - net_op_->param["bias"] = (float)bias; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -/** - * \param in_channel The number of channels in the input image. - * \param out_channel The number of channels produced by the convolution. - * \param kernal The size of the convolving kernel. - * \param stride The stride of the convolution. - * \param padding The type of padding applied to the input. 
- * \param bias If set to false, the layer will not learn an additive bias. Default is true. - */ -NetTensor *_Convolution3D(std::vector inputs, int in_channel, int out_channel, vector kernal, vector stride, PaddingType padding, bool bias, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "Convolution3D" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::CONVOLUTION3D) - net_op_->param["in_channel"] = (float)in_channel; - net_op_->param["out_channel"] = (float)out_channel; - net_op_->param["kernal_t"] = (float)kernal[0]; - net_op_->param["kernal_h"] = (float)kernal[1]; - net_op_->param["kernal_w"] = (float)kernal[2]; - net_op_->param["stride_t"] = (float)stride[1]; - net_op_->param["stride_h"] = (float)stride[1]; - net_op_->param["stride_w"] = (float)stride[2]; - net_op_->param["padding"] = (float)padding; - net_op_->param["bias"] = (float)bias; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -/** - * \param kernal The size of the convolving kernel. - * \param stride The stride of the convolution. - * \param padding The type of padding applied to the input. 
- */ -NetTensor *_AvgPool2D(std::vector inputs, vector kernal, vector stride, PaddingType padding, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "AvgPool2D" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::AVGPOOL2D) - net_op_->param["kernal_h"] = (float)kernal[0]; - net_op_->param["kernal_w"] = (float)kernal[1]; - net_op_->param["stride_h"] = (float)stride[0]; - net_op_->param["stride_w"] = (float)stride[1]; - net_op_->param["padding"] = (float)padding; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -/** - * \param kernal The size of the convolving kernel. - * \param stride The stride of the convolution. - * \param padding The type of padding applied to the input. - */ -NetTensor *_MaxPool2D(std::vector inputs, vector kernal, vector stride, PaddingType padding, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "MaxPool2D" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::MAXPOOL2D) - net_op_->param["kernal_h"] = (float)kernal[0]; - net_op_->param["kernal_w"] = (float)kernal[1]; - net_op_->param["stride_h"] = (float)stride[0]; - net_op_->param["stride_w"] = (float)stride[1]; - net_op_->param["padding"] = (float)padding; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -/** - * \param axis The axis along which the concat is performed. 
- */ -NetTensor *_Cat(std::vector inputs, Chl axis, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "_Cat" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::CAT) - net_op_->param["axis"] = (float)axis; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -NetTensor *_Division(std::vector inputs, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "Division" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::DIVISION) - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} -NetTensor *_Replace(std::vector inputs, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "Replace" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::REPLACE) - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} - -NetTensor *_WNop(std::vector inputs, int sync_type, string name) { - Context *ctx = inputs[0]->ctx; - NetTensor *out_tensor = new NetTensor(); - if (name.empty()) { - name = "WNop" + std::to_string(ctx->idx); - } - out_tensor->name = "outtensor-" + name + "-00"; - out_tensor->type = inputs[0]->type; - ctx->idx++; - _STORE_OUT_TENSOR - _NEW_OP(mllm::WNOP) - net_op_->param["sync_type"] = (float)sync_type; - _UPDATE_INPUT_TENSORS - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - return out_tensor; -} - -vector _MergeOutput(std::vector inputs, string name) { - Context *ctx = 
inputs[0]->ctx; - if (name.empty()) { - name = "Merge" + std::to_string(ctx->idx); - } - auto sub_param = get_active_subgraph(ctx); - _NEW_OP(mllm::MERGEOUTPUT) - _UPDATE_INPUT_TENSORS - vector out_tensors; - net_op_->out_size = inputs.size(); - for (int i = 0; i < inputs.size(); ++i) { - NetTensor *out_tensor = new NetTensor(); - out_tensor->name = "outtensor-" + name + "-0" + std::to_string(i); - out_tensor->type = inputs[i]->type; - ctx->idx++; - ctx->net_tensors.insert(out_tensor); - out_tensor->subgraph = sub_param; - sub_param->net_tensors.push_back(out_tensor); - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - out_tensors.push_back(out_tensor); - } - return out_tensors; -} - -vector _SplitInput(std::vector inputs, bool isPrompt, int num, string name) { - Context *ctx = inputs[0]->ctx; - if (name.empty()) { - name = "Split" + std::to_string(ctx->idx); - } - auto sub_param = get_active_subgraph(ctx); - _NEW_OP(mllm::SPLITINPUT) - net_op_->param["isPrompt"] = (float)isPrompt; - net_op_->param["num"] = (float)num; - _UPDATE_INPUT_TENSORS - vector out_tensors; - net_op_->out_size = inputs.size(); - for (int i = 0; i < inputs.size(); ++i) { - NetTensor *out_tensor = new NetTensor(); - out_tensor->name = "outtensor-" + name + "-0" + std::to_string(i); - out_tensor->type = inputs[i]->type; - ctx->idx++; - ctx->net_tensors.insert(out_tensor); - out_tensor->subgraph = sub_param; - sub_param->net_tensors.push_back(out_tensor); - out_tensor->in = net_op_; - out_tensor->ctx = ctx; - out_tensors.push_back(out_tensor); - } - return out_tensors; -} - -void _SubgraphBegin(Context *ctx, BackendType backend) { - ctx->active_sub++; - ctx->next_backend = backend; -} \ No newline at end of file diff --git a/src/express/Express.hpp b/src/express/Express.hpp deleted file mode 100644 index 19c1a6a1f..000000000 --- a/src/express/Express.hpp +++ /dev/null @@ -1,63 +0,0 @@ -#ifndef MLLM_EXPRESS_H -#define MLLM_EXPRESS_H - -#include "ExpressBase.hpp" -#include "Types.hpp" -#include 
-#include -using namespace mllm; -void displayExpress(Context *c); - -void _SubgraphBegin(Context *ctx, BackendType backend = MLLM_CPU); - -NetTensor *_Input(Context *ctx, vector dims = {}, string name = "", DataType type = MLLM_TYPE_F32); -NetTensor *_Parameter(Context *ctx, std::vector inputs, int batch, int seq, int head, int dim, string name = "", DataType type = MLLM_TYPE_F32); -NetTensor *_Range(Context *ctx, std::vector inputs, int start, int end, string name = "", DataType type = MLLM_TYPE_F32); -NetTensor *_Add(std::vector inputs, string name = ""); -NetTensor *_Causalmask(std::vector inputs, string name = ""); -NetTensor *_SiLU(std::vector inputs, string name = ""); -NetTensor *_SuperSiLU(std::vector inputs, string name = ""); -NetTensor *_Quantize(std::vector inputs, bool isNSHD = true, string name = ""); -NetTensor *_Dequantize(std::vector inputs, bool isNSHD = true, string name = "", bool isFP32 = true); -NetTensor *_Softmax(std::vector inputs, int axis, int do_causal_mask, string name = ""); -NetTensor *_Matmul(std::vector inputs, bool transpose0, bool transpose1, string name = ""); -NetTensor *_RMSNorm(std::vector inputs, int norm_size, float epsilon = 1e-6, string name = "", bool isFP32 = true); -NetTensor *_RoPE(std::vector inputs, int pose_type, string name = "", int rope_theta = 10000, int max_position_embeddings = 16384); -NetTensor *_IRoPE(std::vector inputs, int pose_type, string name = "", int rope_theta = 10000, int max_position_embeddings = 16384); -NetTensor *_QNNRoPE(std::vector inputs, int pose_type, string name = "", int rope_theta = 10000, int max_position_embeddings = 16384, bool isFP32 = true); -NetTensor *_QNNIRoPE(std::vector inputs, int pose_type, string name = "", int rope_theta = 10000, int max_position_embeddings = 16384, bool isFP32 = true); -NetTensor *_PositionalEmbedding(std::vector inputs, int max_num, int hidden_dim, string name = ""); -NetTensor *_Scale(std::vector inputs, float scale, float bias, bool bias_after_scale, 
string name); -NetTensor *_Linear(std::vector inputs, int in_features, int out_features, bool bias, string name = ""); -NetTensor *_LinearINT8(std::vector inputs, int in_features, int out_features, bool bias, string name = ""); -vector _LinearINT8ShadowMerge(std::vector inputs, int in_features, int out_features, bool bias, string name = ""); -NetTensor *_LinearINT8ShadowCPU(std::vector inputs, int in_features, int out_features, int max_position = 1024, bool bias = false, string name = ""); -NetTensor *_Embedding(std::vector inputs, int vocab_size, int hidden_size, string name = ""); -NetTensor *_Mul(std::vector inputs, string name = ""); -NetTensor *_KVCache(std::vector inputs, int cache_max, string name = ""); -NetTensor *_KVCache(std::vector inputs, int n_rep, int cache_max, string name = ""); -NetTensor *_KVCacheNPU(std::vector inputs, int cache_max, string name = ""); -NetTensor *_KVCache(std::vector inputs, int n_rep, bool share_input, int cache_max, string name = ""); -NetTensor *_ReLU(std::vector inputs, string name = ""); -NetTensor *_ReLUSquaredActivation(std::vector inputs, string name = ""); -NetTensor *_GELU(std::vector inputs, string name = ""); -NetTensor *_QuickGELU(std::vector inputs, string name = ""); -NetTensor *_LayerNorm(std::vector inputs, int norm_size, bool bias = true, float epsilon = 1e-6, string name = ""); -vector _Split(std::vector inputs, int split_num, Chl split_dim, int split_dim_size = -1, string name = ""); -NetTensor *_Gather(std::vector inputs, string name = ""); -NetTensor *_Convolution2D(std::vector inputs, int in_channel, int out_channel, vector kernal, vector stride, PaddingType padding, bool bias = false, string name = ""); -NetTensor *_Convolution3D(std::vector inputs, int in_channel, int out_channel, vector kernal, vector stride, PaddingType padding, bool bias = false, string name = ""); -NetTensor *_AvgPool2D(std::vector inputs, vector kernal, vector stride, PaddingType padding, string name = ""); -NetTensor 
*_MaxPool2D(std::vector inputs, vector kernal, vector stride, PaddingType padding, string name = ""); -NetTensor *_Cat(std::vector inputs, Chl axis, string name = ""); -NetTensor *_Division(std::vector inputs, string name = ""); -NetTensor *_Replace(std::vector inputs, string name = ""); -NetTensor *_SparseLinear(std::vector inputs, int in_dim, int out_dim, string name = ""); -NetTensor *_SparseIdLinear(std::vector inputs, int in_dim, int out_dim, string name = ""); -NetTensor *_Predictor(std::vector inputs, int in_dim, int out_dim, string name = ""); -NetTensor *_WNop(std::vector inputs, int sync_type, string name = ""); -vector _MergeOutput(std::vector inputs, string name = ""); -vector _SplitInput(std::vector inputs, bool isPrompt, int num, string name = ""); -NetTensor *_Transpose(std::vector inputs, std::vector perm, string name = ""); - -#endif // MLLM_EXPRESS_H \ No newline at end of file diff --git a/src/express/ExpressBase.cpp b/src/express/ExpressBase.cpp deleted file mode 100644 index 63f22a508..000000000 --- a/src/express/ExpressBase.cpp +++ /dev/null @@ -1,423 +0,0 @@ -// -// Created by Rongjie Yi on 24-1-8. 
-// - -#include "ExpressBase.hpp" -namespace mllm { - -#define _SET_OUT_TENSOR_NAME(out_tensor, name, net_op_, div_str) \ - std::string prefix = "outtensor"; \ - if (name.compare(0, prefix.size(), prefix) == 0) { \ - out_tensor->name = name + div_str + "-00"; \ - } else { \ - out_tensor->name = "outtensor-" + name + div_str + "-00"; \ - } \ - out_tensor->type = this->type; \ - ctx->idx++; \ - ctx->net_tensors.insert(out_tensor); \ - auto sub_param = get_active_subgraph(ctx); \ - out_tensor->subgraph = sub_param; \ - sub_param->net_tensors.push_back(out_tensor); \ - sub_param->net_ops.emplace_back(new NetOp()); \ - auto net_op_ = (sub_param->net_ops.back()); \ - if (name.compare(0, prefix.size(), prefix) == 0) { \ - std::string name_new = name.substr(10); \ - net_op_->name = name_new + div_str; \ - } else { \ - net_op_->name = name + div_str; \ - } \ - net_op_->param = OpParam(); - -#define _SET_NET_OP(ctx, net_op_, this, sub_param, out_tensor) \ - ctx->net_ops.push_back(net_op_); \ - net_op_->in.push_back(this); \ - this->out.push_back(net_op_); \ - if (std::find(sub_param->net_tensors.begin(), sub_param->net_tensors.end(), this) == sub_param->net_tensors.end()) { \ - sub_param->net_tensors.push_back(this); \ - if (this->subgraph != nullptr) { \ - sub_param->net_inputs.insert(this); \ - } \ - } \ - out_tensor->in = net_op_; \ - out_tensor->ctx = ctx; - -#define _SET_NET_OP_AND_TENSOR(ctx, net_op_, this, sub_param, in_1, out_tensor) \ - ctx->net_ops.push_back(net_op_); \ - net_op_->in.push_back(this); \ - this->out.push_back(net_op_); \ - if (std::find(sub_param->net_tensors.begin(), sub_param->net_tensors.end(), this) == sub_param->net_tensors.end()) { \ - sub_param->net_tensors.push_back(this); \ - if (this->subgraph != nullptr) { \ - sub_param->net_inputs.insert(this); \ - } \ - } \ - net_op_->in.push_back(in_1); \ - in_1->out.push_back(net_op_); \ - if (std::find(sub_param->net_tensors.begin(), sub_param->net_tensors.end(), in_1) == sub_param->net_tensors.end()) 
{ \ - sub_param->net_tensors.push_back(in_1); \ - if (in_1->subgraph != nullptr) { \ - sub_param->net_inputs.insert(in_1); \ - } \ - } \ - out_tensor->in = net_op_; \ - out_tensor->ctx = ctx; - -NetTensor *TNetTensor::clip(vector b, vector h, vector s, vector d) { - Context *ctx = this->ctx; - NetTensor *out_tensor = new NetTensor(); - _SET_OUT_TENSOR_NAME(out_tensor, name, net_op_, "_clip_") - net_op_->type = SUBDIM; - net_op_->param["type"] = SUBDIM; - if ((b.size() + h.size() + s.size() + d.size() == 2) && (b.size() * b.size() + h.size() * h.size() + s.size() * s.size() + d.size() * d.size() == 4)) { - if (b.size() == 2) { - net_op_->param["dim"] = (float)BATCH; - net_op_->param["start_i"] = (float)b[0]; - net_op_->param["end_i"] = (float)b[1]; - } else if (h.size() == 2) { - net_op_->param["dim"] = (float)HEAD; - net_op_->param["start_i"] = (float)h[0]; - net_op_->param["end_i"] = (float)h[1]; - } else if (s.size() == 2) { - net_op_->param["dim"] = (float)SEQUENCE; - net_op_->param["start_i"] = (float)s[0]; - net_op_->param["end_i"] = (float)s[1]; - } else if (d.size() == 2) { - net_op_->param["dim"] = (float)DIMENSION; - net_op_->param["start_i"] = (float)d[0]; - net_op_->param["end_i"] = (float)d[1]; - } else { - } - } else if ((b.size() + h.size() + s.size() + d.size() == 1) && (b.size() * b.size() + h.size() * h.size() + s.size() * s.size() + d.size() * d.size() == 1)) { - if (b.size() == 1) { - net_op_->param["dim"] = (float)BATCH; - net_op_->param["start_i"] = (float)b[0]; - net_op_->param["end_i"] = (float)(b[0] + 1); - } else if (h.size() == 1) { - net_op_->param["dim"] = (float)HEAD; - net_op_->param["start_i"] = (float)h[0]; - net_op_->param["end_i"] = (float)(h[0] + 1); - } else if (s.size() == 1) { - net_op_->param["dim"] = (float)SEQUENCE; - net_op_->param["start_i"] = (float)s[0]; - net_op_->param["end_i"] = (float)(s[0] + 1); - } else if (d.size() == 1) { - net_op_->param["dim"] = (float)DIMENSION; - net_op_->param["start_i"] = (float)d[0]; - 
net_op_->param["end_i"] = (float)(d[0] + 1); - } else { - } - } else { - std::cout << "ERROR: " << name << " clip" << std::endl; - } - _SET_NET_OP(ctx, net_op_, this, sub_param, out_tensor) - return out_tensor; -} - -NetTensor *TNetTensor::_clip(intTensor_pair b, intTensor_pair h, intTensor_pair s, intTensor_pair d) { - Context *ctx = this->ctx; - NetTensor *out_tensor = new NetTensor(); - _SET_OUT_TENSOR_NAME(out_tensor, name, net_op_, "_clip_") - net_op_->type = SUBDIM; - net_op_->param["type"] = SUBDIM; - NetTensor *in_1; - if (b.end_i != nullptr) { - net_op_->param["dim"] = (float)BATCH; - net_op_->param["start_i"] = (float)b.start_i; - net_op_->param["end_i"] = (float)ANYDIM; - in_1 = b.end_i; - } else if (h.end_i != nullptr) { - net_op_->param["dim"] = (float)HEAD; - net_op_->param["start_i"] = (float)h.start_i; - net_op_->param["end_i"] = (float)ANYDIM; - in_1 = h.end_i; - } else if (s.end_i != nullptr) { - net_op_->param["dim"] = (float)SEQUENCE; - net_op_->param["start_i"] = (float)s.start_i; - net_op_->param["end_i"] = (float)ANYDIM; - in_1 = s.end_i; - } else if (d.end_i != nullptr) { - net_op_->param["dim"] = (float)DIMENSION; - net_op_->param["start_i"] = (float)d.start_i; - net_op_->param["end_i"] = (float)ANYDIM; - in_1 = d.end_i; - } else { - } - _SET_NET_OP_AND_TENSOR(ctx, net_op_, this, sub_param, in_1, out_tensor) - return out_tensor; -} - -NetTensor *TNetTensor::_clip(Tensor_pair b, Tensor_pair h, Tensor_pair s, Tensor_pair d) { - Context *ctx = this->ctx; - NetTensor *out_tensor = new NetTensor(); - _SET_OUT_TENSOR_NAME(out_tensor, name, net_op_, "_clip_") - net_op_->type = SUBDIM; - net_op_->param["type"] = SUBDIM; - NetTensor *in_1; - net_op_->param["start_i"] = (float)ANYDIM; - net_op_->param["end_i"] = (float)ANYDIM; - if (b.end_i != nullptr) { - net_op_->param["dim"] = (float)BATCH; - in_1 = b.end_i; - } else if (h.end_i != nullptr) { - net_op_->param["dim"] = (float)HEAD; - in_1 = h.end_i; - } else if (s.end_i != nullptr) { - 
net_op_->param["dim"] = (float)SEQUENCE; - in_1 = s.end_i; - } else if (d.end_i != nullptr) { - net_op_->param["dim"] = (float)DIMENSION; - in_1 = d.end_i; - } else { - } - _SET_NET_OP_AND_TENSOR(ctx, net_op_, this, sub_param, in_1, out_tensor) - return out_tensor; -} - -NetTensor *TNetTensor::shape(Chl axis) { - Context *ctx = this->ctx; - NetTensor *out_tensor = new NetTensor(); - _SET_OUT_TENSOR_NAME(out_tensor, name, net_op_, "_shape_") - net_op_->type = SHAPE; - net_op_->param["type"] = SHAPE; - net_op_->param["axis"] = axis; - _SET_NET_OP(ctx, net_op_, this, sub_param, out_tensor) - return out_tensor; -} - -NetTensor *TNetTensor::mean(Chl axis) { - Context *ctx = this->ctx; - NetTensor *out_tensor = new NetTensor(); - _SET_OUT_TENSOR_NAME(out_tensor, name, net_op_, "_mean_") - net_op_->type = MEAN; - net_op_->param["type"] = MEAN; - net_op_->param["axis"] = axis; - _SET_NET_OP(ctx, net_op_, this, sub_param, out_tensor) - return out_tensor; -} - -NetTensor *TNetTensor::where(float data) { - Context *ctx = this->ctx; - NetTensor *out_tensor = new NetTensor(); - _SET_OUT_TENSOR_NAME(out_tensor, name, net_op_, "_where_") - net_op_->type = WHERE; - net_op_->param["type"] = WHERE; - net_op_->param["data"] = data; - net_op_->param["axis"] = -1; - _SET_NET_OP(ctx, net_op_, this, sub_param, out_tensor) - return out_tensor; -} - -NetTensor *TNetTensor::where(float data, Chl axis) { - Context *ctx = this->ctx; - NetTensor *out_tensor = new NetTensor(); - _SET_OUT_TENSOR_NAME(out_tensor, name, net_op_, "_where_") - net_op_->type = WHERE; - net_op_->param["type"] = WHERE; - net_op_->param["data"] = data; - net_op_->param["axis"] = (float)axis; - _SET_NET_OP(ctx, net_op_, this, sub_param, out_tensor) - return out_tensor; -} - -NetTensor *TNetTensor::view(int b, int h, int s, int d) { - Context *ctx = this->ctx; - NetTensor *out_tensor = new NetTensor(); - _SET_OUT_TENSOR_NAME(out_tensor, name, net_op_, "_view_") - - net_op_->type = VIEW; - net_op_->param["type"] = VIEW; - 
- vector dims; - vector data_dims; - if (b == -1 & s == -1 & h != -1 & d != -1) { // keep b&s change h&d - if (h != 1) { - dims = {b, h, s, -1}; - data_dims = {BATCH, DIMENSION, SEQUENCE, DIMENSION}; - } else { - dims = {b, -1, s, -1}; - data_dims = {BATCH, -1, SEQUENCE, HEAD + DIMENSION}; - } - } else if (b == -1 & d == -1 & h != -1 & s != -1) { // keep b&d change h&s - if (h != 1) { - dims = {b, h, -1, d}; - data_dims = {BATCH, SEQUENCE, SEQUENCE, DIMENSION}; - } else { - dims = {b, -1, -1, d}; - data_dims = {BATCH, -1, HEAD + SEQUENCE, DIMENSION}; - } - } else if (h == -1 & d == -1 & b != -1 & s != -1) { // keep h&d change b&s - if (s != 1) { - dims = {-1, h, s, d}; - data_dims = {BATCH, HEAD, BATCH, DIMENSION}; - } else { - dims = {-1, h, -1, d}; - data_dims = {BATCH + SEQUENCE, HEAD, -1, DIMENSION}; - } - } else if (b != -1 & d != -1 & h != -1 & s != -1) { // change all dimension. - - dims = {b, h, s, d}; - data_dims = {BATCH, HEAD, SEQUENCE, DIMENSION}; - - } else { - std::cout << "ERROR: " << name << " view [" << b << ", " << h << ", " << s << ", " << d << "]" << std::endl; - } - net_op_->param["dim0"] = dims[0]; - net_op_->param["dim1"] = dims[1]; - net_op_->param["dim2"] = dims[2]; - net_op_->param["dim3"] = dims[3]; - net_op_->param["data_dim0"] = data_dims[0]; - net_op_->param["data_dim1"] = data_dims[1]; - net_op_->param["data_dim2"] = data_dims[2]; - net_op_->param["data_dim3"] = data_dims[3]; - _SET_NET_OP(ctx, net_op_, this, sub_param, out_tensor) - out_tensor->type = this->type; - return out_tensor; -} - -NetTensor *TNetTensor::flatten(Chl axis_start, Chl axis_end) { - Context *ctx = this->ctx; - NetTensor *out_tensor = new NetTensor(); - _SET_OUT_TENSOR_NAME(out_tensor, name, net_op_, "_flatten_") - net_op_->type = VIEW; - net_op_->param["type"] = VIEW; - vector dims = {-1, -1, -1, -1}; - vector data_dims; - if (axis_start == BATCH & axis_end == SEQUENCE) { - data_dims = {-1, HEAD, BATCH + SEQUENCE, DIMENSION}; - } else if (axis_start == HEAD & 
axis_end == SEQUENCE) { - data_dims = {BATCH, -1, HEAD + SEQUENCE, DIMENSION}; - } else if (axis_start == HEAD & axis_end == DIMENSION) { - data_dims = {BATCH, HEAD, -1, SEQUENCE + DIMENSION}; - } else if (axis_start == TIME & axis_end == WIDTH) { - data_dims = {BATCH, -1, TIME + HEIGHT + WIDTH, CHANNLE}; - } else { - std::cout << "ERROR: " << name << " flatten " << axis_start << "&" << axis_end << std::endl; - } - net_op_->param["dim0"] = dims[0]; - net_op_->param["dim1"] = dims[1]; - net_op_->param["dim2"] = dims[2]; - net_op_->param["dim3"] = dims[3]; - net_op_->param["data_dim0"] = data_dims[0]; - net_op_->param["data_dim1"] = data_dims[1]; - net_op_->param["data_dim2"] = data_dims[2]; - net_op_->param["data_dim3"] = data_dims[3]; - _SET_NET_OP(ctx, net_op_, this, sub_param, out_tensor) - return out_tensor; -} - -NetTensor *TNetTensor::transpose(Chl axis1, Chl axis2) { - Context *ctx = this->ctx; - NetTensor *out_tensor = new NetTensor(); - _SET_OUT_TENSOR_NAME(out_tensor, name, net_op_, "_transpose_") - if (axis1 == SEQUENCE & axis2 == DIMENSION) { - net_op_->type = TRANSPOSE; - net_op_->param["type"] = TRANSPOSE; - net_op_->param["axis0"] = axis1; - net_op_->param["axis1"] = axis2; - } else if (axis1 == THW & axis2 == CHANNLE) { - net_op_->type = TRANSPOSE; - net_op_->param["type"] = TRANSPOSE; - net_op_->param["axis0"] = axis1; - net_op_->param["axis1"] = axis2; - } else if (axis1 == BATCH & axis2 == SEQUENCE) { - net_op_->type = TRANSPOSE; - net_op_->param["type"] = TRANSPOSE; - net_op_->param["axis0"] = axis1; - net_op_->param["axis1"] = axis2; - } else { - net_op_->type = VIEW; - net_op_->param["type"] = VIEW; - vector dims; - vector data_dims; - if (axis1 == BATCH & axis2 == SEQUENCE) { - dims = {-1, -1, -1, -1}; - data_dims = {SEQUENCE, HEAD, BATCH, DIMENSION}; - } else if (axis1 == HEAD & axis2 == SEQUENCE) { - dims = {-1, -1, -1, -1}; - data_dims = {BATCH, SEQUENCE, HEAD, DIMENSION}; - } else if (axis1 == HEAD & axis2 == DIMENSION) { - dims = {-1, -1, 
-1, -1}; - data_dims = {BATCH, SEQUENCE, DIMENSION, HEAD}; - } else { - std::cout << "ERROR: " << name << " transpose " << axis1 << "&" << axis2 << std::endl; - } - net_op_->param["dim0"] = dims[0]; - net_op_->param["dim1"] = dims[1]; - net_op_->param["dim2"] = dims[2]; - net_op_->param["dim3"] = dims[3]; - net_op_->param["data_dim0"] = data_dims[0]; - net_op_->param["data_dim1"] = data_dims[1]; - net_op_->param["data_dim2"] = data_dims[2]; - net_op_->param["data_dim3"] = data_dims[3]; - } - _SET_NET_OP(ctx, net_op_, this, sub_param, out_tensor) - return out_tensor; -} - -NetTensor *TNetTensor::norm(int L_n) { - Context *ctx = this->ctx; - NetTensor *out_tensor = new NetTensor(); - _SET_OUT_TENSOR_NAME(out_tensor, name, net_op_, "_norm_") - net_op_->type = NORM; - net_op_->param["type"] = NORM; - net_op_->param["L_n"] = (float)L_n; - _SET_NET_OP(ctx, net_op_, this, sub_param, out_tensor) - return out_tensor; -} - -NetTensor *TNetTensor::operator+(NetTensor *in_1) { - Context *ctx = this->ctx; - NetTensor *out_tensor = new NetTensor(); - _SET_OUT_TENSOR_NAME(out_tensor, name, net_op_, "_add_") - net_op_->type = ADD; - net_op_->param["type"] = ADD; - _SET_NET_OP_AND_TENSOR(ctx, net_op_, this, sub_param, in_1, out_tensor) - return out_tensor; -} - -NetTensor *TNetTensor::operator*(NetTensor *in_1) { - Context *ctx = this->ctx; - NetTensor *out_tensor = new NetTensor(); - _SET_OUT_TENSOR_NAME(out_tensor, name, net_op_, "_mul_") - net_op_->type = MUL; - net_op_->param["type"] = MUL; - _SET_NET_OP_AND_TENSOR(ctx, net_op_, this, sub_param, in_1, out_tensor) - return out_tensor; -} -NetTensor *TNetTensor::operator*(float muln) { - Context *ctx = this->ctx; - NetTensor *out_tensor = new NetTensor(); - _SET_OUT_TENSOR_NAME(out_tensor, name, net_op_, "_div1_") - net_op_->type = SCALE; - net_op_->param["type"] = SCALE; - net_op_->param["scale"] = muln; - net_op_->param["bias"] = 0.0F; - net_op_->param["bias_after_scale"] = (float)false; - _SET_NET_OP(ctx, net_op_, this, 
sub_param, out_tensor) - return out_tensor; -} - -NetTensor *TNetTensor::operator/(NetTensor *in_1) { - Context *ctx = this->ctx; - NetTensor *out_tensor = new NetTensor(); - _SET_OUT_TENSOR_NAME(out_tensor, name, net_op_, "_div_") - net_op_->type = DIVISION; - net_op_->param["type"] = DIVISION; - _SET_NET_OP_AND_TENSOR(ctx, net_op_, this, sub_param, in_1, out_tensor) - return out_tensor; -} - -NetTensor *TNetTensor::operator/(float devr) { - Context *ctx = this->ctx; - NetTensor *out_tensor = new NetTensor(); - _SET_OUT_TENSOR_NAME(out_tensor, name, net_op_, "_div1_") - net_op_->type = SCALE; - net_op_->param["type"] = SCALE; - net_op_->param["scale"] = 1 / devr; - net_op_->param["bias"] = 0.0F; - net_op_->param["bias_after_scale"] = (float)false; - _SET_NET_OP(ctx, net_op_, this, sub_param, out_tensor) - return out_tensor; -} - -} // namespace mllm \ No newline at end of file diff --git a/src/express/ExpressBase.hpp b/src/express/ExpressBase.hpp deleted file mode 100644 index ef70efd0b..000000000 --- a/src/express/ExpressBase.hpp +++ /dev/null @@ -1,113 +0,0 @@ - -#ifndef MLLM_NETPARAMETER_H -#define MLLM_NETPARAMETER_H - -#include "Types.hpp" -#include -#include -#include -#include -#include -using std::string; -using std::vector; -using std::map; - -namespace mllm { - -typedef struct TNetTensor NetTensor; -typedef struct TNetParameter NetParameter; - -typedef struct TNetOp { - OpType type; - vector in; - vector out; - vector in_op; // input ops' names; - string name; - OpParam param; - int out_size = 1; // output tensor size - -} NetOp; - -typedef struct TNetParameter { - string weights_path; - string model_path; - vector net_ops; - vector net_tensors; - std::set net_inputs; - std::set net_outputs; - void topologySort(); - -} NetParameter; - -// 前置声明 -struct Context { - vector sub_param_; - vector sub_backend_; - BackendType next_backend = MLLM_CPU; - vector net_ops; - std::set net_tensors; - int idx = 0; - int active_sub = 0; -}; -inline NetParameter 
*get_active_subgraph(Context *ctx) { - if (ctx->active_sub >= ctx->sub_param_.size()) { - ctx->sub_param_.emplace_back(); - ctx->sub_backend_.emplace_back(ctx->next_backend); - } - return &ctx->sub_param_[ctx->active_sub]; -} - -struct intTensor_pair { - int start_i; - NetTensor *end_i; -}; -struct Tensor_pair { - NetTensor *end_i; -}; - -typedef struct TNetTensor { - string name; - vector shape_; - DataType type; - NetOp *in; - vector out; - NetParameter *subgraph; - Context *ctx; - - /** - * \brief Clips the tensor along the specified dimensions. - * \param b A vector specifying the start and end indices for the batch dimension. - * \param h A vector specifying the start and end indices for the head dimension. - * \param s A vector specifying the start and end indices for the sequence dimension. - * \param d A vector specifying the start and end indices for the dimension dimension. - * \return A pointer to the resulting tensor after clipping. - * - * e.g. clip({}, {}, {0, 1}, {}) will clip the tensor along the sequence dimension from 0 to 2; - * clip({}, {}, {0}, {}) will clip the tensor along the sequence dimension from 0 to 1; - * clip({}, {}, {-1}, {}) will clip the tensor along the sequence dimension from dim_len-1 to dim_len; - * _clip({}, {}, {0, i->shape(SEQUENCE)}, {}); will clip the tensor along the sequence dimension from 0 to i->shape(SEQUENCE); - * _clip({}, {}, {in_len}, {}); will clip the tensor along the sequence dimension from in_len to in_len+1; - */ - NetTensor *clip(vector b, vector h, vector s, vector d); - NetTensor *_clip(intTensor_pair b, intTensor_pair h, intTensor_pair s, intTensor_pair d); - NetTensor *_clip(Tensor_pair b, Tensor_pair h, Tensor_pair s, Tensor_pair d); - NetTensor *shape(Chl axis); - NetTensor *view(int b, int h, int s, int d); - NetTensor *flatten(Chl axis_start, Chl axis_end); - NetTensor *transpose(Chl axis1, Chl axis2); - NetTensor *norm(int L_n); - NetTensor *mean(Chl axis); - NetTensor *where(float data); - 
NetTensor *where(float data, Chl axis); - - /* Overload the operators.*/ - NetTensor *operator+(NetTensor *in_1); - NetTensor *operator*(NetTensor *in_1); - NetTensor *operator*(float muln); - NetTensor *operator/(NetTensor *in_1); - NetTensor *operator/(float devr); -} NetTensor; - -} // namespace mllm - -#endif // MLLM_NETPARAMETER_H \ No newline at end of file diff --git a/src/models/qwen2_vl/configuration_qwen2_vl.hpp b/src/models/qwen2_vl/configuration_qwen2_vl.hpp deleted file mode 100644 index d7cce5ade..000000000 --- a/src/models/qwen2_vl/configuration_qwen2_vl.hpp +++ /dev/null @@ -1,61 +0,0 @@ -// -// Created by Rongjie Yi on 25-2-9. -// -#ifndef CONFIG_PHI3V_HPP -#define CONFIG_PHI3V_HPP -#include "models/qwen/configuration_qwen.hpp" -#include "models/vit/configuration_vit.hpp" -// #include - -using namespace mllm; - -class Qwen2VLNameConfig : public ViTNameConfig { - public: - // string token_embd_name = "model.embed_tokens"; - string patch_embed_name = ".patch_embed"; // - string _merger_name = ".merger"; // - string _ln_q_name = ".ln_q"; // - string _m_mlp_0_name = ".mlp.0"; // - string _m_mlp_2_name = ".mlp.2"; // - void init_qwen2vl() { - vison_model_name = "visual"; // - _patch_embedding_name = ".proj"; // - _layer_name = ".blocks."; // - _attn_base_name = "attn."; // - _ffn_base_name = "mlp."; // - _qkv_proj_name = "qkv"; // - _o_proj_name = "proj"; // - _up_proj_name = "fc1"; // - _down_proj_name = "fc2"; // - _attn_norm_name = "norm1"; // - _ffn_norm_name = "norm2"; // - } -}; - -class Qwen2VLConfig : public QWenConfig { -public: - int vision_embed_dim; - int spatial_merge_size= 2; - string projection_cls; - - int bos_token_id = 151643; - int eos_token_id = 151645; - int vision_start_token_id = 151652; - int vision_end_token_id = 151653; - int vision_token_id = 151654; - int image_token_id = 151655; - int video_token_id = 151656; - vector mrope_section = {16, 24, 24}; - - Qwen2VLNameConfig vision_names_config; - Qwen2VLConfig(int token_limit, 
string billions = "1.5b", RoPEType type = HFHUBROPE, int vocab = 32064, string project_cls = "MLP") : - QWenConfig(token_limit, billions, type) { - // names_config.init(type); - projection_cls = project_cls; - hidden_size = 1536; - vision_embed_dim = 1280; - vision_names_config.init_qwen2vl(); - } -}; - -#endif // CONFIG_PHI3V_HPP diff --git a/src/models/transformer/modeling_transformer.hpp b/src/models/transformer/modeling_transformer.hpp deleted file mode 100644 index dbc601ce2..000000000 --- a/src/models/transformer/modeling_transformer.hpp +++ /dev/null @@ -1,145 +0,0 @@ -// -// Created by Rongjie Yi on 24-2-29. -// - -#ifndef MODELING_TRANSFORMER_HPP -#define MODELING_TRANSFORMER_HPP - -#include "Layer.hpp" -#include "Types.hpp" -#include "configuration_transformer.hpp" -#include - -using namespace mllm; - -class MultiHeadAttention final : public Module { - Layer qkv_proj; - Layer q_proj; - Layer k_proj; - Layer v_proj; - RoPE q_rope; - RoPE k_rope; - Layer q_norm; - Layer k_norm; - KVCache k_cache; - KVCache v_cache; - Softmax softmax; - Layer o_proj; - Parameter bias_k; - Parameter bias_v; - int head_size_{}; - int kv_head_size_{}; - int attn_hidden_dim_{}; - Chl split_chl_{}; - -public: - MultiHeadAttention() = default; - MultiHeadAttention(int hidden_dim, int head_size, int kv_head_size, int attn_hidden_dim, - AttnQKVSplitType do_qkv_proj, bool post_qkv_norm, bool bias_kv_cat, - RoPEType RoPE_type, float rope_theta, int max_position_embeddings, - int cache_limit, bool do_mask, bool bias, - const TransformerNameConfig &names, const string &base_name) { - attn_hidden_dim_ = attn_hidden_dim; - head_size_ = head_size; - kv_head_size_ = kv_head_size; - if (do_qkv_proj > 0) { - qkv_proj = Linear(hidden_dim, head_size * attn_hidden_dim * 3, bias, base_name + names._qkv_proj_name); - split_chl_ = (Chl)do_qkv_proj; - } else { - q_proj = Linear(hidden_dim, head_size * attn_hidden_dim, bias, base_name + names._q_proj_name); - k_proj = Linear(hidden_dim, kv_head_size 
* attn_hidden_dim, bias, base_name + names._k_proj_name); - v_proj = Linear(hidden_dim, kv_head_size * attn_hidden_dim, bias, base_name + names._v_proj_name); - } - if (post_qkv_norm) { - q_norm = LayerNorm(attn_hidden_dim, true, 1e-6, base_name + names._q_norm_name); - k_norm = LayerNorm(attn_hidden_dim, true, 1e-6, base_name + names._k_norm_name); - } - if (RoPE_type > 0) { - q_rope = RoPE(RoPE_type, rope_theta, max_position_embeddings, base_name + "q_rope"); - k_rope = RoPE(RoPE_type, rope_theta, max_position_embeddings, base_name + "k_rope"); - } - if (cache_limit > 0) { - k_cache = KVCache(kv_head_size, attn_hidden_dim, head_size / kv_head_size, cache_limit, base_name + "k_cache"); - v_cache = KVCache(kv_head_size, attn_hidden_dim, head_size / kv_head_size, cache_limit, base_name + "v_cache"); - } - softmax = Softmax(DIMENSION, do_mask, base_name + "softmax"); - o_proj = Linear(head_size * attn_hidden_dim, hidden_dim, bias, base_name + names._o_proj_name); - if (bias_kv_cat) { - bias_k = Parameter(1, 1, head_size, attn_hidden_dim, base_name + "bias_k"); - bias_v = Parameter(1, 1, head_size, attn_hidden_dim, base_name + "bias_v"); - } - } - vector Forward(vector inputs, vector args) override { - Tensor q, k, v; - if (qkv_proj.ready()) { - auto qkv = qkv_proj(inputs[0]); - auto qkv_sp = qkv.split({attn_hidden_dim_, attn_hidden_dim_, attn_hidden_dim_}, split_chl_, head_size_); - q = qkv_sp[0]; - k = qkv_sp[1]; - v = qkv_sp[2]; - } else { - q = q_proj(inputs[0]); - k = k_proj(inputs[1]); - v = v_proj(inputs[2]); - q = q.view(-1, head_size_, -1, attn_hidden_dim_); - k = k.view(-1, kv_head_size_, -1, attn_hidden_dim_); - v = v.view(-1, kv_head_size_, -1, attn_hidden_dim_); - } - if (q_norm.ready() && k_norm.ready()) { - q = q_norm(q); - k = k_norm(k); - } - if (bias_k.ready() && bias_v.ready()) { - k = Tensor::cat({k, bias_k()}, SEQUENCE); - v = Tensor::cat({v, bias_v()}, SEQUENCE); - } - if (q_rope.ready() && k_rope.ready()) { - q = q_rope(q); - k = k_rope(k); - } 
- if (k_cache.ready() && v_cache.ready()) { - k = k_cache(k); - v = v_cache(v); - } - k = k.transpose(SEQUENCE, DIMENSION); - auto qk = Tensor::mm(q, k); - qk = qk / std::sqrt(attn_hidden_dim_); - if (k_cache.ready() && v_cache.ready()) { - qk = softmax(qk, k_cache.getCacheSeqLen()); - } else { - qk = softmax(qk); - } - auto o = Tensor::mm(qk, v); - o = o.view(-1, 1, -1, attn_hidden_dim_ * head_size_); - o = o_proj(o); - return {o}; - } - vector get_cache() { - return {&k_cache, &v_cache}; - } - vector get_rope() { - return {&q_rope, &k_rope}; - } -}; - -class FeedForward final : public Module { - Layer up_proj; - Layer act; - Layer down_proj; - -public: - FeedForward() = default; - FeedForward(int hidden_dim, int ffn_hidden, const string &act_fn_type, bool bias, const TransformerNameConfig &names, const string &base_name) { - up_proj = Linear(hidden_dim, ffn_hidden, bias, base_name + names._up_proj_name); - act = ACT_FN[act_fn_type](base_name + "act"); - down_proj = Linear(ffn_hidden, hidden_dim, bias, base_name + names._down_proj_name); - } - vector Forward(vector inputs, vector args) override { - auto x = up_proj(inputs[0]); - x = act(x); - x = down_proj(x); - return {x}; - } -}; - -#endif // MODELING_TRANSFORMER_HPP diff --git a/src/quantizer/ParamWriter.cpp b/src/quantizer/ParamWriter.cpp deleted file mode 100644 index f1cf39dec..000000000 --- a/src/quantizer/ParamWriter.cpp +++ /dev/null @@ -1,64 +0,0 @@ -// -// Created by Xiang Li on 23-10-30. 
-// - -#include "ParamWriter.hpp" -#include - -ParamWriter::ParamWriter(std::string filename) : - path_(std::move(filename)) { - fp_ = fopen(path_.c_str(), "wb"); - writeInt(fp_, _MAGIC_NUMBER); -} -ParamWriter::~ParamWriter() { - if (fp_ != nullptr) - fclose(fp_); -} -int ParamWriter::calcIndexSize(const vector names) { - int size = 0; - for (const auto &name : names) { - // One Tensor Index Item Contains: Name_Len(Int)+Name(str)+Weights_Len(UInt64)+Offset(UInt64)+DataType(Int) - size += sizeof(int) + name.size() + sizeof(uint64_t) + sizeof(uint64_t) + sizeof(int); - } - return size; -} -void ParamWriter::writeIndex() { - fseek(fp_, sizeof(int32_t) + sizeof(uint64_t), SEEK_SET); - for (const auto ¶m : param_info_) { - writeString(fp_, param.name); - write_u64(fp_, param.size); - write_u64(fp_, param.offset); - writeInt(fp_, param.type); - std::cout<<"write param "< names) { - param_info_.resize(names.size()); - // write 0 padding to preserve space for index - int index_size = calcIndexSize(names); - write_u64(fp_, index_size); - char i[index_size]; - fwrite(&i, sizeof(char), index_size, fp_); -} diff --git a/src/quantizer/QuantWriter.cpp b/src/quantizer/QuantWriter.cpp deleted file mode 100644 index 4a3df85c1..000000000 --- a/src/quantizer/QuantWriter.cpp +++ /dev/null @@ -1,439 +0,0 @@ -#include "ParamWriter.hpp" -#include "ParamLoader.hpp" -#include "Types.hpp" -#include -#include "QuantWriter.hpp" -#include "backends/cpu/compute/GEMM_AArch64.hpp" -namespace mllm { -QuantWriter::QuantWriter(std::string output_path, std::string input_path) : - ParamWriter(output_path), output_path_(output_path) { - param_loader_ = new mllm::ParamLoader(std::move(input_path)); - if (param_loader_ == nullptr) { - __exit(-1); - } -} -QuantWriter::~QuantWriter() { -#ifdef TEST - for (auto &item : data_) { - delete[] item.second; - } -#endif -}; -int QuantWriter::readParams() { - param_names_ = param_loader_->getParamNames(); - paddingIndex(param_names_); - return param_names_.size(); 
-} -float *QuantWriter::getParam(std::string param_name) { - auto type = param_loader_->data_type_[param_name]; - if (type != DataType::MLLM_TYPE_F32) { - return nullptr; - } - auto [data, size] = param_loader_->load(param_name); - return static_cast((void *)data); -} -bool find_names(const string &name, const vector &layer_names) { - if ("vision_embed_tokens" == name) { // for FUYU - return true; - } - for (const auto &layer : layer_names) { - if (name.find(layer) != std::string::npos) { - return true; - } - } - return false; -} - -vector fp32_layers = { - "norm", - "rope", - "bias", - "rotary_emb", - "embed_tokens", - "_GN", - "class_embedding", - // "vision_embed_tokens", - "embeddings", - "logit_scale", - //"tok_embeddings", - "modality_preprocessors", - "modality_heads", - "modality_postprocessors", - "pre_transformer_layer", - "pos_embed.inv_freq", - "ln_q", - "patch_embed.proj", - "lm_head.weight", - // MoE - "mlp.gate.", -}; -vector q6_layers = { - "w2", - "wv", - "dense_h_to_4h", - "v_proj", - "down_proj", -}; -vector q2k_layers = { - // "mlp.experts", -}; - -vector q3k_layers = { - // "mlp.experts", -}; - -int tmp_hidden_dim = -1; -void QuantWriter::quantParams(DataType dataType) { - quant_type_ = dataType; - for (const auto &name : param_names_) { - // int force_quant_type = -1; - auto *param = getParam(name); - if (param == nullptr) { - __exit(-1); - } - auto size = param_loader_->offsets_[name].second / sizeof(float); - if (find_names(name, {"input_layernorm"})) { - tmp_hidden_dim = size; - } - void *quant_ptr = nullptr; - std::pair block_t; - if (find_names(name, q6_layers) && (dataType == MLLM_TYPE_Q6_K || dataType == MLLM_TYPE_Q4_K || dataType == MLLM_TYPE_Q2_K || dataType == MLLM_TYPE_Q3_K)) { - if (tmp_hidden_dim > 0 && (size / tmp_hidden_dim) % 256 != 0) { - std::cout << "Quantize param " << name << " to " << DataTypeName(MLLM_TYPE_Q4_0) << "\t"; - block_t = alloc_quant_block(size, MLLM_TYPE_Q4_0); - quant_ptr = block_t.first; - 
quantize_row_q4_0(param, quant_ptr, size); - size = block_t.second; - if (quant_ptr != nullptr) { - writeParam(name, MLLM_TYPE_Q4_0, quant_ptr, size); - std::cout << " size:" << size << " type:" << DataTypeName(MLLM_TYPE_Q4_0) << std::endl; - } - - continue; - } - } - if (find_names(name, fp32_layers)) { - std::cout << "Quantize param " << name << " to " << DataTypeName(MLLM_TYPE_F32) << "\t"; - const auto s = param_loader_->offsets_[name].second / sizeof(float); - const auto tsize = alloc_quant_block(s, MLLM_TYPE_F32).second; - writeParam(name, MLLM_TYPE_F32, param, tsize); - std::cout << " size:" << tsize << std::endl; - - } else if (find_names(name, q6_layers)) { - switch (dataType) { - case MLLM_TYPE_F32: - std::cout << "No need to quantize FP32 params\n"; - __exit(-1); - break; - case MLLM_TYPE_Q4_0: - std::cout << "Quantize param " << name << " to " << DataTypeName(dataType) << "\t"; - block_t = alloc_quant_block(size, dataType); - quant_ptr = block_t.first; - quantize_row_q4_0(param, quant_ptr, size); - size = block_t.second; - break; - case MLLM_TYPE_Q4_K: - case MLLM_TYPE_Q6_K: - std::cout << "Quantize param " << name << " to " << DataTypeName(MLLM_TYPE_Q6_K) << "\t"; - block_t = alloc_quant_block(size, MLLM_TYPE_Q6_K); - quant_ptr = block_t.first; - quantize_row_q6_K(param, quant_ptr, size); - size = block_t.second; - break; - case MLLM_TYPE_Q8_0: - std::cout << "Quantize param " << name << " to " << DataTypeName(dataType) << "\t"; - block_t = alloc_quant_block(size, dataType); - quant_ptr = block_t.first; - quantize_row_q8_0(param, quant_ptr, size); - size = block_t.second; - break; - case MLLM_TYPE_Q8_K: - std::cout << "Quantize param " << name << " to " << DataTypeName(dataType) << "\t"; - block_t = alloc_quant_block(size, dataType); - quant_ptr = block_t.first; - quantize_row_q8_K(param, quant_ptr, size); - size = block_t.second; - break; - case MLLM_TYPE_Q4_0_4_4: - std::cout << "Quantize param " << name << " to " << DataTypeName(dataType) << "\t"; - 
block_t = alloc_quant_block(size, dataType); - quant_ptr = block_t.first; - quantize_row_q4_0_4x4(param, quant_ptr, size); - size = block_t.second; - break; - default: - break; - } - if (quant_ptr != nullptr) { - if ((dataType == MLLM_TYPE_Q4_K) | (dataType == MLLM_TYPE_Q6_K)) { - writeParam(name, MLLM_TYPE_Q6_K, quant_ptr, size); - std::cout << " size:" << size << " type:" << DataTypeName(MLLM_TYPE_Q6_K) << std::endl; - } else { - writeParam(name, quant_type_, quant_ptr, size); - std::cout << " size:" << size << " type:" << DataTypeName(quant_type_) << std::endl; - } - } - } else if (find_names(name, q3k_layers)) { - std::cout << "Quantize param " << name << " to " << DataTypeName(MLLM_TYPE_Q3_K) << "\t"; - block_t = alloc_quant_block(size, dataType); - quant_ptr = block_t.first; - quantize_row_q3_K(param, quant_ptr, size); - size = block_t.second; - if (quant_ptr != nullptr) { - writeParam(name, MLLM_TYPE_Q3_K, quant_ptr, size); - std::cout << " size:" << size << " type:" << DataTypeName(MLLM_TYPE_Q3_K) << std::endl; - } - } else if (find_names(name, q2k_layers)) { - std::cout << "Quantize param " << name << " to " << DataTypeName(MLLM_TYPE_Q2_K) << "\t"; - block_t = alloc_quant_block(size, dataType); - quant_ptr = block_t.first; - quantize_row_q2_K(param, quant_ptr, size); - size = block_t.second; - if (quant_ptr != nullptr) { - writeParam(name, MLLM_TYPE_Q2_K, quant_ptr, size); - std::cout << " size:" << size << " type:" << DataTypeName(MLLM_TYPE_Q2_K) << std::endl; - } - } else { - std::cout << "Quantize param " << name << " to " << DataTypeName(dataType) << "\t"; - switch (dataType) { - case MLLM_TYPE_F32: - std::cout << "No need to quantize FP32 params\n"; - __exit(-1); - break; - case MLLM_TYPE_Q4_0: - block_t = alloc_quant_block(size, dataType); - quant_ptr = block_t.first; - quantize_row_q4_0(param, quant_ptr, size); - size = block_t.second; - break; - case MLLM_TYPE_Q8_0: - block_t = alloc_quant_block(size, dataType); - quant_ptr = block_t.first; - 
quantize_row_q8_0(param, quant_ptr, size); - size = block_t.second; - break; - case MLLM_TYPE_Q4_K: - block_t = alloc_quant_block(size, dataType); - quant_ptr = block_t.first; - quantize_row_q4_K(param, quant_ptr, size); - size = block_t.second; - break; - case MLLM_TYPE_Q6_K: - block_t = alloc_quant_block(size, dataType); - quant_ptr = block_t.first; - quantize_row_q6_K(param, quant_ptr, size); - size = block_t.second; - break; - case MLLM_TYPE_Q8_K: - block_t = alloc_quant_block(size, dataType); - quant_ptr = block_t.first; - quantize_row_q8_K(param, quant_ptr, size); - size = block_t.second; - break; - case MLLM_TYPE_Q4_0_4_4: - block_t = alloc_quant_block(size, dataType); - quant_ptr = block_t.first; - quantize_row_q4_0_4x4(param, quant_ptr, size); - size = block_t.second; - break; - case MLLM_TYPE_I8: - case MLLM_TYPE_Q4_1: - case MLLM_TYPE_Q8_1: - case MLLM_TYPE_I16: - case MLLM_TYPE_I32: - case MLLM_TYPE_F16: - NOT_IMPLEMENTED(dataType); - break; - case MLLM_TYPE_COUNT: - UNREACHABLE() - break; - default: - break; - } - if (quant_ptr != nullptr) { - writeParam(name, quant_type_, quant_ptr, size); - std::cout << " size:" << size << std::endl; - } - // writeParam(name, quant_type_, quant_ptr, size); -#ifndef TEST - delete[] (char *)quant_ptr; -#endif - } - } - writeIndex(); -} -vector q4x4_2_q4_layers = { - "wv", - "v_proj", - "qkv_proj", - // dclm - "in_proj", - "w12", - "model.output", -}; -bool dclm_flag = false; -void QuantWriter::quantParams_q4_(DataType dataType) { - bool do_quantParams_q4_vl = false; - for (const auto &name : param_names_) { - auto size = param_loader_->offsets_[name].second / sizeof(float); - if (find_names(name, {"visual"})) { - do_quantParams_q4_vl = true; - } - } - if (do_quantParams_q4_vl) { - return quantParams_q4_vl(dataType); - } - - for (const auto &name : param_names_) { - auto size = param_loader_->offsets_[name].second / sizeof(float); - if (find_names(name, {"norm"})) { - tmp_hidden_dim = size; - } - if (find_names(name, 
{"w12"})) { - dclm_flag = true; - } - } - quant_type_ = dataType; - for (const auto &name : param_names_) { - auto *param = getParam(name); - if (param == nullptr) { - __exit(-1); - } - auto size = param_loader_->offsets_[name].second / sizeof(float); - if (find_names(name, {"norm"})) { - tmp_hidden_dim = size; - } - void *quant_ptr = nullptr; - std::pair block_t; - if (find_names(name, fp32_layers)) { - std::cout << "Quantize param " << name << " to " << DataTypeName(MLLM_TYPE_F32) << "\t"; - const auto s = param_loader_->offsets_[name].second / sizeof(float); - const auto tsize = alloc_quant_block(s, MLLM_TYPE_F32).second; - writeParam(name, MLLM_TYPE_F32, param, tsize); - std::cout << " size:" << tsize << std::endl; - } else if (find_names(name, q4x4_2_q4_layers)) { - std::cout << "Quantize param " << name << " to " << DataTypeName(MLLM_TYPE_Q4_0) << "\t"; - block_t = alloc_quant_block(size, MLLM_TYPE_Q4_0); - quant_ptr = block_t.first; - quantize_row_q4_0(param, quant_ptr, size); - size = block_t.second; - if (quant_ptr != nullptr) { - writeParam(name, MLLM_TYPE_Q4_0, quant_ptr, size); - std::cout << " size:" << size << " type:" << DataTypeName(MLLM_TYPE_Q4_0) << std::endl; - } - } else { - std::cout << "Quantize param " << name << " to " << DataTypeName(dataType) << "\t"; - block_t = alloc_quant_block(size, dataType); - quant_ptr = block_t.first; - int tmp_hidden_dim_q4 = tmp_hidden_dim; - if (find_names(name, {"w2", "down_proj"}) || (dclm_flag && find_names(name, {"w3"}))) { - tmp_hidden_dim_q4 = (size / tmp_hidden_dim); - } - quantize_row_q4_0_4x4(param, quant_ptr, size, tmp_hidden_dim_q4); - size = block_t.second; - if (quant_ptr != nullptr) { - writeParam(name, quant_type_, quant_ptr, size); - std::cout << " size:" << size << std::endl; - } -#ifndef TEST - delete[] (char *)quant_ptr; -#endif - } - } - writeIndex(); -} - -vector vl_q4x4_2_q4_layers = { - "wv", - "v_proj", - ".attn.qkv", - "in_proj", - "w12", - "model.output", - // "embed_tokens", - "mlp.0", 
- "mlp.2", - - // "visual", - // "q_proj", - // "model", -}; -int vit_tmp_hidden_dim = -1; -void QuantWriter::quantParams_q4_vl(DataType dataType) { - for (const auto &name : param_names_) { - auto size = param_loader_->offsets_[name].second / sizeof(float); - if (find_names(name, {"input_layernorm"}) && find_names(name, {"model"})) { - tmp_hidden_dim = size; - } - if (find_names(name, {"norm"}) && find_names(name, {"visual"})) { - vit_tmp_hidden_dim = size; - } - } - std::cout << "tmp_hidden_dim:" << tmp_hidden_dim << std::endl; - std::cout << "vit_tmp_hidden_dim:" << vit_tmp_hidden_dim << std::endl; - quant_type_ = dataType; - for (const auto &name : param_names_) { - auto *param = getParam(name); - if (param == nullptr) { - __exit(-1); - } - auto size = param_loader_->offsets_[name].second / sizeof(float); - // if (find_names(name, {"input_layernorm"})) { - // tmp_hidden_dim = size; - // } - void *quant_ptr = nullptr; - std::pair block_t; - if (find_names(name, fp32_layers)) { - std::cout << "Quantize param " << name << " to " << DataTypeName(MLLM_TYPE_F32) << "\t"; - const auto s = param_loader_->offsets_[name].second / sizeof(float); - const auto tsize = alloc_quant_block(s, MLLM_TYPE_F32).second; - writeParam(name, MLLM_TYPE_F32, param, tsize); - std::cout << " size:" << tsize << std::endl; - } else if (find_names(name, vl_q4x4_2_q4_layers)) { - std::cout << "Quantize param " << name << " to " << DataTypeName(MLLM_TYPE_Q4_0) << "\t"; - block_t = alloc_quant_block(size, MLLM_TYPE_Q4_0); - quant_ptr = block_t.first; - quantize_row_q4_0(param, quant_ptr, size); - size = block_t.second; - if (quant_ptr != nullptr) { - writeParam(name, MLLM_TYPE_Q4_0, quant_ptr, size); - std::cout << " size:" << size << " type:" << DataTypeName(MLLM_TYPE_Q4_0) << std::endl; - } - } else { - std::cout << "Quantize param " << name << " to " << DataTypeName(dataType) << "\t"; - block_t = alloc_quant_block(size, dataType); - quant_ptr = block_t.first; - if (find_names(name, 
{"visual"})) { - int tmp_hidden_dim_q4 = vit_tmp_hidden_dim; - if (find_names(name, {"fc2", "down_proj"})) { - tmp_hidden_dim_q4 = (size / vit_tmp_hidden_dim); - } - quantize_row_q4_0_4x4(param, quant_ptr, size, tmp_hidden_dim_q4); - } else { - int tmp_hidden_dim_q4 = tmp_hidden_dim; - if (find_names(name, {"w2", "down_proj"})) { - tmp_hidden_dim_q4 = (size / tmp_hidden_dim); - } - quantize_row_q4_0_4x4(param, quant_ptr, size, tmp_hidden_dim_q4); - } - size = block_t.second; - if (quant_ptr != nullptr) { - writeParam(name, quant_type_, quant_ptr, size); - std::cout << " size:" << size << std::endl; - } -#ifndef TEST - delete[] (char *)quant_ptr; -#endif - } - } - writeIndex(); -} - -void QuantWriter::writeParam(string name, DataType type, void *data, uint64_t size) { -#ifdef TEST - data_[name] = (char *)data; -#endif - ParamWriter::writeParam(name, type, data, size); -} - -} // namespace mllm \ No newline at end of file diff --git a/src/quantizer/QuantWriter.hpp b/src/quantizer/QuantWriter.hpp deleted file mode 100644 index 0939420d2..000000000 --- a/src/quantizer/QuantWriter.hpp +++ /dev/null @@ -1,57 +0,0 @@ -#include "ParamWriter.hpp" -#include "ParamLoader.hpp" -#include "backends/cpu/quantize/QuantizeQ6.hpp" -#include "backends/cpu/quantize/QuantizeQ2.hpp" -#include "backends/cpu/quantize/QuantizeQ3.hpp" -#include "backends/cpu/quantize/QuantizeQ4.hpp" -#include "backends/cpu/quantize/QuantizeQ8.hpp" -#include -#include -#ifndef MLLM_QUANTWRITER_HPP -#define MLLM_QUANTWRITER_HPP -#define NOT_IMPLEMENTED(x) \ - std::cout << "Quantize params to " << DataTypeName(x) << " is not implemented\n"; \ - __exit(-1); -#define UNREACHABLE() \ - std::cout << "Unreachable code\n"; \ - __exit(-1); -#define __exit(status) \ - { \ - if (status != 0) { \ - std::cout << "Quantize failed\n"; \ - remove(output_path_.c_str()); \ - } \ - exit(status); \ - } -static std::pair alloc_quant_block(uint64_t count, DataType type) { - uint64_t size = DataTypeSize(type, count); - if (size <= 
0) { - return std::make_pair(nullptr, 0); - } - void *data = new char[size]; - return std::make_pair(data, size); -} -namespace mllm { -class QuantWriter : public ParamWriter { -public: - ~QuantWriter(); - explicit QuantWriter(std::string output_path, std::string input_path); - int readParams(); - void quantParams(DataType dataType); - void quantParams_q4_(DataType dataType); - void quantParams_q4_vl(DataType dataType); - -#ifdef TEST - std::unordered_map data_; - -#endif -private: - string output_path_; - mllm::ParamLoader *param_loader_; - DataType quant_type_; - std::vector param_names_; - float *getParam(std::string param_name); - void writeParam(string name, DataType type, void *data, uint64_t size) override; -}; -} // namespace mllm -#endif \ No newline at end of file diff --git a/src/quantizer/main.cpp b/src/quantizer/main.cpp deleted file mode 100644 index c91c5664c..000000000 --- a/src/quantizer/main.cpp +++ /dev/null @@ -1,48 +0,0 @@ -// -// Created by Xiang Li on 23-10-31. -// -#include "ParamWriter.hpp" -#include "ParamLoader.hpp" -#include -#include "QuantWriter.hpp" - -int main(int argc, char **argv) { - if (argc != 4) { - std::cout << "Usage: ./quantize \n"; - return -1; - } - auto input_path = std::string(argv[1]); - auto output_path = std::string(argv[2]); - auto quant_type = std::string(argv[3]); - // std::string input_path = "../models/showui-2b-fp32.mllm"; - // std::string output_path = "../models/showui-2b-q4_0_4_4.mllm"; - // std::string quant_type = "Q4_0_4_4"; - mllm::QuantWriter quant_writer(output_path, input_path); - int param_count = quant_writer.readParams(); - if (param_count <= 0) { - std::cout << "No params to quantize\n"; - return -1; - } - std::cout << "Quantize " << param_count << " params to " << quant_type << "\n"; - if (quant_type == "Q4_0") { - quant_writer.quantParams(MLLM_TYPE_Q4_0); - } else if (quant_type == "Q8_0") { - quant_writer.quantParams(MLLM_TYPE_Q8_0); - } else if (quant_type == "Q2_K") { - 
quant_writer.quantParams(MLLM_TYPE_Q2_K); - } else if (quant_type == "Q3_K") { - quant_writer.quantParams(MLLM_TYPE_Q3_K); - } else if (quant_type == "Q4_K") { - quant_writer.quantParams(MLLM_TYPE_Q4_K); - } else if (quant_type == "Q6_K") { - quant_writer.quantParams(MLLM_TYPE_Q6_K); - } else if (quant_type == "Q8_K") { - quant_writer.quantParams(MLLM_TYPE_Q8_K); - } else if (quant_type == "Q4_0_4_4") { - quant_writer.quantParams_q4_(MLLM_TYPE_Q4_0_4_4); - } else { - std::cout << "Quant type " << quant_type << " is not supported\n"; - return -1; - } - return 0; -} \ No newline at end of file diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 89b8a17eb..b3f87a2be 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -13,7 +13,7 @@ if(MLLM_BUILD_XNNPACK_BACKEND) ${PROJECT_SOURCE_DIR}/test/xnnpack/*.cpp ) list(APPEND MLLM_XNNPACK_BACKEND_TEST_SRC ${_MLLM_XNNPACK_BACKEND_TEST_SRC}) - list(APPEND MLLM_XNNPACK_BACKEND_TEST_DEP_LIB MllmXnnpackBackend) + list(APPEND MLLM_XNNPACK_BACKEND_TEST_DEP_LIB mllm_xnnpack) list(REMOVE_ITEM MLLM_XNNPACK_BACKEND_TEST_SRC ${PROJECT_SOURCE_DIR}/test/xnnpack/XpEmbeddingTest.cpp) endif() @@ -37,13 +37,13 @@ add_executable( ${PROJECT_SOURCE_DIR}/test/main.cpp ${PROJECT_SOURCE_DIR}/test/TestLoader.cpp ${MLLM_TEST} - ${DIR_SRC_CPU} ${DIR_SRC_MEM_MANAGER} ${DIR_SRC_EXP} ${DIR_SRC} ${MLLM_QUANTIZER} ${SRC_TOKENIZERS} - ${PROJECT_SOURCE_DIR}/src/processor/ClipPreProcess.hpp - ${PROJECT_SOURCE_DIR}/src/processor/ClipPreProcess.cpp - ${PROJECT_SOURCE_DIR}/src/processor/FuyuPreProcess.hpp - ${PROJECT_SOURCE_DIR}/src/processor/FuyuPreProcess.cpp - ${PROJECT_SOURCE_DIR}/src/processor/PreProcess.hpp - ${PROJECT_SOURCE_DIR}/src/processor/PreProcess.cpp + ${DIR_SRC_MEM_MANAGER} ${DIR_SRC_EXP} ${DIR_SRC} ${MLLM_QUANTIZER} ${SRC_TOKENIZERS} + ${PROJECT_SOURCE_DIR}/mllm/processor/ClipPreProcess.hpp + ${PROJECT_SOURCE_DIR}/mllm/processor/ClipPreProcess.cpp + ${PROJECT_SOURCE_DIR}/mllm/processor/FuyuPreProcess.hpp + 
${PROJECT_SOURCE_DIR}/mllm/processor/FuyuPreProcess.cpp + ${PROJECT_SOURCE_DIR}/mllm/processor/PreProcess.hpp + ${PROJECT_SOURCE_DIR}/mllm/processor/PreProcess.cpp ${PROJECT_SOURCE_DIR}/test/processor/ClipPreprocessorTest.cpp # xnnpack @@ -53,10 +53,14 @@ add_executable( target_link_libraries( MLLM_TEST GTest::gtest_main - MLLM_CPU + mllm_cpu -fopenmp ${MLLM_XNNPACK_BACKEND_TEST_DEP_LIB} ) + +if(OPENCL) + target_link_libraries(MLLM_TEST mllm_opencl) +endif() # add_executable( # memoryPoolTest # ${PROJECT_SOURCE_DIR}/test/TestMemoryPoolManager.cpp diff --git a/test/quantizer/WriterTest.cpp b/test/quantizer/WriterTest.cpp index d6916ac48..82294d4e7 100644 --- a/test/quantizer/WriterTest.cpp +++ b/test/quantizer/WriterTest.cpp @@ -3,12 +3,17 @@ // #include "gtest/gtest.h" #include +#include // #include "ParamLoader.hpp" #include "ParamWriter.hpp" #include "QuantWriter.hpp" #include "QuantTest.hpp" #include "Types.hpp" +#include "backends/cpu/third_party/ggml/QuantizeQ4.hpp" // For manual quantization + namespace mllm { + +// ReadTest 不需要修改,因为 ParamLoader 的读取接口保持兼容 TEST_F(QuantTest, ReadTest) { auto loader = ParamLoader("../bin/quant_test.mllm"); auto tensor_name = loader.getParamNames(); @@ -17,44 +22,95 @@ TEST_F(QuantTest, ReadTest) { auto [data, size] = loader.load("weight_f0"); ASSERT_EQ(data[0], 0.0); ASSERT_EQ(data[1], 0.0); + delete[] data; // 释放 loader.load 返回的内存 } + +// ** [修改] ** WriteTest 使用新的流式写入 API TEST_F(QuantTest, WriteTest) { auto *loader = new ParamLoader("../bin/quant_test.mllm"); auto *writer = new ParamWriter("../bin/quant_result.mllm"); auto tensor_name = loader->getParamNames(); writer->paddingIndex(tensor_name); ASSERT_EQ(tensor_name.size(), 2); - std::unordered_map ori_data; - for (auto tensor : tensor_name) { + + // 使用 vector 来管理内存,避免手动 new/delete + std::unordered_map> ori_data; + + for (const auto &tensor : tensor_name) { auto [data, size] = loader->load(tensor); - ori_data[tensor] = data; - writer->writeParam(tensor, 
DataType::MLLM_TYPE_F32, data, size); + + // 存储原始数据以供后续比较 + ori_data[tensor].resize(size); + memcpy(ori_data[tensor].data(), data, size); + + // ** 使用新的三段式流式写入 ** + writer->beginWriteParam(tensor, DataType::MLLM_TYPE_F32); + writer->writeChunk(data, size); + writer->endWriteParam(); + + delete[] data; // 释放 loader->load 返回的内存 } + writer->writeIndex(); delete writer; delete loader; + + // 验证写入的文件 auto loader2 = ParamLoader("../bin/quant_result.mllm"); auto tensor_name2 = loader2.getParamNames(); ASSERT_EQ(tensor_name2.size(), 2); - ASSERT_EQ(loader2.getDataType(tensor_name2[0]), DataType::MLLM_TYPE_F32); - auto [data, size] = loader2.load("weight_f1"); - float *fdata = (float *)data; - for (int i = 0; i < size / sizeof(float); i++) { - ASSERT_EQ(fdata[i], ori_data["weight_f1"][i]); + ASSERT_EQ(loader2.getDataType("weight_f1"), DataType::MLLM_TYPE_F32); + + auto [data2, size2] = loader2.load("weight_f1"); + float *fdata = (float *)data2; + unsigned char *original_raw_data = ori_data["weight_f1"].data(); + + // 逐字节比较 + for (size_t i = 0; i < size2; i++) { + ASSERT_EQ(data2[i], original_raw_data[i]); } + delete[] data2; } + +// ** [修改] ** QuantTest 不再依赖 QuantWriter 内部状态,而是测试端到端的文件输出 TEST_F(QuantTest, QuantTest) { - auto *quant = new QuantWriter("../bin/quant_result.mllm", "../bin/quant_test.mllm"); - ASSERT_EQ(quant->readParams(), 2); - quant->quantParams(DataType::MLLM_TYPE_Q4_0); - ASSERT_EQ(quant->data_.size(), 2); - // delete quant; - auto loader = ParamLoader("../bin/quant_result.mllm"); - auto tensor_name = loader.getParamNames(); - ASSERT_EQ(tensor_name.size(), 2); - ASSERT_EQ(loader.getDataType(tensor_name[0]), DataType::MLLM_TYPE_Q4_0); - auto [data, size] = loader.load("weight_f1"); - auto *ori_data = quant->data_["weight_f1"]; - ASSERT_TRUE(compare_eq(reinterpret_cast(ori_data), reinterpret_cast(data))); + const std::string input_path = "../bin/quant_test.mllm"; + const std::string output_path = "../bin/quant_result.mllm"; + const std::string 
target_tensor_name = "weight_f1"; + + // 1. 执行量化,生成输出文件 + auto *quant_writer = new QuantWriter(output_path, input_path); + ASSERT_EQ(quant_writer->readParams(), 2); + quant_writer->quantize(DataType::MLLM_TYPE_Q4_0, ""); // 使用新的 quantize API + delete quant_writer; + + // 2. 加载原始的 FP32 数据,用于生成“期望”的量化结果 + auto *original_loader = new ParamLoader(input_path); + auto [original_data_ptr, original_size] = original_loader->load(target_tensor_name); + + // 3. 在测试中手动进行量化,得到期望的结果 + uint64_t num_floats = original_size / sizeof(float); + auto block_t = alloc_quant_block(num_floats, DataType::MLLM_TYPE_Q4_0); + void *expected_quant_data = block_t.first; + quantize_row_q4_0(reinterpret_cast(original_data_ptr), expected_quant_data, num_floats); + + // 4. 从 QuantWriter 生成的文件中加载实际的量化结果 + auto result_loader = ParamLoader(output_path); + auto tensor_names_from_result = result_loader.getParamNames(); + ASSERT_EQ(tensor_names_from_result.size(), 2); + ASSERT_EQ(result_loader.getDataType(target_tensor_name), DataType::MLLM_TYPE_Q4_0); + auto [actual_quant_data_ptr, actual_quant_size] = result_loader.load(target_tensor_name); + + // 5. 比较期望的量化结果和实际的量化结果 + ASSERT_EQ(block_t.second, actual_quant_size); // 尺寸应该一致 + ASSERT_TRUE(compare_eq( + reinterpret_cast(expected_quant_data), + reinterpret_cast(actual_quant_data_ptr))); + + // 6. 
清理内存 + delete[] (char *)expected_quant_data; + delete[] original_data_ptr; + delete[] actual_quant_data_ptr; + delete original_loader; } -} // namespace mllm +} // namespace mllm \ No newline at end of file diff --git a/test/xnnpack/XpCausalMaskTest.cpp b/test/xnnpack/XpCausalMaskTest.cpp index 3775efb4e..a3e5a32aa 100644 --- a/test/xnnpack/XpCausalMaskTest.cpp +++ b/test/xnnpack/XpCausalMaskTest.cpp @@ -30,13 +30,13 @@ TEST_F(XpTest, CausalMaskModule) { auto model = mllm::xnnpack::wrap2xnn(1, 1); model.setNoLoadWeightsDtype(DataType::MLLM_TYPE_F32); - EXPECT_EQ(Backend::global_backends[MLLM_XNNPACK] != nullptr, true); + EXPECT_EQ(Backend::global_backends[MLLM_XNNPACK].get() != nullptr, true); if (XnnpackBackend::enable_legacy_wrapper == false) { Log::warn("This test method is dropped. But tested ok in legacy wrapper mode"); return; } - Tensor x(1, 1, 8, 8, Backend::global_backends[MLLM_XNNPACK], true); + Tensor x(1, 1, 8, 8, Backend::global_backends[MLLM_XNNPACK].get(), true); x.setTtype(TensorType::INPUT_TENSOR); auto start = std::chrono::high_resolution_clock::now(); diff --git a/test/xnnpack/XpDynamicShapeTest.cpp b/test/xnnpack/XpDynamicShapeTest.cpp index 1e4838388..1de987601 100644 --- a/test/xnnpack/XpDynamicShapeTest.cpp +++ b/test/xnnpack/XpDynamicShapeTest.cpp @@ -14,6 +14,7 @@ #include "xnnpack.h" #include "Tensor.hpp" #include "backends/xnnpack/Utils/Logger.hpp" +#include "Context.hpp" using namespace mllm; @@ -51,14 +52,14 @@ TEST_F(XpTest, XpDyanmicShape) { mllm::xnnpack::Log::log_level = mllm::xnnpack::Log::INFO; std::array input_1_shape{1, 16, 1, 2048}; std::array output_1_shape{1, 16, 1, 1024}; - Tensor inputs_1(1, 1, 16, 2048, Backend::global_backends[MLLM_XNNPACK], true); - Tensor outputs_1(1, 1, 16, 1024, Backend::global_backends[MLLM_XNNPACK], true); + Tensor inputs_1(1, 1, 16, 2048, Backend::global_backends[MLLM_XNNPACK].get(), true); + Tensor outputs_1(1, 1, 16, 1024, Backend::global_backends[MLLM_XNNPACK].get(), true); // define inputs 2 
[B=1, S=32, H=1, D=2048]. std::array input_2_shape{1, 32, 1, 2048}; std::array output_2_shape{1, 32, 1, 1024}; - Tensor inputs_2(1, 1, 32, 2048, Backend::global_backends[MLLM_XNNPACK], true); - Tensor outputs_2(1, 1, 32, 1024, Backend::global_backends[MLLM_XNNPACK], true); + Tensor inputs_2(1, 1, 32, 2048, Backend::global_backends[MLLM_XNNPACK].get(), true); + Tensor outputs_2(1, 1, 32, 1024, Backend::global_backends[MLLM_XNNPACK].get(), true); std::vector exts; diff --git a/test/xnnpack/XpEmbeddingTest.cpp b/test/xnnpack/XpEmbeddingTest.cpp index ad301ff7c..32e1ca21b 100644 --- a/test/xnnpack/XpEmbeddingTest.cpp +++ b/test/xnnpack/XpEmbeddingTest.cpp @@ -47,13 +47,13 @@ TEST_F(XpTest, CPUAndXnnMixed) { model.setNoLoadWeightsDtype(DataType::MLLM_TYPE_F32); model.to(BackendType::MLLM_XNNPACK); - EXPECT_EQ(Backend::global_backends[MLLM_XNNPACK] != nullptr, true); + EXPECT_EQ(Backend::global_backends[MLLM_XNNPACK].get() != nullptr, true); if (XnnpackBackend::enable_legacy_wrapper == false) { Log::warn("This test method is dropped. But tested ok in legacy wrapper mode"); return; } - Tensor x(1, 1, 10, 1, Backend::global_backends[MLLM_XNNPACK], true); + Tensor x(1, 1, 10, 1, Backend::global_backends[MLLM_XNNPACK].get(), true); x.setTtype(TensorType::INPUT_TENSOR); for (int i = 0; i < 10; ++i) { diff --git a/test/xnnpack/XpExternalTensorTest.cpp b/test/xnnpack/XpExternalTensorTest.cpp index 760aa8baf..0fab51043 100644 --- a/test/xnnpack/XpExternalTensorTest.cpp +++ b/test/xnnpack/XpExternalTensorTest.cpp @@ -24,14 +24,14 @@ class AddModule : public Module { TEST_F(XpTest, AddModule) { auto model = ::mllm::xnnpack::wrap2xnn(2, 1); - EXPECT_EQ(Backend::global_backends[MLLM_XNNPACK] != nullptr, true); + EXPECT_EQ(Backend::global_backends[MLLM_XNNPACK].get() != nullptr, true); if (XnnpackBackend::enable_legacy_wrapper == false) { Log::warn("This test method is dropped. 
But tested ok in legacy wrapper mode"); return; } - Tensor x1(1, 1, 4, 4, Backend::global_backends[MLLM_XNNPACK], true); - Tensor x2(1, 1, 4, 4, Backend::global_backends[MLLM_XNNPACK], true); + Tensor x1(1, 1, 4, 4, Backend::global_backends[MLLM_XNNPACK].get(), true); + Tensor x2(1, 1, 4, 4, Backend::global_backends[MLLM_XNNPACK].get(), true); x1.setTtype(TensorType::INPUT_TENSOR); x2.setTtype(TensorType::INPUT_TENSOR); diff --git a/test/xnnpack/XpKVCacheTest.cpp b/test/xnnpack/XpKVCacheTest.cpp index a4ff3ce0c..9a53ec10d 100644 --- a/test/xnnpack/XpKVCacheTest.cpp +++ b/test/xnnpack/XpKVCacheTest.cpp @@ -5,6 +5,7 @@ #include "backends/xnnpack/Utils/Logger.hpp" #include #include "XpTest.hpp" +#include "Context.hpp" using namespace mllm; @@ -33,13 +34,13 @@ TEST_F(XpTest, KVCacheModule) { auto model = ::mllm::xnnpack::wrap2xnn(1, 1); model.setNoLoadWeightsDtype(DataType::MLLM_TYPE_F32); - EXPECT_EQ(Backend::global_backends[MLLM_XNNPACK] != nullptr, true); + EXPECT_EQ(Backend::global_backends[MLLM_XNNPACK].get() != nullptr, true); if (XnnpackBackend::enable_legacy_wrapper == false) { Log::warn("This test method is dropped. But tested ok in legacy wrapper mode"); return; } - Tensor x(1, 1, 1, 8, Backend::global_backends[MLLM_XNNPACK], true); + Tensor x(1, 1, 1, 8, Backend::global_backends[MLLM_XNNPACK].get(), true); x.setTtype(TensorType::INPUT_TENSOR); for (int i = 0; i < 8; ++i) { diff --git a/test/xnnpack/XpLinearTest.cpp b/test/xnnpack/XpLinearTest.cpp index b0f7f6e28..b60ad0c1e 100644 --- a/test/xnnpack/XpLinearTest.cpp +++ b/test/xnnpack/XpLinearTest.cpp @@ -34,13 +34,13 @@ TEST_F(XpTest, LinearModule) { auto model = ::mllm::xnnpack::wrap2xnn(1, 1); model.setNoLoadWeightsDtype(DataType::MLLM_TYPE_F32); - EXPECT_EQ(Backend::global_backends[MLLM_XNNPACK] != nullptr, true); + EXPECT_EQ(Backend::global_backends[MLLM_XNNPACK].get() != nullptr, true); if (XnnpackBackend::enable_legacy_wrapper == false) { Log::warn("This test method is dropped. 
But tested ok in legacy wrapper mode"); return; } - Tensor x(1, 1, 256, 1024, Backend::global_backends[MLLM_XNNPACK], true); + Tensor x(1, 1, 256, 1024, Backend::global_backends[MLLM_XNNPACK].get(), true); x.setTtype(TensorType::INPUT_TENSOR); for (int i = 0; i < 256 * 1024; ++i) { diff --git a/test/xnnpack/XpLlamaMHATest.cpp b/test/xnnpack/XpLlamaMHATest.cpp index 540f441ad..0689511c2 100644 --- a/test/xnnpack/XpLlamaMHATest.cpp +++ b/test/xnnpack/XpLlamaMHATest.cpp @@ -28,7 +28,7 @@ class XpLLaMAMHA final : public Module { int head_size_ = 0; int kv_head_size_ = 0; - int attn_hidden_dim_ = 0; + int head_dim_ = 0; public: XpLLaMAMHA() = default; @@ -37,16 +37,16 @@ class XpLLaMAMHA final : public Module { int hidden_dim, int head_size, int kv_head_size, - int attn_hidden_dim, + int head_dim, RoPEType RoPE_type, float rope_theta, int max_position_embeddings, int cache_limit, const XpLLaMAMHANameCfg &names, const string &base_name) { - q_proj = Linear(hidden_dim, head_size * attn_hidden_dim, false, base_name + names._q_proj_name); - k_proj = Linear(hidden_dim, kv_head_size * attn_hidden_dim, false, base_name + names._k_proj_name); - v_proj = Linear(hidden_dim, kv_head_size * attn_hidden_dim, false, base_name + names._v_proj_name); + q_proj = Linear(hidden_dim, head_size * head_dim, false, base_name + names._q_proj_name); + k_proj = Linear(hidden_dim, kv_head_size * head_dim, false, base_name + names._k_proj_name); + v_proj = Linear(hidden_dim, kv_head_size * head_dim, false, base_name + names._v_proj_name); q_rope = RoPE(RoPE_type, rope_theta, max_position_embeddings, base_name + "q_rope"); k_rope = RoPE(RoPE_type, rope_theta, max_position_embeddings, base_name + "k_rope"); @@ -54,13 +54,13 @@ class XpLLaMAMHA final : public Module { k_cache = XP_KVCache(head_size / kv_head_size, cache_limit, base_name + "k_cache"); v_cache = XP_KVCache(head_size / kv_head_size, cache_limit, base_name + "v_cache"); - o_proj = Linear(head_size * attn_hidden_dim, hidden_dim, false, 
base_name + names._o_proj_name); + o_proj = Linear(head_size * head_dim, hidden_dim, false, base_name + names._o_proj_name); sdpa = ScaledDotProductAttention("sdpa"); head_size_ = head_size; kv_head_size_ = kv_head_size; - attn_hidden_dim_ = attn_hidden_dim; + head_dim_ = head_dim; } vector @@ -73,9 +73,9 @@ class XpLLaMAMHA final : public Module { // q = q.view(bsz, q_len, num_heads, head_dim) // [B, S, H=heads, D=dim] - q = q.view(-1, head_size_, -1, attn_hidden_dim_); - k = k.view(-1, kv_head_size_, -1, attn_hidden_dim_); - v = v.view(-1, kv_head_size_, -1, attn_hidden_dim_); + q = q.view(-1, head_size_, -1, head_dim_); + k = k.view(-1, kv_head_size_, -1, head_dim_); + v = v.view(-1, kv_head_size_, -1, head_dim_); q = q_rope(q); k = k_rope(k); @@ -94,7 +94,7 @@ class XpLLaMAMHA final : public Module { // [B, H, S, D] -> [B, S, H, D] o = o.transpose(SEQUENCE, HEAD); // [B, S, H, D] -> [B, S, 1, H * D] - o = o.view(-1, 1, -1, attn_hidden_dim_ * head_size_); + o = o.view(-1, 1, -1, head_dim_ * head_size_); o = o_proj(o); return {o}; @@ -120,13 +120,13 @@ TEST_F(XpTest, XpLLaMAMHA) { "base-"); model.setNoLoadWeightsDtype(DataType::MLLM_TYPE_F32); - EXPECT_EQ(Backend::global_backends[MLLM_XNNPACK] != nullptr, true); + EXPECT_EQ(Backend::global_backends[MLLM_XNNPACK].get() != nullptr, true); if (XnnpackBackend::enable_legacy_wrapper == false) { Log::warn("This test method is dropped. 
But tested ok in legacy wrapper mode"); return; } - Tensor x(1, 1, 16, 4096, Backend::global_backends[MLLM_XNNPACK], true); + Tensor x(1, 1, 16, 4096, Backend::global_backends[MLLM_XNNPACK].get(), true); x.setTtype(TensorType::INPUT_TENSOR); auto start = std::chrono::high_resolution_clock::now(); diff --git a/test/xnnpack/XpReLUTest.cpp b/test/xnnpack/XpReLUTest.cpp index 66cf9bfd9..bb9293fdf 100644 --- a/test/xnnpack/XpReLUTest.cpp +++ b/test/xnnpack/XpReLUTest.cpp @@ -28,13 +28,13 @@ TEST_F(XpTest, ReLUModule) { auto model = ::mllm::xnnpack::wrap2xnn(1, 1); model.setNoLoadWeightsDtype(DataType::MLLM_TYPE_F32); - EXPECT_EQ(Backend::global_backends[MLLM_XNNPACK] != nullptr, true); + EXPECT_EQ(Backend::global_backends[MLLM_XNNPACK].get() != nullptr, true); if (XnnpackBackend::enable_legacy_wrapper == false) { Log::warn("This test method is dropped. But tested ok in legacy wrapper mode"); return; } - Tensor x(1, 1, 1024, 1024, Backend::global_backends[MLLM_XNNPACK], true); + Tensor x(1, 1, 1024, 1024, Backend::global_backends[MLLM_XNNPACK].get(), true); x.setTtype(TensorType::INPUT_TENSOR); for (int i = 0; i < 1024 * 1024; ++i) { diff --git a/test/xnnpack/XpRoPETest.cpp b/test/xnnpack/XpRoPETest.cpp index 565106726..9b94c3791 100644 --- a/test/xnnpack/XpRoPETest.cpp +++ b/test/xnnpack/XpRoPETest.cpp @@ -31,14 +31,14 @@ TEST_F(XpTest, RoPEModule) { auto model = ::mllm::xnnpack::wrap2xnn(1, 1); model.setNoLoadWeightsDtype(DataType::MLLM_TYPE_F32); - EXPECT_EQ(Backend::global_backends[MLLM_XNNPACK] != nullptr, true); + EXPECT_EQ(Backend::global_backends[MLLM_XNNPACK].get() != nullptr, true); if (XnnpackBackend::enable_legacy_wrapper == false) { Log::warn("This test method is dropped. But tested ok in legacy wrapper mode"); return; } // rope accpect b, s, h, d. 
- Tensor x(1, 1, 256, 1024, Backend::global_backends[MLLM_XNNPACK], true); + Tensor x(1, 1, 256, 1024, Backend::global_backends[MLLM_XNNPACK].get(), true); x.setTtype(TensorType::INPUT_TENSOR); { diff --git a/test/xnnpack/XpSDPATest.cpp b/test/xnnpack/XpSDPATest.cpp index 37d13b9db..dea6d2f4d 100644 --- a/test/xnnpack/XpSDPATest.cpp +++ b/test/xnnpack/XpSDPATest.cpp @@ -32,7 +32,7 @@ TEST_F(XpTest, SDPAModule) { auto model = ::mllm::xnnpack::wrap2xnn(3, 1); model.setNoLoadWeightsDtype(DataType::MLLM_TYPE_F32); - EXPECT_EQ(Backend::global_backends[MLLM_XNNPACK] != nullptr, true); + EXPECT_EQ(Backend::global_backends[MLLM_XNNPACK].get() != nullptr, true); if (XnnpackBackend::enable_legacy_wrapper == false) { Log::warn("This test method is dropped. But tested ok in legacy wrapper mode"); return; @@ -45,11 +45,11 @@ TEST_F(XpTest, SDPAModule) { int S = 3; int H = 1; int D = 8; - Tensor Q(B, S, H, D, Backend::global_backends[MLLM_XNNPACK], true); + Tensor Q(B, S, H, D, Backend::global_backends[MLLM_XNNPACK].get(), true); Q.setTtype(TensorType::INPUT_TENSOR); - Tensor K(B, S, H, D, Backend::global_backends[MLLM_XNNPACK], true); + Tensor K(B, S, H, D, Backend::global_backends[MLLM_XNNPACK].get(), true); K.setTtype(TensorType::INPUT_TENSOR); - Tensor V(B, S, H, D, Backend::global_backends[MLLM_XNNPACK], true); + Tensor V(B, S, H, D, Backend::global_backends[MLLM_XNNPACK].get(), true); V.setTtype(TensorType::INPUT_TENSOR); // set data diff --git a/test/xnnpack/XpSliceTest.cpp b/test/xnnpack/XpSliceTest.cpp index 08eec9d0c..f5bf868a5 100644 --- a/test/xnnpack/XpSliceTest.cpp +++ b/test/xnnpack/XpSliceTest.cpp @@ -2,6 +2,7 @@ #include "Module.hpp" #include "Types.hpp" #include "xnnpack.h" +#include "Context.hpp" #include "backends/xnnpack/Utils/Logger.hpp" #include #include @@ -12,12 +13,12 @@ using namespace mllm; TEST_F(XpTest, XNNPACK) { - Backend::global_backends.emplace(MLLM_XNNPACK, GetBackendCreator(MLLM_XNNPACK)->create({})); + Module::initBackend(MLLM_XNNPACK); 
mllm::xnnpack::Log::log_level = mllm::xnnpack::Log::INFO; // inputs // B, S, H, D - Tensor x(1, 1, 8, 8, Backend::global_backends[MLLM_XNNPACK], true); + Tensor x(1, 1, 8, 8, Backend::global_backends[MLLM_XNNPACK].get(), true); if (xnn_initialize(nullptr /* allocator */) != xnn_status_success) { ::mllm::xnnpack::Log::error("failed to initialize XNNPACK"); @@ -29,7 +30,7 @@ TEST_F(XpTest, XNNPACK) { xnn_create_subgraph(2, 0, &subgraph); // outputs - Tensor out(1, 1, 8, 8, Backend::global_backends[MLLM_XNNPACK], true); + Tensor out(1, 1, 8, 8, Backend::global_backends[MLLM_XNNPACK].get(), true); // define tensor { diff --git a/test/xnnpack/XpSoftmaxTest.cpp b/test/xnnpack/XpSoftmaxTest.cpp index 5d47888d7..57ba88b72 100644 --- a/test/xnnpack/XpSoftmaxTest.cpp +++ b/test/xnnpack/XpSoftmaxTest.cpp @@ -29,14 +29,14 @@ TEST_F(XpTest, SoftmaxModule) { auto model = ::mllm::xnnpack::wrap2xnn(1, 1); model.setNoLoadWeightsDtype(DataType::MLLM_TYPE_F32); - EXPECT_EQ(Backend::global_backends[MLLM_XNNPACK] != nullptr, true); + EXPECT_EQ(Backend::global_backends[MLLM_XNNPACK].get() != nullptr, true); if (XnnpackBackend::enable_legacy_wrapper == false) { Log::warn("This test method is dropped. 
But tested ok in legacy wrapper mode"); return; } // B, S, H, D - Tensor x(1, 1, 1, 8, Backend::global_backends[MLLM_XNNPACK], true); + Tensor x(1, 1, 1, 8, Backend::global_backends[MLLM_XNNPACK].get(), true); x.setTtype(TensorType::INPUT_TENSOR); for (int i = 0; i < 8; ++i) { diff --git a/test/xnnpack/XpTensorFunctionTest.cpp b/test/xnnpack/XpTensorFunctionTest.cpp index 019c2c570..e3cb2af59 100644 --- a/test/xnnpack/XpTensorFunctionTest.cpp +++ b/test/xnnpack/XpTensorFunctionTest.cpp @@ -31,8 +31,8 @@ TEST_F(XpTest, TTSub) { return; } - Tensor x1(1, 1, 4, 4, Backend::global_backends[MLLM_XNNPACK], true); - Tensor x2(1, 1, 4, 4, Backend::global_backends[MLLM_XNNPACK], true); + Tensor x1(1, 1, 4, 4, Backend::global_backends[MLLM_XNNPACK].get(), true); + Tensor x2(1, 1, 4, 4, Backend::global_backends[MLLM_XNNPACK].get(), true); x1.setTtype(TensorType::INPUT_TENSOR); x2.setTtype(TensorType::INPUT_TENSOR); diff --git a/test/xnnpack/XpTransposeTest.cpp b/test/xnnpack/XpTransposeTest.cpp index b75557a4f..816b83d91 100644 --- a/test/xnnpack/XpTransposeTest.cpp +++ b/test/xnnpack/XpTransposeTest.cpp @@ -1,6 +1,7 @@ #include "Layer.hpp" #include "Module.hpp" #include "Types.hpp" +#include "Context.hpp" #include "backends/xnnpack/XpWrapper.hpp" #include "backends/xnnpack/Utils/Logger.hpp" #include "xnnpack.h" @@ -29,14 +30,14 @@ TEST_F(XpTest, TransposeModule) { auto model = ::mllm::xnnpack::wrap2xnn(1, 1); model.setNoLoadWeightsDtype(DataType::MLLM_TYPE_F32); - EXPECT_EQ(Backend::global_backends[MLLM_XNNPACK] != nullptr, true); + EXPECT_EQ(Backend::global_backends[MLLM_XNNPACK].get() != nullptr, true); if (XnnpackBackend::enable_legacy_wrapper == false) { Log::warn("This test method is dropped. 
But tested ok in legacy wrapper mode"); return; } // B, S ,H, D - Tensor x(1, 6, 8, 1, Backend::global_backends[MLLM_XNNPACK], true); + Tensor x(1, 6, 8, 1, Backend::global_backends[MLLM_XNNPACK].get(), true); x.setTtype(TensorType::INPUT_TENSOR); float cnt = 0.f; @@ -59,11 +60,11 @@ TEST_F(XpTest, TransposeModule) { } TEST(TransposeTest, RawXnnImpl) { - Backend::global_backends.emplace(MLLM_XNNPACK, GetBackendCreator(MLLM_XNNPACK)->create({})); + Module::initBackend(MLLM_XNNPACK); mllm::xnnpack::Log::log_level = mllm::xnnpack::Log::INFO; // B, S, H, D - Tensor x(1, 1, 2048, 1024, Backend::global_backends[MLLM_XNNPACK], true); + Tensor x(1, 1, 2048, 1024, Backend::global_backends[MLLM_XNNPACK].get(), true); if (xnn_initialize(nullptr /* allocator */) != xnn_status_success) { ::mllm::xnnpack::Log::error("failed to initialize XNNPACK"); @@ -123,7 +124,7 @@ TEST(TransposeTest, RawXnnImpl) { } // create outexternal output. - Tensor out(1, 1, 2048, 4096, Backend::global_backends[MLLM_XNNPACK], true); + Tensor out(1, 1, 2048, 4096, Backend::global_backends[MLLM_XNNPACK].get(), true); std::array dims_out{1, 2048, 1, 4096}; status = xnn_define_tensor_value( subgraph, @@ -140,8 +141,8 @@ TEST(TransposeTest, RawXnnImpl) { } // create linear - Tensor weight(1, 1, 1024, 4096, Backend::global_backends[MLLM_XNNPACK], true); - Tensor bias(1, 1, 1, 4096, Backend::global_backends[MLLM_XNNPACK], true); + Tensor weight(1, 1, 1024, 4096, Backend::global_backends[MLLM_XNNPACK].get(), true); + Tensor bias(1, 1, 1, 4096, Backend::global_backends[MLLM_XNNPACK].get(), true); std::array dim_weight{1024, 4096}; status = xnn_define_tensor_value( subgraph, diff --git a/tools/convertor/converter.py b/tools/convertor/converter.py index 3bc196800..65a015949 100644 --- a/tools/convertor/converter.py +++ b/tools/convertor/converter.py @@ -45,6 +45,8 @@ def __torch_dtype_to_int(self, dtype: torch.dtype) -> int: return 1 elif dtype == torch.int8 or dtype == torch.bool: return 16 + elif dtype == 
torch.uint8: + return 31 elif dtype == torch.int32: return 18 else: @@ -69,10 +71,13 @@ def write_tensor(self, tensor: torch.Tensor, name: str) -> [int, int]: offset = self.writer.tell() if tensor.dtype == torch.bfloat16: # to float 16 tensor_numpy = tensor.detach().to(torch.float32).numpy() - elif tensor.dtype == torch.bool or tensor.dtype == torch.int8: # exported model for QNN int8 + elif tensor.dtype == torch.bool or tensor.dtype == torch.int8: tensor_numpy = tensor.detach().to(torch.int8).numpy() + elif tensor.dtype == torch.uint8: + tensor_numpy = tensor.detach().to(torch.uint8).numpy() else: - tensor_numpy = tensor.numpy() + # print(f"Write tensor {name} with dtype {tensor.dtype}") + tensor_numpy = tensor.detach().to(torch.float32).numpy() tensor_numpy.tofile(self.writer) size = self.writer.tell() - offset tensor_idx.size = size @@ -80,7 +85,7 @@ def write_tensor(self, tensor: torch.Tensor, name: str) -> [int, int]: return offset, size def write_tensor_index( - self, + self, ): self.writer.seek(4 + 8) for tensor_name in self.tensors_name: @@ -94,7 +99,9 @@ def write_tensor_index( self.write_u64(tensor.size) self.write_u64(tensor.offset) self.write_int(tensor.dtype) - print(f"Write tensor {tensor.name} to {tensor.offset} with size {tensor.size}") + print( + f"Write tensor {tensor.name} to {tensor.offset} with size {tensor.size}" + ) def write_tensor_index_padding(self, tensors_name: [str]): if len(tensors_name) > 0: @@ -116,7 +123,11 @@ def close(self): def get_tensor(model: dict, key: str, index_: dict): - if index_ is not None and isinstance(index_, dict) and "weight_map" in index_.keys(): + if ( + index_ is not None + and isinstance(index_, dict) + and "weight_map" in index_.keys() + ): if key in index_["weight_map"].keys(): model_ = file_map[index_["weight_map"][key]] if args.type == "torch": @@ -137,9 +148,13 @@ def get_tensor(model: dict, key: str, index_: dict): def all_keys(model: dict, index_: dict): global file_map all_keys_name = [] - if index_ is 
not None and isinstance(index_, dict) and "weight_map" in index_.keys(): + if ( + index_ is not None + and isinstance(index_, dict) + and "weight_map" in index_.keys() + ): json_pwd = os.path.dirname(args.input_model.name) - for (key, val) in index_["weight_map"].items(): + for key, val in index_["weight_map"].items(): all_keys_name.append(key) if val is not None and val not in file_map.keys(): # JOIN PATH @@ -165,16 +180,17 @@ def all_keys(model: dict, index_: dict): return all_keys_name -def process_str(name: str, type: str='dense'): - if type == 'dense' or ('down_proj.weight' not in name): +def process_str(name: str, type: str = "dense"): + if type == "dense" or ("down_proj.weight" not in name): return name - return name.replace('weight', 'weight_T') + return name.replace("weight", "weight_T") + -def process(name: str, ten: torch.Tensor, type: str='dense'): - if type == 'dense' or ('down_proj.weight' not in name): +def process(name: str, ten: torch.Tensor, type: str = "dense"): + if type == "dense" or ("down_proj.weight" not in name): return name, ten - new_name = name.replace('weight', 'weight_T') + new_name = name.replace("weight", "weight_T") transposed_tensor = ten.transpose(-2, -1).contiguous() return new_name, transposed_tensor @@ -201,7 +217,10 @@ def process(name: str, ten: torch.Tensor, type: str='dense'): args = parser.parse_args() if args.type == "torch": if args.input_model.name.endswith(".json"): - if os.path.basename(args.input_model.name) != "pytorch_model.bin.index.json": + if ( + os.path.basename(args.input_model.name) + != "pytorch_model.bin.index.json" + ): raise Exception("Only support pytorch_model.bin.index.json") index_ = json.load(args.input_model) else: @@ -223,12 +242,18 @@ def process(name: str, ten: torch.Tensor, type: str='dense'): raise Exception("Unknown type") writer = Writer(args.output_model) model_keys = all_keys(model, index_) - writer.write_tensor_index_padding([process_str(name, args.model_type) for name in model_keys]) + 
writer.write_tensor_index_padding( + [process_str(name, args.model_type) for name in model_keys] + ) for key in model_keys: tensor = get_tensor(model, key, index_) key, tensor = process(key, tensor, args.model_type) - if tensor.dtype != torch.bool or tensor.dtype != torch.int8: + if ( + tensor.dtype != torch.bool + and tensor.dtype != torch.int8 + and tensor.dtype != torch.uint8 + ): tensor = tensor.float() offset, size = writer.write_tensor(tensor, key) print(f"Get tensor {key} to {offset} with size {size}") diff --git a/tools/convertor/profiling_activation/README.md b/tools/convertor/profiling_activation/README.md deleted file mode 100644 index f3411b3da..000000000 --- a/tools/convertor/profiling_activation/README.md +++ /dev/null @@ -1,58 +0,0 @@ -# Profiling Activation Tools - -The profiling activation tools are used to get quantized **W8A8** model used by **QNN backend**. This dircetory contains the following files: -- get_act_distribution.py: get activation distribution config -- eval_quantize_threshold.py: evaluate the quantization threshold(number of clipped layers) -- export_int8_model.py: export quantized .pth model with int8 weights, quantization scales and clipped layers information -- simulate_inference.py: simulate inference with quantized model - -## Supported Model Type -- transformers.models.qwen2 -- transformers.models.llama -- transformers.models.opt -- transformers.models.gemma -- transformers.models.phi -- transformers.models.mixtral -- transformers.models.falcon - -## Design -We have designed a W8A8 quantization algorithm called **'Shadow Outlier Execution'** that selectively retains the precision of certain layers by sampling outlier layers and applying a threshold-based selection. This approach accelerates computation on low-precision data for NPUs while minimizing accuracy loss. 
- -![Shadow Execution](../../../assets/shadow_execution.png) - -It extracts the activation channels with outliers at runtime into a more compact tensor, executes it on CPU, and merges it back to the outcome of original operator on NPU. - -It will go through following steps: - -1. Profile a large corpora at offline -2. Determine an outlier threshold by the profling result, thereby identifying the outliers by simply comparing the activation numbers to this threshold. -3. Export int8 pytorch model and convert to mllm model file - - -## Usage - -Get activation distribution config using `tools/convertor/profiling_activation/get_act_distribution.py`. - -```bash -# You can download the validation dataset of the Pile at https://huggingface.co/datasets/mit-han-lab/pile-val-backup/resolve/main/val.jsonl.zst -python get_act_distribution.py --model_name --dataset pile-val-backup/val.jsonl.zst --output_file act_scales_distribution.json -``` -**Caution: getting activation distribution config needs huge amount of (cpu) memory and will take more than 1 hour. Memory Volume > 100 GB is suggested.** - -Use the activation distribution config to predict in different threshold of clipping. It will determine the number and the position of no_clip layers(shadow layers in modeling, see `src/models/qwen/modeling_qwen_npu.hpp`). - -```bash -python eval_quantize_threshold.py --model_name --model_type ["llama", "qwen1", "qwen2", "gemma", "phi", "opt", "mixtral", "falcon"] --scale_file -``` - -Export the int8 weight model with shadow layers config and static quantization scale. - -```bash -python export_int8_model.py --model_name --model_type ["llama", "qwen1", "qwen2", "gemma", "phi", "opt", "mixtral", "falcon"] --scale_file --output_model -``` - -Convert the .pth model to mllm model using `tools/convertor/converter.py`. 
- -```bash -python converter.py --input_model --output_model output_model.mllm -``` \ No newline at end of file diff --git a/tools/convertor/profiling_activation/eval_quantize_threshold.py b/tools/convertor/profiling_activation/eval_quantize_threshold.py deleted file mode 100644 index e8742868d..000000000 --- a/tools/convertor/profiling_activation/eval_quantize_threshold.py +++ /dev/null @@ -1,131 +0,0 @@ -""" -This file is for evaluating the accuracy of a model with differen thresholds for clipping the activations. -The threshold will control the number of activations that are clipped. If a layer is clipped, if means that this layer can be caculated using W8A8 with no fall back to FP32. -""" - -import argparse -from transformers import AutoModelForCausalLM, AutoTokenizer -import torch -import multiprocessing -import json -from tqdm import tqdm -from datasets import load_dataset - -from utils.quantization_simulation import ( - quantize_falcon_like, - quantize_mixtral, - quantize_qwen2_like, - quantize_llama_like, - quantize_gemma_like, - quantize_opt, - quantize_phi_like, -) -from utils.get_input_output_scales import get_clip_and_scale - - -class Evaluator: - def __init__(self, dataset, tokenizer, device): - self.dataset = dataset - self.tokenizer = tokenizer - self.device = device - - # tokenize the dataset - def tokenize_function(examples): - example = self.tokenizer(examples["text"]) - return example - - self.dataset = self.dataset.map(tokenize_function, batched=True) - self.dataset.set_format(type="torch", columns=["input_ids"]) - - @torch.no_grad() - def evaluate(self, model): - model.eval() - # The task is to predict the last word of the input. 
- total, hit = 0, 0 - for batch in tqdm(self.dataset): - input_ids = batch["input_ids"].to(self.device).unsqueeze(0) - label = input_ids[:, -1] - outputs = model(input_ids) - last_token_logits = outputs.logits[:, -2, :] - pred = last_token_logits.argmax(dim=-1) - total += label.size(0) - hit += (pred == label).sum().item() - acc = hit / total - return acc - - -def evaluate_model(model_name, act_dict, result_queue, t01m_thre): - dataset = load_dataset("lambada", split="validation[:1000]") - tokenizer = AutoTokenizer.from_pretrained(model_name, device_map="cuda:1") - evaluator = Evaluator(dataset, tokenizer, "cuda:1") - model = AutoModelForCausalLM.from_pretrained(model_name, device_map="cuda:1") - - act_scales, clip_top, return_dict = get_clip_and_scale(act_dict, t01m_thre) - - if args.model_type == "llama": - q_model = quantize_llama_like(model, act_scales, layer_clip=clip_top) - elif args.model_type == "qwen2" or args.model_type == "qwen1": - q_model = quantize_qwen2_like(model, act_scales, layer_clip=clip_top) - elif args.model_type == "gemma": - q_model = quantize_gemma_like(model, act_scales, layer_clip=clip_top) - elif args.model_type == "phi": - q_model = quantize_phi_like(model, act_scales, layer_clip=clip_top) - elif args.model_type == "opt": - q_model = quantize_opt(model, act_scales, layer_clip=clip_top) - elif args.model_type == "mixtral": - q_model = quantize_mixtral(model, act_scales, layer_clip=clip_top) - elif args.model_type == "falcon": - q_model = quantize_falcon_like(model, act_scales, layer_clip=clip_top) - else: - print("Model type not supported") - exit(1) - - res = evaluator.evaluate(q_model) - print(t01m_thre, res) - - return_dict["res"] = float(res) - result_queue.put((t01m_thre, return_dict)) - - -def get_all_actscale_result_parallel(model_name, act_dict): - manager = multiprocessing.Manager() - result_queue = manager.Queue() - - for t01m_thre in [1, 2, 4, 8, 16, 24, 32, 64, 128, 152, 10000000]: - p = multiprocessing.Process( - 
target=evaluate_model, args=(model_name, act_dict, result_queue, t01m_thre) - ) - p.start() - p.join() - - results = {} - while not result_queue.empty(): - key, value = result_queue.get() - results[key] = value - - return results - - -if __name__ == "__main__": - global args - parser = argparse.ArgumentParser() - parser.add_argument("--model_name", type=str) - parser.add_argument( - "--model_type", - choices=["llama", "qwen1", "qwen2", "gemma", "phi", "opt", "mixtral", "falcon"], - default="llama", - ) - parser.add_argument("--scale_file", type=argparse.FileType("r")) - args = parser.parse_args() - - res_data = {} - - act_dict = json.load(open(args.scale_file.name)) - results = get_all_actscale_result_parallel( - args.model_name, act_dict - ) - res_data[args.model_name] = results - - with open("model_res_acc.json", "w") as f: - json.dump(res_data, f, indent=4, ensure_ascii=False) - print("write to model_res_acc.json") diff --git a/tools/convertor/profiling_activation/export_int8_model.py b/tools/convertor/profiling_activation/export_int8_model.py deleted file mode 100644 index 825cc69aa..000000000 --- a/tools/convertor/profiling_activation/export_int8_model.py +++ /dev/null @@ -1,134 +0,0 @@ -import argparse -from transformers import AutoModelForCausalLM -import torch -import json - -from utils.get_input_output_scales import get_clip_and_scale -from utils.quantization_simulation import ( - quantize_qwen2_like, - quantize_llama_like, - quantize_gemma_like, - quantize_opt, - quantize_phi_like, - quantize_mixtral, - quantize_falcon_like, -) - - -@torch.no_grad() -def quantize_weight_per_tensor_absmax(w, n_bits=8): - # w: (out_features, in_features) - w = w.to("cuda") - scales = w.abs().max() - q_max = 2 ** (n_bits - 1) - 1 - scales.clamp_(min=1e-5).div_(q_max) - w.div_(scales).round_() - - if n_bits == 8: - w = w.to("cpu").type(torch.int8) - elif n_bits == 16: - w = w.to("cpu").type(torch.int32) - else: - w = w.to("cpu").type(torch.int8) - scale = 
scales.to("cpu").type(torch.float32) - return w, scale - - -if __name__ == "__main__": - global args - parser = argparse.ArgumentParser() - parser.add_argument("--model_name", type=str) - parser.add_argument( - "--model_type", - choices=["llama", "qwen1", "qwen2", "gemma", "phi", "opt", "mixtral", "falcon"], - default="llama", - ) - parser.add_argument("--scale_file", type=argparse.FileType("r")) - parser.add_argument("--t01m_clip_threshold", type=int, default=152) - parser.add_argument("--output_model", type=str, default="model-int8.pth") - args = parser.parse_args() - - print("model: ", args.model_name) - print("model type: ", args.model_type) - print("scale file: ", args.scale_file.name) - print("t01m clip threshold: ", args.t01m_clip_threshold) - print("output model: ", args.output_model) - - model_name = args.model_name - act_dict = args.scale_file.name - t01m_clip_threshold = args.t01m_clip_threshold - - model = AutoModelForCausalLM.from_pretrained(model_name) - act_dict = json.load(open(act_dict)) - - act_scales, clip_top, return_dict = get_clip_and_scale( - act_dict, t01m_clip_threshold - ) - print(f"clip input num: {return_dict['clip_input_num']}") - print(f"clip output num: {return_dict['clip_output_num']}") - print(f"no clip input num: {return_dict['no_clip_input_num']}") - for i in return_dict["no_clip_input_name"]: - print(f"no clip input: {i}") - print(f"no clip output num: {return_dict['no_clip_output_num']}") - for i in return_dict["no_clip_output_name"]: - print(f"no clip output: {i}") - - if args.model_type == "llama": - q_model = quantize_llama_like(model, act_scales, layer_clip=clip_top) - elif args.model_type == "qwen2" or args.model_type == "qwen1": - q_model = quantize_qwen2_like(model, act_scales, layer_clip=clip_top) - elif args.model_type == "gemma": - q_model = quantize_gemma_like(model, act_scales, layer_clip=clip_top) - elif args.model_type == "phi": - q_model = quantize_phi_like(model, act_scales, layer_clip=clip_top) - elif 
args.model_type == "opt": - q_model = quantize_opt(model, act_scales, layer_clip=clip_top) - elif args.model_type == "mixtral": - q_model = quantize_mixtral(model, act_scales, layer_clip=clip_top) - elif args.model_type == "falcon": - q_model = quantize_falcon_like(model, act_scales, layer_clip=clip_top) - else: - print("Model type not supported") - exit(1) - - model_dict = q_model.state_dict() - - for i in act_scales: - model_dict[i + ".input_scale"] = torch.tensor(act_scales[i]["input"]) - model_dict[i + ".output_scale"] = torch.tensor(act_scales[i]["output"]) - model_dict[i + ".clip_input"] = torch.tensor(clip_top[i]["input"]) - model_dict[i + ".clip_output"] = torch.tensor(clip_top[i]["output"]) - - new_model = {} - for name, param in model_dict.items(): - if name.replace(".weight", "") in act_scales: - if "head" not in name: - layer_name = name - new_model[layer_name], scale = quantize_weight_per_tensor_absmax( - model_dict[layer_name], 8 - ) - new_model[layer_name + ".scale"] = scale - - # NOTE: the int8 weight used for QNN in mllm needs to be transposed - new_model[name] = new_model[name].transpose(-2, -1) - # print(f"Quantized {layer_name} with scale {scale}") - else: - new_model[name] = param - # print(f"Copy {name}") - elif name.replace(".bias", "") in act_scales: - if "head" not in name: - layer_name = name - new_model[layer_name], scale = quantize_weight_per_tensor_absmax( - model_dict[layer_name], 8 - ) - new_model[layer_name + ".scale"] = scale - # print(f"Quantized {layer_name} with scale {scale}") - else: - new_model[name] = param - # print(f"Copy {name}") - else: - new_model[name] = param - # print(f"Copy {name}") - - torch.save(new_model, args.output_model) - print(f"Model saved to {args.output_model}") diff --git a/tools/convertor/profiling_activation/get_act_distribution.py b/tools/convertor/profiling_activation/get_act_distribution.py deleted file mode 100644 index 02838e9eb..000000000 --- 
a/tools/convertor/profiling_activation/get_act_distribution.py +++ /dev/null @@ -1,99 +0,0 @@ -# NOTE:Need a HUGE amount of memory(>=100GB) and time(>=1 hour) to run this script -""" -This file is used to get the distribution of the activation scales of the model. -The model is loaded from the path specified in the `model_name` argument, and the activation scales are loaded from the path specified in the `output_file` argument. -The activation scales are then flattened and the distribution of the scales is calculated. -It will calculate the mean and standard deviation of the scales for each layer of the model before and after removing the top 0.1% of the scales. -""" - -import argparse -import json -from utils.get_input_output_scales import get_static_decoder_layer_scales_distribution -from transformers import AutoModelForCausalLM, AutoTokenizer -import gc -import numpy as np - - -def flatten_act_dict(act_dict): - for layer, scales in act_dict.items(): - if isinstance(scales, list): - try: - all_acts = np.array(scales).reshape(-1) - except ValueError: - all_acts = [np.array(scale).reshape(-1) for scale in scales] - all_acts = np.concatenate(all_acts) - act_dict[layer] = all_acts - else: - act_dict[layer] = flatten_act_dict(scales) - print(layer) - gc.collect() - - return act_dict - - -def get_act_percentage(act_dict: dict, threshold: float): - assert 0 <=threshold <= 1 - percentage = 1 - threshold - act_percentage = {} - for layer, scales in act_dict.items(): - if not isinstance(scales, dict): - all_acts_flattened = scales - percentage_index = int(len(all_acts_flattened) * percentage) - 1 - nth_percentile_value = np.partition(all_acts_flattened, percentage_index)[percentage_index] - act_percentage[layer] = float(nth_percentile_value) - else: - print(layer) - act_percentage[layer] = get_act_percentage(scales, threshold) - return act_percentage - - -def get_act_distribution_stat(act_dict): - act_distribution = {} - for layer, scales in act_dict.items(): - if not 
isinstance(scales, dict): - act_distribution[layer] = {'mean': float(np.mean(scales)), 'std': float(np.std(scales))} - else: - act_distribution[layer] = get_act_distribution_stat(scales) - print(layer) - return act_distribution - - -if __name__ == "__main__": - global args - parser = argparse.ArgumentParser() - parser.add_argument("--model_name", type=str) - parser.add_argument( - "--dataset", - type=argparse.FileType("r"), - default="pile-val-backup/val.jsonl.zst", - ) - parser.add_argument( - "--output_file", type=str, default="act_scales_distribution.json" - ) - args = parser.parse_args() - - model = AutoModelForCausalLM.from_pretrained( - args.model_name, device_map="cuda" - ) - tokenizer = AutoTokenizer.from_pretrained(args.model_name, device_map="cuda") - - # You can download the validation dataset of the Pile at https://huggingface.co/datasets/mit-han-lab/pile-val-backup/resolve/main/val.jsonl.zst - act_dict = get_static_decoder_layer_scales_distribution( - model, tokenizer, args.dataset.name , num_samples=128 - ) - - print("begin_flatten") - act_dict = flatten_act_dict(act_dict) - print("finish flatten") - - # origin model scale - ori_scale = get_act_percentage(act_dict, 0) - # scale after remove top 0.1% outliers - top_0_1_scale = get_act_percentage(act_dict, 0.001) - # get mean and std of all scales - all_stat = get_act_distribution_stat(act_dict) - - res_dict = {"ori": ori_scale, "top_0_1": top_0_1_scale, "all_stat": all_stat} - - with open(args.output_file, "w") as f: - json.dump(res_dict, f, indent=4, ensure_ascii=False) diff --git a/tools/convertor/profiling_activation/simulate_inference.py b/tools/convertor/profiling_activation/simulate_inference.py deleted file mode 100644 index f9bc7c8f6..000000000 --- a/tools/convertor/profiling_activation/simulate_inference.py +++ /dev/null @@ -1,84 +0,0 @@ -""" -This file is a simulation of the inference process of a model that has been quantized using the quantization functions in the 
`quantization_simulation.py` file. -The model is loaded from the path specified in the `model_name` argument, and the activation scales are loaded from the path specified in the `scale_file` argument. The `t01m_clip_threshold` argument specifies the threshold for clipping the activations. -The model is quantized using the specified `model_type` argument, which determines the quantization function to be used. The quantized model is then used to generate an example based on the provided prompt. -""" - -import argparse -import json - -import torch -from utils.get_input_output_scales import get_clip_and_scale -from utils.quantization_simulation import ( - quantize_qwen2_like, - quantize_llama_like, - quantize_gemma_like, - quantize_opt, - quantize_phi_like, - quantize_mixtral, - quantize_falcon_like, -) -from transformers import AutoModelForCausalLM, AutoTokenizer - - -# prompt = """"Large Language Models (LLMs) are advanced artificial intelligence systems designed to understand and generate human-like text. These models are trained on vast amounts of data, enabling them to perform a wide range of tasks, from answering questions and summarizing text to generating creative content and engaging in conversational dialogue. LLMs like GPT-3 and GPT-4, developed by OpenAI, have set new benchmarks in natural language processing by leveraging deep learning architectures, particularly transformer models, which excel at capturing context and relationships within text. The scalability and versatility of LLMs make them invaluable tools for applications in education, customer service, content creation, and more. However, their deployment also raises ethical considerations, including issues of bias, misinformation, and the potential for misuse. As the field continues to evolve, ongoing research and responsible deployment strategies are essential to harnessing the full potential of these powerful AI systems while mitigating their risks." -# Generate a title based on the above text. 
-# """ - -prompt = "Give me a short introduction to large language model." - -if __name__ == "__main__": - global args - parser = argparse.ArgumentParser() - parser.add_argument("--model_name", type=str) - parser.add_argument( - "--model_type", - choices=["llama", "qwen1", "qwen2", "gemma", "phi", "opt", "mixtral", "falcon"], - default="llama", - ) - parser.add_argument("--scale_file", type=argparse.FileType("r")) - parser.add_argument("--t01m_clip_threshold", type=int, default=152) - args = parser.parse_args() - - tokenizer = AutoTokenizer.from_pretrained(args.model_name) - model = AutoModelForCausalLM.from_pretrained( - args.model_name, torch_dtype=torch.bfloat16, device_map="cuda" - ) - act_dict = json.load(open(args.scale_file.name)) - - act_scales, clip_top, return_dict = get_clip_and_scale(act_dict, args.t01m_clip_threshold) - - print(f"clip input num: {return_dict['clip_input_num']}") - print(f"clip output num: {return_dict['clip_output_num']}") - print(f"no clip input num: {return_dict['no_clip_input_num']}") - for i in return_dict["no_clip_input_name"]: - print(f"no clip input: {i}") - print(f"no clip output num: {return_dict['no_clip_output_num']}") - for i in return_dict["no_clip_output_name"]: - print(f"no clip output: {i}") - - if args.model_type == "llama": - q_model = quantize_llama_like(model, act_scales, layer_clip=clip_top) - elif args.model_type == "qwen2" or args.model_type == "qwen1": - q_model = quantize_qwen2_like(model, act_scales, layer_clip=clip_top) - elif args.model_type == "gemma": - q_model = quantize_gemma_like(model, act_scales, layer_clip=clip_top) - elif args.model_type == "phi": - q_model = quantize_phi_like(model, act_scales, layer_clip=clip_top) - elif args.model_type == "opt": - q_model = quantize_opt(model, act_scales, layer_clip=clip_top) - elif args.model_type == "mixtral": - q_model = quantize_mixtral(model, act_scales, layer_clip=clip_top) - elif args.model_type == "falcon": - q_model = quantize_falcon_like(model, 
act_scales, layer_clip=clip_top) - else: - print("Model type not supported") - exit(1) - - # use q_model to generate an example - input_ids = tokenizer(prompt, return_tensors="pt").to("cuda") - with torch.no_grad(): - output = q_model.generate( - **input_ids, max_length=100, do_sample=False, top_p=None, top_k=None - ) - print(tokenizer.decode(output[0], skip_special_tokens=True)) diff --git a/tools/convertor/profiling_activation/utils/quantization_simulation.py b/tools/convertor/profiling_activation/utils/quantization_simulation.py deleted file mode 100644 index 716d2c7f3..000000000 --- a/tools/convertor/profiling_activation/utils/quantization_simulation.py +++ /dev/null @@ -1,588 +0,0 @@ -""" -This file performs a fake quantization of the model weights and activations. -It will be used to simulate the quantization of the model weights and activations. -""" - -import torch -from torch import nn -from functools import partial - - -@torch.no_grad() -def simulate_quantize_weight_per_channel_absmax(w, n_bits=8): - scales = w.abs().max(dim=-1, keepdim=True)[0] - q_max = 2 ** (n_bits - 1) - 1 - scales.clamp_(min=1e-5).div_(q_max) - w.div_(scales).round_().mul_(scales) - return w - - -@torch.no_grad() -def simulate_quantize_weight_per_tensor_absmax(w, n_bits=8): - scales = w.abs().max() - q_max = 2 ** (n_bits - 1) - 1 - scales.clamp_(min=1e-5).div_(q_max) - w.div_(scales).round_().mul_(scales) - return w - - -@torch.no_grad() -def simulate_quantize_weight_scale_per_tensor_absmax(w, n_bits=8): - scales = w.abs().max() - q_max = 2 ** (n_bits - 1) - 1 - scales.clamp_(min=1e-5).div_(q_max) - return scales - - -@torch.no_grad() -def simulate_quantize_activation_per_tensor_static_input(t, scale=1, n_bits=8, clip_top=False): - scale = scale.clone().to(t.device) - t_shape = t.shape - t.view(-1, t_shape[-1]) - q_max = 2 ** (n_bits - 1) - 1 - scale.clamp_(min=1e-5).div_(q_max) - scale = scale * 100000 - scale = scale.round() / 100000 - t = t.div(scale).round() - if clip_top: - t = 
t.clamp(-128.0, 127.0) - t = t.mul(scale) - return t - - -@torch.no_grad() -def simulate_quantize_activation_per_tensor_static_output(t, scale=1, n_bits=8, clip_top=False): - scale = scale.clone().to(t.device) - - t_shape = t.shape - t.view(-1, t_shape[-1]) - q_max = 2 ** (n_bits - 1) - 1 - scale.clamp_(min=1e-5).div_(q_max) - - t = t.div(scale) - t = t.round() - if clip_top: - t = t.clamp(-128.0, 127.0) - t = t.mul(scale) - return t - - -@torch.no_grad() -def simulate_quantize_activation_per_token_absmax(t, n_bits=8): - t_shape = t.shape - t.view(-1, t_shape[-1]) - scales = t.abs().max(dim=-1, keepdim=True)[0] - q_max = 2 ** (n_bits - 1) - 1 - scales.clamp_(min=1e-5).div_(q_max) - t.div_(scales).round_().mul_(scales) - return t - - -@torch.no_grad() -def simulate_quantize_activation_per_tensor_absmax(t, n_bits=8): - t_shape = t.shape - t.view(-1, t_shape[-1]) - scales = t.abs().max() - q_max = 2 ** (n_bits - 1) - 1 - scales.clamp_(min=1e-5).div_(q_max) - t.div_(scales).round_().mul_(scales) - return t - - -class W8A8LinearStatic(nn.Module): - def __init__( - self, - in_features, - out_features, - input_scale, - output_scale, - bias=True, - clip_top=False, - ): - super().__init__() - self.in_features = in_features - self.out_features = out_features - self.input_scale = torch.tensor(input_scale) - self.output_scale = torch.tensor(output_scale) - - self.weight_scale = None - self.weight_quant_type = None - - self.register_buffer( - "weight", - torch.randn( - self.out_features, - self.in_features, - dtype=torch.float16, - requires_grad=False, - ), - ) - if bias: - self.register_buffer( - "bias", - torch.zeros( - (1, self.out_features), dtype=torch.float16, requires_grad=False - ), - ) - else: - self.register_buffer("bias", None) - - self.act_quant_input = partial( - simulate_quantize_activation_per_tensor_static_input, - n_bits=8, - clip_top=clip_top["input"], - ) - self.act_quant_output = partial( - simulate_quantize_activation_per_tensor_static_output, - n_bits=8, - 
clip_top=clip_top["output"], - ) - - def to(self, *args, **kwargs): - super(W8A8LinearStatic, self).to(*args, **kwargs) - self.weight = self.weight.to(*args, **kwargs) - if self.bias is not None: - self.bias = self.bias.to(*args, **kwargs) - return self - - @torch.no_grad() - def forward(self, x): - # perform online quantize-dequantize matmul to simulate W8A8 inference - q_x = self.act_quant_input(x, scale=self.input_scale) - y = torch.functional.F.linear(q_x, self.weight, self.bias) - q_y = self.act_quant_output(y, scale=self.output_scale) - - return q_y - - @staticmethod - def from_float(module, scales, weight_quant_type="per_tensor", clip_top=False): - assert isinstance(module, torch.nn.Linear) - - new_module = W8A8LinearStatic( - module.in_features, - module.out_features, - bias=module.bias is not None, - input_scale=scales["input"], - output_scale=scales["output"], - clip_top=clip_top, - ) - - if weight_quant_type == "per_channel": - new_module.weight = simulate_quantize_weight_per_channel_absmax( - module.weight, n_bits=8 - ) # use 8-bit integer for weight - elif weight_quant_type == "per_tensor": - new_module.weight = simulate_quantize_weight_per_tensor_absmax( - module.weight, n_bits=8 - ) - else: - raise ValueError(f"Invalid weight_quant: {weight_quant_type}") - - new_module.weight_quant_name = weight_quant_type - - if module.bias is not None: - new_module.bias = simulate_quantize_weight_per_tensor_absmax(module.bias, n_bits=8) - - return new_module - - def __repr__(self): - return f"W8A8LinearStatic({self.in_features}, {self.out_features}, bias={self.bias is not None}, weight_quant={self.weight_quant_name}, input_scale={self.input_scale.item()}, output_scale={self.output_scale.item()}, clip_top={self.act_quant_input.keywords['clip_top']})" - - -def quantize_opt( - model, - decoder_scales, - weight_quant="per_tensor", - act_quant="per_tensor", - quantize_bmm_input=True, - layer_clip={}, -): - from transformers.models.opt.modeling_opt import ( - 
OPTAttention, - OPTDecoderLayer, - ) - - for name, m in model.model.named_modules(): - - if isinstance(m, OPTDecoderLayer): - m.fc1 = W8A8LinearStatic.from_float( - m.fc1, - decoder_scales["model." + name + ".fc1"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." + name + ".fc1"], - ) - m.fc2 = W8A8LinearStatic.from_float( - m.fc2, - decoder_scales["model." + name + ".fc2"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." + name + ".fc2"], - ) - elif isinstance(m, OPTAttention): - # Here we simulate quantizing BMM inputs by quantizing the output of q_proj, k_proj, v_proj - m.q_proj = W8A8LinearStatic.from_float( - m.q_proj, - decoder_scales["model." + name + ".q_proj"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." + name + ".q_proj"], - ) - m.k_proj = W8A8LinearStatic.from_float( - m.k_proj, - decoder_scales["model." + name + ".k_proj"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." + name + ".k_proj"], - ) - m.v_proj = W8A8LinearStatic.from_float( - m.v_proj, - decoder_scales["model." + name + ".v_proj"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." + name + ".v_proj"], - ) - m.out_proj = W8A8LinearStatic.from_float( - m.out_proj, - decoder_scales["model." + name + ".out_proj"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." + name + ".out_proj"], - ) - return model - - -def quantize_llama_like( - model, - decoder_scales, - weight_quant="per_tensor", - act_quant="per_tensor", - quantize_bmm_input=False, - layer_clip={}, -): - from transformers.models.llama.modeling_llama import ( - LlamaAttention, - LlamaMLP, - ) - - from transformers.models.mistral.modeling_mistral import ( - MistralAttention, - MistralMLP, - ) - - for name, m in model.model.named_modules(): - if isinstance(m, (LlamaMLP, MistralMLP)): - m.gate_proj = W8A8LinearStatic.from_float( - m.gate_proj, - decoder_scales["model." 
+ name + ".gate_proj"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." + name + ".gate_proj"], - ) - m.up_proj = W8A8LinearStatic.from_float( - m.up_proj, - decoder_scales["model." + name + ".up_proj"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." + name + ".up_proj"], - ) - m.down_proj = W8A8LinearStatic.from_float( - m.down_proj, - decoder_scales["model." + name + ".down_proj"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." + name + ".down_proj"], - ) - elif isinstance(m, (LlamaAttention, MistralAttention)): - # Here we simulate quantizing BMM inputs by quantizing the output of q_proj, k_proj, v_proj - m.q_proj = W8A8LinearStatic.from_float( - m.q_proj, - decoder_scales["model." + name + ".q_proj"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." + name + ".q_proj"], - ) - m.k_proj = W8A8LinearStatic.from_float( - m.k_proj, - decoder_scales["model." + name + ".k_proj"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." + name + ".k_proj"], - ) - m.v_proj = W8A8LinearStatic.from_float( - m.v_proj, - decoder_scales["model." + name + ".v_proj"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." + name + ".v_proj"], - ) - m.o_proj = W8A8LinearStatic.from_float( - m.o_proj, - decoder_scales["model." + name + ".o_proj"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." + name + ".o_proj"], - ) - return model - - -def quantize_qwen2_like( - model, - decoder_scales, - weight_quant="per_tensor", - act_quant="per_tensor", - quantize_bmm_input=False, - layer_clip={}, -): - from transformers.models.qwen2.modeling_qwen2 import ( - Qwen2Attention, - Qwen2MLP, - ) - - for name, m in model.model.named_modules(): - if isinstance(m, Qwen2MLP): - m.gate_proj = W8A8LinearStatic.from_float( - m.gate_proj, - decoder_scales["model." + name + ".gate_proj"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." 
+ name + ".gate_proj"], - ) - m.up_proj = W8A8LinearStatic.from_float( - m.up_proj, - decoder_scales["model." + name + ".up_proj"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." + name + ".up_proj"], - ) - m.down_proj = W8A8LinearStatic.from_float( - m.down_proj, - decoder_scales["model." + name + ".down_proj"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." + name + ".down_proj"], - ) - elif isinstance(m, Qwen2Attention): - # Here we simulate quantizing BMM inputs by quantizing the output of q_proj, k_proj, v_proj - m.q_proj = W8A8LinearStatic.from_float( - m.q_proj, - decoder_scales["model." + name + ".q_proj"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." + name + ".q_proj"], - ) - m.k_proj = W8A8LinearStatic.from_float( - m.k_proj, - decoder_scales["model." + name + ".k_proj"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." + name + ".k_proj"], - ) - m.v_proj = W8A8LinearStatic.from_float( - m.v_proj, - decoder_scales["model." + name + ".v_proj"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." + name + ".v_proj"], - ) - m.o_proj = W8A8LinearStatic.from_float( - m.o_proj, - decoder_scales["model." + name + ".o_proj"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." + name + ".o_proj"], - ) - return model - - -def quantize_gemma_like( - model, - decoder_scales, - weight_quant="per_tensor", - act_quant="per_tensor", - quantize_bmm_input=False, - layer_clip={}, -): - from transformers.models.gemma.modeling_gemma import ( - GemmaSdpaAttention, - GemmaMLP, - ) - - for name, m in model.model.named_modules(): - if isinstance(m, GemmaMLP): - m.gate_proj = W8A8LinearStatic.from_float( - m.gate_proj, - decoder_scales["model." + name + ".gate_proj"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." + name + ".gate_proj"], - ) - m.up_proj = W8A8LinearStatic.from_float( - m.up_proj, - decoder_scales["model." 
+ name + ".up_proj"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." + name + ".up_proj"], - ) - m.down_proj = W8A8LinearStatic.from_float( - m.down_proj, - decoder_scales["model." + name + ".down_proj"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." + name + ".down_proj"], - ) - elif isinstance(m, GemmaSdpaAttention): - # Here we simulate quantizing BMM inputs by quantizing the output of q_proj, k_proj, v_proj - m.q_proj = W8A8LinearStatic.from_float( - m.q_proj, - decoder_scales["model." + name + ".q_proj"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." + name + ".q_proj"], - ) - m.k_proj = W8A8LinearStatic.from_float( - m.k_proj, - decoder_scales["model." + name + ".k_proj"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." + name + ".k_proj"], - ) - m.v_proj = W8A8LinearStatic.from_float( - m.v_proj, - decoder_scales["model." + name + ".v_proj"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." + name + ".v_proj"], - ) - m.o_proj = W8A8LinearStatic.from_float( - m.o_proj, - decoder_scales["model." + name + ".o_proj"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." + name + ".o_proj"], - ) - return model - - -def quantize_phi_like( - model, - decoder_scales, - weight_quant="per_tensor", - act_quant="per_tensor", - quantize_bmm_input=False, - layer_clip={}, -): - from transformers.models.phi.modeling_phi import ( - PhiAttention, - PhiMLP, - ) - - for name, m in model.model.named_modules(): - if isinstance(m, PhiMLP): - m.fc1 = W8A8LinearStatic.from_float( - m.fc1, - decoder_scales["model." + name + ".fc1"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." + name + ".fc1"], - ) - m.fc2 = W8A8LinearStatic.from_float( - m.fc2, - decoder_scales["model." + name + ".fc2"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." 
+ name + ".fc2"], - ) - - elif isinstance(m, PhiAttention): - # Here we simulate quantizing BMM inputs by quantizing the output of q_proj, k_proj, v_proj - m.q_proj = W8A8LinearStatic.from_float( - m.q_proj, - decoder_scales["model." + name + ".q_proj"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." + name + ".q_proj"], - ) - m.k_proj = W8A8LinearStatic.from_float( - m.k_proj, - decoder_scales["model." + name + ".k_proj"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." + name + ".k_proj"], - ) - m.v_proj = W8A8LinearStatic.from_float( - m.v_proj, - decoder_scales["model." + name + ".v_proj"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." + name + ".v_proj"], - ) - m.dense = W8A8LinearStatic.from_float( - m.dense, - decoder_scales["model." + name + ".dense"], - weight_quant_type=weight_quant, - clip_top=layer_clip["model." + name + ".dense"], - ) - return model - - -def quantize_mixtral( - model, weight_quant="per_channel", act_quant="per_token", quantize_bmm_input=False -): - from transformers.models.mixtral.modeling_mixtral import ( - MixtralAttention, - MixtralSparseMoeBlock, - MixtralBLockSparseTop2MLP, - ) - - for name, m in model.model.named_modules(): - if isinstance(m, MixtralBLockSparseTop2MLP): - m.w1 = W8A8LinearStatic.from_float( - m.w1, weight_quant_type=weight_quant, act_quant=act_quant - ) - m.w2 = W8A8LinearStatic.from_float( - m.w2, weight_quant_type=weight_quant, act_quant=act_quant - ) - m.w3 = W8A8LinearStatic.from_float( - m.w3, weight_quant_type=weight_quant, act_quant=act_quant - ) - elif isinstance(m, MixtralAttention): - # Here we simulate quantizing BMM inputs by quantizing the output of q_proj, k_proj, v_proj - m.q_proj = W8A8LinearStatic.from_float( - m.q_proj, - weight_quant_type=weight_quant, - act_quant=act_quant, - quantize_output=quantize_bmm_input, - ) - m.k_proj = W8A8LinearStatic.from_float( - m.k_proj, - weight_quant_type=weight_quant, - act_quant=act_quant, - 
quantize_output=quantize_bmm_input, - ) - m.v_proj = W8A8LinearStatic.from_float( - m.v_proj, - weight_quant_type=weight_quant, - act_quant=act_quant, - quantize_output=quantize_bmm_input, - ) - m.o_proj = W8A8LinearStatic.from_float( - m.o_proj, weight_quant_type=weight_quant, act_quant=act_quant - ) - elif isinstance(m, MixtralSparseMoeBlock): - m.gate = W8A8LinearStatic.from_float( - m.gate, weight_quant_type=weight_quant, act_quant=act_quant - ) - return model - - -def quantize_falcon_like( - model, - decoder_scales, - weight_quant="per_tensor", - act_quant="per_tensor", - quantize_bmm_input=False, - layer_clip={}, -): - from transformers.models.falcon.modeling_falcon import ( - FalconAttention, - FalconMLP, - ) - - for name, m in model.named_modules(): - if isinstance(m, FalconMLP): - m.dense_h_to_4h = W8A8LinearStatic.from_float( - m.dense_h_to_4h, - decoder_scales[name + ".dense_h_to_4h"], - weight_quant_type=weight_quant, - clip_top=layer_clip[name + ".dense_h_to_4h"], - ) - m.dense_4h_to_h = W8A8LinearStatic.from_float( - m.dense_4h_to_h, - decoder_scales[name + ".dense_4h_to_h"], - weight_quant_type=weight_quant, - clip_top=layer_clip[name + ".dense_4h_to_h"], - ) - elif isinstance(m, FalconAttention): - # Here we simulate quantizing BMM inputs by quantizing the output of q_proj, k_proj, v_proj - m.query_key_value = W8A8LinearStatic.from_float( - m.query_key_value, - decoder_scales[name + ".query_key_value"], - weight_quant_type=weight_quant, - clip_top=layer_clip[name + ".query_key_value"], - ) - m.dense = W8A8LinearStatic.from_float( - m.dense, - decoder_scales[name + ".dense"], - weight_quant_type=weight_quant, - clip_top=layer_clip[name + ".dense"], - ) - return model diff --git a/tools/jni/LibHelper.cpp b/tools/jni/LibHelper.cpp index 4c5819360..530c2a99e 100644 --- a/tools/jni/LibHelper.cpp +++ b/tools/jni/LibHelper.cpp @@ -30,9 +30,9 @@ using namespace mllm; #ifdef USE_QNN -#include "models/qwen/modeling_qwen_npu.hpp" +#include 
"models/qwen/modeling_qwen_npu_v2.hpp" #include "models/phonelm/modeling_phonelm_npu.hpp" - +#include "models/qwen2_vl/modeling_qwen2_vl_npu.hpp" #endif inline bool exists_test(const std::string &name) { std::ifstream f(name.c_str()); @@ -55,7 +55,18 @@ unsigned int LibHelper::postProcessing(shared_ptr result, shared_ptr(qwconfig, chunk_size); + prefill_module_ = make_shared(qwconfig, chunk_size); prefill_module_->load(qnn_weights_path); auto tokenizer = dynamic_pointer_cast(tokenizer_); @@ -103,11 +114,11 @@ bool LibHelper::setUp(const std::string &base_path, std::string weights_path, st if (!not_end) { return false; } return true; }); - Module::isFirstChunk = false; - static_cast(Backend::global_backends[MLLM_CPU])->setCurSequenceLength(0); - static_cast(Backend::global_backends[MLLM_CPU])->setExecutionType(PROMPT); - static_cast(Backend::global_backends[MLLM_CPU])->toggleSwitching(); - Module::isMultiChunkPrefilling = true; + Context::Instance().inference_state().setQnnGraphFrozen(true); + Context::Instance().inference_state().setCurSequenceLength(0); + Context::Instance().inference_state().setExecutionType(PROMPT); + Context::Instance().inference_state().toggleSwitching(); + // warmup END LOGE("QNN Warmup finished."); } @@ -120,7 +131,23 @@ bool LibHelper::setUp(const std::string &base_path, std::string weights_path, st break; case QWEN2VL: processor_ = new Qwen2VLProcessor(vocab_path, merge_path); - module_ = make_shared(qwvlconfig); + LOGI("Init Qwen2VLProcessor: %d", backend_type); +#ifdef USE_QNN + if (backend_type == MLLMBackendType::QNN) { + int chunk_size = 256; + prefill_module_ = make_shared(qwvlconfig, chunk_size); + prefill_module_->load(qnn_weights_path); + prefill_embedding_ = make_shared(qwvlconfig); + prefill_embedding_->load(weights_path); + qwvlconfig.attn_implementation = "eager"; + module_ = make_shared(qwvlconfig); + } else { +#endif + module_ = make_shared(qwvlconfig); + +#ifdef USE_QNN + } +#endif break; case Bert: tokenizer_ = 
make_shared(vocab_path, true); @@ -155,11 +182,11 @@ bool LibHelper::setUp(const std::string &base_path, std::string weights_path, st if (!not_end) { return false; } return true; }); - Module::isFirstChunk = false; - static_cast(Backend::global_backends[MLLM_CPU])->setCurSequenceLength(0); - static_cast(Backend::global_backends[MLLM_CPU])->setExecutionType(PROMPT); - static_cast(Backend::global_backends[MLLM_CPU])->toggleSwitching(); - Module::isMultiChunkPrefilling = true; + Context::Instance().inference_state().setQnnGraphFrozen(true); + Context::Instance().inference_state().setCurSequenceLength(0); + Context::Instance().inference_state().setExecutionType(PROMPT); + Context::Instance().inference_state().toggleSwitching(); + // warmup END LOGE("QNN Warmup finished."); } @@ -197,9 +224,9 @@ void LibHelper::run(std::string &input_str, uint8_t *image, unsigned max_step, u bool isSwitched = false; // set total seq length for HeadLinear execute, which can not get the real seq length from Opts - static_cast(Backend::global_backends[MLLM_CPU])->setTotalSequenceLength(real_seq_length); + Context::Instance().inference_state().setTotalSequenceLength(real_seq_length); // set chunk size for the HeadLinear execute, which can not get the chunk size from Opts - static_cast(Backend::global_backends[MLLM_CPU])->setChunkSize(chunk_size); + Context::Instance().inference_state().setChunkSize(chunk_size); LlmTextGeneratorOpts opt{ .max_new_tokens = 1, @@ -210,16 +237,16 @@ void LibHelper::run(std::string &input_str, uint8_t *image, unsigned max_step, u }; std::vector chunked_tensors(chunk_num); for (int chunk_id = 0; chunk_id < chunk_num; ++chunk_id) { - chunked_tensors[chunk_id].setBackend(Backend::global_backends[MLLM_CPU]); + chunked_tensors[chunk_id].setBackend(Backend::global_backends[MLLM_CPU].get()); chunked_tensors[chunk_id].setTtype(INPUT_TENSOR); chunked_tensors[chunk_id].reshape(1, 1, chunk_size, 1); chunked_tensors[chunk_id].setName("input-chunk-" + to_string(chunk_id)); 
chunked_tensors[chunk_id].shallowCopyFrom(&input_tensor, false, {0, 0, chunk_id * chunk_size, 0}); prefill_module_->generate(chunked_tensors[chunk_id], opt, [&](unsigned int out_token) -> bool { - if (!isSwitched && chunk_id == 0 && static_cast(Backend::global_backends[MLLM_CPU])->isStageSwitching()) { + if (!isSwitched && chunk_id == 0 && Context::Instance().inference_state().isStageSwitching()) { // turn off switching at the first chunk of following inputs - static_cast(Backend::global_backends[MLLM_CPU])->toggleSwitching(); + Context::Instance().inference_state().toggleSwitching(); isSwitched = true; } // switch_flag = true; @@ -239,11 +266,11 @@ void LibHelper::run(std::string &input_str, uint8_t *image, unsigned max_step, u } return true; }); - Module::isFirstChunk = false; + Context::Instance().inference_state().setQnnGraphFrozen(true); } - static_cast(Backend::global_backends[MLLM_CPU])->setCurSequenceLength(real_seq_length); - static_cast(Backend::global_backends[MLLM_CPU])->setExecutionType(AUTOREGRESSIVE); - static_cast(Backend::global_backends[MLLM_CPU])->toggleSwitching(); + Context::Instance().inference_state().setCurSequenceLength(real_seq_length); + Context::Instance().inference_state().setExecutionType(AUTOREGRESSIVE); + Context::Instance().inference_state().toggleSwitching(); opt = LlmTextGeneratorOpts{ .max_new_tokens = max_new_tokens - 1, @@ -256,7 +283,7 @@ void LibHelper::run(std::string &input_str, uint8_t *image, unsigned max_step, u isSwitched = false; module_->generate(chunked_tensors.back(), opt, [&](unsigned int out_token) -> bool { if (!isSwitched) { - static_cast(Backend::global_backends[MLLM_CPU])->toggleSwitching(); + Context::Instance().inference_state().toggleSwitching(); isSwitched = true; } auto out_token_string = tokenizer_->detokenize({out_token}); @@ -274,9 +301,9 @@ void LibHelper::run(std::string &input_str, uint8_t *image, unsigned max_step, u if (!not_end) { return false; } return true; }); - 
static_cast(Backend::global_backends[MLLM_CPU])->setCurSequenceLength(0); - static_cast(Backend::global_backends[MLLM_CPU])->setExecutionType(PROMPT); - static_cast(Backend::global_backends[MLLM_CPU])->toggleSwitching(); + Context::Instance().inference_state().setCurSequenceLength(0); + Context::Instance().inference_state().setExecutionType(PROMPT); + Context::Instance().inference_state().toggleSwitching(); } else { // CPU auto input_tensor = tokenizer_->tokenize(input_str); max_new_tokens = tokens_limit - input_tensor.sequence(); @@ -318,26 +345,148 @@ void LibHelper::run(std::string &input_str, uint8_t *image, unsigned max_step, u } module_->clear_kvcache(); } else if (model_ == QWEN2VL) { - auto model = dynamic_cast(module_.get()); auto processor = dynamic_cast(processor_); input_str = "Based on the screenshot of the page, I give a text description and you give its corresponding location. The coordinate represents a clickable location [x, y] for an element, which is a relative coordinate on the screenshot, scaled from 0 to 1.<|vision_start|><|image_pad|><|vision_end|>" + input_str; input_str = processor->tokenizer->apply_chat_template(input_str); auto input_tensors = processor->process(input_str, {image}, {image_length}); LOGE("Instruct: %s", input_str.c_str()); LOGE("Tokens: %d", input_tensors[0].sequence()); - for (int step = 0; step < 100; step++) { - model->get_position_ids(input_tensors); - auto result = (*model)(input_tensors); - auto outputs = processor->detokenize(result[0]); - auto out_string = outputs.first; - auto out_token = outputs.second; - auto [end, string] = processor->tokenizer->postprocess(out_string); - output_string_ += string; - callback_(output_string_, !end, {}); - if (!end) { break; } + +#ifdef USE_QNN + if (backend_ == MLLMBackendType::QNN) { + int chunk_size = 256; + + const int real_seq_length = input_tensors[0].sequence(); + const int num_iter = (real_seq_length + chunk_size - 1) / chunk_size; + auto model = 
dynamic_cast(module_.get()); + auto prefill_embedding = dynamic_cast(prefill_embedding_.get()); + // padding the position_ids to total chunk length(example: 256*2) for CPUMultimodalRoPEPipeline + LOGE("before get_position_ids"); + prefill_embedding->get_position_ids(input_tensors, chunk_size * num_iter); + LOGE("after get_position_ids"); + + // warm up (still need a warm up as the setup stage is not omitted now) + auto merged_embd_warmup_tensor = Tensor(Backend::global_backends[MLLM_QNN]); + merged_embd_warmup_tensor.reshape(1, 1, chunk_size, 1536); + merged_embd_warmup_tensor.setTtype(INPUT_TENSOR); + merged_embd_warmup_tensor.alloc(); + merged_embd_warmup_tensor.setTtype(INPUT_TENSOR); + input_tensors.back().setTtype(INPUT_TENSOR); + vector prefill_input = {merged_embd_warmup_tensor, input_tensors.back()}; + (*prefill_module_)(prefill_input); + LOGE("after warm up"); + + Module::isFirstChunk = false; + static_cast(Backend::global_backends[MLLM_CPU].get())->setCurSequenceLength(0); + static_cast(Backend::global_backends[MLLM_CPU].get())->setExecutionType(PROMPT); + static_cast(Backend::global_backends[MLLM_CPU].get())->toggleSwitching(); + + // set total seq length for HeadLinear execute, which can not get the real seq length from Opts + static_cast(Backend::global_backends[MLLM_CPU].get())->setTotalSequenceLength(real_seq_length); + // set chunk size for the HeadLinear execute, which can not get the chunk size from Opts + static_cast(Backend::global_backends[MLLM_CPU].get())->setChunkSize(chunk_size); + + for (auto &t : input_tensors) { + t.setTtype(INPUT_TENSOR); + } + + // 1. 
get the vit embedding using CPU + auto merged_embd = (*prefill_embedding)(input_tensors); + LOGE("after vit embedding"); + + // free prefill embedding tensor, approximately free 1GB for 59ms + auto begin_free = mllm_time_ms(); + auto &embedding_act = prefill_embedding->activation_tensors; + // go through the activation tensors to get the merged_embd + for (auto iter = embedding_act.begin(); iter != embedding_act.end(); ++iter) { + // std::cout << iter->first << std::endl; + if (iter->first.find("input") != std::string::npos || iter->first.find("index_put") != std::string::npos) { + continue; + } + iter->second->free(); + } + auto end_free = mllm_time_ms(); + LOGE("after free"); + + // 2. QNN LLM Prefill + unsigned int out_token = 0; + for (auto i = 0; i < num_iter; ++i) { + // copy the data from merged_embd[0] to merged_embd_warmup_tensor + auto source = merged_embd[0].ptrAt(0, 0, chunk_size * i, 0); + auto dest = prefill_input[0].hostPtr(); + if (i == 0) { + memcpy(dest, source, prefill_input[0].cntSize()); + } + { + memcpy(dest, source, (merged_embd[0].sequence() % chunk_size) * merged_embd[0].dimension() * sizeof(float)); + } + + auto result = (*prefill_module_)(prefill_input); + + if (i == 0) { // turn off switching to avoid RoPE h_cnt_ reset to curSequenceLength in next chunk + static_cast(Backend::global_backends[MLLM_CPU].get())->toggleSwitching(); + } + + if (i == 1) { + auto outputs = processor->detokenize(result[0], real_seq_length % chunk_size); + auto out_string = outputs.first; + out_token = outputs.second; + // auto [not_end, output_string] = processor->tokenizer->postprocess(out_string); + // std::cout << output_string << std::flush; + auto [end, string] = processor->tokenizer->postprocess(out_string); + output_string_ += string; + callback_(output_string_, !end, {}); + } + } + chatPostProcessing(out_token, input_tensors[0], {&input_tensors[1], &input_tensors[2]}); + + 
static_cast(Backend::global_backends[MLLM_CPU].get())->setCurSequenceLength(real_seq_length); + static_cast(Backend::global_backends[MLLM_CPU].get())->setExecutionType(AUTOREGRESSIVE); + static_cast(Backend::global_backends[MLLM_CPU].get())->toggleSwitching(); + + // 3. CPU LLM Decoding + for (auto &t : input_tensors) { // set to INPUT_TENSOR to let decoding module update act + t.setTtype(INPUT_TENSOR); + } + + const int last_position_id = input_tensors[3].dataAt(0, 0, 0, real_seq_length - 1); + for (int step = 0; step < 100; step++) { + // use the last position id(no padding position) in decoding + prefill_embedding->get_position_ids(input_tensors, 0, last_position_id + 1 + step); + + auto result = (*model)(input_tensors); + auto outputs = processor->detokenize(result[0]); + auto out_string = outputs.first; + auto out_token = outputs.second; + auto [end, string] = processor->tokenizer->postprocess(out_string); + output_string_ += string; + callback_(output_string_, !end, {}); + if (!end) { break; } + chatPostProcessing(out_token, input_tensors[0], {&input_tensors[1], &input_tensors[2]}); + if (step == 0) static_cast(Backend::global_backends[MLLM_CPU].get())->toggleSwitching(); + } + + std::cout << std::endl; + } else { +#endif + auto model = dynamic_cast(module_.get()); + for (int step = 0; step < 100; step++) { + model->get_position_ids(input_tensors); + auto result = (*model)(input_tensors); + auto outputs = processor->detokenize(result[0]); + auto out_string = outputs.first; + auto out_token = outputs.second; + auto [end, string] = processor->tokenizer->postprocess(out_string); + output_string_ += string; + callback_(output_string_, !end, {}); + if (!end) { break; } + chatPostProcessing(out_token, input_tensors[0], {&input_tensors[1], &input_tensors[2]}); + } + module_->clear_kvcache(); +#ifdef USE_QNN } - module_->clear_kvcache(); +#endif } else if (model_ == Bert) { LOGE("Bert model is not supported in this version."); } else if (model_ == PhoneLM) { @@ 
-355,9 +504,9 @@ void LibHelper::run(std::string &input_str, uint8_t *image, unsigned max_step, u bool isSwitched = false; // set total seq length for HeadLinear execute, which can not get the real seq length from Opts - static_cast(Backend::global_backends[MLLM_CPU])->setTotalSequenceLength(real_seq_length); + Context::Instance().inference_state().setTotalSequenceLength(real_seq_length); // set chunk size for the HeadLinear execute, which can not get the chunk size from Opts - static_cast(Backend::global_backends[MLLM_CPU])->setChunkSize(chunk_size); + Context::Instance().inference_state().setChunkSize(chunk_size); LlmTextGeneratorOpts opt{ .max_new_tokens = 1, @@ -368,7 +517,7 @@ void LibHelper::run(std::string &input_str, uint8_t *image, unsigned max_step, u }; std::vector chunked_tensors(chunk_num); for (int chunk_id = 0; chunk_id < chunk_num; ++chunk_id) { - chunked_tensors[chunk_id].setBackend(Backend::global_backends[MLLM_CPU]); + chunked_tensors[chunk_id].setBackend(Backend::global_backends[MLLM_CPU].get()); chunked_tensors[chunk_id].setTtype(INPUT_TENSOR); chunked_tensors[chunk_id].reshape(1, 1, chunk_size, 1); chunked_tensors[chunk_id].setName("input-chunk-" + to_string(chunk_id)); @@ -378,7 +527,7 @@ void LibHelper::run(std::string &input_str, uint8_t *image, unsigned max_step, u // if (switch_flag && !isSwitched && chunk_id == 0) { if (!isSwitched && chunk_id == 0) { // turn off switching at the first chunk of following inputs - static_cast(Backend::global_backends[MLLM_CPU])->toggleSwitching(); + Context::Instance().inference_state().toggleSwitching(); isSwitched = true; } // switch_flag = true; @@ -399,11 +548,11 @@ void LibHelper::run(std::string &input_str, uint8_t *image, unsigned max_step, u if (!not_end) { return false; } return true; }); - Module::isFirstChunk = false; + Context::Instance().inference_state().setQnnGraphFrozen(true); } - static_cast(Backend::global_backends[MLLM_CPU])->setCurSequenceLength(real_seq_length); - 
static_cast(Backend::global_backends[MLLM_CPU])->setExecutionType(AUTOREGRESSIVE); - static_cast(Backend::global_backends[MLLM_CPU])->toggleSwitching(); + Context::Instance().inference_state().setCurSequenceLength(real_seq_length); + Context::Instance().inference_state().setExecutionType(AUTOREGRESSIVE); + Context::Instance().inference_state().toggleSwitching(); opt = LlmTextGeneratorOpts{ .max_new_tokens = max_new_tokens - 1, @@ -416,7 +565,7 @@ void LibHelper::run(std::string &input_str, uint8_t *image, unsigned max_step, u isSwitched = false; module_->generate(chunked_tensors.back(), opt, [&](unsigned int out_token) -> bool { if (!isSwitched) { - static_cast(Backend::global_backends[MLLM_CPU])->toggleSwitching(); + Context::Instance().inference_state().toggleSwitching(); isSwitched = true; } auto out_token_string = tokenizer_->detokenize({out_token}); @@ -434,9 +583,9 @@ void LibHelper::run(std::string &input_str, uint8_t *image, unsigned max_step, u if (!not_end) { return false; } return true; }); - static_cast(Backend::global_backends[MLLM_CPU])->setCurSequenceLength(0); - static_cast(Backend::global_backends[MLLM_CPU])->setExecutionType(PROMPT); - static_cast(Backend::global_backends[MLLM_CPU])->toggleSwitching(); + Context::Instance().inference_state().setCurSequenceLength(0); + Context::Instance().inference_state().setExecutionType(PROMPT); + Context::Instance().inference_state().toggleSwitching(); } else { // CPU auto input_tensor = tokenizer_->tokenize(input_str); max_new_tokens = tokens_limit - input_tensor.sequence(); diff --git a/tools/jni/LibHelper.hpp b/tools/jni/LibHelper.hpp index 98791ae3d..97431528a 100644 --- a/tools/jni/LibHelper.hpp +++ b/tools/jni/LibHelper.hpp @@ -47,6 +47,7 @@ class LibHelper { PreProcessor *processor_; std::shared_ptr module_; std::shared_ptr prefill_module_; + std::shared_ptr prefill_embedding_; // Tokenizer *tokenizer_ = nullptr; unsigned int eos_id_ = 2; diff --git a/tools/jni/helper.hpp b/tools/jni/helper.hpp deleted 
file mode 100644 index 79cec239f..000000000 --- a/tools/jni/helper.hpp +++ /dev/null @@ -1,89 +0,0 @@ -// -// Created by Xiang Li on 2023/12/21. -// - -#ifndef HELPER_HPP -#define HELPER_HPP -#include -#include -#include -#include -#include -#include "Net.hpp" -#include "Executor.hpp" -#include "express/Express.hpp" -#include "tokenizers/BPE/Bpe.hpp" -#include "tokenizers/Tokenizer.hpp" -using namespace mllm; - -inline void fullTensor(shared_ptr input_tensor, mllm::Net *net, vector shape, float value) { - input_tensor->setBackend(net->backends()[BackendType::MLLM_CPU].get()); - input_tensor->reshape(shape[0], shape[1], shape[2], shape[3]); - input_tensor->setDtype(MLLM_TYPE_F32); - input_tensor->alloc(); - input_tensor->fullData(value); -} - -inline void token2Tensor(shared_ptr input_tensor, Net *net, vector tokens) { - input_tensor->setBackend(net->backends()[BackendType::MLLM_CPU].get()); - input_tensor->reshape(1, 1, static_cast(tokens.size()), 1); - input_tensor->setDtype(MLLM_TYPE_F32); - input_tensor->alloc(); - input_tensor->fullData(1); - for (int idx = 0; idx < tokens.size(); ++idx) { - input_tensor->setDataAt(0, 0, idx, 0, tokens[idx]); - } -} -inline void patches2Tensor(shared_ptr input_tensor, Net *net, vector>> image_patches) { - if(image_patches.empty()) { - fullTensor(input_tensor, net, {0, 0, 0, 0},1.0F); - return; - } - const int batch = image_patches.size(); - const int seq = image_patches[0].size(); - const int dims = image_patches[0][0].size(); - input_tensor->setBackend(net->backends()[BackendType::MLLM_CPU].get()); - input_tensor->reshape(batch, 1, seq, dims); - input_tensor->setDtype(MLLM_TYPE_F32); - input_tensor->alloc(); - for (int i = 0; i < batch; ++i) { - for (int j = 0; j < seq; ++j) { - for (int k = 0; k < dims; ++k) { - input_tensor->setDataAt(i, 0, j, k, image_patches[i][j][k]); - } - } - } -} -inline void patchIdx2Tensor(shared_ptr input_tensor, Net *net, vector> image_patches_indices) { - if(image_patches_indices.empty()) { - 
fullTensor(input_tensor, net, {0, 0, 0, 0},1.0F); - return; - } - const int batch = image_patches_indices.size(); - const int seq = image_patches_indices[0].size(); - input_tensor->setBackend(net->backends()[BackendType::MLLM_CPU].get()); - input_tensor->reshape(batch, 1, seq, 1); - input_tensor->setDtype(MLLM_TYPE_F32); - input_tensor->alloc(); - for (int i = 0; i < batch; ++i) { - for (int j = 0; j < seq; ++j) { - input_tensor->setDataAt(i, 0, j, 0, image_patches_indices[i][j]); - } - } -} - -inline unsigned int argmax(const std::vector &scores) { - if (scores.empty()) { - throw std::invalid_argument("Input vector is empty"); - } - unsigned int maxIndex = 0; - float maxValue = scores[0]; - for (size_t i = 1; i < scores.size(); ++i) { - if (scores[i] > maxValue) { - maxIndex = i; - maxValue = scores[i]; - } - } - return maxIndex; -} -#endif //HELPER_HPP diff --git a/tools/qnn_convertor/README.md b/tools/qnn_convertor/README.md new file mode 100644 index 000000000..8b6894eac --- /dev/null +++ b/tools/qnn_convertor/README.md @@ -0,0 +1,161 @@ +# MLLM QNN Convertor + +## Profile model +Currently, QNN only supports static quantization, which means we need to collect the activation distributions of every layer offline. + +Use following command to get distribution of a specific model +```bash +python get_distribution.py --config_file config/qwen1.5-1.8b.json +``` + +Two example configs are provided in the config directory for `qwen1.5-1.8b` and `showui-2b`. + +Currently, we support the following model types: `qwen2` and `qwen2-vl`. + +The activation scale information will be saved to the `output_file`, and the randomly generated rotation matrix will be saved to the `save_rotation` file. + +Note that `online_rotation` should be set to true if we are going to convert an original model that has not been rotated. Otherwise, `online_rotation` should be set to false. 
+ +The schema of the config file is: +```python +{ + "type": "object", + "required": ["profile_config", "export_config"], + "additionalProperties": False, + + "properties": { + "profile_config": { + "type": "object", + "required": [ + "dataset_path", "output_path", "num_samples", "no_bias", "model_config" + ], + "additionalProperties": False, + + "properties": { + "dataset_path": {"type": "string"}, # which dataset to use for profiling + "output_path": {"type": "string"}, # where to save the profiling results + "num_samples": {"type": "integer", "minimum": 2}, # number of samples to use in dataset to profile + "no_bias": {"type": "boolean"}, # if true, we will ignore bias when profiling a linear layer. that is, for a linear layer Wx + b, we will only record the output scale of Wx. + + "model_config": { + "type": "object", + "required": [ + "model_type", # currently only support qwen2 and qwen-vl(this is qwen2-vl, not qwen2.5-vl. you can refer to model_interface.py for details) + "tokenizer_name", # path to tokenizer + "model_name", # path to model + ], + "additionalProperties": True, + + "properties": { + "model_type": {"type": "string"}, + "tokenizer_name": {"type": "string"}, + "model_name": {"type": "string"}, + "online_rotation": {"type": "boolean"}, # rotate after loading model + "random_rotate": {"type": "boolean"}, # generate random rotation matrix and use it to rotate the model + "save_rotation": {"type": "string"}, # this is the path to save the rotation matrix + "R_path": {"type": "string"} # if online_rotation is true, rotation matrix from R_path will be used to rotate the model. 
The random_rotate and R_path are mutually exclusive + } + } + } + }, + + "export_config": { + "type": "object", + "required": [ + "scale_file", "output_model", "model_config" + ], + "additionalProperties": False, + + "properties": { + "scale_file": {"type": "string"}, + "output_model": {"type": "string"}, + "t01m_clip_threshold": {"type": "integer"}, + "quant_bias": {"type": "boolean"}, + "clip_all": {"type": "boolean"}, # if true, t01m_clip_threshold will not be effected + + "quantize_vit": {"type": "boolean"}, # if true, we will quantize vit model + + "model_config": { + "type": "object", + "required": [ + "model_type", + "tokenizer_name", + "model_name", + ], + "additionalProperties": True, + + "properties": { + "model_type": {"type": "string"}, + "tokenizer_name": {"type": "string"}, + "model_name": {"type": "string"}, + "online_rotation": {"type": "boolean"}, + "random_rotate": {"type": "boolean"}, + "save_rotation": {"type": "string"}, + "R_path": {"type": "string"} # R_path and random_rotate are mutually exclusive + } + } + } + } + } +} +``` + +The flowchart of the profiling process is shown below +```mermaid +flowchart TD + A[Load Model] --> B{online_rotation?} + B -->|Yes| C{random_rotate=True?} + B -->|No| E[Load Dataset] + C -->|Yes| D1[R:=random rotation matrix] + C -->|No| D2{R_path?} + C ---|mutually exclusive| D2 + D2 -->|Exists| D3[R:=rotation matrix from R_path] + D2 -->|Not set| E + D1 --> F[Apply Rotation R] + D3 --> F + F --> E + E --> G[Feed data to get distribution] +``` + + +## Export QNN Model + +Use the following command to export a QNN-compatible model: + +```bash +python export_qnn_model.py --config_file config/qwen1.5-1.8b.json +``` + +## Export FP32 Rotated Model + +You can also use the following command to export an FP32 rotated model that can be converted to an MLLM CPU model: + +```bash +python export_rotate_model.py --config_file config/qwen1.5-1.8b.json +``` + +This will export a rotated model in FP32 format that maintains the full 
precision while applying the rotation transformations. + +The flowchart of exporting process is shown below +```mermaid +flowchart TD + A[Load Model] --> B{online_rotation?} + B -->|Yes| C{random_rotate=True?} + B -->|No| E[Quantize model and save as mllm file] + C -->|Yes| D1[R:=random rotation matrix] + C -->|No| D2{R_path?} + C ---|mutually exclusive| D2 + D2 -->|Exists| D3[R:=rotation matrix from R_path] + D2 -->|Not set| E + D1 --> F[Apply Rotation R] + D3 --> F + F --> E +``` + + +## Workflow Summary + +1. **Profile activation scales**: Run `get_distribution_wobias.py` to collect activation statistics and generate rotation matrices +2. **Export quantized model**: Run `export_qnn_model.py` to create a quantized model for QNN deployment +3. **Export FP32 rotated model**: Run `export_rotate_model.py` to create an FP32 rotated model for CPU deployment +4. **Convert to MLLM format**: Use the standard MLLM convertor tools to generate the final deployment model diff --git a/tools/qnn_convertor/config/qwen1.5-1.8b.json b/tools/qnn_convertor/config/qwen1.5-1.8b.json new file mode 100644 index 000000000..c7f900e56 --- /dev/null +++ b/tools/qnn_convertor/config/qwen1.5-1.8b.json @@ -0,0 +1,34 @@ +{ + "profile_config": { + "dataset_path": "path/to/pile-val-backup/", + "output_path": "./dis/qwen1.5-1.8b-rot-dis.json", + "num_samples": 2, + "no_bias": true, + + "model_config": { + "model_type": "qwen2", + "tokenizer_name": "path/to/Qwen1.5-1.8B-Chat", + "model_name": "path/to/Qwen1.5-1.8B-Chat", + + "online_rotation": true, + "random_rotate": true, + "save_rotation": "./R/qwen1.5-1.8b-rotation-matrix.bin" + } + }, + "export_config": { + "scale_file": "./dis/qwen1.5-1.8b-rot-dis.json", + "output_model": "./models/qwen1.5-1.8b-qnn.pth", + + "t01m_clip_threshold": 64, + "quant_bias": false, + + "model_config": { + "model_type": "qwen2", + "tokenizer_name": "path/to/Qwen1.5-1.8B-Chat", + "model_name": "path/to/Qwen1.5-1.8B-Chat", + + "online_rotation": true, + "R_path": 
"./R/qwen1.5-1.8b-rotation-matrix.bin" + } + } +} \ No newline at end of file diff --git a/tools/qnn_convertor/config/show-ui-2b.json b/tools/qnn_convertor/config/show-ui-2b.json new file mode 100644 index 000000000..067bc5da3 --- /dev/null +++ b/tools/qnn_convertor/config/show-ui-2b.json @@ -0,0 +1,39 @@ +{ + "profile_config": { + "dataset_path": "path/to/ScreenSpot", + "output_path": "./dis/showui_rot_screenqa_nobias_dis.json", + "num_samples": 2, + "no_bias": true, + + "model_config": { + "model_type": "qwen2-vl", + "tokenizer_name": "path/to/Qwen2-VL-2B-Instruct", + "model_name": "path/to/ShowUI-2B", + + "online_rotation": true, + "rotate_vit": true, + "no_quantize": true, + "random_rotate": true, + "save_rotation": "./R/ShowUI-2B-rotation-matrix.bin" + } + }, + "export_config": { + "scale_file": "./dis/showui_rot_screenqa_nobias_dis.json", + "output_model": "path/to/ShowUI-2B-Rot-QNN.pth", + + "t01m_clip_threshold": 64, + "quant_bias": false, + "quantize_vit": true, + + "model_config": { + "model_type": "qwen2-vl", + "tokenizer_name": "path/to/Qwen2-VL-2B-Instruct", + "model_name": "path/to/ShowUI-2B", + + "online_rotation": true, + "rotate_vit": true, + "no_quantize": true, + "R_path": "mllm_qnn_convertor/R/ShowUI-2B-R.bin" + } + } +} \ No newline at end of file diff --git a/tools/qnn_convertor/export_qnn_model.py b/tools/qnn_convertor/export_qnn_model.py new file mode 100644 index 000000000..07b731ff6 --- /dev/null +++ b/tools/qnn_convertor/export_qnn_model.py @@ -0,0 +1,249 @@ +import argparse +import torch +import json +from typing import Dict, Any + +from model_interface import ModelFactory, ModelInterface +from utils.get_input_output_scales import get_clip_and_scale + + +class ModelExporter: + """通用模型导出器""" + + def __init__(self, model_interface: ModelInterface, args): + self.model_interface = model_interface + self.args = args + self.model = model_interface.get_model_for_hook() + + @torch.no_grad() + def quantize_weight_per_tensor_absmax(self, w, 
n_bits=8): + """权重量化""" + w = w.to("cuda") + scales = w.abs().max() + q_max = 2 ** (n_bits - 1) - 1 + scales.clamp_(min=1e-5).div_(q_max) + w.div_(scales).round_() + + if n_bits == 8: + w = w.to("cpu").type(torch.int8) + elif n_bits == 16 or n_bits == 32: + w = w.to("cpu").type(torch.int32) + else: + w = w.to("cpu").type(torch.int8) + scale = scales.to("cpu").type(torch.float32) + return w, scale + + @torch.no_grad() + def quantize_bias_per_tensor_absmax(self, w, n_bits=8): + """bias量化""" + w = w.to("cuda") + scales = w.abs().max() + q_max = 2 ** (n_bits - 1) - 1 + scales.clamp_(min=1e-5).div_(q_max) + w.div_(scales).round_() + + w = w.to("cpu").type(torch.int32) + scale = scales.to("cpu").type(torch.float32) + return w, scale + + def get_activation_scales(self, act_dict_path: str): + """获取激活量化参数""" + act_dict = json.load(open(act_dict_path)) + + skip_layers = self.model_interface.get_skip_layers() + no_clip_input = skip_layers.get("no_clip_input", set()) + no_clip_output = skip_layers.get("no_clip_output", set()) + + act_scales, clip_top, return_dict = get_clip_and_scale( + act_dict, + self.args.t01m_clip_threshold, + self.args.clip_all, + no_clip_input=no_clip_input, + no_clip_output=no_clip_output + ) + + # 打印统计信息 + print(f"clip input num: {return_dict['clip_input_num']}") + print(f"clip output num: {return_dict['clip_output_num']}") + print(f"no clip input num: {return_dict['no_clip_input_num']}") + for i in return_dict["no_clip_input_name"]: + print(f"no clip input: {i}") + print(f"no clip output num: {return_dict['no_clip_output_num']}") + for i in return_dict["no_clip_output_name"]: + print(f"no clip output: {i}") + + return act_scales, clip_top + + def should_skip_layer(self, name: str) -> bool: + """判断是否应该跳过导出这个层""" + skip_layers = self.model_interface.get_skip_layers() + skip_patterns = skip_layers.get("skip_export", set()) + + for pattern in skip_patterns: + if pattern in name: + return True + return False + + def should_quantize_layer(self, name: str) 
-> bool: + """判断是否应该量化这个层""" + rules = self.model_interface.get_special_quantization_rules() + skip_layers = rules.get("skip_layers", set()) + + layer_name = name.replace(".weight", "").replace(".bias", "") + for skip_pattern in skip_layers: + if skip_pattern in layer_name: + return False + return True + + def is_head_layer(self, name: str) -> bool: + """判断是否是head层""" + rules = self.model_interface.get_special_quantization_rules() + head_layers = rules.get("head_layers", set()) + + for head_pattern in head_layers: + if head_pattern in name: + return True + return False + + def export_model(self, act_dict_path: str, output_path: str): + """导出量化模型""" + + # 获取激活量化参数 + act_scales, clip_top = self.get_activation_scales(act_dict_path) + + # 获取模型状态字典 + model_dict = self.model.state_dict() + # move parameters to CPU + for key in model_dict: + model_dict[key] = model_dict[key].cpu() + + # 添加激活量化参数 + for layer_name in act_scales: + model_dict[layer_name + ".input_scale"] = torch.tensor(act_scales[layer_name]["input"]) + model_dict[layer_name + ".output_scale"] = torch.tensor(act_scales[layer_name]["output"]) + print(f"{layer_name} input scale: {act_scales[layer_name]['input']}, output scale: {act_scales[layer_name]['output']}") + model_dict[layer_name + ".clip_input"] = torch.tensor(clip_top[layer_name]["input"]) + model_dict[layer_name + ".clip_output"] = torch.tensor(clip_top[layer_name]["output"]) + + # 量化和导出 + new_model = {} + rules = self.model_interface.get_special_quantization_rules() + + for name, param in model_dict.items(): + print(name) + + # 跳过特定层 + if self.should_skip_layer(name): + print(f"Skipping {name} as per skip rules") + continue + + # 不量化的层直接复制 + if not self.should_quantize_layer(name): + new_model[name] = param + print(f"Skipping quantization for {name} as per special rules") + continue + + # 权重量化 + if name.replace(".weight", "") in act_scales: + if not self.is_head_layer(name): + layer_name = name + new_model[layer_name], scale = 
self.quantize_weight_per_tensor_absmax(param, 8) + new_model[layer_name + ".scale"] = scale + + # QNN需要转置权重 + if self.model_interface.should_transpose_weight(layer_name): + new_model[name] = new_model[name].transpose(-2, -1) + + print(f"Quantized {layer_name} with scale {scale}") + else: + new_model[name] = param + + # bias量化 + elif name.replace(".bias", "") in act_scales: + if not self.is_head_layer(name): + layer_name = name + if not self.args.quant_bias: + new_model[name] = param + print(f"FP {layer_name}") + else: + new_model[layer_name], scale = self.quantize_bias_per_tensor_absmax(param, 8) + new_model[layer_name + ".scale"] = scale + print(f"Quantized {layer_name} with scale {scale}") + else: + new_model[name] = param + else: + new_model[name] = param + + # 保存模型 + torch.save(new_model, output_path) + print(f"Model saved to {output_path}") + + +from utils.config import ConfigDict, CONFIG_SCHEMA, validate_config + +from pathlib import Path + +def ensure_parent_dir(path: str | Path) -> None: + path = Path(path) + target_dir = path.parent if path.suffix else path + target_dir.mkdir(parents=True, exist_ok=True) + + +MODEL_2_VIT_NAME = { + "qwen2-vl": "visual" +} + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--config_file", type=str, required=True, help="Path to the config file") + args = parser.parse_args() + + config = ConfigDict(json.load(open(args.config_file, "r"))) + config.check_schema(CONFIG_SCHEMA) + validate_config(config) + export_config = config.export_config + + model_config = export_config.model_config + model_type = model_config.model_type + tokenizer_name = model_config.tokenizer_name + model_name = model_config.model_name + scale_file = export_config.scale_file + output_model = export_config.output_model + + assert export_config.quantize_vit is None or model_type in MODEL_2_VIT_NAME, \ + f"Model type {model_type} does not have quantization config for ViT" + + # when explicitly set quantize_vit to False 
+ # we skip the quantization of ViT layers + # if not set or set to true, we don't skip any layer to quantize + if export_config.quantize_vit == False: + vit_name = MODEL_2_VIT_NAME.get(model_type, None) + model_config["special_quantization_rules"] = {} + model_config["special_quantization_rules"]["skip_layers"] = {vit_name} + + if model_config.random_rotate and model_config.R_path: + raise ValueError("random_rotation and R_path cannot be true at the same time") + + ensure_parent_dir(output_model) + + print("model:", model_name) + print("model type:", model_type) + print("scale file:", scale_file) + print("t01m clip threshold:", export_config.t01m_clip_threshold) + print("output model:", output_model) + print("Quantize bias:", export_config.quant_bias) + print("quantize_vit:", export_config.quantize_vit) + print(f"model config: {model_config}") + + # 创建模型接口 + model_interface = ModelFactory.create_model( + model_type=model_type, + tokenizer_name=tokenizer_name, + model_name=model_name, + args=model_config + ) + + # 创建导出器并导出模型 + exporter = ModelExporter(model_interface, export_config) + exporter.export_model(scale_file, output_model) diff --git a/tools/qnn_convertor/export_rotate_model.py b/tools/qnn_convertor/export_rotate_model.py new file mode 100644 index 000000000..02aa0710d --- /dev/null +++ b/tools/qnn_convertor/export_rotate_model.py @@ -0,0 +1,87 @@ +import argparse +import torch +import json + +from model_interface import ModelFactory + + +class RotateModelExporter: + """旋转模型导出器 - 简化版,只导出state_dict""" + + def __init__(self, model_interface): + self.model_interface = model_interface + self.model = model_interface.get_model_for_hook() + + def export_model(self, output_path: str): + """导出模型state_dict""" + print("Getting model state dict...") + + # 获取模型状态字典 + model_dict = self.model.state_dict() + + # 移动到CPU(如果在GPU上) + print("Moving parameters to CPU...") + for key in model_dict: + model_dict[key] = model_dict[key].cpu() + + # 保存模型 + print(f"Saving model to 
{output_path}...") + torch.save(model_dict, output_path) + print(f"Model successfully saved to {output_path}") + + +from utils.config import ConfigDict, CONFIG_SCHEMA, validate_config + +from pathlib import Path + +def ensure_parent_dir(path: str | Path) -> None: + path = Path(path) + target_dir = path.parent if path.suffix else path + target_dir.mkdir(parents=True, exist_ok=True) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--config_file", type=str, required=True, help="Path to the config file") + args = parser.parse_args() + + # 从配置文件加载参数 + config = ConfigDict(json.load(open(args.config_file, "r"))) + config.check_schema(CONFIG_SCHEMA) + validate_config(config) + export_config = config.export_config + model_config = export_config.model_config + + if model_config.random_rotate and model_config.R_path: + raise ValueError("random_rotate and R_path cannot be true at the same time") + + model_type = model_config.model_type + tokenizer_name = model_config.tokenizer_name + model_name = model_config.model_name + output_model = export_config.output_model + ensure_parent_dir(output_model) + + print("=" * 50) + print("Rotate Model Export Configuration") + print("=" * 50) + print(f"Model type: {model_type}") + print(f"Model name: {model_name}") + print(f"Tokenizer name: {tokenizer_name}") + print(f"Output model: {output_model}") + print(f"Model config: {model_config}") + print("=" * 50) + + + print("Creating model interface...") + model_interface = ModelFactory.create_model( + model_type=model_type, + tokenizer_name=tokenizer_name, + model_name=model_name, + args=model_config + ) + + print("Creating exporter...") + exporter = RotateModelExporter(model_interface) + exporter.export_model(output_model) + + print("Export completed successfully!") + \ No newline at end of file diff --git a/tools/qnn_convertor/get_distribution.py b/tools/qnn_convertor/get_distribution.py new file mode 100644 index 000000000..059159d11 --- /dev/null +++ 
b/tools/qnn_convertor/get_distribution.py @@ -0,0 +1,221 @@ +from functools import partial +import gc +import json + +import torch +import numpy as np + +import argparse +import json + +from model_interface import ModelFactory, ModelInterface + + +def flatten_act_dict(act_dict): + for layer, scales in act_dict.items(): + if isinstance(scales, list): + try: + all_acts = np.array(scales).reshape(-1) + except ValueError: + all_acts = [np.array(scale).reshape(-1) for scale in scales] + all_acts = np.concatenate(all_acts) + act_dict[layer] = all_acts + else: + act_dict[layer] = flatten_act_dict(scales) + print(layer) + gc.collect() + + return act_dict + +def get_act_percentage(act_dict: dict, threshold: float): + assert 0 <= threshold <= 1 + percentage = 1 - threshold + act_percentage = {} + for layer, scales in act_dict.items(): + if not isinstance(scales, dict): + all_acts_flattened = scales + percentage_index = int(len(all_acts_flattened) * percentage) - 1 + nth_percentile_value = np.partition(all_acts_flattened, percentage_index)[ + percentage_index + ] + act_percentage[layer] = float(nth_percentile_value) + else: + print(layer) + act_percentage[layer] = get_act_percentage(scales, threshold) + return act_percentage + + +@torch.no_grad() +def get_static_decoder_layer_scales_distribution( + model_interface: ModelInterface, + dataset_path, + num_samples=32, + no_bias=True, +): + act_dict = {} + + def stat_io_hook(m, x, y, name): + if isinstance(x, tuple): + x = x[0] + if name not in act_dict: + act_dict[name] = {} + if "input" not in act_dict[name]: + act_dict[name]["input"] = [] + act_dict[name]["input"].append(x.clone().detach().cpu().numpy()) + if isinstance(y, tuple): + y = y[0] + + ty = y.clone().detach().cpu() + # 去除 bias(只针对 nn.Linear) + if no_bias and isinstance(m, torch.nn.Linear) and m.bias is not None: + # print(name + str(".wobias")) + # print(y.shape) + + bias = m.bias.clone().detach().view(1, -1) # shape [1, out_features] + ty = ty - bias.to(ty.device) + 
+ if "output" not in act_dict[name]: + act_dict[name]["output"] = [] + act_dict[name]["output"].append(ty.detach().cpu().numpy()) + + hooks = [] + model_for_hook = model_interface.get_model_for_hook() + for name, m in model_for_hook.named_modules(): + if isinstance(m, torch.nn.Linear): + hooks.append(m.register_forward_hook(partial(stat_io_hook, name=name))) + + print("Collecting activation scales...") + + from tqdm import tqdm + + dataset = model_interface.load_dataset(dataset_path, split="test") + + # 打乱数据集,设置随机种子以确保可重复性 + shuffled_dataset = dataset.shuffle(seed=42) + + processed_count = 0 + correct = 0 + + with tqdm(total=num_samples) as pbar: + + pbar.set_description("Processing Dataset:") + for data in shuffled_dataset: + if model_interface.should_process_sample(data): + # 进行推理 + inference_result = model_interface.infer(data) + + # 评估结果 + is_correct = model_interface.evaluate_sample(data, inference_result) + if is_correct: + correct += 1 + else: + print(f"Sample failed: {data.get('file_name', 'unknown')}") + + processed_count += 1 + pbar.update(1) + + if processed_count >= num_samples: + break + + if processed_count > 0: + print(f"Accuracy: {correct / processed_count:.4f} ({correct}/{processed_count})") + else: + print("No samples were processed") + + + for hook in hooks: + hook.remove() + + return act_dict + + +def get_act_distribution_stat(act_dict): + act_distribution = {} + for layer, scales in act_dict.items(): + if not isinstance(scales, dict): + act_distribution[layer] = { + "mean": float(np.mean(scales)), + "std": float(np.std(scales)), + } + else: + act_distribution[layer] = get_act_distribution_stat(scales) + return act_distribution + +from utils.config import ConfigDict, CONFIG_SCHEMA, validate_config + +from pathlib import Path + +def ensure_parent_dir(path: str | Path) -> None: + path = Path(path) + target_dir = path.parent if path.suffix else path + target_dir.mkdir(parents=True, exist_ok=True) + + +if __name__ == "__main__": + parser = 
argparse.ArgumentParser() + parser.add_argument("--config_file", type=str, default="mllm_qnn_convertor/config/qwen1.5-1.8b.json", help="Path to the config file") + args = parser.parse_args() + + config = ConfigDict(json.load(open(args.config_file, "r"))) + config.check_schema(CONFIG_SCHEMA) + validate_config(config) + profile_config = config.profile_config + + profile_model_config = profile_config.model_config + model_type = profile_model_config.model_type + tokenizer_name = profile_model_config.tokenizer_name + model_name = profile_model_config.model_name + dataset_path = profile_config.dataset_path + output_file = profile_config.output_path + ensure_parent_dir(output_file) + num_samples = profile_config.get("num_samples", 32) + no_bias = profile_config.get("no_bias", True) + + + print("=" * 60) + print("Get Distribution Configuration:") + print("=" * 60) + print(f"Model Type: {model_type}") + print(f"Tokenizer Name: {tokenizer_name}") + print(f"Model Name: {model_name}") + print(f"Dataset Path: {dataset_path}") + print(f"Output File: {output_file}") + print(f"Number of Samples: {num_samples}") + print(f"No Bias: {no_bias}") + print(f"Model Config: {dict(profile_model_config)}") + print("=" * 60) + print() + + if profile_model_config.save_rotation: + ensure_parent_dir(profile_model_config.save_rotation) + + if profile_model_config.random_rotate and profile_model_config.R_path: + raise ValueError("random_rotate and R_path cannot be true at the same time") + + model_interface = ModelFactory.create_model( + model_type=model_type, + tokenizer_name=tokenizer_name, + model_name=model_name, + args=profile_model_config + ) + # FIXME: when num_samples is 1, this script will panic + act_dict = get_static_decoder_layer_scales_distribution(model_interface, dataset_path, num_samples, no_bias) + + print("begin_flatten") + act_dict = flatten_act_dict(act_dict) + print("finish flatten") + + # origin model scale + print("begin_calculate") + print("get act 0") + ori_scale = 
get_act_percentage(act_dict, 0) + # scale after remove top 0.1% outliers + print("get act 0.001") + top_0_1_scale = get_act_percentage(act_dict, 0.001) + # get mean and std of all scales + print("get act distribution") + all_stat = get_act_distribution_stat(act_dict) + res_dict = {"ori": ori_scale, "top_0_1": top_0_1_scale, "all_stat": all_stat} + with open(output_file, "w") as f: + json.dump(res_dict, f, indent=4, ensure_ascii=False) + diff --git a/tools/qnn_convertor/model_interface.py b/tools/qnn_convertor/model_interface.py new file mode 100644 index 000000000..ef1db2f04 --- /dev/null +++ b/tools/qnn_convertor/model_interface.py @@ -0,0 +1,374 @@ +from abc import ABC, abstractmethod +from typing import Any, Dict, List, Tuple, Union, Set +import torch +from datasets import Dataset + + +class ModelRegistry: + """ + 模型注册表,用于自动注册模型类型 + """ + _registry = {} + + @classmethod + def register(cls, model_type: str): + """ + 装饰器,用于注册模型类型 + + Args: + model_type: 模型类型名称 + """ + def decorator(model_class): + cls._registry[model_type] = model_class + return model_class + return decorator + + @classmethod + def get_registry(cls) -> Dict[str, type]: + """获取注册表""" + return cls._registry.copy() + + @classmethod + def get_model_class(cls, model_type: str) -> type: + """根据模型类型获取模型类""" + if model_type not in cls._registry: + raise ValueError(f"Unknown model type: {model_type}. 
Available types: {list(cls._registry.keys())}") + return cls._registry[model_type] + + +class ModelInterface(ABC): + """ + 抽象基类,定义了模型接口的标准方法 + """ + + def __init__(self, tokenizer_name: str, model_name: str, args: Any): + """ + 初始化模型接口 + + Args: + tokenizer_name: tokenizer名称或路径 + model_name: 模型名称或路径 + args: 配置参数 + """ + self.tokenizer_name = tokenizer_name + self.model_name = model_name + self.args = args + self.model = None + self._load_model() + self.special_quantization_rules = {} + self.skip_layers = {} + if args.special_quantization_rules: + self.special_quantization_rules = args.special_quantization_rules + + if args.skip_layers: + self.skip_layers = args.skip_layers + + @abstractmethod + def _load_model(self): + """ + 加载模型,需要在子类中实现 + """ + pass + + @abstractmethod + def load_dataset(self, dataset_path: str, split: str = "test") -> Dataset: + """ + 加载数据集 + + Args: + dataset_path: 数据集路径 + split: 数据集分割(train/test/validation等) + + Returns: + Dataset: 加载的数据集 + """ + pass + + @abstractmethod + def infer(self, sample: Dict[str, Any]) -> Any: + """ + 对单个样本进行推理 + + Args: + sample: 数据集中的一个样本 + + Returns: + 推理结果 + """ + pass + + @abstractmethod + def evaluate_sample(self, sample: Dict[str, Any], inference_result: Any) -> bool: + """ + 评估单个样本的推理结果 + + Args: + sample: 数据集中的一个样本 + inference_result: 推理结果 + + Returns: + bool: 是否正确 + """ + pass + + @abstractmethod + def should_process_sample(self, sample: Dict[str, Any]) -> bool: + """ + 判断是否应该处理这个样本(用于过滤) + + Args: + sample: 数据集中的一个样本 + + Returns: + bool: 是否应该处理 + """ + pass + + def get_model_for_hook(self) -> torch.nn.Module: + """ + 获取用于注册hook的模型对象 + + Returns: + torch.nn.Module: 模型对象 + """ + return self.model + + def get_skip_layers(self) -> Dict[str, Set[str]]: + """ + 获取需要跳过的层 + + Returns: + Dict: {"skip_export": set(), "no_clip_input": set(), "no_clip_output": set()} + """ + return { + "skip_export": self.skip_layers.get("skip_export", set()) | self._get_skip_layers().get("skip_export", set()), + "no_clip_input": 
self.skip_layers.get("no_clip_input", set()) | self._get_skip_layers().get("no_clip_input", set()), + "no_clip_output": self.skip_layers.get("no_clip_output", set()) | self._get_skip_layers().get("no_clip_output", set()), + } + + @abstractmethod + def _get_skip_layers(self) -> Dict[str, Set[str]]: + """ + 获取需要跳过的层 + + Returns: + Dict: {"skip_export": set(), "no_clip_input": set(), "no_clip_output": set()} + """ + pass + + def get_special_quantization_rules(self) -> Dict[str, Any]: + """ + 获取特殊量化规则 + + Returns: + Dict: 特殊量化规则 + """ + return { + "skip_layers": self.special_quantization_rules.get("skip_layers", set()) | self._get_special_quantization_rules().get("skip_layers", set()), + "head_layers": self.special_quantization_rules.get("head_layers", set()) | self._get_special_quantization_rules().get("head_layers", set()), + } + + @abstractmethod + def _get_special_quantization_rules(self) -> Dict[str, Any]: + """ + 获取特殊量化规则 + + Returns: + Dict: 特殊量化规则 + """ + pass + + def should_transpose_weight(self, layer_name: str) -> bool: + """ + 判断权重是否需要转置(QNN特定需求) + + Args: + layer_name: 层名称 + + Returns: + bool: 是否需要转置 + """ + return True # 默认需要转置 + + +from transformers import AutoTokenizer, AutoModelForCausalLM +import sys, pathlib +sys.path.insert(0, str(pathlib.Path(__file__).resolve().parent.parent)) +import rotate + +@ModelRegistry.register("qwen2") +class QwenModelInterface(ModelInterface): + def _load_model(self): + self.tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name) + self.model = AutoModelForCausalLM.from_pretrained( + self.model_name, + torch_dtype=torch.float32, + device_map="auto") + self.model.eval() + if getattr(self.args, 'online_rotation', False): + print("Online rotation enabled") + if getattr(self.args, 'random_rotate', False): + print("Using random rotation matrix") + device = "cuda" if torch.cuda.is_available() else "cpu" + # model info + num_layers = self.model.config.num_hidden_layers + dim = self.model.config.hidden_size + qo_heads = 
self.model.config.num_attention_heads + head_dim = dim // qo_heads + # get random hadamard rotation matrix + R = rotate.get_orthogonal_matrix(dim, mode="hadamard", device=device) + R_v = [rotate.get_orthogonal_matrix(head_dim, mode="hadamard" , device=device) for _ in range(num_layers)] + + if getattr(self.args, 'save_rotation', None): + R_bin = { + "R": R, + "R_v": R_v, + } + torch.save(R_bin, self.args.save_rotation) + print(f"Rotation matrix saved to {self.args.save_rotation}") + else: + print(f"Using pre-defined rotation matrix from {getattr(self.args, 'R_path', './R.bin')}") + R_bin = torch.load(getattr(self.args, 'R_path', './R.bin')) + R = R_bin["R"] + R_v = R_bin["R_v"] + + print(f"Rotate model") + rotate.rotate_model(self.model, R, R_v) + + + def load_dataset(self, dataset_path: str, split: str = "test") -> Dataset: + from datasets import load_dataset + # ignore split since we only use one file + dataset = load_dataset("json", data_files=f"{dataset_path}/val.jsonl.zst", split="train") + return dataset + + def infer(self, sample: Dict[str, Any]) -> Any: + with torch.no_grad(): + inputs = self.tokenizer(sample["text"][:6000], return_tensors="pt").to(self.model.device) + # just simply forward + self.model(**inputs) + # don't return anything, just for profiling + return None + + def evaluate_sample(self, sample: Dict[str, Any], inference_result: Any) -> bool: + return True # For profiling, we don't need to evaluate correctness + + def should_process_sample(self, sample: Dict[str, Any]) -> bool: + return True + + def _get_skip_layers(self) -> Dict[str, Set[str]]: + return { + "skip_export": {"vision_tower"}, # 跳过视觉塔 + "no_clip_input": set(), + "no_clip_output": set(), + } + + def _get_special_quantization_rules(self) -> Dict[str, Any]: + return { + "skip_layers": {"lm_head", "merger"}, # 不量化的层 + "head_layers": {"head"}, # head层特殊处理 + } + + + +@ModelRegistry.register("qwen2-vl") +class ShowUIModelInterface(ModelInterface): + """ + ShowUI模型的具体实现 + """ + + def 
_load_model(self): + """加载ShowUI模型""" + from utils.model import LLMNPUShowUIModel + self.model = LLMNPUShowUIModel(self.tokenizer_name, self.model_name, args=self.args) + + def load_dataset(self, dataset_path: str, split: str = "test") -> Dataset: + """加载ScreenSpot数据集""" + from datasets import load_dataset + return load_dataset(dataset_path, split=split) + + def infer(self, sample: Dict[str, Any]) -> Any: + """对单个样本进行推理""" + return self.model.infer(sample["image"], sample["instruction"], None)[0] + + def evaluate_sample(self, sample: Dict[str, Any], inference_result: Any) -> bool: + """评估ScreenSpot样本的推理结果""" + import ast + try: + point = ast.literal_eval(inference_result) + bbox = sample["bbox"] + x_min, y_min, x_max, y_max = bbox + px, py = point + is_inside = (x_min <= px <= x_max) and (y_min <= py <= y_max) + return is_inside + except: + return False + + def should_process_sample(self, sample: Dict[str, Any]) -> bool: + """判断是否应该处理ScreenSpot样本""" + pc_or_mobile = sample["file_name"].split("_")[0] + return sample["data_type"] in ["text"] and pc_or_mobile == "mobile" + + def get_model_for_hook(self) -> torch.nn.Module: + """获取用于注册hook的模型对象""" + return self.model.model + + def _get_skip_layers(self) -> Dict[str, Set[str]]: + return { + "skip_export": {"vision_tower"}, + "no_clip_input": set(), + "no_clip_output": set(), + } + + def _get_special_quantization_rules(self) -> Dict[str, Any]: + return { + "skip_layers": {"lm_head", "merger"}, + "head_layers": {"lm_head"}, + } + + +class ModelFactory: + """ + 模型工厂类,用于创建不同类型的模型接口 + """ + + @classmethod + def create_model(cls, model_type: str, tokenizer_name: str, model_name: str, args: Any) -> ModelInterface: + """ + 创建模型接口实例 + + Args: + model_type: 模型类型 + tokenizer_name: tokenizer名称或路径 + model_name: 模型名称或路径 + args: 配置参数 + + Returns: + ModelInterface: 模型接口实例 + """ + model_class = ModelRegistry.get_model_class(model_type) + return model_class(tokenizer_name, model_name, args) + + @classmethod + def 
get_available_models(cls) -> List[str]: + """ + 获取所有可用的模型类型 + + Returns: + List[str]: 模型类型列表 + """ + return list(ModelRegistry.get_registry().keys()) + + @classmethod + def register_model(cls, model_type: str, model_class: type): + """ + 手动注册模型类型(兼容旧代码) + + Args: + model_type: 模型类型名称 + model_class: 模型类 + """ + ModelRegistry._registry[model_type] = model_class + +if __name__ == "__main__": + print("Available models:", ModelFactory.get_available_models()) diff --git a/tools/qnn_convertor/utils/__init__.py b/tools/qnn_convertor/utils/__init__.py new file mode 100644 index 000000000..756bd3107 --- /dev/null +++ b/tools/qnn_convertor/utils/__init__.py @@ -0,0 +1 @@ +from utils.config import ConfigDict \ No newline at end of file diff --git a/tools/qnn_convertor/utils/config.py b/tools/qnn_convertor/utils/config.py new file mode 100644 index 000000000..d9a5d176c --- /dev/null +++ b/tools/qnn_convertor/utils/config.py @@ -0,0 +1,220 @@ +class ConfigDict(dict): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + for k, v in self.items(): + if isinstance(v, dict): + self[k] = ConfigDict(v) + + """配置字典,支持点号访问""" + def __getattr__(self, key): + # try: + # return self[key] + # except KeyError: + # # 让getattr的默认值机制生效 + # raise AttributeError(key) + return self.get(key, None) + + __setattr__ = dict.__setitem__ + __delattr__ = dict.__delitem__ + + def __dir__(self): + return list(self.keys()) + list(super().__dir__()) + + def check_schema(self, schema: dict): + """ + 用 JSON Schema(dict 形式)校验当前 ConfigDict。 + 失败时抛 ValueError / TypeError,信息中包含字段路径。 + """ + def _check(value, schema, path): + def _err(msg): + raise ValueError(f"{path}: {msg}") + + type_req = schema.get("type") + if type_req == "object": + if not isinstance(value, dict): + _err(f"expected object, got {type(value).__name__}") + elif type_req == "array": + if not isinstance(value, list): + _err(f"expected array, got {type(value).__name__}") + elif type_req in ("string", "integer", "number", 
"boolean"): + py_type = {"string": str, "integer": int, + "number": (int, float), "boolean": bool}[type_req] + if not isinstance(value, py_type): + _err(f"expected {type_req}, got {type(value).__name__}") + + # ---- object 专用检查 ---- + if type_req == "object": + props = schema.get("properties", {}) + required = schema.get("required", []) + allow_extra = schema.get("additionalProperties", True) + + for k in required: + if k not in value: + _err(f"missing required field '{k}'") + + if allow_extra is False: + extra = set(value) - set(props) + if extra: + _err(f"unexpected fields {list(extra)}") + + for k, sub_schema in props.items(): + if k in value: + child_path = f"{path}.{k}" if path != "" else k + _check(value[k], sub_schema, child_path) + + # ---- array 专用检查 ---- + if type_req == "array": + items_schema = schema.get("items") + if items_schema: + for idx, item in enumerate(value): + child_path = f"{path}[{idx}]" + _check(item, items_schema, child_path) + + # ---- 数值 / 字符串约束 ---- + if isinstance(value, (int, float)): + if "minimum" in schema and value < schema["minimum"]: + _err(f"value {value} < minimum {schema['minimum']}") + if "maximum" in schema and value > schema["maximum"]: + _err(f"value {value} > maximum {schema['maximum']}") + + if isinstance(value, str) and "pattern" in schema: + import re + if not re.fullmatch(schema["pattern"], value): + _err(f"value '{value}' does not match pattern /{schema['pattern']}/") + + # ---- 枚举 ---- + if "enum" in schema and value not in schema["enum"]: + _err(f"value {value} not in allowed enum {schema['enum']}") + + _check(self, schema, "") + + +CONFIG_SCHEMA = { + "type": "object", + "required": ["profile_config", "export_config"], + "additionalProperties": False, + + "properties": { + "profile_config": { + "type": "object", + "required": [ + "dataset_path", "output_path", "num_samples", "no_bias", "model_config" + ], + "additionalProperties": False, + + "properties": { + "dataset_path": {"type": "string"}, # which dataset 
to use for profiling + "output_path": {"type": "string"}, # where to save the profiling results + "num_samples": {"type": "integer", "minimum": 2}, # number of samples to use in dataset to profile + "no_bias": {"type": "boolean"}, # if true, we will ignore bias when profiling a linear layer. that is, for a linear layer Wx + b, we will only record the output scale of Wx. + + "model_config": { + "type": "object", + "required": [ + "model_type", # currently only support qwen2 and qwen-vl(this is qwen2-vl, not qwen2.5-vl. you can refer to model_interface.py for details) + "tokenizer_name", # path to tokenizer + "model_name", # path to model + ], + "additionalProperties": True, + + "properties": { + "model_type": {"type": "string"}, + "tokenizer_name": {"type": "string"}, + "model_name": {"type": "string"}, + "online_rotation": {"type": "boolean"}, # rotate after loading model + "random_rotate": {"type": "boolean"}, # generate random rotation matrix and use it to rotate the model + "save_rotation": {"type": "string"}, # this is the path to save the rotation matrix + "R_path": {"type": "string"} # if online_rotation is true, rotation matrix from R_path will be used to rotate the model. 
random_rotate and R_path and random_rotate are mutually exclusive + } + } + } + }, + + "export_config": { + "type": "object", + "required": [ + "scale_file", "output_model", "model_config" + ], + "additionalProperties": False, + + "properties": { + "scale_file": {"type": "string"}, + "output_model": {"type": "string"}, + "t01m_clip_threshold": {"type": "integer"}, + "quant_bias": {"type": "boolean"}, + "clip_all": {"type": "boolean"}, # if true, t01m_clip_threshold will not be effected + + "quantize_vit": {"type": "boolean"}, # if true, we will quantize vit model + + "model_config": { + "type": "object", + "required": [ + "model_type", + "tokenizer_name", + "model_name", + ], + "additionalProperties": True, + + "properties": { + "model_type": {"type": "string"}, + "tokenizer_name": {"type": "string"}, + "model_name": {"type": "string"}, + "online_rotation": {"type": "boolean"}, + "random_rotate": {"type": "boolean"}, + "save_rotation": {"type": "string"}, + "R_path": {"type": "string"} # R_path and random_rotate are mutually exclusive + } + } + } + } + } +} + +def validate_config(config: ConfigDict): + if config.profile_config.model_config.online_rotation: + if not config.profile_config.no_bias: + raise ValueError("online_rotation requires no_bias to be true") + + if config.export_config.quant_bias: + raise ValueError("quant_bias cannot be true when online_rotation is enabled") + + assert config.profile_config.model_config.model_type == config.export_config.model_config.model_type, \ + "model_type in profile_config and export_config must match" + + assert config.profile_config.model_config.tokenizer_name == config.export_config.model_config.tokenizer_name, \ + "tokenizer_name in profile_config and export_config must match" + + assert config.profile_config.model_config.model_name == config.export_config.model_config.model_name, \ + "model_name in profile_config and export_config must match" + + +if __name__ == "__main__": + schema = { + "type": "object", + 
"properties": { + "name": {"type": "string"}, + "age": {"type": "integer", "minimum": 0}, + "address": { + "type": "object", + "properties": { + "street": {"type": "string"}, + "zip": {"type": "string", "pattern": r"^\d{5}$"} + }, + "required": ["street", "zip"], + "additionalProperties": False + } + }, + "required": ["name", "address"], + "additionalProperties": False + } + + cfg = ConfigDict({ + "name": "Alice", + "age": 30, + "address": { + "street": "Main St", + "zip": "12345" + } + }) + + cfg.check_schema(schema) diff --git a/tools/convertor/profiling_activation/utils/get_input_output_scales.py b/tools/qnn_convertor/utils/get_input_output_scales.py similarity index 87% rename from tools/convertor/profiling_activation/utils/get_input_output_scales.py rename to tools/qnn_convertor/utils/get_input_output_scales.py index 5dfaa5069..670d10a06 100644 --- a/tools/convertor/profiling_activation/utils/get_input_output_scales.py +++ b/tools/qnn_convertor/utils/get_input_output_scales.py @@ -58,7 +58,8 @@ def stat_io_hook(m, x, y, name): return act_dict -def get_clip_and_scale(act_dict: dict, t01m_thre=5) -> tuple: +def get_clip_and_scale(act_dict: dict, t01m_thre=5, clip_all=False, + no_clip_input=None, no_clip_output=None) -> tuple: """ Get the clipped(W8A8) and no clipped(shadow linear to restore origin scale) input and output scales of the model's layers. 
""" @@ -73,6 +74,9 @@ def get_clip_and_scale(act_dict: dict, t01m_thre=5) -> tuple: no_clip_output_num = 0 no_clip_input_name = [] no_clip_output_name = [] + + no_clip_input = no_clip_input if no_clip_input is not None else set() + no_clip_output = no_clip_output if no_clip_output is not None else set() for i in stat: top_0_1_input = top_0_1[i]["input"] @@ -80,7 +84,7 @@ def get_clip_and_scale(act_dict: dict, t01m_thre=5) -> tuple: act_scale[i] = {} clip_top[i] = {} # layer input - if top_0_1_input * t01m_thre > ori_scale[i]["input"]: + if (not (i in no_clip_input)) and (clip_all or top_0_1_input * t01m_thre > ori_scale[i]["input"]): clip_input_num += 1 clip_top[i]["input"] = True act_scale[i]["input"] = ori_scale[i]["input"] @@ -90,7 +94,7 @@ def get_clip_and_scale(act_dict: dict, t01m_thre=5) -> tuple: act_scale[i]["input"] = top_0_1[i]["input"] no_clip_input_name.append(i) # layer output - if top_0_1_output * t01m_thre > ori_scale[i]["output"]: + if (not (i in no_clip_output)) and (clip_all or top_0_1_output * t01m_thre > ori_scale[i]["output"]): clip_output_num += 1 clip_top[i]["output"] = True act_scale[i]["output"] = ori_scale[i]["output"] diff --git a/tools/qnn_convertor/utils/model.py b/tools/qnn_convertor/utils/model.py new file mode 100644 index 000000000..9635c0315 --- /dev/null +++ b/tools/qnn_convertor/utils/model.py @@ -0,0 +1,251 @@ +import os +import json +import torch +from PIL import Image +from transformers import AutoTokenizer, AutoProcessor +from transformers.models.qwen2_vl.modeling_qwen2_vl import Qwen2VLForConditionalGeneration + +from .wrapper import VisionMLPWrapper, MLPWrapper +from .quantization_simulation import quantize_qwen2vl_qkvnobias_like +from .get_input_output_scales import get_clip_and_scale + +MIN_PIXELS = 256 * 28 * 28 +MAX_PIXELS = 1280 * 28 * 28 +TP_REGION_FUSE_THRESHOLD = 10 + +_SCREENSPOT_SYSTEM = "Based on the screenshot of the page, I give a text description and you give its corresponding location." 
+_SYSTEM_point = "The coordinate represents a clickable location [x, y] for an element, which is a relative coordinate on the screenshot, scaled from 0 to 1." +_SYSTEM_point_int = "The coordinate represents a clickable location [x, y] for an element, which is a relative coordinate on the screenshot, scaled from 1 to 1000." + +_SCREENSPOT_USER = "<|image_1|>{system}{element}" + + +class LLMNPUShowUIProcessor: + def __init__(self, processor_path): + self.processor = AutoProcessor.from_pretrained( + processor_path, + min_pixels=MIN_PIXELS, + max_pixels=MAX_PIXELS, + model_max_length=8192, + ) + self.messages_template = { + "role": "user", + "content": [ + { + "type": "image", + "image": None, + }, + {"type": "text", "text": None}, + ], + } + self.chat_template = "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}" + + def screenspot_to_openai_qwen(self, element_name, image, xy_int=False): + transformed_data = [] + user_content = [] + + if xy_int: + system_prompt = _SCREENSPOT_SYSTEM + " " + _SYSTEM_point_int + else: + system_prompt = _SCREENSPOT_SYSTEM + " " + _SYSTEM_point + + "{system}<|image_1|>{element}" + 
user_content.append({"type": "text", "text": system_prompt}) + user_content.append(image) + user_content.append({"type": "text", "text": element_name}) + + transformed_data.append( + { + "role": "user", + "content": user_content, + }, + ) + return transformed_data + + def process(self, img: Image, text: str, json_path): + + img_dict = { + "type": "image", + "min_pixels": MIN_PIXELS, + "max_pixels": MAX_PIXELS, + "image": img, + } + source = self.screenspot_to_openai_qwen(text, img_dict) + prompt = self.processor.tokenizer.apply_chat_template( + source, + chat_template=self.chat_template, + tokenize=False, + add_generation_prompt=True, + ) + inputs = self.processor( + text=[prompt], + images=img, + videos=None, + padding=True, + return_tensors="pt", + ) + + return inputs + +class LLMNPUShowUIModel: + + def __init__(self, tokenizer_name, + model_name, + args, t01m_clip_threshold=64): + + self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name) + self.processor = LLMNPUShowUIProcessor(tokenizer_name) + model = Qwen2VLForConditionalGeneration.from_pretrained( + model_name, torch_dtype=torch.float32, device_map="cuda", return_dict_in_generate=True, + ) + # print(f"Model loaded: {model}") + if args.online_rotation: + import rotate + if not args.random_rotate: + print(f"load R from {args.R_path}") + R_bin = torch.load(args.R_path) + R = R_bin["R"] + R_v = R_bin["R_v"] + R_vit = R_bin["R_vit"] + R_vs_vit = R_bin["R_vs_vit"] + else: + device = "cuda" if torch.cuda.is_available() else "cpu" + + # model info + num_layers = model.config.num_hidden_layers + dim = model.config.hidden_size + qo_heads = model.config.num_attention_heads + head_dim = dim // qo_heads + + # get random hadamard rotation matrix + R = rotate.get_orthogonal_matrix(dim, mode="hadamard", device=device) + R_v = [rotate.get_orthogonal_matrix(head_dim, mode="hadamard", device=device) for _ in range(num_layers)] + + # vision info + vit_dim = model.config.vision_config.embed_dim + vit_heads = 
model.config.vision_config.num_heads + vit_head_dim = vit_dim // vit_heads + vit_layers = model.config.vision_config.depth + R_vit = rotate.get_orthogonal_matrix(vit_dim, mode="hadamard", device=device) + R_vs_vit = [rotate.get_orthogonal_matrix(vit_head_dim, mode="hadamard", device=device) for _ in range(vit_layers)] + + if args.save_rotation: + R_bin = { + "R": R, + "R_v": R_v, + "R_vit": R_vit, + "R_vs_vit": R_vs_vit + } + torch.save(R_bin, args.save_rotation) + print(f"Rotation matrix saved to {args.save_rotation}") + + from rotate import rotate_model + + rotate_model(model, R, R_v) + if args.rotate_vit: + rotate_model(model.visual, R_vit, R_vs_vit) + print(f"finish online rotation") + + if args.vision_mlp_rotate: + print("rotate vision mlp") + from rotate import hadmard_matrix + hadamard = hadmard_matrix(model.visual.config.embed_dim * model.visual.config.mlp_ratio, + "cuda") + for layer_idx in args.vision_layers_to_rotate: + mlp = model.visual.blocks[layer_idx].mlp + model.visual.blocks[layer_idx].mlp = VisionMLPWrapper(mlp, + hadamard if args.rot_fc1 else None, + hadamard if args.rot_fc2 else None) + print(f"rotate mlp layer {layer_idx} with {hadamard.shape}") + + if args.lm_mlp_rotate: + print("rotate lm mlp") + from rotate import hadmard_matrix + hadamard = hadmard_matrix(model.config.intermediate_size, "cuda") + for layer_idx in args.lm_layers_to_rotate: + mlp = model.model.layers[layer_idx].mlp + model.model.layers[layer_idx].mlp = MLPWrapper(mlp, + hadamard if args.rot_up else None, + hadamard if args.rot_gate else None, + hadamard if args.rot_down else None) + print(f"rotate lm mlp layer {layer_idx} with {hadamard.shape}") + + # print(f"model loaded: {model}") + + if not args.no_quantize: + no_clip_input = { + # "visual.blocks.22.mlp.fc2", + # "visual.blocks.23.mlp.fc2", + # "visual.blocks.24.mlp.fc2", + # "visual.blocks.25.mlp.fc2", + # "visual.blocks.26.mlp.fc2", + # "visual.blocks.27.mlp.fc2", + } + + no_clip_output = { + # 
"visual.blocks.22.mlp.fc2", + # "visual.blocks.23.mlp.fc2", + # "visual.blocks.24.mlp.fc2", + # "visual.blocks.25.mlp.fc2", + # "visual.blocks.26.mlp.fc2", + # "visual.blocks.27.mlp.fc2", + } + + act_dict = json.load(open(args.scale_file.name)) + + act_scales, clip_top, return_dict = get_clip_and_scale(act_dict, t01m_clip_threshold, args.clip_all, + no_clip_input=no_clip_input, no_clip_output=no_clip_output) + + file_name = os.path.basename(model_name) + "." + os.path.basename(args.scale_file.name) + "." + str(t01m_clip_threshold) + ".clip.info" + + f = None + if args.save_clip_info: + f = open(file_name, "a") + + print(f"clip input num: {return_dict['clip_input_num']}", file=f) + print(f"clip output num: {return_dict['clip_output_num']}", file=f) + print(f"no clip input num: {return_dict['no_clip_input_num']}", file=f) + for i in return_dict["no_clip_input_name"]: + print(f"no clip input: {i}", file=f) + print(f"no clip output num: {return_dict['no_clip_output_num']}", file=f) + for i in return_dict["no_clip_output_name"]: + print(f"no clip output: {i}", file=f) + + if args.save_clip_info: + f.close() + + print(f"quantize_vit: {args.quantize_vit}") + + nbits_dict = { + "model.layers.1.mlp.down_proj": 16, + "model.layers.26.mlp.down_proj": 16, + } + + model = quantize_qwen2vl_qkvnobias_like(model, act_scales, + layer_clip=clip_top, + quantize_ViT=args.quantize_vit, + nbits_dict=nbits_dict) + + self.model = model + + + def infer(self, image: Image, text: str, json_path: str): + inputs = self.processor.process(image, text, json_path) + inputs = inputs.to("cuda") + generated_ids = self.model.generate(**inputs, max_new_tokens=128, + # do_sample=False, + # temperature=None, + # top_p=None, + # top_k=None + ) + generated_ids_trimmed = [ + out_ids[len(in_ids) :] + for in_ids, out_ids in zip(inputs.input_ids, generated_ids["sequences"]) + ] + output_text = self.processor.processor.batch_decode( + generated_ids_trimmed, + skip_special_tokens=True, + 
clean_up_tokenization_spaces=False, + ) + return output_text + diff --git a/tools/qnn_convertor/utils/quantization_simulation.py b/tools/qnn_convertor/utils/quantization_simulation.py new file mode 100644 index 000000000..6ff4f8f5f --- /dev/null +++ b/tools/qnn_convertor/utils/quantization_simulation.py @@ -0,0 +1,1252 @@ +""" +This file performs a fake quantization of the model weights and activations. +It will be used to simulate the quantization of the model weights and activations. +""" + +import torch +from torch import nn +from functools import partial +import numpy as np + + +@torch.no_grad() +def simulate_quantize_weight_per_channel_absmax(w, n_bits=8): + scales = w.abs().max(dim=-1, keepdim=True)[0] + q_max = 2 ** (n_bits - 1) - 1 + scales.clamp_(min=1e-5).div_(q_max) + w.div_(scales).round_().mul_(scales) + return w + + +@torch.no_grad() +def simulate_quantize_weight_per_tensor_absmax(w, n_bits=8): + scales = w.abs().max() + q_max = 2 ** (n_bits - 1) - 1 + scales.clamp_(min=1e-5).div_(q_max) + w.div_(scales).round_().clamp_((-2**(n_bits-1)), (2**(n_bits-1)-1)).mul_(scales) + return w + + +# @torch.no_grad() +# def simulate_quantize_weight_scale_per_tensor_absmax(w, n_bits=8): +# scales = w.abs().max() +# q_max = 2 ** (n_bits - 1) - 1 +# scales.clamp_(min=1e-5).div_(q_max) +# return scales + + +@torch.no_grad() +def simulate_quantize_activation_per_tensor_static_input(t, scale=1, n_scale_bits = 8, n_bits=8, clip_top=False): + assert n_scale_bits <= n_bits + + scale = scale.clone().to(t.device) + t_shape = t.shape + t.view(-1, t_shape[-1]) + q_max = 2 ** (n_scale_bits - 1) - 1 + scale.clamp_(min=1e-5).div_(q_max) + t = t.div(scale).round() + if clip_top: + t = t.clamp(-2**(n_bits-1), 2**(n_bits-1) - 1) + t = t.mul(scale) + return t + + +@torch.no_grad() +def simulate_quantize_activation_per_tensor_static_output(t, scale=1, n_scale_bits = 8, n_bits=8, clip_top=False): + assert n_scale_bits <= n_bits + + scale = scale.clone().to(t.device) + t_shape = t.shape 
+ t.view(-1, t_shape[-1]) + q_max = 2 ** (n_scale_bits - 1) - 1 + scale.clamp_(min=1e-5).div_(q_max) + + t = t.div(scale) + t = t.round() + if clip_top: + t = t.clamp(-2**(n_bits-1), 2**(n_bits-1) - 1) + t = t.mul(scale) + return t + + +@torch.no_grad() +def simulate_quantize_activation_per_token_absmax(t, n_bits=8): + t_shape = t.shape + t.view(-1, t_shape[-1]) + scales = t.abs().max(dim=-1, keepdim=True)[0] + q_max = 2 ** (n_bits - 1) - 1 + scales.clamp_(min=1e-5).div_(q_max) + t.div_(scales).round_().mul_(scales) + return t + + +@torch.no_grad() +def simulate_quantize_activation_per_tensor_absmax(t, n_bits=8): + t_shape = t.shape + t.view(-1, t_shape[-1]) + scales = t.abs().max() + q_max = 2 ** (n_bits - 1) - 1 + scales.clamp_(min=1e-5).div_(q_max) + t.div_(scales).round_().mul_(scales) + return t + + +class W8A8LinearStatic(nn.Module): + def __init__( + self, + in_features, + out_features, + input_scale, + output_scale, + bias=True, + clip_top=False, + name="default" + ): + super().__init__() + self.in_features = in_features + self.out_features = out_features + self.input_scale = torch.tensor(input_scale) + self.output_scale = torch.tensor(output_scale) + + self.weight_scale = None + self.weight_quant_type = None + + self.printAct = False + + self.name= name + + self.register_buffer( + "weight", + torch.randn( + self.out_features, + self.in_features, + dtype=torch.float16, + requires_grad=False, + ), + ) + if bias: + self.register_buffer( + "bias", + torch.zeros( + (1, self.out_features), dtype=torch.float16, requires_grad=False + ), + ) + else: + self.register_buffer("bias", None) + + self.act_quant_input = partial( + simulate_quantize_activation_per_tensor_static_input, + n_bits=8, + clip_top=clip_top["input"], + ) + self.act_quant_output = partial( + simulate_quantize_activation_per_tensor_static_output, + n_bits=8, + clip_top=clip_top["output"], + ) + + def to(self, *args, **kwargs): + super(W8A8LinearStatic, self).to(*args, **kwargs) + self.weight = 
self.weight.to(*args, **kwargs) + if self.bias is not None: + self.bias = self.bias.to(*args, **kwargs) + return self + + @torch.no_grad() + def forward(self, x): + # perform online quantize-dequantize matmul to simulate W8A8 inference + # if self.name != "default": + # print(self.name) + # # dump output q_y to file + # cpu_q_y = x.to('cpu').float().squeeze(0) + # # print(self.name, cpu_q_y.shape) + # np.savetxt(self.name + "—input.csv", cpu_q_y.numpy(), delimiter=',', fmt='%.6f') + + q_x = self.act_quant_input(x, scale=self.input_scale) + + # if self.name != "default": + # print(self.name) + # print(q_x.shape) + # # dump output q_y to file + + # cpu_q_y = (q_x / (self.input_scale.clamp(min=1e-5).div(127))).to('cpu').int().squeeze(0) + # # print(self.name, cpu_q_y.shape) + # np.savetxt(self.name + "—input-int8.csv", cpu_q_y.numpy(), delimiter=',', fmt='%.6f') + y = torch.functional.F.linear(q_x, self.weight, self.bias) + q_y = self.act_quant_output(y, scale=self.output_scale) + + # if self.printAct: + # print(q_y) + + # if self.name != "default": + # print(self.name) + # # dump output q_y to file + # cpu_q_y = q_y.to('cpu').float().squeeze(0) + # print(self.name, cpu_q_y.shape) + # np.savetxt(self.name + ".csv", cpu_q_y.numpy(), delimiter=',', fmt='%.6f') + + return q_y + + @staticmethod + def from_float(module, scales, weight_quant_type="per_tensor", clip_top=False, name="default"): + assert isinstance(module, torch.nn.Linear) + + new_module = W8A8LinearStatic( + module.in_features, + module.out_features, + bias=module.bias is not None, + input_scale=scales["input"], + output_scale=scales["output"], + clip_top=clip_top, + name=name + ) + + if weight_quant_type == "per_channel": + new_module.weight = simulate_quantize_weight_per_channel_absmax( + module.weight, n_bits=8 + ) # use 8-bit integer for weight + elif weight_quant_type == "per_tensor": + new_module.weight = simulate_quantize_weight_per_tensor_absmax( + module.weight, n_bits=8 + ) + else: + raise 
ValueError(f"Invalid weight_quant: {weight_quant_type}") + + new_module.weight_quant_name = weight_quant_type + + if module.bias is not None: + if name != "default": + print(name) + # dump output q_y to file + cpu_bias = module.bias.to('cpu').float() + print(name, cpu_bias.shape) + np.savetxt(name + "-bias.csv", cpu_bias.numpy(), delimiter=',', fmt='%.6f') + new_module.bias = simulate_quantize_weight_per_tensor_absmax(module.bias, n_bits=8) + + return new_module + + + @staticmethod + def from_float_and_print(module, scales, weight_quant_type="per_tensor", clip_top=False): + assert isinstance(module, torch.nn.Linear) + + new_module = W8A8LinearStatic( + module.in_features, + module.out_features, + bias=module.bias is not None, + input_scale=scales["input"], + output_scale=scales["output"], + clip_top=clip_top, + ) + + if weight_quant_type == "per_channel": + new_module.weight = simulate_quantize_weight_per_channel_absmax( + module.weight, n_bits=8 + ) # use 8-bit integer for weight + elif weight_quant_type == "per_tensor": + new_module.weight = simulate_quantize_weight_per_tensor_absmax( + module.weight, n_bits=8 + ) + else: + raise ValueError(f"Invalid weight_quant: {weight_quant_type}") + + new_module.weight_quant_name = weight_quant_type + new_module.printAct = True + + if module.bias is not None: + new_module.bias = simulate_quantize_weight_per_tensor_absmax(module.bias, n_bits=8) + + return new_module + + def __repr__(self): + return f"W8A8LinearStatic({self.in_features}, {self.out_features}, bias={self.bias is not None}, weight_quant={self.weight_quant_name}, input_scale={self.input_scale.item()}, output_scale={self.output_scale.item()}, clip_top={self.act_quant_input.keywords['clip_top']})" + + + +class W8AX_LinearStatic(nn.Module): + def __init__( + self, + in_features, + out_features, + input_scale, + output_scale, + n_scale_bits, + n_bits, + bias=True, + clip_top=False, + name="default" + ): + super().__init__() + self.in_features = in_features + 
self.out_features = out_features + self.input_scale = torch.tensor(input_scale) + self.output_scale = torch.tensor(output_scale) + + self.weight_scale = None + self.weight_quant_type = None + + self.printAct = False + + self.name = name + + self.register_buffer( + "weight", + torch.randn( + self.out_features, + self.in_features, + dtype=torch.float16, + requires_grad=False, + ), + ) + if bias: + self.register_buffer( + "bias", + torch.zeros( + (1, self.out_features), dtype=torch.float16, requires_grad=False + ), + ) + else: + self.register_buffer("bias", None) + + self.act_quant_input = partial( + simulate_quantize_activation_per_tensor_static_input, + n_scale_bits=n_scale_bits, + n_bits=n_bits, + clip_top=clip_top["input"], + ) + self.act_quant_output = partial( + simulate_quantize_activation_per_tensor_static_output, + n_scale_bits=n_scale_bits, + n_bits=n_bits, + clip_top=clip_top["output"], + ) + + def to(self, *args, **kwargs): + super(W8AX_LinearStatic, self).to(*args, **kwargs) + self.weight = self.weight.to(*args, **kwargs) + if self.bias is not None: + self.bias = self.bias.to(*args, **kwargs) + return self + + @torch.no_grad() + def forward(self, x): + # perform online quantize-dequantize matmul to simulate W8A8 inference + + # if self.name != "default": + # print(self.name) + # print(x.shape) + # # dump output q_y to file + + # print(self.input_scale) + + # cpu_q_y = x.to('cpu').float().squeeze(0) + # # print(self.name, cpu_q_y.shape) + # np.savetxt(self.name + "—input-beforequant.csv", cpu_q_y.numpy(), delimiter=',', fmt='%.6f') + + q_x = self.act_quant_input(x, scale=self.input_scale) + + + # if self.name != "default": + # print(self.name) + # print(q_x.shape) + # # dump output q_y to file + + # cpu_q_y = (q_x / (self.input_scale.clamp(min=1e-5).div(127))).to('cpu').int().squeeze(0) + # # print(self.name, cpu_q_y.shape) + # np.savetxt(self.name + "—input-int8.csv", cpu_q_y.numpy(), delimiter=',', fmt='%.6f') + + + y = torch.functional.F.linear(q_x, 
self.weight, self.bias) + q_y = self.act_quant_output(y, scale=self.output_scale) + + # if self.printAct: + # print(q_y) + + # if self.name != "default": + # print(self.name) + # # dump output q_y to file + # cpu_q_y = q_y.to('cpu').float().squeeze(0) + # np.savetxt(self.name + ".csv", cpu_q_y.numpy(), delimiter=',', fmt='%.6f') + + # if self.name != "default": + # print(self.name) + # # dump output q_y to file + # cpu_q_y = (q_y/(self.output_scale/127.0)).to('cpu').float().squeeze(0) + # np.savetxt(self.name + "-output-int8.csv", cpu_q_y.numpy(), delimiter=',', fmt='%.6f') + return q_y + + @staticmethod + def from_float(module, scales, n_scale_bits, n_bits, weight_quant_type="per_tensor", clip_top=False, name="default"): + assert isinstance(module, torch.nn.Linear) + + new_module = W8AX_LinearStatic( + module.in_features, + module.out_features, + bias=module.bias is not None, + input_scale=scales["input"], + output_scale=scales["output"], + clip_top=clip_top, + n_scale_bits=n_scale_bits, + n_bits=n_bits, + name=name + ) + + if weight_quant_type == "per_channel": + new_module.weight = simulate_quantize_weight_per_channel_absmax( + module.weight, n_bits=8 + ) # use 8-bit integer for weight + elif weight_quant_type == "per_tensor": + new_module.weight = simulate_quantize_weight_per_tensor_absmax( + module.weight, n_bits=8 + ) + else: + raise ValueError(f"Invalid weight_quant: {weight_quant_type}") + + if module.bias is not None: + # if name != "default": + # print(name) + # # dump output q_y to file + # cpu_bias = module.bias.clone().to('cpu').float() + # print(name, cpu_bias.shape) + # np.savetxt(name + "-bias.csv", cpu_bias.detach().numpy(), delimiter=',', fmt='%.6f') + new_module.bias = simulate_quantize_weight_per_tensor_absmax(module.bias, n_bits=8) + + return new_module + + def __repr__(self): + return f"W8AXLinearStatic({self.in_features}, {self.out_features}, bias={self.bias is not None}, weight_quant={self.weight_quant_name}, 
input_scale={self.input_scale.item()}, output_scale={self.output_scale.item()}, clip_top={self.act_quant_input.keywords['clip_top']})" + +class W8AX_Nobias_LinearStatic(nn.Module): + def __init__( + self, + in_features, + out_features, + input_scale, + output_scale, + n_scale_bits, + n_bits, + bias=True, + clip_top=False, + name="default" + ): + super().__init__() + self.in_features = in_features + self.out_features = out_features + self.input_scale = torch.tensor(input_scale) + self.output_scale = torch.tensor(output_scale) + + self.weight_scale = None + self.weight_quant_type = None + + self.printAct = False + + self.name = name + + self.register_buffer( + "weight", + torch.randn( + self.out_features, + self.in_features, + dtype=torch.float16, + requires_grad=False, + ), + ) + if bias: + self.register_buffer( + "bias", + torch.zeros( + (1, self.out_features), dtype=torch.float16, requires_grad=False + ), + ) + else: + self.register_buffer("bias", None) + + self.act_quant_input = partial( + simulate_quantize_activation_per_tensor_static_input, + n_scale_bits=n_scale_bits, + n_bits=n_bits, + clip_top=clip_top["input"], + ) + self.act_quant_output = partial( + simulate_quantize_activation_per_tensor_static_output, + n_scale_bits=n_scale_bits, + n_bits=n_bits, + clip_top=clip_top["output"], + ) + + def to(self, *args, **kwargs): + super(W8AX_Nobias_LinearStatic, self).to(*args, **kwargs) + self.weight = self.weight.to(*args, **kwargs) + if self.bias is not None: + self.bias = self.bias.to(*args, **kwargs) + return self + + @torch.no_grad() + def forward(self, x): + + + q_x = self.act_quant_input(x, scale=self.input_scale) + + y = torch.functional.F.linear(q_x, self.weight, None) + q_y = self.act_quant_output(y, scale=self.output_scale) + + if self.bias is not None: + q_y = q_y + self.bias + + return q_y + + @staticmethod + def from_float(module, scales, n_scale_bits, n_bits, weight_quant_type="per_tensor", clip_top=False, name="default"): + assert isinstance(module,
torch.nn.Linear) + + new_module = W8AX_Nobias_LinearStatic( + module.in_features, + module.out_features, + bias=module.bias is not None, + input_scale=scales["input"], + output_scale=scales["output"], + clip_top=clip_top, + n_scale_bits=n_scale_bits, + n_bits=n_bits, + name=name + ) + + if weight_quant_type == "per_channel": + new_module.weight = simulate_quantize_weight_per_channel_absmax( + module.weight, n_bits=8 + ) # use 8-bit integer for weight + elif weight_quant_type == "per_tensor": + new_module.weight = simulate_quantize_weight_per_tensor_absmax( + module.weight, n_bits=8 + ) + else: + raise ValueError(f"Invalid weight_quant: {weight_quant_type}") + new_module.weight_quant_name = weight_quant_type + if module.bias is not None: + + new_module.bias = module.bias + + return new_module + + def __repr__(self): + return f"W8AXLinearStatic({self.in_features}, {self.out_features}, bias={self.bias is not None}, weight_quant={self.weight_quant_name}, input_scale={self.input_scale.item()}, output_scale={self.output_scale.item()}, clip_top={self.act_quant_input.keywords['clip_top']})" + + +def quantize_opt( + model, + decoder_scales, + weight_quant="per_tensor", + act_quant="per_tensor", + quantize_bmm_input=True, + layer_clip={}, +): + from transformers.models.opt.modeling_opt import ( + OPTAttention, + OPTDecoderLayer, + ) + + for name, m in model.model.named_modules(): + + if isinstance(m, OPTDecoderLayer): + m.fc1 = W8A8LinearStatic.from_float( + m.fc1, + decoder_scales["model." + name + ".fc1"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".fc1"], + ) + m.fc2 = W8A8LinearStatic.from_float( + m.fc2, + decoder_scales["model." + name + ".fc2"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".fc2"], + ) + elif isinstance(m, OPTAttention): + # Here we simulate quantizing BMM inputs by quantizing the output of q_proj, k_proj, v_proj + m.q_proj = W8A8LinearStatic.from_float( + m.q_proj, + decoder_scales["model."
+ name + ".q_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".q_proj"], + ) + m.k_proj = W8A8LinearStatic.from_float( + m.k_proj, + decoder_scales["model." + name + ".k_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".k_proj"], + ) + m.v_proj = W8A8LinearStatic.from_float( + m.v_proj, + decoder_scales["model." + name + ".v_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".v_proj"], + ) + m.out_proj = W8A8LinearStatic.from_float( + m.out_proj, + decoder_scales["model." + name + ".out_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".out_proj"], + ) + return model + + +def quantize_llama_like( + model, + decoder_scales, + weight_quant="per_tensor", + act_quant="per_tensor", + quantize_bmm_input=False, + layer_clip={}, +): + from transformers.models.llama.modeling_llama import ( + LlamaAttention, + LlamaMLP, + ) + + from transformers.models.mistral.modeling_mistral import ( + MistralAttention, + MistralMLP, + ) + + for name, m in model.model.named_modules(): + if isinstance(m, (LlamaMLP, MistralMLP)): + m.gate_proj = W8A8LinearStatic.from_float( + m.gate_proj, + decoder_scales["model." + name + ".gate_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".gate_proj"], + ) + m.up_proj = W8A8LinearStatic.from_float( + m.up_proj, + decoder_scales["model." + name + ".up_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".up_proj"], + ) + m.down_proj = W8A8LinearStatic.from_float( + m.down_proj, + decoder_scales["model." + name + ".down_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".down_proj"], + ) + elif isinstance(m, (LlamaAttention, MistralAttention)): + # Here we simulate quantizing BMM inputs by quantizing the output of q_proj, k_proj, v_proj + m.q_proj = W8A8LinearStatic.from_float( + m.q_proj, + decoder_scales["model." 
+ name + ".q_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".q_proj"], + ) + m.k_proj = W8A8LinearStatic.from_float( + m.k_proj, + decoder_scales["model." + name + ".k_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".k_proj"], + ) + m.v_proj = W8A8LinearStatic.from_float( + m.v_proj, + decoder_scales["model." + name + ".v_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".v_proj"], + ) + m.o_proj = W8A8LinearStatic.from_float( + m.o_proj, + decoder_scales["model." + name + ".o_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".o_proj"], + ) + return model + + +def quantize_qwen2_like( + model, + decoder_scales, + weight_quant="per_tensor", + act_quant="per_tensor", + quantize_bmm_input=False, + layer_clip={}, +): + from transformers.models.qwen2.modeling_qwen2 import ( + Qwen2Attention, + Qwen2MLP, + ) + + for name, m in model.model.named_modules(): + if isinstance(m, Qwen2MLP): + m.gate_proj = W8A8LinearStatic.from_float( + m.gate_proj, + decoder_scales["model." + name + ".gate_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".gate_proj"], + name = "model." + name + ".gate_proj", + ) + m.up_proj = W8A8LinearStatic.from_float( + m.up_proj, + decoder_scales["model." + name + ".up_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".up_proj"], + name = "model." + name + ".up_proj", + ) + m.down_proj = W8A8LinearStatic.from_float( + m.down_proj, + decoder_scales["model." + name + ".down_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".down_proj"], + name = "model." 
+ name + ".down_proj", + ) + elif isinstance(m, Qwen2Attention): + # Here we simulate quantizing BMM inputs by quantizing the output of q_proj, k_proj, v_proj + # print name of module m + print(name) + m.q_proj = W8AX_LinearStatic.from_float( + m.q_proj, + decoder_scales["model." + name + ".q_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".q_proj"], + name = "model." + name + ".q_proj", + n_scale_bits = 16, + n_bits = 16, + ) + m.k_proj = W8AX_LinearStatic.from_float( + m.k_proj, + decoder_scales["model." + name + ".k_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".k_proj"], + name = "model." + name + ".k_proj", + n_scale_bits = 16, + n_bits = 16, + ) + m.v_proj = W8AX_LinearStatic.from_float( + m.v_proj, + decoder_scales["model." + name + ".v_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".v_proj"], + name = "model." + name + ".v_proj", + n_scale_bits = 16, + n_bits = 16, + ) + m.o_proj = W8A8LinearStatic.from_float( + m.o_proj, + decoder_scales["model." + name + ".o_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".o_proj"], + name = "model." + name + ".o_proj", + ) + return model + +def quantize_qwen2vl_like( + model, + decoder_scales, + weight_quant="per_tensor", + act_quant="per_tensor", + quantize_bmm_input=False, + layer_clip={}, +): + from transformers.models.qwen2_vl.modeling_qwen2_vl import ( + Qwen2VLSdpaAttention, + Qwen2MLP, + ) + + for name, m in model.model.named_modules(): + if isinstance(m, Qwen2MLP): + m.gate_proj = W8AX_LinearStatic.from_float( + m.gate_proj, + decoder_scales["model." + name + ".gate_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".gate_proj"], + n_scale_bits = 8, + n_bits = 8, + ) + m.up_proj = W8AX_LinearStatic.from_float( + m.up_proj, + decoder_scales["model." + name + ".up_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." 
+ name + ".up_proj"], + n_scale_bits = 8, + n_bits = 8, + ) + m.down_proj = W8AX_LinearStatic.from_float( + m.down_proj, + decoder_scales["model." + name + ".down_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".down_proj"], + n_scale_bits = 16, + n_bits = 16, + name = "model." + name + ".down_proj", + ) + elif isinstance(m, Qwen2VLSdpaAttention): + # Here we simulate quantizing BMM inputs by quantizing the output of q_proj, k_proj, v_proj + m.q_proj = W8AX_LinearStatic.from_float( + m.q_proj, + decoder_scales["model." + name + ".q_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".q_proj"], + n_scale_bits = 16, + n_bits = 16, + name= "model." + name + ".q_proj", + ) + m.k_proj = W8AX_LinearStatic.from_float( + m.k_proj, + decoder_scales["model." + name + ".k_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".k_proj"], + n_scale_bits = 16, + n_bits = 16, + name= "model." + name + ".k_proj", + ) + m.v_proj = W8AX_LinearStatic.from_float( + m.v_proj, + decoder_scales["model." + name + ".v_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".v_proj"], + n_scale_bits = 16, + n_bits = 16, + name= "model." + name + ".v_proj", + ) + + m.o_proj = W8AX_LinearStatic.from_float( + m.o_proj, + decoder_scales["model." + name + ".o_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".o_proj"], + n_scale_bits = 16, + n_bits = 16, + name= "model." + name + ".o_proj", + ) + return model + + +def quantize_qwen2vl_woqkv_like( + model, + decoder_scales, + weight_quant="per_tensor", + act_quant="per_tensor", + quantize_bmm_input=False, + layer_clip={}, +): + from transformers.models.qwen2_vl.modeling_qwen2_vl import ( + Qwen2VLSdpaAttention, + Qwen2MLP, + ) + + for name, m in model.model.named_modules(): + if isinstance(m, Qwen2MLP): + m.gate_proj = W8AX_LinearStatic.from_float( + m.gate_proj, + decoder_scales["model." 
+ name + ".gate_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".gate_proj"], + n_scale_bits = 8, + n_bits = 8, + ) + m.up_proj = W8AX_LinearStatic.from_float( + m.up_proj, + decoder_scales["model." + name + ".up_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".up_proj"], + n_scale_bits = 8, + n_bits = 8, + ) + m.down_proj = W8AX_LinearStatic.from_float( + m.down_proj, + decoder_scales["model." + name + ".down_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".down_proj"], + n_scale_bits = 16, + n_bits = 16, + name = "model." + name + ".down_proj", + ) + elif isinstance(m, Qwen2VLSdpaAttention): + # Here we simulate quantizing BMM inputs by quantizing the output of q_proj, k_proj, v_proj + # m.q_proj = W8AX_LinearStatic.from_float( + # m.q_proj, + # decoder_scales["model." + name + ".q_proj"], + # weight_quant_type=weight_quant, + # clip_top=layer_clip["model." + name + ".q_proj"], + # n_scale_bits = 16, + # n_bits = 16, + # name= "model." + name + ".q_proj", + # ) + # m.k_proj = W8AX_LinearStatic.from_float( + # m.k_proj, + # decoder_scales["model." + name + ".k_proj"], + # weight_quant_type=weight_quant, + # clip_top=layer_clip["model." + name + ".k_proj"], + # n_scale_bits = 16, + # n_bits = 16, + # name= "model." + name + ".k_proj", + # ) + # m.v_proj = W8AX_LinearStatic.from_float( + # m.v_proj, + # decoder_scales["model." + name + ".v_proj"], + # weight_quant_type=weight_quant, + # clip_top=layer_clip["model." + name + ".v_proj"], + # n_scale_bits = 16, + # n_bits = 16, + # name= "model." + name + ".v_proj", + # ) + + m.o_proj = W8AX_LinearStatic.from_float( + m.o_proj, + decoder_scales["model." + name + ".o_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".o_proj"], + n_scale_bits = 16, + n_bits = 16, + name= "model." 
+ name + ".o_proj", + ) + return model + + +def quantize_qwen2vl_qkvnobias_like( + model, + decoder_scales, + weight_quant="per_tensor", + act_quant="per_tensor", + quantize_bmm_input=False, + layer_clip=None, + quantize_ViT=False, + nbits_dict=None, +): + from transformers.models.qwen2_vl.modeling_qwen2_vl import ( + Qwen2VLSdpaAttention, + Qwen2MLP, + VisionMlp, + VisionSdpaAttention, + ) + + assert layer_clip is not None, "layer_clip should not be None" + nbits_dict = nbits_dict if nbits_dict is not None else {} + + if quantize_ViT: + for name, m in model.visual.named_modules(): + if isinstance(m, VisionMlp): + print(f"quantize: {name}.fc1/fc2") + m.fc1 = W8AX_Nobias_LinearStatic.from_float( + m.fc1, + decoder_scales["visual." + name + ".fc1"], + weight_quant_type=weight_quant, + clip_top=layer_clip["visual." + name + ".fc1"], + n_scale_bits = 8, + n_bits = 8, + ) + print(f"{'visual.' + name + '.fc1'}: {decoder_scales['visual.' + name + '.fc1']} {layer_clip['visual.' + name + '.fc1']}") + m.fc2 = W8AX_Nobias_LinearStatic.from_float( + m.fc2, + decoder_scales["visual." + name + ".fc2"], + weight_quant_type=weight_quant, + clip_top=layer_clip["visual." + name + ".fc2"], + n_scale_bits = 8, + n_bits = 8, + ) + print(f"{'visual.' + name + '.fc2'}: {decoder_scales['visual.' + name + '.fc2']} {layer_clip['visual.' + name + '.fc2']}") + + if isinstance(m, VisionSdpaAttention): + print(f"quantize: {name}.qkv/proj") + m.qkv = W8AX_Nobias_LinearStatic.from_float( + m.qkv, + decoder_scales["visual." + name + ".qkv"], + weight_quant_type=weight_quant, + clip_top=layer_clip["visual." + name + ".qkv"], + n_scale_bits = 16, + n_bits = 16, + name= "visual." + name + ".qkv", + ) + print(f"{'visual.' + name + '.qkv'}: {decoder_scales['visual.' + name + '.qkv']} {layer_clip['visual.' + name + '.qkv']}") + m.proj = W8AX_Nobias_LinearStatic.from_float( + m.proj, + decoder_scales["visual." + name + ".proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["visual."
+ name + ".proj"], + n_scale_bits = 8, + n_bits = 8, + name= "visual." + name + ".proj", + ) + print(f"{'visual.' + name + '.proj'}: {decoder_scales['visual.' + name + '.proj']} {layer_clip['visual.' + name + '.proj']}") + + for name, m in model.model.named_modules(): + if isinstance(m, Qwen2MLP): + print(f"{'model.' + name + '.down_proj'}: nbits: {nbits_dict.get('model.' + name + '.down_proj', 8)}") + print(f"quantize: {name}.gate_proj/up_proj/down_proj") + m.gate_proj = W8AX_LinearStatic.from_float( + m.gate_proj, + decoder_scales["model." + name + ".gate_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".gate_proj"], + n_scale_bits = 8, + n_bits = 8, + ) + print(f"{'model.' + name + '.gate_proj'}: {decoder_scales['model.' + name + '.gate_proj']} {layer_clip['model.' + name + '.gate_proj']}") + m.up_proj = W8AX_LinearStatic.from_float( + m.up_proj, + decoder_scales["model." + name + ".up_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".up_proj"], + n_scale_bits = 8, + n_bits = 8, + ) + print(f"{'model.' + name + '.up_proj'}: {decoder_scales['model.' + name + '.up_proj']} {layer_clip['model.' + name + '.up_proj']}") + m.down_proj = W8AX_LinearStatic.from_float( + m.down_proj, + decoder_scales["model." + name + ".down_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".down_proj"], + n_scale_bits = nbits_dict.get("model." + name + ".down_proj", 8), + n_bits = nbits_dict.get("model." + name + ".down_proj", 8), + name = "model." + name + ".down_proj", + ) + print(f"{'model.' + name + '.down_proj'}: {decoder_scales['model.' + name + '.down_proj']} {layer_clip['model.'
+ name + '.down_proj']}") + elif isinstance(m, Qwen2VLSdpaAttention): + print(f"quantize: {name}.q_proj/k_proj/v_proj/o_proj") + # Here we simulate quantizing BMM inputs by quantizing the output of q_proj, k_proj, v_proj + m.q_proj = W8AX_Nobias_LinearStatic.from_float( + m.q_proj, + decoder_scales["model." + name + ".q_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".q_proj"], + n_scale_bits = 16, + n_bits = 16, + name= "model." + name + ".q_proj", + ) + print(f"{"model." + name + ".q_proj"}: {decoder_scales['model.' + name + '.q_proj']} {layer_clip['model.' + name + '.q_proj']}") + m.k_proj = W8AX_Nobias_LinearStatic.from_float( + m.k_proj, + decoder_scales["model." + name + ".k_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".k_proj"], + n_scale_bits = 16, + n_bits = 16, + name= "model." + name + ".k_proj", + ) + print(f"{"model." + name + ".k_proj"}: {decoder_scales['model.' + name + '.k_proj']} {layer_clip['model.' + name + '.k_proj']}") + m.v_proj = W8AX_Nobias_LinearStatic.from_float( + m.v_proj, + decoder_scales["model." + name + ".v_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".v_proj"], + n_scale_bits = 16, + n_bits = 16, + name= "model." + name + ".v_proj", + ) + print(f"{"model." + name + ".v_proj"}: {decoder_scales['model.' + name + '.v_proj']} {layer_clip['model.' + name + '.v_proj']}") + m.o_proj = W8AX_LinearStatic.from_float( + m.o_proj, + decoder_scales["model." + name + ".o_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".o_proj"], + n_scale_bits = 8, + n_bits = 8, + name= "model." + name + ".o_proj", + ) + print(f"{"model." + name + ".o_proj"}: {decoder_scales['model.' + name + '.o_proj']} {layer_clip['model.' 
+ name + '.o_proj']}") + return model + +def quantize_gemma_like( + model, + decoder_scales, + weight_quant="per_tensor", + act_quant="per_tensor", + quantize_bmm_input=False, + layer_clip={}, +): + from transformers.models.gemma.modeling_gemma import ( + GemmaSdpaAttention, + GemmaMLP, + ) + + for name, m in model.model.named_modules(): + if isinstance(m, GemmaMLP): + m.gate_proj = W8A8LinearStatic.from_float( + m.gate_proj, + decoder_scales["model." + name + ".gate_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".gate_proj"], + ) + m.up_proj = W8A8LinearStatic.from_float( + m.up_proj, + decoder_scales["model." + name + ".up_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".up_proj"], + ) + m.down_proj = W8A8LinearStatic.from_float( + m.down_proj, + decoder_scales["model." + name + ".down_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".down_proj"], + ) + elif isinstance(m, GemmaSdpaAttention): + # Here we simulate quantizing BMM inputs by quantizing the output of q_proj, k_proj, v_proj + m.q_proj = W8A8LinearStatic.from_float( + m.q_proj, + decoder_scales["model." + name + ".q_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".q_proj"], + ) + m.k_proj = W8A8LinearStatic.from_float( + m.k_proj, + decoder_scales["model." + name + ".k_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".k_proj"], + ) + m.v_proj = W8A8LinearStatic.from_float( + m.v_proj, + decoder_scales["model." + name + ".v_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".v_proj"], + ) + m.o_proj = W8A8LinearStatic.from_float( + m.o_proj, + decoder_scales["model." + name + ".o_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." 
+ name + ".o_proj"], + ) + return model + + +def quantize_phi_like( + model, + decoder_scales, + weight_quant="per_tensor", + act_quant="per_tensor", + quantize_bmm_input=False, + layer_clip={}, +): + from transformers.models.phi.modeling_phi import ( + PhiAttention, + PhiMLP, + ) + + for name, m in model.model.named_modules(): + if isinstance(m, PhiMLP): + m.fc1 = W8A8LinearStatic.from_float( + m.fc1, + decoder_scales["model." + name + ".fc1"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".fc1"], + ) + m.fc2 = W8A8LinearStatic.from_float( + m.fc2, + decoder_scales["model." + name + ".fc2"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".fc2"], + ) + + elif isinstance(m, PhiAttention): + # Here we simulate quantizing BMM inputs by quantizing the output of q_proj, k_proj, v_proj + m.q_proj = W8A8LinearStatic.from_float( + m.q_proj, + decoder_scales["model." + name + ".q_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".q_proj"], + ) + m.k_proj = W8A8LinearStatic.from_float( + m.k_proj, + decoder_scales["model." + name + ".k_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".k_proj"], + ) + m.v_proj = W8A8LinearStatic.from_float( + m.v_proj, + decoder_scales["model." + name + ".v_proj"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." + name + ".v_proj"], + ) + m.dense = W8A8LinearStatic.from_float( + m.dense, + decoder_scales["model." + name + ".dense"], + weight_quant_type=weight_quant, + clip_top=layer_clip["model." 
+ name + ".dense"], + ) + return model + + +def quantize_mixtral( + model, weight_quant="per_channel", act_quant="per_token", quantize_bmm_input=False +): + from transformers.models.mixtral.modeling_mixtral import ( + MixtralAttention, + MixtralSparseMoeBlock, + MixtralBLockSparseTop2MLP, + ) + + for name, m in model.model.named_modules(): + if isinstance(m, MixtralBLockSparseTop2MLP): + m.w1 = W8A8LinearStatic.from_float( + m.w1, weight_quant_type=weight_quant, act_quant=act_quant + ) + m.w2 = W8A8LinearStatic.from_float( + m.w2, weight_quant_type=weight_quant, act_quant=act_quant + ) + m.w3 = W8A8LinearStatic.from_float( + m.w3, weight_quant_type=weight_quant, act_quant=act_quant + ) + elif isinstance(m, MixtralAttention): + # Here we simulate quantizing BMM inputs by quantizing the output of q_proj, k_proj, v_proj + m.q_proj = W8A8LinearStatic.from_float( + m.q_proj, + weight_quant_type=weight_quant, + act_quant=act_quant, + quantize_output=quantize_bmm_input, + ) + m.k_proj = W8A8LinearStatic.from_float( + m.k_proj, + weight_quant_type=weight_quant, + act_quant=act_quant, + quantize_output=quantize_bmm_input, + ) + m.v_proj = W8A8LinearStatic.from_float( + m.v_proj, + weight_quant_type=weight_quant, + act_quant=act_quant, + quantize_output=quantize_bmm_input, + ) + m.o_proj = W8A8LinearStatic.from_float( + m.o_proj, weight_quant_type=weight_quant, act_quant=act_quant + ) + elif isinstance(m, MixtralSparseMoeBlock): + m.gate = W8A8LinearStatic.from_float( + m.gate, weight_quant_type=weight_quant, act_quant=act_quant + ) + return model + + +def quantize_falcon_like( + model, + decoder_scales, + weight_quant="per_tensor", + act_quant="per_tensor", + quantize_bmm_input=False, + layer_clip={}, +): + from transformers.models.falcon.modeling_falcon import ( + FalconAttention, + FalconMLP, + ) + + for name, m in model.named_modules(): + if isinstance(m, FalconMLP): + m.dense_h_to_4h = W8A8LinearStatic.from_float( + m.dense_h_to_4h, + decoder_scales[name + 
".dense_h_to_4h"], + weight_quant_type=weight_quant, + clip_top=layer_clip[name + ".dense_h_to_4h"], + ) + m.dense_4h_to_h = W8A8LinearStatic.from_float( + m.dense_4h_to_h, + decoder_scales[name + ".dense_4h_to_h"], + weight_quant_type=weight_quant, + clip_top=layer_clip[name + ".dense_4h_to_h"], + ) + elif isinstance(m, FalconAttention): + # Here we simulate quantizing BMM inputs by quantizing the output of q_proj, k_proj, v_proj + m.query_key_value = W8A8LinearStatic.from_float( + m.query_key_value, + decoder_scales[name + ".query_key_value"], + weight_quant_type=weight_quant, + clip_top=layer_clip[name + ".query_key_value"], + ) + m.dense = W8A8LinearStatic.from_float( + m.dense, + decoder_scales[name + ".dense"], + weight_quant_type=weight_quant, + clip_top=layer_clip[name + ".dense"], + ) + return model \ No newline at end of file diff --git a/tools/qnn_convertor/utils/wrapper.py b/tools/qnn_convertor/utils/wrapper.py new file mode 100644 index 000000000..dc1270e69 --- /dev/null +++ b/tools/qnn_convertor/utils/wrapper.py @@ -0,0 +1,91 @@ +import torch +from torch import nn +from transformers.models.qwen2_vl.modeling_qwen2_vl import VisionMlp +from transformers.models.qwen2.modeling_qwen2 import Qwen2MLP +from rotate import AutoOperation + +class VisionMLPWrapper(nn.Module): + def __init__(self, mlp: VisionMlp, + hadamard_fc1: torch.Tensor = None, + hadamard_fc2: torch.Tensor = None): + super(VisionMLPWrapper, self).__init__() + self.mlp = mlp + + # assuming all weights are on the same device + self.device = mlp.fc1.weight.device + self.dtype = mlp.fc2.weight.dtype + + self.rotate_fc1 = True if hadamard_fc1 is not None else False + if self.rotate_fc1: + AutoOperation.rotate_output(mlp.fc1, hadamard_fc1) + self.register_buffer("hadamard_fc1_T", hadamard_fc1.T.to(self.device, dtype=self.dtype)) + + self.rotate_fc2 = True if hadamard_fc2 is not None else False + if self.rotate_fc2: + AutoOperation.rotate_input(mlp.fc2, hadamard_fc2.T) + 
self.register_buffer("hadamard_fc2", hadamard_fc2.to(self.device, dtype=self.dtype)) + + + def forward(self, x): + up = self.mlp.fc1(x) + + # rotate back + if self.rotate_fc1: + up = up @ self.hadamard_fc1_T + + act = self.mlp.act(up) + + if self.rotate_fc2: + # rotate + act = act @ self.hadamard_fc2 + + return self.mlp.fc2(act) + + +class MLPWrapper(nn.Module): + def __init__(self, mlp: Qwen2MLP, + hadamard_up: torch.Tensor = None, + hadamard_gate: torch.Tensor = None, + hadamard_down: torch.Tensor = None): + super(MLPWrapper, self).__init__() + self.mlp = mlp + + # assuming all weights are on the same device + self.device = mlp.up_proj.weight.device + self.dtype = mlp.up_proj.weight.dtype + + self.rotate_up = True if hadamard_up is not None else False + if self.rotate_up: + AutoOperation.rotate_output(mlp.up_proj, hadamard_up) + self.register_buffer("hadamard_up_T", hadamard_up.T.to(self.device, dtype=self.dtype)) + + self.rotate_gate = True if hadamard_gate is not None else False + if self.rotate_gate: + AutoOperation.rotate_output(mlp.gate_proj, hadamard_gate) + self.register_buffer("hadamard_gate_T", hadamard_gate.T.to(self.device, dtype=self.dtype)) + + self.rotate_down = True if hadamard_down is not None else False + if self.rotate_down: + AutoOperation.rotate_input(mlp.down_proj, hadamard_down.T) + self.register_buffer("hadamard_down", hadamard_down.to(self.device, dtype=self.dtype)) + + + def forward(self, x): + up = self.mlp.up_proj(x) + gate = self.mlp.gate_proj(x) + + # rotate back + if self.rotate_up: + up = up @ self.hadamard_up_T + + if self.rotate_gate: + gate = gate @ self.hadamard_gate_T + + gated_output = up * self.mlp.act_fn(gate) + + if self.rotate_down: + # rotate + gated_output = gated_output @ self.hadamard_down + + return self.mlp.down_proj(gated_output) + \ No newline at end of file diff --git a/tools/quantizer/ParamWriter.cpp b/tools/quantizer/ParamWriter.cpp new file mode 100644 index 000000000..c8928060a --- /dev/null +++ 
b/tools/quantizer/ParamWriter.cpp @@ -0,0 +1,84 @@ +// +// Created by Xiang Li on 23-10-30. +// +#include "ParamWriter.hpp" +#include +#include +#include +#include + +ParamWriter::ParamWriter(std::string filename) : + path_(std::move(filename)) { + fp_ = fopen(path_.c_str(), "wb"); + if (fp_ == nullptr) { + throw std::runtime_error("Failed to open file for writing: " + path_); + } + // _MAGIC_NUMBER is defined in ParamLoader.hpp + writeInt(fp_, _MAGIC_NUMBER); +} + +ParamWriter::~ParamWriter() { + if (fp_ != nullptr) + fclose(fp_); +} + +int ParamWriter::calcIndexSize(const std::vector &names) { + int size = 0; + for (const auto &name : names) { + // One Tensor Index Item Contains: Name_Len(Int)+Name(str)+Weights_Len(UInt64)+Offset(UInt64)+DataType(Int) + size += sizeof(int32_t) + name.size() + sizeof(uint64_t) + sizeof(uint64_t) + sizeof(int32_t); + } + return size; +} + +void ParamWriter::writeIndex() { + fseek(fp_, sizeof(int32_t) + sizeof(uint64_t), SEEK_SET); + for (const auto ¶m : param_info_) { + writeString(fp_, param.name); + write_u64(fp_, param.size); + write_u64(fp_, param.offset); + write_dtype(fp_, param.type); + } + fflush(fp_); +} + +void ParamWriter::beginWriteParam(const std::string &name, DataType type) { + if (index_ >= param_info_.size()) { + throw std::runtime_error("Parameter index out of bounds. 
Did you call paddingIndex correctly?"); + } + auto ¶m = param_info_[index_]; + param.name = name; + param.type = type; + param.offset = ftell(fp_); + + current_param_start_offset_ = param.offset; +} + +void ParamWriter::writeChunk(const void *data, uint64_t size_in_bytes) { + if (size_in_bytes == 0) return; + auto status = fwrite(data, 1, size_in_bytes, fp_); + if (status != size_in_bytes) { + std::cout << "fwrite error: wrote " << status << " bytes instead of " << size_in_bytes << std::endl; + throw std::runtime_error("Failed to write chunk to file."); + } +} + +void ParamWriter::endWriteParam() { + fflush(fp_); + auto current_pos = ftell(fp_); + if (index_ >= param_info_.size()) { + throw std::runtime_error("Parameter index out of bounds at endWriteParam."); + } + auto ¶m = param_info_[index_]; + param.size = current_pos - current_param_start_offset_; + + index_++; +} + +void ParamWriter::paddingIndex(const std::vector &names) { + param_info_.resize(names.size()); + int index_size = calcIndexSize(names); + write_u64(fp_, index_size); + std::vector padding(index_size, 0); + fwrite(padding.data(), sizeof(char), index_size, fp_); +} \ No newline at end of file diff --git a/src/quantizer/ParamWriter.hpp b/tools/quantizer/ParamWriter.hpp similarity index 50% rename from src/quantizer/ParamWriter.hpp rename to tools/quantizer/ParamWriter.hpp index de6ed1d46..c2da5fb60 100644 --- a/src/quantizer/ParamWriter.hpp +++ b/tools/quantizer/ParamWriter.hpp @@ -5,6 +5,10 @@ #ifndef MLLM_PARAMWRITER_HPP #define MLLM_PARAMWRITER_HPP #include "ParamLoader.hpp" +#include +#include +#include + static void write_u64(FILE *fp, uint64_t val) { fwrite(&val, sizeof(uint64_t), 1, fp); } @@ -12,12 +16,13 @@ static void writeInt(FILE *fp, int32_t val) { fwrite(&val, sizeof(int32_t), 1, fp); } static void writeString(FILE *fp, const std::string &str) { - writeInt(fp, str.size()); - fwrite(str.c_str(), sizeof(char), str.size(), fp); - + writeInt(fp, static_cast(str.size())); + if 
(!str.empty()) { + fwrite(str.c_str(), sizeof(char), str.size(), fp); + } } static void write_dtype(FILE *fp, DataType dtype) { - writeInt(fp, dtype); + writeInt(fp, static_cast(dtype)); } struct ParmInfo { @@ -26,20 +31,28 @@ struct ParmInfo { uint64_t offset; uint64_t size; }; + class ParamWriter { public: - ~ParamWriter(); - ParamWriter(std::string filename); - int calcIndexSize(vector names); + virtual ~ParamWriter(); + explicit ParamWriter(std::string filename); + int calcIndexSize(const std::vector &names); void writeIndex(); - virtual void writeParam(string name, DataType type, void *data, uint64_t size); - void paddingIndex(vector names); -private: + void beginWriteParam(const std::string &name, DataType type); + void writeChunk(const void *data, uint64_t size_in_bytes); + void endWriteParam(); + + void paddingIndex(const std::vector &names); + +protected: uint64_t index_ = 0; FILE *fp_; std::string path_; std::vector param_info_; + +private: + uint64_t current_param_start_offset_ = 0; }; -#endif // MLLM_PARAMWRITER_HPP +#endif // MLLM_PARAMWRITER_HPP \ No newline at end of file diff --git a/tools/quantizer/QuantWriter.cpp b/tools/quantizer/QuantWriter.cpp new file mode 100644 index 000000000..30ed8d019 --- /dev/null +++ b/tools/quantizer/QuantWriter.cpp @@ -0,0 +1,362 @@ +#include "QuantWriter.hpp" +#include "Types.hpp" +#include "backends/cpu/compute/GemmKleidiai.hpp" +#include +#include +#include + +namespace mllm { + +const std::vector fp32_layers = { + "norm", + "rope", + "bias", + "rotary_emb", + // "embed_tokens", + "_GN", + "class_embedding", + "embeddings", + "logit_scale", + "modality_preprocessors", + "modality_heads", + "modality_postprocessors", + "pre_transformer_layer", + "pos_embed.inv_freq", + "ln_q", + "patch_embed.proj", + // "mlp.gate.", + // "lm_head.weight", // T + // "query_key_value", // T + // "word_embeddings", // T +}; +const std::vector q40_layers = { + "embed_tokens", + "word_embeddings", +}; +const std::vector q6_layers = { + 
// "w2", "wv", "dense_h_to_4h", "v_proj", "down_proj", +}; +const std::vector q23_layers = { + // ".experts.", +}; +const std::vector q23_to_q4_0_4x4_layers = { + "w2", + "wv", + "dense_h_to_4h", + "v_proj", + "down_proj", + "down", +}; + +const std::vector q4_0_kai_to_q4_0_4x4_layers = { + // 设置为KAI_Q4_0但不用 KAI_Q4_0 + "in_proj", + "w12", + "model.output", + "merger.mlp", + // for ling-lite-moe + // "query_key_value", + // "dense", +}; + +bool find_in_layers(const std::string &name, const std::vector &layer_names) { + if ("vision_embed_tokens" == name) return true; + for (const auto &layer : layer_names) { + if (name.find(layer) != std::string::npos) { + return true; + } + } + return false; +} + +QuantWriter::QuantWriter(std::string output_path, std::string input_path) : + ParamWriter(std::move(output_path)), output_path_(this->path_) { + param_loader_ = new mllm::ParamLoader(std::move(input_path)); + if (!param_loader_->isAvailible()) { + __exit(-1); + } +} + +QuantWriter::~QuantWriter() { + delete param_loader_; +}; + +int QuantWriter::readParams() { + original_param_names_ = param_loader_->getParamNames(); // 保存原始参数名 + param_names_ = original_param_names_; // 复制一份用于可能的操作 + + // 检查 lm_head.weight 是否存在,如果不存在且 model.embed_tokens.weight 存在,则添加它 + bool lm_head_exists = false; + bool embed_tokens_exists = false; + for (const auto &name : original_param_names_) { + if (name == "lm_head.weight") { + lm_head_exists = true; + } + if (name == "model.embed_tokens.weight") { + embed_tokens_exists = true; + } + } + + if (!lm_head_exists && embed_tokens_exists) { + std::cout << "INFO: lm_head.weight not found, will be created by copying model.embed_tokens.weight" << std::endl; + param_names_.push_back("lm_head.weight"); + } + + paddingIndex(param_names_); + return param_names_.size(); +} + +std::vector QuantWriter::load_full_fp32_param(const std::string &name) { + if (param_loader_->getDataType(name) != MLLM_TYPE_F32) { + return {}; + } + auto [data_ptr, size] = 
param_loader_->load(name); + if (data_ptr == nullptr || size == 0) { + return {}; + } + std::vector param_data(size / sizeof(float)); + memcpy(param_data.data(), data_ptr, size); + delete[] data_ptr; + return param_data; +} + +DataType QuantWriter::getQuantizationTypeFor(const std::string &name, DataType target_type, const std::string &other_flag) { + /* + if (name.find("down_proj") != std::string::npos && name.find("visual.blocks") != std::string::npos + && name.find("bias") == std::string::npos) { + return MLLM_TYPE_F32; + } + if (name.find("qkv") != std::string::npos && name.find("bias") == std::string::npos) { + return MLLM_TYPE_Q4_K; + } + */ + if (find_in_layers(name, q40_layers)) { + return MLLM_TYPE_Q4_0; + } + if (find_in_layers(name, fp32_layers)) { + return MLLM_TYPE_F32; + } + if (find_in_layers(name, q23_layers) && (name.find("down") == std::string::npos)) { + return MLLM_TYPE_Q2_K; + } + if (target_type == MLLM_TYPE_KLEIDIAI_Q4_0) { + if (find_in_layers(name, q4_0_kai_to_q4_0_4x4_layers)) { + return MLLM_TYPE_Q4_0_4_4; // MLLM_TYPE_Q4_0; // 这些层回退到Q4_0 + } + if (other_flag == "eager" && name.find("v_proj") != std::string::npos) { + return MLLM_TYPE_Q4_0; // eager模式下 v_proj 回退到Q4_0 + } + return MLLM_TYPE_KLEIDIAI_Q4_0; + } + if (target_type >= MLLM_TYPE_Q2_K && target_type <= MLLM_TYPE_Q8_K) { + if (find_in_layers(name, q6_layers)) { + return MLLM_TYPE_Q6_K; + } + } + return target_type; +} + +void QuantWriter::quantize(DataType target_quant_type, const std::string &other_flag) { + FILE *fp_in = param_loader_->getInputStream(); + if (!fp_in) { + std::cout << "Failed to get input file stream from ParamLoader." << std::endl; + __exit(-1); + } + + const int CHUNK_SIZE_FLOATS = 1024 * 1024; // 每次处理4MB + std::vector read_buffer(CHUNK_SIZE_FLOATS); + + int tmp_hidden_dim = -1; + int vit_tmp_hidden_dim = -1; + int qw3_hidden_dim = 2048; + + // 预扫描以找到隐藏维度 + std::cout << "Pre-scanning to find hidden dimensions..." 
<< std::endl; + for (const auto &name : original_param_names_) { + if (tmp_hidden_dim == -1 && (name.find("model") != std::string::npos && name.find("norm") != std::string::npos && name.find("k") == std::string::npos && name.find("q") == std::string::npos)) { + ParamMetadata meta = param_loader_->getParamMetadata(name); + tmp_hidden_dim = meta.size / sizeof(float); + std::cout << " - Found hidden dimension (tmp_hidden_dim): " << tmp_hidden_dim << " from layer '" << name << "'" << std::endl; + } + if (vit_tmp_hidden_dim == -1 && (name.find("visual") != std::string::npos && name.find("norm") != std::string::npos)) { + ParamMetadata meta = param_loader_->getParamMetadata(name); + vit_tmp_hidden_dim = meta.size / sizeof(float); + std::cout << " - Found ViT hidden dimension (vit_tmp_hidden_dim): " << vit_tmp_hidden_dim << " from layer '" << name << "'" << std::endl; + } + if (tmp_hidden_dim != -1 && vit_tmp_hidden_dim != -1) { + break; + } + } + + for (const auto &name : param_names_) { + bool is_copied_lm_head = (name == "lm_head.weight" && std::find(original_param_names_.begin(), original_param_names_.end(), name) == original_param_names_.end()); + DataType final_quant_type = getQuantizationTypeFor(name, target_quant_type, other_flag); + + std::cout << "Processing param " << name << " -> " << DataTypeName(final_quant_type) << " ... "; + if (is_copied_lm_head) { + std::cout << "(copied from model.embed_tokens.weight) "; + } + fflush(stdout); + + beginWriteParam(name, final_quant_type); + + std::vector full_param_data; + uint64_t num_floats; + + if (is_copied_lm_head) { + full_param_data = load_full_fp32_param("model.embed_tokens.weight"); + if (full_param_data.empty()) { + std::cerr << "FAIL! Failed to load model.embed_tokens.weight for copying." 
<< std::endl; + __exit(-1); + } + num_floats = full_param_data.size(); + } else { + ParamMetadata meta = param_loader_->getParamMetadata(name); + num_floats = meta.size / sizeof(float); + fseek(fp_in, meta.offset, SEEK_SET); + if (final_quant_type == MLLM_TYPE_KLEIDIAI_Q4_0 || final_quant_type == MLLM_TYPE_Q4_0_4_4) { + full_param_data.resize(num_floats); + fread(full_param_data.data(), sizeof(float), num_floats, fp_in); + } + } + + if (!full_param_data.empty()) { + void *quant_ptr = nullptr; + uint64_t quant_size = 0; + + if (final_quant_type == MLLM_TYPE_KLEIDIAI_Q4_0) { +#if defined(__aarch64__) || defined(__arm__) || defined(__arm64__) + int H = find_in_layers(name, {"visual"}) ? vit_tmp_hidden_dim : tmp_hidden_dim; + if (find_in_layers(name, {"self_attn.o_proj.weight"}) && other_flag == "qw3") { + H = qw3_hidden_dim; + std::cout << "(QWen3 self_attn.o_proj.weight detected, using hidden dim: " << H << ") "; + } + if (H <= 0) { + std::cout << "FAIL! Hidden dimension not found for " << name << std::endl; + __exit(-1); + } + + // ==================【代码修正】================== + // 恢复您指出的、用于判断 N 和 K 的关键逻辑 + int N, K; + if (find_in_layers(name, {"w2", "down_proj", "down", "fc2"})) { + N = H; + if (num_floats % N != 0) { + std::cerr << "FAIL! num_floats " << num_floats << " not divisible by N for " << name << std::endl; + __exit(-1); + } + K = num_floats / N; + } else { + K = H; + if (num_floats % K != 0) { + std::cerr << "FAIL! 
num_floats " << num_floats << " not divisible by K for " << name << std::endl; + __exit(-1); + } + N = num_floats / K; + } + // =============================================== + + std::string bias_name = name; + bias_name.replace(bias_name.find("weight"), 6, "bias"); + std::vector bias_data = load_full_fp32_param(bias_name); + std::vector transposed_weight_data(num_floats); + for (int n = 0; n < N; ++n) + for (int k = 0; k < K; ++k) transposed_weight_data[k * N + n] = full_param_data[n * K + k]; + auto block_t = alloc_kleidiai_quant_block(final_quant_type, N, K); + quant_ptr = block_t.first; + quant_size = block_t.second; + // std::cout << "N: " << N << ", K: " << K << ", quant_size: " << quant_size << " "; +#ifndef KAI_FP16_CAL + mllm_kleidai_pack_b_and_bias_qsi4((uint8_t *)quant_ptr, transposed_weight_data.data(), bias_data.empty() ? nullptr : bias_data.data(), N, K); +#else + mllm_kleidai_pack_b_and_bias_qsi4_to_fp16((uint8_t *)quant_ptr, transposed_weight_data.data(), bias_data.empty() ? nullptr : bias_data.data(), N, K); +#endif +#else + std::cerr << "KLEIDIAI_Q4_0 is only supported on ARM architecture." << std::endl; + __exit(-1); +#endif + } else if (final_quant_type == MLLM_TYPE_Q4_0_4_4) { + bool is_visual = find_in_layers(name, {"visual"}); + int H = is_visual ? vit_tmp_hidden_dim : tmp_hidden_dim; + if (H <= 0) { + std::cout << "FAIL! Hidden dimension not found for " << name << std::endl; + __exit(-1); + } + int K = H; + if ((is_visual && find_in_layers(name, {"fc2", "down_proj", "down"})) + || (!is_visual && find_in_layers(name, {"w2", "down_proj", "down"}))) { + if (num_floats % H != 0) { + std::cerr << "FAIL! 
num_floats not divisible by H for " << name << std::endl; + __exit(-1); + } + K = num_floats / H; + } + auto block_t = alloc_quant_block(num_floats, final_quant_type); + quant_ptr = block_t.first; + quant_size = block_t.second; + quantize_row_q4_0_4x4(full_param_data.data(), quant_ptr, num_floats, K); + } else { + auto block_t = alloc_quant_block(num_floats, final_quant_type); + quant_ptr = block_t.first; + quant_size = block_t.second; + switch (final_quant_type) { + case MLLM_TYPE_F32: break; + case MLLM_TYPE_Q4_0: quantize_row_q4_0(full_param_data.data(), quant_ptr, num_floats); break; + case MLLM_TYPE_Q8_0: quantize_row_q8_0(full_param_data.data(), quant_ptr, num_floats); break; + case MLLM_TYPE_Q2_K: quantize_row_q2_K(full_param_data.data(), quant_ptr, num_floats); break; + case MLLM_TYPE_Q3_K: quantize_row_q3_K(full_param_data.data(), quant_ptr, num_floats); break; + case MLLM_TYPE_Q4_K: quantize_row_q4_K(full_param_data.data(), quant_ptr, num_floats); break; + case MLLM_TYPE_Q6_K: quantize_row_q6_K(full_param_data.data(), quant_ptr, num_floats); break; + case MLLM_TYPE_Q8_K: quantize_row_q8_K(full_param_data.data(), quant_ptr, num_floats); break; + default: + std::cerr << "Unsupported quantization type for full-tensor processing: " << DataTypeName(final_quant_type) << std::endl; + delete[] (char *)quant_ptr; + __exit(-1); + } + } + if (final_quant_type == MLLM_TYPE_F32) { + writeChunk(full_param_data.data(), num_floats * sizeof(float)); + if (quant_ptr) delete[] (char *)quant_ptr; + } else { + writeChunk(quant_ptr, quant_size); + delete[] (char *)quant_ptr; + } + } else { + uint64_t floats_processed = 0; + while (floats_processed < num_floats) { + uint64_t floats_to_read = std::min((uint64_t)CHUNK_SIZE_FLOATS, num_floats - floats_processed); + if (floats_to_read == 0) break; + fread(read_buffer.data(), sizeof(float), floats_to_read, fp_in); + auto block_t = alloc_quant_block(floats_to_read, final_quant_type); + void *quant_ptr = block_t.first; + switch 
(final_quant_type) { + case MLLM_TYPE_F32: + writeChunk(read_buffer.data(), floats_to_read * sizeof(float)); + delete[] (char *)quant_ptr; + quant_ptr = nullptr; + break; + case MLLM_TYPE_Q4_0: quantize_row_q4_0(read_buffer.data(), quant_ptr, floats_to_read); break; + case MLLM_TYPE_Q8_0: quantize_row_q8_0(read_buffer.data(), quant_ptr, floats_to_read); break; + case MLLM_TYPE_Q2_K: quantize_row_q2_K(read_buffer.data(), quant_ptr, floats_to_read); break; + case MLLM_TYPE_Q3_K: quantize_row_q3_K(read_buffer.data(), quant_ptr, floats_to_read); break; + case MLLM_TYPE_Q4_K: quantize_row_q4_K(read_buffer.data(), quant_ptr, floats_to_read); break; + case MLLM_TYPE_Q6_K: quantize_row_q6_K(read_buffer.data(), quant_ptr, floats_to_read); break; + case MLLM_TYPE_Q8_K: quantize_row_q8_K(read_buffer.data(), quant_ptr, floats_to_read); break; + default: + std::cerr << "Unsupported quantization type in streaming loop: " << DataTypeName(final_quant_type) << std::endl; + delete[] (char *)quant_ptr; + __exit(-1); + } + if (quant_ptr) { + writeChunk(quant_ptr, block_t.second); + delete[] (char *)quant_ptr; + } + floats_processed += floats_to_read; + } + } + endWriteParam(); + std::cout << "Done." 
<< std::endl; + } + writeIndex(); +} +} // namespace mllm \ No newline at end of file diff --git a/tools/quantizer/QuantWriter.hpp b/tools/quantizer/QuantWriter.hpp new file mode 100644 index 000000000..3ed0ad77e --- /dev/null +++ b/tools/quantizer/QuantWriter.hpp @@ -0,0 +1,82 @@ +#include "ParamWriter.hpp" +#include "ParamLoader.hpp" +#include "backends/cpu/third_party/ggml/QuantizeQ6.hpp" +#include "backends/cpu/third_party/ggml/QuantizeQ2.hpp" +#include "backends/cpu/third_party/ggml/QuantizeQ3.hpp" +#include "backends/cpu/third_party/ggml/QuantizeQ4.hpp" +#include "backends/cpu/third_party/ggml/QuantizeQ8.hpp" +#include "backends/cpu/third_party/ggml/GemmPack.hpp" +#include "backends/cpu/compute/GemmKleidiai.hpp" +#include +#include +#include +#include +#include + +#ifndef MLLM_QUANTWRITER_HPP +#define MLLM_QUANTWRITER_HPP + +#define NOT_IMPLEMENTED(x) \ + std::cout << "Quantize params to " << DataTypeName(x) << " is not implemented\n"; \ + __exit(-1); +#define UNREACHABLE() \ + std::cout << "Unreachable code\n"; \ + __exit(-1); +#define __exit(status) \ + { \ + if (status != 0) { \ + std::cout << "Quantize failed\n"; \ + remove(output_path_.c_str()); \ + } \ + exit(status); \ + } + +static std::pair alloc_quant_block(uint64_t count, DataType type) { + uint64_t size = DataTypeSize(type, count); + if (size <= 0) { + return std::make_pair(nullptr, 0); + } + void *data = new char[size]; + return std::make_pair(data, size); +} + +#if defined(__aarch64__) || defined(__arm__) || defined(__arm64__) +static std::pair alloc_kleidiai_quant_block(DataType type, int N, int K) { + assert(type == MLLM_TYPE_KLEIDIAI_Q4_0); + +#ifndef KAI_FP16_CAL + uint64_t size = mllm_kleidai_get_packed_b_qsi4_size(N, K); +#else + uint64_t size = mllm_kleidai_get_packed_b_qsi4_size_to_fp16(N, K); +#endif + if (size <= 0) { + return std::make_pair(nullptr, 0); + } + void *data = new uint8_t[size]; + return std::make_pair(data, size); +} +#endif + +namespace mllm { +extern const std::vector 
q4_0_kai_to_q4_0_4x4_layers; + +class QuantWriter : public ParamWriter { +public: + ~QuantWriter(); + explicit QuantWriter(std::string output_path, std::string input_path); + int readParams(); + + void quantize(DataType target_quant_type, const std::string &other_flag = ""); + +private: + std::string output_path_; + ParamLoader *param_loader_; + std::vector param_names_; + std::vector original_param_names_; + + DataType getQuantizationTypeFor(const std::string &name, DataType target_type, const std::string &other_flag); + + std::vector load_full_fp32_param(const std::string &name); +}; +} // namespace mllm +#endif \ No newline at end of file diff --git a/tools/quantizer/main_quantize.cpp b/tools/quantizer/main_quantize.cpp new file mode 100644 index 000000000..0f5134fca --- /dev/null +++ b/tools/quantizer/main_quantize.cpp @@ -0,0 +1,67 @@ +// +// Created by Xiang Li on 23-10-31. +// +#include "ParamWriter.hpp" +#include "ParamLoader.hpp" +#include +#include +#include "QuantWriter.hpp" +#include "Types.hpp" + +const std::vector vl_q4x4_2_q4_k_layers; + +int main(int argc, char **argv) { + if (argc < 4) { + std::cout << "Usage: ./quantize [other_flag]\n"; + std::cout << " quant_type: Q4_0, Q8_0, Q4_K, Q6_K, Q8_K, Q4_0_4_4, KAI_Q4_0, etc.\n"; + std::cout << " other_flag (optional): 'vl' or 'eager'\n"; + return -1; + } + auto input_path = std::string(argv[1]); + auto output_path = std::string(argv[2]); + auto quant_type_str = std::string(argv[3]); + std::string other_flag = ""; + if (argc == 5) { + other_flag = std::string(argv[4]); + if (other_flag != "vl" && other_flag != "eager" && other_flag != "qw3") { + std::cout << "Invalid other_flag. 
Use 'vl' or 'eager' or 'qw3'.\n"; + return -1; + } + } + + DataType quant_type_enum = MLLM_TYPE_COUNT; + if (quant_type_str == "Q4_0") + quant_type_enum = MLLM_TYPE_Q4_0; + else if (quant_type_str == "Q8_0") + quant_type_enum = MLLM_TYPE_Q8_0; + else if (quant_type_str == "Q2_K") + quant_type_enum = MLLM_TYPE_Q2_K; + else if (quant_type_str == "Q3_K") + quant_type_enum = MLLM_TYPE_Q3_K; + else if (quant_type_str == "Q4_K") + quant_type_enum = MLLM_TYPE_Q4_K; + else if (quant_type_str == "Q6_K") + quant_type_enum = MLLM_TYPE_Q6_K; + else if (quant_type_str == "Q8_K") + quant_type_enum = MLLM_TYPE_Q8_K; + else if (quant_type_str == "KAI_Q4_0") + quant_type_enum = MLLM_TYPE_KLEIDIAI_Q4_0; + else if (quant_type_str == "Q4_0_4_4") + quant_type_enum = MLLM_TYPE_Q4_0_4_4; + else { + std::cout << "Quant type " << quant_type_str << " is not supported\n"; + return -1; + } + + mllm::QuantWriter quant_writer(output_path, input_path); + int param_count = quant_writer.readParams(); + if (param_count <= 0) { + std::cout << "No params to quantize\n"; + return -1; + } + std::cout << "Quantizing " << param_count << " params to " << quant_type_str << " with flag '" << other_flag << "'\n"; + quant_writer.quantize(quant_type_enum, other_flag); + std::cout << "Quantization finished successfully.\n"; + + return 0; +} \ No newline at end of file diff --git a/tools/rotate/README.md b/tools/rotate/README.md new file mode 100644 index 000000000..c0a17ac76 --- /dev/null +++ b/tools/rotate/README.md @@ -0,0 +1,106 @@ +# RotLLM +This is an implementation of [SpinQuant](https://arxiv.org/abs/2405.16406) and [QuaRot](https://arxiv.org/abs/2404.00456) for different models like Qwen. We are not intented to do exactly the same things as SpinQuant and QuaRot, instead we provide a framework to customize rotation operations for any models you want to use. + +![Example rotation for Qwen2](../../assets/rotation.png) + +## Example +We provide a unified interface to rotate a model. 
+```python +import rotate +... # do whatever you want +rotate.rotate_model(model, ...) # parameters are customizable +``` +You can find an example for `Qwen2ForCausalLM` and `Qwen2VLForConditionalGeneration` in [`qwen2.5-instruct.py`](./example/qwen2.5-instruct.py). + +## WorkFlow of RotLLM +### Operations +The rotation operation on a model can be viewed as sequentially executing a series of predefined operations. Suppose you want to add a rotation operation for a model `abc`, first create `abc.py` in `rotate/model` and define operations as following +```python +from ..common import RotateOperationRegistry + +# register the first step of operation to rotate model abc +@RotateOperationRegistry.register(abc) +def first_operation(model: abc, ...): + ... # do whatever you want + +@RotateOperationRegistry.register(abc) +def second_operation(model: abc, ...): + ... # do whatever you want +``` +After doing that, `rotate.rotate_model(model, ...)` will sequantially call `first_operation` and `second_operation` to handle model. + +### Steps to rotate a model +#### Fuse layer norm +To ensure the invariance of a model, we should first fuse some operations of `norm` into the adjacent linear module. +Formally, +```math +norm(x) = f(x) \circ w_n + b_n +``` +in layer norm, we have +```math +f(x) = \frac{x-mean(x)}{\|x-mean(x)\|} +``` +in RSM norm, we have +```math +f(x) = \frac{x}{\|x\|} +``` +In LLMs, norm is usually followed by linear. +```math +\begin{aligned} +linear(norm(x)) &= norm(x)W_l + b_l \\ +&=\left(f(x) \circ w_n + b_n \right)W_l + b_l \\ +&=\left(f(x) diag(w_n) + b_n \right)W_l + b_l \\ +&=f(x) \ diag(w_n)W_l + (b_nW_l + b_l) +\end{aligned} +``` +This implies that $`norm(x)`$ is substitutable with $`f(x)`$. $`w_n`$ and $`b_n`$ can be fuse into linear layer +```math +\begin{aligned} +W_l &\rightarrow diag(w_n)W_l \\ +b_n &\rightarrow b_nW_l + b_l +\end{aligned} +``` + +This is done by `fuse_layer_norms` in [rotatioin_utils.py](./rotate/rotation_utils.py). 
+ +The key problem is how `fuse_layer_norms` should identify the norm layers and their succeeding linear layers in diverse model architectures. + +In our framework, to support a model like abc, you must implement a `NormLinearIterator` in abc.py, which iterates through the model and yields all `(father, norm_name, linears)` pairs. An example in [qwen.py](./rotate/model/qwen.py) is shown below +```python +from ..common import NormLinearIterator + +@NormLinearIterator.register_iterator +class Qwen2NormLinearIterator(NormLinearIterator): + def __init__(self, model: Qwen2ForCausalLM): + super().__init__() + self.model = model + + def __iter__(self): + for layer in self.model.model.layers: + yield layer, "input_layernorm", [ + layer.self_attn.q_proj, + layer.self_attn.k_proj, + layer.self_attn.v_proj, + ] + yield layer, "post_attention_layernorm", [ + layer.mlp.up_proj, + layer.mlp.gate_proj, + ] + yield self.model.model, "norm", [self.model.lm_head] + + @classmethod + def supports_model(cls, model: nn.Module) -> bool: + return isinstance(model, Qwen2ForCausalLM) or isinstance(model, Qwen2VLForConditionalGeneration) +``` + +#### Rotate the model +The rotation operation on a model can be viewed as applying rotational transformations to either the inputs or outputs of certain layers while ensuring mathematical equivalence before and after rotation. + +For different layer types (e.g., `embedding` and `linear`), the implementation of rotating their outputs varies. However, at an abstract level, both cases involve rotating outputs. + +To streamline the code logic, our framework introduces the `AutoOperation` class, which encapsulates the same operation across different layers. This eliminates the need for conditional statements when applying the same operation to different layer types. + +For details, you can refer to [common.py](./rotate/common.py) and [qwen.py](./rotate/model/qwen.py). 
+ +## Training rotation matrix +Currently, the rotation matrices we use are all random Hadamard matrices, which may not achieve optimal performance. According to SpinQuant, we can adopt a QAT (Quantization-Aware Training)-like approach to learn the rotation matrices for better results. This functionality has not yet been implemented and remains a TODO item. diff --git a/tools/rotate/__init__.py b/tools/rotate/__init__.py new file mode 100644 index 000000000..6e02746e6 --- /dev/null +++ b/tools/rotate/__init__.py @@ -0,0 +1,7 @@ +from .common import rotate_model, RotateOperationRegistry, AutoOperation + +registry = RotateOperationRegistry() +registry.auto_discover(package_name="model") + +from .rotation_utils import get_orthogonal_matrix +from .hadamard_utils import hadmard_matrix diff --git a/tools/rotate/common.py b/tools/rotate/common.py new file mode 100644 index 000000000..b82cd8e91 --- /dev/null +++ b/tools/rotate/common.py @@ -0,0 +1,373 @@ +from abc import ABC, abstractmethod +from typing import Iterator, Tuple, Iterable, List, Optional +import torch.nn as nn +import importlib +from pathlib import Path + + +class NormLinearIterator(ABC): + """iterate over norm and its subsequent linear layers""" + + _registered_iterators: List["NormLinearIterator"] = [] + + @abstractmethod + def __iter__(self) -> Iterator[Tuple[nn.Module, str, Iterable[nn.Linear]]]: + """(parent_module, norm_layer_name, [linear_layers])""" + pass + + @classmethod + @abstractmethod + def supports_model(cls, model: nn.Module) -> bool: + """check if the model is supported""" + pass + + @classmethod + def register_iterator(cls, iter_cls) -> "NormLinearIterator": + """register an iterator class""" + cls._registered_iterators.append(iter_cls) + return iter_cls + + @classmethod + def from_model(cls, model: nn.Module) -> "NormLinearIterator": + for iterator_cls in cls._registered_iterators: + if iterator_cls.supports_model(model): + return iterator_cls(model) + + raise ValueError( + f"No suitable 
NormLinearIterator found for model type {type(model)}. " + "Consider implementing and registering a custom iterator." + ) + + +from typing import Dict, Type, Callable, Any, Union +import torch +import torch.nn as nn + +class AutoOperationMeta(type): + def __getattr__(cls, name): + if name.startswith('_'): + raise AttributeError(name) + + def method(module: nn.Module, *args, **kwargs): + return cls.apply_operation(name, module, *args, **kwargs) + + return method + +class AutoOperation(metaclass=AutoOperationMeta): + """ + A class that supports registering and applying operations to different module types. + Operations can be registered externally and applied to modules dynamically. + """ + + # Nested dictionary to store operations: + # {operation_name: {module_type: operation_func}} + _operations: Dict[str, Dict[Type[nn.Module], Callable]] = {} + + @classmethod + def register_operation(cls, operation_name: str, module_type: Type[nn.Module]): + """ + Decorator to register an operation for a specific module type. + + Args: + operation_name: Name of the operation (e.g., 'rotate_input') + module_type: The module type this operation applies to + """ + def decorator(func: Callable): + if operation_name not in cls._operations: + cls._operations[operation_name] = {} + cls._operations[operation_name][module_type] = func + return func + return decorator + + @classmethod + def apply_operation(cls, operation_name: str, module: nn.Module, *args, **kwargs) -> Any: + """ + Apply a registered operation to a module. 
+ + Args: + operation_name: Name of the operation to apply + module: The module to apply the operation to + *args, **kwargs: Additional arguments to pass to the operation + + Returns: + The result of the operation (if any) + + Raises: + ValueError: If the operation is not registered for the module type + """ + if operation_name not in cls._operations: + raise ValueError(f"Operation '{operation_name}' not registered") + + module_type = type(module) + for base in module_type.__mro__: + if base in cls._operations[operation_name]: + return cls._operations[operation_name][base](module, *args, **kwargs) + + raise ValueError(f"Operation '{operation_name}' not registered for module type {module_type}") + + @classmethod + def has_operation(cls, operation_name: str, module_type: Type[nn.Module]) -> bool: + """ + Check if an operation is registered for a module type. + """ + if operation_name not in cls._operations: + return False + return any(base in cls._operations[operation_name] for base in module_type.__mro__) + + # Convenience methods (dynamically generated based on registered operations) + def __getattr__(cls, name): + if name.startswith('_'): + raise AttributeError(name) + + def method(module: nn.Module, *args, **kwargs): + return cls.apply_operation(name, module, *args, **kwargs) + + return method + +@AutoOperation.register_operation("rotate_input", nn.Linear) +def op_rotate_linear_input( + linear: torch.nn.Linear, + R: torch.Tensor): + """ + Rotate the input of linear layers by a rotation matrix. + i.e. xW + b -> (xR)W + b ==> x(RW) + b + This is done by multiplying the weight matrix by the rotation matrix. + The rotation matrix should be orthogonal. 
+ """ + R_dim = R.shape[0] + in_dim = linear.in_features + repeat_times = in_dim // R_dim + assert in_dim % R_dim == 0, "input dim should be multiple of rotation matrix dim" + # refer to patch merger of ViT of Qwen2VL + R = torch.block_diag(*([R] * repeat_times)) # sometimes we calculate (x1R, x2R, x3R) W + b, which is equivalent to (x1, x2, x3) diag(R, R, R) W + b + dtype = linear.weight.dtype + R_device = R.device + w_device = linear.weight.device + W_ = linear.weight.data.to(device=R_device, dtype=torch.float64) + # note that the W_ in linear is transpose of W + linear.weight.data = (W_ @ (R.T.to(torch.float64))).to(device=w_device, dtype=dtype) + + +@AutoOperation.register_operation("rotate_output", nn.Linear) +def op_rotate_linear_output( + linear: nn.Linear, + R: torch.Tensor): + """ + Rotate the output of linear layers by a rotation matrix. + i.e. o = xW + b -> o = (xW + b)R ==> o = x(WR) + bR + This is done by multiplying the weight matrix by the rotation matrix. + The rotation matrix should be orthogonal. + """ + assert R.shape[0] == R.shape[1], "R should be a square matrix" + assert R.shape[0] == linear.weight.shape[0], "R should be same size as output dim of linear layer" + dtype = linear.weight.dtype + R_device = R.device + w_device = linear.weight.device + W_ = linear.weight.data.to(device=R_device, dtype=torch.float64) + # note that the W_ in linear is transpose of W + linear.weight.data = (R.T.to(torch.float64) @ W_).to(device=w_device, dtype=dtype) + # rotate the bias + if linear.bias is not None: + bias = linear.bias.data.to(device=R_device, dtype=torch.float64) + linear.bias.data = (bias @ R.to(torch.float64)).to(device=linear.bias.device, + dtype=linear.bias.dtype) + +@AutoOperation.register_operation("rotate_output", nn.Embedding) +def op_rotate_embedding( + embedding: torch.nn.Embedding, + R: torch.Tensor): + """ + Rotate each embedding vector by a rotation matrix R. 
+ """ + dtype = embedding.weight.dtype + R_device = R.device + w_device = embedding.weight.device + W_ = embedding.weight.data.to(device=R_device, dtype=torch.float64) + # note that the W_ in linear is transpose of W + embedding.weight.data = (W_ @ (R.to(torch.float64))).to(device=w_device, dtype=dtype) + + + +""" +# denote centering the vector x as C(x) = x - mu +# we have C(x) = x - mu = x - mu 1 where 1 is the vector of ones +# = x - 1/d sum(x) 1 +# we have sum(x) = x_1 + x_2 + ... + x_n = 1^T x +# so we have C(x) = x - 1/d (1^T x) 1 = x - 1/d 1 (1^T x) = x - 1/d 1 1^T x +# that is, we can write C(x) = (I - 1/d 1 1^T) x +# denote the matrix I - 1/d 1 1^T as C +# we have C(x) = C x +# here all the vectors are column vectors +# it is easy to see that C is a symmetric matrix +# so for a row vector x we have C(x) = x C^T = x C +""" +@AutoOperation.register_operation("center_output", nn.Linear) +def op_center_linear_output(linear: torch.nn.Linear): + """ + Center the output of linear layers + i.e. xW + b -> (xW + b) C = xW C + bC + that is we need to center the weight matrix by row and the bias + """ + dtype = linear.weight.dtype + W_ = linear.weight.data.to(dtype=torch.float64) + # note that the W_ in linear is transpose of W + # center echo columns of W equivalent to centering the rows of W_ + W_mean = W_.mean(dim=0, keepdim=True) + W_centered = W_ - W_mean + linear.weight.data = W_centered.to(dtype=dtype) + if linear.bias is not None: + bias = linear.bias.data.to(dtype=torch.float64) + bias_mean = bias.mean() + bias_centered = bias - bias_mean + linear.bias.data = bias_centered.to(dtype=dtype) + + +from typing import Callable, Dict, List, Any, Type +from functools import wraps + + +class RotateOperationRegistry: + """A singleton registry for managing rotate operations across different modules. + + This registry maintains a mapping from module types to lists of rotate operations. 
+ Each module type can have multiple rotate operations registered, which will be + executed in registration order when the rotate interface is called. + """ + + _instance = None + _registry: Dict[Type, List[Callable[..., Any]]] = {} + + def __new__(cls): + """Ensures singleton pattern implementation.""" + if cls._instance is None: + cls._instance = super().__new__(cls) + return cls._instance + + @classmethod + def register(cls, module_type: Type) -> Callable: + """Decorator to register a rotate operation for a specific module type. + + Args: + module_type: The module class that this operation applies to. + + Returns: + A decorator function that will register the target function. + """ + + def decorator(func: Callable[..., Any]) -> Callable: + """Inner decorator that performs the actual registration. + + Args: + func: The rotate operation function to be registered. + + Returns: + The original function with registration side-effect. + """ + if module_type not in cls._registry: + cls._registry[module_type] = [] + cls._registry[module_type].append(func) + + @wraps(func) + def wrapper(*args, **kwargs): + return func(*args, **kwargs) + return wrapper + + return decorator + + @classmethod + def get_operations(cls, module_type: Type) -> List[Callable[..., Any]]: + """Retrieves all registered rotate operations for a module type. + + Args: + module_type: The module class to look up operations for. + + Returns: + A list of registered rotate operations for the given module type. + Returns empty list if no operations are registered. + """ + return cls._registry.get(module_type, []) + + @classmethod + def clear(cls) -> None: + """Clears all registered operations (primarily for testing purposes).""" + cls._registry.clear() + + @classmethod + def discover_and_register(cls, package_path: str, module_prefix: str) -> None: + """Scan a directory and import all Python modules for registration. + + Args: + package_path: Filesystem path to the directory containing registration files. 
+ module_prefix: Python import path prefix (e.g., 'myapp.registrations'). + """ + package_dir = Path(package_path) + + for module_file in package_dir.glob('*.py'): + if module_file.name.startswith('_'): + continue # Skip __init__.py and similar files + + module_name = f"{module_prefix}.{module_file.stem}" + try: + importlib.import_module(module_name) + print(f"Imported registration module: {module_name}") + except ImportError as e: + print(f"Failed to import {module_name}: {str(e)}") + + @classmethod + def auto_discover(cls, + package_name: str = "registrations", + base_package: Optional[str] = None) -> None: + """Automatically discover and load registration modules. + + Args: + package_name: Subpackage name containing registrations. + base_package: Root package name (e.g., 'myapp'). + If None, attempts to detect from caller's package. + """ + if base_package is None: + # Automatic base package detection + import inspect + frame = inspect.currentframe() + try: + caller_module = inspect.getmodule(frame.f_back) + base_package = caller_module.__package__.split('.')[0] + finally: + del frame # Clean up to avoid reference cycles + + full_package = f"{base_package}.{package_name}" if base_package else package_name + + try: + package = importlib.import_module(full_package) + package_path = Path(package.__file__).parent + + print(f"Discovering modules in: {package_path}") + cls.discover_and_register(str(package_path), full_package) + except ImportError as e: + print(f"Registration package not found: {full_package}: {str(e)}") + + +def rotate_model(module: Any, *args, **kwargs) -> None: + """Unified interface to execute all registered rotate operations for a module. + + Executes all registered rotate operations in registration order, passing through + all provided arguments to each operation. + + Args: + module: The module instance to be rotated. + *args: Positional arguments to pass to rotate operations. + **kwargs: Keyword arguments to pass to rotate operations. 
+ + Raises: + ValueError: If no rotate operations are registered for the module's type. + """ + module_type = type(module) + operations = RotateOperationRegistry.get_operations(module_type) + + if not operations: + raise ValueError(f"No rotate operations registered for module type: {module_type.__name__}") + + for operation in operations: + operation(module, *args, **kwargs) + + + \ No newline at end of file diff --git a/tools/rotate/hadamard_utils.py b/tools/rotate/hadamard_utils.py new file mode 100644 index 000000000..948a3255f --- /dev/null +++ b/tools/rotate/hadamard_utils.py @@ -0,0 +1,4196 @@ +import torch, math +# Adapted from https://github.com/Cornell-RelaxML/quip-sharp/blob/main/lib/utils/matmul_had.py + +def get_hadK(n, transpose=False): + hadK, K = None, None + if n % 172 == 0: # llama-2-7b up + assert (is_pow2(n // 172)) + K = 172 + hadK = get_had172().T if transpose else get_had172() + elif n % 156 == 0: # llama-1-30b 3x hidden + assert (is_pow2(n // 156)) + K = 156 + hadK = get_had156().T if transpose else get_had156() + elif n % 140 == 0: # llama-1-30b intermediate + assert (is_pow2(n // 140)) + K = 140 + hadK = get_had140().T if transpose else get_had140() + elif n % 108 == 0: # llama-1-13b intermediate + assert (is_pow2(n // 108)) + K = 108 + hadK = get_had108().T if transpose else get_had108() + elif n % 60 == 0: # llama-1-13b 3x hidden + assert (is_pow2(n // 60)) + K = 60 + hadK = get_had60().T if transpose else get_had60() + elif n % 52 == 0: # llama-1-13b 1x hidden + assert (is_pow2(n // 52)) + K = 52 + hadK = get_had52().T if transpose else get_had52() + elif n % 36 == 0: + assert (is_pow2(n // 36)) + K = 36 + hadK = get_had36().T if transpose else get_had36() + elif n % 28 == 0: #llama-3 up + assert (is_pow2(n // 28)) + K = 28 + hadK = get_had28().T if transpose else get_had28() + elif n % 40 == 0: + assert (is_pow2(n // 40)) + K = 40 + hadK = get_had40().T if transpose else get_had40() + elif n % 20 == 0: + assert (is_pow2(n // 20)) + K = 
20 + hadK = get_had20().T if transpose else get_had20() + elif n % 12 == 0: + assert (is_pow2(n // 12)) + K = 12 + hadK = get_had12().T if transpose else get_had12() + else: + assert (is_pow2(n)) + K = 1 + + return hadK, K + + +def matmul_hadU(X, transpose=False): + # [..., n] + n = X.shape[-1] + hadK, K = get_hadK(n, transpose) + input = X.clone().view(-1, n, 1) # [num, n, 1] + output = input.clone() # [num, n, 1] + while input.shape[1] > K: # n > K + # [num, n, 1] -> [num, n//2, 2, 1] + input = input.view(input.shape[0], input.shape[1] // 2, 2, input.shape[2]) + # [num, n//2, 2, 1] + output = output.view(input.shape) + output[:, :, 0, :] = input[:, :, 0, :] + input[:, :, 1, :] + output[:, :, 1, :] = input[:, :, 0, :] - input[:, :, 1, :] + + # [num, n//2, 2, 1] -> [num, n//2, 2] + output = output.view(input.shape[0], input.shape[1], -1) + (input, output) = (output, input) + del output + + if K > 1: + # Do not explicitly repeat - OOM + # input = torch.bmm( + # hadK.repeat(len(input), 1, 1).to(input.device).to(input.dtype), input) + # Use bcast instead + input = hadK.view(1, K, K).to(input) @ input + + return input.view(X.shape) / torch.tensor(n).sqrt() + + +def matmul_hadUt(X): + return matmul_hadU(X, transpose=True) + +def hadmard_matrix(size, device): + I = torch.eye(size, dtype=torch.float64).to(device) + return matmul_hadU(I).to(device) + +def random_hadamard_matrix(size, device): + # See https://cornell-relaxml.github.io/quip-sharp/ , Section "Randomized Hadamard Transformation" + Q = torch.randint(low=0, high=2, size=(size,)).to(torch.float64) + Q = Q * 2 - 1 + Q = torch.diag(Q) + return matmul_hadU(Q).to(device) + +def matmul_hadU_cuda(X, hadK, K): + n = X.shape[-1] + if K == 1: + return fast_hadamard_transform.hadamard_transform(X.contiguous(), 1.0/torch.tensor(n).sqrt()) + # if transpose: + # hadK = hadK.T.contiguous() + input = X.view(-1, K, n // K) + input = fast_hadamard_transform.hadamard_transform(input.contiguous(), 1.0/torch.tensor(n).sqrt()) + 
input = hadK.to(input.device).to(input.dtype) @ input + return input.reshape(X.shape) + + +def apply_exact_had_to_linear(module, had_dim=-1, output=False): + assert isinstance(module, torch.nn.Linear) + in_features, out_features = module.in_features, module.out_features + + if had_dim != -1: + assert is_pow2(had_dim), "Hadamard dimension must be a power of 2!" + + W_ = module.weight.data + dtype = W_.dtype + dev = W_.device + init_shape = W_.shape + W_ = W_.float().cuda() + + if had_dim == -1: + if output: + had_K, K = get_hadK(out_features) + W_ = matmul_hadU_cuda(W_.t(), had_K, K).t() + if not output: + had_K, K = get_hadK(in_features) + W_ = matmul_hadU_cuda(W_, had_K, K) + else: + # Apply Hadamard to the last had_dim chunks of the weights + if output: + W_ = W_.t() + transposed_shape = W_.shape + W_ = fast_hadamard_transform.hadamard_transform( + W_.reshape(-1, transposed_shape[-1]//had_dim, had_dim), + scale=1/math.sqrt(had_dim) + ).reshape(transposed_shape).t() + else: + raise NotImplementedError("Not implemented (or tested) yet!") + n = W_.shape[1] + W_ = hadamard_transform(W_.reshape(-1, n//had_dim, had_dim), scale=1/math.sqrt(had_dim)).reshape(init_shape) + module.weight.data = W_.to(device=dev, dtype=dtype) + + + +def is_pow2(n): + return (n & (n - 1) == 0) and (n > 0) + + +# hadamard matrices for had12, had36.pal2, had52,will, +# # had60.pal, had108.pal, had140.pal, had156.will, had172.will: +# http://www.neilsloane.com/hadamard/index.html +def get_had12(): + return torch.FloatTensor([ + [+1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [+1, +1, -1, +1, -1, -1, -1, +1, +1, +1, -1, +1], + [+1, +1, +1, -1, +1, -1, -1, -1, +1, +1, +1, -1], + [+1, -1, +1, +1, -1, +1, -1, -1, -1, +1, +1, +1], + [+1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, +1], + [+1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1], + [+1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1], + [+1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1], + [+1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1], + [+1, 
-1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1], + [+1, +1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1], + [+1, -1, +1, -1, -1, -1, +1, +1, +1, -1, +1, +1], + ]) + + +def get_had40(): + return torch.FloatTensor([ + [ + +1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, ], + [ + +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, ], + [ + +1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, ], + [ + +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, ], + [ + +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, ], + [ + +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, ], + [ + +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, ], + [ + +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, ], + [ + +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, ], + [ + +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, ], + [ + +1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, 
+1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, ], + [ + +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, ], + [ + +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, ], + [ + +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, ], + [ + +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, ], + [ + +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, ], + [ + +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, ], + [ + +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, ], + [ + +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, ], + [ + +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, ], + [ + +1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, ], + [ + +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, ], + [ + +1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, 
+1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, ], + [ + +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, ], + [ + +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, ], + [ + +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, -1, ], + [ + +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, ], + [ + +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, ], + [ + +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, ], + [ + +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, ], + [ + +1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, +1, +1, +1, -1, +1, ], + [ + +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, +1, +1, +1, -1, ], + [ + +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, +1, +1, +1, ], + [ + +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, +1, +1, ], + [ + +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, 
+1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, +1, ], + [ + +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, ], + [ + +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, ], + [ + +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, ], + [ + +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, ], + [ + +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, ], + ]) +def get_had20(): + return torch.FloatTensor([ + [+1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [+1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1], + [+1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1], + [+1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1], + [+1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1], + [+1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1], + [+1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1], + [+1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1], + [+1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1], + [+1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1], + [+1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1], + [+1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, 
+1, +1, -1, -1, -1, -1, +1], + [+1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1], + [+1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1], + [+1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1], + [+1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1], + [+1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1], + [+1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1], + [+1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1], + [+1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1] + ]) + + +def get_had28(): + return torch.FloatTensor([ + [ + +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, + +1, +1, +1, +1, +1, +1, +1], + [ + +1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, -1, -1, + -1, -1, +1, +1, -1, +1 + ], + [ + +1, +1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, + -1, -1, -1, -1, +1, +1, -1 + ], + [ + +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, + +1, -1, -1, -1, -1, +1, +1 + ], + [ + +1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, +1, +1, -1, +1, -1, +1, -1, + +1, +1, -1, -1, -1, -1, +1 + ], + [ + +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, +1, +1, -1, +1, -1, +1, + -1, +1, +1, -1, -1, -1, -1 + ], + [ + +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, +1, +1, -1, +1, -1, + +1, -1, +1, +1, -1, -1, -1 + ], + [ + +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, + -1, +1, -1, +1, +1, -1, -1 + ], + [ + +1, -1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, +1, -1, + +1, -1, +1, -1, +1, +1, -1 + ], + [ + +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, +1, +1, + -1, +1, -1, +1, -1, +1, +1 + ], + [ + 
+1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, +1, + +1, -1, +1, -1, +1, -1, +1 + ], + [ + +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, + +1, +1, -1, +1, -1, +1, -1 + ], + [ + +1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, -1, + -1, +1, +1, -1, +1, -1, +1 + ], + [ + +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, + -1, -1, +1, +1, -1, +1, -1 + ], + [ + -1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1 + ], + [ + +1, -1, +1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, + +1, +1, +1, -1, -1, +1, -1 + ], + [ + +1, +1, -1, +1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, + +1, +1, +1, +1, -1, -1, +1 + ], + [ + +1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, + -1, +1, +1, +1, +1, -1, -1 + ], + [ + +1, +1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, + -1, -1, +1, +1, +1, +1, -1 + ], + [ + +1, +1, +1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, + +1, -1, -1, +1, +1, +1, +1 + ], + [ + +1, -1, +1, +1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, -1, + -1, +1, -1, -1, +1, +1, +1 + ], + [ + +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, -1, + -1, -1, +1, -1, -1, +1, +1 + ], + [ + +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, + -1, -1, -1, +1, -1, -1, +1 + ], + [ + +1, -1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, -1, + +1, -1, -1, -1, +1, -1, -1 + ], + [ + +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, + -1, +1, -1, -1, -1, +1, -1 + ], + [ + +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, + -1, -1, +1, -1, -1, -1, +1 + ], + [ + +1, -1, +1, +1, 
-1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, + +1, -1, -1, +1, -1, -1, -1 + ], + [ + +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, +1, + +1, +1, -1, -1, +1, -1, -1 + ]]) + + +def get_had36(): + return torch.FloatTensor([ + [+1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, +1, +1, +1, + +1, +1, +1, +1, +1, +1, +1, +1], + [+1, +1, +1, +1, -1, +1, -1, -1, -1, +1, +1, -1, -1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, + +1, -1, -1, -1, +1, -1, +1, +1], + [+1, +1, +1, +1, +1, -1, +1, -1, -1, -1, +1, +1, -1, -1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, + +1, +1, -1, -1, -1, +1, -1, +1], + [+1, +1, +1, +1, +1, +1, -1, +1, -1, -1, -1, +1, +1, -1, -1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, + -1, +1, +1, -1, -1, -1, +1, -1], + [+1, -1, +1, +1, +1, +1, +1, -1, +1, -1, -1, -1, +1, +1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, + -1, -1, +1, +1, -1, -1, -1, +1], + [+1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, -1, -1, +1, +1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, +1, + -1, -1, -1, +1, +1, -1, -1, -1], + [+1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, + +1, -1, -1, -1, +1, +1, -1, -1], + [+1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, -1, +1, -1, +1, +1, -1, +1, +1, + -1, +1, -1, -1, -1, +1, +1, -1], + [+1, -1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, -1, +1, +1, -1, +1, + +1, -1, +1, -1, -1, -1, +1, +1], + [+1, +1, -1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, -1, +1, +1, -1, + +1, +1, -1, +1, -1, -1, -1, +1], + [+1, +1, +1, -1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, -1, +1, +1, + -1, +1, +1, -1, +1, -1, -1, -1], + [+1, -1, +1, +1, -1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, 
-1, +1, -1, +1, +1, -1, -1, -1, +1, -1, +1, + +1, -1, +1, +1, -1, +1, -1, -1], + [+1, -1, -1, +1, +1, -1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, + +1, +1, -1, +1, +1, -1, +1, -1], + [+1, -1, -1, -1, +1, +1, -1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, -1, +1, + -1, +1, +1, -1, +1, +1, -1, +1], + [+1, +1, -1, -1, -1, +1, +1, -1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, -1, + +1, -1, +1, +1, -1, +1, +1, -1], + [+1, -1, +1, -1, -1, -1, +1, +1, -1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, +1, -1, -1, -1, +1, +1, -1, -1, + -1, +1, -1, +1, +1, -1, +1, +1], + [+1, +1, -1, +1, -1, -1, -1, +1, +1, -1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, +1, -1, -1, -1, +1, +1, -1, + -1, -1, +1, -1, +1, +1, -1, +1], + [+1, +1, +1, -1, +1, -1, -1, -1, +1, +1, -1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, +1, -1, -1, -1, +1, +1, + -1, -1, -1, +1, -1, +1, +1, -1], + [-1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1], + [+1, -1, +1, +1, -1, +1, -1, -1, -1, +1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, -1, + -1, +1, +1, +1, -1, +1, -1, -1], + [+1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, +1, -1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, +1, + -1, -1, +1, +1, +1, -1, +1, -1], + [+1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, + +1, -1, -1, +1, +1, +1, -1, +1], + [+1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, +1, -1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, + +1, +1, -1, -1, +1, +1, +1, -1], + [+1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, + +1, +1, +1, -1, -1, +1, +1, +1], + [+1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, +1, -1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, + -1, 
+1, +1, +1, -1, -1, +1, +1], + [+1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, -1, -1, + +1, -1, +1, +1, +1, -1, -1, +1], + [+1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, -1, -1, -1, + -1, +1, -1, +1, +1, +1, -1, -1], + [+1, +1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, +1, +1, -1, +1, -1, -1, -1, + -1, -1, +1, -1, +1, +1, +1, -1], + [+1, +1, +1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, -1, -1, + -1, -1, -1, +1, -1, +1, +1, +1], + [+1, -1, +1, +1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, +1, +1, -1, +1, -1, + -1, -1, -1, -1, +1, -1, +1, +1], + [+1, -1, -1, +1, +1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, +1, + -1, -1, -1, -1, -1, +1, -1, +1], + [+1, -1, -1, -1, +1, +1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, -1, +1, +1, +1, -1, + +1, -1, -1, -1, -1, -1, +1, -1], + [+1, +1, -1, -1, -1, +1, +1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, +1, +1, +1, + -1, +1, -1, -1, -1, -1, -1, +1], + [+1, -1, +1, -1, -1, -1, +1, +1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, -1, +1, +1, + +1, -1, +1, -1, -1, -1, -1, -1], + [+1, +1, -1, +1, -1, -1, -1, +1, +1, -1, -1, -1, +1, -1, +1, +1, -1, +1, -1, -1, +1, -1, +1, +1, +1, -1, -1, +1, + +1, +1, -1, +1, -1, -1, -1, -1], + [+1, +1, +1, -1, +1, -1, -1, -1, +1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, -1, -1, + +1, +1, +1, -1, +1, -1, -1, -1], + ]) + + +def get_had60(): + return torch.FloatTensor([ + [+1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, ], + [+1, +1, -1, 
+1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, + -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, + +1, +1, -1, +1, ], + [+1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, + -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, + +1, +1, +1, -1, ], + [+1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, + -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, + -1, +1, +1, +1, ], + [+1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, + +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, + +1, -1, +1, +1, ], + [+1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, + +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, + -1, +1, -1, +1, ], + [+1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, + -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, + +1, -1, +1, -1, ], + [+1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, + -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, + -1, +1, -1, +1, ], + [+1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, + -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, + -1, -1, +1, -1, ], + [+1, -1, +1, 
-1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, + -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, + +1, -1, -1, +1, ], + [+1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, + +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, + -1, +1, -1, -1, ], + [+1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, + -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, + -1, -1, +1, -1, ], + [+1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, + -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, + +1, -1, -1, +1, ], + [+1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, + -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, + +1, +1, -1, -1, ], + [+1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, + +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, + +1, +1, +1, -1, ], + [+1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, + +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, + -1, +1, +1, +1, ], + [+1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, + -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, + +1, -1, +1, +1, ], + [+1, +1, +1, 
-1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, + +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, + +1, +1, -1, +1, ], + [+1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, + +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, + +1, +1, +1, -1, ], + [+1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, + -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, + +1, +1, +1, +1, ], + [+1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, + +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, + -1, +1, +1, +1, ], + [+1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, + -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, + -1, -1, +1, +1, ], + [+1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, + +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, + +1, -1, -1, +1, ], + [+1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, + -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, + +1, +1, -1, -1, ], + [+1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, + -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, + +1, +1, +1, -1, ], + [+1, -1, -1, 
+1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, + -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, + +1, +1, +1, +1, ], + [+1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, + +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, + +1, +1, +1, +1, ], + [+1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, + -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, + -1, +1, +1, +1, ], + [+1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, + +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, + -1, -1, +1, +1, ], + [+1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, + +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, + -1, -1, -1, +1, ], + [+1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, + -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, + -1, -1, -1, -1, ], + [+1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, + +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, + -1, -1, -1, -1, ], + [+1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, + +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, + +1, -1, -1, -1, ], + [+1, -1, -1, 
-1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, + +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, + +1, +1, -1, -1, ], + [+1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, + -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, + -1, +1, +1, -1, ], + [+1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, + +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, + -1, -1, +1, +1, ], + [+1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, + -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, + -1, -1, -1, +1, ], + [+1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, + +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, + -1, -1, -1, -1, ], + [+1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, + -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, + +1, -1, -1, -1, ], + [+1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, + -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, + -1, +1, -1, -1, ], + [+1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, + +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, + -1, -1, +1, -1, ], + [+1, -1, -1, 
-1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, + -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, + -1, -1, -1, +1, ], + [+1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, + -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, + +1, -1, -1, -1, ], + [+1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, + +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, + +1, +1, -1, -1, ], + [+1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, + +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, + -1, +1, +1, -1, ], + [+1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, + +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, + +1, -1, +1, +1, ], + [+1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, + -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, + +1, +1, -1, +1, ], + [+1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, + +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, + -1, +1, +1, -1, ], + [+1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, + +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, + +1, -1, +1, +1, ], + [+1, +1, -1, 
+1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, + +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, + -1, +1, -1, +1, ], + [+1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, + +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, + +1, -1, +1, -1, ], + [+1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, + -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, + -1, +1, -1, +1, ], + [+1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, + -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, + -1, -1, +1, -1, ], + [+1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, + +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, + -1, -1, -1, +1, ], + [+1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, + +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, + +1, -1, -1, -1, ], + [+1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, + +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, + -1, +1, -1, -1, ], + [+1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, + +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, + +1, -1, +1, -1, ], + [+1, -1, -1, 
-1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, + +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, + +1, +1, -1, +1, ], + [+1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, + -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, + -1, +1, +1, -1, ], + [+1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, + -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, + +1, -1, +1, +1, ], + ]) + + +def get_had52(): + return torch.FloatTensor([ + [+1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, +1, + -1, +1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, -1, ], + [-1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, + +1, -1, +1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, ], + [+1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, +1, + +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, -1, ], + [-1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, + +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, ], + [-1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, + -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, ], + [+1, -1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, -1, -1, + +1, -1, +1, 
+1, +1, -1, +1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, ], + [+1, +1, -1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, -1, + -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, ], + [+1, +1, +1, -1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, + -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, ], + [+1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, +1, + +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, -1, -1, ], + [-1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, + +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, -1, ], + [-1, -1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, + -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, ], + [+1, -1, -1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, + -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, ], + [-1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, +1, -1, + +1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, ], + [-1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, -1, -1, +1, + +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, +1, -1, +1, ], + [+1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, + +1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, +1, -1, 
-1, +1, +1, -1, -1, +1, -1, ], + [+1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, + -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, +1, ], + [+1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, + +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, ], + [-1, +1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, +1, + +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, ], + [-1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, + +1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, ], + [-1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, + +1, +1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, ], + [-1, -1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, + -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, ], + [-1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, + +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, ], + [-1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, -1, + +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, ], + [+1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, + -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, ], + [+1, +1, -1, -1, -1, 
-1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, + +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, ], + [+1, +1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, + +1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, ], + [-1, -1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, + +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, -1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, ], + [-1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, + -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, +1, ], + [+1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, -1, +1, -1, + +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, ], + [-1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, -1, +1, + -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, ], + [+1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, -1, + +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, ], + [+1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, + -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, ], + [-1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, + -1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, -1, ], + [-1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, -1, 
-1, +1, -1, -1, -1, -1, +1, +1, +1, + +1, -1, -1, +1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, ], + [+1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, + +1, +1, -1, -1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, ], + [+1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, + +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, +1, ], + [-1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, -1, -1, + +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, ], + [+1, -1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, + -1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, ], + [-1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, + -1, -1, +1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, ], + [-1, +1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, + -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, -1, ], + [+1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, + -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, ], + [+1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, -1, -1, -1, + +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, ], + [+1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, -1, -1, + -1, +1, -1, -1, 
-1, +1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, ], + [+1, +1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, -1, + -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, ], + [-1, +1, +1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, + -1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, ], + [+1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, + +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, ], + [+1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, +1, +1, + +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, -1, -1, +1, ], + [-1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, +1, + +1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, -1, -1, ], + [+1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, + +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, -1, ], + [+1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, + +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, ], + [+1, +1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, -1, -1, + +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, ], + [+1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, -1, + -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, 
+1, -1, -1, +1, -1, +1, ], + ]) + + +def get_had108(): + return torch.FloatTensor([ + [+1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, ], + [+1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, + -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, + +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, + -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, ], + [+1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, + +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, + +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, + +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, ], + [+1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, + -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, + -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, + -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, ], + [+1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, + +1, -1, +1, -1, +1, 
-1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, + -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, + +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, ], + [+1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, + -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, + +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, + -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, ], + [+1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, + +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, + +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, + +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, ], + [+1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, + +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, + -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, + +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, ], + [+1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, + +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, + -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, + -1, +1, +1, -1, 
+1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, ], + [+1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, + -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, + -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, + -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, ], + [+1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, + +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, + +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, + +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, ], + [+1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, + +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, + +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, + +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, ], + [+1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, + -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, + -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, + +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, ], + [+1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, + +1, -1, +1, +1, -1, +1, 
+1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, + +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, + +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, ], + [+1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, + -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, + -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, + +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, ], + [+1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, + -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, + -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, + -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, ], + [+1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, + -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, + -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, + +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, ], + [+1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, + -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, + -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, + +1, +1, -1, +1, +1, 
+1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, ], + [+1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, + -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, + +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, + +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, ], + [+1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, + -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, + -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, + +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, ], + [+1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, + +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, + -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, + -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, ], + [+1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, + +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, + -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, + +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, ], + [+1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, + +1, +1, +1, -1, -1, -1, -1, 
-1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, + -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, + -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, ], + [+1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, + +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, + -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, + -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, ], + [+1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, + -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, + +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, + +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, ], + [+1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, + -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, + +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, + +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, ], + [+1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, + +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, + -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, + +1, +1, +1, -1, -1, +1, 
-1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, ], + [+1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, + -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, + -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, + -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, ], + [+1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, + +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, + +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, + -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, ], + [+1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, + +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, + -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, + +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, ], + [+1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, + -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, + +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, + +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, ], + [+1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, + +1, -1, +1, +1, -1, +1, -1, -1, 
+1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, + -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, + -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, ], + [+1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, + +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, + +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, + -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, ], + [+1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, + -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, + -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, + +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, ], + [+1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, + -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, + +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, + +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, ], + [+1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, + -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, + +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, + -1, +1, +1, -1, -1, +1, +1, 
-1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, ], + [+1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, + -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, + +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, + -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, ], + [+1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, + +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, + -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, + -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, ], + [+1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, + +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, + +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, + +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, ], + [+1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, + +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, + +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, + +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, ], + [+1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, + +1, +1, +1, +1, -1, -1, -1, -1, +1, 
+1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, + -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, + -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, ], + [+1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, + +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, + +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, + +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, ], + [+1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, + +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, + -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, + -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, ], + [+1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, + -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, + -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, + -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, ], + [+1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, + +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, + -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, + -1, -1, -1, +1, -1, +1, +1, -1, 
-1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, ], + [+1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, + -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, + -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, + -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, ], + [+1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, + -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, + -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, + +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, ], + [+1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, + +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, + -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, + -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, ], + [+1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, + -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, + +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, + -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, ], + [+1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, + -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, 
+1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, + +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, + -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, ], + [+1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, + -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, + +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, + -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, ], + [+1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, + +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, + +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, + -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, ], + [+1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, + -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, + -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, + +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, ], + [+1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, + +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, + -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, + +1, +1, -1, -1, -1, -1, -1, +1, -1, 
-1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, ], + [+1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, + -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, + +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, + -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, ], + [+1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, + +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, + -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, + -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, ], + [+1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, + -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, + +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, + +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, ], + [+1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, + +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, + +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, + -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, ], + [+1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, + +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, 
+1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, + -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, + +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, ], + [+1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, + -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, + +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, + -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, ], + [+1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, + -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, + +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, + +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, ], + [+1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, + +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, + -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, + -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, ], + [+1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, + +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, + -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, + +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, 
+1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, ], + [+1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, + +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, + -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, + +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, ], + [+1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, + +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, + -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, + +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, ], + [+1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, + +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, + +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, + -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, ], + [+1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, + -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, + +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, + +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, ], + [+1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, + +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, 
+1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, + +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, + +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, ], + [+1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, + +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, + +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, + -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, ], + [+1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, + +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, + +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, + +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, ], + [+1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, + +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, + +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, + -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, ], + [+1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, + -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, + -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, + -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, 
+1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, ], + [+1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, + +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, + +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, + -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, ], + [+1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, + -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, + -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, + -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, ], + [+1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, + -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, + -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, + -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, ], + [+1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, + +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, + +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, + -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, ], + [+1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, + +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, 
+1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, + -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, + +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, ], + [+1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, + +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, + -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, + +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, ], + [+1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, + -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, + -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, + +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, ], + [+1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, + -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, + +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, + +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, ], + [+1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, + +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, + -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, + -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, 
-1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, ], + [+1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, + +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, + +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, + -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, ], + [+1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, + -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, + -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, + +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, ], + [+1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, + -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, + +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, + -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, ], + [+1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, + +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, + -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, + +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, ], + [+1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, + +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, 
-1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, + +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, + +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, ], + [+1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, + -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, + +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, + -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, ], + [+1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, + -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, + -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, + +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, ], + [+1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, + -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, + -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, + +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, ], + [+1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, + +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, + +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, + -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, 
+1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, ], + [+1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, + +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, + +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, + -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, ], + [+1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, + -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, + +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, + -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, ], + [+1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, + +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, + +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, + -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, ], + [+1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, + -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, + +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, + +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, ], + [+1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, + -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, 
-1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, + -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, + +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, ], + [+1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, + -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, + +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, + +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, ], + [+1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, + -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, + +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, + +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, ], + [+1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, + +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, + +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, + +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, ], + [+1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, + -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, + +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, + +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, 
+1, -1, +1, -1, -1, +1, +1, +1, +1, -1, ], + [+1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, + -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, + -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, + -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, ], + [+1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, + -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, + +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, + +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, ], + [+1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, + -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, + -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, + -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, +1, ], + [+1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, + -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, + -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, +1, + -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, ], + [+1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, + +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, 
-1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, + +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, + +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, ], + [+1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, + +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, + +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, + -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, ], + [+1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, -1, + -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, + +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, + -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, ], + [+1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, +1, + -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, + -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, + -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, ], + [+1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, + +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, + -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, + +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, 
-1, -1, -1, -1, +1, +1, -1, +1, +1, ], + ]) + + +def get_had140(): + return torch.FloatTensor([ + [+1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, ], + [+1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, + +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, + -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, + -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, + +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, + +1, ], + [+1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, + +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, + -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, + +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, + +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, + -1, ], + [+1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, + -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, 
+1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, + +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, + -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, + +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, + -1, ], + [+1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, + -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, + -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, + -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, + +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, + +1, ], + [+1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, + +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, + -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, + -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, + -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, + +1, ], + [+1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, + +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, + +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, + -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, 
+1, +1, +1, + -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, + +1, ], + [+1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, + +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, + -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, + -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, + +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, + +1, ], + [+1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, + -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, + +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, + +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, + +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, + -1, ], + [+1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, + +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, + -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, + +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, + +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, + +1, ], + [+1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, + +1, +1, -1, +1, +1, +1, 
-1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, + -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, + +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, + +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, + -1, ], + [+1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, + +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, + -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, + +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, + +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, + +1, ], + [+1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, + -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, + -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, + +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, + -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, + -1, ], + [+1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, + +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, + +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, + -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, 
+1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, + -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, + +1, ], + [+1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, + +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, + -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, + +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, + +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, + -1, ], + [+1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, + -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, + -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, + -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, + +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, + -1, ], + [+1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, + +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, + +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, + +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, + -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, + +1, ], + [+1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, 
-1, +1, + -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, + +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, + -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, + +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, + -1, ], + [+1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, + +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, + -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, + -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, + +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, + -1, ], + [+1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, + -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, + -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, + -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, + +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, + -1, ], + [+1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, + +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, + -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, + -1, -1, -1, -1, +1, -1, +1, -1, 
+1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, + +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, + +1, ], + [+1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, + -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, + -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, + -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, + -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, + -1, ], + [+1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, + -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, + -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, + +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, + +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, + -1, ], + [+1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, + -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, + +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, + +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, + -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, + -1, ], + [+1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, 
+1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, + -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, + +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, + +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, + +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, + +1, ], + [+1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, + +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, + -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, + +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, + +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, + +1, ], + [+1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, + +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, + -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, + +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, + -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, + -1, ], + [+1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, + -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, + -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, 
+ -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, + +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, + -1, ], + [+1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, + +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, + -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, + +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, + +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, + +1, ], + [+1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, + +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, + +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, + -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, + -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, + +1, ], + [+1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, + -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, + +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, + -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, + +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, + +1, ], + [+1, +1, +1, +1, -1, -1, +1, +1, -1, 
-1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, + -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, + -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, + +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, + -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, + +1, ], + [+1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, + +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, + -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, + -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, + -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, + -1, ], + [+1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, + +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, + +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, + -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, + -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, + -1, ], + [+1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, + +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, + +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, 
-1, +1, -1, -1, -1, -1, +1, -1, + +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, + -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, + +1, ], + [+1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, + +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, + +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, + -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, + -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, + +1, ], + [+1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, + -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, + -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, + +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, + +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, + +1, ], + [+1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, + +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, + +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, + -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, + +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, + +1, ], + [+1, 
+1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, + -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, + +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, + -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, + +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, + +1, ], + [+1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, + +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, + +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, + -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, + +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, + -1, ], + [+1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, + -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, + -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, + -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, + +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, + -1, ], + [+1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, + +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, + +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, 
+1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, + +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, + -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, + +1, ], + [+1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, + -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, + +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, + -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, + +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, + +1, ], + [+1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, + -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, + -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, + -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, + -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, + -1, ], + [+1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, + +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, + +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, + +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, + +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, 
-1, +1, +1, +1, + +1, ], + [+1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, + -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, + -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, + +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, + -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, + +1, ], + [+1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, + -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, + +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, + -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, + -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, + +1, ], + [+1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, + -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, + -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, + -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, + -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, + +1, ], + [+1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, + +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, + +1, -1, +1, -1, 
+1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, + -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, + -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, + -1, ], + [+1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, + -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, + -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, + -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, + -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, + +1, ], + [+1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, + -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, + -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, + -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, + +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, + -1, ], + [+1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, + -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, + -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, + +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, + +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, 
+1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, + +1, ], + [+1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, + +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, + -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, + +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, + +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, + +1, ], + [+1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, + +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, + +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, + -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, + +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, + -1, ], + [+1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, + -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, + +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, + -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, + +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, + +1, ], + [+1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, + -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, 
-1, -1, +1, +1, + -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, + -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, + -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, + +1, ], + [+1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, + +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, + +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, + -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, + +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, + -1, ], + [+1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, + +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, + +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, + +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, + -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, + +1, ], + [+1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, + +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, + -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, + +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, + -1, -1, +1, -1, +1, +1, 
+1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, + -1, ], + [+1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, + +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, + -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, + -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, + +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, + -1, ], + [+1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, + -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, + +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, + -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, + -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, + -1, ], + [+1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, + -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, + +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, + +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, + -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, + -1, ], + [+1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, + +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, 
-1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, + +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, + +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, + +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, + -1, ], + [+1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, + +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, + +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, + +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, + -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, + +1, ], + [+1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, + +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, + -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, + -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, + +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, + +1, ], + [+1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, + +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, + +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, + +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, 
-1, -1, + -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, + +1, ], + [+1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, + +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, + -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, + +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, + -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, + +1, ], + [+1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, + -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, + +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, + +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, + -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, + +1, ], + [+1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, + -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, + -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, + -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, + -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, + -1, ], + [+1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, + +1, -1, -1, +1, +1, +1, +1, 
+1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, + +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, + +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, + +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, + +1, ], + [+1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, + +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, + -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, + +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, + -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, + -1, ], + [+1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, + -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, + -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, + -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, + -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, + +1, ], + [+1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, + +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, + +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, + +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, 
-1, -1, +1, +1, -1, -1, -1, -1, -1, +1, + +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, + -1, ], + [+1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, + +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, + -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, + -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, + +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, + -1, ], + [+1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, + +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, + -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, + +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, + -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, + -1, ], + [+1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, + +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, + -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, + -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, + -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, + -1, ], + [+1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, 
+1, + -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, + +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, + +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, + -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, + -1, ], + [+1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, + +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, + -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, + -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, + -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, + +1, ], + [+1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, + -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, + -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, + -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, + -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, + +1, ], + [+1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, + +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, + -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, + -1, -1, -1, +1, -1, +1, -1, +1, -1, 
+1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, + +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, + +1, ], + [+1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, + +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, + +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, + -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, + +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, + +1, ], + [+1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, + -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, + +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, + +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, + -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, + +1, ], + [+1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, + +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, + -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, + +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, + -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, + -1, ], + [+1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, 
+1, +1, +1, -1, -1, -1, -1, -1, +1, -1, + +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, + -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, + -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, + -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, + +1, ], + [+1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, + -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, + +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, + +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, + -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, + -1, ], + [+1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, + +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, + +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, + +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, + +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, + -1, ], + [+1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, + -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, + +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, + 
-1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, + +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, + +1, ], + [+1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, + -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, + +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, + -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, + -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, + -1, ], + [+1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, + -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, + -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, + +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, + -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, + -1, ], + [+1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, + -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, + -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, + +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, + +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, + +1, ], + [+1, +1, -1, -1, +1, -1, -1, +1, -1, 
+1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, + -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, + +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, + +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, + +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, + -1, ], + [+1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, + +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, + +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, + +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, + +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, + +1, ], + [+1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, + +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, + +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, + -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, + -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, + -1, ], + [+1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, + +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, + +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, 
-1, +1, -1, -1, +1, -1, +1, -1, + +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, + +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, + -1, ], + [+1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, + +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, + +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, + -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, + +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, + -1, ], + [+1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, + +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, + -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, + +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, + +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, + -1, ], + [+1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, + -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, + -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, + -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, + -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, + +1, ], + [+1, 
+1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, + +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, + +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, + +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, + +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, + -1, ], + [+1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, + -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, + +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, + -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, + +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, + -1, ], + [+1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, + +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, + -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, + -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, + -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, + +1, ], + [+1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, + -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, + +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, 
-1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, + +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, + +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, + +1, ], + [+1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, + -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, + +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, + -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, + -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, + -1, ], + [+1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, + -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, + +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, + -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, + +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, + -1, ], + [+1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, + -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, + +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, + -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, + -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, 
+1, +1, -1, -1, + -1, ], + [+1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, + -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, + -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, + +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, + +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, + -1, ], + [+1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, + +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, + +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, + -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, + -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, + -1, ], + [+1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, + +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, + -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, + -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, + -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, + +1, ], + [+1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, + +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, + +1, -1, +1, -1, 
+1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, + -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, + -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, + +1, ], + [+1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, + +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, + +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, + +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, + -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, + -1, ], + [+1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, + +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, + -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, + +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, + +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, + -1, ], + [+1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, + -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, + +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, + -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, + +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, 
+1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, + -1, ], + [+1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, + +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, + +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, + -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, + -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, + -1, ], + [+1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, + -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, + -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, + +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, + +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, + +1, ], + [+1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, + -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, + +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, + +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, + +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, + +1, ], + [+1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, + +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, 
-1, -1, -1, -1, + -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, + +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, + -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, + -1, ], + [+1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, + -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, + -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, + +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, + -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, + -1, ], + [+1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, + -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, + -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, + -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, + +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, + +1, ], + [+1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, + +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, + -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, + -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, + +1, +1, -1, -1, +1, +1, 
-1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, + +1, ], + [+1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, + -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, + -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, + +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, + +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, + +1, ], + [+1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, + +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, + +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, + +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, + +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, + -1, ], + [+1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, + -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, + +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, + +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, + -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, + +1, ], + [+1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, + -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, 
+1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, + +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, + +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, + +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, + +1, ], + [+1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, + -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, + +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, + +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, + -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, + +1, ], + [+1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, + -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, + +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, + -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, + +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, + -1, ], + [+1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, + +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, + -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, + -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, 
-1, +1, + -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, + +1, ], + [+1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, + -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, + +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, + +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, + +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, + +1, ], + [+1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, + -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, + -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, + +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, + -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, + -1, ], + [+1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, + +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, + +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, + -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, + -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, + +1, ], + [+1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, + +1, +1, -1, -1, +1, -1, -1, 
-1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, + -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, + +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, + +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, + -1, ], + [+1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, + -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, + -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, + +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, + -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, + +1, ], + [+1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, + -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, + -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, + +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, + -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, + -1, ], + [+1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, + -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, + -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, + +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, 
+1, +1, -1, -1, +1, +1, -1, -1, -1, +1, + -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, + +1, ], + [+1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, + -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, +1, + -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, + -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, + +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, + -1, ], + [+1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, + -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, + +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, + +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, + -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, + -1, ], + [+1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, + +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, + +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, + -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, + -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, + -1, ], + [+1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, 
-1, + +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, + +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, + +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, + -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, + -1, ], + [+1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, + -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, + +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, -1, + +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, + +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, + +1, ], + [+1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, + -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, + +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, + -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, + +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, + +1, ], + [+1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, + -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, + -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, + +1, -1, +1, +1, -1, +1, -1, +1, +1, 
+1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, + -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, + -1, ], + [+1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, + -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, + +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, + +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, + -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, + +1, ], + ]) + + +def get_had156(): + return torch.FloatTensor([ + [+1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, + -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, + -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, +1, + +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, + +1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, + +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, ], + [+1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, + -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, + +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, + +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, + +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, 
-1, -1, -1, +1, +1, +1, -1, +1, +1, -1, + +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, ], + [+1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, + +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, + -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, + -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, + -1, +1, +1, -1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, + -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, ], + [-1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, + -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, + -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, + -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, + +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, + +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, ], + [-1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, + -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, + -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, + +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, + -1, +1, -1, +1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, + +1, +1, -1, +1, +1, +1, -1, -1, -1, 
-1, -1, +1, -1, +1, -1, +1, ], + [+1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, + +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, + -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, + +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, + -1, -1, +1, -1, +1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, + -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, ], + [-1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, + +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, + +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, +1, + +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, + -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, + +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, ], + [+1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, + -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, + +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, + +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, + +1, -1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, + +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, ], + [-1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, 
-1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, + -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, + -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, +1, -1, +1, +1, -1, -1, + +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, + -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, + +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, ], + [-1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, + -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, + -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, +1, -1, +1, +1, -1, + -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, + +1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, + -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, ], + [-1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, + -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, + +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, + -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, + -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, + -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, ], + [-1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, + +1, -1, 
-1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, + -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, -1, -1, +1, -1, +1, + +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, + -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, + -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, ], + [-1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, + +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, + -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, + +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, + +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, + -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, ], + [+1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, + -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, + -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, -1, +1, + -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, + -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, + -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, ], + [-1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, + -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, 
+1, +1, +1, +1, +1, +1, + +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, + +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, + -1, -1, +1, -1, -1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, + +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, ], + [-1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, + +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, + +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, -1, -1, + -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, + -1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, + -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, ], + [+1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, + -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, + +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, +1, -1, +1, -1, + -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, + -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, + +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, ], + [+1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, + -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, + +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, 
-1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, +1, -1, +1, + -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, + +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, + -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, ], + [-1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, + -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, + +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, + +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, + -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, + +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, ], + [-1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, + -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, + +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, + -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, + -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, + +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, ], + [-1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, + -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, + +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, + +1, -1, +1, -1, 
-1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, + +1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, + -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, ], + [-1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, + +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, + -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, + -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, + -1, +1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, + -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, ], + [+1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, + -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, + -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, +1, + -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, + +1, -1, +1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, + -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, ], + [+1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, + +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, + -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, + +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, 
-1, +1, -1, -1, + -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, + +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, ], + [-1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, + -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, + +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, + -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, + -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, + -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, ], + [-1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, + -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, + -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, + -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, + -1, -1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, + -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, ], + [+1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, + +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, + -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, + -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, + +1, -1, -1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, 
+1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, + -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, ], + [-1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, + +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, + +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, +1, -1, +1, -1, -1, +1, + -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, + -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, + +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, ], + [-1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, + +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, + +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, -1, + +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, + +1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, + +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, ], + [-1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, + +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, + -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, + -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, + +1, +1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, + -1, +1, +1, -1, -1, -1, 
+1, -1, -1, -1, +1, +1, -1, +1, -1, +1, ], + [-1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, + +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, + -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, + -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, + -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, + +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, ], + [-1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, + -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, + -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, +1, -1, -1, -1, +1, -1, + +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, + -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, + -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, ], + [+1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, + -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, + -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, -1, -1, +1, + -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, + +1, -1, -1, +1, +1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, + +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, ], + [-1, +1, -1, -1, -1, -1, -1, +1, 
-1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, + +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, + +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, + +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, + +1, +1, -1, -1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, + -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, ], + [+1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, + -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, + -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, + -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, + +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, + -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, ], + [-1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, + +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, + +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, + -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, + +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, + -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, ], + [-1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, 
+ -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, + -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, + -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, + +1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, + -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, ], + [+1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, + -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, + -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, + +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, + -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, + -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, ], + [+1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, + -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, + +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, + -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, + -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, + +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, ], + [-1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, + -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, 
-1, -1, -1, -1, -1, +1, -1, -1, +1, + +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, + +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, + -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, + -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, ], + [-1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, + +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, + +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, + -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, + +1, -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, + +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, ], + [-1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, + +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, + -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, +1, + -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, + -1, +1, -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, + -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, ], + [-1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, + +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, + -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, 
-1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, + +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, + +1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, + -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, ], + [+1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, + +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, + +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, + +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, + -1, +1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, + -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, ], + [+1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, + -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, + -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, +1, +1, -1, + +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, + +1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, + -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, ], + [+1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, + +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, + -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, + -1, 
+1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, + +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, + +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, ], + [-1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, + -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, + -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, + +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, + +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, + -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, ], + [+1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, + +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, + -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, + +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, + +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, + -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, ], + [+1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, + +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, + -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, + +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, 
+1, -1, -1, +1, -1, -1, -1, + +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, + +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, ], + [-1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, + -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, + +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, -1, + -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, + -1, +1, +1, +1, +1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, + -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, ], + [-1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, + +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, + -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, -1, +1, -1, +1, + -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, + -1, -1, +1, +1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, + +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, ], + [+1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, + -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, + +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, + +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, + -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, -1, +1, 
+1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, + -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, ], + [+1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, + +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, + -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, + -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, + +1, -1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, + -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, ], + [+1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, + +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, + -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, +1, +1, +1, -1, + +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, + -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, + -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, ], + [+1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, + +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, + +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, +1, +1, +1, + -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, + -1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, + +1, -1, -1, 
-1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, ], + [-1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, + +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, + +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, -1, +1, +1, +1, +1, + +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, + +1, -1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, + -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, ], + [+1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, + -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, + +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, + +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, + -1, +1, -1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, + +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, ], + [-1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, + -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, + +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, + +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, + -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, + +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, ], + [+1, -1, +1, -1, +1, 
+1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, + +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, + +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, + +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, + -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, + -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, ], + [+1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, + +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, + -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, + +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, + +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, + -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, ], + [-1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, + -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, + -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, -1, + -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, + +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, + +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, ], + [+1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, 
-1, +1, +1, + +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, + +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, +1, -1, + -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, + +1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, + +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, ], + [-1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, + +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, + -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, + -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, + +1, +1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, + +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, ], + [+1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, + +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, + +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, + +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, + +1, +1, +1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, + +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, ], + [+1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, + -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, -1, 
-1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, + -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, -1, +1, -1, + -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, + -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, + +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, ], + [+1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, + -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, + -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, -1, +1, + -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, + +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, + -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, ], + [+1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, + -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, + -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, + +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, + -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, + -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, ], + [-1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, + -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, + -1, -1, -1, -1, +1, -1, +1, 
-1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, +1, +1, +1, -1, -1, + -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, + +1, -1, +1, -1, +1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, + +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, ], + [-1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, + -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, + -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, + -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, + -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, + +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, ], + [+1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, + -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, + +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, + -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, + -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, + -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, ], + [+1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, + -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, + -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, 
+1, + +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, + +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, + +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, ], + [-1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, + +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, + -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, + +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, + +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, + -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, ], + [+1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, + +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, + +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, + +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, + +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, + -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, ], + [+1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, + +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, + +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, +1, -1, +1, + +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, 
+1, -1, +1, -1, +1, -1, -1, +1, +1, +1, + -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, + -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, ], + [+1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, + -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, + -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, +1, -1, + +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, + +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, + +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, ], + [-1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, + +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, + -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, + -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, + +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, + -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, ], + [-1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, + +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, + -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, +1, -1, + +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, + +1, +1, +1, -1, +1, +1, -1, -1, +1, 
+1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, -1, + +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, ], + [-1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, + -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, + -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, +1, + -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, + -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, + -1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, ], + [-1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, + +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, + +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, +1, + -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, + +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, + -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, ], + [-1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, + -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, + +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, + +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, + -1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, + 
+1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, ], + [-1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, + +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, + +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, +1, -1, + -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, + +1, -1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, + -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, ], + [+1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, + -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, + -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, +1, + -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, + -1, +1, -1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, + +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, ], + [+1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, + +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, + -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, + +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, + -1, -1, +1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, + +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, ], + [-1, +1, 
+1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, + +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, + -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, + +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, + -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, + -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, ], + [-1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, + -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, + -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, + +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, + -1, -1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, + +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, ], + [+1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, + +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, + -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, -1, +1, + +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, + -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, + -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, ], + [-1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, 
-1, +1, +1, -1, +1, +1, + +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, + +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, -1, + +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, + +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, + +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, ], + [+1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, + +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, + -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, -1, +1, -1, + -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, + -1, +1, -1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, + +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, ], + [+1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, + +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, + +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, -1, +1, + -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, + -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, + +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, ], + [+1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, + -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, 
+1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, + -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, -1, + +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, + +1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, + +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, ], + [-1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, + +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, + +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, + -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, + +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, + -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, ], + [+1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, + +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, + +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, -1, -1, -1, -1, + +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, + -1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, + -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, ], + [-1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, + -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, + -1, +1, +1, -1, 
+1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, -1, -1, -1, + -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, + -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, + +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, ], + [+1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, + +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, + -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, -1, -1, + -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, + -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, + +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, ], + [+1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, + -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, + -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, -1, + -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, + -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, + -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, ], + [-1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, + +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, + +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, 
-1, -1, +1, -1, + -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, + +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, + +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, ], + [+1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, + +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, + -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, + -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, + +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, + +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, ], + [+1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, + +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, + -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, + +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, + -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, + +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, ], + [+1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, + -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, + -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, + -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, 
+1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, + -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, + -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, ], + [+1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, + +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, + +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, +1, + -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, + +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, + -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, ], + [-1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, + -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, + +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, + +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, + -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, + -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, ], + [+1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, + -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, + -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, + +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, + -1, -1, +1, -1, -1, -1, 
+1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, + -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, ], + [+1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, + +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, + +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, -1, + -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, + -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, + -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, ], + [-1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, + +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, + -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, + -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, + -1, -1, -1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, + -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, ], + [+1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, + -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, + +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, + -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, + -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, 
+1, +1, + -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, ], + [-1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, + -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, + -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, +1, + -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, + +1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, + +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, ], + [+1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, + -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, + -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, +1, + +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, + -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, + +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, ], + [+1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, + -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, -1, + -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, + +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, + +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, + +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, ], + 
[+1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, + -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, -1, + -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, + -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, + -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, + -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, ], + [-1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, + +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, + -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, -1, -1, -1, -1, +1, + -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, + -1, -1, +1, -1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, + +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, ], + [+1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, + +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, +1, + +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, -1, -1, -1, -1, + +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, + +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, + +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, ], + [-1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, 
-1, +1, -1, +1, +1, +1, -1, +1, -1, + -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, +1, + +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, -1, -1, -1, + -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, + +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, + -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, ], + [-1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, + -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, + +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, -1, -1, -1, + -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, + +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, + -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, +1, ], + [+1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, + +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, + -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, + -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, + +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, + +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, ], + [+1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, + -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, 
-1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, + +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, + -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, + +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, + +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, ], + [-1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, + +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, -1, + +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, + -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, + -1, +1, +1, +1, +1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, + +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, ], + [-1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, + +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, + -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, +1, -1, + +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, + -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, + +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, -1, -1, ], + [-1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, + +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, + -1, 
+1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, + -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, + -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, + +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, ], + [+1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, + +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, + +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, +1, +1, +1, -1, + -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, + -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, + +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, ], + [+1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, + +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, + +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, + -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, + +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, + -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, ], + [+1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, + +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, + -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, 
+1, +1, +1, +1, +1, +1, +1, + +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, + -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, + -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, ], + [-1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, + -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, + +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, + +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, + -1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, + -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, ], + [-1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, + -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, + -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, -1, +1, +1, +1, +1, + +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, + +1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, + -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, ], + [+1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, + -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, + +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, -1, +1, +1, +1, + +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, 
-1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, + +1, +1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, + +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, ], + [-1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, + +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, + +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, -1, +1, +1, + +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, + -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, + +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, ], + [+1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, + -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, + +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, -1, +1, + +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, + -1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, + -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, ], + [-1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, + -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, + -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, -1, + +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, + -1, -1, -1, 
+1, +1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, + -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, ], + [+1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, + +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, + +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, + -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, + -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, + +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, ], + [+1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, + -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, + -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, + -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, + +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, + -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, ], + [+1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, + -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, + -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, + -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, + -1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, 
-1, +1, -1, -1, -1, + -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, ], + [+1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, + -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, + +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, + +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, + +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, + -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, ], + [+1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, + +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, + +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, + -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, + -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, + -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, ], + [-1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, + +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, + -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, + -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, + -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, + -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, 
+1, +1, ], + [-1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, + +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, + -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, + +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, + +1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, + +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, ], + [-1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, + +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, + -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, + +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, + -1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, + -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, ], + [+1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, + +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, + -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, + -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, + +1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, + +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, ], + [-1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, 
+1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, + -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, + -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, -1, -1, + -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, + -1, +1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, + -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, ], + [-1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, + +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, + +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, -1, +1, -1, +1, -1, + -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, + -1, -1, +1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, + -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, ], + [+1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, + -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, + +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, -1, +1, -1, +1, + -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, + -1, -1, -1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, + +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, ], + [-1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, + +1, -1, +1, -1, +1, +1, +1, 
+1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, + -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, -1, +1, -1, + +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, + -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, + +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, ], + [-1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, + -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, + -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, -1, +1, + -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, + +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, + +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, ], + [-1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, + -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, + +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, + +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, + +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, + +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, ], + [+1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, + +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, 
+1, + -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, -1, +1, -1, + -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, + -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, + +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, ], + [+1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, + +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, + +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, + -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, +1, + -1, -1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, + -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, ], + [+1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, + +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, + +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, + +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, + +1, -1, -1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, + -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, ], + [+1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, + -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, + +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, 
+1, -1, +1, +1, +1, -1, -1, -1, -1, +1, + -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, + -1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, + +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, ], + [+1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, + +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, + -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, -1, -1, -1, + +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, + -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, +1, + -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, ], + [-1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, + +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, + +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, + -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, + -1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, -1, + +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, ], + [+1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, + +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, + -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, + -1, -1, +1, -1, +1, -1, -1, +1, -1, 
+1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, +1, + +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, + -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, ], + [-1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, + -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, + +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, +1, -1, + -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, + +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, + -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, ], + [+1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, + -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, + +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, +1, + -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, + +1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, + -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, +1, ], + [-1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, + +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, + -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, + +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, + 
+1, +1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, + -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, -1, ], + [-1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, + -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, + +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, + +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, + +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, + -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, -1, ], + [+1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, + +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, + +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, + -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, + +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, + +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, ], + [+1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, + -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, + +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, +1, + -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, + +1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, 
+1, -1, -1, -1, -1, +1, +1, -1, + -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, ], + [+1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, + +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, + +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, + +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, + -1, +1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, + -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, ], + ]) + + +def get_had172(): + return torch.FloatTensor([ + [+1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, + +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, + +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, + -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, + +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, + -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, + -1, -1, -1, +1, ], + [-1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, + +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, + -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, + +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, + -1, +1, +1, +1, +1, -1, +1, -1, +1, 
-1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, + +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, + +1, -1, -1, -1, ], + [-1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, + +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, + -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, + +1, +1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, + +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, + -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, + +1, +1, -1, -1, ], + [-1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, + -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, + -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, + +1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, + -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, + +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, + +1, +1, +1, -1, ], + [+1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, + +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, + -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, + +1, +1, +1, -1, +1, +1, +1, +1, 
+1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, + -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, + +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, + +1, +1, +1, +1, ], + [+1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, + +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, + +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, + +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, + -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, + +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, + -1, +1, +1, +1, ], + [-1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, + -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, + +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, + +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, + -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, + +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, + +1, -1, +1, +1, ], + [-1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, + -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, + +1, +1, +1, -1, -1, -1, -1, 
+1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, + -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, + +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, + -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, + -1, +1, -1, +1, ], + [+1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, + +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, + +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, + -1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, + -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, + -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, + -1, -1, +1, -1, ], + [+1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, + +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, + +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, + -1, -1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, + +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, + -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, + +1, -1, -1, +1, ], + [+1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, + -1, +1, +1, -1, -1, +1, 
+1, -1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, + +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, + -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, + +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, + +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, + -1, +1, -1, -1, ], + [+1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, + +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, + -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, + +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, + +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, + +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, + -1, -1, +1, -1, ], + [-1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, + +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, + +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, + -1, +1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, + +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, + +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, + +1, -1, -1, +1, ], + [+1, -1, +1, +1, +1, 
+1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, + +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, + +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, + +1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, + -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, + -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, + +1, +1, -1, -1, ], + [-1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, + -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, + +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, + -1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, + +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, + -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, + -1, +1, +1, -1, ], + [+1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, + +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, + -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, + -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, + -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, + -1, -1, -1, +1, +1, +1, -1, -1, -1, 
+1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, + -1, -1, +1, +1, ], + [+1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, + -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, + +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, + +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, + +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, + +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, + -1, -1, -1, +1, ], + [+1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, + +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, + +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, + +1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, + -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, + +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, + -1, -1, -1, -1, ], + [-1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, + +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, + +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, + -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, + -1, -1, +1, -1, +1, -1, +1, +1, 
+1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, + +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, + -1, -1, -1, -1, ], + [+1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, + +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, + +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, + +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, + +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, + +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, + -1, -1, -1, -1, ], + [+1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, + +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, + +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, + +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, + +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, + -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, + -1, -1, -1, -1, ], + [-1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, + -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, + +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, + -1, +1, -1, -1, +1, -1, +1, 
+1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, + -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, + +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, + -1, -1, -1, -1, ], + [-1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, + -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, + -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, + +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, + +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, + -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, + +1, -1, -1, -1, ], + [+1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, + +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, + -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, + +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, + -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, + -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, + +1, +1, -1, -1, ], + [+1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, + +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, + -1, -1, -1, +1, +1, +1, 
+1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, + -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, + +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, + +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, + -1, +1, +1, -1, ], + [-1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, + -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, + -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, + -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, + +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, + -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, + -1, -1, +1, +1, ], + [+1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, + -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, + +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, + +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, + +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, + -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, + +1, -1, -1, +1, ], + [+1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, + -1, -1, -1, +1, +1, 
-1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, + -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, + -1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, + +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, + +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, + -1, +1, -1, -1, ], + [+1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, + +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, + +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, + +1, -1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, + +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, + +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, + -1, -1, +1, -1, ], + [-1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, + -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, + -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, + -1, +1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, + -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, + -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, + +1, -1, -1, +1, ], + [+1, -1, +1, +1, 
+1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, + -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, + -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, + -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, + +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, + -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, + -1, +1, -1, -1, ], + [-1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, + -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, + +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, + -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, + -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, + -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, + +1, -1, +1, -1, ], + [+1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, + +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, + +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, + -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, + +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, + -1, -1, -1, -1, +1, +1, -1, -1, 
+1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, + +1, +1, -1, +1, ], + [+1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, + +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, + -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, + +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, + +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, + -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, + +1, +1, +1, -1, ], + [+1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, + -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, + +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, + +1, +1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, + -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, + -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, + +1, +1, +1, +1, ], + [+1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, + -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, + +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, + +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, + -1, -1, +1, +1, -1, +1, -1, 
+1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, + -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, + -1, +1, +1, +1, ], + [-1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, + +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, + -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, + +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, + +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, + -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, + -1, -1, +1, +1, ], + [-1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, + +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, + +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, + +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, + -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, + +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, + -1, -1, -1, +1, ], + [+1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, + +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, + +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, + +1, +1, -1, +1, +1, -1, 
-1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, + +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, + +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, + +1, -1, -1, -1, ], + [+1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, + +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, + -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, + -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, + -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, + -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, + +1, +1, -1, -1, ], + [-1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, + -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, + -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, + +1, -1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, + +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, + -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, + +1, +1, +1, -1, ], + [-1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, + +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, + +1, -1, -1, +1, +1, 
-1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, + +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, + +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, + +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, + -1, +1, +1, +1, ], + [-1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, + -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, + -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, + +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, + +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, + -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, + -1, -1, +1, +1, ], + [-1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, + -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, + +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, + -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, + -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, + -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, + +1, -1, +1, +1, ], + [-1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, + +1, -1, +1, -1, 
+1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, + -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, + -1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, + +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, + +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, + -1, +1, -1, +1, ], + [+1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, + +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, + +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, + +1, -1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, + +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, + -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, + +1, -1, +1, -1, ], + [-1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, + -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, + +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, + +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, + +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, + -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, + +1, +1, -1, +1, ], + [-1, -1, +1, 
-1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, + -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, + +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, + -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, + +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, + +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, + -1, +1, +1, -1, ], + [-1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, + +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, + +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, + -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, + +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, + +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, + -1, -1, +1, +1, ], + [-1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, + -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, + -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, + +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, + +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, + -1, +1, +1, -1, -1, +1, -1, 
+1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, + +1, -1, -1, +1, ], + [-1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, + -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, + -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, + +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, + +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, + +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, + -1, +1, -1, -1, ], + [-1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, + +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, + +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, + +1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, + +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, + -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, + +1, -1, +1, -1, ], + [+1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, + -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, + +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, + +1, +1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, + -1, +1, +1, +1, +1, +1, 
+1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, + +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, + -1, +1, -1, +1, ], + [+1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, + -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, + -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, + -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, + -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, + +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, + +1, -1, +1, -1, ], + [+1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, + +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, + -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, + +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, + +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, + +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, + +1, +1, -1, +1, ], + [+1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, + +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, + -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, + -1, +1, +1, +1, -1, 
+1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, + +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, + +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, + +1, +1, +1, -1, ], + [-1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, + -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, + +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, + +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, + -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, + +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, + +1, +1, +1, +1, ], + [+1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, + +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, + -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, + +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, + +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, + -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, + -1, +1, +1, +1, ], + [-1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, + -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, + -1, -1, +1, -1, 
-1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, + +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, + +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, + +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, + +1, -1, +1, +1, ], + [+1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, + +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, + -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, + -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, + -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, + -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, + -1, +1, -1, +1, ], + [+1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, + +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, + +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, + +1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, + +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, + +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, + -1, -1, +1, -1, ], + [-1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, + +1, +1, +1, 
-1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, + +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, + +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, + -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, + +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, + -1, -1, -1, +1, ], + [-1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, + +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, + -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, + -1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, + -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, + -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, + -1, -1, -1, -1, ], + [+1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, + -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, + -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, + -1, -1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, + -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, + -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, + +1, -1, -1, -1, ], + [-1, +1, 
-1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, + -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, + +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, + +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, + -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, + +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, + -1, +1, -1, -1, ], + [-1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, + -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, + +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, + +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, + +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, + -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, + +1, -1, +1, -1, ], + [+1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, + -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, + +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, + -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, + +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, + +1, -1, +1, -1, -1, +1, 
+1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, + +1, +1, -1, +1, ], + [-1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, + -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, + +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, + +1, -1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, + +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, + -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, + +1, +1, +1, -1, ], + [-1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, + -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, + -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, + +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, + -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, + +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, + +1, +1, +1, +1, ], + [+1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, + +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, + +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, + +1, +1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, + -1, -1, +1, +1, +1, 
-1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, + +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, + -1, +1, +1, +1, ], + [+1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, + -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, + -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, + -1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, + -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, + +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, + +1, -1, +1, +1, ], + [-1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, + -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, + +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, + +1, -1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, + +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, + +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, + -1, +1, -1, +1, ], + [+1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, + -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, + +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, + -1, +1, +1, +1, 
-1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, + +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, + -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, + +1, -1, +1, -1, ], + [-1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, + +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, + +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, + +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, + +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, + +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, + -1, +1, -1, +1, ], + [+1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, + -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, + -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, + +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, + -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, + -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, + -1, -1, +1, -1, ], + [+1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, + -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, + +1, -1, +1, 
+1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, + +1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, + -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, + -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, + +1, -1, -1, +1, ], + [+1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, + -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, + +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, + +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, + -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, + -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, + +1, +1, -1, -1, ], + [+1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, + -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, + -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, + -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, + -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, +1, + -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, +1, + -1, +1, +1, -1, ], + [-1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, + -1, -1, 
-1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, + -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, + -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, + +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, + +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, + +1, -1, +1, +1, ], + [-1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, + -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, + +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, + +1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, + -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, + -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, +1, + -1, +1, -1, +1, ], + [-1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, + +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, + +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, + +1, +1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, + +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, + +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, + +1, -1, +1, -1, ], + [-1, 
-1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, + +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, + -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, + -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, + +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, + +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, + +1, +1, -1, +1, ], + [-1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, + +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, + +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, + -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, + -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, + +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, + +1, +1, +1, -1, ], + [-1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, + +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, + +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, + -1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, + +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, + +1, +1, +1, +1, -1, 
+1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, + +1, +1, +1, +1, ], + [+1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, + -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, + +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, + +1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, + +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, + -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, + +1, +1, +1, +1, ], + [-1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, + +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, + -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, + -1, +1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, + -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, + +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, + -1, +1, +1, +1, ], + [-1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, + -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, + +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, + -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, + +1, +1, +1, -1, 
+1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, + +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, + -1, -1, +1, -1, ], + [-1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, + -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, + -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, + -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, + -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, + +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, + -1, -1, -1, +1, ], + [-1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, + -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, + -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, + -1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, + +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, + +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, + -1, -1, -1, -1, ], + [+1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, + +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, + +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, + +1, -1, -1, 
-1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, + +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, + -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, + -1, -1, -1, -1, ], + [-1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, + -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, + -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, + +1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, + -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, + -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, + -1, -1, -1, -1, ], + [+1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, + +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, + +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, + +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, + -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, + -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, + +1, -1, -1, -1, ], + [-1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, + +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, + +1, +1, 
-1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, + +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, + +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, + -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, + +1, +1, -1, -1, ], + [-1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, + +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, + +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, + -1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, + +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, + -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, + +1, +1, +1, -1, ], + [+1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, + +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, + +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, + +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, + -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, + -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, + +1, +1, +1, +1, ], + [+1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, + -1, 
+1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, + -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, + -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, + +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, + +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, + -1, +1, +1, +1, ], + [-1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, + +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, + -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, + -1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, + +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, + -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, + +1, -1, +1, +1, ], + [+1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, + -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, + -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, + +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, + +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, + -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, + -1, +1, -1, +1, ], + 
[-1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, + -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, + +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, + -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, + -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, + -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, + +1, -1, +1, -1, ], + [+1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, + -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, + +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, + -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, + +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, + +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, + +1, +1, -1, +1, ], + [-1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, + -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, + +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, + +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, + -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, + -1, +1, -1, -1, 
-1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, + -1, +1, +1, -1, ], + [-1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, + +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, + -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, + +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, + +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, + -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, + -1, -1, +1, +1, ], + [-1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, + -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, + -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, + -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, + +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, + -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, + +1, -1, -1, +1, ], + [-1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, + +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, + -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, + -1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, + +1, +1, +1, 
-1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, + -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, + -1, +1, -1, -1, ], + [+1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, + -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, + +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, + -1, -1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, + +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, + -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, + -1, -1, +1, -1, ], + [-1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, + +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, + +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, + -1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, + -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, + -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, + +1, -1, -1, +1, ], + [+1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, + +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, + +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, + -1, -1, 
+1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, + -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, + +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, + -1, +1, -1, -1, ], + [+1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, + -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, + +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, + -1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, + +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, + +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, + -1, -1, +1, -1, ], + [+1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, + -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, + -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, + -1, -1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, + +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, + +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, + +1, -1, -1, +1, ], + [+1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, + +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, + +1, 
-1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, + -1, -1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, + -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, + +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, + +1, +1, -1, -1, ], + [-1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, + -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, + -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, + +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, + -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, + -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, + -1, +1, +1, -1, ], + [+1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, + +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, + -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, + +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, + -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, + +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, + +1, -1, +1, +1, ], + [-1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, + 
-1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, + +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, + -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, + +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, + -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, + -1, +1, -1, +1, ], + [-1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, + -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, + -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, + -1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, + -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, + +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, + +1, -1, +1, -1, ], + [-1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, + -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, + -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, + +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, + -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, + +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, + +1, +1, -1, +1, 
], + [-1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, + -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, + +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, + -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, + -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, + -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, +1, + +1, +1, +1, -1, ], + [+1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, + -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, + +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, + -1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, + +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, + -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, + +1, +1, +1, +1, ], + [-1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, + +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, + -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, + +1, -1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, + +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, + +1, -1, -1, 
+1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, + -1, +1, +1, +1, ], + [+1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, + -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, + -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, + -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, + -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, + -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, + -1, -1, +1, +1, ], + [-1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, + +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, -1, + -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, + +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, + -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, + -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, + -1, -1, -1, +1, ], + [+1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, + -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, -1, + -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, + +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, + +1, -1, 
-1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, + +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, + -1, -1, -1, -1, ], + [+1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, + -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, + -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, + +1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, + +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, -1, + -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, -1, + -1, -1, -1, -1, ], + [-1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, + +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, -1, + -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, + +1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, + +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, + -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, +1, + -1, -1, -1, -1, ], + [-1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, + +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, + -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, + -1, 
+1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, + +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, + +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, -1, + +1, -1, -1, -1, ], + [+1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, + -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, +1, + -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, + -1, -1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, + -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, + +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, -1, + -1, +1, -1, -1, ], + [-1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, + +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, +1, + +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, +1, + -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, + +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, + -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, -1, + -1, -1, +1, -1, ], + [+1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, + -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, -1, + 
+1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, +1, + +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, + -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, + +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, +1, + -1, -1, -1, +1, ], + [-1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, + +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, -1, + -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, +1, + +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, + +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, +1, + -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, -1, + +1, -1, -1, -1, ], + [-1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, + -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, +1, +1, -1, +1, -1, -1, +1, + -1, -1, +1, +1, -1, -1, -1, -1, -1, -1, -1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, + +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, + +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, -1, +1, +1, +1, + +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, -1, + -1, +1, -1, -1, ], + [-1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, 
-1, + +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, + +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, + -1, -1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, + -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, + +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, + +1, -1, -1, -1, ], + [-1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, + -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, + -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, + +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, + +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, + +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, + +1, +1, -1, -1, ], + [+1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, + -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, + +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, + -1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, + +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, + +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, + -1, +1, +1, 
-1, ], + [+1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, + +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, + -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, + +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, + -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, + +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, + -1, -1, +1, +1, ], + [+1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, + +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, + +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, + -1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, + +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, + -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, + +1, -1, -1, +1, ], + [-1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, + +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, + +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, + -1, -1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, + +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, + -1, -1, 
+1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, + +1, +1, -1, -1, ], + [-1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, + +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, + -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, + +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, + -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, + +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, + +1, +1, +1, -1, ], + [-1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, + +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, + -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, + +1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, + +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, + +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, + +1, +1, +1, +1, ], + [-1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, + +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, + +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, + -1, +1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, + +1, 
+1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, + -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, + -1, +1, +1, +1, ], + [+1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, + +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, + -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, + +1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, + -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, + -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, + +1, -1, +1, +1, ], + [-1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, + +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, + +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, + -1, +1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, + -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, + -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, + -1, +1, -1, +1, ], + [+1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, + -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, + -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, + 
+1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, + +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, + +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, + +1, -1, +1, -1, ], + [+1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, + -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, + -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, + -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, + -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, + -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, + +1, +1, -1, +1, ], + [-1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, + +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, + -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, + -1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, + +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, + -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, + +1, +1, +1, -1, ], + [+1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, + +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, 
-1, + -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, + -1, -1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, + -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, + -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, + -1, +1, +1, +1, ], + [+1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, + -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, + -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, + -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, + -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, + +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, + +1, -1, +1, +1, ], + [-1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, + +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, + +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, + +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, + -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, + +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, + +1, +1, -1, +1, ], + [-1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, 
+1, -1, + +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, + -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, + -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, + -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, + -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, + -1, +1, +1, -1, ], + [+1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, + -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, + +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, + +1, -1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, + +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, + -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, + -1, -1, +1, +1, ], + [+1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, + +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, + -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, + +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, + +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, + +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, + +1, -1, 
-1, +1, ], + [+1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, + -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, + -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, + +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, + +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, + +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, + +1, +1, -1, -1, ], + [+1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, + -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, + +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, + +1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, + +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, + +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, + -1, +1, +1, -1, ], + [+1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, + -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, + +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, + -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, + +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, + +1, 
+1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, + +1, -1, +1, +1, ], + [+1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, + -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, + -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, + +1, -1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, + +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, + -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, + +1, +1, -1, +1, ], + [+1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, + +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, + +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, + -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, + -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, + +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, + +1, +1, +1, -1, ], + [+1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, + +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, + -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, + -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, + 
+1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, + -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, + -1, +1, +1, +1, ], + [-1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, + +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, + +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, + -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, + +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, + +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, + +1, -1, +1, +1, ], + [-1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, + -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, + -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, + -1, -1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, + +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, + +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, + -1, +1, -1, +1, ], + [+1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, + -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, + -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, 
-1, + +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, + -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, + +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, + +1, -1, +1, -1, ], + [+1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, + -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, + -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, + -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, + +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, + -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, + +1, +1, -1, +1, ], + [-1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, + +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, + -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, + +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, + +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, + +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, +1, + +1, +1, +1, -1, ], + [+1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, + +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, 
+1, -1, + +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, + -1, +1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, + +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, + +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, -1, + +1, +1, +1, +1, ], + [+1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, + +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, + -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, + +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, + +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, + -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, -1, + -1, +1, +1, +1, ], + [-1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, -1, -1, + -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, +1, + +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, -1, + +1, +1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, +1, + +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, +1, + -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, +1, + -1, -1, +1, +1, ], + [+1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, 
+1, -1, -1, + -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, +1, + +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, -1, + -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, + +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, +1, + +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, +1, + +1, -1, -1, +1, ], + [-1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, -1, + -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, + +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, +1, + -1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, + -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, +1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, -1, + +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, -1, + +1, +1, -1, -1, ], + [-1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, + -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, -1, + +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, -1, + +1, -1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, + -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, +1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, +1, + -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, -1, + -1, 
+1, +1, -1, ], + [-1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, -1, + +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, +1, + -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, +1, + -1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, + -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, +1, + +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, -1, + -1, -1, +1, +1, ], + [-1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, +1, + -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, -1, + +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, -1, + +1, -1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, +1, + -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, +1, + +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, +1, + -1, -1, -1, +1, ], + [+1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, +1, + +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, -1, + -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, -1, + -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, -1, + +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, -1, + 
+1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, -1, + +1, -1, -1, -1, ], + [+1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, -1, + +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, +1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, -1, + -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, -1, + -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, + -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, +1, + -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, -1, + -1, +1, -1, -1, ], + [+1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, +1, + -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, -1, + -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, -1, + -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, -1, -1, + +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, +1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, -1, + +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, -1, + -1, -1, +1, -1, ], + [-1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, +1, -1, +1, +1, -1, -1, +1, +1, +1, +1, +1, +1, +1, +1, -1, -1, +1, + +1, -1, +1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, -1, +1, + -1, -1, -1, -1, +1, -1, +1, +1, +1, +1, -1, +1, -1, -1, -1, -1, +1, -1, +1, -1, +1, +1, -1, -1, +1, -1, +1, -1, + -1, -1, +1, -1, +1, +1, +1, +1, +1, +1, -1, -1, -1, -1, +1, -1, +1, -1, -1, +1, +1, -1, +1, +1, -1, +1, +1, 
-1, + -1, +1, -1, +1, -1, -1, -1, -1, +1, +1, +1, +1, +1, +1, -1, +1, +1, -1, -1, -1, +1, +1, -1, -1, +1, +1, +1, +1, + -1, +1, -1, +1, +1, +1, -1, +1, +1, -1, -1, +1, +1, -1, +1, +1, +1, -1, +1, -1, +1, +1, +1, +1, -1, -1, +1, +1, + -1, -1, -1, +1, ], + ]) diff --git a/tools/rotate/model/__init__.py b/tools/rotate/model/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tools/rotate/model/qwen.py b/tools/rotate/model/qwen.py new file mode 100644 index 000000000..d98d5e34c --- /dev/null +++ b/tools/rotate/model/qwen.py @@ -0,0 +1,378 @@ +import torch +from torch import nn +from typing import Union +from transformers import Qwen2ForCausalLM +from ..common import RotateOperationRegistry +from ..common import AutoOperation + +from ..common import NormLinearIterator +try: + from transformers.models.qwen2_vl.modeling_qwen2_vl import Qwen2VisionTransformerPretrainedModel, Qwen2VLForConditionalGeneration + from transformers.models.qwen2_vl.modeling_qwen2_vl import PatchEmbed + from transformers.models.qwen2_vl.modeling_qwen2_vl import Qwen2VLAttention + from transformers.models.qwen2_vl.modeling_qwen2_vl import VisionAttention, VisionFlashAttention2, VisionSdpaAttention + HAS_QWEN2_VL = True +except ImportError: + HAS_QWEN2_VL = False + class Qwen2VLForConditionalGeneration: + pass + class Qwen2VisionTransformerPretrainedModel: + pass + class PatchEmbed: + pass + class Qwen2VLAttention: + pass + class VisionAttention: + pass + class VisionFlashAttention2: + pass + class VisionSdpaAttention: + pass + + + +@NormLinearIterator.register_iterator +class Qwen2NormLinearIterator(NormLinearIterator): + def __init__(self, model: Qwen2ForCausalLM): + super().__init__() + self.model = model + + def __iter__(self): + for layer in self.model.model.layers: + yield layer, "input_layernorm", [ + layer.self_attn.q_proj, + layer.self_attn.k_proj, + layer.self_attn.v_proj, + ] + yield layer, "post_attention_layernorm", [ + layer.mlp.up_proj, + layer.mlp.gate_proj, + ] 
+ yield self.model.model, "norm", [self.model.lm_head] + + @classmethod + def supports_model(cls, model: nn.Module) -> bool: + return isinstance(model, Qwen2ForCausalLM) or isinstance(model, Qwen2VLForConditionalGeneration) + + +@NormLinearIterator.register_iterator +class Qwen2ViTNormLinearIterator(NormLinearIterator): + def __init__(self, model: Qwen2VisionTransformerPretrainedModel): + super().__init__() + self.model = model + + def __iter__(self): + for layer in self.model.blocks: + yield layer, "norm1", [layer.attn.qkv] + yield layer, "norm2", [layer.mlp.fc1] + yield self.model.merger, "ln_q", [self.model.merger.mlp[0]] + + @classmethod + def supports_model(cls, model: nn.Module) -> bool: + return isinstance(model, Qwen2VisionTransformerPretrainedModel) + + +@AutoOperation.register_operation("rotate_output", PatchEmbed) +def op_rotate_patch_embed_output( + patch_embed: PatchEmbed, + R: torch.Tensor): + linear = patch_embed.proj + assert R.shape[0] == R.shape[1], "R should be a square matrix" + assert R.shape[0] == linear.weight.shape[0], "R should be same size as output dim of linear layer" + dtype = linear.weight.dtype + shape = linear.weight.shape + R_device = R.device + w_device = linear.weight.device + W_ = linear.weight.data.to(device=R_device, dtype=torch.float64).view(R.shape[0], -1) + # note that the W_ in linear is transpose of W + linear.weight.data = (R.T.to(torch.float64) @ W_).to(device=w_device, dtype=dtype).reshape(shape) + + +@AutoOperation.register_operation("center_output", PatchEmbed) +def op_center_patch_embed_output(patch_embed: PatchEmbed): + linear = patch_embed.proj + dtype = linear.weight.dtype + W_ = linear.weight.data.to(dtype=torch.float64).view(linear.weight.shape[0], -1) + # note that the W_ in linear is transpose of W + # center echo columns of W equivalent to centering the rows of W_ + W_mean = W_.mean(dim=0, keepdim=True) + W_centered = W_ - W_mean + linear.weight.data = W_centered.to(dtype=dtype).reshape(linear.weight.shape) + 
+from transformers.models.qwen2.modeling_qwen2 import Qwen2Attention + +@AutoOperation.register_operation("rotate_attn_v", Qwen2Attention) +@AutoOperation.register_operation("rotate_attn_v", Qwen2VLAttention) +def op_rotate_attn_v_for_LM( + attn: Union[Qwen2Attention, Qwen2VLAttention], + R_v: torch.Tensor): + """ + rotate the v (one of the inputs of attention) by a rotation matrix R_v + and rotate v back before W_o + """ + config = attn.config + num_qo_heads = config.num_attention_heads + num_kv_heads = config.num_key_value_heads + + # rotate v in attention + # i.e. rotate the output of W_v + # note that the output is something like [v_1, v_2, ..., v_{num_heads}] + # where v_i is a head_dim vector + # so we need to rotate each head + # results should be something like [v_1R_v, v_2R_v, ..., v_{num_heads}R_v] + # this is equal to [v_1, v_2, ..., v_{num_heads}] @ diag(R_v, R_v, ..., R_v) (num_heads times) + # so we need to rotate the output of W_v by diag(R_v, R_v, ..., R_v) + R_v_rot = torch.block_diag(*([R_v] * num_kv_heads)) + # rotate_linear_output([attn.v_proj], R_v_rot) + AutoOperation.rotate_output(attn.v_proj, R_v_rot) + + # then we need to rotate back the input of W_o + # since o_i is linear combination of v_i + # we can rotate the o_i by R_v^T to get back the original o_i + # rotate_linear_input([attn.o_proj], torch.block_diag(*([R_v] * num_qo_heads)).T) + AutoOperation.rotate_input(attn.o_proj, torch.block_diag(*([R_v] * num_qo_heads)).T) + + + + +@AutoOperation.register_operation("rotate_attn_v", VisionAttention) +@AutoOperation.register_operation("rotate_attn_v", VisionFlashAttention2) +@AutoOperation.register_operation("rotate_attn_v", VisionSdpaAttention) +def op_rotate_attn_v_for_ViT( + attn: Union[VisionAttention, VisionFlashAttention2, VisionSdpaAttention], + R_v: torch.Tensor): + """ + rotate the v (one of the inputs of attention) by a rotation matrix R_v + and rotate v back before W_o + """ + num_heads = attn.num_heads + dim = 
attn.proj.weight.shape[0] + head_dim = dim // num_heads + + + # shape of qkv.weight: [3 * dim, dim] + q_proj, k_proj, v_proj = attn.qkv.weight.view(3, dim, dim).unbind(0) # now shape of v_proj is [dim, dim] (out_dim, in_dim) + q_bias, k_bias, v_bias = attn.qkv.bias.view(3, dim).unbind(0) + + # v_proj: [dim, dim] can be view as [num_heads * head_dim, dim] + # view it as [num_heads, head_dim, dim] + dtype = v_proj.dtype + device = v_proj.device + R_device = R_v.device + v_proj = v_proj.view(num_heads, head_dim, dim).to(device=R_device, dtype=torch.float64) + v_proj = (R_v.T.unsqueeze(0).to(torch.float64) @ v_proj).to(device=device, dtype=dtype) + v_proj = v_proj.view(dim, dim) # change it back to the original shape + + # rotate v_bias + # v_bias: [dim] + # which can be view as [num_heads, head_dim] + v_bias = v_bias.view(num_heads, head_dim).to(dtype=torch.float64, device=R_device) + v_bias = (v_bias @ R_v.to(torch.float64)).to(dtype=dtype, device=device).view(-1) + + # stack to get the original qkv back + qkv = torch.stack([q_proj, k_proj, v_proj], dim=0) + qkv = qkv.view(3 * dim, dim) + qkv = qkv.to(device=device, dtype=dtype) + attn.qkv.weight.data = qkv + attn.qkv.bias.data = torch.cat([q_bias, k_bias, v_bias], dim=0) + + # rotate the output of W_o + AutoOperation.rotate_input(attn.proj, torch.block_diag(*([R_v] * num_heads)).T) + + + +def untie_word_embeddings(model): + if model.config.tie_word_embeddings: + # Spinquant is not compatiable with tie_word_embeddings, clone lm_head from embed_tokens + # this is because the weight of RMSNorm will be merge into lm_head + # and this weight will not be merged into the embeddings + # making the weights of lm_head and embed_tokens not the same + print("tie word embeddings, clone lm_head from embed_tokens") + model.config.tie_word_embeddings = False + + # create a new weight for lm_head + new_weight = torch.empty_like(model.model.embed_tokens.weight) + new_weight.copy_(model.model.embed_tokens.weight) + + # copy from 
model.model.embed_tokens.weight + model.lm_head.weight = nn.Parameter(new_weight) + new_weight = torch.empty_like(model.model.embed_tokens.weight) + new_weight.copy_(model.model.embed_tokens.weight) + + # assign the new weight to lm_head + model.lm_head.weight = nn.Parameter(new_weight) + + # ensure that the ptr of weight of lm_head is not the same as ptr of the weight of embed_tokens + assert model.model.embed_tokens.weight.data_ptr() != model.lm_head.weight.data_ptr() + + +@torch.inference_mode() +def rotate_model(model: Union[Qwen2ForCausalLM, Qwen2VLForConditionalGeneration], + R: torch.Tensor, + R_v: list[torch.Tensor] = None): + config = model.config + dim = config.hidden_size + num_heads = config.num_attention_heads + head_dim = dim // num_heads + num_layers = config.num_hidden_layers + + assert R.shape == (dim, dim), f"Rotation matrix shape {R.shape} does not match model dimension {dim}" + + if isinstance(R_v, torch.Tensor): + # R_v is a single rotation matrix + assert R_v.shape == (head_dim, head_dim), f"Rotation matrix shape {R_v.shape} does not match model dimension {dim}" + R_v = [R_v for _ in range(num_layers)] + + assert R_v is None or len(R_v) == num_layers, f"number of rotation matrix {len(R_v)} does not match number of layers {num_layers}" + assert all([R_v[i].shape == (head_dim, head_dim) for i in range(num_layers)]) if R_v is not None else True, f"Rotation matrix shape {R_v} does not match model dimension {dim}" + + # rotate embedding + AutoOperation.rotate_output(model.model.embed_tokens, R) + + if isinstance(model, Qwen2VLForConditionalGeneration): + # rotate the output of ViT + merger = model.visual.merger + AutoOperation.rotate_output(merger.mlp[2], R) + + + for l, layer in enumerate(model.model.layers): + attn = layer.self_attn + # reverse rotation for input of W_qkv + AutoOperation.rotate_input(attn.q_proj, R.T) + AutoOperation.rotate_input(attn.k_proj, R.T) + AutoOperation.rotate_input(attn.v_proj, R.T) + # rotate output of W_o + 
AutoOperation.rotate_output(attn.o_proj, R) + + if R_v is not None: + # rotate v in attention and rotate back before W_o + AutoOperation.rotate_attn_v(attn, R_v[l]) + + mlp = layer.mlp + # reverse rotation for input of W_up and W_gate + AutoOperation.rotate_input(mlp.up_proj, R.T) + AutoOperation.rotate_input(mlp.gate_proj, R.T) + # rotate output of W_down + AutoOperation.rotate_output(mlp.down_proj, R) + + # reverse rotation for input of W_lm + AutoOperation.rotate_input(model.lm_head, R.T) + + +def center_output_of_each_layer_for_qwen2_vit(model: Qwen2VisionTransformerPretrainedModel): + """ + Center the output of each layer for Qwen2 ViT. + """ + # extract the centering operation from LayerNorm to the previous layer + # center the output of the patch embedding + AutoOperation.center_output(model.patch_embed) + + for layer in model.blocks: + attn = layer.attn + # center the output of proj + AutoOperation.center_output(attn.proj) + + mlp = layer.mlp + # center the output of fc2 + AutoOperation.center_output(mlp.fc2) + + +@torch.inference_mode() +def rotate_qwen2_ViT(model: Qwen2VisionTransformerPretrainedModel, + R: torch.Tensor, + R_v: list[torch.Tensor] = None): + config = model.config + dim = config.embed_dim + num_heads = config.num_heads + head_dim = dim // num_heads + num_layers = config.depth + + assert R.shape == (dim, dim), f"Rotation matrix shape {R.shape} does not match model dimension {dim}" + + if isinstance(R_v, torch.Tensor): + # R_v is a single rotation matrix + assert R_v.shape == (head_dim, head_dim), f"Rotation matrix shape {R_v.shape} does not match model dimension {dim}" + R_v = [R_v for _ in range(num_layers)] + + assert R_v is None or len(R_v) == num_layers, f"number of rotation matrix {len(R_v)} does not match number of layers {num_layers}" + assert all([R_v[i].shape == (head_dim, head_dim) for i in range(num_layers)]) if R_v is not None else True, f"Rotation matrix shape {R_v} does not match model dimension {dim}" + + # rotate embedding + 
AutoOperation.rotate_output(model.patch_embed, R) + + for l, layer in enumerate(model.blocks): + attn = layer.attn + # reverse rotation for input of W_qkv + AutoOperation.rotate_input(attn.qkv, R.T) + # rotate output of W_o + AutoOperation.rotate_output(attn.proj, R) + + if R_v is not None: + # rotate v in attention and rotate back before W_o + AutoOperation.rotate_attn_v(attn, R_v[l]) + + mlp = layer.mlp + # reverse rotation for input of W_up and W_gate + AutoOperation.rotate_input(mlp.fc1, R.T) + # rotate output of W_down + AutoOperation.rotate_output(mlp.fc2, R) + + AutoOperation.rotate_input(model.merger.mlp[0], R.T) + + +@RotateOperationRegistry.register(Qwen2ForCausalLM) +@RotateOperationRegistry.register(Qwen2VLForConditionalGeneration) +def apply_untie_word_embeddings(model: Union[Qwen2ForCausalLM, Qwen2VLForConditionalGeneration], *args, **kwargs): + """ + Untie the word embeddings of the model. + """ + print("Untie word embeddings") + untie_word_embeddings(model) + +from ..rotation_utils import fuse_layer_norms + +@RotateOperationRegistry.register(Qwen2ForCausalLM) +@RotateOperationRegistry.register(Qwen2VLForConditionalGeneration) +def apply_fuse_layer_norms(model: Union[Qwen2ForCausalLM, Qwen2VLForConditionalGeneration], *args, **kwargs): + """ + Fuse the layer norms of the model. + """ + print("Fuse layer norms") + fuse_layer_norms(model) + + +@RotateOperationRegistry.register(Qwen2VisionTransformerPretrainedModel) +def apply_fuse_layer_norms_vit(model: Qwen2VisionTransformerPretrainedModel, *args, **kwargs): + """ + Fuse the layer norms of the model. + """ + print("Fuse layer norms for ViT of Qwen2") + fuse_layer_norms(model, replace_ln=True) + +@RotateOperationRegistry.register(Qwen2VisionTransformerPretrainedModel) +def apply_center_output_of_each_layer_for_qwen2_vit(model: Qwen2VisionTransformerPretrainedModel, *args, **kwargs): + """ + Center the output of each layer for Qwen2 ViT. 
+ """ + print("Center output of each layer for Qwen2 ViT") + center_output_of_each_layer_for_qwen2_vit(model) + + +@RotateOperationRegistry.register(Qwen2VisionTransformerPretrainedModel) +def apply_rotate_qwen2_ViT(model: Qwen2VisionTransformerPretrainedModel, *args, **kwargs): + """ + Rotate the model. + """ + print("Rotate ViT model") + rotate_qwen2_ViT(model, *args, **kwargs) + + +@RotateOperationRegistry.register(Qwen2ForCausalLM) +@RotateOperationRegistry.register(Qwen2VLForConditionalGeneration) +def apply_rotate_model(model: Union[Qwen2ForCausalLM, Qwen2VLForConditionalGeneration], *args, **kwargs): + """ + Rotate the model. + """ + print("Rotate model") + rotate_model(model, *args, **kwargs) + diff --git a/tools/rotate/rotation_utils.py b/tools/rotate/rotation_utils.py new file mode 100644 index 000000000..7b37b8753 --- /dev/null +++ b/tools/rotate/rotation_utils.py @@ -0,0 +1,129 @@ +from typing import Iterable +import torch +from torch import nn + +from rotate.common import NormLinearIterator +from .hadamard_utils import random_hadamard_matrix + +def random_orthogonal_matrix(size, device): + """ + Generate a random orthogonal matrix of the specified size. + First, we generate a random matrix with entries from a standard distribution. + Then, we use QR decomposition to obtain an orthogonal matrix. + Finally, we multiply by a diagonal matrix with diag r to adjust the signs. + + Args: + size (int): The size of the matrix (size x size). + + Returns: + torch.Tensor: An orthogonal matrix of the specified size. 
+ """ + torch.cuda.empty_cache() + random_matrix = torch.randn(size, size, dtype=torch.float64).to(device) + q, r = torch.linalg.qr(random_matrix) + q *= torch.sign(torch.diag(r)).unsqueeze(0) + return q + +def get_orthogonal_matrix(size, mode, device="cpu"): + if mode == 'random': + return random_orthogonal_matrix(size, device) + elif mode == 'hadamard': + return random_hadamard_matrix(size, device) + else: + raise ValueError(f'Unknown mode {mode}') + + +class RMSNorm(nn.Module): + def __init__(self, eps=1e-6): + """ + Qwen2RMSNorm is equivalent to T5LayerNorm + """ + super().__init__() + self.variance_epsilon = eps + + def forward(self, hidden_states): + input_dtype = hidden_states.dtype + hidden_states = hidden_states.to(torch.float32) + variance = hidden_states.pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) + return hidden_states.to(input_dtype) + + def extra_repr(self): + return f"eps={self.variance_epsilon}" + + +def fuse_ln_linear(layernorm: torch.nn.Module, linear_layers: Iterable[torch.nn.Linear]) -> None: + """ + fuse the linear operations in Layernorm into the adjacent linear blocks. 
+ """ + norm_weight = layernorm.weight.data.double() + norm_bias = layernorm.bias.data.double() if hasattr(layernorm, 'bias') else None + norm_dim = norm_weight.shape[0] + for linear in linear_layers: + linear_dtype = linear.weight.dtype + linear_device = linear.weight.device + in_dim = linear.in_features + + # this is for ViT merger + # merger takes in merge_size * merge_size patches + # while norm takes in only one patch + repeat_times = in_dim // norm_dim + + if in_dim % norm_dim != 0: + raise ValueError(f"Linear layer {linear} has in_features {in_dim} not divisible by LayerNorm {layernorm} with weight {norm_weight.shape[0]}") + + # Calculating new weight and bias + W_ = linear.weight.data.double() + linear.weight.data = (W_ * (norm_weight.to(linear_device).repeat(repeat_times))).to(linear_dtype) + + if hasattr(layernorm, 'bias'): + if linear.bias is None: + linear.bias = torch.nn.Parameter(torch.zeros(linear.out_features, dtype=torch.float64)) + linear.bias.data = linear.bias.data.double() + torch.matmul(W_, norm_bias.to(linear_device).repeat(repeat_times)) + linear.bias.data = linear.bias.data.to(linear_dtype) + + +def fuse_layer_norms(model: nn.Module, replace_ln: bool = False, verbose=False) -> None: + it = NormLinearIterator.from_model(model) + + for father, norm_name, linears in it: + # fuse the linear operations in Layernorm into the adjacent linear blocks. 
+ norm = getattr(father, norm_name) + if verbose: + print(f"Fusing {norm_name}") + print(f" {norm}") + print(f" {linears}") + + fuse_ln_linear(norm, linears) + if not replace_ln: # keep the original layernorm/RMSNorm + W_norm = norm.weight.data + norm.weight.data = torch.ones_like(W_norm) + if hasattr(norm, 'bias'): + b_norm = norm.bias.data + norm.bias.data = torch.zeros_like(b_norm) + else: + # eps = 1e-6 + # if hasattr(norm, 'variance_epsilon'): + # eps = norm.variance_epsilon + # if hasattr(norm, 'eps'): + # eps = norm.eps + # # replace the layernorm with RMSNorm + # new_norm = RMSNorm(eps=eps) + # setattr(father, norm_name, new_norm) + + from torch.nn import RMSNorm + + # in this case, we replace the layernorm with RMSNorm implemented by torch + # torch's RMSNorm has weight + # in some cases, we need to save the weight though it is always 1.0 + # our implementation of RMSNorm does not have weight + # so here we use torch's RMSNorm + # and set the weight to 1.0 + eps = getattr(norm, 'eps', getattr(norm, 'variance_epsilon', 1e-6)) + normalized_shape = norm.normalized_shape if hasattr(norm, 'normalized_shape') else norm.weight.shape + + device = norm.weight.device + dtype = norm.weight.dtype + new_norm = RMSNorm(normalized_shape=normalized_shape, eps=eps).to(device=device, dtype=dtype) + new_norm.weight.data.fill_(1.0) + setattr(father, norm_name, new_norm) diff --git a/vocab/ling_merges.txt b/vocab/ling_merges.txt new file mode 100644 index 000000000..8ab6484a2 --- /dev/null +++ b/vocab/ling_merges.txt @@ -0,0 +1,125824 @@ +Ġ Ġ +Ġ t +i n +Ġ a +h e +r e +o n +ĠĠ ĠĠ +e r +a t +ä ¸ +Ġ s +Ġt he +o r +ï ¼ +e n +Ġ c +e s +Ġ w +i t +i s +o u +a n +ï¼ Į +a l +Ġ f +Ġ p +in g +Ġ o +Ġ b +e d +a r +ç ļ +Ġa n +çļ Ħ +ã Ģ +Ġ m +i on +ä º +l e +Ġ in +Ġt o +i c +Ġ d +Ġo f +Ġan d +a s +â Ģ +r o +ĠĠ Ġ +e t +Ġ h +Ġt h +c t +en t +ãĢ Ĥ +ä » +s t +å ı +æ ľ +e l +ĠĠĠĠ ĠĠĠĠ +o m +i l +Ġ n +Ġ re +Ġ l +è ¿ +å ¤ +å ħ +Ġ e +i d +æ ĺ +v e +a m +Ġ I +Ġ T +Ġ g +Ġ S +o t +å ® +å Ī +i m +ä ½ +Ġ 
y +Ġ is +o l +å IJ +c e +å ľ +i g +s e +Ġ C +u t +ä ¹ +ä¸ Ģ +Ġf or +o w +a d +Ġ A +at ion +a y +æ Ī +u r +l y +c h +Ġy ou +Ġ ( +æĺ ¯ +Ġ u +Ġ on +Ġb e +ç Ķ +å ° +v er +i f +Ġs t +Ġ = +è ¯ +u l +ç » +it h +Ġth at +å Ĭ +äº Ĩ +ä¸ į +o d +a g +æ Ĺ +Ġ M +æ Ŀ +i r +Ġ P +ãĢ ģ +t er +Ġw ith +æ ĸ +at e +ĠĠĠĠ ĠĠĠ +å Ľ +æ ī +r a +- - +æľ ī +å Ń +åľ ¨ +Ġc on +Ġ it +ä ¼ +å į +e m +è ® +a b +Ġa s +Ġp ro +u n +r i +Ġ B +c k +he r +u m +p e +Ġa l +es t +äº º +å ¹ +Ġ v +âĢ Ļ +âĢ ľ +es s +ou t +er s +å ¾ +q u +é ĩ +p p +å ĩ +n d +is t +Ġ D +u s +Ġw h +Ġw e +å ¥ +å · +il l +Ġ F +e w +t h +å ¼ +Ġ W +âĢ Ŀ +k e +Ġa re +re s +å Ĩ +ou r +Ġ or +Ġ R +è Ģ +ĉ ĉ +an d +Ġc om +ĥ ½ +å ¸ +Ġ H +o re +n t +Ġ he +or t +Ġe x +å º +ĠT he +o p +h t +Ġ L +m ent +Ġd e +è¿ Ļ +o s +æĪ ij +Ġ " +é Ģ +å ¯ +* * +it y +Ġa t +è ¡ +ç Ľ +p t +en d +è µ +l d +ä¸ ª +e ct +. . +/ / +Ġ N +Ġh a +ro m +an t +Ġ { +Ġ r +ä¸ Ń +Ġ * +Ġth is +l o +a in +æ ķ +Ġ E +æ Ģ +è § +Ġ G +å¤ § +æ Ĭ +å Ĵ +æ ł +ar t +Ġs u +ä¸ º +ç İ +i v +å ¿ +å Į +ä¸ Ĭ +ç ľ +i ve +å İ +i es +Ġs e +Ġn ot +Ġb y +æ Ń +Ġw as +é Ĺ +ä» ¥ +æĿ ¥ +in e +ig ht +e x +è ĩ +Ġ - +åĴ Į +u st +ç § +Ġ O +ĠĠĠĠĠĠĠĠ ĠĠĠ +ag e +ä» ĸ +è ´ +Ġf rom +am e +Ġs h +¦ ģ +é Ŀ +u d +u e +Ġc h +æ ĥ +ct ion +æ ² +al l +Ġ $ +æĹ ¶ +Ġc an +Ġha ve +u re +Ġyou r +åĪ ° +æ ° +o o +è ¦ģ +ç ī +i al +i z +p er +å ½ +o c +å ± +é Ļ +æ İ +æ Ķ +åı ¯ +f f +o st +p l +å ij +æ ŀ +u b +å° ± +Ġ le +re d +a c +ä¼ ļ +ar d +æ ³ +ç º +g e +çĶ ¨ +è ĥ½ +el l +ou ld +as s +åĩ º +ab le +âĢĻ s +é ģ +a ck +-- -- +om e +in d +r ou +è ½ +çĶ Ł + ł +Ġu s +id e +in t +ä ¿ +c l +Ġin t +Ġ j +ä¹ Ł +ç Ŀ +æ ĭ +ä» ¬ +ä¸ ĭ +Ġa b +Ġ if +or m +å¯ ¹ +Ġw ill +ic e +ï¼ ļ +ul t +as t +Ġ J +å¹ ´ +æ µ +è ¾ +Ġ k +æ ¯ +a p +å ¦ +Ġd o +Ġp l +a k +ur n +Ġ } +æ Ħ +Ġw or +a re +i p +ç ¬ +å ĵ +ä½ ł +Ġ U +ĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠ +Ġal l +as e +im p +åľ ° +è¿ ĩ +at ed +åIJ İ +b er +åı ij +ç IJ +ç Ń +im e +èĩ ª +Ġa d +å ī +o g +åŃ IJ +ion s +Ġt r +i le +é ¢ +äº İ +Ġn e +ç « +T he +on e +oo k +Ġw he +Ġc l +é ĺ +al ly +è¡ Į +a ct +è¯ ´ +Ġ âĢľ +Ġ ' +æ Į 
+an s +çĿ Ģ +ic h +c c +t her +Ġm e +c on +' s +Ġcom p +åĽ ½ +it e +e p +åŃ ¦ +æĪ IJ +Ġcon t +Ġre s +å¤ ļ +l ic +ä ¾ +ar y +ä½ ľ +e f +ä¹ ĭ +Ġ en +æī Ģ +æĸ ¹ +a il +ĠI n +å® ¶ +i b +ig n +an ce +Ġh as +e re +e ar +on g +é Ĥ +Ġg o +éģ ĵ +a ce +Ġb ut +å¥ ½ +Ġ out +ç ® +ĠS t +a v +a ke +æ ¬ +( ) +Ġs o +å¾ Ĺ +é ĥ½ +ĠT h +ex t +åĬ ¨ +è Ĥ +r u +ç ½ +g et +r ing +å° ı +ç Ħ +åĪ Ĩ +ç Ĥ +ä¸ ļ +ou s +éĿ ¢ +å Ł +r y +Ġu p +d u +f orm +s o +t urn +æ ¸ +ä¹ Ī +çİ ° +ation s +i re +g h += = +åħ ¬ +æ ± +at a +or d +èĢ Į +ç Ł +Ġm ore +res s +an g +Ġ + +Ġn ew +è · +i a +æľ ¬ +å¿ ĥ +è¿ ĺ +çľ ĭ +å¼ Ģ +a ch +ver y +Ġthe y +å ģ +å Ģ +Ġ \ +æ ´ +Ġ < +ĠĠĠĠ Ġ +el f +éĤ £ +ou nt +Ġthe ir +å® ŀ +çĦ ¶ +S t +çIJ Ĩ +å¦ Ĥ +å® ļ +Ġ V +Ġon e +Ġm y +ent s +l l +æ Ľ +ic al +ç» ı +è¿ Ľ +Ġwh ich +it ion +å¤ © +at h +en ce +å· ¥ +en s +Ġ K +Ġ qu +ï¼ Ł +er v +é ĥ +æ² ¡ +Ġ [ +Ġu n +y pe +Ġg et +j ect +é ķ +ç¬ ¬ +Ġ Y +Ġa r +c om +v el +re e +éĩ Į +é « +i e +I n +Ġ âĢ +od e +åħ ¶ +Ġ & +é« ĺ +é ¡ +è ° +Ġp er +- > +Ġl i +äº ĭ +æ³ ķ +ï¼ ģ +Ġd is +Ġm an +åĬ Ľ +ä½ ĵ +ä¸ İ +** ** +Ġab out +o ve +or y +Ġ our +åī į +æ ı +Ġa pp +ow n +k s +Ġe v +Ġs p +é Ľ +æ į +a u +Ġo ther +p h +il d +on t +å Ŀ +åIJ Į +ä¸Ģ 个 +çŃ ī +æ Ļ +åİ » +éĹ ´ +é £ +çĤ ¹ +ay s +or s +re at +æľ º +Ġan y +Ġt ime +el y +Ġh is +Ġwh o +åIJ Ī +Ġ imp +ou nd +ç ¨ +i ew +é ļ +æĥ ħ +èµ · +ro w +Ġal so +è ī +er t +Ġp art +Ġs c +éĥ ¨ +ar k +å § +è ¶ +ä¸ » +ĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠ +iz e +åħ ¨ +å¾ Ī +ç Ļ +ub lic +f t +Ġof f +Ġwe re +æĸ ° +) . 
+t e +ing s +æ ģ +è Ĭ +æĹ ¥ +æĥ ³ +e c +ic k +Ġp re +Ġ // +in k +) , +Ġre turn +Ġa g +ç§ į +Ġthe re +äº § +æī ĭ +Ġwhe n +e g +æĢ § +åĨ ħ +: : +ĠC h +ç ¾ +---- ---- +èĢ ħ +o b +å¥ ¹ +å £ +Ġwor k +ĠI t +è º +éķ ¿ +æľ Ģ +éĢ ļ +cl ud +åħ ³ +çĶ µ +Ġs ome +Ġk n +åĮ ĸ +æĹ ł +l ow +ver s +æĦ ı +äº Ľ +å½ ĵ +å° Ĩ += " +Âł Âł +è ¢ +åĬ ł +Ġt e +ç ¤ +åº ¦ +al ue +Ġthe m +ç © +Ġf un +é Ķ +ç Ī +Ġs a +åº Ķ +æ ¶ +Ġo ver +æ Ł +æł · +if f +åı¯ 以 +o v +æĸ ĩ +æĺ İ +o se +é ľ +Ġbe en +et h +o ol +éĩ į +ir st +æķ ° +c es +ä½ Ĩ +å· ± +ri b +Ġw ould +ä¸ ī +pe c +Ġy ear +èº « +ï¼ ī +ol d +Ġa m +è ģ +ç ¥ +è ĭ +Ġh ad +Ġli ke +åı ª +Ġa cc +ï¼ Ī +è Ħ +Ġ ro +æ° ´ +çĽ ¸ +Ġn o +_ t +ul l +Ġth an +ï¼ Ľ +è® ¾ +å¸ Ĥ +w e +u al +å » +èĩª å·± +ter n +le ct +Ġne ed +ä½ į +" , +Ġwh at +åĽ ł +å¤ ĸ +if ic +pt ion +åħ ¥ +æľ Ī +å ķ +_ _ +nd er +y st +åĵ ģ +æİ ¥ +æĪij 们 +y s +at es +Ġp e +( " +Ġu se +ç ĥ +r ic +Ġcom m +ç ķ +éĩ ı +ĠTh is +æŀ ľ +ä» İ +èµ Ħ +at er +é ħ +is h +è IJ +çŁ ¥ +Ġin d +E R +Ġad d +æı IJ +am p +åĪ ¶ +# # +Ġde f +Ġ her +Ġd es +ç ³ +Ġit s +Ġe m +Ġh ow +åı Ĭ +äº Į +Ġf e +a x +ĠW e +æ¯ Ķ +c he +âĢĻ t +å ¢ +r am +pe ct +æĽ ´ +en er +éĩ ij +s et +Ġm ay +æŃ ¤ +Ġs et +o int +Ġint o +å¹ ¶ +w o +åı ¸ +çī © +on s +Ġj ust +åľ º +r it +Ġ i +ç ģ +ç ł +没 æľī +b s +l i +å¤ ´ +é Ĵ +è ĥ +ig h +ment s +n e +æ¬ ¡ +er m +p s +è¢ « +å® ī +ç ¼ +f ter +Ġc ol +yst em +Ġb et +ç® ¡ +p ort +.. . +ä¿ ¡ +c ess +p ut +æ · +ç ± +æŃ £ +ç ´ +s s +åĬ ¡ +æľ Ł +ä¸ ¤ +I N +è¡ ¨ +t y +çĽ ® +Ġre c +æ° Ķ +Ġpro v +ad e +è Ī +ä¿ Ŀ +n ing +ç ĸ +Ġ / +Ġ : +op le +is s +is e +å· ² +c re +i ous +Ġcon s +è £ +an ge +i an +on d +au se +å ĸ +Ġon ly +Ġ @ +æĪ ĸ +T h +l es +åIJ į +æ ¡ +Ġu nder +å¸ ¸ +Ġbe c +ãĢĤ âĢĿ +è® © +Ġas s +ç ¡ +è® ¡ +ä½ ¿ +åĽ ŀ +Ġd iff +at ing +e v +ri v +ar am +a w +Ġp r +ç ² +i x +O N +Ġf irst +ĠâĢ ĵ +Ġa ct +Ġ | +Ġb u +i el +u p +éĹ ® +f ul +' t +Ġhe l +å¹ ³ +ç¨ ĭ +ä» Ģ +t ing +åį ģ +o id +ç³ » +åĮ º +rou nd +åħ¬ åı¸ +ä» ¶ +ç Ĺ +å» º +Ġkn ow +æĦ Ł +Ġt ra +ä»Ģ ä¹Ī +n g +çī ¹ +Ġcl ass +Ġp os +Ġ . 
+åĪ © +er r +Ġm ake +Ġre g +Ġpe ople +rou gh +è½ ¦ +åIJ ij +b le +Ġ # +Ġthe se +åģ ļ +< / +ãĢ ĭ +al s +èĢ ģ +ãĢ Ĭ +æ » +ç» Ļ +é © +il ity +Ġp ublic +Ġm od +âĢ ¦ +âĢ Ķ +è į +c ed +é ĵ +Ġ im +è§ £ +p r +p ro +it ies +Ġc o +Ġs he +ï¼ļ âĢľ +ç¾ İ +åı £ +æ Ĥ +ĠH e +l ine +s h +å¼ ı +pp ort +Ġin clud +ĉĉ ĉĉ +i ent +o od +åij ĺ +æį ® +h at +è¯ Ŀ +Ġp h +Ġ ke +æ´ » +é ª +Ġt wo +Ġn um +iel d +Ġthe n +çľ Ł +ç» ĵ +ib le +ic s +== == +Ġb ack +b ject +i o +åŁ º +u g +å± ķ +or k +m er +é ¦ +Ġm ost +ra ct +Ġre qu +ot h +Ġsh ould +å¤ Ħ +it s +ul ar +Ġat t +ic t +Ġcon st +å ŀ +Ġpro du +é » +Ġse e +ç ¦ +' , +o k +at ive +èī ² +Ġl ook +Ġs elf +è¿ ° +Ġst ud +æķ Ļ +æī ĵ +ou gh +Ġth rough +ru ct +Ġd ata +Ġf in +ĠC om +w n +ar g +Ġw ant +Ġin v +s elf +ï¼Į è¿Ļ +as ed +R E +ä» £ +Ġv ar +Ġw ell +i ed +ol og +é£ İ +at ch +Ġ* / +u res +åħ ĥ +è Ļ +Ġhel p +ic es +é¢ ĺ +Ġre m +Ġe very +å Ļ +è· ¯ +ar ch +Ġin st +at ic +çĽ ´ +Ġwhe re +f ore +Ġ end +re ad +u es +ĠP ro +d ay +éĹ ¨ +åĪ « +å° ij +æĬ Ĭ +le t +Ġfun ction +ĠY ou +Ġre l +Ġc ould +he d +è§ ģ +S T +å¥ ³ +åı ĺ +Ġs erv +Ġs pec +å Ķ +ol low +ers on +d ef +æĶ ¶ +Ġ el +e ad +æľ ¯ +åİ Ł +A T +il y +d er +ä¸ ľ +form ation +åı Ī +¥ ¿ +he n +Ġsa id +ä»ĸ 们 +åį ķ +ur re +Ġtr ans +**** **** +c y +ä» » +Ġsu ch +Ġ ent +è İ +Ġdiff ere +ĠU n +Ġus ed +o ck +å°± æĺ¯ +rib ut +äº ¤ +Ġex per +çĶ ± +æµ ģ +æ° ij +Ġe ff +ç½ ij +) ) +æĬ Ģ +E x +id ent +èĩ ³ +u ch +åĨ į +éľ Ģ +Ġa fter +if e +as h +è ¥¿ +Ġre ad +m s +Ġin ter +_ d +ow er +æī į +Ġs m +s p +Ġ very +æľ į +o x +çĻ ½ +è£ ħ +en g +çº ¿ +at or +æĶ ¾ +å¼ º +åĻ ¨ +åħ ī +in ess +Ġr ight +çľ ¼ +n ce +Ġw ay +ĉĉ ĉ +Ġst art +Ġres ult +Ġf ind +Ġd et +Ġa c +Ġman y +it t +an y +r ay +( ' +c ri +ï¼Į ä¸į +Ġev en +ç ĭ +p os +ï¼Į æĪij +Ġgo od +a ve +ã ģ +w ard +æ ® +n ame +.. .. 
+P ro +è´ ¨ +ation al +éĢ ī +a ir +å Ħ +e e +åIJ Ħ +ag es +Ġbec ause +å® ¹ +ç½ ® +Ġl ong +n er +A R +ç¥ ŀ +ak ing +Ġ x +ä¼ ł +å® Į +al th +å£ ° +t o +i ence +ar s +æĮ ģ +ĠI f +_ s +åı Ĺ +åħ Ī +{ \ +Ġdo es +æµ · +vel op +ĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠ +a pp +å¸ Ī +Ġc all +Ġn ow +åĩ ł +å¸ ¦ +æ± Ĥ +t t +è¯ ¥ +Ġd id +å·¥ ä½ľ +Ġm in +d d +åħ · +lo ck +ç« ĭ +æ º +Ġex t +å¤ ĩ +åŀ ĭ +Ġ ra +ur ing +Ġpl ay +Ġf ollow +è½ ¬ +Ġp res +` ` +f ig +åĦ ¿ +E N +our ce +Ġp ol +Ġc he +ä¹ ¦ +æł ĩ +ut h +è¿Ļ 个 +it ed +an k +l ed +æł ¼ +en se +æĢ » +å½ ¢ +ä¸ ĩ +rou p +lo g +Ġyear s +ot her +ï¼Į ä½Ĩ +Ġl oc +ç» Ħ +è¿Ľ è¡Į +O R +Ġd on +åĽ Ľ +é Ł +al e +ç§ ij +c o +Ġh igh +in s +Ġsh ow +è¿ ŀ +ç» Ł +Ġg reat +Ġ= = +t he +us iness +t s +æĬ ¥ +åĢ Ļ +æĮ ĩ +Ġt est +èµ ° +æ¸ ħ +Ġg u +å± ± +æ £ +le ment +Ġfor m +åĮ ħ +éĿ ŀ +è Į +el s +ç± » +Ġc re +ire ct +Ġd own +åĽ ¾ +ect ion +th is +. com +Ġ _ +ä» · +æĪ ĺ +v iew +g ram +Ġo wn +å¤ ª +è¿ IJ +ç¤ º +e ver +v ent +é¡ ¹ +Ġs ystem +Ġh im +m a +Ġhe re +Ġde c +urre nt +æģ ¯ +æ¯ ı +p en +Ġus ing +we en +ol ut +ä½ ķ +æ ¨ +I t +å½ ± +I T +Ġv alue +Ġm uch +Ġe ach +W e +Ġex p +å¯ ¼ +æ ¼ +Ġm on +C ont +iz ed +ç© º +t r +eth od +å ² +A L +imp ort +in es +³³ Âł +ä¸ ĸ +at ure +Ġst r +Ġimp ort +Ġre f +re turn +_ p +æĶ ¿ +Ġc our +åķ Ĩ +è® ¤ +Ġin formation +Ġth ink +è ± +_ f +I D +am es +è§ ī +Ġb el +con d +_ m +æĹ¶ éĹ´ +çİ ĭ +Ġs ign +åŁ İ +Ġsu b +ĠA nd +ç¡ ® +ä¸ ĵ +æ ij +Ġn ame +è° ĥ +Ġf l +èĢ ĥ +in ed +Ġd ist +. 
get +ä¸į æĺ¯ +_ c +æŃ ¥ +è¾ ¹ +Ġp erson +ce pt +å ĥ +ç ħ +o f +Ġdiffere nt +-------- -------- +æĸ Ļ +è§ Ħ +æķ ´ +ĠA n +S E +Ġch ar +ĠF or +å¿ « +Ġb est +è® ° +Ġbet ween +Ġl ist +Ġs ur +o y +Ġbe fore +Ġag ain +an n +éĻ ¢ +y m +æīĢ è¿° +è¯ ģ +g ht +ä¾ ¿ +èĬ Ĥ +Ġh and +g an +ark et +p on +Ġc or +f o +ĠE x +u ally +re f +ç® Ĺ +ad d +èĤ ¡ +Ġa v +äºĨ ä¸Ģ +æ ¢ +Ġc ase +Ġbe ing +ç Ĭ +t en +æĪ · +Ġg ener +im es +çº § +ä¸Ń åĽ½ +em ent +å ¨ +å·² ç»ı +y n +i red +Ġnum ber +in ce +er y +æĹ¶ åĢĻ +åı ĸ +æķ Ī +y p +ar n +æł ¹ +N ame +ä¸ Ķ +ch n +Ġg r +Ġt ake +C om +Ġth ose +ĠA r +L E +as on +å® ĥ +C h +ï¼Į åľ¨ +A N +æ³ ¨ +t on +Ġd ay +ĠA l +Ġ ! +Ġwh ile +å¼ ł +ä» Ĭ +Ġv al +an c +éĽ Ĩ +em ber +çŁ¥ éģĵ +- b +e k +ĠS h +Ġp oint +åį Ĺ +Ġpro cess +Ġm ed +T ype +ust om +. m +Ġ Q +od y +Ġin s +åĩ Ĩ +ĠN ew +å¦Ĥ æŀľ +Ġs er +Ġb l +id er +Ġde velop +. S +re n +éĺ Ł +e b +ç¬ ij +. s +Ġf act +Ġm ade +éĢ ł +Ġs ame +åĨ Ľ +èģ Ķ +Ġst ate +amp le +ï¼Į ä»ĸ +è ħ +çļĦ 人 +ate g +æĿ ¡ +é ¥ +Ġs om +al ity +n ess +iv es +åı į +Ġsu pport +C on +h ip +èĬ ± +åį ´ +åŃ ĺ +åĥ ı +ou se +e qu +第 ä¸Ģ +åı ° +al k +Ġp ost +ç ª +è§ Ĩ +ang u +w ay +çĪ ± +éļ ¾ +æĺ ĵ +è¾ ¾ +æ ħ +Ġv is +r ight +ction s +St ring +é© ¬ +if y +ra ph +æŀ Ħ +åĮ » +Ġc ar +åĽł 为 +ä½ ı +ç¦ » +. " +æĶ ¯ +_ { +Ġf il +æĿ ĥ +Ġf ound +çݰ åľ¨ +è´ ¹ +çIJ ĥ +äº Ķ +Ġc ount +ĠĠĠĠĠĠĠĠ Ġ +æĬ ķ +ç² ¾ +Ġ ed +ç¤ ¾ +t ain +Th is +v al +Ġp at +i et +I d +è¾ ĥ +lo ad +Ġar t +æİ § +å¤ į +Ġwor ld +éĻ IJ +i er +e red +at her +w are +ant s +åı · +åĢ ¼ +æĸ ½ +a z +Ġel se +å§ ĭ +Ġ > +ch ool +Ġb oth +æ ļ +Ġl ast +è¿ ij +Ġb usiness +_ C +Ġ ï¼Į +æĪ ¿ +çķ Į +ï¼Į ä¸Ģ +ï¼Į èĢĮ +Ġf am +re am +le ase +åĬ Ł +Ġ= > +åĨ µ +Ġprodu ct +a h +ĠA s +çĥ Ń +ro l +Ġl ife +al se +æĢ Ŀ +_ b +æĸ ¯ +ä¾ Ľ +éĹ® é¢ĺ +l and +æ¨ ¡ +a ult +Ġprov id +åĪ Ļ +v en +Ġa p +ion al +åŃ Ĺ +æ· ± +ä¾ ĭ +åı Ĥ +åĪ Ľ +æ² » +Ġre p +Ġ err +ak es +èIJ ¥ +Ġle t +- s +åIJ ¬ +éľĢ è¦ģ +åĮ Ĺ +å¢ ŀ +ter s +åĬ ŀ +Ġr un +çĹ ħ +Ġh ome +æĿ ¿ +. p +Ġs k +éĢļ è¿ĩ +æ¶ Ī +çİ ¯ +. 
c +æĢ İ +åĨ ³ +è¯ · +Ġb ook +riv ate +ï¼Į ä½ł +ä¼ ģ +angu age +m in +Ġc le +åij ¨ +Ġm ust +Ġt ype +çī ĩ +åij Ĭ +æ Ĵ +a red +è¶ Ĭ +as k +e y +i vers +ou n +Ġdes ign +æº IJ +um ent +Ġde p +. d +éĺ ² +ot e +Ġeff ect +æ¸ ¸ +R es +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ +ĠĠĠĠ ĠĠ +Ġch ild +olog y +è¯ Ń +çļĦ ä¸Ģ +Ġapp ro +cc ess +Ġle ad +ç« ł +u c +åį ³ +çł Ķ +Ġpro gram +ere st +å® ¢ +é¢ Ĩ +æĶ ¹ +ar m +he s +Ġin f +l ess +Ġre ally +ï¼Į ä¹Ł +使 ç͍ +Ġpart ic +: // +è® º +ĠH ow +s on +Ġe qu +st r +w ays +Ġp ur +ä¼ ĺ +ĠB ut +Ġpos s +å¹ ² +å¸ ĥ +ent ial +éħ į +in al +ess age +er n +å ĭ +T o +è¿ĺ æĺ¯ +è§ Ĥ +et s +ist s +è¯ Ĩ +Ġwith out +åı¯ èĥ½ +Ġto p +管 çIJĨ +æ¡ Ī +åıij å±ķ +it al +ä¹ ł +è ¨ +å¹ ¿ +Ġtr ue +if ied +Ġm ain +æİ ¨ +Ġo b +Ġb re +Ġm at +Ġst ill +æŀ Ĺ +Ġcomm un +æĬĢ æľ¯ +ä½ İ +I C +Ġd el +æŁ ¥ +æľ ª +è® ® +Ġc are +E S +Ġv oid +éĢ Ł +åİ ĭ +ĠD e +f or +iv en +A C +ĠC l +æł ¡ +" > +Ġdis c +iz ation +æ ¥ +è¯ ķ +Ġa round +Ġre al +è® ¸ +Ġ z +us s +as es +Ġqu est +ĠC on +u ff +æ» ¡ +ro ss +åIJ § +ĠT r +é ¾ +è¨ Ģ +ri end +æīĢ ä»¥ +éļ ı +Ġpro f +å¿ ħ +} } +Ġe as +éŁ ³ +åij ½ +c ent +- t +åΰ äºĨ +Ġtr y +g g +A r +op y +p aram +Ġf ree +st and +Ġl o +Ġgo ing +Ġo pt +Ġpro per +in ing +lo y +ä¼ģ ä¸ļ +åŃ © +å° Ķ +v ing +è½ » +ĠP l +Ġ$ \ +ç ı +åį İ +er g +å¤ Ł +ä¹ IJ +ce ption +ĠS e +ç» Ń +] . +o ot +æĺ ¾ +eng th +åħ ± +m ber +çĻ ¾ +æľ Ľ +- d +Ġm arket +Ġm em +Ġw rit +Ġs ay +Ġp ass +Ġcon f +çİ ĩ +ç« Ļ +Ġin cre +// // +r id +æľį åĬ¡ +å¾ ® +Ġ ) +è¿Ļ æł· +æ IJ +. 
, +æŃ » +ĠW h +is ion +æĬ ¤ +åĽ ¢ +Ġt ri +b ers +r ite +Ġto o +os s +m an +éĻ ¤ +æĢ ģ +are nt +_ S +Ġte chn +Ġle vel +Ġad v +L ist +ĠS o +Ġor der +he re +ĠThe y +o ver +Ġpl ace +i qu +Ġall ow +æŀ ģ +un d +Ġhe alth +èĤ ² +ail able +Ġpro ble +Ġc urrent +m l +å¼ ķ +åIJ ĥ +i k +çº ¢ +Ġo per +Ġsm all +æĸ Ń +Ġcont in +å¾ · +## ## +ĠThe re +Ġse cond +å ĺ +Ġpro t +en cy +_ P +æĢİ ä¹Ī +le ction +i ents +Ġper form +ver t +Ġth ree +" : +Ġmon th +æµ ĭ +] , +is hed +s c +Ġ % +å£ « +è· Ł +ind ow +Ġha pp +Ġte am +ç ¢ +åĬ © +is m +èµ Ľ +us h +å¤ ± +Ġt erm +w ork +Ġl anguage +Ġf ile +v ed +==== ==== +ab ility +人 çļĦ + · +ur al +Ġd uring +Ġl ot +Ġo pen +çģ « +y le +Ġis s +itt le +le x +s w +ar ly +ou ght +èĩªå·± çļĦ +Ġbu ild +åı ĭ +ut e +Ġl ove +Ġ Z +- f +ro ll +å ¡ +Ġ X +æĭ ī +è ı +ar get +åħ ĭ +Ġl ine +u le +ä¸Ń çļĦ +åĵ į +bs ite +um m +æĥħ åĨµ +t ext +Ġc ent +Ġm ethod +Ġn ext +b e +ä¹ ī +å¾ Ģ +éª Į +çħ § +æ® µ +n ot +em s +Ġ , +cri pt +ä¿ ® +m p +Ġf ew +m ath +D ata +re m +è¶ ³ +æ¸ © +th ing +Ġc ustom +def ine +é£ Ł +Ġre port +ĠR es +iv id +Ġ est +Ġan al +ï¼Į å¹¶ +æ ¹ +A S +Ġth ings +åº ķ +[ ' +W hat +Ġre st +Ġm et +w w +eth ing +Ġre ce +第 äºĮ +uth or +A n +å± Ģ +æĺ¯ ä¸Ģ +è¿ ľ +Ġhe ad +_ l +âĢĻ re +a j +le te +Ġd irect +èµ· æĿ¥ +æī ¾ +ç Į +Ġp ay +it ions +r ation +r al +D E +r r +p ar +ç§ ° +pon se +ï¼Į éĤ£ +å¢ ĥ +Ġint erest +at t +ra w +èİ · +o ard +éĢ Ĥ +Ġst ring +Ġpro ject +ä» ½ +n ect +ĠR e +è± ¡ +at us +Ġke ep +A M +Ġ et +Ġpl an +ĠT o +re g +ab el +Ġbet ter +Ġst and +æİ Ĵ +é¦ ĸ +Ġfe el +iv ing +d s +r on +äº ² +mer ic +st ring +. h +æĿ İ +ĠI nd +èIJ ½ +cl ass +åĪ ĩ +A s +" ) +çľ ģ +å± ŀ +l ing +ç© ¶ +Ġ ide +er ing +a ut +Ġm ight +s er +åĸ ľ +éĺ ³ +pl ay +Ġal ways +çĬ ¶ +ot t +iss ion +ï¼Į å°± +çĶ · +ert ain +ä¼ Ĺ +G et +ĠI s +- m +Ġn ull +ç» Ĩ +i or +Ġs it +. C +åķ Ĭ +ic ally +b y +Ġche ck +R O +æĿ IJ +L L +Ġwe ek +éĥ½ æĺ¯ +Ġf r +åij ¢ +Ġs l +Ġex pl +n ew +éĩ ĩ +åIJ Ĺ +v es +å· ŀ +Ġimport ant +ĠO n +æ± Ł +éĻ ħ +. 
t +b ack +æ¬ ¾ +Y ou +èĩ ´ +Ġc r +com m +' ] +S h +ic le +ç» ´ +ra c +ç± ³ +å± Ĥ +lo b +æ ĵ +Ġex pect +äº ij +_ T +åĬ ¿ +Ġ â +Ġs ize +çļĦ æĹ¶åĢĻ +Ġke y +Ġs im +产 åĵģ +è¶ ħ +Ġs oc +åħ » +å¼Ģ å§ĭ +çº ¦ +Ġinclud ing +. f +Ġexper ience +ç» Ī +Ġn on +ç« ¯ +ç´ ł +I f +Ġc ook +I S +Ġl ittle +â̦ â̦ +} $ +Ġp ut +Ġfollow ing +é¢ Ħ +Ġsom ething +Ġav ailable +I ON +( $ +çŁ ³ +Ġan other +Ġ// / +åį ĩ +Ġp ower +Ġpres ent +ĠS p +Ġb r +Ġs ince +c al +ĠC ol +Ġcon d +ĠA ll +ç³» 绣 +at ely +ç ¿ +Ġserv ice +Ġor gan +vers ion +Ġg row +iv ely +ç® Ģ +i um +o h +åı Į +Ġa ut +Ġapp lic +Ġf ield +å © +éĿ Ĵ +or n +f ace +at s +, âĢĿ +Ġar g +æĺ Ł +åħ¶ ä»ĸ +r ist +èĦ ¸ +et er +ar ge +F or +d ate +Ġp rivate +- c +ï¼Į 以 +æ¬ ¢ +Ġl aw +oc i +çĶŁ æ´» +ä¿¡ æģ¯ +olut ion +it ive +Ġa ff +Ġprov ide +Ġw ater +åİ Ĩ +Ġo bject +Ġwith in +è ŀ +re en +ï¼Ł âĢĿ +Ġd i +Ġsu re +ĠS he +. g +a f +one y +ĠP h +******** ******** +E D +v ious +å Ĥ +Ġsu pp +u ck +å ł +Ġ -- +g er +Ġo ld +_ M +re qu +ï¼Į âĢľ +C T +_ n +Ġinv est +æķ° æį® +Ġon line +Ġde v +ĠW hat +ir on +æĹ © +åĩ » +Ġg roup +F F +è¿Ļ äºĽ +è¡ Ģ +ere nce +Ġ ve +# define +av a +Ġcom e +Ġl ess +ç§ ¯ +U T +Ġ! 
= +ã Ĥ +or ies +Ġc ost +Ġm ult +Ġv iew +ĠS c +åį Ĭ +Ġman ag +a it +Ġne ver +Ġadd ition +Ġwe bsite +é¾ Ļ +ĠA d +/ * +ail s +Ġs imp +du ct +æĿ ij +æķ ħ +åĪ ļ +ht t +å® Ŀ +æĮ ī +æ£ Ģ +Ġt em +um p +ĠA meric +Ġin c +in ation +çļĦ æĺ¯ +é» ij +) ; +Ġem p +V iew +Ġf riend +Ġcomp any +Ġg ot +Ġb ig +è´ £ +é Ń +iv ity +éĻ © +con st +Ġs pe +^ { +Ġacc ess +m ed +Ġg ive +ain s +re t +åŃ© åŃIJ +it le +èģ Į +æĻ ¯ +C l +èĻ ½ +ro p +v ar +Ġc ell +Ġfam ily +å¾Ī å¤ļ +un ction +Ġch ange +å¸Ĥ åľº +èĭ ± +od el +ä¸Ĭ çļĦ +è§ Ĵ +Ġmod el +Ġm ov +u ild +åĪ Ĺ +ĠâĢ ĺ +ä» ħ +clud e +Ġunder stand +大 çļĦ +éĶ Ļ +V alue +T r +Ġg iven +our s +Ġ Ð +a pe +Ġap pe +ra p +C ol +èĪ ¬ +l er +ivers ity +Ġch ang +ä¾ Ŀ +âĢĶ âĢĶ +A P +ent ion +ï¼ģ âĢĿ +Ġst ruct +( f +g le +o pe +v alue +Ġs chool +åĽ ´ +Ġfin al +ke y +åĿ Ĺ +w it +erv ice +Ġres p +un e +Ġh ard +i en +- p +u se +a im +ĠU S +Ġab le +Ġre se +æĭ © +ial ly +ï¼Į æľī +Ġf ull +Ġs ing +æıIJ ä¾Ľ +( s +好 çļĦ +a pt +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠ +ac y +åĪ Ĵ +Ġst atic +iv ed +Ġl im +ĠA pp +id th +ic y +, " +æĭ ¬ +ur s +an ag +as ter +( t +Ġg ame +Ġas k +éĶ Ģ +主 è¦ģ +c ul +ä¹Ł æĺ¯ +ater ial +åIJĮ æĹ¶ +Ġp op +æ² ¹ +in ter +è ij +æł¹ æį® +E T +éĢ ģ +å§ Ķ +ä¸į èĥ½ +an e +end ing +ress ion +éĿŀ 常 +éħ Ĵ +æ¼ Ķ +Ġloc al +ent ly +if t +Ġcre ate +ĠT e +p or +Ġin tern +ĠS u +åĩº æĿ¥ +A D +Ġle g +Ġd ays +è´ Ł +Ġd em +Ġw om +å¸ ® +ç Ĩ +èį ¯ +it or +大 å®¶ +Ġf alse +ä»ĸ çļĦ +æī ¿ +Ġus er +ï¼Į åıª +éĴ ± ++ + +ç´ § +Ġl og +çĽ Ĭ +èĮ ĥ +Ġt urn +é£ ŀ +op t +u f +ä¸į ä¼ļ +Ġh um +å® ¤ +åĨ · +è¿ĺ æľī +Ġbe h +B y +ess ion +Ġg l +U n +ĠCom m +åĪ Ŀ +åĨ Ļ +ä¸ĸ çķĮ +Ġagain st +ï¼Į æĺ¯ +é» Ħ +Ġev ent +Ġb as +åŁ Ł +ä¹ĭ åIJİ +æµ İ +Ġ  +Ġimp ro +Ġ ` +ic ense +ir d +it her +æĸ¹ æ³ķ +is ter +èĥ½ å¤Ł +Ġen g +res ent +ot s +Ġ ident +Ġl ight +ft ware +é ¸ +éĢī æĭ© +åĮħ æĭ¬ +Ġex ample +`` ` +rr or +act er +Ġ& & +Ġstud y +ab ly +cri ption +ac he +Ġre t +à © +Ġb ased +Ġc ap +a ction +Ġb en +è¾ ĵ +ĠB e +ä¹ ° +åĵ ª +in a +åı ¤ +çķ Ļ +Ġcont rol +con t +Ġle ft +ri es +项 缮 +Ġposs ible +Ġspec ific +l ish +çĸ 
Ĺ +Ġn et +oc us +äº ¬ +. A +ie ve +lo se +ide o +对 äºİ +è¦ģ æ±Ĥ +ord ing +Ġs ol +çłĶ ç©¶ +Ġp ar +is ed +å° ½ +Ġt yp +at ures +åħ Ń +Ġs ite +che s +ä¸į åIJĮ +Ġacc ount +åĪ » +ï¼Į åħ¶ +ou ble +ä¼ ¤ +v iron +Ġresult s +Ġcour se +åĽ½ å®¶ +H E +Ġme an +æĬķ èµĦ +ĠL et +d ata +Ġ es +è¯ Ħ +çª ģ +é¢ Ŀ +æīĢ æľī +å¯ Ĩ +ä¼ ¼ +ĠâĢ Ķ +} \ +ĠO r +äº ļ +b r +ĠN o +ĠC ont +ric t +} { +c rib +Ġserv ices +å± ħ +æĸ¹ å¼ı +_ F +Ġu nt +åį ¡ +Ġc al +pl ace +Ġgo ver +åº Ĺ +_ id +å® ĺ +åĿ ĩ +Ġsu ccess +ivid ual +Ġj ob +Ġp ract +ãĢ Ģ +æį ¢ +Ġp a +Ġare a +v oid +out h +éĩį è¦ģ +åħ ħ +çŃ Ķ +id d +çī Į +ç½ Ĺ +our ces +åº ľ +æ´» åĬ¨ +A l +æĤ ¨ +åħ ´ +âĢĻ ve +ar ing +end ed +å· ® +lob al +ur y +Ġ Î +Ġpro m +è¯ » +t a +an ces +Ġt ext +iqu e +le g +åĨ ľ +ir c +Ġt reat +ç» Ŀ +设 计 +Ġin it +Ġwork ing +æİ§ åζ +社 ä¼ļ +Ġcont ent +Ġm us +ãĢĤ åľ¨ +U R +m e +r ad +ĠSt ring +ç¾ ¤ +o ber +I nt +pl es +Ġre view +um n +å®ī åħ¨ +åı ² +ure d +åį ĥ +Ġf ac +éĥ¨ åĪĨ +ĠB l +u ly +åĽ º +ã ĥ +ç¼ ĸ +ific ation +read y +çĽ ij +ill s +. l +Ġb ro +b ook +f rac +ĠM ay +o le +c ode +å½± åĵį +çļ ® +E n +åľ Ł +in c +Ġle arn +O T +Ġp ot +or g +ul ation +Ġrequ ire +I M +两 个 +çī Ī +ĠTh at +w ord +ard s +ra in +æĸ¹ éĿ¢ +Ġbre ak +Ġh ist +ill ion +å¾ ħ +S et +èī º +ï¼Į 她 +oc ument +I nd +in it +æŃ ¦ +Ġemp loy +Ġex c +he ad +Ġm aking +æŃ ¢ +O bject +. b +Ġe lect +am b +Ġt imes +ag s +ur ity +åĩ ı +åħ « +åIJ ¦ +ä¸Ģ äºĽ +g in +u ro +åĽ Ń +H ow +Ġo cc +Ġcons ider +ä¹ İ +æĪij çļĦ +Ġwh y +ore d +è¿Ļ ç§į +èĭ ¥ +Ġinclud e +O n +ap s +is ing +um e +åħ į +I m +为 äºĨ +Ð ° +é¦ Ļ +æĪ ı +åѦ ä¹ł +id ence +çİ © +åij ³ +IN G +i ke +Ġm oney +å¤ « +Ġse c +ur ther +Ġan sw +I G +en e +åѦ çĶŁ +Ġth ought +Ġb it +i ver +éĻ į +åı ¦ +æĻ º +Ġs ays +Ġli k +åİ ¿ +Ġk ind +iet y +_ st +âĢĻ m +å® ³ +èϽ çĦ¶ +o ff +ĠW hen +æ¯ į +id s +ä» ¤ +æŁ IJ +åı « +æķĻ èĤ² +ne y +Ġf our +ä¸ ¥ +ip s +al es +Ġre du +Ġspec ial +Ġchild ren +' m +ä½ľ 为 +Ġind ividual +ĠW ith +çĶ » +Ġl ow +Ð ¾ +_ D +å¿ µ +Ġc ode +Ġf lo +W h +il es +Ġl arge +o ice +åıij çݰ +and s +é ² +æĭ ¿ +Ġf ood +. 
n +n ment +Ġrequ est +Ġto t +Ġa way +Ġrec ord +æ² ³ +A B +åĨħ 容 +Ġme ans +çģ µ +ag n +Ġst ep +Ġb ody +Ġproble m +è´ Ń +f er +Ġpro b +ut ure +åģ ¥ +Ġth ing +H e +viron ment +is k +ain t +__ __ +Ġd one +åĩº çݰ +Ġsh ort +() . +ut il +Ġph ot +ct or +çĦ¶ åIJİ +æĿ Ģ +pp ed +Ġp aram +ĠThe se +å¼ Ĥ +( self +pp ing +éĻ Ī +lo c +Ġc reat +Ġquest ion +ign ed +ir t +ĠCh rist +Ġstud ents +è´ ¢ +Ġcall ed +L O +Ġha ving +or th +ys is +ç»ı æµİ +Ġme as +äº ī +Ġal ong +ĠR ep +ĠHow ever +Ġp ast +Ġp ath +åº · +åı ¶ +Ġadd ress +aj or +Ġc ertain +ĠS ystem +pl ic +çł ´ +ect ed +er c +' re +in clude +ä¸Ģ å®ļ +å¤Ħ çIJĨ +ff ic +Ġr ange +Ġme et +i od +ä½ Ļ +Ġlook ing +ĠE n +Ġro om +Ġh ig +ĠM ar +è¿Ļ ä¹Ī +ï¼Į æĪij们 +æĪĸ èĢħ +it er +çĭ ¬ +le y +S S +ç¦ ı +è¿Ļ ä¸Ģ +Ġor ig +æ ¿ +ic ro +Ġerr or +y l +è Ĵ +âĢ ĵ +Ġm ove +A G +.... .... +( m +éĵ ¶ +O M +èŀ į +Ġal ready +E rror +åħ ļ +âĢĻ ll +å¸ Į +Ġse ver +R e +Ġt ell +ä» ĭ +B ut +Ġcomm on +æł ¸ +( p +åį ı +ab les +ight s +ĠC an +Ġof ten +å¥ ĩ +é Ĩ +Ġab ove +S e +Ġrese arch +Ġl a +Ġunt il +ï¼Į åı¯ +A nd +ä¸ ĥ +å¿ Ĺ +ĠE ng +[ i +g round +Ġto day +con om +æĻ ® +è® ² +um ber +æĹ ı +人 åijĺ +m b +éĺ ¿ +che d +Ġdet erm +a i +Ġan n +Ġsa f +K ey +ar ning +èĭ ı +Ġto get +Ġpr int +Ġtoget her +a ff +åĨ ² +Ġ/ * +æľī éĻIJ +ç ĵ +ĠL e +ĠA t +Ġsu per +è Ľ +Ġpartic ular +æĻ ļ +U S +ear ch +c er +Ġf ore +Ġvis it +W ith +Ġperson al +ay er +d e +end s +P l +pr int +è¯ ¾ +Ġp age +è½ ® +èĥ½ åĬĽ +缸 åħ³ +Ġle ast +çļĦ è¯Ŀ +Ġs ide +Ġneed s +åIJ ¸ +ac es +ĠP r +Ġbu y +. 
j +ä¸ ¾ +å¾ ĭ +Ġre ason +åºĶ 该 +äºĭ æĥħ +ç« Ł +ribut e +è§ī å¾Ĺ +P E +åı ¥ +çľĭ åΰ +al f +ĠA ss +åıij çĶŁ +ĠG od +Ġ id +t ype +re c +l ished +Ġm aterial +Ġan t +Ð µ +Ġp ack +åº ı +åħ µ +id es +Ġwhe ther +s ide +åIJ « +ĠM y +æ ¦ +æĭ ħ +æĿ¥ çļĦ +åį « +F ile +è¡ £ +Ġh old +çŃ ĸ +ar i +l u +os ed +umm ary +ç ŀ +å¦ Ī +åĿ IJ +å¯ Į +c ol +Ġsoc ial +以 åıĬ +s el +en n +Ġsp ace +æľ ¨ +le v +Ġind ust +çĶŁ 产 +Ġben ef +Ġproduct s +j oy +ç» § +Ġp ort +ä¸Ģ æł· +ib ility +Ġd ri +è¿Ļ éĩĮ +æ ¤ +g s +å· ´ +Ġsign ific +åģ ľ +个 人 +ä¸į è¿ĩ +æĵ į +ä¸ĵ ä¸ļ +ĠC ent +å½ ķ +un c +Ġf ocus +ç» ĩ +Ġvar i +åĶ ® +Ġsever al +oh n +orm al +T ext +Ġm en +ur l +in o +. D +ĠD o +ĠM ed +Ġen ough +le an +Ex ception +åĢ Ĵ +æ¿ Ģ +Ġen c +ord er +çĽ ĺ +ver age +Ġf ar +设 å¤ĩ +Ġh ouse +ur ation +æĢ ķ +Ġm ot +. set +Ġtr ad +Ġother s +éĿ Ļ +ar r +Ġd r +æľ Ŀ +oo gle +) $ +ent al +an ks +. M +I s +Ġt arget +Ġm ax +w h +Ġe ither +_ B +缴 æİ¥ +Ġm akes +Ġrequ ired +æĽ ¾ +os p +é º +ä½ł çļĦ +a us +ä¹ Ŀ +ç £ +og raph +çĪ ¶ +) : +res h +ib r +æ¯Ķ è¾ĥ +ĠP ol +åº § +çł ģ +æŀ IJ +Ġ ? +L et +or ld +Ġp ub +Ġinv ol +ä» ĺ +é¡ » +r c +re l +m m +( \ +Ġsing le +Ġact ually +ĠC o +lect ed +Ġ version +Ġcom b +Ġp o +è½ ½ +Ġdevelop ment +ro om +ut es +( d +pp er +å®ŀ æĸ½ +il ar +èĥ Į +U N +é ¼ +st ruct +Ġa uthor +Ġv ol +em pt +äº Ĵ +ä¸ Ŀ +å¾ ģ +Ġ ess +ove mber +en c +ĠCom p +> < +Ġrel ations +Ġ< < +Ġst ory +ĠUn iversity +Ġbec ome +æī § +EN T +è Ĩ +è¿ĩ ç¨ĭ +ä¹ĭ éĹ´ +ä¹ ħ +Ġwor ks +per ty +åŃ £ +ï¼Įä½Ĩ æĺ¯ +eg in +re ct +ĠI nt +ç§ » +. _ +Ġqu ality +éĵ ģ +Ġen erg +I P +. âĢĿ +çݯ å¢ĥ +æ³ ¢ +éĤ£ ä¹Ī +çı Ń +Ġint eg +Ġd ue +ĠI N +Ġoff er +å¤ ľ +åĵ ¥ +Ġdid n +Ġcommun ity +Ġf ail +Ġeas y +è¿ŀ æİ¥ +ä½Ĩ æĺ¯ +åĿ ļ +çľĭ çĿĢ +Ġ: = +ri or +f ect +Ġcon nect +æĪIJ 为 +_ N +im ate +le ft +---------------- ---------------- +大 åѦ +æĸĩ åĮĸ +è´ § +A dd +Ġb ring +sp an +Ġ ... 
+åħ· æľī +Ġt alk +Ġcl aim +T est +ward s +ĠC he +Ġm ajor +Ġcont ain +æ ½ +( - +Ġto ol +Ġw in +ĠQ u +I L +[ ] +oo se +çŁ Ń +æĦ ¿ +ä¹Ł ä¸į +Ġa ir +e ed +è¡ ¥ +Ġb lock +åĸ Ħ +\ n +Ġo bs +èĦ ij +Ġcour t +Ġ< / +æĺ ¥ +Ġdoes n +.. / +# include +un ity +b b +} , +C H +m it +_ in +on es +å¥ Ĺ +n ow +Ġm ar +Ġdo ing +æ¥ ¼ +Ġe ver +Ġm om +ĠY our +N ew +Ġdist ribut +Ġstr ong +çļ ĩ +y t +Ġpos ition +ens ion +( x +Ġam ount +ĠF r +- l +åħ ° +ä¸Ģ èµ· +ĠC al +建 设 +ip le +å· ¦ +è Ń +Ġt re +Ġmonth s +f ile +u ation +ç© ¿ +Ġm ind +d iv +p le +Ġac ross +cl us +ul es +T ime +t d +O L +Ġem ail +ç͍ æĪ· +Ġa ction +ĠL icense +The re +æĥ Ĭ +Ġh tt +Ġ| | +t ies +ĠSt ate +( n +é ½ +åĸľ 欢 +ĠG et +è° ģ +Ġhum an +ĠĠĠĠĠĠĠĠ ĠĠĠĠĠ +Ġth ough +Ġsignific ant +Ġt able +Ġget ting +æħ ¢ +Ġval ues +æľ ĭ +Ġdisc uss +m ost +åį ° +ĠIn st +Ġrep resent +å¦Ĥ ä½ķ +Ġopt ions +St ate +è Ĺ +åī ¯ +Ġs ource +è® Ń +In fo +å§ IJ +Ġass oci +âĢĿ ï¼Į +æĭ Ľ +ä¸Ń å¿ĥ +ç» ĥ +Ġansw er +o in +åĽł æŃ¤ +çĶ ļ +Ġf uture +le ments +at ors +Ġcomp let +f ort +èĩª çĦ¶ +an u +p x +èī ¯ +Ġon ce +Ġmem bers +Ġbel ow +. 
P +( c +åĪ ĺ +åħ³ ç³» +é¡ ¾ +ict ion +Ġl ater +Q u +æĢ ¥ +å®ŀ çݰ +et a +Ġres pect +ind ex +ra ft +Ġde al +Ġcol or +Ġcook ies +av or +Ġed uc +ct ober +il ities +ĠA N +Ġex ist +Ġenerg y +ĠC ar +og n +A ll +Ġproper ty +Ġsu gg +A pp +æľī äºĽ +Ġchar acter +ec ut +èŃ ¦ +å® ¡ +Ġsh are +Ġsugg est +åı ³ +Ġs ent +Ġcount ry +Ġc ou +D e +Ġab s +Ġkn own +im al +Ġen joy +ç¥ ¨ +Ġph ys +end er +çº ª +Ġp arent +Ġbe gin +ĠG u +ï¼Į æīĢ以 +åľ° æĸ¹ +ir l +ï¼Į ä»İ +Ġch all +us er +Ġwom en +å² ģ +ro id +u x +Ġl ive +O P +ic ation +æľĢ åIJİ +ak en +ir m +è ĵ +Ġd ig +ä¾ § +//// //// +c le +pr il +ï¼Į 对 +ï¼Į 让 +( b +Ġne cess +Ġar ray +Ġre d +åī § +Ġoff ic +ru ction +Ġen s +R equ +' ve +Ġact iv +Ġ __ +Ġh ours +Ġcont act +æľĭ åıĭ +Ġcom es +Ð ¸ +Ġso ftware +ĠN ot +åį ļ +人 æ°ij +Ġpot ential +éķ ĩ +Ġper iod +O r +ä¸Ģ ä¸ĭ +o ad +Ġtot al +Ġc ur +il t +ãĢ IJ +è¯ ī +U LL +æĿ ¾ +ĠB y +pt ember +ç ij +ce mber +ort s +åŃĺ åľ¨ +å¤ ı +è° ¢ +Ġqu ick +Ġp ress +ãĢ ij +] [ +Ġo pport +is on +Ġcomp ut +ç»ĵ æŀĦ +æŀ ¶ +Ġst re +R ead +ĠO ne +éĴ Ł +ä¸ ´ +in ary +Ġse arch +Ġo ption +at form +ï¼Į è¿ĺ +æĦŁ è§ī +ä¸Ģ èά +éľ ² +r t +ĠD es +Ġb log +ĠUn ited +Ġpat ients +ep end +æ± ī +Ġex am +çķ ¥ +im um +itt ed +Ġcon c +Ġterm s +举 西 +Ġo p +as ing +ri x +r m +- g +( ( +Ġg en +Ġl and +Ġn ight +p ace +E M +Ġimpro ve +Ġass ert +Ġy et +åĵ Ī +æł ij +it es +ug ust +Ġpre vious +çº ³ +ĠA dd +é¢ ij +P h +il i +ol l +Ġe y +Ñ Ĥ +æ¶ ² +Ġpr ice +Ġv ideo +Ġin put +Ġf urther +ç» ľ +åŁ ¹ +r s +Ġim age +ç ĩ +Ġob tain +æį Ł +Ġgover nment +ãĢĤ æĪij +- in +et ers +Ġ à +m y +Ġresp ons +Ġfe atures +op s +Ġl ink +t ime +å·¥ ç¨ĭ +Ġhapp en +è½ ¯ +æ¯ ķ +æ¥ ļ +ro du +Ġc opy +i res +èĦ ļ +Ġdes crib +n ers +Ġc rit +æĶ » +ä¸Ģ ç§į +art ment +err or +Ġrelations hip +' : +Ġen vironment +æľīéĻIJ åħ¬åı¸ +un g +Ġwe b +ĠD is +Ġth row +at ural +anu ary +åıª æĺ¯ +Ġse ason +ĠĠĠĠĠĠĠĠ ĠĠ +åģ ĩ +ç»Ħ ç»ĩ +å¯ Ł +ound s +Ġstr ateg +! ! 
+Ġdef ault +ra ck +led ge +u ed +Ġprovid ed +éģ ĩ +th ough +- w +é¡ º +Ġres ponse +ç´ ¢ +n ection +od es +ĠG ener +Ġyou ng +ht ml +ãĢĤ è¿Ļ +çĶ ³ +ĠU s +游 æĪı +Ġc red +æıIJ é«ĺ +èĤ ī +éĿ ł +. w +äº ® +ç͍ äºİ +Ġbe aut +说 éģĵ +Ġc ult +ç ¯ +Ġs ym +Ġc ame +at ter +N ot +Ġstart ed +m od +It em +er o +äº « +æĤ £ +ab or +c ing +ur ance +ï¼Į æīĢè¿° +æŃ Į +_ g +our n +g ress +O S +anc ial +Ġm essage +Ġb ase +com p +t ings +å¸ Ŀ +ä¸Ģ 缴 +äºĨ è§£ +_ h +ç¨ ³ +Ġam ong +an ies +åį ł +èĭ ¦ +S o +ä»» ä½ķ +z e +Ġse en +羣 çļĦ +å ³ +åѦ æł¡ +è ĸ +è¯ į +åĩº äºĨ +éĽ ¨ +ou d +ä½ł 们 +Ġgener al +ĠM ore +Ġre comm +Ġvar ious +un ch +- h +Ġr ate +缮 åīį +Ġf ace +ect or +on y +æķ £ +ĠIn tern +Ġsu bject +Ġnew s +pl ate +Ġc amp +认 为 +Ġc ut +Ġe conom +it ut +Ġinst all +æĹ ħ +A d +Ġcon fig +$ $ +Ġag o +æĶ¿ åºľ +N o +Ġpoint s +Ġsim ilar +Ġt ax +å¸Į æľĽ +Ġs w +m at +ĠAmeric an +ess ional +E X +ĠJ ohn +é¡ ¶ +Ġcle an +Ġhe art +indow s +S u +ä»Ĭ 天 +it em +âĢ ¢ +i ans +Ġsimp le +ĠM e +è¡Į ä¸ļ +_ name +Ġth ird +Ġiss ues +Ġe arly +Ġh ot +W hen +ç¾İ åĽ½ +P ath +um ents +Ġh ope +æī ¹ +Ġind ic +anag er +Ġp red +' ll +, æĪij +Ġp ick +éĺ µ +ok en +_ w +å® Ī +Ġl arg +. F +éĢ Ģ +éĽ ª +rou ps +第 ä¸ī +Ġr ad +Ġperform ance +her s +åĬŁ èĥ½ +Ġcomp et +Ġpur ch +P ost +äº ¿ +Ġwho le +Ġmed ia +con fig +æĿIJ æĸĻ +æĿ ¨ +Ġanal ysis +Ġmus ic +b ox +çĸ ij += ' +N ode +åŁº æľ¬ +å¥ ĸ +éĻ Ħ +um b +D es +åĩ Ģ +çİ ī +æľº æŀĦ +. org +Ġapplic ation +Ġrem ain +Ġcom ment +å° Ħ +设 ç½® +çͱ äºİ +æĸ Ĺ +Ġne g +ä¸ĭ æĿ¥ +注 æĦı +ic ult +Ġwrit ing +Ġcle ar +ition al +Ind ex +çĹ Ľ +ç² ī +èı ľ +Ġw rite +s u +Ġper fect +htt p +Ġto ok +ann el +======== ======== +Ġ( ! 
+Ġc ity +in ks +C C +#### #### +st er +Ġs at +Ġw on +å° ģ +表 示 +ĠR eg +S ize +è´ µ +> +st ance +æ´ Ĺ +Ġw ent +ç¤ ¼ +ĠI m +P aram +ĠA g +æĭ į +Ġhig her +Ġtra ck +éħ ¸ +d o +l t +å¼ Ł +us ing +er al +æĿ¥ 说 +it ing +Ġst ay +è´¨ éĩı +pec ially +O D +ig ure +Con fig +n ces +iv al +Ġquest ions +s ub +Ġsome one +Ġser ies +ix ed +ist ic +ĠO ur +èİ· å¾Ĺ +Ġh ost +c ome +re w +å¨ ģ +ĠP art +Ġro le +ĠĠĠĠĠĠĠĠ ĠĠĠĠ +Ġchang es +éĹ » +èµĦ 产 +åįķ ä½į +è ¸ +å® ģ +Ġm illion +d ing +ov ed +çĸ « +erv ed +ud e +uro pe +y d +Ġd om +éĽ · +èģ ļ +çļĦ 大 +Ġiss ue +æ¢ ¦ +Ġindust ry +æĴ Ń +Ġcan not +f rom +å ª +åŁİ å¸Ĥ +p ose +Ġide a +Ġneed ed +ĠA b +Ġst ore +Ġcont ext +Cont ext +Ġr isk +éĻ Ĩ +è· ij +Ġc ard +Ġbel ieve +Ġd ate +E qu +Ġm or +er a +and om +Ġhist ory +t est +ä¸Ģ 次 +Ġpartic ip +Ġl oss +Ġhow ever +. add +Ġa ge +Ġprovid es +ĠP ost +Ġmin utes +æ¯ Ĵ +Ġmanag ement +Ġv ia +eb ru +U p +å¿ħ é¡» +ĠM in +if ul +éĴ ¢ +Ġw alk +éĤ£ 个 +Ġg ames +åĩĨ å¤ĩ +Ġt aking +I R +Ġc lose +å® £ +æĬ Ĺ +Ġtreat ment +Ġany thing +çĶ ² +ĉĉĉĉ ĉ +ell ing +èĥ ľ +ob ile +T R +yn c +le ge +ebru ary +ï¼Į 为 +\ \ +ç½ij 绾 +L og +ĠSt ates +ph a +or age +Ġcond itions +Ġtra ining +æ¯ Ľ +Ġimp act +Ġout put +Ġst d +Ġs um +h ing +è¿Ľ åħ¥ +ic les +d a +l ist +re p +- st +èĮ ¶ +' ) +Ġde g +ĠR ev +. st +lic k +ĠE d +Ġm ach +çĶļ èĩ³ +ur ch +ä¹ĭ åīį +g ed +ĠA ct +身 ä½ĵ +ä¹ ¡ +Ġf ive +ï¼Į å°Ĩ +æĵį ä½ľ +Ġind ex +A t +å® « +Ġf low +ĠS ee +æİ ī +Ġart icle +uff er +] ( +Ġm iss +åĽ½ éĻħ +Ġb i +æĽ ² +olog ical +æĭ ¥ +. 
R +Ġd at +Ġnet work +Ġd ouble +Ġup on +ĠG r +Ġf all +ri ed +h ib +Ġac cept +S c +âĢĿ çļĦ +Ġcontin ue +ast ic +ateg ory +çĽ ĸ +æĢ Ģ +Ġincre ase +Ġp ie +ï¼Į å½ĵ +re ate +ç»§ ç»Ń +% % +n a +se c +ĠM arch +Requ est +å¾Ĺ åΰ +ĠA fter +Ð ½ +Ġdet ails +e le +ce ed +Ġimp lement +Ġb ar +Ġcl os +ĠC ount +A b +b egin +b ar +Ġcomp lete +è° Ī +p ack +iz ing +ä»· æł¼ +Ġcomp anies +Ġse em +Ġpa per +Ġper cent +Ġent ire +Ġadd ed +Ġm ass +Ġa ud +Ġw ond +å½ Ĵ +Ġc ases +Ġc irc +Ġpol it +b ol +çī¹ åĪ« +e es +P I +Ġc aus +ĠN ovember +è§£ åĨ³ +ĠS et +Ġtechn ology +b all +缮 æłĩ +ï¼Į å¦Ĥæŀľ +Ġlik ely +Cl ass +Ġreg ard +Ġsystem s +Ġ ver +Ġp rior +æķ ¢ +åIJĦ ç§į +æłĩ åĩĨ +çĻ » +Ġdiff icult +ch o +å¦Ĥ æŃ¤ +Ġp ain +U L +A tt +i ol +y th +ĠD r +æ ĩ +ä¸Ģ æŃ¥ +he l +Ġb ad +D ef +æĹł æ³ķ +Ġlo ad +Ġb o +æ²» çĸĹ +å¼ ¹ +g ing +Ġa ch +sel ves +Ġd am +ĠJ une +Com p +Ġc oun +ç¨ İ +ç¬ ¦ +il s +Ġh alf +Ġpr im +Ġd er +åŁº ç¡ +F ield +Ġne ar +um er +ï¼Į åı¯ä»¥ +Ġst yle +Ġmat ter +åĤ ¨ +m ail +Ġb row +æ± ½ +Ġfriend s +é ± +Ġr ather +id ed +产 çĶŁ +Ġu nd +an ced +ç§ Ģ +i ate +ag ing +Ġwe ight +¤ IJ +Ġproble ms +Ġd ocument +ä¸ ° +O f +av ing +er ver +Ġw ait +åºĶ ç͍ +Ġapp ly +æĽ´ å¤ļ +æ· · +p i +Ġl ength +ig ation +Ġm atch +O ver +b l +Ġto ld +Ġev ents +æ´ ¾ +æķĪ æŀľ +il it +åŁºç¡ Ģ +g o +al t +Ġh om +å° ¼ +å®Į åħ¨ +Ġd en +çī Ľ +htt ps +å© ļ +\ x +Ġwor ds +ãĢĤ ä»ĸ +Ġs ens +åĬ ³ +st atic +ac c +Ġbuild ing +çļĦ å°ı +ç¼ º +ach ing +å½ © +æ² Ļ +é± ¼ +Ġag re +åİ Ĥ +Ġtry ing +ï¼Į ä»ĸ们 +Ġacc ording +éģ ¿ +el t +ç¼ ĵ +çŃ ij +æĹ ¢ +ab led +Ġpre t +åıª æľī +Ġw ays +Ġl ower +L oc +Ġcell s +res ult +éĢ ı +æ° ¸ +F rom +å¿ Ļ +Ġbl ack +lo od +ar ray +ad a +M y +ay out +å¹³ åı° +ace book +Ġt aken +ä¿Ŀ æĬ¤ +åģ¥ åº· +Ġp lease +åĢ Ł +at est +ï¼Į çĦ¶åIJİ +Ġtra vel +è§ ¦ +op er +åĮ» éĻ¢ +æļ Ĺ +min ist +ĠP er +ens ive +åĮĹ äº¬ +èĹ ı +Ġtyp es +ĠSt ud +E L +i ef +Ġle arning +_ H +ĠJ uly +æĢ ª +帮 åĬ© +, åľ¨ +o on +Ġcre ated +åį ĸ +æ» ij +Ġ q +å®ī è£ħ +æĿ Ĥ +or ks +_ r +M ap +S ub +at ory +Ñ Ģ +ĠA pril +Ġd est +Ġa w +æĶ¯ æĮģ +Ġfr ont +ĠO 
ctober +åħ¶ ä¸Ń +oo lean +un k +ĠC our +Ġcol l +$ , +ĠD ep +ra ce +ass ert +åĽº å®ļ +ç» į +Ġal most +æ¬ § +è¿ĩ åİ» +j ust +çĨ Ł +Ġexper ien +Ġsimp ly +ĠC ity +c ast +O ut +éĶ ® +åĢ º +ac hed +æŁ ĵ +i am +Ġqu ite +s ummary +ĠC or +) ( +å¹´ çļĦ +ç¬ Ķ +op h +åĪ ¤ +) / +ç» ¿ +éĩĩ ç͍ +an a +_t ype +Ġg lobal +se qu +Ġmult iple +tern al +Ġexpect ed +ä¸ļ åĬ¡ +B e +or ing +_ L +çĪ · +Ġem b +Ġb ed +Ġrece ived +O V +o res +客 æĪ· +Ġw atch +å¢ŀ åĬł +. e +ä¸Ģ çĤ¹ +Ġdev ice +Ġs ection +r ong +她 çļĦ +Ġes pecially +Ar ray +èĢģ å¸Ī +ĠE urope +åº Ń +ä½ Ľ +v is +E nt +T M +åħ¶ å®ŀ +Ġor g +a ur +Ġext ra +éĴ Ī +Ġ+ = +å « +ï¼Į åĽłä¸º +代 表 +åĺ ´ +Ġp ict +Ġshow s +æķ ij +Ġoff ers +Ġst e +ĠAr t +éļ ľ +åĵģ çīĮ +ĠC ons +olut ions +wit ter +Ġp ers +m on +m ission +_ IN +ĠSe ptember +计 ç®Ĺ +åī Ĥ +ä¸į çŁ¥éģĵ +ä¹ ± +ĠHe alth +Ġcomp on +ç§ij æĬĢ +å®ŀ éĻħ +ĠDe cember +ĠS m +Ġstand ard +I V +Ġet c +I F +é ¹ +Ġt w +åº Ĭ +é¡ ¿ +Ġs olution +èµĦ æºIJ +容 æĺĵ +æĮ ¥ +Ġdis play +ch an +Ġest ab +yn am +Ġorig inal +Ġcor rect +éĩ Ĭ +ĠR ec +ag ed +ĠW orld +C R +Ġcomp lex +å¼Ģ åıij +om et +éĤ£ äºĽ +erv ices +_ RE +åł Ĥ +b oard +Ġ ãĢĤ +èĦ ± +Ġcom pl +è¯ Ĺ +ä» į +et y +Ġknow ledge +H and +Ġrel ated +R eg +l ight +è ľ +åŃ Ķ +uf act +R ef +pl it +Ġun ique +ç²¾ ç¥ŀ +P L +交 æĺĵ +Ġaddition al +ä»· å̼ +éĿ¢ çļĦ +R ec +åħ³ äºİ +Ġn il +ott om +Ġb ool +ou ch +et ic +å¹ ¸ +ĠM an +E d +éĺ ¶ +se e +L e +åIJ ¯ +å¨ ĺ +Ġnecess ary +Ġb ox +Ġare as +Ġf avor +Ġprob ably +ibr ary +. 
T +Ġaut om +èĤ ¯ +é ¤IJ +ĠS ome +è¯ ¯ +ï¼Į 大 +- y +ĠA ugust +Ñ ģ +Ġcont ract +åº ĵ +以 ä¸Ĭ +che ck +è· Ŀ +r ame +å½¢ æĪIJ +ĠJ anuary +il ter +ist ance +æİ Į +Ġsec urity +æĺ¯ åIJ¦ +Ġappro ach +V al +lo at +è§Ħ å®ļ +Ġexp ress +ĠL ist +p m +åĨ ° +le ep +ĠS chool +Ġevery one +æ¦ Ĥ +C E +ĠN ULL +w here +ãĢĤ èĢĮ +pt ions +Ġpol icy +å· ¨ +Ġev idence +æĺ¯ ä¸Ģ个 +ï¼Į åıĪ +_ {\ +Ġdown load +et urn +P S +C L +åİĨ åı² +oc ol +n o +é¥ Ń +Ġprof essional +è¯ ¢ +Ġlim it +o ke +Ġim m +Ġcustom ers +Ġn atural +m d +ĠN ow +ç§ij åѦ +çŁ¥ è¯Ĩ +In ter +Ġread ing +Ġfor ward +Ġeng ine +ĠR ead +Ġnot hing +æĪIJ åĬŁ +ä½į ç½® +Ġis n +æ¸ IJ +Ġpos itive +éŃ Ķ +èµ µ +ï¼ĮèĢĮ ä¸Ķ +å Ĺ +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠ +æİ ¢ +Ġf it +ver se +Ġcal cul +Ġe p +D is +Ġc ause +res p +Ġwor d +Ġl iving +Ġval id +à ¤ +ĠY ork +åŁº éĩij +roll er +å» ¶ +ç»ı èIJ¥ +éļ IJ +A A +å® Ĺ +æĺ¯ åľ¨ +re st +ĠO ut +ĠA cc +Ġus ers +æ· ¡ +Ġj ud +Ġfe ature +åħ· ä½ĵ +ç®Ģ åįķ +au gh +Ġ Ï +Ġwh ite +åħ ¸ +Ġst op +idd le +Ġgrow th +计 åĪĴ +Ġex act +ï¼ī ï¼Į +Ġse par +Ġpl atform +Ġst at +ail y +Ġlevel s +åį Ī +Ġit em +æĿ¡ ä»¶ +ĠF in +f ield +t es +åĪ Ģ +Ġinclud es +_ R +Ġch oose +Tr ans +q l +ic on +Ġfin ancial +f l +空 éĹ´ +åĶ IJ +å° ļ +U ser +ä¸į æĸŃ +s y +ove red +Ġens ure +ap ter +am ed +Ġrec ogn +çĶ ° +Ġs ix +ĠO F +Ġc as +F orm +Ġrun ning +Ġev al +ĠNew s +åĪ© ç͍ +åİŁ åĽł +åī ij +ä¼ Ļ +åĪ ¸ +è¡ Ĺ +g en +Ġt itle +è¿ İ +Ġinst ead +Ġd iv +ĠM c +ç® ± +çĿ ¡ +æľ « +ac ed +é½ IJ +S p +e ks +æĽ´ åĬł +åĸ Ŀ +re ak +m ing +C ode +Ġinst ance +Ġbeaut iful +ï¼Į è¦ģ +com ing +èĻ ij +Ġâ Ī +Ġsh own +( & +é¦ Ĩ +Ġm ark +Ġwant ed +Ġyour self +Ġdis e +Ġbeh av +Ġask ed +ĠG e +_ V +Ġt akes +Ġg irl +åIJĪ ä½ľ +Ġg round +Ġrece ive +ĠO ff +éª ¨ +Ġgr ad +/ s +N A +p oint +m ap +ï¼Į åį´ +Ġint rodu +è¿Ļæł· çļĦ +Ġm ap +ĠO R +Ġmom ent +çĬ¶ æĢģ +Ġso on +éĿ © +Ġco ver +air s +n ov +ol id +Res ult +M essage +ĠW ill +ĠP res +Ġins ide +i ation +ĠT ra +ĠP ar +Ġ ill +er ve +âĢ ĺ +qu e +ĠU p +çŃ ¾ +Ġse lect +Ġpart y +Ġsh all +Ġeff ic +â ij +h old +Ġse ems +S ervice +çĥ Ł +ç¿ » 
+åĿ ı +å ´ +Ġf ul +Ġsc he +ĠP e +ĠA m +ĠN ational +Ġess ential +Ġf oot +g r +O ne +说 æĺİ +x y +æĿ Ĩ +ers ion +en u +ç» ¼ +è¿IJ åĬ¨ +irt ual +å¥ ¥ +èĻ ļ +åį ± +Ġg roups +åľ £ +æ¸ ¯ +Ġn ormal +éĩĮ çļĦ +-b ased +ä¼ ij +é¢ ľ +æĸ° çļĦ +Ġus ually +ĠThe n +s igned +un t +ĠB r +Ġit ems +人 们 +arg s +æĥ³ è¦ģ +Ġc y +ĠH ere +Ġcom ing +Ġde ath +Ġinit ial +Ġdec ision +Ġassoci ated +æĸ¹ åIJij +åľ Ĩ +_d ata +ĠS ec +. B +è´Ł è´£ +ĠN one +éŁ © +æĿ¥ äºĨ +g es +U E +ĠD ef +Ġlong er +Ġh ref +Ġinclud ed +at ur +Ġf ast +Ġback ground +$ \ +yth on +Ġout side +ĠS outh +许 å¤ļ +Ġright s +li ent +è¶ £ +Ġs ense +æĹ § +Ġpre p +C ount +ç« ŀ +çªģ çĦ¶ +, èĢĮ +Ġpret ty +Ġ( ( +éĽĨ åĽ¢ +ĠH is +çģ ¯ +. W +éĩ İ +pt y +æ¤ į +èĪ ª +" ] +ç¡ ¬ +Ġd raw +ob ject +以 ä¸ĭ +ak er +Ġpop ular +b f +an el +çī Ļ +[ " +l im +Ġp m +d own +AT E +Ġreg ular +Ġcond ition +Ġwrit ten +èİ « +ĠT HE +ur b +riv acy +l in +éĨ Ĵ +æ¶ ¦ +æİ Ī +ĠJ ust +cre ate +éĥ¨ éŨ +ĠTr ans +**************** **************** +çĹ ĩ +æŃ£ åľ¨ +æĬ ĵ +ï¼ İ +Ġe lement +ĠA ust +st ate +n own +Ġm ember +éĺ ´ +ourn al +Ġstruct ure +ch ar +æıIJ åįĩ +声 éŁ³ +Ġoff ice +Ġst atus +Ġconc ern +ĠS un +i pe +Ġfe ed +ub e +w ise +ĠP ublic +Ġb al +Ġcl ient +ĠR ed +> > +Ġo il +ä»ĭ ç»į +åĽ ° +ç¡® å®ļ +ç͵ è¯Ŀ +Ġse qu +Ġ 第 +I I +ide red +æģ ¶ +g y +Ġrec ent +tr ans +l abel +ĠO f +Ġchall eng +Ġpre vent +ĠGener al +Ġpro pos +ä¸ĩ åħĥ +ar c +æĺ¾ 示 +é¥ ® +T E +èĢ ³ +æĿ Ł +é¢Ĩ åŁŁ +$ . +ä½ ³ +æĬ¥ åijĬ +é» ĺ +о Ð +æĭ¥ æľī +建 çŃij +Ġst aff +æı ¡ +Ġ ** +u i +ĠT est +Ġeduc ation +åħ¨ åĽ½ +Ġmeas ure +O ur +ĠN orth +Ġprodu ction +ĠIn c +Ġf ire +Ġbeh ind +导 èĩ´ +åľ° åĮº +ç§ ĭ +è£ħ ç½® +é² ľ +Ġmethod s +çĥ Ī +èĮĥ åĽ´ +åĩł 个 +Ġpract ice +ĠF ree +ire ction +k en +åĪ º +Ġs qu +åıĺ åĮĸ +Ġus es +Ġloc ation +Ġmain tain +ch ange +ï¼Į å¦Ĥ +ä¸į å¾Ĺ +éļı çĿĢ +èĽ ĭ +li ed +éĢ IJ +wit ch +u k +Ġmed ical +æĮī çħ§ +M odel +åľ Ī +éľĢ æ±Ĥ +Ġf requ +Ġact ivity +ï ½ +æ´ Ľ +æ § +产 ä¸ļ +éĶĢ åĶ® +C K +Ġcl ick +M S +c d +Ġacc om +ĠA f +Ġn ames +os ition +ail ed +Ġb oard +æľī æķĪ +. 
html +ab ase +Ġ åľ¨ +åIJ Ľ +Ġph one +Ġeffect s +åĨ³ å®ļ +us ed +o e +Ġdef ined +ç» © +Ġ ï¼Ī +ĠF ebruary +Ġappe ar +Ġeffect ive +Ġn ode +Ġwe eks +èĵ Ŀ +. L +com es +æī§ è¡Į +Ġwor th +Ġo pp +_ ST +çº ¸ +é¥ ° +ain ed +å¸ ģ +Ġle ave +Ġcons idered +Ġsc reen +- to +æij ĩ +en ces +Ġc ross +M A +ev ice +æ¶Ī è´¹ +æ´ ² +in fo +è´£ ä»» +Ġcred it +Ġtr ust +ac hes +éϤ äºĨ +p art +is f +è´ ¥ +è¡Į 为 +èµ ¶ +av es +éļ Ķ +ang le +è½ ´ +[ @ +f unction +N S +Ġve h +Col or +val id +åį · +ä¸į åΰ +å½ĵ çĦ¶ +Ġav oid +ist ics +f unc +Ġor d += \ +æľī åħ³ +åı¯ æĺ¯ +O W +å¯ » +Ġhe ld +im ent +à ¤ +Ġde ep +Ġup d +ĠP a +è Ķ +ãĢĤ âĢľ +en ing +ry pt +o es +å¥ ¶ +AN T +Ġdesign ed +Ġs ound +Ġcons ist +l ink +P r +we et +æĬ ĺ +Im age +Ġ ] +æģ © +li er +em p +åĨ ł +ä¼ ° +Ġcustom er +ind ing +ç« ¥ +p ri +表 çݰ +ol ve +æľº ä¼ļ +è¿Ļ æĺ¯ +C he +at al +ç¼ © +il le +è´ ¦ +Ġin j +d is +Ġcont ribut +sh ip +ew ork +em ic +Ġst ates +Ġb lood +ĠF rom +iz es +èį £ +为 ä»Ģä¹Ī +Ġfil es +æ± ł +Ġallow s +ä¸į åı¯ +ç§ ĺ +Ġab ility +åĩº çļĦ +ãĢ Į +ĠI S +ä¸Ĭ æµ· +æĸĩ ä»¶ +Ġsk ills +q rt +ĠG oogle +çĿ Ľ +ĠD on +_ G +Ġw indow +èĤ ¤ +ï¼Į æ¯ı +Ġh on +åIJİ çļĦ +Ġh it +åİ ļ +l en +å± ĭ +T able +Ġs end +an ch +h ood +M anager +ist ry +çͳ 请 +Ġ æĪij +am ent +ĠD ata +u ge +Ġstud ies +Ġtem per +, ä½Ĩ +m iss +æķ´ 个 +Ġfil m +ĠN et +H el +æīĭ æľº +æĥ³ åΰ +Ġits elf +) \ +ï¼Į åĨį +che n +ĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠ +. 
app +ĠS oc +Ġprot ect +Ġa verage +Ġpub lished +( i +Ġmod els +æĸ¹ æ¡Ī +Ġsa ve +Ġbr and +åıĤ åĬł +Î ± +m et +Ġro ad +ä¼ļ è®® +Ġ( $ +åijĬ è¯ī +Ġm ention +æĮ ij +Ġcurrent ly +P C +æĹ ģ +éĽ ¶ +ä¿Ŀ æĮģ +am a +Ġany one +模 å¼ı +Ġrecomm end +å· Ŀ +Ġmarket ing +çĪ ¸ +Ġpat tern +à ¡ +æīį èĥ½ +è¿ĩ æĿ¥ +å³ ° +é£İ éĻ© +æĻº èĥ½ +Ġcon duct +d f +u ate +ĠP M +Ġd ru +Ġw all +Ġr out +li ke +b it +Ġsu bs +a res +_s ize +U M +åİ ħ +èĥ ¡ +le ased +åŃ Ļ +Ġspe ed +Ġup date +Ġch oice +éĵ¶ è¡Į +if ier +ph p +( this +Ġwond er +Ġcent er +ç§ ¦ +ur g +å¯ Ĵ +éĽ ħ +ï¼Į éĥ½ +Ġins urance +èµĦ éĩij +z on +ĠCour t +ç͵ åŃIJ +åĪĨ éĴŁ +Ġcom ments +Ġide as +p ublic +Th at +as ons +I B +Ġactiv ities +âĢĻ d +ell ow +Ġatt ack +epend ent +æł· çļĦ +uild er +ĠM on +Ġopport unity +_ i +åĩ ¡ +el se +æ±½ 车 +ç ¦ģ +éº » +re et +es e +Ġinf lu +åģ ı +æĬ ± +è¿ · +in put +ap i +Ġs il +ip ment +ï¼Į å®ĥ +z z +opy right +A Y +ĠJ ava +è· ³ +è¾¾ åΰ +å® ľ +Ġdiffere nce +ä¹ĭ ä¸Ģ +Ġex ecut +Ġres ources +ä¸ĭ çļĦ +æĤ£ èĢħ +æ¯ « +Ġreport ed +à ¸ +çĪ Ĩ +- n +Ġs n +ĠP ark +T H +èĪ ŀ +é ¬ +è® ¨ +ri ef +æ°´ å¹³ +ä¿ ĥ +ĠCent er +Ġw rong +Ġp ric +å° ¤ +am ples +èĪ ¹ +Ġfact ors +ef ore +Ġtri al +ä¼ ¸ +ĠP at +Ġman ufact +om s +ĠE l +æĪij们 çļĦ +// / +M od +pt r +. x +çĽ Ł +æģ IJ +ĠH igh +Ġde cl +Ġon es +ç»ı è¿ĩ +R ep +s ize +æ¯Ķ èµĽ +/ d +ar ies +åª Ĵ +iz er +åº Ĩ +Ġdirect ly +Ġg ra +Ġthrough out +Pro perty +çĸ ¾ +Ġbook s +em plate +Ġsa w +æľī ä¸Ģ +ers hip +at ives +Ġread y +å¿ĥ éĩĮ +åħĪ çĶŁ +port s +Ġsk in +ĠB ar +ir it +os ing +ç ĺ +Ġhtt p +m itted +- C +å°± ä¼ļ +èij £ +Ġk ids +~ ~ +E O +s k +æĶ¶ åħ¥ +Ġhapp y +ed eral +ir th +ĠD ay +act ive +Ġreg ion +g ment +al ing +éĢł æĪIJ +Ġv iol +åĬ ª +al pha +ä½ľ åĵģ +b or +" . +Ġleg al +çľ¼ çĿĽ +u y +w ith +åħ³ 注 +b ody +i us +æĢ§ çļĦ +ä¿Ŀ è¯ģ +ĠWh ile +ĠS k +å¼ ± +åĶ ¿ +def ault +å® ĩ +Ġeas ily +èīº æľ¯ +è¯ º +æī ¬ +Ġs ales +m ore +ĠV al +h a +C all +i j +Ġlook s +Ġinvol ved +è¾ Ĩ +' d +Ġatt empt +ç´ ¯ +Ġnum bers +. 
is +Ġc and +so ft +ve y +: " +Ġl ives +å±ŀ äºİ +ä» ª +ill ing +a ctions +åIJ ´ +E lement +Ġ å¹´ +Ġt im +ä¼ ´ +åĪĨ åĪ« +Ġim med +lic ation +ç Ĵ +æ ¾ +éĵ ¾ +Ġam az +Ġqu ant +ä» Ļ +ĠIn ter +å² Ľ +å¦ ¹ +cre t +ĠS ervice +Ġhe av +, ä¸į +Ġcom fort +Ġthem selves +Ġex cept +æľī 人 +Ġc am +d om +Ġs low +resp ond +Ġ row +ï¼Į 使 +ĠM r +Ġg raph +m as +Ġass ess +g or +ar ter +Ġbu ilt +Ġfunction s +ç½ ª +Ġd im +æĿ ¯ +éĺ » +è® ¿ +h am +AR T +../ ../ +Ġst ri +è´ ´ +P R +éĩij èŀį +Ġplay ers +Ġfun d +è¿Ľ ä¸ĢæŃ¥ +éľ ĩ +ĠL aw +Ġsur face +rid ge +pro t +æı ı +Ġg ives +Ġm ix +Ġe lements +åIJ ī +Ġs ort +Ġcol lect +ä»» åĬ¡ +, \ +U t +Ġbenef its +Ġt ask +pon ent +Ġpre c +çĿ £ +Ġw ar +( int +Ġd riv +ac ity +is ions +Ġcou ple +a ign +er ies +Ġphys ical +ĠCount y +ãĢĤ å¦Ĥæŀľ +p ath +l ation +ð Ł +èī² çļĦ +å¤ļ å°ij +Ġent er +Ġs us +ir ing +Ġpart s +ĠA M +éķ ľ +Ġdig ital +Ġb ank +åĨ Į +ĠI D +ĠF irst +Ġcomm it +ot o +Ġpro te +Ġs ell +Ġre lease +çĽ Ľ +ly ing +æij Ħ +Ġcomp ared +Ġtool s +Ġch o +in ition +åѦ éĻ¢ +Ġh or +Ġquick ly +éĽ Ħ +ĠCl ass +pos ition +ç»ĵ åIJĪ +以 åIJİ +åįģ åĪĨ +çĤ İ +le ar +åĬĽ éĩı +çº · +çĥ § +ï¼Į æľ¬ +Ġunderstand ing +ĠC ON +å¢ Ļ +ĠA re +ĠAmeric a +Ġ ; +( a +Ġr ound +Ġl atest +- up +el ta +e q +B C +it ch +Ġrem ember +ere nces +åĽ½ åĨħ +å¼Ģ å±ķ +Ġj ava +Ġrec ently +综 åIJĪ +è° ĵ +çļĦ æĥħåĨµ +_ W +T ER +r ast +æIJ Ń +ĠD av +äº Ī +离 å¼Ģ +n y +éģ Ĺ +çļĦ äºĭ +Ġpl ant +ec ause +Ġst ream +æķĻ åѦ +amb da +Ġdri ve +ab y +å¿ į +Ġm other +Ġgo al +ĠS ch +Ġpro ced +Ġb ound +in ate +å¹ ķ +Ġar ch +建 è®® +Ġdem on +å°ı æĹ¶ +Ġme chan +Ġsit es +ĠW est +Ġcon cept +Res ponse +çº ¯ +u int +Ġstre ng +Ġincre ased +ul a +Ġcost s +éĺ ħ +m t +缸 ä¿¡ +u el +Ġcomput er +Ġprogram s +å® ½ +Ġl ate +-y ear +å¾ Ħ +ens es +éĹ Ń +se mb +Ġs ong +* / +Ġsaf e +æł¹ æľ¬ +Ġext rem +ang er +ä¼ł 绣 +Ġqu e +m ax +er ial +est s +en ge +St ream +ç§ Ł +ug in +Ġst ock +im age +ä¼¼ ä¹İ +Ġc ool +æĪIJ æľ¬ +Ġu int +Ġl ines +Ġl ost +ul ations +Ġst ra +è¿ĩç¨ĭ ä¸Ń +å¹¶ ä¸į +P oint +æ£Ģ æµĭ +åħ¨ éĥ¨ +ĠH ouse +Ġcol lection +du c +Ġmach 
ine +Ġm icro +/ or +Ġser ver +å¡ Ķ +it able +Ġsub st +Ġcomm and +ç¨ĭ åºı +ĠA c +Ġbas ic +n es +ot al +æ³ķ å¾ĭ +é¡ µ +Ġre li +s g +Com m +I O +æ¡ ¥ +æµ ª +S ystem +Ġc ert +ograph y +æ£Ģ æŁ¥ +çī¹ å¾ģ +åıĹ åΰ +- M +U P +ç§ ģ +å¸ Ń +å®ŀæĸ½ ä¾ĭ +Ġindividual s +........ ........ +ĠCh ar +co ver +æļ ĸ +Ġ util +ï¼Į åį³ +åĬŀ æ³ķ +ï¼Į çľĭ +ic a +Ġgo es +ç¼ ĺ +Ġoffic ial +ex p +Ġ ä¸Ģ +Ġatt ention +G ener +交 éĢļ +æľī çļĦ +Ġun it +Ġnot e +äº ¦ +ĠB ro +Ġuse ful +Ġ è¿Ļ +å¹ ħ +æ± ĩ +cont ent +____ ____ +ar ily +form at +ĠD irect +建 ç«ĭ +gor ith +Ġs y +ĠB usiness +èĩª åĬ¨ +çª Ĺ +n s +Ġg e +Ġn ice +Ġplay ing +as c +ĠS ub +Ġexp ression +åİ» äºĨ +/ c +Ġmy self +Ġse x +Ġn ature +å¿ ĺ +åĬª åĬĽ +Ġlead ing +Ġact ive +Ġstud ent +åŃIJ çļĦ +Ġprot ected +ĠR E +æĶ¿ çŃĸ +r ate +Ġin sp +act ory +ĠBl ack +Ġwom an +ï¼Į æĹł +æĪIJ äºĨ +Ġform at +Ġconst ant +ĠM arket +. Get +女 人 +eb ug +å£ ģ +Ġwork ed +st d +Ġqu al +ãĢ į +tr ue +O DE +æ± ¡ +v est +èĤ¡ 份 +Ġre place +ĠF acebook +S P +Ġch arg +æ¶ ī +æļ ´ +Ġinterest ing +ru pt +i i +() ) +æĹ¶ 代 +å¿ ½ +Ġy es +ï¼Į æľĢ +h aps +} ( +Ġn ational +Ġs al +ĠS ervices +d b +æ´ ĭ +S C +ell ent +Ġm ode +Ġpur pose +é¬ ¼ +Ġfact or +] ) +èĤ¯ å®ļ +ĠRev iew +/ m +le vel +æķ Į +Ġf re +Ġdise ase +ĠM anag +å¡ ŀ +Ġs olutions +Ġproject s +åĪĽ æĸ° +èĢ IJ +an cy +ĠSu pp +Ġnot ice +Ġsat isf +ĠR em +æĬ ½ +ĠEng lish +羣 æŃ£ +Ġsit uation +Ġproper ties +Ġvar iety +éĢŁ 度 +el ine +r is +St art +è¯ ¸ +å·¦ åı³ +St atus +Ġmaterial s +ol es +æľª æĿ¥ +Ġplay er +Ġcount ries +Ġcl in +her n +Ġflo at +Ġcontin u +i ance +it ute +ä¼ Ĭ +ff ect +æĦı æĢĿ +S cript +Ġal tern +çĬ ¯ +( new +Ġaff ect +ç¯ ĩ +pect ed +im ately +çIJĨ è§£ +ĠF ound +Ġim ages +id get +é¸ ¡ +ĠTe chn +eg er +Ġcl ients +å° Ĭ +Ġha ir +Ġacc ur +æĻ® éĢļ +ç͵ å½± +ash ing +ram ework +æĹ¥ æľ¬ +å± Ĭ +Ġpr inc +r ated +ç±» åŀĭ +Ġhel ps +om en +Tr ue +Ġcon nection +_ v +æ£ ® +D ate +Ġim pl +ĠM us +Ġcol umn +o om +æľº åύ +Ġan im +Ġb all +ou b +Ġhe ight +lev ant +ag ue +i ber +çļĦ äºĭæĥħ +Ġpress ure +èĢĥ èĻij +Ġm ag +Ġad minist +od ing +å® ĭ 
+ad es +åıį åºĶ +L ine +act ers +Ġc atch +è¶Ĭ æĿ¥ +身 ä¸Ĭ +Ġst ress +T T +N ow +Ġhand s +id ing +ĠH ome +yp es +Ġmem ory +è¯ ļ +U s +è® ¢ +) * +ri al +Ġenc our +Ġb us +æĮ Ĥ +è°ĥ æķ´ +Ġstate ment +Ġto wards +Ġsom et +Ġto wn +ç»ı éªĮ +F l +Ġre ve +èĪ Ĵ +Ġlim ited +Ġw idth +Ġs un +åħ Ħ +çĮ ® +æ¯Ķ å¦Ĥ +Ġabs olut +Ġre v +è¶ĬæĿ¥ è¶Ĭ +A cc +æĮģ ç»Ń +积 æŀģ +o oth +ev ent +math cal +ĠPres ident +Ex p +æī © +l ib +Y PE +osp ital +è¶ħ è¿ĩ +æŃ£ 常 +缮 çļĦ +Ġappro pri +Ġcomplet ely +Ġthink ing +se ction +A ct +ãĢĤ ä½Ĩ +è¯ ¦ +P O +. r +Ġch ance +éĶ ģ +ff er +OR T +$ { +åĮº åŁŁ +éĥ½ ä¸į +Ġdemon str +Ġcons um +Ġser ious +Post ed +a a +it ude +Ġm is +\ end +ĠL og +ç³ ĸ +A fter +Ġsaf ety +Ġw ide +Ġdo or +S ource +ot es +Ġhigh ly +Ġk it +, ä¹Ł +Ġdep end +ĠU se +ar row +à IJ +æķħ äºĭ +am ily +åħ³ éĶ® +g n +åĶ ¯ +ro s +Ġparent s +Ġget s +C D +id a +r l +ĠN e +he t +ï¼Į å°ı +æ° § +å· § +ĠS T +Ġm ess +Ġey es +å¾ IJ +æ´ ģ +pack age +old er +æij Ĩ +t op +Ġfor mer +Ġhe ar +âĢĿ ãĢĤ +Ġf air +ol ar +æİ¥ åıĹ +b ut +} ) +ĠAf ric +ĠAss oci +èĥ ¶ +d es +æ° ı +Ġocc ur +ĠL a +Ġstart ing +å¼ ĥ +å¢ŀ éķ¿ +Ġorgan ization +ag ram +Ġdevelop ed +al ed +éĹ ª +ib ly +ĠB ook +ĠH ar +ish ing +G roup +ĠPro gram +P er +rap y +Ġr at +è® ¯ +rou ght +Ġl ack +Ġent ry +ĠG roup +è¾ ij +Ġd aily +æŁ Ķ +Ġmor ning +ï¼Į æĽ´ +Ġr andom +Ġd ark +Ġl iter +op en +Ġmod ern +Ġintern ational +g est +Ġcor respond +att le +Ġdef end +os is +Ab out +Ġresp ond +Ġprovid ing +Ġex erc +éĩĮ éĿ¢ +èģĶ ç³» +sy ch +é© ¾ +è¯ Ĭ +Ġhand le +å¦ ĩ +Ġs leep +éĥ½ æľī +æĺİ æĺ¾ +åĩ Į +_ set +ial og +æ¶ ¨ +对 æĸ¹ +é¢Ĩ 导 +ĠU K +Ġ vers +Ġfor ce +Ġcare er +rit er +Ġbas is +ï¼Įä¸į è¿ĩ +_ pro +ĠW ar +for ce +) { +Ġl en +lo cal +Ġloc ated +Ġgreat er +Ġg reen +p re +ĠT V +Ġsee k +b on +å¤ ® +çĶŁ åij½ +以 为 +æ¶Ī æģ¯ +init ely +F A +æ¸ħ æ¥ļ +éĢ Ķ +åİĭ åĬĽ +Ġbec ame +/ b +row n +å± ı +it ation +ĠO ver +Ġtrans fer +Ġb ud +f low +rid ay +l a +In put +åº Ħ +Ġres ource +Ġass ist +ĠAust ral +} ^ +s um +带 çĿĢ +ĠD et +Ġc ele +åıij è¡Į +å®¶ åºŃ +Ġpurch ase +ĠM at +è ª +æİ ª 
+Ġdeterm ine +t able +Ġcont ains +ith ub +con text +n et +ars er +Ġl abel +Ġhtt ps +ç¥ ĸ +he st +ç´ « +A V +ĠR uss +Ġpro gress +Ġt ouch +Ġl if +稳 å®ļ +Ġmult i +Ġc apt +Ġf elt +ous ly +è´ Ŀ +D F +Ġin fo +Ġintern al +æľŁ éĹ´ +éģ į +Ġdescrib ed +Ġspe ak +Ġrequire ments +S he +R el +ob j +Ġpre m +IT Y +Ġac qu +Ġach ieve +i ant +æ§ ½ +st ract +ar a +Ġpolit ical +C P +Ġbro ad +æĶ¹ åıĺ +ES S +Ġt en +ĠInd ia +Ġdec ided +çļĦ å¿ĥ +好 äºĨ +Ġdistribut ion +Ġre ach +if orn +è¿ Ŀ +O UT +ĠW ork +Ġmeet ing +wo od +Ġag ree +> ( +ä¾ĭ å¦Ĥ +Ġexact ly +Ġrem ove +end if +Ġincre asing +? " +Ġte ac +æ¢ ģ +Ġe arn +ĠH el +Ġpat ient +ĠB est +ç½ij ç«Ļ +ĠChe ck +ĠH er +_ DE +å·¥ ä¸ļ +Ġt our +p ite +Ġm obile +Ġapplic ations +u id +èIJ § +b and +Ġin form +ĠB rit +ter m +H ead +åĩı å°ij +Ġpl ans +ic ks +Ġamaz ing +ĠA ir +Ġbe gan +idd en +ron ic +åı¦ å¤ĸ +ĠM ich +åıĤ ä¸İ +ĠP lease +F T +åĨ ¬ +S D +ens ity +æĺİ çϽ +Ġsur pr +clus ion +: ( +ap an +ä½ľ èĢħ +çľĭ çľĭ +Ġst ru +Ġg ave +åĩ Ŀ +Ġs ample +Ġredu ce +ĠWe b +çļĦ éĹ®é¢ĺ +Ġadv ant +k nown +l s +äºİ æĺ¯ +Ġwas n +Ġar r +Ġg as +èĴ Ļ +un ities +as ks +æĴ ij +Ġfield s +_ E +åıij å¸ĥ +ç£ ¨ +AM E +ä¸Ģ个 人 +/ p +ĠT ime +Ġro ll +ab s +ĠG erm +Ġset ting +Ġass um +icro soft +requ est +Ġp ull +çľ ī +le ctions +Ġact ual +C lient +ĠS w +ä¸ ģ +Equ al +çļĦ æĹ¶éĹ´ +ed ia +ang ed +e ch +Q L +Ġs em +é ŀ +. 
ex +ĠIntern ational +éĥ½ ä¼ļ +åĿ Ģ +at ively +Ġm id +æĦı è¯Ĩ +Ġmean ing +èĨ ľ +åıª èĥ½ +out put +éĩį çĤ¹ +Ġdeg ree +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ Ġ +\ " +ur t +æŃ¤ æĹ¶ +B lock +am s +i os +in ner +Ġen h +Ġn av +Ġest im +Ġf a +pec ial +Ġcreat ing +å¢ ¨ +ĠS an +- S +ä»ĸ们 çļĦ +æĪ ª +ĠT ype +ĠA ng +Ġsomet imes +Ġsuccess ful +Ġh y +O ff +c her +iforn ia +cl ient +æĹ ĭ +Ġparam eters +Ġro ot +x x +ä¸ ¹ +Ġ ir +Ġful ly +ç³» åĪĹ +Ġkn ew +Ġident ify +ĠIn s +Che ck +å½ĵ æĹ¶ +Ġf ight +Ġallow ed +But ton +N umber +Ġne igh +ï¼Į çİ°åľ¨ +ĠA ut +ĠCh ina +ĠR ef +Ġthere fore +AT ION +æĺ¯ ä¸į +en ame +Ġchar ge +B A +æĩ Ĥ +ar ant +U I +çı ł +æĸ° åŀĭ +ER S +\ begin +ond on +æĭ Ł +Ġr ates +Ġst age +D o +Ġprim ary +ĠAl so +çݰ 代 +o pping +Ġg iving +Ġm agn +åĿļ æĮģ +d ev +Ġexper t +Ġw ood +//////// //////// +ul ated +äº ¡ +çݰ åľº +c il +ent y +Ġst orage +çĶ· 人 +B ox +Ġfavor ite +ï¼Į å°±æĺ¯ +Ġsay ing +Ġa ware +Ġem erg +a el +æ¢ ħ +Ġr ules +Ġ( ) +] { +Ġm ut +çĸ« æĥħ +ä¼ ¦ +math bb +Ġf resh +i very +ER R +ĠSt e +AL L +ï¼ĮéĤ£ ä¹Ī +æ¼ « +è·Ŀ 离 +enc ies +äºĮ åįģ +Ġd ed +Ġref erence +ĠO ther +æ¹ ¿ +Ġtri ed +K E +åº Ł +Ġcan cer +é© ± +Ã Ĺ +è ¹ +Ġt al +äºĨ ä¸Ģ个 +Ġcamp aign +åĪ« 人 +Ġsum mer +Ġpop ulation +è ļ +S earch +iz ations +åĨħ éĥ¨ +éĩı çļĦ +ir us +Ġp et +Ġstep s +çĦ¶ èĢĮ +Ġaut o +ĠC oun +ep s +Ġstrateg y +ï¼Įåħ¶ ä¸Ń +ç§» åĬ¨ +ĠDep artment +ol or +Ġst uff +Ġchang ed +Ġhe ard +ĠC opyright +Ġs ale +l or +ĠK ing +æĢ Ĵ +Ġnum er +ust er +å®ŀ ç͍ +ot ion +Ġde ad +çģ Ń +é² ģ +_t o +åĬ ± +çĭ Ĥ +Ġf ix +Ġs pect +Ġ ä»ĸ +E nd +Ġequ ipment +Ġanal y +Ġl ed +åıij æĺİ +æĻ ĵ +set s +Ġconst ruct +æı Ĵ +Ġw ish +I Z +ig ma +app ing +Ġp le +软 ä»¶ +Ġp ark +am ma +Ġop in +, è¿Ļ +au ght +in ated +æ¨ ª +Ġm ount +ĠDe velop + ° +â Ī +. 
util +ĠP re +ä¸į ä»ħ +ï¼Į åIJĮæĹ¶ +ĠAN D +P T +Ġdistribut ed +ï¼Į 便 +Ġpart ners +åıĬ æĹ¶ +Ã Ń +çĩ ĥ +Ġt ree +Ġl ock +Ġemploy ees +s qrt +éĿ¢ 对 +âĢĿ , +çļĦ åľ°æĸ¹ +Ġrequ ires +il er +æĹħ 游 +éĢ Ĵ +ĠO pen +åıĸ å¾Ĺ +æŀ ª +Ġsc ale +çŃĶ æ¡Ī +Ġpred ict +æ³ ° +Ġpie ce +æĺ¯ä¸Ģ ç§į +ä½į äºİ +g a +Ġpl aces +ro te +Y our +åĩº åİ» +å½ » +ĠM od +ĠD ist +ro y +ad o +å° ĺ +Ġcult ure +H ere +Ġde cre +Ġa im +æĿ ° +r an +# if +çĤ ¼ +. ) +ri ver +æĽ ¿ +éĿ¢ 积 +Ġas pect +Ġhim self +Ġcar ry +P art +ä¸į å°ij +_l ist +Ġ[ ] +èµĦ æĸĻ +æĥ ¯ +Ġinvest ig +Ġoper ation +ra z +Ġcon v +own load +ĠA tt +arg in +p c +å¿« éĢŁ +AB LE +ç͵ è§Ĩ +Ġche m +Ġh our +é¼ ĵ +Ġse lected +çĥ ¦ +Ġd oc +ens ions +Ġtemper ature +. N +ä¼ ¯ +Ġa f +Ġf ine +Ġinter face +Ġreg ist +Î ¿ +åĩł ä¹İ +ĠPol icy +g l +ï¼Į æĬĬ +s m +åĩº çīĪ +ĠE very +.l ength +ms g +_ re +éģ¿ åħį +ä¸ī 个 +èĤ ¥ +ï¼Į ç͍ +du le +- B +f ord +, å¹¶ +åĪ · +Ġg old +é¦ĸ åħĪ +Ġinter view +qu ery +ä¹ĭ ä¸Ń +ç»Ī äºİ +Ġref er +èĻ İ +Ġs on +è¢ ĭ +а Ð +Ġre levant +g ar +æľī çĤ¹ +Ġre pl +å½¢ å¼ı +Ġb and +ä¸įåIJĮ çļĦ +Ġro ck +ä¸į éĶĻ +r im +so le +ĠG o +Ġmov ing +Ġexist ing +Ġre pe +ä¸į 好 +ãĢĭ ï¼Į +Ġh uge +è± Ĩ +ri ve +Ġad just +Ġb ottom +Ġbehav ior +el come +ï¼Į 没æľī +Ġj o +æī¾ åΰ +说 çļĦ +ard en +ens or +ic ip +ĠK e +i as +丰 å¯Į +I LE +åĮ» çĸĹ +Ġth us +ma zon +å§Ķ åijĺ +å¹´ è½» +Ġcertain ly +æĺ¯ ä»Ģä¹Ī +Ġs witch +Ġover all +h ost +è®° èĢħ +æĽ ° +Ġtrans form +Ġd irection +or ge +æĬ µ +å®¶ çļĦ +M ethod +æ¬ ² +ty p +çĭ Ĺ +Ġparticular ly +Ġhe at +两 人 +è¿ ¹ +od ay +Ġs old +ffic ient +ĠD ec +åζ éĢł +ä¸Ĭ äºĨ +P re +Ġeconom ic +Over ride +åı¦ ä¸Ģ +ĠR eturn +ress ed +ĠS ince +åľ¨ äºİ +i ately +Ġarg ument +ĠM ost +éĺ¶ æ®µ +( r +Ġfin ally +Ġno vel +ynam ic +ä¿¡ åı· +il ies +it ect +Ġopt im +Ġchar acters +Ġt er +c p +Ġcomp ar +Ġdist ance +身 è¾¹ +-t ime +Ġpa id +Ex t +åħ± åIJĮ +x t +Ġ ~ +Ġhealth y +Ġs ession +ĠManag ement +ri p +æµ ĵ +è¿ Ķ +åŁ¹ è®Ń +åĭ ĩ +éĢ ĥ +Ġexc ellent +ol y +满 è¶³ +Ġad ult +ï¼Įå¹¶ ä¸Ķ +C A +In st +Ġf ather +åĶ ± +ä»Ĭ å¹´ +Ġim ag +ay ers +ames pace +èĦ ī 
+æķĻ å¸Ī +al d +åIJ¬ åΰ +ateg ories +Ġp ages +Ġdem and +çļĦ é«ĺ +Ġinterest ed +Ġlook ed +Ġal one +ï¼Į ä¸İ +Cont act +è° · +Ġregard ing +æķ ı +A X +çº ¹ +ĠF e +åζ 度 +åζ ä½ľ +认 è¯Ĩ +æ´ ŀ +ĠO bject +# endif +å¦Ī å¦Ī +ĠAn y +-t ype +Ġappropri ate +åIJĪ åIJĮ +te e +å° ¾ +Cont ent +C S +Ġbl ue +åĬł åħ¥ +( l +( [ +主 ä¹ī +( const +å§ ĵ +ĠR el +Ġappro x +à ³ +Ġintern et +Ġpre f +èģĮ ä¸ļ +ed ding +B ase +opt ions +Ġplan ning +s l +ä¸Ĭ è¿° +Ġin nov +Ġfr ame +ribut es +Ġturn ed +id ents +ĠT witter +èĤ¡ 举 +åªĴ ä½ĵ +Ġshow ed +éķ¿ æľŁ +æ¡ £ +åIJĮ åѦ +ru it +o ch +åı ¬ +Ġre asons +Ġother wise +in st +Ġf irm +D R +ĠRes earch +æ® ĭ +c ount +u it +anc ing +Ġwho se +è´¹ ç͍ +Ġmov ie +温 度 +Ġfil ter +ĠE v +iqu es +- pro +Ġad vert +ä» ¿ +æĥ ł +å¿ Ĩ +ĠT op +Ġst ar +亿 åħĥ +ild ren +Ġr ule +Ġst ories +é© ¶ +ä¼ĺ åĬ¿ +Ġsur v +pect ive +Ġbe y +Ġperform ed +ĉĉĉĉ ĉĉ +ren ch +ä¼ļ æľī +è¡ ¡ +v ille +Ġtrad itional +å¿ĥ çļĦ +ĠSo ftware +Ġv ict +Ġmay be +il ing +m ar +ãĢĤ " +Ġc ategory +M ay +æµĭ è¯ķ +çĶŁ çī© +Ġbey ond +Ġpol ice +说 è¯Ŀ +ĠL oc +M ore +Ġconst ruction +åΰ åºķ +c el +严 éĩį +, ä¸Ģ +m u +In stance +i pping +åĬŀ åħ¬ +Ġa x +Ġbegin ning +k a +å¤ļ 个 +Hand ler +ĠJ es +A ction +m ark +Ġpre fer +è§Ĩ é¢ij +Ġcon cent +ear n +ï¼Įè¿Ļ 个 +b um +Ġdev ices +ul ate +ar se +æ² Ī +Ġb rought +ä¿Ŀ éĻ© +æ¯ı 个 +Ġs av +Ġf em +Î ¹ +Ġcons ent +羣 æĺ¯ +Qu ery +Ġre leased +ag on +èħ ¿ +.app end +Ġcand id +Ġd ream +å®ŀ åĬĽ +ĠD em +åĬł 强 +C lick +_ get +ic o +iv il +Ġex pos +éĢĤ åIJĪ +l am +N ext +æķ ¬ +å¥ ĭ +é«ĺ çļĦ +ä¿Ŀ éļľ +åħ¨ çIJĥ +Ġdel iver +ĠV iew +ä¹ ĺ +ur d +èį IJ +å¤ļ çļĦ +c m +æī « +Ġdef initely +Ġdo g +Ġdam age +åıª è¦ģ +I ST +ER T +æ² ¿ +å·¥ åħ· +ĠP ort +B ack +è¡Į åĬ¨ +ĠS en +Ġtrans port +st art +Ġbel ie +ä¹Ł æľī +ĠCal ifornia +ĠN OT +ç¨ĭ 度 +ol ic +"> < +éŃ Ĥ +Ġgu ide +Ġb oolean +Ġtra in +Ġm er +èĩ £ +ord s +竣 çĦ¶ +D A +ear s +erv ation +ĠDes ign +en ed +Ġplay ed +ĠD el +Ġcrit ical +Ġgo als +Ġsh ape +Ġinvest ment +ĠSt reet +Ġpresent ed +Ġb irth +èŀ º +ç»ĵ æĿŁ +c ial +Ġcommun ication +oun ced +çŁ ¿ +Ġal 
though +å¹ ¼ +ist ory +ric s +Ġbenef it +Ġv irtual +Ġd ro +Ġadv ice +Ġneg ative +Ġ ign +Ġveh icle +ĠE ven +Ġp ow +æ¯ķ 竣 +æ¢ ° +atic ally +æ® Ĭ +, å°± +Ġfeel ing +å¡ ij +Ġlarg er +ç¢ İ +ĠComp any +å°¤ åħ¶ +_p ath +ĠPro f +çĤ¹ 头 +ä¸ĭ åİ» +åĢ į +è§Ħ åĪĴ +Ġimport ance +ool s +ra ction +es h +Ġbut ton +人 ç±» +ĠA p +æ¡ Į +æij ¸ +oo st +ac ing +æĿ¥ èĩª +æĢĿ æĥ³ +Ġfail ed +il ed +Ġpict ure +è¿ ª +Ġtri p +Ġsc ience +éŁ³ ä¹IJ +w idth +Ġan g +éĹ ² +- F +T oken +## # +w orks +å¿ĥ ä¸Ń +èij£ äºĭ +us ion +ĠI d +åıĤ èĢĥ +æ¶ Ĥ +缸 对 +éĿ¢ åīį +Ġ= == +G L +ot hes +T op +por ate +æĸĩ 竳 +} _ +表 éĿ¢ +Ġequ al +n cy +, æĺ¯ +ĠS im +AN D +Ġset s +æľ ± +Ġgrow ing +ä¹ĭéĹ´ çļĦ +ic ate +P A +Ġsp end +åıĺ å¾Ĺ +æ· » +in y +und red +. sh +æĹ Ĺ +Ġpost s +ï¼Į åĽłæŃ¤ +E C +O K +Ġ* ) +好 åĥı +Ġimmed iately +Ġstra ight +对 象 +G B +Ġapp lied +ond ay +Sh are +ä¸į äºĨ +ast e +Ġfollow s +O ptions +è¿IJ è¡Į +ç» ª +af e +Ġg ain +ĠJ apan +d ir +ç»ı 常 +ash ion +olog ies +ä¸į ä½ı +Ġcons ult +ï½ ŀ +Ġfind ing +Ġn amed +èĩ³ å°ij +ou l +- r +åŃĺ åĤ¨ +ĠM em +Ġthe ory +符 åIJĪ +V is +ãĢģ ãĢĬ +_f ile +æĮ ¯ +ĠPl an +æ³ ½ +Ġsh ared +ra int +Ġw arm +æ³ ¡ +Ġstreng th +i ency +M e +æĤ ī +交 æµģ +ign ment +ĠIn formation +èµ ŀ +F irst +Ġhist or +Ġobtain ed +æŃ£ ç¡® +Ġm iddle +çij ŀ +æ± ¤ +ow s +Ġ\ [ +æ¹ ¾ +ilit ary +Ġ< - +ĠW ell +ĠL ondon +æij © +IN T +B uffer +Ġv s +Ġfollow ed +ĠE duc +Ġprodu ce +U B +ol f +ï¼Į åĪĻ +ï¼Į è¿ĺæľī +Ġsu st +æĺ ł +åĽ¢ éĺŁ +Ġgr ant +æĿĥ åĪ© +Ġreturn s +Ġsc ient +N et +Ġgener ally +s es +Ġpric es +st ream +Ġbusiness es +our ney +Ġtr ade +æŁ ± +ĠG over +Ġn p +è®Ń ç»ĥ +Ġd ry +Ġcomm erc +Ġeff ort +- A +ĠFound ation +al y +C an +ĠT H +ãĢĤ ä½Ĩæĺ¯ +ĠF orm +ate ver +con s +Ġd oub +à ¶ +è¯Ń è¨Ģ +Ġflo or +ĠG reen +Ġth ous +as ure +a e +çĦ ¦ +Ġth reat +Ġsc ore +Ġb order +Ġapp rec +Ø § +ĠF riday +Ġc ash +å®ī æİĴ +as y +ç» ķ +è ¦ +Ġpass ed +D B +Ġdi agn +augh ter +es tern +Ġu lt +åĨħ çļĦ +ĠB oard +ä¸ĵ å®¶ +on ly +ä½ĵ çļĦ +ä½ĵ éªĮ +Ġmax imum +ä½ĵ ç³» +æĸ¹ 便 +çŀ ¬ +Ġpack age +¹ ģ +Ġcon vers +ĠCol lege +è´Ń ä¹° +Ġorig 
in +åIJİ æĿ¥ +åħ¨ éĿ¢ +a per +èIJ ¨ +ic ated +å¦ » +Ġe at +Ġsol id +Ġopport unities +he nt +Ġear lier +Ġmat rix +Ġal gorith +æµ ® +ç»Ŀ 对 +Ġparam eter +Ġc ru +Ġchalleng es +çļĦ æīĭ +ind er +Ġ> = +M L +Ġhig hest +ad ata +Ġro t +ve re +() ; +M at +O C +ĠM or +The y +- \ +æ³ Ľ +( g +èĦ Ĥ +èĥ ŀ +Ġj oin +ä» ģ +} ^{ +æĻ ¶ +ĠG reat +æ² Ł +éĥ ij +Ġh ous +om b +ĠPro ject +è· Į +让 人 +éª ij +åij Ī +atur day +Ġgu id +} . +Ġse lection +E mail +身 份 +æīĵ å¼Ģ +éª ¤ +ĠB en +ĠCan ada +Ġwant s +C al +Ġbrow ser +Ġexp and +semb ly +con nect +Ġcol lege +çIJĨ 论 +v ider +Ġfe et +æĶ¿ æ²» +ä¼ Ł +we b +B uilder +Ġcap ital +Ġm ill +Ġreview s +Ġt ick +èĢĥ è¯ķ +ĠTh anks +éĶ ħ +y ing +Ġ ut +Ġ :: +Ġar riv +Ġsupp ly +in n +( v +Ġf igure +% , +æĪ ´ +åĩ ī +Ġant i +æł· åŃIJ +######## ######## +AR R +z a +Ġl ab +Ġsp ent +åĽŀ çŃĶ +ct x +è¾ ħ +Ġsm art +Ġobs erved +Ġa ctions +L ast +r ont +çĶŁ çļĦ +M ode +Ġphot os +Ġw rote +èģ Ĭ +Ġsu ff +ĠE X +AG E +ĠâĢ ¢ +Ġc at +oc r +Ġtest ing +ĠC ook +ific ate +ĠA nt +D S +çζ æ¯į +am ing +ĠG ood +Ġreport s +Ġre nt +æļ Ĥ +Ġc ore +E E +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ +Ġra ise +Ġ( - +Ġsp irit +æĦı ä¹ī +IT H +ĠJ ack +. to +åºĶ çļĦ +èĪ į +ĠDav id +Ġsequ ence +P lease +aint iff +j son +æģ ¢ +Ġmod ule +Ġind ependent +M P +ĠC are +èµ ı +co pe +Ġn ut +h o +Ġdes cription +以 æĿ¥ +Ġmot ion +ĠW indows +ĠChrist mas +çļ® èĤ¤ +Ġex ception +Ġtre nd +Ġey e +ĠF ile +ov ing +éĴΠ坹 +Ġrest aur +i ers +ot ed +æĮ º +he ight +Ġeas ier +Ġd ie +Ġtr uth +in ct +Ġ vert +P age +Ġb rain +Ġf at +Ġcompon ents +ĠWh ite +åı¥ è¯Ŀ +hen s +éĻį ä½İ +, ä»ĸ +Ch ar +or row +ĠA R +Ġset tings +A ss +ĠOn line +Ġoper ations +ç ¹ģ +è± ª +èı Į +_ ex +Ġd rop +åĩ Ń +Ġpres ence +åģļ 好 +çģ ° +ĠEx p +Ġarg s +E m +u ps +ic ations +ĠG l +é¢ Ĺ +åĴ ¨ +Ġinc ome +. re +ĠL ife +( h +con tain +æĢ§ èĥ½ +up date +p ret +ar p +ĠE m +ï¼Į 说 +Ġd iet +Ġchall enge +æĪIJ ç«ĭ +( S +, æľī +Ġcent ral +Ġt ag +å§ ij +å®ĥ 们 +cre en +ĠM ake +_ add +Ġstru gg +. 
assert +è¿ ħ +ell ig +Ġpo or +Ġref lect +Ġd ivid +c an +Ġcoun ter +模 åĿĹ +Ġch annel +Ġ · +æ¯ı 天 +ĠN S +Su pp +l ies +è£ Ĥ +c are +u a +um an +Ġlet ter +( _ +ĠH ot +åĸ · +Ġh yp +ou nce +_in fo +Ġpay ment +Ġrel ig +ĠAN Y +è§Ħ 模 +Ġ er +Ġrest rict +宣 ä¼ł +add ing +ç»Ļ æĪij +Ġve ctor +ãĢĤ ä¸Ģ +åį« çĶŁ +Ġbud get +é¾ Ħ +Ġext ends +èĢ Ĺ +Ġreturn ed +Ġf ixed +Ġp en +æŁ ³ +Ġfor ms +Ġrespons ible +åĨį æ¬¡ +Ġcas ino +Ġl oved +se lect +Ġinst itut +Ġpres ident +AC T +çĸ¾ çĹħ +ï¼Į 请 +ä¸ĭ éĿ¢ +è¯ģ åΏ +Ġp air +Cont roller +B l +ex as +ph one +O per +Ġs rc +ĠSu per +Ġcompon ent +Ġa ward +åĽł ç´ł +èµ° äºĨ +ä¹ Į +ashing ton +ĠEurope an +T e +åĽŀ æĿ¥ +æĬ ¬ +L oad +æķ° åŃĹ +r d +vent ion +å¼ķ èµ· +ï¼Į éĢļè¿ĩ +Ġthan k +Ġ" $ +ĠAn n +ĠJes us +æĸ½ å·¥ +por ary +Ġclass es +ï¼Į 被 +ym bol +ar th +Ġh op +Ġpl us +D C +Ġtalk ing +æ½ ľ +Ġsp read +Ġfam ilies +Ġgl ass +Wh y +ç«ŀ äºī +æĻ ¨ +Ġ ing +æĶ¾ åľ¨ +å¼ Ħ +th at +Ġelect ric +ĠThere fore +Ġco ord +R em +åĵ Ń +èħ ¾ +" " +Ġcommerc ial +åķĨ ä¸ļ +ic ine +çĽ IJ +ĠWh y +UL T +Ġser ve +Ġro b +åĬĽ çļĦ +å°± åľ¨ +è´ · +ph i +æĺ¯ 个 +ing er +èĸ Ħ +om ic +æľī ä»Ģä¹Ī +Ġclaim s +ĠM ac +ĠInd ian +å®ŀ éªĮ +åij µ +ri pt +X X +é½ ¿ +Ġd ating +_ k +ãģ ® +z er +èĩª çͱ +yd ro +per ties +ĠChrist ian +Ġem pty +åĨ Ĵ +D on +æIJ ľ +.s ize +Ġl ayer +f s +ç»Ħ æĪIJ +Ġb ath +ĠA mazon +ide os +Ġh undred +I A +æģ¢ å¤į +éĻ · +æĮĩ 导 +Ġra ce +Ġne arly +Ġw ild +b a +EX T +Ġw ife +x ff +s cript +Add ress +Ġz ero +wh ich +æ¯ķ ä¸ļ +g ithub +ï¼Į 该 +ed s +ĠM et +å© Ĩ +Ġh arm +. > +æı ´ +ï¼Įè¿Ļ æł· +c ite +ĠO nce +Ġc overed +ĠAustral ia +é£Ł çī© +Ġident ified +Ġstrateg ies +math bf +st on +. id +Ġprep ared +Ġbro ther +Ġto ward +æİ¨ è¿Ľ +Ġopen ing +åħ¬ éĩĮ +æĿ¥ åΰ +ri age +Ġproced ure +ĠFl or +Ġsh ift +ĠEx t +f e +ĠT R +Ġdef inition +Ġf ear +n amespace +Ġb oy +. 
name +ä¸Ģå®ļ è¦ģ +Ġrel ative +P P +Ġpol y +ĠComm ission +L abel +Ġfin ish +åΤ æĸŃ +æĪĺ æĸĹ +Ġd ress +å¥ Ī +L ayout +el ines +iction ary +åħ´ è¶£ +Ġs weet +Ġsm ooth +Ġsym pt +Ġach ie +Ġmiss ing +ï¼ī ãĢĤ +Ġinflu ence +re hens +å¥ Ķ +ĠE qu +Ġst ored +Ġdevelop ing +æ´ ª +o or +at in +ĠSt r +çī© è´¨ +Ġimplement ation +çİ » +æĹ¶ æľŁ +ig ital +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠ +Ġwh atever +ct ors +Ġl ose +E CT +ãĢĤ åĽłæŃ¤ +Ġb ill +ï¼Į æīį +Ġst ation +Ġsom ew +ĠL ong +Ġm ental +un ately +ĠR ob +ri an +åŃ ¤ +æĪij åĽ½ +åĩºæĿ¥ çļĦ +ĠH ol +_ ID +ĠG NU +Ġwould n +ä¼ļ 计 +èģ ĺ +è§Ħ èĮĥ +u ations +Ġest ate +am in +Ġlead ers +< < +Ġmanag er +æĭ ³ +Ġthem e +ob s +æ¶ī åıĬ +ĠW ater +ĠC D +OR D +ĠE nd +å¤ļ æķ° +K e +ĠY es +ĠR oad +è° ĭ +. pro +åIJį ç§° +-------------------------------- -------------------------------- +Ġart icles +Ġann ual +pec ific +Ġsol ve +驱 åĬ¨ +Ġqu arter +Ġdist rict +/ t +Ġpract ices +ĠF ig +å±ħ çĦ¶ +ĠN ote +ĠM art +Ġh ab +Ġdefend ant +/ g +sp ace +_ j +è¿ Ł +is ode +erson al +as et +Ġvisit ors +ç« ¹ +éĥ½ åľ¨ +å¤į æĿĤ +å® ¿ +os h +ĠBrit ish +_ sh +ĠP ay +Ġthrow s +os es +第ä¸Ģ 次 +Ġbe g +æ°ij æĹı +é¸ Ł +< ? +C o +Ġpl ants +l ong +Ġoper ator +ro ad +F ilter +Ġdet ail +Ġatt end +ve c +is c +oun ter +æľī çĿĢ +çļĦ 女 +ï¼Įè¿Ļ æĺ¯ +æĵ ¦ +Ġtem plate +ĠB uild +" } +ĠS al +ĠCh urch +E ST +k y +Ġpre par +Ġes c +éĩij é¢Ŀ +ç¾İ åħĥ +Ġmot or +åĿ ¦ +ĠW ITH +anc el +Ġinj ury +S ign +ï¼Į 没 +Ï ģ +æ¼ ı +Ġf ans +Ġserv ed +天 ä¸ĭ +天 çļĦ +b lock +åĵĪ åĵĪ +Ġcommun ities +Ġac ad +ĠT om +Col umn +. class +Ġpie ces +æģ ¨ +. J +L ength +æĺİ ç¡® +- the +Ġfin ished +ç»ı çIJĨ +éĥ½ 没æľī +ĠEx per +ĠâĪ Ĵ +Ġme ant +F C +_ un +se y +ĠT ext +Ġf ol +åĪ ij +p u +Ġch ain +Ġext ract +çº µ +Ġwe ar +åĬł ä¸Ĭ +æĪĺ çķ¥ +Ġmed ium +æ¡ ĥ +åIJ ¹ +äºĴ èģĶç½ij +Ġgu ys +ĠO h +^{ - +æĥħ 绪 +]( # +b i +st e +P ress +åľŁ åľ° +' . 
+Ġex ists +ĠM icrosoft +A ccess +( * +Ġear th +Ġn uc +å¤Ħ äºİ +ĠS l +个 æľĪ +( R +广 åijĬ +å®ŀ è·µ +è¾ ° +æŃ£ æĺ¯ +en v +ĠB as +Ġpost ed +Ġwait ing +ic it +å² ¸ +ad ow +er ry +Ġupd ated +ĠM iss +ien ces +ed om +Ġdiscuss ion +S erver +Ġ § +if orm +é£İ æł¼ +ĠInst itute +ï¼Įä¹Ł æĺ¯ +ARR ANT +ãĢĤ ä»İ +ĠS am +, æīĢ以 +ï¼Į æĥ³ +ĠY ear +tt p +Ġsp ring +ĠL ord +Equ als +Ġeconom y +åħ ¼ +çļĦ æĸ¹æ³ķ +Ġfor g +P U +ĠPh il +ç½ ² +â ĸ +Ġerr ors +åĪĽ ä¸ļ +Ġb ra +ä¹ ³ +U rl +- old +M T +Ġabsolut ely +Ġ » +Ġarch itect +è¿ « +/ w +ad y +è¿ĩ äºĨ +ĠCom ments +缸 åIJĮ +æĤ ² +缮 åħī +rom e +çļĦ ä¸į +ï¼Į èĥ½ +Des cription +å̼ å¾Ĺ +ãĢĤ æľ¬ +Ġsc ene +ĠO per +ç± į +Ġc up +å£ ® +Ġcon ven +Ġchang ing +val ues +ĠW ARRANT +éģĵ è·¯ +Ġdis cover +ber g +Ġinc red +Ġmen u +es ome +æĭ Ķ +ol ved +è² Į +ĠB et +end ar +Ġanim als +Ġlead s +', ' +Ġess ay +ef t +åħĥ ç´ł +çݰ 象 +ĠA D +ï¼Į éļı +ĠI P +ï¼Į åħ¨ +ill er +os ite +_ a +éĻIJ åζ +ĠP RO +çĩ ķ +Ġtr ig +Ġar m +åĽŀ åΰ +äºĭ å®ŀ +y a +ĠS O +ä¼ ı +M in +åı£ æ°Ķ +B ook +Ġsqu are +ĠR et +an o +å§Ķåijĺ ä¼ļ +ä» Ķ +Ġc ry +art s +^ {\ +ĠDirect or +k in +ĠSt and +æ¼ Ĥ +Ġcoff ee +èį ¡ +Ġl uck +Ġadv ance +Ġgu y +Ġra re +Ġmeas ures +el ess +Ġst ick +Ġy ield +ĠE lect +c a +Ġj ournal +æµ © +çĽ Ī +Ġaud ience +Ġtyp ically +æĪ¿ éĹ´ +Ġcomfort able +. 
create +ĠIs rael +Ent ry +ĠL ove +â Ķ +Ġhe aring +\ [ +表 è¾¾ +Ġdiffere nces +Ġb rief +ĠF igure +M et +Ġcreat ive +.n et +çݰ éĩij +Ġ ãĢĬ +Ġm ission +the ta +Ġlead er +ĠMich ael +çļĦ ä¸ľè¥¿ +ec ess +ĠPl ay +è¿Ļ 次 +æĻ ĭ +模 åŀĭ +Ġtrans l +çº ¤ +åİŁ åĪĻ +ĠM ary +Ġfore ign +éĥ İ +è· ¨ +â Ħ +èħ ¹ +let s +d oc +æĸĩ æĺİ +ĠTh om +Ġb ag +ft y +all s +äº ķ +èĭ± åĽ½ +æľī æīĢ +M enu +yt ics +çļĦ æĸ¹å¼ı +ä¸Ĭ åİ» +åĽ° éļ¾ +æĿ¥ æºIJ +å°± è¦ģ +çļĦ 好 +å® ¾ +éĽĨ ä¸Ń +Ġwrit er +c ore +çī¹ èī² +å½ ¼ +_ e +å¤ļ äºĨ +éĤ ® +ĠP ress +Ġdirect or +è¿ħ éĢŁ +Ġill ust +P tr +ĠB ay +Ġknow s +Ġfun ds +æŁ ľ +Ġcour ses +go ing +Ġw arrant +åħį è´¹ +æĭ Ĵ +ĠC amp +Ġhom es +ess ions +p y +åĪļ æīį +F in +He ight +ĠB log +l ast +Ġconfig uration +ĠA PI +æ» ´ +ap ache +ult i +Ġmet al +æĹ¢ çĦ¶ +Ġ[ ... +Ġp anel +sc ribe +åįı è®® +Ġsent ence +ul ed +by te +M D +ĠPr ice +Ġgener ation +éģĩ åΰ +Ġs ector +Ġj oint +è¿IJ èIJ¥ +Ġstand ards +L ike +Ġagre ed +ï ¿ +Ġ" \ +ç¡® 认 +t imes +ĠL ead +ĠF ood +M em +ĠV alue +ig ration +/ j +åħħ 满 +ĠS ection +do or +Ġcorrespond ing +ĠAd v +说 äºĨ +Ġwe ak +çļĦ 主 +èĬ ¯ +& # +ï¼Į å¾Ī +ç¬ij çĿĢ +Ġac id +è· ĥ +Ġaw ait +Ġfl at +Ġdec ide +Ġsus p +çľĭ è§ģ +åĵª äºĽ +ern el +word s +æīĵ éĢł +åIJ IJ +n bsp +Ġcl imate +ĠRep ort +åĭ ¤ +åIJ¸ å¼ķ +ĠR ober +clud ed +Ġrun s +å¿ § +æĹ¶ çļĦ +rem ove +è¿ŀ ç»Ń +Ġpolic ies +Ġspecific ally +math rm +Ġcol ors +at abase +Ġs and +Ġg un +Ĥ ¬ +und le +å° ¸ +ent ity +ï¼Į ä¼ļ +_t ime +åı¯èĥ½ ä¼ļ +å¦ ĸ +an ner +/ L +- ch +Ġb ow +çŃ ¹ +Ġcent ury +ad er +æ± Ĺ +Ġla unch +Ġt or +Ġfl u +ĠJ ew +è§Ĥ å¯Ł +Ġf av +æİĮ æı¡ +ic ial +Ġ[ [ +éŀ ĭ +详 ç»Ĩ +èį · +æľº åħ³ +Ġrel ax +Ġkeep ing +å®ļ ä½į +E ng +çĽij çĿ£ +r ical +çĿĢ ä¸Ģ +ĠU N +ç«ĭ åį³ +æĹł 论 +éĵ ľ +Ġf an +ĠE mp +ent er +ï¼Įåıª è¦ģ +ä¾Ŀ çĦ¶ +ual ity +C ON +åľ¨ è¿ĻéĩĮ +Ġlaw s +ag ement +p ost +Ġinst r +ĠL o +Ġex change +ĠApp le +O B +Head er +ip es +åĬŀ çIJĨ +å®¶ éĩĮ +f ast +ĠT wo +_ key +_p aram +Ġm o +Ġident ity +ĠSh ow +Ġcontain ing +ant a +Ġun c +Ġdo ctor +èĩ Ĥ +ä¸į ç͍ +lic t +Ġoper ating +or a +as p +âĢĿ . 
+is ms +ĠTe am +ĠU ser +d raw +ç² Ĺ +åł Ĩ +Ġl abor +ä¸Ń åįİ +æīĢæľī çļĦ +Ġent ertain +ou ter +ad ing +大 ä¼ļ +Ġadv anced +un time +Ġr ich +c urrent +W ork +Ġag ent +ç² ® +严 æł¼ +Le vel +Ġte aching +ĠF urther +in ue +Ġto ken +P os +O b +æĪIJ éķ¿ +_T YPE +Ġex ternal +Ġsubst ant +ĠCl ub +èĢĮ è¨Ģ +åĴ± 们 +Ġparticip ants +un te +ĠSec urity +C ase +ĠS y +å¼Ģ åı£ +- R +åīį çļĦ +om y +ä¸į æĥ³ +ĠL ook +Ġpr ime +çļĦ å£°éŁ³ +åħĦ å¼Ł +ĠM ax +H ER +az ine +Ġapplic able +ore m +Ġdoc uments +ner gy +urs or +ev en +t ure +ĠO N +ĠAcc ording +Ġstart s +Ġ[ ' +im ation +Ġsche dule +TM L +P ol +Ġm ole +å¹³ åĿĩ +b uild +Ġwe bsites +O ST +éĢIJ æ¸IJ +ref ix +èĤ º +ç»Ħ ä»¶ +Ġthe rapy +çݰ å®ŀ +çİ» çĴĥ +驾 é©¶ +ï¼Į 对äºİ +éĺ Ģ +åĮħ åIJ« +åį± éĻ© +Ġlist ed +Ġnumer ous +ĠG en +Pro cess +æį · +绣 ä¸Ģ +ĠAfric a +Ġs ch +çļĦ éĩįè¦ģ +Ġfl ag +Ġd aughter +Ġsc hed +( data +ĠRes ult +IN D +æĭ Ĩ +ab et +t hen +( struct +Ġob j +R eturn +if ts +ä¿ Ħ +Ġinter pret +or ry +Ġb ond +ud d +sec ut +ĠF ire +çī© çļĦ +- se +and er +ĠA uthor +Ġfun c +åıĮ æĸ¹ +Ġf ashion +z en +åľ° çļĦ +éª Ĺ +Ġrat io +Ġenvironment al +Ġgirl s +èĭ± è¯Ń +v as +æ Ĩ +æĹ¥ 常 +é¦Ļ 港 +èĪ ° +Ġline ar +Ġra ised +Ġfocus ed +F alse +S L +Ġ icon +å°½ 管 +AP I +éĩĩ åıĸ +Ġhon est +ãĢĤ ä¸įè¿ĩ +å§ ľ +æ·± åħ¥ +å² © +, 以 +æĽ ¹ +ĠCh inese +Ġr ing +, ' +她 们 +F ollow +as er +od ies +ick en +æĸ ij +C urrent +ĠM ont +çĭ ¼ +Ġf ish +Ġsy nt +ĠComm it +Ġ"" " +æĪIJ åijĺ +ç͵ è·¯ +Ġ æľĪ +IL ITY +åľ° ä¸Ĭ +st yle +IC E +Ġman ner +ç¬ij éģĵ +ç¢ ³ +ĠH am +ãĢĢ ãĢĢ +ition ally +Ġsu fficient +ain er +Ġ$ $\ +ĠSt ar +Ġprov ider +çĵ ľ +all ed +ĠSu pport +ed y +Ð ² +_ ERR +ĠA lex +å®Į åĸĦ +) = +J ust +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠ +Ġre ached +Th ank +èĥ ĥ +ï¼Į 好 +Ġt asks +ov es +æ°¸ è¿ľ +Ġleg is +- a +ä¸Ģ å¹´ +æľ¬ åıijæĺİ +! 
" +Ġa uthors +è·Ł çĿĢ +çIJĥ éĺŁ +Ġwh om +Pro vider +Ar t +ä¸ ¢ +_d ir +Ġalgorith m +ç»´ æĬ¤ +F ig +åħ¬ åħ± +å¼Ģ äºĨ +erm s +i ples +Ġp al +çłĶ åıij +å¿« ä¹IJ +ĠEng land +- H +D irect +Ñ ı +Ġchem ical +èµ ĸ +ir ation +ĠP age +ĠDist rict +ï¼Į åıį +ĠB re +Ġst ack +Ġ il +Ġin du +ĠM ath +åĪĨ 为 +ĠAng el +æ¢ ¯ +ri e +Ġr id +åĪĽ 建 +b ra +åĽ¾ çīĩ +w in +ĠTechn ology +LO G +or ation +red ients +å°ij å¹´ +ĠE N +ĠComm unity +] ] +åĨľ æĿij +éļ¾ éģĵ +eg a +çľĭ æĿ¥ +Ġv ill +Ġv ideos +W indow +åij Ĩ +æį٠失 +New s +_ str +è¿ĺ åľ¨ +Ġfeed back +. print +Ġmess ages +åıĬ åħ¶ +Ġopen ed +ili ar +å°± èĥ½ +Ġ ãĢģ +ĠP et +Ġinteg er +g reg +åľ¨ äºĨ +Ġwill ing +Ġc itiz +ĠSupp ose +Ġdes pite +Ġfe es +form ance +Ġtrans ition +å¾Ģ å¾Ģ +.j s +åľ ³ +R C +éĶ ¦ +æİ¥ çĿĢ +ç«ĭ åĪ» +Ġret ail +èĥĮ æĻ¯ +P osition +g age +Rel ated +æľ¬ 身 +ç¯ ® +- N +å͝ ä¸Ģ +è¿ ģ +éĿĴ å¹´ +çĤ ī +Ġ} , +Ġu pper +Ġfor get +% ) +Ġm ail +ĠJ ournal +åī ² +_ index +y ou +ä¸ĭ éĻį +' ). +èIJ¥ åħ» +ad s +icens ed +ï¼Į æŃ¤ +it a +c ase +çĬ¶ åĨµ +omet ry +è¾ ī +ĠPro t +æĥ ij +çļĦ æĸ° +Ġd ied +loy ment +Ġlist en +V ector +F ind +ä¸ ² +ç» ĺ +Ġint ended +Ġpro ve +æĺ Į +Ġequ ival +é¢ľ èī² +æī ° +, ä»İ +Ġdel ay +at o +æķĻ æİĪ +ĠTh ank +âĢĿ ãĢģâĢľ +Ġexper ts +T arget +Ġra pid +æĭ ĵ +N um +计ç®Ĺ æľº +度 çļĦ +ĠAb out +ix el +Ġdivid ed +ach ine +, 对 +å¢ŀ 强 +y e +åij¨ åĽ´ +Ġbe coming +ĠFlor ida +èĥ½ æºIJ +åį³ åı¯ +Ġs yn +æ·± åľ³ +Ġsecond s +ĠM ap +è¯Ħ ä»· +æĦŁ åıĹ +å®¶ ä¼Ļ +ĠD i +ĠGover nment +Ġb onus +Ġm ac +认 羣 +ä¹Ł 没 +Ġrep air +b in +è°ĥ èĬĤ +å®¶ éķ¿ +å¼Ģ æĶ¾ +M on +ä¹Ł åı¯ä»¥ +åŃ ķ +Ġt iss +èĤ ł +Ġbel ong +Ġdep th +å¹ ½ +ĠG ame +åģļ åΰ +st ances +st atus +Ġs umm +O ctober +( { +ĠN ov +ĠSp ring +ĠA S +å¸ ħ +u zz +Ġ ice +èIJ¥ éĶĢ +S ec +ĠSt ep +ĠF alse +Ġmot iv +åľ¨ è¿Ļ +Ġtre ated +ident ial +am pl +Ġbuy ing +ĠC ard +Ġstre et +_st ate +马 ä¸Ĭ +æĦ Ī +re ne +å¼ ¯ +ro t +Ġdescrib e +on ym +Ġread ers +Ġj ump +享 åıĹ +ĠJ an +ĠNet work +ĠC H +rou d +è¿ĩ çļĦ +Ġk id +ï¼Į åΰ +P e +Ġthous ands +ãĢģ é«ĺ +Ġfrequ ency +太 éĺ³ +"> +_ init +Ġcall back +ç¨ ¿ +Ġcomp 
ens +âĢľ æĪij +Ġres istance +f a +Ġan gle +æĦŁ è°¢ +ç±» ä¼¼ +Ġvol unte +ĠT ur +èį Ĵ +çĹħ æ¯Ĵ +çϽçĻ ľé£İ +è¢ ģ +æ³ķ éĻ¢ +T ube +Ġprovid ers +Ġclass ic +å¹¼ åĦ¿ +æĦı åij³ +am m +æıIJ åīį +Ġh idden +åıij åĬ¨ +ist ent +n c +leg ate +- ex +b our +åħ¬ åijĬ +ĠLe ague +H z +lev ision +说 çĿĢ +æĦ £ +à § +ä¿® æĶ¹ +D P +omet imes +ĠRep ly +dd en +Ġindic ate +Ġ ri +å¾Ī æľī +Ġro of +ress ive +æĬ ļ +ĠD NA +W rite +é«ĺ åħ´ +åĪĨ å¸ĥ +èµĽ åŃ£ +D M +Ġcor rel +Ġke ys +æľŁ çļĦ +Ï Ĥ +ĠK ore +æĸ¹ çļĦ +Ġmar riage +èµ Ķ +æĢ§ åĴĮ +D evice +al i +Ġd ir +Ġk illed +Cont ainer +éĺŁ ä¼į +A uthor +Ġr ide +ãĢĭ çļĦ +å® ł +_ , +éĤ£ ç§į +Ġg ather +ĠO k +return s +ĠW estern +ĠIm age +Ġdo gs +d en +Ġcharg es +Ġto w +Ġfac ility +Ġexc iting +åĹ ¯ +æİ¥ åı£ +~ \ +Ġk ill +Ġbus y +èĬĤ 缮 +T O +Ġs an +çļĦ æł·åŃIJ +Ġaccom mod +èĦ¸ èī² +Ġdel ivered +å µ +è´ ¤ +ĠK now +ĠCh icago +) çļĦ +ç®Ģ ä»ĭ +éĢ » +éģ ¥ +æĮ ĸ +å½ĵ ä¸Ń +Ġrecord ed +Ġpl ate +Ġm ale +- line +in ating +Ġt y +³³ ³³ +Ġelect ronic +Ġbe ach +Ġstate ments +Pro t +æīĵ ç®Ĺ +ĠBe ach +ce ived +Ġlo ans +Ġt ight +Ġk ick +Ġfre edom +co very +Ġdeg rees +ĠAss ert +M M +Ġappear ance +_S IZE +E l +m g +ãĢģ 大 +k it +失 è´¥ +å¨ ±ä¹IJ +Ġhas h +ï¼Įåıª æľī +. 
ch +Ġb attle +æIJ º +ĠC EO +ĠD ev +Ġd a +in ates +ä¾Ŀ æĹ§ +Ġgl ad +Ġmar ried +ä¸Ĭ ä¸ĭ +Ġbig ger +çľ ł +æĪ Ĵ +Ġtrans action +ĠU t +G u +æĮĩ æłĩ +Ġing redients +Ï Ģ +E ach +ä¸ § +Ġm al +Ġimprove ment +Ġfour th +( N +Ġrece iving +Ġequival ent +Ġv el +Ġso le +广 大 +èĤ ĥ +åįģ äºĶ += > +s v +ĠAd minist +F ebruary +åĩ ¶ +* x +çľ¼ åīį +çĽ Ĺ +-c olor +_ O +m i +- related +Ġj u +Ġst opped +两 ç§į +Ġdi am +f ill +ok ing +Ġliqu id +æIJ ¬ +ç½ij åıĭ +orm s +iz ont +* ( +Ġpub lish +m odel +çªģ çł´ +åĪĽ ä½ľ +Ġ" / +am ental +åIJĪ éĢĤ +aut o +Ġst one +Ġun iversity +Ġrequest s +ï¼Į ä¸Ń +ä¼° 计 +çĥ Ĥ +éĥ½ èĥ½ +ï¼Ł æĪij +è¿Ķ åĽŀ +Ġval uable +mer ce +è¿ĺ 没æľī +Ġd b +ĠG al +Q ue +Ġsu dden +act s +Ġre venue +, è¦ģ +Ġc lock +av en +çĽ ¯ +人 åı£ +éĵ Ŀ +éĢļ éģĵ +Ġkind s +Ġl es +ost er +TT P +m osp +Ġsuggest s +Ġ rom +in er +çī¹å¾ģ åľ¨äºİ +En c +App lication +( : +_ error +éĻ µ +Ġtell s +sh ow +b uf +ä¸į å¤ļ +Ġsh ares +EN SE +Ġ' ' +- level +Ġexp lo +ĠE nergy +ä¿¡ ç͍ +åIJ Ĭ +Ġpar se +Ġsc r +l ength +-------- ---- +ĠO ptions +Ġconsum ers +ĠÎ ± +E P +Ġaccept ed +ĠN av +* n +Ġabsolut e +Ġatt orney +Ġdis p +Ġp riv +ins ert +å¯ Ħ +Ġemerg ency +s ource +ĠSec ret +Ġspe ech +ĠK it +Ġproced ures +Ġse gment +æ¤ ħ +ost on +in ite +Ġdocument ation +æĿĥ çĽĬ +O G +ĠM A +D elta +Ġbre ast +ĠWITH OUT +éĢĤ åºĶ +Ġveh icles +ĠAfric an +Ġne ut +æĥ ¨ +Ġper mission +ãĢĤ åħ¶ +Ġ éĤ£ +Ġa id +im ary +im ens +人æ°ij å¸ģ +ãģ « +Ġre pro +Ġyou th +C or +G o +H ash +ook ie +å´ ĩ +ĠV irgin +ĠV ideo +à ¥ +ĠA ction +, åı¯ä»¥ +Sh ow +U SE +Ġind eed +ï¼Įåħ¶ å®ŀ +ab els +请 æ±Ĥ +" çļĦ +Ġ Ø +qu are +ãĢĤ éĤ£ +Ġofficial s +éĨ ī +ter day +l ights +åľ¨ ä¸Ģ +ĠD ownload +urb an +rep rene +ä¸Ģå®ļ çļĦ +å¹´ é¾Ħ +os oph +Ġa cknow +è¯Ĩ åĪ« +æīĢ ç¤º +åĤ ² +Ġc ir +ãĥ ¼ +An other +M any +Ġart ists +Hand le +x FF +ä¾Ŀ æį® +Ġwor se +çĪ ¬ +Ġext ent +ĠE conom +m essage +Ġinvest igation +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ +a ul +èģĮ å·¥ +et her +Acc ording +Call back +éĢĤ ç͍ +{ eq +Ġend s +iver se +第 äºĶ +Ġeval uation +ä¸į è§ģ +i est +é¼ ł +é»Ħ éĩij +B I 
+æĻ ´ +éĿ© åij½ +Ġ ä¸ī +ĠW all +ĠA P +nd ef +Ġcomp rehensive +Ġreli able +Ġsche me +ä»ħ ä»ħ +è£ħ å¤ĩ +ä¼ł æĴŃ +ĠSe lect +* t +Ġplatform s +Ar gs +Ġdoll ars +Ġany where +Ġestim ated +Ġtyp ical +è§Ĥ ä¼Ĺ +Ġprom pt +æĥħåĨµ ä¸ĭ +Ġexpos ure +代 çIJĨ +ï¼Įä»ĸ çļĦ +in stance +re v +ĠO ct +g ers +ĠAt l +åĪº æ¿Ģ +inter face +ï¼Į âĢĿ +Ġoffic er +ire ment +Ġhous ing +ĠP eter +ç¼ĸ è¾ij +ä¼ĺ è´¨ +çī¹åĪ« æĺ¯ +Ġst ret +åĩº çĶŁ +Ġsh ock +................ ................ +ç¥ ¥ +x ml +Ġgood s +ail ability +èĤ¡ æĿĥ +ir al +èĽ ĩ +Ġl ayout +ĠO S +Ġcharacter istics +Ġm it +Ġconcern ed +Ġ[ " +ĠM ot +æĤ Ħ +ST R +h and +_c ode +æĶ¾ å¿ĥ +åĽĽ 个 +C ar +ĠD igital +Ġmat ters +Ġco ach +æ» ĭ +æĢİä¹Ī æł· +æħ İ +æĥ § +) ãĢĤ +ä¸Ģ èĩ´ +Ġgu ests +çļĦ æ°Ķ +çĦ Ĭ +Ġapprec iate +æľŁ å¾ħ +B efore +è§Ĥ çĤ¹ +________ ________ +. prot +) ^ +h op +ribut ion +Ġk ing +Ġm g +Ġus ual +it ems +Ġiss ued +ynam ics +Ġm ilk +ĠB er +çļĦ åİŁåĽł +Ġcandid ate +Ġfrequ ently +ĠTr ust +é«ĺ 度 +Ġcomp r +ut ton +J son +ref erence +æĥ³ 象 +em pl +ç¬ij äºĨ +æij ĺ +S ave +æĬ Ľ +Ġf ellow +m ain +Ġengine ering +Ġrelig ious +Ġsav ed +å§IJ å§IJ +æĭĴ ç»Ŀ +ï¼Į ä¸ĭ +éĥ¨ çļĦ +IT E +ĠUp date +è´¸ æĺĵ +Ġpay ing +Ġsong s +Ġ( \ +r ho +li k +an ish +; ; +æį ķ +. 
put +æ³ µ +ç͵ åĬ¨ +Ġemploy ment +ag raph +Ġhealth care +ĠE U +Ġemot ional +Ġ' / +Ġturn s +麻 çĥ¦ +ãĢĤ 该 +ĠRober t +ĠAr g +ol umn +ĠP ract +Ġfurn iture +æĬ¥ åIJį +Ġmov ies +éĿ¢ ä¸Ĭ +Ġne arest +å°ı å¿ĥ +æĪĸ 许 +å¹³ æĹ¶ +ï¼Į çͱäºİ +Ġgr ade +se ct +çĥ ¤ +è´Ł åĢº +åįģ ä¸ī +Ġcitiz ens +ĠEx ception +Ġs ac +Ġmach ines +Com ment +_f l +Ġencour age +Ġ æĿİ +Ġ Ñģ +åħ¬ å®ī +åĽ¾ 书 +u ing +æ¼ Ĩ +ĠThom as +Val ues +or ough +ĠLoc al +ĠAmeric ans +_t est +Ġserv ing +Pl ayer +* d +åIJ¸ æĶ¶ +ï¼Į åIJij +m o +V D +Ġm and +åIJĮ æĦı +ï¼Į æ¯Ķå¦Ĥ +Ġout comes +* s +Ġinstall ation +åĢº åΏ +Ġant ib +r ary +ĠC ost +ä½ĵ çݰ +> & +Ġeduc ational +â̦â̦ âĢĿ +Ġfant astic +Ġentire ly +æ· ĺ +Ġpand emic +ult y +Ġspe aking +Ġr an +Â Ń +Ġm ath +- k +Ġcris is +Ġcru cial +ĠC up +Ġn odes +æĮĩ æĮ¥ +åIJİ éĿ¢ +ï¼Į æľĢåIJİ +raph ics +r b +Ġhistor ical +Ġ 第äºĮ +* p +çļĦ 主è¦ģ +r un +ush ing +æģ ° +Ġblock s +å¹¶ ä¸Ķ +typ es +u ction +ä¹Ł 许 +Ġsit uations +Ġposs ibility +En ter +Ġf ruit +) ). +ĠA L +大 人 +ĠEx ecut +ĠS er +or se +ĠS H +ä¹Ł è¦ģ +Ġcalcul ated +æ£ Ĵ +å¾Īå¤ļ 人 +äºĨ 个 +Ġus age +im ize +d ouble +Ġadminist r +ï¼Į åħĪ +- like +Ġun known +ĠR ight +ial s +çĩ ¥ +" ; +By tes +Ġc ere +äºĭ æķħ +og a +ome ga +Ġev olution +. new +æĭ Į +Ġfore st +æŃ» äºĨ +ĠB ob +åħ³ èģĶ +n ormal +ĠL ibrary +ĠCon nect +ä¸į åĩº +. 
on +, åı¯ +ĠJ un +Ġep isode +Ġc aught +_ log +Ġfab ric +Ġc abin +åł ¡ +顺 åĪ© +æ¡ ij +og rap +ell ant +.S et +Ġfl ight +th let +ap pe +ĠInst ead +æ¼Ķ åijĺ +ĠN eed +": " +Ġexpl icit +C ell +ĠOb ama +ç͵ åİĭ +* m +ç»Ļ ä»ĸ +å¾· åĽ½ +çī© æµģ +æIJŃ éħį +ĠC R +/ S +à ¨ +çĽij 管 +erv es +ĠInst agram +ï¼Įåħ¶ çī¹å¾ģåľ¨äºİ +ĠI r +çļĦ åIJĮæĹ¶ +ĠA I +up le +Ġpol l +Ġconf irmed +æĮĩ åĩº +åĤ ¬ +æĭ ¨ +纷 纷 +说 å®Į +igh ter +ãĢĤ ãĢĤ +W S +< div +ig r +æķ° åѦ +æµĻ æ±Ł +B UG +è¿ĺ ä¸į +ä¿ © +èŀį èµĦ +ĠF ederal +ä¸Ģ æĿ¡ +å¼ķ 导 +Ġcr ime +pl an +C ore +æĿ Ń +Ġdes ired +t ml +Su pport +Ġgr id +çIJĨ æĥ³ +åį³ ä½¿ +Ġs ad +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠ +è° ¨ +å« Į +du ction +Ġ ip +æ¼Ĥ 亮 +ĠW ay +è¦ģ æĺ¯ +ul ous +J ohn +æĮĩ æķ° +åŁ ĭ +è¦Ĩ çĽĸ +Le ave +大 å°ı +m ega +abet es +A v +åĢĴ æĺ¯ +. out +******************************** ******************************** +ĠF ollow +ĠB ur +åĪĨ åŃIJ +Ġpay ments +ok es +ĠB ox +ï¼Į åIJĦ +( X +大 åĪ© +å± Ī +æ°Ķ çļĦ +Ġcrim inal +女 åŃIJ +ä» ĩ +fo ot +Ġlink ed +æĢĿ ç»´ +Ġsk ill +è¡¥ åħħ +éĢļ ä¿¡ +çī § +Ġthe rm +æĸ ¤ +k ins +Ġoccur red +] : +angu ages +_ AD +Y S +Ġdec ades +/ l +Ġch annels +Ġfor ced +ul f +Ġknow ing +ĠVal ley +p ol +ĠS ource +å¿ Į +åij³ éģĵ +Id ent +ĠS QL +Ġfor th +ĠPH P +Ġ$ {\ +Ġag ents +B uild +æħ ° +Ġplan e +Ġam b +ĠL im +ĠS pe +èĽĭ çϽ +ere n +åĽ¾ åĥı +av as +clos ure +Ñ Į +AS K +Ġris ks +u ous +ĠLou is +comm on +åĩºçīĪ ç¤¾ +å¥ ´ +T ags +ĠAn other +Ġtrou ble +Ġr ough +èī ³ +Ġc ache +F igure +ocol ate +Ġex port +Ġp ump +Ġlo vely +åĩł å¹´ +( void +T wo +åħ¬ åĽŃ +ur se +ĠRuss ia +_b ack +èĬ Ĵ += ( +æľĢ 好çļĦ +Ġadv is +Ġliter ature +æ¶ Į +n ap +Ġinv al +lo or +OM P +ir ms +Ġper man +Ġpur su +ist or +Ġmass ive +t arget +Ġdesign s +N N +ĠC S +F P +Ġgen es +Ġp adding +Ġsh ipping +è´¡ çĮ® +Ġindust rial +Ġclos er +, 她 +Ï ħ +. 
Is +B it +大 éĥ¨åĪĨ +umm y +æīĭ 段 +Ġcontinu ous +b rid +Ġprinc iples +广 å·ŀ +Ġjud ge +转 æį¢ +ist ical +Ġser ial +ï¼Į ä¸ī +æŁ¥ 询 +å¹³ è¡¡ +ĉĉĉĉ ĉĉĉ +Ġass ign +åī ª +Ġf ell +å¨ ĩ +* b +Ġt ut +Ġmonth ly +æİ¥ æĶ¶ +ĠKe ep +è£ħ 饰 +çĵ · +Exp ression +åķĨ åĬ¡ +æİĴ åIJį +ä¸Ģ ç¬ij +Ġax is +{ " +ĠRuss ian +F ound +a fter +ãĢĤ ãĢĬ +ĠWhe ther +( L +ge bra +/ ( +.f ind +ç§į æ¤į +ĠB efore +ç¢ Ĺ +æĹ© å°± +ĠOr der +Ġs ister +æľ¬ æĿ¥ +Ġco vers +ĠH y +Ġsub t +G A +Ġal cohol +Im pl +å®ŀéĻħ ä¸Ĭ +Ġfav our +CT ION +By te +ap h +Ġcontrol s +ĠIns urance +ĠSh ould +Ġbeg ins +ĠW il +ĠW al +Ġt mp +Ġmain ly +Ġaccur acy +è¿ĺ èĥ½ +å·¥ä½ľ çļĦ +ĠJ o +Ġan s +M ain +Ġcontain ed +åĮ» åѦ +Ch annel +åIJ ķ +满 æĦı +Ġj ur +G en +Ġd raft +t au +Ġm el +Ġjust ice +ĠS C +æľĢ æĸ° +ĠO ption +åįļ 士 +Ġexpert ise +Ġdep art +Ġz one +Ġpercent age +t ag +ch ain +_N AME +I E +b urg +åį § +强 è°ĥ +D ownload +èģ ª +æ¸ ¡ +åľ° 说 +c uss +å°± ä¸ļ +Ġcharg ed +Ġprof it +å¤ĸ éĿ¢ +on a +sh ould +Ġann ounce +æĹĭ 转 +FA ULT +ï¼Į æĤ¨ +Ġfac ilit +ĠT own +Ġher self +) arg +èᝠçī© +Ġis land +Ġneg ot +é¥ ± +éĹ · +Ġsol ar +Ġgu ard +play er +åħģ 许 +M ake +v a +ä¸į åı¯èĥ½ +Ġcol our +åŃ Ŀ +ĠMay be +ict or +æĪ¿ åŃIJ +n el +. \ +. sp +ãĤ ĭ +å¹² åĩĢ +ip eline +æıIJ 示 +s he +ï¼Į æķħ +Ġdest ination +id ers +ĠS up +ĠD O +Ġv oc +G oogle +* c +Ġ äºĮ +çļĦ ä½ľç͍ +,èĢĮ ä¸Ķ +i pt +ĠB lock +Ġrecogn ized +ãĢĤ ï¼Ī +Ġcont ents +Ġt ough +Al so +âĶ Ģ +Î » +Ġc ategories +* i +Pro perties +Ġindic ates +Ġ å¦Ĥæŀľ +Ġn urs +åİ» å¹´ +ï¼Į èĥ½å¤Ł +Ġp in +å½ ¹ +åĵ¥ åĵ¥ +}$ , +Ġcriter ia +举 åĬŀ +第 ä¸Ģ个 +ĠD C +说 è¿ĩ +se cond +S ession +ul ner +_l en +Ġreli ef +t emplate +Ġest imate +æĺ¯ è°ģ +On ly +Ġeval uate +åı ł +Ġinter act +l ers +Ġhouse hold +R ect +* f +Ġ ä½Ĩ +Ġanaly ze +æĪIJ éĥ½ +è¾ ¨ +* y +ro g +Ġmat ches +æµ ´ +Ġhundred s +çĤ¹ çļĦ +raz y +èµ¶ ç´§ +Ġimag ine +r ance +m ond +æ³ Ħ +Ġe lev +ï¼Į å¼ł +à ¦ +Ġto m +ĠF re +Ġindic ated +åı¬ å¼Ģ +ĠMore over +Ġpers ons +Ġas ync +æIJľ ç´¢ +à ¢ +Ġse ed +èµ° åΰ +. 
k +è¿Ļ ä½į +( z +us es +ç»ı åħ¸ +Ġsc an +Ġcomm ission +Ġsu c +A ST +à £ +çī© çIJĨ +æĦ ¤ +Ġvers ions +çļĦ 第ä¸Ģ +Ġform ation +- W +Ġbring ing +å¼Ģ åħ³ +_ res +put e +.t xt +çİ°åľ¨ çļĦ +å°½ éĩı +Ġâ ī +ĠÏ Ħ +Ġappro aches +ãģ Ĺ +pen ame +çĭ¬ çī¹ +æĢ» ç»ĵ +è± « +ĠCont act +Ġcreat es +åįĥ ä¸ĩ +ä¼ļ åľ¨ +Ġc rack +B ig +âĢĶâĢĶ âĢĶâĢĶ +IC ENSE +d ict +ĠS E +Ġex ceed +ĠSt at +ä¸ĵ éŨ +sw er +Ġwe alth +Ġexist ence +Ġrefer red +ĠE R +Ob j +. V +å¼Ģ å¿ĥ +ac le +Ut il +* r +ä¾ į +产åĵģ çļĦ +Ġocc urs +æIJ ħ +Ġl ux +ĠD u +ç½Ĺ æĸ¯ +Ġcandid ates +è¿Ľ æĿ¥ +Ġj oy +Ġbr ands +Ġf ly +P ay +çł Ĥ +ĠJohn son +ĠHot el +å°º 寸 +ĠMin ister +åĭ ¾ +Ġgrant ed +Acc ount +ĠA ward +ç»ĵ å©ļ +f ont +ä»Ķ ç»Ĩ +Ġcommit ment +Ġoccas ion +èĸ ª +riend ly +饮 é£Ł +Ed it +N e +é¢Ħ 计 +| | +çĺ ¦ +w ater +Ġinf ection +é«ĺ 级 +Ġre new +ç³»ç»Ł çļĦ +requ ire +Ġre ct +Ġdanger ous +izont al +iff erent +Ġw el +çŃī å¾ħ +iss ions +Ġout door +ãĢĤ 对 +åĴ ĸ +'] [' +od d +æķ´ çIJĨ +è¿Ľ æŃ¥ +Ġst able +åĩº çİ°åľ¨ +åħ¬ 主 +Ġinf rastructure +Ġh ook +羣æŃ£ çļĦ +/ lic +T witter +ä¼ł æĦŁ +R un +Ġmechan ism +Ġexpl an +ï¼Į å¿ĥ +ul ing +åį ¢ +çīĪ æľ¬ +ert s +å°± åĥı +Ġd ialog +ĠCom ment +åħ¬ ä¼Ĺ +) ] +r int +Ġneighbor hood +çļĦ æ°´ +çIJ ³ +Ġchalleng ing +åķ ¥ +æ³ķ åĽ½ +* a +ï¼Į æıIJé«ĺ +Ġ .. +Ġsw im +ĠFin ally +Ġde als +p ass +åŀ « +é»ij èī² +ï¼Į éģĵ +èĤ ļ +Ġimp ossible +åĪ ł +ĠPr int +æµ Ĩ +Ġg ro +U ID +åľ° ä½į +Ġun able +Ġlim its +Col lection +raint s +Ġat mosp +ĠJ im +aw ay +ä¼Ļ ä¼´ +å±ŀ æĢ§ +j i +H igh +Ġnav igation +ĠRich ard +ĠL oad +ï¼Į è°ģ +æĻ ° +add r +H ave +Ġen abled +Ġre put +I RE +红 èī² +è¿ Ī +Ġtiss ue +Ġconcent ration +Rec ord +Ñ ĩ +Ġlaun ched +å¼Ģ åIJ¯ +ä¹Ł å¾Ī +Ġli ber +ĠEn vironment +D el +str ong +contain er +Rec ent +Ġmus cle +ï¼Į éĥ½æĺ¯ +åİ Į +osp it +åĵ ¼ +ult ural +大 äºĨ +Ġpl ug +Ġel ig +tern oon +* g +Ġc ro +人 å®¶ +Ġmonitor ing +æŀľ çĦ¶ +Ġguarant ee +æĶ¶ éĽĨ +æĦı å¤ĸ +amb ling +a is +åĨł åĨĽ +* v +. âĢĻ +使 å¾Ĺ +è´Łè´£ 人 +Ġw arn +Ġ æĹ¥ +ĠL os +Ġd uty +ĠN or +é¢Ħ æµĭ +_ en +. 
z +Ġpresent ation +iz z +od ule +os ph +çĹĽ èĭ¦ +çł ĸ +yt es +R T +_ url +å·¨ 大çļĦ +Ġp ed +Ġh un +ĠJ ose +Ġmed icine +天 æ°Ķ +Ġpr ison +Ġarriv ed +eng er +èĦ Ĩ +Ġn ine +um es +ed it +Ġb es +Ġrespons es +Ġconf lict +{ d +.s ub +r ition +IG N +åħ¶ å®ĥ +Ġv en +Ġhe ro +Ġs izes +Ġen v +主 æĮģ +que ue +Ġresearc hers +rodu ction +a ver +æ¡ Ĥ +G ood +Ġb an +Ġmanufact uring +def ined +ĠTh rough +å¾ĭ å¸Ī +Ġco ast +Ġcalcul ate +ï¼Į çİĭ +Ġcons c +Ġsupp lement +çħ ® +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠ +Ġsu p +Ġne ck +ĠE p +our se +ĠâĢ ¦ +Ġord ered +j u +ãĤ Ĵ +it z +pro ject +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠ +ãģ Ł +avas cript +* h +Ġsp ons +Ġph arm +çľĭ èµ·æĿ¥ +Ġout come +Ġrequest ed +* k +.c ore +人 士 +in em +n umber +设置 æľī +åıij éĢģ +æħ Į +Ġdet ection +_ array +ĠJapan ese +reg ion +Ġvis ible +qu ir +çģ Į +æĪIJ çĨŁ +ĠP ass +* w +ï¼Į 缴æİ¥ +* z +Ġlight s +H ost +éħ · +B ody +ad min +à ¥ +å¿Ĺ æĦ¿ +æĴ Ĵ +è´¢ æĶ¿ +Ġin n +Ġso il +ip her +_ class +Ġnames pace +n ic +ĠC y +åIJĮ ä¸Ģ +Ġbrow n +Ġfood s +Ġexecut ive +. res +Ġinform ed +Ġb acter +åħ¬ è·¯ +管 éģĵ +? : +}} \ +Ġte a +åŃ©åŃIJ çļĦ +Ġparticip ate +Ġen force +Ġcent re +IM E +Ġtell ing +ç͵ æºIJ +åIJĦ ç±» +è¯ķ éªĮ +举 è¡Į +ĠCont ent +N C +å®¶ æĹı +Mat rix +Ġachie ved +åıijè¡Į 人 +* l +Ġreal ized +Ġreport ing +_t ext +* q +èĤ ¾ +w as +Ġ{ } +Ġreg ulations +å¥ĩ æĢª +Ġcont ribute +_ read +Ġinvol ves +çħ ³ +re ens +Ch anged +е н +åĨľ æ°ij +% çļĦ +Ar ch +l ass +* u +Ġdri vers +Ġb ike +Ref erence +AL SE +举 æĸ¹ +Ġnecess arily +Ġo x +, - +ï¼Į æł¹æį® +äºĨ åĩł +Ġatt ributes +åı¯ä»¥ åľ¨ +缴 æĴŃ +åĶ ¤ +ĠM useum +( u +.... .. 
+ĠAs ia +Ġd ict +ç®Ģ 缴 +æĺ¯ æĮĩ +It ems +ï¼Į åĬł +Ġwe aring +åĽĽ å·Ŀ +çļĦ åŁºç¡Ģ +éĺ ģ +//////////////// //////////////// +r ig +ĠS ports +纤 ç»´ +Ġme at +Ġstud ied +Ġcap able +Ġch art +ĠS ar +Ġmod ified +Ġinvest ors +Ġinsp ired +* j +ï¼Į 为äºĨ +ub y +Ġext ensive +Ġimplement ed +Ġcorrect ly +ï¼Į æīĭ +Ġinstitut ions +um ps +ĠS ite +æĬ ¼ +Ġb inding +ĠO p +å» ī +Ġrepe ated +ãĢĤ èϽçĦ¶ +Ġman ual +Ġcompet itive +u ilt +el ly +ic an +ĠL ive +ion e +Ġ\ " +æĬĢ å·§ +ä¹Ł åľ¨ +_P RO +Ġcont rolled +ä¸ « +âij ł +åı ī +_C H +Ġcl ar +çĤ¹ äºĨ +l ife +å²Ĺ ä½į +ä¸Ģ å®¶ +åįģ åĽĽ +大 å¤ļæķ° +æ¬ º +ĠÐ ² +éĶ IJ +æĴ ¤ +Ġalleg ed +ĠConf erence +I ter +ĠAnal ysis +主 ä½ĵ +Ġb atch +Ġnot iced +ç¬ij 容 +Ġres id +Ġch ief +Wh o +ĠB O +Ġplan et +st ood +(f unction +Ġorgan ic +é¢ ĩ +ĠTra vel +Ġb its +ang o +Ġe c +and on +( F +hel p +Ġinv ent +Ġcrow d +Ġbl ess +W idget +äºĭ åĬ¡ +Ġp apers +Ġcomp at +et ry +ĠL ee +V ol +Act ivity +and roid +Ġj ury +å°Ŀ è¯ķ +æĪ¿ å±ĭ +åIJĪ å¹¶ +app oint +ĠR un +Ġs oul +è¡ ° +G S +å¿į ä¸įä½ı +âĢľ The +Ġacc ord +c ar +ä¸į ä¸Ĭ +åł ª +ĠCent re +G r +( true +ĠAr ts +ãĢĤ è¿ĻäºĽ +ó n +åĪ© äºİ +ire ctions +Ġhappen ing +_m ap +Ġm ur +Ġor ient +æĪij æĺ¯ +ĠPl us +ĠW in +Ġregist ration +åĨ » +p ub +èŀį åIJĪ +ĠU I +ĠF L +Ġar bit +Ġre ward +ĠT our +T emplate +Ġfor t +Ġperfect ly +å·¥ èµĦ +æī ® +Ġpurch ased +大 æ¦Ĥ +ĠSystem s +è£ ¤ +ï¼Į åĽł +ï¼Į åīį +ĠI L +åįģ åħ« +Ġpro port +Ġme al +ãĢĤ å¦Ĥ +æī ĩ +ĠM en +æİ © +çIJĨ çͱ +毫 æĹł +u an +ĠT ax +çļĦ 身 +ĠB oston +ĠPl ace +æ³ķ è§Ħ +ic ing +Ġl ib +Ġunc ertain +J oin +çĶŁ éķ¿ +ur able +t ab +Ġwarrant y +线 ä¸Ĭ +Ġassoci ation +æĭħ ä»» +He alth +åĬ Ŀ +ï¼Į æĸ° +Ġoffic ers +V M +Ġcare fully +il ton +ï¼Į æ¯ķ竣 +ateg y +oper ator +åIJĥ é¥Ń +_ EX +* o +_ user +Ġcon clusion +æİ¢ ç´¢ +ĠC ross +ĠReview s +Ġder ived +Ġgrow n +Ġhard ware +ĠC irc +L eft +Ġup load +cent er +Ġcert ificate +Ġregular ly +æĺ Ĩ +Ġkeep s +Ġform al +æµ ij +èĭ ¹ +Ġdes c +Ġhum ans +å© ´ +Ġrepe at +t ons +Ġsuc ceed +éĩĩ è´Ń +çļĦ æĬĢæľ¯ +Ġs ick +Ġco e +çĮ İ +åıĮ æīĭ +Ġmom ents +Ġguid ance 
+Ġadv oc +æŁ¥ çľĭ +ĠW H +ï¼Į åºĶ +( ! +is a +-m ail +Ġde posit +åĽ Ĭ +æħ Ī +åijĪ çݰ +ow nt +est yle +åįģ å¹´ +.A pp +èº º +[ j +ĠG ive +们 çļĦ +ä¹Ł ä¸įä¼ļ +{ R +Ġfig ures +p ons +s im +Ġcl uster +om es +Ġj ack +äºĨ åIJ§ +è¡Ĺ éģĵ +æ£ ī +ĠTh ose +- off +Ġcapt ure +ab b +Ġbelie ved +çŃī 级 +ĠC urrent +åıij çļĦ +Ed itor +Ġrepresent ed +Ġt aught +oper ation +åIJĦ 项 +ĠArt icle +pon ents +f riend +Ġp or +æ²³ åįĹ +é¢Ħ éĺ² +ĠI F +Ġt ast +atur ally +Ġ\ \ +Ġ ä¸įè¿ĩ +åħ¬ å¸ĥ +äºĨ ä¸ĭæĿ¥ +Ġpart ial +Ġk m +ĠL i +éĺ Ķ +ï¼Įä¸į è¦ģ +åħļ åijĺ +ĠF ROM +Ġbr anch +ĠCh ildren +Ġpark ing +Ġafford able +Ġcut ting +ĠE ss +_ X +low er +Ġprogram ming +Ġconsequ ences +is p +vent ional +ĠP op +Ġcons ists +Ġsubject s +othes is +éĢ» è¾ij +èij ¡ +ĠCh ief +ãģ ¦ +æĹ¶ å°ļ +ĠApp lication +ut ter +Ġ å¼ł +Ġviol ence +ĠUS B +å¾ ½ +Ġrecogn ize +-s ize +ser ver +Ġg ender +å² ³ +äºĨä¸Ģ äºĽ +e en +Ġshe ll +æĹ º +Ġinst ruction +ĠE mail +æī¿ 诺 +Ġar ms +ĠWARRANT IES +equ als +i ary +ĠJ ones +Ġfl ags +Ġd ance +Ġtest im +Sub scribe +P RO +强 度 +Ġinnov ative +è´ ¾ +ĠW atch +/ C +s d +éĥ½ 没 +Ġconstant ly +Ġwhe el +Ġch apter +oc ial +Ġs ections +( string +c ard +fig ure +se m +ï¼Į 转 +D ocument +åı¦ ä¸Ģ个 +Ġguid elines +Ġcrit ic +ä¸Ģ ä½į +ĠH en +ï¼Į å¸ĮæľĽ +大 åŀĭ +Ġown ed +æģ ¼ +Ġpass ing +ãĢĤ å®ĥ +G reat +Ġprem ium +Ġexp ansion +ĠW rite +æ±Ł èĭı +rit ten +Ġfil ms +Ġsh oes +çĥ ¯ +å¯Ĩ å°ģ +ï¼ī çļĦ +çķĻ ä¸ĭ +Ġbuild ings +èĢĮ ä¸Ķ +_T R +Ġatt ached +ĠH aw +æĪIJ å°± +Ġb read +Ġbo ys +ãĢĤæŃ¤ å¤ĸ +Ġre plic +Ġexpect ations +å®ĺ æĸ¹ +ĠSt ore +c ap +ĠÎ ¼ +Ġdim ension +c raft +F ull +ä»İ æĿ¥ +åİŁ çIJĨ +Ġshe et +åģľ æŃ¢ +æ¯ı 次 +Ġf o +ï¼Į éϤäºĨ +Ġfund amental +we ek +ach ment +ĠMart in +Ġsex ual +å¢ ĵ +æĦıåij³ çĿĢ +æĭį æijĦ +Ġcook ing +å¼ ¥ +Ġslow ly +f c +çŁ Ľ +åı¯èĥ½ æĺ¯ +Ġs essions +çĸ ² +æİ¥ ä¸ĭæĿ¥ +æµ ¸ +太 å¤ļ +ar ation +é ³ +or al +ul s +æµĭ éĩı +Ġf old +onym ous +ĠÐ ½ +访 éĹ® +ãĢĭ ï¼Ī +Ġch icken +P N +çϾ å§ĵ +ĠS elf +å°ı åĮº +Ġdecre ase +at re +éĿ¢ 临 +Ġm ock +Ġinteg rated +Ñģ ÑĤ +av ed +Ġinstr ument +åºĶ该 æĺ¯ +AR D +ä¸Ģ éĺµ 
+Ġte ch +fol io +Ġg lo +En abled +è¾ ½ +Ġun iform +it led +! âĢĿ +ex tern +ĠP ART +_ERR OR +ãĢĤ åı¯ +Ġdig it +å¤Ħ çļĦ +Ġd ump +ä¸Ģ åIJį +Ġdo ors +åł µ +ĠB ible +ur i +_st art +ãĢĤ è¿Ļ个 +r ase +M ark +ĠGe org +Ġconsider ation +ĠTra ining +Ġn om +ath y +ä¸Ń åѦ +RE S +æĹł æķ° +éĥ¨ éĺŁ +ĠS ize +As ync +st ore +çĥĪ çļĦ +ian a +s in +åº ŀ +. a +æĽ´ 好çļĦ +l ate +Ġte eth +.print ln +æīĢè¿° çļĦ +y es +æ¸ħ æĻ° +Ġcou pl +as ic +Ġ river +N ames +èĩª 主 +cer ning +çĭ ± +Ġg al +æŃ¦ åύ +} (\ +_n um +ĠCan adian +示 æĦı +ä¼ģä¸ļ çļĦ +ĠA rab +ĠP ath +Ġro les +Ġl unch +çł´ åĿı +Ġsurround ing +çĽ Ĩ +ï¼Ł ä½ł +åĪĻ æĺ¯ +Ġrout ine +Oper ation +室 åĨħ +çŃ ĭ +ç»Ĩ èĬĤ +ĠAngel es +Ġy ellow +w est +A pi +at i +- E +ï¼Į æ¯Ķ +ĠEngine ering +èĨ Ģ +ĠMex ico +ĠG ar +æ± ģ +Ġwall s +ĠComm on +ĠCol l +k es +ĠE st +ä¸į 管 +æŃ¦ æ±ī +åĮº åĪ« +ãĢĤ çİ°åľ¨ +Ġ/ > +Ġnorm ally +éĩį åºĨ +ĠL ess +强 åĮĸ +Ġnuc lear +ĠP rivacy +æ²» çIJĨ +. text +ĠP S +Ġl atter +Ġgu est +è¢ ĸ +it is +Ġact s +å±ķ å¼Ģ +Ġoper ate +ĠP DF +Ġmiss ed +æĺİ å¤© +å« ģ +ĠFin ancial +erm an +. 
In +Ġpotential ly +Ġp it +Ġinteg ration +w ing +æİ¥ è¿ij +Ġind epend +Dis play +Ġaccess ible +对 åºĶçļĦ +æĪIJ çļĦ +ield s +ĠH ard +çļĦä¸Ģ äºĽ +Ġexperim ents +ĠD eb +åĺ ¿ +Ġwid ely +ist ed +o op +ĠF unction +# ' +ĠSc ot +Ġprior ity +Ġaf ternoon +ĠC lean +åįĹ äº¬ +ategor ized +Ġfac ing +Ġ `` +Se cond +天 çĦ¶ +æ· ĭ +Ġpl anned +Ġa st +Ġrepl aced +c or +ä¸Ģ çīĩ +ç»Ļ äºĪ +h av +enc ing +è´ º +éĥ½ åı¯ä»¥ +ick et +Ġelect ron +Ġequ ations +è´ ¼ +C lose +Ġf ro +Ġun ion +ĠC BD +Ġf ib +An al +Ġbath room +ä» ² +Ġcontinu ing +ãĢĤ æį® +_ per +it ar +; &# +Ġcur ve +Ġcirc uit +èĦ ¾ +Ġ" , +Ġattack s +æŁIJ äºĽ +Ġv eter +ĠW orks +il arly +æľ¬ 人 +Ġdis miss +ce ive +]{ } +è¯ ± +Ġi Phone +æĺ ı +ä¸Ĭ åįĩ +åİī 害 +x i +Ġsubstant ial +Ġdi verse +Ġ å½ĵ +ĠG ames +èĭ¹ æŀľ +ĠO il +ap se +åIJ ŀ +ĠC ir +ĠDef ault +Ġclos ely +éĨ ĩ +ä¾Ŀ æ³ķ +ç© Ĩ +or gan +Ġcon crete +è° IJ +ĠL ow +_ UN +Ġel if +Ù ħ +ru g +åĨ³ çŃĸ +çľ ¸ +æĥħ æĦŁ +æ¸ł éģĵ +b rief +/lic enses +P arser +V er +ĠP enn +Ġt ou +Ġab use +æľī å¤ļ +Ġ æĺ¯ +Ġinnov ation +Ġ( [ +ç©¶ 竣 +ĠS erver +at he +ç¥ Ń +clud es +. __ +Ġte levision +ĠFurther more +Ġpen al +è°¢ è°¢ +ã o +LO CK +ious ly +çŀ § +I AL +hold er +Ġconven ient +ĠW alk +}$ . 
+Ġobject ive +hold ers +Ġadd s +æĤ Ķ +èĢ ¶ +ĠYou ng +ax is +éĥ¨ ä»¶ +-> _ +è´§ å¸ģ +å¨ ĥ +Ġbo at +_ of +T rack +ac ific +ad v +ä¸Ĭ åįĪ +Ġult imately +F loat +临 åºĬ +Ġfeel ings +g b +æij Ĭ +i ac +ĠB E +ore t +æĻ ĥ +Ġhor se +Ġfound ation +: \ +f oo +ĠV ector +, å°Ĩ +Ġprom ise +Ġsc al +V ideo +as hed +Ġinc ident +ĠC ast +ãĢĤ 对äºİ +èī ° +Ġconcept s +ĠPar is +è§£ æŀIJ +ĠAre a +Ġn a +æĪ¿ åľ°äº§ +r ink +ter y +const ruct +Pl ay +Ġs au +st al +( G +t ed +ï¼Į äºİæĺ¯ +Ġdise ases +\ _ +Ġj e +{ } +Ġconnect ions +Ġinst ruct +ä¸Ģ ä½ĵ +ç͍ æĿ¥ +Ġsecond ary +.d e +Ġad j +Ġinter ior +Ġtrans formation +ĠCh ris +ï¼Į éķ¿ +' | +æĪij 说 +å²ģ çļĦ +æķ ² +Ġposs ess +_ con +( err +Ġfol ks +åij¨ æľŁ +.n ext +F E +Ġopp osite +ĠB al +b ur +w s +re nt +æ¯ı ä¸Ģ个 +Ġdel icious +é¼ĵ åĬ± +Ġrequire ment +/ M +S im +å·® å¼Ĥ +Ġl ift +Ġt ub +l ot +ãģ ª +Ġ\ < +sh ot +ĠT ry +ĠM D +est ions +论 æĸĩ +ĠVirgin ia +ç» µ +Ġne ither +æĪIJ åĪĨ +_n ode +åĩºçݰ äºĨ +Ġwor st +al igned +ĠS uch +Ġsc ores +W riter +Ġth in +äºĨ ä»ĸ +ĠRes ources +Ġamount s +ĠC ase +âĢľ ä½ł +Hel p +å¥ ij +Ġab stract +S QL +Ġm ouse +èĩ³ ä»Ĭ +Ġen roll +Ex ecut +W ill +Wh ich +åѦ æľ¯ +re ement +çĬ¯ 罪 +ĠP ersonal +éĥ ¡ +Ð ¼ +B B +re land +_b y +èĢĥ çĶŁ +Ġthem es +ç¼ ¸ +éĩį çļĦ +éĹ®é¢ĺ çļĦ +人 ä½ĵ +ä¸İ æīĢè¿° +ĠM ult +) + +ãĢĤ çͱäºİ +æľī å¾Īå¤ļ +Ġbe ar +St orage +Ġper mit +f rame +ï¼Į ä¾ĭå¦Ĥ +Ġsens itive +Ġv irt +od s +t age +Ġaware ness +Ġturn ing +pl ore +Ġreg ional +Ġdis h +AP P +åľ° çIJĥ +Ġworld wide +产çĶŁ çļĦ +Ġon going +og ether +ç±» çļĦ +Ġhand ling +ä¿® å¤į +害 æĢķ +Ġv ulner +Ġsurpr ise +h us +æľĽ çĿĢ +âij ¡ +è¾ ĸ +d et +è¯Ħ 论 +æĢ§ æł¼ +ep th +ĠYou Tube +åĬ¨ çļĦ +åĩ ¸ +ĠAdd itionally +æ¶Ī éĺ² +ĠL earning +r as +ĠM a +Ġeng age +T L +ĠR oom +Ġag encies +å·¥ä½ľ 人åijĺ +ĠWord Press +梦 æĥ³ +AV E +Ġcl othes +ĠL td +Ġs ys +Ġne uro +un ctions +ĠH ospital +Ġbed room +åıį æĺł +èģĶ çĽŁ +Ġmet ab +Ġr ig +che dule +ĠLe vel +cl ip +ential s +i ot +çϽ èī² +Ġcommon ly +ĠR o +äºĨ åIJĹ +st one +åįı è°ĥ +çļĦ åĨħ容 +Ġm ice +Ġvisit ing +æĪª èĩ³ +it ivity +Ġunder 
lying +W orld +ĠCal culate +èĩª è¡Į +éŃ ħ +ĠP o +Ġdeal ing +AN G +acc ount +èIJ Ħ +é¢ ¤ +k m +带 æĿ¥çļĦ +Ġp il +ĠJ oe +æ¸ħ æ´ģ +ãĢĤ çĦ¶èĢĮ +let on +Ġcr usher +l ay +Ġtrend s +çĶŁæ´» ä¸Ń +ç»´ æĮģ +A IL +Ġv ast +Ġb odies +æ¶ Ľ +Ġinter actions +âĢľ We +Ġstand s +æ£ ĭ +ä¹Ł èĥ½ +C F +ĠM ike +Ġfair ly +Ġw arning +ie ce +转 ç§» +ab il +æ² ĥ +èĩ Ń +Ġlist s +åİ ¦ +V I +Ġre cept +Ġr ating +Ġex clusive +æ¸ħ æ´Ĺ +Ġincred ible +ĠW ild +ç®Ģåįķ çļĦ +ä¼¼ çļĦ +ä¸Ģæł· çļĦ +æľī 个 +B log +S M +头 çļĦ +ãĢģ ä¸Ń +UL AR +表 æĺİ +éĶ » +åįĬ å¹´ +å¾Ī 大çļĦ +é¢ Ī +æĢİä¹Ī åĬŀ +Ġanx iety +rib le +Ġsk y +' ], +ï¼Įä¸Ģ èά +Ġn arrow +Ġwhere as +b reak +T otal +è¿ĺ ä¼ļ +c f +æģIJ æĢķ +ĠC ur +Ġl osing +Ġmov es +ĠP ot +ĠB ow +. con +ĠP ut +U ST +but ton +Ġlik ed +ĠR ights +Ġimpro ving +} = +Ġrem ark +ess ages +Ġyes terday +, y +ĠSup reme +ĠPro perty +as tern +Pre vious +m ic +Ġl ip +使ç͍ çļĦ +RE SS +ĠStud ents +Ġlist ening +ĠAcad emy +Ġoption al +Ġv ital +Ġ âĢĿ +é¦ ¨ +D ialog +as ters +ĠG D +_ OP +Ġg rew +Ġinval id +çŀ ª +Ġarr ang +èij¡ èIJĦ +æľī 没æľī +Ġquant um +ãģ ¯ +ĠJe ff +P ublic +æī© 大 +èĥ ģ +P arent +è¶³ çIJĥ +n n +ĠInd ex +T V +åįģ åħŃ +Ġserv es +Ġcont roller +ial ize +Ġh urt +ack s +ol ving +ç¢ ij +Ġ! == +Ġt ip +ol ver +Ġl augh +Ġconsum ption +Ġh ate +çĽij æµĭ +n ab +ç²¾ 彩 +åķ ¡ +ĠCarol ina +L a +ĠU k +ä½Ļ é¢Ŀ +Ġdemonstr ated +Ġqu ote +å°ij 女 +Ġcirc le +ĠJ SON +Ġintrodu ction +ĠM ur +çĿĢ çļĦ +ï¼ģ æĪij +æıIJä¾Ľ äºĨ +par se +Ġtax es +id x +Ġprim arily +æĶ¹ éĢł +ra id +t mp +ãĢĭ ãĢĬ +æĢ ľ +æĹģ è¾¹ +% . +åĿ ij +çľĭ åΰäºĨ +Ð ´ +_f or +Ġe ase +ä¸ Ļ +Ġth reshold +认 è¯ģ +éĢĤ å½ĵ +Ġs pl +æľĪ 份 +ut ch +u ce +Ġdirect ed +æ¯ ħ +Pr int +缸åħ³ çļĦ +pl us +ann a +Ġs ou += False +Ġt error +ä¼łæĦŁ åύ +Ġs ending +ĠSt ock +ĠAdd ress +( key +æ¤ Ĵ +as ant +çݯ èĬĤ +丰å¯Į çļĦ +{ align +if er +å°ı ç»Ħ +æł¡ åĽŃ +g ener +çº ½ +P op +Ġass igned +P anel +Ġbirth day +Ġimmed iate +it ative +ĠI ss +Ġredu cing +Ä ģ +èµ ł +ef ined +(f alse +.t ype +\ left +S erial +_t able +ï¼Į åĪ« +ãģ ¨ +åĨħ å¿ĥ +ul er +åĩº æīĭ +ĠStand ard +. 
Text +ãĢĤ è¦ģ +E rr +Ġl ayers +æ¡ ¶ +Ġintellig ence +Ġtra ined +Ġapply ing +Al though +ĠI de +r ick +æŁ ı +ĠW at +è¿ĺ åı¯ä»¥ +_l ength +Ġin line +Ġbas ically +Ġed it +( ctx +çļĦ ç¥ŀ +Ġpolit ics +说 ä»Ģä¹Ī +c ur +ç§ij çłĶ +Con f +L Y +ä¸į åıĬ +act iv +æĺ Ĥ +æĥ³ çĿĢ +ap ing +M sg +ĠM id +Î º +D own +ĠSt ory +åѦ ç§ij +d i +è¿ĩ 滤 +n am +Hel lo +lis her +ï¼Į æĿ¥ +C ategory +ãģ § +ĠIt alian +ict ed +Ġflow ers +æł¹æį® æĿĥåĪ©è¦ģæ±Ĥ +Ġbl ank +H as +Ġl ies +Ġin ches +. Error +ĠN ever +å¤ļ 次 +D IT +éĻ Į +il ly +Ġst yles +Ġperform ing +çĥŃ æĥħ +Ġsil ver +Ġs ight +Ġp p +B R +ï¼Į çľ¼ +end or +_s ub +两个 人 +, b +éľ² åĩº +des cription +ç¢ § +ä¼Ĺ å¤ļ +ç»ĥ ä¹ł +æŃ¤ 次 +éĤĢ è¯· +H ttp +æĺ¨ 天 +V C +è¾ħ åĬ© +ĠP aram +åĴ ³ +Rev iew +Ġstrong ly +lement ary +Ù Ĩ +æ¡Ī ä»¶ +r ange +O U +导 æ¼Ķ +Ġth orough +å¿ĥ æĥħ +ĠM essage +ĠM ic +森 æŀĹ +ĠS P +Ġ ± +let ion +Ġexp enses +are n +Ġ' \ +or ne +_ out +æ¸IJ æ¸IJ +AR Y +Ġcontribut ion +ä¼ļ åijĺ +ĠÃ Ĺ +ost ic +ãĢģ ä¸į +ĠAut o +Ġsustain able +Ġarchitect ure +Ġ ë +Ġcho osing +èļ Ģ +ay lor +Ġtrans mission +ï¼Į çľĭçĿĢ +it o +强 大çļĦ +ç¼ ł +ric es +Ġext end +W ARE +主 å¸Ń +eps ilon +ĠMed icine +un a +Ġbreak fast +Ġinvol ving +è¾ © +w d +em on +er red +çļĦ ä½įç½® +Ġ 第ä¸Ģ +Ġorigin ally +Ġhous es +çŃĶ åºĶ +Ġcount y +ĠL ab +审 计 +âĢ IJ +in f +ber ry +åĴĸ åķ¡ +RE F +åĸ Ĥ +F ont +æľ¬ 书 +typ edef +t k +Ġbut ter +ãģ Ļ +. 
To +ĠL I +Ġless ons +ĠInt eger +Ġl ie +s i +å°Ĩ ä¼ļ +Ġte en +il a +人 æķ° +le ases +åĪĨ 离 +Ġmagn etic +ĠC ath +Ġsm ile +Ġspirit ual +åħ³ å¿ĥ +col umn +ĠAg ain +us r +ĠP A +á ĥ +ĠSp irit +ĠF ed +è¯ģ 书 +带 åĬ¨ +éģĵ çIJĨ +reg ister +åΰ è¾¾ +éŁ µ +Ġill ness +z y +ĠL LC +ĠAustral ian +Ġmill ions +æĺ¯ è¦ģ +æı Ń +D D +èĪ Į +åľ° çĤ¹ +OD O +B L +æĸ © +Ġanal ys +M r +#if ndef +æĺİ çļĦ +çĸ Ĩ +Ġphot ograph +N ESS +ev al +ãĢĤèĢĮ ä¸Ķ +ç®Ĺ æ³ķ +Ġm i +ĠM iddle +Ġad m +å§ » +å±ı å¹ķ +ĠT ri +æĢĢ çĸij +Ġpet ition +åį° è±¡ +æĭ ¾ +Ġpres ents +> , +N O +in ator +ĠH TML +) -> +ä¹Ł å°±æĺ¯ +强 大 +åĬł çĥŃ +$ this +Ġregard less +Ġtal ent +Ġperson ally +AT US +an al +ol ly +表 æ¼Ķ +çĭ IJ +Ġt ill +_c ast +ĠWe bsite +R ad +LE D +宣 å¸ĥ +ĠSm all +Ġcollabor ation +ĠF our +Ġexperim ental +ï¼Į æīĵ +os c +åį³ å°Ĩ +ra pe +å¸ ķ +Ġign ore +Ġcheck ing +Ġprote ins +_RE G +èĥľ åĪ© +Ġb anks +Ġpe ak +ok ed +cl oud +ĠI ran +Ġh ole +ĠI II +Ġmin imal +ĠS at +éĵģ è·¯ +空 è°ĥ +Ġfind s +å¹³æĸ¹ ç±³ +Ġ urban +Ġdraw ing +ï¼Į åIJ¦åĪĻ +Ġqual ified +åº Ļ +Ġd ates +Ġ ä¸Ń + ® +edd ed +Ġprefer red +Ġn arr +M I +ĠO pt +ç³ ķ +ç½ IJ +ĠIs lam +Que ue +éĶ Ī +ĠCl oud +Ġver ify +- com +the re +ä¸įå¾Ĺ ä¸į +èĪĴ éĢĤ +o ices +on al +Q UE +èĶ ¬ +ĠS ep +ble m +身 åIJİ +åīį éĿ¢ +ch i +ĠSp ace +Ad vert +Ġcon cerning +T oday +ag ues +èĦij è¢ĭ +ĠSh are +Ġvill age +èģĶ èµĽ +Ġhab it +Ġstat istics +æķĮ 人 +æ¯ı å¹´ +Ġre verse +U nder +ĠS ales +çĽ¯ çĿĢ +æĮ £ +èĥ Ģ +_st atus +ĠT reat +. 
end +æĶ¶ åΰ +imens ional +ĠCl in +ĠE ll +åŀĭ çļĦ +åĿĩ åĮĢ +ä¸į 太 +` , +im ages +Ġyoung er +线 çļĦ +ĠA ug +Ġrec over +Ġb one +ik es +温 æļĸ +Ġ çİĭ +im m +ist ing +Ġcaus ing +_ on +éģ ® +å¤ ķ +Ġden ied +ag en +Ġstrateg ic +å§Ķ ä¼ļ +ĠWilliam s +J ob +%%%% %%%% +åĬ « +Ġany way +头 åıij +åĵª 个 +ĠT w +m ake +ç²ī ä¸Ŀ +ĠA ri +D raw +Ġeng agement +ãĢĤ åħ¶ä¸Ń +Ġwhen ever +PO SE +Ġsomew hat +没 æĥ³åΰ +éĿĴ æĺ¥ +Ġmeasure ment +Ġunder stood +/ (- +Ġcol le +ï¼Į 缮åīį +, ä»ĸ们 +â Ĥ¬ +Ġra il +( result +Ġinj uries +Ġto x +r ich +Ġc odes +Ġcon version +åİŁ æĸĻ +Ġal ter +æľįåĬ¡ åύ +åīį å¾Ģ +OR Y +ĠP L +å® Ļ +Ġche ese +B N +å® ´ +Ġfe at +Ġsub mitted +{ x +ç´ł è´¨ +_C L +Ġf ault +ĠF air +ï¼Į çϽ +ãĢĤ è¿Ļç§į +æĺ¯ çͱ +è¶ħ 级 +Ġdiv ision +pp ers +f d +Ġanc ient +ĠSp anish +èī² å½© +çªģ åĩº +æĺİ æĺŁ +ä¼ĺ éĢī +è£ Ļ +åIJĮ æ¯Ķ +Ġm aps +Ġas set +Ġdet ected +im a +æłĩ å¿Ĺ +éľ ľ +roll ing +Ġsearch ing +Ġdr ag +p a +ans as +Ġpl ain +k o +ä½ł æĺ¯ +ä¹ī åĬ¡ +at omic +Param eters +Ġfight ing +os a +æ¡Ī ä¾ĭ +æľī æľº +Ġtr ick +æ·¡ æ·¡ +æ¬ ł +* , +éļı åIJİ +åĵģ ç§į +, 大 +· · +Ġcop ies +ĠN ight +ĠStud ies +Ġt iny +çĪ ¹ +ï¼Įä¸į æĺ¯ +Ġinter val +Ġdemonstr ate +E ven +ĠC ore +æ» ¨ +. X +Ġremain der +at ial +Ġent reprene +at ile +Ġst ability +ĠR oman +å°± ç®Ĺ +çļĦ ç¾İ +广 åľº +on na +èħ º +PE CT +Ġlif estyle +èµ ģ +Ġpo inter +Ġg ap +Ġdis appoint +E ffect +ond er +ĠM ill +d ec +é¥ ¼ +f ind +Ġ ä»İ +Ġ( _ +Ġjur is +or able +读 书 +Ġb asket +Ġdom estic +å¸ĥ å±Ģ +Ġmag ic +ĠD ie +) [ +ä¾ ł +çĺ ¤ +åħħ ç͵ +sec urity +op ic +èĬĤ çĤ¹ +æīĢ å¾Ĺ +Ġdisplay ed +çĿ ģ +Ġpartners hip +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠ +Ġmeet ings +åı¯ éĿł +f i +Arg ument +Ġtick ets +åħ± åĴĮ +Ġdisc ipl +' ), +Ġover w +ar ry +Ġtrack ing +çļĦ ç¡® +u ct +f ront +Fl ags +Ġent ries +Ġrem ind +åĽŀ æĶ¶ +et ing +ä¸į å¤Ł +Ġspect rum +åįģ ä¹Ŀ +ĠV an +Eng ine +ĠF IT +ane ous +. 
un +Ġmanag ing +Ġdeb ug +- ind +el i +ä¸į å¦Ĥ +ãĢĤ \ +ï¼Įä¸Ģ å®ļ +Ġlab els +K ind +_d ict +Ġed ition +en ess +ERS ION +R ule +ff ff +( in +åģļ æ³ķ +ä¸Ĭ 涨 +Ġphen omen +ĠI O +r ine +for ward +Ġpar allel +J SON +çĦ ° +, n +Ġstud io +ãĢģ æĸ° +Ġres olve +_ end +_M AX +æģ Ń +Ġentertain ment +ĠI reland +Ġfr ust +Ġh at +æ² IJ +Ġdiagn osis +w args +ain ts +ä¿¡ å¿ĥ +ru ption +Ġcheck ed +å¾Ĺ åΰäºĨ +å¾Ĺ äºĨ +åĨ ¯ +Ġsp oke +ĠM ember +çĸ«æĥħ éĺ²æİ§ +ç§Ł èµģ +L earn +é¸ ¿ +ä¸ĵä¸ļ çļĦ +Ġben ch +s ql +Ġserious ly +mat rix +ĠIt aly +Ġf raction +çŃī 人 +f n +ob by +In valid +Ġrestaur ants +Ġrec ording +_ u +åħ¨ ä½ĵ +Ġp y +ac her +å±ħ ä½ı +Ġres et +Ġevery day +å¸ ½ +Ġfact ory +Ġs ed +ĠG irl +end ant +Ġdo se +å¾Ī 好çļĦ +ï¼Įä¸į èĥ½ +n or +çļĦ æķ°æį® +å½¼ æŃ¤ +Ġc tx +æĻ Ĵ +Ġobvious ly +n av +æĪ Ī +at ar +åĮ Ĩ +è§ģ è¿ĩ +æī ¯ +Ġdro pped +, çĦ¶åIJİ +â ĺ +p o +Ġl ad +Ġrecogn ition +Ġcommun icate +te en +n i +or ters +ĠAp ache +point s +Ġrecommend ations +Ġmeasure ments +åIJ« æľī +Fe ature +ä¹Ł ä¸įæĺ¯ +Ġvert ical +ĠTh ree +Ġw et +åĪ© çİĩ +Ġintrodu ce +el lect +çļĦ èĢģ +requ ired +ĠE ffect +èľ Ĥ +L P +ang ers +Ġdra wn +Ġdat aset +Ġact ing +olog ist +ant ed +ĠC redit +Ġprov en +Ġsurv ival +äº Ń +éĺ² æĬ¤ +Ġf older +_ I +ĠK en +çĶ» éĿ¢ +ï¼ĮæĹł 论 +_ arg +Re al +Ġbu f +Cal culate +J ava +Ġcommit tee +è Ł +R NA +Ġauthor ities +Ġant icip +Ġsubsequ ent +_f rom +S O +Ġwalk ed +Ġdisc rim +ater n +æī¹ åĩĨ +åIJ ¾ +Ġincor por +Ġexpos ed +C G +Ġmount ain +çݰ æľī +ir a +éħ ± +æļĤ æĹ¶ +ç» ³ +it age +ĠJust ice +ï¼Į ãĢĬ +ĠL ittle +omet ric +ĠExecut ive +Ø ± +ĠH ope +O k +Ġmin ing +ĠP UR +OL D +ee k +Ġcommun ications +å±ķ çݰ +ĠJ on +Ġsym met +Ġc razy +D id +ick er +ï¼Į ä¼¼ä¹İ +顾 客 +we red +Ġst ake +: b +Ġper missions +Ġre form +B usiness +C ategories +à ł +am ber +Ġprepar ation +空 ä¸Ń +Ġsurpr ised +æİĪ æĿĥ +æµ ¦ +åIJĪ è®¡ +ĠM obile +Ġb le +ĠT urn +um a +èµ Į +Ġsomew here +ra k +ĠH as +Ġmod er +app er +ä¸į 对 +çħ§ 顾 +om ething +ç¿ ł +ast s +Ġd ust +ĠPol ice +Ġfra ud +Ġphil osoph +Ġcomplet ion +Ġappro val +ent le +Ġr 
h +æĪij å°± +à ® +ĠF ac +app end +Ġconf ident +Ġpartic les +çĽij æİ§ +.g oogle +ĠN C +u ced +c ers +Ġtarget s +Ġmanufacture r +çī© åĵģ +_ result +ch arge +e an +ç§ĺ å¯Ĩ +Ġpain ting +Ġh ur +P UT +ĠDan iel +. æł¹æį®æĿĥåĪ©è¦ģæ±Ĥ +( __ +èµ ´ +r at +ĠV ictor +éĤ£ ä¸Ģ +rec ord +L ock +ï¼Įå½ĵ çĦ¶ +eth yl +ri ers +d k +ĠCook ie +ĠMc C +Ġacc um +Ġrem oval +Ġ{ " +失 åİ» +_ z +èij£äºĭ ä¼ļ +åĬ¨ æĢģ +Ġland scape +Ġorgan ized +ly mp +Ġd ining +ha ust +Ġf asc +Ġad equ +Ġg aming +ins on +Ġdef e +Ġbrow s +èİ İ +Ġsens or +åijµ åijµ +ä¼ ª +-p arty +å®ŀ è¡Į +Ġfun ny +Ġnear by +Ġweek ly +Ġprocess ed +l ang +ĠT erms +Ġshould n +Ġins ights +Ġinit ially +é£ŀ æľº +Ġgra v +Ġb low +j ava +Ġsched uled +ĠD og +Ġfl uid +ch ild +æıIJä¾Ľ çļĦ +Ġg ate +æĻ ķ +T ool +Ġdi abetes +设 ç«ĭ +Ġlik es +is er +.p arse +åħ´ å¥ĭ +ke ep +asc ular +飩 åĽ½ +æ¼ ł +ĠN ode +被 人 +èī¯ å¥½ +UN D +ps ilon +Ġcl imb +é¹ ı +an es +R oot +Ġfin ite +p rivate +å¯ Ĥ +. co +ĠE s +èĸ ĩ +ĠF ar +缸 åºĶçļĦ +Ġmat hemat +Act ive +åĨĽ éĺŁ +Ġb are +ĠE th +ĠSte ve +ĠK n +Pr ice +it an +Ġp ill +ï » +Ġap artment +Ġl oves +Ġcon clude +Ġfl ash +Ġcolumn s +S ection +. 
New +åį ¿ +Ġprom ot +Ġag es +I LL +Ġ" < +Ġstring s +b ound +ear ing +Ġvis ited +ãĢĭ ãĢĤ +ic ity +the less +.m odel +Ġstrugg le +Ġfit ness +Ù Ī +ĠD id +Ġs ampl +Ġw est +Ġcy cl +c b +Ġf le +app lication +ĠM att +Ġfac es +ĠA C +äºĨä¸Ģ 声 +ç®Ģ ç§° +Ġtreat ments +çģµ éŃĤ +Ġdel ight +Ġt ack +ï» ¿ +缮 å½ķ +Ġwrit ers +å¤ ¸ +Ġtra il +åĨ² åĩ» +æĬĬ æı¡ +li ers +ĠO K +èµ· 身 +st ack +Ġgen u +)/ (- +Ġup coming +Ġappoint ment +çĨ Ļ +声 æĺİ +De lete +ps i +Ġt ank +ï¼Į 第ä¸Ģ +s f +åŃIJ ä¸Ĭ +g ments +ĠS olutions +æ¹ĸ åįĹ +Ġabs ence +Ġw ra +ens us +Ļ Ĥ +Ġre in +ĠCor poration +Ġwith draw +il ation +Ġm ob +èĢ ķ +çĪ µ +ç»Ī 端 +Ġesc ape +Ġch ose +it ting +åħ± 享 +çļĦ æľºä¼ļ +çľĭ åIJij +og gle +ä¸Ģ åľº +Ġbound ary +Ġob lig +Ġopt imal +ific ial +ĠJack son +ĠOh io +Ġag greg +ç¡ ħ +Ġdevelop ers +å°± æľī +pro of +Ġcomplic ated +ĠCon fig +al o +ĠF eb +ĠMon th +Ġl at +Ġtick et +s en +Ġfin ance +æĮĩ å®ļ +æĹł å¥Ī +ĠN a +å¡ij æĸĻ +ĠR om +Ġsupp lies +raz il +åľº æīĢ +Ġhost ing +A ng +ĠD aily +ul um +ĠB Y +Ġent itled +Ġfeature d +Ġad min +åľ¨ è¿Ļ个 +é¦ Ī +Ġmin i +Ġi o +ĠL abor +pro duct +Ġdescrib es +Ġh all +Ġatmosp here +å¦ ® +âĢ ĭ +Ġcan cel +!! !! 
+in cluding +Ġb at +åħ¬ åŃIJ +Ġexecut ion +Ġ åı¯ +Ġstrong er +åºĶ 对 +éĩį éĩı +ãĢĤ 缮åīį +è¿ij æĹ¥ +让 æĪij们 +U V +åĽŀ 头 +") ; +è´¢ 产 +f ilter +è¿Ľ åı£ +ä¸Ģ çľĭ +èĶ ¡ +è¯ ŀ +ug s +ĠF estival +Ġdeb ate +ort ed +Ġg ambling +_ ext +èѦ å¯Ł +æ·± åĪ» +æĥħ å½¢ +< li +è¿Ŀ æ³ķ +妻 åŃIJ +æ° ¨ +çļĦ 说 +W he +ev in +çİ ² +æ·± 度 + » +æĮģ æľī +Ġmort gage +Ġdriv en +èĪ Ł +Ġsuff ering +ãĢĤ ä¾ĭå¦Ĥ +ER V +士 åħµ +Ġextrem e +ĠCh apter +< int +çļĦ éķ¿ +ath an +Ġl anguages +äºĨ æĪij +å¿ĺ è®° +Ġs orry +ra ine +Ġt f +amp s +ĠT em +缺 ä¹ı +Ġn ob +ï¼Į 羣 +Ġc oc +Pol icy +ĠStud y +Ġoper ators +Ġnew sp +è¡ į +util s +es is +, åıĪ +a ire +oid s +Ġmatch ing +ï¼Į å·² +.W rite +ĠProduct s +med iate +ĠM achine +ï¼Į éĻĪ +-b y +åĨĽ äºĭ +Ġelect ro +Ġlow est +ru le +Ġsh arp +Ġas ide +ç͵ æµģ +o a +å¨ģ èĥģ +æĩ Ĵ +Ġcont emporary +S pace +ä¸Ń åĮ» +Ġprinc iple +Ġpr ay +çĶŁ åŃĺ +éļı æĹ¶ +ï¼Į ä»Ĭ天 +Ġre ly +ãĢģ æľī +_ is +Ġbuild er +ä¸ĵ åĪ© +ĠM ER +. as +åľ¨ æĪij +ï¼Į åİ» +Î ¯ +ĠP an +Ġremain ed +Ġflex ible +ĠT er +-b e +Ġelect rical +éĹ´ çļĦ +æĢ ĸ +Ġw ww +\ t +ĠR oyal +éĩį å¤į +( get +ç¬ ¼ +ĠCh air +Ġclaim ed +Ġaccord ance +æīĭ ä¸Ń +Ġch ronic +I mp +ĠE state +对 çĿĢ +æŁ ´ +ĠM aterial +ä¹ĭ ä¸Ĭ +Ġfl av +ĠA ud +P G +å¸ IJ +v in +ĠB ased +D O +Ġj ew +Ġdec ade +gy pt +Ġcap abilities +c ut +妹 妹 +ĠUn it +ä¼ł éĢĴ +æľº åľº +cl osed +Ġbox es +y y +ĠCol umb +Ġv ot +ï¼Į äºĮ +Ġlight ing +T im +ä¸ļ 绩 +Ab stract + ± +Ġsign ature +Ġent hus +åĬŁ çİĩ +Ġtom orrow +å®Į ç¾İ +Ġt ags +_ object +Ġmix ture +z ure +@ " +ro x +ï¼Įä¸į ä»ħ +Ġed ges +Ġstret ch +Ġtransport ation +ç© ´ +OM M +ãĢģ æ°´ +åľ¨ ä»ĸ +Ġrow s +(f ile +l ie +Ġcamp us +æıIJ 交 +ian o +çļĦ ä¸Ĭ +- x +Ġg ram +- con +å¦ ¥ +ĠCh ild +ĠM rs +et ary +G lobal +Ġb ridge +ä¿Ŀ åŃĺ +Ġincreasing ly +ter ior +M edia +ç¼ĵ ç¼ĵ +çª Ŀ +Ġ ठ+Ġeng aged +оР² +AS H +亲 èĩª +ï¼Į æŀĹ +Ġtext ure +å®ļ æľŁ +ed In +est roy +er as +.j oin +çº ± +at aset +ĠR en +è ¤ +Ġrelig ion +Ġmembers hip +UN T +, å°±æĺ¯ +Ġhe nce +éģĵ å¾· +ä¹ĭ å¤Ħ +大 éĻĨ +p ool +åĢº åĬ¡ +G ame +h r +pos ed +ĠB ed +Ġb urd +çļĦ åħī +Ġa 
thlet +éĩĬ æĶ¾ +T F +< string +æİ¨ åĩº +f ree +ï¼Įæľī äºĽ +Ġsign als +Ġc ake +Ġp airs +ĠR am +, åĨį +ĠH D +Ġpull ed +åIJĦ ä½į +Ġneigh b +ä¸ĸçķĮ ä¸Ĭ +Ġpack ages +åIJĪ æł¼ +ĠCh ampions +################ ################ +Ġexpl ains +磼 çĽ¾ +ä¼ IJ +Ġtw enty +æĬ« éľ² +æľ ´ +Ġb in +} [ +âĢĻ , +ĠH on +Ġpros pect +Ġprov ision +am era +.st art +w ar +æŃ¤ åĪ» +_ var +ĠSaf ety +å¸Ĥ æ°ij +ĠS ol +æī¿ 认 +è® ¶ +Ġgr ass +ĠM il +l ive +Ġag ric +æ¸ Ĺ +_b lock +FT WARE +Ġ æľī +ĠH im +. se +ott ed +ĠÏ Ģ +Ġlog ic +æĢ§ è´¨ +ĠPUR POSE +è¿Ľ æĶ» +Ġd iversity +Ġc ute +ON G +å¿ħ çĦ¶ +个 æĢ§ +S end +Ġquant ity +ï¼Į使 å¾Ĺ +Ġtrig ger +atch ing +PI O +é¦ĸ 次 +z ym +ĠF oot +ï¼Į å®ŀçݰ +Ġperman ent +( str +åĩº åıij +åľ° éĿ¢ +æ´ Ĵ +is ition +Ġrec ip +ĠMich igan +æŀĦ 建 +ad or +ant ly +å² Ń +OW N +ä¸Ĭ æĿ¥ +Ġnav igate +èĴ ĭ +å§ ¨ +Ġn aturally +cle ar +_m ode +om atic +Ġcomp ute +â Ĩ +åľ° ä¸ĭ +B M +enn is +Ġexam ine +Ġins ight +车 çļĦ +Ġearn ed +CH ANT +Ġmere ly +- res +C opy +åŃ©åŃIJ 们 +æĥ³ èµ· +O pt += - +Ġshoot ing +å±Ĥ 次 +åIJ« éĩı +第 åħŃ +Ġpar agraph +è¾ĥ 大 +Ġcol on +âĢĿ âĢľ +åħ¥ äºĨ +çļĦ 产åĵģ +.t est +çŁ¥ åIJį +èIJ Ŀ +Ġli ability +天 æ´¥ +Ġaf raid +çŁ £ +åݨ æĪ¿ +Ġcomp osition +ĠChar les +Ġelig ible +Ġav ailability +æİ ĺ +ĠT ag +Ġst ages +We bsite +Def inition +Ġpow der +ĠPART IC +åį ¸ +erv ing +ĠCons ider +临 æĹ¶ +ĠE r +Ġas sembly +Ġdist ingu +Com mon +ĠTe ch +Ġanaly zed +- z +æĹł 人 +ĠJ ac +Ä ± +. ed +Ġcare ful +éĽĨ ä½ĵ +èĪĴ æľį +end ers +(" % +Ġinsp iration +end ment +æĦ ģ +Us ing +Ġgover ning +Ġfore ver +Ġh ire +å·¥ åİĤ +) ), +pro p +ĠB ul +åı¯ä»¥ éĢļè¿ĩ +Ġlarg ely +åĽŀ äºĭ +ĠDes cription +æī Ķ +æĮĩ 令 +Ġt all +_m ax +Ġload ed +èĢĮ ä¸į +Trans form +Ġst ood +iz able +-st yle +对 åºĶ +ä½İ äºİ +èĵ Ħ +xy gen +ra cy +A F +. The +n ote +] ). +Ġco al +å¼ı çļĦ +il ty +Ġtrans actions +åĵ Ģ +re nd +Ġob st +Ġre nd +Ad apter +大 äºİ +(t ype +ĠBuild ing +ĠY et +Ġreg ulation +ribut ed +Ġsl ots +_tr ans +乡 æĿij +两 å¹´ +éĥ¨ ç½² +_ comp +G amma +Ġimm un +are a +æį IJ +ig ger +Ġn an +in itions +. 
Name +Ġcompens ation +- end +ul ator +ï¼Į èĩªçĦ¶ +æīį ä¼ļ +æĽ´ 为 +Ġh ospit +çij ¶ +Ġwin ner +é¢ ĸ +åįģ ä¸ĥ +ĠIn put +ĠT ask +Ġch ocolate +s is +D ay +D T +ĠS ale +_add r +Ġinf lam +ont o +add ress +G raph +Ġimp lements +åıijçĶŁ äºĨ +D uring +m al +ä¸Ģ åıª +Ġimm une +v c +d y +æĹ© å·² +å¯ ĵ +èij Ľ +Ġapp end +Ġbott le +Ġmag azine +Ġu l +æĹħ è¡Į +O wn +claim er +å±¥ è¡Į +Ġal pha +Ġhe aling +å¹² çĩ¥ +E vents +en um +osit ory +ul ating +el le +ä¸ Ľ +éħį å¥Ĺ +ĠN E +Ġbe er +èĬ¯ çīĩ +I con +è¯ģ æį® +æĭĽ èģĺ +Ġr ub +Ġinst ances +Ġadvent ure +æĹł æ¯Ķ +p ir +ĠF arm +it ud +De cl +ãĢĤ 为 +åĬł å¿« +Ġex ecute +pl ugin +åį° åº¦ +æŀ ļ +ãĢĤ å½ĵçĦ¶ +ä¼ĺ æĥł +Ġrent al +ä¸į è¡Į +Ġapp lies +CHANT ABILITY +#if def +on th +or ce +ç¾ ŀ +Ġdis k +Result s +ĠBook s +åĽ´ ç»ķ +W E +éķ¿ æĹ¶éĹ´ +ic ient +ĠCol lection +读 èĢħ +Ġcomp act +çĬ¹ 豫 +çī¹ æĢ§ +ä¸Ģ 人 +Ġtestim ony +Ġp arser +è¶ ģ +d p +èĬ Ŀ +Ġvel ocity +åİĤ å®¶ +Ġtem porary +Ġhon or +Ġun w +ĠStud ent +ãĢĤ åı¦å¤ĸ +Ġsp an +ax y +ï¼Į åħ·æľī +Ġactiv ation +è¯Ĭ æĸŃ +Log ger +åIJ ¼ +Ġkn ock +å®ĩ å®Ļ +Ġbre aking +P ool +Ġen ables +ov ie +ãĢĭ ä¸Ń +_ U +( std +èĽ ® +Ġle ague +èĪª 空 +ch or +. config +Ġr u +ï¼Į åIJ¬ +ĠP ak +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠĠ +id en +Ġs aving +Ġpublic ation +Ġ" % +ä¹ ĸ +az e +æ¸ Ĭ +Ġpropos al +R et +y nt +hes is +Ġbet ting +ĠT ips +ĠPARTIC ULAR +f all +Ġf irms +æĻ® éģį +æĺ¯ æľĢ +Ġg ifts +ä¸ĵ 项 +æ°´ çļĦ +ä¿Ħ ç½Ĺæĸ¯ +ï¼Į ä½ľä¸º +et he +Im port +人 群 +A K +Ġg ear +åĪł éϤ +ĠFIT NESS +让 她 +Ġto ler +O mega +ĠA LL +ĠS outhern +Ġfunction ality +_S H +ĠG iven +ä»ĸ 人 +. load +ãģ Į +大 éĩıçļĦ +ç»´ ä¿® +o ir +M ember +é¼ İ +_w ith +ink ing +Ġcre w +R est +主 人 +Ġtalk ed +æIJħ æĭĮ +Ġsau ce +å¤į åIJĪ +Ġemploy ed +. 
Data +Ġ{ { +ä¸į åĬ¨ +in ar +Ġdis abled +Ġch ances +Ġ ãĢĮ +ĠMe an +åºĶ æĶ¶ +çļĦ ä¸Ń +el a +Ġcarry ing +常 常 +Ġdim ensions +ĠReg ister +Ġprinc ipal +åİ ĺ +itut ional +(p ath +em o +Ġv i +åŁº åĽł +Ġst uck +Ġleg it +çļĦ åĨħ +Ġsus pect +æ² « +å¼ķ ç͍ +Ġter rit +r angle +Ġdep ression +Ġleg s +æľŁ æľ« +ĠðŁ ĻĤ +Ġil legal +Ġst ead +éŁ ¦ +Ġimp ressive +ä¸Ģ åı¥ +ï¼ĮæľĢ ç»Ī +Ġlaw yer +mer cial +å½ĵ å¹´ +æ£Ģ éªĮ +Ġcorrel ation +éĥ¨ ä½į +St at +Ch rist +åģļ ä»Ģä¹Ī +éĶ ¡ +ibr aries +Ġnew ly +çģµ æ´» +ĠSe a +ĠS EO +* - +sh ips +)$ , +Ġaddress es +AA AA +{ aligned +D ouble +å² Ĥ +im ents +ĠM P +- K +éĿł è¿ij +Ġout standing +Ġt un +Ġcompl aint +ĠG ra +å ¬ +ãĢĤ éĢļè¿ĩ +Ġpref erences +w hat +åıĺ åĬ¨ +çļĦä¸Ģ åĪĩ +à ¹ +Net work +åı¤ 代 +ĠEv ents +Des criptor +æŃ ī +ç¿» è¯ij +trans form +åĩ ij +ä¿¡ ä»» +Ġadopt ed +ĠAdminist ration +çĤ Ń +D oc +ĠJew ish +m id +Ġar ts +Î ² +Ġover come +ĠAnd rew +lo om +ç͵ åĬĽ +n p +ĠU lt +au x +ap ore +Ġtum or +as hes +uck y +P ad +Ġpre v +Reg ister +ĠI mm +æľĿ çĿĢ +ĠV eg +Ġadvant ages +he et +Ġpow ers +å®ŀæĸ½ä¾ĭ ä¸Ń +ĠCo ast +Ġv ess +Ġprodu cing +ä¸ ij +ĠP y +_in put +模 æĭŁ +cript ions +ĠFranc isco +od els +Ġrapid ly +ç©¿ çĿĢ +éĵ ¸ +Ġ{ ' +ĠK ar +Ġsav ings +Ġ 第ä¸ī +Ġp ra +Ġmo ist +C ard +ĠFor ce +Ġ era +Ġsuper ior +here nt +ail ing +Ġgen etic +Ġscen ario +çļĦ èĥ½åĬĽ +没 ä»Ģä¹Ī +çļĦ è¦ģæ±Ĥ +å¾Ĺ å¾Ī +大 å¸Ī +Ġk ernel +åķĨ æłĩ +Ġexplan ation +Pe ople +Ġtra ce +Ġl icensed +Ġvict im +Ġà IJ +社 交 +ï¼Į åĥı +ĠD ub +ed ing +ĠS em +èĥĮ åIJİ +Ġdepend ent +In tern +b uffer +ĠD ouble +ĠÏ ĥ +! [ +op l +Ġhand ler +M eta +G rid +Ġvers us +W ORD +åIJĦ èĩª +. 
info +Ġtor ch +Ġb orrow +ä»Ģä¹Ī æĹ¶åĢĻ +å©ļ å§» +irc raft +Ġre n +åıĪ æĺ¯ +æĭ ¦ +Ġdep loy +åĮº çļĦ +Ġestim ates +_ LO +ra ined +Ġvac ation +Ġlegis l +Ġlegis lation +Ġbehavi our +åį ľ +çϾ åĪĨ +att ribute +模 æł· +In v +/ A +M ock +å·¨ 大 +M ult +_F L +è§Ĩ è§ī +æĶ¶ èİ· +ĠD own +, åħ¶ +ĠKing dom +æĥ ¹ +ç¥ ¸ +Ġhop ing +èIJ Į +Ġmar ks +Ġany more +äºĨ åĩºæĿ¥ +li ament +Ġple asure +Ġl apt +_param s +is ely +æģIJ æĢĸ +av irus +åŁºæľ¬ ä¸Ĭ +åħļ å§Ķ +ĠSch olar +ĠSen ate +è¡ ¬ +u v +Ġsmall est +C ap +is ement +ãĥ ³ +Ġt ro +Ġque ue +ĠOffic er +enn y +Ġc able +c hers +а н +ĠS on +åIJ » +åIJĪ æ³ķ +ain e +ä¸į åģľ +Ġs ought +' )) +é¢Ħ æľŁ +and ed +Gener al +Ġ âĢĻ +Ġdi agram +Ġe ast +äºĴ åĬ¨ +æĬĵ ä½ı +h y +Ġ ), +ĠWith out +Ġsatisf ied +il st +æ° ¢ +ä¸Ģ æĬĬ +éĢī 项 +èĮ « +S R +ä¼ł è¾ĵ +Ġ& = +)/ ( +Ġde ck +å½¢ æĢģ +ï¼Į è¿Ľ +Ġdis appe +.j pg +çŃ Ľ +æľŁ åĨħ +Ġessential ly +Met adata +Ġpick ing +( q +ym ph +Ġund efined +èħ » +Ġmechan isms +av ax +ak a +åĿ Ĭ +vis or +çĶŁ ç´ł +B E +å§Ķ æīĺ +Ġbeh alf +.prot otype +ï¼ģ ï¼ģ +çļĦ åħ³ç³» +.m ax +Ġmaintain ing +Ġh ide +转 让 +ä½ı æĪ¿ +æ¯ı 个人 +ix els +Ġvict ory +ĠSm art +è¾ĵ éĢģ +Ġmet a +Ġcover ing +Ph oto +Ġclos ing +ĠCert ificate +Ġc md +Ġund ert +Ġb ird +ãĢĤ 以 +end a +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠĠĠ +Ġcon ventional +Ġtal ks +IM IT +Ġ 对 +åĶ¿ åIJ¸ +ĠR ay +Ġrad ius +Ġd il +Ġman ip +ãĢĤ ( +ĠMus lim +S I +crib ed +ï¼Įåį³ ä½¿ +prot ected +b el +Ġal ive +ĠGeorg ia +.h as +Ġdeterm ination +éģ Ĥ +Ġ æŀĹ +çļĦ çľĭçĿĢ +éļ Ļ +Ġde eper +ĠS ometimes +Ġcontribut ions +ĠSO FTWARE +ist a +Ġinvestig ate +Ġsurpr ising +Ġassum ing +Ġp itch +é¹ ¿ +b g +ĠF ather +d c +not ation +Res ources +in line +Ġre ar +i ology +å£ ¤ +Rep ort +â ĨĴ +.assert Equal +ĠC at +æº ¢ +Ġenter prise +-qu ality +åħ Ķ +æĿ¿ åĿĹ +ä¼ĺ åħĪ +Ġev il +B ecause +Ġstat istical +M R +meric an +ET H +ç¡ « +Ġrepresent ing +v are +åĽŀ å¿Ĩ +ĠL iving +æĶ¾ åħ¥ +if iers +ĠMER CHANTABILITY +Ġobserv ations +Ġdrink ing +åħļ çļĦ +认 åı¯ +ç« ĸ +è§Ħ å¾ĭ +å« © +èĩª ä¿¡ +æĺ¯ 对 +ĠRequ est +å§ ¬ +ì Ŀ +h ome +éĵ Ń +ĠO lymp +i 
ón +En um +Ġcon j +og ram +Ġmus ical +Ġacc eler +Ġpur s +S ur +D ebug +ĠL ED +Ġ éĻĪ +ir ty +c irc +åħį çĸ« +N G +äºĴ 缸 +Ġh ang +Ġsuggest ions +Ġreturn ing +- class +ĠSh ort +éĢIJ æŃ¥ +å¤ı 天 +Ġdec ent +èµ° åIJij +ĠH un +ĠRead ing +Ġal ert +right arrow +é£İ æĻ¯ +ç¾İ æľ¯ +æĺ Ń +Ġha ck +ĠB rad +é Ħ +çļĦ 天 +Ġthous and +, 没æľī +Ġrec ipes +许 åı¯ +ĠM ajor +T ab +Ġcl othing +Ġemploy er +Ġbelie ves +ĠRes ource +éĢł åŀĭ +j ud +Ġrestrict ions +çļĦ å°±æĺ¯ +åij½ è¿IJ +Ġref erences +éĥ½ å¾Ī +æľ¬ æĸĩ +Ġgrad uate +åħ» èĢģ +=" ../../ +pe ction +å¹¶ éĿŀ +ĠJ ob +Ġp leased +viron ments +ĠResult s +Ġgener ic +ur ities +-s pecific +_ US +Ġrepresent ative +æ· Ģ +毫 ä¸į +OR S +éªĮ è¯ģ +Ġc urrency +çĶŁæ´» çļĦ +æĹ¥ èµ· +éļľ ç¢į +å¾® ç¬ij +Ġdam ages +Ġor al +å¸Ĥåľº çļĦ +Ġprogram me +su pport +主 管 +ĠS us +ĠO wn +ino is +ä¿Ŀ çķĻ +- per +ï¼Į åģļ +om orph +Ġhyp othesis +w an +Ġhead s +lo op +æīĢè¿° 第ä¸Ģ +Ġvis its +ĠCons ult +è¿Ļä¸Ģ çĤ¹ +åī Ĭ +ĠP acific +s a +Ġaccompl ish +Ġb irds +Ġless on +u h +ç¿ Ķ +çļĦ 两 +Ġput s +_ err +P ar +ç¥ŀ ç§ĺ +带 é¢Ĩ +åѦçĶŁ çļĦ +Ġstr ange +Ġres c +çĶŁ æĦı +Ġb ars +å´ © +示 èĮĥ +çĤ¹äºĨ çĤ¹å¤´ +t ilde +ill iant +ĠCh all +ĠM ir +ad ed +su per +ãĢĤ æł¹æį® +ãĢĭ çŃī +Ġreput ation +ail ure +çľĭ åĩº +Stud ents +ãĢĤ 为äºĨ +Ġpoint ed +ç» ij +Ġoverw hel +éĤ ĵ +çŁ¥éģĵ äºĨ +Ġfac ed +äºĨä¸Ģ çľ¼ +éĻ ķ +N D +Ġtrans lation +以 ä¸ĬçļĦ +é© ° +Ġp ars +便 å®ľ +åĵª æĢķ +æĺ Ĭ +å®¶ åħ· +é»ij æļĹ +Ġcheck s +Direct ory +ab eth +Ġ-- > +Ġad mit +() : +åħ ¹ +raw ing +åĭ ĥ +d l +å¥ĸ åĬ± +_ KEY +ĠM oney +st ar +Ġpass age +Ġmanag ers +Ġl oyal +J ECT +Ġre q +ï¼Į æĪIJ为 +Ġc ogn +C reat +orge ous +Ġ( @ +Ġd ynamics +å¹´è½» 人 +åı Ļ +ĠProf essor +ä¸ŃåĽ½ çļĦ +ik ing +ĠDiv ision +é¤IJ åİħ +r ating +Ġconsc ious +ĠS ummer +Ġdef ect +Ġf inger +clus ions +âij ¢ +ĠInt eg +ater al +ĠS ave +Å ¡ +ac on +Ġcal endar +ĠSecret ary +Ġre act +Ġhas n +( input +av id +S un +Ġret irement +Ġmur der +, t +HER E +Ġimpl ies +ï¼Į æ°´ +ar on +ĠJer sey +注 éĩį +ĠU SE +A W +ãĢĤ 大 +, ç͍ +y ers +Ġopin ions +_ cl +Ù Ĭ +ä¼ĺ çĤ¹ +ä¿ ± +ĠF all 
+Ġem ails +ĠC T +Ġ$ _ +( context +_M ODE +èͬ èıľ +ĠV ill +Ġwat ched +æ¶Ī èĢĹ +Ġ ## +ĠM om +ĠAg reement +å§ ļ +éĵ ĥ +Ġbu ff +Ġdo ctors +)* ( +Ġ åħ¬åı¸ +é«ĺ ä¸Ń +C ond +erv ative +ĠT ree +æĸ¯ çī¹ +Ġfor um +ĠT rack +å»¶ 伸 +å· ¾ +åIJĦ åľ° +ĠM icro +å¯ ¨ +Ġtrack s +ĠS her +Ġ ä½Ĩæĺ¯ +寿 åij½ +ĠMem bers +ourn ament +Ġm apping +åĩ ¹ +Ġcelebr ate +设 å®ļ +Ġsudden ly +åį± æľº +/ lib +Ġd ental +Ġindust ries +游 客 +Pl ugin +çļĦ æľĭåıĭ +审 æł¸ +Ġsur ve +æijĦ å½± +ç´¯ 计 +ä¼ij éĹ² +Ġt one +int on +Ġperiod s +ĠK im +Ġpro secut +åIJĮ å¿Ĺ +au g +° C +Gener ic +. remove +H R +Pro gram +çĻ « +ĠSh op +åıį 对 +eal and +Ġanal og +Ġdream s +ĠAs sembly +è´ ª +OP Y +Ġfore ach +éħ ¬ +ĠP sych +æķĻ ç»ĥ +æĺŁ æľŁ +.W indows +ro le +ĠG a +ä»ĸ åľ¨ +Ġtri als +Ï ī +Ġst orm +ä½ IJ +ĠColor ado +èĮĥåĽ´ åĨħ +å¤Ħ ç½® +Ident ifier +Field s +Ġpro hib +Ġ| = +ä¸ĩ 人 +and ler +Ġp ounds +ç¿ ° +Ġprov ed +Ġevery where +Ġsk ip +Ġmanufacture rs +equ ence +Ġgu itar +ä½ĵ åĨħ +az y +ä¼ł æĿ¥ +Ġforg ot +转 åĮĸ +Ġcor on +æķħ éļľ +her it +ĠDet ails +Ġlux ury +ĠP en +Ġport folio +ro vers +ĠC ancel +Ġc yt +iet ies +_ com +Ass ert +ĠC HECK +ĠC ivil +P erm +Ġle ct +zon a +ĠBe aut +re ation +Ġscient ists +Ġint ention +Ġfl avor +ist ration +ag ger +Ġhigh lights +Ġacqu ired +说 ä¸į +ï¼Įåıª èĥ½ +{ p +ĠCas ino +ĠChe m +æĦŁè§ī åΰ +ub s +D oes +d el +Ġegg s +æĢ» ç»ıçIJĨ +Ġc ov +F unc +ĠT y +Ġfac ulty +_w rite +Ġf is +级 çļĦ +Ġassist ant +Ġf ake +ĠP erson +å¿« çļĦ +æĥĬ è®¶ +Ġcour ts +åºķ éĥ¨ +Ġme als +ĠH a +æĹ¥ çļĦ +ĠS imple +Ġmem br +Ġl ic +å°ı åŃIJ +Ġbow l +_ lock +line ar +v y +ĠCustom er +éĤ£ è¾¹ +Ġsh ower +Sc ale +对 æ¯Ķ +st ers +ĠR ule +Ġâ ĸ +Ġfall s +g lobal +Ġlog ger +æ²³ åĮĹ +/ T +æľīæķĪ çļĦ +Ġfil ename +Ġab und +_V AL +âĶĢ âĶĢ +ew ise +åĪĿ å§ĭ +ĠÐ ¸ +, $ +ĠC G +as ma +éħ ¯ +Ġend ing +ĠAr my +Ġcl ust +ĠB ad +ï¼Į 身 +Ġex haust +çļĦ åĬĽéĩı +Ġd ownt +u ating +> :: +ãĢĤ éĤ£ä¹Ī +> \ +b m +åĭ ī +å°Ĩ åħ¶ +Ġresult ed +N av +ĠâĪ Ī +ĠE gypt +AR N +ĠGD PR +Cl oud +art icle +- check +cret ion +æĭħ ä¿Ŀ +. 
ui +are t +æİ§ èĤ¡ +Ġd ual +, éĤ£ä¹Ī +åĨ· åį´ +ä¸Ģ è·¯ +ag ent +ä¸į çͱ +Ġmem ories +Ġdis covery +Ġt ur +ãĢĤ èĭ¥ +åIJ µ +() ). +ĠR ose +主è¦ģ æĺ¯ +åĬ £ +Ġun iverse +èij ± +W ord +è¾ĥ 为 +åħ³ éĹŃ +æĺ¯ æĢİä¹Ī +Ġ outer +æĿij æ°ij +Ġjo ining +交 æį¢ +AM P +对 æĪij +äºĨ 两 +Ġstudy ing +Ad v +ĠI V +S w +å¹³ éĿĻ +Ġluck y +Ġro ots +åħ±åĴĮ åĽ½ +Ġred ist +Ġrob ust +back ground +ç° § +å¹´ 人 +@ property +ĠH ence +Ġacknow led +Ġh its +线 è·¯ +èĢĥ æł¸ +aw a +Ġinter f +ĠPol it +iz za +ort ion +H D +ĠS anta +æ¶ ¯ +Ġgreat ly +Ġpath s +Ġf ishing +Ġeval uated +ĠD rive +Ġst er +éĢ Ĭ +and y +_ AL +le ting +ĠG row +ï¼Į å¤ĸ +å°Ĩ åľ¨ +åģľ è½¦ +**************************************************************** ******** +Ġr ising +ent h +int age +Ġinteg ral +ĠSc iences +Ġincred ibly +yn om +, å¦Ĥ +æ»ij åĬ¨ +Ġm ic +F ILE +}} $ +ï¼Į æŃ¤æĹ¶ +Ġp ig +æīĵ åĩ» +posit ory +åIJĮ æŃ¥ +OM E +ä¼ļ çļĦ +ra q +åij¨ è¾¹ +H L +ĠB all +Ġbroad cast +Ġfavour ite +Ġtra v +d iction +ĠT aylor +çij Ł +æ ª +ä¿ ±ä¹IJ +èĹ ¤ +G ER +I ED +æ²ī é»ĺ +? . +åĥ µ +Ġmer ch +æĦ ī +d x +un icip +]\ ]. +ä¸ĭ åĪĹ +S chema +re ction +M O +Ġattract ive +w hen +çķ ı +b c +Ġfl our +ĠG arden +Det ails +æľĢ ä½İ +ï¼Į éϤ +Ġequ ally +L icense +Ġmist akes +Ġal gebra +Ġup grade +é s +ĠC ru +åİĭ 缩 +T ake +ä¼ļ ä¸Ĭ +Ġg olf +Ġclos est +. 
view +; >> +éŃĶ æ³ķ +Ġfollow ers +ä¸ĵ ç͍ +Ġres ort +Ġpenal ty +ĠCond itions +å½± è§Ĩ +Ġdeath s +Ġredist ribute +çĶ· 女 +æľº åĬ¨ +âij £ +O ld +ä¸Ģ 线 +ç½Ĺ 马 +t rack +import ant +r ank +EN CE +å°½ å¿« +{ $ +亿 ç¾İåħĥ +Ġexp ense +ig s +èº ģ +èĢĮ åĩº +IEL D +åĬŀ äºĭ +f x +ãĤ ¹ +Ġconcent rations +Ġcoord inate +Ġpregn ant +Pat tern +ĠG allery +ic iency +er ge +åħ¬ åħģ +çļĦ åıĺåĮĸ +ĠK ong +ĠP ast +b ind +ĠCONDIT IONS +d on +< td +II I +è§Ħ æł¼ +op ed +éĶĪ éĴ¢ +ĠAct iv +ä¸Ńåįİ人æ°ij åħ±åĴĮåĽ½ +åŃĺåľ¨ çļĦ +{ A +yt ical +ĠSk y +Ġbound aries +Ġper t +è¾Ľ èĭ¦ +è¶³ 以 +yn chron +Ġhost ed +ĠP O +riv en +åľ° éĵģ +Ġvalid ate +ĠCh oose +Ġde leted +Ġspeak ers +æ¡£ æ¡Ī +Ġstream ing +è ¥ +uck s +ĠS a +Ġconc ert +ĠCour se +åĬĽ åĴĮ +[ - +Ġorient ation +Ġart ificial +S elf +ĠK evin +lement ation +Ġt ang +Ġc ub +ä¸ ¨ +Ġent rance +åĽ½ æľī +ĠColumb ia +T erm +Ġgra b +Ġp ills +éĩĮ éĿ¢çļĦ +Ġb aking +人 大 +çĿ¡ çľł +Ġt u +æĬ¥ èѦ +ĠS L +åĮĹ京 å¸Ĥ +D ev +s ince +Ġsal ary +éĴ Ļ +Ġrepl ied +Ġc ats +Ġuncertain ty +å· · +Y ear +éĴ © +C ert +订 åįķ +Ġb att +L ife +. init +_n umber +å®Ĺ æķĻ +Sh ould +CR IP +å¢ ħ +_F ILE +Ġoper ational +_ST ATUS +çħ Į +it ate +M iss +Ġmembr ane +å® ° +. equals +åİ» æī¾ +epend ency +W in +% ï¼Ľ +Ġparticip ating +D im +ĠL O +广 æĴŃ +ob b +æľ¬ è´¨ +Ġsupp liers +Ġhead ed +rapeut ic +åľ° çľĭçĿĢ +Ġlog ical +, k +çͱ æŃ¤ +ĠDemocr atic +Ġbrows ing +Ġpart ition +Ġprevent ion +Ġemploy ers +r v +Ġh ol +Ġsh adow +Ġsupp ress +R F +ï¼Į å®ī +ĠEvery thing +便 åĪ© +Ġ\ , +丫 头 +Ġenthus i +æĪIJåĬŁ çļĦ +ens ure +D uration +e lect +.D rawing +Ġconvers ations +åĽ½å®¶ çļĦ +Ġpres cription +t emp +M apping +ĠP ick +Ġwis dom +ç»ĵ 论 +ĠGold en +ĠG ets +erv let +Ġ" # +f in +umm ies +Wh ite +Pro v +opt ional +Ġt ie +S ample +æ··åĩĿ åľŁ +Ġter rible +Å ¾ +.m d +Ġsure ly +Ġint ellectual +Ġ ---- +ĠD ise +çļĦ éĿ¢ +é¢Ħ 设 +st op +ĠM oon +Ġnewsp aper +S ocial +. 
Un +Ġgr an +ç²® é£Ł +Ġdiff er +ous es +_T O +äºĨ è¿ĩæĿ¥ +en z +鸡 èĽĭ +æį٠害 +Ġe cosystem +转 è½½ +C enter +g ly +Ġagre ements +æĽ´ æľī +t w +Ġ æĸ° +ing ton +çĽĪ åĪ© +æĥ © +èµĶ åģ¿ +Ġefficient ly +p erson +Ġtri ps +_ entry +.spring framework +Ġel der +Ġa ver +ï¼Į ä¸ĩ +cal ar +Ġstreng then +U IL +ĠJud ge +çŁ³ 头 +é¥ ¶ +æĬ Ħ +; i +ĠW inter +使 åij½ +Ġbasket ball +Ġw x +Ġwe igh +çľ ¨ +ĠD VD +ãĢĤ å°± +转 åŀĭ +back s +ä¸Ĭ çıŃ +Ġd ot +åī© ä¸ĭ +åı« åģļ +S B +ri um +ä¸ī è§Ĵ +Ġfound er +S imple +ĠS ug +åľ° æĿ¿ +Op ens +res ource +Ġnarr ative +, ä¹Łæĺ¯ +Ġprodu cer +ï¼ļ åľ¨ +Ġapp li +éĶĢ éĩı +ĠVal id +ul pt +- Z +Ġvulner able +ĠBas ic +åıĺ å½¢ +çıŃ çīĻ +ĠB ru +Ġcomp ounds +Ġconsult ation +H er +re place +Ġact ively +ĠDe ep +Ġch unk +Ġdifferent ial +U C += self +m art +Ġwh ilst +Ġaccept able +ol n +Re lease +Ġcapt ured +cy cl +Ġbr ush +Ġ µ +Ġstock s +et ition +( arg +_n o +Ġearn ings +s ite +Ġst ats +ä¹ĭ è·¯ +Ġcomp ound +Ġhad n +à ª +ĠEl izabeth +- content +èį Ĩ +_ request +å® ª +nt il +Ġs lic +è¹ Ī +text bf +èº ¯ +Ġcam eras +Ġpe pper +å´ Ķ +èĭ¥ å¹² +Ġqu eries +çļĦ人 çĶŁ +- form +æŁIJ æŁIJ +ĠO cean +E Y +èĩªåĬ¨ åĮĸ +G T +ĠTh read +Ġre leases +ĠSt orage +ĠMot or +ĠLeg al +oun ces +ĠS ong +.l ist +ĠArch itect +两 ä¾§ +é«ĺ äºİ +_ options +t ra +åķĨ å®¶ +Id x +认 çŁ¥ +ous ing +Ġneg lig +an ches +æĶ¯ æŀ¶ +ffic ients +rop ri +审 è®® +ãĢĤ ä½ľä¸º +equ ality +ç µ +å¤ļ åħĥ +ĠAp pe +? ) +Us ers +Ġcreat ivity +Ġc ust +Ġnull ptr +D avid +Ġdef ines +éĢ Ľ +ï¼Į çī¹åĪ«æĺ¯ +ter min +v i +ï¼Įè¿Ļ ä¸Ģ +ĠSc ient +Pro p +E F +ĠO ak +w t +Ġcomp osed +.r un +Point s +ak i +çĥ · +ï¼Į æīĢ +è¡ Ķ +Ġful fill +åĩł åįģ +æIJ ı +à ² +âĢĿ ) +Pack et +ç¦ı åĪ© +ile t +Ġin clusion +å·® çĤ¹ +. height +Ġsuggest ing +Ġd f +ĠC ut +Ġfacilit ate +æĬij åζ +ç¼ĸ åζ +èİ·å¾Ĺ äºĨ +åĩº åĶ® +AR CH +Ġsumm ar +Des ign +Ġz ip +Ġdispl ays +, åĽłæŃ¤ +Ġk in +out ing +En able +Ġ. 
= +ox y +æĦ¤ æĢĴ +Ġtim ing +å¤ĸ åĽ½ +Ġnerv ous +ãĢĤ èĩª +Ġmind s +åĵį èµ· +轨 éģĵ +ag g +å¾Ĺ çŁ¥ +s end +æĭ¿ åĩº +ï¼Į çĦ¶èĢĮ +Ġcogn itive +èIJ¥ éĢł +Ġcas ual +Ġcap s +Ġy oga +Ġaccom p +S ingle +åıijçĶŁ çļĦ +ï¼ĮéĤ£ äºĽ +ĠC P +IN S +Ġapprec iated +åŃĺåĤ¨ åύ +Ġposs ession +é¡¶ éĥ¨ +Ñ ħ +Ġl on +å£ ¶ +ĠBAS IS +Ġburn ing +ĠA h +èĥ º +b ec +书 æ³ķ +ç¾ ¡ +Ġref used +Ġ å°± +çĽ ¼ +Ġdriv es +ĠG as +ord inary +P refix +ĠCON TR +ic ator +æīĭ ä¸ŃçļĦ +\ pi +**** ** +ĠJ ordan +Ġvot ers +Ġad mission +ç§ © +Ġtransl ate +h ops +Ġsampl ing +(' / +En vironment +Ġg ay +ĠBet ter +Ġp ixel +look ing +X ml +ç Ģ +ĠM iami +Ġeng ines +A udio +çĶ· æĢ§ +Ġcult iv +身 ä¸ĬçļĦ +OL OR +çļĦ å®īåħ¨ +广 西 +Id s +âĢĿ çŃī +容 éĩı +о ÑĢ +ĠP ain +Ġve c +Ġimplement ing +çļĦ çĬ¶æĢģ +Ġblog s +r ays +ĠApp ly +{ b +th s +ï¼Į 两人 +ï¼ ij +cons in +ou st +ç¨į å¾® +Ġassum ption +ond s +åĴĮ å°ı +Ġcharacter istic +s ave +l ings +Ġhe m +p ot +å¤ĸ è§Ĥ +Ġsh apes +Ġrob ot +( pro +Ġsleep ing +Ġm igration +( uint +å½± çīĩ +æŃ£ 好 +ç»Ļ èĩªå·± +orne ys +Ġpl asma +Ġreg ime +Ġdifferent ly +_id s +å½ ¦ +ï¼ĮéĤ£ å°± +_ header +Ġthrow n +ï¼Įåıį èĢĮ +Ġlabor atory +ĠC V +Ġge ometry +èİ ¹ +i ating +ĠH istor +Ġimportant ly +qu ot +çļĦ éľĢæ±Ĥ +ä¼ł å¥ĩ +ä»ĸ 说 +è°ĥ çłĶ +Ġrece iver +ĠTh ink +ĠN othing +ï¼Į èµ° +he ll +m aster +ĠA udio +Ġun likely +Ġbed s +æĹ© æľŁ +Ġpurch ases +ç¨İ åĬ¡ +ĠW HERE +Ġdefend ants +å®ŀ çļĦ +æĬ¬ èµ· +Ġcollect ive +C V +Ġcult ures +Ġt ons +Ġsche ma +/ > +Ġdel ta +ãĢ Ķ +Ġrough ly +Ġn g +Ġview ing +ï¼Į éļ¾éģĵ +Ġexplo res +å¯Ĩ åĪĩ +sh ared +Ġcol lections +Ġcert ification +Ġr anging +ä¸Ģ 段æĹ¶éĹ´ +d evice +Ġh anging +Ġstru ck +ant e +Ġp uzz +Ġple asant +ĠÎ º +Ġassess ed +ä¸ī 大 +åıĺ éĩı +ĠOver view +ĠEnvironment al +F ire +ãĢ ķ +åĩº è¡Į +ĠF low +ab a +ĠUS D +riter ia +Ġb omb +éķ Ģ +ct rl +åį Ĵ +D i +Ġrespond ed +没æľī 人 +ï¼Į 说éģĵ +x c +è¿Ļä¸Ģ 次 +Ġexhib ition +èµ° åİ» +ï¼Į 好åĥı +åijĬè¯ī ä½ł +对 åħ¶ +ï¼Į åıij +Ġcar rier +pl ane +. 
List +æĻ¯ è§Ĥ +, c +PH ONE +ĠJ am +ä¸ĭ è·Į +离 å©ļ +g ood +Ġhor izontal +åĮ ł +Ġdesk top +ĠR T +çĶŁ æĹ¥ +ä¸Ģ 大 +Ġco aching +Ġsh irt +èIJ½ åľ° +å® Ľ +ĠD ays +oc hem +Ġs wit +.s end +Ġgener ating +N ormal +ĠSt ories +Ġcontract or +_ equal +W rapper +.t ime +em ia +, è¿Ļæł· +om er +Î ¸ +Ġgold en +o ft +ĠG ard +unn el +Ġs b +, ä¼ļ +++ ++ +Ġrestrict ed +è§ģ éĿ¢ +Or igin +å®¶ å±ħ +è¡Įä¸ļ çļĦ +Ġflow er +æİ¥ åΰ +ĠCom b +Ġm il +Ġsm ell +Ġmac ro +î Ĺ +æķĪ çĽĬ +ä¸Ĭ æľī +Ġthere by +Ġtri es +Ġmet ers +Ġl ung +Ġbank ing +è¾ IJ +Ġvolunte ers +Ġinf lation +ç͵è§Ĩ åī§ +âĺ ħ +Ġdestroy ed +ĠF inal +å·® è·Ŀ +缸 ä¼¼ +Ġprem ise +/b in +ne ed +D b +Ġamong st +m ates +ĠDe ath +ĠPh ilipp +_V ERSION +ç¬Ķ è®° +aa aa +Ġc ursor +us al +Ġemphas is +ĠConst ruction +åĩł 次 +èī ĺ +e ff +Ġsur f +ĠL ady +ĠR ow +t le +Ġbe am +ä¸Ģ个 å°ı +.b egin +.st atus +å°± å·²ç»ı +ï¼Į æµ· +Ġt ournament +常 è§Ħ +æĿĢ äºĨ +ä¸į å®ī +èĤ¿ çĺ¤ +ä¹Łä¸į çŁ¥éģĵ +Ġround ed +Ġp ipeline +s pec +强 çĥĪ +ĠL IMIT +M aterial +Ġ( [@ +客 è§Ĥ +Ġrequ iring +ä¸ĭ æĸ¹ +ĠCreat ive +ĠG reg +Ġhe ter +Ġsepar ation +é«ĺ çŃī +-b it +s ervice +_r ange +åIJİ æľŁ +Ġpres erve +Ġsimult aneously +ĠM ort +ĠH ay +ä¸Ģ 项 +urch ase +_P ATH +Ġpr ide +Ġpil ot +éº Ł +ç͍ éĢĶ +ï¼Į åħ± +,å¹¶ ä¸Ķ +Ġorgan isation +åĦ¿ çļĦ +ãĢĬ åħ³äºİ +U CC +ï¼Į å¾Ĺåΰ +Pl ace +审 æī¹ +Ġt we +Ġdiv orce +ä¸Ģä¸ĭ åŃIJ +m ay +ĠHolly wood +Ġbut tons +å¯Ĩ 度 +n ed +Ġfing ers +Ġboard s +éĺ» æŃ¢ +Ġf er +âĢĶ and +D L +Ġt err +.st ate +} )$ +Ġpro xy +ock ey +è¡Į äºĨ +Ġinvolve ment +æī¾ åΰäºĨ +Ġbas eline +Ġc ul +ç« Ń +æĸ Į +Ġoper ated +å±Ģ éĥ¨ +æįŁ çĽĬ +ĠC SS +æĮ ½ +ĠC reek +è§īå¾Ĺ èĩªå·± +å¦ĩ 女 +irect ory +ï¼ĮæĪij çļĦ +天 空 +Ġimp ression +Ġ èĭı +é«ĺ æīĭ +ĠC ategory +æĮij éĢī +æĽ´ é«ĺ +Ġg oogle +Ġwood en +S W +Ġatt ending +Ġroll ing +ï¼Įè¿Ļ 次 +Ġd w +lo pe +, è¿ĺæĺ¯ +, ä¸ŃåĽ½ +Ġcons ervation +, p +ç´§ æĢ¥ +Ġsk illed +ï¼Į å®Įåħ¨ +æijĦ åĥı +ĠH ttp +new s +建 æĪIJ +ãģ Ĩ +åѦ å®¶ +S cope +Ġref lection +Ġapp et +ĠEn c +åīį åIJİ +V ec +b ad +Ġacc used +é¢ĺ 缮 +大 éģĵ +å¤ · +åĽ½åĬ¡ éĻ¢ +: - +èģļ éĽĨ +Ġr um +Ġhab 
its +满 äºĨ +z h +Ġauthor ized +Ġb undle +ĠStep hen +ãĢĤ ä¸ī +Ġd ont +ä¹Ł ä¸įèĥ½ +Ġcere mony +ä¸įæĸŃ çļĦ +讲 è¯Ŀ +au c +valid ate +Ġw ise +åĩĢ åĪ©æ¶¦ +çĵ £ +Ġreve als +Ġ © +Not ification +/ re +æĺ¥ èĬĤ +em an +åİ ¢ +æ¢ ³ +为 æŃ¢ +D omain +ĠAd vent +åĨĽ çļĦ +ãĥ Ī +温 馨 +Ġth ro +Con vert +{ {\ +ĠSt op +åĨĴ éĻ© +te ch +Inter val +] ), +Ġve ctors +ĠL OG +ist ered +m ask +m enu +ĠEvery one +Ġprof its +Not ify +_ default +{ P +å¹´ åºķ +åĴ Ĵ +åĸ ĥ +Ġs ink +ĠL oss +Ġh ip +åģĩ å¦Ĥ +оР» +att ributes +Ġhost s +ah oo +ãĢĤ äºĮ +N AME +- he +åħħ满 äºĨ +ch ildren +C r +d ers +ç͵ åύ +Ġinf ections +M ulti +æİĴ éϤ +Ġ ä¸İ +ï¼Įæ¯ı 个 +ĠPh arm +u rop +m ove +elf are +Ġl oose +Ġphys ically +We ight +.d b +_b uf +ĠAdv anced +åĩ ° +Ġd ictionary +Ġres ident +pr ice +( out +çļĦ éĢīæĭ© +Ø ¯ +Ġvari ations +æķĪ åºĶ +Ġl ibraries +Ġcan vas +( item +å¾Ī ä¹ħ +Ġpsych ological +_L EN +ãĢĤ æĺ¯ +é¢ ł +éļı æĦı +N ET +ï¼Į åĨ· +éªij 士 +( request +Ġc v +Th rough +ĠM ind +ä¹ĭ åĬĽ +Ġhand led +çļĦ æĸ¹åIJij +Ġcur ious +ĠL inks +. Type +D iv +ĠH ong +Ġun like +带 æĿ¥äºĨ +Ġsold iers +Ġhard ly +Ġst ops +re nce +.d ebug +Ġrad ical +Ġs el +\ item +éĿĴ å²Ľ +Ġac ute +w ait +çļĦ 第äºĮ +\ < +_IN FO +ffect ive +ĠTh ird +é£İ æľº +大 èµĽ +_l abel +åĿļ å®ļ +ĠJ enn +æĥĬ åĸľ +ach uset +Int roduction +主 è§Ĵ +è®° ä½ı +Ġres olved +F amily +(f inal +èŀº 纹 +ĠM L +Ġe lections +è¿Ļ个 æĹ¶åĢĻ +ĠB at +ĠH appy +Ġvirt ually +NS String +L imit +Ġhar vest +Ġexpress ions +æ¼Ķ åĩº +ĠM ail +Ġcap ability +æľ¬ æľŁ +èĤ Ĩ +u ates +ï¼Į å¿ĥä¸Ń +end o +Ġv oting +æŀ ¢ +åı¯èĥ½ æĢ§ +P ut +achuset ts +åĩº ç§Ł +a ired +agn etic +Ġ/ >< +ĠWork ing +èĩ´ åĬĽäºİ +ĠCont inue +Ġfil ing +æ´» æĢ§ +èĵĿ èī² +ĠAust in +_c ol +çķ ľ +ID E +Ġam pl +è̳ æľµ +Ġentreprene ur +Ġdram a +yd ney +æĺİçϽ äºĨ +Ġe igen +åĪĩ åī² +èħIJ èļĢ +Ġbro ker +ap ters +Ġnut rition +. 
Test +ct l +h d +m ember +Ġtempl ates +æĢ ¡ +h ave +w indow +Ġf est +æ²Ļ åıij +off ee +çļĦ æľī +ï¼Įå®ĥ 们 +以 å¤ĸ +ç§į åŃIJ +M aster +.p df +Ġcool ing +.S ize +æİĴ æĶ¾ +ys ical +hent ication +Ġbal anced +mar ks +Ġconf lic +å¥ ¢ +ĠD R +B en +è°¨ æħİ +åº ¸ +åIJĮ æł·çļĦ +Ġh ub +ĠM E +大 约 +th rows +åĿ ª +身份 è¯ģ +Ġn orthern +ï¼Įä¸į ä¼ļ +被 åijĬ +åıĤ è§Ĥ +æĦıè¯Ĩ åΰ +Ġstom ach +Ġexpl os +è§Ħå®ļ çļĦ +ç£ · +ĠMin nesota +ĠM T +Sc ore +ï¼ĮæĪij æĥ³ +éģŃ éģĩ +èĶ ½ +Ġm ig +Ġ äºĶ +Whe ther +Ġpos it +t ail +ell ar +E p +Ġg ray +_f e +ä» Ĩ +åĽĽ åij¨ +å·¥ç¨ĭ å¸Ī +Ġcrypt o +ache lor +è¿Ľ å±ķ +æį٠伤 +è´« åĽ° +F older +éĴ ĵ +åĽŀ æĬ¥ +Ġtok ens +ast y +Ġw ider +ĠC ould +in v +s cription +Ġengine er +G F +_T IM +çĿĢ ä»ĸ +t ip +Ġatt ach +äºĨ ä¸įå°ij +æĬĬ æĪij +å®ŀ ä¾ĭ +ĠM ade +Ġoffic ially +ĠO regon +Reg ion +F ix +ä¿® çĤ¼ +Ġsw ing +Ġra cing +åĽŀ æĿ¥äºĨ +çļĩ åIJİ +ĠM C +Ġcell ular +åħ¬ 积 +æıIJ åĩºäºĨ +! ( +Ġtest ified +Ġmole cules +èĥ½ ä¸įèĥ½ +åī ¥ +_C O +ï¼Į ç¥ŀ +èĢģ åħ¬ +è´§ çī© +f rak +Ġattempt ed +èĭ¥ æĺ¯ +ä¸Ģ个 æľĪ +Ġperform ances +åĪĨ çļĦ +èı Ĭ +ä¸į å·² +åľ¨ å®¶ +ï¼Į ç»ıè¿ĩ +ĠDemocr ats +ĠD ifferent +è¿ľ å¤Ħ +产 éĩı +çļĦ çݯå¢ĥ +çħ İ +ethe less +E mp +unicip al +Ġox id +_l ink +Ġpar ad +Ġuser name +des c +al so +s ession +re tt +is ons +k l +ETH OD + £ +ĠK enn +Ġt ens +ast ed +Ġd ial +_DE V +' ; +Ġ ---------------------------------------------------------------- +Ġfrag ment +il ib +ĠLew is +Ġpri ze +ç¼ĸ åı· +ord ered +Ġad option +缺 çĤ¹ +çļĦ ä¸ĸçķĮ +å¿ĥ èĦı +åĴĮ 社ä¼ļ +èĵ ī +( user +æ¶² ä½ĵ +让 èĩªå·± +. 
content +Ġdis aster +æ©¡ èĥ¶ +çļĦ 缮æłĩ +UT E +h ire +_ create +ĠR uby +Ġnumer ical +av y +Ġsent ences +åĩı èĤ¥ +åŁİ éķĩ +éĽĨ åIJĪ +ï¼Į ç»§ç»Ń +Ġar c +ĠS i +ĠR NA +çīĩ åĪ» +w er +ge red +è¦ģ ç´ł +ey ond +è¶ħ å¸Ĥ +ou ri +Ġep isodes +æ¯ı æĹ¥ +R ender +aw ays +ĠSe attle +a uth +Ġas semb +eq ref +ç¾İ 容 +ï¼Įä¸į çĦ¶ +ĠT ools +ĠS ure +.s c +op ts +Ġg el +ĠD er +app ly +D ictionary +ĠA mb +Ġt ension +field s +æľŁ è´§ +ĠTur key +X ML +Ġsu ite +ond a +ĠV irtual +.g ov +Qu ick +C ost +åĺ ² +ãĢĤè¿Ļ æĺ¯ +æĿĥ åĬĽ +ry ing +Ġaccom pan +Ġreg ards +æµĵ 度 +- en +. ( +Ġphys ician +Ġn m +ç¬ijäºĨ ç¬ij +ĠN ation +é«ĺ 端 +Co ord +客 åİħ +客 人 +è¾ħ 导 +ĠB ridge +Ġpush ing +ï¼Įå°± åĥı +ĠSu ite +ass ign +k ind +con nection +, åħ¶ä¸Ń +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ Ġ +Ġs ectors +è¿IJ ä½ľ +çĹħ ä¾ĭ +í ķ +operator name +che m +éĻĽ ä¸ĭ +åĺ » +-h our +Ġcry stal +Ġlad ies +Ġf uck +Ġhospit als +Ġpromin ent +on omy +ï¼Į åĴ±ä»¬ +Ġven ue +Ġcharacter ized +éĢĶ å¾Ħ +Art icle +åĿ ł +un ks +Vol ume +Ġthe ories +å¸ ĸ +ï¼Į æķ´ +, éĥ½ +Ġtype of +çĭ¬çī¹ çļĦ +S ite +åıij èĤ² +N eed +Res earch +Ġpro x +_S Y +Ġlegit imate +ï¼Į ä»» +Ġp overty +Un known +LO C +ä¸Ŀ 毫 +{ E +éĨ ĭ +è´µ å·ŀ +Ġbase ball +Ġsh orter +ĠInt ellig +èĤļ åŃIJ +è§Ĩ 线 +è¦ģ 注æĦı +Ġ åħĥ +Ġdialog ue +C ancel +ĠOption al +Ù ĩ +Ġgr at +æ² ¸ +ess ment +o pp +-b ottom +ĠÎ ´ +仪 åύ +Ġleg acy +建 éĢł +Ġproceed ings +Ġmort ality +Ġass ault +çĥ « +ä¸į åİ» +Ġp od +ĠC ome +ĠBro ther +osp el +ĠF a +å°± 好 +çĨ ĺ +Ġfin ancing +anc ell +Ġb anner +or iginal +.d at +h ard +ĠS umm +çİ « +ãĢģ åľ¨ +pr ime +ï¼Į æ°Ķ +ĠCl inton +ĠP interest +Ġp ending +西 åĮĹ +Ġmoder ate +Ġcompet e +ä¸ļ 主 +çģ ¿ +îĹ ¥ +r ins +Ġthreat s +éķľ å¤´ +itud es +Ġres erve +æħ ¨ +å®ĮæĪIJ äºĨ +Ġsol o +r and +Ġdevelop ments +, s +om al +æľ¬ åħ¬åı¸ +Ġden omin +g it +åīį è¿Ľ +ĠCon c +Ġcomput ed +Ġgener ations +çļĦ 表æĥħ +un ate +ol icy +Ġhand les +ï¼Įä¹Ł 许 +su it +äºĨä¸Ģ åı¥ +éĻ· åħ¥ +ĠS peed +-w idth +è¾ Ĭ +Ġconsider able +om p +ĠB aby +éģµ å®Ī +次 æķ° +w orld +Ġwas hing +Ġf ur +ä»¶ äºĭæĥħ +Ġpr on +ĠN AS +ä¸Ģ 
ä¼ļåĦ¿ +çļĦ 缸åħ³ +Const ants +Ġpassion ate +æ´¾ åĩº +Ġsc ar +ou ds +Ġ' % +i ological +ä¸Ģèά çļĦ +å°¸ ä½ĵ +D iff +åĩĢ åĮĸ +Ġarg ued +Y P +âĹ ı +ĠEconom ic +ĠChall enge +ut o +Ġpro l +Comp any +ĠW ire +çľĭ æ³ķ +cont inue +ĠB L +ĠFoot ball +èĤ¡ä¸ľ 大ä¼ļ +æ°´ ä¸Ń +ĠIn cre +ï¼Į ç»Īäºİ +éĻĮ çĶŁ +åIJİ ç»Ń +æĹł çĸij +Ġn u +Ġdes per +Ġpredict ions +å¼ķ æĵİ +æŀģ åħ¶ +_ level +绣 æ²» +_N OT +è¾¾ æĪIJ +Ġbel t +, èϽçĦ¶ +Ġb reat +ĠT am +Ġl as +Ġpublish ing +ĠJ oin +l ah +Ġexclus ively +Ġr anges +å½ķ åıĸ +ĠD iet +~~~~ ~~~~ +on ed +Ġunivers ities +Ġ ï¼ļ +Ġinf inite +.Col lections +Ġrom antic +ä¸Ģ çϾ +Ġcharg ing +Ġreject ed +设置 åľ¨ +ĠPost ed +ĠBro ok +l f +ï¼ ı +ĠM ach +æĩĤ å¾Ĺ +Ġb ounds +Ġf ired +_d ec +äºī åıĸ +om as +ç͵ éĺ» +- name +Ġact ors +ï¼Į ä¿Ŀè¯ģ +est one +ĠL ev +ç͵ 梯 +Per formance +^ n +ï¼Įå¹¶ åľ¨ +æĶ¾ 大 +w rap +ĠFollow ing +, è¿Ļæĺ¯ +) ? +åİŁ æĸĩ +.c all +c ategorized +级 åĪ« +æIJº 带 +ãĢ İ +ty pename +W M +AG ES +ï¼Į以 åIJİ +- I +é¥ ² +ĠMethod s +åĺ´ éĩĮ +Ġg allery +iss a +ĠS aint +çIJĨ äºĭ +çĶ· çĶŁ +åįģ åĩł +end ors +å¸Ī èĮĥ +-c alled +è¡ · +åıijçݰ äºĨ +Ġcontact s +ol o +ig ious +Ġcrypt oc +åīį æĿ¥ +em ies +æī« æıı +S ummary +ogen ic +Ġ ï¼İ +æ¡ © +im estamp +è¯ķ åĽ¾ +}, \ +Ġconf used +Ġl un +ĠThe atre +Ġutil ized +Ġc ups +Ġs orts +çļĦ ç²¾ç¥ŀ +ä¸į æ¸ħ +è¿Ŀ åıį +; & +ĠY ears +Ġlock ed +Ġg est +Ġtheoret ical +Ġremark able +am ination +æĺ¯ä¸Ģ 款 +éĩijèŀį èµĦ产 +Ġc her +Sk ip +èĪ ± +ĠO s +ir able +t l +================================ ================================ +Ġtim ely +Ġt ied +grad uate +æİ ı +çļĦ åİĨåı² +Ġp ython +çĥŃ éĹ¹ +Ġr uling +Ġrece ipt +und ry +Ġsy ndrome +ãģ ĵ +å®Į æ¯ķ +as ync +Ġse am +Ġal ignment +ï¼ĮéĤ£ å°±æĺ¯ +Ġ第 åĽĽ +ĠDo ctor +奥 è¿IJ +Ġrece ives +, âĢĻ +T imer +å± ł +Ġcan nab +Ġarbit rary +åĽ½ çİĭ +Ġnot ion +ache l +Ġc ra +" We +西 çıŃçīĻ +ĠGr ant +Ġconfig ured +说 å¾Ĺ +æ² ¾ +Ġphotograph s +k wargs +ĠC reated +ĠF IG +h is +ç²¾ èĩ´ +Ġse gments +( V +Ġscen arios +{ F +ĠD ave +re r +AC C +è° ħ +çļĦ çłĶç©¶ +åĮ» å¸Ī +( len +ĠV ice +Ġen orm +è¦ģ åİ» +ĠD AM +- comp 
+åıij å°Ħ +éĥ½ å¸Ĥ +ERR OR +ĠE arly +m ation +åİļ 度 +Ġmed ian +Ġmand atory +Ġdesign ers +è¯ļ ä¿¡ +b ie +Î Ń +Ġrel ate +igr ants +ĠLead ership +éĺ² æ²» +. check +Ġ ä¸ĩåħĥ +k ers +f p +Ġco operation +lic ense +ĠL at +Ġaltern atives +失 åİ»äºĨ +ĠðŁ ĺ +èĨ ¨ +çļĦåŁºç¡Ģ ä¸Ĭ +ĠAuthor ity +Ġpat ent +b ed +ne ver +Ġdownt own +å±ķ è§Ī +åı· çłģ +åı£ 罩 +åįķ 纯 +_ client +out ube +çļĦ æľįåĬ¡ +Ġdifficult ies +, d +ĠC ash +äºĨ è¿ĩåİ» +Ġvac uum +Ġsing ing +Ġsub set +åĽŃ åĮº +. trans +Ġmass age +-b ox +ĠChrist ians +çļĦ åŃ©åŃIJ +P rivate +ä¸Ģ çĶŁ +ad get +ĠI ron +Ġg athered +Ġ çİ°åľ¨ +é«ĺ è´¨éĩı +çļĦ åĪĨ +Ġmount ains +Ġannounce ment +el ve +æĸĩ èīº +et ooth +- ray +çļĦ ä¼ģä¸ļ +Ġind ices +Ñ ĸ +h l +ĠVis ual +èĥ½ åľ¨ +g rid +çIJĨ è´¢ +Ġman ually +ä½ £ +Ġcomp aring +Ġep id +erc ise +Ġmark er +ĠIm ages +er os +t p +å¿ħè¦ģ çļĦ +Ġautom ated +b ul +G reen +id o +Ġco h +æĿĥ å¨ģ +åī¯ ä¸»ä»» +Ġdro ps +ãĤ ī +Custom er +ï¼Į æ¸ħ +/ R +." " +ick ing +Ġexc it +Ġhold er +W ait +Add itional +T YPE +F B +æłij èĦĤ +Ġmotiv ation +èµĦ è´¨ +éĢĢ ä¼ij +éĢı è¿ĩ +天 天 +ä¸į åıĺ +çª ĥ +ãĢĤ éļıçĿĢ +æĴ ° +ĠE ar +Ġdec ay +å·²ç»ı æĺ¯ +è®® æ¡Ī +Ġdist ribute +èµ· çļĦ +Ñ Ĩ +太 å¹³ +åĽ¢ ç»ĵ +w ear +us cript +Ġse vent +ä¸Ģ çķª +U ri +( E +Ġexhib it +Ġk iss +åŁİ çļĦ +Ġadjust ed +åĨį 说 +ï¼Į 车 +åģ¶ å°Ķ +ĠTreat ment +Exp and +VAL ID +éħ µ +Ġde ar +æ¶² åİĭ +Ġins pection +ä¸Ģ æĹ¶ +Ġe q +ĠR ome +Ġlimit ation +ish op +c ence +åĨ³ å¿ĥ +Ġident ifier +pect ives +Ġindepend ently +顾 éĹ® +ĠS PE +ï¼Į èµµ +Char acter +{ n +: [ +èħ ķ +ç±»åŀĭ çļĦ +Ġmeas uring +.re place +Ġstead y +Ġu m +Ġwor ship +) +ow ered +ãĢĤ æľĢåIJİ +Ġland ing +åħħ è¶³ +çļĦ åı¯ +-g roup +Ġed ited +Z ero +å¨ ¶ +æŀĦ éĢł +ĠProm ise +igen ous +ĠP ed +è¾IJ å°Ħ +Ġcoe fficients +èĢĮ åİ» +.sh ow +ens itive +ĠSp ot +rec ated +Ġsing er +Fe atures +Ġst abil +Av ailable +f ound +rop ical +Ġce iling +The me +SE D +è¿Ļ 两个 +p assword +{ H +Ġres il +Ġc yl +çĿĢ æĢ¥ +æ³ ¼ +f b +V ery +çĹ Ĵ +äºĮ 次 +Ġrat ings +Ġadv ised +L ead +èĪ ħ +) | +éħ ¿ +çļĦä¸Ģ éĥ¨åĪĨ +è¾ Ł +Ġcomp leting +Ġauthent 
ic +ack son +ĠJ ane +Ġcur riculum +D an +ĠL E +åĩºçݰ çļĦ +Ġen abling +_ J +Ġ ä»ĸ们 +ĠCov id +Ġl ying +pp y +Ġm ari +Ġdi pl +Ġexcess ive +_ ind +ĠC opy +Ġchemical s +ï¼Į æ·± +Ġst amp +å¦ Ħ +Ġmodel ing +ĠCon vert +Ġcoron avirus +ĠDeterm ine +çĭ Ń +_ content +说 æĪij +ç½ ķ +_f rame +Object s +on ing +end ants +为 人 +_ update +Ġmathemat ical +E st +åįł æ¯Ķ +Ġsurg ical +t ures +( is +Ġwal ks +Ġencour aging +Ġspecial ized +Y eah +æĺ¯ 被 +T om +ĠMar c +åıij æĶ¾ +Ġgen re +ä¸į å¿ħ +ç¾İ丽 çļĦ +Ġpopular ity +Ġ èĢģ +Ġar ise +on ut +èĥ½ åIJ¦ +r h +W D +Ġrout es +} ; +g ba +fl ag +Ġ@ " +ĠL yn +ĠSt ay +Ġcy cles +Ġin equality +ĠRest aur +Ġpast e +èĢIJ å¿ĥ +ju ana +st all +Ġche aper +Ġprec ious +åħ³éĶ® è¯į +bour ne +_VAL UE +ĠN orm +Ġcoupl ed +Ġint ake +çİ© ç¬ij +Ġ èµµ +as ive +Ġw rt +ä¸į ç¦ģ +æ±Ł æ¹ĸ +_ format +åĬł æ²¹ +Ġg ang +ou ver +ä¹Ł 被 +ĠAl ice +Ġapp le +N ECT +Ġcabin et +in ely +[ index +å½¢ 容 +æĪĺ åľº +Ġpl ates +宿 èĪį +oen ix +课 é¢ĺ +ac ular +西 åįĹ +ĠMat hemat +å½¢ çļĦ +ĠGl ass +èĩªèº« çļĦ +ä»Ģä¹Ī æĺ¯ +@g mail +ĠR ange +ĠC I +Ġcour age +own ers +ĠA bs +. Ed +Ġ 人 +ab ama +Ġs ender +å¾Ĺ 以 +éħį å¤ĩ +E duc +Ġbl ame +Ġ) ; +ĠB B +ĠAll en +Ġl ands +. query +P air +Ġincor porate +ï¼Įä¹Ł æľī +Ġv on +ĠIndian a +. insert +Ġcur ves +Ġgrad ient +Ġsurround ed +{ array +ĠSchool s +M ill +æijĩ äºĨ +Ġcor rection +Ġlaw yers +Ġemb race +Ġp ixels +Ġattempt ing +Ġag enda +.com mon +Ġins pect +ãĤ Ĭ +qu ire +Advert isement +re ach +_ load +Ġp H +G P +ï¼Įä¸Ģ æĹ¦ +Ġconsist ing +ĠB an +ot imes +Ġint u +ãĢĤ ä¸ŃåĽ½ +Ġreli ability +è§£ çŃĶ +Ġbr anches +Ġl l +Ġdiscrim ination +ap or +ĠDis play +pr imary +ï¼Į 尽管 +ĠAl ways +âĢľ ä¸Ģ +. 
format +ï¼Į éĿŀ +Ph i +Ġprom ised +p atch +ĠR ail +{ N +ĠC F +Id entity +ï¼ī ï¼ļ +gn ore +è¦ģ æľī +Ġindic ator +ĠMal ays +own ed +S igma +é¹ ħ +æĺ Ķ +ä½ľ æĸĩ +å°Ĩ æĿ¥ +æ¡ IJ +åħ¬ æ°ij +æĿ¡ ä¾ĭ +ä¸Ģ éĥ¨åĪĨ +缸å½ĵ äºİ +ĠS ant +è¶ Ł +v ia +çºł 纷 +Ġs ync +ï¼ī ï¼Ľ +Ġresc ue +Ġparticip ant +éĢłæĪIJ çļĦ +Ġeleg ant +çIJĥ è¿· +Ġsmart phone +主 æ¼Ķ +æĺ¯ 大 +Ġun p +Ġg auge +ä¾ £ +ĠH u +Ġcompar able +ĠN ations +File Name +åĪĨ æķ£ +Ġdiagn ostic +è¡¥ è´´ +ä¼ļ éķ¿ +æķ · +æŀ ķ +Ġpl acing +å½ĵ äºĭ +Ġbur st +åħ¨ æĸ° +B oth +There fore +Ġadj acent +c ategory +ĠB arn +è¿ij å¹³ +ï¼Ł " +çļĦ ç»ĵæŀĦ +Ġad vers +åIJİ æĤĶ +Ġc otton +åĩı è½» +Ġmat ched +ost ics +rict ion +Ġrecommend ation +ag an +ex pr +UP D +ag le +um bers +åĮĹ æĸ¹ +Ġgluc ose +ĠShe ll +Des pite +L abels +Ġreal istic +Ġt ar +Ġthe sis +ĠB R +- O +é© ³ +ick ed +Ġtem pt +æĥħ çļĦ +æĪª æŃ¢ +c ellent +Ġs ne +çĥŃ æ°´ +çŃī çļĦ +ï¼Į æĹ© +Ġvert ices +å« Ĥ +ĠF loor +åįł æį® +ï¼Į以 便 +Ġsc oring +ĠJ ay +( index +Ġcol ours +æ² § +Ġt v +bl ue +, æĹł +èµĽ äºĭ +, 对äºİ +éĺ IJ +Ġ åı¶ +. 
equal +, åħ¶å®ŀ +C md +æĢİä¹Ī ä¼ļ +ï¼Į 两个 +ĠAtl anta +ĠMass achusetts +å»¶ éķ¿ +og y +缺 éĻ· +éĶ ¤ +å¼ķ é¢Ĩ +ol ine +è§ģ äºĨ +Ġin ev +Ġis instance +b us +Ġem ission +_f unc +æĸ¯ åĿ¦ +ĠM enu +Ġchar ity +Ġathlet es +W ater +at on +ĠS erial +ĠS TR +ĠAn swer +ĠKore an +ĠD raw +Ġsk i +Te am +á º +ru p +_N UM +N Y +âĹ ĭ +Ġden ote +Z one +人æ°ij æ³ķéĻ¢ +Inst all +S ql +H ub +åı¯ä»¥ 帮åĬ© +ĠB irth +åĽŀ äºĨ +å®Ī æĬ¤ +ribut ions +éĢ ¾ +Ġbare ly +å¼Ĥ çļĦ +ï¼Į èĬ± +Eng lish +Ġa uth +apt ure +Ġgu ides +Ġt ent +Ġvent ure +è® ½ +Ġconsequ ence +Ġvol unt +_h andle +ĠB io +Ġsubst itute +è¿ĺ 羣 +ç² ¥ +ĠJe an +ãĢģ å¤ļ +Ġb ass +æĬĬ èĩªå·± +_ const +Ġcou ples +ur ches +ut ex +UCC ESS +Ġf ract +Ġind oor +A bs +ä¸Ģ 级 +èľ Ģ +æ½ ĩ +r ade +é¼» åŃIJ +ĠS A +ä¸įä»ħ ä»ħ +) ** +w riter +Ch apter +ä¹° åįĸ +ï¼ī åĴĮ +do ors +ac co +Ġdesign ated +L anguage +ä¹ł è¿ijå¹³ +_c ache +é ij +Ġsustain ability +Ġobl igation +æ¿Ģ åıij +ĠS olution +L ive +Ġa qu +æķij æı´ +ĠG ree +ç±» åĪ« +ä¸Ĭ æĸ¹ +ä¸į åĸľæ¬¢ +Ġf ool +éĺ¿ å°Ķ +' S +ĠEd ge +D NA +积 åĪĨ +å° ī +Ġcoe fficient +æľįåĬ¡ çļĦ +ul se +Ġaff irm +\ times +b urn +æĮ ¨ +Ġdis pute +æķ´ æ²» +éŁ³ é¢ij +- al +Ġst ere +æĭ ĺ +Ġth irty +a que +UR I +Ġlik ewise +身 æĿIJ +ä¸įè¿ĩ æĺ¯ +lish ing +æĹ¥ æĬ¥ +Ġbab ies +F urther +Ġtri angle +hab ilit +Ġbehavi ors +å·´ é»İ +-d ate +ĠDef ense +ASS ERT +Ġg rain +åĮ ª +Ġne ural +纳 ç¨İ +N ov +è¿ĩ 度 +add y +æīĵ çł´ +ar ma +i op +Ġflow s +ä»İ å°ı +Ġaccomp any +Ġinflam mation +ä¸į ä½Ĩ +Ġ å°Ĩ +ç͵ æŀģ +Ġth y +Ġ 以 +G od +æľīä¸Ģ 天 +l on +Ġtiss ues +Ġany body +C ent +Ġf s +n ut +R P +ĠF ilter +åįĹ æĸ¹ +Ġorder ing +Ġrestrict ion +.C ount +ĠG olf +Ġp ets +Ġag gressive +æī¾ ä¸įåΰ +åij ľ +Ġproduct ive +æ´»åĬ¨ çļĦ +(' # +Ġrec ru +- align +} : +ition er +ï¼Į èĩ³å°ij +ĠReg ion +ĠM aking +æĺ¯ æĪij们 +doc s +ĠR oss +web kit +ĠA ve +vent ory +Ġpaint ed +ĠC ool +( config +ar o +! 
\ +ĠA F +åij¼ åIJ¸ +Ġnum py +A IM +IS S +Ġc ord +ï¼Į é¡¿æĹ¶ +Ġch ips +ĠC ra +us hes +ç¥ Ī +re a +Ġv oted +Ġsubsequ ently +Ġdis closure +Ġaccompan ied +ãĤ Ĥ +Ġinter section +Ġsc ream +å®ł çī© +F actor +ï¼ģ " +æĹ¥ èĩ³ +æĤĦ æĤĦ +Ġd urable +ï¼Į åĨħ +æĿ ı +åħ¸ åŀĭ +è¿ĩ 头 +éľĢè¦ģ çļĦ +ĠRec ords +ï¼ļ ãĢĮ +æľĢ åĪĿ +Ġt ears +ĠA st +å°ģ éĹŃ +æŃ¤ åIJĮæĹ¶ +å¤į åζ +ï¼Į æĭ¥æľī +if act +åı ® +çĿĢ å¤´ +ãĢĤ åIJİæĿ¥ +at ers +åľ¨ åľ° +ï¼Į ä»Ĭ +ä¹ĭ éĻħ +Ġshort ly +åĮĸ åIJĪçī© +ĠH ind +Ġpart ially +am ount +åĽ¾ä¹¦ é¦Ĩ +好 åIJ§ +ãĢĤ ä»ĸçļĦ +æĹ ¬ +Ġr ug +Ġarr anged +B undle +Ġpublic ly +åĭĩ æ°Ķ +å©´ åĦ¿ +_ Y +empl ates +F il +Ġ éĩij +Ġpref erence +ĠE C +B al +ĠU V +Ġre wards +ĠH art +æł¼ å±Ģ +Ġform ats +Ġref riger +ĠNot es +g ency +ric ts +els on +Ġvis itor +Ġaddress ing +enc il +éĢĤ éĩı +- ph +Ġopt s +æĭ IJ +åIJ¬ çĿĢ +ĠEX P +Ġloc ate +ï¼Į éĢīæĭ© +_ def +A ir +\ to +ĠL arge +å¡ Į +Ġprom ises +åĨ¬ åŃ£ +lu x +.Error f +SS ION +æľīéĻIJ 责任 +Ġoverwhel ming +ab ases +æį ŀ +âĢĿ ï¼Ī +ç¾İ 好çļĦ +Ġbar rier +! = +f ly +ä¸ĵ é¢ĺ +ĠAtl antic +Ġport able +R oute +p aper +ag ra +ç² ¹ +çļĦ æĢģ度 +so ck +) åĴĮ +(' . +ĠL ot +b ot +ãĥ « +ĠH o +è¿ĩ åIJİ +ĠRep resent +Ġport ions +ç»ĵ ç®Ĺ +May be +ä¾Ŀ 次 +ä»» æĦı +ĠE L +ĠP ear +Ġtable t +Ġclin ic +R andom +çŁ ® +ĠPh D +IC T +ï¼Į æıIJä¾Ľ +Ġdram atic +ü r +ç¨ĭ度 ä¸Ĭ +ĠMin i +_ IM +æĬĬ å®ĥ +ĠER R +ĠVictor ia +ĠReg ional +T ech +ĠMary land +é¢Ĩ åıĸ +è¿Ļ æīį +con n +AN GE +å¹² æī° +Ġequ ality +é«ĺ åİĭ +äºĮ 级 +V ENT +å· ¢ +id al +Ġlist ener +m ont +Ġext ensions +A xis +Ġimm igration +èĪŀ è¹Ī +Ġbu cket +Ġtra iler +ĠQu ant +Ġoblig ations +æħķ 容 +è´¦ éĿ¢ +_h ash +æĺ¯ 没æľī +Ġlik elihood +ĠR oll +-st ep +Ġre pository +Ġpow ered +ï¼Į èİ« +Ġco pper +åķ ¸ +ï¼Įä¸į å¾Ĺ +éļ¾ å¾Ĺ +. 
object +Ġy eah +Ġrep airs +Ġoccas ions +Ġscr atch +Ġle ak +Ġdescrib ing +m ate +m c +Ġtrem end +emp loyment +ĠFe ature +ĠG M +身 çļĦ +plic ity +ĠC lose +ç§į ç§į +LO W +Ġacknow ledge +éĿĴ å°ijå¹´ +Ġdesign ing +Ġat om +B egin +Ġvit amin +Ġwhe els +ot ypes +cre ated +w p +=" ../ +Ġs li +åı¯ æĥľ +Ġsp oken +Ġhe x +Ġ åıª +æĪ¿ 产 +æĺİ æĺİ +_p ack +åħ±äº§ åħļ +. annotation +ĠH old +.L ength +Ġinv ari +_r ate +Ġj s +çľ¼ 泪 +ĠHarr is +ĠT E +Ph ot +ĠPro b +/ G +result s +ĠInvest ment +z o +Ġencour ages +å®¶ ä¸Ń +comm it +- only +Ġsub mission +éĺ³ å¸Ĥ +" It +_arg ument +Ġcar pet +å³ » +AD ER +ĠMe eting +Ġb ot +_D IS +å¿ĥ æĢģ +Ġv ine +ap on +Ġtrans mit +_ qu +ĠRepublic ans +Ġsch ol +ut ory +d ots +èĦ¾ æ°Ķ +, åį³ +ĠInit ial +ĠMo ore +Ġfe as +her lands +Ġhit ting +è¶ĬæĿ¥è¶Ĭ å¤ļ +Ġ* , +unn els +Ġthe rapeutic +ĠI owa +ĠEnter tainment +Ġcon secutive +_C ODE +å°ı åŃ© +Ġcannab is +大 åѦçĶŁ +ac o +ĠAf ghan +èIJĿ åįľ +Ġ اÙĦ +Ġqual ify +Ġp izza +æķ°æį® çļĦ +ĠEN D +æĬķ 票 +ca ption +{ B +sequ ently +ĠW E +èĦ± 离 +ine craft +_t ypes +Ġinf ected +Sub mit +Ġmin istry +Ġgar lic +åıĸå¾Ĺ äºĨ +Ġb lo +Ġstake holders +åıij è¾¾ +ï¼Į é£İ +ĠSim on +ï¼Įå½ĵ æĹ¶ +Ġfasc inating +sk ip +Pl atform +æ¼Ķ åͱ +ĠDe al +rypt ed +åįĬ 天 +id i +ĠAlex ander +Rend erer +ä¸Ģ ç³»åĪĹ +Ġcorrespond s +J ul +con c +al g +Ġwra pped +d n +Ġk g +atter y +Ġconfig ure +J an +æ½ Ń +Ġco in +ĠD oc +Ġaccompl ished +å°ij çļĦ +o T +åĩº ä¸Ģ个 +åĿĩ 为 +æĹłè®º æĺ¯ +æĮĸ æİĺ +Ġhydro gen +èĢģ 大 +ï¼Įæ¯ı 天 +Ġin sect +Ġcor poration +ĠH ills +St atic +导 å¸Ī +Ġengine ers +Ġdom ains +Ġoutput s +add le +æĺ¯ çļĦ +ĠIndust rial +æ° ® +ĠPort ug +{ a +Stand ard +U F +æĢ Ķ +Ġ' # +es ign +Ġemp ower +Ġ' - +ĠNiger ia +ç¿ ģ +Ġmotiv ated +Ġher itage +å±Ģ éĿ¢ +att ed +ï¼Įä¸į çŁ¥ +Ġtrans cript +Ġpersonal ized +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠ +es y +ill ance +Ġfranch ise +ä¼ĺç§Ģ çļĦ +Ġmake up +Ġco inc +Ġcompl aints +ĠAl b +æŀ £ +åĺ ± +Ġtrad em +Ġdiscount s +ç͍æĪ· çļĦ +Ġc ited +Ġan ger +讨 åİĮ +y r +Ġh oney +ĠIM PLIED +å¤ı åŃ£ +ĠLIMIT ED +ĠPat rick +ä¸Ģ å¼Ģå§ĭ +èŀį 
åħ¥ +Ġanc est +Ġi Pad +.c lear +{ L +大 æĪĺ +ĠV AL +å¾ħ éģĩ +plic it +çĶŁ 涯 +å¤ļ å¹´çļĦ +ç²¾ å¿ĥ +åİ ķ +W orks +Ġsc ulpt +P F +m atic +Ġpro pri +Ġper ception +ï¼Įä½Ĩ ä»ĸ +Ġkey word +çļĦ åĬŁèĥ½ +- weight +çļĦ åŃĺåľ¨ +çļĦ éĢŁåº¦ +ï¼Į å¾Ĺ +d iff +Ġbe aring +Ġread ily +Ġwork out +ï¼Į 举 +åĩº åħ· +Ġflo ating +ï¼Ł è¿Ļ +R ob +çŀ Ĵ +éļ¾ é¢ĺ +, . +Test s +ĠS everal +å°¼ äºļ +init ial +ed er +sub section +Ġcompet itors +s ure +ĠW olf +æĪij ä¼ļ +æĥ³ äºĨ +% ãĢģ +-h and +大 声 +Up dated +å¸Ĥåľº ä¸Ĭ +éľĩ æĥĬ +Ġsens ors +l ayout +Ġfit ting +_ END +pr ises +Ġac ids +-l ife +Ġ å½ĵçĦ¶ +Pl us +ro se +模 åħ· +H aving +åħ·ä½ĵ çļĦ +çī© æĸĻ +ãĢĤ ä¸Ģèά +Ġextra ordinary +åĵª åĦ¿ +å°½ åı¯èĥ½ +Ġblock chain +ur face +å¢ŀ å̼ +Ġpoll ution +att ice +ä½ĵ åζ +æĬ¥ èĢĥ +èŁ ¹ +èī° éļ¾ +Ġcomp lement +Ġp se +Not Null +Ġd irt +è¿Ļ个 人 +_f unction +Ġto ys +in th +ĠCom mercial +è§£ éϤ +Ġcr imes +çĭ Ħ +ï¼Į éĿŀ常 +Ġmount ed +è¿ŀ å¿Ļ +çīĻ é½¿ +Ġdir ty +Ġhun ting +Ġm old +æ´» çĿĢ +ĠEV ENT +ĠB M +ĠCo ord +ä»İ æľª +æķĻ å®¤ +Ġb rack +And roid +Ġre ception +Ġqu it +Ġorganis ations +Ġp up +çļĦ å½¢å¼ı +好 åıĭ +ç͵ æ°Ķ +( text +çģ¯ åħī +ãĢĤ ä¸Ģ个 +ï¼Į 羣æĺ¯ +Ġout s +æķ° åįģ +Ġaffect ing +ĠFore ign +, m +h ang +å¾Ī é«ĺ +ĠS ydney +ĠE M +ar ound +_ co +å¿ĥ æĢĿ +第ä¸Ģ æĹ¶éĹ´ +is l +Ġdest ruction +ĠT el +Ġc av +ç«ŀ èµĽ +_W R +Ġн а +u ccess +ä»Ģä¹Ī æł·çļĦ +Ġle mon +r ans +没 äºĨ +ä½ł å°± +Ġmin imize +Ġintellig ent +u its +, v +åĪĿ æŃ¥ +, èĥ½ +Ġr ings +ĠC OM +Ġsh ips +F ill +Ġyield s +Sh op +cont act +b log +Ġform ing +Ġd t +æĺ¯ä¸Ģ å®¶ +Ġpursu ant +Ġvari ance +} ' +Ġe ager +Ġbe ef +on ical +ur red +Ġconv in +æ°´ æ³¥ +O F +nc ies +ath on +ĠP E +Ġf ut +ĠPar liament +éĹ» è¨Ģ +ĠS ex +] -> +ĠJew s +ĠSim ilarly +Ġaccur ately +åľ°åĮº çļĦ +ig it +Ġthread s +Ġvar ied +Ġch ains +æĭī çĿĢ +Ġtransform ed +纽 约 +Ġbelong s +Ġprogress ive +ç»Ļ æĪij们 +ãĢĤ 第äºĮ +ĠW ales +ĠD iam +Ġaccommod ation +ĠAg ric +Ġclust ers +ĠAdd itional +eh icle +ï¼Į 满 +, ä¸įè¿ĩ +ĠN ev +ĠD evice +Ġl iv +p res +_ action +ĠL ie +æ¾ ¡ +æł ħ +çĦ¦ èĻij +ir q +- order +: ' +: @" 
+="../../ ../../ +好 çľĭ +as sembly +ä¿ ¯ +ä¸ĸçķĮ æĿ¯ +cri ber +ä¸į 满 +ol k +Ġwhere in +C ity +ĠS EC +Ġfriend ship +istic ated +Ġ: ) +æĭ¿ èµ· +æĿ¥ åΰäºĨ +ï¼ĮæĪĸ 许 +Ġse al +ĠGover nor +å¼ķç͍ æĹ¥æľŁ +Ġr ated +Ġprom ising +const ruction +Al ways +-------------------------------- ---------------- +(t arget +åij³ çļĦ +沿 çĿĢ +Ġcont rolling +è¿Ļä¸Ģ åĪĩ +# else +äºĨ 许å¤ļ +-in ch +ĠR ol +è¿Ļ æĿ¡ +诸 å¤ļ +Att r +客 æľį +- int +z ero +ç͵ ç¼Ĩ +ï¼Į è¿Ľè¡Į +åĨ¬ 天 +Ġtox ic +ĠComp lete +ER Y +ĠP itt +, 大家 +st ock +be ing +-be ing +Ġret ired +Ġforg iveness +ä¹ĭ äºĭ +g al +Ġbreath ing +Ġrank ed +Ġv intage +èĢ ¸ +ex ception +å¹³ 常 +ï¼Įä¸Ģ åĪĩ +Ġin cl +Ġh ill +S outh +ï¼Į åIJĮ +ĠD om +ï¼Įæľī çļĦ +ĠIntern al +ĠMag ic +Ġsp am +Ġoccasion ally +ĠF ocus +Ġconv iction +Ġconf usion +ç»Ĩ èıĮ +on i +Ġtack le +ç¢ Į +çľģ 级 +ard ed +éĿĻ éĿĻ +ĠV eter +æĸ° æĹ¶ä»£ +Ġvary ing +æĦŁ åºĶ +aud i +Ġdro ve +ĠD utch +re ason +æĿĢ äºº +è¹ ² +ĠE c +åĩł åĪĨ +帮 ä½ł +ä¾Ľ ç»Ļ +Ġcar b +Ġh ired +管çIJĨ 人åijĺ +Ġdel iber +寻 æ±Ĥ +Ġjew elry +ER N +Ġmari juana +å§IJ 妹 +lah oma +F ragment +ĠM ission +ĠIn n +ĠR isk +, : +- item +/ $ +Ġd ip +ag ers +çļĦ åħ·ä½ĵ +ĠViet nam +èĢ » +代 è°¢ +b ib +ĠW ait +ç²¾ 度 +et adata +ĠB iden +ĠComp anies +[ ( +ä½ĵ 积 +Ġup set +ç§ijæĬĢ æľīéĻIJåħ¬åı¸ +& \ +Ġcr ushing +èı © +ĠG i +L ower +æįŁ åĿı +Ġrub ber +{ ( +vert ed +幸 è¿IJ +ä¼ĺ éĽħ +ï¼Į å¦Ĥä½ķ +stand ard +ĠMic hel +Ġpublic ations +LE S +Ġarrang ements +æµ· åįĹ +ĠG ift +å¢ĥ åĨħ +/ O +严éĩį çļĦ +D IR +mb ox +Ġdem o +ĠR ap +ch annel +am ous +Ġgr ants +(" # +ĠMat rix +Own er +__ , +- ad +Ġincorpor ated +R aw +" ], +åΰ ä½į +ä¸į ä¸ĭ +ï¼Į é»Ħ +el and +Ġfl ights +Ġsil ent +m other +åĢŁ åĬ© +ĠAd vis +ed itor +( () +En v +{ figure +G ui +as ures +ĠM ock +our t +d ale +Ġt ong +ĠE lement +Ġ åı· +Ġsing ular +rac le +Ð ³ +å¹¶ æľª +ĠDel hi +ĠPro file +Ġindepend ence +Ġj et +.c ol +Ġt ender +Ġinteg rate +M c +åºĵ åŃĺ +Ġp d +åħ¨ çľģ +Ġhe al +Ġblock ed +Ġdis rupt +å·² æľī +以 å¾Ģ +T x +ĠCom pl +G G +ĠÐ · +æĦ ļ +æĿ¥ 讲 +æ°Ķ è´¨ +ab e +C hat +åıĪ æľī +ãĢĤ åı¯ä»¥ +èĢĮ 
æĪIJ +ï¼Į è¿Ľä¸ĢæŃ¥ +оР´ +Ġn urse +书 ç±į +纪 å½ķ +帮 æĪij +äºĽ ä»Ģä¹Ī +Ġh ierarch +Ġcons olid +_ OUT +N E +ï¼Įè¿Ļ æł·çļĦ +éķ¿ æ±Ł +\ text +N ational +ĠSur vey +éŁ § +Ġinsp ire +ĠY outh +e in +ĠBen efits +Ġch ampion +åĽ½åĨħ å¤ĸ +ĠTechn ologies +B ro +: h +åĿ İ +çļĦä¸Ģ 端 +åħ¬ åĬ¡ +èĬ Ļ +ĠD im +_ query +H P +Ġdiscuss ing +åĽ¾ æ¡Ī +urre ncies +çľĭ ä¸Ĭåİ» +ĠAg ent +Ġwin ners +åºı åĪĹ +åī© ä½Ļ +per ature +Ġres ist +Ġspeak s +Ġm l +Ġphenomen on +ç» Ĵ +éĴ ł +AN K +Ġp ose +ĠCol lect +am az +Ġstart up +Ġres erv +ĠÎ ³ +æĿ ł +o ks +è¿ĩåİ» äºĨ +æĽ² 线 +ĠH um +大 å¤ļ +æİ¢ 讨 +æĿĥ éĻIJ +Ġdom inant +èĤ¡ å¸Ĥ +æĺ¯ä¸Ģ äºĽ +EM ENT +Ġsee ks +ĠPe ace +æıĴ åħ¥ +Ġf ears +rec ision +Ġh o +åħ¬ å¯ĵ +ĠH ur +åľ¨ éĤ£ +ä¼ ŀ +Ġsim ulations +å´ ĸ +ĠM oh +çļĦ 表çݰ +h line +ĠD rug +Ġwor n +ĠA bb +Ġ ä¸Ģ个 +V T +( cl +Ġsec ured +ce iver +, æ¯Ķå¦Ĥ +Ġbe ans +è¶³å¤Ł çļĦ +AT OR +ĠCommun ication +éĥ½ ä¸įä¼ļ +ĠP D +ç§° åı· +çļĦ 女人 +èĦij æµ· +_ST ATE +_fl ags +æļ´ éľ² +Ġdenomin ator +S en +æĹł åħ³ +ĠRes pons +æĢĢ åŃķ +ĠLook ing +Ġassum ptions +or izontal +an ia +.s ave +_G ET +ï¼Įçľĭ æĿ¥ +ç¼´ 纳 +.c ss +аР» +Ġnot ifications +Ġ åįķä½į +Ġcl ause +ir s +æĪij è§īå¾Ĺ +.c pp +_ view +Ġw ing +Ġr ust +ĠB us +ĠR aj +éĢ Ĺ +æŃ ª +Ġold est +, a +/ - +Ġben e +线 ä¸ĭ +èı ĩ +Ġins ulin +éĻ Ģ +æĭ ± +Com put +H appy +ĠT ennessee +en ue +éĺ² çģ« +Mem bers +sh are +ĠH al +M onday +ĠN urs +Ġnew est +西 äºļ +Ġprior it +å°± å¼Ģå§ĭ + Ĥ +Ġpharm ac +ä½ł æĥ³ +Ġdecre asing +éĴ¢ éĵģ +çļĦ æĥħ +ï¼Į åĪ©ç͍ +ĠHall ow +æī® æ¼Ķ +Ġend orse +L ow +Ġhuman ity +н о +æĢĢ éĩĮ +ĠS ession +åĬ© åĬĽ +th ur +ö r +ĠD J +èĭ ij +Ġsoc cer +å¦Ĥ åĽ¾ +eb ook +Ġcon ce +Ġend point +ĠRef erence +缸åIJĮ çļĦ +Ġam plit +ï¼Į æĹ¶ +åįģ è¶³ +B ool +Ġman uscript +-l ink +Ġfin ishing +åºĶ ç͍äºİ +ĠThe ory +è¿Ľ 度 +èѦ æĸ¹ +ex pect +Ġpr inter +Ġkey words +å· © +Ġpo etry +f ather +Ġdead line +æ´» åĬĽ +Ġe ars +åIJĦ 大 +M ichael +Ġfab ulous +Ġtext s +ï¼Į åįķ +exp ression +_ ACT +anc ouver +èĶ ĵ +å½ĵ 代 +Ġscal ar +ĠTra il +éķ¿ å®ī +vel ope +/ st +Ġprotect ive +ï¼Į æĹ¥ +éĢļ ç͍ +ï¼ IJ +_t itle 
+Ġmoist ure +èĢĥèĻij åΰ +-d ependent +Ä « +ĠS ay +Ġcook ed +ĠS ET +lo ve +, è¿ĻäºĽ +èł ¢ +Ġinter mediate +å¼Ģ 车 +æ¶Ĥ æĸĻ +_EN ABLE +çļĦ 对 +N a +ĠIss ue +ĠF at +ĠLa ure +ä¸ĩ åIJ¨ +( ` +ĠM I +ener ate +çļĦ åħ³éĶ® +ãĢĤ ä¸Ĭ +ä¸į çĦ¶ +ott a +ĠC rim +pe g +m n +ĠApp lications +Ġoper ates +ĠM and +Ġflo ors +ï¼Į äºĶ +v ation +æ£ µ +Ġcollect ing +_ values +ĠT yp +Ġbeg un +æĭĽ åĶ¿ +Ġn n +Ġworks hops +rb an +pt ic +æĬµ æĬĹ +_ Q +f ox +ĠR andom +at tern +Ġpre jud +ãĢģ çϽ +强 åζ +umb ing +Ġharm ful +ï¼Į æİ¥çĿĢ +èµ·æĿ¥ äºĨ +ĠF ellow +Ġdig est +ys ql +åħ¶ä»ĸ 人 +ĠS amsung +Ġg reet +ipher al +ä¼ł éĢģ +A li +Ġv iral +z i +çłĶç©¶ éĻ¢ +Ġb arg +çļĦ人 çī© +æĸ° èĥ½æºIJ +prot ocol +ç»ĵæĿŁ åIJİ +EE E +ï¼ĮæĪij ä¼ļ +ĠPart icip +g ame +ential ly +ĠL eft +ä¼Ł 大 +� � +ĠPro perties +Ġinc ub +_DE BUG +F riday +åı¯ æĢľ +oo oo +Ġred irect +: ` +ĠO cc +Ġint ra +ï¼Į æĮĩ +vis ible +æį ¡ +æıIJ èµ· +- trans +Ġsell er +Ġd ivers +aff e +åIJ ı +dom ain +Ġsett le +ĠD atabase +w riting +, å¤ļ +Th ose +ĠR ain +ĠB esides +ï¼Į æĶ¾ +æµ· åĨĽ +-min ute +el eration +è¿ĻéĩĮ çļĦ +ï¼Į æĹ¢çĦ¶ +ï¼Į ä¹ĥ +ĠI ce +W eek +Ġv endors +åıĸ 代 +西 éĥ¨ +) ^{ +è° Ń +ãĢģ ä¸Ģ +éĤ£ æĺ¯ +个 ä½ĵ +Ġcomplic ations +oles ter +Ġport al +æłĩ è®° +Ġclos ure +å¹¿ä¸ľ çľģ +ilib rium +Ġra ces +éij « +ĠE T +èİ ŀ +å½ĵ 天 +åĩĨ åĪĻ +åŁ¹ èĤ² +ag ine +ĠMat thew +æij© æĵ¦ +æī ģ +ï¼Į ä¿ĥè¿Ľ +Ġnot ify +ent o +/ a +_ if +Ġfl ip +Ġf et +Ġconsist ency +çķĮ çļĦ +ĠC E +Ġsubs crib +人 ä¹ĭ +ic ating +éŀ Ń +åı³ æīĭ +Com b +æĹ¶éĹ´ çļĦ +rem ote +çݰ代 åĮĸ +ï¼ĮæĽ´ æĺ¯ +ĠR id +Ġj aw +Ġwork force +ĠAr k +ĠB oot +åºĶç͍ ç¨ĭåºı +IN FO +Ġ" _ +ĠR ES +è¯Ŀ 说 +èļ ģ +Ġ é¡¹çĽ® +Ġplaintiff s +A merican +ĠC ry +Ġdefe at +ĠC ele +ĠPer fect +Ġextract ed +OUR CE +Ġhonest ly +çķĻ è¨Ģ +end l +x d +V E +ä¹Ł æĺ¯ä¸Ģ +ï¼Į åIJĥ +伤 åı£ +ï¼Į让 ä»ĸ +çݰ çĬ¶ +_d im +t f +Ġj ail +ĠÎ » +ï¼Į å°¤åħ¶ +å· ħ +Ġfund ed +ä¸Ģ åįĬ +- reg +ï¼Ľ åľ¨ +ps y +æĸĹ äºī +å½¢æĪIJ çļĦ +ï¼Į é¦ĸåħĪ +S peed +åĩº äºİ +_c md +ĠMan chester +è´µ æĹı +_ Get +æľīéĻIJ责任 åħ¬åı¸ +Ġvis ibility +ĠRE G +Ġ åħ¶å®ŀ +Ġdef ensive +Ġexpect ing 
+éķ¿ èĢģ +o il +ĠP ret +Ġp el +ç¬ ¨ +Ġre aches +Ñ İ +åıij è¨Ģ +off s +è¿IJ 转 +cc a +çĦ ī +è¿Ļ éĥ¨ +Ġp tr +Ġkn ife +åĩĿ èģļ +ãĢĤ è¿ĺæľī +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠ +ï¼Į è¶Ĭ +F ood +s ort +Ġdes ert +ĠMem orial +_P ER +Ġunn ecessary +Ġbre ach +å²ģ æľĪ +ynt ax +l ag +ï¼Į å¹´ +æĸ¯ çļĦ +éĢĢ åĩº +åĺ¿ åĺ¿ + ½ +Ġg host +ï¼Į åıĮ +æłij ç«ĭ +Ġauthent ication +详 æĥħ +ĠC offee +ç»Ī æŃ¢ +缸 å¤Ħ +Ġmut ual +çİ© æ³ķ +ĠAm endment +åݦ éŨ +ĠG il +log in +åĪĨ æķ° +ĠY eah +ä¸į æĦ¿æĦı +-s m +å¸Ĥ å§Ķ +稳å®ļ æĢ§ +L R +Ġsh o +å¯ ¡ +客æĪ· 端 +ess ed +æĢ¥ å¿Ļ +hel per +Ġcar ing +Ġpers u +Ġprevent ing +æ·· ä¹± +Ġin k +Ġcompl iment +åľ¨ å¤ĸ +ĠB ureau +è£ ´ +b p +Ġ< !-- +ä¸ī ç§į +è¿Ļ å°±æĺ¯ +ĠMon itor +è¯Ń æĸĩ +ĠProgram s +Ġsou p +ĠInter view +( list +åŁºç¡Ģ 设æĸ½ +ï¼Į æģIJæĢķ +Ġdistribut ions +Ġout line +Ġfo am +( url +忽 çķ¥ +Ġm eth +Ġt ends +Ġver ified +çľ¼ éķľ +ĠMor gan +_f ilter +Ġex cluded +Ġex terior +ĠS S +æ±ĩ æĬ¥ +Ġfall en +ĠInt roduction +ig ue +emp loy +ç³» æķ° +{ D +_RE S +ĠIN T +sh ape +Ġcon ve +Ġpro pose +ent ine +' \ +ation ally +ï¼Į æį® +ç³ Ĭ +ĠEd ward +.g ithub +o j +( response +çĥ¦ æģ¼ + « +{ Z +ĠClass ic +Ġoptim ize +_C HECK +注æĦı åΰ +Ġ æł¹æį® +Ġb other +Ġsil ence +大 æ°Ķ +u ity +W est +C ast +Ġang les +ï¼Į åĪĨåĪ« +ĠNav y +.m odels +åĨ ¶ +Ġrun time +AT URE +M ouse +.f irst +Ġcontinu ously +Ġdel ivers +ĠAd vert +äºĭ åĦ¿ +Ġsubstant ially +Ġrid ic +Ġc od +ãĢģ é»Ħ +P DF +Control s +ä¿¡ ä»° +ĠRep air +DE D +Ġsol ved +Ġdis charge +象 å¾ģ +Ġenorm ous +em it +oun ge +, 被 +An im +Ġto ilet +太 åIJİ +ion ed +æĹ¥ ä¸ĬåįĪ +d m +Ġadd iction +ĠM ode +Ġenjoy able +设计 å¸Ī +以ä¸ĭ ç®Ģç§° +æĸĩ çī© +çīĽ å¥¶ +åĪĿ ä¸Ń +> ) +ord on +ĠClin ical +Ġprodu cers +оР¼ +åľ¨ åľ°ä¸Ĭ +Ġrem ed +çŁŃ ä¿¡ +ĠOr ange +ĠR ate +Ġpro ps +åijĬ çŁ¥ +ĠT ok +åŁºéĩij 管çIJĨ +åĨ· éĿĻ +åĪĨ æ³Į +ĠM IT +Ġmark ers +Ġdo ctrine +ä¸į æŃ¢ +å®ŀ ä¹ł +S an +: = +ĠS K +ãĢĤ æĿİ +éŨ å¤ĸ +Valid ation +ĠH ad +oles ale ++ ( +æµģ çļĦ +ĠOx ford +ĠVol ume +Ġt ear +å¥ ¸ +Ġsusp ension +Ġhor ror +ap olis +ensure math +ĠH ub +edd ings +out ine 
+éĴ ¦ +Ġut ter +_t arget +éĥ Ĭ +åľĨ å½¢ +è¿Ļ åľº +ure rs +代 ä»· +comm ended +å¾Ĺ å¤ļ +ĠS olar +rif ice +ï¼ĮæĪij æĺ¯ +Ġa ug +che ll +Ġ ell +çѾ ç½² +å°± æĬĬ +com b +åŀĭ åı· +[ å¼ķç͍æĹ¥æľŁ +ĠJ ess +ãĢĢãĢĢ ãĢĢ +ul ates +è¿ĺ åĮħæĭ¬ +T ry +UR N +Ġdiscipl ine +Ġsc ales +åĴ ª +å¼Ģ çļĦ +æĮ « +交æĺĵ æīĢ +ï¼Į é»ij +c n +-l ist +P erson +æ£ į +æ£ ł +çĶ « +å±± åĮº +è¿IJåĬ¨ åijĺ +è°Ī åΤ +ĠCal cul +c ule +Ġagric ultural +C ould +Cl uster +è¯ķ è¯ķ +Ġrepl acing +ï¼Į没 æĥ³åΰ +L ib +Ġsh it +_ ip +Ġtarget ing +' } +Input Stream +Ġpros pective +Ġsh aped +ï¼Įè¿Ľ èĢĮ +æĸŃ äºĨ +_f iles +éĢīæĭ© äºĨ +ĠChampions hip +(b uf +æīĵ äºĨ +Enc oding +Ġimp osed +icens ing +Ġrock s +. u +çļĦ æ¶Īæģ¯ +ĠÐ ¼ +Ù İ +Ġgrav ity +ĠAd ult +æĮ£ æīİ +漫 çĶ» +ES C +H TTP +ĠDise ase +ï¼Į 积æŀģ +ĠP odcast +æīĢæľī 人 +ĠT ai +/ test +AL S +Ġtechn ological +ï¼Į èĦ¸ä¸Ĭ +ĠHolid ay +ak h +Ġ[ ], +_P R +Ġpass engers +ä½ķ åĨµ +b age +Ġch amber +E lect +art ed +ï¼Į æıIJåįĩ +é¢Ħ 约 +åįĩ éĻį +g un +Ġcar ries +in ters +åĩ¤ åĩ° +ä¸Ĭ éŨ +Ġcomb inations +ä¹ĭåīį çļĦ +it arian +Ġdecl are +Ġper ceived +ĠM ut +ĠD ictionary +Ġth umb +ĠF if +触 åıij +ĠBer lin +Ġloyal ty +ãĢĤ ä»Ĭ天 +ug g +emb ered +ĠR ub +Ġdef ining +ov ies +Ġw are +Ġregard ed +éĿŀ æ³ķ +-d riven +ç»Ļ 人 +ĠNever theless +ĠTem ple +ä¸Ģ åı£ +and ra +ĠPre vious +ĠImpro ve +Ġpolynom ial +Ġbene ath +sequ ence +Ġfis cal +éĺ¿ éĩĮ +N R +rit is +ĠHallow een +è§Ĩ éĩİ +éĺ² çĸ« +Ġflu ct +Ġsh ame +:b efore +ĠPro du +åľ¨ åĵªéĩĮ +Ġg ig +ĠJac ob +ĠCl aim +æĸĩåĮĸ çļĦ +å¹´ 级 +Ġco ins +Ġactiv ated +p io +($ _ +r ific +Ġin verse +ÃIJ µ +Ġcoll apse +线 ç¨ĭ +Return s +å©ļ 礼 +an ne +Ġo live +Ġdirect ors +æ© Ļ +éĩį ç»Ħ +Ġviol ent +Ġgun s +ĠD ebug +èĬĤ çľģ +t ask +_T H +_d is +, æĪĸèĢħ +æĿĥ çļĦ +éĿ¢ æĿ¿ +ig o +Ġd ying +Function al +Ġgr inding +Ġdeal er +arc el +ch ing + § +comm un +ĠCr usher +è´¦ åı· +Head ers +Ġaff airs +Ġd é +ç«ĭ 马 +ا Ø +èĮ Ħ +ĠR oy +é¡ ½ +rac ing +ĠG A +Ġcom edy +æijĩäºĨ æijĩ头 +Ġv oices +Ġde emed +æľī æĹ¶ +er als +第 ä¹Ŀ +N orth +Ġo scill +ĠM agn +ct ic +Ġrank ing +ï¼Įè¿ĺ è¦ģ +.r andom 
+å¼ Ĭ +Ġaccommod ate +ĠIslam ic +åıĤ èµĽ +ann on +ch at +âĢĶâĢĶâĢĶâĢĶ âĢĶâĢĶâĢĶâĢĶ +.com p +åĽºå®ļ èµĦ产 +ãĢĤ åħ¨ +åĸĿ éħĴ +ë ĭ +ĠC ow +Ġprov ince +Ġimplic it +_M EM +Ñ Ī +ĠT oy +ig est +ab ul +对 她 +Ġcl s +Const ant +ĠBy te +ä¸Ģ è¡Į +æĶ¶ åĽŀ +Us age +ĠC hen +ĠC raft +计 æıIJ +_t ag +G rad +(m essage +Ġarch ive +åħī æĺİ +Sm all +ĠR ot +( msg +Ġconvin ced +it i +Ġult ra +对æĸ¹ çļĦ +ç¡® è¯Ĭ +** ( +è¿ľ çļĦ +Ġsign aling +Ġal uminum +oo g +ĠCl ark +! < +w he +ĠM ovie +Ġexcit ement +å¼ķ åħ¥ +主 åĬŀ +åıij ä½ľ +äºı æįŁ +Ġdelay ed +è§£åĨ³ äºĨ +Y Y +为ä»Ģä¹Ī è¦ģ +Ġrefer ring +缴 å¾Ħ +ãĢģ æĬĢæľ¯ +è¾½ å®ģ +is ite +Ġsh ade +åıį æŃ£ +ĠCl imate +> : +Ġdepart ments +en ities +èĦĸ åŃIJ +ĠIntellig ence +ĠP os +ä¸Ĭæµ· å¸Ĥ +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠ +Ġt el +Å Ĥ +ĠG h +. U +Ġtrou bles +.util s +ĠTechn ical +ç ão +, 缮åīį +B inding +æĺ¯ åı¯ä»¥ +åѤ çĭ¬ +Ġin herent +r ison +Ġwal let +à ª +å°± è¿Ļæł· +Ġ\ ( +è¯Ĺ 人 +"} ). +æĪIJ åŀĭ +æĢİä¹Ī äºĨ +Ġ' @ +F X +ĠQu ery +æİ ł +æ¾ Ħ +co res +Oper ator +.b ase +åĿIJ æłĩ +ific ates +Ġsp ouse +sp ring +ï¼Į è½» +ç¼ĸ åĨĻ +Ġdo zen +æĽ´ 大 +ĠLe on +form ing +ĠL incoln +ï¼į ï¼į +äºĨä¸Ģ åı£æ°Ķ +ur u +Ġtrans cription +im en +ĠFl ash +ropri ate +ail and +è¿Ļä¹Ī 说 +æİ Ģ +Ġ 马 +è¿ĻäºĽ 人 +ount ered +åĵ ij +Ġgrow s +Ġatt orneys +å»¶ ç»Ń +红 çļĦ +Ġ èĩª +è·Ł ä»ĸ +æľĢ éĩįè¦ģçļĦ +/ index +ï¼Į èĤ¯å®ļ +Ġm ales +åı° ä¸Ĭ +B asic +LE TE +s n +uff le +Ġs ake +ĠDe lete +_m odule +act ic +Ġperm its +art z +ĠAll iance +åĩı å̼ +Ġpar ks +宽 度 +( state +support ed +å¿« éĢĴ +Ġu i +< tr +Ġestablish ing +Ġan onymous +Ġautom ation +ĠG ET +壳 ä½ĵ +ell ers +Ġse q +lot te +- cl +è¡¥ åĬ© +ĠA RE +ĠB BC +Ġdeg rad +Ġconst itutional +L o +m ad +ï¼Į åĬłä¸Ĭ +Ġpain ful +ĠFriend s +Ġir rit +æł¼ åħ° +æīĢ å±ŀ +注æĦı åĬĽ +Ġn est +.n um +-c ol +Ġm aker +Ġra ises +åŁİ 乡 +ul ent +Ġsynt hesis +ar ance +tr im +ĠH op +å¬ · +To String +èľ ¡ +Ġrom ance +羣çļĦ å¾Ī +Ġspecial ists +xx xx +ĠF ashion +P ython +s chema +èĬĤ 约 +bar a +# pragma +Ġd p +å¸Ī çĶŁ +éĤ µ +ä»¶ çļĦ +é¾Ļ 头 +éĢī 举 +Ġcirc ular +ä¸į éĶĪéĴ¢ +Ġsat 
ellite +_ Z +ĠInt el +ĠL ag +ä¹° äºĨ +ĠV it +Ġcent uries +ä¸Ģ æĿ¥ +Ġg ast +Ġpro gression +Ġag ing +ĠWith in +éĹ ¸ +ĠF ace +åΰ å¤Ħ +ex it +çī© ä½ĵ +ãĢģ 人 +åΰ çİ°åľ¨ +åįİ å¤ı +Sp ring +Up load +Ġo re +Ġr ats +Ġin e +Ġsuggest ion +åIJĪçIJĨ çļĦ +ĠThe rapy +æĪij çŁ¥éģĵ +ĠCamb ridge +æĽ´ 大çļĦ +ĠDef endant +ĠL ICENSE +Ġbu ck +m ade +if iable +éĻĨ ç»Ń +g ot +è¯ Ģ +eg u +ĠN J +ï¼Į åıĹ +æĪ· å¤ĸ +ĠAnth ony +äºĨ èĩªå·±çļĦ +ï¼Į åĪļ +Ġtro ops +Ð ¹ +Ġsl ip +__ _ +Ġliber al +-F i +æīĭ ä¸Ĭ +Ġconsult ing +Ġsp are +con sole +ft en +, å¸ĮæľĽ +强 èĢħ +ĠR ather +ject ion +Ġnon pro +Ġlast ing +æħ¢ æĢ§ +Ġro d +ĠS ort +, 以åıĬ +åį« æĺŁ +Ġconfirm ation +åıij ç͵ +j avascript +æŀ¶ æŀĦ +{ T +Ġsp ir +ĠJ son +Ġflo od +éĹ´ éļĶ +æ¼Ķ 讲 +Ġp ays +fore ach +Ġforgot ten +Ġinitial ize +身 为 +çα å¿ĥ +v able +B its +æľī ä¸Ģç§į +åıª 好 +US B +=" " +ä»ĺ 款 +社ä¼ļ çļĦ +æİĴ æ°´ +Ġra ck +N I +饮 æĸĻ +_P L +Ġpro be +Ġbank ruptcy +Ġadv ise +R ich +çļĦ é£İéĻ© +ivid ually +_ height +arr ass +Ġinter vals +ĠNS String +çļĦå¿ĥ çIJĨ +Ġsynt ax +Un iversity +Ġ Ì +ĠLI ABILITY +è± ¹ +ĠD a +. contains +ãĢĤ æĸ° +饰 æ¼Ķ +Ġvess el +ï¼Ľ èĢĮ +RO UP +p read +as a +ĠF R +char acter +åīį æĻ¯ +C red +ÑĢ Ð°Ð +ĠN A +å½ĵ æľŁ +åĿ Ŀ +Ġo ils +an ol +æĻ® åıĬ +Ġpoint ing +å¦ į +ut her +设计 çļĦ +od o +Ġd ess +Ġun e +ĠP ier +inem a +åĴ ķ +. 
empty +éĴ¢ 管 +Ġdon ation +æĿ¡ä»¶ çļĦ +ĠG P +et ics +S V +Ġc ure +Ġsupp orters +Ġl ips +ä¸į è¶ħè¿ĩ +ĠAl abama +l g +é» ı +p air +-pro fit +ĠCh at +ĠChar acter +_d et +un ting +f inal +Ġsoph isticated +åĪĨ è§£ +ĠE P +Ġpre view +ãĢģ å¼ł +Ġwell ness +æĪij åĴĮ +ĠQ ual +Ġchem istry +ew ard +åĽĽ 大 +ç»ıæµİ åıijå±ķ +ĠRed ist +Ġenc ountered +Ġmob ility +Ġmat rices +ãĤ ĵ +i w +< std +åħ¬ åħĥ +b efore +Ġconst raint +oc ytes +çľĭ äºĨä¸Ģçľ¼ +Ġg em +产çĶŁ äºĨ +ï¼Į 常 +us age +ï¼Įè¿Ļ å°±æĺ¯ +H older +, åıªè¦ģ +ĠIS O +æŃ£å¸¸ çļĦ +åĬ© çIJĨ +le ans +Ġwork flow +ocr atic +ĠR ick +b one +Intern ational +L ibrary +n orm +âĢľ ä¸ī +ĠE rr +è´¦ 款 +åľº åľ° +Ġfin est +b oy +æĬķ æłĩ +ĠA BC +Ġsw ap +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠ +i ator +è§Ĵ èIJ½ +Ġcal ories +Ġcontribut ing +ï¼ĮæĪij ä¹Ł +app rox +b es +ç͵è§Ĩ åı° +ĠMiss ouri +æĭħ å¿§ +ĠEnter prise +çĥ ¹ +Ġd ynam +Ġso ap +ol ph +A h +çļĦ æĹł +Ġet ern +ad os +Ġproject ion +ĠStat istics +Ġreason ing +ol as +N P +Ġqual ities +çħ§ æĺİ +éķ¿ æ²Ļ +主 åĬĽ +å®ŀçݰ äºĨ +Tra vel +代 æĽ¿ +qu es +Ġrep orter +æ± ° +åħ¶ ä¸ŃçļĦ +ĠP as +Ġachieve ment +ä¾Ľ ç͵ +Ġdem ocracy +-s ite +è¿Ļ 段 +# [ +游 æ³³ +D ict +Ġelder ly +], [@ +* \ +ç² ¤ +UN CTION +åľ¨ æĦı +ĠNet herlands +Ġ ---------------- +) _ +äºĭ çļĦ +æķ´ æĶ¹ +ĠP u +ment ed +Ġ .... +In vest +ang les +CP U +ĠTre nd +ĠJ u +éĵ ħ +Int eg +ç»Ŀ ç¼ĺ +Ġexcept ions +ï¼Į 竣 +ĠS ym +^ ^ +çħ ŀ +over nment +ç´§ å¯Ĩ +Ġconf idential +' > +ĠOr iginal +Ġvol umes +N EW +Ġ 该 +Ġmin eral +è´¢åĬ¡ æĬ¥è¡¨ +æĮ ª +ï¼Į 秦 +Res et +ĠD or +éĺ² èĮĥ +åĺ´ å·´ +, çͱ +ä½ ij +un ique +ir atory +=" $ +Ġt ours +äºĭ çī© +çĿĢ å¥¹ +Ġhand y +i per +模 æĿ¿ +Comm unity +Ġu pt +P arse +ãģ ķ +Ġat oms +ĠT oo +arn ings +çļĦ å¤ļ +Ġcritic ism +èĢģ 头 +éĨ Ľ +Ġfem ales +Ġfem in +åıª ä¼ļ +ï¼Įè¿Ļ æīį +Ġperform s +Ġn ick +ĠT ogether +åĴ ¯ +Ġex ec +-g o +æ³ » +ol er +Ġcomp iled +Ġâī ¤ +. default +羣 空 +å°ij çĪ· +Ġtim estamp +åķ ª +is ure +ĠAct ivity +l as +åŃĺ æ¬¾ +. be +åıį å°Ħ +ĠV M +æĿ¡ 款 +ip h +b ecause +> . 
+æķ° ç»Ħ +åIJ« ä¹ī +Ġcomp ression +[ key +éľ Ħ +Ġinsp iring +ĠWh it +åĬł 以 +å®ŀ è´¨ +C ross +" github +Ġhighlight ed +ĠT oken +Ġaccept ing +- value +Ġcoupl ing +æĹ¥ ä¸ĭåįĪ +åıij èµ· +ãĢĤ è¿Ļä¸Ģ +olester ol +ï¼Į è¿ĩ +Ġcontain ers +è¿ĺ 好 +Ġr ounds +( var +и Ñı +Ġrandom ly +Ġthrow ing +- way +åĨ· çļĦ +ĠCapt ain +ï¼Į éĩį +è¿Ļ 份 +Ġfig ured +ID S +ĠB alt +Ġst ays +α ι +Ġd ough +O ct +èĨ Ĭ +Ġbe ings +s App +Ġir re +ĠS ac +Ġ åĨį +ĠA qu +èĥĸ åŃIJ +ä¸Ń åįĪ +åIJĦ 级 +ĠS Y +pe ople +Ġpull ing +Ġmy th +éĥ½ å·²ç»ı +.com m +k appa +èĨ¨ èĥĢ +Ġpl t +Ġsp rint +t wo +触 æij¸ +æ¸Ĺ éĢı +{ M +ï¼Į çĽ¸ä¿¡ +Ġwild life +ä½ĵ éĩį +ong o +ĠB ol +对 äºĨ +_ queue +n one +. Log +è¿ĩ æĿ¥çļĦ +P aul +Ġdep loyment +ĠTrans fer +Ġover flow +æľºåĬ¨ 车 +z ens +Ġhere in +SS L +Ġappet ite +Ġaccording ly +Ġbroad er +ä¸Ģ èµ·æĿ¥ +/ en +X Y +Ġtour ism +Ġar med +Ġrel ates +Ġmar ine +çļĦ çī¹çĤ¹ +ĠS urg +ĠS afe +C OM +ell i +ĠUs ers +ä¹Ļ çĥ¯ +åľ¨ 她 +ãĢĭ åĴĮ +Ġpresident ial +ĠW ind +ĠCh i +Cont ents +Ġjust ify +Ġconscious ness +äºĨ 她 +ï¼Įè¦ģ æĺ¯ +ogene ous +ĠMe et +ĠPack age +As k +Ġw age +ĠP ages +ĠOper ations +Ġpre lim +( K +cd ots +V O +ph ant +ĠW indow +Ġ -------- +. 
o +rins ic +um ing +Ġquant ities +ï¼Į ä¸ĭéĿ¢ +Ġatt achment +ĠCol umn +Ġbott les +Ġfunction ing +Ġtr ash +Ġstri king +ä¸ ŀ +Ġpoly mer +Ġun a +Ġpath ways +Ġcitiz en +åħ¶ 次 +纳 åħ¥ +] ], +æł· æľ¬ +g ent +/ api +Ġal ike +Ġcon clusions +.t itle +Ġch lor +Ġw elfare +Ġt ire +Ġen emies +Cont ains +Un ited +ä¼ ½ +Ġsubt le +ĠL ux +èµ° çļĦ +qu et +è¿Ļ ç±» +å¸Į èħĬ +_b ytes +æµģ æ°´ +æĭħ å½ĵ +Ġextra ction +to String +[: , +ç¥ Ľ +åĨ³ è®® +ann ers +Ġst raw +æĸ° æĬĢæľ¯ +Ġmag ical +ĠS leep +ï¼Į åĿĩ +IC AL +Ġsu its +å®īè£ħ åľ¨ +ĠObject ive +=" _ +Ġâ Ĩ +y g +roll ers +Ġneigh bour +交 åıī +conom ic +åĩº èµĦ +伸 缩 +Ġend less +åĭŁéĽĨ èµĦéĩij +æľĭåıĭ 们 +.p ost +è¿Ľ çIJĥ +符 åı· +Ġdeterm ines +èħ¾ 讯 +her ited +st able +羡 æħķ +Ġelev ated +Ġv endor +L oop +è¯ ı +Ġbar riers +F eed +ï¼Įä½ł çļĦ +ac er +ock ets +çģ« ç®Ń +æ¶ ¡ +Ġgener ous +B ank +urs ive +ĠB attle +ĠP rior +é϶ çĵ· +Ġdri ed +Ġcap ac +çĽijçĿ£ 管çIJĨ +è´Ŀ å°Ķ +_ valid +çĥŃ çļĦ +- ups +çİ« çij° +Ġal igned +_T EST +æĹ¥ åĨħ +B ound +anc ies +ï¼Į æŃ£æĺ¯ +Ġc um +ĠFrank lin +[ x +ener gy +, [ +ä¸Ĭ ä¼ł +ĠS ummary +ï¸ ı +. Generic +_B IT +Sp an +å± ¡ +æĭĽ æłĩ +åĬł å¯Ĩ +_RE AD +天 使 +人 äºĨ +Ġhom ework +åī© ä¸ĭçļĦ +-w ide +- use +ä¹Ł å°Ĩ +äºĭåĬ¡ æīĢ +ç²¾ åĵģ +Ġbuild s +at tered +ĠA z +ĠN ik +æĽ¹ æĵį +Ġ] ; +Ġcorpor ations +å¼Ģ éŨ +ä»Ģä¹Ī çļĦ +æĸĩ æ¡£ +# ! 
+es ity +par ator +éģµ å¾ª +Ġsal ad +åIJĮæ¯Ķ å¢ŀéķ¿ +Ġrh yth +éĺµ å®¹ +é e +交 äºĴ +ï¼Į ä½ľèĢħ +p in +F our +说 åĩº +æ·±åľ³ å¸Ĥ +Ch ina +wit zer +Ġf ence +Ġ « +ç«ĭ ä½ĵ +åĶ ī +羣 缸 +è° İ +è¿IJ ç®Ĺ +åĭ ĭ +ĠH an +è¯ µ +éĶ ¥ +Ġdec iding +åĪij äºĭ +ä¸Ĭ å¸Ŀ +Ġn ov +éĻª ä¼´ +ï¼Į åºĶå½ĵ +Ġto y +缮 çļĦæĺ¯ +ĠDE FAULT +ï¼Į她 çļĦ +Post s +Ġadapt ed +缸 ç»ĵåIJĪ +ĠO w +anal ysis +d ed +p n +æĤ£èĢħ çļĦ +éĻĦ åĬł +Ġso ck +ĠT rain +Ġn erve +olog ically +ig ator +æĤ į +Ġass ured +Rem ember +èĮ¶ åı¶ +åŃĹ符 串 +Ġsust ained +Ag ent +ur ious +IC ATION +æī¶ è´« +èĥ ³ +Ġport s +hel lo +ĠL emma +å®īåħ¨ çĶŁäº§ +Ġ æį® +j ack +ç²¾ çģµ +Ġs ends +_t rain +Ġd ancing +人 éĹ´ +C N +Ġto ss +_T X +æĹģ çļĦ +F ac +ĠRes erve +Ġb ib +b oolean +åįĥ å¹´ +Ġdecl aration +Ġabund ance +åĭ ĺ +ï¼Į åĶIJ +Ġb ones +cp u +大 èĦij +ï¼Įåıª è§ģ +åºķ 座 +Ġh unt +ĠBel ow +ãĢĤ çݰ +Ġal location +Cons ider +b led +Ġshe d +Ġparticip ated +åĨį 度 +åıĤ çħ§ +for ced +E v +æIJŃ å»º +d one +Ġpresent ing +ï¼Į 带 +åĤ¨ å¤ĩ +at able +Ġprotocol s +Ġwhere ver +åĵ Ħ +æĿ¥ å¾Ĺ +ï¼Į åĩıå°ij +æ°Ķ 温 +Ġ ç͍ +Ġcons ume +ag u +çĶŁ çIJĨ +_T EXT +á n +Ġint rig +åŃ¦æł¡ çļĦ +Rem ote +åĵ ĩ +ĠP ref +w id +D istance +ĠC B +ãĢģ 对 +大 夫 +Ġsu icide +Ġm amm +便 æį· +' ( +Ġbatter ies +ĠMod ule +F I +Ġ" ( +P ersonal +ĠAssoci ate +W A +Ġre cess +Ġun fortunately +Ġc b +V A +Ġst ainless +çķĻ åѦ +D igital +çļĦ åħ¬åı¸ +ĠUt ah +Con verter +.f ile +豪 åįİ +大 äºĭ +ï¼Į 缸 +D river +Ġa u +down load +æĿ¡ä»¶ ä¸ĭ +. al +Ġrepeated ly +æĹ¥ åľ¨ +_ attr +user name +大 åĬĽ +Ġfast est +ĠCon nection +Sub ject +ĠT ut +-m ade +@ @ +ä½ł ä¸į +ch ant +è§ģ çļĦ +ather ine +et work +ĠEx amples +irc le +Ġadjust ment +_c olumn +ĠOver all +V S +ress ions +Ġa er +_C OMP +ï¼Įè¿Ļ ä¹Ī +te am +. 
), +ra ining +ä½į çļĦ +Ġout fit +ठ¾ +æ½ľ åľ¨ +ä¼ļ åĩºçݰ +am ar +Cond ition +ĠT ab +OD ULE +Al loc +æĽĿ åħī +.m essage +ç®Ĺ äºĨ +ch anged +C ounter +å¤į ä¹ł +Ġadvert is +è¦ģ åľ¨ +reg ular +== = +Ġdo i +ä½łä»¬ çļĦ +ï¼Įåį´ æĺ¯ +cond s +èIJ į +åģ· åģ· +red ient +ĠMex ican +Ġphotograp her +_FL AG +Ġcon n +æĺ¥ 天 +Don ald +æľº éģĩ +æĴ ¼ +ĠH ERE +ä»ĵ åºĵ +el ay +Ġph ases +%%%%%%%% %%%%%%%% +æĹ¶ æľº +æ¥ ł +Ġs s +èĬĤ æĹ¥ +æİĪ äºĪ +ourn als +ï¼Į çŁ¥éģĵ +habilit ation +ĠS R +Ġc c +Vis ual +oth y +æ°Ķ æ°Ľ +æĭĽ åķĨ +\ leq +ĠForm at +ä¸Ģ åĬ¨ +m os +ä¸Ģ éģį +P y +Ġcompet ing +Ġc p +å¼ķèµ· çļĦ +sp ot +check ed +æł Ī +æ³ķå¾ĭ æ³ķè§Ħ +ĠGrow th +æļ ¨ +ä¸Ń æīĢ +ä¸Ģ åIJĮ +æ¹ Ľ +Ġa uction +Ġdistingu ish +ay ment +um ble +绿 åĮĸ +æİ¥ å¾ħ +j an +Ġm as +æŃ£ éĿ¢ +è¿IJ æ°Ķ +Ġsche mes +ĠCurrent ly +Int erest +Ġglass es +Â Ģ +çĶŁ èĤ² +ĠR ace +æĿ¿ ä¸Ĭ +ĠL ane +_s ession +Ġpro to +æ° Ł +ĠPrem ier +. Object +T emp +Ġmarket place +ĠN BA +ä¸ī 天 +o vers +ä¹ĭ å¿ĥ +res et +æ¤ İ +_ up +Book s +g oogle +Ġt une +Ġend e +_ rec +æ·± å¤Ħ +ï¼Į æĺ¾çĦ¶ +.d raw +ï¼Ľ æīĢè¿° +h ad +Ġexplain ing +渴 æľĽ +. of ++ " +客 æ°Ķ +-d epth +çŁ « +çļĦ èĬ± +ĠIn novation +ĠO pport +Ġdemonstr ates +ĠAn imal +Ġstrugg les +å·® åĪ« +åħ¨ éĥ½ +AR GET +åĤ¨ åŃĺ +ĠV ent +in ars +åIJĮ æľŁ +ãĢģ " +Ġcare ers +oll ar +Ġlog ged +ï¼Į åĮĹ +% ), +å¾Ī ä¸į +igh ters +Ġde leg +id el +M IN +. 
url +Ġ' _ +Ġke en +å¹´ åĪĿ +Am ount +Ġdisappoint ed +é« ĵ +% ; +ell ular +é£Ł æĿIJ +ï¼Į åĵªæĢķ +第äºĮ 次 +Ġroll ed +ç§» æ°ij +Ġj a +èĢģ å¹´ +ĠS creen +P resent +ĠP anel +C U +èĭı å·ŀ +c ss +Ġsecre ts +Ġtown s +Ġ åı¯æĺ¯ +å¾Ĺåΰ çļĦ +Ġ åĶIJ +ul k +M usic +\ r +su pp +Ġdis advant +åIJ © +pe red +大 éŨ +åįı åIJĮ +ä¼ł åªĴ +J ames +St ay +ãĢĤ å½ĵæĹ¶ +åĩł 个人 +Ġemerg ed +ä¸ĵ è¾ij +ides pread +Ġpers pectives +ress ing +( val +ĠBet ween +äºī è®® +Ġra bb +Ġgu ided +Ġpur ple +Ġv ig +åij¨ å¹´ +o ons +ul ly +çļĦ è´¨éĩı +Ġaud iences +ï¼Į åĽŀ +ç§ģ 人 +Ġcelebr ated +ĠF ly +_st ream +ĠG PS +-b lock +Ġtra its +ĠDe cl +-f amily +err ors +l ice +LO B +em ph +ä¸Ĭ 课 +èµ¢ å¾Ĺ +å®ŀ è¯Ŀ +D rop +竳 ç¨ĭ +ç͍ åĬĽ +Ġrem inder +æ°´ åĪĨ +Ġboot s +æijĺ è¦ģ +ç»ĺ çĶ» +ãĢĤ çİĭ +ç¦ Ħ +ĠF le +ãĥ » +ĠC ars +d est +çĦ¶ çļĦ +Ġsatisf ying +ç¼ĵ æħ¢ +æĮģ èĤ¡ +å®ŀ åľ° +ï¼Į 红 +As set +art ial +ãĢĤ æ¯ı +æĹ ± +æĪĺ èĥľ +Ġl ap +oc a +Ġc s +ä¹Ł éĥ½ +æķ ŀ +et erm +çļĦ åIJį +ĠW el +åĨħ å¤ĸ +r ases +ç¥ŀ å¥ĩ +ãĢģ äºĮ +{ t +Option al +ï¼ļ A +çĭ¬ èĩª +Ġthe ater +am on +Ġcl oth +Ġr ush +Ġconst itute +Ġh ug +,èĢĮ æĺ¯ +大 ç¬ij +æĮĩ åįĹ +.l ast +M apper +æ± Ŀ +Ġout let +(n p +Ġhaz ard +] )) +ĠHealth care +æ³ £ +)* (- +å¤ļ 人 +ĠD ur +ann ah +witzer land +åħ¬ å¼ı +" ]. +忽 è§Ĩ +èµ· çĤ¹ +身边 çļĦ +ĠV ert +ĠH yp +(m odel +ĠGirl s +W arning +x a +Ġcount ing +S ch +ĠSe pt +Ġcomp elling +è¾ĥ 大çļĦ +Ġstruct ured +ãĢģ çİĭ +Ġc ant +碰 åΰ +ãĢģ æĿİ +ĠPat tern +åı¥ åŃIJ +port ed +Ġcon vention +GB T +éģĹ ä¼ł +. for +ĠT IM +ipp i +es ian +å¾Ĺ ä¸Ĭ +æĵħ éķ¿ +, ä¸įè¦ģ +qu arters +p ed +ĠMe chan +Ġimag ination +. 
or +æıIJ åĩºçļĦ +EN ER +Ġapprox imation +ï¼Į åı« +ä»ĭç»į äºĨ +è¿ŀ éĢļ +åħ¬å®ī å±Ģ +åİĨåı² ä¸Ĭ +éĴ¥ åĮĻ +Ġswitch ing +çĥŃ çα +èįī åİŁ +{eq n +Ġdi ver +ĠMaterial s +, âĢľ +ĠF ant +ĠThanks giving +æĹł åı¯ +ä¿¡æģ¯ åĮĸ +\ { +å¡« åĨĻ +Process or +å±± çļĦ +ä¸Ģ åı¥è¯Ŀ +Ġwat ches +ĠL abel +ĠD ick +å¥ī çĮ® +he ets +ern et +åĤ¬ åĮĸ +åĮħ 裹 +ark er +F inal +æİ¥ ç§į +ãĢĤ åıĪ +Ġaggreg ate +ä¹ĭ 为 +ble ms +åĽŃ æŀĹ +ĠGet ting +at om +çѾ 约 +Ġs its +Ġmod ifications +ä¸į æĺĵ +iss ue +Ġr s +æĸ° çĶŁ +åı£ æĦŁ +Ġpass enger +ç¯ĩ æĸĩ竳 +æĥ³ çļĦ +Ġexpl ored +ĠD rop +ol itan +åĶ® åIJİ +Ġref use +B ay +æľī ä¸ĢäºĽ +Th ursday +ĠG B +ĠD river +æĹł æīĢ +å¯Į æľī +li est +te in +or ious +ĠHow ard +plug ins +Ġte aches +Ġpolit icians +ident ifier +/ pro +Ġf ancy +Ġobst acles +èİ· å¥ĸ +æĻ®éĢļ çļĦ +][ " +ĠL in +Execut ion +äºĨ ä¸ĭ +ĠB uilder +Pro ps +umn i +Ġep it +ĠEmp ire +Ġm elt +对 éĿ¢ +ĠM atch +ĠCoun sel +ï¼Į 身ä½ĵ +,ä»İ èĢĮ +ur ring +ours es +R untime +-s ide +V EL +åīį æıIJ +om ing +Ġbi ology +ĠW edding +nab la +Ġ( (( +ĠIn clude +身 å½± +æĬ¥åijĬ æľŁåĨħ +Ġapp ar +æĺ¯ ä¸ŃåĽ½ +à ¯ +B Y +_s ource +约 æĿŁ +B ad +ĠT ags +Arch ives +åı¯ä»¥ ç͍ +ĠBel g +Ġstream s +ab i +åİĤ åķĨ +_p ost +Ġprospect s +ç«ŀ æĬĢ +( os +Ġglob ally +éľ Ĩ +çļĦå¿ĥ æĥħ +Ġpropos als +-b utton +Ġar row +Ġback ed +ç²¾ èĭ± +ĠEx cel +Ġ ê +éĥ Ŀ +没 åĬŀæ³ķ +Ġdepend ence +ox ic +v et +ĠF O +åħ¬ æĸ¤ +ĠRev olution +æ³ķ çļĦ +am ents +ĠF ont +. IO +ĠB ot +iov ascular +ĠAnt i +Ġsymmet ry +param eter +Ġabs ent +ĠGal axy +ĠWalk er +Ġrec overed +I ENT +, T +ĠMar ine +-s ized +lic al +Ġrecip ient +å᧠室 +è±Ĩ èħIJ +ï¼Į 顺 +Ġshel f +_ ctx +æİĴ åºı +Ġsynt hetic +ĠP ool +çĥ ģ +ä¸į çĿĢ +说 ä»ĸ +Ġval ve +IS H +, ( +Ġbes ides +Ġpe ers +å¨ ħ +f its +D ER +> (" +åħĭ æľį +Ġver ification +Ġprof ound +@ end +i ere +å¤įæĿĤ çļĦ +ĠT emplate +ĠPh ill +ï¼Į 建ç«ĭ +ĠH it +Ġtra uma +ï¼Įåıį æŃ£ +. 
op +Ps i +ĠJ oy +èĻ ŀ +ract ed +Sc ene +Ġtyp edef +åľ¨ æľ¬ +å§¿ æĢģ +ens ively +Ġl ig +ä¸Ģ æĹ¥ +çļĦ ç¬ij +_ response +èijĹ åIJį +ĠM erc +ĠK EY +æĹĹ ä¸ĭ +ĠJ S += true +-w inning +ï¼Į 带çĿĢ +. arg +çα ä½ł +_h andler +ï¼Į å¿ħ +Ġ" ' +ï¼Į éĿ¢ +Ġmax im +Ġsim ilarly +Ġvari ants +Ġmass es +ĠBow l +ç«ŀäºī åĬĽ +ĠM ars +ç¼ĵ åĨ² +åĻ ¬ +ä¸Ģ ä¼ļ +Ġassoci ations +æµİ åįĹ +Ġz oom +æīĭ ä¸ĭ +èĢĥ éªĮ +S ound +ĠG all +Ġen jo +åĹ ½ +Ġart istic +Ġf itted +] ^ +_ ad +Ġcust ody +ĠInd ividual +éĩĮ æĸ¯ +Ġc er +Ġc ement +Ġp iano +Ġper pet +Th us +Ġw ard +ç¦ ½ +Ġbar rel +æī© å¼ł +Ġdownload ed +Ġsit uated +w alk +æł¸ éħ¸ +DE BUG +qu ote +第ä¸ī æĸ¹ +.p op +W ow +Ġinvest ed +颤 æĬĸ +ĠMar ia +Ġsurve ys +ĠDe an +_ enc +å½ ¬ +ï¼Į èϽ +ä¸Ń èᝠ+æıIJé«ĺ äºĨ +åĽĽ åįģ +Ġsystem atic +说 ä½ł +ï¼Į éĻĨ +ĠL ater +b tn +roph y +Ġinstall ing +ĠTur k +ĠDen ver +è¿İ æİ¥ +ien e +ĠCh rome +With out +il ateral +Ġe uro +P rom +Ġmethod ology +in ion +Ġhand ed +çļĦ 管çIJĨ +M esh +æī¿ åĮħ +Ġret ro +ump y +ĠBeaut y +_ form +Ġindu ction +ĠDI Y +Ġpul se +äºĨè§£ åΰ +. Equal +F oot +åĬł æĭ¿ +_ order +åı£ èħĶ +_sh ape +ER VER +Ġpub lisher +å¿ħé¡» è¦ģ +Ġthere of +ã İ +ĠIsrael i +Ġcom ic +èĪ Ĩ +ĠW orth +Ġsubstr ate +avel ength +ĠPhys ics +大家 éĥ½ +çĬ¹ å¦Ĥ +éķ¿ å¤§ +åľ° éľĩ +е л +Ġr ic +im on +{ s +Ġb ay +Ġfre ely +Ġfund ra +Inst agram +Ġsc aling +èĥ³ èĨĬ +p ond +åĭ º +ĠCam era +çļĦ 头 +Ġdev oted +Ġpen et +_S C +Ġagric ulture +Ġtow er +act ors +Ġp as +ĠR d +select or +ï¼Ī ä¸Ģ +G old +e as +å¼Ģ 设 +Ġd ense +Ġland sc +m aking +ĠB u +ĠRep orts +V irtual +Ġcor ruption +ĠG rid +mon th +ãĢĤ å°Ĩ +Ġ? ? 
+Ġconflic ts +is an +( [] +con sc +Menu Item +åĮĨ åĮĨ +ç͍ æ°´ +ĠSpring s +ç§ijåѦ å®¶ +åij¨ æľ« +(f loat +ĠB ull +Ġport ray +éĽĨåĽ¢ æľīéĻIJåħ¬åı¸ +Ġs ib +Ġillust rated +Ġaccess ed +, æ¯ı +æľī åºı +æĥ³ è¿ĩ +ï¼Į ç¾İ +R a +äº ¨ +é¢ij ç¹ģ +润 æ»ij +Ġt ies +Ġshel ter +Ġt ale +å¤ļå°ij éĴ± +C ursor +-t ext +Ġa ux +æľª èĥ½ +ĠLoad ing +Ġrem embered +act ual +æ¾³ 大åĪ© +Ġint end +åĹ ĵ +TR L +Ġrelax ed +Ġh i +_f n +Ġin clusive +Ġcol ored +Ġcred entials +ic ting +çļĦ çľ¼çĿĽ +Ġcl one +ĠT ouch +Ġp icks +ĠCal endar +Ġoccur ring +Ġvis a +çľ¼ åħī +è´¯ ç©¿ +ĠS pect +ï¼Į åı¦å¤ĸ +Ġin cons +^ + +-w orld +Ġ çľĭçĿĢ +tain ing +ä¹IJ è§Ĥ +Ġun iqu +Ġimpro ves +C ong +åįģ åŃĹ +ĠG re +cont ents +ac hers +Ġtoler ance +ĠH andle +A lex +ĠO FF +è¾ĥ å¤ļ +ograph ical +Ġmax imize +è·ij åΰ +ä¸į åıĹ +Ġstat utory +Ġentertain ing +åħ» çĶŁ +_s ign +è¿Ŀ è§Ħ +Ġtw elve +Ġmother s +人åĬĽ èµĦæºIJ +om it +ĠF DA +æĿIJæĸĻ çļĦ +ĠS witch +æĻ¯ çĤ¹ +________________ ________________ +èĩªæ²» åĮº +çĩĥ æĸĻ +Ġbless ed +ĠKent ucky +ĠR ank +çıį æĥľ +澳大åĪ© äºļ +Ġp ants +ĠFranc is +x s +éĤ£ 天 +oc he +Ġmagn et +Ġestim ation +rit es +åįĥ éĩĮ +大 éĺŁ +IC S +EO F +åĽĽ å¹´ +举 京 +_ thread +ĠCo ach +Ġco aches +ĠDirect ory +ĠB rew +æĴ ĩ +Not Found +Ġind ividually +æģ į +Method s +Ġdis put +ï¼ĮæīĢè¿° 第ä¸Ģ +ï¼ Ĵ +Ġsusp ended +Act iv +bb ed +ĠDet roit +åĽ½ 产 +rit ers +éĹ® æĪij +Ġam id +Ġbound ed +Ġ çĦ¶åIJİ +Ġstrength s +T ri +åIJİ æŀľ +B G +æĺ¯ è¿Ļæł· +Ġsix th +Ġinter ventions +ot or +ĠUn like +Ġd ur +An notation +Ġ ä¸ĩ +对 æĬĹ +Trans fer +ä¸Ń æľĢ +he ld +Ġorgan izational +Ġstra ins +out heast +Ġcru ise +ĠS ustain +ä¸į åĪ© +ĠOpt im +çĮľ æµĭ +Det ail +Ġret rieve +Ġspect acular +强 åĬ¿ +çļĦ çľ¼ç¥ŀ +A f +æĸ° çĸĨ +EN C +ĠS U +æĶ¾ åΰ +Ġl ambda +Off ice +ä¸Ģ å¥Ĺ +Ġc ous +em ale +çļĦ åIJįåŃĹ +Ġch arts +Ġint im +ä¸ĵ ç§ij +é²ľ è¡Ģ +ĠW ant +Ġlaw suit +_l oss +b rew +ĠF M +Ġgrad uated +Ġveter an +导 ä½ĵ +ĠG ib +d eg +ç¨İ æĶ¶ +Ġisol ation +ĠB rid +c out +lim it +Ġpo ison +Ġindic ators +ĠNet flix +ĠS in +åīį æĸ¹ +å̼ çļĦ +çŀ ³ +ï¼Į èIJ§ +UPD ATE +S aturday 
+sh ore +c am +. android +Ġrecruit ment +Ġmetabol ism +éĢĤ å®ľ +param eters +ĠA CC +åľ¨ éĤ£éĩĮ +Ġvess els +i u +Ġblock ing +æ°Ķ 象 +缺 å°ij +" }, +Con n +ĠA zure +æį Ĥ +- foot +rel ation +ĠOk lahoma +æĿ ī +Ġal le +çİĭ çĪ· +od a +at ility +Ñģ к +. input +-l abel +æ¿Ģ çĥĪ +pt s +éĺ´ éĺ³ +ign ore +Ind ent +Ġin hab +Ġconst ants +Ġconduct ing +Ġb ases +äºĨ èĩªå·± +ĠF ish +åŁºç¡Ģ ä¸Ĭ +主 å¼ł +åIJ© åĴIJ +ãĢģ 第äºĮ +Ġclaim ing +-t est +å¸ § +èĦ¸ ä¸ĬçļĦ +åįķ è¯į +ĠMedic are +约 ç¿° +Ġg low +èŀº æĹĭ +ãĢĭï¼Į ãĢĬ +C amera +âĢĻ ï¼Į +Ġsupport ive +ro ck +Ġretail ers +Ġass ay +Str ategy +. On +ĠT ell +Ġcomp ile +Ġnic ely +Ġch urches +åĪĨ å¼Ģ +认 åIJĮ +è¶ ´ +Ġm urd +ï¼Įä½Ĩ åľ¨ +ä¸Ģ èĤ¡ +Ġseem ingly +è§£ 读 +ĠMc G +Ġp iv +Ġat omic +Ġalloc ated +ĠK al +.A ss +çļĦ 空éĹ´ +ç§ij 缮 +Ġinc idence +C lear +ä¹IJ è¶£ +ä¹ħ çļĦ +Ġabs orption +net work +_ EXT +A mazon +Ġl ang +Ġ 好 +ï¼Į åĩĨå¤ĩ +ĠRober ts +Ġemb arrass +om ed +ä¸Ģ 座 +è° £ +媳 å¦ĩ +è¿ĩ å¤ļ +ãĤ ¯ +Ġpo em +Ġremind ed +unk nown +Follow ing +æĹ© é¤IJ +inn ed +åı£ çļĦ +ĠD un +Ġ è¿Ļæĺ¯ +Ext ra +Ġany time +ĠH ero +art icles +åĥı ç´ł +ĠV ac +Ġb ol +æĢİ æł·çļĦ +G TH +e ach +ĠPhilipp ines +.b ody +}} ( +ĠS F +交 å¾Ģ +duct ive +o is +Ï Ĩ +- user +ov iet +ãģ ı +Ġrend ered +ĠASS ERT +ĠShe l +/s rc +ST EM +å̾ åIJij +im ental +æľĢåIJİ çļĦ +UFF ER +æīĢ è¯´çļĦ +List en +人们 çļĦ +ï¼Į å¤ı +æķ£ åıij +åķĨ åľº +Ġin box +IS O +ä¾§ éĿ¢ +IC ES +Ġp t +Ġhand ful +æĬĬ ä½ł +éĥij å·ŀ +_ [ +uff ix +Ġden y +Ġ æŃ£ +æ¸ħ åįķ +Ġgl ory +ï¼Į å¾Īå¿« +ĉĉĉĉ ĉĉĉĉĉ +ew orks +车 ä¸Ĭ +ï¼Įè¿ĺ åı¯ä»¥ +ĠMc K +å° § +Ġproceed ing +ĠâĪ ļ +Ġdark ness +Ġind ication +Mon th +ï¼Į 许å¤ļ +, åıªæĺ¯ +Ġanticip ated +è£ ¸ +Ġpe pt +d og +åİ» éϤ +Ġtable ts +ĠR F +or us +æķ Ľ +Ġex tern +Ġcir cles +Ġman if +I ss +è¾ī çħĮ +M obile +ï¼Įä¹Ł 没æľī +ï¼Į让 人 +Ġbas ics +éĽ ģ +_h ost +Ġdes ires +- def +ĠEn able +_US ER +èĬ± è´¹ +è¿ Ń +ric ks +Ġli able +å² ļ +ov en +S ame +h ill +ex per +B ottom +ï¼Į åĬłå¼º +æĥħ æĬ¥ +Ġbonus es +So ftware +ĠG ram +Ġb ite +Ġcl im +é¢ Ĭ +Ġr uled +sc opy +Ġin quiry +Ġblog ging +çļĦ æµ· +/ 
æĪĸ +ire ments +Ġ é»Ħ +åĩ» è´¥ +Comp are +Ġcons ensus +æľī æľºä¼ļ +/ W +Ġmathemat ics +çİ© åħ· +çĶŁ æŃ» +åľº åIJĪ +ĠL iver +Ġmain stream +ud ge +les h +Ġspecific ations +. File +Ġrest oration +Ġres istant +x b +w atch +Educ ation +Ġexist ed +\ ": +( W +Ġspect ral +_DE FAULT +A ff +Ġl azy +Ġc rown +ĠMan ual +åıĶ åıĶ +t ry +dis c +大 èĩ£ +Ġan th +Ġprelim inary +åįı ä½ľ +_ alloc +Ġst olen +è¯ŀ çĶŁ +ĠIn sp +æ· ij +(n um +Qu ant +å§Ĩ æĸ¯ +çĤ « +ro log +èѦ åijĬ +ĠConfig uration +N ever +ĠTest ing +Ġinvest or +com ponent +ock s +Ġsus cept +Ġdev ast +Ġinteg ers +éĽ ¯ +Å Ł +Ġint ro +åľ° éģĵ +Ġbr ick +Ġleg ally +add ed +æľĢ æĹ© +pos ite +ĠD ATA +ä¾µ æĿĥ +we alth +Ġac res +ãĢĤ 尽管 +ĠCons umer +p iece +ä¸Ĭ 线 +ĠE S +Ġworth y +Ġf atal +ĠA WS +Ġprior ities +_fl ag +h ou +æĪĸèĢħ æĺ¯ +ĠP il +Ġtrad itions +åĩº åľº +æĬ¥ ä»· +Ġthe atre +太 大 +Ġdist inction +M o +ä¸į åłª +ä¸Ģ个 个 +ik ed +level and +n an +Ġprint s +> " +j or +p refix +Ġs lope +Ġstri ve +c fg +ç» ½ +ĠB ron +ãĢĤ åıª +ĠR AM +Anal ysis +ĠCOPY RIGHT +ment ation +aus es +Ġw ound +Ġl obby +Ġw idespread +读 åıĸ +Ġ åıĤèĢĥ +. 
View +Ġte asp +åĭī 强 +B ounds +Ġbul let +Ġspeed s +ï¼Į çĶ· +mod els +çļĦé«ĺ 度 +å¿ĥ ä¸ŃçļĦ +Ġsqu ad +D irection +Ġam ino +ä½İ 头 +å¹² èĦĨ +åĬĽ æ°Ķ +Ġ åIJĪ计 +.j unit +ãĢģ 社ä¼ļ +Ġs urre +主 æĦı +( output +Ġgro cery +æĭī æĸ¯ +ĠTe a +åĻ ¼ +ĠMet al +Ġam end +Ġd ressed +æŁĶ 软 +Ġgener ates +ĠDef ine +f mt +åĿIJ ä¸ĭ +Ġjack et +Const raint +qu al +é» Ľ +i em +çİĭ åŃIJ +Ġcondition ing +ï¼Į çī¹åĪ« +ãĢĤ åĽł +è¡Į èµ° +B inary +Ġprivile ge +ĠAn ne +常 ç͍ +å§Ķ å±Ī +éĤ£ ä½į +ï¼Įå°± è¿ŀ +æĹ¶ä»£ çļĦ +in x +ĠTr ading +sc r +ä¼ĺ èī¯ +ict ions +ãĢĤ ä¸Ń +ĠB uck +Ġtruck s +ĠJ en +K now +ĠL eb +æĭį åįĸ +) }{ +/c ore +Ġan chor +çľĭ è¿ĩ +åIJ ģ +Ġsufficient ly +im ore +ãģ į +ĠM ORE +Ġkid ney +ard less +er on +Ġout doors +Ġd y +åį³ ä¾¿ +å®Įæķ´ çļĦ +gg reg +T uesday +Ġequ als +ï¼Į ç¡®å®ļ +å¸Ī çζ +Ġl ately +ut ed +sk y +Ġexc ell +p d +TE GER +ä¼ļ 对 +ãĢģ éĩij +W ant +ãĢĭ ( +iss ippi +con duct +Ġlog s +äºĭä¸ļ åįķä½į +Ġcomp iler +æķĻ è®Ń +è¾ĸ åĮº +r ang +P red +çļĦ é¦ĸ +and al +ĠFA Q +Ġcalc ium +Ġsup ra +Ġ' " +p us +ĠR ing +C s +ĠèĢĮ ä¸Ķ +l ated +Ġwarm ing +Ġ çŃī +Ġdecl ined +ĠF urn +Ġth rew +Ġgovern ance +åĴ³ åĹ½ +æĬ¥ 纸 +Ġlic enses +è¯ģ å®ŀ +便 äºİ +æĹ¥ çĽĬ +Ã Ł +ĠSy ria +çŁ¥è¯Ĩ 产æĿĥ +åĮº åĿĹ +å°Ķ æĸ¯ +ä½ĵ æ£Ģ +âĢ ĥ +Ġswe ep +被 称为 +.M essage +éĶ Ĥ +augh ters +Ġconsult ant +Ġexp end +çĹķ 迹 +_ ep +Ġcompan ion +_P ORT +Ġcounter part +_ -> +. 
web +ĠSo ft +Ġoffer ings +S ide +.b ind +Ġsac rifice +ĠR ent +Ġm asks +Ġun limited +ç½ij 页 +Ġas h +éĶĢ åķĨ +Ġm akers +do i +è§Ħ 磩 +ä¸Ģ æī¹ +å± ¯ +ac ión +Ġwitness es +ï¼Įä¸Ģå®ļ è¦ģ +温 åĴĮ +ract or +èģļ çĦ¦ +éķ¿ å¾Ĺ +ĠE ve +Ġtransl ated +Ġtremend ous +Ġview ers +ĠP red +çϽ 天 +Ġt iles +ition ers +ĠLet ter +Pr imary +Ġneur ons +Ġgraph s +³³³³ ³³³³ +çļĦ éģĵ +æ¡Į åŃIJ +read er +Ġf ought +çģ ¶ +ĠSt ack +åIJī æŀĹ +å¼Ģå§ĭ äºĨ +-p erson +.g roup +ĠLess er +comfort able +Ġdivis or +åĪĿ æľŁ +èĩªè¡Į 车 +ĠS UB +åĢ © +æ½ľ åĬĽ +èĽ Ľ +Ġa apt +Ġ( + +Ġpartners hips +, åĪĻ +LO AD +ĠW y +- radius +ĠEx pl +ä»· æ¯Ķ +.p arent +, åĴĮ +Ġ çĦ¶èĢĮ +Ġpot atoes +еР´ +èᣠèĢĢ +åħ» æĪIJ +Ġc ater +ĠW ine +t m +Ġdim in +( char +çļĦåīį æıIJ +Trans port +{ g +为 ä¾ĭ +ãģ Ĥ +Ġd type +Ġtra ject +Ġf el +Ch art +Ġlit igation +Ġas p +ä¼ļ计 å¸Ī +Ġa pt +Ġsu ck +ãĢĤ æŃ£ +æĩ Ī +AL E +ic ut +Ġtit led +满 满 +ĠC ole +C amp +éģŃ åΰ +æľī è¿ĩ +Ġpreval ence +ĠMalays ia +ĠF iles +Ġdisag ree +缴 线 +对 ä¸įèµ· +ãĢĤ 请 +ï¼Į å¾ħ +B reak +Ġret ention +ĠSimp ly +Ġconnect ivity +æīĺ 管 +ä¿® 士 +çļĦ æ²»çĸĹ +he nd +Ġabandon ed +Ġacc idents +çł´ ç¢İ +Ġexpect ation +Ġdev iation +B i +ãĢĤ ç͍ +ĠRe ason +Ġ\ { +N ative +ĠLo an +æİĴ åĪĹ +ä¸Ģ ä¸Ģ +Ġsurv iv +Ġst iff +Ġact ress +Q Q +åĩł ç§į +å¤ľ æĻļ +Ġcancell ation +Ġste pped +ou p +åª Ľ +è° ľ +Ġstory t +_b it +.P rint +ang a +ĠS weet +æ¯Ķ äºļ +Ġg it +ĠH ousing +Ġâī ¥ +ĠPay ment +ĠChair man +æĺ¾ çĿĢ +åIJij çĿĢ +Ġap olog +Ġnut rients +Ġoverse as +奶 èĮ¶ +ï¼Įå°± åı¯ä»¥ +k b +ĠAl ong +çŀ İ +éĺ¶ çº§ +ĠY OUR +èģĮ èĥ½ +str ument +_d esc +å¤ĩ æ¡Ī + ² +èŀº æłĵ +Ġinstitut ional +ad ays +èĢģå¸Ī çļĦ +Ġconsider ations +. 
event +åħī èĬĴ +å¥ ł +æĺ¨ æĹ¥ +VI EW +Ġpres cribed +op ing +产 çī© +ä½ı åľ¨ +ä»Ĭ天 çļĦ +å°±æĺ¯ ä¸Ģ个 +ow ers +è¿İ æĿ¥ +è¿ĩåİ» çļĦ +å͝ä¸Ģ çļĦ +ĠNe uro +ogen esis +Ġs ons +w ick +ĠS chedule +keep ing +_ json +å°± ä¸įä¼ļ +åķĨ éĩı +ä¸Ģ æĸ¹ +.get Name +Ġbu gs +Ġsubst ances +ĠDel ivery +éľĩ èį¡ +igh bor +- est +Ġflo oring +ĠA w +Ġmon etary +Ġess ence +Ġdocument ed +Ġcor ners +æĬ± æĢ¨ +Ġend l +强 çĥĪçļĦ +ĠC ape +Ġqu oted +åij½ çļĦ +å¯ ŀ +ä¹Ł çŁ¥éģĵ +äºĨ 大 +çģ« éĶħ +(d ev +ed e +æī¿ è½½ +Ġintrodu cing +ï¼Į éĺ²æŃ¢ +bl ank +var iant +Ġle verage +, åıªæľī +殿 ä¸ĭ +p ages +ĠPro s +å½ĵäºĭ 人 +Ġswe at +ãĢĤ è¿ĻéĩĮ +Ġreg ulated +Ġincre ment +Ġsw ift +æĿĢ æŃ» +Ġphys icians +Ġshould ers +ĠPr imary +Ġmanufact ured +D at +Ġde com +ï¼Į 女 +Ġsc rap +Ġcon form +ĠG T +Ġapprec iation +\ rangle +ç«Ļ èµ·æĿ¥ +Ġtrans parency +olog ic +ĠS ample +åįģ 大 +m akers +Ġrot ate +Ġvoc al +âĢľ å°ı +ĠThom pson +OW ER +he ro +æ·± åĮĸ +Ġlim iting +Ġgovern or +ĠGree ce +let ters +R N +å®Į ç¾İçļĦ +ĠCon vention +Ġc iv +ç͵åŃIJ åķĨåĬ¡ +Load ing +W ednesday +å±±ä¸ľ çľģ +ĠPhot ography +_ iter +em n +åĪĨ 辨 +.To String +Ġr anks +it ual +lis hers +acc ur +P ing +Ġeval uating +ir ical +, åı¯æĺ¯ +s ample +ĠCast le +èᣠèİ· +C lean +ĠZ one +åĸĦ èī¯ +ĠU pon +âĢľ ä¸į +Ġdon ations +æīĭ èĩĤ +all ing +ä¸İ åħ¶ +en o +ãĢģ å¤ĸ +Ġsc ared +ç±³ çļĦ +ra ham +ĠF lo +Ġo z +ĠCor porate +ãĢĤ åıªæĺ¯ +Ġentreprene urs +Ġval ued +åķ ¤ +âĢĶ the +Ġart work +ĠC her +hel ial +ï¼Į 飩 +ĠCy ber +Ġar ising +Ġâ Ĭ +ãĢĤ èĩ³äºİ +eren ced +Ġcomb ining +Ġst ones +Ġpre vents +est y +Ġfrequ encies +comm end +ç¿ ĺ +Us ed +ĠOper ation +æĪij åİ» +} ] +Ġm ush +Ġann ually +ĠLu ke +App le +Ġfavor ites +ĠBe at +Ġb er +Sun day +æł Ĺ +å·²ç»ı åľ¨ +çĶŁäº§ çļĦ +Ġg ently +Ġorgan ize +ç¾İ 人 +Ġl in +ab ri +ĠPhys ical +Ġking dom +D ear +æŃ¤ åīį +Ġantib ody +ĠS ERV +ĠG it +Ġne at +AD D +Ġsqu ee +æĮ¯ åĬ¨ +Ġvar ieties +Ġresp iratory +Ġd rew +Ġbeautiful ly +ĠH omes +ĠN az +Ġb icy +ur pose +æĬ¤ 士 +ä¸Ģ é¡¿ +æł ĸ +æĥ³ ä¸įåΰ +Ġ éĻĨ +.f l +Ġquant itative +æĹ¶éĹ´ åĨħ +æ³ķ 人 +pro to +Ġe 
arning +ĠD elta +.g nu +Ġd s +Ġ> & +碰 æĴŀ +Ġp i +Ġtrans it +ĠRe ally +? ! +款 项 +çªĹ æĪ· +ĠF T +ÃŃ a +Ġtour ist +ĠGr ade +H H +ENT RY +> * +åĨį çĶŁ +ãİ ¡ +ĠH i +å½± åĥı +For ce +Ġbrief ly +ç͵ 容 +ĠP B + ¸ +al let +Ġst ating +B uff +{ m +å¤į åį° +C orn +ĠIss ues +un ion +Ï į +Ġfac ial +Ġdecre ases +[ string +Ġchalleng ed +ĠS ony +ĠIndones ia +Al pha +ĠBut ton +ĠPro duction +æħ¢æħ¢ çļĦ +Ġassign ments +P oly +åįķ ä¸Ģ +游æĪı ä¸Ń +IB UT +æĹ¨ åľ¨ +ĠD omin +Ġf oo +ć Ĉ +ç®Ĭ çļĦ +æŃĮ æīĭ +Ġle ver +乡 éķĩ +ĠRem ote +讲 ç©¶ +j ob +ĠR V +åħ³ç³» çļĦ +Ġtax p +& gt +ĠPart ner +Ġcompr ises +Ġjun ior +Ġcreat ures +ĠÐ ± +ĠCom ponent +æ¶µ çĽĸ +Ġ Ä +ĠComm ons +Ġupd ating +m ma +æļ ® +âĢľ 大 +ĠEnt ry +ï¼Į 建议 +主 åľº +P od +Ġcross ing +oci ated +Ġel astic +ĠÎ ¸ +Ġcoll ision +æŃ¤ äºĭ +Ġinform ative +Ġbel oved +客æĪ· çļĦ +ä¸Ģ 对 +å§¿ åĬ¿ +ĠW A +Ġlink ing +Ġad hes +Ġtw ist +Be ing +Ġpsych ology +Ġappe als +Ex plore +æ¡ĥ èĬ± +管çIJĨ çļĦ +Ġeffic acy +op les +åįı åķĨ +App ly +{ k +ĠV in +B ind +Ġtem ple +çĶĺ èĤĥ +Ġ: - +P AR +l ack +abul ary +, r +æĬĢæľ¯ é¢ĨåŁŁ +ĠEnt ity +vis it +Ġb ron +.f ilter +Ġgr asp +ç¾İ è§Ĥ +è¯Ĺ æŃĮ +ĠPhot os +S chool +Ġt id +Ġvalid ity +Ġreview ing +Ġjud ges +Ġscholar ship +çĽij ä¼ļ +Ġs isters +C reated +Ġal tered +\ cdot +Ġab ortion +, 人 +_DEV ICE +欺 è´Ł +Ġvar ies +æijĨ èĦ± +å¾Ĺ åĩº +å«Į çĸij +è´ © +Ġalleg ations +ï¼Įå¹¶ å°Ĩ +i ar +ĠK ate +=" / +æı ½ +ert ation +ĠCon sole +Ġcol leges +çĮ Ľ +End point +ãĥ¼ ãĥ +Ï ĩ +itut ed +Ġlegisl ative +åĿı äºĨ +_AD DR +Ġmusic ians +ï¼Į 令 +Ġcomb ines +z eta +Ġam er +å®Ĺ æĹ¨ +Ġwarn ed +è¿ĩ æķı +} | +ĠW R +ï¼Įä½ł åı¯ä»¥ +ĠF resh +æ·ĺ æ±° +.t arget +ĠN ash +ĠS audi +Ġclar ity +ï¼Į ç»Ŀ对 +åıĹ çĽĬ +quir ies +大 åĨĽ +ι κ +Ġsurve illance +Ġappar atus +Ġbrow se +k dir +Ġcontract ors +ï¼Į éĢļ常 +ç¥ŀ çļĦ +ĠHar vard +ĠAn na +ãĥ ĥ +ĠPalest in +. 
Property +Ġb ust +PE G +Ġd ated +åı¯ çŁ¥ +d it +Ġtong ue +\ / +æĺ¯ 羣çļĦ +ä¼ º +åĨħ容 çļĦ +Ġfl aw +Ġavoid ing +Or iginal +ĠOther s +ĠD ak +ä¸įæĸŃ åľ° +load ed +Ġ第 äºĶ +ãĢģ æµ· +ĠS ab +æĪIJ åĵģ +ĠD ry +票 æį® +Ġoppon ent +ï¼Į ä¹ĭåIJİ +ĠPro blem +å·´ 西 +Ġflu ores +P ages +Ġdocument ary +ĠS omething +, ä¸Ń +Ġcourt esy +çļĦ çϽ +Ġadv ances +Sh ader +. options +% 以ä¸Ĭ +Ġcross ed +伦 æķ¦ +ï¼Į ä»Ĭå¹´ +M AP +ĠK y +ĠM as +ro st +R ound +ç³ĸ å°¿ +æĦī å¿« +.S h +ĠL ength +åľ° éĿ¢ç§¯ +_t he +Ġmechan ics +ĠE aster +Ġal ien +ä¿¡æģ¯ çļĦ +å¤ĸ çķĮ +åĨľ æĪ· +äºĨ åĩºåİ» +Ġad oles +Ġcount less +ä¸Ĭ éĿ¢çļĦ +ï¼Į æĦŁè§ī +Ġhar sh +ãĢģ åī¯ +æľį ç͍ +gl as +Ġanal ytical +/ { +Im g +æī© æķ£ +Ġgirl friend +åĨ Ī +ï¼Į ç½Ĺ +缸 æľº +Ġliter ary +ç»ĵ å±Ģ +èĸ ¯ +èĥ½åĬĽ çļĦ +lo v +å̾ æĸľ +St rip +Ġex ports +ĠAl t +Ġt ennis +ï¼ŁâĢĿ âĢľ +å¼Ģ æĭĵ +Ġlong est +çģ« çģ¾ +_ IT +éĨĴ æĿ¥ +äºĨ ä»ĸçļĦ +ÃĹ ÃĹ +æ¸ħ æ°´ +æĢª çī© +Ġtou ched +ber ries +æĬĢæľ¯ æĸ¹æ¡Ī +ĠRes ort +O IN +Ù ģ +åĢ ¦ +å½ĵ ä¸ĭ +éģĹ äº§ +éĴ± çļĦ +Ġan not +ant ry +ĠV e +Ġrelax ing +ä¾Ŀ æīĺ +ä¸į ç®Ĺ +Ġsimpl er +Ġdes criptions +ĠC old +ĠK ings +å¿« æį· +ma zing +è´¢ ç»ı +U ES +Ġresid ual +Ġn ail +Ġche er +åİŁ åŃIJ +åı¯ä»¥ æĺ¯ +åħ¨ æĸ°çļĦ +Ġgard ens +Ġsh ield +Ġinflu ences +Ġdismiss ed +it us +æĬĬ æīĭ +F ixed +( % +S al +ç»Ħç»ĩ çļĦ +Ġhum or +ARN ING +ymmet ric +ĠSt rong +Ġcontact ed +ĠH AVE +W C +åıij çĥŃ +éĺŁ åıĭ +-l aw +ĠU rban +Ġeas tern +åIJij ä¸ĭ +à ´ +åĭĩ æķ¢ +ĠA part +d frac +el lo +ĠAnd y +Ġinterpret ed +ä¹ĭ ç±»çļĦ +Ġgrad es +Ġ 说 +ĠW elcome +ĠExper t +ï¼Įæĺ¯ ä¸įæĺ¯ +åģļ å¾Ĺ +Ġpeace ful +åĽ¾ å½¢ +Ġappe aring +Ġwithdraw al +(m ap +æ¿Ģ ç´ł +Ġlight weight +ĠE li +iv ari +èģĮ ä½į +æľī åĪ©äºİ +tr as +lev ance +comp ass +Ġgain ing +p ush +ert o +Ġenjo ys +_ ASSERT +Ġev olved +éĤ ± +ĠEm ily +- image +ĠSk in +Ġtr icks +Ġsp here +ĠL ower +ig ating +ch t +resp ons +ï¼Įä»ĸ å°± +Ġtw in +ĠV ancouver +// ! 
+ĠG race +Ġrepresent ations +ĠW ays +am ins +åĴĮ 大 +Ġde legate +Ġpers istent +å®¶ 乡 +Ġeas iest +æĹł åĬĽ +ĠD ad +æ¢ Ĺ +-l ong +_M IN +管 çļĦ +} = +ig m +ï¼Į ç»ĵåIJĪ +ĠY oga +( J +Be aut +P in +æľĪ åºķ +åĬł è½½ +_AD D +at isf +Ġspecific ation +ro c +ï¼ĮæĪij è¦ģ +, ... +) ï¼Ľ +.s ervice +Review s +个 人çļĦ +亮 çļĦ +оР³ +Ġquad r +c lock +Ġj er +rop ic +Ġbed rooms +ĠÎ ½ +Ġmed itation +è¡Į ç¨ĭ +Ġveter ans +çĭ ¸ +Dep th +pro gram +æ®ĭ çĸ¾ +åľº éĿ¢ +èĢ ½ +Ġsymmet ric +å¹³ çŃī +æĦ¿ æľĽ +idd ing +ĠP osition +Ġpresent ations +è´ ± +ĠS cal +l inks +< br +le asing +ĠCh ap +Ġm unicipal +æĸ° åįİ +è¿Ļ èά +_R X +t ool +ä¹ĺ åĿIJ +p b +force ment +æĬµ è¾¾ +ä¸Ĭ 次 +oun ters +åľ° è´¨ +ke e +ï¼Į éĹ® +Ġdis crete +v ideo +sub scribe +éĹ´ æİ¥ +k ip +-m ed +åıĤåĬł äºĨ +, çͱäºİ +avor ite +Gener ated +åıĬ åħ¶ä»ĸ +Ġreprodu ce +J un +ing o +念 头 +åħĪè¿Ľ çļĦ +é² į +ĠRestaur ant +å¾· éĩĮ +{ tab +ĠUnivers al +Ġbit coin +F un +Ġdump ster +Ġcomput ation +b ank +ĠSt ew +in ent +- new +ä¼Ł 大çļĦ +æ¸ħ éĨĴ +(s ub +Ġvit ro +ãĢĤ åıªæľī +ĠPort land +ï¼Įä¸Ģ äºĽ +Ġg aps +åı¯ éĢī +ä¸į æĢķ +arn ess +na ire +é»ij é¾Ļ +è¿ŀæİ¥ æľī +åľ¨ ä»ĸçļĦ +Ġlength s +_pro cess +Ġaw k +ï¼Į åIJ´ +ef ault +Ġdiet ary +Ch oose +ib bean +ĠMay or +ĠDu ke +ĠA my +çĸ ¤ +UP PORT +ĠâĢĶ âĢĶ +çŁŃ çŁŃ +type of +local host +_P ARAM +ä¿® æŃ£ +ä¹Łå°±æĺ¯ 说 +ä¿¡ 念 +èµ·æĿ¥ çļĦ +T ouch +row ave +Ġdepart ure +è¾ ľ +大 åİħ +ĠHe at +注æĦı äºĭ项 +Ġrespond ents +çİĩ åħĪ +å°Ĩ äºİ +, B +æĭ³ 头 +è ¤IJ +ii i +çľ¼ çļĦ +.f rame +Ġpress ed +åĴĮ 她 +Ġpan ic +Ġo mitted +ä¸į è§ī +>> > +Ġinvestig ations +ad ows +æŃ¤ ä¹ĭå¤ĸ +Ġste ep +éļ § +åĩº äºĨä¸Ģ +col l +ä¼ł æŁĵ +< n +em ade +, åΰ +Ġinsert ed +âĢĿ æĺ¯ +ĠP ok +ć Ć +- position +æĶ¶ åıĸ +& = +å¤ļ å¤ļ +ĠMean while +ĠF lu +Ġdraw s +åıĭ è°Ĭ +ric ane +åľ¨ æīĢè¿° +ĠD ance +ãĢĤ å¼ł +éª ļ +LE ASE +Ġreg istry +çľĭ ä¼¼ +ne um +Ġdig its +ĠP resent +åΤ åĨ³ +书 éĿ¢ +ĠComp lex +ĠS om +à ½ +Ġequ ilibrium +åĺ´ åĶĩ +ĠSh ipping +RE AM +. 
context +ost ream +ĠBud get +ĠAfghan istan +, æĢ» +Ġsuper b +æľĪ åĪĿ +Ġfund ament +Ġtra p +R R +T ok +Ġret reat +Ġanaly zing +ä¿® 为 +ï¼Į æĺ¯ä¸Ģ +ĠF alls +æ² ¥ +èĻ IJ +ĠExp ression +å« £ +× Ļ +ç²¾ ç¡® +Ġapplic ants +Pro to +ard o +è¿ŀ éĶģ +ĠAd ams +_ head +çļĦ身 å½± +ul as +ï¼Į è¦ģæ±Ĥ +ĠS witzerland +è¯ķ çĤ¹ +çļĦ éŨ +çĿĢ ä¸Ģ个 +еР¼ +ĠCond ition +Ġc ategorized +æľ¯ è¯Ń +Ġrem edy +e ature +ri k +im edia +Ġ 秦 +ï¼Į ä¼ģä¸ļ +_ST ART +æŀģ 大çļĦ +- i +å¯Ĩ éĽĨ +ipl inary +Ġrespons ive +ä» ķ +大 åݦ +éĹ º +Get ting +atern al +ĠG round +åİ» ä¸ĸ +, 说 +Ġsp ins +Ġly rics +ĠIl legal +Argument Exception +ĠH alf +æ°ij çĶŁ +Int ent +ãĢģ çľģ +Ġr amp +羣 å¿ĥ +æĮ¯ åħ´ +ä½ł åı¯ä»¥ +ĠW ang +åŁİå¸Ĥ çļĦ +ĠT rip +em and +_ OB +åıĸåĨ³ äºİ +Ġv o +ĠB right +å¦ ¾ +注 å°Ħ +oot ing +çļĦ ä¸ĵä¸ļ +ĠH ell +Sh ared +Ġhom eless +Ġappro ached +Ġwh is +Ġsem ester +Ġver dict +, ä¸Ķ +Ġconsider ably +ener ation +Ġå¹´ 度 +Ġsod ium +ĠN ormal +第äºĮ 天 +Ġcollabor ative +ä»ħä»ħ æĺ¯ +Ġadvent ures +sh a +åĴ § +Ġicon ic +åı£ ç¢ij +Ġ åīį +ï¼ĮåĽł èĢĮ +Ġg rip +åıij çĹħ +Top ic +èĽĭ ç³ķ +ĠDub ai +ä¸ŃåĽ½ 人 +ĠPRO VID +交 èѦ +æŃ¥ è¡Į +, å¾Ī +ĠS oviet +æĽ¿ æį¢ +, èĩªå·± +åıĺ çļĦ +ĠC ass +Ġab normal +ĠN T +åı¤ èĢģ +ãĢĤ åīį +Ġrender ing +Ġeconom ics +ĠM un +Ġdist ricts +å±Ĥ éĿ¢ +Ġmut ations +Sc an +åĴĮ å°ļ +Ġgot o +ot yp +ä¹ĭ æĦı +pro v +Sl ot +æ¿Ģ æĥħ +åī¯ ä¹¦è®° +ax ies +. 
all +Ġprof itable +å®ŀéĻħ æĥħåĨµ +è´¨ çĸij +çͲ åŁº +ne ath +red ited +åı¯ æĮģç»Ń +Ġspect ra +ĠS el +çIJĨ æĢ§ +Ġcor rupt +éªij åħµ +Ġsh ore +Ġprom oted +F ast +Ġattend ance +Ġembod iment +ĠEss ay +ĠN eb +Ġactiv ate +æŀģ éĻIJ +Ġl amp +or o +ĠLog ger +se ason +å°± 没 +' => +è·¯ çļĦ +ec ome +ï¼Į æĹłæ³ķ +pos s +å¹ħ 度 +is en +======== ==== +ãĢģ åįĹ +ĠApp ellant +å¤ļ å®¶ +å®ģ æ³¢ +Ġun comfortable +ĠB ond +Ġnew er +ä¸į åĥı +< h +çĥ Ľ +ĠCh art +ĠR at +" You +å¤Ħ å¤Ħ +ï¼Į æ±Ł +S ometimes +æĶ» åĿļ +Ġl ane +N M +æĺ¯åIJ¦ æľī +Ġatt ributed +ä¹ĭ éģĵ +æĬĬ 她 +é£İ 鼨 +äºĮ ç»´ +åİŁ æĿIJæĸĻ +çļĦ äºĨ +Ġcur tain +ĠPar se +æĦŁ æ¿Ģ +Ġres erves +æ¡Į ä¸Ĭ +Ġa str +æĺ¯ å¦Ĥä½ķ +CI AL +好 äºĭ +w ind +Ġinter pre +ç»Ħ 建 +ele m +I ll +åı£ åij³ +Ġc bd +Ġdro pping +ĠBru ce +Ġdist ant +Ġj oke +丽 çļĦ +Ġsear ches +qu estion +çļĦ大 å°ı +ĠC NN +UB L +åĬł åĿ¡ +ç»Ī ç©¶ +åĪĨ æīĭ +æŃ¦ è£ħ +Ġs ulf +, å¾Īå¤ļ +ĠFre edom +ĠQ t +Ġw ages +Ex port +为 èĩªå·± +社 ä¿Ŀ +Ġund ers +Ġbad ly +åĴ ĭ +常 åĬ¡ +èĢģ åŃIJ +ä»·å̼ è§Ĥ +ï¼Į èİ·å¾Ĺ +. image +è¿Ļç§į æĥħåĨµ +ï¼Į åĽ½å®¶ +Ġflav ors +ãĢĤ éϤäºĨ +ĠN i +ãĢģ 西 +Ġam endment +ä¸Ģ åIJ¬ +ĠW ikipedia +Ġmet all +ç»Ŀ æľĽ +Ġtyp ing +l ayer += _ +ĠB ab +ãĢĤ åIJİ +è·Ł éļı +Ġse aled +Ġp unch +Ġdat asets +ĠH ung +çıŃ çº§ +F M +èĢĥ æŁ¥ +çļĦ åıijçĶŁ +_m atch +Ġrev ision +} ` +缸 对äºİ +ĠB ird +Ġexcell ence +S witch +$ ) +æ³ķ å¸Ī +_n ext +Ġm os +ĠHer itage +ä½ł èĥ½ +Ġd ive +ĠPhil ip +Del ay +Ġr gba +对 æİ¥ +Ġf oss +Ġcl ouds +ĠCare er +è¶Ĭ 大 +ĠTh ought +度 åģĩ +ĠR ating +Ġjud icial +Ġtri ple +ĠBeaut iful +F uture +- set +Ġclean ed +Ġvibr ant +è¿Ľåħ¥ äºĨ +PE C +ĠRec overy +ĠU P +ĠOr igin +Read y +Ġmet als +ĠEmer gency +å·¥ ä¼ļ +ĠL isa +Ġscript s +åIJĦ æł·çļĦ +Ġappe aling +- round +_P IN +ĠMel bourne +Ġo ct +çļĦ åºĶç͍ +ç«ĭ åľº +æĥħ èĬĤ +;; ;; +Ġmulti pl +线 æĿ¡ +igr ant +form er +Ġfin anc +ss l +Ġm d +Ġconsum ed +ĠTag ged +or ch +Ġcreat or +_ app +ĠKnow ledge +åIJİ åĨį +ε ι +Ġworld s +Ġan ten +Ġwas hed +h ind +Ġ ï¼ģ +Ġafter wards +åĿĩ è¡¡ +åĩ Ħ +ĠT ower +ĠR oot +Ġbes ide +ãĢĤ é¦ĸåħĪ +Se q +ra h +ĠOr 
leans +. Q +é³ ŀ +ap a +att oo +and ise +CE PT +éļIJ ç§ģ +Ġcons pir +amp a +ãĢij ãĢIJ +åıĹ æ¬¢è¿İ +ĠT her +W T +at en +ĠReg istration +çİĭ çļĦ +ï¼Ł ä»ĸ +.t op +_ eq +Ġhop ed +Ġch rist +æĺ¯ ç͍ +l arge +ï¼Į 第 +欢 ä¹IJ +Ġpress ing +Ġfle et +ï¼Į ç®Ģ缴 +anc ers +C a +(m ax +Ġs z +Ġappli ances +k k +Ġd oses +è¿ĺ éľĢè¦ģ +S am +Ġmetab olic +Ġexc use +Ġto b +ĠB onus +â ľ +ke eper +var iable +ä¸Ĭ åѦ +Ġb orders +-f unction +åŃIJ åĴĮ +B atch +Ġcon e +in valid +åĩº è¡Ģ +éħ ° +A H +Ġriv al +æķ°åŃĹ åĮĸ +Ġpat io +Ġc fg +Ġf u +Ġtra ct +Ġconver gence +ãĢģ çĶŁäº§ +_ char +m ir +é¥ ¥ +- control +Ġvers atile +ff f +Ġt ires +Ġhar ass +ãĢģ æľįåĬ¡ +Ġpract icing +Ġn od +ä½ķ æĹ¶ +ç¯ · +li ver +Ġcho pped +æ¯ ¯ +Serial izer +ĠZ ero +èĢ ¿ +Ġ è¿Ļä¸Ģ +< script +Ġterm ination +Ġ 个 +Al ignment +å°± è¿Ļä¹Ī +åı£ ä¸Ń +京 åŁİ +Ġc d +Ġad apter +Ġin coming +主 æľº +æĭī åħĭ +ĠB ring +- co +ï¼Į çī¹ +第ä¸Ģ çϾ +}^ \ +uc lear +çł ° +éĿŀ æ´² +ĠJon athan +LO AT +ĠEp isode +Ġinst inct +çļĦ åij³éģĵ +oc o +Ġsec urities +we ights +r age +.d ec +- work +SE SSION +Ġcent ered +_ ms +ack ed +н Ñĭ +m ill +ï¼Į 忽çĦ¶ +Ġang ular +Ġlab our +E B +æĺ¯ åķĬ +l est +Ġimp ly +Ġel ite +ãĢģ æĽ´ +æĪı åī§ +.b uild +ĠRE AD +è¿ĺæĺ¯ è¦ģ +Stud ent +ĠSt age +Ġla undry +Ġnut s +ĠIn j +çļĦ å¸Ĥåľº +Ġmet aph +ĠAl aska +ĠC ards +Ġex clude +Ġmem orable +R ows +ĠS alt +ĠAssoci ates +Ġthe ft +ĠAl an +Ġrel ies +åĨĽ 人 +ĠM R +ĠE ye +Perm ission +ï¼Į çļĨ +Ġmach inery +us ive +_C MD +Sec ret +åĽŀ åΰäºĨ +Ġun clear +å¯ Ŀ +[ ^ +u ke +èĴ¸ æ±½ +ĠUp dated +zer os +oust ic +B order +ä½Ľ æķĻ +u ary +绣 çѹ +Ġrad i +åŀ Ĵ +L ess +ĠG PIO +Ġrelax ation +pat ient +å¨ģ åĬĽ +â ĭ +Ġspons ored +_b ox +sm ith +Ġm erc +è¥ Ħ +aw ait +Ġpert urb +Ġbre w +ä¸ĥ åħ« +çIJ IJ +Ġpost er +Ġr outer +Ġs ized +ĠProt ocol +Ġcryptoc urrency +Ġdo zens +ai ro +exp and +åĬŀ åѦ +Ġcomm od +æºIJ äºİ +ï¼Į åĬªåĬĽ +AR M +çĶŁåij½ çļĦ +reng th +ic i +å¤ĸ åĩº +_ command +Ġpar as +() )) +Ġenc oded +_ api +_d ebug +纳 ç±³ +Ġf usion +ï¼Į éĢłæĪIJ +Ġh iking +oz illa +ä¹ĺ 客 +.F orm +æĺ¥ ç§ĭ +. 
red +çľĭåΰ çļĦ +D en +ç»ĦæĪIJ çļĦ +ext ra +é¢ĺ æĿIJ +ĠS eg +ç² ª +_p refix +åķ¤ éħĴ +.R untime +Ġtreat s +: The +Ġve gan +åĵģ çļĦ +产 èĥ½ +ĠPoint s +Ġ éĺ¿ +ys ics +L aw +åħ¬ æŃ£ +Ġoptim ized +Ex ternal +Over all +ĠV PN +P ers +æķħ éĢī +ï¼Į åĩł +ĠCh oice +Ġc ave +Ġenzym e +Ġfor b +Ġh ood +_st ruct +iss ues +ĠNews letter +ĠV oice +ment ioned +ĠEx hib +if eration +çij ¾ +T alk +çļĦ çα +Ġpun ishment +Ġantib odies +AM ES +ĠO racle +羣 人 +Ġun employment +äºİ æīĢè¿° +ĠK rist +ï¼Įéļı åį³ +Ġf ate +èģĶ éĤ¦ +_ex ists +Ġcontrovers ial +ï¼Į åĵª +ĠFig ures +Ġdefect s +ic ates +ant i +_ST AT +ĠT a +Ġdiv ine +ï¼Į ä»»ä½ķ +ĠB rain +Ġsl ides +ï¼ļ C +Seg ment +k le +_F R +T urn +å¢ŀ 大 +åı¹ äºĨåı£æ°Ķ +h yd +ĠChar lotte +ĠP aint +ĠMe asure +æłĩåĩĨ åĮĸ +ĠIN C +ĠDo or +Ġle ar +ä½ł 说 +Ġtend ency +ï¼Į æī¾ +Ġproceed s +端 åı£ +ãĤ Ī +ĠB inary +äºĨ è¿Ļ +å±± å¸Ĥ +Ġ$ . +Ġhel per +Ġcand y +Sign ature +ĠAll ow +ll a +ĠD ental +ĠChar lie +l c +Ok ay +Ġref ere +. Resource +åıijçĶŁ åľ¨ +D om +纺 ç»ĩ +Ġsatisf ies +ĠCh icken +ĠLouis iana +- op +ul u +æĶ¶ 缩 +ob ic +andid ate +Ġrest art +ĠL P +ĠW oman +ãĢĤ åıªè¦ģ +Ġinter faces +çĸ ® +Con sole +H O +æ¡¥ æ¢ģ +Ġpro long +ĠB h +Ġadjust ments +Ġbet s +ah o +at ible +ï¼Į 估计 +Ph il +St ill +. at +Ġs ail +Ġlack ing +b ottom +è¿ŀ è½½ +. Re +åĽºå®ļè¿ŀæİ¥ æľī +Anal ytics +é¢ Ĥ +op ter +æīĵ éĩı +, æĪĸ +Ġsc ary +ï¼Į 欢è¿İ +Ġtour ists +board s +举 æĬ¥ +Ġout lined +åºķ ä¸ĭ +ĠL uck +ent e +ĠMult iple +ï¼Įæľī æķĪ +ć ï¼Į +ĠHun ter +opt im +class es +Ġclean er +Ġanim ated +ut t +çļĦ æķ°éĩı +èĥ½ 让 +ä¸įæĺ¯ å¾Ī +ç¥ ģ +ĠF ILE +Ġhealth ier +look up +ĠS ad +Ġan onym +éĢļ è¡Į +h ma +ĠHawai i +Ġelectron ics +{ X +_ open +b uy +ĠL ily +Ġviol ations +ä¸Ģ å±Ĥ +ĠPer iod +og ue +æĺ¯ æĥ³ +[ s +ĠCh amber +åIJĵ å¾Ĺ +Ġm M +ĠB aker +S n +B us +FA Q +ot ional +Ġb tn +L IN +æ²ī æ·Ģ +åIJĮ ä¸Ģ个 +, z +W ould +. 
left +é¦Ļ åij³ +Ġplug ins +åİŁ æĿ¥çļĦ +I p +Ġon click +é«ĺ å±Ĥ +åħī çļĦ +ĠChrist opher +Ġshe ep +çļĦä¸Ģ 声 +Ġatt ain +Ġcustom ize +æ¿Ģ æ´» +n ia +Ġl ighter +S ent +åģĩ æľŁ +Ġing redient +Ġfore x +ĠC leveland +ol ume +ĠL ind +{ G +ãĢģ ç͵ +.assert Equals +æİĴ æŁ¥ +c ies +Ġ å®ī +ãĢĤ ç»ıè¿ĩ +æ© ĺ +ãĢģ æľĢ +ĠC row +Ġr ally +pre v +( error +S F +ĠEx am +ĠG ordon +è¿ĺ æĥ³ +人类 çļĦ +åIJĽ åŃIJ +Ġbe aches +-A merican +书 çĶ» +ĠProject s +Ġdepend ency +æijĩ æijĩ头 +ï¼Į åľ° +çª ij +b en +erm at +ric a +ï¼Į åħħåĪĨ +ount ain +çŁ ¢ +Ġgu ilt +åįģ 竳 +.st yle +ow ski +äºĨ çļĦ +Ġcont ra +Ä ħ +ĠP ad +, éľĢè¦ģ +on ce +Ġnonpro fit +reat ment +ç¬Ķ èĢħ +Ġcabin ets +ï¼ » +ĠN H +羣 è¯ļ +ĠG ot +èĬ± çļĦ +wid et +ĠFr ame +Ġnic he +umb led +, æĥ³ +Ġ åįĹ +b ial +å°ı åŀĭ +ä¸į好 æĦıæĢĿ +ç»ı 纪 +ï¼Įä»ĸ åľ¨ +ĠBO OST +Ġlaw n +Ġ åıĪ +Ġchair man +_t op +Ġcro ps +ver ify +ill o +éļIJ éļIJ +ĠUn categorized +ĠI g +(t ime +q a +et itive +Ġjournal ist +Sep ar +Ġfavor able +Ġv ivo +Ġreplic a +èīºæľ¯ å®¶ +ĠB ah +Feature d +as ia +cont ract +ĠSt adium +Ġnever theless +Ġ èIJ§ +è¿Ľè¡Į çļĦ +I gnore +Ġph rases +Ġtact ics +ĠJenn ifer +éļı ä¹ĭ +ä¸Ģ 群 +ul atory +æĿ ĸ +- Ch +ĠO tt +ï¼Įå¹¶ ä¸į +Ġfail ures +Ġacknowled ged +Ġtail ored +aster xml +æłĩåĩĨ çļĦ +ĠF E +å°ij äºĨ +ä¸ĢçĤ¹ çĤ¹ +èĻļ åģĩ +丧 失 +æ£ ķ +Ġreg ret +ĠIN CLUDING +Ġcomp ress +/ E +Ġ" + +Ġwhe at +ĠA CT +Ġbur ied +Ġb ell +ĠCamp aign +ĠJust in +éĹª è¿ĩ +æĿ¥ æī¾ +ĠHig her +, f +ĠP CR +re nder +ound er +ĠNut rition +ĠG ray +å©Ĩ å©Ĩ +å·¦ ä¾§ +ï¼Į æį¢ +Ġper ipheral +Ġlo vers +Ġint ensive +S eries +Ġrest ored +Ġnot ing +s burgh +re ts +Ġl id +æĶ¿ åįı +ç¼ĵ åŃĺ +_B O +ĠF actory +Ġcoh ort +g ie +F ailure +Ġv iable +ï¼Į éĻįä½İ +æĪij äºĨ +Ġlist ings +Ġposit ively +ĠNew ton +-h ouse +Ġign or +æĬµ æĬ¼ +B oard +Ġenc ryption +Ġattack ed +ank a +Ġrem inds +hed ral +åİķ æīĢ +sc ar +ĠTransport ation +.ex e +- is +Ġhyper t +î Ģ +âĦĥ ï¼Į +im itive +usal em +re view +Ġfert il +åĮħ åĽ´ +ĠLaw rence +磩 éĺµ ++ = +é«ĺéĢŁ åħ¬è·¯ +好 è¯Ħ +B ACK +_STR ING +å½ĵ æĪIJ +Ġ åħ¶ä¸Ń +ĠC AN +( params 
+åĩº èī² +Ġque en +load er +æľºæŀĦ çļĦ +书 ä¸Ń +çģ ¼ +ĠSt ri +ä¸ĸ éĹ´ +å® µ +ĠC itiz +Ġsa fer +Ġs aves +æľī å¤ļå°ij +æľª å¿ħ +Ġwond ered +æļĹ ç¤º +L ines +èİ« åIJį +ĠF C +c v +èĦij åŃIJ +S ports +pl aces +Ġj unk +< = +ie ces +ï¼ĮéĤ£ æĺ¯ +_ access +I K +Ġhousehold s +诸 èijĽ +æij § +ustral ia +ĠR aw +Ġon ion +ĠRes erved +ĠEx ercise +ĠW ard +yl on +ĠK han +Man age +Ġin con +ä»Ĭ æĻļ +_L IST +ĠL OVE +ä¸Ģ éĿ¢ +.c ode +åħ ľ +ane an +-s ervice +ï¼Įæ¯ı 次 +åıij äºĨ +身 é«ĺ +åĨ Ģ +ĠAr c +ex ec +è£ħ éħį +æİ¥ è¿ĩ +ãģĻ ãĤĭ +build ing +Ġt orn +æĸ°åĨł èĤºçĤİ +èIJ½ åIJİ +ĠPop ular +Ġc url +åIJij äºĨ +- looking +两 èĢħ +erv able +å¢ĥ å¤ĸ +ç¼ ķ +ze ch +çļĦ æĥ³æ³ķ +ra ble +åľ¨ ä½ł +å¿Ļ ç¢Į +M achine +éĩĩç͍ äºĨ +rep ly +á s +Ġhe ated +ĠW onder +Ġdepos its +è¶Ĭ å¤ļ +ä½ĵ è´¨ +H y +St ructure +ĠF BI +çª ľ +ĠTok yo +ĠAdd ed +ä¿Ŀ å®ī +ĠS V +Ġpros per +Ġ æ±Ł +å¼Ģ 设æľī +ĠNe ither +ĠSumm it +ĠPC I +Ġw ore +ĠB or +ç»Ĩ èĩ´ +åĬł éĩį +Ġcomput ational +ï¼Įå¦Ĥæŀľ ä½ł +ç¼ Ģ +Ġcamp ing +ï¼ ½ +widet ilde +åŃĹ æ®µ +Ġins ufficient +ï¼Į åİŁæľ¬ +Ġj ar +Ġp ine +Ġar th +T y +Th ough +åĨ² åĬ¨ +çļĦ æĹ¥åŃIJ +å°ı å°ıçļĦ +ä¸Ģ éĥ¨ +Ġposition ed +ĠP ink +æĶ¾ è¿ĩ +× ķ +泡 沫 +n atural +ĠC ALL +Ġtrad ers +æľī åĬ©äºİ +Ġo g +è¿Ļæł· ä¸Ģ个 +Ġcomp romise +è̳ è¾¹ +Ġu b +Ġbreak down +T ile +Ġcircul ation +èı ± +两 大 +æµĻæ±Ł çľģ +Ġbas ement +女 人çļĦ +k ill +.is Empty +Ġobs c +ĠIn side +æ´¾åĩº æīĢ +M ail +Ø ¨ +st ick +离 åİ» +ĠOr th +ĠPol and +Ġmut ation +Ġhere by +Ġpharm aceutical +i otic +Ġi i +ĠN eg +ĠL ex +о н +op ath +å¥ĩ 迹 +Ġvacc ines +ä¸į åĩºæĿ¥ +Ġ ï¼Ľ +ï¼Į åIJĮæł· +Ġded uct +ä n +ec ycle +ä¹Ł åı¯ +Ġneed ing +åı¦ä¸Ģ 端 +Ġimm unity +å¾Īå¤ļ çļĦ +H int +_ uint +纯 ç²¹ +ä¸ĭ æ»ij +Ġrese mb +ï¼Į éĿ¢å¯¹ +omorph ism +Ġw avelength +ãĢĤ ç»ı +Ġconf using +Ġalleged ly +Ġint ens +\ }$ +Ġgar bage +Ġsurv ived +Mon itor +ĠDiam ond +Ġun ity +S ing +ĠPrem ise +Ġcreat ure +çĶŁæĢģ çݯå¢ĥ +ç¡ Ŀ +H tml +\ ]) +Ġl ag +p ic +å½ĵ ä½ľ +ĠF ramework +Ġinf erence +ĠPolit ics +ä¸ĭ ä¸Ģ个 +_SH IFT +å» ĸ +ĠD ating +ET HER +Mut able +h n +T ax +Class es +天 æīį 
+Ġnation wide +ä¸Ģ 带 +Ġtempor al +åºĹ çļĦ +ç´¢ å¼ķ +âĢķ âĢķ +Ġfat igue +ä¹ ŀ +Ġexpect s +å¾Ĺ åĪĨ +Ġbr ut +Ġb ubble +Ġconv icted +at ched +.Ass ert +第äºĮ 个 +ĠIs a +Ġfarm ing +Ġinnoc ent +, è¿Ļç§į +Ġen code +ãĢģ 红 +Ġinv asion +Ġoff line +ĠF ine +Ġs word +Ġeth ics +è¿ĺ åİŁ +Bit map +_ conf +ĠApp s +_d b +D K +ari ans +æĬĬ è¿Ļ +ĠThe me +å½ ¤ +çļĦ æ¨¡æł· +-r un +ä»İ ä¸ļ +-p age +éĺ¿ å§¨ +Ġco ating +IT ION +. register +æĻ¶ ä½ĵ +Ġaccess ibility +A ctions +ï¼Įçľĭ çľĭ +_ IR +Ġnon linear +Ġinflam matory +ä¸į æĺİçϽ +å¼Ģ éĢļ +as ury +æĦŁ æħ¨ +à ¨ +Output Stream +b oth +Ġgal axies +Ġsecret ary +uis ine +ios ity +ĠSc ale +( . +ãĢĬ ä¸ŃåĽ½ +Tree Node +Ġprom otes +Ġrat ios +ĠKenn edy +Ġcop ied +åĵ Ĺ +, éĥ½æĺ¯ +Ġnovel s +ĠR ab +Ġâ Ķ +Ġun ited +ĠExt ra +è¯Ħ 审 +åĴĮ æľįåĬ¡ +Ġal k +Err ors +p aces +аРº +æĹ¶ 空 +ĠPl ans +Ġdom inated +ub untu +og g +ï¼Į å±ŀäºİ +Ġtre asure +å£ģ ä¸Ĭ +楼 梯 +ĠDAM AGES +ĠH at +éģĩ åΰäºĨ +Ġ' : +âĢľ This +å¹³ åĩ¡ +Ø © +ĠMiss issippi +åŃĺ æĶ¾ +çħ ľ +w y +ĠGra ham +产ä¸ļ éĵ¾ +Ġinvestig ating +ï¼Ł éĤ£ +æ·¡æ·¡ çļĦ +() [ +大 æķ°æį® +ĠJ ere +·· ·· +M AC +.e clipse +Ġbacter ial +.ex ecute +ï¼Į ç´§ +Ġw rest +Ġen act +äºĮ æĺ¯ +客 æĪ¿ +æľīåħ³ çļĦ +Pro c +æĢİä¹Ī 说 +ï¼Į ç»Ŀ +Ġdr unk +_F ORM +ï¼Ī äºĮ +> ' +Ġval ley +æĪij们 åı¯ä»¥ +ĠFis her +ht m +)) /( +ĠL am +ï¼Įæľī 人 +id ad +ä½ł ä¼ļ +. 
items +ĠApp end +åı¯èĥ½ çļĦ +m ers +ĠM ining +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠ +ĠL aura +- In +ç» İ +ĠN elson + º +éľĢè¦ģ 注æĦı +Ġr hs +ESS AGE +ï¼Į æ±ī +ĠNY C +çļĦ人 éĥ½ +Ġwind s +ĠRob inson +let t +èµĭ äºĪ +Ġbre ed +éģŃ åıĹ +ä»·å̼ çļĦ +éĤ£ æł·çļĦ +ens ors +èĩªå·± åľ¨ +Ġcelebr ating +( options +ĠF er +å¨ Ł +Ġt witter +ï¼Į æĹ¶éĹ´ +ï¼Į æİ¨åĬ¨ +Ġillust rate +å¹´ åľ¨ +ra ise +失 ä¸ļ +åħ¥ éĢī +Ġh iding +æĸ ĭ +京 举 +Ġt ropical +Ġ[ - +Ġrun ner +çļ± çľī +Ġdes irable +Ġo v +M ix +b t +ĠEd it +马åħĭ æĢĿ +an on +ed a +Ġdel ays +ES CRIP +Ġ åħŃ +Ġm arch +, åģļ +ĠR C +M al +åģ¥åº· çļĦ +oint s +Ġf lesh +Ġcard iovascular +ĠC abin +åľĨ 满 +B IT +s afe +USE D +ĠB T +ĠQue ens +æĬķ è¯ī +Ġwa ited +ĠD ress +åı¦ ä¸Ģç§į +Ġprohib ited +he imer +ĠSent ence +I AN +Ġdifferent iation +Ġsus pected +Ġb ounce +ॠį +Ġmet ast +çĦ ķ +or ia +ĠSupp ly +说 说 +ĠV el +ĠWe ather +â Ļ +Ġde ce +Ġadvis or +æ²³åįĹ çľģ +Ġhorm one +ĠP K +ĠT aking +_ root +j ax +ĠB eyond +Ġassess ments +Ġveget able +Ġpoor ly +Ġsh ake +pro c +ĠF ixed + using +b x +Ġp ause +湿 度 +ĠStand ards +åħ¨ æĺ¯ +ĠT ar +ï¼Į å¾Ģå¾Ģ +Ġpract ically +Com merce +Ġas ympt +Tr ust +count ry +ä½ł åİ» +ç͍ æīĭ +Cal endar +ï¼Į å¾Ģ +ernet es +ĠInit ialize +app ropriate +ĠA DC +åľ¨ èĩªå·± +os ome +H ouse +ĠAd just +T ele +Ġunder go +åİŁ åľ° +Ġsh ine +满 èĦ¸ +ç¦ ¹ +çļ ĵ +: . 
+Ġroll er +èĴ ² +Ġjump ed +å°±åı¯ä»¥ äºĨ +x p +Ġbe an +j amin +_st ep +Ġf ond +ä¿ ı +im ity +Con structor +åĵ ī +- ref +Ġsim plicity +Ġboy friend +.D e +Ġtransf ers +Ġint imate +Ġp ione +ĠCar ter +ç³ĸå°¿ çĹħ +çļĦ å®ŀåĬĽ +Ġcle ans +è¢Ń åĩ» +Ġact ed +ĠA le +Ġmet er +ub ernetes +f ocus +ĠSH ALL +Ġ' ', +ĠM Y +èĦļ ä¸ĭ +ĠE SP +ç±³ åħ° +lement ed +模 çħ³ +Ġdes criptor +_E VENT +_OFF SET +èĮ İ +ir i +ĠThe mes +Ġover look +人 对 +ij ing +ass ador +ĠT i +ĠBrook lyn +ï¼Į å±± +åŁº çĿ£ +Ġ' { +ï¼Į æĭ¿ +å¼± çļĦ +é¢Ħ èѦ +Ġtomat oes +Ġp ile +ï¼Į ç¬ij +æ» ¥ +: / +èıľ åįķ +åζ åĨ· +åľ¨ åīį +Ġre nown +æľ¬ åľŁ +ĠM eta +ï¼Į äºij +对 ç§° +ĠAr thur +Ġless er +Ġdisp atch +åº ļ +_B Y +åı¬ åͤ +Ġappro aching +ĠK r +ĠMain e +ï¼Įæľ¬ æĿ¥ +L and +Ġdress ing +. ] +è°Ī è¯Ŀ +ĠLab our +, æľĢåIJİ +ĠBlu etooth +Ġreal m +ãĢģ æĸĩåĮĸ +ĠL GBT +Ġdet ector +ĠP el +å¢ŀ éĢŁ +Ġsu cc +å½±åĵį åΰ +æĦŁ çļĦ +寻 常 +ä¸Ģ åī¯ +çģ¾ å®³ +Ġob esity +æĦŁ åı¹ +Ġunder neath +èĥ¸ åı£ +æĢ§ åĪ« +f old +(s ource +_ work +St age +- section +R oom +å¡« åħħ +ĠM ental +ĠA A +Ġal ias +L T +ĠComp are +( ref +ï¼ļ B +çªģ åıij +with out +or ic +[ ... 
+âĢľ éĤ£ +Ġperiod ic +ĠMathemat ics +rop olitan +çĿĢ èĩªå·±çļĦ +è§Ĩ è§Ĵ +å§ ¥ +Ġshow case +UIL D +Ġtrans mitted +交 ä»ĺ +-s c +åģı åģı +ĠT ABLE +主管 éĥ¨éŨ +inst ein +èħ ¥ +-orient ed +Ġstat istically +iv als +Ġw it +çłĶ 讨 +Ġmarg inal +, 为äºĨ +ed ges +OV ER +æŃ£ ä¹ī +è¿Ļ åĦ¿ +ä¹° åħ¥ +ĠEXP RESS +P ER +ĠLog in +er ce +ä½ľä¸º ä¸Ģ个 +oun cing +G uid +ro ve +äºĨä¸Ģ ä¼ļåĦ¿ +th ink +Ġnormal ized +åĽ½å®¶ 级 +Ġsil ly +Ġconsum ing +Ġpr ayers +ç¥ŀ èī² +Ġr am +åľ¨ ä¸Ĭ +ut able +è¶Ĭ åįĹ +ç²¾ ç»Ĩ +( start +åĮĸ 为 +çļĦ åĬ¨ä½ľ +Ġpl ots +Ġfe ver +Ent ries +M ass +an gh +m x +çļĦ ä¸įåIJĮ +_ ops +Ġbl ast +ĠSt ra +XX XX +çĿĢ èĩªå·± +re ctions +ole cular +ĠBr un +çŁ¥éģĵ çļĦ +ĠW ords +Ġo l +RES ULT +Work ing +Ġbe ars +æİ¢ æµĭ +ly ph +le ading +om ent +ĠGu est +å®ī ç½® +ĠK ay +Ġded ication +Ġd ens +Ġsk etch +åİĭ åζ +m aker +Ġresil ience +ĠR achel +ĠT M +ĠM ilitary +åŃ£ æŀ« +Ġmaintain s +Ġpass ive +éħĴ ç²¾ +Ġspread ing +Ġtob acco +女 åŃ©åŃIJ +Ġclos et +é¡ · +ĠF irm +ï¼Į å¾IJ +CON T +- inc +ĠB erg +å¾Ī éĩįè¦ģ +ars ed +æłĩ çļĦ +ä¼ł è¾¾ +å½±åĵį åĬĽ +ï¼Įä½Ĩ æĪij +Ex pected +èѦ æĥķ +Ġ å¾IJ +ch r +Ġart ic +Ġsc iences +横 åIJij +Ġoverl ap +Pay ment +re ck +Sur face +/ null +Ġinhib ition +Ġpred omin +天 èµĭ +st airs +Ġstress ed +se en +ĠOr d +T yp +Ġavoid ed +æľ Ķ +Ġw ines +Ġpure ly +och ond +åĵģçīĮ çļĦ +åĿĹ éĴ± +Re q +Ġfoot age +IF Y +ĠOff er +com ponents +ent ries +ĠProcess ing +Ġdest inations +åĩºçīĪ çļĦ +us c +åĽ° æī° +åįģ ä¸ĩ +ç¼´ è´¹ +ro be +ling ton +æĮģ æľī人 +ĠMor ning +å¼Ģåıij åĮº +.c opy +Arg uments +b oot +ĠCar ibbean +ĠEconom ics +åĴ¬ çīĻ +% - +ãĢģ åı¯ +åĭIJ åľ° +. 
Comp +导 ç͵ +çIJĨ å·¥ +ĠArch ive +Ġreven ues +çĩĥ æ°Ķ +ĠSk ills +ĠV ideos +ith metic +大 è¡Ĺ +ĠF actor +Ġstrugg led +Ġtrans plant +éĺ Ī +çĥŃ çĤ¹ +éĩij éĴ± +Ġreg ulate +ific ant +人 ä¹Ł +åĩº åı° +Ġrespond ing +ï¼Įä¸į åIJĮ +Ġaw ful +ä¸Ģ åij¨ +Ġc it +èIJ½ çļĦ +æļĤ åģľ +ĠBe ck +_t ree +åħ¬ 示 +Ġj azz +An chor +plic ate +y ll +[ t +ä¸į æĦ¿ +Ġover head +车 åŃIJ +Ġy a +ĠJun ior +umb lr +为 åŁºç¡Ģ +ĠE ither +ĠPROVID ED +uc le +æĹł æķĪ +æ¾ Ī +Ġen rolled +ä¹ĭ ç±» +T ensor +\ mathbb +Ġcolor ful +ä¸į æĹ¶ +ä½ľ é£İ +" ãĢģ" +Ä ĩ +, èĩª +åŃķ å¦ĩ +Ä Ľ +Ġreported ly +Ġcalcul ator +ä¸į èĤ¯ +å¥ ķ +Ġ} ) +à Ĥ +éľĩ æĴ¼ +, ä¸Ģèά +Ġpack ing +亮 çĤ¹ +he im +åĵ ¨ +ĠChe l +, æĸ° +Ġang el +æŁ¥ æī¾ +Ġsubscrib ers +D eb +ig a +. User +Ġre vel +ĠR ect +æīĢ èĥ½ +ĠD ent +æijĦåĥı 头 +Ġtra ins +Ġag g +.C ode +l ad +Ġdef ence +W O +mon ary +è¿Ļ 座 +. ac +em et +.t able +Ġunf air +Ġo ste +èĢģ å®ŀ +:h over +G D +Ġdes erves +é¸ ¦ +Ġbench mark +Ġderiv atives +Ġmind set +Ġd aughters +ind ent +rav ity +ĠAl ber +ent ina +ĠY an +许åı¯ è¯ģ +Ġdeploy ed +if le +åı¯ä»¥ 说 +Column s +Ġen rich +T weet +ĠI RS +Ġloc als +Array List +Ġz u +Per iod +ĠF lex +S rc +ç«ĭ è¶³ +. not +çĶ³è¯· 人 +b ling +. Request +_p red +duct or +epend ence +æĹł 声 +æľ¬ ç«Ļ +èı ģ +ĠL ik +ver ts +Ġass ists +ree ze +ren a +ass et +Ġthro at +ä¹ĭ èī² +ãĢģ èĩª +)) / +ER O +æľįåĬ¡ ä¸Ńå¿ĥ +_F OR +Ġduplic ate +Ġ iv +Ġ( (- +ãĢĤ æŃ¤æĹ¶ +ä¸įä¼ļ æľī +å°± 好äºĨ +ĠI BM +å¦Ļ çļĦ +ĠR S +ĠDec or +ĠV ec +ĠSug ar +Ġeduc ated +br is +Will iam +al ias +g ement +Ġbu zz +æĺŁ æĺŁ +èĹ » +æľī éĴ± +ib e +U AL +Ġunderstand s +Ġab dom +éĩĩ æł· +R unning +æĺ¯ä¸Ģ ä½į +åĬ¨ éĿĻ +u cl +æ´»åĬ¨ ä¸Ń +_AT TR +éļ¾ åıĹ +é«ĺ å³° +å·¦ æīĭ +Ref erences +ï¼Į å®ŀ +Ġaest hetic +ä¸Ń æľŁ +çİ© çļĦ +ä¸Ģ æľ¬ +åıª éľĢè¦ģ +为 ä»ĸ +atin um +马 车 +Ġp anc +_N ONE +Ġsl ave +Add ed +åĵ Ł +Ġep och +Ġfif ty +ond e +. 
str +-sh aped +å±ģ èĤ¡ +IS A +Ġindex es +çĿĢ äºĨ +inter pret +/ kg +åı¤ åħ¸ +Ġvolunt ary +Ġconst itution +ĠClean ing +ĠM os +åħ¨ æĸ¹ä½į +äºĮ æīĭ +osc ow +ĠT ob +Ġg adget +Ġfor ums +ĠBas ics +ID TH +ä¸Ģ æĦ£ +Ġant ioxid +ĠFl ag +arch ive +- art +Ġw rapper +羣å®ŀ çļĦ +ud y +ç²¾ åįİ +. output +Ġb und +è¯Ń åı¥ +Ġhor rible +éĤ ¹ +Ġemb ry +[ idx +çĻ £ +å¹³ è¡Į +_IN TER +çĤ¹ çĤ¹ +pect or +Ġc lo +çĶŁ åĬ¨ +æ´ ½ +Ġconcent rate +ĠR SS +Ġstri kes +çļĦ æĬķèµĦ +éļı åį³ +Ġâĸ ¡ +çĦ¦ çĤ¹ +çļĦ åıĮ +åIJ¬ åΰäºĨ +Ġnow here +H I +Ġcoast al +CH AR +å®Ŀ çŁ³ +, ä¸Ĭ +æİĴ åĩº +ï¼Į åįİ +_ iterator +hat tan +PH P +ç¨ ļ +ï¼Ī å¦Ĥ +åĴĮ åıijå±ķ +_m e +éĢģ ç»Ļ +ĠSy nt +L V +åħ¬åĬ¡ åijĺ +çľĭ ä»ĸ +åıĺ éĿ© +åĬ³ åĬ¡ +æīĭ åĬ¨ +Ġcons iders +Ġind ent +ract ice +ĠScient ific +Ġunder ground +Ġfor cing +ä¸ĭ ä¸ĢæŃ¥ +Ġm RNA +F IN +çĨŁ ç»ĥ +CL U +ag ination +ï¼Į åĪĩ +Ġschol ars +TE LE +_ off +çĸĹ æ³ķ +ott age +è¿ĺæĺ¯ å¾Ī +she et +Ġapart ments +人 äºĭ +Ġst ellar +omorph ic +身 å¿ĥ +out ed +Ġtrad es +le en +Un fortunately +ĠM yst +ĠH app +Ch unk +ĠF am +éĻĦ å±ŀ +Ġhor izon +Ġoppon ents +UT C +建çŃij éĿ¢ç§¯ +_l ib +. Status +ĠHealth y +导 å¼¹ +_c urrent +_se q +Ġ 被 +ĠS ites +ili ation +ĠCreat es +-n ecessary +ĠS qu +ict s +æ´Ľ éĺ³ +_c pu +为 ä¸Ńå¿ĥ +ĠSen ator +ĠP ic +Ġlo ver +ĠH us +ï¼Į æŃ» +Ġdistinct ive +Ġbo ats +Ġrel atives +Ġconfig urations +Ġmole cule +Ġconf erences +Ġconv ince +Ġhe t +头 éĥ¨ +. 
http +.f ields +E A +ĠAccording ly +æĢ» 书记 +un defined +Ġsl im +P ATH +Ġmin erals +æ¶Īè´¹ èĢħçļĦ +ĠSing h +Ġm ilit +Ġco oper +Ġrev ised +Ġembod iments +{eqn array +Ġinit iated +: ** +ä¼ļ 让 +主 çļĦ +æĢ» çĽij +å·¥ä½ľ èĢħ +Ġsp a +Ġroll s +ĠT u +ĠB rief +ĠM all +ĠCON T +Ġâ̦ â̦ +Ġen compass +åįģä¸ī 竳 +ĠSpecific ally +Ch ris +H ard +åİĨ ç¨ĭ +æĺ¯ éĢļè¿ĩ +Ġguarant ees +éĺ¿ æĭī +常 ç͍çļĦ +ĠChristian ity +Ġa th +ãĢģ 管çIJĨ +ç¨ ½ +æĬ¬èµ· 头 +æĹĹ èΰ +cept or +Ġ ãĢĢ +Ġ æ°´ +ç»ıåİĨ äºĨ +ĠL CD +Ġfeas ible +å¡ij éĢł +Ġw and +Ġrevers ed +.App end +æģ ³ +ĠIN TER +ans wer +åħ¬ å¼Ģåıij +st yles +T w +_l ayer +ber y +Ġtravel ed +æĿĢ æīĭ +Ġmod est +âĢĿ åŃĹ +b as +æ°ij äºĭ +com ed +Pro of +W as +ä¾ ¨ +Ġde put +Ġapplic ant +èĥ½å¤Ł åľ¨ +âĢĿ ãĢģ +Ġlot tery +ade qu +Ġdis closed +/ e +ä¿® 订 +Ġbo iler +å¸Ī åħĦ +ä¸Ģ æµģ +re port +éĻ ¡ +ĠM IN +ist le +Ġ æĸ¹ +Ġimport ed +_set tings +Ġatt raction +umb ai +She et +Ġ è¿ĻäºĽ +å¤ĸ 交 +Ġh ike +Ġinc idents +Ġp ist +Event Listener +e em +Ġm im +rep o +E ss +æĺ¯ä¸Ģ ä»¶ +oub ted +Ġge ometric +ï ¹ +ĠC li +. item +çļĦ çĶŁäº§ +и н +æĶ¹ 为 +Ġà ¢ +ers ed +y te +ä¼ļ 导èĩ´ +S plit +_t emplate +çĶ· 人çļĦ +ach t +-c ell +. Web +ä¸Ģ æł¹ +ï¼Į åıĮæĸ¹ +P a +Ġprogram mes +管 æİ§ +Ġmon ster +K T +Ġscan ning +. ). 
+ç» ŀ +èĩªçĦ¶ çļĦ +msg str +Ġback ing +ä¸Ģ åĪ» +Al ert +ï¼Ľ ï¼Ī +d irect +ĠCONTR ACT +Ġcou ch +Ġr itual +du p +éļ§ éģĵ +Ġsurprising ly +.c s +åĨ· ç¬ij +Ġâ Ĺ +主 æµģ +å°± çŁ¥éģĵ +æ³ķ åħ° +åIJİ éĢĢ ++ - +_ URL +éĥ½ ä¸įçŁ¥éģĵ +Ġb urg +Ġt sp +_p art +а ÑĢ +ä¹ĥ èĩ³ +cs v +c ourse +ORD ER +ç»Ĩ èħ» +ĠBar bara +r andom +ĠUn ity +ĠNev ada +} ", +Count ry +Ġv a +p her +ron es +åĨĽ åĽ¢ +#### # +, åĽł +Ġindu ce +æ¶Ī çģŃ +æĶ¯ æı´ +å¸Ĥ åĮº +Ġcontin ually +ç¬ Ľ +åIJĪ èĤ¥ +é¡¶ 级 +Ġshow c +ĠÏĦ η +ical s +æĪij们 è¦ģ +æĶ¿ åĬ¡ +è¿Ļ个 ä¸ĸçķĮ +åĵ ® +ãĢĤ 第 +åĸĬ éģĵ +Ġassert That +( log +ï¼ī ï¼Ī +æ¯ķä¸ļ äºİ +Ġexpress ing +Ġcr ate +Ġmin s +Ġdiscuss es +åĭIJ çĦ¶ +ĠS ymbol +ĠR A +å®ı è§Ĥ +Ġcycl ing +Dec imal +& D +稳å®ļ çļĦ +主 èIJ¥ +ĠStew art +Ġch apters +å¦ Ĵ +um per +Ġdil ig +å°Ķ çļĦ +èį· åħ° +ä¸Ģ 度 +ĠIt ems +ext ension +âī ¤ +ĠW W +èĬ ¸ +.get String +追 ç©¶ +ĠT es +A ug +Ġhold ers +æ¯ģ çģŃ +ãĢĤ [ +çĥĺ å¹² +_c allback +WI SE +ĠR ole +( expected +Ġp ockets +å®ī å¿ĥ +è¯Ħ 级 +b ird +æĭ ½ +çĥŃ éŨ +J ack +pos al +-st ar +Ġsimpl ify +ĠT YPE +å¿ħ å®ļ +Ġc u +å®ĮæĪIJ åIJİ +p ing +å®īåħ¨ çļĦ +B ill +Ġsell s +Ġsubmit ting +, 好 +建ç«ĭ äºĨ +Ġpursu ing +ãĢĤ åΰ +art en +Ġf ires +ä¸ī çϾ +ĠMuslim s +çĿ ¹ +w x +ĠPre vention +äºĨ ä½ł +çļĦ æĪij +æĥ© ç½ļ +ï¼Įåıª ä¸įè¿ĩ +.d is +Mean while +Ġgood ness +Ġc ategor +w estern +_C TRL +Ġpun ct +æĺ § +ĠB eth +æ¸ħ åįİ +èĨ ³ +Ġhe d +Ġimpro per +ic ht +Ġexp ed +rad io +r x +æĿ¥ è¿Ľè¡Į +Ġsp ont +( root +pro d +Ġw ax +ä¸Ģ ä¾§ +ĠPark er +Ġoccas ional +-c ar +ĠD omain +éĢĶ ä¸Ń +Ġacc eleration +Ġlaunch ing +Manag ement +éķ ĸ +st ation +ï¼Į æŀģ +Ġdys function +åıĺ äºĨ +车 ç«Ļ +éģĵ æŃī +Ġcar riers +èĢģ 年人 +A w +è¿ĺ æľª +Ġopt ing +E urope +éĽ į +çŃĽ éĢī +æ·± çļĦ +ĠT ro +ä¹ĭ æĹ¥èµ· +author ized +. 
q +ĠRet ail +å¸Ĥ éķ¿ +两 åIJį +Ġapp ell +æİĮ æİ§ +Ġjump ing +e ous +èĤ¯å®ļ æĺ¯ +为 æĪij +H uman +Ġaccompany ing +åįļ 客 +h w +ï¼Į ç͍æĪ· +ĠFr an +S um +ï¼Įä½ł è¦ģ +Ġp ound +çļĦ æł¸å¿ĥ +Ġpay day +ĠM is +Ġeditor ial +åĨħ æ¶µ +çļĦä¸Ģ 次 +Ġexecut ives +ili ate +ĠP ray +ĠWar ren +Ġwa ist +ĠOffic ial +交 代 +Ġlist ened +æĹł ç©· +é£İ çļĦ +Ġconnect s +ç¿ ¡ +æ³ķ åĪĻ +èı© èIJ¨ +ãĢģ éĶĢåĶ® +ä¹ĭåIJİ çļĦ +L M +P ane +ig g +ĠP I +Ġ* = +åĩłä¸ª æľĪ +Ġd airy +ĠGu y +r p +Ġvirus es +ï¼Į é¾Ļ +_s pec +ĠAR T +Ġpromot ions +< double +- owned +Te X +ĠS HA +ator ies +/ V +ed in +rac ellular +Ch anges +om on +éĩĮ æľī +D IS +æľĿ é²ľ +Ġre nov +ĠCon struct +ãĢĤå¦Ĥæŀľ ä½ł +ä¸Ļ çĥ¯ +ä¸į å½ĵ +没æľī äºĨ +Ġon set +ç»ıæµİ åѦ +èij µ +ffic iency +ĠO ften +.s erver +N orm +: s +ä½ĵ åĬĽ +che t +ĠE ld +Ġdispos al +Ġselect or +h im +çħ¤ çĤŃ +éĿŀ常 好 +h ide +rep resent +ĠDirect ors +he art +S em +Build ing +Ġthank ful +å·² æĺ¯ +ï¼Į å°½ +k n +çļĩ åŃIJ +Ġunder graduate +ĠChe ap +in formation +Ġinv itation +w ritten +ĠPolit ical +æµģ ä¼ł +亲 åŃIJ +tr ation +ring e +Ġtherap ist +åĨħ容 ç®Ģä»ĭ +ot lin +ARR AY +Ġcoord ination +å» ĵ +æł¼ å¤ĸ +Ġdefault s +ĠJ oh +éģĩ è§ģ +Ġfl ush +大 è§Ħ模 +xx x +.c olor +-s peed +ĠClin ic +åij½ ä¸Ń +Ġjournal ists +Ġalt ogether +ï¼Į åIJĦç§į +Ġb ump +æĽ ¦ +Ġpres erved +夫 å¦ĩ +å¹» æĥ³ +B re +Ø ³ +ä¸Ģ 身 +b ased +åı¯ ä¸į +ĠS ri +ĠJer usalem +Ġsched uling +{ e +* ~ +Ġrem arks +om i +åįĵ è¶Ĭ +< Integer +it err +æķ´ é½IJ +Ġl am +ĠRead er +å¼¥ è¡¥ +Ġband width +Ġman ages +æīĢ è¯´ +çļĩ å®¶ +eh icles +, äºİæĺ¯ +éħĴ åIJ§ +.n ode +under line +_l ast +éĩį éĩį +两 æĿ¡ +çļĦéĩįè¦ģ æĢ§ +æł¸ ç®Ĺ +ĠV iol +( sc +Ġac et +åħ¬åı¸ åľ¨ +ä»ĸ们 åľ¨ +(b ool +Start ing +lo ys +ay load +_un lock +Ġd in +Ġbo il +æ¦Ĥ è¿° +ĠM ob +åŃĹ æ¯į +æIJ Ĥ +Ġlif ting +Ġso vere +ĠP OST +æ°Ķ åĬ¿ +Link edIn +Ġliter al +æł¡ åĮº +ĠHy per +ĠBalt imore +红 èī²çļĦ +ĠConnect icut +ä¸ĭ 车 +agon ist +P rem +Ġ- \ +p es +F ront +ç͵ ä¿¡ +Ġagre es +ï¼ĮæĪij åĽ½ +ï¼Į åįĬ +çļĦ èĮĥåĽ´ +Ġwood s +_ search +ï¼Į以 åħį +Ġaccess ing +主æĮģ 人 +æĹ¥ åIJİ +Ġdo ck +. 
position +m all +çĨ ı +ĠB ou +åīį è¾Ī +çĶ· æľĭåıĭ +ï¼Į 许 +sen al +é¢ľ å̼ +ĠC ertain +ĠA y +arent ly +æ¾³ éŨ +im o +f red +( unsigned +Emp loy +ĠBi ology +Ġdat etime +Com ponents +ĠC N +Ġd ashboard +L ab +Ġretain ed +O cc +æĭ¼ åij½ +Ġbackground s +_p olicy +Ġtravel s +Ġjoint s +æ´Ĺ è¡£ +G N +Ġp ond +ä¸Ń å¹´ +ĠA id +Ġso fa +æĻºèĥ½ åĮĸ +çī¹åĪ« çļĦ +Ġcounsel ing +å¼Ģ æĿ¥ +ĠM i +Ġp sy +Ġforg ive +Ġcontribut es +çļĦéĤ£ ç§į +Ġrespect ed +æīĢ èĩ´ +, ä¸įæĺ¯ +Ġb out +è¡Į 人 +æİ¥ ä¸ĭæĿ¥çļĦ +oh yd +Ġst airs +ag ain +ev a +æľīäºĽ ä¸į +æļ´ åĬĽ +车 éĹ´ +åĤ į +åı¯ åľ¨ +ç»Ħ çļĦ +FL AGS +æĨ ĭ +. values +åĬ¨ 漫 +ï¼Į ä¿ĿæĮģ +rem arks +.get Value +Ġsac rific +Ġaccept s +çļĦ ç¬ij容 +.m od +ic ons +Ġfol k +ĠD ra +Ġimm igrants +Ġ æ²Ī +Ġcar cin +ĠEs pecially +æIJ ģ +åł ¤ +ĠG ov +æģ¶ éŃĶ +ĠFac ulty +Ġsid ew +Ġl bs +伸 åĩº +D rag +US D +éĢĤå½ĵ çļĦ +Ġden ial +C ulture +_ API +ĠCamp bell +Ġf are +Ġvol atile +ore n +è°ĥ 度 +ä¸Ģä½ĵ åĮĸ +çŀ Ħ +Ġpar liament +åŃĶ åŃIJ +//////////////////////////////// //////////////////////////////// +ãĤ ¢ +Ġed itors +ï¼Į 顾 +f m +Ġburn ed +æľĢ é«ĺçļĦ +Ġsp ine +Ġdam n +éª Ĩ +ï¼Į 轻轻 +çļĦ éĤ£ä¸ª +ĠHash Map +ĠB uffer +è¦ģ çĤ¹ +{ v +ä¾µ çĬ¯ +Ġgal axy +å¯Ĥ å¯ŀ +ĠDak ota +æ¯Ķè¾ĥ 好 +Ġprec ip +è§ģ è¯ģ +Ġlect ure +ĠNAS A +Ġorgan izing +æľīæķĪ åľ° +Ġsm iled +ĠFl at +çļĦ æĵįä½ľ +F n +åĭĩ 士 +ï¼Į åŃ¦æł¡ +åı¯ è°ĵ +_b utton +iz ar +èIJ İ +åIJĪ çº¦ +l ord +Ġpri est +Ġdec ides +Q UI +ĠLI ABLE +æĺĨ æĺİ +ĠGener ic +} )\ +ï¼Į çľ¼çĿĽ +W omen +à¥į ठ+ä¸Ģ å¤Ħ +çļĦ é¡¹çĽ® +è¶ħ 声 +å´© æºĥ +åħļ æĶ¯éĥ¨ +ĠEngine er +åĪĨ åī² +exp ensive +ĠUnder standing +_in stance +ra ul +ï¼Į å¦ĤæŃ¤ +ä¸Ģ次 æĢ§ +Ġsent iment +Ġd angers +ifest yle +Ñ ī +ĠCar bon +.set Text +ï¼Į æ¥ļ +Ġdel icate +-d is +è§Ĩ çĿĢ +. use +åı· 线 +Ġ åĽ¾ +çĹ ° +Ġhy g +( row +. 
Component +å·² 被 +çļĦ çݰ象 +åıijå¸ĥ çļĦ +æĭį äºĨ +ĠS olid +be it +ĠC ann +ĠF el +ĠLabor atory +_t x +\\ \\ +ï¼Įä½Ĩ åį´ +çϾ ç§ij +ĠQ String +åĴĮ 管çIJĨ +ĠM illion +ï¼Į æĢķ +Ġscient ist +mod ules +Ġte ens +ĠMet ro +S afe +Ġ' ../ +为 åĩĨ +Ġpast a +ĠD at +read ing +è·¯ çͱ +éĴ¢ çIJ´ +åĮºåĿĹ éĵ¾ +ä½İ 温 +åįģåĽĽ 竳 +Ġup s +Ġalbum s +Ġgram mar +.st d +å¸ĪèĮĥ 大åѦ +[ p +Ġre ass +举 æİª +it ime +æĤ² 伤 +U CT +åIJĮ è¡Į +sp in +(f ilename +弯 æĽ² +L iteral +Ġprol iferation +k now +ĠAcad emic +ĠR ice +Ġchar m +éħį éĢģ +Ġcha os +è¿Ŀ 约 +Ġatt ractions +Ġacc ent +åĽŀ 顾 +Ġch ick +Ġlook up +ĠT C +æ°¨ åŁº +ĠOut door +ĠCirc le +ĠCra ig +èµ¶ åΰ +Ġemphas izes +Ġ åIJ´ +è¿ĩ 渡 +it ored +æŃ¥ ä¼IJ +ï¼Įåΰ æĹ¶åĢĻ +è§Ĩ åĽ¾ +Ġsh aft +F ort +- ins +.H as +ĠG lo +ĠCh arg +aug hed +Ġver se +Ġstim ulation +å°¼ æĸ¯ +Ġb ikes +Ġp ec +com put +仪 表 +ĠC ategories +Ġ æµ· +Ġpa ired +缩 çŁŃ +ĠConst ant +åĽĽå·Ŀ çľģ +æł¼ æŀĹ +ĠNurs ing +ç¦ ¾ +Ġ ç½Ĺ +fl am +å°Ĩ ä»ĸ +ï¼ģ è¿Ļ +ĠRes et +ï¼Į å¢ŀ强 +人 åĿĩ +Ġste al +ĠH OLD +arcel ona +Ġv inyl +佩 æĪ´ +g re +ĠG rey +æľª ç»ı +è½® èĥİ +å°±æĺ¯ åľ¨ +Q uality +Ġal i +ĠRet rieved +St ation +è£ģ åΤ +Ġ* ( +(" . +ud son +ĠG aming +ĠEffect s +{ sec +Ġ" -- +å°ļ 书 +ĠJ oint +é¥Ń åºĹ +åĪĹ è½¦ +女 æľĭåıĭ +åĿļ å®Ī +/ i +` . +é£İ åIJ¹ +ä¸ī 次 +ĠH ack +å¤ĸ 壳 +( req +ili ary +Ġpol ym +ra pped +uck er +à ¬ +Ġt unnel +m ount +å®ĥ们 çļĦ +说 èµ· +Ġshift ed +亲 æĪļ +E asy +满 æĦıçļĦ +Ġthr illed +æĥ « +Ġlift ed +Su ite +Ġimpact ed +ï¼ļ æīĢè¿° +oubted ly +_re place +ĠT ex +Ġair line +è¿Ľ åĮĸ +Ġ. 
/ +Con v +ç³Ł ç³ķ +W P +åħ¥ ä½ı +è¡į çĶŁ +éĤ ij +æ³° åĽ½ +TELE PHONE +涨 å¹ħ +Ġbe ating +ä¸Ģ æĿ¯ +v p +Ġsuper vision +Ġneed le +,ä¸į ä»ħ +å¼Ģ åѦ +Ġposition ing +Rober t +Ġt d +Ġ éļıçĿĢ +m argin +ĠV erm +âķ IJ +le arn +建çŃij çī© +åij IJ +Ġpharm acy +ĠCommon wealth +Ap pe +ĠN V +æĢİä¹Ī åı¯èĥ½ +太éĺ³ èĥ½ +ä¸į é«ĺ +rou ter +ãĢĤ å¹¶ +Ġvan illa +å¤Ħ åľ¨ +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠ +ä¸ī个 æľĪ +Ġcaut ion +ç¾İåĽ½ çļĦ +éĥ½ ä¸įæĺ¯ +åı° éĺ¶ +Ġde bris +\ subsection +æī Ľ +Ġinter pol +详ç»Ĩ çļĦ +Ġdec ode +Ġrenown ed +AR G +Ġg rap +ou rier +ภ² +åĽº ä½ĵ +Ġadministr ator +åĿı è´¦ +ĠN ep +Ñ Ĭ +Ġt a +аР² +ç®Ģ æ´ģ +_ keys +th al +æĺİ å¹´ +n ings +Ġnot ified +鼶 ä»¶ +un def +Ġmerch andise +og s +'] )) +_TR UE +ç§° ä¹ĭ为 +两 åıª +R M +åĪĨ åħ¬åı¸ +溶 åīĤ +åľ° åľ¨ +.s wing +it ational +ric ing +A ud +.M odel +oster one +æķĻèĤ² çļĦ +è¯ § +èĸª éħ¬ +Ġut f +æĪĸ å¤ļ个 +_ left +_T IME +ĠFif th +ĠUs ually +æ°¸ ä¹ħ +éª ı +_s end +ĠU ntil +at ivity +ĠLeg isl +åĺī 宾 +个 åĪ« +ç»ı è´¹ +çľĭ 好 +h ist +ĠWay ne +ä¸į æĺİ +ĠBe havior +Ġam big +on z +ĠP and +ĠMor ris +ĠAR ISING +QU AL +ĠX box +ĠI E +çݰéĩij æµģéĩı +è·¯ éĿ¢ +Ġappropri ately +Ġexam s +Ġsear ched +ãģĹ ãģ¦ +ĠW a +ï¼Įå¹¶ 没æľī +çͲ éĨĽ +æīĭ æ³ķ +Ġspecial ty +äºĨ ä¸ĭåİ» +Ġur i +ĠFed eration +è´Ł éĿ¢ +ĠB apt +ï¼Įæľī æĹ¶ +\ big +ï¼Į ç«ĭåį³ +ĠPro vider +éŃ ģ +ĠHe aven +_c all +- all +ĠA aron +å¤ļ 项 +çϾåĪĨ ä¹ĭ +Ġred emption +ĠI an +ab stract +ãĢģ 以 +ï¼Į åĮĹ京 +Ġl icensing +it ol +çİĭ æľĿ +/j avascript +Ġspec ially +Ġsh ades +IR ST +ï¼Ī åIJ« +col lect +ate ur +åѦ æľŁ +Ġf d +epend encies +ĠAd obe +ĠCoun ter +åĨľ 产åĵģ +Ġ ï¼ģâĢĿ +v n +åIJį è¯į +-in f +ä»İ ä¸Ń +ons ored +ĠSet up +_e cho +he at +Ġneglig ence +ĠColl abor +è¿Ľä¸ĢæŃ¥ çļĦ +认为 æĺ¯ +æĸ° 建 +Ġhospit ality +ias m +Ġunt o +ubb ed +ED IT +\ section +Ex ists +Ġback yard +Ġ æŃ¤æĹ¶ +DE FAULT +书 æĪ¿ +Ġteasp oon +(s rc +n ail +è¯Ĭ çĸĹ +Ġtal ents +W ORK +-b ody +ç§ij æĻ® +Ġh ind +Ġc ents +Ġenter prises +lim its +Ġtouch down +æĮ¥ æīĭ +åĩº 声 +åľ¨ 使ç͍ +.A re +ne red +m ock +ĠH amp +ï¼Į åı¤ +ĠArch ives 
+ĠArchitect ure +Ġder ive +åŃĺ è´§ +Ġp ale +Ġdon or +um ar +书 åĨĻ +и е +ĠPal ace +ĠD est +ĠTurk ish +{ r +çIJĨ ä¼ļ +è´Ń æĪ¿ +ï¼Į æİ§åζ +çµ ® +.app ly +app le +Ġd as +åįł æľī +Ġpuzz le +轨 迹 +Ġ( . +rient ation +k u +is ible +ï¼ļ ï¼Ī +Ġdent ist +Ġiter ator +itt en +åİ¿ å§Ķ +Re ceive +Ġhome owners +ĠR M +ãĢĤ é«ĺ +ĠNOT E +Sim ilar +Ġchar ming +å®¶ å±ŀ +æľī 害 +ï¼Į å¢ŀåĬł +- ed +çĶŁ æķĪ +Pl ot +Ġp ools +çļĦ é»ij +Ġconspir acy +åı¸ 令 +Ġmail ing +ãĢģ 马 +çļĦ æİ§åζ +[ id +Ġ åħ¨ +ä¾ĽåºĶ éĵ¾ +ĠB uilt +æ²³ æµģ +( Object +[ / +Ġ 使ç͍ += c +ev es +æ´¾ 人 +ĠM AT +ĠCont roller +olut ely +th a +Ġ[ ** +çŃī æĸ¹å¼ı +ĠB os +çĤ¹ è¯Ħ +ĠPub lishing +à µ +L AN +çº ¬ + ģ +( {\ +åįĹ åĮĹ +ĠEx pect +Ġintrodu ces +ĠS ud +Ġassist ed +omit empty +ĠWH ETHER +Ġ 飩 +æĭ¿ ä¸ĭ +åı¹ æģ¯ +Ġ< ? +Ġmagn ific +M ary +Com mit +æ² ģ +P i +å¤ĩ ç͍ +ï¼Ł ãĢį +Ġland ed +Ġtou ches +åĨħ åľ° +Ġstr ide +æīĢæľī 人éĥ½ +æĸ¹ éĴĪ +ĠO B +d id +Ġb od +Ġcall er +ä¿ ŀ +Ġassum es +_ AC +çϽ èī²çļĦ +} ). +æĥħ 人 +Ġn ested +M el +Ġmin us +çĭ® åŃIJ +Ġfor k +ah l +Ġment ions +ĠM es +Ġalloc ate +åı¯ä»¥ 说æĺ¯ +æľĢ å°ı +ï¼Į åΰäºĨ +ï¼Į å°ij +I SE +ĠD S +æĪ ³ +è¯į æ±ĩ +æ´Ĺ åĩĢ +ot on +ĠGr iff +è´Ł èį· +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ +_IN VALID +, åºĶ +ĠCl ar +reg n +Ġdi agonal +Ġrecept ors +夸 å¼ł +s pecial +ĠP ant +æĸ § +box ed +çIJ ¦ +çļĦ æĪĺ +éĤ£ æĹ¶ +Ġtag ged +æŀĦ ä»¶ +æİĴ è¡Į +ĠColl ins +Ġbra ve +è¦ģ æĬĬ +广 éĺĶ +ä¸Ń åħ± +Ġenhance ment +æłij æľ¨ +è¹ Ħ +Ġk its +Ġre ck +_s rc +ä¸İ åħ¶ä»ĸ +é¦ĸ éĥ½ +æĸ¯ ç§ij +ĠCol on +å¿ĥ 头 +ast ics +E nergy +ĠCr ime +]\ ] +ä¸į å®ľ +- X +aint ed +è½½ ä½ĵ +For ward +F und +Per haps +Ġple a +ĠH end +èģĶ ç»ľ +Ġch i +äºĮ å¹´ +ot ics +Or d +Ġfat ty +h ook +unt ed +ï¼Į åĨ² +T I +å¨ģ å°Ķ +Ġautom otive +è´Ł è½½ +Ġvacc ination +åģļ è¿ĩ +è´ ¬ +em perature +ä¸Ģ è¾ĪåŃIJ +ri ors +æīĵ æī® +Ġout lets +éĥ½ æĺ¯ä¸Ģ +O s +f y +(b uffer +çļĦ é£Łçī© +. 
per +èĤ¡ æľ¬ +ĠExt ension +Ġexhib its +æ°´ 管 +Ġachieve ments +pre ced +æķ° çϾ +- key +çļĦ ä¸ŃåĽ½ +ĠH em +çļĦ åĨħéĥ¨ +ĠB S +ĠSet ting +man ager +Ġas leep +, åħ¨ +ĠP ent +æĢ§ åij½ +A U +Ind ust +Ġex empt +讲 座 +äºī 夺 +æIJº æīĭ +å¿ł è¯ļ +Ġsqu ares +. ID +Ġeduc ate +Ġl aughed +_m b +ĠR oof +construct or +Ġb anned +ud es +Ġtrans itions +åĿIJ çĿĢ +Ġt rivial +çIJ ¢ +éħį æĸ¹ +uk i +p riv +ĠT odd +min us +sw ers +d ump +Ġtim eline +容 纳 +Ġ" : +ĠK night +Ġo ch +Null able +Tr igger +ĠT D +m itt +åĩº 身 +æ£ ĺ +B as +é n +l ar +Ġb race +ä½ı éĻ¢ +iterr anean +äºĭ å®ľ +âĢĿ ä¹ĭ +i ates +åįĩ é«ĺ +int eg +Ġu gly +Ġsaf egu +æĺ¯ æĪijçļĦ +æ§ Ľ +Ġ[... ] +Ġpract itioners +ï¼Į çľģ +Ġh orn +èĩª åľ¨ +_ html +E mb +Ġm ant +J ournal +Ġ' $ +Set up +Ġappoint ments +ĠC ris +è§ģ ä»ĸ +Ġre levance +ĠPro gress +ï¼Į å¾® +/m aster +AB C +Ġwel comed +Ġl enders +W alk +C trl +æĬķèµĦ åŁºéĩij +ï¼Į åΰåºķ +Tim estamp +Ġher bs +ï¼ĮæĹłè®º æĺ¯ +b order +Ġc argo +es pecially +St d +Ġdeterm in +åįģäºĶ 竳 +Ġaccum ulation +ri vers +Ġco pe +å̼ 为 +aj ax +ĠGa ussian +ĠK id +Ġh ass +" \ +. connect +éľ² åĩºäºĨ +Ġear liest +ĠAgric ulture +ç¼ ļ +Not es +ï¼Įä»ĸ ä¹Ł +ANN EL +p adding +çļĦ æ¯Ķä¾ĭ +æĮ¤ åİĭ +Ġche f +S chedule +æĶ¶ å½ķ +ĠIn come +çĻĮ çĹĩ +Ġor b +ãĢģ äºĶ +Ġble eding +æķ´ å½¢ +ĠR unning +Ġcompet ent +åĪĿ å¿ĥ +ov able +Ġpe aks +.get Element +Ġdefe ated +Ġarchitect ural +# { +ĠS ame +B o +S ervlet +Ġwh it +æħĪ åĸĦ +ĠAct ually +ĠValue Error +è§£åĨ³ éĹ®é¢ĺ +ãĢģ èĬ± +ĠEv ans +åľ° çĭ± +Br and +Ġcl ay +_C OMM +Ġf ridge +æ¡ ĵ +Ġped est +ass y +Ġsimilar ity +Ġfil tered +. Int +л и +Ġhead quarters +æĸ¹ ç¨ĭ +Ġdraw ings +-p art +ç͍ èᝠ+该 å¦Ĥä½ķ +. search +ï¼Į åĪļæīį +ãĢĤ åħ·ä½ĵ +æ´» å¡ŀ +ãĢĤ æ¯ķ竣 +ç͍ åľ° +ĠT ORT +Ġatt itudes +ĠWeek ly +ĠP un +Ġmag azines +Ġcond itional += ${ +on line +Ġannoy ing +ĠFore x +ĠMar shall +Ġcon gress +are l +æĽ´ é«ĺçļĦ +Ġsh rink +座 ä½į +Ext ensions +Ġsc attering +è¿ĩ å¾Ĺ +SE L +å·® ä¸į +ĠR and +ï¼Į åħ±åIJĮ +线 æĢ§ +æĺ¾ èijĹ +t rain +. 
select +_ irq +åıį æĬĹ +åµ ´ +ä¸ļ åĨħ +çļĦ å¼Ģ +Adv anced +åĪĽ ç«ĭ +ĠPsych ology +w ould +åŃIJ å¼¹ +AN C +è¿Ļ çĤ¹ +ĠN am +Ġridic ulous +AC HE +b an +âĢĿ ï¼Ľ +}} _{ +ãģ¾ ãģĻ +æĥ³ åΰäºĨ +åī§ æľ¬ +l it +N ER +ograp her +åĿļ 强 +ĠInter face +th an +Ġdes cript +Ġrel ie +Ġdr um +ç»Ļ åĩº +å°ı æĹ¶åĢĻ +ĠGr an +q quad +Ġ å¤ļ +ip art +åķĨ åºĹ +æľŁ åĪĬ +sec ure +out es +å¿ĥ çĸ¼ +( se +Ġg y +åij ķ +Ġurg ent +CL K +Ġam p +IN CLUDING +Ġjournal ism +Ġansw ering +Ġsh ifting +ĠT N +Ġun ve +Ġfree ze +Ġcompar isons +ĠON E +Ġnit rogen +Ġm ud +Ġpat ches +Ġinhib it +re hend +ãĢĤ 第ä¸Ģ +ĠM ason +sh arp +hes ize +ï¼Į æ²Ī +çϽ éħĴ +Ġl ender +常 æĢģ +_ IP +ĠWh atever +æľīäºĽ 人 +Ġr ay +ï¼Įå°± ç®Ĺæĺ¯ +c ity +ç¹ģ èᣠ+, C +çī ¡ +ĠSh opping +int eger +Ġbr ide +ï¼Į 认为 +.index Of +æľ¬å®ŀç͍æĸ°åŀĭ çļĦ +ï¼Į æŀľçĦ¶ +Ġcry ing +ï¼Į ä½įäºİ +Ġret riev +Ġapprox imate +ĠDub lin +Ġch ron +LE T +US H +he res +it ated +æĹł å½¢ +è¯ģ çĽijä¼ļ +å°ı æĺİ +Ġforest s +w ent +æĭ Ĥ +模 仿 +Ġelect rons +å¤į åħ´ +èµ° åľ¨ +, ä¸įèĥ½ +-s ub +Ġse g +åĮĸå¦Ĩ åĵģ +ĠCon cept +Ä Ļ +Ex it +Ġj Query +æĭ § +æĢĿ è®® +Ġir r +ä¸Ń å±± +f amily +Exp ress +cl er +_ ph +Sh ift +çĸ² æĥ« +Ġpro st +onom ous +å¤ĸ ç§ij +çĿĢ ä½ľ +åĨį æĿ¥ +l ades +åħ¸ åŀĭçļĦ +) s +ĠDou glas +Ġenerg ies +- not +ic ion +æľīåħ³ éĥ¨éŨ +Ġgram s +_ AS +å°Ķ å¾· +Ġ ib +b ian +Ġsp ite +è¹ ¦ +审 åΤ +åı¸ å¾Ĵ +度 è¿ĩ +ãĢĭ ï¼ļâĢľ +éĹ´ éļĻ +Ag ain +struct ure +Ġfor ty +ai res +åύ å®ĺ +çĿģ å¼Ģ +çī¹ æĦı +Test Case +lect ric +Ġem it +åĸľ æĤ¦ +:: :: +w indows +op ard +éĻĦ ä»¶ +åύ 械 +f ar +åıĭ 好 +. 
htm +åIJİ æĸ¹ +Ġc af +const ant +Ġprov es +Ġnon etheless +loc ale +çα åĽ½ +_r un +Ġ åĮĹ京 +ï¼Į 说æĺİ +Ġ æľĢåIJİ +(t mp +å¿ĥ åºķ +ot er +che stra +Ġexam ining +o ct +åĽĽ å¤Ħ +Ġarg v +Ġ# { +C y +èİ ĵ +ĠGener ation +ĠRec ent +Ġp ork +ab etic +Ġd ign +l arg +Ġd ans +ick y +em bers +æ² ¦ +_ vector +ĠCor ps +Ġfoot print +g reat +Ġm apped +Ġpal m +æĥ³äºĨ æĥ³ +is o +Ġmem or +ï¼ĮæľĢ 好 +ĠAgain st +ĠF loat +ĠSt orm +å¾Ģ ä¸ĭ +ĠMit chell +flam matory +Ġcomment ary +ä½ı 宿 +Ġintern ationally +Ġfurn ished +itt est +ĠMad ison +è¡ Ļ +ĠSur v +Ï İ +Ġdead ly +Ġcancell ed +ĠTh ai +æĹģ è¾¹çļĦ +' $ +åĪĨ æĺİ +çĶ Ħ +, çľĭ +EM PL +EG IN +Ġprevent ed +åıĸå¾Ĺ çļĦ +Ġdisc lose +i q +Ġsmooth ly +ä¸Ģ åIJij +ĠVal ues +代表 人 +ident ity +d river +æĺ¾ç¤º å±ı +_ io +ĠC arn +Ġpursu it +Ġh ockey +}} , +ãĢĤ 天 +æĭ Ń +_ch annel +ï¼Į é£ŀ +ab lished +误 ä¼ļ +us hers +Ġdest ruct +( image +Ġg rief +ï¼Įå°± èĥ½ +ĠU b +Ġdon ate +ĠQu ote +ĠR ic +è¹ Ń +ï¼ĮåIJĮæĹ¶ ä¹Ł +ĠPal m +ĠCh ron +âĨ IJ +å°½ çļĦ +Ġdr ill +Ġtempor arily +çļĦ è§Ĵ度 +ĠT rial +Ġ å¤ı +ä¸İ ä»ĸ +ĠSk ill +åħ³èģĶ äº¤æĺĵ +ãĢģ éĻĪ +å´ Ľ +.s ource +ĠR y +OL UM +qu ant +çŀ ¥ +ĠK el +Ġslic es +IL TER +åĻª éŁ³ +çķ Ķ +Ġplan es +亲 å¯Ĩ +Ġinf ant +h p +ĠH O +Ġgrand mother +Ġfun eral +ï¼ļ D +> () +ĠRec ipe +Ġbal lo +im its +åIJ¦ 认 +Ġp g +io let +Ġface book +Ġour s +[ c +ãĢĤ æĽ´ +Ġcontext s +, D +[ assembly +, omitempty +E s +ĠRob in +Ġev olving +èģĶ åĬ¨ +ç£ ħ +ĠUlt ra +R D +ĠCon sequently +æ£ º +ĠSt ev +è¡Į æĿİ +Ġpars ed +æĸ° åħ´ +ä¾ĭ å¤ĸ +å·²ç»ı æľī +_s c +Ġspir its +åľ¨ ä¸ĭ +_c ore +iz a +æµ· ä¸Ĭ +ĠAl bert +åIJİ éĿ¢çļĦ +ï¼Į ä»·æł¼ +ä¸Ģ æīĭ +æŁ ł +Ġtr aged +@ interface +ĠF ill +- un +el if +Ù ĥ +ĠG ill +-d es +.. 
\ +rat ulations +表çݰ åĩº +.m ake +Ġr t +Ġdress es +ĠD al +Ġgl ance +å¥ İ +é»ij èī²çļĦ +åĩı éĢŁ +次 çļĦ +Ġ 为äºĨ +em en +ement ia +Ġkn ees +é¦ĸ å¸Ń +Ġhapp ily +Ġweight ed +Ø ¹ +Ġre leasing +Al tern +ĠTai wan +ĠArab ia +è¯Ī éªĹ +éĴ ¾ +å¿« è¦ģ +礼 è²Į +Ġex pr +çļĦ è¿Ļ +greg ation +ĠN an +çŃ · +ser ial +ĠPub lished +æľĭåıĭ åľĪ +Ġso y +_s ervice +r n +ing e +\ theta +-com merce +æľī çĽĬ +åĽ½ éĺ² +çıŃ ä¸»ä»» +Ġtrain er +ced es +æīĭ 游 +Ar r +Ġle isure +ĠSurg ery +é«ĺ éĵģ +ï¼Į éĿĴ +åĢĻ éĢī +åħ¥ åѦ +ĠTe aching +éĶ® çĽĺ +æ°´ åĪ© +çī¹ å®ļçļĦ +Ġwel coming +Ġwealth y +æĹı çļĦ +ĠDef inition +ĠChar acters +Ġwh ites +ï¼Į å¸Ĥ +å®¶ åĽŃ +View s +ĠPat ri +éĤ» å±ħ +row th +ĠT ow +ãĤ ¿ +ĠIN D +Ġb ail +åģ¶ åĥı +ĠAl pha +æī¾ 个 +Ġcount ed +Ġc el +sub ject +Ġdistingu ished +bel ie +å¹´ éĹ´ +ans hip +Ġpromot ional +ä½ ¬ +Ġtun ed +(" [ +Ġapp les +äºĶ çϾ +å͝ æľī +ĠH ab +ĠMed ium +ï¼Įè¿Ļ æĹ¶ +Ġdevelop s +ĠA uthors +Ġ å®ĭ +ĠInt o +好 å¤ļ +iss on +ene z +æĻ¯ 象 +irm ingham +ĠM ask +ĠDis count +éĴ» çŁ³ +åıĺ 为 +ĠLiver pool +çѾ åIJį +ä¸į 该 +i ang +ess a +æīİ å®ŀ +Ġpenal ties +f u +èº ¬ +Ġsell ers +Ġb a +ĠE gg +ark ing +ĠZ oom +åºĶæĶ¶ 账款 +U Int +è§ģ è¯Ĩ +æĬ± æŃī +an ic +P o +ç²¾ å¯Ĩ +Ġto ps +am as +Ġ åIJ¬åΰ +çļĦ éĴ± +ey e +Inst ruction +_IN IT +S at +- ac +åĪĽ åĬŀ +åįĬ 导ä½ĵ +pro file +ç» ¸ +En joy +Ġverb ose +S calar +ĠSe lection +Form atter +ĠDis cover +ä¸į 容 +ĠG ene +åĩ¹ æ§½ +她 åľ¨ +éĺ² èħIJ +is i +re act +ST M +æĺŁ çIJĥ +os er +Ġneg lect +at cher +ic iary +P ERT +_b atch +m ust +åİ ® +æł¸ æŁ¥ +Ġmess aging +è¦ģæ±Ĥ çļĦ +ĠPl ayers +sc opic +çģ¿ çĥĤ +å°± è¡ĮäºĨ +Ġware house +è¯Ń æ³ķ +col lection +em por +å¾ģ æĶ¶ +Ġnewsp apers +æĻ®éĢļ 人 +. 
Event +R otation +Ġ Ùħ +äºĨä¸Ģ åı£ +Ġprotect s +ä¿¡ æīĺ +Ġ äºİæĺ¯ +è½ § +consc ious +pro b +Ġfar ms +è¿Ļæł· åģļ +Ġent ers +æķĻ åłĤ +ç¢İ çīĩ +om eters +è¿ĺ å¾Ĺ +.ex ists +/ " +d jango +ĠGreat er +å®ŀ æĪĺ +çļĦæ°Ķ æģ¯ +ĠP ER +ĠC av +Ġx x +Ġfix ing +ĠInter ior +æĬķ å½± +ĠBo ys +rel ative +éŨ åºĹ +p ersonal +åįģåħŃ ç«ł +ĠM aj +Ġth rive +ĠEmp loyment +Ġl b +åζ åĬ¨ +ĠWorks hop +ĠWi Fi +ĠPitt sburgh +Ġover d +ãĢĤ ä¹Ł +ĠRef orm +èij¡èIJĦ éħĴ +Ġshock ed +åĨħ å¿ĥçļĦ +Ġ æĢ» +ĠKen ya +Ġgovern ed +Ġorg ans +AT T +Ġcrit ics +ĠGener ated +è·ij äºĨ +Ġhe lic +é¦ĸ åıij +Ġqual ifying +F N +Ïģ ο +ĠCl ay +çĭIJ çĭ¸ +Ġclar ify +v id +èµ° åIJ§ +Ġ$ (\ +åĬŁèĥ½ çļĦ +ĠU pper +ä¸ī åĽ½ +ä¸į ç¡®å®ļ +Ġback pack +麦 åħĭ +çļĦ å¾Ī +ĠAtt ribute +è¦ģ åģļ +Ġswit ched +K B +ĠE ST +ĠBirth day +ãĢģ ä¸Ĭ +æ°¸ æģĴ +Ġp est +Ġpropos ition +Ù Ĥ +æ³ķ æ²» +Ġl enses +ill ary +IN ST +lock s +Ġcred ibility +lis hes +Ġcov ari +al ph +Leg al +å°ı éķĩ +Ġsn ack +æĹ© æĻ¨ +Ġt et +Ġspac ious +ï¼Į便 æĺ¯ +èĩ³å°ij ä¸Ģ个 +äºĭ åħĪ +ï¼Į å®ļ +im als +ring s +Ġtast es +Ġsen iors +(type of +AAAA AAAA +Ġviol ated +太 å¤ļçļĦ +ï¼ĮåĨį æ¬¡ +Ġback s +% s +ĠFe el +F iled +te es +W ar +Ġre loc +ĠN ord +ï¼Įæĺ¯ åĽłä¸º +ä¸į ç»ı +- cont +Ġanalyst s +ĠOTHER WISE +A meric +Ġsnap shot +Ġhon ored +ãģ Ŀ +度 åĴĮ +Support ed +e u +硬 çĽĺ +inn amon +-n av +ĠCol omb +Ġpot ent +Ġpot ato +Pre view +-g ame +Ġmedic ines +缸åħ³ éĥ¨éŨ +æ»ĭ åij³ +Ä ĵ +) }$ +æ¶ ħ +ĠRec ogn +.b tn +对 èĩªå·±çļĦ +ĠTru ck +ĠC ub +æ£Ģå¯Ł éĻ¢ +åı£ è¢ĭ +ĠP ope +ĠE sc +ĠA Z +Ġ$ | +W ritten +Intern et +(b ase +Ġfound ing + ¿ +åıij åħī +Ġsubject ed +ĠEm ma +Ġearth qu +Ġform ulation +Ġ Ñĥ +ä¸Ģ åįĥ +emet ery +å®īåħ¨ æĢ§ +gg ed +h appy +ï¼Įè¿Ļ å°± +Ġb ored +.c a +Ġ第 åħŃ +æľī 两个 +ï¼Įè¿Ļ ä½į +-gener ated +Ping back +ĠMain tenance +æĶ¿åºľ çļĦ +女 ç¥ŀ +ï¼Į 表示 +D am +UT O +ä¸į 顾 +Ġprov ing +u pt +Ġremember ing +èĴ¸ åıij +pl t +ĠAny one +管 å®¶ +ãĢĤ ä¸ĭéĿ¢ +èµĦ åĬ© +Ġr ides +å© ¢ +M Y +Ġf right +è¶Ĭ é«ĺ +_c fg +( def +æŃ ¹ +åĮ Ī +ud ent +ear ly +ï¼Į æĺĵ +the ning +ĠF ried +P et +被 åĬ¨ +ï¼Į è¿Ľåħ¥ +Ġins 
ured +åħ¥ ä¾µ +ĠT on +Ġtomat o +ĠSte am +æĪĺ éĺŁ +Ġcontrovers y +Ġre construction +æķ°æį® æĺ¾ç¤º +ç½ļ 款 +ãĢģ èĢIJ +头 èĦij +ãĢĤ ä»Ĭ +ĠF al +æĪ· åı£ +-f ield +Ġsevent h +_ control +Ġar ter +ĠG host +och astic +éĺĪ å̼ +ĠStart ing +éħ ¥ +Ġtouch ing +OP T +un ing +åĪĨ æĪIJ +Enumer able +_ local +æĽ´ 容æĺĵ +ch y +ĠLine ar +Ġmer it +æī ³ +ĠT an +åĸ ĩ +om o +Data Source +_e lement +æī£ éϤ +Ġres ervation +ï¼Į åŃĻ +Ġad orable +ra wn +ĠY ang +ï¼Į åĬłåħ¥ +Ġinhab it +hand ler +por ter +港 åı£ +骨 头 +ĠArt ist +.M ap +ï¼Įå°± è¦ģ +çļĦ ä»»åĬ¡ +ä¸į ä¸Ģæł·çļĦ +Ġden otes +åĩł 人 +miss ible +ĠReg ular +it les +ãĢĤ 人 +Ġcre ep +Bel ow +t ri +r ift +顺 çĿĢ +Ġpri zes +éĿĻ èĦī +çļĦ éĩij +Dis claimer +åı¬ éĽĨ +Ġenh ancing +ä¸Ń äºĨ +æĮĩ çĿĢ +( op +' T +ĠV ehicle +ĠRev enue +ãĢģ çŁ³ +Ġshel ves +认 羣çļĦ +Ġexch anges +ä¹ĭ æĹħ +约 为 +åĢ ļ +大 æµ· +éĢļ åŃĶ +Ġspons or +å¤ľ éĹ´ +ĠDep uty +Ġanx ious +Ġt an +åĩĢ èµĦ产 +ch air +被 æīĵ +çĿĢ åĬĽ +rom agnetic +ĠSpe aker +. inter +ĠS even +é¡¶ 端 +ĠCamp us +常 è¯Ĩ +Ġst arter +L I +ĠA thlet +Ġtrick y +AR I +å¾Īå¿« å°± +l v +以 æŃ¤ +ä¸ĭ å±ŀ +ĠRid ge +æľĪ 亮 +ĠParam eter +ç²ī ç¢İ +ĠH az +Ġcomm ut +Ġprem ier +è¯Ħ éĢī +ĠS B +Ġdis g +ey er +ĠW riter +åijĬè¯ī ä»ĸ +æ»ij åĿĹ +_ raw +Ġur ge +_FORM AT +翼 翼 +æ°´ æĻ¶ +Ġdegrad ation +M AN +Ġc attle +个 å°ıæĹ¶ +ä¸į éĢļ +åIJį åı« +Ġfilter ing +/ her +r ar +çļĦ åĩºçݰ +ä¸ĭ æīĭ +-sh adow +ĠSw ift +Ġpl ur +æµıè§Ī åύ +Ġdis par +çļĦ女 åŃ© +oci ation +? 
, +ĠM asters +ĠG OP +举 éĥ¨ +Ġlog istics +Q ual +int endo +ãĢĤ åį³ +.Write Line +çļĦ å±± +梳 çIJĨ +ĠNet works +ä¸į æ¸ħæ¥ļ +Ġtradem ark +Ġexhib ited +ust ers +ochem ical +S cal +Ġathlet ic +angh ai +Ġw ars +è¾ ķ +é£Ł åłĤ +Ġ/** < +èĩª ä»İ +Ġimpl ant +äºĨä¸Ģ çĤ¹ +n il +ĠAn c +Ġam bit +Ġhero es +å®¶ åºĦ +ä¸Ĭ çľĭ +ĠJ erry +ĠS oul +è¿Ļ æĹ¶åĢĻ +ik o +Ġinev itable +Ġlaugh ing +å¤įåį° ä»¶ +ï¼Į åıĸ +ĠP s +æĸĻ çļĦ +rid es +ï¼Įä¸Ģ ç§į +Ġprot otype +éķ¿ åŁİ +ag ne +k w +Ġus eless +ĠBr anch +Ġm ethyl +ĠC bd +ï¼Įæľ¬ åıijæĺİ +ancell ation +詹 å§Ĩæĸ¯ +> / +is y +ĠL D +ĠSe an +ãĢĤ æľĢ +åħļ 建 +t race +轿 车 +Pe er +ĠT s +by e +ä¸į éĢĤ +åĶ® ä»· +ĠH orse +ï¼Įä¸Ĭ éĿ¢ +U A +Ġre imb +Ġdem ocratic +å¯ ĩ +ĠRog er +æĿ¥èĩª äºİ +ãĢĤ 大家 +åı¯ä»¥ æł¹æį® +èIJ ± +in ical +æĶ» çķ¥ +Ġfirm ly +wide hat +es ters +害 人 +ĠS yn +Ġarriv es +M ike +ĠIN TO +ç͍ 人 +Ġas pir +Ġ ou +Ġst adium +ä¸Ģ个 人çļĦ +IF I +} -\ +ĠMur ray +l ene +身ä½ĵ çļĦ +Ġten ant +ĠN AT +Ġrem ot +æľī åĬĽ +åı ¨ +Ġkind ness +Ġse xy +æ· ¹ +ï¼Į ç»ı常 +çĺ « +ĠD egree +enc oding +âĭ ¯ +ĠP eters +æ¸ħ æĸ° +åįĥä¸ĩ ä¸įè¦ģ +Ġnut ritional +æ²Ļ æ¼ł +åĢ ĺ +/ app +带 çļĦ +-ch ild +离 ä¸įå¼Ģ +Ġcert ificates +Ġfin ances +, ç¡®ä¿Ŀ +ĠM oscow +Ġfaith ful +ĠWire less +ra is +æķĻ ä¼ļ +æŀģ 端 +亲 çαçļĦ +å¾Ģ æĿ¥ +ĠExplore r +Ġse ating +h ole +ç»ıèIJ¥ æ´»åĬ¨ +泪 æ°´ +åѦ 士 +éĻĪ è¿° +ç¥ Ģ +Ġ æŃ¤ +D ue +, åħ¬åı¸ +çºł ç»ĵ +çĨŁæĤī çļĦ +Ð ¶ +ç²ī æľ« +.t witter +转åĮĸ 为 +åıĺ éĢŁ +.ex ports +Ġcru el +Ġhabit at +F r +Ġbit ter +ï¼Į äºĨè§£ +-b l +没 æ³ķ +read only +æľīåħ³ è§Ħå®ļ +ie val +ĠPart s +çŁ¥éģĵ èĩªå·± +åľ¨ æĪij们 +ĠI con +Ġex empl +ä¸į 符åIJĪ +ä½ł è¿Ļ +Ġutil s +æĢİä¹Ī åĽŀäºĭ +ç͵åŃIJ 设å¤ĩ +å·¥ä½ľ 室 +em ics +æŀģ 大 +log o +Ġtast y +am el +æ°ij åĽ½ +åΰ è¿ĻéĩĮ +. 
th +- Y +ĠT S +带 ä¸Ĭ +ï¼Į èµ¶ç´§ +æĹł æĦı +Ġen er +éľ ĸ +ĠP le +æıIJ éĹ® +ig ion +UR RE +_g ener +åĽŀ æĿ¥çļĦ +å¥Ī ä½ķ +Ġrect angle +Ġven ues +Corn ell +éĶħ çĤī +ens on +ï¼ĮæĪij è§īå¾Ĺ +Ind ia +ï¼Įä½ľèĢħ æĺ¯ +å¤Ł äºĨ +Ġblog ger +åĮĸ è§£ +å¤ĸ 线 +éĺIJ è¿° +åı¦ æľī +ï¼Įåį³ ä¾¿ +éħ ® +Ġsystem ic +Ġresearc her +溶 è§£ +plic a +åıĤ è§ģ +æ°´å¹³ çļĦ +Ġperman ently +Ġbless ing +ĠR ou +ram a +[ M +ĠTh an +L IB +èĥĮ å½± +ĠInd ians +çŃĶæ¡Ī æĺ¯ +Ġb om +çī¹ ç§į +è¦ģ æ¯Ķ +æĢ» ä¼ļ +æĪij èĩªå·± +åįģä¸ĥ 竳 +Ġoccup ation +, æ¯ı天 +\ ", +T imes +è·ij æŃ¥ +ĠU l +缴 æµģ +oc ard +Ge ometry +ĠPr incess +s b +Reg ular +in herit +æ· ³ +Ġ æľĢ +ãĢĤ 没æľī +Ġun ions +éĺ İ +ä»» èģĮ +Ć Ĉ +Ġu mb +im ag +R ank +ĠT B +çıį çıł +M iddle +Ġgrat itude +Ġpe oples +æĥ³ åİ» +ï¼ĮéϤ éĿŀ +L earning +æĢ¥ æĢ§ +. Z +èĪ ¶ +ĠPRO C +Ġconcent rated +ä¸į å°ıå¿ĥ +Ġhes itate +注 å®ļ +Ġfif teen +Ġlearn s +ĠGover n +Techn ology +uff y +Ġl ug +( ä¾ĭå¦Ĥ +Ġre lied +ro ved +in is +ĠL G +ĠK aren +çļĦäºĭ å®ŀ +Ġflow ing +çķĻ æĦı +{d ocument +ĠS oph +大 楼 +_t ask +ĠOper ating +游æĪı çļĦ +it ary +æµĭ å®ļ +ro d +. aw +åļ · +çİĭ èĢħ +ĠCorn ell +Ġare na +Ġlearn ers +ĠK ath +管çIJĨ å±Ģ +Ġri vers +ĠS ie +Inst ead +Un ique +Ġelse if +çī¹ èī²çļĦ +ï¼Įä¸Ģ èµ· +ar ium +à § +ĠUlt imate += false +ĠL ost +Ġh r +åķĨ 人 +Ġgrad uation +ãĢģ 天 +ä¸Ģèµ· åİ» +verb ose +agn ostic +ãĢĤ èĢģ +Ġdesper ate +Ġ/* !< +_M SG +C over +åı¶ çīĩ +ĠSo on +Trans ition +M ust +ob i +ĠDis cussion +.R em +_c opy +M en +xff ff +ä½ł èĩªå·± +, g +ï¼Į 马ä¸Ĭ +J im +ï¼Į æŃ¦ +Ġg ri +.D efault +个 å°ı +ï¼Į æīĵéĢł +éĦ Ĥ +AY S +) ' +. 
os +è¶Ĭ 好 +are tt +t own +ä¹Łä¸į æķ¢ +è¿ĺæľī ä¸Ģ个 +çļĦ æĢ» +Ġintu itive +ä»»ä½ķ 人 +é£İ æ°´ +çļĦ 身份 +欧 åħĥ +ä¸ī åįĥ +log ical +Ġlegend ary +ote chn +ãĢģ ä¸Ĭæµ· +_s cale +ĠDep ending +ä¸į 好çļĦ +æľį 饰 +Ġc ube +çļĦ 红 +-f old +Ġ Å +Ġic ons +l ace +Ġcount ies +åĬ© æĶ» +Ġprob able +u art +All ow +æİ¥ åľ° +Ġawk ward +, ä¸ĭ +Ġd ar +æĪij å®¶ +Ġ 西 +ens ional +Ġg i +ï¼Į æľªæĿ¥ +çĽ ı +_st ats +Mult iple +竳 èĬĤ +Ġe book +.m ain +ï¼Į è·Ŀ离 +per or +ens ed +æīį æľī +Every thing +A Z +ĠU C +{ K +Ġste ering +F ramework +re ational +ï¼Į 鼷 + ´ +åķĨ åŁİ +ç£ ķ +âĢľ ä¸ŃåĽ½ +Ġauthor ization +Fin ancial +act ly +åħĪ åīį +ĠÂł ĠÂł +N othing +ri o +Ġen velope +g ary +çݯå¢ĥ çļĦ +ĠL arry +èļ Ĭ +ĠA round +åħĪ éĶĭ +ĠERR OR +res a +Ġcorrect ed +Ġassess ing +ä¸Ģ è¾Ĩ +re x +çī© èģĶç½ij +æĢ» å±Ģ +Ġill um +转 è½´ +å°ı ç±³ +é¢Ŀ 头 +ìĿ ´ +æį £ +/ K +大 èĩ´ +ĠC red +Ġenthus iasm +ï¼Į èĥ¡ +_re q +ï¼Į æĪIJ +è¦ģ æĥ³ +ight ing +F W +F act +ivari ate +èIJ½ åΰ +ĠLo ans +Y N +ä¸į è§£ +c od +M art +At om +å®ģ éĿĻ +Ġtop ology +Ġmar ry +ĠH V +ĠB achelor +Run ner +æ² Ľ +åīį åĪĹ +Ġg ods +éª ĩ +Ġreg ex +欢 åĸľ +ier ra +Ġfoss il +æĥħ æĻ¯ +大åѦ çļĦ +éĺ´ å½± +ä¸ĩ çī© +s q +ĠSp a +ID ENT +par ing +ï¼Į æĸĩ +ç¾İ åij³ +æĦı åĽ¾ +âĸ Ī +åĿ Ł +Ġle x +注åĨĮ èµĦæľ¬ +n is +ĠAppe als +æıIJ è®® +(' ./ +C AP +i ables +-h igh +ãĢĤ ä»Ĭå¹´ +ι α +Ġc ried +ï¼Į 产åĵģ +Me asure +æĸ½ å±ķ +Ġl ending +ĠJ a +举 èİŀ +Ġdis cre +åī Ķ +Ġemp irical +ï¼ĮæŃ¤ åĪ» +Ġd ire +Ġdisappe ared +ï¼Į åħ¨éĿ¢ +ia z +_L IB +ĠP rices +温 æ³ī +Ġcatch ing +ç¦ Ģ +没 èĥ½ +pl ies +åħī 线 +_p ool +â Ĥ +ĠCrim inal +H it +ag les +Cont in +åħ³ æĢĢ +éĢīæĭ© çļĦ +人 æĸĩ +Ġsince re +ç» · +æĬ¤ èĤ¤ +æľª çŁ¥ +ãĢģ ä¸ĭ +.s w +orth y +r ise +D er +ak y +ç¬Ķè®° æľ¬ +M ag +éĥ½ éľĢè¦ģ +preced ented +Ġfib ers +å°± åİ» +anch or +E q +ĠPar ks +ï¼Į å½±åĵį +ĠRedist ributions +_d ist +ĠA W +ac ies +åĮ ¿ +Ġcur r +çĶľ èľľ +Ġin herited +æĮī æij© +ub er +羣 çIJĨ +comp are +Ġsched ules +Ġrepl ies +s ound +ĠUn iverse +太 æŀģ +Ġ" @ +ा ठ+Ġe co +.s ql +_WR ITE +, ç»Ļ +ï¼Į 缮åħī +çģ¾ éļ¾ +Ġp ics +math sf +æĬĺ 磨 +æĢİä¹Ī 
åģļ +ĠMar vel +Ġreward ing +Ġappear ances +Ġdon ors +åı¯ä»¥ 使ç͍ +(p arent +im mer +ĠE le +æĮĩ å¼ķ +éĵ ² +ï¼Į对 æĸ¹ +af ood +Ġcontribut or +Decl aration +éĩij åĪļ +è°ĥ æİ§ +Ġeduc ators +ç§ijåѦ æĬĢæľ¯ +Ġelim inated +_ put +她 说 +æļĹ æļĹ +Ġan notation +æĭī å¼Ģ +æ±ī è¯Ń +Ġfort une +Ġdef in +ĠV ers +é t +âĺħ âĺħ +Ġp neum +al one +ĠA PP +ĠTest s +SE C +ï¼Į æĸ¹ä¾¿ +ĠF it +ï¼ļ æĪij +Ġt anks +åį ¤ +ĠSt ructure +çļĦ åħ¨ +å¨ĥ å¨ĥ +åĩ Ľ +/ ml +st rip +Ġcourse work +ĠEn sure +-d ec +urs ion +оР± +ï¼Į èĩ³ +ç³ Ļ +Ġto dd +åĪĨ æĶ¯ +Ġlo ops +prot obuf +/ % +çļĦ ä½ľåĵģ +ç«Ļ 起身 +Ġstrong est +Loc ale +Ġcyl inder +å¾® çĶŁçī© +.C lient +app ings +bb ing +ĠFurn iture +. Id +Ġcont ends +ãĢģ åĽ½å®¶ +ĠAdvis ory +ĠOr lando +Am ong +á Ģ +ä¿® è¡Į +Ġl ined +ener y +çϽ æĸij +D ynamic +Ġener get +æĪ· åŀĭ +æī¶ æĮģ +ï¼Į æľĿ +ä¸Ń ä»ĭ +ĠRec ipes +ĠD T +å¿į åıĹ +ĠL if +Ġdu ct +inc ial +Ġkick ed +Ġadjust able +åįģåħ« 竳 +ĠA verage +ä¾ Ħ +Ġprosecut ion +Ġsac red +ï¼Įæĺ¯ åIJ¦ +bor ough +åĪļ 好 +.Are Equal +Ġc ares +Vis ibility +Ġlater al +æ¯ı æľĪ +Al ign +-f ile +ç§ijåѦ éĻ¢ +ica id +r aska +æĭĨ åᏠ+åĨħ èĴĻåı¤ +Ġblank et +p ref +Ġn as +en es +st orm +Ġr m +Ġprot ests +çŃī å¤ļç§į +Ġje ans +ï¼Į被 åijĬ +Ġincomp lete +ĠMur phy +our cing +Ġinflu ential +æľī ä½Ļ +, éϤäºĨ +_f ields +Ġsport ing +ĠD MA +ĠGener ate +Stack Trace +Ġsurround ings +Ġsan ctions +D one +, é«ĺ +äºĶ 大 +误 å·® +ym e +D ATA +ĠP i +æĹ¶éĹ´ åĴĮ +åį«çĶŁ éĹ´ +ĠArk ansas +æįIJ èµł +车 主 +æķĪ åĬĽ +OB ILE +ï¼Į å°½éĩı +Ġ ä¹Ł +dis abled +dis able +ib les +, 主è¦ģ +ç¿¡ ç¿ł +Ġlight ly +Ġinv oke +ĠMar ie +Ġap opt +ĠMan hattan +ç§ĺ书 éķ¿ +Ġline up +âĢĿ ï¼ī +Ġshall ow +Ġwitness ed +è¿Ļ äºĭ +Ġem erge +å·² æĪIJ为 +( un +æIJŃ è½½ +Ġarriv ing +åĪĽå§ĭ 人 +Ġcorrespond ence +ï¼Į å£°éŁ³ +转 头 +èĦī åĨ² +æľī æĦı +çłģ 头 +ol in +äºĮ æ°§åĮĸ +äºİ æŃ¤ +ä¸Ģ 说 +Ġreflect ing +Ġsucceed ed +Ġgrad uates +Ġ çľĭåΰ +èģĮ åľº +_S YS +çª ¦ +Ġsc am +åįł ç͍ +st o +T unes +ĠPro position +ĠO t +ĠR ust +åī¯ æľ¬ +ĠU INT +Ġp ipes +æĹ¥ è®° +TR UE +Ġrig id +æĪij æĿ¥ +_ img +du cer +ĠOwn er +ä¸Ģ åĪĨ 
+Ġetern al +èĭ± 寸 +Ġbook ed +. right +O ps ++ b +座 æ¤ħ +en za +Ġimp ose +\ + +主 导 +äºĨä¸Ģ æĿ¡ +C redit +Ġse iz +Ġ çͱäºİ +Ġsun light +au er +æıı ç»ĺ +éŨ åīį +Ġcompr ised +ãĢģ ç»ıæµİ +Ġv ascular +_b its +Execut or +um atic +Ġident ifies +(l ong +_ opt +Ġbel ly +ï¼Į å®ĮæĪIJ +èĮ ľ +æ¶ī å«Į +ĠN ag +çķ ´ +èį Ģ +Ġex clusion +_C OLOR +g em +Ġcl ips +æĭ ¯ +iv ia +ĠC AP +æĬķ æĶ¾ +P eter +Ġre habilitation +ĠI B +Ġren al +Ġg a +Ġclear ance +è¯į è¯Ń +w oman +ĠElect ronic +U ntil +æ·Ģ ç²ī +Ġment or +ĠCent ers +ab olic +worth y +_CL ASS +æĵį æİ§ +æµ Ĭ +Ġal erts +ĠN ice +åıį åĩ» +è¯į æĿ¡ +ĠS old +{ y +_ IO +ç»ĦæĪIJ éĥ¨åĪĨ +Ġvit amins +æİ¥ åħ¥ +-N EXT +_p arent +Ġ 没æľī +Ġdef icit +Qu ote +èĤ¡æĿĥ æĬķèµĦ +Ġcast ing +åĺ ¶ +ĠB eng +è°ĥ è¯ķ +B tn +ç¼ ħ +he w +Ġr gb +af ka +ä¾ ĥ +( Y +åŀ ¢ +if acts +Ġreg isters +. tr +riv ed +Ġar ises +comp lete +R ot +éĿ¢ èī² +ĠV i +æĬij éĥģ +_ AP +pl ess +Ġvari ability +_ red +ur st +ĠP ure +{p matrix +I ts +ĠToy ota +Ġgr ill +ï¼Įçľĭ èµ·æĿ¥ +Ġweek ends +åĪļ å¼Ģå§ĭ +Ġcon gr +ĠCry stal +es p +ä¹ħ äºĨ +æĿ¥ 临 +ä½ł 好 +诸 å¦Ĥ +ĠP G +ĠW or +Ġfort unate +Ġaff air +éĢ į +not es +ĠC ant +j oint +çļĦè¯Ŀ é¢ĺ +åIJİ åį« +ï¼Įä»ĸ们 çļĦ +-c are +F ER +Ġbo oth +Ġm L +ipp et +ĠP ump +ĠMed iterranean +æĸ¹æ³ķ çļĦ +Ġpo et +çļĦä¸Ģ çĤ¹ +åħ³èģĶ æĸ¹ +ï¼ī åľ¨ +akes pe +éĹ® ä»ĸ +æĸ° 人 +头 é¡¶ +ĠÐ ° +en code +å·¥ åºı +á ŀ +Ġgl ue +D N +} ), +è¾¹ çķĮ +d em +大 æĸ¹ +Ġqu i +å¼Ģ çİ©ç¬ij +Ġincent ives +Ġpack ets +Ġl uc +Ġcons erv +ro ps +åĨĽ å®ĺ +-d ata +å¢ŀ æ·» +Ġin adequ +Ġlength y +æ´Ĺ 澡 +æĦ ķ +Ġrep orters +w idget +ï¼Įä»ĸ æĺ¯ +ĠD ynamic +Ġback end +åIJĥ æĥĬ +P ush +ãĢĤ åĪĺ +ï¼Į è¡Į +Stat istics +ãĢģ æĬĹ +åıªæĺ¯ ä¸Ģ个 +Ġorgan isms +St ock +_ word +îĹ¥ îĹ¥ +ĠP urchase +ĠY ES +F s +æł· åĵģ +l ip +éķ¿ çĶŁ +Ġintent ions +Ġmay or +ĠEn abled +: $ +Ġbreat he +ĠJ et +ï¼Į å¸Ĥåľº +Ġcheck out +=$ ( +lass es +为 æŃ¤ +åı° åĮĹ +ĠCh a +ose x +åĩł çϾ +Ġout look +ä¸į 缸 +ĠM aps +ra red +èĢĮ ä¸įæĺ¯ +Ġterror ist +Ġcamp s +Ġvill ages +Ġcalcul ating +cons ider +Ġplant ed +ä½ĵçݰ äºĨ +Y et +iet al +.P aram +çļĦ 
å°± +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠ +贯彻 èIJ½å®ŀ +Ġmid st +Ġmultip ly +Ġdon ated +C at +ĠC op +Ġconcept ual +æŃ¥éª¤ S +Ġ 亿åħĥ +Every one +ad apter +IS BN +éĿ¢ éĥ¨ +åı¯ä»¥ çľĭåΰ +åĿ¦ åħĭ +c lean +er vers +Ġ 主è¦ģ +Ġ_ { +Ġupload ed +ĠGrad uate +ãĢģ æķĻèĤ² +Ġvulner ability +æĿ¥ ä¸įåıĬ +Ġinf ring +ç¡« éħ¸ +, æľĢç»Ī +éĥ½ 说 +Ġfr ank +æĹ · +她 æĺ¯ +çĶ» å®¶ +er ie +alth ough +Ġro pe +è°ĥ è§£ +æĹ© åľ¨ +ĠL en +ad ers +Ġquiet ly +Ġv ic +ĠActiv ities +ç¥ · +ï¼Į大 æ¦Ĥ +erial ization +æĤł æĤł +ï¼Įä¸į å¦Ĥ +çļĦæīĭ 段 +Ġrhyth m +æµģ çķħ +Rad io +é£İ åħī +åĢŁ åı£ +æĬĸ éŁ³ +èļ Ĥ +建 æĿIJ +Re ason +å« ī +éķ ¶ +å¼Ģ å·¥ +æ¯Ķ èµ· +âĢĻ S +ogen ous +j et +ĠP assword +Ġ ä¸ĥ +, ä»Ĭ天 +æīį çŁ¥éģĵ +ĠAd min +âĢľ You +æĽ¼ èģĶ +åİŁåĽł æĺ¯ +it ical +å¿ĥçIJĨ åѦ +ĠCost a +ĠCont ainer +Ġass ure +å·« å¸Ī +ä¿® 建 +æ¯Ķ åĪĨ +ĠSwed ish +Ġev olve +åģļ ä¸Ģ个 +Ref resh +çļĦ åİŁåĪĻ +Ġharm on +res ources +ĠG PU +Ġsh aping +raul ic +met adata +Ġmeant ime +ï¼Į å®ĭ +身ä½ĵ åģ¥åº· +Ġdiff usion +æ½® æµģ +å½ĵ ä»Ĭ +am y +éļ ĭ +ric ular +Ġhierarch y +ä¸Ģ æıIJ +D a +è·¯ 段 +å¢ŀ å¤ļ +ĠB illy +(f ield +ros is +ï¼Į åĩ¡ +f lu +è¿ ¦ +ra ud +rain ian +ä¸į åºĶ +call back +in ology +ãĢĤè¿Ļ æĹ¶ +vert ical +Ġtravel ers +èĬ± äºĨ +Ġtherap ies +ĠM oving +em ory +ä¸Ńåįİ æ°ijæĹı +st als +, 便 +ãĢĤ éļıåIJİ +ï¼Į å²Ĥ +åıĤèĢĥ åĨħ容 +Ġdef iciency +) set +ì Ĺ +Ġsl iding +Pro s +uls ion +Dis cover +è£ Ķ +èµ° è¿ĩ +?? ?? +Ġlock s +ash a +. amazon +Ġsp rink +Ġfrust ration +èĿ ´ +al ways +äºĨ è¿Ļ个 +ĠB arcelona +欲 æľĽ +Ġre write +ĠGo ing +_on ce +in q +åıij 票 +Ġl ou +ĠN ull +ĠQu arter +Ġpr one +ĠM N +c it +å°Ħ åĩ» +è¾ĥ 好 +ãĢĢãĢĢ ãĢĢãĢĢ +LE MENT +gg le +åģļ åĩºäºĨ +activ ate +ï¼ĮæĪij们 çļĦ +sl ot +Dan iel +Ġdisc our +ï¼Įå°± åľ¨ +/ z +ĠS um +ĠVal entine +rict ed +Ġ 饰 +ad der +_B LOCK +.get Instance +text it +ID s +"" " +K S +ĠåıĤèĢĥ èµĦæĸĻ +ĠST AT +... ) +ĠB row +ĠSec urities +Per cent +. 
Entity +Ġdim ensional +-l ength +åĬ¨ èĦī +ĠTh or +Ġdiss ertation +Event Args +æĭ¥ æĬ± +éĵ¶ åŃIJ +à¸ Ļ +Ġcr ust +Ġg amma +com merce +ç®± ä½ĵ +Ġn urt +c reat +Ġprox imity +Ġdemonstr ation +b ral +ĠE PA +ãĢģ åħ¨ +ĠB asket +åĸ Ģ +çļĦ人 群 +Ġ 许 +ä¼ļ æĺ¯ +Ġillust ration +åįģä¹Ŀ 竳 +ĠMichel le +ĠAdvert ising +ä¹ĭ æĥħ +termin al +et rics +çĤ ĸ +Ġm ate +åĴĮ é«ĺ +S ar +è¿Ļ æĺ¯ä¸Ģ个 +Ġdis comfort +æĽ´ å¿« +Ġden ying +ĠId entity +表 æł¼ +ĠK am +Ġgl imp +æķ´ 个人 +. env +Ġmiss ions +Ġfrust rating +ma jor +Ġsat ur +ĠH erm +Ġconsist ed +æĸ¯ åŁº +æĹ¥ æĻļ +Ġun cover +So ft +éŨ è¯Ĭ +ÏĦ α +_S UB +ĠMarket s +åİĭ æĬij +Ġtrad ed +B rowse +ä¸įåı¯ æĢĿè®® +é¥ ª +yt est +åĻª 声 +Part icip +ew ater +次 ä¼ļè®® +Ġpl umbing +ï¼Į åį¡ +n x +Ġf iring +al ine +Ġter rain +id ency +åīį è¡Į +riv es +ï¼Į æĶ¯æĮģ +ä½ľ äºĨ +åİŁ æ²¹ +Ġy o +è¾ĥ å°ı +R GB +çıł å®Ŀ +ä½İ çļĦ +Ġcrit ically +Ġrank ings +ĠCele br +个æĢ§ åĮĸ +建设 çļĦ +p f +.f loat +pr ising +Ġsim ulated +ac a +æĿij åºĦ +st orage +Ġfinanc ially +ĠA ld +Ġappro ve +ï¼ħ ï¼Į +.d isplay +æ³ķå®ļ 代表人 +ä¸ĸ çļĦ +éļ¾ åħį +ĠPer ry +ĠG roups +Ġ ÑĢ +ĠP icture +æķ ĸ +ãĢĤ æķħ +Ġaccount ability +re ed +ä¿ĥ 使 +å°± è¡Į +Ġnum b +Ġpro gn +iss ance +éĵ ° +Ġ ie +éĩı åŃIJ +, æīįèĥ½ +Ġpack s +åİ ¥ +åĨħ åľ¨ +ĠT erry +phas is +ç§į æĹı +h uman +Ġp ap +æīĭ å·¥ +Ġre ign +Pro b +il ters +Ġ? > +f ollow +åij¨åĽ´ çļĦ +ãĢĤ åį³ä½¿ +ï¼Į让 ä½ł +ĠR u +人 çĶŁçļĦ +ĠG PL +rec ogn +åIJİ åı° +ĠNor way +太 å¤ļäºĨ +ç»Ī 身 +Ġpost erior +Ġl od +Ġy arn +ĠN ancy +F riend +ä»ĸ ä¹Ł +Ġ ä»Ĭ天 +ĠC rown +rell a +æĢ§ åľ° +强 è¡Į +Ġst icks +Ġcent res +[ TELEPHONE +Ġcommun icating +ĠForm ula +Ġgra bbed +c ue +Obs erver +ĠB A +Pre v +there um +w eg +ä¿Ŀ å¯Ĩ +éĴ § +Ġgl uten +_f ilename +ĠSD L +Ġles ions +人æ°ij 群ä¼Ĺ +ĠAr gs +-m od +- Sh +Ġby pass +St ates +ĠÏ Ĩ +ke ley +ah n +亲 åĪĩ +社 åĽ¢ +Ġexpon ential +.F ield +ï¼Į çݩ家 +å®¶ ç͍ +饲 æĸĻ +èĿ´ èĿ¶ +æĶ¹åıĺ äºĨ +_ assert +Ġmon itored +. 
Thread +Ġcolor ing +_ right +do es +Ġsp y +ĠHe brew +ĠU nd +H B +Ġmit ochond +ĠLiber ty +åĮĹ éĥ¨ +/ min +osoph y +ä½ł æĢİä¹Ī +ĠB I +ä»İ èĢĮ +ï¼Į æĹ¥æľ¬ +Ġfin ishes +Ġes cal +ue z +çIJ ī +Ġrecomm ends +äºĮç»´ çłģ +.ex ample +çĿ IJ +at um +/ json +.C heck +çļĦ 游æĪı +Ġexp ose +ĠJim my +çŃī ä½ł +èį § +---- - +Ġcool er +Ġlock down +M icro +款 å¼ı +èĢ ¦ +ãĢĤ åĽĽ +è§Ĥ èµı +N AS +å± ¿ +éĺ³ åı° +åĩĢ å̼ +ï¼Įä¹Ł ä¼ļ +Ġorient ed +assert Equals +æŀľ å®ŀ +---- -- +el ong +éģĵ åħ· +st age +Ġhe mp +ä r +æ¯Ľ æ³½ +Ġglo ves +æĬĺ åıł +Miss ing +ĠH arm +èĥĮ åIJİçļĦ +ash i +Ġaffili ated +ï¼Į åĿIJ +åİ Ħ +åİŁ è°ħ +unc her +Ġb arn +çļĦ åľ¨ +irl ines +[ in +ĠManag ing +å¯Į è´µ +ä¿¡æģ¯ æĬ«éľ² +èĭ Ł +Ġund oubtedly +g res +ub le +ĠV IP +太 空 +ĠG DP +pat tern +_ position +å¼Ģ å¹ķ +ien cies +好 好çļĦ +åĴĮ æĬĢæľ¯ +ĠRequ irements +.J son +requ ently +- head +ï¼Į åѦçĶŁ +ï¼Į è¾¾åΰ +Ġent ropy +F eb +Re ceived +ä¸Ń åİŁ +p red +Met rics +ENT ER +æijĨ æĶ¾ +åĨį ä¸Ģ次 +Ġcan ada +CL AIM +Ġres ign +f ailed +åĽ ļ +çݯ å½¢ +ĠPro of +co ord +urd y +Ġobject ion +bor ne +Ġw rist +Gen re +ï¼Į åĨĻ +ĠThrough out +ĠC e +fe ature +W ords +( GL +Ġrecord ings +Ex pect +atern ity +åģ¶ çĦ¶ +if s +Th ink +Ġro ster +æľĪ èĩ³ +ãģ ij +CON FIG +Dis cussion +ç¥ŀ è¯Ŀ +ut down +Ġ\ | +Ġunw anted +èµĦ产 è´ŁåĢº +Cons umer +-r ange +ik h +Ġvoc abulary +( to +ĠHistor ical +ĠHel lo +road cast +L at +Ġb ureau +ĠBe ijing +ĠF ern +AND LE +ä¿¡ èµĸ +TM P +e or +St yles +eal ous +ç»ı éĶĢåķĨ +qu o +Ġmotor cycle +Over view +çĥ¹ 饪 +ph an +sub mit +Ġread ings +S i +Ġactiv ists +ä¸Ĭ åįĬå¹´ +åĩº åħ¥ +ĠJ ourney +Reg istration +Ġdemonstr ating +表 éĿ¢çļĦ +åľ° åŁŁ +åıĺ åİĭ +esc ape +akespe are +ï¼Į æĶ¶ +æľŁ åĪĿ +Ġconc urrent +OT E +æijĦ åħ¥ +Ġb illing +ï¼Į åŃ©åŃIJ +se min +ast ically +ä¸Ģ 头 +ä¸ĩ ç¾İåħĥ +App end +Ġcur iosity +éĴ¢ çŃĭ +Ġconj ug +ĠStud ios +'] [ +ĠMad rid +åĴ ı +ĠRead y +ĠK le +çļĦ åIJĦç§į +ãĢģ åİ¿ +ĠG ay +и м +åıĪ ä¸į +Ġjo ins +, 导èĩ´ +IN ESS +< - +_ vec +Ġg erm +Ġconve x +), ( +èĸĦ èĨľ +IST ER +b oost +w m +ãĢģ åĽĽ +è¿ij 代 +Ġneut r +é»Ħ æ²³ 
+åĽ° å¢ĥ +åħī æ»ij +Ġle v +? ( +Ġj ournals +V el +ï¼Į åıijè¡Į人 +æĪ¿éĹ´ éĩĮ +ä¸Ģ个 大 +_LEN GTH +, æľ¬ +Ġint act +ï¼Į æĢ»æĺ¯ +Ġref in +ä¸Ĭ ç½ij +c ould +Al t +we i +Ġmanip ulation +ä¸Ģ æĥĬ +çļĦ èĩª +ĠRec ently +Ġdesc ending +ĠS nap +-m inded +ĠC zech +çª į +æĬ½ 象 +Ġrefresh ing +Pres ident +Ġch ill +åĩŃ è¯ģ +½ åŃIJ +ĠB order +Ġ$\ { +æĹ¥ å¿Ĺ +ster dam +Ġprejud ice +Ġtra pped +Ġm ast +åıijçݰ èĩªå·± +ç¬ij è¯Ŀ +Ġ ¶ +ĠB ishop +ĠManufact uring +a udio +èĢĮ æĿ¥çļĦ +Ġgl oss +Ġun changed +Ġtechn ically +ç¥ŀ æĥħ +è¿ŀ è¿ŀ +Ġimp rison +_ one +驾驶 åijĺ +Ġconst itutes +^ [@ +Ġreson ance +ĠD NS +av an +æĪĺ çļĦ +æĪij è¿ĺ +èµ° å»Ĭ +ĠL it +Ġcont empl +_CO UNT +Ġrecip ients +ç³ĸ æŀľ +ç«ĭ æ³ķ +è¶ħ åĩº +ĠS MS +I l +æµģåĬ¨ æĢ§ +ĠSam uel +Ġf ier +/ in +N BA +Ġdist ress +ac s +é£İ æļ´ +Ġhyp othes +ĠP ictures +èΰ éĺŁ +Ġb ubb +缸 éĤ» +U pper +æĶ¶çĽĬ çİĩ +æĿ¥ åĽŀ +åĴĮ æīĢè¿° +Ġclean up +ĠExp and +Ġment ally +US ER +.m atch +Ġdisplay ing +Ġinvari ant +Aut om +S everal +st ats +ï¼Įå®ĥ çļĦ +Ġox ide +Ġg ates +Ġlocal ized +Ġinv isible +Ġelig ibility +Ġs ore +åľ¨ åħ¶ +管çIJĨ åĬŀæ³ķ +ãĢģ åħ¬åı¸ +ACT ION +æĽ´åĬł çļĦ +åıĻ è¿° +ĠStan ley +ched uled +æµ® çݰ +åĸ § +åŁºéĩij 份é¢Ŀ +éĩİ çĶŁ +奥 æĸ¯ +p ur +); " +Ġhighlight ing +客 åľº +Ġsc anner +æľĢ æľī +S af +Ġrac ism +æµģ åĩº +Ġg room +Ġview er +ug a +ĠDesign er +Ġdr agon +让 åŃ©åŃIJ +ç¼ĺ æķħ +\ % +Ġproport ional +äºĨä¸Ģ éģį +IND OW +ãĢģ å·¥ä½ľ +ĠWork ers +Ġquestion naire +D aily +Ġb on +äºĨä¸Ģ 次 +ĠBut ter +çĶŁæ´» åľ¨ +åι éĤ£ +Ġsimpl est +ĠInit iative +ãĢģ ä½İ +å¢ŀå̼ ç¨İ +ĠPlan et +ĠC OMM +u u +ĠE t +ĠFor ward +ï¼Į æİ¨ +Ġp orn +礼 仪 +ĠH il +an imation +Ġc ached +R s +J P +-h ome +er ness +Ġd ummy +çĸ¾çĹħ çļĦ +ĠVari able +Ġcast le +Ġemphas ize +Tag ged +åı« æĪij +( System +Ġdownload s +ç© Ĺ +å¡ŀ å°Ķ +å¯ ħ +im ming +åĩł çĤ¹ +缸 éģĩ +W W +åĵĪåĵĪ åĵĪ +her ical +ä¸ĩ éĩĮ +éĨĴ äºĨ +{tab ular +ä¸ĸ ä¸Ĭ +-k ind +CT L +Ġdr ift +ET ER +å¤ĸ å¥Ĺ +Ġport rait +èĶĵ å»¶ +ï¼Į让 æĪij们 +奥è¿IJ ä¼ļ +åį´ ä¸į +Comm un +ĠM OD +_b l +æ¯ıä¸Ģ ä½į +Ġfrust rated +.getElement ById +é© ´ +åļ ¼ +æľĿ 
éĺ³ +Sc ience +Cert ificate +以 åIJİçļĦ +é¢Ĩ è¢ĸ +Ġcelebr ity +è¶ĬæĿ¥è¶Ĭ å¤ļçļĦ +ILE D +èIJ ĥ +Ġfire place +, æĪijçļĦ +å¾Ī åĸľæ¬¢ +åºŀ 大çļĦ +æŃ£ æĸĩ +请 ä½ł +æ¯Ľæ³½ 举 +Ġlisten ers +ï¼ĮæīĢè¿° 第äºĮ +z heimer +C art +ãĢĤ å¾Īå¤ļ +åĽŀ è´Ń +ĠEval uation +Ġdownload ing +Ġadvoc acy +. line +ĠPol ish +- Type +es ium +Enc oder +Ġ éķ¿ +éķ ģ +çĤ¹ åĦ¿ +ãģ£ ãģ¦ +ĠH z +Ġins ult +Ġenzym es +ĠL inda +æĪij 没æľī +Ġhom emade +Ġp ier +Ġb aked +ĠP P +Ġrad ar +ĠAdvent ure +æ± ¹ +roph ic +å¹½ é»ĺ +') -> +Ġsn acks +ĠB P +åŃ¦ä¹ł åĴĮ +ä¸į æĿ¥ +C ut +åĵĪå°Ķ 滨 +H ex +æł· å¼ı +Ġdis charg + ¼ +ãĢĤ 好 +Ġrel uct +âĸ ¡ +åĽ½å®¶ åĴĮ +parent s +ĠC oc +or rect +æİ§åζ ç³»ç»Ł +åĬ¨ åijĺ +çļĦåīįæıIJ ä¸ĭ +T B +g el +( client +EM P +è¿Ļ ä»¶äºĭæĥħ +ĠF B +å½ĵæľŁ æįŁçĽĬ +åıªæľī ä¸Ģ个 +ST AT +lades h +磨 æįŁ +å®ŀæĸ½ä¾ĭ çļĦ +N B +è´¦éĿ¢ ä»·å̼ +èĩªçĦ¶ æĺ¯ +Ġp ear +åĩł åı¥ +Ġret ire +ï¼Įä¸į ä½Ĩ +Ġprompt ly +ä¸ĵ注 äºİ +O ffic +x F +Arch ive +åħ¥ æīĭ +åĨ· åĨ· +_F IELD +çķĻ ä¸ĭäºĨ +Ġre lying +èģļ ä¼ļ +Ġgenu inely +{ i +Ġun used +con vert +æĹ¶éĹ´ 为 +Ġ(! 
( +ï¼Įä¹Ł å°± +çIJĥ åľº +èģļ åIJĪ +_ over +ĠD OM +å¿«éĢŁ çļĦ +_ print +ru ed +Ġcost ume +ï¼Į çĥŃ +_d es +R UP +Ġw ool +è§Ĥ çļĦ +um ph +空 çϽ +æ°Ķ 缸 +, åİ» +ãĢģ åĪĺ +Ġmount ing +帽 åŃIJ +/ local +ä½ĵ 温 +yst er +Ġt abs +Ġn ails +_O F +the ir +IB LE +Ġin expensive +åħī åѦ +Ġo ak +Ġbatt les +* > +ĠHe ight +çľĭ å®Į +enn a +å°Ĭ æķ¬ +èIJ¥ ä¼ģä¸ļ +Ġn aked +ĠSc i +æµĵ éĥģ +_w indow +-c ore +Ġenumer ate +m ol +ä¿ º +ĠJ azz +ĠTrans form +TERN AL +çݰ æľīçļĦ +ãĢĤ æĤ¨ +ĠH our +æ´Ĺ æīĭ +çĬ Ģ +.s upport +( json +n r +Ġl umin +andid ates +èĻļ 空 +æħ¢æħ¢ åľ° +m are +Ġorig ins +çĽij çĭ± +åIJĪä½ľ ä¼Ļä¼´ +Pay load +Ġemotion ally +åľ¨ ä¹İ +Ġext ensively +红 åĮħ +åĩº æ°´ +she ll +Ġcl ue +Ġstress ful +Ġref erenced +ĠCru z +am bers +çļĦ æłĩåĩĨ +Ġthreat ening +Ġeat en +èĶ ļ +Ġsp inal +_ return +å±ĭ éĩĮ +Ġadapt ive +ĠAll ah +-f ive +ampl ing +çłĶ åζ +ĠM ouse +Ġon ions +k an +æľī ä¸Ģå®ļçļĦ +èģĶåIJĪ åĽ½ +Ġher b +Stud y +çª ¥ +Ġconver ter +ĠÏ ģ +ĠBrother s +为 éļ¾ +é»ijé¾Ļ æ±Ł +äºĨ åij¢ +C OVID +ise conds +Ġphenomen a +Ġambit ious +Ġtra it +Ġ** * +ä¸Ģ éĹª +ed ar +éĩį 建 +Ġ[ ( +æº º +æłij æŀĹ +S ales +ĠSpecial ist +Ġpropri etary +æīĵ å·¥ +è¡Ĺ ä¸Ĭ +Ġf p +æ²Ī éĺ³ +Ġdef inite +ä¹Ł éĿŀ常 +Ġn ost +ä¸Ģ åı£æ°Ķ +ĠA CTION +N UM +Ġperf ection +\ geq +æģ¶ å¿ĥ +è´ ® +å±ĭ åŃIJ +æĮĩ åIJij +pe ak +ĠTrans action +ç¿ © +ov o +. zeros +. active +Ġgeneral ized +Product s +Ġcriter ion +Ġgen ius +ä½ł çŁ¥éģĵ +_m ult +ç»ıæµİ çļĦ +éĤ® ç®± +Ġspons ors +èĤ¯å®ļ ä¼ļ +ĠBar ry +è¦ģ 说 +ĠK an +æľī 许å¤ļ +çīĽçļ® çĻ£ +J es +éĺĢ éŨ +Ġremot ely +纪 å§Ķ +åıĪ ä¸Ģ次 +Ġinterview ed +ĠP ap +ĠGard ens +æĺ¯ä¸Ģ åIJį +æķij åĬ© +ç¹ģ åįİ +审 ç¾İ +æij©æīĺ 车 +R o +Ġtr unc +è¿ĺ å°Ĩ +é¢Ŀ å¤ĸ +è¿ĺ ç®Ĺ +æĮ ļ +Java Script +æı´ åĬ© +ä½ł ä¹Ł +é¦ĸ æŃĮ +éģĹ åĿĢ +ĠK ir +Ġimag ined +ĠDevelop er +Th ings +ĠCustom ers +é¢ĩ 为 +ä¸ļ çļĦ +çİĭ åĽ½ +å¹¶ åıij +èĭ± æł¼åħ° +ï¼Į çϾ +/w iki +ĠAs set +_s core +ĠX P +Ġhe ap +éķľ åŃIJ +Ġinnov ations +arm ing +åı¤ æĢª +T ips +av g +at iv +ar ations +æ£ ± +B rowser +Ġsoc io +Ġc rap +æŁIJ ä¸Ģ +. 
reg +ĠC it +al ia +äºĨ 对 +笼 罩 +Ġcare g +迹 象 +ï¼Į åĸľæ¬¢ +){ # +åıįåºĶ è¿ĩæĿ¥ +Ġcatal yst +Ġ Ñĩ +åħΠ天 +ĠCont ents +_reg ister +ĠM ack +ĠOb viously +åĿıè´¦ åĩĨå¤ĩ +åĮ» ä¿Ŀ +ĠM umbai +Ġun precedented +Ġaffili ates +Ġdecom position +Ġwh olesale +Ġaccount able +ç©¿ ä¸Ĭ +Ġdisappe ar +Ïĥ η +ĠOlymp ics +å¼ĺ æī¬ +Ġn ap +Ġsew ing +amp ed +大 èĥĨ +[ name +Second s +Ġmerg ed +T oggle +ĠH ydro +ĠT ickets +çģ° å°ĺ +Ġconver ting +Ġdeal ers +书 çļĦ +Ġdis contin +ç´ł åħ» +导èĩ´ çļĦ +Ġqual ifications +ä¹ ¾ +å½ķ éŁ³ +ĠSur face +ĠParent s +. back +rit ies +Mod els +æ¼ı æ´ŀ +èĬ± éĴ± +Ġv et +ä¿ ĺ +Ġye ast +ack age +Valid ate +Ġpal ette +/w p +_de lete +Ġdecor ated +gl ass +çīĩ 段 +Ġg ospel +TR A +æľī æľĽ +.S e +l ists +Ġterr ific +ä¸Ģ ç¯ĩ +è¿ĺ 羣æĺ¯ +åIJİ èĢħ +subset eq +è½® åĽŀ +Ġan ch +ĠMove ment +ĠK B +ï¼Į å¹² +ãĢĤ æĪijçļĦ +æĩ µ +ĠEmploy ee +ç§Ĵ éĴŁ +è̳ æľº +Ġincent ive +b ag +X T +æķĻ å¯¼ +am ation +ï¼ĮåĽłä¸º ä»ĸ +ĠE dd +管çIJĨ åĴĮ +Ġgra ve +å·²ç»ı æĪIJ为 +Ġd read +l oss +Current ly +ãĢĤ å¹¶ä¸Ķ +INS ERT +wh y +综 èīº +aw k +èµ¶ å¿« +Ġ ä½ľä¸º +èĢģ çϾå§ĵ +ãĢĤ æĽ¾ +Ġadvis ory +c amp +C AL +Ġremed ies +æīĵ çĿĢ +end ent +对 çħ§ +çģ« çļĦ +Ġad op +P OS +åŀ Ħ +Med ical +Ġ í +æĢ ¯ +天 åŃIJ +ä½ł æľī +Ġ{ : +_m ain +è£ħç½® çļĦ +Ġun set +Ġcar p +Ġdet erior +il ia +Ġ éĢļè¿ĩ +Ġmot ions +Ġcan onical +Ġhun ger +Ġrep o +Ġrout ines +室 å¤ĸ +åijĬ åĪ« +end e +åIJij å¾Ģ +ĠD IRECT +åįĹ éĥ¨ +Ġcompliment ary +p ers +èŀº æĿĨ +Ġdur ability +红 äºĨ +åIJĮ ä¼´ +Ġopen ly +You Tube +um i +ĠCreat ing +Ġaut umn +èİ·å¾Ĺ çļĦ +Ġall ies +éĥģ éĹ· +åħŃ åįģ +_m anager +ut ral +伯 çī¹ +ch art +or igin +ĠMort gage +å¤ļ åIJį +èµ· ä¼ı +æĵį 纵 +ĠL uther +ï¼Į ä¿Ŀ +PL AY +ĠPat ient +Ġarbit ration +ine e +Ġb is +å¤ľ éĩĮ +, ä¹Łä¸į +. 
ä¸Ģç§į +Line ar +/ sh +Ġf riction +Ġl p +Ġdess ert +ç²¾ éĢī +æīĵ çļĦ +ç¼ĸ è¯ij +Ġelev ation +st ant +éĽķ å¡ij +Ġintim id +ï¼Į让 她 +æľ¨ æĿIJ +_ exp += s +ä¹ Ĵ +ĠU r +D em +ä»ĭ åħ¥ +Ġrel ay +æĵįä½ľ ç³»ç»Ł +Ġass urance +æijĨ æīĭ +config uration +Ġsod a +ĠE lementary +un ge +{ proof +Ġsoc ieties +被åijĬ 人 +M er +ĠFood s +L iked +Ġillust rates +-m enu +åĹ £ +on avirus +\ sqrt +æIJ ĵ +éĺ» æĮ¡ +æĥ³ çŁ¥éģĵ +ĠOrgan ic +ĠH OW +å°ı å°ı +ãĢģ éķ¿ +æ´» æ³¼ +_ UP +Ġstim ulus +ï¼Į çŀ¬éĹ´ +^ - +Ġchar ter +.C ore +Ġrob ots +ãĢĭ ãĢģ +Ġcub ic +满 æĺ¯ +å°ij 许 +å½ĵ åľº +any on +åı£ æ°´ +non umber +ĠCR M +ĠR acing +we ets +æ¢ ĵ +ĠF IX +æī§ çħ§ +æĮĤ çīĮ +ĠFire fox +Ġmis under +ĠWal ter +Ġpreced ing +éĺ» ç¢į +al ert +Ġrefuge es +天 åłĤ +æĸĩ çļĦ +ĠG ospel +åĪĿ 级 +èĿ ī +çļ Ĥ +Ġ( # +丢 失 +Ġcomb o +为 åħ¶ +Ġform ulas +ish a +æļ ĩ +çļĦ æ´»åĬ¨ +... âĢĿ +ĠJ ake +_pro perty +åįģåĪĨ çļĦ +è¿Ľ åĩºåı£ +ĠSome one +åij½ åIJį +ï¼Į åĨ³å®ļ +(' \ +id av +Ġstri ps +ĠGO OD +Ġprim itive +ĠConsult ing +. form +å¼Ģåıij åķĨ +éĺ» åĬĽ +ĠMin or +ĠMod els +Ġqual itative +.n av +lock ed +ĠMax imum +} ); +Work er +çļĦä¸Ģ éĿ¢ +ç¬ij çļĦ +or ie +D u +éļ¾ è¿ĩ +ä½Ļ çļĦ +( pos +ãĢĤ éĻĪ +- % +Nav igation +ä¸İ ä¹ĭ +ĠPortug al +Ġham mer +åħ¨ åİ¿ +-cent ury +Ġy og +ENT S +Ġ 注 +ĠJess ica +ol ation +mem ory +åij Ĺ +èĩª æĿĢ +å®ŀæĸ½ æĸ¹æ¡Ī +Ġreve aling +Ġs am +çķĻ ä¸ĭçļĦ +éĴ¢ æĿ¿ +ĠTrend s +èģĮ ç§° +Ġprom o +ĠE urop +ĠS und +æľī ç§į +Ġ è§ģ +ĠRef lection +åħŃ ä¸ª +u cc +åĢĴ åħ¥ +_ empty +ĠR av +èij « +åº ¶ +举 é£İ +iqu id +æĺ¥ åŃ£ +.re nder +ĠC ultural +æº ħ +Ġref ined +æ¸ħ ç®Ĺ +, åı¯èĥ½ +_C FG +Ġattend ees +{ u +ven ile +æĺ ¼ +_G PIO +äºĨä¸Ģ 大 +Ġspec ifies +æĺ¯ èĩªå·± +èĦ Ĭ +æŀģ èĩ´ +çľ · +ra vel +ym an +Ġpill ow +è¡Ģ ç³ĸ +we ak +ç§Ł éĩij +. 
version +R ock +ä¸Ĭ æĺł +Ġ å¦Ĥ +æĪĺ å½¹ +Ġbelie ving +ĠT ensor +é¦ ħ +Ph ase +ĠÌ ģ +Ġarg uing +åı« 声 +åħ³ çα +产åĵģ è´¨éĩı +As sembly +Ġpetition er +Ġl ingu +Ġus b +c atch +ĠS IM +Ġtr ay +å¾Ĺ 罪 +ĠExt ract +Ġh obby +Ġselect ive +bal ance +Ġsold ier +æİ¥ 头 +Can vas +op ens +ä¹° æĪ¿ +ï¼Į å®ŀéĻħä¸Ĭ +Ġ( & +Ġur ged +åı¯ çαçļĦ +ä»Ģä¹Ī äºĭæĥħ +ĠKe ith +Ġf ür +åĩı å°ı +æĢĿ ç´¢ +è¾ĵåĩº 端 +çĽ Ķ +ĠEx cell +c nt +-B ased +Ġsumm it +å½ ª +Ġsprint f +åģ¿ è¿ĺ +Ġassemb led +ï¼Į æľ± +T or +abs olute +() ), +ĠG arc +z el +æĬĢæľ¯ åĴĮ +PRO C +ç¿ħ èĨĢ +ĠCONTR IBUT +M a +string s +Ġabund ant +èļ ķ +" + +n b +// # +ĠBelg ium +rit ic +ä¸Ń æ¯Ĵ +- St +e cca +è¾ĥ 好çļĦ +ä¸į 幸 +ĠG UI +å®¶ éĩĮçļĦ +in burgh +ĠReg ulation +Ġretail er +ĠHamp shire +å±Ģ åĬ¿ +审 çIJĨ +çł´ 产 +羣å®ŀ æĢ§ +Rich ard +åľ° å°Ĩ +ä¸į 让 +. action +常 å§Ķ +ä¸Ĭ å¹´ +< ' +身 åIJİçļĦ +ĠBen jamin +b id +ynchron ous +ï¼ ĵ +-z ero +.p re +[ width +.assert True +æıı åĨĻ +Ġutil ities +B ob +Ġshare holders +åĩº èĩª +产ä¸ļ åıijå±ķ +Ġvine gar +ĠChel sea +没æľī æĥ³åΰ +M AT +d ynamic +S yntax +ĠComput e +ĠGener ally +女 çļĦ +ever se +çļĦ人 æīį +ä¸į åı¯ä»¥ +Ġtub es +Ġo t +Dec oder +Ġqu ilt +health y +åıĪ è¦ģ +èι èζ +Ġmax imal +Ġpress ures +ĠEn h +ç½® äºİ +ĠInst ant +èµ· åºĬ +æĢ» çIJĨ +H Y +ĠR ew +ĠBack ground +-inc ome +æĺ¯ æĹł +Ġsun ny +æĺŁ è¾° +ĠI oT +å¿ħ éľĢ +i pper +ĠH S +ĠâĪ ¼ +est hes +set up +Ġind ia +ï¼Įè¿ŀ å¿Ļ +M ajor +, int +éĿł è°± +çĢ ij +ä¸į æŃ» +å¢Ļ ä¸Ĭ +Ġmagnific ent +Ġrem oves +人 æ°Ķ +åħ± 计 +欧 缣 +DE V +Ġl av +o osing +æĥ ¶ +ãĢģ 第ä¸Ģ +Ġinteract ing +ĠComp etition +Ġhe pat +Ġtrad itionally +å®¶ ç͵ +Ġterm inate +åģľ çķĻ +ĠEconom y +ĠM ovies +ĠS PI +æĿij éĩĮ +c u +_IN DEX +æĮģ æľīçļĦ +_F AIL +Ġgu ards +Ġt attoo +ä¸į ä¿¡ +åĺ İ +ĠC iv +ip ly +ĠWhe el +ï¼Į åѦ +rupt ed +x FFFF +æª ¬ +il ine +ĠAb raham +å¸Ĥ çļĦ +ĠL ate +m ine +< > +ä¸ī ä½į +Ġquick er +en ario +ĠGu arant +æ¶Ī失 äºĨ +_m utex +ĠInt ent +ĠBreak fast +et on +ï¼Į éĴĪ对 +å·²ç»ı å¼Ģå§ĭ +ãĢģ ç͵åŃIJ +Ġcous in +at ics +Ġast hma +Ġobs erver +çļĦä¸Ģ 项 +Ġ% > +ä»ĸ å°± +ä¿¡ç͍ åį¡ +Ġswit ches 
+_B UFFER +æģ ¤ +仲 è£ģ +ä¸Ĭ 级 +Ġred und +失 误 +æ¯ı åij¨ +M or +åŃIJ 宫 +Ġper p +) % +K ing +The ir +åĩº åĽ½ +D est +好 ä¸į好 +, ç»ĵæŀľ +ĠD ir +éĢī è´Ń +s ync +æĽ´ æĶ¹ +ĠThe ater +ãģª ãģĦ +Ġlay ing +.Form at +ĠD A +åĢ ª +Ġ è¿Ļç§į +æĪij çİ°åľ¨ +æ³ķ å®ĺ +L arge +-per formance +u ator +åIJij ä»ĸ +ĠW P +ĠD raft +éĥ½ ä¸įèĥ½ +ĠF iction +éĤ£ å¼ł +âĨ ĵ +ĠH orn +Ġbra ke +ĠBuff alo +Ġshe er +Sp ot +P aint +Ġdiscipl ines +Ġmic rowave +] / +_ space +cl uster +eng u +à ¹ +et ail +w ere +n l +ĠCon crete +ï¼Į 容æĺĵ +Ġfre ed +Ġdies el +èµ· çłģ +. No +Ġ" ./ +ä¹IJ åĽŃ +Ġassert ion +C as +US INESS +L ondon +Ġvi agra +éĻį æ¸© +天 çĶŁ +ĠH onda +ĠG abri +/c ss +â̦â̦ â̦â̦ +fl at +æľī ç͍ +Ġupgrad es +* (- +. char +å¾Ī å°ı +çª Ł +RE AT +iox ide +Ġk wargs +æĸĩ ä¸Ń +Ġop acity +Ġnorm s +.c olumn +å½ĵæĹ¶ çļĦ +Ġimport s +çĺ ¾ +Ġteach ings +ĠG ate +ä¸į è¿ľ +æĺ¯ æ¯Ķè¾ĥ +, æĪIJ为 +ĠC ODE +âĢĿ ï¼ĮâĢľ +Ġtraject ory +ĠS orry +ĠH unt +è¯ģ ä»¶ +ç»Ĩ ç»Ĩ +ĠY ahoo +Ġadvoc ates +Rec ently +çݰæľī æĬĢæľ¯ +{ }, +åľ¨ åĨħ +so ever +.X ml +èĢĮ è¿ĩ +Ġw ished +æıIJä¾Ľ ä¸Ģç§į +å¹´è½» çļĦ +t ags +Ġbet ray +++++ ++++ +Gener ate +an as +ĠW ash +Ġrecru iting +.P arse +Ġpick up +Ġsp otted +he a +, åį³ä½¿ +æĦıè¯Ĩ çļĦ +ĠCol lections +èķ ī +. 
local +Ġhel lo +Ġmut ant +Ġgreen house +äºĨä¸Ģ åľº +èµŀ åIJĮ +ï¼Į åħ¨åĽ½ +ĠSp ark +Ġdry ing +_en able +æĪIJ å¹´ +Ġmusic ian +Ġdem ographic +{ Q +éĻĦè¿ij çļĦ +å¾Ĺ 太 +zz le +Ġdemand ed +P icker +ret val +Th ird +B rit +å¤ļ 彩 +ï¼Į对 çĿĢ +s id +_t emp +Ġsur geon +ALL Y +ï¼Ł å¦Ĥæŀľ +ĠS au +ãĢģ é¦Ļ +ĠApp lied +Ġat he +康 çĨĻ +ç»Ļ åŃ©åŃIJ +C apt +ĠSh adow +Ġtrig gers +è± ģ +æŀģ äºĨ +Col ors +Mod ified +åĪ· æĸ° +åı¯ä»¥ 让 +Ġprest igious +ä¾į åį« +Ġb b +Ġre de +ĠR io +ĠList en +Ġsh y +A verage +åģľè½¦ åľº +æľī 两 +å»¶ è¿Ł +åľ£ è¯ŀ +ĠFour th +æĬ ¿ +ç° ¿ +ĠL ayout +æĹ¶ è¾° +ĠChem ical +] } +w en +- U +ãĢĤ ãĢIJ +ĠChem istry +ï¼Į åħ¨éĥ¨ +æ¼Ķ ç»İ +ï¼Į å®ŀåľ¨ +ĠField s +严 åİī +ĠLa unch +å¼ Ī += ` +åħ¬å®ī æľºåħ³ +åͱ æŃĮ +å®ŀéĻħ æİ§åζ +Ġcav ity +çļĦ ä»·å̼ +Ġ- *- +Ġind ul +Direct or +åı¯ è¾¾ +aret te +çłĶç©¶ ä¸Ńå¿ĥ +uzz y +Ġwarm th +Ġhint s +å¢Ļ å£ģ +op ol +In c +.C olumn +æ±ĩ çİĩ +Path s +M ad +ĠPar ad +h m +Ġfil mm +ĠI DE +åIJį ä¹ī +åĪĨ è£Ĥ +ä»Ģä¹Ī ä¸ľè¥¿ +ï¼Į ç¡®å®ŀ +_C ALL +Ġinform al +аР¼ +ï¼Įä¸į åĨį +交 è°Ī +Ġpars ing +Ġinhib itor +çĶŁäº§ 线 +å¹² çļĦ +转 éĢŁ +Aut hentication +Ġath lete +EN O +éϤ å¤ĸ +ĠString Builder +ï¼Į çŁ³ +.D ate +ĠW idget +g om +, å°¤åħ¶æĺ¯ +ï¼Į å·¥ä½ľ +鼨 æ°´ +_LE VEL +M gr +Ġjust ified +ĠC arm +æ´Ĺ 涤 +ãĢģ æ²¹ +è·¨ è¶Ĭ +ĠA irlines +éĿ ´ +ï¼Į 谢谢 +. Start +çĭ¬ç«ĭ çļĦ +ĠWat son +ul o +ĠB enn +az ing +. 
Controls +éĿĴ çĿIJ +/ data +Ġcustom s +Ġb g +- defined +è¿ĩ 身 +ĠEv idence +( List +×ķ × +Ġ" ../ +éĶĻ误 çļĦ +ĠProv ide +ï¼Įå¹¶ ä¸įæĺ¯ +c z +åŁºæľ¬ çļĦ +ãĢĤ å¤ļ +Ġpe el +In clude +çŃī å·¥ä½ľ +[ m +Ġcer amic +ç»ĦæĪIJ åijĺ +Ġbut t +ĠA K +æķĻ ç¨ĭ +i oc +çĤ¹ èµŀ +æĢ» çļĦ +ä¼ĺ è¶Ĭ +éĻ ĭ +d w +åģľ ä¸ĭ +P ACE +âĢĶ a +ĠCam eron +é¢ ħ +Ġur ine +浸 泡 +çĴ ĩ +Ġh alt +ĠG E +æĭĨ éϤ +ict ory +, l +Âł ä»ĸ +rodu ce +Ġin appropriate +Ġacceler ate +Ġ ### +ĠB attery +Ġexecut ing +ord able +è¿ĩ 大 +Ġexplos ion +J obs +åĢ¡ 导 +Ġvol can +åĨħ ç½® +ĠDr ag +æ¢ µ +人 æĢ§ +çļĦ主 é¢ĺ +Ġlux urious +St ub +_p re +è¶£ åij³ +Cl ip +Ġst ip +Ġfar mer +g ender +æĪIJ ä¸Ģ +ĠM ight +: m +_ IF +æīĭ 表 +导 åħ¥ +co vers +åľ° éĹ® +éĢ ® +æĿĥ éĩį +Ġinter ven +ĠDe als +ï¼Į çģ« +å¾ģ æľį +Market ing +严 è°¨ +b ay +ç»ĺ åζ +ĠU T +.l ayout +æĦŁ åĴĮ +å°ı ç¨ĭåºı +èİ ½ +åĨį ä¹Ł +lu ence +Ġpo ems +[ , +Ġc ad +ar ith +ĠY e +ap is +Com bo +ĠT CP +Ġinsect s +åħŃ å¹´ +S ure +è¿Ļ å®¶ +x C +ä¸į çŃī +, h +天çĦ¶ æ°Ķ +ĠDist ribution +æĪij们 å°± +() ); +é© ¼ +Ġf li +æĪ Ĭ +彩 èī² +é¢Ħ å®ļ +im iter +v r +.M sg +ï¼Į åįģ +Ġc uisine +Ġk om +ä½ĵ åĨħçļĦ +t u +ĠM iles +p anel +ĠZ hang +åIJĦç§į åIJĦæł·çļĦ +Ġliber ty +Ġfundra ising +Ġ é»ij +P adding +Ġdisput es +-f ocus +Ali as +- lo +Ġdiscover ing +Ġm int +itud inal +使ç͍ äºĨ +çݯå¢ĥ ä¿ĿæĬ¤ +åľŁ è±Ĩ +in ces +åħ¬å¼Ģåıij è¡Į +æĪij æľī +valid ation +Ġfact ual +Ġliter acy +Ġrid ers +Ġneg atively +åı¶ åŃIJ +èĩª æľī +åĺ» åĺ» +ç»Ħ éķ¿ +Ġqu arters +è¿ĺ è®°å¾Ĺ +åΰ 大 +ä»İ æŃ¤ +osh op +头 çĹĽ +ET A +ï¼Į æľĪ +使ç͍ 寿åij½ +åĴ ļ +Ġprompt ed +çļĦ女 åĦ¿ +^ k +ĠE sp +åħĭ åħ° +缩 å°ı +se ver +à ² +ĠHope fully +ĠFant asy +H ook +ĠIN TEGER +Ġth ou +åĩ ³ +An onymous +ĠLo op +饱 åĴĮ +Ġbre eding +Ġfle w +Ġcomp ressed +ĠPerm ission +Ġ ids +çα好 èĢħ +_l imit +ĠGuard ian +Ġca ption +[ self +Ġmand ate +çİ© æĦı +bor o +身 æĹģ +Ġrecogn ised +ĠSt eps +ç«ĭ æĸ¹ +Ġtu ition +Ġupgrad ed +_c nt +ĠS ter +ix on +CH O +Ġadvertis ements +ç³»ç»Ł ä¸Ń +æĬĽ å¼ĥ +ĠF inding +Ġsal mon +x it +Ġgra ins +Ġam ple +Ġconnect or +R eb +ç½® ä¿¡ +åĨħéĥ¨ çļĦ +Ġ ions 
+Ġcollabor ate +, A +ĠO liver +.s ort +, æĿİ +æĦı æĦ¿ +ä¸Ģ æĥ³ +pro f +Ġ 红 +De ep +Ġt ribute +åı¯ ä¾Ľ +c opyright +人 æľī +åĤ¬åĮĸ åīĤ +Y C +ä¹ĸ ä¹ĸ +M ir +Ġcou pons +Ġpredict ive +Ġadjust ing +éĹª ç͵ +æķ° çłģ +ra f +In i +_ch annels +æ· « +.S printf +Ġ& \ +éº Ĵ +å°Ķ çī¹ +_h w +Ġdevelopment al +ç´¯ äºĨ +Ġ 请 +activ ation +Ġparent ing +Ġuniqu ely +{ ' +< \ +R en +æľ¬ ç½ij +ĠM ills +Ġp ins +åºŁ æ°´ +çļĦ 对象 +s ame +N ice +çѾ è¯ģ +ï¼Į è¾¹ +åľ¨ åĮĹ京 +çļĦ è¡Ģ +Ġmulti plication +Ġback wards +-t able +Ġneighborhood s +ĠTest ament +ç«Ļ äºĨèµ·æĿ¥ +ï¼Įä¸į åı¯ +оР¹ +ĠI EEE +Ġback ward +è¯ Ľ +ä»İ åīį +n ik +h space +缮çļĦ åľ° +Ġ å¤ĸ +Ġ åĪ« +Ġsomet ime +认è¯Ĩ åΰ +æĺŁ çº§ +.A l +Care ers +æĪij å·²ç»ı +Be havior +åħ¨ æ°ij +ado op +Ġl d +Ġfix ture +duct ory +Ġanten na +Ġ ä¸Ĭæµ· +Ġwor ries +Ġbus es +ĠT ampa +Ġsens es +B ot +ĠF S +èij« èĬ¦ +ĠT ed +Ġmid night +par ation +çļĦ 两个 +Ġbe ats +Ġr x +ĠSh ar +è¿Ļ å¼ł +Ġ$ [ +ort ex +th ree +åłµ å¡ŀ +åľ¨ ç¾İåĽ½ +西 èĹı +l av +ä¸Ń éĥ¨ +ç½ij åĿĢ +un wrap +е к +Ġbrand ing +æŀĹ ä¸ļ +管çIJĨ å·¥ä½ľ +ĠV T +Ġbicy cle +ens en +ãĢģ åįİ +Ġhe ater +Ġst itch +c ross +ĠL ip +æĺ¯ 说 +_n et +.for Each +ĠF IF +ï¼Į æ²ī +æīĵ æī° +ok u +æİĴ éĺŁ +- read +åĨĻ åħ¥ +Ġk a +im ited +宾 é¦Ĩ +åĨį åİ» +æĶ¯ä»ĺ å®Ŀ +od ox +ä¸ī 级 +R AM +ef it +pro perties +Ġwell being +xy z +ÏĢ Î¿ +Ġconv inc +ĠF acts +Ġ' '' +C ourse +两 人çļĦ +Ġal lev +.next Int +Ġsimpl ified +F ilename +:: $ +æŃ¤ ç±» +L AB +_ connect +ä½İ ä¸ĭ +å¹³æĸ¹ åħ¬éĩĮ +ä¸į éĢĤç͍ +rest ore +çł´ äºĨ +ĠW ave +-res istant +åģĩ çļĦ +Ġutil ization +ï¼Įä¸Ģ èĦ¸ +Ġ åįİ +Ġprof ess +èĥ ¤ +' + +ĠB irmingham +âĢľ 好 +ĠIS BN +$ s +ç»´ 度 +-r isk +Ġshort age +欧 ç¾İ +, æ¯ķ竣 +sc rib +çıŃ çļĦ +V IP +åĬ³åĬ¨ åĬĽ +Im pro +ry lic +ur r +Ġfrag ments +IN TER +Ġis set +Ġsort ing +st eps +æģ¶ åĬ£ +Ġw anna +ĠEth ics +ĠC DC +ĠJ uan +, æł¹æį® +M aking +ĠL ect +)) )) +Ġe en +ĠCh ocolate +Comput er +æĭ¯ æķij +_ ) +æĻº åĬĽ +ĠP ill +ox ide +Ġac oustic +ĠRE AL +Ġb ent +_c b +ĠKind le +Ġ ä»ĸçļĦ +-s ource +um ed +Ġl ounge +/ y +Ġ åİŁ +ĠAl most +èĢģçĪ· åŃIJ +hold ing 
+æ¼Ĥ亮 çļĦ +à ® +ĠProgram me +ex ecute +Ġincorpor ating +ops is +ï¼Į æ±Ĥ +cast le +Ġe Cornell +å°ı æľĭåıĭ +it ches +Ġcr icket +Ġenroll ment +Ġplant ing +Ġquarter back +P riority +ant age +强 åĬ² +éĶ ¯ +è¡¡ éĩı +Spe aking +ï¼Į被åijĬ 人 +主è¦ģ çļĦ +Ġin ability +ĠF ri +for all +ç¹ģ æ®ĸ +ãĢĭ ä¸Ģ +M id +èĭ¦ ç¬ij +å°¾ å·´ +pl ash +DU CT +Ġacqu iring +Ġbo om +ĠMed icaid +.c sv +ĠAccount ing +éĽķ åĪ» +å¾Ī åı¯èĥ½ +/ ch +ĠVis a +fort unate +Ġ éĤ£ä¹Ī +Ġquestion ed +Vis itor +ĠB ee +æ¸ Ŀ +des cribe +æĬ¤ æłı +åı« éģĵ +éŀĭ åŃIJ +éĤ£ 人 +Re commended +ĠS ell +Ġenc rypted +ri en +Ġloc ale +ĠBroad way +æķ´ 天 +Block s +ï¼Į ç»Ħç»ĩ +ä¹Ł 太 +åįł é¢Ĩ +amb oo +主 ç¼ĸ +ph oto +æ°´ éĩı +çĮª èĤī +åŃĺåľ¨ çĿĢ +ãģĹ ãģŁ += / +ĠAnd re +æĴŃ åĩº +éĢļè¿ĩ äºĨ +ä¼łç»Ł æĸĩåĮĸ +åĩº èī²çļĦ +é¼ł æłĩ +Ġrenew ed +Ġn d +ĠJohn ny +æīĢæľī èĢħ +ãĢģ åĮº +f erence +æŁ ¿ +Ġp Ã¥ +Ġscr ut +i our +Ġs ins +æİ§åζ çļĦ +ä»į åľ¨ +奴 éļ¶ +Ġ äºij +ĠY u +çİĭ åºľ +ac ial +Ġterm inated +ç« £ +ä¸ļåĬ¡ çļĦ +æīĢéľĢ çļĦ +Ġins ulation +é¥ º +åħ» æĬ¤ +R atio +ĠPalestin ian +pe x +å®ĥ æĺ¯ +èĭį çϽ +ĠF err +M oney +Sc ott +ĠC able +_b ody +å¸Ĥ æĶ¿åºľ +ï¼Į ä¹ĭåīį +Ste ve +ro e +åij ĥ +åĽł åŃIJ +çŀ » +ï¼Į åıĮæīĭ +Ġmod ular +ĠL ion +ï¼Įä½ł ä¼ļ +Ġbrows ers +Ġcur v +åį ī +åĪĹ åħ¥ +Ġster oids +um en +å¢Ļ éĿ¢ +ä¸į ç»Ŀ +ä¹łè¿ijå¹³ æĢ»ä¹¦è®° +_ use +ĠPu erto +ĠPro cedure +å´ İ +ex ports +ï¼Į让 ä»ĸ们 +å¨ģ å»ī +Ġtraged y +LE X +Ġreject ion +_d f +ĠÎ · +å¼Ģ ä¸ļ +ĠUn known +Ġlect ures +Ġp ads +è£ħ çļĦ +reason able +Ġfound ations +. 
attr +---------------------------------------------------------------- -------- +ent ric +æľ ½ +.aw t +Ġso cks +Ġa uf +æĻ ĸ +ĠRich mond +Ġtax i +ĠTurn er +ä¸Ģ ä¸ĩ +åĽ½ åľŁ +f alls +ens ing +Ġsubs criber +è¿Ļ 项 +_ EM +Ġpub lishers +åı¯ä»¥ å°Ĩ +Ġstr len +ç¬ ĭ +æĺ¯ 她 +s elling +ĠL anc +èħ¹ éĥ¨ +w al +Ġsib lings +pan ic +Ġbeh ave +ĠEx ternal +ï¼Į æĿľ +ĠâĢ ĭ +Ġin aug +[ str +Ġsp r +L ittle +æĢ» æķ° +M G +qu est +ĠR ental +Ġcle aring +_ ALL +èĮ ¹ +åĽ¾ ä¸Ń +Ġg inger +ĠAdv ice +å¤ĦçIJĨ çļĦ +ĠN F +Ġkind a +ç¨ĭ度 çļĦ +Ġsum s +el lee +æĿ¥ å®ŀçݰ +Oper and +al er +ãĢĤãĢĤ ãĢĤ +Ġterror ism +ĠM ilk +Ġsevere ly +ä¸į åºĶ该 +æĢģ åĬ¿ +ĠInteg ration +, å¼ł +Ġsp arse +è§£ éĶģ +åıª æīĭ +W ire +大 æ¡¥ +_f irst +ĠB ass +éŨ çªĹ +Ġhass le +au kee +ĠV P +æľ¬ èĥ½ +_fe atures +- red +Ġre conc +Ġf art +comp lex +åı¯ä»¥ éĢīæĭ© +Ġinter sect +T oo +.m inecraft +Ġint rinsic +ar l +å°±æĺ¯ è¿Ļæł· +an ium +è¯ģåΏ 交æĺĵæīĢ +åĨł çĬ¶ +S napshot +/ J +RE EN +转 åıij +æĺ¯ 为 +亲 人 +& T +çļĦ åŁİå¸Ĥ +Q R +Ġincons istent +æĺ¯ å°Ĩ +ç͍ ç͵ +w r +der ived +_l abels += * +ĠN ON +Ġhum ble +ï¼Į åĩĮ +çIJĨ æĻº +Ġh arness +D OM +M ic +ĠTh row +_s ave +Ġab ort +åħ¥ éŨ +æī¾ åĩº +ç¢ Ł +æ·±åĪ» çļĦ +er d +æĶ¯ä»ĺ çļĦ +çĿĢ åIJįçļĦ +æĪĴ æĮĩ +Ġt in +å¼ķèµ· äºĨ +Vari ant +å±± ä¸Ĭ +Ġde er +Ġtr an +马 æĭī +-s chool +ï¼Į åĢŁ +çŁ¥ æĻĵ +m un +Ġbro kers +ĠEqu ity +ï¼Į åĬĽ +Ġpse udo +ॠĩ +æĿ¥ çľĭçľĭ +Ġf ame +Ġcomm erce +, å½ĵçĦ¶ +ä¸ĭ åŃIJ +åı² ä¸Ĭ +_{ {\ +ĠDis covery +åĮ»éĻ¢ çļĦ +% ). 
+ine a +综 ä¸Ĭ +v oke +ï¼Į æľįåĬ¡ +Ġis ot +ä¹ĭ 以 +è¯ ł +çľģ å§Ķ +ĠA SC +Ġr and +Ġen semble +-s ign +Ġsmart phones +D ao +äºĭ åIJİ +Ġrequest ing +N L +Ġcontinu ity +to Equal +Ġme g +åĪĨéĴŁ åIJİ +æĮĩ çļĦæĺ¯ +çİĩ é¢Ĩ +Ġchem otherapy +Ġoverwhel med +t ar +rel ations +ith ium +.P oint +æ¶² æĻ¶ +ĠMont ana +ï¼Ľ è´Łè´£ +ĠS eb +S omething +ĠBO OL +ĠShe et +M ont +æ¼Ķ æĬĢ +ĠM atter +ir ational +ä¸Ģ çļĦ +ĠC ad +ĠE lection +Ġform ally +-b uild +t v +Look up +Ġthought ful +-r ich +çϽ äºij +æ±ĩ èģļ +t im +ĠG em +åΤ å®ļ +Ġret rieved +ĠIm per +ĠC ache +Ġhum idity +ãĢĤ ï¼Į +.st ream +ĠOld er +ç«¥ å¹´ +çļĦ女 åŃIJ +ç» ¯ +Ġinf ants +ĠM AP +Ġm ul +Ġre vis +éĩijèŀį æľºæŀĦ +ĠTy ler +æİ IJ +ĠAltern atively +-e ffective +æł¸éħ¸ æ£Ģæµĭ +ãĢĭ 第 +Ġsc ent +åķ § +{ V +ĠR ET +b at +åı¯ä»¥ 为 +æīĭ éĩĮçļĦ +Ġquadr atic +Ġ第ä¸Ģ èĬĤ +Ġp ou +äº ¥ +Ġsou ls +Ġrot ating +æŃ£ å½ĵ +Ġd c +ownt own +è¡Į 使 +åİŁ ä»¶ +Ġsil k +çļĦ çŁ¥è¯Ĩ +ĠCons ervation +çľĭ å¾ħ +ä¼ĺéĢī çļĦ +us able +×Ļ × +Ġlack s +çļĦ éĩį +ç¬ijçĿĢ è¯´ +}^{ ( +ï¼Ī 以ä¸ĭç®Ģç§° +éģ¿ å¼Ģ +éĹ ½ +, æıIJé«ĺ +èĢģ é¼ł +åĽ½ åºĨ +Ġsm iling +ĠS it +inc ipal +代表 大ä¼ļ +æİ¨ å¼Ģ +b oss +Ġc inema +uss els +Ġbeat en +说 æľį +_m enu +Ġax es +çļĦ åĽ½å®¶ +ĠU L +An not +Th reshold +Ġf og +æľī åĪ© +æĶ¾ å¼Ģ +N amed +ï¼Įå¦Ĥ åĽ¾ +Ġrefriger ator +Ġd ots +çĽij è§Ĩ +Ġdef ending +èĢĮ èµ· +æ®ĭ éħ· +F rank +éĺ ± +ëĭ ¤ +Ġreal ise +ï¼Ł æĪij们 +O CK +_ env +Ġs ung +ĠCo in +Ġ ____ +.res et +Data Type +ĠR ates +RE T +_B L +èļĤ èļģ +the m +çIJ ħ +Ġstr anger +æĬĬ èĩªå·±çļĦ +qu in +ĠF u +Ġfl ame +ATION S +os omes +vis ual +å¹´ å¼Ģå§ĭ +- as +æľĢ å¤ļçļĦ +ï¼Į大 éĥ¨åĪĨ +客æĪ· æıIJä¾Ľ +Ġinstrument al +Ġeng lish +æĿ° åħĭ +_P REFIX +c ents +æį Ĩ +-y ard +ãĢģ åĪĨ +ĠO scar +.param s +åıį æĢĿ +db c +ENT IAL +对 æĪij们 +ĠB in +itut ions +n v +. 
Contains +æ¯Ķ çī¹ +Ġà ¤ +æĢĿ 念 +æ¨ Ĭ +sp ired +ĠNeb raska +广 å·ŀå¸Ĥ +åIJij å¤ĸ +åıĹ æįŁ +æłĩ 注 +ĠP apers +war f +æĬ¥ éħ¬ +çļĦ ä¼ĺåĬ¿ +ĠN athan +av our +ĠWell ness +åĪĽ æĸ°çļĦ +ud er +Ġd rought +å®ļ æĹ¶ +Ġpro state +ĠSh ape +- index +Ġdecor ation +çļĦ åı¯èĥ½æĢ§ +åµĮ åħ¥ +/m L +P ick +ï¼Į åĥıæĺ¯ +Ġw ounds +Ġnegot iate +çļĦ èĦ¸ä¸Ĭ +. ~\ +, _ +Ġw ires +ER IAL +Ġ" { +N V +èĥĮ ä¸Ĭ +ĠS AP +n ate +Ġintegr ating +ĠT ip +ĠClass es +çľĭ ä¸įè§ģ +åĩĢ é¢Ŀ +. transform +Ex ecute +Ġorgan ised +è®® 论 +çĥŃ çº¿ +Eval uate +'. $ +ĠArg uments +ĠR ivers +ä¸ĩ å¹³æĸ¹ç±³ +ĠCert ification +ĠAust ria +s ymbol +us ions +éľĩ åĬ¨ +.m ock +æ± IJ +Ġst air +_st ack +ĠAss istance +E arly +D ifferent +Å Ļ +M Hz +J on +ĠLiter ature +Ġinv oice +çģ° èī² +ĠWood s +ĠU RI +Ġball ot +ãĢģ åIJĦ +t ools +è¿ĩç¨ĭ ä¸ŃçļĦ +++ ] +ç§ī æī¿ +æ¯Ľ çĹħ +ĠP ul +Ġar tery +ur ally +\ new +Ġ åΰ +çªĹ å¤ĸ +ĠSD K +åIJī 祥 +ĠPay Pal +ĠL akes +arett es +æĦı æĸĻ +ä¸ĭ å·´ +Ġir regular +åįĸ åĩº +ĠF unctions +åIJĥ å®Į +ĠMembers hip +Ġrig orous +åģĩ æĹ¥ +æĹ¶æľŁ çļĦ +æĸ¯ æĭī +è¿ĩ åĪĨ +Ġdecor ating +âķIJ âķIJ +带 头 +ĠHe avy +e enth +å¹´ éĻIJ +-c ost +ĠCons ists +H on +ĠMarg aret +ĠDE BUG +Ġformer ly +° ÃIJ +X P +Ġinv iting +_P O +Ġvalid ated +ĠDen mark +Ġ第äºĮ èĬĤ +Ġcock tail +Ġo c +View ById +sc roll +çļĦ æĶ¯æĮģ +ĠK irk +R SS +_ ENTRY +æ¿Ģ çĥĪçļĦ +åıª æĺ¯ä¸Ģ +ĠL C +æĢ» æĶ¶åħ¥ +], [ +å°½ åĬĽ +è¿ĩ å¹´ +ĠA x +äºĨ äºĽ +Ġl ime +ĠSec ure +ab lish +ĠAb d +Ġ* _ +t ake +ãĢĤ æĪªèĩ³ +p k +çĿĢ ä»ĸçļĦ +æ¼ ĵ +ä»į æĹ§ +Ġwe aken +ues e +_result s +ĠB ever +L i +ĠStan ford +-g rade +ĠN HS +éķ¿ ä¹ħ +éĿĻ æĢģ +ah u +æĬ¥ å¤į +åħ¨ éĿ¢çļĦ +Ar rays +ĠSand ers +ĠD ialog +Ġcand le +Ġexplan ations +å¯Į åIJ« +å®ŀ ä¸ļ +Ġf ocal +ust in +- transform +ĠArg entina +Ġenc oder +Ġe g +Ġsol ic +ç¬ij ç¬ij +Ġcraft s +Ġtur key +ç®Ģ 约 +ĠCh ase +ä¸Ńå°ı ä¼ģä¸ļ +Ġ é¾Ļ +_ USE +污水 å¤ĦçIJĨ +Ġopp osing +ï¼Į éĢļ +C tx +r il +. 
ts +Ġcirc uits +ç¥ ł +é¦Ļ æ°Ķ +ĠDel aware +Ġmer cy +F ail +.B ase +Ġhapp ier +åº IJ +p rom +ç®Ģ åĮĸ +ial is +Ġterm ed +ĠO PT +inc inn +ĠFort unately +Mark er +åıª éľĢ +éĢĢ å½¹ +File Path +ĠP rad +-f ace +_ loc +è¡Ķ æİ¥ +Const raints +ç͵ ç«Ļ +Ġattack ing +{ lem +Ġb ang +ĠâĢ º +Sh arp +Ġexam ines +he y +Ġcyt ok +ĠM RI +in ja +F it +Ġ è¿ĺæľī +æĮĤ åľ¨ +æīį è¡Į +å¹³ æĿ¿ +}_ \ +div ision +m ouse +Ġresearch ing +Ġcal ibration +U lt +In gredients +Ġb p +ï¼Ł ï¼Ł +ĠN BC +æĺİ äº® +Ġm align +åħī ä¼ı +座 è°Ī +æľºåύ åŃ¦ä¹ł +å´Ľ èµ· +ĠOk ay +Ġabsorb ed +ï¼Į æĽ´åĬł +èľ ĺ +_g en +å¸ĥ é²ģ +Ġch unks +Ġvis ually +.D ebug +Ġencour agement +ĠE agle +Ġrecogn izing +U int +sc ore +æ© ± +ĠB ug +æľĢ æĸ°çļĦ +(std err +F ive +ä¸ĭ 次 +aly zer +pect rum +ç³ ¯ +æľī ä¸į +éĥ½æĺ¯ åľ¨ +Ġprolong ed +æ² Į +al ink +sc ience +è°ĥ åĬ¨ +Ġharm ony +Ġsuff ix +Ġsubt ract +éģ ģ +ops y +.h andle +ĠEnt reprene +èĢĮ æĺĵ +ĠStrateg ies +Ġd iving +çľĭ æĪij +Ġsynt hes +Ġdis ruption +or ate +ï¼Įæľ¬ å®ŀç͍æĸ°åŀĭ +ä¹ĭä¸Ģ çļĦ +æīĵå¼Ģ äºĨ +æī¿ åĬŀ +Ġc ough +ï¼Įè¦ģ ä¹Ī +åĨ° åĨ· +Ġt ones +交éĢļ è¿IJè¾ĵ +Ġf lee +主 æĴŃ +ä¼Ĭ æľĹ +æĵ Ĵ +ĠCONTRIBUT ORS +ĠC K +ç»ĵæŀĦ 示æĦıåĽ¾ +æĺ¯ 好 +T ell +Ġve gg +èµ¢ å¾ĹäºĨ +Ġhypert ension +Ġdis placement +ĠOut look +Ġsent enced +ĠN M +Ġfeed s +Ġ åIJį +èľĤ èľľ +Ġinv asive +Ġelim inating +å¹³ ç±³ +ĠSh aw +äºĨè§£ ä¸Ģä¸ĭ +l ibrary +Sh adow +Dep artment +声 说éģĵ +_f loat +两 åĽ½ +c pp +ĠC er +çŃ Ŀ +W IN +ge ometry +表 è¿° +Ġtechn icians +Ġparent al +ĠComput ing +Cent ral +it ably +ï¼Į é¢Ħ计 +OK EN +, è°ģ +å¹´ åĨħ +ä¸į åIJ« +äºĭ äºĨ +re lease +.value Of +äºĨä¸Ģ ä¼ļ +åħļ åı² +çļĦå¿ĥ æĢģ +ï¼Į ç«ĭ ++ x +Ġfluct uations +Ġcontin ent +åĽ° æĥij +éĤ£ åĦ¿ +Ġgate way +ä¿¡ èªī +qu it +ĠPro blems +åħĭ åĬĽ +b ug +æĸ° åĬłåĿ¡ +ï¼Į èĦ¸èī² +Ġsq ft +åıĬ 缸åħ³ +ĠIndust ries +éĽ Į +eren cing +交éĢļ äºĭæķħ +NECT ION +eng es +ï¼Įä½Ĩ ä¹Ł +è¶ģ çĿĢ +ä¿ĥ éĶĢ +Ġdown stream +级 åĪ«çļĦ +èµĦæºIJ çļĦ +Ġcr ude +Ġnom inated +æĶ¹ ç¼ĸ +[] { +ĠC ake +çݯå¢ĥ ä¸Ń +at z +Ġsal vation +Ġlaun ches +èĢĥ åı¤ +Ġwilling ness +çĤ¹ çĩĥ +å¾Ĺ èµ· +ĠPartners 
hip +ĠHOLD ERS +è·¯ è¾¹ +ä¼ĺ ç¾İ +Ġm uc +Click Listener +Ġinhib itors +ä¸İ 第äºĮ +g as +Ġsusp icious +ä¸ŃåĽ½ åħ±äº§åħļ +Ġiter ations +Ġquestion ing +- , +çľĭ åİ» +åIJĮ æĥħ +th m +èĽ Ļ +åĹ ¡ +åİĨåı² çļĦ +Found ation +_s um +Ġver bal +Ġelabor ate +ĠTre asury +Ġcum ulative +ä»ĸ èĩªå·± +ĠS I +éĢī åıĸ +_OB JECT +两 端 +ï¼ĮçĦ¶åIJİ åĨį +T el +Ġident ities +ĠFl ight += âĢĿ +çģ« çĪĨ +ĠWH O +s r +Ġpupp y +çĹħ æĪ¿ +Ġbrack et +ĠAl ert +Ġec ological +çĥ ¨ +æĿĤ è´¨ +ĠM eg +微微 ä¸Ģç¬ij +het ics +èĬ ¹ +/ html +LE AN +çİĽ 丽 +ap ital +Ġnob le +管çIJĨ ç³»ç»Ł +ĠOper a +çļĦè¯Ŀ è¯Ń +Ġrock et +骨 å¹² +Ġæľ¬ æľŁ +Ġprofession ally +Add itionally +é© ¿ +é¡¶ å°ĸ +åľ¨ åĨħçļĦ +ä¼Ĺ çĶŁ +Ġf ried +ew ay +F ACE +èĤ ĭ +ĠSc ar +ĠIns ert +RE ATE +çī¡ ä¸¹ +Beaut iful +Ġ" * +æ²³åĮĹ çľģ +Ġirre levant +ĠAl zheimer +ĠC atal +ner g +(d b +ä¾§ å£ģ +Ġè¿Ļ æĹ¶ +Ġcan cers +Ġreplic ation +车 身 +h om +åĬ© æīĭ +å¼Ĥ è®® +Like Liked +Ġ 产åĵģ +Ġmod al +ç± ½ +Ġexceed ed +Ġr ays +Ġsens ible +Ġper mut +两个 æľĪ +è£ ³ +ï¼Į åŃ¦ä¹ł +å°Ĩ è¿ij +_d iff +Ġvirt ue +Ġw r +incinn ati +J eff +a q +Ġblog gers +Ġrenew al +и к +ian e +éĽĨ 群 +Ġemp athy +Ġinject ed +Ġv ir +trans lation +ĠM BA +å¿ħ å¤ĩ +Ġlun gs +Ġgrand father +Ġsh ook +ç͵åĬ¨ 车 +th ood +Ġpres erving +ay ing +Ġcl icks +Ġl oses +u ite +Ġnot ably +åįĹ å®« +æĢİä¹Ī èĥ½ +èľĺ èĽĽ +ç«Ļ çĿĢ +-d o +ĠInstall ation +ĠV ista +ĠC anyon +离 å¿ĥ +Ġsequ encing +Ġcan n +R ay +çĽ² 缮 +åħĪ è¡Į +佩 æľį +ãģ ¤ +, S +å¥ĸ éĩij +Ġsc attered +å¤ĩ åıĹ +pro xy +Ġres in +Ġrest ing +ĠM t +con y +.S ub +ĠChe f +ĠA sp +å¾Ĺ ä¸į +Ġcat ast +åύ æĿIJ +æ¹ĸåįĹ çľģ +ĠS outheast +< ul +D ark +ä¹ĭ æĪĺ +Ġtow el +Ġpat ri +IB ILITY +åįĸ å®¶ +s ym +ĠD ump +Ġd ull +åŁºéĩij ä¼ļ +ĠT ours +ĠNe il +.A ct +ï¼Įåľ¨ è¿Ļ +ĠRepresent ative +篮 æĿ¿ +(c md +Ġmem orial +ĠB less +å°ı ä¼Ļä¼´ +ãĢĤ çͱ +ä¸ĵ å±ŀ +Ġtr unk +EO UT +W L +M V +åİĤ æĪ¿ +Th an +以ä¸Ĭ å°±æĺ¯ +表çݰ 为 +Exper ience +ãĢģ åĩĨç¡® +Ġ(! 
$ +Ġrec reation +н а +ĠC MS +ĠPress ure +æ¸ į +éĻį 临 +Ġreferr al +Ġsh irts +b ps +欺 éªĹ +al c +æł½ åŁ¹ +Ġwhat soever +oler ance +icon ductor +W ay +Ġcrowd ed +ac l +çĶŁ çĹħ +- Q +board ing +up dated +Ġsym path +央 è¡Į +n eg +Ġrail way +pl ed +ĠAD D +ç͵ ç½ij +umb ling +ï¼Į é½IJ +Ġintern ally +en cer +-c ount +é£İ æīĩ +èĢķ åľ° +Ġcont ing +ĠBapt ist +æģ ª +Ġtor que +æİ§èĤ¡ èĤ¡ä¸ľ +å½ĵ æĹ¥ +å°Ĩ æĺ¯ +Ġg rep +ä¼ļè®® 室 +Sign al +娱ä¹IJ åľĪ +A pr +ĠG len +ï¼Į æİ¥ +ĠInst ruction +éħ Į +. second +ig hed +String s +é²ľ èĬ± +Creat ing +è¿Ļä¹Ī åģļ +w aukee +èĩªå·± æĺ¯ +, æ¯Ķ +Ġphys iological +aa S +(t emp +æĪIJ 年人 +d eb +ä¸Ģ æĹģ +Ġ åħ³äºİ +ĠA ds +D rive +缮åīį çļĦ +åĽĽ åŃ£ +time out +{ I +æŁĶ åĴĮ +Ġ åIJİ +ycl ing +Ġcom ed +è·Ł 她 +追 踪 +ĠS che +Ġ第ä¸Ģ çϾ +ser ies +ä¸ĭæĿ¥ äºĨ +Ġbal ancing +è¿Ļ ä¸įæĺ¯ +æ» Ķ +Ġb ake +Ġbo asts +çĭĹ çĭĹ +è¯Ħ åĪĨ +åĩł çİĩ +( other +æµ· 岸 +User name +Ġinitial ized ++ ' +ĠC ritical +åį´ åıĪ +Ġexceed s +Ġa fore +大 çĽĺ +转 ä¼ļ +鼶 éĥ¨ä»¶ +Ġpass port +Ġamaz ed +RN As +ï¼Į 满足 +ï¼Įæĺ¯ ä¸Ģ个 +Ġsan ct +Ġmarg ins +ach i +åĩł ä½ķ +轩 è¾ķ +åĬ¨ çĿĢ +ä¸Ģ èĦļ +æĮģ ä¹ħ +.M ax +ĠS UV +èĩ³ åħ³ +ĠTe achers +ĠPot ter +ĠC ox +- axis +æĪij éĥ½ +Read ing +ï¼Įåı¯ æĥľ +or ida +Ġfro st +.C urrent +Ġanticip ate +os ion +pl aced +æĥ³ èµ·äºĨ +Ġantib iotics +Key words +{ item +åĽº åĮĸ +Ġpul monary +en umer +Ġcos metic +ï¼ĮèĢĮ åľ¨ +ãĢģ çĶŁæ´» +With in +çºł æŃ£ +ĠUt il +Ġped iatric +า ภ+åIJŀ åϬ +æģŃ æķ¬ +ä¹Ķ æ²» +Ġeconom ies +Ġt m +æ»´ æ»´ +? > +Ġadm ire +Ġrec reational +ÑĤ о +Ġpup ils +Ġwond ers +ĠAny way +Ġbegin ners +op athy +åķĬ åķĬ +æİ¨ çIJĨ +ä¸Ń çŃī +è¿ĩ æĿ¥äºĨ +Ġemb edding +çĤİ çĹĩ +æŁł 檬 +| ^ +ĠAr n +æ·± åıĹ +. root +é£İ äºij +设置 çļĦ +SE O +Ġenerget ic +Ġret ros +Ġprec aut +ĠN UM +- ne +Ġ æĺİ +âī ¥ +G H +Ġra p +ĠLe af +Ġstation ary +åħħåĪĨ åıijæĮ¥ +çħ§ å°Ħ +Interview er +Ġbath rooms +; " +R ating +. 
pos +åζ èᝠ+èµĦ产 çļĦ +Ġab b +_en abled +ĠJ os +å®ļ åIJij +éķ¿ äºĨ +æĹł è¯Ń +( info +Ġimpair ment +æĬ¤ åį« +èīº äºº +åįĬ åľº +Ġdam aging +èĪĨ 论 +Ġworks heets +In side +ĠIn nov +ĠP oker +.C lose +St aff +Ġab st +ï¼Į ä¸Ĭæµ· +oc on +Ġl ining +clos ures +Ġgame play +-sh irt +å¹´ 第 +ï¼Į éģĤ +å±Ģ çļĦ +说 åĩºæĿ¥ +Ġhe y +Ġ 顾 +ä¸Ń æĸŃ +p H +omb ie +Ġyear ly +åĴĮ æ°´ +rehens ion +_ active +ĠK ill +Ġc f +ä¸ĩ ä¸Ģ +ç¬ij æĦı +ĠId aho +.sub string +æĢ§ 强 +ol ia +ãĢģ C +pl ing +.cl one +ï¼Įåıª 好 +Ġproblem atic +ä»ĸ 对 +Ġphosph ory +ç»ĵ æĻ¶ +det ails +ĠNash ville +_t mp +Ġp id +ĠC BS +ĠEd wards +Part y +åºķ æĿ¿ +ien na +Ġste pping +Pub lisher +D ot +oc yte +ï¼Įä¸į æķ¢ +Not ice +ç¨ ł +ĠStrateg ic +ä¸Ģ è¶Ł +ä»· çļĦ +Ġundert aken +Ġf ights +è°ĥ åij³ +äºİ ä¸Ģ +æĺ¯ å°ı +or p +Test ing +M att +ç» ħ +岩 çŁ³ +ĠTh under +ĠS we +Ġe lem +Init ialize +åħ·ä½ĵ å®ŀæĸ½æĸ¹å¼ı +Ïī ν +æĬķ éĻį +æģ¶ æĦı +Ġd umb +æ¹ĸåĮĹ çľģ +éĤ ¢ +(N ULL +ãĢĤ åIJĦ +um bs +ä¸İ æĪij +l ining +Ġfe ather +Ġi Tunes +- play +æĿ¥ åģļ +Ġaff ection +ĠA TM +ç´« èī² +Ġdeb ts +æĺ¾ çݰ +z b +Ð Ĥ +ĠNorth west +Ġexplo it +æį¢ çĥŃ +_tr ue +ins ula +Ġplur ality +ĠMont real +Ġdevast ating +ĠK u +为ä»Ģä¹Ī ä¼ļ +æĹ¶ éķ¿ +ĠSh ared +åıĹ äºĨ +affe ine +ĠN intendo +çł Į +Pl ug +g ow +v l +Ġprefer ably +ä¸Ģ è½® +ex ists +T ick +Ġnumer ic +up iter +åįķä½į çļĦ +ä¾ Ī +Ġwh isk +Pref erences +Ġr é +, ä¸ī +ĠH udson +J apan +ĠL imit +å· į +åħĥ å¹´ +Ġn i +çı ij +Ġl akes +Z Z +诸 侯 +{ O +à ´ +Ġhon our +ä¸İ 第ä¸Ģ +ãĢģ æ³ķè§Ħ +w ic +Ġb loom +ĠTra ffic +[ d +ex pl +ĠVit amin +Ġfab rics +P ipeline +ç»ıèIJ¥ èĮĥåĽ´ +Ġpres ervation +ĠEv olution +æ¦ ´ +T OP +åŁºéĩij管çIJĨ 人 +ä¸Ģ å°ı +ĠWell s +ï¼Į ä¹Ŀ +Ġload er +Ġm all +Ġfulf illing +èĥ½ 为 +Ġdos age +ï¼Į è´Łè´£ +ãĢĤ ä¹Łè®¸ +ĠFin n +ro ts +op edia +ï¼Ī ä¸ī +ç½ij æł¼ +b uilder +é¢Ŀ 度 +ĠC el +èµ° æĿ¥ +Ġrout ing +Ġfinger print +éĴ ŀ +Prof essional +Ġind igenous +è°ĥ çIJĨ +ĠR N +ĠVol unte +è½» å¾® +.d ir +, & +ĠX CT +ĠSh ip +说 çļĦæĺ¯ +æĻ ı +ĠAb u +Ġ èĩ³äºİ +. 
el +* } +éĵĿ åIJĪéĩij +æįķ æįī +Ġbrid ges +ï¼Į å¿Ļ +ĠC rypto +线 åľĪ +Ġsubject ive +Comp lex +Ġdef ic +Ġinstruct ed +æĺ¯ ä¸įä¼ļ +ãĥ ķ +_time out +ant o +Ġsynchron ized +ãĢģ 产åĵģ +Christ ian +RO P +Ġhorm ones +J K +Ġ$ - +é»ij çļĦ +ĠG ear +th anks +Ġx y +ç»Ħ è£ħ +ĠJul ie +ĠN P +OBILE PHONE +è® ³ +URRE NT +ind ers +ĠS plit +!!!! !!!! +èªī 为 +人 人 +}\ , +_ select +åĪĿå§ĭ åĮĸ +.g z +: ", +Comp letion +Ġsk irt +Ġe in +Ġexc el +计åħ¥ å½ĵæľŁæįŁçĽĬ +ĠS r +Ġsurviv ors +å°± ä¸įæĺ¯ +Ġnow adays +Ġpos es +âĢĺ s +ĠEduc ational +带 èµ° +ä¿Ŀ å®Ī +Ġst aining +tras ound +Ġrecycl ed +. entity +ĠL ots +Ġcomp ilation +att a +ĠB anks +Ġsens ory +Ġhard est +Ġprison ers +ï¼Į说 ä¸įå®ļ +Ġs age +ĠN ad +ï¼Į çĶŁæ´» +æĸ IJ +ç¨į ç¨į +Ġshut down +ag ged +_C R +UBL IC +S UB +co h +ĠVeter ans +Ġlic ence +åŃĻ åŃIJ +Ġshort s +è· ĭ +Ġlip id +, ä½ľä¸º +D X +ĠDog s +ï¼ĮæĪij åľ¨ +_c lose +èĹ © +çĽijäºĭ ä¼ļ +Ġun law +- header +Ġcomplex es +è¡Ĺ 头 +ä»Ĭ å¹´çļĦ +" ][" +ĠColumb us +Ġex otic +æĭįäºĨ æĭį +_point s +ĠBrazil ian +çļĦåľ° ä½į +å°Ķ 夫 +ĠM akes +Ġc yst +Ġend if +pi pe +Ġarth ritis +åĽŀ éģ¿ +æĪ¿ éŨ +ĠTH AT +_P RE +ĠSl ot +- so +S ources +Ġac ne +( + +Ġ第 ä¸ĥ +Ġcris p +ï¼Į åŁºæľ¬ +æĺĬ 天 +ï¼Į è¡Ģ +ç¾½ æ¯Ľ +åĨ· æ¼ł +èµ· æŃ¥ +Ġcrow ds +çijŀ 士 +ĠBl ues +og onal +ãĢģ çĶŁ +Res p +åIJ¯ åıij +Ġreserv oir +åι 车 +ĠJere my +ç®Ģ åİĨ +èĮ µ +( img +se ed +_n ull +C riteria +Ġle ap +ull ivan +ĠP repare +Z oom +çļĦ说 æ³ķ +c op +Ġd ioxide +per t +æĬĺ èħ¾ +Ġhead lines +声 åĵį +ĠHon or +.d art +å°± å¾Ī +å½¢å¼ı çļĦ +æĦıä¹ī çļĦ +äºĨ æĪijçļĦ +ss on +Ġsound ed +.in ternal +per iod +ï¼Į åĪļåĪļ +G UI +P en +çģ ¸ +( O +Ġfix es +ãĢĤ å¸ĮæľĽ +åħļå§Ķ 书记 +两 岸 +ï¼Į è¾ĥ +æĻ¯ èī² +ä¹ĭ æīĢ +Ġobserv ing +ĠBM W +; < +ĠHe ader +DM I +ft est +å°Ĩ ç»§ç»Ń +è½® å»ĵ +Ġsuper visor +Th omas +f etch +è¡Į æĺŁ +å°±æĺ¯ è¦ģ +Ass ign +Ġw oke +Ġadministr ators +Ġtrans ient +ï¼Į 羣æŃ£ +åĢŁ éī´ +Ġco zy +åĪ» æĦı +æ¸ º +Ġal phabet +ig uous +ĠPort al +Ġcr ushed +ĠGro ve +ĠFred er +at omy +? 
' +mit ter +.H ttp +Ġ( ** +Ġj ealous +ĠC ities +ï¼Į转 身 +èĬ · +ĠK in +ä¹Ł å¾Ĺ +Ġrandom ized +ĠJam ie +鼷 è¾¾ +ï¿ ¥ +ĠØ ¨ +çĮ ¿ +t bl +åĵģ å°Ŀ +éŨ æ§Ľ +BO X +W all +æĽ¾ åľ¨ +p ine +Ġa in +ry s +åĽĽ 人 +åĨµ ä¸Ķ +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠĠ +ĠU TC +\ alpha +Ġrecess ion +åł ķ +ffff ff +å½Ĵ å±ŀäºİ +åħ¨ å¹´ +ang led +éĿł çĿĢ +ra ced +ĠP ra +çī¢ åĽº +ĠUk rainian +Bet ween +ï¼Į å·´ +è¿ĺ ä¸įæĺ¯ +Ġspokes man +åŁİ åł¡ +è¯Ĺ è¯į +ĠAn swers +ĠJeff erson +Ġre construct +æ¢ § +D estroy +éĢĥ è·ij +Ġdiff ers +Al gorithm +h ouses +Ġe Bay +ĠBase ball +æīĢåľ¨ çļĦ +- conf +an vas +_T AG +ur f +å½ĵ åľ°çļĦ +Ġcharacter ization +ĠÏ ī +Ġr ises +Ġsh red +Pro blem +èĢģ 太太 +ĠP ull +ï¼Įéĥ½ ä¼ļ +- space +å¿« çĤ¹ +çļĦ æŃ£ +m L +d ie +ĠN um +客 è¿IJ +ĠCo hen +åİĨ ç»ı +Ġst yl +an ese +View Model +ĠO pp +失 踪 +åĴĮ ä»ĸçļĦ +ĠCom ing +ĠCoord inator +_ ret +è¿ĺæĺ¯ æľī +æķĻ çłĶ +ut ation +Ġch ase +ä¿¡æģ¯ æĬĢæľ¯ +Ġcongr at +st en +ĠC ourses +Ġfra ctions +å·² çĦ¶ +ãĢ ī +br and +Ġ} } +éĿĴ å±± +olly wood +æīĢ æľª +ik er +æķ° 缮 +ï¼ļ æľ¬ +odd ed +Ġsuscept ible +ĠHill ary +_CL K +par able +ĠN HL +ĠInd igenous +æİ¨ è¡Į +ĠCh ile +çĽĴ åŃIJ +ĠVin cent +æ»ļ åĬ¨ +ĠZ en +- get +ĠVAL UES +ï¼Įä¸į å°ij +P rior +ĠHel en +è¿Ŀæ³ķ è¡Į为 +çļĦ æľĢ大 +ĠEffect ive +ãĢģ åĮĹ京 +( @" +g c +ãĢĤ æĮīçħ§ +æĤ² åī§ +æŃ¦ åĬŁ +Ġpump kin +op us +Ġexpress ly +ä»Ģä¹Ī éĥ½ +Ġc t +ï¼Į åħ¥ +M X +-C o +Ġno on +XX X +_g raph +W ood +Ġco il +ï¼Į ä¸Ĭè¿° +ush i +urt le +Ġatt rs +æģ¢å¤į äºĨ +_IR Q +BA SE +深深 çļĦ +ĠDou g +role um +æīĵ è´¥ +che str +èĨ Ľ +Ġrad ial +Ġsp ends +ĠDemocr at +ud get +Ġ} \ +Ġc ables +.res olve +b uff +çĿ£ ä¿ĥ +æĤ¬ æµ® +è§Ħ 竳 +read s +cond itions +see ing +幸ç¦ı çļĦ +plan ation +-d oc +) > +C am +Ġexhaust ed +æ¸ħ æĻ¨ +ĠMount ains +ar an +ãĢĤ 以ä¸ĭ +çĥŃ è¡Ģ +ë Ĭ +Ġre use +Ġim pe +.F rom +rolog y +Ġlast s +, e +é ł +.t ar +ĠHol land +p ng +è re +- o +Ġhe els +ĠIll ust +han ie +ĠB oost +By Name +H a +æľ¯ åIJİ +Ġplan ets +Separ ator +å¼Ģå±ķ äºĨ +åŁĥ åıĬ +/ web +_t otal +s ingle +Ġd ementia +_ ACC +代 çļĦ +宪 æ³ķ +ĠN ap +ï¼Į 
åı¦ä¸Ģ +å½ĵ ä½ł +èĢħ åĴĮ +.prot obuf +ĠA mazing +ĠPl astic +_S HA += n +å±ĭ é¡¶ +ĠT F +âĶĢâĶĢ âĶĢâĶĢ +Ġ èĭ¥ +管çIJĨ åĪ¶åº¦ +Ġun fortunate +Ġre build +ĠPol l +Ġ åħ« +rack ing +list ed +éĻ¢ åŃIJ +ï¼ļ " +åIJĮ ç±» +Ġ éĿŀ +Ġcreat ors +Ġ ------------------------------------------------ +Ġsign atures +and el +Ġv apor +Ġfulf illed +æľī 大 +ĠProf ession +ĠP ush +åΰåºķ æĺ¯ +ï¼Į æĿĢ +Ġyoung est +. offset +Y ES +æĴ¤ éĶĢ +Ġd well +ĠW AY +Ġ_ . +åĮ» æĬ¤ +ak u +ĠB ike +Ġaccum ulated +Ġsk ew +çĶ· åıĭ +Ġconsequ ently +Ġdown s +bo ys +_c ap +P romise +l ia +ĠLook s +Ġinfect ious +ï¼ĮèĢĮ åIJİ +Ġadequ ately +æĪij 对 +[ a +ï¼Į ç»Ī +ject ed +Ġaim ing +ON D +. Image +ĠD ual +æĹ¥ ç͵ +< I +i agnostics +èĢģ èĻİ +ĠHarr ison +- inter +åľ¨ 天 +ĠSh akespeare +ood le +ä¸Ģ åı° +éĹ® éĹ® +çıŃ åŃIJ +æĺİ äºĨ +B oot +ï¼Ī åĮħæĭ¬ +Ġcraft ed +( code +ĠRog ers +è¯ģåΏ æĬķèµĦåŁºéĩij +Ġgam ers +ä¸į èĥľ +æIJľç´¢ å¼ķæĵİ +Ġv ou +ï¼Įä½Ĩ è¿Ļ +ĠF ee +ï¼Į éĢģ +æļij åģĩ +G I +Ġcapt uring +cel and +Ġz eros +ĠW u +.A tt +, M +å«Įçĸij 人 +Ġseam less +æĸ° 浪 +å¹³åı° çļĦ +æīŃ æĽ² +ĠL ights +Re place +K ernel +" ( +ĠRes pond +ĠTH EN +an ie +åĬ¡ å¿ħ +åIJį 人 +Ġ äºİ +Ġbe verage +丨 丨 +Ġgener ators +çļĦ åIJĹ +Ġ ], +ï¼Įæ¯ı å¹´ +åĮĸ åĴĮ +èµ° è·¯ +b old +Ġcoll ar +ĠC RC +æ²» å®ī +ä¹Ł æ¯Ķè¾ĥ +ç§° åĶ¿ +Ġfl ies +ĠNEW S +ĠFor ces +ĠAlber ta +ĠK il +as se +ï¼Į æļĹ +. location +T OR +æĹ¶ åĪĨ +ï¼Į æŁ³ +Ġsuper market +, åIJij +Ġ ä½ľèĢħ +Ġcoll agen +_ QU +å¾Ī é«ĺçļĦ +_ App +ï¼Įä¸Ģ çĤ¹ +_n odes +Ġlad der +åĩĨå¤ĩ 好 +Ġrun ners +æŀ ī +g m +Ġprint able +ĠV S +column s +d h +ãĢĤ èĩ³ +Tr ump +Ġdr one +åĨ³å®ļ äºĨ +Ġcoal ition +Ġspecial izes +ä¸įä»ħ æĺ¯ +Ġhistor ically +æ³ķ åºŃ +Ġd are +uss ia +åĻ Ĺ +ï¼Į åģļ好 +iv ation +! . +çĿĢ ä½ł +ed i +çļĦ é¢ľèī² +æ¸ ² +Ġmanif old +. 
Empty +ĠK az +Ġrecogn izes +æĪ· ç±į +ĠN FT +_g rad +om ics +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠĠĠ +Ġpor ch +Ġsp o +Ġan ime +Ġm ism +Ġesc aped +Ġevolution ary +key word +Ġdep icted +_ items +èĬĿ 麻 +Ġt t +Ġvisual ization +ĠT ot +饱 满 +ud ed +о Ñģ +强 è¿« +ograph ics +ĠSERV ICES +Ġthy roid +osp ace +Ġun cle +P ast +ç§ĭ åŃ£ +Ġf ade +ï¼Į éĩįæĸ° +åĩºçĶŁ äºİ +ãĢĤ è¿Ļæł·çļĦ +ĠAm sterdam +Ġexp ired +ĠHar bor +为主 é¢ĺ +éŁ ¶ +se ys +ãĢģ æĻºèĥ½ +.object s +br as +ĠProgram ming +_ emb +_t imer +ĠRes olution +ï¼ ķ +ä¹ĭ æ°Ķ +Further more +åľ¨ åĽ½åĨħ +Ġcher ry +Ġspect ro +Ġsand wich +-m e +_ weight +Ġ åĽłæŃ¤ +æĪIJ éķ¿çļĦ +ĠWhat sApp +æµ´ 室 +m ith +æĦŁ æĤŁ +éĹŃ ä¸Ĭ +å¼¥ 漫 +Ġup ward +S ys +ĠS id +ĠW IN +å¿ĥ åĬ¨ +, ç¾İåĽ½ +Ġa ids +åħ° å·ŀ +ï¼Į åħ« +Ġc i +Ġnot ices +æķ´ æķ´ +ĠWild life +Ġsc aled +çĨ Ħ +ä¹Ł éľĢè¦ģ +L AY +Ġwith d +åĸľ åī§ +Ġeven ly +主è¦ģ æľī +Ġright eous +_d escription +gom ery +æĹ¥å¸¸ çĶŁæ´» +ĠP ORT +(" - +Ut ility +以ä¸ĭ çļĦ +, ä»ĸçļĦ +MS G +Ġw iring +ĠAd visor +Ġrespond ent +[... ] +æī§ ä¸ļ +_ only +Ġg riev +` : +_ o +Ġw ip +arg v +lo id +å°ij éĩı +(p age +Any way +å© ¶ +Class Name +çͳ è´Ń +ĠZ end +, èĥ½å¤Ł +åĽ¢ çļĦ +ĠExcell ence +Ġtra ces +Trans lation +ĠS erv +Ġ 主 +ä¸įä»ħä»ħ æĺ¯ +Ġprob abilities +ĠDoc uments +Ġre he +ĠSh ah +çī¹ çļĦ +空 çļĦ +éĿĻ ç͵ +说ä¸į å®ļ +ãĢĤ 第ä¸ī +åĪĨ å±Ģ +Ġd uck +am med +_S W +æĭ¿ åĩºæĿ¥ +åħĶ åŃIJ +鼶 é£Ł +éĢī æĭĶ +ĠJ ar +åİ» åIJ§ +ĠPet e +Ġrabb it +Ġcomm ence +好 åIJĥ +Ġm asters +e er +æ¦ » +ival ent +ĠPur pose +Ġbright ness +ĠLeg end +Ġt i +çıį è´µ +Ġf ost +m ia +éĿ ¡ +_ ctrl +(d ir +ĠSustain able +æĪĺæĸĹ åĬĽ +è¿Ł çĸij +lu a +ï¼Ī æĪĸ +ï¼Į æĺİ天 +票 æĪ¿ +åĽŀ åIJĪ +âĢĶâĢĶ âĢĿ +ĠM ak +ĠN umbers +bre aking +Ġextract s +绽 æĶ¾ +ç«Ļ çĤ¹ +> { +Ġwater proof +éĻ· éĺ± +is se +.L inq +. pl +.... ... +." ? 
+é¢Ħ åħĪ +AB OUT +表éĿ¢ ä¸Ĭ +KN OWN +æĹł ç¼Ŀ +çļĦ éĹ® +Ġpol ls +ĠDef endants +ãĢĤ 说 +employ ed +Ġ æ¥ļ +âĢľ There +åĽº çĦ¶ +ï¼ļ å°Ĩ +ur ry +Ġten ants +pos ure +.R ef +Ġcoord inator +B s +ï¼ļ âĢĺ +Ġbra ins +( app +( sp +sw ith +\ mathcal +åľ¨ å®¶éĩĮ +å½ĵ åģļ +G al +SD K +Ġign oring +Tr ade +é£İ æĥħ +)* - +æµģ 失 +Ġ ä¹Ŀ +å¤ĸ 表 +ĠPhill ips +ï¼Į 第ä¸ī +ĠK yle +æľĽ åİ» +ien ced +Ġcor rections +Ġimp aired +b uilt +_p riv +Ġer red +Ġdecor ative +å½° æĺ¾ +enez uel +ĠT ig +ĠCh ip +( min +Å « +è̽ 误 +/ issues +Ġtou g +ĠM oses +ab is +åIJĦ æĸ¹ +ĠE VER +% åĴĮ +ä»· éĴ± +Ġreimb urse +.f ield +Ġfuck ing +诡 å¼Ĥ +( reg +ij n +ĠIm agine +ĠWh ole +å¦Ī çļĦ +Ġhol istic +æĹ¶ 常 +F OR +ud s +说 çļĦè¯Ŀ +ç´¯ 积 +åĪĽä¸ļ æĿ¿ +ãĢģ æľ¬ +ĠS lo +.d o +ĠH oney +Ġfu els +ï¼ĮåıĪ æĺ¯ +帮 æī¶ +ä¸Ģ å¤ľ +G s +Ġcorrel ations +ĠF P +l int +éĽĨä¸Ń åľ¨ +Com parison +ä¸į å¿ĺ +çļĦ æĥħå½¢ +_ pr +é«ĺè´¨éĩı åıijå±ķ +ve h +ç¬ijçĿĢ è¯´éģĵ +é£İ åı£ +åij¨ äºĶ +Ġpron ounced +éĢį éģ¥ +ï¼Į 约 +ĠH ook +æ´¾ çļĦ +åı¯ è¡Į +Ex cellent +Jo ined +Ġmer its +Ġcom ics +人æ°ij åĮ»éĻ¢ +éĿ ¶ +ĠHe y +ï¼Į å·¦ +ĠEx cellent +Ġph y +Ġequ ip +ĠInj ury +Ġhyg iene +E VER +å®ŀåľ¨æĺ¯ 太 +( View +ï¼Į ä¸ĥ +Int o +ĠNC AA +Ġ ä½ķ +Un iform +ĠReg ardless +çĶŁ èĤĸ +.pro cess +ç į +ï¼Į è¿ľ +Ġse lections +ç¼ Ķ +Ġcompet itions +ĠD H +gu ard +ĠBal ance +æľº ä½ĵ +è¯Ŀ è¯Ń +Ġm t +@ Test +çļĦ ç½ij绾 +æ¯Ķ éĩį +Ġassoci ates +éĢŁ çİĩ +Ġper me +Ġpse ud +se ctions +m r +D H +; // +ew idth +Ġw ag +å¤ĸ æ±ĩ +idel ity +èĭ± è¶ħ +ĠM ol +Ġu pl +åľ¨ æ°´ +æĬĵ 好 +Ġharass ment +k ov +åºŁ å¼ĥ +Ġuns ure +ĠBang ladesh +女 åıĭ +åłª ç§° +Ġlast ed +ç¼ĸ åī§ +ĠR ender +H ide +é£Łåĵģ å®īåħ¨ +Ġsupp ression +Ġ 两人 +rist ol +ĠM AN +æĸ¹ å½¢ +[ r +ph alt +å¹² é¢Ħ +åĥµ å°¸ +åĹ ħ +text tt +( it +设置 为 +éĺµ éĺµ +åıijå¸ĥ ä¼ļ +æ¦Ĥ æĭ¬ +f rames +æľį ä»İ +ATE G +- container +Ġm aternal +带 ç»Ļ +触 æİ§ +èĬ Ń +S pecific +Ġout reach +Ġclin ics +èķ Ĭ +\ hline +åıĪ ä¸įæĺ¯ +æĬ¥ éĶĻ +çºł ç¼ł +Ġoptim istic +ĠL anka +Ġsens ation +ĠAppend ix +_s l +ss h +Process ing +Ġsub group +ĠR ug +Ġthere after +å´ĩ æĭľ +åıĬ 以ä¸Ĭ 
+ï¼Į æĢ¥ +ĠUn ique +äºĶ è¡Į +Ġcolon ial +以 åħ¶ +w ave +ãĢģ ä¸ĵä¸ļ +Ġ( < +Ġimp ress +Ġoff shore +çļĦ å®¶ +E ar +ĠFl ore +åIJIJ æ§½ +æľĪ ä¸Ń +ĠPl ugin +èĥ¡ åŃIJ +è¶ħè¿ĩ äºĨ +Vert ical +çļĦä¸Ģ 天 +sh op +åIJ¸å¼ķ äºĨ +Ġto e +t re +im i +Ġb acon +åľ¨ æĪijçļĦ +, å°±ä¼ļ +åĻ © +æĸ½ åĬł +ĠP OS +og ens +åĮħ 容 +T Y +ï¼ļ å¦Ĥæŀľ +Ġ ä¸ĭ +Ġde position +Ġex ert +Ġspec imens +Sl ice +ĠRed dit +el ist +æľī ä½ķ +æİ© 饰 +Ġc nt +Ġwa iver +Ġup side +оРº +Ġmerch ant +åıĺ å¼Ĥ +v ity +å°± æŃ¤ +s z +çĿĢ å°ı +c ounter +ä¸į åģļ +ÏĦ η +èµ· è¯ī +/ upload +ä¸į 论 +Ġ åĽ½ +.b utton +çļĦ è·¯ +Ġob ey +Ġd ash +æľŁæľ« ä½Ļé¢Ŀ +ç´ł æĿIJ +å®īå¾½ çľģ +_M AP +Ġch ina +ament e +åı¯éĿł æĢ§ +ï¼Įè¿Ļ ä¹Ł +ï¼Į åħ³ +åį« è§Ĩ +- click +_st at ++ y +å£ ¹ +(" < +.n o +u ably +åıĺå¾Ĺ æĽ´åĬł +ĠLe ather +åı· çļĦ +let al +ãĢĤ äºĭå®ŀä¸Ĭ +X L +åįĬ å¤ľ +x FFFFFFFF +ä¸ĸ å®¶ +.sh ould +Mill is +W riting +$ lang +Ġhonest y +çīµ å¼ķ +ĠAssoci ated +ĠAdv oc +ogram s +b u +Ġint ric +ĠG raphics +ĠN g +çļĦ çĶŁåij½ +ĠMc Donald +Ge orge +éĩĮ ç¨ĭ +ä»ĵ åĤ¨ +-pro cess +使 人 +ãĢĤ åIJĮ +ĠM ist +Ġnation ally +synt hesize +ç¬Ķ è¯ķ +ï¼ĮæľĢ è¿ij +Ġdoc s +ass ium +Ġmar ble +çIJĥ çļĦ +\ . 
+Ġph p +Ġ 页 +çĴ IJ +Iss ue +Ġ} ); +é»ij è¡£ +< typename +< uint +Sen ior +ç§ ĥ +éĴ Ľ +annot ations +åī¯ ä½ľç͍ +太 åİŁ +Ġk ills +flow er +ex us +_C OM +ãģ Ī +ï¼Į æİ¥ä¸ĭæĿ¥ +è¿· èĮ« +ï¼Į ç«Ļåľ¨ +æİĴ æ°Ķ +Ġstart ups +urre ction +ĠEx port +ruit ment +Ú © +Ġs ie +å¥Ķ è·ij +éĤ£ å°± +_P ACK +\< ^ +, q +Ġpo inters +ip ation +Ġover ly +åIJij éĩı +ru ck +Ġdirect ories +ĠT race +Com mercial +Ġpresent ly +Ġ æĮĩ +ĠL V +, L +A ctor +Check ed +m f +Ġn odded +亲 è¿ij +Ġinvestig ators +w ire +AD C +ãĢģ å¸Ĥåľº +ĠN W +Ġtight ly +åΰ ä»ĸ +è¿ŀæİ¥ çļĦ +um ption +ãĢĤ æĹłè®º +楼 çļĦ +Ġsun set +å±ħ å®¶ +ĠF X +èĮ Ĺ +C annot +=" ${ +ãĤ · +èĤ ĩ +Ġtransform ing +Cal ifornia +Ġrepro duction +ĠW es +åĩº å¤Ħ +åľ¨ å¿ĥéĩĮ +ĠGar age +SE S +Ġcom o +Âł 她 +çĸ ļ +Ġh ilar +-w ith +Ġf ict +Ġtransform ations +Ġmal icious +Ġh ay +Ġan kle +Ġval uation +âĢ Ĥ +S HA +.S upp +Ġhe ights +Rel ations +ï¼Ľ å¦Ĥæŀľ +_b uild +ä¿Ŀè¯ģ éĩij +Ġenthusi asts +p it +Ġtr illion +ĠT ik +ä»į æĺ¯ +举 èµ· += x +ĠB ir +Ġre novation +Ġr ubb +Ġb end +Ġarg c +ĠH ood +_s ys +iss ing +Ġinitial ization +好 åIJĹ +纵 åIJij +Ġworry ing +Ġ erg +èį « +è§ ħ +åĸĥ åĸĥ +Ġenc ounters +ï¼Į使 åħ¶ +å°Ĩ 她 +W ashington +åĨĻ åŃĹ +(' -- +们 åľ¨ +Track er +æľº ç͵ +Ġin accur +Ġv ivid +ĠW ag +Tra ining +ï¼Į åħį +åİŁæĸĩ åľ°åĿĢ +Ġsec uring +æĪIJ 人 +端 éĥ¨ +æ·· æ²Į +Ġe ighth +.j ackson +åľ¨ ä¸ĢäºĽ +硬 åĮĸ +E IN +"> & +åľŁ è̳ +Ġalgebra ic +æıIJ åıĬ +V ERT +UE ST +Ġcarb ohyd +ãĢģ ç¾İ +ĠNeed s +æķ° æİ§ +box es +æ²ī 浸 +ĠImport ant +ĠEgypt ian +ĠTrad itional +Ġr ushed +Ġst a +.p age +èĢĥ çĤ¹ +h or +(e lement +Ġapopt osis +Ġconsult ants +P an +ĠTyp ically +æĶ¶ 纳 +ĠD L +ĠE thereum +ï¼Į éĥ¨åĪĨ +æŃ¤ å¤Ħ +_T ABLE +Ġdeput y +Ġdr illing +Ġc ss +ä¸ŃåĽ½ 人æ°ij +ĠF uel +Ġv est +ath s +èĤł éģĵ +T rend +Ġsp acing +B in +幸 好 +Ġinc urred +çĹħ åĽł +Sp ell +head s +ä¿Ŀ ç½Ĺ +ä¸įåIJĮ äºİ +âĢľ A +åĹ ¦ +N d +Ġqu asi +Ġpl ag +缸 ç»§ +udd y +ĠL ayer +Ġfl oral +ä¸Ģ æľŁ +ãģ£ ãģŁ +ĠTes la +ter ms +News letter +çīĽ ä»Ķ +Ġreform s +Ġst ance +ĠLead ers +CE O +ĠCon n +çļĩ 马 +UT TON +åĪĨ å·¥ +Ġdec oder +åĿ 
¯ +æĻ Į +Ġb ins +Ġunder going +纵 横 +æľī人 说 +iling ual +ĠTour ism +ï¼Į åĸĿ +åľ¨ èĩªå·±çļĦ +使 åħ¶ +Ġest e +< float +Ġad missions +Hot el +Ġtra ff +ĠFin land +Ġge ographic +Ġs f +ĠG onz +沿 æµ· +ĠT A +Ġwe ed +imp lementation +Ġbr ass +çļĦ ç»ıæµİ +å¼Ģ èĬ± +Ġinit iate +yl um +- em +Ġmet res +Ġrese ar +Ġlegisl ature +ĠD aw +æĪ ® +èĬ ľ +å¦ ŀ +ä¸Ģ åĪĢ +, å·²ç»ı +cent ering +主èIJ¥ ä¸ļåĬ¡ +ï¼ĮæŃ£ 好 +ASS WORD +Ġ åIJ¦ +! ). +################################ ################################ +Ġsubst itution +æĶ¹éĿ© å¼ĢæĶ¾ +Ġtast ing +, èĭ¥ +_d at +à¸ Ń +Ġsh adows +ï¼Įæľī çĿĢ +ĠSk ip +ant ics +_F E +è¿ĺæľī ä»Ģä¹Ī +Ġlocal ization +æĢ ł +Ġdecor ations +ĠL an +c ott +æµ· åħ³ +Ġmob il +çļĦ åİĭåĬĽ +" What +ĠOcc up +人 ä¸İ +r oute +çŃī 缸åħ³ +Ġmir rors +åıĮ åIJij +æī Ĵ +Ġworks pace +ĠElect rical +èģ Ĩ +è¾ŀ èģĮ +Ġb iod +èijĹ ä½ľ +诱 æĥij +Ġclass Name +Ġsens ing +ĉĉĉĉ ĉĉĉĉĉĉ +èĮĥ çķ´ +F re +Ġcollect or +_res ource +Ġn ort +ĠL odge +ä½İ è°ĥ +Ġinadequ ate +ĠV a +åı¯ä»¥ çľĭåĩº +ĠT itan +çļĦ ç͍ +ĠIran ian +{item ize +Ġmist aken +ĠSh a +ĠJul ia +åIJĥ è¿ĩ +-se ason +. ad +ĠK on +_c v +Ġaccident ally +éķ¿ è¾¾ +æĻļ äºĨ +Ġgive away +Ġweakness es +æ¼Ķ ç»ĥ +You ng +Ġ è°ģ +å¯ ¥ +.R un +æī¬ å·ŀ +Ġfree zing +Ġrel ieve +Ph ys +Ġaver aged +J oe +get Name +Ġcopy ing +Ġcommit ments +Oper ations +æĥ³ åĬŀæ³ķ +çζ åŃIJ +m ons +ä¸ĥ å¹´ +Cl aim +\ le +n as +ĠK l +æĺ¥ é£İ +åľŁè̳ åħ¶ +Ġnut rient +Ġt c +æłı 缮 +Ġcitizens hip +ä»ĭç»į ä¸Ģä¸ĭ +Ġcapt ures +ãĢĤ 马 +ä¸Ģ缴 æĺ¯ +åħĭ çļĦ +N H +Ġt ier +çīĪ çļĦ +com ments +åĬ³åĬ¨ èĢħ +ĠR X +ãĥ³ ãĥ +. 
Config +ORM AL +Ġreput able +ä¸į èĪĴæľį +& P +ï¼Įä¹Ł è¦ģ +ï¼Į åĵªéĩĮ +ĠNot ification +_s ample +ĠY outube +åIJĦ çķĮ +ere k +cre ens +æ²Ļåıij ä¸Ĭ +MB OL +.st op +èĭ Ľ +æĥħ æĦ¿ +两 è¾¹ +å¸Ĥ æĶ¿ +% å·¦åı³ +åıijå±ķ åĴĮ +Ġresid ue +-b ar +è¯ķ çĿĢ +Ġ çľĭ +est ock +log s +aterial s +ĠMont e +è¯ķ æİ¢ +åıĺåİĭ åύ +è¿ĺ 款 +è¶Ĭ æĺ¯ +ĠDec ision +éĿŀ åĩ¡ +ung en +, åħĪ +Ġinv ented +Ġstick ing +Ġenjoy ment +ĠEd inburgh +ar us +以åīį çļĦ +ĠR uth +Ġdoub ts +ĠP ointer +Ġaff idav +åIJĪæ³ķ æĿĥçĽĬ +c ost +Ġinterf ere +.d ll +ĠPark ing +s ch +ver ages +ĠF ourier +çļĩ 宫 +éĵ ® +udd en +æĪij å¾Ī +ĠSPE CIAL +éĺ´ è°ĭ +t ube +Ġelim ination +é» ¯ +å¤į æ´» +Ġtutorial s +Ġsl a +åħ³æ³¨ çļĦ +çĶŁ äºİ +Ġsoc ially +] " +ĠM ine +.f ramework +å¥ĸ 项 +Rec ogn +Ġagg rav +çϾåĪĨ çĤ¹ +ä¸İ ä½ł +IT LE +ĠLi u +ĠTe levision +ĠF F +B ag +, ä»Ĭå¹´ +ãĢģ åĮ»çĸĹ +åIJij æĪij +Ġpe ek +ribut or +çĻ» ä¸Ĭ +ĠP RE +ĠF oster +ros ion +, åĥı +Ġexp ans +ĠSu z +åħĭ æĭī +å¾· çļĦ +Ġbath s +åĺ Ģ +å¾Ĺ æĽ´ +Ġres ide +å¾Ī éķ¿ +( query +Can ada +und ers +'] [$ +å«ī å¦Ĵ +ĠAd apt +ĠN umer +z t +Ġp ent +衬 è¡« +鼷 éľĨ +Ġtestim on +Ġfram eworks +Americ a +P B +ĠM EM +Ġcontact ing +STR ING +. args +ãĢ Ī +åĥ ļ +ä¼ļ å½±åĵį +ĠP V +ï¼Į以 åīį +IV ER +Ġst ove +$ / +ä¹Ł åıªæĺ¯ +æľĢåIJİ ä¸Ģ个 +Ġspin ning +èĥĮæĻ¯ ä¸ĭ +Effect s +g p +楼 çĽĺ +åĨľ åľº +Ġfix tures +第 åįģ +ï¼Į æµģ +æ®ĭ çķĻ +_ record +ĠT OP +ĠOpport unities +ï¼Į åĢĴæĺ¯ +Advert ise +h all +èϽ 说 +èĻ ı +æľĢ åħ· +è¢ ľ +åºŁ çī© +类似 äºİ +process or +åĪ« 说 +Ġnan op +/ images +M IC +Ġil leg +ãĢĤ ç»ĵæŀľ +. Location +/m ain +More over +å® ¸ +ant om +没æľī åĬŀæ³ķ +ä¸Ģ æĺ¯ +Ġpay roll +Ġdu o +Ġreal ised +åºķ å±Ĥ +. 
Response +R B +ä¹± çļĦ +ä¼ģä¸ļ å®¶ +ĠIn cludes +çłĶç©¶ åijĺ +åºĶ åĬĽ +ç«ŀäºī 对æīĭ +ĠRe ed +ĠCor ner +å®ī å¨ľ +ank ing +Ġcont amination +æľ¬ èµĽåŃ£ +(: , +âĢ » +Ġcompr ise +ĠTH C +Ġ åŃĻ +Ġafore mentioned +ĠArab ic +Ġclust ering +ram id +ĠP IN +( Context +åľ¨ åħ¨åĽ½ +() ` +Ġb c +宽 æĿ¾ +ĠPos itive +ĠR GB +çα ä¸Ĭ +Ġelectro de +ĠSc an +Conf irm +æĺł å°Ħ +çĿĢ çľ¼ +è¦ģ ç͍ +Ġc akes +ĠSaf ari +éĶ ļ +Ġv ide +çļĦ éĢļçŁ¥ +Ġ 游æĪı +Ġintrig uing +ĠSc anner +Ġemploy ing +æĦī æĤ¦ +Ġamb ient +po ons +çĸĹ æķĪ +æ¯ķä¸ļ åIJİ +æİ · +ĠRe leases +æĮī éĶ® +ul ously +un ny +_b ar +éľ² åĩºä¸Ģ +/b lob +N ick +) & +f w +Ġfest ivals +i om +Cal cul +éĹ®é¢ĺ æĺ¯ +åŃĹ ä½ĵ +ï¼Į åħ¬ +Ġun stable +ĠB ool +Ñ Ħ +Rel ative +ack et +ret ch +ć ą +S olution +Ġpaper work +åħļ ç»Ħ +æĻļ é¥Ń +æĸ° åŁİ +å¼§ å½¢ +C apture +夹 æĮģ +Ġe urope +ĠL ed +need ed +ï¼Į ä¼¼ +ĠAl bum +ï¼Į åħ³äºİ +ĠMy SQL +ĠW ide +.C olor +严 å³» +å¹´ 以æĿ¥ +ĠDes k +ampl er +ä¹ĭ ä½Ļ +çķ ¸ +_param eters +ĠM ent +ĠOff set +Ġsub section +, çİĭ +ä¸į 便 +.b lock +çŀ ħ +erg arten +Ġf lick +âĢľ è¿Ļ +ob o +Ġto ggle +Ġcollect ively +ä»Ģä¹Ī åij¢ +躲 éģ¿ +ï¼ Ļ +et al +state ment +渲 æŁĵ +ï¼Įä¹Ł ä¸įæĺ¯ +ç§ijåѦ çļĦ +Ġteen agers +v v +Ġprosecut or +Ġb ru +æĥħ ä¾£ +ä»ĺ è´¹ +å¥Ķ é©° +é£ Ļ +Ġglimp se +.print StackTrace +âĤ¬ TM +Ġce ase +åIJĦ æľī +ï¼Į å¾· +ĠHyp othesis +ï¼Į çķĻ +Ġmill enn +Ġincub ated +ew s +Ġacc redited +许å¤ļ 人 +ä¸ĩ çļĦ +çŃī åĽłç´ł +': ' +{ z +Ġrent als +F ar +åΰ ä¸Ģ个 +qu ee +S prite +_s amples +åįĥ ç±³ +æľĪ åħī +è·µ è¡Į +éĥ¨åĪĨ çļĦ +è¿Ľ åıĸ +Ġrespond s +ear chers +çļĦ 身ä¸Ĭ +çļĦ大 åŀĭ +Draw able +Ġtop ological +äºĮ çϾ +ç§ĭ 天 +类似 çļĦ +ener ated +ĠMin imum +-f ounder +女 çİĭ +ä¸Ģ äºĭ +-m d +æīĢåľ¨ åľ° +åĿ Ĥ +Ġadm its +ãĢĤåħ¶ 次 +ĠSh anghai +ï¼Įä½Ĩæĺ¯ åľ¨ +ĠC rack +. 
row +J C +Ġen light +ä½Ĩ ä¸į +ĠD ennis +æİĪ è¯¾ +v or +å·® çļĦ +åīį éĶĭ +ĠA ST +ot ions +Ġe rect +Ġfold ing +Ġroof ing +åĶ®åIJİ æľįåĬ¡ +ĠC ord +Ġstreng thening +å½±åĵį çļĦ +Ġa ston +ĠSign al +ĠHe ights +Ġknock ed +ì § +n at +S ets +空éĹ´ çļĦ +å¾Ī 强 +Ġassist ing +ãĢģ ç½ij绾 +该 æĢİä¹Ī +ga e +pp le +fore st +ï¼Įä¹Ł èĥ½ +ĠTre k +E AR +æŁĵ èī² +ĠRod rig +ĠPut in +èĢģ åĮĸ +æ¼Ķ å¥ı +Ġsequ el +ĠBed room +Ġp umps +Ġco herent +Ġst ained +Key word +æĶ¾å¼ĥ äºĨ +otechn ology +ĠT ank +æĺ¯ä¸Ģ åľº +ä¹ĭéĹ´çļĦ åħ³ç³» +Ġnot ebook +çļĦå°ı 说 +ĠD ig +éĤ£ 个人 +硬 度 +å¿ĥ æĥ³ +ĠPrinc ipal +åĩºç§Ł 车 +Ġrou lette +æ¤ħ åŃIJ +åľ¨ 该 +åĩ ¿ +resh ape +Ġarch ae +st a +ï¼Į便 äºİ +Ġmerg er +Ġregul ator +\ ], +Ġrac ist +m ag +Ġab c +=" @ +ĠBe er +åħij æį¢ +.d ate +ĠT ap +æŀľ æĸŃ +æ²ī éĩį +art ifact +R V +ĠÐ ³ +éĤ® æĶ¿ +è¿ĺ ä¸įéĶĻ +ãĢĤ ãĢĮ +常 å§Ķä¼ļ +Ġdign ity +èī° èĭ¦ +ĠAdv ance +T LS +ä¹ĭ å®¶ +ï¼Įä¸Ģ éģĵ +Ġcounterpart s +Ġcheap est +Ġclick ed +ç¦ı å·ŀ +ĠMar io +æĻĭ åįĩ +åĴĮ ä¸Ģ个 +ĠLaw s +AR C +æĿ ŀ +ux e +Ġcos m +É Ļ +ç» ® +åħī ç͵ +Ġb apt +åįļ è§Ī +ï¼Įåħ¶ ä½Ļ +ï¼Į æĶ¹ +Ġconvinc ing +çļĦéķ¿ åº¦ ++ , +Ġev ac +, w +è§Ĥ æµĭ +k ar +Ġmit igate +-med iated +åħ± è¯Ĩ +æ¸ħ æĺİ +æľĢ 强 +Ġdes cent +è¶Ĭ åıij +Ġcont ention +èµ° ä¸Ĭ +å¸Ī çļĦ +ĠW ere +æīĢ ä½ľ +éĺŁ åĪĹ +.h ash +Ñģ Ñı +And rew +Ġw ounded +ãĢĤæĪij æĥ³ +åIJ ± +ĠL amb +ĠTe ams +æŃ£ ç»ı +æĹł æķĮ +å°ı äºĭ +èģĶ æīĭ +Ġinf er +åĨ² æ´Ĺ +Ġb ush +il o +Ġprop he +åī§ çĥĪ +å°± éľĢè¦ģ +_l ayout +\ int +ĠF o +å°ģ 建 +å¾Ĺ ä½ı +å¾Ĺ 好 +Ġbl own +èħ Į +ĠScholar ship +PL C +Ġwas ted +ï¼Į ç³»ç»Ł +èĤ ĺ +åĦ¿ 女 +et ta +ĠC odes +ĠAM D +æĪij 没 +.B uilder +乡æĿij æĮ¯åħ´ +Ġ æĪijçļĦ +æ¦ ¨ +åĵŃ äºĨ +.m y +-t itle +çģ«è½¦ ç«Ļ +åĪĨåĪ« æĺ¯ +Ġstretch ing +å¹³ åİŁ +Ġsub missions +t alk +åĩºçīĪçļĦ åĽ¾ä¹¦ +æīĭ åĨĮ +riter ion +( env +ere rs +rot ation +æ°¯ åĮĸ +amp oo +æĿ¥å¾Ĺ åıĬ +ph ot +ãĢģ å¿ĥ +Ġinn ings +æľ¬èº« çļĦ +为 æľ¬ +ĠSpe ech +, 没 +è°Ī 论 +åĽŀ èIJ½ +亮 缸 +åİĭ è¿« +ï¼Į åĽ½åĨħ +Ġintention ally +T uple +Ġdisc iples +ĠR oth +Ġfun nel +çĿĢ çľ¼çĿĽ +. 
plot +ä¸Ģ åĩº +Ġden oted +æŃ§ è§Ĩ +Ġmarket ers +åºķ 线 +æ´¾ éģ£ +Ġbill ions +ä¸į åIJĥ +Ġcolle ague +æĮī æĹ¶ +Ġper taining +è¿ĩ å¾Ģ +Ġon t +çļĦ æĥħ绪 +æ³Ħ éľ² +Ġgrav itational +Ġmon itors +ll vm +Ġin quiries +ĠS TE +è¿ģ ç§» +ĠGh ana +Ġey eb +çļĦ 书 +éĥ½ éĿŀ常 +èµ° 访 +ä¸įèĥ½ åĨį +Ġinhabit ants +çݯå¢ĥ ä¸ĭ +Ġb os +èijĹ åIJįçļĦ +好 人 +c uts +ĠE UR +-in put +_ remove +Ġinter cept +å¦ĩ ç§ij +ï¼Į 离 +ĠSch w +:: _ +è¡° èĢģ +åĴĮ 对 +O FF +Ġf ighter +å¹¶ æĹł +Ġs nd +Ġcompan ions +ï¼Į æīĵå¼Ģ +Ġcomp lementary +_class es +Ġfut ures +, ãĢĬ +ST ATE +Ġimm ense +ĠAP Is +ç¡® ç«ĭ +_p ages +ä¿ Ń +é¦ĸ æī¹ +å°ıå¿ĥ 翼翼 +Ġbl ues +éĻ¢ çļĦ +åºĹ éĩĮ +Ġ æľ± +ä¸į 为 +Tr ad +F unnels +Col lege += f +ĠSuper ior +ä¹ĭ ä¸ŃçļĦ +æ¶Ĥ å±Ĥ +Ġ$ ('# +å®ĮåĸĦ çļĦ +Ġamaz on +ĠBr andon +C lock +æĥĬ 人 +M F +ç¢ ¾ +W ild +ï¼Į çľ¼ç¥ŀ +æĢ» éĩı +cite p +qu is +Ġmetall ic +ÏĦ ε +_V ER +. so +ĠN ET +æİ º +pher d +ï¼Įæľī åĪ©äºİ +ĠParticip ants +æ± Ľ +ä»»ä½ķ ä¸Ģ个 +太 çĽij +Ġsh ar +åĨĻ äºĨ +Ġactiv ist +IS C +å±Ĥ å±Ĥ +Ġ ------------ +çļ® å¸¦ +Ġbrack ets +N il +ĠB ott +åľ° æĬĬ +æĸ ¼ +æĤ£ æľī +æľº çͲ +Ġrevolution ary +Ġasc ending +ĠPer form +æĴŀ åĩ» +C li +åĸľ 好 +PT Y +௠į +en vironment +å®ĺ åħµ +_C AP +åĽºå®ļ åľ¨ +/ config +The ta +Ġbe es +ĠMot ors +ä¼ĺè´¨ çļĦ +Ġmal ware +foot er +åįĹ æµ· +Ġâ ĺ +åĪĩ å°Ķ +Ġv ibration +Ġisol ate +ä¸Ń 级 +" A +μ α +Ġst urdy +è´¨ çļĦ +Ġ" ) +妹 åŃIJ +è·ij åİ» +PERT Y +ä¸ĭ 游 +ï¼Į C +礼 åĵģ +好 åĩł +å¤į èĭı +Pol ice +Ġbudget s +, æľīçļĦ +leg end +æķ¢ äºİ +Ġatmosp heric +ĠC uba +ĠUs age +bro ok +æ¯Ķ æĪij +m ysql +å·´ æİĮ +åı¦å¤ĸ ä¸Ģ个 +æ®ĸ æ°ij +Comp ile +I H +èĥ¡ èIJĿåįľ +ffect s +Ġtransport ed +ĠAR M +èĢĮ çŁ¥ +Ġb ore +åķĨ ä¼ļ +ĠO z +Ġse as +éĴĵ é±¼ +on o +Ġweb inar +èĿ İ +l abels +å¦Ĥ æĿĥåĪ©è¦ģæ±Ĥ +Ġmark ing +Sub scription +ãĢĤ åŃ¦æł¡ +ãĢģ åĨħ +Ġne on +Ġt ales +éĩij å¸ģ +åıijçĶŁ åıĺåĮĸ +B ed +ĠKe ys +Ġcomp rehend +æľī äºĭ +æµģåĬ¨ èµĦ产 +set minus +åįĬ çĤ¹ +åĽŀ è°ĥ +it ics +Ġ éϤäºĨ +Ġsp ice +èĢIJ 磨 +Qu est +Ġmed al +çΏ å¦Ī +Ġgrant ing +æĪIJéĥ½ å¸Ĥ +ãĢģ åij¨ +M argin +åºı åı· +æŀģ 管 +, Y +Ġxml ns +对象 çļĦ +ç²Ĺ ç³Ļ 
+Ġra pe +Ġsplit ting +Ġs d +Ġ ÙĪ +H orizontal +Ġv amp +.l ib +um at +Ġunder went +Ġabs urd +- led +_ insert +Ġdry er +EN U +æ± Ģ +F unctions +Ġproject ions +åīĤ çļĦ +Ġmuseum s +ï¼Į å¹³æĹ¶ +åŃ ½ +' " +éĤ£ä¸ª æĹ¶åĢĻ +Ġle uk +åıijå¸ĥ äºĨ +PL Y +åĬł çıŃ +port ion +)$ $ +éĶĢåĶ® é¢Ŀ +èİ«åIJį åħ¶ +ĠBank ing +S yn +åŁİ å¢Ļ +ĠHV AC +M IT +å°Ĩ é¢Ĩ +l ap +ï¼Įéļı æĹ¶ +Ġr het +Ġk ay +Ġgl ut +ĠElect ronics +ĠC afe +Ġincl ined +Ġopt imum +Ġclass ify +å°¤ 为 +Ġillust rations +Ä Ł +è¿ĺ 以为 +æĪij è¿ĺæĺ¯ +Ġtrans c +ê ° +æĹł æĥħ +éĤ£ åıª +ĠInf rastructure +çѾ åŃĹ +UD IO +_ account +ãĢģ å®ī +æģĴ 大 +an che +_ ps +äºĨä¸Ģ 份 +çļĦ å®¶ä¼Ļ +ĠON LY +ç¦ı建 çľģ +æĮĩ å®ļçļĦ +æľī ä¸Ģå®ļ +Ġstick y +æĹ© çĤ¹ +缸 å·® +ÏĦ ο +ï¼Į éĥij +ĠCh rom +éĢĥ éģ¿ +************************************************************************ ****** +.ex it +åĽ½ 人 +Ġturn over +Ġrec alled +Ġimag ery +Connect ed +Ġpresum ably +ä¸Ńå¿ĥ çļĦ +å·¥ 伤 +央 è§Ĩ +çĶŁ çĶŁ +ĠRe yn +ĠD anny +ab ind +_ location +ĠC lock +Ġtun ing +è¿Ļç§į æĥħåĨµä¸ĭ +ä¹Ł å¼Ģå§ĭ +ĠBer keley +was her +ar in +Ġwork load +ĠâĪ Ĩ +è¿Ļ æľ¬ä¹¦ +å°Ĩ æĪIJ为 +.f ill +ĠSy rian +Ġslee ve +Ġru in +Ġsc andal +Ġup stream +Ġdiagram s +çĭ ¡ +综åIJĪ æĢ§ +éĿĴ æµ· +и й +ĠVar ious +åħ« åįģ +U nd +Ġstr at +ĠAccess ories +ĠE ight +åĹ ľ +ĠK o +çľ ¶ +, åIJ¦åĪĻ +åľ¨ ä¸Ń +çŃī å¤ļ +èĭ Ķ +Ġkin ase +Ġstim ulate +ie u +ĠCan on +ĠBro oks +ĠD h +ĠH A +éĴ Ŀ +Min imum +äºĨ åķĬ +_d isplay +ç͵ è§£ +ĠF itz +Ġnew born +对 è¿Ļ个 +ighbor hood +çł´ è§£ +ymmet ry +activ ated +Ġpast or +_w ait +Ġant igen +AM L +Ġpept ide +Form er +art a +Ġdoub led +æĺİ æľĪ +æ² ¼ +f ood +H AL +- edge +. âĢľ +Ġg raft +pen cer +Ġmy sql +èĢģ人 å®¶ +Ġins isted +. im +at ype +注 éĩĬ +D ead +, èĢģ +æ¶īåıĬ åΰ +ĠO C +ĠGener ator +头 çĸ¼ +Ġd ye +åĮºåŁŁ çļĦ +ĠS ays +ĠIN S +ĠNav igation +æĿ¿ æĿIJ +pro cedure +_g rid +ί α +.L abel +Ġstandard ized +ãĢĬ åħ¬åı¸ +Ġ æĿ¥ +æľī æĿĥ +Ġ} ). 
+åΏ åķĨ +ĠE co +çĶŁ æľº +, åIJİæĿ¥ +c as +æĶ¶ çĽĺ +ĠForm s +Ġcoron ary +I r +åĴĮ èĩªå·± +楼 ä¸ĭ +ç½ij 红 +ç¥ŀ ä»Ļ +çijľ ä¼½ +-b order +Z ip +åŁİ éĩĮ +èݲ èĬ± +ĠE at +Ġt am +Ġw rink +ç»ıæµİ 社ä¼ļ +é¢Ħ å¤ĩ +Ġsent encing +Ġcry stals +Ġst ro +,ä¹Ł å°±æĺ¯ +ĠP BS +oot er +unct ure +ĠM ul +æĺ ķ +is b +ĠPhil osophy +s imple +ong s +_F UNC +ĠStart ed +place holder +ç͍ 车 +Ġp encil +éĢĴ å»¶ +ĠSp ons +und o +ĠMean ing +ä¹ĭ 大 +Ġstr angers +ä¿ĿéĻ© åħ¬åı¸ +N s +_b uff +ĠP air +失 çľł +Ġl ua +ĠSt ress +åİ¿ 级 +ãĢĤèĢĮ åľ¨ +top ic +ï¼Į ç©¿ +Ġhyp ot +ĠAut omatic +碳 éħ¸ +D ays +ĠA u +Ġadvance ment +Ġdark er +ä¸Ģ ç»Ħ +æ¯į åħ¬åı¸ +Ġ' ../../ +çıł æµ· +) ä¸Ĭ +Ġdis posed +Ġc ared +int s +_ acc +he astern +ĠI gn +Ġal mond +Ġd yn +ob a +Ġrepro ductive +ĠC atalog +Ġn b +ï¼Įæľī æĹ¶åĢĻ +ĠPlaintiff s +ãĢĤè¿Ļ 次 +r pc +ĠCap itol +éĩij èī² +ĠS low +Top ics +_AR G +ç»´ å¥ĩ +åı Ń +ĠPharm aceutical +_ utils +ä¸ŃåĮ» èᝠ+B ucket +ä¸¥æł¼ çļĦ +æī§ çĿĢ +Ġ' -- +åºŁ è¯Ŀ +å¨ ¥ +Ġtox icity +rot ate +ĠA rena +ãĢĤ éĺ¿ +æĭ ĩ +ç¥ŀ åľ£ +é£ŀ æī¬ +ĠC t +Ind icator +ä t +CRIP T +Fl ash +Ġrib bon +èĩª æĦ¿ +{b matrix +èĥ ° +çı Ģ +çķª èĮĦ +ä¿ĿæĮģ çĿĢ +Ġregul ators +Ar m +å¦Ĥ æĦı +Ġthere in +ĠBuy ing +çŁ¥è¯Ĩ çļĦ +ï¼Įä½ł 说 +.g l +ä¸Ģ çŃīå¥ĸ +Ġdis semin +çĮ´ åŃIJ +_ind ices +ar as +Ġst ub +年度 æĬ¥åijĬ +Ġi k +ä¹Łæĺ¯ ä¸Ģ个 +æĬĬ è¿Ļ个 +ĠF ailure +Ph ysical +Ġj umps +è¡£ çī© +in el +Ġd an +Ġtransform s +Ġtr agic +ठ¿ +çļĦ女 æĢ§ +> P +ĠOptim ization +=" ( +s ig +pub lished +ĠA pi +, åIJİ +Ġsw elling +Ġl aughter +UB LE +æĥħ æĢĢ +æīį åı¯ä»¥ +f uture +Ġper ceive +çĿ£ 导 +ä¸į ä¸Ģå®ļ +as gow +ĠLu cy +E U +Ġf reak +ĠN AME +, éļıçĿĢ +ç¼Ŀ éļĻ +_ IC +ric hed +çľĭçĿĢ ä»ĸ +ĠV ir +ĠP rest +æľī ä»»ä½ķ +F inding +on er +Ġall iance +社交 åªĴä½ĵ +}} ^ +ĠPl aces +Ġbe ast +ĠProt ect +Ġteen ager +çģµ æĦŁ +ul ence +_ OR +Ġmix er +Ġcurv ature +ĠB USINESS +GR AM +ĠExper ts +Ġc innamon +Ġb s +P atch +al am +èĥİ åĦ¿ +d atabase +åĴĮ æĿİ +ĠJ ama +ON S +Ġc urs +, ä¸įæĸŃ +ĠC AD +æł¸ å®ŀ +åħħåĪĨ çļĦ +Ġ èĬ± +Ġphot on +çļĦ çľ¼ +et own +Over lay +ä¼Ĺ æīĢ +Ġcart oon +æ³ķå¾ĭ 
责任 +éĩįåºĨ å¸Ĥ +ĠProb ably +â Ŀ +éļIJ çŀĴ +ĠImprove ment +.m ethod +. us +Ġd jango +æ°§ æ°Ķ +ĠCon duct +Ġmar vel +ĠR oche +车 éģĵ +Ġadmin ister +v ars +St udio +å¤į å·¥ +\ caption +ç¾İ æ´² +ä¾Ľ æ°´ +Ġneighbour hood +ne apolis +ĠK om +èĤ¥ èĥĸ +Ġint r +æIJ¬ è¿IJ +] ); +Ġspring s +Ġprocess ors +ĠO dd +Ġmetaph or +.T ab +Ġes p +å®ĮæĪIJ çļĦ +B io +ç¾Ĭ èĤī +åıĪ èĥ½ +Ġsem antic +ĠD ining +Qu estions +å°Ĩ èĩªå·± +Ġkind ly +c ards +ĠH aven +èµĦ æ·± +B and +Phot os +_ Type +T er +ut er +Ġ 让 +åıij åĩºçļĦ +è¾ĥ å°ij +ĠWar ner +ent i +Ġpreval ent +æľī åĬĽçļĦ +Ġflav our +P ending +T aking +Ġ' __ +ul ators +çļĦ çģ« +äºĶ åįĥ +Ġpromot er +Ġemp ire +ĠW S +/ ex +(f unc +èİ· æĤī +ro v +ç« ¿ +ĠT emp +éĹ´ è·Ŀ +ĠVerm ont +- imp +âĢĿ ), +rep ared +è¿Ļ åĿĹ +t witter +-in flammatory +Ġgest ure +å·¥ åľ° +Ġc iting +am iliar +Ġo lig +åıij æĢ§ +_p assword +èĢģ èĢħ +å¹´ èĩ³ +åįķ 身 +, å½ĵæĹ¶ +.m ove +, 积æŀģ +Ġacceler ated +Ġs igma +ĠEqu ation +Ġpos ed +L ou +è¿ĩ ç¥ŀ +Ġundert ake +Ġ æĸĩ +Ġhead line +ĠPear l +æ¯ı ä¸Ģ次 +^ i +å¤ĩ èĢĥ +s omething +Ġclos es +æıIJ åIJį +ï¼Į ä¹° +âĢľ If +èĦĨ å¼± +Ġ< > +Ġg um +éĢłæĪIJ äºĨ +r one +rec ip +ĠP izza +t ic +âĸ ² +enc ia +Ġh over +Ġhel met +å·¥ ä»¶ +ett i +ĠCompl iance +ĠSh ows +ĠR anch +( -- +æĻļ é¤IJ +ĠEn um +est ly +æķ´ æķ° +ĠWalk ing +èħIJ è´¥ +æĮ¡ æĿ¿ +è¿Ļ 人 +Ġrepe ating +å°± åĥıæĺ¯ +æ»ij æ§½ +Ġw p +t ol +ï¼Įä¸į 论 +ĠEss ential +èĮ ī +Ġsn ake +ĠT ables +ĠB elt +Ġ 两 +模 èĮĥ +éĿł åľ¨ +åĬĽ åѦ +uff s +A ggreg +åζéĢł åķĨ +(t oken +amp ing +æııè¿° çļĦ +f lex +Ġbegin ner +Ġhed ge +.t ag +ID EO +æĺİç¡® çļĦ +Ġthe e +ĠPro tein +æĺŁ çļĦ +ism iss +he el +æģ° 好 +ĠS equ +æį¢ äºĨ +ĠC raw +ED IA +f ail +çĴ ĭ +æ¯ı个 人çļĦ +_by te +f air +Ad just +Ġart ifacts +Gu ard +CL US +ĠVer ify +ĠK os +Ġpec uli +ourn aments +éĿŀ常 éĩįè¦ģ +_RO OT +æĻĭ 级 +Ġenh ances +}^{ - +Ġass ass +ï¼Į åIJį +Ġp ir +.f asterxml +- pr +ĠLeg acy +Ġrail road +Ġindirect ly +ĠBen ch +å¤ĸ 人 +åĪĩ çļĦ +ĠT ile +å·¨ 人 +ä¹Łä¸į æĥ³ +Ġn asty +i Phone +at to +ï¼Į 严 +all enge +A èĤ¡ +Ġ æĬĬ +Ġ 
-------------------------------- +_f ull +Bal ance +ï¼Į èĩ´ +/ # +ĠD istance +Ġst ochastic +. result +çļĦ æĹ¶ä»£ +ĠB orn +Ġwh olly +ï¼Į ä»ĬæĹ¥ +may be +Ġcompar ative +Ġn our +ĠHamilton ian +æł¹æľ¬ 没æľī +æĺ¯ä¸Ģ éĥ¨ +Un its +Ġinv oked +od i +人 æł¼ +ear th +q r +ĠRen ew +为 æĪij们 +é¢Ħ æ¡Ī +ic one +ow ing +æ¦ Ĩ +åĪĽéĢł äºĨ +Ġsla very +ĠPol ic +åıij è§ī +æİ¨ åĩºçļĦ +Ġsp elling +ï¼Įä¹Ł ä¸įä¼ļ +- at +. ar +_M OD +ï¼ĮæīĢ以 åľ¨ +éķ ij +Ġinf inity +å¼Ģ æ°´ +Ġcoll ateral +å¾Ĺ çļĦ +Ġf ör +( | +es cope +Ġoverlook ed +, åIJĦ +æ· Į +ä¸ī ç»´ +æµģ 浪 +ĠCH AR +ĠD P +åĽ½å®¶ éĺŁ +åĨ° éĽª +Ġoxid ation +Other s +D W +ĠRe commend +æ¹ĸ 人 +R x +ĠBern ard +ĠPl ate +ĠThought s +ĠM ale +sh ots +In ner +unt il +_W IDTH +Ġmis leading +ä¹Ł æĹłæ³ķ +view ed +ãĢģ åľ° +Ġupt ake +éĤª æģ¶ +ï¼Į å®¶ +ãĢģ åħ¶ä»ĸ +Ġpost ers +Ġn y +Ġe go +ĠM aur +c ro +å·ŀ çļĦ +ï¼ĮæĪĸ æĺ¯ +ee per +éĥ¨éŨ çļĦ +ĠPub lisher +ort ium +è¿ĩ é«ĺ +åĥ » +Ġpriorit ize +ï¼ļ ä¸ŃåĽ½ +æīĵ å¾Ĺ +çݯ ç»ķ +Ġanth rop +æĮ« æĬĺ +ad ic +ï¼Ł 她 +G ot +å°ı é¾Ļ +åıĪ åľ¨ +Ġà ® +åĬ³åĬ¨ åIJĪåIJĮ +o ors +_c ss +ï¼Į å¼ķ +è¿Ļ åıª +转 åħ¥ +ĠB ak +诧 å¼Ĥ +_b e +åĿ ŀ +Sh ipping +.Rem ove +æĪij èĥ½ +( Get +Ġteen age +Ġ åĪĨ +å¾Ģ åIJİ +ç¡®å®ļ çļĦ +Ġjuris dict +L iving +Ġt ribe +b ach +tes y +å¾Ī æ¸ħæ¥ļ +ĠU g +ĠJ ur +Ġfore most +aaaa aaaa +ĠHind u +com pute +Ġthr ust +æĺ¯ éĿŀ +举 æµ· +Ġdrain age +ĠM ead +Ġdelight ful +.M odels +èĥĮæĻ¯ æĬĢæľ¯ +SD L +对 éĺµ +çĶŁäº§ ç»ıèIJ¥ +èĢģ å®¶ +Ġj okes +ĠD inner +c ert +Ġpers ist +ãĢĤ âĢĶâĢĶ +çŃī åĢĻ +_ attribute +éĿ¢ èĨľ +à¹ Ī +åıij è´§ +ĠSE LECT +ëĬ Ķ +æĻ Ł +-g rad +pe ated +çĿĢ å®ŀ +ĠC riteria +us k +_st ore +Ġexc er +Ġb reeze +g irl +Call ed +ĠNo vel +ĠS ql +ĠJ ump +æĹł æīĢè°ĵ +ĠAltern ative +ph ase +ph al +ï¼Į ** +Ġm x +太平 æ´ĭ +ä¸Ń çļĦä¸Ģ +Cap acity +ĠL av +æ¾³ æ´² +ĠOff ers +èĪ Ķ +çĭłçĭł çļĦ +å°ģ ä¿¡ +Ġaut ism +æĶ Ĵ +_L INE +. 
Update +ĠLe o +微信 åħ¬ä¼Ĺåı· +ĠP ere +å¤ĩ 注 +ĠSupp lement +Ġprob ation +_CH ANNEL +ol lo +ä¾Ľ åıĤèĢĥ +åħµ åĬĽ +Ġtwe ets +Ġth rom +ĠDi abetes +Ġst ain +d epth +imp ly +ĠE ther +ĠF IN +rop ract +L inux +Ġele ven +Comp leted +ne e +ï¼ģ ä»ĸ +ãĢģ æ¸ħ +éŀ į +et ails +æĬ¥åijĬ æľŁ +Ġfluores cence +æĹ¶ 段 +ah an +综åIJĪ æĶ¶çĽĬ +t ails +Ġneighb oring +Ġsympt om +ĠCitiz ens +ĠN D +Ġt ilt +IS ION +åı· åı¬ +C OD +身 ç©¿ +åĮº åĨħ +æĹł åıĮ +ä¼ļ 使 +Ġa i +åīį 端 +çļĦä¸Ģ å¹´ +Ex change +æĪ İ +ÂĤ ÃIJ +ĠP rel +D emo +trans action +çľĭ æ¸ħ +åĪĨ辨 çİĩ +è´¨ æĬ¼ +马 å°Ķ +ĠB IT +Ġcomb ust +ĠPRO F +ĠB undle +opl asm +ĠHost ing +åı¤ 人 +, ç»ı +Ġind ict +çļĦç¥ŀ èī² +C AR +à ¯ +Ġslic ed +Ġcomp artment +å±Ģ éĻIJ +Ġcas c +室 çļĦ +sh ake +åºĶæĶ¶ 款 +ä¸įçͱ å¾Ĺ +ï¼Įå¹¶ 对 +æ°´ åĩĨ +Ġconstruct ing +Help ers +Ġre load +ĠB our +ok ia +-w ise +< img +Ġext r +Ġcor rid +缺 åı£ +ĠDIS CLAIM +Ġfold ers +åŁ Ķ +ç»Ī çĤ¹ +(" _ +am o +.P ower +Ġ æĪªèĩ³ +çī¹ æķĪ +Un able +çĹħ åıĺ +详 è§ģ +rib ly +Ġ 缮åīį +ãĢĤ æľīäºĽ +Ġarm or +åijĬè¯ī è®°èĢħ +æīĵ 磨 +Met ric +om ial +åĨħ å£ģ +ĠEmploy ees +èĥĮ åıĽ +èģĮ åijĺ +ĠB ert +_ ARRAY +ĠP OL +_mem ory +Ġw iki +Ġspecific ity +UST OM +ï¼Į ä¿ĿæĬ¤ +Ġant ique +Access ibility +S uggest +æ· ¤ +** ) +主ä¹ī çļĦ +Q t +Ġsit u +èĮħ åı° +ï¼ģ ãĢį +_un it +ï¼ Ķ +commun ications +ç´ Ĭ +Ġl act +Ġqu iz +èĪĮ 头 +ĠF est +Ġunder way +ãĢĤä»ĸ 说 +F rames +ĠK ick +( ret +ï¼Į éĢĤåIJĪ +ï¼ ĭ +éĹ® ä½ł +IM P +M ount +Ġdetect ing +æĺ¾ç¤º åύ +æĺı è¿· +oc cup +ĠSt re +ces ter +ord inate +ä»ħ æľī +Ġmoist ur +çŃī æĪij +weg ian +Ġc ass +aj a +åĩº æ¼Ķ +Ch oice +åħĪ æĺ¯ +P ot +OT H +se p +Ġb achelor +å¤ļ å¹´æĿ¥ +Ġsoc ietal +L F +ĠReg istered +.set tings +L IST +Ġcompos er +ãĤ ģ +å̼ çıŃ +ï¼Į ä¼Ĺ人 +ä»İ 容 +[ v +ï¼Į 纷纷 +_ helper +骨 éª +éϤ å°ĺ +Ġworth while +å±ı èͽ +éĤ£ æĹ¶åĢĻ +è´§ 车 +çĿ « +å¿ĹæĦ¿ æľįåĬ¡ +ĠS ew +Ġst aring +Ġsun shine +_ loop +æīĵ æĸŃ +åįĬ å°ıæĹ¶ +mal ink +us a +Ġinstruct ors +оР¶ +ï¼Įä¹Łå°±æĺ¯ 说 +Ġreserv ations +,ä¸į 管 +ĠB SD +éĩı 为 +Imp lemented +Ġн е +æĭĽ åĭŁ +ĠLik ewise +Ġro ok +建设 é¡¹çĽ® +ĠL ions +Ġqual ification 
+Cur ve +_r x +æĺĵ äºİ +ac ious +.N ET +ä¼ļ 产çĶŁ +æīĭ ä¸Ģ +O rientation +Ġal umni +C SS +Ġplay ground +Ġaccount ed +Origin ally +(f rom +éħ± æ²¹ +Ġ åIJĮæĹ¶ +çľ © +åľ¨ æĸ° +åľ° 带 +ĠEl ite +ï¼Įåľ¨ è¿ĻéĩĮ +Ġun ified +Ġmat urity +çļĦ æĦıä¹ī +åĢĴ äºĨ +ĠC incinnati +æľº ç»Ħ +Ġun common +ĠS cheme +Âł Z +Ġprinc ip +b read +Ġbroad ly +Ġroot ed +å®Ŀ 马 +Ġformat ting +æ³ķ å®Ŀ +ï¼Į åŁºæľ¬ä¸Ĭ +æģ ķ +.d omain +Ġh ipp +ĠP atch +. cloud +ĠEng agement +Ġfree zer +is her +åĮ ķ +Ġmanip ulate +ĠC atherine +! , +Ġgra pe +çļĦ 她 +br anch +æĸ¹ æīį +ĠA ware +ä½ķ å¤Ħ +èĩª å®ļä¹ī +.get Message +ĠOak land +_v ars +Ġher bal +ï¼Į 注æĦı +_M ESSAGE +è§Ĥ åħī +P RE +ĠAn a +Ġreal izing +Ġis o +å©ļ 纱 +En code +s ent +unc an +Ġo we +ĠD ollar +ĠHy brid +ãĢĤ æĹł +æĬ Ĵ +Ġ" __ +log ging +ĠMonth ly +æİĴè¡Į æ¦ľ +Ġnom ination +for cing +Ġmicro bi +u o +åįģ ä½Ļ +åĮ» çĶŁçļĦ +ãĢģ ä¿Ŀ +ï¼Į æĵįä½ľ +Ġn aming +Ġend ors +ĠðŁĺ ī +Ġtrack ed +Ġsubsid iary +ib aba +-d egree +ãĢģ å¼Ģ +Ġplace bo +æĥħåĨµ çļĦ +ä¸ī åĽĽ +Ġpar ish +æķ´ æ´ģ +åĨłçĬ¶ çĹħæ¯Ĵ +Ġmot ors +Equal To +Ġk o +åħ¨ æĸĩ +à ³ +---- --- +.m icrosoft +ar b +æ¯ ĭ +ĠF act +Ġcan al +çļĦ æĸĩåĮĸ +Ġ[ ` +ĠA ur +r ice +ï¼Įæķ´ 个人 +contain ing +coh olic +Ġdece ased +Ġs ang +att ach +ä¸į å±ij +w l +ï¼Įåľ¨ ä»ĸ +. icon +åĮ»çĸĹ æľºæŀĦ +èĢħ åľ¨ +/ ? +å¹´ 被 +ĠGu ides +f ixed +{ q +ï¼Į æĽ¾ç»ı +Ġm b +' - +Ġgl orious +Ġbrut al +% d +ch ip +ĠBl ake +éĿŀ常 æľī +\ ,\ +ĠE agles +unn able +西 è£ħ +Ġdep ressed +S kin +ï¼Į æĺ¯ä¸Ģç§į +let es +Ġtext ures +oe ing +ä¿® 饰 +ĠAr ctic +Ġfirm ware +Ġfont s +Ġd ice +_s ystem +å¹³ æĹ¥ +åħī æºIJ +Ġvent ilation +åı¯ä»¥ 缴æİ¥ +çij ķ +æŃ¤ 人 +R om +r ill +ï¼Į èĢģå¸Ī +èĤ¡ä¸ľ çļĦ +对 ä»ĸçļĦ +Ġsurv iving +ĠÏ ĩ +被 åŃIJ +ĠReg ulations +ï¼Į è¿ħéĢŁ +it ives +ä¸į 妨 +Ġav ail +çģŃ çģ« +ens itivity +ĠQu ad +临 è¿ij +Ar c +oc co +èIJ¥ä¸ļ æī§çħ§ +. 
{ +Ġun belie +ĠL aser +çĦ¶ 大 +ĠL OL +æĻ Ķ +ĠD ot +è¿Ļ åĩł +Ġv ag +çī¢ è®° +è§Ĥ éŁ³ +Ġnotice able +ĠL ay +-b ook +ĠAdminist rator +[ _ +ä¸į æĽ¾ +Ġ æ¯ķ竣 +Ġplace holder +Ġobst acle +åľ¨ ç½ijä¸Ĭ +Ġaut onomous +et tes +Ġassert True +B ridge +Ġsp ices +Ġsp awn +è¿Ļ个 å°ı +Ġarch ives +ĠCall ed +_ email +ä¸Ģ åıĮ +cc cc +Ġdiv isions +Ġintent ional +æ´ĭ æ´ĭ +æ²¹ èĦĤ +.m ark +ï¼Į以 ä¸ĭ +Ġbe verages +æ¯Ľ åĪ©çİĩ +ĠS ierra +_ it +ä¸į å¹³ +书 åºĹ +Ġviol ate +Ġvar s +çļĦ æ¯ĶèµĽ +åIJĪ è§Ħ +çĭ¬ç«ĭ èij£äºĭ +ring ton +Ġc age +\right arrow +该 æĢİä¹ĪåĬŀ +çļĦ åģļæ³ķ +åŀ ® +ĠB ottom +it ic +ãĢĤ ä¸ĭ +éĻIJ 度 +æĿŁ ç¼ļ +Ġd orm +ì ł +Ġmod elling +Ġearthqu ake +. Open +çĬ¹ 太 +ãĢĤ çϽ +ï¼Į 符åIJĪ +å°± å°Ĩ +èĤĿ èĦı +ï¼Į éľĢ +éħ ļ +åĴ Ķ +è¿Ļ çīĩ +åijĬè¯ī æĪij们 +Ġincon ven +Ġdraw er +å¼ Ľ +Ġg ases +Mod ern +.m odule +Ġz inc +缺 失 +ï¼Į åŁ¹åħ» +ĠS chema +ĠG ain +个 个 +oll ary +Ġtw isted +ist ries +Ġcon gest +ĠT ol +éĿ¢åīį çļĦ +éķ¿ æĺ¥ +Ġpremium s +Opt s +ĠDe ck +Ġimper ative +ï¼Įä½ł æĢİä¹Ī +åͤ éĨĴ +Ġvoc als + ĥ +Ġcomfort ably +.ext end +éĢļ çļĦ +ĠM ig +ä½ı çļĦ +ãĢģ ç͍ +Ġend ot +欧 åĨł +ä¸Ĭ æ¼Ķ +Ġ èĥ¡ +Ġwork outs +Art icles +ĠCarl os +ä¸Ģ æ³¢ +è¿Ļ å®¶ä¼Ļ +Ġre active +ĠB att +Trans late +Ġc urrencies +裤 åŃIJ +& M +èĩª è¯Ń +.A zure +Ġ åĬł +Ġtim ber +Ġamend ments +ĠDis cuss +ï¼Įä½ł æĺ¯ +ç»Ŀ对 ä¸įä¼ļ +ä½į ç§» +c art +unicip ality +Re ceiver +_RE T +Ġstoryt elling +Ġten ure +Ġw ishing +äºĨä¸Ģ ä¸Ŀ +.F ind +èĪį ä¸įå¾Ĺ +Ġ æĹ¶éĹ´ +Ġbreak through +ãĤ ¸ +ĠCru ise +åİŁ æľī +Ġth ru +_ sec +Ġrespect s +ĠAre as +å°± æĥ³ +AMP LE +空 åĨĽ +ï¼Į åĪĩå®ŀ +çļĦ è·Ŀ离 +ĠSt ats +ĠW M +se g +åįİ äºº +æ¯Ľ å·¾ +tt le +å·¡ éĢ» +_n ormal +ĠHar vey +ar at +. port +( status +test s +ä¹Į åħĭåħ° +Ġdem ol +ãĢģ 以åıĬ +or ig +ĠAppro ach +æķ£ æŃ¥ +at ient +Code c +P odcast +izar re +Ġlight ning +ERV ICE +ĠNon etheless +Jes us +ä¸į æľį +aus ible +Ġcho oses +çķı æĥ§ +. impl +们 éĥ½ +æ°ij æĶ¿ +åĴ Ļ +Ġ( { +. 
Inter +Ġvol atility +ĠC AR +Ġceleb rities +_RE QUEST +ĠDis able +ĠT IME +Ġcond em +Ġoper a +Ġassert ed +Ġun re +Ut ilities +å¦ĩ 人 +ar u +W arnings +ĠPl ot +Ġcontin uation +ĠL abs +å¤ļ åIJĥ +涨 åģľ +åģļ 个 +ĠGi ants +éϤ éĿŀ +ç´ł çļĦ +ä¹ĭ åŃIJ +est ure +æĸĩä»¶ 夹 +.g rid +æİī çļĦ +ä¸Ģ æĹł +ĠY am +umm ing +å¦Ĥæŀľ æĺ¯ +ï¼Į æıIJ +åĹĵ åŃIJ +Ġdepos ited +Ġm ph +en abled +col ors +èѦ 示 +, ä¸Ģ缴 +çĮ © +æ±½ æ²¹ +h ref +Ġm c +æŃ» åİ» +ä¸į åĩĨ +ä½İ 声 +ĠSub scribe +ĠM UST +Ġadv ancing +å·® é¢Ŀ +Ġkin etic +ĠX ml +ĠTake aways +Ġsp oon +h um +ĠDeb t +ĠEvent ually +He art +Res pons +lob als +C odes +ï¼Į æ´Ľ +ĠCom parison +ä¸Ģ è§ģ +çĻ«çĹ« çĹħ +梦 ä¸Ń +交éĢļ å®īåħ¨ +Ġsem inar +less ness +AP H +ĠESP N +æľī ä¸įå°ij +Ġprem iere +g mt +Ġ ç¾İåĽ½ +çŃ· åŃIJ +ï¼Įæ¯ı 个人 +eterm ined +Ġsc ans +çĻ» åľº +ĠPl aza +yl an +Ġbe e +âij ¤ +é¢Ĩ æĤŁ +whe ther +ï¼Į éħį +ĠÎ Ķ +m ass +at ia +åĩĢ æ°´ +ĠM PI +ĠP am +Loc ations +Ġwe aker +èĻļ å¼± +æīį åįİ +Ġrisk y +S ummer +çľ¼ ä¸ĭ +GR ect +" This +没 åħ³ç³» +æķ£ æĸĩ +ä¹Ł æĥ³ +ĠR an +ĠPret ty +åħħ å®ŀ +mar shal +Comm ands +åį¡ å°Ķ +Ġse ized +_s ort +äºĴ èģĶ +just ice +oss ip +S cheme +æŁ´ æ²¹ +ĠW ed +æ°´ è´¨ +论 è¿° +ĠG ates +Opt im +Ġg reens +Part s +ãĢĤ ï¼ī +ĠF D +ãĥ Ń +æµģ ä½ĵ +ĠV o +ce ans +çŃī æĸ¹éĿ¢çļĦ +ĠT un +å¢ Ł +Ġcrack s +Ġlower ing +Max imum +åıį 驳 +-st art +åªĴ ä»ĭ +ï¼Į æĮģç»Ń +isp atch +ï¼Į åįĥ +åįĩ èµ· +_F LOAT +æ³Ħ æ¼ı +Ind ices +Ġhom ogeneous +Ġâ Ļ +Ġr v +. This +-ch anging +ĠW ish +_P OS +ĠAsk ed +Ġaff inity +any a +èĤ ´ +æ¦ Ħ +åħ¸ 礼 +od b +ãĢĭ ä¸ŃçļĦ +åIJķ å¸ĥ +æĥĬ æģIJ +åŃĹ çļĦ +å¯Ĩ çļĦ +æĪIJç«ĭ äºĨ +tt i +_in st +Ġconc erts +Project s +åģı å·® +ĠV AT +奶 ç²ī +ĠLaw yers +Ġbook mark +ĠH B +Ġwar mer +Ġо ÑĤ +游 çİ© +str cmp +Ġdivid ing +erial izer +è¿ľ æĸ¹ +AB EL +æ®ĭçĸ¾ 人 +v ic +en ium +.n ow +çIJ¢ 磨 +.D iagnostics +éģĩ åΰçļĦ +( view +ä¸ĩ åĪĨ +亲 æīĭ +Ġ第ä¸ī èĬĤ +ĠS aints +_res et +Christ mas +çĤ Ļ +å¤ļ 大 +åŃĹ æķ° +ĠEx c +Ġfulfill ment +) ! 
+Ġent ailed +åѦ è´¹ +the se +æĭī 伸 +Ġsup ers +ĠO T +Ġfis her +ĠCabin et +ãĢģ åģ¥åº· +è¨Ģ 论 +çζæ¯į çļĦ +anal ytics +çļĦ ç͍æĪ· +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ +ĠRe place +ad vert +ĠAdd ing +ĠN Z +ï¼Į åıĬ +ä¹Łä¸į ç͍ +Ġt read +Ġth under +éĽĨåĽ¢ åħ¬åı¸ +-c ode +. abs +Ġab used +åºŀ 大 +Ġtables poons +.S ervice +éĺµ åľ° +ĠS EM +ĠU ber +æ½® 湿 +ĠS ou +ĠU AE +代表 çĿĢ +ĠO ri +ĠT T +Det erm +.L oad +Ġmi R +å®ŀéĻħæİ§åζ 人 +ĠB read +ros so +D estination +W i +çĶŁæ´» æĸ¹å¼ı +饮 ç͍ +ĠIns ights +c lo +ĠK er +ï¼Į æĪĺ +J s +Ġt ex +æĿ¥ çĿĢ +и ÑĢ +_H OST +Ġsc ram +ï¼Į åĽ¾ +åıª åī©ä¸ĭ +饮 éħĴ +Ġincorpor ates +ãĢģ è¡ĮæĶ¿ +H en +Ġreal ization +Gu est +æĮĩ 纹 +æīĵ åIJ¬ +.com ponent +ãĢģ 为 +çļĦ好 å¤Ħ +æĹ¶ æĬ¥ +æ·· èĽĭ +第ä¸Ģ èĬĤ +- no +æĪIJ为 ä¸Ģ个 +æĬĵ ç´§ +_f actor +ä¸į 被 +em in +Ġdismiss al +Ġspot light +Ġd awn +m k +ĠD oll +Cl ause +ĠF emale +æij§ æ¯ģ +ãĢģ åıĮ +ĠAm anda +åŃĹ èĬĤ +æĸŃ è£Ĥ +Block ly +çĥŃ å¸¦ +{ table +ãĢĤ å®ĥ们 +Pop up +ar is +çŁŃ 线 +S ym +è¡Į 车 +æŃ¦ å¸Ŀ +èķ´ åIJ« +esthes ia +f older +èī² åĪĹ +Ġ åįķ +ãĢģ çݯå¢ĥ +èĩ Ģ +ĠHistor ic +大 åı« +ĠHug hes +Ġw ipe +ä¿® çIJĨ +çϾ éĩĮ +H om +N OW +UM P +ancell or +? The +S ig +éĥ½ä¸į æķ¢ +ri ages +on ge +ĠAl cohol += new +é¦ĸ é¢Ĩ +Ġamer ican +Ġprot agonist +åĵª å®¶ +-m ode +ram ento +ç¬ij äºĨèµ·æĿ¥ +opl us +å±ķ ä¼ļ +ï¼Įä¹Ł åı¯ +åIJij æĿ¥ +Ġdifferent iate +Ġtre k +ul p +ĠHol mes +Ġ èİ·åıĸ +it as +Ġir res +ĠBasket ball +_t ensor +Ġl ively +空æ°Ķ ä¸Ń +å¸IJ 篷 +åħĪçĶŁ çļĦ +æ¡Į éĿ¢ +èᝠåīĤ +Ġind ef +âĢľ æĺ¯ +å¼ł å®¶ +ĠX XX +å§ Ĭ +ĠVe get +ï¼Įä¹Ł ä¸įçŁ¥éģĵ +ãĢĤåľ¨ æŃ¤ +ĠDem on +æ² Ĥ +ï¼Į 温 +D isk +m oz +ribut ors +æµ· æĭĶ +arrant y +浩 çĦ¶ +Ġepid emic +ĠImm un +ĠMer cedes +. 
IN +.Thread ing +col on +èģļ åIJĪçī© +Ġpred icate +Ġancest ors +ĠM ann +ili ans +Any one +åĢĴ éľī +Ġstret ched +Ġemphas ized +ĠW ear +cript s +ĠKat ie +Ġr ider +on itor +æĶ¾ å°Ħ +建 åĽ½ +ãĢĤ æĿ¨ +Ġpol ish +inc are +Ġs ans +ĠSub scription +带 åΰ +Ġrecover ing +ï¼Į éŨ +ph is +æĴ° åĨĻ +RO UND +ĠBre xit +滤 æ³¢ +æ¢ ¢ +åĢĺ èĭ¥ +æĿij çļĦ +ĠC Y +æģ° å½ĵ +p icker +ge bras +.g if +Ġelect romagnetic +ï¼Į ä¸¥æł¼ +æĹĭ å¾ĭ +çĭ¬ å®¶ +_CON ST +, æķħ +æĬķèµĦ çļĦ +ï¼Įè¿Ļ个 æĹ¶åĢĻ +æ² ® +Ġ@ @ +欣 åĸľ +R G +éļĶ å£ģ +åĪĨ åĮº +ĠClaim s +Ġuniform ly +Ġenvironment ally +S ir +éĿ¢ åħ· +ç» Ĭ +ĠNe ck +åIJĦ åľ°çļĦ +å¸ ľ +Ġj our +SE QU +åĸī åĴĻ +ĠS of +ĠB urg +Ġsh aking +_ common +.get Id +in ch +没 éĹ®é¢ĺ +åIJ¸å¼ķ åĬĽ +Ġcush ion +T ur +ĠBar ack +Ġself ish +_P A +æº ī +ĠZ ip +Ġgran ite +ãĢĤ äºĶ +Ex cel +ĠCall ing +E ll +NOT E +Ġflu or +osp here +åľĨ æŁ± +_ rows +' / +æĺ¯ å¦ĤæŃ¤ +ĠCris is +, æĿ¥ +çŁ¥è¯Ĩ åĴĮ +èĩª 豪 +åı¯ä»¥ åĮħæĭ¬ +ĠW aste +Ġbroad band +ç³» çļĦ +åĨħ ä¾§ +被 è¿« +Ġinsert ion +Ġh ats +ä¼ł åĩº +Ġ å¦Ĥä½ķ +åĵ º +Ġout rage +-f i +æĺŁ åº§ +以 æĿ¥çļĦ +å·² åľ¨ +âĸ ł +ï¼Į å¤ľ +Un categorized +_ change +Ġk icks +av ier +Se ason +ĠC ob +ad as +Ġwas t +\ - +ï¼Įåį³ åı¯ +iss an +Ġcoc aine +ĠL T +åij¨ å²ģ +ï¼ģâĢĿ âĢľ +计 è¾ĥ +èµ £ +Ġvot er +L U +å̼å¾Ĺ ä¸ĢæıIJ +æĶ¿çŃĸ çļĦ +æīĭ æİĮ +éĢĤ éħį +ĠLink ed +/ env +åħī æ³½ +Ġinstall ations +U UID +åį ¯ +ä¸įè§ģ äºĨ +ĠV acc += t +çļĦ æĿIJæĸĻ +Ġs her +ãĢĤ 两 +ĠFr ag +Ġflat ten +å¹³ æ·¡ +æ¯ı个人 éĥ½ +äºĮåįģ åĽĽ +Ġinst ability +æĮĩ è´£ +Ġbron ze +çī© ç§į +论 è¯ģ +Is rael +Ġc n +ac ion +ä¸į èĪį +è¡Į äºĭ +ĠQ U +é£ŀ çļĦ +Ġrecogn ise +ï¼Į èĥĮ +御 åı² +ç²Ĵ åŃIJ +Ġdisappoint ment +S ix +åij½ é¢ĺ +ï¼Į æĪIJåĬŁ +erv ations +æķĻ çļĦ +åħ Ģ +Ġdiet s +(t able +- Up +æ±Ĺ æ°´ +ĠDev ices +åħ ¢ +Ġ" \" +çħ¤ çŁ¿ +uck ed +& lt +d ar +åİĮ æģ¶ +V s +ĠProv ince +Ġsho ppers +ĠPhot oshop +_App Compat +Ġaw ake +Ġdeliber ately +( end +Ġh ated +ç» ° +æĻ ¾ +ĠMark down +Ġcrim inals +Ġ æ¸ħ +åī§ ä¸Ń +éĻĦ å½ķ +Ġsc enery +å¤Ħ åĪĨ +åľ¨ å¤ĸéĿ¢ +.D ec +产 åIJİ +ĠReport ing +Ġwhere by +Ġundert aking +Î ¾ 
+äºĨä¸Ģ çķª +OV A +Default s +ĠR osen +Ġinv ites +-gener ation +P aper +vers ions +Ġtrust worthy +ãģ Ĭ +çħ³ æ¶Ĥ +K nown +ãĢģ åĽ½éĻħ +os i +çŁ¥è¯Ĩ çĤ¹ +Ġarg uably +Ġnormal ize +ĠBe an +te le +Ġ! ! +çĥŃ çĥĪ +ĠC elt +æĺİ çıł +Ġemerg ence +æĭĩ æĮĩ +åĨĻ çĿĢ +å·§ åħĭåĬĽ +det ail +ï¼Ł è¿ĺæĺ¯ +é²ľ æĺİ +ĠUn its +, å¿ħé¡» +è¾ĵ äºĨ +ro ots +az ure +éĻįä½İ äºĨ +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ +Ġlear nt +éĩij å±± +å¨ ´ +ĠInst ance +ï¼Įä½Ĩæĺ¯ ä»ĸ +. it +iqu it +Ġex emption +ĠC urt +- select +or ient +r ators +ä¹Ł åıªèĥ½ +æļ´ 鼨 +. exp +u pper +-p owered +å®ī å¾· +çͲ æĸ¹ +ĠPre view +Ġun available +æĬĵ ä½ıäºĨ +æĤ¬ æĮĤ +gener ate +Ïģ α +è¤ ļ +é¾Ļ çļĦ +çī¢ çī¢ +å·¨ 头 +çģ« å±± +E r +AT IVE +Ġ æĽ´ +Ġ èĤ¡ +D ataset +_ ct +ub ric +_d epth +k ernel +ĠMil waukee +Ġadvert isement +ĠS equence +( byte +æĦı åIJij +âĸĪ âĸĪ +% ï¼ī +RE QUEST +çļĦä¸Ģ çĶŁ +Ġneg lected +å±ŀäºİ èĩªå·±çļĦ +Ġcheck list +ĠIllegal ArgumentException +Ġl s +Ġexp iration +ĠC rypt +ĠP od +ĠR ico +æĭ¼ æIJı +process ing += p +_ ne +project s +Ġ 鼷 +æ°´ ç®± +Ġadd ict +Ġl anes +Ġ( ` +èĪ ľ +Ġ ä¼ģä¸ļ +æĥ³è¦ģ çļĦ +Ġillness es +j k +_c ase +ĠI A +Ġrout inely +.s ession +Ð ¡ +ä¸Ģ åħ± +ST IT +Ġve in +Ġ= ================================================================ +ï¼Ł ï¼ģ +ĠPortug uese +ä½ĵ çİ°åľ¨ +ind y +Ġintu ition +âĢĿ ä¸İ +, åīį +åħ»èĢģ ä¿ĿéĻ© +Ġcompl ained +K im +ĠBut ler +ì ŀ +Ġexpend iture +ä¹ĥ æĺ¯ +F etch +ĠRelations hip +Ġar ithmetic +ij k +M ade +PO INT +è¾¹ å¢ĥ +åŁŁ åIJį +Ġcivil ization +second s +ĠL S +ap ons +c ology +Ġ æĬķèµĦ +è¯Ŀ éŁ³ +ne a +Ġcur b +icular ly +ä¼ĹæīĢ åij¨ +Ġpred icting +: self +æĹ¥ åĨĽ +ĠW I +ĠG H +许 ä¹ħ +.N et +ĠGit Hub +ĠP ars +ĠD uration +ĠW ritten +B esides +é¬ Ł +Ġspont aneous +work er +as per +Ġsm ok +第äºĮ èĬĤ +Ġyou tube +< bool +ĠFound er +å®ŀçݰ çļĦ +ç«£ å·¥ +åĸ Ķ +åĤį æĻļ +. 
img +Ġs ailing +Sar ah +Ġward robe +ĠBas ically +çϽ äºĨ +æĸĩä»¶ çļĦ +ĠLaure n +é¢ĩ æľī +Ġfile Name +Load ed +ï¼Į åζå®ļ +av i +å½ĵ æĪij +ï¼Įä½Ĩ 她 +ĠD owntown +ï¼Į èĩªå·±çļĦ +峡 è°· +æµģ 转 +é¦ĸ è¦ģ +æª IJ +å¸Ŀ çİĭ +åīį æ²¿ +atern ion +å°± æĽ´ +ĠQ atar +åıĺ æĢģ +u est +Ġconvenient ly +.pro ject +Ġchar itable +æĶ¯ éħį +ĠF ear +ĠO m +CRE ATE +Ġpre y +讲述 äºĨ +Ġ æķ°æį® +ĠG ifts +æĺŁ ç©º +人 æµģ +Ġor n +ĠSh ore +Ġins ane +ple asant +ack ing +羨 çľ¼ +ĠCo aching +Ġbrid es +éģ ı +un i +è´¨ æĦŁ +æĹ© æĹ¥ +丢 äºĨ +Ġdeb it +Å į +åıĹ ä¸įäºĨ +. UI +è§Ĩ åĬĽ +ĠLet ters +éĻ· åħ¥äºĨ +Ġun related +çļĦ è¡£æľį +å°± è¯Ĭ +Ġprivile ges +åĻ ¢ +éĴ ° +å¼Ħ å¾Ĺ +ĠVictor ian +Ġle aks +ç«Ļ ç«ĭ +ãĢ ĩ +ĠBry an +h art +äºļ 马 +ĠApp arently +æļĹ ä¸Ń +Ġre per +Ġones elf +ĠBr ig +an other +ĠC AS +è¿Ń 代 +Ġinst itute +ç¼ł ç»ķ +Ġshe ar +转æį¢ 为 +Ġfract ure +feed ing +åĨľ èᝠ+Ġc itation +ĠID s +ç§Ł æĪ¿ +l ator +Ġad en +åħįçĸ« åĬĽ +æijĩ æĻĥ +ä¸į ä½³ +here nce +壮 大 +个 åŃIJ +ç© ¹ +Un less +it é +Ġn erves +ĠTop ic +ç»ıåİĨ è¿ĩ +Ġequ itable +Ġæľ¬ åħ¬åı¸ +x iety +Ġaccommod ations +éĻĦ 注 +Ġdo cker +ï¼Į å®īè£ħ +- pl +h one +in as +l iness +Ġsp iral +èģĮä¸ļ çĶŁæ¶¯ +Ġassert s +åѦ éĹ® +Ġinf il +æĹł å¿§ +_LO AD +伤 亡 +} & +ĠT ak +ãĢģ 建çŃij +ĠN ar +Y M +al ore +ï¼Įåľ¨ æŃ¤ +Ġconf irms +Ġ ঠ+Ġ ounces +/ ${ +UM M +( Z +Ġspec ifying +Ġlandsc apes +å¿ħ ä¸įåı¯ +å°± ä¸įèĥ½ +梦 å¹» +ĠC otton +Ġout dated +man aged +ĠCl uster +_N ULL +ĠQu est +ab ol +ĠMus k +åĵ Ĵ +erv ical +T en +æı ª +S leep +Ġh ose +æĭĨ è¿ģ +ĠW ILL +ĠG ender +åĸ· æ¶Ĥ +éģĵ ä¸Ĭ +Const ruction +å¯Į è£ķ +Ġdebug ging +te enth +ãĢĤ 让 +ĠP ixel +ãĢģ 举 +åIJ¬ åıĸ +ĠK oh +.C l +Ġparad igm +ĠDep osit +çļĦéĤ£ æł· +. 
Instance +åĪĺ å¤ĩ +èģĶ æĥ³ +ï¼Į 游æĪı +\ limits +.C md +RO LL +ĠH yd +hem atic +ĠChe ese +å¹² äºĨ +il ant +ĠOper ator +èŀº ä¸Ŀ +" { +å°ı åIJĥ +Ġstre ak +åĴĮ æĪij们 +-th rough +(b lock +èĦ± èIJ½ +ĠGl ad +ans on +Ġhope ful +éļ¾ æĢª +æł¼ å°Ķ +_st d +ĠSh oes +Ġret ry +Ġmed ieval +ĠD ow +以 å¤ĸçļĦ +Don ate +éĢĥ 离 +éĺ ľ +erg y +âĢľ In +Ġlong itudinal +Ġ è¿ĻéĩĮ +Ġpost ure +ãĢĤ 她çļĦ +Ġ æĶ¯æĮģ +çļĦå°ı ä¼Ļä¼´ +ç«¥ è¯Ŀ +ç¡®å®ŀ æĺ¯ +éĩı åĮĸ +Ġcompl iant +en ic +Ġorig inated +Ġ èİ« +ĠC arr +i ostream +Ġfreel ance +ĠD uty +ï¼Į çłĶç©¶ +ĠD D +U INT +arg uments +ĠI celand +Ġf aint +毫æĹł çĸijéĹ® +, X +ï¼Į éĩįçĤ¹ +Ġutter ly +è¿ij è§Ĩ +ĠUS ER +饮 æ°´ +Ġcol ony +çϽ éĵ¶ +æ¸ħæĻ° çļĦ +Ġfre ight +Ġlaw makers +ĠDep ression +éĺ¿æĭī 伯 +Ġ çα +éĵ¶ æ²³ +æİ¨ éĢģ +imon ials +Ġhook s +ĠImp lementation +Byte Array +{ h +Ġcor pus +éľĢæ±Ĥ çļĦ +Ġdirect ive +/p kg +Ġsurre nder +è¿Ļä¹Ī 大 +âĢĿ ä¸Ń +ä¼ģ åĽ¾ +èĨĿ çĽĸ +Ġre con +诸 ä½į +ar ial +å½ķ åζ +Ġunlaw ful +ast ian +ĠFam ilies +ãĢĤæľ¬ 次 +ç͍ åΰ +itut ing +AY ER +ham mad +а Ñģ +å¹³åı° ä¸Ĭ +Ġl ar +' _ +asp berry +ãĢĤ çͱæŃ¤ +Com pute +Ġcl ues +éĵ¸ éĢł +Ġanal ytic +Ġe u +ï¼Į æķ´ä½ĵ +is ot +åħ« å¹´ +in ers +è¿Ľ èĢĮ +olic ies +pe ctions +éĩį å¿ĥ +ĠCl are +ĠSer ge +Ġsampl ed +è¾¹ ä¸Ĭ +d ig +Perm issions +ï¼Į åħŃ +_st op +èĩª æĿ¥ +çΏçΏ å¦Īå¦Ī +su its +é² ¸ +Ġseason ed +åĮĹ京 æĹ¶éĹ´ +ĠIncre ase +, 两 +Ġjoint ly +Ġcr ush +in ished +Ġequival ence +Ġtra iling +çļĦ 温度 +W N +çŃ ± +Ġhazard ous +ĠMarc us +nav bar +å°± 说 +Ġphotograp hers +èµ ĺ +ill ery +ha o +-d ev +Ġdipl om +éĤ£ ä½ł +cre ment +å°¼ åħĭ +ĠCom fort +è£Ĥ ç¼Ŀ +Ġserv ant +uk a +ä¸Ģ ç¬Ķ +å¤į åıij +æŃ» åIJİ +_M ETHOD +çİ© èĢį +, å®ŀçݰ +ä»ĸ äºĨ +C AD +åħ¨ å±Ģ +åŃ µ +æİ¥åıĹ äºĨ +å¥ij 约 +çŁŃ æĹ¶éĹ´åĨħ +ï¼Į åIJ¬åΰ +b ench +ĠS ue +åħ¬äº¤ 车 +am ide +使 èĢħ +ĠSecond ary +igh b +Ġund erest +л Ñı +op a +Ġpay able +Ġform atted +( ", +ynt hesis +eval uate +( å¦Ĥ +Ġsp ike +/upload s +ĠS ed +ç»ıéªĮ çļĦ +lest on ++- +- +_s cope +ĠTer rit +ĠW idth +yl ene +大 å¥ĸ +æį® äºĨè§£ +Ġst rap +åĶ Ķ +Ġhaz ards +Ġsupp lying +Ġanonym ously +é«ĺ 
åİŁ +éĩijèŀį è´ŁåĢº +_con nection +Ġen larg +IC A +æħ° éĹ® +.H ash +Ind ividual +ur bs +âĢľ åĹ¯ +Ġaw aken +Ġ åĩº +éĺ Ļ +ä¿Ŀ éķĸ +èĢĮ ä¸ĭ +ul let +éī´ äºİ +ĠM K +ĠR ush +Ġab ol +ĠV enezuel +åĵģ åij³ +Man ufact +amp agne +. Logger +Ġperform ers +è¿IJåĬ¨ çļĦ +åºĶ èģĺ +æĸ° å¹´ +Ġpass words +çĥ§ çĥ¤ +å°± åĮ» +c ases +å¦ Ĭ +oph ag +Ġpiv ot +Ar row +æĢĴ çģ« +å© ¿ +Mod al +w b +ĠRe ality +ĠSp in +item ap +éŨ æĪ· +ãĥ Ĺ +ĠRel ief +æīŃ å¤´ +ĠNorm an +ĠAT P +ĠâĨ ij +主 è§Ĥ +el ia +Ġbod ily +对 ä»ĸ们 +ochem istry +ok a +çļĦ æĶ»åĩ» +ãĢĭ åį· +Ð Ł +um inate +æ³° å±± +r ity +(T AG +ï¼ĮåħĪ åIJİ +Ġval ign +æ¯Ķè¾ĥ å¤ļ +Ġmir acle +æĩĴ å¾Ĺ +éĻķ西 çľģ +Ġcred ible +is ch +Dis able +ï¼Į å¸ĥ +ä¸į å½Ĵ +Ġg t +Ġd d +_h as +# undef +Supp ress +ä¸Ń ç§ĭ +ãĢĤ éĿ¢å¯¹ +ä¿® åħ» +é«ĺ ä½į +_g lobal +Dis abled +çŁ¥åIJį 度 +æīĵ 车 +è°Ī è°Ī +è¿IJèIJ¥ åķĨ +ï¼Į主è¦ģ æĺ¯ +çŃĶ å¤į +ĠPract ical +ãĢĤ éĩij +åı¤ åŁİ +ĠT urb +Ġres olutions +Ġan terior +æĮĩ éĴĪ +aly ze +å¤ĸ ä¾§ +Ġabdom inal +åħ³ åį¡ +R ear +ï¼Įä¸Ģ æĸ¹éĿ¢ +_ENABLE D +Ġpain ter +ï¼ĮçŃī å¾ħ +è¸ı å®ŀ +umin ium +çŃī åIJĮ +åĩł æŃ¥ +pos able +Ġl ob +è¿Ļä¸Ģ åĪ» +es ar +Ġse afood +Ġto es +ĠPart ial +æĶ¶ åΰçļĦ +N or +å°Ĭ 严 +äº ¢ +on n +le af +çŃī æĥħåĨµ +ĠAnc ient +èŀº æ¯į +ï¼Įä¸Ģ 次 +Ġuncertain ties +Ġop i +ĠPe ak +ĠL ua +ä¹ĭ æĹ¥ +Ġsegment ation +ĠSoc cer +çѹ å¤ĩ +Un expected +Ġsim ulate +ï¼Į ä¾ĿçĦ¶ +声 ä¸Ń +Ġreli ance +Ġ éĿĴ +re z +å½ĵçĦ¶ æĺ¯ +ç»Ļ ä½łä»¬ +Ġsecure ly +Ġdecent ral +çļĦ 责任 +Ġexerc ising +Ġspec ulation +ab ove +Ġconstruct ive +** ï¼Į +Ġsp herical +s peed +æĥ³åΰ è¿ĻéĩĮ +Ġc ottage +ï¼ļ æł¹æį® +Ġgen res +ĠCle arly +Ġch ap +æŀģ 度 +çļĦ æľīæķĪ +ä¸į çα +n ick +- positive +Ġr ifle +comm unity +IM AGE +uc ing +Ġsad ly +Ġpredomin antly +Ġsquee ze +å®¶ åħ¬åı¸ +Ġgl am +red itation +æİ¨ åĩºäºĨ +ãĥ ¬ +æ³ī æ°´ +好 转 +az ione +ĠAny thing +_F ALSE +Ġmar athon +ĠWhe never +ç§° èµŀ +硬 çļĦ +_f ont +æ¾ İ +宫 殿 +åIJ¬ çļĦ +Ġinf erior +H am +ĠÎ ¿ +西 åħ° +å°± 读 +Ġang els +ĠApp lic +æŃ¦ 士 +è·Ł åīį +; / +Ġgra vel +ä»» åij½ +m and +èī² è°ĥ +ĠSte in +J oint +ogn itive +Ġp ant +ĠB 
ristol +æĪij çľĭ +Ġl ord +ç§ijæĬĢ åĪĽæĸ° +ĠConsider ing +å·¥ æĻºèĥ½ +Ġcig arette +çľī æ¯Ľ +Ġblow ing +=" { +Ġtri umph +ä¿¡ è´· + +Ġth igh +IV ATE +Ġde ed +Ġcand les +ï¼Į ä¹Ķ +arg est +Ġcred ited +çı Ĥ +èĬ± çĶŁ +çļĦ å¤ĦçIJĨ +. em +:% .* +Ġfun n +_HE ADER +ĠReason ing +Ġow ning +ĠLux ury +se cret +Ġg aze +ĠL OC +磷 éħ¸ +. Group +-f our +, R +e conomic +Ġfin es +æĸ¯ å¡Ķ +使ç͍ æĿĥ +Ġair lines +C ele +Ġex h +Å ¼ +E th +urre nces +æ¢ħ èĬ± +ĠD ipl +ç¬ Ļ +og o +>\ < +Ġsec ular +Ġc ensus +ï¼Į æĽ´å¤ļ +Key board +Ġscream ing +æİ¢ ç©¶ +åIJĪä¼Ļ 人 +ur istic +ĠâĢľ [ +æ¯ı èĤ¡ +b usiness +\ s +J O +çĥ· åŁº +åį³ æĹ¶ +}} ^{ +Ġ第 åħ« +ï¼Įå¦Ĥ åIJĮ +r ina +еР¹ +ĠN ames +楼 ä¸Ĭ +ĠR alph +Cred entials +åĵª ç§į +çĮ« åĴª +L G +èµĦäº§è´ŁåĢº 表 +Ġphilosoph ical +iv ities +, åºĶ该 +_M ODULE +éĻ¢ 士 +ies el +ĠC OP +æģŃ åĸľ +åijĨ äºĨ +æĴ © +èĥ ļ +,ä¹Ł 许 +æĸ° æīĭ +Ġex cluding +æı į +åύ åĴĮ +约 ä¼ļ +g ra +em e +Ġdisc ourse +K n +å®ļ çĤ¹ +å¹³ æ°ij +å½Ĵ å±ŀ +ĠOtt awa +& B +ret ty +Ġmem o +_P TR +ĠL is +é¢ģ å¥ĸ +Ġsil icon +ä¹ĭ 声 +å¤ĸ å½¢ +St mt +ठ° +èµ· æºIJ +.d evice +åį´ åľ¨ +Ġcur ved +éĻª åIJĮ +æ²ī æĢĿ +æĹłäºº æľº +ogene ity +ĠC IA +ĠCh uck +irect ed +ĠST EM +ĠP ac +C fg +ä¸Ģ æĬ¹ +å½Ĵ æĿ¥ +Ġlat ency +M Q +港 æ¾³ +Ġc ows +ĠC AM +éĿ¢ æĸĻ +x A +ĠT icket +æ£ī èĬ± +_m at +ine es +e atures +çļĦ æ¦Ĥ念 +Ġch ore +ĠSt ainless +Ġκ αι +ä¸Ĭ 游 +ï¼Į çĤ¹åĩ» +ec urity +Ġsk ull +éĩį 伤 +è·³ èĪŀ +_P AD +æĹł ä¸Ģ +Ġtaxp ayers +plot lib +äºĮ çļĦ +, è¿Ľä¸ĢæŃ¥ +Ġsty ling +un ts +ä¸įçŁ¥ ä¸įè§ī +_CONT ROL +ï¼Į åİĭ +磩 å½¢ +ĠMont gomery +Ġgrand children +å¸Ī èµĦ +-s creen +å½¼ å¾Ĺ +çļĦ æĿ¡ä»¶ +åº ĩ +trans fer +Ġtradem arks +个 çϾåĪĨçĤ¹ +æĻļ ä¼ļ +ucl ide +Ġun pack +éŀ ł +. 
', +åijĪçݰ åĩº +pos er +Ġdis connect +æĪ¿åľ°äº§ å¼Ģåıij +æīĭ èħķ +ĠF ut +inen o +åľ¨ æł¡ +æīĢ éķ¿ +Ġno od +é«ĺ é«ĺ +ĠC ause +个 大 +åķĨ æĪ· +ï¼Į èĵĿ +az i +ï¼Į åŁºäºİ +èı ł +et ed +ED I +Ġmult ic +Ġ' +è¿Ľ æ°Ķ +Ġar rows +Ġwrit ings +ï¼Į å¼Ģå±ķ +ï¼Įçͱ æŃ¤ +Ġcoord inated +Ġet t +ç«ĸ 缴 +æīĵ åĩº +res pect +æķ° é¢Ŀ +Ć ï¼Į +ĠPr im +导 轨 +追 溯 +éģĵ 士 +umb nail +_pro to +èIJ¥ æĶ¶ +j ah +, åĬł +æĬĢ èīº +æīĵ 个 +ĠReg istry +Ġdivid end +L ANG +çłĶç©¶ çļĦ +Ġgast ro +ãĢģ æķ°æį® +Ġalt itude +EE K +_p arse +å¾Ģ å¤ĸ +f ony +ĠB oss +m v +ä¸ĭ çıŃ +μ m +Ġbuff ers +ĠNAT O +U Y +.w ith +Ġhyd raulic +ay an +Ġscholar ships +Ġexpon ent +. au +obb ies +Ġcompl aining +åľ¨ 第ä¸Ģ +Ġw reck +Ġtr ump +_ex it +um or +ä¹łæĥ¯ äºĨ +Ġthat s +User Id +ĠSand y +æŀ¢ 纽 +m q +High light +èĥ½ æľī +Ġkil ometers +Ġcommod ity +C atalog +ï¼Į çħ§ +Ġimpl ants +Ġmac roph +åħ¬å¼Ģ çļĦ +N atural +è¿ĺæĺ¯ æ¯Ķè¾ĥ +s udo +Ġ å§ľ +set ting +Ġto ast +Ġcompet itor +ï¼ī æĺ¯ +. It +æĢĢ ä¸Ń +é© Ń +Ġflu ids +è̦ åIJĪ +.dec ode +Ġinter im +il let +ï¼Į èī¾ +è¡ ħ +é«ĺ æĸ°æĬĢæľ¯ +Ġfresh ly +çIJĨè§£ åĴĮ +аР· +ãĢģ ç»Ħç»ĩ +ĠBul let +: A +vert ices +_S UPPORT +ï¼Į 享åıĹ +Att achment +Inst ant +ĠScript ure +ament o +çĪ¶äº² çļĦ +get s +Ġa ka +( entry +计 æķ° +develop ment +Ġpol ished +Ġell ipt +Ġhon ors +.in stance +å¥Ĺ è·¯ +ĠCover age +æĭ Ļ +Ġstuff ed +Ġdescript ive +Ġbud dy +. 
Char +Ġine ffective +èµİ åĽŀ +Rec ords +ge o +èĦļ æľ¬ +Ġsummar ized +ãĢĤä¸Ģ æĹ¦ +è¿Ļ ä¹Łæĺ¯ +mut able +çī¹ æľīçļĦ +Ġunp redict +ï¼ĮæĪij åĴĮ +Ġf ry +D s +k et +å°Ĩ 被 +: C +ou g +ĠA str +[ edit +-n egative +人æīį åŁ¹åħ» += d +ãĢģ é£İ +马 æĿ¥ +id y +uzz le +ï¼Į è·¯ +çľĭ 书 +ä¸ī 个人 +b asic +Ġpolar ization +[ type +ĠR ib +ell ery +æĿł æĿĨ +G ar +éĺ¶æ®µ çļĦ +( Type +è¨Ģ ä¸į +Ġrec urring +ï¼Į缴 èĩ³ +Ġexpl ode +ãĢģ çĥŃ +\ f +plan es +AN A +æŃ ¼ +åĿĩ ä»· +comp any +Ġc inem +Ġdead lines +ĠCy cle +ï¼Į å¾Īå¤ļ人 +Null Or +ert on +ï¼Į æľ¨ +Ġlug gage +å¤ĸ åįĸ +åĨĻ éģĵ +Ġ èĢħ +åĴĮ 个人 +ĠTr inity +, 人们 +_IT EM +_F UNCTION +ĠNS W +Ġerr one +/ q +Ġdiscre p +Ġpract itioner +çķħ éĢļ +ĠXCT Assert +.D is +Ġm ills +åĨ· åĩĿ +Al ong +ĠD ell +为 ä¸Ģä½ĵ +ĠX L +Ġav iation +ĠDesign s +-m odel +ãĢĤæŃ¤ 次 +comp ress +å®ŀ äºĭ +äºĨä¸Ģ éĺµ +fol k +ä¸ĭ 令 +Appe arance +Ġsatisf actory +ĠDes ktop +Ġcertain ty +ĠSt ick +Ġf res +人 æīĢ +æĩ ¿ +Ġmight y +ï¼Į ä¸ĸçķĮ +Ġste ak +gener al +Ġne p +èĹı åľ¨ +LOB AL +over flow +西 çĵľ +Ac ad +he ng +map sto +Ġhilar ious +Request s +ä¹ł ä¿Ĺ +çĿĢ å¥¹çļĦ +it ant +am os +des ign +Ġmel an +Ġpro active +Ġhook ed +Ġacc us +.pro ps +ĠW ORK +说 è¦ģ +R edd +f en +ĠF lower +æľĿ åIJij +è´§ è¿IJ +de ep +ç¡®è¯Ĭ çĹħä¾ĭ +x E +S us +Ġprofess ors +Log ic +æ·ĭ æ¼ĵ +comm ands +PH A +ent ities +Ġapp rent +Ġqu art +ĠN est +Ġ çŁ³ +ï¼Į å¤į +å°± å¾Ĺ +Ġparagraph s +ĠBath room +å°Ĩ 士 +éª¨éª ¼ +ĠE instein +ä»Ģä¹Ī åľ°æĸ¹ +Ġ// ! +å± İ +Ġt b +ys on +çģĮ æºī +Ġw ont +çļĦ ç»ıéªĮ +éĢļ è¯Ŀ +çŁ¥éģĵ ä»ĸ +Ġν α +Ġ{ }, +Ġ? 
> +âĢľ What +ĠRe asons +l ash +ĠG ross +失 èIJ½ +æĪ´ çĿĢ +ĠFund s +ĠQ UEST +o os +åĴ ¦ +ĠBob by +m ind +om ore +éĽĨåĽ¢ çļĦ +åĿĹ çļĦ +åĨį è§ģ +ãĢĤ 被 +æł¸ æ¡ĥ +_s ymbol +Ġk an +Ġdi abetic +G EN +Ġcaut ious +: The +åζ 约 +Ġorth ogonal +_ impl +ĠS ara +ĠBl vd +ĠE lements +ï¼Ľ æľī +è¡Ģ çļĦ +æŃ¢ æįŁ +è½´ 线 +Ġhome page +imb abwe +- error +- web +Ġwas ting +ï¼ī çŃī +ĠAdminist rative +, ä¿ĥè¿Ľ +åļ İ +og an +.D es +ĠBre ast +Ġst orms +M ODE +å°± èĥ½å¤Ł +rid ges +S el +天 åIJİ +Ġguess ing +\ sum +ãĢĤ 缴åΰ +Ġsepar ator +ĠInd ependence +Ġf ats +æīŃ çŁ© +Ġbless ings +å¤ĸ åľ° +æľīæīĢ å¸®åĬ© +ï¼Ľ åIJĮæĹ¶ +.w ikipedia +Ġgas oline +ï¼Į åģļåΰ +Ġprov incial +å±ı éļľ +åĴĮ ä»ĸ们 +Ġrecru ited +.r and +段 çļĦ +ĠIP v +ĠS pencer +ĠH ob +os omal +æĻº åķĨ +ason ic +Ġrein force +ä¸Ģå®ļ ç¨ĭ度ä¸Ĭ +Ġh ype +In ventory +Ġc affeine +èĢĥ è¯ģ +ï¼Į è¢ģ +åºĶ åľ¨ +建设 å·¥ç¨ĭ +_G R +缴 åįĩ +(d evice +Ġpers istence +W rap +ĠBroad cast +- energy +ãĢĤ æĹ¢çĦ¶ +æĭī ä¸ģ +æıĴ ä»¶ +Ġcl erk +ç¬ ĥ +=" - +ç²¾ ç¾İ +Ġhead phones +m any +åĬł åī§ +( stream +it ures +oid al +éĶħ ä¸Ń +Ġtechn ician +å¤ļ æľĪ +ä¿Ŀ åį« +Ġdet er +A x +del ay +æķħ 乡 +_fe ature +åıł åĬł +\ "> +è¿ĺ ä¸įçŁ¥éģĵ +è¿Ľ çļĦ +Ġ îĹ¥îĹ¥ +éĹ µ +Ġref usal +æĭ¨ æīĵ +ĠOpen ing +pos ing +Ġfold ed +Ġintro ductory +Ġtang ible +æīĭ æĮģ +ĠR OM +ĠSh ift +模 çī¹ +ine z +æİĮ æŁľ +åĩ¶ æīĭ +, ä¸Ģå®ļè¦ģ +- action +亲 çİĭ +å²ģ æĹ¶ +, è¿ĻéĩĮ +伸 åĩºæīĭ +. Time +Ġmom s +Ġalter ations +å°¼ 奥 +Ġrec urrence +çĶŁæ´» ä¸ŃçļĦ +æľĢ éĩįè¦ģ +ĠD iversity +ä¸įåľ¨ ä¹İ +ĠEV EN +Ġlapt ops +ĠC rew +ï¼Į ï¼Ī +( child +Ġn oun +çݯ çIJĥ +çĭ ŀ +hy per +. 
$$ +S ol +Ġp ix +æīĵ æŀ¶ +h aus +éĩ ī +éģį å¸ĥ +æĿij å§Ķä¼ļ +èľ Ĺ +åįķ 项 +æĪª éĿ¢ +Ġadvis ors +Ġl amps +-s ided +第äºĮ çϾ +H F +ĠS SD +æŁ¥ å¤Ħ +ãģĵ ãģ¨ +âĶģ âĶģ +Ġev angel +å¤ļ ä¸ĩ +M us +Ġfact ories +Ġengine ered +-cent ered +æĬĹ æĹ¥ +Ġ åħī +Br ush +Cong ratulations +æħ ij +æĢĿ æĺİ +ĠFab ric +st rom +Ġcompens ate +Ġdeb ates +ĠBre nd +æ¯ı 人 +çļĦä¸Ģ ä½į +ä¹³ èħº +å¦Ĭ å¨ł +ur an +fl ies +Ð Ĵ +å¼Ģ çĿĢ +Ġ è¿Ļæł· +ĠP ET +Lower Case +æ¼Ķ 示 +Ġhalf way +ĠDiv ine +ç»´ å°Ķ +åѦçĶŁ 们 +-re peat +( host +othe red +e i +m obile +æľī ä¸ī +CL A +å¼Ĥ åľ° +ch unk +ir ie +ys et +Ġremodel ing +ĠE mm +èĢĮ æĪIJçļĦ +åIJij åIJİ +ct ree +Ġcream y +b ies +çͲ çĬ¶ +Ġhas ht +Ġdepth s +/ common +V ERSION +.con cat +ĠC reation +Ġn arc +and ez +é¢Ĩ导 å°ıç»Ħ +Ep isode +ĠDis ability +ï¼ģ æĪij们 +_en code +Ġcr ashes +ĠJo el +èĦ¸ çļĦ +æĬ¢ æķij +Ġdirect ing +éĥ½ æĥ³ +S y +æ¶īåıĬ ä¸Ģç§į +çļĦ è®®æ¡Ī +Ġcry stall +ĠRes cue +ĠCal gary +æĪij ä¸įçŁ¥éģĵ +çĤ¸ å¼¹ +ĠAl loc +ï¼Į åIJĮæ¯Ķå¢ŀéķ¿ +èĵ ¦ +comp ile +Ġdef ender +Ġspecial ize +åij¨ ä¸Ģ +åĴ Ĩ +Ġexecut able +æĭ¼ æİ¥ +ĠF ib +çľĭ ä¸į +Ġback drop +isco very +Ġtrack er +ex ecut +d z +Ġdw elling +åѦ åŃIJ +` s +ĠT P +Ġfart her +æµ· é²ľ +Ġt ide +Ġmicro phone +Re ally +ĠGarc ia +æĹ¶ éĴŁ +Ġmon sters +ĠRet irement +. 
cont +Ġfram ed +Ġsp ill +EX P +lick r +åħ´ èµ· +åĮĸ äºĨ +Ġsatur ated +Ġdom inate +[ T +-se cond +Ġveget arian +åijĺ çļĦ +ï¼ĮæľĢ 大 +å·¡ è§Ĩ +ĠBenn ett +μ ε +r é +ï¼Įåľ¨ è¿Ļ个 +Ġtim ed +Ġtext book +ĠRem oval +ĠAlex and +Ġv ault +ĠO re +Ġcelebr ations +åĮ» åѦéĻ¢ +ĠAn imals +ĠH annah +Ġcoll apsed +æĮĤ çĿĢ +ĠSe ed +ĠG S +ĠComp ared +cl one +Ġb ou +éĵ ł +便 ä¼ļ +Ġbal cony +æĺ¯ æł¹æį® +污 æ³¥ +ĠPort able +ĠN as +红 å¤ĸ +Ġterrit ories +.en code +ï¼ĮæĪij ä¸į +å¸ · +ĠGl asgow +éħ° èĥº +Ġp oured +ï¼Į åºĶ该æĺ¯ +è£ħ ç®± +ĠMc L +å¾Ī æĥ³ +Ġdestruct ive +Ġpolynom ials +ĠCap acity +Ġadoles cents +è§£ 说 +Ġmult itude +own s +Ġow ed +\ beta +å±ķ示 äºĨ +Ġs ab +丰 çͰ +- tech +è¥ Ł +[] [] +poss ible +å£ ¬ +Ġun authorized +ÑĨ и +ï¼Į åĪ«äºº +Ġch ic +ĠRe uters +Ġgen us +Ġinstall ment +iber nate +ec d +ĠNov a +Ġs v +Ġm aid +Ġind oors +.z ip +Ġce il +rit able +.B ack +ĠRel igion +æķ´ ä½ĵçļĦ +çĬ¶æĢģ ä¸ĭ +_s plit +F lex +Ġpol es +ĠPow der +Ġgift ed +ä¸ļ çķĮ +id ian +ĠApp rox +Ġallerg ies +-f irst +ä¸ī æĺŁ +x D +åģļ æĪIJ +Ben efits +åIJĦèĩª çļĦ +ĠA ub +è¾Ĩ 车 +ĠEngine ers +mat ches +çĸ¯çĭĤ çļĦ +ç¨Ģ éĩĬ +ï¼Į æĿ¥åΰ +-f illed +ĠGr ass +å®Ľ å¦Ĥ +çIJĨæĥ³ çļĦ +Ġproud ly +Ġaccess ory +à¹ Ģ +ĠD riving +æĮĩ æİ§ +æ¯ Ļ +ĠN L +ian ces +ç͍ æ³ķ +_ACT ION +æ» ķ +è®® ä¼ļ +Ġdynam ically +Ġc afe +Ġo ps +ĠW riters +ãĢĤ æŀĹ +B lob +.ch ildren +Ġdiam onds +æĥ ¬ +ener ative +Event Handler +we ed +ä¸Ń å°ıåѦ +ĠWe alth +E mer +è¤IJ èī² +. response +Ġvers a +游 è§Ī +积æŀģ æĢ§ +Ġanaly se +Ġopt ed +çļĦä¸Ģ 大 +çϾ åIJĪ +ĠAccount s +mm m +N ear +æĪIJçĨŁ çļĦ +. ]( +Ġ__ _ +le arning +Ġcon ject +ĠW alt +/ js +Ġsuccess es +Ġlo os +br ush +cy cles +å¼Ģ åºĹ +. 
As +ĠN ike +ï¼ ĺ +åī¯ å±Ģéķ¿ +ĠG y +æ¶Ĥ æĬ¹ +) åľ¨ +ç½ij çļĦ +Ġf f +U i +Ġcol i +(' - +æķ° éĩıçļĦ +ä»İ ä¸į +Ġw olf +Ġn omin +é«ĺ è¡Ģåİĭ +#### ## +_W ITH +LE AR +Ġcont our +AT ES +Ġmention ing +Public ation +举 ä¾ĭ +R h +çĶľ çĶľ +ï¼Į 令人 +æ¢ħ 西 +ãĢģ éĿĴ +Ġ åı¤ +æ© Ħ +_ interface +ä¹IJ éĺŁ +ĠPolic ies +ll i +éģĩ ä¸Ĭ +ra ces +Ġstar ring +Ġass ays +ï¼Į çĶŁäº§ +æľįåĬ¡ ä¸ļ +int endent +-ass ociated +Ġjust ification +ĠSt ir +éĹ» åIJį +车 åĨħ +åĽĽ æĸ¹ +ä¸Ŀ 毫ä¸į +Ġwra pping +ĠHur ricane +ĠSU CH +c odes +use package +çİ º +ï¼Įåıª æĢķ +åĸĿ äºĨ +- derived +ĠLiber al +Ġinherit ance +Tr im +help ers +и в +éļĶ çĥŃ +åģľ ä¸ĭæĿ¥ +\ ) +Ġle mma +plic ations +ä¸įæĺ¯ ä¸Ģ个 +Ġfert ility +M EM +dec ode +Ġhead aches +ãĢģ å±± +åIJ¬ è¿ĩ +ãģ ¡ +ç¥ŀç»ı ç½ij绾 +ĠB eta +ur ve +çŁŃ è§Ĩé¢ij +Mat cher +-function al +Ġp eg +Ġstead ily +h alf +è¿ Ħ +ĠD rew +osex ual +Ġsuccess or +ĠQueens land +ĠPublic ations +ĠE A +ĠG mbH +Ġcommit ting +hol m +åĵ Ĩ +Ġvers es +. å¦ĤæĿĥåĪ©è¦ģæ±Ĥ +人 马 +åĪ© çī© +å¹´ å¤ľ +åħ¶ä¸Ń ä¸Ģ个 +Ġv oor +æ±ī åŃIJ +{{ { +im an +ï¼Į æĿ¥èĩª +çļĦ æĸĩ竳 +Ext ended +èᝠæĿIJ +è·Į å¹ħ +æĴŃ ç§į +() { +ĠTam il +å¹³ æķ´ +vol ume +T asks +s ervices +ri ad +代 è¨Ģ +ãĢģ åIJİ +Ġlim b +å¹¿æ³Ľ çļĦ +åĩ» æĿĢ +æĦŁåıĹ åΰäºĨ +Ġp ave +æ²³ éģĵ +ä¹Ļ æĸ¹ +ympt oms +ç¥Ń ç¥Ģ +t ex +Ġfresh man +Mut ex +d g +éĴ ³ +Ġinterrupt ed +ad j +ort al +cor rect +Ġshock ing +Ġ 举 +_IN PUT +ag ging +å¢ © +ï¼Į å§ĭç»Ī +Ġjud ged +ĠL ak +çľĭ 她 +Ġignor ance +_E QUAL +ĠN atal +Fil ters +Ġover t +èĦ ĵ +åijĺå·¥ çļĦ +amp us +) }, +.e ach +\in cludegraphics +lu ent +. 
With +Ġgentle man +çĩĥ æ²¹ +å°Ħ æīĭ +ĠMan age +ĠI MP +ä¹Ł è¶ĬæĿ¥è¶Ĭ +便 ç§ĺ +Ġaff irmed +ĠH ait +âĨ ij +é¢Ħ 订 +.d rop +Ġman eu +èµ° åΰäºĨ +hav iour +ĠD art +èĵĿ èī²çļĦ +Ġg n +ä¹IJ æĦı +ĠCh an +åģļ æ¢¦ +ob il +ĠPow ell +è¤ ª +ĠB oo +rec iation +çļĦæ°Ķ æ°Ľ +(d f +* ]{} +å¯Ĥ éĿĻ +ä»ĸ ä¼ļ +Ġs es +çĭ Ī +/ o +æ§ IJ +Ġbo iling +èıĬ èĬ± +Ġv om +æ·± æ¸Ĭ +trans late +èī² ç´ł +Ġtort ure +Ġ **** +ĠS ox +lo x +ï¼Įä¸į éľĢè¦ģ +ĠS ullivan +-c ase +ĠApp oint +ï¼Į åħ³éĶ® +un gs +_ADD RESS +Ġland lord +Ġv ibe +èįī èİĵ +Ġre open +èģļ æ°¨ +å¼Ģ æľĹ +t ensor +{ W +in se +Ġcommit tees +Ġprosper ity +ãģķ ãĤĮ +å¿Ļ çĿĢ +ĠReyn olds +an ity +ĠN om +åľ° å½¢ +è¹ Ļ +Ġpat ents +ou x +ierarch y +ĠF ees +Ex amples +âĢľ 天 +ĠF ishing +æĻĵ å¾Ĺ +ä¸įä¼ļ åĨį +äºĨä¸Ģ æĬĬ +表åĨ³ æĿĥ +_m ean +Ġn r +Ġte e +Ġsovere ign +col s +Ġdis asters +AD A +Ġhost ile +Ġcost umes +Ġbe ams +Ġcomprom ised +v b +ĠC ult +ãĢģ 交éĢļ +Ġtransfer ring +ï¼ĮåĬł å¿« +( default +- us +Ġan omal +åζå¤ĩ æĸ¹æ³ķ +Ġdestroy ing +æīĢ示 çļĦ +Ġw itch +or ah +æĸ¹åIJij çļĦ +\ mu +çŁ ¶ +ĠN amed +ï¼Į æĺ¾å¾Ĺ +_G ROUP +rais al +æĢ» æķ°çļĦ +建 äºİ +View er +åĿļ åĽº +ĠR angers +åIJ¬ åIJ¬ +身 åīį +åľĪ åŃIJ +Through out +T okens +ĠLegisl ature +Ġp u +al ks +ĠIdent ify +管çIJĨ å±Ĥ +Ġvacc inated +çīĮ åŃIJ +èİ º +ĠCont est +check box +ĠF ake +ov ic +Ġcomm issions +Ġorb ital +ï¼Į ç§° +.A uto +Ġh one +Ġpal ace +.j ar +Ġad ip +enumer ate +ĠL ives +Ġobs essed +- client +F ALSE +Ġtest ify +Ġp ok +åı¯ è§Ĩ +Pr imitive +_p ush +åŁºéĩij çļĦ +ĠTown ship +-c or +æĮĩ çͲ +è¸ ¹ +h aw +éĥ½ å°Ĩ +åħĭ éĩĮæĸ¯ +å®ĩ æĸĩ +ĠUt ility +Ġ åĩĮ +(m ethod +M otion +çļĦ åĬŀæ³ķ +Ġmil estone +on omic +éģĹ æ¼ı +Ġmean ings +ĠR EM +ĠLoc ated +ĠRec ruitment +æĢª åħ½ +Ġp ots +Ġbarg ain +ĠD ynamics +med iately +ï¼Į åĿIJåľ¨ +ĠCh arter +Ġworks heet +_DIS ABLE +ĠBrad y +åĵİ åijĢ +\ * +ĠSal v +ï¼Ł 为ä»Ģä¹Ī +è¦ģ å¤ļ +ĠN y +Ġdrive way +æŃ¦ æľ¯ +ĠFil ip +è¿ĻäºĽ éĹ®é¢ĺ +sp Net +æĪĺ èΰ +Ù IJ +ãĢĤ æĥ³ +Ġw er +Ġdep recated +Ġban ana +åĭ¤ å¥ĭ +ï¼Į å¦Īå¦Ī +Ġsh ipment +ID I +_ rt +Ġres ent +Ġappell ate +Ġpr incess 
+çªĹ å¸ĺ +è¿ŀ æĿĨ +Ġco res +æ¤ Ń +m ble +ĠA IDS +_a uth +od ic +人æ°ij æ£Ģå¯ŁéĻ¢ +ĠCreat or +Ġo wing +ĠT EXT +аР´ +ĠFlow ers +è²Į ä¼¼ +Ġa per +Ġn ause +éļIJ 约 +侦 æŁ¥ +ĠBr ands +st own +ĠS ync +usp end +, å¿ĥ +ĠCl os +Ġantib iotic +Ġdeal ership +æľĭåıĭ çļĦ +ĠW are +so lete +Ġu id +ĠP orter +Ġdist ract +Ø Ń +çĸ¯ äºĨ +ĠE y +Ġt ournaments +èµ· ä¹ī +ï¼Įä»ĸ 说 +æ¥ ŀ +ri z +ĠFac ility +Ġprocure ment +.n n +, æĮīçħ§ +ĠSpe aking +_m etadata +Ġabb rev +é¤IJ é¦Ĩ +Ġlif es +ĠCh ang +NotFound Exception +ĠHe ating +-p ack +Ġnight mare +face book +ï¼Į åıijå±ķ +H ours +åĪĨ ä¹ĭä¸Ģ +_d ataset +éĥ¨ åĴĮ +ä¸į æĩĪ +æľĹ æĻ® +.s ystem +Ġhuman itarian +custom er +al ty +åĩĨç¡® çļĦ +çİ ¥ +Ġ/ > ", +Ġsuspect s +ï¼Ł æĺ¯ +å®ŀ è®Ń +Ġint estinal +uclide an +ï¼ĮçŃī åΰ +太 大çļĦ +x l +ĠG W +ï¼Į æ¯Ķè¾ĥ +ob acter +æĢĿ 绪 +ĠZ ach +è¯ķ é¢ĺ +ĠColomb ia +, ç»ıè¿ĩ +âĢ Ĭ +Ġref lex +Ġmar itime +Ġcomp ose +SEQU ENTIAL +wic hes +å¼Ģåı£ éģĵ +er ate +iv ot +ä¸Ĭ éĻIJ +Ġkid n +å¼Ĥ æĢ§ +ĠH ang +mid t +ar cer +çļĦ åĦ¿åŃIJ +Ġmurd ered +è¨Ģ æĥħ +Ġfellow ship +èģĶåIJĪ ä¼ļ +ãĢģ çα +æłij çļĦ +ãĢĤ éĢļ常 +大 åıĶ +_N ODE +P ressed +iche ver +å¬ ī +ĠN GO +ç½ij æĺĵ +ãĢĤ 使ç͍ +计åĪĴ çļĦ +ĠR TC +ĠCons ervative +Ġelev ator +ä¸ĭ è¡Į +T exas +b ee +è¿Ļä¹Ī å¤ļå¹´ +Ġcan s +ĠSc roll +cal c +å̼å¾Ĺä¸ĢæıIJ çļĦæĺ¯ +r untime +æ±ī åŃĹ +. RE +Ġread able +é»ij çϽ +Ġcond o +ï¼Įè¿Ļ æĹ¶åĢĻ +ãĢģ åĽ½ +æĬ± ä½ı +Ġsle pt +o qu +çļĦ åı¯èĥ½ +ä¹Ł ç®Ĺæĺ¯ +ãĢĤ æ°´ +_c ounter +Ġtrib al +Ġcontrad iction +Ġfundament als +管çIJĨ éĥ¨éŨ +宣 讲 +åĵŃ æ³£ +æĹ¶ è¦ģ +éģĵè·¯ ä¸Ĭ +å¹ Ĥ +K evin +è¿Ļä¹Ī 大çļĦ +R outer +ï¼Ł åľ¨ +Ġ 说å®Į +åĵ © +Ġl ace +ĠPain ting +z u +ãĢģ æľ¨ +ĠL IB +cl osing +cur r +çݲ çıij +çľģ 份 +湿 润 +ĠMS G +æ°Ķ åij³ +ĠRom ney +è¿Ļ å¥Ĺ +å¢ŀ è¿Ľ +åŁİ éŨ +Ġdefin itive +Ïģ ι +建设 åĴĮ +è¿Ŀ èĥĮ +{ l +' ): +æľī 好 +ĠFeature d +Ġrec ursive +-h ost +åļ£ å¼ł +AT ER +éĺ´ æ²ī +os cope +äºĮ çŃīå¥ĸ +çīĩ åĮº +_d omain +G V +æ¦ľ æł· +åºĹ éĿ¢ +Ġm artial +Ġyog urt +Ġbad ge +. 
the +Ġrational e +if ty +Ġamb ition +c url +èµ· éĩį +å°ĸ éĶIJ +å°± è·Ł +é¦ĸ 页 +Ġexc av +ãĢĤ æīĢæľī +ï¼Įåı¯ä»¥ 说 +G round +Ġchrom osome +æ°¨åŁº éħ¸ +Ġspl ash +_FL AGS +åŁĥ å°Ķ +èį Ķ +åĺ² ç¬ij +Connect ing +ĠFac ilities +èĢĮ 被 +æł¼ éĩĮ +st ead +Ġtour ing +æij¸ çĿĢ +âĢľ äºĶ +Ġno isy +% ( +Ġl n +éĽ ı +微微 ä¸Ģ +ï¼Įä¸Ģ èĤ¡ +imm une +çĸ ¡ +ĠK ap +æĥ ® +cons umer +åģļ çļĦäºĭæĥħ +und ed +ĠM LS +æĶ¯ éĺŁ +ä¼ĹæīĢåij¨ çŁ¥ +] ", +ï¼Į åĩŃ +æĸĩåĮĸ éģĹ产 +Ġballo on +_d river +N ut +Ġ rip +Ġdiv or +Ġreflect ive +æĹł è¾ľ +åľ° 对 +at ra +æĥĬ èī³ +ĠValid ate +å¹´ èµ· +主 æķĻ +ï¼Į 空 +Ch annels +åı¯ æ¯Ķ +ĠV ue +ï¼Ī ä»Ĭ +SE rror +çĭ¼ çĭĪ +Ġtra ction +ä¸įæĺ¯ 说 +çļĦ èĦ¸èī² +ĠN ine +æĪij æīĢ +g ars +å±± åºĦ +ä¸ĸ 人 +ĠJul ian +ï¼Į æ°¸è¿ľ +ï¼Į åĺ´è§Ĵ +éĻĦåĽ¾ 说æĺİ +çĥ ĥ +çļĦ èīºæľ¯ +åı³ è¾¹ +. âĢĶ +è¿Ľ éŨ +ãĢĤ ä¹Łå°±æĺ¯è¯´ +ä¸ļ æľīéĻIJåħ¬åı¸ +åķĨ è´¸ +Ġswe pt +è£Ļ åŃIJ +Ġpredict able +ange red +Ġrain fall +T s +墨西 åĵ¥ +bul let +c ot +çľģ å¸Ĥ +Ġattract ing +ĠSet t +: @ +-l ight +Ġ| \ +ĠSl ots +Ġemp loys +ï¼Į 伸æīĭ +ur as +Ġl ys +t ax +-p res +Ġo var +Ġins ist +æį® 说 +ç»Ĩ åĪĨ +ï¼Į éĹ®éģĵ +Ġun rest +ĠAt hens +Ġinformation al +åīįåĪĹ èħº +ĠAware ness +ä¸ĵ å¿ĥ +Ġallow ance +çļĦ ä¿ĿæĬ¤ +ĠLO SS +E ver +æľĢ åĸľæ¬¢ +_c lear +妥 åįı +g ru +Cook ies +_ AND +_ socket +à ° +Ġindex ed +D to +Ġins urer +èģļæ°¨ éħ¯ +ï¼ĮæīĢ以 ä»ĸ +om mod +çϾ è´§ +仿 羣 +ĠT rou +éĻª çĿĢ +osc opic +_F ILTER +æı¡ çĿĢ +ĠEdd ie +Quant ity +ĠI SS +ign al +ï¼Į ä¾Ŀ +èĩ³ æŃ¤ +end icular +æ»ļ æ»ļ +åĽºå®ļ æĿ¿ +åĮΠ奴 +act ing +d B +å·¥ 夫 +Ġ éĥij +åĩł åĪĨéĴŁ +LIN ENO +Ġdoub les +g ages +åIJį 声 +apt op +st ill +Ġmod ulation +åĮĸ ä½ľ +åĬ¡ å·¥ +ĠSign ature +ç ¶ +Ġnons ense +äºĨ æĪij们 +éĢł ä»· +é¥ µ +Ġsh out +æĥĬ æħĮ +ĠR ey +UT ION +Ġecosystem s +Ġc in +ĠLess ons +åĮķ é¦ĸ +Ġkick ing +Ġoxid ative +H istor +ĠÏĦ ο +å¡ŀ ç½Ĺ +计 æĹ¶ +ĠB und +ä¸įåģľ çļĦ +ç«ĭ çļĦ +å®Ī åį« +hel f +ĠR ide +_the me +ĠE as +头 æĿ¡ +èģĶç³» æĸ¹å¼ı +names e +in cludegraphics +ĠÎ ĵ +çĶ¨äºº åįķä½į +: get +Ġdetect ive +æī¿ 天 +и Ñĩ +Ġstud ios +稳 æŃ¥ +æĪĺ åĽ½ +ĠST ART +Ġpart nered +}} }} +Ġbl 
oss +ä¾ ® +Ġaver aging +Ġb izarre +ress es +ĠBlog s +( label +Ġpoll ut +Ġch asing +ï¼Į æĬķèµĦ +en i +) -- +åķĨä¸ļ éĵ¶è¡Į +åįĬ å¾Ħ +Ġkn it +ãĢĤ åħĪ +Ġhel m +.get Logger +åij¨ 转 +.S elect +çł´ è£Ĥ +ĠMod ified +ĠS ys +_set up +FRING EMENT +æ± ² +æ¸ħæ¥ļ åľ° +Ġske pt +B ur +is ers +, % +åĨ² çĿĢ +un less +O A +ber ger +reg s +Ġimag in +ĠPO SS +ry stal +ãģ Ľ +ç»ı çͱ +Ġwall paper +Ġregul ating +Ġcomp ost +åĽºå®ļ çļĦ +( ä¸Ģ +Ġinf erred +åĸ µ +äºļ马 éĢĬ +Ġexport ed +_f ind +о ÑģÑĤ +ï¼Įä¸į管 æĺ¯ +åħ¶ æīĢ +_ OS +èĩª åѦ +õ es +Cl one +ĠC ere +èĽ Ĭ +åıĪ è¯´ +_b ound +è´´ åIJĪ +çģ« çĥ§ +ï¼Į èĭ±åĽ½ +ĠHe ather +Ġlog arith +äºī 论 +u uid +Ġterror ists +_in ternal +ä»ĩ æģ¨ +ig her +åı£ åı· +å°ı 人 +å®ģ å¤ı +è§£æĶ¾ åĨĽ +ich i +( Int +met ry +M om +ç¦ģ å¿Į +çļĦ ç͵影 +åıª ä¸įè¿ĩ +Ġblack jack +. Query +Ġmembr anes +Ġtang ent +) ä¸Ń +Ġfair ness +ra il +åĬłçĽŁ åķĨ +çļĦä¸Ģ èά +åī ¿ +åıĹ å®³ +Ear lier +顺 便 +Ġvegg ies +èĢĮ è¡Į +Ġcompet ence +çļĦ åıįåºĶ +ï¼ ĸ +åĩłä¹İ æĺ¯ +ä¸ī æĺ¯ +un ches +ç¨ £ +Char les +ï¼Į éĢī +ank ed +åħĭ èݱ +åī¯ æĢ»ç»ıçIJĨ +ĠThe m +he ter +åıĺ é¢ij +Ġf ox +H ence +ĠS G +ĠAl fred +Ġwild erness +. 
åľ¨ +ĠA J +ĠCook ing +Ġtrou bled +IC O +ï¼Įæĺ¯ æĮĩ +çļĦ 马 +çĽ İ +çijŀ åħ¸ +ï¼ĮæĹł éľĢ +ãĢĤ å¦ĤæŃ¤ +Ġg rit +-M an +Ġdefect ive +Ġin active +对 çļĦ +äºĨä¸Ģ åľĪ +Ġinf rared +Part ition +ä¼ļ ç»Ļ +ï¼Įä¸Ģ è·¯ +Ġtraff icking +ĠFellow ship +el ian +æĿ¯ åŃIJ +: d +åĸĿ æ°´ +åı¯ åĪĨ为 +Ġr im +车 åİ¢ +æī§ 导 +ĠJ OIN +-t wo +ope z +ĠÎ © +ĠD EL +ĠEr n +Ġ% % +ç´§ åĩij +ãĢģ çĽijäºĭ +Ġfraud ulent +Ġdilig ence +: ) +西 éŨ +æī¾ æĪij +ĠGo als +è¯ķ åīĤ +ï¼Įä¸į æĥ³ +ĠS hen +åıij åĬĽ +Ver b +.s lice +Ġg amb +åıij æİĺ +转 è¿ĩ身 +Ġl hs +ĠC and +ox id +ĠC ock +in ish +oper ations +_d oc +ä¸Ģ å¿ĥ +ĠS essions +Ġinstinct s +ab lo +Ġsy ll +Com pl +ä¼ģä¸ļ åĴĮ +ĠS yl +çļĦ è¯Ńæ°Ķ +æ¯Ĵ ç´ł +æĬĬ ä»ĸ们 +Sm ith +h ydro +Ġcountry side +ãĢĤ çī¹åĪ«æĺ¯ +IF ICATION +è¿İ æĿ¥äºĨ +Ġ ä¼Ĺ人 +Ġμ g +p resent +å°ı åĦ¿ +ãĤ ı +, 缴æİ¥ +Ġcelebr ates +çε 士 +Abs olute +ar ct +Ġcommand er +b ands +Ġn inth +ĠWat ers +Ġb ir +_c ookie +Ġ č +f re +Ġsub sets +åł ° +ill in +Ġexp ands +_ edge +æķı æį· +ract ive +Ġserv ants +Ġtodd ler +Ġc ath +Ġannot ations +er View +æīĢæľī æĿĥ +en ne +æIJľ éĽĨ +夺 åĨł +代表 çļĦ +åħĥç´ł çļĦ +ĠNic ole +ãĢģ éĵģ +ï¼Į å¿įä¸įä½ı +Has Column +ä¸İ ä¸ŃåĽ½ +-t reated +åĺ² è®½ +模åŀĭ çļĦ +/ day +al gorithm +th ose +ĠRes idential +Ġtransl ates +æ¤ ° +åѦçĶŁ åľ¨ +Ġout lines +æ¯Ķè¾ĥ 大 +Ġforb idden +b ars +ĠArt ists +é² ¤ +ĠV II +Ġspokes person +S ex +å¤ĸ åĽ´ +ĠAr senal +é»ĺ å¥ij +ĠSpot ify +Act ually +G it +ï¼Į åŃĹ +block s +æĵ¦ æĭŃ +_f req +åĮ» åĬ¡ +Ġcere mon +Ġsimult aneous +æľī æĹł +饺 åŃIJ +ce il +ç¾ ģ +å¤į ä½į +ĠChap el +ĠGabri el +ĠOR DER +Ġbul b +èĵĿ çīĻ +Ġm ang +Ġ å¿ĥ +èİ· åĪ© +ï¼Į çİ°åľº +åıĪ ç§° +Ġnews letters +acer b +ог о +ĠâĢ ŀ +ra ps +失 æķĪ +æķĮ 人çļĦ +诱 åıij +åĪĨ è¡Į +H V +ç¥Ī 祷 +ï¼Į è°¢ +L es +å¸Ĥ å̼ +.A ction +éĢĨ 转 +se ek +_S OURCE +è¡£ æŁľ +Ġaccum ulate +Ġno qa +导 çĥŃ +Or g +Ġbl ot +E lem +天 ä¸Ĭ +( player +åľ° å¤Ħ +Ġboard ing +w i +Ġfest ive +Ġdisc ern +è§ ij +_ rect +ĠS or +Ġsm ells +K A +Ġpsych iatric +ĠCON SEQUENTIAL +ĠPal mer +大 åħ¨ +çͲçĬ¶ èħº +åĿļ ä¿¡ +Ġterm inology +ä¹Łä¸į è¦ģ +æ³¥ åľŁ +bin om +N i 
+Ġenc aps +åĨ» ç»ĵ +èĶ ij +è¾ĵåħ¥ 端 +oc ado +Ġit iner +config ure +che l +Ġhom etown +ĠM Hz +âĢľ æĸ° +æµ· çļĦ +ĠEld er +_C S +ï¼ĮéĤ£ ç§į +Ġapolog ize +ï¼Ł æĢİä¹Ī +ĠAff iliate +æĬĬ è¿ĻäºĽ +== ' +) ï¼ļ +ref s +æ± ķ +olid ay +Ġprep ares +ï¼Į çĽĸ +Ġpil ots +åζ æŃ¢ +ac os +对 ä¸ŃåĽ½ +åĶ ¾ +ĠS uff +( class +Ġadd itive +ĠAr rays +B bb +G Hz +Ġweigh ing +ch lor +ĠR ud +St reet +ĠK re +åŃĺåľ¨ äºİ +ĠD ale +vel t +Ġresid ency +ĠSh ield +åįİ ä¸½ +ĠExt ended +åĸĩ åıŃ +ï¼Į æĤ£èĢħ +ĠL iz +ä»Ģä¹Ī 好 +é¦ĸ 个 +åIJĽ 主 +U IT +Ġinit iation +_f u +åĴ¬ çĿĢ +-p ath +Ġ( $\ +éĿ¢ è²Į +ä¹ĭ åĪĿ +ĠH I +Ġdisadvant ages +_DE P +ä¸Ń å¤ĸ +Ġsc i +å¾ģ æ±Ĥ +åįł åľ°éĿ¢ç§¯ +为 ä¸Ģ个 +éĢļ äºĨ +Ġdischarg ed +P reference +ĠDecl aration +ãĢĤ åĶIJ +ch ard +éķ¿æľŁ èĤ¡æĿĥæĬķèµĦ +Ġd ub +Ġphen otype +ä¹Łæĺ¯ éĿŀ常 +åĪĽéĢł æĢ§ +è¯ļ å®ŀ +ĠM g +Un ity +dir name +red uc +éģ¥ æİ§ +che mas +Ġi Pod +ft p +ĠDem and +åĽ¾ 纸 +éĥ½ è§īå¾Ĺ +ĠTrans formation +æ¡Į åŃIJä¸Ĭ +ĠDE AL +and um +ä¸Ģ åĩ» +ĠComp ensation +Ġmotiv ate +Ġdr astically +Ġcater ing +书 éĻ¢ +Ġorgan ism +al is +ĠF riendly +аÑĤ ÑĮ +R ing +æģ¯ çļĦ +Ġsw allow +ĠW ool +è§ģ è§£ +Ġfraction al +all a +( old +Ġsummar ize +ä¸į å¿į +åħ¬ åŃĻ +ous el +ï¼Į ä¿Ŀéļľ +ĠS eller +St rong +å¾Ģ äºĭ +çݯ æ¯Ķ +çŃĶåºĶ äºĨ +L if +Ġstack ed +为 她 +ï¼Į 交 +Ġcompassion ate +åĭĩ äºİ +log ic +decl are +d ating +ĠP f +ie v +Ġscal p +æĺ¯ä»Ģä¹Ī æĦıæĢĿ +cal cul +å¾Ī æĺİæĺ¾ +ĠAppe al +æī§ æĶ¿ +æŁı æŀĹ +im ed +åĩł åįĥ +ï¼Į çĤ¹ +ãĢĤ å®ĥçļĦ +æĮ Ł +Ġsal on +ĠAUTH OR +åħ¬ éĩĮçļĦ +Ġpig s +éĺ³ åİ¿ +æłij æŀĿ +ĠAct s +ĠL ore +ï¼Į 举 +ill on +çļĦ åºķ +Ġ[ _ +ë ¥ +Ġ" +Ġ} ; +(' % +em u +è·¯ è¿ĩ +ãĢĤ è¿ijå¹´æĿ¥ +Ġtow ers +ãĢĤ éĤ£äºĽ +Ġli abilities +çļĦ åºķéĥ¨ +Ġstoryt eller +ĠTrans lation +ĠIs n +æİĢ èµ· +Ġepit helial +ï¼Įæ¯ı ä¸Ģ个 +ç¿» 转 +ĠMechan ical +M OD +对 éĿ¢çļĦ +Let ter +è¯ « +è¾¾ æłĩ +Ġcareg ivers +è¯Ħ å®ļ +/ share +ï¼Į æ²¹ +ãĢģ æĪĸ +ĠH ide +ï¼Įå¦Ĥæŀľ æĺ¯ +ï¼Į äºļ +ï¼ļ åħĥ +Ġcycl ic +ĠC ay +åĺĢ åĴķ +im ilar +æīĭ ä¸ĬçļĦ +mod al += a +ĠX X +è¯ķ åį· +D og +ĠPo etry +Ġref res +Ġs orrow +ĠCost s +bo at +ĠIn cluded +ĠC 
rit +Def ined +è¶ ¾ +æİ§åζ åľ¨ +æīį 对 +Ġsw ear +ĠT ail +or um +Ġst ared +th ank +å®Įæķ´ æĢ§ +ĠSac ramento +ĠTh ous +Ġdiss ent +æľª æĽ¾ +ĠIndian apolis +IP P +Ġcollect s +Ġub iquit +Ġgo at +.g raph +æ°Ķ è¡Ģ +ï¼Įæĺİ æĺİ +ĠP ets +ä¸ IJ +Ġha ul +è·¯ 人 +Ġb ilateral +å² IJ +模 ç³Ĭ +amb a +> ] +çļĦ ç¨ĭ度 +ĠD od +Ġen riched +ĠEar l +_con v +D AY +uss y +çļĦä¸Ģ æł· +ï¼Į èĦļ +le ad +ĠAc id +Ġupgrad ing +åĪij æ³ķ +IDENT AL +说æĺİ çļĦæĺ¯ +GL IGENCE +Ġmagn esium +æ½ĩ æ´Ĵ +Ġbreast s +FO RE +G ames +ve ctors +_ util +åħļåijĺ å¹²éĥ¨ +ãĢģ è´¨éĩı +ï¼ī ãĢĭ +Im agine +çľĭ éĩį +Ġair plane +T our +å¼Ģ çĽĺ +ç»Ń èĪª +å¸Ĥ 级 +èĩªå·± ä¹Ł +Ġund es +ĠGriff in +Ġkn ot +ä¼ł éĹ» += m +éĵº 设 +æł¹æį® æīĢè¿° +ä¸į对 åĬ² +im eters +.n umber +Ġbom bs +_N AMES +åѦä¼ļ äºĨ +åĵį äºĨ +çļĦ ç͵ +ĠA LS +ä¸İ æĪij们 +åijĬè¯ī 她 +F an +ĠM olecular +ĠTem perature +pack et +D OWN +å¼Ģ å¿ĥçļĦ +è®® åijĺ +Ġpurs ued +ìĿ Ħ +ĠP ig +Ġd ella +w ig +Ġ' ( +çĹ ¹ +çļĦ åħ¨éĥ¨ +com a +é¢ģ åıij +ĠM int +Ġpsych iat +ç»Ī æŀģ +ĠAff ordable +ĠA CE +ĠE yes +\ def +.R ep +artifact Id +ĠK ol +Ġendors ed +ï¼Į éĺ² +Ch ars +ä¿¡æģ¯ åĴĮ +Ġa we +Ġrook ie +åĽŀ åįĩ +Ġrec urrent +p ick +è¯ ħ +/s he +ä¼ļ éĢłæĪIJ +ï¼Įä½ł åı¯ +B aby +Ġm ais +Ġac rylic +èĥ¶ åĽĬ +ĠMat ters +åħ¬ åħ¬ +ĠAw esome +åľ° ä¸ĬçļĦ +çļĦ åįķ +Col our +Ġaccomplish ments +Ġp enn +ãĢĤ åĽ¾ +As ia +ï¼Įä½Ĩæĺ¯ åį´ +äºĨè§£ æĽ´å¤ļ +Ġemb raced +** , +b ags +U CH +åĻ ľ +.get Class +ç²ī å°ĺ +Ġst akes +Ġra z +Ġз а +_s upport +Ġ åĮĹ +Ġu h +é¸ ½ +w right +_c or +Ġre name +R ew +建 å·¥ç¨ĭ +ĠA uth +å²Ľ 屿 +ed uc +åħĭ åζ +[ l +List ing +Ġfore front +Ġprotect ions +ĠSh aring +Ġ å±± +èĬ± å¼Ģ +Ġrelig ions +ink le +ä½£ åħµ +DE C +ĠS ocket +Ġfe ast +ĠS ister +Organ ization +çļĦ 羣 +ä¸Ĭ æĺ¯ +Ġz a +ĠÑģ ÑĤ +ĠP AGE +Ġdef erred +o ji +Rev ision +Ġandroid x +im ony +ãĢĤ æĹ¶ +温 室 +/d oc +ĠP ricing +è¿Ħ ä»Ĭ +è¿« ä¸įåıĬå¾ħ +æļĸ æ°Ķ +Ġpl asm +_ AV +Ġl ump +Ġdisadvant age +. 
iter +Ġestab lishes +å¾· å°Ķ +ĠH C +è¿ij ä¹İ +ç§ijåѦ çłĶç©¶ +ν ο +iz oph +qu id +S CR +éĺ¶ å±Ĥ +ãĢĤ è¿ĺ +ĠG az +å¥ĩæĢª çļĦ +_ch ild +ĠLu is +æģ¶ åĮĸ +åıij æĶ¹ +Ġ åį´ +Ġmodel ed +ì§ Ģ +ĠAppro aches +å°±ä¸į ç͍ +Loc ated +Ġcritic ized +ĠH ire +é£İ åij³ +æĶ¹ æĢ§ +/ log +åıĹ ä¼Ĺ +Track ing +- plus +t ion +ĠH ip +ĠBro ker +ĠPot ential +Ġw s +Saf ety +Ġ 次 +å°Ĩ 对 +c ookie +è¿Ļ åĩłä¸ª +纸 ä¸Ĭ +ä¸įå¾Ĺä¸į 说 +C os +ä¸Ń ä¹Ł +Ġendot helial +.Comp iler +Ġretriev al +éļı æīĭ +at im +() } +ĠQ R +Ġcomplex ities +ĠM ile +mer ged +ĠO mega +ĠCann abis +çα 人 +å°¼ å°Ķ +æĪª åĽ¾ +çļĦä¸Ģ 款 +_ex ception +ä¿Ŀ ä¿® +li o +el in +_f ail +Ġsour ced +F ONT +, æĮī +çŀĦ åĩĨ +æ¯ı ä¸Ģ个人 +èIJİ ç¼© +.s cale +El se +è¦ģ çľĭ +æģ ¬ +ĠV C +el o +ç£ º +人 æĥħ +f inder +Ġciv ilians +rec v +ier a +ĠStep hanie +Ġscreen ed +åĭĥ åĭĥ +IST S +eb ra +Ġpol ling +ĠFle et +ĠManufact urers +, åĪ« +åıĤ å±ķ +çļĦæĹ¶åĢĻ å°± +lt ry +G PS +Ġass ortment +çİ°åľ¨ å·²ç»ı +ç«Ļ å¼ı +Ġod or +Ġunpredict able +_trans form +Ġrust ic +å°ı 声 +åı¯ æł¹æį® +Ġmem oir +qu ired +] ï¼Į +ç»ĵ å°¾ +ĠM ong +ĠCar p +_st ates +St roke +ĠCl iff +Ġmar ital +çͰ åĽŃ +C u +Ġp ains +çļĦ è¿Ļ个 +ä½ķ å¿ħ +ĠD iss +pl ays +ĠAdd s +Ġwe ighed +ĠNE GLIGENCE +为 é¦ĸ +Ġrend erer +) }{\ +ï¼Įå°ı å¿ĥ +å®´ ä¼ļ +à¥ Ģ +St amp +Ġnav y +Ġcontract ed +Ġt bsp +ä¸Ģ æĹı +_ price +ãĢģ å®īè£ħ +çº ¶ +ç»Ļ æĤ¨ +Ġimp osing +æİĮ 声 +ĠJen kins +Ġfil tration +Ext ract +Ġr d +ĠSal ad +ï¼Į ç͵ +åľ¨ ä¸į +çİĩ çļĦ +ï¼Įè¦ģ ä¸įçĦ¶ +ĠInvest ig +Ġag gress +对 è§Ĩ +ĠOr chestra +Ġres olving +å¾ģ éĽĨ +Enter tainment +çŃĶæ¡Ī 为 +éķ¿ è¿ľ +æĵħ èĩª +ĠIndividual s +ä»ĸ ä¸į +è®° è´¦ +.S how +d agger +æĪ¿ éĩĮ +Ġlat ent +ä¸¥æł¼ æĮīçħ§ +ãĥ Ĩ +ä»· ä½į +çļ± çĿĢ +represent ed +ä¸ĩ ä½Ļ +Ġg er +-F ree +ĠH idden +Ġple asing +éĻ ĩ +èĢĮ çĶŁ +Ġp ag +éħ ī +Ġteam work +ã Ħ +çİ°åľ¨ æĺ¯ +å¸ĥ æĭī +ĠR ED +W F +ĠT ib +åģļ èµ· +Ġopp ose +åĪ¶åº¦ çļĦ +骨 æĬĺ +çļĦ æĤ£èĢħ +çķľ çī§ +Ġx xx +å°± è¶Ĭ +æŃĮ åͱ +Ġtweet ed +两 å®¶ +Ġunf amiliar +æĢ» åĨ³èµĽ +.F ont +ç´¢ æĢ§ +ĠMyst ery +Ġdis pose +稳 åĽº +p lease +ä¸ĢçĤ¹ åĦ¿ +_ import +çĻ ¸ +t om 
+Ġgen etics +ĠP ipe +Ġd op +æ° ĵ +ĠCheck s +注 è§Ĩ +f ake +Ġsat uration +" He +æ¶Ī æŀģ +ĠS elling +ãĢģ ä»İ +èĩª å¾ĭ +f eld +ä¸ĵ æłı +_reg ion +çļĦ å̼ +端 åįĪ +Ġeconom ically +交æĺĵ æĹ¥ +Ġcapital ism +Ġthe r +Ġcon ventions +æºĥ çĸ¡ +ç½ij çĤ¹ +Ġout ward +oot strap +_COMM AND +Ġcl iff +ä»į æľī +Ġ+ \ +Ø ´ +(g roup +ĠB oom +ĠB ecome +ĠAutom otive +åΰ ä½ł +åĽŀçŃĶ éģĵ +cos ity +éķ¿ éķ¿çļĦ +. Header +-b uilt +IGN ED +ãĢģ æĹ¥æľ¬ +mem bers +Ġ å¦Ĥä»Ĭ +å°±æĺ¯ 为äºĨ +ä½ı æīĢ +.d ocument +ĠTig ers +or ative +ĠCo pper +ra ins +å½ĵ éĢī +(f rame +Ġwh istle +Ġcont empt +Ġt ense +æ¯Ķ åĪ© +å¥Ĺ çŃĴ +ï¼Į èİ·åıĸ +,èĢĮ ä¸įæĺ¯ +Ġpenet ration +ä¸Ģ ä¸ĸ +åįł äºĨ +Ġsem inars +ç»´ æĸ¯ +åIJĪæł¼ çļĦ +_z ero +亲 æĥħ +Dis count +MA KE +ĠSh arp +ä½Ľ å±± +çļĦ ä½İ +好åĥı æĺ¯ +çļ± çº¹ +ĠSil icon +Ġreb oot +å°ij åĦ¿ +éļı 身 +Ġmed iated +æĢĴ æ°Ķ +à · +ĠO UR +éģ¥ è¿ľ +Ġ$ ("# +Ġover weight +D lg +ĠO live +ãĢĤæľ¬ 书 +Ġcup c +eg o +ĠP om +D ream +æĿ¡ 约 +åĴĮ å¼ł +æ²® 丧 +x B +Ġcontinu um +Ġplay list +讲 å¸Ī +Ġcraft ing +æĪ Į +arth y +q s +Ġadvert ised +ï¼Įå¹¶ éĿŀ +Ġdetail ing +ad al +ĠL iv +w arning +ab us +ç£ģ çĽĺ +ĠU Int +Ġenrich ment +Ġfool ish +Ġ å¤ľ +ï¼Į éķ¿æľŁ +ĠMathemat ical +çļĦ æĿĥåĪ© +ĠRef erences +IC H +ãĢģ æŀĹ +èįī æ¡Ī +è§£ å¼Ģ +Ġadvertis ers +Ġ ç¥ŀ +åį° åıij +æıIJ æ¡Ī +代çIJĨ 人 +_s ql +ĠRes ume +Ġg rac +强 åĬĽ +ĠW ife +æĢĴ åIJ¼ +Ġc ialis +Ġoccup y +_sh ared +Ġsp ans +ĠAss ignment +Develop ment +Ġcru c +ĠY ale +山西 çľģ +çĶµè·¯ æĿ¿ +Ġimper ial +, å¸Ĥåľº +Ġhe ck +æĶ¯ æŁ± +Ġcapac ities +M K +Ġr ude +æĭ¦ æĪª +Ġcar rots +. const +ç»´ äºļ +Ġrespect ful +æİ¢ éĻ© +ÃIJµ ÃIJ +ĠCor onavirus +Ġ è¿ŀ +_ container +ï¼Į è°ĥ +ĠD iana +.prot ocol +îĢ IJ +æł¼ æłħ +Cor rect +EN AME +Ġdesign ation +t ls +ch arts +ç®Ģ æĺĵ +ĠGib son +. 
er +Ġexam inations +大 æī¹ +Ġsy nd += int +Ġcur ated +, å·² +_th reshold +Ġ åıªè¦ģ +Mod ules +STR UCT +an mar +ï¼Įæľī äºĨ +Ġmy ocard +Ġdig estion +åı° é£İ +èµ· å§ĭ +ĠHOW EVER +crib ing +ä½ıæĪ¿ åħ¬ç§¯éĩij +ä¸Ģ åıĺ +Ġsix ty +ï¼Įè¿Ļ ä¸įæĺ¯ +ubb orn +ĠHung ary +ï¼Įè¿ĺ åĮħæĭ¬ +Ġdeliber ate +Ġw ives +Ġpur ity +n egative +Ġsk ipped +ç³»ç»Ł åĴĮ +ke es +奥 çī¹ +r uby +çļĦåľ° æŃ¥ +ĠChrist ine +ĠA ber +Ġjud gments +res olve +çļĦ çIJĨçͱ +T PL +at ography +Ġcommerc ially +çŃī å¤ļ个 +ç¾İ èģĶåĤ¨ +.S end +è§ģ 她 +éĵ¶è¡Į åį¡ +N K +Ġre ef +åħĥ æ°Ķ +Ġconsent ed +.pro perties +马åħĭæĢĿ 主ä¹ī +AN E +éĢĤåIJĪ èĩªå·±çļĦ +++ , +Ġprofessional ism +è° į +çļĦå¿ĥ æĢĿ +å¹¿æ³Ľ åºĶç͍äºİ +èĬĻ èĵī +it ating +ĠB omb +, 建议 +ä¸Ń ä¹ĭ +å¾® 软 +ĠINC IDENTAL +Ġ第 ä¹Ŀ +/ the +Ġcur se +red irect +ï¼ĮæĪĸèĢħ æĺ¯ +A WS +( ip +æĪIJ交 éĩı +Ġann ouncing +äºĨè§£ äºĨ +åĭŁéĽĨ 说æĺİ书 +h dr +ãĢĤ æĺİ +ï¼Į æĺİç¡® +第ä¸Ģ æŃ¥ +Ġjack pot +ç©¿ æ¢Ń +rim ination +Ġh urd +ark ers +ert ility +Ġdisc arded +Ġcond emn +L ee +il ic +i ott +Ġcustom ization +ĠE z +-p ost +ãĥ ĩ +high light +pres so +æľīéĻIJ çļĦ +æķ°æį® åĴĮ +ĠSupp lier +. inner +马 è·¯ +Ġprison er +ãĢģ æıIJé«ĺ +åģľ äºĨä¸ĭæĿ¥ +He at +åħļ åĴĮ +Ġimm igrant +åĽ¾ åĨĮ +ĠRec reation +ä¸Ģ个 æĸ°çļĦ +èĥ½ çľĭåΰ +èĩ³ ä¸Ĭ +As sets +ĠV ic +ĠV ern +Ġtreat y +ä¹Łä¸į éĶĻ +ens is +Ge o +trans port +说 æĿ¥ +C ash +Ġg p +Ġout per +(s ys +åľ° åİ» +s ocial +v ine +^ T +Ġ åĽ½å®¶ +Ġsm o +ĠAss ume +ĠSy ndrome +ĠC emetery +ï¼Į éħįåIJĪ +纳ç¨İ 人 +Ġciv ic +_t ool +ç² Ł +ç§ijæĬĢ å¤§åѦ +j ug +ri ke +иÑĤ е +Ġide als +TT PS +ĠUg anda +æĵ Ĥ +° F +in ement +ĠV as +De ath +ĠPop ulation +æĻ®éĢļ æĬĢæľ¯äººåijĺ +ĠCh anging +æĪij 认为 +Ġpro ton +ï¼Įè¿Ļ ä¸ĢçĤ¹ +ä¹Ŀ æľĪ +ï¼Į å¼ķ导 +ay ette +m iddle +åĽĽ éĿ¢ +ãĢĤä¸İ æŃ¤åIJĮæĹ¶ +åģľ çķĻåľ¨ +Ġm ont +am pton +å¤ļ åĬŁèĥ½ +è´´ è¿ij +容 è²Į +æŃ¦ èĢħ +éģĵ 人 +Ġexhib itions +åIJ¸ è¡Ģ +ph rase +åįĥä¸ĩ åĪ« +ĠS PD +ĠIn ventory +sl ash +ĠU FC +示 åĩº +F requency +Ġsc aff +Ġcondition er +Ġexch anged +ant ing +Ġsick ness +. 
% +RE AK +æİı åĩº +Indust ry +, æīĵ +åĩºå¸Ń ä¼ļè®® +çĹħ çĹĩ +æĶ¯ è¡Į +Log ging +çĨ¬ å¤ľ +ast a +Ġ çĶŁ +ch ure +ar shal +-D ay +Ġqu otation +Int el +被 æĪij +å³ ¨ +å®ī å¸Ĥ +æ·»åĬł åīĤ +æ±Ĥ åĬ© +åħį è´£ +åIJ¯ 示 +riv ia +IST R +Ġcaps ule +çŁ¥éģĵ æĪij +-n ight +ï¼Į ãĢĮ +èİ· èĥľ +Ġneck lace +æ¯Ķ çļĦ +itect ure +ĠR ams +çĦ Ļ +ĠF ailed +A uthors +Ġfav ored +_RE F +Ġh ottest +深深 åľ° +Text Field +è§īå¾Ĺ å¾Ī +ĠB D +c ia +ĠW oo +Ġmet ro +ä¿¡åı· çļĦ +ĠSimp son +: åľ¨ +D ave +ĠC AT +est e +ĠI Enumerable +æł¡ åıĭ +Ġbelong ed +åįİ å¾ĭ +um ers +ergus on +, 让æĪij +Ġ 项 +-con sum +ĠRock y +c ursor +çļĦ ä¹łæĥ¯ +æļ § +ĠAD VISED +Ġtox ins +) ', +. rest +DE LETE +ãĢĤ æ¯ı个 +å¤ ¯ +_TR ACE +n h +Con c +Dis patcher +ipl ier +ĠW L +éĩĮ åħĭ +-s upport +per ing +Ġattach ments +天 羣 +ãĤ Ħ +ĠMill enn +ï¼Įä¸Ģ åī¯ +ä¸ĢåĪĩ éĥ½ +/ user +aver n +ĠCou pon +ĠT ah +Ġpl otted +d uration +å¾® ç¬ijçĿĢ +æĴ¤ éĢĢ +redict ions +æ°Ķ å¾Ĺ +éĵ¶ å±ij +çĭ Ļ +sc ene +ĠNaz i +P repare +At Index +åĨ· åĵ¼ +ä¹ĭ äºİ +izoph ren +Ġj og +Ġc ervical +ï¼Į äºĭ +Ġadvis er +åIJĮ çŃī +Ġsecret ion +umb o +å·®ä¸į å¤ļäºĨ +ĠL unch +ä¸Ģ缴 没æľī +æĭ¥æľī çļĦ +天 èĬ± +ãĢĬ 大 +æ¯Ķè¾ĥ é«ĺ +ãĢĤ ä¸Ķ +Ġrevers al +ĠClar ke +ä¸Ģ éĹ´ +.b ottom +å°ģ éĶģ +Yes terday +Ġc sv +Type Name +åıĤ ä¿Ŀ +ä¸ĸ åŃIJ +Ġtrain ers +B all +_s uccess +æ³ ¸ +Ġen forced +ãĢĤ ä¸ĢäºĽ +æĢİ èĥ½ +ĠLog an +Ġtor rent +ç»ı纪 人 +ĠTo ast +Ġacknowled ges +æĹł åIJį +Ġcon ftest +éĩİ å¿ĥ +Ġborrow ing +åĪĩ æĸŃ +ĠAnn ie +Ġcar ved +Ġro ses +以为 æĺ¯ +S a +_in v +Ġstick ers +åĩ¸ èµ· +ä¹Ĵ ä¹ĵ +åŀ ¦ +Get ter +Ġ åIJij +Supp lementary +ĠAnn ounce +Ġb rit +Ġspec imen +è½° çĤ¸ +åįł åľ° +ï¼Ľ âĢľ +ol ith +éĻĨ åĨĽ +Ġproof s +å·¡ æŁ¥ +ĠCertain ly +æİ¨ éĶĢ +ĠTrans ition +ING LE +m anship +åı· ç§° +æŃ¦ æŀĹ +éĿĴ éĵľ +ï¼Į æıIJåĩº +Ġthr one +Ġa erial +ĠX ia +åģľ åľ¨ +Ġweigh s +ĠVert ical +伦 çIJĨ +ä¸ļ ä½Ļ +åº µ +Ġwra ps +ç§ģ åĭŁ +bi ased +Ġsur plus +gr ass +Ġ çͱ +ï¼Į åij¨åĽ´ +温 å·ŀ +Ġperp endicular +Dim ensions +Ġo mega +åĽºå®ļ å®īè£ħæľī +Ġtim eless +èĵ¬ åĭĥ +ion i +æ¹ ĥ +èĩªåĬ¨ 驾驶 +Ġ' [ +ĠE ff +Ġm igrants 
+Ġcomp elled +[ A +, è¿ĺè¦ģ +Ġbox ing +Ġpin ch +ell ites +ä¹Ł ä¼ļæľī +ĠIn k +éĻĮ çĶŁçļĦ +Ġpo res +® åIJĪ +ar bon +è¿· æĥij +< K +n ell +ç¥ ¯ +åī¯ éĻ¢éķ¿ +Ġbow ls +_ icon +_f d +åħ¨ å®¶ +ç»ıåħ¸ çļĦ +Ġallev iate +B er +çİ© å®¶çļĦ +稿 ä»¶ +๠ī +æľŁ æĿĥ +åĽŀ è·¯ +Ġd l +ä¹ĭ ä½ľ +ï¼Į ç®Ĺ +IR T +ĠT LS +_c ategory +psy ch +ustom ed +.m ean +Ġble w +Com posite +Ġscen ic +两 åı¥ +[] ( +éĢĴå»¶ æīĢå¾Ĺç¨İ +ĠD ocker +çħ ² +为ä¸Ģ ä½ĵçļĦ +æī« çłģ +åı¯æĮģç»Ń åıijå±ķ +ĠB uch +ãĥķ ãĤ +ĠW inn +-s ensitive +åıįæĺł äºĨ +ĠRe in +ï¼Įä¸ĩ ä¸Ģ +ĠE cho +Ġoccup ational +缸 亲 +ĠL ens +ĠWar riors +ĠC ort +åĴĮ æĸĩåĮĸ +ĠE igen +æĸ°èĥ½æºIJ 汽车 +uc ceed +åĴĮ æĶ¯æĮģ +ï¼Į åıĹåΰ +Ġde hyd +ï¼Į ç´« +Ġpart itions +转 åΰ +Ġf rench +ars ing +>> >> +ĠMar athon +Ġsett ling +Ġmean while +Ġm ould +æĹ¥æľ¬ 人 +ĠB ach +.A ll +G un +Ġf ist +Ġbreat htaking +} else +L uc +ï¼Į æĦı +Ġz oo +, 帮åĬ© +å¤ Ń +AC S +ĠHis panic +çѹ çłģ +ĠF ans +ÑĢ Ñĥ +ĠCan adians +ĠRed uce +M aker +æľ¦ èĥ§ +Ġde af +以ä¸ĭ æĺ¯ +Ġsex uality +ĠPOSS IBILITY +Ġimp ressions +ä¹ĭ æ³ķ +è¿Ļä¸Ģ 天 +Ġdynam ical +Ġa ster +Ġver te +isp here +çļĦ æĪIJåĬŁ +éĢļ ä¿Ĺ +Rect angle +Ġhe mat +ĠL il +Ġrein forced +OP S +, è¿Ļæł·çļĦ +ĠJ D +ĠTal ent +æĦŁæŁĵ èĢħ +ĠSc r +ĠRichard son +ĠCon clusion +æķij æĬ¤ +cl ub +æĭ¿ äºĨ +çļĦäºĭ çī© +çĸij ä¼¼ +çļĦ åĵģçīĮ +y ld +ĠDis closure +æī¹ 次 +åİ» åĮ»éĻ¢ +G ain +_s cript +Pos itive +-ph ase +çϽ è¡£ +ãĢģ åħī +大 å°ĨåĨĽ +ï¼Į èµ°åΰ +Ġcross es +Ġintric ate +è¾¾ å°Ķ +æľª å©ļ +çªģ åĩ» +æ»ij éĽª +ed ay +Ġn ão +å°ijæķ° æ°ijæĹı +Ġtempt ed +ĠManufact urer +çļĦ åĽłç´ł +Ġ* . +OL T +OD AY +Ġeven ings +éĢĴ ç»Ļ +ĠRodrig uez +Ġch alk +ua wei +Ġauthent icity +ĠP urch +cal endar +. 
Common +Ġst all +ãĢģ å¹² +ymph ony +çļĦ åIJ§ +-b ound +land er +-pro file +ric ed +ĠS ensor +Ġd ps +Ġacqu is +Ġmor ality +Ġtrust ing +ĠGu itar +ï¼Į A +æĺ¯ä¸Ģ 次 +åİ» è¿ĩ +ĠR UN +ï¼Į ç¥Ŀ +Ġha irst +Ġext inction +For Key +精彩 çļĦ +éĿ ³ +ĠR ig +çĥ¦ èºģ +Ġslow ing +Ġreluct ant +缸 è§ģ +æĹł è§Ĩ +Ġhigh s +Ġcharg er +ĠS quad +å¹´ åįİ +P ID +al an +av oid +Ġdist racted +ĠUn cle +, ä¸įä¼ļ +Ġasympt otic +c ertain +çļĦ ä¸Ńå¿ĥ +ĠÎ £ +å¾ Ļ +åģļ ä¸ĢäºĽ +ro cy +ancellation Token +.create Element +Ġcolon ies +Ġsl opes +ĠFl ags +Ġspan ning +æĪ Ł +, 缴åΰ +ãĢĤ èµµ +D ie +M ur +=- =- +f unctions +eg rees +ĠN it +ä¹Ł è·ŁçĿĢ +> č +Ġmy riad +ĠArm strong +ãĢģ ç¾İåĽ½ +å¸Ŀ åĽ½çļĦ +ĠAm ount +è° ı +ä¸ŃçļĦ ä¸Ģ个 +æ²³ æ°´ +V B +Ġchat ting +ĠChall enges +_RE SET +Up dates +è¿Ļä¹Ī å¿« +Ġdest iny +Ġn at +ĠD yn +èĤ Ľ +æł¼ æĭī +w ild +Ġfe ared +ĠSh ir +Att empt +-c ard +Ġnot ch +AM A +æĶ¾å¿ĥ åIJ§ +ÂĢ ÃIJ +ï¼Į 忽 +çģ« çĥŃ += [' +-c olumn +ag h +... , +æĢ» 线 +ask ell +ï¼Į å¹³åĿĩ +ĠR andy +å¼± åĬ¿ +åįĬ个 å°ıæĹ¶ +羣 é¢ĺ +ĠM oz +< R +j in +é¦ Ĵ +_a fter +ä½ Ł +av atar +ĠSol omon +å±± è°· +ĠM apping +çļĦæĬĢæľ¯ æĸ¹æ¡Ī +ï¼Į éĩĩåıĸ +-Co V +ant is +ĠFIX ME +_OP EN +Ġunderstand able +æİ¥ 纳 +çİ© 游æĪı +ĠTHE ORY +ip se +åľ°æĸ¹ æĶ¿åºľ +. Button +il ogy +AM D +le et +Ġlog os +Comb ine +.g raphics +表达 å¼ı +oose velt +Ġpost pon +é£İ éĩĩ +. 
End +Ġpl edge +( Integer +äºĮ 楼 +Ġthr illing +Ġirr igation +ĠCon figure +yy yy +ï¼Ľ å½ĵ +J ess +Ġpur se +çѹ èµĦ +_dec ode +ĠRon ald +D TO +ĠB eg +me al +ĠParad ise +ï¼Į åģĩå¦Ĥ +_ LOCK +ä¹Ł åıªæľī +æİĴ æĸ¥ +ä¹Łæĺ¯ å¾Ī +) ^{- +not ify +ĠApart ments +æİ¨ æµĭ +缸 çŃī +åİ» çľĭçľĭ +W allet +ĠG FP +æĮĩ çĤ¹ +_sign al +ess ential +.comm and +Go ing +ĠSTR ICT +Ġreb ell +ï¼Į她 å°± +è¿ĻäºĽ ä¸ľè¥¿ +Ġfet al +Ġd B +ĠP upp +çıł åŃIJ +Ġlon ge +r hs +çļĦ åĬŁæķĪ +Ġsql ite +tain s +_{ ( +- if +ä¸įåĨį æĺ¯ +Ġrain bow +æĶ¹ èī¯ +åĩº æĸĻ +ĠL ORD +骷 é«ħ +- an +ord ering +Back up +éĹ® çŃĶ +Ġinc arcer +é²ľ èī³ +Ġfrag rance +ãĢĤ æİ¥çĿĢ +ĠK ur +æļĹ èĩª +Ġgr ind +çĹĽ å¿« +Ġ${ {\ +Ġfemin ine +ĠD ear +Ġpur ified +an za +åĨħ æľī +å¼Ģ ä¼ļ +çĬ¶ çļĦ +Ġ Š+çĿ¡ çĿĢäºĨ +ï¼Į âĢĺ +ĠL ucky +Ġh d +ĠW C +车 éŨ +Ġs log +ä½Ľ ç½Ĺ +-h ow +- US +Ġanticip ation +Ġa is +Ġk ings +C ALL +Ġjud ging +Care er +ĠKle in +au f +erm on +ä¿Ŀ 湿 +è¿Ļ æĸ¹éĿ¢ +å¿ĥ æĦı +çķĻ ä¸ĭæĿ¥ +Ġco ats +Ġg ems +西 æ¹ĸ +ĠÑ į +q i +Ġ[ # +ĠP NG +ĠDet ection +Ġre ps +çϽçĻľé£İ æĤ£èĢħ +Ġle tt +çļĦ çİĭ +M otor +éĽĨ è£ħç®± +en berg +Ġtheir s +å¼± çĤ¹ +{c ases +ï¼Įçľĭ ä¸Ĭåİ» +Ġ& # +ï¼Įæľī ä»Ģä¹Ī +Ġt icks +ä¸ĭ åįĬå¹´ +çļĦç¾İ 好 +_ alpha +ãĥ ¡ +Ġdrag ged +Ġy ummy +Ġover looking +ĠUs es +ï¼Į è§£åĨ³ +带 ä½ł +ãĢĤ éħĴåºĹ +-de velop +Ġlat itude +ç¿ Ł +ct r +ãĢij ï¼ļ +ad just +Ġcalcul us +Ġf uzzy +^^ ^^ +g ue +Ġ 大家 +羸 åŃIJ +Right arrow +ĠStev ens +ĠM ega +ĠGOOD S +g old +è·Ł ä¸Ĭ +ï¼įï¼į ï¼įï¼į +Ġdis gu +-g rowing +ĠClick Funnels +åIJį çīĩ +Cl os +ç½ij绾 ä¸Ĭ +ĠDev il +ол ÑĮ +å°ı 麦 +åħļ ä¸Ń央 +Ġb ending +Ġsuper hero +ï¼Įå®ĥ æĺ¯ +ï¼ĮæĪij们 è¦ģ +ĠY ard +ĠOw en +Ġ åħĪ +ĠCele b +.f acebook +ĠGen eva +Redd it +æľī å¿ħè¦ģ +çŁ³ å®¶åºĦ +;;;; ;;;; +Ġ èĥ½ +è° ¬ +Ġseam lessly +") ). 
+pre ter +ĠOP EN +çļĦ åIJĮåѦ +Ġsepar ating +std lib +ĠSw an +Ġrest oring +(m odule +ãģĦ ãĤĭ +H or +Ġ è°¢ +宽 容 +åħ»èĢģ éĩij +Ġoff spring +Ad apt +èĬ ĭ +å« ¡ +ï¼Į è®°èĢħ +Ġs perm +ï¼Į èĤī +åľ¨ åŃ¦æł¡ +å¿ĥ 缮 +_s can +Ġst aple +_M AC +Ġunc overed +Ins urance +Id eas +ĠMon ica +èµĦæľ¬ 主ä¹ī +Ġfire arm +ĠPe pper +st ed +æľįåĬ¡ åijĺ +[ y +im icro +éĩį 大çļĦ +Inst ances +ĠFil ms +Ġpl aque +æ°´ ç͵ +Dist ribution +èĢĹ è´¹ +Ø ¬ +对 çŃĸ +ind ices +(l ambda +y on +Ġint e +æĺ¯ä¸Ģ æĿ¡ +æ¸ħåįİ å¤§åѦ +Dec ode +ĠMy ers +Ġind ie +Ġsne ak +.de ep +çŁ¿ ä¸ļ +轨éģĵ 交éĢļ +Ġint racellular +æł © +ãĢĤ å®ŀéĻħä¸Ĭ +Ġthreat en +ãĢģ åIJ´ +è¿Ļ ä»¶ +.res ize +Ġup stairs +ï¼Į æIJŀ +ĠR OS +Ġcapt iv +æĬļ åħ» +为 æľ¬å®ŀç͍æĸ°åŀĭ +M ah +( idx +ä¸Ĭ åįĥ +empor al +Ġrent ed +çłĶ 磨 +( Value +Ġref lections +ĠHit ler +ĠSk ype +_d elay +Ġadhere nce +éϤ åİ» +ç»ıèIJ¥ 管çIJĨ +Ġb amboo +ĠPart ies +_ " +å² ij +( at +ĠBit map +交 æĽ¿ +ĠS ense +\ @ +ĠML B +åĦ¿ åŃIJçļĦ +ĠR IGHT +Ġcircul ating +ĠB atch +åijµ æĬ¤ +) (( +ie ves +顺 çķħ +æĦļ èł¢ +Ġs igh +ig nty +ect in +(" { +Ġreimburse ment +Ġre actor +æĬ½ åĩº +çļĨ æĺ¯ +pl er +ï¼Įä»ĸ å·²ç»ı +Ġshoot er +èĸĦ çļĦ +Ġg odd +Ġpit cher +Ġm ug +ĠAlex a +Ġtrans verse +Ġtrust ee +ãĢĤ æīĢè°ĵ +åħŃ æľĪ +_ ; +大 å¸Ŀ +ĠM unicipal +Ġconflic ting +Ġ ç͍æĪ· +éľ ¾ +çķª è¯Ŀ +ĠE clipse +A st +Ġfin ely +Ġche at +èĪħ èĪħ +( col +åħ·ä½ĵ æĥħåĨµ +ĠO EM +Ġ= ~ +par allel +ãĢĤå¦Ĥæŀľ æĤ¨ +çªģ åĩºçļĦ +å°Ķ å¤ļ +äºĨ ä¸ĬæĿ¥ +ip y +_DE CL +Feed back +æīĢ åѦ +ä¸ī çĤ¹ +åݨ å¸Ī +rog ate +æľĢ å¿« +ãĢģ çĶŁçī© +ä¼ł åħ¥ +F oo +Ġy ang +ĠPast or +æľĪä¸Ń æĹ¬ +, ä¸įçŁ¥éģĵ +ag greg +Ġres orts +ig ated +( as +转 åŃIJ +ç¥ŀ åύ +ure n +ï¼Į æķĪæŀľ +ï¼Į ä¸ĵä¸ļ +ĠT uple +Ġconfront ed +Ġplay back +ĠINTER RUP +Ġdecl arations +_g roups +Prof essor +å¾ Ĭ +å½±åĵį äºĨ +åĽł æŀľ +Ġover coming +à ī +Ġcatalog ue +ï¼ Ĺ +åı © +Ġremark ably +ä¸į çIJĨ +ç»į åħ´ +轻轻 åľ° +åѦ æĬ¥ +ĠC ensus +è¿ĺæĺ¯ ä¼ļ +Ġpop up +Ġprecaut ions +Ġw ifi +æĬ¬ æīĭ +ut z +çĴ Ģ +æ¯į亲 çļĦ +ĠSustain ability +in h ++ p +éĤ£ æĿ¡ +@ b +Ġsp ider +Ġnucle i +大 大çļĦ +Ġgall ons +ĠPack et 
+ç¥ŀ éĢļ +ass es +Ġinflu enza +ant on +ig its +ä»ĸ å·²ç»ı +ĠL ite +, é¦ĸåħĪ +se gment +ole cules +Play ing +)) * +ĠCor inth +(cl s +\new command +çݰ 身 +aw i +ĠBet ty +éĹª éĹª +çļĦ ä¸įæĺ¯ +ĠLad ies +_cl k +æĿ¡ å½¢ +âĢĶâĢĶ âĢľ +ĠNew castle +Ġ[ : +Ġneighb ours +ï¼ĮæĪij们 å°± +Ġо б +天 çİĭ +App s +ĠB uzz +.m eta +å®ŀéĻħ è¡ĮåĬ¨ +ĠBet ting +, æİ¨åĬ¨ +æīĢ ç͍ +é¢Ħ åijĬ +Ġobject ions +å®ļ å¾ĭ +par agraph +æĥĬ åı¹ +ä½įç½® çļĦ +ç©¿ éĢı +Ġmult imedia +es an +ï¼Ł åĽłä¸º +两 çĤ¹ +æīĭ åĬ¿ +, V +Ġst alk +lu id +ĠPr ison +Ġ" ," +ĠEn forcement +ãĢĤ æĢ» +ĠM OV +Ġcont aminated +abb age +骨 æŀ¶ +/ file +ï¼Į æŀľ +èµ ¦ +æIJ¬ è¿ģ +ï¼Į ä¹IJ +Ġanalog ous +ãĢĤ åħ¶ä»ĸ +æ¸ħ æ¾Ī +çļĦä¸Ģ æĿ¡ +qu art +çī¹ æľĹæĻ® +_H OME +_ admin +Ġy y +il ers +Ġsuper f +æķ¬ ä¸ļ +æŀ¸ æĿŀ +é¦ĸ 饰 +og ical +h ub +inst on +Ġp k +ĠIns ider +Ġpermit ting +. Input +æĭĽ çīĮ +æ´Ľ åħĭ +j ournal +æĢ¥ çĿĢ +emb ed +[M OBILEPHONE +Ġfeas ibility +被 她 +ä¸Ń åIJ«æľī +Ġam mon +Ġun le +åıĮ éĩį +ĠM igration +Ġc itations +æķ°æį® éĽĨ +) init +ĠP df +Ġportray ed +Ġpost al +Ġb othered +.con current +Ġs sl +ĠPro xy +åĮħåIJ« äºĨ +Ġco arse +ï¼Į æĬĹ +è´µ 人 +åľ° éĹ®éģĵ +h c +B SD +ä¸į èµ·æĿ¥ +åħ« åŃĹ +è§£ æ¯Ĵ +( äºĮ +ton es +çŁ³ çļĦ +TR Y +format ics +Ġtherap ists +ï¼Į大 大 +éĢļ 车 +mult ic +çļĦ è¯Ńè¨Ģ +Ġcon ception +Ġpro d +Ġuser Id +Ġmot ive +st ones +æĪIJ ä¸Ģ个 +Ġmit igation +Ġhour ly +sm art +çļĦ女 çĶŁ +R yan +sv g +- Time +z ier +ric ting +ĠS ME +ãĢģ æĪij +Ġav id +B eyond +举åĬŀ çļĦ +åįİå¾ĭ ç½ij +å±± æ²³ +" There +æĹ¶ ä¼ļ +ä¹Łæ²¡ ä»Ģä¹Ī +_ rel +ĠPrint ing +оР· +ĠGe ometry +âħ ¡ +< >( +/b ash +ĠAdvent ures +ï¼Į èĦ¸ +ĠE TH +åĨľ åİĨ +第ä¸ī 次 +Ġsurf ing +鼷 诺 +èĿ ł +ĠD X +.Is NullOr +Ġnegot iating +ĠT AG +ä¸įç͍ æĭħå¿ĥ +Ġprodu ctions +Ġnanop articles +Ġsh ark +recogn ized +ĠAb ility +åīĤ éĩı +伺 åĢĻ +çī© ä»· +Ġsubst ituted +åīį ä¸ī +客 车 +å°½ äºĨ +Ġnav igating +ĠS ER +> This +æ´ ± +å®Įåħ¨ æĺ¯ +ãĢģ æīĵ +éĺµ èIJ¥ +Ġsqu ash +ĠQ B +a ñ +J ason +ĠF rost +ä¿¡æģ¯ åħ¬å¼Ģ +N eg +Ġchar ities +ä¸ĢçĤ¹ éĥ½ä¸į +Ġthr ill +in j +æ¸ħ æľĿ +< V +.T ry +Ġprov inces +_block s 
+å¾® æ³¢ +èĥĮ åĮħ +åħ« æľĪ +off ice +ï¼Įå¾Ī 容æĺĵ +Ġcontract ual +ĠPet erson +Ġcr ashed +int estinal +an ing +Ġn ano +Ġal ly +大 å±Ģ +èĤ¾ èĦı +\ lambda +ias is +çIJĨå·¥ 大åѦ +ge om +able View +çĬ¶æĢģ çļĦ +" ' +m esh +æ²IJ æµ´ +Ġminim izing +{ sub +Ġt art +red is +âĢĶ but +Ġinterpret ations +Ġele phant +Ġ æ¢ģ +ä½ł è¿Ļ个 +_FE ATURE +ĠV iv +Ġobs ession +ç¨İ çİĩ +åĩº 让 +æ¡ Ķ +ĠK nown +Ġspread sheet +ä¸Ĭ 设æľī +Ġh id +, åĪĺ +ĠD rawing +ĠN ak +ï¼Į为 ä½ķ +Ġd ia +Ġvolunt arily +( Http +Ġpuzz les +Ġp ushes +ç£ģ åľº +æĥ ļ +ĠRoman ia +è¯ī æ±Ĥ +P ull +樱 èĬ± +Ġun subscribe +å¾® éĩı +Ġtra umatic +%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%% +[] ) +spNet Core +Ġessential s +ä¸įèĥ½ å¤Ł +Ġ[] * +ĠWy oming +pol icy +误 åĮº +QUEST ION +è¾ĥ 强 +Ġro ast +ĠL ounge +Ġpre requ +Ġ ãĢĭ +Part ner +U AGE +n uts +Ġed itions +Ġm ater +ĠÐ Ĵ +BO OK +Ġ 表 +OLUM N +ç͍ èĩªå·±çļĦ +un o +审 è§Ĩ +F inder +ĠPar ish +Ġrep etitive +ãĢģ æķĻåѦ +ĠT ier +ĠE arn +æ¼ ¾ +n k +ä¸Ĭ ä¸Ģ个 +æĶ¹ æŃ£ +ä¹Łæĺ¯ æľī +Ġresear ched +, 羣 +è±Ĩ çĵ£ +ĠOffic ers +ï¼Įä½ł çľĭ +æľ¬ åij¨ +åīį è¿° +ith uan +_P P +Ġbefore hand +åĩĨç¡® æĢ§ +å¸ĥ å°Ķ +Status Code +车 è½½ +Ġdist ortion +F a +l ide +å¼Ģ éĩĩ +" That +ä¸ĭ 鼨 +Ġphosph ate +Ġin ode +å·§ åIJĪ +åİŁ æľīçļĦ +Ġpept ides +ĠC ats +ĠHold ings +ĠB inding +åįķ纯 çļĦ +Re act +chan ical +Ġcleans ing +Ġad ore +å®ī 举 +èµŀ åĬ© +æħİ éĩį +ĠCat hedral +ĠCl osed +Ġredund ant +Annot ations +ãĢģ å·¥ä¸ļ +ï¼Į ç»§ +æ·± åİļ +Ġem itted +Ġdeleg ates +ï¼Į å®īåħ¨ +_RE C +Ġ è¿ĺ +IL D +丽 ä¸Ŀ +IN A +Ġspread s +åħħè¶³ çļĦ +ĠM anu +B a +,ä¸Ģ æĹ¦ +æĮij è¡ħ +Ġ çĶ· +ĠGil bert +Ġhur ting +_S PI +ãĢģ åĴĮ +_ST REAM +ĠGrow ing +çķĻ ç»Ļ +_ + +ç¥ŀç§ĺ çļĦ +Ġcon ceived +ĠE PS +Ġcomm enced +è·Ł æĪij说 +ĠRe peat +ĠL LP +Ġav g +mont on +ĠF iber +ĠK otlin +s pe +ĠT it +ĠT error +}$ - +Ġst ole +_ex ec +_{ - +ĠR aid +çļĦ æĹ¶åĪ» +åħ¨ åľº +éĺ¿éĩĮ å·´å·´ +Ġinter change +print s +rack et +ric an +Ġrelie ved +_D ESC +æŃ¥ åħµ +L ists +éĹª åħī +äºĶ åħŃ +AM S +Ġ å¹³ +Ġsup rem +èĢĮ åIJİ +å®Į åIJİ +ĠG ambling +åıij èĩª +-s pec +Bit coin +G UID +è¿ĺä¸į å¤Ł +_ 
images +Ġfac et +_N EW +O X +arr ants +æĮĩ æľĽ +ãĢĤ æ¯ı天 +ï¼Į èĢIJ +ung al +Ġa pr +ĠC rist +人 说 +Ġmig rate +Ġestim ator +éĺ» å¡ŀ +{ enumerate +The orem +du ino +èIJ¥ åľ° +E Q +Ġpl ural +缴 è§ī +ĠYour self +Ġsupplement al +ĠE lev +ĠIter ator +ĠSynt ax +ĠW arm +çļĦ æķ´ä½ĵ +Ġserial ize +Ġun acceptable +ï¼Į ä»Ķç»Ĩ +Ġbenef iciaries +TA IN +ĠL ists +-sh irts +__ . +å¤ĸ æĿ¥ +vis ibility +èį¡ èį¡ +_D OWN +_ attributes +çļĦ éĿ¢åīį +Att rib +饮 åĵģ +ĠReal ty +Ġre arr +Ġbound ing +rown ed +ãĢĤãĢĤ ãĢĤãĢĤ +åľ¨ åħ¶ä¸Ń +ĠS ally +ï¼ī ä¸İ +ç»Ĩ å¾® +ĠWorld wide +Ġâī Ī +.net work +çļĦæĹ¶éĹ´ åĨħ +Ġboost ing +Ġt ed +å¾Ī ä½İ +çļĦäºĭ ä¸ļ +ÏĦ ικ +ĠHash Set +< void +p icture +ï¼Į çİī +ĠM G +Ignore Case +ï¼Į è°ĥæķ´ +ern els +大 èĩªçĦ¶ +Ġpr ince +å¥Ĺ é¤IJ +ï¼Į åĪĿ +åĢĻéĢī 人 +. Items +ug ht +Ġref ine +Ġx mm +宽 æķŀ +ï¼Įå¦Ĥæŀľ ä¸įæĺ¯ +ĠPatri ots +éļ¾ çľĭ +æĿĨ èıĮ +ĠCase y +ï¼Įè¿Ļ æĺ¯ä¸Ģ个 +rom pt +è§ī éĨĴ +Ġisol ates +æ°´ æĢ§ +Ġprogn osis +éĥ½ èĥ½å¤Ł +ĠCom ics +Ġh ij +ä¿Ŀ æļĸ +主 å¸ħ +çļĦ æĦıè§ģ +Ġin ning +struct ured +ä½ľç͍ ä¸ĭ +CO RE +.g enerate +èĥľ è´Ł +Ġpe as +ĠAn s +ĠH F +å±± åİ¿ +Ġprot esters +èĩªçĦ¶ ç§ijåѦ +åĩº ä¹İ +ĠAgric ultural +ĠOrgan izations +List View +ela ide +-------- -- +ĠM ON +Ġab usive +ĠDef ence +ĠN okia +ĠLand scape +Ġcent rifug +оР¿ +Ġpotential s +group Id +ĠDim ensions +Des ktop +æ¯Ķ 为 +çĥΠ士 +_ events +ï¼Į以 èĩ³äºİ +.comm ons +prising ly +ĠH ern +Red irect +_s ym +. 
-- +æĪĸ æľī +ä¸Ģ åľĪ +çα çļĦ人 +æ¡Ī åŃIJ +ï¼Į æķ° +rodu cing +Ġge ography +ON FIG +èĢĮ èĩ³ +ress or +è¯ħ åĴĴ +( Test +X iv +éĢĥ èĦ± +'=> ' +oci ate +erm any +ï¼Į 代表 +Ġtest ament +çľĭ ä½ľ +,å°± åĥı +d irection +Ġliqu or +çª Ĵ +æģ º +ĠDe uts +æĪij åıĪ +Ġre el +ĠHigh lights +æĹ¶ ä¸į +ç»Ħç»ĩ å®ŀæĸ½ +.m p +Ġnon atomic +Ġawait ing +è¿IJ éĢģ +dis p +ï¼Įè¿ĺ 没 +è´¨éĩı åĴĮ +Ġspecial izing +× Ķ +ï¼Į éĥŃ +ĠG oth +~~ ~ +审议 éĢļè¿ĩ +Ġdep ot +梦 å¢ĥ +é«ĺçŃī æķĻèĤ² +Ġar rog +Ġsm oked +Ġf erm +Ġwar fare +N ON +使 åĬ² +Ġthe ology +çģ« èĬ± +ï¼ĮéĤ£ éĩĮ +Ġj ohn +Pl ain +ĠAs ync +Ġcow ork +ä¼ł æİĪ +æĮī è§Ħå®ļ +cknow led +åIJį æł¡ +ç£ģ æĢ§ +A mb +æĭĴç»Ŀ äºĨ +ĠF actors +对 æīĢè¿° +/ article +e on +ï¼Į è¿ŀç»Ń +(l ength +Ġharvest ed +红 éħĴ +Ġentrepreneur ship +Ġin equalities +é» Ķ +ĠPlay Station +Ġred is +cy l +OP EN +ĠD erek +ĠM AG +ا ر +对 è¿ĻäºĽ +ĠRe ceive +åįĥ éĩij +Ġnot ions +Ġtact ical +ĠS igma +apital ize +å«ģ ç»Ļ +Ġf ru +ï¼Įå¹¶ä¸Ķ åľ¨ +Ġexp ires +if o +m ul +agn osis +ect s +Ġ'/ ' +Fl orida +æ¯ Ĺ +ĠMar sh +æ´ ¼ +ç͵ è¿ŀæİ¥ +_b ool +(m ock +ĠHigh ly +ãĢģ æ°Ķ +为 ä¸ŃåĽ½ +. ok +" If +t ops +ï¼Į ç»ıæµİ +è¯Ń ä¹ī +çļĦ åĮºåĪ« +C oun +Ġ 太 +Ġgraph ical +Ġpedest rian +Ġelect oral +our ced +, name +en ames +ï¼Į ä»ħä»ħ +ãĢģ çݯä¿Ŀ +ï¼Į æ°¸ +éķĩ åİĭ +atur ated +åĨ³å®ļ çļĦ +ĠTik Tok +r g +Ġf oul +ĠO ral +Ġgrad ual +Ġlonge vity +é¢ł è¦Ĩ +Ġallerg y +Ġfin als +_T ARGET +Ġpione er +ï¼Į æĦŁåıĹ +/ man +organ ic +ĠHind i +Ġlandsc aping +F ederal +è® ª +Manag ed +Ġtun es +. 
ip +ãĢģ åѦ +Ġsh ampoo +Ġd ude +ĠFIF A +ĠL ac +Ġneuro logical +, æĤ¨ +å½ĵ åį³ +åΰ èĩªå·± +ĠAff ero +L os +Ġ åįģ +çļĦ 计åĪĴ +Fe el +ĠA ctions +H ist +ĠT roy +ĠN PC +Ant i +, åħ± +Inf os +.f unction +.s qrt +ĠMe at +gorith ms +r w +Ġcontract ing +Ġcha otic +ä½ł åĴĮ +ĠMart inez +Ġre charge +åĪĩ éϤ +aps es +è§ĦèĮĥ åĮĸ +-b inding +äºĮåįģ 大 +ic us +Ġl ubric +, çľĭåΰ +çŃī åĬŁèĥ½ +li ography +ä¼ļ å°Ĩ +ĠØ ª +he ries +驱 éĢIJ +Ġ åIJĪ +un ter +以ä¸ĭ åĩłä¸ª +å© ª +-se x +:n one +var iables +ida e +åľ¨ ä¸Ĭæµ· +æ¯ı ä½į +Ġ ç¾İ +ĠM oss +è¿Ļ éģĵ +缸 ä¼ł +edd y +ĠEther net +. ** +Ġconstruct s +ĠM W +_p rivate +ÂĢ Â +. Now +M ind +红 åĪ© +Ġlayout s +Ġgrap es +Ġfire arms +Ġdem ons +OT S +_ aut +Ġj ets +Rep o +\x f +ĠE F +ĠUn icode +转 è¿ĩ头 +ä¼Ĺ å¤ļçļĦ +C oin +IT AL +ram eworks +Ġber ries +ï¼Į åħ¨çIJĥ +oa uth +.fl ags +CONT ACT +ĠAr row +ĠR ear +çĴ § +é d +Ġer ad +设å¤ĩ åĴĮ +m ov +Ġm k +åįķ ä½ĵ +åĤ» åŃIJ +_AR GS +æĶ¾ åģĩ +for um +Ġun const +Ġk on +ĠW als +ip ro +Imp act +Ġgall on +anz ania +çľ¼ çľĭ +/d at +äºĮ åŃĹ +æĶ¾ è¿Ľ +ĠSupp lies +å¤ĸ è¯Ń +Ġaut onomy +çĥŁ åı° +å¾Ī好 åľ° +龸 æ°Ķ +× ¨ +Ġun cont +ĠE ating +ãĢģ ç§ijæĬĢ +Ġaw a +æĹ¢ æĺ¯ +âĪ Ī +ï¼Į åı¦ä¸Ģæĸ¹éĿ¢ +oo o +çĤ ³ +ĠMat lab +çļĦ åĴĮ +Ġsom eday +/ include +å«Ĥ åŃIJ +_ pp +åĽ¾ 表 +Ġhack ers +Ġj avascript +oca ust +Ġag on +çļĦ æĪIJ绩 +èł ķ +Ġambig uous +åĽ½éĻħ åĮĸ +çĥŃ åº¦ +ï¼Įä»ĸ è¿ĺ +Ġ[ $ +.w orld +伯 çε +Ġh ull +ë ¦ +æ® ´ +app s +å¾Ĺ ä¸Ģ +éĢĤ æĹ¶ +ãĤ ° +éģĹ å¿ĺ +ä¸į ä¸Ģ +.t ask +RE N +Ġd ug +æŁ ļ +Ġcut off +EX PORT +ĠHand ler +âĢľ When +好 æľĭåıĭ +ract ical +ĠL opez +Ġdi ary +é¢ ĵ +Ġview point +( tr +ä¸Ń èĢĥ +ÑĢ Ð° +æľĢ ç¾İ +èĢģ äºĨ +ig t +çķħ éĶĢ +ĠM F +æĹł å¤Ħ +Ġanim ations +Ġth irst +ä¹ł é¢ĺ +è¶ĬæĿ¥è¶Ĭ 大 +å¤ļ 为 +ä¹Łä¸į 好 +,æĪij æĥ³ +Ġhe p +^ âĪĴ +ĠHe ath +Ġinherent ly +ĠH ilton +ç»Ĩ åĪĻ +ĠEVER Y +åĺ´ ä¸Ĭ +O ID +éĤ ¸ +ĠCh in +ãĢģ è¶ħ +olph ins +ä¹± äºĨ +çļĦ èĥ½éĩı +åģ· è¢Ń +ĠR ica +Ġsu do +_ rem +Ġdeliver ies +F air +dat etime +lin ewidth +ov id +Ġconst expr +df s +ur ous +æĹ¶ å°± +ç¾ ² +éĺ¿ æł¹å»· +str uments +Ġpass ions +ĠH ass +_st orage 
+带 äºĨ +å¸ħ æ°Ķ +ve e +ç¼ ª +Ġmaj ors +ĠT W +ĠH emp +Ġquestion able +åIJ¸ åıĸ +èµŀ èµı +Ġpo pped +f iction +çϽ èıľ +ãĤ « +Ġfl ames +, å¼Ģå§ĭ +ï¼ĮæĪij们 åľ¨ +æ²»çĸĹ æĸ¹æ³ķ +Ġin vert +é© ¯ +Gener ation +Ġge o +大 佬 +Ä Ĺ +ĠM add +åı« ä»ĸ +Ġapp arel +éģ® æĮ¡ +Ġdev ote +æĺ¯ çͱäºİ +_U INT +æĤ ¸ +S ky +( ', +, äºİ +ĠPear son +ĠTh u +M W +_st atic +_EX PORT +ĠC UR +Ġc uc +ĠQu inn +_FAIL URE +åĪ ģ +se lection +Ġ ÑĢаР+be en +C raft +w k +åĩºåħ· çļĦ +çļĦä¸Ģ å¹ķ +reg ulation +æĮ¤ åĩº +æĹ¥ åζ +ĠL EG +Ġins pections +Ġgreet ed +H u +æīį 好 +æ·± è¿ľ +pro vider +ç§° ä½ľ +d an +使ç͍ èĢħ +å¤ĸ èµĦ +d istance +.t ab +éĢī ä¸Ń +æ°´ åĴĮ +Ġsu e +Ġpop s +[ int +çļĦ æķĻåѦ +vas ive +çĺ Ģ +к а +. msg +Ġis omorphism +åĶ ij +_b us +æĹ¥ è¯Ń +_n etwork +ĠBel ieve +AL T +Ġtake aways +( local +ï¼Įä½Ĩ è¿ĺæĺ¯ +ĠSal mon +ĠPe er +ors che +ä»»åĬ¡ çļĦ +ç͵è§Ĩ æľº +forget table +Ġe ats +Ġb og +æį ħ +Ġthr iller +å¾ ĺ +éħ¸ æĢ§ +Ġs addle +ĠLead ing +. play +èµ¶ å¿Ļ +Ġqu il +ĠFor bes +-b orn +- place +Ġa y +缴 å±ŀ +ç͵åĬ¨ æľº +. State +< P +é£ŀ éĢŁ +ote ch +> ` +Ġev apor +ï¼Į å®ŀéĻħ +Ġl ush +åĮ ¡ +Ġcomp rehension +å¤ľ çļĦ +æĮº 好çļĦ +ĠDi agram +ï¼Į çŁ¥ +ï¼Į çĪ¶äº² +_l ang +ç½ķ è§ģ +çĤ¹ ç¼Ģ +Ġobsc ure +f v +F ly +æĦŁè§ī èĩªå·± +èĭ¦ æģ¼ +ĠMcC ain +- " +ĠM n +Ġre els +enc ers +Ġorgan izers +纯 åĩĢ +æ¯Ķ ä»ĸ +Ġlog istic +åı ½ +ĠB one +ãĢĤ èĭı +åľ° ä»İ +åħħ æĸ¥ +款 çļĦ +Ġer ase +Ġsuscept ibility +Ġhous ed +éħ¸ çĽIJ +Ġsc anned +Q C +ig ible +T EXT +èĥ½ æĬĬ +æľ¬ åŁºéĩij +Ġ] ]; +Ġpump ing +ĠM eyer +ï¼Į ä¿® +use ment +天津 å¸Ĥ +_P os +Ġ éĥŃ +ĠAg ents +群 å²Ľ +åĩı æĮģ +ĠCar roll +ĠG Hz +\text width +èĮĥåĽ´ åĨħçļĦ +_P AGE +Ġperform er +ĠWe apon +æĪĸ å¤ļ +code c +è´¨éĩı çļĦ +ï¼ļ https +re ements +<< " +ен ÑĤ +ä¸Ń è¿Ľè¡Į +龸 éģĵ +çĽĸ æĿ¿ +ĠSec rets +FFFFFFFF FFFFFFFF +è§Ħ模 çļĦ +å± ī +Ġul tr +Ġdom inance +æĬķ ä¿Ŀ +Sim ilarly +ĠUn limited +å¾Ĵ åĪij +-m aterial +ĠInst ructions +if a +åĽĽ ç§į +ĠSh annon +Ġpreced ent +ĠR us +acter ial +Ġdistribut or +ĠW rong +Data Set +Ġbul lying +åIJĦ å¼ı +Ġdemonstr ations +ëĭ Ī +ECT OR +isb ane +ï¼Į æĶ¾åľ¨ 
+Ġ èī¾ +Ġobserv able +ä¸Ĭ ä¹Ł +ç§ģ ä¸ĭ +å¿ĥ è·³ +R oad +çİ ĸ +.F at +gr pc +çļĦäºĭ åĦ¿ +Ġsurvey ed +Ġche ating +( position +ĠF unc +ov ich +çļĦ æŃ» +çļĦ æĦŁæĥħ +ĠG aza +Ġun icode +Error Code +ç§ij éķ¿ +ï¼Į ä¹ħ +B irth +ãĢģ äºij +ä¸Ģ å¹´çļĦ +tain ed +IT T +Ġp addle +åħ¬ 认 +åĩł åįģå¹´ +erent ial +å¸Ĥåľº éľĢæ±Ĥ +æĢĢ æĬ± +Ġconver ges +Ġexpend itures +ä¸į 离 +K ids +æŃ£æĺ¯ åĽłä¸º +Ġcont ests +Ġimp ulse +Ġbout ique +r ules +/ to +ï¼İ ï¼İ +大 å§IJ +M ission +.R ec +ï¼Į åı³ +æ°´ æµģ +éĵħ ç¬Ķ +_L INK +/ get +ç¿» 身 +æĹ¥æľ¬ çļĦ +éĢģ ä¸Ĭ +ç¥ĸ æ¯į +æĹ¶ æľī +éĽĨ ç»ĵ +åIJĪä½ľ åħ³ç³» +çıŃ éķ¿ +ç¥ĸ çζ +(arg v +åͱ çīĩ +ĠTh reat +@ p +ĠBeng al +ä¸Ģ çŀ¬éĹ´ +_B ITS +çľ¼ çIJĥ +ĠB od +H old +ĠCA USED +D irections +Ġcom fy +ï¼ĮæĽ´ ä½ķåĨµ +认è¯Ĩ çļĦ +ĠAD HD +_ amount +os in +ï¼Į 追 +Ġ æ´Ľ +an ical +ãĢĤ å°±æĺ¯ +è¾ Ļ +æĹłå¥Ī çļĦ +D ll +çļ± äºĨ +Ġwel ding +Ġb idding +ภ± +.T asks +asc ade +Ġd inos +Ġpa used +Ġcont amin +èϽçĦ¶ æĺ¯ +ĠY orkshire +Ġevent ual +Ġacquis itions +ĠPer cent +ib a +/ qu +åīĸ æŀIJ +èµ° è¿ĩåİ» +åİ¿ éķ¿ +主人 åħ¬ +_d own +ä¼ł 羣 +ä¸Ĭ åij¨ +ãĢĭ æĿĤå¿Ĺ +ment e +ĠExhib ition +: a +åī§ åľº +èģĺ ç͍ +ï¼Į åĪĽéĢł +Ġtra ps +Ġtrend y +B AR +ä½ĵ åŀĭ +ãĢģ æĶ¿åºľ +_st eps +Ġb ak +ENS ION +âĢľ 两 +ãĢģ åıijå±ķ +R on +Ġsafegu ard +ï¼Į 两个人 +ãĢģ æľºæ¢° +Ġext ras +éļIJ å½¢ +声 ç§° +纪 æ£Ģ +em phasis +ç͵ ç«ŀ +ĠConnect ed +p repare +Ġpath ogens +åºĶç͍ çļĦ +Ġscal able +Execut ive +å¿į èĢħ +å¤į åı¤ +Ġpredict s +ãĢĭ éĩĮ +åľ¨è¿Ļ åĦ¿ +Ġscreens hot +C nt +Ġple ad +w riters +ĠJohn s +声 èªī +èĢĮè¨Ģ ä¹ĭ +Ġbas al +ï¼Įå¦Ĥ æľī +åŁºæľ¬ ä¿¡æģ¯ +å®¶ åįıä¼ļ +( command +S ay +Ġqu o +ï¼Į æ´» +ä¸Ń æµ· +éļIJ èͽ +ĠBurn s +Ġl inen +äºĨ æĮĩ +ĠMid west +ĠEurop a +-res olution +ert ificate +åı® åĺ± +ĠAd m +Ġsett lements +c alled +大 èħ¿ +L ite +ĠTechn iques +çĥ Ļ +Ġob struct +ĠSuccess ful +Ġdecl aring +ĠSUB STITUTE +ĠSm ooth +è¿Ľ åζ +Ġa k +ĠN issan +Ġout right +åģľ é¡¿ +åŁ¹è®Ń çıŃ +ĠBro s +Ġsand wiches +èĬ± æľµ +Ġ åĽ¾çīĩ +.S c +驾 车 +.S erialization +ĠL ars +绳 åŃIJ +Ġfacilit ating +ä¸ĩ èĤ¡ +ĠS event +èĤ¥ æĸĻ +Ġport raits +Ġpl at 
+Ġoptim izing +" åĴĮ +ĠG in +ï¼Į åĬ¨ +åįĬ æĻĮ +è¾ĵ åĩºçļĦ +åµ © +ren ew +æ°ij åĬŀ +ĠF ry +社ä¼ļ ç§ijåѦ +ï¼Įä¸Ģ ä¸ĭåŃIJ +Ġg c +Ġk i +m us +æĥ ° +åºĶ æľīçļĦ +ĠInvest ors +hen yl +ĠSp read +ï¼Į è¯Ńæ°Ķ +gg ing +Ġconfidential ity +Strip MenuItem +ä¸Ģ å¦Ĥ +ĠSep ar +Ġord inance +C BD +ä»ĸçļĦ è¯Ŀ +纸 å¼ł +ï¼Įåħ¨ åĬĽ +u ctions +çŃī åIJĦç§į +å¿ĥ å¾Ĺ +Ġpath ology +åѵ åĮĸ +\ pm +ox icity +éĺģ ä¸ĭ +_ APP +ï¼Į ä½ľ +ĠCour ts +ĠSc out +æ¶Īéĺ² å®īåħ¨ +æĺ¯ å¤ļä¹Ī +G allery +Ġ )) +ĠH older +åѦ éģĵ +ĠBlock chain +å¼Ģ éĺĶ +ĠK erry +å¿«ä¹IJ çļĦ +Ġbas in +ĠGraph ic +Ġsympath y +ĠD F +="../../../../ ../ +ĠType Error +å¿ĥ äºĨ +è·¯ æĺĵ +ĠProv iders +Ġ 羣 +ĠRat ings +ĠSpring er +åΰ æľĢåIJİ +åĸľæ¬¢ ä½ł +Ġnumb ered +ä¸į è¯Ń +ĠFl ip +We apon +转 çľ¼ +ç»ıèIJ¥ èĢħ +èµ° è¿ĩæĿ¥ +ãĢĤ æķ´ä¸ª +åī¯ ä¼ļéķ¿ +ĠG ENER +è¡Į 礼 +åħ¬å¸ĥ çļĦ +Ġjun gle +ĠD ish +V ehicle +ĠBre nt +-y ou +Ġfluores cent +çĿĢ éĤ£ +ĠBl ank +ãĢģé«ĺ æķĪ +å·¥ä½ľ æķĪçİĩ +car bon +Ġcovari ance +åħ± 鸣 +æķ£ äºĨ +Ġf erry +Ġident ifiable +ĠT RA +ĠA ce +åŃ ¢ +Ġobs ervers +Ġreceipt s +æķ°æį® åĪĨæŀIJ +å¸Į å°Ķ +éĩijèŀį å·¥åħ· +åĩº éĻ¢ +Ġsan ction +Ġinject ions +D ar +_LO W +ion a +Ġin ception +(t ask +ĠU UID +æŀĦ æĢĿ +sk b +Ĉ ą +Ġparad ise +< Object +京 éĥ½ +a an +æķij åij½ +åĩº 头 +Ġreg eneration +ĠPROC UREMENT +åŃĺ ç»Ń +è¿ĩ å¤ļçļĦ +Ġinfring ement +Ġag ile +Ġpred ecessor +åIJ¸ åħ¥ +Q S +ä¸ĩ 亩 +Ġwar rior +Ġjurisdict ions +d os +Ġ) : +- str +æĺ¯ä¸Ģ çīĩ +AV A +tor ch +Ġempower ing +æĿij åŃIJ +av ia +Ġpri ests +-build ing +é¦Ļ çļĦ +骤 çĦ¶ +?? ? 
+ï¼Į æĦŁè°¢ +ç±³ å°Ķ +ins ki +ĠSter ling +ì Ĭ +ä¸į å¿ħè¦ģçļĦ +Ġin justice +åľ¨ åIJİ +ãĢĤ éĴĪ对 +Ġre hab +ra x +Ġste er +Ġcomm ute +Ġm alloc +ari at +èĮĥåĽ´ çļĦ +ĠG UID +ä»İ 头 +å®¶åºŃ çļĦ +ä¸į 稳å®ļ +ç²¾ æ¹Ľ +ï¼Įåı¯ä»¥ éĢļè¿ĩ +M ot +ĠD b +女 è£ħ +Ġinaccur ate +ale igh +EMPL ARY +i ag +ĠBay esian +åıª åī© +ĠJ ak +ĠMom ent +åĮĸ 身 +ĠEv an +çݰéĩij æµģ +ĠAmeric as +ol ics +ĠD irection +.add EventListener +Ġsuccess ion +ĠBen efit +Ġw t +å°º 度 +Ġbutter fly +Ġoptim izer +Ġrhet oric +Ġa ustralia +Ġun real +èĤ¡ åĪ© +åįİ çĽĽé¡¿ +æĹı 人 +èι åıª += & +f est +ER M +Ġrep etition +F ake +Ġst aging +æĬķèµĦ 人 +ç¾İ åѦ +ĠPresident ial +çľ¯ çľ¯ +åıĺ è¿ģ +主ä¹ī èĢħ +åıij æĬĸ +rep resentation +æķ¬ 请 +{ }) +:n il +Ġre iter +ĠIn form +Ġdipl omatic +Sc anner +Ġgu ild +( left +Ġru ins +ä½İ ä»· +_h igh +Ġren amed +el p +åĽ¾ æĸĩ +ĠST ATUS +Ġp unk +çļĦ äºĶ +åį´ è¢« +大 çīĩ +éĤ£ èά +Ġ' ') +ï¼Į æ³ķ +Pr ime +in cludes +åĪ©ç͍ çİĩ +Ġ çªģçĦ¶ +å¹ķ åIJİ +å°ı 红 +æľº 身 +èĦ IJ +_s chema +Ġn em +com pl +åIJĮ èĥŀ +(" -- +åľ°çIJĨ ä½įç½® +C rypto +ĠIn strument +-> { +ä¸Ģ 举 +_c ost +F IELD +cel ain +P ACK +ç¤ ģ +Ġattack er +"/ >< +IB UTE +è® ¥ +åľ° åĪ© +ç͍æĪ· æıIJä¾Ľ +ï¼Į éĢIJæ¸IJ +éŃĶ åħ½ +åºĶ æľī +çļĦ 帮åĬ© +èĢĮ æĹł +Ġt ended +Ġshoot s +æīĢæľī 人çļĦ +èĮ« èĮ« +ĠZ imbabwe +æµģ æĦŁ +æĬ¥åijĬ æľŁæľ« +cc oli +ĠMun ich +触 åıĬ +M u +the orem +, æĽ´æĺ¯ +åĸĿ èĮ¶ +Ġcl auses +h oot +ãĢĤåľ¨ è¿ĻéĩĮ +, çĦ¶èĢĮ +ic ans +/ util +rop olis +ä¾§ çļĦ +åħ¶ åľ¨ +% = +å¼Ģ æĪ· +åħĭ éĩĮ +éĿŀ常 éĩįè¦ģçļĦ +Ġinflu encing +个æľĪ åĨħ +Ġaver ages +ĠV ia +Ġstret ches +Ġinhib ited +Ġdis connected +ĠP resentation +_pro b +第ä¸ī 个 +æĬĹ è®® +Ġl iner +ç½ij æ°ij +Ġcor al +çķ¥ æľī +ĠW ik +atto os +Ġlong time +ĠChe st +èĹ ķ +æij¸ ç´¢ +éĿ¢ä¸´ çĿĢ +L ost +ĠO D +un ched +be ck +Ġsqu ared +æķĻ ç§ij +Pr incipal +åĽĽ æľĪ +Ġwas her +, åĬªåĬĽ +. types +, æ¯ı次 +ĠA X +åŃĺ çļĦ +ĠCarol ine +im ator +D ump +. 
ãĢĬ +G INE +I o +ï¼Į åħĥ +Ġcultiv ation +ob iles +åĢĴ åľ¨åľ° +Ġphil anth +ref resh +æ¾İ æ¹ĥ +qual ified +Ġw ired +È Ļ +å¾ĺ å¾Ĭ +ac re +_IM AGE +- plugin +Ġv illa +V L +-t arget +ĠR iley +Ġexc itation +çĽĬ äºİ +樱 æ¡ĥ +èİ«åIJįåħ¶ å¦Ļ +ãĢĭ åľ¨ +Ġinstruction al +ort uga +Check Box +Ġas phalt +ä¾ĿçĦ¶ æĺ¯ +Ġperturb ation +Ġtax ation +ĠL B +.c ustom +è¿ĺæľī äºĽ +ï¼Įè¿ĺ ä¼ļ +Ġre play +èĤ ½ +åĴĮ ä¸ĭ +ï¼Į 计ç®Ĺ +çļĦä¸Ģ éĥ¨ +Ġde leting +ft s +Ex ist +åı¥è¯Ŀ 说 +Ġc decl +ãĢģ æĸ½å·¥ +å¿ħé¡» åľ¨ +ĠEX EMPLARY +Ġincorrect ly +åĪĨ 级 +,æĪij å°± +) _{ +App lications +) }( +Ġf ibr +": {" +èĦļæŃ¥ 声 +èĥ½ èĢĹ +ĠKey board +åĥµ 硬 +ĠR ising +T K +ä¼Ĺ 人çļĦ +G as +Ġas ylum +qu ared +im ension +éģĵ é¢ľ +Ġr he +Ġadj ud +ä¸Ń 被 +æīĢ ä¸º +it at +H W +Ġpr inters +; j +ink er +åı° çļĦ +è¡£ è£Ļ +Ġå¹´ æľ« +ĠD odge +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ +åľ¨ åģļ +å®ī åįĵ +ï¼Įä¸Ģ ä½į +ï¼Į 交éĢļ +åŃ£ åIJİ +Ġb unk +åıij åŀĭ +ĠI van +ï¼Į æĦ¿ +ĠR FC +éĢŁåº¦ å¿« +Ġsharp ly +ãĢģ é¡¹çĽ® +[ e +ĠU E +.g ame +Doc uments +ĠProp het +çļĦ è¦ģ +ro cket +çİ ® +lo ating +ç · +æĺŁ åħī +es i +que z +温æļĸ çļĦ +ju ven +ĠCom edy +Ġel ong +se udo +am d +Ġst ew +red uce +èĬĤ çļĦ +ĠSt y +================================ ================ +èī¯ ä¹ħ +ant ages +ĠPer l +è´ª 婪 +çļĦå·¥ä½ľ 人åijĺ +æŃ£ å¦Ĥ +æ·± æĥħ +Ġment oring +Ġwel comes +ĠDream s +ç¿» äºĨ +der ive +Ġhear ings +Ġins urers +ãĢĤ对 æŃ¤ +Ġrefuge e +çļĦ æĪIJæľ¬ +, è¿Ļä¸Ģ +V ictor +çĭ ° +- equ +CA ST +le ave +< A +èįī åĿª +é¦Ĵ 头 +ĠD as +ï¼ĮæĪij们 ä¼ļ +åīĬ å¼± +ï¼Į èĩ³ä»Ĭ +æ±ĩ éĽĨ +bec ue +L ER +Ġas ynchronous +麻 éĨī +ĠAut umn +_ merge +.st rip +-b ed +Ġst ubborn +IN C +ï¼Į å®¶éķ¿ +ĠÑ Ħ +Ġcon gregation +Ġlate x +ç¼ ī +. 
Equals +æķ´ é¡¿ +éĢı æĺİçļĦ +麻 è¾£ +ĠINTERRUP TION +æĹĹ å¸ľ +ra ke +Ġhist ories +æĬ¥ 导 +Ġm n +(t f +åı¯èĥ½ æľī +ed ic +Ġd ashed +åŀĥåľ¾ åĪĨç±» +t uple +ä¸İ åıijå±ķ +Comp iler +ä¹Łæĺ¯ å¦ĤæŃ¤ +T OC +åĿļ å®ļçļĦ +ç¥ĸ åħĪ +J D +ĠM old +å°ģ éĿ¢ +S uffix +è°ĥ ä¾ĥ +ä¼ģä¸ļ 管çIJĨ +æ¯ĶèµĽ çļĦ +Âł åľ¨ +F K +Ġdiscover ies +Ġqu ir +è¿Ľè¡Į è°ĥæķ´ +len ess +BO SS +å¹² æ´» +Ġadhes ion +Read Only +æµ· çĽĹ +T CP +\ omega +Ġmultipl ied +è¯Ħ å§Ķ +è¿Ļ ç¯ĩæĸĩ竳 +ï¼Ľ äºĮæĺ¯ +ãĢģ åīį +ãĢģ é£Łåĵģ +å¼Ģ æĶ¯ +ĠP ir +Ġshort cut +ress ure +en y +Ġt ant +为 æľŁ +, éĿŀ常 +建 模 +äºĭæĥħ çļĦ +Ġ å¹¶ +ĠP arser +é¢Ħ çķĻ +Ġrepl aces +ภ£ +å°¤ æĸĩ +åIJĮ 缣 +ä¹Ł æĹł +oll en +(j ava +åħī äºĨ +ill ation +ä¸įè¦ģ åĨį +E PS +plic ated +æĽ¾ç»ı çļĦ +" ][ +èĤ¢ ä½ĵ +å®īåħ¨ æĦŁ +Ġquant ify +ï¼Įå¦Ĥæŀľ 没æľī +çIJĨäºĭ ä¼ļ +Ġwidget s +Ġd it +Over flow +engu in +ä¸Ļçĥ¯ éħ¸ +èħ ® +ĠPort folio +å½± éĻ¢ +Ġinterpre ting +(d oc +Ġindic ations +æ¦ľ åįķ +ere o +(b ody +è¿Ļæł· çļĦè¯Ŀ +å¨ Ħ +Con vention +Ġvill ain +ĠB EST +为 缮æłĩ +Ġrel ent +rack s +Ġer osion +ä¸įç»ı æĦı +Ġle agues +æĹĹ ä¸ĭçļĦ +æķ£åıij çĿĢ +Ġlaw suits +ij u +( format +ï¼Į åIJĮæ¯Ķ +æĨ ¨ +_ % +ï¼Į å°ļ +èµ° è¿ij +å¹¶åıij çĹĩ +游 ä¹IJ +Ġsound ing +T N +uck land +_L ONG +con cat +Ĉ Ć +un ed +ä½İ åİĭ +æľ« 端 +ĠCl one +Man ifest +Ġme ga +\x e +æĵįä½ľ çļĦ +çī¹ è´¨ +ik z +-s elling +åį° èĬ± +èĦ¾ èĥĥ +M arg +ĠR oosevelt +æ·± æĢĿ +ĠV K +åζ æĪIJçļĦ +Ġ æ¯ı +æĬķ æľº +ew orthy +}} (\ +Ġf lock +Ġexplo itation +ĠPow ers +Ġ åį¡ +é»ijèī² ç´ł +å½Ĵ äºİ +èĩª ç§° +ĠG rab +çĵ¶ é¢Ī +Ġmy ths +èĩ³ å°Ĭ +C ycle +ĠDep th +Ġinto x +. 
If +ĠSur f +Ġp ng +Ġbe et +æµ· åºķ +å¾· æĭī +ç»ıéªĮ åĴĮ +ĠDo ors +纹 çIJĨ +Ġd un +人 身 +æº ľ +p ixel +çļĦ æĢ§æł¼ +ä¸Ģ æĪĺ +åºķ çĽĺ +d ifferent +Ġcommun icated +Pl ayers +- center +reat ing +Ex c +Ġsp rite +, F +ĠLe eds +ä¹Ł æľīäºĽ +Ġinsight ful +it on +od al +ä»Ļ 女 +Ġbread th +Ġres ides +Ġinter med +V o +å¥ĭ åĬĽ +Ġfor fe +Ġcard board +åĪĨ åĮĸ +社ä¼ļ ä¿ĿéĻ© +ãĢģ 被 +éĻª ä½ł +åħįè´¹ çļĦ +ĠPen insula +Ġt ensions +ä¸į 许 +Ġquot ient +Ġcongress ional +P CR +ĠK ra +ib s +ĠS OC +Ġinvestig ator +ĠS ax +vis ory +ĠB rowse +Ġterm inals +ĠLog o +Ġcub es +èĻ Ķ +ภģ +ï¼Į é¼ĵåĬ± +_b egin +ÏĦ ι +iv el +Ġcred ential +HasColumn Type +oc ities +M i +æĢª å¼Ĥ +st ates +åĽ½ åħ¬ +Ġbud d +' D +so ftware +Rear range +Ġtran qu +è´¢ çī© +Ġcl ash +ö n +åĪĨ 身 +ç«Ļ çļĦ +Part ners +B urn +Ġsu ites +åıĺ çݰ +æīĢ è§ģ +è§£ çłģ +èIJ½ åı¶ +\in fty +ç¬ij èĦ¸ +Ġso ak +ç¼ĺ åĪĨ +/ public +çļĦ èĤ©èĨĢ +ĠH uff +Ġdimin ished +å°Ĩ è¿Ļ +pro blem +Ġnick el +åĮºåŁŁ åĨħ +åIJİ åĭ¤ +äºĨ å¤ļå°ij +éĿĴ é¾Ļ +æĥ³ åĥı +ĠRes istance +ĠJan et +Ġmap le +åĵĪåĵΠ大ç¬ij +ĠLi ability +èĦijæµ· éĩĮ +ä¿Ĺ ç§° +M N +è¶Ĭ éĩİ +âĢĿ ; +Ġorgan izer +ast ore +ĠBry ant +ĠExper iment +Ġ} { +å¾IJ å·ŀ +ic ides +ä¸Ĭ åľº +æĥ³ æĿ¥ +Ġali ens +M agic +AP TER +Ġex quisite +Ġconst ituted +ol ang +ä¸į ä¼ij +vers ed +Ġs way +ï¼Į大家 éĥ½ +Ġl odge +block List +cc i +çĨ ł +ill as +å¤ļ 大çļĦ +l ittle +ĠPrinc eton +äºĨ åįĬ天 +Ġcol oured +_H AS +å°± æĪIJäºĨ +ç͍ åľ¨ +rote in +æĬµ 御 +_RES ULT +ç͍ 以 +Ġvulner abilities +ï¼Įæĺ¯ 个 +伪 è£ħ +çļĦ åģ¥åº· +Ġstim ulating +ĠGovern ance +Ġjour neys +计ç®Ĺæľº ç¨ĭåºı +Ġ èĩªå·± +ĠC af +J et +Ġhur ricane +Ġmiser able +\ ge +æ¼Ķ åıĺ +ĠCol our +åĩĨå¤ĩ å·¥ä½ľ +-s aving +Ġretros pective +col m +Ġrefere ndum +æĺ¯ ä»ĸçļĦ +个 æķ° +ĠL or +å¥ł å®ļäºĨ +Ġmem set +ĠSupp lementary +(f irst +æĶ¾ æīĭ +Ġhun ter +Get ty +Ġelim inates +ĠZ ion +缸å½ĵ çļĦ +Ġqu arry +ĠW ade +}} }\ +Ġrec alls +éī´ åĪ« +äºĮ æŀģ管 +AL K +_BY TE +W inter +ĠWat ches +Ġcooper ate +Cl imate +éĶĤ çĶµæ±ł +ä¸Ģ个 éĹ®é¢ĺ +- word +ĠI v +丼 æŀĹ +ï¼Įå¾Ī éļ¾ +Ġsk ies +çľĭ åľ¨ +äºĮåįģ äºĶ +è¯Ĭ æ²» 
+th ird +_A UTH +è¾½å®ģ çľģ +ĠL ud +åĮ» ç§ij +Config ure +æĺ¯ä¸Ģ æł·çļĦ +ĠP ATH +驾驶 è¯ģ ++ ) +ij a +Fin ance +Imp lementation +ĠAut hentication +Ġra id +è¿ŀ 带 +Ġcont raction +Ġen velop +MENT S +ç«ĭæĸ¹ ç±³ +ĠK or +.res ource +Ġra iny +åĨ² åĪº +çª ĸ +Sil ver +好 ä¸į容æĺĵ +æĪij ä¸Ģ +毫 åįĩ +ï¼Ī åĽĽ +ĠRe ach +è½» éĩį +æĻļ æľŁ +_ low +ï¼Į åŃIJ +Ġ æķ° +å¿ĥ éĩĮçļĦ +Ġinj unction +Ġseiz ure +_ edit +åѤ åįķ +é¢ĺ èĢĥæŁ¥ +ĠD irections +ĠC W +ä¸į å±ŀäºİ +Ġmechan ic +Dam age +äºĨä¸Ģ éģĵ +绵 绵 +t cp +ç¡ķ士 åѦä½į +奢 åįİ +ect omy +ident ified +Ġmac ros +avil ion +Ġso ils +_s a +ï¼ĮéĤ£ æĪij +Ġentrepreneur ial +Sw ap +ï¼Į 沿 +ãĢĤ æİ¥ä¸ĭæĿ¥ +ÃŃ n +æįIJ 款 +çŁ¥éģĵ ä½ł +è°ĥ åζ +åħļ ç»ĦæĪIJåijĺ +è¯ £ +Ġac claimed +C OPY +ĠComp et +ĠViet namese +Y Z +çļĦ 建议 +åľ° 为 +å®ļ éĩı +çļĦä¸Ģ åı¥è¯Ŀ +Ġdispos able +飩 å®ĩ +ĠK iss +Foot er +T ODO +Ġback bone +ãĢĤ åİŁ +åľ¨ 没æľī +Ġmut ants +Ġv ow +.f loor +å¹´ 以ä¸Ĭ +å¿ĥ äºĭ +P urchase +ĠChe ster +Ġelectro ly +( address +AT TR +çIJĨè§£ çļĦ +ãĢĤ ä¸įä»ħ +ï¼Į åĨĽ +ä»Ģä¹Ī æł· +è ³ +æļĸ æļĸ +èģĨ åIJ¬ +(" ./ +æĸ° é«ĺ +Ġfin ale +osc opy +Ġpay out +æľį çļĦ +ï¼Į ç»ĻæĪij +comp ressed +ä¸ŃåĽ½ è¯ģçĽijä¼ļ +Ġhum id +Ġdec oding +æŃ» 人 +t imer +è¶ģ æľº +åľ° 毯 +Ġacc ustomed +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ +æµģ æĺŁ +ĠRh ode +ä¸ĵ èģĮ +ram er +ï¼ĮåIJİ èĢħ +ĠL ET +Ġconduct ivity +get Value +. player +çļĦ é¡¶éĥ¨ +æĹł å°½ +ĠL R +éģĹ è¿¹ +ĠF oo +lo o +et ween +pat ched +_s ingle +Ġded uction +.s in +mod ern +Ġdis ks +è¡Į åĪĹ +æ³ķ åĴĮ +ĠM age +å® ¦ +éĺ¶ æ¢¯ +ĠG ather +ĠBl air +æľ¬ é¢Ĩ +ing ing +Ġfriend ships +rt c +å±ĭ åŃIJéĩĮ +æIJŀ ç¬ij +ĠVis itors +é¦Ļ èķī +åįķ 车 +ĠCan al +ĠW rap +åı¯ ç͍äºİ +ĠStand ing +ath a +Ġpitch ing +( project +c ool +å¸ĥ æľĹ +ï¼Į 建 +-f lex +èĥ¡ æ¤Ĵ +lict ed +Ġretain s +Ġcol s +Ġske leton +ï¼Įä¹Ł ä¸įèĥ½ +åĩī çļĦ +D NS +ĠIraq i +å·¥ç¨ĭ æĸ½å·¥ +_ axis +æĪij æł¡ +Fix ture +两 åįĥ +ç²¾ç¥ŀ çļĦ +äºĨä¸Ģ ä»¶ +Ġmar sh +bol ic +. 
[@ +Ġon Click +Ġsupp orter +e ither +æĬ½ çĥŁ +ĠAndrew s +çĭ © +document class +in ality +ĠLess on +ĠBuff ered +amm ers +两 çϾ +è´µå·ŀ çľģ +伺 æľį +Ġint ellect +åįĬ 身 +ĠRoof ing +ĠVen us +æĭ¿ åΰäºĨ +Ġacad emy +Ġspons orship +åŃ ° +è§£ èĦ± +Ġch ord +N PC +ĠPref erences +ĠM ood +Ġtut or +/ month +ĠT ennis +Ġant it +Ġyield ed +Ġlear ner +åħ« 个 +åĬłå¼º 对 +æłĩå¿Ĺ çĿĢ +ĠR EL +>< ? +.Tab Index +.Fat alf +Ġg land +æĭ¼ éŁ³ +Ġwheel chair +Ġbl ends +åIJį æ°Ķ +Ġelectro des +ï¼Į å·®çĤ¹ +èģĶç³» æĪij们 +辩 论 +Ġठķ +Ġclin icians +Cele br +Ġto do +æīĵ åį¡ +士 æ°Ķ +________ ____ +çļĦ è¿IJåĬ¨ +ĠSud an +ä¸į å®Į +Ġcock tails +Ġstra ps +ç¬ij å¾Ĺ +ĠM orm +è¿ŀæİ¥ æĿĨ +oc om +post s +_N ET +çĬ¶ åħĥ +". $ +F ant +åĵģ ç±» +害 ç¾ŀ +æĥ¬ æĦı +Ġs sh +åζ çīĩ +Ġb ark +up on +åŁºæľ¬ æĥħåĨµ +å¦ĸ æĢª +åįĹ å±± +çļĦ æĪ¿éĹ´ +ä¸į ç»Ļ +èIJ ¤ +Cl inical +ãĢģ å®¶åºŃ +âĢľ They +éĩįè¦ģ æĢ§ +Json Property +FF F +ì ķ +Ġv ain +ä¸İ åIJ¦ +ĠRect angle +end id +Ġdis covers +ĠA ra +Ġch im +åħ± èµ¢ +åĪĩå°Ķ 西 +ĠKash mir +ĠH av +çĹħ çIJĨ +ĠD rivers +(t itle +end ars +p v +ä¸į è¿ľå¤Ħ +æĹł æľº +çļĦ人 æĿ¥è¯´ +O x +Ġdeep est +Ste am +Ġspecific s +, äºĮ +对 æĪĺ +ç¾İ çϽ +Ð ŀ +ï¼Į é¡¶ +çļĦ æĿİ +åĩº 示 +[ S +ĠP iece +ï¼Į çζæ¯į +, u +ĠD uck +ãĢĤ 建议 +Ġher s +ĠC ul +导 游 +ãĢĤ 西 +好 æ¶Īæģ¯ +ĠOrigin ally + Į +ç»Ĩèĥŀ çļĦ +ĠF usion +Ġpe an +å¤ļ 说 +å±Ģ å±Ģéķ¿ +åĩĿ åĽº +L ord +åľ¨ è¿Ľè¡Į +æķĻ ä½ł +ĠAss ist +erm al +çľ¼ è§Ĵ +æĪij们 æĺ¯ +ç¼ħ ç͏ +åŀ £ +Ġiter ate +Ġc trl +Ġdef enses +对 åĩĨ +Ġdisc ard +Ġmedic inal +女 主 +ball s +ĠBang alore +_ os +Ġbl ur +ĠNOT ICE +ï¼ļ éĢļè¿ĩ +Ġre consider +Pr inter +ĠSp ell +大 çģ« +ãĢģ ãĢģ +åľ° è¿Ľè¡Į +_S RC +enz ie +.f ont +t ok +Ġa ce +é¢Ŀ å®ļ +M n +Ġhistor ian +Ġ" & +ĠK urt +rap ist +éļĨ éĩį +ãĢĤ 许å¤ļ +æł¼ æł¼ +å»¶ æľŁ +Ġmon key +Ġpar ity +Ġc ate +-w ater +Ġtoler ate +Ġignor ant +ï¼Į çļ® +Ġve ins +S ounds +RE Q +Ġcode c +ĠBr ush +ĠNob le +ï¼Ľ ä½Ĩ +cre ation +Ġles ion +/p ost +ig raph +ï¼Į è´¨éĩı +è¿Ļ æĶ¯ +æĪĺ 绩 +d al +fe atures +ra ising +W K +æĸ° ä¸Ģ代 +ĠD ict +ĠDes igned +ä¸Ń è·¯ +ĠFun ny +) == +æ¼ © +åIJī å°Ķ +Ġc mp 
+被 è¯Ħ为 +Ġcollect ors +- string +ä¹Ł åºĶ该 +ä¸İ æŃ¤åIJĮæĹ¶ +åIJĪ å½± +æĺİ æľĿ +ðŁ Ĵ +ĠSch ol +] -- +人 ä½ĵçļĦ +Ġ æłĩé¢ĺ +Ġsu cks +åĪĢ åħ· +-t ool +çŁŃ çļĦ +ç¨İ è´¹ +Ġyoung sters +_sh ort +åĬ¿ 头 +ur on +ĠIm aging +ä¹ĺ 车 +æĬĹ ä½ĵ +.char At +Ġm unicipality +ĠA SP +.c al +Members hip +\ p +Ġs ar +以 èī²åĪĹ +(d st +J osh +ĠS oup +æĶ¹ è£ħ +ĠFarm ers +饿 äºĨ +_d ouble +æĬĵ èİ· +éĿĻéĿĻ åľ° +éĿ¢ä¸´ çļĦ +âĸ ³ +Ġlean ing +[ f +å§ij å§ij +- es +( åį³ +ĠDISCLAIM ED +T her +åĸ ½ +åĤ¨ èĵĦ +èĦ± æ°´ +æŃ¦æ±ī å¸Ĥ +Ġor phan +Look s +Ġsin us +åħ¬ è¯ģ +Ġcar riage +_ sequence +Ġbow el +ï¼Į ï¼Į +ĠL ANG +.c apitalize +¸ ÃIJ +ĠSk i +af il +  +Ġwag ering +Ġdile mma +è¶´ åľ¨ +éĩı 大 +èĹı çĿĢ +æĸ¹åIJij çĽĺ +ĠG ang +ĠSus p +ï¼ĮæīĢè¿° çļĦ +ĠSpot light +å»ī æĶ¿ +N ight +Ġstand alone +Widget s +C irc +èķ ĥ +ĠS urre +ï¼Įå½ĵ ä¸ĭ +IB M +_err ors +ï¼Į åı£ +ace y +é¢ Į +çŃī ä¿¡æģ¯ +æľī æ¯Ĵ +Ġoblig ed +èĮ ¸ +ĠAP K +æĽ ľ +ug u +ĠTra iler +ï¼Į åħ·å¤ĩ +æĥ³ 让 +Ġatt ribut +è¯ļ æĦı +ĠAthlet ic +ĠRena issance +_C TL +ç¯ ± +Ġbit es +å¤ļæł· æĢ§ +Ġfoot er +ĠCapt ure +ĠS ak +éªĹ åŃIJ +ä»Ģä¹Ī 人 +ĠIns pector +ĠC urrency +èĩªçĦ¶ èµĦæºIJ +Ġf b +èģĶç³» 人 +ï¼Į å®¶éĩĮ +æĮĤ äºĨ +, åĪĩå®ŀ +qu ito +Ġfil med +ä½İ 碳 +Ġli en +ist ani +æĺĨ èĻ« +èī° è¾Ľ +西 æĸ¯ +Ġcr an +Âł æĿİ +enc ils +ãĢģ åIJĮ +è¿Ļ åĩłå¤© +夺 å¾Ĺ +ĠA PR +Ġclass ics +ï¼Į æĶ¾åħ¥ +äºī åIJµ +è´£ 令 +ĠBre tt +ç»ĵåIJĪ èµ·æĿ¥ +c ation +Ġ" // +ĠObs ervable +æĻ®éĢļ èĤ¡ +Ġestim ating +Ġä r +Word Press +Ġ æĹ¢çĦ¶ +ĠC inema +ï¼Į 硬 +ãĢģ èīºæľ¯ +大 å°Ĩ +ĠB irds +Ġ 计 +, éĻĪ +åĪĨ æŃ§ +Ġcounsel or +ä¹Ł éĥ½æĺ¯ +è¿Ļä¹Ī ä¹ħ +Ġg h +æijĨ åĬ¨ +.S erver +-------- - +.ch annel +Ġconc aten +æĭ · +riv al +ex isting +/ types +ï¼Į 横 +äºĴ åĬ© +ĠH erald +ä¼ļ å¾Ī +-pro duct +ï¼Į åĽŀ头 +Ġcallback s +. 
validate +åİĨ 代 +è°ĵ ä¹ĭ +æĻ¶ èݹ +IF O +: i +ĠMay a +Ġveter inary +ĠS eth +Ġc ops +Ġoff sets +ä¸įäºĨ çļĦ +åŃ£åIJİ èµĽ +\x c +Ġur inary +ç´§ç´§ çļĦ +( http +å¡ŀç½Ĺ éĤ£ +æľĪ 饼 +ï¼Įåį³ ä¾¿æĺ¯ +Ġa le +Ġqu artz +çľ¼ ä¸ŃçļĦ +Ġfacilit ates +ãĢĤ ä¼ļè®® +ĠApp ell +C ause +Elect ric +Ġm igr +ĠSpirit ual +d ialog +ç©¿ æIJŃ +ĠRead ers +Ġbi opsy +森 纳 +Ġpour ing +-dec oration +éĤ ĥ +ĠJew elry +P ipe +Ġcre ws +ĠI G +âĢĶ that +åĴ¨è¯¢ æľįåĬ¡ +-t ask +_k ernel +fil m +ãĢĤ ä½ķ +åħ¬ é¡· +- add +ighb ors +äºĭä»¶ çļĦ +éĩĩç͍ çļĦ +Ġcraw l +Ġo le +èµĦæľ¬ å¸Ĥåľº +ãĢĤ æį¢ +漫 éķ¿ +ï¼Į åIJ« +ĠDise ases +ãĢĤ æĢ»ä¹ĭ +æµģ æ°ĵ +P ix +åĽłä¸º ä»ĸ +ĠControl s +åĩº åľŁ +.sc roll +Ġwer den +ĠD rama +åĩĨå¤ĩ äºĨ +çĭ° çĭŀ +èIJ½ äºİ +\x a +è¿ŀæİ¥ æĿ¿ +Ġspectro scopy +ä¼´ æľī +è¿ĩç¨ĭ çļĦ +åĩº åĩ» +que ous +没 è§ģè¿ĩ +åĽ¾ çļĦ +æĺ¯ä¸Ģ 项 +çĹĽèĭ¦ çļĦ +çĿ¡ äºĨ +ĠMar co +éĤ£ä¹Ī çļĦ +æŀķ 头 +ĠW inner +fl ush +Ġfashion able +-p aced +çĴĢ çĴ¨ +ãĢĤ å°±åĥı +Ġsh uffle +Âł èĢĮ +.N ull +æĶ¶ äºĨ +åħ¬ çε +_R IGHT +Ġip hone +ri pe +veh icle +åıĺ èī² +Ġal ot +é«ĺ 空 +u ard +Ġethnic ity +åĮ» ç͍ +Ġenc rypt +ĠD ana +伤 äºĨ +-consum ing +èĥ¸ èĨĽ +ĠEX T +Ġmal aria +Ġgreet ing +ĠAdult s +-d igit +Ġmill iseconds +ĠIde al +B orn +ï¼Į 微微 +-g en +V PN +id ade +pp s +èĢģå¸Ī 们 +p apers +ภĩ +ĠEm irates +ĠChar leston +ĠM ate +æ¼ ³ +ver ting +å®Ĺ çļĦ +ĠKn ox +- search +.L ayout +è´´ å¿ĥ +Ġparad ox +ĠAp ost +çķĻ åѦçĶŁ +Ġphot ons +ip ient +äºļ åİĨ +ean or +ph inx +Ġan ecd +ĠSpect rum +åĨħ 饰 +è¹ ¬ +侦 æİ¢ +æŃ» èĢħ +b ucket +Ġin h +Ġw ander +Step hen +Ġw i +ĠR as +Ġdet ached +Type Def +缮æłĩ æĺ¯ +ish na +æİ¥åıĹ çļĦ +éļĶ çĿĢ +-ch annel +æĸ° 产åĵģ +Ġfac ets +d emo += . 
+æĮº 好 +_d rop +ĠRob ot +: c +Type Id +Ġmar row +_ printf +åĬ¿ å¿ħ +CD C +Ġqu arantine +Ġob ese +äºĴ éĢļ +ĠManag ers +Ġm art +ĠMar ina +ench mark +M ODULE +ä»ĸ们 æĺ¯ +Ġroyal ty +æļ§ æĺ§ +ãĢģ å¦Ĥæŀľ +æľ¬ æºIJ +æĥ Ń +ĠT L +ount y +Out er +RO C +.p ack +æĥĬ å¥ĩ +æĿ¾ äºĨåı£æ°Ķ +âĢľ èĢģ +ĠAr bit +ism et +å¹´ 份 +Ġì Ŀ +ĠS cope +ĠDon na +æ²¹ æ¼Ĩ +ĠF requency +åĮº åĴĮ +ĠDra ke +Ġattract s +v iol +æĬĹ æĭĴ +Anal ytical +ID D +ĠPay ments +ĠCom ponents +et ter +.get Text +Ġo st +b ai +çī¹ åĭĴ +åĩĿ éĩį +ĠEthiop ia +ind le +ĠLouis ville +h in +) < +åŃĹ åħ¸ +èĮ ¬ +ä¸ĵéŨ çļĦ +æľĪ æľ« +Ġlif ts +Ġbe ard +ä¹Ł æĽ¾ +ĠPublic ation +æĤĦ çĦ¶ +好 ä¹ħ +( width +, å¾Ģå¾Ģ +Ġstiff ness +æİī èIJ½ +ock ing +ĠU PS +èĦij ä¸Ń +åľ¨ 她çļĦ +æ¯Ĵ åĵģ +_sh ift +çļĦ ä½ł +ä¸ĩ è¾Ĩ +c ats +P ic +.f a +å¦ ³ +天 涯 +åIJĥ åΰ +Ġwithd rawn +_ ro +ĠCor respond +缴åįĩ æľº +. level +, çݰ +Ġposs essions +S weet +Ġincub ation +Ġasp iring +ï¼ļ ** +ĠVenezuel a +ag ents +OT A +èĢĥ åľº +Ġaccident al +å¹¶ ä¸įä¼ļ +ĠD ust +Ġreb ounds +Ġprompt s +æİ¥æĶ¶ åΰ +Ġsubstant ive +Ġpand as +æµ· 峡 +ï¼Į éħĴ +SC s +Ġspin ach +Ġbur nt +Ġafter ward +CS I +unt ary +Ġlim bs +åıį 转 +éĻĨ é£İ +åĮ»çĸĹ åĻ¨æ¢° +ĠR end +é¢ij é¢ij +Ġcontext ual +ĠDo ctors +æĤ¬ å´ĸ +ĠGo al +Ġd ancers +and em +æī¶ çĿĢ +ĠA mber +ç§ĭ åĨ¬ +Ġd warf +ãĢĤ åŁºäºİ +çŃī çī¹çĤ¹ +ä¾į éĥİ +ä½ľä¸º ä¸Ģç§į +Ġbreast feeding +ist rate +# a +æĬ¤ èĪª +Ġp ests +èµŀ åı¹ +Ġ à® +amb urg +ç»Ħ åĪĨ +ãĢĤ æľīçļĦ +æľŁ å¾ĴåĪij +Europe an +Ġpar an +æĶ» åĬ¿ +Ġdid nt +$$ , +æ± ¶ +ç¼ Ń +, éĩij +O E +å°ı å§ijå¨ĺ +æ²ī åIJŁ ++ / +B rown +-t oggle +人 å¤ļ +Ġbear ings +ï¼Į é¢Ĩ +Ġso othing +pr im +æĿĢ çļĦ +Ġadvis able +Ġexpos ing +.con vert +, -- +ĠConc ert +ch oose +ie g +Al bum +ĠMag ento +ĠRedist ribution +ts y +Ġconduct or +od or +ĠDay ton +att achment +æĭ¥ æĮ¤ +S cheduler +æīŃ è½¬ +. 
emit +con g +ipp ets +å̾åIJij äºİ +Ġsystem atically +ĠRuntime Exception +ä¹Łä¸į åı¯èĥ½ +ä½Ĩä¸į éĻIJäºİ +åľŁ æľ¨ +Ġexcer pt +ĠH DMI +Ġ ç½ij绾 +Ob viously +Ġmethod ologies +stan bul +ĠAg es +è¦ģ å°Ĩ +_ plugin +ĠS age +å§Ķåijĺä¼ļ å§Ķåijĺ +ĠM ang +æĽ² æĬĺ +å¤ı æĹ¥ +åĵĪåĵĪ åĵĪåĵĪ +S oc +Ġpan or +ç©¿ äºĨ +iff any +, åħ¶ä»ĸ +ĠMor rison +W HERE +Ġdef er +æĪĺ æľº +Ġsoph omore +fore ign +â ¼ +é m +èįĨ å·ŀ +ĠOrth odox +Ġtruth s +- che +ä¸Ģ å¾ĭ +Ġch ars +.W idth +(res ource +_C PU +æĹ¶éĹ´ 段 +ï¼ĮåĽłä¸º å®ĥ +et ches +èĢĮ éĿŀ +ĠUN IT +ĠHig gs +ĠComm ander +K W +Ġrem ovable +ĠM inecraft +sw orth +éŃĶ çİĭ +ĠC aps +ĠCh oices +车è¾Ĩ çļĦ +ĠRecogn ition +ĠS par +Ġfollow er +per p +q t +ãĢģ 第ä¸ī +und ing +çĿ£ æŁ¥ +[ N +åħī è¾ī +_c lock +Ġwarrant ies +ĠVac ation +éĵ¶è¡Į çļĦ +Ġautom ate +rim inal +. an +Out side +åѦéĻ¢ çļĦ +æľī å°ı +ĠRed uction +ĠFreder ick +éª ¸ +T G +[ MAX +m aterial +Aut hent +âĶ Ĥ +æĬķèµĦ æľīéĻIJåħ¬åı¸ +ĠConvers ation +( addr +ï¼Į 临 +Ġviol ating +åŁ¹è®Ń æľºæŀĦ +Ġ ä¸įæĺ¯ +éĩį åıł +è¾ĥ éķ¿ +åı« çĿĢ +è¿Ļ个 æł·åŃIJ +ĠGr inding +ï¼Įå½ĵ å¹´ +it one +æĽ´å¤ļ çļĦ人 +ac ier +æľĢ å°ij +is ol +为 åħ¬åı¸ +Ġcul p +ex ico +Ġri pe +æĺ¯ä¸Ģ éģĵ +å°± è¿ŀ +åĩł åIJį +Ġassist ants +åįļ士 åѦä½į +, åıĬæĹ¶ +ĠD ylan +Ġdeleg ation +马 è¾¾ +circ le +ĠBib lical +Ġd rones +åıĹ æīĺ +Ġp iss +Creat ive +Ġ çī¹ +ty ard +èµ° åĩºæĿ¥ +æĢ§ çĸ¾çĹħ +_P I +çŃī åĨħ容 +Ġped al +èĬĤ åģĩæĹ¥ +. 
return +ä¸Ń 使ç͍ +Ġb ait +ĠP our +_M ULT +éĩİ åħ½ +èĥĨ åĽºéĨĩ +ç¾ Į +Ġamb assador +Ġneut ron +满æĦı 度 +æĹ© æĻļ +, åħ¨éĿ¢ +st ated +,è¿Ļ å°±æĺ¯ +ext end +è§ģ åΰäºĨ +atisf ied +ĠDru gs +- App +Ġsub license +èĩªçͱ çļĦ +æ·¡ çĦ¶ +ĠTH REE +.C OM +Ġpill ows +ĠRe agan +, å¦Ĥä½ķ +Ð Ŀ +é es +Ġrevis ions +Ġb isc +ä½ ¼ +Ġdis like +ĠCon version +ãĢģ å¹³ +inet ics +ï¼Į äºĭæĥħ +åľº åĿĩ +ä¼łè¯´ ä¸ŃçļĦ +ä¹Ł ä»İ +ĠÏĦ οÏħ +B right +Ġpercent ages +æľ¬ éĩij +æľ¨ è´¨ +ï¼Į 奥 +缸 ä¼¼çļĦ +ç»ıæµİ æķĪçĽĬ +åĨľ ä½ľçī© +ĠCS V +æł¹ åŁº +æĽ Ļ +åŁºéĩij æīĺ管 +J am +Un signed +Ġwonder fully +ঠ¾ +Ġrec ount +大 æĥĬ +Ġsched uler +Ġ æľª +æ±Ł å¸Ĥ +å¥ĸ åѦéĩij +th us +çļĦ å°Ĩ +.T ag +é»Ħ èī²çļĦ +ï¼ĮçĦ¶åIJİ åľ¨ +op last +W onder +ä½į å±ħ +æĻ¶ä½ĵ 管 +éĶĻ è¿ĩäºĨ +èģª æĺİçļĦ +f actory +n umeric +on ne +Ġhome owner +ĠL M +or por +Ġhug ely +Ġu v +æ¯Ľ åŃĶ +ä¸ĸ ä¿Ĺ +ip ort +ä¸ĸ æ°ij +Ġsolid arity +è¡ĮæĶ¿ å¤Ħç½ļ +ĠG iant +ĠBe ast +æij© å°Ķ +* { +æİĴ åľ¨ +è¿Ľ é£Ł +æĮĤ éĴ© +Ġl ust +erc ase +Ġcom eb +Ġvolunte ering +éķ¿ å¯¿ +æĢ§ æĥħ +Ġestablish ments +Ġrev olves +ĠT akes +Art ist +, è¦ģæ±Ĥ +Ġì ŀ +\ gamma +-l g +Ġp es +ä¸Ģ å¹ħ +ĠF loyd +åħ¬ ç«ĭ +ĠCor rect +Ġmultipl ier +IN IT +å¿į çĿĢ +å°ı说 ç½ij +Ġp ly +eb ooks +ĠEp ic +ĠMar shal +j ay +èµ· åΰäºĨ +第ä¸Ģ åIJį +| c +ox el +ĠBr isbane +let ely +id able +/ com +_ contents +ĠN G +Ġfore going +ä¸į è¨Ģ +åĨħ ç§ij +æĤ£ çĹħ +çļĦ æĪĺæĸĹ +best os +楼 å±Ĥ +_OP ER +ĠDom estic +Ġtop ical +åıĮ èħ¿ +大 åĸĬ +Ġhe n +ĠN RF +å¾Īæľī åı¯èĥ½ +ï¼Į åĨ° +Brit ish +è¾ĥ å·® +ĠGu inea +åĢį çļĦ +å¡« åĪĹ +ĠLast ly +ĠMax well +ä»ĸ éĤ£ +åIJĥ ä»Ģä¹Ī +Ġ... , +Rep orts +Ġ( ~ +hes es +Pref erred +å¥Ĺ æĪ¿ +çľĭ ä¸įåĩº +éĹ² ç½® +转移 åΰ +èIJ½ æĪ· +, ç»ı常 +à « +_t imestamp +夺 åıĸ +Å ³ +å®° 缸 +端 åŃIJ +ĠD AY +âĢĿ æ´»åĬ¨ +( ** +Ġ éĿ¢å¯¹ +亨 åĪ© +产 çļĦ +请 éĹ® +ĠA ggreg +ĠStar bucks +Ġw ager +Ġvert ically +, æĪij们çļĦ +ĠT x +_d one +Ġtoss ed +åĪĹ ä¸¾ +P ur +ĠAl umni +ob l +æĴķ è£Ĥ +ãĢģ ç§ijåѦ +åIJij ä½ł +Ġmim ic +ĠGet ty +E ye +. 
lock +ä¸įå¾Ĺ å·² +æ¯Ķ ä½ł +ï¼Įå°Ĩ åħ¶ +ãĢģ éĢļ +ï¼Į éľ²åĩº +ç͵åŃIJ éĤ®ä»¶ +Great er +.d oc +' n +m is +çľ¼ çļ® +Ġret val +Oh io +M other +ĠHam mer +ãĢĤ æľĢè¿ij +çķĮ éĻIJ +ĠFun eral +st mt +åħī çħ§ +çļĦ主 ä½ĵ +æľī è¿Ļä¹Ī +为æĤ¨ æıIJä¾Ľ +Ù ı +Ġge ared +èµ· åĪĿ +ĠHero es +ãĢĭ åıĬ +.App lication +ĠBou levard +M aps +$$ . +åıijè¡Į çļĦ +æĹł äºĭ +Ġtext ile +DU CTION +al most +ol lection +Ġpres idency +èľ ķ +ĠChev rolet +åģļ å®Į +Ġinf l +F red +表 çļĦ +å¡ij èĥ¶ +ĠR iv +ĠAbs olutely +ä¸ĩ ä¸ĩ +Ġw izard +æĽ´ åħ· +大 为 +Ġrep ent +ä¾ ¥ +ç¿»è¯ij æĪIJ +Ġgi ants +Ġt au +, åĬłä¸Ĭ +åĬŁ å¾· +追 éĹ® +éĹº èľľ +éĿĻ æŃ¢ +éĩİ å¤ĸ +-ser if +æĹł 表æĥħ +Adv ice +Ġhop eless +Un icode +S imply +麻 æľ¨ +Ġful fil +Ġcongest ion +V e +ENS OR +ĠSt a +éĻħ åħ³ç³» +Ġpain ts +æIJ¬ å®¶ +ĠPl atinum +ï¼Ľ 以åıĬ +ac ao +âĢľ åĵ¦ +Ġf use +Ġspe eches +åıijçĶŁ é¢Ŀ +Ġdream ing +Ġr ugs +èĽ Ł +.y outube +æĤ² åĵĢ +, ä»ħ +使 ä»ĸ +ï¼Į é¦ĸ +-p urpose +yn n +pl ants +, H +ï¼Į 个人 +Ġde cks +M ini +漫 éķ¿çļĦ +- loop +ĠF CC +Ġag gression +ĠC andy +âĢĿ âĢĶâĢĶ +ãĢĤ å°¤åħ¶ +宫 女 +ĠQUEST ION +F ashion +Ġà º +h arm +äºĭ 迹 +(w indow +æ¼Ĥ æµ® +ï¼ļ [ +æľī èĩªå·±çļĦ +ĠScal a +ĠRelations hips +浦 举 +åĨĻ åĩº +é¢Ŀ å¤ĸçļĦ +Ġ ç͵è¯Ŀ +è¿Ļ个 åIJįåŃĹ +and ro +æĢ§ ä¸İ +æľī èĥ½åĬĽ +Ġterr ifying +Pred icate +ĠÐ ¡ +è¿Ļ åIJį +ard on +ĠR ut +ĠCurt is +ä¹ĭ 主 +tr ad +Ġru ined +F ee +Def initions +ä¸Ģ åΰ +otyp ing +lo ver +ĠA MA +çģŃ äºĨ +ï¼Į çľ¼åīį +ap id +Ġsynthes ized +é£ĺ é£ĺ +头 æĻķ +ä¸Ģ åĽŀ +ĠD ates +Ġuniqu eness +èĩªå·± åĴĮ +Trend ing +åľ£ åľ° +æµģ æ·Į +Ġdish washer +TH IS +éĿ¢ 缸 +A zure +res ize +ĠG ir +Ġrece ivers +è¿ŀæİ¥ åΰ +Ġabs or +. we +éĢļ 红 +ĠÐ Ł +å½ĵ ä»ĸ +丰å¯Į å¤ļ彩 +Meta Data +ĠRoche ster +Ġskept ical +ï¼ħ çļĦ +( sh +S port +ĠMay o +ĠTrans it +éĹ´ æĸŃ +æĶ¾ äºĨ +ĠSee k +ĠNetwork ing +äºĽ 许 +âĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶ âĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶ +ç©¿ æĪ´ +èİ« æĸ¯ç§ij +éĶ ° +. 
ne +; č +ä¸į ä½ľ +æ· Ħ +Ġec ology +å®Ŀ çī© +ç»ij æŀ¶ +ĠR oh +ĠR x +, æıIJåįĩ +ot le +Ġswe ater +hy de +ters on +ãĢģ èµµ +gener ic +ĠNot Implemented +Ġto mb +Ġprob es +ĠPB X +æħ · +å·¥ç¨ĭ 建设 +S ong +ĠLoc ations +ur acy +ç¯ Ĩ +帮 她 +ï¼Įè¿ĺ éľĢè¦ģ +.st ore +ï¼Į 究竣 +ip ur +-in spired +ch in +åĩº æĪĺ +ĠMed al +ĠPil ot +Sp in +èĭ¦ éļ¾ +Ġter ribly +åķ ° +ĠAss ign +Ġchrist mas +.b uffer +æĮ¡ ä½ı +ĠM ixed +æİ¥ 线 +çłĶç©¶ åĴĮ +-s olving +æĹ¥ åŃIJéĩĮ +et z +Ġsung lasses +éŀ ĺ +Ġacqu aint +ï¼Į éĿł +che my +Ġinf amous +ĠInd oor +At A +ä»įçĦ¶ æĺ¯ +Ġincrement al +ĠC MD +Ġun even +ĠS pl +.get Type +oll a +ĠA mer +å¸Ĥ éĿ¢ä¸Ĭ +èĸ ° +åĩº ä¼Ĺ +_ ui +对 ä½łçļĦ +ST EP +宫 å»· +Ġdil uted +æ°ij èIJ¥ +.p ers +èĢģ 夫 +_RET URN +åŁºçĿ£ æķĻ +develop er +Ġstr ang +ĠP ix +Ġind emn +, E +çļĦ æ¯į亲 +æij¸ æij¸ +ï¼Į 亲 +en os +Ġgro ove +绣 绣 +Ġcoc oa +Ġgast ric +Ġra ils +éĺµ æ³ķ +ĠPark inson +ĠAd rian +th ought +å¤ļ è°¢ +( æĪĸ +Log o +Ġcytok ines +æĹ© å·²ç»ı +åĸľ çαçļĦ +(h andle +车 éĩĮ +.con v +æ¸ħ é£İ +çĸ¯ åŃIJ +Ġp add +Ġrem and +éĢı äºĨ +éĽª èĬ± +Ġvisual s +ack er +èĢIJ ç͍ +åħ¥ æĪ· +æĸ¯ èĴĤ +Ġline arly +IZ ED +åĪĩ çīĩ +. annotations +ĠG or +é¦Ļ çĥŁ +åį¡ æĭī +æĬ½ åıĸ +äºĮåįģ å¹´ +é³ Ħ +umb ent +Ġ* )( +Ġsel dom +UG H +Ass ociation +ï¼Įåį´ è¢« +æĢĢ çĿĢ +_T OKEN +-b and +æ¯ķ竣 æĺ¯ +'' ' +çļĦ æīĭæľº +_c sv +ĠIde a +æīĢ è¨Ģ +éĿŀ常 好çļĦ +Ġ ................................................................ 
+ch aft +ĠDem ocracy +ĠDep ot +æīĢè¿° 缮æłĩ +_ - +è´¯ éĢļ +r unning +Ġel apsed +å¢ŀéķ¿ çİĩ +ĠRew ards +Ġ á +Offic ial +- Star +ĠER P +ï¼Į AI +mod ified +T AG +ra ils +å°ı åѦçĶŁ +æµģ éĢĿ +_B E +head ing +åĽ½ ä¼ģ +aj i +ç³ ľ +ern s +ic er +ãĢĤ çľĭåΰ +ãĢģ éĺ¿ +ä¼ļ æĽ´ +ĠCounsel ing +reg istry +.N ext +Ġdivid ends +Ġscr ub +ell es +åĨĽ èIJ¥ +I gn +ĠP icks += y +èĪª æ¯į +ĠRol ling +ĠAp ollo +ãĢĤ æĪĸ许 +使ç͍ æĹ¶ +Material s +Ġend uring +ATION AL +om nia +arn a +ãĢĤ åįĹ +å¹² éļĨ +ä½Ļ åIJį +- opt +åŃ¦ä¹ł æķĻèĤ² +ĠSecond ly +claim s +ĠW WE +Ġflaw ed +Ġpreced ed +Ġday light +éº ĵ +éĶģ ç´§ +pro j +çļĦ 第 +ĠG ig +aj o +éĢļ åijĬ +é g +ä¼ ¶ +ĠSur ve +ä¹İ ä¹İ +.In ternal +NY SE +åĨĴ çĿĢ +ĠBack up +H air +çĥŃæ°´ åύ +å±Ĭ åħ¨åĽ½ +-le arning +æĿī 磶 +Ġdev otion +Ġgroup ing +Ġdetect ors +t one +um ab +Coll apsed +åıĤ æ¼Ķ +Ġdispl aced +Ġg arn +ĠPalest ine +J e +ĠA E +Ġal coholic +ãĤ Ń +亮 äºĨ +è¿IJåĬ¨ ä¼ļ +Ġobserv ational +c ause +ĠG ut +..\ ..\ +ĠD w +çĭ¬ åѤ +æIJŀ å®ļ +min imal +æĹ¶ è£ħ +ĠMar ks +_path s +ĠFore ver +å·¥ä½ľ æĹ¶ +Ġsub division +åī¯ æĢ»è£ģ +ĠB TC +l ime +roid ery +ger ies +åŁºéĩij åIJĪåIJĮ +éĹ® åĢĻ +Ġinstall er +Ġè¿Ļ 次 +ist ro +åģļ åĩºçļĦ +ĠEd en +Ġmacroph ages +æĮī åİĭ +æī¾ ä¸Ģ个 +好 çİ© +ĠMark ov +Ġmus cular +s pecific +.Char Field +å·¥ åĮł +ĸ ´ +ĠYan kees +ä¸īè§Ĵ å½¢ +åģļ 为 +ä¾µ èļĢ +Ġexped ition +Ġbl amed +Ġphys ic +respons ive +Ġv es +Ġerrone ous +æĹ§ çļĦ +Ġhyp oc +.J oin +] ãĢĤ +ĠH ist +ĠS ach +Ġsim ulator +Pro duction +Ġп о +ен ие +ĠF erguson +ĠD iane +åħī æĺ¯ +ĠB ios +ä¸Ģ è´¯ +Ġr uler +Ġexempl ary +An th +amb ia +åĨħéĥ¨ æİ§åζ +ĠAtt empt +Ġpul ses +, K +Ġafter math ++ k +ä¼ł 导 +, G +not ification +ur ate +Ġfertil izer +Ġgather ings +Ġm v +Open ing +å¢ŀ åİĭ +转 æĬĺ +第ä¸Ģ ä½į +AC A +Ġ æĸ¹æ³ķ +åıij æ³Ħ +æĸ° åĵģ +unc iation +å®ŀæĸ½ä¾ĭ ä¸ŃçļĦ +åľ¨ 对 +åľ¨ èģĮ +åķĨ éĵº +çł´ æįŁ +å±± è·¯ +Ġconstitu ents +æĬĬ æĪij们 +Ġcommod ities +Ġvac ant +èı© æıIJ +av ad +访 è°Ī +ï¼Į没æľī 人 +æ²»çĸĹ çļĦ +å¡« è¡¥ +Ġch an +æĬĹ åĩ» +:: { +æĺİ æĻº +Ġw orm +çŁ¥ ä¹İ +Ġde eds +DE P +京 å¸Ī +Ġindic ative +Ra 
ises +Ġ éļıåIJİ +Ġpl atinum +æIJ IJ +åħħ å½ĵ +Ġj est +æĹł è¨Ģ +å½ĵ å±Ģ +ï¼Įåı¯ è°ĵ +é« ¦ +ï¼Įå¹¶ ä¸İ +IS P +Ġom n +ruct uring +èĥĮ éĿ¢ +ï¼Į 第ä¸Ģ次 +让 å®ĥ +ĠPo isson +ON SE +\x d +åħ³éĶ® åŃĹ +åĴ Ģ +å®Ŀ èĹı +室 温 +è¿Ł è¿Ł +ĠSt amp +人们 对 +m achine +çļĦ éĿŀ +åĴĮ åĽ¾ +Ġsil icone +ä»ĸ 没æľī +UL D +Ġch ili +V ision +谢谢 ä½ł +ï¼Į å̼å¾Ĺ +е Ñģ +æĬ¥ åºŁ +\x b +Contact s +Ġbre eds +æģį æĥļ +(f p +Ġsuper v +, åij¨ +Ġlaw ful +o it +ale z +Ġmod ifier +ĠGuarant ee +Ġdiff ering +R ose +ĠW arranty +ĠExper imental +ĠM au +éĺ» çĩĥ +_ACC ESS +ĠL aur +ĠComp rehensive +ĠAc ross +æ°§åĮĸ çī© +Ġmism atch +Ġst eward +Ġbarrel s +Ġl v +æijĬ éĶĢ +ï¼Į åĽŀåΰ +鸡 èĤī +Ġbl ink +L isa +n om +ä¾µ 害 +Ġthe aters +Ġstaff ing +ĠMus ical +ä¸ĸ纪 çļĦ +Ġl ibr +绣 é¢Ĩ +_S ERVER +I ER +Ġcompress or +èĬ ¥ +ĠBr ick +åįģ åĪĨéĴŁ +Ġent r +æ® Ĩ +å¹´ å¹´ +æ¶īåıĬ çļĦ +h ir +Ġbuff et +Ġeduc ating +Ġreal izes +æĿ¥ åİĨ +Ġbl ending +帮åĬ© ä½ł +éĤ£ 份 +Ġjew ellery +ST D +I Q +ĠBank ruptcy +çĦļ çĥ§ +åºķ èķ´ +iline ar +ĠW ald +post ed +ore an +Ġsyn th +< input +å¾ģ ç¨ĭ +åĴ½ åĸī +Ġmet eor +ï¼Į ç»´æĬ¤ +ĠT emplates +Ġ" ../../ +ï¼Ľ ( +ãĢģ åģļ +ä¸į管 æĺ¯ +ï¼Įä½ķ åĨµ +. ! +ä¹Ł æĺ¯ä¸Ģç§į +åıij åĩºäºĨ +Ġna ive +èѬ å¦Ĥ +in ety +ĠR G +Ġhierarch ical +' M +, æĢİä¹Ī +ï¼Į å¿ħçĦ¶ +è°ĥ æĸĻ +æŁ¥ æĺİ ++ a +id ge +ĠIn fl +Ġsidew alk +A void +åΰ äºĨä¸Ģ +Ġgr illed +.d om +ĠD ynam +T mp +èĵĦ çĶµæ±ł +En h +Ġje opard +.C lear +èĸĦ å¼± +éķľ åĥı +ï¼ĮéĤ£ æł· +ï¼Įä½Ĩæĺ¯ æĪij +ĠÏĦη ÏĤ +Ġp w +_H ANDLE +Ġment ors +éĥ½ æľīäºĽ +for Each +gen ic +Ġprev ailing +> (). 
+ĠEven ing +,éĤ£ å°±æĺ¯ +, é»Ħ +ï¼Į ä¸ĵ +产ä¸ļ åĮĸ +ĠUt ils +A bb +ĠCongress ional +n ial +ION AL +Ġcompens ated +ĠNON INFRINGEMENT +çĽĹ çªĥ +uc id +对 æľ¬ +AC Y +æĺ¾ èĢĮæĺĵ +_S ERVICE +( ID +Ġche w +ĠRe vel +åħ¨åĽ½ åIJĦåľ° +Ġaug mented +æĹ¥ æľĪ +ĠInvest igation +< D +å¤ĸ è´¸ +inds ay +ĠStra ight +ighth ouse +uff ed +l ords +ess ing +do ing +sever ance +Ġmile age +èĩ³åħ³ éĩįè¦ģ +æŃ» åľ¨ +ĠG ly +ç͵ å¹³ +Ġre location +éħįç½® 为 +åĬ¨ 人 +æ°´ 稻 +ĠAss uming +Ġaccomplish ment +ãĢĤ æµ· +ET S +åIJ¬ éĹ» +å¸ħ åĵ¥ +èİī èİī +æĹłæķ° çļĦ +ãĢĤ çĦ¶ +Ġtub er +ĠWhere as +commun ication +龸 çİĭ +Ġ" ") +æĪĸ 缺 +ï¼Į 建设 +-st ream +ç͵ éĩı +ĠEl astic +ĠPok emon +Ġboot strap +, æľªæĿ¥ +çĶŁæĪIJ çļĦ +-st ory +æľī åħ´è¶£ +Ġcon qu +Ġens ured +é«ĺ å±± +ĠReg arding +ĠH ear +ex ist +c ock +Ġdipl oma +º ÃIJ +讲 çļĦ +å¤Ħ éķ¿ +Ġp ac +Ġgro cer +) ... +Pol itics +app ro +æ´ĭ èij± +ä¼łæŁĵ çĹħ +) }} +_c al +ï¼Į 读 +ä¸įä¼ļ æĺ¯ +at ts +CR M +æŃĮ èĪŀ +æ³¢ æĸ¯ +Ġsty led +ĠCh oosing +Cont ain +ç¯ĩ å°ı说 +\ sub +Ġbelong ings +ä¸ĩ è¾¾ +VID IA +\ {\ +ï¼Į çIJĥéĺŁ +ï¼Į åīij +ĠBe en +Ġh alo +æĭ± æīĭ +Ġ ä»» +该 åħ¬åı¸ +Ġcent r +ĠNort heast +k v +Ġresc ued +æĭĵ 宽 +Ġeduc ator +åĨľæ°ij å·¥ +ç®Ĺ 计 +Ab ove +Ġlif ecycle +áĥ IJ +Ġnause a +. events +N ob +ĠComp act +大 åĸľ +èģļ ä¹Ļçĥ¯ +Ġprevent ive +Ġf c +ISS ION +- active +å¾· æĸ¯ +ĠAud i +ä¸ĵä¸ļ æĬĢæľ¯ +丰 æĶ¶ +è¿Ļ个 åľ°æĸ¹ +ious ness +(i i +еР³ +Ġcorrid or +Ġdef ended +LE SS +辨 认 +I OS +æĬ¥ åĪĬ +Ġ å®ģ +åĮĸ æĪIJ +Suppress Warnings +å¼Ģåıij èĢħ +Ġfasc inated +road s +N ature +\ langle +Ġcomplet eness +qu ality +ä½ł åĪ« +ä¸Ģ å¹¶ +太 好äºĨ +éĺ² å¤ĩ +Ġund is +çļ® çļĦ +, å°¤åħ¶ +.in voke +Editor ial +éħį ç͵ +éķ¿ å¤§äºĨ +çłĶ讨 ä¼ļ +O l +n ational +/p ull +ĠUn iform +æĸ°åĨł çĹħæ¯Ĵ +ï¼Į éģĩåΰ +Be en +Que en +项 éĵ¾ +de hyde +RA W +ĠInvest ing +Ġtra ced +p ieces +èĩ³ æŀģ +ast ype +èĢĮ æľī +ĠCro atia +ä¸Ĭ è¡Į +å°½ åħ¨åĬĽ +Ġreprodu ced +辨 åĪ« +ç§ij åĪĽ +Ġmot ives +) 为 +çĶŁ åĩº +Ġge om +æĺ¯ä¸į åı¯èĥ½ +AP S +. 
inc +Ġ 尽管 +ĠB ali +Ġsol itary +c annot +Ġang i +Ġmind fulness +ĠB ing +ndef ined +Transform er +ä»Ģä¹Ī éĥ½ä¸į +ĠProf it +& amp +conf irm +, è·Ł +太 å°ı +st reet +R etry +Ġbelie ver +ĠContin uous +\ ( +å¯Ħ åŃĺ +qu oting +汤 å§Ĩ +å¿ĥ æĿ¥ +m ant +Requ ire +ĠDoes n +:%.* ]] +ĠCfg Vehicles +å·¥ä½ľ åĴĮ +.m ath +.s ervlet +æ¶ Ł +Ġmore over +)) $ +ä¿¡ 访 +, * +Ġsh outed +ĠSee ing +oc ide +Ġslow ed +ãĢĤ 主 +ry n +Ġcompet it +æĿ¾ äºĨä¸Ģåı£æ°Ķ +å¢ĥ çļĦ +lu etooth +人 å°± +, çŃī +马 çļĦ +两 æŃ¥ +ĠOwn ers +ĠS ilk +/j ackson +éģį åİĨ +Ġep ile +åģı åIJij +ĠDEAL INGS +Ġ iz +: Get +ud os +çIJĨæīĢ å½ĵçĦ¶ +åĮĸ çŁ³ +Ġn ude +Ġatt ained +ĠLet t +ä¸İ 管çIJĨ +åĬ¡ å®ŀ +åıĪ ä¸Ģ个 +éŁ Ń +uch ar +ĠRe plies +åĽ´ è§Ĥ +åıĤä¸İ èĢħ +-l anguage +举 è·¯ +æĬķèµĦ æĶ¶çĽĬ +人 éĢī +çľĭçĿĢ æĪij +, èĩªçĦ¶ +L AND +ĠS ymphony +Ġinterpre ter +- The +åľ¨ä¸Ģèµ· çļĦ +eral a +Ġ åıijè¡Į人 +ĠKenn eth +ĠAs ide +min ent +@ s +Ġstr and +ĠAl leg +Ġresemb les +Ġref urb +_con n +å°½ æĥħ +h agen +Ġaffili ation +å¥Ĺ 管 +为 æł¸å¿ĥ +æľº èĥ½ +å¿ĥ ç¥ŀ +å¼ĥ æĿĥ +Ġmyster ies +Ġpar al +ĠPers pective +ä»Ĭ åIJİçļĦ +ĠBar b +IR C +_pro perties +Ġpick le +Ġd m +ç½® æį¢ +Ġc oded +^{ + +Ġfram ing +Ġs ich +ib o +V ia +ic idal +åŁİ 主 +le igh +-d i +å¦ĤæŃ¤ çļĦ +Ġunw illing +Ġcontin ental +çī¹ å°Ķ +è¿Ļ ä¸ī个 +åĪĨ éļĶ +ĠZ n +éĥ½ å¾Ĺ +ê ¸ +å°ıæĹ¶ çļĦ +rac er +Sp onsored +æµ® åĬ¨ +Ġ ä¸įè¦ģ +ä¸į æĢİä¹Ī +Ġfacilit ated +éħ ĭ +Ġcool ed +rob ot +Ġexpress es +ãĢģ åı¤ +åįļ å¼Ī +åıijå±ķ æľīéĻIJåħ¬åı¸ +ĠS IZE +ä½ķ ç§į +çĹħ 人çļĦ +æ¼Ĩ é»ij +ĠS AM +ĠD y +åĢ¡ è®® +ĠCub an +, éĢīæĭ© +ĠGu ests +Ġprogram med +, åıįèĢĮ +. 
For +аР¿ +.T able +_op ts +Ġde form +_pro c +âĢĶ it +为ä»Ģä¹Ī ä¸į +éĹ® 她 +Ġpict ured +PR INT +fol ios +ĠPub Med +å½ĵ 羣 +Ġmat plotlib +ab l +ĠL az +Ġsew er +Ġguess ed +éĥ¨ éĥ¨éķ¿ +æĺ¯ ä»ĸ们 +ä¸ĢæŃ¥ æŃ¥ +æĶ¿çŃĸ åĴĮ +Ġun forgettable +Friend s +åĦĴ å®¶ +Ġmig rations +Ġ*) & +ĠCollect ive +å¼Ģ å°ģ +Ġemail ed +Ġbur ger +åIJį çīĮ +B attle +Ġdiver gence +Ġdeterm inant +æŀģ é«ĺ +Ġ" \\ +åĹ ĸ +Ġpupp ies +h ope +comp iler +ĠManag ed +ĠMess enger +Ġpre g +åĩ» ä¸Ń +åıª å¾Ĺ +åĬ¨ èĥ½ +> A +èĥ ± +valid ator +ĠCal if +碱 æĢ§ +æ¡ Ģ +è¿IJ ä¼ļ +) ^{\ +çİĩ åĴĮ +eg al +éĿĴ äºij +Ġdecentral ized +LE FT +åīįæīĢæľª æľīçļĦ +ĠAr ist +i OS +ud ding +cc c +Ġvalid ator +Ġkitchen s +Ġreconc iliation +ï¼Į 广 +æĩ ¦ +ĠIns ight +Ġ 缮 +ĠDel ay +çŀ³ åŃĶ +ï¼Į åģ¶å°Ķ +_input s +ï¼Į è¨Ģ +ĠA uction +åıij éŁ³ +Ġfore head +åĸ· éĽ¾ +f ruit +çĤ ¬ +Ġpar alle +åŃķ èĤ² +ï¼Įä½ł çŁ¥éģĵ +æľ¨ 头 +éĹŃ åIJĪ +-est eem +ĠMon roe +rel s +ustral ian +Num bers +Ġrese mble +Ġc ues +it zer +Ġt int +Ġn ous +éļı åľ° +Ġde x +Ġstack s +ĠC razy +ĠD ial +ï¼Įæīį ä¼ļ +Ġprohib it +m able +Ġst roll +åijĨ åijĨ +å°ı 女åŃ© +Ġfore see +li able +Ġtast ed +ĠM ond +ä¿ĿæĮģ åľ¨ +Ġsuppress ant +éĥ½æĺ¯ æľī +ãĢģ ç²¾ +é¢Ĩ导 çļĦ +çļĦä¸Ģ å®¶ +H g +çª ĺ +ĠJ ets +af er +imicro bial +ĠK as +èµ¶ æĿ¥ +Ġschol arly +çĶ» åĥı +Ġb ob +æĹł 常 +_b in +Ġ éĩį +Ġment ality +-cent ric +, çϽ +Ġpat rons +Ġdev iations +, çα +Ġcov enant +ef ul +ä¸ĭ æĦıè¯Ĩ +çŃī ä»· +æĶ¾ åѦ +ĠViol ence +ic orn +am en +ĠRel igious +æŃ» äºİ +list ing +Ġv or +/d ev +_C ACHE +å¾Ħ 缴 +è¸ı ä¸Ĭ +ĠU m +ĠCy prus +- types +cd n +ĠVolunte er +ig hest +Ġcon quer +ind rical +Ġcell ul +IM ARY +Ġexecut or +ĠR iemann +_t ry +Ġstick er +Exper t +l ider +åIJĮæł· æĺ¯ +Ġ ç³»ç»Ł +çĥŁ èįī +Ġglow ing +好 ç¬ij +妥 åĸĦ +Ġ' + +" Yes +G ene +Ġdivor ced +Ġinf usion +æĿĢ å®³ +çļĦ å¼ł +æŀĦ çŃij +Ġchlor ide +åīį ä¸Ģ +åĴĮ æĸ¹æ³ķ +â Ĭ +æIJŃ æ¡£ +åIJĪ åͱ +ç§ijåѦ ä¸İ +udd le +Ġdisturb ed +è¿Ļç§į äºĭæĥħ +ĠSim ulation +åĮĹ æµ· +Ġexerc ised +è¿Ľ æĿ¥çļĦ +æ· ¼ +Up grade +æĭľ æīĺ +Null Exception +å¾® åŀĭ +Tr uth +æī¹ å¤į +Ġthick er +_ 
reference +Ġneur onal +Ġecc entric +M il +ï¼Į æİ¨è¿Ľ +å®Ŀ çİī +èĭ ĵ +åĽ½ èµĦ +åı¯ ä¹IJ +-m ain +é¢Ħ è¨Ģ +g iven +ï¼Į 天ä¸ĭ +Ġurg ency +ĠAur ora +æŃ£è§Ħ çļĦ +Ġstro kes +Ġkil omet +Ġrug ged +supp lementary +人 ä¸Ģ +ì ĭ +åIJĥ å¾Ĺ +our g +Ġsa usage +å®ī å®ļ +ãĢĤ åIJ´ +æīĵ ä»Ĺ +å§ Ĺ +az ole +.M odule +. ab +k r +atic an +Ġt rophy +_per iod +Ġw oven +æĸ¯ é¡¿ +ĠV oid +Ġfore ground +ĠS z +av al +ia e +ä»»ä½ķ çļĦ +ç»ĵ çŁ³ +èģĮä¸ļ æĬĢæľ¯ +å°±æĺ¯ äºĨ +é¾Ļ éŨ +Ġh alls +Ġ æŀľçĦ¶ +Ġval ves +åķĨ è®® +éĩįè¦ģ ä½ľç͍ +è¿Ļç§į äºĭ +bl k +æľ Ń +Ġabnormal ities +Ġnood les +st ay +åĪ©æ¶¦ åĪĨéħį +ï¼Į 产çĶŁ +ep ing +Ġlay ered +Ġn l +ĠF ully +ĠSh o +Ġcustom izable +åŃĺåľ¨ çļĦéĹ®é¢ĺ +F rance +Ġav atar +Ġ ç¼ĸè¾ij +çļ Ļ +Ġa ired +rict ions +d ater +Ġv ul +çļĦ人 æ°ij +ĠSu ites +åłĤ åłĤ +il io +æĽ´ èĥ½ +-l ayer +ĠRe commended +çłĤ æµĨ +_ cont +åĬŀåħ¬ åİħ +è¿Ł æĹ© +段 èIJ½ +ĠJac obs +çŁŃæļĤ çļĦ +Res ize +,ä¸Ģ åĪĩ +G Y +n ative +t el +æĬĽ åħī +Ġmas cul +è¿Ļ ä¸ī +( port +åī§ ç»Ħ +ï¼Įè¿Ļ 款 +.T ool +Ġal gebras +åĨ· èĹı +é¦ĸ ä½į +ĠJ P +Se a +Ġpredict or +Ġg ent +ull ah +车 è½® +å±ķ åİħ +ç²ĺ è´´ +ĠT NF +IT ER +åį° å°¼ +ï¼Įä¸Ģ åIJį +ĠSad ly +ï¼Į åĺ´éĩĮ +Ġ ich +it amin +被 æĬķèµĦ +Ġph i +Ġconsolid ated +æ³ķ æľ¯ +(n on +ĠS AS +ĠK C +ä½ĵä¼ļ åΰ +pt a +åĽŀ è¿ĩ头 +(r ange +ph ony +ĠJama ica +ãĢĭ ï¼ī +ä¼ļè®® ä¸Ĭ +. 
Not +ĠPC s +åı¯éĿł çļĦ +B rad +re ra +æ´ª æ¶Ľ +Ġpop ul +, å¦Ĥä»Ĭ +çŃī çĹĩçĬ¶ +(t x +Coord inate +posit ories +Ġhem orrh +漩 æ¶¡ +Ġrest s +omb ies +ic ators +æĩ Ĭ +Ġl u +ï¼Į 缸æ¯Ķ +ä½ł 说çļĦ +Ġviol in +Par allel +Ġcit rus +ãĢģ 缴 +Un selected +off er +ï¼Įä»ĸ æīį +_x ml +( values +Ġrem embers +ãĢĤè¿Ļ ä½į +Str ateg +ен иÑı +Ġun just +ular ity +ðŁ ı +纯 æ´ģ +< c +" < +é r +ÃIJµ Ãij +ï¼Įä¸Ģ个 æĺ¯ +åĤ¨ çī© +( entity +è¦ģ ä»İ +Ġun ittest +FAQ s +ä¸Ĭ è·¯ +Spe aker +ĠLou ise +ä¸ĥ 个 +ĠClass ification +ĠT rees +Ġg oose +åį¡ éĢļ +EV ENT +art an +æĹłçĸij æĺ¯ +çļĦ è¾ĵåĩº +ĠUS C +缮åīį 为æŃ¢ +Ann ual +DEF INE +çļĦ éŁ³ä¹IJ +Ġ è¿ĺæĺ¯ +Ġcoinc idence +Ġreferr als +enti eth +"] ) +éĹª èĢĢ +G esture +æĸ Ł +ä¾® è¾± +pos itions +åħ« æĸ¹ +å¼Ģ æºIJ +åī¯ æķĻæİĪ +A z +Ġup front +ä¸įå¾Ĺ äºĨ +设置 äºĨ +Ġ( = +ä¸į å¤į +éĿ¢ æĿ¡ +çļĦ æīĵ +åį¡ è½¦ +è¿Ļ 对 +æ°´ ä½į +Ġé t +an os +ĠUp load +ched uling +åģı 好 +çĶŁ åĮĸ +ocument ed +ĠMach inery +Ġthrough put +ĠPerm alink +is ites +` ) +M ess +ãĢĤ ä¼ģä¸ļ +Ġun successful +ĠExecut ion +Ġ åŃIJ +ç»´ æĿĥ +æĿĤ çī© +Ġri pped +ä¸į 缸åIJĮ +B onus +åıĪ å¼Ģå§ĭ +Ġl um +WR ITE +( bytes +妨 ç¢į +, å¢ŀ强 +er i +æ· ¬ +âĢĿ ( +ĠG ren +èģĤ å͝ +Ġun biased +çĿ« æ¯Ľ +è¿Ļç§į æĸ¹å¼ı +Ġb ible +严 ç¦ģ +å®¶ 常 +н и +æīĢ æ¬² +det erm +ĠBudd hist +éĶ £ +æį¢ æĪIJ +æĦıä¹ī ä¸ĬçļĦ +) ], +Ġev t +Ġdistribut ors +k ed +ä¸į åĪĨ +ĠB ren +人 次 +ĠP VC +çŃī è¡Įä¸ļ +Ġside bar +ĠL amp +ï¼Į æľķ +Ġceil ings +æĢ» 管 +Ġwa ived +çĤĴ ä½ľ +icult y +(" $ +éķ¿æľŁ çļĦ +Operation Exception +äºĮ 代 +ê° Ģ +é¥Ń åIJİ +_p rev +Ġmalign ant +Red uce +ç¼ĵ åĴĮ +ĠC RE +ä¾¥ 幸 +abb ed +Ġcost ing +Ġfre q +ãĢģ ç§ijçłĶ +游 åĩ» +å®ģ æĦ¿ +æ²ī çļĦ +çŁŃ æľŁåĨħ +ĠK ane +Ġprof iling +æ¿Ģ åĬ¨çļĦ +Av ailability +ent in +ĠTrans actions +ĠEss entials +S umm +T ABLE +被 害人 +æĿ¿ åĴĮ +-th ird +iar ism +g z +æĤ ļ +Ġgl itter +ç»ĵæŀĦ åĴĮ +ï¼Ł ä¸ĭéĿ¢ +Ġdisc s +æĪĸèĢħ åħ¶ä»ĸ +麻 麻 +.blog spot +ĠG ap +åĤ¬ ä¿ĥ +Ġemerg encies +_ rc +说è¯Ŀ äºĨ +cul osis +Ġblank ets +åĪĹ åĩº +è´« ç©· +æ¶¡ è½® +- age +ĠB id +ç²ĺ åIJĪ +Ġmed iation +F requently +ile ts +ï¼Į æĶ¹åĸĦ +æĬ½ 
å±ī +Ġeleg ance +Ġpl astics +ç§ij éĻ¢ +æľī ä¸Ģ次 +æĮ¥ èĪŀ +Ġlim estone +Ġash amed +à¯į à® +é«ĺ æĸ°åĮº +est ing +Ġb ids +Ġx en +Ġan atomy +)) ** +èĥĥ åı£ +AL TH +t ouch +åħĪ æĬĬ +Ġfire wall +Ġrid ge +çĬ¯ç½ª å«Įçĸij人 +æ²» çĹħ +Ġcultiv ate +ro bl +é¢Ĩ åľŁ +( opts +èĩª ç§ģ +è¿Ľ åľº +è§Ĵ éĢIJ +HE AD +( the +ERS ON +èĬ± æł· +两个 åŃĹ +ä¼¼ä¹İ æĺ¯ +Ġrib s +Ġbi ases +ä¼° ç®Ĺ +åı¯ä»¥ 对 +ï¼Į è§ģåΰ +Jose ph +åŃĺ æ´» +(c ur +. container +åıĹ è¿ĩ +ĠW ins +ĠV ehicles +ĠLight ning +ĠD igest +Ġclaim ant +back end +sl ice +Ġnom inee +é±¼ çļĦ +Ġrein forcement +ĠG I +缸 è¿ij +便 åı¯ +Ġinf initely +ĠS ons +Ġfract ures +ĠL ift +èĢģ ç¥ĸ +.m ode +: x +å®¶ 人çļĦ +Sp acing +ĠCars on +åı° è¯į +Ġsem antics +èµ°äºĨ è¿ĩæĿ¥ +ĠIdent ification +ĠDer by +åĨĽ ä¸Ń +ag i +p oke +æĢ¥ äºĨ +ï¼Į æĪ´ +_d ep +ãĢĤäºĮ æĺ¯ +ĠW endy +Listen ers +å¸Ĥåľº ä¸Ń +can vas +ï¼Ł åħ¶å®ŀ +ï¼Įæľī ä¸Ģ个 +ĠPer th +X M +(l ast +Ġha ha +ä¿® è¡¥ +_v ideo +ä»·æł¼ çļĦ +ä½ľåĵģ çļĦ +ä¸Ģå®ļ èĥ½ +Ġnew com +C ached +å¼Ģ æľº +ap est +æĽ´å¤ļ ä¿¡æģ¯ +Ġge omet +ĠE DT +ile e +çļĦ人 æķ° +纳 æĸ¯ +.l ayer +Ġfabric ation +æĭľ ä»ģ +еР· +èħ ĭ +t body +is chen +å¯Į 豪 +åĬ¨ çī©çļĦ +æ°´ æºIJ +Ġcrack ing +-t ree +社ä¼ļ ä¿Ŀéļľ +Ġbusiness man +Ġtempt ing +_ term +ä¼ļ åıijçݰ +.com pare +Be at +Ġbat ches +åĨĽ åĮº +好 æĦŁ +èīºæľ¯ çļĦ +ï¼Į èĩ´åĬĽäºİ +Ġsubs pace +èŀº éĴī +ï¼Į éĽĨ +Ġmaneu ver +人æ°ij 代表大ä¼ļ +çļĦ åľŁåľ° +åħ° çī¹ +享 åıĹåΰ +ï¼Įè¦ģ ä¸įæĺ¯ +çŁ¿ çī©è´¨ +ĠPower Point +ï¼ĮåĽłä¸º ä»ĸ们 +åıĺå¾Ĺ æĽ´ +ç͍ é¤IJ +" $ +_H W +ä¿¡æģ¯ ç³»ç»Ł +åĹ · +åĽ½ åĢº +ĠH ex +Integ ration +Ġembarrass ed +Ġd ich +DA Q +æĺ ± +Ġfault s +æĢ» è§īå¾Ĺ +ä¹Łæĺ¯ 个 +_ an +.f etch +éķ¿ åıij +ï¼Įä¸Ģ æĹ¶éĹ´ +.r untime +/ news +æŀľ æ±ģ +.l ayers +ãĢĤ è®°èĢħ +Ġastr onom +ç¦ı çī¹ +åĢŁ è´· +íķ ľ +Ġvis cosity +Sp y +Ġsupplement ed +ĠEss ays +Ġz e +Ġus ize +åĬŀ æ¡Ī +ĠL ibraries +éĢģ è¾¾ +åı« ä»Ģä¹Ī +Ġyouth ful +Ġmamm als +.b atch +od us +åĩº è´§ +说 è¿Ļ +: f + ¬ +ĠIg G +m icro +ĠP ipeline +Ġpro gen +est ation +æĪª çĦ¶ +Ġspirit uality +Ġembarrass ing +çĤ¹ ä»Ģä¹Ī +çł º +辩 æĬ¤ +ist ine +èµ° è¿ĽäºĨ +Ġrock y 
+ĠVer izon +Ġo ceans +ï¼Į æĬĢæľ¯ +ĠSh ut +-ex isting +å¥ ļ +\ usepackage +] name +çĤ¹ å¿ĥ +et able +. background +i ators +Ġhel pless +ä¸ĩåħĥ çļĦ +姨 å¨ĺ +åħ¨ 书 +ARCH AR +w g +Ġgener osity +æĸĩä»¶ ä¸Ń +Ġst ark +Ġpost operative +æĢª ä¸įå¾Ĺ +豪 éŨ +èݱ åĿŀ +Ġmust ard +/ sc +:: < +_l ayers +Ġdisreg ard +k om +ï¼Į 鼨 +æ°´ éĿ¢ +åĤ¨ èĥ½ +j b +å±ķçݰ äºĨ +к и +Ġpray ed +ĠP ly +sh ield +æ²¹ çĶ» +Ġ; ; +dep recated +æĸ°éĹ» ç½ij +-th an +ĠTr im +éĻĦ çĿĢ +Ġse als +Ġconject ure +ãĢĤ åı¦ä¸Ģæĸ¹éĿ¢ +ff ield +ĠM IC +åĭIJ çļĦ +çģ¯ åħ· +ĠAv ailability +çĭĻ åĩ» +第äºĮ 竳 +å¯Ħ çĶŁ +ĠRain bow +p ointer +Ġc aramel +ĠB arr +it ars +ĠW AS +ert y +æ¸ħ æī« +- object +l if +æ¯ı天 éĥ½ +æĭĺ çķĻ +å¡« æĸĻ +åİĨåı² æĸĩåĮĸ +oc ese +æ´Ĺè¡£ æľº +_f ilters +åħ¬ ä¼ļ +. round +æ½ľ ä¼ı +æħĮ å¼ł +ç» ¥ +ï¼Įå°Ĩ æĿ¥ +ç»ĦåIJĪ çī© +S ink +ĠJ upiter +}} $, +èĩª å¦Ĥ +Ġrep ayment +\": \" +è¶³ äºĨ +åĶIJ 代 +èµĮ åįļ +ï¼Įä»ĸ ä¼ļ +ãĢģ éĵ¶ +Ġ åĽł +åıijå±ķ æĪĺçķ¥ +Ġnarr atives +飧 æĢ§ +J ud +å®Į 好 +ĠE E +,\ , +ĠRestaur ants +Ġlever aging +ĠH od +åĩĨ æĹ¶ +( opt +æ½ į +Ġneglig ible +( # +è¿« åĪĩ +å« Ķ +æĪĺ ä¸Ń +p ag +åįļ ä¼ļ +ĠO g +ĠCom posite +_ Data +Ġsh am +Ġkn itting +Ġeng ra +ĠL un +çĪĨ æĸĻ +- aware +ens able +ï¼Įè¦ģ ä¸į +Ġunt reated +ĠSau ce +ãĢĤ â̦ +art ist +ĠDis claimer +m ys +çļĦ éĩįçĤ¹ +.en able +Ġ ç»ıè¿ĩ +æİĮ éŨ +Ġgre ed +ï¼ĮçĶļèĩ³ æĺ¯ +èĪĴéĢĤ çļĦ +, ä¾ĭå¦Ĥ +çݰå®ŀ çļĦ +ĠR PG +P ART +å®¶ åĬ¡ +ĠShe pherd +U k +ĠG ummies +ow ired +.parse Int +Commun ication +_PO INT +F ox +æ¶Ī失 åľ¨ +æķĮ åĨĽ +Ġton nes +棺 æĿIJ +( [" +) +\ +-pro of +ĠOl ivia +Ġreview ers +Se ed +ĠRest oration +ĠDump ster +ï¼Į æĹ¨åľ¨ +å¼Ģåı£ 说éģĵ +åıª åľ¨ +Ġunder p +åıĸ èĥľ +ur ia +Ġaspir ations +Ġsacrific es +ĠT ill +éĩı åĴĮ +ï¼Į ç®Ģåįķ +ä¹ĭ å¤ļ +ï¼Į å¼ķèµ· +ï½ŀ ï½ŀ +Ġhomes chool +Ġg oog +eb e +ä¹ĭ åIJį +Ġ 说çĿĢ +Ġsk incare +q p +ĠIN FORMATION +åģļ çļĦäºĭ +å°ĺ åŁĥ +å®¶ åĴĮ +Ġimm ersive +åIJ Ń +(m od +ï¼ĮçĦ¶åIJİ å°Ĩ +Ġsovere ignty +ĠB oeing +ĠSer iously +å¹´ äºĨ +æī¬ èµ· +New er +Dec or +ä¸ĩ å¤ļ +ä¸įæĺ¯ ä»Ģä¹Ī +宫 éĩĮ +: \\ +** : +ĠSh ower +Ġtri angular +Ġlock er 
+Ġcomeb ack +ib us +b ring +ĠT ODAY +åĬ£ åĬ¿ +çĶļ ä¹Ī +Group Name +èĭ¥ æĹł +. Com +Ġfun g +è¿Ľ ä¿® +å¼Ĥ åij³ +FL AG +ĠNorm ally +US Y +Ġch op +åĩº éĿ¢ +ens ible +Ġrem n +ĠWal let +ĠMcC arthy +ä¸Ģ ä¹Ŀ +å¦Ĥæŀľ 没æľī +ĠÏ Ī +ke yp +æĨ İ +ä¸į åĩ¡ +ä»ĸ èĥ½ +AT FORM +åĪĨå¸ĥ åľ¨ +Cy ber +ĠCl ara +Rem oved +Ar n +:::: :::: +æĤł éĹ² +A round +Å ĵ +è¿ĺæĺ¯ 没æľī +ï¼Ł âĢľ +åıªæĺ¯ åľ¨ +Ġk h +< E +车 祸 +Ġcondem ned +ĠCar pet +-n ative +. Use +çļĦæīĭ æĮĩ +æŁ ¬ +Ġwrink les +Ġseiz ures +AIL ABLE +Ġl ur +çļĦç»ĵæŀĦ 示æĦıåĽ¾ +Ġnas al +ï¼Į èIJ½ +å°Ķ åħ° +osp ice +éĺ» æĭ¦ +Ġnort heast +åIJİ ä»£ +M igration +åIJ¬ ä»İ +m its +# get +B rien +Ġ// $ +åĪĨåĪ« ä¸İ +æĹł ç¼ĺ +E arth +åı¯ä»¥ åİ» +缮 çŀª +æĽ ³ +log y +è¿ŀæİ¥ ä»¶ +Poly gon +. Configuration +ĠAnn ounces +( User +(f eature +ENT ITY +f loor +St one +æĦ£ äºĨä¸Ģä¸ĭ +m uch +æ¡ ¨ +Ġext racellular +Ġencompass es +Ġ ounce +ĠG N +Ġdiarr hea +Ġw isely +é«ĺ é¢ij +Ġpr ag +Ġund ef +åĩĨå¤ĩ 好äºĨ +rim ental +æĶ¯æĮģ åĴĮ +b iz +åĩıå̼ åĩĨå¤ĩ +O Y +Ġple aded +ĠGrand e +_ append +Ġ ç´« +y i +ï¼Į æķ°æį® +ib ling +æ´¥ è´´ +bet ter +j un +ĠP hen +å®ŀ åĬ¡ +Ġserv icing +æĿ¥ å¾Ģ +Ġrestrict ive +æİ§ ä»¶ +-th irds +ç½ij绾 å®īåħ¨ +RE M +ĠF lood +P VC +è¿Ľè¡Į åĪĨæŀIJ +æĪIJ å½¢ +/c m +ĠV ERY +ĠM ama +V AR +ĠB N +N ested +Ġ éĢīæĭ© +æĤ ¯ +ĠC annot +ãĢģ åŃ¦æł¡ +åIJį å¸Ī +Ġthreshold s +第ä¸Ģ 竳 +G ap +u ously +çļĦ ä¸ļåĬ¡ +ĠCO UR +Ġnegot iated +对 æĪij说 +at u +zz a +run ner +åĽ½ å®ī +_TR ANS +éĢ® æįķ +ism ic +è´Łè´£ ä»» +Ġmail box +Ġwor s +Ġinter disciplinary +-e lect +ä¸ŃçļĦ åºĶç͍ +Ġqu ota +å±± ä¸ĭ +_m ember +åı¦ä¸Ģ è¾¹ +ç®Ģ è¦ģ +æĺ¯ 羣 +æĸ° 款 +Ġtrav ellers +èĪ µ +æŃ¦ ä¾ł +ĠAR R +éĥ½ æ¯Ķè¾ĥ +äºĨä¸Ģ åįĬ +- rated +Ġins ure +.S ecurity +å¥ĩ å¦Ļ +Ġwin nings +ĠObs erv +bel ow +ï¼Į åIJĪ +( instance +C ritical +Ġ ]. 
+Ġb ik +Pro ducer +( offset +_B R +Ġnick name +est ead +Ġ ï¼ +ĠP t +Ġesc ort +ĠL um +ext ensions +æĹł å°½çļĦ +Ġpo pping +: ] +× IJ +Ġbrief ing +æĸĩ 人 +Ġμ ε +ĠS aid +æł¼ æĸ¯ +âĢľ That +Pl aintiff +âĢľ æľī +ä¸į çķĻ +强 åĽ½ +ĠRef lect +et ri +Ġfault y +" Oh +éĤ ¯ +op al +æİ¨ æĸŃ +éº ¾ +åĽ½åĨħ çļĦ +Ġimplicit ly +sec ured +gg y +æĪij们 éľĢè¦ģ +示 åĩºäºĨ +ĠStr at +- ev +åĪĴ åĪĨ为 +缮æłĩ çļĦ +è§Ħ éģ¿ +Ġbel ts +Ġhist ogram +èĤ¡ æĮĩ +æī ¼ +ç¾İåĽ½ 人 +turn ed +Ġche fs +IS M +åζå®ļ äºĨ +çݰ è¡Į +çľĭäºĨ çľ¼ +ĠAd mission +_R G +第ä¸Ģ 天 +æĬ½ æIJIJ +è´« è¡Ģ +åıijå±ķ è¶ĭåĬ¿ +Med ium +为 æ°ij +Ġveter in +Ġg au +ĠÐ » +\ rho +å³ Ļ +W nd +Ġburn er +éŃĶ åĬĽ +âĢ į +_B UTTON +è£ħ æľī +ĠDet ective +åŀĭ æĿIJ +Ġgr im +Ġ ery +ä¸Ń åİ» +.ex ec +Ġsub way +.g ener +avor ites +è¦ģ 好 +æķĻèĤ² åĴĮ +æľĢåIJİ ä¸Ģ次 +åľ¨ ä¸İ +ym l +Ġconver ge +\ cap +ä¹Ł 对 +Ġin quire +Ġbra ces +">< ? +Ġpil gr +_ UPDATE +ãĢĤ åIJ¦åĪĻ +oc ular +éħ¸ 奶 +æĭī ä½ı +Wh it +å°±ä¼ļ 被 +ï¼Įä¸į åŃĺåľ¨ +ä½ł ä¸įæĺ¯ +Ġtraject ories +, 尽管 +Ġcentral ized +Ġo ve +è¾¾ ä¸įåΰ +çݯå¢ĥ åĴĮ +æĢİä¹Ī çľĭ +Ġ åIJİæĿ¥ +å§¥ å§¥ +R ain +ĠT ube +åIJį å®¶ +Ġtact ic +Ġluc rative +ï¼Į 缸åħ³ +ĠTom orrow +ä½ĵç³» çļĦ +R isk +éĢ ŀ +Ġsan it +e lements +å¹´ çīĪ +ãĢģ æīĭ +ĠVert ex +ĠSp y +æŀģ åĵģ +ĠNo ise +Ġ' ) +ãĢĤ ** +楼 å¸Ĥ +Ġ éĤ£ä¸ª +ï¼Į éļ¾ä»¥ +Advert ising +ç»ıèIJ¥ çIJĨ念 +åĽłä¸º æĪij +S hip +追 éļı +æĸĩæĺİ çļĦ +web pack +èĬĤ 课 +åѦ åΰ +{lem ma +st m +Ġ" ." +ç²¾ éĢļ +天 çĮ« +Hand lers +Ġcur ry +ä»İ ä¸Ģ个 +ĠMon o +ĠDim ension +ĠÏ Į +Ġcro re +ĠF UN +æłĩ æľ¬ +ãĢģ 绿 +åĨį ä¹Łæ²¡æľī +寺 åºĻ +æľĢ çα +èĸª èµĦ +æľī è¿Ļæł·çļĦ +d in +, æĪĸ许 +åij Ľ +æĥ³ åľ¨ +Ġsat ellites +\ Component +Ġb inds +ĠNear ly +ĠC edar +ï¼Į åĮħ +Ġilleg ally +ĠHil bert +èĤ² 人 +Ġbar red +她çļĦ æīĭ +åĨ ¢ +èIJĥ åıĸ +F ROM +çī© ä»¶ +å½Ĵ ä¸įå½Ĵ +ih ad +Cred ential +. 
control +åľº é¦Ĩ +pe z +次 äºİ +, æľĢ好 +åģı ç§» +Gr ant +ï¼ļ ä¸ĩåħĥ +å¡Ķ å°Ķ +鼷 ç͵ +é¢Ĩ导 å¹²éĥ¨ +ĠE scape +ãĢģ åķĨä¸ļ +æħĮ ä¹± +: N +社 群 +ĠIn clusion +Ġun b +ä»ĸ æīĢ +d estination +交 éĶĻ +ĠProm otion +Ġbenef iciary +åĮĹ ç¾İ +Ġg ithub +.g it +ĠCall s +Ġh l +ĠPat ricia +h orn +ä¸Ģ éŨ +Ġ( [] +Ġpred etermined +常æĢģ åĮĸ +ĠT ong +åħ¨ èĩªåĬ¨ +Ġdiagn ostics +J A +m ong +æĦ§ çĸļ +æĸ°åįİ ç¤¾ +顾 èĻij +ĠInit ially +Ġprint ln +ĠIns pection +åĽ°éļ¾ çļĦ +st aff +åĽ¢ è´Ń +æĪIJ åIJį +ĠIs le +å¼Ł åħĦ +Ġnorth west +@ { +Ar ts +ä¸į æĥľ +Ġ æĽ´å¤ļ +èĢģ 太 +Ġlig and +å¼ĵ ç®Ń +Ġw re +-f rom +( < +çļĦä¸Ģ 份 +\ Model +ï¼Į 严éĩį +åı¯ä»¥ æĬĬ +æī¾ ä½ł +ĠC ran +Ġspe ar +rop he +ï¼Į大 约 +Ġdetermin istic +Jenn ifer +ĠE uler +med ium +æĺ¾ å¾® +G erman +ä¹ĭ ç¥ŀ +ĠMc N +MS C +or ian +ï¼Į åī©ä¸ĭçļĦ +Ġso ber +Display Name +Ġh ath +çŁ³ æĿIJ +ĠLevel s +P ed +åħįè´£ 声æĺİ +ç¢İ äºĨ +Ġsubstr ates +en ary +Ġtrans gender +Id le +Ġi x +产 åĩº +å¸Ī å¼Ł +j ure +失 æİ§ +ĠR everse +ĠMatt ress +ĠSym posium +> type +rest rial +yst one +cript ors +Ġun con +çıį è´µçļĦ +^ [ +Ġend angered +æĸ¹å¼ı çļĦ +Ġmar riages +主 åħ¬ +Cl ub +ä¼ļ 说 +è¿Ķ è¿ĺ +-pro ject +Work flow +ç«ĭ åľ¨ +ï¼Įä½ł åĪ« +éķĩ å®ļ +S UV +ĠT as +ĠWag ner +Ġdispro portion +âĢľ As +ä½ĵ èĥ½ +è¿ĺ æĮº +èį Ł +åįĥ åħĭ +Ġart ifact +ĠConf lict +Ġpermut ation +Ġspect rom +Ġsol uble +导èĩ´ äºĨ +Ġoff ence +å¤ļå°ij 人 +Ġfurnish ings +ich igan +糯 ç±³ +ĠMor occo +, åºĶå½ĵ +[ this +Ġre cl +Ġe Commerce +, ç»§ç»Ń +F ab +令 æĪij +Ġtre asures +çļĦ ä¼łç»Ł +Ġ æŃ¤å¤ĸ +Ġs or +ç»ı è´¸ +Ġentertain ed +Ġcarbohyd rates +ãĤ µ +çĶŁçī© åѦ +ref erences +天 å®ĩ +Ġdisturb ance +Ġrepe ats +Ġmask ed +ä¾Ŀ ä¾Ŀ +ï¼Į æ¸IJæ¸IJ +ï¼Į åıijæĮ¥ +Ġrob otic +Ch an +æĨ § +åĶ ł +æ²IJ æĢĿæĺİ +å¨ģ 严 +åĿı äºĭ +Ġsib ling +or ously +C ourt +ï¼Įåį´ æ²¡æľī +oss ary +Ġboo ster +Ġinaug ural +: ', +ï¼Įå°± 好åĥı +. 
ms +æ¼ ª +_EN V +Ġcommission er +Ġregul ates +ĠEr in +ĠGod s +Ġpancreat ic +Ġdoes nt +Ġha unted +åıijå±ķ è§ĦåĪĴ +und ai +å®ŀç͍ æĢ§ +Ġcl er +ä¸ļ æĢģ +ĠV ote +Ġinn ate +ĠJack ie +æĺ¯ æĬĬ +âĢĭ âĢĭ +ig ens +ik u +_s peed +( el +ĠUt ilities +åľ¨ çľĭ +ĠD ew +ç§ Ĩ +iz ards +ç¦ı å¾· +| $ +ĠFl a +Ġhar bor +Ġ\ ' +Ġdish on +G W +L EN +Ġs arc +ĠEx pr +Ġcard i +DA O +Ex isting +Ġalleg es +好 ä¼¼ +ĠS YS +å®Į ç»ĵ +v ue +Ġc aul +Ġ åıªæľī +(n et +ä»ĸ è¦ģ += C +H ad +)\ , +it ra +æī§è¡Į çļĦ +oph ys +.f ail +èĩª å·² +Bl ank +ãĢĤ èĥ½ +åĬĽéĩı çļĦ +æĺ¯ ä¸İ +- ) +^ a +èĬĤ 度 +æ¤į æłª +ĠStream ing +Ġwe eds +tt a +ç¡« åĮĸ +.dir name +ä¸Ń 人 +Ġstand point +he id +Ġqu int +ed en +A ri +ma ha +Ġadv ising +ãĢĤ çľĭçĿĢ +ĠAs c +ĠS co +ĠR are +ĠUS DA +容 å¿į +" ? +ï¼Į æľĽçĿĢ +ĠCollabor ation +åħ¶ åIJİ +é£İ è²Į +Ġkind ergarten +Ġfore closure +Sp here +=" "> +* $ +ot ers +-b ut +Ġres igned +j ac +and in +çŃī ä»ĸ +产ä¸ļ çļĦ +å¿ĥ åľ° +ï¼Į æ°ij +] he +ĠT iny +èŀį åĮĸ +Ġstd err +æķ´ 车 +å¿ĥèĦı çĹħ +ãĢĤ åı¤ +ok i +re ally +åĵ¥ 们 +大 æĦı +iff e +ĠCl ip +ê ² +âĢľ Our +åıij èªĵ +S ad +èģĶ éĢļ +éĢIJ å¹´ +b ob +ç͵åĬ¨ 汽车 +Ġhepat itis +P sych +ä¸į 认è¯Ĩ +Ġ该 æĿij +I ron +ä¸įçŁ¥ æīĢ +å°Ĩ 该 +线 åŁİå¸Ĥ +Ġrug by +课 æľ¬ +Ġirrit ation +åIJį è¨Ģ +Ġsym pathetic +MM MM +åζ å®ļçļĦ +ãĢĤ 举 +ĠConst ants +Ġsanct uary +Ġbroadcast ing +Ġdraw ers +Ġwand ering +ĠKn ights +ä¸įå°ij 人 +ä¼ļ åľº +Ġdist raction +Ġvict ories +ĠBur ke +ç» « +ĠS ig +Ġdep icts +æľº æ²¹ +æ°Ķ åĴĮ +N egative +ĠB ened +Ġovar ian +表 æĢģ +Ġ åħ³ +çϾ èĬ± +ç¥Ŀ è´º +ĠFirst ly +, æ¯ı个 +Ġbas il +_AD C +åĢ Ķ +acc i +leg iate +ç¯ĩ 竳 +Ġgrap hene +is ations +天ä¸ĭ åįĪ +Ġgrad ed +em ark +ĠR outer +Ġcaps ules +èĪį å¾Ĺ +Ġsn ippet +ä¸Ģ æĭ³ +ç¿ İ +ä¸įè¶³ 以 +, éķ¿ +ĠSt yles +åŁİ åĨħ +Ġp yl +çݯ åį« +ck o +ĠAdv antage +/ **************************************************************************** +ĠR ocket +est ruct +Ġres iding +æīĢ å¸¦æĿ¥çļĦ +æīĢè¿° çļĦæĸ¹æ³ķ +.s ign +ĠIS IS +^ t +N ING +Ġj ars +åħ¨ èµĦ +ipp ers +åIJīæŀĹ çľģ +est hetic +ĠR PC +ort on +ĠCal vin +ĠLeg ends +ç´§ç´§ åľ° +ãĢģ 
ç³»ç»Ł +ou ched +H o +âĢľ My +ĠCiv ic +ĠS SH +æ½ľ èīĩ +_ON LY +s un +s With +çľ¼ åºķ +çļĦ æĶ¿æ²» +Ġdeduct ible +ĠK w +, éĢłæĪIJ +ĠPack ages +.st ep +(d ocument +ä¸Ĭ 端 +åIJİ ä¸ĸ +鼷 éĶĭ +âĪ ŀ +è·¯ çģ¯ +éĢī åĿĢ +иÑĤ ÑĮ +Ġsynchron ization +ä½£ éĩij +E t +ĠP OWER +èµ¶ ä¸Ĭ +Ġsouth west +ç§į çļĦ +æĢ¥ äºİ +_ACT IVE +G ermany +Ù ī +Ġturb ine +å®Ŀ è´µçļĦ +в а +Ġpit ched +ç¿» çĤĴ +æĺĶ æĹ¥ +èĤ¤ èī² +ĠCall back +ĠPalestin ians +% \ +Dir ty +ï¼Į å®ŀåĬĽ +é¢Ħ çĥŃ +ĠFAQ s +$ x +èĢĮ å®ļ +赫 çĦ¶ +ï¼Į è·Ŀ +ĠUn i +ç®Ģ缴 å°±æĺ¯ +Ġg li +rit z +W AYS +ie le +ist e +Re ce +头 çļ® +Ġ ç»ĵæŀľ +-------- --- +Ġminor ities +l j +Ġ åľ° +is ia +åĪĽä½ľ çļĦ +Ġî n +_N E +室 éĩĮ +åĩłä¹İ 没æľī +éĿ¢ åŃĶ +Ġleak age +ĠMal ta +Ġent ails +_l ineno +ĠDist ributed +. def +ç¥Ľ æĸij +Ġwip ed +é«ĺ åĩº +å°ı 康 +ä½ĵ å¤ĸ +ĠSeb astian +Ġin duct +ĠJud y +æľ¬æĿ¥ å°± +ãĢĤ åºĶ +el en +念 念 +Ġvol t +Ġa queous +ä¸Ń è¶ħ +de legate +ï¼Į好 好 +Ġpist ol +ĠR ings +èĴĭ ä»ĭ +è̏ èĤ© +Ġsix teen +f ax +èѦ æĬ¥ +G ender +æĿŃ å·ŀå¸Ĥ +ç»Ŀ ä¸į +ĠPre paration +ĠRES ULT +æĺ µ +顺 åĬ¿ +ãĢĤ åĪ« +Out Of +. 
up +èĩª å°Ĭ +Ġtem ples +Ġcal orie +çļĦ 交æĺĵ +T IME +CP P +æŃ¤ åľ° +Ġ[ % +çļĦ 身边 +ĠTal iban +é«ĺ ä¸ī +Ġrot ational +ãĢģ æ³ķ +oper and +ä¸Ģ åĽ¢ +Ġmot if +å·² å®ĮæĪIJ +æĪIJåĬŁ äºĨ +Ġf ountain +åħī 彩 +alloc ate +åı¦ è¡Į +ĠDe pt +ĠApp li +Ġhost name +whe el +ĠCl ause +ë¥ ¼ +Ġ æ¯Ķå¦Ĥ +M U +å½ĵ ä¸ŃçļĦ +Ġstate wide +Ġbr ushes +æŀĦæĪIJ çļĦ +é¢Ħ åζ +Ġantioxid ants +æĪĸèĢħ 说 +I EW +ĠLog istics +f ac +Ġthe or +åĴĮ éĿŀ +Ġconce aled +ãĢģ å®Įæķ´ +æ³¢ æ¾ľ +Act ual +ï¼Į æĺ¾ç¤º +ä¹Ł 让 +ä¸ĵä¸ļ çŁ¥è¯Ĩ +ï¼Į æĥĬ +Ġbl ender +_ AB +AT ING +èIJ¨ æĸ¯ +Ġrem inding +ĠS lim +ten ham +ï¼Į æīĭæľº +ä¸ĭ åľº +ĠPharm ac +ra pping +æºIJ æºIJ +ĠR D +Ġsl ash +Ġwh ichever +B LE +uls ory +Ġç¬¬åĽĽ èĬĤ +çĹĺ çĹĺ +Ġop aque +, çī¹åĪ« +Unselected Node +ing les +好 æŃ¹ +Ġass im +Ġear rings +缴æĴŃ éĹ´ +Ġsurge ons +s app +Inv ocation +å¤į è¯ķ +Ġa il +Ġimpact ing +ĠChar ity +comput er +Coll ision +/ node +éĥij éĩį +æ¹¾ åĮº +ï¼Įä¸Ń åħ± +ãĢģ åħŃ +æħ· æħ¨ +ĠDefault s +人 å®¶çļĦ +è¿Ļ 群 +Ġle aked +æīĵ è¿ĩ +ie ving +åĮĸ çĸĹ +åľ¨ è¿Ļä¸Ģ +Ġr all +ç²¾ç¥ŀ åĴĮ +Parent s +ä¸Ģ åłĨ +art e +社ä¼ļ 责任 +åħ¨éĥ¨ çļĦ +åħ¼ èģĮ +Ġdream ed +Gener ally +. Height +çģ« äºĨ +ra ises +ĠB LOCK +_d istance +Ġunbelie vable +ices ter +IL Y +Ġat op +Ġâ Ĥ +Ġarom a +Ill ust +Ġresign ation +å°Ħ 线 +Own Property +èıľ åĵģ +éĢĤ 度 +ç´Ĭ ä¹± +ĠPers ian +eb ian +Effect ive +om inated +åı¯ ä¿¡ +() - +Ġclean ers +Ġκ α +g ering +èĩ´ çļĦ +ĠS amples +èľ¡ çĥĽ +Ġsl ate +.P ost +. Generated +-s ystem +. 
inst +-p ower +te c +ĠN egative +Ġiss uance +èĶ Ĺ +çļĦ åĽŀçŃĶ +饥 饿 +Ġpar l +foot note +Ġs ip +ï¼Į æĺ¥ +åīį ä¸ĸ +Ġd h +æ¸ħ åĩī +Ġinsp ires +èººåľ¨ åºĬä¸Ĭ +äºĨ çĦ¶ +æĸ° å¨ĺ +ey ed +ĠM egan +_r andom +主 æķĻç»ĥ +ãĢģ æĶ¿æ²» +Ġt enth +ĠD UI +Ġn ont +åĬłçĽŁ åºĹ +Works pace +ï¼Į æĢ¥å¿Ļ +ï¼Įä»İèĢĮ 使 +_m apping +çŁ¿ çī© +æī¾ äºĨ +sl ant +raw l +Ġhum ility +Ġpop ulate +S on +éĢ Ļ +è¡£ 裳 +ic z +ï¼Į 确认 +èĩªå·±çļĦ 身ä½ĵ +_LO CAL +ï¼Į åĪĨæŀIJ +Ġk eto +S YS +Ġnull able +约 å®ļçļĦ +ç a +, 亦 +h istor +-re lease +æľºæŀĦ åĴĮ +ï¼Į åĮ»çĶŁ +Ġcor ro +æ±ī æĹı +Ġn x +éĵ¶å±ij çĹħ +æĦı å¤ĸçļĦ +æµģ 泪 +ï¼Įç´§ æİ¥çĿĢ +ï¼Į ç±³ +åĽŀ 转 +Ġampl ification +.n ull +çIJĨ论 ä¸İ +Ġfeedback s +in ia +çĸı éĢļ +_det ails +ins ured +QUI RED +combin ant +ï¼Įä½Ĩ ä»ĸ们 +Ġ 以ä¸Ĭ +ï¼Į éĵ¶ +没æľī 被 +æİ¨ è¿Ł +åıijçĶŁ è¿ĩ +Ġgrad ients +ocr ine +uns afe +om ers +è¿· 人 +IO US +Q T +æĪı æĽ² +ĠSpring field +满 éĿ¢ +两个 人çļĦ +Ġinter mitt +be aut +_H PP +izz y +Ġmattress es +\ otimes +åħ³ å¿ĥçļĦ +ĠP est +Ġkn ives +Ġfid uc +ĠD addy +add itional +OR DS +y per +驾 é©Ń +Ġc ri +Ġhum our +path s +Ġmain land +Ġwarn s +æ´ĭ 溢 +æĹ¥ åĩĮæĻ¨ +éļ¶ å±ŀäºİ +.Ab stract +Ġи з +S nap +ãĢĤ æīĢ +Al ice +æı¡ æīĭ +Ġb ast +ĠAtt ribution +ï¼Į èIJ¥éĢł +ĠH H +å© § +Ġant is +ĠD N +表 æī¬ +ĠAs sets +ĠB ACK +: set +åΤ å¤Ħ +Ġlingu istic +æĭī çļĦ +an ion +Ġbl iss +åĤ Ģ +ĠMer ge +" And +_b inary +Sub st +ĠAtt end +ĠPre viously +æĮ£ éĴ± +_ rest +æģIJæĢĸ çļĦ +Ġavoid s +ĠP ione +ĠProdu cer +Ġres urrection +ï¼Įå½¢æĪIJ äºĨ +_IN LINE +çζ çļĩ +ting ham +Inter pol +ĠEN ABLE +ar ag +Ġsl ab +( ptr +ĠTra cy +éĢļè¿ĩ 对 +æ´»æĢ§ çĤŃ +(\ [ +p ix +Know ledge +" But +C ertain +ĠCustom s +ĠEll iott +ĠG ent +(g lobal +, 使å¾Ĺ +Ġins ign +ï¼Į ç®Ĺæĺ¯ +ï¼Ī ãĢĬ +éĥ½ ç»Ļ +ĠK Y +ĠAn xiety +Ġro ds +æĬ¥ åΰ +çģ¯ çģ« +/ u +åIJĥ äºı +ĠNe o +ï¼ļ 对 +Ġmis con +ï¼Į åĩŃåĢŁ +Ġsuper conduct +Ġnumber Of +æĬĹ æĪĺ +................ ........ 
+(M ath +Ġfol lic +ĠDef initely +Ġcl amp +n pm +Ġcan ine +第ä¸Ģ æī¹ +.C lass +//////////////////////////////////////////////////////////////// //////// +ĠUPD ATE +å¤ĸ å©Ĩ +æĬ¤ çħ§ +ĠD isk +åIJį èªī +纤维 ç´ł +éŁ ¬ +- local +å¼ķ æµģ +ĠEth ical +< _ +Com ing +ĠHu gh +åĮº åĮº +Ġan isot +æľīå¤ļ ä¹Ī +ä¹ī è¯į +_CL IENT +( åĮħæĭ¬ +ĠU X +ãĢĤæį® æĤī +ï¼ĮæĪij çĽ¸ä¿¡ +(f n +: ${ +Ġst ool +ï¼Į éĺ´ +.query Selector +åĩ ĭ +èĢĮ 为 +ä½į æķ° +.M AX +Ġdraft ing +åĽĽ èĤ¢ +ask at +/s ystem +Ġgar ments +çϾ å®¶ +Ġsecret ly +åįļè§Ī ä¼ļ +Ġcou p +op ening +/g oogle +: focus +æĺ¯ ä½łçļĦ +ä¸Ń æŀ¢ +ff t +åıij åĶ® +F ather +ĠD estroy +Ġgre ase +åĹ Ķ +ĠTreat y +åŁİ åİ¿ +_IN S +/f asterxml +A UTH +å½ĵ æĪij们 +åĪĴ ç®Ĺ +sh it +_SE C +Ġwhis pered +G IS +çķ¸ å½¢ +ĠC ave +Ġlabor atories +è½»æĿ¾ çļĦ +p ly +æĪ¿ 举 +_ async +ä¸į åIJĪæł¼ +ĠS ections +æľ¬ é¢ĨåŁŁ +æĸĻ éħĴ +y x +in ic +ĠG F +æ± ¾ +ĠAl uminum +ĠF lying +Ġr ash +åζ åīĤ +ĠC andidates +ĠB es +æľī åĩłä¸ª +Ġarter ial +éħį åģ¶ +ĠBas in +Ġalter ing +ĠH EL +ĠÎ ¾ +éĹ « +ä»Ļ åŃIJ +Ġunexpected ly +OPT IONS +æľī åħ¶ +Ġhypot hetical +ãĢģ åħ·æľī +ĠSPE LL +éķ¿ å®ĺ +ĠL ing +ç͵ èĥ½ +Vert ices +订 è´Ń +{ Y +Ġro oft +ĠFre eman +Account s +满满 çļĦ +il is +å¾Ħ åIJij +åĪĬ çĻ» +Eval uation +çĻ¾åº¦ çϾç§ij +_FA ILED +.Get String +Ġunp aid +å¸Ĥ ä¸Ńå¿ĥ +å¿ĥçIJĨ åģ¥åº· +st ud +ĠL IST +è¿Ľè¡Į å¤ĦçIJĨ +Dis patch +è¿Ľ åĨĽ +Ġadapt ing +ćĆ ï¼Į +å¹³ çļĦ +ĠCr icket +); // +ĠGerm ans +Ġ é½IJ +积æŀģ åıĤä¸İ +Ġut er +ve ct +ä¹ĭ è¨Ģ +ĠQu ite +å¼Ĥ èĥ½ +æŀ¶ åŃIJ +æķ¬ çķı +Ġ å·¥ä½ľ +ĠH T +æĸĩ æĹħ +Ġcart ridge +èĪŀåı° ä¸Ĭ +ä¹Ŀ åįģ +\n o +ä¼ĺåħĪ çº§ +ï¼Į æ¯į亲 +Ġqu ark += */ +å·²ç»ı æľīäºĨ +ĠBudd ha +ĠA CL +g ui +ï¼Į ä¹Į +ï¼Į èĭ± +ĠR um +ath am +追 æĿĢ +V irgin +izz es +_R ATE +Ret ail +Ġsub class +çļĦå¤ĸ è§Ĥ +ĠB ASE +å¤ĦçIJĨ æĸ¹æ³ķ +Ġà ¶ +und y +_D B +Ġvom iting +è®® é¢ĺ +ä¿® éģĵ +ç»Ļ çļĦ +ĠPr inter +è¡Įä¸ļ ä¸Ń +res cent +å°ı éĺŁ +OL S +multic olumn +Ġapp raisal +] string +éķ¿ åīij +Ġschool ing +å·²ç»ı å¾Ī +Ġins ol +å®ŀéĻħ çļĦ +_g c +err al +è° ´ +èµĦéĩij çļĦ +most at +ĠFar ms +Ġmet ic +if old 
+ĠAtt ributes +Ġaston ishing +Ġsmo other +Ġg own +åŁºéĩij ç»ıçIJĨ +.B lock +æĬĵ çĿĢ +æĪij æĬĬ +/ http +Ġreli ably +åĨ ½ +Ġ"/ " +G AN +ĠC ycl +Ġro s +ç»ĵ æł¸ +Ġp ane +Ġp ans +çĭ¬ ä¸ĢæĹł +_N ON +Ġleg ends +ä¸ĩ 个 +Ġgar ment +Prot ect +Ġnum s +_ lookup +ĠD ermat +(n s +Ġoption ally +ks w +Ġ ä¹Łè®¸ +æŀ¶ ä¸Ĭ +失 è°ĥ +ill i +An ne +bn b +æ½ľ æ°´ +æł¼ æĸĹ +e at +ĠInf lu +Ġalign s +ĠBre aking +ĠÏĦη ν +ï¼Į çĬ¹å¦Ĥ +大 ç¥ŀ +Ġwh ale +_p ub +< List +ä¹ĭéĹ´çļĦ è·Ŀ离 +çĿĢ åĺ´ +æľīå¤ļ 大 +Ġn ond +çĶĺèĤĥ çľģ +ĠNot re +Ġfemin ist +çªģ å¦Ĥåħ¶ +Ġbenef ited +漫 æŃ¥ +ï¼Į åľ£ +.D raw +Ġdet rimental +Ġsub urban +S olid +{ split +ä¹ĭ 士 +个人 ä¿¡æģ¯ +Ġgeneral ization +Ġb ricks +Ġl one +ä¸ĵ åįĸ +è± ļ +Ġover write +夹 ç´§ +åĢŁ çĿĢ +硬 å¸ģ +ãĢĤ åģĩå¦Ĥ +ĠR osa +. Element +ä¹ĭ åĴĮ +æĭħ å¿ĥçļĦ +f an +in atory +ĠCl erk +Ġmotiv ations +ãĢģ å¾· +Âł æŀĹ +ĠAr med +ĠCG Float +çļĦ äºĨè§£ +å°± ä»İ +Ġclass dump +éĢģ ä½ł +åı« ä½ł +éĩĮ 头 +/ client +re verse +çļĦ èĩªçĦ¶ +大家 åľ¨ +ï¼Į 绿 +æľĽ è¿ľ +ĠJackson ville +Initial ized +Ġblow s +Ġp ity +åħ¥ åľº +å¦Ĥ æľī +åΰ åĵªéĩĮ +Ġspark ling +åı¯ æģ¶ +Ġcr ises +ĠCl othing +S quare +ä½į åĪĹ +Dev ices +Na N +ï¼Į ç»Ĩ +ĠContin uing +åı¯ä»¥ 使 +æĿ¥ è§£åĨ³ +ä¸ĸ 代 +åįķ åIJij +ï¼Į æī§è¡Į +ĠG ROUP +è¾ĥ éĩı +ä¹Ŀ é¾Ļ +Ġw il +Ġprogram mer +ath ic +å¿ĥ çĹħ +RO OT +ĠPet itioner +_US B +.pro duct +ĠInvest or +c ou +ãĢĤ æľªæĿ¥ +ãĢĤ ä»ĸ们çļĦ +éĥ¨ ç͵影 +Ġobst ruction +第ä¸ī 竳 +ठĤ +éĵ £ +å·²ç»ı åΰäºĨ +QUI RE +æĿ¥ 访 +å¤Ħ æĸ¹ +ĠProdu ctions +以 èµ´ +好 æĦıæĢĿ +ĠJe ep +éĵ Ĥ +ĠÎ Ń +C our +åĬ± å¿Ĺ +è·¨ åĽ½ +ites pace +en ues +Ġp ts +Ġa vec +-w est +ĠMet adata +h box +å¹² æİī +çĶ· 士 +Ġben ign +Ġsusp ense +Ġcomm its +ä¼ĺ å¼ĤçļĦ +ç¡ħ èĥ¶ +çĸ«æĥħ æľŁéĹ´ +ĠSe q +çĶŁäº§ ä¼ģä¸ļ +æ±¹ æ¶Į +Ġsens it +æ¯Ĵ æĢ§ +ç¼ ¤ +ä¹Ł èĥ½å¤Ł +åĪĨ 管 +ĠWest minster +è¶Ĭ è¿ĩ +ä¹ĭ åŁİ +èĭ¦ æ¶© +Ġp erv +èµµ æģĴ +ĠTrib une +ï¼Į ä¾Ŀæ³ķ +im ing +ä¸ĩ äºĭ +äºĶ éĩij +ï¼Įæ±ī æĹı +çī¹ æĸ¯æĭī +.set On +De leted +ha ul +Ġ_ (" +ĠHar vest +, ç¾İ +èıľ èĤ´ +Second ary +( IN +yd ia +å¼ĢåIJ¯ äºĨ +_com ment +th ora +s at +Ġch assis +æĹ¥ å¤ľ +æ£Ģæµĭ åΰ 
+Ġnov o +Ġclock s +çĤ¹ çģ« +è½° éļĨ +ugg ling +å¯ ° +G ROUND +H ol +ĠInst itution +Ġviol ates +T icket +äºĨä¸Ģ ä½į +Ġprop het +Ġm ás +åŁº 座 +ĠB ags +Ġent re +T u +ç©¿ çļĦ +å°¿ éħ¸ +å¼ķ èĦļ +ï¼Į éľį +Ġcondition ed +X FF +Ġun employed +Ġw arrants +ĠMut ual +Ġimm ersion +Ġachie ves +l ations +æĪij åIJĹ +Ġ// @ +mut ex +ĠH L +æĶ» æīĵ +t iny +ï¼Į 端 +Ġhe ir +çļĦ 亲 +b h +Ġper ks +ep h +- access +{ # +ĠCont ains +é̼ è¿ij +ĠUp grade +ĠB H +ĠR ack +, 让人 +ĠGr ants +ä¿ĺ èĻı +. URL +Ġtransform er +Ġclar ification +: æĪij +ĠB ars +请 æķĻ +Ġtempt ation +ĠB ills +f as +ist on +L ux +ç®Ģ 便 +ï¼Į åŃŁ +ï¼Ī ä¸ĭ +éĻį è§£ +umb ered +æ´½ è°Ī +éĢIJ ä¸Ģ +ä½ł 对 +Ġheter ogeneous +\ cl +è¿Ļ é¦ĸ +G PIO +æĶ¿åºľ è¡¥åĬ© +æµĩ æ°´ +æ¸ħ åĩĢ +ä¸Ń åŀĭ +ï¼ĮæĪij åıªæĺ¯ +\ sin +Ġbrace let +ann i +æ» ĩ +æ³¢ 纹 +è§ĦèĮĥ æĢ§ +Sub mitted +表çݰ çļĦ +exp ress +Ġl r +åıĻ åĪ©äºļ +% èĤ¡æĿĥ +Ġplay ful +Ġcar bs +. es +ä¸įåı¯ æĪĸ缺 +ãĢģ èĭı +è¾¾ 人 +Ġ ç±³ +icy cle +g able +en et +IB E +Ġadvantage ous +ĠW ol +æıĴ 座 +ĠD ong +éķ¿ éĢĶ +ãĢĤè¿Ļ ä¹Łæĺ¯ +ik k +_T ext +åħ° èĬ± +J B +å°½ æĹ© +åħļ æł¡ +trans ition +Ġ---------------------------------------------------------------- ------------ +Ġinhibit ory +ï¼Į è¿İ +ï¼Į éĿĻ +ãĢģ ç»ıèIJ¥ +Ġtrail ers +ä¸ĭ å±± +è¿ĺ åı¯ +Ġbatt ling +Ġre interpret +_N ORMAL +Ġcyt oplasm +ĠCred its +Ġstrengthen ed +ãĢģ ä½ĵèĤ² +-e ffect +æĻ®éĢļ è¯Ŀ +-s dk +åĨ· æ·¡ +ĠNav al +ï¼Įè¦ģ çŁ¥éģĵ +quee ze +å¤ļ ä½Ļ +åĩĿ ç»ĵ +.M y +\ bar +èģĬ èģĬ +å±ķçݰ åĩº +缸 çα +.assert Raises +/j query +back up +/* . 
+润æ»ij æ²¹ +Ġg ears +Ġy rs +( ps +Ġ æµĭè¯ķ +æľº 票 +èĥ¶ åİŁ +éĵ¸ éĵģ +è·Ł è¿Ľ +管çIJĨ æľīéĻIJåħ¬åı¸ +-d yld +ç½ ¡ +åĽ½ ä¼ļ +èĤī ç±» +r k +æĸ° åªĴä½ĵ +åı£ åijĨ +,ä¹Ł æľī +organ isms +åĵĨ åŦ +ï¼Į è·ij +å¤ĸ åĮħ +åħ¥ å¢ĥ +åĩŃ ä»Ģä¹Ī +âĢ» âĢ» +她们 çļĦ +æľŁåĪĿ ä½Ļé¢Ŀ +ud ing +.g rad +Ġsupplement ary +åı¤ éķĩ +Ġbur ial +ï¼Į å®ľ +对 åĨ³ +Ġtrend ing +æ´Ľ æĿī磶 +Ġpartner ing +ĠInvest ments +交 æīĭ +å½ĵ äºĨ +ä¿Ŀ é²ľ +C NN +åŀĭ è¯ģåΏæĬķèµĦåŁºéĩij +ĠMir anda +éĿ¢ ä¸ĬçļĦ +Ġin mates +Ġch ambers +åı¯ 羣 +ac i +iss ors +met ric +employ ee +ĠPro per +ĠB ash +ภ¡ +, é¢Ħ计 +åºĶ åıĺ +Ġspeed y +ä¹ĭ éŨ +Conn ell +_h and +å¤ļ æĺ¯ +æ²¹ ä»· +Ġoff ender +Ġsher iff +åIJ¯ èĴĻ +G ift +ï¼Įä½Ĩ å®ĥ +Ġfour teen +Ġmy el +ï¼Į åĴ± +广æĴŃ ç͵è§Ĩ +æĥ¨ åı« +adv anced +åħ¬åħģä»·å̼ 计éĩı +Ġm alf +Ġmon o +追 åĬł +å¼ĢæĶ¾ å¼ı +æĪĺ åĬĽ +åľ° æľĽçĿĢ +ĠBang kok +Ġsub process +çŁ¥ åIJįçļĦ +Ġæľ¬ æĿ¥ +_ch unk +åĬ² åĦ¿ +ĠSurv ival +urt les +$ ). +b undle +åĽŀ å®¶çļĦ +ï¼Į 墨 +ĠC HE +Ġconvey or +é¡¿ äºĨ +âĦ ¢ +é»İ æĺİ +æ¶Ī æķ£ +交 æ±ĩ +æł¡ æŃ£ +ĠN aturally +åĪĢ çīĩ +ä¹ĭ æĺŁ +æĺ¯ æĪijåĽ½ +追 åĩ» +çļĦ åĪĨæŀIJ +ãĢģ åĨľä¸ļ +éĢģ åİ» +A ds +ĠAd elaide +Ġdepend able +æĢ¥ éĢŁ +C ells +Ġ 竳 +è¿ij è·Ŀ离 +çļĦ å®ŀéĻħ +便 æIJº +_S IGN +ph ants +红 æŁ¿ +ĠAR CH +åĴĮ çϽ +ĠGe off +å¦ ĵ +æľī æĹ¶éĹ´ +ï¼Įè¿Ļ éĩĮçļĦ +at itude +ĠPy Object +d estroy +马 å¾·éĩĮ +ç¡®å®ļ äºĨ +çĤ¹ å¤ļ +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ +ÂĤ Ãij +ox in +Ġ% ( +Ġhun ters +" In +Ġoccup ancy +æįķ èİ· +al gebra +lo qu +èѦ å®ĺ +Ġf al +ä¸Ģ æĹģçļĦ +ĠE ra +éĩĮ è¾¹ +åĵª ä¸Ģ个 +ri zz +Ġbl ah +-f n +çĽĪ ä½Ļ +Ġd ubbed +å¿ĥ æĥĬ +Ġeight een +åīª è¾ij +Î ¶ +.Sh ould +æ¡ ¦ +vent ure +Ġphys i +process ed +滨 æµ· +ĠK athy +æľº åŀĭ +Ġphot ographic +Ġmess enger +个 好 +Ġrot or +å°±æĺ¯ åĽłä¸º +aff le +_un ique +account s +. ` +ĠCon ven +ore ct +ĠB ake +ĠA us +ï¼ļ 第ä¸Ģ +å¯Ĩ å¯Ĩ +: D +Ġ åĪĽå»º +l ake +T rip +ee ee +æĥ³ èĢĮçŁ¥ +Ġmut able +éĩı 身 +p ending +ä¿Ŀè¯ģ äºĨ +MT P +ä¸į äºĨè§£ +缸 ä¼´ +T re +é¢Ī éĥ¨ +E conom +Ġche eks +_ entries +. 
edit +ĠF t +-b tn +Alloc ator +Z X +\ ; +(ex pr +Ġmush room +ï¼Įèĩª ä»İ +çļĦ åı¦ä¸Ģ端 +Ġhas attr +ref lect +åĪĨ 段 +建çŃij å·¥ç¨ĭ +没 éĤ£ä¹Ī +rec ords +m alloc +A my +Ġdistribut ing +åĮĹ京 大åѦ +èĦ± é¢ĸ +çŁŃ æĿ¿ +ĠPeters burg +Ġst ray +roll ment +( ? +ï¼Įæ¯Ķå¦Ĥ 说 +å®Ŀå®Ŀ çļĦ +: , +ä¸Ģ ä¸Ń +Ġloc us +ĠL oren +Ġur ging +ãĤ ¦ +Ġpet roleum +ĠM d +WH AT +Ch ief +ä¸Ĭ éĥ½ +ac char +ott i +åıĤåĬł å·¥ä½ľ +åķ ®åIJĪ +_s ent +ä¸ļåĨħ 人士 +. Any +ãĢĤ 亦 +Ġpoly ester +A part +Ġc rossover +drop down +CT YPE +çļĦæ°Ķ è´¨ +Ġlifes pan +Ġp ytest +ish ops +é»Ħ æĺı +ert iary +éĢı çĿĢ +éĽħ çļĦ +IS IBLE +.w indow +ur ved +说è¯Ŀ çļĦ +Ġr ansom +æĺ¯ ä½ķ +éĩį çĹĩ +è¿ŀ 串 +-y our +Ġv id +åĿIJ ä¸ĭæĿ¥ +éĢī èĩª +ĠSol o +æĦı 为 +_TH READ +Ġsubsid ies +产ä¸ļ åĽŃ +-th reat +Ġcraft sm +Ġshar pen +åı« 人 +Plug ins +ym oon +-f low +Ġ' & +Ġimplic ation +aster y +ï¼Į ç§ĭ +ĠM ens +æİ© æĬ¤ +ĠEar lier +åºľ çļĦ +å®ĺæĸ¹ ç½ijç«Ļ +å¢ŀ èµĦ +综åIJĪ å¾ģ +æĮī ä¸ĭ +主è¦ģ ç͍äºİ +å¾Ĺ è¿ĩ +Ġelectron ically +ï¼ĮæĪij çľĭ +part y +Ġreminis cent +ĠA uckland +- empty +Å ¥ +i ability +Ġfor wards +ins ky +å¢ŀ çĶŁ +Ġse aling +ç»Ļ åĩºçļĦ +ais y +å®¶æĹı çļĦ +Ġsim mer +ä¾ĿæĹ§ æĺ¯ +åĵ® åĸĺ +ãĢģ åħ¨åĽ½ +ĠBel le +Ġout patient +bro ken +Ġf ries +å°± åºĶ该 +ĠColon el +大 纲 +Ġinter personal +æłı æĿĨ +éĢĽ è¡Ĺ +T ony +Ġcan opy +ä¹Ł åı¯èĥ½ +Ġher d +-w heel +_PRO C +Ġlab elled +Lock ed +ĠD DR +èĦļ è¸ı +-s k +Ġnon zero +m kdir +æłij ä¸ĭ +ĠCamb odia +ï¼Įåıª éľĢ +-section al +ï¼Į 管 +éĺŁ åľ¨ +ĠD anger +osph ate +ert e +Ġant agon +ĠEd monton +è¿ĺä¸į å¦Ĥ +Ġlaugh s +éĽĨæĪIJ çĶµè·¯ +ï¼ļ ä»İ +ä¸ĭä¸Ģ åĪ» +s ometimes +ا ت +Ġe i +åįİ ä¸ľ +ĠPerson ally +Ġstrip es +è¿ ¸ +Ġcon cluding +æĸ° 车 +Get Value +çĥŁ æ°Ķ +: _ +[ < +ï¼Į æİĮæı¡ +ãģ ĺ +. 
access +h urst +Ġcoord inating +她 äºĨ +åIJij 举 +å¦Ĥä»Ĭ çļĦ +ĠConf irm +DI Y +äºĨä¸Ģ æŃ¥ +Ġadd icted +å¸ĥ æĸ¯ +éķ ¯ +> package +ĠT anzania +stant ial +Col lections +Ġth irteen +Ġor chestra +æľĪ å½± +it ely +ä¸įä½ı çļĦ +æijĦå½± å¸Ī +-est ablished +ph ysical +Ġs inks +大 å°ıå§IJ +Ġauthor ize +_f low +mon itor +Environment al +éķ¶ åµĮ +åĩº äºĭ +Ġæľ¬ 书 +Ġan esthesia +ä¸ĭ 端 +ï¼Ľ èĭ¥ +å·² äºİ +è°£ è¨Ģ +: L +Ġexc uses +ĠV oc +Ġliv elihood +éĩį åIJ¯ +æĴ¤ 离 +J ay +é¢Ĩ åľ° +éͦ æłĩ +Ġkn ots +et ect +为 ä»ĸ们 +Ġan omaly +éĢĤ éĩıçļĦ +Un lock +Ġis omorphic +N m +ĠMy anmar +çϽçĻľé£İ çļĦ +ï¼Į åĩ¯ +è¿· 失 +Ġouts ourcing +Ġa ria +ä¸į æĶ¾ +ä¸Ĭ è¯ī +èĩª åįij +ãĢĤ èĬ± +åĪĨ éĩı +ĠCom ic +éĻĪ ä»£è°¢ +Ġc emetery +æĿĢ æ°Ķ +æİĴ 骨 +tr on +马ä¸Ĭ å°± +æīĵéĢł çļĦ +x space +ä¸Ģ ç±» +éĩij åħī +æ²ī éĩįçļĦ +缸 è¾ĥ +ï¼Įå¿ĥ æĥħ +çŁ³ çģ° +iam ond +_h idden +Ġincor poration +ï¼ĮçĶļèĩ³ è¿ŀ +or get +æĶ¾ çĿĢ +è°ģ èĥ½ +Thread s +åİŁ åŀĭ +åĨį çݰ +é¢Ħ å¤ĦçIJĨ +äºĨ è¿Ľåİ» +ens ely +ĠAnd ers +Ġinspect or +ĠT J +ĠE uclidean +ĠHard y +Ġm illing +äºĮ åĵ¥ +çļĦå¿ĥ çģµ +Ġde comp +åĴĮ ä¼ģä¸ļ +. 
ps +ä¹ĭ æīĭ +Relations hip +æĦ¿ æĻ¯ +" Well +天 éģĵ +æ·¡ å®ļ +Ġsub po +-col ored +ï¼Įä½ł è¿Ļ +èĥ½ éĩıçļĦ +éħį æĸĻ +顾 å¿Į +ĠMag gie +åıijå±ķ åΰ +ï¼ļä¸Ģ æĺ¯ +) çŃī +声 åύ +the ws +he ard +éĿĻéĿĻ çļĦ +ĠF IRST +Form s +- primary +ä¿ ¨ +-f requency +( form +( shape +ĠPier ce +Ġan te +ï¼Į 客æĪ· +éĶĢåĶ® æĶ¶åħ¥ +ĠRap ids +åį¡ æİ¥ +Ġb ipart +课åłĤ æķĻåѦ +UG E +ĠRo lex +æľĢ åĸľæ¬¢çļĦ +åħļå§Ķ åī¯ä¹¦è®° +ä¸Ģ è¾Ī +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ +ĠVAL UE +OR ITY +åѤ åĦ¿ +çļĦ çī¹å¾ģ +math it +æ²³ 举 +ĠH ouses +Ġmult ivariate +ä¸į å½±åĵį +.A nd +hib ition +_l ight +X i +Ġp on +c aps +des criptor +æīĵ æĭĽåĶ¿ +è¯Ĭ æīĢ +å³ Ń +èģĶç³» ç͵è¯Ŀ +æĬķåħ¥ åΰ +æĤ¨ åľ¨ +çķĻ çĿĢ +ĠThank fully +c els +\ tau +åªĴä½ĵ æĬ¥éģĵ +ภµ +Ġk idding +éĻį èĩ³ +Ġ' \\ +.sw ift +身边 çļĦ人 +ĠRef riger +H oliday +ĠE RA +å°Ĩ è¿ĻäºĽ +çľ¼ 羸 +å·´ å¡ŀç½ĹéĤ£ +Ġyour selves +ĠMonth s +_ext ension +C op +ä¸įå°ij äºİ +Ġbag gage +en stein +ĠO W +æĸĩ æ¡Ī +ĠV irus +è¿· ä½ł +_ ED +å¾Īå¤ļ æĹ¶åĢĻ +ï¼ĮæŃ¤ å¤ĸ +ĠL INK +åĽ½ ç«ĭ +æ£Ģ çĸ« +æľĢ ä¼ĺ +太 å®Ī +ĠB eds +.A spNetCore +èµĽ åľº +p ow +ç® Ķ +-pro v +表 ä¸Ń +æ·ĭ æµ´ +ĠTele gram +_des criptor +ĠT ARGET +Ġdiscipl inary +侦 å¯Ł +- standing +ĠZ h +IN ARY +å¾Ģ 常 +ä½ł æĪij +æ°´ ä¸Ĭ +ä»Ģä¹Ī åı« +çī¹çĤ¹ æĺ¯ +Ġcl i +Ġout we +æĺ¯ä¸Ģ 座 +ï¼Į æīĢè°ĵ +ä½İä¸ĭ 头 +ï¼Įè¿Ļ 对 +ä» Ħ +Ġimp rint +èĥĥ èĤł +Car ol +å°Ĩ å®ĥ +Ġgrad ing +ãĢģ å§ľ +ĠL ose +åIJĮ å¿ĥ +ĠCom par +Ġ åΰäºĨ +.s un +ĠSand ra +缸 åĬ© +Ġdr ank +ĠSupp liers +ĠAut hent +ĠA unt +æĥħ å¢ĥ +H OW +饼 å¹² +R os +ĠS ag +西 è·¯ +# elif +奶 æ²¹ +at is +æĺ¯ æĹłæ³ķ +æ»ļ çŃĴ +s ender +ĠLes lie +ï¼ģ âĢľ +.IsNullOr Empty +ĠContin ental +ĠN K +и з +ñ o +Ġred shift +Tool bar +rem ark +èĥ½ 使 +_s ide +ace ae +æİ¥è§¦ åΰ +Ġc yn +æĦŁè§ī åΰäºĨ +it atively +Ġmon ot ++ $ +ak istan +Ġid iot +- am +, åĽ½å®¶ +Ġsc ars +Ġitiner ary +_ play +ç¢ ĺ +he tti +erv ille +OB JECT +Ġamb itions +çłĶç©¶ ä¼ļ +ĠSk etch +ãĢĤ ä½ľèĢħ +' ", +ãĢĤ åı¶ +æ¤į åħ¥ +Ġjack ets +E mitter +ĠM VP +è¡Ģæ¶² 循çݯ +ic able +ãĢģ çľĭ +èĩª 带 +ç¡ ¼ +åĩºçīĪ æĹ¶éĹ´ +Ġst o +(- \ +ĠNew sp +,ä¹Ł 
åı¯ä»¥ +å¼Ģ åĩº +ĠRuss ians +åIJĬ é¡¶ +ĠHay es +Ġgastro intestinal +Ġcalcul ates +Ġconf ession +ĠTr igger +åĻ© 梦 +ä¸Ģ æī« +_R ANGE +. platform +Ġh ust +å±ıå¹ķ ä¸Ĭ +ĠCar b +Ġax ial +积 æ°´ +Op inion +ĠTri angle +ĠE fficiency +æŃ¦ å°Ĩ +REF ER +å¤ļ æĥ³ +ç ões +æĶ¾ ç¼ĵ +Ġexplo ited +æ²ī 稳 +æ²¹ èħ» +.y aml +ç¾İæľ¯ é¦Ĩ +') ). +ĠR ex +ãĢĬ æĸ° +ĠLib ya +æĭĽèĤ¡ 说æĺİ书 +Ġreg imes +ING TON +Ġ æĮīçħ§ +B road +ch ief +æ¸ħ æ·¡ +è·Ł ä»ĸ们 +ĠWals h +Ġselect s +_W INDOW +å°±æĺ¯ æĪij +âĢľ ä»ĸ +ig rate +åºĦ 严 +Ġiron y +åħ¬ ç͍ +En sure +æIJľ 寻 +Ġbul lets +× ľ +.f actory +ç£ ĭ +ĠCr ushing +ĠAct ual +de al +ä¹Ł ä¸Ģæł· +ä¹ĭ æĦŁ +Hash Code +ä¸Ģ 模 +Ġv icious +è·Ł æĪij们 +Ġcatast rophic +çİĩ é«ĺ +Inter action +ä¸į æĸ¹ä¾¿ +s yn +Ġà ¥ +_ entity +Kit chen +act ively +ampl ed +Ġmon et +op o +ard e +éĽħ åħ¸ +ĠH olding +æĿ¾ å¼Ľ +帷 å¹ķ +ä¸į 符 +ä¹ĭ å¹´ +æ°ij æ³ķ +Ġlumin osity +éĻĪ åĪĿ +ML S +w ife +Ġm ens +( conn +ĠK un +_S M +å°ıæĹ¶ åIJİ +\ draw +ï¼Į æµij身 +åĺ± åĴIJ +ĠCraw ford +Ġgu i +ï¼Į æŁ¥ +om ation +" }) +ãĢģ 车 +èŁ Ĵ +Ġadmit ting +çϽ çİī +çģŃ äº¡ +马æĭī æĿ¾ +Ġpestic ides +, ä»·æł¼ +? 
v +ä¸ī å±Ĥ +Ġaltern ating +丹 麦 +åįķ åįķ +ä¸į è´¥ +Ġles bian +-ind ependent +æĹħè¡Į 社 +-d ist +ï½ ħ +/ U +ĠP urs +èľ Ĵ +åĶ¿ åĶ¿ +Ġalle le +Ġid ol +Ġmult in +ĠSat isf +Ġerr no +ï¼Įå°± 被 +Ġempower ment +ĠM ongo +ĠDis order +ĠW ings +Op code +ĠCh rys +.c enter +åĴĮ éĶĢåĶ® +代çIJĨ åķĨ +ĠAbb ott +Fil m +Ġpsych ologist +uc son +for ces +çĶ» çĶ» +åħ¬å¸ĥ äºĨ +ï¼Į ä¿¡ +è¿Ļä¹Ī ä¸Ģ +mar ine +çĶļèĩ³ æĺ¯ +{ J +åĮħ åĮħ +Ġgen etically +ä¼ł æĿ¥çļĦ +Ġt ak +红 楼 +ĠSe al +ĠB ytes +ent ral +CR YPT +ĠSem i +ĠR atio +é¸ ¾ +ĠR is +èᝠä¸ļ +Ġs g +Ġtrig gering +- condition +çĸ µ +Ġtel escope +çĩķ åŃIJ +主 æĿ¿ +äºĮ èĢħ +æ°Ķ 泡 +Ġdep rived +Ġpy ramid +ภ¥ +详ç»Ĩ ä»ĭç»į +Ġ åŃŁ +Ġspark ed +åĴĮ å®īåħ¨ +Ġconserv atives +Ġb ells +Ġch orus +æľī 线 +åĶ¿ åIJģ +: B +Ġmurd ers +l x +Ġfun ctor +æīĵ åıij +Ġst mt +ãĢģ æŃ£ +t ile +ĠPol ar +æĦĪ åıij +ĠStock s +L ot +Ġheaven ly +Ġst ern +ĠHelp ful +_U ART +åĴĮ åŃ©åŃIJ +Ġprogress ed +Ġavoid ance +以为 èĩªå·± +Ġwid ow +ãĤ· ãĥ +æĸ°é²ľ çļĦ +æľĢ éĢĤåIJĪ +(" ", +Instance Of +ĠSund ays +æģĭ 人 +.s ervices +ठ¨ +ĠCount ries +åħ¥ çĿ¡ +è·³ åĬ¨ +è¦ģ èµ° +æĪij æīį +åı¯ 为 +_p ar +Ġmerg ing +Ġre written +IF IC +Ġh obbies +é«ĺ åľ° +å¼Ĺ åħ° +ãĢģ D +Ġharvest ing +Ġtravel er +Ġ 导æ¼Ķ +Ġy ork +Ġsn apped +è´¢ è¿IJ +丹 èᝠ+ãĢĤ å®ī +宫 é¢Ī +æŃ£ç¡® çļĦæĺ¯ +ï¼Į æ²Ļ +Ġup hol +([ [ +åķĨåĵģ æĪ¿ +æĨ ¬ +ĠLanc aster +æĭĸ çĿĢ +Ġpropri et +Ġrevers ible +Ï Ī +åĩº åĵģ +çªģ åıĺ +, åİŁæĿ¥ +As String +çŃī ä¸Ģç³»åĪĹ +æĻ Ĺ +_H AND +ad h +ï¼ļ 大 +ĠS IGN +ï¼Įä¸Ģ åıĮ +ĠU int +çij ļ +ç¹ģ å¿Ļ +âij ¥ +Ġsqu ir +ĠHor izon +-work ers +S itemap +Ġt apping +çļĦ è¿ŀæİ¥ +äºĮ åı· +çĻ» å±± +çļĦ å®īè£ħ +Ġ[ \ +å¿« é¤IJ +>: ]< +Ġpast ry +R IC +ãĤ £ +ĠL ithuan +, ç»Īäºİ +ĠIncre ased +Ġtack les +. You +Ġtool bar +ĠU IT +ĠD j +under ing +Ġp ian +åĪ© æĸ¯ +伤 çļĦ +Ġdess erts +Ġcollabor ating +æĬ ī +Ġp antry +ĠD il +ï¼Į ç¨į +ch rome +ä¸ĭ åįķ +ï¼Į æŀĦ建 +semb ler +Ġcan ned +ï¼Į éĢĢ +Ġn ach +æĬķ 身 +缮çŀª åı£åijĨ +rom a +ç®Ģ缴 æĺ¯ +éĤ£ åıĮ +奥 æŀĹ +/ usr +以ä¸ĭ æŃ¥éª¤ +Ġliqu ids +ne ider +/* ! 
+åı· 为 +Pal ette +ĠL on +_j ob +Ð ł +ĠInterest ing +ï¼Į ç»ĵ +agent o +ï¼Į D +ĠG ol +ï¼ī 为 +_s lice +( server +åĩº çĶŁçļĦ +Ġtack ling +]-- [@ +åħ¥ åĽ´ +è¾ľ è´Ł +éľ ĵ +M ARK +Ġso fter +ç¾ ¹ +Ġcert ifications +R ussia +-p ass +.Print f +, ä¸ĢäºĽ +ke a +åIJij ä¸ĬçļĦ +_f ound +Ġforecast ing +å¿§ èĻij +ĠCur ry +Ġ éģĵ +Rep resentation +ë ĵ +est a +, æķ´ä¸ª +å¹½ çģµ +ç͵éĺ» R +Ġdecis ive +åı· å¡«åĪĹ +Ġfoot wear +, å¦Īå¦Ī +ï¼Įå¹¶ äºİ +ï¼Įéĥ½ è¦ģ +> ? +El izabeth +横 æ¢ģ +ï¼Įåľ¨ æĪij +åħ³ ç¨İ +.m at +ĠB iol +/ apache +anc a +ä¸Ĭ åı¤ +^ x +Ġdist al +Ġdra ined +åĽŀè¿ĩç¥ŀ æĿ¥ +P retty +Ġs add +æīĵ è¿Ľ +c urrency +d isk +ĠM ant +天 æĸĩ +ĠBl ade +ĠRep orter +个æľĪ çļĦ +人为 æľ¬ +ï¼ļ 人æ°ijå¸ģ +é«ĺ çĤ¹ +' est +Ġ åºĶä»ĺ +-ch oice +ĠCondition ing +åħŃ å¤§ +Call ing +ï¼Įä¸Ģ å¹´ +ï¼Įçľĭ äºĨ +åĴ³ åĴ³ +Ġhelp ers +åĩºåİ» çļĦ +B an +ĠDis orders +Ġgest ures +ĠY a +ç»Ŀ ä¸įä¼ļ +Ġaction able +ĠStart up +ĠVeg an +h bar +Ġc is +Ġc ured +导 éĢļ +åİĭ æł¹ +comb ination +ç® « +_F ROM +ĠHol ocaust +v ag +å²Ĥ ä¸įæĺ¯ +Ġvamp ire +çĸĹ ç¨ĭ +K R +Ġs iding +ĠHel iport +_C lick +}} $. +æĺ¯ä¸Ģ åıª +éģı åζ +Cond itions +_l ibrary +ï¼Įæį® 说 +iff erence +min i +å°Ħ åĩº +ä¸į éķ¿ +座è°Ī ä¼ļ +cloud let +ĠEvery body +P ixels +ãĥ ī +ĠSpr ay +åIJĥ 饱 +_m an +Ġprof iciency +Ġmunicipal ities +å½¢ èµĦ产 +åįĹ è·¯ +me asure +éļĶ æĿ¿ +Ġneglig ent +åIJĪä½ľ ä½ľåĵģ +in ally +J V +ï¼Į éŃı +ãĢĤ å¦Ĥä½ķ +马 ä¸ģ +表 çϽ +My SQL +F ri +ä¹Ł 说 +æ»ļ è½® +elle es +-le ading +Ġvel ocities +ommod ation +ä¸Ń æĸ° +Ġà ī +æĪIJç«ĭ çļĦ +Ġa kin +ĠExt reme +ï¼ĮæĪij éĥ½ +ĠB illion +Call s +âĢĿ ? +Ġinter connected +马æĿ¥ 西äºļ +ĠCal c +å±Ĭ ä¸ŃåĽ½ +Ġmyocard ial +èĿ Ļ +ĠImm utable +_P OWER +æĶ¯ çIJĥéĺŁ +_e ach +Ġtum our +ĠAl ready +Ġrepair ing +åģļ åΰçļĦ +åģı 离 +ï¼Į è¶³ +ï¼Į æĽ¹ +å®ĮæĪIJ ä»»åĬ¡ +ĠDevelop ers +P ARAM + ı +åĸľæ¬¢ åIJĥ +è¿ŀ èĥľ +. assign +æŀĦ æĪIJäºĨ +ĠDav ies +ä¹Łä¸į ä¾ĭå¤ĸ +Ġflo oded +Ġampl ifier +rib s +åį¡ æ§½ +é»ĺé»ĺ åľ° +åįĬ æľĪ +, çĶŁ +in cre +he ws +ĠM ozilla +ĠWh ilst +.j p +ĠR w +OT AL +-t rack +ĠCL I +. 
head +ä¸Ŀ æĿĨ +æİ¨ æĿĨ +åħ» çļĦ +ĠBe ef +ãĢģ åĵģçīĮ +UR ATION +管 线 +Ġfing ert +ĠPenn y +ç¾İ éĩij +Ġintim acy +ï¼Į åıĸå¾Ĺ +çĤ® å¼¹ +Ë ĭ +åĽ½ 度 +Ġin ward +éĿŀ æĺ¯ +. ic +ĠCow boys +èµĽ åIJİ +åı¶ èIJ§ +竣 æĺ¯ +ethe us +Ġno ises +éļ¾ çļĦ +Ġbuck ets +Ġrend ers +ĠD estination +Ġchrist ian +F ed +æīĭç»Ń è´¹ +ĠLat ino +æľĢåĪĿ çļĦ +è¾ĥ å¿« +大 éĿ¢ç§¯ +.... ..... +Ġvou cher +ĠCelt ic +åķĨä¸ļ 模å¼ı +æĪij 羣çļĦ +è¾ « +Ġmin ers +ĠCard inals +Ġcho ir +Ġred esign +Ġday time +ï¼ĮæĪij åİ» +Ġhydro x +ĠâĪ Ĥ +Ġnot icing +ï¼Į åIJ¸å¼ķ +Ġc rab +< th +çĥ¤ ç®± +Mac ro +èĥĮ çĿĢ +.Param eter +over n +æľīæīĢ ä¸įåIJĮ +交 éģĵ +书 æľ¬ +Ġaff ine +-f ashion +rust ed +æµģç¨ĭ åĽ¾ +çļĦ æĸĩä»¶ +Ġ- . +æĪij ä¸Ģ缴 +ä¸į çν +åįķåħĥ çļĦ +Interest ing +ĠMeasure ment +_ est +.c an +Ġscar ce +ĠRec all +çļĦ æĬĬ +,æĪij åĽ½ +ï¼ĮçĶŁ æĢķ +prot ect +Ġsub urbs +{ . +Ġne o +che ap +å¥ij åIJĪ +çIJĨäºĭ éķ¿ +EMPL ATE +S up +Ġal ley +Che ers +ĠAk ismet +ĠRoberts on +< meta +ĠF res +è¿ij åĩłå¹´ +æ¤į çī©çļĦ +çĵ¶ åŃIJ +ç«ŀ ä»· +Collect or +@ in +äºĨä¸Ģ 段 +_T OP +æķħ 宫 +ĠT ales +Ġstrateg ically +ï¼Į çͰ +ĠConsult ation +ç«Ļ起身 æĿ¥ +æ£ĭ çīĮ +_ ab +Ġf aux +c row +Ġbook ings +ocr at +ĠNS A +Ġunder mine +AD I +两 ç»Ħ +ĠComb ine +ï¼Į èĩ£ +äºĮ æľĪ +G LOB +Ġreview er +åįĪ åIJİ +æ°ĶåĢĻ åıĺåĮĸ +æ°ij ç͍ +Ġevid enced +ï¼Į æİ¥åıĹ +av igation +AS ON +åIJĦ æł· +Ġrot ated +H al +ĠAr lington +ĠHait i +j en +æ¯Ķ ä¸įä¸Ĭ +Ġstamp ed +âĢľ æĹł +ĠY emen +èµŀ æĪIJ +ĠEnter prises +ï¼Į顺 便 +æ¸ħ é¦Ļ +Red is +\ mathbf +Ġinv ocation +çļĦ天 空 +n ob +_s ummary +IR M +å®¶ éŨ +. order +Ġth o +ç§Ł 客 +èĩªçĦ¶ 人 +ĠEug ene +ä¸į 稳 +ĠBro ck +Ġread iness +Code Attribute +P oll +y led +ĠRug by +ĠP AR +æĿij èIJ½ +ĠC ec +Do ctor +åĩı åİĭ +p aste +ver bs +Ġ çͰ +Ġar che +=" ' +Ad ult +æ½ľ èĥ½ +, çĶŁæ´» +ãĢĤ æľī人 +ä¸į åIJ¬ +. entry +ime q +St an +çĶľ ç¾İ +çİĦ æŃ¦ +æŁ ij +be ans +F avorite +]. 
[ +ä¸Ĭ åı° +Ġinv oices +ï¼Į çģµ +å¹¶ èĤ© +SE ARCH +Ġshowc asing +沿 线 +Ġtur f +b az +Ġdest ined +Ġ å·² +ell ation +ãĢĤ éĤ£ä¸ª +ĠW id +iz ons +Ġend ogenous +ï¼ĮèĢĮ ä»ĸ +v oice +ï¼ļ http +Ġbow ling +D st +.b uilder +Hand ling +Ġrec ap +ict im +Ġpar ole +èµ° èµ° +æ¦ ķ +表 çİ°åľ¨ +Ġlo ft +å¯Ħ æīĺ +ãĢĤä½Ĩ åľ¨ +is EqualTo +ĠDi agnostic +.g lobal +ĠBur ton +è°İ è¨Ģ +-d uty +Ed ges +åı£ 头 +Ġprosecut ors +åĩº ä¸į +没 ç͍ +åĨħ æł¸ +Ġhard ship +æİ¨ ç§» +ï¼Įæĺ¯ 以 +b ill +- port +v endor +Ġâ̦ âĢĿ +Ġwh ip +Ġarchitect ures +W estern +é¢Ĩ ä¼ļ +Ġ ž +ĠIC U +Ġmort ar +çļĦ éģĵè·¯ +å°± åĴĮ +ĠEqu ations +Ġ" ^ +ĠH AS +ex c +? ), +}( {\ +omin ium +ĠCard inal +ven ous +Th rows +åĹĵ éŁ³ +ĠP recision +举 æŃ¢ +宾 客 +Ġunder gone +_ lo +åĭ ķ +Ġirres pective +æĪĺ åľºä¸Ĭ +n z +m or +ĠTh r +ĠNew port +ĠRich ards +Ġalleg ing +Ġ第ä¸Ģ 竳 +ï¼Į å®ŀåľ¨æĺ¯ +ĉĉĉĉĉĉĉĉ ĉĉĉ +ĠP orsche +åį° è®° +Ġdem ographics +ä¹ĭå¤ĸ çļĦ +ĠSuper man +Ġgen otype +åĩ¹ éĻ· +Ġ ÙĦ +ï¼Į å°ģ +Ġsk ipping +ä¸İ 她 +å¼Ĥ æŃ¥ +Ġright ly +åı¯ ç¬ij +æµĭ ç®Ĺ +ç»ĻäºĨ æĪij +éĤ£ä¸Ģ åĪ» +âĢĿ ä¸Ģ +ĠMc D +f elt +讨 好 +ĠHar old +Ġr uby +_c ard +Ġz ombie +he mer +ï¼Į éĢIJæŃ¥ +Ġh atch +-c ert +åħ³ éŨ +Ġget attr +ï¼Įä½ł èĥ½ +Ġs ag +çļĦ åĬ¨åĬĽ +Ġab ide +åζ è£ģ +ï¼Įåıª å¾Ĺ +sk in +ĠJoy ce +äºĨä¸Ģ åıª +Ġbl onde +od iac +Ġswift ly +ĠD ashboard +ĠS aving +Inter ceptor +æķij äºĨ +-grad ient +é£İ 湿 +ãĢĤ åIJĮæł· +ĠSt raw +Ġle thal +.as List +çīĪæĿĥ å½Ĵ +( im +å¤ĸ æĺŁ +ĠDevelop ing +ĠÎ Ľ +Ġadvers ely +-h op +æĶ¾åľ¨ äºĨ +- li +å¤ļ ä½į +ä½İ äºĨ +群 çļĦ +adv ance +ĠStri ke +_RE SP +çļĦ人 åı£ +S low +æĸ° æĿIJæĸĻ +ä¸Ĭ å±Ĥ +Ġs outheast +Lou is +Ġal right +y et +åĪĬ çī© +Ġcan on +_pro xy +_K ERNEL +s ell +Ġpul p +ĠS isters +åĽ¢éĺŁ çļĦ +åıĺå¾Ĺ è¶ĬæĿ¥è¶Ĭ +Ġsand y +IN AL +input s +æµ· 滩 +è´µ éĺ³ +大å¤ļæķ° 人 +èIJ§ å¹³ +æľīçĤ¹ åĦ¿ +æ¡Ĥ èĬ± +ĠW itch +ĠE ur +P ractice +åįĪ é¥Ń +ht ra +åı¹ äºĨä¸Ģåı£æ°Ķ +V V +Ġafford ed +Ġ å¾Īå¿« +len a +ï¼Į å®ĮåĸĦ +ç¡® åĪĩ +纳 åħ° +æıIJä¾Ľ ä¸Ģ个 +ĠAt om +b irds +enn ial +se o +空 å¿ĥ +ãĢį ãĢĤ +Prov ide +IN F +åħļçļĦ äºĮåįģ大 +èµĦæľ¬ åħ¬ç§¯ +ä¸Ģ æīĢ +ĠS ic 
+Ġbi ochemical +æĸ°åĨłèĤºçĤİ çĸ«æĥħ +Ġrec ycle +æĤ ¼ +åĮ £ +计æıIJ åĿıè´¦åĩĨå¤ĩ +_C REATE +åħ¸ èĮĥ +Ġchar coal +ï¼Į大 åĬĽ +éĽķ åĥı +ï¼Įä½ł æĥ³ +Ġterrit orial +æĬµ æī£ +ĠDE V +Ġcyt ot +ĠC GRect +ç¥ º +æĿĢ æİī +ĠPass ion +ĠM AY +æľī ç͍çļĦ +åĮ»ç§ij 大åѦ +. reshape +ĠH ollow +æ° ° +Pl ant +ĠP ale +Ġfurther more +åIJİ æīį +Ġpoison ing +-bre aking +T ail +Ġd ancer +IT IES +ĠNe ural +Ġg le +èĪª 线 +ï¼ģ ä¸įè¿ĩ +缸 æĢĿ +N umeric +Ġg ossip +Ġ èĩ³ +çľ¼ ç§ij +åĩº åĬ¨ +ï¼Į身 åŃIJ +ĠCom es +çŁ³ èĨı +ç§ģ æľī +_ AG +An imal +[ % +| null +ĠI CC +çĿģå¼Ģ çľ¼çĿĽ +ï¼Į åĮħåIJ« +ç§Ł 车 +çļĦä¸Ģ åįĬ +èĿĻ èĿł +åı¯ åı£ +ĠTerrit ory +åĬŁ è¯¾ +人 èĦ¸ +èĢħ 为 +åı² è®° +宽 带 +Ġcous ins +èѦ æĪĴ +交 ç»ĩ +Ġ åŁºæľ¬ +ĠB ac +pro ceedings +Ġuphol st +S olar +{ min +(f mt +-en abled +. pr +ĠH ers +Ġunit ary +ĠComb ined +ĠS lee +od ot +ba um +ĠFran co +礼 æĭľ +Ġim balance +éĢı éķľ +Ġauto immune +ï¼Įæľ¬ çĶ³è¯· +Ġknock ing +_f actory +Ġbl u +ĠAbb ey +-s ix +( Data +ĠIndones ian +Ġsil hou +n ice +sequ ences +Ġrecomm ending +ĠBro ken +æķĻå¸Ī çļĦ +橱 æŁľ +çĸ Ļ +Ġaddict ive +ï¼Į 讲 +Ġtext books +è¸ı åħ¥ +ĠP orts +çŀª çĿĢ +B ug +ç¨ĭ 约 +Ġcook er +}\ \ +åĴĮ æŀĹ +å®¶ ä¼ģä¸ļ +纤 ç»Ĩ +_R D +Cal c +çļĦ èµĦéĩij +æ¯Ľ åıij +åŁĭ æĢ¨ +ĠA gg +容 é¢ľ +ï¼Į ç¨ĭ +om ew +ç¬¬åĽĽ 竳 +缼 ä¸ĸ +Ġ& ( +åĨħ åĪĨæ³Į +ĠAb original +im acy +å°ı å¹ħ +åħ¬ åħ³ +åįĸ äºĨ +æĿĥçĽĬ å·¥åħ· +Conf erence +ĠNew man +ï¼ĮåĪĻ æĺ¯ +Plan ning +Ġsur geries +Ġpun ished +Dep ending +ĠCur riculum +/ sub +çĶ· çļĦ +ĠConf eder +_HE IGHT +Ġpsy cho +ä¹īåĬ¡ æķĻèĤ² +C riterion +Ġcl utter +èµ ĥ +ãĢģ è¡Įä¸ļ +Ġ 常 +ä¸Ĭ éĥ¨ +Ġident ifiers +éĹŃä¸Ĭ çľ¼çĿĽ +LE VEL +èģĮä¸ļ æķĻèĤ² +åĬŀ çļĦ +ï¼Į å½¼æŃ¤ +arr ison +èµ°äºĨ åĩºæĿ¥ +ll ib +ãĥ IJ +åĴĮ社ä¼ļ ä¿Ŀéļľ +çļĦ è¶ĭåĬ¿ +ĠH oo +ï¼ļ è¿Ļ +Ġaffirm ative +ĠS inger +åŁºç¡Ģ çŁ¥è¯Ĩ +å¾Ī大 ç¨ĭ度ä¸Ĭ +Ġ æľĢè¿ij +Ġm sm +ĠWh olesale +_P H +人åijĺ åľ¨ +$ f +ĠB F +æľ¬ å±Ĭ +_read er +åıª çŁ¥éģĵ +èĬĤ çĤ¹çļĦ +-d iscovery +ub ic +Some one +d ma +æģIJ é¾Ļ +Ġprec ursor +çļĦ æľªæĿ¥ +_dis able +Ġint ros +n atal +ï¼Į å¤ĸéĿ¢ +Ġsal ine +ä¹Ĵä¹ĵ çIJĥ +æ³ Ĺ +il age +Ġ Ùģ 
+_param eter +ï¼Įåıª éľĢè¦ģ +æ· Ĩ +ĠRent als +æĹ¶ 髦 +cal ing +.s ample +åıĭ 们 +ĠPa id +ĠSat ellite +Ab ility +ĠB olt +ï¼Įå¹¶ 以 +ãĢĤåľ¨ è¿Ļ +ang s +转 è´¦ +éĤ£ å°±æĺ¯ +Ġexpans ive +H arry +éħ¸ éĴł +çīĽä»Ķ 裤 +_RE Q +è´¦ é¾Ħ +ĠE Q +Ġin verted +Ġtra ctor +ÑĢ Ð°Ð½ +/ Y +.A d +Ġtw ists +çIJĥ æĺŁ +纸 è´¨ +Ġgrocer ies +Ġir reducible +Ġheter ogeneity +æķĻ å®ĺ +Ġlean ed +Ġdigit ally +Ġru pt +çľĭåIJij äºĨ +ä¸į æİī +èĦ± åı£ +- containing +è¦ģ 让 +诱 导 +人 头 +æĿľ ç»Ŀ +ĠTry ing +n os +å±ĭ åĨħ +Reg ardless +ï¼Į 汽车 +_s im +Ġa o +çĤ« èĢĢ +< M +S now +z oom +ãĢģ 综åIJĪ +.t ools +Ġforget ting +èĬ³ é¦Ļ +iph one +ãĢģ ä¸Ģ个 +ï¼Į æ¶īåıĬ +.t emplate +æī¾ 人 +转åŀĭ åįĩ级 +-w rap +åĨ¬ 奥 +olog ous +éĢļ åħ³ +ĠL OT +åĬŁ èĢĹ +åħ¬åħ± æľįåĬ¡ +ĠPro gressive +B roadcast +Ġdis joint +.F irst +èħ° éĹ´ +Ex actly +arr is +ĠEditor ial +Ġ éĽª +çݩ家 们 +Ġambig uity +è¤ ¶ +Ø ® +gu ide +\ neq +Ġpersu asive +O ffer +æŃ£ çļĦ +ĠA xis +ĠØ ¹ +å¢ŀ éĩı +ãĢģ åĬł +çľ¼ éĥ¨ +Ġdecl ines +Ġins ists +ĠEvent Args +Ġre connect +ksw agen +åı¯ æİ§ +Pub lish +$ as +S alt +ä¾Ľ çĥŃ +Ġscript ure +Ġw o +ycl erView +ĠLI KE +M RI +Ġhydro ph +æĥ³ å¿ħ +é»ijæļĹ ä¸Ń +ä¿ĿæĬ¤ åĮº +åĽŀ æµģ +ix in +Ġnot orious +å°± 缴æİ¥ +Ġburg l +åħļç»Ħ 书记 +qu a +è°ĥ åĴĮ +Ġtrust s +å®¶ å®¶ +ch apter +缴 è¾¾ +æĿĢ æĪ® +çĸij éļ¾ +Ġalter ation +Ġthrom b +计åĪĴ çĶŁèĤ² +åİ» æİī +æ¶ī æ¡Ī +缮 çĿ¹ +åĬ¨ ç͍ +Ġfall back +ä¿Ĺ è¯Ŀ说 +ãĢĤ è¦ģæĺ¯ +ï¼Įå°± è¿Ļæł· +åĺī åħ´ +ĠN MR +ost a +æ¶² çļĦ +å¸Ĥåľº 份é¢Ŀ +Ġpath ological +Lead ership +ĠW elfare +åĿļ 飧 +Ġacc ents +.m edia +ĠCreat ure +Ġob edience +大 鼨 +_P ART +PC I +éĺ² çĽĹ +Dep loyment +çļĦåIJİ æŀľ +çļĦæīĭ èĩĤ +Ġfeather s +ä¼ł è¨Ģ +c ape +ĠB ella +Ġtestimon ials +ï¼ĮæĪij æīį +ĠQu iz +n it +Ġdefic its +oglob in +Ġgam ble +ä¸į æģ¯ +ĠK ubernetes +ç§ijæĬĢ è¿ĽæŃ¥ +åħĥ å·¦åı³ +Ġth or +rang ian +Ġ= ==== +p rec +ï¼Į ä¼ļæľī +Ġret al +æĢĿæĥ³ çļĦ +Ġpres erves +太 好 +k al +éªĤ éģĵ +ä¸ĸçķĮ éĩĮ +èİ« åIJįçļĦ +åľ¨ æķ´ä¸ª +å½ķ ç͍ +_MEM ORY +ï¼Į 综åIJĪ +Ġmis use +ĠH BO +ĠPar l +ä½ĵ è´´ +äºĨåĩł åĪĨ +躺 çĿĢ +f actor +å¢ŀ åĩı +en ment +Ġ å¾Ī +使 æĪij +Ġsk ins 
+åºĵ éĩĮ +B oy +Ġ第äºĮ 竳 +\ over +_ users +Ġem pt +çŁ¥ æĥħ +sk a +éĵ İ +Ġä¸į 管 +å¸Ĥ åħ¬å®īå±Ģ +ĠEth an +B erry +[ * +Ġcreep y +ĠEn v +Ġm appings +ĠM olly +Ġconting ent +æŀ¯ çĩ¥ +Ġocc urrences +è¿Ļ ä¸ľè¥¿ +EE P +ĠSoph ie +ĠR H +åĽ½ ç±į +_reg s +$ data +ĠFerr ari +ĠB ind +åķ¦ åķ¦ +_ android +ï¼Į è¿ľè¿ľ +åĨ³ æĪĺ +B ron +ĠW inston +sw ick +.d ot +j d +æµ· åı£ +Ch oosing +æŀĹ çļĦ +Ġhep atic +深度 åŃ¦ä¹ł +M atching +ä¸İ 社ä¼ļ +ï¼ī 第 +åİŁåĪĻ ä¸Ĭ +zer bai +Ġcl an +æīĢ äº§çĶŁçļĦ +Ġ åľ°åĿĢ +ï¼Į äºīåıĸ +èĢģ 夫人 +Ġdet ained +Ġappell ants +æijĦåĥı æľº +ä½ł è§īå¾Ĺ +æĶ¯æĴij æĿĨ +èĶ · +åıijæĶ¹ å§Ķ +Ġnut rit +Ġrubb ish +ĠHim self +Bl end +Ġfav ors +Ġp thread +æĢ» éĿ¢ç§¯ +ç»Ļ ä»ĸçļĦ +çĺ © +S erv +çļĦæĺ¯ ä¸Ģ个 +ĠB ooth +ä»ĵ ä½į +Cons ult +aus al +Sl ide +Ġe ax +Ġ åĨ· +åĩº è·¯ +ĠEx amination +( reader +身 åľ¨ +ç¨İåĬ¡ å±Ģ +ï¼Įç͍ æĿ¥ +_log in +% E +ï¼Įæľī åĬ©äºİ +Ġinnoc ence +çļĦ è®°å¿Ĩ +ĠSher man +ä m +Ġincomp et +od ore +éľĢè¦ģ è¿Ľè¡Į +ĠSh in +Ġunlock ed +, åħ±åIJĮ +éĺ² çĪĨ +Ġsub groups +Ġtax able +ENC Y +åľĨ åľĨ +ĠBelg ian +Ġdisput ed +äºĨä¸Ģ æī¹ +Ġpresum ption +,... , +æĶ¶ 款 +éĦĻ è§Ĩ +æīĵåį° æľº +æľī åĩł +open hagen +ç͍ å¤Ħ +Ġpath ogen +_UN KNOWN +æ¸ħ çĥŃ +Ġskin ny +ĠBar on +. 
plugins +Ġstr ides +ĠUnivers ities +vid ia +çĽĺ çļĦ +åıijçĶŁäºĨ ä»Ģä¹Ī +å§ĭ äºİ +ä¸į èĩ³äºİ +_s erial +c ie +Ġm ah +ï¼Į æ£ĢæŁ¥ +ï¼Įä¸Ģ æĺ¯ +- author +éĩį åŀĭ +ï¼Įä½ł è¿ĺ +ĠM err +Ġdesc endants +List Item +D G +ĠR ails +大 å±± +ĠK ernel +ĠGod d +Log s +Ġmini ature +}} = +ĠNS Object +ï¼ĮåIJ¬ 说 +ĠWe bb +ı n +æĺ¯ åIJĹ +Ġmultip lying +- Al +Ġt iger +å·´ å°Ķ +^+ ^ +åİ» ä¹° +Ġfavour ites +IFI ER +åĽº æī§ +(c allback +ĠIF N +Ang el +ĠSix th +K ill +ĠH ER +åIJĮ ä¸ļ +ĠDet ail +ä¸į太 好 +_e lements +çļĦ身 åŃIJ +ĠSat urn +ĠModel ing +ç¬¬åĽĽ èĬĤ +ĠSal ary +å¤ļ åįĬ +A k +ra j +l bs +äºĨ 两个 +转 弯 +Ġre usable +ĠCorinth ians +é«ĺ 管 +ĠG oes +}{ | +æĸ¹ è¨Ģ +æ¯Ķ æĭŁ +ï¼ī ä¸Ń +ï¼Įä»ĸ åį´ +æĿ¥è¯´ æĺ¯ +ĠM um +åĿı çļĦ +ĠIn cluding +Ġbehavi ours +E sc +åĬ¨ æijĩ +requ ent +ĠGand hi +è½ ¼ +åľ¨è¿Ļ个 æĹ¶åĢĻ +ĠPro cedures +Ġget away +m ans +p ull +Ġben z +ĠD ash +ãĢĤ 秦 +ä¸į ä¸Ģèĩ´ +åĵĪ ä½Ľ +ο Ïį +æĬ¢ åĬ« +Ġinsert s +CF G +é£İ æµģ +Ġvent ures +éĢļ èĥĢ +ç½® çĸij +âħ ł +èĩªæĿ¥ æ°´ +åĦ ¡ +äºĮ æľŁ +èĢĢ çľ¼ +.get Int +è§Ĵ èī²çļĦ +天 åĨħ +Ġadvance ments +ä¹ĭ åĪĨ +v ae +Beaut y +ç²¾ é«ĵ +ud i +Ġmanage able +åİī害 çļĦ +pp ery +Ġb askets +åΰ æŃ¤ +Ġbud s +ĠBright on +è¡Ģ èħ¥ +è°¦ èĻļ +_ ring +Ġdec oded +Ġapprox im +ï¼Į å®ĺ +ĠLe an +der abad +.f ull +IL S +omb re +ĠD SL +åĵ¥ 伦 +ĠNa N +. 
Result +ï¼Įå½ĵ åį³ +$ {\ +H aw +Ġstream line +åĨ Ĺ +_n one +ï¼Įä»ĸ 便 +åľ¨ åħ¨çIJĥ +çļĦ æ³ķå¾ĭ +ian i +Ġus ability +åŁİ å¤ĸ +Ġartic ulate +Ġdecl ares +Ð ļ +ï¼Įå¹¶ æł¹æį® +}, { +.start swith +Ġrenov ated +prob ably +get Id +(s ql +ĠDavid son +æİĴ 污 +竣çĦ¶ æĺ¯ +éªij 马 +äºī æī§ +ĠI PO +ĠBoot s +n othing +av ar +({ ' +çĸ² å̦ +ä¿® åīª +æĪĺäºī çļĦ +ĠJud a +Ġpersu ade +Ġ çĶļèĩ³ +说 åIJ§ +Ġâ Ŀ +å¤ļæł· åĮĸ +Ġ Ú© +梦 è§ģ +oft en +Ġs lo +ï¼Į ç½ij绾 +éĴ µ +(t op +åΰ 她 +ï¼Į ä¸ģ +æĢ ħ +æIJ Ģ +ĠP AT +UR S +ï¼Į 空æ°Ķ +riv ation +ĠF rid +d ataset +| x +Ġintegr als +ĠD rain +è° Ľ +为 èĩªå·±çļĦ +-e ast +ãĢģ è§Ĩé¢ij +Ġpou ltry +èĢģ çļĦ +Ġaud itor +ĠPer forming +æĻļ æĻ´ +et ting +Ġsing ers +Sp ark +æĮ£ èĦ± +Ġins ider +åIJī ä»ĸ +è¦ģ ä¸į +Ġstring ent +宣 ç§° +Ġuniform s +åįģäºĮ æĿ¡ +ãĥ ĸ +.as px +Ġmanifest ation +A sc +_ protocol +Ġab ras +è¿Ļ ä¸Ģ个 +J oy +at ibility +Ġrefere e +带 宽 +at ri +MP a +Ġpup il +åı¤ ä»Ĭ +éĢĥ èµ° +; ) +Ġso bre +ix ture +ä¸Ģä¸ĭ åIJ§ +t al +ï¼ģ 她 +æŃ» æŃ» +ĠPed ro +ç¬ º +Ġ ä½Ļ +æĢ» åħ± +ĠRest ore +Ġp added +( Color +ï¼Įä¸į å¿ħ +Class ic +大æ¦Ĥ æĺ¯ +éĻª æĪij +ãĢģ åѦçĶŁ +Ġest rogen +ä»ĸçļĦ æīĭ +ĠIdent ifier +ĠL anguages +ĠFor ums +ĠProv idence +F UNCTION +ç¬ij èµ·æĿ¥ +ĠM ET +åľ¨ ä¸ĬéĿ¢ +å®ŀåĬĽ çļĦ +_ inc +é±¼ ç±» +Ġmicro scopic +ĠRef uge +.B ody +Ġpsych ic +æĹ¶éĹ´ éĩĮ +Ġdefault Value +å®ı ä¼Ł +Ġelev ate +ï¼Į 足以 +äºĶ ä¸Ģ +åħ¬ 社 +ä¼ĺéĢī 为 +Ġthank ed +æĥħåĨµ å¦Ĥä¸ĭ +æ¾Ħ æ¸ħ +ĠL IVE +表çݰ å¾Ĺ +åľ¨ æľªæĿ¥ +D OT +if rame +iat rics +ï¼Ľ åħ¶ä¸Ń +ĠCol leges +Ġtransl ator +ĠK aw +åĪĩ åīĬ +åĢŁ æŃ¤ +åĬ© æİ¨ +æīĵçł´ äºĨ ++ j +çŃīå¾ħ çĿĢ +impl ies +çļĦä¸Ģ åIJį +Ġmiss es +ĠP ARAM +Ġcor ps +Content Type +/ net +éĩİ èĽ® +èĮ¶ çļĦ +ï¼Į éĢŁåº¦ +E LL +ĠBi ological +åħ¬åijĬ ç¼ĸåı· +Ġsil ently +饰 åĵģ +ï¼Į说 æĺ¯ +ä¸įåIJĮ çļĦæĺ¯ +Ġm ates +Ġme ats +ĠRes idence +æĹı éķ¿ +Ins pector +ĠA TT +Ġe Book +ter ing +ĠSp an +éĽħ æĢĿ +C MS +ĠW ii +ge ar +åİ¿ çļĦ + ¡ +Ġsy nerg +æľŁéĹ´ çļĦ +çļĦ人 äºĨ +æł¸ 对 +ĠP iano +åĴĮ éĺ¿ +é¢Ħ æľŁçļĦ +Ġadoles cent +B oost +èĤ¡æĿĥ 转让 +/ my +åŁºéĩij èµĦ产 +åIJİ æĿ¥çļĦ +QUE UE +J J +m 
oney +our ing +Ġte lev +Ġgodd ess +ï¼Į éĹ» +ig gers +ï¼ĮåĽłä¸º æĪij +ĠGuid ance +- ro +æ·® åįĹ +_P LL +éļĶ æĸŃ +ç¥ŀ ä¹ĭ +次 æĹ¥ +Ġst riving +å¿ IJ +ä»ħ ä¾ĽåıĤèĢĥ +å®īè£ħ æĿ¿ +l é +缸åħ³ è´Łè´£äºº +Ġwild ly +èµ· èįī +ĠB ri +Ġke yp +å¿ ij +qu iet +追 èµ¶ +è¿IJè¡Į çļĦ +è¿Ļ å°ıåŃIJ +Ġ 书 +åĴĮ åĪĨæŀIJ +æĶ¶ è§Ĩ +ĠMethod ist +_load er +ï¼Įä»ĸ 对 +ri ott +Ġal uminium +ment al +-th ree +å·¥ä½ľ ç»ıéªĮ +\ centering +Ġremind ers +.sub str +Ġmerc ury +Ġeigen value +æ¯Ķ 对 +ï¼Į å®ŀè¡Į +ĠG U +ĠCou pons +[ B +Ġcann abin +ç»Ŀ 大éĥ¨åĪĨ +T W +e ffect +ëĭĪ ëĭ¤ +ipp y +{ j +ä½ĵ ä¸Ĭ +ĠSch midt +åĪ ¨ +is able +è¿ĻäºĽ å¹´ +u ador +TE CT +-P CR +.Ex ecute +Ġapprent ices +ä¸Ń åĬłåħ¥ +sign ature +LEG AL +å·¥ç¨ĭ é¡¹çĽ® +ick le +ĠAr ms +Ġes presso +èĩª åªĴä½ĵ +ä¼ĺ ç¾İçļĦ +.ex ception +_EX P +d rive +log en +_set ting +æĸŃ å¼Ģ +Ġclass mates +åħĥ çĴĭ +inger print +åħ¼ ä»» +ĠBlog ger +Ġapp ellee +Ġfun gi +Ġer u +åľ£ ç»ı +é¦ĸåħĪ è¦ģ +ic om +ãĢģ 个人 +g id +缼 å®´ +ĠH ole +ï¼Į å͝æľī +éĥ½ çĿ£ +.dat abind +ĠG ab +- analytics +çŁ¥ ä¹ĭ +æĶ¶ èµ· +Ġap ology +设计 ä¸İ +Ġju ices +Ġweak ly +ynth ia +å¹½ å¹½ +åŃ©åŃIJ åľ¨ +ä»ĸ å¦Ī +_SE L +Ġritual s +ä¼ļ å¼ķèµ· +Ġam mun +ĠM agnetic +交 æİ¥ +CH ED +ï¼Į å¯Ĵ +ï¼Įä¸į åı¯èĥ½ +æ® ĩ +çĶ· 主 +éĢĢ äºĨ +Ġepid em +Ġ 缴åΰ +çļĦä¸Ģ çīĩ +Ġmaster piece +ï¼Į S +æ³ķ åŃIJ +çķ¥ æĺ¾ +èĭ¦ èĭ¦ +.T YPE +Ġlett uce +ĠIN ST +Ġvent ricular +强 壮 +.g en +Ġsl ug +ç»ıèIJ¥ çļĦ +ĠD G +ä¸ĭ è°ĥ +_ext ra +ãĢģ åĩº +Ġmetast asis +ĠL aptop +Request ed +æķĪæŀľ 好 +ĠM ilton +ĠWinds or +/ new +int ech +çĽ¸å¯¹ åºĶçļĦ +æĥħåĨµ è¿Ľè¡Į +f usion +em ap +åĸ· åĺ´ +Ġaug ment +å°ı å¼Ł +ä»ĸ åİ» +é£İéĻ© 管çIJĨ +éĻIJåζ æĢ§ +æ¸Ķ ä¸ļ +ï¼Į åĽ´ç»ķ +ï¼Į åij½ +Ġa a +æİ ° +an an +åıª è§ģ +æķĪ èĥ½ +åħħ å̼ +AS P +æĪij们 ä¼ļ +Ġs ect +Ġconf use +æĬĦ è¢Ń +ãĢģ åĨį +(f ilter +.P I +ç͍æĪ· ä½ĵéªĮ +æĥ³ 说 +Ġfib rosis +éĵ µ +Ġquant ified +Ġr if +é£İ ä¸Ń +Ġtemp o +ä¸Ĭ åı¸ +ε ί +ï¼Įçľĭ åIJij +pp ling +å±ķ å¼ĢäºĨ +åıĪ å¦Ĥä½ķ +Target s +é¢Ī æ¤İ +ï¼Į åĩłä¸ª +没æľī ä¸Ģ个 +ĠAppoint ment +ĠMac Book +èĢħ 们 +EL Y +å¾Ī éķ¿æĹ¶éĹ´ +æĬķèµĦ èĢħçļĦ +ä¹Ł åIJĮæł· +èįī 丼 
+° Ãij +ĠPak istani +åĪĨéħį åĪ©æ¶¦ +æĦĪ åIJĪ +éͦæłĩ èµĽ +åĽ½ ä¹ĭ +好 åIJİ +urn ame +äºĨä¸Ģ å¥Ĺ +夹 æĿĤ +I U +AR B +R H +åĪĩ åħ¥ +ï¼Įä½Ĩ å¦Ĥæŀľ +Ġflo ods +.p anel +ĠWes ley +ĠK enny +å¸ĮæľĽ èĥ½ +å¸Ĥåľº ç«ŀäºī +d as +iv ative +ĠÏ ķ +æĶ¾ åĩº +ĠMy th +par alle +ĠSym fony +åıĺ 身 +Ġåıª è§ģ +ä»İä¸ļ 人åijĺ +ĠZ el +ĠM ETHOD +ES CO +( Base +Ġel ucid +ĠBever ly +ä¸įæĸŃ åıijå±ķ +in fect +Ġflav ours +ĠCon verter +äºĮåįģ ä¸Ģ +ĠPack aging +ht ub +ĠCh ancellor +å½ĵ å®¶ +ĠSp aces +at y +ograp hers +Ñ į +èĮ § +ĠG ust +yst ick +Ġz oning +.P ointer +åºĬ 头 +IZ ATION +cre am +å¿į èĢIJ +, ä¿ĿæĮģ +C ube +C e +Month ly +åıĹ éĻIJ +æľ¬ å¸Ĥ +s leep +_p b +_en um +-p ub +ï¼Įä»ĸ çŁ¥éģĵ +ä¸ĸçķĮ 大æĪĺ +ï¼Į æijĨ +æµ· 绵 +^ p +åĬł æĮģ +: ãĢĬ +è¿ĩ åħ³ +Sp anish +éĹª çݰ +I OD +Ġ ia +çļĦ è§ĤçĤ¹ +}} _{\ +å¿§ éĥģ +çĤ ¯ +Ġsaf est +Ġqu oting +OR IES +ĠGe V +_WA IT +Ġradical s +c ad +åįģ æĿ¡ +Ġearn s +Ġset Timeout +n aires +çļĦ两 端 +- u +ĠM aid +éĴĪ对 æĢ§ +å½ĵ å½ĵ +é»ij æ´ŀ +-w ritten +ew ood +ĠI stanbul +ä½İ ä½į +åºŁ å¢Ł +ĠSh ane +åĴĮ èĢģ +_D AT +æ°§ åŁº +ĠTim ber +An na +-des igned +Ġcut ter +Ġple thora +}/ ${ +é£ŀè¡Į åijĺ +åħĦ 妹 +çĽĪ çĽĪ +ĠHawai ian +ï¼ĮèĢĮ è¿Ļ +çļĦä¸Ģ 段 +all ic +éķ¿ å¤§çļĦ +ĠE ston +ill ar +/b uild +ĠRel ax +Ġeffort lessly +K rist +Ġst are +ĠBulgar ia +à ĺ +Ġk ar +ĠA ES +ä¸įä¼ļ 被 +.ex pect +缸 è¿ŀæİ¥ +ç§»åĬ¨ äºĴèģĶç½ij +Ġmod ulus +ocomp lete +Ġmarket ed +建ç«ĭ åľ¨ +Ġnic otine +æĻĥ åĬ¨ +Mix in +ĠC ater +Project ion +-g overnment +å¯Ŀ 室 +Ïİ Î½ +åij ¦ +ĠRes earchers +ony ms +ä¼Ĭæĸ¯ åħ° +ĠRel ative +ĠVeter inary +åĪĩ 齿 +_PR INT +ï¼Į è¿ĩåİ» +çĬ ģ +_in f +Ġhyp ers +Ġgl or +æĺİ ä»£ +Ġthreat ens +Ġins pected +.st arts +-d rop +ï¼Įæĺ¯ åľ¨ +Ġforeign ers +Ġlook out +åIJİ èĥĮ +çļĦäºĭ 项 +Ġske letal +ä¸Ģèά æĺ¯ +s ic +à º +Ġr inse +çļĦé«ĺ 级 +åħ¨ æĹ¥åζ +åħ³ ä¸Ĭ +Ġ# : +J ew +åĬŁ åĬĽ +é¹ Ĭ +het ically +K a +å¸Ĥ 人æ°ijæĶ¿åºľ +æķij çģ¾ +Ġam using +Ġexperiment ing +ĠBern ie +åĤĢ åĦ¡ +å¿ħé¡» æĺ¯ +ãĢģ éŁ³ä¹IJ +Ġburd ens +çŁŃæľŁ èŀįèµĦ +æ²§ æ¡ij +(d ec +. 
ly +organ ized +ut i +ee ks +太 ä¹ħ +man ia +it ched +[ u +ĠL enn +Ġguitar ist +æ±ī 书 +L uke +Ġshowc ases +.or acle +å°± åı¯ +è¿ĺ å¾Ī +çľĭåΰ ä»ĸ +æ´ŀ å¯Ł +ä½İ çĿĢ头 +ĠDev on +[ String +ï¼Į çIJĨ +åī ĥ +Re ach +Ġopp ression +è´Ńä¹° çļĦ +Ġ åĪĿ +Ġseek ers +Mir ror +ar ie +ä¸Ĭ ä¸ĩ +æĤ¨ 好 +WH O +Ġdifferent iated +æĦŁ è§¦ +Ġsc ams +ĠNor folk +åľ¨ æīĭ +è§Ħ ç¨ĭ +éĻĪ åĪĹ +h are +Ġampl ified +f urt +s al +è°Ī æģĭçα +Ġ 两个 +è¸ Ŀ +izont ally +å±ħ 室 +åıªæĺ¯ 个 +åĵ § +ç«Ļ éķ¿ +åĨ² 天 +Sh opping +åī§ çĥĪçļĦ +ĠDent istry +ĠF amous +. email +äºĨ å¾Īä¹ħ +ĠTrans mission +_b efore +ĠEnh ance +hard t +---------------------------------------------------------------- ---------------- +Ġmethyl ation +arm ac +ĠA th +çļĦå¿ĥ ä¸Ń +ï¼Įè¿ĺ åľ¨ +t oggle +Ġderiv ation +ï¼ļ \" +vol atile +ãĢĤ çĽ¸ä¿¡ +ĠS ymptoms +çĿ¡ ä¸įçĿĢ +ï¼Į羣 çļĦæĺ¯ +w ow +èĬ± èĬ± +éĶĻ è§ī +ĠCas ual +ch urch +ãĢģ åħ« +åįķ è°ĥ +æĬ¥ éĢģ +ore ms +( header +åĵģ ä½į +f h +Ġp ods +ãĢĤ 两人 +è¯ģæĺİ äºĨ +æľĢ å¿«çļĦ +å°ı 游æĪı +ĠCh ance +_A F +æ»ij 轨 +ĠConvers ely +er ring +éĥ¨ å°ļ书 +Ġs ess +ot ent +æĦ§ æĺ¯ +d ream +éħĴ çļĦ +" çŃī +èŀ ĥ +Ġ è¯ģåΏ +ï¼Įä¸Ģ æīĭ +æĢİä¹Īæł· äºĨ +Ġc aching +Ġbond ed +åIJ¸è¡Ģ 鬼 +Ġpro j +å¦Ĥ å®ŀ +Rep ublic +ü n +è¿ĻéĩĮ æĺ¯ +温度 为 +åļ ĵ +ï¼Įä»ĸ们 åľ¨ +- less +å®ī ä¿Ŀ +éĩİçĶŁ åĬ¨çī© +S old +æĻ® æ´± +ï¼Į æ´»åĬ¨ +Ġpl a +;\ ;\ +_D BG +Sent ence +åķĨ åľĪ +g ia +Ġce ased +g uid +ch oice +M is +l k +产çĶŁçļĦ çݰéĩijæµģéĩı +梦 éĩĮ +Ġ ä¼Ĭ +å¤ļ æł·çļĦ +Ġ æŃ¤åĪ» +Ġjer seys +ĠInsp ired +éĢĴ 交 +ä¼Ļ 计 +Ġpref ers +ĠDiv orce +_d raw +æľī åIJį +å̾ åŁİ +ï¼Įä¸įçͱ å¾Ĺ +Ġ& $ +Ġsw ings +ĠVlad imir +ĠL ONG +/ , +ï¼Į 寻æī¾ +-the med +ch ini +B lood +ĠW O +ĠHor ror +è¢ĸ åŃIJ +Ġirr ational +Ġtouchdown s +ĠD ock +å¿ĥ è¡Ģ管 +æīĺ å°¼ +å¥ĩ çī¹ +ch ars +åħ¬åı¸ åĢºåΏ +_C NT +ĠP ACK +ats by +Ġbes poke +å¦Ĥ æĺ¯ +é¡¿äºĨ é¡¿ +ï¼Įåΰ å¤Ħ +ĠDaw son +纪å½ķ çīĩ +ä¸Ń åıijçݰ +st arter +æŀ Ń +An c +åħī è°± +Ġcultiv ated +Ġelect roph +W rong +Ġm ans +rac use +inv oke +ä¸Ń 书 +çļĦ åıį +çĮľ æĥ³ +Ġdivid es +/b log +ĠC ove +Ġk w +- js +æĶ ¥ +åĶIJ æľĿ +ï¼Į æķ¢ +çŃī 人çļĦ +BO OST 
+ç¾İ åĨĽ +çĶŁäº§ åİĤå®¶ +ethyl ene +- II +m ere +çļĦ åį±éĻ© +/ view +èµ· é£ŀ +ie red +åį³ ä¸º +C rypt +re b +ex clude +- standard +åĴĮ 建议 +èĢĮ æŃ» +_f inal +ĠBrit t +ä¸įçͱ èĩªä¸» +æĸ¹åIJij ä¸Ĭ +arm an +åĩĮ ä»Ļ +. author +ãĢģ åĨľ +Ġadvent urous +ä¸Ģ è·³ +Ġsp aced +ĠVari ables +åĵ¼ åĵ¼ +Ġted ious +ĠG CC +in z +_d ialog +c ategories +å¿ħ å°Ĩ +æ¯Ķè¾ĥ 好çļĦ +è¢ĭ åŃIJ +Ġin set +浪 æ½® +æĢ» 计 +_LO C +éľĢè¦ģ注æĦı çļĦæĺ¯ +? id +ĠSY STEM +_f etch +ॠĭ +Creat ure +ex change +âĢľ 人 +é¢Ĩ导 ä¸ĭ +å¼ł èĦ¸ +.F ore +ï¼Į ç²¾ç¥ŀ +Ġdent istry +çļĦ è´¹ç͍ +Ġamazing ly +Ġprefer able +æľī ä½ł +ru ff +ä¹ĭ äºĮ +ĠWork place +\ delta +ĠD OWN +ĠChrist ina +Ġ ç±» +gr an +I AS +Ġabst raction +ĠP ermanent +( This + ¥ +课 åIJİ +æł¹æľ¬ ä¸Ĭ +åħ±åIJĮ åĬªåĬĽ +ĠS oci +åĽ½ éģĵ +Ġh ing +uch i +Ġp als +éĿĴ éĿĴ +交 éĻħ +Ġcr ashing +Ġд а +char set +ç¡ķ士 çłĶç©¶çĶŁ +) -\ +_ rs +(' ', +æĩĤ çļĦ +Ġin comes +_G ENER +ĠDemon str +çĿĢ åij¢ +Ġ æĬĢæľ¯ +å»ī æ´ģ +çĸĻ çĺ© +ĠU CLA +tr ust +ä¸Ģ ç¢Ĺ +ï¼Įä¸Ģ åıª +Health y +Ġle asing +ĠCh and +Ġbl inds +ĠChrist ie +ĠSal em +æĪij们 对 +å¿IJ å¿ij +æŃ£ æľĪ +Ġinform ing +av ian +èĢĮ å¾Ĺ +æ³ķ åĬĽ +èĨľ çļĦ +f req +ä¸Ń éĺŁ +Ġmiss iles +Ġ ä¹Ķ +èĭ± ä¿Ĭ +ĠT ucker +ĠRe habilitation +æ±² åıĸ +.f r +Ġacid ic +, åĸľæ¬¢ +æľī 为 +_m ut +æł¹æľ¬ 就没æľī +Ġhold ings +ãĢĤ çα +Âģ Ãij +ä¸į èµ° +Ġ` [ +nc ia +æĿĢ ä¼¤ +_RE M +æľī 空 +Connect ions +Ġbroker age +å¯Łè§ī åΰ +AC TER +EM A +m ph +é«ĺ è´µ +å½ Ŀ +é»ij å¤ľ +Ġ~ = +Ġpurch aser +æĹ¶ æķĪ +amb o +ãĢģ åĮĸå·¥ +ĠSem inar +Ġcons erve +Ġc ites +č č +æĿ¥ åIJ§ +link ed +Go al +an ed +Ġra ins +ãĢģé«ĺ级 管çIJĨ人åijĺ +ä¸Ģ è¯ķ +æľĢ åħĪ +ĠAng le +Ġlock smith +ib es +Ġd end +_t okens +åı¯ä»¥ çļĦ +Ġinsert ing +_UN S + ĩ +è¾ Ĺ +ĠP OP +ann o +_ ASS +ĠF old +è¶ĭ äºİ +èĪªç©º åħ¬åı¸ +ĠJ i +ì ĸ´ +Ġob solete +Middle ware +it ian +ĠIn fect +\ $ +ĠG ore +åĽĽ æµ· +èĴĭä»ĭ çŁ³ +r arily +çħ§ æł· +åħļ æĶ¿ +ãģ ° +ĠClass ical +Ġd od +_G L +ros se +Comp act +Ġrev ival +SE E +% " +Ġsh arks +æ°ij èIJ¥ä¼ģä¸ļ +erg ic +ch annels +æĥ³ 念 +Ġre juven +AN I +ï¼Į她 ä¹Ł +è¾Ľ è¾£ +ĠGreen e +ĠRab bit +æķĻ 
èģĮå·¥ +ç²¾èĩ´ çļĦ +ĠC ork +Ġgr inder +ĠS ens +Ġatt enu +æłĩ æĿĨ +Ġammun ition +ãĢĤ çłĶç©¶ +Ġup hold +æł¹æľ¬ å°±ä¸į +åĴĮ å¤ĸ +AT S +Ġdet erg +loc ations +ĠEv angel +æĪĺæĸĹ æľº +ĠExhib it +åŀĥåľ¾ æ¡¶ +大 ç±³ +ç½ij绾 çļĦ +åħĦå¼Ł 们 +Ġsour cing +. Entry +æĶ¶ åī² +Inter ior +och a +ais er +大 åIJĥ +ï¼Į 表éĿ¢ +ĠW er +å¿« æŃ¥ +Ġb ien +ĠR L +quart ered +Ġ è¡Į +è¡° éĢĢ +åį¡ æĸ¯ +ĠÏĥ Ïħ +å¤ľ èī² +R oyal +ĠStr ange +ĠH oll +Ġtr am +æľĢ 强çļĦ +ĠAlb any +ĠMain tain +åĨħ è¡£ +Ġ æĸĩä»¶ +av ailability +ï¼ĮæīĢ以 æīį +Ġadul thood +ï¼ļ 以 +Ġb box +æ·· æ·Ĩ +(s ervice +äºļ 太 +ĠS MB +ĠEx planation +ä¸į åħ¨ +_h istory +ï¼Įçݰ ä»» +.set Attribute +W ed +人 以 +ãĢĤ é¡¹çĽ® +ä¸Ń å¼ı +ĠM itt +æĿij éķ¿ +æ±ī åł¡ +åĽĽ çϾ +-p ol +Ġbat ting +Ġdes p +.get Attribute +Ġsol ves +] {\ +ï¼Į æ·¡æ·¡ +_AR CH +_S D +\ cos +ĠB G +-p anel +Ġc ider +ĠC oul +ä¸Ŀ毫 没æľī +T ot +am us +ä½İ è¿· +认为 èĩªå·± +éĢĥ çĶŁ +. eval +åį¡ çļĦ +, å®ĥ们 +Ġke eper +ĠLect ure +çī¹ éķ¿ +ĠCh ambers +ĠChen nai +$ string +E le +å´ ½ +åķĨåĵģ çļĦ +< link +Test imonials +大 åIJĮ +å·® éĶĻ +太 å¿« +yl ation +-c ap +éĿŀ常 大 +Last ly +éĸ ĵ +_D IV +g ang +i ago +Ġcomfort ing +g ap +Ġconsult ations +æĹłå½¢ èµĦ产 +J ean +ãĢĤæľ¬ æĿ¥ +æĪij们 å°Ĩ +ĠMal colm +å©´ å¹¼åĦ¿ +ĠH undred +th ings +ä½ķ 以 +æĹł çŁ¥ +ĠFlo oring +åĵĪ åĪ© +Ġconce al +il m +ĠK ai +è¿ŀ è¡£è£Ļ +ï¼Į æĭŁ +çĮ Ŀ +èŀĥ èŁ¹ +und a +éĺ´ éģĵ +duc ers +.w here +optim izer +am ics +æĸ° èĤ¡ +Ġ éĥ½ +Ġc ue +马 åĬĽ +Ġbudd ies +Ġhe aled +b ash +ĠIsa iah +ĠAdvis ors +.amazon aws +æ°Ķ åİĭ +ç¹ģ çIJIJ +éĢļ讯 åijĺ +Con clusions +App rox +per fect +-m illion +, è§īå¾Ĺ +ĠMar ion +èĬĴ æŀľ +ä¼ļ æĪIJ为 +è·Ł éŀĭ +Ġ è®°èĢħ +Ġf ir +说 å®ŀè¯Ŀ +Ġapp la +æł¼ 鼷 +ĠP WM +< char +L ed +Un iversal +è¿Ļ次 çļĦ +ç»ħ 士 +ĠJ ung +æľĪ åĩºçĶŁ +è¡¥ æ°´ +ĠRail road +ĠHospit ality +orect al +Ġpres criptions +æĴĴ å¨ĩ +.S ql +Ġrighteous ness +Ġw orms +_s m +arn ation +ĠIn struments +ä¸ĵä¸ļ åĮĸ +. rs +Ġbind ings +ï¼Į åĨľæ°ij +Ġcl ot +S aved +Ġvis ions +ï¼Į åı¯èĥ½æĺ¯ +h ci +建 çļĦ +ï¼Įä½Ĩ çͱäºİ +b ows +l ift +R ussian +RO S +. 
expand +.p atch +d yn +Ġ æ¯Ķ +etic a +ĠG n +çݰå®ŀ ä¸Ń +- Col +èĢģ 人çļĦ +ï¼Į çİ© +su ite +ĠO VER +ĠGold man +ĠInf inity +ç´§å¼ł çļĦ +method s +_SY STEM +C MD += ] +æĿ¥ è¿ĩ +Ġpat ented +Ġamb ulance +Ġarter ies +Ġpit ches +ĠCh ronic +çϽ åıij +å¹³ å¹³ +Ġhero in +Ġfauc et +ï¼Į æĥŁ +çļĦ 形象 +³³ ³³³ +Ġcamp uses +Ġfl ats +å·´ 士 +Click ed +gin x +ĠAng ular +åĽ¢ åĽ¢ +Ġinvestig ative +大 æ£ļ +-f unded +- * +ĠSub st +ãĢģ ä¸ĩ +èĬ± 纹 +èģĮ æĿĥ +çĶĺ èįī +Ġt ucked +ct ype +Ġconv olution +ĠL INE +å¼ł åĬĽ +åĪ© 好 +Ġfl are +_g pu +if ax +éĤ£ä¹Ī 好 +Ġo val +ff i +_ State +Ġpro se +Ġal g +Ġt ester +åħ¨ 天 +宫 çļĦ +) x +çļĦä¸Ģ ç³»åĪĹ +Ġneur on +èᝠçļĦ +Ġcasc ade +Ġam ph +Ġtext ual +ĠW ARNING +Ġhistor ians +I ce +P ray +ivid ed +Ġop code +>\< ^ +IR S +æ¤ħ ä¸Ĭ +à » +ĠCommun ist +az ar +åĴ İ +ac ic +Ġannoy ed +Acad emic +å¦ © +ĠF en +Ġav ocado +_M ON +_C OL +Ġw ig +-go ing +ĠSlo ven +Ġovere xp +å¹´è½» 人çļĦ +Ġ ä½ľ +H ero +ĠScre ening +ï¼ĮæĪĸèĢħ 说 +Ġtensor flow +ãĢģ 社åĮº +Ġpred ators +Ġcomp osing +æĢ¥ éľĢ +(t ree +çĿĢ æĥ³ +ĠPl umbing +ĠRead s +ï¼Į çł´ +Ġf ec +æľĪ çIJĥ +éĺ² æ»ij +åĽŀ æĥ³ +() $ +Em ily +Ġâĺ ħ +Ġemerg es +çĮİ äºº +çŁŃ è·¯ +éĩĮ äºļ +èĦ± åıij +课 å¤ĸ +Ġscar f +T own +天 æĻļä¸Ĭ +Ġenthusi ast +Âł ä¸įè¿ĩ +rem ely +ä½ł æīĢ +ĠY o +Us ually +ãĢģ 广å·ŀ +å°ı å®¶ä¼Ļ +ãĢģ 建设 +主 å¹² +Up coming +Ġsk illet +ï¼ģ åľ¨ +-e conomic +Camp aign +Ġfor Key +ï¼ĮæĪij åıĪ +äºĶ 人 +æĬijéĥģ çĹĩ +åĩºçİ°åľ¨ äºĨ +åĴĮ åºĶç͍ +n od +ĠP rix +æ´ŀ ç©´ +äºİä¸Ģ ä½ĵçļĦ +ï¼Ľ äºĮ +åĽ´ çĿĢ +æ³¢ çī¹ +ĠLeg ion +ĠG erald +æīĵ åħ¥ +ï¼Įè¿Ļæł· æīįèĥ½ +/ **************************************************************** +-p iece +Ġì ķ +èľľ èľĤ +å°Ħ éŨ +èµ° åĩºäºĨ +ä h +a verage +å¸Ń åį· +ĠG inger +Health care +- road +Ġaccus ations +æĬī æĭ© +Ġembry os +.log ging +/ content +( resp +ï¼Į åı¦ä¸Ģ个 +Ġdivis ible +ï¼Į çĶ» +ĠD um +æĶ¯æĴij æŀ¶ +ï¼Įå°¤åħ¶ æĺ¯åľ¨ +å®ŀ åIJį +è°ĥ éħį +Ġr att +å¡ij æĢ§ +礼 æľį +Ġrehe ars +y c +ãĢģ æīĢè¿° +Ġattribut able +æĻļ å¹´ +Ġlie u +ĠM ats +Ġd if +olph in +ĠBl ind +çĽijçĿ£ç®¡çIJĨ å±Ģ +æīĢ æĮģ +ç¨Ģ åľŁ +K L +æŃ£ èĥ½éĩı 
+ib i +ĠRub ber +ï¼Ī ä¸Ĭ +Ġcheck sum +Ġperf ume +Mutable Array +ĠSales force +éĴĪ对 æĢ§çļĦ +i ast +Ġtrunc ated +åĬ¨ èį¡ +ãĢĤåľ¨ ä¸ĢäºĽ +Ġstory line +æĮģèĤ¡ æ¯Ķä¾ĭ +Ġst agn +ĠH tml +Ġcard io +el ic +ĠL af +大 æłij +_de cl +äºĨ çīĩåĪ» +// ================================================================ +ĠRespons es +b org +åĪĴ è¿ĩ +Ġroof s +å°ı 鼨 +.s uccess +Ġstraw berries +ng inx +Cross Ref +çĥŃ å¿ĥ +(p red +Ġwar riors +车 éĺŁ +\no indent +ï¼Į她 æĺ¯ +äºĨ 声 +Sus an +-ex ternal +å¾Ĺ åĥı +ĠSV G +ĠMan ning +åī¯ æł¡éķ¿ +Ġcat heter +Ġceremon ies +Res ume +åIJ¸ å°ĺ +ĠN PR +åĮ»çĸĹ æľįåĬ¡ +ev ol +nam ents +phant om +J M +Ġsulf ur +æ°Ķ æĦ¤ +Ġcort ical +f act +çŀ§ çŀ§ +ï¼Į å½»åºķ +_CON F +Spec ification +ĠWar rior +åįģ å¤ļ +æĭĽ æīĭ +Ġ- * +ĠCon cent +ĠID C +æĦī å¿«çļĦ +ativ istic +- MS +Å £ +ç²ī èī² +Ġborrow er +, 使ç͍ +Ġleft over +or on +å°± è§īå¾Ĺ +ï¼ĮæĢ» ä¹ĭ +ĠTax es +ep loy +å®Į ä¹ĭåIJİ +ĠElectric ity +. trim +Ġsh outing +èĢħ æĺ¯ +âĹ İ +ï¼Į æĹ©å°± +交éĢļ å·¥åħ· +æĪIJç«ĭ 以æĿ¥ +. St +ï¼ģ å¦Ĥæŀľ +Ġprev ail +é«ĺ åĪĨ +_s ig +æīĵ åΰ +S ense +ĠC BC +ĠMu eller +ä¸ĩ人 次 +Ġid i +D AT +æĿ¥ 形容 ++ m +ï¼Į R +ãĢģ ä¸ļåĬ¡ +è¯ij æĸĩ +âĢĶ I +Ġfill er +ĠBed rooms +æĶĢ åįĩ +Ġwater ing +åħµ çļĦ +ĠBar rett +Å Ħ +ĠC overed +æĺ¯ä»Ģä¹Ī åij¢ +unk y +gr p +é£ŀ ç¿Ķ +ãĢģ åıij +Ġ ips +oc ative +: id +æĦŁ å®ĺ +åŁĭ ä¼ı +erc ises +ãĢĤ æīĢè¿° +row ning +Ġcollabor ations +Cap ital +é¦Ļ æ°´ +æĽ´å¤ļ çļĦæĺ¯ +ç͵åŃIJ 产åĵģ +_P RI +æĺ¯ æľĢ好çļĦ +du al +Ġacknowled ging +æ°´å¹³ åĴĮ +ĠLit igation +K C +Ġind isp +oz yg +æĦı å¢ĥ +å½±åĵį åĬĽçļĦ +Time Millis +Ġ第ä¸ī 竳 +Ġst igma +ĠW ong +Ġle aking +åħ¬åħ± åį«çĶŁ +ï¼Į 管çIJĨ +-the me +èĭ ĩ +åł ´ +å³° å̼ +Ġbre wing +é¢ľ æĸĻ +áĥ ĺ +Ġcor p +- Year +åīį 线 +( Exception +éĵĥ 声 +åįłæį® äºĨ +ĠNort on +- arrow +Ġb inder +Ġch r +èIJ½ åľ¨äºĨ +åıĻ äºĭ +éϤ 以 +ĠS ev +Ġbeh old +ĠOdd s +et ched +.F atal +çĤ¹å¤´ éģĵ +Ġric her +get Type +& E +ĠProcess or +P IN +å°¾ éĥ¨ +Ġneutr ino +åijĬ è¾ŀ +W iki +Ġsubscrib ing +æľ¬æĿ¥ å°±æĺ¯ +Ed iting +ĠCh anged +/ : +ï¼Į 帮 +ĠG lor +_SH ORT +ĠE pid +æĪIJ äºĨä¸Ģ +ä¸ī 
çŃīå¥ĸ +ĠSm oke +éŁŃ èıľ +izz ard +æľīæķĪ æľŁ +ãĢĤæŃ£ å¦Ĥ +( con +Mod ify +ãĢģ æ·±åľ³ +å¨ģ å°¼æĸ¯ +å¸Ĥåľº ç»ıæµİ +pl ice +è¿ĺ ä¼ļæľī +ãĢģ 两 +å¾Ģ è¿Ķ +ĠSw imming +é»Ħ çĵľ +Ġpresum ed +Ġb ipolar +Ġv p +æİ¢ 头 +Ġfli pped +Keep ing +L aura +ä¾Ľ è´§ +åŁİ æ±ł +è¯ķ è¡Į +åĿı 人 +ĠSer bia +iju ana +IP v +t g +交 éĶĭ +ÑĤ а +D ES +T a +ĠWe ak +op hers +cal a +ro red +Ġin oc +ĠO E +å¹¶ è¡Į +che ss +_com ponent +L in +yp ed +Ġfore nsic +æ³¢ 浪 +ĠFr aser +çļĦ 空 +åıĮ èĩĤ +æŃ¦ åĬĽ +èĩª åı¤ +ä¿Ŀ å§Ĩ +cl oth +Ġdep letion +é¢Ĩ åĨĽ +åĨį çľĭ +Ġn ets +è´¦éĿ¢ ä½Ļé¢Ŀ +anc elled +ä¿ĿæĮģ ä¸Ģèĩ´ +Grid View +Ġaltern atively +çī¹ åľ° +缺 å¸Ń +åľ¨ é«ĺ +Ġbomb ing +ĠF ail +ĠAdd iction +again st +, æĭ¥æľī +ĠB JP +ä¸Ģ æĹ© +Ġsculpt ures +. Style +ãĢĤ æĸĩ +è¿Ļ 两ç§į +ĠVent ure +Ord ered +F arm +Ġcolour ful +lo oks +Present er +éŨ å°Ĩ +ç´§ è¿« +è´«åĽ° æĪ· +Ġparliament ary +H U +çģ¯ ç¬¼ +ĠInput Stream +ĠPodcast s +amar in +. plugin +æĶ¹ åĬ¨ +æĻºèĥ½ å®¶å±ħ +ï¼Ł ï¼Ī +ĠTalk ing +B oston +Ġsp ouses +ä¸ĵ 人 +ĠW rest +æĽ´ æ·± +Q P +T ogether +est ry +ãĢģ ç»ĵæŀĦ +éĿ¢ åĽ¢ +R ARY +ï¼Į å¤ĦçIJĨ +çļĦ åĬªåĬĽ +ly s +ä¸Ĭ æīĭ +å®¶ è£ħ +æµģ åŁŁ +ä¸Ń 没æľī +def s +ç»ĩ çī© +ĠSeg ment +Ġin version +b ishop +ĠV atican +Ġsn iff +Ġnarr ator +åľ¨ è¿Ļç§į +å°ij å¹´çļĦ +yn es +ĠT et +ĠSh arma +åħī 亮 +ç´¢ å°Ķ +ĠG entle +Whe never +æ´» ä¸ĭåİ» +_d ma +è¯Ń çļĦ +Ġt ails +ĠIn g +V ac +am ina +ĠDid n +çĶŁ åīį +accur acy +Pos itions +Ġen closure +ï¼Įçİ°åľ¨ çļĦ +Ġtheoret ically +.or igin +Ġm kdir +ĠSim pl +Ex ercise +ĠV inyl +Ex cept +èĤº çĻĮ +C AN +\[ [@ +Ġwarrant ed +åĸ· å°Ħ +书 åĮħ +Ġfl ashing +ä¹Ł ç½¢ +it lement +Ġillum ination +es ville +Ġbo ast +ĠDru pal +, U +åºĬ è¾¹ +.draw able +ï¼Į çļ®èĤ¤ +éĥ½æľī èĩªå·±çļĦ +Ġform ulate +urs ively +Ġacceler ating +R ON +è¿ĩ æĪij +Ġh ue +Inter rupt +occ us +æĺ¯ä¸Ģ 缴 +æ´» äºĨ +éĶħ éĩĮ +åıª è§īå¾Ĺ +åĽŀ åij³ +åĩı éľĩ +j ohn +Ġst aged +Ch ip +ny der +ay e +ठķ +. 
DEFAULT +Ġ è¢ģ +Ġv tk +-t rained +ãĢĤ èĩªå·± +ï¼ļ å½ĵ +_d bg +è¿Ľè¡Į æ²»çĸĹ +od ian +æĸ¹ é̏ +ĠP UBLIC +_sa ida +.f iles +Ġpet rol +ç³»åĪĹ äº§åĵģ +æģĭ æĥħ +ãĤ¹ ãĥĪ +m w +L ake +çŃ IJ +ï¼Įå°± åĥıæĺ¯ +M OS +Ġf aded +æĪij们 åİ» +Ġprof icient +ĠBook mark +ol on +CC C +ĠF ro +å¹´ 头 +Ġ æĺ¯åIJ¦ +o ise +æŀģ åĬĽ +Ġglo ve +ass er +Ġrun way +Ġd azz +ig ar +äºĨä¸Ģ æł· +ĠL ear +_ used +( pr +Ġ: , +大 è¡£ +æĪ¿ ç§Ł +Ġn ar +æł¹æį® èĩªå·±çļĦ +C IT +sh aw +Re commend +ç®Ģåįķ åľ° +çħ¤ æ°Ķ +ed ical +ĠJud gment +ĠNE VER +. Local +rit el +ãĢĤæį® äºĨè§£ +l atest +ot omy +.p arser +.T ypes +ĠP aw +é¢Ŀ çļĦ +ï¼Į æĺ¨å¤© +Ġx i +à ħ +è¾ĥ å¤ļçļĦ +Ð IJ +Ġconsult ancy +ï¼Į åħĭ +ç²ĺ ç»ĵ +Ġremark ed +åįģ åĩłå¹´ +åĶ § +ï¼Į æĥ³åΰ +ind ust +ä¸Ģèά åľ¨ +表达 äºĨ +ä¸ĩ 亿 +w yn +åIJĥ çĿĢ +Ġsp onge +ï¼ĮæĹł çĸij +For um +Show ing +ĠContract or +ãĢĤ åĮĹ +G U +ãĢĤ 以åīį +åºĶ å±Ĭ +script s +_ ap +John son +ĠCath olics +Ġnav al +agg ering +ãĢģ æİ§åζ +è¦ģ 对 +å¤ļ æĸ¹ +å®īåħ¨ åĴĮ +Ġgentle men +çļĦ éĵģ +Ġ§ § +å½Ĵ è¿ĺ +Ġqu as +纳 å¾· +åĮº æĶ¿åºľ +åIJĦ çľģ +af x +-b oard +æī¾ åĽŀ +Ġport folios +Lead ing +J ane +second ary +ãĢĤä¸Ģèά æĿ¥è¯´ +W ARD +Ġmoder ately +çĽij åIJ¬ +Ġshel ters +cl air +f at +åĴĮ è§£ +åĮħæĭ¬ 第ä¸Ģ +ĠT ort +大 ä¸ĵ +æ°ij 宿 +ï¼ĮæľĢ å¤ļ +æī ī +| _ +Ġspace craft +è¿· çħ³ +ä¸Ģèά æĿ¥è¯´ +urs ions +åĨĻ ä¸ĭ +ĠCh ick +Ġra ft +ox ia +çīĪæľ¬ çļĦ +(c fg +çĽijçĿ£ æ£ĢæŁ¥ +Ġsuperf icial +" [ +_R GB +d aughter +è¦ģ æĪij +ĠEs sex +Pro cedure +Ġexpl oded +éĥ½æľī çĿĢ +_column s +æģ¢å¤į æŃ£å¸¸ +å®¶éķ¿ ä»¬ +ast rous +严 å¯Ĩ +Ġd itch +ĠC PP +ly cer +é¤IJ åħ· +_ em +Throw able +L iber +R am +ĠH Q +åħ³ ç¾½ +ĠQ C +}) ^ +ä¸į å°± +ãĢģ åĽ¾ +Con nor +åħ³éĶ® çļĦ +æķĪæŀľ çļĦ +ĠK och +Ġha irc +(t rain +åIJ Ŀ +ĠV irt +thread s +èᣠ幏 +ĠDet ailed +ĠCut ting +çłĶç©¶ æĪIJæŀľ +Ġdownt ime +Ġwast ewater +Ġth inner +~ , +åŁİ 管 +C ases +åĬŁ æ³ķ +ç»ı常 æĢ§ +te acher +Ġann ex +èįī æľ¨ +Ġed u +èĢģ 天 +ĠPat rol +Or ange +Ġpres et +Ch rome +æľĢ大 åĮĸ +_al ias +æ¯Ķçī¹ å¸ģ +ãĤ ª +çļĦ çIJĨè§£ +form al +Ġadv ises +Ġcl ones +rel ax +Ġimplic ated +j pg +Ġconsult 
ed +åĶ ¬ +Ġur ges +_STAT IC +Ġd ass +æĹł èĢ» +è´µ éĩijå±ŀ +ĠB EGIN +ĠR SA +Ġs que +And y +Ġ åİŁæľ¬ +L ate +p ent +r ass +S anta +Ġmorph ological +ĠLL VM +ĠUnity Engine +Wait ing +ĠHung arian +Ġa uch +Ġexperiment ally +pat rick +忽çķ¥ äºĨ +èĪĴ çķħ +ic ism +Z E +alt ed +-- ) +Rest aur +ĠEli as +Ġse ize +åIJ¼ éģĵ +åı· 楼 +åΰ æīĭ +, 常 +ĠK eto +ï¼Įéĥ½ èĥ½ +ĠSal on +" ` +IV ITY +Ġassemb lies +Ġs ind +ãĢĤ æĪĸ +ĠMAT LAB +ĠAust rian +b ing +bol a +广 ç͵ +ĠIN PUT +ï¼Į èĮĥ +GLOB ALS +éĿ© æĸ° +ç´§ è¦ģ +伸缩 æĿĨ +Ġdepart ed +ON LY +C ro +Ġexhaust ion +Ġsen ator +ĠTer ra +umb ar +ĠI vy +æĹħ éĢĶ +Ġed ible +Ġinv itations +åľ°çIJĥ ä¸Ĭ +Ġrefund s +ĠWe ber +åħ¬åı¸ 竳ç¨ĭ +è¿Ļä¹Ī 好 +Custom ers +çļĦ æĢ§èĥ½ +çŁŃ 缺 +-z A +,è¿Ļ ä¹Łæĺ¯ +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ +ĠMin ute +Ġtrim med +ï¼Į çݯå¢ĥ +ĠF lickr +å¾Ĵ æŃ¥ +æĶ¶ æĶ¯ +_ angle +ĠMorm on +ãĢĤ è§ģ +In sets +Ġine fficient +_g pio +. EX +åħ¶ äºĭ +æĢ¥ ä¿ĥ +_A UTO +ï¼Į çĶ·äºº +ĠE tsy +OR N +Ġlegisl ators +带 éĺŁ +Ġal gae +Ph ysics +çŁŃæľŁèŀįèµĦ åΏ +ĠH ann +Ġmark up +Ġupload ing +åħ·æľī èī¯å¥½çļĦ +c ox +ä¼ĺ äºİ +ä¾µ åħ¥ +çļĦ æľĢåIJİ +yp se +.t ree +( settings +ĠH aus +åѦ éķ¿ +ä»ħ 代表 +oret ical +æŁľ åı° +r pm +Ġmat s +iat ry +- General +èᝠåºĹ +ãĢĤ æĹ¢ +Ġp aved +ä¹ĭ æľ¯ +_M AN +Ġco ords +Ġped ag +ĠR anger +z n +Ġo der +Ġsl am +çļĦ 对æīĭ +ĠPro posal +ĠMic he +é½IJ é½IJ +æıĴ æ§½ +ĠBudd h +ãĢģ ä¼ijéĹ² +驱åĬ¨ åύ +_z one +Ġp eren +æľª æĪIJ年人 +å¥ĭ æĪĺ +Ġtoug her +ç® į +Or Default +ĠTV s +Ġeconom ists +w arn +èĩŃ æ°§ +a ção +ä½ı æĪ· +èį£èªī ç§°åı· +Ġkilomet res +Ġ åı¸ +en h +ĠE rik +èįī åľ° +ĠD ependency +ex amples +åķĨ ç͍ +ime o +ï¼Į è¿IJç͍ +ä¸Ń å°Ĩ +( ... +Ġgre edy +.ar ange +åĪĨ æľŁ +Loc ator +ĠH uawei +åŃIJ åľ¨ +Ġco herence +_TIM ER +ĠCeleb ration +Ġpropos ing +OUT PUT +I EEE +人 æīĭ +å¿Ĺ è¿ľ +ç»ĵæŀľ çļĦ +åħ® åħ® +å̼å¾Ĺ 注æĦıçļĦæĺ¯ +" ... 
+æĹģ è§Ĥ +Ġb if +ãĢģ æĺİ +åħ¬åı¸ åĴĮ +æĮĩ å°ĸ +æ¯Ľ è¡£ +Ġbron ch +ç¯ ¡ +æĪIJæľ¬ ä½İ +bal anced +w rong +Ġtransform ative +伤 çĹķ +èĭ ŀ +åĩº éĶĻ +emb ourg +æĹł ä¸į +Ġais le +æ¨ Ł +åIJİ å¤© +- ext +m ins +ĠN ano +åľ¨ 第äºĮ +Ġad orn +m ala +èĦĤèĤª éħ¸ +Ġw ir +æİ¨ ä»ĭ +Ġdat as +Ġt attoos +ï¼Į è·³ +Ġprogress es +ĠG OD +çļĦ åĮºåŁŁ +Ġim minent +ĠTrack er +_F ULL +å»¶ åºĨ +ä¼ģä¸ļ åIJĪå¹¶ +Ġper severance +ĠChief s +\| _{ +èĦ¸ éĥ¨ +Ġill umin +çļĦ女 åŃ©åŃIJ +LOG Y +Ġfost ering +å¤ĸ åľ¨ +åįģ åĩłä¸ª +çģµ åĦ¿ +ĠIn gredients +ãĢĤæĪij åĽ½ +C her +å¿ĥçIJĨ åĴ¨è¯¢ +Ġaggreg ated +C HECK +pp ler +ä¸īåįģ åħŃ +S IM +ĠC ognitive +cre w +羣 æĥħ +** ]{} +åij¨ åħŃ +are th +ä¸Ń 空 +æĺŁ éĻħ +.T ask +/ etc +L ifestyle +åī¯ éĥ¨éķ¿ +ĠTor res +Ġm ansion +æ²³ è¾¹ +åIJĮ ä»ģ +Ġcomput ations +iqu ity +åĽ½å®¶ æłĩåĩĨ +èĨĢ èĥ± +è§£ æķ£ +éĺ² æ±Ľ +empt yset +æ»ĭ åħ» +ãĢģ ç½Ĺ +麻 çĹ¹ +/ con +æīĢ è¦ģ +UI Kit +太 ç¥ĸ +Ġconf inement +_se cret +c red +æ·± éĤĥ +NEW S +Ġ æľįåĬ¡ +Ġc ot +)init With +çŃĽ æŁ¥ +. property +罪 æģ¶ +Ġamer ica +Ġf oc +af a +ĠChap man +Ġl ays +主è¦ģ 为 +åĩºçݰ è¿ĩ +åıijçĶŁ çļĦäºĭæĥħ +æĸĹ å¿Ĺ +èµĭ èĥ½ +Ġattent ive +Reg arding +ĠM sg +Ġyield ing +ĠEd iting +ć ãĢģ +f unnels +çα æĪij +èĤ© ä¸Ĭ +ĠAR G +in ine +ä¸įä»ħ åı¯ä»¥ +åľ° çĽĺ +å¹³ åĿ¦ +Count s +çıĬ çijļ +æ£ķ èī² +è¶³å¤Ł äºĨ +oub t +-a fter +ï¼Į æķĻèĤ² +åħ¬ è¯ī +Ġloc om +. merge +æĪij们çļĦ çĶŁæ´» +Ġpay check +Ġachie vable +导 åĩº +ĠCong ratulations +t cl +åIJİ ä¼ļ +è¿Ļæĺ¯ ä»Ģä¹Ī +Ç İ +Ġt urtle +es ch +.r ange +çļĦ åij½ä»¤ +ãĢij : +(* ) +Ġ æľ¨ +Ġ æīĢæľī +Ġen chant +.st orage +éĽĩ ä½£ +ĠC andidate +æ¸ħ èĦĨ +åĽ¢éĺŁ æĪIJåijĺ +r ified +ãĢĤ åĮħæĭ¬ +çIJ ¥ +Ġmeaning less +Ġsav vy +-ch ain +çĨŁ çļĦ +. 
var +ç§° çļĦ +P ause +y ch +ĠBullet in +åī ģ +TOC OL +Ġthro ttle +å®ļ 为 +d or +Ġdisrupt ive +èIJ½ äºĨ +ä¸įèĥ½ 让 +b ins +ï¼Į åį« +ĠBath s +çĹĽ çļĦ +}} }$ +ĠBoard s +å°ı çģ« +éĹ®é¢ĺ ä¸Ĭ +æĨ§ æĨ¬ +表 åĵ¥ +Ġabdom en +ä¸Ģ缴 éĥ½ +çĵ· åύ +æĸĻ åΰ +Ġmat hematic +Ġt exas +ĠB old +æľĪ ä»» +ãĢĭ ï¼Ľ += r +Ġmod s +æľ¬èº« å°±æĺ¯ +åı¯è°ĵ æĺ¯ +åĨ· éħ· +ĠSan chez +D ub +Ġo ath +æµ· è¾¹ +ist les +AC L +N X +ï¼Į åĸľ +èϽçĦ¶ 没æľī +Ġjoy ful +ä¼ļ æīĢ +-c oll +æĢ» æĶ¯ +é²ľ æĺİçļĦ +ĠL PS +åĩıå̼ æįŁå¤± +å°ı ç»ĵ +ĠSub sequently +æį¢ 个 +_m d +Ġheight ened +ï¼Į æIJŃéħį +Rest ore +åĪĨ æµģ +ild a +åĵģ å¾· +Ġel ders +å¢ŀ é«ĺ +Ar thur +á t +d rag +大 殿 +Ġext rap +cher y +举è¡Į äºĨ +, æĽ¾ +çĭ¬ æľīçļĦ +Ġcommand ed +ãĢģ åį«çĶŁ +Ġform atter +纳 éĹ· +Enc oded +Ġancest or +åĭĴ æĸ¯ +ï¼Įæĥ³ æĿ¥ +.b in +ĠHor izontal +ï¼Į 注éĩį +( Class +Ġs lick +å¨ Ĩ +åīįæıIJ ä¸ĭ +Ġsurviv or +ç¨ ¼ +è¨Ģ èĩªè¯Ń +ĠDe er +èį· èĬ± +åĽłæŃ¤ èĢĮ +Ġprogress ively +ĠPack ers +ĠScient ists +_w rapper +-N LS +default s +Ġnurt ure +m ur +_ el +Ġwh ales +ä»» æĢ§ +åĩı åİ» +-sh aring +uls a +, ç»ĵåIJĪ +æľ¬ çĿĢ +In cludes +åıª æĢķ +Requ irements +èµ°äºĨ è¿ĽæĿ¥ +ί ν +çĮ ¾ +_OP TION +xxxx xxxx +( property +èĢģ å¦Ī +åĮĹ ä¸Ĭ +e asy +æĹ¶ æĹ¶ +çļĦç¡® æĺ¯ +h app +éĵł çͲ +çļĦ åı¤ +ï¼Įä½ł åľ¨ +ient o +.fl ush +éĴ± è´¢ +ç͵影 èĬĤ +ĠP LL +ĠK ab +DE CL +è¿Ļ æĸ¹éĿ¢çļĦ +éĥ¨ ä¸ĭ +Th ing +ï¼Įåħ¶ä¸Ń åĮħæĭ¬ +ï¼Į å¢ŀéķ¿ +her its +_object s +, æĢ»æĺ¯ +Ġc abbage +rav ings +çľ¼ äºĨ +ĠBehavior al +èĽ ¤ +Ġcoinc ide +ãĢĤ éĩĩç͍ +åıij表 äºĨ +çĿģ çĿģ +âĢĶ to +- aged +è£ ĺ +.h igh +Ġent ang +å¼¹ åĩº +ä¹IJ ä¹IJ +åĽŀ é¦ĸ +è¿ŀæİ¥ åύ +åŃĻ å¥³ +re ase +ä¼ļè®® çļĦ +oc urrency +dis patch +CON D +max imum +éĺ ij +她 åĴĮ +e el +èĢĥ éĩı +rem ember +ç§ij å°Ķ +Ġque er +Ġp pm +( Name +æ±Ł åĮº +yth m +èĮ¶ æĿ¯ +Ġinform s +K I +ĠF ork +ĠSalv ador +\ vert +ll er +çļĦ éĶĢåĶ® +æĿİ æŁIJ +-w ar +ĠR PM +Ġstraw berry +, åĩłä¹İ +ï¼ĮæĪij 没æľī +Ġprof essions +ĠD ee +Ġmulti player +人 çī©çļĦ +äºij 端 +顽 åĽº +_S PEC +æijĦ æ°ı +ĠE lim +Ġ# - +éĤ£ åIJį +M ON +CEPT ION +æĦı æĥ³ä¸įåΰ +Ġ 她çļĦ +P rep +ĠM ush +ä¼ł 
æĿ¥äºĨ +quo ise +equ ential +ä¸Ń æĢ§ +å®¶ éķ¿çļĦ +ĠDirect ive +ynt hetic +ä¸ĭ 楼 +çī¹ æĿĥ +Ġli ed +Ġaccount ant +çļĦ è·¯ä¸Ĭ +( word +ä»ĸ åį´ + ¦ +ä¸Ģ æĢĶ +å°ıç¼ĸ å°± +ĠAlexand ria +o ing +ous ed +Ġnomin ations +çļĩ 室 +Ġtroubles hooting +Ġ éĤ£äºĽ +ĠT I +reg ex +åįģäºĮ æľĪ +ĠSac red +Ġeng ages +Ġdr m +( ä¸ī +ï¼Į ä¾§ +å¸ĥ æĸĻ +æĥĬ 天 +Ġindex ing +éªĨ 驼 +( level +Ġ åIJ¬ +奥æĸ¯ åį¡ +.dat etime +[ C +ath i +Ġrabb its +äºĨ åĩłä¸ª +è¿Ľ åİ»äºĨ +em os +è¢ ħ +åĴĮ æĹ¶éĹ´ +Ġdefend ers +æ´ŀ åı£ +Ġsch izophrenia +lik ed +_a udio +æ°¸è¿ľ ä¸įä¼ļ +@ class +? ). +Ġ åºĶ +end point +ĠVik ings +Ch anging +Rec order +æľº å¯Ĩ +Gold en +ãĢĤæĪij è§īå¾Ĺ +ĠShe ffield +æĽ´ éĩįè¦ģ +åı£ 岸 +è¿ĺæľī ä¸ĢäºĽ +_NAMES PACE +Ġn ylon +ffff ffff +ĠJu ice +èĦļ çļĦ +Ġparas ites +ä¼ļ éĢīæĭ© +Ġposit ives +W al +ĠB out +ä¸ĭ éĥ¨ +èµĽ éģĵ +æī© 建 +åī¥ å¤º +åĮĹ è·¯ +cl ang +_in stall +BY TE +Ġsp heres +çĶŁäº§ åĬĽ +w oo +Ġsal ads +Ġemb ell +ç½ijç«Ļ ä¸Ĭ +ä¹ĭåIJİ å°± +æīĭ æŁĦ +.S pec +ï¼ Ĥ +çĹ Ĭ +It alian +pre view +ç¹ģ å¤ļ +, åģļ好 +S id +éĢļ ç͵ +U id +ee le +ĠTr ader +夯 å®ŀ +ĠMan or +Ġdoub ling +pe ek +Ġfl uffy +çļĦä¸į è¶³ +ĠIncre asing +os ke +ish able +ĠImport ance +åľ° 主 +è´¨ ä¿Ŀ +åıį æĦŁ +Ġsoft ly +Ġimproper ly +ãĢģ æ±Ł +æĶ¹éĿ© çļĦ +ĠV III +pp m +åħ« çϾ +éĢģ æĿ¥ +ç»ĻäºĨ ä»ĸ +ĠT act +æĸ¹ åı¯ +å¤ĸ å¢Ļ +Ġlink age +Ġtr out +Ġpolic ym +åħ¬å¼Ģ äºĨä¸Ģç§į +Prot ection +å±± æŀĹ +ï¼Į æļĤæĹ¶ +ĠW V +P itch +ĠV ick +" They +Form ula +çļĦ ä¿¡ +è¶³ åįı +U PS +Ġperson a +uck ing +åĩı æİĴ +çī©çIJĨ åѦ +( gl +W are +Ġsol ids +Dep loy +æĭ¥ åłµ +ï¼Į 女人 +Ġsh ines +å·ŀ åĮº +èĩ´ 使 +ant es +Ġacc ret +ï¼ĮéĤ£ æĹ¶ +SE M +ĠSw ing +] == +ï¼ĮæĹł å¥Ī +ä¾Ŀèµĸ äºİ +ĠInsp iration +ï¼Į å͝ä¸Ģ +- around +Impro ve +ĠW B +ĠG REAT +大 é£İ +å®ī åİ¿ +ah i +ï¼Įéĥ½ æľī +åı¹ æ°Ķ +éĿ¢ 容 +Âł æĿ¨ +ãĢĤåħ¨ æĿij +Ġp ore +ĠJ agu +éĩįè¦ģçļĦ ä½ľç͍ +_gener ator +æ°Ķ åĽĬ +/ open +Start up +ĠD ex +UD P +æ·ĩ æ·ĭ +ary n +Ġcommun al +æĴĩ åĺ´ +ĠGonz alez +g ene +Ġhand ing +æĸ© æĿĢ +out ines +Ġclos ures +ãĢĤæľī æĹ¶ +. Byte +$ ? 
+车 çīĮ +UN TER +_ sock +äºij 计ç®Ĺ +ĠSch war +Ġoblig ated +åIJİ åľ¨ +_c mp +_c ols +Ġê ° +åĢ ı +Ġdown stairs +ĠBald win +çļĦ 建设 +èĥ½å¤Ł 让 +å¿§ 伤 +åħ³èĬĤ çĤİ +Ca ption +ãĢĤ ä¸Ĭè¿° +s quare +没 å¤ļä¹ħ +æĪ´ ä¸Ĭ +-r ays +_ words +{ | +ĠG tk +- area +Ġnecess ities +Ġvol leyball +ĠPat terson +n ear +æķĻèĤ² åİħ +\ Big +Fore ground +ST A +b idden +Ġb h +Me eting +or r +Ġrec reate +d ead +ĠShe ets +Abs olutely +Ġf encing +ed ed +ĠT ian +ĠPres cription +åij¨ åĪĬ +_s z +äºĨä¸Ģ åĿĹ +群ä¼Ĺ çļĦ +Ġcarp ets +b ull +第äºĶ 竳 +: * +çĿ ij +ä¸į æīĵ +宣 è¨Ģ +éĩijèŀį æľįåĬ¡ +Ġmel ody +ï¼Į æĬ½ +Ġad missible +GR APH +k at +per haps +产åĵģ åĴĮ +åįĸ çļĦ +转æį¢ æĪIJ +ĠMoh ammed +k j +åĩº çĤī +å¼Ģ åľº +ĠSun shine +.set Value +è§ģ äºİ +åĽ´ æĶ» +æĹı èĩªæ²» +æĺ¥ æĻļ +ï¼Į æĥħ +主è¦ģ çͱ +Ġround ing +- engine +ãĢĤ çľĭæĿ¥ +ĠT weets +ï¼Į éĴŁ +æŁ¥ éªĮ +é»ij 人 +å¿ħè¦ģ æĹ¶ +out heastern +éŁ³ ç®± +for th +Cl osing +ä¸ĩ 亿åħĥ +Ġ çĤ¹åĩ» +ĠLif etime +Less on +éĩįè¦ģ 讲è¯Ŀ +ad c +, å¿ĥéĩĮ +大 çľ¼çĿĽ +åĴĮ æİ§åζ +rapeut ics +çĶŁ åŃIJ +Ġconnect ors +å¥ĭ è¿Ľ +é«ĺ级 管çIJĨ人åijĺ +Ġ åį³ +Ġpr istine +ĠTrain er +ĠC PA +to ire +_EN UM +,æľī æĹ¶ +WE B +ç§ § +Ġpur ification +ĠHigh land +ä¸įç¡®å®ļ æĢ§ +è§£ é¢ĺ +ãĢĤ åݻ年 +Ġun ite +æĹłéĻIJ çļĦ +ĠT enn +ãĢģ æĵįä½ľ +ĠTe h +æĦıè§ģ 书 +èij © +ĠS askat +è¿Ļ è¾ĪåŃIJ +åĸĺ æģ¯ +审计 æĬ¥åijĬ +, å¸Ĥ +主 æīĵ +æľ¬ 人çļĦ +H OST +Ġk b +åĬł çĽĸ +ï¼ĮèĢĮä¸Ķ åľ¨ +Ġban anas +_C TX +ĠSun set +ĠC i +ĠJ ag +ĠK ris +ï¼Įè¿Ļ çĤ¹ +Ġexpl or +AB ASE +i age +çĭ¬ åħ· +ĠT aken +Ġrel ational +A ware +pret ty +å®īéĿĻ çļĦ +Div ide +Ġalt ar +Ġant imicrobial +ĠYes terday +ë ı +_r ules +z d +ï¼Į ç§ijåѦ +ĠBi ography +个æĢ§ åĮĸçļĦ +ĠI sh +Ġ åģļ +aph ore +é«ĺ å°Ķ夫 +çľĭ çĹħ +该 æŃ»çļĦ +ĠPar ade +æĪIJæľ¬ çļĦ +åĴ¬çīĻ åĪĩ齿 +æĪij çĽ¸ä¿¡ +Ex clusive +p riority +主 æ²» +é£Ł è°± +ä¸į åĸĦ +ãĢģ 绿èī² +æĪij å°±æĺ¯ +)$ - +Ġaggrav ated +çļ ĸ +ĠN ixon +é«ĺ èģĮ +( /\ +inherit doc +ãĢģ æµĻæ±Ł +é«ĺçŃī åŃ¦æł¡ +çĭ© çĮİ +orget own +ĠA H +Ġsp ikes +ä¸Ģ次 çļĦ +AAAAAAAA AAAAAAAA +/ åIJ¨ +两 项 +Ġplanet ary +(result s +h adow +æľĽè¿ľ éķľ +Ġt sd +.com pute +è¡Ģ 
èĤī +Ġcris py +( column +ĠBl end +, # +ER IC +_n s +ĠRef resh +-con sc +Ġdisag reement +Ġ éķ¿æľŁ +ãĢĤ çĿĢ +Ġg ir +Ġ... ) +æĥ³è±¡ åĬĽ +ad ia +åķĨ èªī +ĠP ond +ãĢĤ 缸åıį +ï¼Į åIJĦ个 +urs ed +ãĢģ æ³ķå¾ĭ +缴 éĶĢ +bound ed +h aving +C ss +产 å¦ĩ +èįī èᝠ+å¹³åĿĩ å̼ +为 çͱ +Is lam +å¿ħ æľī +Never theless +ĠU rl +è¯ģ 人 +-m ails +Ġdiscrep ancy +} f +Ġprox imal +Ġpredict ors +Ġworkflow s +åĨį åĬłä¸Ĭ += N +ĠPC B +æ·± åİļçļĦ +ĠA val +åĿ · +Ġlong itude +åĽŀåΰ å®¶ +åĬ¨ å¼¹ +print ed +Ġstri ker +ĠÏĢ Ïģο +, æīĢæľī +_d elta +äºĨä»ĸ ä¸Ģçľ¼ +âĢľ But +ĠComb at +b idity +å¹´ æ¯ķä¸ļäºİ +ï¼Įä¹Ł 让 +ç»ĵæŀĦ è¿Ľè¡Į +ĠP DT +å¥ĩ å¹» +åįı å®ļ ++ d +. ru +çļĦ 建çŃij +Ġrest ed +åįģä¸ī æĿ¡ +ç»Ļ åĪ«äºº +æ³ī å·ŀ +il tered +ó w +ĠF ur +æľ¬çĶ³è¯· å®ŀæĸ½ä¾ĭ +ç͍ ä»Ģä¹Ī +课 æĸĩ +Ġsubs cribed +-he arted +群 éĩĮ +ä¸į èĩªè§ī +Ġ åİ» +Ġp om +å¯ IJ +dd ing +åľ¨ æİ¥åıĹ +< html +Ġ è¿Ļæł·çļĦ +æķĪçİĩ é«ĺ +Ġun install +C oe +, æīĵéĢł +ĠUn ix +æĮĩ äºĨæĮĩ +ä¹ŀ ä¸IJ +ï¼Į æĬĵ +å¥Ķ æ³¢ +ĠSpec ies +æļij æľŁ +ĠHam pton +G NU +} '. +è·¯ ç¨ĭ +rend erer +çļ® éĿ© +èĥ° å²Ľ +ĠMer cy +èĢģ æĺ¯ +if ace +åħ¨ åijĺ +Local ized +. require +ï¼Įä¸Ģ 天 +ĠAccess ibility +ĠChild hood +åΰ å®¶ +èµĽ ä¸Ń +夫 æĸ¯åŁº +ĠChe ng +Ġ å®¶ +ov i +so on +å¢ŀ æĶ¶ +TreeNode Img +Âģ ÃIJ +Ġshock s +æĭ¥æľī äºĨ +åŁºç¡Ģ设æĸ½ 建设 +éĩį è¿Ķ +åİŁ æłĩé¢ĺ +Ġh amm +ĠMat thews +Ġgu itars +P OP +æİ¥ éĢļ +PC B +è¿· 人çļĦ +éļ¾ä»¥ 置信 +ĠD OT +ĠR i +满足 äºĨ +çĤ¹ éĴŁ +ĠSupport ed +ą ï¼Į +åĿļæĮģ 以 +ãĢĤ æľ± +az ard +- cons +Ġcheck box +- att +END OR +Ġcont ag +æıIJ çĤ¼ +é£ŀ èĪŀ +Ġtit anium +åŃĹ åı· +_P OST +- To +ä¸Ģèά 人 +èĺ ¸ +Ġesc aping +çļ±çĿĢ çľī头 +- help +å·§ çļĦ +G i +W ide +ï¼Įå¦Ĥæŀľ æľī +/ Z +ï¼Į å·¥ +ãĢģ è¿ĩ +_ like +_ch anged +滥 ç͍ +Ġexpos ures +ĠV u +ä¸įåľ¨ æĦı +å¾®éĩı åħĥç´ł +Ġnutrit ious +è§£ åīĸ +_P OL +. 
qq +Ġinfil tr +çļĦ ä¼ĺçĤ¹ +mult iple +, ä¼ģä¸ļ +âĢ Į +ä¸İ 该 +åħ± æĮ¯ +am ac +.Get Type +ĠBig Decimal +Ġst unned +Ġin i +æĹł ä»İ +åIJĮ å¹´ +is ional +ä¸Ń éĢīæĭ© +ï¼Ł çİ°åľ¨ +_comp lete +Ġdict ate +, éĺ²æŃ¢ +è£ħ è½½ +èĭı éĨĴ +åĪ» çĶ» +ARI ABLE +æĬĬ éĤ£ +èħ¿ éĥ¨ +ĠWin ners +.sh ared +.G lobal +ĠBul ld +-g rid +Ġple as +Ġ ä¸ģ +ĠS q +ĠU nt +å°Ĩ 为 +Ġv ortex +ĠBur ger +ï¼ĮåĪĨåĪ« æĺ¯ +Ġstar red +ĠRet rieve +奴 æīį +Ġbiom arkers +(( * +Ġg ust +èĥ¡ 说 +æĩĤ äºĨ +Ġcomplic ation +ë § +_SE TT +D VD +ov irus +ä¸Ģ对 ä¸Ģ +Ġâĸ ł +- items +Ġc reek +æµģ åIJij +èᝠå¸Ī +] ]. +èĢĮ 导èĩ´ +èħ° éĥ¨ +isc iplinary +Ġintrig ued +伪 éĢł +Ġreb ound +keep ers +c amera +ï¼Į 诸 +ber ly +-f iction +Ġassign ing +Bound ary +Ġfingert ips +ĠS lovak +)) \ +å¨ © +ĠLET TER +. (* +Ġ第äºĮ çϾ +ro cal +为 以ä¸ĭ +ĠMechan ics +ãģĤ ãĤĭ +ä¼ł çIJĥ +Ġreplace ments +) p +ĠI gnore +大 涨 +ĠBel arus +Ġrock ing +Ġd ors +ĠW it +ĠSERV ICE +ĠBrow ns +ĠCare ers +ĠLe ase +å°ı å·· +(s kb +Enter prise +大 åŁİå¸Ĥ +ED TLS +宽 éĺĶ +Ġimport ing +ĠN FC +ï¼ģ éĤ£ +çİī çŁ³ +Equ ipment +ĠCong o +éĥĿ ä»ģ +- eyed +Ġex ting +å°ij æŀĹ +æĬµæĬĹ åĬĽ +è¿ľ åı¤ +Al ive +Ġconv olut +è§£ å¯Ĩ +Ġsub routine +Ġwh ipped +ĠAd apter +Ġ{} ", +ä¸į åħ¥ +Ġ æĽ¹ +âĢĿ ï¼ļ +éĶ Ń +èĢĥ ä¸Ĭ +Ġellipt ic +ä¸įæĺ¯ ä½ł +Ġn h +è¦ģ ç´§ +Min utes +ï¼Į 缸äºĴ +Ġsw ipe +th alm +åľ°æĸ¹ çļĦ +IL ER +- we +Ref lection +Ġdoctor al +弯 èħ° +Ġf ö +.S p +re on +éĤ£ä¹Ī ç®Ģåįķ ++ r +ĠLaw n +Ġexperiment ation +$ ), +_t uple +åºĶ该 åľ¨ +Ġunconst itutional +æ²¹ èĢĹ +ï¼Įè¿ĺ 没æľī +ĠEst ablish +Ġst rives +ĠCas inos +åĽºå®ļ æľī +Ġh ym +Ġre combinant +麦åħĭ é£İ +å¤ļ ä½ĻçļĦ +RES ENT +ĠInnov ative +, 她çļĦ +ä½įäºİ æīĢè¿° +Ġcov id +Ġs yst +麻çĥ¦ äºĨ +ĠPere z +ĠP ag +ï¼Į å®Ī +ä½Ļ å¹´ +èĥ¡ åIJĮ +ç¬ij çľ¯çľ¯ +Ġadvoc ating +çĦ¶ åľ° +ĠRET URN +- α +ĠSo il +ç¾Ł åŁº +åĩı åħį +Æ ° +L ayers +ãĢģ åĮĸåѦ +ä¼ļ åIJĮ +ç®Ĺ ä»Ģä¹Ī +èĮ¶ æ°´ +ent ropy +ĠU R +Ġgo ats +( None +ä½Ľç½Ĺ 伦 +Ġtall er += g +Ġ 温 +Ġamid st +æ·± æµ· +-f l +put s +å®ĥ åľ¨ +çļĦ é£İæł¼ +éħĴ 楼 +åĤ» çĵľ +Ġy acht +ĠHe arts +al on +æĹģ 人 +ï¼Į æĬ¥ +ĠK erala +Ġinc 
umbent +ĠPhilipp ine +Ġ ^{ +Ġm ell +Ġrot ary +Ġpave ment +åı¯ æľī +ĠRun ner +Ġf name +Ġpostpon ed +ĠSpe ak +Ġastr onaut +Ġt andem +ad apt +ĠY uk +åįĥ 人 +å¤ĸåĽ½ 人 +ĠMeet ings +头 åĥı +ĠI CE +ad ors +ĠZ ur +èĪª è¡Į +毫 åħĭ +(def n +Ġinterven e +S izes +Ġoscill ations +åł¡ åŀĴ +G lyph +çαæĥħ çļĦ +çĤĴ èĤ¡ +ĠP ing +Ġconjug ate +âĢľ åľ¨ +j c +ib ble +ĠFe et +å¼Ģ å±Ģ +-M art +åĴĮ ç»ıæµİ +èµ° åIJİ +w ifi +æĢ¨ æģ¨ +æĺ¯ æĢİæł· +Ġ çīĪ +Ġmod eration +åħ¨çIJĥ åĮĸ +Ġenlarg ed +ä½ł 没æľī +èĩ´ è¾ŀ +çŃī éĥ¨éŨ +Ġplan ar +ĠCr ash +Ġw ary +éĵ¾ è·¯ +主 线 +ĠRem ark +æµĭ éªĮ +cul ation +_f rames +Ġtheat rical +æĸ° ä¸Ģè½® +è¿· ä¿¡ +è² Ĥ +_ arch +追 æį§ +Ġspeed ing +ĠHar bour +Us es +ĠK hal +æµĵæµĵ çļĦ +mon ths +ocal ypse +Supp ly +ĠDipl oma +Ġc ps +Ġbetray al +ĠMig uel +/ res +è¿Ļ个 è¯į +_ AM +ob server +Ġp iles +N Z +æ·± çŁ¥ +ε ÏĤ +åīį åįģ +Ġexhib iting +! []( +å¹´ åΰ +èĩª èĢĥ +Ġfoot steps +ï¼Į åħ» +User Name +Ġproject ive +Property Name +åIJĥ èĭ¦ +åĶ¿ åķ¸ +K V +æĹ¥ æ¸IJ +åı¯èĥ½ åľ¨ +ĠPT SD +l od +Count y +Ġv ous +S aint +ä½ł ä¸įè¦ģ +vers ely +ï¼ĮæīĢ以 æĪij们 +-m ember +ĠM ae +ĠSan ct +Particip ants +/ Web +Ġaqu arium +Ġ èĵĿ +ĠL AN +åĽ½æ°ij ç»ıæµİ +åıij åijĨ +Ġconfident ly +T ickets +Ġd addy +n umpy +ä¸Ĭä¸ĭ æĸĩ +Ġpolym ers +âĢĵ âĢĵ +ï¼Į车 ç¨ĭ约 +Ġw agon +ĠLie utenant +Ġ ç»Īäºİ +涨 ä»· +Ġsp or +åĽ¢ å§Ķ +_DE LAY +, ä¼¼ä¹İ +Ġswing ing +Ġ éĢĤç͍ +ï¼Į æ¢ħ +ĠR he +çİ© äºĨ +å·¥ä¸ļ åĮĸ +ag ation +ĠE lections +Ġmanip ulated +-s eries +Ġdr astic +g is +éĺ¿ åĵ¥ +in ol +ump ing +O il +S ell +Ġsk b +éŨ æ´¾ +ç¾½ ç»Ĵ +Ġincomp atible +æķ ķ +ev idence +R ace +AG ER +åħ±åIJĮ çļĦ +ç»ıæµİ社ä¼ļ åıijå±ķ +Ġmil estones +ĠC NC +-off s +çĤ¼ åζ +ĠT ina +ĠMar itime +ĠBrew ing +çļ®èĤ¤ çļĦ +交éĢļ 大åѦ +S it +ĠMac ro +ç»Ļ人 ä¸Ģç§į +Ġatt ic +ä¹ı åĬĽ +çļĦ çĶ»éĿ¢ +马 æ¡¶ +æĬĹ çĶŁç´ł +,以 åħį +Che ap +/d ist +Ġ åŃ¦æł¡ +Ġr ud +ä¸İ åºĶç͍ +管 å§Ķä¼ļ +è£Ĥ 纹 +ĠPun jab +ãĢģ æ´»åĬ¨ +.c os +ï¼Įä½Ĩ çİ°åľ¨ +ĠB oulder +ull a +co e +大 å®Ĺ +Ġsw orn +Ġco ils +Ġe commerce +æĢĿæĥ³ æĶ¿æ²» +ĠM H +L ik +v ä +ĠV EG +é»ij å½± +, å®ĥçļĦ +Ge orgia +ĠN ights +lo 
on +ant ine +ä¹Ł åįģåĪĨ +Ġprohib ition +> - +Ġ æĹ¥æľ¬ +Ġinc ur +ĠPred iction +éĢī çļĦ +ĠFor get +M ER +ub es +Ġ/ \ +âĢĶâĢĶ âĢĶ +g oog +oo b +Ġactiv ism +-link ed +ĠD ON +Ġvit ality +Ġon c +ï¼Į åĬ©åĬĽ +åľ° çĽ¯çĿĢ +eg ie +ĠAub urn +in ions +æĹ© é¥Ń +éļĶ ç»Ŀ +æģ ĥ +Ġmand ated +åĽŀ éģĵ +(name of +Ġmin ors +ign on +ĠJ ude +éªij çĿĢ +Ġstere otypes +åħ³æ³¨ æĪij们 +ĠT inder +è¡Į è¿Ľ +ser ve +ç²ĺ 度 +æĬĽ åĩº +Ġboss es +ett ed +ãģĵ ãģ® +缸 ä½į +详 è§£ +çϾåĪĨ æ¯Ķ +Ġbegin nings +, åĮĹ京 +ĠD AC +ï¼Ł ä½łä»¬ +ï¼Į åı² +æľ¬ æĺ¯ +æľº åĴĮ +Ġpl ac +Res olve +oph age +æľī æķ° +Ġadm ired +èĮĥåĽ´ ä¹ĭåĨħ +Ġm und +ĠEmer ging +ĠS DS +Ġg s +软 çļĦ +S ections +ak is +åħµ åĽ¢ +ãĤ ¨ +ĠV ictory +åŃŠ第 +Ø µ +ĠL F +åºĶ该 å¦Ĥä½ķ +Ġbar becue +è£ħç½® åĮħæĭ¬ +Ġmed als +Ġvar char +, {\ +C ru +ĠNatal ie +OUR CES +åĭĺ å¯Ł +夹 åħ· +ĠFil ters +ĠLand ing +ï¼Į ä¸ĸ +æĬķ 产 +ĠG one +èĩª è´Ł +Ġdiscontin ued +å¤ļ è¾¾ +ĠUS ART +Ġfer mentation +æİ¨èįIJ çļĦ +åĬĿ 说 +J u +æĻ® æŁ¥ +oph one +é£Ł çĽIJ +åı¹ éģĵ +_ cond +ĠX I +éĥ½æľī ä¸Ģ个 +æ¶ § +ä¸įæĺ¯ åĽłä¸º +Char acters +Ġd z +_m ulti +IP O +å±± ä¸Ń +_ player +ï¼Į åŃĺåľ¨ +Ġbo iled +ĠC ot +ï¼Į以 使 +ins ide +ynom ial +Object ive +ĠA UD +Ġinflu x +B rain +åŁº ç«Ļ +å§ĭ çļĩ +çļĦå¤ĸ ä¾§ +ĠVent ures +Ġi od +çŁ¥ å·± +an ian +Ġpresent er +åĽĽ äºĶ +D igest +ĠHer bert +åĪ» èĭ¦ +Ć Ć +大 人çļĦ +ä»» ä¸Ģ +ĠErr ors +è¯Ńè¨Ģ çļĦ +Ġconvey ed +Ġ æĪIJ +åºĨ åħ¸ +Ġun supported +. 
yml +ĠR aven +ä¹Ł éļıä¹ĭ +çļĦå¤ĸ éĥ¨ +Ġg g +ĠClass room +æ¯ Ĥ +ric anes +Ġorigin ating +ek t +æĬĹ èıĮ +Ġv il +é¦ĸ 缸 +Ġwater fall +Ġcal ming +å®ĭ 代 +OD ES +Ġextract ing +ĠProm o +ĠC ST +-h ydro +, çľĭçĿĢ +ä¼ģä¸ļ ä¼ļ计åĩĨåĪĻ +ĠPattern s +æĢª çļĦ +Ġdin ners +Ġrev ise +ruct ures +, 羣æŃ£ +h b +Ġa ided +Ġ* > +_V IEW +Ġon Create +çİĭ æŁIJ +EM S +ä¸ĩ å®¶ +âĤ¬TM s +Ġaggress ively +å¤į æł¸ +Ġbr ushing +举åĬŀ äºĨ +Ġpol len +Ġε ÏĢ +ĠWhe els +Ġsulf ate +- ' +- Pro +ä»ĸ çİ°åľ¨ +åıĹæ¬¢è¿İ çļĦ +Ġfright ening +k c +ĠFilip ino +åıĬæĹ¶ çļĦ +Ġd ome +èĬ± çĵ£ +{ % +Ġpl ague +Ġdeploy ing +ĠP ole +éĥ½ ç͍ +ç½® ä¸ļ +Ġarrest s +è½½ èį· +Det ection +su v +ä¿Ŀ æ´ģ +Ġetern ity +å¯Ĩ æĸ¯ +. weight +K Y +Pr im +ï¼Į åįļ +éĤ£ å¹´ +Ġdiscour aged +ĠF ULL +æ¯Ķ èĩªå·± +è¿İ æĪĺ +-threat ening +大 åĵŃ +< class +ĠL ack +ĠAg enda +åıī 车 +Ġs aints +ãĢĤ è¿ŀ +itt ens +åij½ åIJį为 +c iting +ä¸ī åIJį +èĤī ä½ĵ +åįĹ çĵľ +Ġreb el +ï¼Į 大åѦ +æīĵ èµ¢ +.user name +Ġ icing +_r ank +å¹²åĩĢ çļĦ +Ġ" ... +åıij æĸĩ +Ġbre aches +ĠAll ison +Ġf ren +Ġtut oring +Ġstabil ize +å®ŀ æķĪ +Ġed its +åįİ ä¾¨ +Th u +ï¼Į è®°å¾Ĺ +Ġsuper visors +ĠCor al +ĠL iam +ch id +_c ur +_v ol +g ins +_ engine +it r +Ġindul ge +. 
ret +æĹ¶ ä¸įæĹ¶ +ĠGu ys +åºĬ åŀ« +ĠTre vor +åºĶä»ĺ 款 +ĠR ip +åIJİ å°Ĩ +ĠPro te +éļı äºij +Ġback ups +Ġt ad +ĠV ER +äºĨ ä»Ģä¹Ī +ra per +äºĨä¸Ģ 天 +AS M +æĿ¾ åĬ¨ +ĠCaf é +æĿ¥ 个 +, æŃ¤æĹ¶ +h our +èĦĸ åŃIJä¸Ĭ +.Se lected +S OURCE +é£İ ä¿Ĺ +)- ( +æŀĹ åĩ¡ +_INTER FACE +| _{ +(d at +Ġimm ersed +å±ķ åĩº +Å ¯ +æĪijçļĦ å¿ĥ +/ Q +b inding +ĠS calar +- forward +ĠL TE +æĸ° æĪ¿ +ĠFo am +ĠCl osing +ï¼Įæ¯ı æĹ¥ +Ġmetast atic +- low +b ri +æ°ij èĪª +DB G +çļĦ å¹³åı° +K ate +ĠInst itutes +Ġt roll +Ġl inger +éľ²åĩºä¸Ģ ä¸Ŀ +Ġpollut ants +ad b +çī¹ äº§ +} ', +ä¸Ĭ è¡£ +æīĭ è¶³ +-fashion ed +ĠBr ass +çļĦ çģµéŃĤ +æľįåĬ¡ äºİ +Co ords +ä½ł åĸľæ¬¢ +Ġp end +ï¼ĮèĢģ åŃIJ +uff ered +è¿ij äºĨ +ĠðŁĺ Ģ +ĠT in +å¾Ģ å¤į +ï¼Į å°¤ +Ġtom ography +Ġj ug +Ġcomp ulsory +ç͵ åİĤ +à ¬ +ä»ĸ 为 +ï¼ĮçĦ¶åIJİ åıĪ +Ġf uss +碾 åİĭ +奥æŀĹ åĮ¹ +ï¼Į ä¸ĵéŨ +UMM ARY +羣æĺ¯ 太 +st h +ĠAll an +æĥ³ æĬĬ +ä¸ī æĹ¥ +ï¼Įä½ł çİ°åľ¨ +can ner +å©ļ åIJİ +a ption +建ç«ĭ èµ· +åij¨ çļĦ +ä¸Ĭ 个 +Ġiter ative +\ subset +ä¸Ń åħ¨ä¼ļ +Ġ éļ¾éģĵ +Ġde i +.M ock +Ġtw entieth +Ġcon greg +ĠO le +ç»Ļ åĩºäºĨ +åĨĻ ä¿¡ +ï¼Į åı¦ +åĩĨç¡® åľ° +Ġon wards +åķĨ åѦéĻ¢ +ĠX u +(p refix +å·¥ä½ľ ä¼ļè®® +Ġmod em +乡 ä¸ĭ +Ġwrong ful +Ġrev ital +P ossible +ĠL ands +è´´ åľ¨ +Ġfabric ated +åıijçĶŁ åύ +_ ic +it os +ä¸Ģ æĹ¶éĹ´ +Ġcomp ass +顺 æīĭ +.L eft +Ġh u +ĠBron ze +ï¼Į 缼 +模å¼ı ä¸ĭ +åĽ½æľī ä¼ģä¸ļ +w u +UR ITY +被 åĽ° +ĠÎ ¦ +pt o +Ġpos itivity +/ sw +ï¼Į çŃīçŃī +ĠRec ording +Ġpl anners +åĽŀåİ» åIJ§ +AST ER +ï¼Į ç³» +å·¥ä½ľ æĬ¥åijĬ +Ġvac ancies +-l inux +ks i +Ġforward ed +pe e +çľ¼ è§ģ +Ġcheck er +åı¯ æĭĨåᏠ+ink i +a urs +åıĺ å¹» +(p oint +æĢĿæĥ³ åĴĮ +Min or +æĸĩ æŃ¦ +缸 è²Į +Ġdraw backs +oped ic +æıĴ æİ¥ +- ever +ut ility +aps ing +B rows +æī§ åĭ¤ +å¹³ æ»ij +å¸Ĥåľº çĽij管 +++++++++ ++++++++ +F ine +Ġin advert +讯 æģ¯ +ãĢģ æ²Ļ +Level s +Ġp v +Ġpro g +åĽĽ 级 +' a +çī¹ æĭī +åħ¶ éĹ´ +亲 身 +èĻ« åŃIJ +_ plot +ï¼Į çļĩä¸Ĭ +.N one +Leg acy +ï¼Į åħ±æľī +Ġin ex +åı£è¢ĭ éĩĮ +_ abs +Ġd v +è½° è½° +Ġç¬¬åĽĽ 竳 +est ial +ĠÎ ķ +B on +th reshold +äº Ł +" [ +Ġh ugs +is Empty +, the +ĠS ail +åıª 
ä¸įè¿ĩæĺ¯ +åĪĺ æµ· +Ġdis mant +é£ŀ åİ» +-f ilter +åĪĨéĴŁ å·¦åı³ +Ġfun gal +DE L +é»Ħ çļĦ +失败 çļĦ +åĩºçݰ éĹ®é¢ĺ +Ġeager ly +J R +le ader +åī¥ ç¦» +æĢ¥ è¯Ĭ +host name +IX EL +Ġvac ations +ï¼ħ ãĢĤ +çĸı æķ£ +éĢł åĮĸ +å¤ļ å°ıæĹ¶ +_H IGH +Ġstabil ization +ä»ĸ è¿ĺæĺ¯ +èģĶ åĨĽ +ĠSett lement +Ġneat ly +g rown +ĠWe i +nov a +Pat ients +ç»Ļ å®ļçļĦ +We ak +åľ¨ ä¸įåIJĮ +Ġemb ark +ed ition +Ġte al +åĵ¼ äºĨä¸Ģ声 +Ġa ry +ãĢģ å½ĵ +èµ° åĬ¨ +Ġunc ategorized +åĽ½å®¶åĴĮ åľ°åĮº +P ure +èµ· çĿĢ +Ġrest ructuring +Rot ate +åľ¨ æľĢ +Ġ{ ¶ +bel ief +Ġmetabol ites +| = +-year s +å·²ç»ı å®Įåħ¨ +and i +ES H +.M edia +Ġacqu ainted +-spe aking +Ġno zzle +let cher +Ġre building +ĠRe leased +ĠRet ro +J ackson +ä¼ģä¸ļ æĸĩåĮĸ +å°Ħ é¢ij +Ġvoy age +Ġfunn els +Ġde port +é¢Ħ åĶ® +-w ave +çĿ¡ åīį +ï¼Į åĪĢ +Ġster ile +Ġparam ount +_M S +tensor flow +æĹ¶ éĴĪ +Ne il +Ġre ap +ĠM ia +ident ally +åįĥ åı¤ +ĠTrust ees +çļĦ 个人 +Ġdump ed +ink y +è¿Ľä¸ĢæŃ¥ åĬłå¼º +æł¡ åĩĨ +ï¼ĮåIJij çĿĢ +å¿į ä½ı +Ġreact ed +ĠL ug +ĠF argo +读 åĨĻ +Ġh ikes +ĠM ons +/ download +Key Value +F req +大 åIJį +äºļ åĨĽ +ï¼Į è¯ķåĽ¾ +è£ģ å®ļ +ĠD unn +ah ah +_b ind +( LOG +羣 好 +çļĦæĹ¶éĹ´ éĩĮ +ist as +åıªèĥ½ æĺ¯ +ten ant +Ġpop corn +没 éĴ± +æ·¹ 没 +ump ed +æŃ»äº¡ çļĦ +re pr +ĠV on +çľ¼ çľ¶ +P OR +k on +Fig ures +ĠK err +Fore st +ï¼Į 示æĦı +ç¼ İ +åIJĥ ä¸ľè¥¿ +/ aws +. 
valid +æĶ ĺ +_c enter +ĠFal con +æľº 车 +ĠW ent +ĠProv iding +æĪIJ è¿Ļæł· +sh adow +ĠAc quisition +缸 åĮ¹éħį +ï¼Įè¿Ļ ä»¶äºĭ +Ġphys iology +Ġwa its +ãĢģ å±±ä¸ľ +èµ° åĩºåİ» +ãĢģ åİĨåı² +æĭī åįĩ +ĠT ale +loc ated +å·¨ æĺŁ +èħ¾ èħ¾ +Al an +顺 åºĶ +.P osition +ãĢģ å·¥ +ï¼Įä»İ äºĭ +ĠE FI +d ummy +åĩº åIJį +广 ä¹ī +çªģå¦Ĥåħ¶ æĿ¥çļĦ +æĦ£ ä½ıäºĨ +ĠAbs olute +ï¼ĮæľĢ é«ĺ +Met al +f ine +Ġt k +æľīæīĢ æĢĿ +Aud it +çļĦ大 éĥ¨åĪĨ +(n il +Ġcal f +Gr ade +State Exception +æĪijåĽ½ çļĦ +Ġo ss +大 åĶIJ +ĠMon key +Ġbrid al +å·´ åŁº +å¾Ģå¾Ģ æĺ¯ +un iform +éĻį æ°´ +Ġland marks +ys sey +y ellow +è¿ĩ åī© +OM EM +ï¼ĮæĪij åı¯ +å¾Ģ å¹´ +客æĪ· æľįåĬ¡ +_ex ecut +ãĢĤ æĸ¹ +Ġamplit udes +åħ³éĶ® æĹ¶åĪ» +æĮĤ åı· +ç§ij æ¯Ķ +汽车 çļĦ +at ings +ï¼Į åIJĪçIJĨ +ä¸Ģ éĶ® +ä¸į 失 +ĠLa uder +-d ensity +é Į +Ġfamiliar ity +ãĢģ åįķ +åĨ¤ æŀī +' I +< u +$ ; +æ² ħ +çĽ¸å¯¹ æĿ¥è¯´ +Infl ater +Ġwal lets +ä¸ĩ æĪ· +/h ome +ĠRh odes +ĠDo ctrine +åζéĢł çļĦ +ĠC airo +éĻį ä»· +ĠArch ived +v ir +_in ode +ĠInter views +ĠTor ah +DO CTYPE +Ġabsorb ing +M ort +ĠC yp +act ed +天 é¹ħ +çķĻ åŃĺ +ï¼Įä¸Ģ ä¼ļåĦ¿ +: not +æĽ´ éĢĤåIJĪ +" æĺ¯ +ï¼ĮæĪij 认为 +éĢī ç§Ģ +Ġsn akes +AD S +ä¸Ģå¦Ĥ æĹ¢å¾Ģ +æģĴ 温 +z an +Ġdef icient +ĠSupp lements +ĠComp letion +al ism +åıij ç»Ļ +ĠO M +è¾¹ æ¡Ĩ +ĠM AR +å¢ĥ åľ° +Ġ第äºĮ 天 +al ion +ĠCar rier +æ²ī çĿ¡ +éĺ´ æĢ§ += k +ä¹ĭ åħī +她 èĩªå·± +uy en +CA SE +天 é¾Ļ +Ġ åĪļ +ä½ĵçݰ åĩº +Ġmind ed +or is +诺 è´Ŀå°Ķ +ä¸įéĶĻçļĦ éĢīæĭ© +çĽĺ çĤ¹ +f ers +Ġsupplement ation +å±± æ´ŀ +æīĵ ä¸ĭ +K elly +t okens +çªĴ æģ¯ +å¯Ĩå¯Ĩ 麻麻 +om at +åĽ½æľī èµĦ产 +Ġrec v +éĽª å±± +Man ual +é»ijé¾Ļæ±Ł çľģ +ç»§æī¿ 人 +ĠRivers ide +.starts With +Ġstream lined +éĩij èī²çļĦ +Ġroad map +æľīä¸Ģ æĿ¡ +Ġarr anging +缤 纷 +ak ra +åģľ æ»ŀ +åĽº æĢģ +ĠAl erts +ä»ĺ åĩºäºĨ +met rics +ĠB acon +og h +_B UILD +èħ¿ ä¸Ĭ +contin uous +ï¼Įä»ĸ è¦ģ +ä¹¾ éļĨ +" => +幸ç¦ı æĦŁ +/v nd +Ġterr ified +~ $ +éĻį èIJ½ +åģļ äºĨä¸Ģ个 +å®ŀè·µ æ´»åĬ¨ +åŃ©åŃIJ 们çļĦ +At omic +OR AGE +çļĦ å·¥åħ· +Ġdep rivation +ĠRol ler +ä¸ĵ 线 +Ġprim ers +ocr ats +ĠBlock s +ĠSl ide +ĠMax im +Ġturb ulent +ov ine +ĠEn ough 
+Ġch ampagne +ad ol +Ġhair s +åİĭ ç´§ +ï¼Į æĬķèµĦèĢħ +æĥĬ éĨĴ +å±¥ åİĨ +++ ; +mark er +ip age +çļĦ ç»Ħç»ĩ +ç»´ åħĭ +Ġclar ified +( IS +Ġover see +ĠO A +Ġdis astrous +.F ields +ig id +ï¼ļ å°ı +ĠS ear +ĠJoh annes +åĽŀ çļĦ +ĠRespons ibility +Ġhom osexual +Ġbak ery +âĢľ ä½łä»¬ +æĪij ä»İ +Ġper ipher +¿ ÃIJ +容æĺĵ 被 +Ġrefin ement +Ġindisp ensable +åıijæĮ¥ äºĨ +-l ived +op or +æŀľ åŃIJ +Ġfile path +: text +ï¼Į æĮĩ导 +ä¸ĩ å²ģ +åįİ åĮĹ +é»ij äºĨ +æĺ¾ç¤º åĩº +.con nection +amp aign +ä¸Ģ 家人 +Ġinconven ience +ãĢĤ åĪĿ +çĭĤ 欢 +Ġcommercial s +Ġpl ung +man agement +. Info +ï¼ħ ãĢģ +åħļåı² åŃ¦ä¹łæķĻèĤ² +ï¼Į æĭį +_b ottom +ĠFar mer +Dep recated +ĠStock holm +为 ä¼ģä¸ļ +è « +Ġb our +.c fg +Ġsl aughter +Ġmut ex +ĠH OME +ak in +ĠProv ided +Ġreck less +/ status +-t ra +ĠTib et +ï¼Į äºĭå®ŀä¸Ĭ +ry an +åİ» åĵªéĩĮ +rit t +Ġkid neys +Ġpione ering +æīĵ éĢļ +ĠM ick +Need ed +åij³ ç²¾ +Ġweb inars +Ġë Ĥ +ĠH IGH +; amp +ĠSuper visor +$ ^{- +Ġwh irl +ĠF ORE +System s +æĪij è¿Ļ个 +çĥŃ æ°Ķ +λ ο +é£İæĻ¯ åĮº +ĠChurch ill +åºĶæĶ¶ 款项 +, ä¸ĭéĿ¢ +ĠF ra +_DE C +éĿ¢å¯¹ éĿ¢ +ä¼į å¾· +åĬł æĪIJ +æĥ ¦ +å±ŀ æĢ§çļĦ +< N +éĥ½ åºĶ该 +- platform +.c md +(C ONFIG +å¦Ĥæŀľ æĤ¨ +æĭĽ æĥ¹ +Ġinhib its +Ġα ÏĢ +ï¼ĮèĢĮ éĿŀ +ç´¢ å°¼ +Servlet Request +ĠRed is +ĠS uddenly +é«ĺ å°ļ +Ġmand ates +Ġm ong +ĠV oy +çģµ çļĦ +Ġfilmm aker +ç¥ Ł +AT AL +çι çι +Ġchampions hips +ap ro +Ġeconom ist +äºĨåĩł ä¸ĭ +èŀ Ĥ +Ġpriv at +ãĢĤåı¯ä»¥ 说 +w f +ãĢģ æľ± +ind ividual +éĺ³ æĺİ +æ±ł å¡ĺ +æ¡Į ä¸ĬçļĦ +ï½ ģ +st uff +ï¼Į æĸŃ +···· ···· +è¡£ è¡« +ĠBO OK +b ys +Ġt idal +æĬĢæľ¯ åĪĽæĸ° +prof essional +C Y +H um +R W +è¿Ļ ç¬Ķ +Ġinter course +ç§ijæĬĢ çļĦ +ĠBook ing +说 ä¸įåĩº +H Q +åIJĥ åĸĿ +Ġ å®ļ +_AT T +ĠWR ITE +Ġun paralleled +ĠIn structor += normal +æĺĤ è´µ +ä¸Ģ éľĩ +æĪij æĢİä¹Ī +ï¼Į没æľī ä»»ä½ķ +åıijæĺİ åĨħ容 +reg istered +Ġe ve +_pack age +Ġghost s +ï¼Į è½»æĿ¾ +çĶŁæĢģ ç³»ç»Ł +Ġreb els +å©· å©· +Japan ese +Ġ 缸åħ³ +çĨŁ äºº +tr ade +Ġg az +æķ´ æµģ +触 碰 +ç»Ħç»ĩ å¼Ģå±ķ +_y ear +ĠTrust ee +åħ¬ 竳 +lu or +ä¹ĭ å¤ľ +ãĢģ è´Łè´£ +pl ots +ĠCON ST +äºĨ æķ´ä¸ª +ä½³ çļĦ 
+åıijè¡Į 人çļĦ +éªļ æī° +M ETHOD +ĠB ET +模åĿĹ çļĦ +Ġett ä +mm ol +对 æĤ¨ +Ġsn ug +,\ ,\ +Ġlon eliness +Ġpo ets +Ġer upt +cl inical +ĠRespond ent +: int +ä¹ĭ ä¸į +Ġaud its +ä¿¡ç͍ é£İéĻ© +çļĦåıij çĹħ +ï¼Ł ï¼ģâĢĿ +Ġcour tyard +积 èĵĦ +-p arent +.find ViewById +è§Ħ竳 åĪ¶åº¦ +ers et +con tr +- url +Ġfull est +ä¸ī äºļ +æİĴåIJį 第 +Ġloud ly +Log ical +åIJĥ äºĨä¸Ģ +Ġlod ging +oc ab +.nd array +çIJĥéĺŁ çļĦ +ht on +头 æĿ¥ +ä¹Ł ä¸įè¿ĩ +b ounds +ren ched +é̼ è¿« +ĠL ov +è·Ł èĩªå·± +ï¼Į å¾·åĽ½ +好 è¿IJ +åıijçݰ èĩªå·±çļĦ +Ġimp lying +_ role +Ġan ts +ĠHug o +大 éħĴåºĹ +Ġuseful ness +Ġfl ap +.C ustom +æıŃ éľ² +ĠInter action +è¡Ģ èĦĤ +ĠæĪij æĥ³ +ä¸į èĢIJçĥ¦ +: k +static method +éĥ½ å·² +âĢĶ or +é Ĭ +æĢ» åħ¬åı¸ +вÐĤ TM +Ġ èµĦ产 +Ġin let +主 è½´ +ä¹ĭ ä¸ĬçļĦ +App ro +m iah +ä¸İ èĩªå·± +Ġpip elines +lem n +è¾ħ导 åijĺ +å»¶ å®ī +ä¸į 说è¯Ŀ +æıIJ çĿĢ +y ahoo +âĢľ 对 +ĠL uk +Ġheart beat +Ġring tones +Emp loyment +å© ķ +-n ext +v ac +Ġmot to +ï¼ĮæŃ£ å¦Ĥ +对åħ¶ è¿Ľè¡Į +ĠConc ord +Tri angle +Ġret iring +Ġman ic +管çIJĨ 人 +Par is +orm an +-app roved +/ Table +æĬ¢ åħĪ +_ help +ĠState ments +OB J +Ob ama +æ³ ¾ +å®ł çα +åIJĥ ä¸į +çļĦ人 æĺ¯ +ç͵ç£ģ éĺĢ +ĠWe eks +ĠGreat est +ĠM uk +ï¼Į åĵĪåĵĪ +Ġide ological +åĨ° æ·ĩæ·ĭ +m ary +ä¹ħ ä¹ħ +F ET +çľĭ å®Ī +ä¸ī 代 +ĠDomin ican +ĠP PP +T am +Ġp ess +ç¬Ķ çĶ» +x o +Ġbench marks +å°± è·ij +ï¼Įå°± æľī +cl oses +qu ist +è¿Ļ个 女人 +Ġded uce +Ġtranscription al +âĢĻ : +ãĢĤ 注æĦı +æī§ æķĻ +Ġm anga +ĠAl ibaba +ĠMem or +Ġdeterior ation +ag ements +Dis cuss +æ¯ĶåĪ© æĹ¶ +Ġel ic +/ default +se parator +éĤ£ 两个 +N at +ãĢĤ ) +th ouse +å¤ļ åĪ©äºļ +æīĵ åĬ¨ +è¥Ħ éĺ³ +ĠVern on +Ġra pport +ĠLa undry +, å¼Ģ +åIJ¸ æ°Ķ +Ġdist ort +Ġhead set +æľī åħ³ç³» +ien ne +缴 çļĦ +主è¦ģ 以 +ĠDel uxe +ĠAl arm +,让 æĪij们 +w orm +as uring +æķ°æį® å¤ĦçIJĨ +_det ail +é¡¹çĽ® ä¸Ń +Ġ ï¿¥ +im inary +h man +ãĢģ ä¼ļè®® +çĬ¯ è§Ħ +人群 ä¸Ń +顽 强 +çĿĢ ä»Ģä¹Ī +OLD ER +Ġintimid ating +other mal +Ġi os +Ġinflu encers +çŃī åİŁåĽł +Ch allenge +Ġst itches +æĿ¥ ç¡®å®ļ +Ġre visit +af ety +ĠM MA +ä¾ Ĩ +ï¼Į æĬ¬å¤´ +: NS +åıĮæĸ¹ çļĦ +-n umber 
+æīĭ èīº +ĠCast ro +Ġsh ores +_t s +ĠPa ula +Ġdetect s +ä¸İæīĢè¿° 第ä¸Ģ +_t ests +Background Color +Ġshut ter +åŃ ļ +èµĽ çļĦ +æľ¨ åľ°æĿ¿ +Dll Import +, W +Ġ! $ +æľª åħį +ĠH els +LO L +ãĢģ è½» +ï¼Į æıIJåīį +Ġsocial ist +åĬłåħ¥ åΰ +ĠManu el +çĸ ± +éĺ² ç©º +çIJĨ论 åĴĮ +.to LowerCase +ãĢĤ è¿ĩ +ç³»åĪĹ çļĦ + ¹ +aw esome +éľ² 天 +ç¨ĭåºı åijĺ +Prot otype +ĠBe au +Ġundes irable +) {\ +ï¼Į 毫æĹł +åľ¨ è¿ĻäºĽ +ï¼Į转 è½½ +amb i +leg acy +ç¿ Ĭ +ide press +åµĮåħ¥ å¼ı +ĠTra v +ul ant +ĠHans en +è¡ĮæĶ¿ åĮº +. rm +Ġ å¾· +ä¸ĸçķĮ åIJĦåľ° +åľ° åĿª +Ġra pper +ãĢĤæľī æĹ¶åĢĻ +Ġs aint +Ġann ih +ib ia +Ġw edge +s aid +ä¸Ķ åħ¶ +åģĩ åĨĴ +ĠM itch +As pect +éĶ» éĢł +Ġ çĤ¹ +ãĢĤ ä½łçļĦ +ic ast +ãĢĤ åŃĻ +èĢĮ åħ¥ +è°ĥ åīĤ +re peat +Ġpersecut ion +, '' +` ]( +Bre aking +Ġres umed +_t imes +yn ec +B ib +èᝠçī©çļĦ +_end point +Ġtrans p +èIJ§ æĻ¨ +ï¼Į æĸ¯ +æĪIJäºĨ ä¸Ģ个 +大 åı£ +èĬĤ缮 ä¸Ń +LOC ATION +sh irt +Ġfix ation +åīĬ åĩı +Ġdocument ing +_ Error +ar f +æ°´ éĩĮ +_c m +ĠO maha +.get Item +éĢł 纸 +ĠAg ile +Ġcheer ful +ãĢĤ åĽ½å®¶ +å·¨ åĵį +am age +Ass istant +ĠAuthor ization +ãĢģ çµģ +ud p +ĠJ R +com ings +æĹł å¿ĥ +åħ« ä¹Ŀ +.en viron +ï¼Į è´¹ +ter ror +ãĢģ ç»ı +å¹´ èİ· +Ġweaken ed +ck e +og el +ï¼Įåıªè¦ģ ä½ł +çļ® è´¨ +_in valid +Ġв Ñĭ +ï¼Įçī¹åĪ« æĺ¯åľ¨ +æĹł 害 +Ġaf rica +, èĩ³å°ij +èĥ½ å°Ĩ +ç¾İ å¾· +.p art +,ä¸Ģ è¾¹ +ĠJack et +ĠHP V +åΰäºĨ ä¸Ģ个 +ĠT emporary +Emer gency +ĠXCTAssert Equal +éĩĩ æļĸ +Ġend ured +ĠConst raint +is in +ic her +åĩº ä»» +æIJľ çĭIJ +.col lect +ç²ĺ æİ¥ +Ġsun rise +IN ES +ĠMar ian +-w rapper +Ġc uda +Ġf olds +Ġair way +éĹ® åį· +ä¸Ģ åĽŀäºĭ +è½® çļĦ +Ġfed er +S PE +Ġreconstruct ed +åĽł åħ¶ +æĿ¥ è¢Ń +ĠPost al +åĩĿ èĥ¶ +Ġs ont +Ġserial izer +èħ° 带 +mean ing +ï¼Įæ·± åıĹ +ĠJo ey +ĠDO I +Week ly +ĠMcK in +AT P +Ġfiles ystem +ĠNord ic +Ġt apes +Jon athan +ä¼ĺæĥł åΏ +ĠEn cyclopedia +Ġ$ ('. 
+ĠAut ism +, åĪ©ç͍ +Ġs ap +Ġe ct +è§£åĨ³ çļĦ +( com +西 åŁŁ +Ġloc ating +Vers ions +- cal +ãĢĤ æ¸ħ +Ġtool tip +åĪĽéĢł åĬĽ +çĶ º +ç§» éϤ +æķij 人 +ĠSN P +ĠS ang +è®¾ç½®åľ¨ æīĢè¿° +åΰ åĮ»éĻ¢ +çľģ çķ¥ +æĤ£èĢħ åľ¨ +ï¼Įå¾Ī æĺ¯ +Ġb illed +(h ash +åħ¨ æĿij +äºĨ好 åĩł +arch ar +éĿ¢ æĹłè¡¨æĥħ +Ġopt ic +ĠContract ors +ĠM ald +éĵ¶ èī² +Ġdet ach +溶 æĢ§ +< table +ĠAtt ention +-sh ow +å°± ç͍ +æĭ¿ åĩºä¸Ģ +Ġ" ; +éĤ£ä¹Ī 大 +Ġaggreg ates +un used +See k +Ġprice y +Ġas bestos +æīĢ åģļçļĦ +Ġæĺ¯ çļĦ +. Integer +es a +ãĢģ ç»´çĶŁç´ł +ĠSh awn +åıĹåΰ å½±åĵį +éĩĩ 纳 +ãĢĤ 游æĪı +Ġ ä½łä»¬ +æ¼Ķ åĮĸ +f und +Ù ij +çļĦ è§Ĩé¢ij +PA RE +Prem ise +ed is +Ġy aml +if lower +Ġbund led +Ġdisrupt ed +ain ting +ĠPhys ician +çļĦ äºĮ +çļĦ è¶ħ +åĪĻ åľ¨ +ĠPr ague +ĠP LC +æŃ£ åĵģ +Out line +ass a +çļĦ çŁ³ +责任 æĦŁ +å£ģ çĶ» +Ġj erk +ade on +Ġpoly g +宿 主 +B oss +çĵ¦ å°Ķ +, çªģçĦ¶ +ĠS CI +ĠL t +Op acity +åĴĮ ç²¾ç¥ŀ +ä¿® 身 +ĠPra irie +Ġdoll s +ling er +èĩªçĦ¶ èĢĮ +å°Ĩ ä»İ +ï¼Į äºĴ缸 +ĠF olk +Ġsk ating +ĠRes idents +uck ets +壮 è§Ĥ +Ġprot otypes +ĠLe icester +Ġinf used +Ġa eros +ĠThrow able +Cli ents +S even +Property Changed +ĠP sy +Ġdi ag +mark ed +Ġ" ] +Ġhash Code +便 æ°ij +çļĦ人 åľ¨ +Ġ æİ¥çĿĢ +ä½Ľ æ³ķ +æ±ł å®Ľ +est yles +. qu +为 缮çļĦ +per formance +opath ic +ĠK ang +å·¥ç¨ĭ åѦéĻ¢ +æĬĹ æ°§åĮĸ +Ġr ag +åĸ ³ +-not ch +it en +Ġloos ely +认å®ļ 为 +Ġr f +Ġal arms +Ġout ing +æĹł åĬ© +ĠEurope ans +Ġcorrupt ed +P ose +çIJ ° +ä¸į 计 +ï¼Į 设置 +享 ç͍ +æijĩ æ»ļ +ff ee +it ious +åīį åĩłå¤© +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ +对 大家 +Ġcare t +好 çľĭçļĦ +ĠS word +å¦Ī åĴª +æĺ¯ å±ŀäºİ +.s lf +ĠRes ident +ĠC ubs +W ARNING +ĠIll uminate +_ require +m eth +. 
ge +D ash +uck le +é«ĺæķĪ çļĦ +Ġpled ged +b aby +ï¼ģ æīĢ以 +Sh aring +UN K +ä¸İ ä¼ļ +Ġ åĩı +ä¼ģ äºĭä¸ļåįķä½į +Ġflour ish +ight ed +çݰ代 åĮĸçļĦ +Ġbath ing +æ°¢ æ°§åĮĸ +åħij çݰ +G lass +ç«ĭ 项 +éĴĪ çģ¸ +è¡ĮæĶ¿ éĥ¨éŨ +ĠElect ro +èį¯çī© æ²»çĸĹ +å¤įåIJĪ æĿIJæĸĻ +her ing +ipe g +: t +Ġarbit rarily +ä¹Ļ éħ¸ +åŃŁ åŃIJ +ë¦ ¬ +b right +, ä¿Ŀè¯ģ +åĬł çĤ¹ +Ġknow ingly +å±Ĥ次 çļĦ +_H ASH +å®£ä¼ł éĥ¨ +éĵ¶ è¡Įä¸ļ +Trad itional +he arted +æį ¶ +ĠC ottage +ac ional +å°±æĺ¯ ä½ł +ç²¾ ç¾İçļĦ +Ġcruel ty +Ġà ħ +ĠPrint able +ï¼Ł ä¸į +å·¡ æĬļ +æľĪ çĶŁ +å±± åĿ¡ +_f ill +ĠC openhagen +A men +ãĢģ ä¸įåIJĮ +ï¼Ī C +Ġbutter flies +T umblr +æŃ£ å¤Ħäºİ +_ex port +Typ ed +ï¼Į æĺİæĺ¾ +Ġboost s +çīĮ çļĦ +Ġfront ier +r ush +ĠInt ellectual +åħ¬ 约 +ï¼Įä¸Ģ éĺµ +No ise +èĻ« 害 +Ġ$ |\ +, æľī人 +as an +ĠUnder ground +" fmt +è¿ij å¹´ +Ġa an +åįĹ ä¸ĭ +é¢Ĩ 主 +by ter +Ġb umps +ag ara +ä¼ļ åĴĮ +Ġhigh ways +ull en +ĠEmploy er +ï¼Į 书 +çļĦ åľºæĻ¯ +R od +çŁ ľ +Ġmeth ane +on acci +åıĸ æļĸ +æł· æĿ¿ +ĠGu ate +ĠD ET +ĠTechn ician +. errors +åħ¬ åĪĨ +ãĢį , +Ġd yst +çļĦ çĬ¶åĨµ +ĠA uss +Ġeffic iencies +ç¾ ¿ +åİļ åİļçļĦ +met al +Ġmir acles +Ġster il +du ce +ç͵ èį· +æĬµ åζ +\] ), +ĠD ob +è¸ Ĭ +åħ¨ è¿ĩç¨ĭ +æķĮ æĸ¹ +ï¼Į æĢİ +æĪij åĨį +åĬĽ äºī +Port al +Ġmon keys +å·²ç»ı 没æľī +å°Ŀ å°Ŀ +åĬł éĢŁåº¦ +Set ter +g ements +Ġl ia +ï¼Ł ä»ĸ们 +æĶ¯ æ°Ķ管 +Ġbe ad +ĠA DA +åĨĽ å·¥ +inst ead +ĠBrun swick +Ġanomal ies +ï¼Į çĩķ +åįĹ京 å¸Ĥ +ĠPent agon +å±± å³° +群 ä½ĵçļĦ +身 æīĭ +-ac re +ï¼ĮéĤ£ æĹ¶åĢĻ +ĠHel ps +çIJĨ çļĦ +æĭī å¾· +è¿Ŀ 纪 +çŃī 离åŃIJ +Ġm apper +_s ite +An alyzer +.e q +Ġsuck ed +å¾ ³ +-m ount +ç«Ļåľ¨ éĤ£éĩĮ +åħī çĽĺ +Ġdetermin ants +ĠMin eral +Can adian +. 
SE +Ġh an +scrib ed +çϽ çϽ +PL IC +.s chema +çĪ » +Ġag ility +(d est +Ġsl ap +Ch o +éħį ä¸Ĭ +èµŀ æī¬ +ا ÛĮ +Ġhes itant +OTT OM +æ³ķ æ¡Ī +Ġwe bs +D Y +çĤ¹ ä½į +File System +çļĦ 设å¤ĩ +ï¼ĮåıĪ èĥ½ +Ġjur ors +.be ans +_ OVER +( create +å¾ģ æĪĺ +主 å®° +è´¢åĬ¡æĬ¥è¡¨ éĻĦ注 +Ġfound ational +缸 符 +Ġclo ves +éĤ£ åĩłä¸ª +Th umb +æµĭ ç»ĺ +.log in +ćć ï¼Į +so ap +åįĩ åįİ +æĭĸ æĭī +åİ¿ æĶ¿åºľ +ach a +ãĢģ çłĶç©¶ +Ġcons oles +arm esan +éĹŃ åĺ´ +åıĤä¸İ äºĨ +Tool StripMenuItem +z k +pr ay +ĠN N +Ġcompet encies +- others +Ġ æĺĵ +éĨī äºĨ +ĠD AT +Ġh ates +è¿ĺ ä¸įèĥ½ +ĠSw im +çŁ¥éģĵ 她 +éĻ© äºĽ +.word press +åĩł åı¥è¯Ŀ +æľ« æĹ¥ +Ġsil ica +_text ure +C atch +\ quad +Ð ¢ +ro pped +Ġtransport ing +\ Core +,å¹¶ åľ¨ +Ġflow ering +äºĮåįģ åħ« +åį° ç«ł +Ġhum orous +R everse +å¦Ĥæŀľ æľī +y aml +çϽ éĽª +Ġlav ender +转 çĽĺ +Ġdiss olution +ä½Ľç½Ĺ伦 èIJ¨ +å¹´ åĴĮ +è¤ Ĵ +æĺ¯ 缮åīį +å¹² äºĭ +èĤī çľ¼ +Ġng x +Ġex its +åĨħ容 åĴĮ +ĠH utch +the l +it he +çŃ ł +åĬ³ ç´¯ +çĥŁ çģ« +ol ip +æį¢ åıĸ +im ulation +. Resources +Out door +_pro p +a ude +c ms +Ġre pr +å®ŀ å®ŀåľ¨ +gre SQL +ï¼ģ ä½łä»¬ +å§IJ 夫 +Draw ing +.Act ive +_ST ACK +ĠH ank +çĸ«æĥħ å½±åĵį +è¯ ² +æľįåĬ¡ åķĨ +per mission +ĠPas o +é£Ł æĮĩ +_u uid +ï¼Į 说è¯Ŀ +=" , +null able +ĠF etch +为 æľ¬åıijæĺİ +iment o +_var iables +, ä¹ĭåIJİ +罪 åIJį +ĠAbd ul +Dis ney +ï¼ĮæĪij æľī +ä¼ł åΰ +Ġfind ViewById +L emma +pec ified +.Debug ger +Ð Ķ +å°± æľīäºĨ +\ ! +å°Ĩ æľī +_c r +-qu arter +ç«Ļ åĩºæĿ¥ +ĠB oh +âĢĿ çļĦä¸Ģ声 +éĴ» çłĶ +L atin +om ology +Ad visor +но ÑģÑĤ +ãĢģ æĿIJæĸĻ +Ġl aps +ï¼Į éĢıè¿ĩ +åħĭ ç½Ĺ +Ġ ä»Ĭå¹´ +_int eger +åĨ° åĨ·çļĦ +çĤ º +é¸ ¥ +acc arat +Ġ åºıåı· +ãĢĤ åıĬ +ãĢĤ å·² +ĠRE VIEW +ç»Ĩ åĮĸ +ĠWh is +, åħ¨åĽ½ +RE ST +ä¸Ń éĢĶ +âķIJâķIJ âķIJâķIJ +$ result +Ġtra verse +ĠPaul o +Ġì ł +ĠMus cle +Ess ential +. 
aws +ï¼Į 声 +th umb +-d ark +åĮ ® +骨 é«ĵ +.int o +Ġl umber +Ġcon gru +not ice +orpor ated +ĠChron icle +ĠR unnable +.S ystem +-p resent +Ġrest raint +oc os +.S ave +NS Array +泡 泡 +ãĢģ ç²¾ç¥ŀ +好 åĩłä¸ª +åĨ² åİĭ +ä»İ åĵªéĩĮ +Int ro +è¯ŃéŁ³ è¯ĨåĪ« +( io +Ġra v +ĠI o +åİŁæĿ¥ æĺ¯ +Ġspac etime +Ġs ings +æĢ Ĥ +aren thood +, 红 +ĠPh arma +-e lement +ï¼Į åŁº +ĠT aste +Ġeffort less +(b atch +Ġ çİī +op rotein +ime Type +ĠK O +ĠClay ton +çĦ¶å¤§ æĤŁ +ï¼Į åijĬè¯ī +Ġdisp ers +g od +iw i +Ġst arch +å½ · +幸 åŃĺ +æıŃ å¼Ģ +çķ¥ å¸¦ +æīĵ æĸŃäºĨ +Ġ æıIJä¾Ľ +ur is +Ġas ymmetric +åĪĨ æł¡ +ĠRe ception +ĠArch ae +V K +Ġ åį« +ĠS UN +og ly +被 ä½ł +.A cc +me ch +Ġradio active +çĶŁæ°Ķ äºĨ +uk kit +åıĽ éĢĨ +Ġapp ended +Ġshred ded +U l +辩 è¯ģ +æħĮ å¿Ļ +éĥ ¸ +its u +ï¼Įéļı 便 +ãģĦ ãģŁ +n ets +s an +ï¼ĮæĪij å¾Ī +ĠOut standing +ĠConnect ions +æĺ¼ å¤ľ +ĠP itch +å·²ç»ı ä¸į +åĽ¾ æĸ¯ +hel m +Ġk ernels +Buy ing +åı¯éĢī åľ° +å¾Ī çŁŃ +- interest +ĠS abb +ire n +_ required +Ġpo ised +Ġactiv ating +æĿ¥ å®ĮæĪIJ +ĠWare house +çļĦ è¡ĮåĬ¨ +ãĢģ ä»·æł¼ +éĤ» è¿ij +å¼Ģ æŀª +Ġbro ccoli +Ġvel vet +ĠB race +åĴĮ åĪĺ +æĹł çĹĩçĬ¶ +忽 æĤł +C X +" ä¸ī +ï¼Į æķĻå¸Ī +åįģåĽĽ æĿ¡ +ĠEc ology +Ġmyth ology +ĠGriff ith +ç½ij åIJ§ +ðŁ ĩ +ĠL indsay +ĠN arr +çļĦ è¿ĺæĺ¯ +ĠÐ ļ +Ġphys ique +ï¼Į ç«ĭ马 +Ġsk learn +Ġovers ized +Prem ium +els ius +è±Ĩ æµĨ +re rs +åĭ¾ åĭĴ +Ġnurt uring +v f +åĩº åįĸ +ĠPs alm +ãĢģ èģļ +-pro perty +Ġprogn ostic +ĠAr bor +åı° ä¸ĭ +Ġgrand son +Ġplug ged +im et +Ġav enue +çīµ æī¯ +_VAL ID +Ġsubsid iaries +/ class +ang ible +CON DS +Ġsucceed s +åıijè¾¾ åĽ½å®¶ +ĠJ J +åĩ¡ æĺ¯ +ç¿° æŀĹ +Ġmor bidity +, 许å¤ļ +åĽ¢ åijĺ +Ġh s +Ġu c +çļĦ åı¦ä¸Ģ +çĥ ¬ +ĠNot tingham +éĶ¥ å½¢ +Ġsal ts +K aren +ï¼Į çΏçΏ +æį¢çĥŃ åύ +Ġb d +_T YPES +ĠJohn ston +Ġselect ively +Ġpunct uation +R og +ĠP U +est rian +æĺİ åªļ +ĠCar rie +ï¼Įä½ł éľĢè¦ģ +Ġphilos opher +. 
ph +P ods +w ant +为 空 +· åį¡ +ĠH iring +\ b +ĠD ATE +ĠJuda ism +å±ŀ ä¸ĭ +PK G +ole on +Ġmis ery +Ġplain ly +æĭ¦ ä½ı +ĠRes orts +Ġcompar atively +\ Http +ä¹Ł æ¯Ķ +Ġ å¦ĤæŃ¤ +åľ° 段 +Ġden ies +ä»ĸ æľī +_C URRENT +绿 èĮ¶ +ãĢģ æĢ» +åģ¥åº· åıijå±ķ +ï¼Įç¥ŀ èī² +ï¼Į æ²»çĸĹ +ĠP CA +è´¢ ç¨İ +F Y +ĠT WO +çī§ å¸Ī +ï¼Į åݻ年 +ãĢģ éĵľ +èĤ¡ä¸ľ æĿĥçĽĬ +代表 åĽ¢ +S amsung +T p +\ }\ +ĠTher mal +Ġubiquit ous +Ġtuber culosis +两 å¼ł +.d ispatch +CR C +S av +RO DUCTION +T reatment +Ġe agle +å¤ļ å²ģ +æľĢ ç¾İçļĦ +缸äºĴ ä½ľç͍ +两 éĥ¨åĪĨ +æ¸ħ äºĨ +Ġnumer ically +æµĵ åİļçļĦ +Ġp inned +ĠR ally +Ġmix tures +Ġstair case +ĠC indy +åĢºæĿĥ 人 +( conf +g f +D Q +æ·± æ²ī +èĩª æĪijçļĦ +满 æĢĢ +ï¼ļ æľī +ĠPri est +ï¼Įè¿ĺ 羣 +.F loat +åĪº æĿĢ +ç͵ æĦŁ +ar x +.com mit +. Vector +两 çľ¼ +ek ing +works pace +Ġju icy +ĠBrun o +Ġrenov ations +, åıĹ +App lic +< v +Sk ills +ĠStef an +ï¼Į é¦Ļ港 +Ġquir ky +Th ought +m atically +Ġpersonal ised +utter ing +pe ace +Ġe z +ri j +æĮģ ä»ĵ +ç£ģ éĵģ +Ġenv y +ĠMake up +Mon o +H ouston +åħĪ å°Ĩ +ĠSPD X +Ġper cept +Ġunivers ally +车 ä½į +( iter +Ġhor rific +æĪĸ åħ¶ +ĠMet rics +))/( (- +æĬķ 篮 +Fund ing +$ a +t ables +èĦij è¡Ģ管 +fil ters +Ġwhites pace +, æĿ¨ +大 åıĺ +ced ent +ĠWill ow +ol ta +Ġdecl ar +Ġe ch +sh i +Ġarg parse +B ern +/ de +ä¹Ł è§īå¾Ĺ +ĠSt arter +OR TH +Ġoper ative +åģļ çĶŁæĦı +ï¼Įæīĭ ä¸Ń +èªĵ è¨Ģ +å¿ĥ æĦ¿ +Ġes ac +_d ot +IM S +ï¼ļ æĮĩ +åĮĹ æŀģ +çĶ· åŃ©åŃIJ +ad ir +Ġst aggering +Ġc older +åĸ· æ´Ĵ +^ j +ee red +åıijè¨Ģ 人 +- ons +ult an +举 æīĭ +âĢľ No +çļĦ ç»ıåİĨ +ĠMe al +大 æĺİ +åįģè¶³ çļĦ +Ġspec ulative +'] ; +å¼ł æŁIJ +èĭ¥ æľī +-reg ulated +ĠSchw artz +ãĢģ çģ« +, å¹´ +cer pt +æ¯į å©´ +ĠT ul +eg g +ĠMan ifest +æİ¥ 管 +ĠTool kit +Ġs ack +çīµ çĿĢ +çŃī æľįåĬ¡ +rav o +Ø · +交 çķĮ +è¿Ļ个 æ¶Īæģ¯ +æ´Ĺ èĦ¸ +Comp anies +è§ĦåĪĴ åĴĮ +æ·± å±Ĥ +-d irected +G ary +ĠEx posure +åªĴä½ĵ çļĦ +P OL +Ġwor ding +R ol +ĠSpec ifies +è¿ĩ ä»ĸ +ä¸į åĮħæĭ¬ +.h ide +_D IRECT +çĹ £ +asion ally +黼 çİī +ä¾Ľ æļĸ +ï¼ĮæĹł æķ° +Ġv u +Ġcur ly +Ġemb roidery +ï¼ĮæīĢæľī 人éĥ½ +éĤ£ 次 +ãĢĤ ä¸įçŁ¥éģĵ +ãĢģ ä¿ĥè¿Ľ +ĠLev i 
+ĠTurb o +éĢĿ ä¸ĸ +ï¼Į ç¦ı +Ġpor celain +æĸ ĵ +æĿ¡ 纹 +tri angle +æĪijçļĦ è¯Ŀ +æ³Ľ èµ· +y ang +ĠSure ly +ê³ ł +.m atrix +, åįĹ +代表 æĢ§ +ï¼Įåį³ä½¿ æĺ¯ +Ġtar iffs +cho ices +Ġsign age +.sc ene +oment um +Le an +奥 åľ°åĪ© +Ġendeav ors +- On +æµ· è´¼ +Rend ering +ĠLag os +误导 æĢ§ +vis ited +ĠTh rive +åĽĽ ä½į +ç²¾ çĽĬ +as ide +ĠB ey +ä½ł åºĶ该 +E scape +æļĤ è¡Į +ĠHuman ities +ism o +ĠCompl aint +ĠSpons or +çĶŁ çĶŁçļĦ +æĦŁ æµĭ +åIJĪåIJĮ çļĦ +ĠK L +Ġless en +åĽŀå®¶ äºĨ +åijIJ åĸĬ +Ġp endant +å®īæİĴ çļĦ +G RESS +端 æŃ£ +Ġfig ur +åıĹ害 èĢħ +m agic +æ¯ı ä¸Ģ天 +ï¼ĮåĽłä¸º 她 +åĨ¥ æĥ³ +_method s +åŃ¦ä¹ł äºĨ +Ġμ M +E ither +æĶ¶åħ¥ çļĦ +ah r +ç§ĺ è¯Ģ +ä¸ĥåħ« ç³Ł +F at +Ġcon gen +Ġfright ened +- XX +åĪļ èIJ½ +Ġhorm onal +æĤł çĦ¶ +人 社 +ãĢģ ç´« +ĠD V +çͱ æĿ¥ +ĠWonder ful +Ġfib robl +and atory +ĠJ ared +Ġrooft op +å®ıè§Ĥ ç»ıæµİ +æĪij åıªæĺ¯ +èĤ¯å®ļ çļĦ +Ġ# > +太 éĥİ +çļĦ大 åĬĽ +. IS +éĥ½ å¿ħé¡» +ä»İ æĪij +åĴĮ 第ä¸Ģ +ï¼ĮåĽłæŃ¤ åľ¨ +ĠEval uate +ï¼ĮæĢİä¹Ī ä¼ļ +äºĮåįģ äºĮ +åĪĨæķ° 线 +ĠG an +æķ¦ çħĮ +A O +çļĦ ç³»ç»Ł +ĠPros pect +åľŁåľ° 使ç͍æĿĥ +V ote +y z +_ auto +ĠSh oot +ãģĦ ãģ¦ +Ġbureau cr +ï¼Įåı¯ä»¥ 说æĺ¯ +Av atar +_B AD +ä¹ĭ æŀģ +Ġkiss ed +å¼Ģå¹ķ å¼ı +_ this +éĤ£ 头 +Indust rial +åįģ éĩĮ +Ġturn around +åľ¨äºĨ åľ°ä¸Ĭ +ĠSh irt +Graph ic +éľī ç´ł +Ġgra bs +Ġrebell ion +Ġhand held +ĠCa esar +åĬŀ äºĨ +Ġpur ported +ER P +ï¼ĮçĦ¶åIJİ å°± +ï¼Į ç¬ijçĿĢ +-ij ms +ï¼Ī x +P ager +Ġinter state +æĪij åĸľæ¬¢ +Ġcol span +r ather +åĪĨ äºĨ +ĠDec imal +" % +( on +ra bb +low ing +Ġbr ushed +æ¥ Ķ +/ ap +ï¼Į æķ£ +Ġd aring +éļı å¿ĥ +leg ation +Ġautom obiles +-f at +Ġpath ogenesis +Ġr pc +over lay +çIJĨ ç§ij +ï¼Į åĿļåĨ³ +Ġdiscrim inate +EST AMP +ĠSh ark +æĬļ æij¸ +Ġcorrect ness +åIJĪçIJĨ æĢ§ +ï¼Į é¢ľèī² +éļı é£İ +è¿Ļ 天 +Ġ ä¸įçŁ¥éģĵ +Ġm ailed +Ġo at +Ġdirect s +å¯Ĩ éĹŃ +æĥ¯ ä¾ĭ +ç쵿´» æĢ§ +æĺ¯éĿŀ常 éĩįè¦ģçļĦ +ï¼ī ãĢģãĢĬ +åıªèĥ½ åľ¨ +ir is +Ġpos ing +Ġdist orted +åĶ ° +Ġe f +ãĢĤ 红 +è¡Ĺ åĮº +ä»İæĿ¥ 没 +å¾ģ ä¿¡ +å³° ä¼ļ +]: = +æľī å¾Ī大 +str len +她 å°± +Ġh uh +respons ible +( with +b lood +ï¼ĮæĪij ä¸įä¼ļ +æķ´ å¥Ĺ +ĠBr as +Ġ' .' 
+_d uration +t ors +ĠBened ict +Ġproject or +_D IG +Activ ities +æĬĢæľ¯ åľ¨ +计ç®Ĺæľº åı¯è¯» +Il legal +çļĦ æ°´å¹³ +èĩª åζ +éĹªè¿ĩ ä¸Ģä¸Ŀ +ï¼Į åħħ满 +ãĢģ åĬłå¼º +Ġ( %) +ï¼Įä»ĸ çİ°åľ¨ +Ġbr unch +Ġhospital ized +_PRO PERTY +éħ¿ éħĴ +ĠLag rangian +E H +ĠD iesel +.c ancel +go al +Ġsmooth ing +ãĢģ æŃ¦ +éħ Ŀ +èĥ ¥ +èĴĻ å¾· +ĠD ip +_m essages +Ġjew el +) ï¼ļãĢĬ +ĠTele phone +ĠSp end +Ġcategor ical +å·¥ç¨ĭ çļĦ +Ġbal d +, 身ä½ĵ +è¿Ľè¡Į æ£ĢæŁ¥ +Is a +è¡Į éģĵ +cre st +ĠCoc onut +bs d +Ġnov ice +积累 äºĨ +Ġrobot ics +ĠDeb bie +åĴ§ åĺ´ +ans wered +Ġ è¶ħ +/ image +åĩº åİĤ +æ±Ł 举 +ä¹ħ ä¹ĭ +Ġconfront ation +ĠK NOW +@in proceedings +Pe ace +Ġt z +æĸ° ä¸ŃåĽ½ +çŀ Ł +Not ifications +_v olume +_k ind +Ġdisappe ars +æİ¢æµĭ åύ +ĠB ore +ãĥĥ ãĤ¯ +è´¹ çļĦ +el man +ç» Ľ +æľº ç¼ĺ +.Data Frame +ï¼Į åIJ¾ +éĺ³åħī ä¸ĭ +LE Y +å¾Ĺ æĦıçļĦ +urre t +两 æł¹ +ĠBe a +Ġlow s +.app lication +. att +ä½İ æ²ī +Ġcompl iments +Pack ages +ä¸ĭ èIJ½ +do ctor +Ġan atom +.column s +åĢ Ń +åĺ´ è¾¹ +åѦ士 åѦä½į +ĠM ing +Ġbranch ing +Ġsubpo ena +op ro +ï½ ľ +åŁİå¸Ĥ 建设 +æĹ¥ å¼Ģå§ĭ +Ġsustain ing +温æŁĶ çļĦ +ĠGameObject TPL +Ġst ing +Ġpart e +ĠUn ified +p dev +åŁºç¡Ģ çļĦ +Af rica +Ġ åįĥ +ff iti +æ±Ĥ å©ļ +äºĨè§£ çļĦ +好 象 +Requ ires +V endor +ä»ĸ æĥ³ +R oman +PS C +æ´Ľ ä¼Ĭ +éĹŃ ä¸ĬäºĨ +导 æµģ +à ¶ +å¾® åĪĽ +Ġemb ro +Ġpossess ing +P ASSWORD +_s ensor +åİļ éĩį +ä¿ĿæĬ¤ çļĦ +åIJŀ åIJIJ +_s imple +ĠFO X +ä½ı å¤Ħ +ä¹Ŀ å·ŀ +_ vertex +ä¸į è´Ł +âĢĿ åľ° +aut hent +åĸĤ åħ» +Ġcy an +çĽĪåĪ© èĥ½åĬĽ +ï¼Į ä¸ĭåįĪ +al ogy +åģļ 大 +е в +Z IP +}/ { +.mod ules +_m ul +Ġduplic ates +ï¼Į æ³¢ +Ġg ums +åĨĴ åĩº +ï¼ĮæĢİä¹Ī åı¯èĥ½ +éĢĢ æ¬¾ +C andidate +麻 å°Ĩ +Ġl ament +ĠPrep ared +ä¸į éĢı +åIJį åĪĹ +èĬĤ æ°Ķ +text area +ĠNE ED +æľī 帮åĬ© +åı¯ä»¥ æıIJé«ĺ +ï¼Į她 åľ¨ +ï¼ĮåĬł 大 +äºĶ èĬ± +çļĦ èĦijè¢ĭ +èľĤ çªĿ +Ġch ores +ĠPas cal +æģį çĦ¶å¤§æĤŁ +ĠSc andin +ï¼Ł è¿Ļæĺ¯ +æĥħåĨµ åĴĮ +.M od +ä¸Ń 对 +主 ä½ĵçļĦ +Ġpre ach +ut ing +ab ella +Ġsp ur +åħ¨ æł¡ +ĠTal ks +è¢ģ æĻĶ +v x +Ġl ure +ĠC auses +ĠL una +Ġfor ged +éĢļ éĢı +ä¸ĭ æĸ¹çļĦ +ys s +leg ant +ãĢģ åİ» +Ref lect +Ġ\ > +Mar 
shal +,ä¸į çĦ¶ +se ud +ĠJ C +æİ¨ å´ĩ +èı² å°Ķå¾· +å°ıå§IJ å§IJ +æĸĩ ä½ĵ +M g +.S ource +Ġarch ived +Ġhar b +ĠK eller +Ġhor izontally +ä½ł å·²ç»ı +_b ucket +æľįåĬ¡ åĴĮ +Ġtend encies +éĤ£ è¾¹çļĦ +计ç®Ĺ çļĦ +ĠTh y +pro g +æĹ© æľī +_s creen +çİĦ å¹» +elect ric +ik en +ç»ĵå©ļ äºĨ +ä½ł åı¯ +A wards +j ee +æ¯ı个 æľĪ +èħIJ çĥĤ +å¥ĩ èij© +Ġse gregation +Ġnot withstanding +çļĦ å½¢çĬ¶ +Address es +度 é«ĺ +ठ¤ +mel on +_ ORDER +鼶 鼶 +mar shaller +诡 å¼ĤçļĦ +Ġw ird +Ġdisappe arance +大 è±Ĩ +åºķ 端 +é¸ ³ +IB C +è¿ĩ ä¸Ģ个 +表 çļ® +ĠFlex ible +Ġp uff +Ġgroom ing +ĠT ire +ĠCS R +æīĵ 好 +ĠPers ons +ar ak +å°ı é±¼ +ä½ł å®¶ +ĠSt ones +æ½į åĿĬ +å¿ĥçģµ çļĦ +éĽª åĦ¿ +èĤļ åŃIJéĩĮ +åIJİæĸ¹ åı¯ +Ġg lands +ãĢģ èĥ½ +伤 çĹħ +ä¸į å̼ +yp ass +çĶ· 主è§Ĵ +ox o +ï¼Į ç©¿çĿĢ +áĥIJ áĥ +çļĦ åĪ¶ä½ľ +è° § +por a +å°±ä¼ļ æľī +çģĮ æľ¨ +ĠBor is +Ġopt ics +æĹ¥ åħĥ +ï¼Įéĥ½ åı¯ä»¥ +ãĢĤ 缸æ¯Ķ +ĠT ac +ĠTR ANS +ä¸Ģ åĪĨéĴŁ +Ġwhit ening +ĠO bl +Pat rick +% 以ä¸ĬçļĦ +æľ¨ é½IJ +å§ĵ æ°ı +Dist rict +åĩº 轨 +ภļ +Ġst ash +ï¼Į æ¶Īè´¹èĢħ +_s witch +åľ¨ 被 +ous se +ç»§ç»Ń 说éģĵ +æ¯ıèĤ¡ æĶ¶çĽĬ +ãĢĤ åĮĹ京 +çº Ĥ +åĨĽ è®Ń + Ĵ +ï¼Į æīĵç®Ĺ +åħ» åĪĨ +ĠCour tesy +ć ĈĈ +éĹº 女 +çĹĬ æĦĪ +æĦıè¯Ĩ å½¢æĢģ +åĹ Ĵ +: T +çijķ çĸµ +èĭı å®ģ +ĠP irates +齿 æĿ¡ +urg ery +w art +çļĦ ç®Ĭ +Enc ryption +" }. 
+Ġcaf é +( uri +K i +æľ¬ ç½ijç«Ļ +comm ons +ï¼Įåľ¨ ä¸į +uc chini +Ġteam ed +è´§å¸ģ èµĦéĩij +çľĭå¾Ĺ åĩºæĿ¥ +åıĹ åĬĽ +ubb ing +ab it +ä¸ĭ å®ļ +æĺ¾ éľ² +ĠAir ways +å°± è¿ij +Ġdes ks +èģĶ çļĦ +END ING +_ controller +å¹¶ åĪĹ +å®ģ å¸Ĥ +c ritical +Ġhyper bolic +uc er +allow een +Rep orter +might y +æ¼Ķ æĪı +Ġwavelength s +ĠFut ures +åĨ² åĨ² +Ġm olding +Ġacad emia +t in +踪 迹 +æīĢ å¤ĦçļĦ +() ( +çŃĶ è¾© +ç»ĺ æľ¬ +-com ponent +ĠMe ans +ent on +æī¾ ä»ĸ +å¾ħ åľ¨ +æĶ¹ åĨĻ +å¸Ĥåľº ä¸ĬçļĦ +ĠM eth +_P AR +èµĦæľ¬ åĮĸ +ĠB om +ĠD in +Consider ing +AS F +ĠBas il +ĠP ulse +æĪĸ å°ij +ĠFly nn +Ġpret ending +Ġexpress ive +ĠSurre y +Ġads orption +Ġf x +æįŁå¤± çļĦ +æľ¬ç§ij çĶŁ +rupted Exception +Ġsp un +纸 æĿ¡ +g iveness +Ġnew Value +çĽĺ åŃIJ +ĠNeg ot +-P acific +è´ŁéĿ¢ å½±åĵį +Ġdeep en +ĠE OF +ç»§ ç͵åύ +ĠA mm +ys es +Ġmaxim izing +ï¼Į è°Ī +ER C +èĢģ çι +çĸı æĿ¾ +Ġof t +Ġfundra iser +ĠLegisl ative +ä¸į å¸ĮæľĽ +m ot +Ġpe ach +-m ap +iction aries +Change Listener +æķĻ ä¸» +辨 è¯Ĩ +.Att ribute +ï¼Į 游 +Ġe psilon +èµĦæºIJ åĴĮ +Ġfol iage +sl ug +-man agement +z ig +çļĦ ç§ĺå¯Ĩ +Ġgreat ness +g overnment +没 好 +Term inal +P enn +æµģ 产 +éĶĢ æ¯ģ +Ġs inking +Sh ot +åħ¨åĬĽ 以赴 +Ġneighbour ing +id an +迪 士 +ĠHy undai +äºĮ éĨĩ +/ ****** +Ġign ition +ingu ished +ĠSing les +度 为 +ĠØ £ +éĢļ è·¯ +ĠMe asures +Ġstr cmp +Ġb orne +å¼ķ åĬĽ +è¢ ± +Ġtem ps +Ñĥ ÑĤ +user Id +Ġpal ate +¸ Ãij +> true +Ġm ish +former ly +Ġ\ - +åħĥ å®Ŀ +_SE PARATOR +V EN +åħī 大 +Cover age +ĠUL ONG +Ġb ury +Ġinter acts +Ġup held +: < +ç»Ī端 设å¤ĩ +ï¼Įä½Ĩ æĪij们 +on so +ĠB un +ĠX en +Ġ å·´ +ê¸ ° +> .< +ī è· +æľīä»Ģä¹Ī äºĭ +Ġmanif olds +IST ORY +éĤ¯ éĥ¸ +[ len +Ġr p +ĠHom eland +J ar +ï¼Į ä¸ī个 +il us +<< << +) æĺ¯ +åĽ ¤ +æ§ ¿ +ï¼Į åİ¿ +èĩªå·± 对 +Ġse ptic +Ġmm ol +( not +ç»Ī çĶŁ +误导æĢ§ éĻĪè¿° +re pository +ĠFrag ment +èĻļåģĩ è®°è½½ +Ġsprink le +m apping +Ġb ri +.Dis pose +Ġloc i +OO OO +ĠDifferent ial +åħ±åIJĮ ä½ĵ +ĠSant iago +Ġover rides +ä¸Ģèά éĥ½æĺ¯ +Ġp encils +Ġd arn +ä¹ĭ ç͍ +Ġsub urb +èĥ½ å¾Ĺåΰ +é¸ ¢ +ç§° éĩį +Ġcytok ine +Ġor naments +åħ¨ æĻ¯ 
+Ġcontempl ated +ĠST D +Stud ies +åIJİ ç«¯ +ï¼Ī äºĶ +| - +ĠL IFE +Av g +å·¨ çŁ³ +ĠSy racuse +æ£ĭ åŃIJ +ä¸įç»ıæĦı éĹ´ +Ġt ighter +çłĶ åΤ +è¿ĻäºĽ äºĭæĥħ +åĨĻåŃĹ æ¥¼ +le ast +ãĢĤ è¦ģæ±Ĥ +æijĶ åĢĴ +W edding +åĪĨå¸ĥ äºİ +åĤ¬ çľł +. activity +class Name +ic io +æľī ä¸įåIJĮçļĦ +.get Data +éī´ èµı +æ²Ļ çī¹ +j ury +èIJ¨ å°Ķ +agn a +Ġfiref ighters +L ady +ĠPer f +å¹² æĹ± +æ½ ¢ +U ndefined +åĸĿ ä¸Ģ声 +:: __ +g rep +åįı åĬĽ +Ġdr ills +帮åĬ© æĪij们 +_enc oder +Ult imately +, æĮģç»Ń +ï¼Į M +é¦ ĭ +åĩł å¹´çļĦ +åįģåħŃ æĿ¡ +Ġsne akers +ĠT rophy +ĠK ard +roll able +æľĪå½± æŀ« +Ġd umps +Ġbro chure +ĠOUT PUT +ĠInd icates +çϽ çļĻ +é¹ Ń +åıijå¸ĥ ä¼ļä¸Ĭ +ä»İæĿ¥ ä¸į +Ġmant ra +ec o +çł ¥ +æľ¬æ¬¡ åıijè¡Į +Ġsqu at +Ġtransmit ting +çķĮ å®ļ +ass adors +ï¼ģ èĢĮ +ä¸Ģèά 为 +åĪļ æĢ§ +åģļ好 åĩĨå¤ĩ +æĨ © +Ġm igrant +ĠZ hao +ĠC OLOR +ç¼ ¨ +åĵ¥ä¼¦ æ¯Ķäºļ +åľ¨ æĹ¥æľ¬ +ĠH ert +Ġpoore r +G ro +Ġ èĮĥ +èĥŀ èĥİ +_d irect +åѦçĶŁ ä¼ļ +é£İ æ³¢ +ç¦ı éŁ³ +æĹ¶ èĩ³ +被 认为æĺ¯ +L abor +ä¸ĵ å®¶çļĦ +,ä¹Ł 没æľī +Ġperme ability +ä¸Ģ è§Ĵ +交 åıĭ +Ġparticip ates +æ£Ĵ çļĦ +_col lection +æľº ç®± +eg en +æ´¾ 对 +* )( +er ator +åIJį èĥľ +æĺ¯åIJ¦ ä¼ļ +ĠBench mark +ĠAm en +Ġaccommod ating +ĠF UNCTION +ĠE k +ĠIT S +ç¥ŀå¥ĩ çļĦ +åĴĮ ä¸ī +à · +ä¼ĺèī¯ çļĦ +çĶŁ éķ¿çļĦ +æł¼ åĬĽ +Ġplasm id +æĿ¥ æİ¥ +Ġresult ant +ĠW iley +Ń IJ +å°ı å¹³ +ĠV otes +åķĨæłĩ 注åĨĮ +éĿĴ èĽĻ +åıĮçľ¼ çļ® +ĠPione er +. include +Install ation +P ri +ĠErn est +es k +ĠC ement +å·¨ é¾Ļ +åij¨ ä¸ī +u en +-L icense +Ġmamm alian +N ik +Ch arge +Ġtransl ating +che mic +Ġfier c +src dir +ç¬Ķè®°æľ¬ ç͵èĦij +ĠM b +Ġch ats +_ usage +Ġcl k +Ġun in +æĽ´ æĸ°çļĦ +à¸ Ĺ +A ustin +æĪ¿ åŃIJçļĦ +ĠMur der +æĹ¥ çħ§ +. 
Anchor +空 空 +, å¢ŀåĬł +che on +Ġè¿Ļ å°±æĺ¯ +çľĭ ä¸Ģçľ¼ +Ġ åıĤæķ° +Ġpr isons +ĠIN DEX +_ uid +çľĭ æĪIJ +èĭį 穹 +Ind ependent +å®Ĺ éŨ +Ġsul ph +T al +EN GTH +Ġchrom atography +åѦ åłĤ +çļĦ大 éĩı +æĹ¶ æĹ¥ +äºĮ ä¸ĸ +-t ag +çļĦæ°Ķ åĬ¿ +æ·Ħ åįļ +Ġint estine +å®Ĺ 主 +æļ´ 涨 +og i +(b uilder +å¤ĩ æĪĺ +çIJĥ è¡£ +å·¥ç¨ĭ æĬĢæľ¯ +Ġout set +ĠChrys ler +nt z +é«ĺ ä»· +åı¯è¡Į æĢ§ +æī« åľ° +Ġ"$ ( +Ġwithdraw als +第åħŃ ç«ł +her ty +te ous +Ñģ п +å¦ĸ åħ½ +æĪij ä¹Łæĺ¯ +é£İ åĬĽ +äºĶ åĪĨéĴŁ +åĢĴ æķ° +æĪij æĿ¥è¯´ +Ġdebut ed +Ġirrad iation +Ç « +ä¸Ģ çĵ¶ +Ġik ke +âĢĿ è¿Ļä¸Ģ +Ġper malink +âĻ ¥ +Ġ 缮å½ķ +_s k +Ġreconc ile +m om +ĠC oo +cl ing +Ġpropag ate +ĠEd gar +_re nder +ç«Ń åĬĽ +-process ing +( readonly +ĠD ocs +å¾· èĤ² +, æĶ¾ +ãĢģ åζéĢł +人 å¿ĥçļĦ +å·¥ä½ľ åı° +Ġh ides +çĽĬ æĻº +Ġmal practice +Ġaut ore +çħ ½ +Res earchers +Ġen rol +_m edia +è·¨ çķĮ +x n +Ġin hal +éķ¿ å¾ģ +æ¶² ä½į +ä¸ĩ åIJį +Def endant +æįĤ çĿĢ +< dt +ib re +let ions +_E LEMENT +ï¼Į åijĪ +Ġb ots +Ġr uth +Ġt ying +éľ ¹ +"> ( +æĺ¯ä¸Ģ çĤ¹ +æĬķèµĦ ç»ĦåIJĪ +roke e +ï¼Įéļ¾ æĢª +èĦ± ç¡« +çŃī æ´»åĬ¨ +ĠBab ylon +Ġ éĵģ +Ġz ijn +Ġgrow ers +æīĵ çIJĨ +ãĢĤ æĹ© +ast on +Ġgl u +/ reference +C aps +åĬ³ å·¥ +Ne ill +ï¼Į åºĶç͍ +æĺİ çŁ¥ +åĪ« æīŃ +ĠQu iet +L ots +ĠMess iah +le in +ï¼Įä¸į 妨 +F ran +ĠSc ulpt +rodu ced +æľ¬ å®ŀæĸ½ä¾ĭä¸Ń +Stream ing +éĢı è§Ĩ +Ġcollabor ated +åħ¬åħ± åľºæīĢ +-g reen +atisf action +ä¸Ĭ è°ĥ +Ġp ued +çĤ¹ æĺ¯ +file Name +Ġpreced ence +Ġcomprom ising +ä»İ åĬ¨ +_n ow +Ġcounc ils +Ġm oss +åIJij 社ä¼ļ +-d ollar +i ya +ç² ½åŃIJ +羣çļĦ è¦ģ +Ġne ph +creens hot +, éϤ +ãĢģ 广 +_T ITLE +Ġdisturb ances +ĠE SG +Ġunus ually +Ġpod ium +_valid ation +Var ious +å·´ 马 +ä¸Ģ åŃ£åº¦ +éģ® éĺ³ +_CH ANGE +åĵŃç¬ij ä¸įå¾Ĺ +rav iolet +ĠKu wait +fin ished +éħįå¤ĩ äºĨ +(s k +ĠMer chant +; } +ï¼Ī åħĥ +æľµ æľµ +åľ¨ åIJĮä¸Ģ +æĥ º +ost asis +é¢ĺ 主 +äºĨ ä¹Ī +G PL +-ad just +ast ings +ä¹ĭ å¿§ +ĠHapp iness +åĮ¿ åIJį +Buffer Size +S erving +X F +缴 è¨Ģ +Ġins omnia +大 è·Į +å¼Ĥ常 çļĦ +ï¼Įç¥ŀ æĥħ +好 èݱåĿŀ +书 çĶŁ +ä¸į 顺 +人 éĢł +_p id +Sl ots +_ elem +åı¯ è°ĥ +åIJ¬ åIJİ 
+H s +Review ed +ãĢĤ æĹ¥æľ¬ +ä¼ļ计 æĶ¿çŃĸ +Hope fully +ä¸İ 大 +çı ŀ +çłĶåıij çļĦ +ï¼Į åįıåĬ© +ĠProf iles +Ġ 票 +æĪĺ åIJİ +msg id +ASH INGTON +ĠSp urs +Ïģ ÏĮ +çͰ éĩİ +ä¾§ æĿ¿ +ĠDrag ons +å¾Ī åĥı +, å¾Īå¤ļ人 +j ing +.G ame +ä¸Ģ ç«Ļå¼ı +"> &# +åħ¶å®ŀ å°±æĺ¯ +æIJ¬ åΰ +ä¸Ģ ç»ı +éĤ£ åĿĹ +Ø ² +åĵ¥ åĦ¿ +B less +çͲ éĨĩ +ï¼Į åĨ¯ +am oto +çłĶç©¶ 人åijĺ +ĠLo vely +è§ĦåĪĻ çļĦ +Ġmat te +Ġsent iments +embr ance +å¯Į æ±Ĺ +uper t +åı¯ 使ç͍ +.S plit +çŀ§ çĿĢ +ĠTer race +Ġinsign ificant +pl acing +Ġen quiries +Res erved +Des igned +: String +æĥ§ æĢķ +-s afe +ĠDet ect +tra ining +Ġmaster y +, åıĮæĸ¹ +][ $ +Ġanch ors +æķij æµİ +æľ¬æĸĩ æijĺè¦ģ +ãĢĬåħ¬åı¸ æ³ķ +ãĢģ 娱ä¹IJ +ĠU FO +æį » +被 æĬĵ +scan f +ï¼Į æĿ¾ +åĮħ 袱 +红楼 梦 +Ġsurround s +ï¼Į åIJķ +ä¸ĭ æ²ī +long rightarrow +Ġdemol ition +U m +å®ī 妮 +Ġ ä¹ĭåīį ++ C +èĩªå·± æīĢ +exper ienced +U h +iest a +Recogn izer +- ion +åİĤ çļĦ +Ġ ä¸Ģç§į +åĶĩ è§Ĵ +Ġfiduc iary +èµĮ åľº +ï¼Įåľ¨ 她 +éĺ³ æŀģ +æĢ» æĬķèµĦ +ĠSpec ification +çĶŁæ´» ä¹łæĥ¯ +æ½ ¼ +M oon +ï¼Ī ä¸įåIJ« +éĽ¾ æ°Ķ +³³³³³³³³ ³³³³³³³³ +ï¼Į å¤ļå°ij +æľ¬ 竳 +ĠL ips +ä¾Ľ éľĢ +å¸Į çī¹åĭĴ +и и +ĠS ne +åģļ 主 +ãĢĤä½ł åı¯ä»¥ +Ġgr pc +æ³¢ å½¢ +è°ģ çļĦ +ï¼Į æŃ£å¼ı +Ã¥ r +æīĭ æŀª +ä»· 款 +Ġbe i +ĠSant os +å°± çľĭ +æ¶ Ŀ +e pt +ĠH ess +å¸ĥ éĩĮ +Ġbl at +两 声 +ey es +个 ä¸įåģľ +æľ¬ æĬ¥ +ĠRa ise +ï¼Į 强è°ĥ +ï¼Ī %ï¼ī +æĺ¯ åįģåĪĨ +åįĩ åѦ +åįļ士 åIJİ +.D roid +æī¬ çļĦ +Ġnest ing +ï¼ļ æĿİ +_p in +t pl +ç»Ħç»ĩ åĴĮ +Ġdé cor +ä½ł éľĢè¦ģ +éĢĤ ä¸Ń +ĠStre ets +ç®Ń 头 +çĸı 忽 +çĶŁ å¹³ +建ç«ĭ çļĦ +ãĢģ èµĦéĩij +读 äºĨ +ï¼Įä½ł è§īå¾Ĺ +------------ - +åīª åĪĢ +æĽ´ åĸľæ¬¢ +Re uters +sc i +ĠUI View +ç¾½æ¯Ľ çIJĥ +æĹ¶ èĢĮ +ĠSuper intendent +ï¼Įä¸Ģ å¼ł +æĮ¯ èį¡ +as than +åŁº æĿIJ +Ġcycl ists +-d esign +ik an +ĠRav ens +- β +Ġre claim +ãĢĤ她 们 +ä¸ĭä¸Ģ 代 +ji ang +åij » +}$ ) +è§Ĵ è´¨ +èįĶ æŀĿ +[ X +_c ursor +& C +, åįł +æĪij ä¹Łä¸į +注 缮 +Ġet her +ĠJul iet +ĠObserv atory +Ġ æľŁæľ«ä½Ļé¢Ŀ +.pers istence +, Z +ĠI PA +æĢ» å̼ +CS R +ĠHy derabad +ĠSm oking +Ġab err +ï¼Į è´¾ +å®¶ 大 +çĽIJ éħ¸ +IN ED +ãĢģ 天津 +ĠOb j +ĠHE AD +Ġeas ing +约 æľī +Ġsupp er +Design 
er +åħļæĶ¯éĥ¨ 书记 +wo ff +s ers +-f ull +Ġre positories +æľį èᝠ+Ġref ining +, ä»ĸ们çļĦ +导 读 +Ġphot oc +our ning +éŨ ä¸ĭ +á l +Ġw c +get Instance +Ġcl azz +åı¯ 使 +ĠFel ix +Ġfil ament +æİ§åζ è£ħç½® +Ġcontamin ants +umber land +çīĽ å¸Ĥ +Ġpersu aded +让 åѦçĶŁ +Ġoutwe igh +夫 åIJĽ +åºĩ æĬ¤ +è me +al ers +ole cule +Ġcourt room +ĠForm ation +ï¼Įä¸ĭ 次 +/ gl +åħ¢ åħ¢ +æ² ½ +绿 åľ° +æł¹æľ¬ ä¸į +å£ģ 纸 +_l imits +Ġplug s +ÃŃ s +Ġd al +éķ ° +è¿Ľ é©» +ç»ķ è¿ĩ +ãĢĤ å¸Ĥ +ak k +· å¾· +éļ ĺ +åıĺ è´¨ +ĠSe asons +临 æ²Ĥ +鸣 ç±» +ï¼Į 太éĺ³ +Ġmis dem +è¿Ļéĥ¨ ç͵影 +Ġ\ ) +- connected +B razil +-m et +Ġearn est +çŃ¾è®¢ äºĨ +Ġconcent rating +, éĺ¿ +ĠMc Don +Y L +.p i +Ġdep icting +ç´§ ç»· +.W ait +tol ower +Ġy elled +Ġdissemin ation +Ġk ale +Ġvac ancy +æ³ķ è¯Ń +ĠAg encies +çļĦ 认è¯Ĩ +åįĩ å̼ +,ä¸į å°ij +èĢIJ çģ« +Ġv als +ĠSuz uki +ĠA PC +ãĢģ 产ä¸ļ +çĤ Ĭ +æİĮ å¿ĥ +稻 èįī +ä½ł åij¢ +ä¸Ĭå¹´ åIJĮæľŁ +Ġ åĥı +æĽ´ åIJį为 +, æľĢè¿ij +Ġf ences +软 管 +.T op +FT P +ç¥ŀ çģµ +Pr inc +ĠP TR +Ġcapac itor +Ġg in +Tag Name +- needed +çīĮ çħ§ +_output s +\\\\ \\\\ +Ġintermitt ent +ĠU tt +sp acing +ĠWorld s +åºĶ 注æĦı +Ġc raz +ä»Ĭ çĶŁ +(s um +ìĦ ľ +E c +Ġel usive +å¹» è§ī +Ġfant as +æĪĴ å¤ĩ +éĹªçĥģ çĿĢ +en ate +对 åŃ©åŃIJ +åĨľä¸ļ åĨľæĿij +_se lected +AP A +Ġpl unge +௠ģ +R U +æŀľ æłij +è·ij éģĵ +çĤ¹ åΰ +å±Ĥ 级 +Ġt l +ĠB ere +_d t +æļ Ħ +ĠLED s +Cong ress +Ġa che +Ġb un +ĠInteg rity +) ` +> ', +- rel +est ream +ä¸į æĮ¯ +ĠWatch ing +秸 ç§Ĩ +æĢ» èĤ¡æľ¬ +çģ¯ æ³¡ +æľº çIJĨ +绳 ç´¢ +\sub subsection +ĠB ark +Ġfront al +åıįåºĶ åύ +Ġdiff raction +LO PT +, éĿŀ +ĠS EL +èī² è°± +éĢĨ 天 +ĠM OT +.n c +Ġ& ' +太 å®Ĺ +Ex act +åĭ¾ èµ· +çIJ¥ çıĢ +Ġentang lement +çĮ ķ +_b ias +æ¥ Ĥ +grad es +fe el +ĠL TD +R ent +Ġde ception +.s ync +) å°Ĩ +d igital +ä¸Ģ个 å°ıæĹ¶ +ä¸ī æĿ¡ +代表 ä½ľ +-d ialog +è§£åĨ³ éĹ®é¢ĺçļĦ +( True +æľįåĬ¡ æľºæŀĦ +math op +Head ing +å·²ç»ı ä¸įæĺ¯ +ä¸¥æł¼ æī§è¡Į +è¿ĩ ä¸Ģ次 +-p re +Ġhang s +- unit +çIJĨ æĢ§çļĦ +Ġobs ess +Ġsl ated +avor able +Ġ åĨ¯ +am ong +ĠC trl +æ® ¡ +Ġland lords +, éĩĩç͍ +ĠOccup ational +M H +ĠOut reach +è¿Ľ éĺ¶ +Ġve il 
+åģļçļĦ å°±æĺ¯ +t ips +çı ı +Ġcorrespond ent +é£ŀ è·ĥ +.b it +Ġ äºĨ +Coord inates +R iver +æĮª å¨ģ +æĪij çľģ +enn el +ĠCh op +_p ublic +端 çĿĢ +ï¼Į æĹ©å·² +Ġd ucks +K ar +B ounding +Event Type +ï¼Į åĪĽæĸ° +æ»ij æĿ¿ +åΰ æĿ¥çļĦ +è¿ĩ éķ¿ +oh ist +ĠEl vis +g ames +ï¼Į T +p ip +ï¼Į çķĻä¸ĭ +- operative +è¿Ļ个 çĶ·äºº +Ġc apped +id ences +Ġest á +_k wargs +ä¸ĭä¸Ģ ç§Ĵ +ãĢĤ éĿŀ +ç»ı常 ä¼ļ +Ġwhis key +, è®¤çľŁ +èĢ ĺ +Ġcl ut +ĠCo operative +让 æĤ¨ +éĢģ è´§ +妩 åªļ +ma id +k W +åıª å°ı +}} {{ +èµĽ åīį +M ul +ĠâĢľ â̦â̦ +/m ol +.d ataset +ĠPower ful +ï¼Į 表æĥħ +座 ä¸Ĭ +éļ¾å¾Ĺ çļĦ +Name Link +ĠChron icles +ĠCha os +_f amily +ĠCl aus +eng ing +åľ¨ åħ¶ä»ĸ +RO SS +Ġcoun selling +Ġg cc +Ġge odes +ĠRot ary +åĩº æµ· +-t abs +Ġuint ptr +I ran +ä¸Ĭ åįĬ +éĹ´ è°į +ĠL ikes +ang i +åĽ½ åѦ +- values +*\ *\ +èĭ± åĭĩ +é¡¿ é¥Ń +ĠG IF +sp oken +ï¼Į åĨħå¿ĥ +-f acing +orb id +ãĢģ åĬŁèĥ½ +å´Ń æĸ°çļĦ +èļ Į +Ġchap el +Ġha il +éĩĩåıĸ äºĨ +ĠG IS +ä¿© 人 +ist em +Ġex claimed +ãĢģ æİĴ +éĢļ è´§ +ï¼Į ç©Ĩ +K P +Ġquestion naires +ĠAdjust able + ¯ +Ġm aze +æľī æŃ¤ +äºĨ åı£ +åĨ° ç³ĸ +Ġip v +å¼Ĥ æł· +æµħ æµħ +ĠCelebr ity +head ed +reet ings +åĽŀçŃĶ è¯´ +笨 èĽĭ +Ġget ter +èĬĤ 缮çļĦ +å¢ŀ æĮģ +per malink +ĠT iffany +åı¯ è¨Ģ +åĵ²åѦ å®¶ +éĽ ³ +ï¼Įä»ĸ åĴĮ +_pro gress +ï¼Į èĭ¦ +Ġ çݰ +å¼Ĭ 端 +D ates +» ÃIJ +Ġdep ressive +æijĦ åıĸ +Per form +âĢľ For +çĹ Ķ +c ake +ãĢĤ ä»»ä½ķ +å°± ç»Ļ +- Identifier +Ġ æĵįä½ľ +ĠWeb inar +Ġfind er +Ġhand book +æĬĹ è¡¡ +, self +Ġ è¿ĶåĽŀ +对 å°ı +åıĤä¸İ åΰ +çŃī è¿Ľè¡Į +æŃ¥ æŀª +士 åįĴ +滤 èĬ¯ +c atalog +æĺ¯ æŃ£ç¡®çļĦ +B eta +ä¼Ĭ æĭīåħĭ +L java +Ġspec ulate +Ġvolcan o +åIJİ åıĪ +Ġs ut +To List +æĢķ æĺ¯ +èµµ äºij +Ġbrain storm +ĠBal let +g ard +-m ass +# line +ient ists +Ġdraw back +Ġrec ursion +éĿŀ常 大çļĦ +d ry +_c ert +Al arm +Value Type +åĶ¿ åͤ +Ġunre liable +缸 éĢ¢ +åıĮ 缮 +Dis closure +H all +P rec +_S R +Ġec lectic +æµ ļ +ĠInd y +ãĢģ æĪIJéĥ½ +è¿Ļä¹Ī å¤ļçļĦ +ภ§ +ä¸Ģå®ļè¦ģ 注æĦı +Ġt unnels +lik es +ä¸į å®Įåħ¨ +为 å®ľ +ãĢĤè¦ģ çŁ¥éģĵ +Ġclean se +æ¯Ľ ç»Ĩ +Ġdimin ish +ï¼Įåħ¨ éĥ½ +ĠIntellig ent +ĠDor othy +ãĢģ 飩 +建 
æŀĦ +fl are +ãĢģ 转 +Ġpoint less +æĹħ游 ä¸ļ +èĤ² åĦ¿ +ĠWor cester +pect ral +any e +åĵį 声 +èĬĤ度 使 +详ç»Ĩ 说æĺİ +ï¼Į å¼Ħ +åľ ĥ +åħĭ æŀĹ +Ġch illy +çĤ¹ çIJĥ +Ġperiod ont +Download s +Iter ation +\ log +- ar +U b +æ´» å¾Ĺ +å½¢ ä½ĵ +å¾® å¦Ļ +ï¼Į ç®Ģ +ac ons +åħij ä»ĺ +igs list +ï¼Įå°± æĬĬ +ĠLiter acy +ĠC FO +Ġbreak out +em onic +ign e +( vec +ĠB ucket +ĠCom bo +c uda +ĠEx ec +æĺĵ æĩĤ +çľģ éĴ± +.R ow +Ġ éĹ® +æĸĩ ç§ij +骨 è´¨ +Ġforward ing +L inda +in ous +id os +aw ks +ĠMart y +R ather +h ort +éļIJ ç§ĺ +_string s +BO ARD +_m ake +Stream s +,éĤ£ å°± +.Line ar +å±Ĥ éĿ¢çļĦ +主æ¼Ķ çļĦ +ä½Ļ ç¯ĩ +ĠWill is +èģĶ æİ¥ +æŃ¢ çĹĽ +å¹´ å°ij +uss ed +å·¥èīº åĵģ +å¼ķ æĿ¥ +G race +åĽ½å®¶ å®īåħ¨ +è´¸ å¸Ĥåľº +: function +ï¼Į 欧 +R ated +P AGE +åī¯ æĢ» +AND OM +q tt +Ġmac OS +H oly +ĠP ip +Ġf rog +ĠB ride +Ġk ö +Capt ain +CL C +ä¼ij åģĩ +èı² çī¹ +.f ire +Ġseason ing +认è¯Ĩ äºĨ +Ġplate au +Ġlan tern +ãĢĤ æĹ¶éĹ´ +ĠList ed +sp i +æµ· æ£ł +Ġcel ery +çļĦ åĩĨå¤ĩ +ç«Ļ ä½į +Ġcoast line +ãĢģ æĹ¶éĹ´ +ĠMost ly +ãĥ ij +äºĮæīĭ æĪ¿ +è§Ĥ æij© +ï¼ĮçĦ¶åIJİ ç͍ +. 
Argument +å¤ļ éĩį +ï¼Į人 çĶŁ +ĠIR Q +Ġper il +Ġgrass roots +ï¼Įè¿ĺ å¾Ĺ +ĠEN V +èĬ± é¦Ļ +O m +ĠEnd point +Ġcontin ual +CON NECT +loc ate +Ġtransc end +Ġdev ised +.col lection +æ¸ħæ¥ļ æ¥ļ +ÃĹ Â +Ġc pp +让 ä½łä»¬ +Ġes lint +p ast +ãĢģ 误导æĢ§éĻĪè¿° +/ int +/ utils +ï¼Įå½ĵ åīį +ĠHouse hold +ĠTable t +L anguages +åģļ çĿĢ +Ġadapt able +ĠP AN +ä¹ĭ åĽ½ +ä¸į åIJĪçIJĨ +ãĢģ èµĦæºIJ +Ġn z +ĠE OS +ES A +åºĹ å®¶ +æīĵéĩı çĿĢ +æģ¼ æĢĴ +è¿ĩ ä¸Ģ +人æīį çļĦ +ĠLE FT +åľ¨ åħ¬åı¸ +Ġdra ins +Ġgener ously +Cas ino +K F +ï¼Į è¾¾ +ä¸įçŁ¥éģĵ èĩªå·± +åıijéĢģ èĩ³ +ores cence +P urpose +Ġ 转 +W is +Ġ 车 +ï¼Į 称为 +art on +æĬ¥éģĵ ç§° +Ġp si +ä¹Łæ²¡æľī ä»Ģä¹Ī +ĠF IELD +Ġiron ic +ç¾İæľ¯ åѦéĻ¢ +ĠByte Array +_ obs +åĬĽ æ±Ĥ +ç¡ Ĵ +LE EP +, å®Įåħ¨ +ing leton +lib s +Per formed +éģŃ åΰäºĨ +Ġlip stick +ĠEvery day +gr ant +åĪ¶ä½ľ 人 +ĠH DR +G rowing +ï¼Į è§£ +ï¼Į åķĨ +ãĢģ æıIJä¾Ľ +åĽł èĢĮ +urd ue +.b oot +âĢ ¬ +èĭ¥å¹² 个 +Ġm ason +, ä»Ģä¹Ī +çİĭ 天 +åĪĹ ç¤º +æīĭæľº çļĦ +_ URI +ç½Ĺ æ±ī +Diff erence +éģ¿åħį äºĨ +ä¹ĭ é£İ +ĠV ault +Ġper for +opl astic +pp a +ç¦ģ æ¯Ĵ +ä¹³ åĮĸ +AG R +çĶĺ å¿ĥ +ol ated +ĠEx amin +举 个 +èİ« æµĭ +çļĦ å±ĢéĿ¢ +-p ublic +Ġdri p +( api +ĠE mit +(' [ +åij¨ åĽĽ +æľº çŃī +ĠPark way +âĪ ´ +ãĢĤè¿Ļ æĺ¯ä¸Ģ个 +éĽ¾ åĮĸ +\] ). 
+åı¸ 空 +è½» ç¬ij +课åłĤ ä¸Ĭ +- Le +åĴĮ ç»´æĬ¤ +身 亡 +软 å¼± +AR DS +åħ´ ä¸ļ +ï¼Į å½Ĵ +la unch +Ġtime frame +ass ing +æĪij们 æīĢ +Ġoutrage ous +äºĶ 彩 +å¤ĸåĽ½ è¯Ń +ç«ŀ éĢī +èĩª åıij +çľ¼ èī² +ĠExp ansion +Ġ #### +è¡Ģ æ¸ħ +na issance +Ġ] ] +ĠJ unction +^* $ +_M AG +çļĦ 被 +èµ°äºĨ è¿ĩåİ» +, 好åĥı +Ġt rop +.R ight +åĢºåĬ¡ èŀįèµĦ +åѦ åīį +åĨį æľī +ĠCard iff +Ġfragment ation +åı¯ä»¥ å¾Ĺåΰ +Ġminimal ist +Ġarchae ological +ä¼ļ æ¯Ķ +Or th +Ġmarked ly +èģĮä¸ļæĬĢæľ¯ åѦéĻ¢ +.No Error +l ance +å¾Ī éĩįè¦ģçļĦ +Ġmon uments +çľĭ æ¸ħæ¥ļ +头 é¢ħ +lock ing +åĵģçīĮ 形象 +For got +cut aneous +( attr +ãĢģ èĮ¶ +åıĭ 人 +Ġfresh water +( inst +() ] +EM BER +ENC ES += null +Ġin secure +转åıĺ 为 +ĠGl ory +Ġcro chet +Nob ody +ãĢģ æ·± +Ġwater front +æ²¹ 缸 +æĺ¨å¤© æĻļä¸Ĭ +åħļå§Ķ å§Ķåijĺ +ï¼ī 对 +Ġne urop +çĨ ¹ +åIJį å½ķ +ï¼Į æ´ª +.F ramework +éģĹ åĺ± +< csv +ĠV endor +空æ°Ķ è´¨éĩı +last ing +è¿ĩ éĩı +æĦĪ åĬł +ï¼Į åIJ¹ +åľ¨ 身ä¸Ĭ +çĶŁéķ¿ åıijèĤ² +al ach +ut an +çķĻ ä½ı +è´¢åĬ¡ çĬ¶åĨµ +ç»ķ ç»Ħ +Ġnic er +.S ession +S olutions +ĠDisc ord +Ġhe fty +ĠAng lo +, 认为 +ur ge +æĪij éĻ¢ +Sc enario +溶液 ä¸Ń +æ¼Ķ ä¹ł +åı² èĴĤ +sk ill +Ġ åĽ½éĻħ +ï¼Į è¿ĶåĽŀ +è¿Ļ å¹ħ +èį ¼ +ï¼Įè¿Ļ æīįæĺ¯ +åĽ¾ çĶ» +Ġst up +и д +Just in +ä»ĸ们 éĥ½ +ï¼Įåı¯ä»¥ å°Ĩ +ĠRoll s +Ġtodd lers +C ards +Ë Ī +主 æĿĥ +Ġ%> % +Ġen large +æİ§åζ 模åĿĹ +ä¹Ł æĮº +_C OPY +ä¾Ľ åħ» +æĬ¥åijĬ ä¸Ń += e +Ġdent ists +æ¯į ä¹³ +Ġhomeless ness +ĠK ot +(). 
__ +Ġsubt raction +没æľī ä¸Ŀ毫 +in ctions +ass ed +ä¹ĭ 乡 +Ġtext ing +åį¡ è¥¿ +_V ERT +ĠNeed ed +j on +äºİ å¿ĥ +ï¼Į两 èĢħ +/ search +h of +B UILD +æľĿ 天 +æģ© æĢ¨ +ä¸į æĶ¹ +ĠSav annah +ar ious +Ġprol ifer +ĠL icensing +å¤ļ å°Ķ +æ²» åĽ½ +å¦Ĥ æĿ¥ +es se +Ġpro claim +ak an +p arsed +å°± 好åĥı +åĬ¨ åIJij +Ġauthor ised +ä»ħ代表 ä½ľèĢħ +ä»İ 严 +G MT +Ġn iece +Ġcl ang +ode grad +(s b +ñ a +è¯ij èĢħ +â ŀ +Ġraz or +ĠHospit als +ĠNa Cl +Ġp its +ĠO ven +, $$ +马ä¸Ĭ å°±è¦ģ +大 éĥ½ +,æĪij æĺ¯ +.pro p +ĠInf luence +ãĢģ åŁ¹è®Ń +ç»Ħç»ĩ éĥ¨ +åģľè½¦ ä½į +躯 ä½ĵ +ĠM erg +çļĦç¾İ é£Ł +æ³¢ éķ¿ +伸 å±ķ +ĠDiff erences +, 第ä¸Ģ +h og +åľ£ æ¯į +æĬµ 触 +Ġ ä»Ģä¹Ī +é¡¶ ä¸Ĭ +åįķåħĥ æµĭè¯ķ +ĠStaff ord +éĺ² ä¼ª +åľ¨ åįĹ +Ġhem isphere +D ental +Ġcook book +纪念 é¦Ĩ +Lo vely +ä¸į åħ¬å¹³ +è¶ħ æłĩ +ld ap +Ġchrom osomes +Ġmultid isciplinary +ãĢģ æķ°åŃĹ +Ġloc ality +(Q t +çļĦ éħĴ +Ġanaly zer +ĠLOG GER +ï¼Į满 èĦ¸ +Ġk idd +ĠK urd +èĮī èİī +åįģ 天 +ا Ùħ +çĮķ çĮ´ +ï¼Į ä¾Ŀæį® +ic ia +ĠR W +, åIJĮ +Ed ited +M agento +ĠAustral ians +沦 为 +: g +L imited +pg f +åĩłå¹´ åīį +å§ĭ 建äºİ +X C +ï¼Į 缮æłĩ +Ġ çģ« +å±ħ å¤ļ +åı² æĸĻ +Christ opher +大 æīĭ +ĠMah arashtra +ĠBuild ings +è·¯çͱ åύ +ï¼Į 缸åıį +éĻĦ 带 +ç¦ħ å¸Ī +åħ³ 头 +åı¯ä»¥ åĪĨ为 +Ġshe dding +ID A +Ġdis cret +ï¼Į没 人 +- entry +æĪIJ 份 +åĮĸ èĤ¥ +Ġrecip rocal +Sh ares +ãĢģ ç§»åĬ¨ +éĩĮ åİ» +Ex ceptions +Ġscreens hots +è¿Ļç§į æĸ¹æ³ķ +Ġlog ically +ç° ª +ĠM OS +çĺ Ļ +Ġgen omes +æ²ī è¿· +æ·¡ æ°´ +.In valid +书 ä¿¡ +ĠW ick +_P CI +wood s +Ġcomb inator +ï¼Į ä½ĵéªĮ +ĠH ut +ç«¥ åŃIJ +Ġsyn onymous +ãģ§ ãģį +, æŀĹ +å¹³ æĹ¶çļĦ +ĠSch neider +è¿Ļä¸Ģ æŃ¥ +çļĦ çIJĨ论 +Ġconsc iously +ĠSub mission +Ġ ä¸ĸçķĮ +æĽ´ ä½İ +Ġquant ification +ï¼Į åıijçĶŁ +ĠC hes +å®ļ è¦ģ +U r +Ġche wing +.J SON +åħ° å¾· +çĮİ çī© +_TR AN +ãģĿ ãģ® +åĩº æ°Ķ +ãĢĤ å¤ı +ES P +ä¼ĺæĥł æĶ¿çŃĸ +女 åŃIJçļĦ +ĠAll ied +ĠCav al +, 她们 +M ASK +ãĢģ åĪĨæŀIJ +prot ection +Ġd ic +åĴĮ ä¸ĢäºĽ +- ready +Ġdiscretion ary +åĴĮ 第ä¸ī +IV ERS +ç²¾ç¥ŀ çĹħ +æĬ ł +è¡Į èĢħ +Ġg or +end as +Ġcogn ition +Ġcam el +å°±æĺ¯ è¿Ļä¹Ī +èħIJèļĢ æĢ§ +åĽ½ æĹĹ +ĠChe ers +Ġric hest 
+.not ify +Ġ åŃ£æŀ« +çļĦ è®Ńç»ĥ +ï¼Į è¿ĩäºĨ +ing ers +Ġemb ody +TR ANS +åľ° éĿ¢ä¸Ĭ +åŁºæľ¬ éĿ¢ +ç´§ éĹŃ +ĠCarn egie +çļĦ 计ç®Ĺ +St even +-trans ition +åĩº ä»Ģä¹Ī +Ġspirit ually +ĠNumer ous +, æĹłæ³ķ +ĠPRO DUCT +Ġat roc +B ang +_ self +åİĭ æĿ¿ +ćĈ ï¼Į +çľ¼ çķĮ +Ġdiss olve +Ġsan itation +Ġsett lers +.P er +I J +éĤ® å¯Ħ +ow e +ä¸īåĪĨ ä¹ĭä¸Ģ +ä¼ļ è°Ī +ï¼Ī ä¾ĭå¦Ĥ +ä½ı 建 +Manufact urer +ä¸įåIJĮ ç±»åŀĭçļĦ +Ġauthor itative +åıĹ äºº +ç§ģ ç«ĭ +ĠThom son +ĠC ITY +Ġ åıijè¡Į +ï¼Į é±¼ +Gl ad +çĿĢä½ľ æĿĥ +æ¤į 被 +C ivil +logen etic +第 åįģäºĮ +Ġfreed oms +Ġle ases +ãĢģ æ³¨æĦı +å¼ĢæĶ¾ çļĦ +Virgin ia +æĸ¹æ³ķ åĮħæĭ¬ +ãĢģ æł¹æį® +Ins ights +å¼ ¼ +çĪĨ çł´ +ĠPet roleum +赫 å°Ķ +F light +åĩº 产 +æĬĬ å°ı +ï¼Į çĹħ +ĠC itation +ir ror +ĠShir ley +side bar +ï¼Į åIJī +æķĻ çļĩ +æĺİç¡® äºĨ +Ġboil ers +, 西 +F ault +Y ears +æĥħ åķĨ +被 æĿĢ +ran o +Ġdel imiter +å®Ŀ åīij +b j +ãĢģ åįĬ +per f +æİ§åζ ä¿¡åı· +- loader +R anges +ç«ŀäºī ä¼ĺåĬ¿ +èĬ± èįī +br ane +ç»ĻäºĪ äºĨ +in fl +bol t +l ane +Ġà Ĥ +çīµ æĮĤ +论 è¯Ń +é©» åľ° +ãģĹ ãģ¾ãģĻ +ĠVM ware +å¿ĥ çĹĽ +Col umb +aid u +ï¼ĮæĪij è¿Ļ +Sub view +ä¸ĢåĪĩ çļĦ +ãĢĬåħ¬åı¸ 竳ç¨ĭ +Ġcon duction +Ġse ismic +ï¼Įä¸į åģľ +Ġtur moil +- ret +] ' +ãĢģ èĥ¡ +or st +æĸ° é£İ +ĠU ps +Ġcont emplate +ĠV AR +ĠI on +ãĢĤæĪij åľ¨ +Ġn uis +Ġparas ite +ä¹ĭ 举 +绿 è±Ĩ +è¿ĺæľī åħ¶ä»ĸ +ĠN ina +åį³ ä½į +M ale +ĠZ IP +ap oration +å³ ¦ +( loc +ï¼ĮäºĮ 人 +V i +Ġmon arch +Ġimpact ful +ãĢĤä¸ī æĺ¯ +-sign ature +-c ounter +/L ibrary +ĠPlatform s +( seq +N orthern +åIJĥ æİī +ĠPhot ographer +× ŀ +é¢ĺ å¹² +ĠInitial izes +Ep och +ãĢĤ åģļ +ĠSm ile +羣çļĦ æľī +ĠAct ing +{ if +Ġas ian +Ġexhaust ive +é¾Ļ 骨 +s it +ä¸īåįģ å¹´ +ï¼Į æĸ½å·¥ +ä¼ļ è¯Ŀ +人们 åľ¨ +æĹłçº¿ ç͵ +å®ĺ åĥļ +Ġstock ed +飵 åij³ +_ms gs +éĤ£ åı¥è¯Ŀ +Ġcarbohyd rate +Ġs oooo +ä¸Ĭ ä¸ĸ纪 +ĠDes criptor +It aly +I g +ĠVal ve +ĠC s +æĤ ´ +Res pond +ĠGu jarat +åݻ年 åIJĮæľŁ +Prob ably +ĠC CTV +è¾ĵ ç»Ļ +\ Delta +\ circ +S olver +. admin +Ġle pt +åĸ§ åļ£ +çļĦ èᝠ+çļĦä¸Ģ åľº +} )$, +ĠR oo +ĠO ber +åıĪ éģĵ +. 
account +ĠUn s +-v ol +_l r +ä½ĵåζ æĶ¹éĿ© +Ġprophe cy +ĠJ al +æľ¬ å°± +ï¼Ī è®°èĢħ +Ġfinal ized +ï¼Į éĶĢåĶ® +ĠKey words +åįģåĽĽ äºĶ +scrib ers +çļĦæĸ¹å¼ı æĿ¥ +Mar ia +ip o +çĽIJ æ°´ +åĵģ æł¼ +ĠQu int +Place ment +å°±æĺ¯ è¿Ļ个 +é¢Ħ ä¼° +ä¹Ŀ 天 +raft s +engu ins +To ast +è¨ Ń +ĠF iled +du plicate +_s uffix +ä»Ĭ天 æĺ¯ +æľº ä¸Ĭ +èϽ æĺ¯ +两 åĿĹ +åºķ æ°Ķ +Ġg on +çĸij èĻij +Ġun ravel +èݱ æĸ¯ +. Exception +^ d +éĩĮ 奥 +Ġpress es +;; ; +å¿ĥ èĤĮ +_AL IGN +ä¸Ģ个 女人 +.P age +ï¼Ľ ä»ĸ +Ġi ii +ĠEnt ries +Ġportray al +临æĹ¶ èĤ¡ä¸ľå¤§ä¼ļ +. ind +æ¯Ķ èµ·æĿ¥ +.C opy +æİ§åζ åįķåħĥ +Ġsteward ship +, ä¸Ģèµ· +声 èī² +Ġperpet ual +Ġmodal ities +ï¼Į æĺ¯ä¸Ģå®¶ +art ing +ä¸Ģ çĶŁçļĦ +\n ode +Ġbehav es +ĠBar ber +Ġantiv irus +- agent +âĢĿ 主é¢ĺ +åĩº åħµ +ĠAl ison +çķĻ é¦Ļ +.r oute +< B +ict ured +ĠE h +@ media +é¡¶ çĿĢ +ĠJud ges +ĠSt ru +ĠðŁ ij +ä»Ĩ 人 +ä¼ģä¸ļ åıijå±ķ +ynchron ized +åŁİéķĩ åĮĸ +å¸ĥ 鼷 +Y A +ï¼Į ç͵åŃIJ +_P HY +man ifest +对 è¿Ļ +å¼Ģ åħ· +\ hat +Ġnot or +Ġtrust ees +Ġwear able +ä¹ĭ äºī +åĩ¡ 人 +éŨ ä¸Ĭ +åĢį æķ° +ìĿ Ģ +åĢ ij +Ġreloc ate +Ġb ic +çľ¼ çĿģçĿģ +ä»ĭç»į çļĦ +Ġag ar +ACH INE +/ @ +nd ered +ĠLe ah +ĠT GF +ï¼Įä½ł ä¸į +EDIT OR +å¤Ħ 以 +ĠCl aud +ĠCamp ing +ĠSer um +æµģ éľ² +param ref +缴æİ¥ å½±åĵį +Ġhur ried +of s +åĽºå®ļ åĿĹ +è¿ĺ 被 +Ġemb eddings +ĠGe ographic +ï¼ļ 使ç͍ +Ġline back +Sub scriber +ĠLow e +èĩ´ åij½çļĦ +Ġcirc us +h ours +Ġ æľīäºĽ +羣 èıĮ +Initial izer +Ġun se +ĠIN IT +_ edges +åıª 为 +åŃĻ æĤŁç©º +{ }{ +ĠK M +å®ī 康 +-M M +ĠG EN +äºĭä¸ļ çļĦ +, çľĭçľĭ +æģ¯ æģ¯ +äºĶ æĺŁ +ä¹IJ çļĦ +-c ircle +-re aching +oton in +å·´åŁº æĸ¯åĿ¦ +Ġam y +ĠInt ro +ï¼ļ å¼ł +Ġtu ples += D +yn a +ĠTurn s +Ġunf olding +ĠEqu als +ç͍æĪ· åľ¨ +âĤ¬ Åĵ +h yp +ĠD ESCRIPTION +_con vert +é«ĺ 涨 +åįİ å°Ķ +Fl ush +Doc s +ĠB har +æľīæķĪ æĢ§ +ĠMet ric +Ġant agonist +ĠPat el +ãĢģ å¹¿ä¸ľ +管 ç½ij +俱 åħ¨ +O pp +ä¸Ńæĸĩ ç½ij +ãĢĤ èĥ¡ +Ġled ger +Ġsen ators +, åĪĨ +ãĢĤ æ±ī +çªģ åħĢ +_g o +ellig ence +çĺ¦ èº« += args +ãĢģ åĽĽå·Ŀ +ri angle +ĠR oc +Ġbra king +ĠOpen GL +ï¼Į èIJ½å®ŀ +ĠD IM +åľ° åIJį +åĢŁ ç͍ +ĠConsult ants +K at +å°± 
åıªæľī +çŁ¥è¯Ĩ åĪĨåŃIJ +éģ® æİ© +ãĢĤ æľīäºĨ +åŁºéĩijæīĺ管 人 +ï¼Į æĪIJ为äºĨ +Ġb ree +ĠU mb +( async +. ap +ä¸Ń 举 +ä¹Į 鸦 +æľĪä¸ĭ æĹ¬ +/ android +Ġsub d +è¿Ļç§į 人 +æĪIJåĬŁ åľ° +éģ¥ è¿ľçļĦ +Ġsho vel +çĤ¹ ä¸Ĭ +Ġan ne +Con current +ath lon +ĠChe vy +ĠE fficient +ä¼ļ æĽ´åĬł +好 èĩªå·±çļĦ +è¡Ģ èī² +黯 çĦ¶ +ĠW o +ĠLOC AL +" So +-n ormal +åĪºæ¿Ģ æĢ§ +Ġvan ish +ï¼Į çļĩå¸Ŀ +ĠB ast +Ġ å°±ç®Ĺ +Ġexhaust ing +, æīĢ +ĠG ret +éĻIJ åĶ® +è¿ij æĿ¥ +Ġrad ically +Ġdesc ended +åĨĽ æĸ¹ +çļĦä¸Ģ æĸ¹ +Exp anded +Ġsing leton ++ s +ets y +第äºĮ å±Ĭ +.as ync +[] > +abc def +ï¼Į æĮº +ĠL ung +Ġreloc ated +Ġlou der +_ OV +ĠUN ESCO +èĬĿ åĬł +ï¼Į å¼ĢåIJ¯ +Ġhom eland +ĠCont ributors +Ġapplic ability +ï¼Įæľª ç»ı +ĠKath leen +çļĦ æĪ¿åŃIJ +ĠFried man +ĠT one +ĠR oma +ï¼ĮéĤ£ ä½ł +ï¼Į åģı +che wan +Ġplate let +ï¼Įä¸İ åħ¶ +_config uration +lic ts +好 åľ¨ +ph yl +нÑĭ е +ãĢĤ åĬł +ĠC FR +b lo +è°ĭ æĿĢ +ä¿® ä»Ļ +ven cy +é¼» çĤİ +Ġcuc umber +other wise +vent h +èIJ¨ åħĭ +../../ ../ +&= & +\ psi +å°± åΰäºĨ +ĠJSON Object +æĶ¯æĮģ çļĦ +ĠBlog ging +Scal ing +Ġ 墨 +Ġ( / +å¾Ĺ è¦ģ +çļĦ大 èĦij +ï¼Į èĢĥ +part ition +, åĪĩ +ĠCor rection +IV ES +ĠL azy +ä¹Łä¸į çŁ¥ +ĠBuild ers +Q E +she ets +åĪĽæĸ° åĪĽä¸ļ +-r unning +ĠCarn ival +ãĢĤ å¾ħ +ĠBy ron +Ġrelent less +As ian +flu id +ĠGe orgetown +( Vector +ï¼Į æĬ¢ +å¼Ģ åIJİ +.D uration +å®ŀæĸ½æĸ¹æ¡Ī ä¸Ń +-n atural +Ġpron ounce +ï¼ī ä¸Ĭ +ribut ing +Ġlong ing +Ġextra ord +ä¸Ń央 空è°ĥ +al ignment +oh o +ĠBed ford +aly zed +l aws +ĠLe isure +_MODE L +ĠD aisy +Ġp aw +ï¼Įä¸įå¾Ĺ ä¸į +å ¶ +Ġab elian +C old +-g rand +ä¸įå¾Ĺ è¶ħè¿ĩ +.Get Value +ãĢĭçļĦ è§Ħå®ļ +Invest ment +Ġµ m +æĬķæłĩ 人 +ãĢģ åıijè¡Į人 +ä¼ļ èĩªåĬ¨ +erm aid +ä¹Łä¸į åĨį +å´ĩ å°ļ +ä¸įåĬ¨ 产 +ãĢģ éķĩ +ï¼Įå¤ļ 次 +æĽ´å¤ļ 人 +-m akers +. 
Conv +A rab +ĠM use +V IS +举 äºļ +Ġweb cam +Be haviour +Ġpal ms +_se ed +ĠC ycling +éģĵ æķĻ +å¤Ħ 女 +æ¬ł 缺 +ĠD aughter +Ġnational ity +Ġconced ed +åħĪè¿Ľ åįķä½į +urs ing +grad ed +ï¼Į åħ¼ +ĠThe sis +ä»ĸ ä¸Ģçľ¼ +Exper iment +æĺ¾èĢĮæĺĵ è§ģ +ĠRec ycling +è´¨ éĹ® +建ç«ĭ ä¸Ģ个 +ect ar +ĠV B +æīĵ åİĭ +oe lectric +IP A +mm as +Ġart is +ä¹Į é¾Ł +ĠW ORLD +èĢĥè¯ķ çļĦ +ĠComput ers +é¢Ĩ çķ¥ +_ ev +è¦ģ åĴĮ +é»Ħ å¸Ŀ +hed dar +.get Time +Att rs +-v olume +Ġdiscrim inatory +Ġstir red +æĪij åIJ§ +ï¼Į å¸Ī +ĠH ir +op ters +ĠWood en +ĠT at +ï¼Į 设å¤ĩ +c redit +( pt +IF S +Ġindu cing +ĠTit ans +çĥ¤ èĤī +èĬĿåĬł åĵ¥ +ure ment +ĠArgument NullException +L CD +æī Ī +åĤ» äºĨ +. verify +Ġ æĪĸèĢħ +Ġr ins +Ġ/ ^ +Ġsumm ers +ï¼Į " +lic ia +æıī äºĨ +ï¼Įèĩ´ 使 +- event +d j +èĥ½ ç͍ +ĠAS AP +ή ÏĤ +Ess ay +æĿ¥ æĿ¥ +æľ´ ç´ł +çļĦ èģĮä¸ļ +éĵ Ľ +íķ ´ +åħ± èģļ +Ġteam mate +å²Ń åįĹ +_un its +ter ra +Ġfuel ed +ĠProp Types +ä¸įä¸ĭ åİ»äºĨ +P AD +ä¸İ å°ı +(w x +éĤ£ éĩĮçļĦ +å·¥ä½ľ äºĨ +ал ÑĮ +å®ŀå®ŀåľ¨ åľ¨ +Ġtrou sers +-m edia +ï¼ĮåIJĮæĹ¶ ä¹Łæĺ¯ +æĸ° 西åħ° +çļĦ èĢģå¸Ī +ign ant +ĠO mar +æĥł å·ŀ +par ated +_T ASK +ĠChe ss +ans ing +Ġinterrupt s +æīĢ æıIJä¾ĽçļĦ +Ġdil ution +_ author +ĠV era +æįĤ ä½ı +ãĢĤ å¤ĸ +åĨĽ åľ¨ +het amine +_ web +ĠSerge ant +, ä½ķ +From String +,ä½Ĩ åľ¨ +:b log +Ġoff season +Te acher +çļĦ åķĨåĵģ +Ġsub type +Ġembry o +æĬ½ å¥ĸ +play ed +ĠCare y +èĥ¶åİŁ èĽĭçϽ +act in +ĠCarp enter +all i +Ġimp ending +è¿ĩ æľŁ +IM AL +Ġdistinct ly += no +', $ +æĥĭ æĥľ +Ġest a +ï¼Įå¹¶ 为 +俨 çĦ¶ +Ġobject ed +Ġscal ability +ht able +çIJ ¶ +, åIJĦç§į +ãĢģ éĩįåºĨ +ĠBox es +ãĢĤ ä¸įæĺ¯ +æľ¬è´¨ ä¸Ĭ +k p +ãĢĤ 两个 +unning ham +ut rients +MA IL +Ġperturb ations +al b +ä¸ŃåĽ½ ç»ıæµİ +ï¼Ł æīĢ以 +Ġmotiv ating +Ïħ ÏĦ +室 åıĭ +æ²ī éĹ· +çݰ å·² +_AL PHA +G y +ĠAl ger +Ġgr in +ãĢģ é±¼ +åĪ©çĽĬ çļĦ +èĥ½ ä»İ +åĩº å±Ģ +éĥ½æĺ¯ ä¸Ģ个 +æĤ¬ 念 +ä¸Ĭ ä¸Ģ次 +ĠTh irty +Ġtoler ated +, 强åĮĸ +Ġul cer +éĢĢ ç¼© +' }, +( inter +} a +ĠP ose +ć ãĢĤ +ĠCh o +表 å±Ĥ +Ġcal iber +ï¼Įä¸Ķ åľ¨ +DO I +iqu ette +Ġprob ate +ĠParent ing +Ġcaf es +æĻ ¤ +Ġj j +Ġblue print 
+Ġmotiv ational +ou ve +å±± 寨 +,åĨį åĬłä¸Ĭ +Ġknock out +est or +Ġqu ar +kit chen +ä¸Ģ ä¼Ĺ +Ġdisc ord +Ġpharmac ies +ot to +åħ± éĿĴ +渤 æµ· +ĠA j +oul os +ĠGL int +Ġoverd ose +ĠS PR +Ġpres ume +_ EL +ĠCheck ing +_ATTR IBUTE +èłķ åĬ¨ +温 å·® +Ġview port +æ¶µçĽĸ äºĨ +æ²ī浸 åľ¨ +avad oc +äºĨä¸Ģ çīĩ +ä¸İæīĢè¿° 第äºĮ +èĤĨ æĦı +NAS A +ĠSQL Exception +Ġcapt ive +Ġaug mentation +Ġfli pping +ï¼Į åľŁ +ĠVac uum +s ix +ä¸Ŀ çļĦ +Ġadvis ers +Ġ ÙĨ +äºİ ä¸Ģä½ĵ +该 æŃ» +ï¼Įä½ł åºĶ该 +Ġtot als +Ġmast ered +at os +ï¼Į ä»° +Ġf used +ĠCorn wall +amac are +åŁºéĩij份é¢Ŀ æĮģæľī人 +ĠN VIDIA +åΰ æľŁçļĦ +æĬ¥ æĸĩ +Ġassert ing +.w arning +å°ij è§ģ +Start Time +.re al +åĪĽéĢł åĩº +ĠWhe eler +ãĢģ åºĶç͍ +ç½Ĺ 伯çī¹ +æĩĤ äºĭ +ĠB ax +建çŃij çļĦ +Ġserial ization +B attery +ä¸į åħ·å¤ĩ +åIJ¸ æ°´ +ĠT ropical +act us +å¸Ĥåľº èIJ¥éĶĢ +æĺ¾å¾® éķľ +G iving +Ġunder m +Ġhard core +Ġall a +Ġper missible +. Connection +为 æł¸å¿ĥçļĦ +ç¾İ åĮĸ +红 çģ¯ +om ens +å·´ æĭī +ç¾İ å¦Ļ +ï¼Į ç´ł +· æĸ¯ +-in stall +ĠEd itors +飩 ç¡ķ +ä½ľåĵģ ä¸Ń +éħįç½® æĸĩä»¶ +Ġreject ing +c asters +ĠS ke +man age +åĮħ åİ¢ +[ num +Ġ 个人 +åıĪ æľīä»Ģä¹Ī +éĺ¶æ®µ æĢ§ +_ Name +Ġcyl inders +çĭ¡ çĮ¾ +ø r +åĮĸåѦ åıįåºĶ +å¼Ĺ æľĹ +/ cl +ä¸Ĭ å½ĵ +åħ¬åħģä»·å̼ åıĺåĬ¨ +Ġ ಠ+mg p +, 羣æĺ¯ +Link edin +_ imp +Ġby e +ĠFern ando +车 çªĹ +ĠCor b +ĠVer ification +- admin +Ġex cludes +ow ell +ĠO CT +ãĢĤ åı¯èĥ½ +ĠInst itutions +Ġmarvel ous +") [ +.e lements +é£ŀ æĿ¥ +ภ´ +横 æī« +em aker +è¿Ļ åĩłå¹´ +ĠAT TR +çļĦ ä»İ +Ġsou ven +Ġsc av +ï¼Įè¿Ļ 两个 +çŃī ä¼ĺçĤ¹ +d ependent +æľĢ å°ıçļĦ +-t ier +æľīéĻIJ åIJĪä¼Ļ +w omen +åľ¨çº¿ åĴ¨è¯¢ +ï¼Į è¾ĵåĩº +reat or +Ġattend ant +大 æĪIJ +å¹ķ å¢Ļ +ĠFair y +çĢ Ľ +ĠR AW +ï¼Įä½Ĩæĺ¯ çİ°åľ¨ +_W E +( rs +_ AX +Ġve gas +.g ui +ĠOut comes +森æŀĹ åħ¬åĽŃ +ĠLe igh +ä¸İ ä»ĸ们 +App ellant +ĠRab bi +èıł èIJĿ +Ġh yster +èĢģ ä¸ī +æľºæŀĦ åĮħæĭ¬ +ĠPref erred +ĠH its +.e u +礼 åĮħ +çļĦ 综åIJĪ +cal ed +对 她çļĦ +Ġrhe umat +ï¼Į 女æĢ§ +åıĸ äºĨ +,å°± ç®Ĺ +. 
alpha +at l +Ġn inety +Ġsh uff +éľĢæ±Ĥ åĴĮ +> @ +Ġfam ed +å±Ī æľį +inherit Doc +ï¼Į åıįåºĶ +-t rivial +C offee +P W +k as +ĠN il +æŁ¥ çIJĨ +çħ§ æĸĻ +çļĦå½±åĵį åĬĽ +Ġacceler ator +ĠM am +ä¸Ģ 楼 +add Class +åĵĪ å¸Į +èµ¶ å¾Ģ +ĠVi agra +åĨħ èĦı +com position +表达 æĸ¹å¼ı +Engine ering +ĠLANG UAGE +Ġcompetit iveness +Ġth umbs +ĠBe am +æŃ£ åIJij +å¾® å¼± +ãĢģ èģĮä¸ļ +ç»ı åĬŀ +ĠH erman +< body +re th +Color ado +ĠEl on +$ m +ç͍ å·¥ +class method +ĠDef initions +ï¼Į åı°æ¹¾ +åľ¨ äºĨä¸Ģèµ· +æĥł æ°ij +/l ibrary +çĥĺ çĦĻ +acc ording +h over +Ġun matched +ĠT ara +çªģåıij äºĭä»¶ +HE MA +ç͍ è¿ĩ +Ġcomm end +åįİå¾ĭç½ij å°ıç¼ĸ +d imensional +ãĢģ éĵ¶è¡Į +åį± æĢ¥ +ĠMalays ian +æŀģ éĢŁ +ground s +ï¼Įåīį éĿ¢ +G arden +un er +以 太 +_p ixel +lex er +é»ijè¡£ 人 +ĠNGO s +ĠS X +Ġconstitu ent +çļĦ é»Ħ +_f p +ig gered +æĺ¯ ä¸įåı¯ +è§īå¾Ĺ è¿Ļ +Ġp ci +ĠP une +ãĢģ æĶ¿çŃĸ +Rem ark +Perm alink +Ġh ires +Ġli bert +æķħ çŃĶæ¡Ī为 +èĩªå·± åĸľæ¬¢ +åĪ· çīĻ +M BA +Ġt ally +é¥®é£Ł ä¹łæĥ¯ +ç»Łè®¡ å±Ģ +åIJĮæĦı äºĨ +Ġpolicym akers +Ġ äºĭå®ŀä¸Ĭ +Ġfl akes +ĠWilliam son +Ġchim ney +ro pping +ĠAss urance +techn ology +åı¬å¼Ģ çļĦ +åħ¨ 社ä¼ļ +erm int +Ġdeb ated +_ ANY +Ġ åIJķ +ĠL ah +ara oke +åĩºçīĪ社 åĩºçīĪ +ï¼Į éŃĶ +ĠP ens +ĠSp ice +-in st +è¿Ŀæ³ķ çĬ¯ç½ª +æĸ° æĺ¥ +è´§ 款 +çļĦæ°´ æŀľ +éħįå¥Ĺ 设æĸ½ +oh a +ãĢĤ åĪ©ç͍ +Ġfam ously +ĠFrank furt +ĠIm mediately +è§Ĵ度 æĿ¥çľĭ +Ġover haul +Ġt bl +Ġver ifying +è§ĦèĮĥæĢ§ æĸĩä»¶ +err no +ç¡® ä¿¡ +Ġparent heses +synt ax +Ġv im +åħ¨ åĨĽ +\ : +aw an +{- # +li que +æīİ æł¹ +Ġtread mill +羣 åģĩ +Ġaut obi +èĥĮ 诵 +.' 
" +_sp in +âĢľ é»ij +ĠAl a +play ing +Ġincre ments +èĩª æĪIJ +ĠSen ators +å¹¿æ³Ľ åºĶç͍ +ĠPsych ological +Com parator +çĺ ª +âĢľ åıĤèĢĥåĨħ容 +å°Ĩ éĤ£ +assert Same +æ±Ł åİ¿ +Ġinstrument ation +Ġf idelity +ow ment +åĴĮ åĽ½å®¶ +.T oken +ace ous +ĠBelf ast +Sc ot +ĠFacebook Share +ĠX i +ĠAv engers +ĠY uan +ĠRock s +ĠKit ty +Ġbed time +ĠQuant ity +çĤ¹ 亮 +-T V +ãĢĤ 欢è¿İ +ä¸į éĻIJ +课 æĹ¶ +çĸ«èĭĹ æİ¥ç§į +Ġpl ank +æĻ´ 天 +Ġconst ructions +æľª 被 +京 æ´¥ +raft ed +åľ¨ ä½łçļĦ +åı¯ä»¥ åĪ©ç͍ +æĢ ¼ +以为 çĦ¶ +å°±ä¸į åĨį +Ġsock ets +åľ£è¯ŀ èĬĤ +wh atever +ãĢĤä¸Ģ åĪĩ +. change +K h +ĠT ribe +)= ( +Ġmem e +K ay +DM ETHOD +D ur +un ami +æ¶ İ +Ġliber ation +çĹ ŀ +}{ {\ +è¿Ļä¹Ī ä¸Ģ说 +ä½³ 人 +èĻļ 伪 +COD ER +Ġsc ooter +éĢī å®ļ +bb ox +ĠW and +Ġammon ia +or ange +çݰéĩij çŃīä»· +H arm +_p oly +ï¼ĮæĪij们 ä¹Ł +à§ ĩ +溢 åĩº +ĉĉĉĉĉĉĉĉ ĉĉĉĉ +r q +NS Number +b ike +Ġb red +ï¼ĮæĪij们 åºĶ该 +Ġgen ocide +è´´ çĿĢ +.pro vider +Ġun beat +æľŁ 为 +B ullet +O nt +ĠI PT +稳 稳 +Ġwr ath +Ġepoch s +Ġch ords +çļĦ èĥĮå½± +æĿİ çϽ +Ġcounter tops +åĩŃ çĿĢ +å¥ij 丹 +_ : +_se lection +éľ¹ éĽ³ +ĠL ets +æį¢ ä¸Ĭ +Ġdrop out +ĠHam as +R ick +åı¯ åIJ¦ +Ġè¿Ļ æĹ¶åĢĻ +æľĢç»Ī çļĦ +_ platform +Ġf rying +è¿Ī åħĭå°Ķ +ĠP J +ĠSnap chat +åľ¨ åįĬ +"> [ +f ade +èĢĮ ç«ĭ +é¢ł åĢĴ +Ġc aves +Ġv c +ĠRom antic +ĠAugust ine +Ġwe e +/ ****************************************************************************** +m nt +ï¼Į ç§»åĬ¨ +çİ°åľ¨ å°± +Ġther mostat +~ âĢĿ +éĢı å½» +Ġmanufact ures +Tim eline += utf +Ġgloss y +Ġdamp ing +åIJ» åIJĪ +z ema +Ġ 注åĨĮ +reach able +Tele phone +ä¿ Ł +h orse +èĭ± å°º +_CONT ENT +ãĢĤ é£İ +Ġha w +æīį å¼Ģå§ĭ +Ġд лÑı +Ġ æ±ī +Ġf g +ac ro +ĠRem odel +T em +åľ¨ 人 +å½ĵ éĿ¢ +.c at +=" $( +& =\ +å®ĺ çļĦ +æ¶Į åħ¥ +Ġintra venous +rat io +çļĦä½ľç͍ ä¸ĭ +OR A +i ants +äºĨ 大éĩı +_r untime +ä¸Ĭ å±± +åĩº æ±Ĺ +Ip v +çŃī å½¢å¼ı +åĩī çν +è´ŃæĪ¿ èĢħ +å¸ĥ çļĦ +ï¼Įåį´ ä¹Ł +äºĭ åıij +åĩł æĿ¡ +Ġdep ict +ï¼Įä¸Ģå®ļ ä¼ļ +åįģä¸ĥ æĿ¡ +- team +ï¼Įä¹Ł åľ¨ +ocl onal +éļ¾ åħ³ +Ġmount s +; ' +f ew +驾驶 人 +_back ground +æĿIJæĸĻ åĴĮ +æĪij们 å·²ç»ı +Cle arly +çļĦ主 
åĬĽ +ç¿© ç¿© +èĢĮæĺĵ 举 +ĠH uge +强 硬 +< ( +缸 æ¯Ķè¾ĥ +an ey +ï¼Į åĪĽ +Âł and +æĹł åģ¿ +ä¿¡æģ¯ è¿Ľè¡Į +éĴ± äºĨ +ĠInf inite +ĠR ational +缸 交 +ĠCon clusions +Ġcher ish +poss ibly +Ġp onder +对æĪij æĿ¥è¯´ +ภª +Ġrot ations +èĬ¬ èĬ³ +责任 å¿ĥ +ĠApp et +Ġf action +Eng land +.google apis +è¯ķ 管 +è¸Ĭ è·ĥ +l ause +IG GER +R ab +ig or +Ġnumer ator +èIJ¨ æĭī +åѤ ç«ĭ +ĠDir ac +ï¼ģ èĢĮä¸Ķ +Ġhero ic +Ġalk aline +Ġprim a +å¿«éĢŁ å¢ŀéķ¿ +æľīæľº çī© +éĹ¯ åħ¥ +C W +ĠT ricks +ĠNow adays +æĿ¥åΰ è¿ĻéĩĮ +v ip +ï¼ĮæĪij 便 +ĠBack pack +æ£ £ +T ARGET +ĠH og +亲 åIJ» +ĠÂłĠÂł ĠÂłĠÂł +æ¯ħ çĦ¶ +Ġl atch +Ġ( âĪĴ +ĠHaw ks +ac in +ps um +éĥ½æĺ¯ çͱ +ĠHer b +v ik +Ġfin er +è¾½ 举 +, éĥ¨åĪĨ +: after +å¥ĩ æīį +ĠCo operation +å®ī举 å°¼ +æĶ¾åľ¨ å¿ĥä¸Ĭ +ĠIn quiry +. byte +æľĢä½³ çļĦ +ld b +_M AT +every thing +æ©ĺ åŃIJ +() }, +au ff +Ext ent +éĺ³ æ°Ķ +Ġprosper ous +审 å®ļ +çĹħ èıĮ +çĩķ çªĿ +ĠGram my +x m +羣 æľī +Ġant idepress +ï¼ĮèϽ 说 +B one +è¿ĩ ä½ł +æµģåĬ¨ èµĦéĩij +/ plugins +代表 äºĨ +EP ROM +/ io +ĠPot ato +çļĦ æľĢé«ĺ +Ġm ama +Ġdom inating +è¦ģ 害 +Ġlam inate +ĠD SP +è£ħ åᏠ+Ġpurch asers +ов ан +Ġa zure +éĻIJ é¢Ŀ +oh an +Ġerect ed +ï¼Į æĪIJäºĨ +urn iture +Document ation +ĠSteel ers +Ġregular ity +ä¸ĭåİ» çļĦ +. constant +ĠMath s +Ġd ew +éĹŃ çݯ +ĠW ax +Ġorgan ise +inn acle +ãĢĤ å¼Ģ +_B UF +Ġpers isted +â Ł +it ory +æĺ¨ å¤ľ +NAS DAQ +ãĢģ 人åijĺ +Ġall ied +注 è§£ +ĠARR AY +M it +ĠClick funnels +èĥ¡ ä¹± +"} ]( +å¦Ħ æĥ³ +Ġnarc iss +æĹł 踪 +çĺĻ çĹĴ +ãĢĤ æĺ¾çĦ¶ +Token izer +ĠL ords +å¯Ĩå°ģ åľĪ +D LL +åĩºä¸Ģ éģĵ +æŁĵèī² ä½ĵ +ĠDis patch +ï¼Į 失 +Ġrespond ers +\ eta +oun s +Ġ 管çIJĨ +ãĢģ èĤī +ç²Ĺ æļ´ +. 
am +äºĶ åIJį +åī¯ å¸Ĥéķ¿ +( static +j p +Ġfin itely +æĺ¯ 没 +lev ation +ç»´çĶŁç´ł C +âĢľ ä»Ģä¹Ī +èĩªå·±çļĦ æĥ³æ³ķ +-e ight +åıijçĶŁ æĹ¶ +ĠLev y +ĠSp ir +um pt +=" \ +èµĶ ä»ĺ +Ġanaly zes +è½° çĦ¶ +ĠA ircraft +æĬĬ å®ĥ们 +(c all +´ ÃIJ +çĤ ľ +Ġpanc akes +亿 ä¸ĩ +乡 亲 +Draw er +åĴĮ åij¨ +æŃ¤æĹ¶ çļĦ +Ġsn ork +åIJĬ è£ħ +èĩª 驾 +æĴĴ è°İ +çļĦ身 躯 +ĠEss entially +ak ov +-back ground +urs ors +è´¢æĶ¿ å±Ģ +ĠPers pectives +U IColor +acc um +éħĿ éħ¿ +Ġwe ary +ï¼Į以 èĩ´ +ãĢĤå¦Ĥ æľī +æĸ¹ä¾¿ çļĦ +åĭ¤ åĬ³ +èĥ½ è¾¾åΰ +Ġab uses +ĠF emin +åĬŀåħ¬å®¤ 主任 +åħ¨èµĦ åŃIJåħ¬åı¸ +åİĨ ä»» +pub lish +æīĢ ä½¿ç͍çļĦ +.add All +A cknowled +h oe +_h ook +Incre ment +Ġre combination +åIJį å°Ĩ +Ġbook let +Ġfur ious +Ġ{ č +ĠApprox imately +ï¼Į 次 +Ġsock addr +- par +F itness +éĥij å·ŀå¸Ĥ +p ots +åıijæĺİ ä¸ĵåĪ© +æĹ¥ è¶ĭ +ï¼Įè¿ĺ ä¸įå¦Ĥ +Spe ech +J a +Ġ éľį +Ġ à° +ç¥ IJ +京 åī§ +ï¼Į åĹ¯ +åĬł æĿĥ +_ void +Ġt urtles +ĠT ay +ĠPer kins +Ġä¸İ æŃ¤åIJĮæĹ¶ +-d el +ãĢĤ è¿ĩåİ» +æľī æĪIJ +Rec ording +ä¸į ä¿Ĺ +ĠW an +Âł the +åıij表 äºİ +åħ¬åı¸ ä¸İ +Ġmisunder stand +çīĩ åŃIJ +éĤ£ä¹Ī å¤ļçļĦ +æº ´ +Altern atively +ï¼Į èĮ¶ +.H ost +æĭį çļĦ +_S K +ĠF rozen +以 åĮĹ +è¯Ŀ äºĨ +f illed +éĿ¢ç§¯ 为 +èļĬ åŃIJ +åĪĨ åıij +ĠWork flow +Ġprerequ isite +ãĢģ éĩijå±ŀ +Se an +ĠSign ificant +IN O +(p art +@ c +æĺ¯ åĵª +è¿Ļ é¦ĸæŃĮ +ï¼Įå½ĵ ä»ĸ +鸡 汤 +AD IO +çĿ¡ å¾Ĺ +çĭĤ é£İ +Ġpl aster +å®¶ çļĦ人 +ml in +Ġlegit imacy +é²ľ ç¾İ +Ġpair wise +åı¯ è¡ĮçļĦ +ãĢģ ç͵åĬĽ +身ä¸Ĭ ä¸ĭ +å·´ åħĭ +è¡£ çĿĢ +ag us +Ġsh attered +æĭ¥ æĬ¤ +ĠRod gers +t oc +Ġd rap +-d omain +çļ± èµ· +è¿ĺæĺ¯ æĮº +åģľ ç͵ +æĤ² çĹĽ +å®¶ æľī +Ġde co +Ġdev oid +Ġcorrect ing +_fl ush +çľĭ çĤ¹ +Ġcle avage +å¼ł 大 +ä¸Ģ ç±³ +Ġprot ector +.n ative +å°±ç®Ĺ äºĨ +ãĢĤ ç¬¬åĽĽ +IC U +ĠLa unches +å¦Ĩ 容 +Ġa rous +æ¸ħ çļĦ +.P K +éĽª çļĦ +F IX +å¤ĸ å£ģ +ä¿¡ å¾Ĵ +asc ar +ĠDVD s +ç»Ļ å°ı +ï¼Įä¸Ģ çīĩ +èĥĨ å°ı +åIJĮä¸Ģ æİ§åζ +Ġrecip roc +è®® äºĭ +Ġaccret ion +Ġsc ipy +arc in +ä¸İ æľįåĬ¡ +Ġh ier +Ġlect urer +-f inal +èµĦ产 éĩįç»Ħ +Ġdivers ified +æĬĬ éĴ± +èĪª æµ· +âĢĿ ç§°åı· +Ġbus hes +ĠNAS CAR +ï¼Į æ¿Ģåıij +Ġp ouch +åij ¸ 
+åŁº åľ°çļĦ +ãĢģ åıį +åı¯èĥ½ä¼ļ 导èĩ´ +. å°Ĩ +Ġbab ys +- request +Ġgr ated +ä¸Ģ è§Ī +âĢĻ ) +è¯ģ çļĦ +çªģçł´ äºĨ +Prof iles +, å¹³æĹ¶ +d ash +ĠAthlet ics +[ { +ç¼ĸ æİĴ +çļĦæĥħ æĻ¯ +èĩª 强 +× ¢ +ĠST OP +ç¥ĸ åĽ½çļĦ +cal es +ãĢĤ该 æĿij +ç͍ çļĦæĺ¯ +Ġx p +èĥ½ æī¾åΰ +ĠD ess +per ate +å¼Ģ 端 +Ġcowork ers +_ ec +夸 å¥ĸ +, å½±åĵį +æĬķ å°Ħ +è´© åįĸ +Ġprophe ts +Ġ âĢĿï¼Į +ï¼Į çĬ¹ +æ¯ ¡ +é¸ ¯ +代çłģ çĶŁæĪIJ +åıijåĬ¨ æľºçļĦ +Div ision +ĠLis bon +-w ell +åŃķ æľŁ +ĠClean er +çļ ¿ +ĠE MS +ï¼Įä½Ĩ åħ¶ +Ġimprison ed +éĢļ 天 +æ´Ĺ å¹²åĩĢ +m pl +ä¸įæĸŃ æıIJåįĩ +ãĢĤ 常 +th ren +are na +åľ¨ è·¯ä¸Ĭ +reat ure +_W idget +(h w +Aut owired +.int eger +èħĶ åĨħ +ç¨Ģ 缺 +J en +ç ¸ +ĠT aj +.c or +_S OCK +.D o +Ġabrupt ly +ier re +AS Y +å¿ł å¿ĥ +ĠP W +è§ī æĤŁ +"} ), +^ b +Ġr aster +表åĨ³ ç»ĵæŀľ +g pu +çļĩ åĨł +Ġhyp not +Ġ é¦Ļ +ï¼ĮæīĢ以 说 +ĠCh ow +ĠâĤ ¹ +{ prop +ru pal +åIJij 西 +Âł çİĭ +ecess arily +T on +(f s +Ġforg iven +Ġpel vic +oph obia +æī¶ æīĭ +æįĨ ç»ij +B ird +_ In +ä¸Ģ åij³ +纸 ç®± +Ġpsy ched +åıĮ å±Ĥ +Ġauthent icate +ä»» çͱ +, re +Illust ration +et itions +Ġemb assy +Ġveterin arian +åķ ¶ +çľ¼ èĬ± +SE TT +E ight +羣 åĪĩ +书 ä¸Ĭ +设计 æĸ¹æ¡Ī +èĽĭ é»Ħ +Ġbe acon +ç§ij 举 +Ġbiom edical +Ġnostalg ia +大 æĢĴ +Ġpay outs +ĠMet als +åįĵ æĹŃ +çļĦ æĥħæĦŁ +Ġun secured +ĠAr th +éͦ 绣 +unct ive +ĠP redict +ä¹Įé²ģ æľ¨é½IJ +( ms +_M E +Ġwitness ing +Ġfront s +ĠB earing +æĺŁ ç³» +S orted +i ab +ĠP ension +\\ \ +Ġdraft s +å¿ ¿ +ç¡®å®ļ 为 +ĠPed iatric +(" ../ +æ±Ł å·Ŀ +um ina +ë ŀ +ï¼Į æŁĶ +.h idden +ãĢĤè¿Ļ å°± +ĠExt ensions +ig rations +Ġsc all +ĠK ub +é«ĺ æĺĤ +æĢ» æĪIJ +ãĢĤä¸Ģ æĺ¯ +çϾ ä½Ļ +( connection +h air +çĤ¹ æ»´ +ask ing +å¹¶ èģĶ +.t ensor +ä¸ĢæŃ¥ ä¸ĢæŃ¥ +Det ect +çĽij å±Ģ +æ¥ļ æĪĪ +åª ² +çĶµè·¯ çļĦ +å·¡ å¯Ł +åģľ æľº +ser de +éĻĨ å°ı +离å¼Ģ è¿ĻéĩĮ +æĸ Ľ +Config s +ĠM IDI +[ # +an ine +åĬ¨ ä¸įåĬ¨ +åİĭ åľ¨ +sql ite +pos als +ç¨Ģ æľī +éħ¿ éĢł +: end +AB B +.st ats +è¾ĵåħ¥ çļĦ +åĩĿ è§Ĩ +x ing +ct ed +å¨ ĵ +èģļ é¤IJ +å½¢æĪIJ ä¸Ģ个 +ĠT ate +Ġsuit ability +åģ¥èº« æĪ¿ +ĠAdm iral +{ The +ĠIs abel +ç¨İåĬ¡ æĢ»å±Ģ +integ ration +le ter 
+rep air +Non User +ĠC annon +- format +Ġprogress ing +ï¼Į åħĪçĶŁ +çݰ éĺ¶æ®µ +ered ith +{min ipage +pl ac +vis itor +Sur v +çŃĶ çĸij +ÙĬ Ø© ++-+- +-+- +Index ed +zz o +Ġre lic +Ġform ations +è®°å¿Ĩ åĬĽ +æ¯ħ åĬĽ +J ordan +flow ers +orph ic +æĺ¯ åIJ§ +_s oc +ï¼Įæľī å¾Īå¤ļ +Ġdop amine +å¦Ĥ æĦ¿ +Ġdownload able +ist o +ru gu +FT A +Ġt ul +_S ervice +och rom +Ġcyt os +Ġslog an +" s +K D +ç²¾ç¥ŀ åĬĽ +byter ian +ĠAn chor +åĹ¡ åĹ¡ +说 äºĨä¸Ģåı¥ +ï¼Įè¿ĺæľī ä¸Ģ个 +è̽ æIJģ +å°¾ 声 +Ġembod ied +M ovies +è¦ģ åΰ +éĤ º +up y +è½® åΰ +ï¼Į åIJĮåѦ们 +å°ı ä¹IJ +å¿ĥ èĻļ +Ġali ases +Ġshrink ing +Ġ åľ£ +ĠU DP +æŁIJ ä¸Ģ个 +Ġ áĥ +ĠC oding +èĨľ çĤİ +F ully +Ġsl ips +Ġexist ential +妻 åŃIJçļĦ +è· · +åĩº å¾ģ +Ġresent ment +Sc r +_ align +Ġmod ulo +ï¼Įä¸Ģ 声 +d ns +ĠS aturdays +ĠGive away +.t race +.trans late +Ġput ative +æĬ± èµ· +建 åĨĽ +åĥı æĪij +è§Ĩè§ī æķĪæŀľ +Ġoccup ations +D ial +al ready +Ġkn ob +T abs +ST S +ĠDar ren +å¤ļ åĬł +è¿Ľ 宫 +ä¹³ æ¶² +ĠPres ence +ãĢĤ å¾· +èĻļ å¹» +ĠCirc ular +ĠActiv ation +ĠBegin ners +ï¼Į è§Ĩ +Ġwe aving +æİ§èĤ¡ åŃIJåħ¬åı¸ +" H +m eter +ãĢģ æ°¸ +Ġarm ies +éŨåı£ çļĦ +Ġdownt urn +èĦ¸ 红 +( link +å½ĵåľ° æĹ¶éĹ´ +Ġ æīĭ +ä¸į ç§» +许 éģĵé¢ľ +夫 çļĦ +ç»ĵæŀĦ æĢ§ +åı¯ å°Ĩ +æĥĬ æĦķ +âĢĿ è¿Ľè¡Į +Ġorig inate +åºĶç͍ ä¸Ń +ĠAli en +ĠB rom +Ġdisp ens +表 å¾ģ +åιéĤ£ éĹ´ +湿 çĸ¹ +fin ish +Ġ ire +åįİ çļĦ +Sp ain +ĠColon ial +ĠB alk +人 åİ» +åĩº 游 +_H andle +辩 è§£ +:b logger +ãĢĤèĭ¥ æĺ¯ +# endregion +}) = +P iece +âĢľ So +Ġpet itions +ĠG ian +Ġunf inished +æī¿ éĩį +gres ql +æĺ¥ 天çļĦ +Ġillum inated +-A meric +Ġp uck +æ¶ ĵ +_p ayload +Ġch inese +Ġdiff ered +ars ity +. 
UN +æĿİ æµ© +Ġm ash +Ġcapital ize +ĠScot ia +ge ons +Ġcre ams +angu ard +Ġch iral +Ġpir ate +Ġ æ¢ħ +ãĢģ åı¶ +Ġdel ve +ĠMS M +ĠSouth ampton +ĠB are +l ut +start ing +.n il +_L AST +,å¦Ĥæŀľ ä½ł +Ġ éĴ± +ãĢģ ä½ł +Ġr ng +åĴĮ å¤ĦçIJĨ +Use ful +ĠEmploy ers +æ¼ ī +顺 é£İ +Ġheart y +å¾® è§Ĥ +Ġdiscipl ined +Ġsp it +å°ı åĵ¥ +Ġm Ã¥ +Ġfl ora +ç¼ĺ çͱ +n ar +Ġ å·²ç»ı +W ake +Ġe bay +ä¸Ģ æĹłæīĢ +ãĢģ åĶIJ +ï¼Ľ ä»İ +Ġfung us +Ñ ķ +çļĦ éĻIJåζ +Ġinnov ate +æīĵ æ³ķ +ioc re +çļĦ 空æ°Ķ +, å°½éĩı +/ ', +Ġo mission +and les +æĹł éĩı +tag Helper +Ġauthent icated +æıīäºĨ æıī +åħ¬ éģĵ +ï¼ļ æĶ¯æĮģ +ĠS ere +æľĹ 诵 +éģį äºĨ +è·Łä½ł 说 +п ÑĢ +ï¼Į åİŁåĽł +,ä¸į ç͍ +å̼ åĴĮ +æľī ä¸ī个 +åĮĸ åIJĪ +åĶ® åįĸ +ĠLink edin +âĿ ¤ +æĿ¥ åΤæĸŃ +ĠString s +charg ed +å¤į åİŁ +æĸĩæĺİ å»ºè®¾ +æĶ¾ 宽 +è¿ĺæľī çĤ¹ +Ġun st +åıĤåĬł ä¼ļè®® +Ġscientific ally +æ¯Ķäºļ 迪 +? \ +Ġtest Get +èİ« éĿŀ +å® ķ +ĠLy rics +è·ij 车 +ĠU W +ang ling +Fac ulty +ë ł +ĠH OT +å®Ī æ³ķ +缮æłĩ åĴĮ +æĨ Ķ +ĠNa ples +it ates +éĹ® åΰ +ĠLux embourg +_F IRST +se in +co ef +ĠMar ijuana +, éĤ£ä¸ª +两 ä¸ī +æĪij们 ä¹Ł +Ġmit igating +Ġsaf ari +红 èĮ¶ +Ġactiv ates +ĠQuarter ly +>{ @ +Ġn ur +Ġcol orectal +ĠM ai +.c allback +( rt +è´£ æĢª +å®ŀç͍ çļĦ +. 
one +T emporary +Ġ æķħ +çİĽ ç´¢ +O cean +çĶŁäº§ å·¥èīº +h aven +ï¼ĮéĿ¢ èī² +åį« åģ¥ +åħ¨ 线 +åĬłæ²¹ ç«Ļ +缸 éĢļ +le c +ç§Ĵ æĿĢ +ĠJ ensen +å¹³ æģ¯ +Ġpal p +ç§įæ¤į ä¸ļ +ĠK iller +ĠEx isting +èµŀ èªī +们 ä¹Ł +Ġsummar izes +re ly +ï¼Į æĭľ +ĠR im +lic es +Ġling ering +Ġv eto +b eg +Ġg pu +æµ´ 缸 +æĺ¯ æĢİæł·çļĦ +æĪij åı¯ +åĮħ çļĦ +çģµ çŁ³ +梨 èĬ± +æ·ĭæ¼ĵ å°½ +没æľī 说è¯Ŀ +æĸ° æľĪ +纸 å·¾ +ĠSTR ING +ãĢģ é¦Ļ港 +Rec overy +æĬ¥åijĬ 书 +åįĥ æĦģ +è¿Ł åΰ +Con versation +uch s +Ġiniti ating +ur us +æľĢ éĩįè¦ģçļĦæĺ¯ +æĸ¯ åħĭ +-c ourse +ãĢĤä»İ æŃ¤ +/ "> +ĠM oto +éĢģ åħ¥ +æ¸ħ æ¸ħæ¥ļæ¥ļ +_W ARNING +å¤ļ éķ¿æĹ¶éĹ´ +ç¥ŀ 殿 +çͰ éĹ´ +ï¼Į请 éĹ® +^ c +è° ¥ +-conf idence +/ img +为 åķ¥ +è·¯ ä¸ĬçļĦ +å¤ļ è¿ľ +ä l +çł´ éŨ +ĠJ ail +ï¼Į èĴĻ +æĪij åį´ +Ġwar p +ĠMar in +Class ifier +ãĢģ åĽ¾çīĩ +rem aining +Ġ æ´»åĬ¨ +.d ump +const ants +ï¼Įèĥ½ ä¸įèĥ½ +ĠMid night +-b etween +ä¸Ģ ç§Ĵ +æĬµ æİ¥ +Ð ľ +åĻ ¶ +éĽĨ ä¸ŃçļĦ +ο λ +äºļ å½ĵ +鸣 åĦ¿ +æµĭ 温 +Ġbat htub +碰 ä¸Ĭ +Ġstim ulates +åĨ¶ çĤ¼ +大 åIJ¼ +éĢĢ è´§ +( process +, åıį +M ental +ĠD ancing +ç½ij åºĹ +W ine +ĠMar cel +ĠOrd ered +Hard ware +F raction +ar ction +SP ACE +Ġmisunder standing +ab o +çĶŁ çģµ +Ġaudit ory +Ġmask ing +.cpp reference +Ġprofound ly +ĠW orship +ĠDis abled +æ²ī çĿĢ +ï¼Įç»Ļ 人 +ĠP FN +ä¸Ĭ èµĽåŃ£ +Ġfl ask +丢 å¼ĥ +Ġcloud y +/ dis +ĠJ PEG +ĠInter val +ĠDef ender +æłij å¹² +æĵ¦ äºĨ +Ġtight en +ä¹Ķ 丹 +å¤ļ 头 +Ġtime zone +夫 åŃIJ +å°ĺ åľŁ +Pop ulation +rocy tes +ï¼Į åĨµä¸Ķ +ãĢ ĸ +itt al +åħ¶ä»ĸ 综åIJοͶçĽĬ +稳 éĩį +Ġè¿Ļ个 æĹ¶åĢĻ +Ġsurre al +os al +çľ¼ å¸ĺ +ĠCor ollary +ĠMaj esty +Ġ å¸Ĥ +ï¼ Ĭ +Ġo zone +Ġper i +Ġmatch up +éĴ» è¿Ľ +ĠN ail +Âł å°ı +. ec +Ġcongrat ulations +D ocker +_F IX +Ġcontempl ating +ĠM SP +ç»ı 绾 +il ience +å®¶ åľ¨ +Lead er +å¢ĵ åľ° +S UP +æĻ¯ æ°Ķ +IM O +Ġä¸į éĢĤç͍ +æģ Ļ +ç»Ļ å®ĥ +æĿĤ èįī +èĥ½ åģļåΰ +Ġhom ology +ãĢģ è¡Į +æĹ¥ æŃ£å¼ı +ĠLy on +Ġn pm +æľ¨ è̳ +æĮ¡ ä½ıäºĨ +èĥ½ ä¸İ +ĠSav age +< Type +pp i +Ġcontrast s +è¿Ļ åı¥ +åħī 头 +Ġembarrass ment +交 åĵį +Ġrad iant +Ġhone ymoon +\ partial +çļĦ ç̧ +è¾ħ æĸĻ +ham mer +éģį åľ° +æ·»åĬł åΰ +. 
ones +ï¼Į æĻļ +Ġsimpl ifies +Ġcommut ative +.Back ground +A ustralian +vel yn +Ġbl k +emon str +ĠPut ting +ĠB orough +ield ing +女 è¶³ +ãĥĥ ãĥĪ +çĥŃ çģ« +W s +é ¤ +ï¼Į éļĶ +ĠI k +Ġst alls +ĠZ hou +æıIJä¾Ľ çļĦä¸Ģç§į +, å¤ĸ +éĴ» åŃĶ +åīį ä»» +å¾® åķĨ +åı¯ä»¥ åģļ +.s ummary +_C LOCK +çļĦ 设置 +ĠL DL +^{ * +col our +åįĬ çIJĥ +åIJij æĪij们 +Ġhead ings +ĠEnh anced +Y G +he ed +ï¼Į ç͵è¯Ŀ +Ġo sm +æŃ£ å̼ +ĠSe ah +z w +ãĢĭ è§Ħå®ļ +ophage al +Im plicit +竹 æŀĹ +ap os +Ġsumm aries +Ġpolar ity +Ġsal ty +çĦ ¯ +æ©¡ çļ® +Ġdip ole +.init ial +å®ī å®ī +éĢģ åĩº +ĠCoun c +æ¯į 女 +Ġc uff +æĹ¥ 为 +Ġsk irts +è¿· 宫 +å°± ä¸Ģ缴 +ä¹Į äºij +ï¼Ł èĢĮä¸Ķ +Ġcoh orts +Ġ第åħŃ ç«ł +Ġp udding +ãĢģ æ¶Īéĺ² +ä¸Ĭ 岸 +Ġend omet +è¿Ŀæ³ķ è¿Ŀè§Ħ +éĻĨ åľ° +奥 å°Ķ +AND ARD +cess ions +Ġwidth s +å¾Ĺ çĽĬäºİ +ĠQu art +vere tt +ce phal +åĩł å¹´æĿ¥ +ç»Łæ²» èĢħ +空 èĻļ +Ġrul ers +ï¼Į 表çݰ +çĵľ åŃIJ +C itation +l ayers +Ġcort ic +ĠCollabor ative +ĠLex ington +-s uccess +ĠR ough +å°Ĩ æĮģç»Ń +- ag +Ġdown hill +ï¼Į éĺ³åħī +ä¿ IJ +éķ Ĥ +建 æľī +æ´» è¡Ģ +è¾¹ å½¢ +Ġmed iator +urd en +åĮ»åĬ¡ 人åijĺ +ä¸ĭ æĸĩ +çļĦä¸Ģ åijĺ +/p erson +è½° 鸣 +çļĦ å¾®ç¬ij +èµ Ĥ +ä¸Ģ次 次 +åĩĮ äºij +Ġmund ane +çļĦäºĭæĥħ äºĨ +沪 æ·± +æĽ² å¼ł +éĢł åıį +Ġdepend ents +èĩªå·±çļĦ åŃ©åŃIJ +Ġdevast ated +TH ON +ĠD oyle +ä¸Ģ çŃī +ãĢĤ åħ·æľī +æĺ¯ åĴĮ +Blog s +çļĦ身 åIJİ +èĢģ å®¶ä¼Ļ +åIJ¬ ä»ĸ +éĤ® 票 +p ure +ĠHun ger +Ġsew age +åłķ èIJ½ +æľīä¸Ģ èĤ¡ +ros ine +ä¸ĭ åıij +Reg ional +天çĦ¶ çļĦ +éĽį æŃ£ +ĠJun gle +Ġz ombies +çĽĺ æĹĭ +.get X +part icularly +ĠT ulsa +æľ¬ çİĭ +ï¼Įä½ł åħĪ +Ġpot tery +t ick +çļĦ åĽ½éĻħ +åĪĨ æĭħ +ï¼Įè¿Ļ äºĭ +_SPE ED +C b +æĪij ä¸įæĥ³ +ĠNot ify +re k +Ġlow ers +ĠAndre as +. He += yes +ax on +èĤ¥ çļĤ +ACT OR +Ġh t +Ġst ag +.t ex +lean or +Ġbott led +Ġ æİ¥ä¸ĭæĿ¥ +Ġdo omed +åĬł åĪĨ +æ»ij è½® +ï¼Į åģĩ +ĠS ung +ĠTe eth +aggreg ate +ĠâĨ ĵ +ãĢĤ é¾Ļ +Ġn atives +ub ber +igh bour +é³ Į +ĠPerson ality +ISS ING +/ std +j oice +çļĦ åĩłä¸ª +.C lick +åį« æµ´ +ĠJ M +, æĹ¥æľ¬ +ï¼Į åᢠ+ĠFl int +çļĦç¬ij æĦı +(Int Ptr +. 
Identity +_ ctl +f ed +comm ission +.B oolean +AM ILY +ãĥ ¥ +il ian +å®Ī ä½ı +çĶ³è¯· 表 +Ġcasual ties +ãĢĤ å±± +ĠH os +è¿İ åIJĪ +Ġmitochond ria +Sm ooth +_f ast +é¹ ¦ +ĠAss ess +Ġre but +åIJİçļĦ 代çłģ +Ġtrans istor +åİī害 äºĨ +ĠAdvoc ate +æ¯ĭ 庸 +Ġcaptiv ating +ï¼Į æĻ® +è°ĥæķ´ 为 +è®¤çľŁ åľ° +(f ull +ï¼Ľ 第ä¸ī +ä»Ģä¹Ī éĥ½æ²¡æľī +ãĢģ åĦ¿ç«¥ +éĩij 丹 +Ġsacrific ed +ĠW iring +Ġins ulated +Ġadapt ations +ĠHon estly +ï¼Į 段 +ĠV IDEO +ĠQ A +æ´Ĺ æ¼± +Ġturb ines +åĽ Ķ +管 æĿIJ +Und o +Ġdist ressed +ä¸į åĪ©äºİ +Ġk ettle +Ġun att +亲 æľĭ +n al +ï¼Į大 声 +纯 æĶ¶åħ¥ +Ġhar bour +, è¾¾åΰ +å®Ī çĿĢ +Ġexpos es +Ġ å¼Ģå§ĭ +Ġas ymmetry +Ġsacrific ing +_ upload +人 éģĵ +æķijæĬ¤ 车 +à ½ +ĠâĿ ¤ +ĠC md +å¥ Ħ +ç¾İ æĦŁ +èĢģæĿ¿ å¨ĺ +Med line +Ġgym n +åºŁ æĹ§ +âĢĿ æĿ¥ +Ġtool kit +æĺ¥ å¤ı +V en +å¼¹ èᝠ+è¿Ŀ 竳 +(j ob +H ay +Ġdirection al +Ġgly c +. That +bra him +ï¼Įå¦Ĥæŀľ æĤ¨ +< F +g rowth +ç»Ļ åĬĽ +èĥ°å²Ľ ç´ł +ĠG arn +sp ath +让 åĪ«äºº +Class Loader +çĨŁ çŁ¥ +Ġstake holder +Z N +Ġ ä½łçļĦ +ra ised +how ever +Ġery th +ãĢĤ åį³ä¾¿ +_ selector +ĠT ou +ï¼Ł è¿Ļ个 +, è®©ä½ł +ĠB IO +å®ŀæĸ½ çļĦ +Ġaer ospace +Z n +ç¼ ® +èµĽ åĮº +ä¾į 女 +ĠOB JECT +opor osis +Ġbit ing +ä¸įåıĺ çļĦ +Ġtr ays +ãĢĤ æ¯Ķ +亦 æľī +ĠGib bs +( Un +åĽ½ è¶³ +)) **(- +Ġ ä¸ĭéĿ¢ +åİ» æĥ³ +ä¸İä¼Ĺ ä¸įåIJĮ +Ġgl ued +ASC II +Ġn ie +yl abel +Ġrespons ibly +åħĦ éķ¿ +Wonder ful +m peg +Ġpr udent +AR GE +Ġanonym ity +ĠMo ist +Ġcart oons +G rab +x ref +ï¼Į åŃ©åŃIJ们 +ä¼ģä¸ļ æīĢå¾Ĺç¨İ +ĠRem ed +Ġdec ays +å®ĩ èĪª +_t bl +Ġmanag erial +Ġ å¾Īå¤ļ +ãĢģ èĩªçĦ¶ +Ġtar iff +åĪĨ 娩 +Ġair flow +ĠIm plications +ï¼Į è¿ijå¹´æĿ¥ +ï¼Ī 约 +æī¾ å·¥ä½ľ +Ġconsolid ate +æĦķ çĦ¶ +ĠN AV +Ġ åıij +mm mm +Ġattack ers +Ġeth n +çĹ ¤ +çĶŁåij½ åij¨æľŁ +_per cent +G ov +cell s +ãĢģ åįĹ京 +Ġc arts +ï¼Į æĶ¹åıĺ +çļĦ身 æĿIJ +ï¼Į æĻ®éĢļ +ä½ľ å¼Ĭ +Ġbook store +ĠFront ier +/lic ense +S ampler +T ue +ä¸ĵä¸ļ å§Ķåijĺä¼ļ +ï¼ĮåħħåĪĨ åıijæĮ¥ +ĠCon nor +éĻĨ ç»Ŀ +å®Įåħ¨ ä¸įåIJĮ +Ġl akh +éĢīæĭ© ä¸Ģ个 +åĸĿ çĿĢ +Ġhorn s +Ġsumm ed +. 
Init +, åħħåĪĨ +ãĢģ ä¿ĿæĬ¤ +Ġion ic +, ä¸Ĭæµ· +åĨĽ æł¡ +ĠJos é +Ġw ie +没 åķ¥ +cr umb +W ITH +ĠBill board +- plan +ĠAud ience +ĠP SA +æļĸ åĴĮ +Execution Context +Ġhead quartered +æİĪ ä¿¡ +@ ", +Ġ Ø§Ø +ill or +顾 åıĬ +ï¼Įå½ĵ ä½ł +ภ¢ +Ġhyd rated +好 åIJ¬ +} > +ĠJ ade +éĤ£ 段 +ĠGree ks +åij½ä¸Ń çİĩ +E sp +Ġlocal Var +åİ¿ åħ¬å®īå±Ģ +Ġl ame +æķ ¸ +ric ht +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ +磨 ç»ĥ +.. " +缸 èģļ +ç»Ń 约 +Ġcast s +Ġske ptic +ï¼Į åĪĨ享 +Ġth ieves +ĠFrid ays +æĬĸ åĬ¨ +Ġrel apse +, çζæ¯į +æī¾ 寻 +yl ie +åºı å¹ķ +f requency +éĶħ åĨħ +ãĢĤ éļ¾éģĵ +Wi Fi +ĠRen o +Ġearthqu akes +? s +æ°´ åŁŁ +ĠEx terior +ĠTim eline +åĭIJ çĥĪ +ï¼Įä»Ĭ æĻļ +ĠGuate mala +æ¿Ģ èµ· +åĸĿ çļĦ +Ġc ron +é¢ Ķ +çł´ 绽 +pl its +Text Color +çŁŃ è¯Ń +éħ¸ çļĦ +ĠExpl oration +ç¾ŀ è¾± +Program s +è°ĥ çļ® +éĩĩç͍ çļĦæĺ¯ +Ġbroad en +Ġcontrad ictory +Ġ_ ) += A +å¡ijæĸĻ è¢ĭ +h ra +ç¾İ åij³çļĦ +S in +Ġdiscour age +å¤ļ äºij +Qual ified +Ġwork around +(c ell +ĠOp ens +Serial izable +æķĻ å¾Ĵ +请 注æĺİ +=\ { +Ġaccomp anies +éĢĥ 亡 +Ġmanip ulating +osex uality +æĹ© èµ· +ĠProb ability +ĠN EXT +Ġvig orous +, æ·±åħ¥ +ĠN ets +æĹ¥ 线 +Ġ éħĴåºĹ +W AY +und e +çļĦçĶŁæ´» æĸ¹å¼ı +æĮ½ æķij +ĠHaz ard +ãĢĤ æĥ³è¦ģ +he p +Ġ? >" +Ġancest ry +XXXX XXXX +è¿ĺæĺ¯ éĿŀ常 +.Un marshal += $\ +åĩŃ ç©º +奢ä¾Ī åĵģ +ĠRe ceived +as us +: type +New ton +ï¼Į对 ä»ĸ +PL ACE +F UL +G ra +ĠM its +ãĢģ åİŁ +æĸij çĤ¹ +æİł 夺 +ï¼ĮæĪij å°±æĺ¯ +ĠCom position +_M odel +ï¼Į å·¦åı³ +åģ ĥ +Ġecho ed +Default Value +/t ools +v ier +ĠN amespace +被 åıijçݰ +ĠL IN +ON ENT +Ġåı¯ æĥľ +ĠHaz el +åĮĹ æĸĹ +ĠHy de +æľ¬ æ¡Ī +çŁŃ åıij +,: , +ï¼Į éĩĮ +çī¹ åħ° +AC P +é¡¶ çĤ¹ +OM A +usp ended +Ġimpe achment += https +at ore +ĠB ead +è¿Ļ åľºæ¯ĶèµĽ +Ġkn ight +ĠSH OW +ä¸įè¿ĩ æĿ¥ +ï¼Įä½Ĩæĺ¯ 她 +åľ¨ æĹģè¾¹ +. tt +触 åĬ¨ +Ġb ans +ï¼ī âĢĿ +cal culate +.g r +Qu otes +in ode +Ġinteg rations +éĺ» åĩ» +YN AM +ä¼ļ æł¹æį® +ä½ł æĺ¯ä¸įæĺ¯ +è¡£ é£Ł +èIJ§ èIJ§ +G h +\ (\ +红 å°ĺ +åĪĨ åŃIJçļĦ +æĽ´ éķ¿ +ï¼ģ ï¼Ī +Ġ ...... 
+âĶĢâĶĢâĶĢâĶĢ âĶĢâĶĢâĶĢâĶĢ +Action Bar +ĠCG Point +ï¼Į åħ´ +æĸ° éĻĪ代谢 +ĠRes olve +å¥Ĺ æİ¥ +ĠRet riev +åĪĿ æģĭ +mod ify +-ed uc +M ozilla +Ġvolunte ered +Car l +Ġmin ced +éĿĻ çļĦ +prot oc +ĠRoot s +ĠH ate +è¿ĺæĺ¯ 个 +çİ°åľº çļĦ +- under +ãĢģ ç½ijç«Ļ +b red +æĸĩåĮĸ æĹħ游 +èģĺ ä»» +Ñĥ Ñĩ +ä¸Ķåħ¶ åıĺåĬ¨ +带 åħ¥ +èıľ çļĦ +ĠA CA +ãĢģ è¡¥ +ĠB onds +综èīº èĬĤ缮 +ĠCont ributor +Aff iliate +ì ľ +_f unctions +é¸Ń åŃIJ +ç®Ĺ å¾Ĺä¸Ĭ +Ġsel ves +Compat ible +çŃī æĬĢæľ¯ +SS H +æĺŁçº§ éħĴåºĹ +. Output +]^ . +å°ı 溪 +}\ ,\ +ĠAtt ach +é¾Ļ头 ä¼ģä¸ļ +( ext +é¸ ½åŃIJ +游泳 æ±ł +P as +× © +Ġm ourn +ĠC lement +Ġcl aw +æĮĩ导 æĦıè§ģ +/ text +çľĭåΰ ä¸Ģ个 +ĠList ening +_V ENDOR +ï¼Į è¯Ŀ +ï¼ī ãĢĬ +èĩªå·± åĸľæ¬¢çļĦ +çľī头 ä¸Ģ +Foot ball +d uring +ĠC ros +Ġorn ament +Ġcre st +Call er +Ġre acts +ĠKa plan +Y O +ï¼Į éĽĨä¸Ń +_f ront +-b eh +Ġas hes +æĶ¯ æī¿ +Ġa ortic +.M ain +umb a +ĠĠ ³³³ +ĠF AST +åIJ« çĿĢ +è¿ĩ çĿĢ +æ³ķ 令 +çī¹ å·¥ +ĠBron x +b lob +ac ock +Ġbu oy +ç¬ij åĺ»åĺ» +ï¼Į æĽ° +Ġg utter +.S imple +< $ +OUT H +s orry +Ġlun ar +ä¸Ģ åīij +ä¸Ĭ ä»» +rec ipe +æŃ£å¸¸ è¿IJè¡Į +ï¼Į请 æł¹æį® +Ġretal iation +å°± å¤ŁäºĨ +ä¹ĭ è¾Ī +åIJİ å®« +Ġmalf unction +Ġdis sect +çħİ çĨ¬ +æ¸ħ åģ¿ +ãĥ¼ ãĥ« +W a +éĵ Ĩ +åĨ² åĩº +Ġsleep s +ãĢģ è§ĦèĮĥ +éĶ µ +车 éĢŁ +项 ç¾½ +aut ical +æĪIJåĬŁ çİĩ +çī©è´¨ æĸĩåĮĸéģĹ产 +{ def +- log +常 人 +ç»ĵ è¯Ĩ +绣 å¸ħ +æĸ°éĹ» åıijå¸ĥä¼ļ +ind i +olith ic +, æµ· +Ġ 缴æİ¥ +æĺ¯ æĪij们çļĦ +æĹ¶ éĻIJ +ĠV ital +ed ited +D rug +av o +Trans actions +l ite +Nut rition +_s izes +å¦Ĥæŀľ 说 +åij¨ äºĮ +ç«ŀäºī åĬĽçļĦ +ï¼Įä½Ĩ åıĪ +éĢĢ è¿ĺ +Seg ments +太æŀģ æĭ³ +çļĦ 第ä¸ī +çİĭ çīĮ +: absolute +çļĦ çļ®èĤ¤ +] $. 
+ï¼Į çĥ§ +è¿ĩ 人 +åı¶ æŀ« +Ġreal ism +(n n +ĠLand sc +åģļ å·¥ +çªģ åĽ´ +ï¼Į大 å¤ļ +y b +ĠNew ark +æ¯Ķè¾ĥ 大çļĦ +æĦŁåΰ å¾Ī +Enc rypt +[ offset +ï¼Į åıĺå¾Ĺ +um ulative +-t ax +Ġpe e +Ġsm ash +Ġent itlement +éĻ¢ éĩĮ +Ġsn aps +.se ed +ãĢģ ä¼ļ计 +Ġshot gun +Ġh olog +ĠP iet +åĺ Ī +Ġ èĸĽ +Ġs emic +ä¾§ éĩį +æºIJ æ³ī +Ġdeg eneration +Ġmass ively +ĠTo oth +/s ervices +ãĢĤ äºij +ĠA SD +/p ub +ĠTurn ing +æĤĦæĤĦ åľ° +v u +Bal ancer +In str +åĽĽ 个人 +æļ´ èºģ +Ġâĸ ª +ãĢģ éĢļè¿ĩ +Ġunreal istic +ĠD iaz +_f un +亲 çĶŁ +ãĢģ æ±Łèĭı +Ġcomp el +ï¼Įæĥ³ æĥ³ +çļĦ èµĦæĸĻ +ä¸ĭ åįĬ +对 ä¸Ĭ +! ", +. owner +Ġë ° +Ġv ested +åħ±äº§ åħļåijĺ +Ġf ittings +à® ¿ +åΰ ä¸Ģèµ· +ĠV L +æ´» åľ¨ +Sh a +æĺ¯ é«ĺ +åĪĢ çļĦ +âĢ ł +讲 课 +Ġchlor ine +ĠA ren +å»ī ä»· +ç͍ ä¸Ģ个 +-b lack +Ġsur rogate +IN DEX +(p attern +åĨľä¸ļ 大åѦ +éĹ² èģĬ +ec ake +ãĢģ ä¹Ŀ +åIJĥ çĤ¹ +è¿Ļ èĤ¡ +ä¸Ń éĥ½ +B at +Ġb orough +ĠVI EW +, å°ıç¼ĸ +ik ers +ĠSh an +B ell +ans w +ç¬Ķ 墨 +ĠComput ational +Ġpolar ized +å°Ĭ è´µ +t rip +el m +ĠMotor cycle +et us +缸 åħ¬ +éĽĨ è®Ń +}} < +å·² ä¹ħçļĦ +èĢĹ æĹ¶ +ï¼Į æĭ¿èµ· +çī¹åĪ« 好 +Ġneon atal +G em +ĠM MP +ĠPl ane +uit ively +ä»İ 没 +ภĦ +s aved +ĠLim its +çͲ çĥ· +ĠScre ens +ä¸Ń èİ·å¾Ĺ +æįį åį« +Ġk os +.p ar +- aff +ĠSl ice +Ġsp i +å¸Ī éķ¿ +æĿij éķĩ +帮 æīĭ +b ilt +è¿İ éĿ¢ +è¯ © +Ġst ale +Ġ(! _ +åijĬè¯ī 大家 +. 
Array +ĠL ob +Ġbright est +ĠRed s +Ġail ments +EN E +Ġkiss ing +_C AL +Ġspecial ised +Ass ociated +å¿ ĸ +Ġk inetics +æĬ¬èµ·å¤´ æĿ¥ +åij¨ åΰ +Ġbirth s +Ġuncon ventional +çļĦ ä¾§ +,å°± åı¯ä»¥ +Ġz ipper +çļĦ æľ¬è´¨ +奥çī¹ æĽ¼ +R J +Ġs alsa +ĠRep resentation +ĠComp iler +é¡¶ å±Ĥ +ls en +è¿ŀ ç»ĵ +Sw ift +Ġ ä¹ĭåIJİ +ĠL ime +å¢ŀ 设 +éģ¥ éģ¥ +/ add +_p cm +' { +èĤ¿ èĥĢ +Ġ åŃ¦ä¹ł +è¦ģ åħĪ +Ġspect ators +éŁ³ä¹IJ ä¼ļ +_EX EC +ä½ľ åĩºçļĦ +éĺ² åį« +丰 èĥ¸ +çα åIJĥ +ĠAd missions +Ġathlet ics +Ġcell ar +å¼Ĥ çī© +Script s +Ġpolymer ase +) this +ĠS oy +ï¼Įå°± å¿ħé¡» +Ġdis ob +ĠEmp ower +, åĵªæĢķ +ï¼Į 第ä¸Ģ个 +羣 æ°Ķ +.S chema +Ġnec rosis +ĠM ud +ä¹ĭ 说 +æijĨ 设 +ä¼ļæľī æīĢ +å£ģ åŀĴ +ç»ıéªĮ 丰å¯Į +Ġh ors +ĠK ah +éĹ² çĿĢ +-sm all +æĹł åĩł +Ġyou re +-d ose +ĠDead line +bit map +Ġrival ry +t abs +ĠT ao +ab er +è´¢åĬ¡ æĬ¥åijĬ +ä¸Ģ 天çļĦ +ĠHon ors +Activ ate +( any +[ + +_ assign +æĢ» æĺ¯åľ¨ +Ġevac uation +St orm +ĠC ors +ãĢĤè¿Ļ个 æĹ¶åĢĻ +èĢģ头 åŃIJ +ä¸į ä¹ı +人 å±ħ +ĠStruct ures +s oc +æĺ¯ 人类 +æĶ¶è´¹ æłĩåĩĨ +æĤ ĸ +_M OVE +.Hash Map +(" : +çĬĢ åĪ© +æ¿ ® +åĩ¹ åĩ¸ +åIJİ æİĴ +ĠAg u +æł Ģ +Ġret ract +èı² åĪ© +-T erm +IM G +ï¼Į 表æĺİ +âĢĶ is +æĴij çĿĢ +æ²¹ æ°´ +ãĢĤ æľª +ä¸ĭ ä¸Ģ次 +åĽ¢ èģļ +Ġscr atching +åĬł èµ·æĿ¥ +æŀľ åĽŃ +_f ace +ĠFe eling +ĠDi ary +çļĦ èµĦæºIJ +.m an +ãĢĤ 没 +v ette +y re +æİ¥ æīĭ +çĿ¡ è¡£ +æĸij æĸĵ +ï¼Į å¯Į +Ġinert ia +) âĢĶ +/ list +_l v +å¤ļ çĤ¹ +èģĶ æĥ³åΰ +ĠM TV +Ġsign ifies +_ operation +ï¼Į 丰å¯Į +æijĦæ°ı 度 +ï¼ĮèĩªçĦ¶ æĺ¯ +ph on +æħ µ +å®īåħ¨ 管çIJĨ +appe ar +yst ore +æīģ å¹³ +_ OT +ad ay +ãĢģ æĹ© +yst e +-h ole +æŃ£åľ¨ è¿Ľè¡Į +äºĨä¸Ģ æĿ¯ +è¶ħ é«ĺ +æĦĪæĿ¥ æĦĪ +âĢĿ ä¸Ģ声 +.st rict +. 
Zero +æĿ¥ éĢīæĭ© +}} ^{\ +Ġdefe ating +_count s +o iler +Ġteasp oons +R ou +i ota +Pr ince +æĿ¡ çļĦ +AS IC +ç¦ı çͰ +æ²Ļ æĭī +INST ALL +ĠN AD +失 åĪ© +Ġmouth s +\ mid +ï¼Įæľī åħ³ +{t ikz +(' : +UN G +ch rom +她 ä¼ļ +_f ixed +_C LEAR +éĥĬ åĮº +B V +L it +ĠW D +_M sk +< b +Main tenance +ĠÑĢаР· +æľºåύ 人çļĦ +ï¼ļ æĹł +ç¤ ´ +-h uman +ĠH IP +éĥ½ 对 +D omin +m Ah +ãĢĤ åĪļ +åıį åĵį +Fl a +j am +Com mission +ãĢģ éĩįçĤ¹ +Ġmat uration +æľī è¯Ŀ +çħ§ èĢĢ +ĠPay ne +èĥ½å¤Ł å°Ĩ +èĥ½å¤Ł 帮åĬ© +ĠGu er +rus ive +Ġtrou bling +Ġcohes ive +.b its +åIJĦ个 æĸ¹éĿ¢ +æ¯Ĺ éĤ» +ĠL iteral +éĥ½ 以 +éħ¸ 碱 +ĠGen etics +æĹ¶éĹ´ åİ» +Ġfact o +Ġsection al +çļĦ人 ä¹Ł +ero on +èĤ© è´Ł +pt une +ï¼Įæľī 个 +ĠSp ins +Just ice +ä¸Ĭ 楼 +æĺ¯åIJ¦ 符åIJĪ +ä¼ļ æ¯Ķè¾ĥ +â̦ . +ãĢĤåĽł èĢĮ +宽 广 +çıį èĹı +Ġcart ridges +éĺ³ çļĦ +Ġbad ges +ï¼Įå¸ĮæľĽ èĥ½ +åı¯æĢľ çļĦ +Ġr r +ĠKat rina +ï¼Įä¸į 代表 +天èĬ± æĿ¿ +Ġsilhou ette +代 åĬŀ +é¢Ĩ çĿĢ +G RE +s alt +çļĨ æľī +Ġspo iled +Ġd ns +缣 åıĭ +Ġ çľĭæĿ¥ +祥 åĴĮ +åħħ æ°Ķ +ĠMin istries +" When +_ cons +Ġmult im +Ġ çϾ +ĠC REATE +Ġget Value +åŁİ åįĹ +ĠOr t +(m atch +ï¼Į åıĺ +qu el +(s aved +ĠÏ ħ +Ġstack ing +æ¿ĢåĬ± 计åĪĴ +Ġ( £ +车 åºĵ +Ġglobal ization +å³° çļĦ +Ġan k +Ġprompt ing +Ġhurd les +Ġimp atient +Ġrem ix +ĠRep resents +Z en +é¦ Ģ +ãĥ ģ +( location +åı² å¯Ĩæĸ¯ +æķ¬ 佩 +Ġw aved +z x +带 åΰäºĨ +äºĨä¸Ģ å¹´ +Ġdel ine +tr an +ĠCon current +å¤ļ æĹ¶ +åĨį 好 +æİ§åζ æĸ¹æ³ķ +Ġpermut ations +.Debugger NonUser +æĢĿ ä¹ī +ĠAd option +ĠFull er +Old er +å°ı äºĨ +ĠReg ex +ĠN ile +k id +co ords +ãĢģ 广åijĬ +.n orm +æį§ çĿĢ +æĿĤ 交 +è¦ ĭ +( ct +æĸŃ è·¯ +ĠText View +Ġwithd rew +åĴĮ æīĢ述第äºĮ +ãĢģ ä¸ĥ +max t +ï¼Į åģıåģı +B loom +æŀĦ åĽ¾ +Ġwor sh +Ġsyn aptic +æĦŁæŁĵ çļĦ +é«ĺ 大çļĦ +妥 å½ĵ +缴 å¥Ķ +åı° ä¸Ń +åĽŀ èį¡ +-de vel +Ġ éľĢè¦ģ +Ġb x +çĽij管 éĥ¨éŨ +Ġconting ency +ç©¿ çĿĢä¸Ģ +s ales +è® · +åºĹ 主 +ï¼Į å¾ĹåΰäºĨ +ch u +ĠQ ing +.Se cond +è§ģ ä½ł +_ utf +åѦ 好 +å¼Ģå§ĭ åľ¨ +Ġtut ors +èIJ¥åħ» ä»·å̼ +________________________________ ________________________________ +å¹³æĹ¥ éĩĮ +l ots +åħ¬ æľī +æ³Į å°¿ +æĹ¥ æĬ¥éģĵ +ï¼Į P +ĠH ipp +pl 
asia +æĶ¿åºľ éĩĩè´Ń +âĪ Ĥ +ä¸Ń 年人 +p aid +æİ¥ åĬĽ +.d type +åºķ éĥ¨çļĦ +ç¨ĭåºı çļĦ +N AP +hat t +çĥŃ å¤ĦçIJĨ +æĸ¹å¼ı è¿Ľè¡Į +æ»ĭ çĶŁ +ĠGodd ess +B UFFER +ãĢģ èĤ¾ +èĩ³ é«ĺ +-re viewed +ä¼łæĦŁ åύçļĦ +çĨĦ çģŃ +Ġfare well +_ avg +é£İ æ°Ķ +Ġdel ves +ï¼Į è¡¥ +ä¾Ľ å¥ī +西 çº¢æŁ¿ +åŃĺ åıĸ +Ġ åį³ä½¿ +æĿ¡ä»¶ åĴĮ +转æį¢ åύ +_PARAM ETER +读 è¿ĩ +Y D +ĠLoc ale +çİĩ è¾¾ +ĠAnal og +ĠSoph ia +éĤ Ĥ +ĠGPL v +-a uth +ĠD ix +Ġcour teous +( abs +ï¼Į åĬ¨ä½ľ +.p ow +ï¼Į æĭħä»» +Ġb ile +éŁ³ æķĪ +线ä¸Ĭ 线ä¸ĭ +oth s +âĸ º +Ġlymph ocytes +? is +min or +ï¼Įè¿Ļ 个人 +æĶ¶ ç´§ +, äºĨè§£ +pp o +ĠPl asma +æĪĺæĸĹ ä¸Ń +æıIJåΰ è¿ĩ +k ick +.d iff +ï¼Įåľ¨ æīĢè¿° +/ modules +< x +Ġover he +Application Context +.l anguage +åİ¿ 人 +omed ical +èµ° åħ¥ +éĢĤ äºİ +Ġextrem es +èıģ èıģ +Ġin land +åıĪ åIJį +代表 æĢ§çļĦ +Ref und +æŃ£å¸¸ å·¥ä½ľ +ĠH alo +Ġ( ($ +ĠP unch +Tr ading +, æĻļä¸Ĭ +çģ¾ åĮº +æ¯į åŃIJ +éŀł 躬 +( axis +çļĦä¸Ĭ 端 +Ġdia per +èĶ ¼ +åıĤèĢĥ æĸĩçĮ® +ä¸ŃéĹ´ çļĦ +ï¼Įä¸Ģ åľº +æ¶Ī çĤİ +æľĢæĹ© çļĦ +ĠTr in +说 ä¸Ģä¸ĭ +CON CLUS +Ġdis closures +ĠS Q +cul us +de leted +è¯Ń è°ĥ +åĵįåºĶ äºİ += ', +ĠA jax +é£İ ç͵ +Ġimpe cc +Ġ< # +å®īè£ħ åŃĶ +Ġinf erences +, åĽ½åĨħ +ãĢĤ åŃ©åŃIJ +转 åĽŀ +ãĢģ 交æĺĵ +ax ial +ðŁ Į +Ġcens orship +F emale +о Ñĩ +ĠF ault +Ġj ac +çĹĽ åĵŃ +çļĦåĨħ å£ģ +Ġembry onic +ĠV ince +çŀª 大äºĨ +ĠSh am +_S INGLE +ĠScholar ships +Ġforesee able +å²ģ 以ä¸Ĭ +è½® æ¤ħ +æĹł ç͍ +Ġdi apers +,æĺ¯ åĽłä¸º +h f +Elements By +ãĢģ ä¼ļ +è·¯ äºĨ +çľ¼ åľĪ +åĮĹ å®ĭ +Ġmosquit oes +带 åŃ©åŃIJ +(x ml +åĪº çĹĽ +çļĦ éĩijé¢Ŀ +ĠV ERSION +ĠLam bert +æĹ¥ æ¶Īæģ¯ +_m ock +ç»ĵæĿŁ æĹ¶ +设ç«ĭ äºĨ +æĺ¯ä¸Ģ æľ¬ +åİĨ ç»ĥ +Ġsand als +æľ¬æľŁ åĢºåΏ +Ġlobby ing +Ġ é£ŀ +Ġj arg +æīĢ å¤Ħ +ĠConf idence +ĠBY TE +Ġcont empor +get ting +Ġwhis per +a head +ç²īä¸Ŀ 们 +Ġtet ra +ãĢģ åħ¨éĿ¢ +.Col lection +Ġvou chers +ap at +æľ¬ éĥ¨ +çĿ£ å¯Ł +ite ch +quer que +ilant ro +_PRI ORITY +, 举 +讲 æİĪ +ĠTime out +Ġimmun os +Ġprag matic +æ°ij çļĦ +Ġany ways +çͱ ä¸ŃåĽ½ +( Message +sw ire +Ġmicro bes +ç¥ĸ å¸Ī +Ġf ps +âĢľ åħ¨åĽ½ +ISH ED +_S END +çͲ éħ¸ +追 ä¸Ĭ +Ġcounter fe +ad ena +å·² 
è¾¾ +ï¼ĮæĽ´ 好 +_s n +Sing apore +, true +Ġmor ally +A ward +ed ium +_L ANG +个 åĽ½å®¶ +Ġbul ky +ç§ī æĮģ +ĠOri ental +l hs +ï¼Įåľ¨ ä¸ŃåĽ½ +Ġan emia +åIJIJ åĩº +Ġd uality +Ġcraftsm anship +ar ctic +OC I +craft ed +D iet +ĠV id +$ _{ +, éĴĪ对 +Ġpow dered +ç͵容 C +è¿Ħä»Ĭ 为æŃ¢ +Ġcomp artments +对 æĪij们çļĦ +ä¹Į é¾Ļ +Ġde em +Ġsuper star +Ġcard inal +w ash +uy a +oss ing +-g ood +ä¹ĭç±» çļĦè¯Ŀ +é£İ 声 +ä¸Ģ缴 åΰ +RUP T +Ġc asing +IR A +. UTF +è´¢ åĬĽ +æŁĵ æĸĻ +s chemas +çļĦ èģĶç³» +ç« º +æĬ¼ éĩij +Ġwholes ome +- ish +sub scription +ab by +ĠSp atial +Ġ å·¦ +Ġmag istrate +人 åķĬ +Ġcreat ively +è¡£ è¢ĸ +ic um +ĠV ox +é«ĺ é£İéĻ© +Ġalle les +çŤ çĸ® +åºĶ æĺ¯ +Ġmic rom +ĠS prite +(p re +NS Dictionary +нÑĭ й +åĩºä¸į ç©· +.S erial +æĪIJ为 ä¸ĢåIJį +Ġinvent or +Ġmat cher +æĿ° åĩºçļĦ +dim en +ãĢ ľ +ãĢģ åħ¬åħ± +è¿Ļ 帮 +Ġro l +æĶ¿åºľ éĥ¨éŨ +éĢĨ åıĺ +éķĩ ä¸Ĭ +ip a +*) & +u ers +è·ĥ åįĥæĦģ +pl astic +æľį å½¹ +å¼§ 度 +ĠS EE +ĠDod gers +è¡ ¢ +ĠD irty +åĨį ä¹Łä¸į +天 æĺİ +ï¼Į è¶ĬæĿ¥è¶Ĭ +Ġinter tw +å±Ģ åī¯å±Ģéķ¿ +") ] +å¾ģ åħĨ +Ġtel esc +ï¼Į N +çļĦåľ° çĤ¹ +ĠY A +Ġregular ization +ĠBro oke +ä¸Ģ åıij +ãĢģ åľŁ +ay be +äºĨ è¿ij +ĠP ork +B LOCK +Cont rollers +Ġaut os +.P arent +Ġtub ing +åįĥ çĵ¦ +åĬ¨çī© åĽŃ +ĠG rav +ĠAn ime +ĠAnt ib +od on +Ġde an +Ġfinal ize +ĠC ure +ĠL oyal +æīĺ 马æĸ¯ +æĢª æĪij +p ie +ï¼Į 康 +Ġfor ge +åĽ½ å¤ĸçļĦ +Ġshe ds +ä¸Ĭå¸Ĥ çļĦ +éͦ èµĽ +Sim ulation +æ¤Ń åľĨå½¢ +Is n +æĹł çĹĽ +Ġsucceed ing +æ°Ķ åĬ¨ +è®° ä½ıäºĨ +è§Ĥä¼Ĺ 们 +w ares +ĠP ike +æĶ¹åıĺ çļĦ +Le on +Ġoscill ation +çļĦæľī çĽĬ +I cons +积 éĽª +ä¸Ģ æľµ +ä¸į 以 +ä½ł 以为 +Ġtransition ing +æµ·åįĹ çľģ +Ġ" :" +Ġthread ing +å±ı é£İ +Arch itecture +ï¼Ľ è¦ģ +SP ORT +- param +Ġdifferent iable +Ġun conditional +ĠWal ton +ï¼ĮåĪ« 说 +Ġnu anced +ĠB illing +ĠF erry +èᝠç͍ +/ commit +ir ations +Ġsand box +åįģåħ« æĿ¡ +ä¹ĭ ä½į +oph yll +ĠMic rowave +ert ime +, opt +ãĢģ 软件 +Ġu d +éĺ² å°ĺ +ĠDis crete +Ġmission ary +æ´¥ æ´¥ +Ne uro +åij¨ æľŁçļĦ +æ°Ķ åľº +ĠRef und +/ is +Ġm arty +ï¼Įå°± å·²ç»ı +æ´Ĺæīĭ éĹ´ +- Th +ĠPhys icians +е ÑģÑĤ +/s ervice +éĿĴå²Ľ å¸Ĥ +( mp 
+ãĢĤ åħĥ +ठ® +/ php +ĠFound ed +ĠY i +çĭĤ æļ´ +ä¸įäºĨ äºĨ +. Project +ä»į æľª +(" \\ +Tim ing +åħ·ä½ĵ åľ° +ĠPar a +K ent +Ġbre ached +è¡Ģ æµģ +- te +åĵ Į +头 ä¸ĬçļĦ +æ½ º +Ġmaj estic +ä¸į è¯Ĩ +ĠAir bnb +Ġse ams +D egree +\ }$. +鼨 çļĦ +èĬ¬ åħ° +Ġk u +é« Ļ +éķ¿ åŃIJ +Ġgal van +áĥ Ķ +ï¼Į åĸĦ +çļĦä¸Ģ åĿĹ +ĠD LL +ä¸Ĭ å²Ĺ +天 æĢ§ +eb o +Ġcru ising +-sh ot +_TH RESH +ãĢģ åķĨåĬ¡ +ma res +ï¼ĮæĪij èĥ½ +éĥ½æľī äºĨ +Ġin patient +åĴĮ ä¿¡æģ¯åĮĸ +åĮ»çĸĹ åį«çĶŁ +å®ļä¹ī çļĦ +j as +Ġb ilingual +Ġv agu +_l anguage +(t otal +æĬķåħ¥ 使ç͍ +ï¼Į对 äºĨ +è¿ĺ åŃĺåľ¨ +çľĭ ä¸Ĭ +çĶ· åŃIJçļĦ +ĠSh oe +_r oute +ä½ľ çļĦ +âĢľ 没æľī +强 æľīåĬĽçļĦ +ä¹ī çļĦ +(s ample +ffic acy +ĠExper iences +çĺ« çĹª +g art +åı¤ èij£ +ĠIN VALID +伯 æł¼ +æĶ¾ éĩı +ul ia +_f ailed +è§£åĨ³ æĸ¹æ³ķ +ĠRef er +Ġpel let +åŃĿ 顺 +- plane +æĺ¯ ç¾İåĽ½ +为 åħĪ +ä¹Ł ä¸įåIJĮ +å¦Ĥ æ°´ +Ġste alth +q e +is ements +çĮľ åΰ +æĽ¿ ä½ł +Ġfert ile +ĠG BP +ï¼Įæľ¬ é¢ĨåŁŁ +_W rite +Ġmulti plic +ĠDev ils +ç»ĪæŃ¢ 确认 +æŃ£ åĽłä¸º +Ġmod ifiers +èĴĻ èĴĻ +é¢Ĩ 头 +( EX +Ġsh immer +è¾¹ éĻħ +æĬĢæľ¯ æľįåĬ¡ +åĵį äºĨèµ·æĿ¥ +M ilitary +èħ° æ¤İ +ï¼Į 人æ°ij +_j oin +ç»Ŀ对 ä¸įæĺ¯ +åIJĪå¹¶ è´¢åĬ¡æĬ¥è¡¨ +åİĭ å®ŀ +Jul ie +Ġsyll abus +ï¼Į çĽ¸å¯¹ +ac ia +è¡Ģ çĹĩ +è̳ 缮 +ĠCon vey +. ib +oo le +? 
", +ĠL AT +ä¸ĭ æĦıè¯ĨçļĦ +éķ¿ çº¦ +Ġvisit ation +Ġn f +æĪij è·Ł +fl ip +omorph isms +æĪIJå°± æĦŁ += v += data +Ġout flow +é¢Ĩ导 åĴĮ +é į +ĠHe ck +ĠMar l +éĸ ĭ +çĽĸ 竳 +.f ront +Ġra ced +ç±» ä¸ĵä¸ļ +Att end +èµļ åıĸ +.w s +ru ly +Ġserv ings +åı° ä¸ĬçļĦ +èµĦ产 è¯Ħä¼° +ĠN olan +Ġun expl +ĠAt hen +ĠIN V +æĹ¶ æīĢ +è¿ľ å¾ģ +Ġbreak er +imb ing +Ġunw ind +Ġexch anging +$ c +âĢĿ å·¥ä½ľ +å¦ĩ 产 +Ġobtain able +Ġ\" % +ï¼Įä½Ĩ 对 +-p ressure +åģļäºĨ ä»Ģä¹Ī +ï¼Į åŃ£ +ign er +çĤ¼ åĮĸ +Ġvirt ues +çĭĦ åħĭ +ãĢĤ ä¸Ĭæµ· +Ġ` / +èĤ¯å®ļ ä¸įä¼ļ +Ġswe ating +åĴĮ é»Ħ +ä¸įèĥ½ 说 +ä¹ĭåIJİ åĨį +缮çļĦ åľ¨äºİ +çĨĬ çĨĬ +ãĥ¼ ãĤ¿ +ĠSt roke +åIJĮ åIJį +ãĢģ çϾ +å¸Ŀ éĥ½ +ãĢĤ ç¾İ +back slash +åİ¿ 令 +åĬŁèĥ½ æĢ§ +M b +æĸ¯ æīĺ +çĸı 导 +M appings +çļĦå¿ĥ èĦı +ï¼Į以 ä¸Ĭ +çļĦ ä»ĭç»į +ä»ĸ æĬĬ +ĠIN F +ĠBrid ges +Ġafford ability +ãĢģ åįıè°ĥ +Ġtext ured +ĠMor al +ĠS aul +ld er +Ġphosph orus +P b +è¿Ļ个 å®¶ä¼Ļ +n ite +åĢºåĬ¡ 人 +Object Type +Ġrun off +-l argest +ĠCov enant +ĠLauder dale +Ġl umbar +ĠS ight +ãĢģ å¾·åĽ½ +åĴĮ çłĶç©¶ +Ġun i +Pro be +_c oll +_DE L +G row +çĥŃ èº« +çģ« åħī +çĸ¾ æİ§ +è½° åĬ¨ +_mod ified +n esty +âĢĿ ãĢĤâĢľ +pc s +çѹ éĽĨ +j er +åºĹ åijĺ +ãĢĤ åıįæŃ£ +al u +ro ttle +Ġcl am +ot ides +p wd +è¿ĺ è¡Į +æĹł èĥ½ +é¦ ¥ +çļĦ å¸ĮæľĽ +Ġmor ale +ĠVari ant +ĠEL SE +prov ided +$ ac +å¿ĥ è¡Ģ +çĭĤ çĥŃ +. 
if +ï¼Ľ ä¸ī +respond ing +ĠConsider ations +çļĦ å͝ä¸Ģ +Ġfr anc +ĠTele com +Ġcatast rophe +å®ĺ åIJı +Ġdiscre et +/ os +th inking +å¿ĥ 仪 +fa ith +è§Ĩ éķľ +éħį è§Ĵ +éĩį éĩijå±ŀ +ĠBL ACK +注åĨĮ åķĨæłĩ +å¹²åĩĢ åĩĢ +åį°è±¡ æ·±åĪ» +çľ© æĻķ +æĢ» æľī +æĹ© æľŁçļĦ +èĥĮ è´Ł +st icks +é¾ Ī +H ierarchy +åĨį æĢİä¹Ī +urg a +ç§ijåѦ åİĨ +åĿIJ éªij +èľ · +Ġmotor cycles +R eward +Ġret ina +ï¼Į 麦 +ï¼ĮæĪij们 å¿ħé¡» +ĠST AR += T +s amples +ĠS app +è¿ĺ éľĢ +Ġve g +åIJ¸ 纳 +ठ¸ +Ġfarm house +ĠWinn ipeg +ĠPe ach +Ad obe +å®Ĺ å¸Ī +çļĦ 士åħµ +ow ler +ĠAl ignment +ĠDiv ide +ĠHum ans +Q I +Ġt aps +ãĢģ å®ĭ +å¹´ éĿĴ +ĠTE CH +-head ed +Ġl az +ĠD EM +é«ĺ è·Łéŀĭ +Class ification +-sp onsored +ĠAccept ed +寥 寥 +Ġtax ed +ĠAMA Z +- dd +- expression +æ²»çĸĹ æķĪæŀľ +Dec ision +ur ances +ĠP iper +åįķ æĽ² +转 å¢ŀ +B ee +× ij +éĹ´ æŃĩ +åĽŀ é¦Ī +Ġra ff +æģ° åΰ +éĺ» æĸŃ +RE AL +æĹĭ é£İ +Ġп од +G ear +ä»ĭ äºİ +Ġbisc uits +èĩĢ éĥ¨ +è¿Ľè¡Į æ¯Ķè¾ĥ +Imp lement +* : +Ġvan ishing +ãĢĤæľ¬ åıijæĺİ +approx imately +. Process +åĿIJ éķĩ +ĠEmm y +ä¸ī 项 +被 æµĭ +Ġtransf ected +( Action +iz en +ä¹ĭ åĬŁ +æµij çĦ¶ +rem oved +æ¤Ń åľĨ +åı¯ä»¥ æıIJä¾Ľ +ĠSi em +/ mm +Ġro gue +è§ģ æķĪ +çļĦ æııè¿° +Ġdes erialize +Http Client +L uck +ä¸Ģ è·ĥ +h oles +èĭ± éĩĮ +第ä¸ĥ 竳 +ç¬ ł +好 åĿı +( Item +Inter faces +é¢ĨåŁŁ ä¸Ń +被 èªī为 +Ne ither +çĿĢ æĪijçļĦ +ãĢģ åħ³äºİ +ĠGrand ma +Ġwaters hed +ï¼Į 带é¢Ĩ +Ġp ion +Exper imental +ä»·æł¼ 为 +)-- ( +.st at +åijĬè¯ī äºĨ +éļıæĹ¶ éļıåľ° +_not ify +ä»ħ 次äºİ +ç»Ļ èĩªå·±çļĦ +ï¼Įä¹Łåı¯ä»¥ æĺ¯ +ult z +æĸ° èĢģ +ç§ģ èĩª +ε ν +h ov +ãĢĤ ä»ĬæĹ¥ +å·² æĪIJ +ĠAn ch +- ce +åŀ Ľ +ĠV iolet +ĠFed er +ï¼Į æī©å¤§ +ä¸Ģ åħ±æľī +çļĦ 伤害 +ĠL und +B inder +ĠA FP +EL S +æĢ§èĥ½ åĴĮ +æĹł å¼Ĥ +ĠAm enities +ĠA PA +ĠI PS +Ġyouth s +Ass ociate +_qu ant +çļĦ éĥ½ +Ġcher ished +ä¸į ä»İ +ĠW OW +ä¹ĭ å¢ĥ +ï¼Ī ä¸ĩåħĥ +ï¼Į çĻ» +ĠC umm +Check s +. 
br +åħ³ åı£ +æĶ¿åºľ åĴĮ +ĠâĬ Ĥ +Ġw aving +éĩĮéĿ¢ æľī +Ġblo oms +å±ĢéĻIJ æĢ§ +åĩº 线 +æĿİ ä¸ĸæ°ij +ä¸įè¿ĩ äºĨ +( right +ï¼Į 身åIJİ +ä½ ° +å¹¶ ä¸İ +Ġduplic ation +Ġsol icit +åı¬ åĽŀ +/ ", +ĠH itch +æķ£çĥŃ åύ +EN OMEM +ĠT us +æł¼ åŃIJ +Ġpl ush +æīĵ éĩİ +ï¼ĮåĨį åΰ +ĠÐ Ŀ +%% % +Ġwind shield +Ġrem ake +part um +L as +çģ« çĤ® +çģµ åĬĽ +Every body +ĠBur lington +G ROUP +æ³Ľ 滥 +éļı æĦıçļĦ +è¦ģ æī¾ +å°± æ¯Ķè¾ĥ +Ġlas ers +ĠD IV +å®¶ 主 +ä¸įåĪ© å½±åĵį +v ron +ed ly +主åĬŀ çļĦ +令 çīĮ +_s d +åıį çľģ +( chan +н Ñı +ĠVol tage +Ġhe ctic +è¿Ľ å±ĭ +OM IC +éĤĤ éĢħ +Ġ åIJĦ +ĠK ro +_P AT +Ġrev ive +Ġl il +_ UI +is et +Elect ronic +\ N +ï¼Įä»ĸ å°Ĩ +çł´ ä»ij +S r +-b usiness +_pro gram +Ġring tone +ç«ĸ åIJij +ĠS le +ĠT LR +欧 å¼ı +semb led +contain ed +åĴĮ æĿ¨ +rew rite +ĠVal encia +Ġdismiss ing +over rightarrow +ঠ¿ +ĠGran ite +ä»»ä¸Ģ项 æīĢè¿°çļĦ +R outes +j Query +m otion +ĠMar ilyn +æĹı 群 +æİ¢ 寻 +ĠSpecial ists +å±Ĥ åĩºä¸įç©· +Ġur anium +Ġcar ving +åį³ ä¾¿æĺ¯ +App lied +ภķ +Ġgrac ious +ĠCart esian +工伤 ä¿ĿéĻ© +ï¼Ī éĽĨåĽ¢ +åı¦ä¸Ģ åįĬ +cer al +ĠCOP D +奥æŀĹåĮ¹ åħĭ +对 é½IJ +çŁ³ 榴 +(d p +ä¸įè¦ģ 太 +ï¼Įä»ĸ 没æľī +ACT IVE +We ights +-f ront +åįĥ åħĥ +ãĢģ å¼Ģåıij +ĠR ider +æŃ» è§Ĵ +at ian +ï¼Į é¢ĩ +ä½ĵ ä¸Ńæĸĩ +Ġset ter +ä¸Ģè¡Į 人 +Ġawaken ing +ï¼Į æİ¢ç´¢ +ä¼ļ å¢ŀåĬł +ual a +åįģäºĮ å¹´ +ãĢģ ä¸Ń央 +è¯į åħ¸ +ĠاÙĦ Ùħ +^ , +ï¼Į转 头 +ĠT ory +ARE NT +ĠSpec ify +ç͵åķĨ å¹³åı° +Ġpleasant ly +Ġc es +âĢľ åĪ« +Ġharm ed +Ġexpon ents +ĠGram mar +Ġlymph oma +âĢľ ä¸Ń +åIJĦ å¼Ĥ +.D ocument +西å®ī å¸Ĥ +Ġfest ivities +åı¯ ä¸İ +ç´¯ ç´¯ +lat able +è´¬ å̼ +Hot els +D ATE +Ġl imp +æĶ¯ä»ĺçļĦ çݰéĩij +çļĦ çIJĥåijĺ +ä»Ļ å¢ĥ +ç»Ħä»¶ åĮħæĭ¬ +Ġarrog ant +Ġpro w +Ġcr ane +gr and +ĠSM ART +_w riter +A y +æ¶² åĮĸ +Ġhon oring +at ars +ĠCh u +æŃ¢ è¡Ģ +ĠSur round +ï¼ĮåĽĽ åij¨ +åĴĮ èĭı +Ġ æijĦ +ok in +Ġbl and +ä¸ij éĻĭ +,æĺ¯ ä¸įæĺ¯ +( åĽ¾ +çIJĨ æŁ¥ +天 äºĨ +Ġsw ollen +Car bon +er ole +大 éªĤ +v ote +ï¼Ī B +åıĺ æķħ +éħĴ åIJİ +ĠSQL ite +Ġorth odont +ä¸Ĭ éĺµ +çİ© åĦ¿ +ĠBur k +让 æŃ¥ +åĸľ åºĨ +Ġpercent ile +ãĢģ éϤ +(m ain +Place holder +Z W +ĠH UD 
+Index Of +NS Object +_ Value +_trans action +åIJĥ èᝠ+丽 æ±Ł +.ex ceptions +ï¼ĮæĬĬ ä»ĸ +Ġ åºĶç͍ +好 åIJĥçļĦ +Ġph yt +èµĦ产 éĺ¶çº§ +æ§½ åĨħ +_l at +ĠAR C +èı² èı² +ĠM ash +çļĦä¸Ģ éģĵ +äºĨä¸Ģ å±Ĥ +åĩł 声 +ĠSabb ath +åīĤ åĴĮ +æŃī æĦı +.assert False +Ġfirst ly +ĠUn iv +ĠComp ound +ĠDec ode +éļ¶ å±ŀ +Ġ æ¯Ķä¾ĭ +({ }, +ï¼ĮåIJĦ èĩª +èĤ© 头 +ï¼Įå¾Ī æľī +ï¼Į æł¡ +æ© Ł +审议 éĢļè¿ĩäºĨ +âħ ¢ +ĠH ain +Ġut ens +鸳 鸯 +Ġ åĪĻ +çļĦå®īåħ¨ æĢ§ +c itation +å¸ĥ 满 +é¡¹çĽ® 管çIJĨ +åĩºä¸Ģ åī¯ +/j av +( add +/ server +åij¨ è¾¹çļĦ +.M anagement +z m +Ġspecial ties +åĬŀåħ¬å®¤ éĩĮ +no op +ä¿Ŀ å®ļ +u ces +è¿ŀ 线 +éĹ¯ åħ³ +Ġcat hedral +-al one +_ remote +Ġpro claimed +åIJij åĮĹ +Ġbin aries +æĪij æľĢ +çĶŁ çĮª +pe i +Ġartic ulated +Ġdes erving +, å±ŀäºİ +_d s +ĠDis p +æ°´ ä¸ĭ +å·²ç»ı æĬĬ +g ain +æ¹ĸ åĮº +Ġâī ¡ +åĪ© åύ +super vised +Di agnostic +Ġpredomin ant +ï¼Į å°½åı¯èĥ½ +空éĹ´ ä¸Ń +ĠPR INT +ç£ħ 礴 +ĠMust ang +å¹² åIJĹ +ĠHigh lands +æł¹æľ¬ 没 +P airs +o que +>< !-- +ä¹ĭ æºIJ +éŁ³ä¹IJ çļĦ +太 åĮ» +C OL +çıł æ±Ł +çŁ¿ æ³īæ°´ +èµĦæł¼ èĢĥè¯ķ +å°´å°¬ çļĦ +) }_{ +ç©· 人 +.com ponents +èIJ¥åķĨ çݯå¢ĥ +ä¸į 带 +æĸ¹æ³ķ æĿ¥ +λ η +h id +æĺ¯ åĪ©ç͍ +åĮĪ çīĻ +ĠF G +åIJĪæ³ķ æĢ§ +ĠC NS +äºĭä¸ļ éĥ¨ +ĠÑĩ ÑĤо +Ġcon verse +ç¨į æľī +Ġreject s +ï¼Į 广å·ŀ +çļĦæīĭ æ³ķ +åѦçĶŁçļĦ åŃ¦ä¹ł +ï¼Į éĿ¢ç§¯ +ĠK ant +æµ· éĩĮ +æĤĦæĤĦ çļĦ +éĩį åĪĽ +Ġm ango +иÑĩ е +Ġwatch dog +è¿ĩ 硬 +} )$. 
+æĪij 个人 +ove re +åĪĢ åŃIJ +ï¼Į天 天 +ä»İ ä¸Ģå¼Ģå§ĭ +res so +ĠH iro +æŀģ 强çļĦ +upp ly +/h igh +a ic +ï¼Į åѦä¼ļ +æļ´ è·Į +ĠPhil ly +Particip ant +l iving +æĬ¤ éĢģ +æŀĹ åľ° +è¿Ļ æĬĬ +B U +èĩªçĦ¶ æĿij +ĠCOM MENT += NULL +ö s +ĠH TTPS +H op +ĠL ori +æĹ¶éĹ´çļĦ æİ¨ç§» +Ġven ous +ãĢĢãĢĢãĢĢãĢĢ ãĢĢãĢĢãĢĢãĢĢ +æĶ¹ åζ +Ġstock ing +设 为 +çļĦ人 æł¼ +Ġre w +éĴ ´ +N r +åĽ¢ ä¼Ļ +Ġdom inates +Ġhab eas +Ġfasc ination +Ġesc apes +_A UDIO +ĠHelp ing +ĠMB EDTLS +ut or +第ä¸Ģ éĥ¨ +Ġscream ed +ĠD eg +æīĵ æĪIJ +èľ ¿ +以ä¸Ĭ 代çłģ +ĠMiche le +éĩį ä»» +èѦ è§ī +Ġåıª ä¸įè¿ĩ +ãĢģ èĭ±åĽ½ +Ġdo om +ä¾¿å®ľ çļĦ +æīĵ åľ¨ +æ¶Īè´¹ åĵģ +å®ī éĺ² +-c all +S dk +æĺ¯ éĴĪ对 +end ale +éĩij å¥ĸ +交 èŀį +æł¹ éĥ¨ +( Long +_m alloc +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ +H igher +M CA +Ġd ma +F actors +èį¡ æ¼¾ +G aming +å¤ļ æľī +ä¿¡ çļĦ +Ann ounce +å¦ĩ èģĶ +Ñĥ Ñİ +åģı åĥ» +ĠHunting ton +ĠB is +ï¼Ł æľī +Off sets +Ġbull shit +ĠLithuan ia +(p arser +ild en +æķ° 次 +.d im +stack rel +ä¸Ģåıª æīĭ +< # +éĥ½ æ¯Ķ +åľ¨ é¦Ļ港 +ä¸ī ä¸ĩ +Block ing +èĦĸ é¢Ī +Ġhack er +s olid +åIJij åı³ +å¸ĥ æŀĹ +Ġcommun ist +D allas +c ash +Ġ è¦ģæĺ¯ +ĠMajor ity +Ġso aring +}$ { +çij Ļ +ãĢģ ä¿ĿéĻ© +OM G +çij Ľ +æ»ij 稽 +Ġb ary +Ġcomp rehens +, 大éĥ¨åĪĨ +l ain +Ġex fol +æľī å¤ļç§į +å̼ å®Ī +Ġpol ishing +æ²¹ çĤ¸ +缼 çļĦ +M ichigan +: active +_p adding +çĽĹ è´¼ +è¤ ¥ +Ġly ric +# , +Ġ} _{ +æĪIJ è´¥ +ç´« å¤ĸ +Ġban ning +Ġtens ors +ĠO man +å°ı ä¼ĻåŃIJ +ĠTwitter Share +è¸ı æĿ¿ +æĪij å¦Ī +çī¹ åĮº +Ġdec rypt +Ġretros pect +Ġth aw +Ġexplo its +D raft +Ġserv iced +çīĩ ä¸Ń +èĶ· èĸĩ +è¿Ļä¸Ģ å¹´ +.L ink +Ġdiplom acy +Podcast s +è§£åĨ³ åĬŀæ³ķ +ï¼Įèµ· çłģ +åıĤéĺħ åĽ¾ +ĠB W +æij© æł¹ +_DEP TH +stit ial +UFF IX +ĠBart on +ãĢģ åĨħ容 +çģµ èĬĿ +ĠO st +Ġv ind +comp atible +ĠOb st +ç¢İ çļĦ +ãĢĤ 使 +ĠP AL +è¿ĩ èĬĤ +Ġp aving +ĠD os +ĠC ursor +å°±æĺ¯ æĪij们 +Ġneg atives +*- *- +æĸĩçī© ä¿ĿæĬ¤ +Ġinv oking +æ±Ł åĮĹ +åħī éĺ´ +该 é¡¹çĽ® +abb ing +F ew +Press ure +S ony +is or +ib al +羣 æĮļ +çī¹åĪ« 注æĦı +为 鼶 +TH ING +âĢľ ï¼įâĢĿ +åħ» èĤ² +麾 ä¸ĭ +_ commit +ãģĦ ãģĨ +he v +ï¼Į对 
æŃ¤ +å¹²éĥ¨ èģĮå·¥ +综åIJĪ ç´łè´¨ +Last Error +Ġre pt +UR AL +社 ç§ij +ä¸Ģ æĮ¥ +let ons +. ob +Ġ= ================ +Ġmen ing +ï¼Į é϶ +è°IJ æĮ¯ +( trans +Ġv b +æłĩåĩĨ åĴĮ +çļĦ æīĢè¿° +æĹł é¡» +伤 æ®ĭ +çĶ» é£İ +Ġconc at +ĠBath rooms +è¿Ļ è¾¹çļĦ +ĠCharacter istics +ãĢĤ å¿ĥ +Ġconfig urable +ĠNation als +纯粹 çļĦ +èĬ± åĦ¿ +ĠKard ash +P rel +ru gged +ï¼Ī åĮĹ京 +Ġstart Time +ĠAL T +ĠT ie +åĽ½èµĦ å§Ķ +åľ¨ å®ŀéĻħ +Ġcab ins +ç͵ 容åύ +ne ck +yn ch +jud gment +æĹ¶éĹ´ éķ¿ +Ġelect ive +Ġexport ing +) "> +åħī çݯ +ĠCourt ney +- rec +ip ay +ç¬Ķ å½ķ +ĠTw ilight +ĠTob acco +Ġcrystall ine +æĥ¦ è®° +Ġ æĭī +ĠD ud +åĪĨ离 åύ +ug gets +Ø ° +ãĢĤèĩª ä»İ +ãĢģ è¦ģ +ãĢģ ä¹Ļ +å¥Ķ èħ¾ +Ġhorse power +ä¸Ŀ绸 ä¹ĭè·¯ +( è®°èĢħ +Ġ} } +大 ä½ĵ +çϽ èĻİ +éĻIJ æľŁ +çݯ氧 æłijèĦĤ +ch rist +ç»Ŀ ä½³ +_const raint +强 çĽĹ +_HE LP +Ġtrans pose +Ġcond ol +ĠOr b +Ġdial ysis +ĠSach s +ĠIM AGE +EN G +Ġstere otype +åĴĮ åĪĽæĸ° +Output s +/ al +ĠB ram +æķĻ åĬ¡ +Ġnarrow ly +åĩĢåĮĸ åύ +B EGIN +ä»Ģä¹Ī éĹ®é¢ĺ +ä¹° 车 +æĢİæł· æīįèĥ½ +æĬĬ æİ§ +( ind +\ linewidth +ps is +ï¼Į请 æĤ¨ +Ġl est +ad minist +-s izing +奥 ç§ĺ +E at +p as +ä¼ļ éģĩåΰ +åħ¶ä»ĸ ä¸İ +\ set +æľī 以ä¸ĭ +éĿĻèĦī æĽ²å¼ł +Cho oser +å¦Ĥ ä¸Ĭ +ĠCarl son += http +Ġte as +ĠHe ads +_T est +æĢ» åĴĮ +ç¢İ çŁ³ +åıijæĮ¥ ä½ľç͍ +å¿« åľ° +ä¹Ł å·² +sub st +éĢı åħī +æ¡ĥ åĽŃ +ä¸ŃæľŁ 票æį® +æĺ¾çĦ¶ æĺ¯ +X A +Ġ{ ( +Ġ 注æĦı +èµ° ä¸Ĭåīį +è¿ŀ带 责任 +æľ± åħĥçĴĭ +ĠEm manuel +ĠCer amic +CH ANGE +Ġcan non +å¹¶ è¿Ľè¡Į +Ġturn out +sex ual +' ', +( order +ä¸ ķ +int ent +Dis p +ãĢģ ç͵è§Ĩ +è¿ĺæĺ¯ æľīäºĽ +åĢĴ éĹŃ +ĠL illy +åŁº çŁ³ +aff les +ĠPartners hips +Ġstrip ed +ï¼ģ ä½Ĩ +ĠShel by +P df +.W here +ãĢĤæīĢ以 说 +ĠTrib unal +_BY TES +Ġredd it +es igned +ones ia +å¢ŀ æķĪ +éħį åζ +ĠRe hab +å®īè£ħ çļĦ +ure ka +æĺİ æľĹ +Ġ__ (' +ĠSubject s +ĠNap oleon +Ġs x +ï¼Į èĴĭ +gg er +-en h +Ġse greg +è¿Ļ åī¯ +/ admin +< w +ĠP enguin +æĹł ä¸Ĭ +ï¼ī è´Łè´£ +ä½Ľ å±±å¸Ĥ +ĠSaskat chewan +ĠComb ining +Ġch or +度 éĩı +ä¸ĵ 访 +ĠPe oples +ĠFred die +Ú ¯ +Ġh oard +ãĢģ åı³ +_p k +äºĨ éĤ£ +-d et +-re ported +ĠKazakh stan +( cc +/ op +Table View +ä 
nd +ol v +ĠG omez +ãĢģ å¿«éĢŁ +Ġp izz +åĴĮ 产åĵģ +Ġst en +ç¾İ èªī +符 æĸĩ +ãĢĤ éķ¿æľŁ +ãĢģ èĩªæ²»åĮº +彩 票 +è¯Ĺ æĦı +ĠC inem +å·²ç»ı ä»İ +Ġwild card +ĠMood y +ï¼ļ ä¸Ģ个 +æĥħ人 èĬĤ +Ġdrop lets +Ġvan ished +_ hex +_f ix +.t x +èĪª è¿IJ +om id +od en +éģ ´ +çĵ¦ è§£ +Xml Element +Ġun published +_p kt +çļĦç¥ŀ ç§ĺ +} t +åįĥ ç§ĭ +å·¥ç¨ĭ ä¸ĵä¸ļ +ĠPharmaceutical s +å¨ģ é£İ +fl ake +an imate +ï¼Į 欧洲 +Ġg eek +ä¼łéĢĴ ç»Ļ +Ġdep ressing +åĪĽéĢł çļĦ +åijĬè¯ī èĩªå·± +: I +p un +.f eature +æ´Ĺ 车 +-inf ected +ask ed +ãĢģ æ£ĢæŁ¥ +åĴĮ åIJİ +Ġill icit +ä¸ĭ 马 +æĮ¯ 举 +ĠH CC +. uint +Own ed +med icine +ç¨Ģ å°ij +Ġsingular ity +Âł al +çļĦ å·¦ +âĢĻ T +Ġanatom ical +D riving +ä¸Ń åѦçĶŁ +. True +-f e +ĠCheck list +ĠShow case +Ġawa its +åįļ 主 +å°± åľ° +çĥŃ æ³µ +ĠRE LEASE +) (\ +@ article +ĠNot ebook +T ow +P ear +ĠJ asper +æĺ¯ä¸į ä¸Ģæł·çļĦ +n w +æĬĵ åΰ +, max +.get Y +åİĤ éķ¿ +ĠB ates +Ġem ulator +ĠQu artz +)+ ( +ĠMort on +å¼¹ åĬĽ +aza ar +ĠS ellers +è§£åĨ³ çļĦéĹ®é¢ĺ +墨 éķľ +B old +| Any +/f ont +æ·± å¤ĦçļĦ +-m atch +æĦŁæĥħ çļĦ +åĪ« åIJį +èij£ åįĵ +沫 沫 +èµĦæł¼ è¯ģ书 +Clos ure +(d es +it imate +od ied +.p repare +ĠCr ane +ãĢģ åķĨ +æ°¸è¿ľ æĺ¯ +ĠV W +Ġjealous y +Ġs ane +èĥ½å¤Ł éĢļè¿ĩ +- turn +Ġlit ers +[ J +ãĢģ æĿij +-n et +浩 çī¹ +ĠPerm it +Ġa ur +çĹħ æĤ£èĢħ +:: - +Ġdisadvant aged +Ġre used +ĠG ob +/d ocument +ce p +ãĢģ 微信 +_D OM +å¼Ħ æ¸ħæ¥ļ +feed back +çļĦ ä¿¡åı· +Ġsuff ices +æĪij åķĬ +ex ual +Ġback lash +ON Y +ä tt +Ġaer obic +ĠL OW +åľĪ çļĦ +ĠKrist en +et imes +è¿Ļ æĺ¯åľ¨ +_t raining +æ£Ģæµĭ ç»ĵæŀľ +ï¼Įä¼ļ ä¸įä¼ļ +éª ĭ +Ġinv aded +Ġbl aming +Ġmoder ator +é¡· åĪ» +ĠâĢľ ... +éĩij éϵ +ç£ IJ +-S ch +ĠC unningham +ĠCh loe +Ġref lux +åĪº è̳ +- mentioned +Ġant igens +ĠCr ushers +å¾Ī å·® +æĹłæķ° 次 +æ³ķ åύ +ÏĦ αι +å§ĭç»Ī åĿļæĮģ +çļĦ人 åĬĽ +æľī æĦıä¹ī +Ġk s +èĥ½å¤Ł å¾Ĺåΰ +oph ila +Version UID +od ial +.h ref +Ġnation als +æĢ» åĨłåĨĽ +Ạ¡ +. 
= +ut c +çľģ éķ¿ +åĨ° åĩī +, èµ° +ret te +æ¼ ķ +in ian +ï¼Į åĨħéĥ¨ +èļ Ŀ +éĢĤåºĶ æĢ§ +ĠâĦ ĵ +< User +.d umps +å¸ĥ 线 +.c ur +ĠVer ified +ĠCol ts +Ġtight ening +_PRO P +å·¥ä¸ļ åĽŃåĮº +ĠTreat ments +ju vant +Ġmerg ers +Ġduplic ated +缸 声 +_d ocument +cept s +æľī çIJĨ +ARS ER +ch ains +ANG ED +ST REAM +R TC +Ġd anced +rad es +ol one +AL TER +Ġdyn asty +æĿ¥ ä¿¡ +ä¸įåΰ ä½į +form in +Ġens l +Ġtub ular +_ (" +Ġpl ated +Ġchi ropractic +ĠO rientation +èĩª è´¸ +ç¥ŀ æĺİ +Ġc z +Ġl amin +è´¹ç͍ çļĦ +Ġsympt omatic +çīĪæĿĥ 声æĺİ +ĠâĹ ĭ +ĠGoth ic +-f it +ĠComp etitive +å°ı é¢ĺ +b il +, å¼ķ导 +ĠP tr +part icip +Ġtorn ado +n mgp +ï¼Įä¸į åĥı +ĠSME s +ï¼Į 诸å¦Ĥ +Ġp acing +ĠF requently +æľįåĬ¡ æľīéĻIJåħ¬åı¸ +ï¼Į åī¯ +å¹´ æľĪ +' un +ï¼Į å¢ŀåĬłäºĨ +Ġd ou +In cluded +ĠDoll ars +åİ» å¤Ħ +b w +ter a +Ġп еÑĢ +åıĮ 人 +am ble +æ°Ķ çIJĥ +Ġm ural +ãĢģ æ±ī +Ġcongen ital +éĢĢ åĽŀ +ï¼Į é£Ł +ĠCh al +ĠHel ena +ot in +åħ¨ èĥ½ +ç¿» å¼Ģ +ĠHigh light +ĠV oting +åŃ ¸ +ï¼Įå¹¶ åIJij +_p anel +å¤ļ ç±³ +åIJĦ åįķä½į +.R andom +å®Įåħ¨ çļĦ +Ġqual ifies +åĴ¬ äºĨ +ĠScript ures +ĠNi agara +Ġundis puted +ä¸į æĤ¦ +åĩŃåĢŁ çĿĢ +æģ© æĸ¯ +Ġglam orous +, æľª +. Convert +è¿ Ĥ +Ġb f +çļĦ 妻åŃIJ +ĠK afka +Ġimp urities +-t ouch +æĹ© çļĦ +Ġm alls +产 åĮº +çļĦ çŀ¬éĹ´ +Ġinvestig ates +ãĢģ é¤IJ饮 +æŃ¤ åIJİ +.p oint +ãĢĤåľ¨ ä»ĸ +ä¸ĥ ä¸ĥ +crum bs +çļĦ 年轻人 +att an +罪 è¡Į +æĬ¥éĶĻ å¦Ĥä¸ĭ +æĹłçĹĩçĬ¶ æĦŁæŁĵèĢħ +ĠP ending +åĨħ åIJij +éķ¿ æķĪ +_p c +æī¿ éĶĢ +Ġr l +é«ĺ è¶ħ +Ġsoc iology +ï¼Į ç§ij +th ick +ĠGe ography +DR AM +Ġma ize +å°± åħĪ +æļĤæĹ¶ æĢ§ +Ġpur ge +K y +æľī è¿Ļ个 +-c ultural +r ances +éĽĨ çļĦ +åİĨ æĹ¶ +çĨ ¨ +Ġspokes woman +å°ı 丫头 +è¿Ļç§į ä¸ľè¥¿ +çľ¼çĿĽ éĩĮ +èį ¤ +ä än +éĹª çĿĢ +st ores +èĢĮ ä¹ħä¹ĭ +Ġpr uning +ç§ijæĬĢ æĪIJæŀľ +å¤į è®® +ij ay +Ġb inge +pro be +ĠDaniel le +ul d +Ġcon ceive +Cong rats +åħħæĸ¥ çĿĢ +å¤į æŁ¥ +ï¼Įæĺ¯ 为äºĨ +onal do +ym al +ä¾Ľ è¿° +åĨ³ ç®Ĺ +ĠÑģ л +ãĢģ 主 +ĠKir by +Ġben ches +ĠNaz is +M OBILEPHONE +ä¸Ĭ æĿ¥çľĭ +èįĴ åĶIJ +ĠEx cess +Ġinf arction +ý ch +Ġ åŁºäºİ +åĩº æĸ° +åºľ éĩĮ +. 
attribute +åľ¨ ä¸ī +Ġev aporation +åĨ² åIJij +ï¼Į éĵ¶è¡Į +è½» èĢĮæĺĵ举 +Ġsoft en +ĠArmen ian +Ġ åıĹ +St rength +åĬł æ°´ +æķ° ä¸ĩ +ĠSo le +PRO JECT +åĭĭ 竳 +å°± åı« +Ġpen insula +ãĢĤå½ĵ åīį +, å¼Ģå±ķ +ĠJac ques +.j et +ä»ĺ æģ¯ +ĠGaz ette +ï¼Į ç¦ģæŃ¢ +èĢĮ å½Ĵ +Ġmis information +ĠD ies +Ġt ing +Ġscarc ity +ä¸į 强 +鹦 é¹ī +.A ccess +Des erializer +èĤº éĥ¨ +ĠIn ches +æŁ± åŃIJ +Ġphilos ophers +çĤ¹ æķ° +ĠMar ried +oly mer +ĠRand all +纸 å¸ģ +Brows able +Ġ æķĻèĤ² +Com fort +ĠðŁ Ĵ +å¼¹ å¹ķ +ure th +讲 åΰ +缩 åĩı +RESULT S +æİ¨ ç¿» +B X +ĠH ale +æĪij ä¸įèĥ½ +æī¾ æĿ¥ +^ s +ĠBah amas +ĠGreen wich +Ġ åŃĹæķ° +代 为 +Ġfl air +EX TERN +ä½³ èĬĤ +æĭŁ åIJĪ +è¿ŀæİ¥ éĥ¨ +ĠElect ron +çļĦåIJį ä¹ī +F ord +ĠC athy +åľ¨ ä¸Ĭè¿° +ĠDE P +Ġspont aneously +Du plicate +çĶŁ éĶĪ +Ġprot ons +Ġenvision ed +åĵŃ å£° +æIJľ æŁ¥ +ĠM SC +Ġ$ ? +fit ting +âĢ ŀ +. Enabled +S ending +ĠBre ath +人åijĺ è¿Ľè¡Į +形容 è¯į +r ne +è¯Ħ æµĭ +C OR +es on +åįĹ éĿŀ +FORM AT +èľ¿ èľĴ +åĩĨ åħ¥ +红 åįģåŃĹ +ä¸ŃåĽ½ åı¤ä»£ +rew s +ĠDep loyment +rabb it +Measure ment +Ġgo v +Î £ +Ġble ed +f ps +_S O +ĠPrem iere +Ġreact ing +M eter +ãĢĤ - +ĠL EV +_s ame +ĠC d +ss i +è½» 度 +ĠDef ines +ĠPl at +appe arance +ri et +æīĵ çIJĥ +ç²Ĵ å¾Ħ +S hel +Ġleft overs +èĤ¡ä»½ æĢ»æķ°çļĦ +Ġax i +MC U +-re port +åľ¨çº¿ éĺħ读 +chem ia +äºĮ æĪĺ +-c i +ä¸İ åħ¬åı¸ +å¤Ħ äºĭ +èİ Ĩ +ĠTest Case +Ġgram matical +讲 äºĨ +ï¼Į éĻį +ĠAfric ans +ãĢģ 建 +ĠH G +å¼Ģ åºŃ +Ġbl ush +(c rate +(file Name +ãĢĤ以ä¸ĭ æĺ¯ä¸ĢäºĽ +Ġs clerosis +å¾® ä¿¡åı· +ĠIn ject +èµĭ å̼ +æĸ°çĶŁ åĦ¿ +Ġmark ings +Ġ× IJ +æľī è¶³å¤ŁçļĦ +uct ive +ĠHom ework +Ġun checked +å±Ĥ åĴĮ +ĠDr inking +ĠVol vo +Ġar senal +, 没æĥ³åΰ +- response +让 ä½łçļĦ +å¥ĸ æĿ¯ +å±± æµ· +ãĢį çļĦ +ĠP ax +ĠKend all +ĠMillenn ium +a vers +m gr +ut ta +çī¹ éĩĮ +To Int +æ²Ļ é¾Ļ +æĭ¿ åİ» +äºĮåįģ ä¸ī +æĢ§è´¨ çļĦ +D ual +Ġp ÅĻ +æŃ£ æ°Ķ +uk es +ĠO vers +çłĶç©¶ æĬ¥åijĬ +earn ed +ĠL ef +ID C +æĬ¢ 夺 +å·®å¼Ĥ åĮĸ +ĠCe iling +Ġnewcom ers +éĺ ĸ +late x +åĴĸåķ¡ é¦Ĩ +ĠFranch ise +Ġ è¿Ľè¡Į +fig ures +åı¯ä»¥ ç͍æĿ¥ +æİĮ 管 +.ed itor +b os +ĠNeed less +. 
ACTION +on ial +ï¼Į æĬ±çĿĢ +enu ine +åİ» çļ® +èĤ¡ä»½ 转让 +Ġr n +污æŁĵ éĺ²æ²» +\ varphi +-t aking +Off line +Ġd re +Ġwith holding +Ġ æĿ¥æºIJ +Ġ Ñı +Ġmake over +éĩĮç¨ĭ ç¢ij +Ġcont ender +å¹² è´§ +æĪ´ åı£ç½© +ĠThous and +çļĦ èĦļæŃ¥ +ĠW itt +ib el +ç§ĭ é£İ +Ġcam per +è¢ľ åŃIJ +_b asic +ĠHarm ony +ãĢĤ ä¼ļ +any thing +æī« è¿ĩ +Ġatt ribution +_P C +Ġcompl ied +ä½ĵ æĵį +ĠJer seys +Ġpat hetic +èĹı 书 +è¶ĬæĿ¥è¶Ĭå¤ļ çļĦ人 +ĠC elsius +m h +ï¼Į æĸĩåĮĸ +ï¼Į åĩ¤ +ä¸Ń æĿ¥ +ĠK od +åĪ© 害 +åĿļ 强çļĦ +éĥ½ä¼ļ 被 +E ff +æľī ä¿¡å¿ĥ +ary ing +Ġro lex +ĠGu pta +-w arning +AI æĬĢæľ¯ +ĠJer ome +I oT +åľ¨ ä¸ĸçķĮ +Ġep oxy +躬 身 +- cloud +ĠT owers +Ġpromot ers +ĠP UT +æĪĸ å°Ĩ +ï¼Į åĨľæĿij +High lights +.c lean +Access Token +çݰå®ŀ çĶŁæ´»ä¸Ń +Ġhall uc +ï¼ģ ä½Ĩæĺ¯ +ĠOut s +ï¼Į äºī +Ġb ouncing +ĠWork force +äºĶ ä¸ĩ +楼 æĪ¿ +E I +AN CH +ĠReg Exp +é©» æīİ +æĪIJä¸Ģ åĽ¢ +åĽ´ç»ķ çĿĢ +Ġgut ters +åºĶç͍ åľºæĻ¯ +ä¿ĥè¿Ľ äºĨ +.in ject +大 头 +Ġnew line +Ġbirth days +æľī ä»·å̼ +ĠReg ression +è±Į è±Ĩ +ç½Ĺ åħ° +è¿Ļæł·çļĦ æĥħåĨµ +æĹ¶éĹ´ æĿ¥ +èĨ º +ĠCEO s +å¤Ħå¤Ħ éķ¿ +ä¹ĭ æľ¬ +æ°´ ä½ĵ +ãĢĤä¸į ä¹ħ +ĠOl iv +Ġst int +ĠE rie +èIJ¥ åĪ© +çIJĨ åıij +çŃī 产åĵģ +Comp ression +æķĻ å£« +Ġpred nis +æŁĶ软 çļĦ +ĠPill ow +X I +ĠRE F +åı¯ 羣æĺ¯ +_n ative +K ath +Ġè¦ģ çŁ¥éģĵ +åŃĶ çļĦ +çŁ¥ åºľ +So on +Ġbin omial +æļ´ é£İ +为 客æĪ·æıIJä¾Ľ +ĠSher lock +ï¼Į èĦ± +ĠM ild +大 好 +大 å¦Ī +ä¸īå¹´ 级 +t rial +Ġv ap +éĶ Ħ +Ġam mo +ï¼Į ç«Ļ +çα ä¸ĬäºĨ +åı¦ä¸Ģ ä½į +ag ogue +Ġnarrow ed +ient e +', [' +âĢ º +对 å³Ļ +Ġvill agers +éĩijåŃĹ å¡Ķ +f rey +æŀ¶ çļĦ +ĠLog ging +é̏ é£ŀ +Ġv iz +ĠStart s +Ġcorrel ates +ĠN SD +.F ull +A FE +get String +å·¥ è£ħ +ç¾ § +ç²® èįī +æĺİ æ²» +Ġdi vert +è°Ī èµ· +ç¼Ķ éĢł +: e +. 
axis +Ġstring With +åĪĩ åı£ +med ical +Ġ åķĨ +ĠAugust a +è§Ħ模 åĮĸ +Ġbrief s +Ġoverse es +Ġl ashes +æĸ¹ æĸ¹éĿ¢ +çļĦ çľ¼åħī +âĦĥ ãĢĤ +N FL +åıį è¿ĩæĿ¥ +å»¶ 误 +_PR IVATE +ĠW alls +ĠL ys +Ex planation +Ġev olves +åıĸ æł· +æĢĢ çī¹ +ç¥Ń åı¸ +Ġcort isol +re ceived +First ly +åįģå¹´ åīį +èī°éļ¾ çļĦ +Know ing +æīĭæľº ä¸Ĭ +া ঠ+Ġ åħ¨åĽ½ +ä¸į ä¸ĭæĿ¥ +é«ĺ æ°´å¹³ +æĢ» èĥ½ +离åIJĪ åύ +ä½ľ ç͍äºİ +az el +èİ« è¿ĩäºİ +ht a +è¿ij çϾ +IGN ORE +Ġanten nas +] ', +为 代表çļĦ +æķĻåѦ 楼 +æĭĮ åĮĢ +Ġbl urred +æĶ¾å°Ħ æĢ§ +Ć ą +, åΰäºĨ +re ve +çļĦ åIJĪåIJĮ +ï¼ļ æľ¬æĸĩ +Ġtoler ant +ï¼Įå°± è§ģ +\t ilde +Mon ster +Ġ ï¼į +åĨ· æ°Ķ +èĥľ åľ° +å¾Ī å¥ĩæĢª +Ġrespons iveness +Ġstra ined +_ex ample +ç®Ĭ æĥħåĨµ +Ġa ide +æĸ¹ åºĶ +çIJĥ å½¢ +ott ages +å®ļ çIJĨ +Ġsp elled +å¸ĥ è¢ĭ +Ġric hes +M odes +re cht +é¢Ħ è§ģ +Ġvacc inations +Ġtox in +T oy +ï¼Į æİ¨èįIJ +ĠW att +åĸĦ æĦı +. ini +æį¢ å±Ĭ + Ŀ +Ġ åħ± +ä¸į æıIJ +天 æķ° +åĪĽéĢłæĢ§ åĬ³åĬ¨ +å¤ Ļ +Ġtim ers +ï¼ĮæĮĩ çĿĢ +æĹ¶ éľĢè¦ģ +åŃ© åĦ¿ +anc ock +ï¼Įä»ĸ æĬĬ +å¹² å¹²åĩĢåĩĢ +æĬĵ æīĭ +ĠCarol yn +æĥ³è±¡ ä¸ŃçļĦ +Ġl ombok +èIJ¥ä¸ļ å¤ĸ +ĠJ ol +ï¼ĮæĪij ä¹Łä¸į +check ing +Ġre const +Ġwh ims +æĹ Į +åĴĮ åIJĦç§į +An imated +, å®ī +Ġadd Criterion +é» Ŀ +ĠMin isters +Ġ és +ä¸į 注æĦı +ç§»åĬ¨ çļĦ +Ġun avoid +ä¸ĢäºĽ å°ı +Ġcolumn ist +_AR M +) }) +ãĢĤ æ´»åĬ¨ +ç¡Ŀ éħ¸ +**** * +Ġdors al +ĠS utton +æĺ¯ æĸ° +res se +F uel +Ġtra vers +sh an +æĿĥ è¯ģ +ë © +ĠT ER +Ġtrans genic +åģľ æĶ¾ +Äģ n +常åĬ¡ å§Ķåijĺä¼ļ +.load s +v il +ä½ł éĤ£ +æĪij们 åºĶ该 +ï¼Įçľĭ æł·åŃIJ +å¤ĦçIJĨ 好 +ĠMaz da +Ġ' .$ +æĪĸ åħ¶å®ĥ +å®¶åºŃ æķĻèĤ² +éŁ³ä¹IJ åѦéĻ¢ +SK U +ä¸Ń ç«ĭ +éķ¿ éķ¿ +ï¼Įå¿ħ å®ļ +ç§ĺ å¢ĥ +.p rev +Ġsc oped +, æŃ¤æ¬¡ +A wait +ĠVis iting +ĠFind s +带 ä¸Ģè·¯ +\ setminus +urs es +ali ases +对 人 +åĨĽ æ°ij +Ġbar ley +åĬ¨ 人çļĦ +Ġre aff +. 
^[@ +Ġfin ed +ï¼Įè¿Ļ ä¸ĢåĪĩ +ä¸į æ¯Ķ +Ġcav ities +ï¼Ľ ãĢĬ +ĠSettings Accept +åı¯ä»¥å¸®åĬ© ä½ł +åĭĺ æİ¢ +åºŁå¼ĥ çī© +Ġincarcer ation +ï¼Į å¸Ŀ +=' $ +Ġwarm ed +ĠSp iel +ä¸Ģæł· äºĨ +ĠBroadcast ing +çļĦ éŃħåĬĽ +责任 人 +ĠStri pe +ĠJ O +çĤ ķ +ç§Ł åĢŁ +Ġbroadcast s +ĠO mn +åĪĨæŀIJ äºĨ +Ġmis car +ĠCere mony +ä¸ĭå®ļ åĨ³å¿ĥ +ãĢĭ æĺ¯çͱ +åĴ« å°º +ĠS erg +æ´» åĬĽçļĦ +ĠAnt arctica +ul g +åħ¶ å®ĥçļĦ +å½ĵ æĻļ +亲åŃIJ éī´å®ļ +ãĢĤ åİŁæľ¬ +д а +ï¼Įåľ¨ ä»ĸçļĦ +å¦Ĥæŀľ æĪij们 +è´Ń 车 +ĠLEG O +å¼ ij +Non null +/ users +ï¼Į 带æĿ¥ +_comp are +Ġap ologies +ï¼Į èĶ¡ +åĬŁ èĩ£ +Ġvag inal +Ch rom +-l ib +Ġatt enuation +ĠDark ness +) f +. after +Ġge ological +V iet +au coma +ãĢģ è±Ĩ +鲤 é±¼ +æºIJ çļĦ +çĶĺ æ²¹ +- era +Ġb w +ï¼ĮæĪij ä¸įæĺ¯ +J udge +ir con +ĠNick el +R ights +up us +ĠMar ble +(s lot +z ek +ĠI ST +ĠHol t +engine ering +åįµ å·¢ +ä»Ģä¹Ī äºĨ +Ġwa ive +ä¹Ł éĢIJæ¸IJ +æĹ© äºĨ +iam eter +Ġinvent ive +éĿ¢çĽ¸ è§ij +ï¼Į å¹¿ä¸ľ +Ġ> :: +注æĦı åΰäºĨ +æ²ī æ²ī +ï¼Į æ³° +ãĢģ æĸ¹æ³ķ +绿 çģ¯ +为 åįķä½į +ãĤ § +ĠSh ops +Ġsens ations +ver ified +Ġper cussion +Ġdeg enerate +ä¹± ä¸ĸ +èĩªå·± äºĨ +Ġexempt ions +ä¸ĵåįĸ åºĹ +åĴĮ èµµ +Ġ ä¸Ńæĸĩ +ĠU d +Ġinadvert ently +( theta +è¶Ĭ éķ¿ +Ill uminate +ï¼Į åģ¥åº· +ert il +åĩ¶ çĭł +äºĮæīĭ 车 +crib es +ï¼Į çªģåĩº +ĠAl one +Ġshowc ased +$ } +ĠB ian +Ġpost ings +æľ¨ éŨ +åĥı ä¸Ģ个 +ĠRE PORT +. 
ctx +人åijĺ åĴĮ +æIJŀ æ¸ħæ¥ļ +ĠRoman ian +Ġupholst ery +çļĦ åķĨä¸ļ +Ġdel inqu +ç»ıæµİ æįŁå¤± +ym ers +ä¸įçŁ¥éģĵ 该 +L ew +ĠI U +åľ¨ åIJİéĿ¢ +为 æķ° +Ġsens ational +ï¼Į æ¯ĶèµĽ +_ Un +Author ity +ç½ķ è§ģçļĦ +en burg +Ġdi pped +ä¸ĥ 大 +Pattern s +ï¼Ł ä¸įè¿ĩ +çļĦ人 æ°Ķ +Ġflux es +ä¹Ł 该 +ãģ Ń +åij³ åĦ¿ +Ord inal +ĠP Y +, åħ¨åĬĽ +ä»İ ä¸Ĭ +æľĽ äºĨ +Ġsle w +æ·± æĦŁ +ad io +èĦij åIJİ +éģĹ çĹĩ +æ²¹ ç®± +ĠLind sey +ç»ļ 丽 +被 éªĹ +æµ· éĩı +d igit +Ġ æľī人 +ãĢĤ å¹³æĹ¶ +è¿ĺæĺ¯ ä¸į +ĠDev Ops +Phot ography +空 èħĶ +éĢĴ å½Ĵ +Ġan eur +plic ates +ĠLe aves +o C +éķ¿ åģĩ +Event ually +ï¼Į ä»Ģä¹ĪæĹ¶åĢĻ +åıĹ çļĦ +æĹł éĻħ +年代 çļĦ +Ġra ining +g io +Ġsm ashed +åħħ满 çĿĢ +åįĪ å¤ľ +ĠPres byterian +C rystal +Ġ\ ` +èĴ ¿ +Ġent ail +Ġun resolved +å¿ĥ åħ¨ +åĨľ æľº +ĠSent inel +åľ° åĬ¿ +Ġperson als +ç§° å¾Ĺä¸Ĭ +æ´ģ çϽ +-se lected +Ġ* >( +çŃī 诸å¤ļ +ĠLaure l +@@ @@ +åĨĽ æľº +ĠInj uries +éĶ ¢ +boot strap +Ġflee ing +( 以ä¸ĭç®Ģç§° +, è¿ĽèĢĮ +- change +æĪij们 æĿ¥ +èĩªçĦ¶ çķĮ +çļ± çľī头 +天 æĺŁ +ï¼Į人 åı£ +Ġster ling +Ġ æĽ² +Ġlip ids +ï¼Įåħ¶ä»ĸ 人 +Ġ{ - +Ġinvari ably +åı¯ 没æľī +Ġmult icultural +åĴĮ åĽ½éĻħ +ĠIllegal StateException +ĠCollect or +âĢľ åħŃ +res erved +(p ost +çͳ è¯ī +ãĢĤå¦Ĥ åĽ¾ +Ġad am +èģĮä¸ļ éģĵå¾· +æĿ° 伦 +åĬ¨åĬĽ åѦ +all ows +æ³ķå¾ĭ æĦıè§ģ书 +å¼Ģåħ³ 管 +ĠPal ette +med ian +ä¼ł è®° +Ġmon oclonal +Ġrev oked +Ġconsum es +Tor onto +ãĢģ æĮĩ导 +æĿ¡å½¢ çłģ +ĠJ ab +-cert ified +åĩº èĩªå·±çļĦ +éĥ½ å¼Ģå§ĭ +èĤĨ èĻIJ +空 äºĨ +dec imal +Ġpist on +ĠDest iny +, åIJĮæ¯Ķ += [" +第 åįģä¸ī +å®ŀçݰ ä¸Ĭè¿° +Ġpool ing +Ġincident al +ĠParticip ation +å°ı å®Ŀ +Ġtake over +Ġunrest ricted +ä¿Ŀåģ¥ åĵģ +Fr anc +pt ime +< head +Ġ èĤĸ +ab outs +çŃī级 çļĦ +im us +åģľ ä¸ĭäºĨ +ï¼ĮåĨį ç͍ +ĠP anc +_b ad +Ġrecept ive +åħļ é£İ +ï¼Įåı¯ ä»ĸ +æł¸å¿ĥ ç«ŀäºīåĬĽ +Ġpal let +ĠG rac +åı¯ä»¥ çIJĨè§£ +ĠRe vere +ï¼Įä¸Ķ æīĢè¿° +B ERS +âĢĶ as +è¯Ĺ è¯Ĺ +ï¼Į åıĸå¾ĹäºĨ +pp ermint +天 å°Ĭ +tern ess +ï¼Į æĦıåij³çĿĢ +人 ä¸Ń +list en +ĠPres ervation +Ġpiss ed +æ²»çĸĹ æĸ¹æ¡Ī +Ġcarbon ate +ï¼Įä¸į ä¹ħ +离 åĪ« +. 
record +E PT +è¿IJ è´¹ +ĠBent ley +ãĥķãĤ ¡ +éĢŁåº¦ åĴĮ +ĠH int +itone al +Ġb iz +Ġin organic +女 æĸ¹ +Ġobject ively +Ġunset t +-f ood +{ frame +ĠDis pose +Ġsw ell +æĺ¯åIJ¦ æĺ¯ +ĠPort rait +Ġsub title +Ġpean ut +ĠConfeder ate +g w +ï¼Į 设 +å¤ĸ åħ¬ +æĭī å¼ĢäºĨ +å·´ æĸ¯ +åĽ´ æłı +伤 çĹĽ +u aries +ï¼Įå°± éľĢè¦ģ +ĠRog ue +im ble +ç͵ ä½į +æ°´ åĬ¡ +æĸŃ å®ļ +plit ude +ur ricular +nd on +æĸ¹ åĿĹ +Ġfib res +s cheme +ï¼Į 绣ä¸Ģ +Ġv iv +å¤ı 天çļĦ +Ġmicro f +Ġcow boy +港 èĤ¡ +Ġrad ii +\ sim +âĢĿ æĹ¶ +ä½ł æĥ³è¦ģ +çłĤ ç³ĸ +å®ŀè´¨ æĢ§ +( items +ĠK olkata +大åѦ æ¯ķä¸ļ +Ġcatch y +å¹´ çĶŁ +åĨ³ æĸĹ +Ġimm utable +REG ISTER +Ġ éĺħ读 +Ġcle ars +è§īå¾Ĺ ä»ĸ +å᫠士 +nut rition +) ]( +f name +ï¼Į æĬķ +se ctor +ubb ard +. Extensions +CR A +Ġburst ing +å°± å¾Ģ +å£ ķ +ĠÐ ľ +Ġconve ction +åĮł å¿ĥ +Ġobl iv +( sw +åĬ¨ æĦŁ +缼 ä¼ļ +. av +Ġenvelop es +á Ł +ĠS hed +缼 è¡Į +ĠProt ected +ĠInv oke +èĮ¯ èĭĵ +Ġder ives +æīĢ æĢĿ +. unit +C oc +R uby +ĠE SC +exper ience +. IP +ï¼Į èĩªçͱ +Ġexpans ions +: ss +çļĦ è¿Ļä¸Ģ +Ġcommut ing +å±± èᝠ+ĠOb amacare +çĶľ åĵģ +Gen esis +ĠSide bar +æĪĸ åľ¨ +çļĦä¸Ģ å¼ł +Ġexc ursions +ï¼Į ä¼´éļıçĿĢ +[ row +RO UT +éĩijå±ŀ æĿIJæĸĻ +Att ention +, åIJ´ +Rob in +ï¼Į使 ä¹ĭ +( org +Ġdivers ion +çļĦ åį±å®³ +Ġpo ke +伤 人 +æĺ¯å¯¹ çļĦ +" How +. theme +ä½İ èIJ½ +Ġter restrial +ÈĻ i +ï¼Į æĤ¨çļĦ +æ³ ĵ +aaaaaaaa aaaaaaaa +ĠCau chy +Ġblock er +éļIJ 身 +è¾ĵéĢģ 带 +Ġpre tt +éĴŁ å±± +ĠG uns +Ġalk yl +- equiv +ĠEl f +Ġty res +Ġl m +æł ¾ +Ġturb o +/ (( +il oc +çİĭ éĽĦ +èıľ 鸣 +çļĦ æĶ¶åħ¥ +è¡Į åĨĽ +mas ters +Ġrev oke +èŀºçº¹ æĿĨ +Ġdep ended +ĠVar iety +Ġv l +Ġcl iffs +sc ode +åľŁ å·¥ +UI View +ï¼Įä½Ĩ 对äºİ +åijĺ å¤ĸ +Ġ! 
[ +ç¿» æĸ° +Ġancest ral +-n ull +Ġvig ilant +educ ation +ãĢģ éħĴ +çĸ¼ çα +ä¸į 妥 +åĽĽ åĪĨ +Ġpharmac ist +ĠPerson nel +ëı Ħ +ï¼Į åIJ¯åĬ¨ +To Array +ĠNumer ical +ï¼Įä½ł åİ» +Ġpeace fully +åľ¨ 身边 +Ġnon ce +Ġhom icide +åĨħ å¹ķ +ï¼Įè¿Ļ ä¸ĭ +ĠKel ley +ç§ij çļĦ +ding er +Ġresource Culture +çĮ® ç»Ļ +ĠèĢĮ åľ¨ +äºĭä¸ļ åıijå±ķ +ï¼ĮæĢ» éĥ¨ +纲 è¦ģ +ãĢģ ãĢĮ +Pro posal +ï¼Įä½Ĩ ä½ł +åħ¬å¼Ģ æĭĽèģĺ +.Set tings +é«ĺ å¤Ħ +æ¶² ä¸Ń +èĥľ è¿ĩ +çĽ¼ æľĽ +第ä¸Ģ 款 +è¿· éĽ¾ +t emplates +ĠE QU +clos est +M AR +et cher +ĠSt okes +ä¸ĥ æĺŁ +ĠBE FORE +ĠG iov +èĩªçĦ¶ç§ijåѦ åŁºéĩij +_ plan +Ġun question +Ġm olar +_gener ic +æĭī åĬĽ +èµ°äºĨ åĩºåİ» +èµļ åΰ +ï¼Į æĶ» +Ġmethod ological +]{ }\ +æľ¬ æľĪ +Ġmed ial +_ radius +ï¼Į éķĩ +éĿŀ éģĹ +äºĭå®ŀ è¯ģæĺİ +Ġstabil ized +Ġs ank +ï¼Į 梦 +èĢģ èĢģå®ŀ +Y TE +Ġp aced +éĩij æĸ¯ +æŀĹ æ¸ħ +Ġcorrect ive +ĠCalling Convention +ol ulu +ĠNg uyen +ĠS CC +=" + +ãĢģ æĪIJ +éĺħ åİĨ +Ġd ine +ĠW arn +IC ON +arth ritis +天 èµIJ +åĬ¨ 车 +åħ³èģĶ åħ³ç³» +ï¼Į ç¬¬åĽĽ +ç¯ĩ å¹ħ +西åĮ» ç»ĵåIJĪ +Ġl ava +太 ä¸Ĭ +éĿŀ çī©è´¨æĸĩåĮĸéģĹ产 +åĬ© åѦ +èĭ±åĽ½ çļĦ +åıijéĢģ ç»Ļ +æĬĢèĥ½ çļĦ +ĠJac qu +Ġsupermarket s +Ġr ation +交 æĥħ +Ġbicy cles +Ġmos que +Ġrain forest +ĠVan illa +à§į ঠ+ãĢģ èij± +ï¼Ł è°ģ +æķħ æĦıçļĦ +El astic +Ġbul ly +\ Vert +) is +ĠAl ign +ï¼Ľ åħ¶ +Wh ilst +irs ch += tf +Ġshow room +ãĢģ æ¹ĸåįĹ +ï¼ļ ç͍ +ĠEq s +> % +Rel oad +å¿ Ĵ +ĠWork out +夹 æĿ¿ +ri ot +ip so +Ġeight y +. 
Order +Ġe e +ï¼ĮèĢĮä¸Ķ è¿ĺæĺ¯ +Ġout bound +代 åħ¥ +åĻ İ +uth an +è¯Ĺ æĸĩ +ï¼Į没æľī ä»Ģä¹Ī +èĩª å®¶çļĦ +Exp ressions +ANG LE +ĠAzerbai jan +: name +龸 主 +åħ³éĶ® æĺ¯ +ï¼Į å·¥ä¸ļ +Ġsc rib +Id e +请 èģĶç³» +B rief +Ġpast oral +ĠT attoo +ãĢģ èĵĿ +åįģ æĿ¥ +ãĢĭ æĽ° +æĪij åı« +ä¸ĭ 头 +åĽł æĸ¯åĿ¦ +èī¾ ç±³ +大 çϽ +Ġcont iguous +_m issing +Any thing +ĠRam an +æķĸ æ²IJéĺ³ +_ verify +Y OU +ï¼Įè¿Ļ åľ¨ +Ġ> / +å±ħ äºİ +åĶł åı¨ +Ġbipart isan +Ġg ait +âĢĿ é¡¹çĽ® +Ġre think +ç±³ ç²ī +Ġoverwhelming ly +å¾Ĺ å½ĵ +æijĩ æĽ³ +ä¸Ģå¹´ åĨħ +çĽ¸å¯¹ çļĦ +端åįĪ èĬĤ +N u +å¾Ī æŃ£å¸¸ +è´¨ æ£Ģ +/dat abind +ĠL ey +_p atch +Ġsl ender +Ġq t +认åı¯ çļĦ +ï¼Įè¿Ļ æĦıåij³çĿĢ +èĩª ç«ĭ +_t ail +_d ead +ï¼Įæĺ¯ ä»ĸ +re ceive +Object Name +id ium +oun cy +寺 éĻ¢ +ĠC IO +æį® æŃ¤ +_f eed +ï¼Į该 åħ¬åı¸ +西 æ±ī +. Editor +Ġspat ially +ra ight +ĠApp Compat +Ġintr usion +ĠEston ia +Ġ åħ·ä½ĵ +Ġw rench +åŁº äºļ +Adv ance +W izard +er ity +ãĢģ åıĬæĹ¶ +ire z +Ġg m +Ġscreen ings +cuss ions +_res ources +Organ ic +室 åĨħçļĦ +aph rag +ç»ıèIJ¥èĮĥåĽ´ åĮħæĭ¬ +Ġ 饰æ¼Ķ +çĹ ¿ +Ġ第ä¸ĥ 竳 +æĺ¯ æĿ¥ +æŀľ èͬ +Ġemail ing +æľīä»Ģä¹Ī åħ³ç³» +Ġmin er +ä¸Ĭ æĿ¥è¯´ +N early +ich let +常 æľī +block quote +ĠA FC +象 æ£ĭ +Ạ¿ +Ġoverrid den +ĠS ink +ect l +Ġprot obuf +çĬ¶æĢģ æĹ¶ +, æĹ© +- player +le z +Ġen listed +å¿ĥ æĻº +åİ» åIJij +ç®Ĺ ä¸įä¸Ĭ +-t ailed +iver ing +α ν +ç͍ åħ· +}{ }{ +å¿ł äºİ +Ġunder grad +ç±» 产åĵģ +Ġcirc a +æİº æĿĤ +Ġirrit ating +Ġsail ors +ĠT oll +ĠSt em +ĠTw elve +æĩĬ æģ¼ +ä¸Ģ çѹ +_m onth +å¼ł çĭĤ +ä¿® 羣 +C ath +ãĢĤ é»ij +Ġinstitut ed +æŁIJç§į ç¨ĭ度ä¸Ĭ +ĠGeorg ian +ri que +.n umeric +Ġparticular s +Ġwin ters +ãĢĤ æĭ¥æľī +ĠM VC +æīĢ ç͍çļĦ +æķ£ åıijåĩº +æĿ¥æī¾ æĪij +Ġqu itting +ãĢĬ å°ı +å±± 人 +空æ°Ķ ä¸ŃçļĦ +Ġ æīĢè¿° +_pro vider +Ġaward ing +ä¸Ģ 颤 +ĠA min +åΰ è¿Ļ +ĠWe aver +expect s +Ġinst itutes +æĮģæľī人 ä¼ļè®® +ï¼Į 羣æŃ£çļĦ +çı © +åıĽ ä¹± +æµģ éľ²åĩº +_DE LETE +Ġ× Ķ +over nor +å±ħ å§Ķä¼ļ +Ġcontain ment +æĭ¼ å¤ļå¤ļ +常è§Ħ èµĽ +对å¤ĸ å¼ĢæĶ¾ +N b +ä¼ļ åıĹåΰ +ere f +ç»§ç»Ń 说 +è¿ ¥ +å·²ç»ı å°Ĩ +Ġrem anded +éĢļè¿ĩ æīĢè¿° +åĿĩ çͱ +èµ¢ å®¶ +ĠFac ial +åįĹ 
éĺ³ +åı£ ä¸ŃçļĦ +åĬŁèĥ½ åĴĮ +ãĥ¼ ãĥĪ +Ġh oop +大 çģ¯ +çļĦ å§¿æĢģ +ãĢģ åĮħè£ħ +ãĢĤ æ±Ł +ü ck +æĬĺ 弯 +iv irus +èĬĤ åζ +ï¼Į对 ä¸įèµ· +_back end +Ġem itting +å¥ĭ åĭĩ +ĠVolunte ers +Ġpl as +æĶ¾ æĺł +éĿĴ è¡£ +ĠFriend ship +ĠO V +Ġk Hz +text sc +.op ens +èĢĮ éĻį +Ġgener ality +羣 çļ® +airo bi +éĥ½æľī åı¯èĥ½ +å¦Ĥä¸ĭ åĽ¾ +_se g +.f c +S RC +Ġproc rast +ä¸į éĹ® +ãĢģ åIJij +Ste el +çľ¼ è¢ĭ +æľ¨ åħ° +_ phy +çŁ³å¢¨ çĥ¯ +ĠX V +="@ + +åºĶ 纳ç¨İ +.p ayload +. En +è¿Ļä¹Ī 个 +ĠCalcul us +U rban +Ġs dk +ĠC yt +c ulture +åĴĮ åĪ«äºº +ĠRespons ibilities +ï¼Į æ·±åľ³ +ult on +第 åįģä¸Ģ +" å°ı +U åŀĭ +in cluded +ï¼Į L +æĭį æĪı +å¾Īå¿« å°±ä¼ļ +ĠRen ov +ĠReg ions +-S emit +CAP TCHA +ä¸Ń åħ´ +ç¼ł 绵 +Sal ary +ĠM aver +ï¼Į 头åıij +Ġplant ation +( False +ĠE rd +éķ¿ é£İ +"] =" +Ġpast ors +ĠR ak +åΰ å°¾ +åĩº åĬĽ +éģĹæĨ¾ çļĦæĺ¯ +_S UP +ĠHam mond +F rag +ĠS UPPORT +ĠP on +è¡ ® +sp irit +-pack ages +qu ares +便 被 += # +Ġe urop +Ġann um +, 主 +åºĦ æĿij +Ġgamb lers +Ġ$$ ( +Ġcentr ally +Ġ 奥 +Ġm ai +ĠS UM +ä¹Ł å°±ä¸į +Victor ia +æľī åĩłåĪĨ +åļ· åļ· +ent iful +ĠF em +æĶ» åħ³ +Ġsubsid y +A aron +Ġble ach +Ġbull ish +Ġd osing +æĪij们 å®¶ +ĠChe rokee +å¤ľ 空 +ĠPOL ICY +åĴĮ å¸Ĥåľº +_c amera +Ġconvey ing +Ġm ating +ĠID S +, ä»¿ä½Ľ +ĠA ux +è¿ĺ ç͍ +Ġdam ned +é²ľ æ´» +Ġpric eless +å¹½ åĨ¥ +ï¼ĮäºĮ æĺ¯ +\ mathrm +ï¼Į èĢĥçĶŁ +á r +ĠBless ed +å°ıä¼Ļä¼´ 们 +ãĢĤ åįİ +ass ignment +Ġreason ed +éĩijé¢Ŀ 为 +å°Ĭæķ¬ çļĦ +[ g +ï¼Į 注åĨĮ +Ġm ute +Ġst ab +åįĸ æİī +ï¼Į æĢĿ +è¦ģ ç»Ļ +Ġdivers ification +Monitor ing +ä¸Ńåħ± ä¸Ń央 +(_ . +ĠRoad s +ä¸Ĭ æĿ¥çļĦ +ï¼Įä¹Ł ç®Ĺæĺ¯ +Ġwithdraw ing +Plan et +. 
ptr +Ġ æĥ³åΰ +Ġp ony +ãĢģ äºļ +ä¸Ģä»¶ äºĭæĥħ +ĠLe aving +ĠClass ics +æŃ£ çīĪ +ï¼Įåı¯ä»¥ æł¹æį® +Ġresemb ling +P ink +Ġpath ogenic +Calcul ator +< j +ĠT ues +Ġsp indle +Ġhome ostasis +Ident ify +æľ´ å®ŀ +Ġ æīĭæľº +Ġpl umber +åĪļ ä»İ +Ġair borne +level s +ä¹Ł æĽ´ +opt ic +-c ampus +ï¼Į她 æīį +æĦ£ æĦ£ +ï¼Ł æĺ¯ä¸įæĺ¯ +ig ibility +Y ep +en eg +ï¼Į å±ķçݰ +enc rypt +Ġex ile +ä¸ī äºĶ +Ġw oo +Ġcra ve +_OPT IONS +éĥ½ åħ·æľī +èĵĿ åŃĹ +Ġvoc ab +é¦ĸ è½® +æĪIJ æŃ£ +èĥ¶ æ°´ +èĿ Ĺ +Ġdict ators +ĠK ia +Pro d +é¼» æ¶ķ +Ġnom inees +comp at +Ġt rench +ĠF unk +com be +ä¸Ģå®ļ ç¨ĭ度 +_ rm +å¸ĮæľĽ çļĦ +说è¿ĩ è¿Ļæł· +Ġ éŨ +Ġin mate +_ rad +æĿľ é¹ĥ +FER ENCE +ĩ Ĵ +Ġe iner +ä¸į 平衡 +ãĢģ çī¹ +åĬłåħ¥ ä¸ŃåĽ½åħ±äº§åħļ +å¿Į æĥ® +Ġhalt ed +(in ode +åį¡ ç½Ĺ +åĶ¿åIJ¸ éģĵ +ãĢĭä¸Ģ 书 +ä¸Ģèµ·æĿ¥ çľĭçľĭ +ĠB unny +åĪĺ 举 +ĠSom erset +碳水 åĮĸåIJĪçī© +ĠLG PL +_ rep +ff s +æĻĵ æĻĵ +Ġplace ments +çģ« å½± +çī¹å¾ģ çļĦ +åģ Į +èĢĥè¯ķ æĪIJ绩 +Ġd yes +åĴĮ çĿ¦ +ĠAc res +C ID +Ġ ç±»åŀĭ +ĠThe ology +oc aly +ç®Ģ éĻĭ +缼 大 +-by te +ĠC ute +åĴĮ åľ¨ +æĹł çķı +Ġmom my +Ġow es +ĠT os +ĠD IG +æĶ¯ 票 +æĪ¿ è´· +ĠImport antly +ï¼Įä¹ĥ æĺ¯ +ĠP overty +èĩª çIJĨ +æıIJä¾Ľ äºĨä¸Ģ个 +ãĢĤ â̦â̦ +éĵģ éģĵ +缺 æ°§ +Ġfi ery +Ari zona +ĠH ou +éģ® çĽĸ +Emb ed +Ġv ibes +åĮĪçīĻ åĪ© +Ġsub conscious +èį¯ æ°´ +< s +Ġ åįĬ +_s mall +çļĦ æĢ§è´¨ +_h ome +Ġtroubles ome +/ off +Ġk itten +ï¼Ľ ä¸īæĺ¯ +ï¼Įä½ł ä¸įæĺ¯ +æİ¨ åIJij +TR AN +èĮ¶ åĩł +缸åħ³ æ³ķå¾ĭæ³ķè§Ħ +Ġsaved InstanceState +åįij éĦĻ +ï¼Ł è¿ĺæľī +åģı å¿ĥ +i ère +Ġv ines +Ġimpl ode +L icensed +æĺ Ļ +ĠY E +IM A +Ġdiv ider +åĩ¯ ä¹ĭ +å¥Ķ èµ´ +Ġwork places +è·³ åĩº +Ġ; ) +çĸ«æĥħ çļĦ +and as +Ïĥ ει +ĠKy oto +çľģ çļĦ +Ġdem os +.D omain +ç²ĺ èĨľ +-co ated +L arry +缸 éļĶ +ĠST UD +ï¼ĮåIJİ æľŁ +Ġgri ps +ĠKick starter +_ aux +Ġur llib +ï¼Įä¸Ģ 身 +Ġanal ges +å¾· ç½Ĺ +Ġwave form +ï¼ĮæľīäºĽ 人 +r ifying +åı¯ä»¥ æľī +ĠZ imm +Bind ings +, åĪĨåĪ« +ĠE u +Ġcl ing +çļĦ åIJ«ä¹ī +Ġlo oming +çīĪæĿĥå½Ĵ åİŁä½ľèĢħ +( if +_ nd +Mult iply +ï¼ī = +失 常 +Ġsu icidal +Ġreg rets +Sub mission +Ġp archment +å®ī åį± +Ġtra inees +Ġrad ios +: j 
+å¢ŀéķ¿ çļĦ +çķĻä¸ĭ æĿ¥çļĦ +-d ir +ï¼ĮæīĢ以 她 +æIJŃ ä¹ĺ +Ġsem if +Co ach +æµĵ度 为 +ĠI GF +ĠPart icularly +大æ¦Ĥ çİĩ +ĠSloven ia +é϶ éĨī +ĠP aste +åıĸ å̼ +plus plus +ï¼Į 群 +æľ¬ åĽ½ +稳 妥 +åIJĮ æĢ§ +çĶ» ä½ľ +ĠEd ison +æ°ijæĹı çļĦ +æľ¬ å¹´ +åij¼ åIJģ +Ġpsychiat rist +æľĢé«ĺ 人æ°ijæ³ķéĻ¢ +,è¦ģ ä¹Ī +Ġast ounding +æĶ¿ äºĭ +æĬ½ æŁ¥ +Ġtag ging +å®ŀç͍æĸ°åŀĭ åĨħ容 +ĠVan essa +c et +c ultural +i ography +æĥ¯ äºĨ +带头 人 +è¡Ģ æłĵ +ĠQual ified +cept ors +åIJ« çħ³ +åŁĶ 寨 +_M AIN +ĠSand wich +ator ium +æĿ¨ åĩĮ +è´ª 污 +ĠR J +å·® äºĨ +(p ayload +æĸ½å·¥ çİ°åľº +ï¼Įä¸įè¿ĩ æĺ¯ +ï¼Į以 æıIJé«ĺ +ilit ating +function al +_ comm +Ø Į +ï¼Į ä¸ļåĬ¡ +æľ¬ éĴ± +é«ĺ æ½® +æħ¢ äºĨ +School s +ĠLore ntz +åħ¼ åħ· +åĤ² 天 +.z ero +out side +Ġweb log +ĠDE LETE +READ ME +Ġslo ppy +ĠC obb +æĹ¶ 俱 +ĠTy r +åºĶæĢ¥ é¢Ħæ¡Ī +åĴ§ åĴ§ +Ġ 广å·ŀ +ort ing +Ġle ash +天 æĪIJ +Ġel ast +纽 带 +ĠKn ife +ag ulation +Ġfur ry +æ°® æ°Ķ +G ets +åĴĮ æķĻèĤ² +çīµ è¿ŀ +ĠK et +å®ŀä½ĵ åºĹ +æ£Ģå¯Ł å®ĺ +ĠRaj asthan +åĺĪ æĿĤ +ä¹ł æĢ§ +æ²³ çļĦ +å®ŀçݰ æĸ¹å¼ı +lo it +pack ed +Ġb rom +ĠTh urs +æ¿Ģ è¿Ľ +Ġcass ette +S z +ĠAn alyze +æĢİä¹Ī ä¹Ł +åij½è¿IJ çļĦ +èµ· å±ħ +- used +_ LL +èĩªå·± åģļ +æĻļ éĹ´ +Ġdom ination +ï¼ĮåİŁæĿ¥ æĺ¯ +ĠT K +Ġg ib +æĸ¹ æŃ£ +ï¼Įæľī çļĦ人 +å« ¦ +ä¸ĢèĦ¸ çļĦ +ĠSurve illance +ou fl +Ġis Equal +æľīä¸Ģ ä¸Ŀ +ĠAdv antages +Ġ ãĢİ +Ġen ch +Ch a +chron o +B ruce +ĠCo at +Ġaud iob +ĠCor por +ĠData Frame +é²ģ æĸ¯ +Ġit ching +Ġch ased +ï¼Įä½ł ä¸įè¦ģ +(l ayer +.Code Dom +Ġpl um +æ°Ķ ä½ĵçļĦ +Ġhyp oxia +il ver +ĠApplic ants +ï¼Į å§IJå§IJ +âĢĶ not +æĿĥ è¡¡ +ĠTree Node +ãĢĤ åįķ +åĬł åºĬ +åĬ¿ åĬĽçļĦ +ĠMaur ice +çĭ¬ä¸ĢæĹł äºĮçļĦ +L on +éľ ı +ç͵åŃIJ 书 +ugg ish +Ġcontag ious +åĴĮ å¤ļ +ĠY ah +ĠÐ ł +ãĢģ èIJ¥éĶĢ +Ġappro vals +大ä¼ļ ä¸Ĭ +); \ +.D B +Ġastr onomy +çľĭ ä¸įåĩºæĿ¥ +ĠK ad +éļ¾ åłª +Ġing en +gu ided +ãĢĤ çİ°åľ¨çļĦ +æĭ¬ åı· +Ass ume +ach ines +ĠNurs es +Element Type +Ġampl ify +Ġsl it +çļĦ éĺ³åħī +amb iguous +eng lish +çħ§ 亮 +DU C +col First +Ġthread ed += head +de ath +dep ending +est ep +å¼ł åı£ +æŀĹ ä¸Ń +AG G +Ġprop ane +ost at +Ġant ic +ĠR ee 
+éĿŀ常 ç®Ģåįķ +( th +ĠN ed +-b eta +D OC +os ine +/ The +L BL +ĠTh ur +ĠO ste +Ġre opening +ä¸įæĺ¯ ä»ĸ +çķĻ ä¸ĭä¸Ģ +gs l +Ġacknowled gement +ĠJere miah +Ġg ren +æĥ³ ä¸Ģæĥ³ +羨 äºĨ +Ġd g +ĠB AL +ĠL augh +æ´Ľ 夫 +Access ible +ï¼Į 客 +大 åħ´ +: ä½ł +éĢł èι +åĪ· åĪ· +P oss +Ġcharacter ised +è¯ģåΏ åħ¬åı¸ +å¹´é¾Ħ 段 +çĦ ¡ +wit z +rugu ay +ile en +ang an +TR ACT +æĹ¶ 被 +èĦļ ä¸ĭçļĦ +èģĮèĥ½ éĥ¨éŨ +est ate +no ise +Ġhomot opy +.v m +ri ever +Ġacid ity +åĢŁæ¬¾ 人 +V ue +Ġg uts +ac ulture +天 éĻħ +ĠAlex andra +ĠS J +ver te +èĦ¸ åºŀ +uzz i +çļĦ 第ä¸Ģ个 +ä¸ĭ åŀĤ +Ġweek day +St ride +æĬµ æ¶Ī +çļĦ æ°ĽåĽ´ +OR G +,æĪij ä¼ļ +ĠBoot strap +ĠT ray +车 ä¼ģ +Ġintern ships +æĿ¾ æĩĪ +æĭŁ è®¢ +Decl are +Ġscream s +ï¼Į æľŁéĹ´ +ãĢĤ ä¼Ĭ +_t ra +iol a +临åºĬ ä¸Ĭ +) e +S AP +Ġwait er +Ġe a +èĢĥ è¯Ħ +æľª è§ģ +稳 ä½ı +Ġcultiv ating +W at +ĠN yg +对 æĸ° +æĿİ å®¶ +ĠOver ride +ï¼Įè¿Ļæł· åı¯ä»¥ +Ġperen nial +èµĦ åİĨ +ä¸įäºĨ å¤ļä¹ħ +Ġpun ches +I AM +dd d +ĠRe ceiver +éĤ£ä¸ª 女人 +ĠFound ations +ĠI ch +丢 ä¸ĭ +é«ĺ 强度 +Ġquick est +å¿ĥ åĬĽ +_S SL +è¦ ĥ +åĩºäºĨ éĹ®é¢ĺ +Check box +ĠWorks hops +ĠMill ions +it in +è¿ĺæľī 个 +èĵ¦ çĦ¶ +åīį æ®µæĹ¶éĹ´ +红 è±Ĩ +ili ates +_e ffect +åĴļ åĴļ +c err +人 人éĥ½ +ä¸Ń åĩºçݰ +Ġro bbed +ĠMin imal +Ġ 便 +.init ialize +å¼ł æľĽ +æ±ĩ ç¼ĸ +-e ffects +- ended +Ġbureau c +Ġresh ape +ĠV k +YY YY +Ġpre season +ĠT oxic +ĠPhil ips +ãĢĤ ä¸įçŁ¥ +ãĢģ æľĪ +缼 åħ¸ +æŁ³ åı¶ +ĠRick y +in able +Ġar Xiv +Ġart illery +ï¼ĮåĪļ 好 +Ġ æľĢç»Ī +çŃī æķĪ +èᝠæĪ¿ +____ __ +Ġspray ing +P ok +çļĦ éĻĪ +Ġper oxide +ĠPRO GRAM +Ġpsych ologists +K id +ï¼Į æłij +ĠW ond +.nav bar +é¼ĵ èµ· +_ ONE +ĠH ogan +cl ave +æľ ĥ +æŁIJ çļĦ +污 åŀ¢ +Ġserial VersionUID +æĭĽåķĨ å¼ķèµĦ +Typ ically +æĦŁ æĢ§ +ĠMon etary +-old s +ĠDen ise +ï¼Į èĸĽ +çļĦ å°ijå¹´ +st alk +oc ene +女 éĥİ +åĤ¨ èĹı +ĠCommission ers +åħ¥ åºĵ +.D isplay +Ġbreath able +_AL T +åĬł åİĭ +arag ua +æĪĺ 马 +Ġcr ad +Ġcos ine +M es +m ixed +p ause +å§Ĭ 妹 +Ġart ific +é£ŀ åΰ +æĺ¯ ä¸ĸçķĮ +æ¶ £ +Ġsauce pan +Ġsa pp +ç»Ħ éĺŁ +- eye +Dr agon +éĴ Ĭ +ĠI PC +ob ar +æī¿æĭħ 责任 +ï¼Į天 æ°Ķ +ĠEmer ald +, 
æķĪæŀľ +/ output +Ġdev ise +åIJĪä¼Ļ ä¼ģä¸ļ +æĦŁ äºº +éľ² éĿ¢ +é¦Ļ çĶľ +ç²¾ æ²¹ +Ġguard ed +Ġmenstru al +st udio +ĠM amm +Ġrec ursively +isp ers +便åĪ© åºĹ +P or +n of +ï¼Į让 æĤ¨ +-p rom +ä¸į æľ½ +ĠJ ub +Ġ ï¼īãĢĤ +ed uct +ĠS AN +ĠA SE +ĠCOUR T +Ġad opts +ass oc +get Message +ä¹° åįķ +çīµ æīĭ +è¹² ä¸ĭ +éĥ½æĺ¯ 以 +!!!!!!!! !!!!!!!! +ĠC hern +åĨį ä¸į +è®Ńç»ĥ çļĦ +Ġc ray +int osh +ĠSt eele +éĢĶ ç»ı +ï¼Įä¹Ł 为 += u +K in +ãĢģ åħ¬æŃ£ +马 äºij +åĪij èѦ +åħħç͵ åύ +æĿŁ æīĭ +ĠI z +åĬł å°Ķ +åıĸæ¶Ī äºĨ +åħ¬åħ± 交éĢļ +åĩĿèģļ åĬĽ +B TC +æĵĤ åı° +æ²ī éĻį +v w +åĬł åĢį +Ġoptim isation +Ġ æ³ķå®ļ代表人 +Ġ ä¼Ĺ +ï¼Į å¡ŀ +ï¼Įä½Ĩ æĺ¯ä¸Ģ +端 ä¸İ +rot ic +Ġhand shake +Ġmus lim +.f ocus +æĬķèµĦ æĪIJæľ¬ +Fant astic +& $ +ç¾İ è²Į +l abs +ĠL ump +_m etrics +hem atically +Ġdazz ling +åѦ åĪĨ +ãĢĤè¿Ļ 款 +CS V +Ġnot ebooks +Ġgr ate +emp o +herit ance +Ġc ords +è¿Ļ éŨ +iv ate +Up dater +æ»ĭ è¡¥ +Ġbetray ed +è¿Ļä¹Ī 好çļĦ +lean ing +Ġspons oring +( keys +ĠS ect +令 èIJ¥ +B ow +rol ley +Ġclust ered +ä¸Ģ个å°ı å°ıçļĦ +ary a +产çĶŁ å½±åĵį +è· » +ĠSc opus +il og +-s hell +æIJľ çĭĹ +ĠT rap +天 åºľ +az ers +ĠRib bon +-b in +Ġfinal ists +èĩ´ çĻĮ +([ ^ +( search +å®ī å±ħ +æĬĬ ä½łçļĦ +æł¼ è°ĥ +ĠBrend a +u ators +è¦ģ èĢĥèĻij +å®Įåħ¨ ä¸į +аР± +Ġw att +ass igned +çĥĪ çĦ° +æŁ¬ åŁĶ寨 +Ġlower case +Ġroll ers +躲 å¼Ģ +ãĢĤ çŃīåΰ +Ïģ ί +C hen +éĶĭ åĪ© +isc opal +Ġsoft ened +âĶģâĶģ âĶģâĶģ +Ġbank ers +Ġv owed +Ġemp owers +è¿Ī è¿Ľ +be havior +å®īè£ħ 座 +以ä¸ĭåĩłä¸ª æĸ¹éĿ¢ +Ġaneur ys +over view +Ġparalle ls +leg ated +Ġpast ure +Ġs ailed +åŁº åºķ +play ers +ĠÎ ¶ +Ġt ummy +ï¼Į çĽĺ +Ġne arer +Ġqu aint +两 头 +åħĪ ç͍ +æŀĹ å³° +izz le +Second ly +ent imes +ãģ« ãģª +ï¼Į æŀĦæĪIJ +Ġy en +ĠThe ft +åĪĻ æľī +Ġhom ogen +Ġrevers ing +ĠAF TER +Ġ çŃīåΰ +Ġpupp et +Ġ:- ) +ile vel +Ġs words +士 é¡¿ +åIJİ åı¯ +ĠGe cko +ĠRed uced +Ġconstitu ency +ĠParam s +.S QL +åįĸ çĤ¹ +ĠPar agraph +ï¼Į æ²³ +äºĶ ç§į +( In +ãĢģ éĥij +ano i +Ġconst ipation +ï¼ĮæĪij ä¸įæĥ³ +๠Ħ +ãĢģ çİīç±³ +åijĨ æ»ŀ +book ing +ä¸įåΰ çļĦ +ç ¹ +æĪĺ 线 +ĠAutom ated +Ġunint ended +it Ãł +ãĢĤ ä½Ļ +ä¿® éķ¿ 
+æ¶Ī éĢĢ +s j +åľ¨ è¿ĩåİ» +E mit +éĻ¢ åĨħ +(m m +: length +ï¼Įä½ķ å¿ħ +æĥ³ ä½ł +Ġ ÅŁ +Ġs acks +åľĨ åij¨ +.l bl +åĵĢ æ±Ĥ +çĦ ± +_b ig +éĩįè¦ģ æĦıä¹ī +代 åı· +_st mt +- State +éģĵ éķ¿ +ä¹ĭ å¾Ĵ +Ġund ue +H eb +è¿ľè¿ľ çļĦ +Ġha ird +Open GL +ĠA DS +ĠTru ly +å·¥ä½ľ åİŁçIJĨ +éĻĨ å³° +-ass isted +- ground +W ang +æ°´ åľŁ +.sh ift +------------ -- +ç͵ éķĢ +ï¼ĮæĪij å°Ĩ +Begin ning +F est +æĺ¾ çĿĢçļĦ +ĠTex ans +if def +ĠL PC +对 ä¼ģä¸ļ +âĢĿçļĦ åİŁåĪĻ +Ġcritic ize +arx iv +ĠS rc +( thread +"> ' +ï¼Įåıª æĺ¯åľ¨ +åħį ç¨İ +Ġprep aid +Ġforg iving +OIN TER +æijĨäºĨ æijĨæīĭ +Ġtow ing +ĠNotImplemented Error +åŁº çļĦ +Ġw y +ĠI ris +Ġpay off +å¸ĪèĮĥ åѦéĻ¢ +Ġdiss ipation +> {{ +ĠD ESC +ĠW ifi +两 款 +work ed +rec ation +æıIJ æĹ© +èĪĮ å°ĸ +Ġcollabor ators +/ âĪĴ +Ġl ends +L azy +Ġto re +Ġsnap shots +Ġs urname +ĠIn jection +è¾¾ çļĦ +äºĶ åĪĨ +Ġes ter +Ñ ij +ĠT ee +_ $ +Ġit r +ĠH ed +_F S +.DebuggerNonUser CodeAttribute +åħ¨ éĥ½æĺ¯ +éĩį åIJĪ +æĪĺ äºĭ +dr agon +St encil +åħ´ 建 +ï¼Į她 说 +åĪĺ éĤ¦ +ĠMod ules +C ov +ï¼Į åıĮçľ¼ +Ġlo osen +æĿĤ ä¹± +Ġexcess ively +éĥ½ ä¸İ +éħįåIJĪ çļĦ +ä¸Ģ æ²ī +çŃī ä¸Ģä¸ĭ +头 äºĨ +Ġinf lated +ĠS IL +èĢĥ åīį +çĥĺ çĥ¤ +Ġd ag +çĽij å§Ķ +æĿ¡ è·¯ +æĿ¥æºIJäºİ ç½ij绾 +åľŁ 豪 +è¿ĻéĩĮ éĿ¢ +( ans +, å¾ħ +计åĪĴ åĴĮ +âĢľ é«ĺ +äºĨä¸Ģ 座 +Ġstand out +ĠRober to +çŀ§ è§ģ +ul ses +ĠY as +ĠPro posed +Ġfoot ing +çĹħ èϫ害 +欢 ç¬ij +Ġmes mer +å¾Ĺ 天 +ï¼Ľ å°Ĩ +Am anda +ĠBox ing +el ow +ÎŃ ÏĤ +ĠW ORD +ov olta +举 å±± +éĻ¢ èIJ½ +ï¼Įæĺ¯ çͱ +ĠAg reements +å°ī è¿Ł +ĠD OS +Ġimm ature +Ġcas ually +z hen +ä¸įæĸŃ å¢ŀåĬł +微微 çļĦ +( parse +sd n +A gg +çϽ é¢Ĩ +çĽ¾ çīĮ +oc cur +å®ļ åŀĭ +ãĢĤåľ¨ è¿Ļç§įæĥħåĨµä¸ĭ +亿 çļĦ +h ousing +æĪĸ 个人 +.A fter +Ġc uring +å¤ļ åıĺ +man ent +éĢıæĺİ åº¦ +ĠB ucks +ãĢĤ æŃ¦ +éľĢè¦ģ 说æĺİçļĦæĺ¯ +éĿĴ èī² +çł´ çĥĤ +森 çļĦ +åİĭåĬĽ åĴĮ +O LEGAL +} |\ +ãĢģ éŁ©åĽ½ +大 èĤ¡ä¸ľ +ĠAl ive +Ġquot ations +not iced +åĿIJ èIJ½äºİ +ï¼Į çݰ代 +è¶Ĭ å°ı +, Q +çĹĽ é£İ +Ġ æµĻæ±Ł +ï¼Į è§Ĥå¯Ł +ä¸ĵ åijĺ +ãĢĤ è¿Ľåħ¥ +åĨ· 空æ°Ķ +CC CC +é£ŀ å¥Ķ +Ġamount ed +={ " +Ġmell itus +_se cond +è½´ è·Ŀ +. import +. 
KEY +交 è´§ +Ġinf rast +å¾Ĺ æĪij +æĸĩåĮĸ 交æµģ +T ar +æĶ¾åľ¨ çľ¼éĩĮ +_ other +åĽ½åľŁ èµĦæºIJ +ï¼Į 好çļĦ +Ġbu ildup +urb ed +ĠMat te +æĪĸ çŃīäºİ +æĿİ ç»´ +(arg uments +ï¼ī âĢľ +é»ĺ çĦ¶ +æľīåħ³ äºĭ项 +ĠRenew able +ĠInter actions +Ġpear ls +iv ar +Ġpet ty +iz o +Ġfals ely +ĠÐ ŀ +èι åijĺ +Ġneg ativity +Ġanticip ating +Ġg igs +-b earing +-f ire +æĺ¯ èĢģ +åľ¨ æĢĢéĩĮ +ass ociated +Work ers +åijĬ çϽ +票 ä»· +çIJĨ论 çļĦ +ĠStory t +\ subseteq +Ġz ebra +S OL +g rey +身 æĹģçļĦ +_T EMPLATE +åħŃ ä¸ĥ +æĮij èµ· +ye ah +ac io +å¯Ĵ åĨ·çļĦ +ĠIr vine +. visible +红 线 +çķľçī§ ä¸ļ +ãĤ·ãĥ § +计åħ¥å½ĵæľŁæįŁçĽĬ çļĦ +end ra +ĠComp atible +IF A +Ġfluor ide +æıĴ 头 +åħ¨éĿ¢ åıijå±ķ +creat ive +æĬĬ æĪijçļĦ +ãĤ ĥ +ä¸¥æł¼ èIJ½å®ŀ +S AT +ĠB arg +å¯Ĩ 室 +ĠMessage Box +_g t +: P +åѦ çĿĢ +èı ¡ +带 她 +Ġgra ves +轻微 çļĦ +, ä¸įå¾Ĺ +ï¼Į çݰæľī +and es +ï¼ĮèĢĮ è¿ĻäºĽ +system s +_t wo +é¢Ĩ è¡Ķ +Ġbuff alo +.G ET +Ġepit he +ç²¾ èī¯ +//////////////// //////// +q c +Ġ æ·±åľ³ +Ġfly er +ov sky +(c b +è¡ĮåIJĦ ä¸ļ +, 令 +ï¼Į 沿çĿĢ +oplast y +en ance +Ġcoh omology +å®ī çIJª +æİ¢ æľĽ +Ġcon cession +ĠU A +_t ile +ĠEx ercises +ĠPen al +Ġoverd ue +Ġcond ensation +.er ase +Ġm ars +æµ Ĵ +ç»ı èĦī +è¿Ľ éĢĢ +.m essages +ĠButter fly +-b r +éģĵè·¯ 交éĢļå®īåħ¨ +.s f +ä½ł éĥ½ +æĽ¾ æ¯ħ +ä»İå°ı å°± +De ck +ãĢģ ä¹IJ +è¿Ľä¸ĢæŃ¥ æıIJåįĩ +ãĢĤ ç»ĵåIJĪ +ĠSt d +ract ory +å®Ī åľ¨ +èį· åı¶ +é£ĺ é̏ +_t im +åĨ· ç¬ijéģĵ +rad a +èĢIJ é«ĺ温 +Ġmild ly +_ ed +ĠC rop +ĠSlee ve +- cycle +Q B +Ġt aper +串 串 +Ġhelm ets +, æĺ¯åIJ¦ +Ġs ow +H ydro +f resh +çĬ¯ 人 +åĨ³çŃĸ éĥ¨ç½² +ï¼Į åİĨåı² +ur ator +ãĢģ åı£ +( ui +- print +æ¯Ķ åħ¶ä»ĸ +ill ard +éĤ£ å®¶ä¼Ļ +_const ant +ĠB low +(d escription +Support ing +ĠFlo ating +Ġ åŀĭ +ãĢģ 客æĪ· +å®¶ æĶ¿ +>: < +form ula +éĺ» æĬĹ +éĩĩåıĸ æİªæĸ½ +ãĢĤ å¦Īå¦Ī +ĠF PS +Ġref ill +æĿ¡ 缮 +Ġpel lets +Contin uous +. 
remote +ĠZ ar +$ sql +çļĦ åĽŀ +æĶ¹ 建 +æ°¸ ç»Ń +Ġsho pper +éĻį åİĭ +- arm +åIJij å·¦ +ĠAdv ices +ĠProdu ce +ãĢĤ åį´ +ãĢģ æī§è¡Į +æı ĸ +current ly +Ġscrut in +çļĦç½ij绾 å°ı说 +, 主è¦ģæĺ¯ +äºĮ è¿Ľåζ +缮 äºĨçĦ¶ +C row +ĠB aking +ä¸ĭ 人 +æīĢ åģļ +å¼ķ çĪĨ +Ġinitial izer +åĶ¿ åı« +åĪij ç½ļ +Ġl ondon +Book mark +æĺŁæľŁ åħŃ +Ġà ĸ +J ones +æŃ¦ åύçļĦ +æĬ¢ éĻ© +inal g +_n av +oad er +Ġb erry +ÏĦ ά +im at +ĠB enson +.Event Handler +ch ures +åī¯ ç§ĺ书éķ¿ +ĠDeb ian +ĠÑ ħ +åIJĮ éģĵ +_ bs +Ġsumm oned +ĠRF ID +ĠCarm en +/ ^ +ĠL AP +èĴĻ çī¹ +æħµ æĩĴ +éĥ¨ 级 +Ġproject ing +Ġretriev ing +n pc +åİ» å¹´çļĦ +Ġmyst ical +ĠSte ak +M ine +Ġcount able +Ġcer amics +温 æĥħ +.F unction +æľŁå¾ħ çļĦ +Ġtranqu il +_s uper +ä¸ĸ åĩ¯ +丰 åİļçļĦ +st udy +çα ç¾İ +b elt +天 åij½ +管çIJĨ ä¸İ +íĬ ¸ +éĩij é»Ħ +æĮģç»Ń åıijå±ķ +roph ies +el ope +åĤ» åĤ» +æĦ¤æĢĴ çļĦ +Ġrehears al +Ġ åĪ©ç͍ +ĠSur geon +Ġdisappe aring +è´Ńä¹° äºĨ +æķŀ å¼Ģ +Ġ 交æĺĵ +å®ĭ æľĿ +Ġstuff ing +她 åıĪ +Ġins ulating +Ġgar nered +Ġsimpl istic +------------ --- +regn ant +_w arning +ĠId ol +ulner ability +ï¼Įæĸ¹ æīį +: ä¸Ģ +All en +è¸ īè· +åį· åħ¥ +hh h +c ube +å¹³ 庸 +éϤ 湿 +çIJµ çIJ¶ +W X +ĠâĢ ķ +æİ¨ åΰ +åĸĬ 声 +è¸īè· Ħ +天 ä½ij +ĠFre el +Ġf iat +å°± åĪ« +åħ¨ ç½ij +åŁº åĽ¢ +ï¼Į åĩĿ +èĥ½ ç»Ļ +_DR IVER +Ġ é¢Ħ +çļĦæĹ¶åĢĻ äºĨ +sd k +éĵ ¿ +çĶ³è¯· çļĦ +U Integer +ï¼Į èģļ +Ġconst rain +éĩİ çĮª +åģļäºĨ 个 +ĠTreasure r +管 äºĭ +å°± æĺ¾å¾Ĺ +arch ing +èĬ± æ¤Ĵ +A KE +Ġ åĨĻ +ç¾İ 满 +女 æ¼Ķåijĺ +ä½ł çļĦ人 +Ġtherm odynamic +ĠF RE +ps z +Ġview points +é¢ĺ 为 +éĢīæĭ© æĢ§ +ĠGen ius +ãĢĭ ) +å®īåħ¨ 带 +Cond itional +è¿ĺ æ´»çĿĢ +éģĹ å¤± +Ġcl ergy +Ġassert False +ĠEm otion +ĠBlue host +Z Y +Ġs Ã¥ +åľĨ å¼§ +ĠHel m +Runtime Exception +ĠAlloc ate +Ġh og +Ġun fore +大家 对 +Ġall iances +ĠBe acon +使 æĪij们 +** . 
+è¦ģ è¿Ľè¡Į +å¹» å½± +( unit +g row +ĠCh urches +"> {{ +á» ĩ +Ġ第äºĶ èĬĤ +al en +计 çļĦ +åĨĽ èΰ +.d ep +æĽ² åŃIJ +L ONG +ä¸į æŃ£ç¡® +éļı ä»İ +sec ute +åıijè¡Į èĤ¡ä»½ +ä»ĸ çļĦ人 +Ġal b +æĸĩ åĩŃ +Ġ åĹ¯ +Ġgl m +çļĦçݯå¢ĥ ä¸Ń +_d est += < +ï¼Į å±ķ示 +Ġcou rier +riv ol +çĭĤ å¦Ħ +çŀª äºĨ +Ev idence +- ending +ĠAl pine +-c ells +éĥ½ä¸į æĥ³ +' l +Ġc k +Ġxy z +ĠF ountain +Ġfre aking +Ġa ure +_col ors +æĬ¥ 社 +ĠImpro ving +- users +ï¼Į èĢģæĿ¿ +Ġal loys +. Trim +St ars +空 æĹ· +/ >< +ĠT ek +ject ory +man i +ĠEst ates +Iss ues +ãĢģ åľŁåľ° +为 åĽ½ +äºij é¾Ļ +(s cope +Ġ éĵ¶è¡Į +ï¼Į ä¼łç»Ł +å°ı äºĮ +_F ile +Att ached +Ġadvers ary +Ġsh ingles +Ġnav igator +Ġsevent een +èĥ½ä¸º åĬĽ +Ġend emic +Ġterm inating +尤为 éĩįè¦ģ +itt s +红 è¡£ +ĠSee king +ĠP ieces +ĠConf idential +Ġtens ile +Ġst umble +çĶŁ åŃ©åŃIJ +Resp onder +_ ar +ãĢģ èµĦ产 +æĭįæijĦ çļĦ +ĠL oving +æł¸å¿ĥ ä»·å̼è§Ĥ +软 骨 +ĠBar cl +ï¼ĮåIJİ ç»Ń +羸 åħī +( vector +B US +to Have +缼 å¼Ģ +Ġadren aline +ĠS itting +åı¤ è£ħ +m ud +Ġrem ission +åİĮ å̦ +ĠEll iot +Ġfor c +åĶ® 票 +ä¸į 讲 +åħ³ ä¹İ +循 åºı +Ġperp lex +ĠEas ily +çħ® çĨŁ +ĠA very +åıĺ éĩıçļĦ +Ġe books +G rowth +S AM +çļĦ åŃ£èĬĤ +.test ing +ĠTow ards +O d +èĩªå·±çļĦ çĶŁæ´» +çģµ åĬ¨ +rug al +(d AtA +Ġassemb ling +ĠMe ghan +Cour tesy +_ chan +Ġp onds +_RE L +draw al +-prov oking +Ġto te +çļĦ 举åĬ¨ +ĠEst her +q n +st ellar +ERV ED +_ any +ĠProv incial +Ġcommand ing +ĠSuz anne +ĠAber deen +q v +zb ek +ç¬Ķ 缴 +Ġ è¿Ľåħ¥ +so f +TAIN ER +r z +ĠPL AY +-c ustom +ä¿ĿæĮģ äºĨ +ĠâĬ Ĺ +ï¼Į è¾ĵåħ¥ +Ġlob ster +èľķ åıĺ +è¡Į éķ¿ +Ġregist rations +ĠNad u +Ġarriv als +åķĨ çļĦ +åıĭ 好çļĦ +ä¹° ä¸ľè¥¿ +åħ½ 人 +Ġpromin ence +çŁ¥ ä¸įçŁ¥éģĵ +çļĦ好 å¥ĩ +/jav ase +Ġsub net +è¾ĥ ä½³ +éĩĮ çļĦ人 +. 
Unit +Ġpost age +å¤į åĩº +.de legate +èĥ¡æ¤Ĵ ç²ī +/ ui +åı ± +æŀģ å¤§åľ° +åı¯ åıĺ +èĤ©èĨĢ ä¸Ĭ +ol son +å¿ĥ åŃĺ +Ġref inance +çłĶç©¶ æĸ¹åIJij +.set Visibility +æ¼Ķ ä¹ī +-f ounded +/ bar +ot ropic +_cl s +è´ ° +itt on += {{ +éĥ½æĺ¯ ä»İ +æļĹ æ·¡ +aby rin +min utes +ä¸į 详 +Com position +ĠB AR +æķĻ åħ» +åıĤ åĨĽ +å²³ éĺ³ +ï¼Į æħ¢æħ¢çļĦ +ä¸Ģ个 好 +æľįåĬ¡ åύçļĦ +ĠIter able +ĠE BIT +红 åħī +uit ar +ï¼Įåį´ è§ģ +æŃ£å¸¸ 使ç͍ +ĠB ust +_m argin +æ²Ļ åŃIJ +_D ONE +Ge om +Ġorganis ational +åĮĸ å¤ĦçIJĨ +-reg ulation +Ġ>> = +.V er +Ġ 综åIJĪ +it ance +.s erialize +Ġcount down +çªģçĦ¶ éĹ´ +E CHO +ĠMc Connell +éĥ¨ä½į çļĦ +éĸ ¢ +ä½ł 羣çļĦ +ç¥ŀ åħ½ +ï¼Į人 åĿĩ +dr v +èĩª æķij +åıĸ åIJį +ç¥Ī æ±Ĥ +_ operator +ï¼Į 身边 +, String +ãĢĤ çŁ³ +æİ¥ çıŃ +ĠApp alach +èħĶ ä½ĵ +Ġformal ism +/ èĤ¡ +èĬĤèĥ½ çݯä¿Ŀ +Ġmuff ins +ä¿ ¸ +ĠBlack Berry +çĺŁ çĸ« +ist ication +Ġser otonin +å¢ŀ 产 +çĹħ äºĨ +ĠFis cal +Y NC +\ epsilon +Ġ( ). +-p riced +èµĽåŃ£ çļĦ +.v ideo +in formatics +æĪij åħ¬åı¸ +Ġ ä»»åĬ¡ +Ġw ipes +b ab +æĿľ çĶ« +ï¼Įæĥ³ èµ· +.d ownload +а Ñı +OLUM E +缸è¾ĥ äºİ +Ġst unt +ĠF AT +女åĦ¿ çļĦ +ä¸ĵ项 æķ´æ²» +Fre edom +çĩ İ +Ġapproxim ations +ĠG oose +Ġcar box +hydro gen +åºĶå½ĵ åľ¨ +ãĢģ éĥŃ +Ġv ents +Ġent angled +.c ard +Ġout file +_m gr +Ġbook marks +Tur key +Ġsub marine +é¡¶ æĿ¿ +åľ¨çº¿ è§Ĥçľĭ +ä¸Ģ æĭĽ +åĨ² è¿Ľ +æĺ¯ èĩªå·±çļĦ +åIJĵ 人 +çļĦ 念头 +ĠEmb edded +ி à® +ç¬Ķ ä¸ĭ +Ġacc using +æĿĥ åĬĽçļĦ +Append ix +- operation +ĠP AD +æĹł çĹķ +æĢİä¹ĪåĬŀ åij¢ +Ġ\ # +ush ort +è£ħ çĿĢ +å°ı èħ¿ +æijĨ æijĨæīĭ +èIJ§ è¾° +Ġcon cessions +.h andler +ĠSen iors +ä»Ķç»Ĩ çļĦ +鲨 é±¼ +Ġstrang ely +ic ative +被 çĽĹ +éĥİ ä¸Ń +еР¶ +åįłæľī çİĩ +äºĨ è¿Ļä¸Ģ +åįĹ å®ĭ +ĠSc outs +ï¼Į åħ¬åı¸çļĦ +, ä¸Ģç§į +踩 çĿĢ +Ġst amina +两 ä¸ĩ +_user name +/ he +没æľī å¿ħè¦ģ +åīij æ¡¥ +(r andom +ãĢģ åŁºæľ¬ +å¯Ĵ æ°Ķ +ĠPhilip pe +âĢĿ ï¼Ł +ï¼Įå¦Ĥæŀľ æĪij们 +RA IN +, åįķ +Ġco ached +ç¥ŀ é¾Ļ +人 寿 +ĠV ale +ĠExpl oring +Ġext inct +, è¿ij +P u +å°ı ä¸ī +åıį åħī +æĭ¿ èµ° +èĩªçĦ¶ çģ¾å®³ +_clean up +ï¼ĮæĪij æĿ¥ +Ġson o +ad m +App lying +å¦ĤæŃ¤ ä¹ĭ +Ġs led +ï¼Į ç½ij +ĠC argo +ä»ĸ们 
ä¼ļ +.d river +åħĦå¼Ł å§IJ妹 +ï¼Į 汤 +) åĮħæĭ¬ +ĠBe aver +é¡» ç»ı +è¿Ķ 乡 +Ġbuff ered +没äºĭ åIJ§ +LO SED +åĢĴ åľ¨åľ°ä¸Ĭ +Ġserious ness +Ġremed iation +éĢļè´§ èĨ¨èĥĢ +ç¦ı æ°Ķ +-M obile +çļĦ å¿« +Ġd ick +ĠM atching +Exp ired +ĠSold ier +ĠGlor ia +ĠDi agnosis +Ġठ¹ +Spe ak +. lookup +ĠPr imitive +ĠV all +è¦ģæ±Ĥ åĴĮ +ãĢģ æĬĬ +Ġcl oning +ext ends +.Check ed +ag em +ĠL ace +软 æĸĩ +å°±åľ¨ è¿ĻæĹ¶ +éύ çŁ³ +' A +l ug +ãĢģ ä¸ģ +ç´§ éļı +M erg +ãĢĤ 太 +ĠY EAR +Ġmi RNAs +ï¼Į éĤĵ +Ġmet am +ãĢģ æľª +ri le +ï¼ĮæĪij æĢİä¹Ī +EN ABLE +管çIJĨ çŃī +.f ilename +orn o +Ġsat in +Ġè¯įæĿ¡ åĽ¾åĨĮ +ph osph +_B US +Ġmar ched +Ġintens ities +Ġs ided +åİħ éķ¿ +çĮ© çĮ© +ult ies +åĪĻ ä¼ļ +æĭī æĿĨ +èĩªå·± èĥ½ +Ġest eem +åĪ©æ¶¦ æĢ»é¢Ŀ +Ġadoles cence +äº ĺ +宣 èªĵ +ch io +ins pect +ĠGl uten +æ¸Ĭ æºIJ +Ġfet ched +ãĢģ åı° +äºĮ ä¸ī +çļ® çIJĥ +çĽĶ çͲ +åįģ ä½³ +(m ask +åĢĴ å¡Į +座 åŁİå¸Ĥ +æĬĵ æįķ +ĠOtt oman +æ°ijäºĭ è¯ī讼 +Ġ åĵģçīĮ +D emand +_{ -\ +è´µ éĩį +Ġ åĵĪ +æĬĹ çĻĮ +çĽijçĿ£ç®¡çIJĨ å§Ķåijĺä¼ļ +Ġ å¿« +Ġ åᢠ+ĠOper ators +æıIJåıĸ çī© +Ġware houses +Ġe k +Ġcan yon +/ resources +ĠL OV +转 äºĨ +ï¼Įä½Ĩæĺ¯ å¦Ĥæŀľ +View Holder +触 çĤ¹ +Sc i +F V +ĠNiel sen +ä¸į 代表 +ĠL obby +è¨Ģ éģĵ +ä¹Łè®¸ æĺ¯ +stad t +èĩ³ æīĢè¿° +ĠMod ify +Ġrupt ure +Ġimp oses +表çݰ åĩºæĿ¥ +çĶŁäº§ æĪIJæľ¬ +å¥ĸ åĵģ +æ´Ľ åĮĹ +声 åĬ¿ +UL ONG +å·¥åķĨ éĵ¶è¡Į +Ġlun ches +l aces +ãĢĤ æĿ¥ +èĢ» è¾± +ĠFor got +åĸľæ¬¢ ä¸Ĭ +è£ģ åīª +-support ed +ĠA im +Con cat +, åĮĹ +å¹³åĿĩ æ°Ķ温 +Ġsuit case +Âł A +Ġret ard +.Fore ign +so far +Ġbullet in +èĦ¸ çļ® +åĽº æľī +Ġexceed ingly +æķ´ æľº +çļĦ éĺ¿ +æķħ ä½ľ +ãĢģ ä½ķ +ĠG at +ĠI rene +éĿ¢ åĴĮ +ç»Ŀ ä¸įæĺ¯ +ï¼Įä½Ĩæĺ¯ 对äºİ +net t +ĠEll a +Ġtouch screen +ish ments +ĠGar lic +S erve +éĢģ 礼 +ï¼Įå·²ç»ı æĺ¯ +w end +é¡¶ å³° +ä¸ī个 åŃĹ +G uy +çľĭ åĩºäºĨ +Ġremn ants +ys c +ĠAll owed +(d ist +ë ³ +id ia +_l iteral +.set Property +ï¼Į çͲ +ĠD K +ĠPl ato +RET URN +_ act +åıĮ åŃIJ +éĹ² æļĩ +ĠQ CD +èĥ½ ä¸į +isk y +èĭı æł¼åħ° +vare z +说 äºĽä»Ģä¹Ī +.h ome +.k ind +Ġto fu +- create +l ucky +ï¼Į è®°å½ķ +ï¼Į çĽ¯çĿĢ +.B undle +å¢ŀåĬł åΰ +kl ore +æ¯Ĵ 
çļĦ +èĥ¸ èĦ¯ +æĿĥ å±ŀ +ĠDown s +座ä½į ä¸Ĭ +ren ches +ç»ıåİĨ çļĦ +Ġdark est +- ice +Ġfl orida +Ġens uing +DEV ICE +æ² ĵ +ï¼ļ 为 +èĩªå·± æĥ³ +å¿ħé¡» çļĦ +Ġ( âĢĺ +Re ject +ï¼Į è¿Ī +ä¿ ij +Ġpred ator +Ġroom mate +, æĶ¯æĮģ +, åĬłåħ¥ +{ cor +ä¸į åĩĨç¡® +åºŁ æĸĻ +Ġprop ensity +Ġas par +Ġk eras +èİ« éĹ® +- ord +ï¼ĮåĽłä¸º è¿Ļ +avour ite +/d es +ĠArch bishop +äºĨä¸Ģ æĶ¯ +, æķ´ä½ĵ +ä¹Ł å¹¶ä¸į +ï¼Į æĶ¶éĽĨ +Ex terior +é¾Ļ èϾ +身 ä»· +}[ \ +身å¿ĥ åģ¥åº· +ï¼Į è¿ijæľŁ +. ast +ï¼Į ä½ı +ï¼Į è¦ĨçĽĸ +ĠT EX +äºĮåįģ ä¹Ŀ +æ¶²åİĭ 缸 +. Stream +Ġmotiv ates +Ġsc out +åĨį åģļ +Ġflatten ed +/ [ +èϽ æľī +Ġs inc +åįģ åĢį +Ġwin ery +Ġseed lings +-pro ducing +Ġrev ocation +ĠV et +天 ä¹ĭ +ãĢĤ åıĮæĸ¹ +us ual +åıįæĺł åĩº +. AP +N J +Ġdifferent ially +Ġhouse keeping +ĠF uk +åĨį ç͍ +.d ebian +åIJĥ éĨĭ +æł¸ å®ļ +but tons +羸 ä¸Ń +_FL ASH +Ġf w +ĠD rops +uss en +åŁŁ ç½ij +èģĶèµĽ ä¸Ń +æķĻç§ij 书 +大 红 +, æ·± +æĽ´å¤ļ åľ° +ĠPFN GL +ĠP SD +V ED +äºĮ åįĥ +Ġmult it +ĠBe cker +rack ed +Ġaeros ol +ï¼Į éŁ©åĽ½ +\" \ +å¤ĸè§Ĥ 设计 +. asm +ategor ical +ä»° æľĽ +Ġsky rocket +S FR +oun cer +ĠNich ols +ĠC rab +ï¼Į好 åľ¨ +æķ¬ æĦı +Ġv odka +Ġhe ed +产åĵģ åľ¨ +éĥģ éĥģ +ĠLuc ia +/ code +éĤ£ éģĵ +ĠAM P +Ġ åıĬ +æħķ å°¼ +ĠCont ribution +å¯Ĵ åĨ¬ +ĠVal ent +åī©ä½Ļ çļĦ +ãĢģ çĶļèĩ³ +_EM PTY +Ġmisdem eanor +ĠS odium +_n r +File Info +ç¥ Ĥ +ĠHy g +-t emperature +/ cont +ms on +Ġc amb +åĴĮ æĹł +æł¹ ç³» +comp act +ĠSerial izable +滤波 åύ +, åĿļåĨ³ +ï¼Įè¿Ļ åľº +ĠJud ith +Fore x +ĠH ose +表 å§IJ +ä¸Ģèĩ´ çļĦ +ï¼Į è§£åĨ³äºĨ +Ġl icens +æĺ¯ å¤ļ +两 éĵ¶åŃIJ +éĥ½æĺ¯ éĿŀ常 +Ġantib acterial +åıijå±ķ ä¸Ń +Ġ è§Ĩé¢ij +() > +ä¸ĵ èijĹ +åĩĨå¤ĩ 好çļĦ +åĩ¡ äºĭ +Ġorgan ising +Ġt unn +åѦ 龸 +D IRECT +åħī å½± +è¾Ľèĭ¦ äºĨ +ĠMotor ola +ucc i +Ġm oot +ĠCompar ative +" date +ĠStep hens +P PT +è®® éĻ¢ +ift ed +Ġha unting +Ġclient ele +Ġpro phyl +åıĪ åı« +ï¼ĮåĨ· åĨ· +åħħ è£ķ +[ I +æĶ¶ åıij +å¿§ å¿ĥ +æĬĽ å¼Ģ +ĈĈ ï¼Į +ï¼Į大 éĩı +æľ¬ åĵģ +iss ued +社ä¼ļ ä¸Ń +(b ox +, è¿Ľåħ¥ +éĩį çĸ¾ +æľĪ åŃIJ +Ġasp ire +ĠE bola +Ġ` < +ĠSch r +ãģ« ãģ¯ +ct p +æľ¬ çļĦ +ach y +éĹ® ä¸ĸ +Ġ? 
: +}_{ {\ +.img ur +åĮħæĭ¬ äºĨ +Pl aces +æĺĨ å±± +ld on +Ġy uan +èĢĮ åĸ» +Ġwand ered +W y +ore a +.A t +Ġth ym +PL US +ĠØ ³ +ï¼Į æłijç«ĭ +Ġd ues +å°ı å·§ +ors et +Ġimag ining +p he +ĠCont ributions +Ġ 设计 +Ġn pc +å¯ĨåĪĩ 缸åħ³ +Ġende avour +ãĢĤ 个人 +ä¸Ń æŃ¢ +Ġdist inctions +f rag +Ġs plic +ĠP LEASE +å°± çŁ¥éģĵäºĨ +æ³¢ çļĦ +Rec v +Ġbud ding +Ġrum or +æĺ¯ åĽ½åĨħ +ĠSal vation +ĠAer ospace +n ome +ĠL al +Ġpred ecessors +(p air +ĠSome how +ĠæľĪ éĶĢåĶ® +ï¼Į 幸好 +Ġdoubt ful +ãĢģ æł¡ +ãĢĭ çͱ +æ¯į 线 +es ac +Ġto dos +ä¹ĭ åıĪ +EX PECT +Do ing +; ", +ĠT yson +. option +_ ready +Ġsc or +æĬĬ äºĭæĥħ +åįĬ æŃ¥ +st s +çĶŁäº§ æķĪçİĩ +Ġcurl s +_ List +_attr s +ï¼Į åĨ¬ +Ġoccup ying +ĠPrint ed +为 ä¸Ĭ +Ġfirst hand +空 æł¼ +åİĭ æľº +åĨ² 泡 +( Config +w ine +è¾½ éĺĶ +è¯ĿéŁ³ åĪļèIJ½ +Ġexper iential +çİĭ 大 +Ġlocal host +pop ulation +为 å¥ijæľº +èµĶåģ¿ è´£ä»» +Sc ientific +çľĭå¾Ĺ åĩº +ĠAs he +çļĦ å¿ħè¦ģ +è·¨ 度 +æĪ¿åľ°äº§ å¸Ĥåľº +Ġsie ge +ĠPlan ck +_ identifier +å°½éĩı éģ¿åħį +C andidates +ï¼Į åı£æĦŁ +w at +Ġ éĵ¶ +ĠF avorites +æ¨ ½ +Ġconf isc +æķ² åĩ» +ĠR ox +ĠAlex is +åı ¼ +ï¼Ī A +Ïĥη ÏĤ +ĠM oor +load s +>> ( +ĠLog ical +ocyt osis +ab la +Ġsuper intendent +ĠMead ows +. ai +é¦ĸ éķ¿ +ĠMom ents +C ars +Ġf erv +ä¸Ģ åłĤ +åIJ´ åĭī +ä¸Ń 西 +_c ancel +åīĶ éϤ +. ï¼Ī +æĻ® æĥł +ï¼Į æĭīçĿĢ +Ġd ing +æľª å°Ŀ +èѦ åĬ¡ +im plicit +ãĢģ åĽ½åĬ¡éĻ¢ +ĠN er +åıĸ ä¸ĭ +-int ensive +æij Ĵ +æ©Ļ èī² +æŃ¦ èīº +Ġdiscrep ancies +ï¼Į åĺ¿åĺ¿ +Ġsc opes +ĠFor ge +åĩºåıij çĤ¹ +app ropri +dir ty +åįģä¸ī äºĶ +ä¸į çķħ +Ġdis closes +è¨Ģ è¾ŀ +æŃ£ 缴 +ï¼Į è°ĵ +. ro +Ġcon duc +è¶ħè¶Ĭ äºĨ +beaut iful +Ġfis heries +Ġm uddy +ĠG es +Ġbo ating +ï¼Į ç«¥ +æ¯Ķè¾ĥ 容æĺĵ +, çĶ· +Ġhe uristic +çļĦ人 æĸĩ +, éķ¿æľŁ +. 
character +ol ysis +ĠG ala +åı¤ 迹 +è¡¥ ä¹ł +Ġmis placed +ĠAra bs +ĠMid lands +( ev +m ite +åijĬè¯ī ä»ĸ们 +gl omer +icip ated +ĠCon rad +ï¼Įä¸Ĭ ä¸ĭ +ĠScot ts +Ġhe ats +æĮĩ示 çģ¯ +è¦ģ é«ĺ +ĠX S +æĮģç»Ń çļĦ +æłħ æŀģ +è¿Ŀ约 éĩij +åĴĮ æĶ¹è¿Ľ +ãĢĤè¿Ļ ç±» +oph ysical +åĨĴ åħħ +åĩ¯ çī¹ +ç¨Ģ çĸı +éľį å°Ķ +对åºĶ äºİ +ĠKat z +Ġincons ist +Ġreg s +åĩł åıª +第åħ« 竳 +/ articles +ï¼ ¡ +ĠRe action +约 èĢĮåIJĮ +Ġgive aways +æĺ¯ä¸Ģ个 å¾Ī +ï¼Į åŃĶ +ĠF ence +Ġz h +Acc eler +çĽĨ åľ° +Ġrede emed +ĠMond ays +Ġart works +çĨĶ èŀį +缸 ä¾Ŀ +$ t +Ġtruth ful +-he ld +cl ips +ä½ĵéªĮ åΰ +ep rom +è¯Ŀ çŃĴ +ä¸ĵå®¶ ç»Ħ +æīįæĺ¯ æľĢ +ï¼Į åijĺå·¥ +( âĢľ +.Act ivity +æľĢ好 ä¸įè¦ģ +) ^\ +f id +ï¼Į çĶĺ +åIJİ æľŁçļĦ +Ġfragment ed +èĴ¸åıij åύ +V ault +éĥ½ ä¸įè¦ģ +å¨ ² +è¸ ± +ĠPrint s +ï¼Į æıIJéĨĴ +Ġdis b +-c r +er p +ï¼ļ 该 +ãĥ ĭ +æľįåĬ¡ çŃī +æİĢ å¼Ģ +éĢļ åIJij +m ale +ï¼Į åIJĵå¾Ĺ +Ġin p +UN ITY +.Un lock +-n etwork +ãĢģ 车è¾Ĩ +è° ¤ +å±± éĩĮ +ĠSh ake +äºij éĽ¾ +åĩłä¸ª å°ıæĹ¶ +æī§è¡Į å®ĺ +Occ up +ä¼ İ +å°± å·² +æ® ĥ +DT D +Ġhal ves +Ġscript ures +.strict Equal +åķ ¬ +çļĦ主 è§Ĵ +æŀĿ æĿ¡ +Ġb arr +âĢľ éĺ¿ +Ġch ol +ãĢĤâĢĿ ãĢĬ +ges ter +ï¼Įç͍ 以 +æĸĩ竳 åĨħ容 +Ġuns pecified +ä¸Ń æī¾åΰ +ous ands +åºĶ该 æľī +Ġg els +æĪIJ åįĥ +Ġult raviolet +Ġun equal +éļ¾ æ°ij +ï¼Į æĬĵä½ı +æĭ¿ çł´ä»ij +ï¼Į没 ä»Ģä¹Ī +b ble +ĠD EN +æį¢ 代 +大åѦ åĩºçīĪ社 +Ġhing es +ĠN ONE +ĠSh ak +æ»ij èIJ½ +L n +å®ģ åİ¿ +ANC EL +, å®¶éķ¿ +Ex plicit +è¿Ļä¹Ī æĥ³ +éĿĴæĺ¥ æľŁ +åį° ç¬¬ +Ġdown right +æIJŀ 好 +纵 çĦ¶ +ä¸į å̼å¾Ĺ +Ġwat ts +æ°¸è¿ľ çļĦ +ï¼Į 空éĹ´ +Ġar cs +ä¸ŃåĽ½ å®¶ +å°±ä¼ļ åĩºçݰ +èµĦæĸĻ çļĦ +åĨľæĿij ç»ıæµİ +Ġ ï½ŀ +éĥ¡ çİĭ +çĿģå¼Ģ çľ¼ +Ġper tains +人 æĿ¥è¯´ +Ñı ÑĤ +Ġmir rored +ï¼Įéļ¾ åħį +ãĢģ - +Ġtake away +ri ous +ĠBut t +éĢģ åΰäºĨ +èIJĮ èĬ½ +ç©¿ åŃĶ +d ad +Ġpre empt +æł¡ çļĦ +为 éĩįçĤ¹ +å¾Ĺ ä»ĸ +buff ers +Ġ éĩĩç͍ +Ġ ä½ľåĵģ +Int roducing +计ç®Ĺ åĩº +Ġpione ers +Ġdock ing +ĠJ edi +Ġsub string +åľ¨ 建 +å¸Ī 妹 +MS O +isc ard +éĴ¢ä¸Ŀ 绳 +Ġab norm +æīĢ æĥ³ +ĠK R +.c ategory +æ³³ æ±ł +溺 æ°´ +Ġcaul iflower +ä¸į åħī +ist en +è¡Ģ 红 +çĬ¯ éĶĻ +ĠRob otics +游 èīĩ +ĠP AY +- ID +ĠH icks 
+_ JSON +æīĵ äºĨä¸Ģ个 +ĠDes c +Ġclim ates +ï¼Į æĻ¯ +åĴ » +Ġch ir +åĨ· åĨ·çļĦ +俱ä¹IJ éĥ¨çļĦ +çļĦ åij½è¿IJ +Ġoper ands +ãĢĤæĪij 说 +ha el +ï¼Į æģ¶ +and als +æĤī å°¼ +æį· å¾Ħ +ozyg ous +Ġto asted +Ġsh ields +对 ä¸Ģ +on ics +ub ectl +Prov iding +/ TR +Ġpl entiful +çľĭèµ·æĿ¥ å¾Ī +ек ÑĤ +ãĥ £ +ï¼Į å½Ń +ãĢģ çݰ代 +ãĢģ è¿IJèIJ¥ +å¿ĥ æĢ¥ +Ġunder line +M ile +ĠB arker +è¿Ļä¹Ī ç®Ģåįķ +, åĨĻ +æĶ¶ éŁ³ +ĠBout ique +åĴĮ åħ¶å®ĥ +.p redict +' % +æµ· å¸Ĥ +åıªè¦ģ ä½ł +ç»ķ çĿĢ +å¿ĥ æĢĢ +Ġhon orable +äºĴèģĶç½ij éĩijèŀį +Ġ çĥŃ +Ġ çĶ·äºº +çŃī å¤Ħ +ĠAm end +å¾Ĺ å¿« +æĶ¿ åħļ +ĠEm erson +s pectrum +éĴŀ 票 +èĩªæĪij ä»ĭç»į +对ä»ĸ 说 +ĠBeck y +-m ost +Ġunn amed +p pe +Ġe b +Âł S +åİ» åĵª +ĠMir acle +Ġp interest +.config uration +Ġutter ance +éĿ¢ 带 +ĠOr n +Ġ ________ +ig ree +ãĢģ åIJĪä½ľ +Ġcon com +ï¼Įåı¯ä»¥ ç͍ +W ifi +o ys +ĠF AR +éĤ£ é¢Ĺ +Ġpok ies +t as +ãĥ Ĭ +Ġg ummy +ä¸ī ä¸ĸ +Ġcomm e +h rs +Ġh ut +Quant um +, è¾ĥ +丢 èĦ¸ +Ġsidew ays +ãĢĤå½ĵ å¹´ +ĠH of +STAT IC +V itamin +ï¼Į æľ¯è¯Ń +åĬł æĸ¯ +,è¿ĺ åı¯ä»¥ +ĠCharg es +ĠA the +åIJ¬ æĩĤ +(s pec +Group Id +Ġprem iered +åĽ°éļ¾ åĴĮ +. off +çļĦ è¡Įä¸ļ +Ġkeyboard s +C od +ä¸Ĭ æī¬ +åıĹ è®¿ +æĪ¿ åĨħ +é£Łçī© çļĦ +ic ent +åĽłä¸º ä½ł +åįij å¾® +ĠWrest ling +( al +ĠPre v +Ġsubtract ing +æĿĥ åĴĮ +ĠP b +让 å°ı +? 
# +D ating +çļĦ èĢģ人 +âĢĵ and +Ġhuman ities +(c s +ï¼Įæĥ³ ä¸įåΰ +_HAND LER +" ä¸ŃåĽ½ +ãĢģ 常 +å©ļ äºĭ +ot ically +Ġal go +os ocial +Ġα ν +ĠR apt +è´¨éĩı éĹ®é¢ĺ +çϽçĻľé£İ åĮ»éĻ¢ +Ġmon ks +åħ´ æĹº +Ġcommon place +å¦ĩ å¹¼ +Ġrel ativistic +è¿Ļ个 大 +ĠCA RE +ãĢģ éĢļä¿¡ +Ġad joining +ĠK ens +.F ilter +æĪ¿éĹ´ çļĦ +id ase +åı¯ æĥ³èĢĮçŁ¥ +IP åľ°åĿĢ +Ġarch ival +Ġsal iva +åħĭ èIJ¨æĸ¯ +ï¼Į æ£Ģæµĭ +Ġin experienced +Ġsau ces +S ibling +ï¼Įä¹Ł åıªæľī +-d iv +ä½ľèĢħ ç®Ģä»ĭ +åľ¨è¿Ļ æĸ¹éĿ¢ +Ġmigr ating +Ġst al +ï¼Į åĵªä¸ª +åľŁåľ° ä¸Ĭ +åħŃ ä¸ªæľĪ +æ°ijæ³ķ åħ¸ +Ġ è´Łè´£ +æĬĬ ä»ĸçļĦ +ãĢģ åĪ¶ä½ľ +Ġdispar ate +ĠKarn ataka +Ġstation ed +аР¶ +ĠSign ific +Break fast +ãĢĤ è´Łè´£ +åĢĴ æĺ¯ä¸į +è¿ĩ 失 +说 åĩºäºĨ +ach ts +Ġrein forcing +ĠNa omi +ĠRank ings +ĠC CD +äºİ 人 +éĩį ç£ħ +ĠZ bl +_h ist +_REG ION +çĭ¬ è§Ĵ +Ġcondemn ation +æĪij们 èĩªå·± +normal ize +Ġdinos aurs +ĠNyg ard +- ${ +att ention +综åIJĪ ä½ĵ +第ä¸Ģ å±Ĭ +ĠM oy +Ġtherm ometer +Ġpre clude +è§£ æķij +æĽ¾ç»ı 说è¿ĩ +Ġl obe +åľ¨ åīįéĿ¢ +ĠAggreg ate +T enant +ac ry +Ġall ot +Ġhom omorphism +Ġmagn ification +æ®Ĩ å°½ +Ġ æī§è¡Į +ï¼ĮæĪij åıª +Ġaud iting +Ġl ith +ĠCreat ivity +Full Name +Ġcompos ites +æĸŃ ç͵ +ĠC OS +qu ite +erv oir +ĠO ps +çīĻ èĨı +Ġvisual ized +è£ħ æ½¢ +çĪĨ 竹 +FFFF FF +ĠBer ks +è¦ģ ä¸İ +åı¯ä»¥ ä¸į +èĥĨ åĽĬ +Ġconf essed +ĠAdv ances +áĥĶ áĥ +åģ İ +Se attle +T U +b lower +ãĢģ èĦij +æĬ½ æł· +r ists +ĠF av +Type Info +Ġshoot ings +s uggest +ĠS HE +Âł æ²Ī +ĠTot ally +, æĺĵ +E K +Ġblind ness +æĺ¥èĬĤ æľŁéĹ´ +Interest ingly +ĠS olving +ĠG eek +åħ³ çħ§ +_UN USED +size i +é£İ 度 +缸 æİ¥ +/re act +Ġblock ade +ij e +-l a +" åŃĹ +åĮ® ä¹ı +% ï¼Ī +ĠR MS +讲 æķħäºĭ +çļĦä¸ĭ æĸ¹ +æľ¬ çͰ +un icode +éĿŀ常 åĸľæ¬¢ +ãĢĤè¿Ļ æĺ¯åĽłä¸º +,ä¹Ł ä¼ļ +Ġ æĪij们çļĦ +æľ¬ èIJ¥ +天 æ²³ +Let ters +ĠBra un +çĿĢ èī² +Ġminim izes +è¯ģæĺİ æĿIJæĸĻ +ĠO G + // +member NameLink +@ example +å¤ ī +ï¼Įåΰ çİ°åľ¨ +redit ary +ustain able +prov ide +Ġcraw ling +ï¼Ī è§ģ +b ugs +ĠMe V +为 åĽ½å®¶ +Âł åij¨ +å¦ĸ çİĭ +Pot ential +p ac +éĴŁ æĥħ +.G raphics +åĴĮ æĵįä½ľ +Ġimp osition +Def ense 
+Ġdepart ing +Ġfranch ises +çļĦ çIJĨ念 +èµ· æºIJäºİ +und i +è·³ èµ·æĿ¥ +ĠPRO JECT +éĩĬ ä¹ī +Ġw akes +Ġpres cribing +CON F +åIJį åĪ© +Ġcat ar +ĠMau i +ic ially +订 å©ļ +) **(- +ĠGL uint +Ġsub types +Ġcav al +â ŃIJ +ä¹Ŀ ä¹Ŀ +bring ing +ĠC ory +åĿ¦ è¯ļ +ï¼Į转 çľ¼ +B h +带 åİ» +Ġwork ings +ĠMem ories +LE M +åįİå°Ķ è¡Ĺ +ĠD ing +Ġsc raps +Ġurg ently +ãĢĤ è®°å¾Ĺ +åĪĨ åΰ +èĥĮ å¿ĥ +çļĦ åħ´è¶£ +Ġbe asts +ä¸ĵä¸ļ 人åijĺ +Ġransom ware +or an +Ġ ç«ĭ +ï¼Įçľ¼ éĩĮ +Ġsp aghetti +erv atives +æŃ£ åĩĨå¤ĩ +éĹ¹ éĴŁ +ent ence +表 妹 +Ġ å¹¶ä¸Ķ +up grade +ï¼ĮæĢİä¹Ī æł· +好 æĦı +ä»Ļ ä¾ł +éĩĩ çŁ¿ +ç´¢ åıĸ +Ġimm obil +ä¹Łæ²¡ æĥ³åΰ +ĠProm otions +æĢ§èĥ½ 好 +lig a +ir an +æ±Ł æ²³ +è¶ħ 人 +Ġh ive +对 社ä¼ļ +Ġmeth anol +读 çī© +ä¹Į æĭī +Ġsw ung +ĠAb road +Ġridic ulously +Ġbrace lets +åIJij ä»ĸ们 +.M ult +Ġsn ippets +ĠRel ay +Ġlimit less +semb les +Ġmoder ated +ãĢģ æĸĩæĺİ +ĠB AD +Ġsc ala +Ġà ª +Ġa kt +åķ Ħ +Ù ¾ +ä¹Ł 表示 +ĠCo ins +ĠAS N +ç©Ĩ æĸ¯ +é£Ļ åįĩ +第ä¸Ģ æĿ¡ +Ġjur or +ç½ij 讯 +. ï¼Į +ĠF asc +å§ Ĵ +æīĵ ä¸įè¿ĩ +ä¸ŃåĽ½ 人çļĦ +OLT IP +å¼Ģ æĴŃ +åĨħ容 çͱ +çĬ¹è±« äºĨä¸Ģä¸ĭ ++ N +ï¼Ł è¦ģ +æıIJä¾Ľ æľįåĬ¡ +人 èĥ½ +ĠLe one +ä¸Ģ缴 éĥ½åľ¨ +ĠMal ay +ĠInterest ed +åĬ¨çĶ» çīĩ +ï¼Įä¸Ĭ 次 +å¾Ģå¾Ģ ä¼ļ +ĠAn ita +Ġdrunk en +Ġconfig uring +ĠPub lish +æ°ı çļĦ +ĠB ose +åİĤ å®¶çļĦ +Ġels if +Ġ è¿Ļä¸Ģ次 +Ġw ed +Ch icken +-t alk +Ġquil ting +f ur +ĠG RE +ich ita +_s kip +_c at +è¸ µ +æĿĥçĽĬ æ³ķ +Ġprosecut ed +Ġanth ology +V m +ç»§æī¿ äºĨ +天 ä¸Ģ +æŃ£ ä¸Ń +çϾ 强 +ĠL ending +lo ff +ä¿¡ å°ģ +ĠPres ents +ç»ĵ èĬĤ +ï¼Įä¸į åΰ +åįıè®® çļĦ +ĠAng lic +æĬĬ åħ³ +ï¼Įä»ĸ们 ä¼ļ +ãĢģ é¼» +_p arts +PT O +Ġfe ud +æ°§åĮĸ 碳 +Ġconqu ered +æľį ä¾į +é£ŀ åĩº +迪 æĸ¯ +å®īæİĴ äºĨ +Ġ èĶ¡ +ï¼Į åħµ +Ġobserv es +Ġminim ise +çıŃ è½¦ +念 ä½Ľ +ĠGuarant eed +è· ¤ +åŁº è°ĥ +ç¨İ 款 +Ġtrain ings +F IT +ĠJ U +ĠSc rap +Ġpattern ed +æĥħ ä¸įèĩªç¦ģ +å¯Ĵ é£İ +p recision +ĠD und +Ġab lation +ĠAccess ed +ĠM outh +ĠApp l +Ġtw isting +en as +iss y +æīĺ è¿IJ +èĢĥ å®ĺ +åļ ı +Ġabort ions +ãĢĭæĺ¯ è¿ŀè½½äºİ +è¶ĬæĿ¥è¶Ĭ å°ij +Ġdegrad ed +d igest +ep isode +- editor +ãĢģ åŁºéĩij +Ġins ensitive +æĢĿ ä¹± 
+/d b +çļĦ èī²å½© +á g +Ġwit ty +å¾Ī 满æĦı +æĹłç¼Ŀ éĴ¢ç®¡ +Ġdi pping +产ä¸ļ ç»ĵæŀĦ +< i +èĩ § +èģĮå·¥ èĸªéħ¬ +ãĢĤæ¯Ķå¦Ĥ 说 +å¸ĮæľĽ ä½ł +interest ing +- employed +ĠG adget +è¿ģ å¾Ļ +éĤ£ çīĩ +su itable +æĪij åıª +AS N +Result Set +inf ection +D ON +çļĦ åı« +Ġm ourning +æ¶Ī失 ä¸įè§ģ +Ġcellul ose +Ġpess im +èµ ¡ +èĩª éĹŃ +-b al +Ġasp iration +ç±» 游æĪı +èĬĤ æ°´ +è¾ĥ çŁŃ +åĸ· åĩº +Talk ing +æĥ³ åIJĥ +Ġel it +å¹³éĿĻ çļĦ +Ġgrace ful +b abel +ĠFin ished +ãĢĤå¦Ĥæŀľ 没æľī +ĠT unnel +å¨ĺ 亲 +ĠB ail +æµģ éĢŁ +äºij å³° +Âĥ ÃIJ +ĠM PL +è¿ĩ ä½İ +çļĦå°ı 女åŃ© +ĠAcc ord +ĠSO CK +p ole +iv ism +åºĶ çŃĶ +ĠKat y +ĠS ey +ĠM iz +ĠIM O +M exico +W ASHINGTON +æŀģ æĺĵ +Ġliber ties +çݰéĩij åıĬ +çİ«çij° èĬ± +ä¸Ń æĹ¥ +Ġcontest ants +_INST ANCE +Ġunanim ous +ate au +å°ı è·¯ +em ann +Ġri pple +ĠL oud +ĠP is +ä¹Ł ç»Ļ +-M ail +Ġv ows +éĩij çīĽ +Ġjo ys +bra ins +bad os +Ġlever aged +al og +Ġprox ies +ĠWor st +ĠP urdue +æķ° 以 +ä½İ çĤ¹ +她 çļĦè¯Ŀ +Ġdilig ently +ï¼Į åIJ¸ +æľĢ 主è¦ģ +ĠCon way +çłĶç©¶ ä¸İ +mat ics +S ugar +åľ¨ ä»»ä½ķ +è´¢ æĬ¥ +ĠAdapt ive +p kt +nd ra +ĠF iji +son ian +æŃ» æ´» +.f s +.W ork +æ¯ĭ庸 ç½®çĸij +/ å¹´ +in ness +åIJį ä¸ĭ +Ġemp ir +èĤ¡ä»½ åħ¬åı¸ +丢 人 +ĠT ED +od os +äºĶ èĦı +ĠB MC +Ġhe irs +ãĢģ èĥĥ +åįķä½į åĴĮ +Ġelectric ally +ĠChi ropractic +ä¸ĵ 为 +éĩį çݰ +缸 åĬł +æīĵ æĭ¼ +第äºĮ ç§į +y ne +Ġd ared +Ġt ipping +ĠLaf ayette +ĠHuman ity +è§Ħå®ļ äºĨ +PROC ESS +_ Status +ä¹ĭ çģ« +çİ© ä¹IJ +Ġdub ious +( Log +Ġprior i +.config ure +Ġdehyd ration +ï¼Į å®¶åºŃ +f rm +Ġwas tes +æĹł å½± +Ġro aming +Ġwhat s +åīį è¨Ģ +str ide +çŀ© 缮 +_ ssl +Ġearth ly +sect s +èIJ½ éŃĦ +Ġax le +_ID X +ãĢģ åĽºå®ļ +-se ven +Ġmaxim ise +Âł åı¯æĺ¯ +Ġind ist +åıį éĹ® +ĠBrig ade +ãĢģ æ¸© +Ġit alic +ĠFellow s +( elem +Ġcrack ers +ĠJenn ings +ï¼Į çķ¥ +_ ident +éĺ² æ½® +è¿ĺæĺ¯ ä¸Ģ个 +.A c +åħĭæĸ¯ åĿ¦ +p ain +- self +er ase +大 åĬ¿ +åľ¨ åºĬä¸Ĭ +_AR B +ĠV PS +èĽ ° +ĠGl oucester +Ġz ucchini +^ r +ĠH IS +çļĦ人 士 +è¡Ģ 绣 +é½IJ é²ģ +Ġkidn apped +èµĦæł¼ çļĦ +v ie +ĠDE CL +éŃĶæ³ķ å¸Ī +Explore r +к о +å¹´ 产 +åľ° äºļ +Ġhab en +Ġdebug ger +ĠMut 
able +ĠCon verts +âĢ ¯ +ĠT ent +Ġpro verb +ĠB BB +_d ependency +鼨 ä¸Ń +Ġ åĪ¶ä½ľ +认 æ¸ħ +Ġsav ory +, æĪªèĩ³ +Ġh ues +管çIJĨ åѦéĻ¢ +[i i +ä¼łåĬ¨ è½´ +.tr igger +Ġthem atic +OP LE +Ġir is +ï¼Į æĶ¯æĴij +èįĴ éĩİ +çļĦ çĶŁéķ¿ +ä¸į æŃ£ +Ġdeterm inations +å°ı æĿ¿ +å¿Į 讳 +é³ŀ çīĩ +Lif ecycle +Ġ æĹłè®º +å® Ł +Ġclass ifications +ãĢģ大 åŀĭ +un likely +æĿ¥ å¤ĦçIJĨ +ä¹Ł æĪIJ为 +Ġindef inite +k V +åľ° è²Į +Ġkit ty +ï¼Į åı£ä¸Ń +èĢĮ 代ä¹ĭ +Ġind ifferent +é¢Ŀ 头ä¸Ĭ +Command Line +ĠPe el +æĹłå¿Į æĥ® +.c b +约翰 éĢĬ +çļĦ èĩªçͱ +è¶ħ 大 +ä¸Ģ æĸ¤ +çļĦ ç¨ĭåºı +ä¹ĭ çζ +åĹħ è§ī +æĹ¥å¸¸ å·¥ä½ľ +Ġkiss es +éģ¥æİ§ åύ +ãĢģ S +Ġquil ts +t ie +walk er +Ġim itation +ï¼Į æī¬ +èĻļ èᣠ+éģį åıĬ +大åѦ 士 +诸 ç¥ŀ +Ġasympt omatic +ï¼Į çŁŃ +Ġl ov +ĠZ oe +ĠSu icide +Ġp irates +BO SE +_PRO FILE +ĠAqu a +ro ach +å·¥ ä¿¡ +Ġ ä»ĬæĹ¥ +æīĵ èµı +t asks +h ore +ï¼ĮèĢĮ çİ°åľ¨ +Ġo mp +红 æŀ£ +为 好 +èĩª çŁ¥ +sec s +åıªèĥ½ 说 +Ġey ew +_ Int +Sh ar +ï¼ĮçĦ¶åIJİ æĬĬ +ĠNe ut +AMP LES +ä¸į ä¸ĭåİ» +oll s +æ°ij主 åħļ +Ġ( ^ +ç£ģ åĬĽ +ĠCL ICK +Ġmon k +èĥ½ èĢIJ +åħĥ ç¥ŀ +ç§ij 夫 +æĻ¶ çŁ³ +æĭĸ æ¬ł +çļĦ 车è¾Ĩ +( Date +ï¼Į åĮ»éĻ¢ +-t reatment +Ġdeploy ments +ice ps +ãĢģ åıĬ +arm or +è¿ľ åİ» +, èµµ +ä»İ ä»ĸçļĦ +ĠEle phant +( rect +å¹³ è£ħ +ĠF ate +ĠH astings +读 èĢħçļĦ +.m kdir +ĠLinked List +ĠSuff olk +M bps +.b ounds +Ġbank er +ç͵ æĢ§ +O US +Ġinst al +è£ħ åħ¥ +çIJ¼ æĸ¯ +Ġ æ´ª +没 æĶ¶ +Ġfluct uation +ä¸Ń åĮħåIJ« +举 缣 +ĠHig hest +å°± æĹłæ³ķ +åī Į +æľĢ çŁŃ +ĠRem oves +Ġprepared ness +ex act +Ġx l +缩 åĨĻ +ĠGrad ient +ĠR HS +çŃī éĩįçĤ¹ +] # +æħķ çϽ +è§Ĵ度 çľĭ +Ali ases +Ġfet us +ĠTher m +ç§į ç±»çļĦ +Ġwalk er +Ġsp ills +ES M +Ġell ipse +欧洲 æĿ¯ +Ġbrilliant ly +ott le +amb urger +éĺ» å°¼ +å½ĵ ä¸ĭçļĦ +举 ä¸ĢåĬ¨ +ĠGen ome +D ashboard +Ġd ictionaries +èĥ Ń +Ġbib liography +.c ast +-p aid +éĢĤç͍ èĮĥåĽ´ +äºĨ ä½łçļĦ +ident al +åIJ¬ æĪij +uc ent +C oder +Ġl bl +ĠL ibr +大 å¢ŀ +Ġinf antry +Tele gram +B anner +å°ij éĩıçļĦ +æİ¨ ç®Ĺ +-c urrent +Ġprincip ally +ol ing +lic he +éĽĨ å¸Ĥ +ä¼ĺéĽħ çļĦ +pect ing +书 åIJį +ï¼ĮæŃ¤ å¤Ħ +ĠCad illac +èµĽ ç¨ĭ +ĠC ogn +ĠK ru +ONE Y +PP P 
+lik elihood +æĴ ¸ +çĽĬ æ°Ķ +ðŁ Ķ +di ag +l aughter +ĠPh i +Ġav al +.H TTP +f ee +Ġant if +j h +_m odels +ï¼Į请 åľ¨ +ĠLeg o +ed ient +æ¯Ĵ èᝠ+Ġ Ç« +_m etric +Requ irement +ĠSPE C +Ġpolic ing +æ´» åĮĸ +Ġbos on +ĠV ibr +ï¼Į å±ĬæĹ¶ +æĺ¯ä»Ģä¹Ī æĹ¶åĢĻ +_PROC ESS +Âł M +个人 æīĢå¾Ĺç¨İ +å¯ĨåĪĩ åħ³æ³¨ +ï¼Į éĹ®é¢ĺ +Ġsp leen +_D e +Ġroad side +ĠDel icious +æĺ¯ 该 +ãĢģ å¤ı +ĠD re +ĠSc or +ä¸Ĭè¿° æĬĢæľ¯æĸ¹æ¡Ī +Ġin equ +Ġtruck ing +ĠSer ious +ER ATION +æĬĴ æĥħ +ĠAppell ants +Ġr ut +éĩij æ²Ļ +çļĦ æŀĹ +IN UE +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ +大家 ä¸Ģèµ· +:m iddle +çļĦ æ¯Ķ +Ġl is +模åĿĹ åĮĸ +ĠShe lf +ä¹Łä¸į ç®Ĺ +åĸĺ çĿĢ +æĪij å°±ä¸į +ä¸ī 缸 +_N EXT +Ġmedium s +âĢĿ åIJİ +Ġ& _ +çļĦ å®ļä¹ī +Ġint olerance +éĩijèŀį å¸Ĥåľº +è·¯ ä¸İ +ï¼ĮæĪij ä¸įçŁ¥éģĵ +èĻļæĭŁ çݰå®ŀ +æľª å°½ +-m oving +èĭı è½¼ +Ġstri pping +Ġol ives +=-=- =-=- +_m on +Ġfac ade +el n +Ġup hill +åľŁ åĮª +Ġast rology +- address +Ġ èĴĭ +æŃ¥ åŃIJ +ä¸įèĥ½ åľ¨ +Ġgly cer +Ġpian ist +ĠT v +ãĢģ ä½ĵ +ĠLD AP +N an +åı¯ä»¥ èĢĥèĻij +_d irection +èī³ ä¸½ +/ abs +Y ahoo +ãĢģ æľĭåıĭ +éĢĥ åĩº +ï¼Įè¿ĺæľī ä¸ĢäºĽ +天ä¸ĭ 第ä¸Ģ +Comb at +æĿ¥ åĨ³å®ļ +sp ell +ï¼Įä½Ĩ éĤ£ +æ®ĭ ä½Ļ +念 åı¨ +L ay +ãĢĤ åĪĻ +å¥Ĺ çļĦ +Ġg rou +ĠM are +ĠN em +ä¿¡ éģĵ +out il +æĺ¨ 天çļĦ +å·¡ èĪª +ĠMeasure ments +èµı èµIJ +ï¼Įä¸ĢåĪĩ éĥ½ +Ġ ä¹IJ +ĠT rem +Ġcomp lements +. 
results +Ġwork book +ä»Ģä¹Ī éĥ½æ²¡ +åĽŃ èīº +ĠNewsp aper +Ġcl ipping +表 åįķ +Ġecho es +, åĿĩ +_t xt +_at om +Ġprest ige +& R +( of +en force +Bar bara +Ġphy logenetic +Ġbloss om +ĠDeuts ch +åĽ ± +app roved +碰 åΰäºĨ +( isset +åľ° æĥ³ +åħĪçĶŁ 说 +è¿ĻæĿ¡ è·¯ +( (" +Ġc ation +ä¸Ń åĮ»éĻ¢ +åģļ ä»»ä½ķ +èι çļĦ +ĠL ok +ĠCan berra +Ġweight ing +ãĢģ æĢ»ç»ıçIJĨ +ĠSub aru +æ¯Ķå¦Ĥ 说 +ãĢģ æ³ķåĽ½ +ä»ĭç»į 说 +ĠShel ter +Ġgriev ance +ĠNS F +åĴĮ ä¿¡æģ¯ +ĠAccount ability +æ¶Ł 漪 +人 å°Ĩ +å¤ļ æĿ¡ +ĠProm pt +re ro +Ġmach ining +æĥ¨ éĩį +ä¹ĭ ä¹ī +ENT R +å½Ĵ ç±» +èµĦæĸĻ æĺ¾ç¤º +ĠNation wide +ĠSt am +fter s +N g +æľ¬ èī² +éĢļ çķħ +PC M +Ġd ime +Mel issa +er ably +ï¼Įä½ł è¿ĺæĺ¯ +host s +å¯Ĵ æĦı +è¹ ¿ +P arsed +ä»ĸ çŁ¥éģĵ +ä½ķ å°Ŀ +Ġsubst itutions +æ´Ĵ èĦ± +Ġsh ading +æļĹ æĿĢ +ĠBen ef +ĠOffic ials +ĠBrief ly +ï¼Į åĢŁåĬ© +EN DS +éĢł ç¦ı +Ġ 亿 +Ġm ashed +Ġ æĻļä¸Ĭ +Ġf actions +н ого +éĶĭ èĬĴ +ĠMel anie +ip ulation +_or ig +ãĢĤ åı¦ +ĠP ec +Ġbar b +Ġalign ing +Ġhect ares +Ġ 软件 +ce mic +å·¡ æ£Ģ +_ am +ĠPract itioner +æĹ¥ éĩĮ +hel ps +, åĨħ +n ails +á ¸ +ot te +, ä»» +Ġelement al +åIJµ éĹ¹ +ï¼Į å¤Ħ +Ġst or +è§£éĩĬ äºĨ +ãĢĤ æ¯ıå¹´ +Ġgovern ors +Inst alled +ĠMark er +_ ld +ol ytic +ang ent +Ġwind y +ãĢģ çļ® +çļĦæ°Ķ åij³ +ĠTrust ed +Ġ$$ {\ +ĠBi otechnology +Ġvine yards +ĠB j +ĠD olphins +Q s +ac om +æĺ¯ä¸Ģ个 人 +åѦ å¾Ĵ +å¾Ĺ æľī +äºĨä¸Ģ å®ļ +黯 æ·¡ +ĠSof ia +å¹´ æĺ¯ +RA DE +cal er +è´Ń 票 +竹 åŃIJ +ï¼Į H +è¿ŀ åIJĮ +æĬ¥ äºĨ +Im mediate +éĻį åĻª +UN CH +c w +åıĹ è´¿ +." ) +æī« é»ij +}} =\ +çŀ© 缮çļĦ +Pro vision +çİĩ è¾¾åΰ +媲 ç¾İ +åĩºæĿ¥ åIJİ +. 
But +Ġconform ity +Ġexplan atory +Î ĵ +ĠTra uma +ãĢĤæĪij çŁ¥éģĵ +B ear +ãĢģ çİ»çĴĥ +è¿Ľ åΰ +é½IJ èģļ +ï¼Į ç´§ç´§ +æľĪ ä¸Ĭ +ï¼Į æŃ¦æ±ī +_ alt +text color +åį°ç¬¬ å®ī +Ident ification +, 估计 +J I +轻轻 ä¸Ģ +çĹĴ çĹĴ +Af rican +Clean up +çģ« çĤ¬ +ĠX iao +wh ose +,å°± è¦ģ +éģĩåΰ è¿ĩ +åĸ ĭ +Ġposition al +åī§ çļĦ +æ¯Ķ 以åīį +æ¡Ĩ åĽ¾ +ï¼Į èĬĤçľģ +âĢ ij +ĠS ultan +Âł èϽçĦ¶ +.j ob +åºĬ åįķ +ĠV ie +Ġapprox imated +, @ +A u +ĠSt alin +è¯ķ ä¸Ģè¯ķ +Track s +_w arn +i Pad +ĠFind ings +åħĥ èĢģ +ãĢģ 人æīį +-mod al +æľº åľ¨ +ĠCell ular +è¿Ļ 两天 +æıIJ 请 +ĠBlack jack +.N amespace +Ser vers +). \ +ï¼Įä¸Ģ æŃ¥ +(un ittest +se vere +ç¾ Ķ +æİī 头 +ä½Ľ éĻĢ +æĮº æĭĶ +Ġgeneral ize +t te +å¤įæĿĤ 度 +ely n +æŁ¥ åĩº +ï¼Įå®ĥ 们çļĦ +O LED +comp letion +Ġgrav y +, ä¸Ģå®ļ +\ l +æµ· æ·Ģ +Ġpan creas +Ġy s +Ġ èĭ± +ara oh +gu est +Ġinhab ited +åĨ·ç¬ij ä¸Ģ声 +ä¸Ń æİ§ +Ġout liers +ï¼Įåı¯ä»¥ 让 +æĹł éĿŀ +Ġresemb lance +ĠDo om +F u +Ġdim s +,åĽł èĢĮ +s ensor +ä¹ĭ éĥ½ +-s ol +Ġm ound +ĠC CC +äºİ ä¸ĸ +èĩªå·±çļĦ æĥħ绪 +Ġirrit ated +æŃ Ĩ +rop ractor +_ST ORE +R CC +Ġ æīĵå¼Ģ +.B itmap +ĠFl ame +W G +Ġins urg +ĠCh oir +èĤ¡ æ°ij +ä¾Ħ åŃIJ +大 èĴľ +ip ers +And re +Pointer Exception +大 æīĵ +èģļ éħ¯ +consc iously +Ġm oles +uk ary +ĠMent or +% èĩ³ +ç»Ļ ä»ĺ +åħī 临 +ĠData Type +ĠFund amental +大çIJĨ çŁ³ +ĠS ands +per l +ĠMix er +ä¸ĩ åĥıç´ł +çĿģ å¼ĢäºĨ +ï¼Ľ 对äºİ +帮 主 +ç¥Ŀ ä½ł +Ġalloc ations +éĵ¸ ä»¶ +,ä¸į ä½Ĩ +Ġmer ry +ĠH orm +å¹´ éĩij +ĠG unn +好 äºĽ +å¸Ī åıĶ +ĠArist otle +P X +st akes +ĠC andle +Ġmov able +Ġhairc ut +èģĶç³» çļĦ +* " +èµĦæł¼ å®¡æŁ¥ +( socket +å¹¶ 没 +Ġæľ¬ æĸĩ +Ġpit chers +ĠBren nan +ï¼Į æģ¨ä¸įå¾Ĺ +Ġb loc +ãĢģ æŃ¦æ±ī +æĪij åħĪ +çļĦ æŃĮ +zz arella +ĠO sw +_M ARK +æľĢ好 æĺ¯ +ĠP DE +Ġslow down +Attribute Value +ï¼Į 寻 +æĭĽ åij¼ +( account +æĢĿ æĢĿ +OG LE +ct omy +Ġcap illary +èĪªç©º èĪªå¤© +dim ension +ĠT eddy +ĠS olic +âĢĿ èĢĮ +ä½Ľ åĥı +ãĢģ æĶ¯æĮģ +éº ½ +åľĨ 润 +s am +Ġ çĩķ +.Aut omation +ym es +/ assets +ĠAm ar +añ ol +Sad ly +ä¸Ģ å±Ĭ +get Data +n ol +ãĢĤ çĶŁ +ĠM ao +ist ible +çļĦæľīçĽĬ æķĪæŀľ +çļĦ èĤ¡ç¥¨ +åģı ä½İ +Could n 
+answ ers +W arm +å°± æīĵ +AM I +Ġbit terness +年代 åĪĿ += F +ST AR +ĠSu itable +ĠD ummy +æľīä¸Ģ éĥ¨åĪĨ +eps i +Ġc ork +ĠPh p +å°ĸ 端 +-se q +: ãĢĮ +ä¼ļ计 å¤ĦçIJĨ +çļĦæĸ¹å¼ı è¿Ľè¡Į +re load +ĠMe h +_n umeric +part ner +. ss +ĠT issue +ä¸į èĩ³ +Ġwet lands +象å¾ģ çĿĢ +Ġs op +çī¹ å¤§ +æĿ¥ ä¹ĭ +(f ont +ĠNS Log +åħĭ åĪ© +人æ°ij æĹ¥æĬ¥ +åĪĽ ä½ľèĢħ +åłª æ¯Ķ +ĠNUM BER +äºĨä¸Ģ é¡¿ +ĠP apa +Ġun official +æĶ¶ åħ» +Ġstand by +å²ģ 以ä¸ĭ +_DE FIN +æŃ¤æĹ¶ æŃ¤åĪ» +ï¼Įè¿Ļæł· ä¸ĢæĿ¥ +ĠEL ISA +Ġ 容 +Ġa es +ä¹Ł åĽłæŃ¤ +ĠAn im +å¹´ èĸª +ĠV ish +\ Gamma +ĠWe apons +è¿ľ æ¯Ķ +æĹ© äºĽ +roid ism +ãĢģ çī© +Ġinstant iate +ãĢĤå°± è¿Ļæł· +çϽ ç³ĸ +çIJĥ èıĮ +Ġ æ¯ı个 +ãĢĤ æĿ¥èĩª +åŃIJ æĽ° +EN R +uss i +ĠModel Renderer +ä¸Ń 以 +ret ry +è¡Įä¸ļ çļĦåıijå±ķ +æīĩ éŨ +_SUPPORT ED +çĮķçĮ´ æ¡ĥ +ĠV ict +åıį åĢĴ +æ´Ľ æŀ« +Ġscript ing +ĠCoord inate +! $ +ons on +ĠCom paring +Me chan +æĬĬ 头 +çĵ ® +ä¸Ń 天 +ĠAnnounce ments +èĬ ª +éĴ ¯ +_c ut +.M enu +Ġnit rate +Ġsol vents +Ġword press +大 éĺµ +ä¸ī å°º +ìĭ ľ +Ġse psis +_D IP +umbers ome +O Auth +Ġd rowning +ĠF ury +ĠF TC +Ġcoll age +( auto +Ŀ ¼ +ere x +/ rest +ĠG ina +ĠMin n +æŁ¿ åŃIJ +, æīĭ +ull ing +sub str +H ugs +çļĦå°ı æīĭ +èĤ ® +ç§ģ å¯Ĩ +åĬ³åĬ¨ åħ³ç³» +c ars +ab leness +è° ij +ĠCount ies +å¹¶ä¸į æĦıåij³çĿĢ +al ms +Ġb ounced +ä¸Ń åŃĺåľ¨ +æ°´ è§£ +æī¿ éĶĢåķĨ +ãĢģ é£İéĻ© +UI Image +ack le +ĠK em +Ġcr c +Sh utdown +, 谢谢 +re ports +ĠC f +ãĢģ åı°æ¹¾ +). [ +ood les +ĠDep loy +Mod ifiers +ĠHead ers +ĠS iber +St ress +æµģ æ´¾ +ĠU h +声 æľĽ +Unity Engine +ĠL H +ĠShel ley +è¿ ¢ +Ġe inen +Ġco aster +Ġbefore Each +.d ialog +See ing +- termin +oo ch +æį º +Ġlo ot +å®ŀ è¯ģ +ruct or +è¯Ħ æ¯Ķ +ut m +ĠO xygen +. 
rect +åīį åį« +æĪĸ éĹ´æİ¥ +.P RO +å¤ļ å²ģçļĦ +G er +ig il +è§Ĵ èĨľ +è§£åĨ³ è¿Ļ个éĹ®é¢ĺ +ĠDec isions +ï¼Į çIJĥ +大 æŀĹ +æĥ³ åģļ +ĠAr rest +ï¼Įç͍ åĬĽ +ge al +æĶ¿ å§Ķ +irc uit +ong writer +Ġelectro static +åľ¨ 马 +ç»ĺ åĽ¾ +ï¼ĮåĬł éĢŁ +opens sl +ĠL ia +æķĻèĤ² åŁ¹è®Ń +æĻ® æĭī +éĿĻ è°§ +p rep +æĭį çĿĢ +ĠCommit ment +èĸĩ èĸĩ +ãĢģ æ¶Īè´¹ +ï¼ĮæĪij å°±ä¸į +As m +cro ft +Prov ided +ï¼Į åı° +é»Ħ å±± +.get Width +Call able +-p olicy +ĠBe ans +S ector +-Col a +说 好 +ĠNeuro science +为 æŃ£ +(p riv +at ical +å°Ĩ éĢļè¿ĩ +è¡° åĩı +ï¼Į 表达 +sequ ential +ä¸İ å¤ĸ +Ġfil enames +rat om +Bro ok +ä¼ģä¸ļ æıIJä¾Ľ +éĥ½æĺ¯ å¾Ī +åĵĪ éĩĮ +_ad apter +N ation +çªģ è¢Ń +( original +< object +Î ¦ +ï¼Į æĽ² +åIJĦ æĸ¹éĿ¢çļĦ +Ġplag ued +å¾· åĭĴ +.pro file +åĿļå®ļ ä¸įç§» +< H +çļĦ èĩªå·± +atic a +ï¼Į è¿ijæĹ¥ +ï¼Įä¸Ģ å¼Ģå§ĭ +it rogen +ãĢģ æľīæķĪ +çĥĪ æĹ¥ +çĽijæİ§ ç³»ç»Ł +éĺ²çģ« å¢Ļ +Ġa ches +âĢľ 第ä¸Ģ +èĥ½å¤Ł 对 +Ġsleep y +- acc +lo an +ï¼Įä¸Ģ åħ± +ET CH +Ġwhisk y +U nt +Ġst encil +B id +çļĦ ç»ĵåIJĪ +Ġj i +æ³¢ 段 +ï¼Į 丹 +æīĢ çŁ¥ +å°Ĩ çͱ +Ġcompos ers +æī¾ æŃ» +ä¸Ģ åijĺ +åĩº ä¸Ģç§į +ĠMagn et +Pro position +en iable +ï¼Į åħ· +çĶ ¥ +ä¸į å¼Ģå¿ĥ +çĿ ¨ +åĨĽ ç͍ +-f ed +èŀ ¨ +ίν αι +re cent +ĠI PL +ĠGo a +Ġcher ries +: YES +f ragment +ĠF inger +èĩª ä¿¡å¿ĥ +被 ä»ĸ们 +失 æİª +éĨĴ è¿ĩæĿ¥ +v ale +Ġtim etable +人 çŃī +Ġtra pping +Ġsimpl ifying +CRE T +ä¸ĵ åĮº +ĠInter vention +ï¼Į è®° +IS ON +: System +ï¼Į åŁĥ +Ġins anity +Ġn ailed +ext ended +ĠNet anyahu +Ġbif ur +. 
evaluate +ä¸ĵ åľº +èĪĴ å±ķ +ĠHass an +ĠR oe +oc can +Ġsequ entially +KE EP +Ġplead ings +ä¿ĿæĬ¤ 好 +æľī é£İéĻ© +为 第ä¸Ģ +ĠMom my +çļĦ å¦Īå¦Ī +ä½ľ åĿĬ +被 åĪ«äºº +å¤ĦçIJĨ åIJİ +æĽ¾ æĺ¯ +S ac +Ġ å¸Ĥåľº +od ings +ĠJ orge +èĮĥ æĸĩ +.rand int +Ġs ands +åľ¨ å¿ĥ +Ġpr é +èĻļ å®ŀ +Ġsab ot +Ġis nt +ĠC ritic +åŃIJ ç³»ç»Ł +é«ĺ 楼 +ä»İ è¿Ļ个 +ï¼ĮæīĢè¿° åĽºå®ļ +ĠReg ina +ï¼Įåΰ æĹ¶ +åħ±éĿĴ åĽ¢ +h int +æīĵ åĵį +ç³ķ çĤ¹ +(t p +Ġpregn ancies +Ġtort ured +æĺ¥ è¿IJ +Previous ly +IEL DS +æ±Ĥ ä½ł +_RES OURCE +æ±Ĥ æĺ¯ +_S ERIAL +E uro +Strict ly +ig ail +ä¹ĭ è°ľ +äºĮ éĥİ +the ad +çŃij çī¢ +orks pace +ä¹Łæĺ¯ è¿Ļæł· +ĠGa uss +r ification +Ġyoung ster +led ged +ç§» åΰ +ĠG SM +lic ensed +Ġæľ¬ å®ŀç͍æĸ°åŀĭ +究竣 æĺ¯ +Ġpetition ers +Under stand +Ġ 广 +åľ¨ çĶŁæ´»ä¸Ń +le w +æĪIJ åħ¨ +ä¸İ æĤ¨ +è¾ĥ å¼± +åĵĪ çĻ» +磨 åIJĪ +VE LO +ic illin +èĢĮ å¤į +人 åij½ +_PRO TOCOL +ĠC ush +Ġ) ( +å®ĺ åľº +Account ing +éļľç¢į çī© +æĸ° å¥ĩ +Ġgoal keeper +Ġge opol +ç«Ń è¯ļ +åĽ¾ 为 +Ġabandon ment +S parse +ent ered +ĠH iking +ng ine +Ġa e +ĠAn k +å®īè£ħ äºĨ +Ġ{} '. +Ġacc rued +ï¼ĮæĪij 对 +ave ment +ï¹ ij +Ġimplant ation +i atives +ĠA ck +Ġweb pack +å±ħä½ı åľ¨ +iff er +é£İ åIJij +.N on +滤 ç½ij +c alls +ĠQ ur +ä»·æł¼ ä¸Ĭ涨 +Ġconfront ing +_RE LEASE +ĠAm elia +. 
Std +G ran +ä¸Ģä¸ĭ å°± +éªĮè¯ģ çłģ +çļĦ æķ°åŃĹ +åı¯ ç¼ĸç¨ĭ +ä¹ĭ æĢ¥ +åĩºä¸Ģ ä¸Ŀ +_ Object +便 èĥ½ +å§Ķæīĺ 人 +, å¾Ĺ +çªģçĦ¶ åĩºçݰ +ĠLaw son +æµģæ°´ 线 +Ġ åį· +Ġpl ains +Ġem itter +ï¼Įä¸Ģ 头 +åį± åıĬ +ï¼Į æ·¡ +ĠSc enario +çĽĬ çĶŁ +Qu ota +ï¼Įç͍ æīĭ +_SHA RED +IP HER +çīĽ æİĴ +ĠTra ff +_T E +Âł ä½Ĩæĺ¯ +éļıå¤Ħ åı¯è§ģ +ĠG MO +Ġgood will +åĵ¥ çļĦ +ĠTR ACE +Ġcl own +åı¤ ç±į +cap acity +G uild +_ js +红 èĬ± +ï¼Įçľ¼ 泪 +å°Ĩ æł¹æį® +bi otic +Progress Bar +æģ¢å¤į åΰ +æĭ³ åĩ» +ĠYam aha +ï¼Į K +Ġb unny +UIL T +ĠS cores +/v ideo +.V ERSION +å½Ĵå±ŀäºİ æ¯įåħ¬åı¸ +ĠLic ence +å°¼ çİĽ +ï¼Į 伸 +åħ¶ 人 +éĿŀ å¾Ĺ +ä¼ļ åİ» +æĪij们 æľī +c rypt +çĶŁ æł¹ +åģļ é¢ĺ +顾 ä¸įå¾Ĺ +ub en +åĨľ 夫 +UR A +éĨĴ æĤŁ +Ġhor izons +Ġdinos aur +é³Ħ é±¼ +_ UT +ĠB AT +ï¼ī æĪĸ +å¿« äºĨ +çļĦ人 çļĦ +天 è¡Į +.Foreign Key +, ç»Ħç»ĩ +ãĢĤ è¿ĺæĺ¯ +åĴĮ çIJĨè§£ +åıĪ æ²¡æľī +ĠGall agher +ĠB icycle +å°ıç¼ĸ 为大家 +/ conf +ï¼Į åĦ¿åŃIJ +大 èĥľ +çĤ¹ å·¦åı³ +Transport ation +.is Array +ãĢĤåĨį åĬłä¸Ĭ +åĩº åŁİ +详ç»Ĩ ä¿¡æģ¯ +fore ground +空 头 +模å¼ı åĴĮ +Ġirre versible +ãĢģ åĽł +ä¸Ń 说 +Ġmed iate +.s ite +ĠW aves +ĠTru cks += function +Ġ åħ¬åijĬç¼ĸåı· +ĠH edge +ĠPost greSQL +Gu ess +诱 人 +ĠпÑĢ ÐµÐ´ +ĠDr ill +Coun cil +åħĭ åĬ³ +Ġroad way +ï¼ĮåĬĽ æ±Ĥ +Ġdict ates +Ġt ipped +æĹ¥ åİĨ +ĠBl ast +ĠCyber security +ĠConserv atives +ä¹Łæľī æīĢ +YNAM IC +/ projects +å®¶ éŨåı£ +ç»Ļ å®Ŀå®Ŀ +失 è¡¡ +g cc +k Hz +ï¼Į è´Ńä¹° +ĠMay er +/x html +-work er +Ġl g +un able +Pro tein +Ġlos er +Ġam ber +" D +ĠI CO +åı¯ 缴æİ¥ +F lo +Ġg ol +é£İ åĴĮ +ĠAv iv +Ġk W +.d uration +湿 çĥŃ +FR AME +ĠSleep ing +ç¾½ç»Ĵ æľį +i ad +Ġh ops +} < +ãĢģ æ°ijæĹı +æīį ä¸įä¼ļ +åĬŁ åºķ +Ġб Ñĭ +ĠIn cent +ãĢĤå¦Ĥæŀľ 说 +Ġgri pping +- var +G X +涨 è·Į +Ġsou ps +ĠG lu +Ġmonument al +M iami +Ġ åĪļåĪļ +ynchron ously +Ġrou ters +ĠI FS +è§ģ åΰçļĦ +çĥŃ ç͵ +åIJĥ è´§ +æĶĢ çĻ» +è¿Ķ æł¡ +ĠR oses +ãĥ Ł +åĪĨ éĺŁ +åºĦ å®¶ +> null +åħ¬å®ī éĥ¨ +P id +[ node +第ä¸ī å±Ĭ +åºĬ éĵº +Ġabol ished +Ġl ax +Ġ æŁ¥çľĭ +è¶ħ 强 +éĺ¿ æĸ¯ +Sp atial +ĠPR IMARY +åľ¨ æľ¬åıijæĺİ +ï¼Įä¸Ģ 缴åΰ +,, ,, +on ian +çģ¯ çļĦ +Ġsubst ituting +ãĢĤ åĽ½ 
+ĠJ ury +æ¼Ķ 说 +.ext ract +Ġpier cing +ãĢģ ä¸ĸçķĮ +Ġauthor itarian +æĹ© å¹´ +.w hat +ĠBever age +_inter rupt +ç»ĵ 转 +æķ´çIJĨ äºĨ +V intage +.b uf +åĵģè´¨ çļĦ +Ġfer mented +_t ables +Ġpain ters +Ġcrown ed +j m +w alls +ĠAn not +-R ay +Ġrepent ance +n en +Ġbenef iting +gt k +æłĩ è¯Ń +ï¼Įä¹Ł ä¸įè¦ģ +éŃ ĩ +Ġmaster ing +çİĦ åħ³ +.ch art +Ġn el +主 æİ§ +è°Ī åıĬ +çĶŁåij½ ä¸Ń +æĪij åij¢ +Equ ation +ĠAssoci ations +åIJIJ èķĥ +ograp hed +Tw enty +è® ¹ +Ïģ γ +æĵ¦äºĨ æĵ¦ +ç¬ij è¯Ń +å±ŀ å®ŀ +rt le +Ġconvers ational +çļĦæīĭ ä¸ĭ +ĠØ ± +oit te +ç¼Ŀ åIJĪ +Ġ 以åıĬ +çļĦ åħĥç´ł +ĠB LE +ĠF aces +ä¸Ĭ æŀ¶ +_sign ature +åĩĢæ°´ åύ +ĠRed irect +- context +Ġa rab +端 èµ· +ĠPass port +ĠGM AT +{ matrix +åĬŀ 好 +Ġä¸Ģ åĪĩ +w ed +Ġj ul +æ°ij å¿ĥ +æľįåĬ¡ è´¨éĩı +TE CH +Ġrest itution +ĠÑģ в +, åIJ¬ +ä¸ī æĸ¹ +è¯į çļĦ +art ner +èĭ±åĽ½ 人 +ï¼Į两 ä½į +Y o +å°± åΰ +Ġph antom +ern o +ĠFif ty +ĠT asks +å·¥ä½ľ æĹ¥ +. Number +åĨ¬ çĵľ +K ings +put ation +OP TION +Ġk arma +ï¼Ľ æĪĸ +æ´Ľ çī¹ +ä½ĵç³» åĴĮ +ĠSTAT ES +ï¼Į å¤ı天 +Ġch k +æĥĬ éĻ© +说ä¸į åĩºçļĦ +便æIJº å¼ı +æĺ¯ 让 +ĠN inth +R FC +T URE +缸 å¤ĦçļĦ +-f iles +wh it +Ġsw arm +Ġdistract ing +/ how +ock ed +åĽĽ åįĥ +Ġre joice +Ġj it +ä¿Ŀ åĪ© +åĪ« æıIJ +ãĢģ åĨ· +Ġv ase +ĠSepar ate +ä¹Łæľī çĿĢ +ä¹Łæľī ä¸ĢäºĽ +LET TER +çľĭ äºĨä¸Ģä¸ĭ +æľī 礼 +è¿Ľè¡Į ä¸Ģ次 +]( ../ +еР¿ +Ġ ****************************************************************************** +çļĦ èĭ±éĽĦ +_IN TEGER +Contain ers +Ġdict ated +un checked +ĠBig Integer +åºĶ 以 +d ependencies +ä¸Ĭ 身 +ex istent +ï¼ļ ä¸Ń +å¤ļ å¾Ĺ +æīĢ å¾ĭå¸Ī +ĠTrans cript +ï¼Į å§ĭ +Ġfunction ally +æĪĺ 车 +Ġhand writing +ser vers += get +b anks +ĠC FG +èĩª ä¹ł +Ġdel le +Ġmen opause +ï¼Įåįł æĢ»æĶ¶åħ¥çļĦ +ĠRefriger ator +Ġv andal +èİ·å¾Ĺ èĢħ +ĠSomal ia +Q M +ĠHe in +\ ^ +è¯ ½ +iv ist +iddle ware +éĥ½æ²¡æľī äºĨ +Ġdeck ing +ĠMock ito +åī¯ åħ¶å®ŀ +Message Box +in ances +Ġcamp ers +å͝ ç¾İ +sm ooth +L ily +c ir +.S ervices +Ġground ing +$ ' +Ġle vy +çŁŃ 裤 +Program ming +Near by +ĠEN GINE +Ġw rought +æĭ Ĺ +ĠY ug +æ°ij å±ħ +æĬĢæľ¯ æľīéĻIJåħ¬åı¸ +è¨Ģ çļĦ 
+åĢĴ ä¹Ł +ĠPant her +æĽ´ 强çļĦ +ees e +ĠMor ph +ç´łè´¨ æķĻèĤ² +/ install +Ġy um +å¾Ī æ·± +çݯ è·¯ +ç»´ å¤ļåĪ©äºļ +ï¼ļ ä»ĸ +.c m +Ġemb arked +æ²īé»ĺ äºĨ +ãĢģ ç«ĭ +.W rap +çīĻ é¾Ī +æĢ» æī¿åĮħ +Ġvision ary +Ġcow ard +éĹ®é¢ĺ æĹ¶ +æĺ¯ä¸į æĥ³ +coll apse +V a +ram ed +Ġpost season +è¿ľ è¿ij +ĠCam den +æ¡Ī åıij +ãĥ ¢ +æ³¢ åıĬ +Ġ æľŁåĪĿä½Ļé¢Ŀ +S b +j avax +æ°Ķ åĸĺ +è·Ł ä¸įä¸Ĭ +å²ģ æľĪçļĦ +çĥŁ çļĦ +ĠGard ening +éĩį éĺ³ +å¤ĩ å¿ĺ +.d one +åĩĦ åĩī +Ġc ape +ä¸į æĦ§æĺ¯ +Ġmass acre +Ġset ups +ĠZ ak +Test Method +. There +N ONE +æĹ¥ åĿĩ线 +ãĢĤ她 说 +èĢĮ 产çĶŁçļĦ +åĵį èµ·äºĨ +ĠD LC +æ°ij æĦı +bl ur +ú n +ĠTot tenham +orth and +K atie +å¾Ĺ å¾Ī好 +Compl iance +å°± åıªèĥ½ +é¢Ħ éĢī +typ ically +é ı +Spec ified +åĴ¸ éĺ³ +Ġinvert ible +Ġ è´¾ +ens it +éϤ æģ¶ +_T EMP +ĠBig gest +ĠÐ ¢ +< dl +et i +åĴķ åĻľ += l +end i +çļĦ åIJĦ +ä¸İ ä»ĸ人 +(f ind +R oy +æĽ´ åĥı +'] -> +: last +ĠM UX +ãĢģ éĵĿ +ect ure +ä¸ĵä¸ļ 课 +}. \ +_target s +F o +ic l +_F IFO +ĠSTE P +åıĪ æĥ³ +eter ia +è´Ł éĩį +Ġnight ly +.G raph +( require +ĠP IC +ï¼Įä¹Ł å¾Ī +lic hen +鼷 鸣 +_sh ould +è¦ģ æł¹æį® +ï¼Į åıĤ +ï¼Į 缺ä¹ı +ĠT urtle +Ġresp iration +Mult iplier +_ allowed +ç§ijåĪĽ æĿ¿ +Ġvul gar +ï¼Į çĹĽ +便 å°Ĩ +Ġprob ing +å¸Ń ä¸Ĭ +T rial +ãĢĤæĪij 认为 +èĸ ı +Spec ifications +Ġ ä¹Łæĺ¯ +åĩº éĶħ +Ġhyper link +ĠBan ana +ĠEn semble +Cor onavirus +Ex am +ï¼Įçľĭ åΰäºĨ +.In f +ĠD ense +åħ±äº§ 主ä¹ī +ĠLeon ardo +Ġ 举æĸ¹ +çļĦäºĭ äºĨ +ä¸įçĶĺ å¿ĥ +Ġc id +åİĨåı² ä¸ĬçļĦ +Ġhon oured +æĥ© æĪĴ +ĠNe utral +& ) +份 ä¸Ĭ +ï¼ļ çͱ +å°Ĭ èĢħ +ĠTol edo +çĶ» åĩº +秦 åĽ½ +åĩº å¢ĥ +ests eller +Ġkill ings +Ġinterrog ation +- peer +Ġsh rugged +ind e +Ġacc ol +dd ev +hes da +Ġmar in +ĠF I +aff in +Ġinvent ions +ãĢģ æłĩåĩĨ +ab h +ĠV est +igm oid +çݰ å¦Ĥä»Ĭ +Ġ. 
" +Ġg an +_s b +æŀª 声 +åŁºäºİ æīĢè¿° +Ġpol yp +客 æ°ĶçļĦ +Ġbi odegrad +Ġdir name +SUB SCRIBE +ãĢģ 广西 +çħ§ 缸 +Ġequival ents +è¿Ļ ä¸ĭ +稳 åİĭ += dict +( ap +(' @ +触 è§ī +per missions +ĠPast a +< ![ +Ġa iding +Ġon boarding +Ġem oji +b ond +Ġ å®ļä¹ī +AD ATA +ĠBel ly +ï¼Į éĿ¢åIJij +æľĢ åıĹ +ĠOver flow +çľī å¿ĥ +d well +(w riter +Ġp ope +IT IVE +-in vasive +ĠTra ils +.assert Is +ip ed +ï¼Įæĺ¯ ä½ł +Not Empty +主任 åĮ»å¸Ī +åĽ¾ 示 +顺åĪ© çļĦ +æ³ķå¾ĭ çļĦ +M as +] :: +ï¼Į æĮ¥ +ng x +çļĦ大 åѦçĶŁ +Ġmother hood +Ġmicro soft +( II +Ġpet als +ãĢģ åĪ© +æĿ¥ æİ§åζ +Ġmel anch +( rc +P WM +Ġpaper back +åĮĸåѦ çī©è´¨ +ass ic +建çŃij æĿIJæĸĻ +ar ra +ĠP anda +æį· åħĭ +ä¸Ģ 红 +举 çĿĢ +éĻIJä½į åĿĹ +$ n +æľī æĥħ +_level s +Ġde cedent +ç´ł é£Ł +" ãĢģ +ãĢģ æ¢ģ +å®ī é̏ +æĸŃ ç»Ń +ãĢĤæĪij æĺ¯ +oper ators +ĠâĻ ¥ +ãĢģ æĿŃå·ŀ +åĪļ ä¸Ģ +äºĨ好 å¤ļ +ï¼ĮæĹĭ åį³ +ãĢģ å®ŀéĻħæİ§åĪ¶äºº +Ġwater falls +ï¼Įä»ĸ们 æĺ¯ +Vis itors +åĽŀ æļĸ +ç»Ĩ å°ı +Ġgrand daughter +.time out +_Z ERO +* (( +AC ES +æĹł 以 +ä»Ĭ å¤ľ +C rop +.d iv +Ġcomplement ed +, æľ¬æĿ¥ +ĠT witch +é£İ 头 +ĠBow ie +åī§æĥħ ç®Ģä»ĭ +æ»Ķ æ»Ķ +ĠS ears +æĸĩ éĽĨ +Ap is +ĠUnder graduate +[ email +ate x +-c ritical +_B ase +cket t +大 æ¸ħ +ĠSub stance +Ġmut ed +ĠMad ness +ĠAltern atives +ĠHal ifax +å¾® å°ı +åıĤèĢĥ èµĦæĸĻ +ĠEnt rance +.trans pose +Ġlogarith m +rob at +Ġreass uring +ig ent +ãĢģ å¯Į +j peg +op rop +ĠTh ames +æķ° 个 +ä¸Ń央 ç͵è§Ĩåı° +Prot obuf +éĢĢå½¹ åĨĽäºº +ï¼Į ä¿¡æģ¯ +å¤įæĿĤ æĢ§ +Tool Tip +. rc +ãĢģ æ¸©åº¦ +Ġsc rape +product ive +}_{ - +ĠAlb u +_ soft +× ł +çĶŁ 计 +_tr igger +ĠAL WAYS +è»ĭ çĹħ +ĠB inance +ĠF oss +èIJ½ èĦļ +ĠVamp ire +. 
Combine +å¼Ģ åΰ +(m at +å¼± èĢħ +èij£äºĭä¼ļ ç§ĺ书 +- security +s orted +Ġf ait +ä¹ĭ 交 +å°¿ ç´ł +åĢĶ å¼º +Ġimpro v +å°ı è§ij +ĠF ahrenheit +Ġz eal +è·¯ åĨµ +,å°± èĥ½ +çļĦ çī©è´¨ +Ġh arp +åħ¶ ä¸į +å®Į é¢ľ +è½» åŀĭ +Ġsuper visory +è·¨å¢ĥ ç͵åķĨ +Âł éĤ£ +缴 è§Ĩ +ĠZ heng +ĠW EEK +Pro j +ï¼ĮåIJĮæĹ¶ è¿ĺ +Ġinvari ance +Ġneutr ality +ï¼Įä¸Ńåħ± åħļåijĺ +ư á» +çģ« é¾Ļ +> ; +åĸ Ĩ +常 ä½ı +bas ename +K enn +g x +op oulos +.t hen +Ġflavor ful +/ not +[ F +缼 å¤ı +Ġsegment ed +H oney +Ġpract ise +Ġscaff old +D ou +建 åħļ +段 åŃIJ +å°±åľ¨ è¿ĻéĩĮ +äº µ +ear able +Ġneed y +ä¸ĸ äºĭ +è¾¾ åħĭ +âĪ Ĩ +åı¦ä¸Ģ æĸ¹ +Ġse ren +emo ji +Ġunavoid able +le hem +ĠC yl +åľ¨ çİ°åľº +åĨĽ å¸Ī +in ject +Ġun interrupted +Ġassoci ative +å¾IJ å¾IJ +(v m +ĠC ous +ĠA UT +æĸ° 书 +ĠHand les +çļĦ 缮çļĦæĺ¯ +_fl at +. Store +ell ij +Ġorigin ates +客æ°Ķ äºĨ +Ġ åħ¶æ¬¡ +ĠY ield +æĭī èµ· +ãĢĤ第äºĮ 天 +âĢĿ å°±æĺ¯ +Ġreg exp +Volunte er +ĠAuss ie +ĠA be +Ġconf er +æį¢ æĿ¥ +ä¸į å¤ļçļĦ +ĠInd ie +æĹ© æ³Ħ +äºī åħĪ +éĩį大 èµĦ产éĩįç»Ħ +Ġnostalg ic +æĪIJ 大 +èµ° ç§ģ +éħ¸ çĹĽ +æĮĩæĮ¥ ä¸Ńå¿ĥ +_mem bers +ub ert +Ġk un +), ' +IM PORT +éĻį æ°´éĩı +.em place +åºĶ 声 +Ġblue berries +ä¸Ń 转 +åı¯ 说 +åıĪ å¤ļ +æĢ» å·¥ä¼ļ +Ġspray ed +Ġres ale +å°Ĩ è¿Ľä¸ĢæŃ¥ +è¿ŀæİ¥ åĿĹ +itiz ens +Ġply wood +ores is +CL I +ç͵è¯Ŀ åı·çłģ +æĹłå¥Ī åľ° +ĠCl othes +åħŃ çº§ +ĠDES IGN +/ year +缸 约 +å·¦ ä¼ł +comp leted +sk irts +ĠWild erness +Ġresidual s +HE ADER +_R C +Act s +Ġf anc +æµ £ +ï¼ĮçŃī çĿĢ +åŁĭ åľ¨ +à ģ +ï¼Į æķij +.s dk +ç½ijç«Ļ 建设 +æ³Ľ çĿĢ +åŃIJ å¤ľ +E UR +è¡Ģ éĩı +iam s +res c +æ¯Ľ 主å¸Ń +, åıij +, 主åĬ¨ +pl ist +æľ¬ èģĮ +ann ual +Ġsight seeing +ï¼Į ç²Ĺ +ĠL ol +èįĨ æ£ĺ +q b +ï¼Į çĶ³è¯· +羣çļĦ 好 +èĤĿ çĻĮ +Ġvamp ires +æ¡ĥ åŃIJ +ĠS aga +ãĢĤ çݰ代 +Ġr anc +Ġind ebted +aus ing +.F ree +Comp arer +ĠReal m +, 让ä»ĸ +. InputStream +f ab +Ġ åĪļæīį +åĮĹ åĮº +ç´¢ èµĶ +Ġslee per +[ P +ag ascar +å®ļ åζçļĦ +å¢ŀ èĩ³ +R aster +ĠM ish +-s w +IL LE +Ġextraord inarily +. bytes +çļĦ 强大 +Ġno ab +çļĦçĥŃ éĩı +v ivo +Ġ` . 
+åįĥ æĸ¤ +_ < +æĭ Ī +/ IP +å®īåħ¨ äºĭæķħ +Ġhairst yles +ï¼Ľ åħ¶æ¬¡ +士åħµ 们 +åİ» 寻æī¾ +æ¯ı å°ıæĹ¶ +ä¸ī个 æĸ¹éĿ¢ +, æĦŁè°¢ +Ġp ardon +.D evice +Rest art +Ġanomal ous +éģĹ çķĻ +ĠAc ute +æļĹ é»ij +oz o +好 æ¯Ķ +Ġhard ened +CL R +Ġhydro chlor +é¢ĩ åħ· +人 éĥ½æĺ¯ +sh own +èŀį 为ä¸Ģä½ĵ +ĠC aleb +Ġres urre +å¿ĥ å¢ĥ +app ers +è¶³ çļĦ +æ£Ĵ çIJĥ +Ġo tt +ng oing +伤 å¯Ĵ +çĶĺ æĥħæĦ¿ +Ġflav ored +W INDOW +ĠPam ela +e lection +Ġ 社ä¼ļ +éĥ½ åįģåĪĨ +Ġreplic ates +, æĽ´åĬł +ä¸Ĭ æĸĩ +Ġel icit +.c anvas +Rog er +æľ¬ åľºæ¯ĶèµĽ +åİ» æīĵ +Ġhot ter +åį¸ è½½ +ass en +In jection +ĠSo OLEGAL +端 åºĦ +ĠD unk +ĠDen is +.dis abled +ä¹Ł ä¸İ +Ġrest less +la very +_ch ildren +isc rim +çļĦ æĪĺçķ¥ +ĠAb by +omy cin +ĠK not +ne al +ĠCOMM AND +ï¼Į åĽĽå·Ŀ +raw ler +.b order +Ġsymmet ries +c plusplus +yp ical +ĠPass age +åīį çŀ» +,è¿Ļ æĹ¶ +ĠAuthor ities +. One +Į Ģ +Inter mediate +Ġneuro trans +æĬ¥ çŃĶ +-m on +odes k +Play list +Ġch icks +æł¡ åĨħ +Ġclean liness +, æĤ£èĢħ +Ġ 女人 +éķĩ éķ¿ +Ġdilig ent +Ġb ishops +é¦Ĩ çļĦ +Ġgl aze +èݱ çī¹ +èħĶ å®¤ +âĢľ è¿Ļ个 +触 æīĭ +Ġillust rative +Ġpromin ently +ĠThor nton +p ars +åĬł åĩı +è¿ij åľ¨ +Ġ@ _ +-per forming +åĬł è£ħ +çªĹ åīį +Ġesc orts +è§Ĥå¯Ł åΰ +Ġpun ched +ĠSer vers +ä¸Ń çĶŁ +åı£ 红 +å¦Ĥæŀľ ä¸įæĺ¯ +å°Ķ é¡¿ +-develop ed +ek yll +Trans parent +ĠGra ves +ing en +çŁ³ æĿ¿ +æĽ² 缮 +两 级 +éĥ½ 为 +at ology +ĠBos ch +ìŀ IJ +æĢ» åĪĨ +( The +S cheduled +PN G +çļĦ æĸĩåŃĹ +sw ana +å°±æĺ¯ æĥ³ +æķĻçłĶ 室 +é£ ĵ +åı¯ä»¥ åĴĮ +ï¼Įä½Ĩ ä»İ +ĠPro ven +abyrin th +æĢĿç»´ æĸ¹å¼ı +ĠHab itat +èĵĿ åĽ¾ +T weets +ï¼Į åĬŁèĥ½ +æĥĬè®¶ çļĦ +ãĢĤ èİ« +if ton +hen ce +An swers +æļĹ å½± +ĠAfter wards +ĠHE ALTH +å¿ĥ缮 ä¸ŃçļĦ +und ant +-h ours +run s +L iver +Ġ 没æĥ³åΰ +at ro +ä½İ 级 +ï¼Įä½Ĩæĺ¯ ä»ĸ们 +Short cut +ãģĤ ãĤĬ +. 
ops +_ Config +ä¸į éĩįè¦ģ +cl iffe +Ġradi otherapy +op ausal +ĠCUR LOPT +W ish +Ġ 汽车 +ï¼Į çĨŁæĤī +ãĢģ ç͍æĪ· +UNT IME +Ġkidn apping +Ġ æ¯ı天 +åIJİ åįĬ +-m aster +åĪĿ åѦèĢħ +Ġvac ate +_ch ars +I owa +t em +ĠP AS +ĠD ome +ĠThe rapist +欧 æĸĩ +C hel +a èĤ¡ +ãĢģ 交æµģ +åħ¨ æĹł +Ġord inal +enter prise +ï¼Į 宫 +èĩªçĦ¶ ä¸įä¼ļ +ÂŃ ing +绯 éĹ» +æĿĢ æĦı +P acific +Ġopport un +æ±ī ä¸Ń +åĬŀ å®ŀäºĭ +.R aw +iter ation +ĠTE AM +G ard +ĠT Y +umin ous +Ġatt ends +æīį ç®Ĺ +sp y +mic ron +è¾ĵéĢģ æľº +Ġcatal y +为 çİĭ +å¦Ĥ çģ« +æķĻ è¯² +éĻIJ æĹ¶ +ĠEl aine +Ġreact ivity +è´ » +file ID +Block chain +纱 å¸ĥ +åı ģ +èµ· çģ« +. æĪij +_ pe +Reg ards +ĠVel ocity +Ġg oof +ĠOd yssey +ãĢģ å·¥èīº +æĹł 为 +ĠPro ceed +å¼ł å°ı +é«ĺè´¨éĩı çļĦ +un ordered +To File +ĠL ara +Ġad hering +ĠH CV +ï¼Ī ç®Ģç§° +ĠP vt +se at +æĿ¥ æıIJé«ĺ +ĠWel ch +Ġ" ~ +ç¥ŀ åĮ» +éģµ ä¹ī +èĢķ èĢĺ +åĨħ 线 +秦 å§ĭçļĩ +Max Length +Ġ ä¸ŃçļĦ +é±¼ åĦ¿ +ris y +Ġ[ & +ãĢĭ æĬ¥éģĵ +åĪĿ 审 +,ä»ĸ 说 +ï¼Į åĽŀåİ» +ĠSl ides +Ġhe mod +天 çĽĸ +Ġinter connect +æĬĺ ä¸į +< Long +ãĢģ çŁ³æ²¹ +Ġhol omorphic +_ created +un ner +( me +å¾Ģ éĩĮ +Ġhem oglobin +å½ĵ æĪIJäºĨ +åıª ç͍ +ist y +ĠK G +åĮº éķ¿ +Qu eries +ç»Ŀ对 ä¸įèĥ½ +å°¼ 西äºļ +Ġ第ä¸ī çϾ +è¿Ļ é¢Ĺ +_dir s +çĪ±åĽ½ 主ä¹ī +ä¸ªå·¥ä½ľ æĹ¥ +ä¿Ŀ ä½ij +Ġres olves +åĽ¾ èħ¾ +Ġinf init +çĽĬ å¤Ħ +.n an +åζ å®ľ +å±ķ åĮº +èι éĺŁ +å¢Ļ å£ģä¸Ĭ +el ier +èµ° é«ĺ +aff er +ç§ijæĬĢ åħ¬åı¸ +.in cludes +Ġmultip lex +ãĢĭ æĺ¯ä¸Ģ款 +Ġtur meric +\ |\ +Ġexam iner +ä¸ĸ纪 æľ« +Ġkil ograms +pl at +åıĬ åºĶç͍ +_st ructure +ï¼Įåı¯ä»¥ éĢīæĭ© +- Object +ĠBuffered Reader +æľĿ ä»ĸ +éĴ ¼ +æł¹ æ²» +uk in +ç»ĵæĿŁ ä¹ĭåIJİ +æľĢ éļ¾ +ons ense +Ġlap ar +ot i +å°Ĩ è¾¾åΰ +Ġcock pit +è¿· æģĭ +å®ŀ å¹² +空 ä¸ŃçļĦ +å±ħ 士 +label ed +UST ER +l azy +æĽ ¸ +èĩªå·± çļĦ人 +eb p +åıĮ 羸 +Â Ī +Ġ$ " +re lu +Ġan them +Ġres isted +å¾Ī æ¼Ĥ亮 +å®Ī ä¿¡ +_ > +ä¸Ń ä¸į +åĩº æģ¯ +è¡ĮåĬ¨ çļĦ +Ġsh ave +建çŃij 设计 +驱åĬ¨ è£ħç½® +壮 æ±ī +ï¼Į èħ° +ãĢĭ æĺ¯ä¸Ģéĥ¨ +· 马 +ĠDo ppler +_E P +Ġc umbersome +åĴĮ çͰ +æī£ æĬ¼ +L ng +Ġ åĢºåΏ +çļĦ å¹³åĿĩ +åľ¨ æŁIJäºĽ +nc mp +çϽ éĩij +Suggest ed +ĠBer k +Ġcrunch y 
+Ġï¿¥ : +^ N +ic are +æłĩå¿Ĺ æĢ§ +éĿĴ æ¢ħ +Ġinject ive +ãĤĪ ãĤĬ +西 èĴĻ +åij¨åĽ´ çļĦ人 +^ . +ãĢģ åĮħ +çľĭ ç͵影 +ie ber +æ¯Ĵ èĽĩ +éĵ¾ 表 +IZ ER +ĠCelt ics +羣 è¦ģ +Ed itable +.sp ark +ĠDrop box +ä¹Ŀ 个 +ĠPoly gon +Elig ibility +Ġent icing +èĥľ çļĦ +Ġhom estead +_AL LOC +æ±Ĥ æķij +æŀģ 强 +è° Ĵ +Ġwork manship +éľĢè¦ģ èĢĥèĻij +ãĢĤä¸Ģ ä½į +Ġexh ilar +èĩ´ è¿ľ +èIJ¥ä¸ļ æĪIJæľ¬ +ä¸įåIJĮ ç¨ĭ度çļĦ +ä¸İ åĽ½éĻħ +ern a +Ġbit ch +ĠArm or +Ġacet yl +Ġval or +确认 çļĦ +μ ο +t ub +ĠD ian +Ġpop ulous +ä¹ĺ 以 +åį«çĶŁ éĥ¨ +-k now +-mount ed +ĠE verett +Ġne b +(` ${ +âĢĿ ç³»åĪĹ +ç¢İ è£Ĥ +Ġ"- " +åģļ å¤ļ +Ġmon st +ç¦ģ éĶ¢ +) 第 +ãĢĤ è¿Ļä¸ĢçĤ¹ +ern ed +Ġrevel ations +èĩªå·± æĥ³è¦ģ +open ed +çļĦå¤ĸ 表 +ï¼Į G +ĠF eld +Ġat rial +å°± 对 +ans i +yt ically +Ġstead fast +Ġ 人çĶŁ +_T rans +æļĸ å¿ĥ +Ġtrig on +s chedule +ĠCh arm +åĽĽ åIJį +åľ£ æĹ¨ +ĠFire place +Ġreck on +çļĦä¸Ģ ä¸ĭ +Ġmar rying += FALSE +ĠF LO +ĠJ E +èĹ ī +Ġcurs ed +Ġconf erred +Sc oped +Ġadren al +ï¼Į è¯ģæĺİ +éĥ ľ +éĩį äºĨ +诺 æĸ¯ +qu s +çĪ·çĪ· 奶奶 +par r +_SE SSION +çļĦ å¹´è½» +åĪĨ éĶĢ +æ± ŀ +Ġfibrobl asts +. 
Arrays +M t +ce le +çĥŁ åĽ± +éĢĴ å¢ŀ +, åѦ +if ecycle +Ġv y +æµģ åħī +ĠCont ain +, çİ°åľº +ä¹ĭ 缮çļĦ +è¿ŀ å¤ľ +å·® ä»· +åĬŀåħ¬ æ¡Į +Trust ed +B enchmark +è¢ģ éĩİ +ĠFre ight +ub in +( Event +Ġdis patcher +æ£Ģ 讨 +Ġpropos itions +åıĤåĬł è¿ĩ +pot ential +Ġd ow +建çŃij å¸Ī +è¯ģåΏ æĬ¥ +çĮ¥ çIJIJ +Service Client +æµ® èºģ +Ġ åħ¬ +(d one +da o +ï¼Į身 为 +Ġnew bie +åįģ å¹´çļĦ +åĨł å¿ĥçĹħ +Ġsuff erers +ĠObs erve +rac ial +伯 伯 +ü ss +Ġincarcer ated +æŁ¥ èİ· +.w rap +çļĦ æİªæĸ½ +ä¸İ ä¼ģä¸ļ +éĽĦ ä¼Ł +if ice +åIJİ éĻ¢ +ÙĪ ÙĨ +_P ASS +ç¬Ķ éĴ± +ĠAcc uracy +åıijéĢģ åΰ +Ġв ÐĤ +éĢĤéħį åύ +harm onic +Ġtre asury +ĠCS I +Ġcommission ers +追溯 åΰ +< size +åĴĮ æµ· +å¾Īæľī è¶£ +, åĩĨå¤ĩ +Ġap nea +çϽ åħī +Ġ è¿ijæĹ¥ +Ġstand ings +ĠAnt ique +ĠKardash ian +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ +è̳ ä¸Ń +å®£ä¼ł çīĩ +麻 çĸ¹ +æĺĤ è´µçļĦ +ï¼Į éĩĮéĿ¢çļĦ +âĢľ 两个 +ĠG AME +_S h +m ob +éĩij åŃIJ +å·® ä¸ĢçĤ¹ +b ler +çŃī ä¸ļåĬ¡ +ind ependent +ä¸įä¼ļ åĩºçݰ +åIJĵ åͬ +å®¶ äºĨ +ä¸ī åħĥ +.S earch +Ġparan ormal +Ġd olphins +ĠO verse +ĠCan terbury +ĠP unk +Ġsp arks +Ġover turn +åŁº ä½ĵ +Ġrock ets +Ġfont Size +Ġprogram mable +Ġunt rans +_in ner +漫 ä¸įç»ı +æļĹ éģĵ +Servlet Response +åīij æ°Ķ +ï¼Įè¿Ļ æĺ¯ä¸Ģç§į +ä¸Ń æ·»åĬł +è½´ çļĦ +çĪĨ 款 +ĠCV S +ĠNear by +ĠC ame +Ġemb races +ard own +te a +åįĩ æľ¬ +Ġ ************************************************************************ +ï¼Į 车è¾Ĩ +man ual +ĠÐ ¶ +çĶŁäº§ è¿ĩç¨ĭä¸Ń +ĠTH ANK +, åĨ³å®ļ +ï¼Į ä¿Ħç½Ĺæĸ¯ +éĢł 诣 +å®ľ æĺĮ +羣æĺ¯ 个 +çľ¼åīį è¿Ļ个 +ï¼Įè°ģ çŁ¥éģĵ +Ġel ites +_c red +è̳ æĽ¼ +ï¼Į请 ä½ł +Ġc ilantro +ä¹Ł åĪ« +_LIB RARY +Ġper ch +am eth +im os +ä¼ĺ åĮĸçļĦ +åįĥ çϾ +ĠRes p +á m +script scriptstyle +Under lying +çļĦæĦıæĢĿ æĺ¯ +amp hetamine +(c p +ä¸įçŁ¥éģĵ æĢİä¹Ī +uit ary +ob ox +.n a +, æŃ£åľ¨ +ĠM oves +ĠAr cher +åıĤè§Ĥ äºĨ +tool tip +æĿĢ æĪij +åIJ«éĩı 为 +Camp us +_c alled +åIJĮåѦ çļĦ +沪 æĮĩ +大 ä¹± +èĩªå·± è¦ģ +æ°Ķ åľ° +Ġself ie +Ġpur pos +âĢľ æĪijçļĦ +ä»ħ æĺ¯ +Ġed itable +ç²ī çļĦ +ĠCelebr ate +Ġ å®īåħ¨ +Ġc og +, 大æ¦Ĥ +è¿ĩ æĹ¥åŃIJ +ĠV G +-l iter +ĠBr 
illiant +alloc ated +Ġqu bit +è¯ī 说 +-man ager +ĠGather ing +èĢĮ 产çĶŁ +Ġmission aries +è° Ļ +Ġdread ful +ĠDanger ous +ï¼Į å®Ľå¦Ĥ +ĠN ass +åĴĮ åѦçĶŁ +æľ¬ å®ŀæĸ½ä¾ĭ +ĠHttp Response +ä¸Ĭ æĿ¥äºĨ +Ġsl uggish +ĠComm od +设计 äºĨ +ne os +Ġend owed +æºIJ åľ° +ä»·å̼ åĴĮ +æľīçĤ¹ åĥı +ï¼Į 纪 +ĠL one +é£ŀ 天 +ĠRel iable +\ not +zz i +å°± éĤ£ä¹Ī +è¿ĩ ä¸Ĭ +Ġi Phones +â̦â̦ ãĢį +ĠG TA +Ġche ers +人åĿĩ 纯æĶ¶åħ¥ +æĸ° æĿij +_t asks +åį¡ çī¹ +ç¦ģ ä¸įä½ı +ï¼Į æIJħæĭĮ +set Value +éĥ½ä¸į ç͍ +B IN +å°ı 鸣 +}^ * +_reg ex +Ġaccompl ishing +Ref s +ÙĪ Ø± +Ġar sen +æķ°æį® ä¼łè¾ĵ +ä¸Ĭåįĩ åΰ +ĠEll ie +ï¼Į åĭIJåľ° +ĠH LA +éĤ£ åı¥ +ĠK emp +æıIJ çĿ£ +Ġcro pped +r ising +Ġ ä¾ĭå¦Ĥ +ĠC USTOM +æĶ¯ 座 +/c pp +åħ½ åĮ» +Ġcorner stone +Ġexp orter +éĻį 级 +Ġred esigned +м а +< style +ĠMed ieval +Ġarbit rator +çļĦåľ° ä¸ĭ +as in +Ġn M +ĠT ick +ãĢģ è°ĥ +Ġcomp lying +TH READ +å¾Īå¤ļ 人çļĦ +ä¸ī çļĦ +ç§ĭ æ°´ +çľĭ ä¸įæĩĤ +使 ä¹ĭ +è¡° å¼± +æĭĵ æīij +Ġra ped +From File +å¾· åĨĽ +" -- +åºĬ ä¸ĬçļĦ +ĠDesign ing +W V +ĠAn atomy +Ġdeb ating +ãĢģ å·´ +å®ļ åŃIJ +ĠAl ps +_S ER +è´¨éĩı 管çIJĨ +é²ģ èĥ½ +hd ad +-h ard +延伸 åΰ +ï¼Į æķ¬è¯· +ĠCh ampagne +ç´§ è·Ł +è¯ļ æĮļ +Ġkill ers +éĩįéĩı 份 +, æ¸ħ +C ovid +æĬĬ æīĢæľī +æľīä»Ģä¹Ī ç͍ +Ġn ib +_TO OLTIP +F aces +_ cloud +åĴĮ é»ij +ç«ĭåĪ» å°± +Ġgran ular +ï¼Į çݯ +åľ¨ åĵªåĦ¿ +Ġk ä +æĬ«éľ² çļĦ +çŀ ¿ +ĠOS HA +å¦Ĥ æķħ +æ¥ Ń +Ġnut shell +Im Gui +以åıĬ 对 +Ġinvol untary +ç¨İ é¢Ŀ +åĩº åħ¥åı£ +add ers +arc raft +OH N +ĠGreen land +ï¼Į åĢĺèĭ¥ +ĠH ancock +ĠL NG +_b p +Ġper l +Ġ第åħ« 竳 +é¦ į +Ind iana +yl a +çļĦæĬĢæľ¯ 人åijĺ +éĢĤ ç͍çļĦ +и ÑĨ +Ġri ots +çļĦ çIJĨæĥ³ +ig ations +ä»İ ä½ķ +Ġfull er +ĠMoh amed +ä¸į åĬł +两 é¢Ĺ +èµĦäº§è´ŁåĢºè¡¨ æĹ¥ +S OC +ĠAcc ommodation +_f sm +ĠS add +Ġj ed +-m eter +èϽçĦ¶ åľ¨ +ä½Ļ å®¶ +aud it +åıĤè°ĭ éķ¿ +绿 æ°´ +ãģĿ ãĤĮ +ĠScr atch +å¿ĥ缮 ä¸Ń +Ġelong ated +次 åºı +ĠBre ed +erm ons +_p m +æĭī èIJ¨ +.D ouble +èīºæľ¯ åѦéĻ¢ +P TR +åĽĽ 次 +æĦıè¯Ĩ åΰäºĨ +ï¼Įä¸İ æŃ¤åIJĮæĹ¶ +åζåĨ· åīĤ +ï¼Įåıª ä¼ļ +ç§ijåѦ åıijå±ķ +硬 è´¨ +ç͵è¯Ŀ éĩĮ +Ġtown ship +Ġin verter +ĠWat kins +Y e +ä¸Ģ èīĺ +Ġmembers hips 
+ĠOt to +Ġind ifference +å¾®åįļ ä¸Ĭ +ĠA ires +and ing +ort a +è¶³ èģĶ +æķ´çIJĨ çļĦ +k ish +è¯Ħ è®® +åĨľ çī§ +ĠVer de +çļĦ çĹħ +ãĢĤ åİ» +.G oogle +Ġbra very +-form ing +L N +h ound +åıĸ çļĦ +Rel ax +é« » +ï¼ī åħ¶ä»ĸ +æĴŃ æĬ¥ +鸣 人 +éª ħ +_sh a +åĴĮ è¡Į为 +交 èģĶ +ï¼Į èįī +ĠM SD +å·¥ ä½į +ĠIm ag +Ġpow ders +çĬ¯ç½ª çļĦ +-d anger +Ġcond ens +ĠPet ition +Spec s +Ġeru ption +ĠCh arts +åįļ 大 +Ġæľ¬ åŁºéĩij +Ġв Ñģ +Ġ 康 +ä¾Ŀ éĻĦ +åĨ· æĪĺ +ĠRel ation +ynchron ization +ĠInd icator +Ġnovel ist +çļĦä¸į èī¯ +å±ķè§Ī ä¼ļ +T ek +ĠCre ed +E Z +Ġback log +are z +ph il +CS A +ĠLight weight +ï¼Įä¹ĥ èĩ³ +ä¸į çľĭ +使ç͍ æĸ¹æ³ķ +è¿ŀæİ¥ äºİ +çī©ä¸ļ åħ¬åı¸ +Ġcommem orate +. Container +ention ally +Ġdam s +æĽ´ è¿Ľä¸ĢæŃ¥ +综åIJĪ èĢĥèĻij +ä¹ĺ æ³ķ +ec ode +太 éķ¿ +约 çijŁ +é©» è¶³ +èī²å½© çļĦ +é£İ èĮĥ +ma h +ĠDry er +Ġ ç§ij +èµ· èĪŀ +ï¼Į èĭ¹æŀľ +ãĢģ å¹¶ +Ġfl ushed +.j upiter +ĠBir ch +Ġruth less +-s ocial +客 æµģ +çļĦä¸ĭ 端 +ãĢģ åIJĪ +ä¿Ŀ æľī +è£ħä¿® åħ¬åı¸ +ĠM eter +åıĹ éĺ» +P am +ãĢĤä½Ĩ ä»ĸ +ĠCert ificates +ĠG MC +Ġcritic isms +Ġencompass ing +设å¤ĩ åıĬ +æĪIJæľ¬ åĴĮ +Ġcircul ated +æİ§åζ åĴĮ +~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~ +âĢ ħ +âĢĿ 建设 +代 人 +åį´ è¯´ +ĠEX IT +ĠInv olved +Ġth yme +ch ten +aton in +ãĢĤ ä¸ĩ +å¼Ģ åĮº +èİ ħ ++ q +ĠE arnings +æĽ´ è¦ģ +çİĭ 室 +åĩº 个 +ĠCol ony +Ġsw apped +° , +Ġimp oss +ï½ Ķ +Ġ ä¼ļè®® +ï¼Į æ´¾ +åį° åº¦çļĦ +ï¼Įå°Ĩ ä»ĸ +red dit +Ġ å¸ĮæľĽ +ä¸Ģ å¹ķ +.prevent Default +ĠP es +å±ĭ éĿ¢ +çϾåĪĨ çϾ +ï¼Į åζ +åıĺ æĽ´ä¸º +æĪ¿ åŃIJéĩĮ +ĠGem ini +lib c +å¹³æĿ¿ ç͵èĦij +âĢľ æĺ¯çļĦ +ĠMot ivation +Ġem inent +Ġer otic +lic ks +亦 åı¯ +Ġfort unately +ĠSlovak ia +< Node +k owski +ĠN U +uc ose +ik it +举个 ä¾ĭåŃIJ +Ġ è£ħ +Ġd uel +Ġsm ug +ï¼Į å¨ĩ +eth nic +âĢľ 她 +æł¼ é²ģ +ĠInvestig ations +" _ +( Image +- pe +çī¹å¾ģ æĺ¯ +ä¿Ĭ æĿ° +ĠBak ery +Girl s +ĠDO ES +( init +are as +åĥı ä½ł +ĠO LED +éĩį éĢ¢ +çīµ åζ +éĩį æŀĦ +IP C +åħ·å¤ĩ äºĨ +ï¼Į åħįè´¹ +ĠM ULT +å¾Ĺ æĿ¥ +ç¾ ļ +éĵ¶è¡Į éĹ´ +Ġslides how +çĥĺå¹² æľº +-b urn +ise ase +ÑĨи и +ĠHans on +ĠM iy +ä¸Ĭ çģ« +éĢł åģĩ +ĠBritt any +Ġsc ents +é«ĺ 声 +社ä¼ļ ç»Ħç»ĩ 
+graph s +ĠC rescent +ï¼Į å¼ķåıij +å§ £ +åģļ 客 +Ġbuilt in +Ġsoy bean +Ġove rest +è¿ľ è¶ħ +Ġcomment ators +Cert ified +ï¼Į åĤħ +ãĢģ æķĻå¸Ī +.set Enabled +ĠWal nut +E co +éĥ½ åŁİ +f arm +çļĦ éģĵçIJĨ +Ġup keep +èµ¶ è·¯ +å͝ çĭ¬ +æĿ¥ 帮åĬ© +Net flix +ç¼ ĩ +Ġconcent rates +Ġcrow ns +- CR +> &# +_ include +li ber +**************** **** +Ġmetast ases +il ate +å®ļ æĢ§ +IGHT S +æ¶Į åĬ¨ +( store +ï¼Į åĬ© +ĠG ives +æ¡Ī çļĦ +ĠX T +ï¼Įçľ¼ ä¸ĭ +-com mercial +Ġattain able +èĤ® èĦı +Ġrep ression +ãĢģ æĺ¯ +èĩªå·± 没æľī +ï¼Įä¸Ģ ä¸ĭ +åĪº 绣 +ĠPO INT +æģ¯æģ¯ 缸åħ³ +Ġres isting +erm o +.c uda +ï¼Į å·¦æīĭ +ĠR onaldo +ï¼Įä¸į æĢķ +ĠSof a +ĠCliff ord +x on +Ġf ou +ä¸Ĭ è¯ģ +Ġ马 åħŃ +åľ° ä¸Ńæµ· +å¥ĭ åıij +ï¼Įå¹¶ æľī +-lo ving +Ġ å±±ä¸ľ +æĪIJéķ¿ ä¸º +Ġacknowled gment +ĠK ok +åĢĴ éĢĢ +è´´ å¿ĥçļĦ +ï¼Į åĽŀæĿ¥ +ĠH b +缴 ç«ĭ +Ġdup lex +d ain +è§£ æĥij +æĽ¿ 她 +,åį³ ä¾¿ +Ġqu bits +Ġman a +æĽ´ æĸ¹ä¾¿ +Ġph ishing +ç´ł æıı +å¿ĺ åį´ +Ġanalys ing +ĠO val +åľº åĨħ +ç¨ĭ å¼ı +åĩ¯ å°Ķçī¹ +, { +Ġz um +-A ss +æ½ľåľ¨ çļĦ +] =" +ĠA BA +OR IZ +Res olved +Ġgl azed +[ h +åŃĺåĤ¨ åįķåħĥ +( ...) +ãĢģ æĮī +æĹ¥ èIJ½ +_DIS PLAY +ï¼Įå°ı æĺİ +ĠMemor andum +- load +\ underline +ĠW ig +-qu al +Ġ åĩ¤ +ĠCater ing +Ġ 段 +å¦Ĥ 鼨 +ï¼Įä»İèĢĮ å®ŀçݰ +Ġmic ron +ï¼Į å¤Ħäºİ +"> # +Ġspectrom etry +T odd +ĠS lam +è¿Ļ éĥ¨åĪĨ +CHO OL +W ere +j t +æī¿æĭħ çļĦ +Ġ ä½į +ï¼Į æĭĽ +ï¼Ł å½ĵçĦ¶ +å¹³ åľ° +æł¡ å¤ĸ +ï¼Įå°±æĺ¯ åĽłä¸º +ï¼ĮéĻįä½İ äºĨ +âĪ Ĺ +æŃ£ è§Ĩ +ãĢĤå®ĥ æĺ¯ +_P CM +è§īå¾Ĺ æľīäºĽ +åį´ æľī +CL ICK +ภ« +des erialize +Ġalloc ator +[ start +N OS +_p y +me et +(m em +Ġenc odes +std err +ĠAlbu querque +âĢĿ åΰ +èĨ³é£Ł 纤维 +ĠB orders +ĠAl gorithms +-C D +Ġmother board +-out s +åĬłå¤§ 对 +ãĢģ è¿Ľ +ĠL ies +ĠWater loo +C OME +J UST +Ġover p +ï¼Įæĺ¯ æĪij们 +Can onical +ĠFig s +, 带 +ðŁ İ +. 
When +Ġmid i +å¿ı æĤĶ +M ate +int ed +Ġreg rett +Ġsys call +, 以åīį +: } +Ġ ÙĬ +ï¼ļ æŃ¤ +Ġcan oe +ä»· å»ī +Ad j +ĠDep artments +Ġro asting +Ġinspect ors +Ġc udd +ĠM oms +ĠR eward +ĠJon as +Ġscram bled +/ id +an onymous +æĪij åıijçݰ +ä¸ŃåĽ½ å¸Ĥåľº +z ar +ï¼Į åģļåĩº +Ġsu fficiency +ath om +ix er +ï¼Įå¦Ĥæŀľ ä»ĸ +Ġinconven ient +_ accept +or c +çļĦ æĶ¹åıĺ +.t ile +M oh +ĠU AV +-t ools +Ġ 以åīį +ĠT ir +Ġsm oker +Ġhost ility +SE Q +ä¸įåIJĮ äºĨ +Re placement +ï¼Į æ±Łèĭı +ĠN X +ĠH OST +好 åı¤ +Ġout age +_N o +带çĿĢ ä¸Ģä¸Ŀ +沿 岸 +æĬĢèĥ½ åĴĮ +çĶŁæĪIJ ä¸Ģ个 +å¹¿æ³Ľ ç͍äºİ +ï¼Įåīį å¾Ģ +èĤĩ äºĭ +Ġwood land +/ ref +åĽ´ å·¾ +Ġher pes +Ġhairst yle +L imits +Ġun ch +éĩij åįİ +/s chema +Fun ny +: add +çϾ å®ĺ +éĢģ èĩ³ +ĠExper iments +Ġst ent +.c ap +åħ» çĮª +net te +ĠNe on +m ute +Ġact uator +ï¼Į åĵŃ +å®ģ åı¯ +大 便 +PR S +Ġdesert ed +ĠHe ater +ãĤ º +ï¼ĮåħĪ æĺ¯ +ï¼Įä»Ĭ åIJİ +p ard +ä¸Ń æıIJåıĸ +ãĢĤ 许 +ĠDes ire +ĠCost co +Ạ£ +ĠZ ucker +ï¼Įæľī ä¸Ģç§į +ĠSch ro +Ġê ² +F x +l uck +éĿ¢ éĿ¢çĽ¸è§ij +éĢļ æ°Ķ +Ġdef init +-M en +è§£éĩĬ 说 +_ nt +Ġover b +Ġequ ities +ĠRes pir +.n lm +/ product +_m ail +å¾® ä¸įè¶³ +ï¼Į没 äºĭ +Ġban quet +ĠEd win +Ġintrig ue +ĠMead ow +å¹´ å¹¼ +Ġche esy +Be am +ठ² +æĮģç»Ń æĹ¶éĹ´ +Ġp iled +oss a +缣 主 +æĪij çŃī +ä¿ ª +Ġpar cels +çŃĭ 骨 +F acts +åĺŁ åĽĶ +Ġfl ashed +æģ© æĿ¥ +\ Entity +交æĺĵ å¹³åı° +Ġ æĺŁ +ĠN GC +é² « +汤 åĮĻ +ä½łä»¬ 两个 +ad ge +ĠK um +身ä½ĵ ç´łè´¨ +æijĬ ä½į +" As +å°ı å±± +æľĢ 容æĺĵ +æķ¢ 说 +utter stock +O w +is man +ï¼Į æĥ¹ +å¤į 审 +(t ab +åĴĸåķ¡ åİħ +以å¾Ģ çļĦ +ĠP SP +马 èĻİ +ĠAD V +åĴĮ 她çļĦ +Ġë ĭ +, åĪ«äºº +IC ODE +er ic +åĨĻ æĪIJ +Ġindividual ity +èѦ 车 +ros ophila +ãĢģ 带 +åĩº èµ° +éļ¾ äºĭ +ï¼Į çĶļ +Ġbar code +ĠPass enger +ĠM é +pe re +e ated +ol ina +ĠA O +bb bb +SC O +ĠD ive +æĪij å¿ĥéĩĮ +Ġimpecc able +Ġass orted +Ġα ÏħÏĦ +-second ary +æĬĢæľ¯ éĹ®é¢ĺ +aj an +大éĻĨ çļĦ +Ġcontrad icts +ãĢģ éĻįä½İ +æĿ¥ çļĦæĹ¶åĢĻ +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ +æĺ¾ çľ¼ +V ox +Ġf rowned +åѦ åºľ +åİ» æĢĿèĢĥ +被 认为 +Ġins isting +éĤ» éĩĮ +Ġvo ic +Ġtors ion +纹 è·¯ 
+éħįç½® çļĦ +Ġfresh ness +C XX +S equ +éķ¿ ä¸īè§Ĵ +åĶ¿ åºĶ +ðŁ ĵ +ĠÏĥ ÏĦη +ud it +ĠL SU +éģ¿ éļ¾ +çĮ ĸ +_R AW +Ġbus iest +```` `` +pport unity +_r w +åĩĮ ä¹± +åĸ· æ³ī +åĨħå¿ĥ æ·±å¤Ħ +交 ç»ĻæĪij +éļĶ èĨľ +,ä¸Ģ æĸ¹éĿ¢ +Ġlink er +ĠMongo DB +C oding +åĪĨ éĥ¨ +imes ter +è´¸ çĦ¶ +ĠTable ts +Ġiv ory +E cho +ĠX D +æ¯Ķ 她 +éĢļè¿ĩ çļĦ +Art ificial +-Sh irt +ãĢĤ åĬłå¼º +oss ed +Ġmodel Builder +åıĺ å°ı +å¿§ æĦģ += float +IT ICAL +ĠObject ives +Real m +- ing +Ġartific ially +ï¼Į æĦ¿æĦı +Ġin accessible +_C B +åĬłå·¥ çļĦ +, æĪĸæĺ¯ +Ġun noticed +åĩºçĶŁ åľ° +igraph y +V ISION +ĠK ob +èIJ ¼ +åºŃ 审 +ĠE velyn +æĪĸå¤ļ æĪĸå°ij +Ġmod ality +åĩī äºĨ +Ġj ag +è·¯ åĨĽ +Ġorder ly +ĠPres erve +æĮģç»Ń æĢ§ +ä¼łè¯´ ä¸Ń +Ġpharmac ological +Grad uate +.define Property +åłĤ çļĦ +æľīä»Ģä¹Ī 好 +Ġbright en +ãĢģ èIJ¥åħ» +âĢľ 红 +ong a +иÑĩе Ñģк +å°ı æīĭ +Ġamb iance +åģ ķ +èī² æĸij +- et +yt ime +Ġson ic +ĠMo vers +æħĮ äºĨ +ĠTob y +ĠH ive +åı° éĿ¢ +Ġcent imeters +') ( +ä¼łéĢĴ æĽ´å¤ļä¿¡æģ¯ +co verage +ĠTrans parency +ç¾ŀ æĦ§ +ï¼ĮéϤ æŃ¤ä¹ĭå¤ĸ +nd e +èĥ¡ æĢĿä¹± +æĺ¯ ä¸įåIJĮçļĦ +éĥ½æľī åĵªäºĽ +ä¸įç͍ äºĨ +_initial izer +微信 群 +ĠPan els +C pp +ĠR CT +çͳ é¢Ĩ +big cup +Ġsan itary +æ°¢ æ°Ķ +åıĪ æĬĬ +å¿ĥ çľ¼ +æ±Ĥ åĩº +æĭĴ ä¸į +opens ource +res ume +åľĨ çĽĺ +ĠWall paper +Ġbust ling +âĺħâĺħ âĺħâĺħ +Ġ è¾ĵåħ¥ +ĠI J +çķĮ 线 +ĠComp ass +Ġsumm ons +侯 çĪ· +W OW +ep id +ãĤ¤ ãĥ« +-b oot +æŁIJ çͲ +ĠConstruct s +Ġnational ism +/ sec +A çļĦ +ke it +ï¼Į å±ķ +ĠON LINE +/ show +Ġ åıįæŃ£ +imp lemented +æīĺ ç¦ı +-c an +-c lose +çļĦå°ı å§ijå¨ĺ +ä¸į åıª +åħį éϤ +Ġsubt itles +Ġfle a +ï¼Į æī¿æĭħ +ĠQu ilt +éħ¸ çĶľ +ä¹Łæĺ¯ä¸Ģ æł· +y ms +æīĵ æĬĺ +_ ajax +g uns +ï¼Ī åİŁ +å¾· çͲ +CH R +ãĢĤ 女 +è¿ŀæİ¥ 管 +N am +ere otype +ĠPay day +åįıè®® 书 +ï¼Įæµ· æĭĶ +åĨħ容çļĦ 羣å®ŀæĢ§ +ï¼Į é²ľ +è¿ĩ åįĬ +_C heck +ĠGr anted +ä¸Ĭ æ¦ľ +ub i +app lic +ä¹Ł åĴĮ +åħ¬ æĬ¥ +at itis +æĪij们 çŁ¥éģĵ +Ġspark le +Lux ury +ç͵ çĥŃ +æİ¨ éŨ +äºij éĽĨ +ĠC YP +大 åŃĹ +缴 è§Ĵ +ï¼ĮæĪij 没 +ä¸Ģèά æĥħåĨµä¸ĭ +ĠI p +ends With +ĠTom ato +éĵ° éĵ¾ +ãĢģ å¿« +æķĻåѦ ä¸Ń +Ġwal nut +Ġreplic as +ãĢĤ ä¸ĥ 
+Ġla uncher +, æĺ¯ä¸Ģ个 +Blog This +ä¹Ł å¤ļ +ä¸İ æĿİ +两 éĺŁ +æķ£ å¼Ģ +ĠSun rise +âĢľä¸Ģ 带ä¸Ģè·¯ +evol ent +Be acon +( by +ĠS RC +æĺ¯ ä¸Ń +.c sdn +ĠReg iment +Ġchem ically +ĠE leanor +çİ ij +è¿ľ 举 +çķĻ æģĭ +C ann +ĠH ID +ert a +ĠRa ises +. Validate +æīĵ å®Į +åģı é«ĺ +ate urs +user content +å·¥ä¸ļ åĽŃ +Ġflash light +ĠAtt achment +, åĪļ +æį® çĤ¹ +Ġsign up +æŃ£å¸¸ æĥħåĨµä¸ĭ +_dec ay +ĠG ould +_b ounds +fin ally +-pro gram +Ġse rene +plic as +O t +常 说 +AN AN +Ġbal m +éĢĬ èī² +颤æĬĸ çĿĢ +度 è¿ĩäºĨ +.m aterial +ï¼Įä½ł è¦ģæĺ¯ +ĠShe ikh +l ue +oc able +_ resp +Ġg ag +Ġif rame +ĠCh r +ĠNo el +Ġk y +èĢĮ 论 +åľ°çľĭçĿĢ ä»ĸ +æį ĭ +ĠRound up +åĿIJæłĩ ç³» +ĠDialog ue +-m ar +éģ İ +ï¼ĮéĤ£ è¾¹ +æ³ ¯ +In novation +PH Y +gester one +æľĪ å¼Ģå§ĭ +åģľ äº§ +\ > +o L +çļĦ 迹象 +ĠC IS +å¹´ èĢģ +éŁ³ 符 +_RO W +w allet +ä¸ŃåĽ½ æĸĩåĮĸ +æŃ£å¼ı å¼Ģå§ĭ +_AL LOW +æĶ» åħĭ +v art +ï¼Įåħ¨ ä½ĵ +Ġ æ·»åĬł +Ġun ilateral +éĢīæĭ© é¢ĺ +erial ized +ĠTel escope +è¾ į +_b lob +ç«Ļ åı° +åİĨ æĿ¥ +Ġimpl anted +Ġsmooth ie +ĠBas el +ĠFL AG +æħķå°¼ é»ij +æijĨ çĿĢ +åĽ½ è¯Ń +Ġ é¦Ļ港 +Ġmon astery +there fore +ĠPoss ibly +t ear +åįĹ æŀģ +Ap ache +Ġcra ppy +ĠK anye +' all +Ġeffect ed +满 åľ° +å®ŀéĻħ æĵįä½ľ +Ġrod ents +U CTION +çļĦ å§¿åĬ¿ +Ġd oping +) ((( +ä¹ī ä¹Į +.set Color +æķ£ å¸ĥ +I W +æĥ Ĩ +æĹłæ³ķ åΤæĸŃ +ĠFort y +(null ptr +ï¼Į åĪĨåĪ«ä¸º +ied er +_s eries +ä¸ĵ åζ +åİĭ ä¸ĭ +纳 å°Ķ +ä¸Ĭå¸Ĥ åħ¬åı¸çļĦ +Ġdump sters +ï¼Įä¿Ŀè¯ģ äºĨ +æŁ Ĵ +两 æł· +_INTER VAL +ĠKos ovo +æĭ ´ +Ġrel atable +ĠAd oles +an ova +åıĮ èĥŀèĥİ +ĠYork er +çĽ¸å¯¹ åºĶ +èĪį 人 +åıĤæķ° çļĦ +æ»ŀ åIJİ +æĺ¯ ä¼ģä¸ļ +yst ers +ç»´æĮģ åľ¨ +PM I +ï¼Ľ åĽĽ +红 èĤ¿ +æģį çĦ¶ +Ser iously +ĠInf ection +éĿŀ ç»ı常æĢ§æįŁçĽĬ +åĪ» 度 +Ġmel odies +flat ten +z man +Ġ"" ; +Ġvill ains +ĠLat via +Ġuns ur +ĠDyn asty +Ġ ä¿¡ +çł ¾ +.t mp +åĵª 天 +æł¼ å¤ĸçļĦ +æĹ¢ èĥ½ +as ource +认 éĶĻ +_P M +没æľī éĤ£ä¹Ī +åįķ æµĭ +P ets +ad u +.c pu +ĠZ ambia +yy y +$ d +æĢ» è¦ģ +éªij 车 +æį¡ èµ· +.nav igation +Ġcyn ical +Ġ ä»Ģä¹Īæĺ¯ +ĠM argin +ĠEx act +çģĮ è£ħ +æŃ¦ æĺĮ +æĸ¹æ¡Ī çļĦ +.dis able +$ ^ +å°½ æĺ¯ +Ġsw amp 
+ä¸Ģ éĥİ +ff e +ik t +-m ort +Ġassert NotNull +G reek +å°Ĩ 缮åħī +Ġsa ver +åľ¨ 欧洲 +é»ij 马 +ĠRec ap +å¾Ī好 å¥ĩ +æ²¾ æŁĵ +é¢Ķ é¦ĸ +çĶŁ åĬ¨çļĦ +Ġpre natal +åħ³ ä¸Ń +Ġmod ulate +Ġrefund ed +ï¼Į æľĿå»· +Ġs per +ï¼Į çĶŁåij½ +éĿŀ æµģåĬ¨èµĦ产 +Ġsuggest ive +ĠPeg gy +ĠE ck +æĶ¶ 容 +(m ake +ĠSoci ology +$ ^{ +ex cluding +ĠCh unk +宦 å®ĺ +ï¼Į èĢĥèĻij +_f alse +ä¸ĸçķĮ 第ä¸Ģ +News letters +Ġresear ches +Ġto l +Ġcons pic +ç쵿´» çļĦ +Ġ å±ıå¹ķ +ĠÐ Ķ +LECT ION +ĠG ithub +å¹´ åĩºçĶŁ +è¿Ļ个 äºĭæĥħ +æĻ¯ çī© +ï¼Į èģĶåIJĪ +å®ī çŁ³ +æŁ¥ åΰ +D iversity +ï¼Į ç®Ģç§° +Ġm ime +Ġ å°±åĥı +_m any +ĠSil ence +ï¼Į è¶ĬæĿ¥è¶Ĭå¤ļçļĦ +D un +ess ay +ĠBy z +çĹ´ è¿· +i ander +äºĨä¸Ģ èĤ¡ +æĺ¯ä¸Ģ éŨ +ï¼ĮåĨį 说 +DE ST +, åĩºçݰ +ĠA ED +ĠæĪij åĽ½ +æľĢ åŁºæľ¬çļĦ +å¦Ĥä½ķ 使ç͍ +åį± æľºçļĦ +Ġcontrad ict +. Visible +ï¼Įä»ĸ们 éĥ½ +ï½ŀ âĢĿ +ĠFather s +( gen +çļĦ çļĦ +é£İ éģĵ +ä½Ļ çĶŁ +æİª æīĭ +滨 æ±Ł +ä¸Ĭ æĸ¹çļĦ +çŃī çݰ象 +ï¼Į äºĮåįģ +Ġble ak +æīĶ åΰ +ï ve +ç± ģ +æľīä»Ģä¹Ī åĮºåĪ« +plac ian +éĤ£ 群 +漫 漫 +ĠCam eroon +人æĢ§ åĮĸ +Ġpoore st +è¿Ļ åŃ©åŃIJ +' é +ä¸Ģ åĢį +Ġ( {\ +Ġdep iction +æīĢæľī çļĦ人 +ĠST DMETHOD +Play back +åŁİ乡 å±ħæ°ij +Ġ 纪 +åľ¨ 举 +大 èĤĨ +ĠOl son +ï¼ĮæĹ¥ åIJİ +交 åī² +çĶŁæ´» åĴĮ +ĠComp ile +æ°¸ ç£ģ +Inf inity +æ±¹ æ±¹ +m ovie +ä¹Ł åıª +hib it +, string +大 çīĮ +ek a +Ġcounter top +Õ ¡ +ĠSt o +大å°ı å°ıçļĦ +æ¸Ķ æ°ij +ĠEli jah +C re +设置 äºİæīĢè¿° +Ġconvolut ional +% / +æĢ§ éĹ®é¢ĺ +v ascular +Ġ ä¸ļ +表示 æĦŁè°¢ +ĠOs lo +Ġtrib unal +ï¼Į åįĹ京 +Sp aces +鸡 èħ¿ +ãĢģ åĭ¤ +ĠâĢ » +åºľ ä¸Ń +Mag azine +æŃ£ å®Ĺ +ï¼Įä½ł 以为 +ï¼Įå¹¶ çͱ +è°ĭ æ±Ĥ +èĭ±éĽĦ èģĶ缣 +, å¿« +åĨħ èħĶ +ä¸Ģèµ· åIJĥ +ï¼Įåħ¶ä»ĸ çļĦ +ocaly ptic +Ġ åĨħ容ç®Ģä»ĭ +_r gb +execut ion +æİ¥è§¦ çļĦ +sem ary +ĠParticip ant +.get Parent +RO I +ĠJo ined +Ġincompet ent +Ġ ç͵åŃIJ +å¾Ĺ éĿŀ常 +ĠFl ush +Ġword en +æĹĹ è¢į +in p +ĠH uss +çľĭåľ¨ çľ¼éĩĮ +em is +Ġtor us +çĦĬ ç¼Ŀ +/ media +e ce +j q +æ¿ ij +-pro duced +_v irtual +.s ymbol +ĠZ ERO +(d t +ĠFIG S +Ġselect ivity +. 
Values +âĢĿ åıĬ +èĩª 以为 +_t opic +ãĢĤ ä¹Łæĺ¯ +æľī å¾ħ +ä¿¡æģ¯ å®īåħ¨ +ĠCharg ing +ç͍ ä¸įçĿĢ +å®ŀ æĵį +.m ask +eb u +è¿ĺæľī 人 +ãĢģ å¦Ĥ +IC LES +Other wise +è´¹ çŃī +åıĺ æļĸ +ĠMoh ammad +Ġutens ils +Ġp ussy +ĠSpons ors +Ġco vert +太 è¿ľ +缸åħ³ èµĦæĸĻ +ed x +>( < +Ġscore r +ĠProm ote +Ġtur quoise +ĠBe coming +åıijå¸ĥ æĹ¶éĹ´ +对 åѦçĶŁ +æµģ è¨Ģ +åģľä¸ĭ èĦļæŃ¥ +.Acc ount +ĠB atter +ach inery +ĠX M +ï¼ĮåıĪ è¦ģ +ï¼Į 害 +Ġd ads +ap ur +西 å¤ı +çİ°åľ¨ å¼Ģå§ĭ +ĠOrd inary +Ġrall ies +ĠW ines +æĪĸ æľįåĬ¡ +åIJĦç§į åIJĦæł· +Ġfaith fully +_BO OT +ï¼Į æĮģ +ĠSt retch +ĠPo ison +ĠA ffect +åħ¶ è§ĤçĤ¹ +According ly +ĠTrack s +Ġco ales +ĠRun s +Ġtou red +ä¸ĺ éϵ +è¿ĩ 她 +Ġstar vation +ãģĽ ãĤĵ +_dec oder +ãĢĤ ä½įäºİ +Ġgen ital +çīĽ çļĦ +ãĢĤ ç͍æĪ· +Ġbapt ized +A rena +Ġhero ine +Ñ ĺ +ĠY ep +è¾ĵ æ¶² +éĿ¢å¯¹ çļĦ +L aser +g reens +èĦļ è¸Ŀ +Ġassault ed +b art +Ġd odge +pass ed +] [] +Ġro am +两个 æĸ¹éĿ¢ ++ H +< Q +ĠThis BlogThis +κ B +顾客 çļĦ +丽èİİ çϽ +/ init +own ership +ä¸įæĸŃ æī©å¤§ +æħĪ ç¦§ +Ñħ од +Ġadvers arial +Ġfatal ities +æĺ¯ä¸Ģ åī¯ +.f eatures +Ġnight time +ï¼Į天 åľ° +.Group Layout +ĠæĽ´å¤ļ åĽ¾åĨĮ +ï¼Į åĪĽå»º +ç»Ŀ éĿŀ +Ġcur ator +æĿ¯ éħĴ +Ġmur ine +å¿ » +ĠDist inguished +ç²¾ æ°Ķ +ĠGen re +Ġ'- ' +Ġwors ening +D igits +L AG +.l in +ĠCam eras +Ġjeopard y +ãĢĤ åŃIJ +å°ı说 åľ¨çº¿éĺħ读 +åĭĺ æŁ¥ +æĦı æ°Ķ +ĠRe levant +ĠACC ESS +ĠFe ather +è¡Ĺéģĵ ä¸Ĭ +çļ±äºĨ çļ±çľī +/ set +N AT +ĠD ug +ĠComp ression +Ġfort ress +ç§ijçłĶ æĪIJæŀľ +å¿ĥ çαçļĦ +ä¹° çĤ¹ +æįı çĿĢ +Ġo missions +çļĦ çĹķ迹 +è·ij è¿ĩæĿ¥ +åĨ¬ èĩ³ +ĠOb tain +Ġar b +ash ions +(s z +Iter able +é«ĺ éĽħ +æ¹ĸ çķĶ +éķ¿æ²Ļ å¸Ĥ +ĠDal ton +Ġcon duit +Ġ å¹² +åĿĩ æĺ¯ +(n ames +对 å®ĥ +æĥ³è±¡ ä¸Ń +ĠETF s +Ġoverrid ing +- yl +ef e +æĹħ游 æĻ¯åĮº +éķĩ å®Ī +_SE Q +ãĢĤ 人çĶŁ +ĠUn expected +åĨĻ åΰ +åĪºæ¿Ģ çļĦ +FILE S +ãĢĤ ä¹ĥ +Ġconve ys +us o +erm is +_dis abled +Ġspr outs +Ġdis place +åľ¨ ä¸Ģè¾¹ +. 
Protocol +(ch unk +SV G +Ġha iled +è¿ŀ 绵 +ĉĉĉĉĉĉĉĉ ĉĉĉĉĉĉĉĉ +åIJĦ å®¶ +Jam ie +_ String +b uds +大 å·´ +ib bon +骨 åŃIJéĩĮ +éģĵè·¯ 交éĢļ +ĠAl ter +Ġ åºĶæĶ¶ +åŁİ 建 +ï¼Į身 æĿIJ +ãĢĤåıª è§ģ +E gypt +ï¼Ľ ä½Ĩæĺ¯ +arg ed +ä¸ī éĥ¨ +-b rand +çĴ Ł +Ġz o +CON ST +ãĢģæ°´ æŀľ +ï¼Į好 ä¸į容æĺĵ +an imated +Ġan arch +ĠL arger +ĠP ia +Âł B +è¦ģ ä½ł +åı¹ ä¸Ģ声 +ï¼Į åįİ为 +GT K +( angle +- java +ĠBo ise +åįĥä¸ĩ ä¸įèĥ½ +Ġworld view +ĠSe en +ï¼Į æĪIJ绩 +ä¸į éĹ´æĸŃ +è¡Ģ ä¸Ŀ +çĽ¸ä¿¡ æĪij +æįķ é±¼ +ĠStev enson +ĠCoc oa +( 约 +ĠI brahim +ĠA GA +C ipher +ãĢģ åħ± +顺åĪ© è¿Ľè¡Į +éĤ£ ä¸Ģ天 +æĶ» çł´ +_comp lex +ĠP reference +å¤ļ åľ° +Ph rase +Ġstar ving +ãĢĤ å·²ç»ı +cyl inder +çŃī åIJĦç±» +ä¼ĺ åĬ£ +Ġsin ister +PRO DUCT +Ġillum inate +·· · +Ġ èij£ +Ġ ä¸įçŁ¥ +éĤ£ å°ı +ãĢģ R +Ġsp ared +çŁ³ åŃIJ +ĠCo ke +Ġ æłĩåĩĨ +ĠP att +ãĢģ åIJĪçIJĨ +b ru +l und +èĥ¤ ç¦ +Ġpriorit izing +Ġ 使 +å¹´è½» æĹ¶ +ĠJul ius +Ġqu ench +ival ence +, åıįæŃ£ +Ġis Valid +太 强 +Mutable Dictionary +Ġhover ing +Ġin et +Un ivers +å¾Ī好 çľĭ +è®°å¿Ĩ ä¸Ń +ĠBra ke +Ġadmitted ly +In verse +æľ¬èº« å°± +Ġg erman +èģĶ è½´ +éĽ¾ éľ¾ +Ġem blem +C HE +ä¹Ł åºĶ +---------------------------------------------------------------- ------ +.m ouse +Tom orrow +Ġu it +ä¸ĵä¸ļ ä»İäºĭ +ĠDon ovan +ĠNor ris +οÏħ ν +ĠHoo ver +doctor al +ãĢģ 绣ä¸Ģ +_co ord +hes ians +ĠH eg +ac id +sp ark +æĪ¿ ä¸Ń +_P WR +il ated +Ġconsequ ential +æĸĩæĺİ åŁİå¸Ĥ +ĠDick inson +Ġk ios +DECL ARE +嫦 娥 +s x +èᝠåѦ +sk ins +âĢĿ ï¼ģ +æĪIJ é¾Ļ +oc ent +Ġdel ights +Ġl int +Ġra pt +Ġion ization +ä¸į èĢģ +Ġhard ships +Equ ality +åĨ° åĨ» +åħįçĸ« ç³»ç»Ł +Ġ, \ +Des criptors +an ax +ore station +æĪij 为 +åĪĽæĸ° èĥ½åĬĽ +æ½ľ å¿ĥ +ĠS es +Ġpl ight +Ġoff ences +ACC ESS +Ġpatri ot +ï¼Į åĮĹ京å¸Ĥ +ĠL amar +è½»æĿ¾ åľ° +æĪĸ许 æĺ¯ +èĥŃ èĦĤ +ook ies +_ID LE +Cur r +çķľ çĶŁ +Ġ åĨħ容 +ĠB ravo +ĠV es +ĠCom cast +-d evice +大家 æĹı +åºĬ ä½į +à ĸ +å¹´ ä»» +åİĭ ä½İ +Ġs izable +ãĢģ èͬèıľ +åĨī åĨī +è°ģ çŁ¥éģĵ +/d etails +, 第äºĮ +ç»ĵæŀĦ ä½ıæĪ¿ +éļIJ å±ħ +.se lection +. 
actions +: F +ĠA AC +æµ® éĽķ +ĠLock smith +å°Ħ ç¨ĭ +ĠLeg islation +ĠSpeed way +ĠSt ur +æ²Ļ åĵij +æĥ¨ çϽ +Ġk ittens +æĦı 象 +.N ormal +ã ij +ĠT EM +Def s +æĻºèĥ½ åζéĢł +åľ° æııè¿° +Ġmilit ants +u et +Ġquant ization +ä¸įä»ħ èĥ½ +ĠRad ar +-v irus +ĠDomin ic +çĭ¬ è¡Į +ĠHD L +ĠMight y +ï¼Įä½Ĩ 没æľī +.f low +ĠPil gr +Ġhurd le +' < +å·¥ 人çļĦ +åħ¶ä»ĸ åĽ½å®¶ +ĠAN SW +Ġa ck +ä¸Ĭ 书 +ĠTr ick +æĬ¢ åįł +ä¸Ģ 棵 +使ç͍ çļĦæĺ¯ +æŃĩ å°Ķ +ĠSUP ER +çļĦ è¿IJè¡Į +ef ully +æĹł è¯Ŀ +ãĢģ æ²³åĮĹ +.R ev +Of Type +ureth ane +Ġ** ( +p rior +Ġ 计ç®Ĺ +ãĢĤ æĽ¾ç»ı +符åIJĪ é¢ĺæĦı +åİĮ çĥ¦ +å¾Īéķ¿ ä¸Ģ段æĹ¶éĹ´ +- angle +: true +r st +f light +.t imestamp +++ . +æĦŁåΰ éĿŀ常 +ĠHig gins +æ´Ĺ åıij +Be ck +åħ¨ä½ĵ æĪIJåijĺ +er obic +ĠF alk +ĠE rl +é¢ į +æ¿Ģ æĪĺ +ç§»åĬ¨ åΰ +λ α +ĠNik ki +P riv +ĠL AST +å¸Ī å¾· +,ä¸į å¦Ĥ +姨 å¦Ī +äºİ äºĭ +Ġdis may +ĠPil ates +Ġsurre ndered +ä¸Ģ å·´æİĮ +æ²³ åı£ +_G LOBAL +å¤ļå°ij 次 +ê s +Rec ipes +Ġblu etooth +Ġh umming +é¥ ¬ +ĠInd ies +ĠRep airs +ä¹İä¹İ çļĦ +} ~ +Ġmechan ically +æĻĵ æĺİ +åħļé£İ å»īæĶ¿ +ï¼Į åı¦ä¸Ģ端 +ï¼ĮçŃī æĪij +çĶĺ èĶĹ +èįĴ è°¬ +ãĢĤ æ³ķå®ļ代表人 +è½» çļĦ +Ġkind le +.on Create +d aily +ure en +ãĢĭ 以 +ĠByte Buffer +, åŁ¹åħ» +éģµ çħ§ +Bro ken +ä¼Łå¤§ å¤įåħ´ +ID GE +eterm inate +ĠS AY +æĶ¶ éĵ¶ +æľ¨ å±ĭ +( Required +Prot ected +Ġtrump et +Ġb ac +ãĢģ æĸ¹ +ĠDet ox +-re view +ĠX Y +åı¹ åı£æ°Ķ +ãĢģ 管 +Ġde ceptive +." . +.add Action +A ux +t ow +ĠShe ep +羣çļĦ åIJĹ +ãĢĤéĤ£ æĹ¶åĢĻ +ï¼ĮåIJĦ åľ° +è¿Ľç¨ĭ ä¸Ń +ĠL HC +ä¸ĭ å¿ĥæĿ¥ +ï¼Įä¸Ģ ä¼ļ +tr fs +ĠSh apes +çĽĨ æł½ +ax i +_H AVE +. 
ST +Ġs é +ãĢĤ 车 +ç»ı åıĹ +Jul ia +ĠOw l +ï¼Ī æľīéĻIJåIJĪä¼Ļ +å°ģ åłµ +_arg uments +ï¼Į éĽħ +ĠIm mediate +Ġrev ived +ĠD ante +Est imated +) // +, ä¹ĭåīį +J erry +çļĦ åĮ»çĸĹ +Ġ 忽çĦ¶ +Ġbr ute +é¢ł ç°¸ +ĠNeck lace +è¦ģ 大 +ĠPh ar +Ġbureauc racy +æĺ¯ åħ¨ +é£Łåĵģ èį¯åĵģ +ĠC ups +å°Ĩ ä½ł +(d ict +纳 ç²¹ +ĠGen es +ĠB av +èĢģ äºĮ +.d et +èµ·æĿ¥ åIJ§ +çĶŁäº§ èĥ½åĬĽ +çĶ³è¯· 书 +ĠEnd s +-op ening +et ra +æľī害 çī©è´¨ +éķ¿ åŃĻ +读 å®Į +åıijçĶŁ éĩį大 +æ·¡æ·¡ ä¸Ģç¬ij +ï¼Ľ å¦Ĥ +èµĽ åľºä¸Ĭ +è¿Ł éĴĿ +document ation +gra ve +oret ic +Ġacquaint ance +è¯Ŀ åī§ +Ġn ginx +å¹´ éĩĮ +ä¸Ģ éĹ® +åĴĮ åĨħ +æĢ» ç®Ĺæĺ¯ +ä¸įè¡Į äºĨ +ä¸ĵ项 è¡ĮåĬ¨ +ä»ĸ å®¶ +åĨį åĬł +Ġheart breaking +Ġbo ils +Ġund eniable +èµ· åIJį +ĠFor giveness +ĠTe ens +Ġacqu ires +Ġsnow y +Ġorient ations +ĠU IColor +åħ¨çIJĥ ç»ıæµİ +Ġmurd erer +.setOn ClickListener +ĠCol oring +-c overed +讲 åłĤ +ï¼Į å®īæİĴ +æłĩ 示 +åį¡ å¡Ķå°Ķ +App oint +Ke ith +计 çĶŁ +H ill +ĠOwn ership +ä¸Ģ缴 没 +Ġhom osexuality +ï¼Įåį´ åľ¨ +Ġprov ocative +Ġaut istic +æĸ°åĨł çĸ«æĥħ +ï¼Į çϽçĻľé£İ +ï¼Į åĵ¥åĵ¥ +air a +éĻIJ åħ¬åı¸ +.h ave +两个 åŃ©åŃIJ +çŁŃ çīĩ +Ġment orship +ĠØ ´ +èİ«åIJįåħ¶ å¦ĻçļĦ +, ä¿Ŀéļľ +{ pro +Ġev iction +Ġinc ision +ĠCU DA +oc ations +( pp +ä¸Ń éĶĭ +天 å±± +Ġex e +Ġmo ons +Ġtwe aks +ĠDeuts che +( tt +w aves +ĠC off +æĸŁ éħĮ +Ġporn ography +pl anned +/ All +åħ³ æĿij +人çļĦ æĦŁè§ī +ĠST A +pop up +æĹłæĦı éĹ´ +ãĢĤ çĪ¶äº² +ĠAr range +Ġvict orious +D ad +H at +ad ies +æĸŃ éĿ¢ +æ³¥ æµĨ +Ġunfore seen +åĽ½å®¶ éĩįçĤ¹ +æĴŀ åΰ +Ġisot ropic +çļĦ æĬĢå·§ +åıĹ ä½ĵ +Ġjud iciary +Ġsp anned +Ġent rusted +æ³° åĿ¦ +åĿIJåľ¨ éĤ£éĩĮ +ä¸İ æ°´ +ĠCharg er +Ġg d +å°Ĩ æŃ¤ +èĤł çĻĮ +夸 大 +@ n +à ľ +é»Ħ æ²¹ +idd y +æıIJ çݰ +çİĭ å°ı +· å·´ +åľ° åıijçݰ +å°Ĩ è¿Ļ个 +_ On +ãĢģ äºijåįĹ +éĢĢ ä¼į +åĿIJåľ¨ äºĨ +Ġneuro science +ĠRis ks +Ġutil ised +ĠApp ellee +Ġfemin ism +车 åŀĭçļĦ +æ¯ı å½ĵ +çα 丽ä¸Ŀ +ä¸ĥ çϾ +ç«ĭ å¿Ĺ +ï¼ĮéĤ£ 人 +ĠNe umann +Ġeth os +ĠIllust rated +ä¸į è§īå¾Ĺ +ĠW olves +op he +ull ed +Ġroll out +ĠAv atar +Cr ime +O c +Ġch oke +Ġ& , +å°±æĺ¯ 说 +_S PACE +oly gon +v ana +Ġs size +ï¼Į èι +oc 
yan +太 è¿ĩäºİ +.P h +æĭ³ éģĵ +:n th +( Entity +z c +Ġ å°¤åħ¶æĺ¯ +_OB J +ç»ıæµİæĬĢæľ¯ å¼ĢåıijåĮº +Ġf ug +mut ation +ĠCOMP ANY +峨 çľī +Ð ķ +ĠA ble +æ³ķ åĽ½çļĦ +åħ¶ä»ĸ åľ°æĸ¹ +_H DR +å¹´ ä¹ĭåIJİ +ï¼ģ è¿Ļæĺ¯ +迪 奥 +ĠT ec +没æľī ä¸ĢçĤ¹ +ĠCos metic +ĠCharg ers +ĠV and +åIJĦ æĹı +Ġfl ank +åĪĻ å¤© +н ой +ĠQuant itative +ener ic +-p article +ĠSar as +H ell +å¦Ĥæŀľ æĪij +Ġbl aze +ç¿» 天 +ing ale +èıľ è°± +Ġcolon ization +Ġseas ide +åĪĨ享 ä¸Ģä¸ĭ +ig ators +ãĢģ åĮ»éĻ¢ +ï¼Ł å¦Ĥä½ķ +.s core +Ġcompl ains +ï¼ĮåIJ¬ çĿĢ +Jere my +Ġrespect fully +æĻĴ 太éĺ³ +" mid +s olution +ĠK D +è´¥ åĿı +�� � +ãĢģ åį°åº¦ +æľī å¦Ĥ +åľ¨ åŃ¦ä¹ł +Ġsh aving +åģļ 强 +ĠCont rolled +/s ite +- Level +主è¦ģ åĨħ容 +æ´»åĬ¨ åĴĮ +ĠVo IP +çļĦ åĪĨç±» +åħĪ çŁ¥ +.app le +ĠN airobi +ï¼ĮæľĢ éĩįè¦ģçļĦæĺ¯ +è¿ŀ è´¯ +Ġport ability +Ġrecruit ers +. Options +R K +ç»ıæµİ å¼ĢåıijåĮº +äºķ çĽĸ +_ LED +/ < +æīĢ åIJ« +ï¼Įåħ¶ä¸Ń æīĢè¿° +Ġadapt ability +太 ä½İ +èĤ¡ä¸ľå¤§ä¼ļ 审议 +ï¼Į æĪIJç«ĭ +ï¼Į ä¿ĥ使 +Ġtrig lycer +( head +Ġh f +ĠS cho +为 åİŁæĸĻ +å¤ļ äºİ +ä¸ī æĪIJ +gan o +ĠFl our +ĠBra ve +Ġchees es +" She +äºĭ åħ³ +Ġdivers ify +Se en +ched ules +éĹŃ åħ³ +à³į ಠ+ç» ¾ +赤 裸 +æ£Ģå¯Ł æľºåħ³ +Ġ ******** +ä»ĸ们 两个 +ĠEn rollment +/ ******************************** +Ġ éĤĵ +Ġaccess ion +ĠAp k +-s uper +Z L +Ġ åĤħ +æĺ¯ä¸Ģ å¼ł +Ġaspir in +p illar +è¿ĩ å¤ļä¹ħ +æĺ¯ä»Ģä¹Ī åİŁåĽł +ä¼ļ è§ģ +Ġname of +ET ERS +Ġchocol ates +( html +-p refix +-st atus +éĺ¿ åį¡ +/b ootstrap +(g rid +代è¨Ģ 人 +è¡£ é¢Ĩ +为 ä¸Ģç§į +常 æķ° +èģĶ å¸Ń +Ġcongrat ulate +åģļ 空 +å¿ħé¡» æľī +ãĢĤåľ¨ æĪij +ĠCam el +ĠSt ro +uel ess +-d riving +çļĦ æĽ´ +大 éĽª +俯 çŀ° +ãĢĤ æĥŁ +ĠM isc +æľĪ 以æĿ¥ +har ma +两侧 çļĦ +å·¥ä½ľ æĹ¶éĹ´ +ĠThe odore +_T LS +Absolute Path +ag ate +èĩª èIJ¥ +Ġshe pherd +æµ· 滨 +Ġbi otechnology +hem ian +Ġvagu ely +p ixels +Ġè¿Ļ ä½į +åıijå¸ĥ åħ¬åijĬ +ï¼Į åİĭåĬĽ +in berg +使 åĩº +ĠBegin ner +Ġ ÈĻi +åĩºåİ» çİ© +ĠLim a +Ġantiv iral +æ§ ĥ +æĮij äºĨ +åĸĥåĸĥ èĩªè¯Ń +ä¸ĵç§ij åĮ»éĻ¢ +o arthritis +è§Ĥ å½± +ĠFr anz +Ġadjust s +Emb edded +umpt ech +h fill +æķĪ ç͍ +ĠVis ibility +Ġunders cores +Atl 
anta +Ġ ä¸Ĭè¿° +Ġ éĴĪ对 +æīĵ æĪij +éĥ½æľī çĤ¹ +ĠC ES +ĠW Y +.v ue +Ġcos y +转åĮĸ æĪIJ +ĠCitizens hip +ar oo +æŃĮ åī§ +(P y +Ro oms +çļĦ è¿Ļç§į +(& ( +ĠY acht +æĹ¥ æĪIJç«ĭ +ï¼Į èĻİ +.S ingle +_f it +è¿Ļæł· ä¸Ģç§į +å¿ĥ å®ī +çŀ¬ æģ¯ +æŃ»äº¡ çİĩ +åĬ ¾ +æ¹ĸ è¾¹ +西游 è®° +ith rom +ĠK irst +_C UR +rap ists +Ġair planes +é½IJ å¿ĥ +-s ensors +Acc um +åºŁ éϤ +å´ĩ é«ĺ +æ¤ ¿ +ĠPol o +Ġcapt ions +Ġsoft ball +Ġf ooth +责任 åζ +Ġs rv +é¼ĵ é£İæľº +n ard +Ġtra cer +åĿĩ å·² +ĠBrew ery +åŀĭ åĴĮ +ĠSt ake +ï¼ģ åĽłä¸º +led on +. Variable +k rit +éĩį 度 +åIJij 导 +çŁ³ åύ +Ġcamp site +ĠMart ial +å¿ĥ æĤ¦ +èij¬ 礼 +Ġoste oporosis +q xs +ï¼Į ç§ijæĬĢ +åľ° æļĸ +ãĢĬ è¯Ĺ +Ġμ l +çļĦ å¼Ģåıij +è¿Ļ款 游æĪı +opp el +Cra ig +大 ä¾ł +Ġnarrow er +ĠU rs +m ist +Ġi P +ĠSe afood +è´§ çī©çļĦ +éĺ¶ çº§çļĦ +Ġwrong ly +ï¼Į åĭIJçĦ¶ +ost ring +AC M +æĵħ éķ¿çļĦ +, éļ¾éģĵ +ay ed +åĩĢ æµģåħ¥ +ï¼Į æŃ¢ +åį ŀ +æĶ¶ å¤į +ä¼ł 人 +isc us +åĴĮ ä¿ĿæĬ¤ +ip ients +åĨį æĹł +èĢ¿ èĢ¿ +áĥĺ áĥ +S IGN +ãĢĤ æ²Ī +ĠK E +ĠDr inks +çݰéĩijåıĬ çݰéĩijçŃīä»· +ï¼Į çĿ¡ +Ġhelic opters +ï¼Įè¿ĺ 羣æĺ¯ +ĠCON F +.background Color +x v +ĠC out +Ġgl ac +å®ŀ æĻ¯ +Init ially +Ġon ward +ä¸İ 客æĪ· +æijĩ 篮 +Ġrend ition +ï¼Į å¥ĩ +çļĦ éĺŁä¼į +Ġcom orbid +çݯå¢ĥ åį«çĶŁ +åĮĸ éªĮ +ĠSch a +çİĭ ä¹ĭ +贯彻 æī§è¡Į +orde aux +ĠMar vin +å¹¶ä¸į 大 +Ġie ee +æķ° åĪĹ +Ġav ant +æ·± ä¿¡ +ï¼Į ç͵影 +Status Bar +Ġcru ises +æĸĩåѦ ä½ľåĵģ +Ġdisg race +ucceed ed +ãĢĤ é¢Ħ计 +æĺ¯ä¸ª 好 +Ġnight mares +å¥ĩæĢª çļĦæĺ¯ +ç§ij åijĺ +ï¼Įåıį åĢĴ +Ġå¦Ĥæŀľ ä½ł +åł´ åIJĪ +Per f +| ^{ +um ably +ĠG OV +æ¦Ĥ 论 +_R T +, æĸ¹ +p lectic +Ġdeduct ed +ap o +Ġfun k +(c urr +.sc ss +Ġt ac +æĹ¶ åı¯ä»¥ +éħį æ¯Ķ +ä½ł èĩªå·±çļĦ +Ġad ept +Ġmanif ests +Ġsin on +åľº ä¸Ń +æ¯ı ç§į +åĪ« å¿ĺäºĨ +å¹¶ä¸į ç®Ĺ +ĠSold iers +- interface +çļĦ æ¸ħ +ec ost +Ġgather s +为 客æĪ· +æĸĩ æĺĮ +amp oline +! 
Share +" ', +ĠC ancellationToken +th orne +éľĢè¦ģ æľī +Ġelectroly te +å®ļ æł¼ +æ´ĭ溢 çĿĢ +å·¥ä½ľ ç»Ħ +åĢį æĦŁ +OC US +Ġr aspberry +ä»Ķç»Ĩ è§Ĥå¯Ł +çļĦ èĤ¡ä»½ +ĠB lob +clus ively +åįķ æīĵ +鼶 çĤ¹ +æĹħ游 å±Ģ +åľ°éĵģ ç«Ļ +@b logger +âĢĻ all +ä¸Ĭ 大åѦ +åĭ¾ ç»ĵ +Ġpro actively +ä¾Ľ éĶĢ +çݯå¢ĥ å½±åĵį +inds ight +Mort gage +, ä¸įåı¯ +ĠS ick +常 温 +å¤ļå°ij 个 +Dec oration +ĠNurs ery +Ġdo ve +.p ublic +请 æĤ¨ +人çļĦ æĹ¶åĢĻ +ãĥ ĵ +vent ed +许 许å¤ļ +ä¹Ļ åŁº +åĬ² çļĦ +ous y +_c ross +Ġad hered +åIJĮ 为 +åĩĮ åħ° +ãĢĤæľ¬ å®ŀç͍æĸ°åŀĭ +çļĦ主 æµģ +ĠDeb ate +ĠVEG F +ï¼Į ç¨įå¾® +离 æķ£ +ĠSupplement al +G ravity +çļĦ æŃ£ç¡® +æĪIJ 亲 +éģĹ ä½ĵ +人工æĻºèĥ½ æĬĢæľ¯ +Reb ecca +.get ElementsBy +ä¸ŃåĽ½ ä¼łç»Ł +ĠColl ision +ç»Ħ åĽ¢ +N IC +n ah +ê me +Ġro be +ï¼Įä½ł èĩªå·± +-W est +Integ rated +ĠBulld ogs +. Stop +ĠF aster +rem ain +æĬ¢ æĸŃ +äºĮ çĪ· +交 ç»Ļä½ł +Ġvalid ating +ĠSa unders +ĠBab ies +- ring +åİ» åĵªåĦ¿ +ok ers +Acc uracy +åı¸ä»¤ éĥ¨ +" name +Ġ{ }) +以 对 +ä¿ĿæĬ¤ åĴĮ +å¤ļ å§¿ +PR I +æ¡Ĥ åĽŃ +( Text +Ġ 建ç«ĭ +楼 å®ĩ +纺ç»ĩ åĵģ +ãĢĤ åIJ¬ +æĭī æĭī +Ġre agent +Ġsm elled +ĠOn ion +ä½ıæĪ¿ åĴĮ +Sustain ability +çļĦ éĶĻ误 +å¿ĥ å¦Ĥ +Man chester +L K +v P +çĶŁ é²ľ +æĸ° æĺŁ +Ġac ronym +Ġlo oph +æī© 大äºĨ +åŃĺåĤ¨ 空éĹ´ +ĠSport ing +il and +ĠP redictions +çĿ ¾ +åįĬ åĬŁ +Ġluck ily +ĠTh orn +äºĴ åĪ© +ĠBlack s +itt ance +çļĦ éĿĴå¹´ +éĩį ç͍ +æ¯Ķè¾ĥ å°ij +Ġmillenn ials +Ġcommem or +ä¸Ģ è·¯ä¸Ĭ +ĠA licia +em erg +com plicated +空 åīį +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ +äºĭ项 çļĦ +Ġ 以ä¸ĭ +ĠMC U +Th umbnail +\ overline +ĠS SC +ĠDirect ed +Vertex Attrib +çķľ ç¦½ +ĠÑĤ ак +ĠPrel iminary +çĤ¹ åŃIJ +AT M +/d L +Ġty rosine +ĠC arly +=" % +ç²¾ åħī +ä¸Ģå®ļ ä¸įä¼ļ +ĠComb ination +N om +çļĦ 顶端 +ãĢģ æ¯Ľ +åĪ°è¾¾ äºĨ +她 å·²ç»ı +mail to +cap ital +Ġ ........ 
+æłĩ æĺİ +e ering +Re peated +éģ´ éĢī +ĠC SA +Ġhydroph obic +ãĢĤä»ĸ 认为 +çĭIJ çĸij +è¿ĩæķı æĢ§ +_STAT S +j l +Ġl c +Ġg f +æŃ£ 绣 +èĢģ åħµ +访 客 +TA IL +ĠHydro gen +, æĺ¯ä¸Ģç§į +F t +ï½ Ĵ +-f ast +å¸Ĥåľº åĴĮ +æĹ¢çĦ¶ å¦ĤæŃ¤ +break er +ï¼Į æĽ´å¤ļçļĦ +åı¤ æľ´ +æ¯į åIJİ +æĬĹ åİĭ +æĶ¶èİ· äºĨ +ĠTrou ble +ï¼Į 个个 +è¢ Ħ +æīį ä¼ļæľī +Ġstart ling +bl as +Ġredist ribution +ĠTun isia +Ġw m +ï¼Į è¾Ľ +Ġtent ative +/ project +ĠN ach +å°ı åºĹ +ãģ ³ +书 é¦Ļ +强度 é«ĺ +, æľįåĬ¡ +ä¸Ĭ æł¡ +åķ ı +D oug +Ġvig orously +çļĦ åİŁçIJĨ +å¹¶ 举 +ç³»ç»Ł çŃī +ï¼Į åįķä½į +ĠC HA +.s uper +Ġprep ayment +v ide +ï¼Į 建çŃijéĿ¢ç§¯ +ig rated +第ä¸Ģ ç§į +æıIJé«ĺ åΰ +çĶŁ æ°ĶçļĦ +两 åı£ +ĠUS PS +æ®ĭéħ· çļĦ +éļ ½ +æŃ¦åύ è£ħå¤ĩ +éľī èıĮ +驳 åĽŀ +Ġalbum in +å°ı说 ç±»åŀĭ +Ġrecon cil +çĿ ¬ +å¹³ äºĨ +Ġpay pal +å±ħ çķĻ +èĵ Ł +ä¸Ģ æĥ³åΰ +以 举 +å·¥ä½ľ å²Ĺä½į +.group Box +ç´§ 缺 +èĦļ è·Ł +Ġvalid ates +æĭ¥æľī çĿĢ +Ġawa ited +ĠPar cel +Walk er +åύ çŃī +oph ile +寻 è§ħ +å¤ĸ æķĻ +Ġmon omer +UT R +Ġcomfort s +çļĦ 汽车 +æİ¥ åIJ¬ +eval uation +ãĢĤ åºĶ该 +Ġal ph +åIJij æĤ¨ +æ·± èĢķ +åħ´è¶£ çα好 +Wire less +` * +å¼Ģ æĮĸ +eb a +çłĶç©¶ 室 +ĠPr att +èµı è¯Ĩ +ï¼Į å¥ī +è¿ŀæİ¥ åľ¨ +ç½Ĺæĸ¯ ç¦ı +ä¹Ł 纳 +Ġso fas +çŃĴ ä½ĵ +Ġhybrid s +Ġaspar agus +è¿ĩ æĹ© +Ġcr umbs +æīįæĺ¯ 羣æŃ£çļĦ +ĠCS F +[ current +ä¸įçŁ¥éģĵ 为ä»Ģä¹Ī +(sub ject +ï¼Į å·¥ä½ľäººåijĺ +ĠV ed +æį¢ ä¹ĺ +Ġcorrid ors +Ġunderest imated +ãĢĤ éĥij +Ġu long +ĠF ang +Ġsym posium +æ»ij æĿĨ +çļ® ä¹¦ +ĠDesign ers +Ġj umper +AM B +ñ os +å¤ļ ä¸ĢçĤ¹ +.M vc +å̾ åĢĴ +赫 è¿ŀ +Ġlibr arian +} u +un ger +ä¿¡ ä»¶ +Ġbest s +ĠNor wich +Ġexplor atory +ad ores +-s uite +éĽª çϽ +.cl uster +¬ ģ +åĴĮ èĩªå·±çļĦ +åĩł 大 +P arsing +ï¼Įéĥ½ 没æľī +zz zz +ĠP arenthood +_t b +Ġven om +ĠHun ts +ĠDiscount s +- activated +C ow +ass andra +,ä½ł ä¼ļ +æĶ¶ è´§ +è®° äºĭ +: G +åħĥ é¦ĸ +Ġmicrobi ota +çķĻ æľī +Ġbog us +ãij ¹ +w hel +Ġcom for +æŁĶ å¼± +çļĦä¸Ń å¹´ +åįķçĭ¬ çļĦ +e am +æĹ¶åĪ» åĪ» +ĠY ours +Ġpo op +åIJ¯ 迪 +æīij éĿ¢ +æĢ» éĺŁ +ï¼Įä¹Ł æĹłæ³ķ +{sub figure +-s ymbol +ç¼Ń ç»ķ +h aha +ĠP ound +En velope +ãģ¨ ãģĦãģĨ +Ġun ix +æĹł åĬĽçļĦ +-h 
idden +éľĩ 天 +Ġch ond +Ġ é϶ +__ (( +.n s +ĠRES P +ARI ANT +æĹ¶ä¿± è¿Ľ +Ġc aches +ï¼Į ç½® +äºķ ä¸ĭ +æĭİ çĿĢ +ï¼Įåıª åī©ä¸ĭ +ĠTim ing +Ġclock wise +æīĶ æİī +Ġpuzz led +, åŁºæľ¬ä¸Ĭ +е Ñĩ +ç¢Ĺ éĩĮ +. over +æĬ½ çѾ +éĢĤåIJĪ äºİ +Ġmar bles +comp iled +ĠMich a +èĢĮå¾Ĺ åIJį +å© µ +ĠConf erences +å¿ħ ä¿® +Ġ ® +Ġpar ab +(c opy +(w orld +æĪij们 ä¸Ģèµ· +}) ^{ +æĺ¯ åħ³äºİ +Ġst abbed +åĴĮ æ°Ķ +, åıĮ +q ing +æĢĿ æĶ¿ +åIJĥäºĨä¸Ģ æĥĬ +æĹ¶ æĹł +çļĦç¬ij 声 +å®ļ åģļ +åIJį åī¯åħ¶å®ŀ +ierarch ical +ĠSp ike +_in line +ĠCl int +æĺ¯ä¸Ģ èά +Sur f +ĠKim berly +ĠLeb anese +Ġengra ved +Ġad mon +è¡Ģ çĹħ +Q å°ı说ç½ij +ç»ı åķĨ +aryn geal +ãĢģ èįī +ï¼Ī åĽ¾ +ãĢĭ èİ· +AG EMENT +/p df +ãĢģ å®īå¾½ +é«ĺ é¾Ħ +,ä¸į åĨį +Ġren ters +æIJĤ çĿĢ +ĠÐ ĺ +诺 夫 +åįģäºĮ äºĶ +æĢ§ çĶŁæ´» +Ġclos ets +Ġcas p +Ġt iers +æİĴåIJį 第ä¸Ģ +ĠCo aches +ï¼Įå¿ħé¡» è¦ģ +ĠO sc +Request Mapping +æŀª æīĭ +ĠVel vet +ĠB ien +ï¼Ī www +Ġallow able +æĦıè¯Ĩ åľ° +æľĢç»Ī è¿ĺæĺ¯ +é̼ 羣 +Ġapp lause +less sim +A i +å®ī æ£Ģ +Ġinf low +/\ / +ĠNag ar +Ġpre ached +åĨħ åĬĽ +ï¼Į让 åŃ©åŃIJ +Ġfool ed +âĸĪâĸĪ âĸĪâĸĪ +Ġbik ini +Ġuter us +o ire +èĩª æĭĶ +ĠIs abella +SHA RE +æľīäºĽ åıij +åIJ Ĩ +ï¼ĮæĪij们 éĥ½ +å°ļ æľī +ĠMal i +ĠSP SS +ï¼Į身 å½¢ +. 
Handler +Ġbut cher +ä¸Ģ个 好çļĦ +ux tap +æĪĪ å°Ķ +Ġant iqu +Ġarm our +IM UM +å°ĺ æ²Ļ +- { +ï¼Į èݱ +çļĦ å½±åŃIJ +LET ED +ĠEpid em +åįı åĴĮ +.res ources +V ectors +ĠNo ir +Jun ior +ĠTo String +-in line +åĪĨåŃIJ éĩı +æ³ķ æĭī +yt orch +åģı è¿ľ +Ġfort unes +çļĦ çľĭæ³ķ +æ£Ģæµĭ 仪 +p olar +åĴĮ æĶ¿æ²» +çĶŁäº§ åķĨ +Ġhot spot +çļĦé«ĺ 端 +ĠCh ill +Index Path +驱åĬ¨ æľºæŀĦ +or ama +ï¼Į åıijè¡Į +éŨ å¸Ĥ +çģ« èᝠ+çĿ¿ æĻº +åĨ² åİ» +, éľĢ +ĠB UILD +ĠWin chester += M +_N S +ĠM MC +(g ame +æĭĨ åĪĨ +// **************************************************************************** +èIJĮ èIJĮ +ç͍ æĪ¿ +Ox ford +Ġtire lessly +ĠH g +_r and +ĠNews letters +ĠCG I +Cook ing +åıĹ éªĹ +æ¯ı个人 éĥ½æľī +æłħ æłı +ĠR outine +åIJĮ è¡Įä¸ļ +æĬķ éĿł +è§ĦåĪĴ 设计 +åľ¨å®¶ ä¸Ń +Ġ çĶŁæ´» +ĠG low +éĺħ è§Ī +è·Į èIJ½ +踢 çIJĥ +z in +Ð ij +å¼ķ åĩº +-se parated +urope an +ĠProp he +ĠMald ives +ï¼Į æľĭåıĭ +ä¸į æ³ķ +ä¸įè¿ľ å¤ĦçļĦ +-f light +ĠReb el +ĠExpl ained +Clean ing +åĪĨæĶ¯ æľºæŀĦ +ä¼ļ éĿ¢ +Ġdesper ation +åĨĻ äºĨä¸Ģ +ç»ıæµİ çļĦåıijå±ķ +re ment +ä¸ī åı£ +çIJĥè¿· 们 +ch itz +ĠMor se +ĠSocial ist +Ġv aping +âĢĻ n +ĠW anted +è¯Ŀ çļĦ +çŁ³ åĿĹ +Ġmut ated +çļĦå·¥ä½ľ åİŁçIJĨ +H app +ä½ľ è¯į +Ġco ined +æ°ij åħµ +Ġ{ {{ +iz umab +ç»ĵ çĤ¹ +Ġclim bs +u pe +ï¼Į æıIJä¾ĽäºĨ +Ġ\ @ +ĠTo ilet +aut ore +( Abstract +ĠL ydia +ĠK ush +O LEAN +b ath +Ġinv ade +çļĦ è¾ĵåĩºç«¯ +pr on +æĶ¯ éĺŁä¼į +说è¯Ŀ çļĦæĹ¶åĢĻ +Web Socket +ĠA pex +orb ed +Ġbib li +- ROM +以 ä¸Ģç§į +åĦ¿ äºĨ +åºĦ 稼 +ic hen +pl ings +up dates +F ade +s ons +ï¼Į æħ¢ +.ch at +_ph ase +@ t +g il +sp ir +Set Name +ãĢģ çļ®èĤ¤ +å·¥ä½ľ ç«Ļ +æ£Ģæµĭ è£ħç½® +ä¸įæĥ³ åĨį +ï¼Į ä¸ľè¥¿ +ver bal +游 è¡Į +ãĢĤè¿Ļ æĦıåij³çĿĢ +ä¸Ģéĺµ åŃIJ +ĠHa irst +缴æİ¥ åľ¨ +Ġpo ignant +顾 ä¹ĭå¿§ +èĹı åĵģ +ठ¹ +éĩį å·¥ +-b all +ï¼Į è¸ı +ĠF iona +ä¸Ĭ 说 +éĥ½ä¸į ä¸Ģæł· +os ide +.Anchor Styles +Ġ åįłæ¯Ķ +ä¸ĭ 线 +çα ä»ĸ +Ġever lasting +SP A +çļĦ åIJĦ项 +çļĦ第ä¸Ģ æŃ¥ +_ activity +ãĢĤ åıĮ +ï¼Į å¡Ķ +Ġj oking +ï¼Įä¸įåIJĮ çļĦ +ĠOut line +ĠRob bins +ä»ĸ们 ä¹Ł +ï¼ĮæĪij å¿ĥéĩĮ +ĠæĪij æĺ¯ +åĽ¢éĺŁ åIJĪä½ľ +ĠAnt arctic +Ġmo et +Ġe ject +æĥĨ 
æĢħ +éĥ¨ 主任 +ĠApp reciation +ï¼Įçľĭ å¾Ĺ +çijŁ çIJ³ +ĠGov t +æĥħ è°Ĭ +æ·±åħ¥ çļĦ +Ġty re +æĪ· éĥ¨ +åŃĹ å½¢ +ĠSom ew +ĠMc Int +ĠSupp ress +æŀģ æĢ§ +æ²ī é¦Ļ +uts ch +ĠGuard ians +åĬĽ 强 +åĩł ä¸ĩ +rid den +å¦Ĥä½ķ å¤ĦçIJĨ +ĠSO UR +L W +ĠA UC +^ *( +Ġl cd +æ¯Ķè¾ĥ ä½İ +ĠPres idents +ï¼Ł ä½Ĩ +éŃĶ æĹı +v it +æĹł æĤĶ +Ġant ip +æŃ£å¼ı çļĦ +g ens +Ñĥ Ñģ +èĤ¡ä¸ľçļĦ åĩĢåĪ©æ¶¦ +ĠF ever +). $$ +çϾ ç±³ +Ġwater color +ï¼Į请 æ±Ĥ +èIJ¥ä¸ļ ç¨İ +ĠEnc ourage +Ġpolymorph ism +. Visual +C ourses +ĠâĢ ł +ĠShow ing +Ġchron ological +ĠJ elly +lis le +Ġfrag rant +ë Ŀ¼ +In come +亲æľĭ 好åıĭ +ĠA FL +ĠFollow ers +ï¼Įæķħ èĢĮ +çļĦé«ĺ ä½İ +ó s +[ root +Ġ åĨħåŃĺ +åľ°æĿ¿ ä¸Ĭ +å¤ĸ è²Į +_d ark +åĵªéĩĮ æľī +æľĿéĺ³ åĮº +ï¼Į åı¸ +ĠReview ed +åŃIJ åħ¬åı¸çļĦ +æ±Ĥ åѦ +å®ľ 宾 +ĠWood y +ï¼Į æĭ¿åĩº +,æĪij åľ¨ +ä¸įéĶĻ äºĨ +R oss +çݰ åŃĺ +b race +Ġ" << +Git Hub +k or +è¢ Ĥ +使 é¦Ĩ +çIJĨè§£ äºĨ +çĭ¬ç«ĭ æĢ§ +åŀ« çīĩ +åıĹ éĤĢ +· ç±³ +Direct ories +æĿijæ°ij 们 +( å¸Ĥ +çĿ ½ +ĠSt all +建设 å·¥ä½ľ +Ġupp ercase +) æľīéĻIJåħ¬åı¸ +Ġ å¹¿ä¸ľ +å±± å·Ŀ +çļ® çĤİ +ĠF FT +ok ane +å¾Ĺ æĿ¥çļĦ +ĠK raft +请 示 +Rec ursive +-up date +Ġlab ore +? 
Let +J E +h ua +an us +Ch all +ĠSh ine +æĺł åħ¥ +ĠR K +ĠSt able +а еÑĤ +Field Value +Ġtransl ators +Ġd vd +å²ģ å·¦åı³ +J T +k ern +ï¼Į åģľ +çļĦ 宽度 +ĠH in +Ġgl are +Ġunn ecessarily +天 å¸Ŀ +æĸŃ ç»Ŀ +ĠCont rast +çŁŃçŁŃ çļĦ +Ġpestic ide +Ġcytoplasm ic +ï¼Į 论 +ï¼Į æģIJ +ä»ĸ ä¸İ +å¦ ¤ +åIJij 大家 +Ġval ence +br ates +Exp ansion +ĠEV s +_for ce +Ġaure us +ï¼Į 起身 +Ġdress er +ce iving +ĠAss isted +åı¦ä¸Ģ ä¾§ +.qq qxs +start ed +èĤ¡ç¥¨ çļĦ +ak ar +é«ĺä¸Ń çĶŁ +] _ +E dd +_ usb +a N +_t ax +ĠE CC +é¦ĸ ä»ĺ +Ġfront end +ĠJak arta +ãĢĤ éĻĨ +un expected +ä»ĸ éĥ½ +.B atch +c rop += S +st em +ä»ĸ ä»İ +对 åı£ +转 转 +conf irmed +Ġε ίναι +ä½¼ ä½¼ +ï¼ĮæĪij èĩªå·± +è¿Ļ个 è¡Įä¸ļ +åĮ Ŀ +Ġcl ich +为 è¿Ľä¸ĢæŃ¥ +è£ħ åľ¨ +ĠF rem +âĢĶ for +aut ion +æī« è§Ĩ +çłĶåıij ä¸Ńå¿ĥ +忽è§Ĩ äºĨ +g ateway +_p d +ãĢģ æĸ°éĹ» +èĩª ä½ľ +ev o +æ®· åĭ¤ +ï¼Į åŃ£æŀ« +纯 æŃ£ +K ansas +ĠOper ational +éĢīæĭ© åIJĪéĢĤçļĦ +;;;;;;;; ;;;;;;;; +çĹħ 害 +Ġtable Name +ĠAssert ion +ļ è¯Ń +åıĬ æĹ© +åĿIJ èIJ½åľ¨ +py x +çŁ¿äº§ èµĦæºIJ +Ġp agination +èµ· è§ģ +åħ¥ èĤ¡ +el im +oc arcin +çĥ¹ è°ĥ +Ġfel on +æĹ¶ äºĭ +ĠY Y +amb ique +Ġstr cpy +Log ged +ĠM ey +æŃ» æŃ»çļĦ +å¾Ģ åĽŀ +ĠOrgan izational +带 è´§ +Ġà § +形象 çļĦ +ĠSuggest ions +/ path +åĵį 亮 +Ġps oriasis +æĪIJ å¥Ĺ +.D esign +- Class +çľģ ä¼ļ +ĠEnc ryption +Ġm V +Ġ{ ! +ĠHe ap +åĪĹ å®ģ +Ġviol ently +Ġsi RNA +ĠAccept ance +q d +Ġt ai +ï¼Į ç»ıèIJ¥ +ï¼Į åѦéĻ¢ +ĠB ones +user id +Ġinterven ing +åIJĮ è½´ +\[ \ +In form +åĮħ æīİ +Ġsoc i +.set ter +ठª +ĠGovern ments +为 人æ°ijå¸ģ +ĠпÑĢ Ð¾ += -\ +ï¼Į 人çļĦ +å°± çľĭåΰ +好 åĩłæ¬¡ +åĪ© èIJ½ +éĢīæĭ© åľ¨ +缴æİ¥ ä»İ +ï¼Į让 大家 +Ġjav afx +ĠCh ili +伸 éķ¿ +è§Ħå¾ĭ çļĦ +$ query +åıij èĦ¾æ°Ķ +Ġaut ophagy +ãĢĤä½Ĩæĺ¯ åľ¨ +Ġimmunos upp +< byte +() == +SS D +Ġax iom +cr ime +Open ed +Ġch ops +Ġun identified +è®Ń æĸ¥ +她 对 +å·² å°Ĩ +е ÑĪ +################################ ################ +Ġpizz as +ĠE SL +", $ +Ġcond os +ãĢĤ ç»Īäºİ +of lu +-h ot +Flex ible +ver m +èIJ½ åIJİçļĦ +ĠIV F +, åĮ»çĶŁ +éĺ³ åĮº +ĠSN R +z mann +. 
Control +ĠAl mighty +ĠACC EPT +ĠH ulu +å¤ĸ å¸ģ +ç¬ij äºĨä¸Ģä¸ĭ +âĻ ª +.h h +ĠMat hematic +FE ATURE +el ed +Ġprov oke +é£ŀ äºĨ +EF I +_P OINTER +Ġskeptic ism +åIJĮ å±ħ +å¼ł æģĴ +Ġcar ve +ĠREAD ME +) n +åĴĮ é£İ +Ġmess ing +Ġwre ath +被 èĩªå·± +å°ı çIJĥ +ount ains +æĸ¯ åį¡ +Ġbre thren +ĠCO RE +ï¼Į æİĴéϤ +åݦéŨ å¸Ĥ +(time out +oc occus +éĩij ä»· +Ġsw irl +- lock +ĠWolf e +_PARAM S +çĶŁäº§ 设å¤ĩ +æ²ī 声éģĵ +ĠSch ul +/qu estions +天 人 +Ãł n +Ġ æĢ»ä¹ĭ +ï¼Į 亲èĩª +ĠI OS +该 æĿij +çľģ å¿ĥ +ï¼Įæľ¬ æĸĩ +ĠMult imedia +çļĦåĨħ ä¾§ +P olar +å¼ł çħ§çīĩ +æ²»çĸĹ çϽçĻľé£İ +$ y +- variable +_ ES +è¿Ļ ä¼ļåĦ¿ +éĽĨ èµĦ +. Interface +æĪ· æĪ· +éľĢè¦ģ 使ç͍ +éĢģ æĸĻ +Ġcard inality +ĠPar al +麦 å½ĵ +Ġm ing +ãĢĤ æĻļä¸Ĭ +ï¼Į æĺ¾ +Service Provider +Ġsil ky +ĠD over +nt e +对 çĹĩ +åı¯ä»¥ çͱ +Ġsign ify +ĠOcc asionally +F el +åΰ ä»Ģä¹Ī +两 ä¸ĭ +两 åįĬ +SY STEM +è¾¾ æĭī +ĠBi omedical +Ġas cent +ĠF are +ä¹Ł å¾Īéļ¾ +H mm +Ġsub lime +æĬ¤ æ³ķ +åĴĮ ç¾İ +ä¸ī æĿ¿ +Ġprotest ing +F UNC +c oding +åIJĦ ä¸į缸åIJĮ +ED S +ãĢģ 磷 +ĠR ach +ä¸Ń å¾Ĺåΰ +æľ¬ ä½ĵçļĦ +Ġmon oton +äºĶ åĽĽ +å¯Ĵ åĨ° +çļĦåľ° éĿ¢ +éĥ½æĺ¯ æĪij +.Is True +ur ated +ãĢģ åıĺ +Ġvis ceral +ê ncia +Ġclim ax +Ġfru ity +* ]{}, +Certain ly +Ġn ä +马 å°¾ +_C USTOM +æľįåĬ¡ ä½ĵç³» +æĵ ¢ +Ġrefurb ished +, 表示 +Ġ ç¦ı +ãĢĤ æĪªæŃ¢ +è¡¥ èĤ¾ +顺åĪ© å®ĮæĪIJ +天æ°Ķ é¢ĦæĬ¥ +ĠTyp ed +强åζ æĢ§ +, éĩĮéĿ¢ +æķ´ä¸ª ä¸ĸçķĮ +,以 便 +æľŁéĻIJ åĨħ +æĦŁ æĥ³ +ital ic +æĢİä¹Ī è¿Ļä¹Ī +èİĨ çͰ +Ġf ury +åIJĮ åŁİ +ĠBe ard +Ġgar ner +ãĢģ ç¬¬åĽĽ +Ġgover ns +Phil adelphia +. 
attach +ä¸ī åŃ£åº¦ +æİ§åζ äºĨ +å¸IJ åı· +Ġec zema +ig ans +æµ· åĨħå¤ĸ +D SP +H ack +ten cent +æ°Ķ éģĵ +ç³Ĭ æ¶Ĥ +Ġaut or +åį« åĽ½ +_N V +ro tt +us cular +Ġag ony +"> " +设å¤ĩ æľīéĻIJåħ¬åı¸ +Field Type +åIJ¯ ç¨ĭ +Ġ è¯Ńè¨Ģ +é£ŀ æľºçļĦ +ĠString Buffer +ĠShould er +ent ary +çĭ¬ åĪĽ +td own +Ġabsor bs +/ auth +ï¼Į 交æĺĵ +Âł ä½Ĩ +ĠI W +åĶ ı +çļĦ缮çļĦ åľ¨äºİ +ãĥ Ģ +ĠSm ash +ĠTw ist +à ¹ģ +ro cess +Ġg b +ï¼ĮåĨį å°Ĩ +ĠRob bie +Ġи н +_h i +asc us +ĠCy rus +åĽĽéĿ¢ åħ«æĸ¹ +l w +éķ¿ éĿĴ +çĹħæ¯Ĵ çļĦ +æĮ¥äºĨ æĮ¥æīĭ +reg istration +éģĵ å®¶ +两 å¹´çļĦ +ä¸ĢäºĽ çļĦ +åľ¨ åĮĹ +ĠF ACT +缴æİ¥ è¿Ľè¡Į +åIJ«æľī 丰å¯ĮçļĦ +Ġsucc umb +Ġb m +ĠH Y +.m etrics +Ġpost graduate +åĸľ æĢĴ +Ġsales man +F ear +K eeper +åħī 顾 +(m atrix +èģĮä¸ļ åѦéĻ¢ +çά åΰ +ç¦ º +Tra il +W ays +éªĹ å±Ģ +Ġclo ak +ï¼Į åį°åº¦ +ãĢģ æŀľ +èĭ¦ å¿ĥ +çļĦå°ı åŀĭ +. le +被 éħį置为 +篮 åŃIJ +Ġimmun ization +_ST EP +N t +Û ģ +ĠH ors +.R el +Ġsucc inct +对 åķĬ +")) ; +ï¼ĮæĪij åį´ +Ġconfig s +Ġrot ates +Ġparan oid +æ©Ħæ¦Ħ æ²¹ +ãĢģ å¤ĦçIJĨ +est o +gram mar +åĶIJ è¯Ĺ +me as +Ġcomp ounded +ĠNeg ro +ĠT omb +åĴĮ 浩çī¹ +å¾Īå¤ļ äºĭæĥħ +Mut ation +e ax +ĠP AP +ĠFund amentals +ä¹Łä¸į è¡Į +ung sten +ãĢĤæĽ¾ ä»» += B +{ definition +åľ° åĽŀçŃĶ +ä¿¡ç͍ è¯Ħ级 +ĠPom pe +- responsive +ï¼Į 太åŃIJ +ĠC ary +Ġbi j +ĠJ ain +ĠG rape +Ġdis co +è·³ æ°´ +éĹ ° +éĿł èĩªå·± +T reat +ãĢģ åĽŀ +Ġcomplex ion +ãĢĤ åħ¬ +ĠInc orporated +éĽĦ å¿ĥ +_ep i +ĠLen ovo +Ġgel atin +åı¯ä»¥ éĩĩç͍ +æ³¢ 士顿 +ä½ł åĨį +( Source +ä¸İ ä¸Ń +ãĢĤ èĭ±åĽ½ +ĠD ag +æľ¬ éĻ¢ +头 åĴĮ +RE CT +çĥŃ éŨçļĦ +éĻĪ æµ©çĦ¶ +Pe ak +Ġ è¨Ģ +ĠÐ IJ +æİªæīĭ ä¸įåıĬ +åIJĮ 级 +ï¼Į ä»Ļ +Ġun equiv +产 äºİ +Ġdi aphrag +_\ - +Ġvegg ie +æĢ ¦ +Ġforb id +Ġfor s +ĠM p +ĠLars on +` t +_ rot +Ġ åħ¶å®ĥ +im ulated +æĭ¼ äºĨ +, NULL +èī ® +éĩįçĤ¹ é¡¹çĽ® +ednes days +pub lisher +ä¸Ģ æľ¬ä¹¦ +Ġquiet er +Ġon cology +åģļ äºĽ +Ġdet ract +课ç¨ĭ çļĦ +Ġtor so +Ġsuck ing +æĹł æİª +ç»ĵæŀĦ ç®Ģåįķ +Ġdetect ives +ĠB ands +Ġsp illed +åıª 管 +举 å®¶ +å¸Ī å¾Ĵ +ĠF owler +æī¾ äºĨ个 +ips is +è§īå¾Ĺ æĪij +.pro xy +ays cale +å®īè£ħ æ§½ +çĻ º +æīĵ åºķ +èĵĿ åħī +éĢĨ è¢Ń 
+èĻļæĭŁ æľº +( App +Ġchar ismatic +_or igin +ĠDent ist +\ c + ħ +Ġ æľīäºĨ +ãĢģ æĶ¾ +é¦ĸ å°Ķ +ape ake +è·³ 转 +Ġp unt +å®ŀ å¤Ħ +Ġserv ic +Ġref s +Ġlast ly +Ġfamil ial +Ġed ema +Ġcal am +ĠT PM +çĸ £ +let able +ä½ķ åľ¨ +ĠN LP +ï¼ĮèĢĮ 她 +ill ow +å¤ĸ åĬĽ +è¦ģ ä¿ĿæĮģ +Ġsymbol ism +Ġfro gs +Ġ è·Ŀ离 +梦 çļĦ +éļIJ åĮ¿ +Ġlos ers +ĠR TP +Ġcal ves +çĶŁäº§ æĢ»å̼ +ĠDeal ers +ate red +åıĬ çļĦ +æĪ¿ åŀĭ +bat is +Ġmod ulated +Ġshort ening +OK IE +æĶ¶è§Ĩ çİĩ +B rid +X amarin +ab an +ĠCrypt ocurrency +ĠP adding +åĬĽ 士 +ĠArc ade +{tikz picture +Ġinf ancy +æ¦ Ī +Prob ability +ĠS UCCESS +ï¼Įåľ¨ éĤ£éĩĮ +ĠUIT ableView +客æĪ· åľ¨ +_ he +æľĢ æ·± +åŁİ ä¸ĭ +ä¼Ĭ å§ĭ +ar Xiv +qu ets +ãĢģ å·¦ +强 å¼± +çķĻ å¿ĥ +ç»ĵæŀľ æĺ¯ +èĵ¬ èݱ +ä¸Ģ 女 +Ġgang s +ĠD IR +Ġcr us +ĠSl ave +ĠBru ins +Id eal +aut hentication +V N +ãĢģ å¡ijæĸĻ +ãĢģ åĨľæĿij +St ick +以 西 +Ġte aser +ä¿Ŀ è´¨ +åįĹ åĮº +ome gran +Ġre habilit +Adv antages +aken ing +ĠCr imes +l augh +N athan +Ġ åIJī +en ko +-- " +ï¼ļ éĻĪ +èĩª æŁ¥ +ä¸ĩåħĥ 以ä¸Ĭ +ĠTutorial s +, 竣çĦ¶ +Ġ æ¼Ķåijĺ +-aware ness +çIJĨ论 çŁ¥è¯Ĩ +' ^ +Ġc ada +è¦ģ åIJĥ +Hy brid +ĠY in +_POS ITION +âĸ ª +æĩĴ æĥ° +ĠP erc +ä¹ĭ æŃ» +-cl i +Ġ æīĢè°ĵ +äºĨ æĸ° +_n amespace +opt imal +æ¯į æł¡ +D ance +游 èµ° +å·¥ä¸ļ ä¼ģä¸ļ +ĠView er +åįķ çļĦ +al om +ĠOut come +Ġstream ed +éĵº 天çĽĸ +çĩĥæĸĻ çĶµæ±ł +. Reset +Ġdel uxe +Ñĥ н +è¿Ł ç¼ĵ +ĠProt ective +çħ§é¡¾ 好 +READ Y +ãĢģ ç»´ä¿® +å®¶ æĿij +ä¼ĺåħĪ èĤ¡ +å°Ĩ ç»ĵåIJĪ +åĢĴ 车 +Ġchem o +ĠStraw berry +ĠP riv +pro per +ANN OT +Ġundergo es +- common +æ¯Ľ æ¯Ľ +交æį¢ æľº +Ġ æĪ´ +T ai +ãĢĤ å·´ +ï¼Įè¿Ļæĺ¯ æĪij +Ġm f +åľ° ç«Ļåľ¨ +å¿ħçĦ¶ ä¼ļ +. 
upload +ãĢģ ãĢIJ +åīį æĸ¹çļĦ +· åĵĪ +æĻ®æ´± èĮ¶ +Ġapprentices hip +æĸ¹æĸ¹éĿ¢ éĿ¢ +Âł åĪĺ +å°ı 鬼 +æ¤įçī© æ²¹ +ĠDO UBLE +ing ular +大 æ²³ +abol ism +ãĢģ åĬ³åĬ¨ +è¿ĺ没 æĿ¥å¾ĹåıĬ +ĠKid ney +Ġcatalyst s +V F +Ġ 交éĢļ +ãĢģ çĮª +() / +_u pper +ï¼Į èİ·å¾ĹäºĨ +天 çĶŁçļĦ +读 æĩĤ +ï¼ī å°Ĩ +.se q +ĠA i +æķĮ æĦı +ï¼Į æģ¨ +Ġgo vt +éĩĮ 没æľī +Ġmod elled +track ing +( Op +-d eterm +ãĢģå¸Ĥ æĶ¿åºľ +é«ĺ é«ĺçļĦ +ä¸Ģ个 æĸ° +Ġbot anical +m oving +n ox +r ish +åħļ å·¥å§Ķ +æīij åħĭ +åĮĸåIJĪ çī©çļĦ +ï¼Į å¼Ģåı£ +.M ake +Ġdefe ats +æŃ¤ è¨Ģ +两 ä»¶ +æĬĢæľ¯ å¼Ģåıij +ĠUN IX +Ġeigen vectors +Ġnight life +注åĨĮ çļĦ +ï¼ĮæŃ¤ 人 +ãĢģ åİĭ +ri os +èµ° å¼Ģ +Exper ts +å¯ ¾ +çĿĢ çģ« +.m et +Ġsweet heart +ĠFed Ex +ãĢĤ éĥŃ +èIJ½ å¯ŀ +çĥŃéĹ¹ çļĦ +éĿĻ éŁ³ +Ġpharmaceutical s +M EDI +åĸ· åļı +Ġcarn ival +C ir +Ġ ä¸įèĥ½ +Ġadm irable +Ġub untu +ĠAd j +纪 åħĥ +ay o +ãĢģ åĬŀåħ¬ +Ġch amp +çIJĨ 说 +ç² ij +NS Integer +G rey +æŀľ 羣 +_INIT IAL +ãĢģ å°ij +Ġro semary +é¢Ħ è§Ī +åģ¥åº· çĬ¶åĨµ +ig ers +view port +积 èģļ +_ST D +大 åłĤ +ĠB ec +è´¢ç»ı 大åѦ +: url +O regon +t ell +åľ¨ 身 +A Q +w ashing +ĠN ora +_co eff +\ cup +scr atch +ĠH N +Ġhydro xy +æµ· å°Ķ +èĢĥ åħ¥ +ĠÑģ п +ĠGraph QL +Ġscrap ing +示 å¨ģ +De an +Ġì ĭ +ĠD SM +å¤ļ ä¸ĩåħĥ +ï¼Įåıª è§īå¾Ĺ +Ġimpro vis +ĠRE QUEST +Ġt acos +ï¼Įä»İèĢĮ 使å¾Ĺ +ĠstringWith Format +( Time +ä½ķ çŃī +çºł éĶĻ +ophag us +Ġbr ink +çļĦåĨħ æ¶µ +ä¸Ģ å°ģ +def endant +ĠA kt +ĠRes ervation +èī¾ å°Ķ +Y u +è´¢ ç¥ŀ +.set Layout +æ¼Ĩ é»ijçļĦ +Ġm olds +ĠG um +lic ting +ĠCh lor +Ġel bows +CE LL +.mock ito +天 çļĩ +Comp ared +ãĢģ æķ°åѦ +Ġbr illiance +ĠNav igator +ĠS ending +大 éĥ¨ +oy l +ï¼Įä»ĸ å°±æĺ¯ +ĠâĪ © +J NI +Ġtoken izer +åĿĩ ä¸İ +O USE +åĩº å®¶ +Ġout skirts +ãĢģ 空 +Exp iration +.grad le +m ie +å¤į ç͍ +_int eg +ĠDiss ertation +es la +æīĢ åįł +Ġp ont +让 åħ¶ +åı¯èĥ½ ä¼ļæľī +ĠSing leton +èĭ¯ ä¹Ļçĥ¯ +; t +ï¼ļ åĩºåľº +ï¼ĮèĢĮ ä¸į +(s chema +å®īåħ¨ éĹ®é¢ĺ +ĠAt ty +_re ceive +å°ı çĮ« +_d p +ä¸įè¦ģ åĩºçݰ +U IS +ï¼Į 毫ä¸į +âĢĻ Brien +ç±» åĴĮ +ç§»åĬ¨ ç͵è¯Ŀ +Ġcompar ator +» Ãij +ĠN apa +ĠE CB +æĹ¥ èĢ³æĽ¼ +IDD EN +Ġcont ractions 
+ï¼Įè¿Ļ 人 +è§ģ ä¸įåΰ +管çIJĨ 模å¼ı +ï¼Įåį³ å°Ĩ +END ER +ĠBerg er +çĽ¸ä¿¡ èĩªå·± +ĠFis heries +ï¼Į æĭħå¿ĥ +ä¸İ åIJĪä½ľ +è§ĦèĮĥ çļĦ +Ġcel estial +N EXT +Ġr aging +åºĶ éģĵ +_bl k +ĠMother s +ä¸Ĭ æĸĻ +çĤ¹ éĴ± +_p riority +ãĢĤä¸į 论 +ï¼Į åŀĤ +Ġout law +Ġconc ur +Ir ish +_SOCK ET +Ø ¶ +ï¼Į ç»´æĮģ +Ġforest ry +ен и +æ¸Ĺ æ¼ı +ï¼Į è¿IJè¡Į +_SE CTION +- owner +d ap +Ġc logged +åıijæĮ¥ åĩº +C BA +ma j +: v +m ess +Ġass ures +ĠOr chard +IR ON +ç¼ĸç¨ĭ è¯Ńè¨Ģ +第äºĮ æŃ¥ +rec iate +ĠT ough +èµ° ä¸ĭ +und les +PL IT +é İ +Did Load +DEF INED +奶èĮ¶ åºĹ +ĠC ERT +æĵį ä¹ĭ +Ġq q +ĠWest on +g ren +ï¼Į æĴĴ +ĠH DD +_ validate +ï¼Į æģ° +ĠP ew +ä¼ł ç»Ļ +Reg Exp +.s creen +Ġseem ing +èŀįåħ¥ åΰ +å½ĵäºĭ 人çļĦ +Ġinter ruptions +Ġ_ (' +_m onitor +ĠIMP ORT +es ley +ç¹ģ æĺŁ +æĺ¾å¾Ĺ å¾Ī +, æĬķèµĦ +éĩĮ äºĨ +æľĢ èĥ½ +âĢĶ you +åIJ¦ åĨ³ +p unk +ä½ı æ°ij +Ġgrad ers +ä»ħä»£è¡¨ä½ľèĢħ æľ¬äºº +èĢĮ åĬªåĬĽ +粪 æ±ł +ï¼Į å¼Ĥ +ãĢĤ åĪĨ +ãĢĤ åIJij +设计 ä¸Ĭ +RA INT +Ġfut ile +/ MS +ĠN ah +è¿ĺ å°ı +åħ¥ å¸Ĥ +_IN TR +ĠÏĢ Î± +Ġmicrobi ome +b ang +Ġ æ±Łèĭı +æķ° ä½į +éĩįçĤ¹ åħ³æ³¨ +. Kind +er ning +Ġk otlin +è¿Ļæĺ¯ 个 +Ġsidew alks +çļĦ 尺寸 +( evt +Ġwith hold +ç͍ è¯Ń +.s ide +.f ree +å²³ é£ŀ +Ġimperfect ions +, æĽ¾ç»ı +ï¼Į æĪIJæľ¬ +æľī åĽĽ +éĢģ æĪij +ĠT IFF +ç³ ł +å·¥ä½ľçļĦ éĢļçŁ¥ +Ġdun geon +Ġ åĩ¡ +ĠG lam +çļĦ è¿ĻäºĽ +ãĢģ æ¹ĸåĮĹ +人 åij¢ +æĢ§ æĪ¿åľ°äº§ +çľĭ ä¸Ń +.p m +ĠS AVE +land ers +Ġgeomet ries +èģĶ èIJ¥ä¼ģä¸ļ +Ġtyp o +Ġwood working +第ä¹Ŀ 竳 +? (: +éľ ģ +ç³»ç»Ł ä¸ŃçļĦ +大åŀĭ çļĦ +ĠS ECT +Ġpl umm +æīĭ æı¡ +æĮĩ 使 +agn osed +åĽºå®ļ äºİ +çļĦ æľĽçĿĢ +Ġn ests +è¿Ļ çķªè¯Ŀ +ä¸ī çľģ +Ġsimp lex +.b asename +Print f +æĵ Ģ +ä»ĺ 诸 +Ġ 赤 +Ġbooth s +id iol +Ġcat cher +Ġå°± è¿Ļæł· +- health +æľī 幸 +ih u +Ġ$ ^{ +Ġend ocrine +纬 度 +ï¼Į æľºæ¢° +ï¼Į æĢ§æł¼ +åºĶ äºĨä¸Ģ声 +Ġbored om +Ġag endas +åij¨æľŁ æĢ§ +Ġreopen ed +< any +ĠM FA +ä¸Ń æĺ¯ +EO C +ĠP ace +ind erella +lect or +(f iles +严 å¯Ĵ +饱 åıĹ +ãĢĤ . 
+缺 å°ijçļĦ +åĨ° å¯Ĵ +( KEY +ĠColumn s +V als +Ġcont ended +Ġdest abil +-per iod +.m k +Ġd urch +(d esc +èĮ¶ 饮 +ĠMad ame +Ġjog ging +ĠN umeric +Z I +y am +Ġcl ashes +ä¼ĺ 缺çĤ¹ +åĬŁèĥ½ éļľç¢į +çͰ åľ° +æĢĶ æĢĶ +ric o +ral tar +å¦ĸ åѽ +C t +Att orney +IV ED +ĠAppell ate +. ERROR +æľī 误 +Ġle gg +å¹³ çĶŁ +æĥĬ éªĩ +大大 æıIJé«ĺ +ĠRes ilience +ĠCirc us +Ġmuff in +ĠI so +class ified +çļĦæ°Ķ ä½ĵ +å°Ĩ èĩ³ +å°±æĺ¯ ä¸Ģç§į +Ġsuperv ise +é© ¸ +åıĸ è¯ģ +ç®Ģ 书 +IRE CTION +æ¸Ķ èι +P arm +ĠD oub +.s plice +ï¼Į æ²» +ï¼Į æķĻåѦ +ï¼Į æĸľ +Ġj al +Ġ èĤ¡ä¸ľ +ĠIt al +å½ĵ 她 +ç¥ŀ è¯Ĩ +Ġaut op +åį« åħµ +ç͍æĪ· åIJį +æ´Ĺ æµ´ +-off ice +çī©ä¸ļ æľįåĬ¡ +æİ Ĥ +ï¼ļ ãĢIJ +åīį ç¨ĭ +iss au +楼 éģĵ +Ġexcer pts +\ colon +ĠV IS +Ġcatalog s +代 ä¼ļ +ï¼Įåľ¨ ä¸Ģ个 +-r anked +è£Ĥ çĹķ +og on +å¹³ åİ¿ +åIJĮåѦ 们çļĦ +å¼¥éĻĢ ä½Ľ +ĠFOR MAT +Ġtrek king +åĵª ä¸Ģç§į +å͝ çī© +Ġ å®ŀéĻħä¸Ĭ +ĠPro ve +ï¼Įä¸Ģ åı£ +åѦ æ´¾ +å®Į èĽĭ +ä¸Ńå¿ĥ 主任 +Ġ~ /. +æģ³ æ±Ĥ +Ġhyg ien +in ence +Ġalloc ating +ree ce +=' " +ä¸Ĭ åºĬ +Ġper oxid +_log ic +Ġsub merged +ä¼ijæģ¯ ä¸Ģä¸ĭ +ï¼Į 室åĨħ +ãĢģ å®ĮåĸĦ +æĥħåĨµ åıĬ +款 车åŀĭ +å¹¶ä¸į 代表 +ĠInf antry +ä¹³èħº çĻĮ +< long +.D rop +è¿Ļ åıªæĺ¯ +æļĤ æĹ¶çļĦ +' >] `: +大 èIJ¥ +Ġimp art +Ġprote ase +ĠH atch +éĶĭ åĪ©çļĦ +两 å¸Ĥ +ä¸į è®°å¾Ĺ +女 çĶŁçļĦ +-ne utral +ï¼Ī M +dd dd +Ġchar ms +åĨĻ æ³ķ +温度 çļĦ +ĠOB J +å¡ŀ è¿Ľ +çĻ»è®° 表 +-c ross +ãĢĤæīĢ以 åľ¨ +ï¼Įåı¯æĺ¯ ä»ĸ +ĠLoad er +ĠSem inary +çļĦ åį°è±¡ +ĠG G +æ¸ħ äºĮ +ent ies +ãĢģ æĹ¶å°ļ +æĪij们 èĥ½ +.add r +é³ ĸ +ĠRos ie +Ġ æľĢ大 +Ġc auc +ĠE CG +åIJİ ä¾¿ +å°ģ 为 +è¿Ľè¡Į æ£Ģæµĭ +ĠEp stein +èĩ³å°ij ä¸Ģç§į +Ġcent red +(f ields +æ±ī åĨĽ +ï¼Į åĽŀå¤į +op hen +uc s +æĺ¾ åŃĺ +沿 éĿ© +ãĢĤæĪij们 åľ¨ +æĻºæħ§ åĴĮ +人 被 +//////////////////////////////////////////////////////////////////////// //// +_ Exception +rec ursive +Individual s +ĠCl aude +课 ä¸Ĭ +èµ¶ èµ° +Ġabstract s +θ ε +ĠH ubbard +ä»Ĭ天 æĻļä¸Ĭ +çļĦ大 äºĭ +ĠRut gers +æľī éģĵçIJĨ +æĥħ çIJĨ +(p rev +ä¸ĵä¸ļ æĬĢæľ¯äººåijĺ +Ġà ģ +å²³ çζ +ãĢĤ ä¹Ŀ +Ġst roller +λ ε +åݨæĪ¿ éĩĮ +éŃĶ ç¥ŀ +éĤ£ 座 +ĠTime Span +h ooks +ĠN EC +ä¸Ĭ è¿Ľ +åºķ éĿ¢ +Ġaccess 
es +l ion +ef er +port al +åĸľæ¬¢ ä»ĸ +ceed ed +ä¸Ģèĩ´ æĢ§ +blog s +Ġro tten +.B ig +页 éĿ¢çļĦ +éķ¿ åĩº +éĿŀ常 éĢĤåIJĪ +奥 巴马 +壮 å¿Ĺ +Ġθ α +ĠE ternal +æĶ¯ è·¯ +- Out +å¾® é£İ +èģª æħ§ +IPP ING +Ġepithe lium +, æĪIJåĬŁ +ĠI rr +èĦ± äºĨ +Ġreson ates +ĠGENER AL +Ġparal ysis +_P IXEL +顾 èĩª +Rece ipt +å¤ĩå¿ĺ å½ķ +/ XML +; - +æľī è¿Ļç§į +Ġman ure +Ġsk im +èIJ¥åħ» ç´ł +ie res +æĬĢæľ¯ ä¸Ĭ +åѦ å¹´ +çłĶ ä¿® +åĨ² åĪ· +j w +æĢ§ çĬ¶ +ä¸īå¹´ åīį +ĠEve rest +termin ate +. rel +P ractical +ä¹ĺ é£İ +ĠFlor al +Ġblind ly +cook ies +åĪĩæį¢ åΰ ++ | +ï¼Į 幸ç¦ı +å¥ĩ èīº +èıľ å¸Ĥåľº +Be ach +.class Name +ĠAstr onomy +Ġ ä¹Į +ãĢģ éĿ¢ +set Text +Ġhist ological +ĠMar cos +éĵº åŀ« +å¹² æ´Ĺ +大 象 +Ġfa una +-set ting +ï¼Į 代 +æ°´ éģĵ +åįĬ åĪĨ +Rec ipient +å¸ĥ åħ° +ls x +å¹³ çŃīçļĦ +-m m +åı¥ æĦı +.k ernel +Ġ éĹ»è¨Ģ +ĠS ard +Ill inois +_PACK ET +åĬł åĪ© +åĪ« å¤Ħ +éĺ¿ çİĽ +Ġhandic ap +ä¸ī é¤IJ +å¾· è¡Į +ĠClean up +Response Body +æľī äºĮ +è¶Ĭ æĥ³ +ĠRam irez +ĠJ ournals +çļĦçľĭçĿĢ ä»ĸ +Ġgriev ing +å¤ľ æĻ¯ +Ġc ó +ï¼Į éĻĽä¸ĭ +没 æĿ¥ +åĮĹ京 çļĦ +Hel en +泪 æµģ +V enue +(p acket +ç©¿ åĪº +æĺ¯åIJ¦ åı¯ä»¥ +弯 æĬĺ +big r +åłĨ æĶ¾ +çĹ ¢ +ï¼Į æĿ° +Ġtum ours +ο ν +.R est +ĠTravel ing +: =\ +ĠD ont +( åĽĽ +) t +ĠH AD +å®ļ éĩij +帮 æĪij们 +å¸ĮæľĽ éĢļè¿ĩ +л Ñİ +.L ong +ĠL AS +ĠK ier +url s +Ġcrit iques +å¤ĸ æĸĩåIJį +Ash ley +< option +index Of +为é¦ĸ çļĦ +Ġsub contract +-p ay +' Brien +ãĢģ å¢ŀ强 +Ġget All +ï¼Ī 第 +èµĦ ä¿¡ +å°±ä¼ļ åıijçݰ +ï¼ĮæĹ¶ èĢĮ +/ week +转 è¿ĩ +ĠOs borne +_SC ALE +ĠJur is +èľĹ çīĽ +Ġt uck +ï¼Į å±ħ +ï¼Į è¿IJåĬ¨ +anag an +Ġmus cul +éĿĵ 丽 +.nc bi +or io +Ġp he +çĿĢ è¿Ļ +-h over +éĢĢ ç¨İ +_r unning +ĠBulgar ian +_R CC +Ġhar ms +ĠDocument ary +å¢ŀæ·» äºĨ +西 ä¾§ +.b ukkit +æķij èµİ +ãĢĤ ä½ľåĵģ +ĠC p +æľīä¸Ģ é¢Ĺ +Inv ite +ĠBos nia +- ton +ĠT art +_A UT +yal gia +. 
Transaction +Ġplay wright +.A bs +罪 çļĦ +ĠHow e +èĥ¶ ç²ĺ +p rem +ab ytes +ĠD illon +æŀģ 好çļĦ +, +éĢĤ åºĶçļĦ +åħħåĪĨ èĤ¯å®ļ +ĠP roud +ç§ĺ ç±į +C riminal +Ġv ie +qu ake +ï¼Įä½Ĩ è¦ģ +åıĺ 缸 +ĠM ink +èįī çļĦ +ot ify +äºĨ 大éĩıçļĦ +ï¼ļ æĸ° +Ġ< ![ +-m emory +夸 èµŀ +æ¶ĪåĮĸ éģĵ +ĠTIM ER +/ th +ĠDon ate +M isc +æĤ » +èµĦ 管 +è¾¹ èµ° +椰 åŃIJ +, åı« +W el +Ġ 欧éĺ³ +åĴĮ å®¶éķ¿ +ĠPoll ution +b is +亮 åħī +èᣠåįİ +K o +åħ´ éļĨ +èĦļ æīĭ +Ġsleep ers +ĠProv ision +ãĢĤ 大å¤ļæķ° +ĠCon nie +æļ´ åĩ» +åľ¨ åľºçļĦ +å¹³ å¤į +éĺ² éĶĪ +溯 æºIJ +_ ge +ub ation +åIJĮ æ¡Į +ĠEric a +us i +Ġrum ours +åħ¬è¯ī æľºåħ³ +Ġerupt ed +åıį éĿ¢ +ãĢĤ æľķ +else y +ĠSe as +çİĦ å®Ĺ +缴æİ¥ æĪĸéĹ´æİ¥ +å¾ģ åľ° +obs erved +ĠIsrael is +ĠEst ablished +ä¸Ńæĸ° ç½ij +W ATCH +Ġaut og +åľ¨ åĮ»éĻ¢ +ĠW rites +åĪĨ 寸 +æ¥ļ æ¥ļ +Ġtax a +l ays +ol ed +åĨ² çł´ +foot notes +èŀįåIJĪ åıijå±ķ +Ġf oe +é«ĺ éĽĦ +é«ĺ åĤ² +-m ult +Ġinnov ators +ä¼ĺç§Ģ æķĻå¸Ī +ar um +转 èĢĮ +Ġ_ {\ +ï¼ĮéĤ£ä¹Ī å°± +çĸ² åĬĽ +æīĵ éĢłæĪIJ +åIJ¬ è¯ģ +京津 åĨĢ +J F +%çļĦ èĤ¡æĿĥ +ä¸į æħİ +åľ¨ 第 +æľª åĪĨéħįåĪ©æ¶¦ +å¯Į 人 +åıij表 çļĦ +Ġcolour ing +ä¸Ģ éĺŁ +åĩº 没 +Ġche ated +çľģ åĬĽ +ãĢģ ç¦ı建 +Ġk en +GS M +Ġsub units +ĠMed ian +å¡Ķ çļĦ +æĶ¾å¿ĥ äºĨ +辨 è¯ģ +as her +ĠM AL +ĠW end +æĢ¨ æ°Ķ +- parameter +社 å·¥ +è½» èĶij +" æĪij +ï¼ĮéĤ£ ä½į +: æīĢè¿° +g ravity +r ath +Ġc aster +ont ology +æİ¥è§¦ è¿ĩ +ĠHon olulu +éĵĥ èĸ¯ +ĠS gt +ĠM org +çİĭ åIJİ +Ġnumber ing +绿 åı¶ +P urch +ï¼Įä½Ĩ åħ¶å®ŀ +- âĢĿ +Ġpsy che +, ä¸ĸçķĮ +ä¸į åĨ³ +å¸Ĩ èι +Ġdisrupt ing +- price +Z O +Ġa usp +Ġint angible +Inter preter +ed eration +çļĦ 次æķ° +ãĢĤ æĿľ +.is Valid +ä½ł ä¸įçŁ¥éģĵ +åł ĩ +ĠLe ap +ãĢĤä¸į çĦ¶ +å©ī åĦ¿ +(t okens +ĠEn code +æĥĬ æĤļ +ãĢĤæľī ä¸Ģ次 +åIJĪèĤ¥ å¸Ĥ +( Json +è¡Į 踪 +讲 讲 +åĪĽå»º çļĦ +Respons es +åįĬåĬŁ åĢį +_ ag +å¦Ĥ ä¸Ģ +_LO OP +ï¼Į åįĩ +ens in +la id +ra e +ãĢģ çŃī +æľī ä¿Ŀéļľ +天 åºŃ +alk er +Four th +ï¼Įä½Ĩ ä»į +æīĭæľ¯ åIJİ +ĠOrig ins +ä¸į é½IJ +ĠL au +çİĭ è´¤ +interest ed +Ġ å±ŀæĢ§ +ãĢģ éĽª +ord en +.h istory +çĤ¼ 丹 +ä¸Ń åIJ« +顾 ä¸įä¸Ĭ +ĠTr is +ĠJagu ar +Ġiod ine +L ag +-p rivate +ãĢģ èĥ½æºIJ +ä¹ĭ 
女 +-c ou +ĠGl acier +Ġminim ization +Ġ' ? +Ġmult is +Ġun answered +.B ind +纵 è§Ĥ +ĠEp iscopal +Ġsurf act +I reland +ï¼Į åĩıè½» +Ġpress ured +ĠWar fare +ï¼Į æģ© +ĠBl oss +éķĩ éĿĻ +åĽºå®ļ 座 +Ġboy cott +ä¸Ļ åŁº +Ġnan os +ëĤ ĺ +çļĦ ç¾İåĽ½ +æŃ¤ ç§į +æµģ éĩıçļĦ +know ledge +um ont +Ġsc rum +çϽ çŁ³ +ĠSh arks +ĠCO LL +åľ°ä¸ĭ æ°´ +çŁŃæľŁ åĢŁæ¬¾ +Qu aternion +, this +D iamond +Pro per +Ġcoll apsing +Ġbow ed +) ä¹ĭéĹ´ +ay as +Ġprerequ isites +Ġir responsible +æijĺ ä¸ĭ +vd ots +(Q String +âĢĿ æľŁéĹ´ +âĢĶ they +yl ase +æ¢ģ æĺĬ天 +颤 åĬ¨ +åľ¨ æīĭéĩĮ +éĿ¢ å̼ +å¦Ĥ èĬ± +ä¿Ŀ ç¨İ +åįİ è¯Ń +ï¼Įå°±æĺ¯ 为äºĨ +çļĦ èµĦ产 +æľī ç¼ĺ +âĢľ Well +Prof iler +-car bon +åŃĿ æķ¬ +JSON Object +Ġenumer ated +大å¤ļ æĺ¯ +Ġ æ·± +ãĢģ åħ±åIJĮ +ç¼ ° +æ¸ħ ç§Ģ +_D ES +End points +Found er +è¿Ľè¡Į åħ¨éĿ¢ +> "; +Ġr sp +å·² æľīçļĦ +çĶ· ç¥ŀ +Te ams +ä¸ĭ è§Ĵ +ĠBow ling +çł¥ çłº +è§Ĩç½ij èĨľ +æľī å¾Ī好çļĦ +æĸĩåĮĸ 产ä¸ļ +Ġwip ing +约 åįł +.D ef +åį° åζ +Ġfall out +çļĦ æķĪçİĩ +cons istent +æ½® æ°´ +æĪĴ çĥŁ +ell as +çĪĨ è£Ĥ +Ġla ure +èĦ± ä¸ĭ +-M ay +Ġann uity +çļĨ çŁ¥ +ï¼Į æĪIJå°± +ĠJ asmine +_C AM +产åĵģ åĴĮæľįåĬ¡ +Ġcombinator ial +éĢĢ åIJİ +esc ence +S AN +Ġo asis +ac ad +åĺ ¤ +( selected +ä¸į å¹² +-b ooks +Ġrev olving +dep loy +[ R +ãĢģ 主åĬ¨ +åİ» æİ¥ +Ġsn printf +ĠLenn on +/ æľĪ +äºĮ å±Ĥ +被 è¿Ļ +_W ARN +ï¼Įå°±æĺ¯ åľ¨ +äºĭ åīį +-f ill +ï¼ĮæŃ¤ åīį +å¤ĸ交 éĥ¨ +Ġsens ed +çģĮ 注 +est i +prom pt +at ism +åĽŃ çļĦ +Ġunlock ing +ang ement +Ġcl js +α ÏĤ +_ex amples +ĠSt ereo +缴 éĿ¢ +åħ¬å¼Ģ èµĽ +| string +声 éĹ®éģĵ +D AV +åĬ Ī +ĠTh reshold +头 è¡Ķ +èĩªå·±çļĦ èĥ½åĬĽ +å½¢çĬ¶ çļĦ +Ġmetic ulously +_C F +weight ed +è´¢åĬ¡ æĮĩæłĩ +éĢĨ åIJij +Ġt iled +ym our +ĠS MTP +Ġps z +Ġк ак +ç»ı éĶĢ +ines cence +çݯ çĬ¶ +è¡ĮæĿİ ç®± +ĠN arc +Sh ield +æĭ¿ è¿ĩ +D yn +Ġrect angles +acchar ide +è¿Ļ äºĭåĦ¿ +ä»İ 她 +Ġem ph +åİŁ æī¿å¤© +åĩłåįģ 个 +Ġg ps +Ġ' '). 
+ï¼Į以 åħ¶ +ĠCal iforn +s uffix +We apons +Ġdispos ing +_P B +åīĶ éĢı +éªģ é¾Ļ +ãĢģ è·¯ +éĽĦ åİļçļĦ +Te en +E ase +Ġjack pots +a at +æĺ¯ ä¹Ł +å° į +âĢľ ä¸ĸçķĮ +åIJĮäºĭ 们 +(" ( +Ġmult il +ĠF raction +ĠE W +_P S +ãĤĤ ãģ® +pe aker +èĥ Ħ +åĭĴ ç´¢ +ä¾µ åįł +é«ĺ ä¸ŃçļĦ +åħ« éģĵ +CH APTER +Ġfac ulties +_A ES +åIJĿ åķ¬ +大 ä½ľ +_un stable +ï¼ĮæĹ¢ æľī +Ġanisot ropy +_ atomic +ĠS chn +Ġimpair ments +ï¼Į çħ® +ä¹Łæ²¡ åĬŀæ³ķ +èIJ¥ä¸ļ é¢Ŀ +èĢIJ çĥŃ +M d +ĠÐ ķ +æĻļ å®´ +建ç«ĭ åģ¥åħ¨ +ĠMod ular +Ġknock down +ĠPercent age +ĠBax ter +西 å±± +好 è¿ĩ +å¦Ĥ 梦 +çĶ» è´¨ +Le o +Exp ires +ĠRecommend ation +ä¾ĭ è¡Į +ç²Ĵ 度 +ï¼Į é»Ħéĩij +ï¼Įä»ĸ们 ä¹Ł +æ°¸ ä¹IJ +Ġbe ige +ãĢģ ä¼ĺåĮĸ +ĠSt eward +Wh itespace +åģľ éĿł +Min us +ĠMo ines +ĠCross Ref +Ġ---- -- +_ span +ï¼Į æijĩ +ãĢģ æľįè£ħ +å¿į å¿ĥ +f k +{ conf +Ġs con +çζ çİĭ +å¨ľ å¨ľ +Ident ifiers +, æĢķ +Ġ\ ,\ +ER G +ĠAbd ullah +J OIN +æıĴ æĽ² +.u pper +Ġw art +æĺ¯ ä¸ī +ĠF rog +Ġcan als +ç¬ijçĿĢ éģĵ +( return +çļĦ åIJĦ个 +Ġst alled +ar ab +æĭĸ ç´¯ +Ġt ame +.set State +ĠTrad ition +Bro ther +å¸Ĥ çĽĪ +Ġhasht ags +- One +ï¼Į 表 +ãĢģ é¢Ħéĺ² +ops ies +å¾Īæľī æĦıæĢĿ +B ab +交 åĩº +Ġred ness +交æĺĵ ä¸Ńå¿ĥ +g ov +Ġfantas ies +ĠL ighthouse +ĠAd a +åĨ· éĵ¾ +omet rics +_dat etime +_ peer +ï¼Įè¿Ļ å°Ĩ +æİ§åζ éĺĢ +Qu ite +æ¸ħ æĸ°çļĦ +Mat hemat +佼佼 èĢħ +ĠPOST S +ç͍ å®ĥ +ï¼ĮæĹł 人 +.un iform +. constructor +礼 çĽĴ +ĠChart ered +ĠSh ay +Ġcross ref +磨 éļ¾ +é«ĺçŃī éĻ¢æł¡ +ĠR ao +çº ¨ +arch itecture +ç¨İåĬ¡ æľºåħ³ +Rew rite +. Encode +c oder +Ġvolt ages +( option +( Player +ĠH ort +Ġex ogenous +èĥĮ åħī +æĸĹ å£« +c wd +.sub mit +ãĢĤ è¿Ļä¸Ģ次 +以 身 +ãĤ ĩ +ï¼Įåħ¶ ä¸ŃçļĦ +.M ove +Ġplastic ity +Ġdizz iness +ĠFL AGS +Ġstyl ist +H ur +åĵģ è¡Į +Ġri pen +} y +Ġ 欧 +ï¼Į å·®ä¸įå¤ļ +ac ulate +å®Ī æľĽ +举 åĮº +(B undle +屡 屡 +å¿ħ ç»ı +ĠMcC oy +_ contact +add en +è¿ľ åľ¨ +uilt in +oter ic +ĠEBIT DA +% D +è¶ħ åĩ¡ +Invest ing +Ġne aring +åħ¨ æĺİæĺŁ +_M P +Des k +. identity +T an +.d esc +ĠShe ila +.ch annels +Âł èµµ +Ġrad iative +Ġscreen play +æĿijæ°ij å°ıç»Ħ +. 
ctrl +管çIJĨ æľºæŀĦ +éĿĴ èĬ± +åºŁ äºĨ +Ġwid est +_ OC +â ¾ +und ance +ĠExper ienced +ä¼ļ æĽ´å¥½ +ĠJ oker +Ġbackground Color +Ġadminist rations +æĹłæĦı ä¸Ń +æĸ° èµĽåŃ£ +_p ose +( sock +çļĦ ç§ijåѦ +åľ° æłĩ +ĠUn lock +sc aled +Ġsent imental +, æ¯ıä¸Ģ +Pre paring +åħ¼ å¹¶ +B ias +d ensity +马 åĮ¹ +åĪ¶åº¦ åĴĮ +Ġul cers +ï¼Įç®Ģ缴 å°±æĺ¯ +j es +ï¼Į éĩįåºĨ +ãĢĤ å¹´ +ãĢĤ åĬłä¸Ĭ +客 å®¶ +ä»»åĬ¡ æĺ¯ +åıĤèĢĥ åĽ¾ +æķ² æīĵ +ĠI CD +erm ost +FF T +å®Ŀ 座 +_h int +è¶ĬæĿ¥è¶Ĭ è¿ij +" äºĨ +ç²¾ çĤ¼ +è¯ķ æł· +顺 å¾· +-h aired +Ġutil ise +æĹĭ éĴ® +Ġn erd +ĠT ide +åľ¨ åIJĦ +Network ing +Develop ing +, 个人 +z v +Ġ$\ {\ +R even +id in +ĠT EMP +ĠL AB +æĥħ æ·± +æ°´ æ§½ +设å¤ĩ çŃī +çľĭ æľĽ +宽 çļĦ +p hen +çĽ ħ +_f w +æłij ç§į +Ġillust rator +ĠNic aragua +ot ential +åİļ éĩįçļĦ +normal ized +çļĦèĦ¸ é¢Ĭ +åıij 麻 +Ġsurv ives +_se m +èĻIJ å¾ħ +, æ°¸è¿ľ +é£İ å¯Ĵ +åħħç͵ æ¡© +æĿij å¹²éĥ¨ +ä¼Ĭ åĪ© +top ics +Bi ography +çĶŁ åľ¨ +çͱ è¡· +æŀģ 好 +Ph ill +, éĻįä½İ +Ġs is +è¿Ļæł· äºĨ +Ġmut ate +çĿĢä¸Ģ æĿ¡ +d ust +d type +ï¼ĮéĤ£ ä»ĸ +éģµ ä»İ +æĮĸæİĺ æľº +на Ñĩ +ï¼Įåīį åIJİ +RESS ION +f ight +以 ä¸Ģ +å¨ģ æħij +æķĻ室 éĩĮ +-read able +L ie +ä¹ĭ æľī +Ġmy sq +-s core +Check out +-ch air +bound ary +! 
: +èı² äºļ +ro ker +op f +éϤ æİī +å¿ħéľĢ çļĦ +ĠX K +å¯Ĵ åħī +ĠCou ples +èį¡èį¡ çļĦ +H ug +å°½ æķ° +Ġreact ors +Ġmiscon ceptions +P ump +å½Ĵ æł¹ +æķij æĪij +é¡¶ éĿ¢ +ä¸Ģèµ·æĿ¥ çľĭçľĭåIJ§ +Ġenlight enment +, *) +H dr +举 æ±ī +ï¼ĮæĪij åĨį +ï¼Įéĥ½ å±ŀäºİ +ulk an +Ġskew ed +æģ £ +Ġfl ute +ï¼Įå°± 让 +Ġtable View +ĠSin clair +Ġindent ation +Ġhind i +å®Ī éŨ +ĠCant on +n ested +æıIJ éĢŁ +ï¼ĮæĪij ç»Ļä½ł +红 æľ¨ +ä¸įè¦ģ èĦ¸ +çļĦä¸į 满 +ĠC oh +æĪĸ å¤ļç§į +çĩķ 麦 +Dep osit +èĬĤèĥ½ åĩıæİĴ +èķ¾ ä¸Ŀ +Ġlong standing +Ġfer ment +, 女 +[ count +å¾Ī è¿ij +Ġair y +Ġopp ressed +Ġat l +âĢľ è¿Ļæĺ¯ +ç«ĭ æĹ¶ +çİī 佩 +Ġdis belief +ï¼Įä½Ĩ åĽłä¸º +-d im +éĢĢ åĮĸ +ĠBel lev +å¹½ å·ŀ +Ġsac ram +ï¼Į æµĭè¯ķ +Ġorigin ality +_READ Y +ĠScar let +èĥ½å¤Ł å®ŀçݰ +ãĤ·ãĥ§ ãĥ³ +åľ¨ä¸Ģèµ· äºĨ +Ġast roph +Ġ 人æ°ijå¸ģ +ĠEntreprene urs +ĠHond uras +åı¯ è´µ +_def inition +.u uid +Ġparam et +ĠTime Unit +Employ ees +W oman +ãĢĤ èĩ³å°ij +Autom ation +ĠT U +ãĢģ æľ¬æ¬¡ +Ġ æĸ¯ +ï¼Į å¤Ħå¤Ħ +Ġout field +åħ¨ èģĮ +_B IND +çĥŁ å°ĺ +Ġr idden +å°± å¦ĤåIJĮ +am ines +ä¸į ä¸į +ä¸Ĭ èħº +è¦ģ 为 +Ġqual ifier +ä»İ éĤ£ +ï¼ĮæĪij ä¸įèĥ½ +Ġc aste +Ġv ä +éŃĶ å¥³ +ĠUp coming +ĠSi oux +Ġt n +Dis connect +Ġl apse +ä¹ĭ æŃĮ +ç§» å¼Ģ +çĹĽ æ¥ļ +Ġoat meal +ĠD egrees +设 äºİæīĢè¿° +æ·±åħ¥ äºĨè§£ +ĠPed iatrics +ĠOE CD +Ġ æīĢå±ŀ +ï¼Į èĦijè¢ĭ +çļĦ çIJĥéĺŁ +ä¸ĭ è¿° +åŃ¦æł¡ éĩĮ +ä¸Ģ ç´§ +ĠU i +è¯Ĺ è¯Ń +éĿłè°± çļĦ +åľ¨ å½ĵæĹ¶ +çļ® åį¡ +ĠPh ones +ï¼Įå®ĥ åı¯ä»¥ +A qu +以 èĩ´ +ĠNe ptune +æĭī æĭ¢ +Ġshut ters +Ġs vc +ï¼Į è§ĦåĪĴ +è̳ é¼» +éĿĴå¹´ æķĻå¸Ī +???? ???? +Âł K +Ġdec ipher +ÃŃ m +Ġì Īĺ +.b g +Confirm ation +Ġtrans ports +.t oggle +ä»ģ æħĪ +- vector +ç¨Ģ ç½ķ +ĠD ane +ä¹Łä¸į å°ij +âĢĺ I +ĠT bsp +ï¼Įä¸Ģ 人 +Hand les +Ġinterview er +Ġcyt ometry +, ç§° +-ind ust +åıijçĶŁäºĨ ä»Ģä¹Īäºĭ +è¶³ é¢Ŀ +ĠÑģ е +iv as +Lo an +Ġtransl ucent +ĠMont erey +çļĦä¸Ģ åı¥ +è·ij çļĦ +cover ing +ĠS napshot +ä»ĩ 人 +Ġpy ram +. 
Player +Ġ 建çŃij +ĠKnox ville +ä¸į åģĩ +åı¯ åĮħæĭ¬ +Ġso othe +In coming +åĶIJ æģ© +æī«é»ij éϤæģ¶ +Ġra ke +co eff +Ġcommun ion +ĠInd o +éĶĻ æĦķ +Ġfac ie +严 å®ŀ +oph ilic +å®ŀæĸ½ ç»ĨåĪĻ +å±ħä½ı çļĦ +é£İ å°ļ +çĶŁæ´» è´¨éĩı +ãĤ ½ +åĪĿå§ĭ 确认 +çĶŁ æĢķ +ä¸İ æŃ¤ +åıĹ è®© +éļı身 æIJºå¸¦ +ĠLump ur +en ants +æľĢ å¼Ģå§ĭ +èĦļ å°ĸ +vey ard +Ġconce ivable +身 æĺ¯ +çͲ 级 +æĻ´ æľĹ +鸿 竳 +æ®´ æīĵ +r ants +åı Ł +è¨ » +ĠMad onna +ä¸įåı¯éģ¿åħį çļĦ +Ġn ombre +ä»ĸ æīį +ĠRock ies +mond s +Ġl w +Ġtall est +ĠRose mary +ĠActive Record +Ġ// !< +.M IN +ellig ent +p redict +ï¼Į é¢ľ +Ġam alg +å¢ŀåĬł çļĦ +ĠEconom ist +çĿģ çľ¼ +S ara +ä¹Ł å¿« +ins n +log ue +èı ı +(b uild +r asing +ĠM k +ãĢģ çīĪæĿĥ +per se +æĺŁ æ²³ +.b asic +ĠL och +è·Į åĢĴ +- Re +Ġ é¢ľ +_D OC +ìĿ ¸ +ĠT k +.f ast +é¡¹çĽ® åĴĮ +Ġ? , +浸 润 +ĠS overe +ĠL id +åĬ© åīĤ +-f ound +æĿĢ èĻ« +å¹² åķ¥ +Text ures +Ġbi ologically +Ġpain fully +ĠBlue print +.ab spath +H ung +ĠM ansion +Ġcon co +Ġro ar +ax e +Ġtax onomy +å¢ĵ ç¢ij +_ext ensions +(std out +ï¼ĮæĪij们 æĺ¯ +ãĢģ çͱ +èĨĿ åħ³èĬĤ +L ic +ãĢģ çĽijçĿ£ +æĬĹ äºī +AND S +Ġglam our +ĠJ B +该 æł¡ +Sub tract +_PL AYER +ï¼Į æ¡ij +æīį æĻº +è¯Ń å½ķ +Rec over +ĠRO CK +ĠCh im +åħ± ç͍ +çļĦé«ĺ 温 +åĪĽ ä¸ĭ +-f in +(l abels +ĠLO CK +ä¸įä»ħ æľī +çIJ³ çIJħ +没æľī åľ¨ +_s i +.R ange +æijĴ å¼ĥ +ind uced +.j sp +å±ĭ æªIJ +ĠSmart phone +UIT ableView +Check sum +èį ĥ +æİ¨ æĭī +æµĩ 注 +çĿĢ è¿· +æĶ¾ çľ¼ +_re v +åĭº åŃIJ +. geometry +人 æĢ§çļĦ +æŃ£ æŀģ +èĦij çļĦ +Ġord ained +ç«ĭ éĿ¢ +q w +Ġ å®Ŀ +ĠL evin +åį¡ åĿĹ +_default s +Ġpatri otic +Ġbios ynthesis +_b order +æ£ķ æ¦Ī +f ault +, 两个 +çļĦ åİļ度 +è¿Ļ个 æĹ¶ä»£ +-h it +Ġmistaken ly +åħµ æ³ķ +Ġ åŁİå¸Ĥ +åĨĽ éĺĢ +Reg s +èĩªçͱ è´¸æĺĵ +åĩº ç¥ŀ +_d i +è res +ĠSiem ens +ï¼Į å®ļä½į +ĠJ VM +åį¡ æī£ +祷 åijĬ +ï¼Į åħħ满äºĨ +åĬ¨ äºİ +æ¯ı ç§Ĵ +èµ¶ èµ´ +æĺı æĺı +. 
he +æĺ¥ 鼨 +/ ms +ä¸į æĢ¥ +è° Ł +温度 ä¼łæĦŁåύ +number Of +/ compare +Ġapprent ice +er ph +è¿ħ åĭIJ +Ġshoot ers +Luck ily +print ln +ĠTrans former +æķĻèĤ² èµĦæºIJ +ĠWH Y +g ies +å¦ ² +Ġ éĩĮ +Ġ åħ° +ĠU ID +æĺİç¡® è§Ħå®ļ +Ġoverr uled +Ġw char +here inafter +IS PR +ĠGood reads +ç° Į +å¤ļ å¤Ħ +ĠGreen ville +Ġadorn ed +Ġmor als +op ies +è´¹ åĴĮ +Ġrandom ness +all ah +Ġorbit als +V ice +æĪij ä¸Ģ个人 +çİ© å¼Ħ +ĠDH CP +ãĢĤ æĺ¨å¤© +ĠCh ances +å¸Ĥåľº çĽijçĿ£ç®¡çIJĨå±Ģ +ï¼Į竣 æĺ¯ +PERT IES +ĠISS N +Ġ å½ĵåīį +æĹł èĥ½ä¸ºåĬĽ +Is Valid +è¿Ļ座 åŁİå¸Ĥ +comput ed +z f +ä¿¡æģ¯ æľįåĬ¡ +Ġset Value +Th om +_b and +Ġont ology +ä¸į æħ¢ +ä¸Ń åįĹ +é² ² +Ġmel ts +ä½ł æĺ¯åIJ¦ +èµ° ä¸ĬäºĨ +çIJĨå·¥ åѦéĻ¢ +ï¼Į ä¾ĿéĿł +ä¸Ģ æ°Ķ +p ipeline +ï¼Į ä¾Ŀæīĺ +èĤ¾ çĹħ +Ġ é»İ +ort e +ï¼Įä¸Ģ 群 +æŀĹ åŃIJ +èĤ² èĭĹ +çŃij åŁº +FA ILED +T v +åľ¨ 为 +Âł J +AB I +Ġge ographically +äºĮåįģ ä¸ĩ +èµļ äºĨ +ï¼ĮæĢİä¹Ī èĥ½ +Camb ridge +ĠMaced onia +will Return +Ġ ä½İ +èµĦæĸĻ æĿ¥æºIJ +, å§ĭç»Ī +r na +Ġ} č +av ir +æĺĵ 失 +æĭ¼ åij½çļĦ +çļĦæĸ° é²ľ +Order ing +-date picker +) c +ĠI cons +æĪij ä¹Łä¸įçŁ¥éģĵ +ï¼Ī åħ¶ä¸Ń +æķĻ æĪij +粤 港澳 +Ġirres istible +à± į +_ archive +Ġ æĥ³è¦ģ +èIJ¥ æķij +ĠParam ount +ï¼ĮéĻĪ åĪĿ +olog ue +( initial +L iquid +Ġbe ware +éļ¾ ä¸įæĪIJ +碧 æ¡ĤåĽŃ +amer ican +èĬŃ èķ¾ +æĮī è¦ģæ±Ĥ +æĸĩ竳 çļĦ +ĠMand arin +ĠOFF SET +Ġhemorrh age +ĠW ants +ï¼Į她 å·²ç»ı +ãĢĤæĪij å¸ĮæľĽ +éĴ± å¸ģ +讨 ä¼IJ +ï¼ĮæĢ» ç»ĵ +-gu ided +T int +{ and +Ġ åŃĹ +ĠDe legate +çľĭåΰ æĪij +ï¼ĮæľĢ 大çļĦ +Foreign Key +ĠD HS +åĨħ æĸ¯ +空 åľ° +Ġworld ly +Ġfinal ist +leg round +åıĽ åĨĽ +ĠCHAR ACTER +ä¸Ĭ è¿ĩ +æĹ¶ ä¹Ł +Ġj ot +Ġcare less +ĠCou ple +Ġequip ments +æĺĵ çĩĥ +( height +Res erve +Ġapolog ized +éĦĻ å¤· +ĠR azor +Ġded uced +ï¼Įä»İèĢĮ 导èĩ´ +ĠPros per +è´¨ æľ´ +Ġ æİ§åζ +Ġd ab +ä¸Ń åı¯ä»¥ +-d em +åİ¿ åŁŁ +å·¨ èŁ¹ +zz y +ç§»åĬ¨ ç»Ī端 +详ç»Ĩ æııè¿° +ãĢģ åĴĮè°IJ +éĺµ éĽ¨ +çĿ¡ çļĦ +Scroll View +è¿ŀ äºij +ĠBund es +ï¼Į åѤ +ĠE cosystem +Ġli ar +éĿĴ å¹´çļĦ +Ġoverw ritten ++ A +èĢĮ éĢĢ +Ġfor ks +åĪĴ å®ļ +ĠAppli ances +st ab +æīĵ æĿ¥ +ç¥ŀ åºĻ +.M ouse +High ly +è¿ľ 大 +/lib 
s +Ġattenu ated +. vector +红 润 +çĪĨ åĩº +çļĦæīĭ éĩĮ +ĠM SE +ä¼ļæľī ä»Ģä¹Ī +Ġunfold ed +)) ). +åĪĹ ä¼ł +ä¸ļåĬ¡ æĶ¶åħ¥ +CG Float +e ffective +ãĢĤ 书 +ãĢģ åĨ° +å®¶ æķĻ +æĹł æ±Ĥ +ĠLex us +ter ies +åIJĥ èĤī +Ġsequ enced +详 å°½ +Read able +, åݻ年 +ãĢĤæĪij çĽ¸ä¿¡ +ç͵åŃIJ çĥŁ +央 ä¼ģ +æĿľ åĩ¡ +Ġrheumat oid +ĠB UY +她 è¿ĺ +ç¬ij éĿŀ +ĠAss ad +Ġcovari ates +ch airs +åľ¨ ä¸ĸ +ĠV ita +åĦ¿ 媳 +æĹ¶åĢĻ çļĦ +h ya +ord ial +头 缮 +åįĹ åİ¿ +éŀ ij +å¾Ĺ 主 +Ġsub ordinate +ĠBel ize +Ġout burst +ç»´ ä¹Łçº³ +ï¼Įæĺ¯ 为 +ch ol +没 å®Į +éĴī åŃIJ +CLUS IVE +æ¿Ģ æĺĤ +æľīåħ³ æ³ķå¾ĭ +, 缮æłĩ +F ULL +è¦ģ 好好 +è¿Ļ个 è¯Ŀé¢ĺ +Imp orter +åĩ»è´¥ äºĨ +Ġ 鼨 +_S S +Ġsymmet rical +Ġincent iv +- utils +ĠE H +Ġall otted +LO VE +ä¸į 缺 +Ġthere on +便 å¼Ģå§ĭ +æ±Ł æ°´ +æĺ¯ä»Ģä¹Ī æł· +åĸ· æ¼Ĩ +ĠChem icals +Ġtransl ational +ç½ij è´· +arr ays +light ing +ï¼Į åĩı +ä¸į ç®Ģåįķ +é£ŀ äºĨåĩºåİ» +人们 çļĦçĶŁæ´» +-n ine +ald i +个 åŃĹ +ä¼ļ 带æĿ¥ +Ġexp orters +ĠHel d +åĩ¯ æĹĭ +èī° å·¨ +ÑĨ иÑı +ĠOver night +ĠAud rey +res erve +éĩį å¡ij +æĤ¬ æŀ¶ +ĠZ ombie +Book ing +ĠQuick ly +-pres ident +Y W +rom yalgia +åIJİ åıijçݰ +olog ne +Ut ah +ï¼Įä»ĸ ä»İ +ĠRe ject +ĠHy att +æ¸Ĺ åħ¥ +ä¸į èĭŁ +åĴĮ èĥ½åĬĽ +ï¼Ľ åı¦å¤ĸ +ĠCont rary +Hon estly +Ġpatri arch +B arn +Ġk lass +Ġ: ( +åıĹ伤 äºĨ +h omes +ĠT LC +(p oints +Ġdeg rade +ĠSign als +Ġclim atic +P ermanent +å¾Ĺ çĿĢ +class ification +æĹ¶ æīį +åıijå±ķ åīįæĻ¯ +ĠPo ems +åıij çĤİ +è¯ģåΏ æ³ķ +大éĥ¨åĪĨ çļĦ +éĢļè¡Į è¯ģ +ĠDET AIL +ï¼Į èĭį +Ġre agents +åĴĮ 缮æłĩ +æ·± éĢł +.st atic +Ġbrut ality +{ }\ +hes ive +Ġp ups +ĠM ILL +Ġacc ru +ç²¾åĩĨ çļĦ +ĠAnglic an +Ġ åĨ° +Ġto ppings +æĽ´ ä½³ +ĠNo on +ĠRE P +Ġpra ises +Ġves icles +Ġnotor iously +Ġh oo +åįķ æľº +ç¼ĵç¼ĵ åľ° +ĠAG N +ĠS ind +天 大çļĦ +ĠPS I +ĠV ul +çŃī 她 +广 大çļĦ +çĶ· æĸ¹ +(f lags +åĽĬ æĭ¬ +Ġuniform ity +åºĶ åıĬæĹ¶ +çŀ¬éĹ´ å°± +躲 è¿ĩ +Ġreservoir s +Ġabbrev iated +/w rite +ĠTrou bles +çĤ¹ åIJį +Ġret in +ĠS VM +ĠY osh +ĠCal dwell +ãĢĤï¼Ī ãĢĬ +Ġmisunderstand ings +ãĢģ 许 +åĴĮ 家人 +Ġhand crafted +ç¿ ± +ĠEn emy +ä ll +Dat um +èµŀåIJĮ åħ¶è§ĤçĤ¹ +ÂĢÂ Ļ +, é¼ĵåĬ± +è½ ¶ +ĠY og 
+Ġx range +Ġmed itate +TM LElement +.Null able +Ġthin ning +æĸŃè·¯ åύ +key board +File Sync +Ac ross +åģ¶ éģĩ +åıĺé¢ij åύ +Ġa ft +Ġp und +Ġsh util +Ġdr ifting +ï¼ĮæīĢ以 ä»ĸ们 +Ġrad ians +( Array +æ°´ 溶液 +Pro x +ï¼Įä½Ĩæĺ¯ æĪij们 +身ä½ĵ çĬ¶åĨµ +ĠLad en +ä¸Ģ带 ä¸Ģè·¯ +ë ° +âĢľ With +ä¸ĭ æĸĻ +带 ä½łåİ» +Ġhappen ings +cast s +-F riendly +S essions +å®¶ ä¸ŃçļĦ +ï¼Į èµĦéĩij +ĠT aco +æĹ¶ åľ¨ +èįī åľ°ä¸Ĭ +åŁ¹è®Ń åŃ¦æł¡ +op code +å¤ĸ åĬł +td c +ï¼Įé«ĺ 级 +ĠTah oe +ĠMon k +opl an +олÑĮ з +çĶ ¬ +Ġsettings ACCEPT +Ġalert ed +æľ« å°¾ +Ġsom atic +åĪĿ ä¸ī +ï¼Į被 èªī为 +ĠPal o +èĦĬ æŁ± +ä¸į ä¸İ +Ġv engeance +Ġrest ricts +ç´« äºij +program s +è¿ĩ çĺ¾ +Ġsm elling +ĠAr r +éļı åIJİçļĦ +S Z +ä¸įä¼ļ 对 +ĠWould n +Ġcath olic +åģļ äºĽä»Ģä¹Ī +稽 æŁ¥ +ï¼Į åĭIJ +çļĦ ç»ıèIJ¥ +as ions +Ġse cluded +èĩª åĺ² +åłµ ä½ı +ç»´ æĭī +Ġred o +åijµ æĸ¥ +Ġ 欧洲 +å°ij å°Ĩ +ç»Ŀ åľ° +OUR N +两年 åīį +éĿĴ å·ŀ +um as +// ---------------------------------------------------------------- +æĹł æļĩ +HA HA +' = +Un used +å¢ŀåĬł å̼ +Ch ocolate +Ġser a +Ġgly cos +åħ¬ çĦ¶ +hern et +æĢĴ éģĵ +åŁºéĩijèµĦ产 åĩĢå̼ +Ġfl ips +Ġtheat res +ãĢģ éĢīæĭ© +Ġunn atural +çŁ Ĺ +è¾¾ æĪIJäºĨ +H ousing +it ius +ĠY ong +è¶ħ çŁŃæľŁèŀįèµĦåΏ +é¥Ń åIJĥ +Hash Set +ilden afil +她 没æľī +Ġbehav ed +åIJ¬è§ģ äºĨ +ĠN ex +ο ι +ç쵿ķı 度 +Ġs ling +Ġst ren +ï¼Į æī¶ +åľ¨ ä¸Ģ次 +Ġbr as +åºĹ éķ¿ +ĠSou ls +讥 讽 +at ters +ï¼Į å·¥ç¨ĭ +æĹħ è¡ĮçļĦ +Red ucer +ig mat +ä¸į æĺİæĺ¾ +åĴĮ ç½Ĺ +inst ant +ĠBel ief +ï¼Įæ°Ķ åĬ¿ +ä¿ĿæĬ¤ èĮĥåĽ´ +ĠMidd leton +âĢľ ç¥ŀ +为 代表 +财产 å®īåħ¨ +ĠI MD +ç´§ 身 +ĠRE QUIRE +ol vers +æľī æĪij +éĩij é»Ħèī² +eng ed +çļĨ 为 +åIJ¹ é£İæľº +ĠMist akes +èĬĬ èĬĬ +. Stat +ï¼Į 鸡 +缮 ä¸į +ãĢĤè¿Ļ ä¹Ł +åIJIJ è¡Ģ +-trans fer +; n +ĠB MP +ĠThe oretical +Ġgl omer +ch s +èĢĥ çĶŁçļĦ +Ġshel ving +. 
section +ĠB alls +.m onth +Ġenact ment +æµ· æ¹¾ +AC ç±³åħ° +P Q +b ones +å¹¶ åŃĺ +éĻĨ ç¾½ +Ġroot ing +æŃ¤æ¬¡ æ´»åĬ¨ +è¿ĩ çĥŃ +ï¼ģ âĢĻ +æ¶Ī çĺ¦ +Ġtax ing +åģ· æĩĴ +Min nesota +ĠDeterm ines +ĠHus sein +ä¸İ éĿŀ +ï¼Į以 éģ¿åħį +å¢ŀåĬł é¢Ŀ +æ´ŀ 天 +el ike +缸åħ³ æĢ§ +Ph y +ĠInvest igator +wait ing +_END IAN +Ġdens ely +ME A +ï¼Į å¢ŀ +Ġcl ipped +RE A +ï¼Įä¸Ģ çľ¼ +ä¸ĢåĬ¨ ä¸įåĬ¨ +>? [< +ĠSear ches +gr unt +INT RODUCTION +.b i +åĩºåħ¥ å¢ĥ +é¢ģå¥ĸ åħ¸ç¤¼ +as eline +åIJĽ çļĦ +ĠBuck ingham +å¹´ 年度æĬ¥åijĬ +å±ŀ åľ° +ä¹ĭ åij½ +ric ula +æľ« æľŁ +v acc +it ä +æĻº åºĵ +Ġfoot note +.get Parameter +èµı æŀIJ +ĠCong rats +ä¼łéĢģ 带 +Incre ased +ĠHarm on +yst ic +åī§ éĽĨ +åIJ¯åĬ¨ äºĨ +ĠMass ive +our d +ĠN ay +And rea +Ġsh ard +Ġsc ot +è¾¾ æĸ¯ +åľ° çľĭäºĨ +cc s +ï¼Įåľ¨ éĤ£ +ï¼Įæīį èĥ½å¤Ł +èĤĸ æĪĺ +_ clip +Ġb tw +ãĢģ å®Ŀ +éħĴ é¦Ĩ +æ°´ å¤ĦçIJĨ +Ġet a +æĿij éĩĮçļĦ +åĩºä¸Ģ èĤ¡ +çªĿ éĩĮ +ep am +Ġfl ax +HE ME +ĠBoy le +æĮĤäºĨ ç͵è¯Ŀ +Pix map +, but +åIJij éĺ³ +M ISSION +ĠB U +- vis +ä¸Ģ个 æĺŁæľŁ +åŀ ł +.l abels +ĠBeth lehem +d bl +æľī åij³ +`` , +Ġunf olds +ĠJo anna +åĺ¿åĺ¿ ä¸Ģç¬ij +ĠKath ryn +/ CD +ĠC CS +æĪIJ 群 +Ġunder cover +.S canner +Ġpolit ely +_ins n +鸦 çīĩ +M j +ä¹ĭ éķ¿ +á rio +ĠWar wick +Ġhint ed +Ġrh in +Ġ---- --- +o ine +Ġw b +æłª æ´² +Contin uing +, sizeof +Ġe j +ĠConnect ing +Market s +n oreply +Ġ iced +说 ä¸Ģåı¥ +Ġvis ibly +ĠG TX +æ¿Ģ èį¡ +Ġthin ly +Ġ å´Ķ +å°Ķ åħĭ +综 è¿° +Good s +ï¼Įè¿Ļ éĥ½æĺ¯ +建 æ¡£ +ier o +Ġgl itch +Ġharmon ious +åIJĪ ä¹İ +Ġrec ol +æĺ¯ä¸į éĶĻ +说äºĨ ä»Ģä¹Ī +Ġ ile +åIJ¬ ä¸įåΰ +èĩªå·±çļĦ åĬĽéĩı +_H TTP +Ġcompl ies +_DOM AIN +com mercial +Ġac cl +ÙĬ ÙĨ +ĠTur ks +Ġ åĵªæĢķ +Ph p +BT W +r iment +Comp ressed +æĿ¯ ä¸Ń +Ġperipher y +ĠO pc +ĠSim one +ç¥Ī ç¦ı +à¥įठ° +. 
figure +ä¸ĭ èĤ¢ +K ick +è¶ħ é¢Ŀ +ĠSub mitted +ï¼ħ ï¼Ľ +æ±IJ èİŀ +Ġend lessly +Ġgl acier +å¹´çļĦ åİĨåı² +ĠK aj +éĩij çŁ³ +sub mitted +æľ¬å®ŀç͍æĸ°åŀĭ æ¶īåıĬ +Ġprohib iting +-s ale +Ġdri pping +å»¶ å±ķ +-M ar +ä¼łæĿ¥ ä¸Ģéĺµ +Ġit ch +ĠH ospice +Ġwor sen +ï¼Ł åı¯æĺ¯ +ĠZimm erman +Ġint rins +.m emory +Ġallow ances +çģ« èħ¿ +éĴ¢ åİĤ +åĶIJ å±± +æĭ¿ ä¸ĭäºĨ +ĠMin erals +ACH ED +ï¼Ľ æĪĸèĢħ +.n on +诺 åŁºäºļ +_MAG IC +æĿ¥ åĪĨæŀIJ +.d w +, çļ®èĤ¤ +B ible +åΰ ä»ĸçļĦ +ç¹ģ æĿĤ +Ġsecret ed +ĠRe iki +ĠRe eves +æľīä¸Ģ çķª +Ġcar avan +åĪĽä½ľ çļĦç½ij绾å°ı说 +çĽİ çĦ¶ +. Bytes +. rule +è®°å½ķ çļĦ +对æīĭ çļĦ +P ASS +ĠT ight +_F AST +åľ¨ æŁIJ +çŃī éĥ½ +ĠH ak +å¤ļ ç͍ +大 å¨ĺ +B illy +ï¼Ī ä¸Ĭæµ· +,å¹¶ ä¸įæĺ¯ +ĠTr udeau +åĵĪ å¼Ĺ +pl s +éĩį ç½® +çα ä¸İ +çļ® æ¯Ľ +ĠVal erie +Ġrib bons +all ax +pl l +T orch +Ġcough ing +ãĢģ åζ +çŃ µ +ä¹ĭ èĭ¦ +第 åįģåĽĽ +PACK AGE +.get Column +æĹ¢çĦ¶ æĺ¯ +æĻºæħ§ çļĦ +ĠFlo res +ä¸į éĢĢ +ãĢģ éĩį大 +æľī 礼è²Į +âĢľ To +... ( +Ar senal +象 çīĻ +н ик +on ance +Ġz ig +.App ly +ï¼Į ä¾Ŀçħ§ +âĢľ åħ¬åı¸ +Ġle aps +缸 çīĩ +ĠTr ash +æĴѿ; åύ +ĠDiam onds +ĠRoy als +H UD +æĹł æŃ¢ +Ġtrans porter +IS D +Byte Buffer +è·¯æĺĵ æĸ¯ +åĨ¬å¥¥ ä¼ļ +. xyz +Ġd rowned +ãģ ¹ +æİ¨ åĬĽ +Private Key +/ module +ĠAr ithmetic +åĨ³ æĸŃ +è¿Ļä¸Ģ éĹ®é¢ĺ +Ġhor rors +ĠCru ises +asm us +ĠE in +ï¼Ł èĢĮ +Ġwork station +[' _ +Ġ æĽ¾ç»ı +Ġcaus ation +浪漫 çļĦ +缴 è§ĤçļĦ +ä»ĵ ä¿ĥ +ï¼Įåıį 对 +Ġnarrow ing +Ġshield ing +Ġpe eling +ç¬ij åijµåijµ +orph ous +_red irect +ì ĥ +ox ins +Execut able +ĠLoren zo +wit ness +ĠRef lections +ĠDeep Copy +å¹¶ éĢļè¿ĩ +åĪĨæŀIJ ä¸İ +çĤ® åħµ +Ġiter able +) .. 
+Ġmoistur izer +çļĦ éħįåIJĪ +åĪĨ æijĬ +宣 æ³Ħ +ä¸įå¾Ĺä¸į æī¿è®¤ +| > +太 æ¹ĸ +Rep air +fin ancial +æľĢæĸ° 竳èĬĤ +ãĢĤ å¾Ĺ +ĠC CR +岩 æµĨ +,åı¯ä»¥ 说 +Ġpunch ing +Ġg imm +ah s +è¿ĻäºĽ äºĭ +缸åħ³ èģĶ +èĶ º +/// < +ĠM ention +æľį äºĨ +Ġobject ForKey +ãĢĤ åįģ +Âł èIJ§ +ĠK nee +ï¼Įä»ĸ éĥ½ +.Att ributes +ï¼Į çī© +Ġat ypical +æľ¬ 宫 +没æľī åħ³ç³» +管 åŃIJ +ä¼ĺè´¨ çļĦæľįåĬ¡ +sp here +çĭł æĬĵ +ç²® æ²¹ +_ip v +æĪij 以为 +马 è¹Ħ +Ġrest orative +çļ± èµ·äºĨ +ï¼Įè°ģ çŁ¥ +Ġrecall ing +Ġtrans cribed +ç®Ĺ çĽĺ +ĠT ata +éĥ¨åĪĨ åľ°åĮº +éª ¥ +Te a +å¿ĥçIJĨ åѦ家 +Ġ çͲ +å½Ĵ 宿 +ĠInstall ing +ï¼Į åºĦ +åĴĮ æıIJé«ĺ +aps ible +Ġprejud icial +Ġloc ator +Ġescal ation +ç¥Ń åĿĽ +ĠKurd ish +Ġab orted +æĸ° æĶ¿ +.f oo +ĠFl av +æ¿ĢåĬ± 对象 +åīį ä¸Ģ天 +æĻ®éĢļ åIJĪä¼Ļ +ĠCrit ics +ol st +Cons ent +Ġµ g +ï¼Įä¸į论 æĺ¯ +ãĢĤ èIJ§ +Ġpersu asion +çľĭ è¿Ļ +AD V +Ġslic ing +âĢĿ - +-n ote +æŀ¢ å¯Ĩ +å¨ģå°Ķ 士 +æĬĵç´§ æĹ¶éĹ´ +N BC +âĢľ å½ĵçĦ¶ +Che f +: string +Ġl inem +ĠTown s +r isk +ï¼Ī åľ¨ +ĠSe vere +_T C +ĠTest imonials +Dir s +Ġast rolog +Ġf intech +ĠSt im +æľ¬ é¢ĺèĢĥæŁ¥ +ath ione +Ġcontent ious +ĠCrim son +Ġexert ed +Ġcl aws +ĠY AML +br ate +c apture +举 ä¾§ +, å¾Ī容æĺĵ +ĠB uh +ident ly +åĮĹ å¹³ +æľįåĬ¡ 端 +.j boss +æĭĽ äºĨ +Ġconc ave +åĽŀå¿Ĩ èµ· +俯 è§Ĩ +ĠShut tle +ï¼Į æĪIJéĥ½ +mer c +Ġ_ ( +篮 ç½ij +弯 è·¯ +çº £ +ï¼Įåľ¨ ä»ĸ们 +èħ ± +Ġmag ically +åĩº åIJįçļĦ +é«ĺ ç´łè´¨ +Ġemerg ent +æ·ij 女 +ĠL yme +红 é¢ľ +found land +al us +éĥ¨åĪĨ æĺ¯ +fe at +ä¸Ĭä¸ĭ 游 +Ġantit rust +· ÃIJ +礼 åłĤ +ا د ++ v +Ġv oxel +-c ig +æĸĩèīº å¤įåħ´ +çļĦ å®ŀçݰ +Ġ! !! 
+Ps alm +< & +ï¼Į è´º +Ġin ks +Ġd j +Ġhe reditary +ĠZ ap +å¿«éĢŁ åľ° +æİ¨åĬ¨ äºĨ +ãĢĤ ç͍äºİ +Ġdorm ant +S cores +® , +/ edit +为 ä¸Ń +ä»į å°Ĩ +μ g +ä¹ī å·¥ +乡 åľŁ +稻 çͰ +å¹³ ç§» +æķ Ŀ +Ġeurope an +Ġnont rivial +db g +ĠTh ou +èIJ¥ä¸ļ éĥ¨ +é¢Ħ示 çĿĢ +ç¥ŀ åĬĽ +ãĢĤ åį¡ +论 çĤ¹ +æĤ¨ åı¯ä»¥ +åĩı çģ¾ +-re li +Ġartis ans +Ġin quired +ĠE MT +å°½ æľī +ä¸Ĭæµ· è¯ģåĪ¸äº¤æĺĵæīĢ +ç¼´ åŃĺ +ä¸įåı¯èĥ½ çļĦ +èĢĮ åĿIJ +æ¯Ķè¾ĥ å°ı +åѦçĶŁ 对 +çīĽ æ´¥ +ĠAM L +ĠTrin idad +缸 è·Ŀ +æİ¥ äºĨ +CA DE +John ny +en ia +ä¸Ģ æ¡Ī +Ġattain ment +ĠH anoi +Ġhas hes +ee z +ĠPand ora +ãĢĤ åĪļæīį +躲 éĹª +/r hs +Ġserv o +. They +åħ¥ 宫 +表 象 +rest art +ĠSem antic +% @ +åħΠ驱 +Ġexp elled +åĴĮ èµĦæºIJ +Ġch atter +) åıĬ +ï¼Į éĩijå±ŀ +åΰ æĪij们 +Ġenforce able +ĠDEV ICE +æ¼Ķ çļĦ +éĵ¶è¡Į åŃĺæ¬¾ +gl ut +ĠMal awi +Ġlin ers +. loop +N ich +ï¼Į æ²³åįĹ +æ¯Ķ è¯ķ +æĶ¾ 纵 +æij ģ +_M ASTER +ï¼Įå¾Īå¤ļ 人éĥ½ +èµ°äºĨ è¿Ľåİ» +åıijçĶŁäºĨ åıĺåĮĸ +ĠLy ons +Ġpilgr image +æııè¿°çļĦ å®ŀæĸ½ä¾ĭ +ãĢģ èĭ± +ï¹ IJ +Ġch al +a ise +ĠPl ays +Ġbi ometric +_num bers +ĠItem Stack +Ġsel enium +åѦ è¿ĩ +ix ing +ĠTut or +ĠP yth +(c ursor +Uk raine +èĢIJ ä¹ħ +ï¼Į éļIJéļIJ +个 èĭ¹æŀľ +å°ı éĥİ +Ġcar c +åıijçݰ éĹ®é¢ĺ +æ°§åĮĸ éĵĿ +.Field Descriptor +Ġ æ±Ĥ +author ization +оÑĤ оÑĢ +å®ģæ³¢ å¸Ĥ +æĪij åĨĽ +æīĢä½ľ æīĢ为 +åĨ· äºĨ +åºĶ该 å°±æĺ¯ +ĠTen ant +Ġc airo +ï¼ļ âĢĿ +ä¸İ 伦 +ãĢģ P +ĠK uala +ĠPhot ograph +th in +Ġex position +led ger +åı· åĴĮ +Mill iseconds +Ġaster oid +åĪĨ ç»Ļ +ä¹ĭ æĽ° +ï¼Įä¹Ł ä¸įçŁ¥ +失 æģĭ +åı¯çα çļĦå°ı +ĠPhill ies +C arp +x or +æĬ¬ çľ¼ +(B uild +èݹ èݹ +ãĢĤ åĸľæ¬¢ +å°ij 主 +åIJ¸ çĽĺ +Ġlod ged +Ġinsult ing +éĩį æ¸© +èĩ´ çĹħ +Ġsepar able +H ang +Ñĥ м +reat er +ï¼Įä¸Ĭ å¸Ĥ +åij¼åIJ¸ éģĵ +_att ach +" M +: ä¸ŃåĽ½ +éĩį æķ´ +-in clusive +Ġtilt ed +ĠP ty +è´Ł æľī +Ġd avid +Ġ' :' +ï¼ĮæīĢ以 å°± +ĠR out +èĩªå·± æľī +åıĹåΰ çļĦ +ĠRub in +Ġadip ose +Ġle th +éģĵ éģĵ +Up dating +Elect rical +缸 è§Ĩ +ãĢĤæŃ¤ åĪ» +ï¼Į éħĴåºĹ +Ġbe gged +Ġair ing +Ġgarn ish +ol ite +çĭ¬ æľī +ĠHay den +Ġ ä¸Ģ缴 +éĺ³ çĹ¿ +Ġaud ible +ĠPROC ESS +Ġepidem iological +ç»ĵæŀľ 为 +F allback +h igher 
+éľĢæ±Ĥ éĩı +æ°ı æĹı +Ġerr ands +åİĭåĬĽ 大 +ĠBerks hire +ä¹ī 项 +'] ). +. restore +al ist +åijĬ ç»Ī +-in stance +Ġoc clusion +ï¼Įè¿Ļ æĿ¡ +æİ§åζ ä½ı +æĻ® æ³ķ +âĢľ , +éĩij æĺŁ +ĠMerr ill +Ġsten osis +B UTTON +Creat es +åľ° åĪ¶å®ľ +Ġover turned +è¿Ļ个 æķħäºĭ +Ġgl aucoma +è·³ è¿ĩ +ç¹ģ è¡į +ter dam +Ġacc use +æıIJ éĺ² +æĬĬ 缮åħī +Ġì Ħ +ĠDress es +ãĢģ æ£Ģæµĭ +ä¸ĭ 课 +oot s +æŃĮ è¿· +èģĶ ç»ĵ +ĠAdd ition +_IN V +Ġgrass es +Ġspawn ed +çϽ æĺĵ +rr rr +Ġ æĪĺ +å¼ı ä¸Ń +å¼ķ 诱 +åıĬåħ¶ å®ĥ +\ Schema +ï¼Į \" +ĠS ik +åħ¥ ä¼į +ï¼Įä¸Ģ é¢Ĺ +ï¼Į以 满足 +MA IN +UI Application +éķ¿ èħ¿ +å¤ĩ æŁ¥ +Ġflo ated +æĪIJ为 ä¸ŃåĽ½ +ĠRuntime Error +ï¼Į åĩĨç¡® +val ence +Ġchang er +æı¡ ç´§ +ãĢģ 空æ°Ķ +Ġweek days +积 æ·Ģ +éĥ½ çͱ +ĠV od +åıΠ好 +ãģ ¸ +æ¸ħ çϽ +ĠX II +å·¥ç¨ĭ 设计 +Ġton er +Ġdiss imilar +æ¹ĺ æ½Ń +: l +ï¼Į æĦŁåΰ +ĠB ess +Ġf us +Ġchild birth +æľīä¸Ģ å¥Ĺ +-ex ecut +n ecessary +at hed +.h and +ĠEver ton +# > +ĠH ubble +Ġnumber With +æ²³ çķĶ +ï¼Į被 称为 +Help ing +Ġcardi omy +Ġ æĻºèĥ½ +, æĶ¾åħ¥ +人 æĪĸ +墨 æ°´ +é£ŀè¡Į åύ +ĠThe ss +ç½ Ķ +éŨ åħ³ +åıĪ æĿ¥äºĨ +ĠTr out +åѦçĶŁ åŃ¦ä¹ł +ãģ¾ ãģĹãģŁ +ä¸į æĶ¾å¿ĥ +ĠM ice +IO C +éªĹ åıĸ +ãģĹ ãģĦ +  +id ious +ITT LE +ĠT ess +Ġ= " +çŁŃ æĹ¶éĹ´ +ĠEth nic +_not ification +% B +Ġmat hematically +ott ie +f itted +åıij æĦģ +eb x +纷 åijĪ +Ġmis fort +ãĢĤ å̼å¾Ĺä¸ĢæıIJçļĦæĺ¯ +Ġse u +ĠOut doors +( Model +ĠH orses +Incre asing +Ġ ................................ 
+Ġm ö +ĠLong er +ĠWorks heet +.int ellij +ï¼Į è¯Ń +âĢľ âĢĺ +æľª æĪIJå¹´ +ä»ħ æľīçļĦ +Ġoverwhel m +ï¼ļ é«ĺ +åĵ Ķ +浪费 äºĨ +ãĢĤ çĶŁæ´» +çļĦ人 å·¥ +湿 äºĨ +ç¬¬åĽĽ å±Ĭ +ĠInterest s +CRE EN +ãĢģ èĩªæĪij +ĠA lo +ï¼Ľ ç»Ħç»ĩ +æĿ¨ å®¶ +Ġconstruct ors +ĠMaster Card +Ġ å¾Ĺ +ĠD SS +ä¸ī èĢħ +éĵģ çļ® +ipt ic +advant ages +æ²ĥå°Ķ æ²ĥ +a qu +c us +Ġr idd +.Status Code +人 åı¯ä»¥ +æ¡¥ ä¸Ĭ +Ġn aughty +ĠGl ue +SH OP +Ġre claimed +Ġcl asp +æīĢ éĢī +æĦŁ åħī +æīį å¹² +é¾Ļ å±± +è¢ģ ä¸ĸåĩ¯ +Ġét é +_ constructor +Ġdata frame +.p k +_S ub +åŁºåĽł ç»Ħ +çļĦæĥħ æĬ¥ +, in +ï¼Ī H +æľª å®Į +é¦ĸ 款 +ä¿Ŀ温 æĿIJæĸĻ +åĩłåįģ ä¸ĩ +Ġdec ad +ĠPers istent +Ġspraw ling +( scale +ï¼Į èµ°è¿Ľ +港 åĮº +Ġpre operative +oth ic +å®ŀæĸ½ äºĨ +æĭ¥æľī ä¸Ģ个 +åĩ¶ åĭIJ +ï¼Įæĸ° å¢ŀ +ĠFres no +us band +åı£ éŁ³ +ĠImprove ments +ĠS CR +å°ı åı¯ +æµ· 伦 +æķ´ 容 +-d isplay +: æŃ¥è¡Į +åĴĮ åįİ +åı¯èĥ½ä¼ļ åĩºçݰ += h +HE L +-r anging +æĪij ç»Ļä½ł +äºĶ 天 +çģ« æŁ´ +ĠCoin base +ï¼Į è¯Ńè¨Ģ +é«ĺ æ°Ķ +æĹ¶éĹ´ çĤ¹ +}) } +Ġg es +ä¸Ģ æł¼ +ĠCom Visible +ĠPl uto +åıĹ å½±åĵį +æľ« ä¸ĸ +ĠEuro s +ĠEx odus +~ ( +Ġhal ftime +æ· ħ +ĠZ ag +.normal ize +al as +åıįåºĶ çļĦ +ï¼ĮçľĭäºĨ çľĭ +Ġ æĺ¨å¤© +, åĨῬ¡ +? < +次 åħĥ +è¨ ĺ +çĽĺ éĿ¢ +çªĹ å¤ĸçļĦ +Ġaffirm ation +pp les +Ġflow ed +åħ¨æĸ¹ä½į çļĦ +.s im +ï¼Įä»ĸ们 å°± +åħ³æ³¨ 度 +uster ity +Nic ole +æŃ£ äºĭ +renew command +æĿ¥ 表示 +é£İ åįİ +âĦĥ ä¸ĭ +éĻĦå±ŀ åĮ»éĻ¢ +åΰ æĻļ +.t m +éħĴ åºĦ +Ġneutr inos +ä¹ĭ åīij +请 注æĦı +åĪĿ è§ģ +T erry +g aming +ï¼Į æĺİçϽ +ï¼Į æĹ¶åĪ» +Ã Ļ +èĩ³å°ij 两个 +- reference +/ tr +ï¼Į æİ¢ +_b ook +åij¨ 身 +å°ij åħĪ +. 
animation +å·¥ä½ľ é¢Ĩ导å°ıç»Ħ +ä¿® æĸ¯ +å´© å¡Į +Ġf ray +æİĴ çīĪ +çľĭåΰ ä½ł +æĺ¾ç¤º äºĨ +æĺ¯ä¸į æķ¢ +ĠJew el +çŀ§ ä¸įèµ· +çİ»çĴĥ 纤维 +ĠAdvert isement +红 æĺŁ +ĠFre ed +Initial ization +ä¸Ĭ æľĪ +St uff +Ġ æį¢ +åĽĽ å°Ħ +.De ep +- rock +Ġmet ropolis +åİŁåĽł çļĦ +ü ller +ĠM ines +ĠN odes +ï¼Įéģ¿åħį äºĨ +Ġde re +Ġgenes is +å¼ł åĺ´ +éĻĪ å®¶ +ãĢĤä»ĸ è¿ĺ +: æīĵ车 +社ä¼ļ åıijå±ķ +ä¸Ģ次 åıĪä¸Ģ次 +éĨĴ 缮 +Ġgro oves +Ġfract ured +Pred iction +Ġuntrans lated += j +Ad mission +IR Q +ĠC JK +Ġv r +ä¸Ń ä¸įè¦ģåĩºçݰ +西 æľį +åĵĪ æ¬ł +T OB +Ġap ache +Ġfull ness +Ġneg ate +¿ Ãij +appro val +æĭĽæłĩ æĸĩä»¶ +ä¸įèĥ½ ä¸į +ä¸Ģ大 åłĨ +çļĦ 带é¢Ĩä¸ĭ +ĠI st +c rate +ãĢĤ å·¥ä½ľ +ĠY N +æīĵ åŃĹ +äºĭæĥħ èĬĤ +ĠCH ANGE +SH OW +ĠSlo an +ãĢģ 西å®ī +æºIJ 代çłģ +åıįåºĶ éĩľ +ĠAMAZ ING +å¿ĥ çĶĺæĥħæĦ¿ +Ġpurpose ful +ĠJ W +æııè¿° ä¸Ń +. forward +Ġkn obs +Ġ å§ĵåIJį +ol ks +ER GY +æĶ¾ ä»» +ä¹° 个 +Ġs ms +æŀģ ä½İ +Al igned +åħ³æ³¨ åĴĮ +æ³° åĭĴ +ĠEthiop ian +Ġ è´¦éĿ¢ä½Ļé¢Ŀ +-S ah +ãĢģ 竹 +ĠGr inder +ĠJud ah +Ġune asy +两 åľº +Ġreg imens +è´´ åIJ§ +T AGS +Ġprow ess +In nov +ĠWorks pace +ï¼Į åζéĢł +ht e +è¦ģ é¢Ĩ +大 å±ķ +éĻĪ å°ij +éĺµ åŃIJ +æµĵ 度çļĦ +Ġunt uk +æĸ°éĹ» æĬ¥éģĵ +ï¼Įä¸į 许 +éķĩ æ±Ł +éľ² èIJ¥ +æIJĢ æī¶ +ä¸į çIJĨè§£ +åĢĴ 计æĹ¶ +å§IJ åĦ¿ +滤 éķľ +Ġhand set +éĿŀ常 æĦŁè°¢ +ĠRand olph +ere e +ĠZ o +_rec ords +ĠAsp en +-d rive +Ġbeh aving +èѦ åijĺ +åįł åΰ +Ġtrav eller +Ġleaf y +Ġastronom ical +/ shared +Ġun ethical +çĶ· åĦ¿ +ĠLO OK +ĠM ou +ĠW arrant +-> {' +_F ONT +è½® æį¢ +Ġpyl int +Ġ 客æĪ· +éϤ å¤ķ +Ġdram as +ĠF ulton +çŃī æİªæĸ½ +åIJ« æ°´ +麻 éĽĢ +Dead line +B ah +-Americ ans +A AP +æĺ¯ éĤ£ä¹Ī +Ġdo able +天 ä¹IJ +_T CP +é¾Ļ æ³ī +åºĶ该 ä¸įä¼ļ +_p oll +ĠKey Error +Ġrout ed +æĿIJè´¨ çļĦ +or bit +ä»Ģä¹Ī æł·åŃIJ +çľĭçĿĢ èĩªå·± +mat htt +,å¦Ĥ æľī +Ġgl aring +设å¤ĩ è¿Ľè¡Į +æĪIJ为 ä¸Ģç§į +Ang ela +Ġt ann +èī² ç³» +Ġconc ussion +ĠBatt alion +R aj +æĸľ åĿ¡ +çļĦ å¼Ģåı£ +æŀĹ æŀľ +红 çģ« +åĭ¾ å¼ķ +ĠPas adena +Ġo cular +许å¤ļ çļĦ +ĠW omens +ĠL oy +Ġhears ay +( lib +; $ +Ġr as +(p aste +èĪį åĪ© +. 
old +ãĢĤ æĪIJ +éĢ ² +èĢģ æľĭåıĭ +Ġf ishes +Ġout ings +åĩĮ 空 +ç¼ĵåĨ² åĮº +ãĢĤ ç®Ģ +ãĢģ å®ŀçݰ +æĹł èµĸ +å¤ľ å¹ķ +ĠPan asonic +ĠL ank +ĠG AP +ĠNSD ictionary +ãĢģ ç½ij +ä¸į è¿Ľ +åIJİ è§Ĩéķľ +-s u +ĠTR AN +ĠCommit tees +Ġsyn opsis +æŀ¯ èIJİ +ìļ Ķ +Ġç͍æĪ· éĹ®é¢ĺ += P +ï¼Į è§Ĥä¼Ĺ +æİĴ çļĦ +æĽ´ å¿«çļĦ +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ +Ġorth opedic +~ O +Ġover arching +ç¼ ¥ +äºij éľĦ +Ġpar abolic +æİ¥çĿĢ è¯´ +. @ +Ġ å½Ń +æľĢ 常è§ģçļĦ +Ġemb odies +La uren +| < +åIJĦ è·¯ +ç¿» äºĨ个 +Ex porter +ä¼ļ 为 +Ġi kea +.N um +èĥĨ æĢ¯ +Ġreput ed +ĠEl le +ĠHeaven ly +æŀ · +é£Ł çļĦ +æ·±åĪ»çļĦ åį°è±¡ +åΰ å°ı +èĢģ å¨ĺ +Ġmin ed +_m ag +ï¼Į åĽº +im ely +ç² ¼ +ĠI KE +Ġmon opol +Rel igion +ãĢģ 代 +Ġco y +ring es +ãĢĤåľ¨ ä¸Ģ个 +Ġfre es +æ¶Į åĩº +æŃ» åİ»çļĦ +Ġbott leneck +Ġrig idity +ï¼Įä¸į æĹ¶ +Ġtender ness +éħĭ éķ¿ +æ³ ł +Ġwrong doing +å®ŀä¸ļ æľīéĻIJåħ¬åı¸ +Ġc umin +ĠE MP +èĢIJ å¿ĥçļĦ +term ine +Ġexacerb ated +é± ¿ +Ġerad icate +.s k +æĿİ éĺ³ +éķ¿æľŁ 以æĿ¥ +ĠPet ro +ĠOp rah +Ġë ³ +B UF +Ġpick er +.p oints +Ant on +ä¸ŃèᝠæĿIJ +循åºı æ¸IJè¿Ľ +ãĢĤ åŃ¦ä¹ł +pl anted +sm arty +в ан +.I ter +主åĬŀ æĸ¹ +ĠJ ah +ĠPL A +æī¿ å¾· +åħľ éĩĮ +Ġenzym atic +H its +ä¾Ľ æ±Ĥ +çļĦæĹ¶åĢĻ æĪij +è·Į åģľ +H ear +Q i +Ġ èĭ±åĽ½ +ĠP OD +åĨħ 设æľī +åĮĹ æ¬§ +åĨ· åĨ°åĨ° +Ġelect orate +Decl ared +Ġä¸Ĭ æľŁ +]] > +éªĮ èµĦ +unk er +dom ains +ĠAm id +ĠMac Donald +å¾Ĵ åĬ³ +Ġ' ^ +ãĢģ æ±Łè¥¿ +çα 她 +Ġquest s +åĶ® 楼 +å·¥ åѦ +ol ocation +Ġfin s +èĽ Ģ +å®ı 大 +cc ión +Custom ize +Ġs cept +ĠAf ro +Inst itute +Ġdiss atisfied +褪 èī² +Ġad apters +_t ools +è¶ħ ä¹İ +ä¼ij çľł +çļĦ åħ¬åijĬ +eed s +æ¤į åıij +ç²ĺåIJĪ åīĤ +Ð Ĺ +ĠT avern +èĥ½ å¹² +对 æµģ +reg ation +_w rap +*~ * +Ġbe ck +ĠWh ites +Return Type +.ext ra +ä¸į æİ¥åıĹ +um etric +Ġdist rust +Ġcare rs +ä¸ģ åŁº +åħ¬è®¤ çļĦ +_ aff +Ġleg ality +Ġrecogn ising +ภ° +å¹¿æ³Ľ çļĦåºĶç͍ +åĽĬ èĤ¿ +j r +Ġ ä»· +æĶ¯æĮģ è¯ģåΏ +ĠPRO P +p itch +Ġf ores +Ġm ower +] ? 
+Ġ åĦ¿ç«¥ +, è´¨éĩı +åIJĦ åİ¿ +M J +is bury +ĠS ão +æĸ° ä¸ĸ纪 +ï¼Įè¿Ļ ä¸į +åı° å±± +ãĢĤ æĢİä¹Ī +çļĦåİŁåĽł æĺ¯ +ãĢģ åıĤä¸İ +Ġinf estation +éĴ¦ 佩 +/ DC +ç͵ åĽ¾ +ç»ĵæŀĦ åĮĸ +è¤IJ æĸij +" ä¸Ń +Ġg ated +л ен +ĠYan kee +ĠV E +ï¼ĮæĪij åıijçݰ +CT IONS +é¡¶ éĥ¨çļĦ +åĪĨå¸ĥ çļĦ +èĩ³ åĽ¾ +Ġphen otypic +ĠTu ition +Ġ` \" +Mod ification +> Must +æĺ¯ åħ¨çIJĥ +缩 æĶ¾ +è¸ı æŃ¥ +Ġspot ting +ĠPir ate +.m em +Un ary +_R SA +ï¼Įå°ı åĮº +è¿·çħ³ çħ³ +Ġconst itut +Ġbas ename +(st ep +ï¼Į èĥĮåIJİ +åIJİ éģĹçĹĩ +_s id +Ġpost pone +Re asons +ĠProb ate +_up dated +Ġ ä¸Ķ +æľ¬ æĥ³ +ĠInc redible +IST ANCE +ĠBl izzard +æķĮ 对 +æī« äºĨä¸Ģçľ¼ +" S +ĠN ODE +èĢĥèĻij ä¸Ģä¸ĭ +Install ing +ĠDick ens +ĠErn st +) << +çļĦ èĥ³èĨĬ +æ° Ĺ +em outh +身 ä¸ĸ +ï¼Ľ çĦ¶åIJİ +emb edding +ĠMuseum s +Ġgolf ers +åį«åģ¥ å§Ķ +æĽ´ éļ¾ +_d ynamic +ä½İ 端 +éĺ´ åĩī +rob es +et ine +äºİ ä»ĸ +.C iv +ä¹° ä¸ĭ +èĦī æIJı +Delay ed +J ax +ä¹Łæ²¡ ç͍ +contin ued +Ġenorm ously +, 人çĶŁ +ĠB K +Ġha unt +æĹł è·¯ +List ening +Ġtem pered +ĠBar rel +script size +è¨Ģä¸į åıij +åı¯ 没 +çī² çķľ +^^^^ ^^^^ +åı¯ä»¥ åĩıå°ij +åªĽ åªĽ +on en +ï¼Į æĭĴç»Ŀ +Ġh b +ĠB ain +åį´ æĺ¯ä¸Ģ +ve olar +Ġenerg ized +Ġcapac itance +F olders +Ġa lex +ï¼Į 女åĦ¿ +ĠF oley +Ġrep ub +_H andler +Ġè¿Ļ æĺ¯ä¸Ģ个 +ĠEN TER +Ġincons istency +Ġparl ament +女 æĺŁ +缴æİ¥ 被 +^* \ +ers en +åıij çģ« +ĠCl an +_D IM +Ġsingular ities +Ġdat atype +æīĵ èµ· +è¨ Ī +Ġfacilit ator +, éĩĩåıĸ +Ĉ ãĢģ +Ġj uxtap +ne z +ĠMult ip +IMP LE +Ġswallow ing +Ġre inst +ãĢģ æĪIJæľ¬ +å°Ĩ ä¼ļåľ¨ +ï¼Į è§Ĥ +Ġv f +åľº å¤ĸ +æĿ¨ æ£ł +Ġsat ire +EXTERN ALS +ord ion +ç»Ļ 对æĸ¹ +æłĩ çļĦåħ¬åı¸ +Re plication +å½±è§Ĩ åī§ +ĠPupp y +ä¸į åĽŀ +ãĢĬ ä¸ī +éĩij ä¸Ŀ +éĵĥ æľ¨ +绿èī² çݯä¿Ŀ +çļĦæĪĺ 绩 +i ak +Ġext rac +æĽ¾ 说 +å¿ĺ æİī +èħ¾ 空 +_COMM ON +âĢľ ä¸Ĭ +ç«Ļ èµ· +Ġinject ing +Ġsh rine +å¤ļ 好 +æķ°æį® ä¸Ń +综åIJĪ æ²»çIJĨ +ĠSHO ULD +> false +Q D +æĺ¯ éĶĻ误çļĦ +åĸľ äºĭ +,ä½ł åı¯ä»¥ +- added +åľ¨ èĦ¸ä¸Ĭ +reat he +ç»Ĩ å°ıçļĦ +ĠGreen wood +åĨľ æŀĹ +UR Y +æĿ¾ ä¸ĭ +ç«ŀäºī ä¸Ń +ĠPH R +.se parator +Ġallerg ens +Ġ çłĶç©¶ +ï¼Į 尽快 +å¾Ī èĪĴæľį +Ġnon fiction +/g 
pl +ĠBring ing +IN V +éĹŃ å¹ķ +çļĦåīį 端 +i ert +ä¸ĵ åįĩæľ¬ +ĠC ouch +ym p +ä¸Ģå®ļ æľī +å¦ĸ æĹı +{ AD +Ġclass ifiers +ç´« èĸĩ +ĠM ango +ãĢģ 秦 +ĠB isc +ĠU ran +-m ean +ãĢģ H +é»Ħ è±Ĩ +Ġround up +æĭĨ è£ħ +intern et +çIJĨæŁ¥ å¾· += {\ +äºĨä¸Ģ 项 +éħĴ çĵ¶ +å¸ĮæľĽ èĥ½å¤Ł +çľ¯ èµ· +ĠVine yard +Ġem its +è¿ľ è·Ŀ离 +ä¸ĩåħĥ 人æ°ijå¸ģ +åĨľä¸ļ 人åı£ +Ġ è´¢åĬ¡ +as co +ãĢĤ æ¯ıä¸Ģ +ĠI ve +ðŁ ¤ +豪 æĿ° +Ġcommission ing +Ġt outed +ĠG astro +good s +åĩıéĢŁ æľº +ç»ļ çĥĤ +ĠM CC +ç¼ Ī +ĠSh adows +è¶Ĭ å¿« +æī¿ åİĭ +é¼» åŃĶ +( Constants +ãĢģ æĶ¹åĸĦ +J U +ĠP au +åŃIJ æĺ¯ +æĻĥ æĻĥ +æıŃ ç§ĺ +Ġtreat ies +æ°¸ çĶŁ +Ġmid way +Ġing estion +äºij 天 +è¿Ī åIJij +å±Ĥ éĿ¢ä¸Ĭ +w arming +or ations +ãĢĤ ä¸ĸçķĮ +Ġcrypt ography +- option +çļĦ åijĺå·¥ +为 åĽ¾ +ä¸Ĭ 没æľī +-f aced +.P OST +grad ing +p owers +äºĨ å°± +Ġfetch ing +ãĢģ é£Ł +Ġ 论 +ãĢĭ âĢľ +Top ology +ĠWin ery +èϹ æ¡¥ +Ġcupc ake +ï¼Į çijŀ +Ġr nd +(s cript +Ġarrog ance +, å¼ķèµ· +âĢľ å¦Ĥæŀľ +ĠAd ri +ĠBrid g +Ġdoctr ines +Ġd usk +,ä¹Ł è¦ģ +Ġlumin ous +( es +ĠH et +çζ 女 +èī¯ å¤ļ +,æĪij è¦ģ +ĠDom ino +æģ¶æĢ§ èĤ¿çĺ¤ +ĠL over +}} + +irc ular +Ġ第ä¹Ŀ 竳 +Ġis a +å§Ķ å©ī +-P resident +èĤºçĤİ çĸ«æĥħ +ĠPE OPLE +-ren owned +[ ** +ä¸Ń æĸ¹ +ĠG ast +Ġad verts +ie k +æĸĩåĮĸ ä¼łæĴŃ +Ġdeb ilitating +Ġdat um +Ġclar ifying +cons ult +Ġ åĽºå®ļèµĦ产 +ç»Ŀ 伦 +ro k +ĠB DS +çϾ 人 +Ġdata Type +ï¼Įå°Ĩ 她 +æĥ ĺ +示 å¼± +æıIJ交 çļĦ +Bag Constraints +[ out +å½ĵ 头 +温 çĥŃ +ä¼ĺåĬ¿ çļĦ +Ġcaut iously +S ERV +æµģéĩı 计 +) \\ +大 éĹ®é¢ĺ +. 
Compare +ĠF n +ĠO CD +æĴ ħ +éĿł å±± +ãĢģ 强åĮĸ +ĠHundred s +Ġdi agon +åĽŀæĿ¥ åIJİ +æľº çģµ +æĢ¥ èºģ +Ġsquir rel +L AGS +P AT +çŁ³ éŨ +Ġbreath s +积åĪĨ æ¦ľ +ungal ow +< Item +Ġw ards +èIJ½ æ°´ +\ vec +ĠC aul +åıĬ åij¨è¾¹ +æĢķ ä½ł +éĤ£ åľº +被 åħ³ +ĠAcc reditation +v at +ä¸Ń 表示 +å§Ķ 书记 +é¡¶ çļĦ +å»Ĭ åĿĬ +w ort +积 æĶĴ +Ġ< $ +ec ret +ãĢģ 鼷 +ĠF ay +ĠAr rang +$ ", +Ġcon quest +ĠTax i +W inner +orn a +æĻļ è¾Ī +c rit +Ġin clus +åħ·æľī ä¸Ģå®ļ +çļĦéĤ£ ä½į +å̼å¾Ĺ ä¿¡èµĸ +.input s +缸åħ³ æ³ķå¾ĭ +éĥ¨éĺŁ çļĦ +âĢľ çα +ä¾Ŀ ç¨Ģ +让 对æĸ¹ +èĢĥ çļĦ +_ED GE +ĠM og +å§IJ å¼Ł +part icle +END IF +Ġstrat ified +Ġmotor ist +ĠHon orable +Altern ate +æ¶ħ æ§ĥ +B luetooth +ĠC SP +çī¹ çº³ +åħ« æĪĴ +ĠMor an +.y ear +Ġh ob +Ġgu inea +åıĸ èĪį +åĨ¬ 天çļĦ +æľ± éĽĢ +). : ][< +ĠSwe at +ig ua +Ġhand written +åĿĩ 设æľī +è¾ħ ä½IJ +Ġfuck ed +ĠR OW +Ġj ihad +è¨Ģæĥħ å°ı说 +an se +大 ç±» +equ ivalent +charg ing +ï¼Į以ä¸ĭ æĺ¯ä¸ĢäºĽ +Ġh ither +å̼ éĴ± +ĠAN N +产åĵģ æľī +交 äºĨ +ï¼Į她 è¿ĺ +ĠIv ory +A UD +\ wedge +ut ely +èº ĩ +æĴ ® +æģ¼ çģ« +Ġ éĿ¢ +Ġ éĢļ +ä»Ģä¹Ī åİŁåĽł +åİĨåı² æĤłä¹ħ +Ġescal ating +æĺµ ç§° +ĠFib onacci +ĠR oles +åįķ æį® +ĠIn verse +次 åĬ©æĶ» +è®Ńç»ĥ èIJ¥ +å¼Ģåıij åĴĮ +çĶ³è¯· æĿIJæĸĻ +ï¼ĮæĢķ æĺ¯ +ic ul +P RESS +Ġse ab +ï¼Įèĭ¥ ä¸įæĺ¯ +ĠGL float +ï¼ļ ä¸į +å¾ģ ç¨İ +ĠAr b +Mar ie +\| _ +certain ty +Ġjarg on +èį ŀ +éĢļè¿ĩ ç½ij绾 +umer able +çĻĮ ç»Ĩèĥŀ +_B INARY +otyp ical +Ġmon ol +.T ree +Ġnit ric +Ġreloc ating +ä¹ĭ 书 +ï¼ĮæĪij ä¸Ģ缴 +.d ouble +åĨľ åķĨ +arc her +æıĴ åľ¨ +t itles +ãĢģ èĥ¸ +éĵģ éĿĴ +ĠBy rne +çļĦæ°´ åĪĨ +ä¹ĭ éŁ³ +Ġque ens +å¬ī æĪı +Ġ ³³ +çŃī åĽ½ +ĠL ester +ac us +ç»Ĩ çļĦ +Ġsupernat ant +> Q +Ġpar ishes +Ġmur m +ĠEst imates +adjust ed +欢 声 +设å¤ĩ ä¸Ĭ +Ġneuro deg +Ġ å·¥ç¨ĭ +çŃī åįķä½į +.D escription +Ġuncont rolled +åŃĻ æĿĥ +d egree +Ġatt est +åIJ« èĵĦ +æ·¡ æ¼ł +ĠColomb ian +Ult ra +F riendly +achel ors +Ġimmer se +/ create +Ġ 楼 +ĠF irms +读 åΰ +æĬ¬ é«ĺ +Ġrob ber +Ġ(* ( +Sustain able +ĠANSW ER +ï¼Į è¡£æľį +Ġp j +ĠB aba +ud ev +æĭį æīĵ +: left +ä¸į æĥĬ +ï¼Įä»ĸ 认为 +çłĶç©¶ å·¥ä½ľ +ï¼Į åħĦå¼Ł +del im +Ġplung ed 
+J ake +å¹ħ çĶ» +_str ategy +ĠE EPROM +", & +åĬŁ åĭĭ +_w hen +l apping +ĠS plash +ç¾ŀ èĢ» +ult ures +丹 çͰ +ĠE GL +ä¸İä¼Ĺ ä¸įåIJĮçļĦ +,ä½Ĩ æĪij +åĭŁ æĬķ +转åĮĸ çİĩ +å¼Ģå§ĭ æĹ¶ +人æ°ij ç½ij +亮 丽 +第äºĶ çϾ +Ġfertil izers +éϤå°ĺ åύ +ĠA SA +ä¼ļ ç»§ç»Ń +计 è°ĭ +åĪļ éľĢ +ĠUser name +. obs +Ġl ash +åĪĽ æĬķ +reason ably +åºĶ éĤĢ +und able +èĤī è´¨ +çļĦ ç¾İ丽 +å· ½ +ĠK obe +羣 è¯Ŀ +ead y +ï¼Įä½Ĩ ä»ĸçļĦ +Ġlo fty +âĢľ çİ°åľ¨ +ĠR AD +éĹ· éĹ· +( .* +s erv +en arios +西 æĸ¹çļĦ +è¿ĺ ä¸įåΰ +设计 çIJĨ念 +ĠARTICLE SMORE +Ġrhyth mic +ãĢĤ æĦ¿ +ä¹ĭ é«ĺ +åĪĩ ç£ĭ +ĠLa place +å¦ĸ éŃĶ +Ġch ant +Ġ% { +Ġinc arnation +æķij åĩº +ĠAv a +ä½ĵèĤ² è¿IJåĬ¨ +ĠCock tail +Y a +å¿ĥ çŁ¥ +... ]( +交 åĬŀ +åIJĥ ä¸ĢäºĽ +ãĥ Ļ +_buff ers +. operator +ĠB enny +Ġen sembles +ĠSearch ing +comb ined +er ne +ä¹Ł åŃĺåľ¨ +Ġappro ves +è¤ Ĥ +ĠOrt iz +cond itionally +ET IME +]{} ]{} +IC ollection +帮 æĤ¨ +_s wap +åį« åĨĽ +æ¶Ĥ å¸ĥ +é¼İ é¼İ +( rule +ï¼Į æıIJåĩºäºĨ +è¡ Ĩ +æĤ¦ è̳ +ĠExpert ise +è´® èĹı +Ġgriev ances +æľªå©ļ 妻 +ĠS cheduled +é¢ ļ +ph ans +æĿĢ æľº +éĴ» äºķ +Ach ieve +( rec +× ĵ +æľī åĪ« +ĠD FS +æĶ» 读 +å¸Ĥå§Ķ 书记 +Ġheavy weight +Ton ight +åħī æ´ģ +æĢĢ æĹ§ +ĠRequ ire +ä¸īåįģ äºĶ +abcdef gh +çļĦ æŃ¥ä¼IJ +Ġam used +欣 å®ī +åį³ä½¿ æĺ¯ +ï¼ĮçϽ 天 +h ands +Ġf uzz +Ġsuccess ors +汤 æ±ģ +Ġ èĤ¡ç¥¨ +说 æ¸ħæ¥ļ +B lo +ä¹Ł ç¡®å®ŀ +Ġbl ister +ĠLogger Factory +Ġf ools +å¸ķ çī¹ +ĠS MC +åĪĨ éĻ¢ +æŁIJç§į æĦıä¹īä¸Ĭ +ĠFried rich +åĽ½ èĹ© +å®Ī åĢĻ +ä¸įè¦ģ ç´§ +缸åIJĮ æĪĸ +Ġanthrop ology +B estseller +ĠG ael +身 åıĹ +_g ain +Norm ally +ĠSew ing +ĠO ro +èά èĭ¥ +ĠM LA +Ġ[ . 
+.P rim +ĠF ired +è¿Ļ éĹ´ +ĠCh ains +åĪĨ享 äºĨ +ä¸į åĢĴ +Ġset back +èµ° è¿ĽæĿ¥ +/ all +è±ģ çĦ¶ +Ġflee ce +t ics +ãĢĤ æĤ£èĢħ +K al +ãĢģ çī¹èī² +og any +åıijå±ķ æĶ¹éĿ© +éĥ½ä¼ļ åľ¨ +第äºĶ å±Ĭ +Hist ogram +re peated +çļĦä¸Ģ å¤Ħ +å¯ĦçĶŁ èĻ« +^ ) +Ĭ åŃIJ +Ġcol legiate +requ isite +æļ´ åıij +æľŁå¾ħ çĿĢ +Ġembro idered +ĠW ah +ĠSp am +Ġsw agger +æ¹ĸ å·ŀ +è¿Ļéĥ¨ åī§ +èĥ° èħº +e ight +å®ŀ å½ķ +ĠAL IGN +强度 åĴĮ +List ed +_AD MIN +ï¼ī 人 +ĠInterpret ation +éĺĢ ä½ĵ +oc in +éľĢè¦ģ ä½ł +term ilk +EXTERNALS YM +( empty +æĺİ çĽ® +ãĢĤåľ¨ è¿Ļç§į +Ġcash ier +ch al +Ġund e +许 诺 +Ġpoly ethylene +æĺŁæľŁ ä¸Ģ +contin ental +çĥ§ 伤 +æķıæĦŁ æĢ§ +_ EDIT +以 åĨħçļĦ +Ġup regulated +éĢī 为 +ĠLan tern +, æĺİç¡® +. # +ï¼ļ å¦Ĥ +åıĮ åįģä¸Ģ +Ġsyn ch +ï¼Į èIJ¨ +èŁ Ĩ +ĠSpec s +ï¼Į çĿĢ +Host s +_ identity +Ġ åı° +is Valid +âĢĿ äºĮåŃĹ +åIJĪ èIJ¥ +å±ķ é¦Ĩ +ï¼Į èĢĥè¯ķ +çļĦ éĩı +ĠArg entine +ith a +ï¼Įè¿ĺ ä¸į +è¦ģ ä¸įæĺ¯ +建议 大家 +(k ind +Ġdet achment +å¨ģ 夷 +港 å¸ģ +æł¹æį® åľ° +å¾Īå¿« çļĦ +麻辣 çĥ« +大 åı¯ +çĶŁäº§ ä¸Ń +æĢ»ç»ĵ äºĨ +B read +pp en +Ġperf ected +Wil son +çļĦ æĦŁåıĹ +åħ¥ è´¦ +Ġhere after +ï¼Į æĿIJæĸĻ +li oma +Ġ è§ģåΰ +ãĢģ å¢ŀåĬł +声 åIJį +å°± å¦Ĥ +ä¸ī 竳 +Ġ` ( +å¤ļå°ij å°ij +å®¶ç͍ ç͵åύ +, å®ĮæĪIJ +H ire +ĠR amb +æĪij å°Ĩ +åħ¶ä»ĸ 人çļĦ +åľ°çIJĥ çļĦ +èµIJ äºĪ +ĠHold en +åħ» é¢ľ +æĻº åĪ© +Ġtestimon ial +. 
ali +m age +äºĨ æĥ³ +Ġcat ers +çĭ¬èĩª ä¸Ģ人 +ĠMethod ology +Ġintertw ined +$ str +Ġl ign +èĥ¡ é¡» +çļĦ主è¦ģ åĨħ容 +Ġr arity +æ°´ æ»´ +leq slant +çŃĽ åĪĨ +ĠWas her +éĶ¦è¡£ åį« +em ically +åħ¬åı¸ 对 +ï¼Įåıª åIJ¬ +ä¸ĭåĪĹ æĥħå½¢ +å®ŀè´¨ ä¸Ĭ +æ½ľ åĬĽçļĦ +: ä¸Ģæĺ¯ +e j +åΰ èĩªå·±çļĦ +åºĶ éħ¬ +( Is +é«ĺ 精度 +游 è®° +æİ¨ æĭ¿ +çļĦæĹ¶åĢĻ ä¼ļ +_EX CEPTION +æ³ķåħ° åħĭ +Ġretire es +ĠM PH +ĠH au +æ´Ĺ éĿ¢ +Ġcapital ization +Cent re +ï¼Į 缮çļĦæĺ¯ +ï¼ĮæīĢæľī 人 +-round ed +å®ļ äºİ +åŃ¦ä¹ł ä¸Ń +ç¨İ åIJİ +Did n +ĠFarm ing +Ġå¼Ģ æľ¬ +åIJį åĮ» +ĠBar th +çIJIJ äºĭ +Ġget Type +转 è§Ĵ +ĠDec ide +Ġ æĿŃå·ŀ +ãĢĤ å¸Ĥåľº +ĠDu chess +åIJĮä¸Ģæİ§åζ ä¸ĭ +s ong +Ġt q +_t ri +å±ķ ä½į +ĠDe af +Ġhapp iest +çļĦå¤ĸ å£ģ +Ġfo i +IRT UAL +P aid +\ fs +n orth +ï¼Į è´µ +æĵį çĽĺ +_ editor +鼶 åħ« +çļĦéĹ®é¢ĺ æĺ¯ +.read Line +Ġscram ble +çļĦ 广åijĬ +ãĢĤ èĥ½å¤Ł +çļĦä¸Ģ å®ļ +ĠOff shore +å£ģ æĮĤ +ĠPath s +Ġun imag +æº ¥ +åĮĸ ç²ªæ±ł +Ġpain less +åħ¥ åĽŃ +çݰéĩij æµģåħ¥ +Ġ 空 +an j +ï¼Į æ·»åĬł +Ġnew found +ï¼Įä¸įæĺ¯ åIJĹ +ĠKar ma +Ġent ice +çļĩ ä¸ĬçļĦ +_MEM BER +ĠS CH +fil tered +ï¼Į å´Ķ +ï¼Ī åħ¬åħĥ +_g a +ĠSub sequent +交æµģ åĴĮ +çŀ¬ æĹ¶ +ĠDar ling +缮 ä¸Ń +è¯ļ å¿ĥ +ĠBow en +ĠKind ergarten +Ġretreat s +on ation +Ġinter ception +Ġjurisdict ional +ĠSt ub +Ġcover ings +ï¼ģ 好 +Ġlik eness +Ġsuit ably +ĠTown send +çļĦ åIJij +大 çĤ® +æ°´ åİ¿ +ĠHe ard +ç¥ŀ åĨľ +()) -> +ä¼¼ æĺ¯ +âĢľ How +ĠG ott +ĠHead lines +Tex Coord +Ġaz imuth +- im +ä¼´ çĿĢ +ï¼Į建ç«ĭ äºĨ +_ operand +ãĢĤ æ¯į亲 +äºī 缸 +åĪĨæŀIJ 仪 +ç»Ļ大家 带æĿ¥ +ç£ķ 头 +çļĦ éĢłåŀĭ +field set +è¡Į åIJĹ +ĠÙħ ÙĨ +çĥĻ åį° +ĠL ub +é«ĺ 丽 +大åѦ æķĻæİĪ +亮 çľ¼ +éĵ¶è¡Į 贷款 +/t mp +ï¼Į 汪 +ä»ĸ å°Ĩ +åĪ« æľī +éĩĮ 尼奥 +å¹³ å®ļ +çİĦ æľº +ä¸į çģŃ +end um +Integ ral +æľĿ æ°Ķ +åŁ¹åħ» åŁº +æĴ¤ åĽŀ +Ġsquee zing +lyss a +le ur +Ġw l +åı¯ éĢīæĭ© +Ġun ambiguous +Ġadj oint +Process ed +åıĹ çģ¾ +ĠRed emption +ãĢģ åĬŀçIJĨ +ç͵ çŃĴ +Ġprocess ion +æĢ¥ æĢ¥ +亿 åIJ¨ +l ush +ä¸į åΰäºĨ +ĠG room +ï¼Įå°± 说 +Pref s ++ D +ĠP raise +éĢļ常 ä¼ļ +Christ ine +ĠJenn a +çĨı é϶ +ç¦Ģ æĬ¥ +x fe +åı¯ ä¸įèĥ½ +ĠEp isodes +SETT ING +est imate +âĢľ ä¸ĥ 
+ul ose +und ra +æ´ŀ éĩĮ +ĠInf ant +Ġ åĪłéϤ +ï¼ļ åĪĺ +æıIJ æĮ¯ +ä½ľ çŃĶ +å¤ĸ å±Ĥ +ĠAr d +Ġdiagn osing +Ġfrust rations +( alpha +Ġ éĹ®é¢ĺ +ĠN SC +Cred its +Ġunders core +æĥ³ å¾Ĺ +çī¹ ä»· +(h andles +urga on +åħ¬åı¸ æ³ķ +å±Ģ åŁŁç½ij +=" ./ +Ġmon oxide +Edit Text +Ġpenet rating +ãĢĤ åħĪåIJİ +âĢľ æĢİä¹Ī +G MP +æĿ¥ 表达 +Ġsh ri +Specific ally +Ġfren zy +大 å®ĭ +Ġadd Object +/d ocker +ĠTitan ium +ess es +umm ed +æľĪ åŃ£ +æīĵ è¶£ +èĦij éŨ +Ġmiscon ception +Ġb or +ï¼Įè¿Ļ åı¥è¯Ŀ +æĽ¾ 被 +å¢ŀéķ¿ äºĨ +åĮª æµħ +Ġips um +E PA +_un icode +æŃ¤ 举 +SE A +躺 ä¸ĭ +Ġphysic ist +Ġam el +æ»ļ çıł +/ Object +G ENER +åĴĮ éĤ£ +æŃ¤ æĸĩ +æŃ¦ åѦ +ĠCons umption +æĪ¿éĹ´ åĨħ +ï¼Į æĦŁæĥħ +Ġv ort +Ġget Current +Ġrep etitions +åŃ¦ä¹ł æĸ¹æ³ķ +ĠDi agnostics +ï¼Į åı«åģļ +人 ä¸Ģæł· +åŃIJ ä¹Ł +æ²ī éĨī +ĠPay roll +åĽļ ç¦ģ +çļĦ éĩijå±ŀ +ĠV or +ĠCh apters +端 æľ¨ +< Map +个 æĢ§çļĦ +Ġupt o +æ´»å¡ŀ æĿĨ +ĠRenew al +âĢľ ä¹Ŀ +Ġvis cous +CT P +Ġgre asy +ç͍ æīĭæĮĩ +å°ģéĹŃ å¼ı +. pp +ĠM DA +ï¼Ł âĢĻ +éĩĬ 迦 +å¿ĺ ä¸įäºĨ +. original +Ġc heddar +Ġel m +论 çļĦ +仪å¼ı ä¸Ĭ +Ġf iddle +ä¸ī çͲ +ĠX YZ +ï¼Įåΰ è¾¾ +Ġapr on +Ġsyst olic +Ġsec s +- We +ãĢģ åĢºåΏ +è¡Ģ ç¼ĺ +å¨ģ æľĽ +æĥ¹ 人 +% çļĦ人 +缸 æľĽ +urg ence +Ġcrow dfunding +çϾåĪĨä¹ĭ çϾ +" time +B ubble +Ġ% , +_PRO DUCT +_err no +ï¼Įä»» çͱ +\ mapsto +{{ / +ï¼Į èᣠ+Ġn ag +æŁ³ éļıäºij +è¾ŀ éĢĢ +Ġfry er +ĠR az +ĠQuestion naire +ĠRon nie +ĠMull er +\ mbox +ãĢģ åľ°çĤ¹ +.P re +ï¼Įä¸įçĦ¶ çļĦè¯Ŀ +_SW ITCH +ï¼ĮæĪij æł¡ +ä¸įä¼ļ åĽłä¸º +? 
> ( +Ġsight ings +Ġrum ored +åįļçī© éĻ¢ +Ġfollic les +ĠT SA +ĠM oodle +åĪĿ ä¸Ģ +åºĶ该 注æĦı +Ġarom as +F acing +åIJİ æīįèĥ½ +ï¼Į对 åħ¶ +_RE PORT +G all +Ġ æ½ĺ +Col on +igs aw +åīĸ éĿ¢ +ĠS IR +ï¼Įè¿Ļ åĽŀ +å·¥ä½ľ éĩı +Ġdraw able +ĠUnsupported OperationException +D h +ĠP epsi +ä¹Ł æľī人 +天 åĽ½ +Ġdisc erning +ĠP ASS +èĥ½ 被 +ä½łä»¬ äºĨ +éĢı æĶ¯ +Port able +_ age +h ospital +Ġ è´¹ +de ck +ĠPr imer +_ST YLE +å¼± å°ı +å¾Īä¹ħ äºĨ +çļĦ åĨĽéĺŁ +if ix +ĠG ABA +ĠJ ian +æľīä¸Ģ åıª +ĠC LEAR +ç͍ æĦı +åı¯ä»¥ èĤ¯å®ļ +ĠHel ic +Ġcomplain ant +S pect +ä¹ĭ å®Ŀ +Ġdrop let +容积 çİĩ +ĠAuthent ic +åĮĸ çī© +ï¼Įä½ł è¿Ļ个 +æĹ§ éĩijå±± +ĠCL K +. rec +âĢľ These +éĹ® çļĦ +ï¼ĮæĪij çŃī +åĬ£ è´¨ +z ac +ĠC ARD +ĠP ence +ĠL ig +Ġreli ant +èĥİ è®° +ï¼Įå¹¶ å°Ĩåħ¶ +Ġweak est +çĶ© å¼Ģ +ä¹ĭ éĢī +çķĻ å¿µ +Inter section ++ w +_ plane +ĠM inds +-t oken +åĩĨå¤ĩ éĩij +Ġprost itution +èī¯ çŁ¥ +Ġpred ic +α Ïĥ +ĠActiv ate +que ued +çĶŁçī© çļĦ +å¤ĸæĺŁ äºº +ä¸į å·® +.c ols +Ġri pping +æ¶ĪåĮĸ ä¸įèī¯ +ĠContin ued +% ï½ŀ +- animation +c riteria +ĠA rial +ĠL oves +ä¹ħ è¿Ŀ +ĠEgypt ians +Ġ åºĶæĶ¶è´¦æ¬¾ +he he +ãĢģ éŃı +声 åĵįèµ· +èĬ± åľ¨ +AD O +æŀª æĶ¯ +åĽ¾ä¹¦ 缮å½ķ +ï¼Į严 ç¦ģ +Ĉ ćć +大 èĤł +Ġhand gun +ra re +æīĢ å¯¹åºĶçļĦ +çŁ³ ç¢ij +çѹ 建 +æ°Ķ象 åı° +Ġsuperconduct ing +D ining +ĠT anner +ä¸į éĹ» +å¼Ģ çªĹ +(s ymbol +Ġanecd otes +.L earn +B ride +æķ°æį® ç±»åŀĭ +åľ°ä½į çļĦ +oc ode +ç͍äºİ 对 +æĽ´å¥½åľ° äºĨè§£ +dist ributed +ï¼Į æŃ£ç¡® +æĥħæĦŁ çļĦ +ĠMagn us +ãĢĭ è¯Ĺ +_b oard +èŤ èĶĵ +Ġcytot oxic +å¤ļ ä¸ĢäºĽ +åıijçĶŁ çļĦäºĭ +.sub scribe +-g irl +">< !-- +.bl ue +ä¸Ģ èµ° +æıIJ è´¨ +Ġchief ly +å°Ĩ è¿Ļ段 +S ew +Ġunders erved +L iz +Ġpower ing +ï¼Įåı¯ä»¥ çľĭåΰ +.parent Node +" ï¼ģ +æİĴ ç»ĥ +榴 å¼¹ +Y B +ãĢģ å°±ä¸ļ +Ġreg enerative +ĠSquad ron +ï¼Į çĤĴ +ãĢĤ æĹ©åľ¨ +å¼ł æµ· +æŃ¦ å½ĵ +UM MY +itiz en +ĠÑģ о +- abs +r ud +Ġ 约 +ĠS hat +Can on +ĠJa ipur +å¦Ĥ çĶ» +ä¿Ŀ åįķ +éĤ£ä¹Ī ä¹ħ +_ST ORAGE +st amp +ĠEl igible +Ġdent ures +[ ind +èµ· åĽł +Gu ests +Ġliquid ation +å¹³åĩ¡ çļĦ +Ġor deal +Ġlibr arians +çľī头ä¸Ģ çļ± +ï¼Į å¯Ĩå°ģ +åı¯ å®ŀçݰ +( parameter 
+Ġconc ord +åľ¨ä»ĸ 身ä¸Ĭ +Ġd art +ãĢģ åĩıå°ij +管 åĴĮ +ĠAdvance ment +st ations +âĢľ 为ä»Ģä¹Ī +ĠJe ans +è¯ķéªĮ åĮº +Ġhunt ed +Ġ åĩºçīĪ社 +ign ored +ĠTe V +èģĬ çĿĢ +Ġsp ong +ãĢģ åĽ½åĨħ +é¢Ĩ 带 +æĻ® ç½Ĺ +åΰåºķ æĺ¯æĢİä¹Ī +Coll apse +White Space +åı¯ ä¸įæĥ³ +Ġher ald +é£Ł 客 +' _{ +Ġke V +çĽĺ åı¤ +Ġoptim ally +âĬ Ļ +ĠS PA +æľī æ°´ +çª ¿ +ία ÏĤ +_RE AL +Ġsy nergy +ĠAri el +çļĦ åĬŁå¤« +Ġ 鬼 +ĠC aring +å¿Ļ èĦļ +Ġbroad ening +ĠIT EM +åłĨ åıł +FORM ANCE +an ely +Ġb lem +Ġdem eanor +Ġsol ace +Ġappla ud +/ pr +å°ı é¢Ŀ +Ġlegal ization +飦 å¾· +æłĩè¯Ĩ 符 +! $ +çī¹ éĤĢ +æĭī éķ¿ +-w all +éĢļ éĢļ +_t cp +ion age +qu oted +Ġanc illary +Ġwall papers +æľīä»Ģä¹Ī äºĭæĥħ +ĠFac ilit +ï¼Įä¹Ł éĥ½ +æĪı è°ij +CL IENT +_back up +im eline +Ġfor d +æĺ¯ 第ä¸Ģ +ari ance +è¡· å¿ĥ +Ġeste emed +M eg +S oul +åľ¨ å¿ħ +(' { +ĠApp ropriate +åĪij éĥ¨ +ç²ĺ 稳 +åŁĭ èij¬ +ĠB ias +ĠReg ents +/ us +æłĩ çīĮ +è¿ĺæĺ¯ æľīçĤ¹ +yl in +ĠPre paring +bel ieve +ï¼Įå¤ĸ è§Ĥ +éģŃéģĩ äºĨ +ĠSh iv +ä»Ĭå¹´ 以æĿ¥ +. white +Ġneed less +头 ä¹Łä¸į +çľ¼ çıł +_S CRIPT +ĠMcL aren +å®ĩå®Ļ çļĦ +_ vertices +Ġw afer +èĦļ è¶¾ +èİİ å£« +ï¼Įä½ľä¸º ä¸Ģ个 +Ġ åĿıè´¦åĩĨå¤ĩ +ï¼Į ä¾Ŀ次 +岩 æ£ī +-a verage +ĠCG Size +ï¼Į 计ç®Ĺæľº +çİĭ éģĵ +S MS +åIJİ è¢« +/d etail +g v +ãĢĤ ç»Ļ +æľĿ å¤ķ +_IN CLUDE +ĠD ai +Rem oving +RT OS +æķ² äºĨ +æ¸ħæ´Ĺ å¹²åĩĢ +(ex ception +ĠZe it +Ġ éħįç½® +禽 åħ½ +æĶ¹ åIJį +An a +Ġstruct uring +Ġbox er +Ġhabit ual +åIJ¸å°ĺ åύ +åĴĮ ç»Ħç»ĩ +åIJĮ æĹ¥ +Ch arg +ä¹Ķ æ±IJèİŀ +PF N +( (- +Z hang +没æľī çľĭåΰ +è¨Ģ åıĪ +ĠBrid al +å¼Ĺåħ° åħĭ +Ġt rophies +ĠS OS +éģ® èͽ +Ġforfe iture +ĠR outing +æĹł 礼 +Ġpe a +å¼Ĥ çķĮ +伸 åħ¥ +ï¼ĮæŃ¤ äºĭ +ĠDO E +Ġelectro chemical +俯 身 +ãĢĤ C +GL IBC +sem antic +åĬłå¿« äºĨ +大 ä¹ī +åı¯ä»¥ èĩªçͱ +ï¼Įä¸Ģ è·¯ä¸Ĭ +çļĦä¸Ģ 对 +. LOG +Ç IJ +ĠN Ps +apt cha +ãĢĤè¿Ļ åı¥è¯Ŀ +ĠOut fit +Ġo ok +Ġr iff +ĠSt rain +åı¤ å¸ĮèħĬ +_part ition +Ham ilton +ãĢĤ åĽ½åĨħ +_b oolean +Ġsed entary +Ġsc anners +çļĦä¸Ģ åıª +Ġtight ened +è¡° èIJ½ +_z ip +zbek istan +M akes +ï¼Į 软件 +ef eller +dep ends +A ust +M iller +åĸĤ é£Ł +. 
clip +ä½ł 说ä»Ģä¹Ī +è¯ģ è¨Ģ +èµł ä¸İ +uther land +typ ical +w anted +ĠB ally +çIJĥ éŨ +ĠCart oon +è¿Ľ 京 +Ġty ph +_pos itions +Ġbreat hed +" Not +Ġ@ { +Pro ceedings +åıį 常 +åijĪ çİ°åľ¨ +Ġfreel ancers +coord inate +åIJij èĩªå·± +æļ´éľ² åľ¨ +çļĦé¦ĸ è¦ģ +m atter +æĿ¥ 西äºļ +_d om +ĠZ ig +å¯Ĩ å¸ĥ +-sc enes +éĥ½æĺ¯ä¸Ģ æł·çļĦ +/d ec +connect ions +ĠSt akes +第ä¸Ģ åŃ£åº¦ +_message Info +ï¼Įèİ« éĿŀ +度åģĩ æĿij +æİ ³ +av ior +Ġcontrast ed +Ġcontrovers ies +or rent +\ hspace +ĠG ale +IP H +- Con +- Line +Ġ çİ°åľ¨çļĦ +让 人们 +带 åħµ +, ä»į +_D P +_PACK AGE +â IJ +ĠW AR +Ġun h +çī¹ æĢ§çļĦ +³³³³ ³³ +_ team +âĢľ All +åĴĮ åĨħ容 +çĦ¶ éģĵ +满 天 +ç±³ äºļ +æıĴ çĶ» +ï¼Į人 人 +Ġmood s +U x +Ġatt aining +è¿ŀ è´¥ +åĨĽéĺŁ çļĦ +ç»Ĵ æ¯Ľ +Ġj unctions +Ġne urom +Ġuns atisf +Ġr asp +Ġ# ' +E lim +le on +ĠIn vent +åĦ¿ 媳å¦ĩ +Ġtop ography +åĴĮ æĪijçļĦ +被 人们 +åĪ« åĨį +}, $$ +Ġcool ant +CP tr +ĠAtt acks +寿åij½ éķ¿ +ĠD AV +ĠN ell +é»Ħ æ³ī +åħ¬åı¸ å°Ĩ +Ġrel ish +åŃ¦ä¹ł æĪIJ绩 +å¤ľ å¸Ĥ +ĠVan ity +Ġbro chures +Ġphotos ynthesis +æ£į åŃIJ +ĠF rames +.F ill +åĩºå¸Ń äºĨ +æŃĩ æģ¯ +Ġintens ified +åħĪ ç¥ĸ +ĠF ACE +sh r +ï¼Įä¹Ł åıªèĥ½ +æľī æĽ´å¤ļçļĦ +对 ä¸į对 +_US ART +主èIJ¥ä¸ļåĬ¡ æĶ¶åħ¥ +éĢļ è¾¾ +ï¼Įåıª 为 +æİĮ ä¸Ĭ +ä¼ĺè´¨ æľįåĬ¡ +æĹıèĩªæ²» åİ¿ +å®Ī åĨĽ +æĭĨ å¼Ģ +Ġaest hetically +ï¼Į åĶ¿ +ï¼Į å®Ŀå®Ŀ +éĹ ¾ +åıĮ çľ¼çĿĽ +ç´« çłĤ +âĢľ è°ģ +åħ¬åı¸ ç»ıèIJ¥ +Ġprot ested +ĠChe es +comp ression +æĹ¥æĻļ éĹ´ +Ġ 建 +Ġc Äĥ +Ġst umbling +ib il +åı¤ æĸ¯ +ĠInter ruptedException +ymmet rical +åIJı éĥ¨ +at uration +_SL OT +ÏĦ ή +ĠPlug ins +æľī线 ç͵è§Ĩ +Ġfl ushing +çļĦçĥŃ çα +å°ģ åı£ +æ»ŀ çķĻ +æĸ°æµª ç½ij +åĬ¨äºİ è¡· +D ennis +x ed +Ġg zip +ĠB rack +åĬł æ³ķ +Ġund ec +Ġpol o +Ġrelie ving +Ġpl acent +被 åĩ» +sw ift +_set s +.or ig +åIJ ® +ç¿» éĺħ +column width +ĠSN AP +æĶ¾ åΰäºĨ +ah aha +Ġdiam eters +Ġrefriger ation +ĠTrib al +ï¼Į èĸĦ +ĠD ia +ĠK idd +-b g +åľ¨è¿Ļ æĹ¶ +Ġbast ard +> a +Ġsp aring +ĠInt roducing +Ġj ams +积æŀģ åĵįåºĶ +çĶĺ éľ² +ãĢģ 宽 +ĠO TC +游 èį¡ +_id le +ï¼Į大 åĵ¥ +C Q +G orgeous +Ġp wd +Ġn th +ĠW imbledon +Ġpart ed +Ġdi as +çĻ» åŁº +.as 
array +Ġfed eration +E ating +æĮł 头 +Sem i +åĴĮ åģ¥åº· +æĶ¹ äºĨ +IV ING +åħĪçĶŁ åľ¨ +Ġbelie vable +-d escription +bul k +ä¼ « +å°± 以 +åħ¬çĽĬ æ´»åĬ¨ +i R +ï¼Į ç§į +Ġpar ap +ï¼ĮæĢİä¹Ī åĬŀ +ĠMess i +ç«ĭ å¼ı +Ġeth yl +第äºĶ èĬĤ +Dom ains +. et +ĠPh o +ä¹Łæĺ¯ 为äºĨ +ĠBh ag +ĠRemed ies +æĬ¨ åĩ» +_ der +ĠPhys iology +ĠBeg ins +åı«ä»Ģä¹Ī åIJįåŃĹ +_ energy +ĠB SA +Ġwal nuts +abil ia +.p ool +Ġl ui +ph oria +缸åħ³ ä¸ĵä¸ļ +微波 çĤī +è¿Ľè¡Į åĪĨç±» +ĠSun der +g pio +ĠB ordeaux +èĩ ¼ +èģĶ ç¤¾ +.P ublic +ĠAst roph +Ġê tre +æıIJåįĩ åΰ +ĠDir ichlet +ĠDOWN LOAD +. ep +ä¹ĭ æ°´ +.d i +Off ers +è¡£æľį çļĦ +ï¼Į å¹´è½» +å°ı ä¼Ļ +åı· çīĮ +亲 çĥŃ +sec urities +港 å¸Ĥ +æİ¥ ä¸ĭ +åĪ© å¾Ĺ +_M M +Read ers +ĠCour age +ç®± çļĦ +, éģĩåΰ +åĴĮ ä¸Ĭ +ä¸Ģèµ· çİ© +è·³ åΰ +(file Path +æĶ¶éٳ æľº +a ith +{ (\ +缸 å¹² +人æ°ij åĩºçīĪ社 +inst ancetype +Ġalleg iance +Ġlay ering +Ġd urations +ä½ĵ å¼ı +ĠMe asuring +æĹł æ°´ +aw l +太 å·® +Ġpost card +ï¼Įä½ł è¿Ļæĺ¯ +Ġcrypt ographic +ĠÏĮ ÏĦι +Ġproject ile +priv ile +çĥŃè¡· äºİ +_ override +Ġt amb +Ġb fd +ĠMETHOD S +ag han +大 çĹħ +éļ¾ éĢĥ +Ġvisual izations +æľ¬æĿ¥ æĺ¯ +st ri +åĵĢ ä¼¤ +Ġextravag ant +im etric +Ġbe z +ĠP X +ï¼ļ åĮħæĭ¬ +ä¸İ ä¼łç»Ł +éľĢè¦ģ ç͍ +ï¼ĮæĿ¥ åΰäºĨ +@ param +Ġre define +ä¹° èıľ +_ad just +. install +åĴĮ æĬķèµĦ +(p ackage +Ġequival ently +ä¸ĵéŨ 为 +Circ uit +ĠD DS +é£İ æľºçļĦ +æ¥ļ åĽ½ +ìļ © +T rou +ï¼Į以 éĺ²æŃ¢ +arth a +èıı æ³½ +C isco +ãĢģ åİĭåĬĽ +ĠMac au +ãĢĤ对 äºĨ +ĠBT W +Ġremn ant +- ent +æĪij æĦŁè§ī +æĩµ æĩĤ +åĨ² é«ĺ +æŀ« åı¶ +\_ [ +is cher +ĠE ph +æĹ¥ æĿ¥ +管 æķĻ +ning ton +æĮģç»Ń ç»ıèIJ¥ +Ġaccus ation ++ l +ĠV ega +åIJį åı· +Ut c +Ġupl ift +. 
ag +æĻ¨ åħ® +èŃ¦å¯Ł å±Ģ +imp act +éĺ³åħī çļĦ +Ġparas itic +r oring +èIJ½ 泪 +ĠSir i +( edge +ĠIn line +æ¶Ĥ è¦Ĩ +ï¼Į 裴 +ä¸Ģ åĩ¡ +ãĢģ çłĶåıij +erm ont +è¶ħ æĹ¶ +c ine +Ġ æ°¸ +Ġt ion +ï¼Į åĩºçݰäºĨ +ĠA in +ä¿¡ äºĨ +.st ub +_ Start +ãĢģ éĢļ讯 +åįļ æł¼ +Ġsu cker +æ°´ 车 +ĠFor rest +æĬ± ä½ıäºĨ +-gener al +ĠAppe arance +m akes +ĠK yr +æĹ¥ æĺ¯ +éĽĨåĽ¢ åĨĽ +ĠApost le +Ġsens ual +çļĦåĨħ åľ¨ +_com ments +uther ford +" net +åı¯ä»¥ éļıæĹ¶ +èĬĤ èĬĤ +week ly +交èѦ 大éĺŁ +obacter ia +C ry +ac am +ĠGu ill +Relations hips +ac onda +æĬĢæľ¯ çłĶåıij +Ġoptim izations +isc overed +APP ER +Di agnostics +ĠBios c +éļıå¿ĥ æīĢæ¬² +Ġc ures +管 ç͍ +çī¹ æľī +æµģ è¿ĩ +å¾½ 竳 +Ġcaf eteria +çļĦ ä¹IJè¶£ +M oved +大 åħ¸ +ĠCabin ets +, æĦ¿ += name +Ph D +Ġ åĮħæĭ¬ +Ġcent roid +Reg ression +æ´Ľ æŀĹ +ä¸Ĭè¿° çļĦ +æĮ½ çķĻ +âĢ Ł +å¹´ æĹ¶ +天 å·¦åı³ +æıIJ äºĨ +unknown Fields +F abric +äºĮ ä¸Ń +å¸ĮæľĽ 大家 +len ame +说äºĨ åı¥ +ĠO asis +Ġmilit ant +ï¼Į æĢ§ +åľ¨ éĤ£ä¸ª +æĪij å¿ĥ +æŃ£ åĪĻ +说äºĨ ç®Ĺ +ĠFort ran +Ġimp over +Pl ate +Ġbom ber +Ġ åĬŀåħ¬ +çļĦ èĬĤå¥ı +ver n +éĢļ ç͍çļĦ +Ġed ged +A ES +ãĢĤ åĮ»çĶŁ +ĠI CON +被 åIJĵ +æµ· è¿IJ +æ·® å®ī +ĠS ut +-t rial +.C L +鼨 ä¼ŀ +æij© 天 +çν æľĹ +ä¼ijæģ¯ äºĨ +N ine +大 åĪĢ +éĩį大 äºĭ项 +Ġov ens +ĠT itles +( web +B uzz +ĠDel phi +ĠAv on +ĠMess aging +.Compiler Services +ĠSchr ö +ä¼ļ 社 +ather ing +å¤ĦçIJĨåύ æī§è¡Į +ĠT ribute +Ġam et +æĪij åģļ +èĢģ å°ij +éĿĴ åŁİ +ÃŃ as +ĠNic ola +ç´§å¯Ĩ ç»ĵåIJĪ +B le +ï¼Į èĪĴ +ãĢĤ æŀľçĦ¶ +æĬ ¡ +æ¯Ķ æ¯Ķ +Ġwater ways +ĠSam my +å±Ī åİŁ +Ġt gt +ï¼Į æł¼ +ï¼Į éĻª +åĴĴ è¯Ń +éĽĩ åijĺ +ãĢģ åijĺå·¥ +bb b +Ġtransf ection +ĠBeng als +对 讲 +è¿ĺ 为 +ĠMaster card +ï¼Įè¿Ļ åı¯ +ç§» éĢģ +çIJĨè§£ çļĦæĺ¯ +éϵ åĽŃ +ìł ķ +. 
TR +èĦ¸èī² ä¸Ģåıĺ +construct ed +ï¼Įä¹Ł åı¯èĥ½ +åı¯ä»¥å¸®åĬ© æĤ¨ +ĠJehov ah +C URRENT +H AVE +ac enter +éĢļ ç¼ī +ãĢĤ çζæ¯į +ĠU rb +Al bert +.P os +ĠPaper back +dog s +Z H +} d +ï¼Į åĸĦäºİ +âĢĻ âĢĻ +ĠApp arel +é¦Ĩ éķ¿ +ä½³ ä½ľ +ï¼Į åIJĮæĦı +ĠH are +åģľ å·¥ +Ġadvert iser +Ġcancell ations +O lymp +id opsis +ĠM CA +Ġ× ľ +/app s +M ULT +çļĦ ç²īä¸Ŀ +Ġre writing +ï¼Ī âĢľ +Ġinstall ers +ĠEvent Handler +çļĦ çĶŁçī© +ĠM PU +å¤ĸ è¾¹ +约 车 +Ġdri zzle +Ġnames paces +æĬ¢ è´Ń +ĠArm our +R ename +ä¸Ģ æĹ¶çļĦ +Ġcont r +Al cohol +æķij çĶŁ +å°± èµ°äºĨ +管 åĨħ +Invalid ArgumentException +ĠDIS C +ç³»ç»Ł æĢ§ +æł¹æľ¬ å°±æĺ¯ +Ġtum ble +Ġdisp ensing +Ġmillenn ia +th orn +å¿Ĺ æĪIJ +AV EL +ĠInnov ations +ou k +åĪ« æĹł +èIJ½ ä¸ĭæĿ¥ +ĠPr ism +ĠOper and +ĠPath ology +_work er +. asset +h ay +ĠD ucks +éĩįè¦ģ çļĦäºĭæĥħ +Ġgre ener +Ġpremature ly +Ġdet riment +Ġtro op +æĪĺ æĹ¶ +Ġdisp enser +Ġmes hes +/pro file +æĢł æħ¢ +t il +åĩº å¥ĩ +çĿĢ è£ħ +产 éĶĢ +æĸĩåѦ å¥ĸ +-th inking +Ġmel odic +ĠW ednesdays +åΰ è´¦ +æ·±åħ¥ 人å¿ĥ +h ape +ĠE ase +为 æĮĩ导 +itt a +æ±Ł å¤ı +æ¦ Ķ +Ġred ox +é¢Ħ设 çļĦ +Ġhere to +Ġ================================================================= ======== +Ġun loading +头 çŃī +ĠIntern ship +,èĢĮ åľ¨ +Ġpolic eman +æĸ° ä»» +æĭĨ è§£ +åĩł çľ¼ +çļĦ è¯Ĺ +Ġtr imester +_c urve +/ New +æĿ µ +ãĢģ çݯ +大 æ±Ĺ +ĠV anguard +Ġsc uba +/k ubernetes +- import +. allow +G ST +c akes +çļĦ çľ¼ä¸Ń +åĬ¨ åľ° +è¿ĺ åī© +Ġdown fall +ä½ķ 人 +æĭį åΰ +_PL AY +à¯ģ à® +ad r +ä¹ĭ è§Ħå®ļ +ãĢĤ è¡Į +Ġbl azing +.b ool +ãĢģ äºĴèģĶç½ij +ex cel +ibr ate +Ġord inances +ĠBlock ly +ï¼Į ( +Com paring +unc ated +Ġorgan ically +ĠPa ige +å°ijå¹´ åĦ¿ç«¥ +ĠCle aring +Ġde ceive +Ġtow ering +Es pecially +Ġelucid ate +ĠM ST +被 害 +éĥ½ä¸į éĶĻ +( queue +ç»ĵ 缣 +å±ħ æĺĵ +æĸľ éĿ¢ +/ red +ãĢĤ ç¥ŀ +éģĵ åħī +åŃĹ çľ¼ +çģ« ä¸Ĭ +Ġbreak point +ĠCount s +æĸ½ ç͍ +Ġste amed +g raphics +(file path +_DIRECT ORY +re ject +ĠM ODULE +ĠH aj +Ex clude +è¦ĨçĽĸ çİĩ +åįĩ åΰ +æĿĢ æŃ»äºĨ +Ġä¸Ģ 声 +Ġse o +ĠChe ney +_fl ash +Ġquad rant +. 
IM +re levant +ĠM our +ID GET +PE LL +临åºĬ 表çݰ +. bs +çļĦ æĿ¥ +人 æĿĥ +æīį åŃIJ +åĿĩ å̼ +ĠBra ves +广éĺĶ çļĦ +. placeholder +: + +ï¼ļ çͱäºİ +æĻ ģ +ï¼Į为 æĤ¨ +åħļçļĦåįģä¹Ŀ 大 +Ġst itched +az ioni +å°Ķ éĽħ +anag ed +ì Ĥ¬ +ï¼ ĩ +æĹ¥ æĻļä¸Ĭ +åħį åıĹ +åıijæĮ¥ çĿĢ +çĽĨ æĻ¯ +OC O +Ġintimid ation +åıĹ å®³äºº +ĠSc am +èİ·åıĸ çļĦ +Ġintermed iary +èİİ士 æ¯Ķäºļ +H ip +æľ¬ åįķä½į +, ä¾ĿçĦ¶ +æĸ° åĨľæĿij +Ġdifferent iating +Bar ry +æ·¡æ·¡ éģĵ +Est imate +_ ob +èĩª èĭ¥ +Ġstore front +ĠL ark +èĥ½ 帮åĬ© +οÏħ με +c old +âĢľ äºij +èĩªå·± ä¸į +ä»ĸçļĦ 身ä½ĵ +.un pack +Ġbin ocular +Ġunfair ly +çĶŁäº§ èĢħ +æī§è¡Į åĬĽ +ĠCl oth +ĠFl utter +ä»ĸ ç͍ +ĠEx ams +ĠSp art +é©° éªĭ +ĠH orton +ç´ł æľī +åĢ ¤ +ĠY O +èĬ± 鸣 +èİ« 大çļĦ +çļĦ æĪIJåĪĨ +ĠS AME +åĩł ç§ĴéĴŁ +管çIJĨ è§Ħå®ļ +åĩ¶ éĻ© +ĠV ocal +ï¼Į以 å®ŀçݰ +éķľ éĿ¢ +çļĦçݯå¢ĥ ä¸ĭ +Ġj ailed +èĢĮ åĩºçļĦ +é«ĺ 强 +éĺµ åŀĭ +çݰ代 åĨľä¸ļ +ï¼Įåħ¨ çľģ +åĩº è¿ĩ +Ġeffect or +· 奥 +Ġf url +åľ © +if ers +éģĩ éļ¾ +-th read +ãĢĤ æĭī +ĠL DA +æĿİ åŃIJ +ãĤ ± +è´¨éĩı 管çIJĨä½ĵç³» +å¨ģ æµ· +ĠBO X +åįĬ个 æĹ¶è¾° +ë© ´ +k B +ï¼Į è̳ +ãĢģ æĿľ +Property Value +ĠWer ner +/ linux +h ui +ĠI IS +ï¼Ł ä¸įæĺ¯ +主è¦ģ ä»İäºĭ +æ¼Ĥ æ³Ĭ +Ġ` * +åºĶ该 ä¼ļ +çİ» å°¿éħ¸ +ĠMad agascar +~ * +Ġ 宫 +Alloc ate +, ç»ıæµİ +? 
org +ãĢģ éĵģè·¯ +ï¼ģ ãĢij +Ġcol loqu +ĠIs les +ç»ĵæŀĦ ä¸İ +/m aterial +对大家 æľīæīĢ帮åĬ© +主 讲 +羣 åħĥ +åĩºçīĪ çī© +.a udio +D egrees +\ index +~ - +她 èĥ½ +ĠAng ry +çĥŃ æĴŃ +.p id +çīĮ åĿĬ +åIJĦç§į ä¸įåIJĮçļĦ +Ġfear less +æĢľ æĥľ +âĢľ äºĴèģĶç½ij +åĽĽ 溢 +æ¯ģ åĿı +B uck +ĠSt acey +æĹł è¶£ +åľ¨ ä¸Ģå®ļç¨ĭ度ä¸Ĭ +Ġstr ata +ung a +è½°çĤ¸ æľº +ĠHuff ington +好 å¤ļäºĨ +(' ../../ +ĠUS SR +çļĩå®¶ 马德éĩĮ +Ġunle ash +ĠM OVE +æĹ¶ åºı +Ġun read +Ġ@ ( +_B IN +Ġì ĺ +ï¼Įæ¯ı æľĪ +åį³å°Ĩ åΰæĿ¥çļĦ +Ġfreel ancer +Ġ è½° +Ġo prot +ar ro +ĠB d +ç±į è´¯ +çĶµæ±ł çļĦ +{AD IE +ä¸į æķ£ +å°± åıĪ +-ch ief +è£ħç½® åĴĮ +Ġqual itatively +è·ŁçĿĢ ä»ĸ +Ġwhen ce +çݯ 顾 +åŁİå¸Ĥ ä¸Ń +驱éĢIJ èΰ +, æ¯į亲 +çĭ¬ åįł +ORIZ ONTAL +ĠG REEN +Ġsur jective +oci ations +res olution +Ġro aring +ä¿® è¾ŀ +éĺ³ çº¿ +il ts +ï¼Įä»İ ä¸Ń +Ġvirtual ization +ç¥ŀ æĢģ +L ights +Ġt ides +æķ° åįģå¹´ +Ġmet formin +æī§è¡Į èij£äºĭ +æĥ¨ è´¥ +ç¡ħ è°· +( math +ï¼Į è·¨ +ep loyment +å®ī åIJī +ä¸ĢäºĽ éĹ®é¢ĺ +Ġbo asting +Ġnour ishing +N arr +åĬ¨ èµ·æĿ¥ +Ġrest room +Ġf oes +åĴĮ åįĹ +ï¼Įä»ĸ èĥ½ +ç§ĭ 天çļĦ +横 æĿĨ +ĠCr ush +åİŁæĿ¥ å¦ĤæŃ¤ +, æĸ¹ä¾¿ +ä¿ĿæĮģ èī¯å¥½çļĦ +zy k +ĠSUV s +ãĢģ 山西 +声 ä¹IJ +, åıĸ +å¹´ åıĤåĬł +é¡¹çĽ® éĥ¨ +ĠEn ables +æľĢå¿«çļĦ éĢŁåº¦ +Q W +ol it +Normal ized +åIJİç»Ń çļĦ +积 æľ¨ +ĠOb esity +_ ports +ĠY ay +ON US +ಠ¿ +æĶ¿åĬ¡ æľįåĬ¡ +Again st +/ ip +set Name +ãĥ³ ãĤ° +ĠS amp +æľ¬ åħ¬å¼ĢçļĦ +åĨħ 裤 +uch a +IC I +ĠCD T +丼 ä¸Ń +mir ror +- character +ĠB ikes +人 è¡Į +.r ules +( en +ï¼Į ä¸Ĭæµ·å¸Ĥ +åı¯ 被 +ĠK W +Ġgr unt +wh itespace +éĤ¦ å¾· +ĠBonus es +- edit +ĠC ascade +ï¼Ł å°±æĺ¯ +ï¼Įä½ł å¿ħé¡» +çľĭåΰ è¿Ļ个 +åľ£ è´¤ +代çłģ çīĩ段 +Ġquart et +èĦļæīĭ æŀ¶ +R at +æľ¬ æł¡ +空 èħ¹ +è¾ĥ æĹ© +RO UGH +Ġwin eries +å®ģ å¾· +踩 åľ¨ +Ġw iser +ĠH ugs +_M IC +åĩĢ å¢ŀåĬłé¢Ŀ +Invest or +con version +ĠInteg ral +ann ounce +-d om +Ġ 梦 +pc m +. 
An +C rew +å½ĵ ä¹ĭ +åĩº äºĭäºĨ +对 ä»· +ush ima +ds a +ä¹Ŀ 竳 +ĠCar la +Pub Key +Ġeyeb row +à į +ï¼Į ç©¿è¿ĩ +ĠZ ones +ĠMoz art +ãĢģ éķ¿æľŁ +Ġinaug uration +ĠP DP +ï¼Į æĵħéķ¿ +åĴĮ æĹ¥æľ¬ +_F N +Ġf idd +ä¸į éľĢ +ib ase +rac ies +Thread Pool +å¹´ ä¸ĭåįĬå¹´ +ĠK ag +èĩªå·±çļĦ å°ı +èĹı çļĦ +åįıä¼ļ çļĦ +Ġfertil ization +. Expression +an h +Ġcomm encing +ä¿ĿæĬ¤ çļĦèĮĥåĽ´ +Ġrig orously +âī Ī +âĤ ģ +ric orn +Def erred +_RE QUIRED +âĢĺ The +çĿ£ åĬŀ +汤 åľĨ +, æľīçĤ¹ +ãĢģ çŁ¥è¯Ĩ +çϽ 头 +ita ire +ä¸Ģ çıŃ +ĠExt end +Ġinstant iated +ĠKar achi +ä½ł 个 +ç¥ŀ çζ +ä¹Łæ²¡æľī æĥ³åΰ +诵 读 +hen y +ĠPr as +ç͍æĪ· 对 +å·¾ 帼 +æ°Ķ æĢ¥ +åĮħ è¦Ĩ +æĸ¹æ³ķ åıĬ +æŀª çļĦ +det ect +Pur ple +æĿĢ伤 åĬĽ +M AG +ï¼Į èĤ¡ç¥¨ +ĠSt anton +级 以ä¸Ĭ +马 çͲ +å¸ĥ 满äºĨ +åĵį å½» +åģľ ä½ı +typ ing +draw able +Ġcro oked +èµ·çĤ¹ ä¸Ńæĸĩç½ij +ĠCann es +å¾Ĺ天 çĭ¬ +Ġf rac +å¾Ĺ è¿Ļä¹Ī +Ġstart led +å¿« åİ» +çĶľ çļĦ +éĻĢ èŀº +Âł çϽ +å¾Ī ç´¯ +ÙĦ Ùī +ĠM AD +Ġem bol +Ġgene alogy += X +åĴĮ çα +ä¸ĭ åºĬ +Ġaffect ive +æīĢåѦ çŁ¥è¯Ĩ +æĹ¶ 为 +çīĪ åĿĹ +æ´ŀ åºŃ +çijŀ æĭī +ĠNO AA +å¬ Ľ +Ġrecharge able +-adjust ed +Ġmelanch oly +m pp +èĥľ äºİ +het to +ĠPy ramid +ä¸Ģç³»åĪĹ çļĦ +ent es +æ¸ħ åĨĽ +Ġalk al +ä¸ĭæĦıè¯Ĩ åľ° +Ġa cl +è½½ 人 +ãģ§ ãĤĤ +.Test s +ä¸įçŁ¥ä¸įè§ī ä¸Ń +p ink +ï¼Į ç´¯ +åΰ ä½İ +åµ ĺ +Ġsplic ing +ë ² +æĪij 羣 +æķıæĦŁ çļĦ +:` ~ +ĠLect ures +ï¼Į åĽŀå®¶ +ĠS ark +ĠC URRENT +ä¸į åīį +uh n +onom ics +ĠGall eries +Ġph ag +ĠDet ector +éĹŃä¸ĬäºĨ çľ¼çĿĽ +C ab +ä¼ł ä¸Ń +带 è·¯ +Columb ia +- export +M p +çĻ ¿ +æ±ī å§Ĩ +Ġarr h +agg io +Ġfauc ets +Enh anced +ä¸Ģ åľ° +ag ree +iv os +Ġro y +åĩĨ äºĨ +Ġà ľ +Ġnewcom er +P ont +åĮ¹éħį çļĦ +Ġf ashions +æĸ¹å¼ı æĺ¯ +ä½³ 绩 +Mar ine +æĹłäºº 驾驶 +/r uby +Ġpir acy +( Input +åŃ¦æľ¯ 交æµģ +_ACT IVITY +. 
uri +; T +ï¼Į ç¥ĸ +ur ple +ĠAcc ent +æĶĢ çά +ocarcin oma +Pre heat +"/ > (), +笨 æĭĻ +Ġdrift ed +åľ¨ 京 +_P WM +è£ħä¿® é£İæł¼ +è¡¥åħħ éģĵ +çļĦä¸ĢåĪĩ éĥ½ +ç¾½ 翼 +ĠTravel ers +å®īå¾· çĥĪ +( Arrays +éĥ¨ æĹı +Ġac claim +èľ ĵ + namespace +_ext ract +ï¼ĮæĬĬ å®ĥ +Ġfluct uate +Ch ance +åı¯èĥ½ åŃĺåľ¨ +宽 大 +Collect ors +é« ĭ +AL A +_h alf +/m o +Ġapprec iating +ĠWal ters +( images +æİ £ +Ġcons olation +åİħ éĩĮ +Ġcere bell +Ġip a +Ġphilosoph ies +ĠM acy +Ch ron +ä½Ļ ä¸ĩåħĥ +ĠNS MutableArray +Ġ\ {\ +ĠAn alyzer +° ï¼Į +ç«Ļåľ¨ äºĨ +å¸ĮæľĽ èĩªå·± +Pay ments +ĠBad ge +, 社ä¼ļ +, 大å¤ļæķ° +ï¼Į æĥł +Ġ å®ŀéªĮ +Ġve z +.l ight +_sub ject +Ġ éħĴ +æİ¥ åºĶ +åIJĦ åĽ½çļĦ +ï¼Įä¸Ģ 举 +éĴ¢ ç¬Ķ +or ator +ä¸Ģ åºĶ +Ġx t +æ®ĭ æļ´ +æĹĹ é¼ĵ +åĬĿ 导 +Ġhate ful +ne k +ä¸ĵä¸ļ æĬĢèĥ½ +èĬĤ缮 ç»Ħ +åī¥ åīĬ +飧 带 +ï¼Į æĶ¶åħ¥ +round ed +æ³¢ æ¶Ľ +ç»ĥ åĬŁ +交æĺĵ ä¸Ń +W IDTH +Ġst rom +ãĢģ 麦 +=" ..\..\ +è¶Ĭ ä½İ +ĠSy ll +ç¼ĸçłģ åύ +ä¸į å°ıäºİ +å¿ĥ èħ¹ +çŃī äºĭ项 +è¾Ľ 亥 +Ġconver ters +. Extension +im etry +}\ | +èĢĢ çľ¼çļĦ +_ keep +åıij çĸ¯ +oint ed +计ç®Ĺ åħ¬å¼ı +pat ches +ä¸ĭäºĨ 车 +Ġ**** * +åĬ¨ 身 +_C AST +åIJ¬ åύ +æ¶Ĥ ä¸Ĭ +ĠTom as +ĠCE LL +çŃī çĸ¾çĹħ +ĠDr one +Ġpri zed +ake up +: ^( +L t +æī¯ çĿĢ +Ġnan ot +ĠKens ington +ä¸Ń å±Ĥ +éĿĴæĺ¥ çļĦ +Ġ对 æŃ¤ +ĠP ARK +ãĢģ N +-w riting +åıĸå¾Ĺ æĪIJåĬŁ +ĠFern andez +èĤ¥ èĤī +oz ora +-d at +å»¶ç»Ń äºĨ +ï¼Į å¹³åı° +æĹ¥ èijµ +ien a +ĠMem oir +Sen ator +Ġthromb osis +pec ies +å®īæİĴ éĥ¨ç½² +fill ment +(se ed +å°ı çľĭ +eter a +ĠAS M +,çĦ¶åIJİ åĨį +, æĦŁåıĹ +P el +ĠD FT +æĪij 身边 +é½IJ äºĨ +篡 æĶ¹ +ï¼Į åĭ¿ +ĠT olkien +æľī çļĦæĺ¯ +ä¹Ł ç§° +Ġph arma +æľīæľº 溶åīĤ +@ test +× Ĺ +ä»ĸ è¿Ļ个 +Ġequ ate +rav a +Ġferm ions +ĠM AV +t emperature +éĽª åľ° +ä½³ èĤ´ +ãĢĤå½ĵ ä»ĸ +æ¶Īéĺ² æķijæı´ +ench ymal +ĠK ann +æĪĺ ä¹± +èIJ½ å·® +Ġlingu istics +M OV +Â Ķ +Ġ eller +ĠC oco +çĤ Ķ +Ġproperty Name +,æīĢ以 åľ¨ +Ġsp anish +åĸĦ å¾ħ +Of Week +ÏĢ Î± +ï¼Įä»ĸ 被 +Ġrest ores +ĠSp okane +è°ģ ä¼ļ +æĻĵ 彤 +ĠSpr inkle +Ġalphabet ical +p ictured +ï¼Į çģ° +ä¸Ĭ 人 +æĢ§ åıĬ +éĺ² æĴŀ +åĤ¨ ç½IJ +Ed ition +åħ¨çIJĥ çļĦ +ï¼Į两 åıª +æĴ Ĥ 
+Ġsubt ly +ä½ıæĪ¿åĴĮ åŁİ乡建设 +, 对æĸ¹ +åħ¬ é¦Ĩ +æīĭ æĿ¥ +.f ragment +Ġhist opath +èľ ĥ +Ġobsc ene +/ ac +ĠL act +广 为 +_n umpy +Ġs ipping +ust a +èĥ½ åIJĥ +èĦij éĥ¨ +ĠWilk inson +M z +å°ı 便 +Ġgr inned +ĠHow ell +å¾ħ åĶ® +Ġtyr anny +Ġ æ¯Ľ +ï¼Į çĭĹ +ï¼ļ If +ï¼Į æİ¨åĩº +大 é¹ı +Ġi Cloud +ãĢĤæĪij ä¼ļ +æĥ¨ äºĨ +ä¹ĸä¹ĸ çļĦ +æģ°å½ĵ çļĦ +ĠîĹ¥îĹ¥ âĢľ +min ute +Ġtre asured +æĹ§ åĿĢ +éĺ´ æ£® +ĠProject ion +ĠWater front +in crement +æĥħ æľī +èľ » +ä»Ļ åīij +åıĹåΰ 伤害 +好å¥ĩ çļĦ +K u +Ġu rea +ĠM VT +èĢĮ éĹ»åIJį +ãĢģ å·Ŀ +ph olst +èļ ĵ +èľ ´ +å·¥ä¸ļ åĮº +æ®ĭ 渣 +azz i +ĠD oo +æłij 人 +Ġstaff ed +Ġamb ience +accept ed +# L ++ ] +ra i +_d type +éĢł çļĦ +ãģ¦ ãģĦãĤĭ +èij± èĬ± +æĺĵ失 æĢ§ +, ), +åľ¨ è¿Ļæł·çļĦ +请 ä»ĸ +ĠLow ell +Âł The +è¯ģ çĽij +æ²³ åĨħ +åŁİå¸Ĥ éĩĮ +纱 线 +æŀĦéĢł åĩ½æķ° +ãĢĤæĪªèĩ³ 缮åīį +c en +ï¼Į çĭłçĭł +ãĢĤ ä¸įå°ij +Ġmicro array +Ġang led +ĠTom atoes +åħ¨èº« çļĦ +( activity +Ġa usterity +ãĢģ æŁ³ +æ°´ çģ« +") : +fl ate +ĠBar bie +Ġdehyd rated +l ery +ĠRev ival +夫妻 ä¿© +H orse +ĠDomin ion +为äºĨ éģ¿åħį +举 人 +ĠSeason al +.m ulti +Message Type +Ġaccum ulator +ĠAutom atically +P LE +Ġ æŃ¦æ±ī +Ġr tl +æģĴ å®ļ +çļĦç¥ŀ å¥ĩ +ĠChev ron +天 éŨ +ĠTam my +ãĢģ çͰ +çº ¾ +ä½ľ çĶ» +票 éĿ¢ +ä¸Ģ缴 被 +ĠExecut or +aphy loc +ï¼Į ä»·å̼ +ĠW ired +è¶Ĭ åıijçļĦ +car bons +el ius +Ġbe gs +ç»ĻæĪij ä¸Ģ个 +æĭľ è§ģ +ĠCL IENT +D utch +K s +ĠN ucle +Ġk or +ĠCard iovascular +æĪ¿äº§ è¯ģ +H AM +H ooks +og raf +ï¼Įå¦Ĥæŀľ æĪij +Ġepigen etic +ï¼Į æķ°åŃĹ +Ġt aped +Ġinv ading +ä¹İ åħ¶ +çι å¨ĺ +_ ce +ud u +缴 éĢļ +Whit ney +Ġ æ¡ij +ĠF olding +lo ch +Ġun born +ï¼ĮèĢĮ è¿Ļ个 +Order By +ĠXia omi +çļĦ åĩłçİĩ +祸 é¦ĸ +, æĹ¢çĦ¶ +åŃĺ æĶ¾åľ¨ +ĠPat reon +ĠCly de +all on +åĶIJ åĥ§ +ĠRest art +Ġabsent ee +as n +å¾Ĺ å¿ĥ +Ġam ps +å¸Ī å°Ĭ +éĶĤ 离åŃIJ +L X +[ Illustration +åĴĮ åīį +离 äºĨ +è¿ĺæĺ¯ 没 +æĹ© æķĻ +ï¼Įä½Ĩæĺ¯ ä½ł +éĽĨä½ĵ ç»ıæµİ +ĠMah ar +Ġ" .. 
+åĸĿ 彩 +ï¼Į åģļäºĨ +çļĦ å±ŀæĢ§ +Ġwe arer +ĠFree ze +è´ŁéĿ¢ æĥħ绪 +帮åĬ© ä»ĸ们 +ĠCra igslist +åį´ åıijçݰ +æ²ī éĿĻ +ä¼¼ä¹İ ä¹Ł +ä¸į åħ·æľī +æĹł èıĮ +äºĮ 年级 +æ·± åĬłå·¥ +Ġreve rence +ĠShr imp +ç»Ŀ äºĨ +伦 çī¹ +,è¿Ļ个 æĹ¶åĢĻ +Ġ åĩĨå¤ĩ +第ä¸Ģ çľ¼ +æĸŃ è¨Ģ +ĠCos metics +> # +up arrow +泸 å·ŀ +D ependent +ï¼Į ç½ijç«Ļ +ãĢĤ èĢĥèĻijåΰ +ãĢģ åĮĸ +åıĪ æĺ¯ä¸Ģ个 +ä¸Ģ å¸Ĩ +ãĢģ E +ä»ĸçļĦ åIJįåŃĹ +ï¼Įå½ĵ æĪij们 +ä¾į ä»İ +ĠAdjust ment +çݰæľīæĬĢæľ¯ ä¸Ń +J AVA +ud ential +ï¼ĮæĪij åı« +æ¸ħ æ°Ķ +ĠPay ing +Help ful +F amilies +C ARD +ct ools +Ġaut opsy +, èĩ³ä»Ĭ +- fig +e ys +Ġb lower +ell ants +éķ¿ åº¦çļĦ +ï¼ī + +ä¸ŃçļĦ æīĢæľī +ĠAtt ractions +walk ing += >>> >>>> +ï¼Į éĢĥ +åıij çIJĥ +Ġoff end +CC I +ĠAnal ytical +Ġhydro carbon +寿 åı¸ +Ret ention +ï¼Į åŁ¹èĤ² +Ġo ysters +åĽº ä»¶ +ant z +ï¼Įåľ¨ åħ¶ +模æĭŁ åύ +Ġin herits +. identifier +è¦ģ åĿļæĮģ +ä½Ļ çĥŃ +Ġna ïve +Ġ'@ / +: R +} O +ĠS CE +ĠThe o +éļ¾ åº¦çļĦ +Ġob edient +SP AN +çļĦæīĭ æİĮ +Mat chers +ĠSam pling +åľ¨æĥ³ ä»Ģä¹Ī +ãĢĤ åħ¨åĽ½ +æĸ¹ åŃIJ +_C ENTER +ĠPh osph +ĠQuick Fix +ĠRoy ale +éĩĩ访 ä¸Ń +-test ing +Ġmason ry +-L ife +æ¸ħæ´ģ èĥ½æºIJ +å®ŀæĸ½ä¾ĭ æıIJä¾ĽçļĦ +Ġsan itize +.new Instance +Ġ ä¿ĿåŃĺ +ï¼Į éĩįè¦ģçļĦæĺ¯ +ãĢĤ åĮ»éĻ¢ +- pos +çŃī æĸ¹æ³ķ +ĠQ Q +az o +ĠAs ians +osp els +åĪ©ç͍ äºĨ +ç²Ĺ 壮 +ĠTw ain +ï¼Į æıĴ +ĠR DF +-> __ +][ : +Ġexhib itors +å¹¿æ³Ľ åħ³æ³¨ +Optim ization +C BC +Ġ åζ +å¤Ħ ä¸ĸ +Im mediately +Ñĭ й +Marc us +Ġb im +ĠSe ymour +-d iff +ï¼ĮæĬĬ æı¡ +ï¼Įä¹Łè®¸ æĺ¯ +, åıijå±ķ +ve al +AL LOW +åĩłä¹İ æīĢæľī +ige on +_emb edding +çĽijæĬ¤ 人 +èľ· 缩 +ĠF ayette +ĠN ab +ï¼Ł 欢è¿İ +常 说çļĦ +满 çļĦ +ĠPl um +åĨĻ å®Į +西 åĮº +ï¼Įåıªè¦ģ æľī +ĠWild cats +ĠRece ipt +abcdefgh ij +çŃī å¤ļ项 +ç§° è°ĵ +ĠAb rams +欣 欣 +æĹ¨ æĦı +some one +ä¿ı èĦ¸ +Ġexting u +å°± åıĺæĪIJäºĨ +red ential +èĥĥ çĤİ +ulner able +ઠ¾ +ĠIn visible +æİ¨ è¾ŀ +约 约 +Ġver d +æĥ¨ çĥĪ +å¾Ĺ éĢŀ +åIJij åħ¶ +ä¿® èĢħ +æ¦ Ń +Ġintellect ually +M ID +Ġcompanions hip +ĠNotImplemented Exception +ĠZucker berg +çļĦäºĭ åĬ¡ +ĠDermat ologist +as pect +éĩį è´Ł +ç»Ļä½ł ä¸Ģ个 +Ġchief s +Ġ$ @ +ä¹ĭ çģ¾ +ĠV inci +碳 
æİĴæĶ¾ +ĠCR YPT +Ġprospect us +ç¯Ŀ çģ« +ä»ĸ 没 +Run s +ag ency +ens ibly +miss ibility +è³ ĩ +, å¹³åĿĩ +an imal +Ġsh aky +æµ· çĶŁ +æł¸ éĶĢ +Ġmicro phones +Ġmalign ancy +eque ue +ord inal +ĠSp artan +Ġcmd let +大 é»Ħ +è·µ è¸ı +Ġaccol ades +Ġab duct +绣 èĢĥ +å·´ åĭĴ +åĸĦ æģ¶ +ĠM arm +æĪij å½ĵæĹ¶ +ĠGu inness +å¹¶ èĮĤ +Ġdict ator +Ġcha ired +ï¼Į ä½ĵçݰäºĨ +åīį èĮħ +被 æ·ĺæ±° +Acc ommodation +| [ +sh ipping +jug ated +ĠD CA +,åľ¨ è¿Ļ个 +Ġconv ict +O OK +ãĢģ 人工æĻºèĥ½ +Ġsyn onym +SH IFT +ĠRub io +Ġwill s +çĤ¹ åľ¨ +Ġra is +Ġcor iander +æĸĩä»¶ åIJį +Ġcategor ize +C it +Cl aire +ï¼Įæľ¬ 书 +Fant asy +ĠParl ament +{ remark +å°ı å¿ĥçļĦ +Ġper ish +pc i +gener ally +Ġsynerg istic +F iber +_ ER +Ġex clusions +Ġprov oked +Ġresp ite +å±ħ ä¸Ń +Ġcas o +ĠOm ni +ĠC ologne +ĠF lux +为 ç͍æĪ· +缸åºĶ åľ° +ic olor +ç½ij绾 游æĪı +| " +Ġan glers +ut tered +æŀĹ å®¶ +inter rupt +æŃ¦ ç¥ŀ +æ¶ī å¤ĸ +çĸ¼ çļĦ +_F ILENAME +ï¼Į 躲 +çıŃ ç»Ħ +Ġrefresh ments +/ packages +Ġm pg +,éĤ£ æĺ¯ +Ġip ad +Ġdisgu ised +J L +Ġsp ar +æ¿Ģ æĢĴ +ir ut +çłĶç©¶ äºĨ +- loading +H RESULT +çļĦ æŃ£å¸¸ +ĠR t +ä¸Ģ è¯į +ĠTh rift +eg ra +Ġreg ained +ĠBut tons +r ÃŃ +ĠM ord +Ġpro gesterone +åĽŀ 访 +ruct ose +QU ENCE +/ AP +ĠP ROM +çŃī ä¼Ĺå¤ļ +åľ¨ èĭ±åĽ½ +ĠG ourmet +æĿij 级 +ĠC umberland +Ġpass ports +æĶ¶åħ¥ åĴĮ +ro de +èĭ± çī¹å°Ķ +念 çĿĢ +, èĩ³äºİ +y ar +Ġ è´Ŀ +å¤ĸ ç±į +åĬŁ åIJį +ä¸¥æł¼ è¦ģæ±Ĥ +target s +èģĶ缣 çļĦ +,t p +ĠRot terdam +çļĦ帮åĬ© ä¸ĭ +æ¸ħ æ²³ +éĿŀ æµģåĬ¨è´ŁåĢº +ë ¬ +ens ical +Ġid ols +享 æľīçļĦ +OS C +Ġsu ing +åij¨ éģŃ +åĪĿ å¤ı +ãĢĤ 综åIJĪ +ãĢĤ èĻ½è¯´ +æį¢ åIJij +æĻ® 京 +å¦Ĥä½ķ åİ» +奢 æľĽ +æīĵ个 ç͵è¯Ŀ +T iny +ĠP ines +Ġal c +ï¼Įåı¯ ç͍ +æĪIJ绩 åįķ +Ġjournal ing +ä¸įå¿ĺ åĪĿå¿ĥ +æľī ç¥ŀ +Ġ' (' +å¼Ģ å¼ł +AC ING +ĠBuild s +çļĦåľ° åĽ¾ +çľĭå¾Ĺ è§ģ +, æīįä¼ļ +od ie +ìľ ¼ +l ave +ãĢĤ é½IJ +åĴĮ 交æµģ +-t rip +欢 å¿« +åģ¥ ç¾İ +æ½ ¦ +.w riter +pal ette +åİ¿å§Ķ 书记 +ä¸Ģ èIJ½ +大 ç¢į +å¿ĥ éħ¸ +çŃī è¯Ń +ĠAs king +Ġun suitable +é©» æĿij +ĠDu plicate +F ight +UN TRY +ठµ +ĠSub division +ĠT oni +ĠG iles +-d ays +çļĩ æĹı +Ġmis interpret +éĿ¢è¯ķ å®ĺ +[ H +co herent 
+å¯ĨéĽĨ çļĦ +Cou pon +Q ty +_ so +ĠInter im +.a ozora +人 åįķä½į +ĠH ector +éĢģ è¿Ľ +Ġprejud iced +S pr +ac lass +说å¾Ĺ 对 +ä¸Ģ æĪIJ +ä¼Ĭ 丽èİİçϽ +. rad +Ġt esters +èĩ´ åĬĽ +ĠMem o +Dom estic +Ġl angu +ï¼Ł åıĪ +满 æľĪ +, ~ +å°ı éĥ¨åĪĨ +æĿĤ ç²® +ĠRock ets +Ġguard ing +$ i +, ä¸ĩ +ãĢģ è¿ĺæľī +èµĦ产 æĢ»é¢Ŀ +Ġdecay ing +Ġ æł¸å¿ĥ +Ġg enders +èµ° 人 +è¡Įä¸ļ æłĩåĩĨ +çİī åħ° +ä½ľèĢħ çļĦ +Ġ ä¼ļ计 +å°¼ åħĭæĸ¯ +æĬ½ åĬ¨ +,ä¸Ģ ä½į +Ġ× ŀ +åįķä¸Ģ çļĦ +ĠMob il +ro ok +± оÑĤ +Î Ľ +å¾Ī éĩį +çİĭ æ°ı +åĵij å·´ +Ġm og +æĹł éĹ» +太 å²ģ +-label ed +Ġbrut ally +ĠMadd en +T cp +ĠAny where +,ä¸į管 æĺ¯ +< label +ĠT ears +ãĢģ çͲ +å®ł 溺 +ĠAppli ance +ëĵ ľ +å¤ļ ç³ĸ +西 è¾¹ +.b lack +å¥ĸ çīĮ +C ENT +F luid +s pect +åħ¶ çĦ¶ +åı° åīį +De als +ĠView Group +Marg aret +trad itional +ĠDud ley +ï¼ļ ä¸İ +æľ¬ è½® +_C OLUMN +çĽĺ ä¸Ĭ +é¼» å°ĸ +çŁĽçĽ¾ çļĦ +, out +åħ¬ èĭ± +åĩĦ åİī +ãĢģ åIJĥ +Fig s +å·· åŃIJ +' en +St opped +æį® ä»ĭç»į +termin ation +T ING +l ou +Ġ åĨῬ¡ +å°± 容æĺĵ +åĸĿ äºĨä¸Ģåı£ +å̾ å¿ĥ +çĤĴ èıľ +对ä»ĸ æĿ¥è¯´ +ĠNiger ians +çĿ¡è§ī äºĨ +ĠFly ers +ĠComfort able +è¿Ľè¡Į ä¸Ń +Ġsimpl ification +SEL F +弥漫 çĿĢ +> ) ): +Ġh h +çĬ¯ç½ª äºĭå®ŀ +Ġ{{ / +/ em +ç¾İ å¦Ĩ +PI PE +æīij é¼» +æĹł èĻij +Type Error +Ġamb assadors +Ġ 她们 +åıij 红 +åIJĮ çIJĨ +Bounding Box +s burg +ject ives +个ä½ĵ å·¥åķĨæĪ· +.Sh ared +f lo +o jo +Ġ æľŁ +ĠSch l +.net ty +ï¼Į éģĵè·¯ +St y +æķĪ ä»¿ +æĿİ æĺİ +ĠEm il +ĠFrank ie +B ET +ï¼Į åĪ· +Ty ler +p romise +w arts +Ġ éĢī +è§£ ä½ĵ +ĠReve als +ĠHIP AA +ĠAnch orage +H ab +ï¼Įåľ¨ ä¸ĢäºĽ +Ġdep rive +å¯Ĵ æļĦ +ç»ĵæĿŁ çļĦ +Ġhyp o +Ġcoerc ion +æĪij çΏ +ream ble +ĠDirector ate +< d +ï¼Į åĵ¦ +ï¼Į æĪªæŃ¢ +代 åIJįè¯į +åŃĺ æľī +ĠRes idency +æĦıåij³çĿĢ ä»Ģä¹Ī +åįĵè¶Ĭ çļĦ +çļ ij +ç»Ħç»ĩ æľºæŀĦ +Ġ(( * +\ plain +åľ¨ ä¸Ģç§į +åįĥ 亿 +ä¸ĢåĿĹ åĦ¿ +.Column s +( ad +ä¸į æĺİçļĦ +Ġey ed +_ commands +ä¸įåı¯ åIJ¦è®¤ +罪 éŃģ +æľªæĿ¥ åıijå±ķ +Ġwholes alers +n ature +ãĢģ 鸡èĽĭ +ĠB ai +âĢľ åĸĤ +éĹ² è¯Ŀ +Ġapprec iative +OU CH +Ġm ingle +TE E +羨 çľ¼çĿĽ +Ġ'* ' +Ġ èµ· +_h d +çļĦ åıĮæīĭ +ï¼Į没 éĶĻ +åĿĩ 设置æľī +mb re +Ġfootprint s +æĹł æģ¯ +éĻIJ 
度çļĦ +Josh ua +igar h +为 è¿ĩ +Ġcommand ments +åİŁæľ¬ çļĦ +, æħ¢æħ¢ +l ld +ä¸Ń ä¸İ +å¾Ī æĸ¹ä¾¿ +install er +Ġan ew +çļĦ èĦļ +Ġn ook +ĠLab rador +ADD ING +ĠBour bon +满 è½½ +å¼Ģå±ķ ç»ıèIJ¥æ´»åĬ¨ +Ġblue berry +äºķ çĦ¶ +on ant +ï¼Į 饶 +EN N +ĠVill as +ï¼Į å®ŀæĹ¶ +ĠB aj +Ġam orphous +ĠWh itt +æĺ¯åIJ¦ åľ¨ +ï¼Įä¸įè¿ĩ è¿Ļ +/ Sh +Y ork +ãĢģ 空éĹ´ +å¤ļ åıij +oss ier +èIJ¦ ç»ķ +* sizeof +ĠAd idas +éªij èĩªè¡Į车 +ãĢĤ æľīä¸Ģ +ĠC LE +ãĢģ è¿IJè¡Į +nd i +临 æŃ» +ĠReg isters +éĻ Ĥ +æĮīçħ§ è§Ħå®ļ +漫 å¨ģ +ĠAT V +ĠCow boy +на Ñı +ĠP ry +ä¼ł çĥŃ +Ġhard working +Ġmis cellaneous +ä¸ĭ è¾ĸ +åѦ ç³» +Ġ第 åįģ竳 +Ġsusp ensions +ĠHav ana +Ġs ieve +ĠT RE +ĠPro ducers +ts on +Ġfl aming +å¤ĦçIJĨ è£ħç½® +åģĩ è´§ +éĢłæĪIJ å½±åĵį +ï¼ĮèĢģ çĪ· +H w +_S AMPLE +RO T +äºļ è¿IJä¼ļ +ĠQt Gui +ck i +åĽłä¸º 她 +æļ´ é£İ鼨 +è°ĥæŁ¥ æĺ¾ç¤º +Br andon +ï¼Į 足足 +对äºİ æĪij们 +bl a +Ġany how +åºĶ è¿IJ +- Ar +ï¼Į åĭī强 +ĠT OR +åĩº ä¸ĢäºĽ +ç«ŀäºī çļĦ +Ġmetall icity +ust ic +éĺ´ èĻļ +Ġquant itatively +ĠAtt ached +_S ORT +Ġinform ant +ä¸Ń è¯ģ +åĨį ç»§ç»Ń +_C M +(p c +æ£Ģæµĭ åύ +éĻª 她 +idel berg +Ġatroc ities +t rait +ĠSp ine +ï¼Į èĦī +am ax +}} }_ +_F UN +建设 åįķä½į +è´ª åĽ¾ +æ°ijæĶ¿ å±Ģ +çµ® åĩĿ +Ġglac iers +ï¼Į åij¨è¾¹ +The mes +å±Ĥ ä¸Ń +ç½ļ çIJĥ +ĠPH OTO +per ial +è¿Ļ个 é¡¹çĽ® +èģĶç³» åľ¨ä¸Ģèµ· +Ġcooper ating +. 
Optional +Î ł +âĢĿ è§ĦåĪĴ +åı£ 令 +ç͵è§Ĩ èĬĤ缮 +EE DED +ĠMet all +ĠBot swana +od der +èĢģå¸Ī 说 +.L ook +Ġunmist ak +ĠT orn +以 äººä¸ºæľ¬ +ĠCent res +åIJĬ éĶĢ +çģĮ è¾ĵ +Ġgy ms +ĠP AM +çα åĽłæĸ¯åĿ¦ +éĥ½ä¸į 太 +Ve get +; | +æķ° çϾä¸ĩ +éķ¿å¤§ åIJİ +ï¼Į èī²å½© +åŁºæľ¬ éĥ½æĺ¯ +/h our +P ie +ĠSh ame +æĻ¯ èĩ´ +.Get HashCode +æķħ äºĭæĥħèĬĤ +éĵģ çļĦ +ï¼Į 幸 +äºļ çī¹åħ° +Format ting +Ġb py +st y +· ç§ij +æ¦Ĥ è¦ģ +Met ro +Ġendorse ments +Den ied +.dis pose +强 æ±Ĥ +ĠBarn ett +/ update +ï¼Į è¯Ĺ +åı¯ æī§è¡Į +注 æµĨ +åħ³æ³¨ çļĦæĺ¯ +Instance Id +è¾ŀ åİ» +åĬ¨äºĨ åĬ¨ +åĬł 湿 +Ġappet izer +ĠLud wig +çľ¼ éĩĮçļĦ +å¾· æ¯Ķ +Ġsuper class +æĮĤ æĸŃ +æijĨèĦ± äºĨ +Ġdiplom at +çļĦ å®ļä½į +y ah +ï¼Į 女åŃ© +am n +æĹĭ转 è½´ +éĤĵ å°ıå¹³ +diff erence +ï¼Į éĴ¢ +Ġp lex +ĠN im +满 å¿ĥ +-p ocket +G ING +R m +泡 èĮ¶ +Ġ åĪ© +el ite +Ġg fx +å¹´ èĩ³ä»Ĭ +缸åħ³ èµĦ产 +çĻ»è®° æľºåħ³ +ä¸Ģä½ĵ æľº +ĠUr du +åIJĮ æµİ +ä¼Ĺ çѹ +Ġinvent ories +il ess +ol iber +æĹł 罪 +.re nderer +çά è¡Į +æ´» ä¸ĭæĿ¥ +å¼Ģåıij 人åijĺ +éĵ¾ è½® +Layout Panel +_DISABLE D +ï¼Į åķĨä¸ļ +ĠI ber +ĠW IFI +大 ä¸ļ +åŁİå¸Ĥ è§ĦåĪĴ +ĠData Source +æij© æĸ¯ +ĠMcG ill +_ connected +èħ ĵ +åģ¥åº· çłģ +ĠA uf +æ³ķ åѦéĻ¢ +纪念 ç¢ij +Autom otive +ĠPrec ious +ĠR ae +æıIJä¾Ľ æĽ´ +ucl id +污水å¤ĦçIJĨ åİĤ +Ġdeclar atory +SE G +æĭī äºĨ +ï¼Į åįıè°ĥ +åľ¨ ä¸įåIJĮçļĦ +管 å±Ģ +ç±» åĪ«çļĦ +ãĢģ éĻĨ +ç͍ ä¸Ģç§į +hold s +ĠLa os +ä¸ĵåĪ© æĿĥ +æĴĩ äºĨ +ï¼Į çŀ§ +Ġ* __ +pro blems +ĠStep han +ĠQt Widgets +N est +br ick +Õ¡ Õ +n printf +ç͵èĦij ä¸Ĭ +ĠOwn ed +, åį³åı¯ +b eyond +éģĵ æĿ¥ +æī¬ æī¬ +Ġextrem ist +Character istic +YC LE +ent ing +ĠC TA +Ġpart ake +ĠCom parator +Ġaff lict +ä¼ļæľī ä¸Ģ个 +ĠC uts +èĩª æĭį +Ġun married +åħ¥ çļĦ +èĬĤ ä¿Ń +åı¸æ³ķ éī´å®ļ +ĠDiff icult +Ġcrad le +åĥı æĺ¯åľ¨ +çģ« è¾£ +ĠZ L +çĨĶ æĸŃ +y out +åĨį 说äºĨ +IS SN +Ġcontrad ictions +.Auto Size +C NT +ens ch +å®ļ ä¼ļ +è´Łè´£ çļĦ +åĩĨç¡® çİĩ +åº ¾ +æĺİ æĸĩ +vent ures +è¿Ļæł· ä¸ĢæĿ¥ +éĢĢ çģ« +ĠRed skins +.std in +æĮ¥èĪŀ çĿĢ +ĠAnthrop ology +ï¼Į åIJ¸æĶ¶ +est het +å¼ł ä¸ī +å¸ĤçĽĪ çİĩ +æľĢ çαçļĦ +ä»ĸçļĦ èĦ¸ +(b oost +ĠGro cery +_ ACK 
+est ra +ï¼Ľ æľīçļĦ +Ġrich ly +çĶµæľº çļĦ +Ġdw ind +ĠLanc ashire +åľ¨ ä¸įæĸŃ +åıĺ æ³ķ +éľĢ è°¨æħİ +Ġhand bag +è·³ çļĦ +å¹³æĸ¹ åįĥç±³ +åı· è§Ĵ +æĿ¨ é£ŀ +å±¥è¡Į èģĮè´£ +æ²§ å·ŀ +ĠClos et +ãĢģ å¾Ī +ĠW ilde +-b en +Ġlemon ade +以æŃ¤ 为 +åĩł åĿĹ +Ġrest raints +ï¼Įæ¯ı å½ĵ +Ġä¸Ģ è¾¹ +èIJ¥åħ» ä¸įèī¯ +-le arn +_P ID +ï¼ĮæĹł éĿŀ +.trans port +æİ¥ç§į çĸ«èĭĹ +Ġrefin ancing +ï¼Į å¹¿æ³Ľ +ä¸ī 两 +ĠCar lisle +ze it +Ġc ep +è·Į å®ķ +_var iant +T olerance +Ġ å¾·åĽ½ +大 èĽĩ +åħ¨ çıŃ +æ°´ æµĴ +.to List +æĤ¬ æ®Ĭ +Ġpend ulum +ï¼Į è½½ +å¢ŀ æı´ +ern ary +Ġrest arted +壮 éĺĶ +ĠH och +ï¼Ī æ¯Ķå¦Ĥ +ĠNew est +æĭ© ä¼ĺ +omen cl +ï¼Įæĸ° 车 +Ëĭ Ëĭ +H J +ï¼Į å¾Ī好 +ä¹Łä¸į ä¸Ģæł· +Ġgrape fruit +ĠC PC +åľ¨ å±± +æīĵ æīĵ +sp awn +Ġtool ing +ĠS ST +å½¢æĪIJ æľī +åĴĴ éªĤ +Ġreluct antly +- events +æ¯Ķ æĸ¹ +Ġthanks giving +åľ¨ å½ĵåľ° +ist ar +Ġun heard +Ġunder lined +æ¸ħ æ¸ħ +èĬ± å²Ĺ +_F P +èººåľ¨ åľ°ä¸Ĭ +H TTPS +ãĢģ è¿ŀæİ¥ +th or +ĠH odge +ĠIn struct +abet ics +Ġ ][ +ï¼Į åIJĮæł·çļĦ +ĠD ias +tern oons +iss or +åįģ å²ģ +离 å¥ĩ +ç¨ĭåºı 设计 +Ġupload s +Ġdeter rent +åij ² +çĩĥçĥ§ çļĦ +è¯ģæĺİ èĩªå·± +ĠÑģ ÑĬ +ĠOrder ing +æĸĩçī©ä¿ĿæĬ¤ åįķä½į +åĬł 大äºĨ +(" \" +-L ength +"} ; +/ arch +et to +.w av +ĠPER F +ĠASE AN +' une +- = +P aste +Ġ åĮĹ京å¸Ĥ +ï¼Į åĬ³åĬ¨åĬĽ +åĴĮ èͼ +ä¹ĭ èĩ³ +_m et +Ġ æİ¥ +Ġand rogen +大 æĶ¾ +èĬ± æŀľ +Ġmar ina +çļĦæīĭ èħķ +ĠB ayer +åĴĮ ç¬¬åĽĽ +ï¼Įä½ł å°±æĺ¯ +Ġbit ters +Ġ 社 +çļĦ èį¯çī© +le i +å¹´ æĹ¶éĹ´ +ĠY MCA +é«ĺ å¼Ģ +åºĦ éĩį +is ke +çļĦ æľīåħ³ +ĠC TO +Ġnot ations +ä¹Ł 说ä¸į +Ġpat ched +], " +å®ĺ 宣 +åΤ åĪ« +P AL +åĶ¿ åĸĬ +(! 
( +è¯ŀçĶŁ äºĨ +ï¼Į çĶ»éĿ¢ +Ġg db +ç»Ħ ç»Ħéķ¿ +.l aunch +ãĢĤ å¹³ +ĠD ice +鼨 æŀĹ +, 约 +@ Component +ãĢģ æĽ¹ +æľ¬ åľ°çļĦ +å¼Ĥ æŀĦ +plan et +Pray er +ä¸İ åĪĨæŀIJ +Ġevery time +æİ§åζ ä¸Ńå¿ĥ +ĠAg nes +åį«çĶŁ ç»Ħç»ĩ +Ġdecom posed +ä»İä¸ļ èĢħ +/ es +_ org +ä¸Ģ ç¼ķ +ãĢģ æħ¢æĢ§ +Ġsh one +åĸĿ ä¸ĢæĿ¯ +ï¼Įå·² æľī +ĠCos mos +ï¼Įæĺ¾çĦ¶ æĺ¯ +ä¸ī éĩį +èIJ§ æĿ¡ +, æ°ijèѦ +åħ³éĶ® çĤ¹ +inv oice +ĠL oose +éĩį äºİ +pr ised +æł¡ å°ī +Ġweb pages +ĠHar b +ĠB rem +ä¸ŃçļĦ ä½ľç͍ +- ordered +æ£ĢæŁ¥ åĴĮ +_SE G +ĠDecor ating +f av +ĠA ero +å°ı éģĵ +Ġpr une +è´¹ åĬ² +ï¼ĮæĪij们 å·²ç»ı +èĢķ åľ°éĿ¢ç§¯ +ä»Ĺ çĿĢ +ĠG OLD +ç»ĥ åħµ +_ ir +Ġbest owed +å¼Ģå±ķ å·¥ä½ľ +osoph ical +ag os +ĠD é +EX IT +第ä¸ī 代 +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠ +ĠMenu Item +æĺĨæĺİ å¸Ĥ +Ġa ides +iff in +ç¼ ľ +æ¡ ¢ +æµ· è±ļ +表çݰ åĩºæĿ¥çļĦ +äºĨåĩł æŃ¥ +ADI US +ï¼Į 广西 +ãĢĤ æĽ¹ +ãĢģ è°ĥæķ´ +âĢĿ æĮīéĴ® +ĠE SR +å¹´ å°± +Ġcomm ens +Ġamel ior +r ino +éª ° +_p ython +å¹³åı° åĴĮ +sl ave +ç«Ń å°½åħ¨åĬĽ +<< ( +mem item +Bel ieve +æĺ¯ ä¸ĸçķĮä¸Ĭ +ä¸į è¿Ń +ĠL ila +çķĻ åľ¨äºĨ +èľ¥ èľ´ +x in +ãĢģ 建ç«ĭ +ĠEx xon +ĠVisual ization +ãĢģ åįķä½į +dis cuss +_ex ecute +Ġ çĻ¾åº¦çϾç§ij +Ġg ens +Ġme adow +ns ics +åį°è±¡ ä¸Ń +techn ic +èĽ¤ èŁĨ +éĮ ² +ĠF ires +ä¸Ģ个 åŃ©åŃIJ +oid es +æ·± çα +ĠCon ce +ĠAb igail +Ġ åĬ¨ +æľĢ 主è¦ģçļĦ +que ues +stream s +Ġsalt ed +ĠGar ner +.key Set +ĠH CI +Ġmorph isms +ï¼Į åıĸåĩº +## _ +Ġrec overs +Ġtele port +Prov ince +_ To +天 主æķĻ +Ġet ched +Ġcheck points +Ġâĸ ³ +åı« äºĨ +Ġwhole hearted +else if +éĩįè¦ģ讲è¯Ŀ ç²¾ç¥ŀ +åıį å·® +ï¼Įä½ł 好 +Ġair s +éķĤ 空 +ew ays +çļĦ大 çľ¼çĿĽ +}) =\ +èµŀ 许 +ä½ĵèĤ² å±Ģ +è¦ģ éĢīæĭ© +æĺ¯ä¸Ģ 份 +Ġtax is +rehens ible +Ġfort ified +am ura +ĠS out +ãĢģ M +ãĢģ èĦĤèĤª +人 ä¸ĸ +æģ¶ æ¯Ĵ +omew here +ç©¶ åħ¶ +idd ers +ç»ĵæŀĦ 设计 +Ġbrainstorm ing +P ap +or gh +ãĢģ K +ĠE sk +Cont rast +Ġâ Ł +Ġ æł¹æį®æĿĥåĪ©è¦ģæ±Ĥ +éĥ½ åĴĮ +è§ī çĿĢ +{} ", +F illed +éĢĢ ä¸ĭ +æĴŀ ä¸Ĭ +éĤĢ çº¦ +å¤ĸ åķĨ +-st ick +ä»ģ çα +ĠW orse +åĨħ éĻĨ +æĺİ æĻ° +åĨ· æļĸ +æ¥ļ äºijé£ŀ +ĠArt istic +ĠI AS +ĠS ID +cl ients 
+è¾ŀ åħ¸ +, æīĢæľīçļĦ +/ ns +_b h +ĠChe er +æĪĺ士 们 +Ġc et +ãĢĤ 鼷 +ĠF aul +ä¹Ł å¾ĹåΰäºĨ +Ġ[ â̦ +Ġmultif aceted +> (& +Ġab er +çľĭåΰ è¿ĩ +Ġfear ing +{ [ +ene ath +j ie +ï¼Į çĸ«æĥħ +游æĪı ä¸ŃçļĦ +Ġcircum vent +éĶIJ åĪ© +ï¼Įä¼¼ä¹İ åľ¨ +å½Ĵå±ŀäºİ ä¸Ĭå¸Ĥåħ¬åı¸ +Ġshar per +/ex amples +- validation +Ġp ak +Ġth a +ĠS ALE +ç¨İ éĩij +ç½¢ ä¼ij +old t +Ġbust ed +Ġmim ics +åIJīæĢĿ æ±Ĺ +å¾· åĪ© +Ġden oting +Ġsuc rose +ins k +éł ħ +; font +E z +ä»ĸ 认为 +å§» ç¼ĺ +(code c +D ogs +Ġun complicated +èĩªå·±çļĦ å·¥ä½ľ +Ġest r +Ded icated +Ġl itt +å®¶ å¢ĥ +群 ç»Ħ +æĬĹ åĬĽ +è¾ħ 以 +ä¸ŃåĽ½ è¯ģåΏ +æĭī å°Ķ +-n utrients +è¾ĵåħ¥ æ³ķ +ĠPick up +ãĢģ å±Ģéķ¿ +ï¼ģ åı¯æĺ¯ +ĠNew foundland +_R AM +_log its +çļĩåIJİ å¨ĺå¨ĺ +B ITS +çļĦ ä¼ĺç§Ģ +ĠT MC +è¿Ľè¡Į éĢīæĭ© +éķĢ éĵ¬ +. dd +å·± æĸ¹ +-m aker +å®ŀæĸ½æĸ¹å¼ı çļĦ +ĠFlo ors +åı¯ä»¥ åıĤèĢĥ +rad er +-pro p +B Q +qu estions +Ġun conditionally +å¾ħ 人 +æ»ij åĿ¡ += max +L emon +def initions +åħįçĸ« åĬŁèĥ½ +, æķĻå¸Ī +Ġ æĸĩåĮĸ +ener al +Character istics +人 æľº +è¿ĩ æĹ¶ +ĠSh iva +ĠFl ames +ĠPot atoes +åľ°åĽ¾ ä¸Ĭ +Simple Name +ĠSapp hire +T ank +ãĢģ éĽĨ +ç§ij åįı +å°ıå§IJ çļĦ +.Rec ord +pp ard +Ġconsent ing +è´§å¸ģ æĢ§ +å¸Ĩ å¸ĥ +Ạ¥ +Ġre legated +Ġif ace +Ġher ds +å¾IJ æ¶Ľ +Ġmature d +Ġmonot onic +D anny +ï¼Į æħķ +ĠG au +ç͵ 车 +ï¼Įä»ĸ çĶļèĩ³ +ç²¾çĽĬ æ±Ĥç²¾ +ãĢģ ç»Ĩ +åıij éĹ® +横 å¹ħ +ubb les +_POL ICY +åĴĮ å®¶åºŃ +çīĩ éĿ¢ +ĠSmall er +ï¼Į çİ»çĴĥ +æİĴ 便 +ä½Ļ 个 +Access ories +ãĢģ æĬ¤ +ĠShe pard +éĶĻ å¤± +ä»»ä½ķ äºĭæĥħ +å¿įä¸įä½ı äºĨ +olog na +æł¼ æĭīæĸ¯ +- γ +Ġfor aging +ĠR ULE +Ġ好 åľ¨ +_ du +SC ALL +Ġt body +æľ¬ å°±æĺ¯ +Int Ptr +Ġpriorit ized +Ġ åĸľæ¬¢ +esc aped +ĠTor rent +Ġ ä¿¡æģ¯ +åľ¨ åį³ +ï¼ĮæĪij å®¶ +éĿŀ ä½Ĩ +QU ERY +è· Ĩ +Ġx hr +dist ribution +çŀ¥ äºĨä¸Ģçľ¼ +Ġ é²ģ +为 该 +Ġdon uts +IL A +ĠLO AD +æ³ķåħ° 西 +éļ¾å¿ĺ çļĦ +ĠM ater +çĥŃ å·´ +è´§ åĵģ +.t ake +å°½éĩı ä¸įè¦ģ +ĠConven ient +- contact +ï¼Į è§Ĩ线 +åŀĤ ä¸ĭ +ped o +ãĢģ æ¤įçī© +con ftest +๠Į +Ġ åIJ¬äºĨ +ï¼ģ ä»Ĭ天 +åĮħæĭ¬ åľ¨ +Se ems +ï¼ĮæĬĬ èĩªå·± +éªĹ æĪij +ĠT OM +天 马 +éķ¿ çº¿ +ä¹IJ 祸 +Ġâ ģ +Ġsubt ree +as see +第äºĮ æī¹ 
+Ġdi verted +ĠMix ing +c ru +Ġ 代 +Ġsc ares +åį´ å·²ç»ı +å½ĵçĦ¶ çŁ¥éģĵ +å§ijå¨ĺ 们 +toHave BeenCalled +Th r +Th reat +举 éĺ³ +ï¼Į没 åĬŀæ³ķ +Ġrom an +Compat ibility +- Service +æł¡ æľį +ä¹Łä¸į æĦ¿æĦı +说çļĦ éĤ£æł· +Ġencaps ulated +( ä¸ŃåĽ½ +erv al +.m akedirs +æĥĬ 诧 +ãĢı ï¼Į +Ġlev itra +/ ne +ãĢĭ ä¹ĭ +ĠClass ified +æ¡¥ éķĩ +女æĢ§ æľĭåıĭ +æµ®çݰ åĩº +.N ONE +è¾ĵ åįµ +][ / +åͤ èµ· +åIJŀ åĴ½ +-s plit +æľįåĬ¡ ç«Ļ +à³ ģ +åİĭåĬĽ ä¼łæĦŁåύ +к Ñĥ +Ġle mons +ç͍ èĦļ +_p ol +æĪ´ ç»´ +çαæĥħ æķħäºĭ +åľ°è´¨ çģ¾å®³ +ng a +ĠAss umption +-w ife +çīµ æ¶ī +ĠC air +ãĢģ ç³ĸå°¿çĹħ +Ġox ides +ĠEnhance ment +çļĦ åĪĽæĸ° +鼨 éĽª +]( ./ +files ystem +Ġassim ilation +w ik +ä»ĸ们 èĩªå·± +带 æĪijåİ» +æĺ¥ æĹ¥ +æĬ¢ èµ° +, ç͍äºİ +为 群ä¼Ĺ +Ġtemper ate +çĸı è¿ľ +Ġdef raud +éĽĦ æĢ§ +æĺİçϽ çļĦ +Ġdef erence +, å¹² +ä½ł ç»ĻæĪij +ĠCh andra +éĻį å¹ħ +Trans cript +Ġcontra ception +ãĢģ è¯ļä¿¡ +æĢ» åħµ +Ġfacilit ation +ĠSher ry +Ġsaut é +b ibr +æŀģ å°ijæķ° +sc apes +.C H +re name +ï¼Į çĿĢå®ŀ +æĺ¯ æĺ¯ +Ġsh aved +ï¼Į æ²īé»ĺ +il or +æĵ¦ å¹² +Ġ 举é£İ +ãĢģ 空è°ĥ +ĠW ink +ĠCall able +izz o +çļĦ åľ£ +ãĢģ åĨĻ +ĠSm ok +. cons +æĪĸ 被 +enu ous +) v +ï¼Į åĨ·åį´ +ĠF its +é«ĺ éĢļ +åı¯ä»¥ åIJij +. å¦Ĥæŀľ +ĠN CC +å°± 绪 +Ġq emu +è§Ħ模 åĴĮ +( Index +å½ĵ åħĪ +ĠCl ifton +ä¹ĭéĹ´ éĢļè¿ĩ +.r x +ä¸ĵç§ij åŃ¦æł¡ +èĽĽ ä¸Ŀ +ä½ľ 对 +éķ¿ å¤Ħ +交 æľĭåıĭ +ĠDec o +G ED +I UM +令 ä¸ĭ +Ġgoal ie +.re ject +社ä¼ļ主ä¹ī çݰ代åĮĸ +ĠMoz ambique +æ·± åĪĩ +æĢ»ä½ĵ æĿ¥è¯´ +ĠC FP +ĠK es +éĺ´ åĨ· +arant eed +, 满足 +. 
low +ãĢģ ç²¾åĩĨ +Ġunderstand ings +åŃIJ宫 åĨħ +Ġvide ot +Gar age +ĠD rosophila +rou se +ach o +Ġfavor ably +ä¸Ģ ç¯ĩæĸĩ竳 +åΰ èĢģ +é£İ è¶£ +.p ackage +åĽłç´ł çļĦ +Ġconstitution ally +const expr +ĠPok ies +ä¸Ĭ åĵģ +å·² è¿Ľåħ¥ +æķĻèĤ² æľºæŀĦ +çł´ 浪 +举 ä¸ĸ +Ġclub house +ĠCR P +åľ¨ æīĭä¸Ń +ĠThe ological +ĠK au +ä¼ł ä¸ĸ +Account Id +* N +ĠB aked +Ġco op +åĨį éĢł +Ġfl otation +éĨ º +Ġsequ est +ĠRen ee +ĠSere na +太 éĩį +ç®Ĺ è´¦ +yn man +æĬ½ è°ĥ +ĠRad ius +Ġdecomp ose +av ings +å¦Ĥ åĪĿ +ï¼ģ åĪ« +äºĨä¸Ģ é¢Ĺ +ĠAm ish +èĤ¡æĿĥ æ¿ĢåĬ± +Ġhol iness +Enumer ation +tagHelper ExecutionContext +? ä¸ĭéĿ¢ +Ġw enn +Rou ge +æĮ İ +æİ¨ äºĨ +ä¼ij æķ´ +d raft +ĠM bps +ĠR oku +å´ĩ é«ĺçļĦ +="# " +å¸ĪèµĦ åĬĽéĩı +d q +ion ate +èį ļ +ask an +ï¼Į西 çıŃçīĻ +id le +Ġsub lim +_S AVE +æĿij åŃIJéĩĮ +å¯Ł çľĭ +bl ast +Gram mar +èŁij èŀĤ +ï¼Į X +ĠH olds +Ġ\ } +/m ysql +à± ģ +Ġ 羣æĺ¯ +ru v +åı¯ä»¥ åħĪ +éħ IJ +. force +ä¸į 满足 +um sy +Ġse af +Ġmillion aire +Ġton ed +E FF +r ime +.get Bytes +å¦Ĥ èį¼ +ãĥ ı +èĥľ çİĩ +çĻ» ä¸ĬäºĨ +é¼» èħĶ +Ġg ist +å¾Ī æĹ© +н е +Ġcalcul ators +ERR IDE +ä¸įåľ¨ çĦī +åĴĮ åIJ´ +å½ĵ 个 +è£ħ 满 +ç»ĵæŀĦ ä¸Ń +. ignore +æĺ¯ä¸Ģ åĪĩ +ä»ħ éľĢ +Ġblock ers +Ġver ifiable +ï¼Į æĿĥ +Ġp q +æĬļ æħ° +.per form +Ġinert ial +RE SET +åĨĴ åĩºæĿ¥ +缺çĤ¹ æĺ¯ +Ġskate board +ĠByz antine +CH AN +,æľī äºĨ +Ġm ellow +// === +åĴĮ æĶ¿çŃĸ +ign ation +ĠSe ab +ĠMy rtle +常è§Ħ çļĦ +$ db +S erves +Ġt witch +Ġs ous +çϽ æĹ¥ +ç½® ä¸ļæľīéĻIJåħ¬åı¸ +ĠDevelop ments +å°¾ çĽĺ +èĢĮ æľª +æĦı åľ° +Ġfr aternity +(c anvas +Ġä¸Ģ æĹ¶éĹ´ +Ġdo od +第 åįģ竳 +åĩ¯ æģ© +Ġtape red +Drop down +pers ons +ä½ł 羣 +åĪļ æĥ³ +Ġfav oured +îĢ Ħ +åħĭéĩĮæĸ¯ èĴĤ +\ varepsilon +æ² ± +ĠOlymp ia +æĹ¥ åĩºçĶŁäºİ +æĪ· 人家 +Ġbreed ers +ä½ĵ éĿ¢ +Cl ay +ãĢĤ 奥 +est ar +Ġcapt ivity +åı¯èĥ½ä¼ļ 被 +ä¸īåĪĨ çIJĥ +Ġgreet ings +Ġnort heastern +Ġ åĵ¦ +åĽ´ ä½ı +ĠSpe ars +Ġunle ashed +ĠAtmosp heric +ĠJ L +Ġgl ide +Ġt riv +åĺ¶ åIJ¼ +Ġ____ __ +榴 èݲ +s weet +çļĦ ä¸ŃéĹ´ +.pub lish +. 
agent +k 线 +ĠA cer +ĠL itt +-C al +ï¼Į åĮº +ï¼Į 注åĨĮèµĦæľ¬ +çĿĢ è¯´ +IG INAL +ĠRec yclerView +åĪĨéħį ç»Ļ +äºĨ å¾Ĺ +èĪ · +ili ency +ï¼Įå¼Ģ æĭĵ +m ime +in ib +ãĢĤ çİ°åľº +é¾Ļ åĩ¤ +åĨ¤ å®¶ +Administ ration +ĠM asc +ä¼ļ è¯Ĭ +è®® ç¨ĭ +Ġhon ours +æĶ¯æĴij æŁ± +è¿Ļ 两ä½į +èĩªå·±çļĦ åĬªåĬĽ +åı« äºĨä¸Ģ声 +æĬĬæı¡ 好 +ĠA stra +âĢĻ l +th on +åĿĩ éĩĩç͍ +ä¸ľè¥¿ çļĦ +èįĴ èĬľ +Ġget User +管çIJĨ æ°´å¹³ +ĠComp assion +çĶŁæĹ¥ å¿«ä¹IJ +or ning +ĠD res +Ġsm b +.f inish +oll ip +éĹŃ çĿĢçľ¼çĿĽ +èĤ¯å®ļ è¦ģ +å°½åı¯èĥ½ åľ° +x FE +Ġl upus +æľĢ ç®Ģåįķ +Ġmust er +Ġdream y +pat ients +åĮºåĪ« äºİ +ĠLah ore +Ġ 说æĺİ +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠ +ĠÎ ¹ +Ġcode cs +ä¼Ĭ å°Ķ +-P ack +ï¼Į è®Ńç»ĥ +pe as +ĠJ avier +å°Ĩ å®ĥ们 +æīĢè¿° 第ä¸ī +ï¼Įå°± å¦ĤåIJĮ +æijĬ å¼Ģ +å¤Ħç½ļ éĩij +åĩº æłı +å°±æĺ¯ 对 +Ġparticip atory +çľĭäºĨ ä»ĸä¸Ģçľ¼ +à¥įठ¯ +çļĦ åĨĽ +çĭ¬ åΰ +ä¸ļåĬ¡ åıijå±ķ +ĠP eng +ĠR iders +åĩº 人 +Ġrem uneration +è¿Ī æŃ¥ +ĠHyg iene +ĠU IS +æĶ¿ å±Ģ +Ġphilanth ropic +ï¼Į æĢĢ +容 ç½® +丰 缼 +表çݰ å½¢å¼ı +Ġrev olt +Ġcritic izing +- settings +\ V +çļĦ åĪĺ +æİ¨ 车 +ĠVeget ables +Ġchees ecake +vart heta +ï¼Į éĢĤå®ľ +_s ound +( ep +åIJį è´µ +çī¹ èĴĻå¾· +AL I +ĠDiam eter +Ġleisure ly +, ] +Ġ æĿ° +ï¼Į åĸ· +Ġn ectar +ä¹ĭ çģµ +æŃ¦ çİĭ +ĠMcC artney +âľ ħ +: Is +_ until +æľī åĪ©çļĦ +Ġch ancellor +æĽ´ æĦ¿æĦı +ĠIs Set +Ġphysic ists +. 
ready +b ron +ä¼ij åħ» +ĠTow ard +ĠJ ord +ĠK ale +æıIJä¾Ľ ä¸ĢäºĽ +Ġden omination +Ġinitial s +ĠC OD +od end +è¿Ļ çķª +Pro fit +è¯ķ è¿ĩ +åĪĿ åĪĽ +-int ensity +ï¼Įåĩıå°ij äºĨ +F PS +e V +çļĦåĨħ åŃĺ +: E +q li +ãĢĤ å¿« +Ġdis ordered +conf erence +æĢ»ä½ĵ è§ĦåĪĴ +press ure +ĠTerror ism +ãĢĤ åĽ½éĻħ +ï¼Įè¿Ļ 使å¾Ĺ +UL ATION +ï¼ĮéĤ£ä¹Ī ä½ł +ï¼Įè¿Ļæł· çļĦè¯Ŀ +èĩªéĹŃ çĹĩ +ï¼Į æŃ»äº¡ +-c ourt +ĠSA FE +Ġn anny +第äºĮ æľŁ +Ġdebt ors +ï¼Įä¸Ģç§į æĺ¯ +Ġchandel ier +, ç«ĭåį³ +ĠK T +ï¼Įè¿Ļ 座 +rug ated +, 女人 +ãĢĤ æľīäºĽäºº +ä¸į äºĮ +ï¼ĮéĤ£ 天 +Ġuser Name +Ġmbed tls +Z F +ãĢģ 该 +cor p +.inner HTML +ĠB AM +ä¾ Ĺ +In g +å®ĺ æĸ¹çļĦ +å¤ĦçIJĨ åĴĮ +y rs +ĠR ath +ä¸ĩ å·¦åı³ +äºĨä¸Ģ 身 +绾 ç»İ +(i ii +Ġsup ersed +_cl i +DOWN LOAD +PI LE +Rec yclerView +(int ent +ï¼Į åĴ¬ +Ġkn ack +æĭī éĩĮ +ä¹° æĿ¥ +åı¤ è¯Ĺ +çļĦåīį æĻ¯ +ï¼Į èµ¶å¿« +ĠC CL +Ġ" ? +æĥ³ 好 +äºĨä¸Ģ 款 +绿 æ´² +Line Width +åıijèĩª ç®Ģ书 +ĠB ett +çķ ¿ +iss en +认 åĩºäºĨ +çļĦåİŁåĽł ä¹ĭä¸Ģ +éļı便 便 +èł¢ èł¢ +Ġcon glomer +ï¼Įè¿Ļ å°ıåŃIJ +Ġerror Message +{{ # +Anim als +ãĢĤ æķ´ä½ĵ +æľī åı¯èĥ½æĺ¯ +ĠR ory +tr ast +Ġhead board +ĠBe ats +Ġkinem atics +m allow +Ġ 尺寸 +çĶŁ æºIJ +ãĢĭ ä½ľèĢħ +Ax es ++ T +Â İ +ĠJ T +_t f +be hind +ä¼ģä¸ļ ç»ıèIJ¥ +èİ· çĽĬ +å¹³åĿĩ æ°´å¹³ +åįģåĽĽ å¹´ +ĠRo ast +Ġ è´¨éĩı +Ġl umen +âĢľ å¿« +åŀĭ éĴ¢ +æİ¢ 访 +åı¸æ³ķ æľºåħ³ +ĠDish washer +k im +Ġ å¹´çļĦ +Ġ[ ]) +çŁ³ è¶Ĭ +ãĢĤå¦Ĥæŀľ æľī +Ġmicro gl +Ġdestruct or +è¨Ń å®ļ +z ia +ãĢģ è£ħ饰 +Ġro i +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠ +éģ£ ä½¿ +; margin +Ġ 个æľĪ +çļĦ 课ç¨ĭ +éĺŁ åĴĮ +.g roups +对å¤ĸ æĭħä¿Ŀ +Ġ æŀľ +sg i +çĬ¶åĨµ ä¸ĭ +- structured +ĠWe yl +åĥı ä»ĸ +çı Ī +è¡ĮæĶ¿ 许åı¯ +ĠNeed le +Spons or +N ano +ol ist +ä¹Ł æŃ£æĺ¯ +ĠCh ong +çĶ· æ¼Ķåijĺ +, åŃĻ +ĠP ett +em er +ĠAbb as +_VERT EX +ĠY ad +ĠPer ipheral +Ġrev olve +h x +ĠN ico +au ce +âĪ ¼ +ï¼Į主è¦ģ ç͍äºİ +Ġunw avering +ĠFO UND +×ķ× ª +Ġd rib +ĠG ing +ä¹Ķ æľ¨ +ĠLiving ston +åĽļ çĬ¯ +èĵ¦ åľ° +åıĬ æľįåĬ¡ +-p y +çĭ¼ 人 +ï¼Ī G +æ»ĭ éĺ´ +åĵĢ åļİ +.equals IgnoreCase +åĬ¨åĬĽ çĶµæ±ł +MO ESM +骨干 æķĻå¸Ī +ãĢģ å°¿ +使 
èĩªå·± +Att ributed +ĠEar rings +åħ¨ èĥľ +ï¼Įå¦Ĥ æĤ¨ +Ġign ite +便 è¦ģ +ï¼Įåľ¨ è¿Ļç§į +Ġinfrast ructures +ĠM ID +ï¼Įåĩłä¹İ æĺ¯ +Ġneurop athy +. room +åIJĪ ä½ĵ +Ġfulf ills +C ute +{ in +Ġj asmine +Ġcomp ilers +æĥħ åĬ¿ +ç²¾ è¿Ľ +å°ı éĿĴ +é¦Ļ èįī +ĠT ires +缴æİ¥ å°± +çľī çľ¼ +ĠN ur +Ġco ke +Ġins cription +åĵĪåĵĪ ä¸Ģç¬ij +Ġde odor +èĩª 大 +ĠPro st +Ġview Model +ĠIm pl +ä¸ĢçĤ¹ ä¸ĢçĤ¹ +空æ°Ķ çļĦ +_AL G +lo ans +åı¶ é£ŀ +Ins pect +ĠCharl ott +d pi +å¿ĥ èĤº +Ġext rad +æıı æ·¡ +ĠCarl ton +ĠGib raltar +, çϾ +ç²¾ å·§ += } +Ġ ä¿®æĶ¹ +ĠMid land +ĠC ic +çIJĨ çĸĹ +ï¼Įä¸Ģ æł· +ï¼Įæľī ä¸ĢäºĽ +_F IN +ĠTor ch +ï¼Į让 å®ĥ +. uid +çļĦ éļ¾åº¦ +åı° è´¦ +inter faces +çļĦ人 å½± +ES OME +w ish +æµ· éĿ¢ +Ġprob iotic +åŁºæľ¬ åĮ»çĸĹä¿ĿéĻ© +褶 çļ± +- force +_c alls +å°ģ ç¥ŀ +裸 éľ² +Ġarsen ic +é ¦ģ +ï¼ī âĢĶâĢĶ +æµĵ çĥŁ +f rog +ĠP OV +Ġde ceived +Ġad ored +Ġpr ick +主é¢ĺ æĽ² +ĠNic ol +Ġpion eered +ĠF atal +å¤ļ éĥ¨ +ĠV ide +of i +éŀĭ åºķ +ç¼Ŀ 纫 +hed ra +ãĢĤä½Ĩ æĪij +æĸij 驳 +èIJ¥éĶĢ çŃĸçķ¥ +ĠSelect ing +ĠGam ble +ï¼Į éĶģ +_c art +è¶ħ åīį +Ġaff licted +åī¯ å¤Ħéķ¿ +离 åľº +è¿Ļä¸Ģ æĭĽ +soft max +.sub ject +é¢ĵ åºŁ +est ablish +Ġsp rang +åĪĨ享 çļĦ +å¯Ĩå°ģ ä»¶ +è¯ŀ çĶŁçļĦ +å±ħé«ĺ 临 +è¡Į æĢ§ +éŨ æĿ¿ +By Key +_dir ty +Ġs out +ç»ı 审计 +Ġdef lection +以为 æĪij +åĨħèĴĻåı¤ èĩªæ²»åĮº +ãĢģ èĶ¡ +大 åĿĹ +_V ARIABLE +ĠPres cott +Ġfol ly +- Commerce +è¦ģ æĺ¯ä¸į +_c ar +val ued +Ġec static +ï¼Įåı¯æĺ¯ 她 +ĠLuc a +ĠExpress ions +大大 éĻįä½İ +âĸ¡ âĸ¡ +D al +R are +ĠS asha +Âł of +AP R +é¤IJ åݨ +Ġcos mos +æķ´å½¢ ç¾İ容 +ãģĿ ãģĨ +Ġkinem atic +ot os +Ġpres ets +ä½İ 声éģĵ +çİī åύ +Ġ[ ]( +çĥ½ çģ« +ĠM eng +ä¸ĵ éķ¿ +ï¼ĮæŃ¤ åIJİ +om aterials +ãĢģ è§£ +ĠAl as +临 ç»Ī +ĠAdd ison +Ġfire wood +åĿ¡ 度 +ĠWu han +(" * +åı² åѦ +alloc ator +Recogn ition +ĠVaugh an +æľĢ大éĻIJ度 åľ° +, åı¦ä¸Ģæĸ¹éĿ¢ +ãĢģ 麻 +ous ine +被 é̼ +ĠRe el +ĠTravel er +Ñ ļ +Ø « +Host name +Ġpig ments +å¼ł æ°ı +Ġmol ten +éĹ® èĩªå·± +åħħ è¡Ģ +éĩĩ访æĹ¶ 表示 +Ġtrans duction +TE CTION +Ub untu +- κB +I k +ç͍ ä¸Ĭ +æķ¬ äºŃ +ç»Łè®¡ åѦ +éľĵ èϹ +* A +Oper ators +Ġsweat y +ï¼Į æĬ¤ +åѦ åΰäºĨ +ï¼ī 以 +åħį 
äºİ +Ġsn ail +åħ³éĶ® æĬĢæľ¯ +çģ¾ ä¹IJ祸 +ä¹ı åij³ +Ġdeep ening +ĠBuck ley +ï¼Į æĶ¶èİ· +ĠM CS +ĠP CS +æľ¬ åľº +æĽ´ éĩįè¦ģçļĦæĺ¯ +_c orrect +ÙĪ ÙĦ +Ġly rical +渺 å°ı +ãĥĹ ãĥŃ +ĠS cheduling +Ġq r +ĠâĹ İ +Ġhydrox ide +way ne +.t imer ++ X +ï¼Į çĮ« +ĠG aut +èĢħ ä¸į +ĠT od +æľī å¿Ĺ +_BO X +æĺ¯ æĮīçħ§ +_D ESCRIPTION +ä¼ijæģ¯ åIJ§ +ä¸ĵåĪ© çĶ³è¯· +g cd +rib es +æĬķ æİ· +请 大家 +伤 åijĺ +ĠMcK enzie +Ġunders ide +ãĢģ b +Ġsc outs +线 ä¸İ +ภ¸ +ĠBrig gs +I v +Ġh oses +Ġu g +å¾Ĺ çĽĬ +_in stances +ĠPC M +Ġgear box +Ġaston ished +çİ°åľ¨ è¿ĺ +夺 åĽŀ +çľĭè§ģ ä»ĸ +ĠAlbum s +, å±± +大 åºĨ +_c i +å¼Ħ åΰ +社åĮº å±ħæ°ij +ĠT ender +ãĢģ èĥ¡èIJĿåįľ +æİ§åζ åı° +Med ian +�� �� +Ġinduct ed +çļĦ æ¶Īè´¹ +Ġre position +๠ĩ +################################################################ ######## +Ġ çģµ +Ġsc orn +ĠClo ver +ĠE IN +åĬŁ ç»© +ãĢģåī¯ å±Ģéķ¿ +é£İ éĩı +èĬ± æµ· +, is +à ĩ +æĺ¯ 她çļĦ +ĠB ly +ub ar +éŨ æ¡Ĩ +(p id +ĠTH ERE +ĠAngel o +ç½IJ ä½ĵ +Gal axy +åĴĮ ä½łçļĦ +ä¹Łä¸į å¤ļ +èµĦæľ¬ å®¶ +ĠMeaning ful +æ¦Ĩ æŀĹ +ä¸Ģ æŀª +åĴĮ åħ¬åı¸ +èĢģ äºĶ +æµ® äºij +åIJ¾ å°Ķ +Ġun structured +åºĶ 符åIJĪ +ç²¾ ç®Ģ +èĭĹ æĹı +Ġbran ched +N aturally +u let +Ġb uns +ä¸Ģ æłĭ +ug en +åıį æĤĶ +çĦ¦ çĤŃ +åıijèĩªç®Ģ书 app +ãĢĤ ä¹Łå°±æĺ¯ +ib b +å¦Ĥ 鼷 +ï¼Įåı¯ æł¹æį® +arc gis +åIJ¬åΰ çļĦ +ĠC AB +Ġexc ite +å°¾ çļĦ +Vis a +污æŁĵ çļĦ +, åĽŀ +Ġf ades +ĠY um +ï¼Įæ¯ı 人 +_dead line +ĠR é +åıª çľĭ +ĠSh ampoo +æľĿ 代 +Ġboard ed +ĠRoss i +Ġfost ers +Ġexhilar ating +Ġactiv ator +æĪIJåijĺ çļĦ +re leases +âĢĻ est +å¼Ģ åİ» +åģļ éĶĻ +éĽĨåĽ¢ åľ¨ +ÑĢаР¼ +Ġim balances +åłĤ çļĩ +åĽĽ æĿ¡ +z burg +æľī çĹħ +âĢľ åħ« +ĠV F +.t a +ĠRel iability +ĠView ing +, ç͍æĪ· +W ikipedia +c ipher +_TR ACK +Ġw agers +ĠH IF +éģ ½ +ĠDeterm ination +广æĴŃ ç͵åı° +opa edic +( valid +r sp +å¼ķ 以为 +-m ot +Ġapproach able +认è¯ģ çļĦ +Ġtx n +C er +害 èĻ« +Ġspl ice +èĥ½ æķĪ +ĠAss am +.E vents +çļĦä¸Ĭ 表éĿ¢ +Ġker atin +Cath olic +ï¼Į ä¼ı +index Path +æĬ¥åijĬ çļĦ +çĦķ åıij +Ġg st +åΰ 访 +ä½įç½® åĴĮ +çŀª çľ¼ +_or iginal +è¾īçħĮ çļĦ +every one +ĠRetriev es +Ġ 麦 +没æľī åĽŀçŃĶ +ï¼Įåı¯ éĢļè¿ĩ 
+çĽ¸ä¿¡ ä½ł +åĩ¶ æ®ĭ +æĩĪ æĢł +ï¼Į ä½ľåĵģ +Con ven +æĸ¹æ¡Ī åĴĮ +å¿ĥä¸Ń ä¸Ģ +< Point +å£°éŁ³ çļĦ +_E FFECT +åĸĥåĸĥ éģĵ +' ex +çĨŁæĤī äºĨ +, åħ³æ³¨ +a es +ä¿Ŀ åŃĺåľ¨ +åĩł 款 +è¿ĺæĺ¯ 第ä¸Ģ次 +Ġtan ning +ĠPartial Eq +( owner +ï¼Į åĭ¤ +Ġdes ktops +ï¼Įä¸į éĶĻ +æģ¯ èĤī +/d l +åĵŃ éĹ¹ +èĩª 认为 +åįķ èĸĦ +ï¼ĮæĪij ä¸Ģå®ļ +Ġdon key +追 æįķ +oflu orescence +ä¸Ń é£İ +li an +éĩijé¢Ŀ çļĦ +Ġæį® äºĨè§£ +ĠRespir atory +ĠMicha els +, éĢĤåIJĪ +P ictures +ï¼ģ èϽçĦ¶ +Ġform ative +But ter +Ġconc ierge +ĠMore no +çļĦä¸ī 大 +Ø§Ø ¨ +ä¸ī åĵ¥ +_t ex +Ġfe ats +å̼ æĹ¶ +çłĶ åѦ +ĠÃIJ ºÃIJ +åĨ¥ åĨ¥ +åĽŀæĬ¥ çİĩ +Ġgymn astics +æĪĺ çͲ +)} (\ +ãģķãĤĮ ãģŁ +ä¸ĩ èĭ±éķij +çĺ ł +_ch anges +Ġtoilet ries +ROLL ER +_p ref +ç´§ èĩ´ +é¦Ļ èıľ +çļĦè¯Ŀ 说 +æīĺ å°Ķ +Ġmuc osal +. DEBUG +ï¼Į åĪ«çļĦ +Ġcy to +ĠST ORE +ĠOrgan ized +opo ietic +è·º èĦļ +N FT +Ġn inja +ĠW ills +Ġpol ype +Ġ_ $ +æĪ¿å±ĭ çļĦ +ãĢģæĸ° åĬłåĿ¡ +åĽŀ头 çľĭ +ĠConsolid ated +Ġunexpl ained +Ġ 人们 +ĠWh ale +ĠCan ary +è·ij è¿ĩåİ» +ĠDec oration +pol itical +ï¼Įä¸į åĪ©äºİ +Ġant iques +åī§ çĽ® +ä¼ĺç§Ģ å¥ĸ +ĠWik imedia +c oc +c ible +Ġgener als +arm acy +å¿Ĺ 强 +Ġmountain ous +.high light +P PC +ï¼Į æ¯į +è¿Ľè¡Į è¯Ħä¼° +olve ment +ä¸ĢåĪĨ éĴ± + ¨ +ï¼Į çαæĥħ +ï¼Į 麻çĥ¦ +éĢģ 她 +Ġhe par +大 åħ¬ +ere t +ä»Ĭ åĦ¿ +Ġcentrifug ed +ä¸įè¨Ģ èĢĮåĸ» +/ by +is ex +ï¼Į å¼Ħå¾Ĺ +Ġf ave +ĠH obby +ç͍ æīĭæľº +åIJĪ çħ§ +Out look +Ġcomfor ter +Ġ 严 +ĠT su +Ġne ces +æĦıè§ģ çļĦ +åģļåĩº è´¡çĮ® +ĠKen yan +.s cope +ç͵影 ä¸Ń +Bar rier +-b oy +åĩ¯ æĴĴ +.J ob +Ġnic hes +åĨ³èµĽ ä¸Ń +æ¯ı æĻļ +åį« åĨķ +Ġcharacter izing +.R ows +è¡¥ ä¸ģ +Ġstre pt +åīĤ çŃī +w ali +æĸĻ æĸĹ +çϾ åĢį +çĦķ çĦ¶ +æĪij è¿ĻéĩĮ +æĸ¯ æĸĩ +Ġreal tor +失 ç¬ij +ãĢĤä½Ĩ çͱäºİ +Foot note +H art +åīį ä¸įä¹ħ +Ġwid ened +N SS +h ower +.F eature +ĠDam n +Ġchrom ium +å°ıå¿ĥ翼翼 çļĦ +ï¼Į 巨大çļĦ +大 éĢī +åı¯ä»¥ åģļåΰ +ĠC TC +æ°Ķ åĬĽ +Ġmel atonin +R t +d iss +_F REQ +,大 åĬĽ +est r +éĥ½ åĮº +é«ĺ è¿ľ +çĥŃ éĶĢ +æľīä¸Ģ 座 +ä¾Ľç»Ļ ä¾§ +æĮª åĬ¨ +, ä¸ĬéĿ¢ +ä½ł æľī没æľī +客 çļĦ +åŃĺåĤ¨ æľī +-se eking +ĠEmer itus +æĺ¯ åĵªä¸ª +Ġnarr ated +èĬĿ 士 +C USTOM +ï¼Į çĶ·åŃIJ 
+åľ° åĿIJåľ¨ +容 许 +Ġcor ollary +umm us +çĽĸ çļĦ +}. $ +Ġhes itated +ä½ Ĺ +ĠB J ++ } +Ġ ç»ıèIJ¥æ´»åĬ¨ +Ġcour thouse +å®ŀéªĮ åŃ¦æł¡ +Ġstag gered +ĠPay load +Br ick +Ġ åįģäºĮ +æľĪ 楼 +åįģ çĤ¹ +ç¾İ å¼ı +Ġstip ulation +ĠA STM +ind o +ĠK ell +çİĭ åĿļ +awa ited +Ġк оÑĤоÑĢ +ä»ĸ 身ä¸Ĭ +str ate +第äºĮ 大 +çĵ Ĵ +ï¼Į第ä¸Ģ æĹ¶éĹ´ +Ġwra ppers +èĥ½ æĪIJ为 +æĸ° å©ļ +Ġraff le +t icket +åįģ åIJį +çµ IJ +ãĢĤçͱæŃ¤ åı¯è§ģ +, å¾IJ +ĠW ester +ĠS OP +(m d +å¥ĩ çī¹çļĦ +ĠRic ardo +Ġe jection +ä¹Ł 纷纷 +æĮĩ 头 +ÑĢ Ð¾ +ĠMcG regor +Ġsubdu ed +z p +ĠB oca +äºĮ åħĥ +ï¼Įä¸Ģ çĶŁ +çŁŃ ç¯ĩå°ı说 +è®°å½ķ ä¸ĭæĿ¥ +Mar riage +å¾Ī æĺ¾çĦ¶ +ES I +/ USD +Ġf ide +Ġun o +_m y +ï¼ĮèĢģ 夫 +, åĬ©åĬĽ +B asis +Ġs ire +ĠSL OT +çĥ« 伤 +ĠS ocks +ĠA SL +Ġal ia +ä½łçļĦ æĦıæĢĿ +æµĩ çģĮ +ĠClear ance +ï¼Į çŁŃçŁŃ +å¹¶ æİĴ +Ġquest o +åIJ¸ æ¯Ĵ +çķħ 游 +Ġstric ter +ag ically +ĠP he +æĹ¶ å·² +æĢ» éĩıçļĦ +ĠDis pute +tool bar +æľī å¼Ĥ +èĩªå·± çļĦ人çĶŁ +(n amespace +/ github +æĹ¥ åIJij +ä¼¼ä¹İ åľ¨ +缸åıį çļĦ +- essential +String Utils +(d irectory +Fe eling +K el +in ventory +Ġd unes +ãĢģ ä¿ĿæĮģ +Ġoff s +^{ (\ +Ġ<< " +ĠJo anne +\ cdots +ĠC RS +æĪij们 没æľī +ĠLiber ia +Ġtelesc opes +ĠF ou +- variant +ãĢĤ æĬķèµĦ +ãĢģ æ¬§æ´² +ĠBomb ay +Ġs older +ï¼Į æĢªä¸įå¾Ĺ +Ġre pos +Ġz ap +Cl usters +ç´§ç´§ åĽ´ç»ķ +ĠRiv iera +/ map +ĠM ESSAGE +åħŃ åįĥ +æĥĬ å¼Ĥ +以为 ä»ĸ +é¼ĵ é¼ĵ +Ġentr ants +æļĤæĹ¶æĢ§ å·®å¼Ĥ +ï¼Į éĩİ +ä½ł ä¸įèĥ½ +æĸĩ è¨Ģ +Ġsm ack +Ġenrich ing +creat or +æĪ· æķ° +ãĢģ ä¿Ħç½Ĺæĸ¯ +èµ¶ åΰäºĨ +表éĿ¢ æ´»æĢ§ +Port land +ĠBR CA +棱 è§Ĵ +Ġ è¶Ĭ +Ġ å¢ŀåĬł +ï¼Į åĨľä¸ļ +çİī çļĦ +Ġerrone ously +F lu +Ġ ä¼łçľŁ +æĸ° ä¸ĸçķĮ +æģ » +Ġing enuity +ĠExt remely +.* ]{} +åħ¨ä½ĵ èĤ¡ä¸ľ +å¼Ģå·¥ 建设 +_ Item +ore al +ãĢĤ 谢谢 +ĠF c +æŃ» æİī +éĽ¾ æ°´ +ĠTrad ers +éĴ¢çŃĭ æ··åĩĿåľŁ +åĸ§ åĵĹ +_b uttons +ãĢĤ以ä¸ĭ æĺ¯ +åIJ¬åΰ è¿Ļè¯Ŀ +imm el +ĠT ASK +ĠF RI +åı£ å¸Ĥ +ĠÏĢ Î¿Î» +èĪĨ æĥħ +ĠYuk on +AL YS +è½´ ä¸Ĭ +åIJįåŃĹ åı« +ĠRock efeller +ĠCommun icate +j ans +Ġstart Index +Format Exception +ĠD ems +Ġconst ituting +Ġprot otyping +éĢĥ é̏ +Ġarrest ing +Ġob t +Ġdig ested +Ġtwe aking +b ok +-r ank 
+, éĥ½æľī +Ġwhere abouts +çīĽ ç¾Ĭ +å·į å·į +E h +åΰ 缮åīį为æŃ¢ +ĠHome page +æĬ¢ äºĨ +æĪIJåĪĨ çļĦ +D ent +åIJĥ æ³ķ +红 å¤ĸ线 +å§Ķ åĨħ +Ġisol ating +æľī æĽ´å¤ļ +éŨ ç¦ģ +hav iors +ï¼ļ人æ°ijå¸ģ åħĥ +ĠS li +太 åı² +ä¸ĸ å¤ĸ +ä¸ĵä¸ļ æĢ§ +æī¿åıĹ çļĦ +æıIJèµ· è¯ī讼 +Ġhug ged +è§ģè¯ģ äºĨ +h il +ãĢĤ è¿Ļä¹Ī +ãĢģ è¯Ħä¼° +Th ousands +sh ifts +æ¯ı æ¯ı +}} $$ +CT C +æĸ¹éĿ¢ çļĦéĹ®é¢ĺ +è¯ĬæĸŃ ä¸º +ic iation +ãĢģ åĬłæĭ¿å¤§ +çīĽ å¤´ +ĠP ony +æĿ¥ 对 +St ores +ç£ĭ åķĨ +ide on +Ġ\ "" +Ġbet tors +Ġter ra +æīĢå¾Ĺ é¢Ŀ +é«ĺ å®Ĺ +block ing +æĢİä¹Īæł· åij¢ +ĠTai pei +æľĪä¸Ĭ æĹ¬ +Ġâ ķ +/g atsby +çļ±èµ·äºĨ çľī头 +ĠI CA +è¡Į çľģ +ï¼ģ è¿ĺæľī +åįķ项 éĩijé¢Ŀ +éĴĪ对 æĢ§åľ° +ĠD PS +ç©¿ åĩº +æĮ¡ ä¸įä½ı +å°Ĩ ä¸Ģ个 +ĠAl uminium +Ġaggrav ating +ç¡®ç«ĭ äºĨ +ĠS SR +IN ATION +éĢĢ æĪ¿ +å͝ä¸Ģ ä¸Ģ个 +éĥ¨ä»¶ çļĦ +ĠMans field +ï¼Į èģĤ +per ia +Ġinter ceptor +æĶ¾ æħ¢ +建çŃij çī©çļĦ +_al ert +_DEF INE +, æľĢé«ĺ +Ġu ber +åĴĮ ä½łä»¬ +Ġj peg +_c alc +Ġder ail +å®Ĺ 室 +dec ision +adal afil +ĠCec il +T ro +çĶŁ 俱 +ä½Ĩ åľ¨ +åĪĽå»º äºİ +Ġfrag rances +ĠRah ul +Ġ åīij +ãĢģ åıĪ +Ġagree able +/r untime +Ġ[ ^ +Ġfl ares +Ġcreat inine +ç«¥ è£ħ +ĠAP PLICATION +æĢĿç»´ èĥ½åĬĽ +Ġretro fit +ĠC ig +âĢľ C +åĮĹ ä¾§ +æĻ® é¡¿ +emp re +诱 åĽł +ãĢģ åĵĪ +Ġdown wards +Ġrun down +çŁŃ è£Ļ +/ pre +æĽ´ å°ı +_per m +ä¼ĺçĤ¹ æĺ¯ +intern ational +ãĢģ A +æĶ¹ çīĪ +ĠRec order +ä¹īæĹł åıį顾 +åIJĥ åIJ§ +çĶ· æĢ§çļĦ +éĻĪ ä¸ĢåĪĢ +第ä¸ī åŃ£åº¦ +Le ast +.To Array +ä½ĵ è£ģ +ä¸İ æĸĩåĮĸ +åı¤ çİ© +ĠDE A +æĢĿèĢĥ åĴĮ +Ġwash able +- ob +@ SuppressWarnings +æľ¬ 年度 +Sh ut +Method Name +.z oom +FIN ITY +ur ist +ä¼ģä¸ļ ä¸Ń +ĠIT V +Ġech ocard +D eg +[ new +ä¹ĭ ä¹IJ +err in +sign ificant +Ġdec imals +çĮ ¬ +, å¹³ +ą Ć +åİ» éĿ¢å¯¹ +Ġpost cards +Ġstandard ization +Ġhyp oth +ä¸ĭäºĨ 头 +, åĽ½éĻħ +: image +ĠE ly +åĩº éĴ± +ĠPro x +èģĶ å§» +ĠInd icators +gs ql +代çłģ ä¸Ń +mult ip +.deep Equal +Ġin clusions +ĠCh am +å·¥èīº ç¾İæľ¯ +UPPORT ED +Ġ 缴 +Ġ éĩįè¦ģ +å¾Ĺ 人 +Ġin hom +Ġg azing +æ¹ į +Pre vent +Co al +çļ Ī +çĸĹ ä¼¤ +åı« 好 +ĠAcceler ator +B IO +d ress +Ġch ained +St raight +çİĩ åĨĽ +Ġsyn agogue +othe 
rapist +çIJIJ ç¢İ +ĠG OT +ä»» éĢī +太 è¡Į +jo ined +Ġexperiment ed +æ³µ çļĦ +bl ah +ï¼ĮæľĢ åĪĿ +èĭ±è¯Ń åŃ¦ä¹ł +b asis +h ara +ãĢģ åķĨåĵģ +Ġas ynchronously +æīĵ åĢĴ +ERR Y +Ġsh oved +è¿ĩ ä¸įåİ» +odd y +s old +Ġfl op +(\ " +ĠPM I +åĩ¸ åı° +ĠéĻĪ åĪĿ +F er +ï¼Į è¿ľç¦» +ĠS ai +ĠC IT +éŁ³ è´¨ +æĿĢ äººçļĦ +豪 æĥħ +-sh are +ĠBu cc +require ments +agu ay +F LOAT +op i +Ġsh in +çľĭ æł·åŃIJ +æĿ¿ ä¸İ +ä¹ĭåīį åľ¨ +ĠMag ical +åĩºçĶŁ åľ¨ +B EST +.m esh +_T REE +ĠKe eps +.col ors +Ra ise +Mary land +Ġ é¦ĸ +ï¼Į 俺 +ur vey +{g athered +ĠCli ppers +æĥĬ å¿ĥåĬ¨éŃĦ +ä¼ij åħĭ +ĠL ayers +Ġsub mits +Ġliber ated +åħ¨ å¿ĥåħ¨ +/w idget +Ġsumm ertime +ĠHan over +ä¸į è½» +æľª åıijçĶŁ +ç¦ı å°¼äºļ +Thank fully +æĸĩåŃĹ çļĦ +è´¡çĮ® åĬĽéĩı +coord inates +acchar ides +åİ» åΰ +æĹł åīį +æķĻ æ¡Ī +)) ( +æľ« å¹´ +Ġaffirm ing +_TH ROW +Ġcongru ent +ãĢģ ç͵影 +åĴĮ çĥŃ +ĠSh red +èᝠæĸ¹ +ĠAg o +æ²ĥ çī¹ +æī³ æīĭ +æ¯Ľç»Ĩ è¡Ģ管 +ãĢģ éĩĩè´Ń +å°± æĦıåij³çĿĢ +å¾Ģ åIJİéĢĢ +梦 å¯IJ +,\ " +æ¶Ĥ 鸦 +ï¼ĮèĢĮä¸įæĺ¯ åħ¨éĥ¨çļĦ +.l oss +æĤ£ å¤Ħ +Ġw k +çļĦ æĮijæĪĺ +ion g +ãĢĤ ç±³ +Ġat m +èĩª ä¿Ŀ +($ ( +" All +_ rr +am mer +ĠH ISTORY +ä¹ĭ éģ¥ +å¾Ĺ 她 +REC ISION +_sc roll +( ar +Ġ å¦Īå¦Ī +ay ne +ä¸ĭ éĻIJ +两 è·¯ +, æľĪ +N eb +Âł is +å°ı æķ° +åĿļ å®ŀçļĦ +æĬ¹ å¸ĥ +æĵİ å¤© +ĠRu iz +T ED +Ġt rolley +ĠS cheduler +æıIJä¾Ľ åħįè´¹ +åľºæĻ¯ ä¸Ń +, åģ¶å°Ķ +P REFIX +æĪij 说çļĦ +èµ· è·ij +ãĤĵ ãģł +Ġin cess +ĠP ounds +åĪĨ享 åΰ +Ġ'../../ ../ +sp arse +ĠR upert +ν η +relations hips +file Path +div ide +éĿł æĭ¢ +æī¹åĩĨ çļĦé¡¹çĽ® +Ġana erobic +Ġinhal ation +ì Ĩ +Ġn ieces +form atter +æĹł å¼Ĥè®® +çϽ èĮ¶ +å¢ŀ åıij +_s napshot +第äºĮ åŃ£ +plic ial +å±ħæ°ij 身份è¯ģ +(st at +/re leases +( Token +ãĢģ æ¢ħ +Ġk ur +ogn ito +_RE GS +积æŀģ ä½ľç͍ +ãĢģ çĶĺ +ĠD ONE +éĥ½ åıªæĺ¯ +红 æĸij +Ġinform ations +å¿ĥçIJĨ åĩĨå¤ĩ +èįĴ æ¼ł +- No +< vector +鼶åĶ® åķĨ +ĠEz ra +ĠE MC +ä¹Łå¾Ī éĩįè¦ģ +ï¼Į éĢĤ +大 è´¥ +æĶ¯ 线 +Ġeight eenth +Ne ighborhood +exper t +çļĦ æ¯Ķè¾ĥ +ãĢģ åħ¬å¹³ +çϽ 纸 +Ġbusiness men +R uth +æĥ³ çľĭ +举 éģĵ +-b o +( logger +ĠF ir +zb ollah +ÑĤ ÑĮ +å°ļ åı¯ +Ġprop hetic +Ġ 
å¤ĦçIJĨ +Ġpre text +RE V +Ġcath ode +IM M +.F lags +Russ ell +\ Tests +åĪ« åħ· +è¿Ļç§į çݰ象 +æ£Ĵ æ£Ĵ +mith y +- const +A IR +æĺ¯ éĿł +Ġres usc +ï¼Įä»ĸ ä¸į +,ä½Ĩ ä»ĸ +Ġå°± è¿ŀ +åIJ©åĴIJ éģĵ +pl en +æľ¬ åij½ +åIJĥ 鸡 +it ability +ĠW yn +get t +Ġafter market +ĠGl ossary +ï¼Įä¼ļ 导èĩ´ +Ag ency +ĠGrad uation +ä¿Ŀ温 管 +ãĢĤ å¤į +ĠI IT +åĴĮ å®ī +羣çļĦ 没æľī +Ġrenew ables +çļĦçĬ¶æĢģ ä¸ĭ +åľ¨ åĽ¾ +.get Resource +è§īå¾Ĺ 她 +Ġpred icated +Ġи м +@ string +C m +Ġs ooo +ä¸į å®īåħ¨ +åIJĪ ä¸Ĭ +å¾Ī 严éĩį +交æĺĵ å¸Ĥåľº +å¿ħè¦ģ æĢ§ +æµ· 浪 +Ġthink er +社åĮº åį«çĶŁ +,, , +D or +ï¼Įä¸Ģ æĿ¥ +ĠSaras ota +Ġ oust +Ġint ram +ĠDe Fi +纺 å¸ĥ +太éĺ³èĥ½ çĶµæ±ł +ĠOliv ier +ld rich +Ġsh alt +ç͍ 书 +åįł åįľ +Ins ight +èĤĭ 骨 +ĠConsc iousness +ï¼Į é¢Ĩ导 +erv a +åĽŀ æĶ¾ +æ¶Ī 泡 +åĽºå®ļ å®īè£ħåľ¨ +åıĤåĬł çļĦ +Ġn autical +im iento +æľī çļĦæĹ¶åĢĻ +-b odied +ĠSk ate +è´¯ 注 +Ġb az +Ġin versely +ĠDart mouth +, no +. Option +Ġ éŁ³ä¹IJ +人 èĦī +ĠBe ethoven +é¡» è¦ģ +岸 ä¸Ĭ +çĨł çĨł +Ġu art +åĴĮ ç»ıèIJ¥ +å¹» å¢ĥ +è¿ĺ没 说å®Į +ĠLion el +ï¼Į èīºæľ¯ +大 é¾Ļ +æĹ¥ æĹ¥ +äºij æµ· +E lection +Ġ åΤæĸŃ +ĠT ITLE +æĶ¾ æ°´ +ï¼Į éħįç½® +åĨħ æķĽ +èĩªçĦ¶ ä¹Ł +cast ing +Ġfr antic +åĩĿ ç¥ŀ +ï¼Įçİ°åľ¨ æĺ¯ +,ä¸Ģ 天 +Eth ics +Ġn oses +ĠM CP +ĠR oz +æĥħ绪 çļĦ +è§Ĵèī² æī®æ¼Ķ +_sp i +æĹł æĿĥ +åĨ³å®ļ 书 +为主 线 +鹿 æĻĹ +ĠSans krit +" Don +f ib +ãĢģ å½±åĵį +åħĭ 鼷 +Ġk ol +举 è¯ģ +æľīä»Ģä¹Ī éĹ®é¢ĺ +ï¼įï¼įï¼įï¼į ï¼įï¼įï¼įï¼į +Ġpresent ers +Ġten or +æİ¥çıŃ äºº +y k +ï¼Į åĪĹ +ĠK ak +Ġent renched +éĺ» æĮł +èĥĨ éĩı +_per mission +çĽ¸ä¼¼ 度 +D avis +Ġphot ovoltaic +å·¥ä½ľäººåijĺ çļĦ +ĠS ensitivity +å¤ļ æĥħ +æĹłçº¿ éĢļä¿¡ +人èĦ¸ è¯ĨåĪ« +åĴĮ ä¼ĺåĮĸ +( Set +åıį ä¹ĭ +èΰ å¨ĺ +^ e +ç®Ĭ æĢ§ +éĻĦåĬł å̼ +ĠR outes +å°ij åį¿ +ç©¿ è¡Į +Ġsen ate +Ïĥ ε +èIJ½åΰ å®ŀå¤Ħ +Ġbl asts +礼 è®° +_m ount +Ġorgan izes +_D DR +Ġfresh men +adec imal +G K +ĠP OT +ãĢģ æĥħæĦŁ +ĠIm possible +ĠMon a +çĩĥ çħ¤ +G overnor +ĠC ancellation +ãĢģ èħ° +éĩįçĤ¹ æĺ¯ +_ ie +ä¸Ģ个 æľĪçļĦ +Ġmass ages +彩 ç»ĺ +ç§ĭ 鼨 +Ġpunct ure +is oft +Ġ[ / +æĹ¥ æľŁéĹ´ +ãĢĤéϤ éĿŀ +ï¼Į é¡¿ +se mi +ãĢģ éĥ¨éŨ +åľ¨ åİŁåľ° +æŃ¤ çĶŁ +åŁºç¡Ģ 
æķĻèĤ² +Ġec x +Den ver +\ bib +åŁŁ çļĦ +,ä¸Ģ æĺ¯ +åĪĨéħį åΰ +Ġentr ances +a uthors +ĠC SC +Ġtechn ologically +.h ibernate +mb uds +åĶIJ 书 +è·ij è·¯ +ï¼Į æķ´åIJĪ +æľĢ ä¼Łå¤§çļĦ +æ» ģ +åĥı 以åīį +请 åıĤéĺħåĽ¾ +Ġd agger +ç´¢ è¦ģ +R p +ist ler +up a +交 ä¼ļ +åĽ¾ è°± +ä¸ŃåĽ½ ä¼ģä¸ļ +å¼Ģåıij åĪ©ç͍ +å·¥èīº æµģç¨ĭ +Ġdimension ality +IFICATION S +ra ch +Ġwait For +Ġexecut ions +-r ise +é«ĺåħ´ äºĨ +Ġα ÏĢο +æ²ĥå°Ķ çİĽ +f am +Ġtimestamp s +Ġsegreg ated +_ led +ï¼Į æķħäºĭ +ric ot +æ²¹ æĢ§ +ĠChe shire +Ġmut tered +ï¼Į åľ°ä¸Ĭ +PC S +ĠStr anger +Ġtroubles hoot +Ġstalk ing +ĠAuditor ium +ä¸Ĭ 讲 +ER ATOR +主è¦ģ éĽĨä¸Ńåľ¨ +读 éŁ³ +亿 人 +Ġpropag ating +ĠPresent ed +á ¿ +ï¼Į ä¸ĸçķĮä¸Ĭ +两 æĬĬ +æįķ æįŀ +èµĦäº§è´ŁåĢº çİĩ +, åĢĴ +- New +åľ¨ å¾Īå¤ļ +åħ¨ éĺŁ +âĢĿçļĦ çIJĨ念 +åͱ çļĦ +ract able +ĠEng ines +Back Color +ĠPale o +ĠC CA +æł¼ åĭĴ +-t uning +-c rafted +Re plica +åºĶæĢ¥ æķijæı´ +Ġspir ited +Ġgirlfriend s +Ġ ï¼» +ĠN uts +好 çİ©çļĦ +çļĦ人 éĥ½çŁ¥éģĵ +æŁ¥ åħĭæĭī +:\ /\/ +ï¼Į çĥŁ +EN UM +Activ ated +-drop down +S ydney +Ġ åĿIJåľ¨ +it et +å¸Ĥåľº 主ä½ĵ +ĠDav en +(w in +l ady +iew icz +CC R +ĠL X +oc ortic +_P USH +Sub net +Ġmal ice +çļĦæľĢ å°ı +-imp act +* % +æīĵ 游æĪı +ä¼ĺ éħ· +æľ¨ 马 +Ġstock holders +Ġcommand ers +Ġcytot oxicity +ĠStr ick +ĠComplex ity +ning en +æĬĬæı¡ ä½ı +ä»İå°ı åΰ大 +_c pp +ĠSadd am +, åijµåijµ +: value +éĺ²æĬ¤ 罩 +åľ¨ éŨåı£ +åħ¶ åIJį +-t ail +èĩªå·±çļĦ å®ŀåĬĽ +-g rowth +(w idget +V IN +大 çľ¼ +.A uthor +Ġtra inee +ĠW INDOW +æĹ¶éĹ´ çŁŃ +.get Content +_D S +ĠBay ern +Ġ Ùĥ +Ġw f +ãĢĤ åIJĦç§į +ç͍ è¿Ļç§į +å¤ĸ 头 +ä¸Ĭåįĩ è¶ĭåĬ¿ +ĠCul inary +Viet nam +ãĢģ å®ŀæĸ½ +æĬ¥ çļĦ +just ify +è¿İ çĿĢ +çѾ äºĨ +å¦Ļ æĭĽ +_SE LF +å±ħä½ı åľ° +$ A +ï¼Į æµħ +Ġ$ -\ +Ġrec oil +cre ening +çīĩ ä¸Ĭ +ĠBl uff +Ġstatus es +]{ }. 
+, çİ°åľ¨çļĦ +ï¼Į åĦ¿ç«¥ +Ġquery ing +/ plugin +Ġ ä¸ĢäºĽ +Ġl f +ĠL STM +éĤ® å±Ģ +ï¼Įçݰ å·² +ĠInstall er +ï¼Į èµ¢å¾ĹäºĨ +é«ĺ éĩĩ +æī§ æİĮ +.Ex it +æĸ¯ 大æŀĹ +æ¹ Ħ +ĠLoad s +ãĢģ éĻį +Ġha ze +åĨ· ä¸į +Dist ributed +- cylinder +ĠRe ign +çļĦæķ°æį® æĺ¾ç¤º +Ġgol fer +ĠB EL +åĪĨ åĩº +ä½İ æĶ¶åħ¥ +ĠPRO PERTY +ï¼Įæ°Ķ æ°Ľ +æ»Ķ 天 +id ue +ä¸Ńå¿ĥ åĴĮ +å§ļ æĺİ +åŃ£èĬĤ æĢ§ +ä»ĸ ä¿© +åĽ½ èĦļ +åĢŁ çͱ +åIJĪåIJĮ 约å®ļ +ĠHe ading +å¾· æĦıå¿Ĺ +Ġet ching +Re action +Ġmis represent +, 缴 +h ora +rac ula +ä¸ĭæ°´ éģĵ +ĠP seud +åľ¨ åįİ +th umbnail +ĠSur geons +Ġpure e +' am +ĠF ries +ĠF CA +Ġsh orthand +ç»Ļ æ°´ +chen ko +omin ator +Ġcraft y +Ġun balanced +ï¼Įä¸Ģ ç»ı +èĮ¶ æłij +Ġsand ing +} c +Ġ æĿ¥åΰ +or relation +ĠCh ak +çİĭ 宫 +åĨ² ä¸Ĭåİ» +(n o +Ag reement +W arranty +ãĢĤ è¿ĩäºĨ +ãĢģ æĪ¿åľ°äº§ +Le od +ãĢĤæĽ´ ä½ķåĨµ +Ġenlight ening +Ġquer ied +èİ·åıĸ åΰ +Ġhe lix +ĠG rip +ä¸ĭ æĭī +åIJİ ç»§ +ĠAs per +çļĦåıijå±ķ åĴĮ +ĠPL AN +ç»´çĶŁç´ł c +ãĢĤèĩ³ æŃ¤ +olog ia +åĨľ è´¸å¸Ĥåľº +ä¸Ńæĸĩ çīĪ +ĠAshe ville +g os +t aken +ĠL imb +å°ij æľī +ï¼Įåı¯ å°Ĩ +åªĴä½ĵ è®°èĢħ +æĩĴ èħ° +REFER RED +c ao +çļĦä¸Ģ项 æĺ¯ +ĠHutch inson +- formed +åı¯ è§ĤçļĦ +ãĢĭ 以åıĬ +Mon key +Ġce ases +- Ad +ãĢģ ä¸Ļ +为 天 +ĠBas eline +ĠCOUNT Y +Ġtelev ised +Ġstagn ant +æľĢ æĢķ +_B IG +ï¼Įå½ĵ æĹ¶çļĦ +EL COME +ĠStr and +ĠFund raising +åķĬåķĬ åķĬåķĬ +Expl oring +åįĹ æ´ĭ +è¶³ åĿĽ +ĠBO ARD +l z +ĠC inderella +ä¸Ĭ åıij表 +-in variant +Ġalcohol ism +çļ±äºĨ çļ±çľī头 +ï¼Į æĭ¨ +_F ield +æģ¶ 人 +-x l +Ġsmok y +- CH +Ġo id +Ġright ful +设计 é£İæł¼ +____ _ +请 æĿ¥ +å®ŀéªĮ å°ıåѦ +ç²Ĺ ç»Ĩ +Ġalign ments +é¢Ħ åΤ +è·ij åĩºæĿ¥ +Ġaffili ations +Ġculmin ating +# u +çļĦ åĨ²åĬ¨ +请 è¾ĵåħ¥ +å°Ħ ç®Ń +ä¸į å±Ī +ãĢģ 人æ°ij +ood oo +ï¼Įä½Ĩæĺ¯ çͱäºİ +è¦ģ 约 +åı¯ çĩĥ +iel le +èĢģ 乡 +æ¯ı ä¸Ģç§į +-m otion +究竣 æĺ¯ä»Ģä¹Ī +ĠN ES +ĠK U +ï¼Įä¹Ł ä¸įæķ¢ +Out standing +/ testing +ä¸Ń 带çĿĢ +èį ª +ãĥķãĤ¡ ãĤ¤ãĥ« +ï¼Į å·¨ +æľī æ°§ +åĽ½ ç¨İ +éª ¡ +æĬĢ å·¥ +æĶ¾ ä¸įä¸ĭ +éļı è¡Į +æ¼Ķ è¿Ľ +Ġflu ency +-develop ment +Ġa ve +å¥ĩ å¦ĻçļĦ +强大çļĦ åĬĽéĩı +calcul ated +, 第ä¸Ģ次 +_p i +èĭı å·ŀå¸Ĥ +Ġgg plot +ãĢģ 
æĪ¿å±ĭ +Ġdies e +Ġsc our +èĵĿ çѹ +Ġairt ight +çļĦ çľĭåIJij +ãĢģ èī²å½© +å¹´ å¹´åºķ +æľĢ éľĢè¦ģ +æ´» 人 +åĮħè£ħ è¢ĭ +Ġg if +ĠS PL +ä¸į éĨĴ +å½± ä¸ļ +ç®Ģ æĺİ +å°½ åľ¨ +åħ° çļĦ +$ M +ä¼ļ éļıçĿĢ +èĮ¶ åĮĻ +Ġmorph ine +âĢľ . +åħµ 士 +Ġwill ful +ĠCumm ings +éĢļè¿ĩ åIJİ +æ··åIJĪ åĿĩåĮĢ +O EM +Ġ å¸Ī +ï¼Į 主人 +âĢľ ä»İ +æŃ¥ éģĵ +Ġsw irling +åİĮ æ°§ +æĹ¶æĹł åĪ» +çļĦ å®ŀæĸ½ +ĠC DS +ï¼ĮæĪij åı¯æĺ¯ +Ġsk ysc +ä¸ŃçļĦ æķ°æį® +ĠWhat sapp +åı« ä¸Ģ个 +磨 çłº +Ġbail out +æº Ł +ç»Ħ æĪIJäºĨ +ek o +Ar my +åĨ³å®ļ æĢ§ +Make file +Ġrect ify +ãĢģä¸į éĶĪéĴ¢ +æľ¬æĬ¥ 讯 +Ġmonst rous +F mt +Q V +ãĢĤ åѦéĻ¢ +CC D +计ç®Ĺ æĸ¹æ³ķ +çıŃåŃIJ æĪIJåijĺ +MMMM MMMM +éϤ èįī +_D RAW +Ġelev ations +缮å½ķ ä¸ĭ +ï¼Į æĹħ游 +_p ix +,å¹¶ 没æľī +erent ially +â̦â̦âĢĿ âĢľ +âĢľ åĵ¼ +ç͵ ä¿¡åı· +File Size +ubs cription +åŀĤ缴 äºİ +ï¼Į两个 æīĢè¿° +ç»Ļ åѦçĶŁ +ĠÐ ij +Ġhard y +è§ģéĿ¢ äºĨ +, è§£åĨ³ +ãĢģ æĻºèĥ½åĮĸ +ge ometric +Ġcounter act +è§ģåΰ ä»ĸ +/ aut +ĠT ad +å°± éĿŀ常 +åIJĮ 声 +æīĵ ä¸ĭäºĨ +éĵº åŃIJ +Ġ____ _ +( CH +对 æīĢæľī +车 头 +æ¸ħ åĨ· +åĸľæ¬¢ 她 +å¼Ģåıij äºĨ +èħIJ æľ½ +æ´ª èįĴ +yll is +ĠW izards +Ġin eligible +åģĩ 象 +Rest riction +Ġ è´ŁåĢº +ãĢģ æµĭè¯ķ +ï¼Įä½Ĩ åıªè¦ģ +é£ŀ è¿ĩ +ï¼Įåı¯ä»¥ æĺ¯ +mac d +ol and +ur ic +ï¼Ī åīį +CT A +åĬ³ ä½ľ +Ġcoll apses +åĪĩåħ¥ çĤ¹ +ĠM IME +ï¼Įåľ¨ 使ç͍ +è¿ľ æĻ¯ +ĠRel oad +è§Ħæł¼ çļĦ +ĠHistor ically +Ġescal ate +> s +ĠFor ced +马 åĪº +_b t +-s erving +ĠList ener +çļĦ第äºĮ 端 +-compl iant +ï¼Į ä½Ľ +ãĢģ åĮºåŁŁ +æĬ¥ åºĶ +_st ar +çIJĥ ä½ĵ +UM B +ĠCould n +_F ORE +Ġsuper st +汽车 ç«Ļ +upp et +ĠRow e +Ġverte bra +ent re +è¾¹ è§Ĵ +çİ°åľ¨ è¿Ļ个 +Ġgra pple +ĠSk ull +_CON NECTION +ĠSal man +Localized String +Ġlinger ie +Ġg ulp +ĠC uisine +è¿Ļ 两人 +çľĭ åIJ§ +Ġrest rain +å®Ŀ ç®± +.Add Range +ADD RESS +C UR +ĠE mber +åħĥ 人æ°ijå¸ģ +æľ¬åıijæĺİ æ¶īåıĬ +non ce +Way ne +R ATE +Ġg id +ĠS SE +y lic +Ġbi opsies +Ġbreakthrough s +Respons ible +- End +ä¸Ģèά éĥ½ +Ġswe eter +(' $ +便 å½ĵ +æŃ» ä½ł +ä¸Ģ缴 å¤Ħäºİ +以ä¸Ĭ åĨħ容 +ĠDel oitte +å¨ľ çļĦ +is ive +ä¹łæĥ¯ çļĦ +Ġbull pen +Ġb ellow +ĠN AC +_m ass +æĸĹ ç½Ĺ +æī« éϤ +åı°æ¹¾ çļĦ +_ pts +åIJİ æľī 
+ï¼ī è¿Ľè¡Į +ä¸Ģå¹´ å¤ļ +ä¼ĺå¼Ĥ æĪIJ绩 +ä¹ĭ åĪĹ +äºĮ 个 +æĽ¿ 身 +ĠSunder land +P IC +å¾Ī å°ıçļĦ +-c oded +çĨ µ +çŃī éĩįè¦ģ +ä»į å¤Ħäºİ +ov als +让 æĿİ +_s ources +Ġelabor ated +ĠL umber +头 åı· +ĠSW IG +stat istics +ĠпÑĢ Ð¸ +ĠB anc +pp t +临 åħ¶ +è¾ħ 缸 +ĠBu ick +ĠIt alia +ï¼Ī è¯ķè¡Į +èģĶ åIJį +çIJĥ 磨 +åħį ä¸įäºĨ +ĠFound ing +Ä ° +è¿Ļæł·çļĦ éĹ®é¢ĺ +èĢģæĿ¿ çļĦ +t og +-s ervices +èĴ² åħ¬èĭ± +ĠAthlet es +P AN +ĠG ee +Ġrec ollection +éĢī æĿIJ +++ + +åĽ½éĻħ åľ¨çº¿ +mem brane +L AST +ĠY ates +Ġra cer +-m embers +ĠCall er +å®ī é¡¿ +å¾· è¯Ń +èµĽ å°Ķ +_TR IGGER +Ġinstruct ive +ĠMaid en +ĠArchae ology +Ġde activate +ax el +è¡ĮæĶ¿ åĮºåŁŁ +Ġmot ility +é¤IJ æ¡Įä¸Ĭ +Scot land +Ġ å¿ĥä¸Ń +Ġj query +Ġdisc olor +iol i +æľºåύåŃ¦ä¹ł ç®Ĺæ³ķ +Ġw d +è¿Ľè¡Į æ¸ħæ¥ļ +Ġsal inity +L ING +ï¼Į èµ°åIJij +Ġb aud +ĠS CHOOL +åĸĿ çĤ¹ +Do S +ĠAtl antis +éĢĴå»¶æīĢå¾Ĺç¨İ è´ŁåĢº +Z D +åıĶ çζ +Ġsusp icions +çİ°åľ¨ è¿Ļæł· +å¼Ĺ æ´Ľä¼Ĭ +éĻĦåĽ¾ ä¸Ń +Buff ered +å®ŀçݰä¸Ĭè¿° 缮çļĦ +- command +_P AY +åİ¿ 人æ°ijæĶ¿åºľ +US C +n B +Ġn row +è§Ĩ ä½ľ +_b b +ï¼Įå½ĵ åľº +å½Ĵ ä¸Ģ +èĻİ çļĦ +Ġ ../../ +ãĢģ åIJ¬ +åĴĮ èĢģå¸Ī +å¤ĸ éĥ¨çļĦ +åı° å¼ı +ĠGener ates +丰 ç¡ķ +/P rivate +åıĸ代 çļĦ +ãĢģ éģĵ +åĨľ èĢķ +uit o +èĥ¡è¯´ åħ«éģĵ +t ur +Ġpres ervatives +Ġfore arm +Ġadopt ive +Ġsid el +è°¦ éĢĬ +ï¼Į å¼¹ +è¿ĺ 带çĿĢ +ç³»ç»Ł åľ¨ +æĬĵ äºĨ +èĬ¦ èįŁ +.And roid +ĠRem aining +Ġdef orestation +ĠAm os +ĠWild er +çŃī éĥ½æĺ¯ +ï¼Įä»ĸ ä¹Łæĺ¯ +-st orage +æŃ£ç¡® åľ° +Ġwid en +Anal og +q f +ĠA OL +æ´» çĿĢçļĦ +ãĢĤ çİī +ĠW TO +第ä¸ī 人 +è¹ ĭ +Subject s +$ r +' ? +on k +Ġu z +èĩ³ å®Ŀ +æĶ¾ åĽŀ +Ġinf ographic +设å¤ĩ ä¸Ń +å®īæİĴ 好 +ĠFP GA +ãĢģ 审计 +主 æĹ¨ +Ġfree zes +è°ĥæķ´ åIJİ +: å¦Ĥæŀľ +Ġemb argo +åºŁ åĵģ +ĠPH YS +Ġapt itude +_ Button +... 
[ +ï¼Įä¸į å®ľ +æ¸ħ 羣 +Ġuns olicited +æĺ¯ æĿİ +ä¼ļ åIJĪ +她 è¦ģ +å¾® è°ĥ +ĠCR T +åIJĮå¿Ĺ 们 +æĺŁ äºij +è´Ń 建 +å§ĭç»Ī 没æľī +ĠPic asso +: new +ãĢģ èĢĥè¯ķ +åĩı 产 +Ġenc ore +æľī人 æĿ¥ +æıŃ çīĮ +\ Api +Ġw icket +Israel i +\ C +Ġ 谢谢 +强 横 +Ġpolymorph isms +Ġ äºĭ +ĠR ipple +ĠN ose +é«ĺ è̏ +æĶ¾ 声 +åħ·æľī çļĦ +K er +ĠF ritz +åĬł ç´§ +_p ipeline +æ·± èī² +Ġcustom ised +ĠLe a +(m etadata +udd in +Conf idence +她 æĥ³ +(f older +ï¼Įæľī åı¯èĥ½ +æ¢ħ å°Ķ +Ġcomplic ate +Ġspo iler +.jet brains +ĠN ess +é¼İ 缼 +ï¼Į åįļ士 +ĠCh ard +丽 丽 +ä»Ļ çķĮ +æŃ£å¼ı åIJ¯åĬ¨ +othe lioma +ç¼ĸè¯ij åύ +å½· 徨 +. rt +it ät +ï¼Į 微信 +im ar +çĭ¬ ç§Ģ +æľ¨ åģ¶ +åºĶç͍ é¢ĨåŁŁ +å¹´çļĦ åıijå±ķ +-E uropean +b ah +çļĦ æĻ¯è±¡ +ec ure +-b uy +================ ==== +身ä½ĵ éĩĮ +ĠAle c +R i +ĠF MC +å¾Ī å¿Ļ +转 æľº +åĮħ 头 +ï¼Įæľī ç§į +ĠEd g +ĠIslam ist +糸 äºĨ +åľĨæŁ± å½¢ +N AM +Ġsub cutaneous +åĮĹ ç«Ļ +Ġe rection +åĨ² äºĨè¿ĩåİ» +ï¼Ľ 为 +Ġ` -- +æĽĿ æ°Ķ +J oh +ĠL ymph +çıł ä¸īè§Ĵ +APP Y +Ġ× ij +Ġstrat ification +s phinx +ãĢģ æľº +天 åķĬ +Ġar du +åįķ æīĭ +转 æĴŃ +åĮ»çĸĹ ä¿Ŀéļľ +çݰå®ŀ ä¸ĸçķĮ +容åύ ä¸Ń +Enh ance +Ġempt ied +ï¼Įä½ł åıĪ +çīĩ åĴĮ +ug get +èĤ¯ å¾· +.ch oice +ĠDam ien +ĠAJ AX +Ġfl uff +ĠMan uscript +(M ain +åĩºæ°´ éĿ¢ +ä¸Ģ åį· +ĠB ANK +管 äºĨ +产åĵģ ä¸Ń +Ġvol ts +顺 æĹ¶éĴĪ +æļ´ æĢĴ +è½°è½° çĥĪ +æį¢ ç®Ĺ +ä¸įè¿ĩ åİ» +ç͵è§Ĩ ä¸Ĭ +å¹³éĿĻ ä¸ĭæĿ¥ +ä¸Ģ æİĮ +og yn +ĠPol ly +ìĿ ¼ +ä¸į 为人 +éģĵ è°¢ +Ġbody building +_dist ribution +ĠMSN BC +为 è¾ħ +æŃ¥ è¿Ľ +éĵģ å¡Ķ +User ID +,让 ä»ĸ们 +ĠHem isphere +N ursing +ĠM ailing +(n etwork +èĥĮæĻ¯ å¢Ļ +Ġincub ator +ĠB ieber +ĠL ep +æıIJ æĪIJ +ĠSh ri +_b w +Node Type +link ing +èľĤ æĭ¥ +Ġmemor ize +oe lect +NY C +-C an +ï¼ĮæĽ´ èĥ½ +ãĢģå®Įæķ´ åľ°æııè¿° +åĪ« 离 +ĠChrist church +éĽĦ 鹿 +çļĦä¸Ń éĥ¨ +ãĢĤ å¼Ģå§ĭ +åħ¨ ä¸ĸçķĮçļĦ +.M ode +ĠBart lett +ä¸ī åĪĨéĴŁ +è´¢åĬ¡ ä¼ļ计 +åı¯ä»¥ ç¡®å®ļ +åı£ æīį +-con v +Ġnewborn s +å; æ¶² +] =' +b low +Ġprov oking +ĠAcc om +ä¸į æ¼ı +ans ki +ä¸İ 被 +Ver se +Isa iah +B oo +Ġrins ed +ĠH oy +åįĬ 空ä¸Ń +è°ĭ åıĸ +æĤ¬ èµı +ä»Ķç»Ĩ çľĭ +H ANDLE +ï¼Į å®£ä¼ł +ĠS OME +å¥ĸ çļĦ +s è +Ġpolit ic 
+Ġten ancy +ĠZe us +Ġcongreg ations +. ^ +ãĢĤ ç«ĭåį³ +ĠC Z +ãĢĬ ä¸īåĽ½ +åį³ åħ´ +ĠFire wall +çī§ ç¾Ĭ +ĠBurg ess +çͲ 骨 +omer ic +Ġcl ueless +åħ¥ åij³ +åįĬ æķ° +å±¥ 带 +娴 çĨŁ +t is +æĹłæķ° 个 +. peek +çļĦ å¢ĥçķĮ +Ġme lee +AD VERTISE +Ġcur ricula +ï¼Į æĬķåħ¥ +ä¸ĩ åİĨ +æĸ°éĹ» åĩºçīĪ +- DC +é£İ éĢŁ +uk h +Ġfre eway +èݱ å¾· +åıĺæĽ´ çĻ»è®° +ĠâĬ Ĩ +ĠCrow d +Ġfilmm aking +ĠD rives +Ġ? ?? +请æ±Ĥ çļĦ +ĠReve al +å² ĸ +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠ +ĠVer se +ãĢĬ 天 +_d l +Ġappropri ation +VO KE +ี à¹Ī +( OH +ĠR adeon +ĠPM C +omin ations +Ġcro pping +(cl uster +ä»Ģä¹Ī è¯Ŀ +åĽłä¸º è¿Ļ个 +Ġcirc adian +,åľ¨ è¿ĻéĩĮ +Ġnut meg +ĠSP ACE +çݰéĩijæµģéĩı 表 +Ġv æ +被 ä¸Ģ个 +ĠNe u +ÃŃt ica +ä¸Ģ åĪĻ +ãĢģ å¤į +ĠCyp ress +, çŁ¥éģĵ +, éĩįæĸ° +ag in +ĠRE PLY +Ġgar ages +Ġ åѦçĶŁ +Ġor chard +ä¸ī éĢļ +梦æĥ³ çļĦ +wp db +Ġgr ating +Cl ark +èĭ± æĿ° +Ġchalleng er +è¡ĮæĶ¿ å¤įè®® +Ġgrat is +\ E +äº ³ +ĠT REE +AM PL +uk o +_qu ote +ex am +-r andom +éģĩåΰ çļĦéĹ®é¢ĺ +Ġbrew ed +ï¼ Ń +Ġl ousy +ĠSign ing +ä¸Ģå¼Ģå§ĭ å°± +ĠRespons ive +" x +两 æĶ¯ +æķ°æį®åºĵ ä¸Ń +- ath +Ġ åŃĺè´§ +AT G +举 éŨ +-b ind +Ġsize able +æĨĭ å±Ī +B uddy +ĠA ura +ĠW rit +ä¸İ æİ§åζ +ï¼Ī R +æŃ£ 大 +pr inc +æŃ¢ æ°´ +åħ¨éĥ¨ éĥ½æĺ¯ +-M in +âĢĻ an +åŁº çĤ¹ +é£İ çĶŁ +çĶŁæ´» è´¹ +ä¹ĭéĹ´ æľī +æİ¥æĶ¶ åύ +Bath room +Ġbour bon +æŀ ³ +ĠBlack burn +åħ»èĢģ éĻ¢ +ĠRemodel ing +, ä¾Ŀæ³ķ +L ind +Ġ æĻ®éĢļ +Ġk ube +åı« æĿ¥ +Ġmeas les +说çĿĢ è¯Ŀ +çĮİ æĿĢ +w elling +ov ski +Ġunt rue +ä¼Ł ä¸ļ +Ġ è¯į +é«ĺåħ´ åľ° +çĥŁèĬ± çĪĨ竹 +\ # +Ex tras +äºij æ·¡ +-car b +ĠThess alon +, å®ĮåĸĦ +é¾Ļ åŁİ +ï¼Įå°Ĩ æīĢè¿° +åįĸ æĸ¹ +主åĬ¨ æĿĥ +çļĦ éĺ¶æ®µ +èĦ¸ éĿ¢ +çī¹åĪ« å¤ļ +æĮ¥ éľį +为ä»Ģä¹Ī åij¢ +PC lient +èĪŀ å¼Ĭ +é«ĺæł¡ æ¯ķä¸ļçĶŁ +_ primary +Ġt renches +ï¼Ľ æľĢåIJİ +Ġattract iveness +æł¡åĽŃ éĩĮ +- letter +ï¼Į 失åİ» +ç²ī 红 +å¹¶éĿŀ æĺ¯ +ĠTarget s +ut ures +co ast +Ġfl ange +èĥ½å¤Ł çľĭåΰ +çķħ å¿« +ï¼Į两 人çļĦ +ï¼Į为ä»Ģä¹Ī è¦ģ +ĠCoo ke +ãĢĤ è¿ijæľŁ +身 ä¸ĭ +æĽ¾ ä»» +ãĢģ ç»ĵ +åIJİ è¾¹ +æľĪ åĪĬ +èĤ¡ç¥¨ éħįèµĦ +W alter +ĠT umor +大 伯 +ĠJ OB +æijĦ æĶ¿ +çĭĹ ç²® 
+åIJĮ 人 +Ġte k +æīĭæľº 游æĪı +设æĸ½ çļĦ +Mar vel +un ame +çĹħ æŃ» +Service Impl +ĠRo asted +èĢģå¹´ 人çļĦ +ä½ł è¿Ļæł· +æį İ +éķ¿ æĮī +Ġpel vis +Ġins istence +æĸĩåĮĸ ä¸İ +åľĨ åľĪ +Ġthe res +'] ), +çĥ§ çĥŃ +Ġsh roud +iot ensin +F UN +t ro +æĺ¯ åħ¬åı¸ +iv ating +Ġgener ative +ig matic +(p rivate +Pack ets +主é¢ĺ æ´»åĬ¨ +K am +Ġint est +éľĢè¦ģ å°Ĩ +ĠDisc ipline +çªį éŨ +stead y +Ġcentrifug ation +W u +çϽ ç¾Ĭ +ï¼Į对 æĪij +_IN C +Ang les +Ġtransf usion +Jew ish +çļĦ åIJĪæĪIJ +ãĢģ åĮ»åѦ +åıĺ ç͵ç«Ļ +éģĩ è§ģäºĨ +å¸Ń ä¹ĭåľ° +è¸ı ä¸ĬäºĨ +ĠFer din +Ġr ink +ĠG raz +éĶ Ĩ +ä¸ŃåĽ½ çĶ» +åł ± +ĠMar ino +\n onumber +Ġquad ru +ĠF iat +åŃIJ éĥ½ +_m u +èĥ½å¤Ł 满足 +ĠPe ck +ĠNat asha +re ef +ĠM Sc +ĠS ass +ä¹ĭ çŃĸ +Ġcommut er +Ġinsol vency +iconduct ors +ï¼Į æĪĺæĸĹ +ĠR IP +ie vers +æ¯ķä¸ļ çļĦ +Ġtab oo +-tr ad +_ unknown +.T otal +å§Ķåijĺä¼ļ 主任 +andon ed +ä¸į对 ç§° +Ġpleas urable +ber ta +ä¸įæĺ¯ 个 +.t asks +Ġceremon ial +Ġsynth ase +( collection +çļĦ åıijçݰ +åľ¨ åIJĦ个 +åı¯ä»¥ å¢ŀåĬł +交 åΰ +äºļ 麻 +yl us +ç¡ħ èĹ» +ä¸ĩ åIJij +ï¼ĮèĢĮ ä½ł +åĮ»çĸĹ è´¹ç͍ +ĠDi aries +Ġdivis ors +C ape +C MP +v pn +Ġm A +Ġd iner +ĠM osaic +_p recision +大 æĿĥ +åĽ½ åIJĽ +ä¹ĭ å·ħ +该 åĽ½ +请 äºİ +Ġdir s +M m +T one +ut to +éĥ¨ 份 +å¼Ģå§ĭ åīį +å²ģçļĦ åŃ©åŃIJ +é«ĺ éĺ¶ +离 ä¸ĸ +_reg istry +(f lag +羣æŃ£ æĦıä¹īä¸ĬçļĦ +Ġaqu a +Ġapt ly +" ä¸İ +Ġ èģĶ系人 +ï¼Į è§Ħ模 +ä¸į æŀĦæĪIJ +ĠD warf +and ers +Ġconc urrency +Ġsong writing +* [ +D ense +ĠE uchar +å¾Īå¤ļ æľĭåıĭ +éĿĴæµ· çľģ +éľĢ åľ¨ +æµ· äºĭ +roll back +欺 人 +( email +, æľ± +ĠThe rapeutic +ä¹ĭ 礼 +ä½įç½® ä¿¡æģ¯ +ulture Info +ãĢģ 羣 +å°ı å°Ĩ +举 åįİ +ĠCatal yst +è°ĥåij³ åĵģ +( adapter +æĥ ´ +çĶŁ è¾° +éķ¿ æĸ¹å½¢ +Ġunder statement +ä¸ŃçļĦ å®ŀæĸ½ä¾ĭ +Ġhom ologous +ĠCast ing +_check point +éķ° åĪĢ +et es +ĠI OP +åľ¨ æīĢæľī +Ġjust ices +Ġauthor izing +è¿Ļæł·çļĦ äºĭ +Ġa ri +om ach +Ġabs cess +-p ointer +å¯Ĵ çļĦ +Co ast +ĠChi ang +ï¼Įä½łä¼ļ åıijçݰ +it ively +ĠS SP +åĨ° éľľ +åħ¨ä½ĵ åħļåijĺ +çį ł +, æİ¥çĿĢ +ï¼Į æĮij +Ġk im +ary ana +çϾ èά +Ġsuper imp +_B ANK +çĶŁåij½ å®īåħ¨ +-man aged +ĠCHE M +PLIC IT 
+ĠO SS +RO AD +U ps +æĬĬ è¯Ŀ +羣çļĦ 太 +, æŃ¢æįŁ +-out line +ih anna +Ġsig m +çĭ¬ä¸ĢæĹł äºĮ +( iv +) ]) +. primary +he avy +ĠO LD +æĸ¹ èĥ½ +两 è½® +ev olution +Ġtechn o +çĥ§ æ¯ģ +Ġrust y +: false +f ashion +ĠI BD +con ies +çľĭ éĶĻ +é¥ ¨ +缸åħ³ æĶ¿çŃĸ +ĠAss assin +Comp rehensive +å°¾ çģ¯ +O ral +ĠC MP +ç»Ŀ æĿĢ +计åĪĴ åľ¨ +ĠMain taining +Ġmetabol ite +磾 æĮģ +ï¼Įä¹Ł 被 +æł¡ ä¼ģ +çļĦ好 åıĭ +ĠMON TH +F ILTER +Ġpre con +åįĬ æĪª +Ġsource Tree +_g enerate +åĶIJ éķ¿çĶŁ +çĵľ æŀľ +Ġinject or +Ġf umes +è¾ ĺ +éħĴ ä¸ļ +Ġprop elled +主æĮģ ä¼ļè®® +Ġinh aled +Ġparadig ms +é£Ł 管 +_h al +å¸ķ åħĭ +t os +ï¼Į æ±ŁåįĹ +马 é¾Ļ +b uck +ĠT FT +Int ensity +ĠSal isbury +ĠMand ela +j ew +Ġt arn +é¾Ļ èĪŁ +.n z +_L IN +æ¶Ĥ åĪ· +QU ARE +, < +ä¸į åĪ©çļĦ +ãĢģ è¿ŀ +Com poser +åį¡ åIJĪ +ĠMc Mahon +Ġlegal ized +.H tml +ĠInitial ization +Ġkö nnen +s now +ç½® ä¹ĭ +_EXT ENSION +Ġvene er +çļĦ éĢ»è¾ij +说 ä¸Ģ声 +åİ¿ å¸Ĥ +_D M +ĠØ Ń +Ġchunk y +ĠFab ulous +ï¼Į 转åĬ¨ +Ġaut of +Ġcoe ff +ï¼Įåįł åľ° +Dam n +( Stream +Ġcomm ons +ĠRad i +ĠInput s +ĠPred icate +_ph ys +( round +Ġ æŃ¥éª¤ +Ġ\ \\ +_b inding +Ġsl ang +çĽĹ çīĪ +(ch ip +ĠAl am +çģ« çº¿ +_M EDIA +èµ¢ åĪ© +年代 æľ« +R oche +ãĢģ äºĨè§£ +ãĢģ è§ĦåĪĴ +ĠH IM +_f ifo +ï¼ĮåĽłä¸º ä½ł +èģĶèµĽ çļĦ +ãĢĤ çݰæľī +ç§į 群 +Ġinf ra +ĠFR AME +è°ĥåij³ æĸĻ +T J +Ġal d +ib ur +çĽĹ å¢ĵ +Ġredirect s +Ġ 产 +èµ° çļĦæĹ¶åĢĻ +Ġlay offs +optim ization +ĠKl aus +, é¢ľèī² +æ°Ķ 缸 +Ġprim ed +prot ective +Ġmetaph ors +Different iate +èĮģ 壮 +M erry +Ĉ ãĢĤ +st aking +Ġfor all +sc ala +ç²ī 红èī² +ä»·æł¼ åľ¨ +Ġassess es +ĠLow est +æĻĥäºĨ æĻĥ +ĠD SC +Ġwh istles +Ġass hole +.d elta +çĭ¬ çĶŁ +ï¼ĮæĹł ä¸į +ĠHun ters +Ġbart ender +l us +ĠJ ules +Ġvoc ation +ï¼Į 顺åĪ© +ĠG urgaon +以 ä¸Ģ个 +éĢī ä¿® +æĽ¾ æľī +ĠPer forms +æıŃ示 äºĨ +ãĢģ éħ¸ +åı¯ ä»İ +Ġimpro b +ĠH AND +ere g +L v +ä¸Ń 线 +åĨį åĪ©ç͍ +åıĸ æĿIJ +lex ia +utt gart +åħ¨ ç¾İ +ik el +éĥ½æĺ¯ è¿Ļæł· +ç»Ļ大家 ä»ĭç»į +Ġj ab +éĿŀ 线æĢ§ +ren o +prop ylene +market ing +éĤ¢ åı° +B art +ç® ķ +Ġbooks helf +, size +æĬĺä¸į æī£ +人 åĵģ +çŃī åľ°çļĦ +Ġvac u +Ġg ala +Ġpe at +æĭ¼ åĽ¾ 
+ulum i +ĠStruct ured +ç¡Ŀ çĥŁ +ĠFest ivals +& W +åİ» éĹ® +,è¿Ļ 款 +æ³¥ é³ħ +Birth day +ä½ ļ +为 åIJį +ĠEng el +ĠAg ree +çıį åĵģ +Ġlatt ices +is ual +ĠB CS +ĠD eng +å½ Ĺ +çĶŁ è¾ī +èĢģ æ±ī +Ġgr p +_M B +æĢķ æĪij +çĤ¹ å¼Ģ +ä»ĸ们 æīĢ +å·¥ä½ľ 计åĪĴ +IT ING +å¤±æľĽ äºĨ +, æĶ¶ +æĹł ä»»ä½ķ +çĶļ å¤ļ +Ġice berg +说å¾Ĺ 好 +_ ft +ãĢģ å®¶éķ¿ +ĠF UNC +ub by +Ġev ils +Ġore gano +ĠJoh ann +/XML Schema +Ġt ous +çĹĽ ç»ı +éĹŃ çĽ® +Ġmid day +ãĢĤ ç¬ĶèĢħ +Ġap ical +é»Ħ åŁĶ +.P ort +èĻļ å½± +TE GR +á» ĥ +èϾ ä»ģ +.entry Set +Ġ å°ij +ĠE MI +ï¼ģ ï¼ģâĢĿ +å°Ķ èĴĻ +åįĩ èģĮ +Ġpret rial +è¿Ļæł·çļĦ ä¸Ģ个 +éĺ´ å¤© +ãĢĤ çī¹ +st asy +ĠE nerg +Cons umers +èĤ¾ çĤİ +D DR +ãĢĤ æľīåħ³ +ä¼ļ å¦Ĥä½ķ +第ä¸Ģ åŃ£ +Web Kit +ï¼Įä¸ŃåĽ½ çļĦ +amer on +ĠAd mit +Ġregist rar +ä¹Ļ äºĮéĨĩ +ï¼Ľ èĬ± +é¦ Ħ +Ġsign aled +åĩºæĿ¥ ä¹ĭåIJİ +鼨 åŃ£ +æı´ åĨĽ +.run ner +åĪº çľ¼ +-W orld +Ġ([ # +ĠS AC +Ġwork able +aw ns +ï¼Įä¸į 让 +En s +ĠAc ne +ĠEnc oder +Ġpoison ed +ĠPBX FileReference +c ra +ĠP arr +Ġseed ing +ï¼Į纵 çĦ¶ +O OM +ĠS PORT +IT TER +_c ategories +æĹ¢ è¦ģ +奶 çīĽ +âĹ ĩ +ç¥Ľ éϤ +ï¼Įå¹¶ä¸į 代表 +æĪªçĦ¶ ä¸įåIJĮçļĦ +åħ¬ åĭŁ +Ġprim aries +åĽºå®ļèµĦ产 æĬķèµĦ +ĠPl ates +_re verse +Since rely +_emb ed +-pr one +Harm ony +. 
lo +M yth +çļĦ åIJįç§° +ãĢĤ åĵĪ +ĠD iving +ä¼ļ å¾Ĺåΰ +ï¼Ł è¿ĺ +ï¼ģ ï¼ī +çľ¼ 线 +å®Ŀ çıł +( Resource +m oon +ĠTh ats +ĠFl ask +çļĦ çŃĸçķ¥ +con i +第 åįģäºĶ +æĸ¯ å·´ +ĠRef erral +ĠSpace X +num s +-disc iplinary +L ar +为 çĶŁ +éĤ ¬ +å¼ķ 线 +Ġnetwork ed +åĽ½åĨħ å¸Ĥåľº +Ġbureaucr atic +ï¼Į æľ« +åĽĽ æ°Ł +ï¼Įåį´ ä¸į +æ³ķåĽ½ 人 +.move To +ãĢģ åıĹ +轻轻 æĿ¾æĿ¾ +éĺ²æİ§ æİªæĸ½ +LED çģ¯ +Ġcate red +ll ll +è¿Ľè¡Į è¿ĩ +åĶ® è´§ +ĠRE SET +ãĢĤè¦ģ æĥ³ +ĠÅ ¼ +COD ING +on ucle +Ġre ordered +Ġdef ends +ĠAl ma +.is file +Ġrev ising +Ġroll back +åĦĴ åѦ +s ie +Ġimp etus +åĪ« çļĦåľ°æĸ¹ +Ġblot ting +ĠC AC +红 ç³ĸ +缸å½ĵ 大çļĦ +éĽĨä¸Ń äºİ +éĽĢ è·ĥ +M olecular +_l arge +Ġå¦Ĥæŀľ 说 +Ġf ountains +âĢľ æµ· +å¾Ĺ æĺ¯ +ãĢĤâĢĿ ï¼Ī +_F T +åįĥ å®¶ +éĥ¨åĪĨ åĨħ容 +Ġletter ing +D æīĵåį° +sc p +è¿İ æĺ¥ +å°ıé¾Ļ èϾ +ï¼Į 奴婢 +æ¯Ľ åĿ¯ +,è¿Ļ æĹ¶åĢĻ +太éĺ³ ç©´ +åıij çİ°åľ¨ +)) -> +ç§ij ç§ijéķ¿ +ĠCl ash +Ġrecess ed +ï¼Į æĦıæĢĿ +æĪĺ åħ¢åħ¢ +åĪ©æ¶¦ 表 +_CO UNTER +èĢĥ åĭ¤ +_h it +åħ¬éĩĮ å¤Ħ +Ġdictators hip +Ġh ci +_M UT +Sec rets +âĦĥ å·¦åı³ +Ge V +Where as +ï¼Į è¾¾åΰäºĨ +ãĢģ å¹´é¾Ħ +åĴĮ èīºæľ¯ +ä¹ĭ èĢħ +æĸĩ 稿 +å·²ç»ı è¾¾åΰäºĨ +.t imes +Ġaf loat +Ġknock s +aus age +ï¼Į éľ²åĩºäºĨ +âĢľ -âĢĿ +åĩł 天çļĦ +Ġident ically +çŃĶ åį· +ç»ĻäºĨ 她 +Ġc usp +çļĦ åĨ³å¿ĥ +ch os +ä½ł è¦ģæĺ¯ +() " +é«ĺ æ·± +Ġeas ement +人类 社ä¼ļ +åĤ» çľ¼äºĨ +-ref undable +ï¼ļ ç¾İåĽ½ +æĮ¡ é£İ +á» ij +Ġw atered +ĠG ow +ud ges +ç͵ è´¹ +ç´ł é¢ľ +çºł èijĽ +ĠRealt ors +åľ¨ ä»Ĭ天 +çīĪ åŀĭ +Set t +åµĮ å¥Ĺ +ĠæĿ¨ å¿Ĺè¿ľ +å°ı åħµ +头 åĦ¿ +è¿ij ä¸īå¹´ +CD F +~ ) +Ġt ainted +ĠO scill +ä¸ī éĥİ +ys er +çİ°åľ¨ åľ¨ +Ġsouth western +è¿ĺä¸į ç®Ĺ +H DR +ï¼ļ AI +Ġext rinsic +ä¹Łä¼ļ 被 +ic ile +ï¼Įä¸į æĸĻ +dam age +( pl +P ract +s umm +çļĦ åĩº +ass ociation +Ġvar n +hand lers +j our +è¿ © +Ġman power +åĨ· æ¸ħ +ILE S +C J +Ġo xy +ĠO U +ĠK ont +åĽł çĹħ +Ġchild ish +_M OV +æĶ¾åľ¨ ä¸Ģèµ· +Bad ge +ĠDest inations +çľĭ çĥŃéĹ¹ +.p resent +-f inals +P redict +_r q +é¢ľ åħŃ +ä½ĵèĤ² éĶ»çĤ¼ +åij¼ åı« +åıijè¾¾ çļĦ +ĠJen ner +åĨĢ å·ŀ += E +Ġth orn +osp heric +ãĢģæ°´ æ³¥ +æĮ¨ çĿĢ +applic able +Ġl ute +ĠC es 
+æľĪ å¤ľ +缴 èIJ¥ +-f ixed +AD MIN +ĠRot ate +åĪĨ è¿° +requ irement +Ġsw ine +ĠSC ARPA +Ġgrasp ing +ivari able +c ord +al chemy +å¿ĥ æľº +cl imate +ĠRN As +Ġhym n +o E +tt ps +èϽçĦ¶ æľī +åºĵ å°Ķ +深度 èŀįåIJĪ +WE LL +j abi +ä¸Ń 带 +å®ļ æĬķ +åħ¬åı¸ äºİ +éŨ éĿ¢ +.in v +转è¿ĩ身 æĿ¥ +ï¼ģ ä¸į +è·ĭ æ¶ī +çļĦ è¿Ľè¡Į +ĠAm ended +ĠEst imation +Ġl td +ä¹Łæĺ¯ æľĢ +ä¸ĵä¸ļ çĶŁäº§ +Ġlicense e +Column Name +好好 åŃ¦ä¹ł +ĠINT ENT +s nd +çļĦ æĸ°éĹ» +èĢģ åĵ¥ +IL ITIES +ĠFlor a +æįī æij¸ +èľ» èľĵ +A mp +åĬł ä»ĵ +èħ¹ åľ° +call er +oqu ine +( fig +( attrs +C atherine +_ define +Ġt ribut +çݰ å½¹ +天 宫 +çϽ è¡ĢçĹħ +ä¸ĸçķĮ ç»ıæµİ +温度 åĴĮ +Ġfill ings +Short ly +ĠHB V +Ġâľ ĵ +F isher +Ġdis dain +ï¼Įä¹Ł ç®Ĺ +æĬĢæľ¯ æ°´å¹³ +A sp +{ class +_d emo +åĪĩ 身 +_gener al +ï¼Į éĿŀ常çļĦ +Ġin securities +å¿ĥ æ³ķ +äºĶ æĮĩ +_S CREEN +鼷 ç¥ŀ +æīĢ ç§° +ĠIn box +èIJ½ èIJ½ +ï¼Įå°± è·Ł +ET C +ĠNon linear +åĽĽ åĪĨä¹ĭä¸Ģ +马 èµĽ +-t imes +éļĶ å¼Ģ +ĠSau vignon +L iv +çļĦ åĭĩæ°Ķ +iet ta +çīĽ è§Ĵ +åĺī å¹´åįİ +Side bar +Ġ åIJĮæł· +ï¼Į é¥®é£Ł +Ġshow down +ĠSh ores +abel le +Ġprevent able +é»ĺ 认为 +Ġpoly urethane +询 ä»· +è¾Ī åŃIJçļĦ +_res ume +Py Object +on ium +ï¼Į æ³ķéĻ¢ +ï¼Į è®°ä½ı +ä¸Ģ è¯Ń +ç±» çŃī +亲 çļĦ +åIJ« æ°´éĩı +ĠCON S +æ¯ķä¸ļ 论æĸĩ +æ·ĭ æ·ĭ +, åı¤ +ĠP OW +NotFound Error +ï¼Į å¾ĹçŁ¥ +ĠD olly +æ°´ åį° +ä¹IJ äºĨ +åįĬ 空 +T ruck +ï¼Į å¿«ä¹IJ +è¯ Ļ +pport unities +-c oding +伦 å¤ļ +çݰéĩij æµģåĩº +ĠTerrit ories +Ġreproduc ible +ĠR iot +ĠCo il +Ġcarp enter +Ġ èİ·å¾Ĺ +ĠE FFECT +ç²ī åºķ +没äºĭ çļĦ +, æ¯ıæĹ¥ +ãĢĤ éģĵ +ĠH Cl +ĠE aton +Âł åı¯ +)) + +Ġoste oarthritis +Da emon +æľ¬ 级 +equ ipment +èᝠæĢ§ +.ex pr +代çIJĨ æľºæŀĦ +( up +âĢľ ä¸Ģ个 +text width +'] = +欧 äºļ +H annah +× § +æĺ¯ä¸Ģ æĶ¯ +ĠS oda +ãĢģ èĭ¹æŀľ +Ġhum iliation +è̳ çݯ +Ġd ll +ãĢĤ çݩ家 +åıij çϽ +lin eno +ĠMont essori +appe ared +t et +ĠS outheastern +æľī è°ģ +ĠPan ic +. 
Op +f riendly +ĠS erved +_p df +Ch ronic +åį´ æĺ¯ä¸į +åŁºéĩij 财产 +æĬ± æĬ± +ĠER ISA +, æīįæĺ¯ +ve get +VER SE +Y K +ĠS SI +DIT ION +ά ν +, èIJ½å®ŀ +大 æĬĬ +æĹ¶ åıijçݰ +Ġmon archy +çĶŁæ´» äºĨ +çĭIJ èĩŃ +è¾± éªĤ +[ V +çīĩåĪ» ä¹ĭåIJİ +ĠMillenn ials +ä¸Ģ çĽĺ +åĢĻ è½¦ +åºĵ ä¸Ń +å¿ł åijĬ +åĪĨéħį çļĦ +幸è¿IJ çļĦ +ä¸Ģ æĬĸ +å°Ĩ è¿İæĿ¥ +éĢĢ å¸Ĥ +EC s +纵 æ·± +: table +åΰ å¦Ĥä»Ĭ +Ġent hal +åij³ è§ī +ĠÑ Ī +ï¼ĮèĢĮ ä¸įèĥ½ +NUM BER +ãĢĤ 羣æĺ¯ +åģļ åĩºæĿ¥çļĦ +Line No +ĠMP a +ãĢĤ ä¸ĸ +èĢ Ļ +æĪij èĩªå·±çļĦ +Ġch atted +BC D +Ġexplore rs +åľ¨æĪij éĿ¢åīį +Ġ åĪĨæŀIJ +Ġt ä +ä¸į 大äºİ +, å¾Ĺåΰ +åĴĮ ç»ıéªĮ +ä¸ŃåĽ½ ç§ijåѦéĻ¢ +åĪĽ æĦıçļĦ +æ¥ ¹ +è¶³ è½»éĩį +âĦĥ ãĢģ +. '' +ĠN ested +å°ı å¾®ä¼ģä¸ļ +å¤ĸ æİ¥ +æīĵ åĪĨ +An ime +æŃĮ é¢Ĥ +bf d +ĠSem in +æ±Ł åŁİ +Ġguess es +åºĶå±Ĭ æ¯ķä¸ļçĶŁ +ൠį +_ asset +p urchase +èᝠæķĪ +éĻĦ æľī +Reg ist +Ġreass ure +Ġ æħķ容 +âĢĿ ä¸ŃçļĦ +ĠN un +Ġfeed ers +Ġ( -- +æ³ ŀ +åIJİ åı¯ä»¥ +å¿ħ èĥľ +Ġphen yl +ä¸įåĬ¨ äºĨ +åħ»æ®ĸ æĪ· +exper imental +ĠIn clusive +æłĩ å®ļ +Ġexc itations +é¼ĵ åĬ¨ +åĩłä¹İ æīĢæľīçļĦ +Ġhippoc ampal +å¿Ĺ æĺİ +ĠApplic ant +工信 éĥ¨ +_ ETH +Ġsur ged +ĠMod al +èµı èĬ± +ãĢģæ°´ åĪ© +creat ing +S J +ï¼Į èµ°åĩº +缸 å¾ħ +è£ħ ä¸Ĭ +å·¥ä½ľ çݯå¢ĥ +è¿ĺæľī çĿĢ +By ID +ç§ĭ åįĥ +Author ized +ÅĽ ci +éĢłå°± äºĨ +.Global ization +Ġe clips +æĺ¯ éĩĩç͍ +pt uous +æĥħåĨµ åIJİ +éļĨ èµ· +Ġabund antly +æīĽ çĿĢ +ãĢģ èī² +æµij 身ä¸Ĭä¸ĭ +ĠBrew ers +ĠScre w +ĠK ne +éĿĴå¹´ 人 +æľīåĵªäºĽ åij¢ +ï¼Į æ²IJ +ï¼Į æłĩå¿ĹçĿĢ +æīĭ å¿ĥ +åħĪ åıij +Ġhair y +åѦåijĺ 们 +, 符åIJĪ +u gh +ãĢĤ æŁIJ +.C o +Ġant ics +IL I +Mass achusetts +F rozen +H omes +Ġ ä½ĵ +us ages +Ġpre term +åħĪ åīįçļĦ +Ġins anely +ä¸ģ åł¡ +ï¼Į åľ°çIJĥ +Ġbl inking +_T ime +(s ig +Read s +åįģåĪĨ éĩįè¦ģ +nes ia +m inton +ï¼Į åģľæŃ¢ +iv ores +Cont aining +ĠTr uman +cycl ing +å°± æĽ´åĬł +è·ij åĩº +un ce +ĠH ANDLE +çİĭ åħ¬ +struct ural +Be ans +Ġtreasure r +Ġunve il +èĪ Ģ +æĶ¯ æķĻ +Ġep is +å¡« 空 +çĭł å¿ĥ +Ġdepartment al +ch imp +âĢľ åĽ½å®¶ +.... 
" +åī¯ åİ¿éķ¿ +æĿ¥è¯´ 说 +çļĦæīĭ ä¸Ĭ +ĠNare ndra +æĽ¼ è°· +_ major +ï¼Į 稳å®ļ +ãĢģ æľīæľº +ĠD VR +åĴĮ 设计 +Ġup rising +ÃŃ c +d ynamics +Ġsp ying +ç»ıæµİ æŀĹæŀľ +-ne ck +罪éŃģ 祸é¦ĸ +- / +U a +ï¼Ł ä»İ +Ġover came +åĽŀ åĵį +Time Stamp +娶 äºĨ +Ġcontempl ation +Ġdistort ions +Ġ é¢Ħ计 +ch mod +old ed +w inter +âĢĿ éĹ®é¢ĺ +æİ¥ 踵 +ï¼Įä½Ĩ ä¸įèĥ½ +Ġrep lying +èµĦæºIJ 丰å¯Į +纲 é¢Ĩ +Ñģк ий +. endsWith +ĠC ME +ä¸į éĢĤåºĶ +un lock +ĠW TF +åīį æľŁçļĦ +äºĨä¸Ģ è¾Ĩ +寿 éĻ© +æīĢåľ¨ åŁİå¸Ĥ +Ġarchae ology +æĹł éĺ» +å¼ı åĴĮ +鼨 éľ² +ĠSecret ariat +Ġbust le +] n +ç³»ç»Ł åĮħæĭ¬ +åĨľ åºĦ +Ġacceler ates +çļĦç²¾ç¥ŀ åĴĮ +éĺ Ĥ +åĿ ¨ +Ġpopul ace +g ings +ill ions +No vel +Ġtrack ers +é¢Ĩ导 åĬĽ +_log o +f ell +ä¸ĸçķĮ 级 +æį¢ æ°Ķ +æ¾ ¹ +åı® å½ĵ +Ġmacroph age +ĠN SError +æĹħ éĢĶä¸Ń +-sc roll +L s +P roud +ĠC url +ost e +è¿Ļ个 æĸ¹æ³ķ +sub string +ä¸įä»ħ åľ¨ +æĺł 衬 +åĵģç§į çļĦ +飦 å°Ķ +_CONST ANT +< t +èĸĦ èĸĦçļĦ +ĠN SS +åĽ½ å®Ŀ +åĪĨ ä¸įæ¸ħ +å¹³ åºķ +(p arts +Ġactress es +, 羣çļĦæĺ¯ +ĠS ES +em m +å¡Ķ åħĭ +izz ie +ç½ij çĬ¶ +åij¨ æĹĭ +ï¼Įä¹Ł èĥ½å¤Ł +ĠRes ervations +ä¹° ä¹° +éĺ¿ å¼¥éĻĢä½Ľ +ĠCA ST +\ Contracts +ĠG ert +Ġj eg +è¶ħ 强çļĦ +æIJŃ ä¸Ĭ +ãĢį ãĢģãĢĮ +ä»Ķç»Ĩ åľ° +åı¸æ³ķ å±Ģ +åĩ¯å°Ķçī¹ äºº +æľī ä»·å̼çļĦ +å¦Ĥ é£İ +Ġstren uous +S v +ĠS utherland +å½ĵ è¿ĩ +bit rary +under stand +, èİ« +Ġd unk +-e lectric +éĵ² éϤ +åIJ ł +åĪ» 骨 +Al abama +æĻ® å°Ķ +çĭ¬ å¤Ħ +å¹³æĸ¹ç±³ çļĦ +- HT +at iva +ãĢģ ï¼Ī +ç«ĭ ä¸ļ +èŀ³ èŀĤ +, æĭ¿ +bro ker +Ġoutper forms +_b box +æĹłåħ³ ç´§è¦ģ +Jeff rey +æµ¦ä¸ľ æĸ°åĮº +ï¼Į ä¹ī +ä¸į æĹł +æĽ´ è¿ľ +åįģ ä¸īå¹´ +è¾Ľèĭ¦ èĭ¦ +d ims +Ġr ussian +åĽŀ æĹĭ +ĠCent ennial +å²ģ 以ä¸ĭçļĦ +ham pton +A ce +K athy +w olves +Ġper ched +ĠEx ceptions +Ġhot spots +æĵ¦ éϤ +ĠUI Kit +à® ¾ +, åıijæĮ¥ +Ġ åĩºçīĪ +æ¡ § +æĸ¯ åĴĮ +è¡£ åĨł +ï¼ĮçĦ¶åIJİ éĢļè¿ĩ +æ¶² 缸 +, æĪIJ +Ġ å®ŀä¾ĭ +ãĢģ æ³° +ä½ł åı¯èĥ½ +SC AN +ĠIsland ers +re i +è¾ĵ ç͵ +ç»Ļä½ł 带æĿ¥ +Ġtele metry +. Product +L iu +æĮī çĿĢ +伤 èĢħ +åı³ éĶ® +Ġbatch ing +ĠAtkins on +Ġel k +ï¼Įä¹Ł éľĢè¦ģ +Ġdry ness +ä¹łæĥ¯ äºİ +V oc +Ġc map +æľī èĩ´ +ĠL iga +Ġsc andals +Ġ. 
âĢĿ +Ġfl a +ĠPM ID +æ£Ĵ äºĨ +.âĢĻ âĢĿ +B anners +if olds +ĠF DR +.p oll +éĿĴ åħī +.g lyph +-se cret +æĺ¯æľĢ éĩįè¦ģçļĦ +ĠNull able +, 第 +B UT +Ö · +Ġ çIJĨ +è¦ģ å°ıå¿ĥ +éĿĻ é»ĺ +Ġclos eness +çģ¿ çģ¿ +or ov +ä¸Ģ 线çļĦ +æ°´ çĵ¶ +æĽ´ éľĢè¦ģ +é¥ ķ +_D LL +.R oot +èµ£ å·ŀ +_ User +ï¼Į çĪ·çĪ· +è¿ĺæľī 许å¤ļ +å§¥ çĪ· +ï¼Į æŃ£å¸¸ +Ġv k +ĠLib ra +导 è´Ń +è¿ĺæĺ¯ éľĢè¦ģ +ĠSp here +ç®Ģ çŁŃçļĦ +åºĶ该 çŁ¥éģĵ +èĹı 身 +Ïģ η +Ġåħ¶ä»ĸ åºĶæĶ¶æ¬¾ +ãĢģ é£ŀ +· éľį +å±ł é¾Ļ +æĤļ çĦ¶ +, åħįè´¹ +æľī åķ¥ +大 é¢Ŀ +å¸Ĥå§Ķ 常å§Ķ +è´§çī© è¿IJè¾ĵ +Ġfur the +ALYS IS +ĠR uff +ial ect +æµģ çĿĢ +Ġelect rom +Ġeth ylene +-ex clusive +èį¨ éº»çĸ¹ +is ks +ĠA ries +åĽŀ 京 +Ġtre ason +ĠBlack berry +- awaited +en ade +éϤ æ³ķ +Trans mission +Ġham per +ï¼Į éĽĨåĽ¢ +Ġun riv +ç§Ł æĪ· +å¿ł ä¹ī +Ġpart ing +å®£ä¼ł å·¥ä½ľ +ĠHon est +éļIJèĹı çļĦ +Ġg ee +Ġped igree +Ġgrass y +(pro file +_cm ds +Ġf eline +ä¸Ĭ çŃī +没 æĹ¶éĹ´ +Ġstud s +çħ§ ä¾ĭ +ĠO yster +_add ed +ĠTud or += config +åħ¶ 对 +ĠGu o +ĠGround s +计æıIJåĿıè´¦åĩĨå¤ĩ çļĦ +åıΠ以 +_n il +è£ħéħį å¼ı +Ġmuc us +æľįåĬ¡ åĮº +&& ( +comb ine +Ġpenn ies +CONCLUS ION +; height +[ scale +ãĢģ éĴ¢ +æĽ´ å°ij +æķ£ èIJ½ +æŁı æĭī +art i +Ġcomp ressive +éĢī åŀĭ +é£ŀ åĩºåİ» +ĠCo ch +Ġproof reading +Ġmanifest o +åij¨ çijľ +OM O +ç»ĵæŀĦ è°ĥæķ´ +ãĢģ åIJĪåIJĮ +åĨį èµ· +çĽij管 æľºæŀĦ +, éĥ½è¦ģ +Ġu mp +ç¼ĸ èijĹ +åħ¨åĽ½ æĢ§ +Âł ä¹Ķ +çŁ¥ è¶³ +å¼Ģå§ĭ å°± +æĬķèµĦ æ´»åĬ¨ +Config urations +Ġbi otech +ĠWell being +ï¼Įåıªæľī åľ¨ +æĥ¨ éģŃ +ĠNik ol +åĸĦ æĽ° +ĠWood ward +; åľ¨ +èIJ½ æĹ¥ +ç¯ ĵ +ĠDur ant +ĠM ek +(s ystem +çİī åĦ¿ +æ´Ĺ äºĨ +ĠAcc um +èįĴ åĩī +ï¼Įä¸įä»ħ æĺ¯ +Ġlin ens +ãĢĤ å¸Ŀ +å½ĵ åĽŀäºĭ +å¯Į äºĮ代 +çĨŁç»ĥ æİĮæı¡ +@ f +å®īåħ¨ ä¿Ŀéļľ +(\ \ +ï¼Įä¸Ķ 使 +ĠContract ing +, éϤéĿŀ +.S H +群 éĽĦ +Ġbrown ed +ĠColl ar +æ°¯ ä¹Ļçĥ¯ +åĽŀçŃĶéĹ®é¢ĺ æĹ¶ +T Z +Ġpl edges +åľ° é»Ħ +F IRST +Âł G +åı¯ä»¥ éģ¿åħį +å·²ç»ı è¿ĩåİ» +ĠHum or +åĪĨæ³Į çī© +ä¸į èĢĥèĻij +çľĭ æĩĤ +åı£ å¤Ħ +_p ci +çİ°åľ¨ æĪij +.f eed +ãĢĤå½ĵ ä¸ĭ +èĭį èĢģ +æľ¬ç§ij ä¸ĵä¸ļ +nas ium +.getElementsBy TagName +[ col +id ay +人士 çļĦ +Ġinterpol ate +赡 åħ» +_m ouse 
+Ġrefriger ators +A ura +ors ch +m aterials +ï¼Į èģļçĦ¦ +ãĢĤ éĴ± +igh am +Ġbl urry +ĠSc enes +ĠPOS IX +Ġredd ish +ĠW eld +ĠSuccess fully +R PG +æµģ ç»ı +ä¸į 饶 +ĠK ear +åĽŃ åĨħ +object ive +T ITLE +)) = +ĠAc rylic +æİĪæĿĥ çļĦ +çļ±èµ· çľī头 +he i +ä»ĸ 身边 +åį´ èĥ½ +è¶Ĭ èµ° +-p review +æĬķèµĦ 管çIJĨ +MS M +ï¼Į 计 +ĠW ATER +ĠScal ing +Ġdealership s +, åħī +- liked +/ type +ë ¶ +Ġ é±¼ +ãĢģ 身份è¯ģ +ĠK DE +Ġover power +åĬŁ äºİ +åħŃ è§Ĵ +ãĢĤåľ¨ ä¸ŃåĽ½ +åħ« è§Ĵ +å¥Ĺ åĪ© +Ġvir ulence +Ġ æ¯ĶèµĽ +ĠD imit +æĺ¯ä¸Ģ 段 +éĢĢ åİ» +æĻĵ çϽ +æį£ ä¹± +åľ¨ 身åIJİ +è¿Ļ ä¼ļ +ĠDe S +åħĭæľį äºĨ +ĠR ican +ä»ĸ们 æĿ¥è¯´ +sv n +( State +ï¼Į 竹 +æĽ´ å®īåħ¨ +Ġ åĮº +éĺµ ä¸Ń +éĿ¢ç§¯ 约 +çĹħæ¯Ĵ æĦŁæŁĵ +H ollywood +R ST +ç»Ŀ å¢ĥ +ç§ijåѦ家 们 +åĽ½åºĨ èĬĤ +ĠN OV +åѦ åIJį +æŃ¥è¡Į è¡Ĺ +åIJ¬åıĸ äºĨ +ĠK ri +æ°´ 壶 +åıĤ æĪĺ +-l ast +Ġground work +å¿ĥéĩĮ éĿ¢ +Ġgran ules +ĠF ink +IST RY +Âł C +Ġcommun ism +(p assword +åīij çļĦ +æ´ģ çϽçļĦ +ä¹Łä¸įæĺ¯ ä»Ģä¹Ī +Sam uel +ï¼Įä¸į å°±æĺ¯ +m j +鸡 ç¿ħ +çĮ® çŃĸ +Ġrandom ised +.pre vious +ert ia +ä»İ åħ¶ +bb en +ç½Ĺ马 å°¼äºļ +ĠDol by +æijĩæĻĥ æĻĥ +it izer +ĠD odd +ï¼ļ ä¸Ĭæµ· +ä¿¡æģ¯ åıijå¸ĥ +ç°ĩ æĭ¥ +- åĪĨ +çļĦ åŃIJ +ÑĢ Ñĭ +æĬ¹ åİ» +åĽ½éĺ² éĥ¨ +-auth ored +Ġin se +ï¼Ł åĪ« +æĹł åĬ¨äºİè¡· +éĩį ä¿® +éĺ² æ´ª +çĽij åζ +ç»ıèIJ¥ 许åı¯è¯ģ +åľ°çľĭçĿĢ å¥¹ +am t +ï¼Ł ä»Ģä¹Ī +ar ize +Ġcru iser +为ä½ķ è¦ģ +_RG BA +Ġw ilt +ous and +è¿Ļ个 åŃ©åŃIJ +ĠMc Call +ĠFlu or +ãĢģ åĵģè´¨ +ï¼Į èģĬ +åħ¨ åŁİ +ï¼ĮæĪij 羣 +_D AY +ĠSic ily +åķĨ å®¶çļĦ +Ġalarm ed +ĠUtil izing +stud ents +è¿Ļ ä¸į +çĤ® çģ« +\| ^ +\ !\ +Ġcommun icative +IS R +èĹı æĹı +ĠTool box +Ġdepos iting +åѤçĭ¬ çļĦ +ï¼Į å̾ +ĠR utherford +ĠL enders +ĠO bit +æĽ´ ä½ķåĨµ +Ġpass er +ĠTo e +Ġfaith fulness +çļĦ天 æīį +ä¸Ģ大 æī¹ +æĭŃ çĽ® +Fin ite +ĠWhit man +å¦ĩ产 ç§ij +- webkit +en k +çĶŁæĢģ æĸĩæĺİ +Ġcm ds +宫 主 +使åij½ æĦŁ +Ġupset ting +çļĦåıĮ éĩį +v ous +ï¼Į æŁIJäºĽ +è´ ² +æ¸ħ éĿĻ +鼷 鼨 +é¢ij 段 +Ġpsychiat ry +educt ible +äºij 轩 +Ġret ries +Ġä¸ī 人 +Ġbird ing +H arris +å¼Ģ èĥĥ +.p attern +å·¥åħ· çļĦ +ĠS IN +ï¼Įä¸į 说 +æī¶ èµ· +缸æ¯Ķ ä¹ĭä¸ĭ +ä¸Ńä»ĭ æľºæŀĦ +è¾ĵåįµ ç®¡ +ĠF 
OUR +ĠH OL +.M ulti +ĠS ailing +èĮħåı° éħĴ +T REE +ï¼ī åıĤä¸İ +çα æħķ +çĽĪ è¢ĸ +ç²® æ¶² +éĢĨ æµģ +第åħŃ å±Ĭ +T enn +Ġ ç´§æİ¥çĿĢ +Ġb ends +Ġpre clinical +Ġhyp ogly +ĠD aly +æĪij åİ¿ +éļ ¼ +çīĪ åĽ¾ +çĶŁäº§ è¿ĩç¨ĭ +ï¼Į缴æİ¥ å°Ĩ +éŃħ æĹı +çļĦç¾İ åij³ +clean up +Ġmans laughter +èµ° 强 +èģĶ éĺŁ +Ġ, " +Ġcharg ers +伯 åĪ©äºļ +è¯Ŀ说 åĽŀæĿ¥ +缸åħ³éĥ¨éŨ æī¹åĩĨ +Ġpued e += sub +ĠL up +ĠJ as +rove ment +IT C +èĵĿ æµ· +Ġ æŁ¥è¯¢ +al ah +ir u +é¢ ī +马 è¶ħ +.st roke +Ġbul ge +_load ed +ç«ŀæĬĢ åľº +_symbol s +ĠDaven port +ï¼Į å¯Ĩ +æŃ£è§Ħ åĮ»éĻ¢ +ĠBot ox +Ġlever ages +.Dec ode +ĠRead ings +ĠDun geon +æĸ¹åºĶ çī© +. reference +ĠA UDIO +_s upp +带 ä»ĸ +临åºĬ åĮ»åѦ +è¿Ľåı£ çļĦ +Ġunderm ined +å·¥åķĨè¡ĮæĶ¿ 管çIJĨå±Ģ +b st +Ġ èĪĴ +Ġ çīĪæľ¬ +æĮī æıŃ +èĭ¥ çݰ +Ġredu cer +è´¢ åĽ¢ +æĿ¾ å¼ĢäºĨ +Ġintellig ently +Y R +Y ay +e lected +r outes +Ġ 度 +ãĢģ å¼Ĥ +Ġ' ~ +ĠAl mond +çİ°åľ¨ æĪij们 +ĠMar i +_ Input +ĠP encil +Ġåľ¨ è¿Ļ个 +为主 导 +å®´ 请 +/ group +\ exp +Ã Ģ +追 æŁ¥ +人æ°ijæ£Ģå¯ŁéĻ¢ æĮĩæİ§ +ĠÙģ ÙĬ +^ / +ĠI ke +Ġall uring +å¾Ĺ åĬĽ +æľĪ èī² +èĦ± æ¯Ľ +殿 ä¸Ń +S eb +ĠInv ite +ĠVir go +åĵĪä½Ľ 大åѦ +æľ¨ å·¥ +æģ¶ ä½ľ +Layout Manager +Ġforc ibly +im in +ãĢģ ä¿¡ +å®īåħ¨ éĺ²æĬ¤ +å¸ĥç½® çļĦ +Ġconspic uous +, çĿĢåĬĽ +y brid +ï¼Į 羣å®ŀ +ï¼Įè¿Ļ è¾¹ +é£ŀ èµ· +Ġdisplay Name +ĠPL AYER +åIJ± åIJ± +} n +ĠP ools +ud ging +itt sburgh +-in cludes +çİī é¾Ļ +æ»ij ç§» +ny a +ĠâĨ IJ +. book +代 æķ° +ç»´ åIJ¾å°Ķ +ãĢĤ æĶ¯æĮģ +pe ating +Ġdev iate +_DE CODER +/g it +çģ¼ çĥŃ +åĪĨ æĭ£ +Ġra cc +éĽª çϽçļĦ +ĠVal le +Ġcasual ty +æīİå®ŀ æİ¨è¿Ľ +å·² è¾¾åΰ +亦 ä¹IJ +æīĵå¼Ģ éŨ +æ¶ĪåĮĸ åIJ¸æĶ¶ +. tencent +T itles +主è¦ģ è´Łè´£äºº +Ġhum bled +pl in +æĪijçļĦ æľĭåıĭ +Jo el +æ³ķ å¼ı +Ġemb arking +Ġsoft ness +L Q +GR P +Bring ing +ur istics +举 ç«Ļ +Ġru ining +为 åѦçĶŁ +该 æľīçļĦ +å¿Ļ æ´» +. 
thread +ab cd +âĢľ åѦ +éķ¿ æ²³ +æįŁ æ¯ģ +æģŃ ç»´ +Bed room +, 累计 +æĿ¥ åıĤåĬł +ï¼Ľ ä¸į +åįģ ä¸Ģå¹´ +è§ģ 鬼 +With Error +ï¼Į对 她 +é£İéĻ© è¯Ħä¼° +çĵ¶ ä¸Ń +éĢĨ è¡Į +ĠD are +ĠF ringe +ĠL isp +å¿« æ´» +Ab d +为 åŃ©åŃIJ +ï¼ļâĢľ â̦â̦ +ï¼ĮæīĢ以 ä½ł +Trans ient +NS MutableArray +ĠWord press +çĽĨ èħĶ +ĠEXP ER +Ġunquestion ably +m ort +带 åĽŀå®¶ +CT R +char acters +è¿Ľä¸ĢæŃ¥ å®ĮåĸĦ +éĥ½æľī æīĢ +夺 缮 +ĠPR ICE +Ġlymph atic +ä¸į è¡° +ĠP IP +Ind oor +åĪĨ æī¹ +建 åĬŁ +-T ech +ç͵ç£ģ æ³¢ +; p +éĶĻ è¿ĩçļĦ +å²ģ æķ° +麻çĥ¦ çļĦ +Ġpedag ogical +Ġ éĽħ +å¹´ å¤ļ +_count ry +Ġh anger +ãĢģ é½IJ +Ġun ivariate +åºĶ ä¸İ +èIJ½ èĬ± +éĽª èĮĦ +Ġretriev es +: Number +ãĢģ å®ĹæķĻ +ĠN ish +èĩª é¦ĸ +_D C +W ash +æīĢ å¤§åѦ +Ġfl irting +èIJ½ å¾Ĺ +é»ij æĿ¿ +乡 éĩĮ +Div ider +- that +ĠSE ARCH +æķĻ åijĺ +ï¼ĮèĢĮ æĪij们 +èĥ¶ ä½ĵ +ï¼ĮèϽçĦ¶ 没æľī +Ġoxid ized +åĭĩæķ¢ çļĦ +ĠK illed +-b orne +è®°èĢħ ä»İ +å§Ķåijĺä¼ļ çļĦ +èħ» åŃIJ +Mean ing +, ãĢĮ +ist ribution +è´¢ ä¼ļ +çľĭçĿĢ éĤ£ +æ¯Ľ éĴ± +urb ation +ï¼Į æļĤ +IN TEGER +å·¦ ä¸Ĭ +è¡į çĶŁçī© +( ne +F tdc +)) ), +Ġet iology +Ġe greg +åĭIJ åħ½ +ï¼Į 缸å½ĵ +ch allenge +ant t +æĹ¥ ä¸ŃåįĪ +ĠAT L +Ġkin ases +ĠJ our +éĴ £ +ç§ij 大 +_S UR +ĠSome body +Ġ åĩł +ä½ł ä¸Ģ个 +å¤ļ æĸ¹éĿ¢ +éķ¿ ä¹ħçļĦ +-c opy +Ġdiscrim inator +ĠSchrö dinger +ï¼Į她 æĥ³ +Ġste aming +å§ĭç»Ī ä¿ĿæĮģ +ĠV oters +å§Ķ æ´¾ +沿 ç͍ +åĵªéĩĮ æĿ¥çļĦ +åIJĮä¸Ģ æĹ¶éĹ´ +ï¼ĮåĢĴ åħ¥ +Ġtr unks +æĬĬ æīĢæľīçļĦ +ç²¾ åħµ +e ine +on et +ï¼Į è¯ı +å¸Ĥ ä¸Ń +å«ģ æİ¥ +Ġfro ze +个 èµĽåŃ£ +Ġlib s +Ġlif etimes +-pro fits +ĠProt ector +/ Data +ä¸Ĭ åĩł +è·Ł åĪ«äºº +åķĨä¸ļ ç§ĺå¯Ĩ +è®°å½ķ äºĨ +个 æĿijæ°ijå°ıç»Ħ +ä»İ éĩĮéĿ¢ +æīĵ æĿ¥çļĦ +çϽ åĨħéļľ +åĩºä¸Ģ åı£ +ĠDeb ra +ĠHind us +ĠC MB +ä¿¡ ä¸Ń +_S LEEP +åŃ¦ä¹ł èĥ½åĬĽ +See king +f j +Ġ éĩįéĩı +ãĢĤ æīĢæľīçļĦ +车 åĴĮ +(t xt +å¤ı å¨ģ夷 +Ġmel tdown +Ġreprodu cing +ĠST S +. 
adapter +ä½ł å°ıåŃIJ +å¹³ ç¼ĵ +Ġlog its +Ġq p +åĽŀæĿ¥ çļĦæĹ¶åĢĻ +Typ ical +Ġtq dm +ï¼Į æĭĸ +ĠS ITE +åŃĺåľ¨ äºĨ +ĠArch ie +Ġfract al +-f lag +Ġemp ath +å¥ĩ è§Ĥ +ĠIncre ases +Ġvene ers +ï¼Į æĶ¿æ²» +æīĢ åĬ¨ +ä»ĸ们 没æľī +ĠNull PointerException +ĠF iling +æİ¨èįIJ éĺħ读 +it cher +Ġnorm als +èįīåİŁ ä¸Ĭ +ĠStra uss +Liver pool +ãĢĤ ãĢı +ist ream +头 é¢Ĩ +å·¥ä½ľ æĥħåĨµ +åºľ ä¸Ĭ +Ġlockdown s +R c +Ġt iring +Long itude +Ġbread s +ĠMo j +ĠDifferent iate +Ġharass ed +mbuds man +ĠCart ier +N v +对 ä»ĸ们çļĦ +天 å®Ŀ +éĵģ çŁ¿çŁ³ +(n ormal +西åĮĹ éĥ¨ +< X +ãĢĤ çĥŃ +ach able +åĨĽ å§Ķ +ĠHttp Client +Ġunb earable +ĠM IP +Ġun lucky +}$ ï¼Į +left arrow +溪 æµģ +hand les +ĠLock s +ic ure +md b +ç¾İ好 çĶŁæ´» +Unexpected EOF +Ġ æŃ¤æ¬¡ +ãĢĤ æīĵ +çŃī ä¸įåIJĮ +使 ä½ł +è§ģ éĹ» +åħĭ æĺŁ +æ±Ł å®ģ +éº Ŀ +å¨ģ åİĭ +API ENTRY +-gener ator +Ġvivid ly +s queeze +Ġ ä¿ĿæĮģ +ï¼Į 宣å¸ĥ +ãĢģ èµ· +lo in +ere m +ĠÎ ļ +è½® èι +åľ¨ä¸Ģ çīĩ +å¨ĩ å¨ĩ +ent anyl +è¿Ľè¡Į å®£ä¼ł +èĤ¡ä¸ľ 大 +contin ent +_ ENTER +d eps + ª +Ġde letions +æľ¯ çļĦ +Object Id +åı¹æģ¯ ä¸Ģ声 +Ġen closing +Ġup he +éķ¿ çĿĢ +çĽĪ äºı +æĺ ĩ +æĢ»é¢Ŀ çļĦ +èIJ¤ çģ« +L ens +Ġal imony +âĢľ æĿ¥æºIJ +ï¼ļ éĩĩç͍ +æ¡Į åīį +åIJĵ åĿıäºĨ +} ") +Ġ èµ° +Ġgerm any +ĠLug gage +$ ]{} +Ġo lf +æĺ¯ 两 +Ġcom et +没 è§ģ +By Type +ï¼Įè¿ĺæľī å¾Īå¤ļ +æĭľ çĻ» +ãĢģ æ¾³å¤§åĪ©äºļ +un istd +ĠComm iss +path y +Ġcit rate +Ġneglect ing +ĠENABLE D +ĠK ahn +æ¯Ķ åĪĴ +âĢĶ he +ï¼Įæĺ¯ çļĦ +.l at +ĠAst ro +a çļĦ +ï¼Į å²³ +Ġch akra +åıijå±ķ éĺ¶æ®µ +.read er +Ġk ans +class ic +ĠSun ni +ï¼ĮåĨį 度 +ä¼ijæģ¯ 室 +Ġ 代çłģ +ãĢģ æĸĩåѦ +满 缮 +Ġscal ars +r ils +Ġj uggling +.get Boolean +Ġexempl ified +à ĵ +.m ail +Ġpalp able +Ġ åįģä¸Ģ +_T RI +(m esh +çĪĨ æ£ļ +Ġcandid acy +ICE F +( use +as ms +èĢģ åĽĽ +çα æĦı +éĢĴ åĩı +ĠCond itioner +ç»° åı· +æľº ä¸Ń +æŃ» åľ° +Ġlam inated +sp aces +æķĪ åĬ³ +åĨĻ ä½ľä¸ļ +æĬķèµĦ åħ¬åı¸ +éŃĶ å¤´ +Ġcaut ioned +U ber +大 åĴĸ +Ġnot ifying +åħĭ æ´Ľ +So ap +ĠLik ely +æķ°éĩı åĴĮ +- region +Ġ å°ijå¹´ +ĠT CR +_c urr +ä¹ĭä¸Ģ æĺ¯ +"=> " +> public +èĬ± æĿ¿ +æķĻèĤ² åŁºåľ° +è´¢åĬ¡ æķ°æį® +æħİ éĩįçļĦ +.In 
variant +Ġtrunc ation +I b +re ceiver +ĠC IF +Ġ( -> +Ġres urgence +ĠÎ ij +Organ izations +ĠNewton soft +cccc cccc +. team +çĬ¹è±« ä¸įåĨ³ +ĠImm une +Ġcatar act +大 æľ¬èIJ¥ +Ġdis ple +comp ared +U å½¢ +ĠK eeper +åı¯ä»¥ 满足 +è¦Ĩ çģŃ +çİĽ çijĻ +ï¼ĮæĽ¾ ä»» +, å±ħçĦ¶ +ĠR ename +.d irection +can onical +Ġmel od +.back end +Ġf iance +Ġl ads +IV AL +fr anch +c our +ï¼Į æĹĭ转 +åįķ äºİ +Mark up +ĠSIGN AL +. INSTANCE +ĠS ensors +æľĢ åħĪè¿ĽçļĦ +DE T +-l ab +Mar shall +ä¾Ŀæ³ķ é¡»ç»ı +CHED ULE +æ¸ ¥ +åIJij æīĢè¿° +è¿Ļ个 游æĪı +åį° æŁĵ +èµ¶ åĽŀ +ĠBeck ham +alam us +ï¼Į èµ¢å¾Ĺ +å¹´ 满 +Ġlact ose +Ġpreponder ance +_ restore +ery l +ĠLe opard +ĠHis pan +ü l +Ge org +约çij٠夫 +/ trans +æľī å¾Īå¤ļ人 +è¿Ľ æĿij +ĠCook er +ĠObs ervations +à±į à° +T ennessee +èıľ åĪĢ +ĠMag istrate +ĠBrown ian +Non ce +æ°® åĮĸ +æ¥Ķ å½¢ +çĸ ¸ +To Many +ĠMy ths +缮çļĦ çļĦ +ä¸ģ é¦Ļ +ĠRob ust +× ¤ +ĠG ior +åĨ¥ çİĭ +ï¼ĮåıįèĢĮ æĺ¯ +, 严 +Ġ ith +æ°´ é¾Ļ头 +une i +åħ·æľī 以ä¸ĭ +Ġadj acency +Miss ouri +, çΏçΏ +: ? +L atch +S ter +Ġtrans mits +示èĮĥ åŁºåľ° +磮 人 +ĠStev ie +Ġst ink +ĠM OST +以 è½» +ĠZ odiac +Data Size +ĠFort nite +Ġ 没éĶĻ +çħ ħ +ĠEn ch +åĵĩ åĵĩ +E lev +ï¼Įå°± èĥ½å¤Ł +. recv +çĿĢ èħ° +ĠAb ortion +specific ally +( IP +. 
Change +{ id +qu i +ĠL TC +_w p +代表 éĺŁ +çν çļĦ +-cent ral +imon ial +ĠD olphin +Ġsp ies +åºķ çīĮ +ä¸ī è·¯ +çī¹ åĬ¡ +(m ember +çķª ç¦º +Ġsmart est +_ref resh +榻 ä¸Ĭ +girl s ++ "/ +[ G +çĶŁ æĿ¥ +éĥ ´ +æŀĹ èĤ¯ +itch ie +åĥµ å±Ģ +ĠPoly ester +Ġret ic +_re peat +ĠBi har +åľ¨æľ¬ å®ŀæĸ½ä¾ĭä¸Ń +æ°´ æ·± +è¿ľ å±± +ãĢģé«ĺ éĢŁ +Sm oke +ä¸İ çłĶç©¶ +å®ī åºĨ +ãĢĤ 大éĥ¨åĪĨ +ev ed +.S eries +_V IS +ĠAD P +女åŃ© çļĦ +对 èģĶ +init is +ĠCar a +å®¶åįıä¼ļ ä¼ļåijĺ +, åIJİéĿ¢ +ï¼Į èѦæĸ¹ +ä½ ĥ +ä¼ĺç§Ģ 人æīį +Re ality +æķĻåѦ æĸ¹æ³ķ +Ġaccommod ated +-point s +( JS +R IS +ä¸Ń ä»»ä¸Ģ项æīĢè¿°çļĦ +ĠRe union +éĵ¶ éĴĪ +ĠAc robat +ĠHar rington +é IJ +iz u +æĶ¶ å°¾ +ä¸įä¼ļ çļĦ +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠ +Success fully +Ġ é»ĺ认 +çľĭåΰ èĩªå·± +_V M +ĠMechan ism +ed ish +ic in +ãĢĤ æ¥ļ +ĠS MA +Ġsp ruce +-g raph +ĠReg ist +åĸĬ äºĨä¸Ģ声 +Ġdermat itis +ä¸Ń çļĦ人 +con j +å¿ĥ æľī +é¡¶ å¤ļ +ĠTHE M +ä¹Łè®¸ ä¼ļ +ĠShel ton +æķĻå¸ĪèµĦæł¼ è¯ģ +L yn +\ User +äºĮ åıĶ +éĶĻ ç»¼ +ï¼Į她 è¿ĺæĺ¯ +L ikes +P f +ãĢĤ æĺĵ +æīĢ åIJ¸å¼ķ +天 æľº +ä¸Ģ个 ä¸Ģ个 +èĶļ èĵĿ +åħĭç½Ĺ åľ°äºļ +ĠW OM +ĠRes urrection +å¢ĥ éģĩ +B ALL +åľ° å°± +çͱ å¾Ĺ +_s hell +.m m +åĸĺ ä¸įè¿ĩæ°Ķ +ĠK lo +yn aptic +Fire fox +ĠNewsp apers +ä½ı ä»ĸçļĦ +ĠÎ ¨ +Ġsquare ly +讨论 äºĨ +ï¼ĮåıĹ åΰäºĨ +C ities +æĪij 好 +ç´¯ çļĦ +èļ ¯ +åīį æİĴ +_s leep +ï¼Įä¸Ģ åŃĹ +á c +ãĢģ åªĴä½ĵ +æĪij ä¹ĭåīį +ass ment +ç»§ç»Ń éģĵ +Ġspect ator +[ Test +Ġ å¼Ģåıij +ç͍ æĹ¶ +,åħ¶ ä½Ļ +ĠJacob ian +Ġcyst s +F ellow +h q +ï¼Į 认è¯Ĩ +åºĶ该 çļĦ +.add Widget +Fac et +大 ä¸ī +表 çİĩ +æĵįä½ľ æĢ§ +.B ottom +A thlet +\ footnote +ar ized +ĠO MG +å¾Ī åı¯èĥ½ä¼ļ +Ġshrink age +-b efore +Mem o +ãĢĤåħ¨ 书 +ä¸ŃåĮ»èᝠ大åѦ +ĠZel da +ĠiP ads +Ġtrans itive +_P K +士 大夫 +æĭį æīĭ +ĠPre heat +çļĦåıijå±ķ è¶ĭåĬ¿ +ï¼Į æĦıæĢĿæĺ¯ +ĠAn and +'] ] +Ġcharacter izes +ï¼Į ç»ĥ +ä¸Ģ 毫 +(n ums +éĹª åĬ¨ +. 
& +" testing +B ull +ï¼Į çͳ +ess aging +ĠD ressing +ä¸İ å®ŀè·µ +å¹² ç»ĥ +管çIJĨ å¹³åı° +ä»ħ åľ¨ +æ³¢ åĬ¨çļĦ +çĤ¼ éĩij +b road +re asing +ãĢģ è¡Į为 +æĽ´ æ·±åħ¥ +æł¹ æ·± +åĬ© æķĻ +C ultural +Type Enum +.C hat +ä¸įæĥ³ 让 +æĥ¨ åı«å£° +åĭ¾ å½ĵ +çļĦçĥŃ éŨ +ï¼Į 温æŁĶ +è¿IJ åΰ +å¤į çĽĺ +OD S +ç¨İ åīį +Ġbackpack s +Le ague +éĢļéģĵ çļĦ +ä»ħä»ħ åıªæĺ¯ +ç»· ç´§ +Ġtroll s +( section +Ġpre amble +åIJĮæ¯Ķ ä¸ĭéĻį +ï¼Įä¸įæĸŃ çļĦ +篮çIJĥ åľº +Ġ çݯå¢ĥ +åij¨ åIJij +éĢĢ è·¯ +лÑİ Ñĩ +ï¼Į éĢĤå½ĵ +çĶľ ç¾İçļĦ +ĠMicro biology +Mad ison +f ried +Authent icated +æīĭ 头 +_com pute +Ġ 临 +å¥ĸ 竳 +ä¾Ľ 大家 +åįĩ èħ¾ +ä¹ħ èĢĮä¹ħä¹ĭ +âľ ĵ +æķ´ä¸ªäºº éĥ½ +omencl ature +ãĢģ æĢ§ +ãĢģ æ²Ī +-m etal +Arg entina +case cmp +管éģĵ çļĦ +\ Support +é£İ è¡£ +å¡« 满 +Ġreson ances +Ġdischarg es +å¾®ä¸įè¶³ éģĵ +以 èĩ³ +è¿Ľè¡Į è°ĥæŁ¥ +ĠRev is +ä¸įèĥľ æķ° +eni ably +" os +( DB +. Uri +ï¼Į E +ãĢĭ è¦ģæ±Ĥ +åįİ ç¤¾ +stra ctions +åĸ³ åĸ³ +éķ IJ +项 å·¥ä½ľ +é£ŀ é©° +ï¼ĮæĢ» è§īå¾Ĺ +ĠInv itational +建éĢł çļĦ +ĠL ESS +Ġpe asant +èĢĥ ç©¶ +æĤĶ æģ¨ +ĠRo oney +Ġpanc ake +ç¯Ĩ åĪ» +ĠDix ie +éĺ² çģ¾ +缸åħ³ éĹ®é¢ĺ +(c ategory +Thought s +he er +æģ© æł¼æĸ¯ +Ġaf ternoons +åĨĴ çĬ¯ +çļĦä¸ĭ åľº +Ġ 两个人 +ĠF ILTER +ä¸ĭ è¿Ľè¡Į +Ch orus +Ġgrand e +Ġwave let +ï¼Įè½» 声 +, æ²»çĸĹ +ï¼Ł éĤ£ä¹Ī +ï¼Įä»ĸ çªģçĦ¶ +Al gebra +é¦Ļ æ§Ł +æ²¥ å¹² +od in +ï¼Įåħ¶å®ŀ æĺ¯ +s port +types cript +Ġa ussi +ï¼Į 询éĹ® +int el +ush a +POP ULAR +PFN GL +å¦Ĥ å½Ĵ +缴 缴 +Ġtrans planted +SE TS +åĽ´ åī¿ +çªŁ 窿 +åĪ« èĩ´ +ç´§ æī£ +ä¸Ģ缴 èĩ´åĬĽäºİ +Ġpun ishing +è¾¾æĪIJ ä¸Ģèĩ´ +é«ĺ é¢Ŀ +ĠMult iply +, æī¾åΰ +ãĢģ 对äºİ +èĢĮ éĢłæĪIJ +çIJĨ ä»ĸ +èĬ ¡ +å¿« æīĭ +åij½ åIJįçļĦ +func s +æĪ·å¤ĸ æ´»åĬ¨ +Ġber th +C p +Ġde ze +ach al +çª Ī +é¢ij 次 +ĠCampaign s +Ġelong ation +ĠF ruits +第äºĮ éĺ¶æ®µ +äºĴèģĶç½ij çļĦ +ĠScor pio +ï¼Įè¿Ļ åĩłå¤© +åΤ å®ĺ +ĠCR ISPR +æł½ æ¤į ++ ---------------- +åľ¨ å¸Ĥåľºä¸Ĭ +åīį äºĶ +ĠS EEK +å°Ĩ ä¹ĭ +Ġsol vers +éĥ¨éŨ åĴĮ +çĶĺ æĦ¿ +ĠTrust s +éĴī éĴī +' Connor +ĠP c +ãĢģ 京 +ï¼Ī ç®ĬæĻ®éĢļåIJĪä¼Ļ +è¿Ļ个 åĽ½å®¶ +ather s +Web inars +Ġtweet ing +Ġrepublic an +ãĢĤ ä¹ĭæīĢ以 +ï¼Įå¹¶ æĬĬ +Ġphot 
ometric +Ġpast ime +åĿIJ å§¿ +读书 人 +æķ°åŃĹåĮĸ 转åŀĭ +Lat ency +_un register +ĠHand ic +Ġdehydrogen ase +ĠJ FK +_head s +éĿ´ åŃIJ +ĠDEF INE +lod ash +ll is +ĠK ING +é»ij åIJįåįķ +Ġlocal Storage +ĠLe ib +WH Y +èħ¥ åij³ +åĬł ç´¢ +ãĢĭ æľī +åį´ è¦ģ +éĺ³ åı°ä¸Ĭ +ä¸ĥ æĹ¥ +-C V +ä¼¼ä¹İ æľīäºĽ +" use +( ast +C and +r ude +ĠH HS +ans son +ç¥ŀ éĩĩ +.Err UnexpectedEOF +æºIJæºIJ ä¸įæĸŃçļĦ +N ev +c q +i eth +çĻ ¼ +ä¸ĸçķĮ åIJĦåĽ½ +-S A +âĢĿä¸Ģ è¯į +Ġv ets +Âĥ Ãij +ĠBatter ies +. ce +é«ĺ å±Ĥ次 +çĤ¹ çĿĽ +åĽŀ 款 +æĥĬ åĬ¨ +ĠMc Gr +å¼Ģ设 äºĨ +ĠLind en +åħ´èĩ´ åĭĥåĭĥ +) è¿Ľè¡Į +put er +åħļ 课 +常è§ģ éĹ®é¢ĺ +symbol s +æ¸Ń åįĹ +åĮĹ æĸ¹çļĦ +âĢĵ the +lor o +åŁ¹è®Ń ä¸Ńå¿ĥ +æĺİæĺİ æĺ¯ +Ġintrins ically +åħ¶ çī¹å¾ģåľ¨äºİ +åħĭ é²ģ +ĠIM G +Ġouts ourced +çªĺ è¿« +D up +el ift +æ¯Ķ å̼ +å·²ç»ı å®ĮæĪIJ +ï¼Įæī¾ åĩº +S ri +ãĢĤ 建 +ay ama +Ġon Change +ĠM ee +红 çĿĢèĦ¸ +Not Supported +è¿ĺ没 åΰ +ãĢĤ åħ¥ +Ġindex Path +æĩ ī +ä¸Ĭæµ· çļĦ +èĤĿ èĥĨ +Sen ate +âĤ ¹ +åħ¬ åŃIJçļĦ +Ġmag ician +åĬłéĢŁ åύ +Ġscar ring +Anim ations +Sn ippet +_REM OVE +ä¸į æ¶īåıĬ +å·²ç»ı éĿŀ常 +Ġgr ills +壮 丽 +çī¢åĽº æłijç«ĭ +ä¿Ĺ è¯Ŀ +Ġ åĩłä¸ª +ĠP IO +ĠB urt +æĿ¡ çIJĨ +ES PN +CT S +unders et +ĠMind fulness +ãģł ãģij +ĠF ung +ç͵ åħī +åĮĹ çº¦ +cul pt +,ä¹Ł ä¸įæĺ¯ +èļ £ +. astype +æŁ³ èĭ¥ +ï¼ĮåĴĮ ä»ĸ +ï¼ĮåIJİ æŀľ +åζå¤ĩ çļĦ +ĠItal ians +ãĢģ æıIJ +ä¼ļ éĢļè¿ĩ +Ġint val +æħĪ ç¥¥ +f q +ĠV IN +Ġend oscopic +ï¼ĮæĪij æĦŁè§ī +ik ov +ï¼Į她 çİ°åľ¨ +Le aving +Ġkick er +Ġboo ze +对æĤ¨ æľīæīĢ帮åĬ© +- earned +ĠD HA +May or +@ hotmail +m td +_config ure +ãĢģèĬ± çĶŁ +. 
space +> } +ï¼Į æ¼Ķåijĺ +ï¼Į æģ°å¥½ +æĥ ĩ +assert False +Net works +大å¹ħ æıIJåįĩ +ĠWhis key +j ad +ä¸Ģ æķ° +å°± å¤ļ +.f inal +Ġinc ense +Ġgross ly +Ġrais ins +ï¼Įä¸į 顾 +ï¼ĮåĩĮ ä»Ļ +f emale +Ġo phthalm +æīĵ åĩºäºĨ +Ġautom orphism +åIJĥé¥Ń äºĨ +梳 å¦Ĩ +å¦Ĥä½ķ æīįèĥ½ +Ġaud ited +ç͵åŃIJ çīĪ +èΰ èīĩ +èĭį çϽçļĦ +camp aign +Ġseren ity +\ paragraph +Ġfor ging +æĶ¶ åľº +éĻ¢ ä¸Ń +æĸŃ æĸŃç»Ńç»Ń +å²Ľ ä¸ĬçļĦ +èĩ³å°ij è¦ģ +Ġjew ish +Ġз ап +f inger +ãĢĤ ç»ı常 +iel s +Ġsl ain +appe al +Ġ ________________ +ãĢģ 请 +ä¹Ł æľªå¿ħ +èĤ© éĥ¨ +Assign able +Ġ ÙĤ +æµĵ çĥĪ +Ġ{{ # +Ġ 裴 +ï¼Į 仿 +ĠI BS +äºļ èĥº +Ġsanit izer +ä¸į ä¸Ń +Ġget Item +ange a +æĶ¶ çľĭ +åį´ ä¸įèĥ½ +夹 å±Ĥ +åĴĮ éľĢæ±Ĥ +ast atin +Ġprot agonists +ï¼Į åĶĩ +ĠF TX +Ġdec als +åĽ½å®¶ åħ¬åĽŃ +ĠLeaf s +èĮ ´ +æĢĿ æ½® +ĠCon cerning +çϾ年 åīį +pattern s +人ä½ĵ åĨħ +ä¸į åħ¶çĦ¶ +åĪ© ç´¢ +vent us +ãĥ İ +(b uff +Vis iting +Ġpier ced +ĠP umps +Ġwh ipping +$$ $$ +Ġheal er +ĠM elt +æľĢ åŁºæľ¬ +å¿Ĺ åIJĮéģĵ +- mer +d ont +ï¼Į èŀį +ĠP URE +两个 æīĢè¿° +æķĮ æīĭ +Min imal +Ġnour ishment +ĠM HC +_PRO TO +widget s +çļĦ æĹ¶åħī +ĠEd ith +ĠCalcul ation +! ... +left rightarrow +à¸ Ī +æķ°åŃĹ ç»ıæµİ +åĩ¯ çijŁçIJ³ +Ġfurn ishing +ĠBack end +åĵĴ åĵĴ +ãĢĤ ä¼łç»Ł +åĴĮ æĮijæĪĺ +æĮ¥ æ´Ĵ +ĠGro ove +.b ias +exper iment +D inner +M ast +V u +ì Īĺ +ik in +ĠNC ERT +ĠLINK S +Ġmysq li +, åŃ©åŃIJ们 +.s quare +åĮĹ æ´ĭ +å£ģ åİļ +åķĨä¸ļ åĮĸ +å¡Į éĻ· +æijĦåħ¥ éĩı +ĠB IM +ä»İ åı¥ +_c ov +åĬŁ åĪ© +Ġrun away +é»ij éģĵ +综åIJĪ å®ŀåĬĽ +ä½ĵ åĮĸ +Ġ ä¹ī +Ġ ä½įäºİ +æħĮ ä¸į +Ġrefriger ated +æ¡ĵ åħ¬ +. 
amount +F leet +} z +ĠP up +è¿ĺ åī©ä¸ĭ +åĬªåĬĽ åŃ¦ä¹ł +-z one +æĶ»åĿļ æĪĺ +Ġactual ity +,æĪij们 è¦ģ +,æ¯Ķå¦Ĥ 说 +Ġn ova +é£İ è½» +ç½ij 课 +ĠAd ler +Log out +ĠMush room +Islam ic +/ events +ĠS LE +-st ack +Ġscan f +áĥ Ŀ +( Field +v ary +ãĢģ åľ¨çº¿ +Ġj p +Ġimp ede +Ġsay ings +çł´ çļĦ +æĸ¥ è´£ +ial e +ie gel +æĪij们 å¿ħé¡» +æĪ· æĻĵ +æĸĩåĮĸ æ´»åĬ¨ +è·ij å¾Ĺ +æ·· è¡Ģ +ãĢģå¸Ĥ æĶ¿ +é¢Ĺç²Ĵ çī© +ç§»åĬ¨ éĢļä¿¡ +éħįç½® æĪIJ +D rupal +O OT +ï¼Įå°± å¼Ģå§ĭ +ä¸ĩä½Ļ åħĥ +k ies +ãĢģ G +ï¼ĮæĬĬ æĪij +ĠUn ions +梦 çij¶ +ãĢģ åıijè¡Į +头 åıijçļĦ +.d elay +.B inary +ç²Ĺ çķ¥ +, æĸĩ +, æĬķèµĦèĢħ +Ġ èIJ¥ä¸ļæĶ¶åħ¥ +çļĦ 表æ¼Ķ +Ġg eese +èĩª å¹¼ +av s +å°½ åħ¶ +ï¼Įä¸Ģ缴 åľ¨ +æĻ¯å¾· éķĩ +失 å¿Ĩ +Ġcontinu ance +Ġì § +æľī çĶŁ +-t opic +æľ¬æ¬¡ èĤ¡ä¸ľå¤§ä¼ļ +æĢ¨ è¨Ģ +Ġ-------- --- +éĻ¡ å³Ń +Ġbrig ade +æĺ¯ æĿ¥èĩª +æ¡Ĥ æŀĿ +, åĽ´ç»ķ +Ġun sub +}$ \ +ĠEd itions +_th resh +ucc o +m ach +ãĢģ èĬĤèĥ½ +ax ed +éĩij èĬ± +æŃ» è·¯ +Ġshut s +èĵ¬ æĿ¾ +Ġcomed ic +Cont emporary +è± ī +Ġattend ants +Coll ins +" strings +Ġto gg +LO OK +æģ¶ èĩŃ +\ ud +ĠH AR +ĠDE VELO +æ¸Ĺ åĩº +Ġche ered +ï¼Įä½Ĩ è¿ĻäºĽ +.s el +Ġtele health +_COMP LETE +Ġspectrom eter +Ġc rou +List Box +ĠJan ice +çĤ« éħ· +ãĢĤ è´¾ +ä¸į 使ç͍ +ãĢģ é¢ľèī² +缴 æ°Ķ +ĠJack pot +ï¼Įä¸Ń åįĪ +id l +æĢİä¹Ī å°± +ä¹Į åħ° +误ä¼ļ äºĨ +è¢ħ è¢ħ +le ms +Ġtra versal +ĠØ ¥ +èĩªå·± æĿ¥ +Ġcut est +SY NC +-tool bar +uchs ia +ĠC ipher +é© ® +åŁº å°¼ +æĴ µ +atal ina +Ġå¦Ĥæŀľ æĺ¯ +åķª åķ¦ +Ġs ash +çĬ Ĭ +åĵĪ çī¹ +æļĹ å¤ľ +çļĦ天 èµĭ +, èĥ¡ +| ( +ãĢģ åij³ç²¾ +åľ° æľĿ +_f ramework +.get Block +ĠBro chure +驱åĬ¨ åĬĽ +æ¯ı次 éĥ½ +) y +Ġo o +ä¹ĭ æķħ +ĠEx haust +ãĢģ éĶĮ +åŁİ 西 +play list +Ġill usions +åĭĩ èĢħ +çŃ¾è®¢ åIJĪåIJĮ +ÏĮ ÏĤ +ĠHack er +享åıĹ çĿĢ +St reng +å¼ł 仲åĨĽ +è¾ĥ æħ¢ +åĴ¨è¯¢ å¸Ī +åķ° åŦ +ç»Ĩç»Ĩ çļĦ += UTF +ï¼Į 夫人 +ct urnal +_d c +Ġspecial ise +çľĭåΰ è¿Ļ +è¿ŀæİ¥ æīĢè¿° +ĠFile System +,ä»İ æĿ¥ +³³³³³³³³ ³³³³³ +l ng +åĴĮ è°ĥæķ´ +ĠU CS +Ġbl asp +åİŁä»¶åıĬ å¤įåį°ä»¶ +ãĢģ æīĢ +ĠW EST +ĠV ive +ĠCor ridor +Ġnons ensical +ĠB au +ck en +åľĪ åŃIJéĩĮ +ï¼Įèĥ½ åľ¨ +Ġhun ts +` ; +yl lic +Ġsocial ize +æĺ¯ 
éĤ£ +Ġst alks +æĥ³ è§ģ +iff ies +Button Item +ĠGood win +Ġiniti ates +ot onic +ãĢģ æĬĵ +ĠTer rain +ï¼Į åĬĿ +ï¼Įè¿Ļ è¯Ŀ +ĠReve aled +人 åΰ +ble y +åħ¶ä»ĸ æĸ¹å¼ı +å²Ĺ äºŃ +æľ¬çĶ³è¯· çļĦ +c umin +ĠF au +åİ» å¾Ģ +åħ¨ ç§° +Ġcivil ized +) âĢĵ +åĽŀ 身 +oid a +Ġflash y +C razy +ãĢĤ æľīä¸Ģ天 +Ġ| -- +æĬļ æij¸çĿĢ +ĠRealt or +: UI +Ġ è¡¥ +ou m +ä¸Ģ çĽı +åīį åľº +ä»Ģä¹Ī ä¹Łæ²¡ +é£ŀ åįĩ +. related +Ġmethod Name +åı¶ çļĦ +æĸ¹åIJij åĴĮ +çļĦé«ĺ å±Ĥ +-sh op +ipp les +ĠPain ter +åij ¤ +-f ollow +.b rowser +Ġrig s +á» į +( transform +\ chi +che at +att rib +ĠGe ological +ï¼Įé«ĺ 度 +ï¼Įäºİæĺ¯ ä»ĸ +ĠCab ernet +- about +大 å±ı +åıijçĶŁäºĨ ä»Ģä¹Īäºĭæĥħ +æľī è¶³å¤Ł +èĢĮ 没æľī +ç§įæ¤į çīĻ +z r +ä¹Ł æľª +æĻ® æŀĹ +æłij çļ® +Ġopp oses +Law rence +L ef +_ character +Ġide ologies +Ġmeth amphetamine +ï¼ĮåºĶ åľ¨ +çĭ¬è§Ĵ åħ½ +ĠJ Label +ä»ĸ们 å°± +ĠMer lin +ATT LE +Ġ`` , +æĺ¯ å¾Īæľī +éĤ Ī +éĺµ é£İ +ãĢĤä»ĸ 对 +亦 çĦ¶ +æĸĩåѦ çļĦ +ï¼Įå¾Īå¤ļ æĹ¶åĢĻ +.ext ensions +ĠParl amento +V acc +Ġ åĮ»éĻ¢ +ĠS arg +Ġwh ining +äºĭ ä¾ĭ +ï¼Į她 çŁ¥éģĵ +å·¥ç¨ĭ ç³» +çļĦå°ı åĮº +Ñĭ е +ãĥ¼ãĥ ł +or ated +ï¼Į å¸Ĥæ°ij +Ġf abs +il ogue +ĠI OC +åħ¨ æĿĥ +çĥŃ æºIJ +åŃĹ æł· +DIT IONAL +Ġ åĮħè£ħ +it ized +ĠB GC +æł¼ 丽 +åĮħ éĤ® +è¯ģæį® 表æĺİ +ç³ĸå°¿çĹħ æĤ£èĢħ +èł¢èł¢ 欲 +ï¼Į çͷ女 +Ġfact ored +é¦Ļ èĤł +ĠBi ochemistry +/ Core +D awn +ãĢģ åħ³ +æĹł ä¸İ伦 +çİ© è¿ĩ +è¡£ è¥Ł +ĠUI ViewController +< Int +M ISS +)) *- +缮æłĩ ä»»åĬ¡ +Interpol ation +æīį åĪļ +å·ŀ åºľ +rap ies +ãĢĤ ä½ķåĨµ +Pro g +AB STRACT +jud ge +ĠDM V +èĩªæľī èµĦéĩij +K an +Ġ 级 +ï¼Į åĪĿæŃ¥ +çļĦ æĬĹ +åΰ ä¸Ģè¾¹ +鼶 ä¸ĭ +åĢį 以ä¸Ĭ +Ġphosph olip +-cut ting +ĠM anny +åĴĮ éĤ£ä¸ª +ĠPhot ographs +溪 æ°´ +çĺ¦ å¼± +è½ ² +好 çĶŁ +.C ache +çļ® èĦĤ +ç»§ èĢĮ +亿 欧åħĥ +æ¸ħéĨĴ è¿ĩæĿ¥ +æīĢä½ľ çļĦ +ï¼Į çĤ¹äºĨçĤ¹å¤´ +ãĢĤ éī´äºİ +ï¼Įå°ı å§IJ +ä¹Łè¦ģ 注æĦı +\ lib +é«ĺ äºĮ +åĪĨæŀIJ æĸ¹æ³ķ +ĠCount ing +æĮ¥ æĮ¥æīĭ +ï¼Įéļ¾ ä¸įæĪIJ +âĻ ¦ +ĠGonz ales +çļĦ æĿ¡ä»¶ä¸ĭ +ne au +çļĦ åľºæīĢ +Ġm ids +Ġd ao +ĠDis qus +ĠMod ification +. 
ant +çļĦ èĥľåĪ© +ĠA VR +ĠG est +ĠCor vette +Scr atch +- Christian +L eb +Ġon der +Ġread ership +Ġver a +横 æĪªéĿ¢ +ĠLaure nce +E yes +J PEG +ãĢģ åΰ +ER K +SE P +/( (- +ĠCit rus +$ labels +ï¼ļ 人工æĻºèĥ½ +ĠSt ochastic +Ġunder mining +/s vg +æIJľ æķij +Ġchrom at +ãĢģ åijķåIJIJ +èĬ± åijĹ +å¹¶ä¸į åĥı +_se p +ul ge +ĠCont ra +MC s +pir acy +hyper link +Sever ity +V H +è·¯ æ¡¥ +åĺī å®ļ +b anner +ĠE HR +çŃī 设æĸ½ +æŀĿ 头 +* The +m ium +çĤ¹ 个 +Ġcr amped +Ġunderstand ably +BO OT +ĠCur ious +.list dir +è¯ ĺ +å½Ĵ æ¡£ +Ġwa ivers +ĠKath mandu +ĠRus so +í Ļ +ĠQ M +æģ© 人 +åľ¨ ä¸ŃåĽ½çļĦ +æĪij们 ä»İ +æĿĥ è´µ +建设 éĵ¶è¡Į +_alloc ator +æĹ¶ èµ· +è¿ĺ æīĭ +Ġsub graph +触 ç͵ +æī§è¡Į ä»»åĬ¡ +鸿 èĴĻ +太åŃIJ å¦ĥ +论åĿĽ ä¸Ĭ +- location +S AR +PO OL +Ġlib el +( ed +ãĢģ æ¸ħæ´ģ +âĢľ æĤ¨ +ç»ı å¼ĢåĮº +è½» å·§ +åħŃ äºº +éĵ¸ å°± +B ED +en en +ï¼Į æĭĵå±ķ +ĠO y +éĹ® 询 +Ġextrem ists +æĺ¯ä¸įæĺ¯ 羣çļĦ +Ġpl ating +Ġrep aid +å¨ģ æĸ¯ +è·ij çĿĢ +çīĻ ç§ij +红åįģåŃĹ ä¼ļ +l ates +èįī èįī +åĹ Ŀ +Ġinstruct ing +åΤåĨ³ 书 +ï¼Į æİĴåIJį +ãĢĤ åİŁåĽł +â̦â̦ âĢľ +è¿Ķ ç¨ĭ +æĻ¨ åħī +_bl ank +-s urface +ole v +è¿Ķ åĪ© +Ñĥ п +che e +çα åĴĮ +æĺ¾ç¤º è£ħç½® +å¹¶ä¸į éļ¾ +Ġstation ery +S ymptoms +没 å¾Ĺ +Ġconst ru +è´Ń åħ¥ +å¥ĩ çijŀ +-in vest +Ġder og +ภ¹ +æĬ½ äºĨ +Rel ay +èĢ Ĩ +好 ä¹ĭåIJİ +ä¸ī åıª +åĹ ª +è°IJ æ³¢ +BM W +Ġathe ists +- ounce +_ ignore +ï¼Į é»ijèī² +Ġright fully +äºĨä¸Ģ çϾ +Ġbr im +ä s +æŀª 械 +Ġbio film +åĴĮ è§Ĩé¢ij +å°±æĺ¯ ä»İ +Ġesc orted +- III +ĠF U +Ġwild fires +T win +ĠO BS +_f ocus +社ä¿Ŀ åį¡ +Ġb ordered +val uation +Ġtri ples +è¾ĵ æİī +ĠBack yard +Ġlegit imately +æ¿Ģåħī åύ +M ismatch +ĠB IA +度 æķ° +å°±æĺ¯ å¦ĤæŃ¤ +Ġlist a +ĠYork ers +Ġtim id +宽 大çļĦ +, ç½Ĺ +nd s +æľª 央 +Ġtri um +rest rict +çľĭä½ľ æĺ¯ +Ġf les +ĠC ENTER +ä¸İ åİŁ +RE ET +_d ispatch +é¢ĦæľŁ ä¿¡ç͍ +hard ware +re ich +ĠA β +é¾Ļ äºķ +Ġsand stone +^ (- +ä¸ĩ ä¼Ĺ +å±Ĥ åıł +éĺ´ çº¿ +èļ ¤ +ï¼Į è£ħä¿® +ï¼Į çµģ +ĠH ats +å¹´ åħ¬åı¸ +çł´ æĹ§ +Ġsn ag +Det roit +éĺĢ èĬ¯ +ĠSing ing +Ġpin ning +ill ust +Ġk ms +.s witch +ĠSch we +ĠHost ed +转载 请注æĺİ +(pro to +P g +S izer +éĥ½ 
éļ¾ +ĠUn employment +vis align +éͤ çĤ¼ +X t +ï¼Į ç¬ij容 +ï¼Į åģ·åģ· +Ġ( ++ +åı¯ æĤ² +åģļ çļĦæĺ¯ +æĺ¯ä¸Ģ æĬĬ +æĻķ äºĨ +Ġfle ets +奶奶 çļĦ +羣è¯ļ çļĦ +ç͍æĪ· éľĢæ±Ĥ +Ġpref s +Ġ æĸ½ +ãĢģ åĪ¶åº¦ +转 æİ¥ +.S uccess +Ġter pen +ä¿Ħ è¯Ń +ĠåįĹ å®« +Ġinfinit es +Ġd av +æĻ¾ æĻĴ ++ |\ +ĠC BT +ke red +ï¼Įä»ĸ å¿ĥéĩĮ +è·³ ä¸ĭ +ĠSan ford +Ġinfl amed +Ġ æĥħ +Ġp arm +form ats +Ġass es +.j av +\\ / +Ġsun day +.make Text +ãĢģ ç͵æ°Ķ +th reat +ĠL DS +没 ç͍çļĦ +Ġmathematic ians +F ingerprint +} X +Ġed s +é¾Ļ æ¹ĸ +éĹ® äºĨä¸Ģåı¥ +åĨħ容 è¾ĥå¤ļ +ĠHam let +ĠValid ator +ãĢĤ åıĤåĬł +ĠP enguins +Ġbo on +ä¸Ģ段æĹ¶éĹ´ åIJİ +ikh ail +A AC +C ole +is ión +-s um +ä¸īè½® 车 +.lin space +_m aps +éĶĻ èIJ½ +Ġbeaut ies +-ij erph +R b +Ġextrem ity +è¿IJèIJ¥ çļĦ +缸æıIJå¹¶ 论 +Ġin activation +èĢĮ æĪĺ +åĽĽ 竳 +ï¼Įä»ĸ ä¸İ +æłij èĭĹ +åŃĶ éļĻ +rich ment +è°ĥçłĶ åijĺ +Ġundef eated +, åħ³éĶ® +, 人家 +: center +Ġcommun icator +è¿Ļç§į çĸ¾çĹħ +ãĢģ ä¹³ +Ġpl aza +æĢİä¹Ī çŁ¥éģĵ +-m outh +-cl uster +. Standard +ç¦ı æĻĭ +.Cl uster +欢声 ç¬ijè¯Ń +æľī 天 +Ġsche mas +æIJŀ ä»Ģä¹Ī +.t w +ï¼Įå½ĵ 天 +åľ¨åľ° ä¸ĬçļĦ +did n +è±ģ è¾¾ +Ġb ays +oc ry +ä½ł 为ä»Ģä¹Ī +åζ èĥľ +æºIJ æĢ§ +_res olution +é¦Ħ 饨 +Ġhom er +, ä¸ĭåįĪ +ĠG TP +åĴĮ æĮĩ导 +转 åŀĭçļĦ +IT IONS +åĨ·åį´ æ°´ +åĴĮè°IJ çļĦ +Suggest ions +ï¼Į åĺ´ +ä»· è¿ĺ +- contract +ãĢģ æ´Ĺ +ä½ł åΰåºķ +_d ummy +å¼ł åĩ¡ +åħ¬åħ± èµĦæºIJ +Ø§Ø ³ +. 
ax +p ig +ĠA ly +irect or +Ġsw ore +æĸ¹åIJij ä¸ĬçļĦ +NO LOGY +ĠHost s +å¦Ĥ çĥŁ +Ġblock age +Ġmonot one +G raham +å°± å¾Īéļ¾ +ä¸ĵä¸ļ 人æīį +ç®Ģåįķ æĺĵ +ĠTrans fers +åijĨ çĿĢ +æĭŃ缮 以å¾ħ +ess o +_c ells +äºĨä¸Ģ æĬ¹ +ä½İ å¼Ģ +-g ray +-Ch ief +大ä¼Ļ åĦ¿ +ï¼ģ æĪijçļĦ +Business es +) C +ĠY ii +å¸Ŀ åIJĽ +- land +个 交æĺĵæĹ¥ +rid ing +楼 é¡¶ +Ġtim ings +Fe aturing +Ġdoubt ed +åĽĬ ä¸Ń +éħį 饰 +Ġsport y +æ¹ĺ 西 +ĠW IRE +Ġso ared +Ġmon soon +Ġmur als +ĠEss ence +Ġexch anger +Ġfec al +ĠUIL abel +( load +D AG +S ans +ï¼Į æīİ +å±ķ åı° +ï¼ĮæīĢè¿° è¿ŀæİ¥ +触 缮 +ĠAf ternoon +Ġconven ed +Art ists +n th +ĠB AB +æĪij们 è¿ĻäºĽ +å°±æĺ¯ 被 +ĠBy rd +exp ires +Th or +竣çĦ¶ ä¼ļ +æĿĥåĪ© 人 +FL D +IRON MENT +W itness +ĠF acing +è¿ĺæľī ä¸Ģç§į +åĿIJ èµ·æĿ¥ +lp Vtbl +Ġ'_ ' +f urther +ãĢģ ä¸ĵå®¶ +å¦Ĥ äºij +声 åѦ +ï¼Įåľ¨ äºİ +游 åİĨ +-g rained +_ IGNORE +ä¸Ģ çϾä¸ĩ +ĠF ell +_INS ERT +ĠVict ims +ãĢģ æĽ² +ĠV GA +Pro ve +ĠStaff ing +Ġham pered +è¿IJæ°Ķ 好 +è¶Ĭ 强 +æł¹æľ¬ ä¸įåı¯èĥ½ +åIJ¯åĬ¨ 仪å¼ı +Ġappet izers +èĥ½ æľīæķĪ +åĪĨ æĹ¶ +OM ET +ï¼Įåıį åĢĴæĺ¯ +RT L +å¢ĥå¤ĸ è¾ĵåħ¥ +, éħįåIJĪ +.s ingle +åĮĹ è¾° +çľģ åŁİ +Ġhum ankind +Ġmis chief +æīĭä¸Ģ æĮ¥ +Bright ness +) }$, +- æĢ» +åİ» æĭ¿ +Ġunder stated +缴æİ¥ æİ¥è§¦ +Ġplain text +ĠS our +åľ¨ éĿ¢å¯¹ +Ġad o +第ä¸Ģ éĺ¶æ®µ +-p ull +ĠMar iners +)) **( +Ġhyp nosis +çļĨ åı¯ +åŀĥåľ¾ å¤ĦçIJĨ +Ġ"_ " +Ġbisc uit +In Progress +Ġdes erts +ï¼ĮæĽ´ åĪ«è¯´ +è°± åĨĻ +å®Įç¾İ ç»ĵåIJĪ +çĭŃ éļĺ +æĹ¶ 说 +ĠInc or +èĢIJ åıĹ +éĥ½ä¸į è¡Į +ĠH U +Ġhe als +ä¹Ł ä¸Ģ缴 +çϾ åı¶ +页 岩 +滤 æ¶² +ĠRob ots +å¤ļäºĨ åĩłåĪĨ +åı³ä¸Ĭ è§Ĵ +P etition +Ġsh aker +å°ı ä¸ĥ +å®ī 详 +åIJĥ åIJĥ +ä¹° è¿ĩ +Ġtw ilight +S olving +ro ductive +å°± åĸľæ¬¢ +åĽł 人èĢĮ +å¹¶ä¸įæĺ¯ å¾Ī +com parable +åĻ » +ä¸ĵ ç¨ĭ +è´¢åĬ¡ 顾éĹ® +Ġa pl +ãĢĤ çľĭçľĭ +æ´ º +éĴ» æĪĴ +ĠØ ¬ +/ 人 +对 æ¯Ķä¾ĭ +æĪĸ æļĹ示 +èĢĥ äºĨ +æĬķèµĦ åĴĮ +ãĢĤæį® ç»Łè®¡ +ĠÅŁ i +Ġmuscul oskeletal +le aders +.d est +å¥ĩ å¼ĤçļĦ +ä¸ĵä¸ļ åIJĪä½ľç¤¾ +Ġå½ĵ åĪĿ +åıĺæĪIJäºĨ ä¸Ģ个 +åѦ åĽŃ +åıĪ éĩįæĸ° +æķĻåѦ 模å¼ı +ogen es +_ ram +Ġb raz +ĠP MA +é» į +ma ids +rel igious +éĩİ çļĦ +çĵ ¯ +åŁºæľ¬ åħ»èĢģä¿ĿéĻ© 
+ï¼Į为 人 +è¯ ĭ +æĪij éĤ£ +两 段 +çł´ æ¡Ī +çĶļèĩ³ è¿ŀ +Ġcoun sell +(_ ) +In cluding +Ġcomm as +ym in +çŁŃ æĸĩ +Ġrep ur +riend s +åĵĹ åĵĹ +ĠCatal an +å¤ĸ åªĴ +ym ax +çģ«ç®Ń éĺŁ +ï¼Į谢谢 ä½ł +ĠRa ised +çĿĢçľ¼ äºİ +, èĭı +_ we +Ġev asion +Ġunder rated +çĶŁäº§ åζéĢł +ðŁ ļ +夹 åħĭ +éĬ ® +K U +m aven +ãĤ Ģ +ĠH umb +个 ä½ĵçļĦ +Ġ[ ? +reg ions +Vol tage +æĻķ åĢĴ +ì ² +(c ore +ĠMathematic a +ï¼Į 伯 +not ag +éĥ½æĺ¯ åı¯ä»¥ +ĠHol istic +Ġferm ion +à ļ +ree ks +æīĢæľī åĨħ容 +åĨ² è¿ĩåİ» +ÑĤ оÑĢ +ĠHimal ayan +ï¼Į 宣 +æľĪ èĩ³ä»Ĭ +Ġ/ . +太 åĤħ +.load ing +Ġgrat ification +严èĤĥ çļĦ +F IND +ĠR ookie +Ãł o +M aur +ï¼Į æģĴ +Ġgl azing +å±ĭ å¤ĸ +çļĦéĤ£ 天 +å¨ľ å¡Ķ +ĠCK D +Ġ ç³»åĪĹ +ï¼Į åħħ +对 çĦ¦ +è¿Ļæł· åķĬ +ĠAss igned +çŀ ł +åĢĴ ä¸įæĺ¯ +æīģ æ¡ĥ +亲 亲 +Pass ed +Ġlibert arian +B rew +sc opes +j os +社ä¼ļ åѦ +空æ°Ķ 污æŁĵ +C ant +X s +ill is +æĪIJ 羣 +Ġ... " +ç͵åŃIJ 设å¤ĩçļĦ +å°ı æŀĹ +-p recision +_lock ed +ï¼Į ä¼ĺéĢī +ãĢģ è´¢æĶ¿ +ĠR AF +.D ock +她çļĦ èĦ¸ +èĢĥè¯ķ ä¸Ń +. available +Ġto aster +ĠS ED +.s ingleton +表çݰ äºĨ +ĠCH ILD +æ··åIJĪ åĬ¨åĬĽ +Ġin ad +ãĢģ 赤 +ĠR ode +circ um +ĠCop ies +ĠI CS +æĢ» ç«Ļ +çĸij å¿ĥ +ç§Ģ ç§Ģ +çīĻ åij¨ +åĹľ è¡Ģ +_ Request +ro pe +ä¸į åIJĮæĹ¶ +å°± åı¯èĥ½ +ç´§ ä¸įæħ¢ +(c m +Ġter races +æ£Ģå¯Ł éķ¿ +åįģåŃĹ è·¯åı£ +ä¿Ŀ æĹ¶æį· +强 强 +.g wt +Re actions +ï¼ĮéĤ£ä¹Ī ä»ĸ +çĶŁåŃĺ çļĦ +๠ĥ +Ġchrom osomal +ĠJa ime +Ġtant al +j ian +éĩĮ å¤ĸ +æĹł éĹ´ +ener able +èģļ åĬĽ +SH IP +_le af +éĩįéĩį åľ° +( NS +. 
pe +ĠL om +_P OP +B UR +\ S +t æģ¤ +æĿ¿ æłĹ +éĢĢ åĩºäºĨ +Ġpen is +åĢºåΏ å¸Ĥåľº +翻天 è¦Ĩ +ï¼Į ä¸ĵå®¶ +US R +æĿ¾ æķ£ +ä¹Ł å¿ħé¡» +ï¼ļ - +Ġwat cher +Ġglut athione +( bb +B RA +_ room +r á +.p ower +æī§è¡Į æĥħåĨµ +_G roup +æľīä¸Ģ åįĬ +exp anded +Ġbenchmark ing +M ol +ï¼Į ä¼ģåĽ¾ +绵 éĺ³ +éĢ µ +第ä¸Ģ è½® +æĢģ çļĦ +æķ¬ éĩį +Ġd ut +Ġh ikers +å¾ħ äºĨ +ĠCons ensus +æ³ķå¾ĭ æľįåĬ¡ +Ġ è§£ +å¤ĩ éĢī +Ġauthor izes +-r anking +ĠL l +çĻ» æľº +Connect ivity +ãģ¨ ãģĹãģ¦ +.Q t +Ġperturb ed +acam ole +Ġ éĹ» +' er +G ordon +Ġf ission +ãĢģ è£ħç½® +å¾ħ ç»Ń +Ġsec p +æ³¢ éŁ³ +ĠPost ing +ç»§ç»Ń åľ¨ +Sw eden +TX T +Ġcra ps +ï¼Į æ¶Īè´¹ +Ġe arm +ĠM PC +ï¼Įä¹Ł åıªæĺ¯ +bl ade +_r d +è¿ĽæŃ¥ çļĦ +Ġvars ity +( expression +int ro +头 åŃIJ +åŃĶ åĨħ +åij¨åĽ´ çݯå¢ĥ +Ġabsorb ance +港澳 åı° +) dx +çļĦ é±¼ +åı¯ è¾¾åΰ +å°ij女 çļĦ +ä¿¡æģ¯æĬ«éľ² ä¹īåĬ¡ +od il +ab al +ĠW arming +Ġx f +æ¶Ī éģ£ +_T S +è¾ĵ ç»ĻäºĨ +æł¼å¼ı åĮĸ +F c +Ġsc ented +åIJ¹ éĽª +- vers +_p ic +ĠZ ah +éĵģ éªij +Ass igned +Ġsuperv ising +æµ·æ·Ģ åĮº +éĿ¢ æĺ¯ +ĠPa olo +æ·±åħ¥ çłĶç©¶ +Ø º +ĠSpirit uality +lat itude +Ġplacent a +, æ¯ı个人 +æµ Ķ +åĪĨ å·¦åı³ +ĠSh u +åħŃ éģĵ +以ä¸Ĭ æīĢè¿° +夺 èµ° +ĠPop up +Ġbg color +( TM +/ ubuntu +ä¸į æĬĬ +ĠâĪ ª +inn ie +_EX TRA +åĩ¸ åĿĹ +Ġiso forms +ä¸į äºī +åľ° å¹³ +åĮħæĭ¬ ä½Ĩä¸įéĻIJäºİ +å»¶ å¹´ +ĠBurn ett +. 
enc +V ir +Ġo me +è¿ľ æ´ĭ +亲 åı£ +LL VM +ï¼Įåı¯ ç͍äºİ +-block ing +C c +ool ed +ste am +-des cribed +Ġ æľĢä½³ +å°±æĺ¯ æĪijçļĦ +马 å±ģ +åľ¨ä»ĸ éĿ¢åīį +Upload ed +Ġobliv ious +ï¼Į ä¸Ńåįİ +ĠC ART +æľī äºĨä¸Ģ个 +æľī èµĦæł¼ +ï¼Įä»ĸ åı¯ä»¥ +Sign s +λ ά +ï¼Į éĩįå¤į +éĿ¢ 纱 +ĠAn ast +åĿIJ åľ¨ä¸Ģèµ· +äºĴ åĬ¨çļĦ +ï¼Įè¿ĺ èĥ½å¤Ł +ĠKe pler +ĠDel ight +ĠElev ator +Mis cellaneous +ĠVaugh n +ĠS od +ĠE ner +ä¸İ ä¸Ĭ +å·¥ä½ľ éľĢè¦ģ +ĠBrow ne +ä¸įè¦ģ åİ» +æĺ¯ä¸ª å¾Ī +èĪŁ å±± +Land scape +奥æĸ¯ æĽ¼ +å·§å¦Ļ åľ° +æĹ¶ å¿ħé¡» +_m asks +.R ptr +ĠCou pe +æµĵ çľī +å®ŀåľ¨ çļĦ +室åĨħ 设计 +åı¯æĢķ äºĨ +Ġsabot age +天 é»ij +## ' +Ġprogram matic +æĻ® éĻĢ +åĬĽéĩı åĴĮ +èĽŁ é¾Ļ +æł· æł· +.s chedule +Ġwater mark +åħŃ åIJĪ +ĠDI FF +åĹĵ éŨ +Ġlact ate +ç»Ŀ æĭĽ +åįĥ å±± +.P anel +åĢŁ æľº +触 çĬ¯ +Ġhydro carbons +çī©çIJĨ åѦ家 +Sem antic +CD SA +Ġing est +èĿ ¼ +å¥ĭæĸŠ缮æłĩ +.Rem ote +人å±ħ çݯå¢ĥ +ĠD BS +irm ing +èµ¶ çĿĢ +$- $ +Ġ[" ", +.Ref erence +. Intent +ĠB ites +对 大 +åĪĨ åĮħ +éĴ¢ çļĦ +.r pc +å¹¼ èĻ« +ï¼Į 丢 +Ġh c +ose c +æıIJ è¦ģ +ms elves +以ä¸Ĭ æĺ¯ +don ald +_OPER ATION +Ġantagon ists +ãĢĤ 便 +ĠAl onso +å¾® å°ıçļĦ +Ġvent ral +Ġchore ography +ĠQuin cy +. INFO +(" + +ï¼Įè¿Ļ åIJį +å°ij æľīçļĦ +.T emplate +è¶£åij³ æĢ§ +Ġw inger +ãĢģ éĴĻ +QU IT +ï¼Į大家 åı¯ä»¥ +ĠSqu ares +æļĤè¡Į åĬŀæ³ķ +Z M +n est +Ġ è¿ĩåİ» +头 ä¸Ģ次 +常 åľ¨ +Ġtrans ducer +.get State +åĹ· åĹ· +N igeria +çļĦ è¿ĽæĶ» +ro th +oth o +| } +Ġlast Known +çĶŁæ´» çݯå¢ĥ +èĥĥ èĤłéģĵ +åŃ¦æľ¯ çķĮ +ĠLips chitz +: r +R ome +ï¼Ī L +.S cript +èİī ä¸Ŀ +" æł¹æį® +, æĭī +ãĢģ çĭ¬ç«ĭ +ä¿® æķ´ +幸 çģ¾ä¹IJ祸 +_comp letion +Ġm over +Ġdist illation +åı¤ æłij +å°ı康 社ä¼ļ +\ ln +çĥŃ æ³ª +ä»ĸçļĦ å£°éŁ³ +Be er +纵 容 +çijŁçijŁ åıijæĬĸ +ï¼ĮéĤ£ä¹Ī æĪij们 +ld a +Âł åĽłä¸º +ï¼Ľ å¹¶ +ĠPlay ground +éĵŃ æĸĩ +大 åĬŁ +St able +æ·± æĦı +de alloc +.F rame +ï¼ĮçĦ¶åIJİ æīį +ä¸Ĭä¸ĭ çıŃ +ï¼ĮéĤ£ä¸ª æĹ¶åĢĻ +arc a +DF LAGS +APP ING +Ġcomorbid ities +. 
help +åģļ çĤ¹ +è¿Ļ个 æĦıæĢĿ +_c am +åĪĿ è¯ķ +æĹłç©· çļĦ +çĶŁ æĬ½ +åŁº åĿij +éĤ£ä¸ª åľ°æĸ¹ +è¿ĺ没 çŃī +欺 åĩĮ +ï¼Į以便 äºİ +å¤ļ çľĭ +é» ł +è¿ŀ éķ¿ +ï¼Įçİ°åľ¨ å·²ç»ı +Ġbirth place +Blue print +åĿļå®ŀ åŁºç¡Ģ +, è¶ħ +U g +f ighter + ij +è¿ĩ éģĵ +ĠK iev +ä¹Łä¸į æĢķ +ï¼Įåľ° å¤Ħ +C ats +ï¼Į åĵ¥ +ç͵ å¼§ +æĿĢäºĨ ä½ł +Dat as +æ·±åĮĸ æĶ¹éĿ© +âĢ § +äºĭ ä¸Ń +Ġob nox +_UNS IGNED +ি ঠ+/ about +说 æĪIJ +å¿ĥ ä¸ĭ +åij¢ åĸĥ +Pat ent +S oup +Ġt g +ãĢĤ åĨħ容 +èĢĥ åıĸ +Ġpub erty +æī¾åΰ çļĦ +Ġt aut +ãĢĤ å½±çīĩ +ĠG EO +第ä¸Ģ åľº +ĠEM PTY +æĽ¾ å¤ļ次 +æ¯Ľ 骨 +Ġunknown s +Ġt rot +äºļ çļĦ +çªģåĩº éĹ®é¢ĺ +å°Ĩ æīĭ +Ġdiff e +ç¥ŀ åħµ +认 罪 +Ch ap +éĵ¶è¡Į è´¦æĪ· +οÏħ ÏĤ +ĠTrad itionally +ç»ĵ èĪĮ +çļĦ缸åħ³ è§Ħå®ļ +our ism +ä»İ å¤ĸéĿ¢ +æĸĩä»¶ åĴĮ +ha ft +Ġgau ges +ï¼Ł æĪijçļĦ +_f ragment +_c redentials +-m obile +ä¸ĩåħĥ åĴĮ +æ¯ķä¸ļ è¯ģ书 +æİĴæĶ¾ æłĩåĩĨ +Ġobsc ured +ï¼Į W +ï¼Į æĢ§ä»·æ¯Ķ +ï¼Įä»İ æľª +ĠMod erate +± Ãij +Ġ ä¹Łå°±æĺ¯è¯´ +ç® ĵ +-l ibrary +Ġheart break +纪å§Ķ çĽijå§Ķ +w ag +Ġag n +eg ovina +å¨ £ +.d f +_D raw +èħ¹ ä¸Ń +åΰ æĪijçļĦ +ĠIn gram +Char Array +æIJŃè½½ äºĨ +P un +_ terms +åľ¨ åİ¿ +çIJĨ åºĶ +iet f +çļ® å°Ķ +------------------------------------------------------------------------ --- +游åĩ» éĺŁ +N ash +ĠG ains +Ġso b +AC EOF +è¿ŀæİ¥ èĩ³ +éģĩ ä¸ĬäºĨ +åĪĽéĢł ä»·å̼ +åĮĸåѦ æĪIJåĪĨ +ç§įæ¤į éĿ¢ç§¯ +ä½łçŁ¥éģĵ åIJĹ +ä¸ī 人çļĦ +åıijå±ķ ä¸ŃåĽ½å®¶ +Pl ural +ĠQual ification +è¡įçĶŁ åĵģ +, å®ŀåľ¨ +B ush +ĠCl imbing +ĠAct a +Ġmid term +Ġpul ver +T odo +p owered +ĠP ins +Ġsp iked +èĩªå·± æľĢ +io ch +åĨľä¸ļ éĥ¨ +. reply +get Current +. 
ssl +对 岸 +no vel +-ad minist +æľīæĺİæĺ¾ åĮºåĪ« +b ush +åIJĦ èī² +ĠOpen ings +Ġc wd +Ġsh outs +ä¹Ł 缸å½ĵ +Ġunbeat en +" To +ãĢģ 让 +ĠB REAK +å¹³ ä»ĵ +], \ +Count ries +ĠMer idian +çļĦ å®īæİĴ +Ġd angling +éĢļ åĭ¤ +ãĤ ¬ +ĠAc oustic +âĪ ij +S sl +_ answer +l id +p gen +ä½ĵ éĩı +åģļ ä¸įäºĨ +åŃĺåľ¨ éĹ®é¢ĺ +ç¼ĸè¾ij éĥ¨ +ĠStir ling +j al +Ġp ode +Ġcar ic +游æĪı ä½ĵéªĮ +èģĶèµĽ åĨłåĨĽ +Ġsensit ivities +R atings +st ub +ĠP DA +çİĭ ä½į +第ä¸Ģ 大 +.Index Of +ĠH mm +åIJij ä¸ŃåĽ½ +æīijéĿ¢ èĢĮæĿ¥ +ik on +è¯ļ çĦ¶ +Pal m +iph any +å´İ å²ĸ +: NO += item +æīĵ 人 +.id x +ä¼ı åĩ» +rivile ged +çIJĨ论ä¸İ å®ŀè·µ +çŁ ¾ +Ġhyper links +Print able +ĠVari ation +iop athic +ï¼Į åĬłå·¥ +ĠS EN +Ġhor rified +èļ¯ èļĵ +ĠR ever +), $$ +Ġfl oss +Ġ** ) +åIJ¹ çīĽ +Ġconcert ed +åĺİ åĺİ +ĠJacqu eline +Ġh aste +Ġdef lect +_b lack +符åIJĪ åĽ½å®¶ +ï¼Įåı¯æĺ¯ çİ°åľ¨ +Ġreset ting +ĠJam mu +å§Ĺ å§Ĺ +ul le +Ġsa ute +.com tag +-l im +_r anges +ĠCass idy +Aud ience +çķĻå®Ī åĦ¿ç«¥ +æĺİçϽ è¿ĩæĿ¥ +Ġcere als +西æĸ¹ åĽ½å®¶ +è¿ŀéĶģ åºĹ +vell ous +R ide +ãĢĤ èĩªçĦ¶ +个 å±ģ +ç¥ İ +å¼ł ä¸ĵè¾ij +è¿ij çļĦ +.RE LEASE +ĠCreature TPL +æıIJ åĩºæĿ¥ +ä½Ļ 项 +éĴ٠声 +ĠSub scriptions +_EN C +imm ers +pur ple +èĤĽ éŨ +( (' +Ind onesia +æĬ½ æ£Ģ +Ġpy game +ä¸Ńå°ı åŀĭ +详æĥħ 请 +Anc ient +S uddenly +女 çļĩ +ĠUn c +ĠDec oder +åĴ¸ 丰 +.Ver ify +ge b +eg ment +社ä¼ļ åĮĸ +ĠPI PE +æIJģ ç½® +Ġcomprehens ively +ä¸ĭ é£İ +åĨĻ å®ŀ +* pi +~ $\ +ï¼Įä¸Ģ åIJij +ä¿¡æģ¯ ç½ij +秦 æ·® +-n ational +Ret rieve +D ivid +Ġkn ights +Ġimmun otherapy +è´© åŃIJ +, æīĵå¼Ģ +âĢľ åħĪ +大 è·¯ +车 æŀ¶ +Ġcre ed +满 头 +Ġwalk way +Ġpsych ologically +_int o +ĠW earing +两 è¾¹çļĦ +ï¼ĮæĪij è·Ł +åįĹ éŨ +.S kip +-l iving +æĶ»åĩ» çļĦ +çŃīçĿĢ ä½ł +Ġvoic email +í ĺ +æ°´ åĨĽ +but erol +ĠB ub +ä¸Ńå¿ĥ å°ıåѦ +åºĬ åīį +ĠFound ers +/sw agger +liter ally +Ġ æ³ķåĽ½ +ĠV ance +ature d +ĠComput ation +åıĤèµĽ éĢīæīĭ +( work +Ġaneurys m +Ġbid irectional +Ġdef iant +Ġshe af +_M ACHINE +managed Type +çļĦæĹ¶åĢĻ è¦ģ +èĹ ĵ +ifer ous +woo commerce +" ä¹ĭç±»çļĦè¯Ŀ +ĠL ansing +åįģ æĹ¥ +Ġmon ocytes +ĠCol o +åľŁ çĿĢ +Ġtimes 
cale +W ear +Ġb um +çIJĨ çŁ³ +_C ODES +åĨ· çľ¼ +ĠMarx ist +Ġreconsider ation +æĺ¯ èĥ½å¤Ł +.s peed +Ġ% @", +S pectrum +Ġf ad +éĢł 车 +ĠVer dict +Ġirregular ities +. Children +ĠR ptr +Ġmed iating +é£ŀ 鸣 +ĠVol leyball +çļĦ èĤ¡ä¸ľ +ï¼ļ é»Ħ +ä½ı æīĭ +_l ists +罪 è¿ĩ +.Argument Parser +Ġensl aved +! } +{ ex +ï¼Į æľªæĿ¥çļĦ +Ġte ased +æŁĶ æŁĶ +Ġrenew ing +ĠFol ks +ä»ĸ们 äºĨ +ä¹Ŀ çĤ¹ +æĥĬ å¿ĥ +éļIJ èĭ¥çݰ +/p ool +æĥ³åΰäºĨ ä»Ģä¹Ī +H aha +M OST +ï¼Į éĢĴ +åľ¨ æľ¬å®ŀç͍æĸ°åŀĭ +请 å®ī +èĤł çĤİ +åľ°çĤ¹ çĤ¹å¤´ +_the ta +Ġomin ous +* ), +Ġd lg +缴 éļ¶ +âĢī h +ĠSta ples +æĺ¯ ç͍äºİ +ĠR ift +项 è§Ħå®ļ +èĤ¡ æģ¯ +ĠOrgan izing +ĠM illing +-d eal +令 她 +æ·ĺ æ°Ķ +éĤ£ä¹Īå¤ļ 人 +ãĢģ æĶ¶ +Ġph ilippines +ç»ı常 åĩºçݰ +Ġing ested +Fire wall +ĠRosen berg +Ġ é¢Ŀ +Ġinitial izing +special chars +ĠEff orts +P ivot +_s plits +çļĩ åIJİçļĦ +áº Ń +å·´é»İ åľ£ +æĭįåįĸ ä¼ļ +ĠBh utan +Ġunsett ling +e ast +n ak +ing way +ãĢģ èij£äºĭéķ¿ +æĪij å¾Ĺ +ä½ĵ ä¸Ń +Ġback side +Http Response +ç½IJ åŃIJ +ï¼Įæķ´ 天 +é« Ķ +ET O +Ġcondition ers +æĬ± æĭ³ +_SE CRET +Ġconvey ance +ãĢĤ æľ¨ +_G ame +Syn opsis +Ġcuc umbers +Ġ( ?) +Ġ. 
* +éĿŀ åħ¬å¼Ģåıijè¡Į +.set Visible +.class es +ĠAuthor ized +ĠNY U +ä¸Ĭåīį åİ» +ĠConstant in +c um +Ġ å¸ģç§į +çļĦ èĤī +ht i +Ġå¼ł è¡į +èİİ æĭī +åĢŁæ¬¾ è´¹ç͍ +Ġscar ves +) b +, æľ¬æ¬¡ +åĽ½ åºĵ +常 éĩı +ä¼ļæľī å¾Īå¤ļ +Ġsm uggling +è¿ĻäºĽ 天 +PD U +æ·ĭå·´ ç»ĵ +sth rough +ĠF BS +åħ¬å®ī åİħ +æĺĶ æĹ¥çļĦ +ĠConcent ration +ìĬµ ëĭĪëĭ¤ +ĠS ore +ĠB odies +å¸Ī åĽ¢ +伸 åĩºäºĨ +ĠSwan sea +Dou glas +> I +ï¼Į ç»ķ +çļĦ é»Ħéĩij +æĺ¯ åħ¨åĽ½ +åĩł å²ģ +Ġà ¸ +ï¼ĮåĨį ä¹Ł +Drop Down +ĠAut odesk +å¥ī ä¸Ĭ +Ġ åĬłåħ¥ +Ġs ä +Ġm use +åľ° ç¬ij +åı¯ä»¥ è®©ä½ł +Ġlog out +Mark down +p data +on ormal +ĠB etsy +ĠK ylie +AT O +dom inal +, 离 +p ak +Ġse vered +åĪ© 空 +Sh ows +ï¼Įè·Ł æĪij +) V +Ġs amsung +ï¼Į æĹ¥åŃIJ +ber man +_C lass +Ġdeg enerative +Ġgerm ination +_ ot +ãĢĤ åĽŀ +Ġout spoken +Set Value +âĢľ 鼶 +Ġdo cks +ĠDe V +好çļĦ è¯Ŀ +Ġcabin etry +ĠMAT ERIAL +he rapy +Ġin und +ĠG areth +å¿« å¿« +,è¿ĺ ä¼ļ +M egan +æĬĬ ä¸Ģ个 +ĠSp ur +åı³ èĩĤ +æĭħå¿ĥ äºĨ +ç²ĺ éĻĦ +\ }_{ +ĠF arn +å½ĵ 天çļĦ +ä»ĸ们 ä¿© +Con duct +å¼Ĥ åħ½ +Ġcirrh osis +åľ¨ æ¯ı个 +ãĤ¹ ãĤ¿ +Ġtelev isions +w el +ĠP PT +-c ancer +и б +Ġë Ħ +ĠPul itzer +. 
radius +åľ° äºĨè§£ +æĹ©å°± å·²ç»ı +ĠSon ia +åĩºèµĦ é¢Ŀ +ä¸Ń åĮħæĭ¬ +æĪĺ éĺŁçļĦ +Ġpap rika +çĮĿ ä¸įåıĬéĺ² +Ġaval anche +ĠCh un +享 èªī +æī¬ å°ĺ +ĠØ ® +Ġreass urance +è·Ĩ æĭ³éģĵ +F amous +çļĦ æĸĩ +éķ¿ çŁĽ +ĠX III +äºī 端 +UN KNOWN +Ga ussian +ĠEstablish ment +Ġ" {{ +å°ı ä¼ģä¸ļ +é£İ éĽª +æĬķèµĦ 建议 +åĽŀçŃĶ ä¸İ +ãĢĤéĤ£ 天 +_ analysis +æŃ¤ çķª +ĠBl ink +Ġtable top +_DECL ARE +ï¼Į å°¹ +pl ans +å®ŀäºĭ æ±Ĥæĺ¯ +L ack +å̼å¾Ĺ çļĦ +åıijå±ķæĶ¹éĿ© å§Ķ +/ an +w rapped +Ġsp urred +With Name +ĠIns pect +æĽ´æĸ° æĹ¶éĹ´ +Ġdis qualified +èµ° ä¸ĢæŃ¥ +Ġbro om +Q String +â ł +é»ij è¢į +arrow ing +éļĭ åĶIJ +è¿ĽæĸĻ åı£ +æĻĤ éĸĵ +gh i +^{ {\ +La TeX +( Command +ãĢĤ âĢĺ +ĠD uffy +åĪĨ éĴ± +DR AW +Ġantidepress ants +ase ous +å¼ł åĽ½ +ĠGu cci +Ġpred atory +ĠEnt ities +Ġå½ĵ ä¸ĭ +æ¿ĢåĬ¨ åľ° +âĢĿä¹ĭ ç§° +S MB +ach ieve +Ġcreat ives +ï¼Į大 人 +æ§ Į +çļĦåºķ 端 +ç¬ijéĿŀ ç¬ij +âĢĻ - +ĠR ies +æ¶² ä½ĵçļĦ +ãĢģ æĢ§èĥ½ +å®ĩå®Ļ ä¸Ń +Ġlect urers +ï¼Į è·¯ä¸Ĭ +åĬł æ°¢ +æİĴ çĥŁ +Ġes ophageal +ĠAdvis er +j is +Ġch oking +åħ¶ 身 +æĥ³ çľĭçľĭ +_C BC +å¹´è½» çļĦæĹ¶åĢĻ +m otor +Ġsp urious +æĢ» æĪIJ绩 +åĽ½éĻħ åIJĪä½ľ +oph on +çѾ åıij +bour g +$ P +ï¼Į 女åŃIJ +il en +ĠS FR +_C ATEGORY +èİ·å¾Ĺ æĦŁ +è´¥ äºĨ +é£İéĻ© æİ§åζ +out line +Ġsumm ing +ï¼ĮçŃī ä½ł +. 
entities +ï¼Įå°± åºĶ该 +onn ay +-b rown +rap id +Ġfire places +åı¦å¤ĸ çļĦ +ãĢĤä»İ èĢĮ +ĠEnlight enment +- constant += R +éĩij çIJĥ +Host ing +var iants +åĵª åĴ¤ +ĠLe ak +åħµ ç§į +éļIJ æĢ§ +è¡ĮæĶ¿ è¯ī讼 +P ilot +ĠM AS +æĹ¶ 使ç͍ +天 ä»Ļ +ĠTel ugu +ÂĴ s +åºĶ å°Ĩ +æĿ¨ æ´Ľ +/ network +T ill +Ġ åĮĸ +ĠD ST +èĩªå·± å·²ç»ı +ĠBent on +ĠF IND +ç»Ļ æĪij们çļĦ +èĢĥ é¢ĺ +éĽĨ éķĩ +Ġâ IJ +des pite +C OS +Ġf on +pp p +éĢł çī© +ĠGu adal +address es +éľĦ äºij +ä»İ头 åΰ尾 +í Ħ +ä¸į èĻļ +ãĢģ åı¯ä»¥ +Cl azz +ĠHer bs +æī« èį¡ +èĤ¡æĿĥ ç»ĵæŀĦ +æģª å®Ī +( js +ï¼Į ä»ĺ +Ġl uk +åĴĮ åŃĻ +ĠK K +_s ervices +èĥĮ æĿ¿ +æĮģç»Ń äºĨ +è·Ŀ离 çļĦ +ĠCa es +/k ernel +_AX IS +( ä¸ĭ ++ F +Ġm ême +ãĢĤ åĨħ容ç®Ģä»ĭ +åĴĮ ä¸ļåĬ¡ +ĠDon ations +é¢ĦéĢī èµĽ +$ th +j id +Ġ è¾¾ +ent ional +ä¸į è§ĦèĮĥ +are e +ï¼Įä»ĸ åıª +ä¼ļè®® åı¬å¼Ģ +End Time +.To Lower +_ et +éľĢè¦ģ æł¹æį® +Ġaccompan iment +åľ¨ ä¸Ģå®ļ +å¾Ĺ ä¸ĭ +In active +åı£ åı£ +欢 欢 +çīĪ éĿ¢ +è´´ çݰ +Ġparameter ized +Administ rative +attan ooga +P LEASE +rest ricted +åIJ¸æĶ¶ äºĨ +ï¼Į çĵ¦ +ãĢģ æĸ¹ä¾¿ +åıij 车 +и г +èݱ æĺĤ +Ġnucle otides +ãĢģ çŁ¥ +ĠD ementia +pl a +,: ,: +为己 ä»» +M ENU +Ġj ä +å½ĵ åħ¶ +Ġass uring +ç½Ĺ çļĦ +Ġund ist +å´ ½åŃIJ +è¦ĨçĽĸ äºĨ +[ ad +ï¼Į æ³ķå¾ĭ +ä¸İ å®ŀéĻħ +å½ĵ ä¸Ģ个 +Ġx r +åĮħ éĹ´ +åĨĽ åĮ» +ç¦ģ åĨĽ +éĶħ çĽĸ +ä¸įçŁ¥ æĥħ +ãĢĤ åĩº +楼 éĺģ +èĥĨ æĪĺ +è¡Ģæ¶² ä¸Ń +åĪijäºĭ æ¡Īä»¶ +æ·ĺæ±° èµĽ +Ġdin ers +Ġ 管 +ï¼ģ æľī +quir rel +æĪĪ å£ģ +Ġle vers +ĠK av +æŃ¤ è¡Į +ï¼Įä¸į 太 +éĵģ è¡Ģ +详ç»Ĩ äºĨè§£ +转让 ç»Ļ +èĶļ æĿ¥ +. vertex +C x +T ower +ft ed +è¿ij 身 +Ġlaw fully +è·ij åΰäºĨ +èĩªçͱ éĢīæĭ© +v irus +ï¼Į æĽ¿ +æį¢ æĪIJäºĨ +' http +S HELL +ĠClin ics +è¯ Ł +qu ires +åħ± è¿Ľ +ç§ĺ é²ģ +è¹ ´ +ĠH ARD +ĠDIS ABLE +ä¿Ŀè´¨ æľŁ +? ... +m oor +US ART +è¿İ 宾 +æ¦Ĥ念 çļĦ +T c +ĠR itz +ĠPS U +. 
ke +ĠC yan +é£İ æİ§ +*, * +ĠAst rology +: å°Ĩ +EM U +Ġoptim ised +æµģåĬ¨ çļĦ +追æ±Ĥ çļĦ +念念 ä¸įå¿ĺ +èµ° ä½İ +åįĹ ä¾§ +æ·· æĪĺ +åŁ¹åħ» åѦçĶŁ +ĠEsc herichia +c is +Ġal f +Ġr tc +IN I +åħī åIJĪ +.B order +建çŃij æĸ½å·¥ +Ġarth ro +Ġartic ulation +Ñ Ķ +Ġst if +è¿Ľè¡Į 交æµģ +åĽ¾ è§£ +Ġz eta +æĮī æįº +æķ¬ ä»° +为大家 ä»ĭç»į +_t ick +åĪĿ ä¸ĥ +ç»Ŀ ä¸įèĥ½ +ä¼łéĢĴ åΰ +.gener ic +Al ter +éĥ¨åĪĨ æł¹æį® +é²ľ å«© +(user Id +J y +Ġdis ables +ä¸İ åģ¥åº· +ĠCom parable +.b efore +Ġbur geoning +âĢľ Why +å·® åĪĨ +P regnancy +ĠP ods +éķ Į +å¿« æĦŁ +ä½ľåĵģ å±ķ +×Ļ× Ŀ +ä¸Ģ ä¸įå°ıå¿ĥ +缸 容 +.s ome +éļ¾ åIJ¬ +Be zier +MC I +Ġlip oprotein +Ġ èᣠ+ĠSt oke +çķ ² +Us b +表达 èĩªå·±çļĦ +ĠHard ing +ĠSnap dragon +uss el +å¾ģ æĸĩ +éĹŃ çľ¼ +Ġhind ered +" ï¼Ł +et ected +è¦ģ éĹ® +èĢģ é¹° +AT ORS +æ£Ģæµĭ 设å¤ĩ +Ġm Context +è¿ĩ ä¸ĸ +æ·± æľī +,åľ¨ æŃ¤ +ob ility +Ġinj ure +Ġharmon ics +å¾Ī éķ¿çļĦ +ï¼ĮåįĬ æĻĮ +ĠS ulf +ãĢģ èĪŀè¹Ī +建 åζ +çŁŃ è¢ĸ +çļĦå°ı ä¼ĻåŃIJ +Ġwholehearted ly +è¿Ľè¡Į æ²ŁéĢļ +ãĢĤ 临 +éŁ³ èī² +Ġbol st +Ġconjug ation +at ians +-g lobal +èķ ¨ +ĠPlay list +åIJĦ个 çݯèĬĤ +ĠPART Y +ad visor +os uction +ip zig +ï¼Įå¹¶ ç»ĵåIJĪ +è¿ĻäºĽ ä¿¡æģ¯ +а Ñĩ +(c x +åĽ½éĻħ å¸Ĥåľº +Ġsubsequ ence +åĸ» æĪ·æĻĵ +为æĤ¨ æľįåĬ¡ +æĸ§ 头 +Mur ray +ĠCPP UNIT +è¯Ļ è°IJ +ï¼Į åIJĵ +ï¼Ł \" +а ÑĪ +_g ap +Ġens ued +/ settings +S AVE +_C A +åij¨ åħ¬ +ĠMer ck +çĿ¡çľł è´¨éĩı +åīĸ è§ĨåĽ¾ +å°±æĺ¯ éĤ£ä¸ª +ï¼Į åĬ¿ +ï¼Į åĩĿèģļ +æĪ ¬ +以为 ä½ł +ĠLand mark +éĢĨåıĺ åύ +D AR +ĠSouth western +âĢĻ Connor +é¢Ħ çŁ¥ +个人 éĺ²æĬ¤ +Ġclaim ants +èīºæľ¯ èĬĤ +sl aught +ĠHom estead +çªĥ åıĸ +ĠLag oon +N ope +-se lection +该 æĢİä¹Īåģļ +ret rained +ç¦ı ç¥ī +Ġintox ication +æĸ° 京 +头 çIJĥ +çϾ 亿 +è¡Ģ æµĨ +æĮģç»Ń å¢ŀéķ¿ +Ġrid ges +Ġbass ist +] & +åį« è¡£ +æĹħ åºĹ +Administ rator +ãĢģ 讲 +æĪij æĥ³è¦ģ +_C ID +Ġhom olog +åľ¨è¿Ļ åľº +Ġdiscrim inant +( 第 += list +ãĢĤ ä¼Ĺ +ä¹± 象 +ĠPe acock +æģĭ çαçļĦ +ĠMont enegro +é©° åIJį +强 æķĮ +çľ¯ çĿĢçľ¼çĿĽ +ĠDSL R +D x +Ġ çİ°åľº +ç§Ģ åıij +åıijæĺİ äºĨ +ç²ĺ åľŁ +.~ (\ +F etcher +ãĢĤ 女人 +éĥ½ åĥı +身 ä¾§ +åıĺ æķ° +.get Selected +纯 æ°´ +ĠSub 
scriber +竹 马 +åĤ² å¨ĩ +_CH ANGED +Ġmic rop +ĠAk ron +Kent ucky +ĠH PLC +ep oint +ï¼Ł èϽçĦ¶ +For ums +no DB +éĢĥ çªľ +åĿIJåľ¨ æ²Ļåıijä¸Ĭ +å°±åĥı ä¸Ģ个 +ĠM ENU +å°± æľīçĤ¹ +å·¥ä½ľ ä¸Ĭ +Ġbi ochemistry +ĠC um +ç»ĵ ä¼´ +转 åĩº +ĠOp code +æľī é«ĺ +å°Ĩ å°± +ç¾İ ç¾İ +åĩł ç§Ĵ +,è¿Ļ ä¹Ł +ç½ijä¸Ĭ æĬ¥åIJį +Ġheter osexual +ĠJar vis +æīĢèİ·å¾Ĺ çļĦæīĢæľī +f iddle +Ġ å·¥ä¸ļ +ent ions +ï¼ļ ãĢİ +Ass oc +/g raph +å¦Ħ åĬ¨ +çĨĦ çģ« +ĠDETAIL S +ï¼Į å¿ĥçIJĨ +Ġp innacle +èIJ½ æĪIJ +Ġbas eman +Ġsweet est +Ġdeterior ated +æı£ æij© +å¤ľ åıī +æĸĩåĮĸ ä¼łåªĴ +-product s +X Z +pe a +ĠF unnel +å°ı 鸡 +Ġimp ulsive +表示 为 +éģĹ çī© +Ġfamiliar ize +Ġwet land +z ag +ãĢĤ åIJ¾ +_p ayment +B REAK +ï¼Į è·Į +Ġag ro +.get Size +ĠHapp en +åĽŀè¿ĩ头 æĿ¥ +å¨ģ士 å¿Į +d le +{ Name +ç½Ĺ èĮ¨ +OM UX +è½® çķª +ï½ Į +éľĩæĥĬ äºĨ +è¯·ä½ł æł¹æį® +ï¼Į æĿŃå·ŀ +ow an +ced ural +转æĬĺ çĤ¹ +j x +ï¼ļ åĮĹ京å¸Ĥ +å°±ä¸ļ æľºä¼ļ +Gesture Recognizer +Z ONE +ï¼Į 年纪 +col lege +顺 æ°´ +æĶ¿æ²» å±Ģ +说 åij¢ +ĠLi ang +ÏĨ ο +大 é¤IJ +éĩĮ æľĢ +主 å¦ĩ +è¿Ľè¡Į æİ§åζ +IC Ag +.Get Object +æľĢæĸ° æ¶Īæģ¯ +Ġaffection ate +å§ĶåĨħ çijŀæĭī +Ġ æıIJé«ĺ +ab ler +ĠH ens +Ġpr és +æĸ½ åİĭ +åħ´ åĽ½ +è¿Ļä¸Ģ å¥Ĺ +第ä¸ī æŃ¥ +ĠMac ron +åĩºå£° æĿ¥ +Ġspr ung +orp ion +ï¼Į 飦 +è¿IJèIJ¥ 管çIJĨ +ĠHOLD ER +-oper ated +è¦ģ ä»ĸ +åĪĽ ä¸ĸ +ĠZ ac +Ġspac er +, column +Ġso res +第äºĮ ä½į +oper ands +积æŀģ 主åĬ¨ +Ther mo +d ex +è§£ èᝠ+RE VIEW +Ġed x +åĿı å¤Ħ +ĠLegend ary +as one +ä¸Ń ä¸ĸ纪 +çĮ® ä¸Ĭ +ç§Łèµģ åIJĪåIJĮ +( DateTime +ĠB SP +åħ¶ ä¸Ĭ +Ġcol itis +ï¼Įä½ł å°±ä¼ļ +/s pec +Ġapost le +Ġfiref ighter +H earing +ch anging +ĠIn formed +å¹¶ä¸į é«ĺ +ãĢģæĸ° çĸĨ +ä¸į ç¾ģ +âĢľ åı¯ +ĠD IST +Ġover looks +è°ĭ çķ¥ +ï¼Įåħ³éĶ® æĺ¯ +- rem +/ icon +Ġg orge +ĠF AB +Ġpa ul +æľīä¸Ģ 份 +æĸ¹ä¾¿ äºĨ +åĪĨ åºĹ +羣 è°Ľ +ĠBar ney +ĠTw ice +Ġadd ictions +ä¿Ŀ èĤ² +æĥħåĨµ 说æĺİ +(B ig +ãĢĤå¤ļ å¹´æĿ¥ +ãĢĤ 羣æŃ£ +浦 åĮº +æĴķ å¼Ģ +elect ronics +Ġshaft s +ï¼Į éĻĦ +ĠM GM +ĠB inder +åıĪ æ²¡ +ĠApp ropri +èĦ± æİī +ĠIr win +Ret irement +&= &\ +v otes +ï¼Į éĩĬæĶ¾ +Ġso bri +ph ysics +ï¼Įå¹¶ åı¯ +_ex ist +xi om +_EM AIL +æĿ¥ä¹ĭ ä¸įæĺĵ 
+tear ray +ĠSt abil +Ġkn ead +ĠIsland er +ãĢģæľī éĢ»è¾ij +ĠI EC +ĠPro verbs +åı¯ä»¥æł¹æį® èĩªå·±çļĦ +Calcul ation +ï¼Į 平常 +è¿Ļ个 è¿ĩç¨ĭ +åŃ¦æł¡ åĴĮ +æ²Ļ çĽĺ +Pack aging +Ġcran berry +对 æķ´ä¸ª +äºĶ 个人 +-qu arters +.ch ain +åı°åĮĹ å¸Ĥ +èĩªçĦ¶èĢĮ çĦ¶ +- expand +Ġc log +ĠT riton +ag og +åĴĮ ä½İ +ang ana +, åħħåĪĨåıijæĮ¥ +ĠG ynec +è° Ħ +éĢļ 宵 +ï¼Įä½ł åĨį +Inter cept +è¶£ äºĭ +ç¢į äºİ +ãĢĤä»ĸ们 åľ¨ +[ cur +Ġ æ¯ı次 +ãĢĤ åıijè¡Į人 +åıĬ æł¼ +ï¼Įè¿Ļ ä¸İ +çļĦ人 éĥ½æĺ¯ +_B AR +è½½ åħ¥ +_m c +æľ¨ çļĦ +mod ifier +Ġnod al +氢氧åĮĸ éĴł +åIJ«éĩij éĩı +C aret +ĠD ire +éĽ ¹ +Ġ------------ -- +ï¼Į å¨ĺ +ĠS CM +app lied +å¿« åΰ +ĠAs us +èĦļ ä¸Ĭ +Ġh m +ä¸ĭ 身 +me asured +.l ines +mon o +Ġdb g +quire r +æĮª ç͍ +Ġc uz +Ġm é +ãĢĤ å·¦ +Ġper col +éĩį çͳ +Th ailand +ä½ı çļĦåľ°æĸ¹ +管çIJĨ 软件 +_CON N +(pro gram +çħİ é¥¼ +" Yeah +Ġg cd +du ces +ĠX er +é»ij çľ¼åľĪ +ĠCro chet +âĢľ ãĢĬ +ĠPaul ine +ä¸īåįģ ä¸ĩ +oj is +Ġdeterior ate +s ense +Ġcontent ment +èĨ Ī +Ġenthusi astically +Ġshame ful +强 éĺŁ +.d p +Ġk ad +Ġcl umsy +æĿĨ çļĦ +Ġmasc ot +å½ĵä¹ĭ æĹłæĦ§ +ĠG ri +æĬķ æ¡£ +ï¼Įå°Ĩ åĨĽ +Select ing +ĠNeuro logy +Ġs ermons +çļĦ 客人 +ä»ĺåĩº 代价 +æķĻ åĮĸ +é¢Ĩ åħµ +åĪĴ çĹķ +åĪĽä¸ļ çļĦ +æ¶µ åħ» +L android +ï Ĥ +alk ing +èĵ ģ +Ġled ge +ï¼ģ ( +å¼Ĥ äºİ +ï¼ĮçĶļèĩ³ åľ¨ +éĢļ常 åľ¨ +ï¼Į èµĽ +è§ Ĭ +ï¼ļ è¿Ļæĺ¯ +å¿ĥ åı£ +Ġz z +æŃª æŃª +_lib s +w onder +ä¸į ä¹± +_L abel +ĠHome owners +ĠTax ation +Ġbother s +upro fen +( Application +ãĢģ éĩįè¦ģ +è¿ĩ å°ı +å¿ĥ èĤł +æĥħ æĵį +æľºæŀĦ æĬķèµĦèĢħ +çĸ¼ äºĨ +éģĵè·¯ 为 +æķıéĶIJ çļĦ +V eg +we e +... 
] +åģļ éĶĻäºĨ +reg exp +éĺ³ æĺ¥ +-m achine +ç»Ĩ å¿ĥçļĦ +Ġwater y +\\ [ +ĠRev enge +éĶģ éĵ¾ +, æıIJåĩº +ï¼Į èĤ¡ä»· +ad ult +aur ant +ĠTra its +ĠSub way +Box es +Phone Number +ä¸įèµ· çľ¼ +Rotation Point +ip ad +æľĪ çīĻ +Ġdef y +ĠStat ute +, å̼å¾Ĺ +N eal +çļĦ 顺åºı +âĢľ 绿èī² +AM C +çĦ¶èĢĮ æŃ¢ +æŃ£ç¡® çŃĶæ¡Ī +ç§ijåѦä¸İ æĬĢæľ¯ +, num +ĠN FS +.S leep +amed a +ĠC DF +ĠAm ir +_re ceived +ä¸ij éĹ» +äºĨ两 次 +, è¿ijæľŁ +Ġcler ks +ĠQué bec +l ung +çĿĢ ç§° +-m icro +å¿ĺ æĢĢ +( select +Ġ åIJ¦åĪĻ +ĠT MS +.p adding +è¿Ļæł· çļĦ大 +æŃĮ çļĦ +ä¹Łåı¯ä»¥ éĢļè¿ĩ +ç¥ŀç»ı çĹħ +ï¼ĮäºĮ æĿ¥ +çłį ä¸ĭ +ô ng +ĠP CC +è´¨éĩı å®īåħ¨ +ĠæĪij åľ¨ +ITH ER +æĪIJ ä¸Ģçīĩ +欢 å¿«çļĦ +åŁºç¡ ħ +æĶ¹éĢł é¡¹çĽ® +ä¼ĺåħĪ èĢĥèĻij +ĠP EN +飩 åĽ½çļĦ +ĠDar cy +Ġreign ing +ï¼Į å°Ĭéĩį +å®¶ åºĹ +å°ı ä½Ļ +Ġcar ousel +ï¼Įæľī å¿ħè¦ģ +ä¹Łä¸į ä¸Ģå®ļ +Secret ary +ãĢĤ ä¹Łæľī +ãĢģ åŁºç¡Ģ +Ġstabil izer +, çŃīå¾ħ +g arden +çļĦ æĿĥåĬĽ +è¦ģ è·Ł +ä¹ĭ èĥ½ +get ter +éĩį 天 +éĢģ èµ° +ï¼Įè¿ĺæĺ¯ åľ¨ +é£ĺ æī¬ +(st ats +ĠSax on +- June +Ġj ames +æ´» æ´» +ĠBall room +еÑĤ ÑģÑı +u il +ç®Ģ çķ¥ +æĦ¤ æģ¨ +Ġ è´Ńä¹° +ï¼Į æ¶µçĽĸ +èĩª è¾¾ +ä½ĵ å¾ģ +主 æī¿éĶĢåķĨ +æĪijæīĢ çŁ¥ +Ġscoot ers +} P +âĢľ 好äºĨ +_H S +éĹ² äºĭ +Ġvot re +游ä¹IJ åľº +Ġvow el +ĠEpidem iology +B ake +èĩªå·± 人 +_c ached +æĸ¯ æ´Ľ +ï¼ĮèĢĮæĺ¯ åĽłä¸º +ãĢģ åĨ¶éĩij +ï¼Į缴 å¾Ħ +ãĢģ人 æĸĩ +èľĪ èļ£ +åį°èĬ± ç¨İ +oc entric +Ġmet av +[i NdEx +计æıIJ æ¯Ķä¾ĭ +M ight +ĠT ories +åı¯ä»¥ ä¿Ŀè¯ģ +-m atrix +伤 åΰ +ĠAN IM +Plaintiff s +S audi +ä¹ĭ æķ° +表 éĿ¢ç§¯ +Ġins ufficiency +, ä»ĬæĹ¥ +ä¸Ń ç»§ +âĢĶ an +èĥ½å¤Ł èİ·å¾Ĺ +ä¸įäºĨ å¤ļå°ij +-sh ort +ĠGa ia +ĠÏĥÏĦη ν +- visual +h oun +al con +ä¸į éĩĬ +ä¸įæĺ¯ 没æľī +客è¿IJ ç«Ļ +Ġpredic ament +çļĦ è¿ĺæľī +ĠD ao +ec s +äºĴ 为 +yout u +Ġb ordering +åĴĮ åĬŁèĥ½ +çł § +æį¢ æĮ¡ +ĠDis posal +æ´ª æŃ¦ +Inf rastructure +èĬ± åºı +é£ŀ åIJij +ãĥ ¯ +Ont ario +Ġardu ous +ä¸į å¤ĸ +æĦ « +Ġad herent +Ġsp illing +æĹł å°ĺ +è½® åŃIJ +Ġpurpose fully +è·ĥ è·ĥ +ĠBall ard +ĠSomew here +. 
EMPTY +ĠR NG +std in +å¿ĥæĥħ ä¸į好 +ĠSalmon ella +ãĢģ éĩİ +书 å±Ģ +.f ilters +çĶ» ä¸Ń +åĴĮ å¦Īå¦Ī +ĠContin ues +ä¸įä¼ļ å½±åĵį +ä¹ĺ æľº +微信 å°ıç¨ĭåºı +ãĢĤ ç¨ĭ +æłĩ ä»· +åĩºçīĪ äºĨ +_ex e +/ ext +j ak +ãĢĤ åĶī +erm ission +è´¨ åŃIJ +åĨ² åħ¥ +åħ¨éĥ¨ 被 +_MAN AGER +S åºĹ +çļĦ åIJ¸æĶ¶ +ä¸Ģ æĦı +Ġr ue +æĿ¥ ä¿ĿæĬ¤ +å°± 羣çļĦ +çľ¼ çĨŁ +Ġund one +BB B +éĥ½ å¸ĮæľĽ +L adies +Ġp st +ĠCh amp +æĿ¡ 第 +æĶ¯ æµģ +æ£Ģ è§Ĩ +_sh utdown +Ġuns ustainable +ĠâĪ ĩ +涨 äºĨ +ĠBarcl ays +< ID +åĴĮ 被 +å·² å©ļ +çļĦä¸Ģ 段æĹ¶éĹ´ +Ġnood le +ĠW nt +åĮ į +æľ¬ æĦı +åĩ» æºĥ +éĴŁ è¡¨ +Ġrecogn ises +ç¿» èħ¾ +éĥģ éĩij +S creenshot +Ġbe gg +ĠJ V +_P ULL +读 åĩº +- account +- oxid +/ apis +\ Console +Ġs int +ĠD umps +è¿Ļ 两年 +og rad +ç͵ ä»· +ĠGar bage +Prem ier +Ġplasm ids +æĶ¹åĨĻ åIJİçļĦ +_ plugins +ï¼Į æīŃ头 +ãĢĤ ç¥Ŀ +AL Y +add ock +Del imiter +ãĢĤ æ¢ħ +per iment +对 åŃ©åŃIJçļĦ +Ġquot as +翱 ç¿Ķ +åľ° æĿ¿çļĦ +Ġ: ] +.d rag +åħŃ çĤ¹ +App Data +çī©åĵģ çļĦ +梦å¯IJ 以æ±Ĥ +âĢĿ 以åıĬ +åΰ ä»Ĭ天 +å·²ç»ı è¶ħè¿ĩ +Ġswe eps +ä¸īè§Ĵ æ´² +çļĦ ç͵åİĭ +id or +宣 åΤ +éĵľ éĴ± +ä¸įå¼Ģ çļĦ +Appe ar +èĦļè¸ı å®ŀåľ° +" B +ï¼Ł "" +äºĨä¸Ģ å°ı +UM AN +æ´ģ éĿ¢ +Host ed +æľºåĬ¨è½¦ 驾驶è¯ģ +社 çļĦ +åįĬ æĪIJåĵģ +ä½łçļĦ çĶŁæ´» +ĠHand made +çŃīåIJĮ æĽ¿æį¢ +.mult iply +Ġ ç»´ +Ġcon ical +Ġwe ep +ss ss +æ¨ ¾ +Line Number +åį§ åºĬ +, ä¿ĿæĬ¤ +T esla +pl atin +eg l +åĴĸ åĸ± +re au +ĠB uf +Ġob fusc +Pres ence +é¢ĨåŁŁçļĦ åºĶç͍ +qu eries +åĴĮ åĶIJ +ĠOr well +AB L +/d t +ĠAp ocalypse +çĬ · +äºĮåįģ å¤ļ +$ b +- IV +ä¿¡ æľį +æīĭæľº åı·çłģ +Ġrat ified +åĽ¾åĥı å¤ĦçIJĨ +J W +he aring +f iled +Ġst aking +æĿ¥ èİ·å¾Ĺ +au gment +Ġbre eder +ĠEX EC +Ġuns us +.V ISIBLE +Ġf ists +est anding +ãĢij , +ĠLoc ate +ĠAnt hem +ï¼Įèĥ½ 让 +ï¼Į åıĸæ¶Ī +ĠB X +ä¸įå¾Ĺ èĢĮçŁ¥ +ĠDor is +G ifts +ï¼Į 绿èī² +æľĢ æľĢ +)) ^ +Ġwh ispers +Ġbl uff +ĠC PL +å¼ł çĦ¶ +IG IN +éĹ» ä¹ĭ +豪 è¿Ī +chell es +Hex String +Ġmin ima +åıijçĶŁ ä»Ģä¹Ī +ï¼Įä¸įè¿ĩ åľ¨ +ĠHer cules +ï¼Įåıį ä¹ĭ +ä¸ŃåĽ½äººæ°ij éĵ¶è¡Į +ĠC ortex +è·¯ èĻİ +file path +è´« å¯Į +/man ual +çİĭåħ« èĽĭ +æ°Ķåĸĺ åIJģåIJģ +Ġ èĭ±è¯Ń +Ġr ye +åĪ« åħĭ +åħ¬äº¤ 线路 
+è¦ģ éĿł +Ġjust ifies +Ġserv let +ï¼Į对 åºĶ +åĪĿå§ĭ æĬķèµĦæĪIJæľ¬ +ĠEsp resso +- html +Ġa pril +ç©Ĩ éĩĮ尼奥 +J G +ãĢĤ æĵħéķ¿ +两 æĹ¥ +/c ard +åľ° 说çĿĢ +åIJİ ç͍ +èĦ ² +ĠR AS +au k +æĹ© çŁ¥éģĵ +Ġcaus ality +交æĺĵ æĢ§ +Ġtransfer able +_list ener +ç¡®å®ŀ æľī +Ġsuppress es +Interpol ator +è²Ĥ èĿī +Ġcondens ate +L ever +æľī人 ä¼ļ +_COMP ONENT +Ġphosphat ase +ĠP antry +åIJİ ä¸įä¹ħ +Ġ` - +- ness +Ġu reth +Ġag itated +ĠCal ories +åIJįåŃĹ çļĦ +Ġneut rons +ç»´çĶŁç´ł B +æī©å±ķ åΰ +Americ ans +Ġ æĺ¯ä¸Ģ个 +Ġh osp +æĮģ ä¹ĭ以æģĴ +æ»ij éģĵ +å°Ĭ è´µçļĦ +ess ler +av vy +ĠDeb it +ĠFerdin and +ï¼Į éĺħ读 +没æľī æĹ¶éĹ´ +_c aps +ag reement +.c rypto +Ġcrim son +Ġelev ators +ĠGlass es +Ġ åģ¥åº· +ï¼Į å¹½ +ĠO rr +éĿĴ çļĦ +Ġsea weed +Bank ing +Ġextr usion +; k +Ġme hr +太 åı¤ +AD R +对äºİ ä¸Ģ个 +Ġsym phony +ĠChall enger +ï¼Į æĢª +Ġp oking +Ġrem over +ï¼Įåľ¨ ä½ł +rang ler +A head +Ġ åıijå±ķ +Ġg arant +è¢ ¤ +çľ¼ çıłåŃIJ +å¼Ĥ 彩 +æ´¾ é©» +æĦıå¿Ĺ åĬĽ +ãĢĤ åIJĥ +Ġv v +åģļ èıľ +æīĵ åŃĶ +Ġed gy +éĹ¹ äºĭ +è¿ľè¿ľ ä¸įå¤Ł +éªļ åĬ¨ +An on +æĿİ ä½³ +æ¿Ģ å¢ŀ +æĶ¿åºľ 对 +é¥Ń é¦Ĩ +f ld +éĩı åĬĽ +å¥Ĺ ä»¶ +ĠFore nsic +ĠRich ie +_CL R +Ġbuff ering +羨 å·´ +isen berg +Ġè¿Ļä¸Ģ åĪ» +P AC +ãĢģ å¼ĢæĶ¾ +对 åIJ§ +æĹł åŀł +che f +Load Balancer +ĠCard iac +Ġenv oy +_ OD +s rv +er b +æĺ¯ æīĢæľī +ib et +次 éĥİ +强 åģ¥ +Trans parency +.comp iler +ĠSovere ign +ade v +ç²¾ æ·± +ĠInd icate +PS A +诸 天 +åĩĮ 天 +_AC EOF +fu els +Ø ¥ +æŀģ æľīåı¯èĥ½ +( IM +c box +h ana +Ġan ode +ä¸į ä¹° +Time Zone +ii ii +éĥ½ä¸į å¦Ĥ +ï¼ĮåįĹ å®« +th r +ĠBy e +æľŁéĹ´ åĨħ +èµı å¿ĥæĤ¦ +æķ² è¯Ī +Ġ× © +_ass oc +- appointed +Ġt inct +çĽ Ĥ +è·¯ ç«Ļ +å§Ķ ä»» +大ä¼Ĺ çļĦ +# set +ĠS OD +Ġpro clamation +est ors +为 ä¸ī +æ°´ çĶŁ +被 åΤ +ĠEN ERGY +as ley +çĥŃ é£İ +ä¹Łä¸į æĸŃ +aur i +mark down +åĽŀçŃĶ äºĨ +Ġportray ing +ĠJagu ars +å¹´ æĪIJç«ĭ +å°ij æł¡ +æģ¨ æģ¨ +çł´ç¢İ æľº +çļĦ 西 +è¿IJåĬ¨ éŀĭ +ç»ı常 被 +Ġmal aysia +Adjust ment +ĠM unicipality +æĻļ æ¸ħ +_R ST +æľīä¸Ģ éģĵ +ĠTerm ination +@ h +çļĦæ²»çĸĹ æĸ¹æ³ķ +ам и +çªĹ åŃIJ +æĭĽèģĺ å²Ĺä½į +Ġmeta verse +.Sp ring +ĠP ang +ãĢģ åĴ¨è¯¢ +Ġal log 
+æĿ¡ æĿ¡ +é¾Ļ åįİ +åĬª å°Ķ +诺 æĽ¼ +Suggest ion +Ġ 请æ±Ĥ +è¿ĩ åĩłå¤© +ĠY ing +åĬĽ æīĢèĥ½ +èĢIJ åĬ³ +Ġdeterior ating +ĠT id +Âł å¤ı +åľ° å¾Ģ +èݲ èĹķ +åħ¬å®ī åĪĨå±Ģ +æĺŁæľŁ ä¸ī +_me asure +çĶ Ń +ĠL ash +å¼ķ è¿ĽäºĨ +/p rivacy +ĠMet allic +Ġ çαæĥħ +ro i +ãĢĤ å¿ħé¡» +ãĢĤ æ¯ıä¸Ģ个 +ve ction +ĠS rin +ĠH arness +æĭī ä½ıäºĨ +ä¿Ŀåģ¥ é£Łåĵģ +Ġdisreg arded +æķ°æį® ç»ĵæŀĦ +ä¹Łæĺ¯ åįģåĪĨ +Ġ 说åΰè¿ĻéĩĮ +Ġpre cluded +ĠPass over +ç§ģå®¶ 车 +' id +* out +ï¼Į çĿ£ä¿ĥ +女 娲 +ĠMc Lean +ĠSoc ieties +ĠCH F +æ¶Īè´¹èĢħ 对 +_emb eddings +Ġm oll +ic rous +rodu ces +æľºä¼ļ äºĨ +Ġge othermal +ä¸įåľ¨ å®¶ +è´ŁåĢº çļĦ +, åѦä¼ļ +- Path +ãĢĤ 带çĿĢ +ä¸Ģ çݯ +ĠM afia +åľ¨ åIJĮ +天 èĿİ +ä»Ķç»Ĩ éĺħ读 +ĠFed ora +Ġ 设å¤ĩ +æŃ£ èī² +å°±æĺ¯ è¿Ļæł·çļĦ +äºĮåįģ ä¸ĸ纪 +Ġhyd rate +B ryan +Ġ\ |\ +åķĨ çķĮ +Ġpar alyzed +Ġmicro tub +B ry +Ġn ods +åij¨ æĿ°ä¼¦ +Jul ian +åľ¨ ç»ıæµİ +çī¹ æĸ¯ +_C HA +und en +æĢ¥ å¾Ĺ +æĢĿæĥ³ ä¸Ĭ +åį±éĻ© åĮĸåѦåĵģ +Ġcust odian +Neg ot +om od +ä¸į æīĵç®Ĺ +Ġch ipped +te ga +å¹³ ä»· +ĠAnd hra +ser ious +ãĢĤå½ĵ åľ° +壳 ä½ĵçļĦ +广åijĬ çļĦ +ĠJean ne +ĠFD IC +H ighest +b ios +ï¼Į å±ķå¼Ģ +ï¼Į 赫çĦ¶ +Ġn col +ĠSp ots +Ġcut ters +å¥ĸ èµı +å°Ŀè¯ķ çĿĢ +æķ´é½IJ çļĦ +ĠPurs uant +. 
images +V ern +t weet +ĠTr ilogy +æĭį åĩº +esh oe +bo a +-sh ift +R an +Ġpre historic +aj u +ç§° 龸 +Ġcod on +Ġk B +ĠGe ometric +Ġfast ened +æIJŃ åľ¨ +ĠAff iliates +STR ICT +M and +ä¸į çĶŁ +ç͍ åħµ +åIJİ å¸Ĥ +åħ¶ä»ĸ å®ŀæĸ½ä¾ĭ +[ http +ĠE LECT +.st age +ow icz +Ġdem oc +çķĻ äºĨ +ç§ijåѦ çłĶç©¶éĻ¢ +ï¼Įå·² çĦ¶ +ï¼ĮæĽ´å¤ļ çļĦæĺ¯ +Ġproverb ial +Ġmisfort une +> N +v ian +yp ress +çIJĥ éĺĢ +åŃ£ é£İ +ó l +çĽ¸å¯¹ è¾ĥä½İ +.mod al +, åζå®ļ +Ġam assed +ä¸ĥ åĪĨ +employ ees +ï¼Į轻轻 çļĦ +Ġ第åħŃ èĬĤ +D ic +åĴ Ĥ +ä¹ĭ æīį +ä»İ 大 +è·¯ é£ŀ +Cor relation +Iter ations +åĴĮ æľªæĿ¥ +åİĭ ç͵ +æµĭè¯ķ ç»ĵæŀľ +ï¼ĮåIJĦ ç±» +ãģĭ ãģ£ãģŁ +Ġdenomin ations +çļĦ èĦĸåŃIJ +为 é«ĺ +èĥ½ èİ·å¾Ĺ +Ġsa usages +ï¼Įåľ¨ 没æľī +å§Ķåijĺä¼ļ åī¯ä¸»ä»» +æıIJéĨĴ ä½ł +ĠP MS +æķ°æį® ç»Łè®¡ +èĽ İ +ï¼Į使 ä»ĸ +{l st +s ites +Ġdet ections +åĨ² 浪 +Ġsitu ational +FFFFFFFFFFFFFFFF FFFFFFFFFFFFFFFF +Ġad iab +é£İ å°ĺ +èĥľ åĩº +ĠEm ails +Ġshr unk +éĺ¿éĩĮ äºij +ĠTob ias +ä¸Ģ æĸij +å¾ ĩ +åIJ¸ åĬĽ +ĠChamber lain +å¼ĵç®Ń æīĭ +Franc is +/ non +ï¼Į åĺ´å·´ +ac illus +ï¼ĮæĪij æĢķ +é¦ĸ æĹ¥ +çIJĨ论 åŁºç¡Ģ +ĠJama ican +lo res +社ä¼ļ ç»ıæµİ +ä¸ĭéĻį åΰ +æī¯ äºĨ +⣠© +( rows +ĠC app +String Ref +ĠÐ £ +Ġshort listed +oph il +èĩ£ æľį +é£ŀæľº ä¸Ĭ +H uge +R AP +_ Query +Ġf andom +ĠT uscany +get User +ï¼Ī F +çī© æĿĥ +åĿļ å®ļäºĨ +Ġè¿Ļ 让 +ĠForm ats +_ ht +um ptions +ie ver +å¸Ĥ éĩĮ +æ¸ħ 鼶 +ç»Ħç»ĩ äºĨ +表示 äºĨ +Ġang st +Ñĥ б +R aid +个 ç³ĸæŀľ +å¿ĥ 满æĦı +ne b +è§ģ ä¹ł +é»ij åıij +两大 ç±» +. ubuntu +j av +举 举 +åİĭåĬĽ ä¸ĭ +ĠAssess ments +ĠD ose +åѦ åΰçļĦ +宫 åĨħ +_re ward +ĠWil helm +( Client +ï¼Į 绣çѹ +å¼Ģ çģ« +å·¥ä½ľ çĬ¶æĢģ +Ġequ ator +pkg dir +çļĦ å¨ģèĥģ +ĠM ilit +è¿ĩ å¢ĥ +ĠV y +eth anol +èĮ¶ é¦Ĩ +ĠGrand pa +äºĨ她 ä¸Ģçľ¼ +ĠI MS +浸 æ¸į +m ilk +Ġl or +os its +çľĭ ä¸įä¸Ĭ +ï¼Į éĩĩ +ĠW ishes +没 åľ¨ +Ġcr anes +é½IJ çİĭ +åıĺå¾Ĺ å¾Ī +- reading +Ġsing let +Ġmock s +( SD +Ġdis mal +åıĪ åĽŀåΰäºĨ +-comp lete +交 æĪ¿ +é¦Ļ港 çļĦ +# . 
+ä¸Ĭ ä¹ĺ +Ġref ute +Ġ) -> +ä¸įèĥ½ 被 +Pr incess +,ä¸į æĥ³ +ĠJo ining +Cost s +Hu gh +Ġsouven ir +Ġup ro +åĩł çϾä¸ĩ +åĽĽ 驱 +ĠCol legiate +ĠAll ies +ĠDef ining +_att ention +Ġha uled +æĿ¡ æııè¿° +ä¾ĿçĦ¶ åľ¨ +åįģåħ« å²ģ +Ġcord less +éĹªåħī çģ¯ +ç»ĵæŀĦè¿Ľè¡Į æııè¿° +p is +at ron +ĠE ileen +磨 è¹Ń +åĽĽ èµ· +æ¸IJ åıĺ +éĺ´ æ°Ķ +å¿ĥä¸Ń æľī +å®īéĿĻ åľ° +åµ´ æŁ± +O SE +am is +State Changed +æĮģæľī å¾ħåĶ® +Ġ" âĻª +ï¼Įä½Ĩ 表达æĸ¹å¼ı +导 åĽ¾ +èĥĮ 对çĿĢ +-l imited +æľīæīĢ ä¸ĭéĻį +溢 æµģ +åŁºå±Ĥ åħļç»Ħç»ĩ +å½Ŀ æĹı +_ Key +d rug +大 åĸĿ +ä¸İ 设计 +_t ot +ĠWood land +Scal ed +I x +Q H +ab as +æĿĢ çģŃ +第ä¸ī 天 +,ä¸Ģ è·¯ +Ġprefer ring +ï¼ĮåĪĻ çĽ´æİ¥è¿Ľè¡Į +éĢĤå½ĵ æĶ¹ +èĢķ ç§į +åĩºä¹İ æĦıæĸĻ +Ġend anger +D rivers +ä¹ĭ ä¼Ĺ +å¦Ĥ åIJĮä¸Ģ +Ġaf ar +SQL ite +Ġbacks plash +åĺ¶ åĵij +ĠP helps +(c r +ï¼ĮåĨ² çĿĢ +ä¿ĥéĶĢ æ´»åĬ¨ +å·¥ çļĦ +Ġburn ers +åľ¨æĪij 身边 +ï¼ĮäºĮ èĢħ +ä¸Ģ线 åŁİå¸Ĥ +åįµ å½¢ +ãģķ ãģĦ +B EN +Ġth rives +ï¼Ľ ä¸İ +Th rown +å¤ĦçIJĨ åIJİçļĦ +ĠBr agg +Weight ed +. region +es que +iz io +éķ¿ ä¹IJ +ĠQ in +Ġä¸Ģ ä½į +Ġnorm ality +çµ ¶ +Ġmig raines +ãĢģ缴 è¾ĸå¸Ĥ +Ġoft entimes +ĠP orn +è¿ĺ 设æľī +è§Ĩ èĢĮä¸įè§ģ +et in +ĠC riterion +群 ä¸Ń +ãĢĤæĪij çİ°åľ¨ +Ġeyes ight +bi otics +Med icare +æĴĴ ä¸Ĭ +èĩªä¸» åĪĽæĸ° +Ġoverhe ating +éĵģ èĬ¯ +æĥ¨ æ·¡ +!!!! ! +ĠH eter +uck les +è¡Į å¾Ħ +éĺ² é£İ +Ġmodern ity +æ½® å·ŀ +ï¼Įåīį æľŁ +ĠInsp ire +ĠM CI +æĸ¹ å·® +Ġser pent +Ġfan atic +ĠMa ureen +举 åIJ´ +æĤ¨ æĺ¯ +课 çļĦ +ĠSim ult +Ġresemb led +Ġengra ving +ï¼Į æ»ļ +åİ» äºĨè§£ +_s r +使ç͍ 该 +èĹ ľ +æĶ¿æ²» ç«Ļä½į +ĠCap abilities +. 
tech +< k +£ ¼ +ãĢģ åĽŀçŃĶéĹ®é¢ĺæĹ¶ +Ġdis integ +ĠAl k +.R ole +Ġrepro gram +åĭī åĬ± +ef orm +åıĸ åħ¶ +-d eductible +ï¼Įéĥ½ éľĢè¦ģ +ĠSte iner +Ġking doms +.z end +.X ML +Ġlad en +ĠRespond ents +Ġm ites +ch all +ĠK ry +Ġl Ãł +ch uk +ĠD ri +å¾Ī éĢĤåIJĪ +çļĦæīĭ ç»Ń +bro ther +ï¼ļå¦Ĥæŀľ æł¹æį® +Ġ äºĨè§£ +ï¼Į åIJĪåIJĮ +am aged +âĢĿ åĽŀçŃĶéĹ®é¢ĺ +èĢģ è¿ľ +ãĢĬ åı²è®° +æĶ¶ åIJ¬ +ĠIs olation +ä¸Ŀ çĵľ +w inds +ä¸Ģ åħĥ +æĿĢ æķĮ +CE P +Ġfib rous +Ġassass in +Ġsuper position +ï¼Įåħ¨ æĺ¯ +ĠBreak s +_ ", +ï½ Ī +åįķåħĥ æł¼ +åIJİæĤĶ äºĨ +.S ample +War ren +ï¼Įå°½éĩı 丰å¯Į +ro z +âĢĿ çļĦ人 +æľª 满 +Ġpay er +ĠID irect +-M uslim +å¯Ħ äºĪ +ï¼ ¿ +åį´ è¿ĺæĺ¯ +ĠAcc urate +Ñĥ ж +anal og +åŃº åŃIJ +Ġo auth +ãĢĤ ä»Ģä¹Ī +导 è¯Ń +Ġes ports +OL A +èħ¿ çļĦ +åĽłç´ł å½±åĵį +re ys +ï¼Į èĦļä¸ĭ +Ġwe eping +ï¼Ľ 第ä¸Ģ +ĠEx amine +èι èα +ï¼Įæīĭ ä¸Ĭ +ĠChrom ium +åĪĨ段 æııè¿° +ãĢĭ 对 +åĽłä¸º æĺ¯ +ä½Ļ ç§į +åĿIJ çļĦ +.T race +èIJ¥ä¸ļ åĪ©æ¶¦ +å¾Ĺ åİī害 +Ġsc ree +IF ont +èĭ±éĽĦ çļĦ +ï¼ĮèĤ¯å®ļ ä¼ļ +åıĤèĢĥåĨħ容 å¦Ĥä¸ĭ +ĠAthen a +K ES +ent ious +âĢĿ æĹłæ³ķåΤæĸŃ +Ġsing apore +Ġbar ber +,åı¯ è§ģ +æ°Ķ象 å±Ģ +é£ŀéĢŁ åıijå±ķ +Ġcol lo +-b rowser +ĠNot ch +åľŁåľ° çļĦ +é¤IJ饮 ä¸ļ +è¯Ńä¹ī ç»ĵæŀĦè¿Ľè¡Į +Ġt ink +åľ¨ ä¸ŃéĹ´ +ãĢĭ æĹ¶ +æĦıæĢĿ ä¿ĿæĮģä¸Ģèĩ´ +åºĶä»ĺ 账款 +è¯Ģ çªį +" åıĤèĢĥåĨħ容 +âĢĿ ç»ĵæŀĦè¿Ľè¡Įæııè¿° +åĪĨ 许 +æĿĢ ä»ĸ +(M andatory +表述 æľī礼è²Į +ãĢģå¦Ĥæŀľ åĨħ容è¾ĥå¤ļ +ï¼Įä¸Ķåľ¨ åĪĨè¿° +âĢĿè¿Ľè¡Į éĢĤå½ĵæĶ¹ +æĪĸåĪĨ æĿ¡æııè¿° +ï¼Įä¸Ķ使 æĶ¹åĨĻåIJİçļĦ +"æł¹æį® åıĤèĢĥåĨħ容 +% + +ï¼Į åĨľ +ãĢģ ä»» +em akers +èĢĮ åıijçĶŁ +ĠAl ta +éļ¾ äºİ +Ġpret rained +_per missions +Ġenumer able +Ġmemor abilia +ĠLimit ations +el age +ä¸į ä½İ +ĠD ER +çľ¼ éľľ +åIJĥ 个 +润 èĤº +æİ¥åıĹ æ²»çĸĹ +Me chanical +è±¹ åŃIJ +Ġfor age +她 è¿ĺæĺ¯ +å®ŀéĻħ éĹ®é¢ĺ +æİ¢ éĴĪ +ros ive +^* _ +))/ (( +( obs +Ġst opp +ĠO ll +è§£ äºĨ +Ġtext area +æ·· åĬ¨ +Ġunve iling +æĺ¯ åħ·æľī +ä¸į æĢª +:: : +åĨľæĪ· æķ° +Ġ 许å¤ļ +Ġ( ... 
+но е +, åı¯èĥ½æĺ¯ +Ġhas hing +没æľī éĶĻ +Ġcustom izing +Log Level +åīij 客 +绣 èĬ± +ãĢĤ 康 +ĠM EG +åıij èµ·äºĨ +ĠK ettle +ä»İ ä¹ĭ +éĴ¢ åĮĸ +åīª è£ģ +ï¼Į åĺ´åĶĩ +æĸĩ åįİ +å·§ å¦ĻçļĦ +Ġdimension less +_IO CTL +# " +G am +G len +R ocket +ĠL HS +ĠG X +æ²¹ åĴĮ +Un available +Ġ大 æ¦Ĥ +K ir +uss ing +ï¼ĮèĢĮä¸Ķ ä»ĸ +Aut umn +Does n +Di abetes +ĠART ICLES +Ġdiplom ats +å°ı åŃ¦æł¡ +Ĉ ćĈ +Âł å°±åľ¨ +Ġli ens +.c ar +ĠPh y +çªģçĦ¶ åıijçݰ +, è®°å¾Ĺ +O z +æİ¥ ç¼Ŀ +ä¿Ŀ 级 +Ġback ers +红 æĻķ +åıĸ代 äºĨ +Ġr k +åĴĮ ä»·å̼ +å¸Ī çĪ· +ĠPer uvian +-A ldrich +çļĦ主 æĮģ +D ress +.f unctions +è¡Ģ æ°Ķ +Ġsol l +ĠPat ton +Ġover board +éĺ² éĿĻç͵ +è¯Ĺ çļĦ +åį· å¸ĺ +ad ish +Ġ" =" +äºĶ çĤ¹ +æķ°æį® æºIJ +.set Title +éĵģ æĿ¿ +Cr unch +B ench +Ġ 满 +Ġkn itted +åĩı æ³ķ +å¾Ĺåΰ ä¸Ģ个 +èª į +åĩī äºŃ +ï¼Įè¿Ļæĺ¯ ä»ĸ +Server Error +ãĢģæĸ° èĥ½æºIJ +ç¾İ容 éĻ¢ +ĠProdu ced +ĠB oz +Res idents +çľģ éģĵ +æ½ĩ æ½ĩ +Ġorthodont ic +-burn ing +Ġ ____________ +ä½ł è¿Ļæĺ¯ +å¹¶ æıIJä¾Ľ +ä¿¡æģ¯ ä¸İ +Ġenc ro +.set Image +æĸĩåĮĸ çĶŁæ´» +Ñģ ли +å®ħ åŁºåľ° +Ġswim mer +& s +: key +l vert +ĠE TS +åŁİ æĬķ +åĮ» åĺ± +-d estruct +å¾· æĽ¼ +Ġsqu amous +ï¼ĮæĽ´ 为 +Ġbubb ling +Strateg ies +S pl +âĢľ çİĭ +.m aster +Ġsl ur +ä¸ĬäºĨ 车 +åĴĮæĪij ä¸Ģæł· +ç͍ éĴ± +ï¼Įä¸Ģ åıªæīĭ +åŃĹ è¿¹ +ä¸ĥ åįģäºĮ +ä»Ģä¹Īæł· çļĦ人 +ä¸Ģ ç¬ijéģĵ +- ST +def ining +çłĶ ä¹ł +Ġanecd ote +/ logo +al ics +Ġnew bies +ï¼ģ è¦ģ +éĢł 访 +ä¼Ļ åĦ¿ +å¹´ æĺ¥ +æĪIJ æ´» +éĥ½ æĬĬ +溪 åİ¿ +ĠW arcraft +Sc ar +Ġextrem ism +Ġintest ines +- enter +c ow +Ġ ãģ® +å¤ĸ éĵ¾ +ç«ĭ ä¸ĭ +Ch arts +å±ħä½ı è¯ģ +èħĬ æľĪ +ath ons +è´¢æĶ¿ æĶ¶åħ¥ +èĤ¾ èĻļ +.text ure +R ap +_EX TERNAL +Ñĸ д +ï¼Ľ åħ¬åı¸ +ĠK H +åķ ī +æĢ» ç½² +yn ski +鸡 æ¯Ľ +âĤ¬ âĢľ +ĠBuff et +B UY +ĠS aves +La ure +ĠBa um +å½±è§Ĩ ä½ľåĵģ +Ġ èĥ½å¤Ł +ï¼Į éĩijèŀį +çļĦ åıĮçľ¼ +Ġg nc +ĠW ARN +对 ç¾İåĽ½ +ĠTest ed +AX B +ä¹Łå°± ç®ĹäºĨ +æ¶Į ä¸Ĭ +Ġ å²ģ +_g p +Ġpiece wise +is empty +ภ· +Ġε ν +好 åѦ +å¸Ĥ åİ¿ +èᝠ䏏 +æ¹Ľ æ±Ł +è¯ģå®ŀ äºĨ +) }^{ +Ġman or +缴 çϽ +çĨŁ çĿ¡ +ê n +Ġcolonial ism +z ap +ï¼Į çϽèī² +æŃ£ 轨 +仪åύ 仪表 +è´± 人 +çĭĻåĩ» æīĭ +-incre asing ++ ^ +l anguages 
+var rho +éĩį大 é¡¹çĽ® +æij§ æ®ĭ +å°ijæŀĹ å¯º +b ir +Ġ ç½ij +Ġ å±Ĥ +_d ocs +ï¼Įå®ĥ ä¼ļ +Ġcam ar +ĠCompl aints +åIJİ çĽ¾ +IN CT +åĮ»åѦ è§Ĥå¯Ł +Ġtip o +_PI PE +æľīäºĽ å¥ĩæĢª +硬 çĶŁçĶŁ +fe as +ĠPear ce +å¾Ĺä¸į åģ¿ +ï¼Į å½°æĺ¾ +ĠC MA +bl ind +Ġrespond er +obacter ium +Numer ous +\ M +Ġqu its +eb el +yl im +åĪĢ éĶĭ +çıį 妮 +åı²ä¸Ĭ æľĢ +为 人æ°ij +æĭĽ èĢĥ +á¹ £ +Ġhast ily +-Semit ism +ãĢģ åºĶ +ãĢģ åĴ³åĹ½ +çļĦæĹ¶åĢĻ ä¸Ģå®ļè¦ģ +_sh adow +åĴĮæĪij ä¸Ģèµ· +æĶ¹ç¼ĸ èĩª +' ` +, æIJŃéħį +ä½ł åĪļæīį +ï¼Į对 åIJ§ +ford shire +æĤ¬ 空 +Ġtort illa +Ġcl inging +Ġun protected +Ġcoll ars +á v +Ġorth odox +ĠRan cho +ĠG ad +)) : +éĢģ ä»ĸ +Ġlinked in +ĠBus ch +âĢľ 请 +.Try GetValue +op rene +ĠX Path +') \ +ï¼Į常 å¹´ +Ġlent ils +Ġhe res +ain ment +Ġev okes +ĠCAP ITAL +n ement +.g son +ĠAng ola +Ġnic est +ï¼Įæĺİ æĹ¥ +æĢ» æ¯Ķ +è¾¹ 沿 +Ġap ocalypse +Ġpilgr ims +at ypes +åįģ ä¸ĢçĤ¹ +å¤Ħ æŃ» +åįĬ è·¯ +ä¸ļåĬ¡ åijĺ +Obs ervation +ĠCred entials +Ġ åī¯ +ĠB az +ass ie +Ch r +ï¼Įåıª æĥ³ +çĶļ è¿ľ +åįķä½į åĴĮ个人 +.put Extra +n oc +åİ» åŃ¦ä¹ł +认为 è¿Ļæĺ¯ +â Į +ost atic +ber os +ãĢĤå¦Ĥæŀľ åľ¨ +.Des erialize +-trad itional +** ãĢĤ +åıij éľī +æijĬ ä½Ļ +æ¯ĶæĭŁ çļĦ +ĠEvangel ical +_ enter +}} }{\ +Celebr ity +ĠP ell +对 åħ¶ä»ĸ +Ġ\ \[ +æ¶ ª +ĠMir iam +Ġgrat uit +ï¼Įå¹¶ 使 +ç³ĸ åĪĨ +å¦Ĥä¸ĭ æŃ¥éª¤ +» , +社 åijĺ +äºij å¹³åı° +ãĢĤæĪij ä¸įçŁ¥éģĵ +åĵĢ ä¹IJ +lene cks +ä¸Ńåħ¨ä¼ļ ç²¾ç¥ŀ +Ġco ef +åįİ ç¾İ +ĠER K +亦ä¹IJ ä¹İ +ĠS phinx +Ġsh ove +çĿĢ è¿Ļ个 +éĻĨç»Ń ç»Ń +æĪij们 åħĪ +表 éľ² +æĵį ç»ĥ +ãĢĤä»ĸ 表示 +ä¸įåĬ¨ 声èī² +æĸĩåĮĸåĴĮ æĹħ游 +ĠB ourn +åŃ£ æĬ¥ +è¡¡ éĺ³ +ĠLo vel +ĠNav igate +ä¸į æĶ¶ +èµ· åħµ +Ġplan ter +è°Ī ç¬ij +驱åĬ¨ ä»¶ +^+ $ +St rike +Ġdef amation +Con sequently +é¦Ĩ èĹı +èĤĮ çĺ¤ +( Element +av ens +ä¸İ èĩªçĦ¶ +åIJij è®°èĢħ +Ġhot line +ä¸įå¾Ĺ 转载 +æĸ¹æ¡Ī ä¸Ń +éŁ³ä¹IJ èĬĤ +ï¼Į éļ¾å¾Ĺ +çļĦ æĿ¥è¯´ +ãĢģ ç»Ļ +çĺ ´ +Mer chant +ĠC ah +trans pose +èĪŀ ä¼ļ +Ġpsych osis +B att +Ã İ +çļĦ 说æĺİ +Ġre CAPTCHA +ub arb +æĬ± åĽ¢ +ĠShould n +éĤ£ è¾Ĩ +Ġget Class +Ġback story +ï¼ĮåIJĮæĹ¶ åıĪ +竣工 éªĮæĶ¶ +Ġb mp +Ġl da +Ġover run +ĠInd uction +Ġconsider 
ate +注æĦı çļĦ +æ¤įçī© åĽŃ +对å¤ĸ æĬķèµĦ +大 æķĻåłĤ +Ġab iding +Ġgra ppling +Ġo sg +çļĦ çĽ¸å¯¹ +ĠG rap +æĭī å§Ĩ +çĶŁäº§ æĬĢæľ¯ +ä»ķ éĢĶ +P eg +ä¸ī åľ° +Ġextension Registry +Ġµ M +Ġimpover ished +ä»İ严治 åħļ +j sp +ï¼Į æĢģ度 +ãĢĤ 忽çĦ¶ +Ġunder represented +ï¼Įå°± å°Ĩ +Ġpar anoia +ĠSl ater +Ġrein vest +Ġindemn ify +Ġcanc elling +v ich +se i +end ium +è¿Ľè¡Į æµĭè¯ķ +ä¸ĵ æŁľ +ï¼ĮåIJĦ 大 +Ġski pper +ĠDynam o +Ġrept iles +ĠLibr arian +ï¼Į åĮħè£ħ +æīĢ æĸĻ +没æľī ä¸Ģä¸Ŀ +ç»ĵ è¯Ń +é» ľ +éĩĬ çĦ¶ +ĠDE FIN +ï¼Įå¤ļ åįĬ +Ġdisp ense +Ġmaxim izes +å®ļæĹ¶ åύ +Ġse ep +ost i +çľĭ åģļ +Ġmon op +.W in +年被 è¯Ħ为 +éĻªåIJĮ ä¸ĭ +å¼ķ人 æ³¨çĽ® +Âł to +con cil +æĥħ ç»ĵ +ĠAs st +亩 产 +纽 æī£ +çĽ¯çĿĢ ä»ĸ +Histor ic +-s ales +éŁ³ åĥı +é»ij 帮 +Be coming +询 éĹ®éģĵ +çľ· æģĭ +æĻĵ 举 +æ³¥ æ³ŀ +ĠStat ue +æľī æĸ° +Ġem erald +âĢĶ one +æķĻåѦ çļĦ +è§Ĵ度 æĿ¥è¯´ +æ¹ĸåįĹ åį«è§Ĩ +ĠlastKnown FileType +c oded +PE X +磨 çģŃ +润æ»ij åīĤ +çļĦ é»ijèī² +-d eg +å®Įåħ¨ 被 +Val ley +èĥĸ èĥĸ +oct et +è¿ijåľ¨ åĴ«å°º +ï¼Į å±ħæ°ij +æ´ µ +ï¼Ľ æŃ¤å¤ĸ +ĠED TA +, 转 +- env +Ġform ulating +_in formation +Ġcur bs +åľ°åĮº åĴĮ +èݱ åħĭ +ĠHaw thorne +ĠAnalyst s +ĠAttend ance +ï¼Į è¡¥åħħ +ãĢĤ éŨ +ass el +Ġdis content +头 绪 +Ġlight est +Ġpick les +å®ŀæĸ½ä¾ĭ æĪĸ +ï¼Į主è¦ģ åĮħæĭ¬ +P d +f ishing +Ġl umps +-> {$ +æĭ¼ æŃ» +Ġfra ught +Ġproc ured +ï¼Į èµ¶å¿Ļ +Ġem ulsion +èĢģ 大çļĦ +产åĵģ å¼Ģåıij +_n b +æł¹æį® èĩªèº« +Ġquarter backs +ĠEU RO +Jan et +Ġâĭ ħ +ĠHe zbollah +游æĪı éĩĮ +èīĺ èι +Ġchi ropractor +- radio +ĠP ause +ĠR itual +ĠG AM +Ġsp oof +ĠCon an +Ġhit ters +-ch arge +åij¼ åͤ +Ġreproduc ibility +Ġc w +ĠN DP +èĻ ± +Ġtrans national +ä¼ģ åĪĴ +field Name +寻 æĢĿ +ï¼ĮåħĪ åīį +ä¸įåĩº æĪ· +Ġdiscrim inated +.al ibaba +ĠG AL +ĠG PI +æĹ¥ åIJİçļĦ +åİŁ åij³ +-n one +_dis claimer +Ġpedag ogy +L oved +ãĢģ èĭıå·ŀ +è¿Ļ 头 +æİ¨ 论 +ç o +Prel ude +C arm +ĠH aha +主è¦ģ åĪĨ为 +æ¯į è¯Ń +SO LE +) çļĦä¸Ģ端 +_ IV +æĭ Ħ +æľ¬ æ³ķ +åı¤ 建çŃij +Ġasync io +/st orage +ĠWhit ening +ĠB TS +æīĵ æ°´ +Ġcra bs +W et +r dev +çļĦ åį¡ +Ġv antage +ä¹Ł æ¸IJæ¸IJ +åħ¶ éģĵ +Ġdeb unk +Method Impl +æĢĿç»´ çļĦ 
+â te +æĭĺ æĿŁ +ĠLon ely +M ars +Ġi y +Ġprim ordial +ĠEver green +Ġconjug ated +Ġholog raphic +/ arm +Ġl ak +æĸ¹ çĻ¾è®¡ +æĢ§ åĮĸ +ç¾İ çͲ +WO OD +( åħ¶ä¸Ń +s ensors +åĨħ åĬŁ +ä¼ł è°ķ +ï¼Įåľ¨ åħ¨åĽ½ +代çłģ å¦Ĥä¸ĭ +ï¼ĮçĶļèĩ³ è¿ĺæľī +çīĪæĿĥ æīĢæľī +ic ata +ry ker +管çIJĨ åѦ +åħ« éŨ +ï¼Į请 åıĬæĹ¶ +ĠSuper visors +unn ies +ĠStream s +· å¸ĥ +comm itted +(l oss +mar shall +åĬłæĿĥ å¹³åĿĩ +- coming + į +Ġ 建议 +Ġp cs +æ³ ± +è¿Ľè¡Į çİ°åľº +ï¼Į便 åı¯ +çĶŁçī© æĬĢæľ¯ +Mar co +Ġprox imate +V ILLE +pr udence +å¹²åĩĢ æķ´æ´ģ +å¿ħçĦ¶ çļĦ +. required +n ip +ä¸į æµİ +çļĦå£°éŁ³ åĵįèµ· +Ġg ynec +ac ross +åĩº èĩªå·± +æ´ª æµģ +ãĢĤä½ľä¸º ä¸Ģ个 +$ ~ +L TE +ĠH uck +代 è´Ń +)/ \ +_END POINT +ĠâĬ ķ +"> č +æľīä¸Ģ åIJį +æı´ å¼ķ +äºİ ä¸Ģ身 +wit cher +-li quid +èĬį èᝠ+ä¸ŃçļĦ éĩįè¦ģ +Ġcapt ains +ï¼Įåĩº éŨ +æĪIJ åĽł +空éĹ´ åĴĮ +èĩ£ 妾 +è¿Ī åħĭ +_ad j +_AC COUNT +ain ter +sh ade +æ¯Ķè¾ĥ å¤įæĿĤ +ä½łä»¬ éĥ½ +ĠCOR POR +( äºĶ +Ġ ä¸ĬåįĪ +ï¼Įåı¯ æĢľ +Ġsprink ler +at chers +Ġc love +ï¼Į å¿ħè¦ģæĹ¶ +ĠT ract +åĽŀ èĢģå®¶ +æĢ» éĥ¨çļĦ +éĢIJ 个 +ãĢĤæĪij们 è¦ģ +æijĬ è´¹ç͍ +x fc +åĬ¨ 产 +天 æ¡¥ +éĥ¨ ç»ıçIJĨ +å·² è¶ħè¿ĩ +Ġtra b +Ġmen ace +ĠCal m +Ġfre estyle +æĿı èĬ± +Ġle ur +ĠTechn ological +æĮ¯ å¹ħ +\ fi +Ġsu g +主 éĺŁ +æŃ¤ è¯Ŀ +åĩºçݰ æķħéļľ +. 
Entities +è° Ĩ +åħ¬åı¸ æ²»çIJĨ +é¾Ļ çıł +èµĦ产 ç»Ħ +Ġax ios +人群 çļĦ +Ġtint ed +Ġ åķĨåĵģ +Ġo int +-p ayment +åĭĩ å¾Ģ +æ¶Ī失 çļĦ +ĠAdminist rators +M ATH +Ġbe arer +ere l +ä¸İ çĶŁæ´» +ï¼Įåīį èĢħ +æģ¼ ç¾ŀ +å°ĸéĶIJ çļĦ +åģľ ä¸ļ +Ref Count +)= - +Ġcurs es +ï¼Į çĻ¾åº¦ +ï¼Į æĽ¼èģĶ +çļĦ èĮĥåĽ´åĨħ +ãĢģ èij£äºĭä¼ļ +ï¼Įåı¯ çİ°åľ¨ +ĠRE IT +CON S +ĠMack enzie +RE W +æĭī éĿ¢ +è£ħ饰 æĿIJæĸĻ +ĠInitial ized +happ iness +B erg +èĢģ å¹²éĥ¨ +ä¼ł éģį +Big g +ï¼Įçݰ å°Ĩ +Ġsnork eling +, åıĹåΰ +对 åIJĦ +æ¶Ī失 åľ¨äºĨ +ÂĢ Ãij +æľī ä¸īç§į +· èİ« +sv c +è´¸æĺĵ æľīéĻIJåħ¬åı¸ +éħįå¥Ĺ çļĦ +磮 å°ı +æķ°åįģ ä¸ĩ +.Byte String +ĠCorpor ations +ĠS ensitive +Ġes l +ĠE id +ĠV ag +ç§ij 级 +_p review +Ġerr atic +j g +ess er +æĪij 被 +Ġtra umat +æĪı 份 +Ent repreneur +track s +en emy +ĠW n +åıij æĿ¥çļĦ +éĩį åĨĻ +ä½İ è¯Ń +.R eturn +ï¼ĮæľĢ æĸ° +ĠS op +åģľ ä½ıäºĨ +æī¿è½½ çĿĢ +ISTR ATION +-bal anced +Ġ éļıåį³ +Ġlic ences +Ġundis closed +c ision +Ġn oc +ĠE bay +ï¼Įä»ĸ ç͍ +åħ·æľī éĩįè¦ģæĦıä¹ī +main ly +nav List +.send Message +ä¹°åįĸ åIJĪåIJĮ +ãĢĤæ¯ı å½ĵ +å´©æºĥ äºĨ +泪æµģ 满éĿ¢ +. 
[]{ +ud ad +éķ¿ åľ¨ +æĬĬ éŨ +èĩªæĪij ä¿ĿæĬ¤ +_ ast +ĠP arm +ĠL OS +ä½ł å¾Ī +IL ON +Red uced +ãĤĵ ãģª +, éĿł +ãĢĤ ä¸ĭä¸ĢæŃ¥ +-s cript +缮åīį æŃ£åľ¨ +é¥Ń çļĦ +Gr anted +Ġvibr ational +ĠSie gel +.S DK +ĠDent on +ĠC iti +ĠM andy +ĠG om +èĥ½ è®©ä½ł +Entity Type +_u art +Ġstew ards +ãĢģ éĿĴå²Ľ +Ġsp ree +éĿĴ 天 +oph osph +æ±Ĺ çıł +(err ors +W STR +Ġass urances +åħ¬åı¸ 注åĨĮ +åİŁ çĤ¹ +ï¼ĮæĪij åĸľæ¬¢ +Of Class +ĠSem ester +ĠGoth am +_OPER ATOR +æľ¬ ä¼ļ +åĽłä¸º åľ¨ +è°ģ ä¹Ł +_R W +ĠBank er +Tra iling +åĸĦèī¯ çļĦ +Ġscaff olding += L +Ġ åķĨä¸ļ +åĪĿ æľŁçļĦ +Ġpret ended +ĠWeb inars +, ç©¿ +ve cs +ä¸Ģ è¿ŀ串 +Ġinter ceptions +Ġref inery +çļĦå°ı äºĭ +代çIJĨ è®°è´¦ +æĭĽåķĨ éĵ¶è¡Į +Ġsarc astic +ï¼Į æĪIJç«ĭäºİ +Ġimp urity +ĠAr rival +æľĢåIJİ è¿ĺæĺ¯ +å¿ħé¡» å¾Ĺ +æĿ¯ æ°´ +κ ε +ĠElastic search +\ Lambda +ĠN acional +ain en +ĠO culus +è¿ĩ æ²³ +åķ ¾ +acet yl +ĠG uth +ãĢĤæĪij è¦ģ +.Invariant Culture +, value +â ļ +ï¼Į 念 +æİ¥ ä½ı +ĠSh abb +æĵ ŀ +综åIJĪ çĹĩ +(B oolean +ĠI ps +Ġreimb ursed +èĤ¯å¾· åŁº +ĠCl ouds +ĠMart ins +åĭ¾ åĭ¾ +ĠBUT TON +ï¼Įç»Ŀ对 æĺ¯ +, ä¸įåIJĮ +åĽ½ å¸Ī +.M ODE +Check point +ĠGal ois +( editor +K w +ĠV apor +å¯Ĩ ä¸įåı¯ +æĹłç¼ĺ æĹłæķħ +( Layout +ï¼Į å®ĺæĸ¹ +Ġg char +æ°´ å¸Ī +ä¿Ŀ çĽijä¼ļ +ĠU zbekistan +ENT AL +带æĿ¥ æĽ´å¤ļçļĦ +ĠBL UE +,âĢĻ âĢĿ +Ġperpet rator +ĠεÏĢ Î¹ +Ġdomest ically +C ycl +ä¹ĭ è¯į +; : +ç»ı 纬 +ï¼Įè¿Ļ 段æĹ¶éĹ´ +U d +ãģ ² +ron omy +å®ģ å¹³ +æł¹æľ¬ ä¸įçŁ¥éģĵ +ç¾İ丽 乡æĿij +_min us +ä¸į 饱åĴĮ +ç³» 主任 +æĭ¥æľī ç§»åĬ¨ç͵è¯Ŀ +Del iver +X box +Ġ åıĤåĬł +Ġan aph +é£İ èµ· +å© º +Ġmetaph ysical +å¿ĥçIJĨåĴ¨è¯¢ å¸Ī +ĠBloss om +åĪ© åīij +æ¥ļ 天 +] ` +Ġ è½» +_log s +T itan +çļĦ æķĮ人 +ĠB ingham +ĠF elt +ä¸ĭ åįĬåľº +æ²¹ æĿ¡ +Item Type +_R ING +ĠDynam ical +- Share +_P D +ĠPar an +Ġsubs cript +éĥ½ä¼ļ éĢīæĭ© +ĠMark us +èĢ½è¯¯ äºĨ +c airo +j y +Ġ æŃ» +Ġcr ates +Cost a +, ä¸ŃéĹ´ +ï¼Į æ²³åĮĹ +ï¼ĮåıĪ åľ¨ +æĹ¥ ä¹ĭåīį +åħ¬åı¸ æĺ¯ +" Then +R ational +éĺ´ èĮİ +Ġconver ged +M ari +æ¯Ķ ä¹ĭåīį +两个 å°ı +鸡 çļ® +渡 æ²³ +ãĢģ åºĶæĢ¥ +ĠR acial +æ¶Ī 亡 +ï¼Įåıª æĺ¯ä¸Ģ +æ¶Īè´¹ çļĦ +ä½ł 身边 +_SC HEMA +çļĦ æįŁå¤± 
+ĠH DF +å¹´ æ£Ģ +ĠTrans itional +åıªè¦ģ æľī +没äºĭ åĦ¿ +Ġvow els +Ġh opped +ãĢĤ æķ´ +Ġinc arn +åĨ· é£İ +ä¿ĿæĬ¤ å±Ĥ +Vari ation +é£ŀå¿« åľ° +G PT +l bl +Ġs acked +åŃĺ 亡 +æĹłæ³ķ æİ¥åıĹ +å°¼ æĹ¥ +ĠHay ward +ï¼Į åķĨåĵģ +好 åĩłå¤© +_S ize +å¦ĥ åŃIJ +C ub +J ill +åİŁ ä½į +ars on +æijĬ åŃIJ +ï¼Į æłĩåĩĨ +ĠC PD +Ġcr umb +é¾Ļ å²Ĺ +ĠIm agination +èĪŀ åĬ¨ +表éĿ¢ å¤ĦçIJĨ +ĠIKE A +\ ne +Ġp ears +å°±æĺ¯ ä½łçļĦ +ibr ated +pre viously +, å½ĵåīį +ĠC URL +our ke +æķĻ ä¹ī +ĠPart icular +åĪº çĮ¬ +ĠMaur itius +å¸ĥèݱ æģ© +æİĴ éĩı +乡 å¸Ĥ +ĠGr atitude +çĨŁ çŁ¥çļĦ +ĠIm am +çĶļèĩ³ è¿ĺ +èĥĨ æ±ģ +ĠCrow ley +Ġ ç®Ģåįķ +æ°´ åĮº +ï¼Įä¸į æĦ¿ +_ place +To Add +- span +it ans +æĹ » +Ġdes ir +绾ç»İ ä¸įç»Ŀ +( one +F lying +} v +ï¼Į ç»ĦæĪIJ +Ġn ifty +Ġaden ocarcinoma +Ïģι Ïĥ +rocy te +\ Data +è£ ± +tt ed +ï¼Įä¹Ł åı¯ä»¥éĢļè¿ĩ +ĠSystem atic +æİ¨å¹¿ åºĶç͍ +绵 ç¾Ĭ +, åĬłå¤§ +Ġ çijŀ +å°± æĭ¿ +å¹² åĬ² +æĵįä½ľ æĹ¶ +æĺĤ é¦ĸ +Ġnod ules +B ert +åĬ« æĮģ +åĨĽæľº 大èĩ£ +ãĢĤ å®ŀ +çĶŁçĹħ äºĨ +ĠW icked +æĽ´ åĥıæĺ¯ +é£ŀ åĪĢ +çļĦåīį éĿ¢ +. Args +ĠS ushi +åĨĽ è£ħ +å¹¶ä¸į 好 +æĬĴ åıij +è̏äºĨ è̏èĤ© +F ork +ĠG ED +ard t +Ġres ized +ï¼Įä¸į åľ¨ +è°ĥ åħ¥ +å¾® åĩī +Ad ams +é©¶ åħ¥ +夹 çĿĢ +Ġmsg id +æıIJéĨĴ éģĵ +anim als += z +ĠR TX +åľ° 使ç͍ +Ġstock ings +Cert ificates +Ġwart ime +t ys +ï¼Į 说çļĦ +ãĢģ 身ä½ĵ +å¤ļ 说äºĨ +Ġmay o +害 çļĦ +Thanks giving +D OS +O liver +Ġ 读 +Ġ ä¸ī个 +An nie +ĠOutput s +åĵĪå°Ķ滨 å¸Ĥ +ï¼Įç»Ļ人 ä¸Ģç§į +, çĶ·äºº +ä½ĵ éŃĦ +.N O +Facebook Twitter +ĠHelp ers +飵 å¾ĭ +å¸ĺ åŃIJ +(pro perties +ç»Ļ人 以 +D av +ãĢĤ ä¸ĭåįĪ +ĠC LOCK +ĠW IDTH +没 æĢİä¹Ī +Ġcard stock +ĠD yson +ï¼Įæľī 许å¤ļ +ĠMiss ions +Well ness +åľ¨ä¸Ģ å®¶ +è£ħ饰 åĵģ +ä¸ĵ æĪ· +çļ® æįŁ +æķij ä¸ĸ +.Append Line +_ Id +åΰ æīĢè¿° +ï¼ĮæĪij ä¹Łä¼ļ +cr atch +å°Ŀè¯ķ ä¸Ģä¸ĭ +H TT +宣 读 +à¤ Ĺ +Opt imize +é«ĺ æķĪçİĩ +æŃ¤ 书 +ES G +第ä¸ī åIJį +çļĦä¸į è¡Į +é¢Ħ ä¹ł +Se at +çĮ« çļĦ +ader ie +E rin +é»Ħ çŁ³ +ãĥ į +Ġstaff ers +ç§ģ èIJ¥ +DC s +èĢIJå¿ĥ çŃīå¾ħ +ĠIn formatics +ä¸İ åIJĦ +让 她们 +åij¨ åĨħ +ĠRec ession +å¡« åŁĭ +Red uction +ãĢģ ä¿Ŀéļľ +åĮ IJ +Ġfact ually +_{ [ +客 å¥Ĺ +ä½İ éŁ³ +åį« 
ä¸ľ +ä¿ĿéĻ© åIJĪåIJĮ +ph thal +Ġ<< < +èĤ¤ è´¨ +ï¼Įå®ŀ åĪĻ +Ö ¸ +è¿Ľè¡Į 管çIJĨ +the rapy +.d st +å®Ŀ 鸡 +-st at +æ¼Ĥ ç§» +é«ĺ级 ä¸ŃåѦ +ï¼Į æĺ¨æĻļ +个 ä¸Ń +åİĭ ä½ı +çĶŁäº§ åĬłå·¥ +ĠBal loon +ĠTrace y +â ĥ +Ġ( ); +åĴĮ å®ŀè·µ +âĢĶ are +Ġback stage +.h l +rect angle +æľīäºĽ çĸijæĥij +ï¼Į å°ļæľª +ä¹Ł å¾Īå¤ļ +èĤ¡ä»½ åζ +å±Ī æĮĩ +C ER +ãĢģ èij£äºĭ +_s f +Ġble aching +åı¯ è¦ģ +ä¹Ł ç͍ +äºķ æ°´ +ĠMold ova +ãĢģ çĶ³è¯· +åĪĨ å¿ĥ +ãĢĬ ä¸ĸçķĮ +æŁIJ ä¸Ģ天 +å±¥ è¡ĮçļĦ +ĠIMD b +ĠK are +Ġco z +è±Ĩ åŃIJ +ä¸Ģéĥ¨åĪĨ å®ŀæĸ½ä¾ĭ +ä¸į å®ŀ +è¦ ı +å͝ä¸Ģ èĥ½ +ĠNev ille +ï¼Į å¥Īä½ķ +ĠB RA +åĴĮ éĤ£äºĽ +alf a +Ġtick er +Ġpartition ed +Mal aysia +Ġ æĶ¾ +åıĹ åij½ +_{ { +inter op +=' # +ä¾Ŀèµĸ æĢ§ +.Un known +U çĽĺ +ä¸Ģ æĸ¹çļĦ +åΰ çİ°åľº +Ġem ulation +ĠAm herst +, åİ¿ +ï¼Į æİ§åζåύ +å¦Ĥæŀľ è¦ģ +Ġleft ist +æīĢæľī åζ +(p i +Ġbroad casters +å®īå¾· é²ģ +- operator +t reatment +in itive +æľ¬ æĿ¡ä¾ĭ +ï¼Ľ èĢĮä¸Ķ +Ġpr ides +Ġrearr ange +e urs +j ia +åľ¨ æĹ¥å¸¸ +Ġad obe +æĽ´ åºĶ该 +ç¾İ é¢ľ +æ¯į çĮª +åŃĺåĤ¨ 设å¤ĩ +Ġneck line +ĠYose mite +K on +ãĢĤ åĢĺèĭ¥ +() "> +.w in +ï¼ĮåıĪ éģĵ +èĩ£ åŃIJ +åİŁæľ¬ æĺ¯ +ĠMAG IC +G lob +re ast +at el +大 éĺŁéķ¿ +è¿ľ 端 +Dis position +ls l +C mp +Ġt é +çļĦ æĪĺäºī +åΰ 大家 +äºĴèģĶ äºĴéĢļ +\ dot +ãĢģ ä¼łæĴŃ +æĹ¥ å¼ı +åħ³ ä¸ĬäºĨ +.U int +Ġnot ary +Ġsome place +-d uration +(* ( +ĠM odes +ĠP lex +Ġ& . +åıĺ åŀĭ +OO LEAN +ĠTher mo +é½IJå¿ĥ åįıåĬĽ +çļĦ åĽ¾ +ĠSh ang +IDD LE +h un +m ethyl +Ġ å®ļä»· +ĠF illing +æµ· åķ¸ +èµ° è¿ľ +inc ar +comp an +Ġlit ig +ĠMur doch +ï¼ĮåĽĽ å¤Ħ +ãĢģ ä¸Ķ +éĵ¶ 两 +Restaur ants +çĿ ¢ +åĬ³ æĸ¯ +å²Ĺä½į èģĮè´£ +ãĢģ 鼶 +ru id +æ¡ ģ +Ġgu ise +å·²ç»ı ä¸įåĨį +Ġsn iper +- encoded +Ġn ipple +ĠH ib +æĹ¥ åıijå¸ĥ +Qual ifier +_ adv +åĽĽ åIJĪ +主è¦ģ æĺ¯åĽłä¸º +ï¼Įæľ¬ æľŁ +Ġstra pped +.mark down +产åѦ çłĶ +s imp +ãĢģ åĽ¢ç»ĵ +pl aintiff +ä¸ī æĽ´ +ĠEl sa +ä¸į ä¹IJ +Ġav ian +è¡Ģ å°ıæĿ¿ +ĠApp les +å«ģ å¦Ĩ +Ġδ εν +. 
cy +ĠB azaar +ĠMy c +asc a +,以 æŃ¤ +ĠMagn olia +ï¼Į çĭ¼ +ĠPet ty +ĠAL PHA +Est ablish +çīĩ çĬ¶ +ĠAttribute Error +ab br +ä¸ĭ æľī +åĨį è¿Ľè¡Į +å¿« èµ° +Ġsec ures +Ġä»ĸ 说 +Ġcom un +ĠE ste +Th irty +åĨį æ²¡æľī +ï¼ĮåĨį ä¸Ģ次 +讨 ä»·è¿ĺ +è£ħä¿® 设计 +ç¡ķ æŀľ +é£İ 顺 +ç¡® åĩ¿ +èĥ½å¤Ł è¾¾åΰ +(c v +± ÃIJ +Ġsuppress or +Ġunp repared +.B ox +- Ind +ãĢģ åľ°çIJĨ +çĸ¾ é©° +ï¼Įéĺ¿ å°Ķ +åĪĨæīĭ äºĨ +åºĶ纳ç¨İ æīĢå¾Ĺé¢Ŀ +ï¼Į èŃ¦å¯Ł +id ata +åľ¨ 两 +æīĢ çļĦ +åįĬ çĶŁ +/ max +D f +ĠF uck +ob ot +ĠCon ner +èĥ½å¤Ł ä»İ +ĠF ilename +ï¼Įä¸Ģ å¿ĥ +Ġpower fully +ĠGu am +æĢª å¼ĤçļĦ +, åįģåĪĨ +大 çŁ³ +Ġ' ] +Ġcomm utes +,ä¸į éľĢè¦ģ +R outine +å®Ŀ åħ¸ +顾 åIJįæĢĿä¹ī +åѤ å¯Ĥ +hash Code +èĢIJå¿ĥ åľ° +ĠWE LL +Agric ulture +æģ¶ä½ľ åī§ +ĠBeth any +.Logger Factory +ï¼ ¯ +ãĢĤ å®¶éķ¿ +ĠCh alk +li pped +åĮ»éĻ¢ ä¸ĵå®¶ +Par ad +æ¹ĸ人 éĺŁ +/ example +El apsed +çĭŃ å°ı +Ġe erie +ä¸Ģ 绣 +Ġex cretion +ï¼Įä¸Ģ 座 +è´´ çīĩ +ä¸ĭ æ£ĭ +æľ¬ ä¸ĵä¸ļ +ĠPat ron +æ´ŀ åºľ +ĠFeed s +, åĿIJ +ï¼Į 顾客 +ãĢĤ ç»Ħç»ĩ +ol ition +Ġon s +ä¼ł 令 +åı° å¸ģ +æľīä¸Ģ å¼ł +g ps +çļĦ è¾ĵåħ¥ +ä½ ¶ +ĠD uties +Ġr RNA +Ġsl ag +Ġ ¿ +æĭ¿ æįı +ï¼Įçľ¼ åīįçļĦ +p z +ãĢĤ ä¸įåIJĮçļĦ +ig l +ĠR ama +ĠJ ia +åıij æĿ¥ +æ¯Ķ ä»ĸ们 +ï¼Į çIJĨè§£ +ĠOr bit +æĿĢ ä¹ĭ +Ġarm ored +è®°å¿Ĩ çļĦ +ĠProsecut or +ĠCorb yn +k bd +ĠS ponge +渴 æ±Ĥ +Ġt igers +ä¸ ¶ +Ġan che +çļĦ åĽºå®ļ +ĠS weeney +çļĦæĹ¶åĢĻ åı¯ä»¥ +ĠDE ST +ĠDC HECK +Ġcrack down +ĠPyth ag +om mu +ãĢģ å®ŀéªĮ +ĠG w +ĠO c +-s ongwriter +-f ledged +Ġbal conies +åĪ¶ä½ľ å·¥èīº +ĠGL FW +ĠGeorg es +G auge +ãĢĤ 缸åħ³ +ãĢĤ 建ç«ĭ +id irectional +ĠG rief +æĪij们 ä¸įèĥ½ +ç²¾ç¥ŀ çĬ¶æĢģ +ĠEm phasis +Ġchip set +åºķ åŃIJ +ç²Ĺ æ°Ķ +Ġda ÃŁ +pay day +Viol ation +M os +æĪĺ åĬŁ +_R ULE +奥 åĪ© +}}( {\ +; x +S andy +æĪij åı¸ +ä¸Ģ个 éĿŀ常 +ĠU pt +å¤ļ æĭī +ä¸ŃåĽ½ åĽ½éĻħ +æ²Ļ æ²Ļ +OV ERY +èİ« è¨Ģ +ç¡ķ 大çļĦ +emp lo +å®¶éĩĮ æľī +graph ic +第ä¸ĥ å±Ĭ +.Visual Studio +å°ı èι +ĠSt ing +ĠUS P +ï¼Įè¦ģ åľ¨ +åĩºæ°´ åı£ +è´¨éĩı æİ§åζ +_G UID +æĬµ éĶĢ +æĬ« é£İ +Ġlunch time +ĠKom mission +. 
visual +æĪĺ å±Ģ +çł´ åı£ +缴æİ¥ ç͍ +IF ORM +, è¿ĩåİ» +Ġ ç»§ç»Ń +ĠI EL +ĠR ho +Ġcomm uters +Let t +èĦ± ä¿Ĺ +æģ¢å¤į çļĦ +å¼Ģåı£ éĹ®éģĵ +ĠAT A +ĠP TA +é£ĺ é¦Ļ +ï¼Į çİĩ +大 礼 +ex clusive +ap r +楼 æĪIJ +è¿Ļ款 车 +, åΰåºķ +Ġe aves +æ±Ł éĺ´ +P ts +k ubuntu +pp elin +举 å³» +.m ember +èĬ± çĽĨ +Ġsk yl +,å¦Ĥæŀľ æĺ¯ +Ġthe ses +ap ar +_M T +èĭį çĶŁ +ĠPo et +ï¼Įä½ł åΰåºķ +æĿĢ ä½ł +Ġsuper flu +ä¹Ŀ äºĶ +MP P +'' ( +ĠRh ino +建 ä»ĵ +åĦ¿ æĹ¶ +åį± éļ¾ +ï¼Į轻轻 åľ° +ĠKel vin +al leg +å°± åĽłä¸º +ï¼ļ åĪ©ç͍ +End Of +å¹¶éĿŀ å¦ĤæŃ¤ +ĠRh ythm +\ Form +æł¡ éŨ +ä½ľåĵģ æľī +Ġchem ok +Ġsoft ening +çĥ¤ é¸Ń +Ġrig ged +N ike +ï¼Į åĩĢ +åĪĨæŀIJ ä¸Ģä¸ĭ +åı¯ ç͍çļĦ +å°ı å²Ľ +å̼ æĺ¯ +许 æĦ¿ +èĤī é£Ł +ç®Ģåįķ äºĨ +æĻºæħ§ åŁİå¸Ĥ +cr ash +âĢĿï¼Ī ãĢĬ +ĠAl c +éģ¿ å¼ĢäºĨ +æĺĤ æī¬ +))) ** +- el +æµģ äºij +æĮģ ä¹ħçļĦ +Ġaw fully +æĮ¯ ä½ľ +å·ŀ åŁİ +Ġaf rican +è´ŀ è§Ĥ +ĠMos que +id on +Ġun for +Ġsm e +,ä¸Ģ å¹´ +èIJ½å®ŀ åΰ +ï¼Į æ¹ĸ +ä¸Ģ ç¿» +åĴĮ åĸĦ +æİ¥ æĽ¿ +åģļ æĪIJçļĦ +ï¼Įä½ł ä¸įä¼ļ +erc a +å¾ģ åħµ +èµĦæºIJ åħ±äº« +rien ne +åij¨è½¬ çİĩ +æĥħ æŃĮ +Ġyear ning +Ġautom ating +æ¡Į éĿ¢ä¸Ĭ +ĠGold stein +ad one +æĿ¥ åķ¦ +Ġun ic +éĩij 森 +æŃ¤ èµ·å½¼ä¼ı +An alyze +èϽçĦ¶ ä¸įæĺ¯ +ठ¦ +宽 é«ĺ +ĠImage View +åºĻ å®ĩ +èĿİ åŃIJ +Aggreg ation +ãĢĤéĤ£ æĺ¯ +åľºä¸Ĭ ä½įç½® +æĩĬ æĤĶ +ï¼ļ çĶ· +OT OS +Ġåľ¨ è¿ĻéĩĮ +ĠBal m +è¿Ļä¸ĢçĤ¹ ä¸Ĭ +Ġrepay ments +çļĦ æī¿è¯º +åĴĮ åķĨä¸ļ +ï¼Įä¹Ł æĺ¯ä¸Ģ个 +åı² ä¸ĬçļĦ +iner ary +éĩijåĪļ çŁ³ +交çķĮ å¤Ħ +è¿ĺ å¤Ħäºİ +çļĦ人 å·¥æĻºèĥ½ +ĠST L +Sim ulator +ĠStock ton +(sc ene +R ising +Ġg reek +ãĢģ å¢ŀ +_C ARD +æī§ 念 +æ²ī 沦 +Dr ink +ROW N +, æķĻèĤ² +[ q +Ġf oyer +ãĢĤ éĥ½ +ĠT OS +è£ Ĩ +ãĢĤè¿Ļ éĥ¨ +ĠWho ever +(s d +çļĦåIJį 声 +er ous +ĠY ok +é«ĺ å®ĺ +ç² ³ +Ġbreak age +çļĦçľĭçĿĢ å¥¹ +A mber +å¿ĥ 声 +çIJĨ æĪIJ竳 +éļ į +Ġx c +long itude +ĠInf rared +æķ°æİ§ æľºåºĬ +s us +å¾Ī æĹ©å°± +客 åķĨ +åĵĪ æĭī +_g uid +ĠMal ibu +Ġadm iring +_ Open +x ious +ĠD ort +带 ç͵ +ä¸ĩ æĸ¹ +æ¸ħ 空 +çļĦä½įç½® ä¸Ĭ +èĭ¯ äºĮ +çļĦ ä¸ī个 +åĬªåĬĽ åľ° +Ġal veolar +çĶŁæ´» æ°´å¹³ +ĠIslam abad +_COM MENT +········ ········ +( MAX +cent ed +æ¯į 鸡 +Ġhor rifying +_process 
or +ĠM SA +ĠL SD +ĠG ems +æĿİ å°ı +Ġsw ag +å©ļ åºĨ +ĠBig ger +çķª å¤ĸ +ç»ĵå©ļ è¯ģ +ĠKauf man +ä¸Ń 大 +åIJĪçIJĨ å®īæİĴ +/g ems +b ud +ä¸Ń éĵģ +çĶŁ åĦ¿ +æĸ¹æ³ķ è¿ĺåĮħæĭ¬ +(b it +宿 è¿ģ +Ġp oo +ĠS ao +Ġrec ited +åľŁ çļĦ +, åı¶ +ï¼Įè¿Ļ åıª +çĽĺ æķ´ +PRO GRAM +伪 åĬ£ +åᏠæĸĻ +onitor ing +AUTH OR +s imply +ä½ł å¾Ĺ +_ex pect +re ported +çļĦ çģ¯åħī +âĢĻ a +å¢ĥ åĨµ +è½® èŀįèµĦ +ĠLa uncher +ĠShen zhen +F ruit +Ġde ems +ä¸İ åĪĽæĸ° +èΰ éķ¿ +èĬ³ åįİ +ç»ŀ çĹĽ +èIJİ éĿ¡ +, 建设 +G RA +天 å¿ĥ +Ġ# $ +ï¼Į为 客æĪ·æıIJä¾Ľ +çĽIJ åŁİ +å°±ä¸į ä¸Ģæł·äºĨ +Ġip rot +Ġd udes +ĠP ik +ï¼Įä½İ 声éģĵ +æĻ¾ å¹² +Ġ èµĦ +ĠH ahn +è¡Į äºij +çľĭ éĢı +ĠK rak +ï¼Ł ä½Ĩæĺ¯ +-d ouble +æŃ¤ åħ¬åijĬ +ĠCom pos +åıĪ ä¸įèĥ½ +å·¥ä½ľ æĸ¹æ¡Ī +æŀ¶ 设 +P oker +h ipping +l uster +ï¼Į 举åĮĹ +ig u +Ġr st +æĹ¶ ä»» +ä¹Ł 以 +ç±³ æĭī +èµĮ 注 +å©ī 转 +çĸ¯çĭĤ åľ° +严åİī æīĵåĩ» +ĠæĽ´æĸ° æĹ¶éĹ´ +, åĴ±ä»¬ +çļĦ ä¾Ŀæį® +åı¯ åĪ« +ov ar +ä½ķ åħ¶ +ä»ħ åī© +éĻį æģ¯ +ĠU ng +CT G +ĠSnow den +( .. +\ lim +in ative +对 åħ¬åı¸çļĦ +åı£ èĪĮ +ï¼Įåľ¨ 线 +æŃ¦ åĪĻ天 +ĠAdd r +é£İæł¼ åĴĮ +Ġdoubt less +Ġwave guide +Ġwashing ton +ĠNort heastern +pkg ver +ĠEdd y +ï¼Ł å®ĥ +-m atched +Ġid yllic +_IN FORMATION +æµ® èĤ¿ +. Offset +Ġ ä½ĵéĩį +Ġvol umetric +åħ±åIJĮ çĤ¹ +ront al +ï¼Į车 åŃIJ +mill an +ĠNathan iel +åºĶæľī å°½æľī +å¦Ĥçģ« å¦Ĥèį¼ +ா à® +. 
ãĢIJ +.get Source +ĠThat cher +åģľ åľ¨äºĨ +ur de +éģĵ åľº +è·¯ åŁº +çŁ³ åĪ» +Ġing ress +ĠJur assic +Ġwast eful +S quared +ï¼Į æĸĹ +om ac +the ory +æĿij æĿij +нÑĭ м +( Activity +ĠAl ph +ĠNew ly +ĠEz ek +Ġwhirl wind +è¡ ĵ +Service Helper +ĠDraw er +Ġpatriot ism +ä¸ī å®Ŀ +å®ī 迪 +亲 åħµ +ä¿ĿæĮģ ä¸įåıĺ +äºĭæķħ åıijçĶŁ +_PL UGIN +P ED +ĠF K +ĠEnd e +çļĦ å¿«ä¹IJ +ĠR uf +é«ĺ éŁ³ +Impl Options +.Cl oud +ĠPACK AGE +L icensing +ï¼Į ç½ijåıĭ +ãĢĤ 社ä¼ļ +åįģåħŃ å¹´ +Ġcontest ant +çļĦ çİ»çĴĥ +im id +æĪij åĪļæīį +æĤ² æĦ¤ +æİ¥è¿ij äºİ +åī§ æ¯Ĵ +æĮģç»Ń åΰ +èª ª +Ġdivis ive +ĠG Ps +èħ ¼ +å°Ķ æľ¬ +åĶIJ 人 +-ind ent +åıij æĢĴ +æĪijçļĦ åIJįåŃĹ +é¢ij è°± +f leet +us ky +ne ur +丰 åı° +ä»įçĦ¶ åľ¨ +ĠSant o +Ġe agles +æľº çİĩ +_c mos +è½» åĬŁ +Ġvolcan oes +, id +ĠS orted +Ġst abbing +** âĢľ +å¾Ĺ æ°´ +å¤Ħ 级 +ä½ı çĿĢ +ern al +éĢīæĭ© æĿĥ +ãĢĤåľ¨ è¿ĻäºĽ +Equ ivalent +ï¼Įéļı æĦı +O SS +çļĦ åĬ¨çī© +æķ° åĢį +æłĪ éģĵ +丨丨 丨 +å° ĩ +ä¸į çŃīäºİ +æĹł å¥ĩ +æĸ°éĹ» åıijå¸ĥä¼ļä¸Ĭ +Ġsouven irs +ç͍ ä¹ĭ +ä¹Ł æĿ¥äºĨ +åŁ¹ æł¹ +Ġear buds +.st ereotype +æīĩ å½¢ +( Result +in variant +ĠM ister +æĿ ³ +ãĢģ éĤĵ +AD T +ä¹Łä¸į å¾Ĺä¸į +Ġhor rend +,åı¯ æĥľ +缸ç»ĵåIJĪ çļĦ +Ġin activity +ĠT f +è° ª +ï¼Ľ C +åĪ« æł·çļĦ +cent re +èİ« åıĬ +ĠMod ifier +ĠCare g +æľīä»Ģä¹Ī ä¸ľè¥¿ +Ġì ĥ +æĭīå¼Ģ 帷å¹ķ +åĩºåĽ½ çķĻåѦ +Ġse per +çŀħ çĿĢ +çļĦ æ¯ĶèµĽä¸Ń +ĠR ai +åıij åΰ +çĤİ é»Ħ +Ġsplit ter +åįģåħ« å¹´ +, èµ· +Ġz w +_M ISC +ĠCo pp +Ġserge ant +- Sp += test +Ġg ulf +_m id +Ġhealth iest +ï¼Įä»į æĹ§ +ãĢģ åľĨ +æīĵ æ»ļ +ç»Ī æĹ¥ +ET TE +Ġliter als +Ġretro grade +Ġd olphin +è¦ģ éĴ± +ĠSt ellar +Ġder by +æĭ¨ ä»ĺ +æī³ æľº +K ER +ï¼Į æīĶ +å°±æĺ¯ ç͍ +èĥģ è¿« +Ġdwell ings +æľ¬é¢ĨåŁŁ æĬĢæľ¯äººåijĺ +, åħ¨éĥ¨ +好 æĹ¥åŃIJ +ä¸İ çݰæľīæĬĢæľ¯çĽ¸æ¯Ķ +æłij ä¸Ģ +Ġpsych opath +ĠLoad ed +ĠKoh l +ĠMoment um +è§£ åĽ´ +Ġpneum atic +ï¼Į åĵİ +æĬķ å¥Ķ +ern ames +æµĭè¯ķ 仪 +. 
tele +ï¼Į 鼶 +æĥ³ ç͍ +æ¸ħ 宫 +Ġmed i +æ»ij è¡Į +çĿ¡ åIJ§ +æŁ± å¡ŀ +诽 è°¤ +ï¼Į 导æ¼Ķ +ĠC TR +ĠG ong +Ġob lique +ĠSk inner +åıijèµ· 人 +Ġp name +ä¸Ń å°± +è´Ł 离åŃIJ +endo za +Ġdissemin ate +ãĢĭ ç¬¬åĽĽ +Name In +æ½ ŀ +Ġdeep copy +Ġsem is +æĢĿæĥ³ å®¶ +çĦ¦ è·Ŀ +Rest ricted +, 女æĢ§ +ĠAss oc +ĠTrend ing +ï¼Į è¯Ħä¼° +ç«Ļ ä¸Ĭ +åıªè¦ģ æĺ¯ +çľŁç©º æ³µ +Ġintersect ing +( product +ãĢģ 书 +Ġlot teries +ç²¾ç¥ŀ ä¸ĬçļĦ +ĠSt rom +åı¯ä»¥ æľīæķĪ +ĠBe irut +顺 çľ¼ +æ»´ åĬł +ĠE I +Ġby stand +ï¼Įå°± ä¸į +stra ight +ĠG IT +Ġch ills +App roved +uten berg +.comp lete +ĠFO LLOW +Ġt rom +ãĢĤ 第äºĶ +ĠR OT +缸 çļĦ +西 奥 +è¿Ļ个 ä¸ĸçķĮçļĦ +åįİ æ¶¦ +ç½Ĺ 宾 +åıĤåĬł æ´»åĬ¨ +åĭIJ èĻİ +s outh +Ġ 帧 +ãĢģ ç£ģ +åľ¨ è§Ħå®ļ +满 èħĶ +aron i +Ġapolog ise +T ak +ãĢĤ æ¯ı个人 +ç® ´ +çľĭ ä½łçļĦ +omet ime +Ġ è®°å¾Ĺ +ï¼ī æľī +Ġfl utter +ESS AGES +æĹł çŃĸ +Ġsk id +ĠTri umph +åİĭ缩 空æ°Ķ +R ATION +or os +ãĢĤ ä»į +ãĢģ é²ľ +大 å¸ħ +åı¥ åı¥ +éľ² åı° +秦 æ±ī +ĠMon et +fe res +D 项 +Ġ å·¥ +ous s +ĠCan aan +ä¹Ŀ çϾ +Ġbare foot +çļĦåıij èĤ² +pl and +Âł æŁ³ +ĠV endors +ï¼Įä»ĸ ç»Īäºİ +广 çļĦ +me asures +æ¯į çα +æ·±åħ¥ æİ¨è¿Ľ +ÑģÑĤв о +as sembled +è¿ĺ åıªæĺ¯ +Ġapp rais +åĽŀæĶ¶ åĪ©ç͍ +Ġsore ness +åĮ» ç¾İ +Ġsil enced +hum id +( plot +èģĶ æ¬¢ +ï¼Į è§īå¾Ĺèĩªå·± +ãĢģ é¤IJåİħ +ĠR age +ep ro +ĠK odi +ĠIncre ment +èļĮ åŁł +ank o +ĠOne Plus +agg ie +-lo ved +æĹıèĩªæ²» å·ŀ +Ġhither to +åĨĽ 人çļĦ +离 åİ»çļĦ +Sh apes +-per fect +ç»ĵ åºķ +æ±Ł è¾¹ +åħ´ çļĦ +èĥ¡ éĢĤ +åı¦è¡Į éĢļçŁ¥ +ï¼Įå¹¶ 被 +èĭ¦ çļĦ +æľīçĤ¹ å°ı +åĪļåĪļ 好 +管 ä»ĸ +Ġread out +Ġbl itz +-p olar +Ġо д +æijĶ è·¤ +ï¼Į以æŃ¤ æĿ¥ +Ġtranqu ility +ãĢģ æķ´çIJĨ +æĬĬ æĮģ +è£ħ é̼ +_pro j +çļĦå¿ĥ è·³ +éϵ å¢ĵ +没æľīä»»ä½ķ çļĦ +Ġpes ky +Ġim aged +æį¢ è¡£æľį +Ġpast ures +欲 è¯ķ +ï¼Į请 èģĶç³» +ĠBlog gers +ĠSqu ee +a ussian +Ġ ç´¢ +ä¸į å®ģ +Ġi outil +Res istance +-def inition +Ġrenormal ization +s impl +ï¼Į 积 +(' " +ï¼ĮæĽ´ è¦ģ +Ġdepart ures +çĶŁèĤ² ä¿ĿéĻ© +Linear Layout +ãĢģåī¯ æĢ»ç»ıçIJĨ +ĠTob ago +_src dir +ĠVT SS +, é¡¹çĽ® +Ġt innitus +re li +_C IPHER +ane ers +ĠRed dy +ä¸Ģä½ĵ åĮĸçļĦ +Ġunint entionally +ï¼Į åģ· +Ġb ah +ĠH 
irsch +ĠN inet +æĸ¹éĿ¢ æľī +éľ² 头 +åIJ¸å¼ķ 人çļĦ +èĩª éĩį +è¾¾ å·ŀ +è¿ľ æĸ¹çļĦ +æĶ¿åºľ åľ¨ +ĠAg ar +æĪIJæľ¬ é«ĺ +V ent +es gue +ĠM EDI +æ°Ķ è¿IJ +éĹ® åı· +о е +åħļ åĨħ +åĶ¿ 声 +f ection +ĠA CH +ä½ľ è¯ģ +å·² åΰ +èį ł +ä¸įåı¯ æĬĹåĬĽ +ãĢģ æĮģç»Ń +å¼ ģ +è§īå¾Ĺ èĩªå·±çļĦ +Ġrub ble +Ġnu ance +_IM PL +å¹´ åIJĮæľŁ +ç½ij 约车 +è´µ éĺ³å¸Ĥ +ãĢĤå¦Ĥæŀľ ä¸įæĺ¯ +èĩªåĬ¨ æİ§åζ +Ġmic row +ĠRow an +d av +} r +åĩº è¨Ģ +Ġreb ates +.con sole +Jes se +幸 çļĦæĺ¯ +ĠOff ensive +éĶĢåĶ® æ¸łéģĵ +indust ry +åĸ ı +åĮĹ å¸Ĥ +çļĦä¸Ģ ç±» +_g u +ãĢĤä»ĸ æĬĬ +Ġrev olutions +æĶ¯éĥ¨ 书记 +Haw aii +- cy +为 æķĮ +æĸĩ çİĭ +_d isp +åłĨ éĩĮ +è§Ĩè§ī ä¸Ĭ +_COMP AT +çģŃçģ« åύ +ĠR ansom +Comp etition +çĵ¶ è£ħ +èł ¡ +ĠLor raine +m apper +ï¼Į å°ij女 +Ġb anging +Ġm ichael +ĠPM P +ï¼ĮæĹł ä¸Ģ +h urt +å°ı çİĭ +åύ ä»¶çļĦ +åĽ¢ æĪIJåijĺ +产ä¸ļ çļĦåıijå±ķ +åĽŀå®¶ åIJİ +Ġhood ie +ä¸į çĨŁæĤī +ĠDe aling +Ġconf defs +ï¼ĮæĹ¶ ä¸įæĹ¶ +[ K +æĺ¯ æľ¬ +æĽ´ æĹ© +åĪ© å¼Ĭ +_N EG +ĠBhar at +/ dd +ate e +ĠL J +åĨħ çī¹ +-t oo +Ġelev ating +Ġaggreg ator +_ Log +k f +Ġgr out +éŁ³ è§Ĩé¢ij +éĤ® ç¼ĸ +èĢĮ å°½ +ï¼Į许å¤ļ 人 +period ic +Ġa version +Ġbe ets +å¤ļ 级 +åĪĨ 以ä¸Ĭ +Ġsynt actic +åħ»èĢģ æľįåĬ¡ +åīĸ èħ¹ +Ġy olk +å¦Ĥ 常 +Or Update +çĸ¾ é£İ +çĶŁéķ¿ åľ¨ +第ä¸ĢçϾ ä¸Ģ +ĠLandsc aping +ãĢģ ç§ĭ +æīĭ åĨĻ +ĠNo Such +ĠInst alled +Hel vetica +ãĥĥ ãĥĹ +Ġga ussian +ï¼Į å¼ĺæī¬ +Ġex uber +æİ¥ ä¸Ģ个 +ĠMake file +Ġer as +Ġfra il +Ġcere bro +Ġclever ly +ĠEmpower ment +æĪĸ éĿŀ +æĬ¤ 身 +Ġautog enerated +èĢĥèĻij è¿ĩ +æĸ°åĨł çĸ«èĭĹ +ĠBuh ari +Ġr amb +ï¼Į 读èĢħ +Ġto pper +ĠT anya +表达 çļĦ +Ġfict itious +_ ix +av link +æ¯ķ çĶŁ +è§£åĨ³ æİī +Mark ers +Ġmerg es +Ġunimag inable +Ġ æ³¢ +Ġ æĿ¾ +.p ass +软 äºĨ +_PRO GRESS +sear ched +ï¼Į èĦij +æ± © +ob le +åįĥ å¹´çļĦ +æĸĩåĮĸ èĬĤ +look ed +Ġrail ways +ä¸Ĭ 表 +åIJĮ æĦıçļĦ +转 念 +Ġline ages +è¾ĵ çIJĥ +Ġpar able +çĨŁ èĻij +ï¼ĮæĢ» åħ± +ï¼Įæĸ¹ é̏ +/ __ +ĠT rey +âĢľ 谢谢 +天 æĹ¶ +çĭ¬ç«ĭ æĦıè§ģ +Coun sel +Celebr ating +/ nginx +n pos +Ġcl a +ï¼Ī åħįè´¹ +å±± èħ° +ï¼Įä½ł åĴĮ +ç½Ĺ 纳 +èĥ¸ èħĶ +ĠSolar is +Ġdowns ides +åŃľ åŃľ +Ġè¯ģåΏ 代çłģ +ë ª +Ġup holding 
+çĤ¸ å¼Ģ +Works hop +Z ach +s ess +ãĢģ ç¾İæľ¯ +å¾® åĪĨ +(w rite +Ġbill board +ĠText Appearance +Ġtrigon ometric +ĠL n +æĿ¥è¯´ æĺİ +Ġ 缸 +ĠK iwi +缮 åħ± +ym b +å°ģ åı· +Ġleak y +ãĢģ æ¯Ķ +è¡Į ä¸įè¡Į +éĿŀ常 ä¸įéĶĻ +DR A +å·¥åħ· åĴĮ +null ptr +ĠDev in +Configure Await +éĴ£ éĩij +为 ä¾Ŀæīĺ +æĢĢ éĩĮçļĦ +ç»ĵåIJĪ çļĦ +æĬ½ 空 +(dir name +w ah +Ġal oe +Com pose +åĩı ç¨İ +ç§ģ è¯Ń +è·Į èĩ³ +source LineNo +ï¼Įåı¯ä»¥ ä»İ +Basic Block +, çļĨ +. other +d ana +es o +Ġp armesan +St ake +åħ³ æĸŃ +]) [ +ĠBrit ann +å®ĺæĸ¹ å¾®åįļ +(err no +OND ON +Ġ 京 +lect able +èĦ¸ åŀĭ +Ġfont size +ĠEffect iveness +ĠVenet ian +T rav +Ġun loaded +åĨĻ å¥½ +æĻĴ å¹² +议论 纷纷 +ãĢĤ æĺ¥ +äºĨ 该 +ï¼Ł è¿ĻäºĽ +Ġte h +Ġam enable +å·¥ä½ľ æĺ¯ +äºĶ æĹ¥ +éľĢè¦ģ æĽ´å¤ļçļĦ +表 å¼Ł +_RE CE +æĬĺ çݰ +ภĤ +Gr ass +ĠHard wood +ĠGro ovy +Comb ination +ãĢģ人 äºĭ +å¯Łè§ī åΰäºĨ +(f oo +å¹¶ä¸į 容æĺĵ +Ġprosecut ing +æĦŁåºĶ åύ +Ġ åı³ +ĠN atur +Ġemp athetic +å¹¶ä¸įæĺ¯ ä¸Ģ个 +ĠN ails +åĴĮ å®ŀæĸ½ +-f amous +åħļ 代ä¼ļ +èµµ åĽ½ +Ġrev amped +ĠN ost +åĮĸ å¸Ĥ +.Set up +èĮĤ 缼 +Ġman ne +亲 æĺµ +è¿Ī åħ¥ +ĠG ron +å±Ĥ 楼 +åĿļ 硬çļĦ +çļĦ大 å¤ļæķ° +bad ge +ose conds +az es +è¶ħ è½½ +Ġair ways +Ġ é¢ĺ +ĠTrans parent +æĮĤ ä¸Ĭ +ãģª ãģı +_form ats +å´Ń æĸ° +( Collectors +æŃ£ æĥ³ +åŃĺ æ¡£ +.C ross +.assert In +ĠAu ctions +Ġm exico +ST ACK +н ов +没ä»Ģä¹Ī 好 +è¶Ĭéĩİ è½¦ +ãĢģ åIJ¸ +æ°´ 乡 +ï¼ĮæĪij ä»İ +çİĭ ä¸Ģ +Ġblack out +.B asic +åĩºåı° äºĨ +Ġconcaten ate +B race +b roadcast +Âł åıªæĺ¯ +ä¸ĩ æ¡¶ +èµĦæºIJ éħįç½® +Block ed +碰 碰 +buy er +åŃIJ æħķ +Ġco agulation +Ġincl ine +Ġ çݰ任 +ĠP emb +Ġdev otional +_pro bs +åħįè´¹ æıIJä¾Ľ +ï¼Įå¿ĥ 头 +ëĵ ¤ +èĥ½ åĬ¨ +彩 å¦Ĩ +è¿Ļ两个 åŃĹ +N FC +ould ers +没 å°ij +å®ĺ èĥ½ +(& $ +èį· å°ĶèĴĻ +æĸ¯èĴĤ èĬ¬ +é¡·åĪ» éĹ´ +_ ), +ï¼ī åıijè¡Į人 +æŀĹ éĹ´ +Ġdi aries +åıijçĶŁ äºİ +oper atively +ï¼ĮåıĪ ä¸įæĺ¯ +|| || +åĬŁçİĩ 为 +å°ı æĿİ +å¿ĥ èĦijè¡Ģ管 +Ġapp rehension +追 梦 +- Net +Ġ æ²IJ +ãĢģ å®¶åħ· +ãĢĬ éĩij +è¡Ĺ åĿĬ +ä¹Ļ éħ° +Ġdissemin ated +æİ§åζ åύçļĦ +ä¸İ 大家 +没æľī çļĦ +Ch ad +chan ics +Ġprec arious +çĭł æīĭ +ä»° åį§ +Ġvandal ism +ãĢģ èĪªç©º +åıĮ é±¼ +wit ched 
+è§ĦåĪĴ 建设 +éĶIJ æĦı +, æľĭåıĭ +ï¼Į éͦ +ess enger +Ġbi jection +Ġ æĺ¾åį¡ +æī¾ æī¾ +ĠMah m +D as +ï¼Ī åħ± +å±ķ æ¼Ķ +CT OR +å¢Ļ çļĦ +.read Int +Ġverb atim +Ġspat ula +ĠCauc asian +Ġm uy +æīĭ ç»ĺ +Ġam in +Ġ\" $ +ãĢģ çľģ级 +æī ¦ +Ġk cal +Ġout raged +åī¯ å°Ĩ +æĭĽ æŀ¶ +Ġcapac itors +à« ĩ +( Unit +. vertical +åĽłä¸º æľī +ĠMu ir +ĠLear ners +å·¥ä¸ļ éĿ©åij½ +.sh adow +ez vous +ï¼ĮéĿŀ常 éĢĤåIJĪ +ĠAw akening +ĠFlu ent +- util +ï¼Į åķĨå®¶ +ãĢĤ æ£ĢæŁ¥ +ĠA TC +ĠR if +å·¥ä½ľ è¿Ľè¡Į +è¿ŀ ç´¯ +-p atient +éĺ¿ åħĭ +æ®ĭ 骸 +Min ister +I z +u ire +ĠWe instein +.P ackage +Ġrect al +.Generated CodeAttribute +Ġin continence +ä¸Ń æĹ¶ +éĹ® åıĬ +Hand led +Ġп оÑģ +âħ £ +W U +ä¸į 误 +ãĢģ é϶ +ĠF reak +å¸Ĥ éĽĨ +å·² è¿ĩ +éľĢè¦ģ 帮åĬ© +mon ster +ĠRed eem +")) ) +S witzerland +s age +Ġ èĩ³å°ij +Ġw reak +å¾Ĺ åIJį +ï¼Ľ 第 +è§£ çļĦ +西 欧 +å·¥ä½ľ æµģç¨ĭ +éĢł åŀĭçļĦ +ï¼Įåħ¶ åĮħæĭ¬ +ä¹ĭéĹ´ å½¢æĪIJ +, çłĶç©¶ +, æ¯ıä¸Ģ个 +ĠD ior +ĠDe cker +ales e +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠ +ENS IONS +æµģéĢļ èĤ¡ +w et +im me +ĠC ebu +_p arty +Ġbond age +Ġfunn iest +Ġsapp hire +ç»Ļ 她çļĦ +éŃĶ çķĮ +åĨľå®¶ ä¹IJ +Ġchuck le +åįĹå®ģ å¸Ĥ +ĠC utter +ä¸ī éŨ +ï¼Įå®ĥ å°Ĩ +çļĦ æĻºèĥ½ +ç¾İ äºĨ +.s cal +ç¼ĸ å§Ķ +Key Name +è½½ æ³¢ +Ġbar ring +.Set Value +çĨĶ çĤ¼ +quant um +Ġdiast olic +Âł years +ens ky +- img +; * +åIJĪ æĪIJçļĦ +è§ģ ä¸įå¾Ĺ +æ±ĩ åħij +é£İæĻ¯ 线 +_SK IP +Ġ 带çĿĢ +æĭĽ æı½ +.F LAG +è¿« äºİ +微微 ä¸ĢæĦ£ +ä¸Ĭå¸Ŀ çļĦ +ĠS aud +ĠP ledge +ĠH SP +roph ot +åIJĬ 车 +ãĢĤ æĺ¯ä»¥ +èĩª æŃ¤ +Ġra ined +(t ok +çĿ£ æĬļ +ĠConvey or +C able +Ġtr ich +ĠAl le +æĽ¾ 说è¿ĩ +Ġå¹´ åĪĿ +åĩºæīĭ äºĨ +Ġremodel ed +Ġbrit ish +: ä»İ +ĠM ais +ä»ĸ éĿ¢åīį +ï¼Ł æĤ¨ +æĮĩ æ´¾ +Ġpred icates +=\ "" +æ¦Ĥ念 èĤ¡ +Ġpriest hood +ĠFor bidden +ĠFrank enstein +Ø ¸ +ĠH RT +æĥ³ ä¸įéĢļ +çīĪ çĶ» +ï¼ĮæľĢ ä½İ +SO CK +ĠHomes chool +缸éĤ» çļĦ +h ler +Ġm owing +ãĢģ çŁŃ +ãĢģ ä»ĵåĤ¨ +ä¼ļ åıĺ +没 çľĭè§ģ +ĠCh ore +Su itable +ï¼ ® +Ġl adders +ç¿ Ĵ +è¯ij 为 +ä¸įæ¸ħ çļĦ +âĺĨ âĺĨ +, éģĵ +m apped +t old +èĩª å°ı +Ġad jectives +ĠK and +èĢģ éģĵ +AG O +ï¼Į为 ä½ł +rote 
ins +ÈĽ i +ĠProte ins +çĿ¾ 丸 +/ error +Ġ åıijçݰ +ol r +ä¼ł éŁ³ +ĠRh ine +DIS CLAIM +ĠBlank et +Ġs og +, èĭ±åĽ½ +c itations +æĸ¹ åľ¨ +æīį 被 +ä½ı åĿĢ +_l ambda +ï¼Łï¼Ł ï¼Ł +Ġmoistur izing +at oms +ãĢģ åIJį +ãĢģ åħ±äº« +our y +å¹¶ ç»Ħç»ĩå®ŀæĸ½ +åħį åİ» +è¡£ 人 +ĠÏĦ ιÏĤ +Ġlag oon +Ġ{ [ +é¡ ŀ +åħ¨ çĽĺ +.s ignal +(p ub +çĭĤ åĸľ +host ing +_grad ient +, 讲 +缴 éĢļ车 +ï¼Įä»ĸ 竣çĦ¶ +ĠRes earcher +.reg ex +ir ate +çľĭ æĬ¤ +UT ER +Ġinj unctive +ĠLa placian +,åı¯ è°ĵ +æĬ¥çº¸ ä¸Ĭ +.inter faces +èĶij è§Ĩ +Ġreplen ish +ĠIMPORT ANT +ell ion +Data Frame +çͳ æĺİ +Ġlin ewidth +Ġprob ed +ĠCG AL +, æīĢè°ĵ +天 åı° +.S ort +stand s +Reg exp +els en +Ġpract ising +ĠMod elling +èĩªä¿¡ çļĦ +ĠNorm andy +åħ¬ 车 +ĠK ras +Ġx max +LE V +With Context +ï¼Į 广åijĬ +Ġg fc +åľ¨ åIJĦç§į +åĨ· èIJ½ +ĠRest ricted +ÙĬ ر +æĸ¹ä½į æĪĸ +æłĢ åŃIJ +åĩº æľī +iff el +Ġrestart ing +zs che +ĠO MX +çŃī çݯèĬĤ +-d en +éĶĢåĶ® éĩı +enz o +Ġvas cul +ĠC us +æľ¬æ¬¡ ä¼ļè®® +Media Type +B racket +D iane +V irt +Z ERO +ĠB ets +åİ» çİ© +åľ¨è¿Ļ çīĩ +ĠMil ford +_al ignment +Ġindemn ity +ï¼Į æĹ¥å¸¸ +ãĢģ ç͵åĬ¨ +Ġme ch +æIJŃ è®ª +ĠHam m +Pay Pal +ĠSt unning +Ġflo ppy +åģı æĮ¯ +çļĦèĢģ å©Ĩ +ï¼Į éļIJ +ra co +åĩ» éĢĢ +ĠRec ipient +ĠChar itable +W endy +Ġmay onnaise +ï¼Įè¿Ļ åħ¶ä¸Ń +åŃĹ çĶ» +åºĵ åħĭ +çŃīä½ł æĿ¥ +åĴĮ æī§è¡Į +ARR Y +Ġ åıĸå¾Ĺ +ï¼Į 羣çļĦå¾Ī +od ate +âĢľ ä¸ī个 +ep hy +è¿ĺ ä¸İ +æĥħ 书 +ä¸ŃçļĦ åľ°ä½į +ĠCol lier +æĹ© åīį +å·¥ä¸ļ åĴĮä¿¡æģ¯åĮĸ +串 è¡Į +åĩĽ çĦ¶ +. 
sequence +o cean +ï¼Į å¹¿ä¸ľçľģ +ĠP ops +ç³»ç»Ł åıĬ +Ġjo ystick +æĶ¹éĿ© åıijå±ķ +Ġpra wn +å¤ļ ç»´ +ï¼Įä½ł ä¸įèĥ½ +åħ³å¿ĥ çļĦéĹ®é¢ĺ +èĢķ ä½ľ +it ro +Ġor phans +åı¯ 转åĢº +ä¼ļ è¶ĬæĿ¥è¶Ĭ +å±Ĥ 为 +åįĩ åİĭ +Ġsubs paces +ĠOpen API +èĤĿ èĤ¾ +ĠF ond +Over lap +LOG O +ĠP endant +ãĢģ åĿļæĮģ +èĥ½å¤Ł 为 +Ġ<< = +Ġpen icillin +ï¼Įè¿Ļæł· å°± +Ġru pees +ĠMcCorm ick +Ġcamoufl age +d na +im aging +_t ls +å¸ĥ è¡£ +J azz +Ġt ambién +_M IX +uf req +æĬĢæľ¯çļĦ åºĶç͍ +å§IJ妹 们 +Ġsign er +æ¶Ī äºij +ÑĤ е +ä¸īåįģ ä¸ĥ +æ³ķ 西æĸ¯ +离 çļĦ +_S Z +çļĦ第ä¸Ģ 天 +ç»ĵå©ļ çļĦ ++ xml +get Num +Ent ropy +ï¼ĮåĽłä¸º å®ĥ们 +åĹ ij +Ġfif o +Ġobjection able +J os +b oo +on is +å°ı é¼ł +è¿Ļ个 æĥ³æ³ķ +è¡¥ åħ¨ +Ġem bar +Brook lyn +h oo +ĠR AP +Ġach ing +å·´å·´ çļĦ +Ġtreacher ous +Th ai +强 äºĨ +è¶Ĭ å°ij +ĠCol leen +_MAT RIX +èĥ½ 对 +ä½ĵ ä¾ĭ +çī© ç¾İ +æł¸ éªĮ +ä¿ĿæĮģ 稳å®ļ +ĠEnd owment +Ġmeat balls +Ġinfring ing +d if +u ve +çļĦ é¢ijçİĩ +ãĢģ çĥŁ +ãĢģ 人çĶŁ +èĢħ ä¸İ +Ġpopular ly +è¿ŀç»Ń æĢ§ +phan umeric +Built in +å°Ķå¤ļ æĸ¯ +. art +qu arter +.t opic +omy ces +-p assword +æĹłæ³ķ çIJĨè§£ +ĠSub s +ĠGood bye +éĥĬ å¤ĸ +Ġr ims +ä¼ļ é¦Ĩ +åIJĮ è´¨ +æĬĬ æĪı +æĶ¶ åĽŀäºĨ +Ġcre pt +æĮ¥ åıijæĢ§ +åķĨä¸ļ è¡Ĺ +- even +ï¼Į åĿļå®ļ +ĠS ON +Ġon Error +ä½ł è·Ł +Ġun ification +æĹł ç¥ŀ +让 ä»ĸçļĦ +Ġent hr +ä¼ĺ å¾ħ +ä¸ĢäºĽ äºĭæĥħ +Bo ys +ä¸į èĩªåľ¨ +æľī åı¯èĥ½ä¼ļ +åľ° çłĸ +ï¼Ł é¦ĸåħĪ +æĽ´ æĺ¯ä¸į +åıΠ大 +åı° å·ŀ +cont rollers +ĠPack s +éĽķ çIJ¢ +ç쵿´» éħįç½® +å¿łè¯ļ 度 +Ġinaccur acies +@ the +ï¼Į åIJĪä½ľ +æīĭ çݯ +å¹¶ 对 +ĠCon or +Ġcr umble +-c amera +/s cripts +åᢠåį¡ +_WR AP +/ UIKit +ĠT itus +ĠA IS +ĠP regnant +主è¦ģ çļĦæĺ¯ +äºļ åįļ +- AD +G adget +ĠC RL +ĠH ail +éħį 以 +Ġinteresting ly +å©´ åĦ¿çļĦ +åħ¬åĬ¡åijĺ èĢĥè¯ķ +Ġdischarg ing +ë§ Į +( Query +For ge +rad o +大åѦ åĴĮ +çĭ®åŃIJ 座 +ãĢģ åħ¬è·¯ +Ġdis perse +举 åİ¿ +_n l +ĠDep recated +ĠEl iza +åĪ¶ä½ľ æĸ¹æ³ķ +ĠCap ability +ï¼ĮåĽł åħ¶ +,æĪĸèĢħ æĺ¯ +Ġcovari ant +严åİī çļĦ +, è½» +D uke +好 æĪı +_P AN +Ext reme +j f +w oven +on uclear +Ġ( ? 
+Ġequ iv +éļĶ éĺĤ +author ised +西çıŃçīĻ äºº +å¤§åľ° éľĩ +à ı +Ġc ay +Ġcre ase +Ġbrown ies +ä¹Ł æŃ£åľ¨ +åĵ Ŀ +å¤ļ åĩº +缸 åIJij +éģĹ ä½Ļ +FL ASH +-Z a +. span +B çļĦ +D ynam +硬 æľĹ +л ед +Ġê ¸ +Ġfec es +Ġ å®¶åºŃ +ĠM PS +空 åİĭæľº +ĠFl avor +ĠEli ot +T iger +} =( +re se +çļĦ èĮ¶ +ĠT NT +Ġtra pez +åŁĥ å¾· +æĺŁæľŁ 天 +F iction +ç͍ å°½ +-c aps +沸 çĤ¹ +ĠM IX +ĠM anga +Ġdel im +æĬ¤ çĽ¾ +éľ² éľ² +Font s +Ġrac ially +ĠEld ers +Ġtraged ies +ĠIceland ic +çĶľçĶľ çļĦ +ï¼Į 讨论 +ĠP osit +Ġde ported +è£ ¨ +Ġel ves +åĽºå®ļ ä»¶ +è°· åºķ +åİŁåĪĽ æĸĩ竳 +Ġunbelie vably +C PI +T f +çļĦ 年代 +Ġr és +顺åĪ© éĢļè¿ĩ +.Text Box +- SC +ĠA EM +ĠP DO +è®° èµ· +ĠFour teenth +çŃī æľīåħ³ +Th under +iel en +_D RV +Ġkil n +ĠWolf gang +è½¬çľ¼ éĹ´ +åľ¨ åĽ½å®¶ +Ġi pt +åĽ´ æĮ¡ +ï¼Įè¿ĺ æĥ³ +饱 åIJ« +Ġevapor ated +S s +at oga +ĠP RC +éĩįçĤ¹ å®ŀéªĮ室 +Ġmill igrams +i eren +ãĢģ 乡 +å±± ä¹ĭ +Ġsl iders +CA RE +Ġphotos hop +åIJIJ äºĨ +Ġsur charge +Ġlot us +åħ¬ å¢ĵ +毫 ç§Ĵ +è¿Ľæ°Ķ åı£ +Ġbeet les +çī Ĵ +éĿ¢ 罩 +èĩ´ åĬ¨ +åĨħ容 为 +éĻĦ ä¸Ń +ĠEN G +Ġ 头 +ĠP IX +ĠPro ps +è§ģ åºķ +çļĦå¿ĥ æĦı +åį«çĶŁ æīĢ +Ġtrace back +åı¤ å¢ĵ +åᏠä¸ĭ +ĠS ikh +ĠE EOC +ific ent +æīĵ ä¸Ĭ +cd r +ĠOver lay +Ġadapt or +â t +è̶ 夫 +ĠCome y +æľīè¯Ŀ è¦ģ说 +ãĢģ çĽijäºĭä¼ļ +å¾Īä¹ħ 以åīį +In Bytes +åºĹ éĩĮçļĦ +çł´ 空 +_w r +Ġbi ographies +ĠDi ablo +Ġreal ising +åħ´ é«ĺéĩĩ +临 å®ī +ï¼Į åıĤèĢĥ +Ġm ongo +ãĢģ åĪĽä¸ļ +ãĢģ æĸĻéħĴ +两 åĪĨéĴŁ +Ġpersecut ed +Ġgalvan ized +- rule +æľī å¤ļ个 +æĶ¿ åıĺ +S ant +Ġe jected +åĪĹ é¢ł +åĨ· è½§ +Ġmamm ary +Ġinaug urated +ĠSequ ential +_C ANCEL +é»ij åŃIJ +ARG IN +Ġkom mer +åħ¼å®¹ æĢ§ += models +Ġb ilinear +æīĭ å¿ĻèĦļ +缴 ç³» +举 è¾¹ +è¾Ľ è¾Ľèĭ¦èĭ¦ +æķĻæİĪ çļĦ +åIJĪå¹¶ æĬ¥è¡¨ +éĨĩ åİļ +ĠRo vers +çħ½ åĬ¨ +ç´§éļı åħ¶åIJİ +O tt +Ġn op +ï¼Įä½Ĩ åIJĮæĹ¶ +ĠCustom ize +Ġdos ages +ĠNET WORK +éĤ£ 以åIJİ +车 åºĬ +æĺŁæľŁ åĽĽ +Ġinterle ukin +ï¼Į åĩºçİ°åľ¨ +ĠF UT +Âł åIJ¬åΰ +å¾ħ çĿĢ +gen ces +Ġg out +ag et +IN Y +.m aven +ĠCol t +ãĤĮ ãģ° +L ift +Ġc us +ï¼Į çİĩåħĪ +å®ļ 论 +å½ĵçĦ¶ åı¯ä»¥ +.de cl +åĪ® 缮 +ĠCob ra +Ġ ################ +End ed +ĠBlock ing +ä¾į èĢħ 
+ĠÏĥÏħ ν +Ġ æľĽçĿĢ +ï¼Į åı¦æľī +åĪ© åĪĥ +éĺ¿ èĥ¶ +æī§ äºĭ +ĠCheck er +Ġslog ans +" }} +\ Microsoft +ç´§ éĶģ +æĥĬ éŃĤ +ï¼ĮåĨį æĿ¥ +æīĵéĢł åĩº +S wing +ĠCh attanooga +ï¼Įä¹Ł å°Ĩ +æ²³ éķĩ +Ġcart e +Ġbru ises +è´¢ æºIJ +åı¯æĮģç»Ń åıijå±ķçļĦ +Ù Ĵ +.. ' +æĹ¥ ç͍ +çĺ¦ äºĨ +Ġantidepress ant +P ent +at ten +å·¥ äºĭ +ĠCY REG +èĩªå·±çļĦ åĦ¿åŃIJ +çĶ· çε +èī¾ çģ¸ +Ġbacter ium +精彩 åĨħ容 +ĠC ER +äºĶ åij³ +Ġteam ing +'] [] +ç´§ éĤ» +\ m +马 å°ıä¹IJ +å©ļ æģĭ +Ġmoment a +Ġdiet ing +ãĢģ å®ŀè·µ +çŃī æĿIJæĸĻ +ä¸įçŁ¥ ä½ķæĹ¶ +ä¸įæķ¢ åĨį +å·®ä¸į é½IJ +L m +ç» ¶ +Ġlo om +å¥ĩ éģĩ +æĿ¯ èĮ¶ +æłª å¼ı +Ġchuck led +ol ive +å°± éĹ® +Âł å½ĵ +äºĮ èĥİ +ĠChrist y +æ³¢ æĬĺ +æĺ¥ çļĦ +å½ĵçĦ¶ ä¸įæĺ¯ +æĬ¢ çĿĢ +Ġsnow fall +othy roidism +çİ°åľ¨ å¾Īå¤ļ +Ġtemp file +ĠAstr onom +ĠEVERY THING +et ur +âĢľ ä¸įè¦ģ +èĢĮ æĹłæ³ķ +Ġstatic ally +ï¼Įä¸Ĭ åīį +侯 åºľ +åIJĬ 带 +$ user +D BC +æĪij çľĭåΰ +ĠG MP +æıIJ è¿ĩ +Ġcr umbling +çªĹ è¾¹ +çģ° åº¦ +Ġsymbol izes +åºĶ对 æİªæĸ½ +l ı +at te +ï¼Į æĪ¿ +ä¸ĵ æľī +Cl Compile +ij d +æĻ¶èݹ åīĶéĢı +D w +\ Bundle +ĠSt ages +第 åįģä¸Ģ竳 +ull er +amp ers +ï¼Įå¹¶ åıĬæĹ¶ +-c ategory +Che st +Ġbank roll +弯 éģĵ +ĠRad iology +æīĢ说 çļĦè¯Ŀ +_ret ry +D emonstr +å¾Ģ æĺĶ +в ÑĢ +)** ( +Ġ æĶ¯ä»ĺ +ãĢģ åıĸ +è¾ĥ éĩį +æľĽ äºĨä¸Ģçľ¼ +éĹª 身 +at here +ï¼Į ä¼ij +ĠT uck +æľīéĴ± 人 +èļĿ æ²¹ +G ross +ä¸Ģ æķ´å¤© +Ġpe ppermint +æŀģ ç®Ģ +_de leted +Ġfisher y +' i +, åħ¥ +Ð ¤ +be ats +è¡Į为 åĴĮ +,å¦Ĥæŀľ 没æľī +Ġ å®ĮæĪIJ +ï¼Į 俱 +ĠG ao +çľĭ ç©¿ +Ġfavor ing +Dis pose +ï¼Įè¿Ļ个 ä¸ĸçķĮ +- ft +ï¼Į å±±ä¸ľçľģ +Ġplan ks +ĠBar row +,å½ĵ ä½ł +ä¸į论 æĺ¯ +ĠS is +ĠCom o +ä»»æĦı çIJĥ +Ġmultipl iers +( center +Ġ éĤµ +att ached +è°ĥæķ´ çļĦ +ï¼Įæĸ¹ åı¯ +äºīè®® çļĦ +ĠCatalog ue +ĠP eb +ï¼Įåį³ä½¿ åľ¨ +) ä¸ŃçļĦ +_ evt +ĠE PO +åĬ¨ åIJ¬ +æīĵ çĤ¹ +Ġdev out +ĠAm es +( region +, éĢIJæ¸IJ +ä¸Ģ èĩ³ +-t racking +æķħ 人 +Tr iggers +çIJĨ念 åĴĮ +ĠâĦ ĥ +Ġun enforce +åĨ° å°ģ +è§Ĩé¢ij ä¸Ń +ĠM ature +å¼Ģ è·¯ +éķ¿ å¹´ +Ġpet ite +Ġsports books +ï¼Įç»Ŀ 大å¤ļæķ° +g ons +å¹³ åĪĨ +ĠCon sequences +æ£Ģæµĭ æĸ¹æ³ķ +ĠPhil anth +card ia +å®Ļ æĸ¯ +.param etrize +Ġ ä»¿ä½Ľ +ï¼Į 
åĬ¡ +ï¼Į èĪĮ +ãĢģ åľ°æĸ¹ +建 ä¸ļ +ï¼Įä¸į 对 +.get Sub +ç´ł æĿ¥ +-com ponents +寰 å®ĩ +Ġn ad +ĠB ishops +éħį ä¹IJ +Ġhear th +Ġfluct uating +FO X +æīĵéĢł ä¸Ģ个 +èĬ±åĽŃ éĩĮ +ĠYE ARS +Y F +ãĢĤ 积æŀģ +ĠR ang +ä¼ļ æľī人 +Ġpain staking +ï¼Įåΰ æľĢåIJİ +ĠMcG raw +.Integer Field +( By +ou le +ï¼Į çİĽ +å°ĸ çļĦ +Mill an +Mouse Event +Ġisot opes +urn ed +ĠV erg +èģĶ ç¿© +æģ¼ç¾ŀ æĪIJ +åIJį åĨĮ +åħĪ åΰ +.L en +ĠFix es +顽 强çļĦ +al em +Ġm uzzle +ĠSwitch ing +Ġ 以åIJİ +ĠH SV +车 åIJİ +ĠTHE IR +Ġä¸Ģ éģĵ +not in +åī§ åIJį +è¿IJç͍ åΰ +ĠColl apse +æŁ´ èĥ¡ +éĤ®æĶ¿ ç¼ĸçłģ +, nonatomic +请 ä¸įè¦ģ +_P ARENT +æĺ¾ç¤º éĿ¢æĿ¿ +imer ick +äºĭä¸ļ ä¸Ĭ +æīĢå¾Ĺ çļĦ +Tool Bar +N ord +_ En +ãĢģ ä¼ĺç§Ģ +å¤ļ åľ¨ +åħ± å¤Ħ +Ġfat ig +ĠIron ically +ro o +ï¼Įä¸į ä¼ļæľī +æµĭ è·Ŀ +æį¢ ä¸Ģ个 +ML B +Ġ................................................................ ................................ +ãĢģ æĻº +éķ¿ æĸ¹ +li us +ä¼ģä¸ļ ä¸İ +ï¼Į大 èĩ´ +ĠSpecial ized +ĠSt air +track er +ĠTes co +( active +re views +ou in +ï¼Į çĶŁæĦı +æĺ¯ ä»¶ +ink a +åĶIJ ä¸ī +URL Connection +_un iform +ä¹Ł æĿ¥ +æīĢ åĪĹ +ï¼Ľ åĴĮ +èĢģ ä¸Ģè¾Ī +Ġ/* ! +è°ģ 说 +Ġq i +æĮ¥ åĬ¨ +p ox +线 段 +_C RC +çŁ³ éĽķ +ä¹Łä¸į èĩ³äºİ +Sw ipe +è̳鼻 åĸī +ĠE CS +-s ession +éĵĿ æĿ¿ +Ġmanip ulations +ĠBengal uru +Ġ 羣çļĦ +ore f +æĢ» 产å̼ +èĵĦ åĬ¿ +Ġrefriger ant +P agination +Ġt iling +ï¼Į å·Ŀ +Less ons +ĠCauc us +p agination +æŃ¥ å±¥ +端 çĽĸ +ç«¥ å¹´çļĦ +_length s +乡æĿij 人åı£ +ï¼ĮåIJİ éĿ¢çļĦ +_TO OL +磫 æĥħ +Ġwrest ler +å¸ĥé²ģ æĸ¯ +ç²¾æ¹Ľ çļĦ +ãĢĤ çͰ +è¦ģ çľĭçľĭ +ĠAl s +èµĽ 马 +åį· çĥŁ +ĠNass au +ï¼Į åºŁ +ãĢģ ä¹± +ï¼Įä½ł å°Ĩ +亲 æ°ij +å±ħ çļĦ +bb les +, çī¹ +/ St +大 大å°ıå°ıçļĦ +åĩĨ å¦Īå¦Ī +åĩłå¤© åIJİ +ï¼Įèĥ½å¤Ł åľ¨ +ĠArab idopsis +ĠDat abases +z et +Ġbut termilk +ç¥ŀ çİĭ +ä½ķ ä¹Ķ +ä¹IJ 竳 +.* , +æ¶Īè´¹èĢħ æıIJä¾Ľ +Go als +ĠReform ation +Ġan gr +Ġst bi +ä¿¡ ç®± +å·² å¼Ģå§ĭ +Ġexp ended +满 身 +ãĢĤè¿Ļ å°Ĩ +Ġcov enants +Ġpron oun +. 
escape +L ATEST +ï¼Į ä¸īå¹´ +ĠT AP +ãĢĤä½ł çľĭ +Ġbifur cation +ï¼Į å°Ķ +ï¼Į åĬŀçIJĨ +æľ¬ ä½ľ +ä½İ åIJ¸ +rid ged +ĠSp are +ĠèĢĮ æĺ¯ +ãĢĤ ç«Ļåľ¨ +ãĢģ åĪ©æ¶¦ +ç»Ļ ç͍æĪ· +Data Member +è·ij éħ· +åħ¬ä¸» çļĦ +åľ¨æĪij 身ä¸Ĭ +ĠTransform ers +Ġdetox ification +ant ra +åıª åģļ +ä¸ĩ åħ¬éĩĮ +ĠPh uket +.b ig +V egan +ãĢģ æĬĢèĥ½ +inc ible +ö k +æįı äºĨ +" Now +- Ab +/ string +æ¯Ķ è¿Ļ +æĪĸ æľª +åĸľæ¬¢ çľĭ +C leveland +Ġm idd +Ġ\ ;\ +é»Ħ è¤IJæĸij +åĵª æĿ¥çļĦ +æĿ¥è¯´ å°±æĺ¯ +ãĢĤ她 åľ¨ +æī¿æĭħ äºĨ +Pat ricia +ĠSQL ITE +P rest +ĠL UA +çݰ æĪIJçļĦ +åįķ æĮij +_N ot +ä¿¡ç͍ çŃī级 +Conf irmed +-head ing +Mess enger +w reck +ï¼Į åĬĽéĩı +ĠS ender +ĠG artner +ç¨ĭåºı ç¿»è¯ijæĪIJ +_sh apes +Christ ina +he en +ï¼Į çļĦ +Ġstr ife +åŁºæľ¬ä¸Ĭ éĥ½æĺ¯ +Ġsobri ety +G EM +_ Action +ĠPlay ed +渡 åı£ +.Des criptor +Ġw igs +ï¼Į è¿ĩæĿ¥ +ä¸į 约èĢĮåIJĮ +éĩı èĥ½ +pr incipal +åįĥ çݺ +åĩłä¹İ éĥ½ +-res ource +re pl +åħ¬åı¸ 为 +ç³»ç»Ł éĽĨæĪIJ +Co pper +Ġreinst ated +, è·Ŀ离 +, åħ¨å¸Ĥ +é¢ĺ çļĦ +西 å®ģ +çĹħ çĹĽ +-In ch +Ġ å²³ +Ġst s +ä¸Ĭ åįĬåľº +ä¸įåIJĮ ç±»åŀĭ +ole um +Ġpenal ized +Ġç®Ģ ä½ĵä¸Ńæĸĩ +ĠKyr gyz +ï¼Į 亲çαçļĦ +ĠR ope +Ġsh abby +-s ac +Res ervation +-E ast +aze era +å·¥ åĨľ +æĭĽ èĩ´ +éĺµ åĬ¿ +_v ocab +迪 åħĭ +交æµģ ä¼ļ +ĠEm ory +Sem aphore +åı¯ä¸į åı¯ä»¥ +. Async +N OV +ãĢĤ æķħäºĭ +ãĢģ 缮æłĩ +Ġsp oons +B igr +Ġampl ifiers +èĩªå°Ĭ å¿ĥ +_ vel +ä»ĸ们 åİ» +èĥ½å¤Ł åģļåΰ +Ġvirt uous +åIJŁ åIJŁ +ĠPey ton +, éļıæĹ¶ +w ara +ĠT rac +ä¹Ł å¾Īæľī +get Bytes +Ġsp rites +ä¹IJ åĿĽ +åĪ· åŃIJ +ç½ijåıĭ çļĦ +δ ο +j v +k its +ide as +ĠU CHAR +ï¼ī éĢļè¿ĩ +_s orted +该 åī§ +Ġcapital ized +æľīäºĨ æĸ°çļĦ +éªĤ æĪij +ĠGram marly +ï¼Į ä¿Ħ +ä¹Ł ç»Īäºİ +li as +åĽĽ åįģäºĶ +é¢Ħ æĶ¶ +æı¡ ä½ıäºĨ +Air port +åħ¬ 鸡 +èĩªå·± åĬ¨æīĭ +BS ITE +Ġkick off +Ġsadd ened +äºĨ ç»ĵ +ĠP ays +Ġres urf +_t l +æĽ´ åħ·æľī +åıijå±ķ 空éĹ´ +occ urrence +ï¼Į åIJ¸å¼ķäºĨ +Ġm RNAs +. 
contract +åı¯ 对 +ĠHe al +Ġconf luence +ĠAN T +pack ing +L orem +L ynn +Ġl ily +è¿ĩ çĶŁæĹ¥ +失 ç¥ŀ +Obj C +è£Ļ æijĨ +jud ice +ĠSab ha +æľ¬èģĮ å·¥ä½ľ +p ill +ï¼Į åĩī +Ġp omp +åľ¨ ä¸ĬçļĦ +èĥ½ éĢļè¿ĩ +.f m +ï¼ĮæīĢ以 è¦ģ +ï¼Į大 éĩıçļĦ +å¹» çģ¯ +æģ¨ æĦı +æĿ · +ĠR abb +éĺ³ ç¦»åŃIJ +ĠFe ynman +ó d +æĶ¯æĴij ä»¶ +æıIJåĩºçļĦ éĹ®é¢ĺ +_off sets +incre ase +å¼Ĺæ´Ľä¼Ĭ å¾· +-d ocument +åı¥ åı· +å¢Ļ 纸 +ĠText ile +elect ron +Ġun important +æ°Ķ åŃĶ +_ex c +Ġphosphory lated +Asc ii +K orean +ter o +Ġbudget ary +_p kg +åıį åıĽ +uc ene +me at +ĠDr illing +table Name +Ġpsych otic +殿 åĨħ +Ġmarty r +ĠL Z +au i +Ġbl inding +Col in +æĺ¯åIJ¦ æŃ£å¸¸ +op atra +å°ı 楼 +客 éĺŁ +Or lando +åİĨåı² åĴĮ +ĠMet ropolis +缮åħ± çĿ¹ +Ġit ertools +-b ye +Ġrad iology +Ġtail oring +( an +ĠX OR +ĠDis posable +Ġinn umerable +詹 å§Ĩ +Offic ials +å¼łå®¶ çķĮ +, äºīåıĸ +ä¸į æĥħæĦ¿ +ç¥ ļ +æĶ¹ è§Ĥ +éĢģ æŃ» +éĺ¿ å¸ĥ +Ġpret reatment +Ġarch iving +-em erg +iel lo +rt p +,å°± åľ¨ +å½Ĵå±ŀ æĦŁ +å¼Ĥ åĬ¨ +åŁ¹è®Ń 课ç¨ĭ +åΤæĸŃ åĩº +R ough +æľī ç®Ĭ +è° © +导 å¼ķ +AL ES +è¾ī æĺł +çģ«éĶħ åºĹ +Liter ature +ĠStrick land +ï¼Į æĮĩåĩº +ï¼Į åĭ¾ +ĠG ed +åijĬ ä¸Ģ段 +第äºĮ æĿ¡ +ï¼Įå°± æĥ³ +ĠTaiwan ese +ĠJD BC +Ġpals y +E aster +ĠÐ ĵ +ç§»åĬ¨ 端 +ĠMcK ay +Ġpanor ama +æĺ¯ è¿Ļ +ĠF ertil +çľģ äºĭ +ĠLe asing +é£Łåĵģ çļĦ +Ġunatt ended +, éĢIJæŃ¥ +. ud +èĢĮ ä¸įåı¯ +ie ux +èĬ © +æľĿ ä¸Ĭ +Toy ota +ĠE O +缴 æĮĩ +_b n +Ġ第 åįģä¸Ģ竳 +d ough +çļĦ ç͵åŃIJ +ĠA ven +Âł åĶIJ +min a +éĿŀ常 éĩįè§Ĩ +åİŁåĽł å°±æĺ¯ +ä¸Ń央 éĵ¶è¡Į +éĹ¹ äºĨ +Ġcheap ly +åıįå¤į åıijä½ľ +ĠHur ricanes +ãĥĨ ãĤ£ +. Keys +è¿Ļæł· æīįèĥ½ +åĸĿ åĴĸåķ¡ +ä¸ĢåºĶ 俱åħ¨ +Ġt ect +ä½įç½® å¤Ħ +âĢľ H +æĹł éĤª +è§£ åĨ» +æīĵ çĮİ +åıĹ访 èĢħ +å¦Ĥ ä¹Ł +以ä¸ĭ æľīæľŁå¾ĴåĪij +æĭ¼ è£ħ +cor r +楷 模 +Ġdetain ees +Ġ éĺ® +æĿ¾ æīĭ +Tree View +Ġpractical ity +. 
Headers +Ġ å¯Į +ï¼Į æİĮ +åı¯ éĥ½æĺ¯ +åħī æĻ¯ +ï¼Įåħ¶ éĹ´ +ç½Ĺ ä¼Ĭ +溶 èĥ¶ +ä¿Ĺ è¯Ń +cor responding +ĠP DB +pp c +åİŁ å½¢ +Cor respond +ĠSE Q +ä¸Ļçĥ¯éħ¸ éħ¯ +ä¸Ģ éķ¿ +å¼Ģå§ĭ çļĦæĹ¶åĢĻ +,æĪij们 åľ¨ +èįĴ è¯ŀ +indust rial +è¹Ĭ è·· +ĠIth aca +ï¼Į åĽ¢ç»ĵ +ĠA GE +é«ĺ å³¥ +主 æµģçļĦ +éĩij åıī +-b lood +Ġpick led +æ³ķå¾ĭ 顾éĹ® +èĦ¸ä¸ĬçļĦ 表æĥħ +ï¼Į éĢłåŀĭ +Ġun imp +èĩªå·±çļĦ äºĭæĥħ +认è¯Ĩ åĴĮ +ĠMS U +ï¼Į åĽ¢éĺŁ +ãĢĤ 人çī© +ãĢģ èij£ +åĴĮ 好 +è¿ij 两年 +Ġsk is +(p y +- organized +ĠC NT +ri osis +èĥ½ çIJĨè§£ +arg as +.b ad +_VER IFY +ĠEff ort +åĴĮ 秦 +æĹł 妨 +Ġplay book +æĹ¢ åı¯ä»¥ +Ġhug ging +Ġ ç¥Ŀ +ï¼Į æĸĩ竳 +æķĻåѦ åĨħ容 +Ġash ore +Ġp db +åIJī æĸ¯ +asp ers +draw ing +ANC ED +Ġgrac iously +ĠHert z +, éĴ± +- actin +çļĦ人 åIJĹ +ĠQu ora +ā Ă +ï¼Į åĵģ +ĠT read +ĠC CP +od oxy +ãĢģ ç¨İåĬ¡ +æĹ¥ ä¸Ĭ +è§Ĥ çľĭäºĨ +ĠCl ive +ç±»åŀĭ 为 +Rem oval +管çIJĨ æľįåĬ¡ +æĺŁ åŁŁ +-re act +åĨ·åį´ å¡Ķ +ĠHil ary ++ g +çļĦ æĸ¹æ¡Ī +ĠA man +nd t +åį³ æĪIJ +Ġsper mat +( inner +PS P +å®´ å¸Ń +dist inct +.Active Cfg +or ca +ï¼Į æĭ¿åΰ +ĠB OTH +åѦ èĢħçļĦ +Ġreturn Value +æľĢ æĥ³ +ä¿Ŀ éĩį +åŁĥ éĩĮåħĭ +æĭ¿åĩº æīĭæľº +羣å®ŀæĢ§ è´Łè´£ +ãĢĤ å¤ľ +æĬĢæľ¯ åıĬ +Ġsales person +çε ä½į +è¿ij æľŁçļĦ +åħ¶å®ŀ æĪij +ç¿» çľĭ +Ġhospital izations +Ġ ~~ +ol ini +Ġch icago +Ġsw am +a quin +âĢĿ 被 +get Text +ï¼ģ æĪijæĺ¯ +ĠSub section +-qual ified +. 
Out +Ġan esthetic +åij ĭ +ä¹ĭ åĬĽçļĦ +转 è½® +ï¼ĮæĢİä¹Ī äºĨ +ĠSOL UTION +çī¹ å¼Ĥ +ĠTop ology +çĵ¶ çĽĸ +ĠAff air +æĸ°åįİ ç½ij +Ġcraw led +p el +ï¼Į æ¹ĸåĮĹ +ue jin +ç»Ĩ çľĭ +N ST +× ĺ +è¿Ļæł· æĥ³ +Ġgate ways +åĪĥ æľīä½Ļ +éĺİ çİĭ +ï¼Įä¸Ģ åIJĮ +çħ§ æĹ§ +社ä¼ļ å·¥ä½ľ +"] ; +åľ¨æŃ¤ ä¹ĭåīį +è´«åĽ° åľ°åĮº +ĠS addle +对 çϽ +å°ij 说 +å¼¹ çIJ´ +Ġmigr atory +âĢĿ æĪĺçķ¥ +å½ĵ çļĦ +å¼ı 设计 +ï¼Įä¸į 好æĦıæĢĿ +åĭIJ çĥĪçļĦ +{ max +ï¼Įæĺ¯ 缮åīį +ĠPoly technic +æĭī æīĭ +åİĨåı² åѦ家 +Ġmis represented +èĤ¥ 大 +ĠStr as +å©¢ 女 +_PO OL +op ot +é¢Ĩ èĪª +æķħ éĩĮ +ç¼ĵ æŃ¥ +æŁ³åı¶ æ¢ħ +ä¸Ģ åħ« +ĠCon se +UPD ATED +Ġwrest le +ĠMAN AGEMENT +ï¼Į 姬 +get Class +LL ING +ä¾ĿçĦ¶ 没æľī +ĠHyper t +/ gen +/ security +åĬ¡ ä¹ĭæĢ¥ +ĠX code +ä¸Ŀ è·¯ +èĦļ 踢 +åĸĩ åĺĽ +Ġbeet le +ĠT ester +-S eries +ç²Ĺ çļĦ +- rep +- English +ï¼ģ â̦ +Ġinf os +ĠHum ane +Ġfet ish +æľīå¿ĥ 人 +_ dup +tern ut +ä½ı 她çļĦ +é¢Ħ åŁĭ +-J ones +æĥ¶ æĥ¶ +- acting +ine x +bold math +å¯Ĩå¯Ĩ麻麻 çļĦ +Ġbri bery +ĠLingu istics +/ å¹³æĸ¹ç±³ +he on +åľ¨ æĿİ +ä¸ĭ éĽª +士 å¤ļ +Ġunbeat able +éĩįçĸ¾ éĻ© +ï¼Į æĪ¿éĹ´ +梦 éŃĩ +Comp iled +ом Ñĥ +think able +ĠDew ey +< Self +Ġb outs +人çļĦ 身份 +ï¼Įåıª åı¯æĥľ +<< _ +Ġfo resh +kn ife +æīĵè´¥ äºĨ +äºĶèĬ± èĤī +} ï¼Ī +Ġ åĨħç½® +ï¼Į 好äºĨ +if ndef +request ed +带åĬ¨ ä¸ĭ +ãĢĤ ç½ij绾 +ĠF idel +all en +Ġme adows +cy c +çļĦåľ° çĽĺ +T une +W riters +Ġb tc +ãĢģ 鼨 +ant ib +Ġget Instance +ï¼ģ ä¸įè¦ģ +Ġcond enser +éĹ¹ çļĦ +åĬŀåħ¬å®¤ çļĦ +.I con +L åŀĭ +Ġthe rapeutics +eg lasses +.t ail +Ġa ra +ãĢĤ ç͵影 +ĠAddress es +æ½ĩ æ¹ĺ +Ġlac rosse +< % +Ġre printed +ä½İ é¢ij +"] : +æµ® æĥ³ +Ġm á +ĠAr s +.s n +æŁ¥ å®ŀ +ĠAd ele +è¡ĮåĬ¨ ä¸Ń +. 
ion +缸 è¾ħ缸 +è¯ģ çħ§ +ĠUS ING +å¿Ļ äºĨ +æĻ®éĢļ é«ĺä¸Ń +Ne ighbor +k ai +ĠB eware +ä¼ļ è¿Ļä¹Ī +éĢļ ç͵è¯Ŀ +è½» æııæ·¡ +ç«ŀäºī èĢħ +,ä½ł å°± +ï¼Įå·² æĪIJ为 +ãĢģæĹł å½¢èµĦ产 +ĠStam ford +e us +Ġpro ff +ht ar +ï¼Įä½ł ç»ĻæĪij +åĬ¨åĬĽ æĸ¹éĿ¢ +ĠTor o +森æŀĹ éĩĮ +ä¼¶ ä¿IJ +ä¸Ģ èĬĤ +æĹ¶ 对 +IN VALID +Ġtest Case +Adapt ive +ĠHitch cock +Ġreason ableness +ores cent +éľĩ 颤 +ĠS GD +and re +个 éĹ®é¢ĺ +å¿ Ħ +Ġapp alling +äºĶ è°· +To Be +å¼Ĥ åŁŁ +å¨ģ åĬ¿ +ï¼Įè¦ģ æĬĬ +-A z +ĠBloom ington +大 åłĨ +ä¹Ł æŃ£ +æľĪ èĸª +çŀ Į +åıªæĺ¯ åĽłä¸º +,ä¹Ł å°± +æ²Ł æ§½ +碰 å·§ +è·¨ è¿ĩ +èĥİ çĽĺ +ĠInv itation +æľīäºĽ å°´å°¬ +ĠImp lements +BI OS +ĠEc ological +Sure ly +ãĢĤ æĥ³æĥ³ +è¾ ¼ +åĽ½ ç͍ +å·² çŁ¥çļĦ +ï¼Įä¸Ģ ä»¶ +ĠRe active +éĿŀ常 å¿« +- Party +éĻª ä»ĸ +Ag ree +ãĢĤ æľŁéĹ´ +éĥ½ éħįæľī +Sp a +record ed +åIJį éŨ +Ġred ress +ĠSEC URITY +ï¼Į åıijå¸ĥ +æĺ¯ éĤ£ä¸ª +æ°ı å®¶æĹı +ĠTour ing +_ velocity +ï¼Įä½ł ä¹Łåı¯ä»¥ +ĠId le +Inv oker +ococ cal +) }$. +[ Int +b ite +ĠM CL +大 åħ³ +.p ad +æ°ı éĽĨåĽ¢ +Ġrevis ited +-vol tage +粤港澳 大湾åĮº +ãĢĤ æ³ķ +çĿĢ èĦļ +ä¹Łæ²¡æľī åĬŀæ³ķ +ĠRefuge e +D ell +S keleton +ï¼Į ä¸ĭä¸Ģ +åľ° çľĭåIJij +ï¼ĮæĪij们 è¿ĺæĺ¯ +è°ģ æķ¢ +Ġsu ede +ä¸Ĭ 表éĿ¢ +羣 èĥ½ +ï¼Įä¸Ģ åĿĹ +åŁºæľ¬ åİŁåĪĻ +å¹¶ä¸į éľĢè¦ģ +Ġincident ally +Ġrebell ious +ag li +è¿Ļ æīįæĺ¯ +æĸ¹ 管 +çŃī åIJĦ +.p b +æľª èIJ½ +æĢİä¹Ī ä¸į +.C ancel +ĠStart ups +ĠSyll abus +å®¶ åŃIJ +ï¼Ľ ä½ł +ĠBo ats +Ġ-------- - +################################################################ ################ +ãģĵãģ¨ ãģĮ +çļĦ 转åĬ¨ +to a +ï¼Įå¹¶ ä»İ +åĽ½éĻħ æłĩåĩĨ +Ġintellectual s +( acc +Ġc affe +ĠM im +çļ®èĤ¤ ä¸Ĭ +ç²Ĺ é²ģ +ï¼Įæīĭ æĮģ +Ġcomm ended +Test Data +ï¼Įè¦ģ æľī +( role +ãĢģ åįĥ +åıĬ æĬĢæľ¯ +æĿĢ åħ¥ +ä¹Łæľī ä¸įå°ij +_bound ary +åºĶè¿IJ èĢĮçĶŁ +çº Ń +æ°ij æŃĮ +ĠPass es +Celebr ate +è·ĥè·ĥ 欲è¯ķ +ust ing +str ncmp +çĻ» é¡¶ +æ·¡ éĽħ +西åįĹ éĥ¨ +, åı¯èĥ½ä¼ļ +Ð Ľ +Ġc uffs +ĠA eron +ä¸Ń åį« +yn os +è¨Ģ 诺 +ĠJournal ists +Stat istic +ĠLag range +Ġleng then +å°¼ çļĦ +Ġemb ossed +åĺī åºĨ +ĠSE AL +æ¸ħæĻ° 度 +èµł åĵģ +说æľį åĬĽ +, æ¿Ģåıij +ãĢģ èĽĭ +çģ« æµ· +åIJĮæĹ¶ 
è¿Ľè¡Į +Hand shake +Ġje ep +ĠE ats +å½± è¿· +表示 èĩªå·± +çĸ¾çĹħ é¢Ħéĺ² +Ġneuro log +Ùĩ ا +ĠBlo ody +Ġdevote es +, ç»´æĬ¤ +âĢĿ 模å¼ı +ĠAn at +-p ackage +åŃĺåľ¨ æĦŁ +æķ°åŃĹ è´§å¸ģ +ï¼Įè¿ŀ è¿ŀ +Ġfuel ing +ï¼Įæµ· åįĹ +ĠBL M +Ġç¬¬åĽĽ çϾ +ĠRece iving +k art +_s uite +Ġè¿Ļ ä¹Łæĺ¯ +May a +ï¼Įæĥ³ äºĨæĥ³ +çłĶç©¶çĶŁ åѦåİĨ +ĠAG M +Ter rain +ĠUn ve +åij¨ 天 +SS A +稳 产 +ĠMc P +allow ay +ĠLat itude +ä¸Ń æ±Ĥ +æİ¥ ç»Ń +Her itage +ĠP RESS +Ġend Time +åŁİ éĤ¦ +åı² çļĦ +Print ing +ĠT AC +." \ +Ġdel i +_B lock +IB Action +Ġalt ru +ç»· 带 +éŃĶæľ¯ å¸Ī +.Full Name +ãĢĤ 好çļĦ +Ġk z +,ä»ĸ å°± +Attribute Name +.y ang +_ch oice +jor ie +quis itions +T urbo +天 å¹³ +é» ŀ +и ли +åĮħè£ħ çĽĴ +ĠPupp et +æ³ķåħ°åħĭ ç¦ı +ï¼Į çĶŁçī© +ess on +æĪIJ ä½Ľ +get text +Ġbra ided +_RO LE +; èĢĮ +om n +ï¼Įä¸į èĤ¯ +绣 ç§° +Ġcra ze +ĠAdvoc ates +Serialized Name +ä¸į 以为çĦ¶ +å°ı çĮª +_R ANDOM +åį· èµ· +åĮ»çĸĹ è®¾å¤ĩ +ä»Ģä¹Īäºĭ äºĨ +(/ ^\ +D IG +ä¸į æ±Ĥ +Ġ! _ +éĽĨ æķ£ +.c od +ãģ£ ãģ¨ +_SM ALL +éģĵ èıľ +Ġ\ : +åĽŀ 乡 +ï¼Įä¸įå¾Ĺ ä¸į说 +西éĥ¨ åľ°åĮº +æī¿ç§Ł 人 +n P +åĩł è¿ij +åĬŁèĥ½ åĮº +_W ORLD +ï¼ĮæĽ´ æĸ° +æĬ¥èѦ åύ +åħ¢åħ¢ ä¸ļ +ab en +åħĪ è¾Ī +é rie +念 书 +åĸľæ¬¢ åľ¨ +,åĽłä¸º æĪij +åIJĦ个 é¢ĨåŁŁ +_cl usters +Ġmascul inity +ĠD WI +Ġlif eless +m z +p rivacy +ãĢģ æĬ¥åIJį +Ġres ets +Ġself ies +æľª åΰ +-m ark +ĠChrist ensen +Ġæľī æĹ¶åĢĻ +Ġadvers aries +å¹´ æĶ¶åħ¥ +ĠV ad +çİĭ åĽ½çļĦ +åįİ è£Ķ +åģı æī§ +éĽĦ å®ī +Port rait +OWN ER +æĺŁæľŁ æĹ¥ +ĠBrow ning += q +Ê » +Ġconf ocal +æģ© å¸Ī +ä¸įåºĶ æ±Ĥ +.tt f +m ts +ãĢģ 产 +em aking +hen ko +Ġsw iss +æĺł çħ§ +Ġneuro psych +Ġexped ite +Ġinterpol ated +ĠAppCompat Theme +in ities +am u +_m r +çł´ å¼Ģ +lam ide +ç¨į åIJİ +ĠTreat s +ãĤ¯ ãĥĪ +åıijå±ķåĴĮ æĶ¹éĿ© +ï¼Į 佩 +get Resource +éĩij é±¼ +ä¸ĸ åįļ +Ġlim o +çͳ åĬŀ +-effect iveness +: w +å¹¶ 讲è¯Ŀ +éļı éļı便便 +çIJĨ论 åŃ¦ä¹ł +Ġbour geois +) # ++ L +人 åIJĹ +Ġ+ ( +ĠY en +ĠSu k +.W orld +æļĸ æĦı +,ä¸Ģ个 æĺ¯ +ĠTh orough +æŀľ èĤī +åĩł é¢Ĺ +ESS ION +" io +T rap +ä¸į éĻIJäºİ +ä¸įæĺ¯ éĤ£ä¹Ī +ç§ijæĬĢ ä¼ģä¸ļ +ç§ĭ çļĦ +Sign er +å¤±æľĽ çļĦ +åľ¨ 设计 +ĠJ ed +åİŁ ä½ľ +Ġbas 
ins +ĠSil icone +ï¼ħ 以ä¸Ĭ +äºļæ´² æĿ¯ +ĠScript s +Ġglycer ol +ä¸İ ä¸ĭ +ĠExt ensive +èİī å¨ħ +Ġfle as +åĴĮ 飩 +对 éĶĻ +Ġlocal ities +.b oost +ä¹Łä¸į éľĢè¦ģ +Dub ai +V IOUS +} ï¼ī +Ġ èĻ½è¯´ +ï¼Į 礼 +Ġb v +ĠF EMA +为 å®ĺ +æľĢ éķ¿çļĦ +å¼ķ åħ¥äºĨ +ĠAtt raction +y enne +ĠM off +æŃ£ åĿIJåľ¨ +æł¸ èĭ· +ç»Ļ èᝠ+广 æ±½ +大家 åı¯ä»¥ +æĤ£ äºĨ +è°· åŃIJ +å¼Ħ æŃ» +Ġcommercial ization +寻æ±Ĥ 帮åĬ© +Ġinfl ater +a er +æľ¬ 说æĺİ书 +Ex e +}} . +ä¸Ģèµ· åľ¨ +åįļ åѦ +æ´Ĺ è¡£æľį +çĿĢä¸Ģ ä¸Ŀ +ĠSE A +å¿ħçĦ¶ æĺ¯ +ĠBeat rice +ĠIter ate +, 设计 +ç͍æĪ· çķĮéĿ¢ +(l p +å§IJå§IJ çļĦ +Ġfibr illation +Rol ling +Ġ åı£ +or ity +ĠBl itz +_D ESCRIPTOR +,æĪij 说 +æĩĤ å¾ĹäºĨ +ĠHttp Request +OURN AL +- AP +åŃIJ æĸĩ +Ġen igmatic +Ġ< % +-t one +åĩĮ é£İ +ä¼ı ç¾² +ï¼ĮåįĹ åĮĹ +ĠScan ning +æłĩ 段 +è¿ĩäºĨ å¤ļä¹ħ +Ãł i +_cl one +åĪĽç«ĭ äºİ +b ak +ãĢĤ æŁ¥ +åıijå±ķ ä¸İ +.t gz +åłĤ 课 +F p +P AY +Ġle ans +å°ı å¦ĸ +_p ixels +èĩªçĦ¶ å°± +Sk etch +Ġreform ed +_pack ages +F BI +ï¼Įæīį çŁ¥éģĵ +å¤ĩæŁ¥ æĸĩä»¶ +ust ered +åı¯èĥ½ åıijçĶŁ +param etric +åĪĴ çł´ +Des criptions +汽车 è¡Įä¸ļ +ĠKend rick +Ġv end +è½´ å¿ĥ +æĦŁæŁĵ äºĨ +ĠHerm es +, 竣 +es co +ï¼Į åĬ¿å¿ħ +SE MB +çľŁäºº ç§Ģ +æ²Ļ滩 ä¸Ĭ +y um +ãĢĤ åĪļåĪļ +Ġon stage +ï¼ĮéĤ£ æĺ¯åĽłä¸º +Ġdecad ent +j unction +ä»ĸ 羣çļĦ +åīį 两天 +çļĦç¾İ æĻ¯ +è·Įä»· åĩĨå¤ĩ +, ç®Ģåįķ +Ġ å±ķ +çļĦ éĽª +Ġcross roads +Ġqual ifiers +驱 èµ¶ +SA FE +æĬij æĪĸ +Ġconj unct +ĠD IN +容 åύçļĦ +éĢĤ é¾Ħ +Ġant iquity +-l ang +_h our +ä½łä»¬ è¿ĻäºĽ +è§ İ +Ġfr an +æij¸ æİĴ +ĠNS Number +Ġdermat ologist +ĠR ye +ä½ĵ æł¼ +Ġrec ite +ç»ĵ è´¦ +åĦ¿ èĩ£ +ĠRIGHT S +è¿Ļ åı¯æĺ¯ +åĴĮ æ´»åĬ¨ +é£İ 浪 +äºĶ èī² +社ä¼ļ å®ŀè·µ +_con verter +æĻ®éģį 认为 +ãģ¾ ãģ§ +ç²¾åĩĨ æī¶è´« +Ġsubcontract ors +/ azure +ãĢģ æĶ¹ +ĠO at +_t w +èĩªå·±çļĦ æīĭ +ç»Ħç»ĩ ç»ĵæŀĦ +åĿł åħ¥ +-tr ade +/ per +ï¼Į éģµå¾ª +ä¸Ģ æŀĿ +æľĢ åıĹæ¬¢è¿İ +li qu +éĵĿ åįķæĿ¿ +Ġundergrad uates +f acts +Ġ æĿ¡ +ĠRes istant +ĠSpecial s +æĺ¾å¾Ĺ æĽ´åĬł +_bl ue +Ġhepar in +/ fl +ĠP é +ä»ĸ æĿ¥è¯´ +ap ort +Ġfl atter +ĠQ LD +éĢı éľ²åĩº +è¿Ŀ èĢħ +æľºæ¢° å·¥ä¸ļ +éĽĩ ä½£åħµ +Ġcinem as +Ġunm anned +ä½įç½® æĹ¶ +.Se 
lection +\ input +ãĢĤ çϾ +æīĢ å½¢æĪIJçļĦ +第ä¸Ģ åį· +ĠCool er +åѦ æľŁçļĦ +ï¼Įå¹¶ æĹł +а Ñħ +(); // +Ġsyn apses +Ġmamm al +R Q +Ġsn oring +è¿ľè¿ľ åľ° +说ä¸įåĩº è¯ĿæĿ¥ +Ġg eb +æīĢ é«ĺæł¡ +Ġher nia +ah ua +份 çļĦ +Ġadapt s +éĽĨä¸Ń éļĶ离 +ãĢģ å¿«ä¹IJ +Ġbro ok +讲 ä¹ī +æķ´ä¸ª è¿ĩç¨ĭ +éĥ½ä¸į å¤Ł +ï¼Įå¤ļ è°¢ +. bean +ãĢĤ çͲ +ãĢĤ 大æ¦Ĥ +Ġe er +æľī æĽ´ +çĮª è¹Ħ +ाठ° +ï¼Į 设ç«ĭ +Ġp ours +ĠA loe +== ( +Ġsp awning +oh m +æľ¨ çĵľ +çĤĴ é¥Ń +car ry +çļĦä¸ĭ èIJ½ +ĠOutput Stream +Sil ent +, éĵ¶è¡Į +D ont +åľ¨ 两个 +éĥ½ å°ī +Ġup regulation +åħ¥ åĨħ +ç¨Ģ éĩĮ +ä¼łæŁĵ æĢ§ +Ġbout iques +P ope +_ Val +t rap +ãĢĤ æ²»çĸĹ +ĠR MB +æ³ķ éĺµ +温åĴĮ çļĦ +Ġlev ied +Ġbos ons +ï¼Į éĩįè§Ĩ +åĽŀ æĹı +åħΠ容 +åĬ© åIJ¬åύ +Ġblog ged +å°½åı¯èĥ½ çļĦ +ï¼Į åĮĨåĮĨ +Ġn ir +æ¿ ł +é¢ijç¹ģ çļĦ +ãĢģ ä¼Ĭ +ĠL ough +è§ī çļĦ +èĥĨ çļĦ +Term inate +_DIP SETTING += : +L ICENSE +Ġrec reated +声 åĴĮ +ä½łçļĦ åIJįåŃĹ +æ±ī æŃ¦å¸Ŀ +ILL S +in strument +Ġexc els +Ġhills ide +Ġhandic apped +s amp +ãĢĤ åIJ¬è¯´ +ĠA PO +ess ors +ä¸įæĸŃ åĪĽæĸ° +èī¾ æĸ¯ +ĠAth lete +çŃī 大 +éĢĢ è®© +çļĦé«ĺ åİĭ +Ġmerc iful +âĢĿ æĪĸèĢħ +ĠJ UD +æŁIJ ä½į +æī« ä¸Ģæī« +.re q +主é¢ĺ çļĦ +ĠRO OT +.el astic +. 
\[[@ +el on +Ġ# ( +çĥŃ çĨĶ +ä¸įä¼ļ 让 +ï¼Į æĵ¦ +ï¼Į æĤĦæĤĦ +ä¹Ł éļ¾ +Ġpl umbers +è¿ľ æľŁ +-g al +ĠRE CE +Local ization +ï¼Ī åĮº +Ġinst ill +åİŁ çīĪ +Ġï ģ +.ib m +} C +ĠC ray +ä¹Ł å¾Īå¿« +_RE V +Ġcoff ees +ĠUl ster +ĠVers us +_SL AVE +ä¸Ģ ä¼Ļ +åĴĮ åį¡ +Ġar re +ov ascular +.b ot +ĠCal ed +ï¼Įåı¯ä»¥ 使 +/test s +èļķ ä¸Ŀ +Ġesp ionage +\ if +ï¼Į åħ³éĹŃ +ï¼Į æĩĤå¾Ĺ +人çĶŁ ä¸Ń +Main e +ï¼ĮéϤ åİ» +p ter +Ġ åĵģ +ä¹ĭéĹ´ è¿Ľè¡Į +è°ģ åķĬ +æĹħ游 åĮº +å¾Ī大çļĦ å½±åĵį +ĠBellev ue +Ġ$ [\ +ä¹ĭ 象 +ï¼Įæĺ¯ 她 +OL F +ĠPublic Key +æģ° å·§ +Ġend owment +ï¼Įä»ĸ å¾Ī +å¹² ç³» +ĠMar quis +_s cheduler +å®ŀæĸ½ åĬŀæ³ķ +ĠDist ricts +å¦Īå¦Ī 说 +display Name +_ prime +çļĦ èµĦæł¼ +ãĢĤ åŃŁ +西 éĩĮ +çľ¼ åĬĽ +ĠBe aches +æĪIJåĬ٠䏾åĬŀ +à° ¾ +âĢľ çϾ +ĠE tc +ĠSt amps +Ġgr ime +(d ep +under brace +-government al +Ġ$ ${ +注 å®ļäºĨ +ç¼ĸ 导 +èĭ¥ ä¸į +ĠHait ian +äºĮ çĥ¯ +ç¾İ åıij +é»Ħ é¾Ļ +æĬĹ æĹ± +H olly +al ien +åıĹ åζ +åħ± æĢ§ +ED GE +ç©¿ 好 +Ġcash back +ä¸īåįģ åħ« +æĸ¹ä½įæĪĸ ä½įç½®åħ³ç³» +ä¸Ģ æĶ¹ +ĠB istro +Ġi w +· è´Ŀ +å¾Īéļ¾ åıĹ +ĠVenezuel an +çľĭåΰ è¿Ļä¸Ģå¹ķ +CO UR +Ġdelinqu ent +ç» Ķ +Ġdes de +_s dk +æ¸ĹéĢı åΰ +æĢİä¹Ī åİ» +è£Ĥ åıĺ +å°±åľ¨ è¿Ļ +é³ Ĺ +缴纳 çļĦ +ãĢģ ç»Łè®¡ +Ġdo xy +ED A +(r and +ä»İæĿ¥ éĥ½æ²¡æľī +为ä¸Ńå¿ĥ çļĦ +Ġdisrespect ful +;| & +ï¼Į çĶµæ±ł +两 åı° +fo res +端 åΰ +ĠHel vetica +Ġmotor ized +Ġk J +æĹł çĽĬ +_G AIN +Ġstraight en +åŃĺåĤ¨ çļĦ +åĩºä¸Ģ å¼ł +Ġsp ed +çłĶ åζçļĦ +Alex a +Ġ å°ı说 +ä¸ľè¥¿ éĥ½ +èĮ¶ 壶 +,ä½ł è¦ģ +åįĩ级 为 +ä¼ijéĹ² 娱ä¹IJ +need le +åĸĢ ä»Ģ +Ġdis location +Ġinter feres +è¿ŀ 个 +gs m +æĶ» åįł +ĠCapital ism +åįĹåĮĹ æľĿ +æ±¶ å·Ŀ +Ġ æ²IJæĢĿæĺİ +èµ· 头 +-f act +éĢīæĭ© åĴĮ +æijĨ å¼Ħ +(T IM +GR AP +jug ate +- ir +f ighters +ãĢģ è¾£æ¤Ĵ +_p g +Ġtrust y +éªĹ äºĨ +_MIN OR +elastic search +, æıIJåīį +ě [ +ï¼Į çł´åĿı +çļĦ 稳å®ļ +ov ial +ĠInter iors +æijĨ æĶ¾åľ¨ +Ġdark ened +åıĤèĢĥ çŃĶæ¡Ī +.un wrap +ç͍ å°ı +Ġun ifying +ĠRead Only +éĩİ èıľ +å¡« åħ¥ +å±ħä½ı çݯå¢ĥ +Ġnan ost +uis ines +beaut y +ĠF arr +éĹ ľ +ä¼ļ 缴æİ¥ +itt arius +_c a +æļĹ å¤Ħ +è§£éĩĬ çļĦ +æĬĸ æĵŀ +æĹłçĹĽ 人æµģ +' b +, åıĬ +çļĦ 缴å¾Ħ +Ġde ft +amm 
able +/ opt +Ġ ä½Ľ +æīį æĢª +å·® äºĭ +对她 说 +é¦ħ 饼 +rom o +åı¯ 好 +鬼 èĦ¸ +ĠCro hn +* B +u ir +ĠW ille +Ġen closures +the mes +å¾® å¼±çļĦ +ĠS ERVER +ä¹Ł æĪIJäºĨ +è¾ħ é£Ł +失ä¸ļ çİĩ +ç»° ç»° +-gu ide +ï¼Į åĽŀçŃĶ +ĠP atti +ios ync +赤 å£ģ +æī¿è¯º 书 +æīĭæĮĩ 头 +çĽ² 人 +Ġrelic s +¢ çĹķ +ĠD enny +ans ky +FER ENCES +l ä +as al +âĢľ èĩª +æķĻèĤ² åѦ +ä½ľç͍ åĬĽ +ä¸Ģçľ¼ å°± +ĠTi O +] )/ +ãĢĤ ä¸Ģ缴 +ĠT OTAL +们 对 +æĵįä½ľ ç®Ģåįķ +ãĢģ 奥 +âĢľ B +ĠMar se +æµĵ çĥĪçļĦ +PH I +ä¸ŃéĹ´ ä½ĵ +Sl ug +ãĢģ 课ç¨ĭ +Ġtr u +eb ner +Ġund eniably +ĠStud ying +éĽĮ æ¿Ģç´ł +( common +åĬł éķ¿ +æį¢ è´§ +ä¹ĭéĹ´ 设æľī +Ġbad ass +ih il +Ġreception ist +Ġanx ieties +大声 çļĦ +Spirit ual +ou la +çļĦ åĺ´ +et na +ass ociate +Ġcons erving +æīĵ 转 +Ġes ophagus +.M an +IZ EOF +ç¾Ĭ çļ® +ĠUl tr += w +Ġsa ff +åģľ ç͍ +æīĺ æŀ¶ +Truth y +: K +æĸ¹æ³ķ è¿Ľè¡Į +(d c +ä¸īåįģ å²ģ +, 缴èĩ³ +w string +æīĵ ä¸Ń +rab ly +cs r +_CON TAINER +ĠText s +çļĦæĸ¹æ³ķ æĿ¥ +ï¼Įåĩº äºİ +ï¼Į èIJ½åľ¨ +ï¼Į æīĢè°ĵçļĦ +ä¸Ń åħ·æľī +æ°´ ä¹ĭ +使 她 +äºĨä¸Ģ åłĨ +é¦ĸ æĴŃ +Ġsubsid ized +éĽı å½¢ +ä¸Ģ个 åı« +æĥ³ ä¹° +ĠHe idelberg +ĠBut cher +ä¼łç»Ł æĸĩåĮĸçļĦ +anda fter +, åĮ»éĻ¢ +Ġ çŁ¥éģĵ +Ġe bx +人 å·²ç»ı +æĿ¥ èĢħ +ma i +Ġfl ung +Ġoffset of +æ¡Īä¾ĭ åĪĨæŀIJ +ĠNight mare +温馨 çļĦ +C zech +Ġc ringe +Ġre inc +åľ£ æ´ģ +æĭĽçĶŁ 计åĪĴ +; padding +f ic +ï¼Ľ B +Ġgl Get +Ġtyp ography +æİ§åζ çĿĢ +iter als +驱åĬ¨ çĶµè·¯ +ĠReport ed +rog ens +æĬµæĬ¼ 贷款 +Attempt s +H erm +J IT +ãĢģ 交 +Ġdown grade +èĤ¯å®ļ æľī +å·¥ä¸ļ çĶŁäº§ +oxic illin +âĢľ T +èݱ æģ© +ç¼´ èİ· +Ġstadium s +ĠAval on +J i +ï¼ģ ï¼Ł +æĹł åŃĺ +-d ebug +ohyd rate +å¨ĵ å¨ĵ +æľĢ èµ·çłģ +羣 伪 +ĠAir line +åĪĽå»º æĹ¶éĹ´ +Ġlat te +ĠPsy cho +> Returns +i ership +ãĢģ 骨 +ĠG ros +File Dialog +lim itations +ï¼Įä¸įæĸŃ åľ° +. 
encoding +ĠP ug +Ġde pt +ä¸Ĭ å°± +åķĨ çĶ¨è½¦ +å¦ĥ å¨ĺå¨ĺ +_CNT L +x mm +un iversal +Ġv ox +æľĿ ä¸ĭ +è°ģ åij¢ +Ġpref ixed +Ġtend ons +: V +ï¼Į æŀª +Ġsh l +ĠU gly +çłĶç©¶ æľºæŀĦ +å·¥ç¨ĭ éĻ¢ +ä½ĵåζ æľºåζ +æĶ¿æ³ķ 大åѦ +N aming +ï¼Į å°Ħ +æ£Ģ å®ļ +(p arsed +åģĩ ä½ĵ +èᣠåħī +Ġutil ising +ĠMor i +èį· åĮħ +Long rightarrow +Ġkidd os +: block +Z r +_ UTF +ï¼Į åľ°æĸ¹ +ĠP orch +大 åĶ¿ +Ġint s +Ġet hernet +ç¿» çĻ½çľ¼ +欲 è¨ĢåıοѢ +bow l +ú mer += num +c ds +å¤ Ķ +ãĢģ éĢī +åıij é»Ħ +ook er +ich a +äºĮ è¯Ŀä¸į说 +æłĩ çĤ¹ +Ġ åľŁåľ° +ãĢģ éģĵå¾· +nt l +æĹ¶ ä¸Ģå®ļè¦ģ +æŀ ĩ +æİ§ çIJĥ +鼶åĶ® ä¸ļ +ĠRav i +s and + Ĩ +âĢľ People +çݰ æĹ¶ +å¤Ħ æīĢ +Names paces +c ivil +ä¸ī æĢĿ +.s wt +ï¼ĮèĢĮ éĤ£ +ä¹IJ 天 +ina ire +æĿĥçĽĬæ³ķ æł¸ç®Ĺ +_ uc +Ġ æĪIJæľ¬ +ãĢĤ ç§ĭ +åı¯ 转æį¢ +.s ym +ä»ħ ç͍äºİ +me eting +æ°ijæĹı æĸĩåĮĸ +ĠPlant ation +Ġhe eft +éĢļ åħ¥ +è·¯ è¾¹çļĦ +Ġplan ters +Ġmor atorium +Ġanim ator +äºĮåįģ å²ģ +ĠUnder wood +æįŁå¤± 以 +æĺĮ 缼 +æĦŁåĬ¨ äºĨ +Ġost ensibly +ä¼ļ ä¸İ +äºĮ åĪĨ +ĠSH OP +é³ Ŀ +ãĢģ åIJĦç±» +ĠE MR +é¦ĸ èĦij +Ïģ Ïİ +ÏĢ ÎµÎ¹ +COM MENT +op in +ï¼Įä½ł 羣çļĦ +Cmd let +çĥĽ åħī +W AR +ãĢģ æĥħ绪 +reat ive +_m ux +_c apture +ina e +çłĶç©¶ å¼Ģåıij +ĠSk yl +æijĨ åĩº +//////////////////////////////// //////////////////////// +···· ·· +Ġintros pection +ĠI da +äºĮ 线 +åij³ èķ¾ +Author ities +Ġm unch +ä¹IJ è§Ĩ +-l imit +Ġstre aks +( or +- IN +f ir +å®¶ é£İ +(" ${ +产åĵģ 线 +éĢļçŁ¥ å¦Ĥä¸ĭ +/- / +å°¼åħĭ æĿ¾ +ĠThr iller +ãĢĭ åIJİ +Ġph age +.get Color +èģĶ èĢĥ +Ġpul umi +ĠHay nes +åĴĦ åĴĦ +Ïĥει ÏĤ +(v ideo +ĠProtocol s +ĠAle jandro +- raising +Ġ èģĤ +ĠT BI +说 è¿Ļè¯Ŀ +çѹ æİª +æķ· 设 +ĠGlo ver +; q +Ġt tl +ãĢģ æĽ¾ +vers ations +ĠEs per +ĠD oha +ĠV oucher +èĭ¦ 头 +æ¯ı天 éĥ½è¦ģ +åį«çĶŁ å±Ģ +天ä¸ĭ ä¹ĭ +('/ ', +et Address +åĴĮ æĶ¿åºľ +å·¥ä½ľ åİĭåĬĽ +ç¼ĸ éĢł +) dealloc +ĠT CL +没æľī ç͍ +att ie +åħĥä»¶ çļĦ +.Response Writer +Ġenrol ment +æĺłåħ¥ çľ¼å¸ĺ +为 ä¾Ŀæį® +(t race +è¡£ 橱 +ĠGUID E +/ inf +Ġre duct +ĠM Äģ +çIJĨ 缴æ°Ķ +-t une +ĠAuto CAD +ï¼Į好åĥı æĺ¯ +Ġis lam +Ġch auff +éķ¿ ç©º +ï¼ĮæĪij们 è¿ĺè¦ģ +壮 士 +åħ±åĴĮ åħļ +ĠSco op +S aw 
+æĪij å·² +客 ä½ĵ +(r oute +Ġrelent lessly +éļĨéĩį 举è¡Į +Ġm aven +ĠR udd +ress ors +åĪĻ æĺ¯åľ¨ +ĠCross Fit +Ġw av +ĠM MO +æµİ äºİäºĭ +åľĪ åľĪ +诸 å°Ĩ +( selector +Ġdrink ers +å°¸ 骨 +Fore ver +éļıæľº æķ° +æĸ°åįİ ä¹¦åºĹ +Ġlou is +issau ga +as el +ãĢĤ æĬķèµĦèĢħ +è¿Ļ éĥ½æĺ¯ +æĥ³ äºĨè§£ +_p itch +ï¼Į她 åĴĮ +èµ¢ çIJĥ +Ġpuzz ling +E FAULT +S ally +lic ht +çŃī é«ĺ +ï¼Įç͍ å¿ĥ +ĠBrief ing +å¸ĤåľºçĽij管 å±Ģ +( IT +ï¼Į éĿ¢ä¸Ĭ +ĠM DR +Ġra cers +Ġcollabor ator +Ġcup boards +ĠSon ny +æ¸ħ æ³ī +ĠString Comparison +æĿ¡ä»¶ æĺ¯ +.cal culate +." ). +çħ§ åºĶ +æĬķèµĦ åĨ³çŃĸ +Ġpa uses +ĠDis hes +én é +( mean +æĶ¹ åIJį为 +è¿ĺåľ¨ ç»§ç»Ń +ï¼Įæīĵ çł´ +Ġho ax +T ol +ï¼Į å®Įç¾İ +om acy +åı¯ è°ĥèĬĤ +Ġun planned +_R AT +.I gnore +Ġescal ated +Ġp ager +Ġst oves +Ġ" ); +ç¬ij é¢ľ +Ġå½ĵ å¹´ +Ġи ли +ĠWel ding +ĠCant or +ä¹łè¿ijå¹³æĢ»ä¹¦è®° åľ¨ +âĤ¬TM t +, å®ŀéĻħ +_ Enable +p ayer +Ġs istema +æĥ³ ä¸Ģä¸ĭ +西 çļĦ +çϽ ç²ī +ĠTr istan +Ġselfish ness +Ġpatron age +he ated +pl otype +åľ° ç¬ijäºĨç¬ij +Ġun ify +ä¸įç͍ åĨį +Gr pc +ĠFam iliar +碳éħ¸ éĴĻ +ĠHumph rey +äºĨä¸Ģ åı¥è¯Ŀ +.c ross +Ġdom ino +æ¡ĥ æĿİ +ãĢĤ ä¾Ŀæį® +em ple +_d eps +AM M +ç§» å±ħ +èµĦéĩij æĿ¥æºIJ +ä¼ļ åIJİ +bers pace +è·Ł ä»ĸ说 +ä»»ä½ķ éĹ®é¢ĺ +åģı ç½® +rup ulous +Ġumb rellas +pref erred +- {\ +ä¸Ń 庸 +ĠU G +ĠV ictim +Info List +ĠRed wood +Ġwar ped +ĠSH ORT +Ġmacro economic +èĢĮ 亡 +è§ģ ä¹ĭ +æµģ ä¸ĭ +ĠAl vin +请 计ç®Ĺ +å¾ħ æľº +Ġsever ance +亲èĩª åİ» +ĠPsych iatric +ĠSqu ash +ĠT oul +ãĢģ æµ·æ´ĭ +æľĢé«ĺ æ°Ķ温 +ĠW rote +Ġde leter +iv ic +åįĥ å¤ľ +=' ', +ĠAnn ounced +ĠGra vel +Ġgloss ary +A DE +è¦ģ å¦Ĥä½ķ +åIJĥ å®ĮäºĨ +èµĦ产 æĶ¯æĮģè¯ģåΏ +ï¼ĮæĹł æīĢ +Ġauthors hip +æŃ¹ å¾Ĵ +Ġpolyp hen +åĮį åĮIJ +? 
t +C FLAGS +ï¼Į ç͵èĦij +ĠC ached +Ġst on +çĻ» éŨ +/d ebug +è¯Ńè¨Ģ æĸĩåѦ +æĺ¾èĢĮæĺĵ è§ģçļĦ +ĠForgot ten +ï¼Į èģĶç³» +以 éĺ² +_N ATIVE +ĠOri oles +Ġdesp ise +Ġcauc us +Ġ åIJĪåIJĮ +å®Ŀ åºĵ +壮æĹı èĩªæ²»åĮº +åĨħ ç»ı +," % +è±Ĩ èĬ½ +ore xia +åıį è¶ħ +-s olid +åħħ çĽĪ +åĨ² äºĨè¿ĩæĿ¥ +ï¼Įåı¯ä»¥ èĢĥèĻij +Root s +åĬłåĪ© ç¦ıå°¼äºļ +Ġvalu ables +çļĦ 请æ±Ĥ +ĠéĤ£ æĺ¯ +æļĸæļĸ çļĦ +ĠPhen omen +åľ° åºķ +ä¸ī åĨĽ +-s n +ï¼Įåħ¶ 主è¦ģ +ï¼ĮæıIJåįĩ äºĨ +-Q aeda +ĠPRODUCT S +âĢľ å¤ļ +åĩº ä»· +åľ° 头 +æīĵ æįŀ +Ġextrac urricular +en ol +con crete +åĬł åĪ©äºļ +é¢Ĩ äºĨ +.e ql +state ments +ĠMey ers += top +åį³ æľŁ +Ġprop hes +ĠTim my +hot el +ĠAhmed abad +åĹĸ åĹĸ +ç͍ åĬĽçļĦ +AT TER +_R S +{ th +éĤ£ 帮 +ï¼Įä¸Ģ éĥ¨åĪĨ +èĵĿ èİĵ +è¾Ľ åĬ³ +Ġaer odynamic +E thereum +F ried +âĢľ M +Ġall ure +ring ing +Ġgraft s +çļĦ è®°å½ķ +ãĢģ 康å¤į +ĠJ DK +Ġmax ima +Block Size +éķ¿ è¢ĸ +æ²³ å¸Ĥ +Ġdigit ized +Ġele venth +Ġpund its +< Data +l ur +p nt +æ±Ĥ æĥħ +Car oline +Âł å®ī +没 说è¯Ŀ +çŁ³ çªŁ +æĺŁ é©° +æ¼Ĩ çļĦ +éħ± æ±ģ +Ġpeas ants +丰å¯Įå¤ļ彩 çļĦ +éľİ æĹ¶ +O Z +st retch +ĠC ite +ĠA Q +-e lected +åİĦ è¿IJ +å¸ĥé²ģ åħĭ +-che cbox +U ARY +ĠJ em +og ia +ĠK elsey +å¹´é¾Ħ çļĦå¢ŀéķ¿ +ï¼Į马 åħĭ +åħ¬ éĴ¥ +_A UD +ãĢĤä¸į管 æĺ¯ +èħ¾èħ¾ çļĦ +. 
); +åľ¨ ä½į +æľº 壳 +Ġgen omics +ĠS ensing +gh al +é«ĺ è°ĥ +åĮĹ éĿ¢ +èĩªå·±çļĦ çĶŁåij½ +äºij å±Ĥ +ç͵æºIJ 线 +rah ydro +åĽĽåij¨ çļĦ +Ġ(+ ) +b attle +Ġt ights +ï¼Įä¸į 好 +yd on +éĤ£ä¸ª å®¶ä¼Ļ +Ġsharp ness +_ owned +管 æīĢ +åıĸ èĩª +失 çģµ +-m eg +个人 è§ĤçĤ¹ +ĠRec ycl +ĠLib re +ĠDivid end +- Atlantic +Ġcol ossal +æĮģ åį¡ +积æŀģ åľ° +èµĸ 以 +éŃĦ åĬĽ +.Entity Data +ãĢģ æ¡Ĥ +ĠF us +ex plain +æİĴ æĮ¤ +ĠPr at +Ġgrat ifying +h ive +ãĢģ 幸ç¦ı +ĠG orge +æĶ¾ å¼ĢäºĨ +管çIJĨ èĥ½åĬĽ +.P arser +-B ar +è¢ĭ éĩĮ +( types +F IFA +ï¼Į åIJĥäºĨ +æĸ° 鼶åĶ® +Post ing +prov ides +r agon +åĩł æł¹ +æŃ¦ 好åı¤ +/std c +Ġ 好åIJ§ +Ġdise ased +Ġetern ally +è·ĭ æīĪ +ICollection View +d ia +ä½ľ å®¶çļĦ +缸 仿 +ï¼Įæ¯Ķ ä¸Ĭå¹´ +Ġè¯ģåΏ ç®Ģç§° +Ġlapar oscopic +ĠContain ers +ĠErl ang +g ado +æĺ¯ æķ´ä¸ª +ĠE pson +Ġpre process +oph ysics +æīĢå±ŀ çļĦ +ĠT odo +èĥ « +Ġreal isation +满 è´¯ +cent ric +é¼ĵ åIJ¹ +éĩijå±ŀ çļĦ +Week end +Ġprophyl axis +çļĦ çĹħ人 +æĸŃ å±Ĥ +æľīäºĽ äºĭæĥħ +rep end +Ġbunk er +ãĢģ ä¼ĺè´¨ +oc ally +æĸ¹ 士 +å®ī åĽ½ +ï¼Įåıª ä¸įè¿ĩæĺ¯ +æĪª äºĨ +ï¼Į 游客 +Ġm ike +ï¼Įæ¯ı åij¨ +çľ¼èĬ±ç¼Ń ä¹± +Ġ| > +æĿİ æĸĩ +æļĹ åύ +ãĢģ æ¯ı个 +è¦ģ ä»¶ +Ġwhe y +æĶ¯ çĤ¹ +æŃ¦ æĬĢ +м ен +Ġtwe aked +Ġcamar aderie +åĿ ³ +ç¥ŀ åħī +é¦ĸ éĥ¨ +-e fficacy +梯 åŃIJ +Own ership +Ġquint essential +ãĢģ 西çıŃçīĻ +åĴĮ åĩıå°ij +ç»Ħ åĴĮ +ĠQu ot +Reg isters +æī¿æĭħ 个åĪ« +ĠAlloc ator +Ġgoof y +{lst listing +çīĩ çīĩ +Ġpar rot +.b inary +PM G +Ġtort illas +缮çļĦæĺ¯ 为äºĨ +ĠÄ ° +äºŁ å¾ħ +è¶Ĭæĥ³ è¶Ĭ +/ ajax +U IC +ĠG K +å¾Ī æ·±çļĦ +_NOT IFY +æİĴæ°´ 管 +Ġ éĺ³ +èĬĤçľģ äºĨ +æĶ¿åĬ¡ åħ¬å¼Ģ +大æīĵ æĬĺæī£ +ï¼Į æ»ij +ï¼Įå°± ä¸įèĥ½ +æĶ¶ ä¹° +åħī é²ľ +çļĦ人 æĿ¥ +以ä¸ĭ åĩłç§į +ä¸ºåŁºç¡Ģ çļĦ +J uan +ä¸Ģ æľĽ +_s quare +æĢİä¹Ī åı¯ä»¥ +æĶ» åħ¥ +èµĦ产 åĴĮ +æľīçĽĬ çļĦ +.im show +æłªå¼ı ä¼ļ社 +j im +ãĢģ åIJī +âĢľ å¿ĥ +âĢľ åįĹ +æĮĩ æİĮ +å·²ç»ı çŁ¥éģĵ +弯 ä¸ĭ +Ġye a +ánd ez +( AP +Ġ æĻ® +ĠO sm +æķ¬ éħĴ +# Translate +d un +ĠM ISSING +ab on +ĠR TS +å¹³ æĺĵè¿ij +ĠSub missions +_al gorithm +ĠMand al +, The +Ġ æ¹ĸåįĹ +om ethyl +æĹ ĸ +Ġcon forms +åºĶ æ¿Ģ +éª ľ +Ġsub mar +Ġcount erex +_L ICENSE 
+åIJ¯ è¶ħ +atal yst +æ£Ģæµĭ çļĦ +æıĴ åĺ´ +(h Object +çļĦéĤ£ ä¸Ģ +Ġconver ging +sock opt +, å±ŀ +æĪij 以åīį +èĩª éĹ® +Ġac utely +del ivery +V II +âĢľ çĸ« +op old +ä¸ī çĪ· +ĠCon vergence +åħ° åį¡ +Enter ing +& L +Ġ( ± +åħ¬åı¸ åĨħéĥ¨ +æĥħåĨµ æĿ¥çľĭ +æĸ¹æ³ķ åı¯ä»¥ +ros cope +Ġsem aphore +ĠNOT HING +åºĶå½ĵ æĮīçħ§ +ĠRout ledge +_ rotation +Ġnd array +Ġdisob edience +ĠÏħ ÏĢ +Ġt ween +Ġth resh +ĠInd igo +ĠDes mond +Ġrol lover +ĠRapt ors +D ESC +å¹´ éķ¿ +çĸ² 软 +ĠChin atown +æ´ĹéĿ¢ 奶 +D ynamics +Ġf fi +åĪĩ ç¢İ +åĨħ容 æĺ¯ +in str +ï¼Į æ®ĭ +çļĦ è°ĥæķ´ +ä¸Ĭ çļ® +å¸Ī çĶŁçļĦ +èĭ± æĺİ +âĢĵ I +zz led +Ġsal ons +, table +W IFI +Ġ ðĿ +ol vable +Ġk araoke +åħ¥ çĭ± +OT ION +_init ialize +_OP TS +ATEG ORIES +Ġeyew itness +ant as +Ġdis illusion +_f uture +çľģ å¤ĸ +åĨĻ ä¸Ģç¯ĩ +å¹ķ åºľ +éĽĨä¸Ń 度 +, åİŁæľ¬ +\ par +ĠMag dal +Ïģ Ïĩ +éĤ® æĬ¥ +æĹº è¾¾ +åĭŁæĬķ é¡¹çĽ® +èļ© å°¤ +, ç¡®å®ļ +ĠL é +ä¸ī çķĮ +åĽŀ ä¸į +Ġpath name +ĠEn thus +ĠMat cher +è¯Ńè¨Ģ åѦ +ï¼ĮæĢ» æĬķèµĦ +çŁ¥åIJį ä¼ģä¸ļ +ĠAyurved a +å¤ĦçIJĨ 模åĿĹ +ĠHor izons +. 
enter +ï¼Į å¨ģåĬĽ +çļĦ æľĢç»Ī +ãĢĤ åħ± +od ds +ãĢģ éĩĩç͍ +éĩį çī© +车 ä½ĵ +Ġmin is +ĠSn acks +( ids +转 çĿĽ +.get Status +åĨ³å®ļ æĺ¯åIJ¦ +âĹĭ âĹĭ +Ġcentr ality +å¹ Ķ +æ²¹ æ³µ +ĠCO X +Ġtest ifying +æĿĢ äºĨæĪij +票 æķ° +Ġspy ware +ä¸Ģè¾Ī åŃIJçļĦ +ãĢĤ æĸ¯ +è¾¹ ä¸ĬçļĦ +Ġsculpt or +ĠD ora +ï¼Įä»ĸ åıijçݰ +Con verters +Ġart istry +N ova +ï¼Į ç»ıåİĨ +ãĢĤ åİ¿ +ip i +ĠK udos +ĠGr und +ä¿ĥè¿Ľ ä½ľç͍ +nam ely +橱 çªĹ +rompt u +% ' +ï¼Į 产 +æĹ¶ 表示 +Ġgener ational +ĠLa unched +ï ¾ +ct xt +æľįåĬ¡ åħ¬åı¸ +âij § +eless ly +Ġtele gram +ĠCrime a +< File +p urpose +Ġv ile +大 æ³ķ +è¿ĺ æľ¬ +ç±» 缮 +äºĴèģĶç½ij ä¸Ĭ +Ġbear ish +( br +anc ipation +æŃ¦ ç£Ĭ +.M aterial +ä»ģ æĿ° +è½»æĺĵ åľ° +Ġcongr ats +B or +r Ã¥ +ĠS MP +ä¸į è°ĥ +ting ly +æł¹ çļĦ +Ġpast ed +è¿IJåĬ¨ ä¸Ń +ĠNov ak +PRO P +ä¸īåĪĨ ä¹ĭäºĮ +-inst alled +Ġlist Of +Ġlevel ed +Ġchron ically +ãĢģ æķĻ +ãĢģ éĴ± +air n +Ġmet avar +Ġinvent ors +qu ila +ew e +ä¹ĭ åѦ +æĹł ä¸Ģ人 +竳 é±¼ +ĠZ ara +åĮ»çĸĹ éĺŁ +ĠMel ody +ĠOpt ics +) A +amb ient +æħ¢ æĤłæĤł +fl akes +ä¼ij æŃ¢ +Ġmicro environment +åĽºåĮĸ åīĤ +æĿŁæīĭ æĹłçŃĸ +. 
age +ĠC LEAN +ob ble +Ġnon existent +åıijå°Ħ åύ +B orrow +[ z + Ħ +æĶ¿æ²» ä¸Ĭ +èģĶåIJĪ ä½ĵ +èģļéĽĨ åľ¨ +âĸij âĸij +ï¼ĮèĢĮ 对äºİ +Ġpoly meric +Ġpor que +ĠConsult ancy +Ġrational ity +Ġhypert rophy +æı£ æµĭ +ìł ľ +Ġg aseous +æľĢ 常è§ģ +Ġx si +åĨ° åŁİ +空æ°Ķ åĩĢåĮĸåύ +kt or +dep artment +åı¤æĢª çļĦ +\ Request +åĴĮ æĪij说 +_s cheme +_pro d +æľīæīĢ äºĨè§£ +çIJ³ è¾¾ +ĠCatholic ism +å¼ŁåħĦ 们 +( av +ĠR TE +çļĦå¿ĥ æĦ¿ +ï¼Įè¿ĺæĺ¯ è¦ģ +éĵĥ éĵĽ +稻 è°· +æĪIJæŃ£ æ¯Ķ +Ġ æ°Ķ +Ġ æ²³åįĹ +èīº èĢĥ +ĠChile an +ä»»åij½ 为 +_FIX ED +F lower +al uronic +ĠW ad +AT I +çĿĢä¸Ģ æĬĬ +-med ium +G LOBAL +V W +il ical +å¼Ģ å¾Ģ +ms dn +Ġseek er +ãĢĤåΰ æĹ¶åĢĻ +ĠHairst yles +- ONT +Ġas ia +Th ost +Ġass ail +æ¸ħ çĤ¹ +Com ma +å½Ĵ åĬŁäºİ +Web inar +è¸ı è¿Ľ +Ġpor osity +ĠGar field +ĠVick i +è¶ħ çĦ¶ +_F rom +身ä½ĵ ä¸Ĭ +æij¸ ä¸į +éĻIJä½į æĿĨ +æĮł äºĨ += (\ +H OT +ï¼Į åĨ¬å¤© +Ġm oose +ig os +ID L +便 æľī +New swire +Ġbi ologists +Ġwood y +ä¼ļ å½±åĵįåΰ +Rem ain +Ġpanel ists +åĩ¸ åĩº +Republic an +вÐĤTM s +Ġolf actory +" T +æ·¡ åŃ£ +,æĪij们 åı¯ä»¥ +ĠClo jure +ĠVod afone +S andra +ĠF iesta +æĢ§ çŃī +离 åŃIJçļĦ +æĺ¯ä¸Ģ èĦ¸ +Ġcig ars +R ub +S usp +ä¸į 认 +éĴ Ĵ +Re vel +ĠCal ibration +ĠBo ilers +ĠSer ie +')) ; +ĠProtect ing +éĺ²æĻĴ éľľ +æľī å½±åĵį +åħ¥ 室 +è§Ħ模 æľĢ大çļĦ +临åºĬ ç»ıéªĮ +BL ACK +-sol uble +Ġso v +_p ay +sc aling +ĠLe hman +çİĦ å¾· +ï¼ĮåĬł æ²¹ +Ġmanip ulative +ie ee +æĬķ è¡Į +ä¸įä¼ļ åIJ§ +诱 饵 +ĠDund ee +\ operatorname +çļĦ æµģç¨ĭ +ĠR PA +çī© è¯Ń +ense mble +Ġinitial izes +Ġfacilit ators +Supplement al +s ad +ãĢģ 室åĨħ +Ġк о +ï¼Į 带æľī +ãĢĤ åı¦ä¸Ģ +è¿Ļ 丫头 +ĠN ing +æŀ ł +两 çīĩ +ä»ĸ们 è¦ģ +ä½łä»¬ åľ¨ +ï¼Įåį´ è¿ĺæĺ¯ +æıĴ ä¸Ĭ +ĠпÑĢ Ð°Ð² +) ') +< Double +am atory +Ġwh ist +Ġjust ifying +åģĩ éĿ¢ +Ġdiv est +è¯ĨåĪ« åĴĮ +ĠKy iv +< Value +al id +ç§ij æķĻ +é¦ĸ åºľ +_L CD +Ber lin +ï¼Į ãĢIJ +Ġp iling +Ġl ysis +ĠL ola +ï¼Įä»ĸ å°±ä¼ļ +çIJĥ åľºä¸Ĭ +ï¼Įå¹¶ æĮīçħ§ +requ encies +å¤ı é¾Ļ +èĤ¡ç¥¨ ä¸Ĭå¸Ĥ +é¢Ħéĺ² æİªæĸ½ +{ tr +ï¼Į åıij表 +ĠL ax +使 å¾Ĵ +Ġfl urry +é¾ Ľ +æ°ı æĽ° +Ġinterfer on +- SP +W ARN +åĽĽ 天 +ä¹Łæĺ¯ è¦ģ +([ - +(Un 
managedType +: æĪij们 +ãĢģ 丹 +ä»Ĭ天 è¦ģ +ï¼Įæĸ° çĸĨ +è´ª å¿ĥ +ï¼Į æĿijæ°ij +âĢĿ ... +ç͍ çĿĢ +è®° åı· +æļ´éľ² äºĨ +Iss uer +id r +举 åįĩ +é¾Ļ åį· +ĠFocus ing +ãĢģæ³ķè§Ħ åĴĮ +l amp +t rying +ãĢĤ åĩŃåĢŁ +ãĢģ èĩªåĬ¨åĮĸ +ter r +大 å±ıå¹ķ +åı¯ åģļ +æĶ¯ åĩºçļĦ +text up +çĻ» åı° +ï¼Į æīŃ +她 åį´ +ï¼Ī K +AL D +Ġcor r +ï¼ĮæīĢ以 æīįä¼ļ +_OT G +Ġmis ogyn +Ġscript ed +åĨľä¸ļ éĵ¶è¡Į +è¯įæĿ¡ åĨħ容 +E g +æ°´ çħ® +æ´» 该 +æĸ¯ åºķ +çģ¯ ç®± +ä»ģ å®Ĺ +åĪĨ享 ç»Ļ +vin yl +urop a +æ·±è¿ľ çļĦå½±åĵį +ĠTact ics +Thost Ftdc +Ġa woke +Ġper tain +è¯ģåΏ å¸Ĥåľº +ãĢģ å®Ī +éĢģ ç»Ļä½ł +åįģä¸Ģ äºĶ +Ġincompet ence +åĭĩå¾Ģ 缴 +ï¼Į èĩªèº« +ĠCh orus +ç±» èį¯çī© +è½» 伤 +è¿ľè¿ľ è¶ħè¿ĩ +-sp in +N SError +Ġa orta +imp lement +åĨĻ çĶŁ +-l isted +Ġ çŁ¥ +ä¸Ń èİ·åıĸ +åıijå±ķ æĪIJ +ç³»ç»Ł åľ° +åĽŃ éķ¿ +éĢĨ æĹ¶éĴĪ +_reg ular +Ġtar ot +ĠW orry +ild ed +Ġpr agma +ç¾İ å°ij女 +ms m +åıĭ çα +ï¼Įå¹¶ ç¡®ä¿Ŀ +/f ull +åij½ä»¤ è¡Į +oret te +æŀĹä¸ļ å±Ģ +åľ¨ 广å·ŀ +_S UFFIX +LL LL +çļĦ女 æľĭåıĭ +Ag ents +åłµ 车 +Ġrein vent +ĠPsych ologist +ĠRaj a +Ġis ometric +ï¼Įä½Ĩ ä¸įæĺ¯ +使ç͍ è¿ĩç¨ĭä¸Ń +abin et +纵 éĺŁ +è¯ĬæĸŃ åĴĮæ²»çĸĹ +ĠWith drawal +çľĭåΰ çļĦæĺ¯ +Ġten ets +åĩºåıij äºĨ +åŁºæľ¬ä¸Ĭ æĺ¯ +ĠSimple DateFormat +le ck +æĸ¹ æĹŃ +Context s +ç»ıèIJ¥ æĿĥ +æĴĩäºĨ æĴĩåĺ´ +ï¼Į åı¸é©¬ +ch te +ãĢģ çĶľ +âĢĿ [ +ç»ķ è¡Į +ç¬¬åĽĽ 个 +_sp arse +.check ed +ĠC ider +." 
); +"> , C +d ip +g ay +Ġ æĪIJåĬŁ +æĢ§ åħ³èĬĤçĤİ +To Remove +æī¾ åĩĨ +ç»ĵ 对 +PR T +ï¼Įä¸įæĸŃ æıIJåįĩ +à¸Ń à¸ĩ +åIJŀåIJIJ éĩı +Ex cerpt +åıĹ å®ł +To Point +Al erts +Ġgy ro +ç»Ļ 客æĪ· +åŃ£ 度çļĦ +éĦĤ å°Ķå¤ļæĸ¯ +ï¼Įå¦Ĥæľī ä¾µæĿĥ +Ġh amburger +è¡Į éĶĢ +åĽŀ åºľ +éŨ ç±» +Ġcap itals +追究 åĪijäºĭ责任 +ä¹ĭ è°ĵ +è̳ çķĶ +æ´ŀ ä¸Ń +ĠImp acts +ç§ijæĬĢ åıijå±ķ +Ġparty ing +Ġglimp ses +Ġyarn s +对 人çļĦ +æ¯Ķ ä¸ĺ +æĿİ å¢¨çϽ +(t m +ç§įæ¤į çļĦ +ä¸Ģ 顾 +Ġk ap +èĩª æİ§ +åıĬ 以ä¸ĭ +-s pecial +åİŁåĽł ä¹ĭä¸Ģ +Ġsex ist +ĠMart ian +ĠSam son +Ess entially +åİ¿å§Ķ 常å§Ķ +ĠPs alms +Ġaudiob ook +B OR +Ġaut ocomplete +ä¿ĿéĻ© ç®± +Ġc actus +好 æĪIJ绩 +å·¥ä½ľ è¦ģæ±Ĥ +è®° çĿĢ +[@ " +_ FF +ï¼Į å̡坼 +ãĢĤ åľ° +Com parable +OS X +ĠMont y +çļĦ 软件 +erv ous +åıĤ å·®ä¸įé½IJ +-d ried +Ġlocal ize +ç§ijæĬĢ åĽŃ +ï¼Į西 å®ī +ï¼Įç»§ç»Ń 说éģĵ +Ġrefere es +åħ¥ éĻ¢ +æŃ£å¼ı æĪIJç«ĭ +è´¢æĶ¿ åİħ +_SET UP +ĠEisen hower +b ek +k ci +ĠT Result +å°± æĪIJ为 +Ġcl adding +åıijçĶŁ åĨ²çªģ +ĠTravel s +å©ļå§» çļĦ +Ġ æ²» +åĵ ¡ +èµ· èī² +å·¥ä½ľ å¼Ģå±ķ +éŁ³ 讯 +æĻĭæ±Ł æĸĩåѦ +ĠD ying +.d ylib +è¿Ł äºĨ +aph ne +ï¼Įä»İæĿ¥ 没æľī +mer chant +_g amma +ĠKr ish +/ ro +ï¼Ī N +两 å¥Ĺ +orb idity +ĠTow ing +åIJįåĪĹ åīįèĮħ +ä»ĸ ä¹Łæĺ¯ +ng en +éĤª æ°Ķ +k ap +å°± å½ĵ +Ġï Ĥ +ĠRend erer +ĠGor illa +s child +Ġ çĶŁæĪIJ +ä¹ĭ æľĢ +(s orted +容æĺĵ 导èĩ´ +妮 åŃIJ +èħ¼ èħĨ +b illing +âĢĶ if +Ġz er +åŀĤ æ¶İ +.un defined +Ġdeng ue +å¤ļ éĹ® +ç¢İ å±ij +âĦĥ æĹ¶ +å¼ķ导 åѦçĶŁ +缸è¿ŀ çļĦ +) _{\ +Ġ 好åĥı +ä¸į çIJĨæĥ³ +ãĢģ 群ä¼Ĺ +è¿ĩ éĩį +so v +Ġdef ensively +è̳ éĹ» +Ġnurt ured +ï¼Į ç½Ĺ马 +ĠÃ Ń +ĠMe i +æĻ¶ åľĨ +ĠBring s +ĠHaus dorff +, éĥŃ +ä½IJ èŤ +as il +ĠY uri +ule le +éĩĬ æĢĢ +Do ctors +Ġneurodeg enerative +ĠD ROP +èĬĤ æĭį +Ġsuper l +NS Data +丰å¯ĮçļĦ ç»ıéªĮ +F IFO +ĠM MM +å¿ĥ æħĮ +å·¥ éĥ¨ +com parison +ç»ĵæŀĦ ä½ĵ +Ġss id +Ġgamb ler +, ä¸Ń央 +ä¸ĭåİ» åIJ§ +ç»Ļ大家 åĪĨ享 +ĠRoll ins +åİ»ä¸ĸ åIJİ +åĴĮ æĻ®éĢļ +Ġcour ting +çĶļ èĢħ +ï¼ĮåıĪ è¢« +ĠMate o +[ size +ãĢģ æķ°éĩı +ï¼Ľ æĪij们 +表 éĩĮ +è¾ĥ è½» +); č +æĥħå½¢ ä¸ĭ +Protocol s +ï¼Į è¶ħ级 +å®ī æĶ¾ +æĸ¹ä¾¿ éĿ¢ +ĠI bn +ä¸Ģ é»ij +ĠP ru +Ġhist ograms 
+Inter ruptedException +Ġ åĪĨéĴŁ +çļĦ æĪIJæŀľ +cl r +ä¸Ģ个 å¤ļæľĪ +ç»Ŀ é¡¶ +word press +æ¯Ľ åĪ· +æ²Ļ åľº +ĠCast illo +Ġmultip art +\ ]( +p ayers +ãĢĤ éĽª +ĠF use +ĠN PS +å°½ æĶ¶ +鼷 åħĭ +ÏĢ Î¹ +memItem Left +ed u +Ġst rolling +éħį çļĦ +ik awa +失 礼 +éħ· æļij +渣 çĶ· +_sl ug +Ġcl ots +å¾Ī çĶŁæ°Ķ +æķ° ä¸Ģæķ° +Re play +Ġste pper +ĠRet ry +详ç»Ĩ åľ° +_log ging +ĠInvalid OperationException +æģ°æģ° 缸åıį +( and +ä¸Ĭ æ·» +ï¼ĮæľĢ éĩįè¦ģçļĦ +ä¾į å¥ī +_PRO XY +_par allel +Ġhamm ered +, 以为 +w q +ï¼Į éħįå¤ĩ +çļĦ åľºéĿ¢ +ãĢĤ åģĩ设 +ç® IJ +åĪ© çī¹ +.d irect +for ums +é¾ ĭ +Ġret reated +ĠElse vier +( ab +Č ĠĠĠĠĠĠ +çĤ¹ 对 +å¿« èι +è·Ŀ ä»Ĭ +-sm oking +ĠSu pper +çłĶç©¶ é¢ĨåŁŁ +社ä¼ļ ä¿¡ç͍ +Ġfasc ist +F lux +ĠM ermaid +ĠB ells +ä¸İ ä»ĸçļĦ +士 åĿ¦ +.N OT +ãĢģæľī åºı +éħįå¥Ĺ èµĦéĩij +缸è¿ŀ éĢļ +Ġld ap +Ġabras ion +L abs +ĠSt u +çα çݲ +åįİ åĽ½ +ĠSp iral +æľĹ æľĹ +Pub lishing +Ġdisag rees +-educ ated +Ġpaj amas +èIJ¥ çļĦ +_st a +Ġcasc ading +ĠG ER +å°ı èĻİ +ï¼Įä½Ĩ éļıçĿĢ +_S QL +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠĠĠ +ĠP az +ãĢģ åݦéŨ +Ġdies er +ĠGadget s +éķ¿ åĪĢ +Ġam put +èݱ èĮµ +Ġliv estream +( rel +p ytest +çļĦ åıĤæķ° +SC P +é»ĺé»ĺ æĹłéĹ» +, å®¶åºŃ +j ani +ĠS CO +æĸ° èᝠ+æĹģè§Ĥ èĢħ +Ġor nate +å¿ĥ çİĩ +帮åĬ© æĤ¨ +ĠConst raints +. oo +Ġ ä¸Ģ个人 +Ġt ic +å°ı èıľ +主 ç¥ŀ +ĠCont rovers +ĠMal ware +Ġfortn ight +ĠD uel +ĠSt ove +éĤ£ éĹ´ +.n pm +. 
anim +Ġon slaught +ï¼ļ éĩij +ï¼Įè¿Ļ åıªæĺ¯ +æĺ¯ä¸Ģ èĩ´çļĦ +æľīäºĽ æĥĬè®¶ +Ġerad ication +g rove +ï¼Į èħ¾è®¯ +çļĦ ä¿¡å¿ĥ +Ġmed all +çģ« çĥŃçļĦ +åĮºåĪ« åľ¨äºİ +éĻĮ 离 +èĥ¤ç¦ Ľ +ãĢģ æ³ķ人 +ĠBra h +ï¼ĮæĪij åıªèĥ½ +ĠX VI +è°¢ éĤĢ +å¨ģ ä¿¡ +uz u +ç§įç±» ç¹ģå¤ļ +obb led +èµĶåģ¿ éĩij +á¹ ĩ +_ af +e ep +ï¼Į b +大 åĿĿ +Ġch ow +Ġmod ulating +AT C +Ġdon ut +åĪĻ éľĢè¦ģ +ĠPr ag +pre ting +,以 èĩ³äºİ +Inf inite +ĠE manuel +use ppe +.sh utdown +_PRO T +ï¼Į èѬå¦Ĥ +Ġl abyrinth +ä»Ĭ æľĿ +-c lear +å¹´ åĮĹ京 +é£ ķ +(" // +stand alone +è½´æī¿ 座 +æĺ¯ä¸Ģå®¶ éĽĨ +Ġphilanth rop +O ri +_g r +ĠRE V +Dep uty +Ġcoinc ided +ĠR ats +Ġdex ter +whel ming +T rivia +V illa +tt l +_c ipher +è¾¾ æĪIJçļĦ +é¢Ħ èµĽ +éĢĥ åij½ +éĥİ åIJĽ +å¾Ī大 ä¸Ģéĥ¨åĪĨ +æ¶Į èµ· +Ġadul tery +s af +Ġ ä¹° +ä¸į åĬĽ +ĠO O +ens a +æĬĽ çī© +z ones +Ġp edd +Ġconf ection +UR ST +_g rp +Ġaren as +åį§ åºķ +ãĢģ åŃIJ +ãĢģ æķĻæİĪ +æĹ¶éĹ´ ä¸İ +è¡¥ 课 +第ä¸ĢçϾ 鼶 +ĠPic nic +ãĤĪãģĨ ãģ« +ãĢģ è°ĥèĬĤ +åĽŃ 寺 +.d esign +,æĪij åĴĮ +æļ´ éĽª +Te levision +$ L +-> $ +åĬł å¾· +å±± ä½ĵ +Ġequ ates +AG C +ä½łçļĦ æīĭ +- strong +es us +æĬĢ å¸Ī +TT T +çļĦé«ĺ æł¡ +人åĿĩ èĢķåľ° +æŀ· éĶģ +J H +P urs +d rops +f on +æĹ¥ æĻĴ +che st +ĠDon or +Ġmut agen +第ä¸Ģ次 è§ģåΰ +( origin +cert ificate +/font s +çļĦ åĽ½ +å¤ĸ å¾Ħ +è¿IJ ç͍äºİ +ÑĢ ÐµÐ¼ +Rew ards +\ cell +× Ľ +Ġhe bben +æĢ» åľ¨ +Ġä»ĸ åľ¨ +. 
printf +Ġ åħ¨çIJĥ +ãĢģ çĶĺèĤĥ +ĠCl oser +Ġlo oms +Ġerror Code +PM ENT +碳 éĴ¢ +ant ec +èĩª 认 +( ãĢĬ +Ġ éŁ³é¢ij +ï¼Į æİ¥æĶ¶ +å¾ħ æijĬè´¹ç͍ +å¨ģ èĥ½ +容æĺĵ éĢłæĪIJ +è°ľ åĽ¢ +æĿ¥ çļĦ人 +è£ħç½® ä¸Ń +,ä¹Ł ä¸įä¼ļ +Tok yo +u F +åŁ ļ +åħ¨ 产ä¸ļéĵ¾ +å¾Ī æħ¢ +ç«ĭ çĿĢ +Ġobject AtIndex +å§ĭç»Ī æĺ¯ +.trans ition +Ġlands lide +Ġper ce +åĿļ æĮº +纯 天çĦ¶ +åĩłä¹İ éĥ½æĺ¯ +æĵ¦èĤ© èĢĮè¿ĩ +é«ĺ èĸª +å¤©åľ° éĹ´ +彪 æĤį +, åĦ¿åŃIJ +ï¼Į åįĥä¸ĩåĪ« +主 æĶ» +举 åŁİ +å¾· 森 +ĠPost ers +Me ans +Rad ar +ĠHa as +Ġà ¾ +åıijæĺİ çļĦ +污æŁĵ æºIJ +èµĦ讯 ç½ij +Ġs inner +ãĢģ æİ¥åıĹ +In vent +æĬĹ çĹħ +åľ£ 殿 +Ġnick named +西 ç«Ļ +Ġleaf let +Ġê° Ģ +s ports +Ġw ij +bl adder +ãģª ãĤī +Ġ\ _ +身份 ä¿¡æģ¯ +author ity +Ap artment +ĠINST ALL +Ġman ned +å±ģ é¢ł +å©´åĦ¿ åºĬ +/ art +被 æĴŀ +ç§ij æĸ¯ +ogen etic +Sign ificant +ãĢģ åı¸æ³ķ +us ional +åĩº è¡ĮçļĦ +Ġhere under +ï¼Į èĥľ +al beit +çļĦ æĿ¥æºIJ +åĭ Ļ +é²ģ èݽ +ĠHop f +ĠBernard ino +Ġ ç¾İåħĥ +em ing +ĠAd Words +Ġhost ess +亦 èı² +Ġdry ers +access ibility +è¿ŀè½½ ä¸Ń +ĠAdjust ed +him self +Ġ æľīçļĦ +ï¼ ¥ +ĠP addy +Ġv oir +ï¼ī åĨħ +(" x +r ased +Ġ ç»Ħç»ĩ +åIJİ è®° +åĬĽ è¡Į +.c gi +è°ĥæŁ¥ ä¸Ń +. READ +Ġ 缮æłĩ +ĠN IR +ject ure +ä½İ ä½İ +äºĴ æĦŁ +缩 éĩı +)} = +ĠReb els +-pub lished +-enh anced +Ġball park +_c ircle +SS ID +æİ¨åĬ¨ ä¸ĭ +Connect icut +Ġeyel ashes +Wh olesale +,è¿Ļ æĺ¯ä¸Ģ个 +çĽ¸å¯¹ æ¯Ķè¾ĥ +Tow ards +ist ently +Ġro ost +åĪĽæĸ° é«ĺ +çģĮ æµĨ +Ġcushion ing +Q N +ĠP g +ĠB ER +der a +æµ· æ¶Ľ +Ġorth onormal +大 éĢļ +ï¼Įä¸į æŃ¢ +Ġfund ers +ãĢģé«ĺ è¡Ģåİĭ +, len +H p +w C +yst y +ĠLog istic +ĠGC SE +Ġastronom ers +Ġe gy +Ġ$ -$ +_T AB +Ġpract icable +æŃ¦ 夷 +æľĿ ä¸ĢæĹ¥ +åIJĪå¹¶ èĮĥåĽ´ +lix ir +ĠHost el +- ear +> .< +ä¸Ģ éĴ± +è® « +è¡ ¿ +(s ym +æŁIJ 个人 +åIJ¸å¼ķ çĿĢ +ï¼Įåĵª æľī +ĠRud olph +Ġman oeuv +èĪ « +èİ« æĦģ +Ġconsolid ating +ĠWid ow +æĹ¥ 以 +å½±åĵį åĽłç´ł +orb is +ĠOS X +æĮ¨ æīĵ +. 
Controller +S vc +åľ¨ ä»Ģä¹Īåľ°æĸ¹ +ĠF itting +éĶģ æŃ¢ +è®Ńç»ĥ åĴĮ +Execut ing +大è¡Ĺ ä¸Ĭ +, åıĤåĬł +int e +ä¹ĭ çľ¼ +éªĤ 人 +oglob ulin +ï¼İï¼İ ï¼İï¼İ +/ rs +äºĨ åı¥ +åıį æīĭ +}} / +Ġmyel oid +她 åİ» +è§ģ çĿĢ +åįĬ å¤ı +åī§ çħ§ +大éĥ¨åĪĨ 人 +éĻĦåĽ¾ æłĩè®° +ãĢģ æĸĩåŃĹ +ress ible +èĥ½å¤Ł ä¸İ +éͦ ç¨ĭ +Ret ro +bm c +A str +ĠT old +ãĢģ çĶµæľº +Ġextrem ities +å®ħ åŃIJ +éĺ²èħIJ åīĤ +ĠFrem ont +v scale +ï¼Į çĭłçĭłçļĦ +èĩ ¾ +æĭ ® +Ġcont ending +æĺ¯ä¸Ģ é¢Ĺ +ĠSc enic +ä¾Ŀ æģĭ +è¾ĵ èµ¢ +ĠBY U +çıĬ çıĬ +; ++ +get Parent +Ġqu el +ãĢĭ ï¼īï¼Į +éĢļè¿ĩ åIJĦç§į +çİ© åģ¶ +ĠMed i +ä»»ä½ķ ä¸Ģç§į +/M icrosoft +ï¼Į éĿłè¿ij +Ġm pi +è°ĥ æį¢ +åIJ¸ äºĨä¸Ģåı£æ°Ķ +æ»´ è¡Ģ +Ġä¸Ĭ å®ĺ +ĠLat ina +ĠA erial +(p ublic +iny in +ï¼Į åĨ¬åŃ£ +è¿Ļ 两个人 +çī© åĬĽ +_s ched +以åıĬ åľ¨ +ï¼Įä»ħ ä¾ĽåıĤèĢĥ +æīĢå¾Ĺç¨İ è´¹ç͍ +v io +æľī æ°Ķ +Ġas n +ä½ł åģļ +åıij ä¸Ŀ +Ġ+ - +In herited +åIJĪ äººæ°ijå¸ģ +çŃĸ 马 +ï¼Įè¿ĺ 以为 +sub s +ĠNe ighbor +Ġcorrespond ed +æĹ©å°± çŁ¥éģĵ +-store y +Ġdist ressing +å¿« éŨ +ï¼Įåħ¨ å¹´ +ĠDermat ology +Ġinter loc +/ products +ãĢĤ åĽŀåΰ +å¸ħ çļĦ +ĠFrances co +ï¼Į çĭ¬èĩª +ãĢĤ ä¼¼ä¹İ +ãĢĤ çĤ¹åĩ» +Ġfall acy +endor f +Ġneutroph il +- My +è¾ĥ éķ¿çļĦ +ä¹Ł åĸľæ¬¢ +转 磩 +åįģä¸ĥ å¹´ +Ġmobil ize +Ġunst oppable +Z m +Ġ æĭ¥æľī +ãĢģ åĸĦ +ĠW rapped +ä¹Ł éļ¾ä»¥ +Ġres ins +iff on +没æľī åħ¶ä»ĸ +è§Ħ模 æľĢ大 +强åζ æī§è¡Į +ĠVoy ager +Ġconscient ious +Ġlud icrous +R andy +Ġ åľŁ +ĠG ideon +ä¹Ł å¸ĮæľĽ +ä¸İ æĶ¯æĮģ +-b udget +ä¹Łæĺ¯ éĿŀ常çļĦ +-ind ucing +ugg led +d ependence +çļĦ人 身 +ĠLib by +è¿ĩ è½½ +å·² å®ļ +èĥ¶ çīĩ +è´Ŀ è´Ŀ +coll ision +èĥ½ 详 +_f name +æºIJ æŀģ +æĴŀ æĴŀ +Ġiron ing +åĩĢåĪ©æ¶¦ 为 +Ġuncover ing +Imm igration +Ġsuperhero es +åħĥ宵 èĬĤ +ãĢĤ å¿ĥéĩĮ +è°Ī è¿ĩ +V GA +ï¼Į è¯ļ +ĠA irt +ĠM ott +(" __ +ram ing +Ġimprob able +Ġt ec +Ġc ello +ï¼Į åĩºæĿ¥ +âĢľ æīĵ +å¿ĥ æĢ§ +åIJĮ ä¸Ĭ +å¤ĸ ç͍ +ï¼Įè¿Ļ 让ä»ĸ +_P IX +çĶ· ä¸Ģ女 +ä¸ĢåĪĩ éĥ½æĺ¯ +(st mt +èī² å·® +ann ies +æĭĽ å½ķ +ĠAM I +AV I +jud icial +Ġsharpen ing +Ġhydrox yl +ĠMoist ur +.Iter ator +Ġcon forming +ĠH ym +Ġpr ong +æĮij æĭ¨ +çIJĨ论 çłĶç©¶ +ä¸Ńæĸĩ ç³» +ĠPeriod ic +Ġparaph 
rase +- images +h ak +çļĦ å¤ļ个 +åĴ¸ é±¼ += right +å°ģ é¡¶ +Def endants +ĠChar ities +.N il +ä¸ī个 å°ıæĹ¶ +æĶ¯ä»ĺ æĸ¹å¼ı +笼 åŃIJ +ĠBreak down +-en vironment +ĠEas ier +ĠRac ism +嬴 æĶ¿ +$ g +) è¿ŀæİ¥ +. ma +it re +ãĢģ éĺ²æ°´ +èĢĮ æĶ¹åıĺ +æ¶ ¿ +èİ« å±ŀ +湿 æ¼ī +ä¿Ĭ ç¾İ +ĠCur tain +P ac +ï¼Ī éĿŀ +ĠBe ckett +æĿIJæĸĻ ä¸Ń +èĥĥ éħ¸ +tra ffic +Ġclim bers +çļĦä¸ĢçĤ¹ æĺ¯ +, åij³éģĵ +Ġprot racted +.N ative +fe et +设置æľī 第ä¸Ģ +ERV E +Autom ated +Ġabbrev iations +Ġinter stellar +éªı 马 +Pen alty +M int +ï¼Į éĢīç͍ +åĴĮ åĽĽ +çľ¼ å½± +å¼ķ å¾Ĺ +ç§Ģ çļĦ +Ġep ist +Ġiter ating +ĠBiosc iences +ĠP eek +Sh annon +è¶£ çļĦ +Ġpron ouns +Ġchron icles +N Ps +è¿Ļ è¾Ĩ +iv ore +æīĢ æ¶īåıĬçļĦ +è¿ĺæĺ¯ å¾Ĺ +åĨ² è¿ĩæĿ¥ +ĠRC W +ĠTodd ler +ĠB ite +åŃIJ 模åĿĹ +ï¼ĮæĪij ä¹Łä¸įä¼ļ +æĬĢæľ¯ çŃī +ç»ıæµİ 建设 +Ġæľ¬æľŁ åıijçĶŁé¢Ŀ +对 éĤ£äºĽ +ä½ľ åĪĻ +ĠWe ir +pro tein +åĩ» çł´ +è¿İ é£İ +ĠMcG ee +- odd +ĠA CP +ind icated +çĿĢ åľ° +Part itions +ãģª ãģ© +bed o +_qu estion +ĠC ull +found ation +ï¼Į å¸ķ +诺 ç»´å¥ĩ +ä¹ĭéĹ´çļĦ èģĶç³» +æĹłå¥Ī ä¹ĭä¸ĭ +E igen +ï¼Į æī¾åΰäºĨ +IV ERY +åį§ å¼ı +å´© åĿı +Ġml x +Ġflaw lessly +ĠSHA RE +æ´¾ äºĨ +åĵŃ å¾Ĺ +ĠTur f +éŃħ æĥij +Ġsed uctive +ĠSes ame +大 秦 +æĹ¥ 飩 +æĺİ è¯´ +å°±æĺ¯ æĮĩ +Ġposs ib +Ġwar ms +æķ¬ 礼 +沿 æ±Ł +.U UID +ï¼Į è¶³å¤Ł +ĠS ND +Ġout p +(" & +è¡£ èįī +æĿĥåĪ© çļĦ +} T +ĠT CG +ate k +Ġpe ep +æĿİ åħĪçĶŁ +ĠMin im +çļĦ æİ¥è§¦ +使ç͍ ä¸Ń +è¿ľ é«ĺäºİ +è¿Ļä¸Ģ åľº +.T imestamp +ï¼Įæľ¬ å®ŀæĸ½ä¾ĭ +Med iator +Ġfibr in +ĠLuk as +åĬł èĸª +Ġtype Name +åijĬ çĬ¶ +éĿĴ çŁ³ +讲 éģĵçIJĨ +=' \ +ĠCustom ized +Ġbackpack ing +ĠRend ering +Appro val +et ah +ĠS BS +Ġne oc +ï¼Ľ å¹¶ä¸Ķ +.s mart +ï¼ĮèϽçĦ¶ æĺ¯ +马 ç«ŀ +åįı åĬŀ +, ç¥ŀ +ï¼Į éĿĻéĿĻ +ä¸į éĿłè°± +ĠWh irl +é¹ Ī +ï¼Įæľ¬ çİĭ +}^{ * +_a verage +K irk +æĶ¯ åIJ¾ +å±Ģ åľ° +Ġcool s +ĠAst hma +é¼ĵèµ· åĭĩæ°Ķ +å¯ĨåĪĩæİ¥è§¦ èĢħ +Ġ çĶŁæĹ¥ +æĢĢ æľī +ï¼Įèĩª æĪij +Ġg z +ĠS ank +ĠC ough +ĠK abul +éĩį éĩij +_S B +éĿĴ æ¶© +ĠSR AM +/ facebook +: normal +s db +ĠS ides +-s heet +æł¸ åıij +ĠBr at +ĠMid i +. 
Provider +_ standard +Ġhe lical +ord a +Ġheav iest +.assert That +ï¼Įåħ¨ æĸ¹ä½į +ç»ĵå©ļ åIJİ +ал и +ĠWake field +S aver +or ro +çļĦ æľĢ好 +ãĢģ åħħ满 +ç½ij绾 ä¸Ń +æĹ¢ ä¸į +害æĢķ äºĨ +ĠFred dy +_pop up +ï¼Į ä¸ĵ注äºİ +ä¸Ģ ç«Ļ +vers es +ï¼Ī éϤ +Ġperson able +绿 æ¤į +Ġ[[ [ +派人 åİ» +// \ +对 ä¸Ĭè¿° +éĢļ æĺİ +åı¯ä»¥ åIJĥ +_M PEG +Ġexc used +çļĦ主 导 +Ġchart ered +é¢Īæ¤İ çĹħ +im achinery +ort ical +æľŁ ä¸Ń +ï¼Įèĭ¥ æľī +æ£ĢéªĮ æ£Ģçĸ« +ĠFIN AL +ĠConserv ancy +g db +Ġ åĩłä¹İ +å¾Ī åı¯èĥ½æĺ¯ +"> )> +æĺ¯ åģĩçļĦ +ç¥ŀ 贯注 +æł¼ 纳 +Ġdec ryption +gg plot +_D ST +çŀª å¤§çľ¼çĿĽ +/ et +l ating +ak ens +åħ¬ 害 +æĬĬ æŁĦ +.M e +å°¼ æ³Ĭå°Ķ +Ġzero es +ĠFly er +æĶ¯æ°Ķ管 çĤİ +, é¡» +以 ä»»ä½ķ +åIJİ èħ¿ +ĠIn equality +没æľī åĩºçݰ +åıĪ åİ» +ĠX HTML +ï¼Įé«ĺ éĢŁ +.Is Valid +å·į 峨 +( orig +ï¼Į è´¹ç͍ +çļĦ èĭı +ä¸Ģ èģĬ +æıIJé«ĺ èĩªå·±çļĦ +çĭ¬ç«ĭ äºİ +ĠDest ruction +. Plugin +_ Channel +å°ı æıIJçIJ´ +å½ĵ åħµ +ĠGuang zhou +å½ĵ ä»ĸ们 +许 æĺĮ +ä¸Ŀ 线 +åĽ½éĻħ éĩijèŀį +ĠAcc redited +Ġdark net +ĠIP V +çļĦçĶŁæ´» ä¸Ń +Ġblo ated +ĠDh aka +D uck +in verse +ãĢģ æ²ŁéĢļ +åľ¨ æīĢ +âĢĻ ãĢĤâĢĿ +èģĶåIJĪ èµ·æĿ¥ +è¯įæĿ¡ ç¼ĸè¾ij +Ġquir ks +ಿ ಠ+æĺ¯ æĮī +ne o +ä¸ļåĬ¡ èĮĥåĽ´ +vest on +çĩĥ æĶ¾ +åħ±åIJĮ ç¼ĸè¾ij +ï¼Įäºİæĺ¯ å°± +ĠGentle man +Ġegreg ious +ĠD ab +å¼¹ éģĵ +æıIJåįĩ èĩ³ +èīºæľ¯ 设计 +æĹĹ åı· +ĠFort ress +(: ,:, +ï¼ ´ +çļĦ åĽ¾çīĩ +ä¸į ä¹łæĥ¯ +Ġ" {} +èĩªå·± ä¸Ģ个人 +allow ing +.max imum +红äºĨ èĦ¸ +ĠAx el +cou pon +ê ·¸ +èĭ¥ ä¸įæĺ¯ +enn ett +-l aden +顺åĪ© åľ° +ï¹ £ +å¿Ĺ åĪļ +Net herlands +ur faces +Ġk ang +å§Ĩ å·´ +Ġbrief ed +åŁİéķĩ å±ħæ°ij +æĺ¯ åĪĨ +ĠB PM +åľ° 级 +Data Table +ĠIm plant +Ġenh ancer +ç®Ĺæ³ķ çļĦ +æĭIJ çĤ¹ +Ġrenov ate +OFF SET +uter onomy +çļĦ çĶŁ +ãĢĤ éļı +ĠS lavery +ä¸į åİĮ +ä¸Ń åı¶ +ï¼Ľ åĽłä¸º +ç®Ĺ äºĨåIJ§ +ah assee +Ġopt ically +Ġaccommod ates +R alph +ï¼ĮæĪij è®°å¾Ĺ +åĽĽ 项 +é»Ħ å·¾ +森 çī¹ +ĠRel iance +çĤ® çģ° +âĢľ 好åIJ§ +Ġsh rew +æį¢ ä½į +\ ref +ä¹ĭ åĬ© +两 å¤Ħ +ãĢĤèĢĮ è¿Ļ +ĠFOR CE +.res erve +ãĢģ å¿«æį· +åĨħ è¿Ľè¡Į +ï¼Įä¸į è¡Į +ĠCam ino +- os +Sh anghai +ĠSub sid +åĸĬ è¯Ŀ +äºĶå¹´ 级 +ĠLif ecycle +G ithub +çļĦ åĪĢ +ac ola 
+.d irectory +è¿Ļä¹Ī éķ¿æĹ¶éĹ´ +æĢª 人 +è¯Ĺ 人çļĦ +,éĤ£ ç§į +T bl +æľº ä½ĵçļĦ +æ· ŀ +th i +ĠG aw +éķ¿ æ²» +Ġsur ging +ç®Ģ ç®Ģåįķ +Ġ æĢ§åĪ« +ãĢģ æĬ¤çIJĨ +ä¼ļ éĢIJæ¸IJ +åıĬ åIJĦç§į +éĢī æīĭçļĦ +Ġdec id +è¿IJåĬ¨ æĹ¶ +pred icate +Ġascertain ed +W x +_c atalog +å·ŀ ç«ĭ +æĸĩåĮĸ åºķèķ´ +ĠAng er +äºĭä»¶ ä¸Ń +å¼¥ åĭĴ +Dist inct +èģĬ天 è®°å½ķ +Ġrasp berries +åIJ¬ 她 +çĹħ éĢĿ +æł¹æį® ç»Ļå®ļçļĦ +æĦģ çľī +ethe red +-bl own +ä¼Ĭæĸ¯åħ° æķĻ +ig ram +.v olume +Liber ty +_ ## +æ±Ĥ åĴĮ +èµµ 丽é¢ĸ +åĨῬ¡ 被 +éĺħ读 åİŁæĸĩ +çļĦçĥŃ çĤ¹ +éĻĪ æĹ§ +ĠMAP K +Ġadam ant +D n +L ion +飩 å¼ı +ç»Īäºİ åı¯ä»¥ +Ġer st +é©» 马 +ç¼ł çĿĢ +pers istent +Ġbru ised +ĠVu itton +R ental +ad oes +ĠO TT +è¦ģ äºĨ +ib uf +aut ilus +_F ree +åŃ¦ä¹ł èĢħ +Ġhex agonal +é¡¶å°ĸ çļĦ +spe ech +æŀĩ æĿ· +ï¼Į éĢŁ +port ing +_ST A +ä¹ĭéĹ´çļĦ å·®é¢Ŀ +Ġgru esome +é¢ģå¸ĥ çļĦ +.Res olve +(Method ImplOptions +M ist +v ig +Ġ( ...) +æĺ¯ æ°´ +å°±æĺ¯ å°Ĩ diff --git a/vocab/ling_vocab.mllm b/vocab/ling_vocab.mllm new file mode 100644 index 000000000..c12f8df7d Binary files /dev/null and b/vocab/ling_vocab.mllm differ diff --git a/vocab/smallthinker_merges.txt b/vocab/smallthinker_merges.txt new file mode 100644 index 000000000..54f3c8765 --- /dev/null +++ b/vocab/smallthinker_merges.txt @@ -0,0 +1,151387 @@ +['Ġ', 'Ġ'] +['ĠĠ', 'ĠĠ'] +['i', 'n'] +['Ġ', 't'] +['ĠĠĠĠ', 'ĠĠĠĠ'] +['e', 'r'] +['ĠĠ', 'Ġ'] +['o', 'n'] +['Ġ', 'a'] +['r', 'e'] +['a', 't'] +['s', 't'] +['e', 'n'] +['o', 'r'] +['Ġt', 'h'] +['Ċ', 'Ċ'] +['Ġ', 'c'] +['l', 'e'] +['Ġ', 's'] +['i', 't'] +['a', 'n'] +['a', 'r'] +['a', 'l'] +['Ġth', 'e'] +[';', 'Ċ'] +['Ġ', 'p'] +['Ġ', 'f'] +['o', 'u'] +['Ġ', '='] +['i', 's'] +['ĠĠĠĠ', 'ĠĠĠ'] +['in', 'g'] +['e', 's'] +['Ġ', 'w'] +['i', 'on'] +['e', 'd'] +['i', 'c'] +['Ġ', 'b'] +['Ġ', 'd'] +['e', 't'] +['Ġ', 'm'] +['Ġ', 'o'] +['ĉ', 'ĉ'] +['r', 'o'] +['a', 's'] +['e', 'l'] +['c', 't'] +['n', 'd'] +['Ġ', 'in'] +['Ġ', 'h'] +['en', 't'] +['i', 'd'] +['Ġ', 'n'] +['a', 'm'] +['ĠĠĠĠĠĠĠĠ', 'ĠĠĠ'] +['Ġt', 'o'] +['Ġ', 're'] +['-', '-'] +['Ġ', '{'] +['Ġo', 'f'] 
+['o', 'm'] +[')', ';Ċ'] +['i', 'm'] +['č', 'Ċ'] +['Ġ', '('] +['i', 'l'] +['/', '/'] +['Ġa', 'nd'] +['u', 'r'] +['s', 'e'] +['Ġ', 'l'] +['e', 'x'] +['Ġ', 'S'] +['a', 'd'] +['Ġ', '"'] +['c', 'h'] +['u', 't'] +['i', 'f'] +['*', '*'] +['Ġ', '}'] +['e', 'm'] +['o', 'l'] +['ĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠ'] +['t', 'h'] +[')', 'Ċ'] +['Ġ{', 'Ċ'] +['Ġ', 'g'] +['i', 'g'] +['i', 'v'] +[',', 'Ċ'] +['c', 'e'] +['o', 'd'] +['Ġ', 'v'] +['at', 'e'] +['Ġ', 'T'] +['a', 'g'] +['a', 'y'] +['Ġ', '*'] +['o', 't'] +['u', 's'] +['Ġ', 'C'] +['Ġ', 'st'] +['Ġ', 'I'] +['u', 'n'] +['u', 'l'] +['u', 'e'] +['Ġ', 'A'] +['o', 'w'] +['Ġ', "'"] +['e', 'w'] +['Ġ', '<'] +['at', 'ion'] +['(', ')'] +['Ġf', 'or'] +['a', 'b'] +['or', 't'] +['u', 'm'] +['am', 'e'] +['Ġ', 'is'] +['p', 'e'] +['t', 'r'] +['c', 'k'] +['â', 'Ģ'] +['Ġ', 'y'] +['i', 'st'] +['--', '--'] +['.', 'ĊĊ'] +['h', 'e'] +['Ġ', 'e'] +['l', 'o'] +['Ġ', 'M'] +['Ġb', 'e'] +['er', 's'] +['Ġ', 'on'] +['Ġc', 'on'] +['a', 'p'] +['u', 'b'] +['Ġ', 'P'] +['ĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠ'] +['as', 's'] +['in', 't'] +['>', 'Ċ'] +['l', 'y'] +['ur', 'n'] +['Ġ', '$'] +[';', 'ĊĊ'] +['a', 'v'] +['p', 'ort'] +['i', 'r'] +['-', '>'] +['n', 't'] +['ct', 'ion'] +['en', 'd'] +['Ġd', 'e'] +['it', 'h'] +['ou', 't'] +['t', 'urn'] +['ou', 'r'] +['ĠĠĠĠ', 'Ġ'] +['l', 'ic'] +['re', 's'] +['p', 't'] +['=', '='] +['Ġth', 'is'] +['Ġw', 'h'] +['Ġ', 'if'] +['Ġ', 'D'] +['v', 'er'] +['ag', 'e'] +['Ġ', 'B'] +['h', 't'] +['ex', 't'] +['=', '"'] +['Ġth', 'at'] +['**', '**'] +['Ġ', 'R'] +['Ġ', 'it'] +['es', 's'] +['Ġ', 'F'] +['Ġ', 'r'] +['o', 's'] +['an', 'd'] +['Ġa', 's'] +['e', 'ct'] +['k', 'e'] +['ro', 'm'] +['Ġ', '//'] +['c', 'on'] +['Ġ', 'L'] +['(', '"'] +['q', 'u'] +['l', 'ass'] +['Ġw', 'ith'] +['i', 'z'] +['d', 'e'] +['Ġ', 'N'] +['Ġa', 'l'] +['o', 'p'] +['u', 'p'] +['g', 'et'] +['Ġ}', 'Ċ'] +['i', 'le'] +['Ġa', 'n'] +['at', 'a'] +['o', 're'] +['r', 'i'] +['Ġp', 'ro'] +[';', 'čĊ'] +['ĉĉ', 'ĉĉ'] +['t', 'er'] +['a', 'in'] +['Ġ', 'W'] +['Ġ', 'E'] +['Ġc', 'om'] +['Ġre', 'turn'] +['ar', 't'] +['Ġ', 
'H'] +['a', 'ck'] +['im', 'port'] +['ub', 'lic'] +['Ġ', 'or'] +['e', 'st'] +['m', 'ent'] +['Ġ', 'G'] +['ab', 'le'] +['Ġ', '-'] +['in', 'e'] +['il', 'l'] +['in', 'd'] +['er', 'e'] +[':', ':'] +['it', 'y'] +['Ġ', '+'] +['Ġt', 'r'] +['el', 'f'] +['ig', 'ht'] +['(', "'"] +['or', 'm'] +['ul', 't'] +['st', 'r'] +['.', '.'] +['"', ','] +['Ġy', 'ou'] +['y', 'pe'] +['p', 'l'] +['Ġn', 'ew'] +['Ġ', 'j'] +['ĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠ'] +['Ġf', 'rom'] +['Ġ', 'ex'] +['Ġ', 'O'] +['l', 'd'] +['Ġ', '['] +['o', 'c'] +[':', 'Ċ'] +['Ġs', 'e'] +['Ġ', 'le'] +['----', '----'] +['.', 's'] +['{', 'Ċ'] +["'", ','] +['an', 't'] +['Ġa', 't'] +['as', 'e'] +['.', 'c'] +['Ġc', 'h'] +['<', '/'] +['av', 'e'] +['an', 'g'] +['Ġa', 're'] +['Ġin', 't'] +['âĢ', 'Ļ'] +['_', 't'] +['er', 't'] +['i', 'al'] +['a', 'ct'] +['}', 'Ċ'] +['iv', 'e'] +['od', 'e'] +['o', 'st'] +['Ġc', 'lass'] +['Ġn', 'ot'] +['o', 'g'] +['or', 'd'] +['al', 'ue'] +['al', 'l'] +['f', 'f'] +['(', ');Ċ'] +['on', 't'] +['im', 'e'] +['a', 're'] +['Ġ', 'U'] +['Ġp', 'r'] +['Ġ', ':'] +['i', 'es'] +['iz', 'e'] +['u', 're'] +['Ġb', 'y'] +['i', 're'] +['Ġ}', 'ĊĊ'] +['.', 'p'] +['Ġs', 'h'] +['ic', 'e'] +['a', 'st'] +['pt', 'ion'] +['tr', 'ing'] +['o', 'k'] +['_', '_'] +['c', 'l'] +['#', '#'] +['Ġh', 'e'] +['ar', 'd'] +[')', '.'] +['Ġ', '@'] +['i', 'ew'] +['ĉĉ', 'ĉ'] +['Ġw', 'as'] +['i', 'p'] +['th', 'is'] +['Ġ', 'u'] +['ĠT', 'he'] +['id', 'e'] +['a', 'ce'] +['i', 'b'] +['a', 'c'] +['r', 'ou'] +['Ġw', 'e'] +['j', 'ect'] +['Ġp', 'ublic'] +['a', 'k'] +['v', 'e'] +['at', 'h'] +['o', 'id'] +['Ġ=', '>'] +['u', 'st'] +['q', 'ue'] +['Ġre', 's'] +[')', ')'] +["'", 's'] +['Ġ', 'k'] +['an', 's'] +['y', 'st'] +['un', 'ction'] +['****', '****'] +['Ġ', 'i'] +['Ġ', 'us'] +['p', 'p'] +['on', 'e'] +['a', 'il'] +['==', '=='] +['n', 'ame'] +['Ġst', 'r'] +['Ġ', '/'] +['Ġ', '&'] +['a', 'ch'] +['d', 'iv'] +['yst', 'em'] +['el', 'l'] +['Ġh', 'ave'] +['er', 'r'] +['ou', 'ld'] +['ul', 'l'] +['p', 'on'] +['Ġ', 'J'] +['_', 'p'] +['Ġ=', '='] +['ig', 'n'] +['S', 't'] +['.', 
'Ċ'] +['Ġp', 'l'] +[')', ';ĊĊ'] +['f', 'orm'] +['p', 'ut'] +['ou', 'nt'] +['}', 'ĊĊ'] +['d', 'd'] +['it', 'e'] +['Ġg', 'et'] +['r', 'r'] +['om', 'e'] +['Ġ', 'âĢ'] +['ar', 'am'] +['c', 'c'] +['Ġ*', '/'] +['E', 'R'] +['I', 'n'] +['le', 's'] +['_', 's'] +['on', 'g'] +['i', 'e'] +['Ġc', 'an'] +['Ġ', 'V'] +['er', 'v'] +['p', 'r'] +['Ġ', 'un'] +['ro', 'w'] +['b', 'er'] +['Ġd', 'o'] +['l', 'l'] +['Ġ', 'el'] +['Ġs', 'elf'] +['at', 'ed'] +['ar', 'y'] +['Ġ', '.'] +["'", ']'] +['u', 'd'] +['Ġ', 'en'] +['ĠT', 'h'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠ'] +['t', 'e'] +['_', 'c'] +['u', 'ct'] +['Ġa', 'b'] +['or', 'k'] +['.', 'get'] +['Ġ', '#'] +['a', 'w'] +['res', 's'] +['o', 'b'] +['N', 'ame'] +['ap', 'p'] +['[', "'"] +['Ġal', 'l'] +['or', 'y'] +['it', 'ion'] +['an', 'ce'] +['e', 'ar'] +['Ġcon', 't'] +['v', 'ent'] +['i', 'a'] +['Ġw', 'ill'] +['I', 'N'] +['ĠĠĠĠĠĠĠĠ', 'Ġ'] +['re', 'turn'] +['Ġ<', '/'] +['d', 'ata'] +[')', 'ĊĊ'] +['R', 'e'] +['p', 'le'] +['il', 'd'] +['th', 'er'] +['Ġy', 'our'] +['"', 'Ċ'] +['(', '$'] +['Ġ', 'out'] +[')', ','] +['Ġh', 'as'] +['S', 'tring'] +['s', 'o'] +['Ġ', 'up'] +['a', 'x'] +['Ġde', 'f'] +['Ġb', 'o'] +['g', 'e'] +['al', 'se'] +['O', 'N'] +['p', 'er'] +['ic', 'h'] +['Ġb', 'ut'] +['Ġ', 'Ċ'] +['Ġ', '_'] +['_', 'm'] +['ad', 'd'] +['que', 'st'] +['od', 'el'] +['s', 'elf'] +['er', 'y'] +['f', 't'] +['en', 's'] +['//', '//'] +['a', 'ke'] +['.', 'C'] +['Ġg', 'o'] +['Ġf', 'unction'] +['Ġ', 'K'] +['iv', 'ate'] +['Ġ', 'im'] +['Ġcon', 'st'] +['.', 't'] +['Ġ*/', 'Ċ'] +[')', ';čĊ'] +['Ġv', 'oid'] +['Ġs', 'et'] +['ĠS', 'ystem'] +['c', 'ri'] +['(', ')Ċ'] +['l', 'i'] +['ĉ', 'if'] +['.', 'm'] +['al', 'ly'] +['s', 'et'] +['e', 'p'] +['âĢĻ', 's'] +['b', 'o'] +['de', 'f'] +["'", ',Ċ'] +['Ġm', 'e'] +['Ġ', '!'] +['at', 'ch'] +['"', '>'] +['"', ',Ċ'] +['e', 'c'] +['ĠI', 'n'] +['p', 'h'] +['Ġ', '|'] +['_', 'f'] +['Ġv', 'ar'] +['en', 'ce'] +['I', 'd'] +['re', 'e'] +['in', 'k'] +['le', 'ct'] +['u', 'g'] +['et', 'h'] +['Ġel', 'se'] +['--------', '--------'] +['con', 't'] +['Ġs', 
'o'] +['at', 'ic'] +['Ġl', 'o'] +['p', 'ro'] +['t', 'on'] +['s', 's'] +['ow', 'n'] +['ab', 'el'] +['o', 'int'] +['ou', 's'] +['el', 'd'] +['S', 'T'] +['T', 'he'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ'] +['R', 'E'] +['"', ':'] +['ol', 'or'] +['t', 'p'] +['e', 'g'] +['ke', 'y'] +['u', 'de'] +['ĠS', 't'] +['ou', 'nd'] +['Ġa', 'r'] +['"', ');Ċ'] +['en', 'er'] +['s', 'er'] +['b', 'ject'] +['ess', 'age'] +['f', 'er'] +['Ġm', 'ore'] +['ation', 's'] +['ent', 's'] +['Ġh', 'is'] +['Ġthe', 'y'] +['.', 'S'] +['Ġ', 'Y'] +['u', 'se'] +['n', 'e'] +['is', 'h'] +['ol', 'd'] +['_', 'd'] +['i', 'o'] +['i', 'eld'] +['Ġp', 'er'] +['C', 'ont'] +['ing', 's'] +['##', '##'] +['Ġd', 'ata'] +['Ġs', 'a'] +['e', 'f'] +['f', 'o'] +['Ġon', 'e'] +['en', 'g'] +['Ġd', 'is'] +['A', 'T'] +['Ġn', 'ame'] +['Ġtr', 'ue'] +['v', 'al'] +['le', 'd'] +['.', 'f'] +['Ġn', 'e'] +['Ġ', 'end'] +['.', 'T'] +['c', 're'] +['ar', 'k'] +['lo', 'g'] +['E', 'x'] +['err', 'or'] +['_', 'id'] +['ur', 're'] +['ang', 'e'] +['Ġn', 'ull'] +['rr', 'ay'] +['Ġm', 'y'] +['p', 'an'] +['ic', 't'] +['at', 'or'] +['V', 'iew'] +['L', 'ist'] +['ĉ', 'return'] +['âĢ', 'Ŀ'] +['Ġp', 're'] +['Ġ', 'x'] +['cl', 'ude'] +['ar', 'g'] +['o', 'v'] +['.', 'h'] +['Ġ', '>'] +['Ġthe', 'ir'] +["'", ')'] +['ir', 'st'] +['ic', 'k'] +['g', 'h'] +['L', 'E'] +['O', 'R'] +['Ġpr', 'ivate'] +['t', 'em'] +['čĊ', 'čĊ'] +['us', 'er'] +['Ġ', ')'] +['c', 'om'] +['.', 'A'] +['"', ';Ċ'] +['Ġ', 'id'] +['re', 'ad'] +['Ġwh', 'o'] +['_', 'b'] +['"', '>Ċ'] +['Ġt', 'ime'] +['Ġm', 'an'] +['r', 'y'] +['====', '===='] +['rou', 'p'] +['ro', 'p'] +['p', 'ublic'] +['v', 'el'] +['um', 'ber'] +['b', 'le'] +['Ġwh', 'ich'] +['********', '********'] +['Ġan', 'y'] +['Ġf', 'alse'] +['w', 'e'] +['Ġv', 'alue'] +['Ġl', 'i'] +['"', ')'] +['nd', 'er'] +['g', 'r'] +['Ġn', 'o'] +['p', 'aram'] +['f', 'ig'] +['.c', 'om'] +['Ġa', 'pp'] +['_', 'l'] +['ion', 's'] +['.', 'D'] +['ĠC', 'h'] +['Ġab', 'out'] +['Ġa', 'dd'] +['Ġs', 'u'] +['Ġstr', 'ing'] +['I', 'D'] +['Ġo', 'ver'] +['str', 'ing'] +['.', 
'l'] +['our', 'ce'] +['_', 'C'] +[']', 'Ċ'] +['Ġ', 'qu'] +['ĠS', 'tring'] +['c', 'a'] +['S', 'E'] +['Ġ', 'ro'] +['s', 'h'] +['u', 'al'] +['T', 'ype'] +['s', 'on'] +['n', 'ew'] +['er', 'n'] +['Ġa', 'g'] +['A', 'R'] +[']', ';Ċ'] +[']', '.'] +['Ġ', '?'] +['ic', 'al'] +['Ġd', 'es'] +['ut', 'h'] +['i', 'x'] +['ay', 's'] +['Ġt', 'ype'] +["'", 't'] +['a', 'ult'] +['Ġin', 'ter'] +['v', 'ar'] +['.', 'b'] +['Ġp', 'art'] +['.', 'd'] +['urre', 'nt'] +['I', 'T'] +['E', 'N'] +['en', 'c'] +['(', 'f'] +['r', 'a'] +['v', 'alue'] +['ch', 'o'] +['ut', 'ton'] +['o', 'se'] +['Ġ!', '='] +['at', 'er'] +['Ã', '©'] +['re', 'ate'] +['ol', 'l'] +['p', 'os'] +['y', 'le'] +['n', 'g'] +['A', 'L'] +['us', 'ing'] +['am', 'es'] +['Ġ{', 'čĊ'] +['at', 'es'] +['el', 'y'] +['Ġw', 'ork'] +['Ġ', 'em'] +['in', 'al'] +['Ġs', 'p'] +['Ġwh', 'en'] +['.s', 'et'] +['ĠĠĠĠ', 'ĠĠ'] +[')', ':Ċ'] +['t', 'o'] +['qu', 'ire'] +['ind', 'ow'] +['le', 'ment'] +['pe', 'ct'] +['as', 'h'] +['[', 'i'] +['Ġu', 'se'] +['.', 'F'] +['pe', 'c'] +['Ġa', 'd'] +['o', 've'] +['ce', 'ption'] +['eng', 'th'] +['in', 'clude'] +['ad', 'er'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠ'] +['at', 'us'] +['T', 'h'] +['it', 'le'] +['r', 'it'] +['v', 'oid'] +['()', '.'] +['(', 'Ċ'] +['Ġof', 'f'] +['Ġo', 'ther'] +['Ġ&', '&'] +["'", ';Ċ'] +['m', 's'] +['Ġbe', 'en'] +['Ġt', 'e'] +['m', 'l'] +['c', 'o'] +['n', 'c'] +['erv', 'ice'] +['Ġ', '%'] +['**', 'Ċ'] +['an', 'n'] +['ad', 'e'] +['ĊĊ', 'ĊĊ'] +['lo', 'ck'] +['con', 'st'] +['pon', 'se'] +['Ġs', 'up'] +['+', '+'] +['d', 'ate'] +['Ġa', 'cc'] +['Ġh', 'ad'] +['Ġb', 'u'] +['ĠR', 'e'] +['Ġw', 'ere'] +['Ġf', 'ile'] +['Ġw', 'ould'] +['ĠâĢ', 'ľ'] +['v', 'en'] +['is', 's'] +['Ġ', 'our'] +['c', 'lass'] +['r', 'aw'] +['Ġy', 'ear'] +['D', 'ata'] +['Ġv', 'al'] +['Ġs', 'ome'] +['f', 'ter'] +['y', 's'] +['Ġ//', '/'] +['rou', 'nd'] +['v', 'iew'] +['Ġp', 'e'] +['Ġth', 'ere'] +['Ġsa', 'id'] +['d', 'u'] +['o', 'f'] +['l', 'ine'] +['/', '*'] +['d', 'uct'] +['Ġh', 'er'] +['ĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠ'] +['R', 'es'] +['Ġc', 'o'] +['Ġcom', 
'm'] +['is', 'e'] +['m', 'in'] +['ĠĠĠĠ', 'Ċ'] +['#', 'include'] +['eth', 'od'] +['.', 'P'] +['ut', 'e'] +['Ġas', 's'] +['I', 'nt'] +['as', 'k'] +['lo', 'c'] +['Ġli', 'ke'] +['od', 'y'] +['Ġle', 't'] +['lo', 'ad'] +['Ġa', 'm'] +['ro', 'l'] +['Ġg', 'r'] +['y', 'p'] +['Ġal', 'so'] +['ĠI', 't'] +['ur', 'l'] +['if', 'ic'] +['or', 's'] +['_', 'P'] +['_', 'n'] +['ig', 'h'] +['Ġth', 'an'] +['C', 'om'] +['A', 'N'] +['U', 'L'] +['at', 'ing'] +['ĠTh', 'is'] +['re', 'f'] +['_', 'S'] +['Ġst', 'atic'] +['ro', 'll'] +['Ġj', 'ust'] +['Ġres', 'ult'] +['i', 'an'] +['id', 'th'] +['Ġthe', 'm'] +[')', ');Ċ'] +['d', 'er'] +['re', 'ak'] +['C', 'on'] +[':', '//'] +['u', 'le'] +['..', '.'] +['ar', 'ch'] +['em', 'ent'] +['Ġ<', '<'] +['us', 'h'] +['en', 'se'] +['ar', 'r'] +['Ġint', 'o'] +['c', 'ess'] +['am', 'p'] +['i', 'ed'] +['um', 'ent'] +['Ġ', '\\'] +[']', ','] +['w', 'o'] +['al', 's'] +['Ġwh', 'at'] +['an', 'c'] +['V', 'alue'] +['=', "'"] +['ol', 'um'] +['Ġp', 'os'] +['ag', 'es'] +['ay', 'er'] +['Ġs', 'c'] +['u', 'es'] +['"', ')Ċ'] +['_', 'T'] +['Ġl', 'ist'] +['(', 's'] +['Ġc', 'ase'] +['C', 'h'] +['ĉĉĉĉ', 'ĉ'] +['////', '////'] +['pon', 'ent'] +['Ġ', 'z'] +['Ġk', 'n'] +['le', 't'] +['D', 'E'] +['re', 'd'] +['Ġf', 'e'] +['Ġ}', ',Ċ'] +['Ġ', ','] +['(', 't'] +['Ġf', 'irst'] +["'", ');Ċ'] +['w', 'ord'] +['Ġ', 'import'] +['Ġa', 'ct'] +['Ġch', 'ar'] +['C', 'T'] +['ĠT', 'r'] +['op', 'le'] +['=', '{'] +['ĉ', 'f'] +['i', 'ent'] +['c', 'ent'] +['.', 'j'] +['le', 'ction'] +[')', ')Ċ'] +['Ġon', 'ly'] +['Ġpr', 'int'] +['m', 'er'] +['.', 'W'] +['o', 'ck'] +['Ġ', '--'] +['T', 'ext'] +['Ġo', 'p'] +['an', 'k'] +['Ġit', 's'] +['Ġb', 'ack'] +['[', '"'] +['Ġne', 'ed'] +['Ġc', 'l'] +['Ġs', 'ub'] +['Ġl', 'a'] +['(', '('] +['.', '"'] +['O', 'bject'] +['Ġst', 'art'] +['f', 'ile'] +['(', 'self'] +['n', 'er'] +['e', 'y'] +['Ġus', 'er'] +['Ġ', 'ent'] +['ĠC', 'om'] +['it', 's'] +['ĠC', 'on'] +['ou', 'ble'] +['ow', 'er'] +['it', 'em'] +['ver', 'y'] +['ĠW', 'e'] +['lic', 'k'] +['Ġ', 'Q'] +['ph', 'p'] +['t', 'tp'] 
+["'", ':'] +['ic', 's'] +['Ġu', 'nder'] +['Ġ*', 'Ċ'] +['.', 'L'] +[')', ';'] +['ic', 'es'] +['Ġre', 'g'] +[')', 'čĊ'] +['ĉ', 'public'] +['S', 'S'] +['Ġth', 'en'] +['re', 'at'] +['i', 'ous'] +['.', 'G'] +['e', 'k'] +['ire', 'ct'] +['he', 'ck'] +['cri', 'pt'] +['n', 'ing'] +['ĠU', 'n'] +['Ġm', 'ay'] +['ĠW', 'h'] +['B', 'o'] +['I', 'tem'] +['str', 'uct'] +['.', 'st'] +['re', 'am'] +['ib', 'le'] +['lo', 'at'] +['Ġor', 'g'] +['u', 'nd'] +['s', 'um'] +['_', 'in'] +['..', '/'] +['_', 'M'] +['Ġh', 'ow'] +['r', 'ite'] +["'", 'Ċ'] +['T', 'o'] +['w', 'w'] +['Ġpe', 'ople'] +['ind', 'ex'] +['.', 'n'] +['ht', 'tp'] +['(', 'm'] +['ect', 'or'] +['Ġin', 'd'] +['Ġj', 'av'] +[']', ',Ċ'] +['ĠH', 'e'] +['_', 'st'] +['f', 'ul'] +['o', 'le'] +[')', '{Ċ'] +['Ġsh', 'ould'] +['op', 'y'] +['el', 'p'] +['i', 'er'] +['_', 'name'] +['ers', 'on'] +['I', 'ON'] +['ot', 'e'] +['Ġt', 'est'] +['Ġb', 'et'] +['rr', 'or'] +['ul', 'ar'] +['ã', 'Ģ'] +['Ġ', 'Ð'] +['b', 's'] +['t', 'ing'] +['Ġm', 'ake'] +['T', 'r'] +['Ġa', 'fter'] +['ar', 'get'] +['R', 'O'] +['olum', 'n'] +['r', 'c'] +['_', 're'] +['def', 'ine'] +['Ġr', 'ight'] +['r', 'ight'] +['d', 'ay'] +['Ġl', 'ong'] +['[', ']'] +['(', 'p'] +['t', 'd'] +['con', 'd'] +['ĠP', 'ro'] +['Ġre', 'm'] +['ption', 's'] +['v', 'id'] +['.', 'g'] +['Ġ', 'ext'] +['Ġ', '__'] +["'", ')Ċ'] +['p', 'ace'] +['m', 'p'] +['Ġm', 'in'] +['st', 'ance'] +['a', 'ir'] +['a', 'ction'] +['w', 'h'] +['t', 'ype'] +['ut', 'il'] +['a', 'it'] +['<', '?'] +['I', 'C'] +['t', 'ext'] +['Ġp', 'h'] +['Ġf', 'l'] +['.', 'M'] +['cc', 'ess'] +['b', 'r'] +['f', 'ore'] +['ers', 'ion'] +[')', ',Ċ'] +['.', 're'] +['ate', 'g'] +['Ġl', 'oc'] +['in', 's'] +['-', 's'] +['tr', 'ib'] +['ĠI', 'nt'] +['Ġa', 'rray'] +[',', '"'] +['P', 'ro'] +['(', 'c'] +['ess', 'ion'] +['>', 'ĊĊ'] +['Ġs', 'he'] +['"', ']'] +['ap', 'h'] +['Ġex', 'p'] +['ert', 'y'] +['ĠS', 'e'] +['Ġp', 'ar'] +['un', 'c'] +['E', 'T'] +['Ġre', 'ad'] +['pr', 'int'] +['Ġre', 'l'] +['Ġfor', 'm'] +['Ġd', 'r'] +['Ex', 'ception'] +['in', 'put'] +['Ġtr', 
'ans'] +['####', '####'] +['ord', 'er'] +['B', 'y'] +['Ġa', 'w'] +['it', 'ies'] +['u', 'ff'] +['pl', 'ay'] +['.', 'add'] +['ĠâĢ', 'ĵ'] +['Ġw', 'ant'] +['Ġcom', 'p'] +['ment', 's'] +['Ġ|', '|'] +['a', 'z'] +['b', 'e'] +['Ġn', 'umber'] +['Ġre', 'quire'] +['ĠE', 'x'] +['Ġc', 'ol'] +['Ġ', 'key'] +['em', 'ber'] +['Ġt', 'wo'] +['Ġs', 'ize'] +['Ġwh', 'ere'] +['U', 'T'] +['res', 'ult'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ'] +['ou', 'gh'] +['or', 'ld'] +['o', 'od'] +['u', 'ch'] +['at', 'ive'] +['g', 'er'] +['are', 'nt'] +['Ġ/', '*'] +['Ġar', 'g'] +['Ġwh', 'ile'] +['(', 'this'] +['Ġre', 'c'] +['Ġd', 'if'] +['St', 'ate'] +['Ġs', 'pec'] +['r', 'ide'] +['_', 'F'] +['Ġlo', 'ok'] +['A', 'M'] +['il', 'ity'] +['et', 'er'] +['âĢĻ', 't'] +['ĊĊ', 'Ċ'] +['ay', 'out'] +['----------------', '----------------'] +['ag', 'er'] +['Ġc', 'ould'] +['Ġb', 'r'] +['end', 's'] +['u', 'res'] +['Ġkn', 'ow'] +['et', 's'] +['ĠI', 'f'] +['ĠS', 'h'] +['.', 'w'] +['b', 'ack'] +['Ġs', 'er'] +['Ġ+', '='] +['Ġf', 'r'] +['()', ');Ċ'] +['Ġh', 'and'] +['I', 'nd'] +['UL', 'L'] +['I', 'm'] +['()', ';ĊĊ'] +['Ġm', 'ost'] +['Ġtr', 'y'] +['Ġn', 'ow'] +['rou', 'gh'] +['>', 'čĊ'] +['ack', 'age'] +['Ġh', 'im'] +['.', '_'] +['if', 'y'] +['Ġb', 'reak'] +['Ġ', ');Ċ'] +['re', 'n'] +['#', 'define'] +['it', 't'] +['Ġa', 'p'] +['ĉ', 'c'] +['(', 'n'] +['ĠY', 'ou'] +[':', 'ĊĊ'] +['-', 'm'] +['Ġe', 'very'] +['ust', 'om'] +['li', 'ent'] +['oc', 'ument'] +['cri', 'ption'] +['E', 'rror'] +['-', 'b'] +['Ð', '¾'] +[']', '['] +['tr', 'ans'] +['Ġp', 'oint'] +['Ġst', 'd'] +['Ġf', 'il'] +['T', 'ime'] +['Ġm', 'od'] +['Ġ', '->'] +['Ġ', 'error'] +['a', 'h'] +['Ġt', 'ext'] +['roll', 'er'] +['lo', 'se'] +['q', 'l'] +['Ġp', 'ol'] +['>', '', '<'] +['.', 'B'] +['-', 'c'] +['Ġop', 'en'] +['Ġe', 'st'] +['ĠĠĠĠĠĠĠĠ', 'Ċ'] +['Ġn', 'ext'] +['I', 'M'] +['Ñ', 'Ĥ'] +['O', 'T'] +['Ã', '³'] +['Ġf', 'ollow'] +['cont', 'ent'] +['ĠĠĠĠĠĠĠĠ', 'ĠĠĠĠ'] +['Ġin', 'clud'] +['H', 'E'] +['ĠR', 'es'] +['Ġh', 'ref'] +['Ð', '¸'] +['Ġc', 'ar'] +['yp', 'es'] +['im', 
'age'] +['U', 'n'] +['Ġbo', 'ol'] +['A', 'D'] +['Ġg', 'ame'] +['.F', 'orm'] +['row', 's'] +['*', '/'] +['vel', 'op'] +['.D', 'rawing'] +['Ġp', 'ath'] +['is', 'ion'] +['Ġe', 'ach'] +['ĠP', 'l'] +['_t', 'ype'] +['P', 'ath'] +['ne', 'ction'] +['Ġa', 'v'] +["'", ').'] +['Ġsup', 'port'] +['EN', 'T'] +['re', 'm'] +['"', ').'] +['Ġo', 'wn'] +['Ġc', 'or'] +['c', 'ount'] +['m', 'iss'] +['u', 'ally'] +['Ġm', 'em'] +['st', 'd'] +['i', 'ence'] +['se', 'arch'] +['"', 'ĊĊ'] +['F', 'orm'] +['Ġs', 'ex'] +['en', 'ame'] +['Ġs', 'ign'] +['Ġ', 'et'] +['ĠĠĠĠĠĠĠĠ', 'ĠĠ'] +["',", "'"] +['ĠA', 'pp'] +['Ġth', 'ose'] +['o', 'ff'] +['Ġ', 'err'] +['Ġs', 'ystem'] +['Ġbe', 'st'] +['c', 'ode'] +['Ġs', 'ame'] +['Ġd', 'i'] +['us', 's'] +['Ġc', 'reate'] +['ath', 'er'] +['A', 'rray'] +['.', 'in'] +['f', 'e'] +['S', 'ervice'] +['U', 'N'] +['at', 's'] +['Ġ', 'Z'] +['al', 'th'] +['Ġm', 'ade'] +['tr', 'ue'] +['A', 'B'] +['Ġm', 'ark'] +['r', 'id'] +['if', 'ied'] +[',', 'čĊ'] +['y', 'n'] +['p', 'ress'] +['Ġg', 'roup'] +['Ġf', 'in'] +['ĠL', 'icense'] +['F', 'ield'] +['eg', 'er'] +['Ġw', 'orld'] +['in', 'ess'] +['t', 'y'] +['Ġpro', 'cess'] +['(', 'b'] +['Ġc', 're'] +['ar', 'n'] +['iv', 'es'] +['Ġm', 'ain'] +['ide', 'o'] +['_', 'g'] +['A', 'G'] +['val', 'id'] +['im', 'g'] +['P', 'I'] +['Ġc', 'olor'] +['Ġre', 'port'] +['Ġt', 'ake'] +['ri', 'b'] +['O', 'M'] +['Ġd', 'ay'] +['Re', 'quest'] +['Ġs', 'k'] +['b', 'ers'] +['ĉ', 's'] +['.A', 'dd'] +['o', 'ot'] +['Im', 'age'] +['Ġcom', 'ple'] +['ol', 'lection'] +['Ġto', 'p'] +['Ġf', 'ree'] +['A', 'S'] +['D', 'e'] +['ĠO', 'n'] +['I', 'G'] +['et', 'a'] +['D', 'ate'] +['Ġa', 'ction'] +['O', 'ver'] +['it', 'or'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ'] +['n', 'ot'] +['Ġind', 'ex'] +['h', 'er'] +['ic', 'on'] +['O', 'n'] +[';čĊ', 'čĊ'] +['iv', 'ity'] +['m', 'and'] +['.W', 'indows'] +['O', 'L'] +['Ġre', 'al'] +['Ġm', 'ax'] +['l', 'and'] +['..', '..'] +['r', 'aph'] +['Ġbu', 'ild'] +['le', 'g'] +['ass', 'word'] +['?', 'ĊĊ'] +['âĢ', '¦'] +['o', 'ok'] +['u', 'ck'] +['Ġm', 
'essage'] +['t', 'est'] +['iv', 'ers'] +['Ġin', 'put'] +['Ġar', 't'] +['Ġbet', 'ween'] +['G', 'et'] +['ent', 'er'] +['g', 'round'] +['en', 'e'] +['Ã', '¡'] +['.l', 'ength'] +['N', 'ode'] +['(', 'i'] +['C', 'lass'] +['f', 'or'] +['ĠâĢ', 'Ķ'] +['t', 'en'] +['o', 'in'] +['Ġ', 'ke'] +['u', 'i'] +['ĠI', 'N'] +['Ġt', 'able'] +['s', 'ub'] +['ĠL', 'e'] +['Ġhe', 'ad'] +['Ġm', 'ust'] +['////////', '////////'] +['.', 'util'] +['Cont', 'ext'] +['Ġor', 'der'] +['Ġm', 'ov'] +['o', 'ver'] +['Ġcont', 'in'] +['Ġs', 'ay'] +['st', 'atic'] +['.T', 'ext'] +['Ġclass', 'Name'] +['pan', 'y'] +['Ġt', 'er'] +['he', 'ad'] +['r', 'g'] +['Ġpro', 'duct'] +['Th', 'is'] +['.', 'âĢĿ'] +['ĠB', 'ut'] +['lo', 'y'] +['Ġd', 'ouble'] +['s', 'g'] +['Ġpl', 'ace'] +['.', 'x'] +['m', 'essage'] +['Ġin', 'formation'] +['pr', 'ivate'] +['Ġo', 'per'] +['c', 'ed'] +['d', 'b'] +['">', ''] +['ater', 'ial'] +['ile', 'd'] +['Ġp', 'ut'] +['Q', 'u'] +['Ñ', 'Ģ'] +['un', 'g'] +['m', 'ap'] +['ĉĉĉĉ', 'ĉĉĉĉ'] +['Ġle', 'vel'] +['Com', 'ponent'] +['bo', 'ok'] +['cre', 'en'] +['_', 'RE'] +['Ġcon', 'fig'] +['ã', 'ģ'] +['O', 'r'] +['.', 'data'] +['Ġd', 'ocument'] +['",', '"'] +['trib', 'ute'] +['u', 'x'] +['L', 'og'] +['fer', 'ence'] +['p', 'ost'] +['_', 'e'] +['Ġloc', 'al'] +['and', 'om'] +['ass', 'ert'] +['V', 'al'] +['lect', 'ed'] +['in', 'a'] +['atab', 'ase'] +['A', 'dd'] +['Ġcont', 'ent'] +['.p', 'rint'] +['s', 'igned'] +['r', 'ic'] +['."', 'ĊĊ'] +['Ġf', 'a'] +['!', 'ĊĊ'] +['-', 'f'] +['iv', 'ed'] +['Ġ', 'quest'] +['.', 'ex'] +['Ġf', 'loat'] +['Ġde', 'velop'] +['о', 'Ð'] +['M', 'ap'] +['ad', 'ing'] +['Ġpos', 's'] +['U', 'E'] +['n', 'amespace'] +['_', 'O'] +['ĉ', 'b'] +['.G', 'et'] +['>', '('] +['j', 'son'] +['etail', 's'] +['Ġto', 'o'] +['Ġext', 'ends'] +['ĠN', 'one'] +['Ġf', 'ore'] +['(', 'String'] +['form', 'at'] +['Ġg', 'reat'] +['int', 'er'] +['ca', 'le'] +['Ñ', 'ģ'] +['r', 'on'] +['iv', 'ing'] +['E', 'nt'] +['enc', 'y'] +['x', 't'] +['o', 'y'] +['Ġmon', 'th'] +['Ġh', 'app'] +['Ġsup', 'er'] +['b', 'ar'] +['def', 
'ault'] +['_', 'de'] +['ord', 's'] +['l', 'n'] +['(', '{Ċ'] +['ĠI', 'nd'] +['as', 'es'] +['Ġt', 'itle'] +['Ġcont', 'ext'] +['o', 'h'] +['-', 'p'] +['E', 'm'] +['Ġm', 'et'] +['T', 'est'] +['Ġl', 'ife'] +['_', 'v'] +['ĠU', 'S'] +['U', 'I'] +['oc', 'ation'] +['m', 'd'] +['Ġ[', 'Ċ'] +['Ġ', ']'] +['s', 'w'] +['Ġin', 'cre'] +['s', 'cript'] +['ent', 'ial'] +['w', 'ays'] +['.', 'de'] +['Ġs', 'rc'] +['Ġc', 'atch'] +['ĠA', 'meric'] +['//', 'Ċ'] +['ĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠ'] +['Ġp', 'ay'] +['pl', 'it'] +['âĢ', 'Ķ'] +['Ġc', 'oun'] +['ob', 'j'] +['.ph', 'p'] +['Ġch', 'ange'] +['eth', 'ing'] +["'", 're'] +['ast', 'er'] +['lo', 's'] +['l', 'ation'] +['ĠĠ', 'Ċ'] +['L', 'e'] +['Ã', '¤'] +['(', '{'] +['read', 'y'] +['ĠN', 'o'] +['Ġpos', 'ition'] +['Ġo', 'ld'] +['Ġbo', 'ok'] +['able', 'd'] +['b', 'ug'] +['H', 'and'] +['}', ';ĊĊ'] +['is', 'play'] +['av', 'ing'] +['Ġgo', 'ver'] +['Ġv', 'ersion'] +['S', 'ystem'] +['n', 'ect'] +['res', 'ponse'] +['St', 'yle'] +['U', 'p'] +['ang', 'u'] +['Ġth', 'ree'] +['in', 'it'] +['er', 'o'] +['Ġl', 'aw'] +['end', 'if'] +['Ġb', 'ase'] +['em', 'ail'] +['(', 'l'] +['_', 'V'] +['Ġcon', 'f'] +['AT', 'E'] +['Ġd', 'uring'] +['t', 'es'] +['Ġcon', 'sole'] +['ĠP', 'r'] +['Ġs', 'pe'] +['v', 'es'] +['p', 'ath'] +['ial', 'og'] +['d', 'ition'] +['_t', 'o'] +['ard', 's'] +['Ġagain', 'st'] +['et', 'work'] +['ĠP', 'h'] +['_', 'L'] +['c', 'ur'] +['im', 'it'] +['W', 'ith'] +['Ġp', 'ower'] +['i', 'um'] +["'", ';ĊĊ'] +['Ġw', 'om'] +['le', 'ft'] +['our', 'ces'] +['at', 'ri'] +['ĠI', 'm'] +['ĠM', 'an'] +['or', 'th'] +['$', '{'] +['qu', 'als'] +['es', 'e'] +['_s', 'ize'] +['Ġis', 's'] +['ot', 'al'] +['-', 'g'] +['i', 'que'] +['r', 'ame'] +['Ġw', 'idth'] +['er', 'g'] +[')', '('] +['itt', 'le'] +['T', 'R'] +['ĠThe', 'y'] +['enc', 'es'] +['r', 'l'] +['on', 's'] +['Ġl', 'abel'] +['.', 'y'] +['-', 't'] +['up', 'date'] +['an', 'el'] +['s', 'c'] +['.t', 'o'] +['Ġpro', 'ject'] +['Ã', '¼'] +['Ġe', 'lement'] +['Ġsu', 'ccess'] +['ĉĉ', 'Ċ'] +['.s', 'h'] +['r', 'am'] +['ch', 'ed'] +['()', 
')Ċ'] +['Ġ(', 'Ċ'] +['Ġd', 'ate'] +['Ġto', 't'] +['_', 'ST'] +['A', 'll'] +['ific', 'ation'] +['ĉ', 'var'] +['Ġt', 'ri'] +['ch', 'em'] +['m', 'y'] +['Ġb', 'ig'] +['ĠA', 'd'] +['ĠA', 't'] +['ot', 's'] +['n', 'um'] +['A', 'ct'] +['Ġm', 'ap'] +['er', 'a'] +['co', 'pe'] +['.', '$'] +[',', 'âĢĿ'] +['Ġp', 'op'] +['Ġf', 'ew'] +['Ġl', 'en'] +['u', 'id'] +['et', 'ers'] +['u', 'les'] +['Ã', 'Ń'] +['s', 'ource'] +['http', 's'] +['Ġd', 'em'] +['Ġe', 'ar'] +['########', '########'] +['Ġm', 'atch'] +['or', 'ies'] +['ac', 'es'] +['ĠC', 'l'] +['Ġn', 'ode'] +['ir', 'c'] +['loc', 'al'] +['un', 'ity'] +['}', ';Ċ'] +['Ġan', 'other'] +['<', '<'] +['og', 'le'] +['Ġs', 'it'] +['ew', 'ork'] +['T', 'E'] +['.', 'I'] +['N', 'S'] +['olog', 'y'] +['ou', 'ght'] +['.C', 'ont'] +['>', '>'] +['Ġc', 'are'] +['st', 'ate'] +['ĉ', 'private'] +['Ġe', 'ffect'] +['++', ')'] +['_f', 'ile'] +['end', 'ing'] +['L', 'ine'] +['F', 'or'] +['i', 'or'] +['ĠS', 'c'] +['Ġf', 'un'] +['.S', 'ize'] +['ĉ', 'else'] +[']', ')'] +['st', 'art'] +['v', 'ious'] +['Ġ}', ','] +['our', 's'] +['Ġle', 'g'] +['Ġs', 'ervice'] +['Ġs', 'ince'] +['ir', 'on'] +['L', 'abel'] +['Ġn', 'on'] +['Ġl', 'os'] +['ict', 'ion'] +['Ġf', 'ull'] +['act', 'er'] +['bo', 'ard'] +['g', 'ress'] +['Ġt', 'urn'] +['ith', 'er'] +['.s', 'ize'] +['Ġb', 'ody'] +['res', 'h'] +['et', 'urn'] +['(', '_'] +['y', 'les'] +['orm', 'al'] +['p', 'i'] +['Ġsom', 'ething'] +['!', '--'] +['u', 'int'] +['Ġpro', 'du'] +['Ġst', 'and'] +['Ġpro', 'ble'] +['Ġav', 'ailable'] +['m', 't'] +['ĠB', 'l'] +['Ġ', '...'] +['Ġb', 'lock'] +['In', 'put'] +['Ġke', 'ep'] +['C', 'ount'] +['op', 'en'] +['Ġ[', "'"] +['Ġth', 'row'] +['uild', 'er'] +['A', 'ction'] +['Ġth', 'ings'] +['Tr', 'ue'] +['Ġ', 'url'] +['ĠB', 'o'] +['print', 'f'] +['Ġre', 'd'] +['j', 's'] +['.c', 'reate'] +['ĠO', 'r'] +['St', 'atus'] +['In', 'stance'] +['Ġcont', 'rol'] +['Ġcom', 'e'] +['Ġc', 'ustom'] +['loc', 'ation'] +['m', 'odel'] +['Ġ', 'čĊ'] +['Ġs', 'ource'] +['Ġe', 'as'] +['.', 'out'] +[']', 'ĊĊ'] +['one', 'y'] +['Ġaw', 
'ait'] +['Ġpart', 'ic'] +['A', 'P'] +['ub', 'lish'] +['od', 'es'] +['_p', 'ro'] +['p', 'ly'] +['rit', 'er'] +['Ġpro', 'v'] +['Ġm', 'ill'] +['H', 'T'] +[']', ')Ċ'] +['Ġch', 'ang'] +['Ġas', 'k'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠ'] +['Ġout', 'put'] +['Ġem', 'ail'] +['.p', 'ush'] +['Ġ}', 'čĊčĊ'] +['in', 'ation'] +['atri', 'x'] +['T', 'able'] +['u', 'ccess'] +[']', ');Ċ'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ'] +['Ġdis', 'c'] +['(', '['] +['Ġb', 'usiness'] +['he', 'ight'] +['.', 'html'] +['t', 'a'] +['f', 'ield'] +['Ġrequire', 'd'] +['_', 'R'] +['Ġgover', 'n'] +['}', 'čĊčĊ'] +['le', 'x'] +['.', ','] +['ĠS', 'et'] +['ur', 'ch'] +['//', '/'] +['t', 's'] +['a', 'f'] +['Ġm', 'ight'] +['ist', 'ory'] +['S', 'tr'] +['Ġne', 'ver'] +['Res', 'ponse'] +['ar', 'se'] +['ad', 'a'] +['ĠH', 'ow'] +['Ġ*', ')'] +['Ġ', ';'] +['Ġh', 'ard'] +['A', 'd'] +['Ġinter', 'n'] +['us', 'ed'] +['(', 'data'] +['m', 'od'] +['ann', 'el'] +['Ġn', 'p'] +['ug', 'g'] +['Ġ/', '>Ċ'] +['Ġcal', 'led'] +['b', 'ody'] +['Ġch', 'o'] +['(', 'r'] +['_s', 'et'] +['ir', 'd'] +['Ġ>', '='] +['Ġ}', ';Ċ'] +['Ġo', 'ptions'] +['ĠG', 'ener'] +['Ġhe', 'ight'] +['P', 'oint'] +['Y', 'ou'] +['et', 'y'] +['C', 'lick'] +['Ġsm', 'all'] +['Ġ', 'ide'] +['Ġacc', 'ess'] +['angu', 'age'] +['Ġprot', 'ected'] +['Ġj', 'ob'] +['ĠTh', 'ere'] +['D', 'ef'] +['Ġadd', 'ress'] +['Ġu', 'int'] +['N', 'ot'] +['o', 'o'] +['ap', 's'] +['<', 'div'] +['ain', 'ed'] +['at', 'ur'] +['Ġs', 'um'] +['-', 'w'] +['ĠD', 'ate'] +['Ġl', 'ittle'] +['Ġf', 'ri'] +['Y', 'PE'] +['Ġp', 'ort'] +['e', 'h'] +['pr', 'ing'] +['_p', 'ath'] +['Ġst', 'atus'] +['a', 'im'] +['bo', 'ol'] +['Ġap', 'pe'] +['Ġo', 's'] +['.', 'name'] +['ens', 'ion'] +['_', 'G'] +['Ġup', 'date'] +['Con', 'fig'] +['a', 'ff'] +['ER', 'R'] +['Ġ<', '='] +['at', 'ely'] +['#', 'if'] +['u', 'ction'] +['ĠT', 'e'] +['Ġl', 'ink'] +['ĠU', 'ser'] +['.f', 'ind'] +['.', 'org'] +['m', 'e'] +['Ġg', 'iven'] +['O', 'ut'] +['#', 'endif'] +['Ġbet', 'ter'] +['P', 'age'] +['Ġfe', 'el'] +['en', 'n'] +['M', 'L'] +['Ġal', 
'ready'] +['Ġinclud', 'ing'] +['o', 'ogle'] +['r', 'u'] +['ic', 'ally'] +['pro', 'p'] +['le', 'an'] +['out', 'er'] +['Ġal', 'ways'] +['ord', 'ing'] +['I', 'f'] +['or', 'age'] +['Ġp', 'arent'] +['v', 'is'] +['ĉĉĉĉ', 'ĉĉĉ'] +['Ġg', 'ot'] +['st', 'and'] +['Ġle', 'ss'] +['/', 's'] +['ĠA', 'ss'] +['ap', 't'] +['ire', 'd'] +['ĠA', 'dd'] +['Ġacc', 'ount'] +['p', 'loy'] +['Ġd', 'er'] +['res', 'ent'] +['Ġl', 'ot'] +['Ġval', 'id'] +['ĉ', 'd'] +['Ġb', 'it'] +['pon', 'ents'] +['Ġfollow', 'ing'] +['_', 'ex'] +['S', 'ON'] +['Ġs', 'ure'] +['oc', 'ial'] +['Ġp', 'rom'] +['ert', 'ies'] +['he', 'ader'] +['.p', 'ro'] +['Ġbo', 'olean'] +['Ġse', 'arch'] +['k', 'en'] +['Ġor', 'ig'] +['Ġ', 'er'] +['E', 'd'] +['E', 'M'] +['a', 'ut'] +['l', 'ing'] +['al', 'ity'] +['By', 'Id'] +['b', 'ed'] +['ĉc', 'ase'] +['eth', 'er'] +['pos', 'it'] +['Ġinv', 'est'] +['ĠO', 'R'] +['Ġs', 'ays'] +['miss', 'ion'] +['AM', 'E'] +['Ġtem', 'p'] +['o', 'ad'] +['Ġre', 'st'] +['in', 'fo'] +['Ġinter', 'est'] +['A', 'rg'] +['Ġper', 'form'] +['pon', 's'] +['ĠV', 'iew'] +['Ġv', 'er'] +['l', 'ib'] +['(', 'const'] +['U', 'til'] +['List', 'ener'] +['ar', 'ge'] +['Ġm', 'ult'] +['Ġd', 'ie'] +['Ġs', 'ite'] +['../', '../'] +['E', 'L'] +['Ġval', 'ues'] +['Ġ}', ')Ċ'] +['p', 'en'] +['N', 'o'] +['ic', 'ro'] +['Ġbe', 'h'] +["Ġ'", './'] +['ac', 'y'] +['re', 'c'] +['()', '->'] +['ĉ', 'ĠĠĠ'] +['"', '))'] +['Cont', 'ent'] +['_', 'W'] +['ple', 'ment'] +['Ġw', 'on'] +['Ġv', 'ideo'] +['ad', 'i'] +['p', 'oint'] +['%', '%'] +['Ġg', 'l'] +['erv', 'ed'] +['v', 'iron'] +['I', 'F'] +['ut', 'ed'] +['ã', 'ĥ'] +["'", 'm'] +['Ġc', 'ert'] +['Ġpro', 'f'] +['Ġc', 'ell'] +['ar', 'i'] +['Ġpl', 'ayer'] +['a', 'is'] +['Ġc', 'ost'] +['Ġh', 'um'] +['(', 'R'] +['Ġoff', 'ic'] +['k', 's'] +['.t', 'ext'] +['at', 'ures'] +['Ġtot', 'al'] +['Ġ*/', 'ĊĊ'] +['o', 'pe'] +['Ġst', 'at'] +['U', 'M'] +['Ġlo', 'ad'] +['ight', 's'] +['Ġc', 'lear'] +['u', 'ro'] +['Ġte', 'chn'] +['up', 'port'] +['I', 'R'] +['Ġ', 'row'] +['Ġse', 'em'] +['Ġ', 'q'] +['Ġsh', 'ort'] +['ĠN', 'ot'] 
+['ip', 'p'] +['G', 'roup'] +['se', 'ction'] +['m', 'ax'] +['ir', 'l'] +['Ġover', 'ride'] +['Ġcom', 'pany'] +['Ġd', 'one'] +['"', ');čĊ'] +['Ġg', 're'] +['.', 'Re'] +['Ġbel', 'ie'] +['r', 'ist'] +['Ġhe', 'alth'] +['AN', 'T'] +['()', 'ĊĊ'] +['ĠB', 'e'] +['.', 'value'] +['ĠG', 'r'] +['ott', 'om'] +['Ġarg', 's'] +['P', 'T'] +['st', 'atus'] +['f', 'unc'] +['um', 'ents'] +['-', 'h'] +['N', 'umber'] +[':', 'čĊ'] +['ĠL', 'og'] +['er', 'ver'] +['Ġ)', ',Ċ'] +['am', 'ent'] +['Ġob', 'j'] +['in', 'c'] +['Ġchild', 'ren'] +['ic', 'y'] +['I', 'Z'] +['and', 's'] +['ab', 'ly'] +['Ġdist', 'rib'] +['Ġc', 'ur'] +['er', 'ial'] +['Ġd', 'ays'] +['re', 'ated'] +['re', 'ct'] +['-', 'l'] +['ir', 'm'] +['idd', 'en'] +['om', 'b'] +['Ġin', 'itial'] +['.j', 's'] +['Ġ', 'â'] +['Qu', 'ery'] +['Ġon', 'line'] +['im', 'al'] +['.', 'con'] +['a', 'u'] +['U', 'rl'] +['cont', 'rol'] +['ire', 'ction'] +['Ġin', 'stance'] +['OR', 'T'] +['ĠF', 'r'] +['wh', 'ere'] +['Ġjav', 'ax'] +['Ġorg', 'an'] +['ap', 'ter'] +['Ġre', 'ason'] +['o', 'ptions'] +['ĠM', 'ar'] +['(', 'a'] +['Ġwith', 'in'] +['.âĢĿ', 'ĊĊ'] +['O', 'DE'] +['_', 'DE'] +['ad', 'min'] +['end', 'ed'] +['Ġdes', 'ign'] +['ĠD', 'ata'] +['un', 'e'] +['ĠF', 'ile'] +['ro', 'ot'] +['Ġc', 'ent'] +['Ġa', 'rr'] +['_', 'add'] +['l', 'en'] +['p', 'age'] +[',', "'"] +['_', 'str'] +['Ġb', 'ro'] +['ab', 'ility'] +['ou', 'th'] +['/', 'c'] +['p', 'ose'] +['irt', 'ual'] +['ear', 'ch'] +['_', 'url'] +['arg', 'in'] +['H', 'ttp'] +['Ġs', 'chool'] +['av', 'a'] +['Ġcons', 'ider'] +['.l', 'abel'] +['ĠA', 'rray'] +['we', 'b'] +['o', 'pt'] +['.print', 'ln'] +['ul', 'ation'] +['Ġf', 'unc'] +['P', 'L'] +['Ġ"', '\\'] +['ĠT', 'ext'] +['act', 'ory'] +['(f', 'unction'] +['n', 'ull'] +['Ġen', 'g'] +['d', 'own'] +['Ġin', 'clude'] +['ĠE', 'n'] +['ĠD', 'r'] +['Ġd', 'b'] +['!', '!'] +['s', 'ide'] +['Ġin', 'it'] +['quire', 'd'] +['ĠS', 'he'] +['C', 'olumn'] +['re', 'act'] +['Ġan', 'n'] +['Ġst', 'op'] +['Ġl', 'ater'] +['ĠTh', 'at'] +['ent', 'ion'] +['d', 'f'] +['U', 'G'] +['I', 'LE'] 
+['Ġc', 'lient'] +['ra', 'ft'] +['ff', 'er'] +['PO', 'ST'] +['el', 'per'] +['Ġlo', 've'] +['qu', 'ote'] +['ou', 'd'] +['Ġj', 'son'] +['Ġab', 'le'] +['Ġm', 'en'] +['A', 'X'] +['ĠC', 'opyright'] +['Ã', '¶'] +['av', 'ig'] +['re', 'q'] +['C', 'lient'] +['}', ');Ċ'] +['.C', 'om'] +['er', 'c'] +['il', 't'] +['pec', 'ial'] +['_c', 'om'] +['ro', 'om'] +['.', 'Name'] +['Ġg', 'ive'] +['am', 'b'] +['i', 'ke'] +['Ġcon', 'dition'] +['cl', 'ient'] +['ator', 's'] +[':', '"'] +['Ġc', 'opy'] +['ut', 'ure'] +['ivers', 'ity'] +['ern', 'al'] +['{', '{'] +['ĠC', 'an'] +['ou', 'nc'] +['d', 'o'] +['Ġo', 'cc'] +['Ġapp', 'ro'] +['th', 'ers'] +['z', 'e'] +['Ġe', 'ither'] +['ĠF', 'l'] +['Ġimport', 'ant'] +['Ġle', 'ad'] +['at', 'tr'] +['AR', 'T'] +['E', 'qual'] +['Ġd', 'a'] +['et', 'ch'] +['ent', 'ity'] +['Ġfam', 'ily'] +['add', 'ing'] +['Ġo', 'ption'] +['Ġex', 'ist'] +['ic', 'a'] +['ĠO', 'bject'] +["'", 've'] +['v', 'ers'] +['ition', 'al'] +['out', 'put'] +['ĠTr', 'ue'] +['ĠO', 'F'] +['_t', 'ime'] +['Ġof', 'fer'] +['Ġ}', ');ĊĊ'] +['H', 'ER'] +['eg', 'in'] +['"', '"'] +['Ġw', 'ater'] +['Ġc', 'he'] +['ĠM', 'y'] +['ore', 'd'] +['Ġst', 'ep'] +['anc', 'es'] +['C', 'K'] +['A', 'Y'] +['à', '¸'] +['str', 'uction'] +['(', 'C'] +['ou', 'ch'] +['St', 'ream'] +['act', 'ive'] +['am', 'a'] +['Ent', 'ity'] +['pro', 'duct'] +['()', '{Ċ'] +['Ġgovern', 'ment'] +['ĠI', 'D'] +['aj', 'or'] +['A', 'nd'] +['Ġdis', 'play'] +['Ð', '»'] +['Ġt', 'imes'] +['Ġf', 'our'] +['Ġf', 'ar'] +['Ġpres', 'ent'] +['ĠN', 'S'] +['Ġ\\', 'Ċ'] +['ue', 'st'] +['Ġb', 'as'] +['e', 'cho'] +['ch', 'ild'] +['if', 'ier'] +['Hand', 'ler'] +['Ġl', 'ib'] +['Prop', 'erty'] +['trans', 'lation'] +['Ġro', 'om'] +['Ġon', 'ce'] +['Ġ[', ']'] +['cent', 'er'] +['================', '================'] +['Ġresult', 's'] +['Ġcontin', 'ue'] +['Ġt', 'alk'] +['_', 'get'] +['Ġg', 'row'] +['.s', 'w'] +['e', 'b'] +['ĠP', 'ublic'] +['O', 'P'] +['ec', 'ute'] +['ol', 's'] +['Ġ', '**'] +['"', ');ĊĊ'] +['Ġm', 'ass'] +['ure', 'd'] +['.c', 'lass'] +['om', 'ic'] +['Ġme', 
'an'] +['ip', 's'] +['Ġa', 'ut'] +[');čĊ', 'čĊ'] +['Ġun', 'til'] +['Ġmark', 'et'] +['Ġare', 'a'] +['u', 'it'] +['Ġl', 'ength'] +['ĠW', 'ith'] +['struct', 'or'] +['e', 'vent'] +['">', '<'] +['ĠS', 'p'] +['I', 'V'] +['Ġm', 'us'] +['if', 'f'] +['Ġk', 'ind'] +['a', 'uthor'] +['ound', 's'] +['m', 'b'] +['_', 'key'] +['w', 'idth'] +['posit', 'ory'] +['Ġl', 'ight'] +['u', 'k'] +['R', 'ow'] +['oh', 'n'] +['al', 'f'] +['viron', 'ment'] +['app', 'er'] +['ollection', 's'] +['Ġs', 'ide'] +['_in', 'fo'] +['Ġex', 'ample'] +['im', 'ary'] +['Ġw', 'r'] +['Ġc', 'amp'] +['cri', 'be'] +['"', '/'] +['Ġm', 'iss'] +['w', 'ay'] +['Ġb', 'ased'] +['Ġpl', 'an'] +['V', 'is'] +['om', 'ain'] +['un', 'k'] +['Ġaw', 'ay'] +['U', 'P'] +['<', 'T'] +['O', 'S'] +['i', 'od'] +['ĠM', 'on'] +['âĢĻ', 're'] +['Ġli', 'k'] +['Ã', '§'] +['iv', 'ely'] +['.', 'v'] +['im', 'er'] +['iz', 'er'] +['S', 'ub'] +['Ġbut', 'ton'] +['ĠU', 'p'] +['Ġexper', 'ience'] +['C', 'L'] +['Ġre', 'nder'] +['_', 'value'] +['Ġn', 'ear'] +['UR', 'L'] +['al', 't'] +['Ġcoun', 'try'] +['ib', 'ility'] +['()', ',Ċ'] +['e', 'ad'] +['Ġa', 'uthor'] +['Ġspec', 'ific'] +['b', 'ase'] +['(', 'name'] +['on', 'es'] +['ĠD', 'o'] +['Ġal', 'ong'] +['y', 'ear'] +['Ġexp', 'ress'] +['.', "'"] +['en', 'v'] +['Ġbeg', 'in'] +['Ġso', 'ftware'] +['Ġim', 'p'] +['Ġw', 'in'] +['ó', 'n'] +['Ġth', 'ing'] +['Tr', 'ans'] +['ĠT', 'HE'] +['Ġ<', '?'] +['Ġwh', 'y'] +['Ġdoes', 'n'] +['i', 'j'] +['g', 'ing'] +['ĉ', 'g'] +['Ġs', 'ingle'] +['off', 'set'] +['ar', 'ning'] +['og', 'raph'] +['le', 'y'] +['_c', 'ount'] +['Ġan', 'al'] +['cre', 'ate'] +['/', 'm'] +['ĠR', 'eg'] +['un', 'ch'] +['=', '$'] +['is', 'k'] +['Ġright', 's'] +['(', 'M'] +['Ġ""', '"Ċ'] +['ap', 'er'] +['.m', 'odel'] +['Ġp', 'o'] +['em', 'pty'] +['art', 'ment'] +['Ġa', 'nt'] +['ĠWh', 'en'] +['Ġwom', 'en'] +['ĠE', 'd'] +['Ġse', 'ason'] +['Ġde', 'st'] +['Ã', '£'] +['(', 'h'] +['Ġposs', 'ible'] +['Ġse', 'ver'] +['Ġb', 'tn'] +['Ġdid', 'n'] +['Ġs', 'ent'] +['Ġen', 'c'] +['Ġcomm', 'and'] +['Ġ', '],Ċ'] +['_', 'x'] 
+['Ġre', 'cent'] +['ol', 'ution'] +['v', 'ector'] +['ĠB', 'y'] +['ĠM', 'ay'] +['ĠA', 'ct'] +['»', '¿'] +['Ġm', 'oney'] +['IN', 'T'] +['bs', 'ite'] +['ĉ', 'p'] +['.', 'čĊ'] +['ï', '»¿'] +['s', 'l'] +['atter', 'n'] +['ĠC', 'lass'] +['Ġto', 'ld'] +['ud', 'io'] +['c', 'urrent'] +['Ġe', 'qu'] +['Ġa', 'uto'] +['ĠSt', 'ate'] +['d', 'a'] +['ms', 'g'] +['))', ';ĊĊ'] +['Ġwork', 'ing'] +['Ġqu', 'ery'] +['ĠB', 'r'] +['Ġw', 'indow'] +['a', 'uth'] +['on', 'ly'] +['ĉ', 't'] +['Ġle', 'ast'] +['ag', 'n'] +['Ġex', 'pl'] +['it', 'ter'] +['ar', 'ing'] +['Ġc', 'olumn'] +['ĠGener', 'al'] +['":', '"'] +['er', 'al'] +['ri', 'or'] +['Ġrec', 'ord'] +['I', 'B'] +['E', 'X'] +['Ġd', 'at'] +['Ġm', 'aking'] +['u', 'ed'] +['ĠC', 'ar'] +['em', 'p'] +['"', '.'] +['ĠM', 'ed'] +['Ġc', 'lose'] +['Ġper', 'cent'] +['Ġp', 'ast'] +['(', 'g'] +[':', '('] +['Ġw', 'rite'] +['Ġm', 'ove'] +['Ġp', 'at'] +['Cont', 'rol'] +['.T', 'o'] +['Ġv', 'i'] +['*/', 'Ċ'] +['in', 'ate'] +["'", 'll'] +['ag', 'ed'] +['N', 'ull'] +['Ġspec', 'ial'] +['IZ', 'E'] +['Ġc', 'ity'] +['/*', 'Ċ'] +['ĠE', 'ng'] +['ix', 'ed'] +['in', 'ary'] +['p', 'y'] +['Ġe', 'ff'] +['ar', 'io'] +['Ġt', 'ell'] +['av', 'or'] +['Ġse', 'lect'] +['le', 'vel'] +['im', 'um'] +['op', 'er'] +['B', 'uilder'] +['I', 'P'] +["')", ',Ċ'] +['es', 'c'] +['Ġf', 'ont'] +['"', ';ĊĊ'] +['ĠA', 'm'] +['ish', 'ed'] +['ill', 's'] +['Int', 'er'] +['O', 'W'] +['Ġcour', 'se'] +['Ġl', 'ate'] +['idd', 'le'] +['Ġam', 'ount'] +['Ġas', 'ync'] +['in', 'o'] +['c', 'ul'] +['Ġ', 'ì'] +['and', 'le'] +['_', 'user'] +['Ġb', 'en'] +['ĠC', 'al'] +['Ġ$', '_'] +['ĠR', 'ep'] +['Ġen', 'ough'] +['T', 'oken'] +['.', 'user'] +['(', 'j'] +['S', 'c'] +['W', 'idth'] +['n', 'ow'] +['at', 'form'] +['Ġlook', 'ing'] +['Ġh', 'old'] +['M', 'odule'] +['IT', 'Y'] +['v', 'o'] +['is', 'on'] +['.D', 'ata'] +['y', 'c'] +['Ġp', 'ot'] +['ĠTr', 'ump'] +['id', 'ual'] +['id', 'es'] +['r', 't'] +['Ġprop', 'erty'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠ'] +['am', 'ework'] +['g', 'o'] +['Ġl', 'ow'] +['Ġpar', 
'a'] +['Ġpr', 'ice'] +['ur', 'y'] +['Ġto', 'day'] +['ro', 'y'] +["Ġ'", '/'] +['Ġpol', 'it'] +["Ġ'", "'"] +['ym', 'b'] +['P', 'h'] +['Ġad', 'v'] +['Ġatt', 'ack'] +['ĠS', 'te'] +['RO', 'M'] +['an', 'a'] +['Ġme', 'ans'] +['Ġst', 'ory'] +['id', 's'] +['ak', 'en'] +['Ġme', 'et'] +['Ġm', 'om'] +['ĠâĢ', 'ĺ'] +['Ġ?', '>'] +['Ġd', 'en'] +['ob', 'ile'] +['ch', 'ange'] +['ĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĊ'] +['ic', 'i'] +['n', 'a'] +['ĠF', 'orm'] +['Ġs', 'ort'] +['Se', 'lect'] +['p', 'are'] +['Ġth', 'ought'] +['_', 'con'] +['Ġt', 'ask'] +['oc', 'us'] +['ĠD', 'E'] +['ĠM', 'in'] +['Ġo', 'pt'] +['ĉb', 'reak'] +['um', 'er'] +['K', 'E'] +['th', 'en'] +['Ġd', 'et'] +['ĠT', 'est'] +['port', 's'] +['Ġre', 'view'] +["('", '/'] +['m', 'ove'] +['Ġsw', 'itch'] +['ER', 'T'] +['p', 'atch'] +['ann', 'ot'] +['ã', 'Ĥ'] +['Ġab', 'ove'] +['it', 'ive'] +['Ġquest', 'ion'] +['ĠQ', 'u'] +['ãĢĤ', 'ĊĊ'] +['g', 'le'] +['Ġw', 'ord'] +['Ġprov', 'ide'] +['ĠR', 'eturn'] +['Ġre', 'search'] +['ã', 'o'] +['u', 'str'] +['Ġp', 'ublish'] +['chem', 'a'] +['}', '}'] +['ĠC', 'ON'] +['-', 'in'] +['all', 'back'] +['Ġco', 'ver'] +['\\', '\\'] +['c', 'olor'] +['ĠI', 'S'] +['Ġwh', 'ether'] +['im', 'ate'] +['is', 'c'] +['B', 'ar'] +['Ġd', 'iv'] +['B', 'e'] +['our', 'n'] +['Ġh', 'aving'] +['le', 'm'] +['pl', 'ayer'] +['ab', 's'] +['am', 'era'] +['ne', 'y'] +['Ġex', 'c'] +['get', 'her'] +['pl', 'ied'] +['a', 'o'] +['[', '$'] +['Ġ+', '+'] +['i', 'pe'] +['sh', 'ow'] +['/', 'd'] +['[', ':'] +['ag', 'ement'] +['le', 'v'] +['_', 'ID'] +['r', 'ary'] +['ad', 'es'] +['_', 'se'] +['a', 'use'] +['Ġem', 'ploy'] +['Ġ*/', 'čĊ'] +['Ġf', 're'] +["Ġ'", '@'] +['Ġcomple', 't'] +['Ġl', 'arge'] +['r', 'al'] +['\\', 'x'] +['Ġf', 'ac'] +['<', 'String'] +['Ġcre', 'ated'] +['up', 'er'] +['.st', 'ate'] +['Ġh', 'ost'] +['ener', 'ic'] +['/', 'b'] +['(', '!'] +['wh', 'ile'] +['i', 'as'] +['B', 'UG'] +['Ġ', ');ĊĊ'] +['Ġro', 'le'] +['Re', 'g'] +['ĠC', 'olor'] +['St', 'art'] +['Ġp', 'orn'] +['t', 'op'] +['Ġwe', 'b'] +['Ġde', 'v'] +['Ġde', 'al'] +['++', ')Ċ'] +['Int', 
'eger'] +['pos', 'ition'] +['.', 'on'] +['Ġ(', '"'] +['ä', '¸'] +['Ġproble', 'm'] +['s', 'v'] +['Ġp', 'ress'] +['AB', 'LE'] +['AT', 'ION'] +['ĠSe', 'e'] +['an', 'ch'] +['Ġth', 'ough'] +['le', 'ep'] +['Ġ<', '!--'] +['Ġpoint', 's'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠ'] +['.', 'J'] +['Ġ', '::'] +['p', 'tr'] +['D', 'B'] +['++', ';Ċ'] +['.p', 'ng'] +['n', 'ode'] +['so', 'ft'] +['pon', 'd'] +['Ġe', 'ver'] +['--------------------------------', '--------------------------------'] +['M', 'enu'] +["('", '#'] +['Ġs', 'ervices'] +['p', 'g'] +['}', ')Ċ'] +['param', 's'] +['Ġact', 'ually'] +['Ġ"', '/'] +['Em', 'pty'] +['M', 'ethod'] +['Ġid', 'ent'] +['un', 'ic'] +['Ġmill', 'ion'] +['Ġa', 'ff'] +['st', 'yle'] +['Ġcon', 'c'] +['i', 'os'] +['ign', 'ment'] +['UL', 'T'] +['P', 'r'] +['"', ';čĊ'] +['Ġunder', 'stand'] +['u', 'ary'] +['Ġhapp', 'en'] +['Ġser', 'ver'] +['ĠC', 'o'] +['S', 'C'] +['Ġle', 's'] +['Ġfile', 's'] +['G', 'rid'] +['s', 'ql'] +['Ġof', 'ten'] +['Ġin', 'fo'] +['_', 'tr'] +['s', 'rc'] +['on', 'y'] +['Ġsp', 'ace'] +['um', 'b'] +['Ġpass', 'word'] +['Ġst', 'ore'] +[',', 'ĊĊ'] +['ĠWh', 'at'] +['g', 'ed'] +['ĠF', 'alse'] +['U', 's'] +['sw', 'er'] +['_', 'index'] +['Ġform', 'at'] +['m', 'ost'] +['s', 'm'] +['N', 'ew'] +['Ġd', 'etails'] +['Ġpro', 'b'] +['ĠAN', 'D'] +['()', 'čĊ'] +['il', 'ar'] +['Ġ$', '{'] +['ry', 'pt'] +['.C', 'ollections'] +['$', 'this'] +['ĠF', 'ree'] +['_', 'of'] +['(f', 'alse'] +['d', 'ated'] +['Ġ>', '>'] +['Ġf', 'ace'] +['CT', 'ION'] +['Ġs', 'ave'] +['Ġt', 'yp'] +['de', 'v'] +['("', '#'] +['AG', 'E'] +['cont', 'ainer'] +['ed', 'it'] +['Q', 'L'] +['Ġitem', 's'] +['Ġs', 'ocial'] +['i', 'en'] +['ĠRe', 'act'] +[')', '.ĊĊ'] +['Ġm', 'ar'] +['Ġre', 'du'] +['ĠR', 'E'] +['.p', 'ut'] +['Ġm', 'ajor'] +['C', 'ell'] +['n', 'ext'] +['Ġexpect', 'ed'] +['Ġy', 'et'] +['Ġin', 'div'] +['trib', 'utes'] +['at', 'is'] +['am', 'ed'] +['Ġf', 'ood'] +['S', 'ource'] +['(', 'string'] +['Ġ+', 'Ċ'] +['it', 'es'] +['d', 'r'] +['Ġmem', 'bers'] +['Ġcom', 'b'] +['item', 's'] +['ĠP', 'er'] 
+['T', 'H'] +['=', 'True'] +['Ġb', 'ar'] +['_', 'SE'] +['com', 'm'] +['(', 'w'] +[')ĊĊ', 'Ċ'] +['Ġs', 'end'] +['Ġin', 'c'] +['un', 'signed'] +['F', 'A'] +['Ġparam', 's'] +['app', 'ing'] +['ro', 's'] +['ug', 'in'] +['f', 'a'] +['Ġcon', 'nection'] +['Ġ}', ';ĊĊ'] +['Ġbe', 'come'] +['M', 'ode'] +['Ġe', 'v'] +['Ġdif', 'f'] +['ĠUn', 'ited'] +['He', 'ight'] +['ful', 'ly'] +['im', 'ages'] +['Ġm', 'akes'] +['Ġg', 'lobal'] +['Ġcont', 'act'] +["'", ':Ċ'] +['Ġab', 's'] +['а', 'Ð'] +['f', 'loat'] +['Ġex', 'cept'] +['ĠP', 'ol'] +['Ch', 'ild'] +['t', 'yp'] +['Ġcert', 'ain'] +['i', 'ón'] +['O', 'UT'] +['Ġim', 'pro'] +['ile', 's'] +['Ġ--', '>Ċ'] +['ĠP', 'art'] +['val', 'ues'] +['os', 's'] +['/', '**'] +['il', 'it'] +['ĠE', 'vent'] +['cur', 'ity'] +['st', 'er'] +['Ġchar', 'acter'] +['Ġnew', 's'] +['Ġ"', ','] +['Ġde', 'vice'] +['c', 'el'] +['log', 'in'] +['he', 'et'] +['Def', 'ault'] +['@', '"'] +['ĉ', 'Ġ'] +['c', 'lick'] +['(', 'value'] +['ĠA', 'b'] +['Ġpre', 'vious'] +['ERR', 'OR'] +['oc', 'al'] +['Ġm', 'aterial'] +['Ġbel', 'ow'] +['ĠCh', 'rist'] +['Ġmed', 'ia'] +['co', 'ver'] +['ĠU', 'I'] +['Ġf', 'ail'] +['Ġbl', 'ack'] +['Ġcom', 'ponent'] +['ĠAmeric', 'an'] +['Ġadd', 'ed'] +['Ġbu', 'y'] +['st', 'it'] +['Ġc', 'ame'] +['Ġde', 'lete'] +['prop', 'erty'] +['od', 'ing'] +['Ġc', 'ard'] +['rop', 's'] +['Ġhttp', 's'] +['Ġro', 'ot'] +['Ġhand', 'le'] +['C', 'C'] +['B', 'ack'] +['em', 'plate'] +['Ġget', 'ting'] +['_b', 'y'] +['m', 'ail'] +['_s', 'h'] +['.', 'assert'] +['ĠD', 'ec'] +['(', 'true'] +['Ġcom', 'put'] +['Ġcl', 'aim'] +["'", '=>'] +['ĠS', 'ub'] +['Ġa', 'ir'] +['op', 's'] +['n', 'av'] +['em', 'ents'] +['(', 'id'] +['Ġent', 'er'] +['ang', 'ed'] +['E', 'nd'] +['Ġloc', 'ation'] +['Ġn', 'ight'] +['Ġdo', 'ing'] +['ĠR', 'ed'] +['l', 'in'] +['}ĊĊ', 'Ċ'] +['vid', 'er'] +['Ġp', 'ick'] +['Ġw', 'atch'] +['ess', 'ages'] +['Ġhum', 'an'] +['Ġd', 'am'] +['p', 'end'] +['d', 'ir'] +['Ġt', 'ax'] +['Ġg', 'irl'] +['re', 'et'] +['Ġbo', 'x'] +['Ġstr', 'ong'] +['(', 'v'] +['re', 'l'] +['Ġinter', 'face'] 
+['Ġm', 'sg'] +['f', 'ect'] +['_', 'at'] +['Ġh', 'ouse'] +['Ġtr', 'ack'] +["'", ');ĊĊ'] +['j', 'e'] +['ĠJ', 'ohn'] +['ist', 'r'] +['(', 'S'] +['ub', 'e'] +['Ġc', 'e'] +['itt', 'ed'] +['V', 'ER'] +['*', ')'] +['p', 'arent'] +['Ġapp', 'lication'] +['an', 'y'] +['.sw', 'ing'] +['Ġp', 'ack'] +['\\', 'u'] +['Ġpr', 'act'] +['Ġse', 'ction'] +['ct', 'x'] +['Ġun', 'signed'] +['.P', 'oint'] +['ĠO', 'ne'] +['Ä', '±'] +['ip', 'le'] +['a', 'id'] +['Ñ', 'ĥ'] +['V', 'ector'] +['by', 'te'] +['Ġw', 'ait'] +['ĠÃ', 'ł'] +['Ã', '¥'] +['Ġto', 'gether'] +['Ġth', 'rows'] +['F', 'O'] +["'", '))'] +['h', 'ost'] +['is', 'ing'] +['.', 'view'] +['Ġter', 'ms'] +['fr', 'amework'] +['-', 'r'] +['Ġapp', 'ly'] +['Ġs', 'ession'] +['O', 'ptions'] +['ugg', 'est'] +['Ġo', 'thers'] +['w', 'itter'] +['Ġf', 'und'] +['In', 'it'] +['__', '('] +['ens', 'or'] +['G', 'ET'] +['Ġsever', 'al'] +['i', 'i'] +['[', 'j'] +['I', 'O'] +['Ġtem', 'plate'] +['P', 'osition'] +['Ġe', 'con'] +['ach', 'ine'] +['Ġ', 'il'] +['.s', 'pring'] +['m', 'ain'] +['el', 't'] +['im', 'ent'] +['Re', 'c'] +['m', 'm'] +['ĠUn', 'iversity'] +['urs', 'or'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠ'] +['G', 'L'] +['ict', 'ure'] +['ith', 'ub'] +['c', 'er'] +['c', 'ast'] +['F', 'rom'] +['a', 'les'] +['Ġsub', 'ject'] +['p', 'assword'] +['n', 'y'] +['Ġes', 'c'] +['.w', 'rite'] +['ï¼', 'Į'] +['Wh', 'at'] +['.', 'H'] +['Ġh', 'istory'] +['ĠF', 'e'] +['Ġindiv', 'idual'] +['un', 'it'] +['Ġ--', '>'] +['Ġd', 'u'] +['I', 'ST'] +['Ġus', 'ers'] +['f', 's'] +['f', 'alse'] +['un', 't'] +['T', 'itle'] +['Ġm', 'ot'] +['Ġf', 'uture'] +['ach', 'ed'] +['Ġstart', 'ed'] +['Ġm', 'ode'] +["Ġ'", '<'] +['_', 'array'] +['Ġa', 'x'] +["']", ';Ċ'] +['i', 'res'] +['Th', 'ere'] +['ug', 'ht'] +['t', 'ml'] +['pos', 'ed'] +['ic', 'ult'] +['Ġto', 'ok'] +['Ġg', 'ames'] +['Ġ}', '}'] +['Ġ?', '>Ċ'] +['Ġproduct', 's'] +['I', 's'] +['Ġb', 'ad'] +['ĠD', 'es'] +['.p', 'ath'] +["'", 'ĊĊ'] +['ĠP', 'ost'] +['av', 'el'] +['(', ':'] +['Ġneed', 's'] +['Ġkn', 'own'] +['F', 'l'] +['Ġex', 'ec'] +['Ġse', 'en'] 
+['um', 'e'] +['Ġb', 'order'] +['Ġl', 'ive'] +['tem', 'p'] +['P', 'er'] +['Ġvar', 'iable'] +['i', 'et'] +['ĠD', 'ef'] +['Ġg', 'e'] +['em', 'e'] +['_b', 'ack'] +['f', 'irst'] +['Ġprovid', 'ed'] +['////////////////', '////////////////'] +['Ġfil', 'ename'] +['Ġh', 'ope'] +['ul', 'y'] +['a', 'uto'] +['f', 'ind'] +['_', 'string'] +['b', 'tn'] +['it', 'ude'] +['At', 'tribute'] +['Ġyou', 'ng'] +['.t', 'xt'] +['Ġwe', 'bsite'] +['ĠP', 'rop'] +['Ġe', 'y'] +['>', '();Ċ'] +['ion', 'al'] +['AR', 'R'] +['iction', 'ary'] +['ur', 'ther'] +['.', ''] +['t', 'x'] +['Ġp', 'ur'] +['u', 'el'] +['ymb', 'ol'] +['u', 'ation'] +['ang', 'er'] +['Ġback', 'ground'] +['ec', 'ess'] +['ef', 'ined'] +['....', '....'] +['Ġdes', 'cription'] +['Ġrep', 'resent'] +['")', ');Ċ'] +['press', 'ion'] +['row', 'ser'] +['Ġser', 'ies'] +['ward', 's'] +['($', '_'] +['a', 'ise'] +['Ġh', 'ot'] +['ac', 'ity'] +['ri', 'es'] +['action', 's'] +['C', 'reate'] +['ad', 'io'] +['amp', 'les'] +['Ġorig', 'inal'] +['ens', 'ive'] +['f', 'ont'] +['st', 'ream'] +['', 'using'] +['.spring', 'framework'] +['ser', 'ver'] +['Ġb', 'ill'] +['AC', 'K'] +['il', 'ename'] +['Ġfr', 'ame'] +['Ġ=', 'Ċ'] +['Ed', 'it'] +['adi', 'us'] +['Ġd', 'raw'] +['ank', 's'] +['Ġd', 'eter'] +['Ġcom', 'es'] +['_', 'int'] +['Ġfore', 'ach'] +['ang', 'le'] +['Ġe', 'lect'] +['pect', 'ed'] +['He', 'ader'] +['ist', 'ration'] +['F', 'alse'] +['ĠG', 'ame'] +['Ġfil', 'ter'] +['Act', 'ivity'] +['Ġl', 'arg'] +['in', 'ition'] +['Ġ"', '<'] +['is', 'ed'] +['Ġrem', 'ove'] +['ĠTr', 'ans'] +['m', 'et'] +['se', 'e'] +['Form', 'at'] +['Com', 'mand'] +['ĠE', 'X'] +['N', 'one'] +['Ġfr', 'ont'] +['A', 'SE'] +['ĠR', 'ec'] +['ound', 'ation'] +['Ġv', 'o'] +['=', '\\"'] +['(', '*'] +['Ch', 'ange'] +['.W', 'rite'] +['g', 'roup'] +['i', 'ents'] +['u', 'y'] +['********************************', '********************************'] +['Ġd', 'ig'] +['h', 'r'] +['(', '-'] +['Ġg', 'en'] +['n', 'umber'] +['ve', 'c'] +['uro', 'pe'] +['ent', 'ry'] +['L', 'L'] +['Ġst', 'e'] +['Val', 'id'] 
+["']", ','] +['_p', 'aram'] +['Ġse', 'lected'] +['Ġacc', 'ording'] +['ĠD', 'is'] +['Ġ', 'util'] +['B', 'uffer'] +['_', 'error'] +['Ġass', 'oci'] +['_S', 'IZE'] +['Ġw', 'or'] +['Ġprint', 'f'] +['r', 'ag'] +['Â', 'ł'] +['D', 'D'] +['ĠV', 'al'] +['Ġact', 'iv'] +['E', 'ng'] +['et', 'ime'] +['Ġv', 'irtual'] +['a', 'ign'] +['a', 'ur'] +['ĠP', 'res'] +['ĠEx', 'ception'] +['Ġany', 'thing'] +['ĠO', 'ff'] +['Ġh', 'ours'] +['Ġw', 'ar'] +['Arg', 's'] +['ag', 'ing'] +['Ġmodel', 's'] +['ĠT', 'ime'] +['O', 'b'] +['am', 's'] +['j', 'oy'] +['Ġear', 'ly'] +['.', 'read'] +['Ġc', 'enter'] +['ĠIn', 'itial'] +['Ġl', 'anguage'] +['l', 'ength'] +['x', 'y'] +['Ġs', 'n'] +['Ġin', 'f'] +['P', 'ost'] +['Ġag', 'o'] +['Ġeas', 'y'] +['_c', 'ode'] +['ĠAN', 'Y'] +['_', 'ch'] +['Ġdown', 'load'] +['(', 'T'] +['av', 'ed'] +['âĢ', 'ĵ'] +['Ġstud', 'ents'] +['Ġf', 'ig'] +['l', 'ight'] +['x', 'x'] +['Ġbu', 'ffer'] +['ĠD', 'ep'] +['ĠM', 'ath'] +['IT', 'H'] +['Ġvar', 'i'] +['Ġd', 'ue'] +['F', 'actory'] +['Ġp', 'or'] +['Ġe', 'p'] +['ot', 'ype'] +['Ġcan', 'not'] +['Ġwh', 'ite'] +['<', 'int'] +['ter', 'n'] +['Ġreg', 'ister'] +['Ġpre', 'd'] +['cl', 'us'] +['_d', 'ate'] +['Ġ/', '**'] +['Ġa', 'uth'] +['Ġ[', ']Ċ'] +['Ġper', 'iod'] +['n', 'own'] +['Ġv', 'ot'] +['Ġs', 'creen'] +["'", 'd'] +['T', 'ypes'] +['Ġt', 'mp'] +['е', 'Ð'] +['ur', 'al'] +['Ġben', 'ef'] +['_', 'y'] +['Ġn', 'et'] +['ĠSt', 'ates'] +["']", "['"] +['ĠN', 'e'] +['ĠN', 'OT'] +['Ġn', 'eg'] +['Ġcomm', 'on'] +['s', 'cope'] +['Ġc', 'red'] +['g', 'es'] +['_T', 'YPE'] +['Ġs', 'uggest'] +['o', 'om'] +['.ĊĊ', 'Ċ'] +['Ġac', 'cept'] +['Ġr', 'andom'] +['er', 'm'] +['ĠV', 'ector'] +['w', 'ith'] +['T', 'ER'] +['(', 'str'] +['Ġres', 'pons'] +['Ġh', 'it'] +['.S', 'et'] +['gr', 'id'] +['ri', 'a'] +['Ġc', 'lick'] +['und', 'le'] +['C', 'ase'] +['ins', 'ert'] +['Util', 's'] +['Ġ""', '"'] +['Ġim', 'plement'] +['at', 'al'] +['tem', 'pt'] +['tem', 'plate'] +['oc', 'r'] +['return', 's'] +['Ġplay', 'ers'] +['us', 'ers'] +['ed', 'ef'] +['ĠTh', 'ese'] +['Ġam', 'ong'] 
+['Ġde', 'b'] +['h', 'a'] +['.get', 'Element'] +['Ġc', 'irc'] +['Ġan', 'swer'] +['Ġw', 'alk'] +['Ġt', 'reat'] +['ĠG', 'e'] +['ĠC', 'reate'] +['Ġa', 'ge'] +['Ġre', 'q'] +['O', 'ST'] +['ang', 'ular'] +['Ñ', 'ı'] +['Ġf', 'ive'] +['Ġdistrib', 'uted'] +['Ġfri', 'end'] +['T', 'P'] +['Ġc', 'lean'] +['ow', 's'] +['.Control', 's'] +['d', 'is'] +['Ġw', 'ords'] +['.', 'io'] +['z', 'y'] +['Ġhe', 'ader'] +['ĠC', 'heck'] +['âĢĻ', 'm'] +['j', 'ust'] +['h', 'older'] +['="', '', 'čĊ'] +['.', 'annot'] +['Ġcol', 'lection'] +["'", '.'] +['Ġsim', 'ilar'] +['Ġt', 'aken'] +['("', '%'] +['Or', 'der'] +["']", 'Ċ'] +['-m', 'd'] +['ĠT', 'H'] +['ac', 'ed'] +['Ġis', 'n'] +['/', 'j'] +['Ġs', 'on'] +['gr', 'aph'] +['ĠInt', 'eger'] +['Ġn', 'ecess'] +['re', 'en'] +['Ġ', 'um'] +['Ġ\\', '<'] +['Ġmom', 'ent'] +['Ġbr', 'ing'] +['Ġind', 'ic'] +['ys', 'is'] +['Le', 'vel'] +['ver', 'se'] +['urre', 'nc'] +['_t', 'est'] +['Ġent', 'ire'] +['D', 'own'] +['Ġ}ĊĊ', 'Ċ'] +['(', 'result'] +['ĠRe', 'ad'] +['Ã', '¨'] +['M', 'od'] +['Ġtry', 'ing'] +['")', ',Ċ'] +['Ġm', 'ember'] +['ĠC', 'or'] +['OD', 'O'] +['-', 'control'] +['un', 'time'] +['ĠS', 'im'] +['D', 'ialog'] +['pl', 'ot'] +['_', 'on'] +['Ġph', 'ys'] +['}', '/'] +['Ġn', 'amespace'] +['ĉ', 'čĊ'] +['ac', 'c'] +['Pl', 'ayer'] +['A', 'RE'] +['Ġf', 'oot'] +['Ġbo', 'ard'] +['p', 'art'] +['Ġs', 'us'] +['w', 'ise'] +['ĠM', 'c'] +['Ġp', 'ush'] +['AT', 'A'] +['Ġp', 'lease'] +['ri', 'ed'] +['we', 'et'] +['b', 'it'] +['id', 'ed'] +['V', 'E'] +['ĠS', 'w'] +['U', 'B'] +['Ġt', 'ypes'] +['ed', 'ia'] +['Ġc', 'los'] +['ace', 'book'] +['Wh', 'en'] +['Ġed', 'it'] +['ig', 'ger'] +['Ġen', 'erg'] +['Cont', 'ainer'] +['Ġph', 'ot'] +['ĠC', 'ount'] +['ĠE', 'urope'] +['.I', 's'] +['ĠR', 'uss'] +['pe', 'ed'] +['ĠS', 'tr'] +['Ġp', 'y'] +['Ġc', 'ult'] +['Ġdef', 'ined'] +['cc', 'ount'] +['Ġob', 't'] +['.L', 'ocation'] +['Ġth', 'read'] +['il', 'le'] +['Ġinst', 'ead'] +['str', 'ong'] +['ĠS', 'ec'] +['U', 'RE'] +['Ġide', 'a'] +['.', 'se'] +['em', 'y'] +['select', 'ed'] +['Con', 'nection'] 
+['ac', 'ing'] +['th', 'read'] +['.n', 'ext'] +['Ġc', 'oll'] +['Ġfil', 'm'] +['ist', 'ic'] +['Ġcomp', 'et'] +['Ġcon', 'n'] +['th', 'ough'] +['Ġcom', 'pan'] +['ock', 'et'] +['Ġte', 'ach'] +['=', '('] +['Ġph', 'one'] +['Ġact', 'ive'] +['de', 'lete'] +['tr', 'ies'] +['Ġm', 'o'] +['Ġde', 'ath'] +['}', ');ĊĊ'] +['oc', 'ol'] +['W', 'idget'] +['Ġart', 'icle'] +['ro', 'du'] +['and', 'id'] +['Ñ', 'ĭ'] +['ĠC', 'r'] +['k', 'a'] +['()', ':'] +['lo', 'od'] +['ĉĉĉ', 'Ċ'] +['Ġal', 'most'] +['Ġs', 'ell'] +['erv', 'let'] +['ri', 'p'] +['Un', 'it'] +['Ġapp', 'lic'] +['Ġcon', 'nect'] +['Ġfe', 'ature'] +['Ġv', 'ia'] +["'", '),'] +['Ġl', 'im'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ'] +['ĠG', 'u'] +['Eng', 'ine'] +['Ġen', 's'] +['Ġen', 'vironment'] +['b', 'lock'] +['HER', 'E'] +['N', 'ULL'] +['g', 'y'] +['t', 'ag'] +[')', ').'] +['ex', 'p'] +['Ġcom', 'pl'] +['Ġinst', 'all'] +['Ġcomple', 'te'] +['que', 'ue'] +['atur', 'al'] +['Ġgener', 'al'] +['th', 'on'] +['Ġask', 'ed'] +['o', 'res'] +['(', 'res'] +['Ġres', 'erved'] +['S', 'P'] +['ĠâĢ', '¦'] +['Å', 'Ĥ'] +['Ġsign', 'ific'] +['O', 'ff'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠ'] +['ĠA', 'g'] +['ĠJ', 'ust'] +['ĠE', 'rror'] +['Ġin', 'fl'] +['ad', 'ata'] +['Ġ', 'icon'] +['ask', 's'] +["'", "'"] +['_', 'LO'] +['?', '.'] +['ac', 'count'] +['Ġ(', '*'] +["'", ')ĊĊ'] +['r', 'ap'] +['_', 'var'] +['ĠF', 'OR'] +['Ġpart', 'y'] +['ĠY', 'our'] +['c', 'at'] +['str', 'y'] +['.', 'new'] +['bo', 'ot'] +['ĠN', 'ov'] +['Ġv', 'ector'] +['Ġn', 'ormal'] +['Ġf', 'urther'] +['Re', 'pository'] +['Ġd', 'atabase'] +['att', 'le'] +['Ġmus', 'ic'] +['Ġspe', 'ed'] +['Ġd', 'oc'] +['pro', 'cess'] +['IG', 'HT'] +['.p', 'arse'] +['Ġt', 'aking'] +['Ġvi', 'ol'] +['ce', 'ed'] +['ĠA', 'fter'] +['Ġfor', 'ward'] +['Ġc', 'rit'] +['"/', '>Ċ'] +['ro', 't'] +['Ġfa', 'iled'] +['ef', 'ore'] +['Ġconc', 'ern'] +['o', 'e'] +['b', 'a'] +['Ġs', 'ender'] +['Ġter', 'm'] +['h', 'as'] +['="', '#'] +['Ġpot', 'ential'] +['N', 'um'] +['Ġpublish', 'ed'] +['.c', 'lose'] +['ĠIm', 'age'] 
+['str', 'aint'] +['U', 'D'] +['ĠO', 'b'] +['Ġprob', 'ably'] +['l', 'im'] +['"', ':Ċ'] +['olum', 'e'] +['Ġcon', 'sum'] +['ag', 'ue'] +['ens', 'ions'] +['Ġinvest', 'ig'] +['-', 'year'] +["')", ';'] +['-s', 'm'] +['Ġen', 'joy'] +['or', 'ig'] +['er', 'ing'] +['c', 'p'] +['le', 'ased'] +['ple', 'ments'] +['Ġreturn', 's'] +['p', 'at'] +['B', 'O'] +['ĠH', 'ouse'] +['.L', 'abel'] +['Ġwe', 'ight'] +['igh', 'b'] +['Ġcondition', 's'] +['Ġex', 'ception'] +['d', 'escription'] +['Ġtr', 'ad'] +['-', 'to'] +['Ġ{', '}'] +['Ġmod', 'ule'] +['EN', 'D'] +['.', 'ap'] +['.p', 'rops'] +['Ġcon', 'structor'] +['av', 'es'] +['Ġf', 'avor'] +['ĠN', 'ow'] +[';', 'i'] +['ĠM', 'ain'] +['_', 'k'] +['er', 'ies'] +['âĢĻ', 'll'] +['trans', 'form'] +['imest', 'amp'] +['P', 're'] +['Ġm', 'er'] +['.', 'res'] +['st', 'ant'] +['L', 'ocation'] +['_N', 'AME'] +['Ġlos', 's'] +['Ġ', 'ĊĊ'] +['n', 'et'] +['Ġeng', 'ine'] +['B', 'lock'] +['Ġiss', 'ues'] +['Ġpar', 'se'] +['ĠB', 'ar'] +['Ġst', 'ay'] +['ĠJ', 'SON'] +['Ġd', 'om'] +['air', 's'] +['w', 'ner'] +['Ġl', 'ower'] +['",', 'čĊ'] +['ĠD', 'em'] +['uf', 'act'] +['Ġp', 's'] +['Ġper', 'fect'] +['R', 'L'] +['Ġed', 'uc'] +['l', 's'] +['em', 'ory'] +['ARR', 'ANT'] +['u', 'ge'] +['Ġex', 'act'] +['.', 'key'] +['al', 'led'] +['e', 'ch'] +['ie', 'f'] +['\\', '/'] +['o', 'ke'] +['Ġfor', 'mer'] +['al', 'loc'] +['Ġs', 'ix'] +['id', 'a'] +['Ġm', 'argin'] +['Ġhe', 'art'] +['al', 'd'] +['p', 'ack'] +['.getElement', 'ById'] +['ĠW', 'ARRANT'] +['Ġr', 'ather'] +['Ġbuild', 'ing'] +['er', 'man'] +['lic', 'e'] +['Ġquest', 'ions'] +['iz', 'es'] +['le', 'ge'] +['irect', 'ory'] +['Ġj', 'e'] +['Ġc', 'as'] +['pro', 'ps'] +['ut', 'f'] +['Ġse', 'curity'] +['Ġhow', 'ever'] +['we', 'ight'] +['Ġins', 'ide'] +['Ġpres', 'ident'] +['Ch', 'ar'] +['ĠW', 'ITH'] +['.m', 'ap'] +['Ġgr', 'aph'] +['Ġt', 'ag'] +['_st', 'atus'] +['Ġat', 'tempt'] +['op', 'p'] +['us', 'es'] +['ĉ', 'const'] +['Ġr', 'ound'] +[',', '$'] +['Ġfri', 'ends'] +['Em', 'ail'] +['?', '>'] +['Res', 'ource'] +['KE', 'Y'] +['os', 'p'] 
+['.', 'query'] +['ĠN', 'orth'] +['able', 's'] +['ist', 'rib'] +['_c', 'lass'] +['el', 'lo'] +['Th', 'at'] +['Ð', 'º'] +['pecial', 'ly'] +['ĠPres', 'ident'] +['Ġcamp', 'aign'] +['Ġal', 't'] +['are', 'a'] +['Ġch', 'all'] +['Ġop', 'port'] +['.C', 'on'] +['Ġenerg', 'y'] +['li', 'ke'] +['.', 'string'] +['ing', 'ton'] +[')', '*'] +['y', 'y'] +['Ġprof', 'ession'] +['ir', 'th'] +['Ġse', 'g'] +['æ', 'ľ'] +['Ġh', 'or'] +['i', 'ers'] +['c', 'an'] +['Ġbeh', 'ind'] +['Pro', 'duct'] +['f', 'g'] +['ĠS', 'k'] +['.j', 'pg'] +['?', ':'] +[']', ';ĊĊ'] +['Ġcall', 'back'] +['ĠH', 'ttp'] +['Ñ', 'Į'] +['l', 'ong'] +['M', 'S'] +['AT', 'H'] +['Ġr', 'aise'] +['Ġwant', 'ed'] +['row', 'n'] +['ut', 'or'] +['l', 't'] +[']', '='] +['el', 'ine'] +['M', 'A'] +['Ġse', 'par'] +['c', 's'] +['se', 'mb'] +['D', 'is'] +['bs', 'erv'] +['ĠW', 'ill'] +['Ġpol', 'icy'] +['Ġth', 'ird'] +['ph', 'one'] +['Ġb', 'ed'] +['/', 'g'] +['.', '__'] +['ĠIn', 'c'] +['iz', 'ing'] +['.re', 'move'] +['in', 'stance'] +['.t', 'ype'] +['Ġs', 'erv'] +['E', 'ach'] +['Ġh', 'ar'] +['ĠM', 'essage'] +['(', 'key'] +['SE', 'LECT'] +['P', 'os'] +['))', ';čĊ'] +['Ġre', 'comm'] +['Ġtr', 'aining'] +['ĠE', 'nt'] +['ĠCh', 'ar'] +['ic', 'ht'] +['(f', 'ile'] +['Ġp', 'rior'] +['G', 'ame'] +['Ġex', 'it'] +['Param', 's'] +['.c', 'ore'] +['P', 'C'] +['n', 'es'] +['anc', 'ed'] +['(', 'request'] +['P', 'assword'] +['}', '>Ċ'] +['Ġm', 'ag'] +['Ġre', 'lease'] +['Ġsh', 'all'] +['ud', 'ent'] +['ĠS', 'outh'] +['and', 'o'] +[':', "'"] +['.Tab', 'Index'] +['s', 'k'] +['ann', 'er'] +['is', 'set'] +['Ġout', 'side'] +['led', 'ge'] +['Ġ', 'å'] +['ĠR', 'ob'] +['Ġim', 'm'] +['!', 'Ċ'] +['ĠWe', 'b'] +['D', 'es'] +['B', 'C'] +['anc', 'ial'] +['R', 'oute'] +['D', 'ec'] +['fer', 'ences'] +['Ġp', 'urch'] +['ĠM', 'odel'] +['ct', 'or'] +['g', 'n'] +['_st', 'art'] +['_', 'un'] +['.', '*'] +['is', 'es'] +['Ġg', 'round'] +['Ġun', 'ique'] +['Ġbe', 'aut'] +['{', '"'] +['Ġp', 'our'] +['ĠO', 'ct'] +['Ġt', 'ree'] +['set', 's'] +['_', 'res'] +["')", '->'] +['_re', 'g'] 
+['("', '\\'] +['Ġby', 'te'] +['B', 'l'] +['Ġd', 'ating'] +['Ġm', 'atter'] +['ĠR', 'em'] +["Ġ'", '../'] +['ĠA', 'ug'] +['ĠL', 'a'] +['Ġ$', '('] +['ourn', 'al'] +['i', 'am'] +['Ġshow', 's'] +['w', 'rite'] +['Ġb', 'all'] +['Ġsim', 'ply'] +['Ġf', 'ast'] +['Ġmem', 'ory'] +['A', 'SS'] +['ĠO', 'f'] +['ov', 'ed'] +['ant', 'e'] +['a', 'ul'] +['ist', 'ry'] +['))', ');Ċ'] +['Ġf', 'it'] +['<', 'string'] +['Ġpolit', 'ical'] +['anc', 'el'] +['_', '.'] +['c', 'ard'] +['.c', 'urrent'] +['o', 'ch'] +['_', 'image'] +['\\', 't'] +['#', 'Ċ'] +['(', 'L'] +['Ġindu', 'stry'] +['com', 'ing'] +['Ġex', 'tra'] +['Ġreport', 'ed'] +['.st', 'art'] +['Ġres', 'ources'] +['Ġim', 'g'] +['fl', 'ow'] +['_E', 'X'] +['(n', 'ull'] +['ĠP', 're'] +['Ġwr', 'ong'] +['inter', 'face'] +['Param', 'eter'] +['n', 'ers'] +['á', '»'] +['t', 'ure'] +['ers', 'ist'] +['oun', 'try'] +['Ġseem', 's'] +['al', 'ance'] +['de', 'st'] +['ĉ', 'String'] +['Ġm', 'aint'] +['Ġun', 'it'] +['act', 'ers'] +['ĠT', 'R'] +['if', 'ul'] +['export', 's'] +['pro', 'ject'] +['App', 'lication'] +['leg', 'ate'] +['Ġt', 'akes'] +['ter', 'm'] +['Ġet', 'c'] +['ust', 'er'] +['Ġappe', 'ar'] +['add', 'ress'] +['Ġf', 'em'] +['h', 's'] +['Ġh', 'om'] +[',', '-'] +['Ġdiff', 'icult'] +['Ġcom', 'ing'] +['O', 'pen'] +['Ġset', 'tings'] +['ĠW', 'ar'] +['ĠTh', 'en'] +['Ġaut', 'om'] +['ĠF', 'oundation'] +['Ġqu', 'ite'] +['D', 'escription'] +['Ġb', 'log'] +['i', 'qu'] +['P', 'S'] +['_f', 'ield'] +['J', 'son'] +['SS', 'ION'] +['ĠS', 'ch'] +['ĠL', 'O'] +['Ġdes', 'cri'] +['Ġevery', 'one'] +['Ġpret', 'ty'] +['Ġlong', 'er'] +['Ġm', 'enu'] +['Ġcurrent', 'ly'] +['se', 'c'] +['Ġrelations', 'hip'] +['################', '################'] +['ĠM', 'ap'] +['as', 'et'] +['Ġparam', 'eters'] +['Ġcr', 'ush'] +['"', 'čĊ'] +['IL', 'ITY'] +['ig', 'ration'] +['Ġc', 'out'] +['t', 'otal'] +['Ġn', 'ames'] +['nd', 'ef'] +['")', ';'] +['ri', 'end'] +['yn', 'amic'] +['Ġeff', 'ort'] +['Ġact', 'ual'] +['Ġfield', 's'] +['O', 'UN'] +['t', 'ers'] +['Ġf', 'ix'] +['_m', 'odel'] +['Ġc', 
'ases'] +['C', 'A'] +['M', 'y'] +['Inter', 'face'] +['ĠS', 'E'] +[']', ']'] +['al', 'le'] +['ĠN', 'ational'] +['ĠArray', 'List'] +['in', 'line'] +['.', 'V'] +['ar', 'a'] +['ref', 'ix'] +['as', 'c'] +['Re', 'ader'] +['ĠÐ', '¿'] +['ast', 'ic'] +['(', '()'] +['C', 'l'] +['.annot', 'ation'] +['Ġperform', 'ance'] +['ail', 'y'] +['.to', 'String'] +['.n', 'et'] +['view', 's'] +['.', 'end'] +['ay', 'ers'] +['l', 'ate'] +['ĠA', 'pr'] +['ed', 'eral'] +["']", ')'] +['.b', 'ody'] +['Ġhigh', 'er'] +['_f', 'l'] +['c', 'r'] +['al', 'ert'] +['_n', 'ode'] +['ĠG', 'oogle'] +['Ġit', 'self'] +['A', 'uth'] +['urrenc', 'y'] +['Ġsignific', 'ant'] +['app', 'end'] +['Ġres', 'pect'] +['str', 'ap'] +['Ġun', 'a'] +['riter', 'ia'] +['P', 'ORT'] +['.ap', 'ache'] +['Out', 'put'] +['Ġpro', 'gress'] +['Ġm', 'id'] +['ĠM', 'icrosoft'] +['Ġres', 'ource'] +['ab', 'lish'] +['Ġd', 'im'] +['.', 'load'] +['.A', 'pp'] +['Ġd', 'irection'] +['Ġadd', 'itional'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠ'] +['Ġnum', 'bers'] +['Ġcompan', 'ies'] +['.T', 'h'] +['Ġs', 'ound'] +['user', 'name'] +['Ġstat', 'ement'] +['Ġal', 'ert'] +['Ġcon', 'tract'] +['h', 'ome'] +['_l', 'ength'] +['.Com', 'ponent'] +['e', 'v'] +['.', 'Ex'] +['ï¼', 'ļ'] +['"', ';'] +['ĠH', 'igh'] +['Ġ', ')ĊĊ'] +['ĠP', 'oint'] +['op', 'h'] +['Ġl', 'ines'] +['->', '_'] +['"', ')ĊĊ'] +['o', 'x'] +['app', 'lication'] +['Ġ', ']Ċ'] +['ĊĊĊĊ', 'ĊĊ'] +['Ġso', 'on'] +['ction', 's'] +['ing', 'er'] +['Ġj', 'oin'] +['ĠP', 'e'] +['Ġ', 'ë'] +['Ġl', 'as'] +['.', 'E'] +['c', 'ss'] +['/', 'or'] +['ĠSt', 'art'] +['ĠT', 'O'] +['Ġsub', 's'] +['con', 'n'] +['com', 'ponents'] +['DE', 'BUG'] +['qu', 'are'] +['F', 'unction'] +['end', 'ar'] +['.', 'index'] +['Ġf', 'ill'] +['Ä', 'Ļ'] +['Ġcho', 'ose'] +['h', 'ow'] +['ĠAmeric', 'a'] +['ass', 'ets'] +['--------', '----'] +['ĠV', 'alue'] +['Ġoff', 'ice'] +['Ġv', 'eh'] +['Ġtrans', 'form'] +['ĠAr', 't'] +['Ġin', 'de'] +['Ġf', 'n'] +['Ġim', 'plements'] +['ang', 'o'] +['ple', 'te'] +['+', '"'] +['t', 'mp'] +['am', 'ily'] +['Ġhas', 'h'] 
+['miss', 'ions'] +['E', 'ST'] +['g', 't'] +['Pro', 'vider'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠ'] +['Ġfl', 'ag'] +['Ġpartic', 'ip'] +['d', 'en'] +['ĠReturn', 's'] +['Ġnot', 'e'] +['ü', 'r'] +['p', 'm'] +['ide', 'os'] +['Ġspec', 'ified'] +['ĠE', 'N'] +['est', 'er'] +['ol', 'id'] +['Ġup', 'on'] +['(', 'std'] +['ĉ', 'v'] +["Ġ'", '\\'] +['u', 'z'] +['Ġv', 'ert'] +['Ġv', 'ict'] +['ĉ', 'self'] +['Ġ"', '$'] +['.', 'k'] +['Ġgroup', 's'] +['g', 'ithub'] +['l', 'ang'] +['Ġm', 'ut'] +['T', 'O'] +['Ġv', 'e'] +['ĠP', 'lease'] +[';ĊĊ', 'Ċ'] +['ac', 'cess'] +['Ġ{', '"'] +['re', 'a'] +['Ġr', 'isk'] +['ick', 'er'] +['og', 'gle'] +['ĉ', 'while'] +['AN', 'G'] +['.s', 'end'] +['Ġwom', 'an'] +['Ġget', 's'] +['Ġ', 'ign'] +['ĠI', 'd'] +['_', 'log'] +['ON', 'E'] +['Ġe', 'vid'] +['ĠH', 'ar'] +['_s', 'ub'] +['Ġend', 'l'] +['Ġinclud', 'ed'] +['()', ');ĊĊ'] +['ĠA', 'p'] +['ig', 'r'] +['Ġs', 'em'] +['ĠBl', 'ack'] +['d', 'oc'] +['_t', 'able'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ'] +['-', 'up'] +['Ġca', 'use'] +['Ġ', '..'] +['Ġv', 'an'] +['_d', 'ict'] +['Ġf', 'ocus'] +['IN', 'D'] +['CE', 'SS'] +['.L', 'og'] +['Ġmult', 'iple'] +['id', 'o'] +['Ġreg', 'ard'] +['-', 'M'] +['and', 'ler'] +['our', 'se'] +['Ġde', 'g'] +['.', 'U'] +['Ġadd', 'ition'] +['Ġvar', 'ious'] +['Ġrece', 'ive'] +['е', 'н'] +['ĠH', 'T'] +['Ob', 'j'] +['D', 'F'] +['Ġincre', 'ase'] +['ĠO', 'pen'] +[']', ';'] +['Ġcomm', 'it'] +['?', 'Ċ'] +['ateg', 'ories'] +['at', 'ory'] +['sh', 'ip'] +['ĠM', 'ich'] +['Ġh', 'tml'] +['rom', 'ise'] +['Ġle', 'ave'] +['Ġstr', 'ateg'] +['av', 'en'] +['ĠCon', 'sole'] +['k', 'nown'] +['-', 'n'] +['_', 'LE'] +['.com', 'ponent'] +['Ġb', 're'] +['S', 'ession'] +['i', 'ance'] +['Ġal', 'ign'] +['typ', 'edef'] +['_', 'result'] +['ĠW', 'HERE'] +['.s', 'plit'] +['Ġread', 'ing'] +['FA', 'ULT'] +['Ġc', 'lo'] +['Ġnot', 'ice'] +['_p', 'r'] +['ar', 'ter'] +['Ġlo', 'ck'] +['Ġstand', 'ard'] +['et', 'ic'] +['ell', 'ow'] +['Ġp', 'adding'] +['ĠH', 'is'] +['Ġst', 'ates'] +['_c', 'ast'] +['(', 
'P'] +['a', 'a'] +['Ġintern', 'al'] +['e', 'an'] +['ĠP', 'RO'] +['ĠK', 'ey'] +['Ġes', 'pecially'] +['m', 'ing'] +['Ġc', 'ross'] +['Ġn', 'ational'] +['_', 'object'] +['f', 'ilter'] +['Ġs', 'cript'] +['.', 'update'] +['_', 'i'] +['ĠAss', 'ert'] +['/', 'core'] +['%%', '%%'] +['Ġproble', 'ms'] +['ist', 'or'] +['Ġ.', '='] +['Ġar', 'ch'] +['Ġwrit', 'ten'] +['Ġm', 'ilit'] +['M', 'ENT'] +['.', 'ch'] +['ca', 'pe'] +['ĠM', 'us'] +['_', 'config'] +['ĠA', 'PI'] +['fo', 'ot'] +['Ġim', 'ages'] +['end', 'l'] +['.', 'In'] +['F', 'irst'] +['Ġpl', 'atform'] +['.pro', 't'] +['O', 'ption'] +['st', 'e'] +['ĠT', 'ODO'] +['Ġfor', 'ce'] +['.', 'cont'] +['ĉ', 'echo'] +['ĠD', 'av'] +['P', 'tr'] +['(', 'B'] +['R', 'T'] +['ĠB', 'ase'] +[']', "['"] +['Ġann', 'ounc'] +['con', 'sole'] +['ĠP', 'y'] +['d', 's'] +['.', 'as'] +['Ġpre', 'vent'] +['ap', 'an'] +['Ġ{', "'"] +['}', '', "'"] +['Ġde', 'ad'] +['V', 'AL'] +['Q', 'UE'] +['****************************************************************', '********'] +['Ġch', 'arg'] +['R', 'eturn'] +['Ġf', 'ul'] +['d', 'om'] +['Ġr', 'ules'] +['Ġmod', 'ify'] +['Ġe', 'val'] +['h', 'am'] +['at', 'ement'] +['\\', '<'] +['ul', 'a'] +['=', 'False'] +['R', 'A'] +['Ġcont', 'ains'] +['Ġst', 'ack'] +['m', 'ar'] +['Ġ{', '}Ċ'] +['Ġund', 'efined'] +['A', 'ss'] +['ĠCh', 'ina'] +['ve', 'y'] +['*', 'Ċ'] +['Ġplay', 'ing'] +[')', '/'] +['act', 'or'] +['Ġb', 'ottom'] +['li', 'er'] +['ĠN', 'umber'] +['Ġcou', 'ple'] +['D', 'C'] +['ĠS', 'O'] +['g', 'or'] +['.set', 'Text'] +['s', 'uccess'] +['com', 'mand'] +['F', 'ilter'] +['ĠO', 'ur'] +['_', 'item'] +['Ġc', 'tx'] +['Ġro', 'ad'] +['V', 'ersion'] +['c', 'ase'] +['ur', 't'] +['av', 'ior'] +['y', 'ch'] +['semb', 'ly'] +['ĠPro', 'duct'] +['Ġh', 'eld'] +['a', 'fe'] +['Ġinclud', 'es'] +['<', 'quote'] +['Ġa', 'void'] +['ĠF', 'in'] +['ĠM', 'od'] +['Ġt', 'ab'] +['an', 'o'] +['Ã', '±'] +['ipp', 'ing'] +['-', 'e'] +['Ġins', 'ert'] +['t', 'arget'] +['ch', 'an'] +['.M', 'odel'] +['IM', 'E'] +['\\', 'Ċ'] +['Ġm', 'achine'] +['av', 'y'] +['ĠN', 
'O'] +['ĠInt', 'er'] +['Ġoper', 'ation'] +['mod', 'al'] +['T', 'ag'] +[']', ':'] +['Ġprodu', 'ction'] +['Ġare', 'as'] +['Ġre', 'n'] +['_f', 'rom'] +['n', 'bsp'] +['Ġoper', 'ator'] +['m', 'en'] +['app', 'ed'] +['_p', 'er'] +['z', 'en'] +['("', '.'] +['.s', 'ave'] +['="', '{{'] +['Ġt', 'or'] +['(', 'response'] +['Ġc', 'andid'] +['Ġcon', 'v'] +['a', 'iled'] +['ĠL', 'ib'] +['com', 'p'] +['ur', 'a'] +['ï¿', '½'] +['ĠH', 'ere'] +['Ġarg', 'ument'] +['h', 'ood'] +['Ġest', 'ablish'] +['ograph', 'y'] +['Ġon', 'Click'] +['amb', 'da'] +['Ġs', 'ch'] +['Ġmov', 'ie'] +['Ġse', 'c'] +['Ġact', 'ivity'] +['Ø', '§'] +['Ġs', 'ql'] +['_', 'all'] +['inc', 'ip'] +['Ġprovid', 'es'] +['Ġs', 'ys'] +['ack', 'et'] +['Ġwas', 'n'] +['Ġus', 'es'] +['ĠF', 'unction'] +['.g', 'oogle'] +['ĠRes', 'ult'] +['Vis', 'ible'] +['ag', 'ma'] +['el', 'come'] +['ĠS', 'y'] +['ĠC', 'ent'] +['AL', 'SE'] +['ac', 'ión'] +['EX', 'T'] +['Ġl', 'icense'] +['ĠL', 'ong'] +['Ġacc', 'om'] +['Ġab', 'ility'] +['.', 'height'] +['Act', 'ive'] +['olog', 'ical'] +['ol', 'y'] +['))', ','] +['.S', 'e'] +['Ġparam', 'eter'] +['pr', 'ite'] +['AB', 'ILITY'] +['.s', 'ervice'] +['ĠG', 'roup'] +['_', 'query'] +['ĠI', 'tem'] +['in', 'ing'] +['Ġj', 'ud'] +['im', 's'] +['f', 'ix'] +['ind', 'er'] +['ag', 'ram'] +['Ġfunction', 's'] +['Ġexper', 'i'] +['ĠE', 'm'] +['Ġro', 't'] +['Ġp', 'en'] +['.b', 'tn'] +['ĠA', 'S'] +['#if', 'def'] +['Ġcho', 'ice'] +['ĠP', 'age'] +['_P', 'RO'] +['Q', 'U'] +['å', 'ı'] +['ant', 'ity'] +['Â', 'Ń'] +['word', 's'] +['Ġread', 'only'] +['Ġf', 'lex'] +['prot', 'ected'] +['ĠAn', 'y'] +['Ġchar', 'acters'] +['enc', 'ed'] +['ĠJ', 'uly'] +['il', 'er'] +['C', 'ard'] +['ur', 'ance'] +['Ġre', 'v'] +['.e', 'vent'] +['al', 'y'] +['Ġwon', 'der'] +['ĠP', 'ort'] +['Ġleg', 'al'] +['ro', 'le'] +['Ġt', 'en'] +['Ġgo', 'es'] +['M', 'P'] +['wh', 'ite'] +['):', 'čĊ'] +['))', 'čĊ'] +['Ġre', 'ference'] +['Ġm', 'is'] +['ĠPro', 'ject'] +['ick', 's'] +['>', '&'] +['C', 'ON'] +['Ġre', 'pl'] +['Ġreg', 'ular'] +['St', 'orage'] +['ram', 'ework'] 
+['Ġgo', 'al'] +['Ġt', 'ouch'] +['.w', 'idget'] +['Ġbu', 'ilt'] +['d', 'es'] +['P', 'art'] +['(', 're'] +['Ġw', 'orth'] +['h', 'ib'] +['g', 'ame'] +['ĠÐ', '²'] +['ac', 'ion'] +['ĠWh', 'ite'] +['(t', 'ype'] +['(', '`'] +['Ġn', 'atural'] +['Ġin', 'j'] +['Ġcal', 'cul'] +['ĠApr', 'il'] +['.', 'List'] +['Ġassoci', 'ated'] +['ĉ', 'System'] +['~', '~'] +['=', '['] +['Ġst', 'orage'] +['Ġby', 'tes'] +['Ġtr', 'avel'] +['Ġs', 'ou'] +['Ġpass', 'ed'] +['!', '='] +['as', 'cript'] +['.', 'open'] +['Ġgr', 'id'] +['Ġb', 'us'] +['Ġrec', 'ogn'] +['A', 'b'] +['Ġh', 'on'] +['ĠC', 'enter'] +['Ġpre', 'c'] +['b', 'uild'] +['HT', 'ML'] +['ĠS', 'an'] +['Ġcoun', 'tries'] +['a', 'led'] +['t', 'oken'] +['k', 't'] +['Ġqu', 'al'] +['L', 'ast'] +['ad', 'ow'] +['Ġman', 'ufact'] +['id', 'ad'] +['j', 'ango'] +['N', 'ext'] +['x', 'f'] +['.', 'a'] +['Ġporn', 'o'] +['ĠP', 'M'] +['er', 've'] +['it', 'ing'] +['_', 'th'] +['c', 'i'] +['=', 'None'] +['g', 's'] +['Ġlog', 'in'] +['at', 'ives'] +["']", ');Ċ'] +['Ä', 'ħ'] +['Ġ', 'ill'] +['I', 'A'] +['child', 'ren'] +['D', 'O'] +['Ġlevel', 's'] +['Ġ{', '{'] +['Ġlook', 's'] +['Ġ"', '#'] +['To', 'String'] +['Ġnecess', 'ary'] +['ĠĠĠ', 'Ċ'] +['c', 'ell'] +['En', 'try'] +["Ġ'", '#'] +['Ġext', 'rem'] +['Select', 'or'] +['Ġplace', 'holder'] +['L', 'oad'] +['Ġre', 'leased'] +['O', 'RE'] +['En', 'umer'] +['ĠT', 'V'] +['SE', 'T'] +['in', 'q'] +['P', 'ress'] +['ĠDep', 'artment'] +['Ġprop', 'erties'] +['Ġres', 'pond'] +['S', 'earch'] +['a', 'el'] +['Ġre', 'qu'] +['ĠB', 'ook'] +['/', 'Ċ'] +['(', 'st'] +['Ġfin', 'ancial'] +['ick', 'et'] +['_in', 'put'] +['Ġth', 'reat'] +['(', 'in'] +['Str', 'ip'] +['ì', 'Ŀ'] +['ç', 'ão'] +['Ġevid', 'ence'] +['))', ';'] +['ĠB', 'ro'] +['Ġ[', '];Ċ'] +['Ġ', 'ou'] +['b', 'uf'] +['S', 'cript'] +['d', 'at'] +['Ġr', 'ule'] +['#', 'import'] +['="', '/'] +['S', 'erial'] +['Ġstart', 'ing'] +['[', 'index'] +['a', 'e'] +['Ġcon', 'trib'] +['s', 'ession'] +['_', 'new'] +['ut', 'able'] +['o', 'ber'] +['Ġ"', './'] +['Ġlog', 'ger'] +['Ġrecent', 'ly'] 
+['Ġreturn', 'ed'] +['č', 'čĊ'] +['))', ')Ċ'] +['ition', 's'] +['Ġse', 'ek'] +['Ġcomm', 'unic'] +['Ġ"', '.'] +['Ġuser', 'name'] +['E', 'CT'] +['D', 'S'] +['Ġother', 'wise'] +['ĠG', 'erman'] +['.', 'aw'] +['Ad', 'apter'] +['ix', 'el'] +['Ġsystem', 's'] +['Ġd', 'rop'] +['Ġstruct', 'ure'] +['Ġ$', '("#'] +['enc', 'ies'] +['ann', 'ing'] +['ĠL', 'ink'] +['ĠRes', 'ponse'] +['Ġst', 'ri'] +['Å', '¼'] +['ĠD', 'B'] +['æ', 'Ĺ'] +['and', 'roid'] +['sub', 'mit'] +['ot', 'ion'] +['(', '@'] +['.t', 'est'] +['ĊĊĊĊ', 'ĊĊĊĊ'] +[']', ';čĊ'] +['Ġdirect', 'ly'] +['Ġ"', '%'] +['r', 'is'] +['el', 'ta'] +['A', 'IL'] +[')', '{čĊ'] +['m', 'ine'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠ'] +['(', 'k'] +['b', 'on'] +['as', 'ic'] +['p', 'ite'] +['__', '_'] +['M', 'ax'] +['Ġerror', 's'] +['ĠWh', 'ile'] +['Ġarg', 'uments'] +['Ġens', 'ure'] +['R', 'ight'] +['-b', 'ased'] +['We', 'b'] +['Ġ-', '='] +['Ġint', 'rodu'] +['ĠIn', 'st'] +['ĠW', 'ash'] +['ord', 'in'] +['j', 'oin'] +['D', 'atabase'] +['Ġgr', 'ad'] +['Ġus', 'ually'] +['IT', 'E'] +['Prop', 's'] +['?', '>Ċ'] +['ĠG', 'o'] +['@', 'Override'] +['RE', 'F'] +['Ġ', 'ip'] +['ĠA', 'ustral'] +['Ġ', 'ist'] +['View', 'ById'] +['Ġser', 'ious'] +['Ġcustom', 'er'] +['.prot', 'otype'] +['od', 'o'] +['c', 'or'] +['Ġdo', 'or'] +['ĠWITH', 'OUT'] +['Ġpl', 'ant'] +['Ġbeg', 'an'] +['Ġdist', 'ance'] +['()', ').'] +['Ġch', 'ance'] +['Ġor', 'd'] +['c', 'ame'] +['pr', 'agma'] +['Ġprot', 'ect'] +['rag', 'ment'] +['ĠN', 'ode'] +['en', 'ing'] +['Ñ', 'ĩ'] +['Ġr', 'oute'] +['ĠS', 'chool'] +['h', 'i'] +['Ġne', 'ighb'] +['A', 'fter'] +['lic', 'it'] +['Ġcon', 'tr'] +['Ġpr', 'imary'] +['A', 'A'] +['.Write', 'Line'] +['util', 's'] +['Ġb', 'i'] +['R', 'ed'] +['.L', 'inq'] +['.', 'object'] +['Ġlead', 'ers'] +['un', 'ities'] +['Ġg', 'un'] +['on', 'th'] +['ĠDe', 'v'] +['F', 'ILE'] +['Ġcom', 'ments'] +['_l', 'en'] +['ar', 'row'] +['am', 'ount'] +['R', 'ange'] +['s', 'ert'] +['Grid', 'View'] +['Ġup', 'dated'] +['ĠM', 'o'] +['Ġin', 'form'] +['oci', 'ety'] +['al', 'a'] +['A', 'ccess'] +['Ġh', 
'ab'] +['Ġc', 'reat'] +['_', 'arg'] +['ĠJan', 'uary'] +['ĠD', 'ay'] +['")', 'čĊ'] +['up', 'le'] +['d', 'ocument'] +['gor', 'ith'] +['m', 'enu'] +['ĠO', 'ver'] +['b', 'b'] +['.t', 'itle'] +['_', 'out'] +['Ġle', 'd'] +['ur', 'i'] +['Ġ?', '>Ċ'] +['r', 'un'] +['Ġsc', 'ene'] +['(', 'array'] +['de', 'vice'] +['_t', 'itle'] +['ag', 'on'] +[']', 'čĊ'] +['ab', 'y'] +['Ġbe', 'came'] +['bo', 'olean'] +['Ġp', 'ark'] +['ĠC', 'ode'] +['up', 'load'] +['rid', 'ay'] +['ĠSept', 'ember'] +['F', 'e'] +['Ġs', 'en'] +['c', 'ing'] +['F', 'L'] +['C', 'ol'] +['ut', 's'] +['_p', 'age'] +['in', 'n'] +['Ġim', 'plied'] +['al', 'ing'] +['Ġyour', 'self'] +['.C', 'ount'] +['con', 'f'] +['Ġa', 'ud'] +['_in', 'it'] +['.', ')'] +['Ġw', 'rote'] +['N', 'G'] +['.', 'Error'] +['ä', '»'] +['.f', 'or'] +['Ġe', 'qual'] +['ĠRe', 'quest'] +['Ġser', 'ial'] +['Ġallow', 's'] +['X', 'X'] +['Ġm', 'iddle'] +['ch', 'or'] +['Ã', '¸'] +['erv', 'al'] +['.C', 'olumn'] +['read', 'ing'] +['Ġesc', 'ort'] +['ĠAug', 'ust'] +['Ġquick', 'ly'] +['Ġwe', 'ap'] +['ĠC', 'G'] +['rop', 'ri'] +['h', 'o'] +['Ġc', 'op'] +['(', 'struct'] +['ĠB', 'ig'] +['Ġv', 's'] +['Ġfre', 'qu'] +['.', 'Value'] +['Ġaction', 's'] +['Ġpro', 'per'] +['Ġin', 'n'] +['Ġobject', 's'] +['Ġm', 'atrix'] +['av', 'ascript'] +['Ġon', 'es'] +['.g', 'roup'] +['Ġgre', 'en'] +['Ġp', 'aint'] +['ool', 's'] +['y', 'cl'] +['enc', 'ode'] +['ol', 't'] +['com', 'ment'] +['.', 'api'] +['D', 'ir'] +['Ġun', 'e'] +['iz', 'ont'] +['.p', 'osition'] +['Ġdes', 'igned'] +['_', 'val'] +['av', 'i'] +['ir', 'ing'] +['t', 'ab'] +['Ġl', 'ayer'] +['Ġview', 's'] +['Ġre', 've'] +['ra', 'el'] +['ĠO', 'N'] +['r', 'ics'] +['n', 'p'] +['Ġc', 'ore'] +['()', ');čĊ'] +['M', 'ain'] +['Ġexp', 'ert'] +['ĉĉ', 'čĊ'] +['_', 'en'] +['Ġ/', '>'] +['ut', 'ter'] +['I', 'AL'] +['ail', 's'] +['ĠK', 'ing'] +['*/', 'ĊĊ'] +['ĠM', 'et'] +['_', 'end'] +['add', 'r'] +['or', 'a'] +['Ġ', 'ir'] +['M', 'in'] +['Ġsur', 'pr'] +['Ġre', 'pe'] +['Ġdirect', 'ory'] +['P', 'UT'] +['-', 'S'] +['Ġe', 'lection'] +['h', 'aps'] 
+['.p', 're'] +['c', 'm'] +['Val', 'ues'] +['Ġ"', 'Ċ'] +['c', 'olumn'] +['iv', 'il'] +['Log', 'in'] +['in', 'ue'] +['Ġbeaut', 'iful'] +['Ġse', 'cret'] +['(e', 'vent'] +['Ġch', 'at'] +['um', 's'] +['Ġorig', 'in'] +['Ġeffect', 's'] +['Ġman', 'agement'] +['ill', 'a'] +['t', 'k'] +['Ġset', 'ting'] +['ĠC', 'our'] +['Ġmass', 'age'] +['ĉ', 'end'] +['Ġhapp', 'y'] +['Ġfin', 'ish'] +['Ġc', 'amera'] +['ĠV', 'er'] +['ĠDem', 'ocr'] +['ĠH', 'er'] +['(', 'Q'] +['con', 's'] +['it', 'a'] +["Ġ'", '.'] +['{', '}'] +['ĉ', 'C'] +['Ġst', 'uff'] +['Ġ', ':Ċ'] +['ĠA', 'R'] +['T', 'ask'] +['h', 'idden'] +['er', 'os'] +['IG', 'N'] +['at', 'io'] +['ĠHe', 'alth'] +['ol', 'ute'] +['Ent', 'er'] +["'", '>'] +['ĠT', 'witter'] +['ĠCount', 'y'] +['s', 'cribe'] +['Ġ=', '>Ċ'] +['Ġh', 'y'] +['f', 'it'] +['Ġmilit', 'ary'] +['Ġsa', 'le'] +['re', 'quired'] +['n', 'on'] +['boot', 'strap'] +['h', 'old'] +['r', 'im'] +['-', 'old'] +['ĠD', 'own'] +['Ġm', 'ention'] +['cont', 'act'] +['_g', 'roup'] +['od', 'ay'] +['Ġto', 'wn'] +['Ġsol', 'ution'] +['u', 'ate'] +['ell', 'ing'] +[']', '->'] +['ot', 'es'] +['ent', 'al'] +['om', 'en'] +['osp', 'ital'] +['ĠS', 'up'] +['_', 'EN'] +['Ġsl', 'ow'] +['SE', 'SSION'] +['Ġbl', 'ue'] +['ag', 'o'] +['Ġl', 'ives'] +['Ġ', '^'] +['.', 'un'] +['in', 'st'] +['en', 'ge'] +['Ġcustom', 'ers'] +['Ġc', 'ast'] +['ud', 'get'] +['ï¼', 'ģ'] +['ic', 'ens'] +['Ġdeter', 'min'] +['Se', 'lected'] +['_', 'pl'] +['ue', 'ue'] +['Ġd', 'ark'] +['//', 'ĊĊ'] +['s', 'i'] +['ther', 'n'] +['ĠJ', 'apan'] +['/', 'w'] +['P', 'U'] +['ĠE', 'ast'] +['ov', 'ie'] +['Ġp', 'ackage'] +['Ġn', 'or'] +['Ġap', 'i'] +['b', 'ot'] +['"', '];Ċ'] +['_p', 'ost'] +['ul', 'ate'] +['Ġcl', 'ub'] +["')", ');Ċ'] +['Ġlo', 'op'] +['PI', 'O'] +['ion', 'e'] +['sh', 'ot'] +['In', 'itial'] +['Ġplay', 'ed'] +['reg', 'ister'] +['rou', 'ght'] +['_m', 'ax'] +['ac', 'ement'] +['m', 'atch'] +['raph', 'ics'] +['A', 'ST'] +['Ġexist', 'ing'] +['Ġcomple', 'x'] +['D', 'A'] +['.C', 'h'] +['.com', 'mon'] +['m', 'o'] +["Ġ'", '../../'] +['it', 'o'] 
+['Ġanal', 'ysis'] +['Ġdel', 'iver'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'Ċ'] +['id', 'x'] +['Ã', 'ł'] +['ong', 'o'] +['ĠEng', 'lish'] +['<', '!--'] +['Ġcomput', 'er'] +['EN', 'SE'] +['Ġp', 'as'] +['Ġr', 'ais'] +['H', 'ash'] +['Ġm', 'obile'] +['Ġo', 'wner'] +['F', 'IG'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ'] +['th', 'es'] +['Ġat', 'tr'] +['w', 'd'] +['.t', 'ime'] +['aw', 'n'] +['Ġtreat', 'ment'] +['ĠA', 'c'] +['.', 'View'] +['im', 'pl'] +['m', 'ore'] +['p', 'ass'] +['Ġh', 'a'] +['.f', 'rom'] +['Ġle', 'ading'] +['FF', 'FF'] +['(', 'error'] +['.', 'ui'] +['at', 'ar'] +['ad', 'ers'] +['d', 'ates'] +['Ġz', 'u'] +['Ġfl', 'ow'] +['T', 'arget'] +['Ġinvol', 'ved'] +['Ġi', 'o'] +['par', 'se'] +['$', '_'] +['he', 'st'] +['.', 'int'] +['-', 'item'] +['as', 'y'] +['S', 'p'] +['Ġsh', 'ift'] +['N', 'T'] +['Ġt', 'f'] +['_T', 'R'] +['.', 'web'] +['C', 'S'] +['Ġ}', ')'] +['Ġey', 'es'] +['_', 'z'] +["'", ');čĊ'] +['if', 'orn'] +['Ġ{', '@'] +['Ġn', 'ice'] +['.l', 'ist'] +['ĠĠĠĠ', 'čĊ'] +['Ġf', 'loor'] +['Ġred', 'irect'] +['ĠU', 'K'] +['(', "['"] +['Ġw', 'ish'] +['Ġcap', 't'] +['leg', 'al'] +['ĠI', 'O'] +['Ġst', 'age'] +['.', 'String'] +['ĠA', 'fr'] +['ig', 'en'] +['ĠS', 'H'] +['De', 'lete'] +['ell', 's'] +['Ġsol', 'id'] +['Ġmeet', 'ing'] +['Ġwork', 'ed'] +['Ġed', 'itor'] +['in', 'y'] +['Ð', '¼'] +['_', 'read'] +['.', 'Id'] +['e', 'ff'] +['Off', 'set'] +['ch', 'a'] +['US', 'ER'] +['ĉĉ', 'ĠĠĠ'] +['ipp', 'ed'] +['Ġd', 'ict'] +['ĠR', 'un'] +['.h', 'pp'] +['Ġan', 'g'] +['x', 'ml'] +['im', 'ple'] +['Ġmed', 'ical'] +['_t', 'oken'] +['con', 'nect'] +['Ġh', 'our'] +['Ġcont', 'roller'] +['_m', 'essage'] +['U', 'ID'] +['G', 'r'] +['and', 'ed'] +['_C', 'H'] +['Ġbook', 's'] +['Ġspe', 'ak'] +['am', 'ing'] +['Ġm', 'ount'] +['Rec', 'ord'] +['ĉ', 'struct'] +['.W', 'eb'] +['ond', 'on'] +['Ġ//', 'Ċ'] +['Ġf', 'elt'] +['.A', 'uto'] +['id', 'ge'] +['_p', 'os'] +['P', 'R'] +['Ġmod', 'ern'] +['C', 'ollection'] +['_m', 'sg'] +['C', 'D'] +['ĠL', 'o'] +['Ġsecond', 's'] +['ib', 'ly'] +['.e', 'quals'] 
+['Ġintern', 'ational'] +['#', 'pragma'] +['oo', 'th'] +['W', 'riter'] +['i', 'ate'] +['Ġce', 'le'] +['ĠB', 'it'] +['iv', 'o'] +['iv', 'ery'] +['r', 'd'] +['HE', 'CK'] +['Ġc', 'ache'] +['.c', 'ount'] +['Ġro', 'll'] +['.Re', 'ad'] +['RE', 'D'] +['Ġset', 'up'] +['izont', 'al'] +['model', 's'] +['arg', 'v'] +['Ġconsider', 'ed'] +['="', '../'] +['set', 'tings'] +['ĠR', 'el'] +['Ġgrow', 'th'] +['Ġm', 'ix'] +['ĠWash', 'ington'] +['Ġpl', 't'] +['ĠI', 'M'] +['á', 'º'] +['Ġturn', 'ed'] +['ĠDate', 'Time'] +['ĠW', 'ed'] +['(', 'url'] +['Ġ"', '-'] +['Ġlet', 'ter'] +['As', 'ync'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠĠ'] +['ĠOct', 'ober'] +['_l', 'ine'] +['Ġatt', 'ention'] +['Ġcol', 'lect'] +['ĠH', 'ash'] +['Ġim', 'ag'] +['T', 'ree'] +['Ġsit', 'uation'] +['et', 'te'] +['_n', 'o'] +['IV', 'E'] +['Ġv', 'on'] +['.t', 'arget'] +['Ġknow', 'ledge'] +['Ġdr', 'ive'] +['.p', 'ost'] +['Ġb', 'lood'] +['Ġc', 'it'] +['pr', 'imary'] +['Ġconfig', 'uration'] +['te', 'e'] +['Ġph', 'oto'] +['is', 'ode'] +['Tr', 'ace'] +['Ġg', 'ave'] +['Ġsh', 'ot'] +['ĠA', 'ir'] +['Ġm', 'other'] +['pr', 'ice'] +['Ġmor', 'ning'] +['))', '{Ċ'] +['-', 'x'] +['Ġtr', 'ade'] +['Ġdes', 'c'] +['Ġ&&', 'Ċ'] +['Ġparent', 's'] +['A', 'pi'] +['å', 'Ī'] +['t', 'ed'] +['w', 'er'] +['Ġ', 'æ'] +['Ġs', 'y'] +['ĠK', 'e'] +['Par', 'ser'] +['å', 'ħ'] +['anc', 'y'] +['Ġpie', 'ce'] +['iforn', 'ia'] +['to', 'String'] +['r', 'an'] +['id', 'ing'] +['PT', 'ION'] +['com', 'es'] +['/', 'lic'] +['.c', 'lient'] +['E', 'l'] +['L', 'ong'] +['Ġprofession', 'al'] +['ru', 'pt'] +['v', 'a'] +['Ġcomplet', 'ely'] +['Ġpract', 'ice'] +['Ġse', 'lection'] +['R', 'em'] +['in', 'i'] +['Ġc', 'am'] +['RE', 'E'] +['Ġsit', 'es'] +['p', 'a'] +['AT', 'US'] +['Ñģ', 'ÑĤ'] +['arr', 'ant'] +['*', '('] +['_', 'KEY'] +['ĠB', 'utton'] +['ĠF', 'riday'] +['se', 'qu'] +['Ġre', 'ader'] +['Ġm', 'essages'] +['è', '¯'] +['Ġbu', 'f'] +['K', 'e'] +['Ġn', 'ov'] +['H', 'P'] +['M', 'sg'] +['al', 'ign'] +['ar', 'ily'] +["Ġ'", ','] +['_w', 'ith'] +['Ġd', 'as'] +['Ġhe', 'ard'] +['at', 
'omic'] +['ri', 'al'] +[')', '['] +['Ġdis', 'e'] +['@', 'end'] +['Ġg', 'old'] +['Ġf', 'air'] +['Ġsa', 'les'] +['.', 'Button'] +['str', 'ict'] +['s', 'ave'] +['Ġme', 'asure'] +['Ġ"', '+'] +['ec', 'ause'] +['View', 'Controller'] +['ĠT', 'able'] +['.p', 'aram'] +['Ġdec', 'ided'] +['((', '('] +['IN', 'FO'] +['Ġopport', 'unity'] +['T', 'e'] +['IC', 'ENSE'] +['cc', 'ording'] +['k', 'i'] +['ĠU', 'N'] +['Ġcont', 'ain'] +['Ġman', 'ager'] +['Ġp', 'ain'] +['ĠF', 'ire'] +['rom', 'e'] +['Ġpl', 'ans'] +['F', 'ound'] +['l', 'ay'] +['ĠDec', 'ember'] +['Ġinfl', 'u'] +['Ã', 'º'] +['ren', 'ch'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'Ġ'] +['az', 'ing'] +['b', 'rief'] +['c', 'all'] +['wo', 'od'] +['Ġload', 'ed'] +['Ġgr', 'and'] +['/', 'f'] +['im', 'p'] +['_', 'U'] +['ST', 'R'] +['âĢ', '¢'] +['Ġcred', 'it'] +['.C', 'olor'] +['or', 'ge'] +['QUE', 'ST'] +['Ġdiffer', 'ence'] +['ĠP', 'C'] +['w', 'args'] +['Ġp', 'ub'] +['und', 'ay'] +['Ġf', 'ra'] +['.m', 'ax'] +['Ġtri', 'ed'] +['ann', 'els'] +['s', 'end'] +['Ġreport', 's'] +['Ġad', 'ult'] +['ä', 'º'] +['Ġcons', 'ist'] +['ĠSt', 'reet'] +['ĠPro', 'gram'] +['S', 'QL'] +['M', 'atrix'] +['ounc', 'il'] +['-', 'A'] +['ĉ', 'w'] +['Ġwho', 'se'] +['Ġrel', 'ig'] +['ĠS', 'ex'] +['Ġg', 'ives'] +['n', 'one'] +['.m', 'essage'] +['(', 'G'] +['.aw', 't'] +['-', 'right'] +['ĠNov', 'ember'] +['ell', 'ig'] +['ut', 'ive'] +['Ä', 'ĥ'] +['over', 'n'] +['Ġeas', 'ily'] +['Ġide', 'as'] +['ĠÐ', '½'] +['/c', 'ss'] +['ly', 'ing'] +['el', 'le'] +['C', 'an'] +['_c', 'olor'] +['оÐ', '²'] +['Ġp', 'air'] +['ng', 'th'] +['Ġs', 'plit'] +['d', 'rop'] +['art', 'y'] +['on', 'a'] +['Ġcap', 'ital'] +['Ġhe', 'ar'] +['Ġex', 'ists'] +['ĉ', 'log'] +['em', 'o'] +['R', 'un'] +['o', 'i'] +['Ġpar', 'ser'] +['ĠM', 'ethod'] +['Ġeduc', 'ation'] +['[', 'k'] +['Ġlib', 'rary'] +['>', '";Ċ'] +['_', 'UN'] +['ĉ', 'std'] +['od', 'ed'] +['Ġcall', 's'] +['h', 'ere'] +['R', 'el'] +['Ġbr', 'and'] +['back', 'ground'] +['g', 'a'] +['_add', 'ress'] +['_param', 's'] +['C', 'ategory'] +['ĠInd', 'ia'] +['_e', 
'vent'] +['Ġ', 'ing'] +['R', 'ender'] +['.c', 'l'] +['ump', 'y'] +['Ġp', 'et'] +['F', 'C'] +['ĠA', 'nt'] +['Ex', 't'] +['Ġchar', 'ge'] +['en', 'ed'] +['gr', 'ad'] +['E', 'O'] +['Ġdep', 'end'] +['Ġ', '.ĊĊ'] +['fr', 'ame'] +['Ġd', 'f'] +['Ġh', 'uge'] +['ĠP', 'ART'] +['ed', 's'] +[';', ';'] +['ĠA', 'M'] +['Ġbas', 'ic'] +['ĠL', 'et'] +['lic', 'h'] +['Ġar', 'm'] +['Ġst', 'ar'] +['Ġf', 'ederal'] +['W', 'ork'] +['Ġcar', 'ry'] +['ĠIs', 'rael'] +['(', 'obj'] +['={', '{'] +['Ġs', 'aved'] +['Ġs', 'yn'] +['Ġconst', 'ant'] +['V', 'ENT'] +['Ġpos', 'itive'] +['Ġcon', 'duct'] +['Ġsk', 'in'] +['Ġear', 'lier'] +['Ġl', 'ayout'] +['ĠI', 'P'] +['O', 'UR'] +['Ġt', 'im'] +['styles', 'heet'] +['_', 'cl'] +['ĠC', 'ard'] +['++', '){Ċ'] +['Ġtem', 'per'] +['ĠDav', 'id'] +['ĉ', 'try'] +['.d', 'art'] +['Ġwant', 's'] +['Ġp', 'icture'] +['Ġv', 'ideos'] +['ĠCom', 'm'] +['is', 'ions'] +['_M', 'AX'] +['M', 'apping'] +['-', 'content'] +['ĠE', 'ar'] +['-', 'de'] +['Ġpre', 'm'] +['br', 'uary'] +['Ġcom', 'ponents'] +['Ġthrough', 'out'] +['Ġp', 'ull'] +['Ġp', 'ages'] +['ent', 'e'] +['res', 'pond'] +['Ġg', 'as'] +['cript', 'or'] +['Ġed', 'ge'] +['Ġb', 'ound'] +['A', 'CT'] +['****', '**'] +['Ġcre', 'ating'] +['ĠC', 'H'] +['Ġnull', 'ptr'] +['B', 'r'] +['+', "'"] +['.c', 'o'] +['>', '::'] +['Ġle', 'arning'] +['.L', 'ength'] +['_S', 'H'] +['Ġpat', 'ients'] +['A', 'IN'] +['Ġk', 'ids'] +['Ġcom', 'fort'] +['Ġsh', 'own'] +['ug', 'ins'] +['ĠB', 'ack'] +['ell', 'a'] +['_C', 'L'] +['Ġl', 'at'] +['Ġdis', 'patch'] +['Ġclass', 'es'] +['.', 'at'] +['.b', 'egin'] +['Ġsuccess', 'ful'] +['b', 'an'] +['Ġobt', 'ain'] +['ĠS', 'l'] +['Ġl', 'ack'] +['iter', 'ator'] +['Th', 'read'] +['(s', 'ize'] +['Ġn', 'one'] +['.h', 'as'] +['_', 'X'] +['s', 'ort'] +['n', 'ap'] +['p', 'et'] +['b', 'in'] +['ĠCan', 'ada'] +['The', 'y'] +['Ġd', 'ans'] +['ĠM', 'at'] +['<', 'td'] +['Ġh', 'air'] +["Ġ'", "',Ċ"] +['Ġc', 'u'] +['Ġlaw', 's'] +['let', 'ed'] +['p', 'ed'] +['Ġp', 'ow'] +['Ġk', 'new'] +['_C', 'OM'] +['_', ','] +['ĠM', 'ag'] +['id', 'ents'] 
+['(', 'req'] +['Ġ', '),'] +['-', 'center'] +['Ġw', 'ide'] +['ĠA', 'uthor'] +['st', 'ants'] +['Ġjob', 's'] +['Ġm', 'ath'] +['et', 'imes'] +['Bo', 'olean'] +['Ġs', 'cope'] +['_', 'is'] +['Ġme', 'as'] +['Ġkey', 's'] +['el', 'ay'] +['Ġexact', 'ly'] +["'=>", "'"] +['ĠP', 'aul'] +['m', 'as'] +['ĉ', 'print'] +['(l', 'en'] +['f', 'd'] +['Ġ)', ';'] +['.', 'Event'] +['q', 'li'] +['ir', 'it'] +['ield', 's'] +['om', 'an'] +['ĠT', 'op'] +['Ġv', 'ote'] +['Ġm', 'ask'] +['Ġthem', 'e'] +['-', 'Ċ'] +['Ġpro', 'ps'] +['Ġf', 'ine'] +['Ġwrit', 'er'] +['_', 'offset'] +['c', 'ar'] +['Ġal', 'tern'] +['Ġc', 'opyright'] +['Ġdest', 'roy'] +['pp', 'er'] +['Ġgener', 'ate'] +['pp', 'ed'] +['âĢĻ', 'd'] +['ĠĠĠĠĠĠ', 'Ċ'] +['m', 'ake'] +['ĠSh', 'ow'] +['Ġb', 'rowser'] +['Ġfavor', 'ite'] +['Ġcare', 'er'] +['Ġhappen', 'ed'] +['(', 'char'] +['Ġrecomm', 'end'] +['Ġl', 'iter'] +['.f', 'ilter'] +['gr', 'ade'] +['ĠÂ', '£'] +['Ph', 'one'] +['om', 's'] +['Ġn', 'amed'] +['-', 'label'] +['ip', 'o'] +['ĠO', 'ther'] +['Ġp', 'anel'] +['Ġro', 'ck'] +['S', 'cale'] +['ĉ', 'assert'] +['Ð', '´'] +['Ġtr', 'ust'] +['fr', 'ont'] +['Ġdem', 'on'] +['A', 'r'] +['N', 'et'] +['Ġecon', 'omic'] +['foot', 'er'] +['Ġr', 'ace'] +['(n', 'ode'] +['ĠO', 'ption'] +['s', 'plit'] +['Ġphys', 'ical'] +['if', 'est'] +['Ġrem', 'oved'] +['.', 'http'] +['))', ',Ċ'] +['Ġlook', 'ed'] +["'", ';'] +['d', 'ing'] +['g', 'est'] +['atur', 'day'] +['/lic', 'enses'] +['Pr', 'ice'] +['Ġd', 'ro'] +['Ġto', 'wards'] +['Ġun', 's'] +['ĠC', 'L'] +['ĉ', 'static'] +['Ġ', 'rows'] +['Ġdef', 'ine'] +['.re', 'place'] +['Ġf', 'ather'] +['ĠDes', 'ign'] +['ass', 'ign'] +['m', 'ut'] +['De', 'vice'] +['D', 'id'] +["')", ')Ċ'] +['omet', 'ry'] +['ay', 'load'] +['Ġh', 'istor'] +['ĠP', 'aram'] +['ĠBo', 'olean'] +['Ġn', 'ature'] +['Ġj', 's'] +['Ġn', 'ation'] +['i', 'h'] +['Ġdis', 'cover'] +['se', 'm'] +['Hand', 'le'] +['ĉ', 'r'] +['ĠTe', 'chn'] +['Ġw', 'all'] +['{', '$'] +['@', 'property'] +['Ġ"', '../'] +['Ġex', 'am'] +['.d', 'raw'] +['opp', 'ing'] +['Ġnear', 'ly'] 
+['Ġco', 'ol'] +['Ġinde', 'pend'] +['RE', 'S'] +['Ġhand', 'ler'] +['ĠMon', 'day'] +['Ġs', 'un'] +['St', 'yles'] +['ous', 'ly'] +['Ġ', 'ĉ'] +['v', 'est'] +['D', 'isplay'] +['(', 'y'] +['atic', 'ally'] +['Ġpred', 'ict'] +['y', 'ing'] +['Ġsom', 'etimes'] +['"', ']Ċ'] +['Ġdr', 'ink'] +['Ġb', 'ul'] +['ific', 'ations'] +['.', 'insert'] +['.re', 'g'] +['Ġtest', 's'] +['Al', 'ignment'] +['Ġal', 'leg'] +['Ġat', 'tribute'] +['ĠN', 'ote'] +['Ġmy', 'self'] +['art', 's'] +['N', 'ow'] +['Ġinterest', 'ing'] +['li', 'ents'] +['Ġpop', 'ulation'] +['ĠCal', 'ifornia'] +['"', 'I'] +['å', '¹'] +['Ġgre', 'ater'] +['ues', 'day'] +['Ġth', 'ous'] +['Ġcost', 's'] +['Ġla', 'unch'] +['\\', 'Http'] +['k', 'er'] +['b', 'and'] +['ĠPl', 'ay'] +['Ġb', 'and'] +['.sh', 'ape'] +['es', 'ome'] +['art', 'icle'] +['.r', 'f'] +['Ġw', 'er'] +['á', 's'] +['em', 'bers'] +['us', 'r'] +['B', 'A'] +['ic', 'an'] +['et', 't'] +['valid', 'ate'] +['ult', 'i'] +['Ġimmedi', 'ately'] +['z', 'er'] +['Ġfig', 'ure'] +['o', 'es'] +['ell', 'er'] +['irc', 'le'] +['ĠS', 'ign'] +['.d', 'b'] +['Ġr', 'ank'] +['By', 'tes'] +['Ġproject', 's'] +['_re', 'c'] +['UL', 'AR'] +['A', 'PI'] +['ĠL', 'ine'] +['P', 'ort'] +['Ġp', 'oll'] +['Ġg', 'iving'] +['id', 'ence'] +['--', 'Ċ'] +['Ġpl', 'ot'] +['ic', 'ial'] +['Ġw', 'arrant'] +['IT', 'ION'] +['ĠD', 'ouble'] +['Ġbill', 'ion'] +['gorith', 'm'] +['Ġequ', 'ipment'] +['D', 'ATE'] +['Ġ@', '"'] +['E', 'E'] +['Ġp', 'le'] +['i', 'ation'] +['Ġhead', 'ers'] +['Ġpro', 'ced'] +['.Component', 'Model'] +['ĠOb', 'ama'] +['Ġp', 'a'] +['ĠB', 'est'] +['im', 'ately'] +['.get', 'String'] +['.', '\\'] +['mp', 'loy'] +['Ġr', 'aw'] +['_b', 'lock'] +['und', 'red'] +['"', '},Ċ'] +['.Group', 'Layout'] +['Ġb', 'rought'] +['NS', 'String'] +['th', 'row'] +['cre', 'ated'] +['.N', 'ew'] +['_', 'view'] +['C', 'P'] +['ep', 's'] +['O', 'p'] +['Ġgr', 'atis'] +["Ġ'", '"'] +['Ġinter', 'view'] +['""', '"Ċ'] +['Ġpart', 'ial'] +['Ġa', 'ria'] +['b', 'ing'] +['A', 'uthor'] +['Bo', 'ok'] +['ĠP', 'at'] +['um', 'an'] +['Us', 'ers'] 
+['pl', 'us'] +['ĠD', 'irect'] +['ven', 'ue'] +['al', 'pha'] +['UC', 'CESS'] +['ĠC', 'all'] +['Ġ', ');čĊ'] +['im', 'ated'] +['Ġrem', 'ain'] +['Ġant', 'i'] +['ĠL', 'ondon'] +['Ġsaf', 'ety'] +['PO', 'SE'] +['o', 'les'] +['cont', 'roller'] +['By', 'te'] +['ĠCour', 't'] +['ĠPh', 'il'] +['ĠAss', 'oci'] +['en', 'a'] +['å', 'IJ'] +['_ST', 'R'] +['co', 'in'] +['resh', 'old'] +['Ġb', 'atch'] +['_C', 'lick'] +['entic', 'ation'] +['>', "';Ċ"] +['ent', 'y'] +['Ġbegin', 'ning'] +['Ġz', 'ero'] +['ĠCon', 'vert'] +['Ġt', 'err'] +['Ġp', 'aid'] +['Ġincre', 'ased'] +['c', 'atch'] +['-s', 'ize'] +['act', 'ivity'] +['e', 'quals'] +['Ġque', 'ue'] +['Ġ"', "'"] +['ĠIntern', 'ational'] +['Ġf', 'ür'] +['urs', 'day'] +['Ġsc', 'ient'] +['all', 'ow'] +['ax', 'is'] +['Ġapp', 'ropri'] +['ed', 'ge'] +['Ġid', 'x'] +['S', 'uccess'] +['ent', 'ifier'] +[':', '\\'] +['x', 'is'] +['Ġmax', 'imum'] +['ark', 's'] +['Ġb', 'irth'] +['(', 'index'] +['Ġmay', 'be'] +['.p', 'y'] +['file', 's'] +['Ġlim', 'ited'] +['_', 'check'] +['lo', 'ok'] +['pl', 'ies'] +['Ġmov', 'ement'] +["']", '.'] +['Ġbro', 'ad'] +['ĠB', 'E'] +['ĠUn', 'ityEngine'] +['.c', 'pp'] +['ĠE', 'very'] +['Ad', 'min'] +['Ġf', 'ans'] +['p', 'ared'] +['Ċ', 'ĠĠĠĠĊ'] +['Ġfore', 'ign'] +['Ġp', 'an'] +['Ġt', 'our'] +['ĠOr', 'der'] +['Ġmov', 'ing'] +['Ġa', 'uf'] +['C', 'all'] +['c', 'b'] +['Å', 'Ł'] +['vent', 'ory'] +['ĠS', 'ql'] +['Ġful', 'ly'] +['Click', 'Listener'] +['W', 'ORD'] +['Ġannounc', 'ed'] +[')', 'čĊčĊ'] +['Ġagre', 'ed'] +['ri', 'e'] +['Ġe', 'arn'] +['_l', 'ink'] +['.', 'array'] +['(t', 'ext'] +['Ġmaterial', 's'] +[',', 'p'] +['ff', 'ff'] +['v', 'g'] +['ĠÂ', '©'] +['Ġun', 'less'] +['aj', 'ax'] +['LO', 'G'] +['Ġsex', 'ual'] +['Ġ\\', '"'] +['-', 'time'] +['Ġco', 'ach'] +['Ġsupport', 'ed'] +['Ġphot', 'os'] +['if', 'orm'] +['.C', 'reate'] +[')', ']'] +['ri', 'er'] +['Ġd', 'ialog'] +['av', 'er'] +['ig', 'e'] +[')', '+'] +['_id', 'x'] +[':', '['] +['_m', 'in'] +['ĠC', 'ong'] +['Ġpress', 'ure'] +['Ġteam', 's'] +['S', 'ign'] +['b', 'egin'] +['ri', 
'an'] +['NE', 'SS'] +['L', 'S'] +['Ġimpro', 've'] +['ĠS', 'unday'] +['Ġdef', 'inition'] +['ig', 'er'] +['roll', 'ers'] +['Ġthink', 'ing'] +['T', 'emplate'] +['-', 'F'] +['Ġem', 'erg'] +['pl', 'ates'] +['ĠUS', 'A'] +['.set', 'State'] +['ĠAl', 'so'] +['re', 'v'] +['Ġen', 'able'] +['ĠC', 'O'] +['PE', 'CT'] +['Ġcon', 'cept'] +[')', '-'] +['ĠâĢ', '¢'] +['Ġset', 's'] +['Ġmean', 'ing'] +['em', 'on'] +['ĠCon', 's'] +['c', 'mp'] +['ed', 'er'] +['ann', 'ed'] +['icens', 'ed'] +['ĠS', 'uper'] +['Ġd', 'aily'] +['Ġmult', 'i'] +['_', 'u'] +['Ġchall', 'eng'] +['_m', 'ode'] +['ĠP', 'romise'] +['Ġstr', 'ict'] +['j', 'o'] +['int', 'on'] +['(', 'list'] +['On', 'ly'] +['>', '{'] +['Ġveh', 'icle'] +['í', 'ķ'] +['ĠPl', 'ayer'] +['ĠD', 'el'] +['Ġp', 'ool'] +['.', 'url'] +['nes', 'day'] +['();čĊ', 'čĊ'] +['Ġ"', ');Ċ'] +['L', 'ocal'] +['.', '");Ċ'] +['Ġorgan', 'ization'] +['re', 'nder'] +['ĠApp', 'lication'] +['Ġsum', 'mer'] +['ex', 'pected'] +['N', 'A'] +['Ġr', 'ap'] +['_', 'obj'] +['Ġsur', 'face'] +['ĠP', 'UR'] +['Ġ},', 'ĊĊ'] +['Ġvariable', 's'] +['(m', 'essage'] +['Ġop', 'in'] +['.b', 'ack'] +['а', 'н'] +['Ġwork', 'ers'] +['v', 'm'] +['C', 'o'] +['ught', 'er'] +['Ġm', 'aster'] +['Ġ"', '",'] +['Ġst', 'ories'] +['.', 'User'] +['Ġcele', 'br'] +['ines', 'e'] +['B', 'S'] +['ĠCom', 'mand'] +['ash', 'board'] +['Ġo', 'g'] +['k', 'g'] +['.', 'image'] +['.st', 'yle'] +['Ġstep', 's'] +['ĠB', 'en'] +['(', 'args'] +['ĠP', 'erson'] +[',', 'y'] +['Ġofficial', 's'] +['|', 'Ċ'] +['Ġsk', 'ills'] +['v', 'c'] +['Ġbuild', 'er'] +['Ġg', 'ar'] +['A', 'ccount'] +['ĠA', 'uth'] +['ç', 'Ķ'] +["']", ')Ċ'] +['ĠA', 'T'] +['n', 'n'] +['.', 'Int'] +['SS', 'ERT'] +['Ġeffect', 'ive'] +['LE', 'TE'] +['Ġto', 'ols'] +['AR', 'D'] +['Ġdig', 'ital'] +['D', 'ouble'] +['ĠF', 'ind'] +['R', 'C'] +['Ġin', 'line'] +['/', 'r'] +['AR', 'AM'] +['AS', 'K'] +['Ġint', 'ent'] +['a', 'ight'] +['_add', 'r'] +['Ġrequest', 's'] +['.f', 'irst'] +['Ġde', 'bug'] +['Ġsp', 'ent'] +['()', '));Ċ'] +['Å', 'Ľ'] +['Ġpr', 'incip'] +['Log', 'ger'] 
+['clud', 'es'] +['.', 'use'] +['Ġsur', 'v'] +['med', 'ia'] +['ĠFe', 'bruary'] +['ĠM', 'ac'] +['Ġmiss', 'ing'] +['Ġw', 'ife'] +['Ġtalk', 'ing'] +['ĠM', 'ake'] +['Ġc', 'art'] +['Ġloc', 'ated'] +['E', 'nc'] +['-', 'a'] +['ch', 'ron'] +['Ġc', 'ards'] +['Ġgu', 'y'] +['Ġp', 'ers'] +['ĠY', 'es'] +['ate', 'ver'] +['ĠA', 'ng'] +['ol', 'ar'] +['ĠE', 'ven'] +['Ġacc', 'ur'] +['ĠP', 'ower'] +['ĠG', 'old'] +['c', 'lear'] +['Pro', 'cess'] +['Ġrec', 'ords'] +['Ġk', 'illed'] +['.c', 'lear'] +['ĠWARRANT', 'IES'] +['Ġpur', 'pose'] +['pan', 'el'] +['J', 'ECT'] +['ÃŃ', 'a'] +['Ġex', 'erc'] +['W', 'S'] +['/', 'L'] +['.', 'exports'] +['Ġ__', '_'] +['Ġs', 'in'] +['S', 'ervlet'] +['Ġd', 'é'] +['.de', 'lete'] +['ro', 'ke'] +['S', 'l'] +['ug', 'h'] +['ear', 's'] +['Ġpoint', 'er'] +['Ġh', 'op'] +['all', 'ery'] +['Ġo', 'bs'] +['co', 'very'] +['ĉ', 'char'] +['ĉĉĉĉ', 'ĉĉĉĉĉĉ'] +['ĉ', 'def'] +['oc', 'ity'] +['itch', 'en'] +['ul', 'ations'] +['ĠF', 'IT'] +['Ġ', ').'] +['straint', 's'] +['vent', 'ion'] +['Ġrequ', 'ires'] +['ĠO', 'per'] +['M', 'E'] +['OUN', 'T'] +['al', 'let'] +['Ġn', 'orm'] +['I', 'RE'] +['ex', 'as'] +['Ġprogram', 's'] +['Ġwe', 'ak'] +["'", '.$'] +['u', 'ing'] +['ĉ', 'ĠĠĠĠĠĠĠ'] +['Ġm', 'il'] +['Ġf', 'irm'] +['init', 'ely'] +['_VAL', 'UE'] +['ap', 'se'] +['atis', 'f'] +['Ġdem', 'and'] +['_m', 'od'] +['Ġdescri', 'bed'] +['Ġpl', 'aces'] +['V', 'ID'] +['Ġal', 'one'] +['Ġex', 'port'] +['Ġv', 'ec'] +['ĠM', 'ax'] +['Ġactiv', 'ities'] +['ict', 'ures'] +['g', 'ener'] +['Ġm', 'a'] +['Ĥ', '¬'] +['Ġexpress', 'ion'] +['C', 'allback'] +['_', 'content'] +['ĠM', 'ost'] +['Ġtest', 'ing'] +['E', 'C'] +['CH', 'ANT'] +['Ġad', 'just'] +['.Th', 'reading'] +['(', 'ctx'] +['Ġag', 'ree'] +['ig', 'hest'] +['Ġu', 'i'] +['ĠL', 'aw'] +['.', 'Y'] +['>', '', 'ĊĊ'] +['.ex', 'ample'] +['ber', 'g'] +['Ġmov', 'ed'] +['ĉ', 'e'] +['ĠS', 'aturday'] +['Ġpay', 'load'] +['Ä', 'ĩ'] +[')', ':ĊĊ'] +['Ġbe', 'y'] +['ur', 'er'] +['<', 'script'] +['Ġs', 'ymbol'] +['Ġass', 'um'] +['Ġp', 'ul'] +['E', 'ffect'] +['Ġh', 'undred'] 
+['To', 'ol'] +['ak', 'ed'] +['con', 'nection'] +['Ġvo', 'ice'] +['Ġp', 'd'] +['Ġtrans', 'action'] +['Ġlink', 's'] +['E', 'rr'] +['ĠInd', 'ian'] +['T', 'C'] +['atal', 'og'] +['n', 'i'] +['s', 'ign'] +['<<', '"'] +['j', 'i'] +['y', 'a'] +['Ġdemon', 'str'] +['ul', 'ated'] +['.', 'St'] +['Ġinst', 'it'] +['Ġbo', 'ost'] +['Ġcell', 's'] +['ol', 'ic'] +['.P', 'ro'] +[':', '', ','] +['">', '', '\\'] +['Ġth', 'us'] +['ĠReg', 'ister'] +['h', 'ol'] +['ĠCh', 'inese'] +['Ġpost', 'ed'] +['Ġm', 'agn'] +['ab', 'ilities'] +['Ġdise', 'ase'] +['Ġrem', 'ains'] +['ĠPro', 'f'] +['-', 'form'] +['Ġc', 'in'] +['org', 'an'] +['ic', 'ate'] +['Ġst', 'ress'] +[']', '*'] +['Ġ', '----------------------------------------------------------------'] +['_', 'context'] +['or', 'ry'] +['Ġd', 'ied'] +['m', 'at'] +['Ġstart', 's'] +['.M', 'essage'] +['Ġrun', 's'] +['Ġgu', 'ide'] +['Ġwarrant', 'y'] +['ential', 's'] +['d', 'ict'] +['ĠS', 'ize'] +['ul', 'er'] +['Ġrespons', 'ible'] +['_SE', 'T'] +['Ġcont', 'aining'] +['ĠPr', 'ice'] +['|', '|'] +['F', 'S'] +['Ġem', 'p'] +['_b', 'utton'] +['(', 'uint'] +['Ġsu', 'ff'] +['p', 'th'] +['Ġdef', 'initely'] +['put', 'e'] +['Ġmarket', 'ing'] +['ĠW', 'H'] +['ĠS', 'ie'] +['+', '='] +['OL', 'OR'] +['Ġcons', 'ult'] +['Ġs', 'igned'] +['Ġse', 'quence'] +['le', 'e'] +['Ġrequire', 'ments'] +['h', 'y'] +['Ex', 'press'] +['M', 'T'] +['se', 'y'] +['Ġ', 'ult'] +['å', '®'] +['ellig', 'ence'] +['Ġanal', 'y'] +['Ġd', 'ress'] +['eng', 'ine'] +['ĠG', 'reat'] +['ĠAnd', 'roid'] +['ĠA', 'lex'] +['m', 'ode'] +['D', 'ictionary'] +['.D', 'ate'] +['ä', '½'] +['V', 'ICE'] +['Ġfam', 'ilies'] +['ĠRuss', 'ian'] +['ĠT', 'imes'] +['.c', 'all'] +['$', '('] +['Pro', 'file'] +['Ġf', 'older'] +['ch', 'es'] +['Ġleg', 'is'] +['_', 'row'] +['un', 'es'] +['Ù', 'Ħ'] +['Ġ}', ').'] +['Ass', 'ert'] +['ag', 'en'] +['ĠH', 'and'] +['I', 'ter'] +['Ġbig', 'gest'] +['ore', 'ach'] +['Ġpol', 'ic'] +['Ġper', 'missions'] +['Ġshow', 'ed'] +['ĠE', 'lement'] +['Ġtop', 'ic'] +['âĢĶ', 'âĢĶ'] +['ro', 'ad'] +['ĠB', 'ank'] 
+['rec', 'ord'] +['Ġpart', 'ners'] +['ĠR', 'ef'] +['ess', 'ions'] +['Ġass', 'ess'] +['U', 'ST'] +['ĠPart', 'y'] +['pro', 'du'] +['L', 'C'] +['Ġ', 'ul'] +['.', 'form'] +['h', 'ide'] +['c', 'opy'] +['UT', 'F'] +['ĠSO', 'FTWARE'] +['čĊčĊ', 'čĊ'] +['ĠL', 'in'] +['un', 'a'] +['ug', 'ar'] +['Ġadmin', 'istration'] +['Ġopen', 'ing'] +['Ġsc', 'an'] +['Ġcontin', 'ued'] +['com', 'ponent'] +['.s', 'p'] +['Ġhapp', 'ens'] +['um', 'my'] +['ĠP', 'R'] +['.F', 'ile'] +['ĠDown', 'load'] +['Lo', 'ading'] +['d', 'i'] +['Ġwait', 'ing'] +['_A', 'DD'] +['T', 'ab'] +['.query', 'Selector'] +['Ġecon', 'omy'] +['ĠF', 'rench'] +['t', 'xt'] +['Ġf', 'ant'] +['_', ';Ċ'] +['H', 'older'] +['S', 'H'] +['Ġn', 'umpy'] +['Ġst', 'reet'] +['Ġm', 'ale'] +['\\', 'Model'] +['ang', 'ing'] +['ĠB', 'ill'] +['Ġprevious', 'ly'] +['B', 'I'] +['ĠSec', 'ret'] +['Ġm', 'ist'] +['ĠF', 'ield'] +['up', 's'] +['ĠPro', 'cess'] +['Ġke', 'pt'] +['ĠO', 'T'] +['Ġtrad', 'itional'] +['.', 'i'] +['am', 'in'] +['Ġhelp', 's'] +['An', 'y'] +['orig', 'in'] +['ilt', 'ers'] +['j', 'u'] +['d', 'esc'] +['ĠA', 'ccount'] +['Ġ)', 'čĊ'] +['k', 'top'] +['ol', 'ly'] +['Ġf', 's'] +['Ġ', 'ê'] +['Ġ', 'ut'] +['Ġcent', 'ral'] +['(t', 'est'] +['.A', 'n'] +['Ġs', 'atisf'] +['G', 'R'] +['ĠF', 'ull'] +['Ġhe', 'at'] +['ib', 'er'] +['Ġon', 'to'] +['m', 'os'] +['S', 'chema'] +['Ġfact', 'ory'] +['"', '.$'] +['aw', 's'] +['St', 'atement'] +['(t', 'arget'] +['ĉ', 'new'] +['.b', 'e'] +['Ġg', 'uest'] +['Ġm', 'al'] +['AR', 'Y'] +['Ġre', 'ached'] +['Ġm', 'ouse'] +['Ġchall', 'enge'] +['ĉd', 'ouble'] +['ĠT', 'em'] +['Ġt', 'error'] +['Ġex', 'tract'] +['_T', 'O'] +['Ġsepar', 'ate'] +['Ġm', 'ir'] +['h', 'elp'] +['Ġcap', 'acity'] +['ĠProp', 'erty'] +['k', 'an'] +['_c', 'reate'] +['ĠL', 'ight'] +['.p', 'arent'] +['Ġunderstand', 'ing'] +['Ġeas', 'ier'] +['Ġ|', '='] +['Ġen', 'h'] +['Ġf', 'at'] +['Ġprot', 'est'] +['am', 'm'] +['_', 'AT'] +['-', 'of'] +['il', 's'] +['ĠO', 'h'] +['Ġps', 'ych'] +['Ġ$', '.'] +['ind', 's'] +['Ġrel', 'ative'] +['sh', 'op'] +['sh', 'ort'] 
+['ĠS', 'and'] +['uest', 'ion'] +['Ġf', 'ear'] +['/', 'ĊĊ'] +['.', 'context'] +['Ġschool', 's'] +['Ġser', 've'] +['z', 'one'] +['_d', 'b'] +['Ġmajor', 'ity'] +['ex', 'ample'] +['Ġl', 'ang'] +['ĉ', 'ĠĠ'] +['Reg', 'ister'] +['end', 'o'] +['Ġprocess', 'ing'] +['_t', 'emplate'] +['-', 'user'] +['Ġe', 'g'] +['C', 'OM'] +['ĠBl', 'ue'] +['i', 'ro'] +['Ġrem', 'ote'] +['ĠI', 'T'] +['#!', '/'] +['Ġred', 'istrib'] +['ra', 'z'] +['ĠS', 'ince'] +['ĠT', 'ur'] +['Back', 'ground'] +['==', '='] +['Ġref', 'lect'] +['Ġpro', 's'] +['c', 'md'] +['Ġwh', 'om'] +['Com', 'pat'] +['ĠA', 're'] +['Id', 'entifier'] +['ĠTh', 'om'] +['_', 'port'] +['g', 'u'] +['Ġmon', 'itor'] +['r', 'm'] +['Ġpat', 'ient'] +['ver', 'ter'] +['Ġg', 'ain'] +['-', 'ui'] +['In', 'st'] +['Ġd', 'ies'] +['A', 'rea'] +['_f', 'ilter'] +['Ġgr', 'at'] +['Ġreal', 'ity'] +['ord', 'inate'] +['ol', 'ved'] +['Cont', 'act'] +['Ġcompl', 'iance'] +['_', 'or'] +['ĠV', 'ar'] +['d', 'l'] +['Ġapp', 'end'] +['G', 'ER'] +['(m', 'ax'] +['.re', 'nder'] +['Ġd', 'ynamic'] +['ordin', 'ates'] +['_', 'options'] +['_c', 'olumn'] +['Ġb', 'atter'] +['s', 'pace'] +['L', 'a'] +['ĠS', 'ource'] +['/b', 'in'] +['Ġd', 'os'] +['ĠBo', 'ard'] +['ĠTh', 'read'] +['ĠA', 'L'] +['(', 'config'] +['ĠM', 'er'] +['Ġm', 'iles'] +['_', 'header'] +['ETH', 'OD'] +['iz', 'z'] +['Ġbenef', 'it'] +['Ġinteg', 'r'] +['(c', 'urrent'] +['ul', 'o'] +['.', 'default'] +['ĠD', 'iv'] +['Ġt', 'on'] +['o', 'th'] +['erv', 'ation'] +['ed', 'om'] +['Ġb', 'aby'] +['ce', 'ived'] +['.t', 'op'] +['rior', 'ity'] +['ĠL', 'ocal'] +['ri', 'age'] +['Ġattack', 's'] +['Ġh', 'ospital'] +['Ġfem', 'ale'] +['ĠLog', 'in'] +['ĠFl', 'or'] +['Ġch', 'ain'] +['ash', 'ion'] +['Text', 'ure'] +['S', 'ave'] +['Ġf', 'arm'] +['.cont', 'ains'] +['.T', 'est'] +['Ġknow', 's'] +['Ġgener', 'ally'] +['ip', 'eline'] +['Ġme', 'ant'] +['enc', 'ia'] +['Ġn', 'icht'] +['Ġcont', 'ents'] +['P', 'M'] +['ched', 'ule'] +['(', 'line'] +['C', 'G'] +['j', 'ob'] +['ĠRe', 'al'] +['u', 'er'] +['f', 'irm'] +['Ġ', 'Ø'] +['et', 'ro'] 
+['"', '`Ċ'] +['Ġspe', 'ech'] +['Ġth', 'r'] +['fore', 'ach'] +['Ġw', 'arn'] +['ĉ', 'l'] +['Ġhe', 'avy'] +['<', 'li'] +['N', 'e'] +['Ġinvestig', 'ation'] +['M', 'ath'] +['-', 'title'] +['Ġch', 'urch'] +['Ġdes', 'pite'] +['ch', 'ain'] +['Ġwh', 'atever'] +['ar', 'ian'] +['f', 'n'] +['Ġm', 'eta'] +['}', ')ĊĊ'] +['U', 'FF'] +['Ġregard', 'ing'] +['_S', 'UCCESS'] +['m', 'es'] +['ĠInt', 'ent'] +['Ġres', 'olve'] +['pos', 's'] +['ir', 'a'] +['for', 'ce'] +['o', 'ice'] +['Ã', '¢'] +['Ġp', 'm'] +['Ġup', 'dates'] +['A', 'rr'] +['Ġ', 'Ñ'] +['test', 'ing'] +['Ġto', 'ward'] +['nt', 'ax'] +['ë', 'ĭ'] +['Ġlist', 'en'] +['Ġgo', 'als'] +['Instance', 'State'] +['D', 'r'] +['Ġr', 'are'] +['Ġtr', 'ail'] +['Ke', 'ys'] +['C', 'al'] +['C', 'ar'] +['ĠPe', 'ople'] +['ĉ', 'local'] +['class', 'es'] +['Re', 'ference'] +['.for', 'Each'] +['em', 'b'] +['act', 'iv'] +['Ġpr', 'im'] +['red', 'ict'] +['Ġr', 'ad'] +['æķ', '°'] +['.B', 'ack'] +['Ġsp', 'read'] +['Ġc', 'lock'] +['Ġv', 'ir'] +['ed', 'itor'] +['Ġeffort', 's'] +['Ġbr', 'anch'] +['Ġind', 'ust'] +['Ġmot', 'or'] +['Ġam', 'b'] +['Ġdat', 'etime'] +['Ġren', 'cont'] +['ĠChrist', 'ian'] +['ĠAmeric', 'ans'] +['f', 'ull'] +['Ġf', 'mt'] +['.m', 'ain'] +['Ġca', 'used'] +['_', 'update'] +['ĠCont', 'ent'] +['AT', 'CH'] +['Ġb', 'ath'] +['ĠE', 'ach'] +['Ġr', 'adio'] +['ach', 'ment'] +['uz', 'z'] +['Sub', 'mit'] +['Ġre', 'strict'] +['ab', 'in'] +['ĠL', 'oad'] +['Ġext', 'ension'] +['Ġess', 'ay'] +['Ġh', 'at'] +['avi', 'our'] +['to', 'Be'] +['":', '['] +['Ġoffer', 'ed'] +['Ġv', 'ill'] +['(d', 'ouble'] +['æĹ', '¥'] +['b', 'c'] +['_f', 'ree'] +['ĠM', 'iss'] +['ĠB', 'er'] +['Ġ', 'è'] +['ĠL', 'ike'] +['Ġhelp', 'ed'] +['.get', 'Name'] +['_', 'AL'] +['Ġsp', 'irit'] +['ĠAp', 'ache'] +['w', 's'] +['Ġthere', 'fore'] +['(', 'params'] +['_', 'img'] +['Ġpe', 'ace'] +['Ġinc', 'or'] +['ĠEX', 'PECT'] +['Ġmin', 'or'] +['ip', 'es'] +['ĉ', 'data'] +['select', 'or'] +['c', 'ity'] +['tr', 'ie'] +['.b', 'ase'] +['_f', 'rame'] +['Ġopen', 'ed'] +['/', 'json'] +['L', 'Y'] +['n', 'u'] 
+['.D', 'e'] +['t', 'f'] +['m', 'argin'] +['.P', 'arse'] +['Ġp', 'i'] +['Ġe', 'q'] +['b', 'd'] +['Field', 's'] +['ĠT', 'ree'] +['Ġb', 'an'] +['ist', 'an'] +['Ċ', 'ĠĠĠĠĠĠĠĠĊ'] +['ĉg', 'l'] +['Ġprodu', 'ced'] +['s', 'ystem'] +['M', 'ark'] +['_h', 'ash'] +['Ġb', 'g'] +['Ġconst', 'it'] +['ĠLe', 'ague'] +['Ġmiss', 'ion'] +['_', 'format'] +['([', 'Ċ'] +['clus', 'ion'] +['!', '"'] +['Ð', '·'] +['b', 'reak'] +['ĉs', 'witch'] +['Ġth', 'er'] +['Trans', 'form'] +['Ġfoot', 'ball'] +['-', 'link'] +['r', 'oute'] +['.', 'auth'] +['Ġb', 'ag'] +['ov', 'ers'] +['Ġen', 'abled'] +['Ġr', 'ac'] +['(', 'I'] +['C', 'R'] +['anc', 'ing'] +['Ġman', 'aged'] +['_', 'q'] +['NG', 'TH'] +['Ġm', 'ac'] +['ĠA', 'uto'] +['ament', 'e'] +["Ġ'", "',"] +['.App', 'end'] +['Ġp', 'in'] +['.', 'item'] +['ack', 'ing'] +['Ġocc', 'as'] +['p', 'erson'] +['Ġt', 'i'] +['.Re', 'g'] +['Ġh', 'aven'] +['Ġg', 'lass'] +['Ġ"', '', ')'] +['_', 'char'] +['res', 'ource'] +['Ġep', 'isode'] +["Ġ'", '_'] +['ĠE', 's'] +['ĠEar', 'th'] +['Âł', 'Âł'] +['UP', 'DATE'] +['ĠS', 'ou'] +['u', 'is'] +['t', 'ypes'] +['Ġm', 'as'] +['Ġf', 'av'] +['Ġcon', 'struct'] +['_r', 'ate'] +['er', 'as'] +['Ġ|', 'Ċ'] +['rop', 'erties'] +['Ġext', 'ernal'] +['Ġap', 'plied'] +['Ġpre', 'fix'] +['ot', 'ed'] +['l', 'ers'] +['Ġc', 'old'] +['ĠS', 'P'] +['ĠCh', 'urch'] +['ĠOut', 'put'] +['los', 'ed'] +['ç', 'ļ'] +['ific', 'ate'] +['oper', 'ation'] +['her', 'it'] +['x', 'FF'] +['.', 'env'] +['_', 'err'] +['os', 'h'] +['D', 'irection'] +['C', 'ancel'] +['ĠFr', 'ank'] +['Ġfind', 'ing'] +['.', ')ĊĊ'] +['Ġr', 'outer'] +['ãĥ', '»'] +['s', 'es'] +['Ġc', 'row'] +['==', "'"] +['Ġs', 'and'] +['Ġr', 'id'] +['it', 'ure'] +['Ġent', 're'] +['Ġo', 'bserv'] +['Ġv', 'ac'] +['ð', 'Ł'] +['-', 'T'] +['A', 'rt'] +['n', 'ight'] +['.', 'search'] +['Ġex', 'change'] +['Ġdistr', 'ict'] +['.', 'os'] +['Ġdep', 'artment'] +['Ġdoc', 'uments'] +['Ġcent', 'ury'] +['ĠN', 'ext'] +['H', 'ost'] +['ĠK', 'IND'] +['Ġsus', 'p'] +['-', 'P'] +['re', 'nd'] +['.', 'em'] +['u', 'ite'] +['ist', 'ers'] 
+['(', 'json'] +['ĠAn', 'n'] +['w', 't'] +['at', 'i'] +['ĠHT', 'ML'] +['wh', 'en'] +['D', 'irectory'] +['Ġsh', 'ut'] +['<', 'a'] +['ed', 'y'] +['Ġhealth', 'y'] +['Ġtemper', 'ature'] +['ĠG', 'en'] +['Ġmet', 'al'] +['Ġsub', 'mit'] +['ĠD', 'O'] +['Ġat', 'tract'] +['Ġ{', '};Ċ'] +['ĠW', 'ord'] +['Ġl', 'l'] +['Ġseem', 'ed'] +['k', 'o'] +['I', 'ED'] +['Ġl', 'abor'] +['.Cont', 'ext'] +['Ġas', 'set'] +['y', 'ou'] +['Ġc', 'ars'] +['ĠC', 'olumn'] +['Ġr', 'é'] +['Ġs', 'quare'] +['ĠNS', 'String'] +['âĢĿ', ','] +['ap', 'es'] +['..', '.Ċ'] +['Ġthan', 'ks'] +['(', 'props'] +['Ġt', 'ick'] +['Ġexper', 'iment'] +['Ġpr', 'ison'] +['t', 'ree'] +['-', 'text'] +['ĠIO', 'Exception'] +['-w', 'idth'] +['_ST', 'ATUS'] +['f', 'ast'] +['-b', 'ody'] +['-', 'header'] +['Ġgu', 'ar'] +['cre', 'te'] +['ĠT', 'im'] +['Ġclear', 'ly'] +['ĠRepublic', 'an'] +['Ġjust', 'ify'] +['и', 'ÑĤ'] +['ĉ', 'ĠĠĠĠ'] +['c', 'ache'] +[';', '//'] +['Ġpres', 'ence'] +['Ġfact', 'ors'] +['Ġemploy', 'ee'] +[']', '))'] +['M', 'ember'] +['Ġselect', 'or'] +['b', 'or'] +['ĠM', 'ex'] +['çļ', 'Ħ'] +['ut', 'ex'] +['_t', 'ag'] +['ail', 'ure'] +['ĠN', 'et'] +['Ġre', 'li'] +['E', 'G'] +['Ġf', 'printf'] +['Ġte', 'en'] +['lo', 'ss'] +['Ġle', 'aving'] +['De', 'legate'] +['Ġbe', 'at'] +['Ġmin', 'ute'] +['sub', 'scribe'] +['Ġredistrib', 'ute'] +['Con', 'stants'] +['Ġcan', 'cer'] +['/', '{'] +['B', 'L'] +['Ġs', 'pan'] +['ĠCh', 'ild'] +['C', 'enter'] +['Ġear', 'th'] +['Y', 'S'] +['ĠLe', 'vel'] +['Ġse', 'a'] +['.s', 'upport'] +['.in', 'ner'] +['.', 'Item'] +['ill', 'ing'] +['ĠĠĠĠĊ', 'ĠĠĠĠĊ'] +['ĠL', 'abel'] +['ĠE', 'st'] +['(', 'arg'] +['bo', 'Box'] +['ĉf', 'oreach'] +['c', 'os'] +['F', 'ailed'] +['sw', 'ers'] +['Ed', 'itor'] +['r', 'ont'] +['ĠM', 'P'] +['ex', 'pr'] +['ĠL', 'ife'] +['Ġ?', '?'] +['ö', 'r'] +['Ġatt', 'end'] +['ĠQ', 'ue'] +['Ġspec', 'ies'] +['-', 'D'] +['Ġa', 'us'] +['Str', 'uct'] +['Ġadvant', 'age'] +['ost', 'on'] +['-b', 'lock'] +['in', 'itial'] +['C', 'RE'] +['Ġtr', 'uly'] +['Ġcomp', 'are'] +['or', 'ney'] +['Ġs', 'pect'] 
+['F', 'ull'] +['b', 'es'] +['Ġvis', 'ible'] +['Ġm', 'ess'] +['st', 'ances'] +['Ġcl', 'oud'] +['_v', 'ersion'] +['Ġf', 'urn'] +['ic', 'ago'] +['LO', 'W'] +['Ġtraff', 'ic'] +['Ġf', 'ol'] +['rypt', 'o'] +['Ġdecl', 'ar'] +['Ġsl', 'ot'] +['ĠEx', 't'] +['ĠEng', 'land'] +['ĠU', 'nder'] +['Ġt', 'a'] +['let', 'ter'] +['Ġoffic', 'er'] +['ĠDon', 'ald'] +['Y', 'es'] +['_', 'json'] +['IT', 'ableView'] +['ĠU', 'SE'] +['mploy', 'ee'] +['Ġopin', 'ion'] +['ĠA', 'ut'] +['b', 'order'] +['Ġad', 'vice'] +['Ġautom', 'atically'] +['is', 'co'] +['Ġm', 'm'] +['.', 'vis'] +['am', 'l'] +['Ġinitial', 'ize'] +['Ġ(', '{'] +['Ġ', ';ĊĊ'] +['Ġgener', 'ation'] +['Ġb', 'its'] +['clip', 'se'] +['Ġun', 'f'] +['ut', 'ors'] +['pl', 't'] +['Ġdel', 'ta'] +['est', 'roy'] +['is', 'is'] +['<', 'br'] +['Ġlimit', 'ations'] +['Ġend', 'ed'] +['ĠM', 'ad'] +['il', 'm'] +['Th', 'ese'] +['ĠMin', 'ister'] +['Ġch', 'art'] +['F', 'ragment'] +['Ġindepend', 'ent'] +['Y', 'ear'] +['Ġin', 'str'] +['Ġt', 'ags'] +['A', 'VE'] +['ĠAr', 'ch'] +['st', 'op'] +['Pro', 'gress'] +['Ġm', 'i'] +['Ġlearn', 'ed'] +['G', 'e'] +['Ġhot', 'el'] +['S', 'M'] +['T', 'YPE'] +['Ġc', 'y'] +['ERS', 'ION'] +['un', 'ately'] +['l', 'imit'] +['s', 'el'] +['Ġmov', 'ies'] +['Ġste', 'el'] +['o', 'z'] +['g', 'b'] +['ĠC', 'amp'] +['s', 'ite'] +['ĠLog', 'ger'] +['P', 'LE'] +['оÐ', '´'] +['.', 'right'] +['ĠC', 'ore'] +['Ġm', 'ixed'] +['st', 'ep'] +['Ġput', 's'] +['s', 'uper'] +['R', 'outer'] +['.', 'Http'] +['ly', 'ph'] +['ĠColor', 's'] +['Ġandroid', 'x'] +['.', 'str'] +['Ġinn', 'ov'] +['Ġde', 'ck'] +["'", '>Ċ'] +['ap', 'ers'] +[']', '('] +['cont', 'inue'] +['s', 'pec'] +['ĠR', 'oad'] +['AS', 'H'] +['ili', 'ar'] +['Ġcontin', 'ues'] +['Ġapp', 'oint'] +['Ġ#', 'Ċ'] +['ĠV', 'ir'] +['Ġ?>', '"'] +['Ġb', 'in'] +['}', '",'] +['go', 'ing'] +['e', 'ach'] +['B', 'D'] +['ĠA', 'ccess'] +['D', 'oc'] +['ĠMan', 'agement'] +['B', 'ER'] +['ask', 'et'] +['.get', 'Instance'] +['Ġestablish', 'ed'] +['so', 'cket'] +['IN', 'S'] +['ĉv', 'irtual'] +['ĉ', 'result'] +['RE', 'AD'] 
+['_', 'height'] +['ĠF', 'ont'] +['Ġ(', ');Ċ'] +['_', 'html'] +['Ġneighb', 'or'] +['l', 'or'] +['Ġg', 'ather'] +['Ġ}', ')ĊĊ'] +['Ġid', 'entity'] +['Ġf', 'ab'] +['p', 'adding'] +['ĠR', 'oute'] +['Enumer', 'able'] +['Ã', '´'] +['Ġfor', 'ced'] +['/j', 'query'] +['.ĊĊ', 'ĊĊĊĊ'] +['res', 'ents'] +['_', 'left'] +['.P', 'aram'] +['ĉ', 'throw'] +['ĠH', 'am'] +['Ġevent', 'ually'] +['ac', 'er'] +['p', 'ub'] +['Ġtr', 'a'] +['un', 'ique'] +['d', 'el'] +['ĠFlor', 'ida'] +['ĠC', 'lean'] +['x', 'a'] +['ĠÂ', '·'] +['Ġvalid', 'ate'] +['Vis', 'ual'] +['Ex', 'pression'] +['_f', 'unc'] +['m', 'ember'] +['ĉ', 'h'] +['tr', 'l'] +['ĉ', 'G'] +['nap', 'shot'] +['ĠProp', 'Types'] +['v', 'in'] +[']', ')ĊĊ'] +['ow', 'l'] +['if', 'ies'] +['Ġ$', "('."] +['ĠCont', 'ext'] +['ĠTo', 'ast'] +['.', 'Key'] +['Ġoffic', 'ers'] +['/', 'n'] +['s', 'n'] +['und', 'efined'] +['.', 'items'] +['ut', 'ow'] +['am', 'age'] +['Ġaccount', 's'] +['ook', 'ie'] +['Se', 'ction'] +['ici', 'ans'] +['Ġad', 'vis'] +['(', 'is'] +['[:', ','] +['ĠFr', 'ance'] +['F', 'unc'] +['ic', 'ious'] +['Ġto', 'k'] +['Ch', 'annel'] +['ĠA', 'D'] +['_N', 'UM'] +['Ġtime', 'out'] +['lem', 'ma'] +['rem', 'e'] +['u', 'j'] +['.A', 'l'] +['uc', 'lear'] +['(', 'os'] +['("', '<'] +['[', 'Ċ'] +['f', 'etch'] +['Ġb', 'al'] +['Ġgu', 'id'] +['-', 'align'] +['ĠW', 'rite'] +['ĠOn', 'ce'] +['utow', 'ired'] +['OD', 'ULE'] +['Ġp', 'itch'] +['C', 'F'] +['by', 'tes'] +['ĠCom', 'mission'] +['Ġincre', 'd'] +['P', 'ER'] +['_', 'response'] +['ĠL', 'os'] +['par', 'ser'] +['Ġass', 'ume'] +['.', 'Request'] +['ĠT', 'oken'] +['_p', 'osition'] +['Ġn', 'om'] +['-', 'term'] +['Ġrem', 'aining'] +['i', 'ostream'] +['Ġpie', 'ces'] +['ap', 'y'] +['ĠL', 'ess'] +['r', 'ange'] +['umb', 'n'] +['pr', 'ise'] +['_', 'option'] +['Im', 'pl'] +['k', 'wargs'] +['Ġbusiness', 'es'] +['Al', 'ert'] +['Ġpart', 'ies'] +['ĠCont', 'ainer'] +['ĠPr', 'ivate'] +['ĠPl', 'an'] +['Ġregister', 'ed'] +['Ġj', 'our'] +['ack', 'er'] +['ен', 'и'] +['/', '>'] +['ch', 'at'] +['se', 'ct'] +['Ġcre', 'ation'] 
+['olut', 'ely'] +['Ġinst', 'ant'] +['Ġdel', 'ivery'] +['ick', 'en'] +['y', 'es'] +['ĠFr', 'anc'] +['bl', 'ing'] +['end', 'a'] +['[', '('] +['_r', 'ange'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠ'] +['Ġsched', 'ule'] +['Con', 'n'] +['Ġthan', 'k'] +['x', 'd'] +['Ġh', 'ook'] +['Ġdocument', 'ation'] +['Param', 'eters'] +['H', 'ello'] +['v', 't'] +['Ġart', 'icles'] +['Ġw', 'est'] +['def', 'ined'] +['.', 'select'] +['ok', 'ens'] +['ĠV', 'AL'] +['.f', 'ile'] +['res', 'et'] +['Ġmy', 's'] +['ĠM', 'A'] +[']', '),'] +['Ġc', 'ities'] +['rel', 'ated'] +['å', 'Ľ'] +['Ġappe', 'ared'] +['Ġw', 'id'] +['.p', 'anel'] +['ĠIn', 's'] +['.', 'entity'] +['Ġde', 'cre'] +['ĠL', 'ou'] +['(t', 'ime'] +['ĠTh', 'ank'] +['.create', 'Element'] +['Ġmention', 'ed'] +['oun', 'ce'] +['ĠT', 'ry'] +['ĠW', 'all'] +['/', 'images'] +['ĠM', 'enu'] +["'", 'čĊ'] +['ĠE', 'r'] +['Ġcrit', 'ic'] +['ĠY', 'ear'] +['(', 'param'] +['Ġf', 'lo'] +['N', 'N'] +['oot', 'er'] +['Ġ', '];Ċ'] +['ĠA', 'ff'] +['"', 'github'] +['room', 's'] +['Ġh', 'yp'] +['g', 'lobal'] +['Ġa', 'vec'] +['æľ', 'Ī'] +['Ġcomplet', 'ion'] +['Ġcon', 'd'] +['onym', 'ous'] +['(', 'temp'] +['Ġst', 'ars'] +['Ġre', 'levant'] +['Ġcover', 'ed'] +['Ġel', 'im'] +['_t', 'ypes'] +['(', 'bool'] +['Ġt', 'u'] +['_ex', 'ists'] +['Ġsec', 'ure'] +['Ġst', 'ored'] +[']', '/'] +['x', 'F'] +['ĠCont', 'roller'] +['Ġm', 'igr'] +['M', 'I'] +['ĠD', 'en'] +['Ġann', 'ual'] +['U', 'IL'] +['-', 'and'] +['Ġcr', 'ime'] +['b', 'el'] +['Ġk', 'itchen'] +['@', 'g'] +['_p', 'h'] +['ourn', 'ament'] +['ĠS', 'ocial'] +['ĠS', 'pecial'] +['log', 'ger'] +['Ġt', 'ail'] +['Ġun', 'known'] +['d', 'ed'] +['Ġapp', 'rec'] +['(d', 'b'] +['c', 'f'] +['Ġass', 'ign'] +['-', 'out'] +['ĠM', 'ont'] +['d', 'p'] +['w', 'idget'] +['Ġst', 'one'] +['-', 'primary'] +['.', 'grid'] +['Result', 's'] +['az', 'z'] +['Ġda', 'ughter'] +['Ġcur', 'r'] +['Ġl', 'in'] +['Ġs', 'outh'] +['form', 's'] +['ĠO', 'UT'] +['let', 'te'] +['ak', 's'] +['ig', 'ure'] +['ĠE', 'U'] +['var', 'iable'] +['Ġb', 'rief'] +['ĠSc', 'ott'] 
+['Ġcon', 'ference'] +['and', 'a'] +['_', 'lock'] +['or', 'al'] +['Ġe', 'ine'] +['OR', 'S'] +['////////////////////////////////', '////////////////////////////////'] +['ess', 'o'] +['Ġr', 'is'] +['Ġg', 'ender'] +['est', 'ic'] +['L', 'icense'] +['(', 'out'] +['Ġm', 's'] +['Se', 'e'] +['Ġwill', 'ing'] +['az', 'e'] +['Ġs', 'ports'] +['Ġy', 'es'] +['l', 'u'] +['Ġp', 'urs'] +['/j', 'avascript'] +['-', 'pro'] +['nav', 'bar'] +['_pro', 'duct'] +['/', 'bootstrap'] +['Ġdr', 'iving'] +['Ġ', 'Ä'] +['Ġpro', 'pos'] +['ult', 'ip'] +['up', 'lic'] +['.', 'email'] +['Ġappro', 'x'] +['(', 'cl'] +['Ġwe', 'ar'] +['Ġrep', 'ly'] +['ass', 'et'] +['Ġ', 'ice'] +['Ġt', 'x'] +['k', 'r'] +['ĠGerman', 'y'] +['ĠGe', 'orge'] +['Ġc', 'b'] +['ĉ', 'err'] +['M', 'ove'] +['Ġpol', 'y'] +['vo', 'ice'] +['}', '"'] +['Ġan', 'imal'] +['A', 'v'] +['ĠL', 'ocation'] +['Ġn', 'ative'] +[']', '["'] +['<', 'double'] +['Ġm', 'ais'] +[',', 'int'] +['Ġpre', 'par'] +['Ġinter', 'val'] +['plement', 'ation'] +['_', 'ERR'] +['Ġb', 'ug'] +['>', '"'] +['st', 'at'] +['Ġ}', ',čĊ'] +['<', 'span'] +['Ġfa', 'ith'] +['Ġ', 'rom'] +['pre', 'v'] +['ĠE', 'lect'] +['F', 'ind'] +['Ġg', 'od'] +['ot', 'or'] +['//', '----------------------------------------------------------------'] +['orig', 'inal'] +['C', 'pp'] +['ĠSen', 'ate'] +['Ġposition', 's'] +['Ġweap', 'ons'] +['Ġco', 'ff'] +['Ġpur', 'poses'] +['p', 'ol'] +['Ġim', 'press'] +['Ġanim', 'als'] +['.', 'Entity'] +['(n', 'p'] +['Ġmur', 'der'] +['Ġ`', '`'] +['fl', 'ag'] +['Ġsol', 'utions'] +['ĠAct', 'ive'] +['Ġb', 'right'] +['.d', 'ate'] +['Ġsit', 'u'] +['ï¼', 'Ī'] +['.', 'ID'] +['Ġs', 'ie'] +['),', 'čĊ'] +['ak', 't'] +['S', 'pace'] +['.d', 'at'] +['.index', 'Of'] +['h', 'an'] +['az', 'ine'] +['ĠZ', 'e'] +['Ġcr', 'ash'] +['(', '/'] +['>', '='] +['Ð', '±'] +['iv', 'a'] +['.Auto', 'Size'] +['ĠL', 'at'] +['_', 'ext'] +['Initial', 'ize'] +['.reg', 'ister'] +['OP', 'Y'] +['Ġre', 'verse'] +['_d', 'is'] +["']", '['] +['Ġprom', 'pt'] +['ont', 'o'] +['ĠJ', 'ournal'] +['r', 'outer'] +['Ġmys', 
'qli'] +['#', 'else'] +[')', '"'] +['-x', 's'] +['let', 's'] +['ph', 'an'] +['.', 'LE'] +['W', 'ill'] +['Ġaff', 'ord'] +['Ġsk', 'ill'] +['-t', 'oggle'] +['N', 'C'] +['B', 'ind'] +['T', 'S'] +['J', 'ust'] +['iter', 'al'] +['Y', 'P'] +['ĉ', 'unsigned'] +['Ġw', 'ind'] +['))', ':Ċ'] +['Ġw', 'arning'] +['ĠW', 'ater'] +['Ġd', 'raft'] +['Ġc', 'm'] +['Ġs', 'am'] +['Ġhold', 'ing'] +['z', 'ip'] +['ĠSc', 'ience'] +['Ġsup', 'posed'] +['G', 'en'] +['Ġdi', 'et'] +['<', 'h'] +['ĠP', 'ass'] +['v', 'i'] +['Ġhus', 'band'] +['�', '�'] +['n', 'ote'] +['ĠAb', 'out'] +['ĠIn', 'stitute'] +['Ġcl', 'imate'] +['.Form', 'at'] +['Ġn', 'ut'] +['est', 'ed'] +['Ġapp', 'arent'] +['Ġhold', 's'] +['f', 'i'] +['new', 's'] +['C', 'M'] +['v', 'ideo'] +["':", "'"] +['D', 'ITION'] +['p', 'ing'] +['Ġsen', 'ior'] +['w', 'a'] +['--', '>Ċ'] +['_', 'default'] +['ĠD', 'atabase'] +['re', 'p'] +['E', 'SS'] +['ner', 'gy'] +['.F', 'ind'] +['_m', 'ask'] +['Ġr', 'ise'] +['Ġk', 'ernel'] +['::', '$'] +['.', 'Q'] +['Ġoffer', 'ing'] +['de', 'cl'] +['ĠC', 'S'] +['Ġlist', 'ed'] +['Ġmost', 'ly'] +['eng', 'er'] +['Ġblock', 's'] +['ol', 'o'] +['Ġgover', 'ning'] +['\\', 'F'] +['Ġcon', 'cent'] +['.get', 'Text'] +['Ġm', 'b'] +['Ġocc', 'urred'] +['Ġchang', 'ing'] +['Sc', 'ene'] +['_C', 'ODE'] +['B', 'eh'] +['"', 'The'] +['Ġt', 'ile'] +['ĠAssoci', 'ation'] +['ĉ', 'P'] +['al', 'ty'] +['_', 'ad'] +['od', 'ies'] +['i', 'ated'] +['Ġpre', 'pared'] +['poss', 'ible'] +['Ġm', 'ort'] +['TE', 'ST'] +['Ġign', 'ore'] +['Ġcal', 'c'] +['Ġr', 's'] +['Ġassert', 'Equals'] +['Ġs', 'z'] +['ĠTH', 'IS'] +['.', '"Ċ'] +['Ġcan', 'vas'] +['j', 'ava'] +['Ġd', 'ut'] +['VAL', 'ID'] +['.s', 'ql'] +['.', 'input'] +['Ġa', 'ux'] +['S', 'up'] +['Ġart', 'ist'] +['V', 'ec'] +['_T', 'IME'] +['.string', 'ify'] +['et', 'ween'] +['ĠC', 'ategory'] +['Ġ[', '-'] +['ĠDev', 'Express'] +['ĠJ', 'ul'] +['Ġr', 'ing'] +['.', 'ed'] +['Y', 'Y'] +['L', 'et'] +['Text', 'Field'] +['Ġfl', 'at'] +['_p', 'rint'] +['ĠOT', 'HER'] +['ad', 'ian'] +['Ġcheck', 'ed'] +['e', 'le'] +['Al', 
'ign'] +['stand', 'ing'] +['Ġ[', '],'] +['Ġl', 'ab'] +['uck', 'y'] +['ĠChrist', 'mas'] +['(', 'image'] +['.m', 'odule'] +['Ġl', 'ots'] +['Ġslight', 'ly'] +['(f', 'inal'] +['er', 'ge'] +['è', '¿'] +['ĠPol', 'ice'] +['ĠR', 'ight'] +['Ġaw', 'ard'] +['ĠO', 'S'] +['Ġ{', '}ĊĊ'] +['Ġp', 'tr'] +['ov', 'es'] +['ic', 'ated'] +['еÐ', '¼'] +['Ġman', 'age'] +['olid', 'ay'] +['Am', 'ount'] +['ool', 'Strip'] +['t', 'body'] +['N', 'av'] +['w', 'rap'] +['B', 'B'] +['Ġwatch', 'ing'] +['ari', 'os'] +['Ġoption', 'al'] +['_', 'K'] +['ĠL', 'icensed'] +['.M', 'ap'] +['T', 'imer'] +['ĠA', 'P'] +['ĠRe', 'v'] +['(', 'o'] +[',', 'c'] +['um', 'in'] +['eta', 'iled'] +['ĠH', 'y'] +['Ġbl', 'ank'] +['ag', 'ger'] +['ĠS', 'elf'] +['()', '['] +['.m', 'ake'] +['ear', 'n'] +['ch', 'annel'] +['<', 'pre'] +['ble', 'm'] +['_p', 'assword'] +['_s', 'p'] +['ic', 'ing'] +['e', 'z'] +['Ġthe', 'ory'] +['ĠT', 'er'] +[',', 'n'] +['log', 'o'] +['ĠHT', 'TP'] +['()', '))'] +['.h', 'andle'] +['>', ';Ċ'] +['W', 'orld'] +['Ġpy', 'thon'] +['Ġl', 'if'] +['Ġtr', 'av'] +['Ġcon', 'ven'] +['com', 'pany'] +['ĠCl', 'ub'] +['V', 'er'] +['B', 'tn'] +['Ġz', 'one'] +['product', 's'] +['ĠE', 'duc'] +['Ġver', 'ify'] +['ĠM', 'il'] +['on', 'o'] +[']', ');ĊĊ'] +['EN', 'CE'] +['Ġpack', 'et'] +['Ġc', 'er'] +['Ġen', 'umer'] +['Ġpar', 's'] +['form', 'ed'] +['Ġocc', 'up'] +['t', 're'] +['Ġexerc', 'ise'] +['D', 'ay'] +['_s', 'um'] +['Ġask', 'ing'] +['apt', 'ion'] +['Ġord', 'ers'] +['Ġsp', 'ending'] +['ĠE', 'RR'] +['.D', 'is'] +['ĠU', 'til'] +['âĢľ', 'I'] +['\\', "'"] +['?', ')'] +['/', '>Ċ'] +['Ġem', 'ot'] +['Ġinflu', 'ence'] +['ĠAfr', 'ica'] +['att', 'ers'] +['Ù', 'ħ'] +['.s', 'ession'] +['Ġch', 'ief'] +['ĉĉĉĉĉĉĉĉ', 'ĉĉĉ'] +['Ġto', 'm'] +['clud', 'ed'] +['ser', 'ial'] +['_h', 'andler'] +['.T', 'ype'] +['ap', 'ed'] +['Ġpolic', 'ies'] +['-', 'ex'] +['-', 'tr'] +['bl', 'ank'] +['mer', 'ce'] +['Ġcover', 'age'] +['Ġr', 'c'] +['_m', 'atrix'] +['_', 'box'] +['Ġcharg', 'es'] +['ĠB', 'oston'] +['P', 'e'] +['Ġcirc', 'um'] +['Ġfil', 'led'] +['Ġn', 
'orth'] +['icture', 'Box'] +['ĉ', 'res'] +['è', '®'] +['Ġter', 'min'] +['Ġ[', 'â̦'] +['IRE', 'CT'] +['Ġb', 'er'] +['Ġ"', '../../'] +['ret', 'ch'] +['.c', 'ode'] +['_c', 'ol'] +['ĠGovern', 'ment'] +['Ġarg', 'v'] +['ĠL', 'ord'] +['as', 'i'] +['Ex', 'ec'] +['ĉ', 'let'] +['vert', 'is'] +['Ġdiscuss', 'ion'] +['en', 'ance'] +['out', 'ube'] +['type', 'of'] +['Ġs', 'erved'] +['ĠP', 'ut'] +['ĉ', 'x'] +['Ġs', 'weet'] +['B', 'efore'] +['ateg', 'y'] +['.', 'of'] +['ĠM', 'aterial'] +['S', 'ort'] +['ON', 'T'] +['ig', 'ital'] +['Wh', 'y'] +['Ġs', 'ust'] +['Ġ', 'ç'] +['ab', 'et'] +['Ġseg', 'ment'] +['Ġ[', '],Ċ'] +['ĠMus', 'lim'] +['Ġfind', 'ViewById'] +['c', 'ut'] +['_T', 'EXT'] +['ĠM', 'ary'] +['Ġlo', 'ved'] +['Ġl', 'ie'] +['ĠJ', 'O'] +['Ġis', 'set'] +['mon', 'th'] +['Ġpr', 'ime'] +['t', 'i'] +['ĠCar', 'ol'] +['U', 'se'] +['ĠP', 'op'] +['ĠS', 'ave'] +['Int', 'erval'] +['ex', 'ecute'] +['d', 'y'] +['ĠI', 'ran'] +['_', 'cont'] +['ĉ', 'T'] +['Ġph', 'ase'] +['check', 'box'] +['we', 'ek'] +['Ġh', 'ide'] +['Ġt', 'il'] +['Ġj', 'u'] +['C', 'ustom'] +['b', 'urg'] +['/', 'M'] +['T', 'ON'] +['Ġqu', 'ant'] +['Ġr', 'ub'] +['ix', 'els'] +['Ġinst', 'alled'] +['Ġd', 'ump'] +['Ġproper', 'ly'] +['(', 'List'] +['Ġdec', 'ide'] +['app', 'ly'] +['H', 'as'] +['Ġkeep', 'ing'] +['Ġcitiz', 'ens'] +['Ġj', 'oint'] +['p', 'ool'] +['S', 'ocket'] +['_', 'op'] +['Ġweap', 'on'] +['gn', 'ore'] +['ĠEx', 'ec'] +['ott', 'en'] +['ĠM', 'S'] +['Ġ(', '-'] +['ĠRe', 'view'] +['Ġex', 'amples'] +['Ġt', 'ight'] +['!', '('] +['D', 'P'] +['ĠMessage', 'Box'] +['Ġphot', 'ograph'] +['UR', 'I'] +['é', 't'] +['l', 'ow'] +['ĠGr', 'and'] +['.p', 'ersistence'] +['Ġmaint', 'ain'] +['Ġnum', 's'] +['Ġz', 'ip'] +['ial', 's'] +['ĠG', 'ets'] +['pe', 'g'] +['ĠB', 'uffer'] +['~~', '~~'] +['ra', 'structure'] +['ĠP', 'L'] +['u', 'en'] +['ob', 'by'] +['size', 'of'] +['Ġp', 'ic'] +['Ġse', 'ed'] +['Ġexperi', 'enced'] +['Ġo', 'dd'] +['Ġk', 'ick'] +['Ġproced', 'ure'] +['avig', 'ator'] +['-', 'on'] +[',', 'j'] +['ĠAl', 'though'] +['Ġuser', 'Id'] 
+['ac', 'cept'] +['Bl', 'ue'] +['IC', 'olor'] +['l', 'ayer'] +['av', 'ailable'] +['Ġend', 's'] +['.t', 'able'] +['Ġdat', 'aset'] +['b', 'us'] +['Ġexpl', 'ain'] +['(', 'pro'] +['ĠCommit', 'tee'] +['Ġnot', 'ed'] +[']', ':Ċ'] +['D', 'im'] +['std', 'io'] +['.', '",Ċ'] +['_s', 'ource'] +['ĠWe', 'ek'] +['ĠEd', 'ge'] +['Ġoper', 'ating'] +['Ġest', 'e'] +['i', 'pl'] +['ag', 'ination'] +['Ġpro', 'ceed'] +['Ġanim', 'ation'] +['.Model', 's'] +['ĠW', 'atch'] +['i', 'at'] +['Ġopp', 'on'] +['/', 'A'] +['Re', 'port'] +['Ġs', 'ounds'] +['_b', 'uf'] +['IEL', 'D'] +['Ġbu', 'nd'] +['ĉ', 'get'] +['.p', 'r'] +['(t', 'mp'] +['Ġk', 'id'] +['>ĊĊ', 'Ċ'] +['Ġy', 'ang'] +['Not', 'Found'] +['Ñ', 'Ĩ'] +['m', 'ath'] +['@g', 'mail'] +['ĠL', 'IMIT'] +['red', 'ients'] +['Ġv', 'ent'] +['avig', 'ate'] +['L', 'ook'] +['Ġrelig', 'ious'] +['Ġr', 'and'] +['ri', 'o'] +['(', 'GL'] +['_', 'ip'] +['u', 'an'] +['ici', 'ency'] +['ĠCh', 'ange'] +['>', 'čĊčĊ'] +['ĠEnt', 'ity'] +['Ġrencont', 're'] +['ĠR', 'et'] +['pl', 'an'] +['é', 'n'] +['BO', 'OL'] +['ur', 'ies'] +['tr', 'ain'] +['Def', 'inition'] +['========', '===='] +['z', 'z'] +['An', 'imation'] +['ĠO', 'K'] +['_m', 'enu'] +['.b', 'l'] +['_s', 'core'] +['Ġac', 'ad'] +['(', 'System'] +['Ġref', 'resh'] +["'=>", '$'] +['.G', 'raphics'] +['ament', 'o'] +['p', 'id'] +['t', 'c'] +['Ġt', 'ips'] +['Ġhom', 'es'] +['Ġf', 'uel'] +['â', 'ĸ'] +['_h', 'elper'] +['ĠĠ', 'čĊ'] +['ĠR', 'oom'] +['.C', 'lose'] +['_', 'attr'] +['ĠM', 'ount'] +['ĠE', 'v'] +['ar', 'ser'] +['_t', 'op'] +['e', 'ah'] +['ĠDe', 'lete'] +['ãĢ', 'į'] +['u', 'ke'] +['Ġus', 'age'] +['ar', 'ia'] +['_de', 'v'] +['Ġtext', 'ure'] +['Ġconvers', 'ation'] +['e', 'per'] +['Be', 'an'] +['d', 'one'] +['non', 'atomic'] +['ĠSe', 'cond'] +['Ġshoot', 'ing'] +['_p', 're'] +['Com', 'ponents'] +['Ġ]', 'ĊĊ'] +['__', ','] +['stit', 'ution'] +['.Ch', 'ar'] +['>', '();ĊĊ'] +['Ġpresent', 'ed'] +['Ġw', 'a'] +['ok', 'er'] +['-', 'ĊĊ'] +['in', 'er'] +['Ġbe', 'coming'] +['Ġinc', 'ident'] +['At', 't'] +['Ġreve', 'aled'] +['for', 
'c'] +['Ġbo', 'ot'] +['.p', 'age'] +['Enumer', 'ator'] +['_', '->'] +['Ph', 'oto'] +['Ġs', 'pring'] +['.', '",'] +['ĠD', 'ictionary'] +['B', 'JECT'] +['Ġloc', 'ations'] +['Ġs', 'amples'] +['Input', 'Stream'] +['ĠB', 'rown'] +['Ġst', 'ats'] +['qual', 'ity'] +['Ñ', 'ħ'] +['-d', 'is'] +['Ġhelp', 'ing'] +['Ġp', 'ed'] +['(', 'se'] +['ĠWh', 'o'] +['al', 'ian'] +['int', 'ernal'] +['Ġf', 't'] +['>', '().'] +['->', '{'] +['Ġm', 'ine'] +['Ġs', 'ector'] +['Ġg', 'ro'] +['Ġopport', 'unities'] +['ĠÃ', '¼'] +['Ġm', 'p'] +['Ġalleg', 'ed'] +['Ġdoub', 't'] +['M', 'ouse'] +['Ab', 'out'] +['_p', 'art'] +['Ġch', 'air'] +['Ġstop', 'ped'] +['lo', 'op'] +['ent', 'ities'] +['Ġapp', 's'] +['ans', 'ion'] +['Ġm', 'ental'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠ'] +['F', 'R'] +['Ġdef', 'end'] +['c', 'are'] +['Ġide', 'al'] +['/', 'api'] +['ur', 'face'] +['Ġe', 'le'] +['ul', 'ator'] +['ĠR', 'ights'] +['angu', 'ages'] +['Ġfund', 's'] +['Ġad', 'apt'] +['At', 'tributes'] +['Ġdep', 'loy'] +['opt', 's'] +['Ġvalid', 'ation'] +['Ġconcern', 's'] +['u', 'ce'] +['.n', 'um'] +['ult', 'ure'] +['il', 'a'] +['Ġc', 'up'] +['Ġp', 'ure'] +['.F', 'ore'] +['ĠHash', 'Map'] +['.value', 'Of'] +['as', 'm'] +['M', 'O'] +['Ġc', 's'] +['Ġst', 'ores'] +['Ġ', '************************************************************************'] +['Ġcommunic', 'ation'] +['m', 'em'] +['.Event', 'Handler'] +['.', 'Status'] +['_', 'right'] +['.set', 'On'] +['S', 'heet'] +['Ġident', 'ify'] +['ener', 'ated'] +['order', 'ed'] +['Ġ"', '['] +['Ġs', 'we'] +['Con', 'dition'] +['ĠA', 'ccording'] +['Ġpre', 'pare'] +['Ġro', 'b'] +['P', 'ool'] +['Ġs', 'port'] +['r', 'v'] +['ĠR', 'outer'] +['Ġaltern', 'ative'] +['(', '[]'] +['ĠCh', 'icago'] +['ip', 'her'] +['is', 'che'] +['ĠDirect', 'or'] +['k', 'l'] +['ĠW', 'il'] +['key', 's'] +['Ġmy', 'sql'] +['Ġw', 'elcome'] +['k', 'ing'] +['ĠMan', 'ager'] +['Ġca', 'ught'] +[')', '}Ċ'] +['S', 'core'] +['_P', 'R'] +['Ġsur', 'vey'] +['h', 'ab'] +['He', 'aders'] +['AD', 'ER'] +['Ġdec', 'or'] +['Ġturn', 's'] 
+['Ġr', 'adius'] +['err', 'upt'] +['C', 'or'] +['Ġm', 'el'] +['Ġin', 'tr'] +['(', 'q'] +['ĠA', 'C'] +['am', 'os'] +['M', 'AX'] +['ĠG', 'rid'] +['ĠJes', 'us'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠ'] +['.D', 'E'] +['Ġt', 's'] +['Ġlink', 'ed'] +['f', 'ree'] +['ĠQ', 't'] +['Ġ/**', 'čĊ'] +['Ġf', 'aster'] +['ct', 'r'] +['_', 'J'] +['D', 'T'] +['.C', 'heck'] +['Ġcomb', 'ination'] +['Ġint', 'ended'] +['-', 'the'] +['-', 'type'] +['ect', 'ors'] +['am', 'i'] +['ut', 'ing'] +['Ġum', 'a'] +['X', 'ML'] +['U', 'CT'] +['A', 'p'] +['ĠR', 'andom'] +['Ġr', 'an'] +['.s', 'ort'] +['Ġsort', 'ed'] +['.', 'Un'] +['_P', 'ER'] +['it', 'ory'] +['Ġprior', 'ity'] +['ĠG', 'al'] +['ĠO', 'ld'] +['h', 'ot'] +['ĠD', 'isplay'] +['(s', 'ub'] +['_T', 'H'] +['_', 'Y'] +['ĠC', 'are'] +['load', 'ing'] +['K', 'ind'] +['_h', 'andle'] +[',', ','] +['r', 'ase'] +['_re', 'place'] +['.add', 'EventListener'] +['ĠR', 'T'] +['Ġenter', 'ed'] +['g', 'ers'] +['Ġ', 'ich'] +['(', 'start'] +['/', 'app'] +['Ġbro', 'ther'] +['M', 'emory'] +['Out', 'let'] +['Ġ', 'utf'] +['pre', 'c'] +['Ġn', 'avigation'] +['OR', 'K'] +['Ġd', 'st'] +['D', 'etail'] +['Ġaud', 'ience'] +['Ġd', 'ur'] +['Ġcl', 'uster'] +['un', 'ched'] +['Ġ', '],'] +['Ġcomfort', 'able'] +['.', 'values'] +['ĠT', 'otal'] +['Ġsn', 'ap'] +['Ġstand', 'ards'] +['Ġperform', 'ed'] +['h', 'and'] +['("', '@'] +['å', 'Ń'] +['Ġph', 'il'] +['ib', 'r'] +['tr', 'im'] +['Ġfor', 'get'] +['Ġdo', 'ctor'] +['.Text', 'Box'] +['icon', 's'] +[',', 's'] +['ĠO', 'p'] +['S', 'm'] +['St', 'op'] +['ĉ', 'List'] +['ĉ', 'u'] +['Com', 'ment'] +['_V', 'ERSION'] +['.X', 'tra'] +['P', 'erson'] +['r', 'b'] +['LO', 'B'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĊ'] +['ĠCent', 'ral'] +['IC', 'K'] +['ra', 'q'] +['Ġput', 'ting'] +['Ġm', 'd'] +['ĠL', 'ove'] +['Pro', 'gram'] +['B', 'order'] +['o', 'or'] +['Ġallow', 'ing'] +['a', 'fter'] +['Ġent', 'ries'] +['ĠMay', 'be'] +[']', ').'] +['ĠSh', 'ort'] +[')', '\\'] +['.n', 'ow'] +['f', 'riend'] +['Ġpre', 'fer'] +['ĠG', 'PIO'] +['os', 'is'] +['ĠGame', 'Object'] 
+['Ġsk', 'ip'] +['Ġcompet', 'ition'] +['_m', 'atch'] +['lic', 'ations'] +['_CON', 'T'] +['.group', 'Box'] +['Ġal', 's'] +['"', 'We'] +['_e', 'q'] +['l', 'an'] +['_', 'search'] +['ĠMus', 'ic'] +['as', 'is'] +['Ġb', 'ind'] +['ĠIs', 'land'] +['r', 'um'] +['(', 'E'] +['Ġse', 'at'] +['V', 'ideo'] +['Ġa', 'ck'] +['ree', 'k'] +['={', '()'] +['Ġr', 'ating'] +['Ġrestaur', 'ant'] +['DE', 'X'] +['(b', 'uf'] +['pp', 'ing'] +['ual', 'ity'] +['Ġle', 'ague'] +['Ġfoc', 'used'] +['ap', 'on'] +['$', 'data'] +['CL', 'UD'] +['CLUD', 'ING'] +['Ġabs', 'olute'] +['(', 'query'] +['Ġtell', 's'] +['A', 'ng'] +['Ġcomm', 'unities'] +['Ġhon', 'est'] +['ok', 'ing'] +['Ġap', 'art'] +['ar', 'ity'] +['/', '$'] +['_m', 'odule'] +['ĠE', 'nc'] +['.', 'an'] +['.Con', 'fig'] +['C', 're'] +['Ġsh', 'ock'] +['ĠAr', 'ab'] +['I', 'ENT'] +['/', 're'] +['Ġre', 'trie'] +['ycl', 'er'] +['is', 'a'] +['ĠO', 'rgan'] +['.', 'graph'] +['Ġ', 'í'] +['ĠB', 'AS'] +['En', 'um'] +['Ġposs', 'ibly'] +['ÑĢ', 'аÐ'] +['ĠJapan', 'ese'] +['Ġc', 'raft'] +['ĠPl', 'ace'] +['Ġtal', 'ent'] +['Ġfund', 'ing'] +['Ġconf', 'irmed'] +['Ġc', 'ycle'] +['/', 'x'] +['G', 'E'] +['Ġhe', 'aring'] +['Ġpl', 'ants'] +['Ġm', 'outh'] +['p', 'ages'] +['or', 'ia'] +['ĠRem', 'ove'] +['_t', 'otal'] +['Ġo', 'd'] +['oll', 'apse'] +['do', 'or'] +['Ġb', 'ought'] +['Ġadd', 'r'] +['AR', 'CH'] +['_d', 'im'] +['dd', 'en'] +['Ġdec', 'ades'] +['RE', 'QUEST'] +['Ġvers', 'ions'] +['f', 'ire'] +['Ġmov', 'es'] +['f', 'b'] +['Ġcoff', 'ee'] +['.con', 'nect'] +['ĠR', 'ow'] +['Ġs', 'chema'] +['S', 'cope'] +['-', 'Type'] +['Ġfight', 'ing'] +['Ġret', 'ail'] +['Ġmod', 'ified'] +['T', 'F'] +['File', 's'] +['n', 'ie'] +['_com', 'mand'] +['st', 'one'] +['Ġ', 'ÑĤ'] +['_', 'thread'] +['Ġb', 'ond'] +['ĠDevelop', 'ment'] +['Ġp', 't'] +['F', 'ORM'] +['ple', 't'] +['Ġident', 'ified'] +['c', 'pp'] +['Ġc', 'oding'] +['ok', 'ed'] +['ĠM', 'aster'] +['ID', 'TH'] +['Ġres', 'idents'] +['red', 'it'] +['ĠPh', 'oto'] +['=', '-'] +['un', 'te'] +['ate', 'ur'] +['_ST', 'ATE'] +['ĠS', 'ing'] 
+['Ġshe', 'et'] +['.', 'val'] +['or', 'se'] +['Ġh', 'ers'] +['Ġdetermin', 'ed'] +['Com', 'mon'] +['Ġw', 'ed'] +['_', 'queue'] +['P', 'H'] +['ĠAt', 'l'] +['cre', 'd'] +['/L', 'ICENSE'] +['Ġm', 'es'] +['Ġadv', 'anced'] +['.j', 'ava'] +['.S', 'h'] +['G', 'o'] +['k', 'ill'] +['f', 'p'] +['_set', 'tings'] +['Ġp', 'al'] +['Ġtr', 'uck'] +['Ġcomb', 'ined'] +['Ġ"', '${'] +['ĠCor', 'por'] +['Ġjo', 'ined'] +['ĠJ', 'ose'] +['ĠC', 'up'] +['un', 's'] +['est', 'ival'] +['lev', 'ision'] +['Ġbro', 'ken'] +['Ġmar', 'riage'] +['ĠWest', 'ern'] +['Ġrep', 'resents'] +['ĠT', 'itle'] +['Ġs', 's'] +['.A', 'ss'] +['ongo', 'ose'] +['ient', 'o'] +['<', '>();Ċ'] +['Ġabs', 'olutely'] +['Ġsm', 'ooth'] +['TER', 'N'] +['ĠUn', 'less'] +['W', 'ord'] +['Ġmer', 'ge'] +['ig', 'an'] +['ĠV', 'ol'] +['Ġn', 'n'] +['.get', 'Id'] +['ĠÐ', '·'] +['Ġsex', 'y'] +['Ġseek', 'ing'] +['S', 'ingle'] +['.', 'this'] +['Ġk', 'om'] +['b', 'ound'] +[';', '"'] +['Ġfont', 'Size'] +['_d', 'f'] +['Ġinj', 'ury'] +['(', 'H'] +['Ġiss', 'ued'] +['_', 'END'] +[':', 'self'] +['Ġp', 'atch'] +['Ġle', 'aves'] +['Ġad', 'opt'] +['File', 'Name'] +['ãĢ', 'IJ'] +['Ġexec', 'utive'] +['ĠBy', 'te'] +[']', '))Ċ'] +['Ġn', 'u'] +['out', 'ing'] +['clud', 'ing'] +['-', 'R'] +['.', 'options'] +['Ġsub', 'stant'] +['av', 'ax'] +['ĠB', 'UT'] +['Ġtechn', 'ical'] +['Ġtw', 'ice'] +['Ġm', 'ás'] +['Ġun', 'ivers'] +['y', 'r'] +['Ġdr', 'ag'] +['ĠD', 'C'] +['Ġs', 'ed'] +['Ġb', 'ot'] +['ĠP', 'al'] +['ĠH', 'all'] +['forc', 'ement'] +['Ġa', 'uch'] +['.m', 'od'] +['not', 'ation'] +['_file', 's'] +['.l', 'ine'] +['_fl', 'ag'] +['[', 'name'] +['Ġres', 'olution'] +['Ġb', 'ott'] +['("', '['] +['end', 'e'] +['(', 'arr'] +['F', 'ree'] +['(', '@"'] +['ĠD', 'istrict'] +['PE', 'C'] +[':', '-'] +['P', 'icker'] +['ĠJ', 'o'] +['ĠĠĠĠĠ', 'Ċ'] +['ĠR', 'iver'] +['_', 'rows'] +['Ġhelp', 'ful'] +['Ġmass', 'ive'] +['---', 'Ċ'] +['Ġmeas', 'ures'] +['ĠR', 'untime'] +['Ġwor', 'ry'] +['ĠS', 'pec'] +['ĉ', 'D'] +['ãĢ', 'ij'] +['Ġ)', '{Ċ'] +['Ġwor', 'se'] +['(f', 'ilename'] +['Ġl', 'ay'] 
+['Ġmag', 'ic'] +['ĠThe', 'ir'] +['ou', 'l'] +['st', 'roy'] +['ĠWh', 'ere'] +['Ġsu', 'dden'] +['Ġdef', 'e'] +['Ġb', 'inding'] +['Ġfl', 'ight'] +['ĠOn', 'Init'] +['ĠW', 'omen'] +['ĠPol', 'icy'] +['Ġdrug', 's'] +['ish', 'ing'] +["('", '../'] +['ĠM', 'el'] +['pe', 'at'] +['t', 'or'] +['Ġpro', 'posed'] +['Ġst', 'ated'] +['_RE', 'S'] +['Ġe', 'ast'] +['ĠCON', 'DITION'] +['_d', 'esc'] +['Ġwin', 'ning'] +['fol', 'io'] +['M', 'apper'] +['ĠP', 'an'] +['ĠAn', 'ge'] +['.s', 'ervlet'] +['Ġcop', 'ies'] +['L', 'M'] +['Ġv', 'm'] +['å', 'į'] +['Ġd', 'ictionary'] +['S', 'eg'] +['el', 'ines'] +['ĠS', 'end'] +['Ġ', 'iron'] +['ĠF', 'ort'] +['.d', 'omain'] +['Ġdeb', 'ate'] +['Not', 'Null'] +['e', 'q'] +['ach', 'er'] +['l', 'f'] +['ĉf', 'mt'] +['Ġlaw', 'y'] +['Ä', 'Ł'] +['ĠM', 'en'] +['Ġtr', 'im'] +['(', 'NULL'] +['Ġ!', '!'] +['Ġp', 'ad'] +['Ġfollow', 's'] +['"]', '["'] +['re', 'qu'] +['ĠE', 'p'] +['.g', 'ithub'] +['(', 'img'] +['et', 'o'] +["('", '\\'] +['S', 'ervices'] +['umbn', 'ail'] +['_m', 'ain'] +['ple', 'ted'] +['fort', 'unately'] +['Ġw', 'indows'] +['Ġpl', 'ane'] +['ĠCon', 'nection'] +['.', 'local'] +['u', 'ard'] +['}', '\\'] +['==', '"'] +['and', 'on'] +['ĠR', 'oy'] +['w', 'est'] +['ig', 'inal'] +['em', 'ies'] +['it', 'z'] +["')", ':Ċ'] +['ĠP', 'eter'] +['Ġt', 'ough'] +['Ġredu', 'ced'] +['Ġcalcul', 'ate'] +['Ġrap', 'id'] +['c', 'ustomer'] +['Ġeff', 'icient'] +['Ġmed', 'ium'] +['Ġf', 'ell'] +['.', 'ref'] +['ĠC', 'as'] +['Ġfeed', 'back'] +['S', 'peed'] +['(', 'output'] +['aj', 'e'] +['Ġc', 'ategories'] +['Ġfe', 'e'] +['}', ';'] +['Ġde', 'leted'] +['re', 'h'] +['Ġpro', 'of'] +['D', 'esc'] +['B', 'uild'] +['Ġs', 'ides'] +['.Array', 'List'] +['-', '%'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠ'] +['Ø', '±'] +['.m', 'atch'] +['л', 'и'] +['Ġfe', 'els'] +['Ġachie', 've'] +['Ġcl', 'im'] +['_', 'ON'] +['ĠC', 'D'] +['Ġteach', 'er'] +['_c', 'urrent'] +['b', 'n'] +['_P', 'L'] +['ist', 'ing'] +['En', 'able'] +['G', 'EN'] +['Ġt', 'v'] +['Ġso', 'ck'] +['Ġpl', 'ays'] +['Ġdis', 'count'] 
+['ĠK', 'E'] +['ĠDe', 'bug'] +['F', 'ore'] +['ĠI', 'raq'] +['Ġappear', 'ance'] +['M', 'on'] +['Ġst', 'yled'] +['ĠH', 'uman'] +['i', 'ot'] +['ĠH', 'istory'] +['Ġs', 'ac'] +['ĠC', 'ollection'] +['Ġrecomm', 'ended'] +['.Se', 'lected'] +['Ġorgan', 'izations'] +['Ġdiscover', 'ed'] +['co', 'hol'] +['ad', 'as'] +['ĠThom', 'as'] +['M', 'ay'] +['Ġcons', 'erv'] +['Ġdom', 'in'] +['ĠF', 'ollow'] +['ĠSe', 'ction'] +['ĠTh', 'anks'] +['User', 'name'] +['Ġrec', 'ipe'] +['Ġwonder', 'ful'] +['.s', 'leep'] +['_', 'if'] +['ĉĊ', 'ĉĊ'] +['orn', 'o'] +['Ġr', 'u'] +['_t', 'arget'] +['."', '"'] +['à', '¦'] +['Event', 'Args'] +['Ġinput', 's'] +['Ġf', 'if'] +['Ġv', 'ision'] +['c', 'y'] +['ĠS', 'eries'] +[')', '((('] +['Ġtr', 'ading'] +['Ġmark', 'er'] +['B', 'egin'] +['Ġtyp', 'ically'] +['Ġca', 'uses'] +['drop', 'down'] +['_DE', 'BUG'] +['Ġdet', 'ect'] +['c', 'ountry'] +['!', '");Ċ'] +['ĉ', 'R'] +['app', 'y'] +['Ġc', 'ref'] +["('", '<'] +['"', '=>'] +['ĠL', 'E'] +['read', 'er'] +['Ġadmin', 'istr'] +['Ã', 'µ'] +['uck', 'et'] +['Ġf', 'ashion'] +['.', 'char'] +['iz', 'ar'] +['Ġdis', 'able'] +['Ġsu', 'c'] +['ĠL', 'ive'] +['iss', 'ue'] +['Ġmet', 'adata'] +['fl', 'ags'] +['Ġ', 'ðŁ'] +['Ġcomm', 'itted'] +['Ġv', 'a'] +['Ġr', 'ough'] +["Ġ''", "'Ċ"] +['Ġhigh', 'light'] +['_var', 's'] +['V', 'O'] +['Ġenc', 'oding'] +['-', 'Z'] +['_s', 'ign'] +['$', '("#'] +['Ġr', 'ain'] +['reate', 'st'] +['ĠEN', 'D'] +['Se', 'lection'] +['Ġcandid', 'ates'] +['Ġs', 'av'] +['.', 'Empty'] +['Ġdec', 'isions'] +['Ġcoll', 'abor'] +['rid', 'ge'] +['fe', 'ed'] +['ress', 'ion'] +['Ġperson', 's'] +['V', 'M'] +['eg', 'a'] +['_B', 'IT'] +['A', 'ccording'] +['ack', 'ed'] +['Ġdoll', 'ars'] +['_lo', 'ss'] +['ĠC', 'ost'] +['}', '"Ċ'] +['Not', 'ification'] +['Ġpro', 'stit'] +['Ġauthor', 'ity'] +['.re', 'c'] +['Ġsp', 'okes'] +['ĠT', 'oday'] +['ist', 'ant'] +['ĠHe', 'ad'] +['âĢĿ', '.'] +['ertain', 'ment'] +['ce', 'an'] +['cul', 'ate'] +['Ġv', 'en'] +['How', 'ever'] +['_', 'arr'] +['Ġtok', 'ens'] +['G', 'raph'] +['ĠJ', 'ud'] +['ĠVir', 
'gin'] +['ĠS', 'erial'] +['un', 'ning'] +['M', 'utable'] +['ag', 'ers'] +['.c', 'sv'] +['Ġdevelop', 'ing'] +['Ġinstruction', 's'] +['Ġprom', 'ise'] +['Ġrequest', 'ed'] +['_', 'encode'] +['/', '"'] +['ĠI', 'con'] +['u', 'ilt'] +['-', 'day'] +['Ġint', 'elligence'] +['.', 'IS'] +['ĠO', 'bservable'] +['ĠH', 'ard'] +['Bo', 'ol'] +['ident', 'ial'] +['.An', 'chor'] +['Ġsell', 'ing'] +['C', 'I'] +['AG', 'ES'] +['t', 'le'] +['b', 'ur'] +['UFF', 'ER'] +['R', 'Y'] +['Ġbig', 'ger'] +['Ġr', 'at'] +['Ġfam', 'ous'] +['Ġtyp', 'ename'] +['Ġexpl', 'ained'] +['}', '}Ċ'] +['Ġn', 'uclear'] +['-', 'N'] +['Ġcr', 'isis'] +['ĠEnt', 'er'] +['Ġan', 'swers'] +['/', '${'] +['/', 'pl'] +['Ġse', 'qu'] +['_n', 'ext'] +['m', 'ask'] +['Ġstand', 'ing'] +['Ġpl', 'enty'] +['ĠC', 'ross'] +['ĉ', 'ret'] +['d', 'ro'] +['ĠC', 'ast'] +['=', 'true'] +['ĠCh', 'ris'] +['ic', 'io'] +['ĠM', 'ike'] +['Dec', 'imal'] +['add', 'Component'] +['L', 'en'] +['Ġco', 'ck'] +['Ġ#', '{'] +['UR', 'N'] +['<', 'tr'] +['Ġauthor', 'ities'] +['Res', 'ources'] +['-', 'H'] +['B', 'ottom'] +['_', 'qu'] +['put', 'er'] +['ester', 'day'] +['Dis', 'patch'] +['s', 'ince'] +['Ġfam', 'iliar'] +[',', 'i'] +['V', 'C'] +['Ġm', 'ent'] +[',', 'C'] +['Ġfre', 'edom'] +['Ġr', 'outes'] +['ĠB', 'uy'] +['Ġcomm', 'ands'] +['Ġm', 'esh'] +['/', 'C'] +['ĠSet', 'tings'] +['-', 'style'] +['Ġw', 'itness'] +['Ġc', 'le'] +['Ġun', 'ion'] +['ef', 'ault'] +['are', 't'] +['Ġthought', 's'] +['Ġ', '----'] +['_pro', 'cess'] +['_', 'us'] +['ing', 'ly'] +['U', 'ES'] +['T', 'ouch'] +['ĠÐ', '¼'] +['_', 'open'] +['ĠV', 'ec'] +['Ġre', 'ward'] +['.C', 'lick'] +['/', ':'] +['Ġn', 'ie'] +['Ch', 'anges'] +['M', 'onth'] +['ï¼', 'Ł'] +['Ġexec', 'ution'] +['Ġbe', 'ach'] +['(', 'Integer'] +['ĉ', 'a'] +['/', "'"] +['.Font', 'Style'] +['Ġab', 'ort'] +['ĠS', 'ingle'] +['(', 'isset'] +['Ġd', 'p'] +['Ġ}}', ''] +['Ġ*', '='] +['ĠP', 'S'] +['Ġdanger', 'ous'] +['[', 'p'] +['OM', 'E'] +['O', 'ther'] +['ĠString', 'Builder'] +['Point', 's'] +['head', 'ing'] +['Ġc', 'urrency'] +['Ġpercent', 
'age'] +['_A', 'PI'] +['Ġclass', 'ic'] +['the', 'ad'] +['ĠM', 'O'] +['F', 'E'] +['Id', 'x'] +['aw', 'ait'] +['ĠÃ', '¨'] +['Ġacc', 'ident'] +['Ġvari', 'ant'] +['Ġm', 'yst'] +['ĠL', 'and'] +['ĠB', 're'] +['Ġh', 'arm'] +['ĠA', 'cc'] +['Ġcharg', 'ed'] +['ion', 'es'] +['Vis', 'ibility'] +['ar', 'ry'] +['ĠL', 'anguage'] +['Ġwalk', 'ing'] +['"', '.ĊĊ'] +['if', 'er'] +['Ġleaders', 'hip'] +['.F', 'rom'] +['yn', 'am'] +['Ġt', 'imestamp'] +['i', 'pt'] +['ĠH', 'as'] +['REF', 'ER'] +['ĠIt', 's'] +['Ġlist', 'ener'] +['UT', 'E'] +['_d', 'escription'] +['Ġexperi', 'ences'] +['Ġcre', 'ates'] +['R', 'S'] +['c', 'art'] +['bl', 'ack'] +['Ġcho', 'ices'] +['w', 'ar'] +["Ġ''", "'"] +['Ġorder', 'ed'] +['Ġeven', 'ing'] +['Ġp', 'il'] +['Ġt', 'un'] +['ĠB', 'ad'] +['(', 'app'] +['r', 'andom'] +['Ġexp', 'licit'] +['Ġarr', 'ived'] +['Ġf', 'ly'] +['Ġecon', 'om'] +['-m', 'ail'] +['Ġlist', 's'] +['Ġarch', 'itect'] +['ĠP', 'ay'] +['Ġd', 's'] +['ĠS', 'ol'] +['Ġveh', 'icles'] +['H', 'z'] +['-', 'com'] +['Ġk', 'ing'] +['_e', 'qual'] +['ĠH', 'elp'] +['Ġab', 'use'] +['--', ';Ċ'] +['Ġex', 'tr'] +['Ġchem', 'ical'] +['ä', '¿'] +['Ġor', 'ient'] +['Ġbre', 'ath'] +['ĠS', 'pace'] +['(e', 'lement'] +['w', 'ait'] +['DE', 'D'] +['ig', 'ma'] +['Ġent', 'r'] +['Ġs', 'ob'] +['-', 'name'] +['Ġaff', 'ected'] +['ik', 'a'] +['Ġco', 'al'] +['_w', 'ork'] +['Ġhundred', 's'] +['Ġpolit', 'ics'] +['sub', 'ject'] +['Ġconsum', 'er'] +['ANG', 'E'] +['Ġrepe', 'ated'] +['S', 'end'] +['Ġ#', '['] +['Ġprot', 'ocol'] +['Ġlead', 's'] +['use', 'um'] +['E', 'very'] +['Im', 'port'] +['(c', 'ount'] +['Ġchalleng', 'es'] +['Ġnov', 'el'] +['Ġdep', 'art'] +['b', 'its'] +['.C', 'urrent'] +['Ġ`', '${'] +['ot', 'ing'] +['(', '\\'] +['Ġcreat', 'ive'] +['Ġbu', 'ff'] +['Ġintrodu', 'ced'] +['us', 'ic'] +['mod', 'ules'] +['A', 're'] +['-d', 'oc'] +['l', 'anguage'] +['_c', 'ache'] +['Ġto', 'd'] +['?', '>', '{{'] +['ĠRes', 'ource'] +['ĠSt', 'andard'] +['ĠP', 'rem'] +['up', 'dated'] +['ival', 'ent'] +['Ġas', 'sets'] +['_t', 'emp'] +['Ġinterest', 's'] 
+['Ġhard', 'ware'] +['ĠR', 'om'] +['ĠSh', 'are'] +["Ġ'", "'Ċ"] +['Ġ*', ','] +['ĠT', 'ake'] +['ĠIm', 'ages'] +['_C', 'HECK'] +['(type', 'of'] +['ĠJ', 'un'] +['\\<', '^'] +['Ġli', 'qu'] +['Ġwor', 'st'] +['ymb', 'ols'] +['ĉĉĉ', 'ĠĠĠ'] +['Ġdr', 'ivers'] +['ĠD', 'ocument'] +['en', 'o'] +['ĠTechn', 'ology'] +['Ġappro', 'ved'] +['ump', 's'] +['Ġs', 'now'] +['form', 'ance'] +['_A', 'SSERT'] +['u', 'its'] +['Ù', 'Ĩ'] +['Ġdiffer', 'ences'] +['.', 'Visible'] +['ĉĉĉ', 'čĊ'] +['ĠP', 's'] +['_f', 'etch'] +['Ġto', 'do'] +['.', "',Ċ"] +['Ġs', 'el'] +['ur', 'ers'] +['in', 'valid'] +['Ġt', 'weet'] +['V', 'EL'] +['Ġresearch', 'ers'] +['Ġs', 'printf'] +['ĠR', 'O'] +['Ġp', 'el'] +['.Tr', 'ans'] +['Ġil', 'legal'] +['d', 'ialog'] +['sm', 'arty'] +['l', 'g'] +['_M', 'IN'] +['Ġher', 'o'] +['f', 'inal'] +['Ġp', 'p'] +['.L', 'e'] +['Ġc', 'i'] +['ĉ', 'RT'] +['Ġsuggest', 'ed'] +['p', 'df'] +['ach', 'ing'] +['ĠR', 'o'] +['ĠProp', 'erties'] +['ĠS', 'i'] +['Ġbuy', 'ing'] +['Ġm', 'u'] +['Ġl', 'ands'] +['if', 'iers'] +['ĠF', 'ILE'] +['RO', 'UP'] +['Ġh', 'older'] +['ĠS', 'on'] +['Ġsym', 'pt'] +['.r', 'oute'] +[')', '?'] +['Ġarg', 'c'] +['Ġfor', 't'] +['Ġcas', 'ino'] +['_c', 'ategory'] +['Ġfor', 'um'] +['p', 'refix'] +['apt', 'ure'] +['T', 'ube'] +['em', 's'] +['im', 'ize'] +['Ġn', 'ue'] +['a', 'us'] +['c', 'ourse'] +['AT', 'OR'] +['()', '),'] +['Ad', 'vertis'] +['ING', 'S'] +['Ġack', 'now'] +['ĠKore', 'a'] +['pl', 'ing'] +['Ġwork', 'er'] +['PL', 'IED'] +['h', 'al'] +['ĠRich', 'ard'] +['Element', 's'] +['ĉĉĉ', 'Ġ'] +['st', 'ar'] +['Ġrelationship', 's'] +['Ġche', 'ap'] +['AC', 'H'] +['ĠX', 'ML'] +[',', '&'] +['ĠLou', 'is'] +['Ġr', 'ide'] +['_F', 'AIL'] +['Ġch', 'unk'] +['[', 's'] +['_O', 'UT'] +['Ġch', 'osen'] +['_', '['] +['/', '('] +['ĠJ', 'eff'] +['_s', 'l'] +['pr', 'iv'] +['ĠCan', 'adian'] +['Ġun', 'able'] +['_F', 'LAG'] +['Ġn', 'os'] +['h', 'igh'] +['Ġl', 'ift'] +['f', 'un'] +['()', '{'] +['el', 'ly'] +['ycler', 'View'] +['_', 'as'] +['_L', 'IST'] +['Ġr', 'adi'] +['.get', 'Value'] +['ĠAnge', 
'les'] +['ĠS', 'pan'] +['_in', 'stance'] +['it', 'ors'] +['Ġm', 'igration'] +['A', 'K'] +['O', 'h'] +['Â', '®'] +['.', 'selected'] +['ĠG', 'T'] +['Ġadv', 'ance'] +['ĠSt', 'yle'] +['.Data', 'GridView'] +['e', 'ction'] +['Ñ', 'İ'] +['p', 'io'] +['ro', 'g'] +['Ġsh', 'opping'] +['ĠR', 'ect'] +['I', 'lluminate'] +['O', 'U'] +['ĉ', 'array'] +['Ġsubstant', 'ial'] +['Ġpre', 'gn'] +['Ġprom', 'ote'] +['IE', 'W'] +['.L', 'ayout'] +['Ġsign', 's'] +['/', '.'] +['Ġlet', 'ters'] +['Bo', 'ard'] +['ct', 'rl'] +['"', '\\'] +['ĠJ', 'ones'] +['Ġvert', 'ex'] +['Ġj', 'a'] +['Ġaff', 'ili'] +['Ġwe', 'alth'] +['ĉ', 'default'] +['Ġsignificant', 'ly'] +['Ġe', 'c'] +['Ġx', 's'] +['act', 'ual'] +['.p', 'er'] +['_st', 'ep'] +['an', 'vas'] +['m', 'ac'] +['Ġtrans', 'l'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ'] +['Iter', 'ator'] +['Ġo', 'ch'] +['agnost', 'ic'] +['ĠD', 'uring'] +['ĠDE', 'FAULT'] +['Ġt', 'ill'] +['Ġsign', 'ature'] +['Ġb', 'ird'] +['ĠO', 'l'] +['ĠI', 'r'] +['H', 'S'] +['av', 'atar'] +['ESS', 'AGE'] +['Ġe', 'lev'] +['Ġm', 't'] +['ĠN', 'av'] +['Ġrel', 'ax'] +['Ġpl', 'ate'] +['IT', 'EM'] +['(', 'date'] +['.n', 'ot'] +['Ġgr', 'ade'] +['Ġ}', '),Ċ'] +['?', '"ĊĊ'] +['i', 'ences'] +['H', 'igh'] +['ĠD', 'IS'] +['dis', 'abled'] +['Q', 'UI'] +['Ġno', 'ise'] +['a', 'ux'] +['ĠU', 'P'] +['os', 'a'] +['Ġv', 'oc'] +['Ġ', '))'] +['oc', 'om'] +['_O', 'FF'] +['ĠD', 'b'] +['L', 'ock'] +['.e', 'clipse'] +[',', 'd'] +['ĠD', 'raw'] +['Ġ"', '('] +['Ġvis', 'ited'] +['Ġâ', 'Ī'] +['Ġsuc', 'ceed'] +['Ġim', 'possible'] +['a', 'ire'] +['ĠT', 'urn'] +['Ġd', 'ish'] +['F', 'G'] +['Ġs', 'ensor'] +['AN', 'N'] +['ab', 'a'] +['Ġsur', 'g'] +[']', ');čĊ'] +['Ġf', 'p'] +['_', 'an'] +['-', 'J'] +['-', 'G'] +['ĠJ', 'ob'] +['Con', 'vert'] +['ĠKE', 'Y'] +['Ġauth', 'ors'] +['_s', 'erver'] +['\\', 'r'] +['Ġ-*', '-'] +['f', 'lex'] +['Ġs', 'oc'] +['R', 'et'] +['Ġs', 'alt'] +['Ġâ̦', 'ĊĊ'] +['ĠC', 'lear'] +['(p', 'age'] +['-d', 'anger'] +['Ġroom', 's'] +['con', 'v'] +['#', '{'] +['.', 'op'] +['ĠA', 'rea'] 
+['_S', 'C'] +['h', 'en'] +['Ġbeg', 'ins'] +['-', 'y'] +['Ġexc', 'ited'] +['Ġign', 'ored'] +['Ġbon', 'us'] +['st', 'udent'] +['ĠM', 'ember'] +['Ġrel', 'atively'] +['ĠL', 'ow'] +['ĠPro', 'du'] +['ate', 'way'] +['pos', 'ure'] +['Ġth', 'ick'] +['ani', 'el'] +['(', 'view'] +['ĠCr', 'ush'] +['Ext', 'ension'] +['I', 'l'] +['e', 'ed'] +['LO', 'C'] +['.', 'im'] +['.', 'Items'] +['Ġconflic', 't'] +['.pre', 'vent'] +['Ġon', 'Create'] +['u', 'v'] +['is', 'er'] +['Ġw', 'ave'] +['M', 'ar'] +['ĠComm', 'unity'] +['ic', 'he'] +['ĠNo', 'thing'] +['[', 'm'] +['ĠLe', 'e'] +['ri', 'ends'] +['è', 're'] +['!!', '!'] +['an', 'z'] +['.', 'result'] +['ĠS', 'K'] +['_P', 'ARAM'] +['Ġdem', 'ocr'] +['Back', 'Color'] +['.ex', 'ists'] +['"', 'It'] +['(', 'options'] +['ra', 'zy'] +['as', 'er'] +['\\', 'Database'] +['al', 'endar'] +['_', 'ass'] +[';', '}Ċ'] +['vert', 'ex'] +['ine', 'craft'] +['W', 'arning'] +['arg', 'o'] +['Ġact', 'or'] +['ĠInst', 'ead'] +['ĠUs', 'ing'] +['S', 'elf'] +['@', 'interface'] +['Ġspe', 'aking'] +['ĠPar', 'is'] +['ĠL', 'ICENSE'] +['.n', 'ode'] +['ĠF', 'ood'] +['E', 'IF'] +['ĠB', 'i'] +['.', 'Start'] +['ĠI', 'B'] +['Ġun', 'iversity'] +['ĠHe', 'ader'] +['.pro', 'duct'] +['C', 'opy'] +['et', 'c'] +['r', 'ical'] +['Ġ>', '>>'] +['book', 's'] +['Ġal', 'gorithm'] +["Ġ'", '__'] +['(j', 'avax'] +['Ġnumer', 'ous'] +['Sh', 'are'] +['H', 'ave'] +['Ġrec', 'ru'] +['Ġpro', 've'] +['.sub', 'string'] +['he', 'alth'] +['е', 'л'] +['Ġdec', 'imal'] +['Ġcomm', 'ission'] +['s', 'cription'] +['x', 'C'] +['Ġsum', 'mary'] +['att', 'ed'] +['Ġclo', 'ser'] +['fin', 'ished'] +['()', '){Ċ'] +['ĠW', 'ood'] +['_field', 's'] +['k', 'u'] +['_', 'items'] +['Fl', 'ag'] +['Ġconf', 'idence'] +['ĠF', 'ederal'] +['du', 'x'] +['Ġcomp', 'at'] +['Ġvert', 'ical'] +['Ð', '¹'] +['è', 's'] +[';', '">Ċ'] +['_m', 'anager'] +['()', '))Ċ'] +['ID', 'E'] +[':', '",'] +['__', 'Ċ'] +['ĠW', 'ay'] +['Ñ', 'Ī'] +['T', 'emp'] +['ĠS', 'TR'] +['rit', 'ten'] +['S', 'ync'] +['ĠA', 'V'] +['ĠC', 'EO'] +['ĠG', 'uid'] +['Ġenvironment', 
'al'] +['Ġcorrespond', 'ing'] +['ĉ', 'console'] +['Ġjust', 'ice'] +['ĠJ', 'S'] +['Ġl', 'ived'] +['g', 'ar'] +['ĠG', 'raph'] +['ĠSt', 'at'] +['Ġi', 'Phone'] +['.', 'al'] +['ĠH', 'D'] +['Ġocc', 'ur'] +['Ġth', 'reshold'] +['Ġon', 'click'] +['RE', 'G'] +['.Graphics', 'Unit'] +['M', 'eta'] +['Å', '¾'] +['Ġc', 'um'] +['.g', 'nu'] +['Ã', '«'] +['Ġobt', 'ained'] +['Ġcompl', 'aint'] +['Ġe', 'ating'] +['Ġt', 'ar'] +['_t', 'ask'] +['Ġopt', 's'] +['(', 'to'] +['P', 'ass'] +['Ġpl', 'astic'] +['t', 'ility'] +['ĠW', 'in'] +['.prevent', 'Default'] +['p', 'ile'] +['ĠG', 'ar'] +['Ġqu', 'antity'] +['_l', 'ast'] +['Ġg', 'reatest'] +['D', 'ao'] +['_D', 'IS'] +['ĠUs', 'ed'] +['ĠH', 'P'] +['rit', 'ing'] +['S', 'ION'] +['bl', 'ue'] +['d', 'omain'] +['Ġs', 'cores'] +['N', 'ormal'] +['_', 'admin'] +['ĠA', 'SSERT'] +['Th', 'en'] +['**', '*'] +['d', 'ist'] +['l', 'on'] +['Ġh', 'ate'] +['sh', 'al'] +['Image', 'View'] +['d', 'atabase'] +['Ġp', 'and'] +['Ġlog', 'ic'] +['=', 'false'] +['b', 'g'] +['ĠConfig', 'uration'] +['Ġn', 'ur'] +['O', 'G'] +['Ġmar', 'ried'] +[':', '+'] +['Ġdro', 'pped'] +['Ġreg', 'istration'] +['оÐ', '¼'] +['ult', 'iple'] +['iz', 'ers'] +['sh', 'ape'] +['.c', 'opy'] +['Ġwe', 'aring'] +['ĠC', 'ath'] +['Ġded', 'icated'] +['Ġ..', '.Ċ'] +['Ġadv', 'oc'] +['ĠF', 'amily'] +['Ġstat', 'ements'] +['em', 'atic'] +['ampions', 'hip'] +['Ġmot', 'iv'] +['ĠH', 'ave'] +['Ġbl', 'ow'] +['J', 'ob'] +['c', 'ert'] +['_v', 'ector'] +['inst', 'all'] +['ĠC', 'OPY'] +['em', 'bed'] +['D', 'IR'] +['ĠS', 'pring'] +['Ġex', 'hib'] +['cd', 'n'] +['ĠCom', 'ment'] +['ĠOption', 'al'] +['.', 'player'] +['ĠD', 'ark'] +['(', 'pos'] +['ĠSh', 'ould'] +['Ġcent', 're'] +['ĠGu', 'ard'] +['ó', 'w'] +['Ġtr', 'ouble'] +['EN', 'ER'] +['(', 'unsigned'] +['_s', 'ervice'] +['Ġn', 's'] +['ul', 'ing'] +['ĠMex', 'ico'] +['ĠN', 'Y'] +['mys', 'ql'] +['Ġl', 'ic'] +['å', 'ľ'] +['M', 'r'] +['-', 'fl'] +['ĠC', 'ustomer'] +['id', 'i'] +['Ġ?', '>ĊĊ'] +['ri', 'ble'] +['Ġп', 'ÑĢ'] +['Ġs', 'izes'] +['_STR', 'ING'] +['valid', 'ation'] 
+['ĠJ', 'on'] +['(', 'Http'] +['add', 'Class'] +['N', 'odes'] +['Ġfrag', 'ment'] +['Ġsp', 'oke'] +['Ġw', 'aste'] +['J', 'oin'] +['Ġill', 'ustr'] +['el', 'i'] +['c', 'ient'] +['Ġa', 'id'] +['Ġpro', 'sec'] +["')", '{Ċ'] +['Ġpass', 'ing'] +['Ġf', 'aces'] +['Sh', 'ape'] +['_', 'Z'] +['it', 'i'] +['Ġal', 'le'] +['Ġro', 'bot'] +['ĠĠĠĠĠĠĠ', 'Ċ'] +['ĠS', 'pe'] +['Ġrece', 'iving'] +['ĠD', 'etails'] +['Ġ"', ')'] +['m', 'g'] +['_RE', 'F'] +['Ġcompar', 'ison'] +['*', ','] +['ĠF', 'ound'] +['_s', 'ession'] +['(', 'U'] +['/', 'F'] +['Ġx', 'xx'] +['N', 'etwork'] +['d', 'ers'] +['Ġcap', 'ture'] +['Ġcor', 're'] +['ĠL', 'td'] +['ĠAd', 'v'] +['[', '@'] +['Ġcl', 'ip'] +['M', 'ill'] +['ĠPro', 'file'] +['Ġend', 'if'] +['Ġob', 'lig'] +['des', 'cribe'] +['.e', 'lement'] +['riter', 'ion'] +['L', 'D'] +['er', 'ed'] +['Ġfav', 'our'] +['s', 'core'] +['ĠF', 'ilter'] +['at', 'tributes'] +['Ġcheck', 's'] +['In', 'flater'] +['ĠPl', 'us'] +['Ġscient', 'ific'] +['Ġpriv', 'acy'] +['He', 'ad'] +['Ġfe', 'at'] +['Ġdeg', 'rees'] +['ĠP', 'ale'] +[';', '">'] +['Ġfil', 'ms'] +['ĠA', 'udio'] +['ĠT', 'ag'] +['ĠE', 'nergy'] +['it', 'ar'] +['par', 'ator'] +['Ġf', 'ellow'] +['Ġev', 't'] +['ĠT', 'ri'] +['ĠD', 'AM'] +['cl', 'oud'] +['ĠP', 'assword'] +['ĠDemocr', 'ats'] +['ĠAc', 'ad'] +['$', 'lang'] +['Ġre', 'b'] +['()', ')ĊĊ'] +['н', 'Ñĭ'] +['ĠB', 'ur'] +['read', 'cr'] +['Ġh', 'ex'] +['Con', 'sole'] +['ct', 'l'] +['ous', 'el'] +['ĠWill', 'iam'] +['Ġa', 'z'] +['_P', 'ORT'] +['Ġpract', 'ices'] +['Ġany', 'where'] +['ĠP', 'osition'] +['Ġ-', '>Ċ'] +['i', 'ams'] +['.user', 'name'] +['place', 'holder'] +['Ġo', 'der'] +['ĠSecret', 'ary'] +['Ġi', 'T'] +['mon', 'd'] +['event', 's'] +['?', 'âĢĿ'] +['.S', 'ub'] +['Ġatt', 'ached'] +['Ġn', 'ão'] +['Ġest', 'ate'] +['.', 'action'] +['Ġfig', 'ures'] +['Ġ}', ');čĊ'] +['Ġsubs', 'cri'] +['.t', 'ag'] +['n', 'am'] +['.', 'plot'] +['no', 'on'] +['li', 'ament'] +['Char', 'acter'] +['.t', 'ab'] +['Ġw', 'inter'] +['ĠVar', 'iable'] +['Ġtre', 'es'] +['Ġpr', 'oud'] +['(', 'V'] +['_', 'load'] 
+['Ġh', 'ier'] +['ĠE', 'con'] +['Ġf', 'd'] +['Ġvict', 'ims'] +['R', 'est'] +['ian', 'a'] +['Ġf', 'ake'] +['.Print', 'ln'] +['Ġstr', 'len'] +['Ġs', 'ad'] +['Ġb', 'le'] +['Pro', 't'] +['Ġbutton', 's'] +['Ġte', 'levision'] +['Ġlog', 'o'] +['ext', 'ension'] +['ĉ', 'j'] +['ste', 'in'] +['acion', 'es'] +['Ġ""', '"ĊĊ'] +['Ġsim', 'p'] +['Ġrecord', 'ed'] +['Ġbr', 'ings'] +['Ġprincip', 'al'] +['Ġfe', 'es'] +['(s', 'ource'] +['k', 'dir'] +['Ġutil', 's'] +['Ġcorrect', 'ly'] +['f', 'il'] +['Ġw', 'el'] +['P', 'air'] +['-b', 'utton'] +['s', 'cale'] +['ver', 'ify'] +['[', 'c'] +['Ġ--', '-'] +['Ġes', 'cape'] +['ik', 'es'] +['Lower', 'Case'] +['ic', 'ian'] +['Ġch', 'apter'] +['ĠT', 'YPE'] +['Ġsh', 'adow'] +['Ġaw', 'esome'] +['W', 'E'] +['el', 'if'] +['Ġl', 'ambda'] +['Ġdist', 'inct'] +['Ġb', 'are'] +['-', 'off'] +['Ġcol', 'our'] +['.append', 'Child'] +['ole', 'c'] +['ag', 'a'] +['.f', 'ill'] +['ĉs', 'uper'] +['Ġad', 'j'] +['(', 'position'] +['.get', 'Item'] +['Sh', 'ort'] +['Ġtot', 'ally'] +['V', 'D'] +['ĠT', 're'] +['_', 'ep'] +['v', 'ements'] +['ĠS', 'olution'] +['Ġfund', 'ament'] +['F', 'ollow'] +['Ġfac', 'ility'] +['Ġhappen', 'ing'] +['O', 'F'] +['.text', 'Box'] +['S', 'pan'] +['ĠÂ', '«'] +['id', 'en'] +['Ġex', 'ceed'] +['(p', 'arent'] +['Ġc', 'p'] +['ç', '»'] +['Ġhas', 'n'] +['Ġp', 'ri'] +['Ġcon', 'sequ'] +['n', 'en'] +['ĠIN', 'TO'] +['I', 'gnore'] +['ĠF', 'uture'] +['Ġcar', 'bon'] +['ĠSte', 'el'] +['f', 'mt'] +['ok', 'ie'] +['Ġs', 'pl'] +['(t', 'itle'] +['-', 'info'] +['Ġde', 'als'] +['Ġfix', 'ture'] +['e', 'a'] +['D', 'iv'] +['Ġtest', 'ed'] +['_', 'return'] +[')ĊĊ', 'ĊĊ'] +['upport', 'ed'] +['ĠC', 'ook'] +['Ġpay', 'ing'] +['ĠI', 'll'] +['Ġarrest', 'ed'] +['ĠPr', 'ime'] +['_c', 'allback'] +['>', ',Ċ'] +['dr', 'iver'] +['On', 'ce'] +['ab', 'b'] +['_by', 'tes'] +['ĠS', 'ets'] +['(', 'Object'] +['Ġc', 'c'] +['Ġsh', 'ell'] +['al', 'o'] +[');', '//'] +['(', 'log'] +['ct', 'ors'] +[')', ''] +['Ġ$', '(".'] +['.p', 'os'] +['Ġbo', 'ys'] +['Ġwed', 'ding'] +['Ġag', 'ents'] +['="', '_'] 
+['ĠAr', 'my'] +['Ġh', 'int'] +['v', 'ision'] +['Ġte', 'ch'] +['ĠCon', 'nect'] +['Ġleg', 'end'] +['ĠB', 'et'] +['.B', 'ase'] +['Sub', 'ject'] +['Ġl', 'it'] +['Rem', 'ove'] +['Ġ"', ':'] +['ĠF', 'inal'] +['pear', 'ance'] +['ĠiT', 'unes'] +['Ġparticip', 'ants'] +['ĠPy', 'thon'] +['Ġbus', 'y'] +['i', 'el'] +['vert', 'ices'] +['Ġtemplate', 'Url'] +['ĠC', 'lose'] +['Im', 'g'] +['ĠCorpor', 'ation'] +['t', 'imestamp'] +['Ġext', 'end'] +['Ġwe', 'bsites'] +['Ġposs', 'ibility'] +['о', 'ÑĤ'] +['Ġk', 'ö'] +['Ġme', 'at'] +['Ġrepresent', 'ation'] +['Ġ', 'ĉĉ'] +['_ST', 'ART'] +['.app', 'ly'] +['ĠVal', 'ley'] +['ĠS', 'uccess'] +['H', 'i'] +['Ġn', 'ob'] +['ĠI', 'Enumerable'] +['_', 'select'] +['ge', 'o'] +['.', '")Ċ'] +['Ġturn', 'ing'] +['Ġfab', 'ric'] +['("', '");Ċ'] +['Ġpers', 'pective'] +['é', 'Ĺ'] +['ĠS', 'n'] +['Th', 'ank'] +[';', 'j'] +['.Param', 'eters'] +['ĉ', 'ĠĠĠĠĠĠĠĠĠĠĠ'] +['Ġfact', 's'] +['Ġun', 't'] +['.in', 'stance'] +['################################', '################################'] +['-', 'end'] +['ĠJO', 'IN'] +['ĠH', 'en'] +['Ġur', 'i'] +['åIJ', 'į'] +['Ġн', 'а'] +['ĠIn', 'fo'] +['Ġconduct', 'ed'] +['ĠÃ', '¥'] +['OUR', 'CE'] +['Ġw', 'ine'] +['J', 'ohn'] +['.Error', 'f'] +['ĠA', 'ge'] +['ound', 'ed'] +['Ġreal', 'ize'] +['Ġ]', ';'] +['Ġsub', 'sequ'] +[',', 'm'] +['(', 'User'] +['ian', 'o'] +['Ġaccom', 'pl'] +['is', 'p'] +['.st', 'd'] +['é', 'ĩ'] +['ĠB', 'ed'] +['.set', 'Attribute'] +['B', 'R'] +['ke', 'ep'] +['ĠA', 'LL'] +['Ġis', 'ol'] +['am', 'ma'] +['P', 'ackage'] +['Ġoccas', 'ion'] +['-s', 'uccess'] +['еÐ', '´'] +['ĠLIMIT', 'ED'] +['st', 'rip'] +['()', 'ĊĊĊ'] +['istrib', 'ution'] +['Color', 's'] +['Ġ+', ':+'] +['Did', 'Load'] +['al', 'er'] +['Ġt', 'id'] +['ĠL', 'ED'] +['ĠLink', 'ed'] +['ĠC', 'art'] +['()', ')čĊ'] +['_RE', 'AD'] +['Ġkill', 'ing'] +['ĠP', 'HP'] +['fe', 'ction'] +['Ġinst', 'ances'] +['c', 'v'] +['"/', '>'] +['Ġs', 'f'] +['Ġtax', 'es'] +['_', 'location'] +['ĠBit', 'coin'] +['u', 'able'] +['r', 'ank'] +['ign', 'ore'] +['tr', 'ack'] +['к', 'а'] 
+['Ġshould', 'n'] +['ĠO', 'P'] +['=>', '{Ċ'] +['Ġk', 'm'] +['Ġh', 'elper'] +['_', 'head'] +['ĠWh', 'ether'] +['oc', 'o'] +['_b', 'l'] +['Ġstat', 'istics'] +['Ġbeaut', 'y'] +['Ġto', 'g'] +['t', 'ip'] +['ëĭ', '¤'] +['Ġc', 'sv'] +['(s', 'ql'] +['std', 'lib'] +['we', 'ak'] +['Ġlik', 'es'] +['Ä', 'į'] +['Ġrepe', 'at'] +['Ġap', 'artment'] +['Ġem', 'ph'] +['_', 'edit'] +['Ġv', 'it'] +['ĉ', 'type'] +['E', 'ven'] +['ut', 'en'] +['Ġcircum', 'stances'] +['b', 'ian'] +['Ġs', 'ugar'] +['W', 'indows'] +['ì', 'ŀ'] +['Ġobs', 'erved'] +['/', 'data'] +['Ġcal', 'endar'] +['Ġstri', 'ke'] +['ĠR', 'ES'] +['_s', 'c'] +['f', 'ony'] +['ore', 'm'] +['(', 'z'] +['p', 'ower'] +['et', 'ect'] +['ĠS', 'at'] +['.d', 'escription'] +['Ġg', 'ang'] +['ĠS', 'ports'] +['ong', 's'] +['ĠB', 'undle'] +['.s', 'um'] +['on', 'ce'] +['Ġacc', 'used'] +['Ġexplo', 're'] +['Ġapprox', 'imately'] +['Ġlos', 'ing'] +['thes', 'is'] +['ĠF', 'und'] +['Ġdi', 'agn'] +['A', 'utowired'] +['prop', 'erties'] +['Ġ_', '.'] +['Ġc', 'nt'] +['ced', 'ure'] +['Ġy', 'y'] +['Ġgr', 'ant'] +['so', 'ck'] +['.inner', 'HTML'] +['Ġ]', ');Ċ'] +['ĠCON', 'FIG'] +["='", '$'] +[']', '];Ċ'] +['UN', 'D'] +['Ġg', 'lob'] +['Ġd', 'ire'] +['uff', 'le'] +['_M', 'EM'] +['Ġauth', 'entic'] +['>', '("'] +['Ġdec', 'ade'] +['ĠIm', 'port'] +['Ġorigin', 'ally'] +['Ġj', 'Query'] +['Ġindic', 'ate'] +['Ġours', 'elves'] +['S', 'w'] +['.l', 'bl'] +['ener', 'ate'] +['Ġbas', 'ically'] +['ĠH', 'om'] +['Ġ+', '#+'] +['ĠBrit', 'ain'] +['ĠK', 'ar'] +['to', 'Equal'] +['.st', 'op'] +['Ġmod', 'al'] +['is', 'i'] +['Ġsuggest', 's'] +['Ġd', 'type'] +['Ġt', 'ur'] +['b', 'f'] +['Ġconnection', 's'] +['ĠB', 'efore'] +['ist', 'ed'] +['m', 'ouse'] +['Ġpul', 'led'] +['.b', 'uild'] +['Ġlegis', 'lation'] +['Ġfor', 'th'] +['p', 'ad'] +['eg', 'o'] +['.N', 'ow'] +['Ġexc', 'iting'] +['}ĊĊ', 'ĊĊ'] +['Ġcom', 'pr'] +['Ġsh', 'ares'] +['Ġr', 'ig'] +['g', 'reen'] +['_', 'vec'] +['Ġenumer', 'ate'] +['A', 'uto'] +['ic', 'ator'] +['ĠR', 'ay'] +['as', 'se'] +['Ġh', 'oliday'] +['Ġnull', 'able'] +['g', 
'un'] +['_d', 'etails'] +['Ġwr', 'apper'] +['se', 'q'] +['ĠYou', 'ng'] +['ju', 'ana'] +['Ġ"', '__'] +['lic', 'ense'] +['ser', 've'] +['^', '('] +['id', 'ers'] +['.Rem', 'ove'] +['rop', 'down'] +["'", 'S'] +['p', 'in'] +['(t', 'oken'] +['.D', 'efault'] +['Ġreason', 'able'] +['amp', 'ion'] +['ĠS', 'ociety'] +['Ġbe', 'i'] +['erv', 'es'] +['r', 'ad'] +['ĠF', 'ox'] +['_', 'images'] +['Ġw', 'heel'] +["')", '['] +['Ġc', 'fg'] +['(', 'By'] +['Con', 'structor'] +['Ġv', 'ary'] +['.sw', 'ift'] +['Ġpro', 'xy'] +['ĉ', 'H'] +['ĠAn', 'other'] +['ĠP', 'en'] +['Ġcheck', 'ing'] +['Ġj', 'est'] +['man', 'ager'] +['Or', 'igin'] +['ug', 's'] +['o', 'ir'] +['><', '!--'] +['Ġexpress', 'ed'] +['Ġmod', 'er'] +['Ġag', 'encies'] +['Ġi', 'h'] +['-h', 'idden'] +['ious', 'ly'] +['ĠR', 'od'] +['Ġso', 'le'] +['M', 'ed'] +['.A', 'ny'] +['Ġp', 'c'] +['b', 'al'] +['Ex', 'ample'] +['ĠS', 'ale'] +['Ġst', 'rip'] +['ĠCom', 'p'] +['Ġpresident', 'ial'] +['M', 'ost'] +['put', 'ation'] +['(', 'ref'] +['ĠF', 'our'] +['_f', 'ilename'] +['Ġen', 'forcement'] +['Ø', '¯'] +['ĠGe', 'org'] +['we', 'ights'] +['/', 'l'] +['Ġag', 'gress'] +['Ġd', 'rawing'] +['and', 'y'] +['<', 'I'] +['-', 'j'] +['ak', 'a'] +['h', 'ref'] +['Ġteach', 'ers'] +['_', 'Q'] +['(', 'it'] +['ĠM', 'B'] +['Ġtemp', 'orary'] +['ire', 'base'] +['str', 'a'] +['æĹ', '¶'] +['è', '´'] +['(', 'label'] +['ou', 'p'] +['Ġtop', 'ics'] +['Ġport', 'ion'] +['id', 'os'] +['ĠJew', 'ish'] +['Ġre', 'covery'] +['Ġstand', 's'] +['#', '['] +['Ġafter', 'noon'] +['ĠArt', 'icle'] +['_', 'att'] +['Ġexpl', 'an'] +['ĠP', 'ak'] +['.setOn', 'ClickListener'] +['.', 'children'] +['Ġi', 'k'] +['+', '('] +['l', 'ag'] +['Ġdis', 'k'] +['Ġcont', 'rovers'] +['">', '&'] +['as', 'p'] +['Ġw', 'ie'] +['ĠAustral', 'ian'] +['ĠYou', 'Tube'] +['At', 'tr'] +['cont', 'ains'] +['du', 'ce'] +['ĠM', 'att'] +['at', 'ern'] +['Ġvol', 'unte'] +['Ġnew', 'sp'] +['V', 'P'] +['olt', 'ip'] +['Ġde', 'legate'] +['_m', 'eta'] +['Ġaccur', 'ate'] +['ĠEx', 'ample'] +['%', ','] +['ĠD', 'aily'] +['Ġc', 'abin'] 
+['ĠS', 'W'] +['Ġlim', 'its'] +['k', 'ip'] +['Ġar', 'my'] +['Ġend', 'ing'] +['Ġb', 'oss'] +['ĠD', 'ialog'] +['Al', 'so'] +['="#', '"'] +['ord', 'an'] +['row', 'se'] +['-', 'min'] +['Ġ"', '&'] +['_', 'loc'] +['U', 'X'] +['Ġdevelop', 'ers'] +['Ġaccur', 'acy'] +['Ġmaint', 'enance'] +['Ġhe', 'av'] +['Ġfil', 'ters'] +['.T', 'oolStrip'] +['Ġn', 'arr'] +['ĠE', 'mp'] +['ORD', 'ER'] +['ĠM', 'obile'] +['.S', 'erial'] +['.out', 'put'] +['.c', 'ol'] +['M', 'aterial'] +['um', 'a'] +['Ġconsum', 'ers'] +['sh', 'ift'] +['Ġp', 'ued'] +['Ġmin', 'i'] +['c', 'ollection'] +['Ġk', 'an'] +['.c', 'enter'] +['H', 'istory'] +['Ġben', 'ch'] +['()', ');'] +['itor', 'ies'] +['Ġcrow', 'd'] +['_c', 'all'] +['Ġpow', 'ers'] +['-', 'E'] +['Ġdis', 'miss'] +['Ġtalk', 's'] +['ĠCh', 'annel'] +['for', 'ward'] +['_', 'control'] +['/s', 'rc'] +['i', 'est'] +['****************', '********'] +['Ġbet', 'a'] +['(c', 'olor'] +['_O', 'BJECT'] +['ĠA', 'pi'] +['Ġeffect', 'ively'] +['C', 'amera'] +['s', 'd'] +['uss', 'y'] +['D', 'ict'] +['ĠE', 'ffect'] +['ib', 'ilities'] +['Ġreturn', 'ing'] +['ĠF', 'ar'] +["Ġ'", "')"] +['Ġmod', 'ules'] +['il', 'ation'] +['Ġ(', '%'] +['TR', 'GL'] +['Ġst', 'orm'] +['on', 'na'] +['ĠEX', 'P'] +['Ġs', 'pons'] +['Ġdis', 'pl'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠ'] +['f', 'all'] +['å', 'Į'] +['ign', 'Key'] +['_', 'US'] +['et', 'rics'] +['Ġhand', 'les'] +['T', 'L'] +['_', 'amount'] +['ow', 'a'] +['br', 'and'] +['ĠT', 'ool'] +['Ġus', 'ual'] +['.', 'Z'] +['cre', 'ment'] +['ad', 'ium'] +['st', 'ock'] +['Ġserv', 'ing'] +['ĠB', 'on'] +['Ġline', 'ar'] +['ĠT', 'arget'] +['ĠR', 'adio'] +['H', 'L'] +['Sh', 'ader'] +['om', 'atic'] +['ag', 'ues'] +['in', 'ity'] +['d', 'iff'] +['_', 'iterator'] +['qu', 'ot'] +['Ġ', ',Ċ'] +['c', 'allback'] +['Ġsympt', 'oms'] +['[', '_'] +['ĠB', 'ul'] +['ĠF', 'eb'] +['und', 'o'] +['_', 'account'] +['Ġtyp', 'edef'] +['и', 'Ñģ'] +['tr', 'as'] +['User', 'Id'] +['ĠP', 'enn'] +['ĠSup', 'reme'] +['}', '>'] +['user', 'Id'] +['ĠK', 'im'] +['Ġg', 'a'] +['Ġart', 
'ists'] +['å', '¸'] +['ĠAb', 'stract'] +['ok', 'emon'] +['Ġh', 'am'] +['o', 'val'] +['Ġch', 'a'] +['at', 'en'] +['å', 'Ĩ'] +['F', 'ixed'] +['Ġvul', 'ner'] +['ĠParam', 'eters'] +['qu', 'antity'] +['.C', 'lear'] +['Servlet', 'Request'] +['Ġy', 'a'] +['Ġsou', 'l'] +['trans', 'action'] +['Ġsol', 'o'] +['Ġp', 'airs'] +['æ', 'Ķ'] +['ĠG', 're'] +['_', 'word'] +['ĠC', 'C'] +['Ġg', 'i'] +['z', 'ie'] +['Ġsched', 'uled'] +['rot', 'ation'] +['gy', 'pt'] +['ul', 'ous'] +['::', '_'] +['ĠE', 'll'] +['<', '!'] +['ĉĉ', 'ĠĠ'] +['l', 'p'] +['ah', 'a'] +['C', 'opyright'] +['Ġdr', 'am'] +['Ġdi', 'agram'] +['ĠM', 'em'] +['Ġg', 'arden'] +['Com', 'p'] +['Ġattempt', 's'] +['uff', 'ix'] +['>', '()'] +['Ġphil', 'osoph'] +['_re', 'l'] +['å', '¼'] +['Ġs', 'v'] +['.se', 'cond'] +['ant', 'o'] +['.J', 'son'] +['ĠTe', 'le'] +['_', 'local'] +['_s', 'end'] +['Ġas', 'pects'] +['ì', 'Ĺ'] +['IB', 'LE'] +['Ġr', 'ail'] +['Ġwid', 'ely'] +['ash', 'ed'] +['i', 'ar'] +['in', 'f'] +['up', 'per'] +['d', 'jango'] +['_result', 's'] +['iss', 'ing'] +['Ġequ', 'ivalent'] +['OUN', 'D'] +['Ġt', 'y'] +['Ġpotential', 'ly'] +['Advertis', 'ement'] +['ĠRec', 'ord'] +['resent', 'ation'] +['_w', 'idget'] +['ound', 'ing'] +['Ġrelig', 'ion'] +['Ġcons', 'c'] +['ĠL', 'im'] +['.', 'am'] +['H', 'tml'] +["Ġ'", ':'] +['P', 'ATH'] +['_s', 'pec'] +['ort', 'ed'] +['id', 'ades'] +['_sh', 'ape'] +['Ġkeep', 's'] +['.S', 'ave'] +['ĠL', 'oc'] +['or', 'i'] +['ĠT', 'EST'] +['unic', 'ip'] +['Ġreg', 'ions'] +['Ġbelie', 'ves'] +['/', 'en'] +['pos', 'ite'] +['{', "'"] +['pre', 'pare'] +['_', 'const'] +['s', 'ample'] +['ĠWill', 'iams'] +['Ġstr', 't'] +['_', 'Get'] +['ĠAnd', 'rew'] +['.', 'active'] +['Ġl', 'ayers'] +['Visual', 'Style'] +['az', 'y'] +['ĠK', 'n'] +['Ġac', 'id'] +['ĠAs', 'ia'] +['Ġex', 'cess'] +['ĉm', 'y'] +['Ġkey', 'board'] +['ens', 'us'] +['Ġcre', 'w'] +['Ġmiss', 'ed'] +['m', 'aster'] +['ĠW', 'ild'] +['Ġnew', 'ly'] +['Ġwin', 'ner'] +['Ġst', 'ub'] +['ic', 'ode'] +['.m', 'ove'] +['D', 'omain'] +['ĠS', 'ar'] +['Ġfore', 'st'] +['LE', 
'D'] +['claim', 'er'] +['.ex', 'it'] +['ĠW', 'indow'] +['Ġres', 'istance'] +['ĠC', 'HECK'] +['("', '-'] +['ĠR', 'yan'] +['Ġp', 'ipe'] +['Ġco', 'ast'] +['DE', 'F'] +['//', '!'] +['_', 'off'] +['ex', 'it'] +['Ġult', 'imately'] +['imit', 'ive'] +['ĠKe', 'ep'] +['Ġhistor', 'ical'] +['Ġany', 'way'] +['ĠJack', 'son'] +['ock', 'er'] +['ER', 'N'] +['ĠU', 'INT'] +['y', 'ntax'] +['ER', 'Y'] +['is', 'ms'] +['Ġc', 'n'] +['Ġocc', 'urs'] +['Ġ;', ';'] +['Text', 'View'] +['A', 'E'] +['/', 'img'] +['Ġy', 'esterday'] +['-', 'default'] +['Ġt', 'iny'] +['Ġpro', 'c'] +['Ġal', 'ive'] +['ĠRE', 'G'] +['.', 'th'] +['ear', 'ing'] +['.get', 'Logger'] +['<', 'link'] +['_', 'login'] +['F', 'older'] +['ab', 'c'] +['lyph', 'icon'] +['н', 'о'] +['Ġnot', 'iced'] +['od', 'igo'] +['Ġed', 'ition'] +['im', 'ator'] +['.', 'Enabled'] +['.parse', 'Int'] +['Ġy', 'ards'] +['ĉĉĉĉĉĉĉĉ', 'ĉĉĉĉ'] +['Ġver', 'bose'] +['л', 'Ñı'] +['_B', 'Y'] +['.log', 'in'] +['.*', ';Ċ'] +['ĠM', 'id'] +['é', 'es'] +['Ġg', 'lo'] +['Ġbuild', 'ings'] +['Ġz', 'e'] +['ĠI', 'ter'] +['Ġt', 'ube'] +['ĠP', 'ot'] +['\\', 'M'] +['<', 'th'] +['br', 'idge'] +['ĠS', 'cript'] +['ĠM', 'odule'] +['Ġv', 'acc'] +['Ġinstall', 'ation'] +['v', 'y'] +['VisualStyle', 'BackColor'] +['ĠS', 'M'] +['.t', 'otal'] +['b', 'at'] +['Ġfind', 's'] +['Ġat', 'mos'] +['Sub', 'view'] +['iz', 'ard'] +['Ġrepl', 'acement'] +['lic', 'ated'] +['ap', 'is'] +['Ġlog', 'ged'] +['ĠLe', 'ft'] +['G', 'ui'] +['_', 'Type'] +['t', 'm'] +['P', 'ad'] +['Ġhouse', 'hold'] +['Ġre', 'le'] +['Ġpropos', 'al'] +['_CL', 'ASS'] +['::', '::'] +['Ġinf', 'rastructure'] +['In', 'ject'] +['/', 'html'] +['Ġad', 's'] +['iz', 'za'] +['Ġm', 'g'] +['ctr', 'ine'] +['%', 'Ċ'] +['<', 'html'] +['-', 'image'] +['Ġatt', 'orney'] +['<', 'm'] +["('", ','] +['Ġcan', 'n'] +['Ġprint', 'ln'] +['o', 'ose'] +['Ġy', 'ellow'] +['.ex', 'p'] +['p', 'ayment'] +['Ġtable', 'View'] +['aw', 'ay'] +['Ġopp', 'osition'] +['ĠAg', 'ain'] +['ĠH', 'andle'] +['Ġex', 'clusive'] +['in', 'ar'] +['é', 'r'] +['оÐ', '±'] +['ĠC', 'ODE'] 
+['emp', 'orary'] +['Ġre', 'act'] +['pi', 'pe'] +['c', 'z'] +['.', 'activity'] +['Ġlarg', 'ely'] +['Ġdis', 's'] +['ax', 'y'] +['es', 'is'] +['ĠR', 'en'] +['Ġc', 'orn'] +['.Use', 'VisualStyleBackColor'] +['d', 'ays'] +['Ġfr', 'uit'] +['In', 'sert'] +['_', 'enc'] +['E', 'st'] +['_de', 'c'] +['ĠL', 'uc'] +['Ġü', 'ber'] +['param', 'eters'] +['P', 'ERT'] +['ex', 'press'] +['_pro', 'file'] +['Un', 'known'] +['Ġrev', 'olution'] +['.add', 'ress'] +['_re', 'quire'] +['Ġun', 'iform'] +['ĠP', 'ack'] +['l', 'ar'] +['ĠU', 'ITableView'] +['Ġdep', 'ends'] +['Valid', 'ation'] +['conf', 'irm'] +['O', 'wner'] +['Ġt', 'rib'] +['h', 'et'] +['ĠI', 'de'] +['ans', 'as'] +['L', 'anguage'] +['u', 'et'] +['ĠP', 'o'] +['ĠSte', 've'] +['Ġcont', 'est'] +['_DE', 'FAULT'] +['Ġapparent', 'ly'] +['RE', 'EN'] +['Ġfrequ', 'ently'] +['Ġtrad', 'ition'] +['ocol', 'ate'] +['S', 'I'] +['ĠArg', 'ument'] +['F', 'ocus'] +['ert', 'e'] +['ĠL', 'ayout'] +['Ġd', 'x'] +['Ġgener', 'ator'] +['ĠW', 'ait'] +['P', 'olicy'] +['l', 'ights'] +['.Ex', 'ecute'] +['P', 'y'] +['Ġbed', 'room'] +['ed', 'a'] +['ra', 'id'] +['ĉs', 'ize'] +['Ġan', 'cient'] +['Ġp', 'ump'] +['Ġd', 'w'] +['Ġ(!', '('] +['Ġspec', 'ify'] +['(', 'status'] +['ĠF', 'BI'] +['.ex', 'ception'] +['Ġrem', 'ark'] +['ly', 'mp'] +['ant', 'ee'] +['Up', 'load'] +['ern', 'et'] +['é', '¡'] +['in', 'ent'] +['ĠR', 'ender'] +['d', 'm'] +['ĠM', 'emory'] +['r', 'ich'] +['ĠT', 'ools'] +['Ġk', 'ne'] +['Ġper', 'm'] +['b', 'ad'] +['Ġd', 'inner'] +['.res', 'et'] +['Ġj', 'Label'] +['Fe', 'ature'] +['.S', 'ervice'] +['Ġ(', '{Ċ'] +['Ġre', 'ferred'] +['.class', 'List'] +['Ġinit', 'With'] +['ĠText', 'View'] +['Ġne', 'ither'] +['Ġcount', 'y'] +['Ġ"', '{'] +['ç', '§'] +['Ġt', 'ack'] +['class', 'Name'] +['ĠUS', 'ER'] +['Ġre', 'new'] +['`', '`'] +['get', 'Name'] +['Ġb', 'rown'] +['Err', 'ors'] +['ert', 'o'] +['Ġsust', 'ain'] +['S', 'O'] +['let', 'es'] +['ĠIn', 'valid'] +['Ġen', 'emies'] +['un', 'ge'] +['Ġexist', 'ence'] +['err', 'a'] +['Ċ', 'ĠĠĊ'] +['utor', 'ial'] +['#', 'a'] +['p', 
'ay'] +['char', 'ge'] +['ĠI', 're'] +['ate', 'st'] +['Ġexp', 'los'] +['Ġf', 'ired'] +['N', 'ER'] +['ĠT', 'y'] +['ic', 'ion'] +['U', 'ri'] +['Ġobvious', 'ly'] +['ĠC', 'olum'] +["Ġ'", '+'] +['ĠDe', 'vice'] +['-', 'related'] +['_', 'ARG'] +['Ġv', 'or'] +['ĠLess', 'er'] +['_O', 'P'] +['Serial', 'izer'] +['Ġup', 'grade'] +['L', 'ight'] +['Ġc', 'odes'] +['++', ';čĊ'] +['Ġwrit', 'es'] +['fo', 'od'] +['Ġé', 't'] +['@', 'section'] +['Ġtrack', 's'] +['Ġserious', 'ly'] +['ch', 't'] +['(size', 'of'] +['Ġimmedi', 'ate'] +['Ġscient', 'ists'] +['Ġ{', '$'] +['_', 'ne'] +['.Anchor', 'Styles'] +['Ġaccom', 'mod'] +['ĠHar', 'ry'] +['Ġs', 'ight'] +['ĠPale', 'st'] +['ersist', 'ent'] +['Ġ', 'Ñĥ'] +['-', 'input'] +['Ġco', 'ordinates'] +['Â', '·'] +['W', 'elcome'] +['.con', 'f'] +['Ġgre', 'w'] +['Ġb', 'old'] +['ĠC', 'PU'] +['(m', 'y'] +['Ġperfect', 'ly'] +['Ġmom', 'ents'] +['ĠM', 'ovie'] +['-', 'data'] +['yst', 'al'] +['_W', 'IDTH'] +['ĠS', 'creen'] +['æ', 'Ŀ'] +['Ġdis', 'ap'] +['Ġredu', 'ction'] +['.Get', 'Component'] +['_M', 'ODULE'] +['Ġgener', 'ic'] +['Ġd', 'y'] +['all', 'er'] +['Ġc', 'url'] +['ĠB', 'ody'] +['Ġb', 'anks'] +[',', 't'] +['av', 'g'] +['Ġev', 'il'] +['Ġmanufact', 'urer'] +['Ġrece', 'iver'] +['Column', 's'] +['Ġing', 'redients'] +['ĉ', 'out'] +['qu', 'es'] +['.L', 'oad'] +['Ġslow', 'ly'] +['ĠT', 'own'] +['ĠC', 'ell'] +['_n', 'ormal'] +['_p', 'refix'] +['ĠAl', 'ert'] +['("', '{'] +['ä', 'r'] +['âĢľ', 'The'] +['ĠM', 'D'] +['Ġcour', 'ses'] +['ath', 'an'] +['é', 'Ļ'] +['oc', 'c'] +['ĠS', 'ER'] +['es', 'ign'] +['Add', 'r'] +['=', "['"] +['("', './'] +[']', '}'] +['.f', 'ont'] +['ĠInst', 'agram'] +['ĠB', 'order'] +['od', 'a'] +['Ġh', 'all'] +['Ġr', 'um'] +['_b', 'it'] +['Ġs', 'aving'] +['_d', 'own'] +['R', 'andom'] +['_reg', 'ister'] +['(', 'Context'] +['Ġoppos', 'ite'] +['R', 'oom'] +['Y', 'ES'] +['ан', 'и'] +['Ġenjoy', 'ed'] +['_r', 'un'] +['C', 'lear'] +['âĢ', 'ĺ'] +['ĠF', 'ord'] +['on', 'ic'] +['ost', 'en'] +['"]', ')'] +['_', 'auth'] +['//', 'čĊ'] +['Ġsuff', 'icient'] 
+['LE', 'S'] +['Ġph', 'en'] +['Ġo', 'h'] +['_c', 'sv'] +['Ġrout', 'ine'] +['.Are', 'Equal'] +['ay', 'lor'] +['Ġb', 'asket'] +['_COM', 'M'] +['rypt', 'ed'] +['S', 'im'] +['ĠSh', 'op'] +['Ġstud', 'io'] +['at', 'os'] +['(', 'W'] +['[', 'string'] +['ä', 't'] +['og', 'a'] +['Ġsh', 'r'] +['Ġs', 'ick'] +['An', 'other'] +['Ġdo', 'ors'] +['_N', 'E'] +['ĠTH', 'REE'] +['.', 'order'] +['raz', 'il'] +['Ġmap', 's'] +['_TR', 'UE'] +['trans', 'late'] +['Ġnear', 'by'] +['Ġn', 'ach'] +['LO', 'AT'] +['b', 'atch'] +['Ġl', 'ux'] +['ash', 'es'] +['ang', 'ers'] +['â̦', 'â̦'] +['_E', 'VENT'] +['_', 'UP'] +['Ġact', 's'] +['in', 'v'] +['_M', 'ETHOD'] +['cc', 'ion'] +['Ġret', 'ain'] +['ut', 'ch'] +['ĠÐ', '±'] +['Ġknow', 'ing'] +['Ġrepresent', 'ing'] +['N', 'OT'] +['p', 'ng'] +['Con', 'tract'] +['Ġtr', 'ick'] +['ĠE', 'dition'] +['uplic', 'ate'] +['Ġcontrol', 'led'] +['c', 'fg'] +['j', 'avascript'] +['Ġmil', 'k'] +['Wh', 'ite'] +['Se', 'quence'] +['aw', 'a'] +['Ġdiscuss', 'ed'] +['ĠB', 'ush'] +['ĠY', 'ES'] +['.f', 'actory'] +['t', 'ags'] +['Ġt', 'act'] +['Ġs', 'id'] +['$', '$'] +['ĠE', 'num'] +['Ġfr', 'ames'] +['}', ');'] +['Ġreg', 'ul'] +["']", ';čĊ'] +['Reg', 'ion'] +['ff', 'f'] +['Ġc', 'ro'] +['(', 'com'] +['="', '+'] +['St', 'udent'] +['Ġdis', 'appoint'] +['RES', 'ULT'] +['Count', 'er'] +['Ġbut', 'ter'] +['ĠH', 'a'] +['ĠD', 'igital'] +['Ġb', 'id'] +['">', '{{'] +['ing', 'ers'] +['ĠC', 'ountry'] +['_t', 'pl'] +['"]', ')Ċ'] +['/', 'k'] +['d', 'ating'] +[':', '#'] +['ĠD', 'ATA'] +['yn', 'chron'] +['_b', 'ody'] +['olly', 'wood'] +['Ġval', 'or'] +['ip', 'ient'] +['o', 'ft'] +['UB', 'L'] +['doc', 's'] +['Ġsyn', 'chron'] +['Ġform', 'ed'] +['ru', 'ption'] +['Ġlist', 'a'] +['Request', 'Mapping'] +['Ġvill', 'age'] +['Ġkn', 'ock'] +['oc', 's'] +['"', '{'] +['_fl', 'ags'] +['Ġtrans', 'actions'] +['Ġhab', 'it'] +['ĠJ', 'e'] +['ed', 'en'] +['Ġa', 'ircraft'] +['ir', 'k'] +['ĠA', 'B'] +['Ġfair', 'ly'] +['.', 'inter'] +['.A', 'ct'] +['Ġinstr', 'ument'] +['remove', 'Class'] +['.com', 'mand'] +['Ñ', 'ī'] 
+['ĉm', 'em'] +['(', 'min'] +['Ġo', 't'] +['Ġcol', 'le'] +['=', 's'] +['time', 'out'] +['Ġid', 's'] +['ĠM', 'atch'] +['ij', 'n'] +['z', 'ero'] +['Ġnetwork', 's'] +['.g', 'ov'] +['Ġint', 'el'] +['Ġsection', 's'] +['out', 'ine'] +['(c', 'md'] +['(d', 'ir'] +['ĠLI', 'ABILITY'] +['ĠB', 'log'] +['Ġbr', 'idge'] +['ĠC', 'V'] +['con', 'vert'] +['Ġ"', ')Ċ'] +['ĠB', 'ern'] +['_P', 'O'] +['e', 'val'] +['(', 'set'] +['to', 'ol'] +['Ġpay', 'ments'] +['Beh', 'aviour'] +['Ġcon', 'crete'] +['Ġel', 'ig'] +['Ġacc', 'eler'] +['Ġh', 'ole'] +['_', 'o'] +['TE', 'GER'] +['Ġgraph', 'ics'] +['O', 'wn'] +['Form', 'atter'] +['on', 'der'] +['Ġpack', 'ages'] +['/', 'a'] +['ĠK', 'now'] +['Or', 'Default'] +['Ġdut', 'y'] +['W', 'ait'] +['н', 'а'] +['_rec', 'ord'] +['[', 't'] +['M', 'esh'] +['Ġon', 'going'] +['.be', 'ans'] +['Ġt', 'an'] +['Ġinter', 'pret'] +['ast', 'ers'] +['QU', 'AL'] +['Ġleg', 's'] +['\\', 'Request'] +['-', 'file'] +['_m', 'utex'] +['ĠS', 'aint'] +['//', '#'] +['Ġpro', 'hib'] +['(', 'info'] +[':', '='] +['lin', 'ux'] +['Ġb', 'lo'] +['ot', 'ic'] +['ĉf', 'inal'] +['_ex', 'p'] +['ĠSt', 'op'] +['ap', 'ing'] +['(s', 'aved'] +['_p', 'ush'] +['Ġe', 'ase'] +['_F', 'R'] +['pons', 'ive'] +['str', 'cmp'] +[':', 'ĊĊĊĊ'] +['ä»', '¶'] +['ol', 'i'] +['Ġextrem', 'e'] +['Ġprof', 'essor'] +['Im', 'ages'] +['.IO', 'Exception'] +['Ġaddress', 'es'] +['plement', 'ed'] +['Ġincor', 'por'] +['Ġuse', 'Effect'] +['_O', 'F'] +['ĠD', 'a'] +['n', 'ombre'] +['IR', 'ST'] +['Ġdisc', 'rim'] +['Ġcomp', 'ens'] +['greg', 'ate'] +['anc', 'ell'] +['ach', 'es'] +['ĠC', 'riteria'] +['$', 'result'] +['D', 'estroy'] +['Ġsecond', 'ary'] +['W', 'atch'] +['ĠS', 'em'] +['ĠMc', 'C'] +['Ġacad', 'emic'] +['U', 'pper'] +['::', '~'] +['ut', 'ral'] +['ĠD', 'og'] +['ad', 'ed'] +['Valid', 'ator'] +['Ġder', 'ived'] +['Ġset', 'Timeout'] +['ĠK', 'en'] +['Ġtyp', 'ical'] +['ĠB', 'ob'] +['Ġb', 'ounds'] +['ĠSe', 'ason'] +['Ġc', 'razy'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠ'] +['-r', 'outer'] +['itt', 'est'] +['ĠM', 'ir'] 
+['Ġemot', 'ional'] +[',', 'v'] +['c', 'n'] +['/', 'st'] +['å', '½'] +['on', 'om'] +['Ġdecl', 'ared'] +['>', '.'] +['ail', 'ing'] +['Ġ/*', '<<<'] +['Ġnorm', 'ally'] +['(M', 'e'] +['ev', 'in'] +['lik', 'ely'] +['Ġpoint', 'ed'] +['ĠSt', 'ack'] +['Ġw', 'alls'] +['.', 'Vector'] +['me', 'an'] +[']', ']Ċ'] +['Ġlist', 'ening'] +['ad', 'v'] +['Ġsw', 'ap'] +['IF', 'T'] +['Ø', 'ª'] +['.', 'argv'] +['ul', 's'] +['<', 'option'] +['not', 'ations'] +['Ġemail', 's'] +['ĠU', 'kr'] +['ast', 'a'] +['ĠTh', 'us'] +['ĠSt', 'one'] +['Ġappe', 'al'] +['.', 'âĢĻ'] +['Ġreg', 'ulations'] +['Pre', 'ferences'] +['ĠPh', 'one'] +['ul', 'f'] +['ĠD', 'R'] +['Ġtechn', 'ologies'] +['Ġpar', 'agraph'] +['Ġnecess', 'arily'] +['.e', 'ach'] +['<', 'float'] +['res', 'a'] +['Ġunder', 'st'] +['Ġf', 'inger'] +['press', 'ed'] +['-b', 'y'] +['if', 'fer'] +['w', 'atch'] +['ĠB', 'a'] +['A', 'IM'] +['Ġwe', 'ights'] +['ĠR', 'on'] +["')", '}}'] +['[', 'self'] +['--------', '--Ċ'] +['per', 'iment'] +['Ġto', 'String'] +['x', 'ic'] +['ĠC', 'amera'] +['!', 'ĊĊĊĊ'] +['aur', 'ant'] +['P', 'refix'] +['Ġinstit', 'utions'] +[':', 'int'] +['Ġex', 'posure'] +['p', 'attern'] +['ĠLin', 'ux'] +['.n', 'umber'] +['red', 'ient'] +['Argument', 'Exception'] +['ĠCh', 'ief'] +['"', '},'] +['Ġelect', 'ronic'] +['r', 'ong'] +['er', 'd'] +['sp', 'Net'] +['ra', 'it'] +['/', "',"] +['ĠOh', 'io'] +['Cont', 'rollers'] +['Ġcontin', 'uing'] +['ĠT', 'emplate'] +['ĠE', 'th'] +['s', 'z'] +['/', 'env'] +['En', 'v'] +['%', '.'] +['art', 'ers'] +[')', '(('] +['ĠT', 'ABLE'] +['ĠÃ', '®'] +['per', 'ature'] +['pro', 'gress'] +['P', 'res'] +['ê', '°'] +['im', 'plementation'] +['Ġb', 'ien'] +['Ġstre', 'ets'] +['_M', 'SG'] +['New', 's'] +['##', '#'] +[':', '/'] +['Ġcut', 'ting'] +['x', 'B'] +['ress', 'ed'] +['_EN', 'ABLE'] +['l', 'ab'] +['Ġca', 'using'] +[']', '));Ċ'] +['b', 'ra'] +['x', 'FFFF'] +['il', 'ly'] +['plet', 'ion'] +['w', 'ill'] +['_b', 'ar'] +['Ġstruct', 'ures'] +['ĠI', 'mp'] +['Û', 'Į'] +['Ġ<', '>'] +['Ġ', '----------------'] +['_B', 'UFFER'] 
+['.d', 'ir'] +['Ġpl', 'ain'] +['Ġpe', 'er'] +['g', 'g'] +['oint', 's'] +['Ġsomew', 'hat'] +['Ġw', 'et'] +['Ġemploy', 'ment'] +['Ġtick', 'ets'] +['ir', 'ms'] +['Ġt', 'uple'] +['s', 'is'] +['$', 'sql'] +['r', 'ig'] +['Ġcon', 'version'] +['Ġg', 'es'] +['Ġconfig', 'ure'] +['eg', 'r'] +['ĠC', 'a'] +['Ġ__', "('"] +['ou', 'ston'] +['.t', 'oken'] +['Bl', 'ack'] +['Ġmag', 'azine'] +['A', 'W'] +['.', 'IN'] +['os', 'ing'] +['Ġbro', 'ke'] +['ĠC', 'ru'] +['DE', 'LETE'] +['Ġdestroy', 'ed'] +['(M', 'ath'] +['Ġappro', 'val'] +['-d', 'om'] +['ĠI', 'II'] +['table', 'View'] +['Ġdesign', 's'] +['Ġcrush', 'ing'] +['Ġcons', 'ent'] +['dir', 'name'] +['om', 'p'] +['Ġc', 'rypt'] +['?', '('] +['or', 'ough'] +['.', 'o'] +['ĉ', 'list'] +['ams', 'ung'] +['.""', '"Ċ'] +['err', 'ing'] +['G', 'oogle'] +['_p', 'air'] +['_IN', 'IT'] +['rem', 'arks'] +['Ġg', 'ear'] +['F', 'ill'] +['l', 'ife'] +['}', '")Ċ'] +['Ġsuit', 'able'] +['Ġsurpr', 'ised'] +['_RE', 'QUEST'] +['Ġman', 'ifest'] +['att', 'en'] +['Ġfr', 'ustr'] +['ov', 'ement'] +['.c', 'lick'] +['Ġi', 'i'] +['Ġexp', 'ansion'] +['ig', 's'] +['P', 'arse'] +['.Reg', 'ular'] +['R', 'ob'] +['_l', 'ayout'] +['ì', 'ł'] +['Ġtrans', 'lation'] +['ĠBe', 'aut'] +['B', 'est'] +['_C', 'OLOR'] +['<', 'label'] +['Ġliqu', 'id'] +['IT', 'S'] +['Ġpro', 'd'] +['Ġoper', 'ate'] +['UI', 'Kit'] +['Ġn', 'atur'] +['arg', 'ument'] +['_d', 'etail'] +['ĠCent', 're'] +['Ġ"', '--'] +['Ġ}}', '"'] +['lo', 'cale'] +['.t', 'v'] +['_se', 'q'] +['Ġup', 'coming'] +['Ch', 'art'] +['ĠDiv', 'ision'] +['Ġclin', 'ical'] +['Com', 'pany'] +['S', 'epar'] +['l', 'as'] +['ĠH', 'un'] +[':', 's'] +['Ġhead', 'ing'] +['оÐ', '³'] +['Ġ"', '");Ċ'] +['[', 'id'] +['b', 'ia'] +['Ġst', 'retch'] +['ic', 'ide'] +['Ġre', 'produ'] +['.pro', 'ject'] +['leg', 'end'] +['end', 'ers'] +['Ġrespons', 'es'] +['Ġon', 't'] +['rit', 'ical'] +['Ġref', 'uge'] +['ĠL', 'i'] +['Ġ:', 'ĊĊ'] +['ĠTh', 'ree'] +['.cont', 'roller'] +['_IN', 'DEX'] +['_F', 'OR'] +['\\Model', 's'] +['j', 'ax'] +['ĉex', 'it'] +['Ġâ', 'ĸ'] +['Ġc', 
'overs'] +['ĉ', 'y'] +['-', '.'] +['IND', 'OW'] +['Ġfail', 's'] +['in', 'cludes'] +['Ġf', 'ault'] +['Ġl', 'y'] +['ñ', 'o'] +['.s', 'lice'] +['ILE', 'D'] +['ĠP', 'ur'] +['ĠAs', 'ian'] +['_b', 'atch'] +['.M', 'ax'] +['v', 'l'] +['ĠCOPY', 'RIGHT'] +['Ġg', 'iant'] +['ĠMan', 'ual'] +['ĠC', 'opy'] +['Class', 'Name'] +['He', 'alth'] +['C', 'ursor'] +['IB', 'Outlet'] +['Ġt', 'we'] +['æ', '³'] +['_label', 's'] +['Ġcol', 'lected'] +['Ġfurn', 'iture'] +['Ġdeal', 'ing'] +['Control', 's'] +['ĠHot', 'el'] +['ck', 's'] +['Ġch', 'ose'] +['âĶ', 'Ģ'] +['od', 'd'] +['S', 'R'] +['Ù', 'Ĭ'] +['ì', 'Ħ'] +['Ġacc', 'ord'] +['ĠM', 'ove'] +['ĠM', 'ode'] +['ĠM', 'ock'] +['Ġthread', 's'] +['++', '++'] +['ĠO', 'ptions'] +['Ref', 'resh'] +['ĠD', 'id'] +["']", '->'] +['u', 'cc'] +['_ch', 'annel'] +['.', 'abs'] +['Ġ{', '},Ċ'] +['ĠW', 'al'] +['er', 'ior'] +['Ġmain', 'ly'] +['ĠDr', 'iver'] +['NotFound', 'Exception'] +['Ġcount', 's'] +['e', 'am'] +['Ġ&', '='] +['Q', 'uestion'] +['ĠA', 'li'] +['Ġany', 'more'] +['d', 'etail'] +['t', 'ail'] +['Ġm', 'ile'] +['ĠF', 'air'] +['Ġs', 'orry'] +['Ġsurround', 'ing'] +['Ġad', 'm'] +['De', 'v'] +['Ġmari', 'juana'] +['ĠS', 'ound'] +['ĠA', 'sh'] +['F', 'D'] +['Te', 'am'] +['.', 'port'] +['Ġ[', ']ĊĊ'] +['ub', 'ble'] +['Ġas', 'c'] +['Ġint', 'ention'] +['A', 'cc'] +['ch', 'i'] +['ust', 'ers'] +['Ġins', 'pired'] +['se', 'g'] +['CL', 'U'] +['Ġman', 'ip'] +['M', 'etadata'] +['Con', 'nect'] +['ĠB', 'eh'] +['Ġfind', 'ings'] +['Ġas', 'sembly'] +['w', 'orld'] +['Ġrem', 'ained'] +['Ġu', 'id'] +['(', '.'] +['Ġm', 'x'] +['Lo', 'op'] +['ĊĊĊĊ', 'Ċ'] +['Ġfant', 'astic'] +['wh', 'o'] +['ak', 'i'] +['ĠB', 'asic'] +['ĠY', 'et'] +['ĠUs', 'ers'] +['ik', 'ip'] +['Ġhead', 's'] +['ĠMich', 'igan'] +['_', 'it'] +['ĠTor', 'onto'] +['Ġrec', 'ording'] +['Ġsub', 'mitted'] +['_var', 'iable'] +['medi', 'ate'] +['.graph', 'ics'] +['Ġst', 'ood'] +['Ġre', 'ar'] +['vel', 'ocity'] +['_M', 'ESSAGE'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ'] +['ro', 'les'] 
+['ĠT', 'our'] +['_', 'year'] +['end', 'ment'] +['amp', 's'] +['ĠIre', 'land'] +['m', 'al'] +['Ġyoung', 'er'] +['Ġstrugg', 'le'] +['Ġc', 'able'] +['ĠSD', 'L'] +["('", '-'] +['an', 'es'] +['ĠNe', 'ed'] +['.R', 'ow'] +['P', 'ol'] +['ĠP', 'H'] +['_s', 'cript'] +['ag', 'em'] +['ĠB', 'as'] +['_s', 'pace'] +['.', 'loc'] +[':', 'i'] +['ad', 'r'] +['Ġengine', 'ering'] +['it', 'en'] +[')', '&'] +['Ġu', 'k'] +['ĠL', 'ittle'] +['_C', 'OUNT'] +['x', 'A'] +['Array', 'List'] +['æ', 'į'] +['Ġ"', '")Ċ'] +['An', 'chor'] +['Ġh', 'ang'] +['t', 'witter'] +['Ġcompet', 'itive'] +['.s', 'rc'] +['ãģ', 'Ĺ'] +['Ġtrans', 'late'] +['ĠCre', 'ates'] +['ook', 's'] +['ĠR', 'oll'] +["''", "'Ċ"] +['/', 'sh'] +['s', 'ome'] +['Enc', 'oding'] +['.res', 'olve'] +['Ġdesign', 'er'] +['ĠSt', 'orage'] +['Ġz', 'a'] +['ĠN', 'ever'] +['Ġsomew', 'here'] +['Ġbox', 'es'] +['.s', 'ource'] +['Ġpy', 'game'] +['Ġgrow', 'n'] +['.t', 'w'] +['()', '),Ċ'] +["',", "['"] +['Ġoppon', 'ent'] +['(s', 'rc'] +['.l', 'ayer'] +['AP', 'P'] +['ĠAct', 'iv'] +['Ġguest', 's'] +['ĠVAL', 'UES'] +['};ĊĊ', 'Ċ'] +['.n', 'ative'] +['Ġamount', 's'] +['.', 'RE'] +['Ġcl', 'one'] +['Ġwer', 'en'] +['Ġ"', '<<'] +['_', 'ac'] +['Ġbreak', 'ing'] +['Ġreli', 'able'] +['.P', 'OST'] +['ĠSk', 'y'] +["Ġ'", '&'] +['Ġsaved', 'InstanceState'] +['ast', 'ing'] +['ill', 'ion'] +['com', 'ments'] +['ult', 'y'] +['.m', 'enu'] +['/', 'config'] +['Ġ', 'ĊĊĊ'] +['T', 'ODO'] +['Ġpurch', 'ased'] +['_c', 'or'] +['ĉ', 'auto'] +['Compat', 'Activity'] +['com', 'plete'] +['_', 'graph'] +['is', 'odes'] +['Ġsitu', 'ations'] +['ĠH', 'or'] +['Re', 'ceive'] +['âĢľ', 'We'] +['Ġent', 'ities'] +['.assert', 'Equals'] +['оÐ', 'º'] +['ĠS', 'ans'] +['v', 'ince'] +['rom', 'pt'] +['=', 'Ċ'] +['Ġ/', '.'] +['.Se', 'lect'] +['yl', 'v'] +['Ġb', 'att'] +['A', 'udio'] +['Ġincreasing', 'ly'] +['.B', 'undle'] +['Ġexpl', 'ains'] +['the', 'ast'] +['.', 'offset'] +['Ġh', 'al'] +['Ġtechn', 'ique'] +['_l', 'imit'] +['Ġdraw', 'n'] +['AY', 'ER'] +['Ġfeature', 'd'] +['yy', 'yy'] +['at', 'in'] +['ph', 
'en'] +['ach', 'el'] +['!', '\\'] +['l', 'ower'] +['ĠG', 'R'] +['Ġp', 'ag'] +['ĠP', 'arse'] +['Ġt', 'ou'] +['ä¸', 'Ģ'] +['D', 'istance'] +['Index', 'Path'] +['Ġh', 'ell'] +['s', 'im'] +['UT', 'TON'] +['Us', 'age'] +['elen', 'ium'] +['ĠF', 'all'] +['Ġ"', '.$'] +['ĠM', 'u'] +['Ġcr', 'uc'] +['Ġs', 'ont'] +['REF', 'IX'] +['Ġinter', 'ior'] +['ĠO', 'lymp'] +['.Auto', 'Scale'] +['par', 'a'] +['Axis', 'Alignment'] +['Ġr', 'iver'] +['D', 'to'] +['Ġwith', 'draw'] +['Re', 'act'] +['-', 'class'] +['b', 'efore'] +['_', 'alloc'] +['Cont', 'ents'] +['ĠW', 'as'] +['I', 'CT'] +['Ġform', 'ula'] +['Ġindic', 'ates'] +['ĠĠĠĠ', 'ĊĊ'] +['_st', 'ore'] +['it', 'ting'] +['ĠIt', 'alian'] +['_S', 'et'] +['_re', 'port'] +['Ġp', 'id'] +['_V', 'ER'] +['Ġw', 'ins'] +['ĠCl', 'oud'] +['")', '{Ċ'] +['ch', 'ester'] +['Ġden', 'ied'] +['Ġw', 'ird'] +['ĠSte', 'p'] +['Ġinvest', 'ors'] +['b', 'old'] +['_d', 'isplay'] +['ou', 'ver'] +['or', 'er'] +['Res', 'et'] +['Ġsurg', 'ery'] +['Ġstrateg', 'ies'] +['/m', 'aterial'] +['_', 'unit'] +['Ġc', 'ouncil'] +['.P', 'er'] +['ĠâĢ', 'ŀ'] +['Ġre', 'form'] +['F', 'ramework'] +['Ġlist', 'ing'] +['_b', 'tn'] +['Ġb', 'is'] +['%', 'd'] +['eg', 'as'] +['Ġsudden', 'ly'] +['_S', 'ER'] +['Ġa', 'o'] +['_d', 'irectory'] +['f', 'as'] +['Ġprem', 'ium'] +['Ġtrack', 'ing'] +['ĠB', 'L'] +['Ġm', 'ature'] +['Ġbath', 'room'] +["Ġ'/", "'"] +['ĠÄ', 'ij'] +['Per', 'formed'] +['Ġsold', 'iers'] +['arn', 'ings'] +['Ġwalk', 'ed'] +['-', 'con'] +['b', 'ottom'] +['Ġsurpr', 'ising'] +['Ġg', 'ene'] +['Us', 'uario'] +['.DE', 'FAULT'] +['ĠM', 'IT'] +['C', 'ODE'] +['ĠE', 'gypt'] +['p', 'icker'] +['ys', 'ql'] +['AT', 'URE'] +['d', 'etails'] +['ĠCon', 'ference'] +['In', 'formation'] +['ĠM', 'ail'] +['-d', 'own'] +['r', 'aries'] +['b', 'ro'] +['Ġsubject', 's'] +["Ġ'", '*'] +['è¯', '·'] +['or', 'ient'] +[':', '@'] +['ver', 'bose'] +['E', 'F'] +['Ġto', 'ler'] +['eng', 'ers'] +['Ġend', 'point'] +['Ġstr', 'ange'] +['Ġcol', 'on'] +['Ġpre', 'ferred'] +['de', 'p'] +['ĠE', 'V'] +['ARR', 'AY'] +['Ġw', 'he'] 
+['Ġp', 'up'] +['_n', 'odes'] +['Ġtalk', 'ed'] +['Ġinstit', 'ution'] +['db', 'c'] +['Ġex', 'posed'] +['te', 'en'] +['ĠFr', 'ont'] +['T', 'T'] +['_N', 'ONE'] +['\\/', '\\/'] +['pro', 'gram'] +['Ġencour', 'age'] +['.', '`'] +['sh', 'ire'] +['ĠIsl', 'am'] +['e', 'en'] +['N', 'I'] +["'", '"'] +['.W', 'idth'] +['Ġlik', 'ed'] +['Ġ{', '...'] +['ĠSystem', 's'] +['Ġvot', 're'] +['Ġmanufact', 'uring'] +['Con', 'verter'] +['ĠIn', 'f'] +['ì', 'ļ'] +['D', 'TO'] +['Ġin', 'ches'] +['Ġ', 'à¤'] +['Ã', '¹'] +['ĠChar', 'les'] +['B', 'U'] +['"))', ';ĊĊ'] +['ĠL', 'abor'] +['un', 'n'] +['Ġest', 'im'] +['m', 'obile'] +['ĠL', 'earn'] +['_C', 'ALL'] +['â', 'Ħ'] +['Ġind', 'ices'] +['Ġt', 'ub'] +['ikip', 'edia'] +['C', 'ost'] +['row', 'able'] +['ë', '¡'] +['g', 'age'] +['Ġfunction', 'ality'] +['uzz', 'le'] +['em', 'os'] +['.l', 'ib'] +['Ġd', 'ass'] +['еÐ', 'º'] +['enn', 'a'] +['Ġsh', 'ots'] +['Ġrest', 'ore'] +['/', 'D'] +['For', 'Key'] +['],', '['] +['al', 'ias'] +['l', 'int'] +['.st', 'ream'] +['æ', 'ł'] +['_FORM', 'AT'] +['Ġsil', 'ver'] +['.re', 'pository'] +['Ġlegis', 'l'] +['.B', 'order'] +['_fe', 'atures'] +['Per', 'mission'] +['Ġhous', 'es'] +['ĠW', 'ars'] +['_COM', 'P'] +['Ġinj', 'uries'] +['Ġconstant', 'ly'] +['fl', 'utter'] +['EN', 'U'] +['ĠCon', 'f'] +['Ġrecogn', 'ized'] +['Ġpract', 'ical'] +['Ġde', 'cent'] +['B', 'J'] +[']', ');'] +['ast', 'y'] +['ĠAct', 'ivity'] +['-m', 'ode'] +['Ġsl', 'ide'] +['.IsNullOr', 'Empty'] +['ĠY', 'OU'] +['P', 'ower'] +['ind', 'ices'] +['Ġqual', 'ified'] +['Ġthrow', 'n'] +['h', 'ello'] +['ĠN', 'ick'] +['l', 'ah'] +['as', 'sembly'] +['ĠSm', 'all'] +['old', 'ing'] +['Sh', 'ould'] +['ĠSil', 'ver'] +['(saved', 'InstanceState'] +['Ġtog', 'gle'] +['.N', 'ot'] +['C', 'trl'] +[':', 'nil'] +['ĠCont', 'inue'] +['ĠB', 'oot'] +['æ', 'ī'] +['ĠM', 'ur'] +['d', 'on'] +['ĠF', 'A'] +['S', 'napshot'] +['Ġassoci', 'ation'] +['fo', 'x'] +[',', 'a'] +['az', 'ione'] +[']', ')čĊ'] +['CT', 'YPE'] +['Ġf', 'ade'] +['ĠD', 'ar'] +['.n', 'avigation'] +['Ġl', 'uck'] +['SC', 'RI'] 
+['ĠDe', 'ad'] +['Ġterm', 'inal'] +['_LE', 'NGTH'] +['Ġeff', 'iciency'] +['Ġun', 'w'] +['Ġn', 'arrow'] +['iment', 'o'] +['(', 'Color'] +['ĠSe', 'a'] +['_', 'area'] +[',', 'A'] +['_', 'opt'] +['ĠHill', 'ary'] +['.t', 'ask'] +['ĠJ', 'ac'] +['ast', 'ed'] +['ĠAd', 'am'] +['ĠIl', 'legal'] +['Ġsearch', 'ing'] +['Instance', 'Of'] +['J', 'ava'] +['ĠForm', 'at'] +['Ġreal', 'ized'] +['ĠChild', 'ren'] +['Ġk', 'il'] +['(f', 'rame'] +['âĢĿ', '.ĊĊ'] +['Ġscen', 'ario'] +['"]', ');Ċ'] +['Ġincred', 'ible'] +['li', 'x'] +['IO', 'Exception'] +['ĠQ', 'uest'] +['il', 'ty'] +['Ġun', 'lock'] +['â', 'Ĥ¬'] +['Ġre', 'ferences'] +['ĠV', 'ert'] +['B', 'inding'] +['eg', 'ative'] +['Ġwr', 'ap'] +['.d', 'atabase'] +['(', 'content'] +['B', 'uf'] +['ĠTr', 'ad'] +['ĠA', 'ud'] +['tr', 'ace'] +['.m', 'ock'] +['Ġther', 'apy'] +['ĉ', 'L'] +['.To', 'Int'] +['ĠKing', 'dom'] +['B', 'us'] +['ha', 'ust'] +['""', '"ĊĊ'] +['(', 'end'] +['.draw', 'able'] +['[', '];Ċ'] +['ĠH', 'ospital'] +['Ġph', 'arm'] +['----', '-'] +['ĠA', 'G'] +['é', 'd'] +['>', '");Ċ'] +['Ġw', 'allet'] +['at', 'able'] +[')', '$'] +['Ġmonth', 'ly'] +['Ġdi', 'agnostic'] +['S', 'ymbol'] +['Ġiter', 'ator'] +['un', 'finished'] +['Ġimm', 'igration'] +['s', 'r'] +['RO', 'W'] +['(g', 'ame'] +['Ġclo', 'thes'] +['ĠU', 'nt'] +['Ġactiv', 'ation'] +['_C', 'on'] +['.h', 'ash'] +['Ġinitial', 'ly'] +['.H', 'ash'] +['Ġcut', 's'] +['f', 'ound'] +['ĠSt', 'ory'] +['ÑĨ', 'и'] +['ac', 'ao'] +['_T', 'YP'] +['pro', 'to'] +['est', 'r'] +['-p', 'age'] +['ah', 'r'] +['Ġincor', 'rect'] +['ĠJose', 'ph'] +['TextBox', 'Column'] +['_st', 'yle'] +['ĠD', 'aniel'] +['s', 'heet'] +['Ġl', 'iv'] +['l', 'ined'] +['Ġr', 'a'] +['R', 'untime'] +['_', 'empty'] +['sl', 'ug'] +['_', 'struct'] +['ë', 'Ĭ'] +['m', 'u'] +['Ġper', 'mitted'] +['Ġreg', 'ional'] +['Ġsob', 're'] +['ĠS', 'uch'] +['Ġ[', '_'] +['Ġro', 'of'] +['.Al', 'ignment'] +['t', 'imes'] +['.m', 'sg'] +['Ġche', 'st'] +['ĠT', 'ab'] +['Ġest', 'a'] +['ä', 'n'] +['Ġsubs', 'cription'] +['(', 'command'] +['s', 'pecial'] +['Ġme', 
'al'] +['")', ':Ċ'] +['_', 'ctx'] +['Ġclos', 'ely'] +['et', 'ry'] +['-', 'be'] +['ad', 'el'] +['ĠR', 'am'] +['ig', 'est'] +['ĠSpan', 'ish'] +['Ġcommit', 'ment'] +['Ġw', 'ake'] +['*', '>('] +['P', 'HP'] +['_', '{'] +['ck', 'er'] +['<', 'List'] +['_n', 'ull'] +['ĠRes', 'erved'] +['Ġin', 'her'] +['.Column', 's'] +['.A', 'spNet'] +['_IN', 'VALID'] +['ĠParam', 'eter'] +['Ġex', 'pr'] +['}', '{'] +['Cell', 'Style'] +['Ġval', 'uable'] +['Ġfun', 'ny'] +['In', 'v'] +['Ġst', 'able'] +['*', 't'] +['Ġp', 'ill'] +['pl', 'iers'] +['ĠC', 'SS'] +['ĠCon', 'dition'] +['ĠS', 'peed'] +['ublish', 'er'] +['Ġoff', 'ensive'] +['ce', 'st'] +['ic', 'as'] +['Ġsp', 'ark'] +['ĠPro', 'te'] +['set', 'up'] +['IF', 'Y'] +['ĠT', 'ax'] +['Wh', 'o'] +['F', 'amily'] +['-', 'for'] +['.', 'uk'] +['Ġf', 'asc'] +['sv', 'g'] +['")', ').'] +['Ġbirth', 'day'] +['âĸ', 'Ī'] +['ve', 'h'] +['el', 'led'] +['Ġimport', 's'] +['ĠIsl', 'amic'] +['T', 'A'] +['ĠSt', 'an'] +['we', 'ather'] +['Ġsus', 'pect'] +['e', 'ature'] +['enn', 'es'] +['W', 'M'] +['.m', 'inecraft'] +['av', 'id'] +['è', '½'] +['.se', 'curity'] +['in', 'os'] +['G', 'ood'] +['Ġm', 'arch'] +['Ġposs', 'ess'] +['us', 'uario'] +['Con', 's'] +['am', 'ber'] +['ched', 'uler'] +['Ġhor', 'se'] +['ç', '½'] +['(b', 'ody'] +['ĠTrans', 'form'] +['_de', 'code'] +['.s', 'vg'] +['Ġf', 'oo'] +['Ġd', 'ella'] +['ext', 'ends'] +['am', 'er'] +['Ġprocess', 'ed'] +['ĠH', 'arr'] +['ĠA', 'I'] +['Ġk', 'o'] +['CH', 'AR'] +['(', '%'] +['Ġt', 'ap'] +['({', "'"] +['c', 'roll'] +['D', 'OM'] +['Ġte', 'a'] +['Ġre', 'in'] +['Ġworld', 'wide'] +['_f', 'n'] +['sh', 'a'] +['Ġb', 'ir'] +['ç', 'ões'] +['="#', '">'] +['Ġrepresent', 'ed'] +['ill', 'er'] +['(ex', 'pected'] +['Ġd', 'ance'] +['Ġvisit', 'ors'] +['.con', 'cat'] +['-b', 'it'] +['UR', 'RE'] +['ĠR', 'og'] +['v', 'p'] +['ip', 'h'] +['ĠL', 'LC'] +['it', 'led'] +['iam', 'i'] +['C', 'oll'] +['_re', 'al'] +['_sh', 'ow'] +['_f', 'older'] +['Ġd', 'ar'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ'] +['Ġl', 'atter'] 
+['arch', 'y'] +['Ġb', 'ow'] +['Ġout', 'come'] +['ĠPost', 'ed'] +['Ġris', 'ks'] +['ĠThere', 'fore'] +['Ġowners', 'hip'] +['Ġpar', 'allel'] +['Ġp', 'ending'] +['ge', 'ometry'] +['Ġrecogn', 'ize'] +['ST', 'EM'] +['ĠC', 'P'] +['Ġimm', 'igr'] +['IT', 'LE'] +['ĠĠĠĠ', 'ĉĉ'] +['conn', 'ected'] +['Ġsm', 'ile'] +['(d', 'ocument'] +['\\', 'Component'] +['vert', 'ical'] +['Ġconsum', 'ption'] +['Ġsh', 'oes'] +['.', 'impl'] +['un', 'ks'] +['.', '";Ċ'] +['Ġfood', 's'] +['_', ');Ċ'] +['.assert', 'True'] +['Ġp', 'ipeline'] +['Ġcollection', 's'] +['Ġearn', 'ed'] +['ĠC', 'ert'] +['Ġpartners', 'hip'] +['(', 'action'] +['Ġc', 'd'] +['ĠV', 'ery'] +['Option', 'al'] +['Ġscre', 'ens'] +['Ġtit', 'les'] +['ener', 'ator'] +['Ġab', 'andon'] +['k', 'ind'] +['IL', 'TER'] +['Ġclos', 'ing'] +['lic', 'a'] +['_', 'inter'] +['Ġcamp', 'us'] +['set', 'ting'] +['S', 'prite'] +['ãģ', '¯'] +['_re', 'ply'] +['To', 'List'] +[':', '\\/\\/'] +['ed', 'e'] +['Ġfol', 'ks'] +['Ġbo', 'at'] +['(', 'argv'] +['Ġperman', 'ent'] +['Ġcarry', 'ing'] +['Ġconserv', 'ative'] +['import', 'ant'] +['.', 'img'] +['ĠIm', 'm'] +['Ġdim', 'ensions'] +['al', 'and'] +['s', 'ingle'] +['Ex', 'it'] +['--------', '--'] +['ari', 'ant'] +['tern', 'al'] +['Se', 'conds'] +['ĠIt', 'aly'] +['ot', 'lin'] +['.Res', 'ume'] +["='", '"'] +[')', '=='] +['cept', 'or'] +['Ġs', 'ca'] +['/m', 'ain'] +['Sec', 'urity'] +['_d', 'at'] +['Ġlet', 's'] +['Ġa', 'qu'] +['Ġwhen', 'ever'] +['b', 'erry'] +['Ġact', 'ing'] +['ant', 'i'] +['p', 'd'] +['&', 'gt'] +['æ', 'Ń'] +['Z', 'one'] +['T', 'oday'] +['!', '.'] +['To', 'Props'] +['ab', 'is'] +['it', 'able'] +['Ġg', 'al'] +[']', '{'] +['iz', 'ona'] +['Ġin', 'contri'] +['N', 'ET'] +['///', 'Ċ'] +['[', 'in'] +['_s', 'ave'] +['Ġex', 'em'] +['ĠK', 'enn'] +['Ġev', 'olution'] +['var', 's'] +['_st', 'ats'] +['-', 'only'] +['ĠColor', 'ado'] +['Ġwatch', 'ed'] +['b', 'our'] +['Ġsever', 'e'] +['Ġprofession', 'als'] +['port', 'ion'] +['Ġguar', 'ante'] +['Ð', '³'] +['Ġpush', 'ed'] +['ĠG', 'i'] +['ï', '½'] +['Ġt', 'um'] +['ĠA', 
'z'] +['ĠEdge', 'Insets'] +['"))', ';čĊ'] +['is', 'se'] +['.', 'ac'] +['Set', 'ting'] +['Ġapprec', 'iate'] +['ĠValue', 'Error'] +['Ġsur', 've'] +['ĠR', 'ole'] +['.', 'Inter'] +['plot', 'lib'] +['j', 'et'] +['d', 'am'] +['Ġplatform', 's'] +['te', 'le'] +['UT', 'O'] +['ĠInt', 'ernal'] +['+', ':'] +['}', ';čĊ'] +['Gener', 'al'] +['\\', 'Entity'] +['Ġlawy', 'er'] +['qu', 'iv'] +['ĠPost', 's'] +['is', 'o'] +['Ġacc', 'um'] +['ob', 'e'] +['Ġmark', 's'] +['Ġ]', ';ĊĊ'] +['ĉ', 'text'] +['.s', 'uccess'] +['cur', 'r'] +['as', 'a'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠĠ'] +['Ġth', 'in'] +['_', 'over'] +['are', 'st'] +['ĠO', 's'] +['(', 'address'] +['Ġvel', 'ocity'] +['Ġ[]', ';ĊĊ'] +['="', '../../'] +['ĠPr', 'iv'] +['b', 'ow'] +['Ġguar', 'antee'] +['%', 'ĊĊ'] +['Ġeval', 'uate'] +['.LE', 'NGTH'] +['Ġin', 'ventory'] +['q', 'a'] +['_de', 'bug'] +['.On', 'ClickListener'] +['Ġl', 'ies'] +['Ġassess', 'ment'] +['dat', 'etime'] +['.background', 'Color'] +['Ġ*/', 'čĊčĊ'] +['ra', 'f'] +['un', 'wrap'] +['ĠF', 'oot'] +['Ġnot', 'ify'] +['Ġlow', 'est'] +['DO', 'CTYPE'] +['Ġl', 'anguages'] +['ex', 'tra'] +['-', 'back'] +['Ġein', 'en'] +['tem', 'plates'] +['_p', 'ass'] +['ĠM', 'ust'] +['Ġest', 'á'] +['_c', 'ore'] +['ĠSc', 'ot'] +['A', 'I'] +['Ġb', 'ias'] +['ations', 'hip'] +['Con', 'stant'] +['Ġprogram', 'ming'] +['In', 's'] +['uspend', 'Layout'] +['ĠPRO', 'VID'] +['ant', 'es'] +['Ġsh', 'irt'] +['in', 'ated'] +['.', 'OK'] +['[', 'a'] +['Ġthink', 's'] +['?', 'ĊĊĊĊ'] +['Ġregard', 'less'] +['ĠMag', 'ic'] +['ul', 'ating'] +['ĉ', 'class'] +['add', 'Group'] +['RE', 'ATE'] +['ĠS', 'U'] +['Ġsim', 'pl'] +['c', 'opyright'] +['Ġb', 'unch'] +['Ġun', 'iverse'] +['ĠE', 'rr'] +['Ġpresent', 'ation'] +['c', 'ategories'] +['Ġatt', 'ach'] +['.s', 'ign'] +['_A', 'C'] +['Ġdisc', 'ipl'] +['Ġregular', 'ly'] +['Ġprim', 'arily'] +['ink', 's'] +['[', '['] +['.r', 'and'] +['.sh', 'ould'] +['ownt', 'own'] +['="', "'"] +['Ġs', 'ans'] +['Ġsupport', 'ers'] +['se', 'quence'] +['G', 'O'] +['.', '.ĊĊ'] +['ĠS', 
'pr'] +['Ġcare', 'fully'] +['U', 'IColor'] +['dest', 'roy'] +['Ġtod', 'os'] +['ĠOR', 'DER'] +['ott', 'ed'] +['Ġd', 'ont'] +['aud', 'i'] +['_', 'player'] +['g', 're'] +['ĠO', 'il'] +['<', 'body'] +['_st', 'ack'] +['.P', 'adding'] +['ĠProduct', 's'] +['Ġpriv', 'ile'] +['Ġinj', 'ured'] +['ĠF', 'urther'] +['Ġal', 'ias'] +['.Resume', 'Layout'] +['_LE', 'N'] +['Ġs', 'es'] +["']", ';ĊĊ'] +['cre', 'ens'] +['Ġdirect', 'ed'] +['.S', 'uspendLayout'] +['od', 'ge'] +['.A', 't'] +['mark', 's'] +['ĠUn', 'ivers'] +['ert', 's'] +['ĠE', 'sc'] +['Ġnav', 'bar'] +['Ġutil', 'ity'] +['agnost', 'ics'] +['Ġin', 'ject'] +['ĠD', 'NA'] +['Ġ"', ',"'] +['am', 'ar'] +['Ġe', 'u'] +['Ġrestaur', 'ants'] +['_p', 'ut'] +['ut', 'ers'] +['Tool', 'Strip'] +['t', 'w'] +['ist', 'ro'] +['Ġz', 'oom'] +['Ġleg', 'it'] +['pec', 'ific'] +['ĠC', 'ome'] +['Ġlocal', 'Storage'] +['Ġabs', 'or'] +['.P', 'anel'] +['ĠDesign', 'er'] +['Ġo', 'w'] +['IC', 'AL'] +['_', 'uri'] +['(f', 'ield'] +['Ġsup', 'erv'] +['Ex', 'ists'] +['Ġrespect', 'ively'] +['ĠSt', 'and'] +['Con', 'f'] +['uss', 'ian'] +['Ġar', 'c'] +['Ġ', 'nd'] +['uck', 's'] +['Ġre', 'str'] +['Ġseason', 's'] +['ĠCh', 'apter'] +['ĠSw', 'itch'] +['p', 'ic'] +['Ġh', 'i'] +['load', 'ed'] +['Ġfl', 'uid'] +['-b', 'tn'] +['Ġrun', 'time'] +['.', 'it'] +['B', 'N'] +['Op', 'acity'] +['as', 'ant'] +['ry', 'ption'] +['-n', 'ative'] +['Ġta', 'ught'] +['å', '¯'] +['ag', 'ment'] +['Ġm', 'ul'] +['Reg', 'istry'] +['_', 'grid'] +['ĠBro', 'ok'] +[':', 'Set'] +['Ġm', 'ongoose'] +['AM', 'ES'] +['inner', 'HTML'] +['Ġs', 'oci'] +['ĠInt', 'el'] +['get', 'Id'] +['C', 'md'] +['Ġaccess', 'ible'] +['r', 'ames'] +['le', 'ton'] +['Ġ__', '('] +['ĉ', 'delete'] +['ĠS', 'quare'] +['"', 'ĊĊĊ'] +['Ġbu', 'cket'] +['avor', 'ite'] +['ĠB', 'reak'] +['++', ']'] +['Ġbr', 'ush'] +['Ġt', 'ensor'] +['/', 'http'] +['T', 'ile'] +['Ġfunction', 'al'] +['Ġ"', '*'] +['wh', 'el'] +['Ġt', 'ent'] +['ĠChar', 'acter'] +['Ġse', 'es'] +['.', 'ST'] +['B', 'ig'] +['Ġext', 'ern'] +['Url', 's'] +['))', ')),'] +['ĠJ', 'r'] 
+['.B', 'uilder'] +['.', ';'] +['n', 'l'] +['_', 'Init'] +['ĠH', 'ER'] +['ż', 'e'] +['mys', 'qli'] +['_', 'icon'] +['v', 'an'] +['Ġfeel', 'ings'] +['Ġle', 'an'] +['Ġhop', 'ing'] +['T', 'V'] +['="čĊ'] +['b', 'est'] +['all', 'as'] +['ent', 'ed'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĊ'] +['_con', 'nection'] +['Ġrep', 'o'] +['en', 'abled'] +['аÐ', 'º'] +['Ġsh', 'a'] +['Ġmembers', 'hip'] +['Status', 'Code'] +['in', 'ating'] +['_s', 'm'] +['_c', 'ustom'] +['_', 'weight'] +['Ġc', 'ss'] +['St', 'at'] +['_', 'env'] +['link', 's'] +['TR', 'L'] +['ĠH', 'it'] +[',', 'r'] +['up', 'id'] +['Ġop', 'ens'] +['Ġg', 'ent'] +['_v', 'is'] +['Ġj', 'oy'] +['<', 'w'] +['_c', 'ost'] +['ĠPy', 'Object'] +['ren', 'ce'] +['ĠGeorg', 'ia'] +['ĠBro', 'ad'] +['m', 'ma'] +['â', 'Ĥ'] +['p', 'f'] +['Ġ"', '\\"'] +['Ġ(', '&'] +['om', 'o'] +['Ġliter', 'ally'] +['Ī', 'ĺ'] +['met', 'ric'] +['Ġb', 'ars'] +['z', 'ed'] +['(w', 'indow'] +['ĠIsrael', 'i'] +['Ġform', 'al'] +['ident', 'ifier'] +['.d', 'ao'] +['ĠDe', 'ath'] +['%', ';Ċ'] +['Ġdecl', 'are'] +['ar', 'ms'] +['RE', 'AM'] +['PERT', 'Y'] +['Ġconsequ', 'ences'] +['to', 'ols'] +['Pe', 'ople'] +['ĠWh', 'ich'] +['>', '();čĊ'] +['.de', 'code'] +['_A', 'CT'] +['Button', 's'] +['.f', 'loat'] +['.F', 'irst'] +['ë', '¥'] +['ĠPol', 'it'] +['ĠX', 'CT'] +['T', 'ags'] +['ĠCG', 'Float'] +['=', 'str'] +['Ġle', 'af'] +['-', 'check'] +['ĠI', 'ss'] +['.s', 'ystem'] +['log', 'out'] +['ach', 't'] +['Ang', 'le'] +['s', 'in'] +['ch', 'art'] +['INT', 'ER'] +['ĠN', 'UM'] +['B', 'asic'] +['.P', 'roperties'] +['ä¸', 'Ń'] +['_', 'change'] +['ĠB', 'razil'] +['Ab', 'stract'] +['Ġ:', '+:'] +['_', 'use'] +['а', 'л'] +['ĠL', 'y'] +['IB', 'UT'] +['Ġout', 'er'] +['Ġ--', '>čĊ'] +['Ġrel', 'ief'] +['l', 'ap'] +['qu', 'er'] +['_p', 'arent'] +['he', 'ap'] +['LO', 'SE'] +['Ġcomb', 'ine'] +['ĠR', 'ose'] +['ow', 'ers'] +['Ġproced', 'ures'] +['ĠS', 'ort'] +['an', 'im'] +['var', 'iant'] +['eh', 'icle'] +['Ġsign', 'ing'] +['Pr', 'imary'] +['c', 'urrency'] +['Ġsex', 'e'] +['o', 'en'] +['th', 'eta'] +['em', 
'an'] +['Ġimpress', 'ive'] +["('", '_'] +['ĉ', 'U'] +['ĠText', 'Style'] +['_c', 'nt'] +['Ġs', 'lice'] +["('", ':'] +['Ġunderst', 'ood'] +['H', 'is'] +['Ġinform', 'ed'] +['Ġn', 'ick'] +['(T', 'AG'] +['h', 'd'] +['Ġelection', 's'] +['est', 'ure'] +['ĠS', 'anta'] +['ĠCo', 'ast'] +['.p', 'df'] +['inc', 'iple'] +['.cl', 'one'] +['b', 'orn'] +['ut', 'a'] +['Ġl', 'icensed'] +['C', 'r'] +['Ġb', 'read'] +['ĠH', 'ouston'] +['Ġn', 'od'] +['Ġhop', 'es'] +['ĠCG', 'Rect'] +['Ġgu', 'ilty'] +['.g', 'if'] +['Ġro', 'se'] +['.Com', 'mon'] +['T', 'ip'] +['AN', 'K'] +['ĠF', 'C'] +['D', 'uring'] +['ĠSym', 'fony'] +['Ġdef', 'ensive'] +['k', 'm'] +[')', '>'] +['arch', 'ive'] +['ĠU', 'RI'] +['ycl', 'ing'] +['-', 'o'] +['ĠWe', 'bsite'] +['AM', 'P'] +['ish', 'ment'] +['Ġdo', 'ctors'] +['D', 'irect'] +['AR', 'I'] +['ĠRed', 'irect'] +['ier', 'en'] +['_d', 'ist'] +['y', 'o'] +['ĠPro', 'gress'] +['Ġz', 'um'] +['Ġmem', 'or'] +['ĠE', 'D'] +['Ġj', 'ur'] +['æį', '®'] +['_T', 'ABLE'] +['Ġu', 'uid'] +['Ex', 'pr'] +['.', 'head'] +["('", '%'] +['point', 'er'] +['Ġest', 'imate'] +['ĠG', 'reg'] +['Ġlo', 'ader'] +['Ġi', 'OS'] +['Ġm', 'ens'] +['[', 'y'] +['Ġref', 'used'] +['Ġprec', 'ision'] +['is', 'ch'] +['ĠA', 'CTION'] +['Cl', 'oud'] +['s', 'With'] +['(', 'ret'] +['_ADD', 'R'] +['_con', 'f'] +['(d', 'f'] +['Ġlock', 'ed'] +['Ġr', 'ising'] +['ãĥ»', 'ãĥ»'] +['ĠM', 's'] +['Ġscen', 'es'] +['_EX', 'T'] +['_', 'raw'] +['_', 'the'] +['pe', 'ople'] +['Ġre', 'con'] +['ĠF', 'un'] +['Ġb', 'less'] +['ĠUp', 'dated'] +['ü', 'n'] +['ĠĠĠĠĠĠĠĠĠĠĠĠ', 'čĊ'] +['pe', 'ction'] +['Re', 'lease'] +['.log', 'ger'] +['ĠS', 'Y'] +['Ġcoun', 'sel'] +['ur', 'd'] +['_', 'true'] +['Ġevery', 'body'] +['iv', 'ot'] +['Ġh', 'ence'] +['ĠN', 'AS'] +['Ġoppos', 'ed'] +['unk', 'nown'] +['ĠDES', 'C'] +['ĠCh', 'air'] +['fa', 'iled'] +['ĠIN', 'CLUDING'] +['Ġwrit', 'ers'] +['{', '}Ċ'] +['ÃŃ', 't'] +['_c', 'opy'] +['}', ':'] +['ĠB', 'at'] +['Ġconvert', 'ed'] +['ed', 'ing'] +['pl', 'acement'] +['ĠH', 'ost'] +['S', 'ound'] +['и', 'м'] +['Ġs', 'ought'] 
+['m', 'id'] +['Ġsal', 'ary'] +['og', 'g'] +['âĦ', '¢'] +['b', 'ul'] +['Ġw', 'ir'] +['valid', 'ator'] +['_ST', 'AT'] +['.st', 'ore'] +['ĠB', 'attle'] +['ı', 'n'] +['Ġ--', '>ĊĊ'] +['Tr', 'ump'] +['d', 'ot'] +['ĠCON', 'T'] +['.f', 'etch'] +['Ġcontin', 'u'] +['w', 'as'] +['Ġfra', 'ud'] +['_t', 'mp'] +['mit', 'ter'] +['.p', 'ictureBox'] +['G', 'A'] +['Ġt', 'ournament'] +['.', 'Input'] +['[', 'r'] +['ex', 'ion'] +['cent', 'age'] +['ĠKore', 'an'] +['und', 'ef'] +['ĠAv', 'ailable'] +['resh', 'ape'] +['Ġk', 'it'] +['ĠStr', 'uct'] +['ĠS', 'UB'] +['An', 'swer'] +['_l', 'ib'] +['.t', 'witter'] +['Ġo', 're'] +['ĠDr', 'agon'] +['.Ex', 't'] +[',', 'k'] +['Ġexplan', 'ation'] +['ref', 's'] +['ĠDr', 'ive'] +['ĠTr', 'aining'] +['.H', 'as'] +['int', 'age'] +['b', 'ig'] +['olog', 'ist'] +['enn', 'is'] +['Ù', 'ĩ'] +['Ġch', 'icken'] +['ĠĠĠĠĠĠĠĠĠĠ', 'Ċ'] +['ç', 'Ľ'] +['ãģ', '§'] +['Ġpe', 'ak'] +['Ġdrink', 'ing'] +['Ġen', 'code'] +['ĠNE', 'W'] +['m', 'alloc'] +['ĉf', 'printf'] +['Ġ=', '================================================================'] +['in', 'cluding'] +['Ġprincip', 'les'] +['ĠM', 'ah'] +['st', 'orage'] +['-', 'key'] +['Ġkey', 'word'] +['%', ';'] +['Ġtr', 'ained'] +['.con', 'trib'] +['Ġk', 'v'] +['__', "':Ċ"] +['ĠB', 'oy'] +['param', 'eter'] +['Ġsu', 'ite'] +['Ġthous', 'and'] +['Ġco', 'ordinate'] +['-g', 'enerated'] +['íķ', 'ĺ'] +['gener', 'ated'] +['Ġad', 'mitted'] +['Ġp', 'ussy'] +['#', 'w'] +['Ġsw', 'im'] +['un', 'ion'] +['N', 'a'] +['ĠRoy', 'al'] +['.ch', 'annel'] +['Up', 'dated'] +['_RO', 'OT'] +['Ġv', 'ital'] +['ra', 'ction'] +['ĠCrush', 'er'] +['Ġpre', 'ced'] +['Ġhor', 'izontal'] +['Blue', 'print'] +['Ġattr', 's'] +['Ġsm', 'oke'] +['Ð', 'Ĵ'] +['.', 'Equals'] +['F', 'B'] +['ĠRes', 'ources'] +['roll', 'ing'] +['Ġpass', 'es'] +['ĠN', 'um'] +['rot', 'ate'] +['et', 'ype'] +['\\', '",'] +['Ġsens', 'itive'] +['Ġt', 'all'] +['?', 'âĢĿĊĊ'] +['Pro', 'xy'] +['i', 'y'] +['_', 'section'] +['âĢĶâĢĶ', 'âĢĶâĢĶ'] +['br', 'id'] +['Ġcirc', 'uit'] +['at', 'an'] +['EN', 'C'] +['Ġdr', 
'iven'] +['Ġvot', 'ed'] +['Ġeduc', 'ational'] +['Ġinter', 'action'] +['abet', 'es'] +['Ġt', 'one'] +['ĠInitialize', 'Component'] +['Ġmer', 'ely'] +['Ġì', 'ŀ'] +['co', 'okie'] +['_', 'div'] +['ĠUIL', 'abel'] +['vel', 'y'] +['}', ');čĊ'] +['_', 'ENT'] +['#+', '#+'] +['art', 'icles'] +['ĠSou', 'thern'] +['Ġstrong', 'er'] +['ĠG', 'iven'] +['ĠE', 'ric'] +['ĠI', 'R'] +['ab', 'stract'] +['U', 'nder'] +['n', 'able'] +['Ġincre', 'ment'] +['ov', 'en'] +['Ġco', 'in'] +['_t', 'imer'] +['Ġsuffer', 'ed'] +['ĠF', 'REE'] +["']", '."'] +['ĠQue', 'en'] +['st', 'ats'] +['Ġmeet', 'ings'] +['Ġenter', 'ing'] +['Ġalong', 'side'] +['(s', 'ession'] +['it', 'als'] +['Ġfound', 'ation'] +['ĠC', 'redit'] +['.', 'div'] +['_', 'ALL'] +['pc', 'ion'] +['_st', 'at'] +['ick', 'ing'] +['Default', 's'] +['_s', 'rc'] +['Ġoutput', 's'] +['/', 'B'] +['Ġent', 'hus'] +['-b', 'l'] +['.Fore', 'Color'] +['ĉ', 'temp'] +['F', 'ace'] +['Ġinter', 'act'] +['Ġwe', 'ird'] +['M', 'ount'] +['re', 'll'] +['ud', 'ents'] +['Ġrequire', 'ment'] +['ĠS', 'us'] +['I', 'ER'] +['Ġe', 'lected'] +['re', 'ference'] +['ĠM', 'E'] +['Ġserv', 'ers'] +['.w', 'ait'] +['Ġsnap', 'shot'] +['il', 'ton'] +['Ġtri', 'es'] +['Ġt', 'ipo'] +['.T', 'ime'] +['>', 'w'] +['Ġmount', 'ain'] +['Ġp', 'ounds'] +['Ġ[', '...'] +['ex', 'ists'] +['Ġng', 'On'] +['_M', 'AP'] +['Ġf', 'lying'] +['xi', 'ety'] +['ĉ', 'value'] +['_D', 'B'] +['un', 'o'] +['Ġse', 'ats'] +['T', 'URN'] +['.', 'author'] +['!', ')'] +['or', 'ce'] +['Ġindic', 'ated'] +['.s', 'in'] +['Ġass', 'ignment'] +['im', 'iento'] +['ĠF', 'rame'] +['_g', 'en'] +['in', 'ery'] +['_', ')'] +['m', 'essages'] +['.set', 'tings'] +['ĠMe', 'an'] +['ĠM', 'useum'] +['ir', 'q'] +['att', 'ach'] +['ĠPalest', 'in'] +['_', 'QU'] +['_t', 'ags'] +['Ġcas', 'ual'] +['em', 'en'] +['ASS', 'WORD'] +['$', 's'] +['ĠC', 'irc'] +['оÐ', '¹'] +['et', 'ric'] +['/', 'P'] +['Ġep', 'och'] +['<', 'head'] +['_C', 'MD'] +['Ġg', 'it'] +['Ġpen', 'alty'] +['or', 'ph'] +['_', 'users'] +['ours', 'es'] +['.Date', 'Time'] +['atern', 'ion'] 
+['_pro', 'ject'] +['Ġsuper', 'ior'] +['ĠD', 'am'] +['ĠSe', 'attle'] +['X', 'Y'] +['>', 'The'] +['ĠA', 'k'] +['Ġgr', 'ass'] +['/*', 'čĊ'] +['(d', 'is'] +['Ġgun', 's'] +['Ġt', 'b'] +['ĠK', 'evin'] +['.', 'args'] +['ĠA', 'h'] +['op', 'ed'] +['(', 'J'] +['column', 's'] +['arg', 'uments'] +['ĠWith', 'Events'] +['_f', 'ull'] +['ĠDef', 'ense'] +['S', 'imple'] +['Ġdeath', 's'] +['Ġext', 'ensive'] +['ĠSt', 'ill'] +['ĠEx', 'pression'] +['ĠAg', 'ency'] +['Ġperform', 'ing'] +['F', 'X'] +['Ġus', 'uario'] +['U', 'AL'] +['S', 'ide'] +['od', 'os'] +['apt', 'op'] +['Ġcred', 'entials'] +['_c', 'ap'] +['at', 'ient'] +['ĠDis', 'ney'] +['Ġa', 'i'] +['Ġch', 'ip'] +['Ġvol', 't'] +['.make', 'Text'] +['%%%%%%%%', '%%%%%%%%'] +['Ġbelie', 'f'] +['_LO', 'C'] +['ĠC', 'ivil'] +['N', 'avigation'] +['Ġreve', 'al'] +['Ġviol', 'ent'] +['ĠF', 'il'] +['Ġc', 'atalog'] +['em', 'ed'] +['sc', 'an'] +['.', 'control'] +['Ġconstit', 'ution'] +['C', 'ountry'] +['Separ', 'ator'] +['_A', 'PP'] +['top', 'ic'] +['uet', 'ooth'] +['M', 'IN'] +['Ġdes', 'criptor'] +['y', 't'] +['ET', 'HER'] +['Ġdistrib', 'ute'] +["'", '}Ċ'] +['.tr', 'im'] +['.L', 'ine'] +['Ġl', 'bl'] +['assert', 'Equals'] +['ĠD', 'et'] +['omb', 'ok'] +['(', 'width'] +['Ġt', 'ort'] +['ĠEXP', 'RESS'] +['ac', 'o'] +['Us', 'ing'] +['ĠBr', 'and'] +['w', 'all'] +['EM', 'ENT'] +['ĠComm', 'unic'] +['<', 'uint'] +['ĠG', 'UI'] +['EG', 'IN'] +['ĠR', 'ange'] +['/', 'i'] +['ĠT', 'aylor'] +['c', 'ost'] +['Ġrespond', 'ed'] +['ĠTh', 'eme'] +['n', 'ce'] +['IS', 'H'] +['Ġfeat', 'uring'] +['Return', 's'] +['ĠK', 'r'] +['Ġ', '.Ċ'] +['Ġn', 'am'] +['_c', 'b'] +['Test', 'ing'] +['Ġ{', '},'] +['y', 'al'] +['.f', 'ield'] +['Ġ/', '='] +['_SH', 'ORT'] +['m', 'ates'] +['Test', 'Case'] +['ain', 'less'] +['Ġeval', 'uation'] +['_', 'ITEM'] +['ĠPac', 'ific'] +['ĉ', 'k'] +['Ġc', 'ant'] +['ĠR', 'os'] +[')', 's'] +['Ġf', 'et'] +['STR', 'ING'] +['ĠDis', 'pose'] +['g', 'al'] +['ĠJ', 'oin'] +['ĠP', 'orn'] +['ĠCath', 'olic'] +['AR', 'GET'] +['cp', 'u'] +['ç', 'łģ'] +['.sc', 'roll'] 
+['IS', 'ING'] +['ifest', 'yle'] +['anc', 'ement'] +['Ġm', 'erc'] +['ĠB', 'rowser'] +['eter', 'min'] +['Ġover', 'flow'] +['Av', 'ailable'] +['Ġbott', 'le'] +[':', 'UI'] +['ific', 'ial'] +['Ġco', 'ord'] +['clar', 'ation'] +['Ġcon', 'j'] +['G', 'LOBAL'] +['ok', 'u'] +['Ġk', 'wargs'] +['cond', 'itions'] +['ul', 'um'] +['Ġg', 'enu'] +['ĠH', 'ero'] +['å', 'İ'] +['Ġun', 'expected'] +['ĠDAM', 'AGES'] +['Ġk', 'a'] +['ĠC', 'ould'] +['UP', 'PORT'] +['ĠPh', 'otos'] +['Ġconf', 'ident'] +['Ġdet', 'ected'] +['de', 'g'] +['rg', 'b'] +['Ġstrong', 'ly'] +['Ġ}', ';čĊ'] +['Ġ)', ':'] +['Ġle', 'ct'] +['urs', 'ive'] +['RO', 'L'] +['ĠWe', 'ight'] +['Ġent', 'ertainment'] +['Ġ)', ');Ċ'] +['Ġg', 'onna'] +['Ġb', 'b'] +['.d', 'o'] +['G', 'S'] +['Ġmist', 'ake'] +['D', 'L'] +['ĠPROVID', 'ED'] +['ear', 'ning'] +['L', 'imit'] +['iss', 'ions'] +['[', 'v'] +['ä¸', 'į'] +['ir', 'ty'] +['D', 'el'] +['Ġunder', 'lying'] +['pre', 'ne'] +['Ġj', 'aw'] +['ĠD', 'I'] +['pe', 'er'] +['Ġobject', 'ive'] +['Ġde', 'posit'] +['Ġk', 'on'] +['Ġes', 'p'] +['.set', 'Visibility'] +['/', 'login'] +['<', 'typename'] +['Ġfr', 'anch'] +['/', 'e'] +['Par', 'allel'] +['Ġsc', 'ored'] +['ĠH', 'on'] +['ĠV', 'ill'] +['ig', 'a'] +['Ġant', 'icip'] +['_', 'assert'] +['ĠO', 'pt'] +['Ġdescri', 'bes'] +['w', 'an'] +['m', 'ount'] +['Ġmonitor', 'ing'] +['Ġt', 'out'] +['ëĬ', 'Ķ'] +['},', '{'] +['................', '................'] +['=', 'int'] +['Ġc', 'ust'] +['----', '--'] +['Ġatmos', 'phere'] +['P', 'AR'] +['ort', 'e'] +['IS', 'IBLE'] +['ĠI', 'ron'] +['ĠNot', 'ification'] +['.log', 'ging'] +['ĠBO', 'OL'] +['-p', 'oint'] +['Ġaf', 'raid'] +['ent', 'a'] +['Ġtom', 'orrow'] +['@', 'implementation'] +['Ġeng', 'age'] +['ĠAn', 'th'] +['ĠF', 'loor'] +['ĠU', 'l'] +['To', 'ols'] +['Ġb', 'ab'] +['Ġcare', 'ful'] +['ãģ', 'Ħ'] +['Ġcruc', 'ial'] +['Ġcalcul', 'ated'] +['ĠS', 'A'] +['Ġw', 'y'] +['D', 'X'] +['_T', 'AG'] +['ind', 'ed'] +['Ġj', 'et'] +['ĠEngine', 'ering'] +['.M', 'AX'] +['en', 'z'] +['v', 'd'] +['Ġpublic', 'ation'] +['Ġ##', '#'] 
+['Ġfac', 'ed'] +['ra', 'ham'] +['ĠC', 'apt'] +['As', 'set'] +['ĠCon', 'stants'] +['Ġlo', 'ans'] +['_', 'IP'] +['ĠF', 'ish'] +['Red', 'uc'] +['_m', 'at'] +['Date', 'Format'] +['_m', 'e'] +['[]', '[]'] +['Ġintegr', 'ity'] +['ĠC', 'ourse'] +['lob', 'als'] +['Ġfac', 'ilit'] +['Ġem', 'br'] +['ĠN', 'g'] +['.S', 'ystem'] +['Ġmanufact', 'urers'] +['Ġpro', 'ven'] +['.on', 'Create'] +['Ġal', 'arm'] +['ĠÂ', '§'] +['Ġcomm', 'only'] +['ic', 'os'] +['æĸ', '°'] +['ĠSt', 'ation'] +['}', ').'] +['ĠF', 'ilm'] +['w', 'i'] +['ç', 'ī'] +['Ġeng', 'aged'] +['St', 'ats'] +['Ġgovern', 'ments'] +['Ġafford', 'able'] +['_p', 'roperty'] +['Ġag', 'es'] +["('", '--'] +['Ġf', 'ör'] +['ĠProf', 'essor'] +['Ġhy', 'dro'] +['P', 'ush'] +['Ġorgan', 'ized'] +['Ac', 'cept'] +['é', 'm'] +['_c', 'ell'] +['Ġn', 'b'] +['p', 'b'] +['Art', 'icle'] +['Ġrem', 'oval'] +['Ġauth', 'entication'] +['ĠF', 'R'] +['l', 'ide'] +['Ġple', 'asure'] +['ap', 'ol'] +['Ġpart', 'ition'] +['ĠS', 'ide'] +['Ġcr', 'imes'] +['Ġdem', 'o'] +['hold', 'ers'] +['ĠPak', 'istan'] +['In', 'struction'] +['Ġexpect', 'ations'] +['.sc', 'ene'] +["Ġ'", ')'] +['h', 'es'] +['ino', 'is'] +['_P', 'ro'] +['Ġm', 'olec'] +['and', 'al'] +['_sh', 'ort'] +['Ġdefault', 's'] +['Ġn', 'ations'] +['in', 'en'] +['Ġr', 't'] +['O', 'CK'] +['P', 'acket'] +['S', 'B'] +['ĠSH', 'ALL'] +['_cont', 'ents'] +['ise', 'conds'] +['vert', 'y'] +['á', 't'] +['G', 'uid'] +['n', 'om'] +['Ġcon', 'clusion'] +['.', 'Update'] +['Ġlo', 'vely'] +['Ġem', 'it'] +['b', 'ec'] +['ĉĉĉĉ', 'Ġ'] +['Ġintel', 'lect'] +['Ġb', 'rew'] +['ec', 'ycle'] +['F', 'ire'] +['Ġad', 'mit'] +['Ġar', 'bit'] +['Ġarr', 'ang'] +['ĠM', 'IN'] +['M', 'ail'] +['ĠN', 'ative'] +['C', 'ur'] +['Ġcon', 'vent'] +['.R', 'untime'] +['"', '}Ċ'] +['.R', 'un'] +['Ġprint', 'ed'] +['Ġconven', 'ient'] +['.', 'ar'] +['m', 'ock'] +['ĠAdmin', 'istration'] +['ãģ', '¾'] +['Ġelect', 'ron'] +['fl', 'ate'] +['Ġl', 'ombok'] +['Ġjava', 'fx'] +['n', 'h'] +['Ġsup', 'plies'] +['Ġvisit', 'ing'] +['ah', 'l'] +['Ġpow', 'der'] +['Ġult', 'imate'] 
+['Ġorient', 'ation'] +['ut', 'as'] +['_s', 'cale'] +['Con', 'firm'] +['ph', 'ones'] +['ĠOper', 'ation'] +['/', 'T'] +['_IN', 'TER'] +['Ġair', 'port'] +['Ġmet', 'rics'] +['Ġphen', 'omen'] +['a', 'udio'] +['Ġm', 'ai'] +['(', 'K'] +['h', 'u'] +['all', 'ing'] +['rodu', 'ction'] +['ĠTrans', 'port'] +['ĠNOT', 'E'] +['æĸ', 'ĩ'] +['Ġfew', 'er'] +['_T', 'IM'] +['ì', '§'] +['к', 'и'] +['A', 'ge'] +['F', 'IN'] +['Ġì', 'Ŀ'] +['ĠAt', 'tribute'] +['group', 's'] +['er', 'k'] +['at', 'to'] +['.', 'define'] +['.AspNet', 'Core'] +['ategor', 'ia'] +['ĠS', 'ir'] +['(', 'form'] +['<', 'User'] +['.', 'round'] +['_d', 'ay'] +['.A', 'll'] +['Servlet', 'Response'] +['.N', 'o'] +['l', 'arge'] +['IG', 'H'] +['qu', 'ent'] +['Ġvir', 'us'] +['Ġret', 'ro'] +['Ġim', 'per'] +['Bit', 'map'] +['Ġv', 'ice'] +['Ġoff', 'ense'] +['ist', 'e'] +['ĠA', 'UTH'] +['Ġê', '°'] +['ToolStrip', 'MenuItem'] +['G', 'u'] +['Ġr', 'ape'] +['ĠDav', 'is'] +['Ġover', 'whel'] +[':', 'flutter'] +['-', 'table'] +['ĠCon', 'structor'] +['Pr', 'ivate'] +['e', 'ven'] +['ch', 'r'] +['Ġap', 'plies'] +['_at', 'tribute'] +['Ġcon', 'tribute'] +['E', 'VER'] +['L', 'ines'] +['ĠAf', 'ghan'] +['Vis', 'itor'] +['ĠS', 'L'] +['se', 'ason'] +['C', 'U'] +['Ġintrodu', 'ction'] +['Ġmat', 'plotlib'] +['Å', 'ij'] +['Ġnewsp', 'aper'] +['âĢĶ', 'and'] +['<', 'tag'] +['Ġin', 'i'] +['Ġd', 'iverse'] +['Ignore', 'Case'] +['ĠU', 'r'] +['Ag', 'ent'] +['Ġb', 'ull'] +['.em', 'it'] +['(', 'Exception'] +['ar', 'Layout'] +['Ġincred', 'ibly'] +['ĠTr', 'ust'] +['={', '('] +['-', 'nav'] +['Ġe', 'quals'] +['Ġl', 'ady'] +['ĠP', 'od'] +['d', 'isc'] +['al', 'am'] +['ĠI', 'V'] +['â', 'Ļ'] +['iv', 'idual'] +['ph', 'i'] +['add', 'ed'] +['Ġdifficult', 'y'] +['Ġcomp', 'act'] +['ĠAction', 'Result'] +['c', 'ers'] +['_class', 'es'] +['Non', 'Null'] +['Ġqu', 'it'] +['Ġp', 'ou'] +['S', 'witch'] +['ir', 's'] +['-', 'test'] +['ĠK', 'ind'] +['ĠCal', 'endar'] +['Ġstream', 'ing'] +['}', "',"] +['S', 'W'] +['Ġst', 'ead'] +['oc', 'a'] +['Ġprov', 'ince'] +['Ġcol', 'span'] 
+['Ġperson', 'nel'] +['ĠE', 'mployee'] +['Ġprodu', 'cer'] +['Ġevery', 'where'] +['od', 'b'] +['Ð', 'Ł'] +['bs', 'olute'] +['act', 'ivate'] +['Ġgr', 'inding'] +['ĠBuild', 'ing'] +['ĠSand', 'ers'] +['(s', 'c'] +['ĠOff', 'set'] +['////////', '////'] +['}', ';čĊčĊ'] +['({', '"'] +['Ġscan', 'f'] +['ĠY', 'Y'] +['ĉdef', 'er'] +['Ġj', 'ew'] +['Ġrestrict', 'ions'] +['.m', 'p'] +['[', 'l'] +['ä¸', 'ĭ'] +['label', 's'] +['red', 'icate'] +['aw', 'esome'] +['Ġw', 'aves'] +['Ġcon', 'front'] +['Ġmeas', 'ured'] +['Ġdat', 'as'] +['_ex', 'it'] +['ot', 'ton'] +['Ġshould', 'er'] +['ask', 'a'] +['+', '#'] +['ĠĠĠĠĠĠĠĠĊ', 'ĠĠĠĠĠĠĠĠĊ'] +['Ġtro', 'ops'] +['ĠU', 'nd'] +['_c', 'ard'] +['w', 'ich'] +['Ġn', 'ous'] +['Ġ"/', '"'] +['s', 'b'] +['Ġcommunic', 'ations'] +['Ex', 'port'] +['Ġdec', 'ode'] +['th', 's'] +['inter', 'pret'] +['By', 'Name'] +['ĠSp', 'irit'] +['ed', 'ges'] +['O', 'LE'] +['ĠE', 'M'] +['t', 'it'] +['ĠTh', 'rough'] +['Ġb', 'io'] +['ĠP', 'ackage'] +['or', 'ne'] +['Ġ}', '.'] +['`', ';Ċ'] +['Ġok', 'ay'] +['ĠZe', 'aland'] +['ident', 'ity'] +['(n', 'ext'] +['ĠB', 'ang'] +['Lib', 'rary'] +['Ġheav', 'ily'] +['il', 'on'] +['Ġdi', 'pl'] +['Ġrot', 'ate'] +['put', 's'] +[')', "',Ċ"] +['ĠData', 'Table'] +['Ġmay', 'or'] +['.to', 'LowerCase'] +['Ġsome', 'how'] +['ĠNor', 'thern'] +['al', 'c'] +['Ġcap', 'abilities'] +['Ġv', 'ibr'] +['+', 'Ċ'] +['ĠS', 'u'] +['ĠRes', 'et'] +['_m', 'ean'] +['Ġc', 'ig'] +['.cl', 'oud'] +['ĠB', 'and'] +['ĠF', 'actory'] +['ĠAr', 'izona'] +['_', 'io'] +['op', 'her'] +['Ġconsc', 'ious'] +['ĠÃ', '¶'] +['\\', 'Controllers'] +['_s', 'peed'] +['ĠF', 'ac'] +['_C', 'om'] +['ĠB', 'ible'] +['w', 'en'] +['ED', 'IT'] +['Ġun', 'n'] +['ĠSt', 'aff'] +['ĠIn', 'n'] +['Ġmechan', 'ism'] +['ĠM', 'embers'] +['Ġmigration', 'Builder'] +["']", ".'"] +['.get', 'Int'] +['<', 'void'] +['ĉf', 'ree'] +['oid', 's'] +['\\', 'Support'] +['Ġautom', 'atic'] +['Ġch', 'ances'] +['Ð', '¶'] +['Ġcomp', 'licated'] +['[', 'row'] +['ah', 'oo'] +['Ġ}ĊĊ', 'ĊĊ'] +['Model', 's'] +['W', 'in'] +['Ġt', 'ape'] 
+['ir', 'us'] +['iz', 'on'] +['on', 'omy'] +['("', '_'] +[':', '.'] +['.st', 'ereotype'] +['(', 'env'] +['_re', 'ct'] +['(w', 'ith'] +['Ġassert', 'That'] +['Ġcon', 'straints'] +['put', 'y'] +['E', 'mployee'] +['T', 'D'] +['Ġgu', 'itar'] +['ĠJew', 's'] +['.pro', 'cess'] +['Ġf', 'iction'] +['ĠSh', 'ared'] +['âĶĢ', 'âĶĢ'] +['Ġprop', 'ag'] +['.N', 'et'] +['Ġachie', 'ved'] +['ĉ', 'Q'] +['Ġn', 'urs'] +['Sh', 'ared'] +['_FAIL', 'URE'] +['Ġbeh', 'aviour'] +['Ġcol', 's'] +['ism', 'o'] +['Ġfem', 'in'] +['Ġchalleng', 'ing'] +['Ġpost', 'ing'] +['enc', 'il'] +['Ġcapt', 'ured'] +['ĠD', 'ou'] +['(', 'word'] +['ĠTur', 'key'] +['pan', 'ies'] +['Ġre', 'putation'] +['ORM', 'AL'] +['Ġelig', 'ible'] +['prot', 'ocol'] +['id', 'as'] +['(f', 'rom'] +['Ġfin', 'ance'] +['-', 'per'] +['Ġg', 'otten'] +['H', 'A'] +['d', 'uration'] +['ĠP', 'arent'] +['Ġin', 'vent'] +['Ġre', 'start'] +['ол', 'ÑĮ'] +['r', 'ition'] +['(r', 's'] +['<', 'bool'] +['i', 'ert'] +['Ġmod', 'ification'] +['ĠT', 'X'] +['readcr', 'umb'] +['b', 'ank'] +['$', '/'] +['ĠMill', 'er'] +[']', '),Ċ'] +['.Check', 'ed'] +['Ġsac', 'r'] +['se', 'curity'] +['Ġp', 'ose'] +['ĠBr', 'ad'] +['Ġfit', 'ness'] +['Ġannounc', 'ement'] +['ation', 'Token'] +['Ġserv', 'es'] +['ne', 'ed'] +['Ġge', 'ometry'] +['AR', 'S'] +['æ', 'Ģ'] +['andid', 'ate'] +['Ġs', 'prite'] +['_s', 'plit'] +['We', 'ek'] +['ad', 'ies'] +['>', '(Ċ'] +['?>', '"'] +['Ġ///', 'Ċ'] +['Ġein', 'er'] +['Ġweek', 'ly'] +['ĉlog', 'ger'] +['_p', 'op'] +['_m', 'an'] +['Ġmigr', 'ations'] +['Ġask', 's'] +['Ġb', 's'] +['Ġfall', 's'] +['.W', 'here'] +['-', 'height'] +['_fe', 'ature'] +['.M', 'in'] +['Ġhy', 'per'] +['Ġvol', 'atile'] +['Ġtw', 'enty'] +['Typ', 'ography'] +['Un', 'able'] +['D', 'et'] +[',', 'f'] +['-m', 'od'] +['Ġsett', 'lement'] +['Ġcontract', 's'] +['n', 'ome'] +['B', 'ad'] +['ĠB', 'rian'] +['(user', 'name'] +['!!', '!!'] +['Ġh', 'ack'] +['.F', 'ield'] +['H', 'R'] +['ĠJ', 'ordan'] +['iz', 'a'] +['ĠÂ', 'ł'] +['ĠSh', 'er'] +['.', 'header'] +['(', 'other'] +['ĠD', 'ub'] +['(', 
'op'] +['ĠR', 'ound'] +['Ġv', 'ie'] +['Ġap', 'pl'] +['ĉ', 'J'] +['ĠIn', 'sert'] +['ĠL', 'P'] +['reg', 'on'] +['ĠM', 'PI'] +['Ġan', 'chor'] +['ac', 'a'] +['ø', 'r'] +['Ġa', 'de'] +['anch', 'or'] +['que', 'e'] +['ĠTree', 'Node'] +['Ġtarget', 'ed'] +['Ġla', 'id'] +['AB', 'EL'] +['v', 'et'] +['ĠOr', 'igin'] +['A', 'nt'] +['.', "');Ċ"] +['ex', 'pect'] +['ed', 'Reader'] +['ĠM', 'ajor'] +['Ġin', 'ch'] +['Com', 'par'] +['Ġpre', 'view'] +['Ġill', 'ness'] +['ĠCONTR', 'ACT'] +['ĠInd', 'epend'] +['u', 'uid'] +['Ġn', 'ome'] +['Ġt', 'c'] +['ĠA', 'venue'] +['is', 'an'] +['Ġph', 'rase'] +['_m', 'ove'] +['")', '['] +['Ġprov', 'ision'] +['Ġconcent', 'r'] +['_', 'IR'] +['ĠU', 't'] +['()', '+'] +['Ġn', 'as'] +['!', ','] +['ĠRob', 'in'] +['i', 'ations'] +['at', 'itude'] +['Ġp', 'x'] +['ĠWith', 'out'] +['/b', 'ash'] +['ek', 't'] +['re', 'ement'] +['Ob', 'server'] +['ĠReg', 'ion'] +['UBL', 'IC'] +['Ġ{', '//'] +['K', 'N'] +['å', '·'] +['Game', 'Object'] +['å', '¾'] +['enc', 'oding'] +['Ġ**', '*'] +['project', 's'] +['Ġt', 'k'] +['Ġche', 'ese'] +['EM', 'PL'] +['ar', 'o'] +['Ġا', 'ÙĦ'] +['Ġcons', 'ists'] +['ref', 'resh'] +['ure', 'au'] +['ĠSc', 'anner'] +['Ġso', 'il'] +['Ġfl', 'avor'] +['Data', 'Source'] +['Ex', 'ecute'] +['ени', 'е'] +['Ġsh', 'it'] +['åĪ', 'Ĩ'] +['<', 'any'] +['Ġretrie', 've'] +['Ġbelong', 's'] +['.st', 'rip'] +['abs', 'olute'] +['Ġexp', 'anded'] +['bo', 'y'] +['):', '-'] +['Ġresc', 'ue'] +['.J', 'Label'] +['Ġre', 'ly'] +['Ġal', 'ignment'] +['-f', 'amily'] +['Ġre', 'nd'] +['OLUM', 'N'] +['Ġb', 'orrow'] +['Ġqu', 'otes'] +['ĠL', 'ew'] +['Ġsh', 'ower'] +['ĠDE', 'LETE'] +['_lo', 'op'] +['!', '"ĊĊ'] +['ĉ', 're'] +['Ġattempt', 'ed'] +['aver', 'age'] +['ĠP', 'aint'] +['quis', 'ition'] +['ol', 'en'] +['Ġliter', 'ature'] +['ĠRe', 'ference'] +['_TEXT', 'URE'] +['ĠS', 'eg'] +['ĠInd', 'ust'] +['ct', 'ype'] +['D', 'UCT'] +['_H', 'OST'] +['ĠTr', 'ade'] +['Ġpl', 'ugins'] +['Ġbre', 'ast'] +['ul', 'se'] +['Ġcreat', 'ure'] +['ãģ', 'Ļ'] +['ĠW', 'i'] +['Ġsup', 'plied'] +['c', 'oll'] +['!', 
'("'] +['Ġfuck', 'ing'] +['ĠCh', 'rome'] +['ĠU', 'ri'] +['ĠN', 'ation'] +['Ġvert', 'ices'] +['T', 'HE'] +['ĠOr', 'iginal'] +['on', 'de'] +['Ġsh', 'arp'] +['Ġcook', 'ing'] +['Ġ{', '/*'] +['ĠPs', 'ych'] +['ĠH', 'ollywood'] +['=$', '_'] +['.D', 'ock'] +['Ġg', 'er'] +['Ġb', 'one'] +['_con', 'n'] +['_se', 'c'] +['ys', 'ics'] +['Ġ=', '"'] +['S', 'al'] +['s', 'f'] +['Ġdeep', 'ly'] +['ang', 'les'] +['T', 'erm'] +['b', 'ell'] +['ĠQu', 'ick'] +['ener', 'ation'] +['adio', 'Button'] +['åħ', '¥'] +['}čĊčĊ', 'čĊ'] +['Ġcapt', 'ion'] +['l', 'c'] +['ĠE', 'L'] +[',', '['] +['ĠĠĠĠĠĠ', 'čĊ'] +['ret', 't'] +['(m', 'ethod'] +['ĠFl', 'ash'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠĠ'] +['W', 'ISE'] +['.s', 'cale'] +['Ġrough', 'ly'] +['_', 'child'] +['m', 'emory'] +['ay', 'ing'] +['Ġinitial', 'ized'] +['in', 'ator'] +['а', 'ÑĢ'] +['Ġsc', 'alar'] +['ĠH', 'o'] +['ai', 'res'] +['(c', 'olumn'] +['.de', 'stroy'] +['P', 'ACK'] +['Ġh', 'em'] +['ang', 'el'] +['_S', 'UB'] +['.', 'qu'] +['Ġ', '×'] +['DE', 'FAULT'] +['pos', 'itories'] +['ĠL', 'ength'] +['ĠF', 'ast'] +['Ġsign', 'als'] +['Ġ//', '$'] +['ri', 'ers'] +['Ġd', 'ummy'] +['AN', 'Y'] +['Ġperson', 'ality'] +['Ġa', 'gricult'] +['Pl', 'atform'] +['ER', 'O'] +['ĠT', 'ra'] +['Ġen', 'orm'] +['ĉ', 'W'] +['Action', 'Result'] +['Ġa', 'ver'] +['[', 'str'] +["Ġ'", '--'] +['.S', 'printf'] +['Ġdeb', 'ut'] +['Ġ', 'Ñĩ'] +['h', 'ex'] +['_', 'utils'] +['Ġp', 'b'] +['U', 'ITableView'] +['Ġz', 'ur'] +['.', 'encode'] +['Ġv', 'ag'] +['.error', 's'] +['о', 'н'] +['Ġm', 'r'] +['ĠA', 'ward'] +['Ġc', 'pu'] +['Ġpress', 'ed'] +["'", 'est'] +['ĠF', 'estival'] +["'", 'T'] +['Ġa', 'k'] +['res', 'olve'] +['.m', 'e'] +['Ġn', 'ic'] +['Ġgen', 're'] +['Ġat', 'trib'] +['ĠMo', 'on'] +['Ġarr', 'ive'] +['ĠD', 'ating'] +['Ġt', 'm'] +['.Config', 'uration'] +['.', 'red'] +['Ġgl', 'm'] +['Ġst', 'ations'] +['sw', 'itch'] +['Ġt', 'ied'] +['äº', 'º'] +['Ġ/', '>Ċ'] +['Ġsubsequ', 'ent'] +['pos', 'able'] +['-fl', 'uid'] +['Ġth', 'orough'] +['Ġpublic', 'ly'] +['apt', 'ers'] 
+['ĠWil', 'son'] +['_P', 'RE'] +['y', 'ard'] +['ä', '¼'] +['ĉ', 'in'] +['Ġre', 'vers'] +['Ġbul', 'let'] +['cri', 'bed'] +['nes', 'ota'] +['Ġ($', '_'] +['ann', 'on'] +['c', 'ursor'] +['Ġclo', 'thing'] +['ĠM', 'ulti'] +[':', "',"] +['Ġv', 'ess'] +['ordin', 'ator'] +['Ġein', 'em'] +['C', 'annot'] +['Ġar', 'med'] +['ĉ', 'V'] +['ä¸', 'Ĭ'] +['.F', 'lat'] +['ĠS', 'ep'] +['ĠSub', 'ject'] +['_f', 'ont'] +['Ġcharacter', 'istics'] +['D', 'one'] +['el', 'n'] +['########', '####'] +['PO', 'S'] +['Ġd', 'ensity'] +['ĠPl', 'atform'] +['-', 'items'] +['Ġo', 'vers'] +['Ġpush', 'ing'] +['ç', '¤'] +['.Con', 'nection'] +['_', 'term'] +['Ġinitial', 'ization'] +['________________', '________________'] +['ç', '¬'] +['.d', 'ocument'] +['les', 'h'] +['ĉd', 'ocument'] +['ĠP', 'in'] +['ç', 'a'] +['Ġdefinition', 's'] +['.P', 'ath'] +['_W', 'RITE'] +['Ġ', 'ĉĊ'] +['?', '>ĊĊ'] +['Ġter', 'rible'] +['be', 'an'] +['ick', 'ets'] +['ĠS', 'V'] +['B', 'uy'] +['(t', 'ask'] +['Ġreg', 'ime'] +['g', 'oogle'] +['Ġcr', 'ack'] +['.vis', 'it'] +['N', 'UM'] +['ener', 'gy'] +['Ġstr', 'uck'] +['_s', 'ample'] +['.p', 'ayload'] +['Ġre', 'vis'] +['ĠSc', 'ene'] +['Ġp', 'g'] +['Ġbreak', 'fast'] +['URRE', 'NT'] +['.char', 'At'] +['_ex', 'ception'] +['ĠAnt', 'on'] +['Ġguid', 'elines'] +['Ġex', 'haust'] +['ĠFin', 'ancial'] +['Ġind', 'ent'] +['Ġdes', 'ktop'] +['H', 'idden'] +['F', 'ailure'] +['Ġpr', 'inciple'] +['Ġ', 'iv'] +['Ġse', 'ks'] +['n', 'etwork'] +['Ġnumber', 'Of'] +['ĠAl', 'bert'] +['ĉ', 'long'] +[',', '.'] +['Ġz', 'eros'] +['f', 'ade'] +['ĠT', 'yp'] +['ĠT', 'erm'] +['ĠAr', 'ts'] +['.App', 'lication'] +['Ġbeh', 'alf'] +['æĪ', '·'] +['Ġm', 'ere'] +['(`', '${'] +['Ġaware', 'ness'] +['elp', 'ers'] +['f', 'lix'] +['Ġwe', 'igh'] +['Ġestim', 'ates'] +['.', 'child'] +['/', 'O'] +['ĠBit', 'map'] +['.b', 'ottom'] +['Ġ************************************************************************', '**'] +['Ex', 'pect'] +['ent', 'o'] +['ĠFor', 'um'] +['ver', 'al'] +['Ġj', 'ail'] +['Ġab', 'ilities'] +['ĠH', 'OLD'] +['ĠC', 'it'] 
+['Ġd', 'ynam'] +['Ġgr', 'ay'] +['ĉĉĉĉĉĉĉĉ', 'ĉĉĉĉĉ'] +['.next', 'Int'] +['ant', 'ly'] +['ĠAR', 'ISING'] +['(', 'private'] +['Ġreject', 'ed'] +['ĠN', 'ic'] +['Ġle', 'ather'] +['=', '{Ċ'] +['aly', 'tics'] +['th', 'etic'] +['.T', 'op'] +['.P', 'age'] +['={', '`'] +['Ġ', ';čĊ'] +['de', 'pth'] +['m', 'ann'] +['W', 'D'] +['ĠS', 'om'] +['.R', 'ight'] +['Ġ)', '}Ċ'] +['Ġtr', 'ait'] +['Ã', 'Ĺ'] +['i', 'ac'] +['Ġr', 'v'] +['S', 'ample'] +['.X', 'ml'] +['opp', 'ed'] +['ĠÑ', 'Ħ'] +['list', 's'] +['Ġt', 'ear'] +['ivers', 'ary'] +['.c', 'ollection'] +['ĠCon', 'stitution'] +['ĠHttp', 'Response'] +['Ġbr', 'ill'] +['ĠP', 'rom'] +['h', 'over'] +['ĠM', 'iami'] +['Ġarg', 'ue'] +['_f', 'loat'] +['Ġ', 'ãĤ'] +['Ġn', 'at'] +['ĠT', 'al'] +['Ġinteg', 'ration'] +['(c', 'ur'] +['Ġrem', 'oving'] +['Ġco', 'eff'] +['ĠTh', 'ough'] +['Ġfore', 'cast'] +['ĠV', 'egas'] +['S', 'ite'] +['Ġtr', 'ab'] +['ĠHen', 'ry'] +['-', 'i'] +['Ġinvol', 'ves'] +['B', 'T'] +['Ġs', 'lo'] +['In', 'voke'] +['Ġl', 'ucky'] +['r', 'at'] +['Ġ?', 'Ċ'] +['Ġhand', 'led'] +['(f', 'd'] +['cont', 'ents'] +['ĠO', 'FF'] +['R', 'F'] +['Ġst', 'y'] +['ĠM', 'otor'] +['ter', 'y'] +['t', 'ax'] +['M', 'AP'] +['ĠMr', 's'] +['Ġph', 'ones'] +['ĠUI', 'View'] +['"))', ');Ċ'] +['(', 'dev'] +['ĠIr', 'ish'] +['Ġw', 's'] +['D', 'I'] +['_OFF', 'SET'] +['ĠEvent', 's'] +['Ġst', 'ages'] +['Ġ}', '//'] +['Ġhab', 'en'] +['ST', 'ANCE'] +['ĠS', 'in'] +['ĠM', 'oney'] +['(t', 'op'] +['Ġappoint', 'ment'] +['VER', 'SION'] +['met', 'adata'] +['_com', 'ment'] +['Ġcolle', 'agues'] +['map', 's'] +['â', 'ĺ'] +['Ċ', 'ĉĊ'] +['(', 'al'] +['_re', 'q'] +['Ġf', 'ut'] +['Ġarchitect', 'ure'] +['ĠWH', 'ETHER'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ'] +['_s', 'creen'] +['Ġstyle', 'Urls'] +['Ġmon', 'ster'] +['.', 'up'] +['ph', 'ia'] +['Ġprocess', 'or'] +['ĠT', 'err'] +['=', "',"] +['ĠMan', 'ufact'] +['ĠN', 'T'] +['k', 'el'] +['ib', 'ern'] +['ĉf', 'ile'] +['A', 'li'] +['rient', 'ation'] +['Ġ//', '!'] +['ap', 'ore'] +['ane', 'ous'] +['ĠC', 'reat'] +['f', 
'older'] +['Ġh', 'ay'] +['Sup', 'press'] +['(', 'left'] +['Ġe', 'uro'] +['Ġdis', 'claimer'] +['ustr', 'y'] +['sh', 'ips'] +['_f', 'd'] +['ĠF', 'a'] +['_in', 'sert'] +['Ġro', 'l'] +['if', 'ting'] +['ĠCom', 'ments'] +['_b', 'r'] +['Ġloss', 'es'] +['ĠAdd', 'ed'] +['ch', 'arg'] +['Ġп', 'о'] +['_s', 'ystem'] +['ĠS', 'ometimes'] +['ĠSp', 'ain'] +['(g', 'roup'] +['ial', 'is'] +['Ġdoll', 'ar'] +['ĠAr', 'gs'] +['qu', 'ires'] +['ĠT', 'en'] +['.s', 'css'] +['Ġsurv', 'ive'] +['us', 'age'] +['Ġj', 'un'] +['im', 'iter'] +['ï¼ģ', 'ĊĊ'] +['Ġfif', 'th'] +['t', 'oggle'] +['Ġdecl', 'ine'] +['($', '"'] +['(L', 'ong'] +['ing', 'e'] +['Ġpil', 'ot'] +['-l', 'ight'] +['-r', 'adius'] +['Ġpod', 'cast'] +['Ġnatur', 'ally'] +['P', 'ages'] +['ä¸', 'º'] +['ĠDes', 'pite'] +['Ġlight', 'ing'] +['Ġcr', 'ate'] +['ĠB', 'inary'] +['Ġredu', 'cing'] +['Ġe', 'leg'] +['ĠM', 'ouse'] +['ĠTest', 'Bed'] +['Ġbefore', 'Each'] +['_', 'ARRAY'] +['Red', 'irect'] +['Ġf', 'lood'] +['Ġsh', 'ips'] +['Ġelectric', 'ity'] +[')*', '('] +['ê', '¸'] +['ĠV', 'iet'] +['her', 'o'] +['Ġd', 'ia'] +['ĠK', 'ent'] +['he', 'art'] +['Ġthreat', 's'] +['_', 'acc'] +['Ġs', 'ymbols'] +['is', 'chen'] +['_in', 'st'] +['C', 'riterion'] +['ĠT', 'IM'] +['.', 'Height'] +['Ġ', 'âĢĻ'] +['();ĊĊ', 'Ċ'] +['Product', 's'] +['_S', 'P'] +['ĠC', 'y'] +['Ġdepend', 'ent'] +['est', 'e'] +['Ġdat', 'os'] +['d', 'it'] +['аÐ', '²'] +['IGN', 'AL'] +['Ġless', 'on'] +['">', "'"] +['ĠC', 'over'] +['ĠH', 'ope'] +['ĠT', 'imer'] +['Ġd', 'ad'] +['vid', 'ers'] +['ĠPh', 'ot'] +['/', '?'] +['rop', 'y'] +['om', 'ing'] +['as', 'ion'] +['Ġ\\', '('] +['ĠE', 'T'] +['ĠRe', 'ading'] +['Ġep', 'isodes'] +['l', 'm'] +['ech', 'a'] +['Ġne', 'uro'] +['Ġhar', 'mon'] +['Ġlib', 'eral'] +['-', 'ind'] +['D', 'ATA'] +['Ġevery', 'day'] +['Ġdiv', 'ided'] +['ĠActive', 'Record'] +['fig', 'ure'] +['U', 'A'] +['ä', '¹'] +['riend', 'ly'] +['te', 'ch'] +['.game', 'Object'] +['иÑĤ', 'ÑĮ'] +['Ġmo', 'on'] +['ft', 'ime'] +['Ġno', 'ch'] +['ĠT', 'ORT'] +['ĠV', 'M'] +['.in', 'itial'] +['(', 'child'] 
+['Ġmus', 'ical'] +['Ġo', 'c'] +['b', 'as'] +['ĠH', 'ay'] +['_l', 'ong'] +['Ġmem', 'set'] +['ile', 'y'] +['adel', 'phia'] +['S', 'V'] +['ro', 'at'] +['_t', 'x'] +['Ġl', 'on'] +['ĠngOn', 'Init'] +['b', 'p'] +['ĠGold', 'en'] +['AC', 'HE'] +['Ġwor', 'ried'] +['az', 'i'] +['E', 'ar'] +['T', 'ake'] +['(f', 'p'] +['bur', 'gh'] +['_', 'Data'] +['g', 'res'] +['ĠO', 'nt'] +['p', 'us'] +['Ġtrans', 'parent'] +['Ġp', 'ocket'] +['Ġr', 'am'] +['igr', 'ations'] +['.', 'čĊčĊ'] +['Ġ[', '('] +['Ġadopt', 'ed'] +['Ġreported', 'ly'] +['ĠD', 'ream'] +['Ġ}', '));Ċ'] +['los', 'ing'] +['Ġte', 'eth'] +['ĠBook', 's'] +['",', '&'] +['enn', 'y'] +['LE', 'MENT'] +['Ġg', 'el'] +['ĠPl', 'ant'] +['!', 'âĢĿ'] +['.h', 'ost'] +['ĠRep', 'ly'] +['re', 'ngth'] +['Ġrecogn', 'ition'] +['Ġ}}', '>Ċ'] +['L', 'A'] +['Ġmir', 'ror'] +['Ġassist', 'ant'] +['(', 'device'] +['Ġspirit', 'ual'] +['b', 'uilder'] +['Â', '§'] +['Ġou', 'tr'] +['Ġt', 't'] +['ĠP', 'ER'] +['Ġrad', 'ical'] +['Method', 's'] +['Ġp', 'ace'] +['ud', 'y'] +['Ġg', 'ut'] +['ĠG', 'reek'] +['Ġnon', 'atomic'] +['ĠP', 'aper'] +['_G', 'PIO'] +['Ġob', 'st'] +['.A', 'd'] +['viron', 'ments'] +['ĠS', 'ov'] +['(', 'con'] +['ĠTrans', 'action'] +['.', 'assign'] +['ĉc', 'atch'] +['el', 'ter'] +['Ġbit', 'coin'] +['_G', 'R'] +['ĠčĊ'] +['met', 'ic'] +['Ġtrans', 'formation'] +['åı', '·'] +['Ġr', 'gb'] +['istrib', 'utions'] +['Ġimp', 'licit'] +['/', 'in'] +['dest', 'ination'] +['аÑĤ', 'ÑĮ'] +['Z', 'ero'] +['Ġun', 'set'] +['.', 'where'] +['.g', 'o'] +['Ġform', 'ation'] +['Ġdeclar', 'ation'] +['()', 'čĊčĊ'] +['ĠEx', 'pl'] +['ĉĉĉ', 'ĠĠ'] +['/', 'pro'] +['.J', 'SON'] +['Ġdes', 'k'] +['.sub', 'str'] +['//----------------------------------------------------------------', '------------'] +['ly', 'n'] +['p', 'son'] +['dis', 'able'] +['ĠF', 'unc'] +['ĉ', 'Assert'] +['ĠM', 'ARK'] +['Ġdefe', 'at'] +['Ġbl', 'ind'] +['Ġconst', 'ants'] +['.', 'headers'] +['UIL', 'D'] +['Ġexp', 'enses'] +['P', 'ixel'] +['Ġh', 'r'] +['Ġf', 'el'] +['ĠEast', 'ern'] +['_d', 'el'] +['ĠC', 'ub'] +['Ġs', 
'q'] +['ĉc', 'ount'] +['ĠD', 'irectory'] +['Ġex', 'clus'] +['Ġhistor', 'ic'] +['Ġ', '------------------------------------------------'] +['Ġcom', 'position'] +['Ġdata', 'GridView'] +['ĠB', 'urn'] +['ĠB', 'C'] +['M', 'aster'] +['Ġsp', 'awn'] +['Ġbe', 'aring'] +['.Set', 'Active'] +['il', 'o'] +['Ġg', 'allery'] +['Ġfound', 'ed'] +['Ġav', 'ailability'] +['.s', 'qrt'] +['Ġp', 'es'] +['ĠD', 'OM'] +['m', 'ate'] +['O', 'ct'] +['Ġmatch', 'ed'] +['it', 'ivity'] +['Ġan', 'xiety'] +['.pr', 'ice'] +['ĠIn', 'stant'] +['ì', 'Ĭ'] +['Ġt', 'ut'] +['IC', 'ollection'] +['.sh', 'ared'] +['_s', 'ql'] +['t', 'bl'] +['lib', 'rary'] +['_de', 'stroy'] +['erm', 'al'] +['ĠNot', 'es'] +['ĠE', 'in'] +['Ġsou', 'thern'] +['ĠOTHER', 'WISE'] +['Ġmac', 'ro'] +['.l', 'ower'] +['cl', 's'] +['Content', 'View'] +['.l', 'ink'] +['const', 'ant'] +['ĠB', 'es'] +['Ġsome', 'body'] +['n', 'b'] +['">', '{'] +['(', 'local'] +['..', '...'] +['ĠN', 'ull'] +['m', 'x'] +['ĠÃ', '§'] +['Ġp', 'ause'] +['--------', '---'] +['_M', 'O'] +['ĠC', 'M'] +['Ġfor', 'Key'] +['ĠD', 'VD'] +['Ġclose', 'st'] +['_DE', 'VICE'] +['ĠSte', 'phen'] +['ĠB', 'BC'] +['ĠTr', 'avel'] +['P', 'aint'] +['ĠResult', 's'] +['ĠR', 'ule'] +['Ġt', 'p'] +['Ġrat', 'ings'] +['c', 'in'] +['c', 'sv'] +['>', '/'] +['ĠG', 'OP'] +['l', 'ad'] +['Ġ', 'ÑĢ'] +['Ġindex', 'Path'] +['m', 'atrix'] +['=', 'f'] +['ars', 'ed'] +['Ġ}', ');'] +['ĠC', 'os'] +['ĠS', 'core'] +['Ġt', 'ak'] +['ĠE', 'SP'] +['ĠIN', 'C'] +['_N', 'ULL'] +['-f', 'lex'] +['"]', '['] +['int', 'o'] +['el', 'and'] +['Author', 'ization'] +['_F', 'ALSE'] +['Ġg', 'ate'] +['Ġv', 'id'] +['ist', 'ent'] +['T', 'IME'] +['Ġre', 'write'] +['Ġt', 'ie'] +['Ġarch', 'ive'] +['.event', 's'] +['.get', 'Parameter'] +['ĠPer', 'mission'] +['Ġprogram', 'me'] +['Ġ', 'é'] +['j', 'ud'] +['Ġcam', 'eras'] +['(s', 'ys'] +['ĠSy', 'rian'] +['Ġimpro', 'vements'] +['Ġh', 'ip'] +['Ġsu', 'icide'] +['Ġsch', 'olar'] +['Ġcompat', 'ible'] +['rem', 'ote'] +['.d', 'own'] +['F', 'UNCTION'] +['Ġman', 'aging'] +['ĠUI', 'Kit'] +['.', 'raw'] 
+['>>', '>>'] +['Ġdem', 'ands'] +['ell', 'ite'] +['Ġd', 'ent'] +['ĠM', 'icro'] +['åı', 'ĸ'] +["']", '[$'] +['ĠI', 'E'] +['im', 'ension'] +['Ġt', 'rem'] +['Ġg', 'ained'] +['.w', 'ith'] +['.', 'ok'] +['h', 'ou'] +['Ġb', 'om'] +['amp', 'aign'] +['Ġjoin', 'ing'] +['f', 'ish'] +['Ġadd', 'Subview'] +['Ġnor', 'thern'] +['.c', 'or'] +['ore', 't'] +['D', 'ie'] +['in', 'ish'] +['_com', 'p'] +['Ġatt', 'ended'] +['Ġcoll', 'apse'] +['ĠS', 'S'] +['ac', 'ent'] +['_E', 'QUAL'] +['ĠDe', 'ep'] +['R', 'GB'] +['ĉ', 'test'] +['ol', 'ves'] +['us', 'et'] +['Un', 'ityEngine'] +['w', 'riter'] +['Res', 'olver'] +[',', '%'] +['if', 'ference'] +['_re', 'move'] +['ond', 'a'] +['Ġfem', 'me'] +['de', 'code'] +['Br', 'anch'] +['Ġfl', 'ush'] +['Ġinnov', 'ative'] +['Test', 's'] +["Ġ['", './'] +['Ġcover', 'ing'] +['.', 'admin'] +['ultip', 'art'] +['(l', 'ambda'] +['', 'namespace'] +['ĠS', 'port'] +['Ġ!', '('] +['ac', 'les'] +['Ġde', 'pression'] +['ĠK', 'ong'] +['Ġp', 'ert'] +['ĠCon', 'n'] +['ĠOther', 'wise'] +['/', 'home'] +['s', 'upported'] +['Ġp', 'ink'] +['Ġinv', 'ited'] +['ñ', 'os'] +['_en', 'abled'] +['Ġ-', 'Ċ'] +['F', 'W'] +['en', 'ers'] +['ĠM', 'Y'] +['Ġsuggest', 'ions'] +['Can', 'vas'] +['Ġf', 'er'] +['ĠMarket', 'ing'] +['@', 'Test'] +['unt', 'u'] +['ĠV', 'en'] +['ĠC', 'ou'] +['iv', 'als'] +['Don', 'ald'] +['lim', 'ited'] +['ĉĉĉĉĉĉ', 'Ċ'] +['Ġanal', 'yst'] +['(', 'entry'] +['Ġrepresent', 'ative'] +['_at', 'tributes'] +['Ġf', 'ur'] +['.h', 'ide'] +['res', 'p'] +['ado', 'res'] +['rid', 'es'] +['ĠJ', 'osh'] +['ro', 'bot'] +['ĠN', 'AT'] +['Ġs', 'esso'] +['Ġintegr', 'ated'] +[':', 'true'] +['part', 's'] +['Ġst', 'upid'] +[':', 'event'] +['@end', 'section'] +['Ġp', 'u'] +['.T', 'able'] +['ĠY', 'ii'] +['`', ';ĊĊ'] +['Ġcl', 'ang'] +['="', '">'] +['eng', 'an'] +['_param', 'eters'] +['.int', 'ernal'] +['ĠMod', 'ern'] +['Ġmet', 'ric'] +['Ġsem', 'i'] +['={', '{Ċ'] +['.am', 'azon'] +['ĠB', 'B'] +['aint', 'y'] +['view', 'port'] +['Ġstart', 'Activity'] +['dis', 'patch'] +['****', '*'] +['Ġfl', 'av'] 
+['iffer', 'ent'] +['[', 'this'] +['Ġst', 'ake'] +['Ġarg', 'ued'] +['vious', 'ly'] +['.w', 'ork'] +['ĠO', 'ak'] +['O', 'ld'] +['(', 'async'] +['not', 'es'] +['Ġfl', 'ip'] +['Ġdis', 'ag'] +['ĠT', 'E'] +['ĉ', 'error'] +['<', "'"] +['Ġ»', 'ĊĊ'] +['Ġfilter', 'ed'] +['ĠM', 'ach'] +['Ġh', 'ung'] +['_d', 'ump'] +['_s', 'amples'] +['-dis', 'miss'] +['Ġr', 'ay'] +['Im', 'plemented'] +['D', 'K'] +['Ġj', 'ed'] +['Ġbreak', 's'] +['Ġf', 'its'] +['.', 'gr'] +['ĠZ', 'ero'] +['or', 'o'] +['Ġequ', 'ally'] +["Ġ'", '['] +['Ġconcern', 'ing'] +['<', 'meta'] +['play', 'ers'] +['_P', 'OS'] +['_s', 'im'] +['J', 'an'] +['Ġyour', 's'] +['ĉ', 'N'] +['Ġsp', 'ir'] +['Ġch', 'ampion'] +['ĠAn', 'alysis'] +['ap', 'a'] +['ĠNS', 'Log'] +['_l', 'ines'] +['ñ', 'a'] +['ĉĉ', 'ĠĠĠĠĠĠĠ'] +['.S', 'c'] +['Re', 'p'] +['etro', 'it'] +['ur', 'able'] +['M', 'IT'] +['com', 'pat'] +['own', 'ed'] +['_ind', 'ices'] +['],', 'čĊ'] +['Ġdis', 'covery'] +['ĠDie', 'go'] +['ob', 'i'] +['.', 'Index'] +['Ġtrend', 's'] +['PL', 'AY'] +['.n', 'o'] +['Ġl', 'ens'] +['_c', 'fg'] +['Ġan', 'no'] +['ag', 'an'] +['Ġperiod', 's'] +['ter', 'ms'] +['y', 'z'] +['Ġattack', 'ed'] +['ib', 'ration'] +['PEC', 'IAL'] +['_', 'grad'] +['Ġaccord', 'ance'] +['.Read', 'Line'] +['.de', 'vice'] +['ri', 'x'] +['.', 'container'] +['m', 'ay'] +['erc', 'ise'] +['ĠL', 'u'] +['Ġr', 'g'] +['ĠÑģ', 'ÑĤ'] +['ĉĉĊ', 'ĉĉĊ'] +['(', 'un'] +['TERN', 'AL'] +['Ġless', 'ons'] +['Ġalleg', 'ations'] +['Ġtrans', 'mission'] +['.Re', 'f'] +['M', 'obile'] +['ĠT', 'ournament'] +['ĠN', 'ut'] +['ĠG', 'a'] +['ĠCap', 'ital'] +['def', 'inition'] +['-', 'exp'] +['c', 'lean'] +['Ġfant', 'asy'] +['Ġenh', 'ance'] +['ent', 'ence'] +["']", ':Ċ'] +['ack', 'ets'] +['Ġcelebr', 'ate'] +['@', '",'] +['Serialize', 'Field'] +['Ġarray', 's'] +['t', 'b'] +['ĉ', 'st'] +['[', 'assembly'] +['(', 'reg'] +['.c', 'ategory'] +['Ġimpro', 'ving'] +['Ġsal', 'ope'] +['Byte', 'Array'] +['Or', 'iginal'] +['Ġ[', '{Ċ'] +['åĽ', 'ŀ'] +['ĠCl', 'in'] +['oen', 'ix'] +['ĠS', 'amsung'] +['Ġmaint', 'ained'] +['Ġag', 
'enda'] +['f', 'ail'] +['Ġpres', 'ents'] +['Ġtim', 'ing'] +['.m', 'ark'] +["'", '><'] +['Ġprom', 'ot'] +['Ġin', 'cl'] +['_', 'only'] +['ë¥', '¼'] +['ĠAtt', 'orney'] +['-', 'date'] +['Ġlands', 'cape'] +['Ġf', 'u'] +['S', 'Y'] +['.p', 'rop'] +['ĠA', 'rr'] +['p', 'ag'] +['Parallel', 'Group'] +["':", 'čĊ'] +['Ġlog', 's'] +['a', 'unch'] +['unc', 'i'] +['n', 'ama'] +['Table', 'Cell'] +['iss', 'ues'] +['.', '{'] +['ec', 'urity'] +['_ex', 'ec'] +['old', 's'] +['Ġhost', 's'] +['Ġpro', 'to'] +['_', 'import'] +['_s', 'ort'] +['ĠB', 'ow'] +['ĠN', 'ormal'] +['ĠF', 'arm'] +['.create', 'ParallelGroup'] +['R', 'otation'] +['.', 'err'] +['Ġp', 'leased'] +['it', 'age'] +['.W', 'h'] +['ĉĉ', 'ĠĠĠĠ'] +['M', 'R'] +['ĠM', 'ORE'] +['ĠN', 'atural'] +['_', 'transform'] +['B', 'ASE'] +['ener', 'al'] +['ut', 'down'] +['.common', 's'] +['W', 'T'] +['Ġa', 'an'] +['.', 'Result'] +['d', 'og'] +['Ġclick', 'ing'] +['),', 'ĊĊ'] +['#', 'line'] +['Oper', 'ator'] +['Ġc', 'iv'] +['Ġm', 'erg'] +['ob', 'uf'] +['ng', 'then'] +['Ġ[', '{'] +['Ġcan', 'cell'] +['tr', 'igger'] +['.', ':'] +['W', 'ORK'] +['decl', 'are'] +['Ġdecre', 'ase'] +['ÅĽ', 'ci'] +['lo', 'om'] +['.N', 'one'] +['ĠM', 'I'] +['ĠJ', 'ason'] +['Ġhealth', 'care'] +['iam', 'ond'] +['s', 'ylvania'] +['*', 'x'] +['ĠR', 'a'] +['[', 'b'] +['Ġprint', 'ing'] +['ph', 'abet'] +['ĠLab', 'our'] +['op', 'per'] +['Ġz', 'ijn'] +['-t', 'arget'] +['_F', 'UNCTION'] +['Ġo', 'ct'] +['ени', 'Ñı'] +['åľ', '¨'] +['Ġwest', 'ern'] +['Ġcomput', 'ers'] +['ĠR', 'ET'] +['Hash', 'Map'] +['[', 'String'] +['get', 'Value'] +['_D', 'ATE'] +['.N', 'ext'] +['ĠF', 'if'] +['é', 'l'] +['ick', 'ed'] +['æ', 'İ'] +['-M', 'M'] +['Ġ{', 'ĊĊĊ'] +['Ġcontact', 's'] +['Ġdig', 'its'] +['Pro', 'du'] +['Ġunus', 'ual'] +['Ġrapid', 'ly'] +['t', 'ures'] +['Ġang', 'ry'] +['c', 'ancel'] +['xx', 'xx'] +['_p', 'arser'] +['id', 'ity'] +['_P', 'REFIX'] +['Ġme', 'hr'] +['Ġrare', 'ly'] +['et', 'he'] +['op', 'es'] +['Ġ%', '.'] +['work', 's'] +['Ġthe', 'ta'] +['Ġcontrib', 'ution'] +['ĠT', 'ony'] +['Ġsqu', 
'ad'] +['аÐ', '¹'] +['Ġî', 'n'] +['th', 'ere'] +['out', 'ed'] +['ĉ', 'q'] +['Ļ', 'Ĥ'] +['g', 'ood'] +['L', 'I'] +['é¡', 'µ'] +['ĠL', 'iving'] +['iz', 'abeth'] +['Ġk', 't'] +['ĠD', 'allas'] +[']', '],Ċ'] +['Ġ/', '>ĊĊ'] +['Ġrais', 'ing'] +['/r', 'outer'] +['_g', 'ame'] +['ĠC', 'UR'] +['z', 'ens'] +['.', 'es'] +['Ġfont', 'Weight'] +['(f', 'unc'] +['not', 'ification'] +["Ġ'../../", '../'] +['Ġbl', 'ame'] +['ãĢĤ', 'ĊĊĊĊ'] +['an', 'co'] +['Id', 'entity'] +['f', 'ollow'] +['Ġart', 's'] +['x', 's'] +['Ġofficial', 'ly'] +['ĠSt', 'udio'] +['Ġrecommend', 'ations'] +['Ġloc', 'ale'] +['Ġam', 'ateur'] +['ĠEn', 'able'] +['Ġcap', 's'] +['.', 'End'] +['-', 'add'] +['_g', 'shared'] +['ĠC', 'T'] +['For', 'ce'] +['Ċ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĊ'] +['Ġor', 'ange'] +['Ġl', 'p'] +['Ġanswer', 'ed'] +['.G', 'rid'] +['Ġd', 'ual'] +['Ġstrateg', 'ic'] +['Ġnob', 'ody'] +['Ġf', 'atal'] +['_', 'est'] +['(', 'el'] +['Ġì', 'ł'] +['ĠB', 'udd'] +['A', 'IT'] +['_f', 'actor'] +['-', 'one'] +['ĠH', 'AVE'] +['"', 'čĊčĊ'] +['Pro', 'f'] +['Ġä', 'r'] +['str', 'ings'] +['Ġdir', 'ty'] +['ĠF', 'ace'] +['ĠB', 'egin'] +['ĠB', 'us'] +['Ġw', 'is'] +['åŃ', 'Ĺ'] +['Ġspe', 'aker'] +['Ġcar', 'rier'] +['ĠO', 'm'] +['Ġhad', 'n'] +['All', 'ow'] +['::', '__'] +['Ġver', 'b'] +['ĠCom', 'plete'] +['ĠE', 'asy'] +['Ġb', 'ills'] +['ĠĠ', 'ĊĊ'] +['Vert', 'ical'] +['Ġpr', 'on'] +['ĠDef', 'ine'] +['Ġlook', 'up'] +['variable', 's'] +['Ġpand', 'as'] +['um', 'es'] +['Ġinn', 'oc'] +['Ġset', 'Up'] +['ĠCh', 'ampionship'] +['art', 'ist'] +['ĠC', 'Type'] +['F', 'oundation'] +['à¹', 'Ī'] +['ĠSet', 'up'] +['Ġrec', 'ipes'] +['ĠU', 'IColor'] +['ĠF', 'ight'] +['Ġauthor', 'ized'] +['_c', 'lick'] +['_s', 'uccess'] +['ang', 'an'] +['ĠMount', 'ain'] +['ĠDo', 'ctor'] +['Ġeg', 'g'] +['ĠMedic', 'ine'] +['c', 'les'] +['`', '.Ċ'] +['[', 'int'] +['d', 'ashboard'] +['ĠApp', 'ro'] +['-d', 'r'] +['Ġprodu', 'ces'] +['Ġrent', 'al'] +['Ġre', 'load'] +['Ġarr', 'ival'] +['sp', 'ot'] +['Ġund', 'ert'] +['Ġequ', 'ipped'] +['Ġpro', 'ved'] +['Ġcent', 'ers'] +['Ġdef', 'ines'] 
+['al', 'so'] +['Ġop', 'acity'] +['ĠUn', 'fortunately'] +['ĠIll', 'inois'] +['Ġн', 'е'] +['ĠTem', 'ple'] +['ĠTr', 'ail'] +['ĠK', 'elly'] +['Ġmeasure', 'ment'] +['Ġsepar', 'ated'] +['-c', 'ircle'] +['H', 'ey'] +['ĠRE', 'AD'] +['ig', 'its'] +['Ġ', 'ib'] +['ĠM', 'OD'] +['atter', 'y'] +['аÐ', '·'] +['Ġv', 'end'] +['ен', 'ÑĤ'] +['ĠHttp', 'Client'] +['s', 'afe'] +['_A', 'SS'] +['ic', 'it'] +['ĠCon', 'struct'] +['ĠC', 'lo'] +['ĠS', 'ix'] +['_T', 'OKEN'] +['(b', 'lock'] +['Ġwarn', 'ed'] +['/*', '!'] +['!', 'Ċ'] +['Ġinnov', 'ation'] +['_', '"'] +['Ġ', ');čĊčĊ'] +['Ġsp', 'ots'] +['Ġcho', 'osing'] +['.c', 's'] +['Ġflex', 'ible'] +['U', 'Int'] +['Ġscr', 'atch'] +['-', 'al'] +['Ġf', 'estival'] +['Ġout', 'standing'] +['================================', '================'] +['M', 'ean'] +['ĠO', 'regon'] +['s', 'ymbol'] +['.', 'account'] +['d', 'ney'] +["''", "'"] +['!', '",'] +['Ġpart', 'icle'] +['Ã', 'ĥ'] +['[', 'MAX'] +['IV', 'ER'] +['ER', 'ENCE'] +['NS', 'Mutable'] +['ĠColum', 'bia'] +['_', 'ĊĊ'] +['.f', 'r'] +['Ġc', 'ogn'] +['V', 'R'] +['ĠMethod', 's'] +['ĠM', 'ade'] +['ĠB', 'R'] +['ĠEl', 'se'] +['Ġeg', 'gs'] +['Ġsw', 'ing'] +['ĠIn', 'v'] +['Ġdise', 'ases'] +['Ġf', 'irms'] +['Ġle', 'mma'] +['}`', ');Ċ'] +['l', 'ings'] +['Ġg', 'ym'] +['umin', 'um'] +['.T', 'rim'] +['M', 'em'] +['Ġcritic', 'ism'] +['ibern', 'ate'] +['_T', 'X'] +['ion', 'i'] +['Ġguid', 'ance'] +['Ġrepeated', 'ly'] +['Ġsup', 'plier'] +['Ġpaint', 'ing'] +['.F', 'ragment'] +['ed', 'Exception'] +['Ġw', 'iring'] +['Ġcour', 'ts'] +['W', 'EB'] +['æľ', 'ī'] +['\\', '.'] +['ill', 'ance'] +['Ġb', 'rows'] +['ĠP', 'attern'] +['PL', 'ICATION'] +['ĠSum', 'mer'] +['Ch', 'ain'] +['Ġc', 'ute'] +['mer', 'cial'] +['Ġd', 'il'] +['ĠFrank', 'lin'] +['ĉg', 'lobal'] +['IN', 'CLUDING'] +['h', 'istory'] +['Ġl', 'st'] +['Q', 't'] +['SD', 'L'] +['al', 'ia'] +['i', 'ere'] +['(', '...'] +['ĉc', 'in'] +['iff', 's'] +['vel', 'ope'] +['ĠR', 'oot'] +['cl', 'uster'] +['User', 'Name'] +['ign', 'e'] +['<', 'S'] +['Ġf', 'est'] +['Ġindic', 'ating'] 
+['ke', 'eper'] +['Ġc', 'ada'] +['é', 'g'] +['cons', 'in'] +['ĠG', 'B'] +['Ġl', 'b'] +['em', 'ony'] +['-icon', 's'] +['_d', 'oc'] +['Act', 'or'] +['e', 'lem'] +['.De', 'lete'] +['Ġin', 'fection'] +['ĠPriv', 'acy'] +['Ġgreat', 'ly'] +['ĠP', 'os'] +['ĠT', 'reat'] +['Fl', 'ow'] +['Ġattract', 'ive'] +['ĠMar', 'c'] +['s', 'udo'] +['tes', 'y'] +['-', 'an'] +['ab', 'ama'] +['ĠW', 'ould'] +['Ġsu', 'ck'] +['index', 'Path'] +['ĠE', 't'] +['T', 'imes'] +['Ġclub', 's'] +['_ass', 'oc'] +['Ġac', 'quired'] +['("', ':'] +['Ġint', 'ense'] +['.m', 'aps'] +['Ex', 'pected'] +['T', 'oggle'] +['Ġa', 'y'] +['Ġl', 'ifestyle'] +['-c', 'alled'] +['ĠS', 'now'] +['V', 'olume'] +['Ġcann', 'abis'] +['ĠD', 'irection'] +['ĠLim', 'ited'] +['-s', 'pecific'] +['Ġd', 'owntown'] +['/', 'icons'] +['Ġre', 'ven'] +['L', 'eg'] +['=', 'null'] +['Key', 'board'] +["')", ').'] +['Ġ""', ';čĊ'] +['Ġatt', 'itude'] +['.n', 'avigate'] +['-', 'error'] +['AM', 'PLE'] +['ĠJ', 'ay'] +['v', 'r'] +['c', 'ow'] +['.com', 'pile'] +['Ġmem', 'ories'] +['_m', 'ark'] +['ĠMin', 'nesota'] +['Ġk', 'osten'] +['Ġprob', 'ability'] +['w', 'arning'] +['Ġgen', 'etic'] +['F', 'ixture'] +['ĠHash', 'Set'] +['N', 'ombre'] +['_m', 'onth'] +['Æ', '°'] +['-', 'start'] +['xy', 'gen'] +['ĉ', 'ft'] +['i', 'agnostics'] +['ĠMat', 'thew'] +['Ġconcept', 's'] +['Ġcon', 'str'] +['.', 'State'] +['и', 'н'] +['N', 'ov'] +['Î', '±'] +['ĠP', 'anel'] +['ä¸', 'ª'] +['com', 'pare'] +['>', '()Ċ'] +['Ġapply', 'ing'] +['Ġprom', 'ised'] +['Ġo', 'x'] +['nc', 'ia'] +['ĠValid', 'ation'] +['ort', 's'] +['_c', 'ur'] +['e', 'lect'] +['ey', 'e'] +['(', 'Data'] +['Ġreport', 'er'] +['ĠB', 'uff'] +['Ġs', 'r'] +['Ġ"', ';'] +['ick', 'y'] +['Ġtemp', 'or'] +['S', 'N'] +['Ġres', 'ident'] +['pi', 'res'] +['ys', 'ical'] +['Ġend', 'orse'] +['ĠS', 'ong'] +['is', 'Empty'] +['le', 'et'] +['_', 'util'] +['Ġdist', 'ingu'] +['ĠT', 'alk'] +['ĠM', 'ot'] +['(', 'default'] +['.A', 'rg'] +['gorith', 'ms'] +['_', 'words'] +['im', 'mer'] +['_res', 'et'] +['f', 'amily'] +['W', 'W'] +['Ġsav', 
'ings'] +['ĠâĢ', 'Ŀ'] +['_en', 'able'] +['side', 'bar'] +['Run', 'ning'] +['Ġal', 'i'] +['Ġtest', 'im'] +['Ġwarn', 'ings'] +['ĠCh', 'em'] +['ĠEx', 'it'] +['Ġfound', 'er'] +['pect', 'or'] +['Ġr', 'm'] +['_d', 'ataset'] +['ĠD', 'as'] +['Ġh', 'an'] +['Get', 'ty'] +['á', 'l'] +['Ġn', 'y'] +['Ġpo', 'verty'] +['Ġresult', 'ed'] +['.b', 'y'] +['ĠVis', 'it'] +['Ġobt', 'aining'] +['/', "'.$"] +['ĠĠĠĠĠĠĠĠĠĠĠ', 'Ċ'] +['sh', 'all'] +['_LE', 'FT'] +['UI', 'Image'] +['_', 'Name'] +['h', 'ave'] +['ĠN', 'ob'] +['l', 'r'] +['-', 'footer'] +['Ġn', 'aked'] +['ĠG', 'arden'] +['\\F', 'acades'] +['Ġgrad', 'uate'] +['Ġfranch', 'ise'] +['pl', 'ane'] +['Ġcontrib', 'utions'] +['Ġstring', 'With'] +['Ġc', 'rypto'] +['Ġmov', 'ements'] +['ath', 'ers'] +['Ġlif', 'etime'] +['Ġcommunic', 'ate'] +['j', 'ar'] +['ĠFr', 'agment'] +['_', 'IF'] +['ĠN', 'avy'] +['ĠF', 'igure'] +['Ġsim', 'ulation'] +['_st', 'op'] +['Ġreport', 'ers'] +['Ġvers', 'us'] +['aj', 'a'] +['ĠÎ', '±'] +['Ġgovern', 'or'] +['List', 'Item'] +['Ġse', 'aled'] +['.Back', 'ground'] +['ed', 'i'] +['ash', 'ing'] +['Ġl', 'ip'] +['ĠI', 'h'] +['mer', 'ge'] +['Ġn', 'ec'] +['el', 'ocity'] +['ATE', 'G'] +['Ġse', 'eds'] +['Ġflo', 'ating'] +['_F', 'A'] +['w', 'alk'] +['ĉ', 'user'] +['_de', 'pth'] +['Ġw', 'age'] +['@', 'app'] +['N', 'il'] +['(', '["'] +['(', 'vector'] +['Ġsecret', 'ary'] +['Ġj', 'Panel'] +['ve', 'z'] +['³³', '³³'] +['d', 'irection'] +['ĠE', 'P'] +['Ġh', 'unt'] +['Json', 'Property'] +['ĠP', 'ORT'] +[']', '",'] +['аÐ', '¿'] +['ĠFore', 'ign'] +['pan', 'ic'] +['Ġtri', 'als'] +['ĠA', 'le'] +['Ġr', 'ural'] +['-', 'value'] +['author', 'ized'] +['ĠScot', 'land'] +['.d', 'rop'] +['ĠM', 'T'] +['ç', '±'] +['row', 'th'] +['File', 'Path'] +['Ġrec', 'all'] +['if', 'le'] +['Ġc', 'el'] +['ĠSE', 'LECT'] +['k', 'n'] +['_c', 'ase'] +['Ġc', 'rop'] +['s', 'ure'] +['p', 'ot'] +['IC', 'S'] +['Ġst', 'em'] +['Ġindust', 'ries'] +['P', 'ut'] +['Ġa', 'ber'] +['road', 'cast'] +['Icon', 's'] +[')', '")Ċ'] +['æĪIJ', 'åĬŁ'] +['g', 'ui'] +['Ġassum', 'ed'] +['Ġr', 
'x'] +['E', 'A'] +['è', '§'] +['EL', 'L'] +['Ġdo', 'se'] +['Ġin', 'e'] +['Ġde', 'eper'] +['l', 'ider'] +['Ġord', 'inary'] +['Ġg', 'olf'] +['_IM', 'AGE'] +['ĠN', 'AME'] +['(m', 'odule'] +['Ġat', 'om'] +['Ġbel', 't'] +['Ġoff', 'ices'] +['b', 'eta'] +['Ġphilosoph', 'y'] +['(', 'JSON'] +['-f', 'ield'] +['Ġintrodu', 'ce'] +['Ġconven', 'ience'] +['opt', 'im'] +['>', '"Ċ'] +['ath', 'y'] +['Ġemploy', 'er'] +['qu', 'ate'] +['Ġed', 'ited'] +['Arg', 'uments'] +['ĠN', 'ations'] +['__', ')'] +['Ġno', 'se'] +['ĠS', 'ample'] +["'", ')ĊĊĊ'] +['Ġc', 'ake'] +['.get', 'Attribute'] +['H', 'D'] +['Mod', 'ified'] +['Ġpredict', 'ed'] +['Å', 'Ħ'] +['an', 'ie'] +['S', 'orry'] +['(d', 'oc'] +['w', 'ind'] +['ie', 've'] +['Ġprov', 'isions'] +['AT', 'ER'] +['OT', 'E'] +['M', 'Y'] +['.A', 'utowired'] +['ĠB', 'ath'] +['.', 'Boolean'] +['Ġback', 'end'] +['.M', 'ouse'] +['ater', 'al'] +['p', 'aper'] +['Con', 'st'] +['ĠV', 'R'] +['_', 'entity'] +['_C', 'TRL'] +['ĠProte', 'ction'] +['ĠG', 'M'] +['ĠStud', 'y'] +['Ġsou', 'p'] +['ot', 'ime'] +["'", 'use'] +[']', '"'] +['/', 'users'] +['a', 'ug'] +['ĠH', 'ong'] +['_n', 'orm'] +['ãģ', '¨'] +['Ġse', 'cre'] +['(B', 'uild'] +['ĠCon', 'tract'] +['ol', 'as'] +['Ġsa', 'uce'] +['Ġaggress', 'ive'] +['Ġrac', 'ial'] +['char', 'acter'] +['@', '@'] +['Ġcomp', 'ile'] +['ĠV', 'oid'] +['_re', 'm'] +['_m', 'emory'] +['k', 'k'] +['Ġm', 'ic'] +['S', 'ame'] +['U', 'tility'] +['ĠH', 'tml'] +['ĠX', 'ml'] +['Read', 'y'] +['Ġg', 'all'] +['Ġalleged', 'ly'] +['ĉĉĉĉ', 'ĠĠĠ'] +['ĠMet', 'al'] +['ĠPerson', 'al'] +['Ġborder', 'Radius'] +['rx', 'js'] +['object', 's'] +['Ġwant', 'ing'] +['Ġb', 'owl'] +['v', 'endor'] +['offset', 'of'] +['ĠR', 's'] +['ĠR', 'ating'] +['Ġr', 'ally'] +['_N', 'ODE'] +['ĠM', 'ix'] +['Ġadvert', 'is'] +['Ġnarr', 'ative'] +['s', 'al'] +['Ġm', 'c'] +['SE', 'rror'] +['Ġf', 'ingers'] +['Ġaccom', 'pany'] +['Ġt', 'ired'] +['Ġstr', 'ide'] +['Ġgu', 'i'] +['el', 'ist'] +['Loc', 'ale'] +['Ġrele', 'ases'] +['ik', 'ing'] +['Ġan', 'ger'] +['))', ')ĊĊ'] +['alle', 'st'] 
+['Sum', 'mary'] +['(', 'O'] +['(f', 'or'] +['Ġbasket', 'ball'] +['Ġroad', 's'] +['ĠInst', 'all'] +['ĠF', 'ab'] +['it', 'map'] +['Ġ)', ')Ċ'] +['Ġinter', 'section'] +['ighb', 'or'] +['ĠB', 'ry'] +['ĠHER', 'E'] +['So', 'ftware'] +['elf', 'are'] +['ac', 's'] +['Ġtrail', 'er'] +['.get', 'Class'] +['ch', 'ars'] +['Ġreg', 'ulation'] +['Ġref', 'ers'] +['Ġde', 'struction'] +['Ġcontin', 'uous'] +['ĠAust', 'in'] +['é', '¢'] +['ak', 'an'] +['.w', 'indow'] +['ĠTem', 'plates'] +['Ġabs', 'ence'] +[':', 'n'] +['Ġdis', 'order'] +['fl', 'ash'] +['Ġde', 'let'] +['bo', 'ards'] +['ĠĠ', 'ĉ'] +['RO', 'P'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ'] +['Ġac', 'qu'] +['Ġlaws', 'uit'] +['ĠRe', 'views'] +['Ġgar', 'age'] +['t', 'imer'] +['Ġe', 'j'] +['ĠRect', 'angle'] +['Ġflow', 'ers'] +['il', 'st'] +['ĠIn', 'stance'] +['S', 'uper'] +['d', 'et'] +['dis', 'posing'] +['ĠE', 'S'] +['ĠI', 'C'] +['ver', 'e'] +['S', 'k'] +['_ch', 'annels'] +['put', 'ed'] +['/', 'null'] +['nn', 'en'] +['ĠG', 'allery'] +['_g', 'lobal'] +['Auth', 'entication'] +['ĠR', 'ank'] +['Ġblock', 'ed'] +['Ġcal', 'm'] +['mark', 'et'] +['ĉ', 'val'] +['Ġa', 'ug'] +['per', 'iod'] +['ĠCon', 'stant'] +['Ġ?>', '">Ċ'] +['Ġl', 'obby'] +['p', 'al'] +['Ġs', 'ink'] +['ia', 'h'] +['Ð', '¡'] +['urn', 'ame'] +['Ġcon', 'ver'] +['Ġinvestig', 'ate'] +['Ch', 'rist'] +['H', 'ub'] +['ĠIN', 'D'] +['ĠP', 'ed'] +['ur', 'as'] +['ĉ', 'url'] +['ĠT', 'ro'] +['Ġpre', 'ferences'] +['Ġguarante', 'ed'] +['`', 'ĊĊ'] +['Ġport', 'ions'] +['Ġeval', 'u'] +["'", '>', ';ĊĊ'] +['.AutoScale', 'Mode'] +['Ġc', 'ats'] +['Ġreg', 'istry'] +['ul', 'us'] +['F', 'I'] +['p', 'ayload'] +['-', 'search'] +['Ġstay', 'ing'] +['ac', 'ious'] +['Dec', 'oration'] +['Re', 'view'] +['In', 'f'] +['Ke', 'ep'] +['it', 'is'] +[',', 'String'] +['Co', 'ord'] +['Ġper', 'o'] +['S', 'ex'] +['ĠAtl', 'anta'] +['uest', 'a'] +['Arg', 'b'] +['>', '*'] +['}', '_'] +['F', 'ooter'] +['Ġemploy', 'ed'] +['_b', 'ound'] +['v', 'ide'] +['.f', 'unc'] +['$', 'scope'] +['Ġsp', 
'o'] +['ĠAn', 'al'] +['ounc', 'ed'] +['ar', 'ound'] +['Ġrestr', 'iction'] +['Ġsh', 'ops'] +['å', 'Ģ'] +['ĠLat', 'in'] +['-c', 'ol'] +['Ġbare', 'ly'] +['ĠE', 'uro'] +['E', 'r'] +['Ġfa', 'ire'] +['_d', 'istance'] +['_un', 'lock'] +['Qu', 'ote'] +['IV', 'ATE'] +['Ġå', 'Ī'] +['Ġaim', 'ed'] +['ĠRet', 'rie'] +['.', 'iter'] +['Ġwr', 'apped'] +['Ġagre', 'ements'] +['str', 'ument'] +['(', 'product'] +['Ġstud', 'ied'] +['.set', 'Value'] +['Ġy', 'e'] +['ĠC', 'ache'] +['MB', 'OL'] +['Ġquarter', 'back'] +['Ġsy', 'ntax'] +['.getElements', 'By'] +['.v', 'ersion'] +['we', 'bsite'] +['Run', 'ner'] +['_s', 'ingle'] +['at', 'iv'] +['ĠAl', 'tern'] +['ĠBeaut', 'iful'] +['right', 'arrow'] +['Ġd', 'iversity'] +['pl', 'ash'] +['(', 'co'] +['.F', 'ill'] +['Ġtyp', 'ing'] +['Ġcl', 'ar'] +['H', 'it'] +['O', 'O'] +['ac', 'co'] +['w', 'orth'] +['Ġscript', 's'] +['ĠMuslim', 's'] +['ĠL', 'L'] +['erv', 'ing'] +['(', 'boolean'] +['Ġbase', 'ball'] +['ĠC', 'AN'] +['MA', 'IL'] +['de', 'pend'] +['Ġrespect', 'ive'] +['Ġconst', 'expr'] +['.*', ';ĊĊ'] +["']", '))Ċ'] +['Ġy', 'ard'] +['Ġident', 'ical'] +['if', 'ecycle'] +['US', 'H'] +['up', 'iter'] +['.', 'validate'] +['cl', 'i'] +['IST', 'ER'] +['Ind', 'icator'] +['F', 'ail'] +['Ġdemocr', 'acy'] +['.', 'var'] +['Ġsatisf', 'ied'] +['------------', '-'] +['enc', 'er'] +['h', 'or'] +['Ġr', 'ounds'] +['DA', 'O'] +['o', 'a'] +['Ġfl', 'ask'] +['=', 'c'] +['[', ']Ċ'] +['/d', 'ist'] +['Ġpart', 'e'] +['Ġconfirm', 'ation'] +['er', 'on'] +['aw', 'are'] +[''] +['Ġdepend', 'encies'] +['ĠV', 'ideos'] +['-', 'row'] +['Ġ**', '/Ċ'] +['Ġn', 'ou'] +['Ġh', 'over'] +['æ', 'ŀ'] +['Ġn', 'in'] +['ĠUS', 'D'] +['M', 'ac'] +['_L', 'oad'] +['Ġout', 'comes'] +['_s', 'ocket'] +['Ġqu', 'eries'] +['w', 'm'] +['Ġhit', 'ting'] +['in', 'ux'] +['M', 'ich'] +['ud', 'ge'] +['AT', 'AB'] +['Ġvulner', 'able'] +['ä', '¾'] +['Ġport', 'folio'] +[':', 'YES'] +['ĉm', 'ap'] +['B', 'ound'] +['Ġiter', 'ation'] +['in', 'cess'] +['Ġact', 'ors'] +['ĠQ', 'ual'] +['_c', 'lean'] +['ãĢij', 'ãĢIJ'] +['MS', 'G'] 
+['G', 'reen'] +['ĠOff', 'icer'] +['Ġsm', 'oking'] +['>', "',"] +['ĠF', 'lo'] +['++', ';'] +['oly', 'gon'] +['Ġbul', 'k'] +['Ġdr', 'ama'] +['Ġexception', 's'] +['os', 'ed'] +['Ġ+', 'čĊ'] +['Ġleg', 'acy'] +['C', 'V'] +['Ġcontrib', 'uted'] +['ĠTer', 'ms'] +['Ġb', 't'] +['Ġunt', 'uk'] +['Ġal', 'ien'] +['===', 'Ċ'] +['ĉ', 'Vector'] +['Ġl', 's'] +['On', 'line'] +['.f', 'acebook'] +['num', 'eric'] +['ock', 'ets'] +['A', 'ut'] +['b', 'ury'] +['-re', 'dux'] +['ĠRed', 'istributions'] +['GLOBAL', 'S'] +['urrenc', 'ies'] +['Ġt', 'ons'] +['âĢĻ', ','] +['ĠÃ', 'ª'] +['(c', 'ol'] +['ĠS', 'ymbol'] +['Ġstay', 'ed'] +['ĠM', 'L'] +['Ġm', 'unicip'] +['Ġsex', 'o'] +['S', 'en'] +['n', 'r'] +['Ġg', 'ains'] +['Ġshort', 'ly'] +['.M', 'enu'] +['Ã', '½'] +['KN', 'OWN'] +['Ġoper', 'ators'] +['-', 'V'] +['ĠPat', 'rick'] +['/', 'add'] +['_C', 'O'] +['ir', 'ation'] +['(p', 'ost'] +['Post', 's'] +['/', '_'] +['Ġpl', 'ug'] +['Ġintellect', 'ual'] +['Ġmet', 'ab'] +['Ġpregn', 'ancy'] +['ĠPrem', 'ier'] +['n', 'm'] +['Ġpred', 'iction'] +['ĠMin', 'istry'] +['Th', 'ree'] +['val', 'uate'] +['ĠMin', 'i'] +['b', 'u'] +['оÐ', '·'] +['<', 'ul'] +['Ġd', 'd'] +['ol', 'ving'] +['ĠC', 'ut'] +['Ġs', 'chem'] +['.tr', 'ain'] +['it', 'ate'] +['Ġr', 'ice'] +['Ġbird', 's'] +['ãģ', '«'] +['m', 'iddle'] +['struction', 's'] +['Ġn', 'erv'] +['a', 'que'] +['Ġfl', 'u'] +['Ġsurv', 'ival'] +['ĠGal', 'axy'] +['ĠF', 'ant'] +['.', 'Order'] +['At', 'trib'] +['irt', 's'] +['é', 'c'] +['M', 'ovie'] +['Ġcon', 'ce'] +['qu', 'arters'] +['Ġm', 'ood'] +['.Add', 'Range'] +['Ġres', 'olved'] +['ãĥ', 'Ī'] +['Ġburn', 'ing'] +['ĉĉĉĉ', 'čĊ'] +['ĠW', 'E'] +['Ġhost', 'ing'] +['L', 'AB'] +['Ġman', 'agers'] +['Ġstre', 'ngthen'] +['<', 'const'] +['ĠFire', 'base'] +['on', 'ed'] +['ĠJ', 'ean'] +["'", '', '";čĊ'] +['ĠS', 'av'] +['.B', 'old'] +['Ġen', 'ables'] +['ĉt', 'mp'] +['Ġman', 'ually'] +['ĠS', 'qu'] +['user', 'id'] +['.f', 'unction'] +['.c', 'ache'] +['LO', 'PT'] +['.S', 'ervices'] +['dd', 'it'] +['t', 'im'] +['<', 'img'] +['ĠTh', 'ings'] 
+['ĠEvery', 'thing'] +['Ġa', 'pt'] +['em', 'and'] +['Ġroll', 'ing'] +['ë', '¦'] +['.', 'level'] +['Ġst', 'om'] +['ĠW', 'inter'] +['Ġview', 'ing'] +['(', 'values'] +['ocom', 'plete'] +['v', 'ia'] +['up', 'o'] +['Ġabort', 'ion'] +['i', 'ère'] +['ï¼', 'ij'] +['_B', 'UTTON'] +['_d', 'omain'] +['Ġb', 'ra'] +['ĠA', 'st'] +['in', 'as'] +['Ġstat', 'ist'] +['c', 'od'] +['L', 'R'] +['Ġdr', 'ives'] +['Ġfollow', 'ers'] +['Ġall', 'ies'] +['ĉc', 'urrent'] +['ecess', 'ary'] +['Ġdam', 'aged'] +['_', 'pt'] +['and', 'les'] +['oun', 'tries'] +['Ġsim', 'ult'] +['e', 'u'] +['Ġcontrovers', 'ial'] +['_G', 'ROUP'] +['Ġr', 'ib'] +['.', 'Info'] +[':', 'mm'] +['.n', 'ormal'] +['_ADD', 'RESS'] +['Ġ', 'íķ'] +['add', 'le'] +['ĠD', 'ur'] +['.', 'Element'] +['W', 'arnings'] +['Ġcred', 'its'] +['Ġin', 'hib'] +['Ġem', 'issions'] +['Ġh', 'az'] +['.y', 'outube'] +['ugg', 'ed'] +['Ġbo', 'ther'] +['ĠK', 'ansas'] +['ĠF', 'ixed'] +['ĠTest', 's'] +['ĠF', 'IX'] +['Un', 'iform'] +['Ġk', 'ont'] +['>>', '>'] +['st', 'ation'] +['lo', 're'] +['at', 'ype'] +['ish', 'op'] +['/', '****************************************************************'] +['Com', 'boBox'] +['Ġvac', 'ation'] +['Ġiniti', 'ative'] +['Ġdefault', 'Value'] +['con', 'cat'] +['ĠK', 'h'] +['ĠW', 'elcome'] +['ized', 'Name'] +['M', 'igration'] +['Ġgrad', 'ient'] +['H', 'ot'] +['Ġhard', 'ly'] +['el', 'o'] +['ĠStud', 'ents'] +['Ġlo', 'ose'] +['at', 'z'] +['.S', 'end'] +["'", '/'] +['Ġunivers', 'al'] +['Ġenter', 'prise'] +['Ġreg', 'ex'] +['Ġvis', 'itor'] +['ĠF', 'ly'] +['Se', 'q'] +['à¸', 'Ļ'] +['ĠVis', 'ual'] +['Ġlib', 'raries'] +['ato', 'es'] +['P', 'ayment'] +['Ġp', 'ent'] +['Ġgather', 'ed'] +['VRT', 'X'] +['ĠD', 'M'] +['S', 'plit'] +['Ġlet', 'ting'] +['Ð', 'Ŀ'] +['_error', 's'] +['ep', 'och'] +['P', 'ARAM'] +['c', 'u'] +['ÑģÑĤ', 'в'] +['ol', 'utions'] +['Edit', 'ing'] +['font', 's'] +['Ġalloc', 'ated'] +['ĠB', 'ased'] +['(', 'Y'] +['ĠJud', 'ge'] +['Ġbro', 'thers'] +['FILE', 'S'] +['ç', 'o'] +['w', 'b'] +['_P', 'I'] +["'", '^'] +['Ġs', 'word'] 
+['.s', 'ervices'] +['Ġn', 'l'] +['T', 'im'] +['ig', 'g'] +['ĠMo', 'ore'] +['Ġcrypt', 'oc'] +['åĩ', 'º'] +['_post', 's'] +['ot', 'ate'] +['?', "'"] +['...', '.ĊĊ'] +['Ġk', 'l'] +['="', '$'] +['Ġdec', 'oration'] +['áº', '¡'] +['ĠD', 'IRECT'] +['G', 'UI'] +[')', '=>{Ċ'] +['Ġnews', 'letter'] +['Ġprec', 'is'] +['(p', 'oint'] +['ĠEqu', 'ipment'] +['ut', 'y'] +['ĠD', 'ave'] +['Ġparticip', 'ation'] +['u', 'arios'] +['x', 'it'] +['.A', 's'] +['ET', 'ER'] +['or', 'ous'] +['Ġsh', 'ield'] +['[]', '>'] +['ilit', 'ary'] +['.', 'origin'] +['Ġprom', 'otion'] +['U', 'nt'] +['Ġc', 't'] +['TR', 'A'] +['View', 'Holder'] +['Ġsig', 'ma'] +['d', 'elta'] +['are', 'house'] +['con', 'tract'] +['(', 'Vector'] +['Ġcompet', 'e'] +['/', 'form'] +['/', 'components'] +['Ġn', 'r'] +['ĠInd', 'ones'] +['Ġо', 'ÑĤ'] +['ĠV', 'olume'] +['.f', 'iles'] +['(res', 'p'] +['/', 'models'] +['Ġsur', 'f'] +['stand', 'ard'] +['/', 'o'] +['ĠXCT', 'Assert'] +['V', 'ICES'] +['.C', 'ode'] +['SE', 'D'] +['Ġact', 'ivate'] +['D', 'elta'] +['Ġlimit', 'ation'] +['ri', 'j'] +['Ġpregn', 'ant'] +[':', '^('] +['Ġs', 'our'] +['p', 'ie'] +['Ġexp', 'ense'] +['ic', 'ation'] +['ĠL', 'arge'] +['ĠÂ', '±'] +['ĠB', 'owl'] +['(model', 's'] +['/', 'N'] +['P', 'a'] +['.re', 'load'] +['Ġwonder', 'ing'] +['Exec', 'ution'] +['ĉ', 'ĠĠĠĠĠĠ'] +['ĠG', 'raphics'] +['ĠCont', 'in'] +['_j', 'ob'] +['Ġget', 'Name'] +['ĠM', 'agn'] +['ĠD', 'WORD'] +['m', 'ad'] +['Ġn', 'h'] +['fe', 'atures'] +['}', '");Ċ'] +['he', 'ets'] +['(tr', 'ain'] +['z', 'n'] +['Ġrecru', 'it'] +['.con', 'nection'] +['Ġbar', 'rel'] +['Ġste', 'am'] +['_set', 'ting'] +['Ġang', 'ular'] +['ane', 'ously'] +['Ġb', 'il'] +['ĠN', 'orm'] +['(!', '$'] +['ib', 't'] +['%', '('] +['Ġpos', 'it'] +['ĠF', 'ather'] +['int', 'endo'] +['L', 'ive'] +['Ġport', 's'] +['Ġme', 'j'] +['Ġland', 'ing'] +['pon', 'der'] +['Ġc', 'od'] +['_HE', 'ADER'] +['.M', 'argin'] +['Ġball', 's'] +['Ġdiscuss', 'ions'] +['Ġbl', 'end'] +['H', 'ex'] +['Ġfarm', 'ers'] +['Ġmaint', 'aining'] +['ĠĠĠ', 'čĊ'] +['s', 'yn'] +['[', 
'T'] +['r', 'us'] +['uff', 'ers'] +['Ġcontrib', 'utors'] +['_s', 'ys'] +['.De', 'bug'] +['Ġconstruct', 'ed'] +['om', 'es'] +['?', 'id'] +['sl', 'ider'] +['Ġsup', 'pliers'] +['scri', 'ber'] +['p', 'es'] +['Ð', 'ŀ'] +['":', 'čĊ'] +['\\', 'Controller'] +['))', 'ĊĊĊ'] +['Ġl', 'ua'] +['M', 'ulti'] +['EN', 'S'] +['S', 'rc'] +['Ġpet', 'ition'] +['Ġsl', 'ave'] +['look', 'ing'] +['V', 'ERT'] +['ĉ', 'vector'] +['S', 'pecial'] +['h', 'h'] +['an', 'ne'] +['ĠN', 'iger'] +['/', 'views'] +['z', 'ing'] +['end', 'ant'] +['<', 'C'] +['s', 'peed'] +['Ġ{', '};ĊĊ'] +['Begin', 'Init'] +['Ġf', 'open'] +['@', 'RequestMapping'] +['End', 'Init'] +['Ġp', 'unch'] +['S', 'ender'] +['é', 'Ķ'] +['get', 'Message'] +['/t', 'ypes'] +['.P', 'I'] +["('", "');Ċ"] +['oc', 'used'] +['(', 'all'] +['Ġdrop', 'down'] +[').', '__'] +['ĠV', 'in'] +['.Fore', 'ignKey'] +['can', 'f'] +['ou', 'red'] +['ĠOrgan', 'ization'] +['ĠÐ', '°'] +['ĠC', 'ulture'] +['(cl', 's'] +[',', '_'] +['rg', 'ba'] +['ìĿ', 'ĺ'] +['.data', 'GridView'] +['Ġdo', 'zen'] +['ĠG', 'es'] +['_sh', 'ared'] +['n', 'ick'] +['Ġh', 'osp'] +['om', 'eter'] +['Ġclaim', 'ing'] +['ib', 'les'] +['ri', 'k'] +['æĺ', '¯'] +['en', 'ario'] +['Ġd', 'engan'] +['ob', 'b'] +['m', 'ont'] +['_r', 'ank'] +["('/", "',"] +['Ġap', 'olog'] +['P', 's'] +['_p', 'ower'] +['ĠG', 'ree'] +['Ġful', 'fill'] +['Ġfire', 'base'] +['Ġf', 'are'] +['ĠH', 'im'] +['Ġbe', 'an'] +['â̦', '.'] +['ĠS', 'PI'] +['_R', 'X'] +['Ġper', 'ception'] +['rel', 'ative'] +['comp', 'ile'] +['u', 'um'] +['ut', 'os'] +['a', 'uc'] +['ĠAs', 'k'] +['Ġindic', 'ator'] +['/', 'th'] +['.set', 'String'] +['ĠWis', 'consin'] +['.D', 'omain'] +['Ġart', 'ificial'] +['De', 'velop'] +['ĠSar', 'ah'] +['Ġl', 'ying'] +['(', 'search'] +['ĠEmp', 'ire'] +['urr', 'ing'] +['æĹ¶', 'éĹ´'] +['="', '${'] +['Ġget', 'Id'] +['ĠP', 'ayment'] +['trans', 'ition'] +['Ġ', '].'] +['ix', 'in'] +['V', 'T'] +['-', 'select'] +['Ġdemonstr', 'ated'] +['Ġlast', 'Name'] +['employ', 'ment'] +['.get', 'Property'] +['Ġf', 'ought'] +['file', 'Name'] 
+['ĠP', 'ers'] +['-c', 'ard'] +['a', 'str'] +['attr', 's'] +['Ġprom', 'inent'] +['Des', 'ign'] +['anc', 'ouver'] +['ãģĹ', 'ãģ'] +['ard', 'o'] +['se', 'cret'] +['Ġr', 'ag'] +['Ġpo', 'ison'] +['-m', 'an'] +[',', 'omitempty'] +['ĉ', 'un'] +['it', 'zer'] +['ĠCas', 'ino'] +['ĠR', 'oss'] +['-', 'foot'] +['(result', 's'] +['Pl', 'an'] +['Ġlas', 'er'] +['ê¸', '°'] +['_D', 'R'] +['F', 'acebook'] +['Ġbo', 'ards'] +['st', 'a'] +[']', '],'] +['Ġt', 'iles'] +['S', 'IZE'] +['Ġ=', '~'] +['Ġprem', 'ier'] +['oc', 'ab'] +['Ġenc', 'oded'] +['Ġres', 'erve'] +['ĠAfghan', 'istan'] +['ĠList', 'Node'] +['url', 's'] +['Ġsub', 'mission'] +['Ġne', 'u'] +['Ġ#', '+#'] +['_P', 'OST'] +['Ġmo', 'ist'] +['ell', 'i'] +['ellig', 'ent'] +['.', 'alert'] +['ó', 'd'] +['b', 're'] +['ĠCol', 'lect'] +['Ġgraph', 'ic'] +['Ġlong', 'itude'] +['ĠPro', 'vid'] +['ĠCal', 'culate'] +['x', 'ffff'] +['c', 'riteria'] +['Ġw', 'aters'] +['ro', 'ck'] +['lo', 'quent'] +['ĠT', 'rib'] +['Ġbur', 'st'] +['Ġsuff', 'ix'] +['.Ext', 'ensions'] +['ish', 'es'] +['iv', 'el'] +['ĠLI', 'KE'] +['ĠGet', 'ty'] +['.Action', 'Event'] +['.s', 'lf'] +['ĠH', 'AL'] +['up', 'al'] +['E', 'AR'] +['ud', 'i'] +['_time', 'out'] +['U', 'F'] +['ĠSing', 'apore'] +['ĠAd', 'vent'] +['_int', 'erval'] +['cha', 'ft'] +['ĠE', 'mer'] +['Ġtele', 'phone'] +['ĠTur', 'k'] +['_', 'interface'] +['ĠO', 'wn'] +['Ġencour', 'aged'] +['<', 'Object'] +['_T', 'ext'] +['ĠOnt', 'ario'] +['ĠApp', 'ly'] +['.f', 'irebase'] +['Ġant', 'ib'] +['P', 'riority'] +['ene', 'z'] +['D', 'ays'] +['c', 'id'] +['urre', 'nce'] +[';', '/'] +['inn', 'ed'] +['Ñģ', 'Ñı'] +['Ġve', 'z'] +['f', 'w'] +['//', '$'] +['att', 'ack'] +['Ġstart', 'up'] +['ain', 'ers'] +['.f', 'ragment'] +['op', 'acity'] +['(', 'conn'] +['he', 'im'] +['.n', 'etwork'] +['(', 'stream'] +['ĠN', 'ON'] +['t', 'ol'] +['ĠX', 'box'] +['ĠD', 'S'] +['Ġc', 'ached'] +['Ġprostit', 'utas'] +['ĠB', 'alt'] +["('", '['] +['Ġno', 'except'] +['"', "'"] +['Ġs', 'd'] +['.', 'valid'] +['_', 'ag'] +['Ġr', 'aces'] +['Ġro', 'd'] +['itud', 'es'] 
+['<', '>('] +['.Pro', 'duct'] +['Form', 's'] +['NE', 'W'] +['P', 'ay'] +['ĉ', 'boolean'] +['_', 'contact'] +['ĠElect', 'ric'] +['sk', 'ip'] +['Ġw', 'ur'] +['Ġch', 'ronic'] +['_d', 'river'] +['ĠS', 'ab'] +['ĠU', 'lt'] +['ĠR', 'ad'] +['ST', 'ATUS'] +['ĠLew', 'is'] +['O', 'B'] +['Ġgift', 's'] +['.Re', 'c'] +['TR', 'UE'] +['Ġint', 'ensity'] +['Mark', 'er'] +['.com', 'pare'] +['ff', 'ic'] +['C', 'ookie'] +['ĠB', 'aby'] +['ĠBig', 'Decimal'] +['ile', 't'] +['ĠHOLD', 'ERS'] +['ĠL', 'ady'] +['Ġl', 'ung'] +['ĠAl', 'abama'] +['Ġd', 'ess'] +['`', ');Ċ'] +['ĠB', 'uilder'] +['_reg', 'ion'] +['Ġne', 'utral'] +['Bo', 'th'] +['Ġh', 'p'] +['Ġh', 'orn'] +['Ġseg', 'ments'] +['ĠE', 'C'] +['"=>', '"'] +['(', 'rec'] +['ĠP', 'i'] +['G', 'M'] +['Ġl', 'aptop'] +['Sc', 'alar'] +['is', 'd'] +['-d', 'ialog'] +['ĠAnd', 'erson'] +['Ġmist', 'akes'] +['ĠH', 'an'] +['j', 'es'] +['est', 'ination'] +['Ġprom', 'ises'] +['b', 'id'] +['ĠSc', 'ient'] +['G', 'IN'] +['ĠPer', 'formance'] +['b', 'age'] +['.', 'users'] +['le', 'ading'] +['Ġor', 'al'] +['G', 'raphics'] +['_P', 'TR'] +['h', 'ang'] +['Ġin', 'ev'] +['process', 'ing'] +['F', 'actor'] +['ĠN', 'A'] +['$', 'string'] +['Ġground', 's'] +['.Save', 'Changes'] +['c', 'lock'] +['cri', 'pcion'] +['ĠNew', 'ton'] +['g', 'c'] +['.in', 'cludes'] +['Ġbl', 'ast'] +["Ġ'-", "'"] +['Ġpued', 'e'] +['.S', 'ession'] +['Ġgre', 'p'] +['_f', 'inal'] +['ĠG', 'ay'] +['ĠG', 'ive'] +['ir', 'i'] +['-st', 'ar'] +['ĠUI', 'Image'] +['_ep', 'och'] +['ub', 'b'] +['ent', 'h'] +['Ġel', 'ite'] +['Ġcampaign', 's'] +['ĠP', 'orno'] +['_', 'assign'] +['Prot', 'ocol'] +['ĠBe', 'ing'] +['ĠAir', 'port'] +['Ġconvent', 'ional'] +['ĠW', 'at'] +['ĠC', 'I'] +['ET', 'A'] +['ĠAnth', 'ony'] +['Ġtable', 't'] +['(', 'format'] +['Ġconsist', 'ently'] +['ĠI', 'owa'] +['Ġav', 'atar'] +['.c', 'ursor'] +['!', '['] +['Ġh', 'anging'] +['H', 'er'] +['S', 'uch'] +["';ĊĊ", 'Ċ'] +['orge', 'ous'] +['()', '=='] +['Ġview', 'Model'] +['Ġ', 'ãĥ'] +['Ġel', 's'] +['ĠAg', 'ent'] +['F', 'etch'] +['ap', 'or'] +['Ġc', 'x'] 
+['p', 'read'] +['ĠP', 'ier'] +['oe', 'ff'] +['S', 'n'] +['ĠV', 'irtual'] +['A', 'pr'] +['.Wh', 'ite'] +['_M', 'OD'] +['ĠPoint', 's'] +['å¤', '±'] +['Ġgen', 'es'] +['Ġv', 'endor'] +['Ġmain', 'stream'] +['<', 'src'] +['ĠEl', 'izabeth'] +['Dec', 'oder'] +['-', 'state'] +['ĠG', 'lass'] +['nc', 'y'] +['adi', 'ans'] +['_m', 'on'] +['ĠRem', 'ote'] +['Ġwire', 'less'] +['ĠM', 'i'] +['å', 'ī'] +['è¡', '¨'] +['st', 'age'] +['ĠT', 'ile'] +['ll', 'ib'] +['V', 'ariant'] +['==', 'Ċ'] +['Ġgold', 'en'] +['(Q', 'String'] +['.put', 'Extra'] +['ĠD', 'om'] +['ĠAn', 'imation'] +['Ġinter', 'active'] +['if', 'act'] +['éĻ', '¤'] +['LE', 'T'] +['Ġfrequ', 'ent'] +['Ġ<', '>Ċ'] +['F', 'ilename'] +['Ġs', 'ne'] +['ĠFoot', 'ball'] +['Ġr', 'ival'] +['Ġdis', 'aster'] +['ion', 'ic'] +['ĠD', 'amage'] +['.', 'Resource'] +['-', 'en'] +['ĠT', 'ypes'] +['get', 'String'] +['(', 'board'] +['Ġb', 'ol'] +['pl', 'ain'] +['z', 'ym'] +['à¸', '²'] +['Ġsc', 'anner'] +['ild', 'er'] +['_msg', 's'] +['æ', 'ı'] +['(int', 'ent'] +['Ġde', 'struct'] +['Ġb', 'ust'] +['ĠE', 'mploy'] +['on', 'i'] +['ĠUI', 'ViewController'] +['Ġodd', 's'] +['ear', 'er'] +['Ge', 'ometry'] +['Ġy', 'ii'] +['_EX', 'PORT'] +['ĠAtt', 'ack'] +['Ġn', 'iet'] +['Ġim', 'pression'] +['ĠG', 'il'] +['_pro', 'b'] +['ĠC', 'F'] +['ĠEx', 'perience'] +['/pl', 'ugins'] +['.M', 'ethod'] +['Ġbelie', 'fs'] +['N', 'ative'] +['_b', 'uild'] +['Ġv', 'ig'] +['Ġr', 'anks'] +['cover', 'ed'] +['s', 'uch'] +['G', 'uard'] +['.p', 'ack'] +['add', 'er'] +['iv', 'ia'] +['l', 'ng'] +['Ġв', 'Ñĭ'] +['T', 'imestamp'] +['_n', 'ow'] +['Ġp', 'oker'] +['Ġun', 'c'] +['Ġsh', 'apes'] +['-t', 'ypes'] +['_per', 'iod'] +['p', 'k'] +['Ġveter', 'an'] +['Ġson', 'o'] +['Ġappoint', 'ed'] +['over', 'flow'] +['.d', 'river'] +['_c', 'at'] +['ut', 't'] +['pl', 'ant'] +['im', 'b'] +['ĠAc', 'cept'] +['Ġconc', 'ert'] +['ĉ', 'node'] +['ĉ', 'z'] +['?', '>čĊ'] +['Ġb', 'anned'] +['ĉ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ'] +['Ġto', 'xic'] +['Ġdisap', 'pe'] +['È', 'Ľ'] +['Ġgr', 'ace'] +['ate', 'ful'] +['Re', 'ply'] +['ĠCru', 
'z'] +['Ġsc', 'rap'] +['Ġkey', 'words'] +['s', 'imp'] +['Ġmort', 'gage'] +['Ġcy', 'ber'] +['ĠEx', 'ecute'] +['Ġlat', 'itude'] +['if', 'u'] +['.C', 'OM'] +['d', 'bo'] +['Ġsort', 's'] +['ĠG', 'as'] +['om', 'ial'] +['.L', 'ocal'] +['Cell', 's'] +['.Re', 'place'] +['String', 's'] +['.f', 'it'] +['ĠTh', 'ird'] +['%', '",Ċ'] +['Ġ{}', '".'] +['ĠS', 'ony'] +['Ġ[', ':'] +['Ġfall', 'en'] +['.', "')Ċ"] +['in', 'h'] +['ĠM', 'C'] +['Ġred', 'is'] +['C', 'odes'] +['Ġprofile', 's'] +['h', 'ook'] +['Reduc', 'er'] +['_F', 'UNC'] +['Ġn', 'avigate'] +['str', 'len'] +['Ġh', 'orm'] +['á', 'ŀ'] +['ĠS', 'R'] +['.', 'boot'] +['Ġdig', 'est'] +['ĉ', 'header'] +['.find', 'One'] +['æ', 'ģ'] +['Db', 'Type'] +['n', 'ia'] +['_m', 'erge'] +['Ġdon', 'ne'] +['/', 'Getty'] +['_CH', 'AR'] +['Ġb', 'ands'] +['.', 'URL'] +['art', 'ial'] +['Ġf', 'req'] +['Ġs', 'ist'] +['N', 'g'] +['Ġrender', 'ing'] +['\\', 'Core'] +['Widget', 's'] +['ĠV', 'A'] +['Ġactiv', 'ists'] +['St', 'e'] +['=', '_'] +['all', 'a'] +['St', 'amp'] +['Ġload', 's'] +['Ġx', 'x'] +['ĠL', 'earning'] +['.M', 'vc'] +['u', 'ir'] +['("', '$'] +['Ġconnect', 'ing'] +['Read', 'Only'] +['ur', 'u'] +['ĠE', 'ag'] +['B', 'IT'] +['_DE', 'L'] +['å', '§'] +['arr', 'ass'] +['ext', 'ernal'] +['ĠY', 'OUR'] +['ĠB', 'rew'] +['ĠF', 'ive'] +['Ġres', 'ize'] +['ig', 'id'] +['er', 'ation'] +['ĠÑ', 'į'] +['åĬ', 'ł'] +['ĠC', 'atch'] +['Ù', 'ģ'] +['ĠLe', 'on'] +['am', 'il'] +['.B', 'ody'] +['Cl', 'ip'] +['/', 'list'] +['.b', 'r'] +['Edit', 'Text'] +['ĉ', 'db'] +['.G', 'ame'] +['(Build', 'Context'] +['back', 'end'] +['.R', 'ed'] +['face', 'book'] +['.url', 's'] +['m', 'r'] +['rol', 'led'] +['----', '---'] +['Ġinter', 'vention'] +['Ġretire', 'ment'] +['ĠK', 'it'] +['ĠP', 'RE'] +['Upper', 'Case'] +['ĠS', 'ocket'] +['Ġ:', '-'] +['Ġstudy', 'ing'] +['ĠMet', 'ro'] +['ard', 'ed'] +['Ġconvers', 'ations'] +['C', 'alled'] +['Ġexam', 'ine'] +['ert', 'ificate'] +['.g', 'z'] +['-res', 'ponsive'] +['Ġref', 'und'] +['_n', 'etwork'] +['allow', 'ed'] +['em', 'pt'] +['Ġme', 'als'] 
+['C', 'ategories'] +['Ġtravel', 'ing'] +['Ġk', 'g'] +['Ġsh', 'ame'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ'] +['Ġexplicit', 'ly'] +['Ġmath', 'ematic'] +['ĠS', 'uite'] +['ĠR', 'GB'] +['******', '/'] +['Ġmix', 'ture'] +['lear', 'ning'] +['.t', 'emplate'] +['att', 's'] +['w', 'x'] +['ĉ', 'ctx'] +['.p', 'roperties'] +['Ġdrink', 's'] +['ĠE', 'ither'] +['set', 'Text'] +['.get', 'Data'] +['.z', 'ip'] +['Ġreve', 'als'] +['<', 'table'] +['.Hash', 'Map'] +['ĠH', 'ur'] +[')', '");Ċ'] +['.f', 'ramework'] +['ĠST', 'ART'] +['feed', 'back'] +['Ġsaf', 'ely'] +['.', 'icon'] +['config', 'ure'] +['.', 'lock'] +['.l', 'ayers'] +['/>', '.Ċ'] +['Ġrank', 'ed'] +['_', 'impl'] +['ĠHand', 'les'] +['Ġhost', 'ed'] +['Ġup', 'dating'] +['al', 'bum'] +['é', 'Ŀ'] +['Ġsh', 'ader'] +['Edit', 'ors'] +['-', 'round'] +['[]', '{'] +['Ġse', 'p'] +['ĠH', 'i'] +['TE', 'M'] +['look', 'up'] +['.m', 'an'] +['_IN', 'PUT'] +['Ġthreat', 'ened'] +['_IM', 'PORT'] +['Ġd', 'rops'] +['ru', 'it'] +['s', 'id'] +['bo', 'th'] +['ĠEx', 'cel'] +['Ġj', 'er'] +['ord', 'inary'] +['еÐ', '¹'] +['V', 'IEW'] +['re', 'ply'] +['Ġ)', ':Ċ'] +['color', 's'] +['ver', 'ified'] +['_T', 'r'] +['_p', 'arse'] +['Ġcon', 'gress'] +['P', 'romise'] +['int', 's'] +['ĠM', 'other'] +['.A', 'pi'] +['ĠD', 'uration'] +['Ġfirst', 'Name'] +['inherit', 'doc'] +['ĠM', 'ars'] +['Ġa', 'pr'] +['OD', 'Y'] +['Ġvis', 'its'] +['Ġhe', 'aling'] +['let', 'ters'] +['))', ');čĊ'] +['f', 'uture'] +['.F', 'ramework'] +['Ġk', 'iss'] +['Ġinv', 'olve'] +['Ġsil', 'ent'] +['ad', 'ows'] +['Ġany', 'body'] +['s', 'ch'] +['Ġsole', 'ly'] +['-', 'img'] +['Ġprop', 'ri'] +['Ġin', 'struct'] +['Ġlic', 'enses'] +['Ġm', 'eth'] +['Ġcond', 'em'] +['ĠD', 'omain'] +['ĠHarr', 'is'] +['Ġs', 'Ã¥'] +['CE', 'PT'] +['B', 'atch'] +['@', 'extends'] +['ĠCONTR', 'IBUT'] +['.Data', 'Frame'] +['_p', 'acket'] +['rec', 'ision'] +['Ġfoc', 'using'] +['.', 'ht'] +['__', '":Ċ'] +[':', 'Get'] +['ĠK', 'C'] +['Ġpass', 'age'] +['Seg', 'ment'] +['_c', 'enter'] +['-z', 'A'] +['_B', 'L'] 
+['Ġconv', 'in'] +['Ġclass', 'ified'] +['ĠNS', 'Mutable'] +['_', 'ap'] +['t', 'ile'] +['Rect', 'angle'] +['(n', 'ums'] +['v', 'ens'] +['ĠUI', 'Button'] +['ĠF', 'eder'] +['am', 'o'] +['Ġout', 'line'] +['ĠPar', 'ser'] +['Ġâ', 'ī'] +['ĠWork', 's'] +['.S', 'chema'] +['Ġeng', 'ines'] +['_com', 'mon'] +['_', 'old'] +['Ġset', 'ContentView'] +['Ġ///', '<'] +['ĠB', 'T'] +['f', 'm'] +['Ġd', 'ivers'] +['_', 'weights'] +['em', 'ark'] +['ĠA', 'CT'] +['Ġpro', 'portion'] +['over', 'lay'] +['.dir', 'name'] +['ĠG', 'it'] +['_REF', 'ERENCE'] +['<', '>'] +['l', 'b'] +['_r', 'ule'] +['è´', '¥'] +['ĠPut', 'in'] +['Ġsleep', 'ing'] +['()', ':čĊ'] +['Ġpres', 'erve'] +['Ġpar', 'liament'] +['ĠLook', 'ing'] +['Ġpick', 'ing'] +['ĠDis', 'patch'] +['Ġsl', 'ip'] +['ë', 'ĵ'] +['ĠL', 'yn'] +['_sign', 'al'] +['config', 'uration'] +['ĠP', 'itt'] +['ad', 'en'] +['pro', 'cedure'] +['Ġenthus', 'i'] +['f', 'ight'] +['ĠCons', 'ider'] +['Ġt', 'orn'] +['Conn', 'ected'] +['.c', 'os'] +['_group', 's'] +['ĠTh', 'ink'] +['Ġdel', 'iber'] +['Ġres', 'id'] +['work', 'ing'] +['.column', 's'] +['ĠCal', 'led'] +['Ġes', 'lint'] +['>', '",'] +['_D', 'OWN'] +['h', 'ist'] +['ĠAdv', 'anced'] +['Ġre', 'wards'] +['act', 'ors'] +['Ġsil', 'ence'] +['Ġmy', 'th'] +['Ġne', 'ur'] +['Ġa', 'uction'] +['.Get', 'String'] +['ek', 's'] +['(', 'project'] +['ĉ', 'msg'] +['ĉ', 'output'] +['Ġcomplaint', 's'] +[',', 'S'] +['Ġt', 'bl'] +['Ġ,', 'ĊĊ'] +['ri', 'ors'] +['ah', 'ren'] +['Ġlawy', 'ers'] +['re', 'dux'] +['_s', 'ymbol'] +['off', 'ee'] +['_RES', 'ULT'] +['(', 'Name'] +['UT', 'C'] +['.current', 'Time'] +['Ġorgan', 'is'] +['.', 'arg'] +['Ġmin', 'im'] +['w', 'ick'] +['Ġrece', 'ives'] +['B', 'alance'] +['Ġspeak', 's'] +['ĠD', 'ays'] +['ĠBel', 'ow'] +['t', 'ipo'] +['P', 'resent'] +['Ġres', 'erv'] +['h', 'p'] +['Ġr', 'it'] +['_R', 'IGHT'] +['--', ')'] +['Ġchair', 'man'] +['D', 'IS'] +['ĠBO', 'OST'] +['Ġexper', 'iments'] +['__', ');Ċ'] +['Ġst', 'amp'] +['Ġf', 'ert'] +['Ġf', 'ond'] +['T', 'er'] +['el', 've'] +['ure', 'n'] +['+', 'i'] +['end', 
'ency'] +['Ġvirt', 'ually'] +['...', '"'] +['ï½', 'ŀ'] +['-', 'cent'] +['_un', 'ique'] +['Ġpr', 'icing'] +['m', 'ic'] +['RES', 'H'] +['Ġ::', ':'] +['Ġan', 'notation'] +['ĠC', 'ircle'] +['ong', 'odb'] +['it', 'as'] +['Ġ%', '('] +['(', 'component'] +['Ġо', 'б'] +['(', 'port'] +['-h', 'our'] +['.', 'obj'] +['L', 'BL'] +['Ġj', 'ury'] +['GB', 'T'] +['Ġsp', 'y'] +['ĠProf', 'essional'] +['Ġ""', ';ĊĊ'] +['Ġstri', 'king'] +['Ġdiscrim', 'ination'] +['Ġp', 'ays'] +['lic', 't'] +['ent', 'es'] +['Ġthrow', 'ing'] +['ĠPl', 'ugin'] +['(', 'def'] +['ĠRuntime', 'Exception'] +['ĠM', 'igration'] +['Ġd', 'ic'] +['b', 'ag'] +['on', 'ia'] +['Ġcor', 'ruption'] +['(', 'Map'] +['Ġpr', 'z'] +['.d', 'to'] +['Ġac', 'quire'] +['State', 'ToProps'] +['Ġlo', 'ving'] +['оÐ', '¶'] +['_p', 'attern'] +['Ġemot', 'ions'] +['Ġpublish', 'er'] +['_b', 'e'] +['Ġcoup', 'les'] +['o', 'j'] +['ĠCh', 'art'] +['Ġt', 'rop'] +['.t', 'ool'] +['Ġestablish', 'ment'] +['Ġd', 'ol'] +['Ġto', 'wer'] +['Ġl', 'ane'] +['ĠSy', 'dney'] +['Ġfill', 'ing'] +['claim', 'ed'] +['Ġdialog', 'ue'] +['Ġcon', 'vention'] +['book', 'ing'] +['pare', 'ncy'] +['æ', '±'] +['ĠGener', 'ic'] +['\\', 'Schema'] +['Ġr', 'anges'] +['/', 'ch'] +['Ġpan', 'els'] +['Ġr', 'uled'] +['çĶ', 'Ł'] +['.t', 's'] +['_s', 'ets'] +['Ġclean', 'up'] +['Pre', 'vious'] +['ĠAn', 'imal'] +['($', '('] +['ĠA', 've'] +['oll', 'ar'] +['_e', 'val'] +['ĉ', 'Name'] +['(t', 'ree'] +['Ġ"', ']'] +['Ġdut', 'ies'] +["='", '/'] +['Click', 'ed'] +['Ġdifferent', 'ly'] +['ĠCl', 'ark'] +['Ġd', 'it'] +['olog', 'ists'] +['Ġsy', 'nd'] +['Ġs', 'ends'] +['-', 'known'] +['k', 'b'] +['ĠMod', 'al'] +['it', 'ative'] +['Ġr', 'acing'] +['Ġhigh', 'lights'] +['ĠSim', 'on'] +['ĠCapt', 'ain'] +['ä¿', '¡'] +['ĠC', 'B'] +['cont', 'in'] +['ar', 'an'] +['Ġphys', 'ics'] +['ret', 'ty'] +['et', 'al'] +['.m', 'd'] +['ax', 'ios'] +['Ġspeak', 'ers'] +['Ġpre', 'p'] +['Ġaward', 'ed'] +['ì§', 'Ģ'] +['ĠC', 'orn'] +['ĠN', 'ature'] +['UD', 'IO'] +['Ġpro', 'j'] +['-', 'pre'] +['[', 'u'] +['Fe', 'atures'] +['Ġis', 
'Equal'] +['B', 'inary'] +['s', 'ig'] +['Ġconf', 'usion'] +['ĠH', 'at'] +['Ġkt', 'ó'] +['.config', 'ure'] +['M', 'ON'] +['/', 'edit'] +['_A', 'dd'] +[',', 'true'] +['Ġc', 'li'] +['Error', 'Message'] +['-', 'loader'] +['Dim', 'ensions'] +['ultip', 'ly'] +['Ġ{', '!!'] +['ĠSql', 'Command'] +['Ġsp', 'oken'] +['Ġp', 'ics'] +['Ġto', 'y'] +['(', 'Key'] +['ĠLo', 'op'] +['Ø', '¨'] +['E', 'ATURE'] +['in', 'ction'] +['_set', 'up'] +['w', 'rapper'] +['Ġt', 'ong'] +['c', 'ular'] +['O', 'pt'] +['.P', 'l'] +['="', ','] +['(l', 'ength'] +['um', 'n'] +['Ġch', 'rom'] +['Ġse', 'vent'] +['ĠIllegal', 'ArgumentException'] +['ĉ', 'start'] +['Ġbeg', 'un'] +['CE', 'PTION'] +['dat', 'aset'] +['ĠF', 'ailed'] +['col', 's'] +['Ġkne', 'e'] +['im', 'ore'] +['.sp', 'lice'] +['sh', 'ell'] +['ig', 'gers'] +['Ġthem', 'es'] +['ĠD', 'J'] +['ĠAss', 'istant'] +['-', '$'] +['May', 'be'] +['Ġorder', 'ing'] +['ĠInt', 'elligence'] +['ĠMass', 'achusetts'] +['Ġfail', 'ing'] +['el', 'son'] +['G', 'reat'] +['=', 'i'] +['.re', 'st'] +['Ġinv', 'ite'] +['-dis', 'able'] +['.Group', 'Box'] +['âĢĻ', 'est'] +['Ġtack', 'le'] +['g', 'v'] +['et', 'ter'] +['Ġ),', 'čĊ'] +['_r', 'ules'] +['.w', 'arn'] +['function', 's'] +['ĠChrist', 'ians'] +['Ġback', 'ed'] +['Ġsl', 'ider'] +['Ġenjoy', 'ing'] +['n', 'est'] +['Ġh', 'ij'] +['_m', 's'] +['//', '*'] +['An', 'notations'] +['ĠVariable', 's'] +['<', 'V'] +['(', 'server'] +['ĠOr', 'acle'] +['element', 's'] +['Ġorgan', 'isation'] +['_point', 'er'] +['ĠHe', 'aders'] +['[', 'd'] +['Ġdead', 'line'] +['iss', 'a'] +['Ġkn', 'ife'] +['ĠNAS', 'A'] +['ĠHe', 'ight'] +['ĠAs', 'ync'] +['Ġven', 'ue'] +['.d', 'om'] +['bour', 'ne'] +['ĠHaw', 'ai'] +['Ġmem', 'o'] +['ict', 'ions'] +['Ġsurve', 'illance'] +['om', 'i'] +['/', 'assets'] +['Ġed', 'u'] +['Ä', 'Ľ'] +['Ġro', 'ster'] +['Ġh', 'ired'] +['ĠT', 'ok'] +['Ġpl', 'acement'] +['ur', 'ations'] +['Ġset', 'State'] +['ĠMag', 'azine'] +['Ġhor', 'ror'] +['T', 'ry'] +['Ġl', 'ag'] +['ĠEvery', 'one'] +['th', 'ur'] +['))', ';čĊčĊ'] +['.', 'return'] +['Ġsy', 
'mp'] +['âĸĪ', 'âĸĪ'] +['Ġn', 'ights'] +['work', 'er'] +['Ġa', 'le'] +['ennes', 'see'] +['.st', 'ep'] +['Ġsynchron', 'ized'] +['our', 'i'] +['Do', 'es'] +['.', 'change'] +['f', 'on'] +['.set', 'Background'] +['irc', 'ular'] +['+', '-'] +['ĠC', 'IA'] +['ĠJ', 'ane'] +['ĠSim', 'ilar'] +['-', 'I'] +['level', 'and'] +['Ġpros', 'pect'] +['_f', 'ound'] +['ĉc', 'olor'] +['.D', 'iagnostics'] +['Ġann', 'ounce'] +['Ġassum', 'es'] +['/', 'tr'] +['Ġb', 'd'] +['ĠCar', 'bon'] +['Ġanal', 'ys'] +['.de', 'st'] +['n', 'ik'] +['ĠL', 'ie'] +['-', 'index'] +['Draw', 'able'] +['ĠT', 'AG'] +['Ġtri', 'angle'] +['_F', 'LOAT'] +['ĉĉ', 'ĠĠĠĠĠ'] +['.bl', 'ack'] +['v', 'ue'] +['cur', 'acy'] +['Ġaffect', 's'] +['Ġsure', 'ly'] +['Sl', 'ider'] +['uk', 'i'] +['c', 'ery'] +['Ġun', 'ter'] +['.pro', 'file'] +['ord', 'on'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ'] +['le', 'ave'] +['Ġsmart', 'phone'] +['g', 'ie'] +['Ġcons', 'pir'] +['Ġt', 'utorial'] +['ç±', '»'] +['Ġc', 'ab'] +['ĠSum', 'mary'] +['*', 'ĊĊ'] +['ä', 'h'] +['"', 'This'] +['Ġsl', 'ides'] +['"', ''] +['c', 'ycle'] +['ĠB', 'ull'] +['path', 's'] +['Ġun', 'p'] +['Ġview', 'DidLoad'] +['_M', 'odel'] +['Ġassert', 'True'] +['Ġr', 'ated'] +['De', 'cl'] +['vert', 'ed'] +['ĠD', 'at'] +['b', 'rew'] +['Ġpoint', 'ing'] +['M', 's'] +['ĠPoint', 'er'] +[')', "'"] +['_n', 'on'] +['ĠSE', 'C'] +['Ġy', 'eah'] +['g', 'ency'] +['initial', 'ize'] +['f', 'ly'] +['[', 'pos'] +[',', 'g'] +['Te', 'le'] +['Ġj', 'oke'] +['Ġcl', 'ause'] +['.find', 'ById'] +['en', 'es'] +['(', 'instance'] +['Â', '£'] +['Ġs', 'lic'] +['_h', 'ome'] +['Ġ*/', '}Ċ'] +['_p', 'ages'] +['(s', 'ervice'] +['R', 'P'] +['ĠAm', 'ong'] +['.get', 'Current'] +['ãĤ', '¹'] +['Ġs', 'lee'] +['=', '', '[Ċ'] +['ol', 'er'] +['Ġlib', 'ert'] +['Ġ`', 'Ċ'] +['Ġw', 'enn'] +['l', 'ated'] +['Ġimm', 'une'] +['(', 'Node'] +['ĠPro', 'blem'] +['ĠA', 'bs'] +['log', 's'] +['Ġ', '../'] +['ĠA', 'DC'] +['Ġ}}', '">Ċ'] +['>', "');Ċ"] +['=', 'b'] +['ĠW', 'ind'] +['lah', 'oma'] +['Ġalloc', 'ate'] +['or', 'ian'] 
+['Ġpres', 'cription'] +['-', 'quality'] +['ĠMay', 'or'] +['in', 'ely'] +['end', 'foreach'] +['ĠCom', 'plex'] +['k', 'om'] +['T', 'Y'] +[']', '].'] +['.', 'Style'] +['_m', 'any'] +["','", '$'] +['Ġbar', 'rier'] +['ĠF', 'etch'] +['ĠMar', 'vel'] +['Ġres', 'ist'] +['ог', 'о'] +['b', 'idden'] +['ĠRun', 'nable'] +[':', 'false'] +['Ġbuild', 's'] +['ĠSt', 'age'] +['Ġd', 'ub'] +['emp', 'o'] +['.s', 'ite'] +[';ĊĊ', 'ĊĊ'] +['ĠDen', 'ver'] +['Ġre', 'vel'] +['Ġtrigger', 'ed'] +['Ġd', 'ice'] +['_f', 'ail'] +['Ġg', 'c'] +['ĉ', 'X'] +['ĠTh', 'rowable'] +['.r', 'outer'] +['ĠRev', 'olution'] +['ÑĢ', 'а'] +['_N', 'ON'] +['Ł', '¥'] +['Ġel', 'der'] +['Ġab', 'road'] +['ĠÐ', 'µ'] +['ĠAd', 'ult'] +['bl', 'r'] +['g', 'lyphicon'] +['Ġprom', 'oting'] +['Ġ', 'iz'] +['ĠS', 'olid'] +['_lo', 'ader'] +['ear', 'ly'] +['.en', 'abled'] +['-', 'edit'] +['ĠU', 'L'] +['_', 'play'] +['ĠInt', 'errupt'] +['Ġadvant', 'ages'] +['uc', 'le'] +['Ġmechan', 'ical'] +['.table', 'LayoutPanel'] +['ĠWork', 'ing'] +['Ġan', 'onymous'] +['R', 'ating'] +['ig', 'ious'] +['_ph', 'one'] +['.addAction', 'Listener'] +['Ġfr', 'an'] +['und', 'en'] +['Ġ*)', '&'] +['_', 'bool'] +['ul', 'ative'] +['Ġcon', 'e'] +['ĠM', 'ult'] +['Ġm', 'ö'] +['ĠFor', 'ward'] +[']', '):Ċ'] +['Ġconvin', 'ced'] +['act', 'ed'] +['ãģ', 'ĵ'] +['ĠConfig', 'ure'] +['Ġce', 'iling'] +['D', 'er'] +['Ġpass', 'engers'] +['Group', 's'] +['Ġsoc', 'cer'] +['/', 'W'] +['avi', 'ors'] +['sw', 'ith'] +['ĠZ', 'one'] +['.', 'Options'] +['ĠM', 'om'] +['ied', 'er'] +['Array', 's'] +['Ġtreat', 'ments'] +['Ġprotect', 'ing'] +['f', 'ac'] +['Ġpick', 'le'] +['Button', 'Item'] +['Ġblock', 'ing'] +['str', 'ar'] +['Ã', '²'] +['ĠEx', 'port'] +['Ġth', 'rew'] +['ott', 'a'] +['ĠB', 'ASE'] +['.w', 's'] +['.LE', 'ADING'] +['order', 'By'] +['_d', 'elay'] +['ĠP', 'u'] +['.d', 'll'] +['ĠCh', 'oose'] +['Pol', 'ice'] +['ĠBE', 'GIN'] +['box', 'es'] +['Ġdiam', 'ond'] +[',', 'l'] +['Ġ', 'ĉĉĉ'] +['Ġcur', 'ious'] +['t', 'v'] +['Ġerot', 'ische'] +['ack', 'ages'] +['ĉ', 'Set'] +['T', 'ick'] 
+['.b', 'order'] +['static', 'method'] +['Ġch', 'er'] +['in', 'voice'] +['Ġcr', 'u'] +['Ġdef', 'ect'] +['_m', 'etadata'] +['re', 'lation'] +['ik', 'an'] +['[', 'N'] +['(Q', 't'] +['(', 'Base'] +['æģ', '¯'] +['be', 'at'] +['ĠEm', 'pty'] +['ĉ', 'o'] +['_sh', 'ift'] +['Ġreg', 'ret'] +['Th', 'ose'] +['C', 'ent'] +['ĠPort', 'ug'] +['ĠIs', 'lands'] +['ĠT', 'IME'] +['Man', 'agement'] +['-s', 'p'] +['ê', 'me'] +['Ġnot', 'ion'] +['un', 'ifu'] +['P', 'K'] +['è¡', 'Į'] +['ĠCUR', 'LOPT'] +['\\"', '\\'] +['U', 'V'] +['ç', 'º'] +['d', 'ra'] +['c', 'ou'] +['=', '`'] +['ĠD', 'estroy'] +['r', 'p'] +['.c', 'ancel'] +['G', 'G'] +['r', 'untime'] +['ĠV', 'ue'] +['Ġprogress', 'ive'] +['/s', 'ervices'] +['Ġrun', 'ner'] +['_FR', 'AME'] +['.ToolStrip', 'MenuItem'] +["Ġ'", ",'"] +['d', 'elay'] +['=', 'utf'] +['Ġscreen', 'ing'] +['Ġpull', 'ing'] +['om', 'as'] +['Ġan', 'th'] +['-', 'new'] +['/', 'local'] +['Ġi', 'Pad'] +['Ġt', 'witter'] +['Ġd', 'ying'] +['Ġhe', 'aven'] +['ĠU', 'Int'] +['ĠSen', 'ator'] +['Ġpres', 'um'] +['ĠWalk', 'er'] +['Ġover', 'come'] +['ete', 'ction'] +['Ġemb', 'arrass'] +['Ch', 'ina'] +['In', 'clude'] +['RO', 'LL'] +['Ġdata', 'Type'] +['D', 'avid'] +['à¸', '£'] +['lo', 'p'] +['-m', 'onth'] +['Ġsc', 'ar'] +['ĠS', 'afe'] +['Ġ', '****************************************************************'] +['Ġaccess', 'ories'] +['Ġr', 'amp'] +['_U', 'SE'] +['Ġcontr', 'ad'] +['))', ']Ċ'] +['Ġpre', 'st'] +['ĠH', 'R'] +['ĠR', 'ap'] +['Ġus', 'ize'] +['Ġcap', 'ability'] +['Ġc', 'ort'] +['-', 'next'] +['Ġbur', 'den'] +['_read', 'er'] +['Ġ@', '@'] +['reg', 'ular'] +['ĠK', 'a'] +['M', 'AN'] +['Ġa', 'str'] +["Ġ'", "')Ċ"] +['Ġf', 'ed'] +['Ġpars', 'ing'] +['ĠY', 'ears'] +['Ġbro', 'ker'] +['":', '{"'] +['Ġa', 'kt'] +['In', 'ventory'] +['abe', 'led'] +['Ġarg', 'parse'] +['******', '*Ċ'] +['vers', 'ation'] +['Ġc', 'ord'] +['ĠT', 'i'] +['Ġhope', 'fully'] +['Ġa', 'h'] +['ver', 'b'] +['Ġst', 'olen'] +['.', 'Entry'] +['Ġexpect', 'ing'] +['O', 'rientation'] +['Ġpower', 'ed'] +['Ġp', 'ersist'] 
+['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ'] +["']", ');'] +["'))", ',Ċ'] +['ĠC', 'ash'] +['ĉ', 'item'] +['gr', 'ades'] +['rop', 'ol'] +['b', 'asic'] +['Ġ"', ');čĊ'] +['Ġaw', 'ards'] +['(r', 'ange'] +['-', 'all'] +['ĠIB', 'Outlet'] +['ĠInd', 'eed'] +['----------------------------------------------------------------', '------------'] +['Ġstom', 'ach'] +['Ġfl', 'ower'] +['Ġs', 'ew'] +['_t', 'imes'] +['av', 'is'] +['Q', 'String'] +['ĠR', 'outes'] +['_pro', 't'] +['Ġcom', 'edy'] +['Ġlog', 'out'] +['Ġwood', 'en'] +['Ġpost', 'er'] +['p', 'iece'] +['.J', 'oin'] +['ĠP', 'ok'] +['cel', 'ona'] +['mut', 'ex'] +[';čĊ', 'čĊčĊ'] +['Ġstri', 'kes'] +['Load', 'ed'] +[')', 'arg'] +['es', 'a'] +['Un', 'ited'] +['E', 'p'] +['PE', 'LL'] +['ĠAtl', 'antic'] +['ul', 'let'] +['app', 'le'] +['Ġsett', 'led'] +['a', 'con'] +['Ġprint', 'er'] +['ĠG', 'C'] +['å®', 'ļ'] +['Ġrender', 'ed'] +[',', 'âĢĻ'] +['he', 'it'] +['s', 'ocial'] +['.', 'ge'] +['ĠR', 'ick'] +['ĠUt', 'ah'] +['g', 'ot'] +['on', 'ical'] +['ĠSc', 'roll'] +['ĠSc', 'iences'] +['Ġj', 'ug'] +['Ġam', 'pl'] +['ent', 'i'] +['LE', 'FT'] +['Ġt', 'abs'] +['Ġenorm', 'ous'] +['.get', 'Key'] +['loc', 'ate'] +['.', 'EX'] +['.st', 'orage'] +['.W', 'e'] +['Ġto', 'ast'] +['ĠAdd', 'itionally'] +['ĠN', 'OW'] +['_', 'UPDATE'] +['Ġtrans', 'ferred'] +['th', 'a'] +['.D', 'isplay'] +['_', 'ui'] +['ID', 'EO'] +['Ġmeaning', 'ful'] +['ĠMos', 'cow'] +[',', 'this'] +['ĠVict', 'oria'] +['æĶ', '¹'] +['ĠÐ', 'Ł'] +['.st', 'ack'] +['ĠB', 'arn'] +['pared', 'Statement'] +[':', 'string'] +['Ġb', 'ij'] +['ĠST', 'ATE'] +['Ġemploy', 'ers'] +['ĉ', 'input'] +['(', '|'] +['Ġle', 'x'] +['in', 'voke'] +['ĉ', 'num'] +['++', ','] +['at', 'ial'] +['ors', 'es'] +['Ġfor', 'k'] +['_t', 'xt'] +['ĠAnton', 'io'] +['Ġ(', '<'] +['aver', 'se'] +['Ġdev', 'ast'] +['ãĢ', 'Ģ'] +['.D', 'ec'] +['ĠG', 'ard'] +['/', 'ui'] +['.', '%'] +['tr', 'i'] +['Ġrol', 'led'] +['Value', 'Pair'] +['itt', 'en'] +['ĠTh', 'er'] +['Ġv', 'rou'] +['ĠFl', 'ow'] +['ĠFin', 'ance'] 
+['ĠCom', 'b'] +['H', 'C'] +['.set', 'Visible'] +['is', 'l'] +['Ġp', 'k'] +['Ġup', 'set'] +['(', 'raw'] +['ĠV', 'ice'] +['e', 'atures'] +['ĠL', 'ang'] +['Look', 'ing'] +['ĠA', 'ST'] +['Ġtri', 'ps'] +['ĠJust', 'in'] +['b', 'rowser'] +['="', "'.$"] +['.', 'vertices'] +['-', 'co'] +['}/', '{'] +['Ġ?', ','] +['ĠD', 'omin'] +['ĠBel', 'g'] +['"', '<'] +['Ġsup', 'pose'] +['add', 'y'] +['Ġwalk', 's'] +['ERR', 'U'] +['_f', 'ilters'] +['Pre', 'ferred'] +['sc', 'ene'] +['е', 'Ñģ'] +['ĠAff', 'airs'] +['Ġ"#', '{'] +['Ġon', 'Submit'] +['Ġstock', 's'] +['/', 'view'] +['g', 'ree'] +['-', 'get'] +['h', 'it'] +['J', 'o'] +['.get', 'C'] +['Initial', 'ized'] +['ÑĤ', 'и'] +['c', 'uts'] +['(', 'Type'] +['ĠAg', 'reement'] +['ĠViet', 'nam'] +['Ġ/*', '!'] +['Ġp', 'izza'] +['-', 'view'] +['_', 'em'] +['Ġl', 'hs'] +['Ġm', 'uy'] +['ĠId', 'ent'] +['ĠF', 'riends'] +['Ġab', 'und'] +['_A', 'D'] +['.t', 'imestamp'] +['-', "'"] +['Ġd', 'uplicate'] +['Ġhun', 'ting'] +['Ġregul', 'atory'] +['ia', 'o'] +['am', 'ous'] +['ĠEnt', 'ertainment'] +['[', 'A'] +['iat', 'ric'] +['_CL', 'IENT'] +['ĠK', 'ids'] +['/p', 'kg'] +['B', 'reak'] +['))', ');ĊĊ'] +['ĠSh', 'ape'] +['Ġrel', 'ating'] +['Int', 'errupt'] +['able', 'Opacity'] +['emb', 're'] +['Ġmyst', 'ery'] +['Ġjournal', 'ists'] +['rit', 'able'] +['.L', 'ink'] +['Ġstop', 'ping'] +['CRE', 'T'] +['.D', 'B'] +['Ġpopular', 'ity'] +['Ġg', 'ew'] +['Ġim', 'pr'] +['set', 'Value'] +['FL', 'AG'] +['ĉm', 'ax'] +['Ġb', 'ake'] +['w', 'y'] +['ĠEcon', 'omic'] +['Ġen', 'contr'] +['Ġf', 'name'] +['/', 'de'] +['R', 'ank'] +['Ġbug', 's'] +['.s', 'm'] +['Ġmed', 'ian'] +['D', 'OWN'] +['ĠS', 'ure'] +['At', 'Index'] +['ĠD', 'ick'] +['Ġ(', '__'] +['.d', 'elta'] +['F', 'r'] +['Ġsuggest', 'ing'] +['ĠRec', 'yclerView'] +[',', 'e'] +['ST', 'ART'] +['/************************************************************************', '****'] +['xf', 'ord'] +['Ġrece', 'ipt'] +['CL', 'AIM'] +['read', 'only'] +['Ġeng', 'aging'] +['C', 'a'] +['as', 'ma'] +['Ġens', 'uring'] +['Eng', 'lish'] +['ĠV', 
'ancouver'] +['hy', 'th'] +['Ġpurch', 'asing'] +['ĠP', 'I'] +['.', 'word'] +['(s', 'p'] +['.h', 'ome'] +[':', 'def'] +['Ġg', 'ig'] +['ĠV', 'e'] +['for', 'um'] +['ĠM', 'itch'] +['B', 'ay'] +['_F', 'L'] +['Ġs', 'oll'] +['_column', 's'] +['Ġminor', 'ity'] +['b', 'ird'] +['Ġhand', 'ed'] +['SS', 'L'] +['ST', 'AT'] +['Ġnerv', 'ous'] +['ĥ', '½'] +['Ġfile', 'Path'] +['CRE', 'ATE'] +['A', 'w'] +['Ġp', 'ens'] +['se', 'ed'] +['ĠCom', 'pute'] +['ol', 'k'] +['ĠAs', 'set'] +['re', 'ach'] +["'),", 'čĊ'] +['n', 'avigation'] +['L', 'F'] +['/', 'util'] +['ĠP', 'ub'] +['Ġâ', 'Ķ'] +['c', 'ion'] +['##', 'Ċ'] +['II', 'I'] +['Tag', 'Name'] +['Ġam', 'id'] +['per', 'mission'] +['if', 'iable'] +['xFFFF', 'FFFF'] +['н', 'и'] +['.B', 'uffer'] +['_', 'irq'] +['d', 'ark'] +['Ġret', 'val'] +['.f', 'ire'] +['produ', 'ction'] +['.list', 'en'] +['ĠWe', 'ather'] +['Ġbuy', 'ers'] +['.', 'ne'] +['er', 'p'] +['ĠP', 'ent'] +['Ġw', 'elfare'] +['Ġpage', 'Size'] +['ĠSt', 'adium'] +['ert', 'a'] +['Ġle', 'v'] +['amp', 'a'] +['P', 'ager'] +['Ġcharg', 'ing'] +['ĠNet', 'flix'] +['|', 'null'] +['_r', 'andom'] +['.x', 'path'] +['Ġst', 'ere'] +['ĠIS', 'IS'] +['pons', 'es'] +['(', 'loc'] +['ey', 'ond'] +['ĠOff', 'icial'] +['ĠMary', 'land'] +['Data', 'Type'] +['_p', 'ar'] +['{', '},'] +['ĠEn', 'joy'] +['_SH', 'IFT'] +['ĠA', 'wards'] +['_ENT', 'RY'] +['Ġseem', 'ingly'] +['entic', 'ate'] +['Ġheart', 's'] +['_', ';ĊĊ'] +['ĠH', 'IV'] +['Ġindiv', 'id'] +['ĠFl', 'ag'] +['_', 'ctrl'] +['ĠC', 'allback'] +[',', 'z'] +['ĠG', 'PU'] +['ĉ', 'obj'] +['ĠPh', 'oenix'] +['ĠB', 'US'] +['Ġrub', 'ber'] +['_A', 'UTH'] +['ĠSol', 'utions'] +['(', 'location'] +['Variable', 's'] +['.set', 'Enabled'] +['_h', 'igh'] +['W', 'O'] +['G', 'esture'] +['Ġre', 'try'] +['Ġobject', 'ForKey'] +['allow', 'een'] +['Ġm', 'os'] +['ĠC', 'ele'] +['Ġik', 'ke'] +['(c', 'ell'] +['ĠM', 'ODE'] +['ren', 'a'] +['Ġdescri', 'bing'] +['Ġph', 'i'] +['Ġr', 'd'] +['Ġdes', 'erve'] +['Ġwhe', 'els'] +['å¸', 'Ĥ'] +['Ġcrit', 'ics'] +['N', 'amespace'] +['ĠF', 'ra'] +['Ġ', 
'ĊĊĊĊ'] +['Ġall', 'a'] +['Ġrequ', 'iring'] +['æľ', 'Ł'] +['ut', 'ation'] +['Ġdelay', 'ed'] +['Ġadministr', 'ative'] +['Ġb', 'ay'] +['.h', 'idden'] +['T', 'ex'] +['Ġbound', 'aries'] +['Ġ]', ');ĊĊ'] +['ĠFollow', 'ing'] +['~', '/'] +['F', 'i'] +['_con', 'v'] +['_T', 'ITLE'] +['Ġdes', 'de'] +['ICollection', 'View'] +['Ali', 'as'] +['Ġb', 'ite'] +['pat', 'ient'] +['_COMM', 'AND'] +['Com', 'pleted'] +['ĉ', 'elif'] +['(', '<'] +['B', 'usiness'] +['ĠP', 'ool'] +['Ġpurs', 'ue'] +['ĠB', 'an'] +['_st', 'eps'] +['_DE', 'CL'] +['um', 'ble'] +['Ġcom', 'bo'] +['ĠL', 'ayer'] +['.x', 'r'] +['Ġd', 'up'] +['--------', '-'] +['Ġmod', 'ifier'] +['ro', 'b'] +['re', 'z'] +['Ġath', 'letes'] +['Us', 'ed'] +['w', 'ear'] +['Ġlegit', 'imate'] +['Ġ"', 'ĊĊ'] +['Ġh', 'v'] +['St', 'd'] +['ĠH', 'old'] +['Ġsurv', 'iv'] +['ĠAll', 'iance'] +['ĠEar', 'ly'] +['Beh', 'avior'] +['(f', 'ont'] +['/lib', 's'] +['Ġrect', 'angle'] +['Ġs', 'inger'] +['Ġam', 'p'] +['Equal', 'To'] +['Ġ"', '."'] +['Ġgirl', 'friend'] +['å', '±'] +['line', 'ar'] +['obs', 'erv'] +['Ġpi', 'ù'] +['Ġcomple', 'ment'] +['With', 'Value'] +['(p', 'assword'] +['t', 'ake'] +['Bl', 'ank'] +['ĠCom', 'par'] +["'", '",'] +['_p', 'olicy'] +['m', 'ongoose'] +['_FA', 'ILED'] +['.re', 'port'] +['R', 'atio'] +['.Perform', 'Layout'] +['us', 'able'] +['m', 'ers'] +['_re', 'nder'] +['PE', 'ED'] +['Ġles', 'b'] +['ĉ', 'E'] +['_t', 'ool'] +['Ġl', 'adies'] +['о', 'Ñģ'] +['))', '))Ċ'] +[';;', ';;'] +['.d', 'ot'] +['Ġn', 'est'] +['pe', 'ak'] +['uk', 'kit'] +['ec', 'a'] +['_S', 'W'] +['Ġ&', '('] +['ĠOk', 'lahoma'] +['Ġbank', 'ing'] +['ĠN', 'intendo'] +['Ġreprodu', 'ce'] +['_element', 's'] +['_m', 'ac'] +['pro', 'xy'] +['Ġremark', 'able'] +['}/', '${'] +['Ġout', 's'] +['.has', 'Next'] +['M', 'ODE'] +['Ġan', 'ime'] +['.con', 'n'] +['Un', 'ique'] +['D', 'om'] +['Ġimportant', 'ly'] +['itt', 'y'] +['Ġju', 'ice'] +['T', 'w'] +['ĠPart', 'ners'] +['Ġattack', 'ing'] +['Ġport', 'able'] +['am', 'iento'] +['.P', 'ictureBox'] +['.g', 'en'] +['Ġopt', 'imal'] +['Ġre', 'cre'] 
+['Ġjournal', 'ist'] +['ĠEx', 'tract'] +['ĠMore', 'over'] +['Ġmargin', 'Top'] +['.A', 'p'] +['Ġf', 'iring'] +['Na', 'N'] +['ĉ', 'template'] +['аÐ', '´'] +['.', 'En'] +['Ġdef', 'ence'] +['ĠT', 'el'] +['il', 'en'] +['j', 'an'] +['=', 'data'] +['ĠU', 'rl'] +['ĠRe', 'uters'] +['(t', 'otal'] +['ĠFif', 'th'] +['Ġess', 'ays'] +['Ġinterpret', 'ation'] +['Ġchar', 'ity'] +['ĠR', 'ules'] +['Ġsub', 'section'] +['st', 'yled'] +['az', 'er'] +['l', 'ags'] +['L', 'IST'] +['Ġupload', 'ed'] +['Ġtr', 'ash'] +['Ġreg', 'istr'] +['Ġsell', 'er'] +[">'", ';čĊ'] +['Ġstart', 'Time'] +['ç', 'Ļ'] +['s', 'y'] +['(Http', 'ServletRequest'] +['Ġtr', 'ap'] +['G', 'C'] +['Ġembed', 'ded'] +['Ġsurround', 'ed'] +['im', 'its'] +['T', 'X'] +['yl', 'inder'] +['ĠF', 'al'] +['Ġsent', 'ences'] +['ĠJ', 'a'] +['IF', 'ICATION'] +['we', 'apon'] +['ov', 'ation'] +['Ġco', 'at'] +['Ġinter', 'pol'] +['Ġl', 'ips'] +['ĠK', 'y'] +['Ġv', 'ectors'] +['_', 'am'] +['Ġint', 'ake'] +['.w', 'orld'] +['Ġin', 'box'] +['ĠM', 'AC'] +['_', 'ab'] +['(name', 'of'] +['Ġent', 'ert'] +['Ġgather', 'ing'] +['ĠS', 'IM'] +['++', '.'] +['ny', 'a'] +["'", '}}'] +['ĠUP', 'DATE'] +['Ġp', 'ac'] +['(', 'html'] +['ĠS', 'ant'] +['i', 'ating'] +['ĠIde', 'as'] +['Ġspr', 'ay'] +['ĠH', 'art'] +['Ġver', 'ification'] +['ades', 'h'] +['/', 'modules'] +['ĠM', 'ind'] +['ĠSized', 'Box'] +['Ġsh', 'elter'] +['Ġher', 'oes'] +['att', 'y'] +['Ġcert', 'ified'] +['s', 'j'] +['Ġê', 'tre'] +['ÅĤ', 'o'] +['Ġpublish', 'ing'] +['ĠMal', 'ays'] +['.get', 'User'] +['ĠPro', 'vider'] +['ĠLinked', 'List'] +['ĠB', 'or'] +['RO', 'UND'] +['d', 'id'] +['t', 'ain'] +['p', 'ire'] +['ĠJ', 'enn'] +['t', 'el'] +['and', 'e'] +['_f', 'ront'] +['ĠMc', 'G'] +['Test', 'Method'] +['à¸', 'Ń'] +['Ġoccasion', 'ally'] +['ĠW', 'ales'] +['Ġexerc', 'ises'] +['ĠÐ', 'Ĵ'] +['-', 'plus'] +['Ġvalid', 'ator'] +['Ġpr', 'ayer'] +['L', 'ATED'] +['_', 'author'] +['Ġlab', 'our'] +['++', 'Ċ'] +['-e', 'quiv'] +['ĠG', 'PL'] +['Ġface', 'book'] +['s', 'imple'] +['g', 'ly'] +['Process', 'or'] +['ip', 'y'] 
+['Ġ*', '>'] +['Ġcle', 'ared'] +['ĠP', 'ush'] +['Ġpen', 'is'] +['Struct', 'ure'] +['li', 'j'] +['ĠM', 'organ'] +['Ġhand', 'ful'] +['"', '.Ċ'] +['|', '\\'] +['Ġ', '********************************'] +['ĠA', 'qu'] +['_', 'IC'] +['.load', 's'] +['Ġm', 'eter'] +['ĠMar', 'ine'] +['::', '{'] +['ĠT', 'S'] +['ĠArray', 's'] +['.T', 'itle'] +['GR', 'AM'] +['ter', 'min'] +['Ġco', 'inc'] +['El', 'se'] +['_st', 'ates'] +['-r', 'un'] +['m', 'embers'] +['ast', 'ro'] +['Ġon', 'Press'] +['Ġbe', 'ings'] +['Ġabandon', 'ed'] +['Ġtax', 'p'] +['own', 'ers'] +['.m', 'ode'] +['Ġdiagn', 'osis'] +['Ġ_', 'Ċ'] +['ĠK', 'night'] +['ĉ', 'A'] +['Ġob', 'serve'] +['),', "'"] +['!', '")Ċ'] +['ĠPar', 'a'] +['Ġvari', 'ation'] +['(', 'False'] +['ĠAnt', 'i'] +['Ġg', 'ri'] +['Ġhome', 'less'] +['?', 'v'] +['Ġbe', 'z'] +['.S', 'erver'] +['re', 'lease'] +['ĠP', 'atri'] +['Ġchar', 's'] +['Ġrank', 'ing'] +['activ', 'ation'] +['Ġw', 'ides'] +['q', 'r'] +['.S', 'ql'] +['ac', 'ular'] +['ĠB', 'ot'] +['_s', 'ync'] +['Ġhapp', 'iness'] +['Ġvolunte', 'ers'] +['Ġs', 'its'] +['/', '<'] +['[', 'e'] +['(file', 'Name'] +['Ġcap', 'ac'] +['ĠMar', 'ia'] +['f', 'ather'] +['Ġgr', 'am'] +['*', 'i'] +['Ġcas', 'o'] +['_d', 'raw'] +['ĠR', 'aw'] +['ĠIter', 'ator'] +['ĠP', 'adding'] +['P', 'D'] +['BO', 'X'] +['ĠS', 'PECIAL'] +['Ġfe', 'cha'] +['Ġv', 'ide'] +['ĠLe', 'ader'] +['ä»', '¥'] +['$', '(".'] +['Ġdiam', 'eter'] +['Ġm', 'ild'] +['Ġrock', 's'] +['app', 'ings'] +['d', 'irectory'] +['.fl', 'ush'] +['ĠJ', 'ess'] +['UN', 'IT'] +['ĠP', 'ear'] +['Ġmand', 'atory'] +['S', 'ur'] +['q', 't'] +['Ġstream', 's'] +['Ġco', 'operation'] +['ĠS', 'ac'] +['Ġche', 'aper'] +['ĉ', 'ch'] +['an', 'imation'] +['f', 'are'] +['(', 'height'] +['(', 'True'] +['N', 'Y'] +['Ġw', 'rest'] +['Ġpoll', 's'] +['Ġencounter', 'ed'] +['ĠMarket', 'able'] +['_P', 'ASSWORD'] +['_SE', 'LECT'] +['ĠArab', 'ia'] +['_c', 'lock'] +['Ġv', 'oy'] +['Ġи', 'з'] +['Ġst', 'ir'] +['is', 'ible'] +['-e', 'ffect'] +['.c', 'reated'] +['Ġto', 'ys'] +['ĠTrad', 'able'] +['Ġr', 'ust'] 
+['Ġstr', 'cpy'] +['_t', 'imestamp'] +['Ġtalent', 'ed'] +[',', 'null'] +['ĠJ', 'obs'] +['ĠPort', 'land'] +['Ġweak', 'ness'] +['Th', 'row'] +['ĠAng', 'el'] +['ä¿', '®'] +['Ġun', 'cert'] +['ï¼ī', 'Ċ'] +['ĠìĿ', '´'] +['Wh', 'ich'] +['Ġ[-', ']:'] +['S', 'omething'] +['Ġconv', 'icted'] +['k', 'le'] +['ed', 'ium'] +['Ġbranch', 'es'] +['Ġb', 'ases'] +['ç', '®'] +['Ġcomplex', 'ity'] +['ĠF', 'ig'] +['.', 'reshape'] +['$', 'db'] +['_CON', 'ST'] +['ĠT', 'es'] +['.r', 'untime'] +['Ġden', 'y'] +['ĠB', 'SD'] +['Ġk', 'r'] +['h', 'att'] +['ĠSt', 'atic'] +['Ġunivers', 'ities'] +['Re', 'place'] +['Ġdro', 've'] +['Ġad', 'oles'] +['_pl', 'ugin'] +['ĠL', 'GBT'] +['Ġt', 'ex'] +['du', 'ction'] +['ED', 'I'] +['ĠT', 'ed'] +['_', 'URI'] +['Ġre', 'ception'] +['art', 'en'] +['.S', 'ingle'] +['r', 'ice'] +['sc', 'ious'] +['_b', 'g'] +['Ġw', 'ages'] +['ĠS', 'ervlet'] +['UIL', 'ayout'] +['Ġform', 'atted'] +['.M', 'od'] +['<', 'class'] +['is', 'en'] +['Ġrepresent', 'atives'] +['"]', '='] +['Ġport', 'al'] +['ĠHun', 'ter'] +['Ġh', 'iring'] +['__', ')Ċ'] +['ric', 'ulum'] +['u', 'o'] +['li', 'est'] +['Ġt', 'ears'] +['L', 'at'] +['Ġliter', 'al'] +['.In', 'sert'] +['Ġc', 'urs'] +['ĠCom', 'put'] +['Ġterror', 'ism'] +['Ġswe', 'ep'] +['Ġ[]', 'čĊ'] +['Ġpass', 'enger'] +['Ġeast', 'ern'] +['Ġtwe', 'ets'] +['Ġoper', 'ated'] +['w', 'nd'] +['ĠS', 'yn'] +['.t', 'ools'] +['ĠW', 'M'] +['ul', 'ates'] +['Ġbacter', 'ia'] +['(', 'bytes'] +['.set', 'Data'] +['Ġvis', 'ibility'] +['//', '================================================================'] +['el', 'm'] +['Ġgener', 'ating'] +['Ġm', 'v'] +['Ġk', 'h'] +['j', 'en'] +['/', 'search'] +['Ġaccount', 'ing'] +['se', 'gment'] +['act', 'ic'] +['.', 'ip'] +['Ġdeploy', 'ment'] +['Ġfoot', 'er'] +['>', "',Ċ"] +['Ġexpand', 'ing'] +['ĠHam', 'ilton'] +['ĠCon', 'trib'] +['.T', 'ables'] +['Act', 'iv'] +['H', 'H'] +['ocom', 'merce'] +['_', ';'] +['Ġamong', 'st'] +['ow', 'ing'] +['ĠC', 'old'] +['AP', 'H'] +['Ġpsych', 'ological'] +['_t', 'ensor'] +['Ġpack', 'aging'] +['ĠSw', 
'eden'] +['Ġp', 'are'] +['Ġag', 'gregate'] +['Ġmoder', 'ate'] +['_h', 'and'] +['Ġdesign', 'ated'] +['Ġdr', 'um'] +['Ġget', 'User'] +['ĠC', 'reek'] +['_s', 'cope'] +['ĠTrans', 'fer'] +['ĠM', 'arg'] +['Ġfight', 'ers'] +['W', 'nd'] +['ĠS', 'el'] +['ĠLa', 'unch'] +['Ġemerg', 'ing'] +['if', 'rame'] +['ĠAdd', 'itional'] +['Ġf', 'ears'] +['Ġsat', 'ellite'] +['_', ':'] +['Ġdis', 'posing'] +['Get', 'Value'] +['Http', 'Post'] +['AT', 'IVE'] +['ul', 'ary'] +['View', 's'] +['Ġatt', 'ending'] +['ĠT', 'ennessee'] +['ĠM', 'ission'] +['Ġmedic', 'ation'] +['ĠW', 'y'] +['ĠAn', 'na'] +['Ø', '¹'] +['ĠVert', 'ex'] +['.t', 'ypes'] +['O', 'rgan'] +['.DataGridView', 'TextBoxColumn'] +['ĠR', 'S'] +['Ġtemp', 'o'] +['(', 'App'] +['Version', 'UID'] +['.p', 'oint'] +['ĠD', 'utch'] +['H', 'ours'] +['L', 'U'] +['Ġqu', 'oted'] +['.b', 'uilder'] +['ĠPer', 'fect'] +['ĠAl', 'ways'] +['_t', 'wo'] +['Ġexclus', 'ively'] +['ĠC', 'ra'] +['ific', 'ar'] +['ĠA', 'WS'] +['ing', 'ham'] +['com', 'plex'] +['k', 'ernel'] +['Ġgr', 'avity'] +['Ġw', 'i'] +['Ġover', 'view'] +['ĠW', 'ant'] +['ĠW', 'P'] +['(', 'sh'] +['.', 'rotation'] +['St', 'ates'] +['ĠTe', 'en'] +['_com', 'ponents'] +['ì', 'Īĺ'] +['Re', 'ceived'] +['Ġly', 'rics'] +['rit', 'es'] +['ĉĉĉĉĉ', 'Ġ'] +['-A', 'merican'] +['[', 'num'] +['/', 'python'] +['ĠU', 'ART'] +['Ġapp', 'le'] +['ĠJon', 'athan'] +['Ġmoment', 'um'] +['à¸', '±'] +['Ĥ', '¹'] +['Ġm', 'ich'] +['and', 'ra'] +['Ġbi', 'ological'] +['ĠM', 'ens'] +['Ġ%', '%'] +['else', 'a'] +['ĠMex', 'ican'] +['.rand', 'int'] +['Ġt', 'ale'] +['ĠValid', 'ate'] +['Ġdefe', 'ated'] +['.ht', 'm'] +['Ġcop', 'per'] +['=', '/'] +['cos', 'ystem'] +['Ġr', 'ip'] +['dec', 'imal'] +['.V', 'ISIBLE'] +['ĠT', 'a'] +['ĉĉĉĉĉĉĉĉ', 'ĉĉĉĉĉĉ'] +['Ġdownload', 'ed'] +['en', 'vironment'] +['Ġnom', 'ine'] +['build', 'ing'] +['ĠSp', 'ot'] +['ipher', 'al'] +['Ġal', 'to'] +['qu', 'et'] +['ĠF', 'T'] +['/', 'get'] +['/m', 'aster'] +['W', 'IN'] +['åħ', 'ĥ'] +['W', 'est'] +['arg', 'c'] +['Ġprodu', 'cers'] +['ĠM', 'uch'] +['_st', 'orage'] 
+['cred', 'it'] +['CON', 'T'] +['Ġv', 'et'] +['Ġvo', 'ices'] +["('", "',"] +['Ġinstr', 'uments'] +['ĠM', 'SG'] +['es', 'se'] +['re', 'pository'] +['om', 'ics'] +['Ġdeal', 'er'] +['St', 'ill'] +['Ġb', 'anner'] +['asc', 'ii'] +['Ġrem', 'arks'] +['[', 'js'] +['Ġshort', 'er'] +['g', 'ulp'] +['Ġmyst', 'er'] +['Ġk', 'un'] +['ĠB', 'ird'] +['Ġti', 'ene'] +['n', 'ut'] +['ĠU', 'm'] +['Ġw', 'ise'] +['Y', 'eah'] +['INE', 'SS'] +['_b', 'egin'] +['-', 'heading'] +['C', 'ourse'] +['Ġ', 'čĊčĊ'] +['omb', 'ie'] +['grad', 'ed'] +['ĠG', 'PS'] +['Ġ', 'że'] +['F', 'it'] +['c', 'aption'] +['ö', 'n'] +['/', 'image'] +['l', 'ia'] +['(m', 'od'] +['Ġle', 'ak'] +['en', 'za'] +['/', 'H'] +['ĠH', 'appy'] +['D', 'ist'] +['n', 'x'] +['ĠGovern', 'or'] +['(l', 'ast'] +['te', 'acher'] +['ĠS', 'ent'] +['s', 'upport'] +['ject', 'ory'] +['Ġ', 'Ùħ'] +['Reg', 'istration'] +['ĠGr', 'ay'] +[',', 'false'] +['Ġadjust', 'ed'] +['(', 'settings'] +['<', 'R'] +['ĠM', 'age'] +['Ġpl', 'aint'] +['_', ')Ċ'] +['ĉ', 'it'] +['omet', 'ric'] +['.', 'bootstrap'] +['Ġcar', 'ries'] +['I', 'p'] +['Ġ!', '$'] +['Ġswim', 'ming'] +['ĠMar', 'io'] +['ĠQuest', 'ions'] +['P', 'ACE'] +['æĸ', '¹'] +['e', 'or'] +['}}', '"'] +['Ġo', 'ven'] +['ĠK', 'on'] +['Ġwis', 'dom'] +['Ġac', 'quisition'] +['ess', 'ment'] +['ag', 'ine'] +['Ġexpress', 'ions'] +['Sequential', 'Group'] +['F', 'ront'] +['ul', 'pt'] +['aw', 'k'] +["']", ')ĊĊ'] +['_', 'AR'] +['Ġanal', 'og'] +['ul', 'in'] +['_PR', 'INT'] +['ĠL', 'G'] +['Ġb', 'lob'] +['ĠFurther', 'more'] +['_com', 'ponent'] +['ĠC', 'ole'] +['L', 'AN'] +['SCRI', 'PTION'] +['Ġl', 'ap'] +['icens', 'ing'] +['_TIME', 'OUT'] +['ĠF', 'ro'] +['Ġli', 'ability'] +['Ġcom', 'posed'] +['.create', 'SequentialGroup'] +['_p', 'erson'] +['Ġbe', 'am'] +['ĉ', 'ĠĠĠĠĠĠĠĠ'] +['ĠNot', 'Found'] +['.', "'Ċ"] +['ÃŃ', 's'] +['.Text', 'View'] +['P', 'DF'] +['Ġk', 'ar'] +['__', "('"] +['Ġ"', ':"'] +['_m', 'essages'] +['Ġhar', 'vest'] +['.h', 'istory'] +['>', "'Ċ"] +['-f', 'old'] +['æ', 'Ĭ'] +['ĠBet', 'ter'] +['Ġ"\\', '<'] +['sp', 
'acing'] +['Ġfurn', 'ished'] +['os', 'er'] +[']', '}Ċ'] +['Ġ$', '"'] +['p', 'ull'] +['.P', 'ost'] +['(', 'ip'] +['Ĺ', 'ı'] +['.f', 'ront'] +['nt', 'e'] +['ĠF', 'M'] +['g', 'uid'] +['Ġnegot', 'iations'] +['agon', 'al'] +['Ġtrem', 'end'] +['unge', 'on'] +['Ad', 'v'] +['car', 'ousel'] +['ÃŁ', 'e'] +['_DE', 'SC'] +['Ġham', 'mer'] +['áº', 'Ń'] +['ĠĠĠĠĠĠĠĠ', 'ĊĊ'] +['-c', 'ore'] +['-s', 'ervice'] +['Ġcorn', 'ers'] +['ĠS', 'F'] +['p', 'red'] +['>', 'A'] +['ĠJ', 'Label'] +['Ġrom', 'antic'] +['Ġtestim', 'ony'] +['os', 'c'] +['ĠGener', 'ation'] +['as', 'ures'] +['_int', 'ernal'] +['Ġprint', 's'] +['Ġ]', ')Ċ'] +['ĠC', 'leveland'] +['re', 'po'] +['D', 'isc'] +['Ġ"', '>Ċ'] +['��', '��'] +['Ġne', 'arest'] +['_t', 'b'] +['(', 'require'] +['EO', 'F'] +['-', 'child'] +['Ġbu', 'dd'] +['.Xtra', 'Editors'] +['alt', 'ies'] +['\\":', '\\"'] +['W', 'ords'] +['Ġloc', 'ally'] +['Ġpurch', 'ases'] +['Draw', 'er'] +['ex', 'tract'] +['Ġexec', 'ut'] +['}', "'."] +['user', 'data'] +['Ġfocus', 'es'] +['-min', 'ute'] +['ĠP', 'ublish'] +['og', 'o'] +['Ġmount', 'ains'] +['B', 'ot'] +['}', '>{'] +['Ġt', 'ension'] +['ro', 'd'] +['m', 'esh'] +['Ġtransform', 'ed'] +[',', 'R'] +['()', '}Ċ'] +['.l', 'ong'] +['Ġg', 'orgeous'] +['ĠS', 'chedule'] +['Ġol', 'dest'] +['Ġsub', 'process'] +['(', 'IN'] +['y', 'ect'] +['ĠCo', 'oper'] +['arn', 'ess'] +['ĠMon', 'itor'] +['.p', 'art'] +['ĠN', 'BC'] +['Ġc', 'otton'] +['Ġh', 'ol'] +['Ġrg', 'ba'] +['ĠB', 'io'] +['Cont', 'inue'] +['P', 'od'] +['Ġparticip', 'ating'] +['clus', 'ions'] +['(By', 'Val'] +['Ã', '¬'] +['ĠH', 'OW'] +['_set', 'opt'] +['Ġaccompany', 'ing'] +['at', 'on'] +['Ġ/', '\\'] +['ĠAuth', 'entication'] +['i', 'én'] +['ĠBar', 'ack'] +['/*', '.'] +['Ġe', 'ager'] +['ĠC', 'ancel'] +['<', 'lemma'] +['ep', 'h'] +['ĉ', 'window'] +['Ġinc', 'idents'] +['),', '('] +['.D', 'es'] +['ib', 'e'] +['ĠFunction', 's'] +['Ġhosp', 'itals'] +['Ġo', 'xygen'] +['root', 'Scope'] +['Ġd', 'rew'] +['ĉ', 'request'] +['not', 'ice'] +['ak', 'u'] +['am', 'ents'] +['f', 'ar'] +['Ġprec', 
'ise'] +['_w', 'rapper'] +['Ġlisten', 'ers'] +['A', 'Z'] +['.b', 'ounds'] +['ĠA', 'verage'] +['field', 'set'] +['_', 'axis'] +['Ġexam', 'ination'] +["'", '.Ċ'] +['mon', 's'] +['++)', '{čĊ'] +['ĠForm', 's'] +['íķ', 'ľ'] +['Cpp', 'Method'] +['_tr', 'ace'] +['Ġengine', 'er'] +['ĠFl', 'at'] +['Ġrev', 'ision'] +['Ġhe', 'ating'] +['/', 'profile'] +['.r', 'u'] +['p', 'riority'] +['Ġin', 'fer'] +['_ST', 'REAM'] +['Ġ*', ')('] +['>', '$'] +['OLE', 'AN'] +['OK', 'IE'] +['IB', 'ILITY'] +['U', 'AGE'] +['ĠSur', 'vey'] +['Ġres', 'ign'] +['w', 'ing'] +['Ġsecre', 'ts'] +['Ġch', 'ips'] +['JSON', 'Object'] +['Des', 'ktop'] +['_SY', 'MBOL'] +['(res', 'ource'] +['ĠĊ'] +['Ġnew', 'est'] +['ul', 'i'] +['Ġdes', 'ert'] +['Ġd', 'ip'] +['ĠP', 'ow'] +['Ġequ', 'ation'] +['Ġposs', 'ibilities'] +['ĠF', 'ed'] +['os', 'ph'] +['Ġ[', '%'] +['Ġb', 'ubble'] +['ether', 'lands'] +['Ġc', 'ement'] +['.', 'auto'] +['_', 'AN'] +['âĢĻ', '.'] +['se', 'lection'] +['ĠB', 'ond'] +['D', 'en'] +['-', 'O'] +['.get', 'Type'] +['.W', 'indow'] +['p', 'res'] +['Ġsw', 'inger'] +['"', '})Ċ'] +['Ġp', 'ip'] +['Ġm', 'ice'] +['Ġcomp', 'ound'] +['-', 'plugin'] +['ik', 'o'] +['Ġcent', 'uries'] +['ic', 'ular'] +['-in', 'line'] +['ĉ', 'key'] +['>', '\\<'] +['EN', 'SION'] +['Ġ[', 'čĊ'] +['Ġprecis', 'ely'] +['Ġét', 'é'] +['ĠP', 'ast'] +['ĠCam', 'bridge'] +['-f', 'ull'] +['Ġanaly', 'ze'] +['ĠSte', 'ven'] +['Ġn', 'em'] +['d', 'ue'] +['ore', 'n'] +['Ġmus', 'cles'] +['ij', 'ing'] +['/', '-'] +['ĠKenn', 'edy'] +['R', 'M'] +['oss', 'ible'] +['Ġact', 'ress'] +['Ġd', 'olor'] +['å½', 'ķ'] +['Ne', 'ed'] +['.t', 'oggle'] +['ĠR', 'ace'] +['w', 'ers'] +['.m', 'aterial'] +['ĠD', 'ue'] +['ĠP', 'el'] +['#', 'print'] +['Ġindepend', 'ence'] +['ex', 'us'] +['Sh', 'adow'] +['Ġenc', 'oder'] +['(', 'level'] +['ĠSw', 'ift'] +['.d', 'oc'] +['_se', 'lection'] +['Ġserial', 'VersionUID'] +['Label', 's'] +['Ġperform', 'ances'] +['.T', 'ag'] +['ĠN', 'HL'] +['iz', 'en'] +['/', 'UIKit'] +['_CONT', 'ROL'] +['Ġearn', 'ings'] +['ĠAl', 't'] +['_H', 'ANDLE'] +['C', 
'tx'] +['Ġpers', 'u'] +['Ġtr', 'an'] +['ç', '¨'] +['_CH', 'ANNEL'] +['Ġsatisf', 'action'] +['ĠG', 'P'] +['io', 'x'] +['m', 'itt'] +['land', 'o'] +['Ġp', 'ig'] +['inal', 's'] +['ê', 'ncia'] +['S', 'urface'] +['ĠU', 'UID'] +['Ġbenef', 'icial'] +['Ġsequ', 'ences'] +['ĉmem', 'set'] +['Ġmag', 'ical'] +['Â', '«'] +['Ġw', 'orn'] +['AS', 'C'] +['pop', 'up'] +['COM', 'P'] +['_b', 'efore'] +['en', 'ess'] +['U', 'i'] +['L', 'es'] +['.re', 'quire'] +['.Serial', 'izable'] +['add', 'Gap'] +['Ġauthor', 'ization'] +['.py', 'plot'] +['urr', 'ay'] +['lat', 'itude'] +['fr', 'ames'] +['aj', 's'] +['Ġcomp', 'ass'] +['Ġobserv', 'ations'] +['_s', 'up'] +['.en', 'viron'] +['Ġtri', 'ple'] +['ĠRub', 'y'] +['Ġdr', 'ain'] +['_F', 'ILTER'] +['S', 'an'] +['UM', 'P'] +['Null', 'Exception'] +['ĠG', 'ab'] +['ow', 'e'] +['ĠTurk', 'ish'] +['_se', 'quence'] +['ĠGr', 'ant'] +['uel', 'a'] +['Ġw', 'o'] +['Ġc', 'ube'] +['i', 'q'] +['Ġdis', 'orders'] +['Ġextra', 'ordinary'] +['Ġc', 'trl'] +['ĠSe', 'q'] +['ent', 'r'] +['Ġsan', 'ctions'] +['uts', 'ch'] +['Re', 'ports'] +['Ġin', 'herit'] +['Per', 'iod'] +['Ġphot', 'ography'] +['ĠF', 'ramework'] +['Ġspecial', 'ist'] +['Ġ?', 'ĊĊ'] +['_', 'selected'] +['.P', 'layer'] +['Ġal', 'location'] +['(', 'account'] +['Ġstruct', 'ural'] +['v', 'able'] +['-', 'offset'] +['.App', 'CompatActivity'] +['аÐ', '¼'] +['.Add', 'WithValue'] +['Ġicon', 's'] +['Ġshut', 'down'] +['_l', 'ow'] +['ĠCom', 'pare'] +['ĠC', 'e'] +['=', 'head'] +['l', 'am'] +['.p', 'redict'] +['_DE', 'C'] +['ĠS', 'leep'] +['ĠGr', 'atis'] +['Ġsuggest', 'ion'] +['ĠD', 'EL'] +['ca', 'ff'] +['av', 'irus'] +['No', 'thing'] +['ŀ', 'ĭ'] +['Ġwides', 'pread'] +['Ġmechan', 'isms'] +['Ġtext', 'Align'] +['occ', 'up'] +['ĠR', 'ail'] +[':', 'NS'] +['Ġf', 'iber'] +['Ġm', 'k'] +['Ġv', 'intage'] +['-l', 'ong'] +['.re', 'duce'] +['.', 'Entities'] +['(', 'record'] +['Ġple', 'asant'] +['FR', 'ING'] +['.C', 'ells'] +['OT', 'T'] +['ĉelse', 'if'] +['_con', 'firm'] +['ĠView', 'Group'] +['s', 'ym'] +['Ġpr', 'ay'] +['Ġsus', 'pected'] 
+['Cont', 'ains'] +['Ġb', 'orders'] +['Ġcomponent', 'Did'] +['ASS', 'ERT'] +['Ġinf', 'inite'] +['-', 'order'] +['Ġh', 'ello'] +['ĠGr', 'ade'] +['.currentTime', 'Millis'] +['apol', 'is'] +['z', 'h'] +['ĉ', 'Object'] +[':', '\\\\'] +['H', 'O'] +['val', 'uation'] +['Ġvoc', 'ab'] +['Ġcou', 'pon'] +['atab', 'ases'] +['.Get', 'Type'] +['L', 'earn'] +[']', '="'] +['ĠG', 'ary'] +['ot', 'ive'] +['Ġas', 'h'] +['Ġb', 'ib'] +['XX', 'XX'] +['Ġbal', 'anced'] +['VAL', 'UE'] +['ĠN', 'at'] +['_A', 'd'] +['<', 'E'] +['åĮ', 'º'] +['ĠMethod', 'Info'] +['L', 'IB'] +['Ġconsider', 'able'] +['ĠInd', 'ustry'] +['test', 's'] +['.set', 'Title'] +['ĠBl', 'uetooth'] +['Ġm', 'apped'] +['ĠBru', 'ce'] +['ĠMain', 'Window'] +['ĉ', 'status'] +['Ġr', 'az'] +['ĠM', 'and'] +['Ġclass', 'ification'] +['Per', 'missions'] +['Ġ----------------------------------------------------------------', '------------'] +['Ġcontain', 'ers'] +[':', 'set'] +['_x', 'ml'] +['Ġwh', 'ilst'] +['Th', 'rough'] +['Ġval', 'ign'] +['Ġworld', 's'] +['C', 'ORD'] +['ED', 'IA'] +['ÑĢ', 'ов'] +['Ġsp', 'are'] +['ĠH', 'ad'] +['ĠDE', 'F'] +['(p', 'tr'] +['Ġwarm', 'ing'] +['à¤', '¾'] +['Ġcons', 'ensus'] +['ag', 'ne'] +['CT', 'L'] +['Ġì', 'ķ'] +['.M', 'ain'] +['web', 'Element'] +['Ġp', 'ist'] +['Fl', 'ash'] +['App', 'end'] +['.tw', 'img'] +['T', 'ap'] +['Ġveget', 'ables'] +['al', 'g'] +['.s', 'ample'] +['Ġcoach', 'ing'] +['(', 'ind'] +['Cell', 'Value'] +['Check', 'Box'] +['ĠH', 'ell'] +['RO', 'OT'] +['Ġst', 'adium'] +['Ġinvestig', 'ating'] +[')', '%'] +['st', 'ed'] +['ĠW', 'riting'] +['Ġê', '²'] +['Ġun', 'o'] +['Ġ{{', '--'] +['Ġco', 'ords'] +['Ġun', 'ser'] +['organ', 'ization'] +['ĠCr', 'ime'] +['ĠDemocr', 'at'] +['Ġv', 'in'] +['/', 'file'] +['-', 'api'] +['ĠA', 'y'] +['Ġfund', 'ed'] +['ĠBre', 'xit'] +['ĠG', 'h'] +['ent', 'ina'] +['c', 'ases'] +['Ġd', 'ash'] +['Ġ!!', '}Ċ'] +['H', 'I'] +['Off', 'ice'] +['Ġcapt', 'ain'] +['Ġwor', 'ship'] +['\\', 'C'] +['Ġglo', 'be'] +['_', 'board'] +['Ġbab', 'ies'] +['Ġconsec', 'utive'] +['Ġenh', 'anced'] 
+['ere', 'um'] +['ĠAd', 'vis'] +['Ġgr', 'ain'] +['Ġc', 'raw'] +['ancell', 'ationToken'] +['.', 'alpha'] +['_W', 'ITH'] +['ĠO', 'tt'] +['ĠC', 'ool'] +['.b', 'atch'] +['Ġver', 'ified'] +['(c', 'allback'] +['Ġreg', 'ards'] +['ĠInt', 'Ptr'] +['ouch', 'er'] +['Ġk', 'in'] +['Ġtou', 'ched'] +['it', 'Ãł'] +['ath', 'on'] +['Ġadj', 'acent'] +['Ġaccom', 'panied'] +['LE', 'AR'] +['Ġim', 'plies'] +['Ġh', 'ill'] +['ĠBalt', 'imore'] +['="', '-'] +['Fin', 'ally'] +['S', 'am'] +['ic', 'opt'] +['Ġs', 'od'] +['Ġm', 'aj'] +['ĠSh', 'ipping'] +['Ġget', 'All'] +['Ġcoach', 'es'] +['Ġdon', 'ations'] +['il', 'ot'] +['ĠT', 'ar'] +['c', 'err'] +['Ġbad', 'ge'] +['Ġmark', 'ers'] +['ĠR', 'and'] +['ais', 'ed'] +['iss', 'ance'] +['Ġexpl', 'oring'] +['uc', 'ed'] +['ĠIndones', 'ia'] +['Ġbene', 'ath'] +['Ġmagn', 'etic'] +['Ġm', 'useum'] +['match', 'Condition'] +['Ġdis', 'rupt'] +['Ġrem', 'ind'] +['ĠT', 'M'] +['Ġ/', '><'] +['Ġf', 'ool'] +['Ġes', 'k'] +['.N', 'ull'] +['ĠD', 'ies'] +['_OUT', 'PUT'] +['_TYP', 'ED'] +['Ġpaint', 'ed'] +['Ġsoph', 'istic'] +['ĠB', 'ear'] +['*', 'n'] +['_P', 'ACK'] +['Ġdeliver', 'ing'] +['ĠC', 'OUNT'] +['åį', 'ķ'] +['Ġj', 'eg'] +['-c', 'ar'] +['f', 'name'] +['Ġr', 'anging'] +['ĠN', 'eg'] +['/', '******/'] +['ĠCH', 'AR'] +['Ġul', 'tra'] +['Gr', 'ad'] +['=', 't'] +['Ġjud', 'ges'] +['ĠD', 'ise'] +['ann', 'ers'] +['Ġsc', 'al'] +['_c', 'al'] +['ĠCON', 'NECTION'] +['_', 'embed'] +['(f', 'n'] +['ĠC', 'raft'] +['ĠP', 'as'] +['")', '->'] +['.con', 'vert'] +['.res', 'ource'] +['ĠST', 'ATUS'] +['ô', 'ng'] +['ĠT', 'it'] +['Ġclass', 'room'] +['ĠArch', 'itect'] +['ĠK', 'ings'] +['Ġstead', 'y'] +['/*', '!Ċ'] +['ĠG', 'ene'] +[')', '";Ċ'] +['ic', 'ia'] +['st', 'an'] +['ĠCon', 'struction'] +['um', 'per'] +['w', 'c'] +['ĠC', 'BS'] +['ing', 'ing'] +['-p', 'arty'] +['(d', 'river'] +['M', 'ARK'] +['Ġn', 'ested'] +['ew', 'ard'] +['Ġdepend', 'ency'] +['Ġm', 'ales'] +['ĠO', 'NE'] +['ĠProdu', 'ction'] +['][', '$'] +['ãĥ¼', 'ãĥ'] +['_LO', 'AD'] +['ĠB', 'ol'] +['el', 'ry'] +['ł', 'éϤ'] +['ĠRe', 'quire'] 
+['Ġpl', 'acing'] +['xx', 'x'] +['CA', 'LE'] +['Ġth', 'umb'] +['Ch', 'oose'] +['Ġprot', 'otype'] +['VO', 'ID'] +['Ġles', 'bian'] +['Ġtra', 'its'] +['Sh', 'arp'] +['Ġconsum', 'e'] +['Tr', 'uth'] +['Ġaction', 'Performed'] +['ĠEnvironment', 'al'] +['ĠDe', 'an'] +['Ġest', 'ado'] +['s', 'ame'] +['Ġnumer', 'ic'] +['Ġtrans', 'it'] +['.', 'Email'] +['-s', 'ide'] +['_R', 'UN'] +['ĠVill', 'age'] +['_OP', 'EN'] +['è', '¦'] +['.re', 'm'] +['-w', 'arning'] +['any', 'a'] +['Property', 'Changed'] +['Ġ(!', '_'] +['(', 'check'] +['il', 'ia'] +['ĠSo', 'ft'] +['st', 'eps'] +['ĠMad', 'rid'] +['Memory', 'Warning'] +['Ġhand', 'lers'] +['Ġexperi', 'encing'] +['Ġins', 'pect'] +['button', 's'] +['Receive', 'MemoryWarning'] +['chem', 'y'] +['Link', 's'] +['Ġur', 'llib'] +['.System', 'Colors'] +['ĠE', 'igen'] +['Ġpun', 'ishment'] +[':UI', 'Control'] +['bar', 'a'] +['-', 'set'] +['Ġ}čĊčĊ', 'čĊ'] +['Ġtoler', 'ance'] +['Ġinter', 'faces'] +['.', 'redirect'] +['ighb', 'ors'] +['cs', 'rf'] +['_back', 'ground'] +['.', 'Utils'] +['_H', 'T'] +['ĠInter', 'est'] +['im', 'os'] +['Ġgr', 'ants'] +['Ġexam', 'ined'] +['Ð', 'Ķ'] +['Ġc', 'f'] +['for', 'ge'] +['back', 's'] +['ĠObject', 's'] +['_s', 'ent'] +['.', 'entry'] +['ĠTH', 'EN'] +['ell', 'ido'] +['c', 'ia'] +[',', 'res'] +['/std', 'c'] +['.', 'nd'] +['(', 'Int'] +['ĠAuth', 'ors'] +['ĠApp', 'CompatActivity'] +["'", '{'] +['Ġmed', 'i'] +['M', 'usic'] +['ig', 'm'] +['ce', 'ipt'] +['Ġa', 'uss'] +['Ġtarget', 'ing'] +['ĠKe', 'ys'] +['h', 'n'] +[':', ']Ċ'] +['Ġmin', 'eral'] +['Ã', '®'] +['.c', 'a'] +['om', 'ed'] +['Ġshe', 'ets'] +['Ġc', 'amb'] +['Ġdead', 'ly'] +['.in', 'ject'] +['(', 'unit'] +['ĠSe', 'lection'] +['.g', 'ms'] +['(', 'connection'] +['Ġ$', '("'] +['é', 'mon'] +['ĠCurrent', 'ly'] +['pt', 'e'] +['_path', 's'] +['le', 'af'] +['Ġimp', 'lications'] +['pos', 'al'] +['ä½', 'į'] +['[', '/'] +['anc', 'ia'] +['é', 'Ľ'] +['m', 'ul'] +['c', 'ie'] +['Ġge', 'ile'] +['im', 'als'] +['UI', 'View'] +['Ġs', 'urre'] +['serial', 'ize'] +['IS', 'O'] +['Ġarbit', 
'rary'] +['Ġsock', 'addr'] +['.f', 'n'] +['ĠM', 'erc'] +['Ġcast', 'ing'] +['Key', 'Down'] +['Ġnew', 'Value'] +['op', 'ens'] +['T', 'odo'] +['Ġflex', 'ibility'] +['ĉĉĉĉ', 'ĠĠ'] +['V', 'elocity'] +['ú', 'n'] +['row', 'ing'] +['Ġcomput', 'ed'] +['`', ')Ċ'] +['st', 'atement'] +['Ġr', 'i'] +['_c', 'art'] +['L', 'ow'] +['trans', 'fer'] +['.n', 'av'] +['Ġgr', 'ave'] +['ĠDo', 'or'] +['ĉ', 'alert'] +['.sub', 'scribe'] +['-', 'profile'] +['ĉb', 'ase'] +['ĠâĪ', 'Ĵ'] +['__', 'ĊĊ'] +['Ġengine', 'ers'] +['Ġexplos', 'ion'] +['Ġd', 'ari'] +['ĉ', 'Log'] +['on', 'al'] +['Ġisol', 'ated'] +['{', 'i'] +['ĠM', 'sg'] +['F', 'uture'] +['Ġrac', 'ist'] +['-w', 'rap'] +['ĠV', 'ers'] +['b', 'org'] +['IS', 'ION'] +['Ġ', 'ÑĢаÐ'] +['ĠY', 'an'] +['init', 'With'] +['Ġn', 'omin'] +['(', 'empty'] +['ÃŃ', 'n'] +['ãĤ', '¤'] +['ĉ', 'width'] +['Ġch', 'amber'] +['/', 'ajax'] +['EM', 'P'] +['Ġnec', 'es'] +['iv', 'os'] +['log', 'ic'] +['*)', '&'] +['cript', 's'] +['Row', 'At'] +['ib', 'lings'] +['Ġe', 'ars'] +['Ġcomput', 'ing'] +['Ġm', 'aker'] +['ĠNe', 'ither'] +['b', 'readcrumb'] +['Ġserial', 'ize'] +['ĠWith', 'in'] +['Ġd', 'ell'] +['_TR', 'ACE'] +['=', 'a'] +['Ġwish', 'es'] +['-in', 'ch'] +['ĠD', 'or'] +['Ġinnoc', 'ent'] +['ĠD', 'ol'] +['Ġint', 'ens'] +['for', 'ced'] +['ĠB', 'IT'] +['Ġphotograph', 's'] +['Ġcas', 'a'] +['ĠL', 'en'] +['\\F', 'ramework'] +['.S', 'imple'] +['Ġde', 'ar'] +[')/', '('] +['ip', 'pi'] +['Ġown', 's'] +['Pl', 'ayers'] +['Ġpropos', 'als'] +['.p', 'i'] +['us', 'alem'] +['D', 'amage'] +['Ġcal', 'ories'] +['ĠCreat', 'ive'] +['Ġ[', '$'] +['Ġ//', 'čĊ'] +['And', 'View'] +['è', 'me'] +['.c', 'ustom'] +['_f', 'actory'] +['command', 's'] +['_lo', 'ok'] +['Ġstr', 'cmp'] +['Y', 'N'] +['a', 'ired'] +['Ġaud', 'it'] +['о', 'ÑģÑĤ'] +['ĠRe', 'verse'] +['ropri', 'ate'] +['et', 'ics'] +['<', 'vector'] +['.s', 'elenium'] +['.', 'or'] +['Ġpred', 'icate'] +['Ġfinish', 'ing'] +['Ġk', 'le'] +['ĠRep', 'os'] +['ĠK', 'han'] +['ĠM', 'aking'] +['ĠF', 'S'] +['Ġp', 'ute'] +['ĉ', 'state'] +['_S', 'UPPORT'] +["'", 
'-'] +['orient', 'ation'] +['Ġexist', 'ed'] +['atur', 'a'] +['Ġexpect', 's'] +['ĠSh', 'adow'] +['Ġorgan', 'iz'] +['å', 'ŀĭ'] +['Ġsusp', 'ension'] +['Ġu', 'it'] +['Ġsimult', 'aneously'] +['ĠAff', 'ero'] +[':', '");Ċ'] +['Ġro', 'cket'] +['c', 'as'] +['eter', 'mine'] +['ace', 'ut'] +['x', 'l'] +['ĠA', 'MD'] +['(', 'graph'] +['ass', 'oci'] +['_C', 'R'] +['.ar', 'ange'] +['(j', 'Label'] +['Ġbe', 'ef'] +['Qu', 'ick'] +['.c', 'ard'] +[']', '):'] +['-', 'gr'] +['.G', 'ONE'] +['_C', 'LOSE'] +['ĠNe', 'v'] +['ÃŃ', 'as'] +['Ġste', 'pped'] +['ĠFre', 'edom'] +['ĠW', 'R'] +['NS', 'Array'] +['_r', 'x'] +['_d', 'ialog'] +['Ġhot', 'els'] +['Ġ(', '\\<'] +['ĠD', 'iamond'] +['Ġassum', 'ption'] +['um', 'i'] +['(', 'items'] +['č', 'ččĊ'] +['æ³', 'ķ'] +['Ġn', 'el'] +['Book', 's'] +['åİ', '¿'] +['us', 'b'] +['ĠF', 'IN'] +['æ', '¬'] +['Ġcorpor', 'ations'] +['US', 'A'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ'] +['.p', 'roperty'] +['ew', 'ise'] +['_', 'plot'] +['">', "';Ċ"] +['Ġpe', 'pper'] +['Ġsh', 'ed'] +['ĠMed', 'ium'] +['ĠC', 'ookie'] +['Ġoverse', 'as'] +['ed', 'or'] +['asure', 'ment'] +['åŃ', 'ĺ'] +["Ġ'", ".'"] +['Ġph', 'p'] +['ĠPRO', 'C'] +['Ġexception', 'al'] +['(', 'th'] +['ĠJ', 'et'] +['Ġoccup', 'ied'] +['.set', 'Image'] +['ĠRel', 'ated'] +['uck', 'er'] +['M', 'embers'] +['PR', 'INT'] +['ĠG', 'lo'] +['_V', 'IEW'] +['}', '",Ċ'] +['Ġad', 'option'] +['[]', ')Ċ'] +['ĠMiss', 'ouri'] +['ĠLin', 'coln'] +['eral', 'd'] +['Pop', 'up'] +['Ġf', 'ate'] +['-', 'bootstrap'] +['fe', 'ctions'] +['ĠP', 'oll'] +['_ARG', 'S'] +['in', 'ance'] +['-h', 'ome'] +['.', '),'] +['_d', 'one'] +[':', 'ĊĊĊ'] +['Ġdiscuss', 'ing'] +['ĠSQL', 'Exception'] +['Ġelect', 'ro'] +['ĉ', 'req'] +['Ġz', 'w'] +['Ġl', 'ui'] +['Ġover', 'night'] +['$', 'user'] +['ĠW', 'AY'] +['Ġall', 'erg'] +['Ġdisappoint', 'ed'] +['Ġradi', 'ation'] +['Ġimpress', 'ed'] +['ific', 'ates'] +['Ġto', 'b'] +['CL', 'ASS'] +['Ġc', 'uda'] +['_d', 'et'] +['-', 'post'] +['ul', 'u'] +['Trans', 'lation'] +['-h', 'and'] +['.y', 'ear'] 
+['ĠM', 'ongo'] +['Ġun', 'clear'] +['.', 'engine'] +['WEB', 'PACK'] +['r', 'ices'] +['_AC', 'CESS'] +['Ġh', 'olidays'] +['per', 'cent'] +['.Id', 'entity'] +['ĠG', 'ov'] +['Ġpassion', 'ate'] +['!!', '.'] +['ĠGree', 'ce'] +['plus', 'plus'] +["'))", ';'] +['G', 'P'] +['Ġexc', 'it'] +['.tab', 'Page'] +['_', 'cond'] +['Ġspons', 'or'] +['M', 'ODULE'] +['_pro', 'c'] +['Ġ$', 'Ċ'] +['Ġr', 'ational'] +['.T', 'ool'] +['Ġi', 'hr'] +['cc', 'a'] +['åĵ', 'ģ'] +['ĠE', 'state'] +['IB', 'UTE'] +['Action', 'Performed'] +['ĠS', 'olar'] +['¦', 'Ĥ'] +['Ġequ', 'ity'] +['t', 'id'] +['Ġrec', 'ip'] +['.s', 'imple'] +['m', 'k'] +['ĠL', 'uke'] +['ĠGuard', 'ian'] +['Ġenc', 'rypted'] +['Ġdomin', 'ant'] +['.', 'place'] +['ĠN', 'V'] +['Ġtong', 'ue'] +['(', 'Get'] +['Ġst', 'ainless'] +['.P', 'lay'] +['Ġe', 'b'] +['ac', 'i'] +['.b', 'uffer'] +['readcr', 'umbs'] +['Ġvacc', 'ine'] +['p', 'rom'] +['Ġuser', 'Info'] +['Ġsl', 'ug'] +['Serial', 'izedName'] +['-w', 'ide'] +['Ġre', 'actions'] +['ĠY', 'ang'] +['ĠAdd', 's'] +['(user', 'Id'] +['Ġpl', 'ates'] +['ĠM', 'EM'] +['Ġb', 'ail'] +['In', 'side'] +['et', 'ed'] +['Ġels', 'if'] +['Ġs', 'ake'] +['Ġc', 'ycles'] +['Ġì', 'Ĺ'] +['ĉ', 'I'] +['-c', 'ollapse'] +['ĠG', 'MT'] +['De', 'claration'] +['Ġg', 'ros'] +['Ġreach', 'es'] +['Ġcust', 'ody'] +['Unt', 'il'] +['t', 'u'] +['ĠCh', 'en'] +['Ġn', 'x'] +['(', 'addr'] +['ĠO', 'ffer'] +['Ġcol', 'leg'] +['ass', 'ador'] +['Ġm', 'apper'] +['ĠS', 'IGNAL'] +['ĠB', 'loom'] +['ĠH', 'oll'] +['ĠIm', 'per'] +['-d', 'es'] +['_s', 'ite'] +['Pro', 'c'] +['E', 'qu'] +['Ġat', 'omic'] +['ĠW', 'oman'] +['s', 'ent'] +['sc', 'ar'] +['Ġint', 'elligent'] +['ĠGet', 'ting'] +['ĠReg', 'istration'] +['ĠPh', 'ill'] +['Ġkill', 'er'] +['unic', 'ode'] +['Ċ', 'ĉĉĊ'] +['ĠJac', 'ob'] +['ĠCon', 'st'] +['Ġloc', 'ate'] +['Ġca', 'us'] +['ĠSch', 'olar'] +['Ġconstitution', 'al'] +['Ġinfl', 'ation'] +['ĠG', 'ot'] +['=', 'array'] +['end', 'um'] +['Ġtransl', 'ated'] +['Ġdiv', 'orce'] +['En', 'tries'] +['Ġs', 'or'] +['ĠQu', 'ote'] +['irl', 'ines'] +['U', 'K'] 
+['Ġexc', 'el'] +['(', 'opt'] +['ĠAD', 'V'] +[',:', ','] +['Ġcontact', 'ed'] +['ĠD', 'A'] +['Ġr', 'ings'] +['ĠIndust', 'rial'] +['.get', 'Context'] +['Ġforg', 'otten'] +['ĠT', 'an'] +['Ġp', 'ants'] +['Ġo', 'v'] +['Ġdec', 'oder'] +['ĠPart', 'ial'] +['Ġv', 'c'] +['Ġbatt', 'les'] +['A', 'rial'] +['FRING', 'EMENT'] +['ir', 'ates'] +[',', 'w'] +['aint', 'enance'] +['ĠO', 'd'] +['ĠTechn', 'ologies'] +['åī', 'į'] +['ĠCar', 'ter'] +['.find', 'All'] +['N', 'ome'] +['B', 'en'] +['ĠUs', 'age'] +['ĠP', 'icture'] +['Ġbad', 'ly'] +['_p', 'anel'] +['Ġpat', 'ent'] +['ĠProt', 'ocol'] +['lot', 'te'] +['ĉ', 'player'] +['je', 'ctions'] +['Ġd', 'ou'] +['_re', 'lease'] +['urn', 'iture'] +['_t', 'ax'] +['ĠF', 'ields'] +['.d', 'ataset'] +['_m', 'aster'] +['CLU', 'DE'] +['ĠPh', 'arm'] +['b', 'st'] +['Ġoper', 'ational'] +['.c', 'ell'] +['Ġident', 'ifying'] +['Ġj', 'wt'] +['t', 'uple'] +['ĠT', 'C'] +['ĠC', 'ro'] +['ix', 'map'] +['-', 'components'] +['gener', 'al'] +['Ġo', 'z'] +['_D', 'e'] +['_d', 'ouble'] +['ĠTo', 'o'] +['.View', 'Group'] +['g', 'ate'] +['d', 'ings'] +['ph', 'otos'] +['Ġgrand', 'e'] +['ol', 'lect'] +['_l', 'in'] +['Ġaw', 'ful'] +['f', 'ilters'] +['Ġaltern', 'ate'] +['es', 'p'] +['Ġcomp', 'ress'] +['e', 'o'] +['ĠS', 'cale'] +['Ġind', 'irect'] +['Ġinv', 'oice'] +['ĊĊĊĊĊĊĊĊ', 'ĊĊĊĊĊĊĊĊ'] +['Start', 'ing'] +['ĠPl', 'ayers'] +['ie', 'le'] +['.', 'then'] +['Or', 'd'] +['ĠT', 'uple'] +['Ġb', 'out'] +['ĠStat', 'istics'] +['Pre', 'view'] +['Ġp', 'uzzle'] +['ĠW', 'idth'] +['ST', 'ATE'] +['Ġover', 'lay'] +['ĉ', 'on'] +['Ġin', 'fr'] +['Ġsm', 'allest'] +['lock', 'ed'] +['ÑĤ', 'о'] +['ss', 'l'] +['Ġde', 'emed'] +['Ġs', 'co'] +['re', 'ck'] +['Ġj', 'Button'] +['Ġmiss', 'ions'] +['ç§', '°'] +['.Selected', 'Index'] +['T', 'ABLE'] +['Se', 'pt'] +['Ġacknow', 'ledge'] +['Ġstrt', 'otime'] +['ĠT', 'ell'] +['ĠD', 'ak'] +['Ġal', 'uminum'] +['Ġf', 'ence'] +['ĠSt', 'ars'] +['CON', 'FIG'] +['Ġretro', 'fit'] +['Ġemph', 'asis'] +['/', 'header'] +['ĠS', 'omething'] +['in', 'ished'] +["='", '".$'] 
+['ĠValid', 'ators'] +['Ġpol', 'ar'] +['section', 's'] +['.as', 'px'] +['Ġas', 'pir'] +['.M', 'ock'] +['Code', 'Gen'] +['Ġpe', 'ut'] +['Ġaccept', 'ing'] +['Ġback', 'ing'] +['P', 'icture'] +['/', 'ap'] +['еÐ', '³'] +['_SE', 'C'] +['-', 'use'] +['annot', 'ation'] +['Ġcogn', 'itive'] +['Ġg', 'rip'] +['h', 'our'] +['ĠLeg', 'al'] +['Ġep', 'ic'] +['.t', 'oolStrip'] +['.not', 'ify'] +['.L', 'ast'] +['OR', 'IZ'] +['M', 'iddleware'] +['cri', 'ptions'] +['l', 'ash'] +['_F', 'OUND'] +['ĠLiver', 'pool'] +['Ġ{}', '",'] +['Inst', 'all'] +['Ġn', 'it'] +['Ġfig', 'ured'] +['[', 'len'] +['.W', 'in'] +['.pl', 'atform'] +['Ġgam', 'bling'] +['(d', 't'] +['av', 'ery'] +['ĉ', 'include'] +['Wh', 'ether'] +['R', 'outing'] +['Ġther', 'ap'] +['Rem', 'ote'] +['ĠL', 'oss'] +['y', 'll'] +['Ġappro', 'ached'] +['ĠV', 'ehicle'] +['ĠAl', 'pha'] +['Ġvoc', 'ê'] +['ans', 'wers'] +['NS', 'Dictionary'] +['cons', 'ider'] +['un', 'used'] +['ĠF', 'an'] +['or', 'able'] +['f', 're'] +['ĠDIS', 'CLAIM'] +['ĠAct', 'or'] +['.', ']'] +['to', 'Have'] +['.user', 'Id'] +['Ġspeed', 's'] +['ew', 'ay'] +['Ġrec', 'urs'] +['ĠÐ', '³'] +['_pr', 'iv'] +['!', 'âĢĿĊĊ'] +['Ch', 'oice'] +['Ġsett', 'le'] +['Ġplan', 'es'] +["'", '},'] +['T', 'om'] +['IT', 'ER'] +['!', '"Ċ'] +['å', '»'] +['achel', 'or'] +['Ġsepar', 'ation'] +['Ġd', 'al'] +['ad', 'j'] +['Ġreg', 'isters'] +['r', 'iz'] +['ĠNot', 'ice'] +['Ġl', 'u'] +['Ġcour', 'age'] +['Ġax', 'es'] +['cell', 'ent'] +['.as', 'ync'] +['Ġcompat', 'ibility'] +['ç', '«'] +['Ġ!', 'ĊĊ'] +['ĉ', 'title'] +['Y', 'LE'] +['ĉ', 'message'] +['U', 'UID'] +['OLD', 'ER'] +['ĠH', 'H'] +['ĠStyle', 'Sheet'] +['Ġaccess', 'ed'] +['.', 'validation'] +['t', 'asks'] +['Ġpoll', 'ution'] +['.c', 'anvas'] +['Ġing', 'redient'] +['ĠC', 'abin'] +['A', 'h'] +['old', 'own'] +['ĠNO', 'I'] +['ĠÃ', 'Ĺ'] +['[', 'f'] +['ed', 'uc'] +['y', 'alty'] +['(n', 'ot'] +['_', 'State'] +['am', 'en'] +['Ġda', 'o'] +['ud', 'ad'] +['ell', 'ers'] +['}', '&'] +['lic', 'ity'] +['_W', 'INDOW'] +['Ġt', 'atto'] +['val', 'or'] +['.R', 'ange'] 
+['Ġrefer', 'enced'] +['ĠRes', 'erve'] +['M', 'oney'] +['SCRI', 'PT'] +['/', 'product'] +['cho', 'ices'] +['Ġt', 'in'] +['ãĤ', 'ĵ'] +['Ġsepar', 'ator'] +['Ġp', 'kg'] +['am', 'med'] +['ĠM', 'AT'] +['!', '!ĊĊ'] +['Ġr', 'aid'] +['Ġmotiv', 'ation'] +['ĠX', 'P'] +['ĠBack', 'ground'] +['ĠQu', 'aternion'] +['.define', 'Property'] +['ik', 'er'] +['ĉp', 'arent'] +['ĠOrigin', 'ally'] +['ant', 'age'] +['ĠH', 'ans'] +['Ġtim', 'eline'] +['.c', 'ur'] +['op', 'ic'] +['ĠSe', 'qu'] +['m', 'ust'] +['ĠCo', 'al'] +['Ġform', 'atter'] +['_R', 'GB'] +['Ġ_', '("'] +["'}", '),Ċ'] +['Ġ=', '================'] +['ĠF', 'UNCTION'] +['Ġl', 'ng'] +['ic', 'ates'] +['l', 'ive'] +['_', 'engine'] +['Ġtown', 's'] +["'))", 'ĊĊ'] +['ĠP', 'K'] +['(', 'api'] +['ĉs', 'canf'] +['pack', 'et'] +['.ph', 'one'] +['á', 'Ģ'] +['ĠAnd', 'y'] +['_N', 'AMES'] +['PL', 'Y'] +['Ġmin', 's'] +['im', 'i'] +['Ġbr', 'ick'] +['Ġbl', 'ade'] +['.std', 'out'] +['}`', ';Ċ'] +['Sh', 'ift'] +['ĉs', 'b'] +['ĠCheck', 's'] +['Ġphenomen', 'on'] +['Av', 'atar'] +['Ġmin', 'istry'] +['ro', 'se'] +['ĉ', 'File'] +['Ġtit', 'led'] +['(', 'LOG'] +['Ġg', 'an'] +['des', 'ign'] +['(),', 'čĊ'] +['Ġb', 'ones'] +['st', 'm'] +['ÅĽ', 'Äĩ'] +['ĠInput', 'Stream'] +['Ġvol', 'unt'] +['ĠSerial', 'izable'] +['Ġfight', 'er'] +['ĠDr', 'ag'] +['T', 'witter'] +['Ġsubs', 'id'] +['ç', '¼'] +['Ġfor', 'ums'] +['.load', 'ing'] +['log', 'ged'] +['_', 'this'] +['Ġterr', 'ain'] +['Ġir', 're'] +['ĠIn', 'g'] +['ĠC', 'N'] +['_object', 's'] +['.', 'uid'] +['Ġconscious', 'ness'] +['T', 'INGS'] +['ĠG', 'all'] +['Ġport', 'ray'] +['ĠDevelop', 'er'] +['Ġparticip', 'ant'] +['Ġ"', ';čĊ'] +['/', 'model'] +['ĠOper', 'ations'] +['^', '\\'] +['ĠL', 'ater'] +['Ġrais', 'es'] +['-n', 'one'] +['.m', 'eta'] +["='", '.$'] +['Fin', 'ished'] +['Ġrepl', 'acing'] +['Ġsam', 'pling'] +['ĠJ', 'en'] +['"', 'There'] +['RE', 'AL'] +['A', 'LE'] +['ìĬ', '¤'] +['Or', 'ders'] +['_param', 'eter'] +['ĠOlymp', 'ic'] +['Ġtr', 'ès'] +['Ġare', 'na'] +['i', 'ol'] +[';', '?>'] +['Ġimpact', 's'] +['ĠW', 'S'] 
+[':', 'get'] +['Ġfl', 'ights'] +['ĠRuss', 'ell'] +['c', 'amera'] +['F', 'n'] +['s', 'igma'] +['Ġfor', 'cing'] +['Ġloc', 'als'] +['Ġdepart', 'ure'] +['Ġcelebr', 'ation'] +['ĠS', 'ay'] +['ï¼', 'Ĵ'] +['ĠH', 'ills'] +['.has', 'OwnProperty'] +['Ġtyp', 'ings'] +['.A', 'PI'] +['Ġdon', 'ation'] +['Operation', 'Exception'] +['.Act', 'ivity'] +['c', 'plusplus'] +['ĠChar', 'lie'] +['Ġimport', 'ed'] +['Ġd', 'ann'] +['Ġoccas', 'ions'] +['Ġimplement', 'ing'] +['Ġpur', 'ple'] +['.d', 'ialog'] +['SQL', 'Exception'] +['ern', 'o'] +['Ġw', 'ars'] +['Ġpast', 'e'] +['Ġdecre', 'ased'] +['Ġhar', 'sh'] +['Ġel', 'abor'] +['input', 's'] +['ĠView', 's'] +['Ġerror', 'Message'] +['_m', 'ul'] +['ĉ', 'write'] +['ĠC', 'op'] +['ĠAnn', 'ual'] +['(b', 'utton'] +['Ġv', 'ida'] +['b', 'ars'] +['ĠHar', 'vard'] +['ĉex', 'pect'] +['Ġindex', 'es'] +['Ġdocument', 'ary'] +['Ġf', 'lesh'] +['OR', 'LD'] +['ĠD', 'elta'] +['M', 'AND'] +['Br', 'ush'] +['-c', 'olumn'] +['Ġdevelop', 'ments'] +['method', 'Visitor'] +['s', 'lice'] +['ĠP', 'DO'] +['Ġinvest', 'ing'] +['ir', 'able'] +['Ġxml', 'ns'] +['ï¼', 'Ľ'] +['art', 'a'] +['Ġthe', 'ories'] +['_c', 'ity'] +['Ġ$', '__'] +['Cre', 'ating'] +['(', 'pr'] +['D', 'ropdown'] +['ism', 'atch'] +['ĠN', 'ET'] +["']", ')){Ċ'] +['ĠVal', 'ues'] +['ĠSE', 'O'] +['ĠST', 'AT'] +['Ġe', 'cosystem'] +['Ġtem', 'pt'] +['Ġ\\', '\\'] +['Ġ//', '{Ċ'] +['ĠChrist', 'opher'] +['ĠKent', 'ucky'] +['ĠHttp', 'ServletResponse'] +['Ġhy', 'brid'] +['y', 'on'] +['Ġfeed', 'ing'] +['ĠEx', 'tra'] +['N', 'orm'] +['IT', 'CH'] +['ĠSe', 'an'] +['ĠUp', 'load'] +['m', 'un'] +['p', 'ur'] +['Ġp', 'ersistent'] +['ĠID', 'C'] +['ĠPer', 'form'] +['.m', 'erge'] +['_', 'room'] +['Mean', 'while'] +['!', "='"] +['ĠW', 'el'] +['Args', 'Constructor'] +['.D', 'atabase'] +['Ġcount', 'ing'] +['()', '*'] +['Ķ', 'åĽŀ'] +['ĠT', 'OP'] +['m', 'ill'] +['ĠD', 'T'] +['IGN', 'ED'] +['ĠK', 'B'] +['Ġcomp', 'ly'] +['S', 'outh'] +['_c', 'ollection'] +['Ch', 'apter'] +['Ġexpl', 'aining'] +['_', 'AM'] +['_t', 's'] +['c', 'ards'] +['Ġqu', 'el'] 
+['Ġp', 'ole'] +['Ġtouch', 'down'] +['ĠO', 'thers'] +['Ġpe', 'ers'] +['ĠType', 'Error'] +['Ġsix', 'th'] +['Ġche', 'er'] +['Ġdis', 'pute'] +['us', 'c'] +[')', '],'] +['th', 'umb'] +['Ġh', 'iding'] +['ĠS', 'IG'] +['lik', 'es'] +['ĠP', 'AGE'] +['.Ref', 'lection'] +['Ġhead', 'quarters'] +['T', 'ING'] +['ĠG', 'host'] +['M', 'LE'] +['$', 'Ċ'] +['Ġcontr', 'ary'] +['ext', 'end'] +["']", ').'] +['FF', 'ECT'] +['ĠP', 'interest'] +['úmer', 'o'] +['ric', 'ane'] +['ĉs', 'ession'] +['Ġcr', 'ystal'] +['-', 'Control'] +['overn', 'ment'] +['og', 'raf'] +['-', 'action'] +['v', 'olume'] +['ft', 'en'] +['Ġun', 'con'] +['Ġan', 'imate'] +['Ġle', 'ase'] +['sc', 'r'] +['Ġref', 'use'] +['ãĢ', 'ĭ'] +['ft', 'p'] +['in', 'formation'] +['Ġeval', 'uated'] +['Ġin', 'jection'] +['Ġj', 'ack'] +['Ġwork', 'shop'] +['æ³', '¨'] +['PT', 'H'] +['ĠT', 's'] +['off', 'er'] +['ĉ', 'os'] +['Ġking', 'dom'] +['M', 'issing'] +['Ġlaw', 'makers'] +['ext', 'Field'] +['Ġsing', 'ing'] +['ab', 'i'] +['/', 'client'] +['.m', 'edia'] +['ATEG', 'ORY'] +['Sign', 'ature'] +['%', "',Ċ"] +['ĠF', 'uck'] +['][', ':'] +['Ġsens', 'ors'] +['/', 'com'] +['ĠPr', 'imary'] +['.S', 'QL'] +['_pro', 'gram'] +['Ġp', 'ills'] +['Ġinteg', 'ral'] +['Ġfle', 'et'] +['Ġdro', 'pping'] +['.s', 'l'] +['Be', 'en'] +['Ġp', 'ets'] +['Ġadvis', 'ed'] +['Ġdr', 'agon'] +['_', 'EDIT'] +['(', 'im'] +['F', 'ER'] +['ĠDr', 'ug'] +['(r', 'andom'] +['Ġcomp', 'ression'] +['ou', 'st'] +['[', '%'] +['Ġbuy', 'er'] +['h', 'op'] +['R', 'oles'] +['man', 'age'] +['Ġpain', 'ful'] +['ĠBr', 'anch'] +['-mod', 'al'] +['en', 'ant'] +['ĠM', 'esh'] +['/', 'font'] +['ĠG', 'raham'] +['Ġâ', 'ĺ'] +['Ġn', 'c'] +['ĠFranc', 'is'] +['Ġspec', 'ification'] +['Ġdam', 'ages'] +['-', 'config'] +['Ġthe', 'oret'] +['sec', 'ure'] +['_m', 'ulti'] +['aceut', 'ical'] +['Ġdemand', 'ing'] +['en', 'ne'] +['IST', 'S'] +['()', '));ĊĊ'] +['Re', 'ason'] +['Re', 'cent'] +['ph', 'ase'] +['Ġps', 'y'] +['_M', 'AN'] +['Ġvolunte', 'er'] +['å', '¿'] +['istrib', 'uted'] +['li', 'o'] +['Ġproduct', 'ivity'] 
+['_com', 'm'] +['S', 'pring'] +['n', 'is'] +['.', 'weight'] +['ĠC', 'ancer'] +['Al', 'loc'] +['ĠT', 'weet'] +['Ġsepar', 'ately'] +['ĉ', 'check'] +['_p', 'roperties'] +['.', 'Unit'] +['_CL', 'K'] +['Ġg', 't'] +['Ġ(', ');ĊĊ'] +['Ġhand', 'y'] +['ĠThom', 'pson'] +['Ġunn', 'ecessary'] +['ĠRe', 'ader'] +['G', 'N'] +['=', 'request'] +['ĠU', 'tility'] +['.Re', 'pository'] +['ĠA', 'x'] +['hy', 'dr'] +['ie', 'u'] +['Ġth', 'y'] +['Ġl', 't'] +['_m', 'ail'] +['ä¿®', 'æĶ¹'] +['ail', 'and'] +['ĠPhil', 'ip'] +['Ġbit', 'ter'] +['Ġbet', 'ting'] +['Ġtim', 'ed'] +['ock', 's'] +["'", 'a'] +['Ġal', 'gorithms'] +['Ġre', 'interpret'] +['Ġto', 'ss'] +['ro', 'gen'] +['Ġhop', 'ed'] +['(', 'selected'] +['Ġvent', 'ure'] +['TE', 'X'] +['ĠLe', 'ave'] +['.Sub', 'string'] +['Ġgr', 'ateful'] +['uk', 'a'] +['ĠCon', 'sumer'] +['Ġag', 'greg'] +['C', 'ircle'] +['à¸', 'ģ'] +['_block', 's'] +['Ġleg', 'ally'] +['Ġ"', '|'] +['ãĥ', 'ĥ'] +['.', 'board'] +['.A', 'b'] +['Function', 's'] +['rec', 'ipe'] +['è', 'ĩ'] +['ĠO', 'xford'] +['Ġwho', 'les'] +['.B', 'uild'] +['_ch', 'anged'] +['h', 'ai'] +['Ġdepart', 'ments'] +['I', 'mp'] +['Ġcoal', 'ition'] +['IN', 'FRINGEMENT'] +['Ġemp', 'ower'] +['itch', 'es'] +['N', 'orth'] +['Ġinfl', 'amm'] +['ON', 'SE'] +['Ġmiss', 'ile'] +['ĠR', 'aj'] +['ĠIss', 'ue'] +['Ġat', 'oi'] +['ca', 'led'] +['.Cont', 'rollers'] +['ĠW', 'olf'] +['Ġcrush', 'ers'] +['á»', 'ĩ'] +['.A', 'uth'] +['.add', 'Attribute'] +['h', 'is'] +['Ġbo', 'ots'] +['.c', 'lean'] +['c', 'amp'] +['Ġten', 'ant'] +['Ġt', 'une'] +['Ġ{}', "'."] +['Ġwork', 'out'] +['Re', 'po'] +['Ġpartial', 'ly'] +['MI', 'SSION'] +['j', 'amin'] +['ĠS', 'B'] +['Ġdetermin', 'ation'] +["Ġ'", "');Ċ"] +['ĠB', 'eng'] +['Ġv', 'os'] +['Ġin', 'hab'] +['/', 'lang'] +['s', 'burgh'] +['Exec', 'utor'] +['h', 'one'] +['ĠCh', 'allenge'] +['_link', 's'] +['.Le', 'vel'] +['Ġunder', 'ground'] +['-c', 'ode'] +['Ġoptim', 'ization'] +['log', 'ging'] +['_de', 'st'] +['Ġsn', 'ake'] +['Ġchemical', 's'] +['_IMPORT', 'ED'] +['ado', 'op'] +['ĠTH', 'AT'] +['man', 
'aged'] +['Ġredu', 'ces'] +['ĠRE', 'AL'] +['ĠG', 'uy'] +['_GENER', 'IC'] +['/', '********************************'] +['.', 'amount'] +['Ġd', 'ere'] +['get', 'Time'] +['Ġp', 'ant'] +['an', 'onymous'] +['Ġharmon', 'y'] +['ĠAl', 'an'] +['Ġscen', 'arios'] +['Ġd', 'irt'] +['ht', 'ags'] +['M', 'c'] +['Sh', 'ell'] +['r', 'in'] +['{', 'čĊčĊ'] +['.p', 'ow'] +['ĉ', 'client'] +['Ġconspir', 'acy'] +['Ġad', 'mission'] +['ĠReg', 'ional'] +['ĠView', 'Controller'] +['ĠPhilipp', 'ines'] +['Ġde', 'pos'] +['Ġp', 'ap'] +['ĠP', 'ad'] +['P', 'aul'] +['.Com', 'boBox'] +['Ġt', 'utor'] +['ĠRec', 'ipe'] +['w', 'riting'] +['Ġcontrib', 'utor'] +['OT', 'H'] +['Sm', 'all'] +['V', 'I'] +['Ġh', 'acer'] +['e', 'qu'] +['ĠEx', 'amples'] +['h', 'uman'] +['.m', 'essages'] +['ĉt', 'yp'] +['Ġ(', 'čĊ'] +['ĠS', 'SL'] +['LE', 'N'] +['ĠRom', 'ney'] +['(', 'grid'] +['ĉ', 'min'] +['Ġ>', 'ĊĊ'] +['Ġfr', 'uits'] +['Ġvot', 'er'] +['In', 'line'] +['pan', 'e'] +['ĠC', 'ollections'] +['char', 'set'] +['Ġsp', 'am'] +['z', 'b'] +['item', 'ap'] +['Ġsucceed', 'ed'] +['_C', 'OL'] +['Ġel', 'apsed'] +['im', 'eter'] +['Ġrecover', 'ed'] +['T', 'ensor'] +['hatt', 'an'] +['.set', 'up'] +['ist', 'o'] +['(', 'head'] +['ĠS', 'IZE'] +['Ġtact', 'ics'] +['Ġdist', 'ur'] +['Ġpre', 'val'] +['ici', 'os'] +['(', 'Value'] +['_c', 'ols'] +['ĠF', 'at'] +['Ġse', 'al'] +['Ġs', 'ons'] +['Ġens', 'ures'] +['Ġpress', 'ing'] +['=', '&'] +['igen', 'ous'] +['Ġharass', 'ment'] +['_', 'JSON'] +['Ġign', 'or'] +['yn', 'omial'] +['om', 'er'] +['_st', 'atic'] +['Ġsignific', 'ance'] +['Ġcirc', 'les'] +['_S', 'ystem'] +['Ġdiscipl', 'ine'] +['Ġdress', 'ed'] +['Ġs', 'phere'] +['Ġclim', 'b'] +['_', 'actions'] +['ĠB', 'ab'] +["Ġ'", "=',"] +['_s', 'chema'] +['"', 'use'] +['Ġund', 'ers'] +['Ġc', 'ups'] +['.s', 'creen'] +['/', 'new'] +['Ġappe', 'aring'] +['T', 'OP'] +['vis', 'ed'] +['cl', 'ang'] +['Ġinvestig', 'ators'] +['Ġmyster', 'ious'] +['Ġprom', 'ising'] +['Ġqual', 'ify'] +['Ġc', 'ave'] +['Ġequ', 'ip'] +['=', 'x'] +['G', 'T'] +['(', 'link'] +['.', 'velocity'] 
+['.', 'erase'] +['ot', 'er'] +['++++', '++++'] +['pro', 'fit'] +['Ġz', 'ones'] +['_', 'uid'] +['-', 'ser'] +['Ġobject', 'ives'] +['Ġmil', 'f'] +['web', 'kit'] +['(m', 'atch'] +['ne', 'h'] +['ĠAssoci', 'ated'] +['ĠT', 'odo'] +['=', 'd'] +['C', 'am'] +['Ġv', 'ocal'] +['Ġs', 'udo'] +['(', 'EX'] +['Ġtr', 'ou'] +['AB', 'C'] +['.b', 'ean'] +['ĠG', 'round'] +['ĠRE', 'ST'] +['we', 'ets'] +['In', 'g'] +['im', 'on'] +['_b', 'us'] +['ĠC', 'OLOR'] +['un', 'to'] +['Ġf', 'oss'] +['ĠLink', 's'] +['ä', 'ng'] +['/', 'forms'] +['pr', 'ises'] +['Ġachie', 'vement'] +['C', 'ALL'] +['ел', 'ÑĮ'] +['ĠVer', 'ify'] +['_S', 'OURCE'] +['apt', 'cha'] +['ID', 'D'] +['_re', 'ference'] +['G', 'old'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĊ'] +['Re', 'ceiver'] +['Ġa', 'j'] +['_d', 'irection'] +['}', ']'] +['ĠCom', 'pet'] +['Ġb', 'ang'] +['ĠC', 'ass'] +['-', 'url'] +['te', 'chn'] +['ĠJer', 'usalem'] +['long', 'itude'] +["'", ');čĊčĊ'] +['Ġwin', 'ners'] +['T', 'asks'] +['ĠD', 'MA'] +['Ġtool', 'tip'] +['İ', '·'] +['ĠB', 'ra'] +['_d', 'uration'] +['cur', 'y'] +['parent', 's'] +['----', '', '>('] +['ĠK', 'ir'] +['Ġint', 'ros'] +['Ġsk', 'etch'] +['Ġsk', 'illed'] +['Ġim', 'mer'] +['Ġade', 'quate'] +['_re', 'p'] +['(', 'header'] +['_', 'like'] +['Ġper', 'ceived'] +['ss', 'h'] +['Ġassum', 'ing'] +['Ġf', 'f'] +['_u', 'uid'] +['ul', 'as'] +['Ġdemocr', 'atic'] +['.', 'entities'] +['S', 'eries'] +['aph', 'ore'] +['Ġnew', 'er'] +['}', '('] +['SE', 'C'] +['ai', 'ro'] +['Ġcomm', 'od'] +['Ġprivile', 'ge'] +['Ġde', 'ux'] +['ĠH', 'op'] +[".'", '/'] +['ct', 'ic'] +['.', "';Ċ"] +['', 'C'] +['ĠWar', 'ren'] +['Ġoptim', 'izer'] +['ĠSER', 'VICES'] +['_', 'oper'] +['get', 'Attribute'] +['ĠMc', 'K'] +['_s', 'elf'] +['.r', 's'] +['"', ')ĊĊĊ'] +['Get', 'Component'] +['er', 'ce'] +['Ġt', 'ous'] +['un', 'its'] +["']", ');čĊ'] +['Z', 'oom'] +['/', 'E'] +['Ġobs', 'c'] +['Ġfast', 'est'] +['on', 'line'] +['Ġpeace', 'ful'] +['ff', 'en'] +['Ġc', 'argo'] +['ĉ', 'pr'] +['Ġseek', 's'] +['z', 'u'] +['Tr', 'im'] +['Ġw', 'ard'] +['Ġver', 'd'] 
+['Ġblog', 's'] +['.exception', 's'] +['ĠPrem', 'ium'] +['ĠN', 'etherlands'] +['S', 'afe'] +['Fin', 'ish'] +['ĠAl', 'bum'] +['_A', 'CC'] +['=', 'this'] +['v', 'irtual'] +[']', '>'] +['_L', 'ABEL'] +['ĠN', 'ich'] +['_w', 'in'] +['ĠA', 'aron'] +['W', 'P'] +[';', '$'] +['aim', 's'] +['ĠImage', 'View'] +['Ġend', 'less'] +['ER', 'A'] +['_DIS', 'ABLE'] +['Ġcancel', 'led'] +['-', 'us'] +['Ġins', 'pection'] +['em', 'in'] +['ĠG', 'rey'] +['-', 'open'] +['Ġiter', 'ations'] +['.', 'owner'] +['Ġk', 'eras'] +['.P', 'assword'] +['ĠR', 'y'] +['ĠIN', 'S'] +['A', 'ir'] +['ĠSe', 'veral'] +['.Tab', 'Stop'] +['ING', 'LE'] +['ĠH', 'air'] +['ĠCan', 'vas'] +['AA', 'AA'] +['Ġfl', 'aw'] +['ced', 'es'] +['.Re', 'port'] +['í', 'Ĭ'] +['ĠT', 'ips'] +['cript', 'ors'] +['.trans', 'action'] +['.S', 'pring'] +['Ġview', 'er'] +['Ġins', 'ights'] +['è¾', 'ĵ'] +['ord', 'ion'] +['U', 'INT'] +['se', 'ek'] +['ĠA', 'uf'] +['ìŀ', 'IJ'] +['Ġstr', 'ain'] +['To', 'oltip'] +['Ġd', 'z'] +['ign', 'al'] +['ad', 't'] +['Ġu', 'c'] +['fin', 'ite'] +['Ġn', 'm'] +['.c', 'md'] +['ĠMy', 'Sql'] +['[', 'data'] +['.j', 'ackson'] +['.t', 'ree'] +['Request', 'Param'] +['_', 'agent'] +['")', ']čĊ'] +['Ġass', 'ass'] +['(', 'Constants'] +[':', 'ss'] +['ĠM', 'AN'] +['+-', '+-'] +['ĠB', 'ottom'] +['print', 's'] +['ĠS', 'ame'] +['@', 'Autowired'] +['sw', 'ap'] +['ici', 'ón'] +['Ġprotest', 'ers'] +['Ġh', 'oney'] +['ĠV', 'eter'] +['(C', 'alendar'] +['-', 'ad'] +['ĠBrook', 'lyn'] +['L', 'ife'] +['_V', 'AR'] +['ze', 'ch'] +['ĠC', 'ALL'] +['_C', 'AST'] +['ĠE', 'lection'] +['Ġthick', 'ness'] +['V', 'ery'] +['_IN', 'TEGER'] +['-', 'dev'] +['))', '))'] +['ap', 'at'] +['oo', 'oo'] +['d', 'emo'] +['Ġparse', 'Float'] +['ĠR', 'ather'] +['ST', 'IT'] +['m', 'aker'] +['[', 'current'] +['chron', 'o'] +['Ġch', 'rist'] +['ãģ', 'ª'] +['ĠD', 'etail'] +['ư', 'á»'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ'] +['Ġs', 'ul'] +['id', 'ency'] +['Q', 'ue'] +['Ġeleg', 'ant'] +['ap', 'ons'] +['Ġdish', 'es'] +['Ġinteg', 'ers'] +['(', 'read'] 
+['find', 'ViewById'] +['ĠAm', 'ount'] +['ĠSk', 'ip'] +['Ġhab', 'its'] +['*', ')('] +['Ġmon', 'sters'] +['M', 'AC'] +[':', 'end'] +['Ġfr', 'ank'] +['As', 'sembly'] +['Ġd', 'fs'] +['Ġne', 'ut'] +['_TYP', 'ES'] +['e', 'qual'] +['loy', 'd'] +['(', 'uri'] +['Ġch', 'i'] +['Ġdefend', 'ant'] +['Ġconflic', 'ts'] +['Ġv', 'il'] +['-', 'js'] +['ĠPe', 'ace'] +['Ġmut', 'able'] +[')', 'sender'] +['ĠF', 'ocus'] +['å»', 'º'] +['Ġapprec', 'iated'] +['s', 'leep'] +['ĠR', 'ED'] +['C', 'ulture'] +['Ġdesign', 'ers'] +['_g', 'enerator'] +['c', 'odes'] +['/', 'ex'] +['.Get', 'Value'] +['umb', 'led'] +['.scal', 'ajs'] +['per', 'or'] +['Ġveter', 'ans'] +['Ġ}', ')čĊ'] +['Ġun', 'fortunately'] +['_C', 'REATE'] +['M', 'ass'] +['ĠCL', 'AIM'] +['ĠMe', 'et'] +['_s', 'upport'] +['B', 'ank'] +['()', '.Ċ'] +['D', 'ark'] +['_LO', 'W'] +['ĠMin', 'ing'] +['ĠO', 'wner'] +['ier', 'a'] +['Client', 'e'] +['Ġencour', 'aging'] +['>', 'S'] +['Ġboy', 'friend'] +['ĠH', 'alf'] +['ĠA', 'CC'] +['A', 'ff'] +['_', 'ar'] +['-l', 'ife'] +['c', 'x'] +['.J', 'Button'] +['iz', 'ado'] +['.z', 'ero'] +['.open', 'qa'] +['ot', 'on'] +['.text', 'Content'] +['Ġto', 'll'] +['at', 'ie'] +['Ġball', 'ot'] +['-', 'number'] +['.', 'Exception'] +['ĉ', 'params'] +['c', 'ircle'] +['-m', 'ap'] +['Ġn', 'ap'] +['ĠRob', 'ot'] +['ĠI', 'ch'] +['reg', 'istration'] +['Am', 'azon'] +['roll', 'ment'] +['(', 'exp'] +['Ġt', 'anks'] +['ĠG', 'ordon'] +['Ġmach', 'inery'] +['Ġbas', 'eline'] +['æ', 'ĭ'] +['Ø', '©'] +['ĠCon', 'vention'] +['ĉ', 'config'] +['ook', 'ies'] +['m', 'ult'] +['Rec', 'ords'] +['ĠE', 'ST'] +['Ġgar', 'bage'] +['Ġcon', 'form'] +['id', 'al'] +['Ġb', 'arg'] +['Ġsurv', 'ived'] +['Ġinvestig', 'ations'] +['.contains', 'Key'] +['----------------------------------------------------------------', '----------Ċ'] +['ort', 'ion'] +['Ġhor', 'r'] +['_', 'http'] +['Ġm', 'ant'] +[']', ';čĊčĊ'] +['b', 'inary'] +['em', 'pl'] +['Ġin', 'quiry'] +['ĠMean', 'while'] +['Ġcollect', 'ing'] +['.Entity', 'Framework'] +['",', 'ĊĊ'] +['ĠP', 'ic'] +['@', 
'Inject'] +['ick', 'ness'] +['ĠB', 'inding'] +['Ġcont', 'rolling'] +['re', 'verse'] +['Ġch', 'airs'] +['semb', 'led'] +['(', 'add'] +['Dis', 'abled'] +['an', 'as'] +['.trans', 'late'] +['--------', '---Ċ'] +['Ġref', 'lected'] +['"]', 'ĊĊ'] +['Ex', 'ternal'] +['Ar', 'row'] +['Single', 'ton'] +['%', 'x'] +['Ġ', 'Å'] +['Ġan', 'cest'] +['ĠOr', 'leans'] +['ĉc', 'md'] +['Ġprohib', 'ited'] +['ith', 'metic'] +['(ch', 'annel'] +['_c', 'ss'] +['For', 'ward'] +['.s', 'ocket'] +['Ġl', 'uc'] +['â', 'Ĩ'] +['ĠFire', 'fox'] +['ĠM', 'ovies'] +[')', '_'] +['.', 'ends'] +['(', 'shape'] +['Ġde', 'alt'] +['Ġs', 'aves'] +['Ġgl', 'ory'] +['Ġmej', 'or'] +['Ġbreath', 'ing'] +['Ġ', 'eller'] +['get', 'Data'] +['Ġang', 'les'] +['Ġtool', 'bar'] +['Ġsp', 'acing'] +['IP', 'S'] +['Ġflo', 'ors'] +['_ACT', 'IVE'] +['Ġsh', 'uffle'] +['/', 'shared'] +['ĠE', 'le'] +['ed', 'ish'] +['Ġweb', 'cam'] +['.ex', 'pect'] +['il', 'oc'] +['ĠIn', 'cludes'] +['Ġtweet', 'ed'] +['Ġ:', ')'] +['ĠEss', 'ay'] +['F', 'ix'] +['-b', 'etween'] +['_', 'web'] +['.con', 'v'] +['Ġrac', 'ism'] +['Ġreflect', 's'] +['um', 'm'] +['иÑĤ', 'е'] +['_f', 'ooter'] +['/d', 'ocs'] +['ĠP', 'our'] +['Ng', 'Module'] +['.initial', 'ize'] +['pattern', 's'] +['_', 'In'] +['ĠAb', 'b'] +['*', 'čĊ'] +['Ġsent', 'iment'] +['b', 'uff'] +['_count', 's'] +['Ġre', 'use'] +['ch', 'unk'] +['Ġim', 'posed'] +['Primary', 'Key'] +['Fore', 'ground'] +['Ġconsum', 'ed'] +['?', '!'] +['Ġd', 'ick'] +['Ġch', 'ron'] +['ĠF', 'ern'] +['Ġrespons', 'ive'] +['Ġin', 'sect'] +['icult', 'y'] +['Ġr', 'w'] +['Ġal', 'ike'] +['Ġsub', 'set'] +['ĠCook', 'ies'] +['ĠP', 'air'] +['Ġt', 'ier'] +['IF', 'O'] +['av', 'our'] +['ĠQ', 'U'] +[',', 'sizeof'] +['Ġmerg', 'ed'] +['m', 'v'] +['it', 'ol'] +['yl', 'on'] +['Ġjump', 'ed'] +['.', 'role'] +['ens', 'aje'] +['R', 'ules'] +['Ġb', 'rowse'] +['An', 'imator'] +['Ġy', 'oga'] +['Ġvari', 'ants'] +['Ġcour', 'tesy'] +['ur', 'an'] +['p', 'bs'] +['else', 'if'] +['Al', 't'] +['ĠL', 'ane'] +['CL', 'K'] +['IM', 'ARY'] +['_PRO', 'PERTY'] +['ï¼', 'IJ'] 
+['Ġch', 'an'] +['Ġgrad', 'ually'] +['Ġsh', 'ake'] +['Ġbl', 'onde'] +['...', '");Ċ'] +['-se', 'x'] +['Ġgame', 'play'] +['ac', 'ies'] +['.ref', 'resh'] +['US', 'B'] +['ĠPl', 'ot'] +['W', 'as'] +['iss', 'ippi'] +['ĠT', 'ensor'] +['Ġcryptoc', 'urrency'] +['Ġdifficult', 'ies'] +['De', 'leted'] +['With', 'out'] +['_', 'append'] +['_', 'ver'] +['"))', 'čĊ'] +['Ġhonest', 'ly'] +['Ġp', 'ivot'] +['Ġtem', 'ps'] +['_p', 's'] +['ĠUn', 'like'] +['[:', '-'] +['V', 'S'] +['_in', 'f'] +['Ġjun', 'ior'] +['Ġanim', 'ations'] +['Ġfile', 'path'] +['?', '{{', '$'] +['Ġun', 'icode'] +['pl', 'aces'] +['ĠC', 'offee'] +['.S', 'E'] +['ĠP', 'AR'] +['(t', 'xt'] +['ge', 'bra'] +['Ġf', 'ires'] +['Main', 'Window'] +['med', 'ium'] +['Ġ(', 'âĢľ'] +['Ġl', 'g'] +['Ġc', 'mp'] +['/', 'base'] +['_l', 'ayers'] +['_', 'entries'] +['Ġadmin', 'ister'] +['ĠSU', 'CH'] +['B', 'P'] +['ĠScott', 'ish'] +['ĉčĊ', 'ĉčĊ'] +['gu', 'ard'] +['ĠStr', 'ong'] +['In', 'sn'] +['ĠC', 'AP'] +['as', 'ury'] +['ĠSE', 'E'] +['C', 'lock'] +['er', 'ie'] +['\\', 'models'] +['Ġ$', '$'] +['ĠC', 'ab'] +['Ġwur', 'de'] +['Ġsold', 'ier'] +['Ġcl', 'ips'] +['Ġarrang', 'ement'] +['ĠW', 'onder'] +['ĠH', 'orn'] +['Ġsc', 'ared'] +['Ġc', 'ure'] +['m', 'kdir'] +['Ġal', 'igned'] +['ĠP', 'ink'] +['Ġland', 'ed'] +['Dim', 'ension'] +['Scroll', 'Pane'] +['.ch', 'at'] +['.W', 'ith'] +['ĠTr', 'ain'] +[']', '.Ċ'] +['Ġth', 'irty'] +['Ġdur', 'able'] +['Ġl', 'd'] +['Ġlate', 'init'] +['Ġch', 'arts'] +['Ġins', 'ult'] +['.F', 'atal'] +['_', 'ct'] +['Ġm', 'asks'] +['CLU', 'DED'] +['Pres', 'ident'] +['Ġcol', 'ours'] +['g', 'ments'] +['.at', 'tributes'] +['ĠF', 'lex'] +['ĠC', 'lock'] +['ÃŃ', 'cul'] +['im', 'en'] +['J', 'O'] +['ĠReg', 'ex'] +['_L', 'INK'] +['Ġc', 'ouch'] +['ĠIN', 'PUT'] +['Ġbe', 'ating'] +['b', 'usiness'] +['pre', 'ced'] +['.', 'unit'] +['ĠF', 'el'] +['N', 'ever'] +['osp', 'el'] +['.start', 'swith'] +['ĠE', 'PA'] +['.', 'only'] +['Ġprevent', 'ing'] +['y', 'er'] +['Column', 'Name'] +['Ġelev', 'ation'] +['fl', 'u'] +['icy', 'cle'] +['Ġoff', 'line'] 
+['Tool', 'bar'] +['Ġcompet', 'ing'] +[')', '].'] +['Ġm', 'og'] +['Ġis', 'Valid'] +['As', 'k'] +['_', 'av'] +['_l', 'at'] +['AN', 'C'] +['ĠJ', 'oh'] +['k', 'ers'] +['Ġgu', 'ards'] +['Ġch', 'ains'] +['ĠSimple', 'DateFormat'] +['.st', 'atic'] +['Ġvess', 'el'] +['Ġm', 'ud'] +['Ġst', 'abil'] +['Ġst', 'ret'] +['g', 'm'] +['am', 'ation'] +['ç', 'ľ'] +['-w', 'ith'] +['Ġro', 's'] +['_P', 'A'] +['Ġresult', 'ado'] +['Ġconf', 'idential'] +['ĠTok', 'yo'] +['ĉ', 'using'] +['ĠMath', 'f'] +['omb', 'ine'] +['ĠESP', 'N'] +['Ġdeal', 'ers'] +['Ġdismiss', 'ed'] +['TR', 'Y'] +['Ġte', 'ens'] +['rec', 'ords'] +['Ġw', 'ings'] +['g', 'allery'] +['account', 's'] +['_L', 'IB'] +['Ġj', 'acket'] +['ĠNS', 'Object'] +['Ġst', 'ones'] +['ĠDel', 'ivery'] +['ĠD', 'iet'] +['/w', 'atch'] +['Ġto', 'ilet'] +['ĠG', 'uest'] +['.d', 'ay'] +['Ġint', 'val'] +['Vis', 'it'] +['Ġinvestig', 'ated'] +['Ġpent', 'ru'] +['ĠThe', 'atre'] +['andid', 'ates'] +['L', 'ang'] +['ĠS', 'erv'] +['Ġcont', 'rollers'] +['Ġset', 'Title'] +['N', 'P'] +['am', 'y'] +['fl', 'at'] +['(', 'ui'] +['_d', 'ocument'] +['è', 'ĥ½'] +['ĠC', 'oin'] +['ĠAd', 'ams'] +['pt', 'ic'] +['Ġproduct', 'ive'] +['Ġaccompl', 'ished'] +['čĊčĊ', 'čĊčĊ'] +['Ġdefer', 'red'] +['ient', 'es'] +['Ġs', 'inc'] +['ol', 'ars'] +['Right', 'arrow'] +['Ġvari', 'ations'] +['(', 'offset'] +['.Layout', 'Inflater'] +['Ġsus', 'pend'] +['Ġprevent', 'ion'] +['_pr', 'ivate'] +['_', 'js'] +['âĺ', 'ħ'] +['Ġw', 'ieder'] +['at', 'um'] +['Ĵ', 'Į'] +['Ġappear', 'ances'] +['.D', 'ocument'] +['Ġvalid', 'ates'] +['cal', 'endar'] +['}', '";Ċ'] +['.d', 'emo'] +['con', 'ut'] +['Ġcorre', 'ction'] +['ĠDe', 'al'] +['Ġbatter', 'ies'] +['.d', 'uration'] +[',', '\\'] +['_m', 'arker'] +['m', 'ulti'] +['Ġh', 'alt'] +['Ġc', 'ms'] +['Ġsh', 'aped'] +['B', 'ro'] +['re', 'duce'] +['Ġ', '####'] +['CT', 'OR'] +['ĠBen', 'ef'] +['Ġicon', 'ic'] +['Ġp', 'iano'] +['Ġeffect', 'iveness'] +['|', '.Ċ'] +['Ġa', 'jax'] +['Ġv', 'olumes'] +['à¸', '¡'] +['Ġcl', 'js'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'Ċ'] +['ath', 's'] +['ra', 
'its'] +['å¤', '§'] +['Ñ', 'ĸ'] +['_m', 'ult'] +['Ġfasc', 'inating'] +['A', 'verage'] +['Ġpr', 'é'] +['ĠChair', 'man'] +['.find', 'Element'] +['_p', 'in'] +['Ġcomp', 'aring'] +['Ġdark', 'ness'] +['-F', 'i'] +['-', 'server'] +['Ġselect', 'ing'] +['ster', 'dam'] +['ĠPart', 's'] +['FORM', 'ATION'] +['Ġnot', 'ing'] +['Ġp', 'ile'] +['og', 's'] +['Ġpa', 'lette'] +['_d', 'o'] +['it', 'ize'] +['()', '('] +['Ġdef', 'ining'] +['Ġremain', 'der'] +['Un', 'its'] +['_T', 'ASK'] +['Http', 'Client'] +['S', 'ocial'] +['Ġfund', 'ra'] +['N', 'R'] +['ch', 'est'] +['C', 'urrency'] +['.ad', 'apter'] +['Ġd', 'op'] +['un', 'ting'] +['ANG', 'UAGE'] +['"', 'He'] +['ĉ', 'index'] +['_p', 'ackage'] +['.I', 'con'] +['Ġrep', 'et'] +['m', 'ass'] +['="', '.$'] +['ĠS', 'ud'] +['Ġl', 'id'] +['pro', 'vince'] +['ì', 'ľ'] +['G', 'PIO'] +['Ð', 'ļ'] +['ĠMy', 'SQL'] +['Ġdoc', 's'] +['ĠG', 'A'] +['Ġip', 'sum'] +['K', 'ernel'] +['Ġaccept', 's'] +['Ġfit', 'ting'] +['Ġcu', 'ando'] +['Ġd', 'uplic'] +['ĠBro', 'ther'] +['ĠK', 'le'] +['num', 's'] +['Ġmor', 'ph'] +['Ġ', '########'] +['ĠCG', 'Point'] +['<', 'unsigned'] +['ä¾', 'ĭ'] +['ĠD', 'uke'] +['.set', 'Bounds'] +['q', 's'] +['or', 'ic'] +['j', 'er'] +['Ġregard', 'ed'] +['Http', 'Request'] +['Ġbond', 's'] +['Ġthorough', 'ly'] +['enc', 'ent'] +['Ġhighlight', 'ed'] +['Ġac', 'res'] +['Ġwork', 'place'] +['ĠL', 'ux'] +['Ġqu', 'ot'] +['.in', 'flate'] +['Ġdocument', 'ed'] +['Ġadd', 'iction'] +['Ġmut', 'ation'] +['.c', 'ity'] +['Ġbott', 'les'] +['ĠRepos', 'itory'] +['on', 'n'] +['err', 'no'] +['ARI', 'ABLE'] +['åº', '¦'] +['_B', 'EGIN'] +['gl', 'as'] +["'", '})Ċ'] +['ĠMass', 'age'] +['ĠWh', 'it'] +['reg', 'ex'] +['W', 'A'] +['Ġout', 'let'] +['-', 'head'] +['Ġexp', 'ired'] +['ĠTh', 'ai'] +['/', 'include'] +['grad', 'ient'] +['scan', 'f'] +['Ġse', 'am'] +['w', 'al'] +['ĉb', 'uf'] +['B', 'earer'] +['Ġprec', 'ious'] +['if', 'acts'] +['co', 'ord'] +['Ġexpl', 'oration'] +['.get', 'Y'] +['(h', 'andle'] +['Top', 'ic'] +['ĠV', 'ent'] +['r', 'hs'] +['----', '--Ċ'] +['ĠB', 
'right'] +['Ġg', 'uild'] +['m', 'other'] +['st', 'orm'] +['Ġmunicip', 'al'] +['Ġin', 'k'] +['.T', 'YPE'] +['w', 'l'] +['...', '', '', 'manual'] +['ĠTechn', 'ical'] +['Ġcorpor', 'ation'] +['ĠH', 'W'] +['ank', 'a'] +['T', 'AIL'] +['ist', 'as'] +['Ġperform', 's'] +['ĠBeh', 'avior'] +['.F', 'or'] +['_', 'ORDER'] +['ĠK', 'ick'] +['Ġcallback', 's'] +['_d', 'r'] +['ue', 'go'] +['h', 'ub'] +['uff', 'icient'] +['sk', 'y'] +['Ġb', 'p'] +['ht', 'able'] +['ĠON', 'LY'] +['ĠAUTH', 'ORS'] +['.Arg', 'ument'] +['"', '};Ċ'] +['ĠTh', 'under'] +['ĠK', 'om'] +['.Sh', 'ould'] +['A', 'UTH'] +['ah', 'u'] +['_p', 'ayment'] +['Ġst', 'arter'] +['ìĦ', 'ľ'] +['ìļ', '©'] +['B', 'log'] +['.p', 'atch'] +['Ġgovern', 'ed'] +['ass', 'y'] +['-f', 'ound'] +['Ġthe', 'ater'] +['ĠFont', 'Weight'] +['ĠBat', 'man'] +['"', 'If'] +['.R', 'andom'] +['_d', 'elta'] +['ĠC', 'E'] +['Auth', 'enticated'] +['Ġdr', 'one'] +['Ġc', 'ous'] +['r', 'adius'] +['M', 'er'] +['(', 'None'] +['ĠN', 'J'] +['_', 'headers'] +['Ġam', 'er'] +['py', 'test'] +['ĠA', 'ctions'] +['ĉĉĉ', 'ĠĠĠĠ'] +['Ġet', 't'] +['Ġh', 'oly'] +['Ġun', 'comfort'] +['ĠN', 'in'] +['ĠDec', 'imal'] +['ĠM', 'essages'] +['.s', 'ender'] +[']', '])Ċ'] +['Ġembr', 'ace'] +['Th', 'ough'] +['/', 'sp'] +['Ġcult', 'ures'] +['Ġhigh', 'way'] +['t', 'ar'] +['.f', 'ail'] +['_h', 'idden'] +['ĠcomponentDid', 'Mount'] +['ĠW', 'right'] +['Ġj', 'ag'] +['_', 'il'] +['../../', '../'] +['ig', 'u'] +['F', 'ood'] +['Ġa', 'ce'] +['Ġa', 'ños'] +['US', 'D'] +['Ġmut', 'ual'] +['Log', 'ic'] +['Ġtem', 'ple'] +['Ġbrief', 'ly'] +['ĠT', 'rip'] +['class', 'method'] +['default', 's'] +['Ġch', 'unks'] +[',,', ',,'] +['ĠRe', 'ason'] +['$', 'id'] +['-up', 's'] +['Ġdam', 'n'] +['Ġtruck', 's'] +['Ġun', 'limited'] +['Ġsc', 'ulpt'] +['ĠC', 'ards'] +['Ġaut', 'or'] +['ĠTest', 'ing'] +['Ġdies', 'e'] +['sh', 'ops'] +['ç', '´'] +['(p', 'ayload'] +['ĠP', 'ATH'] +['ĠMem', 'orial'] +['Ġridic', 'ulous'] +['eg', 'ree'] +['-w', 'inning'] +['Ġre', 'hab'] +['Ġsophistic', 'ated'] +['wp', 'db'] +['ĉ', 'path'] +['!', 
'";Ċ'] +['_S', 'YS'] +['.s', 'peed'] +['Ġso', 'ap'] +['s', 'uffix'] +['W', 'rap'] +['Ġenh', 'ancement'] +['Ã', 'ī'] +['ú', 'b'] +['Ġplay', 'list'] +['Ġmix', 'ing'] +['ant', 'idad'] +['="', '";Ċ'] +['ĠRev', 'ision'] +['ĠBe', 'at'] +['.in', 'c'] +['-w', 'ay'] +['enc', 'ias'] +['ul', 'ers'] +['C', 'at'] +['id', 'el'] +['ĠSh', 'ip'] +['.set', 'Color'] +['Ġthreat', 'ening'] +['.mod', 'ules'] +['Ġafter', 'wards'] +['ĠD', 'ashboard'] +['Ċ', 'ĠĊ'] +['Sign', 'al'] +['Ġpr', 'imer'] +['orne', 'ys'] +['ici', 'ary'] +['Ġl', 'igne'] +['_p', 'redict'] +['Ġa', 'est'] +['_', 'https'] +['>', ':'] +['ĠL', 'ex'] +['Ġrencont', 'res'] +['eg', 'ral'] +['sc', 'ala'] +['_f', 'amily'] +['ÃŁ', 'en'] +['_s', 'ym'] +['Ġuncert', 'ainty'] +['ĠVAL', 'UE'] +['Ġ}', ';čĊčĊ'] +['Ġbro', 'ader'] +['Ġh', 'orses'] +['ãģ', 'Ŀ'] +['ĠK', 'al'] +['ob', 'a'] +['_IN', 'ET'] +['ĠK', 'ill'] +['j', 'query'] +['am', 'ination'] +['[', '@"'] +['Ġm', 'uj'] +['##', '#Ċ'] +['First', 'OrDefault'] +['then', 'Return'] +['C', 'he'] +['/', 'footer'] +['Ġpark', 's'] +['as', 'je'] +['ĠG', 'ulf'] +['Ġmod', 'est'] +['.', 'Init'] +['ï¼Ł', 'ĊĊ'] +['Ġpros', 'pects'] +['Ġs', 'vg'] +['Ġå', 'ı'] +['.D', 'ialog'] +['_N', 'ET'] +['Ġ(', '($'] +['Ġe', 'k'] +['ĠW', 'arning'] +['ĠM', 'K'] +['<', 'LM'] +["Ġ'", 'čĊ'] +['i', 'em'] +['h', 'etic'] +['Ġi', 'x'] +['th', 'ink'] +['-sh', 'adow'] +['ĠE', 'ld'] +['ĠNev', 'ada'] +['ĠLe', 'af'] +['ĠG', 'ROUP'] +['Ġprom', 'o'] +['ent', 'ine'] +['ĉ', 'Map'] +['ĠModel', 's'] +['ĠK', 'rist'] +['_k', 'ernel'] +['-m', 'ade'] +['Ġc', 'err'] +['As', 'sets'] +['ell', 'ar'] +['Ġinv', 'oked'] +['.v', 'ue'] +['Ġcult', 'iv'] +['C', 'losed'] +['Ġgener', 'ates'] +['ffff', 'ff'] +['thes', 'ize'] +['s', 'qrt'] +['ĠCast', 'le'] +['.c', 'ar'] +['Ġke', 'en'] +['und', 'a'] +['ĠC', 'row'] +['ĠSing', 'h'] +['y', 'thon'] +['Ġbe', 'ans'] +['l', 'arg'] +['æĸĩ', 'ä»¶'] +['Aw', 'esome'] +['unc', 'ate'] +['Path', 's'] +['o', 'ji'] +['(c', 'urr'] +['CON', 'DS'] +['Ġm', 'im'] +['Ġshould', 'ers'] +['H', 'ard'] +['ast', 'es'] +['а', 
'еÑĤ'] +['Ġconv', 'ince'] +['de', 'cess'] +['m', 'ade'] +['ĠC', 'MD'] +['.', 'Im'] +['Ġcha', 'os'] +['ens', 'ively'] +['Ġcool', 'ing'] +['Ġbur', 'ied'] +["('", '@'] +['_S', 'e'] +['ĉĉĉĉĉĉĉĉ', 'ĉĉĉĉĉĉĉĉ'] +['.com', 'pany'] +['.sub', 'mit'] +['ph', 'ant'] +['Ġboot', 'strap'] +['_h', 'elp'] +['à', '§'] +['.d', 'ump'] +['Ġdif', 'er'] +['_m', 'apping'] +['Ġcirc', 'ular'] +['Ġescort', 's'] +['Ġb', 'ere'] +['Ġgrad', 'u'] +['ĠLeg', 'end'] +['im', 'edia'] +['ĠBar', 'celona'] +['Ġbed', 's'] +['åĪ', '°'] +['ãĢ', 'Ĭ'] +['_v', 'olume'] +['Ġtremend', 'ous'] +['Ġsc', 'aling'] +['Ġp', 'ins'] +['en', 'as'] +['type', 'param'] +['D', 'ashboard'] +['render', 'er'] +['Ġsp', 'i'] +['Ġ&', '$'] +['ĠSk', 'in'] +['alm', 'art'] +['Ġh', 'ockey'] +['Ġ\'"', '.$'] +['Ġerr', 'no'] +['Ġb', 'ew'] +['Follow', 'ing'] +['.M', 'odule'] +['er', 'able'] +['ĠM', 'ilitary'] +['ĠR', 'io'] +['_', 'available'] +['ĠSur', 'face'] +['Ġst', 'ab'] +['IF', 'IER'] +['ĠL', 'IST'] +['Ġd', 'ashboard'] +['Ġcl', 'usters'] +['.pl', 'ugin'] +['Ġj', 'ou'] +['ĠDec', 'or'] +['F', 'our'] +['Ġdel', 'le'] +['******', '/Ċ'] +['ia', 'z'] +['in', 'de'] +['ch', 'ing'] +['Ġget', 'Item'] +['.Add', 'ress'] +['ment', 'ed'] +['A', 'meric'] +['Pl', 'ain'] +['Ġus', 'b'] +['ĠPract', 'ice'] +['_', 'ment'] +['.bl', 'ue'] +['H', 'int'] +['ÑĢаÐ', '²'] +['Ġconn', 'ector'] +['Ġinher', 'ited'] +['и', 'в'] +['Ġinterval', 's'] +['Ġc', 'ere'] +['Ġu', 'd'] +['Ġin', 'con'] +['.Ex', 'ists'] +['ĠM', 'ic'] +['F', 'K'] +['(c', 'ard'] +['.Set', 'tings'] +['Ġexhib', 'ition'] +['Ġon', 'Pressed'] +['Ġrest', 'ored'] +['eng', 'u'] +['.', 'def'] +['Ġrec', 'v'] +['."', ');čĊ'] +['enc', 'oder'] +['ather', 'ine'] +['(', 'dest'] +['az', 'ed'] +['#', 'endregion'] +['sem', 'bl'] +[',', 'M'] +['ob', 'y'] +['Ġп', 'еÑĢ'] +['.C', 'all'] +['Ġattend', 'ance'] +['-b', 'order'] +['Ġaddress', 'ing'] +['ê', 'n'] +['ĠLe', 'v'] +['Ġb', 'ash'] +['ben', 'ch'] +['C', 'redentials'] +['Sp', 'acing'] +['(', 'of'] +['_RE', 'SET'] +['ig', 'uous'] +['Ġcr', 'uel'] +['Ġcross', 'ed'] +['Ġle', 
'ur'] +['ĠG', 'olf'] +['or', 'rect'] +['Ġpack', 'ets'] +['ĠData', 'Set'] +['Ġpart', 'ly'] +['SEQU', 'ENTIAL'] +['Ġindic', 'ation'] +['ĠS', 'alt'] +['ac', 'ia'] +['Ġ*', ');Ċ'] +['ĉ', 'info'] +['ĠView', 'Bag'] +['on', 'z'] +['Ġeditor', 'ial'] +['ĠA', 'rena'] +['Ġs', 'ir'] +['_', 'Static'] +['(', 'socket'] +['s', 'u'] +['cho', 'ose'] +['.m', 'onth'] +['.M', 'y'] +['é', 'ri'] +[';', 'font'] +['do', 'es'] +['Ġcon', 'verter'] +['Ġsal', 'v'] +['Ġl', 'r'] +['Ġinflu', 'enced'] +['(f', 'eature'] +['ĠQue', 'ens'] +['let', 't'] +['_M', 'ON'] +['&', 'amp'] +['Touch', 'ableOpacity'] +['O', 'FF'] +['Ġmetab', 'ol'] +['(', 'iter'] +['Ġvit', 'amin'] +['ĠIND', 'IRECT'] +['aut', 'om'] +['_p', 'ublic'] +['Ġadjust', 'ment'] +['Ġspecial', 'ized'] +['w', 'indows'] +['.add', 'All'] +['Ġaccording', 'ly'] +['ĠJ', 'OptionPane'] +['Ġcell', 'spacing'] +['Ġqu', 'ad'] +['Ġcre', 'ep'] +['Ġout', 'lets'] +['}`', ')Ċ'] +['Ġpri', 'est'] +['_TH', 'READ'] +['ĠMar', 'x'] +['ĠBy', 'Val'] +['Ġc', 'ual'] +['éĿ', '¢'] +['Ġtempor', 'arily'] +['An', 'n'] +['ke', 'leton'] +['å', '¥'] +['ĠLO', 'C'] +['au', 'er'] +['der', 'ive'] +['Ġbeh', 'aviors'] +['as', 'ename'] +['ĠCent', 'ury'] +['Ġhor', 'rible'] +['ME', 'SS'] +['_', 'List'] +['we', 'i'] +['P', 'at'] +['ĠCh', 'oice'] +['_F', 'ROM'] +['ĉ', 'line'] +['.in', 'voke'] +['.B', 'ottom'] +['Ġnow', 'here'] +['."', 'ĊĊĊĊ'] +['_', 'export'] +['Ġstrugg', 'led'] +['.Ap', 'pearance'] +['ĠJ', 'Button'] +['ĠJer', 'emy'] +['([', '['] +['Ġkick', 'ed'] +['mar', 'shal'] +['st', 'aff'] +['es', 'ity'] +['Ġqu', 'iz'] +['_e', 'ffect'] +['Ġ}', '));ĊĊ'] +['m', 'el'] +['b', 'anner'] +['ĠP', 'IN'] +['Ġin', 'vention'] +['Ġcons', 'olid'] +['Ġop', 's'] +['ĠB', 'etween'] +['j', 'ack'] +['ern', 'ational'] +['Ġsacr', 'ifice'] +['ag', 'ation'] +['ĠJ', 'oy'] +['Ġam', 'endment'] +['ĠS', 'old'] +['Ġprison', 'ers'] +['ан', 'нÑĭ'] +['Doc', 'uments'] +[')', '])Ċ'] +['ust', 'ed'] +['ĠLine', 'arLayout'] +['os', 'o'] +['_E', 'M'] +['.s', 'elf'] +['.M', 'iddle'] +[')', '//'] +['Ġ\\', "'"] +['Ġfuck', 
'ed'] +['ĠM', 'urray'] +['Ġprof', 'ound'] +['_E', 'LEMENT'] +['ult', 'a'] +['il', 'ers'] +['port', 'folio'] +['J', 'une'] +['t', 'cp'] +['mod', 'ified'] +['ĠTr', 'ace'] +['ĠK', 'el'] +['aly', 'zer'] +[')', '=>'] +['ĠRep', 'air'] +['_B', 'E'] +['Br', 'and'] +['u', 'art'] +['pre', 'view'] +['Ġiniti', 'atives'] +['run', 'ning'] +['b', 'ang'] +['ĉ', 'update'] +['ĠCo', 'ach'] +['R', 'ich'] +['Ġy', 'outube'] +['Ġrit', 'ual'] +['app', 'a'] +['ĠRobin', 'son'] +['prec', 'ision'] +['////////////////////////////////////////////////////////////////', '////////////'] +['=[', ']Ċ'] +['Ġcelebr', 'ated'] +['OT', 'O'] +['Ġin', 'clusion'] +['J', 'P'] +["'", ';čĊčĊ'] +['Ġnot', 'able'] +['(_', '.'] +['Man', 'aged'] +['Ġgu', 'ides'] +['&', 'nbsp'] +['ated', 'Route'] +['ĠAd', 'just'] +['Ġcol', 'ored'] +['_s', 'cores'] +['ĠTes', 'la'] +['_pro', 'gress'] +['.in', 'st'] +["['", '_'] +['.fl', 'ags'] +['Ġf', 'close'] +['_O', 'PER'] +['ż', 'y'] +['_n', 'ote'] +['Ġtrans', 'gender'] +['å', 'ķ'] +['RI', 'PT'] +['Ġabs', 'ent'] +['Ġam', 'et'] +['Ġoper', 'and'] +['ë', '©'] +['Ġh', 'ood'] +['to', 'LowerCase'] +['av', 'o'] +['ĠCirc', 'uit'] +['ĠL', 'ind'] +['--', '}}Ċ'] +['=', 'm'] +['Ġsup', 'press'] +['ĠM', 'AP'] +['i', 'ang'] +['-', 'admin'] +['Ġside', 'bar'] +['ĠB', 'u'] +['ĠH', 'ex'] +[',', 'F'] +['ĠSign', 'al'] +['Ġtrans', 'parency'] +['ĠFeder', 'ation'] +['/', 'V'] +['Re', 'q'] +['Ġpul', 'se'] +['Ġt', 'ends'] +['Num', 'bers'] +['%', "'"] +['Ġde', 'port'] +['dat', 'as'] +['_U', 'INT'] +['_', 'tra'] +['ok', 'o'] +['Ġ"', '?'] +['comp', 'et'] +['sole', 'te'] +['und', 'ry'] +['Ġover', 'lap'] +['}`', ',Ċ'] +['.', 'ly'] +['_sum', 'mary'] +['ĠL', 'ost'] +['.C', 'enter'] +['Ġdis', 'ability'] +['.Serial', 'ization'] +['Ġge', 'om'] +['Ġ?', ':'] +['ĠW', 'o'] +['Ġsh', 'ipped'] +['Ĥ', 'æķ°'] +['Ġu', 'gly'] +['Ġexcit', 'ement'] +['Ġext', 'erior'] +['Ġcheck', 'out'] +['Ġk', 'ur'] +[',', 'D'] +['ĠAl', 'aska'] +['Ġsyn', 'thetic'] +['ĠB', 'udget'] +['ĠSub', 'scribe'] +['Ġ&', 'Ċ'] +['ÈĻ', 'i'] +['ĠY', 'u'] +['ĉ', 
'query'] +['}', '.Ċ'] +['Ġtr', 'aged'] +['ass', 'en'] +['Ġaccommod', 'ation'] +['Ġphys', 'ician'] +['Ġren', 'amed'] +['Ġtid', 'ak'] +['z', 'Äħ'] +['Ġmin', 'us'] +['ny', 'ch'] +['_EX', 'CEPTION'] +['thread', 's'] +['Ġt', 'ire'] +['_c', 'reated'] +['ens', 'ure'] +['Ġworth', 'y'] +['Ġexc', 'use'] +['Ġclo', 'th'] +['.parent', 'Node'] +['/pl', 'atform'] +['ĠU', 'FC'] +['ĠG', 'tk'] +['un', 'ny'] +['Ġg', 'ibt'] +['ke', 'ley'] +['h', 'um'] +['(t', 'x'] +['ĉ', 'dev'] +['Ġout', 'fit'] +['do', 'ors'] +['Ġf', 'on'] +['ic', 'ut'] +['vol', 'atile'] +['Ġhom', 'osex'] +['Max', 'imum'] +['Ġexp', 'end'] +['Ġ});ĊĊ', 'Ċ'] +['E', 'q'] +['ond', 'ers'] +['dep', 'artment'] +['ĠPhys', 'ics'] +['"', '});Ċ'] +['Ġpar', 'ad'] +['.S', 'tr'] +['Ġse', 'le'] +['IF', 'IED'] +['Ġdel', 'ivers'] +['iv', 'an'] +['Ġrespons', 'ibilities'] +['Ġadvoc', 'ates'] +['è', 'µ'] +['ĠR', 'ID'] +['.param', 'eters'] +['M', 'etrics'] +['ron', 'ics'] +['ĠUITableView', 'Cell'] +['A', 'bsolute'] +['ip', 'se'] +['yl', 'um'] +['MLE', 'lement'] +['_VAL', 'ID'] +['<', 'title'] +['D', 'lg'] +['p', 'aces'] +['Ġsynd', 'rome'] +['be', 'ans'] +['_d', 'atabase'] +['oz', 'illa'] +['ĠM', 'eg'] +['DB', 'G'] +['Ġl', 'ub'] +['Bag', 'Constraints'] +['ab', 'ad'] +['Ġproject', 'ed'] +['_BY', 'TE'] +['.Size', 'F'] +['st', 'reet'] +['ĊĊĊĊ', 'ĊĊĊĊĊĊ'] +['ĠLO', 'SS'] +['Ġdirect', 'ors'] +['/', 'news'] +['Ġnurs', 'ing'] +['ĠD', 'one'] +['.', 'HTTP'] +['dis', 'count'] +['ĠR', 'ot'] +['To', 'Many'] +['Ġen', 'abling'] +['Ġauss', 'i'] +['ost', 'a'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'čĊ'] +['è½', '½'] +['Ġhel', 'icopt'] +['ĠIn', 'side'] +['ä¿¡', 'æģ¯'] +['is', 'per'] +['ĠAll', 'ah'] +['ARCH', 'AR'] +['Ġroll', 's'] +['Com', 'pare'] +['X', 'P'] +['Index', 'Of'] +['S', 'UM'] +['Ġass', 'ured'] +['ĠPhys', 'ical'] +['End', 'point'] +['.G', 'lobal'] +['.d', 'etail'] +['Ġthe', 'ft'] +['.j', 'upiter'] +['Ġhum', 'or'] +['.R', 'ender'] +['A', 'lex'] +['.c', 'ap'] +['Ġbuff', 'ers'] +['Ġdis', 'pose'] +['t', 'ion'] +['.p', 'resent'] +['z', 'el'] +[',', 'P'] +['Ġdesper', 
'ate'] +['.get', 'Column'] +['Ġtw', 'in'] +['ì', 'ĸ'] +['.c', 'an'] +['Ġf', 'lee'] +['ĠIran', 'ian'] +['Ġstick', 'y'] +['ĠU', 'TC'] +['L', 'T'] +['////////////////////////////////', '////////////////'] +['Ġl', 'icensing'] +['_PO', 'INT'] +['ĠM', 'aps'] +['Ġl', 'ol'] +['=', 'models'] +['-t', 'ab'] +['ĠN', 'ash'] +['_log', 'ger'] +['tor', 'ch'] +['ĠCON', 'SEQUENTIAL'] +['Not', 'Empty'] +['/', 'react'] +['Ġp', 'f'] +['Ġassert', 'ion'] +['Ġsubsequ', 'ently'] +['_c', 'an'] +['Ġpand', 'emic'] +['og', 'ue'] +['"+', 'Ċ'] +['_', 'ent'] +['_P', 'aram'] +['.ĊĊ', 'ĊĊĊĊĊĊ'] +['Res', 'earch'] +['C', 'apture'] +['Ġbel', 'oved'] +['d', 'em'] +['Ġextract', 'ed'] +['Ġf', 'ights'] +['ER', 'C'] +['(a', 'uth'] +['position', 's'] +['Ġrevers', 'ed'] +['(st', 'ack'] +['Ġ_', ')'] +['uto', 'ff'] +['_fl', 'ow'] +['ç', 'Ĥ¹'] +['(', 'Game'] +['Ġex', 'cluded'] +['ĠCS', 'V'] +['c', 'g'] +['ĠT', 'itan'] +['p', 'ause'] +['Ġcer', 'ca'] +['Ġdump', 'ster'] +['L', 'ess'] +['Ġkotlin', 'x'] +['aster', 'xml'] +['Ġpoint', 'ers'] +['Ġfl', 'ows'] +['ĠT', 'un'] +['ĠMain', 'Activity'] +['Ġdis', 'cret'] +['Ġcomb', 'inations'] +['vis', 'it'] +['_b', 'ind'] +['oot', 'ing'] +['d', 'ater'] +['_look', 'up'] +['.n', 'io'] +['Ġswe', 'at'] +['ĠR', 'd'] +['Ġscient', 'ist'] +['ĠP', 'ixel'] +['@', 'NgModule'] +['Play', 'ing'] +['Ġunf', 'old'] +['Trans', 'late'] +['ĠLaw', 'rence'] +['ĠFIX', 'ME'] +['B', 'ill'] +['ĠR', 'IGHT'] +['Ġwhere', 'ver'] +['Ġo', 'ok'] +['vid', 'ence'] +['Ġ]', '];'] +['ĠSk', 'ill'] +['unist', 'd'] +['ĠðŁ', 'ĻĤ'] +['Ġfem', 'ales'] +['--', ')Ċ'] +['İ·', 'åıĸ'] +['ĠF', 'red'] +['Over', 'all'] +['Ù', 'Ĥ'] +['Ġess', 'ence'] +['Ġthere', 'by'] +['Ġw', 'ounded'] +['ĠD', 'OWN'] +['les', 'son'] +['text', 'ure'] +['R', 'ound'] +['Ġautom', 'ated'] +['ĠÐ', '¡'] +['ĠUp', 'dates'] +['Ġsh', 'ade'] +['p', 'ublish'] +['ĠG', 'ear'] +['=', 'lambda'] +['Ġle', 'ver'] +[')', '+"'] +['h', 'ill'] +['Ġrad', 'ar'] +['ry', 'ing'] +['Ġ"', ').'] +['f', 'illed'] +['Ġline', 'up'] +['Ġd', 'l'] +['Ġworks', 'pace'] +['V', 'o'] 
+['_d', 't'] +['ë', '²'] +['_', 'Item'] +['NS', 'URL'] +['.', 'verify'] +['ĠHawai', 'i'] +['G', 'od'] +['M', 'arch'] +['Ġ[â̦', ']'] +['Ġpel', 'o'] +['ur', 'ious'] +['ĠPitt', 'sburgh'] +['.', 'It'] +['C', 'lean'] +['>', '\\<^'] +['Ġi', 'os'] +['s', 'ound'] +['"]', ';'] +['Ġfre', 'ed'] +['rot', 'tle'] +['ĠL', 'ower'] +['[', 'count'] +['å', 'Ŀ'] +['Ġp', 'ale'] +['ĠWay', 'ne'] +['ear', 'th'] +['_c', 'ategories'] +['U', 'CK'] +['.m', 'etadata'] +['Ġsum', 'mon'] +['H', 'OME'] +['олÑĮ', 'з'] +['Ġmanufact', 'ured'] +['Ġdo', 'ck'] +['Ġcompet', 'itors'] +['_MODE', 'L'] +['ok', 'ia'] +['ĠH', 'ey'] +['Î', '¿'] +['Ġback', 'ward'] +['ĠPO', 'SS'] +['rop', 'a'] +['Ġc', 'ri'] +['_O', 'BJ'] +['Trans', 'port'] +['-h', 'igh'] +['Ġerot', 'ik'] +['_s', 'lot'] +['Ġart', 'ic'] +['_f', 'ramework'] +['-ser', 'if'] +['ĠSql', 'DbType'] +["')", '('] +['+', '"/'] +['Ġw', 'ore'] +['S', 'il'] +['Ġst', 'oring'] +['ĠPh', 'ase'] +['u', 'ant'] +['Ġb', 'ump'] +['in', 'ho'] +['Ġd', 'ign'] +['Ġback', 's'] +['q', 'q'] +['(h', 'ash'] +['Ġge', 'o'] +['Ġt', 'ender'] +['Log', 'o'] +['!', ')Ċ'] +['ĠM', 'X'] +['ĠAr', 'thur'] +['esso', 'a'] +['_C', 'h'] +['Ġbed', 'rooms'] +['="#', '"><'] +['Ġth', 'roat'] +['ins', 'ic'] +['.int', 'eger'] +['Ġpr', 'imitive'] +['Truth', 'y'] +['Ġfacilit', 'ate'] +['Ġcreat', 'ivity'] +['ĠD', 'NS'] +['Ġg', 'ra'] +['ue', 'z'] +['Ġcount', 'less'] +['ĠPol', 'and'] +["'", 'M'] +['ĠD', 'ist'] +['Ġv', 'est'] +['Ġcert', 'ification'] +['á»', 'ij'] +['h', 'eld'] +['ext', 'ensions'] +['(', 'static'] +['Ġgr', 'ades'] +['ĠU', 'ber'] +['ãģ', 'Ł'] +['Ġ[', '])Ċ'] +['dat', 'os'] +['Ġget', 'Data'] +['ĠCh', 'arg'] +['ĠB', 'S'] +['.m', 'icrosoft'] +['.v', 'ideo'] +['.d', 'irection'] +['->{', "'"] +['l', 'ua'] +['ape', 'st'] +['Ġbo', 'iler'] +['ere', 'k'] +['Ġdec', 'ides'] +['.j', 'ar'] +['IS', 'C'] +['ĠW', 'ords'] +['(C', 'ON'] +['EMPL', 'ATE'] +['ree', 'ze'] +['sh', 'ots'] +['app', 's'] +['unt', 'ed'] +['.set', 'Name'] +['::', '<'] +['-b', 'old'] +['ê', '²'] +['å¯', 'Ĩ'] +['Long', 'rightarrow'] 
+['Ġunf', 'air'] +['Ġear', 'ning'] +['Ġsh', 'elf'] +['URE', 'MENT'] +['Ġid', 'le'] +['_M', 'ENU'] +['.C', 'ustom'] +['AG', 'ER'] +['-', '"'] +['_s', 'witch'] +['b', 'ecause'] +[')', 'view'] +['m', 'are'] +['_', 'condition'] +['ĠStart', 'ing'] +['M', 'vc'] +['(p', 're'] +['d', 'ump'] +['_LO', 'CK'] +['at', 'etime'] +['.c', 'allback'] +['ĠC', 'er'] +['op', 'ol'] +['ib', 'rary'] +['Ġres', 'ervation'] +['ĉĉĉĉĉĉĉ', 'Ċ'] +['lect', 'or'] +['grad', 'uate'] +['Ġgener', 'ous'] +['Ġ', 'ion'] +['ric', 'ao'] +['m', 'q'] +['_com', 'plete'] +['(c', 'ursor'] +['ĠForm', 'Control'] +[':', 'center'] +['Ġsub', 'stitute'] +['ĠPl', 'anning'] +['Ġp', 'ension'] +['Ġrecommend', 'ation'] +['ĠT', 'ags'] +['Ġg', 'ef'] +['Ġalbum', 's'] +['Ġwash', 'ing'] +['ro', 'c'] +['Ġtr', 'ains'] +['at', 'ings'] +['Ġex', 'ponent'] +['ack', 'bar'] +['-', 'ln'] +['á', 'g'] +['.Data', 'Annotations'] +['ĠE', 'IF'] +['ĠMalays', 'ia'] +['ĉ', 'PORT'] +['on', 'us'] +['Ġcle', 'ver'] +['Ġpe', 'u'] +['>', 'ĊĊĊĊ'] +['ĠArg', 'uments'] +['Ġdebug', 'ging'] +['(', 'right'] +["'", 'D'] +['com', 'pute'] +['Ġfin', 'est'] +['OR', 'AGE'] +['Ġspect', 'acular'] +['ph', 'rase'] +['Ġind', 'ia'] +['Ġlegend', 'ary'] +['b', 'irth'] +['Ġcom', 'posite'] +['Ġg', 'rows'] +['ĠT', 'D'] +['Ġep', 'id'] +['Ġlaunch', 'ing'] +[']', ']['] +['Min', 'utes'] +['ĠCh', 'a'] +['Ġclean', 'ed'] +['Ġwitness', 'es'] +['uk', 'an'] +['ĉ', 'Type'] +['Ġhab', 'e'] +['par', 'agraph'] +['ĠJ', 'Panel'] +['ĠH', 'ann'] +['Ġvar', 'ied'] +['ĠP', 'okemon'] +['ĠM', 'UST'] +['åĬ', '¨'] +['.vis', 'ibility'] +['op', 'up'] +['^', '['] +['.exp', 'and'] +['Ġ"', "',"] +['.f', 'asterxml'] +['_', 'auto'] +['ĠShe', 'et'] +['mark', 'er'] +['Par', 'cel'] +['ew', 's'] +['ĠStr', 'ategy'] +['-m', 'aking'] +['Ġun', 've'] +['Ġtrail', 'ing'] +['Ġclick', 's'] +['ĠGet', 'Component'] +['ĉ', 'content'] +['IG', 'ENCE'] +['ERN', 'EL'] +['NSMutable', 'Array'] +['Ġb', 'reat'] +['Ġharm', 'ful'] +['¶', 'Ī'] +['Ġbes', 'ides'] +['Ġb', 'oring'] +['Ġbrut', 'al'] +['v', 'ang'] +['(p', 'arse'] +['qu', 
'ick'] +['Ġpy', 'test'] +['Ġswitch', 'ing'] +['()', ']Ċ'] +['Ġì', 'Ħ'] +['L', 'ER'] +['ĉf', 'ont'] +['Ġnet', 't'] +[')', ']ĊĊ'] +['(/', '\\'] +['æŀ', 'ľ'] +['to', 'Array'] +['Ġbre', 'ed'] +['ĠC', 'AR'] +['ĠWe', 'apon'] +['A', 'bs'] +['t', 'ot'] +['Ġset', 'Name'] +['apt', 'ive'] +['Ġ:', ','] +['Ġesc', 'aped'] +['ord', 'en'] +['ĠP', 'ri'] +['th', 'umbnail'] +['Ġdescri', 'ptions'] +['/', 'styles'] +['ĠPC', 'I'] +['Ġal', 'phabet'] +['astic', 'search'] +['NOT', 'E'] +['Ġc', 'ialis'] +['ĠGr', 'iff'] +['Ġpor', 'que'] +['Ġprote', 'ins'] +['pl', 'ays'] +['Ġst', 'ating'] +['Ġimag', 'ination'] +['Ġfac', 'ial'] +['ĠMe', 'chan'] +['Ġarr', 'anged'] +['_', 'used'] +['Ġarrang', 'ements'] +['ĠP', 'ipe'] +['host', 'name'] +['Ġprov', 'inc'] +['T', 'it'] +['.Flat', 'Style'] +['ĠS', 'plit'] +['ĠLo', 'ader'] +['.c', 'c'] +['Ġclin', 'ic'] +['----------------', '------------'] +['Ġb', 'aking'] +['ĠEN', 'T'] +['ne', 'ath'] +['ãĢģ', 'ĊĊ'] +['AN', 'E'] +['.EntityFramework', 'Core'] +['app', 'ers'] +['.', 'ic'] +['ĠNg', 'Module'] +['ĠF', 'ORM'] +["Ġ'", ';'] +['-pro', 'fit'] +['h', 'w'] +['en', 'emy'] +['ĠE', 'ye'] +['Ġca', 'ution'] +['t', 'own'] +['Ġur', 'ged'] +['ĠJim', 'my'] +['ynchron', 'ous'] +['-s', 'ized'] +['m', 'aking'] +[',', '{'] +[']', "',"] +['_', 'Object'] +['ah', 'oma'] +['Ġactiv', 'ist'] +['IN', 'VAL'] +['ĠCom', 'mercial'] +['ĠOr', 'lando'] +['(t', 'ab'] +['ĠØ', '¨'] +['Al', 'gorithm'] +['Ġher', 'itage'] +['Get', 'Mapping'] +['Ġfail', 'ures'] +['ri', 'os'] +['at', 'iva'] +['Ġt', 'et'] +['Ġcar', 'pet'] +['(', 'Z'] +['th', 'ree'] +['Ġdisc', 'losure'] +['.', 'ERROR'] +['_c', 'alled'] +['Ġd', 'ial'] +['Ġoccas', 'ional'] +['.E', 'rr'] +['Ġfunc', 'ion'] +['caff', 'old'] +['Ġrele', 'asing'] +['ï¼ī', 'ĊĊ'] +['_', 'Value'] +['ĠV', 'ari'] +['y', 'ellow'] +['Ġstrugg', 'les'] +['.c', 'al'] +['ĠDak', 'ota'] +['ĉc', 'lose'] +['Ġsand', 'wich'] +['Ġanaly', 'tics'] +['Ġ**', ')'] +['&', '#'] +['ĠJ', 'os'] +['Ġpass', 'ive'] +['AT', 'TR'] +['Th', 'rowable'] +['ĠM', 'un'] +['ĠU', 'int'] +['(dis', 
'posing'] +['ar', 'ak'] +['ĠLe', 'aders'] +['Ġaffect', 'ing'] +['Ġitem', 'View'] +['Ġeconom', 'ics'] +['f', 'v'] +['à¹', 'Ģ'] +['.r', 'b'] +['ĠOver', 'all'] +['Ġwealth', 'y'] +['Ġev', 'olved'] +['nd', 'a'] +['ĠH', 'us'] +['re', 'strict'] +['um', 'en'] +['ĠA', 'gricult'] +['!', 'ĊĊĊ'] +['Ġexp', 'ires'] +['Ġspokes', 'person'] +['int', 'erval'] +['ĠÃ', '¢'] +['Ġque', 'en'] +['(n', 'il'] +['ing', 'o'] +['He', 'ap'] +['Ù', 'İ'] +['Ġcompl', 'ain'] +['S', 'ym'] +['ĠCl', 'one'] +['ĠR', 'u'] +['ĠW', 'ILL'] +['ĠCr', 'ystal'] +['/', 'content'] +['ing', 'en'] +['oint', 'ment'] +['Last', 'Name'] +['av', 'icon'] +['ĠIB', 'M'] +['ĠDim', 'ension'] +['an', 'h'] +['icip', 'ants'] +['ĠAn', 'ne'] +['.pro', 'gress'] +['Ġal', 'go'] +['ob', 'il'] +['ĠV', 'oice'] +['ĠF', 'E'] +['Ġg', 'li'] +['Ġv', 'ed'] +['Ġprevent', 's'] +['\\', 'Column'] +['Ġfol', 'k'] +['ett', 'i'] +['Ġm', 'n'] +['ĠCL', 'ASS'] +['Ġdisplay', 'ing'] +['ĠK', 'l'] +['ĠF', 'err'] +['d', 'uto'] +['.', 'ib'] +['Ġd', 'ados'] +["'", 'name'] +['-s', 'pace'] +['Ġit', 'alian'] +['Ġin', 'verse'] +['Ġd', 'ense'] +['ut', 'er'] +['ĠI', 'Enumerator'] +['-s', 'ign'] +['Ġnation', 'wide'] +['Ġperson', 'a'] +['Ġsol', 'ved'] +['Ġdram', 'atically'] +['Log', 'out'] +['Ġgr', 'av'] +['Ġanalys', 'es'] +['ol', 'lo'] +['Ġl', 'amp'] +['.', 'team'] +['ĠE', 'rot'] +['=', '["'] +['Ġd', 'ancing'] +['Ġ?>', '/'] +['Ġc', 'ater'] +['ff', 'e'] +['ĠSh', 'a'] +['ĠB', 'os'] +['ĠRE', 'QUIRE'] +['ĠMon', 'ster'] +['ĠR', 'B'] +['ĠI', 'DE'] +['Ġsu', 'its'] +['Ġform', 'Data'] +['(', 'theta'] +['Ġsp', 'atial'] +['=', 'NULL'] +['ĠSql', 'Connection'] +['Ġ', 'à'] +['ĠV', 'enez'] +['ĠMor', 'ning'] +['Ġpublic', 'ations'] +['ĠNON', 'INFRINGEMENT'] +['first', 'Name'] +['ud', 's'] +['W', 'ould'] +['_HE', 'AD'] +['Ġinvest', 'ed'] +['st', 'able'] +['f', 'red'] +['Ġcommand', 'er'] +['SE', 'S'] +['âĢĶ', 'a'] +['an', 'che'] +['ĠM', 'ovement'] +['ë', '³'] +['S', 'uite'] +['Ġjur', 'isdiction'] +['ë¦', '¬'] +['ĠB', 'eth'] +['j', 'Query'] +['ĠIs', 'a'] +['Ġd', 'ental'] +[',', '*'] 
+['ĠL', 'imit'] +['ili', 'ation'] +['="', '{'] +['b', 'ast'] +['Ġt', 'urb'] +['is', 'y'] +['O', 'OK'] +['Ġadvoc', 'ate'] +['im', 'ag'] +['LE', 'CTION'] +['л', 'ÑĮ'] +['(c', 'ategory'] +['.de', 'c'] +['Ġun', 'iqu'] +['_s', 'n'] +['Ġattract', 'ed'] +['ĠÃ', 'ī'] +['ĠRun', 'ning'] +['_', 'edges'] +['ĠDis', 'able'] +['_A', 'S'] +['åĽ', '¾'] +['Ġnetwork', 'ing'] +['_br', 'anch'] +['H', 'aving'] +['toBe', 'Truthy'] +['G', 'I'] +['Ġcamp', 's'] +['se', 'p'] +['-p', 'art'] +['Ġ)ĊĊ', 'ĊĊĊĊĊĊ'] +['ustral', 'ia'] +['ĠRe', 'ports'] +['rit', 'o'] +['Ġwa', 'ist'] +['_pl', 'us'] +['ĠW', 'W'] +['-p', 'erson'] +['Apr', 'il'] +['Ġs', 'ar'] +['.t', 'ar'] +['Ġagricult', 'ural'] +['t', 'ic'] +['Ġt', 'cp'] +['Ġset', 'Value'] +['agent', 'o'] +['ĠAp', 'pe'] +['p', 'iler'] +['CA', 'DE'] +['Ġan', 'che'] +['atch', 'er'] +['Ġcom', 'ics'] +['Ġl', 'bs'] +['_se', 'gment'] +["']", '=$'] +['itt', 'ers'] +['ich', 'er'] +['G', 'INE'] +['Ġutil', 'ize'] +['ĠC', 'ursor'] +['_ex', 'pression'] +['Ġd', 'ag'] +['<', 'long'] +['Ġr', 'hyth'] +['æı', 'IJ'] +['Ġconsult', 'ation'] +['Y', 'et'] +['"))', 'ĊĊ'] +['_M', 'AC'] +['c', 'ould'] +["Ġ'", '\\\\'] +['ĠV', 'o'] +['ĉ', 'http'] +['Ġg', 's'] +['ph', 'er'] +['-', 'grid'] +['J', 'ames'] +['J', 'ul'] +['Ġsch', 'on'] +['Ġtensor', 'flow'] +['ĠLOG', 'GER'] +['am', 'as'] +['Ġsc', 'ipy'] +['Ġconv', 'iction'] +['.', 'ag'] +['Ġadministr', 'ator'] +['))', '{čĊ'] +['Ġn', 'un'] +['"', 'group'] +['P', 'or'] +['Ġnur', 'se'] +['ex', 'pression'] +['ak', 'y'] +['ĠHe', 'avy'] +['.', 'opt'] +['.get', 'All'] +['Ġover', 'l'] +['/', '",'] +['_c', 'ountry'] +['ç', 'İ'] +['ĠG', 'ENER'] +['_r', 'oute'] +['ĠD', 'al'] +['Â', '´'] +['ol', 'oad'] +['Ġuncomfort', 'able'] +['(m', 'enu'] +['Ġhost', 'name'] +["'", '");Ċ'] +['Ġcalcul', 'ations'] +['-c', 'lick'] +['Ġprotect', 'ive'] +['ãĤ', '¯'] +['_F', 'orm'] +['ung', 's'] +['Act', 'ual'] +['m', 'f'] +['ĠProcess', 'ing'] +['ĠIn', 'ventory'] +['(m', 'atrix'] +['app', 'ropriate'] +['w', 'eg'] +['ij', 'a'] +['Ġch', 'r'] +['Ġr', 'ifle'] +['-w', 'sj'] 
+['k', 'ar'] +['Ġindepend', 'ently'] +['I', 'OS'] +['Ġconsist', 'ency'] +['v', 'n'] +['/s', 'ystem'] +['ĠCh', 'anges'] +['Ġexp', 'ose'] +['ici', 'ents'] +['Ġrel', 'ate'] +['ĉ', 'next'] +['è', '¨'] +['ud', 'es'] +['Ġglass', 'es'] +['F', 'XML'] +['....', '..'] +['ĠP', 'df'] +['Ġappro', 've'] +['Ġ{', '\\'] +['Ġexist', 'e'] +['))', '('] +['ARE', 'NT'] +['оÐ', '¿'] +['ĠL', 'atest'] +['ĠNiger', 'ia'] +['.Inter', 'faces'] +['Ġrem', 'oves'] +['En', 'emy'] +['Ġen', 'force'] +['vert', 's'] +['ĉ', 'pos'] +['_text', 'ure'] +['W', 'ARD'] +['ĠINC', 'IDENT'] +['(', 'container'] +['Ġdef', 'ending'] +['ĠR', 'X'] +['ĠH', 'ook'] +['br', 'is'] +['ĠFl', 'ask'] +['Gr', 'ay'] +['.', ')Ċ'] +['vis', 'ibility'] +['ĠRedirectTo', 'Action'] +['err', 'al'] +['_e', 'lem'] +['Ġres', 'on'] +['front', 'end'] +['_variable', 's'] +['ater', 'ia'] +['Ġ+', '"'] +['ave', 'led'] +['RI', 'X'] +['Ġdef', 'icit'] +['_C', 'heck'] +['YY', 'YY'] +['To', 'One'] +['sp', 'y'] +['Ġun', 'ited'] +['end', 'ent'] +['Ġp', 'ode'] +['ãģ', 'Į'] +['C', 'AT'] +['(f', 'mt'] +['ĠBon', 'us'] +['Ġre', 'ck'] +['Â', 'º'] +['Mod', 'ules'] +['Ġvac', 'uum'] +['R', 'adio'] +['ĠDAM', 'AGE'] +['P', 'en'] +['ĠPark', 'er'] +[';', ';Ċ'] +['ĠRe', 'ally'] +['_n', 'eg'] +['p', 'ending'] +['Ġnomine', 'e'] +['ĠC', 'ategories'] +['ĠUl', 'tra'] +['We', 'apon'] +['Ġdef', 'ender'] +['I', 'ss'] +['ĠG', 'ender'] +['ĠD', 'ress'] +['Ġimpr', 'ison'] +['Ġbank', 'rupt'] +['imension', 'al'] +['PH', 'A'] +['ĠStr', 'ateg'] +['ĠPROF', 'ITS'] +['Ġp', 'atri'] +['////////////////////////////////////////////////////////////////', '////////////////'] +['de', 'legate'] +['Ġfor', 'State'] +['Ġdev', 'oted'] +['_m', 'ake'] +['Ġterror', 'ists'] +['ĠS', 'nap'] +['_n', 'av'] +['ĠA', 'A'] +['ĠI', 'an'] +['ĉ', 'app'] +['Pl', 'acement'] +['_h', 'dr'] +['<', 'K'] +['Ġs', 'ang'] +['st', 'roke'] +['-', 'Q'] +['>', 'x'] +['.T', 'ask'] +['m', 'oney'] +['ib', 'aba'] +["'", '});Ċ'] +['ĠSpec', 'ific'] +['ĠLine', 'ar'] +['_O', 'PT'] +['Hash', 'Code'] +['(', 'Player'] +['.Contains', 
'Key'] +['Ġcoll', 'apsed'] +['trans', 'parent'] +['_R', 'ANGE'] +['View', 'er'] +['(c', 'fg'] +['Ġsort', 'ing'] +['Ġinf', 'ected'] +['ĠN', 'ach'] +['Ġaccommod', 'ate'] +['.element', 's'] +['_P', 'ART'] +['ĠSex', 'y'] +['=', 'get'] +['(', 'year'] +['Ġx', 'hr'] +[':', ']'] +['ows', 'ki'] +['Ġsum', 'mar'] +['ĠÂ', '¿'] +['Ġint', 'e'] +['Ġwork', 'flow'] +['ĠTai', 'wan'] +['vers', 'ions'] +['åı', 'ij'] +['Ġsurprising', 'ly'] +['Ġopt', 'ical'] +['Ġpro', 'ces'] +['Ġdisag', 'ree'] +['Ġnue', 'vo'] +['ĠC', 'AM'] +['sort', 'ed'] +['le', 'ases'] +['ist', 'le'] +['Id', 'ent'] +['ĉ', 'event'] +['ject', 'ed'] +['Ch', 'unk'] +['V', 'ars'] +['.pro', 'vider'] +['Ġproceed', 'ings'] +['Ġin', 'clusive'] +['Ġart', 'work'] +['end', 'ants'] +['ï¼ļ', 'Ċ'] +['se', 'en'] +['Ġl', 'ig'] +['Ġm', 'akers'] +['_f', 'un'] +['Ġlength', 's'] +['Path', 'Variable'] +['[', 'item'] +['à¸', 'µ'] +['De', 'ad'] +['FFFF', 'FF'] +['ĠUr', 'ban'] +['up', 'les'] +['ich', 'en'] +['(null', 'ptr'] +['.s', 'pec'] +[',', 'System'] +['UR', 'ATION'] +['(j', 'ob'] +['å¼', 'ı'] +['Ġtrack', 'er'] +['Å', 'Ļ'] +['ĠM', 'R'] +['ĠSQL', 'ite'] +['Ġd', 'to'] +['Ġ;', ';Ċ'] +['Ġm', 'int'] +['ĠInt', 'roduction'] +['ca', 'o'] +['Ġquestion', 'ed'] +['Ġf', 'itted'] +['rev', 'ision'] +['s', 'q'] +['Ġm', 'ig'] +['_un', 'its'] +['_', 'async'] +['Ġf', 'lick'] +['});ĊĊ', 'Ċ'] +['Ġnot', 're'] +['}`', ','] +['F', 'ilters'] +['Ġm', 'undo'] +['_d', 'ays'] +['Ġfr', 'm'] +['ut', 'c'] +['Ġval', 's'] +['ew', 'idth'] +['ĠGener', 'ator'] +['ĠArt', 'ist'] +['ĠID', 's'] +['ĠArt', 'icles'] +['re', 'ater'] +['ĠComponent', 'Fixture'] +['.', '='] +['Ġr', 'ou'] +['-', 'no'] +['.b', 'ukkit'] +['eg', 'g'] +['ĠD', 'iff'] +['atic', 's'] +['Ñĥ', 'Ñĩ'] +['âĢĶ', 'ĊĊ'] +['ĠChar', 'lotte'] +['by', 'e'] +['Ġ}', ');čĊčĊ'] +['ĠV', 'ik'] +['ĠB', 'row'] +['Ġl', 'v'] +['ĠG', 'ib'] +['-w', 'ing'] +['GL', 'IGENCE'] +['(I', 'l'] +['ĠEngine', 'er'] +['.W', 'ait'] +['ĠP', 'ictures'] +['Ġr', 'het'] +['Ġth', 'ermal'] +['Ġpr', 'aise'] +['<', '>();ĊĊ'] +['ĠSp', 'ider'] +['P', 
'ause'] +['ĠB', 'aker'] +['Ġsl', 'ower'] +['Ġ}', ']Ċ'] +['_en', 'queue'] +['Ġdisappe', 'ared'] +['ĠT', 'icket'] +['IN', 'UX'] +['_LOC', 'AL'] +['аÑģ', 'Ñģ'] +['@Inject', 'able'] +['comm', 'unity'] +['Gesture', 'Recognizer'] +['åĽ', '½'] +['Ġsca', 'les'] +['Ġ-', '('] +['/', "'+"] +['ĠS', 'it'] +['Ġexecut', 'ives'] +['ard', 'ing'] +['Ġad', 'vers'] +['Ġback', 'wards'] +['ĉ', 'context'] +['ĠH', 'amp'] +['ĠP', 'F'] +['ĠDe', 'ck'] +['ĠCra', 'ig'] +['A', 'merican'] +['Ġb', 'ell'] +['Ġpro', 'l'] +['uf', 'en'] +['Ġr', 'ng'] +['ar', 'shal'] +['ĠSim', 'ply'] +['first', 'name'] +['sh', 'ore'] +['J', 'uly'] +['Ġmort', 'ality'] +['ĠâĨĴ', 'ĊĊ'] +['Help', 'ers'] +['Ġbench', 'mark'] +['em', 'ade'] +['Ġorganis', 'ations'] +['.g', 'son'] +['ĠText', 'Field'] +['Ġciv', 'ilians'] +['.Array', 's'] +['ĠMiss', 'issippi'] +['Ġinter', 'mediate'] +['get', 'User'] +['_cl', 'uster'] +['Rel', 'ative'] +['fore', 'ign'] +['.querySelector', 'All'] +['Fore', 'ignKey'] +['Ġreason', 'ably'] +['--------', '-Ċ'] +['C', 'ards'] +['ĠK', 'am'] +['ĠTh', 'or'] +['Ġroll', 'er'] +['-e', 'lement'] +['ĠC', 'urrency'] +['dd', 'ie'] +['ALL', 'Y'] +['ĠR', 'A'] +['Ġper', 'met'] +['aa', 'aa'] +['Ġhom', 'ework'] +['ĠV', 'it'] +['Ġm', 'old'] +['ĠF', 'er'] +['[', 'start'] +['Ġstatist', 'ical'] +['Ġsc', 'ary'] +['_H', 'OME'] +['.B', 'egin'] +['Con', 'struct'] +['ogen', 'ic'] +['ĠDEAL', 'INGS'] +['Ġtamb', 'ién'] +['ix', 'on'] +['.', 'ind'] +['ac', 're'] +['Ġtransform', 's'] +['ĠN', 'ap'] +['.B', 'lock'] +['uss', 'ia'] +['pir', 'ation'] +['ul', 'ent'] +['Ġce', 'il'] +['Cl', 'ause'] +['na', 'ire'] +['T', 'ES'] +['Ġne', 'at'] +['ST', 'D'] +['ĠReg', 'Exp'] +['per', 'form'] +[':', ')'] +['Ġun', 'ions'] +['Ġs', 'ublic'] +['Ġw', 'inds'] +['lo', 'ating'] +['g', 'lich'] +['Ġp', 'agination'] +['S', 'kill'] +['App', 'ly'] +['ĠOper', 'ator'] +['ist', 'ogram'] +['Ġqual', 'ities'] +['C', 'ross'] +['Ġde', 'com'] +['],', '"'] +['ĠJ', 'uan'] +['.mod', 'al'] +['.Ch', 'ild'] +['ĠRog', 'er'] +['STIT', 'UTE'] +[':CGRect', 'Make'] +['a', 
'lette'] +['Ġst', 'a'] +['as', 'ide'] +['Ġbl', 'ur'] +['ĠW', 'a'] +['if', 'etime'] +['re', 'ed'] +['control', 's'] +['Ġb', 'ins'] +['Ġп', 'ол'] +['*/', ',Ċ'] +['U', 'IS'] +['ĠR', 'ou'] +['ĠDem', 'o'] +['-', 'awesome'] +['ĠCh', 'ain'] +['Ġh', 'asta'] +['ĠB', 'art'] +['.', 'KEY'] +['Ġvend', 'ors'] +['nof', 'ollow'] +['ĠD', 'est'] +['_b', 'uilder'] +['Ġarg', 'ues'] +['_', 'answer'] +['g', 'oto'] +['ĠRES', 'ULT'] +['ĠM', 'ON'] +['Ġp', 'oder'] +['o', 'ons'] +['_C', 'ASE'] +['Ġrep', 'lic'] +['Ġfin', 'ancing'] +['ĠD', 'ATE'] +['c', 'ern'] +['_tr', 'ack'] +['t', 'ies'] +['/', 'logo'] +['ĠNE', 'GLIGENCE'] +['get', 'Type'] +['>', 'T'] +['b', 'et'] +['g', 'irl'] +['ĠINCIDENT', 'AL'] +['-s', 'ite'] +['.tr', 'igger'] +['ĠL', 'isa'] +['_input', 's'] +['Ġrel', 'atives'] +['Logged', 'In'] +['Config', 'ure'] +['I', 'K'] +['.', 'accept'] +['Res', 'ume'] +['ĠD', 'raft'] +['Ġ*', '>('] +['ĠW', 'A'] +['ed', 'ian'] +['ern', 'ess'] +['ĠLayout', 'Inflater'] +['*/', 'čĊčĊ'] +['oth', 'y'] +['Ġoblig', 'ation'] +['Sub', 'scribe'] +['Ġth', 'umbnail'] +['ex', 'ist'] +['Ġins', 'isted'] +['ĠU', 'ICollectionView'] +['ĠAng', 'ular'] +['Ġtable', 'ts'] +['ĠImp', 'act'] +['ãĢį', 'ĊĊ'] +['ah', 'o'] +['Ġcharacter', 'istic'] +['g', 'd'] +['Ġ=', '================================================'] +['our', 't'] +['`', '.'] +['App', 'ro'] +['Co', 'ordinate'] +['Rem', 'ember'] +['Ġmar', 'ine'] +[']', "=='"] +['ĠAdmin', 'istrator'] +['.get', 'Default'] +['Ġforg', 'ot'] +['ĠStruct', 'ure'] +['V', 'ue'] +['ars', 'ing'] +['m', 'oment'] +['k', 'w'] +['_c', 'ursor'] +['Att', 'ack'] +['Ġath', 'letic'] +['Ġdiagn', 'osed'] +['Ġend', 'e'] +['åĪ', 'łéϤ'] +['H', 'ouse'] +['ĠP', 'ARAM'] +['Ġw', 'iki'] +['ĠO', 'pp'] +['Ġcons', 'ervation'] +['Ġs', 'nd'] +['_t', 'em'] +['sub', 'str'] +['ĠC', 'ape'] +['.s', 'im'] +['UT', 'ION'] +['an', 'an'] +['âĢĻ', 'un'] +['Ġg', 'y'] +['-', 'work'] +['Ġcomp', 'elling'] +["='", '#'] +['ĉs', 'ub'] +['Ġdirect', 'ories'] +['íĬ', '¸'] +['Ġtouch', 'es'] +['out', 'ines'] +['.C', 'ollection'] 
+['s', 'chedule'] +['.l', 'at'] +['ĠDo', 'ctrine'] +['CA', 'A'] +['ĠRe', 'fer'] +['Ġshift', 's'] +['Ġlik', 'elihood'] +['pre', 'ter'] +['ĠF', 'emale'] +['Ġinter', 'cept'] +['Ġl', 'ou'] +['çĻ', '»'] +['Ġr', 'ug'] +['ĠC', 'rown'] +['Ġ************************************************************************', '****'] +['-', 'product'] +['Ġprompt', 'ed'] +['ung', 'le'] +['d', 'ocker'] +['ĠT', 'u'] +['ĠUn', 'ique'] +['_', 'Error'] +['ul', 'os'] +['Ġâ', 'Ħ'] +['Ġ(', '`'] +['Get', 'ting'] +['_s', 'cal'] +['ĠEn', 'h'] +['ü', 't'] +['Ġsust', 'ained'] +['Ġp', 'atches'] +['Ġpros', 'per'] +['ĠG', 'aza'] +['_l', 'ight'] +['Ġin', 'cons'] +['--------', 'Ċ'] +['ĉĉ', 'ĠĠĠĠĠĠ'] +['S', 'F'] +['C', 'N'] +[':', '";Ċ'] +['ĠColl', 'ins'] +['(', '*)'] +['Ġcomp', 'ilation'] +["']", 'čĊ'] +['Ġcon', 'sequence'] +[',', '...'] +['Ġd', 'm'] +['ĠB', 'LOCK'] +['Cl', 'uster'] +['Ġsk', 'i'] +['(arg', 'c'] +['T', 'uple'] +['Ġjo', 'ins'] +['ĠSher', 'iff'] +['W', 'ar'] +['ind', 'i'] +['Ġcomment', 'ed'] +['H', 'OST'] +['Ġinv', 'itation'] +['apan', 'ese'] +['Ġperm', 'its'] +['preced', 'ented'] +['_z', 'one'] +['ĠA', 'my'] +['_R', 'D'] +['Min', 'imum'] +['Ġinv', 'ocation'] +['.en', 'able'] +['icht', 'en'] +['-', 'owned'] +['"', 'id'] +['_PO', 'INTER'] +['F', 'ac'] +['Ġspecific', 'ations'] +['Ġnom', 'ination'] +['Ġg', 'p'] +['<', '('] +['Ġrob', 'ots'] +['ĠJ', 'erry'] +['Ġhold', 'ers'] +['Ġw', 'and'] +['c', 'ms'] +['Ġ}', '))Ċ'] +['.To', 'ast'] +['ĠI', 'List'] +['B', 'ased'] +['z', 'oom'] +['/', 'style'] +['ĠBe', 'ck'] +['M', 'en'] +['Ġcontrib', 'uting'] +['Ġund', 'o'] +['ĠO', 'H'] +['Ġadd', 'Object'] +['Ġe', 'igen'] +['sign', 'up'] +['éĶ', 'Ļ'] +['Ġdist', 'ant'] +['PAR', 'ATOR'] +['ĠM', 'ari'] +['Ġm', 'á'] +['E', 'mp'] +['ó', 's'] +['Ġì', 'Īĺ'] +['ev', 't'] +['+', 'j'] +['p', 'ark'] +['ĠSt', 'ay'] +['ĠD', 'un'] +['Ġso', 'y'] +['>', '%'] +['az', 'ines'] +['Ġti', 'empo'] +['(m', 'e'] +['p', 'resent'] +['.Th', 'is'] +['Ġedit', 'ors'] +['F', 'IELD'] +['.W', 'ork'] +['ĠUn', 'iverse'] +['Ġdr', 'unk'] +['.t', 
'imer'] +['Ġalter', 'ed'] +['ĠN', 'ar'] +['ëł', '¥'] +['.Act', 'ive'] +['id', 'or'] +['ç', 'Ń'] +['.delta', 'Time'] +['Ġawk', 'ward'] +['&', 'quot'] +['ĠSaf', 'ari'] +['Ġtr', 'icks'] +['MENT', 'S'] +['div', 'ision'] +['Ġvary', 'ing'] +['ĠHigh', 'way'] +['Ġphotograph', 'er'] +['ĠSt', 'ewart'] +['Ġlast', 'ing'] +['.P', 're'] +['.amazon', 'aws'] +['ĠL', 'uck'] +['.D', 'escription'] +['ĠN', 'az'] +['n', 'eg'] +['Ġc', 'ó'] +['<<"', '\\'] +['ĠSur', 'v'] +['ĠU', 'nc'] +['Rec', 'ipe'] +['.Border', 'Style'] +['Ġmod', 'ifications'] +['-', 'at'] +['AT', 'FORM'] +['h', 'dr'] +['ak', 'o'] +['Ġsublic', 'ense'] +['ĠJ', 'ump'] +['Ġbe', 'im'] +['ĠMan', 'hattan'] +['.', 'bool'] +['_h', 'w'] +['ÑĤ', 'ÑĮ'] +['B', 'in'] +['Ġg', 'ateway'] +['"', '":'] +['ĠU', 'IS'] +[':"', '+'] +['-', 'def'] +['ĠReg', 'ular'] +['/', 'testing'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ'] +['string', 'stream'] +['Ġdis', 'par'] +['Ġmob', 'il'] +['-', 'read'] +['ĠAd', 'apter'] +['ĠCh', 'ampions'] +['Ġsched', 'uler'] +['Ġk', 'ills'] +['ĠM', 'ultiple'] +['ir', 'ror'] +['Ġgod', 's'] +['AD', 'O'] +['ak', 'te'] +['ĠUs', 'uario'] +['.c', 'ircular'] +['Ġre', 'cept'] +['ĠEx', 'pr'] +['Ġelder', 'ly'] +['Ġnic', 'ely'] +['Ġbest', 'e'] +['W', 'ant'] +['Ġclass', 'ical'] +['.s', 'prite'] +['obj', 'c'] +['ĠM', 'ason'] +['Ġsist', 'ema'] +['.Bl', 'ack'] +['es', 'o'] +['ĠZe', 'it'] +['Ġdiv', 'id'] +['Ġent', 'ers'] +['_sub', 'ject'] +['ĠPlan', 'et'] +['.w', 'arning'] +['ĠG', 'ram'] +['_t', 'okens'] +['Ġhousehold', 's'] +['_c', 'ustomer'] +['user', 'Name'] +['c', 'ross'] +['Ġp', 'ione'] +['Ġass', 'ists'] +['_S', 'M'] +['ib', 'o'] +['Ġlo', 'yal'] +['Ġuse', 'less'] +['#', 'elif'] +['ĠUlt', 'imate'] +['C', 'ome'] +['g', 'el'] +['Ġd', 'ich'] +['xy', 'z'] +['ik', 'el'] +['ob', 'ra'] +['_s', 'can'] +['ĠInter', 'ior'] +['ĠN', 'ice'] +['Ġpl', 'ac'] +['ĉt', 'arget'] +['Ġvir', 'al'] +['ass', 'o'] +['()', '/'] +['und', 'e'] +['ĠAd', 'obe'] +['O', 's'] +['vis', 'ited'] +['ĠO', 'W'] +['ĠFe', 'ed'] +['ĠSe', 'quence'] 
+['Ġman', 'ages'] +['in', 'son'] +['ĠLouis', 'iana'] +['{', '})'] +['ĠH', 'ab'] +['ĠL', 'D'] +['Ġb', 'ip'] +['pr', 'ites'] +['(e', 'lem'] +['.h', 'ibernate'] +['él', 'é'] +['Ġoh', 'ne'] +['_trans', 'action'] +['Ġann', 'unci'] +['P', 'ublished'] +['ĠH', 'onda'] +['ĠT', 'am'] +['ĠP', 'acket'] +['_', 'selector'] +['Ġchalleng', 'ed'] +['Process', 'ing'] +['-h', 'over'] +['Ġtr', 'ainer'] +['_c', 'ancel'] +['ĠNS', 'Dictionary'] +['ab', 'ric'] +['ĠM', 'LS'] +['_s', 'ensor'] +['Ġshr', 'ink'] +['ĠF', 'X'] +['th', 'reshold'] +['ĉH', 'X'] +['-m', 'ark'] +['`', '.`'] +['S', 'cheme'] +['(f', 'ull'] +['_w', 'riter'] +['ĠS', 'ys'] +['Ġf', 'led'] +['ĠC', 'in'] +['-w', 'idget'] +['ĠPre', 'vious'] +['G', 'ender'] +['_', 'question'] +['Fe', 'ed'] +['Ġscr', 'ut'] +['(p', 'refix'] +['ãĢĤ', 'ãĢĤ'] +['Ġin', 'fections'] +['Part', 's'] +['Ġhier', 'archy'] +['_DE', 'LETE'] +['ĠPat', 'ient'] +['_p', 'ay'] +['Ġprom', 'oted'] +['Ġì', 'ĭ'] +['Ġcivil', 'ian'] +['Ġagricult', 'ure'] +['ĠP', 'iece'] +['Ġst', 'ance'] +['uts', 'che'] +['Ass', 'ign'] +['.A', 'CTION'] +['F', 'ig'] +['_r', 'adius'] +['ĠS', 'ync'] +['du', 'cer'] +['f', 'ailure'] +['ens', 'ed'] +['pt', 'ime'] +['B', 'M'] +['_dat', 'etime'] +['qu', 'ivo'] +['QUE', 'UE'] +['èĢ', 'ħ'] +['Ap', 'pear'] +['Ġsum', 'mit'] +[':', 'void'] +['Ġv', 'ine'] +['è®', '¤'] +['on', 'ne'] +['_TR', 'ANS'] +['.g', 'reen'] +['_', 'cc'] +['Ġhung', 'ry'] +['Ġ"', '>'] +['()', ');čĊčĊ'] +['Ex', 'tract'] +['iz', 'ens'] +['Ġsol', 'ver'] +['Not', 'ify'] +['Ġeng', 'lish'] +['ĠSh', 'opping'] +['inter', 'faces'] +['RE', 'Q'] +['Ġil', 'leg'] +['ĠUI', 'ImageView'] +['Ġdis', 'connect'] +['ĠUnt', 'il'] +['ĠConserv', 'ative'] +['@', 'Column'] +['Ġshift', 'ed'] +['Ġ:', 'čĊ'] +['Ġf', 'ich'] +['Ġd', 'la'] +['Ġsh', 'oe'] +['"),', 'čĊ'] +['ular', 'ity'] +['_RE', 'SP'] +['We', 'ather'] +['UI', 'Application'] +['.', 'iterator'] +['Ġag', 'ing'] +['.P', 'arent'] +['ow', 'ie'] +['(e', 'qual'] +['ĠCon', 'v'] +['/', 'default'] +['Ġmeas', 'uring'] +['.pre', 'v'] +['.Is', 'Valid'] +['.F', 
'at'] +['Ġs', 'Äĥ'] +['key', 'words'] +['with', 'out'] +['Ġso', 'vere'] +['Ġex', 'changes'] +['Ġm', 'elt'] +['Ġis', 'lands'] +['ĠInt', 'egr'] +['Ġjump', 'ing'] +['Ġg', 'le'] +['Ġjournal', 'ism'] +['Ġd', 'ated'] +['Local', 'ized'] +['ĠRef', 'resh'] +['Part', 'icle'] +['Ġa', 'a'] +['ĠSTR', 'ICT'] +['Ġb', 'od'] +['.Pro', 'cess'] +['_A', 'UTO'] +['ĠP', 'ublished'] +['e', 'very'] +['Ġtechn', 'ological'] +['ls', 'x'] +['Ġir', 'rit'] +['Add', 'itional'] +['Ġdel', 'imiter'] +['_l', 'anguage'] +['-', 'area'] +['bo', 'ys'] +['ĠT', 'ube'] +['Ġw', 'at'] +['Ġmechan', 'ics'] +['_', 'owner'] +['Sp', 'ell'] +['ĠSt', 'ories'] +['.Append', 'Line'] +['Table', 'View'] +['h', 'em'] +['st', 'ick'] +['oll', 'ower'] +['I', 'FF'] +['ĠU', 'V'] +['oll', 'ision'] +['S', 'UB'] +['Ġcompar', 'able'] +['Ġdon', 'de'] +['s', 'ales'] +['ll', 'vm'] +['Ġ}', '],Ċ'] +['OTT', 'OM'] +['ĠPur', 'pose'] +['L', 'ab'] +['Ġinterview', 'ed'] +['o', 'is'] +['as', 'il'] +['.set', 'Id'] +['ĠIn', 'struction'] +['--', '>'] +['ĠMod', 'ified'] +['ation', 'ally'] +['ĠMe', 'eting'] +['è¯', '¯'] +['#', 'region'] +['Ġrout', 'ing'] +['.f', 'ocus'] +['ĠYou', 'th'] +['<', 'D'] +['ĠN', 'ag'] +['contact', 's'] +['Ġform', 'ing'] +['Ġm', 'ie'] +["',['", '../'] +['ĠB', 'P'] +['Ġapp', 'et'] +['ĠTe', 'acher'] +['ĠT', 'P'] +['Ġann', 'ually'] +['outed', 'EventArgs'] +['ĠSpe', 'aker'] +['Ġre', 'name'] +['CF', 'G'] +['("', '//'] +['æİ', '¥'] +['/p', 'ages'] +['Ġpr', 'és'] +['ĠSp', 'ell'] +['.All', 'ow'] +['ĠINT', 'ERRU'] +['Ġ(', '#'] +['âĢĻ', 'ĊĊ'] +['_G', 'eneric'] +['.im', 'show'] +['_t', 'im'] +['-', 'face'] +['(&', '('] +['atin', 'um'] +['Ġrevolution', 'ary'] +['ĠH', 'ours'] +['r', 'ain'] +['Ġany', 'time'] +['Ġab', 'b'] +['.j', 'sp'] +['Scroll', 'View'] +['ĠTr', 'uth'] +['Ġanticip', 'ated'] +['Ġacc', 'ent'] +['.', 'checked'] +['Ġspec', 'ifies'] +['Ġca', 'f'] +['Ġcell', 'padding'] +['Ġcook', 'ed'] +['ĠH', 'ugh'] +['pe', 'ek'] +['_R', 'ATE'] +['Ġd', 'orm'] +['/', 'čĊ'] +['IV', 'ITY'] +['.Cont', 'roller'] +['(p', 'art'] +['.con', 
'straint'] +['Ġinv', 'asion'] +['MO', 'VE'] +['Ġgl', 'uc'] +['l', 'ename'] +['Ġam', 'en'] +['eng', 'lish'] +['ĠSw', 'itzerland'] +['";ĊĊ', 'Ċ'] +['pe', 'st'] +['.col', 'lect'] +['N', 'ib'] +['ĠD', 'ict'] +['ĠE', 'mb'] +['(sub', 'ject'] +['Ġoutr', 'age'] +['Ġdec', 'iding'] +['Ġsent', 'enced'] +['F', 'echa'] +['"', 'A'] +['Ġqu', 'er'] +['Ġfont', 'Family'] +['Ġqu', 'adr'] +['-', 'Y'] +['_C', 'ACHE'] +['Ġanaly', 'zed'] +['Ġg', 'aining'] +['ĠAgain', 'st'] +['ĠSou', 'l'] +['ta', 'u'] +['Ġlight', 'weight'] +['ĠT', 'F'] +['ĠEffect', 's'] +['.T', 'ypes'] +['.add', 'Class'] +['Ġv', 'egan'] +['é', 'ģ'] +[".'", '"'] +['ĠExpl', 'orer'] +['.d', 'etect'] +['.sh', 'ift'] +['Ġoblig', 'ations'] +['last', 'Name'] +['Ġassoci', 'ations'] +['ĠTime', 'Span'] +['un', 'ter'] +['ĠF', 'resh'] +['Compat', 'ible'] +['P', 'ub'] +['id', 'ges'] +['.', 'option'] +['var', 'i'] +['.hash', 'Code'] +['Ġg', 'eb'] +['.', 'section'] +['-', 'not'] +['ĠSub', 'mit'] +['T', 'N'] +['reg', 'istry'] +['_m', 'edia'] +['Ġn', 'aj'] +['ff', 't'] +['Ġm', 'ate'] +['-th', 'ird'] +['Ġp', 'ockets'] +['est', 'a'] +['Ġb', 'ent'] +['ĠN', 'ord'] +['Ġretail', 'ers'] +['ĠMor', 'ris'] +['.""', '"ĊĊ'] +['W', 'rong'] +['Ġ', 'ÅĽ'] +['R', 'ay'] +['.', 'ec'] +['ĠB', 'ind'] +['_H', 'AND'] +['(n', 'on'] +['is', 'Valid'] +['Ġsimilar', 'ly'] +['_L', 'IMIT'] +['Ġdynam', 'ics'] +['Ġdist', 'inction'] +['ãģ', 'Ĩ'] +['<', 'N'] +['Ġor', 'th'] +['ĠToy', 'ota'] +['ĠK', 'ate'] +['ĠL', 'S'] +['or', 'ie'] +['ĠSpr', 'ings'] +['Ġf', 'reak'] +['last', 'name'] +['_M', 'ULT'] +['-st', 'ep'] +['"', '('] +['AD', 'DR'] +['Ġentert', 'aining'] +['_CON', 'F'] +['Ġdec', 'oded'] +['Ġst', 'reak'] +['Ġwait', 'ed'] +['Ġnot', 'ified'] +['rodu', 'ced'] +['vis', 'ual'] +['.Layout', 'Params'] +['æ', '°'] +['es', 'ian'] +['f', 'its'] +['s', 'pring'] +['ĠBern', 'ie'] +['User', 'Defaults'] +['Ġped', 'est'] +['Ap', 'pearance'] +['ĠW', 'iki'] +['ĠNOT', 'ICE'] +['Ġs', 'sh'] +['Ġdur', 'ante'] +['ĠZ', 'ip'] +['ı', 'r'] +['ĠNAT', 'O'] +['Ġtw', 'elve'] +['Ġro', 'yal'] +['ï', 
'¸'] +['Ġmer', 'chant'] +['ĠF', 'urniture'] +["']", '),Ċ'] +[',', 'X'] +['Ġfold', 'ers'] +['ĠG', 'ate'] +['ĉf', 'unc'] +['p', 'ick'] +['_us', 'uario'] +['ĠV', 'erm'] +['ment', 'ion'] +['ur', 'pose'] +['Ġalert', 's'] +['x', 'ious'] +['_s', 'ig'] +['ĠF', 'u'] +['Ġ(', ':'] +['Ġd', 'umb'] +['åħ', '³'] +['Ġaccur', 'ately'] +['éĩ', 'į'] +['R', 'B'] +['-s', 'creen'] +['ĠV', 'ER'] +['j', 'our'] +['Ġrom', 'ance'] +['uc', 'ceed'] +['.', 'choice'] +['Ġad', 'ip'] +['_d', 'ims'] +['Serial', 'izable'] +['ãĤ', 'ĭ'] +['.j', 'ob'] +['Ġpro', 'g'] +['uch', 'ar'] +['Ġg', 'ently'] +['ĠR', 'SS'] +['ict', 'ured'] +['_ENABLE', 'D'] +['ĉ', 'label'] +['aw', 'ks'] +['ĠEn', 'sure'] +['rem', 'ember'] +['ìł', 'ķ'] +['Ġtrans', 'mit'] +['{{', '$'] +['.Trans', 'action'] +['ur', 'se'] +['_rel', 'ative'] +['Ġs', 'ized'] +['ĠX', 'X'] +['ĠPr', 'incess'] +['ĠL', 'arry'] +['Ġpr', 'ó'] +['ĠÑģÑĤ', 'ÑĢ'] +['Ġs', 'isters'] +['estr', 'uct'] +['Ġcheck', 'point'] +[':', 'length'] +['ĠCar', 'los'] +['/', 'icon'] +['_T', 'ARGET'] +['T', 'okens'] +['Ġpat', 'ience'] +['ĠSe', 'lected'] +['q', 'ty'] +['.show', 'Message'] +['Ġwild', 'life'] +['ĠP', 'rops'] +['b', 'm'] +['-', 'arrow'] +['Ġpar', 'cel'] +['fire', 'base'] +['ĠBen', 'jamin'] +['cess', 'o'] +['.t', 'im'] +['ĠG', 'arc'] +['.', 'any'] +['ĠHOW', 'EVER'] +['ĠK', 'o'] +['Ġgrab', 'bed'] +['_f', 'rames'] +['Ġobject', 'AtIndex'] +['ĠADV', 'ISED'] +['Ġsub', 'ur'] +['ĉ', 'GL'] +['Ġ})', '}Ċ'] +['-l', 'ength'] +['ìĭ', 'ľ'] +['ĠPot', 'ter'] +['_b', 'uff'] +['.g', 'ui'] +['ĠEnc', 'oding'] +['E', 'lect'] +['-m', 'essage'] +['Ġ', '�'] +['Ġ', 'ÈĻi'] +['ĠArgument', 'NullException'] +['а', 'ÑĨи'] +['Ġmin', 'imize'] +['Ġrespond', 'ing'] +['$_', "['"] +['ĠInd', 'ividual'] +['á', 'c'] +['ĠIN', 'TER'] +['Ġmast', 'urb'] +['ĠB', 'in'] +["('", '$'] +['ëĵ', 'ľ'] +['Ġopen', 'ly'] +['Ġ>', '<'] +['Ġun', 'to'] +['olog', 'ically'] +['ĠM', 'ul'] +['VID', 'IA'] +['Ġsl', 'im'] +['ĠCommission', 'er'] +['(', 'on'] +['Ġunder', 'neath'] +['/', 'db'] +['v', 'ote'] +['(', 'Message'] +['ĠP', 
'ope'] +['Def', 'ined'] +['Ġsw', 'ift'] +['ur', 'f'] +['Ġadapt', 'ed'] +['SE', 'L'] +['Ġreven', 'ues'] +['Ġdiv', 'ine'] +['=', 'y'] +['Grad', 'ient'] +['_', 'act'] +['Ġ/*!', '<'] +['Ġpoly', 'gon'] +['ĠF', 'DA'] +['ĠC', 'arr'] +['at', 'ables'] +['(std', 'out'] +['Ġrefr', 'iger'] +['Ġco', 'ordin'] +['avor', 'ites'] +['ÑĪ', 'и'] +['Ġcompass', 'ion'] +['ĠPOSS', 'IBILITY'] +['-', 'secondary'] +['ur', 'acy'] +['Ġcomp', 'romise'] +['_A', 'V'] +['_', 'os'] +['Ġbes', 'ide'] +['ĥ', 'Ŀ'] +['Ġl', 'n'] +['.pl', 'ugins'] +['Cap', 'acity'] +['al', 'ah'] +['.b', 'in'] +['ĠC', 'RC'] +['_b', 'alance'] +['Ġflex', 'Direction'] +['Ġam', 'bit'] +['Ġnick', 'name'] +['ĠFor', 'ces'] +['C', 'LE'] +['ĠSh', 'ell'] +['Ġs', 'ail'] +['ĠW', 'riter'] +['ĠA', 'lice'] +['d', 'w'] +['ĠInd', 'ians'] +['ĠMar', 'shall'] +['_S', 'RC'] +['Ġnormal', 'ized'] +['ĠJ', 'ag'] +['ãĤ', 'Ĵ'] +['ze', 'it'] +['r', 'pc'] +['ÃŃ', 'c'] +['.in', 'line'] +['Ġtrav', 'ers'] +['_n', 'umeric'] +['Ġutil', 'ities'] +['Ġev', 'ac'] +['IN', 'PUT'] +['ĉ', 'register'] +['M', 'X'] +['ĠCamp', 'bell'] +['Ġdatas', 'ets'] +['Ġdem', 'anded'] +['Ġinitial', 'State'] +['g', 'an'] +['Ġe', 'i'] +['Un', 'expected'] +['-', 'web'] +['tr', 'ait'] +[',', 'Y'] +['ĠT', 'odd'] +['Ġske', 'leton'] +['Ġoptim', 'ize'] +['ç¬', '¬'] +['ĠU', 'pon'] +['ĠSt', 'Object'] +['Ġap', 'lic'] +[".'", '', 'P'] +['v', 'ron'] +['.', 'UN'] +['Ġpaint', 'er'] +['izar', 're'] +['Ġl', 'av'] +['Ġp', 'om'] +['p', 'reg'] +['=', 'function'] +['(', 'serial'] +['ific', 'a'] +['um', 'ing'] +['åľ', '°'] +['ãģ', 'Ĥ'] +['-', 'op'] +['U', 'CH'] +['ĠH', 'end'] +['.prop', 'Types'] +['Ġy', 'o'] +['Ġrout', 'ines'] +['Ġcar', 'ing'] +['S', 'em'] +['Ġres', 'erves'] +['Ġprior', 'ities'] +['red', 'its'] +['IST', 'R'] +['Content', 'Type'] +['ĠSch', 'w'] +['/', 'media'] +['Ġe', 'str'] +['Ġclim', 'bing'] +['-', 'week'] +['cher', 'che'] +['s', 'ensor'] +['To', 'Array'] +['ĠMont', 'real'] +['Ġcloud', 's'] +['ĠInject', 'able'] +['ĠR', 'ice'] +['Ġpropag', 'anda'] +['_pro', 'vider'] +['Ġind', 'oor'] 
+['Ġin', 'aug'] +['Ġdipl', 'om'] +['Ġmess', 'aging'] +['_m', 'ut'] +['å', '¦Ĥ'] +['Ġk', 'w'] +['ON', 'S'] +['ari', 'ans'] +['R', 'PC'] +[')', ']čĊ'] +['-r', 'ay'] +['ĠS', 'or'] +['m', 'all'] +['Ġmarket', 'place'] +['Ġv', 'tk'] +['M', 'a'] +['og', 'an'] +['ig', 'i'] +['Ġspons', 'ored'] +['ĠD', 'ani'] +['.S', 'EVER'] +[">'", '.$'] +['m', 'ultipart'] +['ĠW', 'ol'] +['Ġtable', 'Name'] +['ĠUser', 'name'] +['Background', 'Color'] +['Ġf', 'right'] +['_E', 'MAIL'] +['Sept', 'ember'] +['_val', 's'] +['op', 'ia'] +['Ġsp', 'otted'] +['-', 'Ch'] +['Ġdata', 'Source'] +['/', '"Ċ'] +['ек', 'ÑĤ'] +['ĠRequest', 'Method'] +['ĠRe', 'place'] +['-d', 'o'] +['ah', 'n'] +['ĠPh', 'D'] +[']', '.ĊĊ'] +['N', 'ON'] +['g', 'ement'] +['ĠTh', 'r'] +['Ġquiet', 'ly'] +['Ġtort', 'ure'] +['Ġte', 'as'] +['ĠC', 'Y'] +['Ġa', 'tr'] +['develop', 'ment'] +['-d', 'etail'] +['Ġlight', 'er'] +['Ġarg', 'uing'] +['Ġdes', 'erves'] +['Ġcur', 'riculum'] +['_CON', 'TEXT'] +['ÅĤ', 'y'] +['H', 'ITE'] +['ĉ', 'ID'] +['/', 'uploads'] +['Ġt', 'its'] +['re', 'o'] +['_d', 'rop'] +['.', 'UTF'] +['Ġpick', 'up'] +['Ġgro', 'cery'] +['ĠP', 'ure'] +['Ġeas', 'iest'] +['Ph', 'il'] +['.f', 'eature'] +['("', '*'] +['Ġinvest', 'or'] +['t', 'ok'] +['Ġj', 'ar'] +['L', 'os'] +['âĢĶâĢĶâĢĶâĢĶ', 'âĢĶâĢĶâĢĶâĢĶ'] +['.', 'queue'] +['-s', 'peed'] +['M', 'al'] +['um', 'blr'] +['ĠCON', 'ST'] +['ĠH', 'RESULT'] +['ĠD', 'ance'] +['(file', 'Path'] +['Ġattrib', 'uted'] +['à¥', 'į'] +['ĠB', 'und'] +['co', 'ins'] +['Ġs', 'ão'] +['Ġp', 'ir'] +['person', 'al'] +['Ġpre', 'lim'] +['Ġprop', 'ose'] +['ĠT', 'L'] +[']', '])'] +['ĠSub', 'scription'] +['ĠK', 're'] +[',', 'len'] +['.First', 'OrDefault'] +[')', '--'] +['_product', 's'] +['.Get', 'Bytes'] +['Sh', 'ip'] +['Ġenc', 'rypt'] +['ĠS', 'G'] +['ĠM', 'yst'] +['h', 'ir'] +['Ġiter', 'ate'] +['Ġint', 'end'] +['.mock', 'ito'] +['Ġch', 'apters'] +['(', 'angle'] +['ĠV', 'lad'] +['è®', '¾'] +["'", '.ĊĊ'] +['Response', 'Body'] +['ĠAb', 'd'] +['de', 'al'] +['Ġbar', 'riers'] +['-out', 'line'] +['b', 'ill'] +['ĠF', 
'alls'] +['_se', 'cond'] +['.', 'include'] +['.', 'ceil'] +['Ġoccup', 'ation'] +['ph', 'ony'] +['.move', 'To'] +['ĠJenn', 'ifer'] +['AST', 'ER'] +[';', '"><'] +['ĠEn', 'abled'] +['Ġtermin', 'ate'] +['ĠI', 'o'] +['l', 'ations'] +['ĠTHE', 'ORY'] +['Ġear', 'liest'] +['Ġr', 'ack'] +['ĠSc', 'ar'] +['sh', 'ake'] +['ch', 'ip'] +['Ġu', 'v'] +['Ġall', 'iance'] +['п', 'иÑģ'] +['ĠGOOD', 'S'] +['z', 'ione'] +['ĠV', 'I'] +['Ġ{', '-'] +['Ġfilter', 'ing'] +['Ġmis', 'con'] +['.Dock', 'Style'] +['Ġb', 'ush'] +['Ġj', 'unk'] +['æ', 'Į'] +['ĠQ', 'UE'] +['Ġhook', 's'] +['Ġfirm', 'ware'] +['Ġmiddle', 'ware'] +['d', 'ic'] +['ĠOak', 'land'] +['Ġarr', 'ives'] +['P', 'ayload'] +['p', 'ixel'] +[']', '|'] +['Ġstart', 'Date'] +['.P', 'RO'] +['_a', 'udio'] +['Ġmid', 'field'] +['igid', 'body'] +['ĠSw', 'iss'] +['ĠCl', 'ip'] +['ĠD', 'ump'] +['ĠText', 'Box'] +['Ġg', 'eh'] +['y', 'ield'] +['od', 's'] +['Ġrefer', 'endum'] +['Back', 'end'] +['ĠC', 'ream'] +['Ġdomin', 'ated'] +['ĠArch', 'ive'] +['Ġrid', 'ers'] +['.prepare', 'Statement'] +['Ġqu', 'ando'] +['Ġche', 'f'] +['w', 'iki'] +['in', 'el'] +['am', 'pling'] +['("', '\\\\'] +['Ġs', 'ag'] +['_pro', 'xy'] +['ãģ', 'ķ'] +['p', 'do'] +['.getElementsBy', 'TagName'] +['Ġdemonstr', 'ation'] +['ĠN', 'PC'] +['Ġarch', 'ivo'] +['end', 'ance'] +['Ġefficient', 'ly'] +['(', 'actual'] +['.t', 'ableView'] +['Ġm', 'ush'] +['Ġbe', 'ars'] +['_thread', 's'] +['j', 'as'] +['ah', 'un'] +['Ġne', 'ural'] +['Ġdesign', 'ing'] +['ĠG', 'DP'] +['Ġlift', 'ed'] +['çĽ', '®'] +['ĠJ', 'oint'] +['ĠIn', 'clude'] +['ĠGi', 'ants'] +['Ġwithdraw', 'al'] +['ĠR', 'ent'] +['n', 'ative'] +['ĠSe', 'ek'] +['gress', 'ion'] +['_C', 'PU'] +['\\', 'S'] +['ĠSh', 'ield'] +['Ġsol', 'ic'] +['Ġbo', 'om'] +['yect', 'o'] +['Ġmanufact', 'ure'] +['ĠâĢ', 'ĭ'] +['Ġb', 'box'] +['Ġearth', 'qu'] +['ollect', 'ors'] +[':@"', '%'] +['Ġlo', 'ops'] +['J', 'e'] +['alk', 'ing'] +['ĠWh', 'ats'] +['ĠBo', 'ys'] +['.', 'book'] +['ARG', 'E'] +['_p', 'ixel'] +['Ġsus', 'pects'] +['Î', '¹'] +['us', 'p'] +['ĠBM', 'W'] +['ie', 
'ces'] +['(p', 'erson'] +['å¼', 'Ģ'] +['é', '»'] +['ĠPod', 'cast'] +['Ġb', 'ou'] +['(', 'Item'] +['Ã', '»'] +['(', 'Input'] +['Http', 'Get'] +['Ġb', 'urg'] +[')', '^'] +['BO', 'ARD'] +['*/', ','] +['Ġg', 'ulp'] +['ĠB', 'enn'] +['Ġdeck', 's'] +['.status', 'Code'] +['Ġac', 'ute'] +['Ġh', 'ug'] +['ug', 'u'] +['Ġp', 'led'] +[',"', '%'] +['h', 'ape'] +['Ġз', 'ап'] +['ĠMain', 'e'] +['.re', 'al'] +['Ġd', 'alam'] +['ĠMin', 'or'] +['.F', 'loat'] +['dis', 'p'] +['Ġt', 'l'] +['Ġen', 'count'] +['=>', '$'] +['Ġf', 'g'] +['te', 'es'] +['ĠRec', 'omm'] +['ä', 'l'] +['Ġchem', 'istry'] +['Block', 's'] +['O', 'ID'] +['Ġfore', 'x'] +['ĠApp', 'end'] +['Ġ{', '*'] +['ĠSup', 'ply'] +['CG', 'Float'] +['(b', 'l'] +['Ġat', 'e'] +['ador', 'a'] +['Ġg', 'ust'] +['Ass', 'oci'] +['>', '.Ċ'] +['F', 'ETCH'] +['.s', 'erial'] +['widget', 's'] +['ard', 'less'] +['ie', 'fs'] +['_F', 'ULL'] +['ernet', 'es'] +['ĠP', 'red'] +['Ø', 'Ń'] +['äº', 'ĭ'] +['ub', 'ernetes'] +['ĠL', 'aura'] +['Ġl', 'abeled'] +['High', 'light'] +['Ġanno', 'ying'] +['/', 'update'] +['(d', 'escription'] +['Ġintim', 'id'] +['$', 'c'] +['"))', ')Ċ'] +['.A', 'P'] +['Ġ[]', '*'] +['ĠEX', 'IT'] +['.H', 'ost'] +['ĠOP', 'EN'] +['.send', 'Message'] +['_c', 'amera'] +['_t', 'ile'] +['Ġth', 'erm'] +['onom', 'ous'] +['Ġdis', 'adv'] +['Ġna', 'ar'] +['index', 'Of'] +['ĠP', 'P'] +['.prot', 'ocol'] +['AF', 'E'] +['Ġtext', 'ures'] +['################################', '################'] +['umb', 'ai'] +['.st', 'ats'] +['ĠG', 'E'] +['Ġi', 'e'] +['ĠST', 'D'] +['ĠM', 'ann'] +['.ref', 'lect'] +['K', 'B'] +['Ġd', 'ive'] +['.w', 'av'] +['/*', '----------------------------------------------------------------'] +['/', 'settings'] +['.l', 'ifecycle'] +['Ġda', 'ughters'] +['or', 'us'] +['ub', 'er'] +['N', 'ING'] +['st', 'ri'] +['ĠT', 'ip'] +['Ġz', 'n'] +['Ġswitch', 'ed'] +['in', 'et'] +['uff', 'y'] +['ĠTransport', 'ation'] +['(', 'conf'] +['fr', 'ica'] +['ĠX', 'L'] +['ĠLe', 'ad'] +['_per', 'cent'] +['<', 'Map'] +['Ġthr', 'ust'] +['or', 'b'] +['ik', 'k'] 
+['Ġtra', 'uma'] +['Access', 'or'] +['ĠF', 'it'] +['ĠString', 'Buffer'] +['ex', 'pl'] +['(s', 'creen'] +['Ġaud', 'iences'] +['ĠO', 'PTION'] +['_', 'round'] +['[', 'node'] +['be', 'h'] +['->', '__'] +['per', 'missions'] +['ĠD', 'etermine'] +['.M', 'an'] +['Ġadv', 'ances'] +['.', 'InputStream'] +['Ġstrong', 'est'] +['Ġe', 'Bay'] +['Ġ#', '-'] +['Ġdir', 'name'] +['ĠS', 'MS'] +['Ġmedic', 'ations'] +['Ġam', 'ended'] +['Ġchurch', 'es'] +['ĠImper', 'ial'] +['$', 'row'] +['ĠMad', 'ison'] +['ĠIn', 'sp'] +['Ġaff', 'air'] +['Ġpsych', 'ology'] +['v', 'h'] +['Ġsever', 'ity'] +['âĢ', 'IJ'] +['Ġstri', 'ps'] +['A', 'H'] +['vert', 'ising'] +['Ġcon', 'se'] +['IM', 'AGE'] +['ĠSt', 'ats'] +['ĉs', 'c'] +['.C', 'ursor'] +['Ġfree', 'ze'] +['ss', 'on'] +['(x', 'ml'] +['ĠSus', 'an'] +['.t', 'ile'] +['ed', 'ed'] +['ĠĠĠĠ', 'ĉĉĉ'] +['uel', 'le'] +['ĠMitch', 'ell'] +['b', 'ased'] +['Oper', 'and'] +['½', 'æķ°'] +['ĠF', 'F'] +['ĉstr', 'cpy'] +['ounc', 'es'] +['ild', 'o'] +['.execute', 'Query'] +['Ġapproach', 'ing'] +['ĠSe', 'ven'] +['Ġn', 'uts'] +['Ġr', 'ic'] +['ass', 'ignment'] +['Ġcalcul', 'ator'] +['ĠMur', 'phy'] +['ĠB', 'ou'] +['í', 'Ħ'] +['Ġbut', 't'] +['Ġt', 'icks'] +['Project', 's'] +['il', 'ib'] +['.text', 'Color'] +['m', 'ov'] +['_log', 'o'] +['(', 'template'] +['ĠIN', 'IT'] +['Ġimage', 'View'] +['scri', 'ptions'] +['OR', 'ITY'] +['Con', 'sumer'] +['Ġun', 'precedented'] +['Ġtour', 'ist'] +['Ġbr', 'on'] +['Ġcontract', 'or'] +['Ġlic', 'ence'] +['ĠN', 'am'] +['æ', '¯'] +['(', 'transform'] +['_AT', 'T'] +['P', 'ref'] +['ĠG', 'am'] +['Ġvess', 'els'] +['Ġh', 'av'] +['L', 'ater'] +['.To', 'Lower'] +['Ġurl', 's'] +['Ġbreak', 'down'] +['Ġpen', 'alties'] +['Ġf', 'oster'] +['ĠU', 'E'] +['Ġcl', 'ue'] +['com', 'ed'] +['åIJį', 'ç§°'] +['-m', 'ain'] +['Ġp', 'ts'] +['Ġcount', 'ed'] +['ict', 's'] +['/', 'post'] +['Ġget', 'attr'] +['Ġp', 'ing'] +['ANCE', 'L'] +['Ġp', 'ec'] +['Ñħ', 'од'] +['ant', 'om'] +['ĠBlue', 'print'] +['ĠEvent', 'Emitter'] +['Ġl', 'ä'] +['æ', '²'] +['Ġstr', 'aw'] +['(', 'comp'] +["'", 
'une'] +['>', 'N'] +['-', 'client'] +['es', 'Module'] +['-b', 'ase'] +['Ġret', 'reat'] +['_s', 'imple'] +['ĉĉĉĉĉĉ', 'Ġ'] +['fe', 'e'] +["')", 'čĊčĊ'] +['Control', 'Item'] +['Ġsubscri', 'bers'] +['ple', 'ase'] +['ĠE', 'ff'] +['Ġp', 'ound'] +['ĠBy', 'tes'] +['ĠTe', 'a'] +['_', 'activity'] +['Ġmax', 'im'] +['Ġop', 'code'] +['B', 'SD'] +['.', 'constant'] +[';', '}'] +['omb', 'res'] +['Ġcare', 'ers'] +[')', '.ĊĊĊĊ'] +['Ġsp', 'reading'] +['-exp', 'anded'] +['ĠOr', 'd'] +['amar', 'in'] +['Ġmob', 'ility'] +['Un', 'fortunately'] +['ak', 'k'] +['N', 'L'] +['_', 'redirect'] +['ĠP', 'G'] +['ĠS', 'ensor'] +['b', 'ol'] +['t', 'ap'] +['_MEM', 'ORY'] +['ĠUI', 'Alert'] +['plit', 'ude'] +['We', 'bsite'] +['ĠLog', 'o'] +['lo', 've'] +['[', 'ind'] +['Ġalto', 'gether'] +['Ġwonder', 'ed'] +['Ġes', 'per'] +['ĠLib', 'eral'] +['Ġo', 'ss'] +['Ġel', 'it'] +['Ġst', 'iff'] +['od', 'ox'] +['_ment', 'ions'] +['ĠDou', 'glas'] +['_p', 'id'] +['ĠC', 'K'] +['ĠinitWith', 'Frame'] +['.b', 'log'] +['p', 'kg'] +['ang', 'hai'] +['QUI', 'RED'] +['u', 'u'] +['Ġm', 'kdir'] +['AT', 'AL'] +['Ġun', 'h'] +['in', 'ces'] +['st', 'h'] +['Ġhypo', 'thesis'] +['Ġc', 'ata'] +['ĠT', 'B'] +['ĠCl', 'ar'] +['Ġpre', 'decess'] +['Ġsitu', 'ated'] +['-w', 'orld'] +['))', '/'] +['Ġhead', 'lines'] +['.st', 'at'] +['Ġout', 'break'] +['sp', 'ath'] +['_FLAG', 'S'] +['ĠServlet', 'Exception'] +['S', 'un'] +['F', 'ROM'] +['ĠD', 'ir'] +['ãĥ»ãĥ»', 'ãĥ»'] +['_co', 'ord'] +['ĠOpt', 'im'] +['Mon', 'itor'] +['.b', 'it'] +['XX', 'X'] +['Ġtod', 'as'] +['f', 'eld'] +['ÑĢ', 'и'] +['im', 'ir'] +['Ġpolit', 'ically'] +['Ġmolec', 'ular'] +['Ġtrad', 'ed'] +['Ġ{{', '$'] +['ĠSw', 'edish'] +["Ġ'@", '/'] +['_RE', 'AL'] +['Ġw', 'arehouse'] +['t', 'oday'] +[',', 'L'] +['or', 'p'] +['<', 'section'] +['-', 'br'] +['ym', 'e'] +['ĠUser', 'Service'] +['Ġlib', 'erty'] +['Ġmoment', 'o'] +['(', 'Image'] +['<', 'size'] +['S', 'ch'] +['Ġj', 'og'] +['i', 'ology'] +['arent', 'ly'] +['Ġquant', 'um'] +['ĠAb', 'u'] +['Ġr', 'im'] +['Ġman', 'a'] +['Font', 'Size'] 
+['Build', 'ing'] +['st', 'airs'] +['AIL', 'ABLE'] +['Ġ&', "'"] +['Ġs', 'ect'] +['Ġs', 'igh'] +['(b', 'atch'] +['.I', 'Container'] +['p', 'oll'] +['ĠCor', 'ps'] +['Î', 'µ'] +['ar', 'u'] +['ĠK', 'ay'] +['.r', 'ange'] +['_click', 'ed'] +['ĠRobert', 's'] +['.N', 'etwork'] +['fin', 'ish'] +['-', 'Man'] +['Ġcolleg', 'es'] +['ĠF', 'ine'] +['"))', ',Ċ'] +['f', 'ilm'] +['Ġrem', 'inded'] +['Ġgest', 'ure'] +['out', 'il'] +['Ġthread', 'ing'] +['Ġobj', 'et'] +['Ġt', 'ours'] +['activ', 'ated'] +['.m', 'kdir'] +['=', 'user'] +['Ġre', 'de'] +['f', 'ü'] +['_SY', 'STEM'] +['p', 'v'] +['Ġcon', 'gr'] +['Ġmass', 'asje'] +['Ġpract', 'ition'] +['Un', 'iversity'] +['Ġtab', 'index'] +['Ð', 'ĺ'] +['S', 'ets'] +['Ġcount', 'ies'] +['g', 'uest'] +['f', 'an'] +['Ġword', 'en'] +['.d', 'i'] +['на', 'Ñĩ'] +['Â', '¿'] +['ig', 'Decimal'] +['Ġsh', 'ore'] +['Ġg', 'ö'] +['Ġrep', 'airs'] +['Ġhelp', 'ers'] +['Ġcenter', 'ed'] +['OL', 'LOW'] +['Ġmap', 'StateToProps'] +['Ġc', 'ents'] +['<', 'A'] +['Ġexpect', 'ation'] +['Oct', 'ober'] +['Ġbg', 'color'] +['ca', 'les'] +['.C', 'ON'] +['ĠV', 'el'] +['Ġcry', 'ing'] +['-se', 'ason'] +['Ġfunction', 'ing'] +['_LOC', 'ATION'] +['ü', 'ss'] +['ber', 'y'] +['Par', 'a'] +['omin', 'ator'] +['-', 'le'] +['Ġeth', 'ical'] +['has', 'htags'] +['emp', 'lo'] +['Ġn', 'úmero'] +['(', 'activity'] +['.St', 'op'] +['.str', 'ftime'] +['IL', 'D'] +['Ġto', 'e'] +['ĉ', 'Node'] +['")', 'čĊčĊ'] +['ĠPu', 'erto'] +['Ġexec', 'uting'] +['ĠG', 'UID'] +['Ġoppos', 'ing'] +['al', 'ph'] +['Ġexhib', 'it'] +['_fl', 'ash'] +['Ġme', 'ille'] +['Ġjson', 'Object'] +['H', 'ero'] +['aint', 'ed'] +['_D', 'OM'] +['Ġw', 'il'] +['Ġslo', 'pe'] +['Ġm', 'Ã¥'] +['ĠIraq', 'i'] +['Ġorgan', 'ize'] +['ĉj', 'Query'] +['H', 'UD'] +['sh', 'ine'] +['.', 'we'] +['ĠSk', 'ills'] +['pons', 'or'] +['Ġcon', 'clusions'] +['Ġre', 'forms'] +['Ġrel', 'uct'] +['n', 'amed'] +['ĠOl', 'iver'] +['Ġ//', '}Ċ'] +['-', 'looking'] +['Ġf', 'og'] +['ĠH', 'O'] +['ĠF', 'ried'] +['Ġinev', 'itable'] +['ĠData', 'GridView'] +['H', 'our'] +['il', 
'les'] +['log', 'ical'] +['Ġconnect', 'ivity'] +['.tw', 'ig'] +['ĠK', 'yle'] +['(d', 'st'] +['-', 'Sh'] +['ĠStud', 'ios'] +['(', 'Level'] +['.j', 'et'] +['_PRO', 'TO'] +['-de', 'coration'] +['OT', 'HER'] +['Ġread', 'ily'] +['.Param', 'eter'] +['Ġmultip', 'ly'] +['ĠL', 'IB'] +['ar', 'med'] +['Ġsoon', 'er'] +['æ', 'Ħ'] +['_', 'ES'] +['Ġfoss', 'il'] +['ĠA', 'nc'] +['âĢľ', 'This'] +['l', 'odash'] +['Py', 'thon'] +['Ġhist', 'ogram'] +['west', 'ern'] +['Ġinf', 'ant'] +['Ġco', 'ordinator'] +['Ġn', 'ib'] +[':', 'm'] +['Ġres', 'pected'] +['Ġdef', 'init'] +['&', 'T'] +['_p', 'ad'] +['ĠTr', 'igger'] +['th', 'al'] +['Ġimage', 'Named'] +['Ġbeat', 'en'] +['ĉ', 'rc'] +['ĠPal', 'ace'] +['Ġhaz', 'ard'] +['Ġisol', 'ation'] +['_', 'rc'] +['cont', 're'] +['OUT', 'PUT'] +['Ġre', 'ign'] +['ĠPl', 'ate'] +['AT', 'ES'] +['Ġfl', 'ux'] +['Ġpack', 's'] +['.get', 'Selected'] +['Ġparticip', 'ated'] +['Ġneed', 'le'] +['-de', 'pth'] +['::::', '::'] +['-l', 'aw'] +['ins', 'pace'] +['on', 'itor'] +['=', 'no'] +['ĠAt', 'omic'] +['ĠBr', 'ain'] +['Edit', 'able'] +['-s', 'c'] +['red', 'ential'] +['ĠP', 'erry'] +['k', 'ie'] +['Ġ', '----------Ċ'] +['.st', 'roke'] +['(', 'Intent'] +['Ġun', 'ity'] +['um', 'lah'] +['F', 'urther'] +['Ġpr', 'ze'] +['Ġs', 'ø'] +['ãĤ', 'Ĭ'] +['ĠPROC', 'UREMENT'] +['ĠH', 'ousing'] +['Ġatt', 'orneys'] +['Ġcomp', 'ose'] +['atter', 'ing'] +['"', 'What'] +['dra', 'ul'] +['Ġstraight', 'forward'] +['In', 'stant'] +['.J', 'TextField'] +['Ġtr', 'ades'] +['л', 'а'] +['Ġ{', '!'] +['Ġl', 'ately'] +['IM', 'G'] +['ĠA', 'ld'] +['ĠIN', 'NER'] +['Ġcart', 'oon'] +['.S', 'ource'] +['F', 'ALSE'] +['Ġd', 'ough'] +['f', 'en'] +['(', 'rect'] +['Data', 'Table'] +['N', 'ick'] +['ĠBut', 'ter'] +['read', 's'] +['_com', 'ments'] +['EN', 'V'] +['ĠConnect', 'icut'] +['-F', 'IRST'] +['ĉĉĉ', 'ĠĠĠĠĠ'] +['ach', 'i'] +['.M', 'sg'] +['re', 'ction'] +['Ġrelax', 'ed'] +['Ġsha', 'ft'] +['Ġe', 'f'] +['ĠAdd', 'ing'] +['Ġbre', 'ach'] +['Ġ', 'ï¼ļ'] +['ram', 'a'] +['Ġconduct', 'ing'] +['Ġ(', ';'] +['(g', 'l'] +['ĠCA', 
'USED'] +['ash', 'i'] +['ĠF', 'LAG'] +['ĠCom', 'merce'] +['ĠIN', 'TEGER'] +['h', 'ours'] +['ĠSchool', 's'] +['Ġn', 'ucle'] +['Ag', 'ain'] +['pro', 'j'] +['Ġsevent', 'h'] +['EMPL', 'ARY'] +['(m', 'ock'] +["']", ',čĊ'] +['_S', 'PEED'] +['>', 'false'] +['Ġsp', 'a'] +['ĠN', 'ear'] +['ì', 'ķ'] +['Ġintr', 'ig'] +['_m', 'embers'] +['w', 'ave'] +['Ġanalyst', 's'] +['_O', 'S'] +['ed', 'in'] +['ĠF', 'ri'] +['Ġretrie', 'ved'] +['Reg', 'ular'] +['_', 'obs'] +['EX', 'PORT'] +["')}}", '"'] +['"', 'class'] +['__', '(('] +['b', 'ucket'] +['Ġst', 'ro'] +['ĠP', 'atch'] +['yst', 'ick'] +['ful', 'ness'] +['ap', 'os'] +['D', 'a'] +['ĉĉĉĉĉ', 'ĠĠĠ'] +['Ġen', 'rich'] +['un', 'ordered'] +['h', 'ole'] +['C', 'ong'] +['<', 'Product'] +['ĠC', 'urt'] +['(', 'the'] +['_l', 'ower'] +['Ġavoid', 'ing'] +['Ġbu', 'zz'] +['Ġv', 'iable'] +['ub', 'a'] +['-', 'is'] +['are', 'l'] +['Ġact', 'ed'] +['-d', 'etails'] +['à¸', 'ĩ'] +['ĠThe', 'ory'] +['ĠP', 'un'] +['ĠAn', 'onymous'] +['...', '"Ċ'] +['è', 'res'] +['åı', '¯'] +['ĠV', 'ision'] +['_se', 'm'] +['ash', 'a'] +['Ġcelebr', 'ity'] +['Ġend', 'Date'] +['Ġpop', 'ulate'] +['Ġcu', 'is'] +['qu', 'ant'] +['f', 'loor'] +['Ġglob', 'ally'] +['Ġcru', 'ise'] +['ĠStan', 'ley'] +['Ġb', 'ikes'] +['.get', 'Connection'] +['Ġpoor', 'ly'] +['_', 'other'] +['amp', 'ing'] +['."', ');ĊĊ'] +['od', 'i'] +['_A', 'DMIN'] +['.color', 's'] +['ĠG', 'aming'] +['>', "';ĊĊ"] +['STR', 'UCT'] +['Q', 'R'] +['ID', 's'] +['(arg', 'uments'] +['_a', 'ux'] +['(', 'Event'] +['_PR', 'IVATE'] +['ĠTre', 'k'] +['Ġdownload', 's'] +['m', 'utable'] +['_STR', 'UCT'] +['(w', 'x'] +['Ġdom', 'ains'] +['js', 'px'] +['ĠVi', 'agra'] +['Command', 's'] +['J', 's'] +['.c', 'fg'] +['Content', 'Pane'] +['ĠEdit', 'Text'] +['à¥į', 'à¤'] +['Att', 'ach'] +['ĠAR', 'M'] +['posit', 'ive'] +['ĠGener', 'ated'] +['Ġse', 'ized'] +['=', ':'] +['Ġelectron', 'ics'] +['ĠApp', 'Component'] +['/', "',Ċ"] +['.equals', 'IgnoreCase'] +['Do', 'ctrine'] +['d', 'isk'] +['ĠPolit', 'ical'] +['CH', 'O'] +['<', 'F'] +['ĉ', 'height'] 
+['ĠB', 'ug'] +['.', 'le'] +['ik', 'h'] +['Ġmill', 'iseconds'] +['Ġconstit', 'u'] +['m', 'ag'] +['.n', 'l'] +['-r', 'ange'] +['ang', 'gal'] +["',", '['] +['ropol', 'itan'] +['ĠÃ', 'ľ'] +['ĠU', 'C'] +['.d', 'esc'] +['-L', 'AST'] +['f', 'stream'] +['ib', 'il'] +['Ġf', 'ier'] +['VER', 'Y'] +['Ġë', '³'] +['IR', 'T'] +['_', 'UI'] +['(', 'abs'] +['Ġkne', 'es'] +['Ġro', 'okie'] +['ĠV', 'ac'] +['are', 'na'] +['comm', 'end'] +['-', '\\'] +['ĠSUB', 'STITUTE'] +['So', 'ft'] +['Ġpart', 'ir'] +['we', 'alth'] +['è¦', 'ģ'] +['(d', 'ataset'] +['ĠCl', 'imate'] +['-', 'show'] +['Ġreli', 'ability'] +['_ch', 'unk'] +['ä»', '£'] +['_st', 'ock'] +['ĠEX', 'EMPLARY'] +['ï¸', 'ı'] +['Ġv', 'ÃŃ'] +['Ġsm', 'iled'] +['Ġdr', 'ill'] +['.F', 'unction'] +['ĠS', 'I'] +['Ġreg', 'ression'] +['-', 'X'] +['ĠJ', 'ar'] +['p', 'ref'] +['ĉs', 'uccess'] +['ĠHit', 'ler'] +['Ġinst', 'inct'] +['Ġfem', 'mes'] +['Ġlo', 'ver'] +['<', 'Ċ'] +['Ġmulti', 'plier'] +['r', 'il'] +['Res', 'ize'] +['ĠAuthor', 'ization'] +['ĠK', 'an'] +['Dispatch', 'ToProps'] +['Ġc', 'rops'] +['t', 'okens'] +['ec', 'n'] +['ential', 'ly'] +['ĠINTERRU', 'PTION'] +['f', 'ake'] +['Und', 'efined'] +['ĠA', 'K'] +['ĠTest', 'Case'] +['Ġr', 'ab'] +['Ġtor', 'rent'] +['ĠO', 't'] +['B', 'ars'] +['Ġlect', 'ure'] +['Ġen', 'jo'] +['Ġrespond', 's'] +['Ġindex', 'ed'] +['Of', 'Work'] +['_ch', 'ain'] +['))', '->'] +['ĠBeaut', 'y'] +['Ġ`', '<'] +['Ġtouch', 'ing'] +['Ġ|', '--'] +['ĉf', 'lag'] +['normal', 'ize'] +['Ġtr', 'apped'] +['Ġestablish', 'ing'] +['/b', 'uild'] +['A', 'J'] +['f', 'y'] +['-', 'react'] +['av', 'n'] +['RI', 'PTION'] +['Ġk', 'ut'] +['ĠF', 'ashion'] +['ĠIn', 'form'] +['cur', 'ities'] +['<', 'byte'] +['ĠUkr', 'ain'] +['Ġs', 'ug'] +['Ġconsist', 'ing'] +['ood', 'le'] +['.', 'ctx'] +['.To', 'List'] +['Ġcomment', 'ary'] +['Ġtransf', 'ers'] +['Ġn', 'ost'] +['ih', 'ad'] +['ĠU', 'pper'] +['Ġconf', 'using'] +['miss', 'ing'] +['-', 'cl'] +['Ġbound', 'ing'] +['Ġcongress', 'ional'] +['Ġreve', 'aling'] +['d', 'h'] +['r', 'up'] +['Ġt', 'res'] +['re', 
'peat'] +[',', 'ĊĊĊĊ'] +['_t', 'ac'] +['Ġexp', 'ed'] +['G', 'irl'] +['h', 'orizontal'] +['Ġ"../../', '../'] +['(', 'option'] +['Ġwe', 'iter'] +['ĉs', 'ql'] +['Ġ=>', '{Ċ'] +['Ġgar', 'lic'] +['Ġre', 'pr'] +['Ġrepl', 'ies'] +['(', 'prop'] +['Ġspir', 'its'] +['Ġins', 'pire'] +['Ġbas', 'ement'] +['.re', 'ject'] +['Ġhint', 's'] +['Ġpoll', 'ing'] +['ĉ', 'ĠĊ'] +['_r', 'ating'] +['Ġc', 'ath'] +['av', 'ier'] +['Ġcomp', 'ressed'] +['ĠV', 'S'] +[']', "'"] +['Ġjud', 'icial'] +['ĠT', 'rend'] +['tr', 'aining'] +['EST', 'AMP'] +['ogn', 'ition'] +['Ä', 'ģ'] +['SE', 'NT'] +['vent', 'ions'] +['Ġconsult', 'ant'] +['um', 'ph'] +['Ġuser', 'Service'] +[',', 'NULL'] +['k', 'h'] +['D', 'ear'] +['_B', 'AD'] +['it', 'ations'] +['Ġmet', 'aph'] +["'", 'é'] +['and', 'ise'] +['-f', 'ont'] +['.ch', 'art'] +['Ġs', 'g'] +['_', 'Controller'] +['.j', 'peg'] +['ĠUL', 'ONG'] +['ĉg', 'ame'] +['(', 'ss'] +['ĠM', 'aj'] +['ĉg', 'o'] +['ĠS', 'ad'] +['ĠB', 'erg'] +['ĠM', 'ine'] +['P', 'ack'] +['Ġres', 'istant'] +['ĠR', 'OM'] +['Ġp', 'eg'] +['ĠStan', 'ford'] +['ĠY', 'ahoo'] +['Ġsca', 'led'] +['Ġl', 'an'] +['=', '[]'] +['"/', '>', 'ččĊ'] +['Ġs', 'ud'] +['ĉ', 'background'] +['Ġsch', 'olars'] +['-m', 'uted'] +['ar', 'á'] +['Ġ=', '===='] +['Ġ__', '__'] +['C', 'reat'] +['ene', 'ver'] +['/w', 'p'] +['ĠV', 'PN'] +['Error', 'Code'] +[')', '],Ċ'] +['(b', 'uilder'] +['ĠEn', 'emy'] +['S', 'ensor'] +['us', 'a'] +['Ġtr', 'iggers'] +['Ġplayoff', 's'] +['_RE', 'Q'] +['Ġ(', '~'] +['ĠBar', 'ry'] +['Ġperman', 'ently'] +['ĠR', 'UN'] +['Ġb', 'ure'] +['.Fat', 'alf'] +['Ġch', 'ick'] +['ĉ', 'panic'] +['ps', 'i'] +['ok', 'a'] +['éĢ', 'ī'] +['>', '['] +['Ġunderstand', 's'] +['ĠJun', 'ior'] +['ĠIN', 'FO'] +['=', 'mysqli'] +['ust', 'ain'] +['-s', 'ource'] +['s', 'erv'] +['ĠC', 'REATE'] +['.', 'au'] +['Ġsell', 's'] +['ĠĠĊ', 'ĠĠĊ'] +['E', 'urope'] +['z', 'w'] +['pre', 'h'] +['ĠNS', 'A'] +['Ġx', 'y'] +['à¸', '´'] +['ĠB', 'eyond'] +['Inst', 'ead'] +['Non', 'Query'] +['Ġar', 'ise'] +['Ġavoid', 'ed'] +['.em', 'place'] +['_model', 's'] +['}', 
'),Ċ'] +['Ġh', 'id'] +['Ġ&', '_'] +['.p', 'oints'] +['.get', 'Width'] +['.Ex', 'ec'] +['Ġ//', '//'] +['ĠS', 'essions'] +['...', '\\'] +['ĠCol', 'omb'] +['Ġacceler', 'ation'] +['rest', 'ore'] +['Ġ', 'ile'] +['ob', 'ic'] +['<', 'Node'] +['ĠD', 'X'] +['ĠBes', 'ides'] +['.', 'age'] +['ĠCont', 'ains'] +['N', 'ational'] +['ĠIm', 'plementation'] +['Ġeff', 'ic'] +['ĠR', 'M'] +['H', 'y'] +['ĠWed', 'ding'] +['ok', 'ies'] +['Ġrec', 'ursive'] +['Ġprosec', 'utors'] +['.Se', 'lection'] +['ĠForm', 'ula'] +['Been', 'Called'] +['[i', 'i'] +['ĠFr', 'an'] +['Ġtraged', 'y'] +['_F', 'EATURE'] +['Ļ', '¨'] +['comp', 'ass'] +['ĠB', 'h'] +['?', 'ĊĊĊ'] +['.w', 'riter'] +['ĠH', 'our'] +['Db', 'Context'] +['io', 'v'] +['am', 'on'] +['re', 'pr'] +['é', 'ĥ'] +['ĉf', 'i'] +["']", ']'] +['ĠD', 'ry'] +['.', 'ro'] +['ĠO', 'bserv'] +['æł', 'ĩ'] +['Form', 'er'] +['ĠB', 'alance'] +['ĉ', 'json'] +['Ġpr', 'zy'] +['I', 'SS'] +['(', 'sock'] +['ĠL', 'INE'] +['Ġde', 'ce'] +['Ġal', 'ly'] +['Ġtend', 'ency'] +['F', 'un'] +['Ġschem', 'es'] +['Ġinter', 'ven'] +['æĺ', 'İ'] +['Ġad', 'verse'] +['quote', 'lev'] +['Ġsacr', 'ific'] +['_s', 'ide'] +['Ġmut', 'ex'] +['AG', 'IC'] +['Ġocc', 'urring'] +['ĠCommunic', 'ation'] +['um', 'ar'] +['ç¼', 'ĸ'] +['ĠTreat', 'ment'] +['.p', 'erson'] +['ĠL', 'C'] +['Ġe', 'ch'] +['(', '("'] +['ĠDise', 'ase'] +['ä', 'd'] +['ĠA', 'Z'] +['.A', 'ccount'] +['Ġcontinu', 'ously'] +['END', 'ING'] +['ĠRET', 'URN'] +['-', 'string'] +['.f', 'ilename'] +['syn', 'thesize'] +['Res', 'ponder'] +['(', 'opts'] +['reg', 's'] +['Ġn', 'uest'] +['Pe', 'er'] +['//', '------------------------------------------------'] +['Ġg', 'auge'] +['ĠK', 'in'] +['.s', 'chema'] +['Ġarr', 'ange'] +['ĠBl', 'ake'] +['_Type', 'Info'] +['C', 'over'] +['ĠHamp', 'shire'] +['P', 'aper'] +['-in', 'ner'] +['util', 'ity'] +['Ġcross', 'origin'] +['F', 'OR'] +['Ġign', 'oring'] +['ĠD', 'D'] +['av', 'an'] +['Ġtrad', 'itions'] +['Ġget', 'String'] +['Ġeth', 'ics'] +['ĠMaterial', 's'] +['DE', 'SC'] +['Ġen', 'zym'] +['io', 'let'] +['ĠCh', 
'ip'] +['ĠMc', 'Donald'] +['Ġn', 'erve'] +['ç', 'Ħ'] +['")', ']'] +['æ±', 'Ĥ'] +['ĠS', 'ugar'] +['_S', 'IM'] +['j', 'peg'] +['Ġdiscret', 'ion'] +['ĠT', 'N'] +['bo', 've'] +['ĠMin', 'imum'] +['ĠForm', 'Group'] +['Ġwork', 'force'] +['ĠExec', 'ution'] +['err', 'er'] +['ĉ', 'ĠĠĠĠĉ'] +['Ġpres', 'cribed'] +['.Text', 'Align'] +['OP', 'EN'] +['ĠP', 'B'] +['im', 'ity'] +['ĠEx', 'ternal'] +['°', 'C'] +['ĠApplication', 'Controller'] +['Ġb', 'arr'] +['imp', 'licit'] +['_d', 'ot'] +['ĠCol', 'on'] +['C', 'OLOR'] +['.Pro', 'ject'] +['*', '', '}Ċ'] +['pl', 'aint'] +['get', 'Text'] +['Ġindivid', 'ually'] +['Ġcheck', 'box'] +['U', 'Y'] +['ĠL', 'amb'] +['Ġdys', 'function'] +['ĠL', 'ar'] +['à', '°'] +['ĠCre', 'ating'] +["');ĊĊ", 'Ċ'] +['"', 'They'] +['loc', 'ations'] +['_C', 'ORE'] +['Inter', 'action'] +['umbn', 'ails'] +['ĠPart', 'ner'] +['b', 'rit'] +['Ġless', 'er'] +['ĠSl', 'ot'] +['set', 'Attribute'] +['ĠW', 'ave'] +['.p', 'o'] +['/', 'store'] +['Ġbrows', 'ing'] +['_p', 'd'] +['sum', 'e'] +['s', 'ed'] +['Cur', 've'] +['Ġpl', 'asma'] +['Ġsusp', 'icious'] +['ìĿ', '¸'] +['ĠB', 'ah'] +['ĠExp', 'licit'] +['_C', 'C'] +['.Client', 'Size'] +['\\', 'View'] +['Ġsub', 'stit'] +['lo', 'on'] +['ĠG', 'AME'] +['ĠB', 'rid'] +['Ľ', '建'] +['_', 'User'] +['Ġsqu', 'ares'] +['f', 'one'] +['Ġsac', 'red'] +['ug', 'hs'] +[']', 'interface'] +['ĠTh', 'row'] +['ĠK', 'irk'] +['Ġemp', 'ire'] +['Ġassess', 'ed'] +['T', 'ax'] +['ĠHe', 'aven'] +['-b', 'uffer'] +['_STAT', 'IC'] +['én', 'é'] +['-b', 'ordered'] +['Ġpun', 'ct'] +['(m', 'ode'] +['Ġke', 'ine'] +['S', 'ent'] +['ĠCal', 'cul'] +['ĠE', 've'] +['Ġsty', 'lish'] +['Ġoil', 's'] +['.Test', 'Case'] +['Ġtrad', 'emark'] +['Ġliter', 'ary'] +['Ġconcentr', 'ations'] +['ĠRel', 'ations'] +['(', 'Class'] +['Ġstd', 'in'] +['Ġv', 'æ'] +['back', 'up'] +['.', 'VERSION'] +['.AutoScale', 'Dimensions'] +['st', 'arter'] +['Transaction', 'al'] +['-', 'panel'] +['St', 'udio'] +['k', 'c'] +['ĠCh', 'amber'] +['ĠSpi', 'el'] +['Ġr', 'ho'] +['ا', 'ÙĦ'] +['!', "'"] +['.At', 'tributes'] 
+['Ġmurder', 'ed'] +['apeut', 'ic'] +['Ġint', 'imate'] +['Ġtext', 'Field'] +['ĠBuff', 'alo'] +['d', 'ummy'] +['"', '%'] +['ĠLib', 'erty'] +['ob', 'ar'] +['ĠT', 'ank'] +['ĠPop', 'ular'] +['erv', 'isor'] +['ĠIn', 'iti'] +['ĠM', 'all'] +['ĠP', 'rior'] +['C', 'AP'] +['ĠCl', 'ay'] +['ĠCert', 'ificate'] +['.L', 'ock'] +['-st', 'rip'] +['-dr', 'iven'] +['/', 'all'] +['ĠMessageBox', 'Buttons'] +['_SE', 'CRET'] +['_p', 'b'] +['Ġr', 'ats'] +['ा', 'à¤'] +['Ġn', 't'] +['.R', 'outer'] +['_top', 'ic'] +['Ġt', 'ennis'] +['ĠP', 'UBLIC'] +['ĠActiv', 'atedRoute'] +["Ġ'", ',Ċ'] +['Ġcost', 'ume'] +['Ġj', 'okes'] +['.', 'Handle'] +['ĉ', 'byte'] +['Ġflav', 'ors'] +['(', 'cc'] +['Ġperson', 'as'] +['ĉ', 'image'] +['ĠN', 'azi'] +['Ġgram', 'mar'] +['Ġú', 'lt'] +['Ġval', 've'] +['Ġv', 'ic'] +['ĠR', 'achel'] +['_in', 'valid'] +['P', 'refs'] +['std', 'int'] +['(r', 'oute'] +['Ġhtml', 'specialchars'] +['Ġpe', 'oples'] +['pl', 'ine'] +['Ġn', 'v'] +['ĠQu', 'ant'] +['opp', 'ers'] +['Ġcurrent', 'User'] +['ĠC', 'atal'] +['Ġrecon', 'c'] +['Ġconj', 'unction'] +['l', 'x'] +['amb', 'urg'] +['Ġinflu', 'ential'] +['d', 'anger'] +['ind', 'ers'] +['Ġ%', '@",'] +['.config', 'uration'] +['os', 'ome'] +['.', 'identity'] +['Ġpick', 'er'] +['n', 'ost'] +['ĠDI', 'Y'] +['Aug', 'ust'] +['ab', 'lo'] +['Le', 'af'] +['ĠRec', 'o'] +['ck', 'o'] +['DO', 'C'] +['ĠH', 'erm'] +[':', 'any'] +['ĠInt', 'erview'] +['ĠT', 'ex'] +['x', 'fe'] +['(', 'work'] +['Ġle', 'ap'] +['He', 'ading'] +['Ġqu', 'arters'] +['\\', 'Bundle'] +['re', 'b'] +['Per', 'haps'] +['ĠG', 'mbH'] +['B', 'irth'] +['ĉ', 'sum'] +['ĠWat', 'son'] +['.n', 'il'] +['ç', '¡'] +['{', '}ĊĊ'] +['ica', 'id'] +['Get', 'ter'] +['"', 'name'] +['Ġ"', 'čĊ'] +['_n', 'one'] +['z', 'm'] +['ac', 'ute'] +['uest', 'o'] +['Ġs', 'ous'] +['Ġre', 'build'] +['Ġnewsp', 'apers'] +['ĠH', 'az'] +['Ġk', 'its'] +['if', 'o'] +['Bl', 'ur'] +['Ġsu', 'ited'] +['-', 'In'] +['à', '¯'] +['ĠKe', 'ith'] +['ĠNor', 'way'] +['IN', 'IT'] +['ire', 'ccion'] +['iet', 'ies'] +['_us', 'age'] +['ĠDou', 'g'] 
+['r', 'ise'] +['Ġtr', 'illion'] +['im', 'ited'] +['ĠR', 'EL'] +['al', 'ic'] +['Ġcritic', 'ized'] +['the', 'orem'] +['Ġce', 'ase'] +['Ġsid', 'ew'] +['ĠT', 'erry'] +['Ġsubs', 'idi'] +['Ġfirm', 'ly'] +['Ġaw', 's'] +['Ġh', 'ott'] +['Ġdress', 'ing'] +['bad', 'ge'] +['ĠApp', 'lications'] +['è¿', 'ĶåĽŀ'] +['Ġlaugh', 'ed'] +['Ġh', 'obby'] +['Ġmus', 'icians'] +['Ġ*', '.'] +['.', 'placeholder'] +['Ġcount', 'ers'] +['ĠCap', 'itol'] +['SD', 'K'] +['Ġhel', 'met'] +['and', 'box'] +['qu', 'it'] +['Ġcriminal', 's'] +['Ġteen', 'ager'] +['(', 'update'] +['G', 'l'] +['.se', 'lection'] +['Ġdis', 'charge'] +['Ġpresent', 'ing'] +['ufact', 'urer'] +['_UN', 'KNOWN'] +['Ġstress', 'ed'] +['å', 'ύ'] +['Pro', 'to'] +['_cor', 'rect'] +['ha', 'us'] +['Ġren', 'ov'] +['Ġfire', 'arms'] +['Ġtechn', 'ically'] +['-b', 'rowser'] +['Ġc', 'andy'] +['St', 'roke'] +['Ġexec', 'utor'] +['Ġocc', 'urrence'] +['ĠIP', 'v'] +['_INTER', 'FACE'] +['ĠRetrie', 've'] +['.b', 'ad'] +['Ex', 'change'] +['Nav', 'bar'] +['ĠK', 'id'] +['(get', 'ApplicationContext'] +['_ST', 'OP'] +['ĠB', 'oss'] +['List', 'eners'] +['Ġshoot', 'er'] +['ĠAl', 'b'] +['ä', 'ch'] +['Ġp', 'ix'] +['.key', 'Code'] +['al', 'one'] +['Ġabs', 'urd'] +['ĠC', 'um'] +['ĠNewton', 'soft'] +['ik', 't'] +['Ġlaugh', 'ing'] +['Ġcapital', 'ism'] +['ree', 'Node'] +['T', 'x'] +['_QU', 'ERY'] +['.S', 'leep'] +['(', 'login'] +['Web', 'Element'] +['Ġcelebr', 'ating'] +['Ġde', 'precated'] +['Ġma', 'ar'] +['Ġart', 'istic'] +['_ASS', 'OC'] +['ĠBorder', 'Radius'] +['ĉw', 'p'] +['Ġsurviv', 'ors'] +['In', 'ner'] +['-', 'red'] +['Ġprosec', 'ution'] +['_', 'pp'] +['("', '', '$'] +['Ġcomm', 'a'] +['un', 'checked'] +['graph', 'ics'] +['r', 'ors'] +['G', 'ROUND'] +['(', 'public'] +['Ġcustom', 'ized'] +['ĠArk', 'ansas'] +['ĠR', 'ew'] +['Ġexp', 'iration'] +['×', 'ķ'] +['ĠC', 'ul'] +['Ġn', 'ons'] +['.F', 'ilter'] +['Ġsen', 'ator'] +['_def', 'inition'] +['ash', 'ington'] +['ym', 'ph'] +['/', 'J'] +['Ġf', 'use'] +['ram', 'id'] +['ĠSup', 'plier'] +['Ġaut', 'ocomplete'] +['Ġ}', '),'] 
+['."', 'ĊĊĊ'] +['_function', 's'] +['ĉ', 'to'] +['.e', 'val'] +['ĠT', 'Object'] +['Re', 'ferences'] +['Ġhe', 'ated'] +['H', 'AL'] +['Ġ))', '}Ċ'] +['}', '$'] +['ĠB', 'arr'] +['_UN', 'IT'] +['+', '$'] +['Ġget', 'Value'] +['ip', 'ed'] +['ch', 'ied'] +['(v', 'm'] +['c', 'ue'] +['_int', 'eger'] +['_c', 'ourse'] +['th', 'ird'] +['Ġrevis', 'ed'] +['**', '/Ċ'] +['_D', 'IRECT'] +['Out', 'Of'] +['("', '('] +['ĠFe', 'el'] +['Ġre', 'ass'] +['Ġsub', 'title'] +['per', 'i'] +['n', 'f'] +['Ġenjo', 'ys'] +['Ġtreat', 's'] +[')', 'this'] +['-t', 'abs'] +['anc', 'ers'] +['Ġcontin', 'ent'] +['Ġcard', 'io'] +['S', 'er'] +['.', 'question'] +['Ġph', 'rases'] +['Valid', 'ators'] +['Ġpop', 'ul'] +['Ġl', 'ÃŃ'] +['s', 'ong'] +['_IN', 'TERNAL'] +['Ġadvis', 'er'] +['Ġp', 'uzz'] +['Ġambit', 'ious'] +['ĠT', 'ob'] +['ĠD', 'P'] +['Ġpres', 'idency'] +['Ġsurre', 'nder'] +['Ġwatch', 'es'] +['_b', 'inary'] +['ĠSo', 'on'] +['Ġcan', 'ada'] +['("', '")Ċ'] +[']', "='"] +['ĠBr', 'andon'] +['eps', 'ilon'] +['r', 'w'] +['.add', 'Child'] +['.C', 'opy'] +['Pr', 'incipal'] +['Ph', 'otos'] +['Ġmarg', 'inal'] +['Ġbas', 'ics'] +['e', 'ing'] +['M', 'ust'] +['_', 'String'] +['Ġo', 'le'] +['M', 'agento'] +['.c', 'ustomer'] +['(p', 'rev'] +['à¸', '¥'] +['Ġlo', 'yalty'] +['C', 'og'] +['Ġprot', 'ocols'] +['ĠCom', 'panies'] +['Ġtheoret', 'ical'] +['Ġaccess', 'ing'] +['ĠZ', 'en'] +['.', 'ones'] +['att', 'ice'] +['_w', 'orld'] +['z', 'es'] +['Ġtatto', 'o'] +['Ġmen', 'os'] +['Ġinter', 'sect'] +['"]', ';ĊĊ'] +['bel', 'ie'] +['Ġin', 'active'] +['.read', 'line'] +['-label', 'led'] +['.d', 'one'] +['lick', 'r'] +['ĠW', 'ORK'] +['Ġderiv', 'ative'] +['Ġd', 'atabases'] +['âĤ', 'Ĥ'] +['Ġs', 'x'] +['.is', 'Array'] +['Ġy', 's'] +['Ġp', 'ada'] +['ĠBul', 'let'] +['(`', '/'] +['is', 'Active'] +['ĠCG', 'Size'] +['(equal', 'To'] +['ĠColum', 'bus'] +['Ġmar', 'ry'] +['DE', 'V'] +['_l', 'imits'] +['ron', 'es'] +['I', 'AS'] +['Ġt', 'au'] +['min', 'o'] +['_W', 'rite'] +['ĠW', 'ine'] +['Ġ[', "['"] +['ĠP', 'ull'] +['rit', 'ers'] +['ri', 'ents'] 
+['Ġsh', 'ifting'] +['up', 'p'] +['_TIM', 'ER'] +['ĠCondition', 's'] +['áº', '¥'] +['ĠOr', 'ders'] +['ĠSt', 'rength'] +['æī', 'Ģ'] +['Ġvalid', 'ity'] +['Ġf', 'ot'] +['et', 'ur'] +['Ġb', 'olt'] +['åĨ', 'ħ'] +['ĠAl', 'ong'] +['os', 'hi'] +['Ġassum', 'ptions'] +['Ġmag', 'azines'] +['_S', 'PI'] +['Ġp', 'unt'] +['_PRO', 'DUCT'] +['Ġrel', 'ay'] +['ĠJ', 'avascript'] +['.', 'te'] +['-', 'es'] +['Ġwidget', 's'] +['(f', 's'] +['<', 'Item'] +['_ex', 'tra'] +['Ġrecru', 'iting'] +['E', 't'] +['Ġnecess', 'ity'] +['p', 'w'] +['Ġnov', 'els'] +['uss', 'els'] +['Cre', 'ator'] +['ĠM', 'VP'] +['ĠO', 'C'] +['th', 'ood'] +['cl', 'ients'] +['))', '*'] +['Ġcharacter', 'ized'] +['_SE', 'ND'] +['ut', 'i'] +['T', 'y'] +['.from', 'Json'] +['@', 'Service'] +['ãĤ', 'Ĥ'] +['Ch', 'ris'] +['_', 'Is'] +['ĠJohn', 'ny'] +['Ġclean', 'er'] +['ĠInitial', 'izes'] +['UN', 'K'] +['(', 'axis'] +['еÐ', '·'] +['ie', 'val'] +['ĠWar', 'riors'] +['}', ')('] +['DM', 'I'] +['âĻ', 'Ģ'] +['ĠTre', 'asury'] +['Ġfe', 'as'] +['Ġsl', 'a'] +['_EN', 'UM'] +['l', 'hs'] +['ĠIn', 'stit'] +['ipp', 'ers'] +['Line', 'ar'] +['Re', 'ading'] +['quir', 'ies'] +['-c', 'ell'] +['ch', 'rome'] +['.S', 'earch'] +['IN', 'A'] +['ç±»', 'åŀĭ'] +['ĠĊ', 'ĠĊ'] +['ĠSam', 'uel'] +['Ġmill', 's'] +['Ġdon', 'ate'] +['ĠGe', 'o'] +['(', 'rows'] +['Ġshe', 'ep'] +['Ġé', 'l'] +['ä½', 'ĵ'] +['Ġb', 'em'] +['_UN', 'USED'] +['ĠR', 'CC'] +['Ġintrodu', 'cing'] +['att', 'a'] +['ĠP', 'riority'] +['ĠF', 'B'] +['ĠSer', 'ge'] +['>', '";'] +['atch', 'ing'] +['ĠKnow', 'ledge'] +['ĉ', 'The'] +[';', 'margin'] +['less', 'ness'] +['op', 'ard'] +['um', 'atic'] +['()', '));čĊ'] +['Ġf', 'als'] +['(c', 'ache'] +['Type', 'Id'] +['éĢ', 'ļ'] +['_', 'choice'] +['ĠGo', 'th'] +['ĠS', 'ites'] +['M', 'G'] +['_b', 'order'] +['Ind', 'ices'] +['Compar', 'er'] +['ĠRed', 'istribution'] +['Ġclo', 'set'] +['Ġvers', 'atile'] +['Input', 's'] +['****************', '****'] +['Ġob', 'esity'] +['qu', 'iz'] +['gr', 'a'] +['(g', 'lobal'] +['åĬ', '¡'] +['Ġcollect', 'or'] +['Ġk', 'or'] +['ov', 
'able'] +['AD', 'C'] +['ĠEvent', 'Handler'] +['.', 'nc'] +['Ġplay', 'back'] +['ient', 'os'] +['_p', 'erm'] +['_W', 'ARNING'] +['ĠOlymp', 'ics'] +['.n', 'orm'] +['ĠBroad', 'cast'] +['_sm', 'all'] +['dr', 'ive'] +['.', 'iloc'] +['Ġtyp', 'ed'] +['M', 'EM'] +['_con', 's'] +['DM', 'ETHOD'] +['Ġl', 'un'] +['.d', 'istance'] +['(p', 'ar'] +['po', 'on'] +['Ġb', 'ast'] +['activ', 'ities'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ'] +[':', 'čĊčĊ'] +['S', 'ER'] +[')', '&&'] +['_l', 'st'] +['ĠPol', 'ish'] +['Ġknock', 'ed'] +['Ġfrustr', 'ation'] +['au', 'kee'] +['Ġph', 'osph'] +['iqu', 'id'] +['_c', 'oeff'] +['æŃ', '¤'] +['L', 'atest'] +['ĠD', 'ust'] +['T', 'ipo'] +['Ġmaint', 'ains'] +['Ġmar', 'sh'] +['inc', 'inn'] +['l', 'bl'] +['C', 'are'] +['Ġneighborhood', 's'] +['_g', 'pio'] +['ĠAr', 'senal'] +['D', 'em'] +['ĠW', 'he'] +['_h', 'ook'] +['Ġl', 'dc'] +['ĠHar', 'per'] +['ĠBer', 'keley'] +['Ġgrad', 'uated'] +['Per', 'cent'] +['Ġarr', 'iving'] +['ĠAdvent', 'ure'] +['(s', 'cope'] +["('", '*'] +['qu', 'arter'] +['ĠMar', 'ie'] +['Spe', 'aking'] +['_code', 'gen'] +['Ġimm', 'un'] +['c', 'aster'] +['ãĤ', 'Į'] +['åķ', 'Ĩ'] +['ĠDim', 'ensions'] +['.rec', 'ord'] +['Ġtext', 'o'] +['ĠMich', 'elle'] +['P', 'ending'] +['(', 'by'] +['_P', 'AR'] +['uch', 't'] +['be', 'e'] +['.Th', 'read'] +['amp', 'ire'] +['k', 'now'] +['ĠClin', 'ical'] +['Ġmargin', 'Bottom'] +['Ġdistingu', 'ish'] +['.F', 'ull'] +['.', 'undefined'] +['ĠSequ', 'elize'] +['################################################################', '############'] +['Ġeduc', 'ated'] +['_O', 'VER'] +['åº', 'ı'] +['ĠÂł', 'ĠÂł'] +['_e', 'ach'] +['Ġur', 'ge'] +['de', 'part'] +['Ġdon', 'ors'] +['ĠA', 'u'] +['Ġbill', 'ions'] +['Ġbelong', 'ing'] +['_', 'age'] +['_', 'Int'] +['Ġsub', 'stances'] +['m', 'achine'] +['!!', '!ĊĊ'] +['Ġjson', 'ify'] +['ib', 'bean'] +['ĠC', 'ad'] +['Ġend', 'Time'] +['Ġc', 'ycling'] +['ĠUIT', 'extField'] +['Ġle', 'verage'] +['Ġvan', 'illa'] +['e', 'at'] +['La', 'unch'] +['(', 'pt'] +['st', 
'ates'] +['ĠControl', 's'] +['ĠRes', 'pons'] +['ĠJ', 'ake'] +['Ġas', 'leep'] +['fort', 'unate'] +['.next', 'Line'] +['Size', 'Mode'] +['ìĿ', '¼'] +['Testing', 'Module'] +['G', 'erman'] +['ĠInvest', 'ig'] +['.re', 'verse'] +['ĠB', 'ACK'] +['(', 'DateTime'] +['Ġnon', 'profit'] +['ĠEx', 'pect'] +['Ġt', 'anto'] +["']", '),'] +['ĉ', 'the'] +['M', 'ultiple'] +['(get', 'Activity'] +['_W', 'AIT'] +['Ġj', 'á'] +['de', 'cor'] +['lev', 'ance'] +['ĠGit', 'Hub'] +['min', 'ation'] +['_qu', 'antity'] +['.Sc', 'anner'] +['ĠL', 'ion'] +['éĶĻ', '误'] +['Ġd', 're'] +['Ġtan', 'tra'] +['Ġcontent', 'Type'] +['Ġf', 'id'] +['_', 'alt'] +['NS', 'IndexPath'] +['-', 'pl'] +['åĮ', 'ĸ'] +['Ġantib', 'iot'] +['table', 's'] +['ac', 'ial'] +['ĠReg', 'istry'] +['Ġol', 'ive'] +['ig', 'ers'] +['Ġsubscri', 'ber'] +['_p', 'res'] +['ĠSy', 'ntax'] +['Ġlo', 'vers'] +['.', 'Byte'] +['old', 'ers'] +['_for', 'ward'] +['al', 'ways'] +['C', 'aption'] +['Pr', 'iv'] +['ĠT', 'ampa'] +['is', 'ateur'] +['-labelled', 'by'] +['ĠTo', 'String'] +['Ġì', 'Ĥ¬'] +['Ġinit', 'iated'] +['W', 'F'] +['Ġinstitution', 'al'] +['in', 'ject'] +['ĠSc', 'r'] +['Ġdo', 'ctrine'] +['Ġsp', 'acious'] +['is', 'ure'] +['ĠAn', 'a'] +['"', 'time'] +['ess', 'aging'] +['Ġc', 'id'] +['ĠN', 'an'] +['Ġin', 'complete'] +['T', 'AG'] +['-b', 'uild'] +['Dec', 'ember'] +['Ġres', 'idual'] +['(P', 'DO'] +['ĠList', 'en'] +['Ġg', 'lyph'] +['Ġg', 'aps'] +['ne', 'a'] +['.R', 'ect'] +['Ġsa', 'u'] +['ĠPhot', 'ograph'] +['Ġexec', 'utable'] +['ĠExp', 'ert'] +['Cor', 'outine'] +['_s', 'izes'] +['ĠN', 'L'] +['.is', 'Valid'] +[');', '}Ċ'] +['-', 'reg'] +['Ġc', 'iting'] +['c', 'wd'] +['ĠOtt', 'awa'] +['ĠB', 'att'] +['Ġrenew', 'able'] +['Ġprelim', 'inary'] +['Ġas', 'ylum'] +['Ġw', 'rist'] +['Ġutil', 'iz'] +['Ġdet', 'ention'] +['F', 'ast'] +['Ġan', 'ge'] +['incinn', 'ati'] +['Ġste', 'ering'] +['ĠNa', 'N'] +['ios', 'ity'] +['/', 'page'] +['Ġè', '¿'] +['ster', 'ol'] +['Ġdis', 'g'] +['(', 'DB'] +['ĠDESC', 'RIPTION'] +['Ġ_', '$'] +['Ġobst', 'acle'] +['Ġb', 'izarre'] 
+['Ġextr', 'action'] +['_ex', 'pected'] +['Ġlos', 'es'] +['ĠCele', 'br'] +['Ġhtml', 'For'] +['Ġexplo', 'it'] +['олÑĮз', 'ов'] +['XY', 'Z'] +['Ġmagn', 'et'] +['amp', 'ed'] +['Ġat', 'oms'] +['S', 'ources'] +['pect', 'ives'] +['Ñģ', 'ли'] +['Ġ=', 'čĊ'] +['Ġd', 'are'] +['ĠWal', 'ter'] +['Ġbright', 'ness'] +['Ġan', 'notations'] +['ë', 'ı'] +['is', 'ke'] +['S', 'chedule'] +['.', 'images'] +['ros', 'so'] +['Ġ"', '..'] +['g', 'amma'] +['Ġin', 'structor'] +['Ġover', 'write'] +['-', 'am'] +['Ġdevast', 'ating'] +['ĠSaint', 's'] +['Ġh', 's'] +['Ġbon', 'uses'] +['$', 'output'] +['ij', 'd'] +['(Action', 'Event'] +['mon', 'itor'] +['Ġmatt', 'ress'] +['Jan', 'uary'] +['.j', 'p'] +['Ġcar', 'acter'] +['Ġim', 'pose'] +['_re', 'st'] +['ĠSign', 'ature'] +['Ġcoron', 'avirus'] +['ãģ', 'Ĭ'] +['_com', 'pare'] +['Me', 'asure'] +['it', 'ated'] +['el', 'ijk'] +['ig', 'os'] +['es', 'ar'] +['Ġrush', 'ed'] +['met', 'ry'] +['_SE', 'PARATOR'] +['_W', 'E'] +['_ATTR', 'IBUTE'] +['Ġy', 'aml'] +['Ġspec', 's'] +['ĠR', 'ah'] +['ph', 'eric'] +['ĠInvest', 'ment'] +['ä', 'll'] +['Ġappe', 'aling'] +['Ġview', 'port'] +['ç', '©'] +['Ġmargin', 'Left'] +['Ġsub', 'tract'] +['ĠED', 'IT'] +['ĉ', 'ArrayList'] +['gr', 'ading'] +['ĠF', 'ailure'] +['as', 'per'] +['EE', 'K'] +['(n', 'ow'] +['<', 'object'] +['ĠAl', 'ignment'] +['ple', 'ado'] +['q', 'tt'] +['(', 'ERROR'] +['ĠIN', 'VALID'] +['Ġuser', 'id'] +['ra', 'ises'] +['ID', 'I'] +['Ġvari', 'ance'] +['ĠN', 'il'] +['/', 'delete'] +['_M', 'AIN'] +['.T', 'oken'] +['.C', 'ategory'] +['>', ')Ċ'] +['Coll', 'ision'] +['ĠGre', 'ater'] +['ĠR', 'acing'] +['al', 'an'] +['Ġmon', 'etary'] +[',', 'new'] +['ĠS', 'orry'] +['.', 'Enable'] +['ĠInstant', 'iate'] +['oll', 'en'] +['ë©', '´'] +['ĠCall', 'ing'] +['_h', 'our'] +['AD', 'A'] +['Ġsh', 'y'] +[')', '**'] +['Ġ==', '>'] +['Ġes', 'pecial'] +['Ġinterpre', 'ted'] +['!', '="'] +['Ġpharm', 'acy'] +['.s', 'ingle'] +['ĠC', 'ialis'] +['Ġpar', 'as'] +['.to', 'UpperCase'] +['ĠDem', 'on'] +['Pr', 'ime'] +['Ġrank', 'ings'] +['Add', 'ing'] 
+['_H', 'ASH'] +['ĠEx', 'am'] +['Ú', '©'] +['ĠVict', 'or'] +['Ok', 'ay'] +['"]', ';čĊ'] +['Ġfort', 'une'] +['ĠF', 'ETCH'] +['exp', 'and'] +['.Inter', 'op'] +['Ġb', 'arn'] +['æ', '¶Ī'] +['ue', 'vo'] +['Ġspec', 'ulation'] +['âĶĢâĶĢ', 'âĶĢâĶĢ'] +['ĠN', 'u'] +['ĠBl', 'ues'] +['(f', 'name'] +['Ġinhab', 'it'] +['Ġ\\"', '%'] +['C', 'ES'] +['ular', 'io'] +['_c', 'r'] +['Ġvalid', 'ated'] +['Ġmid', 'night'] +['ank', 'ing'] +['Ġincorpor', 'ate'] +['Ġpurs', 'uit'] +['EX', 'P'] +['pr', 'ime'] +['P', 'id'] +['-', 'US'] +['ĠN', 'urs'] +['ĠW', 'heel'] +['é', 'ĺ'] +['Ġin', 'p'] +['Ġsupport', 'ive'] +['.m', 'ember'] +['ĠSh', 'ot'] +['.Check', 'Box'] +['Ġaff', 'irm'] +['T', 'or'] +['Full', 'Year'] +['Ġconsider', 'ably'] +['cred', 'entials'] +['_', 'opts'] +['R', 'oll'] +['(', 'round'] +['Ġcom', 'ent'] +['_U', 'ART'] +['Ġext', 'ending'] +['R', 'G'] +['result', 'ado'] +['it', 'u'] +['.get', 'Session'] +['Ġattr', 'action'] +['&', 'D'] +['$', 'html'] +['ĠJess', 'ica'] +['ĠAssoci', 'ate'] +['a', 'ñ'] +['_', 'ed'] +['ĠL', 'ag'] +['Ġorig', 'ins'] +['())', '->'] +['add', 'EventListener'] +['IAL', 'OG'] +['åIJ', '¦'] +['.Com', 'pare'] +['Al', 'bum'] +['ĠK', 'u'] +['<', 'Q'] +['arg', 'est'] +['Ġpro', 'long'] +['Ġconfig', 'urations'] +['Ġaccident', 'ally'] +['_ph', 'oto'] +["Ġ''", ';čĊ'] +['Ġver', 'se'] +['B', 'ob'] +['Ġfarm', 'ing'] +['del', 'ivery'] +['ĠM', 'ack'] +['Ġuse', 'Selector'] +['.bootstrap', 'cdn'] +['keep', 'ing'] +['en', 'y'] +['.', 'upload'] +['ĠM', 'ETHOD'] +['cre', 'ator'] +['<', '_'] +['ĠE', 'aster'] +['.', '--'] +['UI', 'Button'] +['ãĤ', 'ī'] +['om', 'eters'] +['Ġsh', 'ine'] +['Ġh', 'ogy'] +['\\', 's'] +['Ġh', 'arness'] +['.C', 'ell'] +['Ġlif', 'ting'] +['Ġcomb', 'ines'] +['ĠOcc', 'up'] +['ex', 'clude'] +['pat', 'ial'] +['Ġres', 'pir'] +['_f', 'it'] +['Ġfif', 'ty'] +['ĠM', 'ol'] +['Ġtun', 'ed'] +['-d', 'imensional'] +['Ġq', 's'] +['Ġto', 'ps'] +['>', '";ĊĊ'] +['quis', 'ite'] +['ch', 'annels'] +['/', 'res'] +['ĠAn', 'alytics'] +['.app', 'compat'] +['/', 'to'] +['Ġon', 'Error'] 
+['(', 'attr'] +['IR', 'M'] +['Ġrag', 'az'] +['-', 'as'] +['.Se', 'cond'] +['orient', 'ed'] +['Ġdon', 'n'] +['Ġlight', 'ning'] +['f', 'id'] +['ĠP', 'le'] +['ãģ¾', 'ãģĻ'] +['t', 'ro'] +['.Tr', 'ue'] +['O', 'bservable'] +['×', 'Ļ'] +['umb', 'ing'] +['Ġpros', 'pective'] +['-f', 'ilter'] +['Ġpurs', 'uant'] +['(p', 'oints'] +['.B', 'ind'] +['Ġp', 'alm'] +['clear', 'fix'] +['ö', 's'] +['ĠG', 'onz'] +['Ġwe', 'aken'] +['Dr', 'ive'] +['en', 'ido'] +['l', 'ld'] +['ob', 'ox'] +['ane', 'an'] +['G', 'ot'] +['ä¿', 'Ŀ'] +['Reg', 'ex'] +['æ', 'ĥ'] +['Ġsal', 'ad'] +['ass', 'is'] +['"', 'net'] +['inherit', 'Doc'] +['ĠR', 'V'] +['qu', 'ier'] +['Ġcl', 'azz'] +['ı', 'ÅŁ'] +['oster', 'one'] +['Ġair', 'line'] +['.list', 'dir'] +['Ġdownload', 'ing'] +['ĠP', 'alm'] +['w', 'aukee'] +['&', 'lt'] +['.B', 'L'] +['_IN', 'LINE'] +['off', 's'] +['<<', '('] +['_new', 's'] +['Ġch', 'ase'] +['/', '><'] +['Ġeuro', 's'] +['ĠEgypt', 'ian'] +['ĠSt', 'ainless'] +['_BO', 'OL'] +['ĠG', 'uild'] +['ĠD', 'ynam'] +['[index', 'Path'] +['Ġ', 'ï'] +['Ġmemor', 'able'] +['ĠCh', 'ampion'] +['Resource', 'Manager'] +['.Log', 'in'] +['ĠForm', 'er'] +['yp', 'ed'] +['Ġl', 'leg'] +[';', '",'] +['D', 'WORD'] +['Ġtax', 'i'] +['Ġbom', 'bs'] +['ra', 'h'] +['.t', 'ags'] +['_test', 's'] +['st', 'ones'] +['âĢĿ', ')'] +['[', 'g'] +['r', 'type'] +['Ġv', 'u'] +['Ġhost', 'ile'] +['Ch', 'ars'] +['ĠPatri', 'ots'] +['/', 'status'] +['<', 'B'] +['ĠIn', 'come'] +['ĠD', 'ad'] +['Ġpat', 'rol'] +['_CH', 'ANGE'] +['Ġup', 'graded'] +['Ġch', 'ina'] +['set', 'q'] +['Start', 'ed'] +['.U', 'ndef'] +['Ġcheck', 'sum'] +['Ġfrustr', 'ated'] +['{', 'o'] +['Ġen', 'f'] +['Ġwood', 's'] +['ĠAny', 'one'] +['Enc', 'ode'] +['ĠQt', 'Widgets'] +['are', 'as'] +['Ġshe', 'er'] +['sk', 'i'] +['end', 'point'] +['_T', 'est'] +['S', 'oup'] +['~~~~~~~~', '~~~~~~~~'] +['(f', 'iles'] +['ĉĉĉĉĉ', 'čĊ'] +['.sp', 'ark'] +['Ġval', 'ued'] +['Ġ%', 'Ċ'] +['.control', 's'] +['ĠXCTAssert', 'Equal'] +['Ġf', 'ame'] +['ĠR', 'ic'] +['D', 'OT'] +['ĠAlbert', 'a'] +['ä½', '¿'] +['os', 
'al'] +['.Web', 'Controls'] +['Ġ', '------------'] +['ĠM', 'is'] +['ĠS', 'YS'] +['Non', 'null'] +['=', 'item'] +['Ġexp', 'ire'] +['Dec', 'ode'] +['_', 'operation'] +['ĠValid', 'ator'] +['.C', 'ENTER'] +['uff', 's'] +['*', 'm'] +['Ġav', 'ant'] +['æ¬', '¡'] +['âĢľ', 'You'] +['.per', 'mission'] +['...', ')'] +['ĠL', 'ic'] +['_co', 'ords'] +['.n', 'ombre'] +['c', 'lo'] +['.Int', 'ernal'] +['ĠCh', 'o'] +['_s', 'w'] +['ĉ', 'Il'] +['cl', 'k'] +['Ġcast', 'le'] +['(l', 'ayer'] +['p', 'it'] +['Ġgu', 'ided'] +['Ġâĸ', 'Ī'] +['Ġsuper', 'b'] +['Ġsup', 'plements'] +['_c', 'ent'] +['Ġpe', 'ek'] +['IN', 'ARY'] +['.Content', 'Alignment'] +['f', 'alls'] +['"))', ';'] +['W', 'all'] +[').', 'čĊ'] +['ĠD', 'anny'] +['irm', 'ingham'] +['IAL', 'IZ'] +['(', 'create'] +['"', 'In'] +['Service', 'Provider'] +['Ġpr', 'iced'] +['mac', 'ro'] +['am', 'ac'] +['.', 'box'] +['----', 'Ċ'] +['ãĥ', '«'] +['ĠS', 'uit'] +['ur', 'st'] +['br', 'u'] +['ourn', 'als'] +['num', 'ero'] +['__', '()Ċ'] +['D', 'as'] +['ĠM', 'itt'] +['ud', 'er'] +['?', '\\'] +['f', 'u'] +['[', 'B'] +['Ġ:', ')ĊĊ'] +['(int', 'er'] +['br', 'ains'] +['Ġatt', 'itudes'] +['Ver', 'ify'] +['Ġsign', 'atures'] +['ack', 'Bar'] +['Ġg', 'd'] +['J', 'ack'] +['.c', 'at'] +['Ġz', 'z'] +['war', 'f'] +['FT', 'ER'] +['");ĊĊ', 'Ċ'] +['Al', 'ive'] +['IC', 'LE'] +['ĠWh', 'atever'] +['Ġout', 'lined'] +['s', 'prite'] +['еÐ', '²'] +['_A', 'B'] +['_DE', 'PTH'] +['Ġcrush', 'ed'] +['aa', 'a'] +['(e', 'v'] +['æľ', 'º'] +['Ant', 'i'] +['IC', 'O'] +['is', 'EqualTo'] +['.s', 'un'] +['ic', 'ulo'] +['s', 'ale'] +['_h', 'ex'] +['ĠV', 'k'] +['apt', 'or'] +['Un', 'ion'] +['ĠDis', 'count'] +['list', 'a'] +['.Undef', 'Or'] +['Ġautom', 'ation'] +['N', 'or'] +['å¯', '¹'] +['åı', 'Ĥæķ°'] +['Ġref', 'lex'] +['ĠLa', 'ure'] +['.showMessage', 'Dialog'] +['.t', 'emp'] +['Ġa', 'kan'] +['Ġ__', '____'] +['.Is', 'True'] +['ARE', 'D'] +['ag', 'le'] +['E', 'nergy'] +['Ġquant', 'ities'] +['âĢĻ', 'é'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ'] +['Ġcitizens', 
'hip'] +['m', 'outh'] +['Ġin', 'appropriate'] +['ĠOut', 'door'] +['White', 'Space'] +['An', 'onymous'] +['load', 's'] +['webElement', 'Properties'] +['T', 'en'] +['Ġacc', 'idents'] +['Ġadvertis', 'ement'] +['ĠY', 'emen'] +['(c', 'all'] +['Ġsl', 'avery'] +['Ñģ', 'п'] +['ĠL', 'am'] +['_BIT', 'S'] +['ome', 'ga'] +['ĠO', 'le'] +['Ġkid', 'n'] +['_A', 'n'] +['ĠR', 'aid'] +['Cre', 'ation'] +['s', 'aved'] +['Ġpro', 'port'] +['W', 'ARNING'] +['\\', 'P'] +['Ġp', 'wd'] +['Data', 'Reader'] +['is', 'cher'] +['ade', 'on'] +['ĠP', 'redict'] +['Ġreason', 'ing'] +['Ġdestroy', 'ing'] +['H', 'el'] +['*', 'd'] +['ĠLeg', 'isl'] +['_P', 'r'] +['ĉĉĉ', 'ĠĠĠĠĠĠĠ'] +['Ġsymp', 'ath'] +['Ġch', 'ess'] +['Ġm', 'am'] +[':', 'hover'] +['Ġconvert', 's'] +['Ġp', 'ela'] +['Ġprogress', 'ion'] +['Ġ"_', '"'] +['ĠG', 'ill'] +['ĉ', 'show'] +['Ġsupposed', 'ly'] +['ac', 'curacy'] +['el', 'in'] +['Ġunf', 'olding'] +['ĠHy', 'per'] +['Ġw', 'anna'] +['Ġup', 's'] +['(', '#'] +['ĠCr', 'iminal'] +['(', 'Point'] +['at', 'Lng'] +['act', 'ly'] +['Ġcontract', 'ors'] +["']", '}'] +['draul', 'ic'] +['ód', 'igo'] +['ĠT', 'T'] +['ĠW', 'ide'] +['ĠAR', 'G'] +['_', 'ic'] +['FLAG', 'S'] +['S', 'chool'] +['Ġclear', 'ing'] +['-be', 'ing'] +['={', '['] +[',', 'const'] +['man', 'ent'] +['Over', 'lay'] +["('", '"'] +['éĩ', 'ı'] +['ĠT', 'imestamp'] +['Ġmail', 'ing'] +['ĠC', 'ake'] +['.Th', 'at'] +['Ġmed', 'itation'] +['q', 'p'] +['Ġemp', 'resa'] +['ĠL', 'ions'] +['Ġw', 'eld'] +['ĠLinked', 'In'] +['Ġc', 'ush'] +['Ġgen', 'ome'] +['.Index', 'Of'] +['ag', 'ain'] +['Ġf', 'allback'] +['Ġcamp', 'ing'] +['re', 'dd'] +['-strip', 'ed'] +['Ġd', 'v'] +['Fe', 'bruary'] +['ĠPro', 'xy'] +['us', 'k'] +['Ġdies', 'el'] +['W', 'RITE'] +['RE', 'AK'] +['L', 'orem'] +['.In', 'voke'] +['-', 'div'] +['Inter', 'ceptor'] +['ĠD', 'H'] +['ia', 'les'] +['Ġvill', 'ages'] +['Ø', '´'] +['ĠEN', 'V'] +['S', 'ys'] +['.X', 'R'] +['Ġpo', 'em'] +['Ã', 'Ĥ'] +['c', 'ade'] +['pl', 'ots'] +['Ġ{', '('] +['.g', 'it'] +['/s', 'vg'] +['nc', 'mp'] +['ĠÄ', 'į'] +['ain', 'es'] 
+['åĩ', '½æķ°'] +['Ġ(', ')ĊĊ'] +['ops', 'is'] +['ĠRel', 'ationship'] +['_', 'aut'] +['ĠB', 'omb'] +['ĉ', 'com'] +['*', 'sizeof'] +['off', 'icial'] +['_p', 'ayload'] +['ĉĉĉĉĉ', 'ĠĠ'] +['.m', 'anager'] +['ĠA', 'round'] +['ĉs', 'end'] +['ĠEx', 'ercise'] +['ĠB', 'illy'] +['iv', 'i'] +['Ġneed', 'ing'] +['_url', 's'] +['_t', 'asks'] +['ĠH', 'em'] +['Ġtear', 'Down'] +['enc', 'rypt'] +['.t', 'ie'] +['Ġas', 'm'] +['IC', 'H'] +['ĠCGRect', 'Make'] +['ìĦ', '±'] +['ul', 'ong'] +['Ġit', 'r'] +['ĠG', 'ST'] +['Ġoffer', 'ings'] +['ro', 'be'] +['EE', 'E'] +['oper', 'ators'] +['_PRO', 'P'] +['ind', 'ent'] +['A', 'DE'] +['or', 'f'] +['ë', 'IJ'] +['Ġbless', 'ed'] +['vas', 'cular'] +['Ġcon', 'oc'] +['H', 'appy'] +['B', 'ridge'] +['ilit', 'ation'] +['j', 'oint'] +['ĠAdmin', 'istr'] +['-', 'transform'] +['Ġmeant', 'ime'] +['/', 'K'] +['ĠBed', 'room'] +['Ġrig', 'id'] +['Ġbrows', 'ers'] +['EM', 'PTY'] +['.S', 'erialize'] +['_', 'ED'] +['Ġst', 'itch'] +['Ġj', 'an'] +['ell', 't'] +['Ġbr', 'ace'] +['Ġtr', 'ails'] +['p', 'ublished'] +['å¯Ĩ', 'çłģ'] +['}', "')Ċ"] +['Ġac', 'ids'] +['Ġ!', '!!'] +['_d', 'irect'] +['>', '());Ċ'] +['aj', 'Äħ'] +['_O', 'CC'] +['Ġplan', 'ets'] +['æ', 'Ł¥'] +['ĠDub', 'lin'] +['Ġser', 'ie'] +['.print', 'f'] +['de', 'ep'] +['`', ')'] +['Ġ\\', '$'] +['ĠÎ', '¼'] +['_V', 'IDEO'] +['end', 'ors'] +['ĠC', 'rypto'] +['F', 'ar'] +['.Trans', 'parent'] +['.T', 'R'] +['ias', 'm'] +['_tr', 'aining'] +['Ġteach', 'es'] +['ĠB', 'elt'] +['Ġlimit', 'ing'] +['ĠK', 'ath'] +['ĠIndex', 'Path'] +['Ġachie', 'vements'] +['Ġser', 'á'] +['interop', 'Require'] +['Ġdis', 'se'] +['.I', 'f'] +['arm', 'ing'] +['uls', 'ion'] +['P', 'o'] +['_DE', 'TAIL'] +['Prot', 'otype'] +['ĠC', 'AL'] +['Ġagre', 'es'] +['.v', 'o'] +['.Execute', 'NonQuery'] +['ĠTop', 'ic'] +["Ġ'", '{}'] +['Ar', 'm'] +['Ġe', 'cc'] +['M', 'ag'] +['Ġserial', 'ized'] +['ĉ', 'conn'] +['c', 'ached'] +['=', 'tf'] +['ĠByte', 'Array'] +['prot', 'obuf'] +['var', 'char'] +['ĉ', 'ASSERT'] +['Ġlist', 'e'] +['_tr', 'igger'] +['·', '¸'] +['Fe', 'el'] 
+['T', 'ahoma'] +['ĠL', 'ik'] +['Ġstruct', 'ured'] +['erg', 'us'] +['.In', 'itial'] +['_', 'ge'] +['cl', 'js'] +['.cont', 'act'] +['Ġand', 'ere'] +['$', 'stmt'] +['_C', 'URRENT'] +['ĠDis', 'cover'] +['$', 'res'] +['form', 'atter'] +['H', 'a'] +['vang', 'st'] +['Ġem', 'erge'] +['ãĢĤ', 'âĢĿ'] +['ĠCabin', 'et'] +['-s', 'quare'] +['éĥ', '¨'] +['Ġr', 'age'] +['ĠA', 'J'] +['ĠV', 'T'] +['sh', 'adow'] +['ĠFa', 'ith'] +['en', 'ames'] +['pret', 'ty'] +['has', 'il'] +['part', 'y'] +['Ġvar', 'char'] +['Ġf', 'otos'] +['Ġal', 'um'] +['ĠBelg', 'ium'] +['.y', 'label'] +['Ġde', 'j'] +['_num', 'bers'] +['Ġh', 'u'] +['.set', 'Adapter'] +['ĠUs', 'ually'] +['(s', 'ample'] +['.Sh', 'ared'] +['Ġbook', 'ed'] +['Ġ>>', '='] +['Ġmin', 'erals'] +['">'] +['pro', 'g'] +['bo', 'o'] +['_m', 'd'] +['_p', 'ack'] +['(ex', 'press'] +['ut', 'z'] +['\\', 'Auth'] +[',', 'id'] +['ĠCh', 'ile'] +['act', 'ice'] +['Ġrecruit', 'ment'] +['Ġpos', 'es'] +['Ġvulner', 'ability'] +['inst', 'anc'] +['or', 'um'] +['d', 'ess'] +['Ġx', 'l'] +['%%%%%%%%%%%%%%%%', '%%%%%%%%%%%%%%%%'] +['(', 'fig'] +['Ġdelet', 'ing'] +['.d', 'el'] +[')', "')Ċ"] +['ĠWeek', 'ly'] +['??', '?'] +['(str', 'cmp'] +['sm', 'ith'] +['Ġpurs', 'uing'] +['-', 'so'] +['ĠApp', 's'] +['/', "'Ċ"] +['Ġdec', 'is'] +['FO', 'RE'] +['Every', 'one'] +['Ġl', 'anes'] +['V', 'irtual'] +['.', 'attach'] +['(', 'Log'] +['ĠMed', 'icaid'] +['(', 'Path'] +['ĠTurn', 'er'] +['/', 'application'] +['Ġport', 'rait'] +['Ġopp', 'ose'] +['check', 'out'] +['Ġfinish', 'es'] +['_M', 'E'] +['Bar', 'rier'] +['S', 'ong'] +['V', 'AR'] +['Ear', 'lier'] +['rell', 'a'] +['Ġh', 'ast'] +['az', 'ar'] +['Ġpull', 's'] +['ng', 'x'] +['Ġinspir', 'ing'] +['Ñĥ', 'Ñİ'] +['-d', 'irection'] +['Ġexplos', 'ive'] +['Ġcreated', 'At'] +['st', 'o'] +['Ġwhe', 'at'] +['ĠB', 'uilt'] +["'", 'ai'] +['Ġtrack', 'ed'] +['ham', 'mad'] +['RowAt', 'IndexPath'] +['_', 'heap'] +['D', 'ue'] +['Ġconnect', 's'] +['.p', 'ublish'] +['em', 'u'] +['Ġbul', 'lets'] +['B', 'AR'] +['ol', 'ate'] +['Ġintern', 'ally'] +['Ġcatch', 
'ing'] +['-p', 'assword'] +['ou', 'ched'] +['æĢ', '§'] +['e', 'ous'] +['Ġx', 'range'] +['Q', 'uality'] +['v', 'v'] +['Man', 'age'] +['(', '($'] +['ac', 'ements'] +['ĠBro', 'thers'] +['ĠHE', 'AD'] +['ĠUn', 'supported'] +['s', 'an'] +['es', 'i'] +['**', '*Ċ'] +['Ġadapt', 'ation'] +['ĠWork', 'er'] +["']", '/'] +['.save', 'fig'] +['(', 'trans'] +['Ø', '¬'] +['ne', 'e'] +['Cor', 'rect'] +['...', '")Ċ'] +['Ġsubmit', 'ting'] +['-p', 'ath'] +['ĉ', 'last'] +['iss', 'an'] +['.x', 'label'] +['ĠS', 'epar'] +['/', 'no'] +['_b', 'est'] +['ĠM', 'ills'] +['_s', 'ock'] +['(f', 'lag'] +['Ġdest', 'inations'] +['em', 'ption'] +['ĠF', 'AIL'] +['å', 'ĴĮ'] +['Ġr', 'p'] +['f', 'act'] +['ĉ', 'len'] +['D', 'AY'] +['Ġse', 'iz'] +['_d', 'st'] +['l', 'ip'] +['.Line', 'ar'] +['ĠB', 'asket'] +['$', 't'] +['$', 'i'] +['-', 'brand'] +['ĠNe', 'il'] +['ĠE', 'q'] +['Ġth', 'ou'] +['og', 'ene'] +['Ġscholar', 'ship'] +['æĽ', '´'] +['Ġs', 'wo'] +['ag', 'inator'] +['en', 'i'] +['(', 'book'] +['Ġbl', 'ink'] +['th', 'us'] +['Ġcancell', 'ationToken'] +['ĠPalestin', 'ians'] +['Ġprofit', 'able'] +['Ġback', 'pack'] +['ens', 'on'] +['<', 'Long'] +['Ġp', 'ools'] +['Ġst', 'icks'] +['Ġspokes', 'woman'] +['Be', 'ing'] +['ĠHer', 'itage'] +['ĠN', 'ike'] +['SH', 'A'] +['ĠNotImplemented', 'Exception'] +['$', 'core'] +['ĠR', 'ico'] +['/', 'latest'] +['ĠC', 'zech'] +['ner', 'Radius'] +['(l', 'ines'] +['Ġsem', 'ester'] +['Ġw', 'ounds'] +['Pro', 'cedure'] +['.m', 'ail'] +['()', '):Ċ'] +['Ġcor', 'rid'] +['ter', 'ed'] +['ĠN', 'CAA'] +['Ġgal', 'axy'] +['_k', 'ind'] +['il', 'k'] +['Ġtr', 'as'] +['_P', 'OL'] +['ĠH', 'et'] +['Ġrefuge', 'e'] +['Ġteen', 'age'] +['.b', 'inding'] +['post', 'al'] +['Ġiç', 'in'] +['ĠData', 'Type'] +['é', 'ĸ'] +['ycl', 'erview'] +[',', 'value'] +['_id', 'entifier'] +['<', 'b'] +['Ġout', 'file'] +['čĊ', 'ĠĠĠĠčĊ'] +['Ġcr', 'é'] +['Ġrespond', 'ents'] +['ĠBe', 'ast'] +['ce', 'led'] +['Ġinter', 'f'] +['-th', 'eme'] +['g', 'if'] +['ĠR', 'angers'] +['IT', 'AL'] +['Ġauthentic', 'ate'] +['Com', 'pletion'] 
+['urs', 'ors'] +['Ġcin', 'ema'] +['Ġdisc', 'our'] +['ĠJ', 'aw'] +['OCK', 'ET'] +['Ġpr', 'ayers'] +['ĠL', 'uis'] +['fr', 'ag'] +['=[', 'Ċ'] +['Ġbr', 'ave'] +['_p', 'ose'] +['C', 'ertificate'] +['-', 'fe'] +['ifer', 'ay'] +['ĠFl', 'ags'] +['Container', 'Gap'] +['ĠC', 'rit'] +['Result', 'Set'] +['ĉc', 'ur'] +['Ġcorrespond', 's'] +['St', 'aff'] +['.Http', 'ServletRequest'] +['Ġneur', 'ons'] +['ĠMain', 'AxisAlignment'] +['ed', 'ar'] +['Ġg', 'ad'] +['_p', 'arts'] +['ĠÎ', '²'] +['Ġf', 'x'] +['/', 'files'] +['ĠB', 'ros'] +['hip', 's'] +['Ġgluc', 'ose'] +['Ġfar', 'ms'] +['Ġment', 'ally'] +['rest', 'aurant'] +['Table', 'Name'] +['ĠMer', 'cedes'] +['.', 'Visual'] +['Ġan', 'ch'] +['inal', 'g'] +['_r', 'untime'] +['Ġpropri', 'etary'] +['Ġintent', 'ions'] +['iz', 'i'] +['S', 'lice'] +[';', '">', 'true'] +['ĠNY', 'C'] +['Ġb', 'ored'] +['ĠD', 'etect'] +['Ġapp', 'ar'] +['Ġje', 'ans'] +['ĠT', 'ak'] +['I', 'OD'] +['ĠH', 'orse'] +['(', 'FILE'] +['(', '?'] +['ri', 'que'] +['optim', 'izer'] +['n', 'at'] +['lo', 'ys'] +['ĉ', 'Token'] +['oub', 'ted'] +['u', 'ess'] +['oco', 'a'] +['Data', 'Member'] +['_P', 'OWER'] +['class', 'List'] +['Push', 'Button'] +['ĠWi', 'Fi'] +['.', 'Stream'] +['.g', 'uild'] +['Ġn', 'og'] +['ĠPortug', 'al'] +['ĠUnt', 'er'] +['Pr', 'imitive'] +['b', 'oss'] +['ĠDe', 'utsch'] +['Ġerot', 'ic'] +['Ġstr', 'conv'] +['.Try', 'Parse'] +['Ġgr', 'ams'] +['.S', 'uccess'] +['_p', 'k'] +['ĠHar', 'vey'] +['-m', 'inded'] +['.c', 'ountry'] +['[]', '"'] +['Ġang', 'el'] +['Ġbe', 'ats'] +['ĠV', 'or'] +['il', 'io'] +['.m', 'aster'] +['s', 'omething'] +['ĠP', 'ACK'] +['(', 'if'] +['Request', 'Body'] +['Ġant', 'es'] +['/w', 'idget'] +['Ġmod', 'o'] +['ĠA', 'W'] +['find', 'er'] +['Ġoptim', 'ized'] +['Ġmiss', 'iles'] +['N', 'B'] +['ĉint', 'ernal'] +['t', 'ex'] +['ĠS', 'ri'] +['Ġdam', 'aging'] +['ĠM', 'ais'] +['-', 'Allow'] +['ĠZ', 'h'] +['-', 'alt'] +['Ġ', '));ĊĊ'] +['è', 'ī'] +['Ġinflu', 'ences'] +['Ġc', 'atal'] +['_REG', 'ISTER'] +['ĠAPI', 's'] +['-cent', 'ury'] +['Ġbi', 'ology'] 
+['ĠAct', 'ual'] +['Ġhe', 'els'] +['TR', 'ACE'] +['_D', 'IG'] +['D', 'ataset'] +['ĠM', 'atter'] +['Ġclass', 'ifier'] +['.w', 'ikipedia'] +['ĠRog', 'ers'] +['Ġdon', 'ated'] +['raw', 'ler'] +['en', 'en'] +['Ġcas', 'inos'] +['ort', 'al'] +['Ġpr', 'ive'] +['s', 'pe'] +['duc', 'ers'] +['.', 'ep'] +['Ġgr', 'asp'] +['ac', 'ji'] +['Ġd', 'airy'] +['Ġb', 'uses'] +['.com', 'm'] +['.', 'ins'] +['ĠI', 'RS'] +['ĠBe', 'er'] +['ad', 'c'] +['o', 'ard'] +['_M', 'ET'] +["Ġ'", "+'"] +['r', 'ans'] +['Ġkind', 'a'] +['ĠâĶ', 'Ĥ'] +['ĠM', 'aur'] +['аÐ', '³'] +['Ġband', 'width'] +['ib', 'us'] +['ĠD', 'ifferent'] +['(m', 'at'] +['ĠRes', 'ume'] +['_UN', 'S'] +['est', 'ablish'] +['Ġfon', 'ction'] +['Sub', 'scription'] +['_com', 'pany'] +['Ġlight', 'ly'] +['.con', 'firm'] +['.y', 'aml'] +['ĠBo', 'ost'] +['Com', 'merce'] +['-', 'template'] +['_DEL', 'AY'] +['ĠH', 'I'] +['Ġn', 'avig'] +['(S', 'ender'] +['ĠH', 'S'] +['_', '"+'] +['ĠRE', 'QUEST'] +['Ġw', 'ifi'] +['="', '"Ċ'] +['])', '->'] +['Ġro', 'pe'] +['Ġviol', 'ated'] +['Ġgl', 'ance'] +['ĠK', 'urd'] +['Ġè', '®'] +['de', 'ck'] +['ĠIS', 'BN'] +['Ġin', 'fect'] +['ĠF', 'oo'] +['Ġget', 'ter'] +['Ġt', 'ener'] +['ap', 'pe'] +['.h', 'h'] +['_h', 'ot'] +['<', 'AM'] +['p', 'oly'] +['!', '",Ċ'] +['Ġconver', 'ting'] +['ĠW', 'WE'] +['RO', 'S'] +["('", '{'] +['Com', 'mit'] +[')', 'L'] +['ĠO', 're'] +['Ġsp', 'arse'] +['Ġdis', 'posal'] +['Ġcan', 'celed'] +['åIJ', 'İ'] +['Ġa', 'er'] +['Ġvin', 'yl'] +['á»', 'ĥ'] +['rec', 'ogn'] +['ark', 'ing'] +['Ġtrick', 'y'] +['*', 's'] +['Ġproceed', 's'] +['Ġis', 'o'] +['Ġco', 'conut'] +['Ġcraft', 'ed'] +['IEL', 'DS'] +['Ġquest', 'o'] +['Ġcomm', 'un'] +['_CON', 'NECT'] +['Ġtraff', 'icking'] +['De', 'ep'] +['a', 'ções'] +['c', 'odigo'] +['ve', 'au'] +['Ġbet', 'ray'] +['int', 'a'] +['T', 'ED'] +['æ', 'r'] +['m', 'art'] +['_B', 'US'] +['/', 'sc'] +['ial', 'ly'] +['Ġcigaret', 'tes'] +['è¯', 'ģ'] +['(n', 'n'] +['Ġmodel', 'ing'] +['/', 'products'] +['w', 'arn'] +['Ġmet', 'ro'] +['ĠI', 'v'] +['&', ')'] +['ĠC', 'able'] +['Î', '»'] 
+['Compar', 'ison'] +['g', 'ary'] +['ĠB', 'A'] +['P', 'ART'] +['Ġp', 'v'] +['_up', 'dated'] +['C', 'redit'] +['orth', 'y'] +['observ', 'able'] +['Ġthe', 'atre'] +['B', 'LE'] +[';', '}ĊĊ'] +['la', 'unch'] +['_str', 'ings'] +['ug', 'o'] +['ĠR', 'PG'] +['-', 'auth'] +['Ð', 'ł'] +['hol', 'm'] +['ĠP', 'and'] +['U', 'id'] +['Ġim', 'ply'] +['ìľ', '¼'] +["']", "='"] +['/', 'User'] +['Ġstr', 'cat'] +['нÑĭ', 'й'] +['Data', 'Adapter'] +['Ġland', 'sc'] +['Ġdipl', 'omatic'] +['ï¼', 'ĵ'] +['************************************************************************', '****'] +['ĠCh', 'icken'] +['Ġbc', 'rypt'] +['.In', 'f'] +['[', 'col'] +['ĠQu', 'antity'] +['-', 'position'] +['Ġdiet', 'ary'] +['Ġfil', 'mm'] +['Is', 'rael'] +['Pre', 'v'] +['ĠMill', 'ion'] +['Ġrem', 'ed'] +['Ġbill', 'ing'] +['Ġout', 'doors'] +['.t', 'm'] +['Ġn', 'ad'] +['F', 'org'] +['Z', 'Z'] +['Ġs', 'sl'] +['],', "'"] +['K', 'T'] +['f', 'req'] +['=', 'document'] +['bl', 'ur'] +['¬', '¸'] +['ĠJeff', 'erson'] +['C', 's'] +['(s', 'ave'] +['Ġstr', 'ap'] +['Ind', 'ia'] +['Ġide', 'ology'] +['BO', 'SE'] +['ĠF', 'P'] +['(', 'ans'] +['Ġfe', 'ver'] +['ĠY', 'am'] +['K', 'ing'] +['à', '²'] +['AT', 'ING'] +['bo', 'hydr'] +['roll', 'back'] +['Ġnew', 'Node'] +['ĠN', 'VIDIA'] +['Ġhon', 'our'] +['ĠCon', 'firm'] +['xb', 'd'] +['Ġsuccess', 'or'] +['/', 'u'] +['l', 'iv'] +['ourn', 'aments'] +['Att', 'achment'] +['Ġgr', 'up'] +['Ġtri', 'be'] +['Ġca', 'res'] +['e', 'ft'] +['_s', 'ame'] +["'", 'label'] +['Ġ', 'ãĢIJ'] +['M', 'otor'] +['Ġin', 'exp'] +['Ġ"', '("'] +['_POS', 'ITION'] +['Ġval', 'ley'] +['ĠResult', 'Set'] +['Ġpres', 'erved'] +['Ġmut', 'ations'] +['Ġquestion', 'ing'] +['mun', 'ition'] +['parse', 'Int'] +['ĠS', 'r'] +['ĠMet', 'adata'] +['âĢĿ', 'ï¼Į'] +['timestamp', 's'] +['Ġtrans', 'itions'] +['í', 'Ļ'] +['Ñ', 'Ĭ'] +['i', 'om'] +['.D', 'o'] +['Ġp', 'ine'] +['Ġf', 'ung'] +['Ġtrans', 'mitted'] +['ct', 'ime'] +['ĠF', 'am'] +['Re', 'vision'] +['B', 'as'] +['UP', 'ER'] +['D', 'estination'] +['toHave', 'BeenCalled'] +['Ġun', 
'fortunate'] +['IN', 'ES'] +['_pro', 'f'] +['Am', 'ong'] +['ĠCy', 'ber'] +['ĠB', 'attery'] +['gen', 're'] +['ĠView', 'Model'] +['-', '='] +['Ġutil', 'ized'] +['p', 'aint'] +['.Integer', 'Field'] +['ern', 'ity'] +['comp', 'iler'] +['âĢĭ', 'ĊĊ'] +['ĠM', 'asters'] +['.To', 'Array'] +['Ġstrt', 'ol'] +['ĠUkrain', 'ian'] +['}', '));Ċ'] +['Ġsh', 'emale'] +['"', 'That'] +['for', 'all'] +['/', 'download'] +['Ġrhet', 'oric'] +['.l', 'atitude'] +['ĠWH', 'EN'] +['Ġshock', 'ing'] +['IF', 'IC'] +['.N', 'ormal'] +['_F', 'OLDER'] +['Ġdr', 'ift'] +['Ġmount', 'ing'] +['-', 'book'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'Ċ'] +['ĠWire', 'less'] +['>', '".$'] +['Ġrel', 'ies'] +['(', 'Console'] +['Int', 'ernational'] +['->', '{$'] +['M', 'id'] +['Ġdis', 'sert'] +['dd', 's'] +['Ġdepos', 'its'] +['ĉd', 'river'] +['#', 'ga'] +['pr', 'ising'] +['print', 'ln'] +['Ġpres', 'enter'] +['Ġmin', 'es'] +['C', 'SS'] +['ĠD', 'ual'] +['(!', '('] +['Ġk', 'am'] +['Ġis', 'Loading'] +['ĠProt', 'ect'] +['.', 'upper'] +['ar', 'ium'] +[']:', 'ĊĊĊ'] +['Y', 'ii'] +['-sh', 'irt'] +['ĠIM', 'AGE'] +['_color', 's'] +['Ġur', 'gent'] +['.Cont', 'ainer'] +['!', '(Ċ'] +['S', 'aturday'] +['Ġsoci', 'eties'] +['ĠTh', 'an'] +['ĠC', 'od'] +['=', '@'] +['Ġattach', 'ments'] +['.m', 'obile'] +['Ġsp', 'ite'] +['Ġb', 'ounce'] +['raw', 'l'] +['instanc', 'etype'] +['ĠTr', 'uck'] +['Ġmanip', 'ulation'] +['(', 'Config'] +['-in', 'st'] +['Ġst', 'or'] +['it', 'ution'] +['Preferred', 'Gap'] +['Ġmain', 'AxisAlignment'] +['Ġlist', 'ened'] +["''", "'ĊĊ"] +['ott', 'age'] +['-', 'project'] +['.AP', 'PLICATION'] +['ĉ', 'root'] +['Ġwh', 'it'] +['Ġb', 'ilder'] +['Ġk', 'er'] +['Ġappl', 'iances'] +['row', 'ave'] +['ìĿ', 'Ģ'] +['ematic', 's'] +['ĠO', 'rg'] +['op', 'ing'] +['_SE', 'ARCH'] +['Ġch', 'am'] +['add', 'ContainerGap'] +['Ġ(', ').'] +['ĠAr', 'row'] +['Il', 'legal'] +['Current', 'ly'] +['Ġus', 'a'] +['Ġpassword', 's'] +['Ġre', 'nown'] +['av', 'ern'] +['ĠEv', 'il'] +['Ġconc', 'at'] +['Ġdu', 'o'] +['Ġv', 'ale'] +['ĠBe', 'an'] +['Ġindic', 
'ators'] +['cm', 'ath'] +['ĠP', 'ump'] +['Nov', 'ember'] +['ific', 'ant'] +['_DOM', 'AIN'] +['reg', 'ar'] +['ĠPort', 'al'] +['"', '$'] +['Ġformer', 'ly'] +['"]', ':Ċ'] +['ĠVis', 'ibility'] +['.getElementsBy', 'ClassName'] +['_RE', 'D'] +['Ġch', 'ampions'] +['à', '´'] +['Val', 'or'] +['_', 'es'] +['*', 'a'] +['-re', 'peat'] +['B', 'and'] +['.st', 'age'] +['Ġbure', 'auc'] +['C', 'nt'] +['et', 'en'] +['-', 'function'] +['Ġm', 'uito'] +['P', 'ID'] +['_', 'editor'] +['Ġcrash', 'ed'] +['de', 'ad'] +['k', 'at'] +['ag', 'h'] +['ĠEX', 'T'] +['ass', 'er'] +['-sm', 'all'] +['Ġreal', 'iz'] +['(', 'Entity'] +['ú', 's'] +['ĠAct', 'ually'] +['ĠEl', 'ite'] +['Ġhel', 'm'] +['(non', 'atomic'] +['ash', 'er'] +['Comm', 'unity'] +['all', 'eng'] +['ir', 'y'] +['ĠG', 'rowth'] +['Ġs', 'ue'] +['Ġfrequ', 'encies'] +['_des', 'criptor'] +['.At', 'tribute'] +['Ġrecip', 'ients'] +['_N', 'S'] +['/', '"+'] +['ib', 'an'] +['Ġath', 'lete'] +['ĠI', 'gn'] +['_D', 'MA'] +['(d', 's'] +['ĠRequire', 'ments'] +['AD', 'I'] +['ere', 'z'] +['\\', 'Admin'] +['br', 'aska'] +['ĠR', 'ust'] +['Rel', 'ation'] +['C', 'OD'] +['ĠV', 'ERSION'] +['em', 'ma'] +['))', '{'] +['.D', 'uration'] +['ĠC', 'amb'] +['-', 'logo'] +['Ġread', 'able'] +['Ġcre', 'ators'] +['()', '];Ċ'] +['Up', 'Down'] +['-h', 'alf'] +['.get', 'Month'] +['(s', 'f'] +['P', 'ic'] +['Ġhun', 'ger'] +['.t', 'x'] +['Ġexceed', 'ed'] +['_se', 'ed'] +['(', '^'] +['_s', 'k'] +['.per', 'form'] +['Ġ>', '::'] +['Ġm', 'ongo'] +['=', 'float'] +['bind', 'Param'] +['Sm', 'art'] +['if', 'a'] +['Ġse', 'curities'] +['Ġpre', 'jud'] +['Ġ,', '"'] +['Ġcor', 'ps'] +['Ġv', 'ra'] +['amac', 'are'] +['it', 'err'] +['(M', 'edia'] +['uch', 'e'] +['Ġc', 'ob'] +['Ġlib', 'er'] +['.', 'geometry'] +['Loc', 'ator'] +['Ġsl', 'iding'] +['Ġsurg', 'ical'] +['_C', 'UR'] +['Ġcon', 'sect'] +['[', '*'] +['ĠRes', 'ort'] +['St', 'ub'] +['_DO', 'UBLE'] +['ĠS', 'oph'] +['Ġelect', 'oral'] +['_dis', 'able'] +['ĠÑģ', 'о'] +['ĠLight', 'ning'] +['Ġment', 'ions'] +['oc', 'y'] +['Ġle', 'aked'] +['Ġrelax', 
'ing'] +['Pres', 'enter'] +['v', 'sp'] +['Ġgu', 'ilt'] +['=-', '=-'] +['.re', 'ply'] +['ĠMir', 'ror'] +['C', 'amp'] +['Ġ+#+', '#+#+'] +['Ġ+#+#+#+', '#+#+'] +['.A', 'uthor'] +['Ġdirect', 'ive'] +['-h', 'ook'] +['íĦ', '°'] +['}ĊĊ', 'ĊĊĊ'] +['@', 'pytest'] +['_r', 'and'] +['m', 'is'] +['Ġcolor', 'ful'] +['u', 'je'] +['lass', 'es'] +['ĠClass', 'es'] +['.h', 'ave'] +['%', '),'] +['é¢', 'ĺ'] +['Ġdistur', 'bing'] +['sub', 'string'] +['ĠK', 'oh'] +['In', 'vest'] +['p', 'urchase'] +['Ġrec', 'ycling'] +['ĠA', 'RT'] +['ier', 'archy'] +['Ġf', 'ps'] +['.check', 'Box'] +['íķ', '´'] +['_m', 'aterial'] +['duc', 'ation'] +['Ġf', 'w'] +['ud', 'it'] +['Ġreview', 'ing'] +['ĠS', 'id'] +['S', 'yntax'] +['ĠW', 'ritten'] +['arg', 'ar'] +['UM', 'E'] +['/', 'q'] +['Class', 'ifier'] +['Off', 'icial'] +['Ġj', 'azz'] +['Ġom', 'ega'] +['Ph', 'ysics'] +['Ġl', 'ugar'] +['_access', 'or'] +['.command', 's'] +['Ab', 'ility'] +['ĠB', 'atch'] +['R', 'AM'] +['Ġencount', 'ers'] +['.', 'Qu'] +['BY', 'TE'] +['ĠD', 'istribution'] +['Ġus', 'o'] +['ĠReco', 'very'] +['appro', 'ved'] +['Ġden', 'ial'] +['/sh', 'are'] +['Linked', 'List'] +[')čĊčĊ', 'čĊ'] +['udd', 'y'] +['Ġf', 'ines'] +['Ġr', 'y'] +['Un', 'icode'] +['ĉ', 'render'] +['Ġprem', 'ises'] +['Ġp', 'on'] +['ali', 'ases'] +['/F', 'oundation'] +['c', 'uda'] +['ĠC', 'ock'] +[',:', ')'] +['(f', 'older'] +['Ġm', 'éd'] +['dr', 'ag'] +['Ġtal', 'ents'] +['ĠĠĠ', 'ĊĊ'] +['е', 'ÑģÑĤв'] +['m', 'ob'] +['.y', 'ml'] +['Ġa', 'ster'] +['Ġdis', 'cre'] +['go', 'al'] +['ĠGT', 'X'] +['ĠS', 'UCCESS'] +['ĠL', 'ONG'] +['(f', 'ind'] +['Ġsing', 'ular'] +['_s', 'z'] +['ĠEth', 'ereum'] +['..', 'Ċ'] +['Ġir', 'res'] +["'))", '{Ċ'] +['Ġmin', 'isters'] +['St', 'eps'] +['ivers', 'al'] +['ĠNever', 'theless'] +['-', 'led'] +['Ġ(', '%)'] +['ç¡', '®'] +['Ġtime', 'zone'] +['Ġstr', 'anger'] +['(re', 'nder'] +['Ġsh', 'util'] +['Ġm', 'ph'] +['Ġtri', 'o'] +['pp', 'y'] +['Ġpred', 'omin'] +['Ġend', 'ors'] +['ĠRuss', 'ians'] +['ĉ', 'row'] +['Ġw', 'izard'] +['.s', 'erialize'] +['Ġcompl', 'ained'] 
+['Ġs', 'ido'] +['Ġdelight', 'ed'] +['-m', 'e'] +['ĠR', 'av'] +['H', 'uman'] +['ad', 'ays'] +['rec', 'v'] +['Work', 'ing'] +['J', 'ump'] +['ĠÃ¥', 'r'] +['ĠAut', 'omatic'] +['_B', 'ase'] +['æł', '¼'] +['aur', 'ants'] +['Â', '¯'] +['æ', '¸'] +['(C', 'Type'] +['IF', 'I'] +['(', 'amount'] +['Ġbelie', 'ving'] +['=', 'mysql'] +['Ġf', 'ir'] +['Ġrest', 'oration'] +['ere', 'co'] +['Ð', '¢'] +['_', "'+"] +['Ġe', 'book'] +['Ġde', 'bris'] +['(input', 's'] +['AY', 'OUT'] +['Ġscre', 'aming'] +['av', 'ia'] +['land', 'er'] +['Ġdist', 'ress'] +['Ġas', 'sembled'] +['ĠA', 'void'] +['(', 'thread'] +['ĠR', 'PC'] +['_EX', 'IT'] +['(', 'queue'] +['и', 'ÑģÑĤ'] +['D', 'll'] +['Ġsk', 'ull'] +['_p', 'ub'] +['che', 'z'] +['min', 'ate'] +['ens', 'en'] +['Ġins', 'ane'] +['b', 'ounds'] +['ĠR', 'osen'] +['Ġcondition', 'ing'] +['process', 'ed'] +['v', 'ideos'] +['f', 'our'] +['.Con', 'v'] +['|', ';Ċ'] +['Person', 'al'] +['cer', 'pt'] +[':UIControlState', 'Normal'] +['Ġdos', 'es'] +['ĠKar', 'l'] +['ĠFre', 'qu'] +['.B', 'ASE'] +['ĠV', 'ote'] +['Ġcon', 'current'] +['ĠMessageBox', 'Icon'] +['ĠÃ', 'ĸ'] +['ĠDub', 'ai'] +['ĠR', 'etail'] +[':', 'number'] +['ĠOb', 'server'] +['ĠBig', 'Integer'] +['_', 'origin'] +['_W', 'ORK'] +['F', 'rames'] +['Ġnot', 'ably'] +['.', 'âĢľ'] +['Ġtrop', 'ical'] +['Ġn', 'iche'] +['am', 'ina'] +['.s', 'ys'] +['(t', 'okens'] +['mod', 'ify'] +['os', 'it'] +['st', 'rom'] +['ĠCom', 'ics'] +['O', 'PTION'] +['T', 'icket'] +['Ġfact', 'ories'] +['Ġdis', 'put'] +['_F', 'ile'] +['ĠFin', 'n'] +['ee', 'e'] +['ĠDisc', 'ord'] +['_m', 'oney'] +['.t', 'pl'] +['_s', 'afe'] +['L', 'B'] +['Ġgl', 'ut'] +['J', 'K'] +['.fl', 'ow'] +['-', 'cont'] +['g', 'os'] +['Ġhor', 'izon'] +['ĠR', 'ush'] +['::', '*'] +['P', 'ipe'] +['ull', 'a'] +['bor', 'ough'] +['he', 'imer'] +['(m', 'ove'] +['(', 'Text'] +['}', ');čĊčĊ'] +['w', 'elcome'] +['ĠCom', 'ponents'] +['Ġgovern', 'ance'] +['c', 'losed'] +['ĉm', 'argin'] +['Ġla', 'undry'] +['ĠTerm', 'inal'] +['iz', 'ards'] +['.', 'âĢĶ'] +['.rem', 'ote'] +['.r', 'adius'] 
+['ĠQue', 'bec'] +['Ġd', 'h'] +['T', 'ech'] +['ĠM', 'ist'] +['s', 'eller'] +['_l', 'iteral'] +['Ġgen', 'ius'] +['Ġbr', 'ains'] +['g', 'em'] +['ĠMe', 'asure'] +['Ġcata', 'st'] +['r', 'ance'] +['.Text', 'Field'] +['Ġconsum', 'ing'] +["Ġ'\\", "''"] +['oubted', 'ly'] +['ĠC', 'ertain'] +['E', 'v'] +['ert', 'i'] +['be', 'ing'] +['Ex', 'perience'] +['Ġ//', '['] +['ĠArab', 'ic'] +['ĠC', 'rist'] +['ĠAz', 'ure'] +['Ġhor', 'a'] +['l', 'adesh'] +['\\', 'Blueprint'] +['d', 'ar'] +['.re', 'l'] +['Ġsup', 'rem'] +['ĠRe', 'agan'] +['ĠAt', 'tributes'] +['-s', 'idebar'] +['Ġuse', 'Styles'] +['ĠA', 'irlines'] +['Ġh', 'ills'] +['/x', 'html'] +['v', 'inc'] +['_m', 'ock'] +['Ċ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĊ'] +['ĠP', 'ill'] +['.Layout', 'Style'] +['ĠCommand', 'er'] +[']', '<'] +['sign', 'ature'] +['Ġ{', '}čĊ'] +['Ġhat', 'red'] +['Ġë', 'ĭ'] +['ole', 'sterol'] +['Ġ', '********'] +['ancell', 'or'] +['c', 'rop'] +['T', 'IM'] +['ĉĉ', 'ĊĊ'] +['ys', 'qli'] +['uit', 'ive'] +['ĉun', 'set'] +['_s', 'el'] +['Ġmen', 'us'] +['t', 'ick'] +['Ġconstit', 'ute'] +['ĠElement', 's'] +['ĠRed', 'is'] +['agg', 'io'] +['_f', 'p'] +['_de', 'pend'] +['em', 'as'] +['CA', 'ST'] +['or', 'ange'] +['j', 'on'] +['ĠEm', 'ily'] +['Ġpot', 'atoes'] +['Ġre', 'ceptor'] +['ĠElect', 'ronic'] +['ĠL', 'ights'] +['Ġcomb', 'ining'] +['ĠSome', 'one'] +['Ġ########', '.'] +['ĠT', 'OD'] +['/', 'show'] +['X', 'd'] +['."', "'"] +['af', 'x'] +['Ġtr', 'agic'] +['St', 'yled'] +['ĠMar', 'co'] +['G', 'allery'] +['d', 'ale'] +['.âĢĿ', 'ĊĊĊĊ'] +['é', 'rie'] +['/s', 'ervice'] +['äº', 'Ĩ'] +['Ġamb', 'ient'] +['_SET', 'TINGS'] +['.Ad', 'apter'] +['l', 'ene'] +['Ġtrav', 'els'] +['Not', 'ice'] +['Ġcle', 'ans'] +['ĠF', 'em'] +['ch', 'air'] +['Ñĥ', 'н'] +['/', 'my'] +['_b', 'ad'] +['ĠEcon', 'omics'] +['IS', 'A'] +['_C', 'NT'] +['(M', 'enu'] +['äº', 'İ'] +['ĠR', 'idge'] +['Ġlength', 'y'] +['D', 'ot'] +['Ġjump', 's'] +['Ġhe', 'y'] +['$', 'pdf'] +['Ġw', 'orm'] +['Ġs', 'ut'] +['Ġsh', 'er'] +['iam', 'o'] +['ĠCal', 'c'] +['trie', 've'] +['Ġc', 'ops'] +['ĠCh', 'rom'] 
+['Ġreg', 'ulated'] +['reat', 'ment'] +['ĠHigh', 'er'] +['ok', 's'] +['Ġde', 'ze'] +['LOC', 'ATION'] +['ongs', 'To'] +['Ġfin', 'ite'] +['Ġvar', 'ies'] +['Ġposition', 'ed'] +["'", 'il'] +['éĩ', 'ij'] +['Ġh', 'ike'] +['(d', 'one'] +['play', 'list'] +['Ġad', 'a'] +['Ġcoast', 'al'] +['ĠN', 'ancy'] +['.DateTime', 'Field'] +['Cpp', 'CodeGen'] +['ĠSimilar', 'ly'] +['re', 'ur'] +['ĠCon', 'tr'] +['ĠH', 'idden'] +['ĠB', 'eta'] +['atch', 'ed'] +['_inst', 'all'] +['.', 'Output'] +['Look', 'up'] +['ĠRich', 'mond'] +['qu', 'ared'] +['Ġm', 'anga'] +['-control', 's'] +['ĠBern', 'ard'] +['L', 'arge'] +['Ġslic', 'es'] +['Ġoff', 'ence'] +['ĠM', 'ega'] +['Ġest', 'ar'] +['Ġjoint', 's'] +['Ġsum', 'm'] +['_pl', 'atform'] +['B', 'uff'] +['.add', 'Subview'] +['Ġret', 'ained'] +['Let', 'ter'] +['.d', 'im'] +['Ġess', 'ere'] +['ĠS', 'caffold'] +['EX', 'PECT'] +['ĉ', 'RE'] +['.long', 'itude'] +['ü', 'nd'] +['Ġstat', 'ue'] +['.add', 'Widget'] +['ĠCar', 'ibbean'] +['add', 'PreferredGap'] +['il', 'de'] +['UIL', 'abel'] +['ĠOp', 'port'] +['Ġimper', 'ial'] +['urs', 'ion'] +['Ġmand', 'ate'] +['Ġpromot', 'ional'] +['Ġv', 'k'] +['ia', 'ÅĤ'] +['Ġp', 'yl'] +['ĠCre', 'ation'] +['оз', 'д'] +['Ġsim', 'pler'] +['.', 'what'] +['ĠRec', 'ent'] +['St', 'orm'] +['.', 'quantity'] +['ĠL', 'ov'] +['"', '-'] +['ubb', 'les'] +['_not', 'ification'] +['(w', 'orld'] +['ur', 'ger'] +['*', '(-'] +[':', '"Ċ'] +['h', 'm'] +['ans', 'hip'] +['ĠAl', 'most'] +['Ġmotor', 'cycle'] +['_f', 'ee'] +['Ġabsor', 'b'] +['ĠVin', 'cent'] +['Ġsound', 'ed'] +['ÃŃ', 'st'] +['Ġpharm', 'aceutical'] +['ht', 'ag'] +['ĠKind', 'le'] +['ital', 'ize'] +['ĠEm', 'peror'] +['oust', 'ic'] +['Ġspecial', 'ists'] +['åħ', '¬'] +['Border', 'Style'] +['/', '\\'] +['RE', 'LATED'] +["(',", "',"] +['(ex', 'pr'] +['Ġh', 't'] +['åį', 'Ī'] +['_C', 'reate'] +['Ġspecial', 'ly'] +['Ġ[]', ';čĊ'] +['Ġhe', 'el'] +['Ġse', 'pt'] +['_', 'arch'] +['(in', 'itial'] +['%', '.ĊĊ'] +['\\",', '\\"'] +['Ġdiscuss', 'es'] +['Ġu', 'pt'] +['Ġ[', '&'] +['Ġman', 'us'] +['.h', 'and'] 
+['ĠM', 'AIN'] +['ĠDen', 'mark'] +['Ġ],', 'čĊ'] +['Ġcr', 'yst'] +['Ġn', 'ack'] +['Co', 'ords'] +['_in', 'ner'] +['Ġmid', 'st'] +['Ġaw', 'ake'] +['ĠÐ', 'ŀ'] +['-b', 'reak'] +['ÃŃ', 'vel'] +['_P', 'ASS'] +['ĠParam', 's'] +['Ġdet', 'r'] +['Ġsp', 'ider'] +['ĠCon', 'cept'] +['Ġpre', 'nd'] +['CH', 'ED'] +['.Ex', 'it'] +['Ġpop', 'ulated'] +['Ġvirt', 'ue'] +['_SE', 'SSION'] +['Ġnou', 'vel'] +['o', 'auth'] +['Ġд', 'аннÑĭ'] +['r', 'ink'] +['.Header', 'Text'] +['atur', 'ated'] +['Ġer', 'st'] +['Ġå', 'ħ'] +['à¥', 'ĩ'] +['_vis', 'ible'] +['ey', 'er'] +['Ġli', 'able'] +['Ġde', 'be'] +['Ġb', 'w'] +['{-', '#'] +['_W', 'IN'] +['df', 's'] +['H', 'over'] +['ĠP', 'UT'] +['-', 'angle'] +['Ġnob', 'le'] +['Ġtr', 'aces'] +['enc', 'v'] +['Ġuser', 'Data'] +['_in', 's'] +['ĠS', 'uz'] +['Ġnews', 'letters'] +['ĠMod', 'i'] +['Ġentreprene', 'urs'] +['Ġtrib', 'ute'] +['Ġrum', 'ors'] +['Ġr', 'r'] +['ĠQu', 'arter'] +['ê³', 'ł'] +['Ġfeed', 's'] +['ó', 'g'] +['Ġen', 'velope'] +['Ġle', 'ar'] +['Ġk', 'ø'] +['develop', 'er'] +['Sim', 'ilar'] +[':', '")Ċ'] +['sub', 'scription'] +['Mod', 'ifier'] +['ital', 'ic'] +['Ġn', 'asty'] +['Ġtermin', 'ation'] +['Ġchar', 'ming'] +['Ġâ', 'Ł'] +['ton', 's'] +['.tr', 'ace'] +['h', 'ots'] +['ĠU', 'R'] +['M', 'ont'] +['Ġjust', 'ified'] +['ĠG', 'ang'] +['ine', 'a'] +['Ġb', 'og'] +['(', 'ap'] +['_', '$'] +['Ġcont', 'amin'] +['.D', 'ot'] +['ĉ', 'Debug'] +['(', 'exports'] +['Ġpa', 'ired'] +['ĠAss', 'ignment'] +['Ġautom', 'obile'] +['ĵ', 'į'] +['Ġph', 'ases'] +['v', 'w'] +['@', 'SuppressWarnings'] +['=', '\\'] +['r', 'ant'] +['-', 'ed'] +['ĉ', 'await'] +['Ġcert', 'ificates'] +["'>", '"'] +['Ġint', 'act'] +['CT', 'RL'] +['M', 'ike'] +['greg', 'ation'] +['AT', 'TERN'] +['Ġre', 'public'] +['_up', 'per'] +['ili', 'ary'] +['Ġcomput', 'ation'] +['h', 'ire'] +['ĠSh', 'in'] +['_', 'ANY'] +['ĠManufact', 'urer'] +['ĠC', 'arm'] +['Ġbear', 'ings'] +['_c', 'omb'] +['c', 'ad'] +['ur', 'istic'] +['Ġwholes', 'ale'] +['Ġdon', 'or'] +['.inter', 'faces'] +['press', 'o'] +['ĠBr', 'un'] +['-c', 
'lose'] +['pro', 've'] +['_S', 'K'] +['ĉf', 'rame'] +['et', 'ros'] +['ĠP', 'ain'] +['_EX', 'P'] +['ĠL', 'T'] +['_f', 's'] +['.dat', 'as'] +['ĉ', 'ss'] +['vo', 'ir'] +['ĠA', 'xis'] +['M', 'ajor'] +['="', '<'] +['[', 'h'] +['Ġprof', 'ess'] +['igr', 'ate'] +['(s', 'core'] +['Key', 'word'] +['"', 'os'] +['ĠĠĠĠ', 'ĉĊ'] +['an', 'alysis'] +['Ġre', 'play'] +['.p', 'ass'] +['\\', 'd'] +['t', 'ls'] +['Ġsan', 'ct'] +['.l', 'ight'] +['_m', 'obile'] +['ÑģÑĤ', 'ÑĮ'] +['ĉt', 'otal'] +['u', 'ity'] +['Ġpa', 'used'] +['N', 'AS'] +['Ġen', 'core'] +['lo', 'e'] +['Ġ-*', '-ĊĊ'] +['.h', 'igh'] +['am', 'pler'] +['ĠSec', 'ure'] +['Ġfrag', 'ments'] +['_', 'vel'] +['ill', 'ary'] +['ĠSte', 'in'] +['ĠD', 'awn'] +['Ġmax', 'imize'] +['à¸', '¢'] +['Ġ/', '^'] +['Ġcontin', 'ually'] +['Ġsh', 'adows'] +['ĉ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ'] +['ĠI', 'ActionResult'] +['Ġinform', 'ación'] +['C', 'HECK'] +['.Selected', 'Item'] +['b', 'undle'] +['ol', 'ley'] +['<', 'Int'] +['AIN', 'ER'] +['ĠW', 'ing'] +['tit', 'les'] +['ount', 'ain'] +['C', 'Y'] +['ĠLoc', 'ale'] +['form', 'er'] +['<', 'context'] +['R', 'adioButton'] +['_s', 'chedule'] +['Ġfab', 'ulous'] +['Rob', 'ert'] +['_PRO', 'FILE'] +['Ġg', 'ates'] +['IM', 'P'] +['ĠPent', 'agon'] +['g', 'old'] +['b', 'ach'] +['employ', 'ees'] +['R', 'otate'] +['Ġch', 'amp'] +['Ġsel', 'bst'] +['Al', 'tern'] +['Ġconvert', 'View'] +['/', ','] +['Ġ~', '('] +['St', 'reet'] +['_', 'place'] +['Ġpersonal', 'ized'] +['P', 'ublisher'] +['ĠSO', 'CK'] +['_NAMES', 'PACE'] +['ĠStand', 'ards'] +['so', 'ever'] +['_C', 'ENTER'] +['Inter', 'est'] +['ô', 't'] +['tem', 'perature'] +['View', 'port'] +['get', 'Resource'] +['Ġeat', 'en'] +['Ġsem', 'pre'] +['Ġab', 'normal'] +['Ġc', 'ylinder'] +['Ġtroub', 'les'] +['n', 'od'] +['Ñĭ', 'в'] +['g', 'ames'] +['_g', 'l'] +['Pl', 'ane'] +['g', 'rey'] +['_t', 'bl'] +['.Component', 'Placement'] +['ĠCh', 'ase'] +['Log', 'ging'] +['man', 'y'] +['ì', 'Ĩ'] +['Ġfl', 'ame'] +['="<'] +['Ġtra', 'jectory'] +['_r', 'ing'] +['Ġhydro', 'gen'] +['tr', 'on'] +['Ġstat', 
'ute'] +['Ġcondition', 'al'] +['Ġtr', 'ay'] +['-s', 'chool'] +['(w', 'idget'] +['$', 'config'] +['Ġrequest', 'ing'] +['.', 'uint'] +['et', 'on'] +['brit', 'ies'] +['Of', 'Type'] +['AD', 'MIN'] +['p', 'redict'] +['Ġg', 'egen'] +['ĠH', 'app'] +['OC', 'UMENT'] +['ĠA', 'part'] +['Ġ----', '-'] +['ro', 'e'] +['u', 'ide'] +['just', 'ify'] +['ĠSqu', 'ad'] +['Ġprof', 'es'] +['.b', 'ot'] +['_c', 'urrency'] +['inn', 'en'] +['ĠM', 'umbai'] +['ĠNum', 'bers'] +['avana', 'ugh'] +['agn', 'itude'] +['âĢľ', 'There'] +['=', 'http'] +['çī', 'ĩ'] +['Ġv', 'b'] +["+'", '{{', '$'] +['Ġin', 'ode'] +['s', 'il'] +['Ġh', 'ace'] +['Ġsever', 'ely'] +['ĠOver', 'view'] +['Ġspr', 'aw'] +['Ġbeach', 'es'] +[':', 'left'] +['·', '»'] +['($', '{'] +['ĠF', 'IRST'] +['ĠSp', 'a'] +['-', 'ass'] +['Ġb', 'aise'] +['ĠN', 'ODE'] +['ĠP', 'izza'] +['P', 'et'] +['(se', 'q'] +['\\', '">Ċ'] +['CppMethod', 'Pointer'] +['Ġv', 'p'] +['Ġi', 'a'] +['_se', 'conds'] +['em', 'et'] +['/b', 'lob'] +['_TH', 'RESH'] +['...', 'čĊ'] +['D', 'est'] +['ĠN', 'H'] +['.data', 'Source'] +['it', 'és'] +['ĠJ', 'ak'] +['s', 'ell'] +['Ġwork', 'shops'] +['<', 'u'] +['Ġr', 'ivals'] +['ĠEX', 'ISTS'] +['h', 'om'] +['-t', 'oken'] +['compat', 'ible'] +['.J', 'Panel'] +['Ġphys', 'icians'] +['art', 'in'] +['Ġdes', 'irable'] +['Ġdistinct', 'ive'] +['.D', 'ep'] +['g', 'id'] +['ili', 'ate'] +[',', 'max'] +['Ġprem', 'iere'] +['Ġq', 'Debug'] +['Ġadvoc', 'acy'] +['Ġwh', 'isper'] +['P', 't'] +['Ġun', 'changed'] +['_q', 'ty'] +['请', 'æ±Ĥ'] +['Se', 'ason'] +['avel', 'ength'] +['ĠP', 'ul'] +['Ġd', 'ÃŃa'] +["']", ']],Ċ'] +['al', 'is'] +['("', '&'] +['bor', 'o'] +['Ġb', 'm'] +['ĠR', 'adi'] +['w', 'rong'] +['ĠGo', 'ing'] +['ime', 'Type'] +['ij', 'i'] +['-', 'feedback'] +['ĠN', 'ames'] +['ĠB', 'apt'] +['Ġprob', 'able'] +['ĠE', 'ther'] +['ĠPolit', 'ics'] +['_prot', 'ocol'] +['lin', 'ing'] +['S', 'at'] +['Ġcor', 'rel'] +['.Pr', 'imary'] +['(null', 'able'] +['RI', 'ORITY'] +['Ġcolor', 'ing'] +['Ġutil', 'izing'] +['d', 'as'] +['Ġexport', 'ed'] +['Ġcar', 'riers'] 
+['Con', 'v'] +['.', 'editor'] +['i', 'ó'] +['(h', 'andles'] +['Ġapprec', 'iation'] +['.', 'import'] +['ĠAust', 'ria'] +['ĠStr', 'ip'] +['il', 'ight'] +['Ġappropri', 'ately'] +['ĠP', 'rest'] +['ĠW', 'ir'] +['ĠUI', 'Application'] +['al', 'chemy'] +['ĠM', 'ob'] +['ĠD', 'etermin'] +['ergus', 'on'] +['register', 'ed'] +['_con', 'vert'] +['ĠVlad', 'imir'] +['.Show', 'Dialog'] +['ref', 'lect'] +['Ġsh', 'ook'] +['Ġass', 'ure'] +['ĠO', 'ften'] +['Ġcivil', 'ization'] +['Ġvocab', 'ulary'] +['fore', 'ground'] +['ĠS', 'cope'] +['Ġunw', 'anted'] +['act', 'ing'] +['Ġ(', '[]'] +['Ġmark', 'ing'] +['.', 'original'] +['ĠMO', 'VE'] +['Ġsport', 'ing'] +['ception', 's'] +['NS', 'Number'] +['S', 'izes'] +['Ġprovinc', 'ial'] +['_Tr', 'ans'] +['Ġproblem', 'atic'] +['d', 'igit'] +['ĠEm', 'ma'] +['lock', 's'] +['ĠC', 'rew'] +['ib', 'a'] +["')", ':'] +['ish', 'a'] +['Ġm', 'amm'] +['Ġocc', 'ured'] +['w', 'cs'] +['(r', 'ule'] +['Ġmerch', 'andise'] +['es', 'pecially'] +['ĠT', 'win'] +['Ġn', 'aming'] +['Ġs', 'log'] +['Ġimpro', 'ves'] +['Ġad', 'her'] +[':', 'text'] +['.h', 'adoop'] +['_HT', 'TP'] +['.to', 'List'] +['.dis', 'abled'] +['Ġl', 'enses'] +['.in', 'i'] +['ĠR', 'are'] +['ĠUb', 'untu'] +['Ġsc', 'ram'] +['ol', 'ation'] +['tit', 'ulo'] +['Every', 'thing'] +['Ġnod', 'ded'] +['icht', 'ig'] +['_const', 'ant'] +['z', 'c'] +['l', 'ift'] +['ĠNot', 'ify'] +['ond', 'o'] +['ĠIN', 'F'] +['("', '+'] +['ĠK', 'az'] +['Ġd', 'read'] +['.m', 'apper'] +['le', 'ur'] +['ĠCome', 'y'] +['ĠN', 'B'] +['ic', 'ers'] +['.P', 'ush'] +['ĠH', 'ack'] +['ĠBrazil', 'ian'] +['_pro', 'd'] +['Ġ//', 'ĊĊ'] +['Ġb', 'icycle'] +['Ġun', 'available'] +['Ġadoles', 'cent'] +['bl', 'k'] +['Ġmit', 'ig'] +['_bl', 'ue'] +['ì', 'ĺ'] +['fade', 'In'] +['ĠUtil', 'ities'] +['ĠM', 'N'] +[';', 'k'] +['<', 'style'] +['-', 'status'] +['ind', 'o'] +['Ġinn', 'ings'] +['Ġg', 'j'] +['Ġ||', '='] +['.e', 'u'] +[':', 'Number'] +['Ġcuis', 'ine'] +['ĠURL', 's'] +['ie', 'k'] +['Ġw', 'ires'] +['ĉ', 'ps'] +['ie', 'g'] +['.m', 'k'] +['so', 'ap'] +['Ġsom', 
'etime'] +['Ġst', 'ap'] +['_s', 'eries'] +['.T', 'arget'] +['æ', 'º'] +['.dest', 'ination'] +['OUN', 'TER'] +['R', 'aises'] +['&', 'A'] +['Ġsmart', 'phones'] +['NI', 'Env'] +['.s', 'dk'] +['Ġhelicopt', 'er'] +['Ġim', 'pe'] +['ĠB', 'irth'] +['A', 'U'] +['b', 'readcrumbs'] +['co', 'ords'] +['Ġexplo', 'red'] +['Ġl', 'od'] +['ĠI', 'p'] +['g', 'able'] +['ian', 'e'] +['Ġart', 'ifacts'] +['Box', 'Layout'] +['ا', 'ر'] +['list', 'ener'] +['.c', 'art'] +['ĠH', 'uff'] +['ĠHind', 'u'] +['ĠData', 'Types'] +['ĠDr', 'upal'] +['IGN', 'ORE'] +['Ġoffset', 's'] +['ĠR', 'TC'] +['-', 'login'] +['æ', '®'] +['ĠQ', 'Object'] +['Ġprosec', 'utor'] +['R', 'ock'] +['_ch', 'at'] +['W', 'ay'] +['ì', '²'] +['Ġneg', 'lig'] +['Ġd', 'ude'] +[';', '<'] +['Ġdeleg', 'ates'] +['_f', 'ailed'] +['/', 'dev'] +['/', 'work'] +['(', 'New'] +['et', 'able'] +['()', '"'] +['(', 'Icons'] +['Ġp', 'ork'] +['ĠModel', 'AndView'] +['ĠV', 'IP'] +['ĠK', 'or'] +['m', 'ix'] +['Ġox', 'id'] +['ĠSC', 'REEN'] +['ĠFour', 'th'] +['/', '",Ċ'] +['Ġte', 'e'] +['ĠSte', 'vens'] +['t', 'icks'] +['Ġp', 'ledge'] +['ib', 'bon'] +['ĠLo', 'an'] +['Ġne', 'o'] +['n', 'umpy'] +['ĠShared', 'Preferences'] +['-', 'oriented'] +['ĠLogger', 'Factory'] +['ĠGraph', 'QL'] +['zen', 'ia'] +['"', '_'] +['W', 'omen'] +['.c', 'ast'] +['Ġdeliber', 'ately'] +['+', 'b'] +['ĠAr', 'n'] +['font', 'Size'] +['Ġm', 'aze'] +['Ġbl', 'amed'] +['.m', 'as'] +['}', ')čĊ'] +['eler', 'ik'] +['Ġsc', 'anning'] +['ĠWork', 'shop'] +['Ġfind', 'en'] +['Ġca', 'ut'] +['UI', 'Font'] +['(', 'return'] +['al', 'in'] +['cast', 'le'] +['////////////////////////////////////////////////////////////////', '////////'] +['Ġincent', 'ive'] +['op', 'ath'] +['b', 'lob'] +['Ġcigaret', 'te'] +['Ġfert', 'il'] +['*/', 'ĊĊĊ'] +['ĠSh', 'ar'] +['Ċ', 'ĠĠĠĠĠĠĊ'] +['Ġunc', 'ertain'] +['ĠS', 'ton'] +['Oper', 'ations'] +['ĠSp', 'encer'] +['Ġdef', 'in'] +['ĠS', 'olo'] +['on', 'est'] +['·»', 'åĬł'] +['Ġu', 'omo'] +['G', 'ive'] +['Ġdent', 'ro'] +[';', 'padding'] +['ent', 'ai'] +['ĠC', 'ars'] +['Ġenthus', 
'iasm'] +['ĠOper', 'ating'] +['S', 'kip'] +['par', 'ation'] +['Ġprotect', 's'] +['Ġre', 'ver'] +['d', 'g'] +['ĠC', 'incinnati'] +['Ġconsect', 'etur'] +['Ġm', 'uss'] +['employ', 'ed'] +['a', 'uses'] +['ink', 'le'] +['.', 'Values'] +['£', '¼'] +['lo', 'v'] +['_W', 'ARN'] +['Ġbook', 'mark'] +['ĠAp', 'ollo'] +['.', 'axis'] +['Ġm', 'ét'] +['Ġop', 'ener'] +['Ġtum', 'or'] +['d', 'an'] +['Ġelement', 'ary'] +['Ġsk', 'ipped'] +['ĠK', 'er'] +['as', 'ia'] +['_res', 'p'] +['Ġdem', 'ol'] +['ĠCan', 'adians'] +['Ġt', 'astes'] +['U', 'Integer'] +["Ġ'", '${'] +['.aw', 's'] +['RO', 'ID'] +['ri', 'ans'] +['M', 'Q'] +['ord', 'able'] +['Ġcous', 'in'] +['Prop', 'agation'] +['(S', 'ession'] +['ph', 'alt'] +['UL', 'D'] +['ĠSc', 'alar'] +['Ġblo', 'ody'] +['Ġ', 'à¦'] +['.m', 'ask'] +[',', 'q'] +['ĠUn', 'its'] +['Ġcent', 'res'] +['ĠPr', 'im'] +['.', ']ĊĊ'] +['ĠSh', 'aw'] +['P', 'rom'] +['ĠTh', 'ought'] +['Check', 'er'] +['_output', 's'] +['(', 'chan'] +['E', 'INVAL'] +['Ġb', 'ob'] +['_c', 'mp'] +['P', 'ed'] +['Ġmat', 'rices'] +['Ġvrou', 'wen'] +['Ġgenu', 'inely'] +['high', 'light'] +['(d', 'isplay'] +[')', '!='] +['Ġdel', 'icate'] +['ĠL', 'uther'] +['ĠM', 'iles'] +['Ġuser', 'ID'] +['%', '='] +['ate', 'urs'] +['_B', 'UF'] +['----', '---Ċ'] +['imit', 'ives'] +['Ġsh', 'elves'] +['sl', 'ow'] +['_in', 'formation'] +['LE', 'G'] +['W', 'r'] +['.form', 's'] +['cel', 'and'] +['/', 'un'] +[':', '&'] +['.âĢĻ', 'ĊĊ'] +['="', '%'] +['Ġpro', 'st'] +['Ġfont', 'size'] +['uc', 'ión'] +['get', 'ic'] +['am', 't'] +['="', '.'] +['Dec', 'or'] +['B', 'rit'] +['Ġ""', ').'] +['Ġfound', 'ing'] +['.File', 'Name'] +['ĠT', 'ier'] +['Ġdisc', 'lose'] +['á', 'm'] +['.s', 'yn'] +['.View', 'Holder'] +['lic', 'ant'] +['_st', 'age'] +['Mon', 'day'] +['Ġdes', 'erialize'] +['t', 'alk'] +['Ġtradition', 'ally'] +['æĢ', 'ģ'] +['Ø', '®'] +['LE', 'X'] +['Ġe', 'h'] +['ĉ', 'ROM'] +['Ġ{', '})Ċ'] +['Quest', 'ions'] +['nc', 'py'] +['Ġfix', 'ing'] +['к', 'Ñĥ'] +['_', 'Key'] +[':', 'x'] +['ĠSTR', 'ING'] +['ĠÑĦ', 'ай'] +['ĉ', 'left'] 
+['ĠBen', 'ch'] +['ell', 'ij'] +['UR', 'RED'] +['ĠDi', 'agram'] +['}', 'catch'] +['/', 'time'] +['ĠMiss', 'ing'] +['db', 'name'] +['Ġs', 'ore'] +['ĠW', 'alt'] +['ugg', 'ing'] +['rep', 'resent'] +['ĠG', 'S'] +['ne', 'ys'] +['ĉ', 'page'] +['Ġvol', 'can'] +['(b', 'tn'] +['Ġexceed', 's'] +['Ġ', 'erg'] +['Ġpil', 'ots'] +['ĠS', 'ed'] +['ers', 'ions'] +['Ġpat', 'ron'] +['R', 'V'] +['/', 'top'] +['.', 'asset'] +['_c', 'ross'] +['.', 'Editor'] +['.t', 'b'] +['Ġwel', 'coming'] +['SC', 'REEN'] +[')', 'findViewById'] +['C', 'oder'] +['', '",Ċ'] +['_P', 'in'] +['ues', 'e'] +['Ġover', 'rides'] +['_', 'ready'] +['Adv', 'anced'] +['Ġop', 'i'] +['-c', 'art'] +['("/', '",'] +['ĠDe', 'b'] +['CR', 'Y'] +['ĠVert', 'ical'] +['ĠO', 'VER'] +['ĠCorpor', 'ate'] +['Ġ""', ';'] +['Ġste', 'pping'] +['e', 'j'] +['Ġaccus', 'ations'] +['Ġor', 'az'] +['_t', 'ail'] +['Ġindu', 'ced'] +['Ġel', 'astic'] +['Ġbl', 'own'] +[',', '//'] +['Ġbackground', 's'] +['âĢĻ', 'une'] +['-s', 'dk'] +['Ġset', 'Interval'] +['Ġincent', 'ives'] +['Ġveget', 'able'] +['_', 'On'] +['exp', 'anded'] +['p', 'ix'] +['_sh', 'ader'] +['ĠSP', 'DX'] +['@', 'example'] +['ĠW', 'rapper'] +['.Z', 'ero'] +['Pos', 'itive'] +['Ġsp', 'inner'] +['Ġinvent', 'ed'] +['ĠG', 'ates'] +['оÑĤ', 'оÑĢ'] +['Ġcompar', 'isons'] +['è', '·'] +['.pr', 'imary'] +['data', 'Provider'] +['add', 'itional'] +['ĉ', 'options'] +['s', 'napshot'] +['.set', 'Horizontal'] +['Ġ"', '{}'] +['ĠFish', 'er'] +['hal', 'ten'] +['<', 'Type'] +['Ġmax', 'Length'] +['ĠM', 't'] +['Ġê°', 'Ģ'] +['.jet', 'brains'] +['Ġident', 'ifies'] +['Ġflow', 'ing'] +['ĠDisc', 'ussion'] +['ats', 'by'] +['Ġsch', 'w'] +['ught', 'y'] +['Ġr', 'ivers'] +['.un', 'ique'] +['_PH', 'Y'] +['ed', 'ral'] +['(', 'll'] +['Ġcs', 'rf'] +['pp', 'ers'] +['ü', 'l'] +['ĠEs', 'pecially'] +['port', 'ed'] +['ĠHarr', 'ison'] +['******', '*/Ċ'] +['Text', 'Color'] +['ìĬ', 'µ'] +['w', 'ire'] +['Ġstatus', 'Code'] +['ĠFin', 'ish'] +['c', 'ence'] +['ĠMcC', 'ain'] +['ĠW', 'or'] +['(', 'await'] +['Ġ)', '->'] +['ĠRegister', 'ed'] 
+['IN', 'ED'] +['k', 'al'] +['par', 'ison'] +['Ġobj', 'eto'] +['V', 'i'] +['mand', 'a'] +['Ġrenew', 'ed'] +['ĠS', 'of'] +['ess', 'el'] +['.nd', 'array'] +['Ġcr', 'ap'] +['ç®', '¡'] +['.ab', 'spath'] +['(', 'up'] +['Ġclear', 'ance'] +['ĠT', 'W'] +['_C', 'OPY'] +['ĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĉ'] +['Ġforest', 's'] +['Ġarg', 'uably'] +['ĠA', 'SS'] +['he', 'y'] +['am', 'el'] +['_f', 'ore'] +['ĠSou', 'theast'] +['Ġab', 'used'] +['Ġpract', 'icing'] +['aked', 'irs'] +['ä¸', '»'] +['_res', 'ources'] +['Ġp', 'ond'] +['.F', 'ixed'] +['Last', 'Error'] +['ĠPsych', 'ology'] +['Ġ"', '//'] +['!', ':'] +['Re', 'usable'] +['Ġmens', 'aje'] +['Ġro', 'spy'] +['Ġb', 'our'] +['Ġvar', 'ieties'] +['Ġem', 'path'] +['((', '{'] +['_', 'org'] +['ĠM', 'es'] +['ĠMag', 'ento'] +['IST', 'ORY'] +['Un', 'less'] +['Ġh', 'j'] +['ĠD', 'uty'] +['J', 'un'] +[',', 'size'] +['Ġpaint', 'ings'] +['Ġdisp', 'ens'] +['d', 'art'] +['Ġbehavior', 'al'] +['Ġr', 'pc'] +['cal', 'culate'] +['fr', 'uit'] +['_m', 'm'] +['ĉp', 'thread'] +['Max', 'Length'] +['Ġc', 'urrencies'] +['_cap', 'acity'] +['ĠO', 'z'] +['Ġfire', 'arm'] +['Ġcoeff', 'icient'] +['Ġbankrupt', 'cy'] +['w', 'art'] +['Ġfat', 'igue'] +['AV', 'A'] +['Ġes', 'pa'] +['_p', 'c'] +['ĠQu', 'otes'] +['_L', 'IGHT'] +['ĠT', 'ickets'] +['Ġrel', 'ates'] +['Ġpublish', 'ers'] +['Ġunlock', 'ed'] +['Ġ//', '----------------------------------------------------------------'] +['ĠInterrupt', 'edException'] +['Ġout', 'look'] +['r', 'n'] +['Ġreb', 'els'] +['W', 'ritten'] +['Ġas', 'ian'] +['ot', 'to'] +['Ġ', 'ĉĉĉĉ'] +['_g', 'pu'] +['T', 'xt'] +['.Image', 'View'] +['Ġsu', 'is'] +['_t', 'ables'] +['.Rec', 'yclerView'] +['Ġwhat', 'soever'] +['è', 'ģ'] +[']', '++;Ċ'] +['assert', 'True'] +['_', 'verify'] +['ĠR', 'ivers'] +['Ġ', ']['] +['J', 'et'] +['id', 'ian'] +['S', 'ibling'] +['Ġgen', 'res'] +['.A', 'ccess'] +['OP', 'S'] +['Ġtr', 'ivial'] +['à¸', 'ª'] +['al', 'en'] +['в', 'ед'] +['ĠS', 'word'] +['Ġscrut', 'iny'] +['(c', 'b'] +['Ġcomm', 'erce'] +['Ġguarante', 'es'] +['_ad', 'v'] +['ĠL', 'ET'] 
+['rec', 'io'] +['Ġh', 'ilar'] +['Ġback', 'yard'] +['ãĢ', 'ı'] +['Ġillustr', 'ated'] +['/v', 'endor'] +['.', 'Util'] +['Ġw', 'ow'] +['LO', 'Y'] +['ĠMar', 'shal'] +['">', "'.$"] +['ĠB', 'ak'] +['Ġmod', 'ifiers'] +['d', 'ictionary'] +['ĠSt', 're'] +['m', 'ultiple'] +['"))', ','] +['ĠC', 'ort'] +["']", '").'] +['(', 'admin'] +['ĠCre', 'ator'] +['Int', 'ernet'] +['(', 'ms'] +['log', 'y'] +['DECL', 'ARE'] +['ĠMarc', 'us'] +['<<', '<<'] +['ãģ', 'ł'] +['_m', 'y'] +['(in', 'st'] +['Ġsc', 'iences'] +['ND', 'ER'] +['.', 'enter'] +['Ġit', 'u'] +['Ġbeh', 'ave'] +['P', 'an'] +['omb', 'ies'] +["='", '<'] +["'))", ';čĊ'] +['ĠM', 'ENU'] +['ĠWork', 'ers'] +['.No', 'Error'] +['Ġbind', 'ings'] +['Ġdis', 'abilities'] +['{', '\\'] +['ĠM', 'unicip'] +['Ġco', 'res'] +['ur', 'ple'] +['ĠN', 'okia'] +['us', 'ions'] +['ĠF', 'itness'] +['.handle', 'Change'] +['Ġjav', 'ascript'] +['ìļ', 'Ķ'] +['(', 'dec'] +['Ġpack', 'ing'] +['-de', 'pend'] +['Ġtrans', 'cript'] +['z', 'eros'] +['_', 'alert'] +['?', '",Ċ'] +['lib', 's'] +['±', 'оÑĤ'] +['Ġ|', 'ĊĊ'] +['tr', 'ained'] +['ĠG', 'ent'] +['ĠR', 'ab'] +['x', 'p'] +['_config', 'uration'] +['å¤', '©'] +['_', 'accept'] +['.rec', 'yclerview'] +[':', 'url'] +['ĠMu', 'hammad'] +['Ġprivile', 'ges'] +['_b', 'ank'] +['uk', 'u'] +['w', 'allet'] +['ĠRO', 'OT'] +['Ġenc', 'uent'] +['?', 'family'] +['ĉ', 'position'] +['Ġc', 'g'] +['Ġprec', 'ip'] +['method', 's'] +['_f', 'ast'] +['in', 'crement'] +['ĠT', 'iger'] +['_OCC', 'URRED'] +['qu', 'ip'] +['ĠH', 'AS'] +['_d', 'om'] +['Ġw', 'reck'] +['b', 'j'] +['Ġd', 'ern'] +['Ġorg', 'ans'] +['.', 'entries'] +['Ġ_', "('"] +['ram', 'ento'] +['ĠJam', 'ie'] +['Ġp', 'unk'] +['IP', 'P'] +['Ġprogram', 'a'] +['Ġatt', 'ain'] +['Ġpro', 'ves'] +['/s', 'ign'] +['Ġanswer', 'ing'] +['Ġl', 'adder'] +['************************', '****'] +['ĠW', 'almart'] +['ĠCONT', 'ENT'] +['duct', 'or'] +['Ġver', 'bal'] +['ĠP', 'ID'] +['c', 'rypto'] +['_CALL', 'BACK'] +['Ġ=', '================================'] +['Ġpot', 'ent'] +['Ġshort', 's'] +['.U', 'ri'] 
+['.un', 'iform'] +[';', 'border'] +['ĠW', 'er'] +['Ġhere', 'in'] +['ll', 'a'] +['ĠI', 'hr'] +['P', 'ixmap'] +['l', 'iteral'] +['!', ')ĊĊ'] +['g', 'eneric'] +['r', 'ust'] +['_script', 's'] +['ost', 'o'] +['it', 'us'] +['ĠCoal', 'ition'] +['Ġrem', 'ot'] +['de', 'ploy'] +['ĠEag', 'le'] +['ãĢģ', 'ãĢĮ'] +['Ġimportant', 'e'] +['ĉ', 'object'] +['Ġseason', 'al'] +['ne', 'j'] +['aid', 'u'] +['Bind', 'View'] +['ĠSi', 'erra'] +['-b', 'g'] +['Ġmake', 'Styles'] +['[', 'offset'] +['G', 'ames'] +['Ġhorm', 'one'] +['AR', 'IO'] +['head', 's'] +['(', 'select'] +['ĠStart', 'ed'] +['@', 'param'] +['_de', 'cl'] +['_b', 'log'] +['Ġa', 'ño'] +['\\', 'Api'] +['ĠMil', 'waukee'] +['Pro', 'vid'] +['An', 'imated'] +['Ġcool', 'er'] +['ĠSe', 'ed'] +['.', 'Edit'] +['Ï', 'Ħ'] +['ĠT', 'aking'] +['Ġborder', 'Color'] +['-found', 'er'] +['.Logger', 'Factory'] +['Ġ""', 'ĊĊ'] +['AL', 'T'] +['ĠL', 'ate'] +['EDI', 'ATE'] +['Ġ);ĊĊ', 'Ċ'] +['af', 'a'] +['Ġcancell', 'ation'] +['At', 'om'] +['ĠB', 'irmingham'] +['emp', 'resa'] +['HE', 'MA'] +['asc', 'al'] +['Ġup', 'side'] +['.V', 'ersion'] +['ĠF', 'older'] +['ĠE', 'ight'] +['ĠV', 'intage'] +['ĠApp', 'Delegate'] +['ĠPre', 'vention'] +['.se', 'parator'] +['ST', 'M'] +['(', 'room'] +['gener', 'ator'] +['Ġc', 'attle'] +['ĉ', 'Z'] +['ĠPart', 'icle'] +["'", '};Ċ'] +['Ġneighb', 'ours'] +['ĠState', 'less'] +['Ġalt', 'itude'] +['Ġsa', 'int'] +['об', 'ав'] +['Ġconv', 'inc'] +['ĠCont', 'ents'] +['Ġje', 'une'] +['(t', 's'] +['Serial', 'ization'] +['(c', 'ollection'] +['ĠJ', 'azz'] +['ĠD', 'od'] +['ĠR', 'och'] +['ac', 'io'] +['comm', 'ended'] +['DEF', 'INE'] +['.on', 'load'] +['Ġspecial', 'ty'] +['PL', 'ACE'] +['_MO', 'VE'] +['Ġaccount', 'able'] +['Re', 'uters'] +['Ġf', 'icken'] +['Ġde', 'pr'] +['W', 'ow'] +['V', 'oid'] +['.s', 'pace'] +['à¸', 'Ĺ'] +['Ġt', 'q'] +['ĠP', 'ets'] +['<', '$'] +['(C', 'urrent'] +['ber', 'ries'] +['plan', 'ation'] +['Ġlist', 'Of'] +['ĠTh', 'u'] +['ĠPR', 'INT'] +['Ġm', 'ismo'] +['Ġdo', 'i'] +['ch', 'k'] +['ĠUn', 'icode'] +['(', 'role'] 
+['Ġvir', 'gin'] +['<', 'Point'] +['_RESP', 'ONSE'] +['-h', 'ouse'] +['ĠVenez', 'uela'] +['EM', 'AIL'] +['Ġp', 'úb'] +['_ex', 'ist'] +['B', 'all'] +['.C', 'L'] +['re', 'ferences'] +['ĠBeautiful', 'Soup'] +['ĉ', 'Expect'] +['TH', 'IS'] +['Ñĥ', 'д'] +['b', 'ane'] +['Ġtemp', 'oral'] +['ER', 'IC'] +['et', 'as'] +['Ġrefresh', 'ing'] +['Ġsec', 'ular'] +['@', 'synthesize'] +['ac', 'cur'] +['Ġn', 'ella'] +['ĠS', 'OL'] +['.p', 'ipe'] +['Ch', 'annels'] +['èĩ', 'ª'] +['Ġinsert', 'ion'] +['á»', 'ĭ'] +['el', 'ia'] +['Ġadjust', 'able'] +['Can', 'ada'] +['ĠI', 'TEM'] +['Ġcur', 'ves'] +['ĠChe', 'ap'] +['let', 'ing'] +['Ġoptim', 'istic'] +['al', 'lo'] +['Ġpolit', 'ician'] +['_down', 'load'] +['=', 'edge'] +['ORT', 'H'] +['Ġmodel', 'o'] +['art', 'o'] +['.', 'rotate'] +['Ġs', 'elenium'] +['æĪ', 'ij'] +['_al', 'ias'] +['Ġrenown', 'ed'] +[".'", '.'] +['Ġc', 'zy'] +['Ġal', 'les'] +['.Com', 'piler'] +['ĠB', 'ass'] +['Conn', 'ector'] +['.R', 'ole'] +['L', 'INK'] +['Ġc', 'riterion'] +['lem', 'etry'] +['Success', 'fully'] +['/p', 'ng'] +['Ġey', 'eb'] +['asp', 'berry'] +['(', 'gr'] +['Ġd', 'angers'] +['Ġcorrect', 'ed'] +['Ġgl', 'ow'] +['Ġelabor', 'ate'] +['ĠB', 'ears'] +['aw', 'ai'] +['="', "'+"] +['Ġpromot', 'ions'] +['Ġmathematic', 'al'] +['Ġ"', '`'] +['_Generic', 'Class'] +['ĠChe', 'f'] +['.S', 'ort'] +['table', 'Name'] +['R', 'IC'] +['Ġvolunt', 'ary'] +['ĠBl', 'ade'] +['-e', 'lect'] +['ĠCom', 'bat'] +['ĠAb', 'ility'] +['Ġab', 'dom'] +['Ġd', 'uck'] +['T', 'mp'] +['åħ', '¨'] +['Ġer', 'ase'] +['.P', 'h'] +['ĠDefault', 's'] +['p', 'artment'] +['_US', 'B'] +['ê', 'te'] +[';', "'"] +['Ġp', 'ads'] +['ĠOb', 'amacare'] +['.T', 'otal'] +['Ġdiv', 'ert'] +['Ġcr', 'icket'] +['Ġrecre', 'ational'] +['(', 'red'] +['ĠC', 'le'] +['R', 'U'] +['Ġmist', 'aken'] +['ĠMont', 'ana'] +['Ġstr', 'ive'] +['_sl', 'ider'] +['ĠPl', 'astic'] +['Ġdecor', 'ated'] +['ĠV', 'P'] +['lic', 'o'] +['ĉf', 'alse'] +['Ġpre', 'fs'] +['(', '\\"'] +['_f', 'alse'] +['i', 'endo'] +['Ġ@', '$'] +['B', 'ucket'] +['act', 'ical'] +['ĠZ', 
'hang'] +['.c', 'ols'] +['.B', 'inding'] +['Ġw', 'ax'] +['_ST', 'ORAGE'] +['Ġlaw', 'n'] +['Ġr', 'f'] +['.Sc', 'ene'] +['ĠCal', 'culator'] +['.d', 'esign'] +['Ġres', 'il'] +['л', 'ем'] +['E', 'mploy'] +['ĠPr', 'ices'] +['ĠP', 'WM'] +['ag', 'i'] +['.e', 'valuate'] +['ĉ', 'param'] +['Ġbr', 'ass'] +['bb', 'en'] +['Ġinflamm', 'ation'] +['ull', 'ivan'] +['Ġan', 'not'] +['Ġp', 'H'] +['iam', 'eter'] +['ĠB', 'TC'] +['(', 'box'] +['Story', 'board'] +['Ġcl', 'ay'] +['.assert', 'Raises'] +['|', 'string'] +['.App', 'ly'] +['Ġmatch', 'er'] +['und', 'ed'] +['Ġsatisf', 'ying'] +['Ġìł', 'ķ'] +['Render', 'ing'] +['_app', 'ro'] +['ind', 'rome'] +['AN', 'EL'] +['_f', 'ix'] +['br', 'ush'] +['.M', 'atch'] +['Ġsm', 'iling'] +['on', 'aut'] +['S', 'unday'] +['Ġdelet', 'ion'] +['Ġencour', 'ages'] +['P', 'ull'] +['Ġreven', 'ge'] +['Ġqu', 'arry'] +['tr', 'ade'] +['Ġc', 'ables'] +['(d', 'elta'] +['ites', 'pace'] +['Ġf', 'h'] +['.b', 'unifu'] +['Ġvi', 'el'] +['_IN', 'CLUDED'] +['ĠT', 'ail'] +['ad', 'ar'] +['of', 's'] +['Ġmet', 'als'] +['g', 'om'] +['_method', 's'] +['Ġn', 'j'] +['.St', 'd'] +['(w', 'in'] +['$', "('"] +['Ġt', 'urtle'] +['ur', 'on'] +['Ġen', 'rolled'] +['ĠH', 'z'] +['ĠBox', 'Decoration'] +['Ġp', 'ont'] +['rel', 'ationship'] +['B', 'i'] +['³', '»'] +['Ġmas', 'cul'] +['Ġsh', 'ades'] +['Ġv', 'r'] +['ĠLog', 'ic'] +['Ġa', 'in'] +['ĠD', 'IST'] +['Ġcoll', 'ar'] +['"', 'profile'] +['Generated', 'Value'] +['ĠP', 'ossible'] +['Ġe', 'ines'] +['ĥ', 'ģ'] +['.time', 'out'] +['ĠE', 'c'] +['Ġjer', 'sey'] +['.D', 'ouble'] +['Ġqual', 'ifying'] +['v', 'or'] +['CRE', 'EN'] +['_A', 'pp'] +['_rec', 'v'] +['Ġali', 'ens'] +['It', 's'] +['E', 'sc'] +['i', 'ator'] +['ĠE', 'clipse'] +['Ġg', 'h'] +['V', 'ict'] +['ĉ', 'html'] +['to', 'o'] +['.', 'const'] +['Ġant', 'erior'] +['ĠW', 'u'] +['(key', 's'] +['Ġul', 'tr'] +['_p', 'oly'] +['ĠT', 'ap'] +['ĠB', 'ud'] +['A', 'WS'] +['Ġcrash', 'es'] +['_t', 'ot'] +['Cont', 'in'] +['-h', 'anded'] +['alth', 'ough'] +['à¸', 'ļ'] +['ific', 'ent'] +['Ġde', 've'] +['ut', 
'ory'] +['ĠW', 'orth'] +['_M', 'S'] +['Ġfloor', 'ing'] +['Ġsell', 'ers'] +['ĠThank', 'sgiving'] +['Ġp', 'ng'] +['Ġval', 'ores'] +['Ġslee', 've'] +['Ġfil', 'le'] +['Ð', 'IJ'] +['Ġappoint', 'ments'] +['Ġv', 'im'] +['User', 'Info'] +['BO', 'OST'] +['Ġpos', 'ed'] +['initial', 'ized'] +['.product', 's'] +['ĠLeaders', 'hip'] +['man', 'uel'] +["'", '%'] +['em', 'arks'] +['Per', 'centage'] +['(d', 'ist'] +['.', 'avatar'] +['(h', 'Object'] +['ä»', 'Ĭ'] +['_', 'iff'] +['ic', 'one'] +[';', ')'] +['_n', 'il'] +['Ġab', 'ol'] +['е', 'ÑģÑĤ'] +['Ġven', 'ues'] +['.Con', 'vert'] +['!', "')Ċ"] +['.B', 'itmap'] +['sk', 'in'] +['_C', 'OLUMN'] +['Re', 'v'] +['G', 'RESS'] +['g', 'ow'] +['Ġw', 'ished'] +['tract', 's'] +['.assert', 'False'] +['Ġscreens', 'hot'] +['Ġfo', 'is'] +['Com', 'b'] +['Line', 'Width'] +['ĠGr', 'ab'] +['Ġint', 'ensive'] +['ĉ', 'sh'] +['+', ')'] +['.first', 'Name'] +['_PRO', 'CESS'] +['Ġt', 'ilt'] +['it', 'ored'] +['.L', 'OG'] +['Ġb', 'ak'] +['Ġintention', 'ally'] +['.play', 'ers'] +['(c', 'anvas'] +['))', ')čĊ'] +['.Pro', 'vider'] +['_P', 'UBLIC'] +['T', 'alk'] +['ĠL', 'iv'] +['ched', 'ulers'] +['Ġl', 'c'] +['ad', 'ic'] +['feature', 'd'] +['.res', 'ources'] +['Full', 'Name'] +['Ġmean', 'while'] +['B', 'uffers'] +['Ġres', 'olver'] +['ĠS', 'AP'] +['_T', 'E'] +['G', 'NU'] +['ĠForms', 'Module'] +['_', 'wh'] +['ĠS', 'we'] +['.widget', 's'] +['Ġcabin', 'ets'] +['Ġsus', 'cept'] +['ĠB', 'ott'] +['activ', 'ex'] +['av', 'ar'] +['ant', 'ics'] +['Ġ"', '="'] +['_k', 'wargs'] +['Ġgame', 'Object'] +['ĠAng', 'le'] +['.I', 'ter'] +['mar', 'sh'] +['ĠB', 'irthday'] +['ĠC', 'MS'] +['request', 's'] +['ĠPear', 'l'] +['_E', 'OL'] +['Ġlin', 'ux'] +['(', 'org'] +['_M', 'ouse'] +['.con', 'structor'] +['Ġz', 'd'] +['Ġk', 'icks'] +['art', 'isan'] +['Ġe', 'ax'] +['K', 'n'] +['pon', 'ge'] +['ĠFin', 'land'] +['Ġmet', 'res'] +['ĠAss', 'essment'] +['part', 'ner'] +['/', 'pre'] +['!', "',Ċ"] +['[', 'Int'] +['Ġos', 'lo'] +['date', 'picker'] +['/', 'String'] +['op', 'lay'] +['ĠHe', 'brew'] +[',', 
'double'] +['Ġtrab', 'al'] +['+"', '\\'] +['ĉ', 'EIF'] +['/', 'text'] +['_F', 'IRST'] +['ĠP', 'ete'] +['Ġe', 'go'] +['Ġextr', 'as'] +['P', 'DO'] +['Ġreg', 'ulate'] +['ĠQ', 'Widget'] +['st', 's'] +['ĠSh', 'ows'] +['ĠN', 'HS'] +['.c', 'ourse'] +['p', 'thread'] +['ĠF', 'uel'] +['.t', 'imes'] +['ĠÂ', '°'] +['Ġstr', 'ides'] +['($', "('#"] +['(', 'words'] +['Ġrhyth', 'm'] +['Ġsp', 'ont'] +['Ġsens', 'ation'] +['Ġsp', 'ike'] +['C', 'losing'] +['页', 'éĿ¢'] +['N', 'umeric'] +['Ġbreat', 'he'] +['Ġfin', 'ale'] +['_F', 'ACT'] +['in', 'ion'] +['Ġch', 'ill'] +['Ġform', 'ally'] +['ANG', 'ED'] +["Ġ'", ":'"] +['ĠпÑĢ', 'и'] +['a', 'q'] +['ĠFab', 'ric'] +['(l', 'at'] +['ĠPr', 'incipal'] +['Ġer', 'ro'] +['oc', 'ale'] +['N', 'om'] +['Ġf', 'ost'] +['_C', 'USTOM'] +['.int', 'ellij'] +['ert', 'ools'] +['Ġcl', 'asse'] +['adi', 'ents'] +['Ġfundra', 'ising'] +['EN', 'E'] +['_OPTION', 'S'] +['_', 'ob'] +['//', '}Ċ'] +['Ġprote', 'ctions'] +['.se', 'ed'] +['N', 'V'] +['term', 'inal'] +[';;', ';'] +['P', 'redicate'] +['Ġì', '¶'] +['Ġbomb', 'ing'] +['G', 'F'] +['Ġch', 'ew'] +['))', ').'] +['qual', 'ified'] +[']', '={'] +['list', 'en'] +['C', 'ENT'] +['d', 'igest'] +['E', 'ast'] +['Ġd', 'iver'] +['Ġend', 'points'] +['Ġe', 'e'] +['Ġcolle', 'ague'] +['Ġdissert', 'ation'] +['_com', 'mit'] +['_D', 'AT'] +['.', 'rc'] +['Ġbre', 'asts'] +['ĠR', 'ug'] +['ĠP', 'il'] +['Contract', 's'] +['ĠBry', 'an'] +['Web', 'View'] +['Ġconcent', 'rate'] +['ĠIn', 'ner'] +["Ġ'", '|'] +['std', 'out'] +['_S', 'ub'] +['>', '-->Ċ'] +['V', 'ol'] +['ĠS', 'SD'] +['))', '),'] +['.', 'Optional'] +['Ġnurs', 'es'] +['Ġor', 'b'] +['_', 'pe'] +[');čĊ', 'čĊčĊ'] +['pl', 'aced'] +['ess', 'er'] +['Ġther', 'apeutic'] +['Ġwhites', 'pace'] +['Ġa', 'ston'] +['Success', 'ful'] +['Ġpr', 'aised'] +['ĠW', 'es'] +['Ġe', 'ighth'] +['ir', 'al'] +['Ġvrou', 'w'] +['Ġf', 'action'] +['_b', 'ias'] +['Ġw', 'itch'] +['Ġnp', 'c'] +['(s', 'b'] +['ĠRod', 'rig'] +['_b', 'ig'] +['Dep', 'endency'] +['ĠAb', 'raham'] +['ard', 'i'] +['C', 'AR'] +['n', 'os'] 
+['Ġabund', 'ance'] +['Ġnut', 'rients'] +['in', 'stein'] +['.V', 'ert'] +['ĠI', 'SS'] +['<', 'U'] +['Ġsum', 's'] +['_h', 'ist'] +['Ġfar', 'mer'] +['ĠA', 'br'] +['Sh', 'ot'] +['ĠBad', 'Request'] +['Ġh', 'ass'] +['ĠR', 'ails'] +['Ġaffili', 'ated'] +['æĿ', '¥'] +['Ġer', 'f'] +['IN', 'F'] +['ĠView', 'Holder'] +['min', 'i'] +['ĠR', 'oth'] +['Ġfaith', 'ful'] +['ĠPhill', 'ips'] +['AND', 'OM'] +['].', '['] +['_P', 'AY'] +['ĠAr', 'ctic'] +['f', 'aker'] +['D', 'igit'] +['M', 'ale'] +['std', 'err'] +['se', 'ys'] +['Ġ', 'Å¡'] +['_rem', 'ote'] +['li', 'que'] +['Ġin', 'def'] +['ĠIndust', 'ries'] +['it', 'ra'] +['_p', 'airs'] +['<', 'iostream'] +['Ġsal', 'aries'] +['ik', 'en'] +['.F', 'rame'] +['PL', 'IC'] +['_S', 'PEC'] +['ĠMed', 'iterr'] +['Ġsystem', 'atic'] +['Ġinter', 'rog'] +['Icon', 'Button'] +['se', 'a'] +['int', 'ro'] +['ĠIss', 'ues'] +['enc', 'rypted'] +['Ġintern', 'ationally'] +['Ġsn', 'printf'] +['Ġpast', 'a'] +['ĠBrad', 'ley'] +['_', 'Status'] +['AL', 'K'] +['_P', 'AD'] +['.l', 'aunch'] +['<', 'select'] +['Ġhar', 'dest'] +['Ġph', 'y'] +['Ġ((', '*'] +['-s', 'lide'] +['ĠNob', 'ody'] +['S', 'u'] +['Ġas', 'ÃŃ'] +['close', 'st'] +['_initial', 'izer'] +['Ġsupport', 'er'] +['-g', 'en'] +['Ġt', 'ales'] +['Ġcor', 'p'] +['_f', 'u'] +['s', 'at'] +['ne', 'ighbor'] +['.M', 'igrations'] +['Ġal', 'gun'] +['Ġsin', 'on'] +['.S', 'pec'] +['?', ',Ċ'] +['.G', 'L'] +['m', 'ale'] +['Ġmon', 'itors'] +['yl', 'an'] +['-L', 'icense'] +['.m', 'atches'] +['ĠA', 'BS'] +['ĠM', 'ast'] +['ĠW', 'allet'] +['($', '("#'] +['Dir', 'ty'] +['Ġco', 'pe'] +['Ġinterpol', 'ation'] +['ous', 'ed'] +['ĠJ', 'ets'] +['.F', 'LAG'] +['.C', 'ancel'] +['.Event', 's'] +['ne', 'ver'] +['ĠM', 'Hz'] +['>', 'D'] +['Ġs', 'ervlet'] +['bast', 'ian'] +['Ġ>', '&'] +['S', 'ID'] +['_cl', 'k'] +['Ġdiv', 'isions'] +['}', "',Ċ"] +['Ġd', 'ildo'] +['Ġpar', 'ade'] +['m', 'ajor'] +['Ġab', 'oard'] +[';', '++'] +['Ġf', 'usion'] +['"},', '{"'] +['ĠDialog', 'Result'] +['ĉ', 'arr'] +['-', 'em'] +['_n', 'r'] +['(h', 'andler'] +['.N', 'ET'] 
+['.Xtra', 'Reports'] +['ĠSh', 'ah'] +['ĠB', 'rief'] +['-', ','] +['Ġprec', 'io'] +['ĉĉĉ', 'ĠĠĠĠĠĠ'] +['Ġt', 'ant'] +['ĠGrand', 'e'] +['/', 'xml'] +['_IC', 'ON'] +['ĠR', 'etro'] +['un', 'que'] +['Ġn', 'ag'] +['to', 'Fixed'] +['X', 'L'] +['Ġdecl', 'aring'] +['ĠCon', 'crete'] +['ĠAm', 'azing'] +['ĉprint', 'k'] +['Ġdeb', 'ates'] +['D', 'ATED'] +['Ġaest', 'hetic'] +['emet', 'ery'] +['Routing', 'Module'] +['ĠNash', 'ville'] +['W', 'AYS'] +['Ġw', 'olf'] +['Ġobserv', 'ers'] +['OT', 'A'] +['ans', 'on'] +['Ġe', 'a'] +['Ġgreen', 'house'] +['ĵį', 'ä½ľ'] +['Ġst', 'air'] +['Ġimmigr', 'ant'] +['_app', 'ly'] +['pe', 'are'] +['ĠBloom', 'berg'] +['_PL', 'AYER'] +['Res', 'p'] +['æŃ', '£'] +['Cho', 'oser'] +['ĠI', 'Collection'] +['P', 'eter'] +['Er', 'ro'] +['.detect', 'Changes'] +['Map', 's'] +['Ġs', 'queeze'] +['ĠHom', 'es'] +['weg', 'ian'] +['Ġformat', 'ting'] +['Ġnegot', 'iate'] +['ul', 'd'] +['ĠN', 'ep'] +['ĠQ', 'B'] +['Ġeconom', 'ies'] +['Ġ*/', ','] +['Ġredu', 'nd'] +['ĠA', 'ber'] +['.IsNullOr', 'WhiteSpace'] +['yc', 'led'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĊ'] +['_S', 'h'] +['Ġske', 'pt'] +['Ġre', 'created'] +['Ġget', 'Type'] +['Ġmarg', 'ins'] +['Ġcolon', 'ial'] +['ch', 'arts'] +['//', '@'] +['Ġprocess', 'ors'] +['è¯', '´'] +['b', 'atis'] +['æĦ', 'ı'] +['ator', 'io'] +['mention', 'ed'] +['P', 'atient'] +['Ġpre', 'y'] +['Check', 'box'] +['_x', 'path'] +['.s', 'kip'] +['ĠMorm', 'on'] +['ĠMemory', 'Stream'] +['CRE', 'MENT'] +['Ġk', 'u'] +['m', 'eld'] +['\\', 'Data'] +['ĠK', 'ernel'] +['il', 'tr'] +['éĢ', 'ģ'] +['(', 'profile'] +['Car', 'bon'] +['RO', 'LE'] +['(', 'pl'] +[']', '*('] +['.m', 'emory'] +['Ġmed', 'al'] +['Ġadvis', 'or'] +['it', 'ät'] +['Ġh', 'dr'] +['ier', 'ung'] +['ĠProvid', 'es'] +['(', 'alpha'] +['Ġteen', 'agers'] +['-', 'parser'] +['.L', 'atLng'] +[']', '()Ċ'] +['Ġfel', 'ony'] +['ĉĉĉĊ', 'ĉĉĉĊ'] +['BO', 'OK'] +['Ġsl', 'ash'] +['Ġclear', 'fix'] +['ĠPro', 'phet'] +['å®', '¹'] +['right', 'ness'] +['-f', 'i'] +['.k', 'ind'] +['ert', 'on'] +['J', 'im'] +['Ġmanip', 'ulate'] 
+['Ġworks', 'heet'] +['ol', 'in'] +['st', 'ars'] +['Ġart', 'ifact'] +['_EM', 'PTY'] +['ĉm', 'ain'] +['-------------', ''", ';'] +['Ġexpress', 'ing'] +['ĠI', 'Q'] +['ĠF', 'act'] +['/************************************************************************', '*******Ċ'] +['_m', 'ass'] +['))', ':'] +['Ġcon', 'dom'] +['Ġcreate', 'State'] +['omet', 'own'] +['Ġir', 'r'] +['Ġ>', '('] +['>', 'B'] +['iter', 'ation'] +['ãĥ', 'ª'] +['Ġshirt', 's'] +['ount', 'y'] +['->', '$'] +['_S', 'IGN'] +['ĠD', 'ale'] +['Ġj', 'j'] +['E', 'asy'] +['F', 're'] +['ĠN', 'y'] +['Ġch', 'lor'] +['match', 'ed'] +['ĠG', 'erm'] +['-', 'UA'] +['ĠN', 'athan'] +['educ', 'ation'] +['-y', 'ard'] +['-', 'che'] +['h', 'ouses'] +['r', 'itional'] +['Ġprox', 'imity'] +['Ġdies', 'em'] +['áºŃ', 'p'] +['Ġd', 'rought'] +['.a', 'udio'] +['ĠLe', 'o'] +['Ġfavor', 'able'] +['in', 'ch'] +['ĠD', 'aw'] +['rib', 'ly'] +['_st', 'udent'] +['id', 'able'] +['O', 'VE'] +['Ġlack', 's'] +['ounc', 'ing'] +['.b', 'usiness'] +['Ġre', 'open'] +['may', 'be'] +['_G', 'LOBAL'] +['Ġdress', 'es'] +['ĠEd', 'wards'] +['ens', 'ible'] +['ĠHard', 'ware'] +['ĠEx', 'cellent'] +['ĠTime', 'Unit'] +['CTION', 'S'] +['Ġsched', 'ules'] +['Ġseg', 'ue'] +['Op', 'ens'] +['am', 'men'] +['-', 'Identifier'] +['Ġst', 'aring'] +['Ġhapp', 'ily'] +['ĠH', 'ob'] +["'", '_'] +['Ġ"', ');'] +['ament', 'os'] +['et', 'ched'] +['Ġ/>', '}Ċ'] +['.', 'Users'] +['Ġinterrupt', 'ed'] +['Contact', 's'] +['Ġreg', 'istro'] +['in', 'burgh'] +['CH', 'A'] +['_', 'imp'] +['ph', 'is'] +['s', 'ay'] +['Ġretail', 'er'] +['.N', 'ODE'] +['/', 'maps'] +['_L', 'AST'] +['ĠCh', 'arge'] +['_g', 'uard'] +['Coll', 'ider'] +['ĠStateless', 'Widget'] +['":', '["'] +['("', '../../'] +['iox', 'ide'] +['ĠS', 'und'] +["Ġ''", ';'] +['un', 'set'] +['add', 'Widget'] +['л', 'Ñİ'] +['el', 'les'] +['alk', 'er'] +['A', 'rc'] +['Ġded', 'uct'] +['G', 'UILayout'] +['ĠV', 'illa'] +['Ġfor', 'bidden'] +['_', 'where'] +['Ġ\\', '/'] +['ĠT', 'ib'] +['_A', 'X'] +[']', 'čĊčĊ'] +['ĠB', 'ir'] +['Ġb', 'end'] +['ĠMA', 
'KE'] +['ĠM', 'ET'] +['Ġfut', 'ures'] +['Ġweight', 'ed'] +['""', '"čĊ'] +['Ġauthor', 'ize'] +['(pro', 'gram'] +['},', '{"'] +['Ġcoeff', 'icients'] +['ê', 's'] +['Per', 'Page'] +['ĠBath', 'room'] +['ĠPublish', 'ing'] +['G', 'PL'] +['Ġsub', 'missions'] +['ĠNUM', 'BER'] +['j', 'Äħ'] +['Ġaddition', 'ally'] +['em', 'pre'] +['ĠSh', 'el'] +['ot', 'yp'] +['S', 'olution'] +['Ġth', 'under'] +['_', 'ec'] +['ĠĊ', 'ĠĠĠĠĊ'] +['ĠF', 'ellow'] +['Ġk', 'ay'] +['Ġnew', 'State'] +['ONT', 'AL'] +['Im', 'plementation'] +['.L', 'ook'] +['Ġ', 'ents'] +['Ġl', 'ors'] +['ĠB', 'IG'] +['f', 'ab'] +['Ġaver', 'aged'] +['ĠFe', 'edback'] +['ĠW', 'ells'] +['Ġm', 'artial'] +['Ġind', 'ul'] +['ĠComm', 'unist'] +['ĠFore', 'x'] +['ĠAgricult', 'ure'] +['"', '['] +['Ġqu', 'ar'] +['ĠK', 'ont'] +['ĉ', 'view'] +['.', 'Bytes'] +['des', 'ktop'] +['ĠM', 'akes'] +['akes', 'peare'] +['.Null', 'able'] +['Ġspot', 'light'] +['V', 'B'] +['ow', 'y'] +['(t', 'orch'] +['tr', 'idge'] +['_b', 'ounds'] +['Ġapolog', 'ize'] +['.add', 'Item'] +['ant', 'd'] +['*', ');Ċ'] +[',', 'u'] +['(g', 'en'] +['ç»', 'ĵ'] +['re', 'ator'] +['ĠC', 'ord'] +['ou', 'pper'] +['.m', 'etro'] +['Ġ', 'ew'] +['ĠW', 'ORD'] +['.A', 'fter'] +['Ġdet', 'ained'] +['ĠHam', 'mer'] +['ex', 'isting'] +['Ġo', 'st'] +['Ġmon', 'ument'] +['-c', 'ustom'] +['User', 'ID'] +['ĠN', 'om'] +['Ġre', 'jection'] +['(d', 'im'] +['Ġsingle', 'ton'] +['ĉd', 'ie'] +['ari', 'ance'] +['re', 'ports'] +[']', '!='] +['eld', 'a'] +['Ġpreval', 'ence'] +['_reg', 's'] +['."', '.'] +['Ġfemin', 'ist'] +['Code', 'c'] +['Ġ', '**Ċ'] +['(label', 's'] +['_M', 'ARK'] +['FA', 'ILED'] +['Ġadminister', 'ed'] +['W', 'N'] +['ĠĠĠĠĠĠĠĠ', 'ĉĉ'] +['Ġn', 'oun'] +['w', 'ig'] +['Ġg', 'otta'] +['Ġr', 'if'] +['-', 'im'] +['ĠPaul', 'o'] +['ĠCommand', 'Type'] +[']', '))ĊĊ'] +['-z', 'ero'] +['Tr', 'aining'] +['Ġl', 'ord'] +['_', 'art'] +['re', 'ddit'] +['C', 'ert'] +['Ġpes', 'o'] +['R', 'ot'] +['Ġend', 'anger'] +['.d', 'r'] +['user', 'Info'] +['un', 'ts'] +['n', 'v'] +['ĠTrail', 'er'] +['-f', 'irst'] +['(m', 
'ake'] +['Ġbenef', 'ici'] +['-bl', 'ack'] +['i', 'ÃŁ'] +['Ġund', 'oubtedly'] +['Ġm', 'ex'] +['ĠAnc', 'ient'] +['(', 'as'] +['Ġdes', 'cent'] +['P', 'ick'] +['Ġrep', 'lica'] +['$', 'obj'] +['ä', 'hr'] +['Ġar', 'rows'] +['ft', 'y'] +['ĠLib', 'ya'] +['ug', 'a'] +['charg', 'ed'] +['T', 'ur'] +['Ġh', 'omic'] +['iss', 'en'] +['ĠF', 'ake'] +['Ġbe', 'ers'] +['Ġsc', 'attered'] +['(', 'Time'] +['UT', 'IL'] +['Ġbureauc', 'r'] +['/pl', 'ain'] +['Ġstick', 'ing'] +['FA', 'IL'] +['ĠC', 'ovid'] +['Th', 'ird'] +['_p', 'resent'] +['ĠPier', 're'] +['Ġë', 'ª'] +['Ġ[...', ']ĊĊ'] +['Pro', 'b'] +['ĠTra', 'ffic'] +['ica', 'o'] +['do', 'ctor'] +['Ġ),', 'ĊĊ'] +['T', 'abs'] +['al', 'u'] +['ï¼ļ', 'âĢľ'] +['Ġinher', 'ent'] +['_N', 'o'] +['rit', 'is'] +['ĠPro', 'of'] +['.b', 'asename'] +['ä¼', 'ļ'] +['Ġch', 'im'] +['ĠProt', 'ected'] +['c', 'rit'] +['Ġpr', 'one'] +['Ġк', 'он'] +['ĠHero', 'es'] +['Ġan', 'xious'] +['Ġan', 'os'] +['Ġweek', 'ends'] +['Ġs', 'ext'] +['Ġredu', 'cer'] +['=', 'UTF'] +['h', 'alf'] +['ĠS', 'aw'] +['.m', 'm'] +['Ġnue', 'va'] +['.current', 'Target'] +['.l', 'ua'] +['_EXT', 'ENSION'] +['ĉ', 'reg'] +['ĠC', 'trl'] +['_', 'align'] +['accept', 'able'] +['Ġrush', 'ing'] +['fr', 'ac'] +['Ġbo', 'asts'] +['F', 'ive'] +['Â', '±'] +['ĠTem', 'perature'] +['>', '):'] +['Ġchar', 'ter'] +['RE', 'ATED'] +['Ġsubject', 'ed'] +['Ġop', 'c'] +['health', 'y'] +['使', 'ç͍'] +['ĠScient', 'ific'] +['Ġfra', 'u'] +['ri', 'ages'] +['à¸', 'Ķ'] +['.in', 'ventory'] +['ation', 'ale'] +['M', 'ad'] +['min', 'utes'] +['>>', '();Ċ'] +['ĠEn', 'v'] +['Ġrecord', 'ings'] +['Ġsusp', 'icion'] +['sql', 'ite'] +['ĉ', 'read'] +['ãģ', '¦'] +['Ġwor', 'ries'] +['.put', 'String'] +['ĠSh', 'anghai'] +['(', 'uid'] +['r', 'er'] +['ĠvÃŃ', 'de'] +['")', ':'] +['Ġmethod', 'ology'] +['Ġк', 'оÑĤоÑĢ'] +['cc', 'c'] +['av', 'ad'] +['Ġindu', 'ction'] +['ĉ', 'Thread'] +[',', 'string'] +['ạ', 'i'] +['neh', 'men'] +['u', 'ition'] +['Ġ*', '__'] +['.em', 'f'] +['Ġì', 'ľ'] +['/th', 'emes'] +['ĠN', 'ine'] +['.', 'One'] +['ĠEm', 'bed'] +['Ġf', 
'az'] +['u', 'ations'] +['Ġpriv', 'ately'] +['Ġl', 'ing'] +['[', 'F'] +['ush', 'i'] +['Ġlaunch', 'es'] +['(', 'KEY'] +['G', 'MT'] +['Ġaim', 'ing'] +['pat', 'ible'] +['ĠB', 'iden'] +['i', 'w'] +['ĠD', 'egree'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ'] +['Ġ$', "('<"] +['á', 'rios'] +['to', 'UpperCase'] +['ìł', 'ľ'] +['ĠE', 'UR'] +['Ġovers', 'ight'] +['Ġtable', 'sp'] +['Up', 'dates'] +['.m', 'akedirs'] +['Ġhum', 'idity'] +['/', 'template'] +['Al', 'ways'] +['(', 'IS'] +['_c', 'ert'] +['D', 'ig'] +['Ġunder', 'way'] +['ort', 'on'] +['ĠHur', 'ricane'] +['Ġsp', 'ends'] +['ĠSeg', 'ment'] +['Ġfl', 'ies'] +['ĠT', 'oggle'] +['ĠLyn', 'ch'] +['Ġs', 'enses'] +['ĠK', 'os'] +['set', 'Enabled'] +['ist', 'ically'] +['Ġtest', 'er'] +['Ġadministr', 'ators'] +['Ġtag', 'ged'] +['Ð', 'ĵ'] +['Ġshort', 'cut'] +['ĠRes', 'olution'] +['Ġsuperv', 'ision'] +['ĠAsh', 'ley'] +['Tr', 'acking'] +['ul', 'atory'] +['and', 'el'] +['ist', 'en'] +['Ġun', 're'] +['(d', 'iff'] +['ANT', 'S'] +['Ġr', 'ider'] +['Ġs', 'Äħ'] +['.S', 'eries'] +['_', 'orders'] +['ORIZ', 'ONTAL'] +['Ġret', 'ention'] +['ãĢĤ', '', 'čĊčĊ'] +['Ġdi', 'agonal'] +['ĠC', 'ancellationToken'] +['_', 'Internal'] +['Ġru', 'in'] +['.Q', 't'] +['ocr', 'atic'] +['T', 'el'] +['ĠAn', 'swers'] +['m', 'atic'] +['Ġx', 'p'] +['at', 'em'] +['_j', 'obs'] +['_', 'any'] +['Ġsen', 'iors'] +['Ġland', 'mark'] +['ĠQ', 'List'] +['Ġman', 'eu'] +['ot', 'ify'] +['/', '";Ċ'] +['/', 'server'] +['ĠPhil', 'osoph'] +['uten', 'ant'] +['(', 'io'] +['h', 'z'] +['Ġauthentic', 'ated'] +['d', 'v'] +['-', 'Compatible'] +['Origin', 'ally'] +[',', 'function'] +['ãĢĤ', 'čĊ'] +['ĠRepresent', 'ative'] +['as', 'ily'] +['irc', 'uit'] +['.d', 't'] +['(m', 'ath'] +['.M', 'arshal'] +['[', ','] +['ĠC', 'ities'] +['_', 'turn'] +['|', ')Ċ'] +['Ġcant', 'idad'] +['al', 'ter'] +['ĉ', 'ui'] +['ĠNe', 'braska'] +['Ġsk', 'irt'] +['.b', 'g'] +['Shared', 'Preferences'] +['(', 'style'] +['Ġg', 'rief'] +['g', 'ew'] +['Ġsaf', 'eg'] +['ol', 'ang'] +['_l', 'ists'] +['ì', 
'Ľ'] +['Ġgran', 'ite'] +['Ġhott', 'est'] +['.j', 'dbc'] +['.C', 'ustomer'] +['Ġâī', '¤'] +['Ġwa', 'ar'] +['_sc', 'ene'] +["+'", '/'] +['ĠJ', 'TextField'] +['Ġse', 'ating'] +['Ġwe', 'ars'] +['Ġ`', '/'] +['C', 'ases'] +['ĠY', 'outube'] +['ı', 'm'] +['Ġbal', 'con'] +[',', 'G'] +['Meta', 'Data'] +['-', 'price'] +['SC', 'R'] +['Un', 'ity'] +['Ġtr', 'unk'] +['={`', '${'] +['Ġearthqu', 'ake'] +['Part', 'ial'] +['Ġsub', 'st'] +['Ġelim', 'in'] +['="', "'."] +['//*', '[@'] +['Ġsuperv', 'isor'] +['vro', 'let'] +['_', 'article'] +['Ġp', 'ane'] +['b', 'io'] +['Ġmot', 'ors'] +['N', 'M'] +['F', 'rank'] +['Ġon', 'ion'] +['-', 'word'] +['Item', 'ClickListener'] +['Ġb', 'rit'] +['end', 'encies'] +['Com', 'puter'] +['_r', 'unning'] +['(', 'day'] +['-', 'he'] +['(n', 'amed'] +['ĠS', 'ach'] +['о', 'Ñĩ'] +['c', 'ampaign'] +['.Ab', 'stract'] +['(w', 'rapper'] +['.p', 'ay'] +['Ġu', 'w'] +['Ge', 'o'] +['r', 'ails'] +['/', 'select'] +['icht', 'e'] +['son', 's'] +['E', 'VENT'] +['Ġal', 'iment'] +['Pro', 'viders'] +['A', 'wait'] +['_INTER', 'VAL'] +['.', 'off'] +['Ġgl', 'uten'] +['_cl', 'oud'] +['Ġw', 'en'] +['.ex', 'tract'] +['ĉ', 'button'] +['/', 'MM'] +['Part', 'y'] +['Ġdem', 'ographic'] +['_err', 'no'] +['Ġh', 'iking'] +["('", "')Ċ"] +['",', '@"'] +['Ġw', 'it'] +['r', 'á'] +['olog', 'ie'] +['ĠSt', 'yles'] +['ĠBrowser', 'Module'] +['.Request', 'Mapping'] +['ic', 'ans'] +['P', 'AGE'] +['cre', 'ation'] +['ĠF', 'erguson'] +['ud', 'ed'] +['num', 'bers'] +['ĠGT', 'K'] +['Ġpresent', 'ations'] +['ĠB', 'obby'] +['_s', 'pan'] +['est', 'yle'] +['Ġilleg', 'ally'] +['abel', 'a'] +['Ġbattle', 'field'] +['cap', 'acity'] +['ter', 'ror'] +[']', '");Ċ'] +['Ġwar', 'rior'] +['le', 'ader'] +['ĠDB', 'G'] +['ĠRe', 'venue'] +['Ġvig', 'il'] +['Ġcounter', 'parts'] +['(', 'Error'] +['ACT', 'ER'] +['Ġhe', 'eft'] +['Ġselection', 's'] +['ze', 'ug'] +['t', 'om'] +['-t', 'wo'] +['.', ';Ċ'] +['_st', 'atement'] +['ĠA', 'id'] +['ĠV', 'ul'] +['_r', 'gb'] +['Ġpr', 'izes'] +['Ġedit', 'able'] +['ĉ', 'form'] +['ın', 'ı'] 
+['.de', 'cor'] +['D', 'emo'] +['lic', 'es'] +['Ġen', 'ctype'] +['rat', 'ulations'] +['ĠR', 'OS'] +['_ch', 'ars'] +['ĠJ', 'ahr'] +['part', 'ial'] +['Ñĥ', 'ÑĤ'] +['ĠRe', 'ceive'] +['ĠL', 'ands'] +['AP', 'TER'] +['Ġch', 'opped'] +['..', '"'] +['ĠAn', 'aly'] +['ĠU', 'ID'] +['ĠR', 'adeon'] +['ĠB', 'ee'] +['Ġun', 'm'] +['>', 'M'] +['.find', 'all'] +['Token', 'izer'] +['ĠWH', 'AT'] +['Ġs', 'j'] +['D', 'rawing'] +['E', 'ss'] +['ON', 'D'] +['Ĭ', '¶'] +['(p', 'acket'] +['âĢĶ', 'but'] +['Inv', 'ocation'] +['ĠN', 'uclear'] +['?', ';Ċ'] +['Ġgrand', 'es'] +['ĠC', 'rypt'] +['rem', 'ark'] +["Ġ'../../", '../../'] +['Ġin', 'ability'] +['m', 'agic'] +['c', 'ats'] +['Ġsim', 'ulate'] +[':', '${'] +['in', 'flate'] +['Ġen', 'er'] +[':', 'NO'] +['ip', 'les'] +['Ġmer', 'it'] +['ĠR', 'ated'] +['Ġgl', 'ue'] +['/b', 'log'] +['Ġg', 'ren'] +['Ġthr', 'illed'] +['.C', 'H'] +['unc', 'an'] +['ĠPR', 'IMARY'] +['Ġper', 'sec'] +['Ġfe', 'ared'] +['.M', 'IN'] +['ĠThe', 'ater'] +['é', 'Ĵ'] +['ategor', 'ie'] +['æ®', 'µ'] +['Ġappet', 'ite'] +['s', 'quare'] +['ĠAlex', 'and'] +['.User', 'Id'] +['_g', 't'] +['_', 'enter'] +['Ġgradu', 'ates'] +['Fragment', 'Manager'] +['Author', 'ize'] +['-N', 'LS'] +['(M', 'y'] +['Ġtri', 'umph'] +['ust', 'ing'] +['_PARAM', 'S'] +['Char', 'acters'] +['(:', ',:,'] +['_B', 'UILD'] +['M', 'Hz'] +['Ġwash', 'ed'] +['Ġun', 'cle'] +['Ste', 've'] +['ard', 'own'] +['', '${'] +['_confirm', 'ation'] +['Ġtro', 'phy'] +['Work', 's'] +['ĠElect', 'ronics'] +['ĠMediterr', 'anean'] +['_m', 'etrics'] +['Ġannounc', 'ing'] +['ĠD', 'AY'] +['_pro', 'to'] +['Ġp', 'ear'] +['base', 'Url'] +['ĉĉĉĉĉĉĉĉ', 'Ċ'] +['Ġcoord', 'ination'] +[':', 'N'] +['.an', 'imate'] +['ĠC', 'otton'] +['_h', 'it'] +['â', 'ľ'] +['Ġjet', 'zt'] +['if', 'ter'] +['(f', 'ields'] +['own', 'load'] +['ific', 'acion'] +['.c', 'uda'] +['ĠLi', 'u'] +['>', 'equals'] +['ĠA', 'ce'] +['ÑĢаÐ', '¼'] +['ĠSuper', 'man'] +['ĠGarc', 'ia'] +['Ġarrest', 's'] +['ag', 'ar'] +['Ġ{}', ')'] +['Ġmac', 'ros'] +['rou', 'pe'] +['ê', 'tre'] +['Ġtw', 'isted'] 
+['str', 'uments'] +['_', '("'] +['_', 'vertices'] +['ĠTrans', 'ition'] +['и', 'к'] +['[', 'max'] +['m', 'ind'] +['Ġaccess', 'Token'] +['Ġun', 'le'] +['m', 'us'] +['c', 'op'] +['ĠF', 'actor'] +['Ġcon', 'ced'] +['Ġre', 'tr'] +['.l', 'inalg'] +['-s', 'lider'] +['ob', 'l'] +['_Static', 'Fields'] +['Ġz', 'ombie'] +['s', 'elling'] +['Ġch', 'ap'] +['Ġsh', 'aking'] +['ĠTrans', 'late'] +['ĠAm', 'sterdam'] +['ĠE', 'TH'] +['_EX', 'TERN'] +['k', 'd'] +['_d', 'isc'] +['Ġpreced', 'ing'] +['Ġpri', 'x'] +['Object', 'Name'] +['_mod', 'ified'] +['ard', 'ware'] +['Ġ?>', '">'] +['ĠD', 'W'] +['`', '${'] +['Ġ?>', '">ĊĊ'] +['Ġspin', 'ning'] +['_p', 'ending'] +['Match', 'ers'] +['.', 'Keys'] +['ĠP', 'V'] +['en', 'us'] +['ant', 'is'] +['Ġdisc', 'ard'] +['Ġh', 'aul'] +['Ġem', 'pir'] +['Ġpath', 'way'] +['Ġo', 'ak'] +['м', 'ен'] +['-ind', 'uced'] +['Ġimp', 'air'] +['ĠCal', 'gary'] +['.is', 'Hidden'] +['d', 'z'] +['_', 'include'] +['Ġg', 'm'] +["Ġ'", "('"] +['P', 'Y'] +['uggest', 'ions'] +['Ġcommod', 'ity'] +['c', 'ro'] +['/', 'sub'] +['Ġget', 'Instance'] +['ĠLeg', 'acy'] +['ĠK', 'il'] +['B', 'al'] +['(', 'short'] +['In', 'form'] +['+', 'x'] +['*', 'r'] +['ĠHope', 'fully'] +['or', 'ate'] +['Ġmach', 'en'] +['Ġtreat', 'y'] +['ĠO', 'ri'] +['.p', 'ublic'] +['-h', 'orizontal'] +['Ġtact', 'ic'] +['Ġb', 'ord'] +['w', 'ares'] +['Ġam', 'mo'] +['ĠL', 'ists'] +['Ġequ', 'ations'] +['/', 'her'] +['ĠNS', 'W'] +['B', 'ounding'] +['_C', 'ollections'] +['Ġav', 'ail'] +['.Drop', 'Down'] +['è', '°'] +['Ġh', 'h'] +['Ġl', 'Ãł'] +['.p', 'b'] +['Ġmemor', 'ial'] +['ĠAT', 'TR'] +['Ġexhaust', 'ed'] +['Ġt', 'sp'] +['ĉ', 'redirect'] +['Ġlik', 'ewise'] +['ST', 'ER'] +['L', 'java'] +['Ġcondem', 'ned'] +['oca', 'ust'] +['(str', 'ict'] +['Ġexem', 'pt'] +['Ġs', 'ms'] +['Ġex', 'agger'] +['S', 'YS'] +['Ġl', 'ounge'] +[':', '^'] +['Ġto', 'dd'] +['de', 'b'] +['ator', 'ial'] +['ĠPort', 'er'] +['Ġtu', 'ition'] +['Ġexem', 'pl'] +['Ġp', 'aren'] +['.line', 'To'] +['Ġkid', 'ney'] +['Ġç', 'a'] +['Ġc', 'ui'] +['ï¼Į', '请'] +['X', 'C'] 
+['Ġmo', 'ż'] +['Ġnomin', 'ated'] +['l', 'ung'] +['Im', 'Gui'] +['ĠB', 'uzz'] +['Ġstere', 'o'] +['port', 'al'] +['res', 'as'] +['Ġk', 'lass'] +['Ġdraft', 'ed'] +['Ġproject', 'ile'] +['/g', 'pl'] +['(param', 'eters'] +['*', ')Ċ'] +['Ġassist', 'ed'] +['ĠNS', 'Integer'] +['s', 'itemap'] +[':n', 'th'] +['.View', 's'] +['.Argument', 'Parser'] +['Ġme', 'er'] +['z', 'ier'] +['ĠD', 'ig'] +['Ċ'] +['Ġpl', 'ag'] +['p', 'ine'] +['Ġblank', 'et'] +['Ġ:', '', '-'] +['Ġl', 'cd'] +['------------', '---'] +['("', '"'] +['Ġtact', 'ical'] +['ĠRon', 'ald'] +['ex', 'tr'] +['ĠF', 'est'] +['Ġf', 'uer'] +['-n', 'avigation'] +['Ġk', 'b'] +['gh', 'ost'] +['Ġhandle', 'Change'] +['_cl', 's'] +['()', '!='] +['Com', 'parator'] +['.v', 'm'] +['ĠCo', 'x'] +['_re', 'view'] +['/', '@'] +['_c', 'ookie'] +['Ġrecogn', 'ised'] +['ld', 'ap'] +['Thread', 's'] +['ĠSex', 'ual'] +['ĠB', 'earing'] +['(S', 'QL'] +['Ġx', 'r'] +['Ġth', 'igh'] +['URL', 'Connection'] +['ĠSU', 'V'] +['Ġm', 'Context'] +['Ġinc', 'idence'] +['ĠE', 'ste'] +['.s', 'up'] +['_t', 'e'] +['(EX', 'IT'] +['C', 'MD'] +['/', '">'] +['Al', 'most'] +['ĠU', 'ne'] +['Ġand', 'eren'] +['ĠSingle', 'ton'] +['Ġb', 'ore'] +['Th', 'ink'] +['Ġn', 'arc'] +[']', 'initWith'] +['_sh', 'op'] +['(str', 'ategy'] +['!', "',"] +['her', 'its'] +['ĠDes', 'k'] +['_m', 'achine'] +['.net', 'ty'] +['ı', 'nda'] +['=', '<'] +['ĠQ', 'R'] +['ĠS', 'idebar'] +['.split', 'Container'] +['Ġon', 'Success'] +['Ġmon', 'key'] +['En', 'joy'] +['(n', 'odes'] +['pect', 'rum'] +['Ġ(*', '('] +['ĉU', 'INT'] +[',', 'height'] +['ĠNetwork', 's'] +['.t', 'ail'] +['.l', 'inspace'] +['Ġ"', '...'] +['List', 'en'] +['Æ', '¡'] +['.Ch', 'annel'] +['-', 'defined'] +['Re', 'peat'] +['ad', 'just'] +['ER', 'M'] +['_', 'application'] +['.assert', 'NotNull'] +['-', 'stream'] +['Ġr', 'abbit'] +['Ġposition', 'ing'] +['Ġw', 'oke'] +['Ġf', 'ing'] +['Ġmulti', 'player'] +['Ġregister', 'ing'] +['un', 'til'] +['Ã¥', 'n'] +['(', '::'] +['uss', 'ions'] +['Ġpot', 'ato'] +['ĠE', 'quals'] +['.S', 'up'] +['/ap', 
'ache'] +['Ġ(', '='] +['.', '")'] +['.p', 'tr'] +['ĠSpe', 'ech'] +['.cl', 'ip'] +['ĠGab', 'riel'] +['Ġmusic', 'ian'] +['/', 'issues'] +['.sh', 'op'] +['ĠH', 'ier'] +['_RE', 'T'] +['_b', 'ucket'] +['ãĥ', '¡'] +['av', 's'] +['Ġro', 'z'] +['fl', 'ower'] +['Write', 'Barrier'] +['ĠMil', 'an'] +['Ġlegisl', 'ature'] +['ĠD', 'oll'] +['Ġprov', 'ing'] +['.concat', 'enate'] +['âķ', 'IJ'] +['Ġg', 'char'] +['cdn', 'js'] +['b', 'les'] +['ĠList', 'ing'] +['л', 'о'] +['.xr', 'Label'] +['ĠS', 'ak'] +['just', 'ice'] +['ĠVal', 'entine'] +['un', 'less'] +['Ġp', 'iger'] +['(r', 'un'] +['Ġtest', 'ified'] +['AN', 'A'] +['ĠRem', 'oves'] +['))', '));Ċ'] +['rec', 'ated'] +['ĠRuntime', 'Method'] +['Ġcon', 'qu'] +['ãĤ', '¢'] +['Ġt', 'issues'] +['ail', 'er'] +['ét', 'é'] +['-', 'Star'] +['Ġfl', 'ames'] +['.set', 'Icon'] +['Ġsup', 'ern'] +['Ġvag', 'ina'] +['-', 'variable'] +['Ġwell', 'ness'] +['C', 'UR'] +['Ġbel', 'le'] +['.get', 'Request'] +['Ġp', 'oco'] +['ben', 'h'] +['ag', 'ens'] +['Ġsp', 'ill'] +['ĠJ', 'ur'] +['Ġdispatch', 'er'] +['н', 'ого'] +['emon', 'ic'] +['(dir', 'name'] +['ĠÐ', 'Ķ'] +['Ġpas', 'se'] +['Ġg', 'anz'] +['ric', 'ing'] +['E', 'U'] +['Ġmuj', 'eres'] +['ess', 'en'] +['.at', 'tribute'] +['j', 'j'] +['ĉĉ', 'ĠĊ'] +['[', '^'] +['Ġstrtol', 'ower'] +['lex', 'er'] +['ect', 'ar'] +['hot', 'el'] +['.s', 'quare'] +['Ġr', 'all'] +['Ġlower', 'ed'] +['hand', 'led'] +['Mark', 'et'] +['ĠUs', 'es'] +['iv', 'as'] +['.B', 'usiness'] +['ãģĹãģ', '¦'] +['D', 'IV'] +['Ġw', 'asted'] +['Ġav', 'oir'] +['ê', 'm'] +['_ACC', 'OUNT'] +['.', 'et'] +['ĉ', 'SDL'] +['k', 'ap'] +['Ġf', 'ox'] +['up', 'pet'] +['{', '},Ċ'] +['",', "'"] +['F', 'avorite'] +['P', 'END'] +['ĠA', 'ES'] +['}', '),'] +['Ġded', 'uction'] +['Ġpol', 'ÃŃt'] +['Ġcomponent', 'Will'] +['ĠT', 'elerik'] +['_SE', 'LF'] +['Ġm', 'use'] +['C', 'raft'] +['Ġd', 'ens'] +['à¤', '¿'] +['(', 'tp'] +['Ġt', 'asty'] +['Ġbal', 'ances'] +['Ġded', 'ication'] +['ĠWall', 'ace'] +['Ġun', 'law'] +['\\">', '\\'] +['Ġm', 'um'] +['-', 'update'] +['ement', 'e'] 
+['Ġs', 'oda'] +['Re', 'public'] +['as', 'mine'] +['é', 'ric'] +['(', 'Status'] +['ĠJson', 'Convert'] +['ĠD', 'isk'] +['.Red', 'irect'] +['Ġfilm', 'ing'] +['/m', 'ol'] +['R', 'o'] +['Ġv', 'ille'] +['Ġtrab', 'aj'] +['Ġsyn', 'thesis'] +['reg', 'a'] +['Ġr', 'l'] +['S', 'cheduler'] +['ISH', 'ED'] +['current', 'User'] +['(error', 's'] +["'", 'h'] +['_b', 'ot'] +['x', 'imo'] +['ĠUS', 'ART'] +['_s', 'uper'] +['_DEC', 'REF'] +['н', 'ой'] +['_RO', 'W'] +['Ġprom', 'otes'] +['ĠT', 'A'] +['Ġhor', 'as'] +['ĠRep', 'resents'] +['Ġname', 'of'] +['ĠEx', 'c'] +['ĠGar', 'age'] +['Ġse', 'ine'] +[',', '#'] +['Ġher', 'b'] +['/', 'resources'] +['Ġple', 'aded'] +['.r', 'adioButton'] +['Ġæ', 'ĺ'] +['O', 'ps'] +['ĠN', 'est'] +['c', 'string'] +['ĠDef', 'ence'] +['Ġref', 'ere'] +['_le', 'af'] +['Ġrevel', 'ation'] +['ë', '§'] +['.execute', 'Update'] +['_W', 'ORLD'] +['Ġexp', 'ans'] +['("', '\\"'] +['j', 'ab'] +['Ġdoub', 'ts'] +['ĠGe', 'ometry'] +['Ġintrodu', 'ces'] +['Ġsen', 'ators'] +['Ġcan', 'al'] +['.h', 'elper'] +['ĠBi', 'ology'] +['_SE', 'NS'] +['.pre', 'vious'] +['-t', 'ouch'] +['ab', 'it'] +['Ġimpact', 'ed'] +['Ġbr', 'ackets'] +['.d', 'irect'] +['acc', 'um'] +['Ġtest', 'osterone'] +['ĉ', 'action'] +['ĠCh', 'ance'] +['Ġpe', 'aks'] +['CppCodeGen', 'WriteBarrier'] +['Ġun', 'belie'] +['_p', 'ress'] +['.R', 'el'] +['ang', 'led'] +['/', 'templates'] +['--', '>čĊ'] +['l', 'ime'] +['Ġsufficient', 'ly'] +['_', 'nt'] +['Exp', 'and'] +['.is', 'file'] +['Ġis', 'Empty'] +['Ġq', 't'] +['Ġmul', 'her'] +['ac', 'ob'] +['Ge', 'orge'] +['å¸', '¸'] +['Ġass', 'im'] +['as', 'o'] +['Ġcompr', 'ised'] +['O', 'V'] +['(CON', 'FIG'] +['ĉw', 'riter'] +['Ġdes', 'p'] +['Ġten', 'ure'] +['(c', 'r'] +['.p', 'ool'] +['ĠB', 'rend'] +['Ġc', 'ensor'] +['(time', 'out'] +['Ġple', 'a'] +['.W', 'rap'] +['Ġtight', 'ly'] +['ĠW', 'ere'] +['ĠI', 'gnore'] +['abe', 'i'] +['Ġbr', 'idges'] +['Ġcondem', 'n'] +['Ġsimp', 'licity'] +['Ġrout', 'inely'] +['Ġblack', 's'] +['j', 'b'] +['ĠP', 'it'] +['U', 'tf'] +['Ġ/', 'Ċ'] +['re', 'load'] 
+['Ġset', 'Object'] +['/g', 'lobal'] +['Ġf', 'atty'] +['Ġsock', 's'] +['Could', 'n'] +['Ġerot', 'isk'] +['æĿ', '¡'] +['ĠPress', 'ure'] +['ĠM', 'az'] +['n', 'pos'] +['tol', 'ower'] +['ĠE', 'Q'] +['ute', 'ur'] +['ĠM', 'oment'] +['Ġet', 'a'] +['{{', '--'] +['Ġgraph', 's'] +['ĠGu', 'ar'] +['r', 'ine'] +['(', '--'] +['ĠHttp', 'Status'] +['(st', 'udent'] +['*', 'np'] +['Ġrail', 'way'] +['Ġas', 'ynchronous'] +['_v', 'm'] +["']", ",'"] +[',', 'text'] +['mer', 'chant'] +['(G', 'uid'] +['ĠG', 'ra'] +['ix', 'er'] +['fetch', 'All'] +['.add', 'Listener'] +['fl', 'ip'] +['*', '$'] +['>', '(),'] +['Ġsun', 'light'] +['ass', 'igned'] +['Ġab', 'c'] +['ĠC', 'OLUMN'] +['ĠðŁĻĤ', 'ĊĊ'] +[')', '...'] +['Ġen', 'semble'] +['Ġnew', 'line'] +['_S', 'INGLE'] +['ied', 'ad'] +['Ġdark', 'er'] +['orm', 'ap'] +['Ġl', 'ion'] +['pl', 'its'] +['Ġillustr', 'ation'] +['ĠI', 'EEE'] +['Ġv', 'ista'] +['ous', 'ands'] +['******', '*'] +['ĠTom', 'my'] +['Ġh', 'ue'] +['S', 'el'] +['Ġa', 'ura'] +['ĠTher', 'apy'] +['Ġanim', 'ator'] +['.con', 'straints'] +['Ġv', 'ague'] +['("', '")'] +['Ġvill', 'ain'] +['Ġbless', 'ing'] +['Ġstring', 'Builder'] +['ĠM', 'isc'] +['ĠD', 'IR'] +['f', 'ax'] +['-', 'node'] +['ĠWalk', 'ing'] +['ĠA', 'U'] +['s', 'ess'] +['Ġgr', 'ill'] +['VERT', 'ISE'] +['ĠF', 'oods'] +['Ġt', 'ournaments'] +['Ã', 'ĵ'] +['ĠMar', 'sh'] +['Ġw', 'onders'] +['Long', 'itude'] +['.Command', 'Text'] +['=', 'input'] +['_enc', 'oder'] +['page', 'Size'] +['Ġget', 'State'] +['>', '>Ċ'] +['.g', 'rey'] +['p', 'od'] +['Ġread', 'ings'] +['Ġre', 'consider'] +['Start', 'up'] +['Ġexc', 'er'] +['.b', 'alance'] +['_c', 'ycle'] +['_T', 'ime'] +['LOC', 'AL'] +['ĠE', 'FI'] +['ĠRe', 'yn'] +['.set', 'Foreground'] +['by', 'n'] +['Ġdis', 'connected'] +['ACT', 'IVE'] +['Ġembed', 'ding'] +['ick', 'ers'] +['Ġsurround', 'ings'] +['*', 'c'] +['Ġgar', 'ant'] +['Ġb', 'f'] +['Ġw', 'ipe'] +['Ġ', 'ä¸ĭ'] +['_T', 'RA'] +['ado', 'x'] +['ç', 'ķ'] +['Ġsu', 'cks'] +['ĠS', 'ongs'] +['ĠAssoci', 'ates'] +['ĠB', 'ald'] +['ĠB', 'rett'] +['ven', 'ile'] 
+['Ġv', 't'] +['Ġin', 'ade'] +['Ġres', 'igned'] +['ĠGl', 'enn'] +['.p', 'attern'] +['.Data', 'Bind'] +['Ñĥ', 'м'] +['Layout', 'Inflater'] +['ch', 'et'] +['ĠTest', 'ament'] +['.m', 's'] +['Ġp', 'av'] +['ĠReact', 'DOM'] +['ur', 'dy'] +['AD', 'ATA'] +['M', 'u'] +['/', 'actions'] +['ĠJ', 's'] +['_ex', 'tract'] +['ĠBr', 'ing'] +[':', 'id'] +['str', 't'] +['iv', 'ation'] +['Ġoutr', 'ight'] +['az', 'u'] +['loy', 'ment'] +['и', 'Ñı'] +['al', 'do'] +['ĠP', 'ublisher'] +['E', 'ducation'] +['Pa', 'lette'] +['_d', 'rv'] +['Ġ($', '('] +['ĠAnd', 'a'] +['Ġrem', 'edy'] +['Ġincons', 'istent'] +['te', 'ction'] +['Ġregul', 'ators'] +['Ġshort', 'est'] +['(p', 'air'] +['ĠInstall', 'ation'] +['Ġdefend', 'ants'] +['Ġ(', ');'] +['-l', 'arge'] +['M', 'el'] +['Ġthreat', 'en'] +['н', 'Ñı'] +['Ġfet', 'ish'] +['ot', 'ine'] +['_d', 'ic'] +['Ġ<', '$'] +['Ġst', 'agger'] +['sp', 'i'] +['$', 'response'] +['S', 'erv'] +['-b', 'orn'] +['j', 'os'] +['ĉ', 'img'] +['ĉW', 'HERE'] +['_l', 't'] +['å½', 'ĵ'] +['.c', 'ost'] +['ĠT', 'ue'] +['.label', 's'] +['ĠL', 'V'] +['wcs', 'store'] +['ĠJes', 'se'] +['à¸', '«'] +['Tr', 'ade'] +['Ġpredecess', 'or'] +['ë', 'Ĥ'] +['fin', 'ally'] +['_g', 'eneral'] +['ogg', 'ler'] +['_REG', 'ION'] +['n', 'ement'] +['Ġblog', 'ger'] +['ĠHar', 'bor'] +['ĠD', 'ataset'] +['[', 'w'] +['Ġattend', 'ees'] +['.', 'ico'] +['max', 'imum'] +['.Un', 'lock'] +['_SY', 'NC'] +['ág', 'ina'] +['Ġdown', 's'] +['ĠW', 'ii'] +['])', '/'] +['Ġkick', 'ing'] +['unic', 'ation'] +['ĠD', 'AC'] +['ĠID', 'S'] +['ĠR', 'ental'] +['Ġcurrent', 'Time'] +['Ġvacc', 'ines'] +['ĠDev', 'il'] +['Ġn', 'ors'] +['_m', 'ouse'] +['urre', 'ction'] +['(n', 'o'] +['Ġ>', 'čĊ'] +['Ġaggress', 'ion'] +['Ġbre', 'eding'] +['.s', 'ymbol'] +['im', 'an'] +['Absolute', 'Path'] +['ĠWH', 'O'] +['_fl', 'ush'] +['-', 'root'] +['arn', 'a'] +['&', 'M'] +['Ġf', 'athers'] +['ĠR', 'ocket'] +['ive', 'au'] +['Ġw', 'ander'] +['Ġcom', 'pos'] +['ĠWar', 'rior'] +['ĠSe', 'at'] +['ĠClin', 'ic'] +['_in', 'voice'] +['(dis', 'patch'] +['Product', 'o'] 
+['at', 'uring'] +['oss', 'ier'] +['ĠM', 'AY'] +['Ġd', 'agger'] +['Ġsanit', 'ized'] +['ĠR', 'FC'] +['Ġpro', 'ph'] +['Ġur', 'ine'] +['Ġgr', 'ind'] +['ĠExp', 'anded'] +['des', 'cripcion'] +['-f', 'w'] +['ĠK', 'erry'] +['=', 'name'] +['Ġch', 'k'] +['Ġnation', 'ally'] +['Ġthe', 'e'] +['In', 'c'] +['Ġ?', '>>'] +['.R', 'adioButton'] +['.Http', 'ServletResponse'] +['/', 'Y'] +['ĉf', 'ield'] +['Ġhom', 'me'] +['y', 'per'] +['Ph', 'ysical'] +['=', 'v'] +['Ġdr', 'iv'] +['ĠErr', 'ors'] +['Ġc', 'Äĥ'] +['De', 'ath'] +['ĠW', 'INDOW'] +['Ġpo', 'et'] +['ĠSh', 'arp'] +['ĠImm', 'utable'] +['ĉ', 'create'] +['Ġge', 'ht'] +['ĠRe', 'form'] +['ais', 'er'] +['ĠInitial', 'ization'] +['Ġimm', 'unity'] +['.com', 'pose'] +['Ġlat', 'ency'] +['ĠLeban', 'on'] +['ĠPar', 'ad'] +['Ġfu', 'els'] +['ĠEx', 'hib'] +['co', 'h'] +['%', '">Ċ'] +['ĠCL', 'I'] +[')', 'initWith'] +['-Z', 'a'] +['_C', 'LEAR'] +['reg', 'n'] +['Ġfin', 'ances'] +['.st', 'andard'] +['_C', 'ATEGORY'] +['.lib', 'rary'] +['Ġtravel', 'ers'] +['_w', 'p'] +['ĠE', 'valuation'] +['start', 'ing'] +['Ġ', ')),Ċ'] +['ep', 'isode'] +['ĠV', 'ariant'] +['Ġda', 'emon'] +['ĠJul', 'ia'] +['ĠN', 'R'] +['Ġdoub', 'les'] +['<', 'v'] +['/r', 'untime'] +['Ġinterpre', 'ter'] +['ĠIN', 'DEX'] +['ĠHol', 'mes'] +['_D', 'IM'] +['Ġp', 'addle'] +['_ex', 'ample'] +['Ġfore', 'ground'] +['.r', 'outes'] +['Ġs', 'owie'] +['S', 'UCCESS'] +['ĠC', 'DC'] +['ĠB', 'D'] +['_', '-'] +['as', 'ured'] +['W', 'riting'] +['Ġcurrent', 'Page'] +['(', 'answer'] +['ĠASC', 'II'] +['à', '¨'] +['Ġsocial', 'ly'] +['yy', 'y'] +['ĠSpecial', 'ist'] +['(c', 'ustomer'] +['ist', 'ani'] +['ke', 'st'] +['ĠM', 'ak'] +['Ġth', 'o'] +['.', 'pt'] +['(', 'comment'] +['ĠCon', 'verter'] +['g', 'am'] +['b', 'ins'] +['.', 'tele'] +['ĠVeter', 'ans'] +['_AL', 'LOC'] +['олÑĮзов', 'аÑĤ'] +['inn', 'amon'] +[';', 'width'] +['oh', 'l'] +['Ġfant', 'as'] +['Ġs', 'ung'] +['ĉ', 'K'] +['(', 'Json'] +['Ġneighbour', 'hood'] +['Ġv', 'ow'] +['Ġs', 'ins'] +['on', 'acci'] +['Ġepoch', 's'] +['im', 'agen'] +['.Ch', 'ange'] 
+['.my', 'batis'] +['Se', 'ek'] +['W', 'ER'] +['管', 'çIJĨ'] +['Ġinter', 'ess'] +['_', 'Event'] +['eder', 'land'] +['Ġterr', 'itor'] +['Ġci', 'udad'] +['uck', 'ed'] +['Ġsn', 'ack'] +['Ġtransport', 'ed'] +['ĠMan', 'ifest'] +['ĠD', 'AT'] +['_th', 'eta'] +['Ġw', 'ont'] +['.ĊĊ', 'ĊĊĊĊĊĊĊĊ'] +['Ĭ¶', 'æĢģ'] +['ĠEp', 'ic'] +['De', 'ck'] +['l', 'tra'] +['_Z', 'ERO'] +['Ġ[]', ';'] +['/', 'scripts'] +['Ġ----------------------------------------------------------------', '----------------'] +['æĥ', 'ħ'] +['Ġwe', 'ed'] +['N', 'BC'] +['Ġrap', 'ed'] +['ĠG', 'ateway'] +['[', 'M'] +['ĠTime', 'out'] +['ench', 'mark'] +['.View', 'Model'] +['Ġporn', 'os'] +['ĠY', 'a'] +['th', 'ritis'] +['ĠFly', 'nn'] +['Ġme', 'ga'] +['ac', 'in'] +['Ġtrib', 'al'] +['.app', 'le'] +['ĠB', 'lo'] +['â', 'n'] +['ib', 'i'] +['ro', 'v'] +['ĠL', 'ives'] +['^', '.'] +['get', 'Request'] +['ĠEst', 'ablish'] +['cont', 'ainers'] +['Ġst', 'arring'] +['Ġcele', 'brities'] +['ĠRel', 'ative'] +['ĠHe', 'ights'] +['Ġtq', 'dm'] +['ĠNorth', 'west'] +['iv', 'ic'] +['ĉ', 'cl'] +['Ġautom', 'otive'] +['ent', 'ric'] +['Ġfort', 'unate'] +['Ġfire', 'place'] +['se', 'ud'] +['nick', 'name'] +[';', 's'] +['_C', 'AL'] +['h', 'alt'] +['(n', 's'] +['_de', 'leted'] +['Develop', 'ment'] +['m', 'ovies'] +['Ġident', 'ities'] +['Ġprompt', 'ly'] +['ا', 'ÙĨ'] +['Ġant', 'e'] +['Ġ"', "','"] +['åı', '£'] +['imp', 'se'] +['Ġy', 'ap'] +['Type', 'Name'] +['Ġb', 'itch'] +['Ġassoci', 'ates'] +['HE', 'ME'] +['-', 'empty'] +['ĠØ', 'ª'] +['ol', 'vers'] +['Ġpist', 'ol'] +['Sc', 'oped'] +['ag', 'ner'] +["']", "=='"] +['ĠI', 'MP'] +['ex', 'c'] +['Ġo', 'mitted'] +['Ġmind', 'set'] +['Ġ[]', '('] +['Ġor', 'n'] +['_C', 'AM'] +['A', 'vg'] +['Localized', 'String'] +['ĠN', 'atur'] +['Ġcom', 'poser'] +['ĠPlay', 'ing'] +['Ġover', 'd'] +['_', 'utf'] +['.s', 'k'] +['ĠF', 'ol'] +['$', 'page'] +[',', 'Object'] +['Ġbe', 'es'] +['al', 'ary'] +['bul', 'let'] +['_lib', 'rary'] +['O', 'ffer'] +['loc', 'ated'] +['Ġ(_', ','] +['âĢľ', 'He'] +['ĠOwn', 'ers'] +[')', ').Ċ'] +['Ġb', 
'ri'] +['.Ad', 'min'] +['kt', 'ion'] +['лÑİ', 'Ñĩ'] +['Ġerot', 'ici'] +['Cancel', 'led'] +['Ġa', 'gr'] +['re', 'views'] +['_d', 'ma'] +['RI', 'CT'] +['Ġg', 'fx'] +['mp', 'i'] +['pp', 'o'] +['Ġ//', '@'] +['Ġupper', 'case'] +['Ġcommit', 'ting'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ'] +['User', 'Data'] +['Ġv', 'ai'] +['ĉs', 'ort'] +['Ġcongr', 'at'] +['Ġd', 'ioxide'] +['д', 'а'] +['.', 'area'] +['ĠJosh', 'ua'] +['ĠK', 'och'] +['_b', 'reak'] +['az', 'ure'] +['ist', 'ical'] +['_AL', 'PHA'] +['_', 'views'] +['Ġelim', 'inating'] +['OM', 'B'] +['en', 'umer'] +['ĠHy', 'dro'] +['(*', '('] +['ERT', 'ICAL'] +['Ġinev', 'itably'] +['Ġst', 'ole'] +['-e', 'ast'] +['ier', 'on'] +['Ġl', 'inger'] +['/d', 'oc'] +['Å', 'º'] +['ĠAl', 'ready'] +['as', 'io'] +['Ġ--', 'Ċ'] +['Ġabb', 'rev'] +['ĠAt', 'om'] +['h', 'im'] +['ĠINS', 'ERT'] +['s', 'un'] +['âĻ', 'ª'] +['CON', 'NECT'] +['er', 'ator'] +['ĠM', 'anning'] +['Ġ:', '('] +['g', 'as'] +['=>', "'"] +['Ġquery', 'set'] +[';', '}čĊ'] +['ĠPop', 'ulation'] +['uted', 'String'] +['res', 'ident'] +['_F', 'ONT'] +['ĠRes', 'pond'] +['Ġobsc', 'ure'] +['Ġo', 'bservable'] +['ĠContrib', 'utors'] +['k', 'on'] +['ĠMus', 'k'] +['ex', 'ao'] +['ĠT', 'ub'] +['Boot', 'Application'] +['S', 'OR'] +['.H', 'orizontal'] +['.find', 'By'] +['.p', 'ower'] +['Ġposit', 'ively'] +['ven', 'ience'] +['ĠJ', 'ong'] +['Ġwh', 'istle'] +['Ġз', 'наÑĩ'] +['Ġl', 'ending'] +['Ġdestruct', 'ive'] +['Ġon', 'Delete'] +['author', 'ization'] +['();', '?>'] +['_', 'original'] +['sc', 'ience'] +['at', 'ra'] +['?,', '?,'] +['ĠAs', 'c'] +['Ġconvinc', 'ing'] +['$', 'a'] +['org', 'en'] +['_D', 'ate'] +['ĠPro', 'vide'] +['Ġlon', 'ely'] +[')', "'Ċ"] +['ex', 'change'] +[';', '?>Ċ'] +['.f', 'ast'] +['S', 'amples'] +['L', 'ondon'] +["']", ')čĊ'] +['ĠI', 'onic'] +['Ġp', 'esso'] +['ĠKn', 'ights'] +['ĠR', 'af'] +['_attr', 's'] +['Ġrepe', 'al'] +['>', 'Main'] +['ĠOrder', 'ed'] +['_N', 'ew'] +['="', '">", '";Ċ'] +['ĠS', 'ERVER'] +['ĠHE', 'ADER'] +['_', 
'velocity'] +['ĠIn', 'voke'] +['.timestamp', 's'] +['Ġs', 'ulf'] +['I', 'QUE'] +['Ġinhabit', 'ants'] +['ph', 'ins'] +['azz', 'o'] +['Ġmon', 'o'] +['Leg', 'end'] +['Ġnon', 'ce'] +['IF', 'E'] +[';', '";Ċ'] +['-', 'create'] +['"', '",Ċ'] +['per', 'mit'] +['ĠImm', 'igration'] +['Ġpath', 'name'] +['ffect', 'ive'] +['âĻĢ', 'âĻĢ'] +['Ġex', 'ams'] +['-', 'event'] +['ĠT', 'ill'] +['[m', 'id'] +['F', 'IX'] +[';', 'color'] +['(', 'Order'] +['_tra', 'its'] +['Ġorder', 'By'] +['Ġs', 'unt'] +['ĠNich', 'olas'] +['Ø', '²'] +['Ġsun', 'ny'] +['in', 'ers'] +['Ġaccess', 'ibility'] +['ĠH', 'B'] +['.com', 'p'] +['ĉ', 'op'] +['Ġminor', 'ities'] +['ethe', 'us'] +['Ġcollabor', 'ative'] +['pr', 'it'] +['H', 'IR'] +['Ġwr', 'aps'] +['ĉd', 'raw'] +['g', 'od'] +['ĠI', 'X'] +['.app', 's'] +['ĠN', 'M'] +['Ġirre', 'levant'] +['ĠT', 'igers'] +['Ġdi', 'ag'] +['G', 'V'] +['ĠAccess', 'ories'] +['k', 'ont'] +['Ġsimpl', 'ify'] +['ĠF', 'avorite'] +['_t', 'ools'] +['([]', ');Ċ'] +['Ġtow', 'ers'] +['B', 'es'] +['Ġhun', 'ter'] +['Ġsal', 'on'] +['(b', 'uff'] +['ĉ', 'debug'] +['Ġmal', 'ware'] +['M', 'oving'] +['-', 'options'] +[')', "+'"] +['ĠLO', 'VE'] +['_S', 'OCKET'] +['_f', 'in'] +['ĠDel', 'aware'] +['Ġsher', 'iff'] +['-in', 'valid'] +['ĠF', 'ULL'] +['Ġп', 'од'] +['el', 'as'] +['"', 'strings'] +['ĠRepresent', 'atives'] +['s', 'urface'] +['res', 'olved'] +['ht', 'docs'] +['))', ':čĊ'] +['Ġpress', 'ures'] +['Ġnorm', 's'] +['Ġpl', 'a'] +['Ġs', 'urname'] +['Ġpost', 'al'] +['ĠDep', 'art'] +['Ġsla', 'ughter'] +['or', 'ida'] +['Ġhe', 'bben'] +['Ġdes', 'ar'] +['comp', 'act'] +['_L', 'ANG'] +['åIJ', 'Ī'] +['op', 'oly'] +['_r', 'ad'] +['ĠST', 'DMETHOD'] +['L', 'azy'] +['ĠĠĠ', 'ĉ'] +['...', ','] +['(', 'web'] +['ĠP', 'ont'] +['Ġet', 'was'] +['Ġup', 'ward'] +['_h', 'at'] +['Ġ],', 'ĊĊ'] +['Ġbase', 'Url'] +['Ġworry', 'ing'] +['-add', 'on'] +['(get', 'Class'] +['S', 'PI'] +['Ġcapt', 'uring'] +[')', '},Ċ'] +['Effect', 's'] +['Ġcompet', 'ent'] +['Ġf', 'oul'] +['Ġsubscri', 'bing'] +['ĠO', 'BJECT'] +['IX', 'EL'] +['b', 
'ucks'] +['(', 'edge'] +['(p', 'ass'] +['ĠPet', 'erson'] +['Ġbo', 'obs'] +['ĠD', 'elay'] +['_s', 'quare'] +['el', 'im'] +['ot', 'ers'] +['_P', 'C'] +['%', 'E'] +['on', 'click'] +['ĠSV', 'G'] +['Ġto', 'pped'] +['Ġf', 'ist'] +['sm', 'art'] +['ĠR', 'alph'] +['(', 'owner'] +['j', 'ours'] +['Ġbron', 'ze'] +['ĠArgument', 'Exception'] +['(', 'original'] +['_S', 'CALE'] +['_c', 'p'] +['Ġrecomm', 'ends'] +['.set', 'Style'] +['S', 'ure'] +['L', 'AND'] +['Ġrepe', 'ating'] +['M', 'att'] +['.', 'Visibility'] +['Ġenter', 'prises'] +['.Set', 'up'] +['(sc', 'ene'] +['ĠRe', 'active'] +['ur', 'ge'] +['b', 'w'] +['.P', 'ut'] +['p', 'ersist'] +['.c', 'ookie'] +['ĠAud', 'i'] +['`', 's'] +['sup', 'plier'] +['(', 'Form'] +['Â', '¡'] +['_s', 'o'] +['Į', 'Ģ'] +['ĠLeg', 'ion'] +['t', 'te'] +['N', 'd'] +['L', 'oss'] +['(', 'attrs'] +['.sc', 'atter'] +['Ġg', 'room'] +['Ġgl', 'impse'] +['Ġn', 'ails'] +['Ġcum', 'ulative'] +['Ġf', 'azer'] +['_s', 'ervices'] +['.N', 'um'] +['ib', 'ilit'] +['_res', 'olution'] +['ĠT', 'x'] +['umin', 'ium'] +['op', 'a'] +['.s', 'chedule'] +['sm', 'tp'] +['à¸', 'ķ'] +['ur', 'ry'] +['ü', 'k'] +['go', 'og'] +['_sign', 'ature'] +['.int', 'o'] +['ĠSte', 'ps'] +['Ġhome', 'owners'] +['ĠNS', 'URL'] +['ĠP', 'AC'] +['ĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĊĊ'] +['>', "')Ċ"] +['en', 'h'] +['Ġinc', 'ap'] +['$', 'MESS'] +['Ġmo', 'ins'] +['ĠF', 'i'] +['Ġoff', 'season'] +['press', 'ions'] +['>', '.Ċ'] +['ĠGr', 'ass'] +['ĠGo', 'al'] +['_p', 'df'] +['Hand', 'lers'] +['Ġstack', 's'] +['.get', 'FullYear'] +['=[', '];Ċ'] +['è½', '¦'] +[',', 'V'] +['(s', 'plit'] +['Ñĥн', 'к'] +['Ġbake', 'ca'] +['Ġ~', '/.'] +['pe', 'z'] +['t', 'ails'] +['ĠG', 'len'] +['Ġset', 'Image'] +['ĠCom', 'ic'] +['B', 'LOCK'] +['ĉ', 'This'] +['o', 'ader'] +['Ġcapital', 'ist'] +['_ST', 'EP'] +['(', 'Boolean'] +['ĠCor', 'rect'] +['r', 'ina'] +['Ġconc', 'aten'] +['å®', 'ŀ'] +['()', ':ĊĊ'] +['Ġun', 'anim'] +['ll', 'i'] +['al', 'ars'] +['-', 'ne'] +['Ġdiv', 'or'] +['ĠKick', 'starter'] +['].', '_'] +['<', 'number'] +['/m', 'enu'] +['GR', 
'APH'] +['vis', 'itor'] +['Ġimpro', 'per'] +['_N', 'EXT'] +['Ġb', 'isa'] +['background', 'Color'] +['/', 'input'] +['Ġmo', 'i'] +['Go', 'al'] +['li', 'qu'] +['Ġmiscon', 'duct'] +['Ġcompr', 'ises'] +['aw', 'ns'] +['ĠP', 'ie'] +['ra', 'is'] +['role', 'um'] +['Ġcur', 'se'] +['y', 'u'] +['_p', 'oll'] +['.current', 'User'] +['ES', 'H'] +['])', '['] +['Ġstory', 't'] +[')?', ';Ċ'] +['*', '='] +['ĠB', 'urg'] +['/', 'layout'] +['_back', 'end'] +[';', '?>', '*', "'+"] +['åĿ', 'Ģ'] +['ac', 'ency'] +['(', 'URL'] +['_h', 'alf'] +['=', 'l'] +['Ġlist', 'View'] +['(', 'section'] +['.to', 'Array'] +['+', '/'] +['ĠRodrig', 'uez'] +['ist', 'ream'] +['Ġelig', 'ibility'] +['::', '-'] +['.new', 'Instance'] +['P', 'B'] +['ĠAs', 'sets'] +['ĠCom', 'posite'] +['ĠL', 'abs'] +['ĠHam', 'as'] +['++', ');Ċ'] +['Ġbl', 'k'] +['ĠNe', 'o'] +['L', 'uc'] +['@', 'login'] +['Ġun', 'aware'] +['.m', 'et'] +['_RE', 'LEASE'] +['(', 'ST'] +['AM', 'IL'] +['ri', 'ke'] +['Ġ(', '){Ċ'] +['(s', 'printf'] +['ĠAccount', 's'] +['ĠV', 'IEW'] +['ĠA', 'j'] +['ãĤ', '°'] +['Ġwh', 'isk'] +['Ġid', 'i'] +['Ġro', 'de'] +['Ġih', 'n'] +['ĠElement', 'ary'] +['Q', 'ty'] +['Ġintrig', 'uing'] +['Ġå', '¤'] +['J', 'obs'] +['ĉ', 'offset'] +['ĠAh', 'med'] +['ĠTal', 'iban'] +['Ġè', 'İ·åıĸ'] +['Ġinject', 'ed'] +['.Auth', 'entication'] +['_line', 'ar'] +['.Dec', 'imal'] +['Ġapp', 'les'] +['Ġshare', 'holders'] +['Ġb', 'aked'] +['.d', 'iff'] +['ĠE', 'ddie'] +['ok', 'ers'] +['Ġconfront', 'ed'] +['vo', 'ices'] +['Ġt', 'us'] +['ĠSp', 'in'] +['N', 'ODE'] +['_', 'Un'] +['CT', 'X'] +['/g', 'oogle'] +['Tem', 'perature'] +["Ġ'", "')."] +['Ġmagn', 'ificent'] +['Ġstart', 'Index'] +['semb', 'les'] +['Any', 'one'] +['z', 'k'] +['eh', 'en'] +['ĠD', 'ame'] +['.', 'strict'] +['Ġrepl', 'aces'] +['Ġline', 'back'] +['Ġpush', 'es'] +['Ġche', 'ek'] +['ĠSh', 'i'] +['_BY', 'TES'] +['RE', 'A'] +['ả', 'n'] +['_CON', 'NECTION'] +['G', 'ateway'] +['ĠTr', 'avis'] +['ĠA', 'X'] +['ĠBas', 'ically'] +['ĠUp', 'grade'] +['à', 'ª'] +['th', 'emes'] +['erm', 'o'] +['k', 'or'] 
+['F', 'emale'] +['_att', 'ach'] +['ĠìĤ¬', 'ìļ©'] +['Ġpo', 'z'] +['============', '==Ċ'] +['(s', 'ymbol'] +['ĠS', 'ector'] +['__', ')ĊĊ'] +['_p', 'adding'] +['ï¼ļ', '"'] +['Ġf', 'abs'] +['Ġr', 'anged'] +['set', 'Name'] +['Ġp', 'error'] +['â', 'Ĺ'] +['ĠFile', 'Reader'] +['Ġful', 'filled'] +['_C', 'urrent'] +['Ġdom', 'inate'] +['Ġsm', 'ugg'] +['Post', 'Mapping'] +['_for', 'ce'] +['Ġb', 'loc'] +['ĠG', 'iant'] +['(v', 'ideo'] +['ĠC', 'U'] +['System', 'Service'] +['Ġ', 'elf'] +['Ġkont', 'akt'] +['ë', 'ª'] +['ke', 'es'] +['gt', 'k'] +['Ġparam', 'Int'] +['Ġmark', 'up'] +['u', 'ales'] +['Ġaccount', 'ed'] +['Ġgang', 'bang'] +['RY', 'PT'] +['ĠW', 'rong'] +['Ġcred', 'ited'] +['ĠM', 'ESSAGE'] +['Ġfl', 'aws'] +['Ġbb', 'w'] +['Ġmetab', 'olic'] +['ĠO', 'EM'] +['/', 'event'] +['(C', 'ollectors'] +['mont', 'on'] +['ap', 'pear'] +['Ġopt', 'ed'] +['Ġche', 'at'] +['Ġd', 'av'] +['ĠPro', 'ceed'] +['Ġê', '¸'] +['ank', 'ed'] +['и', 'з'] +['ans', 'k'] +['ĠH', 'ang'] +['ĠC', 'ler'] +['Ġdis', 'gu'] +['Ġc', 'map'] +['.cl', 'js'] +['Ġa', 'ument'] +['le', 'z'] +['ĠJo', 'ined'] +['_re', 'ceived'] +['Ġa', 'erial'] +['ot', 'el'] +['Ġgre', 'et'] +['"', 's'] +['ĠGen', 'esis'] +['ĠCal', 'if'] +['pan', 'ion'] +['Ġtail', 'ored'] +['m', 'apping'] +['and', 'Expect'] +['.tr', 'ack'] +['at', 'omy'] +['ĠO', 'w'] +['ull', 'ah'] +['.Y', 'es'] +['ĠSimple', 'Name'] +['db', 'h'] +["'", 'en'] +['Ġnons', 'ense'] +['Ġphilosoph', 'ical'] +['(get', 'Context'] +['Ġis', 'so'] +['ĠA', 'CE'] +['start', 'Date'] +['Ġb', 'ÄĻd'] +['ĠAUTH', 'OR'] +['ĠGlo', 'be'] +['Ġinsect', 's'] +['_A', 'l'] +['ush', 'ing'] +['è®', '°'] +['/', 'Home'] +['ĠLocal', 'Date'] +['need', 'ed'] +['hes', 'ive'] +['Ġill', 'usion'] +['äº', 'Į'] +['Ġtr', 'at'] +['x', 'o'] +['/d', 'etail'] +['_M', 'ATCH'] +['Ġbroad', 'band'] +['Ġw', 'al'] +['ĠIllegal', 'StateException'] +['IRE', 'CTION'] +['Ġnor', 'theast'] +['es', 'ium'] +['ĠClient', 'e'] +['ul', 'ance'] +['nt', 'y'] +['Ġt', 'ecn'] +['Dev', 'ices'] +['Ġgr', 'ains'] +['ĠO', 'g'] +['ĠS', 'EL'] +['ud', 
'iant'] +['Ġ++', ';Ċ'] +['Ġexplan', 'ations'] +['oc', 'co'] +['Ġdi', 'ets'] +['Ġco', 'hort'] +['(', 'controller'] +['.Iter', 'ator'] +['-r', 'ich'] +['ro', 'cess'] +['G', 'D'] +['Ġcar', 'bohydr'] +['Ġfri', 'ed'] +['ĠEmploy', 'ment'] +['ìŀ', '¥'] +['ĠLeon', 'ard'] +['_', '${'] +['qu', 'ares'] +['Ġcompan', 'ions'] +['Ġpar', 'is'] +['Ġstim', 'ulation'] +['ĠZ', 'oo'] +['Ġre', 'levance'] +['ĠCol', 'our'] +['Ġspe', 'ar'] +['ot', 'ional'] +['ĠL', 'ite'] +['ĠK', 'osten'] +['ĠÃ', '³'] +['_att', 'achment'] +['orph', 'ic'] +['Ġdam', 'it'] +['Ġd', 'lg'] +['Ġthr', 'ive'] +['CH', 'ANGE'] +['ĠApp', 'arently'] +['Ġat', 'ual'] +['Ġroot', 'ed'] +['(', 'images'] +['aw', 'i'] +['ari', 'at'] +['Ġch', 'erry'] +['STAT', 'IC'] +['m', 'nt'] +['ĠUser', 'Id'] +['il', 'let'] +['ĠHis', 'panic'] +['Ġn', 'ak'] +['Ġcent', 'ro'] +['Ġdim', 's'] +['_initial', 'ize'] +['ı', 'k'] +['ĠCent', 'ers'] +['RE', 'N'] +['Ġevolution', 'ary'] +['ĠTop', 'ics'] +['_d', 'amage'] +['em', 'er'] +['Ġr', 'und'] +['Ġpun', 'ished'] +['Ġcub', 'ic'] +['f', 'air'] +['[]', ';ĊĊ'] +['Ġinstant', 'iate'] +['Ġover', 'see'] +['-', 'delete'] +['unte', 'er'] +['start', 'Time'] +['ĠP', 'ipeline'] +['_G', 'AME'] +['ĠC', 'ir'] +['ĉ', 'Null'] +['.Format', 'ting'] +['uc', 'umber'] +['ĠR', 'ide'] +['Ġz', 'oo'] +['Ġcheck', 'er'] +['åIJ', 'Į'] +['=', 'C'] +['Ġg', 'rit'] +['");', '//'] +['_x', 'y'] +['ĠDe', 'claration'] +['Ġcall', 'able'] +['F', 'oo'] +['ĠList', 'Item'] +['Ġin', 'accur'] +['ml', 'in'] +['ĉ', 'Data'] +['Ġev', 'olving'] +['aw', 'an'] +['Ġca', 'fe'] +['fol', 'k'] +['_ID', 'X'] +['ĠAny', 'thing'] +['ĠPalest', 'ine'] +['ĠGrid', 'View'] +['Ġcol', 'ony'] +['ĠGerm', 'ans'] +['(', '+'] +['.p', 'id'] +['.js', 'x'] +['ĠSuper', 'ior'] +['Christ', 'ian'] +['ĠL', 'ect'] +['ĉ', 'Game'] +['Ġinstrument', 'al'] +['Anim', 'ations'] +['д', 'ал'] +['ĠMos', 'es'] +['ĉĉčĊ', 'ĉĉčĊ'] +['z', 's'] +['k', 'te'] +['ä¸', 'ļ'] +['_D', 'IST'] +['bit', 'map'] +['d', 'B'] +['Ġp', 'ersistence'] +['ÑĢ', 'оÑģ'] +['$', 'l'] +['B', 'ron'] +['Ġ{', '|'] +['_ch', 
'art'] +['ĠCon', 'sum'] +['Ġh', 'emp'] +['Ġ"', '))Ċ'] +['Ġattack', 'ers'] +['Ġknowledge', 'able'] +['Ġc', 'et'] +['Ġvir', 'uses'] +["'", 'I'] +['Ġpitch', 'er'] +['Ġsweep', 'ing'] +['=', 'list'] +['apt', 'ops'] +['.de', 'pth'] +['Ġinstruct', 'ed'] +['ĠR', 'us'] +['benh', 'avn'] +['Ġи', 'н'] +['S', 'ports'] +['Ġon', 'set'] +['æĿ', 'ĥ'] +['.', 'RED'] +['_s', 'i'] +['ĠP', 'ST'] +['.on', 'Change'] +['>', 'tag'] +['ĠR', 'oh'] +['_char', 'acter'] +['ĠLaw', 's'] +['ĠB', 'achelor'] +['_s', 'wap'] +['.re', 'activex'] +['Ġreward', 'ing'] +['Med', 'ium'] +['-', '['] +['ĠRec', 'ently'] +['J', 'oint'] +['part', 'ition'] +['ĠMin', 'utes'] +['Ġind', 'o'] +['Ġabsor', 'bed'] +['ĠG', 'N'] +['_IN', 'D'] +['Ġsab', 'er'] +['Sp', 'awn'] +['output', 's'] +['ĠJeff', 'rey'] +['Ġmed', 'ieval'] +['h', 'ed'] +['Gu', 'ide'] +['Ġpsy', 'cho'] +['Ġgl', 'am'] +['E', 'lim'] +['äd', 'chen'] +['_pl', 'ain'] +['ĠS', 'au'] +['-f', 'our'] +['Ġanaly', 'zing'] +['QU', 'ERY'] +['Ġtom', 'ato'] +['_button', 's'] +['V', 'EN'] +['.set', 'Status'] +['.', 'Url'] +['+', 'ĊĊ'] +['Ġcompl', 'aining'] +['deg', 'ree'] +['conf', 'irmed'] +['Ġsub', 't'] +['p', 'arsed'] +['Ġtor', 'que'] +['Ġtroub', 'led'] +['ĠT', 'ARGET'] +['Ġtrad', 'emarks'] +['ĠCo', 'ordinate'] +['ĠV', 'iv'] +['Ġ//', '}ĊĊ'] +['Ġapr', 'ès'] +['.get', 'Position'] +['(Key', 'Code'] +['ĠSil', 'va'] +['Ġmet', 'eor'] +['Ġendorse', 'ment'] +['Over', 'view'] +['ĠP', 'oss'] +['.In', 'ject'] +['Ġeven', 'ly'] +['Ġvisual', 'ization'] +['Ġw', 'char'] +['ĠH', 'DMI'] +['Ġfun', 'ct'] +['ick', 'name'] +["','", "','"] +['Ġfor', 'wards'] +['Managed', 'Object'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠ'] +['ĉ', 'server'] +['ĠOut', 'look'] +['ĠChron', 'icle'] +['Ġdub', 'bed'] +['Ġd', 'ok'] +['ĠW', 'ear'] +['.A', 'L'] +['pare', 'n'] +['.', 'Interface'] +['Inter', 'faces'] +['.c', 'od'] +['Ġd', 'ib'] +['.Global', 'ization'] +['ĠAcad', 'emic'] +['Ġass', 'ms'] +['Aut', 'om'] +['Ġl', 'w'] +['ĠN', 'W'] +['Ġ&&', 'čĊ'] +['Ġproble', 'ma'] 
+['ĠManufact', 'uring'] +['lim', 'its'] +['-m', 'obile'] +['Ġfil', 'me'] +['/', 'map'] +['Ġdo', 'it'] +['ĠIn', 'k'] +['Ġsu', 'ed'] +['.', 'arr'] +['Ġunder', 'min'] +['ĠPro', 'c'] +['croll', 'View'] +['__', '$'] +['Ġsidew', 'alk'] +['(', 'that'] +['à¸', '·'] +['[', 'q'] +['gram', 'mar'] +['Ġt', 'ë'] +['qu', 'ito'] +['Ġspir', 'al'] +['ext', 'ended'] +['Ġf', 'ocal'] +['Ġdig', 'ging'] +['p', 'as'] +['ĠT', 'all'] +['.pro', 'xy'] +['it', 'ures'] +['TR', 'ACT'] +['ĠRe', 'alm'] +['Ġf', 'eder'] +['Ġorient', 'ed'] +['ĠAltern', 'ative'] +['Ġo', 'we'] +['Ġsour', 'ced'] +['ink', 'er'] +['.d', 'et'] +['S', 'ep'] +['ĠQ', 'ui'] +['ĠPal', 'mer'] +['(_', ','] +['s', 'amples'] +['oy', 'er'] +['ull', 'an'] +['que', 'z'] +['Ed', 'ges'] +['Ġsh', 'out'] +['ĠA', 'chie'] +['Ġha', 'ar'] +['_Con', 'struct'] +['Ġprem', 'ature'] +['Ġre', 'vert'] +["').", 'Ċ'] +['Ġs', 'chn'] +['filter', 'ed'] +['null', 'ptr'] +['S', 'aved'] +['itect', 'ure'] +['CL', 'A'] +['Ġv', 'l'] +['st', 'ell'] +['ĉ', 'Me'] +['ĠL', 'ip'] +['n', 'ational'] +['Ġwh', 'olly'] +['Ġspr', 'ings'] +['.T', 'imer'] +['ĉs', 'rc'] +['els', 'en'] +['åħ', '¶'] +['Ġcommunic', 'ating'] +['ĠQu', 'iz'] +['Ġt', 'eng'] +['Ġge', 'z'] +['ĠOut', 'side'] +['.S', 'ign'] +['(c', 's'] +['Ġdisput', 'es'] +['ĠWe', 'iss'] +['ann', 'es'] +['>', 'No'] +['ĠB', 'ach'] +['.remove', 'All'] +['re', 'fer'] +['/d', 'ashboard'] +['ĠA', 'jax'] +['Index', 'Changed'] +['ĠWe', 'ak'] +["'", '"Ċ'] +['Ġs', 'ights'] +['access', 'Token'] +['ĠJ', 'oi'] +['(d', 'omain'] +['ĉc', 'v'] +['Ġcontin', 'uation'] +['Ġpl', 'um'] +['ad', 'ir'] +['.set', 'Message'] +['Ġ', 'ï¼Į'] +['Ġsw', 'allow'] +['ĠL', 'amp'] +['Ġq', 'w'] +['Ġu', 'u'] +['C', 'oin'] +['ub', 'ic'] +['ĠDe', 'als'] +['r', 'ace'] +['Ġdict', 'ator'] +['Ġmem', 'e'] +['turn', 'ed'] +['ĠJul', 'ie'] +['.grid', 'Column'] +['Ġpup', 'py'] +['Ġp', 'am'] +['Ġ)', '{čĊ'] +['Ġinv', 'iting'] +['Ġf', 'rench'] +['v', 'im'] +['Ġwr', 'apping'] +['Ġ#-', '}Ċ'] +['([', '-'] +['Ear', 'ly'] +['Ġsh', 'iny'] +['.f', 'aces'] +['Ġreb', 'ell'] 
+['abc', 'def'] +['ä', 'lt'] +['Ġest', 'imation'] +['ph', 'ys'] +['los', 'ures'] +['_RE', 'L'] +['Ġex', 'clusion'] +['ĠSk', 'ype'] +['we', 'ise'] +['-st', 'op'] +['no', 'thing'] +['ĠE', 'gg'] +['is', 'ors'] +['Rich', 'ard'] +['Ġcounsel', 'ing'] +['Ġcomm', 'em'] +['ĠQ', 'MessageBox'] +['ĠSy', 'nd'] +['ĠFro', 'st'] +['ĠCompet', 'ition'] +['ĠAw', 'ake'] +['Ġt', 'ed'] +['ic', 'iones'] +['ĠDev', 'Components'] +['VERTISE', 'MENT'] +['ott', 'i'] +['.run', 'ner'] +['Ġuniqu', 'ely'] +['.fl', 'ag'] +['ĉ', 'rs'] +['_g', 'eneric'] +['Ġ``', '`Ċ'] +['ACH', 'INE'] +['Ġme', 'in'] +['(', 'Application'] +['(', 'br'] +['Ġrat', 'ios'] +[':', ','] +['ĠXCT', 'est'] +['ustain', 'able'] +['-', 'www'] +['it', 'les'] +['_T', 'EMP'] +['Ġs', 'yst'] +['umeric', 'UpDown'] +['ĉassert', 'True'] +['Ġw', 'f'] +['.', 'peek'] +['ĠBul', 'g'] +['Ġterr', 'ifying'] +['.M', 'ODE'] +['ĠG', 'W'] +['á', 'r'] +['Ġf', 'ic'] +['Ġcommit', 'ments'] +['-', 'tech'] +['ĠL', 'iquid'] +['ope', 'z'] +['z', 'heimer'] +['a', 'ña'] +['-m', 'edia'] +['(', 'animated'] +['_go', 'al'] +['Ġg', 'um'] +['yst', 'one'] +['.S', 'ET'] +['ĠW', 'end'] +['set', 'CellValue'] +['Ġmsg', 's'] +['c', 'ash'] +['AL', 'LOC'] +['/', 'aws'] +['Ġmic', 'rowave'] +['.Point', 'er'] +['ĉ', 'Console'] +['_s', 'orted'] +['ĠFil', 'ip'] +['Pro', 'd'] +['Ġ//!', '<'] +['ing', 'roup'] +['Ġk', 's'] +['_T', 'RI'] +['Ġteas', 'poon'] +['ĠAT', 'T'] +['Ġrecover', 'ing'] +['ĠG', 'LOBAL'] +['.P', 'ar'] +['Ġ/>', ';Ċ'] +['Ġmar', 'ble'] +['ul', 'ators'] +['ĠC', 'ycle'] +['Ġher', 'bs'] +['_m', 'etric'] +[')', '!'] +['_C', 'LOCK'] +['_', 'Button'] +['H', 'arry'] +['è¿', 'Ľ'] +['Ġstr', 'ains'] +['ĠApp', 'Bar'] +['ĠCh', 'an'] +['/v', 'ideo'] +['Ġb', 'am'] +['.Pro', 'gress'] +['$', 'f'] +['lem', 'en'] +['Ġir', 'regular'] +['ĠD', 'uncan'] +['ĠM', 'int'] +['-v', 'ideo'] +['à¦', '¾'] +['ó', 'wn'] +['ĠEM', 'PTY'] +['Ġstack', 'ed'] +['ĠH', 'A'] +['_c', 'ut'] +['Ġwhere', 'in'] +['ĠW', 'ays'] +['(count', 'er'] +['è¯', 'ķ'] +['Form', 'Group'] +['Ġble', 'w'] +['c', 'ourses'] 
+['Ġproduct', 'os'] +['ry', 's'] +['ĠRest', 'r'] +['Ġsty', 'ling'] +['>', 's'] +['Ġp', 'iv'] +['Ġit', 'ertools'] +['get', 'Repository'] +['ĠI', 'k'] +['_dev', 'ices'] +['lay', 'ui'] +['Ġhalf', 'way'] +['Ġfran', 'ç'] +['Ġtun', 'ing'] +['O', 'A'] +['_N', 'ode'] +['ar', 'de'] +['Ġfier', 'ce'] +['lic', 'ted'] +['#', 'čĊ'] +['Ġbreak', 'through'] +['ĠE', 'rik'] +['Ġb', 'ride'] +['Ġ.', '"'] +['cul', 'us'] +['ins', 'ide'] +['ĠIndian', 'apolis'] +['ĠE', 'E'] +['Ġy', 'og'] +['urre', 't'] +['.f', 's'] +['.', 'grad'] +['_c', 'ards'] +['_ac', 'curacy'] +['_ep', 'i'] +['qu', 'eda'] +['/', 'org'] +['é', 'ªĮ'] +['Ġcom', 'pte'] +['))', '['] +['Out', 'side'] +['G', 'reater'] +['ĠRender', 'er'] +['.', 'actor'] +['Account', 's'] +['Id', 'le'] +['_h', 'ours'] +['ern', 'er'] +['Jo', 'ined'] +['Ġmen', 'j'] +['requ', 'ires'] +['ĠO', 'PER'] +['.remove', 'Child'] +['ĉs', 'p'] +['Ġes', 'se'] +['r', 'ift'] +['xF', 'E'] +['ĠSh', 'akespeare'] +['________', '____'] +['Ġbudget', 's'] +['Model', 'State'] +['fill', 'able'] +['-', 'component'] +['oc', 'os'] +['ĠBUT', 'TON'] +['/', 'io'] +[',', 'out'] +['s', 'ms'] +['Th', 'omas'] +['ĠAr', 'med'] +['res', 'ume'] +['Ġrot', 'ating'] +['ĠV', 'ault'] +['Ġse', 'us'] +['.', '(*'] +['Ġa', 'mino'] +['Ġ[]', ');ĊĊ'] +['Ġprov', 'oc'] +['no', 'x'] +['.Get', 'Enumerator'] +['====', '===Ċ'] +['æĸ', 'Ļ'] +['_sc', 'roll'] +['Ġfil', 'med'] +['ĠS', 'oci'] +['g', 'ap'] +['g', 'ro'] +['V', 'ote'] +['"', 'But'] +['_R', 'C'] +['An', 'imal'] +['Â', 'Ģ'] +['ib', 'ile'] +['Ġaw', 'aken'] +['ore', 'st'] +['in', 'ja'] +['ĠI', 'van'] +['(', 'Command'] +['Ġ', '*****'] +['Î', '·'] +['Ġkv', 'inder'] +['/h', 'elpers'] +['_c', 'ases'] +['t', 'g'] +['ìĦ', '¸'] +['Register', 'ed'] +['ĉp', 'ass'] +['_d', 'igits'] +['Ġcont', 'our'] +['Ġinf', 'ants'] +['Ġjust', 'ification'] +['ĠFort', 'unately'] +['Con', 'tr'] +['ĠonCreate', 'View'] +['_S', 'AMPLE'] +['Ġallow', 'Null'] +['Ġn', 'ud'] +['Ġfet', 'ched'] +['_e', 'qu'] +['ĠUn', 'able'] +['=\\"', '"'] +['>', '{Ċ'] +['Ġcommit', 'tees'] +['ist', 
'ema'] +['+', '".'] +['ÃŃ', 'an'] +['m', 'ant'] +['Ġsou', 'theast'] +['ï¼Į', 'Ċ'] +['dialog', 's'] +['PRO', 'JECT'] +['charg', 'er'] +['-', 'port'] +['(u', 'uid'] +['.', 'export'] +['S', 'ix'] +['ĠR', 'P'] +['P', 'rem'] +['Ġconsc', 'ience'] +['Ġmargin', 'Right'] +['_d', 'istribution'] +['y', 'aml'] +['res', 'izing'] +['D', 'ock'] +['ĠLoc', 'ations'] +['G', 'Y'] +['Se', 'ed'] +['B', 'UFFER'] +['oss', 'ip'] +['ull', 'en'] +['Th', 'ings'] +['-', 'self'] +['.p', 'oll'] +['PL', 'AYER'] +['Ġå', '®'] +['G', 'ROUP'] +['ĠA', 'way'] +['Ġg', 'ospel'] +['xf', 'd'] +['M', 'ary'] +['ĠPort', 'able'] +['T', 'URE'] +['Ġutil', 'is'] +['Ġse', 'it'] +['Ġstr', 'and'] +['Ġtrans', 'c'] +['Ġ(', '^'] +['ĠAl', 'fred'] +['.m', 'em'] +['.c', 'ircle'] +['Ġ~', '/'] +['for', 'cing'] +['Ġr', 'iot'] +['pro', 'x'] +['TH', 'ON'] +['iz', 'ación'] +['ĠN', 'I'] +['ro', 'st'] +['Ġdis', 'pro'] +['_in', 'stances'] +['ï¼Į', 'âĢľ'] +['ograph', 'er'] +['end', 'as'] +['ĠIsa', 'ac'] +['ĠP', 'ine'] +['/d', 'is'] +['Ġcolor', 'With'] +['iter', 'ate'] +['_str', 'ide'] +['Ġpun', 'to'] +['.Event', 'Args'] +['(', 'center'] +['Ġneighb', 'oring'] +['ĠPr', 'ison'] +['ĠMess', 'enger'] +['Ġepid', 'emic'] +['da', 'o'] +['_com', 'plex'] +['Ġgr', 'avel'] +['_D', 'IP'] +['é', 'ment'] +['ĠA', 'ri'] +['_bit', 'map'] +['.qu', 'it'] +['(', 'valid'] +['Ġp', 'end'] +['Ġrespir', 'atory'] +['Ġre', 'bound'] +['Default', 'Value'] +['ãĥ', 'Ń'] +['Ġcomm', 'its'] +['.test', 's'] +['_f', 'r'] +['it', 'et'] +['.s', 'f'] +['Ġspace', 'craft'] +['c', 'ritical'] +['Ġde', 'pressed'] +['ĠAny', 'Object'] +['Ġun', 'b'] +['Ġdisc', 'ern'] +['(m', 'ysql'] +['L', 'atin'] +['ĠB', 'og'] +['ĠWild', 'life'] +['To', 'File'] +['iox', 'id'] +['@', 'RestController'] +['Ġ"$', '('] +['Ġ<<', '"'] +['Ġdefect', 's'] +['Ġdat', 'um'] +['h', 'in'] +['Ġreal', 'izar'] +['any', 'ahu'] +['ĠS', 'ig'] +['@', 'Data'] +['ad', 'aptive'] +['ĠC', 'atherine'] +['.c', 'r'] +['ĠCO', 'OKIE'] +['Ġp', 'ictured'] +['ĠFight', 'er'] +['Query', 'able'] +['ĠAny', 'way'] +['ĠGL', 'FW'] 
+['_n', 'amespace'] +['_', 'ft'] +['Ġ]', ')'] +['Organ', 'ization'] +['Ġconstit', 'utes'] +['Ġqu', 'and'] +['(ch', 'unk'] +['"/', '>čĊ'] +['ĠL', 'akes'] +['main', 'window'] +['Car', 'thy'] +['sp', 'in'] +['(c', 'sv'] +[':', 'red'] +['-com', 'merce'] +['à¸', '¹'] +['Ġdiscover', 'ing'] +['Ġe', 'co'] +['_f', 'ac'] +['inc', 'eton'] +['ĠGre', 'ens'] +['j', 'wt'] +['Ø', 'µ'] +['ĠBron', 'cos'] +['ĠGood', 's'] +['(G', 'TK'] +['Ġreturn', 'Value'] +['Ġsi', 'empre'] +['Ġneut', 'r'] +['w', 'ent'] +['ĠN', 'atal'] +['Ġenthusi', 'astic'] +['á»', 'į'] +['F', 'N'] +['/d', 'atabase'] +['C', 'atalog'] +['Ġbr', 'un'] +['ĠK', 'ash'] +['_P', 'l'] +['isc', 'rim'] +[',', 'width'] +['Ġin', 'mates'] +['Ass', 'ignment'] +['ĠH', 'aven'] +['Ġplay', 'ground'] +['ex', 'am'] +['@', 'Controller'] +['ul', 'iar'] +['.get', 'Parent'] +['Ġ"', ';ĊĊ'] +[':', 'size'] +['iss', 'ors'] +['Ġf', 'is'] +['Ġal', 'c'] +['ens', 'ation'] +['ĠN', 'ixon'] +['Ġmight', 'y'] +['-', 'str'] +['_s', 'pecial'] +['_A', 'DC'] +['ĠTw', 'ig'] +['um', 'bling'] +['-', 'address'] +['Ġher', 'oin'] +['Y', 'TE'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĊ'] +['F', 'riend'] +['Ġa', 've'] +['ĠP', 'NG'] +['ĠKurd', 'ish'] +['DataSet', 'Changed'] +['Ġbl', 'ades'] +['br', 'al'] +['St', 'eam'] +['Ġsig', 'u'] +['IRT', 'UAL'] +['ac', 'os'] +['UD', 'P'] +['(d', 'atabase'] +['he', 'c'] +['ĠString', 's'] +['_scal', 'ar'] +['ĉd', 'esc'] +['ĠT', 'LS'] +[';', '"Ċ'] +['ĠCor', 'byn'] +['Simple', 'Name'] +['u', 'ell'] +['ĠEnt', 're'] +['ell', 'ites'] +['-', 'place'] +['Ġfrank', 'ly'] +['ĠE', 'rf'] +['CE', 'L'] +['Ġpa', 'ÃŃs'] +['Ġh', 'edge'] +['Ġlat', 'ent'] +['ĠIR', 'Q'] +['ĠH', 'erald'] +['ĠP', 'rec'] +['ë³', '´'] +['.T', 'EXT'] +['Sal', 'ary'] +['Ġaut', 'umn'] +['Ġtrav', 'ail'] +['.S', 'um'] +['Ġc', 'ared'] +['M', 'or'] +['Ġint', 'uitive'] +['Ġj', 'ournals'] +['_', 'IT'] +['ĠT', 'rou'] +['ä¼', 'ł'] +['Has', 'ColumnName'] +['Com', 'posite'] +['Ġsp', 'ice'] +['_d', 'isk'] +['_CODE', 'S'] +['ĠInt', 'roduced'] +['ion', 'a'] +['Ġnue', 'stra'] +['o', 'ct'] 
+['ĠĠĠĠĊĠĠĠĠĊ', 'ĠĠĠĠĊ'] +['(param', 'eter'] +['Ġstud', 'ios'] +['Ġproject', 'Id'] +['Ġbd', 'sm'] +['.Sql', 'Client'] +['im', 'izer'] +['ĠC', 'ARD'] +['+', 't'] +['a', 'an'] +['.s', 'ol'] +['_Ad', 'just'] +['Ġright', 'eous'] +['ĠLog', 'ging'] +['.f', 'ilters'] +['_T', 'AB'] +['ĉs', 'ys'] +['roph', 'ic'] +['other', 'apy'] +['ĠB', 'rowse'] +['key', 'board'] +['R', 'ON'] +['+', '\\'] +['ro', 'pped'] +['Ġext', 'ensively'] +['f', 'k'] +['Ġl', 'ime'] +['year', 's'] +['Ex', 'c'] +['Ġs', 'ph'] +['Ġche', 'ating'] +['and', 'ro'] +['ÃŃ', 'o'] +['Ġpr', 'ince'] +['o', 'ire'] +['ĠD', 'estination'] +['ĠConvert', 's'] +['Ġup', 'stream'] +['o', 'led'] +['Ġserv', 'ants'] +['Ġsem', 'antic'] +['Ġcr', 'unch'] +['Ġevent', 'ual'] +['run', 'ner'] +['/', 'error'] +['Sp', 'in'] +['Ġsecret', 'ly'] +['Ġas', 'semble'] +['.P', 'erson'] +['end', 'error'] +['_', '<'] +['Ġp', 'endant'] +['S', 'leep'] +['ĠChem', 'istry'] +['Ġboss', 'es'] +['l', 'k'] +['))', '),Ċ'] +['Block', 'ly'] +['DE', 'VICE'] +['Ġreflect', 'ing'] +['Ġam', 'ple'] +['Mill', 'iseconds'] +['ĠPresident', 'ial'] +['Ġus', 'uarios'] +['ĠN', 'Z'] +['ĠSal', 'ary'] +['ĠA', 'manda'] +['_n', 'p'] +['j', 'ury'] +['Ġkö', 'n'] +['Ġtherap', 'ist'] +['Ġhomosex', 'ual'] +['ĠDr', 'ake'] +['-w', 'indow'] +['ĠLoc', 'ated'] +['.D', 'river'] +['ĠV', 'IDEO'] +['Ġmerch', 'ants'] +['ĠC', 'hest'] +['-', 'lock'] +['/', 'php'] +['Ġmil', 'ano'] +['_ST', 'YLE'] +['arg', 'er'] +['ide', 'a'] +['G', 'UID'] +['adv', 'anced'] +['me', 'al'] +['Options', 'ItemSelected'] +["='", '%'] +['ĠCh', 'am'] +[':', 'data'] +['(st', 'at'] +['Will', 'Appear'] +['Ġinform', 'al'] +['aj', 'i'] +['Ġre', 'productive'] +['ĠC', 'AS'] +['ãģ', '£'] +['F', 'UNC'] +['ĠR', 'uth'] +[')+', '('] +['CON', 'ST'] +['ĠF', 'ans'] +['Ġgroup', 'Id'] +['xffff', 'ffff'] +['Ġsam', 'pler'] +['Ġ}}', '">'] +['.', 'the'] +['Ġh', 'ollow'] +['W', 'AY'] +['ĠFac', 'ulty'] +['Attrib', 'utedString'] +['ĠLook', 's'] +['ĠR', 'ex'] +['j', 'k'] +['ĠM', 'IL'] +['Ġb', 'ard'] +['.L', 'ong'] +['Ġliv', 'est'] +['Ġsk', 
'al'] +['ic', 'ism'] +['MA', 'IN'] +['Ġmu', 'cho'] +['B', 'ODY'] +['Ġes', 'e'] +['ĉ', 'use'] +['F', 'oot'] +['.SQL', 'Exception'] +['Ġinherit', 'ance'] +['re', 'ceived'] +['Ġput', 'as'] +['ed', 'is'] +['als', 'a'] +['ĠError', 'Message'] +['Book', 'ing'] +['Ġtr', 'act'] +['ac', 'z'] +['ĠC', 'ant'] +['_reg', 'ex'] +['Ġide', 'ological'] +['Ġj', 'ihad'] +['h', 'os'] +['/s', 'ys'] +['col', 'm'] +['(p', 'ool'] +['Ġest', 'án'] +['ĠP', 'ending'] +['em', 'ás'] +['Ġktó', 'ry'] +['));ĊĊ', 'Ċ'] +['trans', 'actions'] +['Ġw', 'ield'] +['it', 'ere'] +['ert', 'ure'] +['_s', 's'] +['Ġstretch', 'ing'] +['Ġprison', 'er'] +['.Read', 'All'] +['Ġbes', 'ch'] +['--', ';čĊ'] +['Ġcr', 'isp'] +['_SC', 'AN'] +['Ġa', 'e'] +['Str', 'ict'] +['ĠMin', 'neapolis'] +['ĠBo', 'eing'] +['ar', 'is'] +['re', 'k'] +['_p', 'ipe'] +['Ġpri', 'ests'] +['(E', 'IF'] +['eh', 'icles'] +['ĠInter', 'active'] +['b', 'etween'] +['ĉNull', 'Check'] +['ĠBl', 'air'] +['ĠL', 't'] +['_in', 'line'] +['eth', 'yl'] +['Â', '¼'] +['_p', 'ackages'] +['Ġbarrel', 's'] +['_', 'he'] +['Ġreg', 'exp'] +['_', 'pts'] +['_H', 'andler'] +['ing', 'ular'] +['ĠN', 'issan'] +['ĠR', 'anch'] +['Ġper', 'ch'] +['Un', 'supported'] +['Sm', 'ith'] +['ĠLeg', 'ends'] +['M', 'i'] +['Ġg', 'f'] +['st', 'eder'] +['Ġacqu', 'iring'] +['Ġsim', 'ulator'] +['()', ',"'] +['re', 'ceive'] +['Ġin', 'place'] +['A', 'CTION'] +['ĠWeb', 'Driver'] +['files', 'ystem'] +['<', 'Order'] +['lo', 'pen'] +['ĠHE', 'IGHT'] +['.set', 'Border'] +['į', '°'] +['__', '["'] +['Ġcl', 'amp'] +['Seg', 'oe'] +['b', 'ands'] +['to', 'List'] +['amb', 'a'] +[">'", '+Ċ'] +['Ġcred', 'ible'] +['am', 'at'] +['play', 'ing'] +['.setImage', 'Resource'] +['qu', 'el'] +['Ġpod', 'r'] +['ge', 'om'] +['E', 'k'] +['ĠQ', 'atar'] +['Ġg', 'eld'] +['?', "',Ċ"] +['Ġc', 'yl'] +['(', 'ax'] +['ĠW', 'I'] +['ur', 'ally'] +['ĠBr', 'asil'] +['Ġsen', 'za'] +['ale', 'y'] +['on', 'en'] +['Ġb', 'ah'] +['Ġmolec', 'ule'] +['R', 'ad'] +['è¿', '°'] +['AN', 'CH'] +['-', 'background'] +['-', 'agent'] +['Ġprol', 'ifer'] +[':', 
'boolean'] +['Ġt', 'ide'] +['erial', 'izer'] +['_', ';čĊ'] +['F', 'ee'] +['**', ')'] +['erg', 'y'] +['ĠHon', 'or'] +['.Log', 'ging'] +['ir', 'is'] +['Ġunder', 'mine'] +['ĠD', 'y'] +['Ġt', 'yr'] +['Ġde', 'que'] +['Ġdam', 'er'] +['([]', ')Ċ'] +['.layout', 'ControlItem'] +['pe', 'ated'] +['C', 'AN'] +['rag', 'ments'] +['L', 'and'] +[')', ']);Ċ'] +['ĠS', 'ah'] +['ĠDE', 'CL'] +['With', 'in'] +['ĠN', 'amespace'] +['an', 'other'] +['sem', 'bling'] +['.des', 'cribe'] +['Con', 'sum'] +['ĠF', 'ear'] +['g', 'iven'] +['Or', 'ange'] +['<', 'boolean'] +['Ġstead', 'ily'] +['pa', 'Repository'] +['Ġresult', 'Set'] +['_', 'ENTER'] +['_re', 'peat'] +['Ġt', 'ones'] +['ĠPRO', 'P'] +['n', 'al'] +['part', 'icle'] +['Ġsign', 'aling'] +['Ġaccess', 'ory'] +['ĉĉĉĉĉĉ', 'ĠĠ'] +['Ġvie', 'le'] +['ĠNo', 'ah'] +['-', 'ag'] +['Ġmur', 'ders'] +['Ġa', 'ired'] +['ĠPL', 'AY'] +['ĠS', 'ullivan'] +['_C', 'ore'] +['Ġul', 'ong'] +['Ġblog', 'ging'] +['>', 'This'] +['Ġdata', 'Index'] +['Ġprint', 'able'] +['ĠE', 'yes'] +['_target', 's'] +['(P', 'y'] +['.', 'over'] +['Ġbr', 'u'] +['am', 'pton'] +['Ġplaint', 'iff'] +['<', 'Key'] +['b', 'ull'] +['ĠâŁ', '¨'] +['Iss', 'ue'] +['.cor', 'nerRadius'] +['C', 'ritical'] +['_p', 'hi'] +['.', 'angle'] +['Ġdynam', 'ically'] +['!', '");čĊ'] +['>', ');Ċ'] +['in', 'vest'] +['.*', 'ĊĊ'] +['Ġt', 'élé'] +['Ġsuper', 'f'] +['Ġcas', 'cade'] +['DT', 'D'] +['Ġviv', 'id'] +['Ġsubsid', 'ies'] +['ĠH', 'ass'] +['Ġcoll', 'aps'] +['Ġcer', 'amic'] +['{}', '".'] +['ĠLeak', 'age'] +['-tr', 'ash'] +['coll', 'apsed'] +['-s', 'ocial'] +['ĠCh', 'ad'] +['Ġincl', 'ined'] +['Ġst', 'o'] +['Ġstory', 'board'] +['.p', 'ayment'] +['stack', 'overflow'] +['ĠRaid', 'ers'] +['Ġ#', "'"] +['olic', 'ies'] +['ìľ¼', 'ë¡ľ'] +['em', 'ap'] +['Ġk', 'j'] +['Ġqu', 'ota'] +['ĠGard', 'ens'] +['ë²', 'Ī'] +['ĠAng', 'els'] +['Ġof', 't'] +['Ġlower', 'case'] +['Ġi', 'Param'] +['Ġche', 'apest'] +['un', 'ta'] +['_p', 'kt'] +['ic', 'ators'] +['Ġle', 'urs'] +['Ġdecre', 'ases'] +['ĉ', 'define'] +['PRE', 'C'] +['amm', 'ers'] 
+['ĠPre', 'paredStatement'] +['(d', 'irection'] +['Ġcre', 'ws'] +['ark', 'ed'] +['ĠMem', 'phis'] +['ĠS', 'ell'] +['G', 'TK'] +['Ġm', 'aid'] +[':', 'disable'] +['éĽ', 'Ĩ'] +['ĠP', 'f'] +['Ġal', 'beit'] +['open', 'h'] +['?>', '">Ċ'] +['.get', 'Source'] +['(s', 'cale'] +['D', 'u'] +['ĠP', 'IL'] +['_ref', 'resh'] +['Ġbet', 's'] +['(c', 'ar'] +['ĠV', 'on'] +['|', '--------------------------------------------------------------------------Ċ'] +['ĠGr', 'at'] +['M', 'uch'] +['(', 'Dialog'] +['.stop', 'Propagation'] +['Ġte', 'k'] +['Ġex', 'its'] +["'],", '$'] +['Ġphone', 'Number'] +['uc', 's'] +['ec', 'imal'] +['------------', '--'] +['in', 'p'] +['.po', 'jo'] +['Ġcor', 'pus'] +['Ġpractition', 'ers'] +['.p', 'ic'] +['"', 'testing'] +['Ġstring', 'By'] +['.Not', 'Null'] +['Ġr', 'ang'] +['.D', 'ynamic'] +['_R', 'ender'] +['аÑĤ', 'а'] +['Wait', 'ing'] +['ĠW', 'ik'] +['Ġoverwhel', 'med'] +['%', '">'] +['ĠA', 'E'] +['}}', '>Ċ'] +['u', 'w'] +['_t', 'yp'] +['Ġbuck', 'ets'] +['Ġgre', 'eting'] +['Ġla', 'ughter'] +['Ġant', 'agon'] +['uggest', 'ion'] +['-', 'email'] +['ĉt', 'op'] +['Ġer', 'os'] +['_tr', 'i'] +['Ġiss', 'uing'] +['Ġh', 'á'] +['Ġisol', 'ate'] +['Over', 'flow'] +[',', 'E'] +['Ġnut', 'ritional'] +['ĠAbb', 'ott'] +['Ġn', 'f'] +['.t', 'ouch'] +['.fetch', 'all'] +['_z', 'ip'] +['")', '}Ċ'] +['Ġam', 'at'] +['ĠC', 'isco'] +['Ġn', 'Ã¥'] +['PLE', 'X'] +['Ġse', 'i'] +['f', 'oto'] +['.to', 'Json'] +['å¤', 'ļ'] +['ĠKle', 'in'] +['Ġlib', 'c'] +['Ġmin', 'ers'] +['å', '¢'] +['-', 'print'] +['ĠP', 'ride'] +['T', 'odos'] +['Ġmask', 'ed'] +['Ġset', 'Data'] +['Ġtele', 'fon'] +['Ġunh', 'appy'] +['ĠT', 'ables'] +['ge', 'b'] +['(', 'debug'] +['_all', 'owed'] +['-', 'access'] +['Ġlog', 'istics'] +['Ġg', 'ems'] +['ĠM', 'ature'] +['Ġr', 'sp'] +['ĠAl', 'le'] +['.get', 'Bytes'] +['\\', 'web'] +['ynchron', 'ized'] +['Par', 'agraph'] +['Ġth', 'rottle'] +['.sql', 'ite'] +['cons', 'ulta'] +['ĠSe', 'ah'] +['C', 'e'] +['Ġsub', 'mar'] +['ER', 'E'] +['V', 'ous'] +['Ġre', 'ddit'] +['Ġsql', 'alchemy'] +['-m', 
'ile'] +['oc', 'ide'] +['P', 'our'] +['}}', '">Ċ'] +['st', 'ead'] +['Ġ@', '('] +['Ġ[', '])'] +['ĠAd', 's'] +['Ġover', 'load'] +['r', 'idden'] +['ĠDes', 'ert'] +['ĠW', 'rap'] +['ĠPortug', 'uese'] +['et', 'z'] +['ĉf', 'irst'] +['Ġmile', 'stone'] +['æĹ', 'ł'] +['Ñĥ', 'Ñī'] +['(s', 'uccess'] +['<', 'Vector'] +['co', 'ol'] +['Ġ[', ']);Ċ'] +['erv', 'als'] +['Ġin', 'vert'] +['"', 'io'] +['cur', 'so'] +['fr', 'agment'] +['Ġfeas', 'ible'] +['.set', 'Position'] +['Ġel', 'm'] +['Ġimag', 'in'] +['@', 'Spring'] +['Ġb', 'ats'] +['pu', 'és'] +['ga', 'lement'] +['ns', 'ic'] +['gi', 'ene'] +['ell', 'ation'] +['ĠBa', 'iley'] +['Sh', 'ar'] +['ĠT', 'ul'] +['ĠH', 'K'] +['Ġfree', 'zing'] +['gl', 'm'] +['ce', 'ans'] +['-c', 'ut'] +['_c', 'ircle'] +['åij', 'ĺ'] +['n', 'egative'] +['Ġind', 'ian'] +['s', 'alt'] +['Ġt', 'ing'] +['ĉm', 'od'] +['Ġs', 'int'] +['ak', 'in'] +['um', 'l'] +['ĠText', 'Input'] +['Ġpop', 'ped'] +['T', 'MP'] +['Ġpark', 'ed'] +['×Ļ', '×'] +['ĠF', 'usion'] +['Ġhe', 'ater'] +['ET', 'F'] +['ro', 'zen'] +['h', 'all'] +['ĠM', 'ik'] +['lev', 'ard'] +['-', 'heart'] +['ĉ', 'order'] +['M', 'aking'] +['Ġpled', 'ged'] +['Ġdir', 's'] +['$', 'post'] +['ĠH', 'err'] +['stant', 'iate'] +[',', '"Ċ'] +['.get', 'Color'] +['ĠS', 'AT'] +['Ġtimed', 'elta'] +['ĠM', 'ai'] +['ĉm', 'ethod'] +['Ġid', 'iot'] +['ĠTr', 'av'] +['ident', 'ified'] +['ĠDiv', 'ine'] +['.get', 'Path'] +['D', 'ash'] +['Ġinf', 'iltr'] +['Ġhandle', 'Submit'] +['bro', 'ok'] +['.g', 'eneric'] +['.short', 'cuts'] +['................................', '................................'] +['Ġdat', 'ings'] +['ĠM', 'V'] +['', '#'] +['}', '"ĊĊ'] +['Ġimprison', 'ment'] +['ason', 'ic'] +['rou', 'd'] +['uc', 'ion'] +['æĬ', '¥'] +['Ġdia', 'lect'] +['Ġon', 'Mouse'] +['const', 'expr'] +['.label', 'Control'] +['Ġwe', 'aker'] +['Ġman', 'kind'] +['ĠRE', 'CE'] +['Ġd', 'iz'] +['Ġapp', 'Bar'] +['Ġqu', 'é'] +['f', 'ra'] +['_default', 's'] +['Ġal', 'iqu'] +['_at', 'om'] +[':', 'indexPath'] +['Ġmiss', 'es'] +['Ġvis', 'ually'] +['ĠH', 'ands'] 
+['STR', 'U'] +['i', 'ates'] +['_', 'asset'] +['F', 'inder'] +['mid', 't'] +['Ġsn', 'acks'] +['(__', "('"] +['.', 'uri'] +['ĠIn', 'strument'] +['ven', 'ir'] +['($', '__'] +['.Dot', 'NetBar'] +['Ġconfig', 's'] +['Ġguess', 'ed'] +['ि', 'à¤'] +['Ġinitial', 'izer'] +['Ġ?', '",'] +['ĠVer', 'izon'] +['man', 'ifest'] +['ge', 'ben'] +['.d', 'etails'] +['G', 'ate'] +['pons', 'ible'] +['ĠEl', 'im'] +[',', 'str'] +['Ġwrit', 'ings'] +['ĠD', 'erek'] +['ĠCo', 'ordinator'] +['Ġpill', 'ow'] +['Ġnotice', 'able'] +['R', 's'] +['Ġduplic', 'ates'] +['ern', 'els'] +['k', 'J'] +['.z', 'z'] +['oll', 'and'] +['ĠSE', 'CTION'] +['_f', 'name'] +['uff', 'led'] +["'].'", '', '")Ċ'] +['ĠD', 'ollar'] +['Ġem', 'oji'] +['Car', 'ousel'] +['-', 'player'] +['Ġadjust', 'ing'] +['Ġjug', 'a'] +['alleng', 'es'] +['g', 'ene'] +['(body', 'Parser'] +['lop', 'edia'] +['ĠBeh', 'ind'] +['Ġslee', 'ves'] +['Ġdrag', 'ging'] +['ĠChe', 'vrolet'] +['Ġb', 'iz'] +['iv', 'ities'] +['ĠFrequ', 'ency'] +[',', 'char'] +['.W', 'HITE'] +['_pre', 'view'] +[')', "';Ċ"] +['_', 'ax'] +['ION', 'S'] +['.c', 'pu'] +['.input', 's'] +['UB', 'E'] +['_fe', 'ed'] +['ĠSup', 'plement'] +['!', ').'] +['es', 'us'] +['ĠU', 'DP'] +['Ġmicro', 'phone'] +['Ġconf', 'irms'] +['.is', 'NotEmpty'] +['":"', '",Ċ'] +['_S', 'CREEN'] +['ĉ', 'expected'] +['+-+-', '+-+-'] +['ĠH', 'ait'] +['fast', 'call'] +['Ġdep', 'ict'] +['v', 'b'] +['_p', 'icture'] +['ĉd', 'escription'] +['ĠW', 'ife'] +['uc', 'i'] +['Ġv', 'icious'] +['ä»', 'ĸ'] +['ue', 'ba'] +['Ġset', 'User'] +['ãģ', '¡'] +['Ġd', 'iving'] +['Ġoper', 'a'] +['user', 'content'] +['ar', 'ah'] +[')', '},'] +['y', 'un'] +['vel', 't'] +['Ġun', 'covered'] +['Ġh', 'ips'] +['Ġosc', 'ill'] +['Ġassert', 'ing'] +['ĠX', 'i'] +['.re', 'store'] +['ke', 'a'] +['Ġsp', 'elling'] +['Ġder', 'ive'] +['ab', 'we'] +['ĠD', 'ow'] +['.set', 'Type'] +['_v', 's'] +['Ġco', 'zy'] +['.c', 'ategories'] +['O', 'rg'] +['_m', 'gr'] +['Ġd', 'ungeon'] +['collection', 'View'] +['ĠBl', 'ank'] +['ac', 'ias'] +['ä', 'ä'] +['_clean', 'up'] 
+['_ACT', 'IVITY'] +['Ġtri', 'angles'] +['.Menu', 'Item'] +['Ġip', 'hone'] +['ĠW', 'on'] +[']', ']ĊĊ'] +['ĠCompar', 'ison'] +['.D', 'oc'] +['Ġcan', 'onical'] +['ĠSud', 'an'] +["')", '{'] +['Up', 'Inside'] +['b', 'uiltin'] +['ENC', 'Y'] +['x', 'be'] +['Ġch', 'uck'] +['Ġcontrad', 'ict'] +['Ġnuest', 'ro'] +['Ġarchitect', 'ural'] +['ĠF', 'ib'] +['Ġcomp', 'ares'] +['*', 'k'] +['C', 'fg'] +['çĦ', '¡'] +['nt', 'en'] +['Match', 'es'] +['ĠDOWN', 'LOAD'] +['_HAND', 'LER'] +['man', 'agement'] +['[', 'S'] +['EN', 'G'] +['ÂĢ', 'Â'] +['f', 'ang'] +['Ġsl', 'ipped'] +['ĠL', 'anka'] +['esc', 'aping'] +['Ġtack', 'les'] +['ĠPed', 'ro'] +['.P', 'rop'] +[".'", "'"] +['.G', 'enerated'] +['.New', 'Guid'] +['at', 'rigesimal'] +['ill', 'on'] +['Ġstat', 'istic'] +['spec', 'ies'] +['hold', 'ing'] +['Dr', 'upal'] +['Ġfundament', 'ally'] +['Ġbond', 'age'] +['Ġres', 'olutions'] +['Inline', 'Data'] +['\\', 'Type'] +['est', 'ion'] +['.w', 'rap'] +['Ġwar', 'riors'] +['ĠLOC', 'AL'] +['Arch', 'ive'] +['Ġembr', 'aced'] +['á»', '§'] +['.V', 'er'] +['ĠAff', 'ordable'] +['oles', 'ale'] +['ĠAp', 'plied'] +['ĠCon', 'version'] +['m', 'ega'] +['_c', 'am'] +['Ġcer', 'emon'] +['aur', 'us'] +['ĠVol', 'k'] +['.op', 'ens'] +['/', 'about'] +['ĠSt', 'd'] +['j', 'ournal'] +['())', '{čĊ'] +[',"', '\\'] +['(', 'Arrays'] +['ĠD', 'ense'] +['ase', 'ña'] +['än', 'ner'] +['/', 'stat'] +['user', 'Data'] +['Ġg', 'erman'] +['Ġt', 'z'] +['worth', 'y'] +['Format', 'Exception'] +['ph', 'erd'] +['Ġsm', 'iles'] +['ĠWh', 'enever'] +['(', 'adapter'] +['.bad', 'logic'] +['Ġbrief', 'ing'] +['.Grid', 'Column'] +['-', 'char'] +['dim', 'ension'] +['ĠC', 'opper'] +['Ġnin', 'th'] +["Ġ'", '{{'] +['Ġr', 'av'] +['_T', 'able'] +['Ġderiv', 'atives'] +['ĠR', 'aise'] +['ĠF', 'ut'] +['arm', 'or'] +['-p', 'adding'] +['Ġre', 'min'] +['ĉ', 'style'] +['ĠMembers', 'hip'] +['Ġspread', 's'] +['Ġgall', 'eries'] +['ĠClar', 'ke'] +['Ġcon', 'ception'] +['min', 'ute'] +['Ġab', 'usive'] +['_ad', 'j'] +['Ġterr', 'ific'] +['Ġover', 't'] +['our', 'cing'] 
+['Ġentr', 'ada'] +['level', 's'] +['Ġcrit', 'ique'] +['Ġrespect', 's'] +['ĠM', 'MA'] +['i', 'ene'] +['Ġenc', 'aps'] +['ĠRay', 'mond'] +['Div', 'ider'] +['iv', 'able'] +['b', 'az'] +['Ġ@', '_;Ċ'] +['ĠCl', 'aire'] +['Ġur', 'ging'] +['CE', 'E'] +['Ġtransform', 'er'] +['disc', 'ord'] +['ĠJ', 'ourney'] +['t', 'os'] +['Ġcompet', 'itions'] +['ĠO', 'BJ'] +['ĠB', 'is'] +['Ġrelax', 'ation'] +['id', 'y'] +['_IN', 'STANCE'] +['ĠP', 'ref'] +['d', 'ados'] +['ici', 'encies'] +['ĠMedia', 'Query'] +['ĠC', 'ube'] +['ĠStr', 'ange'] +['g', 'pu'] +['(d', 'ays'] +['_Init', 'Struct'] +['Ġfinger', 'print'] +['em', 'at'] +['ĠGe', 'cko'] +['Ġr', 'ails'] +['ĠL', 'um'] +['str', 'action'] +['ig', 'ung'] +['(m', 'ovie'] +['_d', 'ictionary'] +['_int', 'errupt'] +['ĠQ', 'C'] +['ik', 'ed'] +['append', 'Child'] +['rec', 'ipient'] +['r', 'é'] +['V', 'e'] +['Ġtow', 'el'] +['.last', 'IndexOf'] +['Ġplace', 'bo'] +['ĠW', 'ie'] +['.es', 'p'] +['(', 'Debug'] +['oper', 'ative'] +['Ġdece', 'ased'] +['&', 'id'] +['ĉm', 'utex'] +['el', 'ic'] +['Ġb', 'apt'] +['ĉ', 'čĊčĊ'] +['Ġfar', 'ther'] +['H', 'alf'] +['.dis', 'able'] +['.menu', 'Strip'] +['le', 'ccion'] +['Ġresult', 'Code'] +['Ġc', 'ans'] +['-e', 'lection'] +['f', 'emale'] +['_F', 'IX'] +['aus', 'ible'] +['ĠP', 'OWER'] +['Ġrecon', 'struction'] +['Ġsc', 'ans'] +['.Xtra', 'Bars'] +['âĢĺ', 's'] +['Rem', 'oved'] +['Ġparagraph', 's'] +['_m', 'argin'] +['Ġl', 'ymph'] +['Ġb', 'os'] +['ling', 'ton'] +['ĠBapt', 'ist'] +['Ġadvertis', 'ements'] +['ĠMan', 'age'] +['/', 'yyyy'] +['IO', 'US'] +['ENC', 'ES'] +['ĠF', 'iction'] +['ĉm', 'enu'] +['ĠFile', 'OutputStream'] +['ov', 'an'] +['ĠF', 'eng'] +['Ġsk', 'ipping'] +['get', 'Class'] +['ann', 'i'] +['Ġreb', 'ounds'] +['Ġpublic', 'ity'] +['Ġing', 'res'] +['use', 'ment'] +['Ġthought', 'ful'] +['.Ch', 'art'] +['Ġhat', 'te'] +['pass', 'port'] +['Ġhook', 'ed'] +['ĠL', 'ens'] +['Ġflag', 'ship'] +['Ġst', 'ip'] +['ĠG', 'EN'] +['Ġcl', 'ues'] +['ip', 'v'] +['ĠR', 'ise'] +['ĠG', 'ew'] +['tab', 'lename'] +['Ġfore', 'most'] +['_', 
'validate'] +['_an', 'alysis'] +['oll', 'a'] +['Ġqual', 'ifications'] +['Ġdistrib', 'utions'] +['ĠFl', 'ower'] +['Ġt', 'ense'] +['Ġthank', 'ful'] +['Ġcl', 'utch'] +['Ġun', 'ified'] +['ro', 'ads'] +['Ġsit', 'i'] +['Ġst', 'all'] +['_P', 'RIORITY'] +['c', 'stdlib'] +['_USER', 'NAME'] +['.by', 'tes'] +['?', 'page'] +['ermal', 'ink'] +['ĠVe', 'get'] +['/v', 'nd'] +['-', 'author'] +['.N', 'ONE'] +['ĠCon', 'current'] +['ĠC', 'ry'] +['Ġstart', 'ers'] +['ĠInter', 'action'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠ'] +['ĠLE', 'VEL'] +['E', 'll'] +['Ġcom', 'boBox'] +['ĠTh', 'eresa'] +['te', 'k'] +['_H', 'andle'] +['Ġab', 'y'] +['.g', 'dx'] +[',', 'end'] +['(L', 'ocal'] +['O', 'l'] +['kn', 'ife'] +['ar', 'ial'] +['ĠH', 'off'] +['Ġprostituer', 'ade'] +['Do', 'ctor'] +['Inst', 'ances'] +['.Set', 'Value'] +['ĉf', 'rom'] +['Ġlux', 'urious'] +['Ind', 'ent'] +['Alloc', 'ator'] +['_D', 'RAW'] +['(",', '",'] +['ĠFr', 'ances'] +['Ġgroup', 'Box'] +['(s', 'chema'] +['Print', 'f'] +['OR', 'IES'] +['-', 'gradient'] +['Ġre', 'put'] +['ar', 'in'] +['_D', 'ONE'] +['in', 'cre'] +['ig', 'nty'] +['Ġex', 'ert'] +['Ġ-', '.'] +['/', 'App'] +['-th', 'rough'] +['Ġdecl', 'ining'] +['Ġdess', 'ert'] +['Ġinc', 'umb'] +['Ġdesign', 'ation'] +['.P', 'ORT'] +[',', 'strong'] +['Ġsand', 'box'] +['Ġw', 'ines'] +['ĠP', 'av'] +['$', 'str'] +['ask', 'ell'] +['Ġh', 'ö'] +['ĠP', 'Y'] +['Get', 'Instance'] +['Text', 'Input'] +['game', 'Object'] +['/', 'events'] +['created', 'At'] +['Ġlocal', 'Var'] +['ĠWH', 'ITE'] +['per', 'ed'] +['ile', 'ge'] +['eff', 'icient'] +[',', 'color'] +['c', 'ate'] +['ĠC', 'afe'] +['Ġsimilar', 'ities'] +['Ġp', 'umps'] +['ĠHung', 'ary'] +['.User', 'name'] +['Ġsk', 'ate'] +['Ġtouchdown', 's'] +['Ġacceler', 'ate'] +['ĠH', 'elen'] +['OM', 'EM'] +['ĠK', 'un'] +['_v', 'ol'] +['Ġfind', 'All'] +['ĠMens', 'chen'] +['a', 'head'] +[');', '"'] +['kom', 'men'] +['Ġpossess', 'ed'] +['.arg', 'max'] +['.trans', 'ition'] +['AR', 'P'] +['OLUM', 'E'] +['(s', 'cript'] +['ĠÐ', 
'ĺ'] +['ĠF', 'inding'] +['on', 'ces'] +['I', 'o'] +['B', 'old'] +['Ġrenew', 'al'] +['_D', 'IALOG'] +['Ġdis', 'reg'] +['INT', 'ERN'] +['Ġt', 'oute'] +['Ġelect', 'r'] +['ĠG', 'ross'] +['ĉ', 'true'] +['.F', 'ields'] +['ĠW', 'IDTH'] +['ĠD', 'ent'] +['ĠÃ', 'ģ'] +['NS', 'Notification'] +['Ġa', 'os'] +['Ġme', 'lee'] +['.', 'Validation'] +['ĠDE', 'C'] +['-depend', 'ent'] +['Ġsu', 'ic'] +['T', 'raits'] +['$', 'message'] +['ĠD', 'ear'] +['ĉ', 'FILE'] +['l', 'anguages'] +['.P', 'rot'] +['.add', 'r'] +['-g', 'eneration'] +['IC', 'ON'] +['Ġtrans', 'plant'] +['-d', 'escription'] +['Ġch', 'asing'] +['Ġche', 'es'] +['Ġ}', '*/Ċ'] +['Tr', 'ad'] +['qu', 'eries'] +['/widget', 's'] +['sub', 'package'] +['Ġes', 'pec'] +['Ġcr', 'acked'] +['Ġcompet', 'itor'] +['P', 'urchase'] +['-', 'team'] +['olec', 'ular'] +['or', 'Thunk'] +['&', 'P'] +['Ġrel', 'ent'] +['/', '#{'] +['Ġproduct', 'Id'] +['Ġè', '¾'] +['ĠL', 'av'] +['ĠAl', 'ter'] +['.M', 'ode'] +['AD', 'IO'] +['gr', 'p'] +['æ', '·»åĬł'] +['Qu', 'it'] +['Ġdepth', 's'] +['-c', 'ategory'] +['ĠD', 'ATABASE'] +['S', 'PELL'] +['ĠFal', 'con'] +['ĠQString', 'List'] +["Ġ''", '.'] +['ĠIn', 'stitution'] +['d', 'amage'] +['az', 'or'] +['bel', 'ongsTo'] +['ver', 'ages'] +['ĠN', 'ONE'] +['ipp', 'ets'] +[',', '\\Ċ'] +['Ġfoot', 'print'] +['_', 'archive'] +['n', 'ak'] +['.get', 'Field'] +['ĠRef', 'lection'] +["Ġ'", ']'] +['ĠH', 'BO'] +['_dis', 'count'] +['Ġin', 'cest'] +['ĠD', 'odge'] +['ĠW', 'ade'] +['.N', 'O'] +['"', 'encoding'] +['ĠBlock', 'chain'] +['Ġlaws', 'uits'] +['ĠM', 'aint'] +['ch', 'ten'] +['Ġét', 'ait'] +['Ġktó', 're'] +['_', 'ctl'] +['(t', 'imer'] +['B', 'attle'] +['iz', 'o'] +['ay', 'ed'] +['I', 'OR'] +['ĠGlas', 'gow'] +['Ġsyn', 'th'] +['_log', 's'] +['.p', 'ose'] +['_Adjust', 'orThunk'] +['((', '&'] +['Ġuns', 'ure'] +['yst', 'ate'] +['íķĺ', 'ëĬĶ'] +['O', 'ULD'] +['.', 'ng'] +['Ġdefault', 'dict'] +['work', 'space'] +['Ġselect', 'ive'] +['Picker', 'Controller'] +['YNAM', 'IC'] +['.method', 's'] +['Ġpath', 'ways'] +['ĠF', 'ew'] +['K', 'G'] 
+['CRY', 'PT'] +['follow', 'ing'] +['ĠD', 'LC'] +['ĠS', 'ara'] +['Ġpres', 'et'] +['estruct', 'or'] +['ĠK', 'urt'] +['Ġair', 'plane'] +['Ġo', 'mp'] +['ĠParent', 's'] +['ĠMart', 'inez'] +['.com', 'plete'] +['Ġbroad', 'ly'] +['Ġsc', 'are'] +['ĠM', 'é'] +['Ġelim', 'ination'] +['Ġpou', 'red'] +['/', 'sw'] +['Ġcom', 'un'] +['Ġm', 'asc'] +['ĠOrgan', 'ic'] +['ĠString', 'Utils'] +['il', 'ateral'] +['Ġreluct', 'ant'] +['-', 'age'] +['Ġn', 'z'] +['."', '\\'] +['Ġpast', 'or'] +['ale', 'z'] +['Ġe', 'fect'] +['pro', 'v'] +['/', 'init'] +['Ġp', 'enn'] +['und', 's'] +['Ġs', 'size'] +['ĠPro', 'j'] +['bas', 'ename'] +['Ġsh', 'ells'] +['ĠNe', 'ck'] +['ĠEn', 'forcement'] +['vid', 'ed'] +['st', 'own'] +['S', 'phere'] +['$', 'r'] +['uss', 'en'] +['af', 'il'] +['ĠTele', 'gram'] +['Ġanaly', 'tical'] +['нÑĭ', 'е'] +['us', 'ually'] +['x', 'n'] +['Ġhistor', 'ian'] +['ĠGreg', 'ory'] +['ol', 'ph'] +['ĠUn', 'a'] +['Ġcon', 'tributes'] +['%', '-'] +['anti', 'ago'] +['ÑĢ', 'ед'] +['.reg', 'ion'] +['Ġab', 'rupt'] +['ĠUnsupported', 'OperationException'] +['ĠT', 'ASK'] +['_f', 'inish'] +['Ġnot', 'orious'] +['ĠV', 's'] +['ĠM', 'Q'] +['Ġsun', 'set'] +['Ġun', 'acceptable'] +['ar', 'cer'] +['Ġill', 'umin'] +['ĠOr', 'b'] +['Ġb', 'h'] +['E', 'ste'] +['_dis', 'patch'] +['Ġr', 'ipped'] +['Ġtou', 'jours'] +['ĠPar', 'cel'] +['_', 'll'] +['.user', 'Name'] +['.class', 'es'] +['S', 'OURCE'] +['(', 'Number'] +['ел', 'Ñı'] +['Ġhead', 'phones'] +['(s', 'ide'] +['const', 'itution'] +['ann', 'ah'] +['čĊ', 'ĠĠĠĠĠĠĠĠčĊ'] +['Ġcl', 'iff'] +['-', 'ref'] +['Ġmo', 'strar'] +['ĠPow', 'ell'] +['+', 'y'] +['ĠB', 'G'] +['_f', 'ragment'] +['.P', 'ort'] +['Ġreal', 'izing'] +['param', 'ref'] +['Ġh', 'ometown'] +['@', 'Table'] +['+"', '', '--}}Ċ'] +['F', 'rench'] +['Entity', 'Manager'] +['ĠPl', 'ain'] +['////////////////////////////////////////////////////////////////', '////'] +['Â', '³'] +['(', 'RE'] +['c', 'apt'] +['Ġorgan', 'isms'] +['Ġj', 'ets'] +['ol', 'ocation'] +['ĠApp', 'RoutingModule'] +['Ġgl', 'orious'] +['æľ', 'į'] 
+['Ġdisc', 'arded'] +['ĉĉĉĉ', 'ĠĠĠĠĠ'] +['ĠArn', 'old'] +['l', 'ug'] +['Ġpar', 'l'] +['Ġhorm', 'ones'] +['Ġm', 'ah'] +['ĠSon', 'ic'] +['Ġorgan', 'izers'] +['_PL', 'ATFORM'] +['.in', 'v'] +['Ġch', 'ord'] +['vent', 'ional'] +['ĉ', 'of'] +['Ep', 'isode'] +['.', 'Enum'] +['unk', 't'] +['ĠD', 'h'] +['ĠJ', 'ared'] +['ĠN', 'ak'] +['Ġint', 'ends'] +['End', 'ian'] +['Ġa', 'ustralia'] +['_c', 'v'] +['(res', 'olve'] +['Ġclin', 'ics'] +['lik', 'ed'] +['ASH', 'INGTON'] +['in', 'ha'] +["'", '*'] +['ĠN', 'P'] +['_b', 'eh'] +['Ġh', 'f'] +['Ġw', 'ür'] +['c', 'ategoria'] +['$', 'form'] +['Ġsub', 'way'] +['Ġis', 'Active'] +['pop', 'ular'] +['C', 'our'] +['Ġco', 'oldown'] +['Ġa', 'insi'] +['ĠGL', 'uint'] +['ere', 'al'] +['Ġarray', 'Of'] +['Ġh', 'atch'] +['========', '=='] +['ress', 'es'] +['_P', 'P'] +['.', '^'] +['_dec', 'ay'] +['ĠB', 'less'] +['met', 'rics'] +['ĠCOPY', 'ING'] +['ĠDump', 'ster'] +['ĠJos', 'é'] +['ĠDesign', 's'] +['<'] +['Ġ"', '}Ċ'] +['time', 'zone'] +['Ġe', 'er'] +['max', 'cdn'] +['ĠE', 'SC'] +['ig', 'aret'] +['_conn', 'ected'] +['_re', 'verse'] +['Ġquestion', 'able'] +['ĠUS', 'C'] +['Ġtut', 'ti'] +['Ġdrop', 'out'] +['ĠActiv', 'ities'] +['ĠW', 'inds'] +["'))", ');Ċ'] +['Ġcon', 'gest'] +['ÄŁ', 'ı'] +['Ġprolong', 'ed'] +['è¿', 'Ļ'] +['ĠCross', 'AxisAlignment'] +['LE', 'EP'] +['ĠVAL', 'ID'] +['ĠG', 'az'] +['Ġdepend', 'ence'] +['ĠP', 'rix'] +['.Compiler', 'Services'] +['j', 'ump'] +['Ġstr', 'at'] +['c', 'irc'] +['ĠC', 'USTOM'] +['x', 'aa'] +['Ġb', 'mp'] +['Ġb', 'ureau'] +['Ġw', 'aren'] +['N', 'X'] +['(', 'Window'] +['ĠChrist', 'ie'] +['_F', 'E'] +['Ġt', 'n'] +['ĠOm', 'ega'] +['communic', 'ations'] +['Home', 'Page'] +['com', 'pletion'] +['Ġsupply', 'ing'] +['YP', 'ES'] +['á', 'vel'] +['åĪ', '¶'] +['(c', 'lick'] +['\\', 'Contracts'] +['/', 'questions'] +['Ġe', 'z'] +['AM', 'S'] +['.m', 'esh'] +["Ġ'", '', '\\Ċ'] +['Rob', 'ot'] +['Json', 'Object'] +['ĠD', 'F'] +['ĠProcess', 'or'] +['_sh', 'ould'] +['.prot', 'obuf'] +['-', 'users'] +['Ġemb', 'ry'] +['F', 'ONT'] +['Ġstart', 
'ups'] +['ĠData', 'Source'] +[')', '#'] +['uro', 's'] +['_C', 'olor'] +['Ġstand', 'alone'] +['}', '['] +['j', 'd'] +['Ġforg', 'ive'] +['Ġng', 'x'] +['ĠGener', 'ally'] +['Ġconfig', 'urable'] +['/', 'order'] +['Ġv', 'as'] +["')", '";Ċ'] +['ĠR', 'R'] +['ĠT', 'roy'] +['Ġcomprom', 'ised'] +['ĠSw', 'an'] +['int', 'endent'] +['Cent', 'ral'] +['_', 'keeper'] +['Ġar', 'quivo'] +['ĠRead', 'Only'] +['_cur', 've'] +['k', 'v'] +['ent', 'in'] +['è', '±'] +['ĠE', 'y'] +['.im', 'read'] +['ĠP', 'am'] +['if', 'fe'] +['at', 'ivity'] +['xb', 'c'] +['Ġgr', 'im'] +['-f', 'illed'] +['names', 'e'] +["']", ':'] +['Ġa', 'ur'] +['ĠGib', 'son'] +['.Mouse', 'Event'] +['Ġl', 'ado'] +['avad', 'oc'] +['Ġfam', 'il'] +['ĠM', 'oder'] +['f', 'ps'] +['ãĢĢ', 'ãĢĢ'] +['-', 'example'] +['ĠAl', 'zheimer'] +['ĠU', 'tf'] +['_arg', 'uments'] +['Con', 'clusion'] +['text', 'Content'] +['rem', 'aining'] +['Ġinterrupt', 's'] +['ĠBack', 'up'] +['ĠM', 'ong'] +['Ġrecept', 'ors'] +['h', 'istor'] +['.cor', 'outines'] +['Ġsh', 'outed'] +['Al', 'arm'] +['Ġcomb', 'ust'] +['Ġg', 'rote'] +['ult', 'ural'] +['(', 'ids'] +['----------------------------------------------------------------', '----------------'] +['ipl', 'inary'] +['O', 'pts'] +['ĠY', 'ale'] +['local', 'Storage'] +['Ġequ', 'ival'] +['ĠF', 'leet'] +['\\', 'b'] +['*', 'pi'] +['ĠQ', 'Label'] +['æ', '¡'] +['Ġv', 'x'] +['ĠA', 'CL'] +['Ġsu', 'cesso'] +['Ġper', 'c'] +['ĠNot', 're'] +['Ġan', 'arch'] +['R', 'ing'] +['sp', 'b'] +['Ġstr', 'pos'] +['st', 'ores'] +['ĠMap', 'le'] +['(Main', 'Activity'] +['("', '"))'] +['Ġview', 'Holder'] +['Qu', 'ad'] +['Ġig', 'ual'] +['ors', 'che'] +['.m', 'argin'] +['Ġind', 'ie'] +['Ġfr', 'anc'] +['ĠForm', 'Builder'] +['ĠPart', 'icip'] +['.fl', 'ash'] +['Ġstorm', 's'] +['U', 'lt'] +['Ġf', 'en'] +['[', 'new'] +['E', 'ver'] +['="', 'Ċ'] +['Ġlocal', 'ized'] +['_f', 'ollow'] +['Ġn', 'ave'] +['Ġdomin', 'ance'] +['(t', 'ile'] +['J', 'ournal'] +['ĠV', 'C'] +['Ġpenet', 'ration'] +['ï¼', 'ķ'] +['Ġcomp', 'artment'] +['Ġb', 'ids'] +['Form', 'atted'] 
+['******', '/ĊĊ'] +['(c', 'ity'] +['âĢĶ', 'it'] +['[', 'C'] +['Ġuse', 'Callback'] +['a', 'ub'] +[')', '?.'] +['ĠV', 'AR'] +['ĠSe', 'bastian'] +['ĠM', 'oss'] +['Ġabund', 'ant'] +['G', 'reg'] +['ÑĤ', 'а'] +['_c', 'i'] +['Ġbib', 'li'] +['CR', 'M'] +['ĠAt', 'tempt'] +['ism', 'e'] +['d', 'ash'] +['ãĢ', 'İ'] +['_m', 'u'] +['.Formatting', 'Enabled'] +['Ind', 'eed'] +['-d', 'irect'] +['Ġsuck', 'ing'] +['Ġp', 'ne'] +['ocab', 'ulary'] +['ĠPack', 'ers'] +['.N', 'avigation'] +['Ġp', 'ied'] +['cri', 'bing'] +['ĠSt', 'uart'] +['.To', 'Double'] +['ĠSecond', 'ary'] +['S', 'aving'] +['ĠD', 'ut'] +['ĠM', 'add'] +['M', 'agic'] +[',', 'H'] +['.document', 'Element'] +['ĠB', 'ST'] +['Ġdiff', 'ers'] +['Ġmore', 'over'] +['_', 'nd'] +['SE', 'ARCH'] +['п', 'ÑĢав'] +['æ', '´'] +['to', 'Match'] +['Ġdecre', 'asing'] +['-m', 'ember'] +['amp', 'us'] +['(', 'boost'] +['D', 'aily'] +['Data', 'GridView'] +['ĠHttp', 'Context'] +['Ġh', 'ipp'] +['_work', 'ers'] +['-l', 'anguage'] +['é', 'ĵ'] +['Ġconsist', 'ed'] +['ath', 'ing'] +['ĠMer', 'cury'] +['$', 'content'] +['Ġpract', 'iced'] +['ĠMod', 'ules'] +['_D', 'AY'] +['Ġweakness', 'es'] +['ĠL', 'odge'] +['Ġn', 'ar'] +['ĠM', 'ate'] +['Ġj', 'p'] +['ĠHttp', 'Headers'] +['Ġsm', 'o'] +['ĠT', 'OKEN'] +[']', ')('] +['Ġaqu', 'i'] +['sw', 'agen'] +['Ġs', 'rv'] +['ĉ', 'ans'] +['A', 'round'] +['ĠMan', 'uel'] +['Ġfiction', 'al'] +['ĠIM', 'G'] +['Ġ.', "'"] +['ĠB', 'erry'] +['Ġwall', 'paper'] +['sex', 'ual'] +['ier', 'o'] +['Ġ', 'çļĦ'] +['ìĨ', 'Į'] +['Backing', 'Field'] +['ĠAd', 'rian'] +['BASE', 'PATH'] +['Ġrepe', 'ats'] +['Ġbl', 'ues'] +['Ġunp', 'redict'] +['_c', 'oll'] +['st', 'acle'] +['ĠT', 'umblr'] +['ĠEl', 'f'] +['Ġass', 'urance'] +['Ġc', 'ensus'] +['ĠIM', 'PORT'] +['END', 'ER'] +['an', 'os'] +['Ġ=', '('] +['ĠEll', 'is'] +['"', 'ĊĊĊĊ'] +['.w', 'in'] +['ĠA', 'bove'] +['al', 'on'] +['_t', 'ick'] +['Ġrepresent', 'ations'] +['Ġæ', 'ķ'] +['w', 'id'] +['ĠAr', 'ms'] +['List', 'a'] +['_f', 'ailure'] +['_c', 'm'] +['.Flat', 'Appearance'] +['Ġthr', 'one'] +['P', 'atch'] 
+['ĠV', 'oy'] +['eng', 'l'] +['Ġnegot', 'iating'] +['>', '`'] +['Ġshoot', 's'] +['ĠF', 'PS'] +['.Y', 'ear'] +['ĠK', 'iss'] +['enc', 'ión'] +['reet', 'ing'] +['From', 'File'] +['Ġresign', 'ation'] +['Ø', '·'] +['Ġtw', 'ins'] +['ưá»', '£'] +['Ġge', 'bru'] +['.get', 'Content'] +['.T', 'ree'] +['ĠEmploy', 'ees'] +['ĠF', 'IFA'] +['Ġcert', 'ainty'] +['(C', 'l'] +['Ġtot', 'als'] +['edit', 'able'] +['à¥', 'Ģ'] +['.Report', 'ing'] +['M', 'as'] +['qu', 'iet'] +['.r', 'ules'] +['ĠV', 'O'] +['con', 'exion'] +[',', 'K'] +['Ġalloc', 'ator'] +['ĠPow', 'der'] +['\\', 'Repository'] +['Be', 'at'] +['_t', 'ipo'] +["Ġ['", "',"] +['_IN', 'TR'] +['Ġ<<', '<'] +['<', 'hr'] +['")', '=='] +['ugg', 'age'] +['ĠC', 'raw'] +['Ġé', 'galement'] +['Ġg', 'inger'] +['Ġprim', 'era'] +['Ġprod', 'uto'] +['lt', 'k'] +['.User', 'Name'] +['Ġstr', 'error'] +['m', 'ith'] +['_n', 'b'] +['Ġdis', 'comfort'] +["'];", '?>', '");čĊ'] +['drop', 'IfExists'] +['ĠB', 'eg'] +['_H', 'AL'] +['Ġcross', 'AxisAlignment'] +['ĠE', 'vidence'] +['Ġpec', 'uliar'] +['Ġinstit', 'ute'] +['ve', 'is'] +['Ġf', 'ft'] +['Ã', 'ģ'] +['Ġzo', 'ekt'] +['an', 'aly'] +['ĠHom', 'eland'] +['Ġpen', 'etr'] +['udden', 'ly'] +['ĉ', 'element'] +['ĠB', 'ren'] +['ĠTr', 'udeau'] +['ĠCub', 'an'] +['j', 'am'] +['us', 'lim'] +['_e', 'v'] +['Ġst', 'ems'] +['}', '%'] +['Ŀ', 'å§ĭ'] +['Ġbrand', 'ing'] +['Ġcorrespond', 'ence'] +['.j', 'query'] +['¢', 'åįķ'] +['ĠRead', 's'] +['(Http', 'StatusCode'] +['ass', 'in'] +['(s', 'lot'] +['ĠGrad', 'uate'] +['///', '<'] +['Ġinform', 'ations'] +['EN', 'ABLE'] +['Ġp', 'uis'] +['Ġfind', 'er'] +['ĠBr', 'is'] +['Ġnett', 'steder'] +['_m', 'id'] +['Ġo', 'gs'] +['ĠSter', 'ling'] +['Ġar', 'rog'] +['str', 'ftime'] +['|', 'ĊĊ'] +['Ġvo', 'x'] +['ĠReg', 'ardless'] +['Ġes', 'o'] +['ĠCom', 'fort'] +['.Boolean', 'Field'] +['Ġu', 'h'] +['AC', 'Y'] +['Ġsque', 'ez'] +['ĠV', 'ic'] +['cont', 'ro'] +['.', 'lo'] +['Ġ', 'ire'] +['ĠCom', 'edy'] +['ë', '¶'] +['Ġorigin', 'ated'] +['Ġsh', 'ipment'] +['|', 'max'] +['_g', 'uid'] +['lev', 'ation'] 
+['на', 'Ñı'] +['(', 'undefined'] +['ĠD', 'DR'] +['Ġshoot', 'ings'] +['ĠLat', 'ino'] +['END', 'OR'] +['Ġaver', 'aging'] +['Ġgre', 'eted'] +['Ġthe', 'aters'] +['о', 'е'] +['Ġd', 'B'] +['Ġg', 'st'] +['Ġdef', 'inite'] +['.', 'Storage'] +['.h', 'er'] +['Ġa', 'fore'] +['ĠRe', 'ality'] +['ĠGod', 's'] +['vers', 'ed'] +['Ġhands', 'ome'] +['Ġex', 'cluding'] +['(', 'ad'] +['Qu', 'otes'] +['ĠS', 'cheme'] +['?', 'q'] +['ĠT', 'amil'] +['T', 'icks'] +['Ġp', 'est'] +["'", 'n'] +['Ġporn', 'ography'] +['_mod', 'al'] +['Ġ', '----------'] +['Ġdis', 'posable'] +['F', 'REE'] +['Ġsh', 'ark'] +['C', 'HE'] +['Ġdep', 'icted'] +['Ġdemonstr', 'ations'] +['ĠK', 'illed'] +['ĠR', 'ULE'] +['Ġobs', 'essed'] +['Ġsimpl', 'ified'] +['Post', 'al'] +['Ġconcept', 'ual'] +['Ġp', 'st'] +['L', 'as'] +['_PRO', 'JECT'] +['ucceed', 'ed'] +['ol', 'u'] +['ÄŁ', 'i'] +['Ġpersonal', 'ities'] +['Ġres', 'hape'] +['Ġenc', 'losed'] +['ĉp', 'tr'] +['Ġtutor', 'ials'] +['Ġexpl', 'oded'] +['_DIRECT', 'ORY'] +['åĨħ', '容'] +['Ġcan', 'on'] +['Ġrecogn', 'ise'] +['P', 'AD'] +['ĠAppro', 'x'] +['ĠRest', 'ore'] +['ĠImport', 'ant'] +['Ġheav', 'ier'] +['.Se', 'quential'] +['Ear', 'th'] +['ĠMil', 'k'] +['.set', 'Request'] +['.t', 'em'] +['Ġre', 'construct'] +['Ġskept', 'ical'] +['_Pr', 'ivate'] +['BU', 'F'] +['qu', 'a'] +[':', 'a'] +['Ġse', 'k'] +['Ġd', 'well'] +['oss', 'a'] +['Ġreward', 'ed'] +['и', 'й'] +['(top', 'ic'] +['_part', 'ition'] +['Ġ__', '________________'] +['Key', 'words'] +['ĠFr', 'anco'] +['L', 'ite'] +['Ġn', 'aken'] +['Ġз', 'а'] +['O', 'BJECT'] +['Ġcraft', 's'] +['ĠSw', 'ap'] +['.X', 'na'] +['.Con', 'nect'] +['Ġbalcon', 'y'] +['(re', 'al'] +['ĠBarn', 'es'] +['b', 'ir'] +['ĠTw', 'enty'] +['ay', 'an'] +['at', 'ars'] +['ĠProp', 'el'] +['ĠIh', 'nen'] +['Up', 'grade'] +['Ġcur', 'b'] +['-', 'second'] +['Ġn', 'eph'] +['.p', 'res'] +['ìŀ', 'ħ'] +['.se', 'q'] +['Ġp', 'added'] +['"', '?'] +['j', 'l'] +['ãĥ', '¬'] +["')", '', 'a'] +['Co', 'ordinates'] +['Ġen', 'acted'] +['ENT', 'S'] +['Ġl', 'ac'] +['.f', 'inal'] +['ĠPhp', 
'Storm'] +['c', 'alled'] +['Ġin', 'quiries'] +['.m', 'iddleware'] +['ĠD', 'owntown'] +['/', "';Ċ"] +['Ġkil', 'omet'] +['ac', 'cel'] +['Ġqu', 'ien'] +['w', 'string'] +['set', 'Data'] +['Ġman', 'era'] +['Ġmod', 'ular'] +['rim', 'p'] +['Ġtar', 'iffs'] +['âĢĻ', 'il'] +['_TH', 'ROW'] +['/c', 'olor'] +['ĠHT', 'MLElement'] +['Ġcar', 'ro'] +['Ġpr', 'ere'] +['Ġplot', 'ting'] +['ĠPos', 'itive'] +['ĠMach', 'ines'] +['OT', 'ES'] +['á»', 'Ľ'] +['ple', 'asant'] +['Ġal', 'te'] +['Ġa', 'inda'] +['th', 'ese'] +['Ġc', 'ors'] +['ip', 'ay'] +['ĠAdvis', 'ory'] +['ĠRub', 'io'] +['j', 'q'] +['Ġl', 'imestone'] +['Ġdet', 'ached'] +['设', 'ç½®'] +['ten', 'ant'] +['ĠDep', 'th'] +['al', 'ore'] +['ĠÑģÑĤÑĢ', 'ок'] +['ĠF', 'ORE'] +['ĠL', 'ay'] +['p', 'resentation'] +[')', "');Ċ"] +['.sub', 'plots'] +['Ï', 'ĥ'] +['N', 'OW'] +['G', 'ar'] +['hand', 'les'] +['ab', 'ra'] +['put', 'ies'] +['ĠElect', 'rical'] +['M', 'iddle'] +['rop', 'ic'] +['ĠJ', 'D'] +['ĠD', 'yn'] +['ĠB', 'ristol'] +['ĠMc', 'Carthy'] +['Ġstri', 'ker'] +['Ġenumer', 'able'] +['ĠEv', 'an'] +['.default', 's'] +['qu', 'ences'] +[')', '||'] +['ĉt', 'oken'] +['â', 'Ĺı'] +['-d', 'ropdown'] +['ST', 'ORE'] +['ĠGraph', 'ic'] +['(', 'pp'] +['Ex', 'pl'] +['Ġup', 'wards'] +['ĠD', 'istributed'] +['ĠW', 'EB'] +['J', 'er'] +['is', 'NaN'] +['çĶŁ', 'æĪIJ'] +['>', 'R'] +['üss', 'en'] +['ef', 's'] +['Ġun', 'cover'] +['Ġl', 'ud'] +['.cal', 'culate'] +['Ġint', 'ptr'] +['Ġmidfield', 'er'] +['.', 'Headers'] +['Ġm', 'f'] +['ere', 'f'] +['.M', 'etro'] +['ĠSpe', 'aking'] +[':', 'b'] +['Ġcryptoc', 'urrencies'] +['Ġdem', 'ons'] +['ĉ', 'EXPECT'] +['Ġw', 'icked'] +['y', 'outube'] +[':', 'Int'] +['ĠHind', 'i'] +['ĠC', 'AT'] +['ĠØ', '¹'] +['r', 'ar'] +['om', 'ore'] +['/', 'per'] +['/lic', 'ense'] +['Ġre', 'im'] +['Ġawait', 'ing'] +['Ġle', 'thal'] +['ĠE', 'F'] +['round', 'ed'] +['ĠPl', 'atinum'] +['ĠвÑģ', 'е'] +['.co', 'ords'] +['.De', 'vice'] +['/', 'item'] +['ĠW', 'enn'] +['compile', 'Components'] +['ĠK', 'inder'] +['.remove', 'Item'] +['Ġand', 'a'] +['bn', 'b'] 
+['Ġpr', 'a'] +['(', 'transaction'] +['Ġembarrass', 'ing'] +['ĉ', 'BOOL'] +['.content', 'View'] +['Ġevent', 'data'] +['at', 'ore'] +['Ġprovided', 'In'] +['ir', 'ma'] +['Ġz', 'ona'] +['_H', 'W'] +['æ', 'Ļ'] +['Ġst', 'ove'] +['Ġcounter', 'part'] +['_Pro', 'duct'] +['_MAN', 'AGER'] +['Ġinfr', 'ing'] +['ĠE', 'RA'] +['_p', 'arty'] +['Ñ', 'ij'] +['Ġin', 'ici'] +['_', 'Request'] +['Ġmir', 'acle'] +['Ġcancel', 'Button'] +['S', 'py'] +['at', 'ó'] +['Ġpol', 'ish'] +['ĠNic', 'ole'] +['.display', 'Name'] +['\\Request', 's'] +['Ġuse', 'History'] +['Router', 'Module'] +['Ġst', 'ared'] +['ID', 'ER'] +['Ñĥнк', 'ÑĨи'] +['Ġnot', 'a'] +['$', 'arr'] +['pec', 'ified'] +['Ġto', 'pp'] +['_DR', 'IVER'] +['/', 'ng'] +['å', 'ł'] +['_t', 'm'] +['%', 'timeout'] +['<', 's'] +['Ġ(', '*)'] +['ĠHttp', 'Request'] +['_TR', 'ACK'] +['(n', 'ote'] +['ĠExp', 'lore'] +['_s', 'erv'] +['Ġç', '»'] +['B', 'inder'] +['+', '",'] +['.', 'att'] +['ĠEth', 'i'] +['Ġc', 'ódigo'] +["='", '\\'] +['.l', 'ines'] +['(', 'Of'] +['å°', 'Ĩ'] +['miss', 'ible'] +['Ġv', 'é'] +['Ġac', 'oustic'] +['Ġcraft', 'ing'] +['n', 'it'] +['.b', 'a'] +['ĠLuc', 'y'] +['Ġi', 'Pod'] +['Ġpup', 'ils'] +['-m', 'ax'] +['_w', 'r'] +['(c', 'p'] +['ĠRE', 'PORT'] +['Ġd', 'ns'] +['ĠRe', 'ferences'] +['Ġundert', 'aken'] +['Ġkø', 'benhavn'] +['Ġch', 'ai'] +['ĠC', 'roat'] +['_', 'Log'] +['rown', 'ed'] +['_m', 'ed'] +['ĉ', 'date'] +['#', '__'] +['Ġcost', 'umes'] +['ĠRe', 'quires'] +['aff', 'le'] +['ç', 'Ĭ¶æĢģ'] +['-S', 'emit'] +['ela', 'ide'] +['еÑĤ', 'од'] +['Ġp', 'estic'] +['Ġd', 'ra'] +['DOC', 'UMENT'] +['Ġ...', 'čĊ'] +['}`', '}Ċ'] +['ĠA', 'uction'] +['ĠD', 'ock'] +['xxxx', 'xxxx'] +['(get', 'String'] +['ħ', 'į'] +['Ġborder', 'Width'] +['ĠMach', 'inery'] +['Ġpredict', 'able'] +['.S', 'H'] +['Ġam', 'plitude'] +['.for', 'Root'] +['IN', 'avigation'] +['Table', 'Model'] +['at', 'trib'] +['Ġmaneu', 'ver'] +['Ġexc', 'av'] +['B', 'ERS'] +['Ġd', 'apat'] +['Ġinstall', 'ations'] +['.A', 'sync'] +['Ġr', 'ays'] +['=', 'âĢĿ'] +[';', 'ččĊ'] +['.c', 'rypto'] 
+['_db', 'g'] +['ĠEnum', 'erable'] +['Of', 'Size'] +['_epoch', 's'] +['m', 'w'] +['M', 'ENU'] +['out', 'line'] +['ĠP', 'apers'] +['============', 'Ċ'] +['Ġuniform', 's'] +['ĠG', 'ig'] +['-', 'package'] +['ĠJen', 'kins'] +['ĠHome', 'Page'] +['.is', 'Selected'] +['Ġmechan', 'ic'] +['M', 'K'] +['ĠS', 'ounds'] +['//----------------------------------------------------------------------------', '-Ċ'] +['Ġresearch', 'ing'] +['Ġinf', 'os'] +['ograph', 'ics'] +['ers', 'et'] +["(['", '/'] +['ĠTim', 'ber'] +['.', 'agent'] +['.to', 'JSON'] +['_command', 's'] +['par', 'ing'] +['_ad', 'just'] +['.n', 'ome'] +['(g', 'lm'] +['Status', 'Bar'] +['file', 'path'] +['?', 'âĢĻ'] +['Ġdetect', 'ive'] +['Ġunser', 'er'] +['ĠTib', 'et'] +['EN', 'DED'] +['(se', 'ed'] +['Ġsne', 'ak'] +['Ġam', 'or'] +['="', '//'] +['ĠPan', 'thers'] +['all', 'ax'] +['ĠL', 'IVE'] +['ĉD', 'WORD'] +[']=', '-'] +['Ġtorn', 'ado'] +['/', 'min'] +['Ġlung', 's'] +['-c', 'urrent'] +['ĠBook', 'ing'] +['åĪĹ', '表'] +['Ġenjoy', 'ment'] +['à¤', '°'] +['J', 'A'] +['typ', 'ed'] +['.B', 'tn'] +['f', 'at'] +['ug', 'al'] +['ĠSh', 'ares'] +['Ġdis', 'gr'] +['ĠB', 'AR'] +['ĠFO', 'X'] +['Op', 'code'] +['ĠS', 'z'] +['key', 'down'] +['iction', 'aries'] +['Ġdetail', 'ing'] +['}', '))Ċ'] +['Ġp', 'ok'] +['Ġdemonstr', 'ating'] +['Ġnot', 'ation'] +['l', 'ayers'] +['@', 'if'] +['ĠN', 'PR'] +['.strict', 'Equal'] +['ĠRec', 'ipes'] +['.T', 'ensor'] +['Ġliqu', 'or'] +['Ġdeb', 'ts'] +['.ends', 'With'] +['W', 'heel'] +['.P', 'os'] +['CS', 'V'] +['$', 'arity'] +['Ġun', 'stable'] +['(', 'loss'] +['ENS', 'OR'] +['Ġele', 'ven'] +['ĠL', 'opez'] +['ĠHop', 'kins'] +['con', 'om'] +['ĠS', 'eth'] +['Ġpo', 'ems'] +['Qu', 'ant'] +['Ġg', 'sl'] +['Ġsy', 'rup'] +['Ġs', 'ibling'] +['Ġc', 'ass'] +['-v', 'ous'] +['ö', 't'] +['_P', 'ATTERN'] +['_SE', 'CTION'] +['est', 'imated'] +['up', 'grade'] +['.m', 'ongodb'] +['ĠBo', 'at'] +['_C', 'TX'] +['Ġfetch', 'ing'] +['ust', 'in'] +['pi', 'el'] +['M', 'arg'] +['Ref', 'lection'] +['Ġd', 'uct'] +['ĠMunicip', 'al'] +['Ġb', 
'x'] +['.Get', 'Current'] +['ml', 'ink'] +['ĠAccount', 'ing'] +['ĠGene', 'va'] +['_P', 'os'] +['Ġpass', 'er'] +['Ġhear', 'ings'] +['com', 'pan'] +['Ġfrag', 'ile'] +['Initial', 'izer'] +['walk', 'er'] +['.M', 'aterial'] +['ĠHun', 'ting'] +['trys', 'ide'] +['Ġk', 'at'] +['Ġcl', 'erk'] +['á', 'Ł'] +['do', 'ing'] +['ĉg', 'roup'] +['Ġsan', 'ction'] +['.l', 'b'] +['ĠL', 'azy'] +['ĠCon', 'straint'] +['P', 'agination'] +['Ġpou', 'vez'] +['ĠInd', 'icates'] +['M', 'ER'] +['Ġcour', 's'] +['Ġyear', 'ly'] +['Ġgros', 'se'] +['abb', 'rev'] +['ĠD', 'ON'] +['Ġproceed', 'ed'] +['ent', 'lich'] +['Ġproperty', 'Name'] +['ĠTe', 'aching'] +['st', 'adt'] +['Ġc', 'utoff'] +['orn', 'ers'] +['Ġa', 'frica'] +['Ġrend', 'ers'] +['ĠYan', 'kees'] +['ĠTool', 'bar'] +['sp', 'aces'] +['.fill', 'Style'] +['Ġseg', 'undo'] +['_str', 'len'] +['.F', 'irebase'] +['å¤', 'Ħ'] +['Ġmention', 'ing'] +['\\', '('] +['ĠVal', 've'] +['Set', 'ter'] +['Ġsp', 'ans'] +['ĠAl', 'cohol'] +['ĠLet', 'ters'] +['\\x', 'e'] +['ĠT', 'K'] +['_B', 'LE'] +['.get', 'Result'] +['<', 'Player'] +['ĠP', 'att'] +['Ġeas', 'ing'] +['Ġtur', 'key'] +['ĠF', 'en'] +["')", '"'] +['Ġconf', 'ined'] +['Ġin', 'clus'] +['Sup', 'erview'] +['(with', 'Identifier'] +['enc', 'ial'] +['Ġstuff', 'ed'] +['Th', 'eta'] +['Ġeconom', 'ists'] +['}', '));ĊĊ'] +['co', 'okies'] +['ĠRo', 'ose'] +['ĠChe', 'ese'] +['Ġfich', 'ier'] +['Ġen', 'forced'] +['AB', 'B'] +['no', 'ÅĽci'] +['_AL', 'LOW'] +['Ġrecru', 'ited'] +['Ġexpend', 'iture'] +['-n', 'ight'] +['Ġassert', 'NotNull'] +['_ex', 'ecute'] +['ĠØ', '¯'] +['IN', 'DEX'] +['_F', 'MT'] +['Ġresc', 'ued'] +['ĠMonth', 'ly'] +['ĠCons', 'ervation'] +['ĠG', 'eb'] +['Ob', 'ama'] +['Ep', 'och'] +['ic', 'ies'] +['ĠOr', 't'] +['Ġso', 'it'] +['(', 'icon'] +['F', 'riends'] +['m', 'ol'] +['Ġground', 'ed'] +['ĠC', 'ause'] +['ad', 'ena'] +['WE', 'EN'] +['ĠL', 'un'] +['IT', 'IVE'] +['.', 'loop'] +['_un', 'til'] +['Ġcor', 'r'] +['.ed', 'ges'] +['Ġhyp', 'oth'] +['ched', 'uling'] +['trans', 'lator'] +['ĠÐ', 'ľ'] +['R', 'om'] +['ãĢij', 
'ĊĊ'] +['ĠX', 'amarin'] +['Ġviol', 'ating'] +['.', 'anchor'] +['---', 'ĊĊ'] +['Ġtr', 'ader'] +['AD', 'VERTISEMENT'] +['Ġuns', 'ere'] +['ĠD', 'AO'] +['Ġbl', 'ond'] +['ĠP', 'AT'] +['.g', 'lob'] +['Ġè¾', 'ĵ'] +['Ġsplit', 'ting'] +['Ġun', 'subscribe'] +['Ġatmos', 'pheric'] +['ĠTr', 'im'] +['Ġcit', 'ation'] +['Ġin', 'ference'] +['ĠF', 't'] +['ĠDar', 'win'] +['find', 'One'] +['ĠG', 'el'] +['(', 'Convert'] +['Ġaccess', 'or'] +[';', 'text'] +['(s', 'orted'] +['Ġjud', 'ged'] +[');', '\\'] +[':', 'p'] +['Ġme', 'ine'] +['ĠS', 'lim'] +['.Command', 's'] +['Ġper', 'ceive'] +['coh', 'olic'] +['<', 'Data'] +['.entry', 'Set'] +['Ġassert', 'False'] +['ĠPat', 'rol'] +['ense', 'm'] +['ÅĤ', 'Äħ'] +['¨', '¡'] +['W', 'IDTH'] +['ĠRes', 'cue'] +['ĠU', 'IF'] +['_THRESH', 'OLD'] +['ĠMich', 'el'] +['ATER', 'IAL'] +['opens', 'ource'] +['ĠD', 'iana'] +['Ġinv', 'ites'] +['_B', 'ODY'] +['Ġreserv', 'oir'] +['Ġro', 'i'] +['c', 'ust'] +['(t', 'c'] +['ï¼ģ', '");Ċ'] +['Ġfest', 'ivals'] +['Ġperform', 'ers'] +['Ġclim', 'bed'] +['Ġj', 'ungle'] +['String', 'Length'] +['Ġunlaw', 'ful'] +['ier', 're'] +['vertis', 'ement'] +['Ġst', 'akes'] +['Ġh', 'ats'] +['Mod', 'ify'] +['ĠLET', 'TER'] +['.H', 'ide'] +['Ġstat', 'utory'] +['_', 'white'] +['ĠPer', 'l'] +['uten', 'berg'] +['em', 'ple'] +['.W', 'orld'] +['Ġoverlook', 'ed'] +['Ġcon', 'cludes'] +['/*', '================================================================'] +['-w', 'ise'] +['ĉ', 'stream'] +['pop', 'ulation'] +['Ġevent', 'o'] +['Ġillustr', 'ations'] +['ft', 's'] +['Ġaut', 'of'] +['ĠPro', 'cedure'] +['Ġdes', 'erved'] +['-t', 'imes'] +['Ġg', 'ol'] +['N', 'SError'] +['cre', 'st'] +['ĠPak', 'istani'] +['any', 'ch'] +['get', 'Current'] +['Ġl', 'ar'] +['nt', 'l'] +['ĠRe', 'becca'] +['Ġm', 'ateria'] +['Ġfind', 'By'] +['/', 'ad'] +['Callback', 's'] +['ĠAl', 's'] +['ĠKat', 'ie'] +['ĠObservable', 'Collection'] +['ĠDocument', 'ation'] +['Typ', 'ed'] +['ĠCulture', 'Info'] +['ĠTim', 'othy'] +['Ġlater', 'al'] +['"', 'type'] +['Ġun', 'authorized'] +['Ġteach', 'ings'] 
+['Ġdebug', 'ger'] +['[', 'value'] +['Ġal', 'ors'] +['Ġu', 'z'] +['Ġsc', 'atter'] +['Ġdown', 'ward'] +['Ġmig', 'li'] +['status', 'Code'] +['Ġ(', '))'] +['ĠM', 'W'] +['Ġм', 'ож'] +['RO', 'SS'] +['.b', 'uf'] +['Ġfair', 'y'] +['ĠInf', 'rastructure'] +['=>', '"'] +['t', 'lement'] +['$', '("'] +['From', 'String'] +['ĠB', 'ild'] +['Ġconvent', 'ions'] +['_n', 'ative'] +['ĠIns', 'pector'] +['ĠP', 'ist'] +['ub', 'ar'] +['Ġreg', 's'] +['ĠP', 'ilot'] +['Th', 'us'] +[">'", '+'] +['Ġc', 'ela'] +['.new', 's'] +['(', 'Product'] +['L', 'iving'] +['R', 'ussia'] +['Ġfac', 'et'] +['et', 'ical'] +["Ġ['", '$'] +['/', '['] +['ĠD', 'ire'] +['Ġg', 'ases'] +['ĠIN', 'FORMATION'] +['ĠE', 'at'] +['ĠFor', 'ums'] +['ĠChar', 'acters'] +['_m', 'et'] +['Ġìĭ', 'ľ'] +['Ġk', 'ings'] +['ach', 'ie'] +['ĠL', 'ambda'] +['Ġtim', 'ers'] +['ĠLight', 'ing'] +['ĠCase', 'y'] +['add', 'ir'] +['and', 'ex'] +['.', 'answer'] +['ĠH', 'ip'] +['ĠPr', 'incip'] +['Start', 'Date'] +['Ġ', 'ãĢĮ'] +['t', 'res'] +['Ġ&', '#'] +['.Max', 'Value'] +['ĠPro', 'blems'] +['Ġlat', 'ex'] +['Of', 'Class'] +['ĠLyn', 'n'] +['//', "'"] +['Ġvoy', 'age'] +['Ġshut', 'tle'] +['ĠRoll', 'er'] +['ĠRuntime', 'Error'] +['uy', 'a'] +['D', 'ic'] +['ĉb', 'uilder'] +['Ġbul', 'lying'] +['Ġsimple', 'st'] +['.c', 'alled'] +['ĠL', 'R'] +['Ġmor', 'ality'] +['Ġst', 'urdy'] +['tr', 'acking'] +['.sw', 'agger'] +['_B', 'IND'] +['IT', 'OR'] +['-url', 'encoded'] +['ĠÑ', 'ħ'] +['ĠTr', 'inity'] +['Ġtr', 'aps'] +['Ġ|', '-'] +['Ġset', 'Text'] +['Ġbarg', 'ain'] +['Ġbr', 'akes'] +['.get', 'Code'] +['Ġmigr', 'ate'] +['Ġrib', 'bon'] +[')', 'return'] +['Ġcharg', 'er'] +['ac', 'om'] +['ADI', 'US'] +['ĠAmb', 'assador'] +['-a', 'fter'] +['Ġann', 'i'] +['ĉs', 'pin'] +['Con', 'cept'] +['ĠHend', 'erson'] +['ĠH', 'OST'] +['.r', 'ank'] +['ĠNor', 'theast'] +['Ġber', 'lin'] +['Ġrequ', 'is'] +['.f', 'eed'] +['Ġsource', 'Mapping'] +['ĠRen', 'contre'] +['.', 'ajax'] +['nest', 'js'] +['Ġtre', 'k'] +['ĠN', 'acional'] +['Ġ&', '['] +['Ġpay', 'able'] +['ort', 'ex'] +['Ġde', 'pt'] 
+['field', 'Name'] +['Ġcomple', 'tes'] +['ĠR', 'VA'] +['Ġon', 'ions'] +['al', 'ignment'] +['Form', 'ats'] +["Ġ'", '{$'] +['Hash', 'Set'] +['ĠB', 'od'] +['.Invariant', 'Culture'] +['Ġsettlement', 's'] +['Ġhy', 'dr'] +['.', 'updated'] +['vent', 'h'] +['(', 'seconds'] +['="/', '"'] +['Ġweb', 'page'] +['(', 'ĊĊ'] +['Ġt', 'ir'] +['Ġto', 'es'] +['ĠBr', 'ick'] +['Ġamb', 'ition'] +['P', 'ot'] +['=', 'max'] +['ET', 'IME'] +['Ġdep', 'ot'] +['c', 'alls'] +['ĠNor', 'wegian'] +['`', ':'] +['Ġbur', 'ger'] +['Ġprofess', 'ors'] +['ĠAl', 'locate'] +['-third', 's'] +['-ch', 'art'] +['Ġfor', 'd'] +['*', 'N'] +['.k', 'otlin'] +['Ġpaper', 'work'] +['ĠDE', 'VICE'] +['%', '@",'] +['res', 'pect'] +['(m', 'p'] +['é', '«ĺ'] +['-', 'if'] +['Ġcush', 'ion'] +['ob', 'ot'] +['Ġpar', 'c'] +['SP', 'ACE'] +['ĠNet', 'anyahu'] +['Ġself', 'ish'] +['fe', 'at'] +['Ġclient', 'es'] +['-to', 'ols'] +['Ġpor', 'ch'] +['Ġj', 'q'] +['.', 'verbose'] +['Ġlib', 'erals'] +[']', ')ĊĊĊ'] +['p', 'ies'] +['Not', 'Blank'] +['(', 'term'] +['ÈĽ', 'i'] +['_Param', 's'] +['.normal', 'ize'] +['B', 'ullet'] +['AS', 'IC'] +['(h', 'ex'] +['_client', 'e'] +['+', ','] +['_D', 'I'] +['Ġforth', 'coming'] +['}', '")]Ċ'] +['se', 'o'] +['U', 'm'] +['>', 'Name'] +['Ġcomfort', 'ably'] +['irection', 'al'] +['W', 'ITH'] +['/', 'pr'] +['ĠP', 'oor'] +['ĠVit', 'amin'] +['v', 'ic'] +['G', 'H'] +['Ġprior', 'it'] +['ĠN', 'N'] +['ĠC', 'losed'] +['¤', 'í'] +['Ġis', 'Open'] +['\\', 'Console'] +['And', 'Feel'] +['.S', 'UCCESS'] +['_OPER', 'ATION'] +['pol', 'ation'] +['ĠT', 'as'] +['ps', 'z'] +['>', "'."] +['C', 'URRENT'] +['V', 'endor'] +['host', 's'] +['ĠE', 'rd'] +['>tag', 'ger'] +['ĠsourceMapping', 'URL'] +['Ġmar', 'athon'] +['_c', 'losed'] +['Ġexem', 'ption'] +['Ġrecogn', 'izes'] +['ides', 'how'] +["'", '$'] +["('/", "');Ċ"] +['m', 'its'] +['war', 'z'] +['ĠCh', 'erry'] +['µ', '¬'] +['n', 'or'] +['port', 'e'] +['Ġw', 'l'] +['_back', 'up'] +['.get', 'Boolean'] +['.get', 'Resource'] +['Ġdefinit', 'ive'] +['.', 'EditText'] +['Ġs', 'ÃŃ'] +['.C', 
'ONT'] +['ĠPL', 'AYER'] +['.c', 'ards'] +['ĠSh', 'ore'] +["('/", "')Ċ"] +['cl', 'uir'] +['Web', 'Driver'] +['(m', 'onth'] +['-re', 'lease'] +['Ġins', 'pector'] +['å', '£'] +['ĠN', 'F'] +['_cl', 'ip'] +['åŃ', 'IJ'] +['Ġinteract', 'ing'] +['.t', 'mp'] +["Ġ''", "'ĊĊ"] +['Ġde', 'e'] +['Ġfro', 'st'] +['"]', '))Ċ'] +['ĠPl', 'aces'] +['Th', 'rows'] +['f', 'ork'] +['/', 'day'] +['i', 'Phone'] +['ĠM', 'IC'] +['Ġfold', 'ing'] +['Ġcro', 're'] +['ĠCh', 'iefs'] +['pher', 'ical'] +['(', 'price'] +['.Write', 'String'] +['Ġexit', 'ing'] +[']', "',Ċ"] +['ight', 'ing'] +['Ing', 'redient'] +['(', 'vertex'] +['Ġscroll', 'View'] +['h', 'f'] +[':', 'new'] +['SE', 'N'] +['se', 'ctor'] +['Ġsp', 'ins'] +['ĠS', 'cheduler'] +['ote', 'chn'] +['sem', 'icolon'] +['Font', 'OfSize'] +['ĠSpecific', 'ally'] +['fl', 'amm'] +['.Object', 'Id'] +['Ġcont', 'a'] +['_per', 'missions'] +['ĉF', 'ROM'] +['IC', 'ODE'] +['/', 'kg'] +['ĠHot', 'els'] +['-m', 'ed'] +['ĠD', 'in'] +['Ġn', 'avy'] +['get', 'Param'] +['Ġm', 'end'] +['Ġportray', 'ed'] +['ĠMet', 'ropolitan'] +['Paint', 'er'] +['Ġref', 'erral'] +['_g', 'ood'] +['Ġmar', 'vel'] +['osa', 'ic'] +['>', '(&'] +['.', 'ur'] +['Ġest', 'os'] +['Will', 'iam'] +['Ġtim', 'ber'] +['Ġquel', 'ques'] +['ĠDoc', 'uments'] +['.X', 'aml'] +['Ġbatch', 'es'] +['éģ', 'ĵ'] +['ĠRe', 'leased'] +['T', 'ail'] +['CO', 'OKIE'] +['he', 'id'] +['_st', 'ation'] +['ĠV', 'ia'] +['S', 'ale'] +['ĠRe', 'peat'] +['Ġprom', 'in'] +['ĠZ', 'o'] +['-', 'forward'] +['ĠI', 'on'] +['it', 'ary'] +['Ġj', 'us'] +['-', 'request'] +['Ġproud', 'ly'] +['ĠStream', 'ing'] +['(Mouse', 'Event'] +['ĠS', 'print'] +['_', 'rotation'] +['Re', 'positories'] +['Ġt', 'art'] +['ĠÑģ', 'в'] +['Ġm', 'appings'] +['è', 'ª'] +['C', 'u'] +['C', 'ycle'] +['Ġb', 'un'] +['ĉl', 'ua'] +['ãĥ', 'ī'] +['Ġ((', '!'] +['Ġcollect', 'ively'] +['ĠCon', 'd'] +['Ġwsz', 'yst'] +['(l', 'ib'] +['openh', 'agen'] +['_s', 'kip'] +['.Column', 'Header'] +['é', 'Ĥ'] +['peri', 'enced'] +['ı', 'è¿°'] +['_p', 'rops'] +['Ġcontr', 'ace'] +['Ġmatch', 'up'] 
+['ab', 'etic'] +['.m', 'embers'] +['RE', 'CT'] +['(d', 'at'] +['Ġs', 'og'] +['ren', 'om'] +['_M', 'ethod'] +['Custom', 'ers'] +['full', 'name'] +['Z', 'N'] +['re', 'try'] +['Ġk', 'ap'] +['ĠNe', 'u'] +['è', 'Ĭ'] +['add', 'Child'] +['will', 'Return'] +['_p', 'ermalink'] +['Ġener', 'getic'] +['ĠW', 'et'] +['ĠMor', 'r'] +['Ġg', 'cd'] +['count', 's'] +[',', 'type'] +['d', 'ig'] +['(', 'Login'] +['Ġcr', 'acks'] +['Ġbacter', 'ial'] +['ĠMe', 'at'] +['ĠArm', 'strong'] +['ĠBron', 'ze'] +['Ġapprox', 'imate'] +['_dir', 's'] +['lig', 'a'] +['ÅĤ', 'ad'] +['Ġkind', 'ness'] +['Ġcont', 're'] +['ĠE', 'VERY'] +['M', 'ET'] +['Ġannounc', 'ements'] +['g', 'pio'] +['ĠWaitFor', 'Seconds'] +['ĠPhotos', 'hop'] +['Ġdis', 'contin'] +['/', 'dd'] +['Ġtop', 'ology'] +['an', 'ical'] +['.', 'interface'] +['auc', 'oup'] +['.Hash', 'Set'] +['ARI', 'ANT'] +['(r', 'outes'] +['ĠT', 'eh'] +['Ġh', 'ype'] +[']', '").'] +['Ġsl', 'am'] +['Ġbro', 'th'] +['-', 'inter'] +['ĠR', 'id'] +['-m', 'anager'] +['Cancel', 'ar'] +['ĠP', 'agination'] +['Ġsound', 'track'] +['Ġpost', 'erior'] +['Ġscr', 'ub'] +['cre', 'ating'] +['-', '*'] +['ir', 'teen'] +['.d', 'y'] +['.s', 'ymmetric'] +['Ġ""', '.'] +['============', '==='] +['Ġch', 'assis'] +['ĠnumberOf', 'Rows'] +['Develop', 'er'] +['_b', 'ins'] +['ĠO', 'UR'] +['ri', 'eb'] +['Pro', 's'] +['Ġwi', 'ÄĻ'] +['"', 'd'] +['Ġasync', 'io'] +['ze', 'igen'] +['_s', 'pi'] +['.A', 'LL'] +['Ġscre', 'ws'] +['Ch', 'inese'] +['Ġapi', 'Key'] +['Ġun', 'successful'] +['ĠSeah', 'awks'] +['OR', 'G'] +['ç«', 'ł'] +['Ġprofession', 'ally'] +['ĠCou', 'pon'] +['åŃĹ', '段'] +['Con', 'vention'] +['Ġpol', 'ym'] +['æī', 'ĭ'] +['Ġsalv', 'ation'] +['Ġengine', 'ered'] +['ĠW', 'rest'] +['ĠG', 'CC'] +['Ġwar', 'mer'] +['Layout', 'Constraint'] +['Ġag', 'grav'] +['Script', 's'] +['vent', 'ure'] +['Ġrefriger', 'ator'] +['Ġinnov', 'ations'] +['ĠRun', 'ner'] +['N', 'IC'] +['ĠRoll', 'ing'] +['Control', 'Events'] +['Ġlo', 'os'] +['p', 'ac'] +['ĉ', 'panel'] +['ef', 'e'] +['ĠBudd', 'ha'] +['------------', '--Ċ'] 
+['åº', 'ĵ'] +['(for', 'Key'] +['Ġl', 'umin'] +['Ġ(', '?'] +['ĠA', 'IDS'] +[',', 'user'] +['im', 'ientos'] +['content', 'Type'] +['ant', 'lr'] +['é', '¦'] +['ĠW', 'elt'] +['Produ', 'ction'] +['m', 'ight'] +['ĠV', 'II'] +['",', '('] +['Ġobserv', 'ing'] +['Ġdeliber', 'ate'] +['(', 'control'] +['Ġwith', 'd'] +['Ġsem', 'ana'] +['ST', 'ACK'] +['uch', 'en'] +['N', 'ice'] +['ĠDeutsch', 'land'] +['ĠSpec', 'ifies'] +['d', 'ma'] +['iz', 'io'] +['ĠF', 'acts'] +['_pop', 'up'] +['ĠDirect', 'ors'] +['{', ':'] +['[', 'R'] +['ĠÑį', 'леменÑĤ'] +['Ġpl', 'at'] +['Ġdirect', 'ing'] +['ä¸', 'ī'] +['ĠGil', 'bert'] +['â̦', '.ĊĊ'] +['.q', 'ml'] +['Ġthere', 'after'] +['Ġdis', 'position'] +['d', 'raft'] +['Ġsurge', 'on'] +['ĠIns', 'ider'] +['Bl', 'end'] +['ĠT', 'rev'] +['tr', 'insic'] +['Top', 'ics'] +['rie', 've'] +['_FILE', 'NAME'] +['Ġaut', 'res'] +['J', 'ose'] +['Produ', 'cer'] +['er', 'us'] +['Ġpet', 'it'] +['ĠN', 'EXT'] +['ĠF', 'ilters'] +['Ġreplic', 'ate'] +['"]', ').'] +['Ġl', 'enders'] +[']', '",Ċ'] +[';', 'charset'] +['Cpp', 'Object'] +['Ġfl', 'oral'] +['ĠT', 'ipo'] +['Ġcirc', 'uits'] +['e', 'asy'] +['(&', '$'] +['itt', 'a'] +['ery', 'l'] +['_COMM', 'ON'] +["'}}", '>Ċ'] +['-back', 'ed'] +['(var', 'iable'] +['(', 'Index'] +['Ġvo', 'ir'] +['_loc', 'ations'] +['++)', '{'] +['ĠLouis', 'ville'] +['Ġgrat', 'itude'] +['.Mock', 'ito'] +['ĠP', 'owers'] +['ie', 'urs'] +['Ġge', 'ographic'] +['ra', 'le'] +['Ġc', 'ra'] +['ĠSp', 'urs'] +['iph', 'ertext'] +['AC', 'ION'] +['-', 'common'] +['Ġvict', 'ories'] +['ĠFinal', 's'] +['.sh', 'uffle'] +['-m', 'illion'] +['_PRO', 'C'] +['ass', 'ume'] +['Ġil', 's'] +['DB', 'C'] +['Boot', 'Test'] +['Ġl', 'avor'] +['.test', 'ing'] +['.', 'ast'] +['"]', '/'] +['m', 'oid'] +['Ġqual', 'ification'] +['ges', 'ch'] +['ĉ', 'put'] +['Ġair', 'ports'] +['J', 'I'] +['Te', 'acher'] +['_un', 'iform'] +['Ġn', 'ama'] +['ĠB', 'ast'] +['ert', 'ype'] +['c', 'apture'] +['get', 'All'] +['ĠReyn', 'olds'] +['oo', 'led'] +['.com', 'ments'] +['Ġch', 'in'] +[').', '*'] +['Ġи', 'ли'] 
+['t', 'gl'] +['ud', 'os'] +['Ġd', 'ÃŃas'] +['ch', 'ai'] +['.pro', 'gram'] +['Ġps', 'z'] +['ĉ', 'icon'] +['ph', 'il'] +['ent', 'ral'] +['_WR', 'AP'] +['ov', 'i'] +['Ġnost', 'alg'] +['In', 'finity'] +['ĉy', 'ield'] +['Ġvit', 'amins'] +['Qu', 'aternion'] +['S', 'ink'] +['_g', 'oods'] +['Ġ', '........'] +['ĠW', 'ings'] +['ur', 'idad'] +['-st', 'ory'] +['"]', ')ĊĊ'] +['idel', 'ity'] +['Type', 'Def'] +['G', 'tk'] +['Ġí', 'Į'] +['_M', 'ain'] +['Ġche', 'z'] +['ĠR', 'aven'] +['Ġpay', 'roll'] +['Ġfreel', 'ance'] +['LL', 'U'] +['ĠM', 'end'] +['ed', 'ay'] +['Api', 'ModelProperty'] +['.Form', 'BorderStyle'] +['Ġeconom', 'ist'] +['stan', 'bul'] +['Ġfre', 'ight'] +['-A', 'gent'] +['(m', 'eta'] +['Ġsym', 'metry'] +["Ġ'", '..'] +['.C', 'alendar'] +['-', 'aut'] +['g', 'f'] +['p', 'ent'] +['yc', 'lopedia'] +['Ġwish', 'ing'] +['ĊĊĊĊĊĊĊĊ', 'ĊĊĊĊ'] +['Ġgentle', 'man'] +['Ġê', '³'] +['=', '#'] +['Ġlect', 'ures'] +['âĢľ', 'In'] +['Ġ!', '_'] +['Ġh', 'b'] +['ĠV', 'endor'] +['Recent', 'ly'] +['_n', 'otes'] +['æıIJ', '示'] +['"', 'My'] +['Headers', 'Height'] +['_S', 'O'] +['Ġunw', 'illing'] +['Ġsuper', 'hero'] +['g', 'io'] +['ps', 'y'] +['ĠPe', 'er'] +['j', 'avax'] +['&', 'apos'] +['ĠCr', 'isis'] +['ord', 'inal'] +['Mem', 'cpy'] +['++++++++', '++++++++'] +['-', 'val'] +['Ġwork', 'book'] +['-', 'ap'] +['=', 'k'] +['Ġmetal', 'lic'] +['_', 'peer'] +['By', 'PrimaryKey'] +['_S', 'D'] +['u', 'ator'] +['_SH', 'ADER'] +[')', 'Math'] +['.Trans', 'form'] +['Ġc', 'ows'] +['Ph', 'i'] +['ĠC', 'lem'] +['(_', '("'] +['ĠL', 'ud'] +['-d', 'elay'] +['ĠSec', 'urities'] +['ĠOrth', 'odox'] +['Sym', 'fony'] +['(re', 'port'] +['Ġent', 'ertain'] +['E', 'PS'] +['iz', 'oph'] +['ex', 'ual'] +['IR', 'D'] +['ä»', 'İ'] +['Ġl', 'ith'] +['Ġsanit', 'ize'] +['Ġfemin', 'ine'] +['IS', 'BN'] +['.auth', 'entication'] +['_p', 'ipeline'] +['/', 'constants'] +['ĠCON', 'F'] +['Ġluc', 'r'] +['ric', 'ia'] +['.t', 'tf'] +['.set', 'Content'] +['Ġst', 'an'] +['ore', 'an'] +['ĠL', 'loyd'] +['.raw', 'Value'] +['Ġg', 'or'] +['ĠBrow', 'ns'] 
+['Re', 'gression'] +['Ġlower', 'ing'] +['na', 'issance'] +['Ġbl', 'ows'] +['Ġam', 'azed'] +['Ġun', 'related'] +['Re', 'views'] +['Ġrub', 'y'] +['ĠMod', 'ifier'] +['Ġgi', 'ants'] +['.', 'thread'] +['Ġcontain', 'ment'] +['ĠStart', 'Coroutine'] +['um', 'at'] +['ore', 'lease'] +['ĠR', 'andy'] +['@', 'endif'] +['D', 'igest'] +['Ġsubur', 'ban'] +['="', ');Ċ'] +['Ġann', 'once'] +['.', 'variable'] +['\\F', 'oundation'] +['Ġa', 'cre'] +['V', 'an'] +['Ġt', 'uples'] +['d', 'ns'] +['ĠStand', 'ing'] +['_l', 'arge'] +['Ġbox', 'ing'] +['Support', 'ActionBar'] +['ĠFort', 'une'] +['ĠR', 'um'] +['_m', 'ultiple'] +['arch', 'ical'] +['Ġf', 'write'] +['_', 'quote'] +['Ġfool', 'ish'] +['Ġcompr', 'ising'] +['Ġо', 'п'] +['-', 'selected'] +['v', 'f'] +['ma', 'id'] +['N', 'ama'] +['(d', 'atetime'] +['Ġindirect', 'ly'] +['g', 'art'] +['fix', 'tures'] +['ch', 'os'] +['ĠH', 'alo'] +['Ġrec', 'urring'] +['-', 'news'] +['v', 'il'] +['ĠNurs', 'ing'] +['-', 'produ'] +['ĠH', 'Q'] +['\\Http', 'Foundation'] +['enc', 'i'] +['au', 'en'] +['Ġv', 'y'] +['ocr', 'acy'] +['Ġdeleg', 'ation'] +['Ġas', 'phalt'] +['Ġset', 'Selected'] +['k', 'ok'] +['/', 'rest'] +['met', 'ics'] +['ĠNS', 'Date'] +['Ġtravel', 'led'] +['Ġrec', 'ib'] +['Ġm', 'ime'] +['CL', 'IENT'] +['ĠG', 'U'] +['ĠH', 'ANDLE'] +['/', 'Q'] +['[', 'z'] +['Ġbother', 'ed'] +['ĠBB', 'Q'] +['ç', 'as'] +['_ex', 'amples'] +['_F', 'IN'] +['Ġwhite', 'Color'] +['Ġastr', 'onom'] +['-d', 'ir'] +['Ġsovere', 'ign'] +['Ġb', 'reeze'] +['Ġin', 'ning'] +['ĠEd', 'monton'] +['g', 'li'] +['.blog', 'spot'] +['js', 'x'] +['Ġvers', 'a'] +['ĠMoh', 'ammed'] +['.J', 'ob'] +['-t', 'oggler'] +['Ġп', 'олÑĮзоваÑĤ'] +['ard', 'on'] +['Ġnew', 'born'] +['Ġnav', 'al'] +['note', 'q'] +['Ġtum', 'blr'] +['Ġh', 'entai'] +['ĠTyp', 'ically'] +['Ġlo', 'ot'] +['.S', 'prite'] +['Fl', 'ight'] +['Ġw', 'avelength'] +['-s', 'k'] +['ĠEl', 'le'] +['_', 'exports'] +['Ġ', 'Ñı'] +['ĠI', 'H'] +['izoph', 'ren'] +['Ġí', 'ģ'] +['_pr', 'imary'] +['Ġmo', 'is'] +['ĠB', 'N'] +['Ġsystem', 'ic'] +['Ġdifer', 
'entes'] +['IN', 'CT'] +["Ġ''", 'ĊĊ'] +['$', 'q'] +['Widget', 'Item'] +['cl', 'ide'] +['$', 'file'] +['L', 'emma'] +['/', 'table'] +['ag', 'rid'] +['ĠMongo', 'DB'] +['int', 'e'] +['Ġapp', 'rent'] +['ÂŃ', 'ing'] +['.D', 'b'] +['ĠÃ', 'Ĥ'] +['ham', 'mer'] +["='", "';Ċ"] +['Ġbro', 'kers'] +['it', 'lement'] +['sembl', 'ies'] +['E', 'le'] +['{', 'x'] +['Ġlast', 'name'] +['<', '-'] +['Ġfl', 'atten'] +['_b', 'and'] +['.R', 'oot'] +['.read', 'FileSync'] +['====', '=='] +['.r', 'x'] +['?', 'čĊ'] +['Ġmetaph', 'or'] +['T', 'i'] +['con', 'te'] +['Ġdeb', 'it'] +['Ġcont', 'empt'] +['Cpp', 'Type'] +['æĶ', '¯'] +['Form', 'Field'] +['r', 'atio'] +['os', 'opher'] +['Ġimpl', 'ant'] +['P', 'URE'] +['Ġal', 'ta'] +['_man', 'agement'] +['Ġref', 'ine'] +['ĠCheck', 'Box'] +['ĠChar', 'l'] +['-', 'version'] +['cond', 'itional'] +['ven', 'ues'] +['Ġrif', 'les'] +['Ġoff', 'spring'] +['Ġmill', 'ing'] +['Ġshar', 'ply'] +['Ġunder', 'water'] +['(', 'origin'] +['_', 'Control'] +['Ġ.', '$'] +['Pl', 'ugins'] +['Ġdry', 'ing'] +['Ġillustr', 'ates'] +['-', 'u'] +['Ġveget', 'arian'] +['n', 'pc'] +['He', 'art'] +[';', "',Ċ"] +['com', 'ma'] +['te', 'enth'] +['as', 'an'] +['/s', 'pec'] +['_m', 'oves'] +['-m', 'argin'] +['Ġing', 'en'] +['³³', 'Âł'] +['Ġpro', 'jet'] +['Ġo', 'tra'] +['Ġbr', 'as'] +['.', 'utc'] +['Ġsle', 'pt'] +['=', 'sub'] +['ab', 'ilit'] +['post', 'er'] +['Ġs', 'dk'] +['ounc', 'ill'] +['Ġw', 'd'] +['Pre', 'paredStatement'] +['ĠDr', 'um'] +['(', 'attribute'] +['ĠEther', 'net'] +['ĉ', 'DB'] +['Cal', 'ifornia'] +['c', 'ube'] +['[', 'I'] +['.C', 'reated'] +['ĠH', 'M'] +['Ġtr', 'acing'] +['Forms', 'Module'] +['-', 'you'] +['.c', 'urrency'] +['feed', 'ing'] +['Ġt', 'body'] +['L', 'i'] +['acc', 'ion'] +['n', 'as'] +['Ġtr', 'ouver'] +['N', 'ONE'] +['"}', ',čĊ'] +['Ġf', 'tp'] +['With', 'Identifier'] +['pol', 'ate'] +['File', 'Info'] +['Ġpurs', 'ued'] +['ĠĠĠĠčĊ', 'ĠĠĠĠčĊ'] +['DE', 'SCRIPTION'] +['}', '*/Ċ'] +['From', 'Nib'] +['Ġdecor', 'ative'] +['_S', 'SL'] +['(ch', 'at'] +['T', 'LS'] +['Ġsurpr', 
'ises'] +['al', 'culate'] +['ĠS', 'plash'] +['(', 'Configuration'] +['ĠS', 'EM'] +['im', 'son'] +['/lib', 'rary'] +['<', 'Double'] +['.', 'robot'] +['³³³³', '³³³³'] +['ĠCP', 'F'] +['ĠUnder', 'standing'] +['Ġcos', 'metic'] +['ĠX', 't'] +['t', 'ips'] +['+', 'k'] +['("', "'"] +['ĠP', 'DT'] +['W', 'AR'] +['.get', 'Object'] +['ĠTrad', 'itional'] +['.sl', 'ug'] +['ĠDi', 'pl'] +['="', '",'] +['ĠFil', 'ms'] +['ĠAn', 'im'] +['.h', 'elp'] +['Ġemb', 'assy'] +['ĠBoot', 's'] +['Ġb', 'unk'] +['-r', 'isk'] +['Ġp', 'ci'] +['Ġ/', '\\.'] +['ĠI', 'PT'] +['Ġcrash', 'ing'] +['Ġip', 'v'] +['_', 'ke'] +['ĠRES', 'P'] +['.Log', 'Error'] +['Ġinade', 'quate'] +['I', 'on'] +['ĠF', 'ür'] +['ric', 'ula'] +['Ġshould', 'Be'] +['al', 'ready'] +['\']."', ''] +['G', 'ED'] +['fa', 'q'] +['Ġoption', 'ally'] +['_D', 'is'] +['ĠSuccess', 'ful'] +['ĠC', 'ensus'] +['Ġinc', 'arcer'] +['_C', 'ARD'] +['Ġav', 'iation'] +['ĠG', 'ym'] +['Author', 'ity'] +['.B', 'ean'] +['sh', 'ader'] +['Not', 'Exist'] +['_Text', 'Changed'] +['ĠST', 'OP'] +['(', 'team'] +['"', 'H'] +['w', 'g'] +['Ġgr', 'inder'] +['Ġstri', 'pe'] +['Ġpres', 'ervation'] +['Cl', 'aim'] +['avers', 'al'] +['ware', 'house'] +['target', 's'] +['Tr', 'ust'] +['Ġal', 'lev'] +[',', 'www'] +['ous', 'se'] +['_ch', 'an'] +['_S', 'ize'] +['system', 's'] +['Ġobj', 'ection'] +['ĠK', 'ane'] +['Ġcor', 'ros'] +['ĠD', 'SL'] +['Ġu', 'a'] +['ĠM', 'H'] +['ĠStrateg', 'ic'] +['_t', 'cp'] +['Ġê°', 'Ĵ'] +['Ġborrow', 'ed'] +['ĠA', 'ch'] +['ĉ', 'command'] +['Ġg', 'ps'] +['le', 'ston'] +['iche', 'ver'] +['ĠU', 'A'] +['Ġassault', 'ed'] +['Ġspecial', 'izes'] +['ĉ', 'search'] +['Hot', 'el'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'čĊ'] +['ĠP', 'itch'] +['Ġ', 'Ùģ'] +['READ', 'Y'] +['Ġparent', 'al'] +['Ġg', 'éné'] +['Ġdonn', 'ées'] +['Ġdet', 'ain'] +['T', 'ARGET'] +['Ġprotagon', 'ist'] +['Ġclear', 'Interval'] +['ĠIcon', 'Button'] +['ĠGet', 'All'] +['Type', 'Info'] +['E', 'H'] +['âĢľ', 'They'] +['Ġ{', '['] +['Ġg', 'ag'] +['Ġ', 'Ú©'] +['ĠD', 'ropdown'] +['.f', 'ree'] +['g', 'one'] +['im', 'ens'] 
+['Ġinst', 'al'] +['ĉc', 'url'] +['_C', 'AN'] +['ĠB', 'one'] +['ï¼', 'Ķ'] +['ony', 'ms'] +['-g', 'overnment'] +['.binding', 'Navigator'] +['ĠD', 'ans'] +['ĠMc', 'L'] +['(', 'en'] +['>(', '_'] +['ÐĴ', 'Ñĭ'] +['.*', ';čĊ'] +['=', 'j'] +['-c', 'or'] +['S', 'on'] +['.ToolStrip', 'Item'] +['-', 'around'] +['_X', 'ML'] +['end', 'Date'] +['Ġsl', 'ack'] +['Ġrot', 'ated'] +['Ġno', 'qa'] +['Ġc', 'ottage'] +['Ġencontr', 'ar'] +['_s', 'kill'] +['hou', 'ette'] +['!', 'čĊ'] +['.', 'weather'] +['Ġemphas', 'ized'] +['å®', '¶'] +['ĠÑģ', 'пиÑģ'] +['ĠComp', 'iler'] +['(', 'android'] +['ĠâĢ', 'º'] +['.', 'turn'] +['Ġsup', 'pression'] +['_c', 'alls'] +['Ġ*', '@'] +['(str', 'len'] +['.h', 'ex'] +['ĠB', 'ills'] +['ĠR', 'SA'] +['Ï', 'Ĥ'] +['ĠEs', 'cape'] +['ement', 'ia'] +['Ġfront', 'end'] +['Ġp', 'int'] +['_ex', 'c'] +['zz', 'o'] +['[', '],Ċ'] +['Ġ"\',\'', '"'] +['.', 'Environment'] +['Ġafore', 'mentioned'] +['Ġend', 'ure'] +['prot', 'otype'] +['ther', 'apy'] +['ss', 'i'] +['D', 'eg'] +['_pl', 'ugins'] +['.user', 'Info'] +['Print', 'er'] +['ĠPRO', 'GRAM'] +['Ġru', 'ins'] +['Ġempir', 'ical'] +['Ġcraw', 'l'] +['ĠBo', 'iler'] +['-', 'comment'] +['.sub', 'plot'] +['_', 'et'] +["Ġ'.", "',"] +['min', 'or'] +['ĠCustom', 's'] +['Ġy', 'aw'] +['under', 'line'] +['ĠCom', 'o'] +['(', "('"] +['(m', 'ean'] +['Ġcha', 'que'] +['ĠBlock', 's'] +['.r', 'ad'] +['ilib', 'rium'] +['Ġweb', 'driver'] +['Ġmel', 'hor'] +['d', 'ana'] +['ĠAb', 'use'] +['ĠSouth', 'west'] +['ĠP', 'aren'] +['PERT', 'IES'] +['ĉ', 'IL'] +['Ġscre', 'am'] +['v', 'u'] +['Ġin', 'comes'] +['Ġn', 'im'] +['Ġl', 'ace'] +['Ġcompens', 'ate'] +['Re', 'verse'] +['D', 'at'] +['_att', 'ack'] +['Ġn', 'our'] +['ach', 'en'] +['ce', 'k'] +['<', 'Func'] +['w', 'ie'] +['com', 'pressed'] +['-m', 'atch'] +['("', '")]Ċ'] +['im', 'ized'] +['.', 'orientation'] +['.compare', 'To'] +['Ġmass', 'aggi'] +['Ġìľ', 'Ħ'] +['Ġel', 'bow'] +['Ġant', 'ioxid'] +['undred', 's'] +['/', 'tools'] +['ĠR', 'OW'] +['an', 'mar'] +['ĠW', 'ow'] +['_t', 'icket'] +['Program', 'ming'] 
+['Ġthe', 'or'] +['-re', 'view'] +['()', ')));Ċ'] +['ĠRichard', 'son'] +['ĠP', 'ocket'] +[']', '[]'] +['am', 'pp'] +['_', 'health'] +['ĠP', 'OP'] +['ĠNav', 'al'] +['Gu', 'ess'] +['Ġancest', 'or'] +['.Get', 'All'] +['.local', 'Scale'] +['ĠM', 'apper'] +['Ġaccum', 'ulation'] +['Ġsim', 'ulated'] +['ĠDr', 'ivers'] +['Ġd', 'és'] +['cur', 'ring'] +['Ġele', 'phant'] +['Ġadvert', 'ised'] +['Ġmail', 'box'] +['SH', 'IFT'] +['ĠMon', 'ica'] +['Ġan', 'c'] +['Ġward', 'robe'] +['Ing', 'redients'] +['Ġ||', 'čĊ'] +['ipp', 'y'] +['Ġantibiot', 'ics'] +['av', 'ings'] +['(c', 'x'] +['ĠFerr', 'ari'] +['ĠAn', 'imator'] +['.d', 'type'] +['rem', 'oved'] +['order', 'by'] +['Ġc', 'res'] +['oc', 'ê'] +['Ġp', 'ym'] +['ĠCirc', 'ular'] +['@', 'index'] +['ĠW', 'arm'] +['S', 'ay'] +['ĠAss', 'istance'] +['Ġcur', 'tain'] +['ĠMont', 'e'] +['IL', 'ER'] +['ĠC', 'VE'] +['ĠD', 'uck'] +['ĠAll', 'ows'] +['_f', 'ire'] +['ĠDer', 'by'] +['Ġre', 'pos'] +['Ġhttp', 'Client'] +['Ġpsych', 'iat'] +['Ġnow', 'adays'] +['Ġcaut', 'ious'] +['ĠComput', 'ing'] +['Ġcompletion', 'Handler'] +['ĠWel', 'sh'] +['ĠB', 'EST'] +['Ġstress', 'ful'] +['_P', 'E'] +['æĹ¥', 'æľŁ'] +['ĠData', 'Frame'] +['ĉ', 'Integer'] +['_P', 'rint'] +['M', 'oves'] +['Ġtransform', 'ing'] +['.B', 'atch'] +['y', 'ahoo'] +['Position', 's'] +['ze', 'j'] +['Ġno', 'od'] +['io', 'res'] +['_', '*'] +['Ġcl', 'k'] +['ĠF', 'loyd'] +['Ġh', 'ap'] +['font', 'size'] +['Ġn', 'az'] +['.not', 'ification'] +['ĠDep', 'ression'] +['Ġac', 'ne'] +['***', 'ĊĊ'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĊ'] +['.cont', 'ents'] +['yn', 'th'] +['ĠStra', 'ight'] +["')}}", '">', '"+'] +['Ġtoken', 'izer'] +['Ġsovere', 'ignty'] +['ĠP', 'ence'] +['()', '");Ċ'] +['Ġpesso', 'as'] +['.G', 'e'] +['ĠIn', 'cluded'] +['Ġpag', 'ina'] +['Ġex', 'posing'] +['е', 'ÑĪ'] +['_SC', 'RIPT'] +['/$', "',"] +['Th', 'umbnail'] +['×', 'Ķ'] +['webElement', 'X'] +['webElementX', 'paths'] +['press', 'ure'] +['ĠCur', 'ry'] +['_C', 'P'] +['OL', 'UTION'] +['ILE', 'S'] +['prot', 'ect'] +['ool', 'a'] +['Work', 
'space'] +['{', '};Ċ'] +['ĠU', 'NS'] +['Ġsymp', 'athy'] +['ro', 'ker'] +['Ġrem', 'odel'] +['ĉc', 'ell'] +['Ġat', 'op'] +['.Full', 'Name'] +['Ġfa', 'ut'] +['ĠE', 'asily'] +['_d', 'ynamic'] +['Ġfr', 'amed'] +['Ġmot', 'ive'] +['è·', '¯'] +['s', 'am'] +['Ġmar', 'ca'] +['ĠText', 'EditingController'] +['Ġde', 'structor'] +['cre', 'am'] +['Ġr', 'ude'] +['ĠB', 'old'] +['ĠInd', 'igenous'] +['Ġg', 'ens'] +['Ġrel', 'acion'] +['(s', 'ystem'] +['ĠUIF', 'ont'] +['_char', 'ge'] +['UST', 'ER'] +['E', 'V'] +['.N', 'amespace'] +['Ġmer', 'ger'] +['Ġcal', 'loc'] +['g', 'ang'] +['Bad', 'Request'] +['Ġs', 'per'] +['-d', 'esign'] +['Ġâ', 'ĩ'] +['Ch', 'an'] +['Ġorgan', 'ism'] +[',', ')'] +['=', 'id'] +['_pl', 'ane'] +['ĠC', 'ases'] +['elf', 'ast'] +['ĠLegisl', 'ature'] +['ĠF', 'aker'] +['Ġinv', 'oking'] +['-', 'utils'] +['().', "'"] +['.f', 'ace'] +['Ġguard', 'ian'] +['my', 'Modal'] +['Ġclip', 'board'] +['ĠAT', 'M'] +['Ġpe', 'as'] +['ĠS', 'ylv'] +['.c', 'alc'] +['ĠContact', 's'] +['int', 'Value'] +['Ġmodify', 'ing'] +['ĠBar', 'b'] +['.', 'loss'] +['_per', 'centage'] +['Ask', 'ed'] +['(l', 'st'] +['ategor', 'ical'] +['-', 'files'] +['ĠRoman', 'ia'] +['.A', 'c'] +['Ġh', 'ai'] +['ĠF', 'lying'] +['Ġ', 'ż'] +['j', 'p'] +['ĠTr', 'ainer'] +['.', 'arc'] +['_de', 'g'] +['Ġtrace', 'back'] +['Or', 'Fail'] +['F', 'LOW'] +['.', 'old'] +['oy', 'a'] +['g', 'mt'] +['is', 'empty'] +['Ġvacc', 'ination'] +['Ġob', 'solete'] +['recogn', 'ized'] +['Ġru', 'ined'] +['ĠRe', 'in'] +['ĠTr', 'acking'] +['xf', 'b'] +['ا', 'ÛĮ'] +['Ġvæ', 're'] +['Ġbr', 'yster'] +['ĠIT', 'S'] +['Ġdest', 'iny'] +['Ġsw', 'ear'] +['Ġred', 'es'] +['Ġcl', 'f'] +['Ġfl', 'ipped'] +['ĉ', 'head'] +['Bl', 'uetooth'] +['ĠOver', 'rides'] +[':', 'Boolean'] +['_', '='] +['_l', 'r'] +['sp', 'awn'] +[':', 'index'] +['VAL', 'UES'] +['is', 'key'] +['?', '");Ċ'] +['.syn', 'thetic'] +['ĠCheck', 'ing'] +['struct', 'ures'] +['ip', 'ing'] +['Ġvoc', 'als'] +['-', 'Up'] +['ĠManufact', 'urers'] +['ĠMar', 'riage'] +['代', 'çłģ'] +['Ġgar', 'ner'] +['_C', 'lient'] 
+['par', 'allel'] +['RI', 'END'] +['Ġvine', 'gar'] +['seg', 'ue'] +['J', 'B'] +['Ġcontact', 'ing'] +['ĠCar', 'roll'] +['Ġout', 'reach'] +['t', 'ensor'] +['_var', 'iant'] +['Ġthe', 'at'] +['lic', 'able'] +['{', '|'] +['t', 'iny'] +['_', 'letter'] +['Ġp', 'encil'] +['HeadersHeight', 'SizeMode'] +['ilt', 'ro'] +['.auto', 'configure'] +['.d', 'rag'] +['.use', 'State'] +['ĠB', 'MI'] +['h', 'int'] +['Com', 'pile'] +['*', '\\'] +['en', 'ary'] +['Ġl', 'vl'] +['.C', 'ache'] +['+', '="'] +['_t', 'v'] +['ruit', 'ment'] +['Ġf', 'read'] +['Art', 'icles'] +['f', 'ila'] +['Ġpack', 'aged'] +['âĺ', 'Ĩ'] +['AT', 'HER'] +['ĠPl', 'anned'] +['s', 'cheme'] +['Ġdi', 'ary'] +['Ġoff', 'enses'] +['/', '', 'F'] +['ĠSt', 'ick'] +['Ġc', 'erc'] +['ĠS', 'lee'] +['ĉĉ', 'ĠĠĠĠĠĠĠĠ'] +['<', 'Image'] +['Ġè®', '¾'] +['-', 'editor'] +['pie', 'ces'] +['ĠD', 'rama'] +['Ġ//', '////////////////'] +['ĠT', 'asks'] +['AR', 'C'] +['g', 'ateway'] +['.get', 'cwd'] +['.M', 'etadata'] +['Ġguess', 'ing'] +['åľ°', 'åĿĢ'] +['Ġsm', 'arter'] +['ĠGet', 'Enumerator'] +['Ġe', 'fter'] +['/', 'operators'] +['ĠGL', 'float'] +['Ġf', 'ør'] +['Ġop', 'aque'] +['ä¿Ŀ', 'åŃĺ'] +['Sp', 'read'] +['SY', 'STEM'] +['Ġinv', 'ersion'] +['ĠBasket', 'ball'] +['Ġsim', 'ulations'] +['Ġden', 'ies'] +['Ġa', 'vez'] +['_list', 'ener'] +['Ġenh', 'ancing'] +['ĠMy', 'th'] +['ĠL', 'akers'] +['_M', 'D'] +['Nd', 'Ex'] +['D', 'ATABASE'] +['Ġt', 'á»'] +['ar', 'th'] +['[', 'left'] +['Ġcontest', 's'] +['st', 'ile'] +['(K', 'ERN'] +['_f', 'c'] +['_p', 'm'] +['Ġpres', 'idents'] +['Ġhospital', 'ity'] +['Ġfade', 'In'] +['RO', 'PERTY'] +['_m', 'aps'] +['ĠDefinition', 's'] +['Ġassess', 'ing'] +['Ġus', 'ar'] +['Ġquant', 'itative'] +['mo', 'z'] +['Be', 'autiful'] +['[', '(('] +['b', 'ons'] +['f', 'requency'] +['Cont', 'ain'] +['Ġpuzz', 'les'] +['ĠCast', 'ro'] +['Ġv', 'illa'] +['Ġkind', 'ly'] +['Font', 'Awesome'] +['ern', 'a'] +['epoch', 's'] +['_dat', 'as'] +['ĉ', 'ip'] +['.p', 'adding'] +['ĠCont', 'est'] +['Ġed', 'itions'] +['Ġdispro', 'portion'] +['ĠI', 'CO'] 
+['Ġcome', 'back'] +['=', 'value'] +['ri', 'ad'] +['-s', 'ort'] +['Sub', 'mitted'] +['(n', 'etwork'] +['ĠC', 'el'] +['Ġinstall', 'ment'] +['l', 'ashes'] +['.List', 'View'] +['ĠV', 'atican'] +['(Media', 'Type'] +['IV', 'ED'] +['reach', 'able'] +[':', 'Is'] +['ĠC', 'ITY'] +['äº', '¬'] +['ĠHelp', 'ful'] +['Ġba', 'ÅŁ'] +['%', 'čĊ'] +['Ġpsych', 'iatric'] +['Ġrec', 'ycled'] +['FORM', 'AT'] +['ĠG', 'row'] +['b', 'ine'] +['G', 'it'] +['.s', 's'] +['ĠWe', 'apons'] +['ĠSt', 'y'] +['_', 'arrow'] +['*', 'self'] +['ire', 'ment'] +['Ġdeg', 'li'] +['App', 'Delegate'] +['_b', 'anner'] +['Ġcoordin', 'ated'] +['ĠWeb', 'cam'] +['Ġcelebr', 'ations'] +['.', 'act'] +['********************************', '****************'] +['(', 'show'] +['Ġweek', 'day'] +['Ġconc', 'erts'] +['ол', 'н'] +['cl', 'in'] +['Ġcr', 'on'] +['ĠN', 'im'] +['.set', 'Vertical'] +['ĠEll', 'en'] +['س', 'ت'] +['ĠS', 'AM'] +['E', 'ff'] +['g', 'z'] +['ste', 'am'] +['Ġant', 'ique'] +['ph', 'ysical'] +['ĠForm', 'Data'] +['.set', 'ter'] +['ĠPO', 'INT'] +['B', 'on'] +['Ġflav', 'our'] +['erv', 'ention'] +['_ENT', 'ITY'] +['ĉ', 'ĠĠĠĠĠĠĠĠĠĠĠĠ'] +['Ġintr', 'insic'] +['Ġæ', 'İ'] +['append', 'To'] +['aram', 'el'] +[')', '])'] +['ĠRecomm', 'end'] +[')', 'm'] +['OutOf', 'Range'] +['Ġkn', 'ight'] +['Ġsat', 'ellites'] +['ĠTit', 'ans'] +['Ġweigh', 'ed'] +['ĠD', 'ana'] +['e', 'ase'] +['Ġs', 'ip'] +['S', 'IM'] +['ĠDevelop', 'ers'] +['mal', 'ink'] +['/', 'check'] +['_P', 'LL'] +['n', 'ung'] +['Ġdry', 'er'] +['=', 'A'] +['.d', 'w'] +['_S', 'QL'] +['Ġsub', 'plot'] +['D', 'ROP'] +['Ġprot', 'otypes'] +['Ġhour', 'ly'] +['display', 'Name'] +['Ġas', 'i'] +['ĠViol', 'ence'] +['Ġastr', 'onaut'] +['Ġdat', 'atype'] +['Ġinformation', 'al'] +['Ġinvestig', 'ative'] +['etermin', 'ed'] +['ren', 'al'] +[';', "'>"] +['ĉc', 'ol'] +['V', 'G'] +['_', 'boolean'] +['re', 'cent'] +['Ġ*', ')ĊĊ'] +['ĠRain', 'bow'] +['om', 'men'] +['Ġl', 'ur'] +['Ġopp', 'ression'] +['(",', '");Ċ'] +['ĠFac', 'ility'] +['DEF', 'INED'] +['Ġne', 'on'] +['Ġoff', 'ender'] +['AF', 'P'] 
+['ĠClean', 'ing'] +['[]', '):'] +['Ġund', 'ocumented'] +['.Re', 'positories'] +['ĠG', 'uitar'] +['аÑģÑģ', 'ив'] +['Sk', 'ills'] +['Ġtestim', 'on'] +['rypt', 'ography'] +['ĠAm', 'ber'] +['ĠSt', 'alin'] +['Ġl', 'one'] +['Ġap', 'enas'] +['Ġdies', 'es'] +['ĠAr', 'duino'] +['è½', '¬'] +['==', '-'] +['_A', 'ct'] +['Ġc', 'oded'] +['âĸ', 'ł'] +['amb', 'urger'] +['-link', 's'] +['Ġarm', 'our'] +['.H', 'igh'] +['get', 'Content'] +['st', 'ag'] +['Ġhe', 'ck'] +['ĠìĹ', 'Ĩ'] +['ĠMc', 'Connell'] +['ĠCon', 'cert'] +['ĠAl', 'loc'] +['ä', 're'] +['.replace', 'All'] +['Ġpart', 'itions'] +['rot', 't'] +['ĠF', 'le'] +['_T', 'REE'] +['reason', 'able'] +['ĠReport', 'ing'] +['Ġbillion', 'aire'] +['s', 'cores'] +['min', 's'] +['-', 'eye'] +['M', 'ORE'] +['ab', 'ort'] +['ĠSW', 'T'] +['Ġin', 'verted'] +['ĠTe', 'achers'] +[';', 'n'] +['Ġast', 'ro'] +['н', 'ов'] +['ани', 'ÑĨ'] +['product', 'o'] +['c', 'ountries'] +['ĠO', 'wen'] +['Ġcont', 'amination'] +['Ġv', 'ibe'] +['ĠEll', 'i'] +['.s', 'cript'] +['ĠOl', 'ive'] +['D', 'MA'] +['v', 'ier'] +[':', 'semicolon'] +['-m', 'odule'] +['gress', 'ive'] +['ag', 'u'] +['_', 'players'] +['Ġresult', 'ados'] +['start', 'ed'] +['scroll', 'Top'] +['====', '='] +['Ġweigh', 'ing'] +['Ġ[[', '['] +['z', 'ahl'] +['(', 'NS'] +['ĠAssert', 'ion'] +['le', 'ague'] +['.setText', 'Color'] +['ĉ', 'Message'] +['Ġmom', 's'] +['_A', 'F'] +['.', 'wh'] +['AL', 'S'] +['Ġaut', 're'] +[']', 'ĊĊĊĊ'] +['.op', 'acity'] +['ĠBudd', 'hist'] +['Ġde', 'af'] +['ĠOrgan', 'isation'] +['(G', 'lobal'] +['ens', 'ch'] +['Ġhead', 'ache'] +['ĠAli', 'en'] +['_in', 'ode'] +['ĠSt', 'ark'] +['Ġæ', 'ī'] +['-l', 'nd'] +['ore', 'f'] +['_fe', 'at'] +['Ġpedest', 'rian'] +['Ġnom', 'inal'] +['Ġbal', 'loon'] +['Ġspr', 'ites'] +['Prototype', 'Of'] +['ĠA', 'post'] +['ĠF', 'EATURE'] +['O', 'H'] +['Ġre', 'cess'] +['ĠDon', 'na'] +['con', 'sumer'] +['$', 'GLOBALS'] +['ĠG', 'IF'] +['-', 'frame'] +['In', 'icio'] +['Ġpass', 'ages'] +['Date', 'String'] 
+['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠ'] +['.by', 'te'] +['B', 'ug'] +['initial', 'izer'] +['p', 'kt'] +['od', 'ium'] +['ĠD', 'ER'] +['.', 'ops'] +['ler', 'i'] +['Ġgift', 'ed'] +['Ġdet', 'ach'] +['ter', 'rain'] +['elt', 'ers'] +['ãģ', 'ı'] +['.', 'loader'] +['ĠN', 'GO'] +['str', 'ncmp'] +['K', 'h'] +['(font', 'Size'] +['ro', 'cket'] +['Ġpreced', 'ent'] +['ĠAur', 'ora'] +['ĠEx', 'periment'] +['is', 'phere'] +['Enc', 'oded'] +['ĠâĢĵ', 'ĊĊ'] +['Ġpy', 'ramid'] +['ĠAnn', 'iversary'] +['of', 'il'] +['ë', 'Ł'] +['(', 'plugin'] +['C', 'oeff'] +['Ġcooper', 'ate'] +['Ġpredomin', 'antly'] +['IS', 'M'] +['Ph', 'rase'] +['_DEF', 'INE'] +['Fl', 'ip'] +['AMIL', 'Y'] +['ĠMark', 'ets'] +['ĠStream', 'Reader'] +['ĠComb', 'ine'] +['Ġmanus', 'cript'] +['z', 'za'] +[',', 'tp'] +['Wh', 'atever'] +['IT', 'ICAL'] +['ighb', 'our'] +['Data', 'Provider'] +['.Text', 'ure'] +['priv', 'acy'] +['.S', 'DK'] +['Ġre', 'charge'] +['Ġc', 'pp'] +['ĠC', 'FG'] +['(h', 'older'] +['(p', 'y'] +['m', 'ot'] +['Ġsav', 'oir'] +['ĠR', 'osa'] +['ĠPC', 's'] +['Ġí', 'Ļ'] +['.her', 'oku'] +['Ġf', 'ren'] +['ĠR', 'iley'] +['ag', 'ate'] +['Ġs', 'ond'] +['.x', 'lsx'] +['Ġh', 'acked'] +['st', 'ad'] +['G', 'i'] +['Ġsan', 'ity'] +['ĠSql', 'DataAdapter'] +['...', '",'] +['ĠP', 'ussy'] +['Ġ', '****************'] +['Ġhass', 'le'] +['_P', 'ARENT'] +['ĠU', 'AE'] +['Ġbegin', 'ners'] +['(', 'Client'] +['Ġstatist', 'ically'] +['.h', 'our'] +['ed', 'elta'] +['Ġtr', 'action'] +['uel', 've'] +['ar', 'at'] +['Ġsa', 'una'] +['IN', 'VALID'] +['Ġindict', 'ment'] +['AL', 'LE'] +['Ġdiss', 'ent'] +['ĠTyp', 'ography'] +['Ġintention', 'al'] +['s', 'it'] +['ĠAn', 'imals'] +['Ġcoun', 'tryside'] +['Ġu', 'art'] +['}', '\\"'] +['Ġseam', 'less'] +['¾', '示'] +['Ġaut', 'os'] +['Ġ"\'', '";Ċ'] +['Fl', 'ush'] +['ANN', 'OT'] +['Ġal', 'gebra'] +['ass', 'oc'] +['ĠW', 'aters'] +['Ġprepar', 'ations'] +['ron', 'ym'] +['[,', ']'] +['S', 'ans'] +['Ġarm', 'ies'] +['ipe', 'g'] +['Ġcream', 'y'] +['.', 'art'] +['et', 're'] 
+['ĠAn', 'imated'] +['Ġun', 'pleasant'] +['eme', 'an'] +['g', 'reat'] +['i', 'Äħ'] +['ĠEar', 'lier'] +['Ġch', 'ic'] +['Ġpres', 'erving'] +['(ex', 'ec'] +['ĠInvest', 'igation'] +['ĉG', 'PIO'] +['Ġrig', 'orous'] +['ij', 'o'] +['=', 'num'] +['Ġtool', 'Strip'] +[')', 'set'] +['+"', '&'] +['ĠAcc', 'eler'] +['Ġdevelopment', 'al'] +['is', 'posable'] +['Ġflaw', 'ed'] +['re', 'ne'] +['Up', 'dating'] +['Ġwatch', 'dog'] +['Ġden', 'ominator'] +['Ġsubur', 'bs'] +['Ġ...', ')'] +['Ġconv', 'ictions'] +['c', 'losure'] +['.I', 'P'] +['Ġtransl', 'ates'] +['.sw', 't'] +['.Tr', 'ace'] +['Ġmet', 'tre'] +['.is', 'Enabled'] +['ĠEffect', 'ive'] +['.to', 'Int'] +['Ġen', 'chant'] +['Ġst', 'unned'] +['Ġpo', 'i'] +['/', 'code'] +['ad', 'm'] +['.datab', 'inding'] +['ĠL', 'orem'] +['________________________________', '________________________________'] +['Ġled', 'ger'] +['Ġcar', 'a'] +['ĠG', 'ir'] +['Ġwa', 'its'] +['Un', 'o'] +['Ġc', 'wd'] +['è¾', 'ij'] +['ĠT', 'Result'] +['Ġre', 'jo'] +['Ġem', 'itted'] +['ĠWest', 'minster'] +['ä¸Ģ', '个'] +['ne', 'k'] +['_T', 'is'] +['Ġen', 'act'] +['ĉ', 'with'] +['org', 'ia'] +['Ġj', 'ue'] +['Per', 'form'] +['SP', 'ATH'] +['.top', 'ic'] +['ĠD', 'aten'] +['áº', '§'] +['Ġsit', 'io'] +['_M', 'M'] +['"', 'So'] +['b', 'ial'] +['Ġsc', 'oped'] +['Re', 'quires'] +['ĠT', 'OTAL'] +['ĠCh', 'ancellor'] +['(', 'contents'] +['Ġste', 'alth'] +['dev', 'ices'] +['-p', 'ass'] +['ili', 'h'] +['ĠMal', 'colm'] +['ĠDep', 'ot'] +['Ġconfig', 'ur'] +['a', 'ussian'] +['_con', 'straint'] +['в', 'еÑĤ'] +['G', 'RA'] +['ĠR', 'ates'] +['.dataGridView', 'TextBoxColumn'] +['ĠNob', 'el'] +['it', 'ics'] +['Ġignor', 'ant'] +['ĠReport', 'er'] +['ĠEb', 'ola'] +['ĠSh', 'ock'] +['_re', 'lation'] +['ĠNin', 'ja'] +[')', 'c'] +['Ġt', 'icker'] +['.is', 'Checked'] +['ĠSup', 'pliers'] +['ĠRap', 'id'] +['Level', 's'] +['âĤ¬', 'âĦ¢'] +['ĉ', 'queue'] +['Ġch', 'op'] +['ĠUn', 'ix'] +['re', 'ject'] +['-c', 'alendar'] +['(s', 'ort'] +['è', 'ne'] +['erc', 'icio'] +['Ġh', 'ect'] +['CALL', 'TYPE'] +['rou', 'pon'] 
+['Ġrent', 'als'] +['auth', 'ors'] +['{', 'name'] +['ĠF', 'IFO'] +['Ġl', 'assen'] +['ĠN', 'ous'] +['Ġsn', 'apped'] +['Ġfert', 'ility'] +['"', 'log'] +['click', 'ed'] +['Ġplant', 'ing'] +['Ġg', 'b'] +['/', 'output'] +['PE', 'AT'] +['Ġc', 'ategoria'] +['Ġb', 'ach'] +['Prof', 'essor'] +['in', 'th'] +['"]', 'čĊ'] +['Rec', 'order'] +['ser', 'de'] +['ĠTrans', 'mission'] +['tr', 'ad'] +['Ġtur', 'bo'] +['_VER', 'TEX'] +['\\', 'Event'] +['il', 'ver'] +['Ġbod', 'ily'] +['ĠS', 'ources'] +['Ġkill', 'ings'] +['.xr', 'TableCell'] +['Ġfold', 'ed'] +['/', 'legal'] +['un', 'er'] +['ĠR', 'ifle'] +['ĠM', 'IDI'] +['_Selected', 'IndexChanged'] +['.Size', 'Type'] +['ĠWeb', 'Socket'] +['Ġsele', 'ccion'] +['S', 'and'] +['ot', 'ros'] +['Ġenv', 'ision'] +['/', 'etc'] +['ĠMel', 'issa'] +['Sp', 'ot'] +['но', 'е'] +['_', 'ARM'] +['At', 'tempt'] +['ĠB', 'I'] +['ãģ', 'Ķ'] +['ĠD', 'U'] +['Ġback', 'lash'] +['str', 'ide'] +['/', 'classes'] +['Ġtext', 'Color'] +['_st', 'aff'] +['ob', 'lin'] +['agent', 'a'] +['.c', 'ollections'] +['ill', 'age'] +["'", 'čĊčĊ'] +['fl', 'atten'] +['_s', 'ales'] +['_M', 'ASTER'] +['T', 'W'] +['_d', 'a'] +['P', 'itch'] +['ph', 'ies'] +['Ġz', 'ombies'] +['ĠV', 'ERY'] +['ĠPharm', 'acy'] +['Ġprogress', 'Bar'] +['Ġhas', 'htag'] +['S', 'idebar'] +['@', 'stop'] +['(p', 'c'] +['ол', 'ж'] +['MA', 'KE'] +['ĠCor', 'on'] +['Ġkv', 'inner'] +['ĠM', 'aid'] +['b', 'ob'] +['.title', 'Label'] +['Ġsuccess', 'es'] +['ĠDemocr', 'acy'] +['ĠSurg', 'ery'] +['Ġcou', 'gar'] +['Ġcur', 'so'] +['Ġl', 'oro'] +['ist', 'ency'] +['Sen', 'ior'] +['æ', 'k'] +['ĠA', 'AA'] +['ĠBO', 'OK'] +['к', 'о'] +['W', 'STR'] +['Ġ*/', ',Ċ'] +['oy', 'al'] +['.v', 'ector'] +['ĠS', 'PEC'] +['SS', 'F'] +['Ġcomp', 'uls'] +['ĠAppe', 'als'] +['ĠW', 'inston'] +['ĠMock', 'ito'] +['con', 'trib'] +['.', 'available'] +['entity', 'Manager'] +['ari', 'as'] +['_s', 'ale'] +['_r', 's'] +['Ġdec', 'oding'] +['Ġloc', 'ator'] +['ol', 'ith'] +['Ġk', 'ol'] +['Ġasc', 'ii'] +['ĠR', 'ut'] +['/', 'interface'] +['ĉĉĉĉĉĉ', 'ĠĠĠ'] +['ĠN', 'umer'] 
+['.fl', 'ip'] +['-d', 'el'] +['Ġbol', 'ster'] +['on', 'omic'] +['Ġz', 'm'] +['L', 'G'] +['Find', 'By'] +['Ġadapt', 'ive'] +['lo', 'o'] +['Ġv', 'ue'] +['(re', 'verse'] +['_c', 'anvas'] +['.', 'roles'] +['ific', 'ado'] +['ven', 'ient'] +['"', 'As'] +['ĠEn', 'tr'] +['al', 'igned'] +['Ġbere', 'its'] +['///', 'ĊĊ'] +['.g', 'wt'] +['.', 'employee'] +['_cl', 'i'] +['Ġanticip', 'ate'] +['éĻ', 'IJ'] +['Ġp', 'ik'] +['Ġmush', 'rooms'] +['(t', 't'] +['Ġo', 'ma'] +['ĠSan', 'chez'] +['_g', 'oogle'] +['.', 'Valid'] +['ĠFile', 'Name'] +['iv', 'ative'] +['k', 'ed'] +['-w', 'ar'] +['Ġm', 'aturity'] +['и', 'д'] +['Ġmin', 'er'] +['Reduc', 'ers'] +['ĠLat', 'Lng'] +['_ST', 'D'] +['D', 'igits'] +['Cal', 'c'] +['-up', 'load'] +['Ġhand', 'ic'] +['ี', 'à¹Ī'] +['egr', 'ated'] +['ĠST', 'M'] +['C', 'lients'] +['ĠTur', 'bo'] +['SY', 'NC'] +['Ġphotograph', 'ers'] +['.', 'Out'] +['.char', 'acter'] +['B', 'UILD'] +['.un', 'lock'] +['Ġar', 'ises'] +['ĠCommand', 's'] +['("', '");čĊ'] +['_F', 'ORE'] +[';', "',"] +['+"', "'"] +['.', 'Images'] +['")', '{'] +['ĠM', 'eyer'] +['Ġneg', 'atively'] +['ĠD', 'LL'] +['Ġex', 'e'] +['Ġdef', 'iciency'] +['Ġwild', 'ly'] +['-s', 'witch'] +['con', 'struction'] +['Ġexception', 'ally'] +['ĠL', 'iz'] +['/j', 'ava'] +['Ġtheir', 's'] +['ĠCont', 'emporary'] +['l', 'is'] +['.fill', 'Rect'] +['ĠN', 'FC'] +['Ġre', 'he'] +['(num', 'bers'] +['Ġr', 'aster'] +['Ġfig', 'uring'] +['Ġshow', 'c'] +['ĠJ', 'ill'] +['Ġarc', 'ade'] +['ĠConstruct', 's'] +['md', 'l'] +["('", '|'] +['Ġident', 'ifiers'] +['Ġst', 'ellar'] +['(', 'Connection'] +['Ġ"', '{{'] +['y', 'or'] +['(m', 'ysqli'] +['Ġdo', 've'] +['Of', 'Birth'] +['.dis', 'connect'] +['_h', 'i'] +['Ġzw', 'ischen'] +['ĠGr', 'und'] +['i', 'ros'] +['_A', 'rray'] +['.on', 'click'] +['ans', 'om'] +['An', 'swers'] +['ĉ', 'remove'] +['F', 'a'] +['Ġhur', 'ry'] +['-in', 'f'] +['Ġget', 'Class'] +['ĠReg', 'ulation'] +['ĠFLAG', 'S'] +['m', 'isc'] +['K', 'en'] +['_', 'heading'] +['G', 'Hz'] +['-', 'entry'] +['Ġbi', 'ography'] +['S', 'ig'] +['-m', 
'f'] +['Watch', 'er'] +['âĢľ', 'A'] +['}', 'px'] +['Ġsp', 'icy'] +['_s', 'q'] +['L', 'ost'] +['(tr', 'ack'] +['а', 'ли'] +['Desc', 'ending'] +['<', 'bits'] +['qu', 'ine'] +['ĠAdv', 'oc'] +['_S', 'N'] +['ĠHann', 'ah'] +['PO', 'P'] +['Ġem', 'itter'] +['Ġc', 'yn'] +['ĠC', 'AD'] +['?', ').'] +['/', 'set'] +['ĠS', 'ister'] +['ĠEnd', 'point'] +['Ġmen', 'or'] +['Ġinter', 'p'] +['r', 'k'] +['id', 'le'] +['Ġout', 'fits'] +['.', 'vertex'] +['Ġc', 'lic'] +['ARE', 'N'] +['Ġpost', 'ure'] +['ĠOpport', 'unity'] +['v', 'x'] +['ĠFor', 'bes'] +['.D', 'irection'] +['Ġres', 'ide'] +['Ġremember', 'ing'] +['nest', 'y'] +['Auto', 'resizing'] +['pro', 'viders'] +['ĠA', 'H'] +['Ġhur', 'ting'] +['ĠL', 'ily'] +['eval', 'uate'] +['lij', 'k'] +['p', 'apers'] +['ĠSm', 'ash'] +['ĠL', 'AST'] +['Ġwell', 's'] +['w', 'asher'] +['_RO', 'LE'] +['ĠD', 'anger'] +['*', '(('] +['_re', 'pository'] +['ĠRes', 'olve'] +['ĠRoom', 's'] +['_R', 'G'] +['ĠQ', 'T'] +['o', 'op'] +['ĠHe', 'ap'] +['Ġslow', 'ing'] +['Ġgrat', 'uite'] +['_c', 'atalog'] +['Ġpol', 'ynomial'] +['L', 'y'] +['pc', 's'] +['F', 'ox'] +['ĠC', 'yr'] +['Ġdim', 'in'] +['/', 'month'] +['S', 'alt'] +['Ġh', 'ind'] +['.P', 'ER'] +['For', 'um'] +['c', 'en'] +['_p', 'ol'] +['íĺ', '¸'] +['Ġin', 'ser'] +['(', '~'] +['@', 'test'] +['ĠGold', 'man'] +['Ġupload', 'ing'] +['F', 'c'] +['Ġkom', 'mer'] +['Ġm', 'itt'] +['_log', 'ged'] +['Ġbu', 'cks'] +['-l', 'ayer'] +[')', '};Ċ'] +['ĠO', 'M'] +['Ġv', 'eg'] +['col', 'our'] +['Ġоб', 'ÑĬ'] +['Std', 'String'] +['_', 'que'] +['ĠT', 'ian'] +['Ġspecial', 'ize'] +['и', 'п'] +['Ġк', 'л'] +['tr', 'ial'] +['-', 'edge'] +['Ġm', 'ars'] +['OG', 'LE'] +['Ġempath', 'y'] +['ĠB', 'om'] +['Ġcoll', 'isions'] +['Ġcart', 'e'] +['ĠTe', 'il'] +['ĠM', 'PL'] +['Ġporn', 'ô'] +['Ġa', 'irlines'] +['A', 'ws'] +['N', 's'] +['ĠSp', 'awn'] +['(', 'use'] +['é»', 'ĺ认'] +['Ġy', 'acc'] +['st', 'or'] +['Ġconf', 'ess'] +['Ġpe', 'que'] +['r', 'age'] +['?', '"Ċ'] +['/dat', 'atables'] +['ĠSh', 'ower'] +['__', '/'] +['Ġcryst', 'als'] +['Ġbus', 'car'] 
+['ĠH', 'aus'] +['iz', 'ação'] +['_', 'entities'] +['ķ', 'Į'] +['ļ', 'Į'] +['x', 'cc'] +['v', 'irt'] +['-che', 'vron'] +['(', 'Result'] +['c', 'ake'] +['COM', 'E'] +['Ġprohib', 'it'] +['ĠCh', 'ess'] +['Ġbe', 'aucoup'] +['ĠÑĩ', 'ÑĤо'] +['R', 'UN'] +['ĠI', 'K'] +['ó', 'ÅĤ'] +['_', 'Update'] +['Ġsle', 'ek'] +['ĠSpec', 'ify'] +['_c', 'redentials'] +['ÅŁ', 't'] +['ĠUser', 'Name'] +['ĉ', 'Value'] +['Ġarray', 'List'] +['Ġex', 'changed'] +['ips', 'is'] +['.re', 'lated'] +['ĠSe', 'ite'] +['_B', 'AR'] +['ĠL', 'em'] +['ĠW', 'ATCH'] +['ĠC', 'lients'] +['Ġ.', '*'] +['ĠEar', 'l'] +['-re', 'port'] +['Ġforeign', 'ers'] +['Ġstrengthen', 'ing'] +['ĉ', 'Description'] +['(g', 'o'] +['.tool', 'bar'] +['Ġcalcul', 'ates'] +['ĉs', 'ource'] +['Ġcz', 'as'] +['Ġre', 'cl'] +['ab', 'o'] +['Ġlocal', 'host'] +['Ġ^', '{Ċ'] +['.P', 'op'] +['ĠDes', 'igned'] +['\\', 'Abstract'] +['H', 'old'] +['ĠGuid', 'elines'] +['ipl', 'ine'] +['Ġc', 'aching'] +['.Re', 'ader'] +['_ext', 'ernal'] +['.str', 'ptime'] +['ĠWeek', 'end'] +['-M', 'ar'] +['ĠBe', 'i'] +['Ġ{*', '}'] +['ĠR', 'ud'] +['Ġexpl', 'or'] +['ĠBou', 'levard'] +['C', 'ash'] +['Ġprep', 'ares'] +['Ġserial', 'ization'] +['ew', 'ater'] +['Ġad', 'c'] +[':', 'ĊĊĊĊĊĊ'] +['Re', 'fer'] +['Ġsc', 'anned'] +['}', '}ĊĊ'] +['ĠF', 'ul'] +['Ġtour', 'ing'] +['ãĥĥ', 'ãĤ¯'] +['>', '(('] +['sur', 'vey'] +['Ġí', 'ĺ'] +['...', "')Ċ"] +['ĠDiv', 'ider'] +['os', 'l'] +['_C', 'ANCEL'] +['_pre', 'pare'] +['st', 'in'] +['ĠHe', 'ath'] +['.Primary', 'Key'] +['ĠâĨ', 'IJ'] +['ĠLocal', 'DateTime'] +['Ġcooper', 'ative'] +['L', 'earning'] +['.en', 'queue'] +['Ġgo', 'og'] +['ĠReg', 'ression'] +['im', 'ates'] +['Ġvoy', 'eur'] +['ĠDr', 'ink'] +['pl', 'ug'] +['Ġl', 'ender'] +['man', 'a'] +['Ġperson', 'nes'] +['yp', 'se'] +['Ġun', 'link'] +['ĠRav', 'ens'] +['Ġhur', 'd'] +['Ġperiod', 'ically'] +['ARG', 'S'] +['ĠG', 'H'] +['char', 'acters'] +['...', '"ĊĊ'] +['-', 'establish'] +['Ġd', 'n'] +['(', 'condition'] +['ĠGr', 'avity'] +['Ġest', 'as'] +['_f', 'ocus'] +['Creat', 'ure'] +['(s', 'ite'] 
+['Ġc', 'arr'] +['ĠR', 'L'] +['ĠR', 'I'] +['ĠM', 'oto'] +['AS', 'F'] +['ĠLuck', 'ily'] +['ĉ', 'Route'] +['Ġent', 'ropy'] +['("', ',"'] +['Col', 'lect'] +['(', 'contact'] +['ĠFlo', 'rence'] +['Ġpremium', 's'] +['Ġlif', 'ecycle'] +['Ġb', 'ans'] +['x', 'ef'] +['Web', 'Kit'] +['ĠFlo', 'ating'] +['Ġcos', 'a'] +['Spec', 'ific'] +['ĠLo', 'ans'] +['b', 'read'] +['Ġdes', 'criptors'] +['Ġ{', ':.'] +['TH', 'READ'] +['ĠT', 'rent'] +['Ġsc', 'op'] +['Q', 'A'] +['ĠAnt', 'ar'] +['p', 'el'] +['_d', 'ifference'] +['_ch', 'anges'] +['(...', ')'] +['ĠR', 'otation'] +['ĠLG', 'PL'] +['ĠJ', 'UST'] +['(T', 'ask'] +['_sub', 'set'] +['ĠTR', 'ANS'] +['åĬ', 'Ľ'] +['ĠSc', 'out'] +['-p', 'opup'] +['Ġsm', 'oked'] +['_C', 'lass'] +['Ġturn', 'over'] +['br', 'akk'] +['ĠRock', 'y'] +['t', 'as'] +['.Regular', 'Expressions'] +['ĠElli', 'ott'] +['ĠSp', 'inner'] +['DU', 'CTION'] +['Ġlib', 're'] +['Ġmol', 'to'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠ'] +['ĠF', 'TP'] +['m', 'peg'] +['(f', 'eatures'] +['Ġb', 'ald'] +['ĠV', 'id'] +['Ġsh', 'outing'] +['L', 'int'] +['Ġsock', 'ets'] +['Ġpro', 'w'] +['Ġnouvel', 'le'] +['isc', 'ard'] +['ĠS', 'ponsor'] +['Ġconsult', 'a'] +['))', ');'] +['Ind', 'ian'] +['ĠR', 'aspberry'] +['Ġteam', 'mate'] +['ĠJ', 'WT'] +['ĠGh', 'ana'] +['Ġc', 'akes'] +['pr', 'imer'] +['form', 'a'] +['erg', 'arten'] +['_M', 'anager'] +['Ġpre', 'season'] +['G', 'AME'] +['|', '"'] +['ĠBro', 'ck'] +['Ġoccup', 'y'] +['Ġdecor', 'ations'] +['á', 'nd'] +['Ġc', 'ot'] +['Ġpar', 'an'] +['D', 'isk'] +['rem', 'ain'] +['>', '?'] +['Str', 'ong'] +['Ġfr', 'ance'] +['ĠE', 'ra'] +['-c', 'r'] +['.Buffer', 'edReader'] +['ĠParad', 'ise'] +['ĠV', 'AT'] +['ĠAnd', 'ers'] +['Ġlim', 'b'] +['amp', 'oo'] +['Ġimper', 'ative'] +['UT', 'ILITY'] +['ĠRec', 'ognition'] +['Ġragaz', 'ze'] +['Ġpop', 's'] +['yp', 'ress'] +['Ġemb', 'argo'] +['//', '{Ċ'] +['Ġsy', 'll'] +['P', 'TR'] +['åŃĺ', 'åľ¨'] +['Ġdid', 'nt'] +['Mail', 'er'] +['Ġacad', 'emics'] +['ĠFra', 'uen'] +['ne', 'ider'] +['-', 'rel'] 
+['Ġrain', 'bow'] +['(', 'In'] +['Ġslic', 'ed'] +['============', '=Ċ'] +['(s', 'end'] +['NSMutable', 'Dictionary'] +['v', 'os'] +['(p', 'ackage'] +['Ġord', 'inance'] +['view', 'er'] +['ĠSant', 'os'] +['-s', 'elling'] +['Ġgo', 'v'] +['ett', 'le'] +['Ġfound', 'ers'] +['Ġw', 'aking'] +['sl', 'ashes'] +['-p', 'ound'] +['re', 'cht'] +['ا', 'ت'] +['.on', 'Click'] +['Ġn', 'ord'] +['st', 'änd'] +['_', 'when'] +['UT', 'ERS'] +['ic', 'c'] +['Ġcaps', 'ule'] +['ĠW', 'id'] +['M', 'arc'] +['à¸', '¸'] +['ro', 'red'] +['UG', 'E'] +['LO', 'UD'] +['ĠAud', 'it'] +['ip', 'ients'] +['op', 'ian'] +['ĠS', 'ue'] +['Ġwur', 'den'] +['.H', 'elpers'] +['Ġf', 'actions'] +['[', 'np'] +['-th', 'an'] +['Ġre', 'co'] +['Ġk', 'as'] +['Ġcmd', 's'] +['/n', 'etwork'] +['xb', 'f'] +['get', 'Color'] +['Ġbi', 'ased'] +['ĠL', 'ak'] +['D', 'atas'] +['vent', 's'] +['Ġë', '²'] +['_P', 'S'] +['.', 'Validate'] +['Inv', 'oker'] +['Ġne', 'uen'] +['Ġju', 'venile'] +['V', 'ISION'] +['Ġdev', 'ote'] +['Ġlin', 'ha'] +['Ġdiscount', 'ed'] +['\\', 'Config'] +['Ġworth', 'while'] +['Ġskin', 'ny'] +['ĠC', 'ourses'] +['le', 'ys'] +['ĠMort', 'gage'] +['K', 'evin'] +['Ġannounc', 'es'] +['])', '*'] +['res', 'ervation'] +['Ġæķ', '°'] +['Ġprejud', 'ice'] +['ĠString', 'Comparison'] +['Ġbe', 'ard'] +['-w', 'in'] +['ĠS', 'ão'] +['ĉ', 'ms'] +['j', 'al'] +['ĠE', 'arn'] +['_', 'ports'] +['ĠN', 'ombre'] +['_C', 'OR'] +['ĠB', 'UILD'] +['.s', 'ound'] +['Y', 'ellow'] +['Ġlineback', 'er'] +['Ġchar', 'itable'] +['j', 'ug'] +['_NON', 'NULL'] +['ĠD', 'ental'] +['">', '${'] +['ĉm', 'atch'] +['R', 'ussian'] +['Ġvers', 'ch'] +['Ġp', 'inned'] +['Ġadopt', 'ing'] +['Options', 'Menu'] +['P', 'ag'] +['Ġpair', 'ing'] +['Ġt', 'read'] +['erc', 'ises'] +['ĠSp', 'read'] +[')', 'i'] +['ĠB', 'AD'] +['_t', 'f'] +['UI', 'ImageView'] +['pop', 'ulate'] +['b', 'ab'] +['ĠÏ', 'ĥ'] +['[', '++'] +['Ġopi', 'oid'] +['Ġ##', 'Ċ'] +['d', 'type'] +['ĠStart', 's'] +["('/", "')"] +['Ġperson', 'als'] +['-mark', 'et'] +['Ġredund', 'ant'] +['ĠEss', 'ential'] +['Ġscrap', 'y'] 
+['Ġи', 'м'] +['a', 'cl'] +['Ġcre', 'ar'] +['ĠB', 'end'] +['Ġrel', 'ieve'] +['-', 'room'] +['w', 'ife'] +['Ġv', 'Ãł'] +['ĠQ', 'Point'] +['Ġqu', 'asi'] +['Ġmethod', 'Name'] +['\\x', 'c'] +['ĠPer', 'u'] +['/', 'The'] +['.', 'orm'] +['Ġv', 'iz'] +['/p', 'df'] +['Loc', 'ated'] +['Ġconfront', 'ation'] +['ĠChampionship', 's'] +['Ġhyp', 'ert'] +['Ġd', 'j'] +['ĠUser', 'Info'] +['ĠåĪ', 'Ľå»º'] +['\\x', 'b'] +['(s', 'im'] +['Ġ==', 'Ċ'] +['Ġst', 'aging'] +['Ġdr', 'astically'] +['åŃ', '¦'] +['l', 'ords'] +['.', 'less'] +['вед', 'иÑĤе'] +['ĠB', 'ucket'] +['ĠM', 'am'] +['.', 'term'] +['_p', 'i'] +['c', 'zy'] +['.p', 'ub'] +['prec', 'io'] +['ĠV', 'irt'] +['Ġrom', 'an'] +['it', 'at'] +['L', 'ex'] +['_inf', 'os'] +['Ä', '°'] +['.', 'other'] +['VE', 'LO'] +['Ġp', 'onder'] +['Ġh', 'anno'] +['(', 'Page'] +['do', 'i'] +['Ġpol', 'ite'] +['Ġprogram', 'mer'] +['D', 'ies'] +['$', 'd'] +['Ġrep', 'lication'] +['add', 'Column'] +['fr', 'ican'] +['Ġl', 'eng'] +['be', 'er'] +['o', 'it'] +['Ġw', 'asting'] +['yl', 'im'] +['me', 'asure'] +['N', 'eg'] +['Ġpart', 'ie'] +['.con', 'sole'] +['ĠGu', 'inea'] +['TE', 'L'] +['_f', 'act'] +['.ch', 'unk'] +['Ġl', 'ent'] +['Ġall', 'er'] +['Ġà¤', 'ķ'] +['_id', 'le'] +['Ġad', 'missions'] +['JSON', 'Array'] +['Ġv', 'ibration'] +['.h', 'elpers'] +['å¤', 'ĸ'] +['Ġh', 'en'] +['j', 'ohn'] +['Ġì', 'ĥĿ'] +['Ġjud', 'gement'] +['Ġge', 'en'] +['ter', 'ra'] +['^', '{'] +['ĠI', 'z'] +['Ġc', 'â'] +['inst', 'ances'] +['Ġthreat', 'ens'] +['Ġm', 'üssen'] +['Kind', 'OfClass'] +['Ġstoryt', 'elling'] +['_d', 'emo'] +['ri', 'as'] +['Priv', 'acy'] +['h', 'ift'] +['ĠY', 'i'] +['es', 'or'] +['íķ', 'ł'] +['ens', 'itivity'] +['.W', 'riter'] +['à¸', 'Ĥ'] +['D', 'istrict'] +['.get', 'JSONObject'] +['Im', 'pro'] +['(get', 'Resources'] +['ĠS', 'PELL'] +['rodu', 'ce'] +['Ġslow', 'ed'] +['Ġlin', 'ewidth'] +['Ġhonest', 'y'] +['ĠCo', 'ord'] +['ĠF', 'ork'] +['ĠDispatch', 'Queue'] +['ĠCl', 'iff'] +['ĠW', 'iring'] +['_TIM', 'ESTAMP'] +['oll', 'ah'] +['av', 'oid'] +['++', '];Ċ'] +['sem', 'antic'] 
+['-c', 'ss'] +['Ġv', 'eto'] +['ĠM', 'err'] +['Ġlegisl', 'ators'] +['CEE', 'DED'] +['Ġquestion', 'naire'] +['ĠP', 'ills'] +['Cal', 'culate'] +['(c', 'ore'] +["'", 'e'] +['Ġdis', 'like'] +['ĠPre', 'ferences'] +['_EX', 'TERNAL'] +['è°', 'ĥ'] +['Ġd', 'odge'] +['æľį', 'åĬ¡'] +['.n', 'ames'] +['.draw', 'Image'] +['_p', 'rom'] +['uck', 'land'] +['Ġ<$', '>'] +['ı', 'z'] +['/s', 'ite'] +['é¡', '¹'] +['rop', 'he'] +['Ġcomp', 'elled'] +['Ġl', 'aptops'] +['Ġun', 'i'] +['C', 'LOSE'] +['Ġcasual', 'ties'] +['ĠUn', 'iform'] +['Term', 'inal'] +['.', '","'] +['D', 'AT'] +['(T', 'reeNode'] +['ĠGand', 'hi'] +['(st', 'mt'] +['AX', 'B'] +['*', 'M'] +['Ġumb', 'rella'] +['an', 'imal'] +['Ġgr', 'pc'] +['Ġwhere', 'by'] +['Ġfloat', 's'] +['ĉ', 'arg'] +['Ġdb', 'g'] +['Ġexceed', 'ing'] +['Event', 'Type'] +['.SaveChanges', 'Async'] +['Ġ{', '{{'] +['Ġow', 'ed'] +['ahren', 'heit'] +['Ġì', '§'] +['Ġequ', 'ipo'] +['ur', 'ai'] +['Ġid', 'ol'] +[']', '")Ċ'] +['_m', 'ajor'] +['Ġentire', 'ty'] +['inger', 'print'] +['ç', 'os'] +['/', 'account'] +['ĉ', 'right'] +['urs', 'os'] +['ĠE', 'DT'] +['_INS', 'ERT'] +['Ġsh', 'ining'] +['Ġ<', ':'] +['Edge', 'Insets'] +['Ġcolon', 'ies'] +['.', 'IM'] +['ĉĠ', 'ĉ'] +['RO', 'AD'] +['CC', 'CC'] +['pl', 'acing'] +['Ġget', 'Activity'] +['em', 'acs'] +["'", '%('] +['.click', 'ed'] +['ĠTh', 'em'] +['is', 'ia'] +['Bus', 'car'] +['.re', 'name'] +['Ġo', 'ath'] +['Ġafter', 'ward'] +['ĠU', 'FO'] +['AP', 'S'] +['ĠJackson', 'ville'] +['.s', 'ome'] +['Conf', 'irmed'] +['.s', 'can'] +['ig', 'Integer'] +['Decor', 'ator'] +['sh', 'ield'] +['ress', 'ive'] +['.d', 'id'] +['请', 'è¾ĵåħ¥'] +['Ġsh', 'utter'] +['D', 'am'] +['Ġparent', 'ing'] +['ey', 'ed'] +['$', 'item'] +['-de', 'velop'] +['Ġextract', 's'] +['Ġdecentral', 'ized'] +['ĠEl', 'sa'] +['_sp', 'in'] +['])', '+'] +['-in', 'itial'] +['Ġmult', 'itude'] +['Ġsens', 'ory'] +['ĠMODE', 'L'] +['Ġsafeg', 'uard'] +['ì', '¹'] +['Ġhunt', 'ers'] +['ĠT', 'iny'] +['IN', 'O'] +['decor', 'ate'] +['ĠNo', 'Such'] +['H', 'o'] +['(', 'Response'] +['Ġr', 
'uler'] +['ĉ', 'short'] +['Ġc', 'aster'] +['Ġclient', 'Id'] +['Ġp', 'db'] +['ëı', 'Ħ'] +['it', 'ic'] +['ĠGame', 'State'] +['Ġnew', 'Item'] +[')ĊĊ', 'ĊĊĊĊ'] +['ou', 'is'] +['n', 'oc'] +['.BL', 'ACK'] +['_V', 'ECTOR'] +['----------', '', '();'] +['.get', 'P'] +['any', 'e'] +['Ġneur', 'on'] +['if', 'old'] +['ĠK', 'nown'] +['Bit', 'coin'] +['Any', 'way'] +['ay', 'ette'] +["Ġ'", "['"] +['Ãł', 'nh'] +['m', 'gr'] +['Ġcor', 'related'] +['Ġn', 'ause'] +['Ġment', 'ality'] +['has', 'Many'] +['ĠF', 'G'] +['amp', 'ie'] +['IT', 'U'] +['F', 's'] +['.S', 'p'] +['_b', 'etween'] +['Dep', 'endencies'] +['ou', 'g'] +['Place', 'holder'] +['=', 'text'] +['ĠMan', 'aging'] +['ocal', 'ypse'] +['åĮ', 'Ĺ'] +['_m', 'ag'] +['f', 'ld'] +['â', 'ij'] +['C', 'AM'] +['ĠHelp', 'ers'] +['Ġd', 'ost'] +['/', 'out'] +['Ġassass', 'ination'] +['.get', 'Image'] +['ĠKenn', 'y'] +[".'", ')ĊĊ'] +['){', '//'] +['ĠR', 'anger'] +['Ġg', 'ek'] +['Ġsinc', 'ere'] +['<', 'Value'] +['ĠD', 'OT'] +['ĠVict', 'ory'] +['Ġleg', 'ends'] +['Ġpr', 'isons'] +['(ex', 'pression'] +['ĠR', 'abbit'] +['_s', 'entence'] +['Ġbit', 'es'] +['Ġon', 'Failure'] +['ĠâĪ', 'Ī'] +['K', 'im'] +['.g', 'ender'] +['ĠÎ', '»'] +['Ġ[', '.'] +['"]', ');'] +['land', 'ing'] +['-d', 'igit'] +['TE', 'MP'] +['ĉ', 'entry'] +['Ġstrt', 'ok'] +['Ġdesc', 'endants'] +['um', 'no'] +['Ġlean', 'ing'] +['Ġspecific', 's'] +['q', 'n'] +['ĠSp', 'art'] +['Ġpor', 'r'] +['EDIATE', 'K'] +['Ġse', 'per'] +["'", 'aut'] +['ĠSTE', 'P'] +['ĠBorder', 'Layout'] +['Ġret', 'ros'] +['ĠSalv', 'ador'] +['ĠEN', 'GINE'] +['x', 'dc'] +['T', 'weet'] +['v', 'k'] +['Ġì', '²'] +[']', '<<'] +['het', 'ics'] +['c', 'oding'] +['Re', 'ach'] +['.re', 'q'] +['gu', 'ide'] +['.s', 'cope'] +['sh', 'irt'] +['rog', 'ate'] +['SET', 'TING'] +['ĠProte', 'in'] +['Ġe', 'ing'] +['.', 'EMPTY'] +['.d', 'f'] +['Ġclear', 'er'] +['Ġc', 'rossover'] +['ĠTo', 'ys'] +['Ġco', 'ated'] +['.M', 'onth'] +['ĠAtt', 'ach'] +['/', 'run'] +['.t', 'abs'] +['Ġogs', 'Ã¥'] +['B', 'rown'] +['.D', 'ATE'] +['Ġf', 'os'] +['åŃĹ', '符'] 
+['W', 'ood'] +['-th', 'ree'] +['her', 'ited'] +['Ġ', 'rop'] +['(', 'ac'] +['Ġembod', 'iment'] +['ĠKenn', 'eth'] +['Ġcan', 'non'] +['Ġb', 'idding'] +['čĊ'] +['.get', 'Resources'] +['Ġl', 'ump'] +['_const', 's'] +['(', 'ext'] +['ĉd', 'ir'] +['â', 'Ŀ'] +['Ġpadding', 'Top'] +['Ġobs', 'ession'] +['Ġb', 'anning'] +['ĠApp', 'Module'] +['Ġpart', 'isan'] +['Ġcatalog', 'ue'] +['Ġmin', 'ors'] +['Ġpitch', 'es'] +['we', 'ep'] +['Ġundert', 'ake'] +['Ġthem', 'ed'] +['aud', 'it'] +['.scroll', 'Top'] +['Ġr', 'er'] +['Ġsympt', 'om'] +['Ġopen', 'ings'] +['.block', 's'] +['open', 'id'] +['Ġas', 'sh'] +['-s', 'ave'] +['ĠP', 'ig'] +['Ġreg', 'ain'] +['Ġin', 'icial'] +['/f', 'avicon'] +['ĉ', 'exp'] +['Ġsp', 'ices'] +['isk', 'a'] +['claim', 's'] +['m', 'ak'] +['definition', 's'] +['Ġcorrespond', 'ent'] +['ĠCann', 'abis'] +['__', ',Ċ'] +['ĠL', 'ucky'] +['ĠGa', 'ussian'] +['ĠN', 'early'] +['C', 'AD'] +["']", ']Ċ'] +['Ġadequ', 'ately'] +['ĠT', 'ITLE'] +['constitution', 'al'] +['-m', 'm'] +['_', 'override'] +['Ġbl', 'as'] +['.ready', 'State'] +['Ġremin', 'is'] +['Ġrein', 'forced'] +['ĠColl', 'abor'] +['Ġdecor', 'ating'] +['Ġb', 'achelor'] +['ERRU', 'PT'] +['Ġup', 'right'] +['ip', 'ation'] +['ĠNob', 'le'] +['Ġvalue', 'ForKey'] +['Ġset', 'Loading'] +['.I', 'gnore'] +['å', 'ģ'] +['G', 'lobals'] +['ĠM', 'ent'] +['AS', 'SES'] +['Ġlim', 'bs'] +['ĠH', 'UD'] +['inc', 'i'] +['.', 'iv'] +['ĠQ', 'ModelIndex'] +['F', 'use'] +['Ġped', 'al'] +['_F', 'REQ'] +['(', 'verbose'] +['Ġlong', 'itud'] +['ĠChar', 'ter'] +['ê', '·¸'] +['Ġbund', 'les'] +['.', 'ignore'] +['um', 'bo'] +['EM', 'A'] +['....', '...'] +['s', 'x'] +['.C', 'ard'] +['Ġhe', 'ute'] +['Ġste', 'er'] +['j', 'umlah'] +['Ġ{', '_'] +['_Check', 'ed'] +['Ġf', 'ax'] +['ĠG', 'ust'] +['itch', 'ens'] +['Ġ', '))ĊĊ'] +['Ġremark', 'ably'] +['/', 'XML'] +['-', 'remove'] +['_b', 't'] +['Ġinc', 'ub'] +['.p', 'ackage'] +['.current', 'Thread'] +['ĠHigh', 'lander'] +['.s', 'ide'] +['s', 'plash'] +['Ġ', 'ici'] +['=', 'D'] +['Ġp', 'uck'] +['Ġball', 'ots'] +['Ġhug', 
'ely'] +['co', 'eff'] +['Ġp', 'Data'] +['.C', 'OLUMN'] +['ĠHe', 'aling'] +['Ġord', 'in'] +['!', '),'] +["Ġ'", "',čĊ"] +['(m', 'd'] +['ĠS', 'ask'] +['<', 'strong'] +['Ġsurviv', 'or'] +['.s', 'eries'] +['Ġcaffe', 'ine'] +['Ġ`', '('] +['.TRA', 'ILING'] +['_', 'Input'] +['("', '^'] +['z', 'd'] +['&', ');Ċ'] +['ĠP', 'ing'] +['Ġv', 'oucher'] +['.r', 'ating'] +['-sh', 'irts'] +['ĠRetrie', 'ves'] +['.al', 'ibaba'] +['Or', 'acle'] +['_MO', 'V'] +['Old', 'Data'] +['Ġ/*', 'čĊ'] +['Ġg', 'boolean'] +['Ġ=>', 'čĊ'] +['Ġr', 'á'] +['Ġbl', 'unt'] +['ĠImage', 'Icon'] +['if', 'ik'] +['RT', 'C'] +['Ġfib', 'ers'] +['Ġto', 'ile'] +['.s', 'ent'] +['ĠPy', 'Qt'] +['$', 'app'] +['Ġmed', 'io'] +['Ġgrant', 'ing'] +['Ġtsl', 'int'] +['ĠM', 'ö'] +['(fig', 'size'] +['Ġhur', 'ricane'] +['Ġlif', 'es'] +['ĠÃ', 'Ħ'] +['rocess', 'ing'] +['_st', 'andard'] +['-', 'option'] +["'))", ')'] +['Ġvac', 'ant'] +['å·', '¥'] +['ĠH', 'ollow'] +['handle', 'Change'] +['Ġdiv', 'ider'] +['ĠEngine', 'ers'] +['Ġsv', 'ens'] +['Ġcompl', 'iant'] +['t', 'anggal'] +['ĠC', 'redits'] +['ĠEm', 'irates'] +['Rule', 'Context'] +['Ġreal', 'ization'] +['Ġdistr', 'acted'] +[']+', '='] +['Ġaug', 'ment'] +['ĠD', 'w'] +['ot', 'p'] +['or', 'rent'] +['Edit', 'ar'] +['.st', 'ock'] +['St', 'udy'] +['pe', 'ctions'] +['ĠGame', 'Manager'] +['=', 'cut'] +['Ġf', 'lock'] +['ĠRom', 'ans'] +['th', 'em'] +['-h', 'op'] +['Ġscreens', 'hots'] +['Ġ/*', '!Ċ'] +['Ġconvers', 'ions'] +['Ġnormal', 'ization'] +['(config', 'uration'] +['Ġa', 'eros'] +['_se', 'curity'] +['!', "'Ċ"] +['B', 'onus'] +['ĠDR', 'IVER'] +['ĉ', 'Date'] +['t', 'ie'] +['ĠWy', 'oming'] +['St', 'and'] +['it', 're'] +['Ġsh', 'oppers'] +['Ġdisadv', 'antage'] +['Ġlik', 'ing'] +['ç¬', 'ij'] +['Ġunderstand', 'able'] +['SE', 'E'] +['Ġh', 'oy'] +['Ġnin', 'ete'] +['Ġcon', 'fer'] +['Ġnow', 'rap'] +['ĠV', 'ern'] +[',', 'čĊčĊ'] +['imest', 'ep'] +['Layout', 'Manager'] +['à', '·'] +['ĉw', 'ait'] +['PLE', 'TED'] +['J', 'apan'] +['Ġindu', 'ce'] +['Ġå', '¯'] +['оз', 'в'] +['_END', 'POINT'] +['.h', 
'orizontal'] +['Ġacceler', 'ated'] +['rim', 'on'] +['IV', 'ES'] +['Trans', 'actions'] +['Le', 'an'] +['ĠSO', 'UR'] +['wh', 'ether'] +['y', 'g'] +['Ġo', 'id'] +['ĠEntity', 'Manager'] +['OUN', 'TRY'] +['Ġfil', 'a'] +['OLUM', 'NS'] +['IN', 'UE'] +['ĠAn', 'chor'] +['TR', 'AN'] +['wo', 'o'] +['block', 'quote'] +['ĠN', 'urse'] +['ĠCar', 'p'] +['Ġrede', 'em'] +['.', 'try'] +['ĠJ', 'P'] +['Ġtimestamp', 's'] +['Ġ?>', '"><'] +['ĠREM', 'OVE'] +['ĠStar', 'bucks'] +['Re', 'ally'] +['Ġflood', 'ed'] +['.C', 'allback'] +['Drop', 'Down'] +['ip', 'ro'] +['Ġt', 'ended'] +['l', 'te'] +['Ġproport', 'ions'] +['-', 'te'] +['ĠR', 'ena'] +['lic', 'ate'] +['for', 'ces'] +['.ex', 'tra'] +['.auth', 'enticate'] +['в', 'од'] +['¡', '°'] +['Ġfor', 'ControlEvents'] +['Ġsen', 'ha'] +['Ġke', 'in'] +['Ġmin', 'ist'] +['ĠPre', 'ference'] +['ĠTele', 'graph'] +['Ñĥ', 'п'] +['str', 'pos'] +['Ġillness', 'es'] +['Ġp', 'igs'] +['Ġget', 'Intent'] +['S', 'ol'] +['ĠÂ', '¡'] +['(c', 'pu'] +['[', 'prop'] +['s', 'creens'] +["');", '?>'] +['ĠAct', 's'] +['Ġstr', 'dup'] +['Ġaver', 'ages'] +['an', 'al'] +['ĠCas', 'ual'] +['Group', 'Box'] +['ĠHand', 'book'] +['/', 'comments'] +['Ġnumber', 'ed'] +['Ġbroadcast', 'ing'] +['çĽ', 'ij'] +['.native', 'Element'] +['.m', 'u'] +['Ġupdated', 'At'] +['ĠDoes', 'n'] +['.A', 'C'] +['.c', 'oll'] +['Ġrec', 'order'] +['_sh', 'a'] +['B', 'g'] +['b', 'il'] +['Ġbol', 'ts'] +['Ġç', '¬'] +['Ġim', 'posing'] +['ĠInformation', 'en'] +['_flash', 'data'] +['e', 'conomic'] +['Rem', 'ark'] +['uc', 'as'] +['ĠOff', 'icers'] +['ĠT', 'ER'] +['W', 'alk'] +['Ġmerc', 'ado'] +['_g', 'enerate'] +['H', 'Y'] +['Call', 'ing'] +['s', 'nap'] +['script', 'Id'] +['.', 'operation'] +['ĠFl', 'ame'] +['l', 'iness'] +['Ġrent', 'ed'] +['_t', 'oggle'] +['-ch', 'anging'] +['ĠT', 'Y'] +["'", 'util'] +['EE', 'P'] +['Ġgraph', 'ql'] +['ĠUn', 'i'] +['Ġimp', 'ulse'] +['.B', 'asic'] +['Ġenerg', 'ies'] +['M', 'ARY'] +['ĠMar', 'cel'] +['Ġmort', 'al'] +['Ġf', 'res'] +['m', 'ens'] +['m', 'otion'] +['Ġsample', 'd'] +['âĢľ', 
'That'] +['id', 'ay'] +['qu', 'ipment'] +['get', 'Int'] +['ĠA', 'bsolute'] +[",'", '"'] +['un', 'ed'] +['.sh', 'are'] +['Ġ}', ')('] +['mm', 'm'] +['ĠR', 'ising'] +['ä»', '»'] +['Ġun', 'employed'] +['x', 'fa'] +['.f', 'ollow'] +['ĉĉĉĉ', 'ĠĠĠĠĠĠ'] +['sl', 't'] +['.P', 'hone'] +['Ġkn', 'ives'] +['Ġe', 've'] +['on', 'Click'] +[']', '))čĊ'] +['ĠW', 'itness'] +['ĉ', 'NS'] +['ĠE', 'OS'] +['ĠSte', 'fan'] +['ĠPri', 'est'] +['âĢĶ', 'which'] +['Get', 'String'] +['.', 'By'] +['Ġup', 'stairs'] +['Ġdetr', 'iment'] +['bro', 'ken'] +['emb', 'ro'] +['Ġnic', 'otine'] +['il', 'ion'] +['Ġaston', 'ishing'] +['_', 'aff'] +['ĠLess', 'on'] +['Ġaccident', 'al'] +['od', 'or'] +['Ġdec', 'ir'] +['Ġnew', 'Name'] +['+', '.'] +['çĽ', '¸'] +['igs', 'list'] +['ĠG', 'ithub'] +['Ġsuccess', 'ive'] +['rac', 'ial'] +['Ġen', 'viron'] +['éªĮ', 'è¯ģ'] +['Ġredirect', 'ed'] +['T', 'OTAL'] +['Ġgrab', 'bing'] +['ĠL', 'ance'] +['Ġfor', 'fe'] +['_C', 'B'] +['å¾', '®'] +['El', 'apsed'] +['_w', 'ay'] +['(Dialog', 'Interface'] +['_me', 'asure'] +['x', 'bb'] +['D', 'og'] +['Dep', 'art'] +['-s', 'rc'] +['res', 'olver'] +['with', 'standing'] +['_sh', 'ell'] +['ĠLast', 'Name'] +['ĠAv', 'iation'] +['Ġbegin', 'ner'] +['("%', '.'] +['(to', 'ol'] +['Ġн', 'ов'] +[':', 'init'] +['(A', 'PI'] +['ĠMorr', 'ison'] +['vt', 'Color'] +['Ġstap', 'le'] +['/', 'INFO'] +['Ġsupern', 'atural'] +['Ġste', 'ak'] +['tim', 'eline'] +['zz', 'le'] +['"', '`ĊĊ'] +['Second', 'ary'] +['ĠNep', 'al'] +['.String', 'Utils'] +['Ġad', 'am'] +['Ġ(', '...'] +['Ġsub', 'stitution'] +['Ġboard', 'ing'] +['ĠKey', 'word'] +['ĠAss', 'ault'] +['dbc', 'Template'] +['Ġorder', 'Id'] +['(', 'engine'] +['.assert', 'That'] +['ĠVen', 'us'] +['Ġhomic', 'ide'] +['ĠA', 'val'] +['Ġg', 'utter'] +['ĠSupport', 'ed'] +['/p', 'art'] +['Ġac', 'claimed'] +['H', 'istor'] +['Ġmes', 'es'] +['ü', 'ber'] +['ĠRen', 'ew'] +['Ġgr', 'as'] +['ĠE', 'k'] +['Ġin', 'file'] +['ind', 'y'] +['.m', 'usic'] +['.S', 'croll'] +['ĠA', 'ges'] +['ĠNar', 'uto'] +['ĠG', 'ather'] +['Ġconfirm', 'ing'] +['=', 
'("'] +['Ġpitch', 'ed'] +['ole', 'y'] +['Fr', 'ance'] +["+'", '"'] +['$', 'total'] +['Ġon', 'de'] +['Ġd', 'itch'] +['_s', 'igma'] +['Ġcontinu', 'ity'] +['re', 'ward'] +['-', 'load'] +['Ġproces', 'o'] +['Lock', 'ed'] +['st', 'aw'] +['Ġsp', 'inal'] +['l', 'azy'] +['!', '=='] +['j', 'est'] +['Ġd', 'un'] +['ĠRod', 'gers'] +['ĉ', 'grid'] +['Ġlog', 'os'] +['ĠBeng', 'al'] +['.s', 'uper'] +['Provid', 'es'] +['Ġnut', 'rient'] +['.T', 'imestamp'] +['IZ', 'ATION'] +['åĨ', 'Į'] +['Ġf', 'ats'] +['ĠX', 'xx'] +['ct', 'ica'] +['Target', 's'] +['Ġcont', 'ours'] +['Ġre', 'ordered'] +[':', 'Array'] +['Ġtoler', 'ate'] +['V', 'ir'] +['Ġter', 'ribly'] +['Ġbr', 'icks'] +['(&', '_'] +['h', 'b'] +['Port', 'al'] +['ĠB', 'read'] +['.', 'which'] +['ÂŃ', 't'] +['as', 'InstanceOf'] +['Ġj', 'object'] +['ĉ', 'length'] +['_M', 'T'] +[';', '">čĊ'] +['_EX', 'IST'] +['Ġmat', 'ernal'] +['RE', 'L'] +['Ġê²½', 'ìļ°'] +['he', 'e'] +['Ġlayout', 's'] +['ĠL', 'ap'] +['ais', 'y'] +['Ġst', 'umbled'] +['ĠU', 'IG'] +['ĠS', 'co'] +['Ġimp', 'aired'] +['RES', 'SED'] +['Ġab', 'uses'] +['V', 'F'] +['AR', 'B'] +['.N', 'AME'] +['r', 'ch'] +['prim', 'ir'] +['_com', 'pleted'] +['Ġp', 'enny'] +['Ch', 'rome'] +['(b', 'egin'] +['ern', 'en'] +['-', 'checkbox'] +['Plain', 'OldData'] +['ĠL', 'PC'] +['r', 'ade'] +['sp', 'ir'] +['Ġcon', 'ceived'] +['T', 'ips'] +['ĠIo', 'T'] +['ĠG', 'an'] +['èģ', 'Ķ'] +['Ġbi', 'ases'] +['Ġconsult', 'ants'] +['ple', 'd'] +['_', 'ht'] +['associ', 'ated'] +['],', 'ĊĊ'] +['Ġdelight', 'ful'] +['ĠÑĤ', 'ек'] +['Hel', 'vetica'] +['(', 'load'] +['-exp', 'and'] +['_W', 'IDGET'] +['to', 'a'] +['ĠA', 'kt'] +['Ġom', 'n'] +['Ġcl', 'auses'] +['Int', 'el'] +['*/', '}Ċ'] +['_reg', 'istration'] +['Ġold', 'Value'] +['Ġrest', 'oring'] +['Ġun', 'real'] +['O', 'VER'] +['ĉĊĉĊ', 'ĉĊ'] +['AT', 'S'] +['_pro', 'be'] +['Ġdiv', 'isor'] +['.update', 'Dynamic'] +['å¹', '³'] +['Produ', 'ces'] +['st', 'amp'] +['.j', 'boss'] +['ĉt', 'ask'] +['!', '(:'] +['Ġpsych', 'ic'] +['@', 'class'] +['M', 'artin'] +['ĠPass', 'ed'] +['clar', 
'ations'] +['h', 'el'] +['а', 'Ñĩ'] +['ĉc', 'opy'] +['-b', 'in'] +['z', 'an'] +['ig', 'ram'] +['া', 'à¦'] +['(s', 'ig'] +['ĠC', 'aval'] +['_', '##'] +['Ġ%', '='] +['out', 'lined'] +['ĠAc', 'id'] +['Ġunpredict', 'able'] +['-d', 'ashboard'] +['Hex', 'String'] +['+', 'c'] +['.P', 'ublic'] +['áº', '©'] +['Ġconvey', 'or'] +['ĠE', 'B'] +['Ġselect', 's'] +['Ġknock', 'ing'] +['ĠC', 'ec'] +['IBUT', 'ES'] +['owa', 'Äĩ'] +['g', 'atsby'] +['*', 'v'] +['ent', 'ropy'] +['Ġdispatch', 'ed'] +['Ġcam', 'el'] +['ĠSat', 'urn'] +['Ġover', 'weight'] +['(', 'phone'] +['par', 'able'] +['%', 'B'] +['_v', 'ectors'] +['Ġbrew', 'ing'] +['ĠT', 'k'] +['ĠDownload', 's'] +['ĠS', 'aved'] +['.Pr', 'ice'] +['Ġcur', 'ved'] +['ĠParen', 'thood'] +['è', '¶'] +['.p', 'nl'] +['plet', 'ely'] +['.D', 'ay'] +['Ġadvertis', 'ers'] +['Ġej', 'ec'] +['Ġpr', 'zed'] +['ë', '¯'] +['!', "';Ċ"] +['ĠK', 'ush'] +['ĠT', 'AB'] +['Ġquest', 's'] +['Ġcoinc', 'idence'] +['umm', 'ies'] +['ĠKash', 'mir'] +['ĠEth', 'ics'] +['_g', 'rowth'] +['Ġakt', 'iv'] +['Ġgroup', 'ing'] +['å¢', 'ŀ'] +['_tr', 'uth'] +['åIJ', '¬'] +['t', 'odos'] +['is', 'et'] +['Tex', 'Coord'] +['ä', 'tt'] +['ĠZ', 'ur'] +['ro', 'ys'] +['_M', 'AGIC'] +['Ġbrew', 'ery'] +['(', 'State'] +['ĠSM', 'ALL'] +['ĠPl', 'ants'] +['it', 'bart'] +['each', 'er'] +['ĠAd', 'elaide'] +['L', 'u'] +['Ġf', 'ick'] +['und', 'les'] +['_load', 'ed'] +['и', 'е'] +['P', 'oll'] +['rit', 'ic'] +['EL', 'Y'] +['Ġ+', "'"] +['ĠProf', 'ession'] +['Ġst', 'amps'] +['ĠS', 'ew'] +['scroll', 'View'] +['Ġcomm', 'unist'] +['/pro', 'blems'] +['}čĊčĊ', 'čĊčĊ'] +[',', 'o'] +['Ġu', 'dp'] +['Ġob', 'ese'] +['appro', 've'] +['ancell', 'ation'] +['_G', 'ame'] +['ĠHas', 'htable'] +['adaptive', 'Styles'] +['Ġpossess', 'es'] +['.match', 'er'] +['function', 'al'] +['M', 'rs'] +['ĉs', 'ave'] +['ĠDb', 'Type'] +['Ġk', 'en'] +['get', 'Context'] +['Ġm', 'ans'] +['(', 'rel'] +['ĠBrother', 'hood'] +[')', '`Ċ'] +['è§', '£'] +['.In', 'formation'] +['OutOfRange', 'Exception'] +['ĠS', 'ek'] +['C', 'as'] +['Ġblog', 'gers'] 
+['E', 'ither'] +['("', '""'] +['Ġpin', 'ch'] +['Ġco', 'arse'] +[')', 'p'] +['ĠP', 'ulse'] +['Ġlear', 'nt'] +['Ġdent', 'ist'] +['Ġon', 'change'] +['Ġdirect', 'ives'] +['(', 'actions'] +['ny', 'der'] +['ĠSh', 'ir'] +['T', 'rait'] +['_de', 'p'] +['ĠP', 'ET'] +['ĠRE', 'P'] +['.App', 'Settings'] +['cu', 'ador'] +['iden', 'av'] +['Ġenv', 'i'] +['Ġsl', 'ammed'] +['ĠSh', 'oot'] +['Ġdate', 'Format'] +['.j', 'oda'] +['ve', 'ys'] +['Ġ)', '.ĊĊ'] +['Ġcare', 'g'] +['ĠPar', 'allel'] +['_', 'translation'] +['.function', 's'] +['.', 'obs'] +['Runtime', 'Exception'] +['[]', '='] +['over', 'view'] +['ĠSch', 'l'] +['Ġno', 'isy'] +['ĠOn', 'PropertyChanged'] +['S', 'ending'] +['Ġunf', 'amiliar'] +['U', 'pon'] +['ĠPrint', 's'] +['.t', 'yp'] +['Ġflee', 'ing'] +['ĉm', 'ove'] +['(', 'Un'] +['Ġq', 'r'] +['×', 'ľ'] +['_b', 'eta'] +['Ġsk', 'ies'] +['ĉm', 'e'] +['W', 'ND'] +['Ġstick', 'ers'] +['bl', 'as'] +['Ġinsert', 's'] +['Ġvers', 'es'] +['ĠD', 'ew'] +['Ġtang', 'ible'] +['Ġhe', 'cho'] +['P', 'OL'] +['Ġte', 'ardown'] +['om', 'nia'] +['IB', 'E'] +['.c', 'over'] +['_str', 'ategy'] +['^', '-'] +['set', 'Position'] +['u', 'ale'] +['S', 'igned'] +['Ġif', 'ace'] +['as', 'eline'] +['.set', 'Time'] +['ĠMin', 'eral'] +['ĠFight', 'ing'] +['sk', 'ins'] +['Ġdiscrim', 'in'] +['Ġdans', 'k'] +['ĠPr', 'inceton'] +['ac', 'ist'] +['Ġ(', '));Ċ'] +['tr', 'acks'] +['imon', 'ial'] +['ad', 'ecimal'] +['EP', 'ROM'] +['ugg', 'le'] +['.Not', 'ification'] +['$', 'mail'] +['c', 'antidad'] +['ĠJ', 'ung'] +['Ġseek', 'ers'] +['Ġpl', 'ausible'] +['t', 'ier'] +['еÐ', '¶'] +['Ġr', 'apper'] +['ĠMan', 'a'] +['ĠHttp', 'StatusCode'] +['Ġburn', 't'] +['los', 'es'] +['ĠF', 'oto'] +['ĠJson', 'Object'] +['Inst', 'agram'] +['Ġsys', 'call'] +['Ġreal', 'ities'] +['ĠMAT', 'LAB'] +[':^', '{Ċ'] +['TER', 'M'] +['ĠC', 'bd'] +['ĠPar', 'agraph'] +['Ġtrav', 'és'] +['Ġconstruct', 'ing'] +['Ġsw', 'al'] +['Ġp', 'ige'] +['LL', 'LL'] +['-ex', 'isting'] +['G', 'ets'] +['Ġmelt', 'ed'] +['Ġmitig', 'ate'] +['H', 'en'] +['Ġh', 'm'] +['im', 'as'] +['ĠA', 
'o'] +['ĠP', 'erez'] +['ĠD', 'AL'] +['Ġëĭ', '¤'] +['Ġdiv', 'is'] +['Storyboard', 'Segue'] +['ĠMod', 'ify'] +['ĠÃľ', 'ber'] +['_O', 'VERRIDE'] +['.p', 'em'] +['unt', 'os'] +['Ġespa', 'ñ'] +['Ġ{', '?'] +['ĠP', 'AY'] +['_ip', 'v'] +['ĠF', 'ury'] +['__', '.__'] +['el', 'ow'] +['-center', 'ed'] +['check', 's'] +['_', 'Reg'] +['-J', 'avadoc'] +['ĉ', 'load'] +['ĠLik', 'ewise'] +['ا', 'Ùħ'] +['UN', 'E'] +['.se', 'm'] +['x', 'cb'] +['ĠC', 'ave'] +['_s', 'leep'] +['Ġsil', 'ently'] +['ĠExt', 'reme'] +['.To', 'Upper'] +['ĉC', 'HECK'] +['Ġc', 'ue'] +['ĠQ', 'ByteArray'] +['Ġcorrupt', 'ed'] +['ĠD', 'é'] +['Ġimp', 'ed'] +['Get', 'Name'] +['Ġinaccur', 'ate'] +['Ġso', 'ber'] +['е', 'е'] +['Ġbar', 'code'] +['--', '){Ċ'] +['ink', 'i'] +['Ġé', 'p'] +['Ġd', 'ri'] +['ĠAL', 'T'] +['>>>>', '>>>>'] +['ont', 'a'] +['[', 'L'] +['Ġinter', 'es'] +['ver', 'ting'] +['Ġdi', 'agnostics'] +['p', 'dev'] +['è', '©'] +['ĠIntegr', 'ated'] +[').', "'"] +['_g', 'c'] +['$', 'text'] +['.g', 'ames'] +['ĠT', 'erra'] +["'", 'Re'] +['.trans', 'fer'] +['_F', 'IFO'] +['get', 'Model'] +['Ġbl', 'and'] +['ĠCole', 'man'] +['Ġpr', 'imes'] +['Ġæ', 'Ī'] +['Ġcross', 'es'] +['n', 'k'] +['G', 'ING'] +["Ġ'", '^'] +['ĠB', 'lob'] +['Ġinter', 'course'] +['ĠBl', 'vd'] +['Ġweigh', 's'] +['_reg', 'ular'] +['ĠPer', 'th'] +['Ġsepar', 'ating'] +['Ġb', 'illed'] +['.tab', 'Control'] +['Ġpup', 'pet'] +['Ġutil', 'ization'] +['Ġâĸ', 'ł'] +['Ġsucc', 'es'] +['Ġl', 'amps'] +['_pro', 'j'] +['E', 'ric'] +['Ġren', 'ovation'] +['ĠFam', 'ilies'] +['ĠB', 'its'] +['part', 'ials'] +['-M', 'en'] +['s', 'olution'] +['Ġd', 'warf'] +['.IN', 'TEGER'] +['ĠLO', 'CK'] +['.', 'ct'] +['Ġexcer', 'pt'] +['ĠP', 'ix'] +['ĠFirst', 'Name'] +['ANT', 'ED'] +['ĠAd', 'mir'] +['-h', 'elp'] +['P', 'rior'] +['ĠAl', 'ign'] +['.IN', 'STANCE'] +['Line', 'Edit'] +["('/", ':'] +['Ġin', 'et'] +['od', 'us'] +['.p', 'kl'] +['ĠK', 'Y'] +['up', 'ert'] +['Ġn', 'erves'] +['_grad', 'ient'] +['}', "','"] +['_un', 'ref'] +['Ġs', 'aturated'] +['ĠConn', 'ected'] +['ĠF', 'N'] +['EX', 
'IT'] +['Ġtele', 'port'] +['Ġav', 'ait'] +['Page', 'Route'] +['Ġdivor', 'ced'] +['(l', 'ang'] +['f', 'st'] +['ĠT', 'yr'] +['Ġmess', 'enger'] +['if', 'stream'] +['X', 'S'] +['ĠBank', 'ing'] +['Ġinfect', 'ious'] +['ĠM', 'ons'] +['_LO', 'OP'] +['Ġzur', 'ück'] +['Ġobt', 'ener'] +['/re', 'pos'] +['V', 'el'] +['ac', 'ro'] +['Ġuser', 'Repository'] +['style', 'Type'] +['ĠS', 'RC'] +['VML', 'INUX'] +['rec', 'ursive'] +['/', 'bar'] +['_ch', 'ip'] +['omin', 'ated'] +['ĠN', 'it'] +['âĢĶ', 'to'] +['ĠBudd', 'h'] +['ом', 'еÑĢ'] +['ĠM', 'AG'] +['ĠC', 'HE'] +['_d', 'en'] +['.', 'raises'] +['_de', 'gree'] +['Ġpump', 'kin'] +['_tem', 'plates'] +['_M', 'EDIA'] +['ĠTim', 'eline'] +['Ġb', 'ots'] +['Object', 'Type'] +['Ġbu', 'ys'] +['.post', 's'] +['C', 'AL'] +['wait', 'ing'] +['ĠDani', 'els'] +['Ġd', 'abei'] +['ĠS', 'igma'] +['il', 'or'] +['ig', 'el'] +[',', 'W'] +['AD', 'S'] +['(', 'panel'] +['ì²', '´'] +['it', 'ating'] +['.p', 'alette'] +['Ġmos', 'quito'] +['Ġt', 'ego'] +['(parse', 'Int'] +['Ġdes', 'pués'] +['p', 'romise'] +['Ġw', 'ij'] +['types', 'cript'] +['ĠT', 'v'] +['_IDENT', 'IFIER'] +[').ĊĊ', 'Ċ'] +['_fl', 'at'] +['its', 'u'] +['US', 'R'] +['ex', 'perience'] +['-f', 'it'] +['ph', 'inx'] +['_th', 'resh'] +['Ġide', 'ally'] +['ĠFre', 'eman'] +[',', 'DB'] +['_r', 'w'] +['çŃ', 'ī'] +['U', 'b'] +['_stat', 'istics'] +['="', '"><'] +['Ġch', 'ore'] +['Ġy', 'ork'] +['inst', 'alled'] +['Add', 'itionally'] +['Ġp', 'stmt'] +['yl', 'ko'] +['::', 'Ċ'] +['Fore', 'st'] +['Ġhead', 'set'] +['Ġgall', 'on'] +['ÑĢ', 'ем'] +['Ġwithdraw', 'n'] +['ĠC', 'andidate'] +['Ġmel', 'ting'] +['Ġfree', 'zer'] +['Ġh', 'l'] +['_HE', 'LP'] +['m', 'ime'] +['(', '/*'] +['Ġth', 'irst'] +['$', 'return'] +['member', 'of'] +['еÐ', '±'] +['ĠHttp', 'ServletRequest'] +['(', 'ob'] +['_', 'Result'] +['Ġassert', 'ed'] +['Ġfulfill', 'ing'] +['Ġstret', 'ches'] +['par', 'ated'] +['-f', 'unded'] +['Ġå', 'Ľ'] +['ing', 'les'] +['_c', 'a'] +['.', 'condition'] +['ĠDis', 'plays'] +['Ġor', 'ang'] +['ĠC', 'RE'] +['Ġgl', 'Bind'] 
+['ĠSelect', 'or'] +['/', 'type'] +['ĠAlex', 'a'] +['ched', 'ules'] +['ĠPen', 'insula'] +['Ġpar', 'ity'] +['ĉ', 'dest'] +['ĠDo', 'ors'] +['čĊ', 'ĉčĊ'] +['_dim', 'ension'] +['Ġa', 'load'] +['.St', 'oredProcedure'] +['(p', 'aren'] +['ĠBur', 'ke'] +["')", ']Ċ'] +['-', 'engine'] +['Ġqu', 'ir'] +['ĠHy', 'brid'] +['ĠDo', 'e'] +['Ġout', 'lines'] +['ĠTrend', 's'] +['_N', 'V'] +['per', 'iments'] +['ĠH', 'in'] +['?', "',"] +['ĉ', 'Text'] +['F', 'UL'] +['Ġsm', 'ells'] +['Ġs', 'lick'] +['Ġmis', 'erable'] +['ĠArray', 'Adapter'] +['Ġparam', 'String'] +['H', 'om'] +['_l', 'iterals'] +['us', 'uarios'] +['Ġprompt', 'ing'] +['_l', 'azy'] +['ĠActiv', 'ation'] +['_', 'oc'] +['We', 'ak'] +['Ġan', 'ecd'] +['ĠU', 'CLA'] +['=', 're'] +['isse', 'ment'] +['ĠEsc', 'orts'] +['Ex', 'cellent'] +['ĠP', 'ause'] +['Ġre', 'positories'] +['T', 'OR'] +['ari', 'ate'] +['_is', 'o'] +['up', 'dates'] +['hal', 'b'] +['udi', 'ante'] +['ë¡', 'Ŀ'] +['Ġna', 'ive'] +['ĠP', 'eg'] +['ĠL', 'ounge'] +['ARG', 'IN'] +['(b', 'in'] +['On', 'ClickListener'] +['ĠFA', 'ILED'] +['Ġl', 'ite'] +['Ġd', 'zie'] +['ĠL', 'iteral'] +['iv', 'or'] +['fc', 'ntl'] +['Ġe', 'ats'] +['Ġq', 'ed'] +['Un', 'lock'] +['rid', 'ing'] +['und', 'ai'] +['=', 'M'] +['AT', 'TER'] +['Configure', 'Await'] +['ici', 'as'] +['ustom', 'ed'] +['Ġsuccess', 'ion'] +['end', 'Time'] +['ĠJ', 'upiter'] +['Ġjud', 'ging'] +['d', 'ration'] +['_d', 'ocs'] +['.m', 'o'] +['Ġeduc', 'ators'] +['ĠV', 'ine'] +['Con', 'd'] +['[', 'out'] +['q', 'b'] +['\\', 'Validator'] +['Ġmean', 'ings'] +['Ġpresent', 'ly'] +['Ġdiv', 'iding'] +['otten', 'ham'] +['asc', 'ular'] +['Ġtrail', 'ers'] +['ĠC', 'LOSE'] +['ам', 'и'] +['âĢĻ', 'ai'] +['ĠG', 'ain'] +['w', 'or'] +['Ġpl', 'anner'] +['Ġdistrib', 'uting'] +['v', 'at'] +['month', 's'] +['x', 'label'] +['H', 'F'] +['V', 'iol'] +['.BASE', 'LINE'] +['еÑĤ', 'ÑģÑı'] +['ĠR', 'otate'] +['Ġtx', 'n'] +[':', 'bold'] +['Ġb', 'loss'] +['Forg', 'ery'] +['(', 'embed'] +['Ġjak', 'o'] +['s', 'printf'] +['the', 'ir'] +['Ġexhib', 'its'] +['-', 'static'] 
+['he', 'cy'] +['get', 'ActiveSheet'] +['.c', 'lients'] +['ãģ', 'į'] +['_h', 'ide'] +['[', 'word'] +['C', 'b'] +['add', 'Item'] +['ax', 'e'] +['_r', 'adio'] +['al', 'ion'] +['mod', 'ifier'] +['Ġsat', 'uration'] +['Ġden', 'om'] +['_p', 'ixels'] +['m', 'ess'] +['(f', 'l'] +['at', 'if'] +['Ġse', 'cs'] +['Ġpro', 'stitution'] +['Ġgrand', 'children'] +['Ġparad', 'ise'] +['ĠF', 'eld'] +['_B', 'INARY'] +['it', 'ous'] +['à¹', 'Ħ'] +['Ġflash', 'ing'] +['-s', 'ided'] +['Ġcontrad', 'iction'] +['/*', 'ĊĊ'] +['y', 'label'] +['ĠT', 'et'] +['Ġadm', 'ire'] +['res', 'o'] +['Ġlet', 'z'] +['ĠSE', 'ARCH'] +['sl', 'ots'] +['ĠRew', 'ards'] +['ĠH', 'og'] +['ĠNS', 'Data'] +['st', 'ash'] +['F', 'all'] +['ĠA', 'mer'] +['Line', 'arLayout'] +['/', 'photos'] +['Ġfe', 'ather'] +['Ġ|', 'čĊ'] +['Download', 's'] +['.Start', 'sWith'] +['Ġ//', '#'] +['ine', 'Transform'] +['Ġaff', 'id'] +['V', 'tbl'] +['ĠRog', 'ue'] +['scri', 'bed'] +['Ġfa', 'uc'] +['ĠMon', 'roe'] +['Ġdecl', 'ares'] +['mod', 'ern'] +['re', 'on'] +['ay', 'be'] +['P', 'ASS'] +['f', 'ers'] +['_MULT', 'I'] +['ĠMath', 'ematics'] +['Ġsud', 'ah'] +['_ATT', 'ACH'] +['Ġnumber', 'With'] +['ĠSol', 'omon'] +['j', 'in'] +['ograf', 'ia'] +['ö', 'l'] +['_d', 'esign'] +['cul', 'ated'] +['ĠL', 'una'] +['ies', 'z'] +['Ġ=>', "'"] +['Ġrevel', 'ations'] +['Al', 'ong'] +['(', 'ed'] +['ĠF', 'ilename'] +['Ġy', 'label'] +['Sec', 'ure'] +['Ġbus', 'ca'] +['agn', 'osis'] +['_RE', 'CE'] +['Ġoverl', 'apping'] +['Ext', 'ent'] +['Ġanticip', 'ation'] +['Check', 's'] +['ĠALS', 'O'] +['or', 'c'] +['iling', 'ual'] +['it', 'ational'] +['Ġadv', 'ancement'] +['ou', 'ro'] +['ĠP', 'redicate'] +['å¾', 'Ĺ'] +['er', 'ia'] +['ĠPier', 'ce'] +['or', 'io'] +['Ġmer', 'its'] +['Ġpe', 'anut'] +['.P', 'ackage'] +['ĠCon', 'duct'] +['_SENS', 'OR'] +['Ġbo', 'iling'] +['Ġin', 'tra'] +['ĠI', 'GN'] +['ĠF', 'ur'] +['.Ref', 'resh'] +['ĠRe', 'ach'] +['_dec', 'oder'] +['.Ex', 'p'] +['ĠÑĤ', 'ак'] +['p', 'ill'] +[',', 'Q'] +['ĠGr', 'ill'] +['Ġpop', 'ping'] +['.A', 'g'] +['Ġpro', 'yecto'] 
+['Ġmile', 'age'] +['Ġec', 'ological'] +[']', ']);Ċ'] +['ĠÂ', 'Ń'] +['sub', 'plot'] +['ac', 'ad'] +['ĠTry', 'ing'] +['rec', 'ipes'] +['$', 'criteria'] +['ĠPers', 'ian'] +['-b', 'ound'] +['M', 'ASK'] +['ĠG', 'esture'] +['Ġk', 'k'] +['ĠP', 'VC'] +['Ġprohib', 'ition'] +['Ġcom', 'ando'] +['ĠLO', 'OK'] +['Sh', 'opping'] +['Ġdist', 'ortion'] +['<', 'Boolean'] +['.Get', 'Length'] +['um', 'pt'] +['\\', 'Product'] +['ell', 'ery'] +['Ġfire', 'wall'] +['form', 'atted'] +['.red', 'is'] +['Ġes', 'a'] +['ĠRh', 'ode'] +['S', 'om'] +['.n', 'on'] +["Ġ'", ').'] +['Ġget', 'View'] +['ạ', 'n'] +['pr', 'us'] +['Mat', 'thew'] +['Ġs', 'ia'] +['ĠF', 'ors'] +['G', 'PU'] +['ient', 'ras'] +['_IN', 'ST'] +['Ġol', 'arak'] +['Ġimport', 'ing'] +['T', 'CP'] +['/', '");Ċ'] +['e', 'ither'] +['Ġfresh', 'ly'] +['c', 'ascade'] +['(char', 'acter'] +['ĠJe', 'ep'] +['ot', 'ics'] +['_', 'UTIL'] +['.Xtra', 'Printing'] +['.first', 'Child'] +['ĠEx', 'cell'] +['Ġd', 'vd'] +['Ġt', 'aller'] +['Ġr', 'as'] +['yp', 'ass'] +['Ġassign', 's'] +['Ġgri', 'ev'] +['-m', 'ore'] +['J', 'D'] +['ĠBurn', 's'] +["'", '>čĊ'] +['.D', 'ependency'] +['.Query', 'String'] +['.O', 'wner'] +['Ġexp', 'iry'] +['Th', 'u'] +['(', 'Vec'] +['Ġhazard', 'ous'] +['Ġr', 'pm'] +['AP', 'ON'] +['Ġadd', 'Target'] +['sv', 'ille'] +['p', 'Net'] +['ĠIm', 'g'] +['ĠTIM', 'ER'] +['.An', 'imation'] +['Ġbe', 'k'] +['Ġass', 'ort'] +['Ġle', 'bih'] +['Ġbody', 'Parser'] +['Ġvibr', 'ating'] +['ID', 'L'] +['Ġbutter', 'knife'] +['int', 'ers'] +['Ġpersu', 'ade'] +['ĠLGBT', 'Q'] +['è', 'ĭ'] +['.s', 'oft'] +['Ġbe', 'ams'] +['_s', 'ur'] +['.D', 'ef'] +['Ġl', 'abs'] +['ĉ', 'plt'] +['Ġsk', 'ins'] +['Ġtransf', 'erring'] +['Ġimag', 'inary'] +['_E', 'nd'] +[';', 'background'] +['Ġl', 'aps'] +['_COM', 'MENT'] +['(S', 'DL'] +['ond', 's'] +['.Rec', 'ord'] +['ĠIm', 'plements'] +['_t', 'icks'] +['()', '))ĊĊ'] +['Ġa', 'rose'] +[']', '?'] +['ĠM', 'p'] +['ĠI', 'Command'] +['Ġsculpt', 'ure'] +['Ġcontract', 'ed'] +['<', 'HTML'] +['Ġcal', 'end'] +['at', 'y'] +['/', 'Sub'] +['Ġkv', 
'inn'] +['_', 'IGNORE'] +['ĠSh', 'ane'] +['ML', 'S'] +['Ġstim', 'ulate'] +['Part', 'ition'] +['Ġm', 'un'] +['ó', 'm'] +['eral', 'a'] +['-', 'account'] +['.B', 'inary'] +['c', 'é'] +['Ġse', 'ize'] +['connection', 's'] +['ĠĊ', 'ĠĠĠĠĠĠĠĠĊ'] +['ĠDi', 'agnostic'] +['V', 'ISIBLE'] +['ĠRun', 's'] +['Ġimpress', 'ions'] +['s', 'uite'] +['ob', 'le'] +['~', '-'] +['ak', 'ukan'] +['<', 'Person'] +['ĠN', 'os'] +['ĠG', 'ui'] +['.wait', 'For'] +['RE', 'SET'] +['Ġpost', 'pon'] +['Dis', 'cover'] +['arr', 'ison'] +['sh', 'aw'] +['b', 'lood'] +['AJ', 'OR'] +['æĽ´', 'æĸ°'] +['ĠM', 'use'] +['æĶ', '¶'] +['Ġret', 'aining'] +['ot', 'te'] +['Ġmos', 'que'] +['ĠS', 'ne'] +['Ġstandard', 'ized'] +['Ġmain', 'land'] +['_th', 'ree'] +['unge', 'ons'] +['get', 'Doctrine'] +['Ġwh', 'ale'] +['Ġag', 'g'] +['ĠP', 'orsche'] +['now', 'led'] +['lat', 'ent'] +['ĠRel', 'ation'] +['Ġ//', "'"] +['Ġshut', 'ting'] +['ĠRem', 'ix'] +['_c', 'ov'] +['Ġs', 'ailing'] +['Ġv', 'owed'] +['Ġp', 'ots'] +['out', 'u'] +['Ġhair', 'y'] +['cast', 's'] +['Rel', 'oad'] +['Ġre', 'connect'] +['ter', 'a'] +['.child', 'Nodes'] +['ĠR', 'ack'] +['Ġcurrent', 'Index'] +['Ġall', 'en'] +['Ġ', 'ç͍æĪ·'] +['ĠC', 'ubs'] +['[', 'X'] +['_SE', 'Q'] +['_RE', 'MOVE'] +['.get', 'Action'] +['(/', '^'] +['err', 'ar'] +['Ġ', 'ether'] +['cur', 've'] +['Ġsl', 'ap'] +['Ġu', 'om'] +['O', 'thers'] +['Ġen', 'gr'] +['Dis', 'position'] +['Ġst', 'aged'] +['E', 'ye'] +['ĠA', 'ux'] +['auth', 'enticate'] +['Ġ$', '?'] +['ĠAndre', 'as'] +['Ġset', 'w'] +['.A', 'rt'] +['Ġforecast', 's'] +['Ġa', 'unt'] +['-m', 'iddle'] +['Ġmis', 'd'] +['des', 'k'] +['Ġescort', 'e'] +['ĠCas', 'a'] +['rop', 'ical'] +['Ġexem', 'ple'] +['plan', 'et'] +['(U', 'INT'] +['Ġwh', 'ip'] +['ĠPC', 'B'] +['clide', 'an'] +['="', '\\'] +['Ġox', 'ide'] +['Ġsucceed', 's'] +['der', 'ived'] +['ĠEcon', 'om'] +['_co', 'ordinates'] +['ir', 'as'] +['D', 'raft'] +['Ġvisual', 'ize'] +['B', 'rian'] +['_ASS', 'UME'] +['ĠObject', 'Id'] +['Ġtrain', 'ers'] +['_FOR', 'CE'] +['Ġcon', 'soles'] +['-', 'process'] 
+['lic', 'her'] +['ĠSim', 'mons'] +['T', 'aking'] +['ĠCl', 'aims'] +['Ġdiffé', 'rent'] +['Activity', 'Result'] +['Ġsn', 's'] +['éĢī', 'æĭ'] +['ĠCr', 'us'] +['Ġll', 'am'] +['r', 'ab'] +['ĠJo', 'an'] +['AA', 'A'] +['ĉf', 'ilter'] +['ish', 'ops'] +['get', 'ting'] +['à', 'µ'] +['Ġquant', 'o'] +['P', 'ast'] +['ov', 'ich'] +['Ġin', 'justice'] +['ĠF', 'LOAT'] +['Ġal', 'right'] +['\\', 'DB'] +['(', 'GameObject'] +['u', 'ish'] +['(b', 'ot'] +['Ġgall', 'ons'] +['ĠR', 'é'] +['ĠS', 'aid'] +['ĠSTDMETHOD', 'CALLTYPE'] +['ais', 'ing'] +['_process', 'or'] +['ell', 'idos'] +['ter', 'dam'] +['ĠBe', 'am'] +['Text', 'Area'] +['Ġret', 'orno'] +['.M', 'ake'] +['Ġ$', '("<'] +['Ġlock', 'down'] +['Ġremed', 'ies'] +['Ġve', 'el'] +['x', 'ee'] +['do', 'ctype'] +['F', 'il'] +['ĠExp', 'and'] +['Ġemp', 'loys'] +['Ġsession', 'Storage'] +['Ph', 'p'] +['P', 'ublish'] +['Ġret', 'al'] +['f', 'abs'] +['ynam', 'ics'] +['Ġtoss', 'ed'] +['ĠnumberOfRows', 'InSection'] +['x', 'path'] +['\\', 'modules'] +['Ġdis', 'astr'] +['ĠM', 'ULT'] +['.M', 'esh'] +['-st', 'age'] +['Ġs', 'df'] +['it', 'ung'] +['ug', 'es'] +['Ġ?>', '">\''] +['kin', 'son'] +['Ġк', 'ол'] +['ogn', 'itive'] +['_', 'li'] +['Ġim', 'minent'] +['Ġaff', 'inity'] +['.sign', 'al'] +['Ġnot', 'ch'] +['ĠSteel', 'ers'] +['max', 'length'] +['K', 'K'] +['ĠEug', 'ene'] +['_P', 'WM'] +['ro', 'i'] +['Ġâ', 'Ĺı'] +['ĠH', 'amburg'] +['.M', 'ust'] +['Ġax', 'e'] +['en', 'ef'] +['Ġamb', 'itions'] +['ĠSpec', 'ies'] +['ĠSt', 'ress'] +['Ġa', 'while'] +['Ġб', 'Ñĥд'] +['Ġwith', 'stand'] +['ĠDec', 'oder'] +['_in', 'ventory'] +['Ġ{', 'ččĊ'] +['Ġt', 'gt'] +['Ġrail', 'road'] +['W', 'ASHINGTON'] +['Ġnegot', 'iated'] +['N', 'ST'] +['-', 'phone'] +[',', 'U'] +['Ġexerc', 'ising'] +['á»', '¥'] +['_P', 'IXEL'] +['av', 'ors'] +['iter', 'ated'] +['Ġv', 'ampire'] +['ad', 'al'] +['In', 'grese'] +['Ġun', 'g'] +['ject', 'ive'] +['.c', 'ells'] +['Ġn', 'ano'] +['Ġmark', 'down'] +['_R', 'ULE'] +['(event', 's'] +['Ġl', 'uggage'] +['MESS', 'AGE'] +['ig', 'keit'] +['$', 'count'] 
+['Attribute', 'Name'] +['IG', 'INAL'] +['_E', 'nt'] +['ĠB', 'F'] +['ĠCOM', 'MENT'] +['_in', 'i'] +['ĠEurope', 'ans'] +['ĠB', 'elle'] +['åij', '½'] +[')', "['"] +['åº', 'Ķ'] +['ĠUse', 'ful'] +['.re', 'ference'] +['()', '",'] +['_', 'grade'] +['ĠK', 'aw'] +['Ġsent', 'encing'] +['Ġsocial', 'ism'] +['mon', 'ster'] +['_L', 'AYER'] +['Ġdee', 'pest'] +['w', 'k'] +['ĠNo', 'ise'] +['###', 'ĊĊ'] +['Ġpr', 'éc'] +['ot', 'le'] +['ÑĤ', 'е'] +['a', 'uf'] +['ib', 'al'] +['Ġcon', 'quer'] +['>', 'Email'] +['Ġamb', 'ulance'] +['O', 'AD'] +['Ġ("', '%'] +['ĠF', 'I'] +['.f', 'ixture'] +['Ġter', 'se'] +['ĠĠĠĠ', 'ĉĉĉĉ'] +['Ġsanct', 'uary'] +['ug', 'i'] +['ĠCom', 'parator'] +['Definition', 's'] +['Ġast', 'hma'] +['Ġl', 'act'] +['Ġhard', 'wood'] +['.c', 'lock'] +['Ġattract', 'ing'] +['ĠM', 'our'] +['(d', 'istance'] +['ic', 'its'] +['Ġbon', 'ne'] +['ĠAC', 'CESS'] +['.Deserialize', 'Object'] +['ĠTyp', 'ed'] +['Ġje', 'u'] +['Ġapp', 'Id'] +['ĠCl', 'ara'] +['ĠH', 'F'] +['ĠRe', 'ich'] +['ipp', 'les'] +['//----------------------------------------------------------------', '----------------'] +['_del', 'ivery'] +['erial', 'ization'] +['Ġplaint', 'iffs'] +['Sc', 'ient'] +['sh', 'opping'] +['ĠD', 'ummy'] +['ĠW', 'ald'] +['Group', 'Name'] +['Ġins', 'cription'] +['el', 'og'] +['::::', '::::'] +['_', 'ld'] +['Back', 'Pressed'] +['.R', 'aw'] +['ĠOn', 'Trigger'] +['Ġmuse', 'ums'] +['ĠBe', 'en'] +['ĠAdvent', 'ures'] +['Ġsl', 'ate'] +['Ġlet', 't'] +['Ġsu', 'nd'] +['ĠG', 'in'] +['ĠMechan', 'ical'] +['.s', 'hip'] +['App', 'Component'] +['Ġdest', 'ined'] +['Ġdw', 'elling'] +['Prof', 'iler'] +['Pre', 'pare'] +['ze', 'ich'] +['Ġsil', 'icon'] +['(h', 'as'] +['Ġ#', '%'] +['VID', 'EO'] +['Ġcollabor', 'ate'] +['L', 'in'] +['Ġsc', 'opes'] +['(', 'className'] +['(s', 'd'] +['and', 'in'] +['.h', 'am'] +['Service', 'Impl'] +['-des', 'cribed'] +['Ġiron', 'y'] +['st', 'ial'] +['ĠHu', 'awei'] +['(re', 'po'] +['Ġunexpected', 'ly'] +['ĠK', 'ai'] +['.inst', 'all'] +['\\x', 'f'] +['Ġexhib', 'ited'] +['_T', 'CP'] +['ĠO', 'x'] 
+['_CH', 'O'] +['Ġprostitu', 'erte'] +['Ġv', 'ä'] +['Ġsit', 'o'] +['Ġconstitu', 'ents'] +['ĠContin', 'ued'] +['ĠS', 'AVE'] +['r', 'ss'] +['/', 'message'] +['ub', 'es'] +['Ġmisd', 'emean'] +['Ġtax', 'ation'] +['Ġstory', 'line'] +['h', 'air'] +['ĠFind', 's'] +['S', 'IG'] +['ver', 'ification'] +['~', '='] +['.h', 'p'] +['Iter', 'able'] +['Ñĭ', 'е'] +['ator', 'i'] +['Ġc', 'tr'] +['R', 'x'] +['_', ');ĊĊ'] +['d', 'ag'] +['.p', 'in'] +['Ġp', 'seud'] +['Ġinv', 'o'] +['ÑģÑĤ', 'ÑĢ'] +['_p', 'ix'] +['为', '空'] +['Ġsw', 'orn'] +['âĢĶ', 'or'] +['_reg', 'istry'] +['Ġdis', 'asters'] +['ĠRO', 'I'] +['ĠâĢ', 'ķ'] +['akt', 'u'] +['fore', 'st'] +['be', 'iten'] +['âĢĶ', 'I'] +['ue', 'va'] +['eg', 't'] +['Ġsp', 'ikes'] +['URE', 'S'] +['ĠRecomm', 'ended'] +['Ġexplo', 'ited'] +['ĠFreder', 'ick'] +['_COMP', 'LETE'] +['ĠDr', 'ugs'] +['!!!!', '!!!!'] +['ĠR', 'iv'] +['ST', 'OP'] +['RO', 'OM'] +['ĠP', 'ASSWORD'] +['C', 'ookies'] +['.E', 'l'] +['á»', 'Ń'] +['ĠB', 'ert'] +['Ġhash', 'ed'] +['ic', 'ester'] +['Ġdecor', 'ator'] +['Ġquery', 'String'] +[':', ';Ċ'] +['Ġ"', '["'] +['oto', 'pe'] +['-A', 'meric'] +['ĠMatthew', 's'] +['UR', 'AL'] +['âĢľ', ','] +['Sum', 'mer'] +['f', 'os'] +['_CONT', 'AINER'] +['_A', 'CK'] +['Ġfil', 'tr'] +['_dis', 'p'] +['_', 'Re'] +['Ġfac', 'ile'] +['а', 'ÑĪ'] +['Ġìķ', 'Ĭ'] +['Ġe', 'ben'] +['Ġspr', 'ink'] +['ĠQ', 'uint'] +['>', 'V'] +['Ġhistor', 'ians'] +['our', 'met'] +['ĠMonitor', 'ing'] +['led', 'ger'] +['c', 'ott'] +['Ġw', 'are'] +['GG', 'LE'] +['c', 'ars'] +['ĠM', 'EDIATEK'] +['Ġvol', 'upt'] +['_', 'View'] +['HE', 'L'] +['(c', 'opy'] +['(st', 'ats'] +['Ġchrom', 'osome'] +['ĠCurt', 'is'] +['-', 'conf'] +['(', 'asset'] +['Ġhv', 'or'] +['File', 'System'] +['<', '>();čĊ'] +['oc', 'oder'] +['ĠC', 'annon'] +[')', 'x'] +['ĠSm', 'ooth'] +['ĠS', 'AS'] +['_', 'ce'] +['ĉ', 'prev'] +['_m', 'ovie'] +['E', 'c'] +['_w', 'all'] +['<', 'Button'] +['ĠF', 'AST'] +['Ġon', 'View'] +['ul', 'an'] +['ĠS', 'UPPORT'] +['Ġgesch', 'ichten'] +['ĠS', 'ons'] +['Im', 'm'] +['$', 'IFn'] +['Ġfair', 
'ness'] +['Ġd', 'pi'] +['ats', 'u'] +['J', 'osh'] +['Equal', 'ity'] +['Ġ}', '()Ċ'] +['_', 'less'] +['ĠR', 'atio'] +['ĠC', 'ats'] +['ĠS', 'tern'] +['Mon', 'ster'] +['Ġmer', 'cury'] +['ü', 'hr'] +['Ġplus', 'ieurs'] +['.des', 'erialize'] +['sc', 'opy'] +['.F', 'alse'] +[')', 'animated'] +['ĠExp', 'erts'] +['Ġ"")', '{Ċ'] +['.W', 'hen'] +['see', 'also'] +['.un', 'pack'] +['LE', 'M'] +['.select', 'All'] +['Ġperception', 's'] +['ud', 'ing'] +['ir', 'ling'] +['ĠPrint', 'ing'] +['gram', 's'] +['ĠFile', 'Stream'] +['erv', 'ille'] +['il', 'og'] +['ic', 'mp'] +['_C', 'ount'] +['Ġlivest', 'ock'] +['-', 'ca'] +['doc', 'uments'] +['Ġpo', 'les'] +['ĉw', 'ant'] +['Ġflu', 'ores'] +['Ġstand', 'point'] +['ĠH', 'uge'] +['Ġradi', 'ans'] +['ĠUIB', 'ar'] +['EDI', 'UM'] +['ĠHistor', 'ic'] +['_h', 'older'] +['ĠMar', 'ines'] +['Ġt', 'ä'] +['.L', 'ight'] +['quir', 'er'] +['ason', 'ry'] +['div', 'ider'] +['ĠFl', 'utter'] +['_f', 'b'] +['restrict', 'ed'] +['ĠEvery', 'body'] +['N', 'ão'] +['Ġkn', 'ot'] +['ĠT', 'witch'] +['Ġhall', 'way'] +['(C', 'ollider'] +['Input', 'Element'] +['?', ')Ċ'] +['/', 'off'] +['/', ')'] +['play', 'ed'] +['[', 'OF'] +['Ġbat', 'ting'] +['_d', 'l'] +['Ġcom', 'edian'] +['Ġé', 'v'] +['ĠD', 'EM'] +['ĠEd', 'en'] +[':', 'white'] +["'", "',"] +['Con', 'struction'] +['acer', 'b'] +['Ġtask', 'ed'] +['.man', 'age'] +['Rel', 'ationship'] +['Ġph', 'on'] +['n', 'z'] +['_B', 'GR'] +['Validate', 'AntiForgeryToken'] +['_', 'air'] +['âĢľ', 'When'] +['Ġgl', 'fw'] +['ĠCon', 'versation'] +['_T', 'OTAL'] +[',', 'Z'] +['Ġg', 'raz'] +['Ġiter', 'able'] +['ĠP', 'ASS'] +['Ġadvert', 'ise'] +['Ġmö', 'glich'] +['/', 'train'] +['ĠVolk', 'swagen'] +['Ġcreep', 'y'] +['Ġ"', ')čĊ'] +['QU', 'ENCE'] +['Ġalt', 'ar'] +['Ġed', 'its'] +['comp', 'iled'] +['aw', 'ning'] +['ĠD', 'ungeon'] +['Ġo', 'sg'] +['Navigation', 'Bar'] +['Ġtrend', 'ing'] +['ĠE', 'co'] +['ogg', 'les'] +['cd', 'ot'] +['|', '-'] +['S', 'ie'] +['ec', 'ret'] +['ĠN', 'egative'] +['ĠL', 'ing'] +['ĠD', 'IM'] +['ĠC', 'WE'] +['ĠCar', 'rier'] 
+['Ġcar', 'tridge'] +['_us', 'b'] +['=', 'os'] +['ĠJack', 'ie'] +['Ġo', 'tras'] +['Ġcommod', 'ities'] +['ĠP', 'resentation'] +[')&&', '('] +['ĠMar', 'tha'] +['ĠCath', 'olics'] +['ĠM', 'ond'] +['об', 'Ñĭ'] +['_', 'absolute'] +['Ġash', 'amed'] +['pons', 'ors'] +['t', 'al'] +['Ġsad', 'ness'] +['Ġpu', 'ò'] +['F', 'ade'] +['-pre', 'view'] +['ĠRequest', 's'] +['ĠCal', 'vin'] +['h', 'orn'] +['Reuse', 'Identifier'] +['(pro', 'vider'] +['/app', 's'] +['ime', 'o'] +['ĉ', 'Class'] +['S', 'amsung'] +['ĠW', 'ORLD'] +['Ġc', 'innamon'] +['dot', 'env'] +['ĠI', 'User'] +['ĠDE', 'V'] +['_C', 'har'] +['.ib', 'atis'] +['et', 'i'] +['/', 'me'] +['s', 'st'] +['.s', 'ym'] +['ĠRug', 'by'] +['-m', 'aster'] +['aj', 'ar'] +['ĠY', 'EAR'] +['Ġo', 'dp'] +['ĠR', 'oles'] +['Ġbip', 'artisan'] +['ail', 'le'] +['Ġblock', 'er'] +['Ġgre', 'ens'] +['.SE', 'CONDS'] +['Ġbelie', 'vers'] +['ĠL', 'ikes'] +['F', 'LOAT'] +['Ġm', 'ak'] +['Ġg', 'cc'] +['âķIJ', 'âķIJ'] +['("', '~/'] +['SCRIPT', 'OR'] +['Ġton', 'nes'] +['ĠS', 'ang'] +['Ġtrans', 'pose'] +['enn', 'ai'] +['P', 'red'] +['Ġsoll', 'te'] +['.github', 'usercontent'] +['(', 'print'] +['ĠH', 'ole'] +['çľ', 'ĭ'] +['ad', 'get'] +['Ġprompt', 's'] +['Ġgen', 'etically'] +['ĠH', 'od'] +['Ġvert', 'ically'] +['_control', 's'] +['ÑģÑĤ', 'ан'] +['")', '{čĊ'] +['$', 'title'] +['Ġ}', '),ĊĊ'] +['Ġstate', 'wide'] +['ĠCor', 'respond'] +['ĠAt', 'tr'] +['it', 'ant'] +['Element', 'Type'] +['Ġout', 'ward'] +['Ġfam', 'ilia'] +['(', 'article'] +['Ġbl', 'at'] +['Âł', 'Ċ'] +['Ġgl', 'Get'] +['ĠRe', 'ceiver'] +['Ġ%', '-'] +['ad', 'am'] +['W', 'inner'] +['Ġtail', 'or'] +['_p', 'wd'] +['ert', 'en'] +['St', 'an'] +['ĉ', 'all'] +['al', 'ive'] +['strt', 'otime'] +['�', 's'] +['s', 'essions'] +['$', 'conn'] +['ass', 'ist'] +['Ġchat', 'ting'] +['ĠM', 'ant'] +['Ġ%', '@'] +['Ġ""', ');ĊĊ'] +['Ġd', 'gv'] +['Ġíķ', '¨'] +['.re', 'peat'] +['_M', 'essage'] +['Ġadvis', 'ers'] +['/', 'path'] +['Ġk', 'es'] +[')', '}', '.ĊĊ'] +['ogen', 'esis'] +['ĠOPTION', 'S'] +['upt', 'ools'] +['Ġmilit', 'ant'] 
+['Ġex', 'ited'] +['ig', 'ar'] +['ĠCOM', 'M'] +['ĠDis', 'posable'] +['ay', 'cast'] +['Ġrow', 'span'] +['Ġsyn', 'thes'] +['Ġsond', 'ern'] +['ĠĊ'] +['ĠJ', 'acket'] +['R', 'ATION'] +['.getSelected', 'Item'] +['-', 'init'] +['ĠReg', 'isters'] +['_se', 'p'] +['ĠTool', 'kit'] +['.d', 'ict'] +['Ġx', 'label'] +['\\', 'Table'] +['t', 'oc'] +['_com', 'bo'] +['ĠComp', 'act'] +['Ġr', 'ugged'] +['à¥ĩ', 'à¤'] +['-man', 'agement'] +["')}}", '">Ċ'] +['ĠSt', 'amp'] +['ı', 'l'] +['ro', 'x'] +['Ġlandsc', 'apes'] +['_NOT', 'E'] +['mon', 'ary'] +['c', 'ab'] +['Ġmo', 'et'] +['x', 'af'] +['rc', 'ode'] +['-', 'cli'] +['_g', 'ate'] +['[', 'event'] +['SP', 'ORT'] +['g', 'ia'] +['ĠS', 'UPER'] +['/', 'Login'] +['_sh', 'utdown'] +['int', 'errupt'] +['Ġpret', 'ending'] +['Ġfr', 'inge'] +['ĠRed', 's'] +['ĠC', 'UDA'] +['ĠUN', 'IX'] +['v', 'it'] +['Ġbr', 'ig'] +['dr', 'v'] +['ĠConn', 'ector'] +['There', 'fore'] +['Ġl', 'ia'] +['D', 'etection'] +['_', 'actor'] +['Ġtemp', 'file'] +['Ġecc', 'entric'] +['-', 'role'] +['Ġpad', 'x'] +['d', 'ent'] +['West', 'ern'] +['Ġê', '·¸'] +['ĠApplication', 'Record'] +['Ġcampaign', 'ing'] +['_run', 'ner'] +['ĠC', 'ivic'] +['ale', 'igh'] +['Ġdire', 'kt'] +['.s', 'ul'] +['ĠĠ', 'ĉĉĉ'] +['ant', 'en'] +['Ġiss', 'uer'] +['Ġassert', 'ions'] +['(', 'orig'] +['AT', 'IO'] +['Ġlean', 'ed'] +['ä', 's'] +['.D', 'TO'] +['expl', 'ode'] +['.O', 'bservable'] +['Ġstagger', 'ing'] +['Ġkidn', 'apped'] +['Ġprogram', 'mers'] +['ĠInn', 'ov'] +['.param', 'eter'] +['Ġdom', 'ination'] +['Ġske', 'ptic'] +['Ġæĺ', '¯'] +['Ġavoid', 's'] +['.Ver', 'ify'] +['ub', 'by'] +['ĠAS', 'N'] +['Ġformat', 'o'] +['ĠBeat', 'les'] +['_b', 'rand'] +['Ġin', 'set'] +['y', 'outu'] +['Ġto', 'c'] +['-f', 'inal'] +['Show', 'ing'] +['ĠD', 'oub'] +['ĠM', 'esa'] +['Ad', 'j'] +['_m', 'edium'] +['Cre', 'ates'] +['(end', 'point'] +['ĉ', 'UP'] +['bb', 'ie'] +['Ġst', 'alk'] +['.datab', 'ind'] +['.S', 'can'] +['ag', 'ents'] +['$', ','] +['ind', 'ividual'] +['+', ')/'] +['ĉv', 'm'] +['(not', 'ification'] +['Ġin', 'ex'] 
+['ĠClass', 'ification'] +['ren', 'o'] +['Ġo', 'lig'] +['-r', 'ated'] +['Ġform', 'ulation'] +["',", '{'] +['Ġa', 'cept'] +['_un', 'pack'] +['_C', 'A'] +['.P', 'ow'] +['ĉ', 'im'] +['Ġal', 'uminium'] +['AN', 'O'] +['Ġx', 'n'] +['Ġcó', 'mo'] +['ĠIng', 'redient'] +['Ġseiz', 'ures'] +['åħ', '±'] +['ific', 'ador'] +['Ġsigu', 'iente'] +['ĠIn', 'fragistics'] +['Ġduplic', 'ated'] +['ĠDe', 'e'] +['Ġn', 'ø'] +['ĠAC', 'CEPT'] +['(c', 'rate'] +['иÑĤ', 'елÑĮ'] +['-', 'less'] +['Ġinf', 'inity'] +['An', 'alyzer'] +['-D', 'ay'] +['rit', 't'] +['(c', 'in'] +['ĠG', 'y'] +['Ġmulti', 'plied'] +['uch', 'i'] +['ĠBald', 'win'] +['/', 'ip'] +['Ġshort', 'cuts'] +['.A', 'DD'] +['Ġvig', 'or'] +['_in', 'struction'] +['(', ';'] +['_', 'eta'] +['è¿', 'ŀ'] +['utor', 'ials'] +['Ġboost', 'ing'] +['b', 'v'] +['Ġacknowled', 'ges'] +['List', 'ening'] +['FA', 'Q'] +[';', 'b'] +['((', '-'] +['Ġarchitect', 's'] +['Ġz', 'we'] +['Ġpul', 's'] +['Ġget', 'Count'] +['ver', 'bs'] +['ãĢ', 'ľ'] +['(C', 'ollection'] +['k', 're'] +['Ġjuris', 'dictions'] +['_b', 'ridge'] +['ĠCr', 'ack'] +['ĠDiff', 'iculty'] +['K', 'O'] +['Res', 'ervation'] +['_re', 'quires'] +['T', 'our'] +['ãģĹãģ', 'Ł'] +['.set', 'Current'] +['Ġk', 'y'] +['ĠAlb', 'any'] +['Ġè', '§'] +['ll', 'er'] +['agn', 'a'] +['work', 'ers'] +['.bl', 'ank'] +['ĠPr', 'ayer'] +['M', 'IC'] +['Ġresil', 'ience'] +['Te', 'X'] +['ĠL', 'anguages'] +['st', 'udy'] +['ĉc', 'urr'] +['Ġenzym', 'es'] +['Sl', 'ug'] +['ĠíĮ', 'Į'] +['str', 'al'] +['Ġtum', 'ors'] +['Ġseg', 'unda'] +["='", '{'] +['in', 'struction'] +['ĠL', 'isp'] +['/', 'info'] +['Ġ"', '{$'] +[',:', '),'] +['Ġg', 'v'] +['(', 'ErrorMessage'] +["Ġ'", '='] +['}-', '${'] +['.Doc', 'uments'] +['"', 'Well'] +['Ġreminis', 'cent'] +['Ġg', 'az'] +['iro', 'pr'] +['eh', 'r'] +['Ġsup', 'pressed'] +['ers', 'h'] +['.scroll', 'To'] +['Ġcad', 'ena'] +['Ġgame', 'State'] +['ÃŃ', 'm'] +['(', 'conv'] +['ĠTom', 'orrow'] +['ĠC', 'CT'] +['M', 'ongo'] +['ul', 'g'] +['.C', 'amera'] +['.hand', 'lers'] +['m', 'ph'] +['Ġst', 'k'] +['Ġgen', 
'etics'] +['AC', 'ING'] +['Tr', 'ivia'] +['ĠB', 'am'] +['(m', 'arker'] +['.St', 'retch'] +['ĠSun', 'ni'] +['ĠBet', 'ty'] +['.t', 'olist'] +['un', 'likely'] +['.Rect', 'angle'] +['ob', 'solete'] +['IL', 'ON'] +['inner', 'Text'] +['emb', 'ourg'] +['a', 'N'] +['ĠV', 'ehicles'] +['un', 'lock'] +[':', 'utf'] +['n', 'ob'] +['ĠSee', 'ing'] +['ĠNE', 'VER'] +['Ġt', 'ls'] +['Ġfil', 'les'] +['Ġbenef', 'ited'] +['ĠCl', 'int'] +['*/', '),'] +['.f', 'old'] +['Ġpos', 'ible'] +['A', 'DED'] +['th', 'ouse'] +['.D', 'AL'] +['ĠO', 'dd'] +['ro', 'kes'] +['ĠSun', 'ny'] +['ĠPartial', 'Eq'] +['_B', 'uffer'] +['ĠLe', 'vi'] +['long', 'rightarrow'] +['eld', 'on'] +['g', 'ages'] +['_w', 'arn'] +['.Create', 'Table'] +['ĠD', 'ip'] +['_', 'questions'] +['.log', 'ic'] +['Ġ#', '"'] +['={()', '=>'] +['Ġt', 'ep'] +['Ġju', 'icy'] +['ì', 'Ĥ¬'] +['en', 'ko'] +['ia', 'lect'] +['Ù', 'ī'] +['Ġon', 'board'] +['Ġæ', 'ı'] +['ĉ', 'rt'] +['_', 'UTF'] +['ĠQ', 'Action'] +['âĢ', 'ŀ'] +['(', 'Component'] +['(a', 'udio'] +['.h', 'it'] +['g', 'te'] +['Ġprogram', 'med'] +['state', 'Params'] +['Ġpoly', 'ester'] +['f', 'ires'] +['by', 'ss'] +[']', '=('] +['_', 'quality'] +['Of', 'Day'] +['ĠFair', 'y'] +['Ġy', 'elled'] +['op', 'l'] +['(user', 'Name'] +['ĠD', 'ifference'] +['Ġevalu', 'ations'] +['iff', 'any'] +['Ġcycl', 'ists'] +['Ġc', 'idade'] +['Ġtext', 'book'] +['Ġprof', 'iling'] +['__', '),'] +['de', 'a'] +['.', 'activate'] +['Ġindic', 'ations'] +['Ð', 'ķ'] +['Touch', 'UpInside'] +['Ġinval', 'uable'] +['ĠM', 'ASK'] +['Ġcont', 'end'] +['F', 'req'] +['Ġrecru', 'its'] +['(int', 'erval'] +['ĠUser', 'Profile'] +["Ġ'./", '../'] +['ed', 'u'] +['_C', 'allback'] +['Ġanal', 'ogy'] +['ĠTro', 'phy'] +['app', 'hire'] +['V', 'ideos'] +['ĠCh', 'er'] +['ĠH', 'av'] +['â̦', '"'] +['.', 'validator'] +['g', 'fx'] +['ĠU', 'Object'] +['class', 'names'] +['tri', 'angle'] +['ĠEnc', 'oder'] +['.s', 'py'] +['Ġpred', 'ators'] +['=', 'status'] +['-s', 'afe'] +[':', '",Ċ'] +['ĠIn', 'cluding'] +['Ġ{}', ';čĊ'] +['*', 'cos'] +['Ġend', 'ured'] 
+['.sul', 'ake'] +['Ġnurs', 'ery'] +['Ġfrag', 'rance'] +['Ġre', 'building'] +['Ġn', 'th'] +['ĠFr', 'aser'] +['.set', 'Date'] +['ĠV', 'ince'] +['_RE', 'ST'] +['Ġvent', 'ilation'] +['æµ', '·'] +['cri', 'bes'] +['.as', 'm'] +['lp', 'Vtbl'] +['ĠA', 'be'] +['uis', 'ine'] +[',', 'array'] +['ĉ', 'className'] +['err', 'als'] +["Ġ'", 'ĊĊ'] +['Check', 'out'] +['Ġsol', 'icit'] +['A', 'ux'] +['_c', 'apture'] +['Ġrib', 's'] +['rag', 'on'] +['vi', 'ol'] +['top', 'ics'] +['Function', 'Flags'] +['ĠM', 'arty'] +['b', 'ike'] +['ĠT', 'ucker'] +['(k', 'ernel'] +['ĠO', 'ps'] +['Close', 'Operation'] +['/d', 'emo'] +['ild', 'a'] +['ĠlÃŃ', 'nea'] +['APP', 'ING'] +['Ġsu', 'ites'] +['.visit', 'VarInsn'] +['ur', 'us'] +['ĠMin', 'ute'] +['(m', 'anager'] +['Ġbutter', 'fly'] +['Ġap', 'are'] +['Ġw', 'olves'] +['J', 'WT'] +['ĠSal', 'on'] +['ĉd', 'elay'] +['-es', 'lint'] +['is', 'ations'] +['.r', 'pc'] +[')|', '('] +['ĠSnap', 'chat'] +['/m', 'm'] +['M', 'N'] +['cer', 'ies'] +['.text', 'Alignment'] +['ĠFrank', 'furt'] +['Ġad', 'o'] +['(new', 'Value'] +['(', 'access'] +['(', 'Expression'] +['ĠSign', 'In'] +['ĠHait', 'i'] +['_t', 'p'] +['.set', 'Parameter'] +['Min', 'ute'] +['Ġmanual', 's'] +['ric', 'anes'] +['ĠP', 'TR'] +['ĠOut', 'er'] +['Ġget', 'line'] +['oc', 'ations'] +['_C', 'D'] +['ĠLy', 'on'] +['/g', 'ui'] +['_l', 'ive'] +['id', 'an'] +['.ge', 'om'] +['Ġborder', 'Bottom'] +['im', 'uth'] +['_check', 'point'] +['Ġme', 'u'] +['ĠIr', 'ving'] +['Ġpeu', 'vent'] +['(M', 'AX'] +['ĠAR', 'CH'] +['Ġp', 'ov'] +['.source', 'forge'] +['Ġjam', 'ais'] +['Ġar', 'k'] +['ĠBaghd', 'ad'] +['ĠC', 'LEAR'] +['Menu', 'Bar'] +['Ġtro', 'is'] +['CHED', 'ULE'] +['Ġ#', 'čĊ'] +['(C', 'all'] +['$', 'order'] +['(M', 'aterial'] +['Ġencontr', 'ado'] +['$', 'list'] +['ĠMETHOD', 'S'] +['.begin', 'Transaction'] +['_M', 'AG'] +['Style', 'Sheet'] +['Ġmaj', 'ors'] +['Ġindef', 'initely'] +['clean', 'up'] +['Ġhom', 'eland'] +['(d', 'to'] +['D', 'ates'] +['P', 'resentation'] +['ĠD', 'K'] +['={`', '/'] +['ĉ', 'Key'] +['(', 'Block'] 
+['_check', 'box'] +['ne', 'eds'] +['Ġon', 'Complete'] +['ric', 'o'] +['Ġgle', 'ich'] +['Ġx', 'm'] +['O', 'OD'] +['B', 'etter'] +['ĠSQL', 'ITE'] +['.', 'Book'] +['x', 'ad'] +['ĠG', 'one'] +['ĉd', 'p'] +['Ġdev', 'otion'] +['Ġst', 'm'] +['Ġobs', 'ess'] +['ĠBack', 'end'] +['Qu', 'eries'] +['I', 'k'] +['//', '****************************************************************'] +['Ġdivid', 'ends'] +['.parent', 'Element'] +['}', '")ĊĊ'] +['ĠMaterial', 'PageRoute'] +[':', 'num'] +['Ġexp', 'lic'] +['ĠO', 'L'] +['le', 'ast'] +['O', 'ops'] +['iment', 'os'] +['Ġins', 'urers'] +['Ġhero', 'ic'] +['ĉf', 'ields'] +['.img', 'ur'] +['.btn', 'Cancel'] +['ĠDetect', 'ive'] +['(s', 'm'] +['ĠMutable', 'LiveData'] +['.l', 'ab'] +['((', '['] +['Ġha', 'irst'] +['ĠTrans', 'actions'] +['å¼Ģ', 'å§ĭ'] +['Ġstd', 'Class'] +['uent', 'o'] +['G', 'IS'] +['_c', 'od'] +['Instruction', 's'] +['C', 'alls'] +['Pointer', 'Type'] +['ĠR', 'w'] +['Ġassort', 'ment'] +['ĠD', 'IG'] +['+', 'r'] +['_C', 'ERT'] +['Ġinst', 'ability'] +['Ġv', 'ib'] +['on', 'as'] +['Ġro', 'ku'] +['ap', 'ellido'] +['Ġan', 'gl'] +['prene', 'ur'] +['Ġfluid', 's'] +['ise', 'ase'] +['Ġde', 'ed'] +['qu', 'ist'] +['_CONST', 'ANT'] +['Ġequ', 'ilibrium'] +['_de', 'legate'] +['ĠQuant', 'um'] +['re', 'i'] +['Cap', 'abilities'] +['rect', 'angle'] +['?', '><'] +['al', 'ien'] +['ĠJ', 'ug'] +['D', 'NA'] +['T', 'ickets'] +['Occ', 'urs'] +['ĠHaw', 'k'] +['.setHorizontal', 'Group'] +['\\', 'Collection'] +['ff', 'iti'] +['Ġre', 'arr'] +['.setVertical', 'Group'] +['Ġc', 'avity'] +['Ġadult', 'e'] +['Fac', 'ade'] +['-', 'wh'] +['ĠL', 'OL'] +['Ø', '°'] +['Ġgrand', 'parents'] +['Sw', 'ift'] +['ĉw', 'x'] +['æīĢ', 'æľī'] +['if', 'en'] +['ff', 'set'] +['B', 'eyond'] +['//', '}ĊĊ'] +['Ġw', 'ager'] +['Ġb', 'ury'] +['Ġcomm', 'ence'] +['reg', 'istro'] +['sc', 'ient'] +['ĠPer', 'cent'] +['Ġд', 'олж'] +['(', 'identifier'] +['.set', 'Model'] +['Ġs', 'eldom'] +['nt', 'on'] +['Ġappl', 'iance'] +['am', 'us'] +['rys', 'ler'] +['Ġpant', 'ies'] +['engu', 'ins'] +['Ġmim', 
'ic'] +['Ġon', 'Changed'] +['Ġal', 'coholic'] +['.reload', 'Data'] +['Ch', 'arge'] +['ĠF', 'ax'] +['Ġj', 'ScrollPane'] +['Emp', 'resa'] +['Ġsh', 'attered'] +['x', 'ba'] +['Font', 's'] +['?', 's'] +['Ġpost', 'season'] +['ret', 'ain'] +['_r', 'ates'] +['Ġrequest', 'Code'] +['.t', 'odo'] +['´', 's'] +['CH', 'K'] +['ĠKeep', 'ing'] +['enge', 'ance'] +['Ġvs', 'code'] +['IPP', 'ING'] +['Default', 'CloseOperation'] +['_', 'raise'] +['ĠO', 'culus'] +['ogram', 's'] +['ra', 'j'] +['pc', 'i'] +['Ġcorros', 'ion'] +['.handle', 'Submit'] +['Access', 'ible'] +['ĠP', 'iano'] +['l', 'ittle'] +['AC', 'L'] +['Äĩ', 'e'] +['.un', 'wrap'] +['ĠCon', 'vers'] +['ĠLe', 'ben'] +['ione', 'er'] +['ĠMer', 'chant'] +['ĠJ', 'orge'] +['Ġembr', 'acing'] +['Ġvent', 'a'] +['á', 'st'] +['Ġvi', 'ene'] +['<', 'QString'] +['Ġexplos', 'ions'] +['Ġdistur', 'bed'] +['."', '<'] +['m', 'emo'] +['ĠAb', 'original'] +['Ġcomple', 'to'] +['Tex', 'Parameter'] +['Ġuom', 'ini'] +['(', 'agent'] +['Ñĥ', 'ÑĢ'] +['ĠWh', 'olesale'] +['/', 'am'] +['ĠBook', 'mark'] +['dr', 'agon'] +['Ġglo', 've'] +['Ġ"', '"));Ċ'] +['iv', 'ariate'] +['now', 'rap'] +['In', 'Children'] +['.B', 'r'] +['Ġcon', 'exion'] +['Ġback', 'bone'] +['Ġe', 'clipse'] +['Ġpersec', 'ution'] +["':", 'ĊĊ'] +['/', 'link'] +['ĠP', 'ero'] +['and', 'as'] +['ĠT', 'ek'] +['.', '");'] +['-an', 'alysis'] +['Ġer', 'ad'] +['Mar', 'shal'] +['Ġanch', 'ors'] +['og', 'er'] +['Ġconver', 'gence'] +['st', 'icky'] +['Ġnave', 'g'] +['int', 'ern'] +['_DE', 'SCRIPTOR'] +['ĠConsult', 'ant'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'Ċ'] +['ĠA', 'uch'] +['Ġer', 're'] +['ÅĽ', 'li'] +['ĠHor', 'izon'] +['col', 'a'] +['Install', 'ation'] +['hot', 'mail'] +['C', 'NN'] +['.C', 'ollectors'] +['ch', 's'] +['(tr', 'ace'] +['ĠEnc', 'rypt'] +['Ġ----', '--'] +['ĠBase', 'Controller'] +['Ġag', 'ua'] +['Ġre', 'active'] +['id', 'l'] +['Ġclass', 'Names'] +['ĉ', 'Session'] +['ĠDod', 'gers'] +['H', 'ad'] +['_l', 'v'] +['Is', 'Valid'] +['ĠHEL', 'P'] +['ut', 'to'] +['ĠVer', 'ification'] +['Ġget', 'env'] +['_p', 'a'] 
+['.b', 'mp'] +[':', 'f'] +['ĠLou', 'ise'] +["('", ';'] +['/', 'socket'] +['Gr', 'anted'] +['.c', 'alendar'] +['(', 'IP'] +['ĠP', 'X'] +['.R', 'oom'] +['Ġprogram', 'm'] +['ens', 'i'] +['Ġtablesp', 'oons'] +['Ġle', 've'] +['Ġmo', 'str'] +['.t', 'ipo'] +['/', 'an'] +['(d', 'i'] +['Ġb', 'iod'] +['Ġdb', 'Context'] +['ĠJS', 'X'] +['ĉ', 'results'] +['.', 'END'] +['ht', 'e'] +['l', 'ify'] +['P', 'recision'] +['èĬ', 'Ĥ'] +['ARS', 'ER'] +[')did', 'ReceiveMemoryWarning'] +['at', 'tempt'] +['IS', 'P'] +['&', 'a'] +['_P', 'OP'] +['ĠT', 'ac'] +['Ġprepared', 'Statement'] +['Ġзап', 'иÑģ'] +['Ġow', 'ing'] +[',', 'start'] +['Ġreview', 'er'] +['Ġr', 'st'] +['Ġprop', 'Types'] +['Ġrock', 'y'] +['_lo', 'cale'] +['ĠStrateg', 'ies'] +['ĠWe', 'ber'] +['.C', 'ascade'] +['_equal', 'To'] +['Ġcos', 'as'] +['ĠDe', 'letes'] +['ĠMax', 'im'] +['Ġsh', 'rimp'] +['re', 'trieve'] +['.In', 'clude'] +['IG', 'IN'] +['ĠO', 'E'] +[']', ');čĊčĊ'] +['.en', 'umer'] +['Ġco', 'ef'] +['_N', 'ull'] +['R', 'a'] +['ty', 'ard'] +['ĠSh', 'awn'] +['keep', 'ers'] +['Ġq', 'q'] +['_s', 'b'] +['om', 'ens'] +['ĠExec', 'utes'] +['#', '"'] +['TT', 'Y'] +['ĠValue', 'Type'] +[');', '*/Ċ'] +['ĠAbs', 'olutely'] +['ĠT', 'ottenham'] +['/', 'art'] +['Ġbless', 'ings'] +['Ġswift', 'ly'] +['b', 'uster'] +['Ġa', 'vid'] +['COM', 'M'] +[',', 'temp'] +['Ġ}', '?>Ċ'] +['-g', 'rowing'] +['Ġdeep', 'copy'] +['A', 'ck'] +['egg', 'ies'] +['Ġ__', '("'] +['Ġno', 'ir'] +['terror', 'ism'] +['Ġanth', 'em'] +['ag', 'ency'] +['_PACK', 'AGE'] +['ĠC', 'losure'] +['.reg', 'istry'] +['Ġmamm', 'als'] +['<', 'L'] +['U', 'ICollectionView'] +['ĠLED', 's'] +['Ġvol', 'ley'] +['(', 'Buffer'] +['_N', 'ATIVE'] +['lib', 'c'] +['impl', 'ode'] +['Scroll', 'Bar'] +['ĠMar', 'ion'] +['.Con', 'tracts'] +['_A', 't'] +['ĠWe', 'instein'] +['compare', 'To'] +['ĠH', 'ose'] +['en', 'ity'] +['.create', 'Query'] +['_r', 'outer'] +['Ġstim', 'uli'] +['Ġ++', ')'] +['ĠCh', 'amp'] +['ĠBay', 'ern'] +['ass', 'a'] +['.v', 'a'] +['Ġdistrib', 'utors'] +['Ġfile', 'private'] +['Ġdepart', 
'ed'] +['cc', 'cc'] +['@', 'click'] +['ĠL', 'unch'] +['>', 'L'] +['Ġbl', 'uetooth'] +['.De', 'ep'] +['-', 'standing'] +['ác', 'il'] +['Ġro', 'oft'] +['ĠPath', 's'] +['_iter', 'ations'] +['Invalid', 'ArgumentException'] +['.s', 'pi'] +['ĠUIAlert', 'Action'] +['uy', 'e'] +['sign', 'in'] +['.p', 'riority'] +['ĠEss', 'ays'] +["='", '{$'] +['Ġè¿', 'ĶåĽŀ'] +['_s', 'igned'] +['.p', 'ersist'] +['Ġred', 'esign'] +['To', 'Lower'] +['ĠNew', 'man'] +['=', 'start'] +['ĠIsrael', 'is'] +['asis', 'wa'] +['Spe', 'ech'] +['Ġnum', 'eros'] +['hand', 'lers'] +['ĠW', 'ong'] +['Ġм', 'еÑĤод'] +['We', 'ights'] +['ĠGu', 'jar'] +['te', 'il'] +['ĠNon', 'etheless'] +['_E', 'FFECT'] +['Ġv', 'ect'] +['ĠO', 'sc'] +['Ġco', 'ats'] +['ĠW', 'heat'] +['Ġge', 'ek'] +['ĠPRO', 'PERTY'] +['w', 'orm'] +['_const', 'ants'] +['ĠB', 'oulder'] +['ĠP', 'arm'] +['co', 'le'] +['Ġdefault', 'Center'] +['ĠRou', 'ge'] +[':', 'A'] +['xc', 'f'] +['ĠVen', 'ice'] +['med', 'ian'] +['Ġred', 'emption'] +['F', 'resh'] +['Ġcos', 'm'] +['Ġfig', 'ur'] +['Ġref', 'urb'] +['CO', 'PE'] +['.c', 'd'] +['Ġch', 'ords'] +['ĠS', 'gt'] +['Å', 'į'] +['VP', 'N'] +['ĠS', 'END'] +['ain', 'en'] +['_account', 's'] +['Ġtent', 'h'] +['Ġdiss', 'olved'] +['<', 'App'] +['ĠCover', 'age'] +['use', 'State'] +['é', 'ro'] +['..', '<'] +['Ġì', '£¼'] +['Ġdream', 'ing'] +['ĠFore', 'cast'] +['.C', 'ursors'] +['Ġvis', 'as'] +['/', 'script'] +['_start', 'ed'] +['Ġga', 'str'] +['(P', 'RO'] +['];', '//'] +['.T', 'ile'] +['*', 'sin'] +['(', 'Adapter'] +['ĠSand', 'ra'] +['_S', 'IG'] +['ard', 'ash'] +['ĠO', 'val'] +['Ġdescri', 'pcion'] +['(s', 'l'] +['ĠDes', 'criptor'] +['Ġ`', '$'] +['/f', 'ree'] +['ĠKey', 'words'] +['Ġt', 'udo'] +['ion', 'ale'] +['(f', 'ound'] +['.x', 'yz'] +['ĠGeneration', 'Type'] +['_DISABLE', 'D'] +['(', 'area'] +['Ġel', 'ites'] +['Ġh', 'ombre'] +['(m', 'essages'] +['ĠR', 'ac'] +['Ġext', 'ingu'] +['ĠEst', 'a'] +['op', 'o'] +['.', 'vel'] +['mouse', 'out'] +['Ġconv', 'olution'] +['ĠHand', 'ling'] +['Ġceil', 'ings'] +['T', 'ek'] +['ĠAre', 'as'] 
+['.writer', 'ow'] +['<', 'View'] +['ĠCorn', 'ell'] +['_B', 'IN'] +['.in', 'valid'] +["''", "'čĊ"] +['ie', 'ż'] +['_P', 'osition'] +['Ġk', 'idding'] +['PC', 'ODE'] +['Ġwatch', 'er'] +['lo', 'x'] +['Ġâ', 'Ĺ'] +['D', 'ave'] +['_all', 'ow'] +['Ġbis', 'exual'] +['Ġun', 'ordered'] +['ĠSch', 'we'] +['_se', 'gments'] +['Ġt', 'earing'] +['IN', 'LINE'] +['Ġund', 'es'] +['.g', 'oods'] +['.c', 'am'] +['ĠL', 'W'] +['ĉ', 'where'] +['Cal', 'culator'] +['-th', 'reat'] +['-', 'alert'] +['ĠSuz', 'uki'] +['ĠIP', 'A'] +['ĠAtt', 'achment'] +['AC', 'CESS'] +['(d', 'type'] +['O', 'pp'] +['_s', 'ymbols'] +['Ġdans', 'ke'] +['l', 'age'] +['or', 'get'] +['res', 'olution'] +['е', 'Ñĩ'] +['ĠQ', 'Color'] +['ĠBar', 'rett'] +['аÑĨи', 'Ñı'] +['=', "\\'"] +['ĠNav', 'Controller'] +['/', 'ref'] +['(c', 'ountry'] +['_H', 'DR'] +['Ġterse', 'but'] +['pet', 'ition'] +['Ġsu', 'f'] +['cred', 'its'] +['à¹', 'Į'] +['x', 'm'] +['ĠDav', 'ies'] +['.re', 'ddit'] +['Ġw', 'oven'] +['ĠO', 'bl'] +['ĠK', 'M'] +['ĠConsider', 'ing'] +['ens', 'ored'] +['.per', 'iod'] +['Ġd', 'dl'] +['$', 'wp'] +['Ġextrem', 'ist'] +[';', '\\Ċ'] +['Ġk', 'im'] +['al', 'ers'] +['Ġspan', 'ning'] +['Ġco', 'herent'] +['Ġconse', 'gu'] +['.text', 'Label'] +['.g', 'eneral'] +['_d', 'ashboard'] +['л', 'ение'] +['k', 'ick'] +['_P', 'ID'] +['ĠExt', 'ensions'] +['reg', 'exp'] +['ĠCl', 'ause'] +['_m', 'ov'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠĠ'] +['ĠR', 'eward'] +['ĠLEG', 'O'] +['A', 'k'] +['=-=-', '=-=-'] +['ĉ', 'parser'] +['Ġon', 'ze'] +['éĢ', 'Ģ'] +['âĢĿ', 'ãĢĤ'] +['_b', 'all'] +['(r', 'hs'] +['Ġch', 'orus'] +['<', 'count'] +['as', 'urable'] +['Ġwirk', 'lich'] +['ĠEr', 'in'] +['ĠMS', 'NBC'] +['Ġet', 'ter'] +['ĠC', 'ron'] +['_F', 'LOW'] +['Ġ,', 'čĊ'] +['Ġcal', 'idad'] +['ĠFile', 'Writer'] +['ĉ', 'stmt'] +['(', 'Byte'] +['_p', 'at'] +['Ġte', 'lescope'] +['Ġgre', 'ed'] +['ĠT', 'ort'] +['(w', 'rite'] +['\\', 'application'] +['ĉRT', 'LR'] +['ĠConfiguration', 'Manager'] +['Un', 'ix'] +['End', 'Time'] 
+['In', 'cludes'] +['ĠHar', 'vest'] +['en', 'berg'] +['ĠAustral', 'ians'] +['Ġë', 'ĵ'] +['Ġr', 'n'] +['Ġreput', 'able'] +['Ġbl', 'ending'] +['UL', 'ATION'] +['ĠBrend', 'an'] +['d', 'ad'] +['Ġm', 'ø'] +['ĠW', 'oo'] +['_d', 'c'] +['U', 'ne'] +['Ġr', 'ue'] +['with', 'in'] +['ang', 'ep'] +['Ġp', 'ouch'] +['\\"', '",'] +['ĠS', 'ic'] +['âĢĿ', '),'] +['aly', 'ze'] +['ĠG', 'ef'] +['c', 'overs'] +['Ġd', 'bo'] +['replace', 'All'] +['ĉ', 'Logger'] +['Try', 'ing'] +['[', 'state'] +['-p', 'iece'] +['éĸ', 'ĵ'] +['beh', 'avior'] +['all', 'ows'] +['l', 'rt'] +['_p', 'ython'] +['ert', 'ura'] +['-c', 'ountry'] +['ĠT', 'G'] +['.UI', 'Manager'] +['b', 'ens'] +['ale', 'x'] +['ĠBre', 'itbart'] +['b', 'ac'] +['Ġpredict', 's'] +['Ġg', 'ab'] +['Ġcard', 'inal'] +['.Time', 'Unit'] +['ĠVis', 'itor'] +['ĠM', 'ing'] +['Ġliv', 're'] +['Ġparent', 'Id'] +['port', 'un'] +['Ġdimension', 'al'] +['ĠV', 'est'] +['en', 'ic'] +['à', '³'] +['Ġ', 'Ùĩ'] +['ĠBL', 'UE'] +['Ġitem', 'Count'] +['Ġfe', 'athers'] +['ĉp', 'stmt'] +['ĠPol', 'ar'] +['{', '//'] +['und', 'i'] +['Ñĥ', 'ж'] +['z', 'ar'] +['Error', 'Response'] +['ì', 'ĥģ'] +['Rep', 'resentation'] +['*', '_'] +['+', ']'] +['pre', 'pend'] +["Ġ'", '>'] +['Ġlegitim', 'acy'] +['Ġo', 'o'] +['S', 'linky'] +['Ġnation', 'als'] +['.', 'words'] +[';', 'p'] +['tr', 'ap'] +['oman', 'ip'] +['Ġc', 'ues'] +['Ġgradu', 'ating'] +['Ġsem', 'aphore'] +['"]', ');ĊĊ'] +['ace', 'y'] +['RE', 'ET'] +['Gr', 'ab'] +['ĠFel', 'ix'] +['(', 'Id'] +['_ne', 'ighbors'] +['Ġmeaning', 'less'] +['(d', 'el'] +['Ġj', 'eder'] +['ĠContent', 'Values'] +['.abs', 'olute'] +['/', 'cl'] +['Ġx', 'b'] +['dat', 'um'] +['Ġtort', 'ured'] +['Ġrub', 'bing'] +['S', 'cores'] +['ĠðŁĺ', 'ī'] +['Ġav', 'ons'] +['Ġam', 'sterdam'] +['E', 'OS'] +['H', 'al'] +['Ġtrust', 'worthy'] +['#', '='] +['.EX', 'TRA'] +['Ġman', 'o'] +['is', 'icing'] +['-s', 'upport'] +['ĉc', 'ursor'] +['ĠSp', 'o'] +['aim', 'assage'] +['M', 'ission'] +['[]', '{"'] +['Ġprint', 'ers'] +['G', 'REEN'] +['Ġt', 'eg'] +['Ġabdom', 'inal'] +['!', 
'ĊĊĊĊĊĊ'] +['.Sh', 'ort'] +['аз', 'в'] +['ĠGift', 's'] +['}', '")'] +['(b', 'inding'] +['x', 'ce'] +['âĢ', 'ij'] +['inf', 'os'] +['Form', 'Data'] +['Ġd', 'art'] +['Ġele', 'ms'] +['(in', 'v'] +['Y', 'L'] +['t', 'in'] +['GEN', 'ER'] +['á»', '¯'] +['ĠT', 'aken'] +['uck', 'le'] +[':', 'e'] +['Ġspect', 'ral'] +['.b', 'aidu'] +['/', "');Ċ"] +['Ġgre', 'edy'] +['es', 'ion'] +[',,,,', ',,,,'] +['Ġ/>', ',Ċ'] +['Internal', 'ServerError'] +['NSNotification', 'Center'] +['ĠA', 'i'] +['Ġsp', 'it'] +['Ġaug', 'mented'] +['Ġstandard', 'UserDefaults'] +['FIN', 'ITY'] +['R', 'ace'] +[':', 'C'] +['ĠRE', 'CORD'] +['ĠHigh', 'light'] +["Ġ'", '`'] +['Ġdef', 'icits'] +['Ġne', 'i'] +['Ġresearch', 'ed'] +['T', 'a'] +['Ġc', 'opp'] +['.Get', 'HashCode'] +['):', 'čĊčĊ'] +['On', 'Click'] +['ĠWell', 'ington'] +['Ġrev', 'ival'] +['æ¯', 'Ķ'] +['éĹ', '®'] +['ĠN', 'SS'] +['Ġfor', 'n'] +['Ġint', 'é'] +['ĠKu', 'wait'] +['_fl', 'ip'] +['_', 'bo'] +['_', '\\'] +['Ġocc', 'urrences'] +['ĠScient', 'ists'] +['S', 'RC'] +['og', 'ens'] +['igr', 'ant'] +['RE', 'MOTE'] +['ĠS', 'ID'] +['.', 'opts'] +['u', 've'] +['()', '])Ċ'] +['Ġlibert', 'arian'] +['ĠGl', 'ide'] +['les', 'en'] +['Ġform', 'e'] +['ow', 'ania'] +['Ġannoy', 'ed'] +['Def', 's'] +['ĠExec', 'utor'] +['Ġcast', 's'] +['.set', 'Checked'] +['ĠSh', 'aring'] +['.Serialize', 'Object'] +['Ġselect', 'ors'] +['_', 'OTHER'] +['ë¯', '¸'] +['(s', 'uper'] +['(', 'OS'] +['_VER', 'IFY'] +['id', 'unt'] +['<', 'header'] +['Ġ/>', "';Ċ"] +['Ġvidé', 'o'] +['ĠNeg', 'ro'] +['ĠL', 'ords'] +['ĠT', 'ours'] +['Ġsoft', 'ly'] +['.re', 'ceive'] +['ĠE', 'RC'] +['Ġdata', 'Set'] +['Bad', 'ge'] +['ĉ', 'Event'] +['Ġper', 'l'] +['Ġ{}', '\\'] +['(s', 'entence'] +['Or', 'Update'] +['Ġdim', 'inish'] +['P', 'IN'] +['(d', 'raw'] +['.To', 'DateTime'] +['.Equal', 'To'] +['(p', 'in'] +['-p', 'encil'] +['lu', 'ent'] +['ĠCall', 'er'] +['Ġplay', 'ful'] +['-', "'+"] +['x', 'ca'] +['sw', 'ick'] +['){', '}Ċ'] +['}:', '${'] +['ĠM', 'eth'] +['.get', 'Cell'] +['.b', 'reak'] +['Ġy', 'max'] +["='", 'Ċ'] 
+['ĠH', 'iro'] +['(', 'TRUE'] +['as', 'urer'] +['Ġcu', 'er'] +['U', 'ber'] +['.', 'Operation'] +['Ġol', 'an'] +['Ġthr', 'illing'] +['<', 'Response'] +['ĠF', 'emin'] +['Ġtravers', 'al'] +['Ġp', 'oc'] +['Ġset', 'Status'] +['decl', 'ar'] +['std', 'afx'] +['Ġaddict', 'ive'] +['ĠB', 'tn'] +['Ġexplos', 'ives'] +['ĠCook', 'ing'] +['ĠPl', 'aint'] +['Ġaccum', 'ulator'] +['ĠApp', 'ointment'] +[',', 'password'] +['ĠF', 'AR'] +['lu', 'et'] +['Further', 'more'] +['decl', 'spec'] +['_Static', 's'] +['.D', 'ictionary'] +['">', "'."] +['ĉ', 'valid'] +['"', '",'] +['In', 'strument'] +['>', 'J'] +['Ġno', 'str'] +['ĠR', 'ift'] +['_P', 'ort'] +['Ġvec', 'es'] +['[', "['"] +['Ġrall', 'ies'] +['-', 'series'] +['Ġv', 'v'] +['.', 'uc'] +['Ġr', 'tn'] +['State', 'Changed'] +['(', 'ins'] +['ĠCl', 'a'] +['------------', 'Ċ'] +['c', 'us'] +['ĠRel', 'oad'] +['//----------------------------------------------------------------', '--------------------------------'] +['.se', 'conds'] +['_dest', 'ination'] +['Ġscrew', 'ed'] +['>', 'c'] +['Th', 'ickness'] +['Design', 'er'] +['Ġgr', 'ids'] +['n', 'Äħ'] +['(', 'cookie'] +['T', 'rip'] +['-M', 'obile'] +['Ġv', 'oll'] +['Ġgen', 'ital'] +['Ġconf', 'isc'] +['ĠConfeder', 'ate'] +['Ġweb', 'View'] +['Ġm', 'ise'] +['Ġcl', 'er'] +['(se', 'lection'] +['$', 'date'] +['Ġshar', 'pen'] +['rag', 'en'] +['And', 'Update'] +['Ġrem', 'ix'] +['Ġh', 'tons'] +['R', 'W'] +['M', 'PI'] +['Ġretrie', 'val'] +['Ġric', 'hest'] +['.Dec', 'ode'] +[':init', 'Components'] +['ĠT', 'Value'] +['S', 'aint'] +['@', 'include'] +['ĠPER', 'SON'] +['.se', 'p'] +['ĠLD', 'AP'] +['g', 'ba'] +['Ġgro', 'ÃŁe'] +['Ġreli', 'ably'] +['ĠD', 'FS'] +['.getItem', 'Id'] +['Ġprés', 'ent'] +['.get', 'Token'] +['Ġch', 'inese'] +['ĠMe', 'al'] +['Y', 'OU'] +['">', '>ĊĊ'] +['b', 'ower'] +['Ġsw', 'apped'] +['/', 'install'] +['Ġs', 'inks'] +['etr', 'ize'] +['Ġdecl', 'ines'] +['ĉm', 'ysql'] +['ĠC', 'String'] +['ĠMotion', 'Event'] +['.L', 'anguage'] +['R', 'oad'] +['ÑĤ', 'еÑĢ'] +['asc', 'imento'] +["'))", '->'] +['.', 
'about'] +['(', 'editor'] +['ĠR', 'atings'] +['in', 'come'] +['Å¡', 'e'] +['.de', 'queueReusableCell'] +['ĠAust', 'rian'] +['Ġs', 'ulla'] +['ĠTrib', 'unal'] +['ĠDid', 'n'] +['ов', 'аÑĢ'] +['Ġins', 'pections'] +['B', 'oss'] +['Ġcock', 'tails'] +['Ġapolog', 'ized'] +['_sub', 'plot'] +['op', 'al'] +['+', '=('] +['Ġreson', 'ance'] +['ib', 'u'] +['Ġë', '¦¬'] +['rom', 'a'] +['res', 'erve'] +['pl', 's'] +['ĠT', 'ah'] +['ax', 'ies'] +['OP', 'LE'] +['ĠDar', 'ren'] +['ĠZ', 'ombie'] +['_M', 'ap'] +['Ġ]', ')ĊĊ'] +['ĠQ', 'i'] +['ĠS', 'ail'] +['Ġrestrict', 'ive'] +['Ġeros', 'ion'] +['-', 'par'] +['WH', 'ITE'] +['Ġold', 'u'] +['Ġap', 'erture'] +['Ġbit', 'coins'] +['text', 'o'] +['ĠCom', 'cast'] +['Ġtime', 'less'] +['en', 'kins'] +['Ġfeed', 'er'] +['/', 'tmp'] +['res', 'den'] +["+'", '_'] +['.D', 'estroy'] +['Ġç', 'ok'] +['ĠD', 'OCUMENT'] +['.l', 'ng'] +['.tag', 'Name'] +['Ġk', 'ullan'] +['eg', 'rate'] +['Ġ(*', '.'] +['ç¼ĸ', 'è¾ij'] +['Ġhand', 'shake'] +['s', 'oc'] +['_', 'geometry'] +['ĠDam', 'ascus'] +['Min', 'or'] +['ĠK', 'afka'] +['ìĹ', '¬'] +['Fl', 'orida'] +['_com', 'pute'] +['.ex', 'pr'] +['Ġpar', 'alle'] +['ĠD', 'iaz'] +['c', 'ir'] +['[', 'target'] +['Ġj', 'oking'] +['Ġgl', 'or'] +['(set', 'q'] +['_hand', 'lers'] +['H', 'ang'] +['Ġf', 'err'] +['rim', 'inal'] +['ĉĠĠĠĠ', 'ĉĉ'] +['ent', 'ies'] +['def', 'ines'] +['-t', 'ax'] +['json', 'p'] +['ĠU', 'PS'] +['met', 'ro'] +['__', ';Ċ'] +['ĠUg', 'anda'] +[']))', ':Ċ'] +['_t', 'd'] +['x', 'ae'] +['l', 'w'] +['.', 'OS'] +['ĠLog', 'ged'] +['ac', 'id'] +['ĠMay', 'o'] +['as', 'pect'] +['Ġvag', 'inal'] +['Ġinitial', 'izing'] +['Ġster', 'oids'] +['f', 'iction'] +['G', 'RE'] +['g', 'end'] +['Ġli', 'abilities'] +['ĠL', 'ets'] +['M', 'ech'] +['(', 'nc'] +['(', 'change'] +['Ġconnect', 'ors'] +[':', 'k'] +['Ġt', 'ast'] +['!', '");ĊĊ'] +['th', 'ings'] +['ro', 'phy'] +['luet', 'ooth'] +['ĠSign', 'Up'] +['.', 'ctrl'] +['Ġthere', 'in'] +['ord', 'a'] +['.', 'escape'] +['ig', 'ator'] +['Ġpet', 'rol'] +['Ġspec', 'imen'] +['Ġdeb', 'uted'] +['-', 
'Pro'] +['Ġcr', 'ises'] +['.add', 'View'] +['ëı', 'Ļ'] +['-d', 'oor'] +['Ġmon', 'et'] +['Ġmill', 'is'] +['Ġv', 'ier'] +['Internal', 'Enumerator'] +['Ġadmin', 's'] +['ĠL', 'air'] +['z', 'in'] +['get', 'Query'] +['umb', 'les'] +['L', 'IMIT'] +['ĠV', 'ig'] +['_s', 'ong'] +['<', 'Character'] +['::', '.'] +['_h', 'om'] +['_b', 'p'] +['ĠSup', 'ervisor'] +['sub', 'mission'] +['ab', 'ile'] +['Ġno', 'i'] +['Or', 'Create'] +['Ġpe', 'el'] +['Ġon', 'Start'] +['Ġsent', 'iments'] +['veh', 'icles'] +['Ġclass', 'rooms'] +['Ġs', 'zer'] +['Ġb', 'ending'] +['Ġlong', 'evity'] +['Ġa', 'cl'] +['ĠAle', 'ppo'] +['ĠU', 'M'] +['ĠR', 'icht'] +['Ġmultip', 'rocessing'] +['DOM', 'AIN'] +['","', '+'] +['_Y', 'EAR'] +['Ġsc', 'rape'] +['Ġsol', 'itary'] +['Ġ"]', '";Ċ'] +['/', 'errors'] +['ìŀ', '¬'] +['ľ', 'ëł¥'] +['b', 'etter'] +['ĉ', 'number'] +['ĠL', 'F'] +['ĠAc', 'ross'] +['Pub', 'Med'] +['\\"', '"'] +['ĠExcell', 'ence'] +['Ġus', 'ando'] +['ĠU', 'IP'] +['Activity', 'Indicator'] +['_V', 'OID'] +['Ġbre', 'eds'] +['ï½', '¥'] +['uest', 'as'] +['ĠTre', 'asure'] +['ustral', 'ian'] +['(f', 'ace'] +['ĠT', 'ennis'] +['ĉ', 'Int'] +['ĠHans', 'en'] +['ç', 'µ'] +[':', 'I'] +['Ġâľ', 'Ķ'] +['GR', 'AY'] +['O', 'USE'] +['Ġhe', 'pat'] +['ł', 'í'] +['A', 'IR'] +['ó', 'ż'] +['Ġque', 'ued'] +['vinc', 'ia'] +['ĠChrom', 'ium'] +['Ġcompet', 'ence'] +['ung', 'al'] +['ill', 'i'] +['Ġget', 'By'] +['ĠF', 'inder'] +['Ġincap', 'able'] +['Ġs', 'add'] +['Ġc', 'ites'] +['ĠChurch', 'ill'] +['S', 'dk'] +['More', 'over'] +['As', 'pNet'] +['(', 'Float'] +['$', 'password'] +['ĠConn', 'or'] +['-s', 'ession'] +['_d', 'm'] +['*', '))'] +['Ġde', 'utsch'] +['ĠN', 'X'] +['Ġper', 'ks'] +['_S', 'ORT'] +['_TO', 'OL'] +['_V', 'ISIBLE'] +['.as', 'p'] +['æĪ', 'ĸ'] +['ĠBre', 'ath'] +['D', 'etect'] +['ĠD', 'uel'] +['.c', 'mb'] +['[', 'it'] +['.Set', 'Bool'] +['Ġnarc', 'iss'] +['Ġab', 'ide'] +['Ġej', 'emplo'] +['ĠâĦ', 'ķ'] +['Ġm', 'ornings'] +['Ġcomput', 'es'] +['.s', 'sl'] +['j', 't'] +['Ġmuch', 'os'] +['_S', 'S'] +['[', 'end'] +['Ġbas', 'in'] 
+['Ġalgun', 'os'] +['ĠCroat', 'ia'] +['lin', 'ewidth'] +['(t', 'ags'] +['(h', 'idden'] +['ÃŃc', 'io'] +['Ġap', 'ar'] +['ĠÐ', '¶'] +['ä¸', 'İ'] +['.', 'food'] +['ĠR', 'ural'] +['Ġbread', 'th'] +['å½', '±'] +['(s', 'ess'] +['+', '")'] +['ĠP', 'aste'] +['Ġserv', 'idor'] +['ĠBit', 'Set'] +['ĠTr', 'an'] +['la', 'us'] +['v', 'ette'] +['ey', 'es'] +['ĠCL', 'ICK'] +['ĠV', 'III'] +['ĠTurn', 's'] +['ĠLe', 'Bron'] +['ĠM', 'uj'] +['ĠD', 'eg'] +['ĠAdult', 's'] +['_s', 'uite'] +['process', 'able'] +['ĠPH', 'Y'] +['g', 'hest'] +['.F', 'ail'] +['ĠSl', 'ack'] +['ce', 'j'] +['\\', 'Carbon'] +['Ġsuper', 'star'] +['Ġhold', 'ings'] +['(', 'forms'] +["Ġ'#", "'"] +['M', 'ultip'] +['("[', '%'] +['-s', 'olid'] +['/', 'url'] +['-t', 'ier'] +['[', 'length'] +['ĠStream', 'Writer'] +['ĠMarket', 'place'] +['get', 'text'] +['_T', 'ICK'] +['ĠFor', 'ge'] +['Ġblack', 'jack'] +['ĠDO', 'ES'] +['ĠM', 'atters'] +['w', 'aves'] +['Ġwhisper', 'ed'] +['Ġl', 'ush'] +['ìĺ', '¤'] +['d', 'igital'] +['Ġwr', 'ink'] +['ĠH', 'ogan'] +['Ġrust', 'ic'] +['.Apply', 'Resources'] +['ĠHard', 'y'] +['os', 'omes'] +['A', 'UT'] +['.ST', 'ATE'] +['Ġnarr', 'atives'] +['ĉ', 'store'] +['b', 'ib'] +['ĉ', 'Scanner'] +['ĠC', 'ody'] +['\\', 'Repositories'] +['Ġre', 'union'] +['and', 'um'] +['âĢĻ', 'h'] +['Ġsn', 'iff'] +['NS', 'Bundle'] +['Ġcompreh', 'end'] +['_US', 'AGE'] +['_', 'occ'] +['URRE', 'NCY'] +['J', 'NI'] +['Ġspecial', 'izing'] +['Ġvis', 'ions'] +['Ġdol', 'ore'] +['Ġv', 'á'] +['ĠChe', 'vy'] +['ĠSt', 'yled'] +['imp', 'act'] +['all', 'en'] +['Ġk', 'art'] +['ĠTable', 't'] +['st', 'uff'] +['re', 'esome'] +['аÑĤ', 'оÑĢ'] +['//----------------------------------------------------------------', '-----------Ċ'] +['_Ad', 'min'] +['Ġcell', 'phone'] +['Ġaut', 'oplay'] +['Ġcamb', 'io'] +['Ġmar', 'itime'] +['_BO', 'OT'] +['-', 'quarter'] +['Ġlat', 'ina'] +['ĠAJ', 'AX'] +['e', 'quiv'] +['ĠFront', 'ier'] +['ĠX', 'Y'] +['}', ']Ċ'] +['ĠR', 'ough'] +['.pro', 'to'] +['Ġcorrect', 'ness'] +['Ġfac', 'il'] +['ĠRe', 'ached'] +['ãģĿ', 'ãģ®'] 
+['V', 'IS'] +['.p', 's'] +['Ġstr', 'ncpy'] +['Ġdiff', 'usion'] +['.start', 'Activity'] +['��', '�'] +['Ġaccom', 'p'] +['AMES', 'PACE'] +['imon', 'ials'] +['ĠBl', 'ast'] +['aby', 'rin'] +['Ġd', 'ome'] +['Ġextr', 'av'] +['Ġy', 'en'] +['Ġcul', 'inary'] +['P', 'RI'] +['ĠComm', 'unities'] +['n', 'id'] +['_oper', 'ations'] +['.h', 's'] +['ĠMil', 'ton'] +['Ġno', 'ises'] +['Autoresizing', 'Mask'] +['(c', 'id'] +['}ĊĊ', 'ĊĊĊĊ'] +[']', '},Ċ'] +['ĠD', 'etection'] +['tab', 'la'] +['Ġlib', 'erties'] +['_D', 'YNAMIC'] +['w', 'get'] +['ĠT', 'ür'] +['ĠP', 'ascal'] +['Trans', 'parent'] +['Delay', 'ed'] +[']', '()'] +['ĠHer', 'bert'] +['<', 'ActionResult'] +['ch', 'allenge'] +['Ġmush', 'room'] +['.insert', 'Before'] +['ĠR', 'in'] +['Ġhum', 'our'] +['Ġf', 'ø'] +['api', 'Key'] +['alloc', 'ated'] +['Ġconf', 'ession'] +['.', '",čĊ'] +['ĉassert', 'That'] +['ĠS', 'ORT'] +['ĠL', 'ORD'] +['Ġexport', 'er'] +['.set', 'Level'] +['p', 'okemon'] +['ash', 'tra'] +['Ġf', 'é'] +['ur', 'ator'] +['(M', 'SG'] +['Ġt', 'up'] +['ĠH', 'ull'] +['Ġyield', 'ed'] +['.Sub', 'ject'] +['\\', 'Route'] +['!', '?'] +['ĠÑĥ', 'дал'] +['\\', 'Security'] +['-', 'ar'] +['Ġalleg', 'ation'] +['(', 'Settings'] +['ä', 'nder'] +['Ġell', 'ipse'] +['ĠRetro', 'fit'] +['Ġregul', 'ating'] +['ĠM', 'olly'] +['ĠL', 'ok'] +['_C', 'ustom'] +['ĠProm', 'o'] +['is', 'in'] +['Ġres', 'umed'] +['Ġmet', 'ropolitan'] +['.error', 'Message'] +[':', '-------------'] +['Ġpas', 'ado'] +['th', 'ank'] +['_De', 'lete'] +['ĠBright', 'on'] +[',', 'unsigned'] +['ä½ľ', 'èĢħ'] +['Ġaspir', 'ations'] +['-h', 'ow'] +['R', 'ose'] +['=', '(('] +['_ne', 'eded'] +['_pl', 'ural'] +['<', 'Application'] +['ĠW', 'EEK'] +['ĠUn', 'lock'] +['ĠT', 'EMP'] +['S', 'ou'] +['Ġschizophren', 'ia'] +['Ġt', 'roll'] +['Ġcomplement', 'ary'] +['ĠNET', 'WORK'] +['Ġbl', 'ir'] +['Ġprogress', 'Dialog'] +['"', '%('] +['ĠAttribute', 'Set'] +['ĉ', 'ts'] +['.iter', 'items'] +['è¯', 'Ŀ'] +['Ġesc', 'rit'] +['v', 'ous'] +['_pl', 'aces'] +['H', 'K'] +['Ġseg', 'uir'] +['_f', 'w'] +['ĠR', 
'ounded'] +['Ġdis', 'posit'] +['è§', 'Ĩ'] +['par', 'm'] +['w', 'ow'] +['STRU', 'CTION'] +['.', 'allow'] +['ĠChar', 'Sequence'] +['ĉ', 'extern'] +['Ġprosec', 'uted'] +['Ġmort', 'ar'] +['ĠJ', 'uda'] +['-', 'msg'] +['Ġest', 'ud'] +['.get', 'Description'] +['Ġs', 'ow'] +['amb', 're'] +['Ġrom', 'a'] +['En', 'h'] +['bon', 'us'] +['Ġsqu', 'at'] +['Ġdist', 'ra'] +['ed', 'Image'] +['Ġpe', 'ppers'] +['-per', 'formance'] +[',', 'ĊĊĊ'] +[',', 'file'] +['ĠM', 'IME'] +['_con', 'cat'] +['AB', 'S'] +['-f', 'ashion'] +['Ġunder', 'cover'] +['One', 'ToMany'] +['Ġre', 'claim'] +['C', 'OPY'] +['Ġb', 'inds'] +['ĠT', 'ape'] +['Ġg', 'ossip'] +['ĠEqu', 'ity'] +['/', 'Card'] +['.', 'activ'] +["'", 'am'] +['Ġdrain', 'age'] +['<', 'Scalars'] +['ĠonBind', 'ViewHolder'] +['()', '?.'] +['Ġs', 'orrow'] +['ĠI', 'b'] +['up', 'y'] +['_U', 'UID'] +['ĠCh', 'arm'] +['ĠElection', 's'] +['.on', 'Destroy'] +['ĠInterest', 'ingly'] +['ounding', 'Box'] +['_d', 'etection'] +['-h', 'eld'] +['_', 'unknown'] +['Ġrefr', 'ain'] +['Ġmét', 'odo'] +['Ġe', 'Book'] +['EN', 'OMEM'] +['Ġd', 'ang'] +['Prof', 'essional'] +['Ġd', 'ictionaries'] +['/m', 'ysql'] +['ĠST', 'UD'] +['Ġmas', 'se'] +['s', 'cape'] +['Ġdre', 'i'] +[':', 'name'] +['.log', 'o'] +['Sign', 'Up'] +['Ġt', 'ahun'] +['(', 'theme'] +['ĠFem', 'me'] +['Ġbom', 'ber'] +['ĠJ', 'ade'] +['ĠT', 'ay'] +['Ġsubmar', 'ine'] +['_cl', 'ause'] +['zy', 'ch'] +['Ġsimult', 'aneous'] +['Ġcas', 'os'] +['.', 'boolean'] +['(l', 'hs'] +['Ġcontin', 'ental'] +['-s', 'ale'] +['ĉ', 'env'] +['ĠC', 'ute'] +['ĠFactory', 'Girl'] +['ab', 'us'] +['/', 'value'] +['Ġj', 'adx'] +['Ġst', 'ern'] +['>', '>ĊĊ'] +['Ġsurf', 'aced'] +['Ġìł', 'Ģìŀ¥'] +['pl', 'atz'] +['ĉ', 'email'] +['cept', 'ors'] +['">', '('] +['Ġep', 'ile'] +['è¯', '»'] +['ĠDe', 'bt'] +['åij', 'Ĭ'] +['N', 'OP'] +['"', 'https'] +[':', 'j'] +['Form', 'Item'] +['_L', 'ICENSE'] +['.get', 'Double'] +['ĠAg', 'enda'] +['ĉf', 'inally'] +['(f', 'ilters'] +['(', 'av'] +['ç¾', 'İ'] +['AP', 'ER'] +['Ġl', 'ava'] +['еÑĢ', 'ж'] +['))', '))ĊĊ'] 
+['Ġfault', 'y'] +['_n', 'm'] +['Ġtr', 'ava'] +['(B', 'itmap'] +['Ġspeed', 'ing'] +['>', "')."] +['Ġscreen', 'ed'] +['_', 'roll'] +['ĠMac', 'Book'] +['ĠA', 'UD'] +['Ġdiagn', 'ose'] +['.G', 'enerate'] +['Ġ^', '^'] +['Ġstr', 's'] +['[', 'Test'] +['Ġr', 'ansom'] +['ĠDH', 'CP'] +['eld', 'en'] +['Ġinterpret', 'ations'] +['()', '].'] +['flat', 'Map'] +['Ġline', 'Height'] +['_m', 'ount'] +['ĠW', 'izards'] +['Ġsl', 'uts'] +['eh', 'ler'] +['od', 'al'] +['Ġmilit', 'ia'] +['å', '²'] +['earn', 'ed'] +['Ġmis', 'ery'] +['int', 'val'] +['f', 'und'] +['Ġh', 'ides'] +['Ġdi', 'arr'] +['ĠWes', 'ley'] +['Ġx', 'mm'] +['Ġqu', 'em'] +['ĠAr', 'abs'] +['if', 'th'] +['ategor', 'ized'] +['Dis', 'posable'] +['P', 'ure'] +['_NOT', 'IFY'] +['sn', 'ippet'] +['ĠGar', 'rett'] +['.run', 'ning'] +['.', 'weights'] +['Ġ(', '--'] +['Ġin', 'variant'] +['äºĭ', 'ä»¶'] +['ĠAll', 'owed'] +['dir', 's'] +['Ġpass', 'ions'] +['Ġl', 'ad'] +['ĠFl', 'ush'] +['men', 'us'] +[':', 'block'] +['Ġcompr', 'a'] +['.ch', 'omp'] +['alloc', 'ator'] +['Ġcur', 'ated'] +['ĠKnow', 'ing'] +['ĠPatt', 'erson'] +['Ġtel', 'ah'] +["'", 'ex'] +['Ġdo', 'omed'] +['Ġphil', 'anth'] +['ott', 'y'] +['.st', 'yles'] +['Own', 'ed'] +['Ġallerg', 'ies'] +['=', 'params'] +['oc', 'ese'] +['it', 'elist'] +['ĠS', 'ending'] +['b', 'ef'] +['orr', 'ar'] +['ĠN', 'ão'] +['ĠF', 'argo'] +['ĠL', 'ub'] +['ĠComb', 'ined'] +['_g', 'iven'] +['ĉĉĉĉĉ', 'ĠĠĠĠ'] +['Ġreconc', 'iliation'] +['Pattern', 's'] +['az', 'ard'] +['Ġbiom', 'ass'] +['ĠH', 'ouses'] +['resp', 'uesta'] +['cc', 'o'] +['/top', 'ics'] +['ĠY', 'uk'] +['Ġweaken', 'ed'] +['_c', 'alendar'] +['Ġmulher', 'es'] +['ĠMar', 'l'] +['Ġs', 'ine'] +['ĠT', 'il'] +['ĠSou', 'ls'] +['ĠDe', 'utsche'] +['ĠF', 'OLLOW'] +['Ġpip', 'elines'] +['ĠBever', 'ly'] +['_DIP', 'SETTING'] +['"', '#'] +['ĠPro', 'to'] +['.b', 'ig'] +['ĠSav', 'ings'] +['ĠT', 'anz'] +['j', 'un'] +['ĠG', 'amma'] +['ĠS', 'add'] +['Ġadvis', 'ors'] +['Ġro', 'ast'] +['Ġun', 'ters'] +['ud', 'ies'] +['_l', 'on'] +['-point', 'er'] +['ĠElement', 'Ref'] +['\\', 
'Builder'] +['example', 'Input'] +['.web', 'driver'] +['data', 'Type'] +['ĠQu', 'ite'] +['ĠCelt', 'ics'] +['u', 'il'] +['-def', 'ense'] +['b', 'ish'] +['ĠUI', 'Window'] +['ĠS', 'uddenly'] +['.h', 'ot'] +['.re', 'ason'] +['Ġg', 'ör'] +['AM', 'D'] +['.M', 'ulti'] +['auth', 'enticated'] +['reg', 'ions'] +[';', '('] +['а', 'ÑĢам'] +['ĠKir', 'by'] +['$', 'route'] +['PREC', 'ATED'] +['ĠDur', 'ham'] +['ow', 'o'] +['ĠPer', 'forms'] +['Ġdisreg', 'ard'] +['n', 'st'] +['ĠP', 'ols'] +['Ġget', 'P'] +['"]', ':'] +['-col', 'ored'] +['(', 'Keys'] +['ĠAl', 'leg'] +['_mod', 'ify'] +['_', 'loading'] +['str', 'ained'] +['Ġat', 'roc'] +['_p', 'hr'] +['<', 'Sprite'] +['Ġsatisf', 'actory'] +['m', 'anship'] +['.p', 'ipeline'] +['T', 'ony'] +['Ġth', 'ief'] +['pol', 'ator'] +['(', 'lock'] +['bur', 'st'] +['ĠOptim', 'ization'] +['Ġsurf', 'ing'] +['"', 'Yes'] +['Ġdesc', 'ended'] +['æ', 'Ĵ'] +['_C', 'lear'] +['Ġc', 'ries'] +['ĠFro', 'zen'] +['D', 'IRECT'] +['-', 'Con'] +['ĠLe', 'icester'] +['å¥', '³'] +['O', 'OM'] +['=', 'db'] +['Ġget', 'Message'] +['<', 'Student'] +['_b', 'atches'] +['.M', 'ask'] +['_', 'eth'] +['\\', ')'] +['Ġsom', 'a'] +['C', 'atch'] +['[', 'ch'] +['Own', 'ers'] +['ind', 'le'] +[':', 'auto'] +['.', 'vert'] +['iv', 'r'] +['.set', 'Location'] +['Ġfl', 'uent'] +['_END', 'IAN'] +['ĠCar', 'lo'] +['cept', 's'] +['add', 'Action'] +['.o', 'auth'] +['<', 'UnityEngine'] +['re', 'ements'] +['.S', 'kip'] +['?', ')ĊĊ'] +['.default', 'Props'] +['Ġc', 'abe'] +['ĠSh', 'en'] +['eros', 'is'] +['ĠPro', 'fit'] +['Ġpo', 'is'] +['_C', 'REATED'] +['Ġremove', 'From'] +['(w', 's'] +['?', 'action'] +['(', 'Field'] +['Ġerr', 'one'] +['.min', 'imum'] +['ĠRetrie', 'ved'] +['Ġd', 'ado'] +['ĠPR', 'IVATE'] +['-s', 'pec'] +['Ġg', 'zip'] +['p', 'data'] +['Ġpos', 'Y'] +['(l', 'ow'] +['Ġqual', 'quer'] +['/', 'cloud'] +['ê²', 'Į'] +['(', 'common'] +['ĠAr', 'beit'] +['organ', 'isation'] +['Ġtid', 'y'] +['ĠRol', 'and'] +['(', 'ph'] +['.z', 'one'] +['Ġgent', 'lemen'] +['ượ', 'c'] +['å±', '±'] +['Ġenc', 'losure'] 
+['ĠMan', 'afort'] +['ĉ', 'Color'] +['St', 'encil'] +['N', 'ic'] +['Ġthe', 'orem'] +['ĠV', 'G'] +['Ġcol', 'oured'] +['V', 'BoxLayout'] +['uls', 'ive'] +['Drag', 'on'] +['c', 'ff'] +['et', 'est'] +['ens', 'a'] +['of', 'day'] +['.A', 'zure'] +[':UIControlEvent', 'TouchUpInside'] +['_up', 'dates'] +['Ġtrend', 'y'] +['ug', 'as'] +['weak', 'Self'] +['Ġr', 'idge'] +['ib', 'ri'] +['Ġì¶', 'Ķ'] +['(C', 'G'] +['ĠMon', 'key'] +['.write', 'Int'] +['.tim', 'edelta'] +['ViewController', 'Animated'] +['ĠProvid', 'ence'] +['ãģ', 'Ī'] +['Ġbl', 'ends'] +['/Sub', 'threshold'] +['ĠAp', 'pl'] +['Ġat', 'an'] +['Ġreload', 'Data'] +['umb', 'otron'] +['st', 'üt'] +['O', 'Auth'] +['ĠG', 'iving'] +['ĠìĦ', '¤'] +['ĠFinn', 'ish'] +['check', 'ing'] +['.', 'Embed'] +['sequ', 'elize'] +['Ġinitial', 'izes'] +['ĠOs', 'lo'] +['Ø', '¶'] +['get', 'Extension'] +['_AL', 'T'] +['(bl', 'ank'] +['Ġfatal', 'Error'] +['Ġdem', 'ise'] +['****', '*Ċ'] +['ĠX', 'S'] +['(A', 'F'] +['ĠEn', 's'] +['an', 'tha'] +['ĠP', 'OR'] +['Ġn', 'ich'] +['.N', 'amed'] +['Ġgig', 'antic'] +['ĠObserv', 'atory'] +['.Res', 'olve'] +['ĠPay', 'ments'] +['g', 'uild'] +['Ġcurrent', 'State'] +['============', '===Ċ'] +['ĠS', 'ey'] +['p', 'Data'] +['Ġdead', 'lines'] +['Ġcentral', 'ized'] +['ĠScholar', 'ship'] +['_s', 'upported'] +['.ch', 'rome'] +['()', ']);Ċ'] +['Ġc', 'yan'] +['ĠC', 'age'] +['Auth', 'ors'] +['_', 'čĊ'] +['/', 'os'] +['k', 'im'] +['de', 'e'] +['.t', 'ex'] +['Ġyours', 'elves'] +['Ġm', 'gr'] +['Ġal', 'k'] +['-inst', 'all'] +['Ġdraft', 'ing'] +['Ġrum', 'or'] +['Ġstat', 'ues'] +['Pool', 'ing'] +['ol', 'ina'] +['AAAA', 'AAAA'] +['/*', '----------------------------------------------------------------------------'] +['Ġextrem', 'ists'] +['Cal', 'cul'] +['ighth', 'ouse'] +['In', 'set'] +['(IN', 'PUT'] +['Ġsynchron', 'ization'] +['iv', 'irus'] +['.', 'axes'] +['ĠG', 'ap'] +['-', 'An'] +['_T', 'emplate'] +['Ġgam', 'er'] +['ĠCr', 'icket'] +['Ġl', 'int'] +['Ġauthor', 'itarian'] +['NS', 'UInteger'] +['Ġred', 'o'] +['Ġadip', 'iscing'] 
+['_F', 'ETCH'] +['che', 'id'] +['ĠF', 'ang'] +['.', 'indices'] +['t', 'one'] +['д', 'ел'] +['Ġ{{--', '<'] +['bra', 'him'] +['Ġsal', 'a'] +['get', 'Code'] +['Ġcommunic', 'ated'] +['start', 'sWith'] +['ert', 'z'] +['Read', 'able'] +['Item', 'Id'] +['oref', 'errer'] +['cred', 'ible'] +['á', 'ria'] +['Ġcombine', 'Reducers'] +['**', '/ĊĊ'] +['Ġbl', 'iss'] +['Ġad', 'orn'] +['dep', 'ends'] +['ĠRO', 'OM'] +['Ġfr', 'aming'] +['Ġ?', "',"] +['aut', 'y'] +['_p', 'ot'] +['_t', 'abs'] +['Ex', 'act'] +[',', '",'] +["Ġ'}", "';Ċ"] +['Ġarbit', 'r'] +['ahr', 'ain'] +['.getString', 'Extra'] +['Ġ$', '\\'] +['Ġoutput', 'Stream'] +['Ġcomm', 'enc'] +['an', 'us'] +['ch', 'y'] +['<', 'Employee'] +['Ġhex', 'atrigesimal'] +['Ġn', 'acional'] +['(serial', 'izers'] +['_put', 'char'] +['_S', 'AFE'] +['ential', 'Action'] +['ItemSelected', 'Listener'] +['.Dis', 'patch'] +['Conf', 'lict'] +['_', 'about'] +['os', 'aur'] +['Bound', 'ary'] +['Ġclear', 'Color'] +['(', 'Location'] +['ĠMON', 'TH'] +['ĠT', 'aste'] +['-', 'General'] +['ĠW', 'AR'] +['Ġer', 'halten'] +['-s', 'aving'] +['Ġcou', 'pling'] +['-tr', 'igger'] +['m', 'otor'] +['Ġy', 'yyy'] +['ĠPat', 'ent'] +['pt', 'o'] +['Ġmisdemean', 'or'] +['vas', 'ion'] +['ĠAdmir', 'al'] +['à¹ī', 'า'] +['_P', 'WR'] +['Ġdevast', 'ated'] +['fol', 'ios'] +['ITU', 'DE'] +['urre', 'ct'] +['Ġrobot', 'ic'] +['ĠSan', 'ct'] +['ĠHawai', 'ian'] +['.R', 'oute'] +['-', 'condition'] +['Ġr', 'k'] +['/****************************************************************************', 'Ċ'] +['create', 'Element'] +['ĠK', 'op'] +['ign', 'ant'] +['.', 'rollback'] +['Ġsal', 'ud'] +['_', "',"] +['ĠAN', 'SI'] +['Ex', 'cept'] +['ĠDraw', 'able'] +['.Utc', 'Now'] +['":[', '{Ċ'] +['Ġk', 'ole'] +['L', 'ua'] +['ĠBel', 'ieve'] +['Com', 'put'] +['Ġhall', 'uc'] +['ĠSign', 's'] +['r', 'st'] +['.h', 'u'] +['ĠKN', 'OW'] +['W', 'i'] +['ĠBr', 'ass'] +['ĠR', 'as'] +['@', 'hotmail'] +['Ġsed', 'iment'] +['Ġap', 'k'] +['Ġì', 'ĥģ'] +['_reg', 'ions'] +['Ġpod', 'ium'] +['<', 'Book'] +['ж', 'е'] +['Ġsix', 
'teen'] +['ĠAli', 'as'] +['Ġinfr', 'ared'] +['ĠV', 'ander'] +['ĠLe', 'ading'] +['uc', 'ing'] +[',:', ',:'] +['_h', 'or'] +['w', 'at'] +['Ġdé', 'cou'] +['_W', 'idget'] +['S', 'ounds'] +['_n', 'avigation'] +['Ġschn', 'ell'] +['(g', 'enerator'] +['uc', 'ene'] +['Ġrem', 'ake'] +['IP', 'v'] +['Ġré', 'al'] +['_IN', 'CREMENT'] +['Ġhypoth', 'etical'] +['_', 'ang'] +['Ġof', 's'] +['Ġ!', 'Ċ'] +['.com', 'pleted'] +['Get', 'Type'] +['Ġkom', 'men'] +['ál', 'ido'] +['add', 'On'] +['Ġz', 'ÅĤ'] +['UL', 'A'] +['_ind', 'icator'] +["']", 'ĊĊĊ'] +['ap', 'ache'] +['_S', 'elect'] +['ĠGre', 'ene'] +['Wh', 'ats'] +['_an', 'im'] +['Ġrepet', 'itive'] +['m', 'uch'] +['ĠTh', 'reshold'] +['Ġl', 'f'] +['(C', 'ategory'] +['con', 'e'] +['M', 'ix'] +['_MET', 'ADATA'] +['ays', 'ia'] +['Ne', 'ighbors'] +['ĉĊ', 'ĉĉĊ'] +['IP', 'HER'] +['ĠFr', 'ag'] +['ĠC', 'ells'] +['Ġnames', 'paces'] +['(', 'back'] +['ĠRest', 'aurants'] +['sv', 'c'] +['Ġл', 'и'] +['ote', 'ch'] +['-s', 'l'] +['¥', '¿'] +['ĠW', 'T'] +['ĠRed', 'uction'] +['Ġd', 'otted'] +['ĉf', 'ound'] +['ĠTE', 'AM'] +['B', 'orn'] +['ĠM', 'ush'] +['ĠCompar', 'able'] +['Ġh', 'itch'] +['AT', 'O'] +['Ġmax', 'Height'] +['begin', 'Transaction'] +['ÃŃ', 'v'] +['_b', 'n'] +['Ġher', 'd'] +['Ġrevers', 'al'] +['ĠH', 'ond'] +['del', 'imiter'] +['Ġconf', 'use'] +['Ġh', 'ops'] +['Ġcent', 'roid'] +['Ġcourt', 'room'] +['.decor', 'ators'] +['Ġm', 'pi'] +['ĠImpro', 'ved'] +['IN', 'NER'] +['ĠBang', 'alore'] +['ĠT', 'amb'] +['Ġbo', 'ast'] +['()', '))čĊ'] +['Ġil', 'licit'] +['ĠMor', 'occo'] +['greg', 'ator'] +['_res', 'ume'] +['Ġcrack', 'down'] +['Ġport', 'raits'] +['/h', 'igh'] +['(', "\\'"] +['Ġay', 'ud'] +['_fe', 'edback'] +['Ġc', 'ate'] +['/', 'avatar'] +['Ġhe', 'b'] +['Point', 'Cloud'] +['Ġå', 'ĴĮ'] +['Ġ<', '!['] +['Ġget', 'Resources'] +['}', ':{'] +['Oper', 'ating'] +['ĠF', 'og'] +['ĉt', 'ab'] +['ĠResearch', 'ers'] +['Ġfabric', 'ation'] +['.datas', 'ets'] +['ĠCamp', 'o'] +['ĠKa', 'uf'] +['Ġd', 'll'] +['lig', 't'] +[']', '));ĊĊ'] +['st', 'ellen'] +['ACK', 'ET'] +['l', 
'vl'] +['ĠGl', 'ory'] +['.date', 'Time'] +['Ġcomm', 'ute'] +['ĠonCreate', 'ViewHolder'] +['ĠX', 'Element'] +['ĠT', 'okens'] +['<', 'thead'] +['_p', 'ick'] +['ì', '¤'] +['v', 'on'] +['depart', 'ure'] +['(render', 'er'] +['phone', 'Number'] +['(P', 'erson'] +['gen', 'es'] +['ĠL', 'ars'] +['Ġ)', '{ĊĊ'] +['ĠJson', 'Result'] +['Ġmet', 'odo'] +['VO', 'KE'] +['.get', 'UserId'] +['Acc', 'eler'] +['ĉ', 'required'] +['Ġchampionship', 's'] +['Build', 'Context'] +['/t', 'ask'] +['/re', 'leases'] +['C', 'ategoria'] +['_over', 'lay'] +['Ġscar', 'ce'] +['_l', 'im'] +['n', 'gr'] +['ah', 'len'] +['ĠArt', 'ificial'] +['sp', 'read'] +['Ġbow', 'ling'] +['.an', 'alysis'] +['SM', 'TP'] +['ĉp', 'assword'] +['Ġbath', 's'] +[']', ')){Ċ'] +['current', 'ly'] +['ac', 'iente'] +['_se', 'parator'] +['Ġde', 'ber'] +['ĠDis', 'abled'] +['i', 'ères'] +['Ġâ', 'ķ'] +['_process', 'ing'] +['Ġprotest', 'ing'] +['ĠR', 'OT'] +['gr', 'ab'] +['Ġз', 'ак'] +['Ġpro', 'active'] +['word', 'press'] +['ĠSe', 'ver'] +['ind', 'en'] +['Ġw', 'ikipedia'] +['){', 'čĊčĊ'] +['_w', 'indows'] +['is', 'lation'] +['Ġun', 'rest'] +['Ġdismiss', 'al'] +['.N', 'UM'] +['_F', 'AST'] +['iss', 'ued'] +['ĠF', 'ACE'] +['_u', 'nder'] +['Ġpl', 'ugged'] +['Ġå', '°'] +['ĠbÄĻd', 'zie'] +['ĠI', 'CC'] +['Ġcombust', 'ion'] +['Ġkiss', 'ed'] +['Ġstar', 'red'] +['ĠW', 'atts'] +['Ġspi', 'elen'] +['-p', 'urpose'] +['ĠE', 'val'] +['arg', 'es'] +[',', 'result'] +['techn', 'ology'] +['Ġnational', 'ity'] +['ic', 'us'] +['ĠN', 'ug'] +['ĠÑĤ', 'о'] +['ĉĉĉĉĉĉĉ', 'ĠĠ'] +['col', 'o'] +['Ġg', 'astro'] +['ante', 'ed'] +['OL', 'ID'] +['.b', 'ias'] +['_t', 'ele'] +['.ins', 'pect'] +['Ġve', 'il'] +['.', 'footer'] +['Ġneglig', 'ence'] +['Ġjud', 'gments'] +['Room', 's'] +['yn', 'n'] +['ĉcount', 'er'] +['occup', 'ation'] +['Ġ', 'çĶŁ'] +['un', 'as'] +['Ġ(^', ')('] +['L', 'ambda'] +['f', 'el'] +['.Param', 's'] +['Ġд', 'обав'] +['set', 'Layout'] +['Ġdeport', 'ation'] +['Ġlocal', 'Object'] +['ĠPharm', 'aceutical'] +['cept', 'ive'] +['ĠN', 'ome'] +['Equ', 'ipment'] 
+['F', 'an'] +['Un', 'iversal'] +['ĉ', 'socket'] +['Ġgr', 'in'] +['Ġex', 'poses'] +['Ġhab', 'er'] +['Ġsincer', 'ely'] +['Ġc', 'ams'] +['Ġm', 'ü'] +['en', 'ia'] +['E', 'mer'] +['C', 'rypto'] +['Sl', 'ow'] +['(x', 'hr'] +['!', '=('] +['-s', 'ervices'] +['ĠP', 'W'] +['Ġprend', 're'] +['Ġm', 'ädchen'] +['em', 'ons'] +['озв', 'ÑĢаÑī'] +['.M', 'anager'] +['ì', 'Ļ'] +['Ġg', 'raf'] +['-', 'ra'] +['met', 'rical'] +['/', 'fl'] +['Ġc', 'emetery'] +['g', 'ens'] +['Ġp', 'ÅĻ'] +['ĠMySql', 'Command'] +['-', 'To'] +['Ġv', 'Ã¥'] +['Ġa', 'irst'] +['oment', 'um'] +['Ġserv', 'o'] +['m', 'illion'] +['ĠMir', 'anda'] +['"', 'She'] +['Ġadvoc', 'ating'] +['-c', 'aption'] +['ĠAt', 'tribution'] +['Ġwel', 'che'] +['_v', 'endor'] +['ĉ', 'Status'] +['arr', 'is'] +['Ġprint', 'k'] +['","', '#'] +['Ġrel', 'ativ'] +['if', 'ferences'] +['izz', 'es'] +['Ġdec', 'imals'] +['ĠPro', 'v'] +['.max', 'imum'] +['Ar', 'n'] +['Ġhelicopt', 'ers'] +['_B', 'OTTOM'] +['ch', 'ure'] +['od', 'ings'] +["'", '('] +['"))', ');čĊ'] +['(', 'bean'] +['.f', 'd'] +['F', 'und'] +['Ġhang', 's'] +['app', 'id'] +['/k', 'ernel'] +['.p', 'oi'] +['.Min', 'Value'] +['-', 'validation'] +['L', 'uke'] +['c', 'df'] +['ĠFun', 'eral'] +['ĠS', 'amples'] +['ĉ', 'de'] +['Ġto', 'astr'] +['Ġtax', 'able'] +['Ġcl', 'ustering'] +["Ġ'\\", "'"] +['Ġre', 'straint'] +['ec', 'ed'] +['ch', 'ains'] +['ãĢĤ', 'ï¼Ī'] +['_GR', 'APH'] +['Ġfue', 'led'] +['éľ', 'Ģ'] +['H', 'p'] +['å¤', 'į'] +['T', 'iles'] +['Ġa', 'unque'] +['J', 'C'] +['Ġhost', 'age'] +['ĠE', 'sk'] +['Ġm', 'av'] +['Ġgest', 'ion'] +['Ġb', 'anners'] +['}', '{$'] +['.int', 'Value'] +[".'", '"ĊĊ'] +['_M', 'ATRIX'] +['Ġce', 'ased'] +['ĠG', 'OD'] +['_CAM', 'ERA'] +['.Allow', 'User'] +['tr', 'acked'] +['C', 'ook'] +['b', 'airro'] +['(', 'company'] +['Ġview', 'point'] +['.get', 'Writer'] +['ĠN', 'ets'] +['w', 'ives'] +['Ġ(', '))Ċ'] +['example', 'Modal'] +['ĉ', 'child'] +['Ġmyth', 'ology'] +['Ġ//', '"'] +['_', 'axes'] +['ib', 'old'] +['.D', 'ark'] +['ĠMax', 'well'] +['Ġg', 'pointer'] +['olic', 'itud'] 
+['B', 'at'] +['ul', 'ner'] +['bal', 'anced'] +['mail', 'er'] +['Ġcont', 'empor'] +['æīĭ', 'æľº'] +['("', '__'] +['Ġ"', ')"'] +['re', 'ar'] +['ĠHu', 'ang'] +[']', "')Ċ"] +['×', '©'] +['FT', 'A'] +['ĠCalling', 'Convention'] +['ĠOutput', 's'] +['P', 'k'] +['.Re', 'ference'] +['lect', 'ual'] +['Ġ)', ':ĊĊ'] +['Ġbrace', 'let'] +['ug', 'er'] +['ĉ', 'Error'] +['S', 'weet'] +['("/', '");Ċ'] +['h', 'x'] +['Ġun', 'reasonable'] +['Inter', 'preter'] +['Ġlo', 'ft'] +['_product', 'o'] +['Ġsoci', 'etal'] +['.P', 'arser'] +['ĠAd', 'apt'] +['.', 'foo'] +['(', 'where'] +['.F', 'eature'] +['ĠYam', 'aha'] +['g', 'lass'] +['For', 'ge'] +['Ġprohib', 'its'] +['Ġcapac', 'ities'] +['Ġíķ¨', 'ìĪĺ'] +['Ġper', 'mutation'] +['Ġih', 'm'] +['F', 'ld'] +['el', 'ial'] +['========', '===Ċ'] +['@', 'Configuration'] +['Ġge', 'ared'] +['ios', 'o'] +['iest', 'a'] +['trans', 'lations'] +['Input', 'Change'] +['Pop', 'ular'] +['ĠPL', 'US'] +['Ġv', 'f'] +['_F', 'ree'] +['b', 'box'] +['Ġcaus', 'al'] +['PI', 'LE'] +['Ġsch', 'ö'] +['Ġiron', 'ic'] +['M', 'ir'] +['.', '@'] +['åį', 'Ĺ'] +['Ġè', 'ĩ'] +['R', 'ew'] +['ul', 'ence'] +['fl', 'en'] +['Ġcan', 'Activate'] +['-', 'response'] +['Ġacc', 'ents'] +['ign', 'ored'] +['°', 'F'] +['.Dependency', 'Injection'] +['ĉ', 'point'] +['Ġconting', 'ent'] +['Ġsqu', 'ash'] +['Ġpar', 'ms'] +['ĠC', 'emetery'] +['Ġdelta', 'Time'] +['ĠD', 'OS'] +['Ġvan', 'ished'] +['аÑĢам', 'еÑĤ'] +['ĠD', 'PS'] +['t', 'foot'] +['ĠZ', 'us'] +['_IN', 'STALL'] +['G', 'AN'] +['Ġar', 'b'] +['Ġmunicipal', 'ities'] +['Into', 'Constraints'] +['AutoresizingMask', 'IntoConstraints'] +[',', 'image'] +['_', 'ignore'] +['Ġdanger', 'ously'] +['quis', 'a'] +['pl', 'uck'] +['Ġhar', 'us'] +['up', 'pe'] +['Http', 'Exception'] +['Br', 'acket'] +[".'", "'ĊĊ"] +['ĠT', 'ol'] +['ĠView', 'er'] +['zb', 'ollah'] +['.Code', 'Analysis'] +['ì', 'nh'] +['Ġcorrect', 'amente'] +['.d', 'a'] +['ĠAl', 'ger'] +['×', 'IJ'] +['ba', 'um'] +['ĠPan', 'ther'] +['part', 'icipant'] +['å¿', 'ħ'] +['-s', 'up'] +['Ġem', 'ulator'] +['Ġf', 
'ading'] +['ĠW', 'olver'] +['cre', 'ates'] +['Ġbook', 'ings'] +['.Q', 'uestion'] +['§', 'è¡Į'] +['Ġstress', 'es'] +['Ġre', 'written'] +['.PI', 'PE'] +['ed', 'es'] +['Ġc', 'bd'] +['":', '"/'] +['Ġenh', 'ancements'] +['_s', 'y'] +['B', 'IN'] +['ĠSl', 'ip'] +['Ins', 'pect'] +['ĠW', 'eg'] +['Ġcon', 'gregation'] +['Ġ_', ':'] +['_r', 'm'] +['Frame', 'buffer'] +["Ġ'&", '#'] +['ĠFall', 'out'] +['Is', 'Required'] +['ĠPear', 'son'] +['ĠF', 'ACT'] +['Ġrel', 'ie'] +['ĉ', 'box'] +['ĠShe', 'pherd'] +['ĠWiki', 'Leaks'] +['ĠCollect', 'or'] +['Ġres', 'ized'] +['method', 'Name'] +['Ġevent', 'Type'] +['ĠA', 'then'] +['Des', 'criptors'] +['Ġb', 'ers'] +['-', 'oper'] +['ĠInitial', 'ly'] +['å', '¡'] +['_B', 'TN'] +['ĠĠĠĠĠĠĠĠĠ', 'čĊ'] +['á', 'b'] +['_c', 'ampaign'] +['_w', 'atch'] +['F', 'ord'] +['-date', 'picker'] +['Ġvis', 'c'] +['Ġsat', 'u'] +['_s', 'ms'] +['Ġcont', 'ador'] +['-s', 'vg'] +['ĠDO', 'I'] +['$', 'args'] +['Ġkn', 'ob'] +['.B', 'OLD'] +['Ġdeb', 'ated'] +['img', 's'] +['sock', 'opt'] +['tr', 'uth'] +['ĠFe', 'es'] +['Ġh', 'Wnd'] +['_f', 'ood'] +['Ġab', 'ras'] +['Ġnot', 'ions'] +['ĠT', 'od'] +[':', 'create'] +['ĠConf', 'lict'] +['Us', 'uarios'] +['OT', 'OS'] +['Ġm', 'sm'] +['K', 'HTML'] +['([', '('] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠ'] +['Ġ}', ']'] +['w', 'izard'] +['Ġm', 'ientras'] +['Ġdata', 'List'] +['Ġemerg', 'es'] +['Äĥ', 'ng'] +['.Read', 'Int'] +['PG', 'A'] +['ILL', 'ISE'] +['I', 'Enumerator'] +['(t', 'uple'] +['Christ', 'mas'] +['Look', 'AndFeel'] +['og', 'enerated'] +['Ġ#', 'ĊĊ'] +['control', 'led'] +['Ġex', 'quisite'] +['Ġa', 'cest'] +['Read', 'Write'] +['G', 'ain'] +['ãĢį', 'ãĢĮ'] +['Ġcopyright', 'ed'] +['Ġdo', 'om'] +['.Table', 'LayoutPanel'] +['ĠD', 'ort'] +['Ġch', 'ili'] +['Ġwer', 'k'] +['ĠEVENT', 'S'] +['ĠBe', 'acon'] +['Ġship', 'ments'] +['Ġse', 'bagai'] +['up', 'on'] +['ut', 'om'] +['.con', 'verter'] +['.Drop', 'Table'] +['={', '}Ċ'] +['f', 'ic'] +['~', 'ĊĊ'] +['Ġlesb', 'ians'] +['_n', 'a'] +['Fore', 'ign'] 
+['ĉ', 'then'] +['/', 'ms'] +['Ġor', 'i'] +['get', 'Property'] +['ĉsn', 'printf'] +['hes', 'ion'] +['ãģ', '¤'] +['"}', ',"'] +['Ġac', 'rylic'] +['P', 'ers'] +['@', 'Enable'] +['I', 'sl'] +['(C', 'ard'] +['.', 'Stack'] +['L', 'icensed'] +['_G', 'UID'] +[':', 'title'] +['Ġh', 'ust'] +['Ġprincipal', 'Table'] +['an', 'itize'] +['/', 'embed'] +['Ġens', 'ured'] +['ĠE', 'GL'] +['ÙĪ', 'ر'] +['ĠåĪ', 'Ĩ'] +['/', ',Ċ'] +['Ġfundra', 'iser'] +['Key', 'Name'] +['Ġmarch', 'ed'] +['_VAL', 'UES'] +['ĠSc', 'enario'] +['Ġmet', 'ic'] +['_ass', 'oci'] +['ĠPast', 'or'] +['ĉĉĉĉĉĉĉĉ', 'ĉĉĉĉĉĉĉĉĉĉ'] +['er', 'ate'] +['Ġinv', 'itations'] +['quo', 'ise'] +['Ġbl', 'aming'] +['Ġd', 'aring'] +['UM', 'MY'] +['Ġrich', 'er'] +['em', 'aker'] +['ĠIdent', 'ification'] +['ĠìĿ', '¸'] +['ĠBinding', 'Flags'] +['ch', 'as'] +['Ġresil', 'ient'] +['_p', 'g'] +['Ġre', 'leg'] +['ĠI', 'RA'] +['ST', 'E'] +['Ġtr', 'actor'] +['-', 'loading'] +['ĠPre', 'viously'] +['ĠV', 'acc'] +['/', 'be'] +['Ġn', 'Ã¥r'] +['Ġurl', 'encode'] +['ĠNor', 'folk'] +['.Re', 'lease'] +['ĠNe', 'utral'] +['ä¸Ń', 'åĽ½'] +['ĠAr', 'lington'] +['Ġalleg', 'es'] +['ĠW', 'riters'] +['Test', 'er'] +['ĠR', 'ally'] +['Ġc', 'á'] +['ĉ', 'Print'] +['Ġâĩ', 'Ĵ'] +['ĠUser', 'Controller'] +['ĠSeek', 'ing'] +['.V', 'AL'] +['List', 'Node'] +['_', 'ff'] +['ĠPhill', 'ip'] +['FA', 'CT'] +['Ġc', 'aramel'] +['ĠM', 'ultip'] +['ĠCom', 'pared'] +['ĠSer', 'bia'] +['Ł', '³'] +['Ġrev', 'ive'] +['ĠK', 'anye'] +['Ġver', 'ge'] +['ĠBulg', 'aria'] +['get', 'Body'] +['Ġ|', '>'] +['ce', 'ph'] +['.DateTime', 'Picker'] +['."', ';ĊĊ'] +['ĠT', 'ie'] +[',', 'item'] +['Ġm', 'enn'] +['G', 'as'] +['och', 'a'] +['_v', 'irtual'] +['Ġmaster', 'piece'] +['_se', 'quences'] +['L', 'TE'] +['ĠSub', 'mission'] +['Call', 'er'] +['$', '\\'] +['S', 'port'] +['ag', 'us'] +['Constraint', 'Maker'] +['Ġcol', 'oc'] +['Ġw', 'ig'] +['ĠÐ', '£'] +['ĉ', 'Array'] +['Look', 's'] +['ĠGT', 'A'] +['.st', 'eps'] +['atch', 'ewan'] +['_r', 'anges'] +['ext', 'Alignment'] +['ĠBren', 'nan'] +['Ġab', 'straction'] 
+['uler', 'Angles'] +['.m', 'isc'] +['Ġantib', 'odies'] +['Ġexponent', 'ial'] +['ĠCH', 'ANNEL'] +['exp', 'ense'] +["'", 'y'] +['Ġdetect', 'ives'] +['Ġpur', 'ported'] +['Y', 'STEM'] +['Ġradio', 'active'] +['ĠLat', 'ina'] +['.Enc', 'oding'] +['.T', 'AG'] +['x', 'in'] +['D', 'egree'] +['ur', 'acion'] +['pr', 'ices'] +['ĠRefer', 'entialAction'] +['Ġr', 'arity'] +['Ġp', 'iles'] +['g', 'ende'] +['_project', 's'] +['_g', 'lobals'] +['.start', 'Time'] +['Ġê', 'µ¬'] +['SE', 'CTION'] +['_p', 'ublish'] +['F', 'ault'] +['DD', 'L'] +['_p', 'rior'] +['M', 'om'] +['Ġth', 'icker'] +['Ġsequ', 'elize'] +['Ġessential', 's'] +['str', 'as'] +['in', 'tr'] +['>(', '()'] +['.man', 'agement'] +['e', 'il'] +['éĹ', 'Ń'] +['A', 'ware'] +['.C', 'ity'] +['ĠAr', 'bit'] +['_D', 'M'] +['_key', 'board'] +['L', 'Object'] +['-', 'webpack'] +['ĠNew', 'port'] +['Ġprincipal', 'Column'] +['leg', 'ant'] +['Ġp', 'allet'] +['Ġfract', 'ure'] +['Ġg', 'mail'] +['.M', 'eta'] +['A', 'bove'] +['.Key', 'Event'] +['j', 'it'] +['_mac', 'ro'] +['_P', 'USH'] +['á»', '©'] +['/', 'controller'] +['åĬł', 'è½½'] +['Ġsuperf', 'icial'] +['exter', 'ity'] +['Ġmens', 'agem'] +['W', 'ind'] +['ist', 'on'] +['.open', 'api'] +['и', 'ÑĢов'] +['ĠSerial', 'izer'] +['uct', 'ive'] +['Ġz', 'ar'] +['Pl', 'aces'] +['.St', 'atic'] +['B', 'a'] +['Ġin', 'advert'] +['ĠIndones', 'ian'] +['_IP', 'V'] +['(h', 'orizontal'] +['Ġget', 'Title'] +['ide', 'press'] +['ĠConsole', 'Color'] +['ip', 'ers'] +['$', 'out'] +['Ġfest', 'ive'] +['Ġeven', 'ings'] +['.Get', 'Data'] +['uit', 'ka'] +['ĠManual', 's'] +['uss', 'ed'] +['_M', 'ax'] +['.Ch', 'at'] +['ĠA', 'ircraft'] +['=', 'com'] +['FO', 'UND'] +['ap', 'ro'] +['Ġtre', 'asures'] +['_al', 'ive'] +['Ġgad', 'get'] +['ek', 'ing'] +['Button', 'Down'] +['B', 'rowsable'] +['.PER', 'MISSION'] +['P', 'ASSWORD'] +['ĠH', 'ASH'] +['f', 'é'] +['\\', 'TestCase'] +['LO', 'SS'] +['o', 'thers'] +[',', 'J'] +['Ġassh', 'ole'] +['wer', 'k'] +['Ġm', 'ã'] +['.', 'ie'] +['ev', 'il'] +['kont', 'akte'] 
+['////////////////////////////////////////////////////////////////////////////////', 'Ċ'] +['=', 'sys'] +['ĉ', 'lock'] +['--', ';ĊĊ'] +['_F', 'UN'] +['Fill', 'Color'] +['ó', 'a'] +['pre', 'nd'] +['Ġcompress', 'or'] +['M', 'other'] +['ĠAr', 'cher'] +['.g', 'oto'] +['Ġwür', 'de'] +['Ġbam', 'boo'] +['ï¼', 'İ'] +['ĠT', 'rees'] +['Ġb', 'umper'] +['Ġsa', 'usage'] +['ĠEl', 'asticsearch'] +['Ġhor', 'izontally'] +['ĠG', 'ul'] +['Im', 'mutable'] +['Ġlos', 'er'] +['Ġabort', 'ed'] +['-d', 'emo'] +['ĠH', 'atch'] +['Ġund', 'e'] +['Ġprocess', 'o'] +['-c', 'all'] +['In', 'come'] +['å', 'ĥ'] +['_', 'returns'] +['\']."', "'"] +['(s', 'w'] +['C', 'BS'] +['am', 'ilies'] +['ĠYour', 'self'] +['ĠH', 'olt'] +['.M', 'ON'] +['à§', 'ĩ'] +['ÑĪ', 'е'] +['an', 'on'] +['ĠFont', 'Awesome'] +['produ', 'cer'] +['j', 'r'] +['Ġm', 'au'] +['ĉint', 'er'] +['Ġdish', 'onest'] +['Ġmagn', 'a'] +['ĠCollect', 'ive'] +['Ġvra', 'iment'] +['Ġcho', 'ix'] +['st', 'ay'] +['Ġweld', 'ing'] +['r', 'ising'] +[',', 'min'] +['ĠF', 'ate'] +['g', 'lob'] +['RGB', 'A'] +['Ġdet', 'te'] +['V', 'en'] +['Ġembarrass', 'ment'] +['.DE', 'LETE'] +['greg', 'ar'] +['-re', 'nder'] +['(b', 'ucket'] +['">', 'ĊĊĊ'] +['.wait', 'Key'] +['Bus', 'y'] +['Ġdifferent', 'iation'] +['ĠC', 'ST'] +['.Con', 'stant'] +['Ġline', 'Number'] +['(m', 'atches'] +['Ġweb', 'socket'] +['Ġbar', 'red'] +['Ġpued', 'es'] +['M', 'ono'] +['C', 'ORE'] +['I', 'ID'] +['ĠĠĠĠ', 'čĊčĊ'] +['Ġpúb', 'lico'] +['lean', 'ing'] +['Ġcleans', 'ing'] +['Ġcr', 'is'] +['ĠDev', 'ils'] +['_SET', 'TING'] +['unt', 'ary'] +['.', ');Ċ'] +['Ċ', 'ĠĠĠĊ'] +['[', 'curr'] +['ts', 'y'] +['ĠAlex', 'is'] +['rit', 'el'] +['Ġpet', 'roleum'] +['.pre', 'processing'] +['m', 'atter'] +['For', 'Result'] +['-', 'license'] +['Ġtrav', 'ellers'] +['ĠDispatch', 'er'] +['enn', 'ifer'] +['Ġdigest', 'ive'] +['P', 'ED'] +['hib', 'ition'] +['MAS', 'ConstraintMaker'] +['ĠW', 'att'] +['Ben', 'ef'] +['.set', 'View'] +['d', 'to'] +['TE', 'E'] +['ĠPel', 'osi'] +['_EX', 'TRA'] +['Ġmed', 'als'] +['x', 'hr'] +['fore', 
'cast'] +['Ġn', 'argin'] +['oun', 's'] +['-f', 'ill'] +['_CUR', 'SOR'] +['Ġsuperv', 'ised'] +['Ġtur', 'f'] +['ĠEd', 'gar'] +['POS', 'ITION'] +['Ġcategory', 'Id'] +['â', 'ī'] +['_', 'ER'] +['á»§', 'a'] +['Sh', 'own'] +['.', 'll'] +['_POL', 'ICY'] +['(),', "'"] +['ĠPre', 'v'] +['ĠString', 'Field'] +['ĉG', 'lobal'] +['ass', 'ed'] +['Through', 'out'] +['o', 'stringstream'] +['.awt', 'extra'] +['Ġslo', 'pes'] +['ĠSe', 'quential'] +['Ġgi', 'orn'] +['Ġz', 'elf'] +['Ġvers', 'atility'] +['lene', 'ck'] +['.c', 'gi'] +['Ġdou', 'bling'] +['ĠBang', 'kok'] +['Ġbu', 'urt'] +['Ġusu', 'ário'] +['st', 'udio'] +['Ġje', 'unes'] +['Ġm', 'uted'] +['Ġ', 'ips'] +['_f', 'raction'] +['&&', '('] +['Ġst', 'unt'] +["');", '?>čĊ'] +['Ġev', 'apor'] +['b', 'able'] +['ĠPR', 'ICE'] +['Ġæ', '³'] +['lu', 'cent'] +['Ġv', 'amp'] +['ĠTechn', 'ician'] +['Ġuniqu', 'eness'] +['M', 'es'] +['ur', 'ban'] +['.param', 'etrize'] +['ĠRe', 'play'] +['S', 'essions'] +['em', 'br'] +['-Americ', 'ans'] +['_PRO', 'XY'] +['Ġp', 'ian'] +['Ġtri', 'e'] +['ĠD', 'estructor'] +['Game', 'State'] +['ĠIM', 'F'] +['ch', 'in'] +['Ġport', 'e'] +['ĠSw', 'al'] +['åŁ', 'İ'] +['Sub', 'string'] +['im', 'ing'] +['/L', 'ibrary'] +['Ġfright', 'ened'] +['w', 'rites'] +['Ġrecurs', 'os'] +['ar', 'Result'] +['_INIT', 'IALIZ'] +['ĠBad', 'ge'] +['_c', 'rc'] +['E', 'ight'] +['ĠDIST', 'INCT'] +['Ġth', 'ro'] +['@', 'Xml'] +['ĠLegend', 'ary'] +['-t', 'witter'] +['_e', 'asy'] +['Ġ+', '++'] +['(D', 'ATA'] +['.L', 'ocale'] +['Ġk', 'ä'] +['Ġn', 'urt'] +['Ġcr', 'uis'] +['_', 'ios'] +['Ġsens', 'ing'] +['_L', 'ine'] +['Ċ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĊ'] +['pon', 'g'] +['ole', 'on'] +['Ġwild', 'card'] +['ç͍æĪ·', 'åIJį'] +['Ġbeg', 'ging'] +['R', 'od'] +['ĠÃ', 'İ'] +['_C', 'ELL'] +['Research', 'ers'] +['.', 'selector'] +['_', 'ing'] +['Ġaspir', 'ing'] +['Ġimm', 'ortal'] +['Ġy', 'min'] +['_', 'robot'] +['Ġpl', 'ur'] +['B', 'TC'] +['ĠD', 'ID'] +['Ġpier', 'cing'] +['*', 'u'] +['_DEFIN', 'ED'] +['ĠTh', 'i'] +['ita', 'ire'] +['(m', 'edia'] +['-', 'ons'] +['Ġche', 'fs'] 
+['Ġ"*', '.'] +['/', 'AP'] +['Ġraz', 'or'] +['Ġsearch', 'Data'] +['Ġ=', '&'] +['Ġ', 'ãĢĤ'] +['Ġm', 'ourn'] +['ting', 'ham'] +['Ġo', 'li'] +['ĠVern', 'on'] +['_R', 'S'] +['ŀ', 'æĢ§'] +['Ġf', 'ácil'] +['ang', 'en'] +['cel', 'ain'] +['Ġa', 'il'] +['le', 'st'] +['ĠQ', 'COMPARE'] +['g', 'ain'] +['ĠÎ', 'µ'] +['ĠK', 'ob'] +['ĠF', 'ault'] +['_config', 's'] +['ç»ĵ', 'æŀľ'] +['.', '+'] +['cal', 'ar'] +['(color', 's'] +['M', 'ul'] +['_', 'ART'] +['Ġexperiment', 'ing'] +['erm', 'en'] +['ĠAng', 'lo'] +['.Fixed', 'Single'] +['Se', 'a'] +['Ġc', 'txt'] +['.s', 'lider'] +['C', 'ollapse'] +['G', 'rey'] +['Ġf', 'ld'] +['-pro', 'of'] +['.cap', 'acity'] +['get', 'Parent'] +['ĠCom', 'pliance'] +['Ġburg', 'l'] +['-', 'rec'] +['Ġover', 'written'] +['M', 'U'] +['Ġrout', 'ers'] +['ĉ', 'Model'] +['Ġfantas', 'ies'] +['av', 'ian'] +['_p', 'rec'] +['ĠSc', 'andin'] +['Ġ//', '<'] +['/o', 'ct'] +['Ġceremon', 'ies'] +['Month', 's'] +['und', 'y'] +['Ġqu', 'ed'] +['ĠN', 'ou'] +['ĠV', 'ibr'] +['.r', 'gb'] +['Ġcit', 'rus'] +['Ġbr', 'aces'] +['-upper', 'case'] +['get', 'Table'] +['Ġdop', 'o'] +['ĠK', 'err'] +['_CH', 'ILD'] +['-', 'cloud'] +['ĉ', 'Matrix'] +['Ġgard', 'ening'] +['S', 'ing'] +['al', 'most'] +['Require', 'ments'] +['ugu', 'ay'] +['(', 'Property'] +['sub', 'scriber'] +['FA', 'ST'] +['re', 'action'] +['(l', 'p'] +[')', '})Ċ'] +['`', ').'] +['.w', 'allet'] +['_ex', 'change'] +['.Max', 'imum'] +['ĠVer', 'b'] +['âĶ', 'ģ'] +['()', '<'] +['ï¼Ľ', 'Ċ'] +['RO', 'T'] +['C', 'ARD'] +['ub', 'it'] +['{', '@'] +['_k', 'el'] +['ĠTool', 'tip'] +['My', 'SQL'] +['Main', 'Activity'] +['ar', 'f'] +['Ġm', 'align'] +['Ġse', 'inen'] +['ap', 'ist'] +['Ġ<', '%'] +['Method', 'Impl'] +['M', 'il'] +['ĠM', 'ick'] +['.de', 'pend'] +['<', 'ID'] +['Ġpredict', 'ive'] +['ĠAP', 'PLICATION'] +['le', 'f'] +['dim', 'ensions'] +['Ġconoc', 'er'] +['/', 'conf'] +['ĠTr', 'acy'] +['F', 'oto'] +['_rem', 'aining'] +['=', 'file'] +['Ġpage', 'Index'] +['ĠPar', 'ish'] +['Ġt', 'exas'] +['ĠM', 'AGIC'] +['ĠH', 'ew'] +['d', 'ifference'] 
+['Ġalt', 'ura'] +['c', 'um'] +['ĉdata', 'Type'] +['Ġcaracter', 'es'] +['avi', 'ours'] +['ĠV', 'OID'] +['è¿', 'ij'] +['P', 'UBLIC'] +['B', 'io'] +['ĠstringBy', 'Appending'] +['Parse', 'Exception'] +['ĠS', 'uff'] +['ĠN', 'orton'] +['/d', 'etails'] +['.n', 'ull'] +['>>', '&'] +['ĉ', 'ok'] +['-l', 'ow'] +['.', 'usuario'] +['n', 'ested'] +['X', 'B'] +['OUR', 'S'] +['.Border', 'Color'] +['Ġb', 'row'] +['ĠÐ', 'ķ'] +['cor', 'r'] +['ĠRed', 'skins'] +['.get', 'Tag'] +['.get', 'Transaction'] +['Ġst', 'igma'] +['hard', 't'] +['ĠPlayer', 'Prefs'] +['als', 'y'] +['uc', 'son'] +['L', 'anguages'] +['ĠOl', 'ivia'] +['Ġt', 'ac'] +['Ġb', 'li'] +['Ġc', 'aval'] +['Ġconsolid', 'ated'] +['Ġper', 'il'] +['Ġde', 'le'] +['Ġform', 'ulated'] +['Ġhigh', 'ways'] +['.sp', 'awn'] +['==', '$'] +['ĠN', 'iet'] +['Ġv', 'eggies'] +['yp', 'o'] +['-r', 'ule'] +['ĠV', 'ie'] +['/e', 'pl'] +['Ġenf', 'ants'] +['string', 'Literal'] +['Ġtou', 'ghest'] +['buy', 'er'] +['Ġcov', 'ariance'] +['Ġil', 'i'] +['ĠSoph', 'ie'] +['ĠB', 'AB'] +['Ġ"', '),'] +['ĠU', 'k'] +['current', 'Index'] +['_user', 'data'] +['.code', 'c'] +['ĠPun', 'jab'] +['ĠSN', 'P'] +['l', 'ol'] +['adv', 'ance'] +['Ġcom', 'fy'] +['Json', 'Ignore'] +['Ġfashion', 'able'] +['ĠI', 'CON'] +['Ġor', 'a'] +['ĠP', 'ricing'] +['<', 'num'] +['ĠI', 'RC'] +['ER', 'V'] +['ĠMe', 'in'] +['ĠID', 'ictionary'] +['AD', 'OW'] +['is', 'New'] +['ĠDev', 'on'] +['at', 'l'] +['(request', 'Code'] +['ĉ', 'PreparedStatement'] +['IM', 'PORT'] +['Ġmar', 'ital'] +['_SELECT', 'ED'] +['get', 'Response'] +['ar', 'Down'] +['B', 'V'] +['ib', 'Name'] +['ĠP', 'ATCH'] +['ä', 'än'] +['Ġda', 'ar'] +['ĠFile', 'Mode'] +['Ġm', 'arty'] +['.Spring', 'Application'] +['c', 'ene'] +['amp', 'oline'] +['get', 'Size'] +['Rest', 'art'] +['æķ', 'Ī'] +['.project', 's'] +['ĠEthi', 'opia'] +['Ġstatus', 'es'] +['T', 'ION'] +['(b', 'g'] +['ĠX', 'unit'] +['Temp', 'orary'] +['ĠEng', 'agement'] +['Ġx', 'f'] +['Ġprox', 'ies'] +['Ġgen', 'esis'] +['Pager', 'Adapter'] +['ĠSl', 'ave'] +['Ġsung', 'lasses'] +['ĠCh', 
'loe'] +['Ġko', 'ji'] +['ad', 'em'] +['ĉ', 'JSONObject'] +['Î', '³'] +['Ġh', 'ors'] +['*', 'w'] +['ó', 'r'] +['es', 'ch'] +['Ġcritic', 'ised'] +['z', 'ial'] +['ĠSale', 'm'] +['.Vert', 'ical'] +['ĠR', 'ash'] +['>', 'E'] +['ter', 'ing'] +['/s', 'creens'] +['Ġheight', 'ened'] +['аÑĢ', 'ÑĤ'] +['Author', 'ities'] +['_b', 'box'] +['ün', 'st'] +['.font', 'Size'] +['ĠBO', 'OLEAN'] +['div', 'ide'] +['ĠSlo', 'ven'] +['uc', 'er'] +['Ù', 'Ĵ'] +['st', 'ub'] +['Ġnavig', 'ating'] +[':', 'animated'] +['_N', 'OW'] +['_v', 'ect'] +['}', '{Ċ'] +['@', '('] +['Ġtele', 'com'] +['Ġcontract', 'ing'] +['ĠAss', 'ange'] +['Ġextract', 'ing'] +['Ġgr', 'ö'] +['c', 'obra'] +['.D', 'IS'] +['Ġcr', 'ab'] +['Ġtw', 'itch'] +['Ġvert', 's'] +['Ġreject', 's'] +['ĉ', 'format'] +['Ġreg', 'eneration'] +['.S', 'ys'] +['s', 'olve'] +['ĉd', 'ialog'] +['sh', 'i'] +['m', 'eter'] +['(b', 'est'] +['valid', 'ators'] +['Ġon', 'wards'] +['Ġg', 'uru'] +['Ġmoder', 'ator'] +['ow', 'ied'] +['ex', 'periment'] +['r', 'ub'] +['Ġm', 'qtt'] +['ĠCa', 'ucas'] +['Ġnational', 'ism'] +['Ġm', 'ange'] +['ĉ', 'ImGui'] +['/', 'Edit'] +['Ġin', 'h'] +['Ġint', 'ellig'] +['ero', 'kee'] +['ĉ', 'export'] +['Ġdiscrim', 'inate'] +['sub', 'tract'] +['ĠM', 'oodle'] +['ens', 'er'] +['ĠGuid', 'es'] +['R', 'AP'] +['-h', 'ot'] +['_gr', 'p'] +['.p', 'icture'] +['X', 'A'] +['Ġinit', 'View'] +['_Com', 'm'] +['Ġoverd', 'ose'] +['Ġ+', 'ĊĊ'] +['ĠSil', 'ent'] +['show', 's'] +['Ġinterpol', 'ate'] +['Form', 'ation'] +['Ġb', 'isc'] +['mark', 'ets'] +['(', 'SC'] +['Z', 'e'] +['ĠNetwork', 'ing'] +['Ġad', 'renal'] +['ĠG', 'uns'] +['ete', 'or'] +['Decl', 'ared'] +['orget', 'own'] +['Ġk', 'arena'] +['/', 'password'] +['_address', 'es'] +['ITER', 'AL'] +['B', 'uzz'] +['ĠCon', 'way'] +['(c', 'ase'] +['P', 'WD'] +['he', 'iro'] +['(', 'act'] +['**', 'čĊ'] +['());ĊĊ', 'Ċ'] +['Ġan', 'v'] +['Ġ.', '.ĊĊ'] +['(Menu', 'Item'] +['(m', 'ail'] +['_section', 's'] +['ĉ', 'net'] +['Ġpl', 'ut'] +['Ġw', 'rench'] +['/', 'object'] +['ĠI', 'st'] +['ĠV', 'IS'] +['/p', 'ub'] +['al', 
'ten'] +['Ġguit', 'ars'] +['Ġantibiot', 'ic'] +['ï¼', 'ĸ'] +['Â', '¹'] +['Ġ"', '+"'] +['form', 'ula'] +['Ġbab', 'es'] +['ĠP', 'rompt'] +['Ġen', 'im'] +['/', 'player'] +['ĉ', 'ref'] +['Ġby', 'Äĩ'] +['Ġconsum', 'es'] +['ĠH', 'ast'] +['ĠT', 'ao'] +["Ġ'", '))Ċ'] +['Ġcl', 'am'] +['Ġthigh', 's'] +['Ġmot', 'if'] +['Api', 'Operation'] +['ĠW', 'L'] +['get', 'C'] +['ĉf', 'lags'] +['oint', 'ments'] +['Ġeconom', 'ical'] +['need', 'le'] +['x', 'ls'] +['pr', 'actice'] +['ut', 'zer'] +['time', 'ofday'] +['-', 'output'] +['Ġfind', 'ById'] +['ĠBudd', 'y'] +['Ðŀ', 'ÑĤ'] +['Se', 'ven'] +['ĠB', 'ark'] +['Ġenv', 'oy'] +['_al', 'gorithm'] +['åĪ', '©'] +['Ġball', 'istic'] +['ç§', '»'] +['r', 'ades'] +['ĉd', 'oc'] +['rodu', 'cing'] +['ĠE', 'ating'] +['Un', 'mount'] +['/data', 'Tables'] +['_b', 'onus'] +['Ġl', 'itt'] +['pp', 's'] +[')', 'localObject'] +['per', 'f'] +['ĠHel', 'vetica'] +['sh', 'utdown'] +['/', 'ml'] +['.t', 'okens'] +['ĠHard', 'core'] +[',', 'row'] +['/b', 'g'] +['Sc', 'aler'] +['âĢĶ', 'as'] +['_log', 'its'] +['âĢĻ', 'int'] +['ĉ', 'App'] +['Imp', 'licit'] +['.F', 'printf'] +['ET', 'O'] +['Ġterr', 'a'] +['Ġpossess', 'ing'] +['.r', 'strip'] +[',', '),'] +['=', 'yes'] +['ĠStr', 'ipe'] +['?', '='] +['ne', 'utral'] +['.g', 'ood'] +['Ġk', 'ennen'] +['ĠS', 'ung'] +['f', 'ault'] +['ystate', 'change'] +['Can', 'adian'] +["','", '".$'] +['ĠM', 'its'] +['æ', 'nd'] +['ĠSTR', 'UCT'] +['ĠURL', 'WithString'] +['ĠCom', 'pass'] +['Ġ--', 'ĊĊ'] +['ĠNS', 'LayoutConstraint'] +['|', 'min'] +['-ad', 'just'] +['Ġreb', 'uilt'] +['L', 'IGHT'] +['/', 'se'] +['-m', 'ount'] +['vp', 'n'] +['valid', 'ated'] +['(Q', 'Object'] +['Ġign', 'ition'] +['ĠCharg', 'ers'] +['RYPT', 'O'] +[']initWith', 'Frame'] +['ĠFl', 'uid'] +['Ġcad', 're'] +['Ġnomin', 'ations'] +['Ne', 'ill'] +['ĠH', 'ou'] +['Ġcurrent', 's'] +['_g', 'ene'] +['(in', 'p'] +['Par', 'is'] +['z', 'ÄĻ'] +['ag', 'gregate'] +['Ġass', 'oc'] +['weet', 'ed'] +['err', 'at'] +['âĢĵ', 'ĊĊ'] +["Ġ'/", "',Ċ"] +['fix', 'ture'] +['ĠH', 'ighest'] +['amb', 'ient'] 
+['Ġch', 'mod'] +['Ġcon', 'te'] +['Ġsens', 'ual'] +['Ġgar', 'ment'] +['z', 'ers'] +['ĠPower', 'ed'] +['dom', 'ains'] +['R', 'eward'] +['i', 'omanip'] +['Ġcock', 'pit'] +['out', 'file'] +['Ġbuilt', 'in'] +['Ġins', 'isting'] +['.', 'vars'] +['zip', 'code'] +['Ġ', '����'] +['f', 'ails'] +['Ġconsolid', 'ation'] +['_', 'oid'] +['Plan', 'et'] +['Ġ=', '",'] +['ĉ', 'el'] +['UIL', 'T'] +['ät', 'z'] +['af', 'ari'] +['ĠMc', 'Cl'] +['Tim', 'eline'] +['Est', 'a'] +['Ġfr', 'am'] +['Y', 'E'] +['Ġcere', 'bral'] +['Of', 'Month'] +['ĠP', 'regn'] +['Ġкл', 'аÑģÑģ'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĊ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĊ'] +['ĠF', 'res'] +['Appro', 'ved'] +['.S', 'pecial'] +['ĠProtest', 'ant'] +['Ġallerg', 'y'] +['_p', 'cm'] +['ĉC', 'opyright'] +['Ġsuper', 'Class'] +['"', 'strconv'] +['ĠMoh', 'amed'] +["Ġ'", '//'] +['Fore', 'Color'] +['Ar', 'thur'] +['ĠJ', 'ungle'] +['Ġve', 'ins'] +['S', 'ad'] +['Ġback', 'ups'] +['ĠOp', 'inion'] +['û', 't'] +['Ġinter', 'mitt'] +['ody', 'n'] +['ĠChrist', 'ina'] +['Ġand', 're'] +['Ġevac', 'uation'] +['pa', 'lette'] +['h', 'orse'] +['ĠRes', 'ident'] +['ĠHass', 'an'] +['.N', 'il'] +['Ġa', 'isle'] +['ĠG', 'rowing'] +['Ġblog', 'info'] +['/s', 'ql'] +['_io', 'ctl'] +['Sc', 'aling'] +['ĠMon', 'ad'] +['_c', 'pp'] +['ĠH', 'utch'] +['ĠApple', 'WebKit'] +['Exp', 'ense'] +['_J', 'OB'] +['Ġpoint', 'less'] +['From', 'Body'] +['ant', 'al'] +['Ġdepict', 'ing'] +['ĠC', 'ELL'] +['Ġref', 'in'] +['ĠC', 'NC'] +['ì¹', 'ĺ'] +['_dim', 'ensions'] +['ĠS', 'AN'] +['Ġa', 'ft'] +['Ġfoot', 'steps'] +['cc', 'oli'] +['_PH', 'ONE'] +['/m', 'ath'] +['-k', 'ind'] +['ĠMe', 'ans'] +['ich', 'ael'] +['.g', 'una'] +['Ġinaug', 'uration'] +['-dr', 'iving'] +['(', 'delete'] +['Ġtotal', 'Count'] +['_M', 'C'] +['.Ext', 'ension'] +['Com', 'mercial'] +['Ġz', 'Index'] +['<', 'Customer'] +['"', 'g'] +['-sh', 'are'] +['Ġp', 'act'] +['ag', 'ara'] +['ĠS', 'IL'] +['_m', 'odes'] +['ĠM', 'olecular'] +['Ġsystem', 'atically'] +['<', 'G'] +['_s', 'cr'] +['ĠO', 'ro'] +['as', 'ers'] +['Ġb', 'ic'] +['Ġdest', 'roys'] +['PI', 
'PE'] +['.Start', 'Position'] +['Ġc', 'á»§a'] +['ire', 'z'] +['.B', 'unifu'] +['_F', 'unction'] +['Ġs', 'ü'] +['_f', 'uture'] +['ĠWe', 'alth'] +['ĠNatur', 'ally'] +['æĢ', '»'] +['_y', 'es'] +['Ġabrupt', 'ly'] +['String', 'Encoding'] +['ĠCGPoint', 'Make'] +['Ġz', 'h'] +['Ġimp', 'erson'] +['Ġpiv', 'otal'] +['ĠSom', 'alia'] +['Ġsegment', 'ation'] +['_AN', 'AL'] +['ĠLogin', 'Component'] +['Cons', 'ult'] +['Ġtr', 'uncated'] +[']', '";Ċ'] +['.get', 'Config'] +['Ġintern', 'ship'] +['B', 'aby'] +['ê°', 'ľ'] +['Ġstrengthen', 'ed'] +['_M', 'I'] +['b', 'asket'] +['Ġnicht', 's'] +['ĠTV', 's'] +['ĠSh', 'an'] +['ãĤ', 'µ'] +['rac', 'use'] +['.Re', 'LU'] +['/', 'interfaces'] +['ĠgetItem', 'Count'] +['Ġret', 'iring'] +['Ġspecial', 's'] +['Ġentity', 'Manager'] +['bel', 'ief'] +['Ġs', 'older'] +['da', 'ughter'] +['ij', 'kl'] +['Ġutil', 'izes'] +['.f', 'ixed'] +['S', 'U'] +['Ġdr', 'astic'] +['Ġh', 'acks'] +['gr', 'und'] +['ĠM', 'U'] +['ĠSt', 'arter'] +['.Com', 'ponents'] +['_m', 'otor'] +['Gold', 'en'] +['Ġl', 'odge'] +['Ġ', '));'] +['ĠCor', 'inth'] +['иÑĩ', 'еÑģÑĤво'] +['ón', 'ico'] +['gre', 'SQL'] +['ĠFl', 'uent'] +['Ġmar', 'c'] +['.Load', 'Scene'] +['.Group', 's'] +['Ġer', 'h'] +['ĠAut', 'umn'] +['St', 'opped'] +['Ġitalian', 'o'] +['Ġmin', 'ions'] +['ĠAssert', 'ions'] +['Ġm', 'ux'] +['B', 'u'] +['Ġ----------------------------------------------------------------', '--------------------------------'] +['ĉ', 'up'] +['read', 'ystatechange'] +['_M', 'eta'] +['Ġcurrent', 'Date'] +['ĠChap', 'man'] +['Und', 'o'] +['Se', 'an'] +['ap', 'r'] +['Ġpar', 'm'] +['_', 'icons'] +['ĠSt', 'a'] +['á', 'z'] +['Ġsub', 'division'] +['Ġalter', 'ing'] +['P', 'NG'] +['ponent', 'ial'] +['Ġpost', 'gres'] +['ĠB', 'DS'] +['-ex', 'istent'] +['ĠBrad', 'ford'] +['ĠO', 'MX'] +['_W', 'HITE'] +['_PRO', 'GRAM'] +['q', 'c'] +['Ġtypings', 'Slinky'] +['ĠP', 'ics'] +['_M', 'ETA'] +['IT', 'TER'] +['_sub', 'scription'] +['IRON', 'MENT'] +['ĠHy', 'undai'] +['();ĊĊ', 'ĊĊ'] +['ĠØ', '³'] +['Ġj', 'ac'] +['Ġelimin', 'ates'] 
+[')', '});Ċ'] +['Ġcomp', 'rend'] +['ĉ', 'insert'] +['_f', 'aces'] +['">', '$'] +['Ġeb', 'ay'] +['Ġcapt', 'ive'] +['pl', 'iant'] +['ĠCalcul', 'ates'] +['ol', 'ta'] +['est', 'ing'] +['_re', 'vision'] +['Ġm', 'ús'] +['+', 'm'] +['","', '","'] +['WH', 'AT'] +['Ġcompassion', 'ate'] +['h', 'arga'] +['[', 'random'] +['Ġmod', 'ulo'] +['(s', 'n'] +['Ġoccup', 'ations'] +['////', 'Ċ'] +['ĉ', 'board'] +['ĠB', 'alk'] +['wi', 'Äħ'] +['ĠW', 'ifi'] +['.Pro', 'file'] +[':m', 'aj'] +['ĉm', 'at'] +['LOCK', 'S'] +['(j', 'Button'] +["Ġ('", '$'] +['M', 'ur'] +['æĮ', 'ī'] +['b', 'ble'] +['Ġf', 'rog'] +['-h', 'ide'] +['Ġbroad', 'caster'] +['à¸', 'ŀ'] +['ha', 'led'] +['Ġam', 'using'] +['_predict', 'ions'] +['_in', 'tr'] +['Ġe', 'agle'] +['аÑĤ', 'елÑĮ'] +['Ġget', 'List'] +['ps', 'ilon'] +['Ġcharacter', 'ization'] +['AR', 'DS'] +['Ġre', 'location'] +['Ġr', 'ulers'] +['P', 'AY'] +['ĠDef', 'initely'] +['_A', 'ction'] +['Ġclos', 'ures'] +['Ġfact', 'ual'] +['odyn', 'amic'] +['Ġpreca', 'utions'] +['nie', 'j'] +['ĠPart', 'ies'] +['ĠSub', 'aru'] +['Ġcous', 'ins'] +['ar', 'beit'] +['.m', 'oney'] +['gun', 'ta'] +['(', 'and'] +['get', 'item'] +['.Style', 'Priority'] +['Ġsl', 'id'] +['single', 'ton'] +['Ġg', 'arn'] +['ĠP', 'AS'] +['Ġd', 'azz'] +['a', 'ż'] +['Ġbog', 'us'] +['ĠM', 'og'] +['Ġrival', 'ry'] +['is', 'ol'] +['Ġland', 'marks'] +['ñ', 'as'] +['B', 'ern'] +['ĠSach', 's'] +['Ġ"', ')ĊĊ'] +['Ġhost', 'ility'] +['_m', 'ex'] +['m', 'ere'] +['M', 'ot'] +['p', 'ictureBox'] +['Def', 'ense'] +['Ġaffid', 'avit'] +['other', 'wise'] +['.d', 'irectory'] +['_', 'UnityEngine'] +['-b', 'log'] +['.s', 'kin'] +['ph', 'em'] +['Ap', 'ellido'] +['er', 'chant'] +['[', 'class'] +['Ġw', 'art'] +['."', '['] +['ale', 'ur'] +['/', 'back'] +['ĠĠĠĠ', 'ĉĠĠĠ'] +['Ġprecip', 'itation'] +['Ġob', 'struction'] +['Ġp', 'Obj'] +['Ġr', 'upt'] +['UCK', 'ET'] +['ay', 'e'] +['æİ', 'Ĵ'] +['g', 'x'] +['Ġe', 'cl'] +['Ġsecre', 'cy'] +['/', 'Header'] +['ĠLes', 'b'] +['Ġle', 'i'] +['ĠBullet', 'in'] +['Ġgive', 'away'] +['.H', 'ome'] +['_RO', 
'OM'] +['"', 'W'] +['Ġcow', 'ork'] +['_', 'ra'] +['ĠC', 'ycling'] +['ĠP', 'aw'] +['Ġpup', 'il'] +['/', 'arch'] +['ĠFile', 'Utils'] +['é¦', 'ĸ'] +['r', 'sp'] +['Ġfreed', 'oms'] +['ĠL', 'ear'] +['}`', ').'] +['Ġbow', 'ls'] +['/b', 'lock'] +['_log', 'ging'] +['Ġmeth', 'ane'] +['Ġhorn', 's'] +['Ġwonder', 'fully'] +['Ġalter', 'ations'] +['Ġex', 'ile'] +['ls', 'en'] +['_p', 'ause'] +['_L', 'ANGUAGE'] +['ĠUS', 'DA'] +['_m', 'ysql'] +['_AM', 'OUNT'] +['ĠL', 'IFE'] +['Ġyoung', 'sters'] +['Ġri', 'ots'] +['[', 'E'] +['Ġun', 'forgettable'] +[',', '},Ċ'] +['Dis', 'posed'] +['ĠAss', 'assin'] +['UN', 'G'] +['ĠNew', 'sp'] +['User', 'Service'] +[':', 'aload'] +['+', "',"] +['Ġsett', 'lers'] +['Ġscre', 'ams'] +['Ġincon', 'venience'] +['.R', 'otate'] +['Ġj', 'ars'] +['ĠP', 'uzzle'] +['Ġm', 'est'] +['ars', 'i'] +['ĠSh', 'arma'] +['|', '('] +['.d', 's'] +['ĠSac', 'red'] +['_e', 'vt'] +['Ġexpress', 'es'] +['Ġh', 'och'] +['ĠD', 'uch'] +['.c', 'alls'] +['th', 'r'] +['ĠShe', 'ffield'] +['.Alert', 'Dialog'] +['Ġrad', 'ically'] +['Ġtr', 'ous'] +['Ġprev', 'ailing'] +['ĠWW', 'II'] +['âĢĻ', 'n'] +['ens', 'ely'] +['ĠY', 'esterday'] +['ĠSir', 'ius'] +['Ġkill', 'ers'] +['ĠF', 'FT'] +['Ġo', 'val'] +["')", ':čĊ'] +['Ġìłķ', 'ë³´'] +['our', 'age'] +['ĠCheck', 'box'] +['Work', 'book'] +['.def', 'er'] +['_f', 'loor'] +['Ġc', 'ouncill'] +['Ġnors', 'ke'] +['mo', 'il'] +['ore', 'a'] +['Ġmarket', 'ed'] +['_S', 'UR'] +['x', 'AA'] +['Ġst', 'ained'] +['e', 'ut'] +['ĠM', 'eng'] +['Ġi', 'eee'] +['.', 'extern'] +['eg', 'ie'] +['Ġr', 'app'] +['ĠPy', 'ongyang'] +["'", 'class'] +['M', 'ob'] +['Ġinitial', 'Value'] +['_w', 'ave'] +['Ġj', 'ab'] +['Ġmascul', 'ine'] +['Ġampl', 'ifier'] +['Ġt', 'ty'] +['Path', 'Component'] +['_', 'xt'] +['ĠG', 'FP'] +['/', 'sec'] +['ĉdis', 'patch'] +['mark', 'down'] +['ĠS', 'chn'] +['bo', 'le'] +['·', '·'] +['mouse', 'move'] +['Ġerr', 'Msg'] +['Ġas', 'ign'] +['_m', 'ono'] +['To', 'Selector'] +['ĠZ', 'u'] +['(R', 'ect'] +['ĠError', 'Code'] +['lat', 'in'] +['ang', 'ible'] +['v', 'tk'] 
+['CG', 'Size'] +['P', 'okemon'] +['Ġclass', 'mates'] +['Ġattract', 's'] +['ĠT', 'atto'] +['ult', 'an'] +['ol', 'óg'] +['Ġhalt', 'ed'] +['à¤', '¨'] +['ĠK', 'art'] +['Ġ', 'ue'] +['_Init', 'Structure'] +['Test', 'Class'] +['ĠAir', 'bnb'] +['_', '",'] +['Ġchar', 'coal'] +['Ġip', 'c'] +['ĠSt', 'retch'] +['.g', 'lide'] +['lates', 'AutoresizingMaskIntoConstraints'] +['Ġpot', 'ion'] +['ITT', 'LE'] +['Ġcount', 'ert'] +['_h', 'd'] +['pre', 'pared'] +['Ad', 's'] +['ĠV', 'ampire'] +['rob', 'ots'] +['.Create', 'Index'] +['Status', 'Label'] +['Ġt', 'ucked'] +['af', 'ür'] +['U', 't'] +['Ġswe', 'ater'] +['_F', 'N'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĉ'] +['ata', 'ka'] +['Ġeyeb', 'rows'] +['ac', 'oes'] +['ud', 'en'] +['.LinearLayout', 'Manager'] +['Ġsw', 'ay'] +['Ġmult', 'in'] +['()', ')))Ċ'] +['ĠNS', 'UInteger'] +['ĠMy', 'Base'] +['Part', 'ner'] +['uts', 'chen'] +['ĠC', 'ater'] +['.setBackground', 'Color'] +['Ġaccompl', 'ishment'] +['_pro', 'blem'] +['.d', 'td'] +['Ġpage', 'Number'] +['Ġj', 'ackets'] +['Ġcro', 'pped'] +['u', 'els'] +['ĠH', 'ep'] +['Ġc', 'apped'] +['*', 'Math'] +['_callback', 's'] +['Ġpub', 'b'] +['ĠBrun', 'swick'] +['.res', 'pond'] +['["', '_'] +['Ġbed', 'ding'] +['hyth', 'm'] +['O', 'X'] +['(s', 'peed'] +['Ġpestic', 'ides'] +['Ġ----', '---'] +['.Bl', 'ue'] +['Ġnood', 'les'] +['ĠGo', 'es'] +['Ġs', 'aver'] +['o', 'xy'] +['_com', 'pletion'] +['ĠSw', 'inger'] +['Ġget', 'Date'] +['Ġmind', 'ed'] +['int', 'egration'] +['ĠLot', 'us'] +['(st', 'op'] +["(',", "');Ċ"] +['Ġflood', 's'] +['ĠWork', 'flow'] +['Ġerupt', 'ed'] +['Mac', 'ro'] +['ĠSau', 'ce'] +['Ġevent', 'Name'] +['\\', 'Input'] +['Break', 'ing'] +['ĉ', 'when'] +['_p', 'w'] +['IND', 'ER'] +['ĠWell', 'ness'] +['Ġvox', 'el'] +['ĠM', 'ell'] +['ĠM', 'EDIA'] +['SE', 'NS'] +['ĠFund', 's'] +['ĠM', 'ild'] +['<', 'Array'] +['-', 'this'] +['ump', 'ed'] +['/f', 'w'] +['ĠDb', 'Context'] +['W', 'I'] +['girl', 's'] +['H', 'OW'] +["');", '?>Ċ'] +['Ġtempt', 'ing'] +['Ġtest', 'ament'] +['Ġb', 'ible'] +['Ġconsult', 'ed'] +['ĠIndex', 'Error'] 
+['è¨', 'ĺ'] +['Ġkey', 'pad'] +['izz', 'o'] +['(', 'ok'] +['Ġwhats', 'app'] +['ĠRemote', 'Exception'] +['Ġteam', 'ed'] +['âĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶ', 'âĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶ'] +['»', ','] +['Ġget', 'Time'] +['di', 'ag'] +['iss', 'y'] +['Ġh', 'ed'] +['Ġkn', 'ots'] +['j', 'om'] +['Ġfun', 'nel'] +['-m', 'ails'] +['Ġexport', 'ing'] +['ĠV', 'L'] +['ĠK', 'arn'] +['ĠBuddh', 'ism'] +['ĠAll', 'an'] +['_R', 'ADIUS'] +['Ġw', 'ording'] +['ĠFor', 'get'] +['ĠCor', 'ona'] +['ip', 'hy'] +['Ġlim', 'burg'] +['ugg', 'y'] +['ĠUser', 'Repository'] +['im', 'in'] +['(e', 'le'] +['Ġlabel', 'led'] +['ç¤', '¾'] +['ĠH', 'erman'] +['.q', 'q'] +['Ġ"', '));Ċ'] +['ie', 'ber'] +['.Trans', 'late'] +['ry', 'n'] +['Ġdes', 'env'] +['um', 'd'] +['Sim', 'ply'] +['ĉm', 'ode'] +['R', 'pc'] +['ĠVal', 'encia'] +['Ġstaff', 'ers'] +['Ġsel', 'v'] +['ĠSpi', 'ke'] +['Ġdel', 'ic'] +['Ġer', 'u'] +['_D', 'T'] +['J', 'udge'] +['á»', 'ķ'] +['ĠBas', 'in'] +['.m', 'utable'] +['"', 'url'] +['Ġtar', 'iff'] +['ĠSlee', 've'] +['Ġfl', 'are'] +['.drop', 'out'] +['Ġbr', 'ides'] +['))', ',čĊ'] +['_con', 'straints'] +['de', 'struct'] +['Out', 'line'] +['Ġdisappe', 'ars'] +['_lock', 'ed'] +['ĠNS', 'LocalizedString'] +['ck', 'e'] +['ĉ', 'null'] +['ad', 'resse'] +['Ġto', 'pping'] +['ĠJ', 'oker'] +['b', 'ishop'] +['но', 'ÑģÑĤÑĮ'] +['and', 'ering'] +['_', 'amp'] +['=', 'time'] +['_S', 'pace'] +['_P', 'ULL'] +["'", '='] +['Ġant', 'iqu'] +['Ġc', 'ach'] +['___', 'ĊĊ'] +['ON', 'ES'] +['о', 'Ñı'] +['Ġun', 'read'] +['.p', 'olicy'] +['oooo', 'oooo'] +['ëŁ', '¬'] +['Ġu', 'sted'] +['ĠRe', 'ce'] +['Ġal', 'lem'] +['ãĥ¼', 'ãĤ¹'] +['ĠThought', 's'] +['ve', 'illance'] +['istr', 'ate'] +['_l', 'ane'] +['Ġfam', 'ed'] +['.Get', 'Name'] +['Ġsmo', 'other'] +['ĠQual', 'ified'] +['az', 'ers'] +['_', 'geo'] +['F', 'ax'] +['ĠM', 'inds'] +['ĠR', 'aises'] +['Ġtrans', 'cripts'] +['Con', 'versation'] +['Ġremark', 'ed'] +['ëĤ', 'ĺ'] +['d', 'ling'] +['Ġdeploy', 'ing'] +['Ġshared', 'Application'] +['Ġk', 'p'] +['FontAwesome', 'Icon'] +['_d', 'ummy'] +['reib', 
'en'] +['ĠJane', 'iro'] +['Direction', 's'] +['.get', 'Bean'] +['s', 'ass'] +['Ġcommand', 'ers'] +['v', 'ation'] +['error', 'Code'] +['ĠAl', 'loy'] +['.local', 'ized'] +['Ð', 'ij'] +['Ġdish', 'washer'] +['ĠSou', 'p'] +['N', 'u'] +['_D', 'efault'] +['Ġune', 'ven'] +['Ġ/>', '";Ċ'] +['-B', 'ased'] +['Ġseam', 'lessly'] +['-', 'null'] +['ĠX', 'C'] +['Ġst', 'ew'] +['(d', 'elay'] +['AT', 'ORS'] +['ĠWhe', 'eler'] +['"', '', 'H'] +['e', 'ast'] +['.', 'air'] +['âĢľ', 'But'] +['Object', 'Context'] +['success', 'fully'] +['_l', 'and'] +['Ġfold', 's'] +['_CO', 'ORD'] +['Ġsub', 'po'] +['.get', 'Address'] +['in', 'str'] +['Material', 's'] +['Ñĥ', 'ÑģÑĤ'] +['de', 'posit'] +['-l', 'ast'] +['_GR', 'AY'] +['=', 'find'] +['Ġmut', 'ant'] +['Ġlesb', 'ienne'] +['let', 'cher'] +['RO', 'UGH'] +['ure', 'ka'] +['.c', 'apture'] +['Ġen', 'n'] +['Ġ([', '['] +['ĠFl', 'u'] +['Ġtask', 'Id'] +['ĠHus', 'sein'] +['.f', 'older'] +['Ġa', 'usterity'] +['ISTR', 'ATION'] +['_', 'Impl'] +['注', 'æĦı'] +['Ġdec', 'ree'] +['-', 'chat'] +['Ġimp', 'lication'] +['Ġguess', 'es'] +['ul', 'kan'] +['An', 'alytics'] +['.', 'plus'] +['COM', 'MAND'] +['е', 'ли'] +['»', 'ĊĊ'] +['_S', 'ITE'] +['Ġequal', 'To'] +['Support', 'FragmentManager'] +['ĠRec', 'ording'] +['å®Į', 'æĪIJ'] +['Ġbag', 'gage'] +['Ġpitch', 'ers'] +['ĠE', 'h'] +['o', 'que'] +['ĉc', 'nt'] +['Ġ=>', '$'] +['/', 'foo'] +['IR', 'A'] +['ĠSat', 'ellite'] +['bor', 'ah'] +['Ġ}}', '"Ċ'] +['ĠEnd', 's'] +['ĠSpr', 'ay'] +[',', 'param'] +['.Ch', 'rome'] +['*', 'q'] +['th', 'ought'] +['ibr', 'ated'] +['Ġth', 'ieves'] +['Ġbenefici', 'aries'] +['Enter', 'ed'] +['ottes', 'ville'] +['Ġveter', 'in'] +['By', 'ID'] +['qu', 'ipe'] +['um', 'ption'] +['-', 'unit'] +['Execution', 'Context'] +['@', 's'] +['ĠG', 'iov'] +['.Tool', 'Tip'] +['_f', 'riend'] +['(', 'attributes'] +['Ġdump', 'ing'] +['ĠJ', 'C'] +['_D', 'OCUMENT'] +['ĠArm', 'our'] +['(', 'insert'] +['.Horizontal', 'Alignment'] +['ĠQ', 'ed'] +['ãģĦ', 'ãģ¾ãģĻ'] +['/g', 'it'] +['ĠY', 'YYY'] +['ĠCard', 'iff'] +['Ġap', 'a'] 
+['organ', 'ic'] +['ĠWhere', 'as'] +['Ġæ', 'Ŀ'] +['ĠM', 'ia'] +['Ġdemol', 'ition'] +['Ġsc', 'ars'] +['Ġp', 'ai'] +['Ġre', 'tries'] +['Ġr', 'q'] +['ĠDen', 'is'] +['(', 'Utils'] +['Ġallev', 'iate'] +['ĠP', 'IC'] +['id', 'ue'] +['Ġacknowled', 'ging'] +['Ġ//', '////////////////////////////////'] +['ç¡®', 'å®ļ'] +['Ä', '«'] +['\\', 'Json'] +['.b', 'inary'] +['Ġx', 'type'] +['sign', 'als'] +['ĠAp', 'pearance'] +['&', 'r'] +['}', 's'] +['C', 'i'] +['ĠI', 'llum'] +['por', 'ate'] +['h', 'og'] +['Ġindex', 'Of'] +['\\', 'Command'] +['_par', 'allel'] +['ĠSher', 'lock'] +['í', 'ĥ'] +['Ġ"', '")čĊ'] +['////////////////////////////////////////////////////////////////', '////////////////////////////////'] +['Ġcritic', 'ize'] +['ĠSo', 'ap'] +['ĠMatch', 'er'] +['Ġgr', 'illed'] +['*', 'T'] +['Ġad', 'ore'] +['ull', 'ing'] +['Ġjed', 'och'] +['_ref', 's'] +['lean', 'up'] +['ĠJ', 'AXB'] +['Ġro', 'ses'] +['ĠL', 'iam'] +['size', 'i'] +['Ġget', 'char'] +['Ġtar', 'de'] +['-to', 'oltip'] +['Ġqual', 'ifier'] +['ĠInter', 'mediate'] +['_W', 'indow'] +['ĠMal', 'ta'] +['Dis', 'connect'] +['ew', 'here'] +['Camp', 'o'] +['Ġirr', 'ational'] +['led', 'o'] +['ĠD', 'N'] +['ARG', 'V'] +['Ġout', 'ro'] +['Ġth', 'irteen'] +['Jose', 'ph'] +['M', 'AR'] +['/g', 'l'] +['J', 'ess'] +['ĠPsych', 'iat'] +['Ġpadding', 'Bottom'] +['-', 'loop'] +['/', 'fonts'] +['_se', 'en'] +['Te', 'ams'] +['React', 'DOM'] +['(m', 'an'] +['(x', 'path'] +['.get', 'SimpleName'] +['>(', '*'] +['ĠP', 'vt'] +['Ġel', 'ders'] +['Ġp', 'ies'] +['.user', 'Agent'] +['-', 'region'] +['ĠGree', 'ks'] +['(f', 'ragment'] +['st', 'u'] +['Ġcouncil', 's'] +['Ġst', 'amina'] +['ĠGod', 'dess'] +['è', '¥¿'] +['Ġphilosoph', 'ers'] +['Ġpers', 'one'] +['ĠL', 'ose'] +['ĠCL', 'R'] +['ĠD', 'ocs'] +['Ġso', 'ak'] +['ĠHOLD', 'ER'] +['Ġb', 'ells'] +['hash', 'Code'] +['R', 'ATE'] +['_WE', 'IGHT'] +['in', 'ous'] +['end', 'ra'] +['oph', 'obic'] +['Ġpro', 'se'] +['Ġfin', 'ely'] +['/o', 'auth'] +['(s', 'pace'] +['ad', 'ge'] +['ĠM', 'ama'] +['Ġstring', 'Buffer'] +['Ġst', 
'int'] +['Ġmis', 'ma'] +['Ġvill', 'ains'] +['ĠCrime', 'a'] +['Ġdipl', 'oma'] +['Ġпо', 'Ñģл'] +['ĠBe', 'a'] +['(j', 'oin'] +['Ġíķ', '´'] +['CH', 'AT'] +['per', 'ing'] +['ĠC', 'ros'] +['Ġmon', 'keys'] +['Ġpred', 's'] +['yl', 'a'] +[',,', ','] +['Ġvibr', 'ator'] +['ĠN', 'U'] +['åħ', 'Ī'] +['f', 'ant'] +['z', 'et'] +['Ġb', 'ietet'] +['un', 'ft'] +['sw', 'orth'] +['.F', 'low'] +['Ġpsy', 'ched'] +['ĠContin', 'ental'] +['>', 't'] +['Ġqu', 'ilt'] +['.', 'UP'] +['Ġexpans', 'ive'] +['Dis', 'pose'] +['(l', 'anguage'] +['C', 'aps'] +['_Z', 'ONE'] +['Ġrec', 'ycle'] +['ĠMan', 'aged'] +['current', 'Color'] +['.b', 'roadcast'] +['sign', 'In'] +['.p', 'rom'] +['ll', 'u'] +['ue', 'blo'] +['Ġpunch', 'es'] +['Ġautom', 'at'] +['Ġassign', 'ing'] +['Ġcreate', 'User'] +['ĠAll', 'ied'] +['Ġconduct', 'or'] +['Ĥ', '¨'] +['Ġs', 'addle'] +['Ġd', 'ni'] +['omed', 'ical'] +['-W', 'est'] +['Positive', 'Button'] +['Ġit', 'alic'] +['?', '['] +['(tr', 'igger'] +['Ġele', 'phants'] +['":"', '","'] +['Ġcal', 'iber'] +['raft', 'ed'] +['d', 'igits'] +['Ġmar', 'shal'] +['mill', 'iseconds'] +['mark', 'ers'] +['m', 'om'] +['/', 'place'] +['Ġhol', 'istic'] +[':', 't'] +['#', ','] +['Ġb', 'oto'] +['Ġnause', 'a'] +['ĠSh', 'ooting'] +['ite', 'ch'] +['Ġtext', 'Status'] +['<', 'Class'] +['ĠDes', 'cribe'] +['Ġbuff', 'et'] +['g', 'il'] +['Ġlog', 'its'] +['std', 'call'] +['mod', 's'] +['ĠSk', 'ull'] +['ĠB', 'are'] +['h', 'ope'] +['ĠIn', 'tr'] +['F', 'air'] +['ĉ', 'pt'] +['Ġacompan', 'h'] +['Ġf', 'kk'] +['_r', 'pc'] +['Inst', 'alled'] +['_', 'ans'] +['.get', 'Minutes'] +['â̦', '"ĊĊ'] +['-', 'thread'] +['Ġpres', 'chool'] +['AIL', 'S'] +['Ġdiff', 'ic'] +['(', 'convert'] +['ĠN', 'ath'] +['ĠDO', 'J'] +['Ġreg', 'imes'] +['Ġenthusi', 'ast'] +['Ġwarrant', 'ies'] +['Ġfasc', 'inated'] +['_b', 'inding'] +['_N', 'ot'] +['oft', 'en'] +['_R', 'W'] +['/m', 'ail'] +['Ġtitle', 'Label'] +['Ġvill', 'agers'] +['ĠJ', 'iang'] +['Ġsw', 'agger'] +['.Row', 'Index'] +['_img', 's'] +['rap', 'y'] +['VER', 'AGE'] +['.', 'Up'] +['Ġno', 'op'] 
+['c', 'io'] +['ĉ', 'ST'] +['Ġdecre', 'ment'] +['Ġmagn', 'esium'] +['_', 'rotate'] +['S', 'it'] +['Ġnieu', 'we'] +['Ġter', 'med'] +['íķ', '©ëĭĪëĭ¤'] +['Ġur', 'g'] +['_t', 'ouch'] +['Ġsw', 'arm'] +['Ġcl', 'ave'] +['th', 'est'] +['ĠL', 'af'] +['H', 'X'] +['ĠH', 'ulk'] +['Ġplaint', 'ext'] +['ĠSof', 'a'] +['get', 'Session'] +['L', 'ed'] +['Ġecosystem', 's'] +['he', 'i'] +['ĠK', 'ills'] +['Ġhus', 'bands'] +['Ñħ', 'ÑĢан'] +['(d', 'om'] +['_t', 'iles'] +['Nib', 'Name'] +['Ġdon', 'ating'] +['.', 'acc'] +['Ġlifes', 'pan'] +['.b', 'n'] +['_RG', 'CTX'] +['æ', '¥'] +['ans', 'en'] +['Ġmod', 'elling'] +['Layout', 'Params'] +['ĠonChange', 'Text'] +['rs', 'a'] +['-', 'location'] +['.P', 'e'] +['(b', 'us'] +['(s', 'ong'] +['Ġprodu', 'k'] +['ĠSH', 'OULD'] +['ĠC', 'J'] +['Ġs', 'os'] +['ĠHome', 'Controller'] +['.load', 'ed'] +['(D', 'ocument'] +['.s', 'ocial'] +['t', 'iles'] +['Ġl', 'ame'] +['=', 'df'] +['.parse', 'Long'] +['Ġpr', 'ac'] +['Ġdet', 'ox'] +['ĠV', 'E'] +['Ġpunt', 'os'] +['Ġdo', 'ctr'] +['Ġan', 'cor'] +['CA', 'PE'] +['Ġc', 'mb'] +['çĦ', '¶'] +['*)', '"'] +['://', '/'] +['Value', 'Type'] +['Ġmort', 'gages'] +[';', 'q'] +['ĠRock', 'ets'] +['s', 'port'] +['UG', 'C'] +['ct', 's'] +['ãĤ', 'ģ'] +['ie', 'ur'] +['ĠAppe', 'al'] +['(n', 'b'] +['////////////////////////////////////////////////', '////////'] +['IM', 'ATION'] +['ĠC', 'res'] +['ĠMan', 'ip'] +['C', 'ause'] +['at', 'ypes'] +['man', 'ufacturer'] +['#', '----------------------------------------------------------------------------'] +['Ġsp', 'or'] +['es', 'on'] +['Ġpun', 'ched'] +['Ġbook', 'marks'] +['ĠBul', 'k'] +['Complete', 'Listener'] +['ĠTalk', 'ing'] +['ĠEr', 'nest'] +['Ġrub', 'bish'] +['k', 'ills'] +['ĠDE', 'FIN'] +['Ġneighbour', 'ing'] +['ar', 'lo'] +['ĠP', 'CA'] +['ĉm', 'atrix'] +['lo', 'k'] +['Ġat', 'las'] +['ĠG', 'ur'] +['Ġw', 'yn'] +['-n', 'egative'] +['Ġt', 'ul'] +['Ġre', 'lic'] +['ĠV', 'oltage'] +['ĠPre', 'is'] +['ĠJ', 'NICALL'] +['ĠPM', 'ID'] +['ak', 'et'] +['ĉ', 'attr'] +['Ġet', 'iqu'] +['ĠM', 'J'] +['ĠG', 
'mail'] +['cl', 'r'] +['_exec', 'ution'] +['éĶ', '®'] +['pos', 'itor'] +['.', 'af'] +['N', 'r'] +['Ge', 'orgia'] +['Top', 'ology'] +['Ġperch', 'é'] +['Ġmus', 'lim'] +['Ġepid', 'emi'] +['Ġsab', 'ot'] +['act', 'us'] +['Ġë', 'ĮĢ'] +['ĠIO', 'Error'] +['.', 'est'] +['p', 'refs'] +['ĠKr', 'ish'] +['.Read', 'Key'] +['NAS', 'A'] +['u', 'ção'] +['_D', 'b'] +['umer', 'ator'] +['W', 'ide'] +['(st', 'atement'] +['.end', 'point'] +['....', '.....'] +['Ġ[', '*'] +['stream', 's'] +['m', 'time'] +['P', 'x'] +['at', 'r'] +['Ġt', 'pl'] +['R', 'oman'] +['Ġscen', 'ic'] +['.n', 'z'] +['ĠSe', 'conds'] +['sub', 'menu'] +['Ġìĭ', '¤í'] +['_b', 'undle'] +['Ġde', 'ÄŁ'] +['ĠS', 'isters'] +['pre', 'ferences'] +['Ġport', 'a'] +['Ad', 'visor'] +['max', 'Length'] +['ĠG', 'REAT'] +['__', '(Ċ'] +['ole', 'st'] +['ĠLabel', 's'] +['Ġen', 'fer'] +['ĠĠĠĠĠĠ', 'ĊĊ'] +['ĠThe', 'ft'] +['_F', 'ILL'] +['ĠW', 'ise'] +[')', 'application'] +['un', 'ami'] +['>', '())Ċ'] +['ADD', 'RESS'] +['B', 'ST'] +['et', 'zt'] +['ĠQ', 'gs'] +['S', 'ense'] +['Exception', 'Handler'] +['ĠCh', 'u'] +['.get', 'OwnProperty'] +['Ġexerc', 'ised'] +['iot', 'ic'] +['ĠRe', 'leases'] +['Ġp', 'interest'] +['ol', 'ie'] +['is', 'oft'] +['Ġsequ', 'encing'] +['Ġpad', 're'] +[']', '));čĊ'] +['(r', 'adius'] +['.m', 'ed'] +['aint', 'ies'] +['.Object', 'Model'] +['Ġem', 'ple'] +['Ġseg', 'uro'] +['St', 'ars'] +['Ġqual', 'itative'] +['lem', 'n'] +['á»', '±'] +['>', '").'] +['Ġg', 'x'] +['-c', 'ert'] +['ĠAST', 'M'] +['Ġfull', 'name'] +['Ġte', 'lemetry'] +['ĠCamb', 'odia'] +['_', 'ul'] +['ĠCl', 'are'] +['C', 'USTOM'] +['Q', 'C'] +['ĠUn', 's'] +['ĠHTTP', 'S'] +['ĠPark', 'inson'] +['ancy', 'box'] +["','", '.'] +['T', 'ue'] +['.get', 'Last'] +['Ġab', 'i'] +['Äħ', 'd'] +['A', 'st'] +['ĠEd', 'iting'] +['.Un', 'ity'] +['j', 'mp'] +['Ġm', 'ats'] +['Ġshared', 'Preferences'] +['Capt', 'ain'] +['.page', 'Size'] +['Ġr', 'tl'] +['Ġan', 'meld'] +['Runtime', 'Object'] +['Ġdemand', 'e'] +['("', ';'] +['se', 'ite'] +['-head', 'ed'] +['ĠK', 'ra'] +['ĠF', 'ONT'] +['`', 
'\\'] +['Class', 'NotFoundException'] +['.', 'avg'] +['atic', 'al'] +['A', 'j'] +['Ġpermit', 'ting'] +['Pro', 'j'] +['ERR', 'Q'] +['Ġcre', 'ampie'] +['ĠBuy', 'er'] +['-mod', 'ules'] +['ĠSund', 'ays'] +['|', '`Ċ'] +['Ġday', 'time'] +['Ġ+', '('] +['Ġgl', 'itch'] +['ĠOper', 'and'] +['Ġtox', 'ins'] +['iny', 'a'] +['D', 'NS'] +['ĠS', 'as'] +['C', 'ake'] +['ĠNation', 'als'] +['.add', 'To'] +['Ġs', 'inking'] +['Ġcompreh', 'ension'] +['Ġsc', 'or'] +['ag', 'ements'] +['Ġt', 'ard'] +['Ġmarch', 'ing'] +['ĠM', 'TV'] +['Ġs', 'ane'] +['Create', 'Info'] +['áº', '¯'] +['Ġend', 'Index'] +['ĉ', 'layout'] +['ĠåIJ', 'į'] +['S', 'ITE'] +['ĠT', 'HERE'] +['Ġ[', "{'"] +['opath', 'ic'] +['Ġtrans', 'mitter'] +['/', 'body'] +['Ġp', 'und'] +['ĠC', 'losing'] +['Ġset', 'attr'] +['Ġbound', 'ed'] +['At', 'las'] +['sum', 'ing'] +['(t', 'imes'] +['par', 'er'] +['yn', 'om'] +['fe', 'it'] +['Ġf', 'rem'] +['-', 'leg'] +['ĠBr', 'as'] +['>', '#'] +['Ġì¶', 'ľëł¥'] +['ĠIN', 'STANCE'] +['ĠC', 'ouch'] +['_host', 's'] +['lik', 'elihood'] +['.M', 'arker'] +['ĠM', 'asks'] +['Ġcere', 'al'] +['util', 'ities'] +['Ġelement', 'al'] +['Ġdist', 'orted'] +['in', 'active'] +['c', 'ry'] +['W', 'L'] +['UPPORT', 'ED'] +['.Th', 'rows'] +['/s', 'chema'] +['ser', 'ie'] +['."', "',"] +['ĠBened', 'ict'] +['-p', 'icker'] +['ig', 'gs'] +['ĠPir', 'ate'] +['åij¨', 'æľŁ'] +['ĠTh', 'ema'] +['ĠSouth', 'ampton'] +['Ġarray', 'With'] +['ĠPaul', 'a'] +['Ġpredict', 'or'] +['-', 'Ass'] +['.user', 'id'] +['Ġper', 'i'] +['Ġexagger', 'ated'] +['ur', 'ate'] +['arse', 'ille'] +['ĠCon', 'cent'] +['ĠP', 'ik'] +['Ġ@', '_;ĊĊ'] +['Ġform', 'ations'] +['Ġden', 'omin'] +['"/>', '.Ċ'] +['ended', 'or'] +['Ġpan', 'cre'] +['Ġam', 't'] +['Ġon', 'Resume'] +['on', 'Delete'] +['ĠB', 'CH'] +[')', '("'] +['m', 'ovement'] +['Ġpot', 'assium'] +['', 'čĊčĊ'] +['ĠMah', 'm'] +['}', '";ĊĊ'] +['Ġd', 'q'] +['ĠPublish', 'ers'] +['ĠAm', 'pl'] +['ĠDani', 'elle'] +['Ġt', 'ern'] +['èµ', '·'] +['no', 'ÅĽÄĩ'] +['e', 'in'] +['ĠAsync', 'Storage'] +['un', 'ger'] +['rou', 'w'] 
+['Ġsc', 'issors'] +['/', 'assert'] +['.b', 'ucket'] +['/', 'archive'] +['_M', 'an'] +['Ġint', 'oler'] +['Ġ()', '=>'] +['ĠÐĴ', 'Ñĭ'] +['Ġsa', 'i'] +['.x', 'y'] +['."', 'čĊ'] +['Ġur', 'inary'] +['es', 'ub'] +['IST', 'ICS'] +['ĠÎ', 'º'] +['Ġcompl', 'iments'] +['Ġtypings', 'Japgolly'] +['ih', 'ar'] +['Exp', 'ansion'] +['ĠS', 'erving'] +['_st', 'udents'] +['ĠX', 'BOOLE'] +['(', 'il'] +['Ġì²', 'ĺ'] +['Ġj', 'ó'] +['(t', 'ol'] +['(', 'JS'] +['ĉC', 'G'] +['ĠD', 'RAW'] +['tw', 'ig'] +['Ġo', 'at'] +['_sm', 'ooth'] +['ĠC', 'SL'] +['Ġos', 'ob'] +['Ġens', 'uing'] +['Ġbank', 'er'] +['ĠBack', 'pack'] +['_p', 'ing'] +['Ġwish', 'list'] +['=', 'ax'] +['ĉĠĠĠ', 'Ċ'] +['Dis', 'ney'] +['stead', 'y'] +['">', '%'] +['Ġproph', 'ets'] +['ĠZ', 'X'] +['Ġminimal', 'ist'] +['.PL', 'AIN'] +['Se', 'attle'] +['.', 'ordinal'] +['ĠPI', 'PE'] +['Ġret', 'orna'] +['Ġjug', 'ador'] +['ĠB', 'ret'] +['ĠâĶ', 'ľ'] +['Ġpl', 'ush'] +['UL', 'ATOR'] +['Sort', 'ing'] +['.grid', 'y'] +['ect', 'omy'] +['_', 'activ'] +['r', 'ack'] +['Inter', 'active'] +['ĠAntar', 'ctica'] +['Ġv', 'engeance'] +['en', 'so'] +['_k', 'nown'] +['up', 'plier'] +['.Mod', 'ules'] +['ĠConnection', 'State'] +['éļ', 'IJèĹı'] +['@', 'FindBy'] +['Ġpl', 'acer'] +['\\', 'model'] +['<', '()>'] +['.is', 'Successful'] +['-g', 'ood'] +['b', 'z'] +['ĠDr', 'aco'] +['Ass', 'istant'] +['-ex', 'tra'] +['аб', 'лиÑĨ'] +['Ġhyp', 'ocrisy'] +['Ġt', 'st'] +['ĠA', 'gr'] +['$', 'txt'] +['Ġlog', 'istic'] +['lic', 'ensed'] +['ĠH', 'of'] +['Ġt', 'at'] +['(', 'iv'] +['Ġinto', 'xic'] +['post', 'Id'] +['_st', 'rike'] +['Ġhum', 'iliation'] +['pc', 'odes'] +['"', 'sync'] +['(rec', 'ipe'] +['+', 'N'] +['rent', 'e'] +['ĉ', 'Client'] +['ycop', 'g'] +['ĠZur', 'ich'] +['ĠPro', 'files'] +['C', 'ountries'] +['Ġp', 'ict'] +['Ġroll', 'out'] +['requ', 'encies'] +['Ġpatch', 'ed'] +['Ġcar', 'tridges'] +['Ġsh', 'ading'] +['J', 'ar'] +['Ġsalv', 'age'] +['ĠTax', 'es'] +['Ġstand', 'by'] +['apor', 'an'] +['E', 'igen'] +['.', 'angular'] +['ĠN', 'ested'] +['äº', '«'] +['Ġis', 'Visible'] 
+['ĠDw', 'ight'] +['_BR', 'ANCH'] +['.D', 'elay'] +['Ġk', 'end'] +['Ġfacilit', 'ated'] +['.flat', 'Map'] +['Ġs', 'anta'] +['ĉS', 'end'] +['/m', 'essages'] +['Ġof', 'Type'] +['ĉs', 'wap'] +['#', 'plt'] +['ĠTur', 'ks'] +['N', 'ES'] +['Ġprogress', 'ively'] +['ĠRes', 'idence'] +['ĠT', 'REE'] +['Ġno', 'en'] +['d', 'io'] +['Ġn', 'elle'] +['Ġsog', 'ar'] +['itt', 'i'] +['week', 'ly'] +['Ġambigu', 'ity'] +['_Set', 'tings'] +['W', 'are'] +['.ne', 'o'] +['_D', 'ST'] +['Ġæĸ', '¹'] +['pre', 'p'] +['lob', 'by'] +['@', 'email'] +['/m', 'ovie'] +['Ġfun', 'kc'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'Ċ'] +['ÂŃ', 's'] +['Ġguard', 'ians'] +['-', 'pos'] +['Ġconfig', 'uring'] +['ĠC', 'PS'] +['ĠDe', 'us'] +['Ġvidé', 'os'] +['_', 'empresa'] +['Ġsl', 'apped'] +['<', 'Model'] +['Ġunders', 'cores'] +['U', 'h'] +['.access', 'Token'] +['SET', 'S'] +['ĠS', 'parse'] +['ĠCal', 'd'] +[':', 'path'] +['ĠS', 'ervers'] +['=', 'batch'] +['Ġkn', 'itting'] +['Ġx', 'a'] +['Ġsearch', 'Bar'] +['Ġsn', 'ag'] +['Ġinf', 'used'] +['.b', 'am'] +['le', 'ver'] +['Ġtax', 'onomy'] +['Ã', 'İ'] +['Ġatt', 'aching'] +['Ġh', 'ern'] +['_N', 'OP'] +['Click', 'able'] +['(P', 'arse'] +['ĠDynam', 'o'] +['-b', 'uilder'] +['Ġdere', 'g'] +['Ġsc', 'attering'] +['è¿Ľ', 'è¡Į'] +['an', 'zi'] +['ĠShe', 'pard'] +['">', "',Ċ"] +['_X', 'DECREF'] +['ĠBuzz', 'Feed'] +['_M', 'ARGIN'] +['P', 'LOY'] +['.sm', 'all'] +['Ġm', 'imeType'] +['Ġh', 'olog'] +['ĉc', 'amera'] +['li', 'as'] +['Ġsusp', 'ense'] +['ody', 'nam'] +['b', 'au'] +['Ġgrave', 'yard'] +['_n', 'amed'] +['":"', "'"] +['Ġ********************************', '****************'] +['Ġgame', 'Over'] +['ĠLENG', 'TH'] +['ĉs', 'creen'] +['Ġdo', 'InBackground'] +['_depend', 'encies'] +['Ġr', 'tc'] +['/', 'up'] +['_', 'ROM'] +['H', 'all'] +['Ġdef', 'iciencies'] +['(', 'te'] +["'", '#'] +['_e', 'quiv'] +['Ġpre', 'order'] +['ĠA', 'xe'] +['ом', 'Ñĥ'] +['.send', 'File'] +['Ġfil', 't'] +['ĠLim', 'its'] +['ĠCaval', 'iers'] +['.dis', 'count'] +['âĨ', 'IJ'] +['ĠW', 'it'] +['QRST', 'UV'] +['Ġi', 'j'] +['Ġt', 
'egen'] +['Ġ:', '",'] +['diff', 'iculty'] +['p', 'unkt'] +['ĠEmail', 's'] +['ch', 'lor'] +['(f', 'un'] +['.U', 'int'] +['ĠSt', 'all'] +['_', 'verified'] +['u', 'D'] +['File', 'Type'] +['Ġple', 'asures'] +['Ġjud', 'iciary'] +['Ġsh', 'am'] +['ip', 'ur'] +['_PL', 'US'] +['off', 'ers'] +['(', 'foo'] +['_G', 'T'] +['ĉc', 'ore'] +['ENT', 'ION'] +['ĠLib', 'eration'] +['Command', 'Line'] +['_de', 'partment'] +['.A', 'r'] +['_ne', 'ighbor'] +['ĠSub', 'mitted'] +['ĠĊ'] +['Ġdro', 'its'] +['Ġhomosexual', 's'] +['Ġab', 'duction'] +['ĉw', 'idget'] +['$', 'headers'] +['ĠD', 'AR'] +['Ġfl', 'a'] +['th', 'reat'] +['Ġlou', 'is'] +['.Get', 'Property'] +['"', 'Just'] +['(f', 'rames'] +['ry', 'o'] +['prof', 'ession'] +['|', 'i'] +['íķ´', 'ìĦľ'] +['(s', 'v'] +['Ġun', 'recognized'] +['I', 'onic'] +['F', 'ashion'] +['Screen', 'State'] +['ĠIn', 'coming'] +['Not', 'Nil'] +['Ġsync', 'ing'] +['em', 'ie'] +['Ġtherm', 'o'] +['_pro', 'cs'] +['Ġincons', 'istency'] +['rel', 'igious'] +['.m', 'j'] +['Ġperson', 'n'] +['Ġmoment', 'os'] +['or', 'arily'] +['Ġæ', 'Ĭ'] +['_ne', 'urons'] +['Ill', 'ustr'] +['im', 'oto'] +['il', 'ik'] +['ĠW', 'oj'] +['Tr', 'ading'] +['Ġapp', 'are'] +['Ġentre', 'prises'] +['ach', 'at'] +['ĠÂ', '¬'] +['Ġne', 'igh'] +['BUTTON', 'DOWN'] +['ĠMah', 'er'] +['ag', 'han'] +['-h', 'ash'] +['"', 'f'] +['Ġclient', 'ele'] +['.add', 'Button'] +['ĉ', 'SP'] +['Q', 'i'] +['Ġgr', 'ated'] +['POS', 'ITE'] +[':', '>'] +['ĠHow', 'ell'] +['ĠCompar', 'ative'] +['ĠIS', 'C'] +['ÂŃ', 'i'] +['O', 'cean'] +['D', 'avis'] +['ĠFil', 'me'] +['W', 'ins'] +['ĠJ', 'IT'] +['oc', 'cer'] +['ĠC', 'orm'] +['ENCH', 'MARK'] +['rch', 'ive'] +['ica', 'ção'] +['Ġm', 'ata'] +['Ġchild', 'birth'] +['ĠOption', 'ally'] +['En', 's'] +['Ġx', 'http'] +['Ġel', 'ucid'] +['_Osc', 'InitStruct'] +['))', '):Ċ'] +['Ġint', 'uit'] +['ĠDon', 'ate'] +['Ġcorrel', 'ates'] +['>', 'Delete'] +['Ġequ', 'ipe'] +['Ġb', 'oca'] +['Ġinfl', 'atable'] +['er', 'ah'] +['ĠDateTime', 'Kind'] +['Ġcal', 'ves'] +['\\', 'Lib'] +['Ġem', 'lrt'] +['ĠTr', 
'ilogy'] +['ĠP', 'anc'] +['ĠD', 'uis'] +['ĠpelÃŃcul', 'a'] +['WAR', 'DS'] +['_DE', 'TECT'] +['-section', 'al'] +['dh', 'cp'] +['For', 'Row'] +['-de', 'struct'] +['ĠPres', 'enter'] +['/s', 'lick'] +[',', 'on'] +['ĠCit', 'adel'] +['logged', 'in'] +['_sub', 'type'] +['Ġsig', 'ue'] +['Ġc', 'uring'] +['ĠFire', 'wall'] +['Ġfluores', 'cence'] +['ĠItal', 'ians'] +['иÑĤ', 'ÑģÑı'] +['.get', 'Style'] +['In', 'Seconds'] +['j', 'ie'] +['-S', 'mith'] +['Ġx', 'link'] +['Ġsub', 'missive'] +['он', 'ÑĤ'] +['arbon', 'ate'] +['ĠF', 'aul'] +['_go', 'als'] +['ĠCommission', 'ers'] +['chart', 'Instance'] +['_POST', 'FIELDS'] +['Ġmed', 'ial'] +['Ġman', 'os'] +['Ġdel', 't'] +['sv', 'm'] +['.Ap', 'is'] +['ep', 'hy'] +['Ġasym', 'pt'] +['Ġapp', 'Delegate'] +['Ġimpro', 'bable'] +['ck', 'a'] +['sim', 'd'] +['/', 'Error'] +['.', 'âĢĵ'] +['ĠP', 'TS'] +['de', 'er'] +['Ġs', 'ina'] +['m', 'agnitude'] +['ID', 'ADE'] +["']", "}'"] +['Ġmay', 'ores'] +['ĉ', 'comment'] +['/', 'console'] +['"', '@'] +['v', 'olt'] +['.s', 'ell'] +['ĠM', 'acy'] +['Ġmel', 'od'] +['Ġim', 'ágenes'] +['_ch', 'g'] +['Ġin', 'out'] +['ident', 'e'] +[')', "'),Ċ"] +['d', 'ni'] +['.b', 'lob'] +['Ġtyp', 'ography'] +['Ġe', 'erie'] +['_O', 'ID'] +['pes', 'an'] +['aj', 'an'] +['Ġch', 'opping'] +['Ġbl', 'uff'] +['ad', 'f'] +['_b', 'ases'] +['.Form', 'atter'] +['Ġ\\', '%'] +['ĠPage', 'Info'] +['Car', 'rier'] +['ĠCal', 'ibration'] +['com', 'o'] +['-b', 'odied'] +['Ġfinanc', 'ier'] +['ĠIN', 'A'] +['.', 'ERR'] +['Ġhood', 'ie'] +['ĠSan', 'ity'] +['gu', 'arded'] +['.opend', 'aylight'] +['ISM', 'ATCH'] +['High', 'lights'] +['ün', 'k'] +['ani', 'em'] +['anger', 'ed'] +['assign', 'ments'] +['Ġregistr', 'ado'] +['ĠU', 'PPER'] +['ampil', 'kan'] +['ash', 'ire'] +['ĠNik', 'ola'] +['ĠC', 'FL'] +['ĠH', 'DC'] +['Ġp', 'oids'] +['ĠIP', 's'] +['Ġprevent', 'ative'] +['ips', 'oid'] +['if', 'ix'] +['.c', 'amel'] +['.g', 'a'] +['V', 'olumes'] +['-', 'ste'] +['Y', 'ahoo'] +['_s', 'ibling'] +['H', 'ighest'] +['opt', 'group'] +['Ġkvin', 'na'] +['âĢĿ', 'ãĢĤĊĊ'] 
+['ĠAppl', 'iances'] +['Ġ"', '><'] +["')", '")Ċ'] +['ht', 't'] +['ĠIdent', 'ified'] +['Ġpenc', 'ils'] +['Ġmember', 'Id'] +['Ġappend', 'String'] +['.load', 'Data'] +['Ġmock', 'Mvc'] +['Ġj', 'ub'] +['ĠSl', 'ut'] +['ĠTai', 'pei'] +['st', 'att'] +['Pol', 'it'] +['Ġpart', 'ager'] +['Did', 'Change'] +['Incre', 'ases'] +[')', '}.'] +['ĠB', 'aba'] +['_CL', 'IP'] +['[', 'unit'] +['Ġк', 'лÑİÑĩ'] +['Ġalc', 'uni'] +['ĠL', 'ola'] +['Ġcl', 'inging'] +['@', 'PostMapping'] +['(con', 'cat'] +['Ġss', 'id'] +['ĠFa', 'uc'] +['ok', 'it'] +['ĠRecord', 'ed'] +['á', 'lez'] +['($', "('<"] +['.assertIs', 'Not'] +['Ġk', 'ali'] +['V', 'olt'] +['Ġwarm', 'ly'] +['Ġsca', 'res'] +['get', 'ti'] +['füh', 'rt'] +['_d', 'oes'] +['.', 'EMAIL'] +['im', 'ations'] +['Ġspring', 'fox'] +['ĠDec', 'om'] +['arc', 'y'] +['Ġgl', 'itches'] +['ĠM', 'off'] +['ĠV', 'oll'] +['.b', 'etween'] +['Ġcoord', 'en'] +['ĠPart', 'icularly'] +['GB', 'P'] +['Ġsem', 'ble'] +['East', 'ern'] +['_M', 'SB'] +['])', '{čĊ'] +['m', 'organ'] +['ĠE', 'VAL'] +['d', 'ere'] +['HO', 'USE'] +['mo', 'ire'] +['ist', 'ique'] +['_l', 'stm'] +['-com', 'mit'] +['yster', 'ious'] +['Ġtw', 'ink'] +['-th', 'umbnails'] +['en', 'ÃŃ'] +[":'", "',"] +['Ġblack', 'out'] +['ĠFlo', 'ors'] +['Ġso', 'fas'] +['Ġou', 'i'] +['lesh', 'oot'] +['ĠRa', 'q'] +['-', 'abs'] +['Ġk', 'ra'] +['M', 'ining'] +['sha', 'ft'] +['.set', 'Columns'] +['Cl', 'azz'] +['PRE', 'TTY'] +['.play', 'list'] +['éĸ', '¢'] +['-Sah', 'aran'] +['M', 'ING'] +['ĉ', 'bl'] +['è®', '®'] +['j', 'f'] +['DO', 'CKER'] +['hope', 'fully'] +['(', 'ignore'] +['ĠUsers', 'Controller'] +['ĠMitar', 'beiter'] +['ĠL', 'ES'] +['Ham', 'ilton'] +['-m', 'etadata'] +['ĠK', 'K'] +['ikt', 'ig'] +['Ġwoll', 'te'] +['egr', 'ator'] +[']', 'bool'] +[',', 'current'] +['Ġvalue', 'Type'] +['Ġexcav', 'ation'] +['ol', 'and'] +['Ġv', 'erv'] +['/file', 'path'] +['Auth', 'Provider'] +['Ġpro', 'crast'] +['ĉ', 'ULONG'] +['_MEM', 'BERS'] +['Ġup', 'lift'] +['ĠAut', 'onomous'] +['Ġart', 'works'] +['ĠOut', 'reach'] +['Ġp', 'ore'] +['Home', 
'page'] +['Dialog', 'Title'] +['ĠGener', 'ating'] +['PAR', 'SE'] +['Ġsem', 'anas'] +['Ġhuman', 'o'] +['JSGlobal', 'Scope'] +['Ġvol', 'te'] +['Ġb', 'ella'] +['(is', 'instance'] +['Ġpl', 'c'] +['\\C', 'atalog'] +['Ġeste', 'emed'] +['éĽ', '·'] +['(s', 'uffix'] +['Ġswe', 'eps'] +['ĉ', 'ORDER'] +['Ġdo', 'ivent'] +['ĠSw', 'arm'] +['ĠComp', 'iled'] +['get', 'Page'] +['AD', 'R'] +['.R', 'ichTextBox'] +['ĠN', 'aming'] +['ag', 'ged'] +['ĠG', 'ANG'] +['r', 'asing'] +['ode', 'led'] +['Ġg', 'ala'] +['ĠJS', 'Name'] +['dd', 'f'] +['Ġill', 'ust'] +['ĠLans', 'ing'] +['[', 'port'] +['-de', 'ath'] +['Ġdin', 'heiro'] +['ĠE', 'ighth'] +['Ġb', 'ian'] +['st', 'Ã¥'] +['Ġvers', 'ión'] +['ĠLinear', 'Gradient'] +['ĠHard', 'ing'] +['.', '*)'] +['ec', 'zy'] +['$', 'header'] +['Ġv', 'Ã¥r'] +['Un', 'checked'] +['Ġko', 'je'] +['ĠPal', 'adin'] +['()', ')),'] +['G', 'iving'] +['()', '})Ċ'] +['Ġd', 'ips'] +['F', 'riendly'] +['Ġport', 'rays'] +['Ġhel', 'ium'] +['Ġinsurg', 'ency'] +['_ex', 'piry'] +['ĠstringByAppending', 'String'] +['Ġa', 'antal'] +['s', 'lope'] +['m', 'ast'] +['.get', 'Integer'] +['Ġ################', '########'] +['_PIPE', 'LINE'] +['Ġdens', 'ely'] +['Ġmut', 'ating'] +['m', 'idi'] +['ĠSe', 'it'] +['ay', 'ne'] +['NOW', 'LED'] +['ĠDes', 'mond'] +['ĠF', 'Name'] +['ĠN', 'airobi'] +['\\', 'Context'] +['Ġcalc', 'ular'] +['-d', 'en'] +['Ġc', 'ott'] +[']', '):čĊ'] +['ĠRecommend', 'ation'] +['ĠRole', 'x'] +['Ġvalidation', 'Result'] +['.p', 'at'] +['Ġn', 'Ãły'] +['ĠRest', 'Client'] +['ĠG', 'PI'] +['ĠAshe', 'ville'] +['ĠO', 'SP'] +['ĠPER', 'MISSION'] +['ÐĶ', 'аÑĤа'] +['/', 'notification'] +['K', 'night'] +['_W', 'ord'] +['ĠB', 'ender'] +['rank', 'ing'] +['Ġpart', 'ida'] +['_res', 'ervation'] +['Ì', 'Ģ'] +['Ġm', 'Name'] +['Ġget', 'ch'] +['Ġb', 'orr'] +['Ġdilig', 'ent'] +['Disc', 'uss'] +['æŃ£', 'åľ¨'] +['ape', 'ake'] +['ion', 'ed'] +['-N', 'azi'] +['.c', 'um'] +['ĠK', 'ron'] +['=$', "('#"] +['/s', 'ingle'] +['Ġerot', 'isch'] +['ĠV', 'ib'] +['Ġrat', 'ified'] +['Ġconcert', 'ed'] +['ĠREG', 'ARD'] 
+['Ġdo', 'br'] +['.Driver', 'Manager'] +["'", 'r'] +['Port', 'able'] +['ĉs', 'uite'] +['Ġrel', 'aciones'] +['ĠD', 'op'] +['emplo', 'i'] +['DO', 'B'] +['Ġcr', 'umbs'] +['Ġx', 'ls'] +['_App', 'lication'] +["(':", "',"] +['Ġ----------------------------------------------------------------', '--------Ċ'] +['m', 'se'] +['Ġber', 'k'] +['ĠReturn', 'Value'] +['ĠBel', 'ly'] +['Ġcam', 'ar'] +['ĠPe', 'ek'] +['els', 'ing'] +['Ġnot', 'ifies'] +['ĠTr', 'istan'] +['ĠG', 'AR'] +['em', 'me'] +['ĠElev', 'ated'] +['_C', 'SV'] +['(ch', 'alk'] +['Ġtw', 'enties'] +['ĠSearch', 'Result'] +['=', 'search'] +['ĠMix', 'ing'] +['ý', 't'] +['Ġrecru', 'iter'] +['ĠIDE', 'OGRAPH'] +['ĠA', 'go'] +['(', 'Operation'] +['$', 'values'] +['Ġworld', 'ly'] +['ĠRosen', 'berg'] +['ĠConfigure', 'Services'] +['>*', 'Ċ'] +['Ġsn', 'ork'] +['_op', 'acity'] +['ĠinitWith', 'NibName'] +['i', 'ado'] +['A', 'AC'] +['Ġ]', ').'] +[';', 'z'] +['_par', 'agraph'] +['Ġnos', 'es'] +['stand', 's'] +['if', 'r'] +['_m', 'E'] +['I', 'raq'] +['.P', 'redicate'] +['ena', 'ire'] +[']]', '];Ċ'] +['Ġun', 'idad'] +['Ġretire', 'es'] +['_h', 'ello'] +['Ġmode', 'le'] +['ĠUIT', 'ableViewController'] +['f', 'write'] +['_num', 'ero'] +['_vis', 'ited'] +['Ġrece', 'be'] +['(', 'Notification'] +['Fant', 'astic'] +['_sub', 'menu'] +['ĠP', 'EM'] +['ĠCup', 'ertino'] +['approx', 'imately'] +['class', 'ed'] +['.Read', 'String'] +['Ġdomic', 'ile'] +['_P', 'W'] +['Ġball', 'park'] +['ĠK', 'ale'] +['con', 'tra'] +['_f', 'avorite'] +['/', 'of'] +['Qu', 'ite'] +['ĠOT', 'A'] +['Ġacceler', 'ometer'] +['did', 'n'] +['|', '^'] +['ĠRohing', 'ya'] +['ivic', 'rm'] +['ann', 'abin'] +['обÑĭ', 'ÑĤи'] +['or', 'ado'] +["')", '+'] +['Ha', 'unted'] +[',', 'ID'] +['(', 'UIAlertAction'] +['ur', 'v'] +['_b', 'el'] +['ĠMex', 'icans'] +['/', 'terms'] +['ĠPaint', 'er'] +['Input', 'Label'] +['ĠV', 'inci'] +['ĠRos', 'ie'] +['\\', 'uc'] +['<', 'Menu'] +['Ġcool', 'ant'] +['(current', 'User'] +['_d', 'ual'] +[')', '"},Ċ'] +['&', 'p'] +['Ġconver', 'ged'] +['Ġrestr', 'ain'] 
+['ĠYugosl', 'avia'] +['=', 'target'] +['Ġimp', 'uls'] +['ds', 'a'] +['Search', 'Tree'] +['Ġh', 'box'] +['ĠImp', 'ress'] +['§', 'Ãĥ'] +['get', 'FullYear'] +['(d', 'a'] +['ĠY', 'YS'] +['.al', 'ignment'] +['.Get', 'Text'] +['.token', 'ize'] +['ĠOlymp', 'us'] +['Ġmur', 'ky'] +['ore', 'station'] +['Ġdiss', 'atisfaction'] +['ĉT', 'Array'] +['_', 'kses'] +['.Add', 'Singleton'] +['ĠStart', 'Time'] +['Ġfan', 'atic'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĉ'] +['Ġentity', 'Type'] +['.', 'override'] +['Ġ', '-------------'] +['ĠDat', 'agram'] +['f', 'out'] +['(with', 'Id'] +['Ġ#', '__'] +['Ł', 'èĥ½'] +['ek', 'yll'] +['.f', 'riends'] +['ame', 'leon'] +['Ġz', 'ach'] +['.simple', 'Button'] +['ret', 'orno'] +['Ġkon', 'k'] +['/s', 'mall'] +['ĠQuick', 'ly'] +['un', 'read'] +['Don', 'ate'] +['Detail', 'View'] +['Ġdu', 'a'] +['Ġpenetr', 'ated'] +['OM', 'UX'] +['Ġn', 'ir'] +['_p', 'data'] +['"],', '["'] +['Ġlow', 'es'] +['Ġdop', 'ing'] +['Ġas', 'ymmetric'] +['Ġneed', 'less'] +['our', 'cem'] +['Ġup', 'ro'] +['ĠGu', 'zzle'] +['af', 'b'] +['Ġsext', 'reffen'] +['-c', 'ollar'] +['Ġcol', 'ossal'] +['Mon', 'key'] +['n', 'ish'] +['Ġhandle', 'Message'] +['Incre', 'ased'] +['*', 'dx'] +['ĠChatt', 'anooga'] +['f', 'org'] +['ĠOr', 'den'] +['Ġsh', 'ri'] +['ĠV', 'and'] +['Ġ"', '@"'] +['Image', 'Sharp'] +['ĠWild', 'cats'] +['pon', 'ible'] +['.sc', 'enes'] +['Ġpaint', 'ers'] +['ĠPf', 'izer'] +['ĠZ', 'ah'] +['To', 'Local'] +['ĠFl', 'am'] +['Ġé', 'taient'] +['))', '^'] +['ĠSand', 'box'] +['ĠTR', 'ADE'] +['Ġchrom', 'ium'] +['Ġac', 'claim'] +['Ġpac', 'man'] +['´', 't'] +[')', 'reader'] +['M', 'ari'] +['.Dispatch', 'er'] +['.A', 'DMIN'] +['ĠRem', 'ed'] +['Sw', 'eden'] +['Ġoverl', 'ays'] +['.', 'er'] +['Ġp', 'ang'] +['Ġclean', 'ly'] +['aven', 'port'] +['Toy', 'ota'] +['patch', 'es'] +['Ġv', 'tx'] +['ĠE', 'is'] +['cl', 'ado'] +['ĠR', 'itch'] +['RO', 'LS'] +['Ġh', 'ade'] +['Ġconspic', 'uous'] +['Ġdo', 'cks'] +['(j', 'q'] +['ĠPrem', 'iership'] +['ĠBe', 'z'] +['ĠâĦ', 'ĸ'] +['ĠÑĥ', 'Ñģл'] +['_tot', 'als'] +['Ġprov', 'a'] 
+['ĠC', 'ue'] +['Ġsa', 'úde'] +['ĠGame', 'Controller'] +['IM', 'IZE'] +[',', 'port'] +['ãĢĤ', '('] +['.C', 'decl'] +['Instant', 'iationException'] +['Ġcoll', 'age'] +['ĠIO', 'C'] +['Ġb', 'ais'] +['Ġon', 'Finish'] +['-st', 'ars'] +['set', 'Size'] +['Ġmog', 'ul'] +['Ġdis', 'illusion'] +['Ġche', 'vy'] +['(S', 'chedulers'] +['(', 'IR'] +['_loc', 's'] +['Ġcann', 'ons'] +['Ġcancell', 'ing'] +['/b', 'us'] +['Ġbuf', 'io'] +['ĠY', 'ours'] +['ĠPik', 'achu'] +['Ġter', 'me'] +['r', 'Ã¥'] +['f', 'ahren'] +['Ġowner', 'Id'] +['Ġoblig', 'atory'] +['Ġcul', 'p'] +['Ġacid', 'ity'] +['-m', 'ult'] +['ĠBam', 'boo'] +["Ġ'", '">'] +['_g', 's'] +['Ġcomp', 'il'] +['n', 'ard'] +['-ex', 'c'] +['Ġrh', 'yme'] +['Ġbut', 'to'] +['s', 'ays'] +['ant', 'asy'] +['ë', '¸'] +['Ġcitt', 'Ãł'] +['Ġche', 'g'] +['Time', 'String'] +['Ġpos', 'itivity'] +['ĠD', 'abei'] +['Ġw', 'ang'] +['Ġes', 'cre'] +['"', 'c'] +['ĉv', 'ideo'] +['ĠRank', 'ed'] +['.str', 'ings'] +['>>', '>('] +['Ġин', 'ÑĤеÑĢ'] +['Ġrest', 'a'] +['[:', ',:'] +['Ġrend', 're'] +['Ġdes', 'er'] +['J', 'os'] +['Ġdis', 'ruptions'] +['Ġоп', 'еÑĢ'] +['s', 'ampling'] +['sup', 'press'] +['Ġcontainer', 'View'] +['ĠSeam', 'less'] +['Ġair', 'y'] +['Ġon', 'load'] +['.Window', 'Manager'] +['ĠPL', 'A'] +['br', 'aco'] +['.set', 'PositiveButton'] +['Ġp', 'du'] +['Ġg', 'si'] +['ĠC', 'li'] +['_gr', 'adients'] +['Ñı', 'д'] +['ĠWh', 'isper'] +['c', 'stdint'] +['Ġl', 'äng'] +['Ġform', 'ulations'] +['én', 'om'] +['ourn', 'emouth'] +['[$', '_'] +['Ġordin', 'arily'] +['.set', 'Username'] +['Ġfacult', 'ies'] +['MIT', 'TED'] +['/', 'values'] +['Ġwe', 'ir'] +['ĠA', 'pt'] +['M', 'Z'] +['ĉc', 'f'] +['uck', 'en'] +['ĉĉĉĉĉĉĉĉ', 'ĉĉĉĉĉĉĉĉĉĉĉĉ'] +['def', 'ense'] +['[i', 'Var'] +['ĠBusiness', 'Exception'] +['Select', 'ors'] +['(co', 'ordinates'] +['ĠRes', 'ets'] +['ĠDr', 'inks'] +['ole', 'ans'] +['(st', 'ypy'] +['_IO', 'C'] +['.x', 'xx'] +['ĠSl', 'ater'] +['ĠBel', 'ize'] +['Ġ/', '************************************************************************'] +['add', 'in'] +['_ep', 
'isodes'] +['Ġis', 'chem'] +['legal', 'ArgumentException'] +['D', 'anny'] +['Ġp', 'ared'] +['.code', 'haus'] +['ĠAss', 'y'] +['ĉ', 'Rect'] +['â', 'ŀ'] +['.list', 'a'] +['Ġв', 'аÑĪ'] +['Ġv', 'ets'] +['HW', 'ND'] +['ison', 'er'] +['Ġx', 'o'] +['Ġor', 'ally'] +['ĠSt', 'mt'] +['.r', 'nn'] +['ĠD', 'PI'] +['ĠStr', 'ikes'] +['.setViewport', 'View'] +['Ġèĩª', 'åĬ¨çĶŁæĪIJ'] +['Y', 'ELLOW'] +['GL', 'enum'] +['part', 'ners'] +['ĠImp', 'licit'] +['Ġtak', 'o'] +['âĢĻ', 'elle'] +['Ġerm', 'ög'] +['total', 'Count'] +['G', 'il'] +['ĉ', 'work'] +['Ġpr', 'atic'] +['in', 'ati'] +['ab', 'ies'] +['ĠSk', 'inner'] +['Ġspir', 'ited'] +['Ġpancre', 'atic'] +['Ġh', 'df'] +["'", 'em'] +['Ġpsych', 'osis'] +['olic', 'it'] +['Ġ"', '{"'] +['_at', 'ual'] +['Ġé', 'lect'] +['TE', 'AM'] +['Ġd', 'ak'] +['ĠSW', 'AT'] +['.Fragment', 'Manager'] +['Ġprovision', 'ing'] +['l', 'ifetime'] +['_EXTENSION', 'S'] +['ĠC', 'ASCADE'] +['Ġ!', '['] +['(K', 'P'] +['Ġv', 'em'] +['ĠInterr', 'acial'] +["']", '},Ċ'] +['sp', 'acer'] +['_k', 'v'] +['W', 'arehouse'] +['R', 'DD'] +['_f', 'sm'] +['.Stretch', 'Image'] +[',', 'Yes'] +['ĠRefuge', 'e'] +['ĠBr', 'inging'] +['Ġv', 'álido'] +['.inter', 'section'] +['Ġsp', 'ooky'] +['_port', 'al'] +['Ġmo', 'th'] +['ĠZ', 'odiac'] +['ĠSOC', 'IAL'] +['M', 'imeType'] +["']", '}}'] +['_Bl', 'ue'] +['Ġbot', 'anical'] +['Ġfr', 'ags'] +['Ġfamil', 'ial'] +['-', 'du'] +['Ġse', 'izing'] +['(block', 's'] +['.r', 'd'] +['.check', 'NotNull'] +['Ġmis', 'er'] +['Ġmax', 'x'] +['ĠK', 'nee'] +['View', 'Item'] +['Inner', 'HTML'] +['D', 'anger'] +['((', '__'] +['Ġprz', 'ypad'] +['create', 'Url'] +['**', ','] +['ĠDecor', 'ating'] +['ATEG', 'Y'] +['?>', '/'] +['.Design', 'er'] +['hex', 'digest'] +['ĠEvery', 'where'] +['all', 'eries'] +['.TEXT', 'URE'] +['.Block', 's'] +['z', 'ell'] +['Ġpre', 'ço'] +['S', 'uddenly'] +['input', 'Email'] +['(s', 'ync'] +['.b', 'd'] +['gold', 'en'] +['>', "');"] +['ĠDick', 'inson'] +['>>', '(Ċ'] +['ĠQUE', 'UE'] +['Ġget', 'Column'] +['ĠS', 'AND'] +['.p', 'iece'] +['lic', 'er'] 
+['Fl', 'utter'] +['Ġget', 'Version'] +['Ġresource', 'Id'] +['og', 'l'] +['ÅĤ', 'aw'] +['.Br', 'anch'] +['ĉ', 'web'] +['Ġfr', 'amerate'] +['PP', 'P'] +['Ġfr', 'ay'] +['C', 'NT'] +['Ġinformat', 'ie'] +["']", 'čĊčĊ'] +['ne', 'as'] +['Header', 'Code'] +['Ġæ', '¸'] +['Ġtr', 'g'] +['raw', 'types'] +['H', 'onda'] +['Ġmark', 'eter'] +['Ġrequest', 'Data'] +['ĠP', 'g'] +['ĉ', 'not'] +['Ġpage', 'Info'] +['Ġakt', 'uellen'] +['ãģķ', 'ãĤĵ'] +['ĠA', 'MS'] +['push', 'ViewController'] +['ĉ', 'AL'] +['Ġv', 'ests'] +['produ', 'ce'] +['-m', 'ême'] +['ĠRah', 'man'] +['F', 'unny'] +['E', 'Z'] +['_', 'Valid'] +['Ġsquad', 'ron'] +['Ġl', 'ash'] +['Ġ', 'irm'] +['ias', 'co'] +['ĠPar', 'an'] +['Ġpet', 'ites'] +['ĠDec', 'ay'] +['Ġun', 'initialized'] +['priv', 'ileged'] +['Ġm', 'bedtls'] +['å¤ĩ', '注'] +['Ġ^', '.'] +['Ġec', 'static'] +['D', 'etroit'] +['Ġpart', 'en'] +['Ġsou', 'venir'] +['.get', 'Login'] +['моÑĤ', 'ÑĢ'] +['en', 'ção'] +['ĠmÃŃn', 'imo'] +['ĠAccess', 'ed'] +['ri', 'ó'] +['M', 'ic'] +['ĠV', 'ocal'] +['.Set', 'String'] +['Ġmens', 'ajes'] +['åĢ', 'į'] +['Ġattr', 'avers'] +['ĠA', 'ph'] +["Ġ'", ');čĊ'] +['ünd', 'e'] +['Ġench', 'anted'] +['ĠRoot', 'State'] +['ĠCLOSE', 'D'] +['ĉĉĉĉĉĉĉĉ', 'čĊ'] +['Ġcal', 'iente'] +['or', 'ris'] +['Ġphysic', 'ists'] +['h', 'wnd'] +['_v', 'i'] +['Ġráp', 'ido'] +['Ġcapital', 'ized'] +['ed', 'By'] +['Ġmach', 'ining'] +['Ġhub', 'by'] +['ĠSt', 'acy'] +['.B', 'us'] +['dr', 'ink'] +['H', 'ur'] +['Ġprop', 'ia'] +['Unit', 'Test'] +['Ġmiscon', 'ception'] +['__', '));Ċ'] +['/d', 'c'] +['ĠMay', 'weather'] +['_m', 'C'] +['.create', 'From'] +['ĠQ', 'Painter'] +['rops', 'ych'] +['inn', 'itus'] +['ay', 'as'] +['Ġg', 'eg'] +['(d', 'w'] +['Ġus', 'ado'] +['Ġtrick', 'le'] +['Ġann', 'ihil'] +['ĠP', 'asta'] +['Ġ++', 'Ċ'] +['(Expected', 'Conditions'] +['.post', 'Value'] +['ic', 'ap'] +['ĠDon', 'etsk'] +['_s', 'oup'] +['-p', 'ublish'] +['ĠP', 'b'] +['ment', 'ions'] +['AC', 'CEPT'] +['.P', 'ull'] +[',âĢĻ', 'âĢĻ'] +['Ġret', 'arded'] +['_AT', 'OM'] +['ĠTermin', 'ator'] +['-c', 
'ourt'] +['ĠCLLocation', 'Coordinate'] +['Ġrever', 'ence'] +['ĠS', 'SC'] +['ut', 'ely'] +['ĠW', 'ON'] +['ĠG', 'SL'] +['fre', 'i'] +['.get', 'Longitude'] +['Ġopen', 'FileDialog'] +['.B', 'utter'] +['-', 'important'] +['_M', 'ANY'] +['ĠG', 'ong'] +['âĢľ', 'How'] +['Ġg', 'orge'] +['=', 'msg'] +['ĠEz', 'ek'] +['create', 'Command'] +[':', 'checked'] +['Ġinf', 'ographic'] +['.W', 'EST'] +['Dir', 's'] +['Ġguard', 'a'] +['Ġbeet', 'le'] +['<', 'small'] +['-', 'android'] +['Ġcred', 'itor'] +['ĠM', 'éd'] +['Ġfinal', 'ist'] +['Ġab', 'l'] +['ne', 'v'] +['_inter', 'action'] +['ĠMonter', 'ey'] +['j', 'ah'] +['Ġcand', 'ies'] +['ĠQu', 'incy'] +['èª', 'Ń'] +['Ġbatch', 'Size'] +['ak', 'it'] +['Ġo', 'be'] +['(p', 'ara'] +['Ġexperiment', 'ed'] +['Ġcouncill', 'ors'] +['Ġcl', 'ashed'] +['s', 'qu'] +['-st', 'rokes'] +['ĠG', 'K'] +['ĠEx', 'pires'] +['Ġprosec', 'utions'] +['ĠCreat', 'ures'] +['Ġy', 'ö'] +['x', 'lim'] +['_IM', 'P'] +['Entry', 'Point'] +['ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ', 'ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ'] +['.Default', 'CellStyle'] +['Ġbre', 've'] +['ĠBrit', 'ann'] +['Ġsweat', 'y'] +['Ġle', 'th'] +['Ġflash', 'back'] +['per', 'manent'] +['ĠJ', 'DK'] +['_D', 'etails'] +['E', 'uro'] +['p', 'pt'] +['Ġrich', 'TextBox'] +['/', 'board'] +['Ġtr', 'ance'] +['.c', 'ycle'] +["');", '");Ċ'] +['Ġtox', 'in'] +['_de', 'init'] +['Ġover', 'arching'] +['Ġconfig', 'parser'] +['ĠKaw', 'asaki'] +['.th', 'umb'] +['Ġplay', 'a'] +['ĠJose', 'f'] +['+', '_'] +['Ġzero', 'es'] +['Ġa', 'up'] +['ĠH', 'ari'] +['comm', 'itted'] +['N', 'it'] +['.file', 'Path'] +['ĠDis', 'abilities'] +['man', 'ufact'] +['-al', 'igned'] +['.RE', 'SET'] +['Ġrust', 'y'] +['E', 'y'] +['Ġou', 'sted'] +['cos', 'a'] +['Struct', 'ured'] +['.get', 'D'] +['Ġs', 'ábado'] +['>', 'Loading'] +['_m', 'A'] +['.get', 'Random'] +['bl', 'ings'] +['Ġchees', 'es'] +['tt', 'i'] +['.', 'âĢ¢'] +['ĠBurg', 'ess'] +['ender', 'it'] +['.', "',čĊ"] +['("', '"+'] +['ac', 'b'] +['%', 'p'] +['index', 'ed'] +['_pred', 'icate'] +['nes', 
'ia'] +['Ġb', 'ied'] +['ĠC', 'IT'] +['(', 'Pos'] +['_r', 'adi'] +['ä»·', 'æł¼'] +['B', 'iz'] +['ĠAdoles', 'cent'] +['Ġvi', 'ên'] +['c', 'ycl'] +['_C', 'ancel'] +['Ġcon', 'clusive'] +['Ġappell', 'ate'] +['inform', 'atics'] +['S', 'J'] +['Ġelect', 'ive'] +['role', 'Id'] +['Fetch', 'er'] +['ĉ', 'Command'] +['("', '(%'] +['Ġf', 'art'] +['IL', 'A'] +['get', 'Block'] +['A', 'USE'] +['Ġд', 'ан'] +['ĠAr', 'te'] +['Ġnot', 'ifying'] +['Ġge', 'le'] +['.s', 'ame'] +['ĠReg', 'el'] +['ĠBa', 'ÅŁ'] +['.c', 'reation'] +['ĠV', 'N'] +['_comm', 'unity'] +['Ġuns', 'ustainable'] +['SE', 'X'] +['Ġgrid', 'Size'] +['res', 'cia'] +['avers', 'able'] +["(',", "')["] +['ĠPh', 'elps'] +['á»ķ', 'i'] +['ANCE', 'LED'] +['-', 'IS'] +['.run', 'ners'] +['ĠSt', 'okes'] +['.P', 'rodu'] +['Ġwh', 'ipping'] +['_ac', 'quire'] +['Ġinvestig', 'ación'] +['f', 'ried'] +['.copy', 'With'] +['ĠHard', 'cover'] +['-', 'Se'] +['áŀ¶', 'áŀ'] +['inv', 'itation'] +['les', 'ai'] +['ĠD', 'orm'] +['ĠÑģпиÑģ', 'ка'] +['Ġconcaten', 'ated'] +['oph', 'il'] +['Ġthink', 'er'] +['/font', 'awesome'] +['ĠLe', 'opard'] +['Ġ"/', '");Ċ'] +['Ġresidual', 's'] +['ĠMic', 'rowave'] +['Ġconform', 'e'] +['th', 'rop'] +['Ġdis', 'emb'] +['ĠO', 'MG'] +['ĠDisc', 'ipline'] +['ĠAc', 'robat'] +['/re', 'pository'] +['df', 'a'] +['_M', 'ED'] +['buf', 'io'] +['Ġméth', 'ode'] +['_H', 'OLD'] +['ias', 'i'] +['_', 'legacy'] +[')', 'ččĊ'] +['æ£', 'Ģ'] +['Get', 'ProcAddress'] +['Ġy', 'ay'] +['ot', 'ence'] +['order', 'id'] +['-t', 'w'] +['Ġdear', 'ly'] +['In', 'coming'] +['/', 'il'] +['Ġneu', 'rop'] +['uc', 'z'] +[');', 'čččĊ'] +['ĠInnov', 'ative'] +['Ġprof', 'und'] +['ig', 'mat'] +['Selection', 'Mode'] +['re', 'levant'] +['.G', 'O'] +['Ġbru', 'ises'] +['Ġs', 'ach'] +['ode', 'f'] +['Ġre', 'imb'] +['/d', 'esktop'] +['-s', 'pot'] +['und', 'ance'] +['Ent', 'ropy'] +['\\', 'core'] +['Ġsug', 'er'] +['ĠM', 'vc'] +['ĠGN', 'OME'] +['_ind', 'x'] +['ĠYY', 'STYPE'] +['ĠMat', 'lab'] +['ĠC', 'IF'] +['Ġ*', '))'] +['Ġproduct', 'List'] +['ĠAl', 'right'] +['ac', 'emark'] 
+['ÑĤи', 'в'] +['mod', 'ification'] +['int', 'ernational'] +['Ġhom', 'ers'] +['Ġdict', 's'] +['ĠQ', 'Font'] +['.SQL', 'ite'] +['Ġtransplant', 'ation'] +['ĠMessageBox', 'Button'] +['ĠEl', 'ves'] +["']", '])Ċ'] +['(Q', 'Icon'] +['Ġcin', 'emas'] +['CO', 'ORD'] +['-', 'China'] +['Ġkh', 'ẩu'] +['æĪij', 'çļĦ'] +['Ġskull', 's'] +['Ġpain', 'staking'] +['f', 'ce'] +['.XR', 'Label'] +['Ġspec', 'ifier'] +['Ġpref', 'erring'] +['/', 'activity'] +['(', 'Photo'] +['á', 'lt'] +['.l', 'ot'] +["'", "'."] +['ann', 'once'] +['.google', 'code'] +['-p', 'df'] +['ĠP', 'oke'] +['_A', 'CL'] +['Ġend', 'owed'] +['dis', 'cover'] +['.om', 'g'] +['Ġwood', 'land'] +['.M', 'agic'] +['Ġvol', 'ont'] +['Not', 'Allowed'] +['Ġch', 'ave'] +['BM', 'W'] +["','", "=',"] +['ĠS', 'IX'] +['æĪij', '们'] +['Ġkos', 'her'] +['Ġaspir', 'ation'] +['int', 'l'] +['_ref', 'ptr'] +["'+", 'Ċ'] +['ment', 'or'] +['.cl', 'ub'] +['Window', 'State'] +['.A', 'RR'] +['Ġz', 'za'] +['Ġmessage', 'Type'] +['.e', 'qu'] +['Th', 'or'] +['Ġin', 'just'] +['Ġg', 'ums'] +['Ġborder', 'Side'] +['////', '/'] +['ĠTrans', 'mit'] +['Ġbuf', 'size'] +['Ġh', 'ak'] +['Ġell', 'as'] +['R', 'ANDOM'] +['ĉm', 'c'] +['Ġpe', 'a'] +['ek', 'o'] +['document', 'o'] +['Ġhyster', 'ia'] +['Ġaren', 'as'] +['Ġgun', 'men'] +['Ġm', 'ike'] +['Ġimp', 'unity'] +['atis', 'ation'] +['_Z', 'ero'] +['_COMP', 'ANY'] +['ĠG', 'ors'] +['Ġuse', 'Class'] +['(', 'redis'] +['ĠRUN', 'NING'] +['ĠB', 'air'] +['vel', 'te'] +["Ġ','", '.'] +['аÑĤÑĮ', 'ÑģÑı'] +['ö', 'st'] +['encode', 'URIComponent'] +['_re', 'strict'] +['Ġdec', 'als'] +['ĠPed', 'ido'] +['Ġalter', 'cation'] +['Dis', 'plays'] +['ĠApp', 'licants'] +['C', 'US'] +['Text', 'area'] +['ĠAng', 'ola'] +['.f', 'uture'] +['ĠUS', 'HORT'] +['Ġsuppress', 'ing'] +['Ġset', 'zen'] +['AP', 'olynomial'] +['Ġto', 'ch'] +['Ġhall', 'mark'] +['Ġ$', '$$'] +['ĠCHAR', 'SET'] +['.r', 'pm'] +['ĠD', 'ich'] +['----------------', '----'] +['_p', 'arm'] +['è¿', 'ĺ'] +['acc', 'iones'] +['h', 'ait'] +['WAR', 'DED'] +['_r', 'outing'] +['ĠN', 'OM'] 
+['Ġen', 'clave'] +['ĠLot', 'to'] +['ĉf', 'r'] +['complex', 'Content'] +['ĠBall', 'ard'] +['k', 'ube'] +['/w', 'in'] +['.getColumn', 'Model'] +['_RE', 'PLACE'] +['Header', 'Value'] +['Ġest', 'udiantes'] +['Ġap', 'is'] +['Ġb', 'pm'] +['ĠType', 'Name'] +['And', 'Get'] +['rit', 'a'] +['Pl', 'ans'] +['>', 'Note'] +['Ġfet', 'isch'] +['Ġton', 'ed'] +['_g', 'oto'] +['ons', 'ense'] +['Ġm', 'olds'] +['Ġinfiltr', 'ation'] +['ĠGuerr', 'ero'] +['ub', 'bo'] +['ck', 'i'] +['($', '(".'] +['_', 'activities'] +['(ch', 'anges'] +['Ġof', 'App'] +['ĠKe', 'pler'] +['ĠD', 'emp'] +['ĠCont', 'inent'] +['.T', 'icks'] +['ĠUn', 'signed'] +['ĠJah', 'res'] +['Ġfresh', 'men'] +['ĠArch', 'ived'] +['ĠкоÑĤоÑĢ', 'Ñĭй'] +["Ġ'", '::'] +['T', 'utorial'] +['C', 'c'] +['Ġtable', 'LayoutPanel'] +['from', 'Json'] +['.level', 's'] +['_trans', 'ient'] +['Ġendors', 'ing'] +['ĠD', 'IC'] +['la', 'uf'] +['Ġsh', 'red'] +['_E', 'MIT'] +['ific', 'antly'] +['AL', 'A'] +['/', 'proto'] +['Ġnarrow', 'ing'] +['U', 'tc'] +['Fact', 'ors'] +['Ġsent', 'ient'] +['æŀ', 'IJ'] +['lix', 'ir'] +['ĠC', 'ROSS'] +['met', 'eor'] +['Ġgro', 'in'] +['Ġm', 'db'] +['ĠRot', 'terdam'] +['Ġcom', 'ida'] +['ĠOp', 'Code'] +['ĠDefault', 'Value'] +['Permissions', 'Result'] +['Ġheter', 'ogeneous'] +['Ġm', 'oot'] +['Ġde', 'ceived'] +['-in', 'dependent'] +['ĠObject', 'OutputStream'] +['Ġover', 'power'] +['.d', 'up'] +['Ġl', 'db'] +['Ġdomest', 'ically'] +['Ġbest', 'ellen'] +['Ġlo', 'v'] +['ĠContract', 'ors'] +['Tri', 'angles'] +['Ġfod', 'der'] +['Ġfilm', 'es'] +['ä¼', 'ģ'] +['Ġrev', 'olver'] +['Startup', 'Script'] +['/', 'validation'] +['ĠResource', 'Type'] +['i', 'ÅŁ'] +['ĠL', 'az'] +['f', 'ef'] +['Ġlst', 'm'] +['{', '*'] +['.', 'attachment'] +['.h', 'its'] +['ew', 'ith'] +['DO', 'G'] +['Al', 'abama'] +['Ġmedium', 's'] +['.m', 'Context'] +['-c', 'ols'] +['åı', 'ĭ'] +['.not', 'ice'] +['Ġat', 'tn'] +['ĠP', 'acking'] +['ĠL', 'n'] +['_COM', 'PLEX'] +['/', 'Users'] +['.sav', 'etxt'] +['ĠR', 'ounds'] +['?,?,', '?,?,'] +['Ġing', 'l'] +['ĠR', 'OC'] +['_f', 
'emale'] +['ĠSt', 'ard'] +[']]', ';'] +['Ġwrest', 'lers'] +['Ġtorrent', 's'] +['Ġsin', 'h'] +['', 'ĊĊ'] +['ë³', 'µ'] +['s', 'ense'] +['how', 'ever'] +['.Ph', 'ysics'] +['Inf', 'rastructure'] +['ĠSac', 'r'] +['F', 'el'] +['ĠD', 'ISTRIBUT'] +['é', 'ments'] +['ĠValid', 'ates'] +['################################################', '############'] +['Ġ|', '/'] +['Ġes', 'l'] +['Ġré', 'seau'] +['ĠB', 'ip'] +['BY', 'TES'] +['_W', 'ATER'] +['Turn', 'ing'] +['EL', 'S'] +['Ġj', 'uxtap'] +['Ġlesb', 'ische'] +['ý', 'ch'] +['(', 'Unknown'] +['Ne', 'o'] +['@', 'JsonProperty'] +['Ġal', 'umnos'] +['ĠRaq', 'qa'] +['ime', 'i'] +['.get', 'Bounds'] +['.Mouse', 'EventHandler'] +['####', '###'] +['Generic', 'Type'] +['/c', 'ms'] +['Ġturn', 'o'] +['Ġм', 'ин'] +['Ġfolk', 'lore'] +['ĠE', 'vo'] +['Ġconduct', 'ivity'] +['Ġle', 'ben'] +['Ġgear', 'box'] +['-v', 's'] +['ĠÏ', 'Ĩ'] +['Ġdrink', 'ers'] +['Ġcon', 'exao'] +['ĠTe', 'eth'] +['Ġget', 'Arguments'] +['ĠR', 'AT'] +['ent', 'ious'] +['E', 'duc'] +['+', 'W'] +['ĠInstitution', 'al'] +['ĠB', 'ord'] +['is', 'Equal'] +['(p', 'wd'] +['Ġign', 'ited'] +['ĠR', 'ousse'] +['Ġimpact', 'ful'] +['ĠM', 'alk'] +['Ġg', 'eral'] +['ĠP', 'ivot'] +['Ġa', 'zt'] +['Ġcsv', 'file'] +['ĠR', 'ope'] +['ĠSOL', 'UTION'] +['ĠArbit', 'rary'] +['Ġlet', 'to'] +['.Mouse', 'Adapter'] +['Ġ}', '}}'] +['ĠSail', 'or'] +['der', 'a'] +['Put', 'ting'] +['Ġconcentr', 'ates'] +['Ġauth', 'Domain'] +['âĢĿ', 'çļĦ'] +['-f', 'inals'] +[',', 'strlen'] +['Mu', 'on'] +['ĠOrd', 'inary'] +['fire', 'fox'] +['ĠLa', 'TeX'] +['ĠH', 'und'] +['engine', 'ering'] +['/', 'blue'] +['ed', 'TextBox'] +['("', '");'] +['ĠC', 'DDL'] +['ke', 'pt'] +['ĠGet', 'String'] +['K', 'ir'] +['()', "='"] +['ĠO', 'CD'] +['ant', 'ium'] +['$', 'menu'] +['ĠAppalach', 'ian'] +['Secret', 'ary'] +['ë¥', 'ĺ'] +['ี', 'ย'] +['Sem', 'antic'] +['Ġ*', '['] +['est', 'one'] +['ung', 'kin'] +['Max', 'Y'] +['-t', 'one'] +['"}', ';čĊ'] +['_P', 'art'] +['<', 'Member'] +['tr', 'am'] +['Ġtrans', 'istor'] 
+['Ġ----------------------------------------------------------------', '----------Ċ'] +['ĠDes', 'de'] +['Ġright', 'ful'] +['ĠCorn', 'el'] +['æ', 'ij'] +['.H', 'OUR'] +['Ġsidel', 'ined'] +['ref', 'errer'] +['m', 'aze'] +['Ġhol', 'ster'] +['Ġcripp', 'led'] +['ĠDate', 'Formatter'] +['oph', 'age'] +['_m', 'D'] +['Ġdes', 'elect'] +['ra', 'ud'] +['ĠPK', 'K'] +['row', 'Data'] +['Ġlock', 'smith'] +['.res', 'ponses'] +['(product', 'Id'] +['_ST', 'MT'] +['Key', 'Type'] +['.Th', 'en'] +['z', 'ee'] +['Ġcr', 't'] +['ĠGrand', 'ma'] +['@', 'Resource'] +['Ġbit', 'wise'] +['-c', 'mpr'] +['ãĢĤ', 'www'] +['zeit', 'ig'] +['&', 'display'] +['Cart', 'Item'] +['-', 'No'] +['Ġnum', 'éro'] +['Ġm', 'aur'] +['Ġinst', 'ancia'] +['ĉd', 't'] +['_n', 'pc'] +['Ġskate', 'board'] +['âĢľ', 'All'] +['ĠCrow', 'd'] +['Ġä', 'n'] +['Ġb', 'raz'] +['ca', 'e'] +['yn', 'et'] +['/p', 'm'] +['/s', 'creen'] +['OPT', 'ARG'] +['ĠV', 'Box'] +['Ġle', 'opard'] +['_g', 'reater'] +['c', 'pt'] +['<', 'dd'] +['Ġmechan', 'ically'] +['osp', 'els'] +[')', 'f'] +['.l', 'wjgl'] +['.get', 'Port'] +['ĠP', 'REF'] +['.Add', 'Transient'] +['pp', 'ard'] +['Ġí', 'ļĮ'] +['Ether', 'net'] +['Ġsal', 'ine'] +['(level', 's'] +['Ġservice', 'Provider'] +['.A', 'ngle'] +['alt', 'itude'] +['illa', 'ume'] +['Ġs', 'cape'] +['_CAL', 'C'] +['_', 'quest'] +['ĠDiss', 'ertation'] +['ĠE', 'DM'] +['-C', 'ds'] +['Ġhon', 'orary'] +['st', 'ops'] +['Ġsub', 'dir'] +['ĠV', 'H'] +['ĠChe', 'at'] +['Ġright', 'fully'] +['Q', 'E'] +['.Write', 'Byte'] +['fig', 'ures'] +['enn', 'ie'] +['(', 'DBG'] +['Ġvoks', 'ne'] +['Ġexp', 'ended'] +['UN', 'ICATION'] +['il', 'inx'] +['ĠRec', 'ap'] +['_', 'verts'] +['Ġtra', 'umat'] +['Ġget', 'Player'] +['Ġverb', 'ess'] +['Ġcultiv', 'ating'] +['Ġiniti', 'ator'] +['Th', 'ông'] +['find', 'First'] +['_per', 'ms'] +['Ġbu', 'c'] +['Ġ"""', 'čĊčĊ'] +['T', 'YPES'] +['object', 'Manager'] +['(Configuration', 'Manager'] +['Ġtim', 'id'] +['Ġsnap', 'chat'] +['Ġcon', 'seg'] +['ĉd', 'istance'] +['_right', 's'] +['_D', 'es'] +['ĠF', 'lesh'] 
+['-', 'ver'] +['Ġa', 'fl'] +['fra', 'uen'] +['Ġblas', 'ph'] +['ĠQual', 'ität'] +['ma', 'f'] +['Monitor', 'ing'] +['.D', 'iff'] +['Ġshore', 'line'] +['Ġresponse', 'Body'] +['mem', 'set'] +['<', 'decimal'] +['Smarty', 'HeaderCode'] +['Ġin', 'sets'] +['ĠBinary', 'Tree'] +['amed', 'a'] +['Ġn', 'ihil'] +['ĠN', 'ay'] +['ym', 'ology'] +['ĠW', 'G'] +['Ġt', 'api'] +['ĠInst', 'alled'] +['m', 'aintenance'] +[')}', '"Ċ'] +['ĠX', 'O'] +['-per', 'iod'] +['s', 'ar'] +['Ġning', 'una'] +['ORM', 'AT'] +['.set', 'PrototypeOf'] +['ĠK', 'b'] +['ĠHen', 'rik'] +['ét', 'ique'] +['ĠLah', 'ore'] +['ĉ', 'Address'] +['Ġmel', 'ts'] +['N', 'y'] +['_adv', 'ance'] +['Ġveloc', 'idad'] +['Ġalum', 'no'] +['Ġsanit', 'izer'] +['Ġph', 'ishing'] +['ĠCom', 'et'] +['Ġch', 'iar'] +['ĉs', 'pec'] +['trim', 'med'] +['(state', 'arr'] +['on', 'nen'] +['Re', 'venue'] +['L', 'ens'] +['Ġcha', 'ired'] +['ĠAss', 'umes'] +['Tr', 'ash'] +['_un', 'set'] +['\\', 'Bridge'] +['Point', 'Size'] +['ĠPol', 'ic'] +['Ġsex', 'uales'] +['ĉd', 'fs'] +['ĠWide', 'String'] +['Ġaccru', 'ed'] +['Y', 'W'] +['_S', 'CHEDULE'] +['Ġk', 'ite'] +['Ġparach', 'ute'] +['[', 'table'] +['Ġactive', 'ClassName'] +['.Qu', 'ad'] +['Israel', 'i'] +['ĠÅ', 'ĵ'] +['Ġho', 'og'] +['Ġch', 'á»ī'] +['ew', 'ear'] +['Ġtire', 'lessly'] +['set', 'Error'] +['.get', 'Amount'] +['.set', 'Items'] +['ĠM', 'anson'] +['ĠBay', 'esian'] +['_F', 'lag'] +['AC', 'HER'] +['/', 'original'] +['Ġimm', 'ac'] +['ĠLos', 'ing'] +["'", '>ĊĊ'] +['L', 'ic'] +['ĠMir', 'age'] +['ĠAssembly', 'FileVersion'] +['Te', 'V'] +['ĠValue', 'EventListener'] +['-s', 'olving'] +['Th', 'o'] +['rou', 'lette'] +['_W', 'P'] +['Ġunint', 'errupted'] +['Ġfield', 'Type'] +['.T', 'yped'] +['Ġam', 'our'] +['Ġmock', 'ery'] +['(v', 'ol'] +['ĠSub', 'committee'] +['ĠR', 'uf'] +['ero', 'x'] +[':UIButtonType', 'Custom'] +['ĠBl', 'ur'] +['Ġwy', 'kon'] +['nc', 'es'] +['ASH', 'BOARD'] +['!!', '");Ċ'] +['Ġmurder', 'ers'] +['.d', 'aily'] +['ĠDI', 'AG'] +['j', 'ing'] +['Ġdol', 'phin'] +['Ġl', 'òng'] +['Ġb', 'ö'] +['ĠV', 
'ocabulary'] +['.St', 'Object'] +["')", '">'] +['Ġz', 'un'] +['Ġscrim', 'mage'] +['tr', 'éal'] +['ĠL', 'ig'] +['[', 'vi'] +['C', 'ole'] +['Ġfrost', 'ing'] +['.Pl', 'ayers'] +['-', 'translate'] +['Fe', 'els'] +['=\\"', '/'] +['.Butter', 'Knife'] +['Ġ?>', ';Ċ'] +['Ġav', 'i'] +['inn', 'ie'] +['.F', 'ailure'] +['Ġsp', 'indle'] +['Configuration', 'Exception'] +['_h', 'op'] +['Ġpos', 'ição'] +['ĠA', 'wait'] +['UIImage', 'PickerController'] +['ĉ', 'day'] +['Ġgen', 'om'] +['C', 'ab'] +['ĠÑĢ', 'езÑĥлÑĮÑĤаÑĤ'] +['OR', 'IGINAL'] +['Ġejac', 'ulation'] +['(t', 'cp'] +['SE', 'COND'] +['Ġton', 'ic'] +['ĠList', 'Box'] +['Ġ', 'ĉĉĊ'] +['()', '>Ċ'] +['Ġqu', 'atre'] +['ượ', 'ng'] +['with', 'Errors'] +['.M', 'aybe'] +[',', 'â̦'] +['token', 'Id'] +['_UN', 'DEF'] +['Ġfresh', 'ness'] +['ĠAmend', 'ments'] +['.map', 'box'] +['.C', 'V'] +['(b', 'log'] +['_get', 'time'] +['.', 'quest'] +['s', 'parse'] +['Ġres', 'ale'] +['Ġenthusi', 'astically'] +['ĠProstit', 'utas'] +['W', 'a'] +['C', 'argo'] +['.Parcel', 'able'] +['SENS', 'OR'] +['ĠRy', 'u'] +['La', 'ughs'] +['_N', 'ative'] +['/', 'pg'] +['yst', 's'] +['Ġphot', 'oc'] +['ç®', 'Ģ'] +['ado', 'pt'] +['.spec', 'ies'] +['conc', 'iliation'] +['Adjust', 'ed'] +['.Firebase', 'Auth'] +['ut', 'tle'] +['ord', 'ination'] +['Ġm', 'unch'] +['ĠSt', 'ake'] +['.p', 'ing'] +['ank', 'er'] +['(QString', 'Literal'] +['Ġsub', 'script'] +['ĠĠ', 'ĉĊ'] +['ĠM', 'CC'] +['_C', 'md'] +['se', 'xy'] +['i', 'ou'] +['ĠM', 'ANY'] +['Ġn', 'anny'] +['TR', 'AIN'] +['Ġflour', 'ishing'] +['ĠW', 'atches'] +['ĠQ', 'Map'] +['ĠF', 'erm'] +['Ġwas', 'm'] +['ĠA', 'bed'] +['_', 'UD'] +['ĠGlass', 'es'] +['+', 'v'] +['Att', 'end'] +['.Ch', 'ain'] +['Ġdec', 'ency'] +['ĠSupplement', 'ary'] +['h', 'unter'] +['-t', 'xt'] +['Ġ"', '}";Ċ'] +['.set', 'WindowTitle'] +['("', ''] +['Ġmasc', 'ara'] +['(', 'Profile'] +['åĬŁ', 'èĥ½'] +['imit', 'é'] +['Ġwild', 'fires'] +['-', 'ROM'] +['.is', 'On'] +['(group', 'Id'] +['Re', 'pair'] +['accum', 'ulate'] +['Ġ<', '",'] +['Ġhand', 'written'] +['Ġach', 'eter'] 
+['ĠM', 'GM'] +['ĠIr', 'ma'] +['->{', '_'] +['ge', 'e'] +['cr', 'iminal'] +['Ġèĭ¥', 'è¦ģ'] +['Ġmoment', 'arily'] +['")', '!='] +['_l', 'it'] +['Ġexpires', 'In'] +['."', ').'] +['éķ¿', '度'] +['Ġfr', 'ække'] +['vl', 'c'] +['Ġor', 'bs'] +['),', '$'] +['Ġvent', 'ured'] +['/', '>\\'] +['char', 'm'] +['N', 'uitka'] +['eld', 'ig'] +['aton', 'in'] +['W', 'itness'] +['-l', 'at'] +['Ġset', 'Hidden'] +['Ġrelic', 's'] +['Ġcons', 'ulate'] +['.', 'IGNORE'] +['"', 'After'] +['Ġset', 'Address'] +['Ġbeste', 'ht'] +["Ġ''", ')ĊĊ'] +['.x', 'axis'] +['Ġser', 'ão'] +['Ġmis', 'led'] +['_UN', 'IFORM'] +['ĠV', 'IA'] +['inc', 'r'] +['Ġzen', 'ith'] +['Ġvis', 'cosity'] +['Ġthin', 'ly'] +['.get', 'SharedPreferences'] +['.Error', 'Code'] +['"),', '"'] +['ĠMillion', 'en'] +['Ġ/>', ')Ċ'] +['Scroll', 'Indicator'] +['-se', 'eking'] +['ĠPOLIT', 'ICO'] +['as', 'ca'] +['_r', 'l'] +['N', 'avig'] +['(full', 'file'] +['Ġsol', 'itude'] +['Ġju', 'ven'] +['Ġhaul', 'ing'] +['ĠMac', 'ros'] +['ĠG', 'ry'] +['Ġexerc', 'itation'] +['ĠATT', 'ACK'] +['Tick', 'Count'] +['Ġr', 'ites'] +['Ġdo', 'e'] +['Particle', 'System'] +['Ġsl', 'u'] +['Window', 'Text'] +['ĠClass', 'Name'] +['Ġsl', 'ander'] +['ĉ', 'Port'] +['j', 'ong'] +['?', 'a'] +['.D', 'ial'] +['âĢĶ', 'at'] +['$obj', 'PHPExcel'] +['Ġso', 'ar'] +['EN', 'N'] +['appe', 'ared'] +['Ġquot', 'id'] +['em', 'achine'] +['Ġn', 'ip'] +['Ġmicro', 'time'] +['ĠAl', 'ma'] +[';', '!'] +['----------------------------------------------------------------', '--------------------------------'] +['ĠPass', 'age'] +['Ġdump', 'sters'] +['ĠEx', 'clude'] +['Ġsuggest', 'ive'] +['ĠCircularProgress', 'Indicator'] +['_cl', 'r'] +['Array', 'Type'] +['ILL', 'A'] +['Elapsed', 'Time'] +['Dr', 'iven'] +['Ġresource', 'Name'] +['ĠG', 'arrison'] +['ser', 'ir'] +['-a', 'head'] +['Ġp', 'innacle'] +['ĠEs', 'presso'] +['S', 'parse'] +['Ġass', 'ays'] +['ĠGirl', 'friend'] +['im', 'id'] +["]='", '\\'] +['ONGL', 'ONG'] +['Ġportray', 'ing'] +['L', 'ane'] +['Ġb', 'úsqueda'] +['Ġrein', 'forcements'] +['ĠSpread', 
'sheet'] +['ĠArray', 'Collection'] +[',', 'arr'] +['light', 'box'] +['ic', 'ana'] +['<', '"'] +['build', 'ers'] +['K', 'id'] +['ĠMat', 'SnackBar'] +['EX', 'PR'] +['od', 'cast'] +['ĠFound', 'ations'] +['Ġind', 's'] +["='", '${'] +['F', 'izz'] +['-function', 'al'] +['(work', 'space'] +['Ġstem', 'med'] +['_p', 'atches'] +['ĠJar', 'vis'] +['READ', 'ING'] +['Ġdisrespect', 'ful'] +['ĠQ', 'Dom'] +['Ġ$', '{Ċ'] +['est', 'atus'] +['Re', 'ached'] +['!', '.ĊĊ'] +['IL', 'T'] +['ĠN', 'DEBUG'] +['ĠCour', 'age'] +['birth', 'date'] +['ĠT', 'ing'] +['Ġutil', 'izado'] +['án', 'chez'] +['Out', 'door'] +['Ġhand', 'guns'] +['Ref', 'Count'] +['É', 'Ļ'] +['rom', 'o'] +['Ġt', 'ts'] +['.S', 'he'] +['ĠP', 'ane'] +['ãĢij,', 'ãĢIJ'] +['ĠIO', 'CTL'] +['/', 'black'] +['ins', 'cription'] +['Ġbi', 'opsy'] +['ĠTime', 'Interval'] +['.Test', 'Check'] +['ĠGUI', 'Style'] +['ĠCap', 'ability'] +['ĠBeit', 'rag'] +['don', 'nees'] +['T', 'reatment'] +['.back', 'up'] +['Ġsign', 'ings'] +['ĠB', 'oca'] +['dr', 'm'] +['.M', 'AIN'] +['Ġgo', 'ede'] +['ĠMark', 'up'] +['G', 'REE'] +['ĠBase', 'Service'] +['.C', 'reator'] +['Ġj', 'ails'] +['ĠK', 'ahn'] +['Ip', 'Address'] +['ACH', 'I'] +['Ġinhib', 'ited'] +['Ġ@', '$_'] +['ĠAss', 'ass'] +['Ġenvi', 'ado'] +['Hero', 'es'] +['ÐŁ', 'еÑĢ'] +['ĠM', 'aven'] +['.l', 's'] +['Ġ', 'ive'] +['|', 'RF'] +['Ġresize', 'Mode'] +['Ġrum', 'pe'] +['_attach', 'ments'] +['T', 'U'] +['Ġtact', 'ile'] +['Attempt', 'ing'] +['Ġro', 'bin'] +['y', 'aw'] +['Ġmerc', 'enaries'] +['ĠHab', 'itat'] +['end', 'date'] +['Ġo', 'xy'] +['ĉR', 'andom'] +['oh', 'on'] +['Is', 'Null'] +['ĠValidation', 'Result'] +['ãĥ', 'ļ'] +['um', 'bed'] +['pp', 'v'] +['Ġar', 'p'] +['ich', 'ick'] +['_r', 'nn'] +['ĠT', 'FT'] +['Tex', 'Image'] +['"', 'On'] +['ĠSam', 'pler'] +['top', 'l'] +['Ġj', 'ane'] +['y', 'ling'] +['ĠUN', 'ICODE'] +['Tab', 'Index'] +['<', '{Ċ'] +['s', 'uspend'] +['uv', 'ian'] +[',', 'application'] +['ол', 'иÑĩеÑģÑĤво'] +['y', 'at'] +['ez', 'ier'] +['ĠCH', 'UNK'] +['ĠAd', 'ler'] +['/', 'Add'] +['ĠKey', 'Value'] 
+['Ġspos', 'ób'] +['Sam', 'pling'] +['ch', 'ers'] +['_AM', 'D'] +['R', 'u'] +['.Must', 'Compile'] +['N', 'ation'] +['Ass', 'oc'] +['Man', 'aging'] +['ĠEng', 'l'] +['_G', 'B'] +['Ġsucc', 'inct'] +['Ġdis', 'liked'] +['ĠI', 'ke'] +['Bullet', 'in'] +['_ARCH', 'IVE'] +['Prop', 'osal'] +['Ġjog', 'ging'] +['.C', 'REATED'] +['Ġch', 'ol'] +['è£', 'ħ'] +['Į', '¨'] +['-p', 'ush'] +['Ġreserv', 'a'] +['core', 'v'] +['è', 'tre'] +['TH', 'R'] +['Ġincompet', 'ence'] +['Ġchar', 'isma'] +['æĦ', 'Ł'] +['Ġ"', '=='] +['BT', 'N'] +['ĠLoc', 'ator'] +['iv', 'et'] +["('.", "')Ċ"] +['Ġfor', 'IndexPath'] +['ô', 'me'] +['Ġcapac', 'it'] +['w', 'aters'] +['ĠWR', 'ONG'] +['ho', 'a'] +['ĠM', 'IPS'] +['Ġem', 'iss'] +['ĠJacqu', 'eline'] +['(c', 'mp'] +['Ġe', 'ens'] +['Le', 'o'] +['.tim', 'ing'] +['CLUS', 'ION'] +['Ġ("', '-'] +['åĵ', 'Ī'] +['.k', 'ode'] +['ĠUnd', 'ert'] +['Ġbew', 'ild'] +['ĠEss', 'en'] +['.h', 'd'] +['Ġren', 'egot'] +['Ġm', 'ower'] +['Ġl', 'sp'] +['Ġpen', 'chant'] +['Ġman', 'oe'] +['Ġag', 'li'] +['Ġrec', 'al'] +['ĠOPER', 'ATION'] +['(^', ')('] +['ĠÎ', '½'] +['ĠSc', 'oped'] +['Ġ@', '"Ċ'] +['=', 'label'] +['[', 'loc'] +['Int', 'l'] +['ĠN', 'z'] +['table', 't'] +['.Column', 'Name'] +['Ġscreen', 'Size'] +['DB', 'us'] +['co', 'oked'] +['-', 'registration'] +['âĢľ', 'One'] +['-n', 'on'] +['ĠwiÄĻ', 'c'] +['Ġcost', 'a'] +['.add', 'Tab'] +['.', 'conditions'] +['ĠH', 'ess'] +['MEM', 'ORY'] +['ĠAval', 'anche'] +['()', '}}Ċ'] +['Ġtri', 'plet'] +['Ġl', 'abyrinth'] +['ĠNode', 'List'] +['ĠNY', 'T'] +['Ġy', 'eni'] +['d', 'ff'] +['.Html', 'Controls'] +['AV', 'IS'] +['/', 'Math'] +['Ġmem', 'cmp'] +['اØ', '¡'] +['оÑģ', 'ÑĮ'] +['c', 'rap'] +['(p', 'ages'] +['Ġl', 'xml'] +['ĠQ', 'DateTime'] +['_t', 'cb'] +['Ġopen', 'id'] +['Ġsyn', 'aptic'] +['ĠMD', 'MA'] +['(s', 'lug'] +['igm', 'atic'] +['en', 'or'] +['Ġcr', 'amped'] +['G', 'OP'] +['Ń', 'IJ'] +['.is', 'File'] +['ĠD', 'ifferential'] +['Ġ="', '";Ċ'] +['ĉĉĉ', 'ĠĠĠĠĉ'] +['ĠC', 'ooke'] +['ĉU', 'FUNCTION'] +['Ġpersever', 'ance'] +['Relative', 'Layout'] 
+['IMPORT', 'ANT'] +['Ġex', 'on'] +['Ġо', 'н'] +['ib', 'ase'] +['(C', 'ONT'] +['n', 'ovation'] +['ä½', 'ķ'] +['[', 'sub'] +['Admin', 'Controller'] +['HTTP', 'Header'] +['cre', 'ar'] +['ĠN', 'IR'] +['ĠDrop', 'DownList'] +['Ġval', 'ide'] +['Ġde', 'hydration'] +['.', "']"] +['(W', 'IN'] +['Ġ...', '\\'] +['Ġphotos', 'hop'] +['ĉ', 'Init'] +['_c', 'ou'] +['Ġtime', 'Zone'] +['dar', 'win'] +['rom', 'atic'] +['Navigation', 'ItemSelectedListener'] +['br', 'ates'] +[']', '--;Ċ'] +['Ġtraged', 'ies'] +['ĠPed', 'iatrics'] +['SM', 'ART'] +['-A', 'PI'] +['ĠMessage', 'Lookup'] +['ĉ', 'vo'] +['Ġprejud', 'ices'] +['Ġm', 'A'] +['U', 'ps'] +['ĠMISS', 'ING'] +['ĉ', 'ad'] +['C', 'ream'] +['ĠT', 'b'] +['ĠMon', 'a'] +['_', 'ghost'] +['ĉt', 'ypes'] +['Em', 'b'] +['ĠDocument', 'ary'] +["');ĊĊ", 'ĊĊ'] +['Ġl', 'up'] +['_', 'Reference'] +['ĠB', 'ATCH'] +['Ġintertw', 'ined'] +['<', 'Cell'] +['ĠCab', 'r'] +['n', 'ation'] +['Ġis', 'Connected'] +['.remove', 'Listener'] +['Ġcon', 'g'] +['_t', 'i'] +['ĠSil', 'icone'] +['Ġê²°', 'ê³¼'] +['ĠW', 'AN'] +['ĠG', 'ibraltar'] +['/', 'response'] +['ĉp', 'erson'] +['ch', 'ants'] +['V', 'IP'] +['em', 'ergency'] +['Pixel', 'Format'] +['-', 'Am'] +['Ġsouth', 'western'] +['_pl', 'l'] +['if', 'ers'] +['_ON', 'CE'] +['ĠF', 'ayette'] +['.nc', 'bi'] +['_P', 'anel'] +['.Q', 'ual'] +['Ġpol', 'ys'] +['Ġcreate', 'StackNavigator'] +['�', 't'] +['Ġlay', 'offs'] +['ĠBl', 'anco'] +['Fe', 'at'] +['ĠV', 'imeo'] +['_ch', 'i'] +['_l', 'ifetime'] +['POINT', 'S'] +[',', 'private'] +['Ġunb', 'earable'] +['print', 'ing'] +['Ġc', 'gi'] +['.B', 'ACK'] +['Ġintern', 's'] +['ĠNew', 'ly'] +['inf', 'eld'] +['(', 'IB'] +['ĠK', 'ata'] +['ĠDef', 'endants'] +['Th', 'r'] +['é¢', 'Ħ'] +['_V', 'F'] +['FFFF', 'FFFF'] +['Ġdavid', 'jl'] +['Ġbitter', 'ly'] +['S', 'uggestions'] +['.set', 'Cancelable'] +['FIN', 'AL'] +['ason', 's'] +['_rw', 'lock'] +['_WRAP', 'PER'] +['Ġhapp', 'iest'] +['(row', 'Index'] +['ós', 'ito'] +['TOT', 'YPE'] +['Autom', 'ation'] +['Log', 'File'] +['Ġcons', 'olation'] +['ãĥ', 'Ģ'] 
+['Ġt', 'êm'] +['Ġpr', 'er'] +['rg', 'yz'] +['ĠG', 'eg'] +['ĉd', 'to'] +['.default', 'Value'] +['ĠK', 'ami'] +['ĠA', 'SE'] +['optim', 'ized'] +['Ġíı', '¬'] +['Ġorigin', 'ates'] +['err', 'Msg'] +['Ġespa', 'ço'] +['(S', 'YS'] +['ĠMc', 'B'] +['d', 'ance'] +['_det', 'ected'] +['Ġfr', 'ü'] +['ĉĉ', 'ĠĠĠĠĉĉ'] +['<', 'Date'] +['(com', 'b'] +['ĠDec', 'ide'] +['\\', 'Field'] +['ĠProp', 'osed'] +['R', 'ib'] +['Ġdis', 'likes'] +['ĠW', 'ien'] +['ĉ', 'Document'] +['Ġtr', 'af'] +['Ġst', 'oria'] +['ĠT', 'ells'] +["')", '=='] +['C', 'ri'] +['(', 'VALUE'] +['ĠBurn', 'ett'] +[',', 'void'] +['Ġdan', 'h'] +['Ġc', 'cp'] +['Block', 'chain'] +[':"-', '"`Ċ'] +['IC', 'lient'] +['IS', 'ODE'] +['Iss', 'uer'] +[')', '}čĊ'] +[',', 'but'] +['ĠU', 'ph'] +['(', 'Sub'] +['Ġtélé', 'phone'] +['ĠonData', 'Change'] +['Ġmarsh', 'aller'] +['-an', 'alytics'] +[',', 'content'] +['Ġdeb', 'acle'] +['_Value', 'Changed'] +['Ġfa', 'una'] +['Ġ#', '=>'] +['Ġf', 'oyer'] +["'util", 'isation'] +['ĠMü', 'ller'] +['ĠFet', 'ish'] +['Ġdefault', 'Manager'] +['Ġback', 'track'] +['B', 'ah'] +['Exp', 'licit'] +['_A', 'SCII'] +['Ġm', 'Activity'] +['(M', 'sg'] +['Ġê²', 'Į'] +['ĠTER', 'MS'] +['ĠAng', 'ie'] +['HS', 'V'] +['ĠMos', 'que'] +['.N', 'ames'] +['íĬ', '¼'] +['rest', 'e'] +['_p', 'arms'] +['Ġgap', 'ing'] +['Ġcro', 'pping'] +['Data', 'Frame'] +['Ġrespons', 'iveness'] +['_', 'undo'] +['_tr', 'an'] +['.', 'terminate'] +['Ġitalian', 'e'] +['Ġwalk', 'through'] +['Ġattract', 'iveness'] +['д', 'е'] +['_ST', 'S'] +['_', 'learn'] +['Ġchocol', 'ates'] +['ier', 'archical'] +['-th', 'inking'] +['Ġ', ')))'] +['ish', 'ments'] +['.Log', 'f'] +['ĠTM', 'Z'] +['ĠCan', 'ary'] +['fo', 'il'] +['ĠVacc', 'ine'] +['.v', 'x'] +['ĠSur', 'round'] +['Inter', 'mediate'] +['Ġi', 'ov'] +['v', 'ais'] +["';", '";Ċ'] +['ï½ŀ', 'ĊĊ'] +['éĢģ', 'æĸĻ'] +['â̦', 'it'] +['Se', 'ats'] +['Cl', 'ar'] +['W', 'ars'] +['ĠHutch', 'inson'] +['ĠHas', 'an'] +['!', "')ĊĊ"] +['ĠRich', 'ie'] +['che', 'iden'] +['($', "('"] +['Y', 'ork'] +['Ġl', 'ids'] +['Ġal', 'phanumeric'] 
+['ĠG', 'lock'] +['.sh', 'apes'] +['Ġspark', 'ing'] +['_', 'epsilon'] +['uplic', 'ated'] +['.dir', 'ty'] +['])', '=='] +['ĠìľĦ', 'ì¹ĺ'] +['Ġsc', 'n'] +['Ġ/', '****************************************************************'] +['_PRE', 'VIEW'] +['_H', 'C'] +['ield', 'ing'] +['f', 'gets'] +['ĠAdd', 'ison'] +['Ġproduct', 'Service'] +['-', 'figure'] +['(ret', 'val'] +['z', 'ano'] +['Ġaut', 'ob'] +['ĉs', 'd'] +['_n', 'umer'] +['ĠSet', 'LastError'] +['ĠF', 'ior'] +['ific', 'ance'] +['Unt', 'itled'] +['Ġin', 'field'] +['Ġ{}', '));Ċ'] +['Ġsp', 'ac'] +['Ġro', 'okies'] +['(des', 'cribing'] +['ng', 'en'] +['ி', 'à®'] +['.r', 'df'] +['.M', 'utex'] +['Ġkne', 'eling'] +['ĠQ', 'E'] +['set', 'Max'] +['Read', 'Stream'] +['Ġvent', 'as'] +['s', 'ut'] +['cm', 'peq'] +['.WriteAll', 'Text'] +['ĠEx', 'perienced'] +['$', '__'] +['Ġka', 'um'] +['ĠL', 'IS'] +['Ġdocument', 'os'] +['_HE', 'ALTH'] +['icont', 'ains'] +['Ġart', 'isans'] +['OWN', 'ER'] +['Ġblink', 'ed'] +['get', 'Display'] +['Ġto', 'en'] +['Ġrow', 'Num'] +['Ġav', 'ril'] +['Ġinv', 'is'] +['ĠK', 'ear'] +['toBe', 'InTheDocument'] +['ap', 'ur'] +['Ġr', 'acked'] +['ĠMc', 'Master'] +['_ATTR', 'IB'] +['H', 'az'] +['Ġfact', 'ura'] +['/', 'ts'] +['ĠÑĢаз', 'меÑĢ'] +['Ġz', 'f'] +['Ġshort', 'fall'] +['.f', 'asta'] +['ĠCONST', 'ANT'] +['.man', 'aged'] +['g', 'ems'] +['Shared', 'Pointer'] +['Ġblur', 'ry'] +['b', 'rightness'] +['(', 'components'] +['Ġ...', '"ĊĊ'] +['SE', 'LL'] +['ĠIllustr', 'ator'] +['.get', 'Channel'] +['Ġtrou', 'vé'] +['yst', 'ers'] +['Ġvo', 'is'] +['ĠLind', 'en'] +['Ġem', 'ojis'] +['Ġb', 'rawl'] +['ĠMS', 'R'] +['ĠE', 'lo'] +['ĠCroat', 'ian'] +['Popup', 'Menu'] +['L', 'ewis'] +['.J', 'WT'] +['Ġaston', 'ished'] +['B', 'ush'] +['(item', 'Id'] +['Ġdet', 'achment'] +['ĠEnc', 'ore'] +['å°', 'Ķ'] +['Ġre', 'kl'] +['Ġcr', 'am'] +[')$', '/'] +['.get', 'Host'] +['_re', 'commend'] +['-', 'HT'] +['_cal', 'ibration'] +['Auth', 'enticate'] +['.firebase', 'app'] +['UN', 'IX'] +['ĉC', 'amera'] +['ĠHE', 'AP'] +['I', 'deal'] +['.', 'office'] 
+['Ġgoof', 'y'] +['(S', 'ymbol'] +['Ġjou', 'er'] +['_part', 'itions'] +['Ġrapid', 'ement'] +['ĠGN', 'UNET'] +['id', 'User'] +['Ġsuperv', 'ise'] +['(', 'Contact'] +['AW', 'N'] +['ãģ', 'ĺ'] +['Ġna', 'am'] +['Ġa', 'ust'] +['åľ¨', '线'] +['_soft', 'max'] +['Allow', 'Anonymous'] +['amm', 'able'] +['RO', 'UTE'] +['*', 'D'] +['Ġad', 'en'] +['ĠCrist', 'ina'] +['ĠCrist', 'iano'] +['Ġblood', 'stream'] +['sub', 'class'] +['_person', 'a'] +['CH', 'ILD'] +['-k', 'now'] +['Ġnavigation', 'Options'] +['ĠZuk', 'unft'] +['ĠPix', 'ar'] +['Ty', 'ler'] +['Ġunder', 'world'] +['Ġsincer', 'ity'] +['Ġdispens', 'er'] +['Ġk', 'ter'] +['idd', 'ers'] +['.add', 'Node'] +['-', 'checked'] +['Ġke', 'yst'] +['ĠW', 'TO'] +['.sign', 'als'] +['Ġadvent', 'urer'] +['ĠP', 'ang'] +['\\', 'R'] +['=', 'pos'] +['Ġdispens', 'aries'] +['ĠClo', 'set'] +['("{', '\\"'] +['ide', 'on'] +['Ġnécess', 'aire'] +['()', '"Ċ'] +['_RECE', 'IVED'] +['Ġrésult', 'ats'] +['Ġmod', 'en'] +['ĠIceland', 'ic'] +[';', 'd'] +['.', 'allowed'] +['(new', 'User'] +['Ġmerc', 'iless'] +['.Wait', 'For'] +['Ġday', 'care'] +['ĠCon', 'veyor'] +['ç', 'ĸ'] +['ð', '¬'] +['ç', 'ĥ'] +['ç', 'Ĺ'] +['ç', 'ł'] +['è', 'Ħ'] +['é', '²'] +['å', '¦'] +['çĿ', 'Ģ'] +['å¾', 'Ī'] +['é', 'ħ'] +['ç', 'ĭ'] +['é', 'ª'] +['æ', 'Ĥ'] +['é', '¥'] +['è', 'ħ'] +['æĥ', '³'] +['å', '¨'] +['é', '¹'] +['ç', 'Ĥ'] +['å', 'Ĵ'] +['ç', 'Į'] +['è´', '¨'] +['æ', '¢'] +['æ°', 'Ķ'] +['ð', '«'] +['æķ', 'Ļ'] +['ç', 'Ł'] +['å', 'Ħ'] +['åıij', 'å±ķ'] +['åĪ', 'Ľ'] +['è', 'ij'] +['æ', 'ħ'] +['å', 'ŀ'] +['åģ', 'ļ'] +['æĪ', 'ĺ'] +['æ', 'IJ'] +['å¼', 'º'] +['æ·', '±'] +['åĩ', 'ł'] +['ç', '¿'] +['å', '©'] +['è', 'ŀ'] +['å§', 'Ķ'] +['åIJ', 'Ħ'] +['è', 'İ'] +['é', '¸'] +['é', 'º'] +['åı', 'Ĺ'] +['èģ', 'Į'] +['å', 'ĺ'] +['æ', '½'] +['é£', 'İ'] +['èIJ', '¥'] +['åħ', 'ļ'] +['è', 'ľ'] +['éĤ', '£'] +['é¢', 'Ĩ'] +['ç', 'ij'] +['é', '³'] +['æľ', '¯'] +['ä»', 'Ģ'] +['æĪ', '¿'] +['ç²', '¾'] +['å', 'ª'] +['é', 'Ĩ'] +['å¤', 'ª'] +['èĤ', '¡'] +['è', 'Ľ'] +['åħ', 'ī'] +['æŀ', 'ģ'] +['åĬ', 'ŀ'] +['è', 'ĵ'] 
+['ç', 'ĺ'] +['å', '´'] +['å', 'Ĺ'] +['èĬ', '±'] +['çł', 'Ķ'] +['å¿', '«'] +['å¸', 'Ī'] +['è¶', 'Ĭ'] +['è§', 'Ĥ'] +['æ', '¤'] +['æ', '¦'] +['ç', 'ŀ'] +['èĤ', '²'] +['çĪ', '±'] +['çĻ', '½'] +['ä¸', 'ĸ'] +['ä»Ģ', 'ä¹Ī'] +['çľ', '¼'] +['å', '³'] +['è', 'Ĵ'] +['æ', 'ĵ'] +['è¢', '«'] +['å¹', '²'] +['çĹ', 'ħ'] +['å£', '«'] +['ç', 'Ĵ'] +['è', '¸'] +['æ', '¾'] +['å·¥', 'ä½ľ'] +['è®', '©'] +['çĥ', 'Ń'] +['è¾', 'ĥ'] +['åĦ', '¿'] +['åĬ', '©'] +['ç§', '¯'] +['ç', '³'] +['ç', 'ĵ'] +['ç', '£'] +['å', 'Ĥ'] +['è', '¹'] +['è', 'ļ'] +['å·', '±'] +['çĻ', '¾'] +['åĬ', '¿'] +['èµ', 'Ľ'] +['æ', '¨'] +['æ', '¿'] +['è', 'ĸ'] +['æĿ', 'ij'] +['å¸', '¦'] +['å¢', 'ĥ'] +['æĬ', '¤'] +['é', 'Ń'] +['å', '«'] +['èĩª', 'å·±'] +['æµ', 'İ'] +['ä½', 'İ'] +['åĮ', '»'] +['éĺ', '²'] +['åĨ', 'ľ'] +['è', 'Ĩ'] +['ç', 'Ĩ'] +['é', '«'] +['åĨ', 'Ľ'] +['æĪ', 'ı'] +['åį', 'ĩ'] +['æĸ', '¯'] +['ä½', 'ı'] +['èIJ', '½'] +['åħ', '»'] +['èĩ', '´'] +['ç', 'Ĭ'] +['ç', 'ĩ'] +['ç', 'ħ'] +['è', 'Ķ'] +['ä¼ģ', 'ä¸ļ'] +['åĽ', '¢'] +['æī', 'į'] +['æł', '¡'] +['åĩ', 'Ĩ'] +['å¥', 'ĩ'] +['åī', '¯'] +['é', '¼'] +['æ¼', 'Ķ'] +['é©', '¬'] +['èµ', '°'] +['ç¥', 'ŀ'] +['åħ', 'ĭ'] +['æľ', 'Ľ'] +['æ²', '¹'] +['è¾', '¹'] +['åį', 'ĥ'] +['å¾', 'Ģ'] +['åĪ', 'ĩ'] +['æ', '©'] +['ç', '¶'] +['å', 'Ļ'] +['éĻ', 'ħ'] +['çī', 'Į'] +['社', 'ä¼ļ'] +['游', 'æĪı'] +['æĸ', '½'] +['ç', 'ħ§'] +['æİ', '§'] +['æ»', '¡'] +['è¯', 'Ĩ'] +['éĩį', 'è¦ģ'] +['è¶', '³'] +['çķ', 'Ļ'] +['ç»', 'Ĩ'] +['åį', 'ı'] +['éĢ', 'Ĥ'] +['æ', 'ĩ'] +['æ', '§'] +['é', 'Ħ'] +['è', 'Ŀ'] +['å¸Ĥ', 'åľº'] +['ç»ı', 'æµİ'] +['ä¹', 'ł'] +['æĸĩ', 'åĮĸ'] +['éļ', '¾'] +['ä¹', 'IJ'] +['åĨ', '³'] +['æ¬', '¢'] +['è§', 'ī'] +['åĽ', 'Ń'] +['åħ', '´'] +['åħ', 'ħ'] +['ä¸', '¾'] +['æī', '¹'] +['è', 'ķ'] +['æĬ', 'Ĭ'] +['æĬĢ', 'æľ¯'] +['ç©', '¶'] +['第', 'ä¸Ģ'] +['ä¾', '¿'] +['åĵ', 'į'] +['çİ', '©'] +['åĿ', 'ļ'] +['èŀ', 'į'] +['åį', 'Ĭ'] +['åĸ', 'ľ'] +['å±', 'Ĥ'] +['ç¦', '»'] +['ä»', 'ħ'] +['é', 'Ł'] +['åij', '³'] +['å¿', 'µ'] +['åŃ', '£'] +['ç´', '§'] +['ä¹', 'ħ'] +['é', '¤'] +['é', 'ŀ'] +['è', '¤'] 
+['åĢ', 'Ļ'] +['åĨ', 'µ'] +['ç', 'ٳ'] +['åģ', '¥'] +['æĢ', 'İ'] +['å®', 'Ŀ'] +['è¡', 'Ģ'] +['åŁ', 'Ł'] +['æĹ', '©'] +['çŁ¥', 'éģĵ'] +['è´', 'Ł'] +['åį', 'ļ'] +['å·', '´'] +['äº', '²'] +['å±', 'ŀ'] +['ä¸', '¥'] +['äº', 'ī'] +['å¯', 'Ł'] +['è', 'º'] +['ç', '°'] +['建', '设'] +['产', 'ä¸ļ'] +['åIJ', 'ĥ'] +['åŃ', '©'] +['æĹ', 'ħ'] +['æł', '¹'] +['æĿ', 'IJ'] +['ä¼', 'Ĺ'] +['éļ', 'ı'] +['å®', 'ĺ'] +['åº', 'ķ'] +['å½', '©'] +['å¯', 'Į'] +['æ¸', '©'] +['åį', '«'] +['åī', '§'] +['çĽ', 'Ĭ'] +['æĬ', 'Ĺ'] +['è´', '¢'] +['çº', 'ª'] +['æ', 'Ĩ'] +['çĶŁ', 'æ´»'] +['çº', '¢'] +['çĶŁ', '产'] +['è¿', 'ľ'] +['éĴ', '±'] +['åĶ', '®'] +['ç¾', '¤'] +['çı', 'Ń'] +['æ¥', '¼'] +['éĩ', 'ĩ'] +['èī', 'º'] +['å±', 'ħ'] +['åģ', 'ĩ'] +['è°', 'Ī'] +['æĻ', 'ļ'] +['é', '¬'] +['èĪ', 'ª'] +['å®', '³'] +['è', 'Ĺ'] +['ç', 'į'] +['å', 'µ'] +['çİ', 'ĭ'] +['åº', '·'] +['è', 'İ·'] +['ç»', 'Ń'] +['äº', 'ļ'] +['é£', 'Ł'] +['åİ', 'ĭ'] +['æĭ', 'Ľ'] +['èĮ', 'ĥ'] +['è®', '¸'] +['åĽ', '´'] +['é', '½'] +['éĻ', 'į'] +['çº', '³'] +['åĵ', 'ª'] +['æķĻ', 'èĤ²'] +['å·²', 'ç»ı'] +['å¾', '·'] +['æŀ', 'Ĺ'] +['å®ī', 'åħ¨'] +['é¾', 'Ļ'] +['大', 'å®¶'] +['éĿ', 'Ĵ'] +['åº', 'ľ'] +['æ²', '³'] +['åı', '¤'] +['èį', '¯'] +['åĿ', 'ĩ'] +['æĻ', 'º'] +['ä¹', '¡'] +['çķ', '¥'] +['åĨ', '·'] +['ç¦', 'ı'] +['å®', '¤'] +['ç»', '´'] +['æī', '¿'] +['å±', 'Ĭ'] +['è¯', 'ī'] +['åĪ', '»'] +['è', 'Ł'] +['æ', 'ª'] +['å°±', 'æĺ¯'] +['è¿Ļ', '个'] +['ä¸Ń', 'å¿ĥ'] +['ä¸ĸ', 'çķĮ'] +['åŁİ', 'å¸Ĥ'] +['éĿŀ', '常'] +['åĪ', 'Ĵ'] +['åı', 'Į'] +['æĢİ', 'ä¹Ī'] +['åΰ', 'äºĨ'] +['æľ', 'ĥ'] +['åı', '²'] +['ä¾', 'Ĩ'] +['å¾', 'ĭ'] +['å¥', 'ĸ'] +['ç»', 'Ī'] +['åª', 'Ĵ'] +['å®', 'ģ'] +['è¯', '¾'] +['èģĮ', 'ä¸ļ'] +['åħ', 'į'] +['æµ', 'ĭ'] +['æĢ', '¥'] +['æķ', 'ij'] +['çĭ', '¬'] +['èŃ', '¦'] +['é¤', 'IJ'] +['æĦ', '¿'] +['è´', '«'] +['çĸ', 'ij'] +['å', 'ļ'] +['å¥', '¹'] +['åı', 'Ī'] +['åĽł', '为'] +['ä¸į', 'æĺ¯'] +['å¤', 'Ł'] +['æĸ¹', 'éĿ¢'] +['éķ', 'ĩ'] +['äº', 'Ĵ'] +['éħ', 'Ĵ'] +['è®', '²'] +['çĸ', 'Ĺ'] +['æĺ', '¥'] +['æ¹', 'ĸ'] +['å¤', 'ľ'] +['è´£', 'ä»»'] +['人', 'æ°ij'] 
+['åħ', '°'] +['çŁ', 'Ń'] +['æķ', 'ħ'] +['åĩ', 'ı'] +['æĻ', '®'] +['äº', '®'] +['ä¾', 'Ŀ'] +['åį', '°'] +['éĿ', 'Ļ'] +['åĢ', 'ĭ'] +['å¾', 'ģ'] +['åIJ', '¸'] +['ç¼', 'º'] +['æĶ', '»'] +['åĩ', 'Ģ'] +['åħ', '¸'] +['åĽ', 'º'] +['è®', '¿'] +['ç', '¹'] +['ç', 'Ģ'] +['æıIJ', 'ä¾Ľ'] +['ç»', 'ĩ'] +['å¾Ī', 'å¤ļ'] +['çłĶ', 'ç©¶'] +['è·', 'Ł'] +['主', 'è¦ģ'] +['æĥħ', 'åĨµ'] +['çŃ', 'ĸ'] +['æŃ', '»'] +['大', 'åѦ'] +['æĶ¿', 'åºľ'] +['å½±', 'åĵį'] +['ä¹', '°'] +['åħ', 'Ń'] +['éĻ', '©'] +['åħ', '«'] +['æŁ', 'IJ'] +['è´¨', 'éĩı'] +['åį', 'ł'] +['å·', '®'] +['æĽ´', 'å¤ļ'] +['æľ', 'ĭ'] +['éĿ', '©'] +['å®', '£'] +['çł', '´'] +['è½', '»'] +['åº', '§'] +['æĺ', '¾'] +['ç¨', '³'] +['è´', 'µ'] +['èĥ', 'Į'] +['èī', '¯'] +['çĸ', '«'] +['æ¯', 'Ĵ'] +['ä¹', 'İ'] +['åĢ', 'Ł'] +['è¿', '·'] +['çŃ', 'Ķ'] +['æ¿', 'Ģ'] +['åij', '¼'] +['äºĨ', 'ä¸Ģ'] +['è¶', '£'] +['ä¼', '´'] +['ä¼', 'Ļ'] +['è', '¼'] +['ð¬', 'Ń'] +['åĽ½', 'å®¶'] +['æ´»', 'åĬ¨'] +['çݰ', 'åľ¨'] +['ç§ij', 'æĬĢ'] +['åį', '¡'] +['ä¸į', 'åIJĮ'] +['个', '人'] +['è®°', 'èĢħ'] +['ä¸į', 'æĸŃ'] +['éĹ', '»'] +['ä¹', 'Ŀ'] +['èij', 'Ĺ'] +['ç»', '¼'] +['ä¸', 'ĥ'] +['æł', 'ij'] +['æľĭ', 'åıĭ'] +['åį', 'ĸ'] +['ä¼', '¤'] +['æ²', 'Ļ'] +['åĸ', 'Ħ'] +['å¥', 'Ĺ'] +['è½', '®'] +['ç©', '¿'] +['è¡', '¥'] +['ä¸Ģ', 'å®ļ'] +['çª', 'ģ'] +['çĿ', '£'] +['è¿', '½'] +['å¨', 'ģ'] +['åı', '¦'] +['åĽ', '°'] +['æŀ', '¶'] +['ç»', 'Ŀ'] +['æķ', '£'] +['æİ', '¢'] +['æ´', 'Ĺ'] +['ä¸', '´'] +['ä¼', '¼'] +['è´', '¸'] +['ä¸', '°'] +['æĺ¯', 'ä¸Ģ'] +['ç«', 'ŀ'] +['è¿', 'İ'] +['èģ', 'ļ'] +['è', '«'] +['æį', 'Ł'] +['æī', '§'] +['é©', '¾'] +['è¿', 'Ŀ'] +['è', '¥'] +['è', 'ł'] +['ä»ĸ', '们'] +['æĹ¶', 'åĢĻ'] +['å®', 'ĥ'] +['人', 'åijĺ'] +['è¿Ļ', 'æł·'] +['å·¥', 'ç¨ĭ'] +['åĪĽ', 'æĸ°'] +['åŃ©', 'åŃIJ'] +['å¸', 'Į'] +['éĥ¨', 'åĪĨ'] +['éĵ', '¶'] +['代', '表'] +['é¦', 'Ļ'] +['å¸', '®'] +['æİ¨', 'è¿Ľ'] +['çĽ', 'ĺ'] +['积', 'æŀģ'] +['éĥ¨', 'éŨ'] +['åŁ', '¹'] +['æŃ', '¦'] +['ä¸į', 'ä¼ļ'] +['çŃ', 'ij'] +['éĢ', 'Ļ'] +['çİ©', 'å®¶'] +['æĭ', '¿'] +['åİ', 'Ĥ'] +['æ¯', 'Ľ'] +['çģ', 'µ'] +['æŃ', 'Į'] +['ç', 
'»¿'] +['å¦', 'Ī'] +['çĽ', 'Ľ'] +['é¦', 'Ĩ'] +['é¡', 'º'] +['èĦ', '¸'] +['å°', '¼'] +['ä¸', '½'] +['å¥', '¥'] +['éģ', 'ĩ'] +['è¯', 'į'] +['å°', 'ģ'] +['ä¸', 'Ŀ'] +['好', 'çļĦ'] +['æĭ', 'ħ'] +['èĦ', '±'] +['æģ', '¶'] +['åİ', 'ļ'] +['åĬ', '³'] +['çĽ', 'Ł'] +['æĬ', 'ĺ'] +['åı', '¥'] +['æĢ', 'Ģ'] +['æŁ', 'ĵ'] +['书', 'è®°'] +['åĨ', 'ł'] +['é²', 'ľ'] +['æ', '¦Ĥ'] +['éļ', 'IJ'] +['å¹', 'ħ'] +['èµ', 'ŀ'] +['å¹', 'ķ'] +['æ¥', 'Ń'] +['éģ', 'Ĺ'] +['åĪ', '¤'] +['è', 'ĺ'] +['å', '¶'] +['æĬķ', 'èµĦ'] +['è¡Į', 'ä¸ļ'] +['äº', 'ij'] +['çݯ', 'å¢ĥ'] +['åѦ', 'çĶŁ'] +['åIJĪ', 'ä½ľ'] +['åģ¥', '康'] +['é£', 'ŀ'] +['ä¸Ģ', 'æŃ¥'] +['ä¸Ģ', '缴'] +['åıij', 'çĶŁ'] +['éĺ', '¿'] +['é¢Ĩ', '导'] +['åĸľ', '欢'] +['åºĶ', '该'] +['çĤ', 'º'] +['è®', 'Ń'] +['æĿ', 'Ģ'] +['æ¸', '¯'] +['交', 'éĢļ'] +['éĺ', '¶'] +['éĴ', '¢'] +['ä»', '¤'] +['å°', '½'] +['æ¯', 'į'] +['è¡', '£'] +['ç²', 'ī'] +['é¡', '¶'] +['ä¹Ł', 'ä¸į'] +['æĬ', 'ĵ'] +['èĭ', '¦'] +['å¹', '¸'] +['ç¤', '¼'] +['第', 'ä¸ī'] +['大', 'çļĦ'] +['éģ', 'İ'] +['çĥ', 'Ł'] +['éģ', '¿'] +['ä»', 'į'] +['åº', 'Ĩ'] +['æĢ', 'ķ'] +['è°', '¢'] +['çĽ', 'ĸ'] +['å°', 'Ħ'] +['éľ', '²'] +['æĸ', 'Ĺ'] +['ç', 'Ĭ¶'] +['åŃ', '¸'] +['æ¯', 'ķ'] +['å·', '¨'] +['çŁ', '¿'] +['çļ', 'ĩ'] +['å¸', 'Ń'] +['çĹ', 'ĩ'] +['æī', '¬'] +['å»', '¶'] +['ä¾', '§'] +['æ·', '¡'] +['çļĦ', 'ä¸Ģ'] +['ç¶', '²'] +['æ´', 'ģ'] +['ç', '¸'] +['è§', 'Ī'] +['çŃ', '¹'] +['ç§', 'ĺ'] +['è¯', 'Ĭ'] +['çı', '¾'] +['èª', 'ī'] +['æ¯', '«'] +['ð', '¨'] +['åį', '´'] +['æĪIJ', '为'] +['èĥ½', 'åĬĽ'] +['é»', 'Ħ'] +['æĹħ', '游'] +['èĪ', '¬'] +['æ¯Ķ', 'è¾ĥ'] +['èµ·', 'æĿ¥'] +['äºĨ', 'è§£'] +['èĩª', 'çĦ¶'] +['ä¸Ģ', '次'] +['åŁº', 'æľ¬'] +['æĽ', '¾'] +['综', 'åIJĪ'] +['èı', 'ľ'] +['è§ī', 'å¾Ĺ'] +['第', 'äºĮ'] +['è·', 'ij'] +['æ³', '¢'] +['åĢ', 'Ĵ'] +['ç¡', 'Ģ'] +['åħ', 'µ'] +['èį', 'ī'] +['çĶ', '³'] +['çĶ', '°'] +['æĤ', '£'] +['è§Ħ', 'å®ļ'] +['èĥ', 'ľ'] +['èµĦ', '产'] +['æ¢', '¦'] +['æľ', 'Ŀ'] +['è¿Ļ', 'éĩĮ'] +['å¤', '«'] +['æĮ', '¥'] +['ä½', 'Ľ'] +['å®', 'Ī'] +['éĽ', '¶'] +['æĸ', '¼'] +['ç¯', 'ĩ'] +['å²', 'Ľ'] +['åĵ', '¥'] +['éŃ', 
'Ķ'] +['ä¸į', 'åΰ'] +['æī', 'ĺ'] +['åº', 'Ĭ'] +['æ¬', '§'] +['èį', '£'] +['æ±', 'ĩ'] +['æī', '©'] +['åģ', 'ı'] +['å¢', 'Ļ'] +['è®', '¯'] +['å©', 'ļ'] +['æĥ', 'ł'] +['æ´', 'ĭ'] +['å®', 'ľ'] +['æ¶', '¦'] +['æħ', '¢'] +['éĢ', 'ı'] +['å®', '½'] +['é¡', '¾'] +['ç´', '¯'] +['æ±', '¡'] +['çĪ', 'Ĩ'] +['ç§', 'Ł'] +['æĥ', 'Ĭ'] +['æ¶', '¨'] +['é¥', '°'] +['éĺ', 'µ'] +['é¥', '®'] +['æļ', 'ĸ'] +['åº', 'Ł'] +['æĹ', 'Ĺ'] +['éļ', 'Ķ'] +['ç¶', 'ĵ'] +['åĭ', 'Ļ'] +['å¯', '¦'] +['éĢ', 'Ķ'] +['æī', '«'] +['çĥ', 'Ī'] +['éĽ', '»'] +['åĪ', 'ij'] +['éĹ', 'ľ'] +['éĹ', 'ª'] +['å¥', 'ĭ'] +['å', 'Ĥ¨'] +['ç¼', '©'] +['ä¾', 'µ'] +['å', '¬'] +['ð¬', '¶'] +['åĽ½', 'éĻħ'] +['ç»Ħ', 'ç»ĩ'] +['ä¸ĵ', 'ä¸ļ'] +['åıij', 'çݰ'] +['å¸Į', 'æľĽ'] +['ç»ı', 'èIJ¥'] +['åı', '«'] +['æĿ¥', '说'] +['éļ', 'ľ'] +['ä»»', 'ä½ķ'] +['交', 'æĺĵ'] +['éĩį', 'çĤ¹'] +['çļ', '®'] +['ç»', 'į'] +['æ´', '¾'] +['ç§ij', 'åѦ'] +['åºĶ', 'ç͍'] +['建', 'çŃij'] +['èĤ', 'ī'] +['æĶ¹', 'éĿ©'] +['åŁº', 'ç¡Ģ'] +['æ±', 'ī'] +['åĩº', 'æĿ¥'] +['è¿Ļ', 'ä¹Ī'] +['åĪ', 'ļ'] +['åĿ', 'IJ'] +['ä¸į', 'ä»ħ'] +['ä¼ļ', 'è®®'] +['éĿ', 'ł'] +['åªĴ', 'ä½ĵ'] +['æ°', '¸'] +['åĨ', '²'] +['èĭ', 'ı'] +['å¤', '®'] +['çĪ', '¶'] +['åł', 'Ĥ'] +['å®ŀ', 'éĻħ'] +['è¡', 'Ĺ'] +['ç«', '¥'] +['éĺ', 'ħ'] +['äºĭ', 'æĥħ'] +['åİŁ', 'åĽł'] +['éħ', '¸'] +['以', 'æĿ¥'] +['å¨', '±'] +['å®', '«'] +['åĿ', 'Ĺ'] +['ç»', '©'] +['éĩ', 'İ'] +['ä¸į', 'å¾Ĺ'] +['ä¼ł', 'å¥ĩ'] +['ç¡', '¬'] +['åİ', 'ħ'] +['æĹ', '¢'] +['ç»', 'ĥ'] +['èĦ', 'ij'] +['å¼', '±'] +['æİ', 'Į'] +['è´', '´'] +['æĮ', 'Ĥ'] +['åħ³', 'éĶ®'] +['å°', 'ļ'] +['é¥', 'Ń'] +['åº', 'Ħ'] +['çĻ', '¼'] +['åľ', 'ĭ'] +['æİ', 'Ī'] +['个', 'æľĪ'] +['äº', 'Ī'] +['å¸', 'ģ'] +['è·', 'Ŀ'] +['æ²', 'ī'] +['ç«', 'Ł'] +['åĨ', '¬'] +['æĬ', '½'] +['éĨ', 'Ĵ'] +['å¼', 'Ł'] +['è§', '¦'] +['èģ', 'ĺ'] +['è±', 'Ĩ'] +['æļ', '´'] +['åijĬ', 'è¯ī'] +['è±', 'ª'] +['èµ', '¢'] +['è·', '¨'] +['è³', 'ĩ'] +['çĪ', '¸'] +['æĬ', '±'] +['æµ', 'ª'] +['éº', '»'] +['ä»', 'ª'] +['è¡', '¡'] +['å¥', '¶'] +['çģ', '¾'] +['èµ', '¶'] +['èĤ', '¥'] +['å§', 'IJ'] +['åĢ', 'º'] +['éľ', 'ĩ'] 
+['è®', '¢'] +['æ¬', 'Ĭ'] +['ç', '·'] +['å»', 'ī'] +['ä¿', 'Ĺ'] +['å¿', 'ĺ'] +['å¦', 'ĩ'] +['ç¼', 'ĵ'] +['åŃ', 'ķ'] +['æ¼', '«'] +['è£', 'ģ'] +['çĩ', 'ĥ'] +['é»', 'ĺ'] +['çī', '¢'] +['çĪ', '·'] +['æĬ', 'µ'] +['å®', '¾'] +['æľī', 'ä¸Ģ'] +['è¿', '¹'] +['è¿', '«'] +['è²', 'Į'] +['æľī', 'çļĦ'] +['ð¬', 'ĺ'] +['è¿ĺ', 'æĺ¯'] +['æīĢ', '以'] +['ä¹Ł', 'æĺ¯'] +['è¿Ļ', 'äºĽ'] +['对', 'äºİ'] +['åIJ', '§'] +['缮', 'åīį'] +['èĩªå·±', 'çļĦ'] +['èĥ½', 'å¤Ł'] +['å¦Ĥ', 'ä½ķ'] +['æľº', 'æŀĦ'] +['åıª', 'æĺ¯'] +['ç½ij', 'ç«Ļ'] +['åħ¨', 'éĿ¢'] +['为', 'äºĨ'] +['å¼Ģ', 'åıij'] +['æĸ°', 'éĹ»'] +['éĩij', 'èŀį'] +['ç»', '§'] +['客', 'æĪ·'] +['ä¸Ģ', 'èµ·'] +['èĮ', '¶'] +['åħ³', '注'] +['æ°´', 'å¹³'] +['åİĨ', 'åı²'] +['å¢ŀ', 'éķ¿'] +['é', '±'] +['åŁº', 'éĩij'] +['åº', 'Ń'] +['åı', '¶'] +['ä¿', 'ĥ'] +['éĽ', '¨'] +['æ¶Ī', 'è´¹'] +['èĪ', '¹'] +['çŁ¥', 'è¯Ĩ'] +['æĪĺ', 'çķ¥'] +['ç»ı', 'éªĮ'] +['å³', '°'] +['æĽ', '²'] +['èĦ', 'ļ'] +['åĨ', '°'] +['å¤', 'ı'] +['å½', 'Ĵ'] +['ç¬', 'Ķ'] +['èĻ', 'ij'] +['çĶ', '²'] +['åľ', 'Ī'] +['è¯', 'Ĺ'] +['é½', 'IJ'] +['容', 'æĺĵ'] +['çłĶ', 'åıij'] +['éª', '¨'] +['çº', '¸'] +['è·', 'µ'] +['æĹ', '§'] +['çķ', '¶'] +['åĪ', '¸'] +['è´', '·'] +['åı', '¬'] +['ç§', 'ĭ'] +['æ¶', '²'] +['è¡Į', 'æĶ¿'] +['çĮ', '®'] +['èĤ', '¤'] +['éĢ', 'IJ'] +['è¶Ĭ', 'æĿ¥'] +['è¶ĬæĿ¥', 'è¶Ĭ'] +['æĦı', 'è§ģ'] +['èĪ', 'ŀ'] +['åī', 'Ĥ'] +['æ¶', 'ī'] +['ç¨ĭ', '度'] +['åħ¬', 'åħ±'] +['æ¢', '°'] +['æľ', '«'] +['çº', '¯'] +['åĶ', '±'] +['æ´', '²'] +['æĬ', '¢'] +['æ¤', 'į'] +['å¿', 'Ļ'] +['ä¼', '°'] +['å¼', '¹'] +['æ³', 'ī'] +['æľĢ', '大'] +['è¶', 'ĭ'] +['å·', '§'] +['ç¦', 'ģ'] +['æī', '¶'] +['åį', '±'] +['çı', 'ł'] +['çĨ', 'Ł'] +['æĭ', 'ľ'] +['主', 'ä¹ī'] +['æĿ', 'Ĥ'] +['éĻ', 'Ħ'] +['éģ', 'į'] +['æIJ', 'Ń'] +['æĮ', '¯'] +['å¤ļ', 'å¹´'] +['æķ', '¬'] +['æij', 'Ħ'] +['çº', '·'] +['å¼', 'ĥ'] +['æ¹', '¿'] +['å¨', 'ĺ'] +['æ¡', '£'] +['é©', '¶'] +['æľ', 'Ĺ'] +['æ®', 'ĸ'] +['æ¦', 'ľ'] +['åĵ', '¡'] +['ä¸Ģ', 'ä½ĵ'] +['æŁ¥', 'çľĭ'] +['ç¹', 'ģ'] +['æµ', 'ĵ'] +['åħ¬', 'å®ī'] +['æ½', 'ľ'] +['è´', '¯'] +['éª', 'Ĺ'] +['æ', 'IJľ'] 
+['å·', '¡'] +['è', '¬'] +['é', 'Ĭ'] +['å§Ķ', 'ä¼ļ'] +['æĤ', 'ł'] +['åī', '©'] +['æı', 'Ń'] +['åŃ£', '度'] +['ð', '«ĺ'] +['ð¬', '¬'] +['ä', '´'] +['ð', 'ª'] +['ä½Ĩ', 'æĺ¯'] +['éĥ½', 'æĺ¯'] +['å¹³', 'åı°'] +['åѦ', 'ä¹ł'] +['åĵģ', 'çīĮ'] +['ä¸', 'Ķ'] +['è¿Ļ', 'ç§į'] +['æĶ¿', 'çŃĸ'] +['æĭ', '¬'] +['认', '为'] +['ä¸Ģ', 'èά'] +['æłĩ', 'åĩĨ'] +['æĶ¯', 'æĮģ'] +['模', 'å¼ı'] +['åħ³', 'ç³»'] +['çļĦ', 'æĺ¯'] +['è¿Ļ', 'ä¸Ģ'] +['ä¸į', 'è¦ģ'] +['çĶ', 'ļ'] +['ç²¾', 'ç¥ŀ'] +['æĭ', '¥'] +['åĪ©', 'ç͍'] +['ä¿Ŀ', 'æĬ¤'] +['ä½ľ', 'ç͍'] +['èĭ', '¥'] +['åĽ½', 'åĨħ'] +['ä»ĭ', 'ç»į'] +['ä¸Ģ', 'ä¸ĭ'] +['å·¥', 'ä¸ļ'] +['缮', 'æłĩ'] +['æľĢ', 'åIJİ'] +['ä»·', 'å̼'] +['å°', 'į'] +['éĵ', 'ģ'] +['è°', 'ģ'] +['ç»ĵ', 'æŀĦ'] +['éĽ', 'ª'] +['æĻº', 'èĥ½'] +['ä¼ł', '绣'] +['ä½ĵ', 'èĤ²'] +['çĶŁ', 'æĢģ'] +['æĭ', 'į'] +['æİ', 'ª'] +['åĨľ', 'ä¸ļ'] +['çī¹', 'èī²'] +['è§Ħ', '模'] +['æĹ¶', '代'] +['è¿ĩ', 'ç¨ĭ'] +['éĴ', 'Ī'] +['æĿ', '¾'] +['åĶ', 'IJ'] +['åĮ»', 'çĸĹ'] +['çģ', '¯'] +['åζ', 'éĢł'] +['æł¸', 'å¿ĥ'] +['ä¸į', 'åı¯'] +['ç³»', 'åĪĹ'] +['åIJ', 'ī'] +['åľ', '£'] +['åĢ', 'ij'] +['ä½', '³'] +['æĿ¥', 'çľĭ'] +['æ¯Ķ', 'èµĽ'] +['ä¸ĭ', 'æĿ¥'] +['åĩº', 'äºĨ'] +['å¹²', 'éĥ¨'] +['å¾®', 'ä¿¡'] +['å½ĵ', 'åľ°'] +['åį', '·'] +['åį«', 'çĶŁ'] +['ä¼', 'Ł'] +['çĸ«', 'æĥħ'] +['è°', '·'] +['åĩł', '个'] +['éĺ', '´'] +['çĶŁ', 'çī©'] +['å°', '¤'] +['ä¼', 'Ĭ'] +['èĤ', '¯'] +['éĿ¢', '积'] +['åĪĽ', 'éĢł'] +['æı', '¡'] +['åľ', 'Ĩ'] +['æĻ', 'ĵ'] +['æĪIJ', 'äºĨ'] +['åĩ', '¡'] +['çĸ', '¾'] +['ç«ŀ', 'äºī'] +['è®', '¨'] +['主', 'é¢ĺ'] +['é²', 'ģ'] +['è¿', 'ª'] +['ä¿', 'Ħ'] +['æĢ', 'ª'] +['ä¸', '¦'] +['èĻ', 'ļ'] +['æ½', '®'] +['çĥ', '§'] +['èĢ', '³'] +['æ±', 'ł'] +['éĢĤ', 'åIJĪ'] +['æł¹', 'æľ¬'] +['åĬł', '缣'] +['ç͵', 'è§Ĩ'] +['æ·', '·'] +['ç¼', 'ĺ'] +['çª', 'Ĺ'] +['çĬ', '¯'] +['æĥ', '¯'] +['æĦı', 'ä¹ī'] +['åĬŀ', 'æ³ķ'] +['ä¼', 'ij'] +['æ»', 'ij'] +['åĭ', 'ĩ'] +['æķ', '¢'] +['å¯', '»'] +['è¦', 'Ĩ'] +['éĢ', 'ĥ'] +['ç»ı', 'çIJĨ'] +['åĿ', 'ı'] +['æ³', '½'] +['ä¹', 'ĺ'] +['åĪ', 'º'] +['å±', 'ı'] +['é¡', '¿'] +['äº', '¡'] +['éĤ', 'Ģ'] +['åħ', '¼'] 
+['åĭ', '¤'] +['æ®', 'ĭ'] +['æĺ', 'ł'] +['æ¯ķ', 'ä¸ļ'] +['æĪ', 'ª'] +['è·', 'Į'] +['å£', 'ģ'] +['åı¦', 'ä¸Ģ'] +['羣', 'å®ŀ'] +['ç£', '¨'] +['è¯', 'ļ'] +['å¿ħ', 'è¦ģ'] +['æģ', 'ĭ'] +['æĩ', 'Ĥ'] +['å¾', 'Ĵ'] +['è°', 'ĵ'] +['æķ', 'ı'] +['æ', 'ύ'] +['èĥ', '¸'] +['æĭ', '¼'] +['å¦', 'Ļ'] +['è¯', '¸'] +['èģ', 'Ĭ'] +['æĤ', 'ī'] +['éº', '¼'] +['åĩ', 'Ń'] +['èĪ', 'Ĵ'] +['æ¶', 'Ĥ'] +['è¿', 'ģ'] +['æ²', '¿'] +['å¡', 'ij'] +['æĽ', '¿'] +['æ¾', '³'] +['å¿', 'į'] +['èĢ', 'Ĺ'] +['éľ', '¸'] +['åĩł', 'å¹´'] +['åĪ', 'Ĭ'] +['èĦ', 'ī'] +['èħ', 'IJ'] +['æ¡', 'Į'] +['çº', 'ł'] +['æ»', 'ļ'] +['æĤ', '²'] +['åĨ', 'Ĵ'] +['å¦', '¹'] +['çķ', 'ħ'] +['çº', 'µ'] +['æij', 'ĩ'] +['å¤', 'º'] +['è·¯', 'ä¸Ĭ'] +['å¿', '½'] +['èĸ', 'ª'] +['æģ', 'IJ'] +['æĦı', 'æĢĿ'] +['å«', 'Į'] +['æı', '´'] +['æ°', '§'] +['èĢ', 'Ģ'] +['éĺ', '»'] +['è½', '¨'] +['å¹', '»'] +['æį', 'ķ'] +['åĿ', '¦'] +['åĵĪ', 'åĵĪ'] +['çĭ', 'IJ'] +['æ»', '¨'] +['è²', '»'] +['è¿', 'Ł'] +['人', 'éĥ½'] +['ç»', 'ĺ'] +['åı', '¹'] +['çµ', 'IJ'] +['æī', '°'] +['æ»', 'ĭ'] +['å¥', 'ij'] +['åĭ', 'Ł'] +['ç¢', 'º'] +['ð', '¦'] +['éĽĨ', 'åĽ¢'] +['æĿ', 'İ'] +['å¼Ģ', 'å±ķ'] +['æıIJ', 'åįĩ'] +['åħ¨', 'åĽ½'] +['æ±½', '车'] +['åѦ', 'æł¡'] +['æł¹', 'æį®'] +['è¿Ļ', 'æĺ¯'] +['åĩº', 'çݰ'] +['éĻ', 'Ī'] +['ç½', 'Ĺ'] +['èİ·', 'å¾Ĺ'] +['åĪ', 'ĺ'] +['éĶĢ', 'åĶ®'] +['æľª', 'æĿ¥'] +['éľĢ', 'æ±Ĥ'] +['å®ŀ', 'æĸ½'] +['åĿļ', 'æĮģ'] +['åħ¨', 'çIJĥ'] +['éĵ¶', 'è¡Į'] +['æİ§', 'åζ'] +['é¡', '»'] +['åľ°', 'åĮº'] +['æīĵ', 'éĢł'] +['çļĦ', 'è¯Ŀ'] +['帮', 'åĬ©'] +['ä½ĵ', 'ç³»'] +['è¾¾', 'åΰ'] +['è§Ħ', 'åĪĴ'] +['åŁ¹', 'è®Ń'] +['两', '个'] +['æĬ¥', 'åijĬ'] +['åľ°', 'æĸ¹'] +['å®Į', 'åħ¨'] +['æİ', 'ī'] +['ç»ĵ', 'åIJĪ'] +['宣', 'ä¼ł'] +['æ³ķ', 'å¾ĭ'] +['èīº', 'æľ¯'] +['ç͵', 'å½±'] +['èª', 'ª'] +['ä¸Ģ', 'çĤ¹'] +['è¶ħ', 'è¿ĩ'] +['ç͵', 'åŃIJ'] +['æĢĿ', 'æĥ³'] +['æķĻ', 'åѦ'] +['éĺ¶', '段'] +['åķĨ', 'ä¸ļ'] +['çī©', 'æµģ'] +['åĪĽ', 'ä¸ļ'] +['æĸ¹', 'æ¡Ī'] +['çݰ', '代'] +['æ¡', '¥'] +['èIJ½', 'å®ŀ'] +['带', 'æĿ¥'] +['产', 'çĶŁ'] +['ç§', 'Ģ'] +['æ³', '°'] +['ä¹', '±'] +['åħ·', 'ä½ĵ'] +['åĸ', 'Ŀ'] +['èĵ', 
'Ŀ'] +['å®', 'Ĺ'] +['åįĩ', '级'] +['æ·±', 'åħ¥'] +['ä¿Ŀ', 'éĻ©'] +['ç®Ģ', 'åįķ'] +['çĹ', 'Ľ'] +['稳', 'å®ļ'] +['è¾', 'Ĩ'] +['å±ŀ', 'äºİ'] +['å·', 'Ŀ'] +['ä¸į', 'å°ij'] +['åĴ', '¨'] +['举', '西'] +['å½¢', 'å¼ı'] +['娱', 'ä¹IJ'] +['æŃ£', '常'] +['é¸', '¡'] +['åħħ', 'åĪĨ'] +['å®ŀ', 'è·µ'] +['éĩĮ', 'éĿ¢'] +['è·', '³'] +['èĻ', 'İ'] +['æĪIJ', 'éķ¿'] +['æļ', 'Ĺ'] +['çĿ', '¡'] +['ç½', 'ª'] +['çIJĨ', '念'] +['æĮ', 'ij'] +['èµĦ', 'æľ¬'] +['å¤ļ', 'å°ij'] +['ä¸ĭ', 'éĿ¢'] +['å¸', 'Ŀ'] +['åħ¬', 'å¼Ģ'] +['æ¸', 'IJ'] +['éķ', '·'] +['å±', 'ĭ'] +['欢', 'è¿İ'] +['å¿ĥ', 'çIJĨ'] +['çĤ', 'İ'] +['æ¹', '¾'] +['è®', 'ĵ'] +['éĤ', 'Ħ'] +['ç³', 'ĸ'] +['ä¹', 'Į'] +['åĬ', '±'] +['çī', 'Ļ'] +['èħ', '¿'] +['å²', 'Ĺ'] +['ä¼', 'į'] +['æĪIJ', 'åijĺ'] +['åŃ', 'Ķ'] +['å°ı', 'ç¼ĸ'] +['èij', '£'] +['æ³', '¡'] +['åħĪ', 'è¿Ľ'] +['åħ', '§'] +['åĺ', '´'] +['è´', 'Ŀ'] +['è', '»'] +['æIJ', 'ŀ'] +['æ³', 'Ľ'] +['é¸', 'Ł'] +['ç½', '²'] +['èĽ', 'ĭ'] +['主', 'ä»»'] +['缮', 'çļĦ'] +['ä¹', 'ı'] +['æ´', '¥'] +['æĪ', '´'] +['严', 'æł¼'] +['çħ', '¤'] +['çĮ', '«'] +['åĶ', '¯'] +['å°', 'Ĭ'] +['çĶ', 'ľ'] +['åŀ', 'ĥ'] +['åľ', '¾'] +['æĭ', 'Ł'] +['çĦ', '¦'] +['é«', 'Ķ'] +['å®', 'ı'] +['æ©', 'Ł'] +['é©', '»'] +['æĹ', 'ģ'] +['å½', '»'] +['éĥ½', 'ä¸į'] +['æij', '©'] +['ä»', 'ĵ'] +['ä¹', '³'] +['å²', '¸'] +['è°', 'ĭ'] +['大', 'å¤ļ'] +['çģ', 'Ń'] +['èħ', '¾'] +['æŁ', 'ľ'] +['èĪ', 'į'] +['åħļ', 'çļĦ'] +['å°', 'ĺ'] +['åįģ', 'å¹´'] +['æĭ', 'Ĵ'] +['è£', '¡'] +['æŁ', 'Ķ'] +['å¹', '¼'] +['éĶ', 'ģ'] +['ä¸ĵ', '项'] +['æī', 'İ'] +['驾', 'é©¶'] +['ç¢', 'İ'] +['è¢', 'ĭ'] +['éĶ', 'ĭ'] +['å£', '®'] +['å°', 'ĸ'] +['ç͵', 'æ±ł'] +['è¿', 'Ķ'] +['æ¼', 'ı'] +['å¾', 'ª'] +['èı', 'Į'] +['èĥ', 'ĥ'] +['è¾', 'ħ'] +['éĢ', 'Ĵ'] +['èĥ', 'İ'] +['éĻ', 'ª'] +['å¯', '¿'] +['å¥', 'Ķ'] +['çĮ', 'Ľ'] +['çº', '¹'] +['çŁ¥', 'åIJį'] +['å¿', 'Ĩ'] +['æ¡', 'ĥ'] +['æ£', 'ĭ'] +['éĢ', 'Ĩ'] +['çĤ', '¼'] +['ç±', 'į'] +['çī', '§'] +['æł·', 'çļĦ'] +['è¾', 'Ľ'] +['åł', 'Ĩ'] +['å®ŀ', 'åľ¨'] +['ä¼', 'ı'] +['å®', '¿'] +['èµ', 'ı'] +['è£', 'Ĥ'] +['åįĬ', 'å¹´'] +['åĢ', '¾'] +['满', 'æĦı'] +['æ¢', 
'¯'] +['æĦı', 'åij³'] +['åŃ', '¤'] +['ç¥', 'Ŀ'] +['æĻ', '¶'] +['èµ', 'Ķ'] +['åģ', '¿'] +['èĦ', 'Ĥ'] +['ç½', 'ļ'] +['ç¢', 'į'] +['æ²', 'ĥ'] +['æ', 'ĵį'] +['å´', 'ĩ'] +['æļ', 'Ĥ'] +['è·', 'ĥ'] +['æIJ', '¬'] +['å©', 'Ĩ'] +['é', 'ī'] +['éī', '´'] +['åħ´', 'è¶£'] +['èIJ¥', 'ä¸ļ'] +['è®', 'Ĭ'] +['èĦ', 'ı'] +['è¾', 'Ī'] +['å·ŀ', 'å¸Ĥ'] +['è´«', 'åĽ°'] +['ç©', '·'] +['ä¸Ń', 'å°ı'] +['æ¼', 'Ĥ'] +['çĻ', 'Į'] +['èľ', 'ľ'] +['ä¼Ļ', 'ä¼´'] +['çī', 'µ'] +['æĤ', 'Ł'] +['éĻ', '·'] +['èµĽ', 'åŃ£'] +['æ¨', '£'] +['åģ', '¶'] +['æĺ', 'Ĩ'] +['è¢', 'Ń'] +['æį', 'IJ'] +['èī', '°'] +['æ', 'Ĥ¬'] +['çĶ', '¢'] +['èij', '¡'] +['çĽ', 'Ĺ'] +['å©', '´'] +['å°', 'İ'] +['çº', '½'] +['åĢ', '¡'] +['æī', '®'] +['è¨', 'Ń'] +['æĬ', 'ij'] +['ç¡', 'ķ'] +['è¾', 'ĸ'] +['éĥ', 'ģ'] +['è¾', '©'] +['éĤ', '»'] +['çݰ', 'åĩº'] +['è¦', 'ı'] +['å½', '¹'] +['éĺ', 'Ķ'] +['åī', 'µ'] +['è¯', '±'] +['æĥ', 'ij'] +['æ·', 'Ģ'] +['é¢', 'Ī'] +['ä¾', '¦'] +['æģ', '°'] +['æ£Ģ', 'å¯Ł'] +['éĨ', '«'] +['çĦ¶', 'æĺ¯'] +['åĭ', 'ĥ'] +['èĮ', '«'] +['ä', 'ĵ'] +['ð', '¬¸'] +['ä½ľ', '为'] +['çļĦ', '人'] +['éĤ£', 'ä¹Ī'] +['ç¾İ', 'åĽ½'] +['è¿ĺ', 'æľī'] +['æıIJ', 'é«ĺ'] +['èĻ', '½'] +['åħ·', 'æľī'] +['åĮħ', 'æĭ¬'] +['æĪĸ', 'èĢħ'] +['ä¸į', 'è¿ĩ'] +['ä¸Ĭ', 'æµ·'] +['åĮ»', 'éĻ¢'] +['èµĦ', 'éĩij'] +['çĶļ', 'èĩ³'] +['åζ', '度'] +['è§£', 'åĨ³'] +['èģĶ', 'ç½ij'] +['ç»§', 'ç»Ń'] +['建', 'ç«ĭ'] +['è¿Ľ', 'ä¸ĢæŃ¥'] +['æĿIJ', 'æĸĻ'] +['ä»Ĭ', '天'] +['å¿ħ', 'é¡»'] +['åIJĦ', 'ç§į'] +['çݰ', 'åľº'] +['ä»ĸ', 'çļĦ'] +['å¢ŀ', 'åĬł'] +['é¢Ĩ', 'åŁŁ'] +['åıĤ', 'ä¸İ'] +['æĮģ', 'ç»Ń'] +['ä¹ĭ', 'ä¸Ģ'] +['çī¹', 'åĪ«'] +['é±', '¼'] +['åħ±', 'åIJĮ'] +['åĬ', 'ª'] +['çİ', 'ī'] +['人', '们'] +['åħĪ', 'çĶŁ'] +['ä¼ĺ', 'åĬ¿'] +['ä¿Ŀ', 'æĮģ'] +['ä½ľ', 'åĵģ'] +['çī', 'Ľ'] +['æĪIJ', 'æľ¬'] +['æĶ¶', 'åħ¥'] +['åıĬ', 'æĹ¶'] +['è´Ł', 'è´£'] +['æİ¥', 'åıĹ'] +['èį', 'IJ'] +['åıª', 'è¦ģ'] +['羣', 'çļĦ'] +['导', 'èĩ´'] +['æľº', 'åζ'] +['è¡Į', 'åĬ¨'] +['æĸ°', 'çļĦ'] +['å®Į', 'åĸĦ'] +['为', 'ä»Ģä¹Ī'] +['ä¸Ń', '央'] +['æĪIJ', 'ç«ĭ'] +['æĦŁ', 'è§ī'] +['åıĺ', 'åĮĸ'] +['åıĹ', 'åΰ'] +['å¹¶', 'ä¸į'] 
+['åŃ', 'Ļ'] +['æĸ½', 'å·¥'] +['æĺİ', 'æĺ¾'] +['è¿ĩ', 'åİ»'] +['åıij', 'æĮ¥'] +['羣', 'æŃ£'] +['åŁº', 'åľ°'] +['æĺİ', 'ç¡®'] +['èĥ', '¡'] +['许', 'å¤ļ'] +['ä¸Ģ', 'å¹´'] +['æĸ¹', 'åIJij'] +['æģ', '©'] +['缸', 'ä¿¡'] +['åľ', '³'] +['详', 'ç»Ĩ'] +['äºĭ', 'ä¸ļ'] +['çĶŁ', 'åij½'] +['åĴ¨', '询'] +['æĸĩ', 'æĺİ'] +['çij', 'ŀ'] +['绿', 'èī²'] +['èİ', '«'] +['æĦı', 'è¯Ĩ'] +['æĬķ', 'åħ¥'] +['åĬł', 'å¿«'] +['æ¢', 'ħ'] +['ç¿', '»'] +['å¼Ģ', 'æĶ¾'] +['æĻ®', 'éĢļ'] +['åįı', 'ä¼ļ'] +['æĪIJ', '绩'] +['ä»', 'Ļ'] +['å¯', 'Ĵ'] +['è¯ģ', 'åΏ'] +['认', 'è¯Ĩ'] +['ä¸', '¹'] +['大', 'éĩı'] +['è¿', 'ħ'] +['åģļ', 'åΰ'] +['设', 'æĸ½'] +['è´¸', 'æĺĵ'] +['èĥ½', 'æºIJ'] +['æĹ¶', 'æľŁ'] +['ä¸Ģ', '天'] +['æ²»', 'çIJĨ'] +['åĺ', 'ī'] +['å®', 'ĩ'] +['丰', 'å¯Į'] +['举', 'è¡Į'] +['æĪIJ', 'æŀľ'] +['èĤ¯', 'å®ļ'] +['çĭ', 'Ĺ'] +['åĬ¨', 'åĬĽ'] +['æ£', '®'] +['åĩł', 'ä¹İ'] +['åĽł', 'ç´ł'] +['æ°ij', 'æĹı'] +['æ´', 'ŀ'] +['ç½ij', 'åıĭ'] +['åIJĪ', 'çIJĨ'] +['广', '大'] +['æ®', 'Ĭ'] +['æ´', 'Ľ'] +['æĿ', '¯'] +['èĴ', 'Ļ'] +['ç͍', 'äºİ'] +['èŀį', 'èµĦ'] +['ç¥', 'ĸ'] +['æľº', '械'] +['举', 'åĬŀ'] +['èĩª', 'åĬ¨'] +['åĬŀ', 'åħ¬'] +['é»', 'ŀ'] +['éĽ', 'Ħ'] +['å̼', 'å¾Ĺ'] +['çĮ', 'ª'] +['以', '为'] +['æĺ', 'Į'] +['è·Ŀ', '离'] +['åIJ¸', 'å¼ķ'] +['ç»', 'ķ'] +['éļ', 'Ĩ'] +['计', 'ç®Ĺ'] +['éĺŁ', 'ä¼į'] +['大', 'ä¼ļ'] +['å¼ķ', 'èµ·'] +['çī¹', 'çĤ¹'] +['èĥ', '¶'] +['å¹´', 'è½»'] +['æľ¬', '身'] +['æľº', 'åħ³'] +['å®ĺ', 'æĸ¹'] +['éĥ', 'ij'] +['æµ', 'Ļ'] +['è§Ĵ', 'èī²'] +['èij£', 'äºĭ'] +['为', '主'] +['æĹł', '论'] +['ä¹ł', 'æĥ¯'] +['æ¥', 'ļ'] +['æĭ', 'ĵ'] +['绣', '计'] +['åħ', 'Ħ'] +['广', 'æ³Ľ'] +['åį', 'Ģ'] +['污', 'æŁĵ'] +['è«', 'ĭ'] +['èĬĤ', '缮'] +['ä¼', '¦'] +['è¦Ĩ', 'çĽĸ'] +['èĢ', 'IJ'] +['æī¶', 'è´«'] +['ç»ı', 'åİĨ'] +['éĩįè¦ģ', 'çļĦ'] +['èĤ¡', '举'] +['æĭĽ', 'èģĺ'] +['åĽĽ', '个'] +['æĩ', 'ī'] +['èĥ', 'ŀ'] +['æij', 'Ĩ'] +['é«ĺ', 'éĢŁ'] +['éº', '¦'] +['åİŁ', 'åĪĻ'] +['èİ', '±'] +['æĽ´', '好'] +['éķ', 'ľ'] +['åĩ', 'Į'] +['åŀĥ', 'åľ¾'] +['éĢ', '²'] +['çģ', '°'] +['éĵ', 'º'] +['äºĭ', 'æķħ'] +['çĶ', 'ĺ'] +['空', 'æ°Ķ'] +['é¾', 'Ħ'] +['èı', '²'] +['çĵ', '¶'] 
+['æĺ', '¨'] +['æĹ¥', 'æĬ¥'] +['æµ', '®'] +['åľ°', 'åĽ¾'] +['åij', 'Ī'] +['大', 'åĬĽ'] +['ç»', 'ª'] +['å¸', 'ħ'] +['æľį', 'åĭĻ'] +['ä¸į', 'éĶĻ'] +['乡', 'æĿij'] +['å±', '¥'] +['å¹³', 'æĸ¹'] +['éĹ', '²'] +['æī', '£'] +['ç´ł', 'è´¨'] +['èµ', '´'] +['éģ', 'Ń'] +['èIJ', '¨'] +['èĩª', '主'] +['éĩij', 'å±ŀ'] +['èī¯', '好'] +['两', 'å¹´'] +['æ³', '¥'] +['é¢', 'ľ'] +['ç²¾', '彩'] +['ä¸Ń', 'åįİ'] +['æĻ', 'ĭ'] +['ä¹ł', 'è¿ij'] +['ä¹łè¿ij', 'å¹³'] +['æĪĺ', '士'] +['åģļ', 'çļĦ'] +['éª', 'ij'] +['æ»', '´'] +['çĵ', 'ľ'] +['çīĪ', 'æĿĥ'] +['èĤ', 'ł'] +['æľĥ', 'åĵ¡'] +['çı', 'į'] +['ç¨', '®'] +['ä', '»¿'] +['çī©', 'ä¸ļ'] +['åĢĭ', '人'] +['å¦', '»'] +['ä¼', '¸'] +['æ±', 'Ĺ'] +['æĹ', 'º'] +['çIJĨ', 'æĥ³'] +['æij', '¸'] +['è¿Ŀ', 'æ³ķ'] +['å®Į', 'æķ´'] +['åİ', '¦'] +['è¸', 'ı'] +['æĸ', 'ij'] +['æ¡', 'Ĥ'] +['ä½ĵ', 'åζ'] +['å¸', '«'] +['æĿ', 'Ĩ'] +['æ®', '¿'] +['æ¯', 'ģ'] +['é¦', 'Ī'] +['è§Ĵ', '度'] +['æ¬', '£'] +['çĥ', '¦'] +['èĤ', 'º'] +['éĩĩ', '访'] +['æij', 'ĺ'] +['æĮ', '¡'] +['æ·', 'ĺ'] +['åħ»', 'èĢģ'] +['çĤ', '¸'] +['è¿', 'Ī'] +['åİ', 'ī'] +['åĿ', 'Ĭ'] +['è¾', '£'] +['åĩ', 'Ŀ'] +['æ³', 'ª'] +['çĸ', 'ı'] +['æİ', 'ĺ'] +['åĥı', 'æĺ¯'] +['éĽ', 'ķ'] +['ç¼', 'Ŀ'] +['èį', '·'] +['æį', '·'] +['åł', '¡'] +['åı¥', 'è¯Ŀ'] +['çĸ', '¼'] +['æł', 'ı'] +['éģ', 'µ'] +['ç¢', '³'] +['å·¥', 'åķĨ'] +['æIJ', 'º'] +['åĪ', '¥'] +['ä¹', 'Ļ'] +['æĹ', 'ĭ'] +['æĥ', 'ľ'] +['ä¸Ģ', '大'] +['å±Ĥ', '次'] +['èµ', 'ĸ'] +['æĬ', '¬'] +['æ¨', 'Ĥ'] +['è¯', 'ŀ'] +['åħ', 'Ĵ'] +['ç¯', '®'] +['èĤ', 'ĥ'] +['å§', '¿'] +['æĬ', 'ļ'] +['çĵ', '·'] +['ç͵', 'åĬ¨'] +['æĸ°', 'åĨł'] +['æ¶', 'µ'] +['ç¢', 'ij'] +['æ·', '®'] +['æĹ', '¨'] +['è¸', 'ª'] +['æ¸', 'Ķ'] +['æĦ', 'Ī'] +['åı', 'Ķ'] +['åįĹ', 'çľģ'] +['ç¾', '©'] +['å§Ķ', '书记'] +['è²', '¸'] +['æ¶', 'Į'] +['è«', 'ĸ'] +['èIJ', 'Ħ'] +['æı', 'ı'] +['å¿', '§'] +['è¾', '¦'] +['å¦', 'Ĩ'] +['æī', 'Ń'] +['åij', 'µ'] +['éģ', '¥'] +['è¨', '±'] +['ä»', 'ĩ'] +['åįģ', 'ä¸ī'] +['åī', '²'] +['èª', 'į'] +['èĪ', '°'] +['é¢', 'ĩ'] +['é¥', '±'] +['çĭ', 'ł'] +['é«ĺ', 'çļĦ'] +['çµ', '±'] +['æħ', 'İ'] +['é¢', 'ģ'] 
+['åIJĪ', 'éĢĤ'] +['æµ', '´'] +['èµ', 'ĭ'] +['æĬ', '¼'] +['å¦', '¥'] +['éĻ¢', 'éķ¿'] +['èĢ', 'ķ'] +['è¾', '¨'] +['æħ', '°'] +['åįģ', 'åĽĽ'] +['æľ', 'µ'] +['èĵ', 'Ħ'] +['æŀ', '¢'] +['å»', '·'] +['æĤ', 'Ħ'] +['æ¶', '¯'] +['çŁ', '©'] +['åŃIJ', 'éĩĮ'] +['çĬ', '¹'] +['å±Ģ', 'éķ¿'] +['é', 'IJ'] +['å¥', 'ł'] +['ä¼ļ', 'éķ¿'] +['æĵ', 'ļ'] +['ä¸į', 'åıĬ'] +['åįģ', 'ä¹Ŀ'] +['æ¬', 'º'] +['èº', 'º'] +['éĺ', 'IJ'] +['çº', 'Į'] +['è¨', '»'] +['åĨ', 'Ĭ'] +['èŃ', 'ĺ'] +['é«ĺ', 'çŃī'] +['èħ', 'º'] +['å¤', 'ķ'] +['ç»', 'ij'] +['åĶ', '¤'] +['èķ', '´'] +['çķ', 'ľ'] +['æħ', 'ĭ'] +['åı', 'Ļ'] +['åı', 'ĥ'] +['å³', '¡'] +['人', '大'] +['éħ', '¿'] +['éģ', '©'] +['å¥', '¢'] +['åı£', 'æ°Ķ'] +['éĮ', 'Ħ'] +['é', 'ı'] +['åĭ', 'ĺ'] +['è´', '¿'] +['éļ', 'ª'] +['é', 'ĭ'] +['éļ', '¶'] +['ð', '¥'] +['ð¬', '£'] +['ð', '£'] +['ð«', 'į'] +['ð¬', '³'] +['ð«', 'ĵ'] +['ð«', 'Ħ'] +['ð«', 'Ł'] +['ð¨', '±'] +['ä', 'Ĺ'] +['以', 'åıĬ'] +['æľī', 'éĻIJ'] +['åij', '¢'] +['åIJ', 'Ĺ'] +['çľĭ', 'åΰ'] +['计', 'åĪĴ'] +['è¿Ľ', 'åħ¥'] +['缴', 'æİ¥'] +['åĪĨ', 'æŀIJ'] +['åıª', 'æľī'] +['设', 'å¤ĩ'] +['åħ¶', 'å®ŀ'] +['åĬł', '强'] +['ä¸Ń', 'çļĦ'] +['ä¿Ŀ', 'éļľ'] +['èĢģ', 'å¸Ī'] +['人', 'æīį'] +['å¾Ĺ', 'åΰ'] +['é£İ', 'éĻ©'] +['ä¸Ģ', 'ç§į'] +['空', 'éĹ´'] +['æĪij', 'åĽ½'] +['ä¹ĭ', 'åīį'] +['ä¸ĵ', 'å®¶'] +['æĿ', '¨'] +['æĹ¥', 'æľ¬'] +['群', 'ä¼Ĺ'] +['åıĤ', 'åĬł'] +['æķĪ', 'æŀľ'] +['æľī', 'åħ³'] +['å®¶', 'åºŃ'] +['åĮº', 'åŁŁ'] +['åĬª', 'åĬĽ'] +['éļı', 'çĿĢ'] +['æĹł', 'æ³ķ'] +['交', 'æµģ'] +['è¡Į', '为'] +['æ£Ģ', 'æŁ¥'] +['æľŁ', 'éĹ´'] +['å¦Ĥ', 'æŃ¤'] +['èĤ¡', '份'] +['å½ĵ', 'æĹ¶'] +['è£ħ', 'å¤ĩ'] +['åĩĨ', 'å¤ĩ'] +['éħĴ', 'åºĹ'] +['è¿IJ', 'åĬ¨'] +['æıIJ', 'åĩº'] +['å·¦', 'åı³'] +['æİª', 'æĸ½'] +['é£Ł', 'åĵģ'] +['æ¶Īè´¹', 'èĢħ'] +['åѦ', 'éĻ¢'] +['æĮĩ', '导'] +['è¿IJ', 'èIJ¥'] +['éĩį', '大'] +['åĨľ', 'æĿij'] +['éĢł', 'æĪIJ'] +['æĶ¿', 'æ²»'] +['éĴĪ', '对'] +['æŃ£', 'å¼ı'] +['åıĸ', 'å¾Ĺ'] +['éĤ£', '个'] +['éĽĨ', 'ä¸Ń'] +['åıª', 'èĥ½'] +['å¿«', 'éĢŁ'] +['身', 'ä½ĵ'] +['åħļ', 'åijĺ'] +['èģĶ', 'åIJĪ'] +['åĬĽ', 'éĩı'] +['éĥ½', 'æľī'] +['æ', 'ħ§'] +['å¡', 
'Ķ'] +['åĪ«', '人'] +['表', 'çݰ'] +['æķħ', 'äºĭ'] +['ä¸Ģ', 'åĪĩ'] +['å°', 'ĩ'] +['èµĦ', 'æĸĻ'] +['åŁ¹', 'åħ»'] +['éĺħ', '读'] +['æľī', '人'] +['èIJ¥', 'éĶĢ'] +['çĽij', 'çĿ£'] +['çݯ', 'ä¿Ŀ'] +['èĢĥ', 'èĻij'] +['æ·±', 'åľ³'] +['严', 'éĩį'] +['èĮĥ', 'åĽ´'] +['å§Ķ', 'åijĺ'] +['çĽij', '管'] +['ä¸ī', '个'] +['è£ħ', 'ä¿®'] +['åħ¬', 'éĩĮ'] +['åĪĨ', 'åĪ«'] +['çIJĨ', 'è§£'] +['éŁ', '©'] +['åĬł', 'å·¥'] +['认', '羣'] +['ä¸į', '好'] +['åİ»', 'å¹´'] +['éĻį', 'ä½İ'] +['æľº', 'ä¼ļ'] +['åįı', 'è®®'] +['符', 'åIJĪ'] +['å¢ŀ', '强'] +['æĬĢ', 'èĥ½'] +['é¦ĸ', 'åħĪ'] +['ç§', '¦'] +['ä¸', 'ģ'] +['å°', '¾'] +['æľī', 'äºĨ'] +['åľ°', '产'] +['æ¸', 'ł'] +['æĸ¹', '便'] +['ç§»', 'åĬ¨'] +['éĢŁ', '度'] +['å°¤', 'åħ¶'] +['éĢļ', 'çŁ¥'] +['åĿ', 'Ľ'] +['éģ¿', 'åħį'] +['æģ', '¢'] +['è´', '¡'] +['èģĮ', 'å·¥'] +['å®ŀ', 'åĬĽ'] +['æĺ¯ä¸Ģ', 'ç§į'] +['åIJ¯', 'åĬ¨'] +['çĸ¾', 'çĹħ'] +['æĿ¥', 'äºĨ'] +['缸', '对'] +['çݰ', 'å®ŀ'] +['èŀį', 'åIJĪ'] +['åIJĮ', 'æł·'] +['åħ¬', 'åijĬ'] +['çī¹', 'æ®Ĭ'] +['ç´', '«'] +['ä¸ĭ', 'åİ»'] +['ä¼ł', 'æĴŃ'] +['æľĢ', '好'] +['ä¼ĺ', 'è´¨'] +['æ²', 'Ĵ'] +['æĮ', 'º'] +['æĹ', '¦'] +['è¯', 'º'] +['ä¸Ģ', 'åIJį'] +['éģĵ', 'è·¯'] +['示', 'èĮĥ'] +['è¿ĩ', 'æĿ¥'] +['åIJĮ', 'åѦ'] +['é¼', 'ĵ'] +['æĿ', 'Ń'] +['æľ¬', '次'] +['åIJĮ', 'æĦı'] +['ä¸ĸ', '纪'] +['ç¾', 'Ĭ'] +['æ¬', '²'] +['å·¥', 'èīº'] +['çĵ', '¦'] +['人', '士'] +['æľī', 'æīĢ'] +['ä»İ', 'äºĭ'] +['æľī', 'å¾Īå¤ļ'] +['ä¸į', 'äºĨ'] +['å²Ĺ', 'ä½į'] +['åıĺ', 'å¾Ĺ'] +['åĬ³', 'åĬ¨'] +['å¤Ħ', 'äºİ'] +['å¹³', 'åĿĩ'] +['å½¢', '象'] +['å¡', 'ŀ'] +['åħ±', '享'] +['çĿ', 'Ľ'] +['åĪ©', '润'] +['æŃ£', 'æĺ¯'] +['å¾Ģ', 'å¾Ģ'] +['缸', 'æ¯Ķ'] +['æ¨', 'ª'] +['åĪ', '·'] +['æµĻ', 'æ±Ł'] +['大', 'éĥ¨åĪĨ'] +['å¤ļ', '个'] +['æĤ¨', 'çļĦ'] +['ç͵', 'åķĨ'] +['å¾®', 'åįļ'] +['å§ĭ', 'ç»Ī'] +['çĬ¯', '罪'] +['æĺ¯', 'åľ¨'] +['ç»Ħ', 'åIJĪ'] +['åİŁ', 'æĿ¥'] +['æ¸ħ', 'æ¥ļ'] +['åIJĦ', 'åľ°'] +['æĦŁ', 'åıĹ'] +['å½ĵ', 'ä¸Ń'] +['è¶ĭ', 'åĬ¿'] +['æĻ¯', 'åĮº'] +['羣', 'æĺ¯'] +['ä¾Ľ', 'åºĶ'] +['转', 'åŀĭ'] +['çĭ', 'Ĥ'] +['èĨ', 'ľ'] +['èĭ', 'Ĺ'] +['å¿', 'ł'] +['å¾Ī', '大'] +['èĤ¡', 'æĿĥ'] +['ç¾İ', 'åħĥ'] +['æİĴ', 
'åIJį'] +['åĬ¨', 'çī©'] +['éĶ', 'ħ'] +['å¢', '¨'] +['主', 'å¸Ń'] +['å¾Ī', '好'] +['ç»Ŀ', '对'] +['æĿ', 'ľ'] +['转', 'è½½'] +['çĴ', 'ĥ'] +['æĿij', 'æ°ij'] +['åIJ', '¨'] +['åĽŃ', 'åĮº'] +['é«ĺ', '度'] +['çī©', 'è´¨'] +['è¾', 'ī'] +['æĹ¥', '常'] +['æı', 'Ĵ'] +['ä¸ī', 'å¹´'] +['ä½ĵ', 'çݰ'] +['æīį', 'æĺ¯'] +['代', 'çIJĨ'] +['ä¸į', '管'] +['æģ', 'Ĵ'] +['åľ°', 'ä½į'] +['ç²', '®'] +['èĸ', 'Ħ'] +['æĺİ', 'çϽ'] +['ä¸Ģ', 'èĩ´'] +['æĽ', '¼'] +['åĵ', 'Ń'] +['åĩ', '¤'] +['åĬ', '²'] +['æķ', 'Į'] +['æĪĺ', 'æĸĹ'] +['主', 'ä½ĵ'] +['åħ¬', 'å¸ĥ'] +['åıĤ', 'èĢĥ'] +['èĪª', '空'] +['å¯', 'º'] +['åѦ', 'ä¼ļ'] +['åıį', 'æĺł'] +['ç¾İ', '丽'] +['太', 'éĺ³'] +['建', 'æĪIJ'] +['æħ¢', 'æħ¢'] +['åIJĦ', '个'] +['éĤ', '¦'] +['ç»Ħ', 'æĪIJ'] +['ä¸ī', '大'] +['éĶ', '¦'] +['大å¤ļ', 'æķ°'] +['æ¦Ĥ', '念'] +['éŃ', 'Ĥ'] +['åħ¬', 'çĽĬ'] +['èį', 'Ĵ'] +['身', '份'] +['æ·±', 'åĪ»'] +['åħ', '©'] +['ç»ı', 'åħ¸'] +['åIJĦ', '项'] +['èĻ', 'ķ'] +['è¿Ľ', 'æŃ¥'] +['åįģ', 'äºĮ'] +['æī§', 'æ³ķ'] +['æĥ³', 'åΰ'] +['æĦŁ', 'æŁĵ'] +['åķĨ', 'åĬ¡'] +['å°ı', 'ç»Ħ'] +['èĶ', '¬'] +['çıŃ', 'åŃIJ'] +['åIJĮ', 'å¿Ĺ'] +['éĿ¢', '临'] +['çĤ', 'Ĵ'] +['å¤ļ', 'ç§į'] +['è§Ĥ', 'çĤ¹'] +['åĵª', 'éĩĮ'] +['å°', 'Ŀ'] +['å§', 'Ĩ'] +['èħ', '¹'] +['åŁİ', 'åĮº'] +['太', 'å¤ļ'] +['çĹħ', 'æ¯Ĵ'] +['åľ¨', 'äºİ'] +['æīĢ', 'è°ĵ'] +['æĻ', '°'] +['æŀ', 'Ŀ'] +['æĭ', 'ĸ'] +['å®', 'ħ'] +['æķ´', 'æ²»'] +['ä½ı', 'æĪ¿'] +['åģ', '·'] +['çĨ', 'Ĭ'] +['èµ', 'ģ'] +['æ°', 'Ľ'] +['æł¼', 'å±Ģ'] +['åŁºç¡Ģ', 'ä¸Ĭ'] +['èĥ', 'Ĩ'] +['åħ', '½'] +['鼶', 'åĶ®'] +['åĿ', '¡'] +['女', 'åŃ©'] +['æĴ', 'ŀ'] +['åħ¨', 'åĬĽ'] +['åĴ', 'ĸ'] +['èĤ', '©'] +['çľ', 'ī'] +['èĩ³', 'äºİ'] +['åħļ', 'ç»Ħ'] +['ä¸Ģ', 'ä»¶'] +['æĭ', 'Ĩ'] +['äºĭ', 'å®ŀ'] +['åĤ', '³'] +['æ¹', 'ĺ'] +['ç¶²', 'ç«Ļ'] +['循', 'çݯ'] +['åIJĮ', 'æ¯Ķ'] +['æĭ', 'Ķ'] +['åĮ»', 'èį¯'] +['åħ»', 'æ®ĸ'] +['åĽº', 'å®ļ'] +['å®ŀéĻħ', 'ä¸Ĭ'] +['è®°', 'å¾Ĺ'] +['åĪ©', 'äºİ'] +['æĤ', '¦'] +['æĭ', '³'] +['èĤ', 'Ŀ'] +['æķĪ', 'çĽĬ'] +['è©', '²'] +['æ°ij', '主'] +['çĹĩ', 'çĬ¶'] +['é¢', '¨'] +['å¹¼', 'åĦ¿'] +['å§', 'ij'] +['æĪ', 'Ĵ'] +['ä¸ĭ', 'çļĦ'] +['æ¸', '¡'] +['å¹´', 
'åºķ'] +['è®°', 'å¿Ĩ'] +['åIJ', 'IJ'] +['大', 'å¹ħ'] +['å¾', '½'] +['åħ¬', 'ä¼Ĺ'] +['ä¿¡', 'å¿ĥ'] +['çİ', 'Ľ'] +['ä¼ļ', 'ä¸Ĭ'] +['ä¹', 'Ķ'] +['æijĦ', 'å½±'] +['æ£ĭ', 'çīĮ'] +['éĻ', 'ķ'] +['åºĶ', 'æĢ¥'] +['æĶ¶', 'è´¹'] +['æİ§', 'èĤ¡'] +['仪', 'å¼ı'] +['çŀ', '¬'] +['æīĢ', 'åľ¨'] +['ç¢', '°'] +['å§', 'ĵ'] +['é¡', 'Į'] +['æĶ¯', 'éĥ¨'] +['使', 'åij½'] +['çĤ', 'ī'] +['å¯', 'Ħ'] +['ç¿', '¼'] +['åľ°', 'ä¸ĭ'] +['è¾', 'ŀ'] +['ä¿', '±'] +['主', 'æĮģ'] +['è´§', 'å¸ģ'] +['æģ', '¨'] +['èĤ', 'Į'] +['çĽ', 'Ī'] +['éĶ', '»'] +['å¿Ĺ', 'æĦ¿'] +['ç±»', 'ä¼¼'] +['æĮ', 'ĸ'] +['éĢ', '»'] +['ç¸', '½'] +['纪', '念'] +['åķ', '¥'] +['å¼', '¯'] +['åIJį', 'åŃĹ'] +['åģ¥', '身'] +['çļĦ', 'å¿ĥ'] +['é©', '±'] +['èĥĮ', 'åIJİ'] +['æ³ķ', 'å¸Ī'] +['ç²', 'Ĵ'] +['èĥ½', 'éĩı'] +['è¾', '°'] +['èī', '³'] +['å½', '¼'] +['段', 'æĹ¶éĹ´'] +['åIJĪ', 'æ³ķ'] +['æĵ', '¦'] +['ç¾', '½'] +['åİ', '¨'] +['æĪij', '说'] +['äºĭ', 'åĬ¡'] +['åĩł', '天'] +['åħ', 'ģ'] +['ç¼', '´'] +['åį', 'ĵ'] +['两', 'ç§į'] +['çĭ¬', 'çī¹'] +['å¸', '¶'] +['éĴ', '»'] +['æĥ', '©'] +['é¢Ĩ', 'åħĪ'] +['è¶³', 'å¤Ł'] +['å£', '³'] +['æĦıåij³', 'çĿĢ'] +['åĪĨ', 'å¸ĥ'] +['ä¹', 'ĥ'] +['éģ', 'ĭ'] +['ä½', '©'] +['è°', '±'] +['çģ', '£'] +['èį', '¡'] +['è´¯', 'å½»'] +['å¹', '¾'] +['ç£', 'ģ'] +['åħ¸', 'åŀĭ'] +['åī', 'ĩ'] +['åĨ', '»'] +['æ¬', 'ł'] +['ä¸į', 'ä¹ħ'] +['æµ', '¦'] +['éŃ', 'ħ'] +['å¼Ģ', 'äºĨ'] +['使ç͍', 'èĢħ'] +['è¿Ļ', '款'] +['å°', 'Ī'] +['èĦ±', 'è´«'] +['æĶ»', 'åĿļ'] +['ç®Ĺ', 'æĺ¯'] +['ç¨', 'Ģ'] +['æĹł', '人'] +['åł', 'µ'] +['å¥', 'ı'] +['éĥ½', 'å¸Ĥ'] +['åı¯', 'è§ģ'] +['ä¸į', 'åĩº'] +['æ', '·»'] +['äº', 'ı'] +['ç¾İ', '好'] +['èĥ', 'ĸ'] +['éŁ', 'µ'] +['æłĩ', 'å¿Ĺ'] +['èĬĤ', 'èĥ½'] +['æĬ', '«'] +['å°', 'º'] +['å¯', '¸'] +['ä¸Ģ', '代'] +['é¢', 'Ĺ'] +['èĢ', '¶'] +['èĴ', '¸'] +['åĸ', '®'] +['æ', '»¿'] +['çĮ', 'ľ'] +['æµ', 'Ĩ'] +['åŁ', 'ĥ'] +['åįĥ', 'ä¸ĩ'] +['èµ', 'Į'] +['èģ', '²'] +['ä½ľ', 'é£İ'] +['è³', 'ª'] +['å¯', '¨'] +['å¹´', '人'] +['åį°', '象'] +['æ¡', '¶'] +['æĴ', '¤'] +['åįģ', 'äºĶ'] +['æ¯', 'ħ'] +['æ²', 'ª'] +['åĽ½', 'æľī'] +['大éĩı', 'çļĦ'] +['å¾', '¡'] +['å¯', 
'ĵ'] +['è¦', 'ĸ'] +['æ¼Ĥ', '亮'] +['çľ', 'ł'] +['ç', 'ĤŃ'] +['é»', 'İ'] +['èĻ', '¹'] +['åĪ©', 'äºļ'] +['èŃ', 'ī'] +['æµ', 'ı'] +['åįģ', 'åħ«'] +['ä¸', '¢'] +['è¾', '½'] +['æľīä¸Ģ', 'äºĽ'] +['æħ', 'Ī'] +['åģľ', '车'] +['å®', 'ł'] +['è§£', 'æĶ¾'] +['æľī', 'å¤ļ'] +['éĤ', 'Ĭ'] +['常', 'è§ģ'] +['æĬ', '¹'] +['çº', '¤'] +['è¦', 'ª'] +['æ¡', 'Ĩ'] +['èİ', 'ŀ'] +['æ°§', 'åĮĸ'] +['è¿Ļ', 'ä»¶'] +['åĩ', '°'] +['æŁ', '´'] +['åıij', 'ç͵'] +['é¼', 'ł'] +['转', 'åĮĸ'] +['å¨', 'ĥ'] +['æĮ', '¤'] +['ç½', '©'] +['å¯Ĩ', 'åĪĩ'] +['æĪij', 'ä¸į'] +['é«ĺ', 'æĸ°'] +['ä¸Ģ', 'ç¯ĩ'] +['è¿Ľ', 'ç¨ĭ'] +['è¡', '°'] +['è¿ĺ', 'ä¸į'] +['ç', 'ħĮ'] +['æĸ°', 'åįİ'] +['èĤ', '¿'] +['æ»', '©'] +['ä¸Ģ', 'æµģ'] +['è¯', 'Ī'] +['å®ŀ', 'ä½ĵ'] +['å¤ĸ', 'åĽ½'] +['èº', '²'] +['èµ', 'ł'] +['è¦', 'º'] +['æ¢', 'Ŀ'] +['ä¸į', 'è§ģ'] +['è¨', 'Ĭ'] +['åĮ', '¹'] +['åį', 'µ'] +['çĩ', '¥'] +['æħ', 'ķ'] +['é½', '¿'] +['å®', '´'] +['é¥', '¼'] +['èij¡', 'èIJĦ'] +['å°ı', 'å¿ĥ'] +['æģ', '¼'] +['éĻ', 'Į'] +['æĺ', 'Ĥ'] +['åĥ', '¹'] +['èĬ', 'Ŀ'] +['æ¯ı', '个人'] +['åīį', 'æıIJ'] +['ä½ĵ', 'ä¼ļ'] +['æ¨', 'Ļ'] +['æIJľ', 'çĭIJ'] +['对', 'åħ¶'] +['ä¸', '§'] +['èľ', 'Ĥ'] +['æµ', '¸'] +['èª', '¿'] +['åĿ', 'ª'] +['é¢', 'ĸ'] +['åIJį', '为'] +['ç¬', '¼'] +['èĪ', 'Į'] +['æľ¬', '书'] +['èģ', '¯'] +['çº', 'º'] +['ç®Ģ', '缴'] +['éĽ', '¢'] +['ç¾İ', 'çļĦ'] +['éļ', '¨'] +['é«ĺ', 'å³°'] +['è¿Ļ', 'å®¶'] +['å', 'Ĥ¬'] +['å°', '¸'] +['ç¡ķ', '士'] +['èŃ', '·'] +['è°', '¨'] +['æĺ', 'ı'] +['æĶ¿', 'åįı'] +['è¡', 'Ķ'] +['ç¿', 'Ĵ'] +['åľ', 'Ĵ'] +['åĽ½', 'æ°ij'] +['主', 'è§Ĵ'] +['è£', 'ķ'] +['ä¼', 'ª'] +['åº', 'ŀ'] +['æ°ij', 'èIJ¥'] +['æĥ', '§'] +['ç§ĺ', '书'] +['çĹ', 'ķ'] +['çϾ', 'åĪĨ'] +['æº', '¶'] +['æĹł', 'çĸij'] +['çļĦ', 'çľ¼'] +['æĵ', 'İ'] +['ä¼Ł', '大'] +['å½', '°'] +['åħ¬å®ī', 'å±Ģ'] +['ç³', 'ķ'] +['å¼', '¥'] +['åĤ', 'Ļ'] +['ä¹', '¾'] +['毫', 'ä¸į'] +['注', 'æĺİ'] +['åī¯', 'æĢ»'] +['æĦ', 'ī'] +['æķ', '¦'] +['é¦', '¨'] +['æĶ', 'Ģ'] +['éĢ', 'Ŀ'] +['åı¯', 'éĿł'] +['å¤', '¸'] +['åľ', 'ĺ'] +['éĿ¢', 'ä¸Ĭ'] +['æĬ', 'ĸ'] +['èĦ', 'Ĩ'] +['é©', '°'] +['ä¼', 'IJ'] +['å¦', '¨'] 
+['å®ļ', 'äºĨ'] +['ç³', 'Ĭ'] +['æŃ', '¡'] +['éĥ¨', 'éķ¿'] +['ç§', 'ī'] +['èĪ', 'Ĩ'] +['åĪij', 'äºĭ'] +['åIJ', 'µ'] +['æ¤', 'Ĵ'] +['è¡', 'ĵ'] +['è±', '«'] +['èı', '©'] +['åŃ', 'µ'] +['é¥', '²'] +['å°±', '好'] +['åł', 'ª'] +['ä¸ī', 'è§Ĵ'] +['åľº', 'æ¯ĶèµĽ'] +['ä¸į', 'åģľ'] +['æĵ', 'ħ'] +['åħ¨', 'æĸĩ'] +['æ³', 'ģ'] +['åѦ', 'ä½į'] +['æ±', '°'] +['éł', 'ĺ'] +['åı', 'ł'] +['éļ', 'Ľ'] +['å¸', 'IJ'] +['çľĭ', 'åĩº'] +['åĮ', 'ł'] +['å±Ģ', 'éĿ¢'] +['æ³', 'Į'] +['è°', 'Ĭ'] +['åIJĮ', 'æľŁ'] +['æĬķ', 'æłĩ'] +['å¥', '´'] +['æĿ¥çľĭ', 'çľĭ'] +['èĦ', '¾'] +['èŀ', 'º'] +['æŃ', 'ī'] +['çĽ', '¯'] +['ç¨İ', 'åĬ¡'] +['å»', 'Ĭ'] +['æİ', '©'] +['æħ', '¨'] +['çĽ', '¼'] +['èĬ', 'Ĵ'] +['è®', 'Ģ'] +['æĮ', '£'] +['èĮ', 'ħ'] +['æĸ', '¥'] +['æ¤', 'ħ'] +['åΰ', 'æĿ¥'] +['èijĹ', 'ä½ľ'] +['çĭ', '±'] +['äºĮ', 'æīĭ'] +['ä»İ', 'æĿ¥'] +['çĸ', '²'] +['åºĬ', 'ä¸Ĭ'] +['æĸ°', '浪'] +['æ³', 'Ħ'] +['å¢ŀ', 'å̼'] +['ä¸', 'Ľ'] +['æļ', 'ij'] +['ä»İ', 'ä¸ļ'] +['æ·', 'ĭ'] +['å¤ļ', 'æł·'] +['æľ', '´'] +['份', 'é¢Ŀ'] +['æŀ', '£'] +['西', 'çľģ'] +['æľ¬', 'è´¨'] +['æ·±', 'æ·±'] +['èī', 'ĩ'] +['ç»', 'µ'] +['产', 'å̼'] +['æ¼', 'ł'] +['èħ', '»'] +['çŃ', 'Ľ'] +['åİ', 'Į'] +['æģ', 'Ń'] +['å«Į', 'çĸij'] +['æĪ', '¶'] +['æ»', 'ŀ'] +['èĨ', 'Ģ'] +['åĬ', '£'] +['座', 'è°Ī'] +['常', 'æĢģ'] +['çļĦ', 'æĥħ'] +['è¦', '½'] +['å¯', 'Ĥ'] +['åĮ', 'Ĩ'] +['èĩ', 'º'] +['é¡', '¯'] +['çķ', 'ı'] +['éģ', '£'] +['åį', 'ľ'] +['çŃī', 'å¥ĸ'] +['è²', '¬'] +['æº', '¯'] +['é', 'İ'] +['çĤ¹', '头'] +['èĵ', '¬'] +['æ±', 'º'] +['éħ', '¬'] +['éģ', 'Ĭ'] +['è³', '¼'] +['註', 'åĨĬ'] +['æľ¬', 'æĬ¥'] +['çµ', 'ķ'] +['æ´»', 'æĢ§'] +['åħ', 'ij'] +['éĮ', '¯'] +['åĨ', '¶'] +['åĸ', '»'] +['æº', 'ĸ'] +['èĤ', '¢'] +['æº', 'ĥ'] +['æĹ', '¬'] +['åī', 'Ĭ'] +['çIJĨ', 'äºĭ'] +['å±', 'ł'] +['æ²', '§'] +['èļ', 'Ģ'] +['鼻', 'åŃIJ'] +['为', 'æŃ¢'] +['常', 'å§Ķ'] +['çµ', 'Ĥ'] +['éĬ', '·'] +['çĭ', 'Ģ'] +['ä¾', '£'] +['èĥ', 'Ģ'] +['èŃ', '°'] +['ç͍', '车'] +['åĻ', 'ª'] +['æŃ', '·'] +['åį', 'Ķ'] +['åĪ', '¹'] +['竣', 'æĺ¯'] +['é©', 'Ĺ'] +['èIJ', 'Ŀ'] +['çĻ', '«'] +['çĹ', '«'] +['æŃ', '§'] +['å¼', 'Ĭ'] 
+['åª', '½'] +['çı', 'Ĭ'] +['è¡', '·'] +['éľ', 'ī'] +['åŁº', 'çĿ£'] +['éļ', '±'] +['æ°', '¨'] +['ç»', '¸'] +['å°¼', 'æĸ¯'] +['çĥ', 'ĺ'] +['æľŁ', 'åĨħ'] +['è°', 'ħ'] +['éĽ', 'ĩ'] +['éļ', 'Ļ'] +['å', 'ĸī'] +['åī', '¥'] +['çĹ', 'ĺ'] +['æĮ', '½'] +['çĵ', '£'] +['æ¹', 'Ľ'] +['æ¨', '±'] +['æ¾', 'İ'] +['æ¹', 'ĥ'] +['åĨ¬', '奥'] +['æ£', 'µ'] +['å®', '°'] +['åŀ', 'Ĵ'] +['æ§', 'ĭ'] +['ä¾', 'Ī'] +['èĮ', 'Ħ'] +['åĺ', '¿'] +['èı', 'ĩ'] +['ç', 'ĻĤ'] +['åĬ', 'ĥ'] +['é', 'į'] +['èĶ', '½'] +['çŀ', 'Ń'] +['æķ', 'ŀ'] +['ä¹', 'ĸ'] +['éŁ', '§'] +['è¾', 'ľ'] +['æĩ', 'Ī'] +['ä½', '£'] +['çŀ', '»'] +['åŁ', 'Ķ'] +['èĪ', 'ħ'] +['å®ŀ', 'äºĭ'] +['é', '¨'] +['å§', '¥'] +['çµ', '¡'] +['åĺ', '»'] +['çķ', '¢'] +['æ²ĥ', 'å°Ķ'] +['è¿', 'Ħ'] +['èĤ', 'ĩ'] +['æħ', 'ij'] +['ã', '§'] +['ä', 'ı'] +['ð', 'ł'] +['ð¬', 'ĩ'] +['ð«', 'Ń'] +['ð«', 'IJ'] +['ã', '³'] +['©', '½'] +['ð«', 'ł'] +['ã', 'Ľ'] +['ð¬', 'į'] +['é', '¿'] +['ð¬', 'Ĵ'] +['ã', 'Ļ'] +['ð¬', '¤'] +['ð', '¬´'] +['ð«', 'ĸ'] +['ð', '¤'] +['ã', '¬'] +['ä', '²'] +['ð«', 'Ķ'] +['ð«', 'ļ'] +['è¦ģ', 'æ±Ĥ'] +['ä¸Ģ', 'äºĽ'] +['å®ŀ', 'çݰ'] +['èĢĮ', 'ä¸Ķ'] +['åĽł', 'æŃ¤'] +['çͱ', 'äºİ'] +['åħ³', 'äºİ'] +['çĦ¶', 'åIJİ'] +['æİ¨', 'åĬ¨'] +['ä¸Ģ', 'æł·'] +['æĮī', 'çħ§'] +['è¿Ļæł·', 'çļĦ'] +['å½¢', 'æĪIJ'] +['æľī', 'äºĽ'] +['æĽ´', 'åĬł'] +['ç»ı', 'è¿ĩ'] +['建', 'è®®'] +['æ²»', 'çĸĹ'] +['ä½ł', '们'] +['æīį', 'èĥ½'] +['ä¿ĥ', 'è¿Ľ'] +['åijĺ', 'å·¥'] +['ä½ĵ', 'éªĮ'] +['èĪ', 'ĩ'] +['åģļ', '好'] +['ä¿Ŀ', 'è¯ģ'] +['æķ´', '个'] +['æĺ¯', 'ä¸Ģ个'] +['éĩĩ', 'ç͍'] +['çIJĨ', '论'] +['æ¯Ķ', 'å¦Ĥ'] +['ä¸Ĭ', 'çļĦ'] +['æİ¨', 'èįIJ'] +['çͳ', '请'] +['天', '空'] +['éĥ¨', 'èIJ½'] +['åįģ', 'åĪĨ'] +['æĿ¥', 'èĩª'] +['ä¹ĭ', 'éĹ´'] +['è°ĥ', 'æķ´'] +['æ¯ı', '天'] +['è°ĥ', 'æŁ¥'] +['æĤ£', 'èĢħ'] +['è¿ĩç¨ĭ', 'ä¸Ń'] +['é¦Ļ', '港'] +['广', 'åijĬ'] +['éĿ¢', '对'] +['满', 'è¶³'] +['éķ¿', 'æľŁ'] +['è§Ħ', 'èĮĥ'] +['æķ´', 'ä½ĵ'] +['æĶ¹', 'åıĺ'] +['æĻº', 'æħ§'] +['å¦Ī', 'å¦Ī'] +['å¦Ĥ', 'ä»Ĭ'] +['åIJĪ', 'åIJĮ'] +['éĥ½', 'ä¼ļ'] +['åĦ¿', 'ç«¥'] +['åĩı', 'å°ij'] +['éŁ³', 'ä¹IJ'] +['ç»ı', '常'] +['ä¸Ĭ', 'å¸Ĥ'] 
+['ä¼ĺ', 'ç§Ģ'] +['çļĦ', 'éĩįè¦ģ'] +['ä¸Ģ', 'æĿ¡'] +['æµ·', 'å¤ĸ'] +['åı¦', 'å¤ĸ'] +['ä¸Ģ', 'å®¶'] +['åİĭ', 'åĬĽ'] +['大', 'åŀĭ'] +['çľĭ', 'çĿĢ'] +['åĪ', 'Ģ'] +['幸', 'ç¦ı'] +['æİ¨', '广'] +['åIJ', 'Ľ'] +['å¾', 'IJ'] +['æī¾', 'åΰ'] +['äºİ', 'æĺ¯'] +['èĩª', '身'] +['ä¸Ģ', 'ä½į'] +['åľŁ', 'åľ°'] +['åĬł', 'åħ¥'] +['æİ¢', 'ç´¢'] +['æ¢', 'ģ'] +['主', 'åĬ¨'] +['å°±', 'ä¸ļ'] +['女', 'æĢ§'] +['çªģ', 'çł´'] +['ä¸įåIJĮ', 'çļĦ'] +['è¿IJ', 'è¾ĵ'] +['èĩª', 'çͱ'] +['å±ħ', 'æ°ij'] +['æŃ¤', '次'] +['çļĦ', 'æĹ¶éĹ´'] +['å®¶', 'éķ¿'] +['ä¸Ģ个', '人'] +['æ£Ģ', 'æµĭ'] +['åĨħ', 'éĥ¨'] +['广', 'å·ŀ'] +['缴', 'æĴŃ'] +['ä»İ', 'èĢĮ'] +['è´·', '款'] +['åı¬', 'å¼Ģ'] +['æĶ¹', 'éĢł'] +['人', 'çĶŁ'] +['å±ķ', '示'] +['æ¯ı', 'å¹´'] +['女', '人'] +['çļĦ', 'æĸ¹å¼ı'] +['æķĪ', 'çİĩ'] +['å±±', '举'] +['æ¸ł', 'éģĵ'] +['ä¼¼', 'ä¹İ'] +['æ¡Ī', 'ä»¶'] +['åĪ©', 'çĽĬ'] +['çľĭ', 'çľĭ'] +['å¿ĥ', 'éĩĮ'] +['ç»´', 'æĬ¤'] +['å®Ŀ', 'å®Ŀ'] +['ç½ij', 'ä¸Ĭ'] +['论', 'åĿĽ'] +['å°±', 'åı¯ä»¥'] +['ä¸į', 'è¶³'] +['æģ¢', 'å¤į'] +['å¸ĥ', 'å±Ģ'] +['è´¡', 'çĮ®'] +['ä¸ĭ', 'éĻį'] +['æİĮ', 'æı¡'] +['çļ®', 'èĤ¤'] +['å·¥', 'åħ·'] +['éĩį', 'åºĨ'] +['åĵģ', 'è´¨'] +['æİ¨', 'åĩº'] +['çĶ·', '人'] +['æī¿', 'æĭħ'] +['çªģ', 'åĩº'] +['èĢĮ', 'è¨Ģ'] +['æ²', 'Ł'] +['åįı', 'è°ĥ'] +['æĺ¯', 'ä»Ģä¹Ī'] +['æ±', '¤'] +['æĴ', 'ij'] +['çĭ¬', 'ç«ĭ'] +['çݯ', 'èĬĤ'] +['æī©', '大'] +['æ´', 'ª'] +['æĿ', '°'] +['çĽ', 'IJ'] +['ä»', 'ģ'] +['æ¶ī', 'åıĬ'] +['èĢģ', '人'] +['åį³', '使'] +['åįĹ', '京'] +['éħį', 'åIJĪ'] +['é¬', '¼'] +['çζ', '亲'] +['ç½Ĺ', 'æĸ¯'] +['å°ı', 'åĮº'] +['æķĻ', 'æİĪ'] +['åĨ³', 'çŃĸ'] +['é¢Ħ', '计'] +['æľ¬', '人'] +['ä¼', '¯'] +['ç«', '¹'] +['åΰ', 'åºķ'] +['å¸Ĥ', 'æ°ij'] +['åĩº', 'åı£'] +['éĩĩ', 'è´Ń'] +['æĢ»', 'ç»ĵ'] +['æŃ¦', 'æ±ī'] +['åĬł', '大'] +['广', '举'] +['æµģ', 'ç¨ĭ'] +['人', 'åı£'] +['å¦Ĥæŀľ', 'ä½ł'] +['åĩº', 'åİ»'] +['åĩ', 'ī'] +['åĨľ', 'æ°ij'] +['çݰ', '象'] +['åĬĽ', '度'] +['ç»Ļ', 'äºĪ'] +['åħļ', 'å§Ķ'] +['è¯Ń', 'è¨Ģ'] +['线', 'ä¸Ĭ'] +['æĢİ', 'æł·'] +['åĦ¿', 'åŃIJ'] +['ç¡®', 'å®ŀ'] +['ä¹ĭ', 'å¤ĸ'] +['éĥ½', 'åľ¨'] +['èī', '¾'] +['çļĦ', 'æĥħåĨµ'] +['éĩĮ', 'çļĦ'] 
+['åĽ´', 'ç»ķ'] +['æĽ´å¤ļ', 'çļĦ'] +['ä¾Ŀ', 'æ³ķ'] +['åħ¬', 'åĽŃ'] +['å®¶', 'éĩĮ'] +['æ¯į', '亲'] +['ä¸į', 'åĨį'] +['èĭ', '¹'] +['æ³ķ', 'éĻ¢'] +['飩', 'åĽ½'] +['缸', 'å½ĵ'] +['ä¸į', 'çŁ¥'] +['è¯Ħ', 'ä¼°'] +['ä¸į', 'ç͍'] +['顺', 'åĪ©'] +['éĩį', 'è§Ĩ'] +['è´¢', 'åĬ¡'] +['ä»ĸ', 'åĢij'] +['åıij', 'è¡Į'] +['ä¸ĵ', 'éŨ'] +['åħ·', 'å¤ĩ'] +['å¹¶', 'ä¸įæĺ¯'] +['è¶³', 'çIJĥ'] +['é', 'ŀĭ'] +['åıij', '表'] +['æ°¸', 'è¿ľ'] +['èIJ¥', 'åħ»'] +['éħį', 'å¥Ĺ'] +['æķ´', 'åIJĪ'] +['è´', 'º'] +['åĽŀ', 'çŃĶ'] +['æĶ¶', 'çĽĬ'] +['ä¹Ł', '许'] +['è»', 'Ĭ'] +['æİ¥', '触'] +['æĶ»', 'åĩ»'] +['åĽĽ', 'å·Ŀ'] +['æĢ§', 'èĥ½'] +['åĽŀ', 'åΰ'] +['èħ', '°'] +['ä¹Ł', '没æľī'] +['å¼', 'Ħ'] +['设', 'ç«ĭ'] +['éĺ²', 'æİ§'] +['æĬĢ', 'å·§'] +['éĢļ', '常'] +['è´¢', 'æĶ¿'] +['éĥ¨', 'ç½²'] +['åľº', 'æĻ¯'] +['æ±Ł', 'èĭı'] +['表', 'è¾¾'] +['åĸ', '·'] +['女', 'åĦ¿'] +['èĪ', '¶'] +['çµ', '¦'] +['ä¼ļ', 'åijĺ'] +['æĪĸ', '许'] +['äº', '©'] +['举', 'æĸ¹'] +['天', 'æ´¥'] +['è¿ij', 'å¹´'] +['çľĭ', 'æĿ¥'] +['æ¯Ķ', 'ä¾ĭ'] +['å²', '©'] +['éĵ', 'ľ'] +['çİ', '»'] +['å®ŀ', 'éªĮ'] +['æĢĿ', 'ç»´'] +['æĭħ', 'å¿ĥ'] +['æ²', 'Ī'] +['身', 'è¾¹'] +['æ·±', 'åĮĸ'] +['ç²¾', 'åĩĨ'] +['ç§ģ', 'æľį'] +['æ¶Ī', 'éĺ²'] +['åİ»', 'äºĨ'] +['ç»Ĩ', 'èĥŀ'] +['çIJĥ', 'éĺŁ'] +['æĺİ', 'æĺŁ'] +['é£Ł', 'çī©'] +['å¾Ī', 'å¿«'] +['让', 'ä½ł'] +['ä¿¡', 'ç͍'] +['å͝', 'ä¸Ģ'] +['åħ¶', 'å®ĥ'] +['çŃī', 'æĸ¹éĿ¢'] +['å¾ĭ', 'å¸Ī'] +['æŃ»', '亡'] +['æ', 'ٳ'] +['ä¸Ģ', 'æī¹'] +['ä¸Ĭ', '涨'] +['æľº', 'åľº'] +['å½¢', 'åĬ¿'] +['æĦ¿', 'æĦı'] +['éĽĨ', 'ä½ĵ'] +['æĸ°', 'åŀĭ'] +['æįŁ', '失'] +['æĽ', '¸'] +['ä¸ĭ', 'åįĪ'] +['æ¯ı', '次'] +['æĪIJ', 'å°±'] +['åħ¬', 'è·¯'] +['èĻ', '«'] +['åĴ', '±'] +['西', 'å®ī'] +['æľĢ', 'ä½³'] +['ç§ij', 'çłĶ'] +['å¤į', 'æĿĤ'] +['æľº', 'åύ'] +['çα', 'æĥħ'] +['çħ§', 'çīĩ'] +['å¹´', 'é¾Ħ'] +['è³ĩ', 'æĸĻ'] +['ç²', 'Ĺ'] +['åĩĨ', 'ç¡®'] +['åĬł', 'ä¸Ĭ'] +['åĩº', 'çīĪ'] +['è°', 'IJ'] +['å®¶', 'å±ħ'] +['èĥĮ', 'æĻ¯'] +['ä¸Ģ', '线'] +['äºĭ', '项'] +['åĬ¨', 'ä½ľ'] +['ç¥', '¥'] +['æĢ»', 'ä½ĵ'] +['æĪ¿', 'åŃIJ'] +['ä¹Ł', 'å°±æĺ¯'] +['大', 'æ¦Ĥ'] +['é«ĺ', 'æķĪ'] +['åIJ', '¹'] +['æİ', 
'ĪæĿĥ'] +['éĻĦ', 'è¿ij'] +['æ¡Ī', 'ä¾ĭ'] +['éĹ', '¹'] +['çΏ', 'çΏ'] +['彩', '票'] +['æĢ', 'Ĵ'] +['举', 'æĬ¥'] +['æĻ®', 'éģį'] +['çķĻ', 'ä¸ĭ'] +['è¡£', 'æľį'] +['æĹłè®º', 'æĺ¯'] +['åħħ', '满'] +['æ·±', '度'] +['æ¡', 'ij'] +['æĪª', 'èĩ³'] +['带æĿ¥', 'çļĦ'] +['éĻ', 'µ'] +['æĦŁ', 'æĥħ'] +['èµ', 'ļ'] +['åĵª', 'äºĽ'] +['æķ´', 'æĶ¹'] +['æĪIJ', 'çĨŁ'] +['å¨', 'ľ'] +['é¼', '»'] +['çŁ', 'Ľ'] +['çĽ', '¾'] +['好', '好'] +['第', 'åĽĽ'] +['åĨł', 'åĨĽ'] +['è´¢', 'å¯Į'] +['æľĢ', '好çļĦ'] +['车', 'åŀĭ'] +['éĸ', 'Ģ'] +['åį³', 'å°Ĩ'] +['åĪĨ', '为'] +['éĿĴ', 'å²Ľ'] +['纷', '纷'] +['ä»Ĭ', 'æĹ¥'] +['å¹³', 'è¡¡'] +['å¹³æĸ¹', 'ç±³'] +['éĤ£', 'ç§į'] +['åĩº', 'çĶŁ'] +['éĿĴ', 'æĺ¥'] +['人', '群'] +['人', 'å·¥'] +['ä¹ĭ', 'ä¸ĭ'] +['æ¹ĸ', 'åĮĹ'] +['åľ¨', 'æŃ¤'] +['åįļ', '士'] +['æĹ¶', 'åĪ»'] +['æ²³', 'åĮĹ'] +['æĶ¾', 'å¼ĥ'] +['éĢļ', 'éģĵ'] +['森', 'æŀĹ'] +['çĸ', 'Ĩ'] +['æķ', '¸'] +['èĬ', '³'] +['æīĵ', 'åĩ»'] +['æĽ', '¹'] +['åĮĸ', 'åѦ'] +['æĥ³', '象'] +['ä¸ĩ', '人'] +['è´¢', 'ç»ı'] +['åħĥ', 'ç´ł'] +['ä¼ļ', '计'] +['åħ¨', 'ä½ĵ'] +['æĦ', 'Ľ'] +['é«ĺ', 'ä¸Ń'] +['æľº', 'éģĩ'] +['声', 'éŁ³'] +['æĹħ', 'è¡Į'] +['æµ', '©'] +['æŁ', '±'] +['å°ij', 'å¹´'] +['åĽ½', 'å¤ĸ'] +['èijĹ', 'åIJį'] +['çĶŁ', 'åŃĺ'] +['å§', 'ľ'] +['带', 'é¢Ĩ'] +['é¢ľ', 'èī²'] +['ä¸Ĭ', 'ä¸ĭ'] +['产ä¸ļ', 'éĵ¾'] +['æĽ´', '好çļĦ'] +['å²', 'Ń'] +['ä¼ĺ', 'æĥł'] +['便', 'æĺ¯'] +['åħ§', '容'] +['ä¸Ģ', 'åıª'] +['çIJ', '´'] +['梦', 'æĥ³'] +['ç§Ł', 'èµģ'] +['å¼Ģ', 'åIJ¯'] +['è´Ń', 'çī©'] +['åĮħ', 'åIJ«'] +['åĪ©', 'çİĩ'] +['èµ·', 'äºĨ'] +['æľī', 'åĬĽ'] +['éĤ£', 'éĩĮ'] +['审', 'æī¹'] +['对', 'æīĭ'] +['çݰ', 'éĩij'] +['天', 'çĦ¶'] +['çĽ', 'Ĵ'] +['çĪ', '½'] +['å¿ħ', 'çĦ¶'] +['åĮĸ', 'å·¥'] +['ä¸ĵ', 'åĪ©'] +['åķ', '¡'] +['å¼Ģ', 'å¿ĥ'] +['人', 'ä½ĵ'] +['éģĵ', '士'] +['æĢģ', '度'] +['空', 'è°ĥ'] +['æĭĽ', 'åķĨ'] +['å§', '»'] +['第', 'äºĶ'] +['æ£', 'Ĵ'] +['ä¸Ģ', 'ç³»åĪĹ'] +['åį±', 'æľº'] +['转', 'åıĺ'] +['åľº', 'æīĢ'] +['é¸', '£'] +['æĪ¿', 'éĹ´'] +['éĢ', '¼'] +['è¯ķ', 'çĤ¹'] +['对', 'å¤ĸ'] +['åĩº', 'åı°'] +['åľ¨', 'è¿Ļ'] +['åİĤ', 'å®¶'] +['å·¨', '大'] +['ç®Ģ', 'ä»ĭ'] +['çľĭ', 'äºĨ'] +['åħļ', '建'] 
+['æĮĩ', 'æĮ¥'] +['çŁ³', 'æ²¹'] +['ä¸į', 'åı¯èĥ½'] +['èİ', '²'] +['ä¸į', '太'] +['åĪĽ', 'æĦı'] +['第', 'ä¸Ģ个'] +['è´µ', 'å·ŀ'] +['è¿ĩ', 'äºĨ'] +['æľ¬', 'æĿ¥'] +['éģĵ', 'å¾·'] +['çŃĶ', 'æ¡Ī'] +['éĻ', '¶'] +['ä¸Ģ', 'è·¯'] +['èĤ', 'ĸ'] +['æ¸ħ', 'æ´ģ'] +['æľī', 'æľº'] +['åIJį', 'åįķ'] +['æĿ', '±'] +['åij¼', 'åIJ¸'] +['ä¸', 'Ī'] +['ç¦ı', '建'] +['è¯ķ', 'éªĮ'] +['å¼ķ', 'åıij'] +['ä¹Ł', '没'] +['ä¸į', 'ä½ı'] +['çĨŁ', 'æĤī'] +['èIJ', '¬'] +['ä¸į', 'èī¯'] +['çł', 'ĸ'] +['èĩ´', 'åĬĽ'] +['çѾ', '订'] +['åIJ', 'Ĭ'] +['ä¾', '¯'] +['çĺ', '¦'] +['å§ij', 'å¨ĺ'] +['æĸ', '¤'] +['妻', 'åŃIJ'] +['æĺ¥', 'èĬĤ'] +['çĪ', '¬'] +['æĽ', 'Ŀ'] +['çĥŃ', 'æĥħ'] +['éķ¿', 'æ²Ļ'] +['èIJ¥', 'éĢł'] +['éħ', '·'] +['éĵ', 'Ŀ'] +['åŁºæľ¬', 'ä¸Ĭ'] +['åij¨', 'åĽ´'] +['ä»Ģ', '麼'] +['认', 'åı¯'] +['åĪĨ', 'åŃIJ'] +['ä¸Ģ', 'æĸ¹éĿ¢'] +['è½', '´'] +['å¼', '·'] +['马', 'ä¸Ĭ'] +['éĽ', '¾'] +['èĩ', '£'] +['å°', '¿'] +['çĶŁ', 'æĦı'] +['å®ī', 'å¾½'] +['ç¥ŀ', 'ç»ı'] +['åĩº', 'å¸Ń'] +['èį¯', 'åĵģ'] +['çIJĨ', 'çͱ'] +['åįı', 'åIJĮ'] +['æµģ', 'åĬ¨'] +['åıij', 'åĬ¨'] +['åĿļ', 'å®ļ'] +['表', 'æĺİ'] +['åIJİ', 'éĿ¢'] +['ä¹ī', 'åĬ¡'] +['å¦', 'ĸ'] +['æľī', 'åı¯èĥ½'] +['å¹´è½»', '人'] +['大', 'éĻĨ'] +['å²', '³'] +['ä¸į', 'èµ·'] +['çŀ¬', 'éĹ´'] +['ä¸įå¾Ĺ', 'ä¸į'] +['çѾ', '约'] +['åIJĪ', 'æł¼'] +['åħļ', 'æĶ¯éĥ¨'] +['æµİ', 'åįĹ'] +['便', 'åĪ©'] +['éļı', 'æĹ¶'] +['å¥', 'ī'] +['ç§°', '为'] +['产', 'æĿĥ'] +['åIJ', 'ķ'] +['çĽ', 'Ĩ'] +['课', 'åłĤ'] +['ç·', 'ļ'] +['æ£', 'ī'] +['线', 'ä¸ĭ'] +['èĩª', 'è¡Į'] +['举', 'æİª'] +['åݦ', 'éŨ'] +['èĩª', 'ä¿¡'] +['å½±', 'è§Ĩ'] +['ä»', 'Ķ'] +['çĶŁæ´»', 'ä¸Ń'] +['æĿĥ', 'çĽĬ'] +['çϽ', 'èī²'] +['å°±', 'ä¸į'] +['è¿Ľ', 'å±ķ'] +['æ¯ı', 'æĹ¥'] +['ä¾Ľ', 'ç»Ļ'] +['æĿĥ', 'åĪ©'] +['æĹł', 'æķ°'] +['çIJĨ', 'è´¢'] +['ä¾Ŀ', 'æĹ§'] +['ä¸Ĭ', 'åįĪ'] +['è¯Ĩ', 'åĪ«'] +['çĽĪ', 'åĪ©'] +['çł', 'Ĥ'] +['许', 'åı¯'] +['åIJĮ', 'äºĭ'] +['åĺ', 'Ľ'] +['éģ', '¸'] +['çĿĢ', 'åĬĽ'] +['éŨ', 'åı£'] +['ä¸į', 'å¤ļ'] +['åħ¶', '次'] +['ç¢', '§'] +['çī©', 'çIJĨ'] +['åĨħ', 'å¿ĥ'] +['çϾ', 'å§ĵ'] +['æĢ»', '绣'] +['å¹²', 'åĩĢ'] +['积', 'ç´¯'] +['åıį', 'é¦Ī'] 
+['æłij', 'ç«ĭ'] +['社', '交'] +['ç§', '©'] +['åįģ', 'ä¸Ģ'] +['éĤ', 'ĵ'] +['驱', 'åĬ¨'] +['å±ķ', 'è§Ī'] +['èĪĴ', 'éĢĤ'] +['åŁº', 'åĽł'] +['å·®', 'å¼Ĥ'] +['转', '让'] +['å°ı', 'å§IJ'] +['æł·', 'åŃIJ'] +['ç¿', 'Ķ'] +['é«ĺ', 'åħ´'] +['å½±åĵį', 'åĬĽ'] +['æīĭ', 'ç»Ń'] +['缸', 'åIJĮ'] +['缸', 'åºĶ'] +['æĻ', 'Ĵ'] +['è§', 'Ģ'] +['å¸Ĥ', 'å§Ķ'] +['èĬ', '¯'] +['å±ķ', 'çݰ'] +['åľ°', 'çIJĥ'] +['éĤ', 'ª'] +['ä¸Ģå®ļ', 'çļĦ'] +['åħģ', '许'] +['ä¿¡', 'ä»»'] +['æī', 'ij'] +['éĻ¢', 'æł¡'] +['ç®Ģ', 'ç§°'] +['åģļ', 'æ³ķ'] +['ä¹ĭ', 'è·¯'] +['æĹĹ', 'ä¸ĭ'] +['èħ', 'Ķ'] +['æ¶Ī', '失'] +['ä¸ĸçķĮ', 'ä¸Ĭ'] +['åŁİ', '乡'] +['èĪŀ', 'åı°'] +['å¾Ī', '大çļĦ'] +['绣', 'çѹ'] +['åħ¬', 'å¹³'] +['èĤ', '¾'] +['çļĦ', '好'] +['æ±', 'ģ'] +['çľ¼', 'åīį'] +['éĽ', '£'] +['å¹', '½'] +['åħ±', '产'] +['主', 'åĬŀ'] +['å¤Ħ', 'ç½ļ'] +['åº', 'Ļ'] +['éģĵ', 'çIJĨ'] +['å¼', 'µ'] +['æİ¥', 'çĿĢ'] +['çĮ', 'İ'] +['çģ', 'Į'] +['çͱ', 'æŃ¤'] +['人', 'åĬĽ'] +['æµģ', 'è¡Į'] +['ä¾', 'ł'] +['åı¯ä»¥', '说'] +['èĴ', 'ĭ'] +['å½¢', 'æĢģ'] +['æĹ¥', 'åŃIJ'] +['æ¼', 'Ĩ'] +['çķĻ', 'åѦ'] +['缸', 'éĹľ'] +['æľĢ', 'å¤ļ'] +['åĩŃ', 'åĢŁ'] +['åħ¬', '交'] +['æĮĸ', 'æİĺ'] +['æĿĤ', 'å¿Ĺ'] +['主', '人'] +['éļľ', 'ç¢į'] +['æł¡', 'éķ¿'] +['æĸ¹', 'ä½į'] +['ä¸Ĭ', 'çıŃ'] +['å¤ļ', 'åħĥ'] +['è', 'ĥģ'] +['éŃħ', 'åĬĽ'] +['èĮ', 'Ĥ'] +['åħħ', 'ç͵'] +['强', '大'] +['çĥ', '¤'] +['å¥ĭ', 'æĸĹ'] +['å®ŀ', 'ç͍'] +['éĺ', 'ģ'] +['ç»Ļ', 'äºĨ'] +['æľ¬', 'ç§ij'] +['æł', 'ĭ'] +['æĭ', '¨'] +['æķĻ', 'ç»ĥ'] +['éĥ½', 'çŁ¥éģĵ'] +['æ¯ķä¸ļ', 'çĶŁ'] +['ç¢', 'Ĺ'] +['åŀ', 'Ĥ'] +['è®', '¼'] +['å®ģ', 'æ³¢'] +['åѦ', 'èĢħ'] +['è°¢', 'è°¢'] +['åŁİ', 'éķĩ'] +['æĢİä¹Ī', 'åĬŀ'] +['éģ', 'Ķ'] +['æĪIJ', '交'] +['æ½ľ', 'åĬĽ'] +['åį', '§'] +['æĸ°', 'å¼Ģ'] +['éħį', 'å¤ĩ'] +['主', 'åĬĽ'] +['åij³', 'éģĵ'] +['çĥ', 'Ĥ'] +['é£ŀ', 'è¡Į'] +['å«', 'ģ'] +['大', '大'] +['ç»Ļ', '大家'] +['å¤ĸ', 'éĿ¢'] +['éĨ', 'ī'] +['åıij', 'è¨Ģ'] +['æĹ©', 'é¤IJ'] +['åIJĦ', 'èĩª'] +['å®', 'Ļ'] +['èį£', 'èªī'] +['æĬ«', 'éľ²'] +['é¡', 'ŀ'] +['åĨħ', 'çļĦ'] +['èĤ', 'ª'] +['è¾', 'IJ'] +['æ³', 'µ'] +['æĬ', 'Ľ'] +['æĺŁ', 'æľŁ'] +['ä¸Ģ', '带'] +['çĶŁ', 'ç´ł'] 
+['ç»ı', 'éĶĢ'] +['åĩ', '¶'] +['åľ°', 'ä¸Ĭ'] +['åij½', 'è¿IJ'] +['åĵ', '²'] +['ä¸Ĭ', 'åİ»'] +['æĸĩ', 'çī©'] +['è¯', 'ij'] +['æĮ¯', 'åħ´'] +['éķ¿', 'æĹ¶éĹ´'] +['ç¥', 'Ń'] +['åIJĪ', 'èĤ¥'] +['è¿Ŀ', 'è§Ħ'] +['èģ', 'ª'] +['ä½İ', 'äºİ'] +['éĢĤ', 'å½ĵ'] +['æľī', 'åºı'] +['æľ¬', 'ç½ij'] +['çķĻ', 'è¨Ģ'] +['æĥ³', 'æ³ķ'] +['çѾ', 'ç½²'] +['å§', 'ļ'] +['æĢ§', 'æł¼'] +['èĴĻ', 'åı¤'] +['æŁ', 'ı'] +['åŀ', '«'] +['åѦ', 'åİĨ'] +['ä»ħ', 'ä»ħ'] +['讲', 'è¯Ŀ'] +['éĶ', 'IJ'] +['æĢ', 'ĸ'] +['åī', 'ª'] +['èĭ', 'į'] +['åIJ', 'ĵ'] +['强', 'çĥĪ'] +['åģ¥', 'åħ¨'] +['çĸ', '¯'] +['åı¤', '代'] +['å¥', 'Ī'] +['ä¸į', 'çĦ¶'] +['乡', 'éķĩ'] +['æľĭåıĭ', '们'] +['åĤ', 'ħ'] +['èģ', '½'] +['个', 'æĢ§'] +['æ³ķ', 'è§Ħ'] +['å°ı', 'éķĩ'] +['çĶ»', 'éĿ¢'] +['第', 'åħŃ'] +['ç¶²', 'è·¯'] +['åīį', 'æĻ¯'] +['åIJ¬', '说'] +['ä¼ł', 'åªĴ'] +['æĿ¡', 'ä¾ĭ'] +['åĪ«', 'çļĦ'] +['ä¸į', 'æĩĤ'] +['顾', 'éĹ®'] +['强', '度'] +['éĺ¿', 'éĩĮ'] +['èµ°', 'åĬ¿'] +['å¸', '½'] +['çļĦ', 'ç¡®'] +['åĮº', 'åĪ«'] +['éĮ', '¢'] +['主', '管'] +['ä¸Ģ', 'çľĭ'] +['æĸ', 'ľ'] +['åŃĺåľ¨', 'çļĦ'] +['ä»', '²'] +['åį±', '害'] +['éĵ', 'Ń'] +['游æĪı', 'ä¸Ń'] +['éħ', '±'] +['é¾Ļ', '头'] +['人', 'å¿ĥ'] +['éĢĢ', 'ä¼ij'] +['æµı', 'è§Ī'] +['åĬ', '«'] +['éĺ²', 'æ²»'] +['ç®', 'Ń'] +['å±', 'Ī'] +['è¾½', 'å®ģ'] +['å£', '¤'] +['è¿İ', 'æĿ¥'] +['éŀ', 'į'] +['ç͍', 'æĿ¥'] +['大', 'åľ°'] +['ä»', '°'] +['éĢļ', '讯'] +['å¼Ģ', 'å·¥'] +['è£', '¤'] +['å¦Ĥ', 'åIJĮ'] +['éª', '¤'] +['éĺŁ', 'åijĺ'] +['è½', '©'] +['ç¾İ', 'æľ¯'] +['èĻ', 'Ł'] +['åIJĮ', 'ä¸Ģ'] +['åľ', 'ĸ'] +['书', 'æ³ķ'] +['æīĵ', 'åį°'] +['åIJ«', 'æľī'] +['éĽĨ', 'æĪIJ'] +['éĹ', '·'] +['å¸Ĥåľº', 'ä¸Ĭ'] +['æĹģ', 'è¾¹'] +['åľ°', 'æĿ¿'] +['产çĶŁ', 'çļĦ'] +['ç²', '¤'] +['éĩį', 'ç»Ħ'] +['è¡Ģ', 'æ¶²'] +['çŃ', 'ĭ'] +['åĬŀ', 'äºĭ'] +['常è§ģ', 'çļĦ'] +['ä¸Ĭ', 'åįĬå¹´'] +['å±ı', 'å¹ķ'] +['åIJī', 'æŀĹ'] +['å·', '©'] +['åĸľ', 'çα'] +['ç¿', 'ł'] +['ä¸ī', 'ç§į'] +['æ¡Ĩ', 'æŀ¶'] +['举', 'èİŀ'] +['çĶĺ', 'èĤĥ'] +['èĬ', '¬'] +['åĽ¾', '书'] +['åĩ¤', 'åĩ°'] +['æ°Ķ', 'åĢĻ'] +['å°', '´'] +['å°', '¬'] +['两', '天'] +['è¾ħ', '导'] +['åĢŁ', '款'] +['æĹ¥', 'èµ·'] 
+['æ´', 'Ĵ'] +['ä¸Ģ', '度'] +['è¹', 'Ī'] +['æ½', 'Ń'] +['æī', 'ĩ'] +['çĻ', 'ľ'] +['æĸ°', 'åħ´'] +['åĤ', '²'] +['诸', 'å¤ļ'] +['è´', 'ª'] +['éĻ·', 'åħ¥'] +['èĪ', 'Ł'] +['èĤº', 'çĤİ'] +['ä¸Ģ', 'æł·çļĦ'] +['åİ', 'ĺ'] +['åľ°', 'çIJĨ'] +['æĬķ', '注'] +['éļ', 'Ĭ'] +['åħī', 'ä¼ı'] +['ä¿Ŀ', 'åģ¥'] +['åħ', 'Ķ'] +['åħ¬', 'åĬ¡'] +['æīĵ', 'çł´'] +['çĶ·', 'åŃ©'] +['åĬ³', 'åĬ¡'] +['ä½ł', 'ä¼ļ'] +['ç͍', 'åľ°'] +['æº', '¢'] +['åıij', 'è¾¾'] +['èĤ', 'ļ'] +['è¿ĩ', 'äºİ'] +['èĩ', 'Ĥ'] +['éĢĻ', '樣'] +['è½»', 'è½»'] +['ä¸Ń', 'åħ±'] +['åIJĦ', 'åĽ½'] +['åĶ', 'ĩ'] +['å®ŀ', 'ä¹ł'] +['èĻ', '¾'] +['æ§', '½'] +['ä¸į', 'ä¸Ĭ'] +['åħį', 'çĸ«'] +['åįł', 'æį®'] +['å·¥', 'ä¼ļ'] +['åĽ', 'Ĭ'] +['èĪª', '天'] +['åı¯', 'çα'] +['æĸĹ', 'äºī'] +['çĺ', '¤'] +['å¦Ĥ', 'æľī'] +['éĽ', 'ĸ'] +['对', 'æĪij'] +['åĩº', 'ç§Ł'] +['好', 'çľĭ'] +['太', '大'] +['æ°´', 'åĪ©'] +['åĬ¿', 'åĬĽ'] +['åħ¨', 'æ°ij'] +['ç½', '¢'] +['èµ¢', 'å¾Ĺ'] +['ç͵', 'ä¿¡'] +['车', 'éĹ´'] +['æĻĤ', 'åĢĻ'] +['å°ij', 'æķ°'] +['éĵ', '¸'] +['åħ³', 'èģĶ'] +['ä¸įä»ħ', 'ä»ħ'] +['为', 'æĤ¨'] +['åĴ', '¸'] +['æľº', 'åĬ¨'] +['è£', 'Ļ'] +['åĵį', 'åºĶ'] +['éģ', 'ł'] +['è²', '·'] +['ç©', '´'] +['å¢', 'ħ'] +['éĶ', '¡'] +['çµ', 'Ħ'] +['çģ«', '车'] +['è³ĩ', 'è¨Ĭ'] +['åĨ³', 'èµĽ'] +['污', 'æ°´'] +['èª', 'ŀ'] +['å´', 'Ľ'] +['ç´§', 'å¯Ĩ'] +['缺', 'å°ij'] +['å¤ļ', '人'] +['æĢ»', '书记'] +['éĶ', 'Ī'] +['èij', 'Ľ'] +['å¿ĺ', 'è®°'] +['éĻĮ', 'çĶŁ'] +['éķ¿', '大'] +['åħĪè¿Ľ', 'çļĦ'] +['ç¡', 'ħ'] +['åıij', 'æĺİ'] +['å©´', 'åĦ¿'] +['æīİ', 'å®ŀ'] +['èĽĭ', 'çϽ'] +['ä¸Ģ', 'çϾ'] +['缮', 'åħī'] +['æ', 'ħĮ'] +['åĬł', 'æ²¹'] +['åIJ', 'ŀ'] +['ä¸Ģ', '群'] +['ä¸Ń', 'ä»ĭ'] +['å¸', 'ĸ'] +['å¿', 'Į'] +['èģĮ', 'èĥ½'] +['广', 'æĴŃ'] +['çĽij', 'å¯Ł'] +['ç§ĺ', 'å¯Ĩ'] +['çĭ', '®'] +['è¿Ļ', 'æĿ¡'] +['éĢ', '¢'] +['æĢ', '¨'] +['åįģ', 'åħŃ'] +['è©', '¦'] +['说', 'åΰ'] +['åĩĿ', 'èģļ'] +['æĮĩ', '示'] +['æ°', '¢'] +['å¼', 'ĺ'] +['éĺ', 'Ģ'] +['æĸ', '©'] +['éł', 'ħ'] +['ä¸Ģ', 'å¼Ģå§ĭ'] +['æİĴ', 'è¡Į'] +['åľ¨', 'æĪij'] +['纪', 'å½ķ'] +['æĬ', 'Ħ'] +['æł', 'ª'] +['说', 'æ³ķ'] +['ä¸Ń', 'èį¯'] +['好', 'å¤ļ'] +['åıª', 'ä¸įè¿ĩ'] 
+['çķĻ', 'åľ¨'] +['个', 'å°ıæĹ¶'] +['认', 'çŁ¥'] +['çķ', '«'] +['è§ģ', 'è¿ĩ'] +['å°ı', 'å¾®'] +['ä½Ľ', 'å±±'] +['çľ', '¾'] +['讲', 'è¿°'] +['æ¢', '³'] +['ç§°', 'åı·'] +['æĹ¥', 'æĻļ'] +['è¢', 'ĸ'] +['åķ', '¤'] +['æľª', 'ç»ı'] +['æľĢ', 'æĹ©'] +['æī®', 'æ¼Ķ'] +['è¡Ģ', '管'] +['çº', '±'] +['æĥħ', 'èĬĤ'] +['第', 'ä¸ĥ'] +['æį', '§'] +['ä»', 'Ĺ'] +['æ¿Ģ', 'çĥĪ'] +['æĹł', '线'] +['ä¸į', '容æĺĵ'] +['å¼Ģ', 'å¹ķ'] +['æĸ°', 'çĶŁ'] +['ä¸ĵ', '注'] +['èij', '±'] +['åįĹ', 'æµ·'] +['çĩ', 'Ł'] +['èµ·', 'ä¾Ĩ'] +['æ´¾', 'åĩº'] +['åĦ', 'Ĵ'] +['ä¾', '¨'] +['è¼', 'ĥ'] +['åįļ', 'è§Ī'] +['éĢ', '¾'] +['åĮ', 'Ģ'] +['ç»ıæµİ', 'åѦ'] +['æ¸', 'Ĺ'] +['ä¿Ŀ', 'èŃ·'] +['çī', 'º'] +['çī', '²'] +['çİ', '«'] +['çij', '°'] +['æľĢåIJİ', 'ä¸Ģ'] +['æĶ¿', 'åĬ¡'] +['æ§', 'Ľ'] +['èĻķ', 'çIJĨ'] +['éļIJ', 'æĤ£'] +['æī¿', 'åĮħ'] +['æ¥', 'µ'] +['æ¡', '©'] +['çĽ', '²'] +['导', 'åIJij'] +['èĩ´', 'å¯Į'] +['ç¼', 'Ĩ'] +['æģĭ', 'çα'] +['ä¸į', 'åĬ¨'] +['ç»Ļ', '人'] +['å·', '¢'] +['表', 'æĥħ'] +['举', 'åįĹ'] +['åĨħ', 'å¤ĸ'] +['è¾Ī', 'åŃIJ'] +['åı', 'ī'] +['åįļ', 'ä¼ļ'] +['åĬŁ', 'æķĪ'] +['æ¸', '´'] +['å±', '¬'] +['æİĴ', 'éϤ'] +['éĢ', 'Ľ'] +['ä¸Ģ', 'ä¼ļ'] +['ä¸į', 'å¼Ģ'] +['å¼Ģ', 'å¥ĸ'] +['é»ij', 'é¾Ļ'] +['é»ijé¾Ļ', 'æ±Ł'] +['å¿«', 'ä¸ī'] +['度', 'åģĩ'] +['åĿ', '¤'] +['éĤ®', 'ä»¶'] +['æĩ', 'Ĵ'] +['ä¾Ľ', 'ç͵'] +['å»', '£'] +['好', 'è¯Ħ'] +['ç§ĺ书', 'éķ¿'] +['æĪĺ', 'åľº'] +['好', 'å¥ĩ'] +['ä¾µ', 'æĿĥ'] +['æĨ', '¾'] +['æľĢ', 'åĪĿ'] +['æī¹', 'åıij'] +['åİ', 'ķ'] +['è¼', 'ķ'] +['æŀ', '¯'] +['ä¸ļ', 'åĨħ'] +['è´Ń', 'æĪ¿'] +['ä¸į', 'åľ¨'] +['纪', 'å§Ķ'] +['æīĢ', 'éľĢ'] +['å¸Ĥ', 'éķ¿'] +['è³', '½'] +['å¼ķ', 'æĵİ'] +['çģµ', 'éŃĤ'] +['éĬ', 'Ģ'] +['æ»', '¤'] +['çĿ', 'IJ'] +['å¤ļ', '项'] +['åĽŀ', '头'] +['èī', 'ĺ'] +['å¤į', 'å·¥'] +['éĥ¨', 'ä»¶'] +['ç´§', 'ç´§'] +['æŁIJ', 'ç§į'] +['使', 'åħ¶'] +['æĸ°', '人'] +['æŀ', 'ļ'] +['æ³ķ', 'å®ļ'] +['å·´', 'å·´'] +['æ¶µ', 'çĽĸ'] +['ç¨', '»'] +['æĭ', '¾'] +['æĻ', 'ķ'] +['è½', '¿'] +['éĢļ', 'è¡Į'] +['åĵ', 'Ģ'] +['æ³', 'Ĭ'] +['温', '馨'] +['éĽĨ', 'èģļ'] +['çĨ', 'Ļ'] +['åĩ', 'ij'] +['åįģ', 'ä¸ĥ'] +['æ°Ķ', 'æģ¯'] 
+['æıIJä¾Ľ', 'çļĦ'] +['æ³', '³'] +['奥', 'è¿IJ'] +['çģ¾', '害'] +['åĩĢ', 'åĮĸ'] +['è·¨', 'è¶Ĭ'] +['åĵª', 'æĢķ'] +['éŁ', '¿'] +['å¢ŀ', 'æ·»'] +['çĦ', 'Ĭ'] +['æ®ĭ', 'çĸ¾'] +['ç¢', 'Į'] +['æĤ', 'Ķ'] +['è§ģ', 'è¯ģ'] +['è¾ĸ', 'åĮº'] +['å¿ĥ', 'èĦı'] +['éļ', '§'] +['åį', '¸'] +['åı¯èĥ½', 'æĢ§'] +['æľī', 'è¶£'] +['åī¯', '书记'] +['åĮĸ', 'å¦Ĩ'] +['ä¿', 'Ĥ'] +['æ£', 'ļ'] +['éĨ', 'ĩ'] +['带', '头'] +['éł', 'Ī'] +['追', 'ç©¶'] +['æij', 'Ķ'] +['è¿Ļ', 'éĥ¨'] +['ä¸į', '论'] +['ç¥', '¸'] +['å', '³»'] +['éģ', 'ķ'] +['çĶŁ', 'èĤ²'] +['å¤', 'ł'] +['å¤ĸ', '交'] +['è¯Ħ', '为'] +['ä»İ', 'å°ı'] +['å°ı', 'å°ı'] +['é', '¥¿'] +['æĴ', '¼'] +['è·¨', 'å¢ĥ'] +['被', 'åijĬ'] +['åįĹ', 'å®ģ'] +['身', 'å¿ĥ'] +['åĨį', 'çĶŁ'] +['æīĢ', '说'] +['æĹ¶éĹ´', 'åĨħ'] +['åĪĹ', 'åħ¥'] +['éĿĴ', 'æµ·'] +['çα', '好'] +['çª', 'Ħ'] +['èĪ', 'Ī'] +['è¿ĩ', '渡'] +['æ¿', 'Ł'] +['éĽ', 'Ģ'] +['审', 'è®®'] +['åĽ½', 'èµĦ'] +['æŃ¥', 'ä¼IJ'] +['轨', 'éģĵ'] +['ä¿¡', '念'] +['ä¸ī', 'åĪĨ'] +['çĨ', '¬'] +['åѵ', 'åĮĸ'] +['ç¼', 'ł'] +['éĥ', 'Ĭ'] +['èĪĴ', 'æľį'] +['纪', 'æ£Ģ'] +['ä¸Ģä¸ĭ', 'åŃIJ'] +['鼻', '話'] +['è²', 'ł'] +['éĴ', '¥'] +['åĮ', 'Ļ'] +['çĹ', '´'] +['è¶', 'ģ'] +['ç»', '£'] +['çĪ', 'µ'] +['è½', '°'] +['éª', 'Ħ'] +['å§', '¨'] +['æĭ', 'ĺ'] +['çĮ', '´'] +['è®', '¶'] +['è¿Ļ', '座'] +['çį', '¨'] +['æ·ĺ', 'æ±°'] +['çĹħ', 'ä¾ĭ'] +['æ²Ļ', 'åıij'] +['è§Ĩ', '为'] +['头', 'æĿ¡'] +['å¿ħè¦ģ', 'çļĦ'] +['åı¯', 'è°ĵ'] +['è¯Ŀ', '说'] +['ç¯', 'Ħ'] +['æĹ©', 'çĤ¹'] +['æŀ¢', '纽'] +['ç¾', '¡'] +['çα', 'åĽ½'] +['çªģ', 'åıij'] +['éĢ', 'Ĭ'] +['æ½', 'į'] +['èį£', 'èĢĢ'] +['èŁ', '¹'] +['æ¦Ĥ', 'çİĩ'] +['å¾Ī', 'ä¹ħ'] +['æĥ', 'ķ'] +['è¨', '´'] +['åľĨ', '满'] +['çļ', '±'] +['åĪĨ', 'æ³Į'] +['åħħ', 'è¶³'] +['çľĭ', 'æ³ķ'] +['è¾', 'Ł'] +['æĭ', '¦'] +['æĭ', '©'] +['对', 'åºĶ'] +['为', 'æł¸å¿ĥ'] +['èħ', 'Ĭ'] +['å¤ļ', 'ä¹Ī'] +['æµ', 'ij'] +['å®ı', 'è§Ĥ'] +['èĦ', 'ĸ'] +['åIJĪ', 'èµĦ'] +['çĶŁ', '涯'] +['å®ŀ', 'è´¨'] +['ä¼ĺ', 'çĤ¹'] +['ç͍', 'æ°´'] +['寿', 'åij½'] +['æ²', '«'] +['åIJ', 'ģ'] +['è©', '¹'] +['åĽ½', 'éĺ²'] +['å´', '©'] +['åĿ', 'İ'] +['èĨ', 'ı'] +['ä¸Ģ', 'è½®'] +['éģĹ', '产'] 
+['æ¹¾', 'åĮº'] +['ç»', 'İ'] +['åįķ', '纯'] +['æ¾', 'Ħ'] +['åīį', 'åĪĹ'] +['身', 'å½±'] +['é»ĺ', 'é»ĺ'] +['æį', 'ī'] +['çĴ', '°'] +['èı', 'Ĭ'] +['æĢ', 'ľ'] +['åħĭ', 'æĢĿ'] +['æĢ»', 'å±Ģ'] +['çĩĥ', 'æĸĻ'] +['ä¸ļ', 'æĢģ'] +['åIJĦ', 'æł·'] +['åĴ', '½'] +['åĩº', 'èī²'] +['åĪĿ', 'å¿ĥ'] +['åı', 'Ľ'] +['çłĶ', '讨'] +['è¡', '«'] +['åİĨ', 'ç¨ĭ'] +['ç¦', '½'] +['è¶³å¤Ł', 'çļĦ'] +['èį', 'Ĩ'] +['çľĭ', 'å¾ħ'] +['è´', '©'] +['åĨ³', 'å¿ĥ'] +['è£', '¹'] +['å¸Ī', 'èĮĥ'] +['åŀ', 'Ħ'] +['æĿ', 'ł'] +['åĩ', '¸'] +['çĬ¹', '豫'] +['çĥŃ', 'è¡Ģ'] +['åIJĪ', 'ä¼Ļ'] +['éħ', 'µ'] +['èIJ½', 'åľ¨'] +['åįł', 'åľ°'] +['è¡', '¬'] +['èĵ', 'ī'] +['æĦ', '¤'] +['æ¸', 'Ĭ'] +['åĪĨ', 'æķ°'] +['ç¬ij', 'çĿĢ'] +['太', 'å¹³'] +['çĤ', '«'] +['æİ¨', 'ä»ĭ'] +['æĸ¯', 'åĿ¦'] +['å½¢', '容'] +['æĵ', 'Ĭ'] +['æĦŁ', 'åħ´è¶£'] +['åĨĽ', '人'] +['åĩĮ', 'æĻ¨'] +['对', 'çħ§'] +['åıij', 'çĹħ'] +['å·', '¾'] +['èĪ', 'ī'] +['æª', '¢'] +['ç¬ij', 'äºĨ'] +['ç¡®', 'è¯Ĭ'] +['è´Ł', 'åĢº'] +['壮', '大'] +['æĪ', 'ļ'] +['äºĴ', 'èģĶ'] +['èª', '²'] +['èħ', '¦'] +['æĹ', '±'] +['åıĹ', '欢è¿İ'] +['åį', 'ī'] +['éĻ¢', '士'] +['æ©', '¡'] +['ä¸Ģ', '对'] +['è¾', '±'] +['æ²', 'Ĥ'] +['åı²', 'ä¸Ĭ'] +['æIJ', 'ı'] +['å´', 'ĸ'] +['代', 'è°¢'] +['ç£', '·'] +['é¡', 'ĺ'] +['æµ', 'ĩ'] +['常', 'ç͍'] +['åį', 'ij'] +['åĩº', 'åĽ½'] +['è¯', 'ł'] +['稳', 'æŃ¥'] +['ç»ı', '纪'] +['å¤ļ', 'å¤ļ'] +['æīĢ', 'å¾Ĺ'] +['为', '主é¢ĺ'] +['ä¸Ģ', 'åĪĨ'] +['æł', '½'] +['é¡', '§'] +['çº', '²'] +['åĥ', 'ħ'] +['å£', 'ĵ'] +['åĦ', 'ª'] +['ç¿', '°'] +['æİ', 'Ģ'] +['人', '为'] +['åª', '³'] +['æ´', '½'] +['èĿ', '¶'] +['å¤į', 'åħ´'] +['ä¼ļ', 'å½±åĵį'] +['åIJĦ', 'çķĮ'] +['éĤ£', 'ä¸Ģ'] +['é¢', '¤'] +['çĢ', 'ı'] +['çĢı', '覽'] +['å¯', 'ŀ'] +['åı¯', 'æĢķ'] +['åį³', 'æĹ¶'] +['çķ', '´'] +['ä¸ĭ', 'åįĬå¹´'] +['ç¬Ķ', 'è®°'] +['éĻĦ', 'åĬł'] +['çĥŃ', 'æ°´'] +['å¥', '¸'] +['ç£', 'ħ'] +['æĿ', 'ī'] +['æ¸ħ', 'åįİ'] +['éĸ', '±'] +['ç°', '¡'] +['å¤Ħ', 'å¤Ħ'] +['åIJĪ', 'éĩij'] +['æ²³', 'æµģ'] +['ç´', '°'] +['è´Ł', 'éĿ¢'] +['çļĦ', '羣å®ŀ'] +['åύ', '械'] +['èĴ', 'IJ'] +['西', 'äºļ'] +['å·', 'ħ'] +['ç²', '¹'] +['åİŁ', 'æĸĩ'] 
+['æŀ', 'ķ'] +['è¡Ģ', 'åİĭ'] +['åļ', '´'] +['å¸', 'ĺ'] +['åĨ', 'Ģ'] +['æĮ', '«'] +['ç͵', 'è·¯'] +['å°ı', 'ä¼Ļä¼´'] +['èĿ', '´'] +['æľĢ', 'å¿«'] +['æĭ', 'Į'] +['å®', 'ª'] +['æĸ', '·'] +['ç¿', 'ħ'] +['åĴ', '³'] +['åĹ', '½'] +['ç¾', 'ŀ'] +['躺', 'åľ¨'] +['èµĽ', '车'] +['æ²', 'IJ'] +['éĻIJ', '度'] +['为', 'ä¸Ģä½ĵ'] +['èĴ', 'ľ'] +['å¹', '«'] +['æIJ', 'ħ'] +['åĭ', 'ĭ'] +['åī', 'ĸ'] +['纳', 'ç¨İ'] +['éķ¿', 'æķĪ'] +['ç½', 'ķ'] +['åī¯', 'æľ¬'] +['ç©', 'į'] +['éĴ', '©'] +['ç¹', '¼'] +['åĽ½', 'åľŁ'] +['è¼', 'ī'] +['ä¸į', 'å¿ĺ'] +['èѦ', '示'] +['çģ', '¿'] +['å¿ĥ', 'å¾Ĺ'] +['æĦ', 'ļ'] +['忽', 'çķ¥'] +['åĽŀ', 'äºĭ'] +['åįł', 'æľī'] +['æ·', 'Ħ'] +['çī', '¡'] +['çĽij', 'äºĭ'] +['ç¿', '¡'] +['éĴĪ对', 'æĢ§'] +['çª', 'ĥ'] +['è£', '½'] +['èĨ', 'Ŀ'] +['ç³', 'Ł'] +['港', 'æ¾³'] +['太', '太'] +['æ¾', '¡'] +['ç»Ĩ', 'åĮĸ'] +['åĶ®', 'åIJİ'] +['å®ŀåľ¨', 'æĺ¯'] +['ç«', '£'] +['çį', '²'] +['å̾', 'åIJij'] +['å¼ķ', 'ç͍'] +['é¹', 'ħ'] +['ç¬ij', '容'] +['ä¹IJ', 'è¶£'] +['æ°ij', 'æĶ¿'] +['éŨ', 'æĪ·'] +['å±', 'ģ'] +['è¿·', '失'] +['éĶ', 'Į'] +['å°ı', '康'] +['åĭ', 'ī'] +['æ³', '¼'] +['ä¾ĭ', 'åŃIJ'] +['ä¸ī', 'ä½į'] +['å»', 'ł'] +['èĶ', 'ĵ'] +['广', 'éĺĶ'] +['èĢ', 'į'] +['èĢģ', 'èĻİ'] +['åĭŁ', 'éĽĨ'] +['èĦļ', 'æŃ¥'] +['æĭ', '¯'] +['åŃĹ', 'åı·'] +['çĦ', '°'] +['é¢', 'ł'] +['èļ', 'Ĥ'] +['èļ', 'ģ'] +['é£', '¯'] +['人', 'æĢ§'] +['æĴ', '°'] +['åİ', '¢'] +['å±Ģ', 'éĻIJ'] +['æľª', 'æĪIJ'] +['åĵª', 'åĦ¿'] +['大', 'åıij'] +['ä¸į', 'å®ļ'] +['å¾ģ', 'æ±Ĥ'] +['éĥ', 'µ'] +['åĢº', 'æĿĥ'] +['çα', 'ä½ł'] +['èº', 'ģ'] +['ä»ħ', 'ä¾Ľ'] +['è¿ľ', 'å¤Ħ'] +['éĨ', 'Ľ'] +['åĥ', 'µ'] +['积æŀģ', 'æĢ§'] +['æİ', '¡'] +['åīį', 'ä¸ī'] +['äºİ', 'ä¸Ģä½ĵ'] +['çŀ', 'Ħ'] +['çĿ', 'ģ'] +['æ²', '¸'] +['åħ±', 'èµ¢'] +['éĢĢ', 'å½¹'] +['è´Ŀ', 'å°Ķ'] +['æİ', 'ı'] +['æĪ', '²'] +['è¡', 'į'] +['éĶ', 'Ĥ'] +['ä¸ĩ', 'ä½Ļ'] +['ç§ij', 'åĪĽ'] +['æ¼Ķ', 'åͱ'] +['欧', 'åħĥ'] +['æ·¡', 'æ·¡'] +['éĿĴ', 'å±±'] +['èĹ', 'Ŀ'] +['ç»', '½'] +['令', 'çīĮ'] +['éĽĨ', '群'] +['ä½ľ', 'çī©'] +['çĢ', 'ij'] +['å¤', '¯'] +['ç½ij', '游'] +['åħ«', '大'] +['éª', 'ļ'] +['èª', 'ĵ'] +['ä¼ļ', 'å±ķ'] 
+['åħļ', 'åı²'] +['æ£Ģå¯Ł', 'éĻ¢'] +['åĸ', 'ĺ'] +['éĺ', '±'] +['èĢĮ', 'åĩº'] +['éĢļ', '车'] +['éĴ', 'ĵ'] +['æĥħ', '人'] +['æ¸', 'Ľ'] +['ä¸Ń', 'ç§ĭ'] +['çĪ', 'Ń'] +['åıª', 'åī©'] +['æĺ', 'Ķ'] +['éĩİ', 'çĶŁ'] +['ç¡', '«'] +['èIJĿ', 'åįľ'] +['æĬµ', 'æĬĹ'] +['çĻ«', 'çĹ«'] +['éĻ', 'Ģ'] +['èĶ', 'ļ'] +['å¸', 'ľ'] +['满', '满'] +['èı', '±'] +['éļĨ', 'éĩį'] +['æĺŁ', '级'] +['æ½', 'ĩ'] +['åħ¬', 'åħĥ'] +['è°', '£'] +['æ¯Ķ', 'äºļ'] +['æ¡Į', 'åŃIJ'] +['èµ', '£'] +['è²', '¼'] +['æĦ¿', 'æľĽ'] +['é¡', '½'] +['æ´¾', 'éģ£'] +['ç¥', 'Ľ'] +['åª', 'ļ'] +['éĺ', 'ľ'] +['èij', '«'] +['èĬ', '¦'] +['æ³', '»'] +['å¡', 'Į'] +['çĭ', 'Ń'] +['å»ī', 'æĶ¿'] +['å¥ij', 'æľº'] +['æĹĹ', 'èΰ'] +['æĥ', '«'] +['严', 'åİī'] +['åıĭ', 'æĥħ'] +['å¦', 'Ĭ'] +['å¨', 'ł'] +['åĵª', 'å®¶'] +['èĨ', '¨'] +['è¶', 'Ł'] +['æĮ', 'ª'] +['èĻ', 'IJ'] +['é', 'łģ'] +['çŀ', '©'] +['éº', 'Ł'] +['ç¨', '£'] +['èģĶ', 'éĢļ'] +['åı', '®'] +['çİĭ', 'èĢħ'] +['ä¸į', 'ç¡®å®ļ'] +['ç', 'ijľ'] +['è°', 'İ'] +['çī¢', 'è®°'] +['ç¢', '¼'] +['æĬ¤', 'èĤ¤'] +['é¡', '·'] +['çĦ', 'ķ'] +['åģļ', '强'] +['éļ±', 'ç§ģ'] +['éļ±ç§ģ', 'æ¬Ĭ'] +['åıĹ', '害'] +['ä¸į', 'çͱ'] +['çĥ', '¹'] +['é¥', 'ª'] +['é©', '³'] +['ä¼', '½'] +['ä¸Ŀ', '绸'] +['è¥', 'Ħ'] +['åįģ', 'ä½Ļ'] +['éº', 'Ĺ'] +['æ¬Ĭ', 'åĪ©'] +['èģ', 'ŀ'] +['åı¤', 'èĢģ'] +['éģ', 'ı'] +['åIJĦ', 'å¼ı'] +['å°±', 'è¡Į'] +['åħ¥', 'å¢ĥ'] +['ç', 'ĥģ'] +['èľ', 'ĺ'] +['èĽ', 'Ľ'] +['çº', '¬'] +['çŁ', '«'] +['è»', 'Ł'] +['æ´Ĺ', 'è¡£'] +['æĦ', '§'] +['é¢Ħ', 'æ¡Ī'] +['éľ', 'Ĩ'] +['æ·±', 'åİļ'] +['éĺ¿', 'æĭī'] +['åĨĻ', 'åŃĹ'] +['åį', '¦'] +['éķ', 'Ģ'] +['模', 'æł·'] +['åĤ', 'į'] +['æIJ', 'į'] +['èĸ', '¯'] +['åł', 'ħ'] +['åħ¬', '积'] +['è¨', 'İ'] +['ä¼ł', 'æŁĵ'] +['æ¯', '¯'] +['çIJĨ', 'å·¥'] +['åĨ·', 'éĵ¾'] +['ç«ĭ', 'æĸ¹'] +['æ¢', 'Ń'] +['åľ£', 'è¯ŀ'] +['综', 'èīº'] +['çİ©', 'ç¬ij'] +['æĥ³', 'ä¸įåΰ'] +['æijĩ', '头'] +['æ·', '¹'] +['åģĩ', 'æĹ¥'] +['åĢ', 'ĺ'] +['èĢ', '½'] +['èİ', 'ĵ'] +['åŁ', '·'] +['èĩª', 'è´¸'] +['åįĬ', '天'] +['æª', 'Ķ'] +['æ¾İ', 'æ¹ĥ'] +['éķ', 'ij'] +['ä¸', '«'] +['éĩĮ', 'ç¨ĭ'] +['å¼Ģ', 'èįĴ'] +['èı', 'ı'] 
+['å®Ŀ', 'è´µ'] +['èŃ', '¬'] +['åķ', 'Ł'] +['æŁ', 'ł'] +['æª', '¬'] +['é©', 'Ń'] +['æ±', 'Ľ'] +['çĨĬ', 'çĮ«'] +['èķ', 'ī'] +['éļı', 'ä¹ĭ'] +['å±', 'ij'] +['è¾ĥ', '强'] +['èĥ', '³'] +['èĨ', 'Ĭ'] +['éĿĻ', 'éĿĻ'] +['åĴ', 'ª'] +['æĭĽ', 'åij¼'] +['代', 'è¨Ģ'] +['ä¿¡', 'ç®±'] +['è£ħ', 'éħį'] +['æĤ', 'į'] +['åįķ', '车'] +['èIJ', 'İ'] +['å¤ļ', '彩'] +['éĻ', '¸'] +['ä»İ', '严'] +['æ©', 'Ħ'] +['æ¦', 'Ħ'] +['éĢ', '®'] +['éĩĮ', 'æĸ¯'] +['å§¿', 'æĢģ'] +['太', 'æŀģ'] +['éĩ', 'Ŀ'] +['æº', 'ī'] +['è¿', 'Ń'] +['ç§', '¸'] +['ç§', 'Ĩ'] +['å·¥', 'å§Ķ'] +['æ±', 'ķ'] +['èģ', 'Ĩ'] +['ä½', '¬'] +['ç¼', 'ħ'] +['çĶ', '¸'] +['åī¯', 'å±Ģéķ¿'] +['éĹ', 'º'] +['èª', '¤'] +['è¤', 'IJ'] +['ä¸į', 'éĻIJ'] +['èħ', 'ķ'] +['åij', 'ķ'] +['çŁ', '¶'] +['åĨľ', 'å®¶'] +['管', 'å§Ķä¼ļ'] +['é¥', 'º'] +['èĬ', 'ľ'] +['æ¾', 'Ī'] +['è©', '¢'] +['å¨ģ', 'å°¼æĸ¯'] +['ä½ķ', 'åĨµ'] +['å°ı', 'ä¼Ļ'] +['奢', 'ä¾Ī'] +['è¿Ļ', 'ç¯ĩ'] +['è¯', 'µ'] +['竳', 'ç¨ĭ'] +['ç´', 'Ģ'] +['éIJ', 'ĺ'] +['éĤ', '¢'] +['ç³', 'Ļ'] +['ç¼', 'Ģ'] +['ä¹', 'Ĵ'] +['ä¹', 'ĵ'] +['çī¢', 'åĽº'] +['åĿ', 'ŀ'] +['å¼', 'Ī'] +['ä¾ĭ', 'å¤ĸ'] +['å»', '³'] +['è§Ħ', '竳'] +['èĬ', 'Ļ'] +['ç¯', '·'] +['èº', '¯'] +['æł', 'Ī'] +['åĿļ', 'å®ŀ'] +['åŁº', '建'] +['çĿĢ', 'çľ¼'] +['ç·', '´'] +['èij', '©'] +['ç¼', 'ļ'] +['æ¦', 'Ĩ'] +['主', 'åĭķ'] +['ç¥', 'Ģ'] +['äºĴ', 'éĢļ'] +['å°¤', '为'] +['å®', 'Ľ'] +['éª', '¼'] +['æ±', '²'] +['ä¾', 'ĥ'] +['æĤł', 'ä¹ħ'] +['æij', '§'] +['æĭ', 'ĩ'] +['é«', 'ĵ'] +['éº', 'Ĵ'] +['éĻ', 'Ľ'] +['æŀ', '¸'] +['æĿ', 'ŀ'] +['è´', '¬'] +['å°ı', 'é¾Ļ'] +['åĵ', '®'] +['èĵ¬', 'åĭĥ'] +['åĮ', 'Ī'] +['çķľ', 'çī§'] +['å¨', '©'] +['个', 'å¤ļ'] +['æ²', '¥'] +['æĺ', '§'] +['çĦ', 'ļ'] +['æĬij', 'éĥģ'] +['çĸ', '¡'] +['èĺ', 'ij'] +['éģİ', 'ç¨ĭ'] +['æ©', '±'] +['éĿ', 'ĵ'] +['大', 'çIJĨ'] +['é«', '¦'] +['åĪĨ', '辨'] +['æ¸', '¤'] +['çĸ', '¤'] +['åĬ¨', 'èĥ½'] +['å¼ł', 'å®¶'] +['ä¸ĩ', 'åįĥ'] +['æ»', '¥'] +['é¥', '¥'] +['åºŁ', 'å¼ĥ'] +['å¸', '³'] +['æ¼', '³'] +['è±', 'IJ'] +['ä»', 'ij'] +['å«', 'ī'] +['å¦', 'Ĵ'] +['çŀ', 'Ĵ'] +['è¡', 'ħ'] +['çĭ', '¸'] +['å¾ģ', 'ç¨ĭ'] +['éĤ', '¯'] 
+['éĥ', '¸'] +['ç¥', 'Ī'] +['ç¥', '·'] +['è¶', '´'] +['ç»ĵæŀĦ', 'æĢ§'] +['è§Ĩ', 'åIJ¬'] +['è¬', 'Ŀ'] +['çĴ', 'Ģ'] +['çĴ', '¨'] +['åĩº', 'å¤Ħ'] +['è¯', 'Ģ'] +['å¾', 'ĺ'] +['å¾', 'Ĭ'] +['çľ', '¨'] +['åĸ', 'ĩ'] +['åı', 'Ń'] +['åĺ', '²'] +['çķ', '¸'] +['å¹²', 'äºĭ'] +['æļ', '§'] +['æ²', 'Ľ'] +['åĦ', 'Ħ'] +['å»', 'ĵ'] +['åİ¿', 'éķ¿'] +['èĥ', 'ļ'] +['çIJ', '¢'] +['çŃ', '·'] +['éĩ', 'ĭ'] +['ä¾', '®'] +['åIJ', '©'] +['åĴ', 'IJ'] +['åĮ', '¿'] +['æĬ¬', 'èµ·'] +['æ³', '£'] +['æ¶', '¤'] +['éº', '½'] +['æĽ', 'Ļ'] +['åī¯', 'éĻ¢éķ¿'] +['åħļ', 'åĴĮ'] +['æķ£', 'åıij'] +['润', 'æ»ij'] +['åĵ', 'º'] +['æĥ', '¬'] +['漫', 'éķ¿'] +['ä¸į', 'æĩĪ'] +['åŁ', 'ł'] +['åĹ', 'ĵ'] +['èĢģ', 'çĪ·'] +['è®', '½'] +['æĪĺ', 'ç»ĦåIJĪ'] +['æ£', 'ł'] +['åħ¨', 'åŁŁ'] +['èł', '¢'] +['è¯', '¡'] +['åīį', 'çŀ»'] +['æķ', 'Ľ'] +['ä¸Ģ', 'å°ģ'] +['å¹', 'Ĥ'] +['èİ', 'Ĩ'] +['è¯Ŀ', 'è¯Ń'] +['ç»Ĩ', 'åĪĻ'] +['å±', '¿'] +['åµ', 'Į'] +['éĢ', 'į'] +['åĺ', '±'] +['æ¸', '²'] +['çĥ', '¯'] +['çĿ', '¹'] +['é¦', 'Ĵ'] +['èħ', '¥'] +['æĬĹ', 'åĩ»'] +['çĿ', '«'] +['èį', 'Ķ'] +['éļ', 'İ'] +['æ³ī', 'æ°´'] +['è¬', 'Ĥ'] +['ç', 'Ĥ¬'] +['åĩı', 'æİĴ'] +['è¸', 'Ĭ'] +['è', '·»'] +['æ·', 'Į'] +['éľ', '¾'] +['å¥ĩ', '纳'] +['å¯', 'Ŀ'] +['æ¤', 'İ'] +['æŁ', '¬'] +['æĸ¯', 'åŁº'] +['åħ¬', 'ç«ĭ'] +['è¨', 'ĵ'] +['é£', 'Ļ'] +['é©', '¿'] +['åĤ', 'µ'] +['èĽ', 'Ļ'] +['ç¯ĩ', '竳'] +['åĪĨ', 'æĶ¯'] +['ä¸Ĭ', 'å¹´'] +['çŃ', 'Ŀ'] +['ç¼', '¤'] +['èĢģ', 'æĹ§'] +['åĻ', '¬'] +['æľ', '¦'] +['èĥ', '§'] +['æ¶Ī', 'è²»'] +['æĵ', 'Ķ'] +['æ¦', '´'] +['æ¿', 'Ĵ'] +['ç³', '¯'] +['æ³', '¸'] +['æį', 'Ĩ'] +['ç»', 'ļ'] +['èµ', 'İ'] +['çIJ', 'IJ'] +['èµ', 'Ĥ'] +['æħ', '®'] +['æ²', 'Į'] +['çĦ', 'Ļ'] +['æĴŃ', 'æĬ¥'] +['æ·', 'ĩ'] +['åĪĩ', 'åħ¥'] +['çij', 'ķ'] +['çĸ', 'µ'] +['éģ', '´'] +['ç¨', 'ļ'] +['ç©', '©'] +['èŀ', 'ĥ'] +['æ£', 'ķ'] +['æĨ', '§'] +['æĨ', '¬'] +['ä¼', 'º'] +['æ¯', 'Ĺ'] +['æį', 'į'] +['æĬ', 'ī'] +['ç´', 'Ĭ'] +['å¼', 'Ľ'] +['æĭ', 'Ń'] +['æĹı', 'èĩªæ²»'] +['åĿ', '·'] +['ç«', '¶'] +['è©', '³'] +['è¿Ħ', 'ä»Ĭ'] +['è°', '´'] +['çŀŃ', 'è§£'] +['æŁ', '¿'] +['é¢', 'Ĭ'] +['ç°', 
'§'] +['çĥŁ', 'èĬ±'] +['ä¾', '¥'] +['çĿ', '¦'] +['éħ', 'Ŀ'] +['æ°', 'ĵ'] +['çIJ', 'ī'] +['å§', 'Ĭ'] +['æ²', '®'] +['æħ', '·'] +['èľ', 'ķ'] +['çij', 'ļ'] +['éĩĩ', 'çŁ¿'] +['åł', '°'] +['åºķ', 'èķ´'] +['èĨ', '³'] +['è¾', 'ķ'] +['éŁ', 'Ń'] +['åĴ', 'Ļ'] +['ç²', '½'] +['åī', 'Ķ'] +['æ²', '¦'] +['èĤ', '´'] +['éķ', '¶'] +['æĺ', '¼'] +['è¾', 'Ĺ'] +['å©', 'ª'] +['åĮ', '®'] +['æĸ', 'ĵ'] +['æ±', '¶'] +['éĥ', '´'] +['éł', '»'] +['çª', 'Ĵ'] +['è¢', '±'] +['åĽ', '±'] +['èĢ', 'ĺ'] +['è', 'ļĮ'] +['çĭ', 'Ļ'] +['çĹ', '¹'] +['ç¥', 'ī'] +['æı', '®'] +['æ·', 'Ĩ'] +['ç£', 'ĭ'] +['éĺ', 'ª'] +['æ', '«'] +['ã', '¸'] +['Ļ', '¶'] +['ã', 'ij'] +['ð£', '²'] +['ä', '¢'] +['ã', 'Ń'] +['ð¬', '¨'] +['ð¬', 'Ģ'] +['ð¬', '®'] +['ð¬', '¯'] +['ð¬', 'ľ'] +['ðª', '¨'] +['ð«', 'Ĺ'] +['ð¬', 'Ĭ'] +['ð¬', '±'] +['ð¬', 'Ł'] +['ä', 'İ'] +['ð', '¡'] +['ä', 'ĥ'] +['ã', 'ł'] +['ð', '©'] +['ð©', '¾'] +['ð¬', 'º'] +['ð¬', 'Ļ'] +['ãĢ', 'Ķ'] +['ãĢ', 'ķ'] +['çļĦ', 'æĹ¶åĢĻ'] +['æľīéĻIJ', 'åħ¬åı¸'] +['ä¹ĭ', 'åIJİ'] +['ä¸ļ', 'åĬ¡'] +['åķ', 'Ĭ'] +['èϽ', 'çĦ¶'] +['æĭ¥', 'æľī'] +['äºĴ', 'èģĶç½ij'] +['éĤ£', 'äºĽ'] +['ä½ł', 'çļĦ'] +['åĨ³', 'å®ļ'] +['éϤ', 'äºĨ'] +['åĽ¢', 'éĺŁ'] +['åı¯', 'æĺ¯'] +['以', 'åIJİ'] +['社', 'åĮº'] +['çļĦ', 'éĹ®é¢ĺ'] +['å¹¶', 'ä¸Ķ'] +['æķĻ', 'å¸Ī'] +['å°±', 'ä¼ļ'] +['天空', 'éĥ¨èIJ½'] +['æľĢ', 'ç»Ī'] +['å½ĵ', 'çĦ¶'] +['ä¹Ł', 'æľī'] +['ç¡®', 'ä¿Ŀ'] +['æĥ³', 'è¦ģ'] +['è´Ń', 'ä¹°'] +['人', 'çļĦ'] +['åIJ', '´'] +['çļĦ', 'åıijå±ķ'] +['ä¸į', 'çŁ¥éģĵ'] +['软', 'ä»¶'] +['æĪij们', 'çļĦ'] +['çζ', 'æ¯į'] +['åī', 'ij'] +['èĢĮ', 'æĺ¯'] +['å®ī', 'æİĴ'] +['åIJİ', 'æĿ¥'] +['çļĦ', 'åľ°æĸ¹'] +['èµ', 'µ'] +['èĢĥ', 'è¯ķ'] +['çªģ', 'çĦ¶'] +['ä¸Ģå®ļ', 'è¦ģ'] +['åζ', 'ä½ľ'] +['è¯Ħ', 'ä»·'] +['åħį', 'è´¹'] +['è´¹', 'ç͍'] +['绣', 'ä¸Ģ'] +['çĦ¶', 'èĢĮ'] +['è¿Ļ', '次'] +['éĿĴ', 'å¹´'] +['人', 'ç±»'] +['äº', '¦'] +['让', '人'] +['è´Łè´£', '人'] +['éĩĩ', 'åıĸ'] +['çļĦ', 'äºĭæĥħ'] +['ä¹Ł', 'ä¼ļ'] +['车', 'è¾Ĩ'] +['æĽ´', 'æĺ¯'] +['强', 'åĮĸ'] +['æĪij', 'åĢij'] +['以', 'åīį'] +['ä¼ĺ', 'åĮĸ'] +['å§Ķåijĺ', 'ä¼ļ'] +['åĽ°', 'éļ¾'] +['å¹´', '度'] 
+['ä½į', 'äºİ'] +['æĮĩ', 'åĩº'] +['åĨį', '次'] +['åĬŀ', 'çIJĨ'] +['æ¯ı', '个'] +['对', 'æĸ¹'] +['è¿Ľè¡Į', 'äºĨ'] +['æľĢ', 'é«ĺ'] +['课', 'ç¨ĭ'] +['身', 'ä¸Ĭ'] +['æĽ¾', 'ç»ı'] +['åĮ»', 'çĶŁ'] +['å®ī', 'è£ħ'] +['æľ', '±'] +['è¿IJ', 'è¡Į'] +['åıĮ', 'æĸ¹'] +['æľĢ', '大çļĦ'] +['æŀĦ', '建'] +['è¿ŀ', 'ç»Ń'] +['çļĦ', 'å°ı'] +['她', 'çļĦ'] +['çŃī', 'çŃī'] +['æĶ¹', 'åĸĦ'] +['åIJĦ', 'ç±»'] +['éģĩ', 'åΰ'] +['æľī', 'çĿĢ'] +['人', 'çī©'] +['æĢ»', 'æĺ¯'] +['è¿ħ', 'éĢŁ'] +['åζ', 'å®ļ'] +['å®ĥ', '们'] +['å®ĺ', 'ç½ij'] +['è¿ĺ', 'è¦ģ'] +['ç»Ī', 'äºİ'] +['æĪ¿', 'åľ°äº§'] +['è¯ģ', 'æĺİ'] +['èĤ¡', '票'] +['åºĶ', 'å½ĵ'] +['èĭ±', 'åĽ½'] +['è¿IJ', 'ç͍'] +['æľĢ', 'æĸ°'] +['享', 'åıĹ'] +['让', 'æĪij'] +['æĻļ', 'ä¸Ĭ'] +['å¾', 'ŀ'] +['å°ı', '说'] +['å°¤åħ¶', 'æĺ¯'] +['è®Ń', 'ç»ĥ'] +['åħ¨', 'å¸Ĥ'] +['æĮij', 'æĪĺ'] +['æľī', 'çĤ¹'] +['带', 'çĿĢ'] +['çļĦ', 'ä¸ľè¥¿'] +['é£İ', 'æł¼'] +['é»Ħ', 'éĩij'] +['å¼ķ', '导'] +['æŃ¤', 'å¤ĸ'] +['æľĢ', 'è¿ij'] +['追', 'æ±Ĥ'] +['强', 'è°ĥ'] +['ä¹Ł', 'åı¯ä»¥'] +['æĦŁ', 'åΰ'] +['èĩª', 'æĪij'] +['çī¹åĪ«', 'æĺ¯'] +['æĪIJ', 'éĥ½'] +['éĢIJ', 'æ¸IJ'] +['å¿«', 'ä¹IJ'] +['ä¹ĭ', 'ä¸Ń'] +['æĬķèµĦ', 'èĢħ'] +['ä»ĸ们', 'çļĦ'] +['æ°', 'ı'] +['å·¥ä½ľ', '人åijĺ'] +['äºĨ', 'ä¸Ģ个'] +['åķ', '¦'] +['ä¸Ģ', 'åĢĭ'] +['åŁº', 'å±Ĥ'] +['æ²Ł', 'éĢļ'] +['第ä¸Ģ', '次'] +['å¹¶', '没æľī'] +['çļĦ', 'å·¥ä½ľ'] +['åľ¨', 'è¿ĻéĩĮ'] +['æŀ', 'ª'] +['æĶ¯', 'æĴij'] +['æĹ¶', 'å°ļ'] +['æĿ¥', 'åΰ'] +['æĶ¶', 'è´Ń'] +['éĿ©', 'åij½'] +['æĺ¯', 'ä¸įæĺ¯'] +['讨', '论'] +['ä¸ļ', '绩'] +['å°±', 'èĥ½'] +['ç«ĭ', 'åį³'] +['è¡Ĺ', 'éģĵ'] +['åľ¨', 'ä¸Ģèµ·'] +['æľĪ', '份'] +['é«ĺ', '端'] +['å¾Ī', 'éļ¾'] +['ä¿Ħ', 'ç½Ĺæĸ¯'] +['æīĭ', '段'] +['åģļ', 'åĩº'] +['ä¼Ĺ', 'å¤ļ'] +['å®ŀ', 'è¡Į'] +['æīĵ', 'å¼Ģ'] +['游', '客'] +['ä¾Ŀ', 'çĦ¶'] +['å°±', 'åĥı'] +['离', 'å¼Ģ'] +['说', 'éģĵ'] +['æĸ°', 'èĥ½æºIJ'] +['æº', 'ª'] +['äº', 'ķ'] +['令', '人'] +['ä¸Ģ', 'åľº'] +['æĪij', 'æĥ³'] +['两', '人'] +['èĩ³', 'å°ij'] +['çļĦ', 'çĶŁæ´»'] +['æĺ¯', '个'] +['èĭ±', 'è¯Ń'] +['æ²Ĵ', 'æľī'] +['æĢĿ', 'èĢĥ'] +['éĻIJ', 'åζ'] +['åı°', 'æ¹¾'] +['ä¸Ģ', 'æĹ¦'] +['çļĦ', 'ä¸Ģ个'] +['é«ĺ', '级'] 
+['åĬŀåħ¬', '室'] +['å¾·', 'åĽ½'] +['æĪij', 'å°±'] +['å®ļ', 'ä½į'] +['éĢĤ', 'åºĶ'] +['æĮĩ', 'æłĩ'] +['åħ¨', 'çľģ'] +['ä¸Ĭ', 'è¿°'] +['å®ĥ', 'çļĦ'] +['åĽŀ', 'å®¶'] +['欧', 'æ´²'] +['éĵģ', 'è·¯'] +['é¼ĵ', 'åĬ±'] +['çļĦ', 'å½±åĵį'] +['é«ĺ', 'æł¡'] +['天', 'ä¸ĭ'] +['é«ĺ', 'è´¨éĩı'] +['æĿŃ', 'å·ŀ'] +['èµĦ', '讯'] +['æĶ¾', 'åľ¨'] +['æľī', 'ä¸Ģ个'] +['å°±', 'è¦ģ'] +['ä¸Ĭ', 'éĿ¢'] +['è§£', 'éĩĬ'] +['éĢIJ', 'æŃ¥'] +['å°½', '管'] +['æľī', 'ä»Ģä¹Ī'] +['çļĦ', 'äºĭ'] +['çĻ»', 'è®°'] +['人æ°ij', 'å¸ģ'] +['è§Ĥ', 'ä¼Ĺ'] +['è§Ĥ', 'å¯Ł'] +['ç͵', 'èĦij'] +['çļĦ', 'åIJĮæĹ¶'] +['ä½ľ', 'ä¸ļ'] +['宣', 'å¸ĥ'] +['çļĦ', 'ä½ľç͍'] +['åĽŀ', 'æĿ¥'] +['éļ¾', '以'] +['æīĢæľī', 'çļĦ'] +['å°ı', 'åѦ'] +['æıIJ', 'åīį'] +['æ¤į', 'çī©'] +['åĩ', '¯'] +['ä¸Ĭ', 'äºĨ'] +['å°±', 'åľ¨'] +['åħĪ', 'åIJİ'] +['æīĭ', 'æľ¯'] +['éĥ', 'Ń'] +['éĿ¢', 'åīį'] +['æ¯ķ', '竣'] +['äºĮ', 'æĺ¯'] +['红', 'èī²'] +['éĺ³', 'åħī'] +['èĭ¹', 'æŀľ'] +['å¾Īå¤ļ', '人'] +['ç»Ļ', 'æĪij'] +['åĵ', '¦'] +['çľ¼', 'çĿĽ'] +['éł', 'Ń'] +['ä¸Ģ', 'æĺ¯'] +['åıijå±ķ', 'çļĦ'] +['åıį', 'åºĶ'] +['æĪ¿', 'å±ĭ'] +['æľŁ', 'å¾ħ'] +['ç§į', 'æ¤į'] +['æĸĩ', 'åѦ'] +['åį³', 'åı¯'] +['é¦ĸ', '次'] +['èĭ±', 'éĽĦ'] +['å¤ļ', '次'] +['åĮħ', 'è£ħ'] +['æ²³', 'åįĹ'] +['ä¹ĭéĹ´', 'çļĦ'] +['ä»į', 'çĦ¶'] +['åIJ¬', 'åΰ'] +['èij£äºĭ', 'éķ¿'] +['è§Ħ', 'åĪĻ'] +['ä¸Ģ', '份'] +['大', 'ä¼Ĺ'] +['使', 'å¾Ĺ'] +['è¿Ľ', 'åı£'] +['ä¸Ģ', 'çīĩ'] +['æĢ§', 'çļĦ'] +['çļĦ', '大'] +['æĪij', 'æĺ¯'] +['äºĴ', 'åĬ¨'] +['æ°', '£'] +['çļ', 'Ĩ'] +['åħ¬åı¸', 'çļĦ'] +['ä¸Ģ', 'è¾¹'] +['åıĬ', 'åħ¶'] +['èī¯', '好çļĦ'] +['æĭĵ', 'å±ķ'] +['å½ĵ', 'å¹´'] +['广', 'åľº'] +['åģļ', 'äºĨ'] +['åŁº', 'äºİ'] +['æıIJ', 'éĨĴ'] +['åħĦ', 'å¼Ł'] +['èĢģ', 'æĿ¿'] +['è¿ij', 'æĹ¥'] +['çĬ¶', 'åĨµ'] +['注', 'éĩį'] +['åĪļ', 'åĪļ'] +['è°ĥ', 'çłĶ'] +['å¿ĥ', 'ä¸Ń'] +['æĬĬ', 'æı¡'] +['éļı', 'åIJİ'] +['ä¸į', 'å¤Ł'] +['åĪĽ', 'ä½ľ'] +['ç«Ļ', 'åľ¨'] +['缸', 'äºĴ'] +['çĸ«æĥħ', 'éĺ²æİ§'] +['å¹´', '代'] +['带', 'åĬ¨'] +['伤', '害'] +['竣', 'çĦ¶'] +['å¼ķ', 'è¿Ľ'] +['ç´¯', '计'] +['让', 'æĪij们'] +['åĽŀ', 'æĶ¶'] +['æĬ¥', 'åIJį'] +['åĬ©', 'åĬĽ'] +['èģĶ', '缣'] +['çŃĸ', 
'çķ¥'] +['åij¨', 'è¾¹'] +['åĭ', 'Ĵ'] +['è¿ĺ', 'åľ¨'] +['æµģ', 'éĩı'] +['寻', 'æī¾'] +['ç͵', 'åĬĽ'] +['èι', 'èζ'] +['è¿ĺ', 'èĥ½'] +['æĭħ', 'ä»»'] +['çļĦæĥħåĨµ', 'ä¸ĭ'] +['çļĦ', 'åİŁåĽł'] +['缺', 'ä¹ı'] +['çIJĥ', 'åijĺ'] +['å²ģ', 'çļĦ'] +['çĶ·', 'åŃIJ'] +['å·¥', 'èµĦ'] +['è¿ijå¹´', 'æĿ¥'] +['åij', 'Ģ'] +['æıIJä¾Ľ', 'äºĨ'] +['她', '们'] +['å®¶', 'åħ·'] +['çĩ', 'ķ'] +['è½»', 'æĿ¾'] +['æł¡', 'åĽŃ'] +['èĢĥ', 'æł¸'] +['åį±', 'éĻ©'] +['åħļ', 'ç»Ħç»ĩ'] +['æĢ»', 'ç»ıçIJĨ'] +['çļĦ', 'æĸ°'] +['çİ»', 'çĴĥ'] +['è¿Ļ', 'ä½į'] +['对', 'æŃ¤'] +['å®¶', '人'] +['çļĦ', 'è¦ģæ±Ĥ'] +['温', '度'] +['æĮĩ', 'æķ°'] +['缴', 'åΰ'] +['æŃ¤', 'æĹ¶'] +['æ¹ĸ', 'åįĹ'] +['éĥ½', 'è¦ģ'] +['ä½ľ', 'åĩº'] +['åIJĦ', 'ä½į'] +['èĢĥ', 'çĶŁ'] +['ä¾Ŀ', 'æį®'] +['说', 'è¯Ŀ'] +['æĪij', 'ä¹Ł'] +['å·¥', 'åİĤ'] +['åıĺ', 'æĪIJ'] +['ä»ĸ', '人'] +['æĪij', 'è§īå¾Ĺ'] +['åIJĦ', '级'] +['ä¼łå¥ĩ', 'ç§ģæľį'] +['ä¸Ĭ', 'åįĩ'] +['好', 'åĥı'] +['åĬł', 'éĢŁ'] +['äºĮ', 'åįģ'] +['è¢', 'ģ'] +['è£ħ', '饰'] +['éĥ½', 'èĥ½'] +['ä¸Ģ', 'å¼ł'] +['åĬ¨', 'æĢģ'] +['å¹´', 'çļĦ'] +['è¿Ļ', 'å°±æĺ¯'] +['ä¹Ł', 'è¦ģ'] +['èµĦ', 'æł¼'] +['æĪĺ', 'äºī'] +['æĦŁ', 'è°¢'] +['åŁ¹', 'èĤ²'] +['天', 'æ°Ķ'] +['女', '士'] +['åı¯èĥ½', 'ä¼ļ'] +['çļĦ', '产åĵģ'] +['ä¹Ł', 'å°±'] +['主è¦ģ', 'æĺ¯'] +['åĪº', 'æ¿Ģ'] +['ç»Ļ', 'ä½ł'] +['大', 'æķ°æį®'] +['åĮ»', 'åѦ'] +['åĪ', '¤æĸŃ'] +['ä»ĸ', '说'] +['表', 'æ¼Ķ'] +['äºļ', 'æ´²'] +['ä¸ĵ', 'é¢ĺ'] +['ç«ŀäºī', 'åĬĽ'] +['éĤ£', 'æł·'] +['å±ķ', 'å¼Ģ'] +['å¹³', 'æĹ¶'] +['æİ¥', 'ä¸ĭæĿ¥'] +['æī¿', '诺'] +['æ³ķ', 'åĽ½'] +['åħ³', 'å¿ĥ'] +['ä¼ļ', 'æľī'] +['éĤĢ', '请'] +['é¢Ħ', 'éĺ²'] +['对', 'æİ¥'] +['好', 'äºĨ'] +['åĴ±', '们'] +['çļĦ', 'æĦŁè§ī'] +['æĢĿ', 'è·¯'] +['éĥ½', '没æľī'] +['çļĦ', 'æĸ¹æ³ķ'] +['女', 'åŃIJ'] +['åı¸', 'æ³ķ'] +['è¿ĺ', 'ä¼ļ'] +['è¶ĬæĿ¥è¶Ĭ', 'å¤ļ'] +['åĽł', 'çĤº'] +['æµ·', 'åįĹ'] +['人', 'æķ°'] +['å°Ĩ', 'ä¼ļ'] +['ä¸ļ', '主'] +['é¤IJ', '饮'] +['å±ħ', 'ä½ı'] +['åıij', 'åĩº'] +['è¿ij', 'æľŁ'] +['å¼ķ', 'é¢Ĩ'] +['æľºåύ', '人'] +['åĩºæĿ¥', 'çļĦ'] +['çľĭ', 'è§ģ'] +['ä¿', 'Ĭ'] +['让', 'ä»ĸ'] +['ä¸į', 'æĥ³'] +['å·¥ä½ľ', 'çļĦ'] +['è¡¥', 'åħħ'] +['æµ', 'ħ'] 
+['çī¹', 'å¾ģ'] +['ä¸Ĭå¸Ĥ', 'åħ¬åı¸'] +['ç¾İ', 'é£Ł'] +['广', '西'] +['æ¯ı', 'ä¸Ģ个'] +['èIJ½', 'åľ°'] +['åĵģ', 'ç§į'] +['åĴĮ', 'è°IJ'] +['å½»', 'åºķ'] +['é«ĺ', 'èĢĥ'] +['æĺ¨', '天'] +['åīį', 'å¾Ģ'] +['çĽij', 'æµĭ'] +['çϾ', '度'] +['åľ¨', 'ä¸ŃåĽ½'] +['çļĦ', 'éľĢæ±Ĥ'] +['亿', 'ç¾İåħĥ'] +['åѦ', 'æľ¯'] +['æĶ¶', 'åΰ'] +['æĿ¿', 'åĿĹ'] +['ä¸Ģ', '段'] +['æŀĦ', 'æĪIJ'] +['ä¼ģä¸ļ', 'çļĦ'] +['表', 'éĿ¢'] +['æķ´', 'çIJĨ'] +['ç»ĵ', 'å©ļ'] +['人', 'å®¶'] +['åģľ', 'æŃ¢'] +['åѦ', 'ç§ij'] +['æĺ¾', 'å¾Ĺ'] +['ä¼ij', 'æģ¯'] +['é¢Ħ', 'æľŁ'] +['æĪĸ', 'æĺ¯'] +['çļĦ', '主è¦ģ'] +['åºĶ', '对'] +['èµ°', 'äºĨ'] +['ä¸Ń', 'éĹ´'] +['èµ°', 'è¿Ľ'] +['åijĪ', 'çݰ'] +['æIJŃ', 'éħį'] +['é¹', 'ı'] +['æĺ¯', 'åĽłä¸º'] +['æĥħ', '绪'] +['å®ļ', 'æľŁ'] +['社ä¼ļ', '主ä¹ī'] +['çŃī', '级'] +['磼', 'çĽ¾'] +['é£ŀ', 'æľº'] +['èĩ³', 'ä»Ĭ'] +['æĶ¶', 'éĽĨ'] +['çļĦ', 'æķħäºĭ'] +['åĪĩ', 'å®ŀ'] +['å®ŀçݰ', 'äºĨ'] +['å½¢', 'æĪIJäºĨ'] +['åįĹ', 'æĸ¹'] +['ä¸Ń', 'åѦ'] +['æµ·', 'æ´ĭ'] +['åIJ¦', 'åĪĻ'] +['æĭį', 'æijĦ'] +['大åѦ', 'çĶŁ'] +['åĩºçݰ', 'äºĨ'] +['æĦı', 'å¤ĸ'] +['ä¹Ł', 'èĥ½'] +['çļĦ', 'èĥ½åĬĽ'] +['åĿIJ', 'åľ¨'] +['åĪĻ', 'æĺ¯'] +['èĢĥ', 'å¯Ł'] +['å°Ĭ', 'éĩį'] +['éĺ²', 'æŃ¢'] +['ç´§', 'å¼ł'] +['读', '书'] +['åĩº', 'è¡Į'] +['å°±', 'æľī'] +['å±¥', 'è¡Į'] +['çݰ代', 'åĮĸ'] +['åĽ½', 'åĬ¡'] +['åĽ½åĬ¡', 'éĻ¢'] +['ç»´', 'ä¿®'] +['åİŁ', 'åĪĽ'] +['æĺ¯', 'æĮĩ'] +['ä¼ij', 'éĹ²'] +['çĤ', '®'] +['æĸ°', 'æĹ¶ä»£'] +['éĢĻ', 'åĢĭ'] +['ä¸į', 'æķ¢'] +['å®Į', 'ç¾İ'] +['ç»Ĩ', 'èĬĤ'] +['éŃ', 'ı'] +['èͬ', 'èıľ'] +['é¢Ĩ导', 'çıŃåŃIJ'] +['è¶ħ', '级'] +['è¡Į', 'æĥħ'] +['人工', 'æĻºèĥ½'] +['åį°', '度'] +['åŁºç¡Ģ', '设æĸ½'] +['åıĪ', 'æĺ¯'] +['èį¯', 'çī©'] +['åIJ¸', 'æĶ¶'] +['åį´', 'æĺ¯'] +['éĥ', 'İ'] +['å¥ĸ', 'åĬ±'] +['çļĦ', 'æľĭåıĭ'] +['ä¿Ŀ', 'çķĻ'] +['è§Ħ', 'å¾ĭ'] +['æĸ°', 'çĸĨ'] +['è¿ĺ', 'åı¯ä»¥'] +['æİ¥', 'è¿ij'] +['æŃ¤', 'åīį'] +['æī¹', 'åĩĨ'] +['æĢİä¹Ī', 'æł·'] +['çļĦ', 'ä½įç½®'] +['ä¸Ģ', 'åĿĹ'] +['æĭĴ', 'ç»Ŀ'] +['顾', '客'] +['ä¹Ł', 'åľ¨'] +['ä¸Ģ', 'çĶŁ'] +['éĥ¨', 'éĺŁ'] +['å¹´', 'åīį'] +['æĸ¹éĿ¢', 'çļĦ'] +['å°Ŀ', 'è¯ķ'] +['羣æŃ£', 'çļĦ'] +['ç¦ģ', 'æŃ¢'] +['è¿ĺ', 
'没æľī'] +['æ°ij', 'çĶŁ'] +['èµ°', 'åIJij'] +['èĦ¸', 'ä¸Ĭ'] +['å½ĵ', '天'] +['éĽĨåĽ¢', 'åħ¬åı¸'] +['çļĦä¸Ģ', 'ç§į'] +['西', 'æĸ¹'] +['åĽŀ', 'åºĶ'] +['ä¸Ģ', '声'] +['常', '常'] +['æıIJ', 'åΰ'] +['èħ¾', '讯'] +['æľį', 'è£ħ'] +['为', 'ä½ķ'] +['äºij', 'åįĹ'] +['å°±', 'ç®Ĺ'] +['ä¼ł', 'æī¿'] +['åıį', 'èĢĮ'] +['ä¸ĩ', 'åIJ¨'] +['è´¢', '产'] +['å¦Ĥ', 'ä¸ĭ'] +['æĹ¥', 'åīį'] +['åİŁ', 'æľ¬'] +['æľĢ', 'éĩįè¦ģçļĦ'] +['认', 'è¯ģ'] +['ä¸Ģ', 'éģĵ'] +['ä¿¡æģ¯', 'åĮĸ'] +['å¾Ĺ', 'åΰäºĨ'] +['é̲', 'è¡Į'] +['æĪij', 'è¦ģ'] +['éĢļ', 'ä¿¡'] +['室', 'åĨħ'] +['èµļ', 'éĴ±'] +['æĶ¶', 'èĹı'] +['è§£åĨ³', 'æĸ¹æ¡Ī'] +['æĪ¿', '产'] +['çĭ', '¼'] +['æ´»', 'åĬĽ'] +['ç»ıæµİ', 'åıijå±ķ'] +['çŃī', 'å¾ħ'] +['ä¹Ł', 'å¾Ī'] +['åĿ', 'ij'] +['å¾Ī', '好çļĦ'] +['éļ¾', '度'] +['ä¸į', 'å¦Ĥ'] +['人æ°ij', 'æĶ¿åºľ'] +['åĩº', 'åıij'] +['åīį', 'æľŁ'] +['æ¼Ķ', 'åijĺ'] +['女', 'çĶŁ'] +['èģļ', 'çĦ¦'] +['审', '计'] +['é¢Ħ', 'æµĭ'] +['ä¾Ŀ', 'æīĺ'] +['äºĶ', 'å¹´'] +['è¡¥', 'è´´'] +['æ¸ħ', 'æĻ°'] +['éª', 'Ĥ'] +['çľĭ', 'èµ·æĿ¥'] +['çļĦ', 'åŃ©åŃIJ'] +['é¢ij', 'éģĵ'] +['ä½ı', 'å®ħ'] +['éĿ¢', 'åIJij'] +['æľĢ', 'ä½İ'] +['æĹ¢', 'çĦ¶'] +['ä¸Ģ', 'å¥Ĺ'] +['æķ°', 'åѦ'] +['群', 'ä½ĵ'] +['åĮĹ京', 'å¸Ĥ'] +['å±ħ', 'çĦ¶'] +['æ°Ľ', 'åĽ´'] +['éĢĶ', 'å¾Ħ'] +['çļĦ', 'åŁºç¡Ģä¸Ĭ'] +['èģĮ', 'è´£'] +['åı¯èĥ½', 'æĺ¯'] +['åĨĽ', 'äºĭ'] +['æĪIJ', 'æķĪ'] +['åŃ©åŃIJ', '们'] +['计ç®Ĺ', 'æľº'] +['èµ', '¤'] +['产ä¸ļ', 'åıijå±ķ'] +['å·¨', '大çļĦ'] +['å·¥', '人'] +['çĶŁ', 'éķ¿'] +['éĥ½', 'åı¯ä»¥'] +['çļĦ', 'æľºä¼ļ'] +['èµĦ', 'è´¨'] +['çĹĽ', 'èĭ¦'] +['ç²ī', 'ä¸Ŀ'] +['å¢', 'ĵ'] +['å¹³', 'å®ī'] +['管', 'éģĵ'] +['è·Ł', 'çĿĢ'] +['饮', 'é£Ł'] +['åķĨ', 'å®¶'] +['å¤ļ', 'å®¶'] +['åı¸', 'æľº'] +['åºĶ该', 'æĺ¯'] +['éĢı', 'éľ²'] +['认', 'å®ļ'] +['è¡Įä¸ļ', 'çļĦ'] +['çļĦ', 'ä¼ģä¸ļ'] +['æ¯ı', 'ä¸Ģ'] +['èĮĥåĽ´', 'åĨħ'] +['è¾ĥ', '大'] +['è´', '¤'] +['大', 'èµĽ'] +['å¤ļ', 'äºĨ'] +['é¸', '¿'] +['临', 'åºĬ'] +['åľ¨', 'è¿Ļ个'] +['çļĦ', 'åĨħ容'] +['éĶĢ', 'éĩı'] +['å¾Ī', 'å°ij'] +['åŃ', 'Ł'] +['ç»´', 'æĮģ'] +['åĴĸ', 'åķ¡'] +['æľ¬', 'åľ°'] +['èī²', '彩'] +['å¹¶', 'éĿŀ'] +['èĢĮ', 'å·²'] +['温', 'æļĸ'] +['èIJ', '§'] 
+['æĬĵ', 'ä½ı'] +['èĢĮ', 'ä¸įæĺ¯'] +['åĸ', 'Ĭ'] +['çļĦ', 'åħ³ç³»'] +['çī©', 'åĵģ'] +['éĤ£', 'æĺ¯'] +['åĨľ', '产åĵģ'] +['è¿Ļ', 'æĹ¶'] +['å©ļ', 'å§»'] +['æ°´', 'æŀľ'] +['æĶ¶', 'èİ·'] +['ä»ĺ', 'åĩº'] +['客æĪ·', '端'] +['æ¼Ķ', 'åĩº'] +['åħ¨', 'æĸ°'] +['è¿Ļ', 'ä¹Łæĺ¯'] +['æĺ¯', 'çͱ'] +['è§Ĥ', '念'] +['æľī', '个'] +['éĢł', 'åŀĭ'] +['èĥľ', 'åĪ©'] +['ä¸ī', 'æĺ¯'] +['è¶ħ', 'å¸Ĥ'] +['åħļ建', 'å·¥ä½ľ'] +['æĶ¾', 'å¿ĥ'] +['线', 'è·¯'] +['æĭĽ', 'çĶŁ'] +['åIJĥ', 'é¥Ń'] +['è½', 'ī'] +['å°½', 'éĩı'] +['è§ģ', 'åΰ'] +['åIJĮæ¯Ķ', 'å¢ŀéķ¿'] +['åįİ', '为'] +['æĪij', 'å¸Ĥ'] +['æıIJ', 'åĩºäºĨ'] +['æ°ij', 'èѦ'] +['åįļ', 'çī©'] +['åįļçī©', 'é¦Ĩ'] +['è¯ļ', 'ä¿¡'] +['åīį', 'éĿ¢'] +['å±±', '西'] +['è¾ħ', 'åĬ©'] +['转', 'ç§»'] +['æĽ´', '为'] +['丰å¯Į', 'çļĦ'] +['åį', '¢'] +['å¿«', 'éĢĴ'] +['æĺ¾', 'èijĹ'] +['çī©', 'èµĦ'] +['åΰ', 'è¾¾'] +['æľī', 'åĪ©äºİ'] +['åij', 'Ĩ'] +['åŃ©åŃIJ', 'çļĦ'] +['ä¸į', 'ä½Ĩ'] +['çłĶç©¶', 'éĻ¢'] +['çͳ', 'æĬ¥'] +['æļ', '¨'] +['æ°ij', 'éĹ´'] +['åį', '»'] +['çļĦ', 'å£°éŁ³'] +['å¸Ĥåľº', 'çļĦ'] +['ä¸Ģ', 'åı¥'] +['çľģ', '级'] +['æĿ¥', 'çļĦ'] +['åĵª', '个'] +['æīį', 'ä¼ļ'] +['åĪĨ', 'éħį'] +['èĶ', '¡'] +['ä»ĸ', 'åľ¨'] +['åħ±', 'æľī'] +['å¡', 'ĺ'] +['èĴ', 'Ĥ'] +['éľ', 'į'] +['åıĤ', 'è§Ĥ'] +['ä¸Ī', '夫'] +['ä¾Ŀ', 'éĿł'] +['æľī', 'æĹ¶'] +['äºĨ', 'å¾Īå¤ļ'] +['ä¸ĸçķĮ', 'æĿ¯'] +['å®¶', 'æĹı'] +['ä¸į', 'éľĢè¦ģ'] +['大', 'å¸Ī'] +['èŀį', 'åħ¥'] +['éĿŀ', 'æ³ķ'] +['çĹħ', '人'] +['åIJİ', 'æľŁ'] +['大家', 'éĥ½'] +['ç½ij', 'åĿĢ'] +['åİŁ', 'æĸĻ'] +['便', 'å®ľ'] +['æ¶', 'Ľ'] +['仿', 'ä½Ľ'] +['å·®', 'è·Ŀ'] +['åı¦ä¸Ģ', 'æĸ¹éĿ¢'] +['产åĵģ', 'çļĦ'] +['èµ', '«'] +['æĥħåĨµ', 'ä¸ĭ'] +['éĴ¢', 'éĵģ'] +['æľ¬', 'ç«Ļ'] +['纳', 'åħ¥'] +['å·²', 'æľī'] +['æľī', '没æľī'] +['ä¼°', '计'] +['é£', 'ĺ'] +['æľŁ', 'è´§'] +['åĢĭ人', 'è³ĩæĸĻ'] +['ä¸ĵä¸ļ', 'çļĦ'] +['çĪĨ', 'åıij'] +['èĩ´åĬĽ', 'äºİ'] +['çİ°åľ¨', 'çļĦ'] +['æľī', 'åĵªäºĽ'] +['çł´', 'åĿı'] +['æķ°åŃĹ', 'åĮĸ'] +['åľ°', 'éĿ¢'] +['é»ij', 'èī²'] +['å¹¼åĦ¿', 'åĽŃ'] +['çļĦ', 'ç²¾ç¥ŀ'] +['äº', 'Ń'] +['导', 'æ¼Ķ'] +['çݰ', 'æľī'] +['æŃ¦', 'åύ'] +['èĭı', 'å·ŀ'] +['çİ', 'Ħ'] +['æ±Ł', '西'] 
+['å»¶', '伸'] +['论', 'æĸĩ'] +['è¾ĥ', '为'] +['çİ©', 'æ³ķ'] +['é¼', 'İ'] +['åIJĮ', 'æŃ¥'] +['éĩĬ', 'æĶ¾'] +['æĽĿ', 'åħī'] +['åĿļ', 'åĨ³'] +['å§Ķ', 'æīĺ'] +['å°Ĩ', 'åľ¨'] +['äºĪ', '以'] +['ä½ľ', 'æĸĩ'] +['èĢĮ', 'åľ¨'] +['ä¼ĺ', 'åħĪ'] +['åĽŀ', 'åİ»'] +['ä¿®', 'å¤į'] +['åĽ½åĨħ', 'å¤ĸ'] +['çŃĸ', 'åĪĴ'] +['åıij', 'æĶ¾'] +['å¿ĥ', 'æĥħ'] +['çļĦ', 'åİĨåı²'] +['éĿ¢', 'è¯ķ'] +['举', 'åĮĹ'] +['ä¿¡', 'åı·'] +['ç²®', 'é£Ł'] +['è¯ģ', '书'] +['æŁIJ', 'äºĽ'] +['è¿IJ', 'ä½ľ'] +['åĨ²', 'åĩ»'] +['çĥŃ', 'çĤ¹'] +['æĹ¶', 'æĹ¶'] +['æĹ¶æĹ¶', '彩'] +['åľ°', 'çĤ¹'] +['ä¸Ģä½ĵ', 'åĮĸ'] +['éļ¾', 'é¢ĺ'] +['æĽ', '°'] +['ç«ĭ', 'åĪ»'] +['æĺ¯', 'éĿŀ常'] +['åħ±', 'åĴĮ'] +['åħ±åĴĮ', 'åĽ½'] +['æ¿Ģ', 'åĬ±'] +['æľīæķĪ', 'çļĦ'] +['å¤Ħ', 'ç½®'] +['该', 'åħ¬åı¸'] +['æ£Ģ', 'éªĮ'] +['èѦ', 'æĸ¹'] +['è´', '¾'] +['äºĨä¸Ģ', 'ä¸ĭ'] +['ä»Ĭ', 'åIJİ'] +['çħ', '®'] +['ç͍', 'åĵģ'] +['读', 'èĢħ'] +['æĪij', 'åľ¨'] +['åĽŀ', 'å¤į'] +['ä¸Ģ', '座'] +['è¿ĺ', '没'] +['å®ļ', 'åζ'] +['没', 'æĥ³åΰ'] +['å¤', '¹'] +['ä¼ł', 'éĢĴ'] +['ä¸Ģ', '款'] +['强', '大çļĦ'] +['çļĦ', 'è¡Į为'] +['å¤ı', '天'] +['åıijåĬ¨', 'æľº'] +['é¢ĨåŁŁ', 'çļĦ'] +['å®ŀéªĮ', '室'] +['ä¸Ģ', 'æĬĬ'] +['æĺ¯', '为äºĨ'] +['éĻķ', '西'] +['æĭħ', 'ä¿Ŀ'] +['è¾¾', 'æĪIJ'] +['è¦ģ', 'æĺ¯'] +['æĺİ', '天'] +['ç»Ļ', 'ä»ĸ'] +['建ç«ĭ', 'äºĨ'] +['ä¸į', 'è¡Į'] +['ä¸Ń', 'æĸĩ'] +['åľ°', '说'] +['åIJİ', 'çļĦ'] +['çĽij', 'æİ§'] +['éĢ', '¸'] +['æĢ»', 'éĥ¨'] +['æľ¬', 'æĸĩ'] +['é¹', '¿'] +['æĻ¯', 'è§Ĥ'] +['çļĦ', '缮æłĩ'] +['èĽ', 'ĩ'] +['åĨ', '¯'] +['ä¸Ń', 'åĮ»'] +['æķĪ', 'åºĶ'] +['产', 'éĩı'] +['åŃ', 'Ŀ'] +['è´¦', 'æĪ·'] +['è¿Ŀ', 'åıį'] +['èij£äºĭ', 'ä¼ļ'] +['京', '举'] +['责任', 'ç¼ĸè¾ij'] +['åķı', 'é¡Į'] +['çα', 'å¿ĥ'] +['èѦ', 'å¯Ł'] +['é¤IJ', 'åİħ'] +['å¸Ĥ', 'æĶ¿åºľ'] +['天', '天'] +['æĸ°', 'é²ľ'] +['éĥij', 'å·ŀ'] +['è¶ħ', 'è¶Ĭ'] +['å½', 'Ń'] +['çŁ¥è¯Ĩ', '产æĿĥ'] +['åĽŀ', 'å¿Ĩ'] +['è·¯', '线'] +['å»ī', 'æ´ģ'] +['éĿĴ', 'å°ijå¹´'] +['åıĸå¾Ĺ', 'äºĨ'] +['çľĭ', 'åΰäºĨ'] +['é¦', '¬'] +['ç²¾', 'åĵģ'] +['åľ°', 'éĵģ'] +['æĮģ', 'æľī'] +['ä¸ĭ', 'äºĨ'] +['æľī', 'æĹ¶åĢĻ'] +['ä¸Ģ', '人'] +['æĴ', 'Ĵ'] +['ä»Ķ', 'ç»Ĩ'] +['èĢģ', 'åħ¬'] 
+['äºĭå®ŀ', 'ä¸Ĭ'] +['èģĶ', 'èµĽ'] +['ä¾ĽåºĶ', 'éĵ¾'] +['é¢Ħ', 'ç®Ĺ'] +['åζéĢł', 'ä¸ļ'] +['å®īåħ¨', 'çĶŁäº§'] +['俱', 'ä¹IJ'] +['俱ä¹IJ', 'éĥ¨'] +['çļĦ', 'æł¸å¿ĥ'] +['æīĵ', 'ç®Ĺ'] +['å½±', 'çīĩ'] +['æIJŃ', '建'] +['ä¹Ł', 'ä¸įä¼ļ'] +['æĭħ', 'å½ĵ'] +['å±Ĥ', 'éĿ¢'] +['åѦ', 'åijĺ'] +['临', 'æĹ¶'] +['缸', 'ç»ĵåIJĪ'] +['对', 'æ¯Ķ'] +['ä»ĸ', 'æĺ¯'] +['æĸ°', 'åĮº'] +['è¿Ľ', 'åİ»'] +['çϾ', 'å¹´'] +['ä¿', '©'] +['å°½', 'å¿«'] +['ç͵åŃIJ', 'åķĨåĬ¡'] +['æĽ´', 'æľī'] +['æ¸ħ', 'çIJĨ'] +['åı¦', 'ä¸Ģ个'] +['åĤ', '»'] +['ä»Ģä¹Ī', 'æł·çļĦ'] +['æĺ¯', 'æľĢ'] +['åij¨', 'å¹´'] +['å¾Ī', '容æĺĵ'] +['åĽ¢', 'ç»ĵ'] +['ç´', 'Ħ'] +['æĹ©', 'å·²'] +['çļĦ', 'åıĺåĮĸ'] +['éľ', 'ŀ'] +['æĹ¥', 'ä¸ĬåįĪ'] +['失', 'åİ»'] +['ä¸Ń', 'åľĭ'] +['çļĦä¸Ģ', 'äºĽ'] +['å°ı', 'åŃ©'] +['ä¸ĭ', 'è·Į'] +['éĶ»', 'çĤ¼'] +['é', 'ij'] +['éij', '«'] +['å¿ĹæĦ¿', 'èĢħ'] +['èĤ¡', 'å¸Ĥ'] +['èµĽ', 'äºĭ'] +['许åı¯', 'è¯ģ'] +['åı¯', 'æĮģç»Ń'] +['åijĬè¯ī', 'è®°èĢħ'] +['éĢ»', 'è¾ij'] +['å¼ķ', 'åħ¥'] +['çļĦ', 'è¿ĩç¨ĭä¸Ń'] +['è§Ĩ', 'è§ī'] +['èĩªæ²»', 'åĮº'] +['è¯ģ', 'æį®'] +['è£ħ', 'ç½®'] +['第ä¸ī', 'æĸ¹'] +['å¹´', 'æĿ¥'] +['å¹¿ä¸ľ', 'çľģ'] +['带æĿ¥', 'äºĨ'] +['éķ¿', 'æ±Ł'] +['访', 'éĹ®'] +['å·®', 'ä¸įå¤ļ'] +['æĺ¯', 'æĪij'] +['éģŃ', 'éģĩ'] +['æĬĵ', '好'] +['é«ĺ', 'è¾¾'] +['å¹¶', 'åľ¨'] +['èĩª', 'è§ī'] +['ä¾ĽåºĶ', 'åķĨ'] +['æĥħ', 'æĦŁ'] +['ä½ı', 'äºĨ'] +['çļĦ', 'èģĮä¸ļ'] +['çļĩ', 'å¸Ŀ'] +['西', 'éĥ¨'] +['åĴĮ', 'å¹³'] +['çļĦ', 'åĬĽéĩı'] +['æ±', 'ª'] +['åħħåĪĨ', 'åıijæĮ¥'] +['æĬķ', 'è¯ī'] +['èµ·', 'åΰ'] +['äºĴ', '缸'] +['æ¾³', 'éŨ'] +['æİ¥', 'åΰ'] +['æ°´', 'æ³¥'] +['模', 'åŀĭ'] +['ä¸Ģ', 'åįĬ'] +['ç§©', 'åºı'] +['æĪij们', 'åľ¨'] +['æī¿', '认'] +['ä¸Ģ', 'éĥ¨åĪĨ'] +['åįł', 'æ¯Ķ'] +['å¦ĩ', '女'] +['ç²', 'ĺ'] +['äºĨè§£', 'åΰ'] +['ä¸Ģå®ļ', 'ä¼ļ'] +['åIJĦ', '大'] +['èµ°', 'åĩº'] +['为', '大家'] +['é«ĺ', 'éĵģ'] +['åı¯ä»¥', 'åľ¨'] +['ä½Ĩ', 'åľ¨'] +['çĶŁæĢģ', 'çݯå¢ĥ'] +['èı', '¯'] +['çļĦ', 'ä»·æł¼'] +['麻', 'çĥ¦'] +['æ¿Ģ', 'åıij'] +['éĤ£', 'å°±'] +['çļĦ', 'æł·åŃIJ'] +['为', 'æŃ¤'] +['天', 'åľ°'] +['çļĦ', '缮çļĦ'] +['åĢº', 'åΏ'] +['å·²', 'ç¶ĵ'] +['åĽĽ', '大'] +['åIJĮæĹ¶', 'ä¹Ł'] 
+['å½¼', 'æŃ¤'] +['æĭ¿', 'åΰ'] +['åIJ«', 'éĩı'] +['åįģ', '大'] +['éļ¾', 'éģĵ'] +['å¼', 'Ĺ'] +['ä¸Ģ', '段æĹ¶éĹ´'] +['çħ§', '顾'] +['æķ°æį®', 'æĺ¾ç¤º'] +['æĪIJ为', 'äºĨ'] +['èµ°', 'åΰ'] +['æľ¬', 'åħ¬åı¸'] +['ç»Ī', '端'] +['ä¹Ł', 'ä¸įæĺ¯'] +['头', 'åıij'] +['大', '约'] +['é£İ', 'æĻ¯'] +['æ¶Ī', 'èĢĹ'] +['审', 'æŁ¥'] +['äºī', 'åıĸ'] +['æ³ķ', 'æ²»'] +['äºĭ', 'çī©'] +['ç¼ĵ', 'è§£'] +['æĥ', '¨'] +['缸åºĶ', 'çļĦ'] +['çļĦ', 'æķĪæŀľ'] +['åıį', 'å¤į'] +['åıijçĶŁ', 'äºĨ'] +['éĢĻ', 'äºĽ'] +['ç»ĥ', 'ä¹ł'] +['åݨ', 'æĪ¿'] +['å¼Ģ', 'æĭĵ'] +['欣', 'èµı'] +['夫', '妻'] +['ä¸į', 'ä¸Ģæł·'] +['产', 'èĥ½'] +['èĬ¯', 'çīĩ'] +['è¦ģ', 'ç´ł'] +['åıį', '对'] +['çİĩ', 'åħĪ'] +['è´§', 'çī©'] +['æĹ¥', 'ç͵'] +['ä½ľ', 'å®¶'] +['æĶ¹', 'è¿Ľ'] +['æĪIJ', 'åĪĨ'] +['åĽł', 'èĢĮ'] +['åĩı', 'èĤ¥'] +['æ½', 'ĺ'] +['å±±ä¸ľ', 'çľģ'] +['åĬ', 'Ŀ'] +['åŁ', 'ĭ'] +['æŃ¦', 'è£ħ'] +['æ±ĩ', 'æĬ¥'] +['ä¸Ģ个', 'æľĪ'] +['çĥŃ', 'éŨ'] +['大', 'éģĵ'] +['æ´»', 'åĭķ'] +['éĥ½', 'å¾Ī'] +['ç͵', '梯'] +['ç´§', 'æĢ¥'] +['åĢº', 'åĬ¡'] +['客', 'æľį'] +['ä¸Ģ', 'éĥ¨'] +['ä½ł', 'æĺ¯'] +['çݰ', 'çĬ¶'] +['æŃ£ç¡®', 'çļĦ'] +['ä¹ĭ', 'å¤Ħ'] +['ç¼ĸ', 'åζ'] +['ä½ł', 'åı¯ä»¥'] +['çŃī', 'åľ°'] +['èİ', 'ī'] +['对', 'è¯Ŀ'] +['æ·ĺ', 'å®Ŀ'] +['è°ĥ', 'èĬĤ'] +['æİĴ', 'æĶ¾'] +['åºĵ', 'åŃĺ'] +['ç´', 'ļ'] +['çļĦ', 'ä¼ĺåĬ¿'] +['æĿĥ', 'å¨ģ'] +['以ä¸ĭ', 'ç®Ģç§°'] +['ä¸Ģ', '项'] +['èģļ', 'éĽĨ'] +['ä¼łç»Ł', 'çļĦ'] +['æ··', 'åIJĪ'] +['è¿Ļä¸Ģ', 'çĤ¹'] +['ä¸Ģ', 'çľ¼'] +['æĹł', 'éĻIJ'] +['èİ·å¾Ĺ', 'äºĨ'] +['éĢī', 'æīĭ'] +['åζ', 'åĵģ'] +['åįı', 'ä½ľ'] +['çĭ¬çī¹', 'çļĦ'] +['ä¸Ģ', '级'] +['è¿Ļ个', 'éĹ®é¢ĺ'] +['æĸ', 'Į'] +['æĺ¯', 'æĪij们'] +['æķĮ', '人'] +['æ¸ħ', 'æ´Ĺ'] +['ä¸Ģ缴', 'åľ¨'] +['å°ı', 'ç±³'] +['çļĦ', 'è¿ĩç¨ĭ'] +['åľ¨', 'åĮĹ京'] +['ä¸Ģ', 'æĶ¯'] +['æĹ©', 'ä¸Ĭ'] +['æĸĩ', 'èīº'] +['ç¦ı', 'åĪ©'] +['é£Ł', 'ç͍'] +['æĦŁ', 'åĬ¨'] +['åħ¨', 'ç¨ĭ'] +['æĶ¯', 'åĩº'] +['æĸ°', '建'] +['å¸', 'ķ'] +['æĺ¾', 'çĦ¶'] +['羣', 'çļĦæĺ¯'] +['æĸ°éĹ»', 'ç½ij'] +['èĥ½', 'åIJ¦'] +['åįı', 'åĬ©'] +['亲', 'èĩª'] +['å¾Ī', 'æľī'] +['çϼ', 'å±ķ'] +['æĦı', '大'] +['æĦı大', 'åĪ©'] +['ç͵', 'ç½ij'] +['æĹ¥', 'çĽĬ'] +['çĨ', '±'] 
+['èĤĮ', 'èĤ¤'] +['çĶ·', 'æĢ§'] +['ç»Ħ', '建'] +['çŃī', 'éĹ®é¢ĺ'] +['æ¶Ī', 'éϤ'] +['æĬ¤', 'çIJĨ'] +['å¡ij', 'æĸĻ'] +['ä¹Į', 'åħĭ'] +['ä¹Įåħĭ', 'åħ°'] +['åķĨ', 'æłĩ'] +['çIJ', '³'] +['æĸ°', 'æīĭ'] +['çļĦ', 'çī¹çĤ¹'] +['åĴ', '¬'] +['å½ĵ', 'ä¸ĭ'] +['设计', 'å¸Ī'] +['èµĶ', 'åģ¿'] +['第', 'åįģ'] +['æĻºèĥ½', 'åĮĸ'] +['å¼Ģåıij', 'åĮº'] +['åı¯ä»¥', 'éĢļè¿ĩ'] +['åħ±äº§', 'åħļ'] +['åİī', '害'] +['çģµ', 'æ´»'] +['æĹ¶', 'åħī'] +['éĥ¨', 'ä½į'] +['人', 'æĸĩ'] +['è¿Ľ', 'æĿ¥'] +['ä¹ĭ', 'æīĢ以'] +['ä¸ī', 'åįģ'] +['çļĦ', 'åѦçĶŁ'] +['éĺ²', 'æĬ¤'] +['åĽ½', '产'] +['æ·±åľ³', 'å¸Ĥ'] +['éĤ£', 'å°±æĺ¯'] +['åΰ', 'ä½į'] +['çī¹', 'æľĹ'] +['çľĹ', 'æĻ®'] +['å®ŀ', 'æĹ¶'] +['åı°', 'çģ£'] +['èĢĮ', 'ä¸į'] +['æĮĩ', 'å®ļ'] +['åĿ', 'Ŀ'] +['èħIJ', 'è´¥'] +['çī¹', 'å®ļ'] +['å¢ŀ', 'éĢŁ'] +['æłĩ', 'çѾ'] +['æĪ¿', 'ä»·'] +['æĦ', 'ģ'] +['贯彻', 'èIJ½å®ŀ'] +['æĢ§', 'è´¨'] +['çłĶç©¶', 'çĶŁ'] +['ç¾İ', '容'] +['æī¹', 'è¯Ħ'] +['ç©¶', '竣'] +['人åĬĽ', 'èµĦæºIJ'] +['éĸĭ', 'å§ĭ'] +['åĽŀ', 'å½Ĵ'] +['èIJ¥', 'åķĨ'] +['èIJ¥åķĨ', 'çݯå¢ĥ'] +['ä¸ŃåĽ½', '人'] +['çļĦ', 'åŁºæľ¬'] +['è¯Ŀ', 'é¢ĺ'] +['æłĩåĩĨ', 'åĮĸ'] +['西', 'èĹı'] +['åĭ', '¾'] +['çļĦ', '设计'] +['ç®Ģåįķ', 'çļĦ'] +['å¤į', 'åζ'] +['æ¸IJ', 'æ¸IJ'] +['以', 'å¤ĸ'] +['èģĶ', 'åĬ¨'] +['两', '次'] +['æĢ§', 'åĴĮ'] +['æĽ´', '大'] +['çļĦ', 'åIJįåŃĹ'] +['éŁ', '¦'] +['ä½ł', 'è¦ģ'] +['å¢ĥ', 'å¤ĸ'] +['æĹ©', 'æľŁ'] +['åĪĿ', 'æŃ¥'] +['è´¦', 'åı·'] +['害', 'æĢķ'] +['æĺ¨', 'æĹ¥'] +['åĪļ', 'æīį'] +['ç¥ŀ', 'ç§ĺ'] +['ç²¾', 'å¿ĥ'] +['æµģ', 'éĢļ'] +['åħ¨', 'æĸ¹ä½į'] +['以', 'å¾Ģ'] +['ä¹Ł', 'å°Ĩ'] +['æĺ¯', 'ä¸ŃåĽ½'] +['åĽ½å®¶', '级'] +['å°Ĩ', 'åĨĽ'] +['æij', 'Ĭ'] +['æľĢ', '为'] +['第ä¸Ģ', 'æĹ¶éĹ´'] +['æ¶Ī', 'æ¯Ĵ'] +['å°Ĩ', 'äºİ'] +['å¨ģ', 'èĥģ'] +['èĭ±', 'æĸĩ'] +['æīĭ', 'ä¸Ń'] +['çIJĥ', 'è¿·'] +['è§Ĥ', 'çľĭ'] +['离', 'å©ļ'] +['æľ¬', 'åľŁ'] +['åĪĨ', 'æķ£'] +['æĻ', '´'] +['è¦ģ', '注æĦı'] +['浪', 'è´¹'] +['管', 'æİ§'] +['åĩº', 'åĶ®'] +['æĢ»', 'è£ģ'] +['ä¸Ģ', 'éĺµ'] +['å¨', 'ĩ'] +['äºĶ', '个'] +['å½ĵ', 'åĪĿ'] +['çºł', '纷'] +['ä¸ĵ', 'ç͍'] +['å¤ĩ', 'æ¡Ī'] +['åĪĿ', 'æľŁ'] +['å®ĥ', 'æĺ¯'] +['åĮº', 'åĿĹ'] +['åĮºåĿĹ', 'éĵ¾'] 
+['大', 'è¿ŀ'] +['è¿Ļ', 'ç±»'] +['åıĺ', 'æĪIJäºĨ'] +['éĤĦ', 'æĺ¯'] +['åįļ', '客'] +['çı¾', 'åľ¨'] +['ä¸Ģ', 'æĸ¹'] +['å®ĮæĪIJ', 'äºĨ'] +['è¿Ļ个', 'æĹ¶åĢĻ'] +['åħ¨', 'å¹´'] +['ä¸Ĭ', '线'] +['ç½', 'IJ'] +['ç«ŀ', 'èµĽ'] +['åĩºçīĪ', '社'] +['åĵ¥', 'åĵ¥'] +['å¯', '«'] +['å¾Ĺ', '以'] +['èĬ±', 'åĽŃ'] +['äºĨ', 'èµ·æĿ¥'] +['èĦ±è´«', 'æĶ»åĿļ'] +['çļĦ', 'åİŁåĪĻ'] +['讲', 'è§£'] +['æ¶Ī', 'åĮĸ'] +['æįŁ', '害'] +['æļĤ', 'æĹ¶'] +['å¾Ĺ', 'çŁ¥'] +['éĢĤ', 'ç͍'] +['éŨ', 'åºĹ'] +['è§£', '读'] +['æĻ®', 'åıĬ'] +['人æ°ij', 'æ³ķéĻ¢'] +['åī¯', '主任'] +['å¿ĥ', 'çģµ'] +['è¯Ĭ', 'æĸŃ'] +['ç¾İ', '女'] +['æŁ', '¯'] +['å¹´', '以æĿ¥'] +['æ´»', 'è·ĥ'] +['åĢŁ', 'åĬ©'] +['åħ±', '建'] +['è¯ī', '讼'] +['æĶ¾', 'æĿ¾'] +['çªĹ', 'åı£'] +['ä¼ģ', 'æ¥Ń'] +['åĬł', 'æĭ¿'] +['åĬłæĭ¿', '大'] +['ä¹°', 'äºĨ'] +['主', 'æµģ'] +['æĩĤ', 'å¾Ĺ'] +['å°Ĩ', 'åħ¶'] +['éĢı', 'æĺİ'] +['å·¥ä½ľ', 'ä¸Ń'] +['èĤ¡', 'ä»·'] +['æ¡£', 'æ¡Ī'] +['没æľī', 'ä»»ä½ķ'] +['åijĬ', 'çŁ¥'] +['å¹´', 'åĪĿ'] +['æĹ¥', 'ä¸ĭåįĪ'] +['åİĤ', 'åķĨ'] +['èĬĤ', 'å¥ı'] +['主', '导'] +['è£', 'Ŀ'] +['åħ³éĶ®', 'è¯į'] +['èģĬ', '天'] +['åĨĻ', 'ä½ľ'] +['æĶ¹éĿ©', 'å¼ĢæĶ¾'] +['æľī', 'æľĽ'] +['éĢļ', 'æĬ¥'] +['èIJ', 'Į'] +['æĢ»', 'é¢Ŀ'] +['çŁŃ', 'æľŁ'] +['ä¸Ģ', 'çķª'] +['çĶŁæ´»', 'çļĦ'] +['åĮĸ', 'çļĦ'] +['æĺ¥', '天'] +['è¿Ļ', 'åľº'] +['æĸ°å¼Ģ', 'ä¼łå¥ĩ'] +['æĺ¯', 'è¦ģ'] +['å°ļ', 'æľª'] +['åıĺ', 'æĽ´'] +['ä¸Ģ', 'åij¨'] +['客', 'è§Ĥ'] +['æĹ¥', 'èĩ³'] +['é¹', '°'] +['çİ', '²'] +['å°Ĩ', 'æĿ¥'] +['客', '人'] +['åıĺ', 'éĿ©'] +['说', 'äºĨ'] +['åİŁ', 'çIJĨ'] +['èģĮ', 'åĬ¡'] +['åıĪ', 'æľī'] +['ä¸Ģ', 'åı¥è¯Ŀ'] +['æĦŁ', 'åıĹåΰ'] +['ç¬Ķ', 'èĢħ'] +['ç§»', 'æ°ij'] +['西', 'åįĹ'] +['ä¹ĥ', 'èĩ³'] +['æŃ£', 'è§Ħ'] +['åĪĿ', 'ä¸Ń'] +['çĬ', '¬'] +['å½ĵ', 'äºĭ'] +['å½ĵäºĭ', '人'] +['æĪij们', 'è¦ģ'] +['åħ¥', 'åı£'] +['éĤ£', 'æĹ¶'] +['æľīéĻIJ', '责任'] +['å°ij', '女'] +['è¿Ļä¹Ī', 'å¤ļ'] +['åĪĨ', 'åħ¬åı¸'] +['å®ĩ', 'å®Ļ'] +['çļĦ', 'éĢīæĭ©'] +['å§IJ', 'å§IJ'] +['åıij', 'èµ·'] +['è»', 'į'] +['æĽ´å¥½', 'åľ°'] +['éĻĨ', 'ç»Ń'] +['æľ¬', 'æľįåĭĻ'] +['å«', '©'] +['èµ¶', 'ç´§'] +['èĦĤ', 'èĤª'] +['第äºĮ', '天'] +['æĪij', 'ä¼ļ'] +['两', 
'ä½į'] +['æķ', '²'] +['åħ¬å®ī', 'æľºåħ³'] +['ç§ijæĬĢ', 'åĪĽæĸ°'] +['å°º', '寸'] +['è¾IJ', 'å°Ħ'] +['å®Ĺ', 'æķĻ'] +['转', 'æį¢'] +['åĩº', 'çİ°åľ¨'] +['ä¸Ģ', 'é¢Ĺ'] +['æľŁ', 'éĻIJ'] +['åIJĮåѦ', '们'] +['åĮĹ', 'æĸ¹'] +['ä½ł', 'å°±'] +['ä¸Ģ带', 'ä¸Ģè·¯'] +['èĢģ', 'å©Ĩ'] +['游æĪı', 'çݩ家'] +['çļĦ', 'ç»ĵæŀľ'] +['è¡¥', 'åģ¿'] +['å¤ĸ', 'è´¸'] +['对', 'å¾ħ'] +['ç»´', 'çĶŁç´ł'] +['ç»ıéĶĢ', 'åķĨ'] +['è¿ĺ', 'å°Ĩ'] +['åŃIJ', '女'] +['æĽ´', 'é«ĺ'] +['ä¸į', '大'] +['éī´', 'å®ļ'] +['让', 'ä»ĸ们'] +['æīĢè°ĵ', 'çļĦ'] +['æŃ»', 'äºĨ'] +['帮', 'æī¶'] +['åĵ²', 'åѦ'] +['以ä¸Ĭ', 'çļĦ'] +['çļĦ', 'åħ³éĶ®'] +['æĹ©', 'å°±'] +['æĬ¥', 'ä»·'] +['éģµ', 'å®Ī'] +['æī©', 'å¼ł'] +['æĺ¯', 'å¾Ī'] +['å¼Ģ', 'éĢļ'] +['æĸ°', 'åĬł'] +['æĸ°åĬł', 'åĿ¡'] +['ç¿»', 'è¯ij'] +['询', 'éĹ®'] +['é¸', 'Ń'] +['ä½ĵ', 'åĨħ'] +['两', '个人'] +['çĪ', '¹'] +['éľ', 'ľ'] +['乡æĿij', 'æĮ¯åħ´'] +['çĿ¡', 'è§ī'] +['å®ĺ', 'åijĺ'] +['åĪĽ', 'å§ĭ'] +['åĪĽå§ĭ', '人'] +['ä¼Ĺ', '人'] +['åį³', '便'] +['çĸ«', 'èĭĹ'] +['ä¼ģä¸ļ', 'å®¶'] +['æ¸', '£'] +['ç²¾', 'åĬĽ'] +['å¤ĸ', 'éĥ¨'] +['èģª', 'æĺİ'] +['è¿Ļ', 'ä¹Ł'] +['å½ķ', 'åıĸ'] +['åĨ²', 'çªģ'] +['åħ¨', '身'] +['åŃ£', 'èĬĤ'] +['忽', 'çĦ¶'] +['çļĦ', 'æĢģ度'] +['åĤ¨', 'å¤ĩ'] +['ä¿Ŀ', 'åħ»'] +['çļĦ', 'æĥ³æ³ķ'] +['ä¸Ĭæµ·', 'å¸Ĥ'] +['æIJº', 'æīĭ'] +['çļĦ', 'ä¿¡æģ¯'] +['åķĨ', 'åľº'] +['çļĦ', 'æĢĿæĥ³'] +['æĿĥ', 'åĬĽ'] +['毫', 'æĹł'] +['æĢĢ', 'åŃķ'] +['硬', 'ä»¶'] +['åĨħ', 'èĴĻåı¤'] +['æİ¢', '讨'] +['åħ»', 'çĶŁ'] +['çļĦ', '表çݰ'] +['空', 'ä¸Ń'] +['æģIJ', 'æĢĸ'] +['å¾Ī', 'é«ĺ'] +['ç»ıæµİ', '社ä¼ļ'] +['ä¸Ĭ', 'æĿ¥'] +['å»¶', 'ç»Ń'] +['éĩį', 'å¤į'] +['éĺ²', 'èĮĥ'] +['çļĦ', 'å½¢å¼ı'] +['æľĪ', 'åºķ'] +['èĢģ', '年人'] +['绿', 'åĮĸ'] +['å±±', 'åĮº'] +['æĭ¿', 'åĩº'] +['æĹħ', '客'] +['æĽ´', 'æį¢'] +['åħ¬', '主'] +['èĬĤ', '约'] +['åħ¨', 'åİ¿'] +['åĽŀ', 'æĬ¥'] +['çIJĨ', 'æĢ§'] +['çĸ¯', 'çĭĤ'] +['æ¶ī', 'å«Į'] +['åī§', 'æĥħ'] +['åĨ¬', 'åŃ£'] +['åIJİ', 'ç»Ń'] +['è¿Ļæĺ¯', 'ä¸Ģ个'] +['æ¼Ķ', '讲'] +['ä¸Ģ', 'å±Ĥ'] +['æľīåħ³', 'éĥ¨éŨ'] +['æĹł', 'å¥Ī'] +['ç§į', 'ç±»'] +['缸åħ³', 'çļĦ'] +['æĪĸèĢħ', 'æĺ¯'] +['æī¶', 'æĮģ'] +['å¤ļ', 'æķ°'] +['çļĦ', 'ä½ľåĵģ'] 
+['ä¸ĭ', 'ä¸ĢæŃ¥'] +['å¸Ī', 'åĤħ'] +['é«ĺéĢŁ', 'åħ¬è·¯'] +['好', 'åıĭ'] +['ä¼ĺç§Ģ', 'çļĦ'] +['è¿Ľ', 'äºĨ'] +['æģIJ', 'æĢķ'] +['äºĨ', 'åIJ§'] +['大', 'è§Ħ模'] +['çļĦ', 'ä¸ĸçķĮ'] +['æĢĢ', 'çĸij'] +['å·', '·'] +['åħ´', 'å¥ĭ'] +['æĪ', '°'] +['æĿij', 'éĩĮ'] +['æľĭåıĭ', 'åľĪ'] +['åĨ¬', '天'] +['ä¸Ńåįİ', '人æ°ij'] +['åįı', 'åķĨ'] +['è¯Ħ', 'éĢī'] +['æĹ', 'Ń'] +['å¢ŀåĬł', 'äºĨ'] +['åıĹ', '伤'] +['ä¸Ģ', 'èĤ¡'] +['便', 'æį·'] +['ä¸', 'ij'] +['é¹', '¤'] +['å¤ĸ', 'è§Ĥ'] +['å·¥ç¨ĭ', 'å¸Ī'] +['åĴĮ', 'åħ¶ä»ĸ'] +['è¿Ļ', 'å°±'] +['ä¸Ńå°ı', 'ä¼ģä¸ļ'] +['西', 'åĮĹ'] +['åĽ½æľī', 'ä¼ģä¸ļ'] +['èĭ¥', 'æĺ¯'] +['åı¯', 'æĥľ'] +['çĶŁ', 'æĹ¥'] +['åĩ', '½'] +['ä¹°', 'åįĸ'] +['ç¥Ŀ', 'ç¦ı'] +['人æ°ij', '群ä¼Ĺ'] +['åħī', 'æĺİ'] +['åħ¬', 'å¯ĵ'] +['æĺ¯', 'è°ģ'] +['æĪij', 'çŁ¥éģĵ'] +['è¯Ń', 'æĸĩ'] +['æķı', 'æĦŁ'] +['ä¸įéĶĻ', 'çļĦ'] +['æĿ¥', '讲'] +['æ³¢', 'åĬ¨'] +['çļĦ', '第ä¸Ģ'] +['åľ°', 'éľĩ'] +['åľ¨', 'åħ¨åĽ½'] +['骨', 'å¹²'] +['å®ī', 'ç½®'] +['å®¶', 'ç͵'] +['ä¸İ', 'æŃ¤'] +['ä¸İæŃ¤', 'åIJĮæĹ¶'] +['åıĹ', 'çģ¾'] +['çĥŃ', '线'] +['çļĦ', 'æĬĢæľ¯'] +['æµĭ', 'éĩı'] +['ä¾Ŀ', 'èµĸ'] +['ä¸ŃåĽ½', 'çļĦ'] +['çī¹', 'æĢ§'] +['è¾ĥ', 'é«ĺ'] +['è¸', '©'] +['ä¼ļ', 'åľ¨'] +['建', 'éĢł'] +['导', 'èĪª'] +['æĥ³', 'èµ·'] +['åħ¨', 'ä¸ĸçķĮ'] +['建', 'æĿIJ'] +['ç¯', 'Ģ'] +['çļĦ', 'åŁºç¡Ģ'] +['èĩªåĬ¨', 'åĮĸ'] +['åīį', 'åIJİ'] +['çĿ¡', 'çľł'] +['æİ¨', 'è¡Į'] +['æį®', 'äºĨè§£'] +['ä»Ģä¹Ī', 'æĹ¶åĢĻ'] +['ä¸į', 'åĸľæ¬¢'] +['çħ¤', 'çĤŃ'] +['éĤ£ä¹Ī', 'å¤ļ'] +['å¸Ĥåľº', 'åĮĸ'] +['ä¸į管', 'æĺ¯'] +['ç«ĭ', 'åľº'] +['éĥ½', '没'] +['课', 'é¢ĺ'] +['æĪij们', 'å°Ĩ'] +['è¿ĩ', 'çļĦ'] +['åĨį', 'åĬłä¸Ĭ'] +['çĪ', '¾'] +['身', 'æĿIJ'] +['çĶ·', '女'] +['è¿ľ', 'è¿ľ'] +['çĶ·', 'çĶŁ'] +['èĩªèº«', 'çļĦ'] +['è´Ł', 'æĭħ'] +['çϾ', 'ä¸ĩ'] +['西', 'çıŃ'] +['西çıŃ', 'çīĻ'] +['åĩĢ', 'åĪ©æ¶¦'] +['æ¾³', '大'] +['澳大', 'åĪ©äºļ'] +['ä¸į', 'åİ»'] +['æī¿', 'åıĹ'] +['楼', 'çĽĺ'] +['å¢ĥ', 'åĨħ'] +['æ··', 'åĩĿ'] +['æ··åĩĿ', 'åľŁ'] +['æĢĿæĥ³', 'æĶ¿æ²»'] +['å¸Ĥ', 'åĮº'] +['æĭĽ', 'æłĩ'] +['åĽ¢', 'ä½ĵ'] +['è¿Ľ', '度'] +['åĨĽ', 'éĺŁ'] +['åıį', 'å¼¹'] +['äºĨä¸Ģ', 'äºĽ'] +['æİ¥', 'å¾ħ'] +['çļĦ', 
'åŃ¦ä¹ł'] +['éħį', 'éĢģ'] +['é£Łåĵģ', 'å®īåħ¨'] +['æĽ¿', '代'] +['æĺ¯', '以'] +['éĢļ', 'ç͍'] +['çłĶç©¶', 'æīĢ'] +['ç¦', 'ħ'] +['æī', 'Ķ'] +['éļĶ', '离'] +['ä¸ĩ', 'å¹³æĸ¹ç±³'] +['çļĦ', 'è§Ħå®ļ'] +['ç»Ļ', 'æĪij们'] +['æ¿Ģ', 'åħī'] +['ä¼ļ', 'åĩºçݰ'] +['çŁŃ', 'ä¿¡'] +['ç©¿', 'çĿĢ'] +['æ²Ī', 'éĺ³'] +['æķĻ', 'æĿIJ'] +['éĺ²', 'çĸ«'] +['ä¼ĺ', 'èī¯'] +['约', 'å®ļ'] +['æĪij', 'çľģ'] +['åħ¬', 'æ°ij'] +['éģ¸', 'æĵ'] +['é쏿ĵ', 'ĩ'] +['å·²', 'æĪIJ为'] +['ä¸į', 'å¿ħ'] +['ç¥ĸ', 'åĽ½'] +['å¹¶', 'æľª'] +['åľŁ', '壤'] +['å¾®', 'ç¬ij'] +['äºĭä¸ļ', 'åįķä½į'] +['çļĦ', '游æĪı'] +['åħ¬', '示'] +['åIJĪçIJĨ', 'çļĦ'] +['çª', 'Ŀ'] +['æ°Ķ', '象'] +['å®¶', 'ä¸Ń'] +['亮', '缸'] +['åį«', 'æĺŁ'] +['è®°', 'è½½'] +['è§Ĩ', 'éĩİ'] +['åľ°åĮº', 'çļĦ'] +['ä½Ĩ', 'ä»ĸ'] +['èĤĮ', 'èĤī'] +['äºı', 'æįŁ'] +['åĬŀ', 'åѦ'] +['ä¸Ģ', 'è¡Į'] +['è¯ŀ', 'çĶŁ'] +['åıijå¸ĥ', 'çļĦ'] +['çļĦ', 'æľįåĬ¡'] +['çļĦ', 'çłĶç©¶'] +['åij¨', 'æľ«'] +['产ä¸ļ', 'åĽŃ'] +['é«ĺ', '温'] +['æĪIJåĬŁ', 'çļĦ'] +['æŃ¥', '骤'] +['åŃĺ', 'åĤ¨'] +['åŃIJ', 'åħ¬åı¸'] +['让', '她'] +['ä¸Ń', 'æľī'] +['åĺī', '宾'] +['å¦', '®'] +['æĺİ', 'å¹´'] +['äºĨ', 'åIJĹ'] +['äºī', 'è®®'] +['æĪ', 'Ī'] +['ä¸Ģ', 'æľ¬'] +['ç¾İ丽', 'çļĦ'] +['ä½ł', '说'] +['大', '人'] +['æĶ»', 'çķ¥'] +['ä¸į', 'æľĥ'] +['å¾ħ', 'éģĩ'] +['ä¸Ģ', 'è¾Ĩ'] +['çīĪæĿĥ', 'æīĢæľī'] +['æ°ij', 'ä¼Ĺ'] +['åĬŁ', '夫'] +['å±ķ', 'ä¼ļ'] +['大', 'èĦij'] +['æ¯ı', 'æľĪ'] +['å°ı', '麦'] +['æµĻæ±Ł', 'çľģ'] +['çļĦ', 'æīĢæľī'] +['ä¸ĭ', 'æ»ij'] +['èĵĿ', 'èī²'] +['è¦ģ', 'æĥ³'] +['åѦçĶŁ', 'çļĦ'] +['å½ĵ', 'ä½ł'] +['ä½ľ', 'æĪĺ'] +['å®¶', '乡'] +['å¤ļ', 'åIJį'] +['é«ĺ', 'äºİ'] +['åĿļ', '强'] +['è¿ŀ', 'éĶģ'] +['åIJİ', 'æŀľ'] +['人', 'äºĭ'] +['ç´', 'ħ'] +['æ¿Ģ', 'åĬ¨'] +['è¿Ľ', 'æĶ»'] +['ç©', 'Ĩ'] +['ä¸', 'ĺ'] +['让', 'èĩªå·±'] +['以', 'æŃ¤'] +['夫', '人'] +['å¼Ģ', '设'] +['æ°Ķ', 'è´¨'] +['鸡', 'èĽĭ'] +['çĦ¡', 'æ³ķ'] +['åIJĥ', 'äºĨ'] +['åĪĨåĪ«', '为'] +['èģĶåIJĪ', 'åĽ½'] +['å½ĵ', '代'] +['å¦Ĥæŀľ', 'æĺ¯'] +['è¿ľ', 'ç¨ĭ'] +['åĸ', 'Ĥ'] +['è®°', 'ä½ı'] +['æ¸ħ', 'åįķ'] +['åIJĪä½ľ', 'ä¼Ļä¼´'] +['åİ»', 'åģļ'] +['æķħ', 'éļľ'] +['模', 'æĭŁ'] +['å¸Ī', 'çĶŁ'] +['åīį', 
'æĿ¥'] +['ç͵è§Ĩ', 'åī§'] +['çĥŃ', 'çα'] +['éľ²', 'åĩº'] +['é«ĺ', 'å±Ĥ'] +['ç͵', 'åύ'] +['纪', 'å¾ĭ'] +['å¼Ģåıij', 'åķĨ'] +['éķ¿', 'å®ī'] +['è½½', 'ä½ĵ'] +['çļĦ', 'å°±æĺ¯'] +['被', '人'] +['åıĹ', 'çIJĨ'] +['篮', 'çIJĥ'] +['èİ', 'İ'] +['交', 'ç»Ļ'] +['æľªæĿ¥', 'çļĦ'] +['两', '大'] +['åIJķ', 'å¸ĥ'] +['çŃī', '人'] +['çļĦ', 'æĹ¥åŃIJ'] +['åIJĪä½ľ', '社'] +['æĮij', 'éĢī'] +['åŃĺ', '款'] +['ç³»ç»Ł', 'çļĦ'] +['æĬĬ', 'å®ĥ'] +['没æľī', 'ä»Ģä¹Ī'] +['ä»İ', 'æŃ¤'] +['ä¸Ń', 'åįĪ'] +['çĸ¼', 'çĹĽ'] +['å·©', 'åĽº'] +['浪', '漫'] +['缸åħ³', 'éĥ¨éŨ'] +['éķ¿', 'åŁİ'] +['纤', 'ç»´'] +['ä¸Ĭ', 'éŨ'] +['çĪĨ', 'çĤ¸'] +['èµ·', 'çĤ¹'] +['çļĦ', 'éĢļçŁ¥'] +['èĢĮ', 'æĿ¥'] +['çļĦ', 'èĢģ'] +['æīĭ', 'éĩĮ'] +['è¯Ń', 'éŁ³'] +['è¾Ľ', 'èĭ¦'] +['æ±Łèĭı', 'çľģ'] +['ç͍', 'äºĨ'] +['身份', 'è¯ģ'] +['æľī', 'åĬ©'] +['æľīåĬ©', 'äºİ'] +['çī©', 'èģĶç½ij'] +['åĩº', 'éŨ'] +['å¼Ł', 'åŃIJ'] +['æĥ', '¹'] +['è¿Ļä»¶', 'äºĭ'] +['æĪij们', 'åı¯ä»¥'] +['çļĦ', 'çĶŁåij½'] +['æľīä¸Ģ', 'ç§į'] +['åºĹ', 'éĵº'] +['åıĮ', 'æīĭ'] +['çļĦ', 'æ¶Īæģ¯'] +['èĢIJ', 'å¿ĥ'] +['å°´', 'å°¬'] +['éĤ£', '天'] +['é¦ĸ', 'æī¹'] +['æĺ¯ä¸Ģ', 'å®¶'] +['人', 'æ°Ķ'] +['åıį', 'æŃ£'] +['æĪij', 'åĴĮ'] +['å®ł', 'çī©'] +['ä¸į', '对'] +['寻', 'æ±Ĥ'] +['缸', 'ä¼¼'] +['åľ¨', 'ç¾İåĽ½'] +['åı«', 'åģļ'] +['åĹ', 'İ'] +['ç«ĭ', 'è¶³'] +['ç͍', 'éĢĶ'] +['åħ', 'Ĩ'] +['大', 'æ°Ķ'] +['åIJij', 'ä¸Ĭ'] +['ä»ĸ', 'å°±'] +['é¡¹çĽ®', '建设'] +['èĭ¥', 'å¹²'] +['æĺ¯', 'æľī'] +['æ¿Ģ', 'æĥħ'] +['çļĦ', 'æĦıä¹ī'] +['æĺ', 'Ń'] +['严éĩį', 'çļĦ'] +['å¯Ĩ', 'éĽĨ'] +['èĪŀ', 'è¹Ī'] +['èį£', 'èİ·'] +['èİ·', 'æĤī'] +['æ±Ł', 'åįĹ'] +['åģĩ', 'å¦Ĥ'] +['æĪ·', 'å¤ĸ'] +['线', 'ç´¢'] +['ç§ģ', '人'] +['转åŀĭ', 'åįĩ级'] +['çļĦ', 'ä»·å̼'] +['åįķ', 'çĭ¬'] +['èĢģ', 'çϾå§ĵ'] +['å°į', 'æĸ¼'] +['åĽ½éĻħ', 'åĮĸ'] +['ä¼°', 'å̼'] +['æľįåĬ¡', 'ä¸ļ'] +['èĩ', 'Ń'] +['æİī', 'äºĨ'] +['è§£åĨ³', 'äºĨ'] +['ä¹Ł', 'ä¸įèĥ½'] +['åħ', '¹'] +['æĸ¯', 'çī¹'] +['æķħ', 'æĦı'] +['è¿ĩ', '度'] +['èĬĤ', 'æĹ¥'] +['çϽ', 'çĻľ'] +['çϽçĻľ', 'é£İ'] +['ç»§', 'æī¿'] +['äºĨ', 'ä¸įå°ij'] +['äºĮ', '人'] +['è§ģ', 'éĿ¢'] +['æĥ³', 'æĥ³'] +['å¤į', 'åIJĪ'] +['康', 'å¤į'] +['åİ¿', 
'åŁİ'] +['åľ¨', 'åĽ½åĨħ'] +['åľº', 'åľ°'] +['é϶', 'çĵ·'] +['è¿Ļ', '项'] +['çľ¼', 'ä¸Ń'] +['çł', '¸'] +['æĦŁè§ī', 'åΰ'] +['æŀľ', 'çĦ¶'] +['æĶ¾', 'åħ¥'] +['约', 'æĿŁ'] +['æİĴ', 'æŁ¥'] +['车', '主'] +['çļĦ', 'æĦıæĢĿ'] +['æĸ°', 'åŁİ'] +['æĥ³', 'çĿĢ'] +['éģ', 'Ĥ'] +['èĮ¶', 'åı¶'] +['ä¹°', 'æĪ¿'] +['åĨľ', 'æĪ·'] +['é«ĺ', 'æīĭ'] +['çİī', 'ç±³'] +['æĸ°åĨł', 'èĤºçĤİ'] +['çħ§', 'æĺİ'] +['æĮĩ', 'åįĹ'] +['è¸', '¢'] +['æķij', 'æı´'] +['æĻ¯', 'çĤ¹'] +['ç¨İ', 'æĶ¶'] +['çļĦ', 'æīĭ'] +['æŃ£', '好'] +['è¦ģ', 'æĬĬ'] +['éļı', 'æĦı'] +['åħ¶å®ŀ', 'æĺ¯'] +['ç»Ļ', 'èĩªå·±'] +['è°Ī', 'åΤ'] +['æ¯ı天', 'éĥ½'] +['æĢģ', 'åĬ¿'] +['é¢Ħ', '约'] +['åİĨåı²', 'ä¸Ĭ'] +['å®Ŀ', 'è´Ŀ'] +['åīį', 'è¿Ľ'] +['ä¹Łå°±æĺ¯', '说'] +['çļĦ', 'æĦıè§ģ'] +['åı£', '罩'] +['åİĺ', 'ç±³'] +['èĬ±', 'è´¹'] +['ä½ĵèĤ²', 'æĬķæ³¨'] +['åħ¬ä¼Ĺ', 'åı·'] +['èijĹåIJį', 'çļĦ'] +['å¼Ģ', 'æĪ·'] +['æĭį', 'åįĸ'] +['å²ģ', 'æľĪ'] +['åĨħ', 'æ¶µ'] +['å®Įæķ´', 'çļĦ'] +['é«ĺ', 'åİĭ'] +['åħ¬åĬ¡', 'åijĺ'] +['使ç͍', 'çļĦ'] +['çĶŁäº§', '线'] +['妹', '妹'] +['èµ°', '访'] +['æĺ¯', 'åı¯ä»¥'] +['åľ¨', 'å®¶'] +['æļ´', 'åĬĽ'] +['æ³°', 'åĽ½'] +['è´¨', 'çĸij'] +['ä¸į', 'éģİ'] +['天çĦ¶', 'æ°Ķ'] +['缺', 'çĤ¹'] +['å°ı', 'åŀĭ'] +['ä¸įä»ħ', 'æĺ¯'] +['é»ij', 'æļĹ'] +['æ¢', '¨'] +['æĸĩ', 'æĹħ'] +['è¦ģ', 'æľī'] +['ä¸Ń', 'å±±'] +['çļĦ', 'æķ°æį®'] +['å¾Ĺ', 'å¾Ī'] +['以', '便'] +['对', 'ä»ĸ'] +['åĬł', '以'] +['çϼ', 'çı¾'] +['设', 'å®ļ'] +['èĤļ', 'åŃIJ'] +['éĿ', 'ĸ'] +['å¥ī', 'çĮ®'] +['ä¸į', 'åıĺ'] +['åı£', 'ç¢ij'] +['åľ¨', 'åĵªéĩĮ'] +['ä½', 'IJ'] +['è¿Ļ', '两个'] +['çļĦ', 'æĸ¹åIJij'] +['æŀ', '«'] +['äºĮ', '次'] +['çīĩ', 'åĮº'] +['éł', 'IJ'] +['ç£', 'Ĭ'] +['æĭ¿', 'çĿĢ'] +['å·²ç»ı', 'æĪIJ为'] +['ä¹ĭ', 'ä¸Ĭ'] +['å®Ĺ', 'æĹ¨'] +['奶', '奶'] +['é«ĺæĸ°', 'åĮº'] +['社', 'æľĥ'] +['è·Ł', '踪'] +['æľįåĬ¡', 'ä¸Ńå¿ĥ'] +['æī', '¯'] +['æīĭ', 'æĮĩ'] +['礼', 'çī©'] +['宿', 'èĪį'] +['ç͍', 'å¿ĥ'] +['æıIJé«ĺ', 'äºĨ'] +['亮', 'çĤ¹'] +['ä¸į', 'æĦ¿æĦı'] +['æĴŃ', 'æĶ¾'] +['å¤ļå°ij', 'éĴ±'] +['没', 'ä»Ģä¹Ī'] +['æķ°', 'åįģ'] +['æĢ»', 'çĽij'] +['çļĦ', 'åŁİå¸Ĥ'] +['æī¾', 'åΰäºĨ'] +['åĨħ', 'åľ°'] +['åΰ', 'çİ°åľ¨'] +['æĪĺæĸĹ', 
'åĬĽ'] +['åİŁ', 'å§ĭ'] +['åĥ', '§'] +['åĢĴ', 'æĺ¯'] +['æľĢ', 'åħ·'] +['è´«åĽ°', 'æĪ·'] +['éĢģ', 'åΰ'] +['级', 'åĪ«'] +['åĩº', 'èµĦ'] +['æĪª', 'æŃ¢'] +['ç§į', 'åŃIJ'] +['èĥ½', 'ä¸įèĥ½'] +['幸', 'è¿IJ'] +['èĸ', 'ĩ'] +['项', 'éĵ¾'] +['æĮĤ', 'çīĮ'] +['ä¸Ģ', '樣'] +['ä¹ĺ', '客'] +['èIJ½', 'åIJİ'] +['ä½Ĩ', 'æĪij'] +['æĹ©', 'åľ¨'] +['åĬ¨', '漫'] +['å¹³', 'çŃī'] +['对', 'ä½ł'] +['ä¸į', 'æĢķ'] +['å¤ĸ', 'çķĮ'] +['å¤ļå¹´', 'æĿ¥'] +['é¦ĸ', '个'] +['æ²³', 'åįĹçľģ'] +['æĪĸ', 'åħ¶ä»ĸ'] +['éķľ', '头'] +['åįĹ', 'æĺĮ'] +['ä¸Ģ', 'éĿ¢'] +['éĢłæĪIJ', 'çļĦ'] +['å´', 'Ķ'] +['çŃ', 'Ĵ'] +['æķĻèĤ²', 'éĥ¨'] +['åľ°', 'åŁŁ'] +['æĺĨ', 'æĺİ'] +['å·´', 'é»İ'] +['æīĭ', '游'] +['ä¸Ģ', 'æĹ¶'] +['çł', 'į'] +['é¡¶', '级'] +['åħ±', '计'] +['åİŁ', 'æ²¹'] +['è¾ī', 'çħĮ'] +['说', 'æĺ¯'] +['æĸ°åįİ', '社'] +['ç»ıåİĨ', 'äºĨ'] +['ä¸į', 'æŃ¢'] +['è¦ģ', 'ä¹Ī'] +['èĢħ', 'çļĦ'] +['æĢ»', 'æĬķèµĦ'] +['è¡Į', 'é©¶'] +['ä¸Ĭ', 'å¸Ŀ'] +['å¹´', '纪'] +['çIJ', '¼'] +['ä¼ł', '说'] +['ç²¾', 'èĭ±'] +['æĸ¹', 'éĴĪ'] +['æ±Ł', 'æ¹ĸ'] +['æĪIJ', 'çĤº'] +['æĢ»', 'éĩı'] +['æĬķ', 'æĶ¾'] +['åĬ¨', 'çĶ»'] +['èĹ', '¤'] +['ç͵', 'æºIJ'] +['éĴ', 'Ļ'] +['åIJĮ', 'è¡Į'] +['æĻ®éĢļ', 'çļĦ'] +['åĽ¾ä¹¦', 'é¦Ĩ'] +['è¯Ī', 'éªĹ'] +['æħĪ', 'åĸĦ'] +['è¿Ļ', '份'] +['主æĮģ', '人'] +['å°±', 'è¿Ļæł·'] +['èĢĮ', 'æĪIJ'] +['èĩªè¡Į', '车'] +['ä¸ŃåĽ½', 'çī¹èī²'] +['èĤ¿', 'çĺ¤'] +['åIJ', '¾'] +['å¼Ł', 'å¼Ł'] +['åıĹ', 'çĽĬ'] +['éĢīæĭ©', 'äºĨ'] +['æĺİæĺ¾', 'çļĦ'] +['æĬ¥', 'èĢĥ'] +['ç¬ij', 'éģĵ'] +['éĽĸ', 'çĦ¶'] +['温', 'å·ŀ'] +['éĿŀ', 'æ´²'] +['ç§į', 'ç§į'] +['åıĤåĬł', 'äºĨ'] +['è´§', 'è¿IJ'] +['éļı', '便'] +['å°±', '没æľī'] +['ç¸', '£'] +['央', 'è§Ĩ'] +['ç©¿', 'è¶Ĭ'] +['çļĦ', 'çݰ象'] +['åĩł', '次'] +['çļĦ', 'é£İéĻ©'] +['æŃĮ', 'æĽ²'] +['æľ¬', 'å±Ĭ'] +['å¹´', 'åĨħ'] +['ä¸į', 'è¶ħè¿ĩ'] +['è¿ĩ', 'å¤ļ'] +['å¿ħé¡»', 'è¦ģ'] +['ç»ĵ', '论'] +['åĢŁ', 'éī´'] +['ç¥ŀ', 'å¥ĩ'] +['æľŁ', 'æľĽ'] +['ä¸ĵ', '享'] +['éĿŀ常', 'éĩįè¦ģ'] +['æĦıè¯Ĩ', 'åΰ'] +['åIJĪ', 'å¹¶'] +['æĬĬ', 'èĩªå·±'] +['å¥Ĺ', 'è£ħ'] +['éŃĶ', 'æ³ķ'] +['å¤ı', 'åŃ£'] +['ä¸į', 'åĥı'] +['å¢ĥ', 'çķĮ'] +['æĥĬ', 'åĸľ'] +['æľīä¸Ģ', '天'] +['çĦ¦', 
'çĤ¹'] +['æĪij', '认为'] +['åħ°', 'å·ŀ'] +['ç͵', 'æ°Ķ'] +['èģĶç³»', 'æĪij们'] +['ç§ij', 'æĻ®'] +['她', '说'] +['çļĦ', 'æĸĩ竳'] +['å¥ĩ', 'æĢª'] +['åıĭ', '好'] +['饮', 'æĸĻ'] +['çļĦ', 'æĶ¯æĮģ'] +['çŃĶ', 'åºĶ'] +['éĩį', 'éĩı'] +['çij', '¶'] +['åĩı', 'è½»'] +['ç§ijåѦ', 'å®¶'] +['å·´', '西'] +['éĩijèŀį', 'æľºæŀĦ'] +['åħļ', 'å§Ķ书记'] +['貸', '款'] +['ç²¾', 'èĩ´'] +['ä»İ', 'æľª'] +['åį°', 'åĪ·'] +['åĽŀ', '顾'] +['é¦ĸ', 'éĥ½'] +['åıij', 'èĤ²'] +['éĹ®', 'éģĵ'] +['è¾¾', 'åΰäºĨ'] +['å¿į', 'ä¸įä½ı'] +['æīį', 'æľī'] +['æįIJ', 'èµł'] +['ä½Ľ', 'æķĻ'] +['ä¸į', 'æ¸ħ'] +['éĺŁ', 'éķ¿'] +['缸', 'åıį'] +['æĬ¥', 'èѦ'] +['大', 'åħ¨'] +['欧', '缣'] +['帮', 'å¿Ļ'] +['çļĦ', 'æĻĤåĢĻ'] +['缮', 'å½ķ'] +['è¶³', '以'] +['èī°', 'éļ¾'] +['ä»ĸ', 'ä¹Ł'] +['å·¥', 'ä½ľèĢħ'] +['头', 'èĦij'] +['缺', 'éĻ·'] +['æĪIJç«ĭ', 'äºĨ'] +['å°±', 'å¼Ģå§ĭ'] +['认', 'åIJĮ'] +['é»Ħ', 'èī²'] +['çĹħ', 'æĥħ'] +['覺', 'å¾Ĺ'] +['è¿Ļ', '两'] +['ä¿¡', 'ä»°'] +['åľĭ', 'å®¶'] +['ä¸įä»ħä»ħ', 'æĺ¯'] +['çĭ¬', 'å®¶'] +['èά', 'çļĦ'] +['æĿIJ', 'è´¨'] +['æµ·', 'ä¸Ĭ'] +['çĤº', 'äºĨ'] +['æľºåĬ¨', '车'] +['缸å½ĵ', 'äºİ'] +['å¤ļåħĥ', 'åĮĸ'] +['æĽ´', '大çļĦ'] +['èĽ', '®'] +['åģĩ', 'æľŁ'] +['å¼ı', 'çļĦ'] +['交éĢļ', 'è¿IJè¾ĵ'] +['çľģ', 'å§Ķ'] +['ä¸į', 'ç®Ĺ'] +['æĶ¾', 'ä¸ĭ'] +['éĹ', '¯'] +['人', 'åľ¨'] +['港', 'åı£'] +['æĹ¨', 'åľ¨'] +['åij½', '令'] +['æŁIJ', '个'] +['å¹³', '稳'] +['åıª', '好'] +['人', '人'] +['äº', 'ŀ'] +['äºĮ', 'ç»´'] +['äºĮç»´', 'çłģ'] +['æŀģ', '为'] +['åĪ«', 'å¢ħ'] +['åħ¶', 'ä½Ļ'] +['大', 'äºĭ'] +['主管', 'éĥ¨éŨ'] +['æĹł', 'éĶ¡'] +['éĹ', 'µ'] +['éģŃ', 'åΰ'] +['说', 'è¿ĩ'] +['为', 'ä½ł'] +['è§£', 'çŃĶ'] +['éªĮ', 'æĶ¶'] +['çļĦ', 'ç»ıéªĮ'] +['åĮ¹', 'éħį'] +['çģ«', 'ç®Ń'] +['豪', 'åįİ'] +['æŁIJ', 'æŁIJ'] +['çļĦ', 'æĹ¶ä»£'] +['书', 'éĿ¢'] +['æģĴ', '大'] +['å»¶', 'éķ¿'] +['ä¸Ģ', 'åIJĮ'] +['æľª', 'èĥ½'] +['交', 'æį¢'] +['çĶ¢', 'åĵģ'] +['çŃī', 'åΰ'] +['åĪĨ', '离'] +['æīĵ', 'ç͵è¯Ŀ'] +['å¹²', 'çĩ¥'] +['è¾ĥ', 'å¤ļ'] +['å¤ļå¹´', 'çļĦ'] +['èĥĮæĻ¯', 'ä¸ĭ'] +['为', 'ä¾ĭ'] +['æijĺ', 'è¦ģ'] +['å´Ľ', 'èµ·'] +['æŃ¤', 'åĪ»'] +['æľī', 'æľºä¼ļ'] +['æĿ¡', '款'] +['é¢Ĩ导', 'å°ıç»Ħ'] +['çļĦ', 
'身ä½ĵ'] +['åįķ', 'ä¸Ģ'] +['央', 'è¡Į'] +['ä¸įæĸŃ', 'æıIJé«ĺ'] +['ä»·å̼', 'è§Ĥ'] +['èĬ', '½'] +['èIJ', 'į'] +['æ³ķå¾ĭ', 'æ³ķè§Ħ'] +['ä¸į', 'éĶĪ'] +['ä¸įéĶĪ', 'éĴ¢'] +['åĩº', 'äºİ'] +['èĻļ', 'æĭŁ'] +['æį®', 'æĤī'] +['çĥ¦', 'æģ¼'] +['åħ¨', 'æĸ°çļĦ'] +['æī«', 'æıı'] +['çĻ»', 'éĻĨ'] +['èīºæľ¯', 'å®¶'] +['çļĦ', 'é£Łçī©'] +['çļĦ', 'åŃĺåľ¨'] +['客', 'åİħ'] +['æĪij们', 'å°±'] +['æŁ¥çľĭ', 'æĽ´å¤ļ'] +['è¯Ħ', '审'] +['å¸Ĥ', 'åł´'] +['è¬', 'Ľ'] +['å·¨', '头'] +['ä¸ŃåĽ½', 'ç»ıæµİ'] +['äºĨ', 'èĩªå·±çļĦ'] +['åĨ³', 'è®®'] +['çĽijçĿ£', '管çIJĨ'] +['æĬķ', '票'] +['åĨį', '度'] +['è¡Į', 'çĤº'] +['注', 'åħ¥'] +['ä½ľä¸º', 'ä¸Ģ个'] +['æ¯ı个人', 'éĥ½'] +['åįķ', 'åħĥ'] +['è¦ģ', 'çŁ¥éģĵ'] +['被', '称为'] +['ä¹ĭ', 'éĻħ'] +['è§£', 'éϤ'] +['ä¸', '¸'] +['æº', '«'] +['ä¸ī', 'æĺŁ'] +['é²ľ', 'æĺİ'] +['ä¹Ł', 'éĥ½'] +['æĹ¶', 'æľº'] +['åĩº', 'æīĭ'] +['æĥħ', 'å½¢'] +['åķĨ', 'è´¸'] +['éĢī', '举'] +['对', 'èĩªå·±'] +['çĶŁ', 'åĬ¨'] +['åħĭ', 'æľį'] +['个', 'ä½ĵ'] +['èĭ', 'ij'] +['ç¨', '±'] +['大', 'åݦ'] +['æĺ¯', '对'] +['åĪ©', 'æģ¯'] +['è¿IJåĬ¨', 'åijĺ'] +['åĮĸ', 'è§£'] +['åīį', '沿'] +['æĦŁ', 'æģ©'] +['æĢ»', 'ä¹ĭ'] +['é«ĺæĸ°', 'æĬĢæľ¯'] +['åĿĩ', '为'] +['åħ¨', 'åĮº'] +['æ°Ķ', 'æ°Ľ'] +['åı¯ä»¥è¯´', 'æĺ¯'] +['ä½ı', '宿'] +['åħļåijĺ', 'å¹²éĥ¨'] +['åĹ', '¯'] +['è·µ', 'è¡Į'] +['çļĦ', 'ä¸ĵä¸ļ'] +['èĢĥ', 'éªĮ'] +['èķ', '¾'] +['åħ¬', 'åŃIJ'] +['çļĦ', 'çĬ¶æĢģ'] +['æ½®', 'æµģ'] +['ä¿¡', 'æīĺ'] +['è´', '¼'] +['åIJĦ', 'æĸ¹'] +['æķij', 'åĬ©'] +['éĿŀ常', 'çļĦ'] +['æ¡¥', 'æ¢ģ'] +['åħ¬', 'æĸ¤'] +['ä¼¼', 'çļĦ'] +['çľĭ', '好'] +['å±Ģ', 'éĥ¨'] +['å®ī', 'éĿĻ'] +['éħį', 'ä»¶'] +['常', 'è§Ħ'] +['å¼Ģ', '车'] +['第äºĮ', '次'] +['ä¸Ĭ', '级'] +['åıĤ', 'èµĽ'] +['å®¶', 'å±ŀ'] +['强', 'åĬ¿'] +['åľ¨', 'ä»ĸ'] +['åIJij', 'åīį'] +['ä¹ĭ', 'åľ°'] +['éĥ', '¡'] +['è¡Į', 'ç¨ĭ'] +['èѦ', 'åijĬ'] +['è§Ħå®ļ', 'çļĦ'] +['åķĨ', 'åŁİ'] +['äºĶ', '大'] +['æķĻ', '室'] +['åįģ', 'è¶³'] +['æīĢ以', 'åľ¨'] +['å°Ĩ', 'ç»§ç»Ń'] +['çŃī', 'æĸ¹å¼ı'] +['å®¶', 'ä¼ģä¸ļ'] +['交', 'ä»ĺ'] +['çĤ¹', 'è¯Ħ'] +['ç»ĵ', 'ç®Ĺ'] +['ä¹Ł', 'åı¯'] +['å¤ĸ', 'æ±ĩ'] +['è¿Ļç§į', 'æĥħåĨµ'] +['æİĪ', 'äºĪ'] +['å¸ĥ', 'ç½®'] 
+['æĪIJç«ĭ', 'äºİ'] +['é¢Ħ', 'èѦ'] +['管çIJĨ', '人åijĺ'] +['å©ļ', '礼'] +['ç»ĵæĿŁ', 'åIJİ'] +['åħ¥', 'éĢī'] +['æĹł', 'æ¯Ķ'] +['åĴĮ', 'åıijå±ķ'] +['çϽ', 'éħĴ'] +['çİ©', 'åħ·'] +['ä¸ĩ', 'ç¾İåħĥ'] +['çļĦ', 'æĪIJ绩'] +['æĭį', 'çħ§'] +['èĢĥèĻij', 'åΰ'] +['ä¼ģä¸ļ', 'åıijå±ķ'] +['äºĨ', '个'] +['çĶŁ', 'æ°Ķ'] +['çļĦ', '女人'] +['äºĶ', 'åįģ'] +['çĪ·', 'çĪ·'] +['纽', '约'] +['éĥ½', '被'] +['ä¸Ĭ', '课'] +['çĽ', '¡'] +['ä¼łç»Ł', 'æĸĩåĮĸ'] +['æ½ľ', 'åľ¨'] +['åıij', 'å°Ħ'] +['ä¸Ģ', '身'] +['éĺ²', 'å®Ī'] +['åĪ', '®'] +['é¢ĺ', '缮'] +['åľ¨', 'åĨħçļĦ'] +['ç¾İ', '好çļĦ'] +['è¿ĻéĩĮ', 'çļĦ'] +['ä¸Ģ', 'ä¸Ŀ'] +['人', 'åĿĩ'] +['åĢ¡', '导'] +['身', 'åIJİ'] +['æī©', 'å±ķ'] +['大', 'éŨ'] +['å°±', '被'] +['该', 'é¡¹çĽ®'] +['æŀ¶', 'æŀĦ'] +['ä¸Ģ', 'åı£'] +['ä¿¡æģ¯', 'æĬĢæľ¯'] +['å¼Ģ', 'ä¸ļ'] +['æĶ¶', 'åıĸ'] +['ç½ij', '页'] +['æĶ¯', 'æı´'] +['å°ģ', 'éĹŃ'] +['å¡ij', 'éĢł'] +['大', 'èĥĨ'] +['å¿«éĢŁ', 'åıijå±ķ'] +['çľĭ', 'ä¼¼'] +['æ¸', 'Ŀ'] +['è¿Ļæł·', 'ä¸Ģ个'] +['模', 'åĿĹ'] +['注æĦı', 'åΰ'] +['çł´', 'è§£'] +['èĩª', 'ä»İ'] +['åijµ', 'åijµ'] +['ä¹ĭ', 'å¾Į'] +['ä¹ĭ', 'æĹħ'] +['è·Ł', 'æĪij'] +['æ³ķ', '人'] +['æİĴè¡Į', 'æ¦ľ'] +['åĿļ', 'å®Ī'] +['好', 'å¤Ħ'] +['çŁ³', '头'] +['å¹¶', 'å°Ĩ'] +['èĪ', '±'] +['æŃ', 'ĩ'] +['两', '岸'] +['å¤ļ', 'ä¹ħ'] +['象', 'å¾ģ'] +['个æĢ§', 'åĮĸ'] +['çļĦ', 'è§Ĵ度'] +['å¸', 'Ĩ'] +['ç¦ı', 'å·ŀ'] +['æŁ¥', 'å¤Ħ'] +['两', 'åĽ½'] +['åIJ¸å¼ķ', 'äºĨ'] +['é¦ĸ', 'å¸Ń'] +['大', 'åĵ¥'] +['é¤', 'Ĭ'] +['涨', 'å¹ħ'] +['éĢī', 'ç͍'] +['許', 'å¤ļ'] +['èIJ½', 'æĪ·'] +['åĵĪ', 'å°Ķ'] +['åĵĪå°Ķ', '滨'] +['åģļ', 'ä»Ģä¹Ī'] +['以', 'åħį'] +['é¾', 'į'] +['æĹł', 'éľĢ'] +['åΰåºķ', 'æĺ¯'] +['æĢ', '¡'] +['åijĬè¯ī', 'ä½ł'] +['éĺ²', 'æ°´'] +['è¿Ļ', 'æĹ¶åĢĻ'] +['欢', 'ä¹IJ'] +['转', 'åIJij'] +['è¿Ļ个', 'åľ°åĽ¾'] +['åħ¥', 'é©»'] +['èįī', 'åİŁ'] +['æĹ¶ä»£', 'çļĦ'] +['åıĺ', 'åĬ¨'] +['åĬłå¼º', '对'] +['åģ¶', 'å°Ķ'] +['å®Ī', 'æĬ¤'] +['æ°Ķ', '温'] +['人', 'éĹ´'] +['æľĿ', 'é²ľ'] +['ç»ı', 'è´¹'] +['åĽŃ', 'æŀĹ'] +['å·¥', 'åľ°'] +['è§Ħ', 'æł¼'] +['åĩł', 'åįģ'] +['è¯ķ', 'åĽ¾'] +['å¦', 'ĥ'] +['éĤ£', 'æĹ¶åĢĻ'] +['å¼ĺ', 'æī¬'] +['ä¸ļ', 'çķĮ'] +['çļĦ', 'éĢŁåº¦'] 
+['ä¼ļ', 'ä¸įä¼ļ'] +['èIJ¥', 'æĶ¶'] +['å°ıå¾®', 'ä¼ģä¸ļ'] +['çľĭ', 'è¿ĩ'] +['æĬĬ', 'ä»ĸ'] +['éģµ', '循'] +['è¿Ļ', 'è¾¹'] +['没æľī', '人'] +['å£', '¶'] +['æ¹ĸ', 'åįĹçľģ'] +['æŀģ', 'åħ¶'] +['çļĦ人', 'çĶŁ'] +['ä»ĸ', 'è¿ĺ'] +['转åĮĸ', '为'] +['èµ°', 'è¿ĩ'] +['æĬ±', 'çĿĢ'] +['çīĽ', '奶'] +['ä¸ĩ', '亩'] +['å¿ĥ', 'æĢģ'] +['æĹ¥å¸¸', 'çĶŁæ´»'] +['ä½ĵ', 'æ£Ģ'] +['æĻ', 'ĥ'] +['çŃī', 'é¢ĨåŁŁ'] +['æĩī', '該'] +['åı¯ä»¥', 'çľĭåΰ'] +['æī¾', 'ä¸įåΰ'] +['èĢģ', 'å¹´'] +['æĬĬ', 'æĪij'] +['积', 'åĪĨ'] +['梳', 'çIJĨ'] +['ç»', '³'] +['çļĦ', 'æĶ¿æ²»'] +['å¸Ŀ', 'åĽ½'] +['éĻª', 'ä¼´'] +['æ´Ľ', 'éĺ³'] +['åħ¬', 'æŃ£'] +['å¼Ģ', 'åı£'] +['çī¹èī²', 'çļĦ'] +['åĽ°', 'å¢ĥ'] +['ä¸Ĭ', 'æľī'] +['ç«ĭ', 'ä½ĵ'] +['æīĵ', 'å·¥'] +['åķ¤', 'éħĴ'] +['åľ¨', 'éĤ£éĩĮ'] +['éĤ£', 'è¾¹'] +['个', 'åĪ«'] +['ä¸Ģå®ļ', 'æĺ¯'] +['çļĦéĩįè¦ģ', 'æĢ§'] +['主', 'å¼ł'] +['åĴĮ', 'æľįåĬ¡'] +['ä¸Ĭ', 'ç½ij'] +['è¡¥', 'åĬ©'] +['åıª', 'éľĢ'] +['å¼', '¦'] +['éģ', '®'] +['åĬĽ', 'äºī'] +['度', 'è¿ĩ'] +['èij', '¬'] +['é¡¿', 'æĹ¶'] +['éĦ', 'ī'] +['纺', 'ç»ĩ'] +['åľ°', 'åĿĹ'] +['ä¿¡ç͍', 'åį¡'] +['ç½ļ', '款'] +['åijĬè¯ī', 'æĪij'] +['éĽ', 'Ļ'] +['书', 'çĶ»'] +['è¨Ń', 'è¨Ī'] +['æĢ»', 'ä¼ļ'] +['åΤ', 'åĨ³'] +['ä¿¡', 'èªī'] +['个', 'èĤ¡'] +['å¹³', '常'] +['æĢİ', '麼'] +['ä½ĵ', 'çİ°åľ¨'] +['é»Ħ', 'æ²³'] +['åĽĽå·Ŀ', 'çľģ'] +['羣', '缸'] +['åIJĦ项', 'å·¥ä½ľ'] +['åĬ¨', 'åijĺ'] +['å³°', 'ä¼ļ'] +['ä¸Ģ', 'æľŁ'] +['æľī', 'ä¸Ģå®ļçļĦ'] +['é«ĺ度', 'éĩįè§Ĩ'] +['ç¹ģ', 'èį£'] +['åıijçݰ', 'äºĨ'] +['ç½ij', '红'] +['æīĭ', 'æ³ķ'] +['å®¶', 'åĽŃ'] +['仪', 'åύ'] +['è¾ĥ', 'ä½İ'] +['çļĦ', 'å®īåħ¨'] +['æ¡', 'IJ'] +['ä»ĺ', '款'] +['æĬij', 'åζ'] +['åįĵ', 'è¶Ĭ'] +['æŃ£', 'éĿ¢'] +['åĵ', 'ij'] +['强', 'åζ'] +['ä»Ĭ天', 'çļĦ'] +['æĪĺ', 'èĥľ'] +['楼', 'å¸Ĥ'] +['æĭ¿', 'ä¸ĭ'] +['é¢ľ', 'å̼'] +['举', 'éĥ¨'] +['çłĶ', 'åζ'] +['çļĦ', 'æĪĺçķ¥'] +['åľ¨', 'ä¸Ģ个'] +['ä¸ī', '人'] +['å®Į', 'äºĨ'] +['æĸ°', 'æĬĢæľ¯'] +['ç»ıæµİ', 'æķĪçĽĬ'] +['å¯Į', 'æľī'] +['æ¾³', 'æ´²'] +['åĬ©', 'çIJĨ'] +['é¢Ĩ', 'åıĸ'] +['è°', 'Ń'] +['çĩĥ', 'çĥ§'] +['ç´ł', 'åħ»'] +['éĤĦ', 'æľī'] +['è¿Ľ', 'èĢĮ'] +['ä»Ģä¹Ī', 'æĺ¯'] +['çłĶç©¶', 'ä¸Ńå¿ĥ'] 
+['éĢĤ', 'ç͍äºİ'] +['æİ¥', 'æĶ¶'] +['失', 'æľĽ'] +['äºĮ', '级'] +['éĹ´', 'çļĦ'] +['åİŁ', 'æłĩé¢ĺ'] +['èªį', 'çĤº'] +['æį', '¡'] +['对', 'çĿĢ'] +['对', 'éĿ¢'] +['ä¸Ń', 'åİŁ'] +['éĵ', 'ĥ'] +['çĶŁäº§', 'çļĦ'] +['åıijå¸ĥ', 'ä¼ļ'] +['士', 'åħµ'] +['è¿Ļ', 'åı¥è¯Ŀ'] +['ç¼´', '纳'] +['ä¸Ģ个', '个'] +['åѸ', 'çĶŁ'] +['çĸij', 'éĹ®'] +['交', 'èѦ'] +['示èĮĥ', 'åĮº'] +['天', '使'] +['åľ¨', 'ä¸Ĭæµ·'] +['åIJĮ', 'æĻĤ'] +['è½»', 'æĺĵ'] +['å͝ä¸Ģ', 'çļĦ'] +['çĥŃ', 'éĹ¹'] +['ä¹IJ', 'è§Ĥ'] +['çļĦ', '身份'] +['åĸĦ', 'äºİ'] +['大', 'åİħ'] +['èĤ¯å®ļ', 'æĺ¯'] +['éĺ²', 'çģ«'] +['å¤ĸ', 'åĩº'] +['æį®', '说'] +['é¡¹çĽ®', 'çļĦ'] +['ä¸Ģ', 'åı°'] +['èĻļ', 'åģĩ'] +['ä¸Ģ', 'ç¬Ķ'] +['ç«ĭ', 'æ³ķ'] +['严', 'èĤĥ'] +['æī¿', 'åĬŀ'] +['åįģ', 'åĩł'] +['çļĦ', '空éĹ´'] +['æľ¬', 'ç½ijç«Ļ'] +['åģļ', 'å¾Ĺ'] +['ä¿Ŀ', '温'] +['æľĪ', 'åĪĿ'] +['åľ¨', 'ç½ijä¸Ĭ'] +['åIJĦ', 'æĸ¹éĿ¢'] +['ä¸ī', '天'] +['交æĺĵ', 'æīĢ'] +['è§£', 'æŀIJ'] +['åħļ', 'ä¸Ń央'] +['è¿Ľ', 'åĩºåı£'] +['åĴĮ', '社ä¼ļ'] +['次', 'æķ°'] +['ä¹ĭ', 'å®¶'] +['ç»´', '度'] +['æ´¾åĩº', 'æīĢ'] +['产çĶŁ', 'äºĨ'] +['带', 'æľī'] +['å¾Ī', '强'] +['æľīäºĽ', '人'] +['å¹´', 'åIJİ'] +['äºĨ', '许å¤ļ'] +['å¯Ĩ', '度'] +['åѦ', 'æľŁ'] +['çıł', 'æµ·'] +['æľĢå¤ļ', 'çļĦ'] +['è¾¹', 'ç¼ĺ'] +['容', 'éĩı'] +['第äºĮ', '个'] +['ä¸Ģ缴', 'æĺ¯'] +['ä¸į', 'ç¦ģ'] +['æŃ', '²'] +['ä»ĭç»į', 'äºĨ'] +['ä¼ĺ', 'éĽħ'] +['æ¯Ķ', 'è¼ĥ'] +['èģĮ', 'ä½į'] +['温', 'æŁĶ'] +['æľī', 'éĴ±'] +['æľĢ', 'é«ĺçļĦ'] +['åįļè§Ī', 'ä¼ļ'] +['ä¸į', 'æĪIJ'] +['éĶĻ', 'äºĨ'] +['è¯ģ', 'çĽij'] +['è¯ģçĽij', 'ä¼ļ'] +['æĪIJ', '人'] +['åĿĩ', 'åĮĢ'] +['æľī', 'åĪ©'] +['è¶Ĭ', 'åįĹ'] +['æīĵ', 'äºĨ'] +['好', 'åIJĥ'] +['ç³»', 'çµ±'] +['è·Ł', 'éļı'] +['çļĦ', 'åľ°ä½į'] +['æŃ£', 'å¦Ĥ'] +['ç¨į', 'å¾®'] +['åį°', 'åıij'] +['åĪĽ', 'ç«ĭ'] +['é£İ', 'åħī'] +['å°Ĩ', 'æĪIJ为'] +['ä¸į', 'é«ĺ'] +['é¢ij', 'ç¹ģ'] +['设', 'æľī'] +['ä¼', 'ŀ'] +['æĭĨ', 'éϤ'] +['å½±', 'åĥı'] +['æ¸Ĺ', 'éĢı'] +['å¹´', 'å¼Ģå§ĭ'] +['ç½ij', 'æĺĵ'] +['è¦ģ', 'åģļ'] +['ç͵åĬ¨', '车'] +['羣', 'å¿ĥ'] +['æµ·', 'åĨĽ'] +['ä¼ł', 'æĿ¥'] +['å·®', 'åĪ«'] +['è°¨', 'æħİ'] +['çĥŁ', 'åı°'] +['åįĥ', 'å¹´'] +['è¯ģ', 'å®ŀ'] +['çIJ', 
'ª'] +['çļĦ', 'åħ·ä½ĵ'] +['åΰ', 'å¤Ħ'] +['ä¸į', 'å®ľ'] +['èľ', 'Ģ'] +['èĥ½åĬĽ', 'åĴĮ'] +['çīº', 'çī²'] +['çļĦ', 'éĴ±'] +['大', 'éĺŁ'] +['é¦ĸ', 'è¦ģ'] +['ä¸į', 'æĦ¿'] +['çİ«', 'çij°'] +['人æ°ij', 'ç½ij'] +['è¿ĺæĺ¯', 'è¦ģ'] +['åĽĽ', 'å¹´'] +['æįŁ', '伤'] +['çļĦ', 'åģļæ³ķ'] +['éĿ', 'Ī'] +['è¡Ķ', 'æİ¥'] +['åIJĪ', 'æĪIJ'] +['没', '人'] +['éŨ', 'æ§Ľ'] +['ä¿¡', 'è´·'] +['çļĦ', '缸åħ³'] +['举', 'é£İ'] +['社', 'ä¿Ŀ'] +['ä¸ĭ', '游'] +['åĿĹ', 'éĴ±'] +['è¿ĩ', 'åIJİ'] +['çļĦ', 'åºĶç͍'] +['é¥', '¶'] +['é¢ģ', 'åıij'] +['ä¸Ģ', 'å¤Ħ'] +['åįİ', 'å¤ı'] +['为', 'ä¼ģä¸ļ'] +['åıª', 'ä¼ļ'] +['ä¾µ', '害'] +['çļĦ', 'åĬŁèĥ½'] +['åѸ', 'ç¿Ĵ'] +['ä¸Ńåįİ', 'æ°ijæĹı'] +['åıijå¸ĥ', 'äºĨ'] +['è¿İ', 'æİ¥'] +['æĪij', 'èĩªå·±'] +['è¿ĺ', 'éľĢè¦ģ'] +['太éĺ³', 'èĥ½'] +['åİ»', 'ä¸ĸ'] +['æĺ¯', 'ä½ł'] +['åIJĪ', 'åĬĽ'] +['ç»ĺ', 'çĶ»'] +['åı°', 'åĮĹ'] +['çĿ£', 'ä¿ĥ'] +['åĮĹ', 'éĥ¨'] +['æľī', 'å¤ļå°ij'] +['å¾Ī', 'éĩįè¦ģ'] +['åĪĴ', 'åĪĨ'] +['åı·', '线'] +['æĶ¾', '大'] +['ä¼ļ', '被'] +['èİ·', 'å¥ĸ'] +['ä¹ĭ', 'åĨħ'] +['失', 'åİ»äºĨ'] +['çݩ家', '们'] +['éĩĩ', 'éĽĨ'] +['å£', '¹'] +['å®¶', 'ä¼Ļ'] +['çϽ', '天'] +['åĽłä¸º', 'ä»ĸ'] +['社ä¼ļ', 'æ²»çIJĨ'] +['å¼Ģ', 'åĪĽ'] +['ç͵', 'ç¼Ĩ'] +['æĸ°', 'ä¸Ģ代'] +['å¹¶', 'è´Ń'] +['å°±', 'å·²ç»ı'] +['çļĦ', '社ä¼ļ'] +['éϤ', 'éĿŀ'] +['åı¯ä»¥', 'ç͍'] +['å©', 'ī'] +['æ¯Ķè¾ĥ', '好'] +['å®ŀ', 'ä¸ļ'] +['åĪĽ', 'åĬŀ'] +['æıIJ', 'èµ·'] +['é»', 'ĥ'] +['ä½ı', 'åľ¨'] +['å¸Ĥ', 'æĶ¿'] +['éĿ¢ä¸´', 'çļĦ'] +['èĥ½', 'åľ¨'] +['çŁŃ', 'çŁŃ'] +['羣', '人'] +['æĺİ', 'æĺİ'] +['èµĦ', 'åĬ©'] +['çļĦ', 'ä¸įåIJĮ'] +['å°ı', 'æľĭåıĭ'] +['é¢ĺ', 'æĿIJ'] +['ç¾İ', 'åij³'] +['æĺŁ', '座'] +['ä¸į', 'ä¸Ģæł·çļĦ'] +['çľĭ', 'ä¸Ĭåİ»'] +['ä¸Ģ', 'æł¹'] +['广', 'å·ŀå¸Ĥ'] +['åıijçĶŁ', 'çļĦ'] +['é«ĺ', 'ç§ijæĬĢ'] +['ä¸Ģ', 'è¾ĪåŃIJ'] +['交', 'åıī'] +['ä½ĵç³»', '建设'] +['åĽłä¸º', 'æĪij'] +['çıį', 'æĥľ'] +['ä¸Ĭ', 'åѦ'] +['æĪĺ', 'æľ¯'] +['æŃ¤', 'ç±»'] +['交', 'å¾Ģ'] +['æĮī', 'æij©'] +['人们', 'çļĦ'] +['åħ¶', '實'] +['åİŁ', 'æĿIJæĸĻ'] +['渴', 'æľĽ'] +['缸', 'å¤Ħ'] +['å¾®', 'å¾®'] +['æ®', '·'] +['ä¹ĺ', 'åĿIJ'] +['å¼Ģå±ķ', 'äºĨ'] +['é«ĺ', 'åĵģè´¨'] +['æĹłäºº', 'æľº'] 
+['ä¸įæĺ¯', 'å¾Ī'] +['çļĦ', 'æĬķèµĦ'] +['èĬĤ', 'çľģ'] +['èĩ', 'ī'] +['ç²¾', 'éĢī'] +['çļĦ', 'æłĩåĩĨ'] +['åįĹ', 'éĥ¨'] +['认è¯Ĩ', 'åΰ'] +['å¹³', 'éĿĻ'] +['èĹ', '¥'] +['æī«', 'é»ij'] +['æī«é»ij', 'éϤ'] +['æī«é»ijéϤ', 'æģ¶'] +['éĢĻ', '種'] +['建çŃij', 'éĿ¢ç§¯'] +['ç¡®', 'ç«ĭ'] +['管çIJĨ', 'åĬŀæ³ķ'] +['æĦı', 'å¿Ĺ'] +['ä¸', '¨'] +['让', 'åŃ©åŃIJ'] +['æķij', 'çģ¾'] +['å½ĵ', 'ä»Ĭ'] +['çģ«', 'çģ¾'] +['åIJĦ', 'éĥ¨éŨ'] +['ä¾µ', 'çĬ¯'] +['æ¯ı', 'åij¨'] +['æı', '½'] +['ä¸Ģ次', 'æĢ§'] +['åħ¶ä»ĸ', '人'] +['éĶĻ', 'è¿ĩ'] +['ä¸İ', 'åħ¶'] +['åĭĩ', 'æ°Ķ'] +['çĩĥ', 'æ°Ķ'] +['é¦ĸ', 'å±Ĭ'] +['æľį', '饰'] +['ç²', '¥'] +['å®Į', 'æ¯ķ'] +['å°±', 'æĬĬ'] +['åĬŀäºĭ', 'å¤Ħ'] +['ä¸Ģä¼ļ', 'åĦ¿'] +['离', 'ä¸įå¼Ģ'] +['å¦Ĥæŀľ', 'æĤ¨'] +['ä»ĵ', 'åºĵ'] +['导', 'å¸Ī'] +['åIJĪéĢĤ', 'çļĦ'] +['毫', 'ç±³'] +['å®īåħ¨', 'æĢ§'] +['ä¾Ŀ', 'çħ§'] +['产ä¸ļ', 'åĮĸ'] +['ä½ł', 'çľĭ'] +['羣çļĦ', 'å¾Ī'] +['åѤ', 'çĭ¬'] +['éĺ²', '御'] +['å¾Ī', 'ç®Ģåįķ'] +['é£İ', 'æ°´'] +['ä½Ĩ', 'ä¹Ł'] +['æİ¨', 'åĩºäºĨ'] +['æ°ijèIJ¥', 'ä¼ģä¸ļ'] +['çłģ', '头'] +['å¤įæĿĤ', 'çļĦ'] +['ç»ĦæĪIJ', 'éĥ¨åĪĨ'] +['åħħ满', 'äºĨ'] +['è¿ij', 'åĩłå¹´'] +['çľģ', 'æĶ¿åºľ'] +['æľī', 'å¿ħè¦ģ'] +['éĻ', '³'] +['ä¹ĭ', 'ç±»'] +['ä¹ĭç±»', 'çļĦ'] +['æĢ§', 'ä»·'] +['æĢ§ä»·', 'æ¯Ķ'] +['åķĨ', 'åºĹ'] +['å¸Ĥ', 'å̼'] +['人æīį', 'åŁ¹åħ»'] +['æ·±', 'åıĹ'] +['管çIJĨ', 'å±Ģ'] +['æģIJ', 'æĥ§'] +['ä»ħ', 'æľī'] +['æĬµ', 'è¾¾'] +['æµ·', 'åħ³'] +['èµĭ', 'äºĪ'] +['äºĭ', 'åĦ¿'] +['ä»·', 'éĴ±'] +['æīĭ', 'ä¸Ĭ'] +['èĩª', 'å¾ĭ'] +['åħ³', 'çα'] +['享', 'æľī'] +['éģĹ', 'æĨ¾'] +['å¾Īå¿«', 'å°±'] +['æĽ´', 'å¿«'] +['æłĩ', 'è¯Ĩ'] +['åºĨ', 'ç¥Ŀ'] +['ä¹Ł', '好'] +['ä¸į', 'æĺĵ'] +['æĪij', 'å¾Ī'] +['æĶ¹éĿ©', 'åıijå±ķ'] +['å¤ĸ', 'åľ°'] +['æĬµ', 'æĬ¼'] +['è¯Ĺ', '人'] +['åİķ', 'æīĢ'] +['æĸ°', 'åªĴä½ĵ'] +['èĸ', 'Ľ'] +['è°Ī', 'è¯Ŀ'] +['ä¸Ģå®ļ', 'ç¨ĭ度'] +['èµ°', 'åľ¨'] +['æľĢ', '强'] +['åĬŁ', 'çİĩ'] +['åħ±', 'è¯Ĩ'] +['大', 'æ¡¥'] +['ä¸ĭ', 'æĸ¹'] +['å¤ĸ', 'èµĦ'] +['ç¢', '±'] +['å·¡', 'è§Ĩ'] +['æ¹ĸåĮĹ', 'çľģ'] +['个', 'çϾåĪĨ'] +['个çϾåĪĨ', 'çĤ¹'] +['çļĦ', '责任'] +['çļĦ', 'åĵģçīĮ'] +['åĬ©', 'æİ¨'] +['åĪĽéĢł', 'äºĨ'] +['ä»»', 
'èģĮ'] +['å¿«', 'æį·'] +['æĿij', 'åºĦ'] +['åİ»', 'çľĭ'] +['æīį', 'èĥ½å¤Ł'] +['å±', '¤'] +['æĪij', 'å®¶'] +['æĺ¯ä¸Ģ', '款'] +['ç¾', 'ħ'] +['åĨ°', 'éĽª'] +['æŀģ', '大'] +['çģ¯', 'åħī'] +['éĨ', 'ĭ'] +['ä¸İ', 'åħ¶ä»ĸ'] +['æıIJåĩº', 'çļĦ'] +['éĿł', 'è¿ij'] +['è°ĥ', 'åĬ¨'] +['å°½', 'åı¯èĥ½'] +['åıij', 'åĬĽ'] +['ç»Ļ', '她'] +['éĢĤ', 'éĩı'] +['è·¨', 'åĽ½'] +['åħĪ', 'è¡Į'] +['æĸ°', 'æĿIJæĸĻ'] +['ä½ľ', 'äºĨ'] +['满', 'äºĨ'] +['ä¸į', '满'] +['çļĦçľ¼', 'çĿĽ'] +['çľĭ', 'å¾Ĺ'] +['è¿Ļ', 'ä¸Ģ次'] +['é½IJ', 'åħ¨'] +['çļĦä¸Ģ', 'éĥ¨åĪĨ'] +['ä¸', 'Ļ'] +['æ¸ħ', 'æĸ°'] +['說', 'æĺİ'] +['身边', 'çļĦ'] +['æīĢæľī', '人'] +['å½°', 'æĺ¾'] +['è±', '¹'] +['åį', '¿'] +['è¿IJ', '转'] +['æĮĩ', 'å¼ķ'] +['å¸Ĥ', 'åħ¬å®īå±Ģ'] +['åıĤ', 'å±ķ'] +['ä¹ĭ', 'æĹ¶'] +['éĩijèŀį', 'æľįåĬ¡'] +['èµĦæľ¬', 'å¸Ĥåľº'] +['èĥ½', '让'] +['å¿ĺ', 'äºĨ'] +['天', 'åłĤ'] +['æ¯Ķå¦Ĥ', '说'] +['éĬĢ', 'è¡Į'] +['èĽĭ', 'ç³ķ'] +['çĶ', '©'] +['æł¸', 'å®ŀ'] +['æĻ®', '京'] +['ä¼ĺ', 'ç¾İ'] +['åı£', 'èħĶ'] +['漫', 'çĶ»'] +['çľ¼', 'éĩĮ'] +['äºĨ', 'ä¸ĭæĿ¥'] +['æĪij们', 'ä¹Ł'] +['ä¾', 'į'] +['为', 'ä¸Ńå¿ĥ'] +['å¥ĩ', '迹'] +['éĿĴ', 'çĿIJ'] +['æĪªèĩ³', '缮åīį'] +['åĩº', 'ä¾Ĩ'] +['æĢ»', 'åħ¬åı¸'] +['å¼¥', 'è¡¥'] +['ç®Ĺ', 'æ³ķ'] +['å·¥ä½ľ', '室'] +['æīĢ以', 'æĪij'] +['æ°´', 'åĪĨ'] +['æīĢ', 'å±ŀ'] +['ä¸į', '说'] +['ä½Ĩæĺ¯', 'åľ¨'] +['è¦ģ', 'åİ»'] +['åĪĽä¸ļ', 'èĢħ'] +['ä¸į', 'æ¸ħæ¥ļ'] +['åĽĽ', 'åij¨'] +['æĺ¯', 'ä»İ'] +['çļĦ', 'æł¹æľ¬'] +['çģ', '¶'] +['æ¯Ľ', 'æ³½'] +['æ¯Ľæ³½', '举'] +['æµ·', 'åı£'] +['åĽĽ', 'åįģ'] +['ä¹Ł', '被'] +['èģ', '·'] +['ä¸Ģ', 'æīĭ'] +['绩', 'æķĪ'] +['çļĦ', 'çĶ·äºº'] +['书', 'ç±į'] +['ä¸Ģ', 'èĦ¸'] +['大', 'äºİ'] +['鼶', 'éĥ¨ä»¶'] +['åħ³', 'æĢĢ'] +['å¹³', 'ç±³'] +['æļ´', 'éľ²'] +['å¾Ĺ', 'å¤ļ'] +['ä¸ī', '级'] +['æľ¬', 'åij¨'] +['两', 'èĢħ'] +['对', 'ä¸ŃåĽ½'] +['åıª', 'è§ģ'] +['欧', 'ç¾İ'] +['å¦Ĥæŀľ', 'æľī'] +['å·²ç»ı', 'æĺ¯'] +['çľĭ', 'å®Į'] +['çģ«', 'éĶħ'] +['èµ', 'IJ'] +['ä¸Ģ', 'éģį'] +['æĦŁ', 'åĨĴ'] +['ç»ĵ', 'å±Ģ'] +['ä»ĵ', 'åĤ¨'] +['å®ŀ', 'åľ°'] +['å̻', 'ç»ıçIJĨ'] +['ä¹Łä¸į', 'çŁ¥éģĵ'] +['碰', 'åΰ'] +['åIJĪ', '计'] +['客æĪ·', 'çļĦ'] +['ç½Ĺ', '马'] +['æĦī', 
'å¿«'] +['é£', 'Ľ'] +['çĥŃ', 'çĥĪ'] +['伦', 'æķ¦'] +['åĮ»', 'ä¿Ŀ'] +['éĺ¿éĩĮ', 'å·´å·´'] +['åĨį', '说'] +['为', 'åŁºç¡Ģ'] +['çĶŁäº§', 'ç»ıèIJ¥'] +['è¿ĻäºĽ', '人'] +['åĪĹ', '车'] +['æ²³åĮĹ', 'çľģ'] +['è¿Ļ', '段'] +['æ´»åĬ¨', 'ä¸Ń'] +['å©', '·'] +['çĶŁ', 'çIJĨ'] +['ä¸ŃåĽ½', '人æ°ij'] +['éĦ', 'Ĥ'] +['åIJ¬', 'åıĸ'] +['å¤į', 'ä¹ł'] +['æľī', 'çĽĬ'] +['æĶ¶', 'æĭ¾'] +['å¾Ī', 'åı¯èĥ½'] +['ç½ij绾', '游æĪı'] +['们', 'çļĦ'] +['èµĭ', 'èĥ½'] +['éļ¾', 'å¾Ĺ'] +['åĪĨ', 'æīĭ'] +['羣', 'è¯ļ'] +['åħ¬åı¸', 'åľ¨'] +['åĿĩ', 'è¡¡'] +['åı£', 'åij³'] +['çīµ', '头'] +['ä¸Ģèά', 'çļĦ'] +['轿', '车'] +['çŃī', 'äºİ'] +['æ²ī', 'é»ĺ'] +['æĪij', 'éĥ½'] +['å°ı', 'ç¨ĭåºı'] +['ä¸Ģ', 'åī¯'] +['æī¿', 'è½½'] +['åľ°', 'è´¨'] +['çķĮ', 'éĿ¢'] +['ç͵', 'æľº'] +['çĦ¦', 'èĻij'] +['éĶĢåĶ®', 'é¢Ŀ'] +['æĸ°', '车'] +['ä¸Ĭ', '游'] +['主', 'æ¼Ķ'] +['éļIJ', 'ç§ģ'] +['åıijå±ķ', 'æĪĺçķ¥'] +['çļĦ', 'åĬªåĬĽ'] +['å¼Ģ', 'åħ³'] +['è§£åĨ³', 'éĹ®é¢ĺ'] +['çĿ£', '导'] +['对', 'æĬĹ'] +['å¾Īå¤ļ', '人éĥ½'] +['æĹł', 'æķĪ'] +['产åĵģ', 'è´¨éĩı'] +['å®ī', 'å¿ĥ'] +['åįİ', '人'] +['ä¸į', '符åIJĪ'] +['èĩª', 'å®¶'] +['éĺµ', '容'] +['çļĦ', 'åIJĦç§į'] +['çļĦ', 'çIJĨ念'] +['çļĦ', 'æĸĩåĮĸ'] +['为', 'èĩªå·±'] +['å±±', 'æ°´'] +['游', 'æ³³'] +['éľĩ', 'èį¡'] +['çĶŁæ´»', 'æĸ¹å¼ı'] +['è¿ľ', '离'] +['çŁ³', 'åĮĸ'] +['æŃ¤', 'äºĭ'] +['æĺ¯', '羣çļĦ'] +['çļĦ', 'æ¯Ķä¾ĭ'] +['ç͍', 'ç͵'] +['奥è¿IJ', 'ä¼ļ'] +['ä¿Ŀ', 'å®ī'] +['èĽĭçϽ', 'è´¨'] +['çļĦ', 'å¿ĥçIJĨ'] +['å·', '«'] +['åı·', 'çłģ'] +['æ°Ķ', 'ä½ĵ'] +['åıij', 'æĶ¹'] +['åıijæĶ¹', 'å§Ķ'] +['åĮ»', 'å¸Ī'] +['æ¶Ĥ', 'æĸĻ'] +['æĺ', 'Ĭ'] +['å¸Ĥ', '级'] +['ä¸ĸçķĮ', 'çļĦ'] +['åĪĨåĪ«', 'æĺ¯'] +['çł´', '产'] +['ä¸Ģ', 'æĿ¯'] +['æĭī', 'å¼Ģ'] +['å¹³', 'åĩ¡'] +['çļĦ', 'åıijçĶŁ'] +['åĬ¨', 'æīĭ'] +['ä¸Ģ缴', '以æĿ¥'] +['æīĭ', 'å·¥'] +['éĩĮéĿ¢', 'çļĦ'] +['æĹł', 'åħ³'] +['ä»ĭ', 'åħ¥'] +['èµ°', 'ä¸Ĭ'] +['å°±æĺ¯', 'è¦ģ'] +['å¹´', 'éĹ´'] +['åĩº', 'çı¾'] +['å½±', 'éŁ¿'] +['å¹ħ', '度'] +['éĽ', 'ģ'] +['éģĵ', 'åħ·'] +['缮çļĦ', 'åľ°'] +['åIJİ', 'èĢħ'] +['ä¸Ĭ', 'æ¼Ķ'] +['äºĨ', 'åĩł'] +['æ®ĭçĸ¾', '人'] +['å¿Ļ', 'ç¢Į'] +['æĺ¯åIJ¦', 'æľī'] +['å¹¶', '对'] +['ä¼ļ', '导èĩ´'] 
+['æ°´', 'åºĵ'] +['ç»Ĩ', 'èĩ´'] +['åIJİ', 'æĤĶ'] +['å¿ĥ', 'æĢĿ'] +['åģļ', 'äºĭ'] +['åİĤ', 'æĪ¿'] +['çĿ', '¿'] +['è¿IJèIJ¥', 'åķĨ'] +['头', 'éĥ¨'] +['çļĦ', 'è§Ĵèī²'] +['æĺ¯', 'ä»ĸ'] +['æĹ¢', 'æľī'] +['å°ıæĹ¶', 'åĢĻ'] +['强', 'åĬ²'] +['主', 'æĴŃ'] +['åħ¨åĽ½', 'åIJĦåľ°'] +['æį', 'ı'] +['æįŁ', 'åĿı'] +['åķĨ', 'ä¼ļ'] +['ä¿Ŀ', 'ç½Ĺ'] +['çľģ', 'å¸Ĥ'] +['éļ§', 'éģĵ'] +['æľī', 'ä¸įå°ij'] +['è¦ģ', 'åľ¨'] +['建设', 'é¡¹çĽ®'] +['ç³ĸ', 'å°¿'] +['ç³ĸå°¿', 'çĹħ'] +['æĿ¡ä»¶', 'ä¸ĭ'] +['ä¼ĺè´¨', 'çļĦ'] +['é¦ĸ', 'åıij'] +['å½ĵæĹ¶', 'çļĦ'] +['丰', 'çͰ'] +['大', 'çĽĺ'] +['缸', 'ç»§'] +['å®ģ', 'å¤ı'] +['åħ¥', 'ä½ı'] +['æĪij', 'è¿ĺ'] +['åħĭ', 'æĸ¯'] +['å®ļ', 'ä»·'] +['å¹³æĸ¹', 'åħ¬éĩĮ'] +['çļĦ', 'çŁ¥è¯Ĩ'] +['æĪij们', 'ä¼ļ'] +['åħĥ', 'å®Ŀ'] +['ä½ĵ', 'éĩį'] +['è³', '£'] +['对', 'æĪij们'] +['çŁ³', 'å®¶'] +['çŁ³å®¶', 'åºĦ'] +['ç²¾', 'åįİ'] +['å½¢', 'çĬ¶'] +['åıĹ', 'åΰäºĨ'] +['ä¿®', '订'] +['ç¾İ', 'åľĭ'] +['é«ĺ', 'æ¸ħ'] +['çľ¼', 'éķľ'] +['è§īå¾Ĺ', 'èĩªå·±'] +['带', 'ç»Ļ'] +['åĶ®', 'ä»·'] +['éŨ', '票'] +['åŃķ', 'å¦ĩ'] +['ç͵è§Ĩ', 'åı°'] +['åıij', 'ä½ľ'] +['çļĦ', 'åij³éģĵ'] +['éķ¿', 'è¿ľ'] +['åħ¬åħ±', 'æľįåĬ¡'] +['æŃ£å¸¸', 'çļĦ'] +['æľī', 'è¿ĩ'] +['é£İ', 'æĥħ'] +['æ¯Ķ', 'éĩį'] +['åIJ', '»'] +['管çIJĨ', 'å·¥ä½ľ'] +['综åIJĪ', 'æĢ§'] +['å·²', '被'] +['说', 'èµ·'] +['æİĴ', 'æ°´'] +['ä¸įæĸŃ', 'åľ°'] +['æĥħ', 'æĢĢ'] +['è¾ĵ', 'éĢģ'] +['è¿ĩ', 'æķı'] +['çļĦ', 'åı¯èĥ½æĢ§'] +['æľį', 'ç͍'] +['æľī', '许å¤ļ'] +['å§Ķ', 'åī¯ä¹¦è®°'] +['åĮĸå¦Ĩ', 'åĵģ'] +['æļĤ', 'åģľ'] +['æĬķèµĦ', '人'] +['çıŃ', '级'] +['说', 'çĿĢ'] +['åįĹ', 'åĮĹ'] +['åĪĨ', 'è¡Į'] +['çıł', 'å®Ŀ'] +['å¯', '¶'] +['å¢ŀ', 'å¤ļ'] +['被', 'åĬ¨'] +['ç®Ĭ', 'çļĦ'] +['éĹľ', 'ä¿Ĥ'] +['çļĦ', 'èĦ¸'] +['æĥ', 'Ł'] +['ä¸į', 'ä¸Ģå®ļ'] +['ç¶', 'Ń'] +['çģ«', 'çĪĨ'] +['ç§Ł', 'éĩij'] +['çŀ', '§'] +['éĩį', '建'] +['è·', 'ª'] +['ä¸Ģ', '種'] +['çļĦ', 'åIJĪä½ľ'] +['å®ī', 'æħ°'] +['ä»į', 'æĺ¯'] +['ä¸ĵä¸ļ', 'åĮĸ'] +['è°ĥ', 'è§£'] +['ä¸į', '妨'] +['éĢĻ', 'æĺ¯'] +['å¿ħ', 'éłĪ'] +['ä¼Ĭ', 'æľĹ'] +['å¾Ĺ', 'äºĨ'] +['æľįåĬ¡', 'å¹³åı°'] +['å§', '¬'] +['åħĪ', 'éĶĭ'] +['çİĭ', 'åŃIJ'] +['çļĦä¸Ģ', 'åĪĩ'] 
+['æĢ»', 'çIJĨ'] +['åĵ', '¼'] +['çª', 'ij'] +['çļĦå¿ĥ', 'æĥħ'] +['çļĦ', 'éĩį大'] +['çij', 'Ł'] +['ä¸Ģ', 'ç¬ij'] +['åıijå±ķ', 'ä¸Ń'] +['åģ¥åº·', 'åıijå±ķ'] +['åĵģçīĮ', 'çļĦ'] +['ç¦', '®'] +['ä½Ļ', '人'] +['ä»Ĭå¹´', '以æĿ¥'] +['æķ°', 'çłģ'] +['çѾ', 'è¯ģ'] +['åİ»', 'æī¾'] +['åŁºéĩij', 'ä¼ļ'] +['æĬ±', 'æĢ¨'] +['æŃ£', 'å½ĵ'] +['çıŃåŃIJ', 'æĪIJåijĺ'] +['ä¸į', 'åIJĪæł¼'] +['åζ', 'å®ļäºĨ'] +['ç¼ĵ', 'æħ¢'] +['åζ', '约'] +['æłı', '缮'] +['å¸Ĥåľº', 'ç»ıæµİ'] +['ç»ĦæĪIJ', 'çļĦ'] +['严', 'å³»'] +['æĹ¥', '讯'] +['ä¸ĢçĤ¹', 'çĤ¹'] +['æĺ¯', 'æĢİä¹Ī'] +['çļĦ', 'çħ§çīĩ'] +['éĺ»', 'æŃ¢'] +['模', 'ç³Ĭ'] +['ç¼', '¸'] +['éģķ', 'åıį'] +['æIJ¬', 'è¿ģ'] +['éĩij', 'éĴ±'] +['å½', '¬'] +['ä¸į', 'å®ī'] +['æĪĺçķ¥', 'åIJĪä½ľ'] +['å¡«', 'åĨĻ'] +['讲', 'ç©¶'] +['åħħåĪĨ', 'åĪ©ç͍'] +['èĥ½', 'å¤ł'] +['èij¡èIJĦ', 'éħĴ'] +['éĩĩç͍', 'äºĨ'] +['åľ¨', 'ä»Ĭå¹´'] +['ä¸Ńå°ı', 'åѦ'] +['åľ¨', 'æĦı'] +['çļĦ', 'åİĭåĬĽ'] +['ä¸į', '幸'] +['åζ', 'èį¯'] +['åı¯ä»¥', '让'] +['被', 'è¯Ħ为'] +['ç»Ĩ', 'èıĮ'] +['æĪı', 'åī§'] +['åįĬ', '导'] +['åįĬ导', 'ä½ĵ'] +['è§Ĩ', 'è§Ĵ'] +['åĸľ', 'æŃ¡'] +['å¾ģ', 'æĶ¶'] +['è°ĭ', 'åĪĴ'] +['æŀģ', '大çļĦ'] +['çĤ¹', 'èµŀ'] +['è®°èĢħ', 'ä»İ'] +['两', 'åIJį'] +['èĩª', 'åĬ©'] +['èµ·', 'æŃ¥'] +['æĬ¤', '士'] +['å®Ŀ', '马'] +['太', 'åŃIJ'] +['å°ıå°ı', 'çļĦ'] +['温', 'æ³ī'] +['åĩºç§Ł', '车'] +['ç§Ł', 'æĪ¿'] +['两', 'å®¶'] +['éľĩ', 'æĴ¼'] +['ç§ī', 'æī¿'] +['ä¸Ģä»¶', 'äºĭ'] +['çĥĪ', '士'] +['å®ĺ', 'åħµ'] +['转', '身'] +['ä¹IJ', 'åĽŃ'] +['çĻĮ', 'çĹĩ'] +['模', 'èĮĥ'] +['æĦ', '£'] +['è¿ĩåİ»', 'çļĦ'] +['代', 'ä»·'] +['çļĦ', 'æ¦Ĥ念'] +['åĩł', 'çϾ'] +['è´µ', 'éĺ³'] +['æĭħ', 'å¿§'] +['éĢĤ', 'å®ľ'] +['çݯå¢ĥ', 'ä¿ĿæĬ¤'] +['çĥ', '«'] +['ä½ł', 'æĥ³'] +['æŃ¤', 'åIJİ'] +['ä½ł', 'ä¹Ł'] +['çį', 'İ'] +['éϤ', 'æŃ¤'] +['éϤæŃ¤', 'ä¹ĭå¤ĸ'] +['è°ĥ', '度'] +['ç§ij', '缮'] +['æīĢ说', 'çļĦ'] +['åĬ', 'ĩ'] +['忽', 'è§Ĩ'] +['ä¸ī', '次'] +['ä¸Ģ', 'æĹ¥'] +['åŀĤ', '缴'] +['ç«ŀ', 'æĬĢ'] +['éĿ¢', 'åĮħ'] +['大', 'æĪĺ'] +['æIJº', '带'] +['å¦Ĥæŀľ', '没æľī'] +['åħ»', 'æĪIJ'] +['åĩº', 'è¡Ģ'] +['çα好', 'èĢħ'] +['æīĵ', 'éĢļ'] +['èµ·', 'è¯ī'] +['åijĪ', 'çݰåĩº'] +['æŃĮ', 'æīĭ'] 
+['åľ¨', 'å¤ĸ'] +['é¢Ĩ导', 'å¹²éĥ¨'] +['åĨ', '¥'] +['èĪĨ', '论'] +['æıIJ', 'åıĸ'] +['éĺ¿', 'å°Ķ'] +['æľĽ', 'çĿĢ'] +['ä¸ī', 'äºļ'] +['è²', '¡'] +['åĪ', '·æĸ°'] +['æĻļ', 'æĬ¥'] +['è¿ĺæľī', 'ä¸Ģ个'] +['åĨ°', 'ç®±'] +['ç½ij', 'çĤ¹'] +['åĩº', 'åħ·'] +['强çĥĪ', 'çļĦ'] +['æĪij', 'çĽ¸ä¿¡'] +['å¸ĮæľĽ', 'èĥ½'] +['çīĻ', '齿'] +['äºĭ', 'å®ľ'] +['ä¸ļåĨħ', '人士'] +['代', 'æĽ¿'] +['åıĺ', 'å½¢'] +['éĽ', '²'] +['è°ĥ', 'æİ§'] +['åĪĽæĸ°', 'åĪĽä¸ļ'] +['æĭĨ', 'è¿ģ'] +['æł¸', 'æŁ¥'] +['éĢ', 'Ĺ'] +['åħ¥', 'åѦ'] +['æĦı', 'åIJij'] +['æı', 'Ľ'] +['ä¸ĭ', '次'] +['ä¼ł', 'è¾ĵ'] +['ä»ĸ们', 'åľ¨'] +['èĢĮä¸Ķ', 'è¿ĺ'] +['æĹ¥', 'åľ¨'] +['æķĻ', 'è®Ń'] +['æ´»', 'çĿĢ'] +['çļĦ', 'æľīæķĪ'] +['å¤įå·¥', 'å¤į'] +['å¤įå·¥å¤į', '产'] +['æĺ¯ä¸Ģ', 'ä»¶'] +['çŃī', 'çĿĢ'] +['å¾', '©'] +['åĭĩ', 'æķ¢'] +['éģŃ', 'åıĹ'] +['å¥Ķ', 'é©°'] +['讲', '座'] +['说', 'å®Į'] +['ç»Ļ', 'åĩº'] +['è°', '¦'] +['è¯Ĭ', 'çĸĹ'] +['çĽ²', '缮'] +['客', 'è¿IJ'] +['å°±', 'è¿ŀ'] +['å¼Ģ', 'åħĥ'] +['å¼Ģåħĥ', 'æ£ĭçīĮ'] +['ä¸įæĸŃ', 'æıIJåįĩ'] +['ç͍æĪ·', 'çļĦ'] +['æĴ', 'ķ'] +['ä¾Ľ', 'æ°´'] +['ç¶ĵ', 'æ¿Ł'] +['ä¸Ń', 'åĮ»èį¯'] +['èģĶ', 'æĥ³'] +['åħ¬äº¤', '车'] +['èĪª', 'çıŃ'] +['æĬĢ', 'è¡ĵ'] +['å¼ķèµ·', 'çļĦ'] +['å°', '¹'] +['èµĦ', 'æ·±'] +['åĽ½èµĦ', 'å§Ķ'] +['èĺ', 'Ń'] +['é¼»', 'åŃIJ'] +['éĹ', '½'] +['æİĴ', 'éĺŁ'] +['è§Ĥ', 'åħī'] +['éģĹ', 'åĿĢ'] +['举', '京'] +['é¥Ń', 'åºĹ'] +['ä¸įæĸŃ', 'çļĦ'] +['å°±æĺ¯', 'ä¸Ģ个'] +['éķ¿', 'ä¹ħ'] +['çļĦ', 'è§ĤçĤ¹'] +['å¨', '¶'] +['æĪij', 'çİ°åľ¨'] +['çķ', '°'] +['å¾Ĺ', 'åĩº'] +['å¿ħ', 'å®ļ'] +['ä¸į', 'åıĹ'] +['åıª', 'éľĢè¦ģ'] +['åĽ°', 'æī°'] +['ç§ijåѦ', 'æĬĢæľ¯'] +['çīĽ', 'èĤī'] +['è¾ĥ', 'é«ĺçļĦ'] +['è·ij', 'æŃ¥'] +['æ²', '¾'] +['èı©', 'èIJ¨'] +['æľĢ', 'å¾Į'] +['ä¿Ŀ', 'å¯Ĩ'] +['æ²»', 'å®ī'] +['éĤ', '±'] +['常', 'è¯Ĩ'] +['èĦ¸', 'èī²'] +['åĮĹ', '大'] +['æ±ĩ', 'èģļ'] +['æijĨ', 'èĦ±'] +['é¾Ļ头', 'ä¼ģä¸ļ'] +['女', 'åıĭ'] +['çŃī', 'å·¥ä½ľ'] +['ä¸Ń', 'ç¾İ'] +['èģĮ', 'åľº'] +['èĦij', 'è¢ĭ'] +['åĨĻ', 'çļĦ'] +['饲', 'æĸĻ'] +['åĬ³', 'åĬ¨åĬĽ'] +['å±', '¯'] +['æĮģ', 'èĤ¡'] +['åĽ¾', 'åĥı'] +['è¿ĩåİ»', 'äºĨ'] +['è²', '¨'] +['è¾', '²'] +['éĹ®', 'æĪij'] 
+['è·Ł', 'ä½ł'] +['çĶŁ', 'æŃ»'] +['审', 'ç¾İ'] +['é¢Ĺ', 'ç²Ĵ'] +['ä¸Ń', 'æĸ¹'] +['åĬł', 'çĥŃ'] +['æĹħè¡Į', '社'] +['çϼ', 'çĶŁ'] +['ä¸į', 'åłª'] +['åĤ', '·'] +['æ¥', 'ł'] +['åĬŀ', 'æ¡Ī'] +['æŁ', 'Ħ'] +['æĹ¢', 'æĺ¯'] +['å¤Ħ', 'åĪĨ'] +['羣å®ŀ', 'çļĦ'] +['æĬ¥', '纸'] +['å¸Ī', 'çζ'] +['å®īå¾½', 'çľģ'] +['åī¯', '主å¸Ń'] +['ä¹ĭ', 'éģĵ'] +['导', 'å¼¹'] +['åŃ¦æł¡', 'çļĦ'] +['åŁİå¸Ĥ', 'çļĦ'] +['è°Ī', 'åΰ'] +['æ¢', 'Ĺ'] +['å¹³', 'éĿ¢'] +['说', 'ä»Ģä¹Ī'] +['é¢ij', 'çİĩ'] +['éķ¿', 'ä¸īè§Ĵ'] +['çļĦ', 'åĪ©çĽĬ'] +['é»', '¨'] +['è±Ĩ', 'èħIJ'] +['å®ŀéĻħ', 'æĥħåĨµ'] +['æŀĹ', 'ä¸ļ'] +['纪æ£Ģ', 'çĽijå¯Ł'] +['ä½ı', 'éĻ¢'] +['çļĦ', 'æķ´ä½ĵ'] +['åīį', 'è¡Į'] +['æĮ', '¨'] +['çħ¤', 'çŁ¿'] +['å̻', 'è£ģ'] +['å°ı', 'åIJĥ'] +['æŀģ', '端'] +['å©Ĩ', 'å©Ĩ'] +['çݰ', 'è´§'] +['è¯Ĺ', 'æŃĮ'] +['éĴ¥', 'åĮĻ'] +['缩', 'çŁŃ'] +['ä½Ĩ', 'è¿Ļ'] +['æĸ°', 'åĵģ'] +['è¿Ļ', '对'] +['çŁ¥åIJį', '度'] +['å¿ĹæĦ¿', 'æľįåĬ¡'] +['大', 'å±Ģ'] +['è¡¡', 'éĩı'] +['ä½ĵçݰ', 'äºĨ'] +['æ¡ĥ', 'èĬ±'] +['åIJ¸å¼ķ', 'åĬĽ'] +['åł', '¤'] +['æĵħ', 'éķ¿'] +['åĴ', 'Ĵ'] +['缸', 'æľº'] +['ä¸Ģ', 'ç«Ļ'] +['ä¸Ģç«Ļ', 'å¼ı'] +['æľĢ', 'ç¾İ'] +['æ°¸', 'ä¹ħ'] +['çļĦ', 'éĥ¨åĪĨ'] +['åĪĨ', 'å·¥'] +['å·¥ç¨ĭ', '建设'] +['æIJŃ', 'è½½'] +['æ°´', 'ä¸Ń'] +['èĮ', '¨'] +['çļĦ', 'æĵįä½ľ'] +['绣', 'æ²»'] +['çķħ', 'éĢļ'] +['åħļçļĦ', 'åįģ'] +['è¼', '¸'] +['æ¸', '¬'] +['ç¾İ', 'è§Ĥ'] +['ä¸į', 'åĪ©'] +['åıį', 'æĢĿ'] +['éªĦ', 'åĤ²'] +['æłĩ', 'çļĦ'] +['æĿĢ', '人'] +['éĺ¿', '姨'] +['é£Ł', 'æĿIJ'] +['åIJĥ', 'çļĦ'] +['åIJİ', 'åĨį'] +['çŁ', '£'] +['两', 'ä¾§'] +['æ¸ħ', 'æ°´'] +['è¿Ľ', 'çIJĥ'] +['å¼Ģå§ĭ', 'äºĨ'] +['åIJ¬', 'äºĨ'] +['çĦĬ', 'æİ¥'] +['çŁ', '®'] +['å¨', 'Ł'] +['为', '人'] +['éĢģ', 'ç»Ļ'] +['åĨĴ', 'éĻ©'] +['æķ', '·'] +['ç»Ī', 'æŃ¢'] +['æīį', 'çŁ¥éģĵ'] +['è¿IJ', 'æ°Ķ'] +['éĢļ', 'é£İ'] +['æĥĬ', 'è®¶'] +['ç§ijåѦ', 'éĻ¢'] +['æıIJ', 'éĹ®'] +['太', 'åİŁ'] +['缸åIJĮ', 'çļĦ'] +['ä»', 'ķ'] +['èģ', 'ĸ'] +['æĥħ', 'æ³ģ'] +['é¢Ĩ导', '人'] +['åĩºæĿ¥', 'äºĨ'] +['沿', '线'] +['éĻ', '½'] +['æĦŁ', '覺'] +['ä»į', 'åľ¨'] +['æ©', 'Ļ'] +['约', '为'] +['åĸĿ', 'éħĴ'] +['ç͍', 'èį¯'] +['ä¸ĭ', 'ä¸Ģ'] +['æ³ķ', 'å®ĺ'] 
+['顺', 'åºı'] +['åģļ', 'ä¸Ģ个'] +['åĭ', '¢'] +['æŃ', 'ª'] +['ç͵', 'ç«ŀ'] +['ä¼´', 'éļıçĿĢ'] +['ä¹ĭ', 'åĬĽ'] +['ä¹ĭ', '人'] +['äºij', '计ç®Ĺ'] +['åĪ«äºº', 'çļĦ'] +['ç§ijåѦ', 'åıijå±ķ'] +['第', 'åħ«'] +['å¹²', 'æī°'] +['女', 'ç¥ŀ'] +['è¿Ļæł·', 'åģļ'] +['å¤Ħ', 'åľ¨'] +['æ°´', 'è´¨'] +['éķ¿', 'æĺ¥'] +['å¸Ĥåľº', 'éľĢæ±Ĥ'] +['ç»´', 'æĿĥ'] +['è̳', 'æľµ'] +['æĸĩåĮĸ', 'çļĦ'] +['奶', 'ç²ī'] +['ä¼ł', 'è¾¾'] +['æīĭæľº', 'çīĪ'] +['æĽ¾', 'åľ¨'] +['äºĮ', 'æľŁ'] +['åİŁåĽł', 'æĺ¯'] +['æºIJ', '头'] +['åıĪ', 'èĥ½'] +['è£', '¸'] +['æĬĢæľ¯', 'åĪĽæĸ°'] +['æĸĩåĮĸ', 'æĹħ游'] +['åıij', '票'] +['å¹´', '级'] +['ä½ł', 'ä¸į'] +['ä¹ĭ', 'å¿ĥ'] +['æķ°', 'çϾ'] +['åIJij', 'å¾Ģ'] +['èĢģ', 'å®¶'] +['åľĭ', 'éļĽ'] +['çļĦ', 'é«ĺ度'] +['æľĿ', 'éĺ³'] +['æ¸ħ', 'éϤ'] +['èĩª', 'æľī'] +['书', 'ä¸Ń'] +['游æĪı', 'è£ħå¤ĩ'] +['ä¸ĩ', 'å¤ļ'] +['驾驶', 'åijĺ'] +['ä½ł', 'çŁ¥éģĵ'] +['åĽ½', 'åºĨ'] +['é£Ł', 'åłĤ'] +['æİ¥', 'åı£'] +['æĢ»', 'æķ°'] +['åħ¶ä»ĸ', 'çļĦ'] +['çĶŁåij½', 'çļĦ'] +['ä½ł', 'åľ¨'] +['çļĦ', '缮åħī'] +['è¿Ļ', 'æĸ¹éĿ¢'] +['éĥ½', '说'] +['çĸĹ', 'æ³ķ'] +['åĭĩ', '士'] +['åľ¨', 'åħ¨çIJĥ'] +['ä¿ĿéĻ©', 'åħ¬åı¸'] +['çĿ£', 'æŁ¥'] +['åĸĦ', 'èī¯'] +['表', 'å½°'] +['è¹', '²'] +['è·¯', '段'] +['æľĥåĵ¡', 'è¦ı'] +['æľĥåĵ¡è¦ı', 'ç¯Ħ'] +['æĪ·', 'åŀĭ'] +['ä¿ĥ', '使'] +['ä¿®', '建'] +['é«ĺ', 'æ°´å¹³'] +['åģļ', 'åĩºäºĨ'] +['主', 'åľº'] +['è¡Į', 'èµ°'] +['空', 'çϽ'] +['æľī人', '说'] +['è¿Ļ个', 'ä¸ĸçķĮ'] +['åIJį', 'ä¹ī'] +['å®Į', 'ç¾İçļĦ'] +['羡', 'æħķ'] +['åıĬ', 'åħ¶ä»ĸ'] +['åı¯', 'ç͍'] +['æĭ', 'IJ'] +['è¾ĥ', '大çļĦ'] +['æĬĢæľ¯', 'åĴĮ'] +['å°¼', 'äºļ'] +['çϾ', 'è´§'] +['æı', 'ī'] +['éĢī', 'è´Ń'] +['éĺŁ', 'åıĭ'] +['ä¼ł', 'æĦŁ'] +['ä¼łæĦŁ', 'åύ'] +['åıªè¦ģ', 'ä½ł'] +['为ä»Ģä¹Ī', 'è¦ģ'] +['ä¸ĵ注', 'äºİ'] +['ä½Ļ', 'é¢Ŀ'] +['åħ¸åŀĭ', 'çļĦ'] +['缮åīį', 'å·²'] +['欲', 'æľĽ'] +['èģĶ', '绾'] +['æµģ', 'ä¼ł'] +['çļĦ', 'å®¶åºŃ'] +['åı·', 'åı¬'] +['çıį', 'è´µ'] +['ä¼Ł', '大çļĦ'] +['éī´', 'äºİ'] +['è·Ł', 'ä»ĸ'] +['产', 'çī©'] +['ä¸į', 'å·²'] +['è¿Ŀæ³ķ', 'è¡Į为'] +['头', 'ä¸Ĭ'] +['åĪĨ', 'è§£'] +['åı¯ä»¥', 'çľĭåĩº'] +['æł¡', 'åĮº'] +['åŃĹ', 'ä½ĵ'] +['ä¿®', 'çĤ¼'] +['çĶļèĩ³', 
'æĺ¯'] +['微信', 'åħ¬ä¼Ĺ'] +['åıĸ', '代'] +['èIJ¥ä¸ļ', 'æĶ¶åħ¥'] +['æ½į', 'åĿĬ'] +['ä½ł', 'èĥ½'] +['社ä¼ļ', 'ä¿Ŀéļľ'] +['æ¯ĶèµĽ', 'ä¸Ń'] +['污水', 'å¤ĦçIJĨ'] +['夫', 'å¦ĩ'] +['ä¸Ģ', 'å¹ħ'] +['沿', 'æµ·'] +['åı£', 'æĦŁ'] +['ä½Ĩ', 'åį´'] +['å½ĵ', 'æĹ¥'] +['çļĦ', 'æľĢ大'] +['æ¯ı', 'ä¸Ģä½į'] +['没', 'äºĭ'] +['çī¹', 'åĪ¥'] +['å¼Ģ', 'åѦ'] +['è·¯', 'éĿ¢'] +['å¿ĥçIJĨ', 'åѦ'] +['æĶ¾', 'ç½®'] +['éĩįåºĨ', 'å¸Ĥ'] +['ä½ł', 'èĩªå·±'] +['æ¶Īè´¹èĢħ', 'çļĦ'] +['ä¸Ģ', 'æ³¢'] +['èѦ', 'æĥķ'] +['åį§', '室'] +['注', 'å°Ħ'] +['é£İ', '鼨'] +['沿', 'çĿĢ'] +['åijĬ', '訴'] +['表', 'çݰåĩº'] +['åĽĽ', 'æĺ¯'] +['åı¤', 'åħ¸'] +['æĽ´', 'éĩįè¦ģçļĦ'] +['好', 'äºĭ'] +['çľ¼', '泪'] +['æ¨', 'ĵ'] +['审', 'åΤ'] +['碰', 'æĴŀ'] +['车', 'ç«Ļ'] +['è¿Ľåħ¥', 'äºĨ'] +['éĽĨ', 'åIJĪ'] +['æł¼', 'å¤ĸ'] +['宾', 'é¦Ĩ'] +['æĶ¯ä»ĺ', 'å®Ŀ'] +['她', 'æĺ¯'] +['æĺ¯', 'å¦Ĥä½ķ'] +['人', '次'] +['çļĦ', 'æĪIJåĬŁ'] +['æĹł', 'åĬĽ'] +['æµ·', 'æĭĶ'] +['æĺ¥', 'åŃ£'] +['éĥ½', 'ä¸įä¼ļ'] +['çŃī', 'å¤ļç§į'] +['ä¸Ģ个', 'å°ı'] +['åģľè½¦', 'åľº'] +['让', 'æĽ´å¤ļ'] +['è¿Ļ', 'çĤ¹'] +['æĪIJ', 'åĵģ'] +['éĴ', 'ī'] +['éģĩ', 'è§ģ'] +['çıŃ', '主任'] +['æĦı', 'æĦ¿'] +['çļĦ', 'åIJĮåѦ'] +['游', 'è§Ī'] +['åİĭ', '缩'] +['åľ¨', 'ä¼łå¥ĩ'] +['å¼¹', 'æĢ§'] +['æĹ¥', 'åĨħ'] +['ç¦ı建', 'çľģ'] +['è§Ĵ', 'èIJ½'] +['åĪĨ', 'å¼Ģ'] +['ä¼ļ', '让'] +['å¤ĸ', 'åĽ´'] +['çĨŁæĤī', 'çļĦ'] +['çĨ', 'Ķ'] +['ä¸ĩ', 'è¾Ĩ'] +['å¤ľ', 'éĹ´'] +['车', '身'] +['ä¸Ń', 'æľŁ'] +['å®ĮåĸĦ', 'çļĦ'] +['åĵģ', 'ç±»'] +['åıĭ', 'è°Ĭ'] +['éĢīæĭ', 'Ķ'] +['éªij', '士'] +['å½', '¦'] +['çļĦ', 'çľĭæ³ķ'] +['åĽ½', 'çİĭ'] +['è¾£', 'æ¤Ĵ'] +['åıijå¸ĥ', 'æĹ¶éĹ´'] +['åı¤', 'åŁİ'] +['éļı', 'æľº'] +['ç«', 'ĸ'] +['å¼Ģ', 'è¾Ł'] +['ä¼Ĺ', 'çĶŁ'] +['没', 'åĬŀæ³ķ'] +['åįĥ', 'éĩĮ'] +['æĿ¥æºIJ', 'äºİ'] +['çļĦ', 'æĿĥåĪ©'] +['æ¯Ķ', 'åĪĨ'] +['满æĦı', 'çļĦ'] +['ä¿®', 'è¡Į'] +['åĿ', 'ł'] +['大', 'æµ·'] +['èİ', '¹'] +['åĩº', '身'] +['è«', 'ĩ'] +['åħ³', 'èĬĤ'] +['åIJį', '人'] +['éľĢè¦ģ', '注æĦı'] +['æĹ©', 'æĻ¨'] +['å¤ĸ', 'åįĸ'] +['åıĪ', 'è¦ģ'] +['æ¶ī', 'æ¡Ī'] +['çĶ³è¯·', '人'] +['éĻĦè¿ij', 'çļĦ'] +['åĬłå¿«', 'æİ¨è¿Ľ'] +['æĸ°', 'å¹´'] +['大', 'è¡Ĺ'] +['ä¸Ģ', 'é»ŀ'] 
+['èĭı', 'å®ģ'] +['æĤĦ', 'æĤĦ'] +['èĦ¾', 'æ°Ķ'] +['å¸Į', 'èħĬ'] +['éļı', 'åį³'] +['æķ¢', 'äºİ'] +['å®ŀè·µ', 'ä¸Ń'] +['æĺ¯', '没æľī'] +['æľīè¶£', 'çļĦ'] +['æĿ¥èĩª', 'äºİ'] +['è£ģ', 'åΤ'] +['女', 'åŃ©åŃIJ'] +['èĩ³', 'åħ³'] +['èĩ³åħ³', 'éĩįè¦ģ'] +['æĻº', 'åĬĽ'] +['èµ°', 'åĩºåİ»'] +['çŁŃ', 'æĿ¿'] +['大', 'åĽ½'] +['çļĦ', '认è¯Ĩ'] +['å¹´', 'å¤ľ'] +['åĨį', 'åΰ'] +['åIJĮ', 'æł·çļĦ'] +['å¯Ĩ', 'å°ģ'] +['å¤ĸ交', 'éĥ¨'] +['çĶŁ', 'æķĪ'] +['æĤ¨', 'åı¯ä»¥'] +['ä½ł', 'åĢij'] +['è¿ĩ', 'å¹´'] +['å¼', 'ĵ'] +['è¡Į', 'æĿİ'] +['æ¯Ķ', 'èµ·'] +['身', 'é«ĺ'] +['è¿Ļ个', '人'] +['ä¸Ń', 'å¤ĸ'] +['éģĵ', 'æŃī'] +['çĽ¯', 'çĿĢ'] +['亲', 'åŃIJ'] +['éĹ', '¸'] +['çϽ', 'äºij'] +['èĦĸ', 'åŃIJ'] +['ä¸ĢåĪĩ', 'éĥ½'] +['æ·', 'ij'] +['è°', 'ľ'] +['åģ¶', 'çĦ¶'] +['éĿł', 'è°±'] +['é«ĺ', '管'] +['ä¸ĭ', 'åıij'] +['æĶ¾', 'åΰ'] +['ç±»', 'åĪ«'] +['ä¸ĭ', 'åĪĹ'] +['æ··', 'ä¹±'] +['åIJĪæ³ķ', 'æĿĥçĽĬ'] +['çݯ', 'çIJĥ'] +['æľīæķĪ', 'åľ°'] +['åķĨ', 'æĪ·'] +['æ¹ĸ', '人'] +['æµ·', '岸'] +['æĬķ', '产'] +['两', '个æľĪ'] +['éĥ½', 'éĿŀ常'] +['å¢ŀ强', 'äºĨ'] +['æĿ¥', 'åΰäºĨ'] +['åī©', 'ä½Ļ'] +['æĤ¨çļĦ', 'åŃ©åŃIJ'] +['æµģ', 'æ°´'] +['æŃ£', 'ä¹ī'] +['天', 'çĮ«'] +['åģļ', 'è¿ĩ'] +['ä½ķ', 'æĹ¶'] +['æĪij', 'åİ»'] +['çľģ', '份'] +['å¥ĸ', 'éĩij'] +['该', 'å¦Ĥä½ķ'] +['ä¸ĭ', 'çıŃ'] +['åģ¶', 'åĥı'] +['æijĨ', 'æĶ¾'] +['æĸ°', '模å¼ı'] +['æĬķ', 'è³ĩ'] +['è·¯', 'åı£'] +['åĨľæ°ij', 'å·¥'] +['大', 'åѸ'] +['ä»¶', 'äºĭ'] +['æł¹æľ¬', 'ä¸į'] +['æµĵ', '度'] +['æµĵ', 'åİļ'] +['è½®', 'èĥİ'] +['æĪ¿', 'ä¼ģ'] +['éĿŀ常', '好'] +['ä»İ', 'ä¸Ń'] +['人', 'æł¼'] +['ç¿', 'ģ'] +['æĹ¶éĹ´', 'åĴĮ'] +['è¿Ļ', 'ä¸įæĺ¯'] +['åΏ', 'åķĨ'] +['æĥĬ', '人'] +['åύ', 'å®ĺ'] +['åĩĨ', 'åĪĻ'] +['æĥħ', 'æĻ¯'] +['æĽ´', 'é«ĺçļĦ'] +['åѦ', 'å®¶'] +['泡', '沫'] +['åľ°æĸ¹', 'æĶ¿åºľ'] +['å°±', 'çŁ¥éģĵ'] +['åij¼', 'åIJģ'] +['ç»ı', 'è´¸'] +['èĬ±', 'éĴ±'] +['æľī', 'ä¸Ģ次'] +['æĦŁ', 'æħ¨'] +['ä¸Ģ', 'åįĥ'] +['å¤ľ', 'æĻļ'] +['詹', 'å§Ĩ'] +['詹å§Ĩ', 'æĸ¯'] +['è¦ģ', 'éĹ»'] +['ç»', 'Ĵ'] +['æºIJ', 'äºİ'] +['çļĦ', 'è´¨éĩı'] +['注æĦı', 'äºĭ项'] +['æħ¢', 'æĢ§'] +['稳å®ļ', 'çļĦ'] +['建设', 'åĴĮ'] +['æĻ¯', '象'] +['éĩı', 'åĮĸ'] +['çļĦ', 
'話'] +['è¯Ħ', '级'] +['æº', 'ľ'] +['红', 'åĮħ'] +['éĢļ', 'éģİ'] +['社ä¼ļ', '责任'] +['æĸ°', '产åĵģ'] +['åĨ·', 'éĿĻ'] +['çľĭ', 'ä¸įåΰ'] +['èģĶ', 'éĤ¦'] +['éŃ', 'Ħ'] +['çļĦ', 'åīįæıIJ'] +['çļĦåīįæıIJ', 'ä¸ĭ'] +['è¾ĥ', '好'] +['çļĦ', 'æĦŁæĥħ'] +['客æĪ·', 'æıIJä¾Ľ'] +['çĭ¬', 'èĩª'] +['å¢ŀ', 'æĶ¶'] +['æĸĩ', 'çĮ®'] +['æĭ¼', 'åij½'] +['管çIJĨ', 'åĴĮ'] +['æµģåĬ¨', 'æĢ§'] +['åħ¨', 'å®¶'] +['ä¸Ĭ', 'æĸ¹'] +['æİ¨åĩº', 'çļĦ'] +['ä¸ī', 'åĽ½'] +['ä¸Ģ个', 'æĺ¯'] +['æĸ°', 'ä¸Ģè½®'] +['æĸĩåĮĸ', 'éģĹ产'] +['æ®', 'º'] +['大', 'æ¹¾åĮº'] +['éĥ½', 'éľĢè¦ģ'] +['çļĦ', 'å®ŀéĻħ'] +['ç·', 'Ĭ'] +['大', 'å¥ĸ'] +['åħī', 'èĬĴ'] +['便', 'äºİ'] +['çļĦ', '表æĥħ'] +['æ¼Ķ', 'ç»İ'] +['红', 'åĨĽ'] +['å½ĵ', 'æĪij'] +['æ²»', 'æĦĪ'] +['é¢Ŀ', '度'] +['éĿ', 'ľ'] +['ä»»ä½ķ', '人'] +['è¡Ĺ', '头'] +['çī¹', 'æĸ¯'] +['çĸ¯', 'æĭī'] +['åĮ»çĸĹ', 'æľºæŀĦ'] +['ç»Ļ', 'åŃ©åŃIJ'] +['è§Ħ', '磩'] +['è£', 'ľ'] +['çļĦ', '身影'] +['ä¸ĵ', 'æłı'] +['æĿ¥', '临'] +['ç«¥', 'å¹´'] +['å¤į', 'èĭı'] +['è¨', 'Ĥ'] +['åŀĭ', 'åı·'] +['åĽ¾', 'æ¡Ī'] +['ç®Ģ', 'åİĨ'] +['æĭ', '±'] +['èį·', 'åħ°'] +['ä»»', 'æĦı'] +['æī¿', 'æİ¥'] +['è¿Ļ', 'æīį'] +['客', '车'] +['æľĿ', 'çĿĢ'] +['éłħ', '缮'] +['åı°', 'é£İ'] +['çļĦ', 'æĪ¿åŃIJ'] +['éª', 'ı'] +['æĿ±', '西'] +['éģĹ', 'ä¼ł'] +['è¶Ĭ', 'å¤ļ'] +['äºĨ', 'ä»ĸçļĦ'] +['ä¸Ĭ', 'åij¨'] +['管çIJĨ', 'åĪ¶åº¦'] +['失', 'ä¸ļ'] +['çĶ·', 'åıĭ'] +['æİ¥', 'ç§į'] +['å¨ģ', 'åIJį'] +['çĴ°', 'å¢ĥ'] +['åıijçĶŁ', 'åľ¨'] +['个', 'åĽ½å®¶'] +['åĪĽæĸ°', 'åıijå±ķ'] +['æĶ¹åıĺ', 'äºĨ'] +['åģ¥åº·', 'çļĦ'] +['å̼å¾Ĺ', 'ä¸Ģ'] +['å̼å¾Ĺä¸Ģ', 'æıIJ'] +['åĽ¢', 'ä¼Ļ'] +['åģĩ', '设'] +['åı°', 'ä¸Ĭ'] +['è§ĦèĮĥ', 'åĮĸ'] +['éĻª', 'åIJĮ'] +['座', 'æ¤ħ'] +['åı¯', 'æĢľ'] +['åħĭæĢĿ', '主ä¹ī'] +['æ³ķå¾ĭ', '责任'] +['ä¸Ģ', 'é¡¿'] +['æĬ¬', '头'] +['为', 'éĩįçĤ¹'] +['è¿ľ', 'æ´ĭ'] +['éĢı', 'è¿ĩ'] +['åħ¨çIJĥ', 'åĮĸ'] +['è¶£', 'åij³'] +['票', 'æĪ¿'] +['æ¯ı', '人'] +['åIJĦç§į', 'åIJĦæł·'] +['äºĨ', 'åĩºæĿ¥'] +['ç»Ŀ对', 'æĺ¯'] +['ä¸ĭ', 'å±ŀ'] +['ä¸Ģ', 'åıĮ'] +['è¿Ļ', 'åĿĹ'] +['æĬĹ', 'çĸ«'] +['è¦ģ', 'çĤ¹'] +['å½¢æĪIJ', 'çļĦ'] +['æĪij', 'çľĭ'] +['ä¸ĩ', 'éĩĮ'] +['èĢĥ', 'çłĶ'] +['为', 'åħ¶'] +['æ°ij', 
'宿'] +['å¤ļ', 'ä½į'] +['大', 'èĩ´'] +['ä»ĺ', 'è´¹'] +['åħ¥', 'æīĭ'] +['å±ħ', 'å®¶'] +['æīĢåľ¨', 'åľ°'] +['人', '身'] +['è¿ĩ', 'å¾Ĺ'] +['è¯ķ', 'è¯ķ'] +['访', 'è°Ī'] +['åĬł', 'éĩį'] +['å°±', 'ä¸įä¼ļ'] +['çĶŁäº§', 'ä¼ģä¸ļ'] +['åĽŀ', 'åĽ½'] +['åºķ', '线'] +['èµ¶', 'åΰ'] +['æĶ¯', 'éĺŁ'] +['æĪij们', 'éĥ½'] +['éĤ®', 'æĶ¿'] +['缴', 'èĩ³'] +['éĴ¢', 'çIJ´'] +['åħ', 'ľ'] +['çłĶ讨', 'ä¼ļ'] +['æľĪ', '亮'] +['åĿļæĮģ', '以'] +['åħ¬å®ī', 'éĥ¨'] +['éĴ¢', '管'] +['å°ı', 'çϽ'] +['ç½®', 'ä¸ļ'] +['èģ', 'ĭ'] +['书', 'åĨĻ'] +['æĿ', 'ı'] +['éħį', 'æĸ¹'] +['èĢĮ', 'åıĪ'] +['çijŀ', '士'] +['çķĮ', 'çļĦ'] +['èĢģ', '大'] +['æĪIJçĨŁ', 'çļĦ'] +['å¹²', 'ä»Ģä¹Ī'] +['ä¸ĵ项', 'æĸĹäºī'] +['çŃī', 'å¤ļ个'] +['èĦ±', '离'] +['ä¸ī', '个æľĪ'] +['çłĶç©¶', 'åijĺ'] +['æĹĭ', '转'] +['æŀģ', 'èĩ´'] +['åħį', 'è´£'] +['åħįè´£', '声æĺİ'] +['å¾Īå¤ļ', 'çݩ家'] +['车', 'ä¸Ĭ'] +['交', 'äºĴ'] +['å·²', 'æĺ¯'] +['ä¸Ģ', 'å°ı'] +['çļĦ', 'éĩįçĤ¹'] +['èĬ±', 'äºĨ'] +['ä¸į', 'æĺİ'] +['æľīåħ³', 'è§Ħå®ļ'] +['çĬ¹', 'å¦Ĥ'] +['çľ', '¸'] +['å¯', '¡'] +['çļĦ', 'è¡£æľį'] +['åĮħ', '裹'] +['身', 'åŃIJ'] +['å¸ĪèĮĥ', '大åѦ'] +['äºĭ', 'åħĪ'] +['线', 'æĿ¡'] +['æ³ķ', 'åζ'] +['åħ»', 'æĬ¤'] +['稳å®ļ', 'æĢ§'] +['éĤ', 'µ'] +['åŀĦ', 'æĸŃ'] +['é¡', 'į'] +['èĢĥ', 'åı¤'] +['æĿł', 'æĿĨ'] +['èĭı', 'èģĶ'] +['æ°´', 'ç͵'] +['åħ·ä½ĵ', 'çļĦ'] +['æ¿Ģ', 'æ´»'] +['æĪij', 'æł¡'] +['åĪļ', 'å¼Ģå§ĭ'] +['åĩ¸', 'æĺ¾'] +['ç¦', '¾'] +['åħ¼', 'èģĮ'] +['éĢı', 'éģİ'] +['åľ¨', '游æĪıä¸Ń'] +['社ä¼ļ', 'åıijå±ķ'] +['好', 'çİ©'] +['å¹»', 'æĥ³'] +['ä¸į', '代表'] +['注æĦı', 'åĬĽ'] +['æ£', 'į'] +['ç͍', 'æīĭ'] +['ç¾İ', '人'] +['许å¤ļ', '人'] +['å¾Ī', 'æĺ¯'] +['çļĦ', 'çłĶåıij'] +['æīĵ', 'åĩº'] +['åIJĪä¼Ļ', '人'] +['ä¸Ģ', 'å¤ľ'] +['ç¼ĵ', 'ç¼ĵ'] +['ä¿®', 'æŃ£'] +['æĦŁ', 'çŁ¥'] +['ç»Ī', '身'] +['æ¿Ģ', 'ç´ł'] +['çݯå¢ĥ', 'ä¸ĭ'] +['次', 'ä¼ļè®®'] +['ç»ıæµİ', 'å¢ŀéķ¿'] +['æī', 'Ľ'] +['åıij', 'éħµ'] +['åĪĨæŀIJ', 'å¸Ī'] +['åľ¨', 'æľªæĿ¥'] +['主è¦ģ', 'æľī'] +['ä¸Ģ', 'åŃ£åº¦'] +['çļĦ', '说æ³ķ'] +['ä»İæĿ¥', '没æľī'] +['è´§', '车'] +['缩', 'å°ı'] +['太', 'è¿ĩ'] +['æķĪ', 'åĬĽ'] +['ä¸į', 'ä¸ĭ'] +['æĬķ', '稿'] +['èį¯', 'ä¸ļ'] +['ç»Ħ', 'éķ¿'] 
+['ç«Ļ', 'çĤ¹'] +['å¾Ī', 'åĸľæ¬¢'] +['éIJ', 'µ'] +['åĬ¿', '头'] +['æ¼ı', 'æ´ŀ'] +['æĦ¤', 'æĢĴ'] +['åħħ', 'å®ŀ'] +['åĪĽä¸ļ', 'æĿ¿'] +['çĪ', 'ª'] +['æľª', 'å¿ħ'] +['åºķ', 'éĥ¨'] +['å¾Ĺ', 'åĪĨ'] +['人æ°ij', 'åĮ»éĻ¢'] +['äºĮæīĭ', 'æĪ¿'] +['å·²ç»ı', '被'] +['大', '楼'] +['æĸ°', 'æĪ¿'] +['辦', 'æ³ķ'] +['ç͍', 'åĬĽ'] +['æĭĵ', '宽'] +['åĨħ', 'åľ¨'] +['æĴŃ', 'åĩº'] +['饰', 'æ¼Ķ'] +['ä¹Ł', '让'] +['ä½ľ', 'çĤº'] +['çī©ä¸ļ', '管çIJĨ'] +['åį´', 'ä¸į'] +['为', 'ä¸ŃåĽ½'] +['å±Ģ', 'åĬ¿'] +['ä¸į', 'èĤ¯'] +['æľĢ', 'æĸ°çļĦ'] +['åı¯ä»¥', 'éĢīæĭ©'] +['æĺ¾', 'çݰ'] +['å°±', 'ç®Ĺæĺ¯'] +['åľ¨', 'æł¡'] +['é¾', 'Ł'] +['两', 'æĿ¡'] +['çļĦ', 'å®ŀåĬĽ'] +['è¶Ĭ', '好'] +['她', 'åľ¨'] +['å¿ł', 'è¯ļ'] +['ä¹Ł', 'éľĢè¦ģ'] +['游æĪı', 'æĵįä½ľ'] +['è¶ħ', 'åĩº'] +['å¦Ĥæŀľ', 'ä¸į'] +['æīĢåľ¨', 'çļĦ'] +['ä½ł', 'è¿ĺ'] +['以', 'åĨħ'] +['æľī', 'ä¸Ģå®ļ'] +['åı¯', 'è¾¾'] +['è·ij', 'åΰ'] +['åī', 'Ľ'] +['建ç«ĭ', 'åģ¥åħ¨'] +['æķ´', '车'] +['åīį', 'æĸ¹'] +['éĹ´', 'æİ¥'] +['çѹ', 'å¤ĩ'] +['çĸ²', 'åĬ³'] +['离', 'å¼ĢäºĨ'] +['æ±', 'Ŀ'] +['éĿ¢', 'éĥ¨'] +['ä¹ĭåīį', 'çļĦ'] +['åıĺ', '为'] +['å¦Ĥæŀľ', '说'] +['对', 'ä»ĺ'] +['åĿĩ', 'åı¯'] +['被åijĬ', '人'] +['ç²¾', 'ç¾İ'] +['èģļ', 'ä¼ļ'] +['çĿĢ', 'æĢ¥'] +['è°·', 'æŃĮ'] +['ä¸Ģ', 'åı·'] +['红', 'åĪ©'] +['ä¼łå¥ĩ', '游æĪı'] +['å»', 'ĸ'] +['è´', 'ŀ'] +['ä¹°', 'åΰ'] +['éŃ', 'ļ'] +['ä½ĵ', 'è´¨'] +['å°ij', 'äºĨ'] +['æ³ī', 'å·ŀ'] +['åIJ', 'Ł'] +['ç»Ŀ', 'ä¸į'] +['é»ij', 'æģ¶'] +['é»ijæģ¶', 'åĬ¿åĬĽ'] +['ä¸Ĭ', 'æĺł'] +['çļĦè¯Ŀ', 'é¢ĺ'] +['ä¸ĩ人', '次'] +['ä¸ĸ', 'éĹ´'] +['ç͍', 'å·¥'] +['è´¯', 'ç©¿'] +['å®Ŀ', 'çŁ³'] +['ä½ł', '好'] +['åĪĩ', 'åī²'] +['强', 'åĽ½'] +['åĽŀ', 'èIJ½'] +['æ°´', 'æĻ¶'] +['模', '仿'] +['æ´ª', 'æ°´'] +['éĢĻ', '麼'] +['åįģä¸ī', 'äºĶ'] +['ä½', 'ij'] +['éĻ', 'Ħä»¶'] +['çļĦ', 'å¢ŀéķ¿'] +['éĻĦ', 'å±ŀ'] +['çݰ', 'å·²'] +['帮', 'ä½ł'] +['éĩij', 'çīĮ'] +['é«ĺ', 'åİŁ'] +['åľ¨', 'å®¶éĩĮ'] +['éĺ²', 'èħIJ'] +['ç¡®å®ŀ', 'æĺ¯'] +['宣', '讲'] +['天', 'æīį'] +['ç»ıèIJ¥', '管çIJĨ'] +['éĶħ', 'çĤī'] +['åIJĪ', 'ä¸Ģ'] +['è§Ĥ', 'èµı'] +['éķ¿', 'è¾¾'] +['主ä¹ī', 'æĢĿæĥ³'] +['éĤ£', '麼'] +['é£İ', 'äºij'] +['为主', 'çļĦ'] +['æļij', 
'åģĩ'] +['æĮģ', 'ä¹ħ'] +['å¼Ĥ', 'åľ°'] +['å¼Ģ', 'éŨ'] +['模', 'æĿ¿'] +['æī¹', '次'] +['ä¸į', '便'] +['天', 'çĶŁ'] +['åĩł', '个æľĪ'] +['ä¸ĵ', 'ç§ij'] +['åı¦', 'æľī'] +['åħ¬å¸ĥ', 'çļĦ'] +['æĩ', '·'] +['åľº', 'åIJĪ'] +['çļĦå¿ĥ', 'æĢģ'] +['è¿ĺ', '好'] +['å®ŀ', 'æĪĺ'] +['èĢģå¸Ī', 'çļĦ'] +['åħ©', 'åĢĭ'] +['åı¯', 'åľ¨'] +['éĤ£', 'ä½į'] +['å¥ł', 'å®ļäºĨ'] +['ä¿ĥ', 'éĶĢ'] +['æı´', 'åĬ©'] +['ä¸ĩ', 'çī©'] +['æĥħ', 'æĬ¥'] +['é¦ĸåħĪ', 'è¦ģ'] +['æĸĩåĮĸ', 'åĴĮ'] +['éĥ½', 'å·²ç»ı'] +['ä¸Ĭ', 'ä¸ĸ纪'] +['åĨľ', 'åľº'] +['大', 'æī¹'] +['æĺİçϽ', 'äºĨ'] +['çļĦ', 'æĪIJéķ¿'] +['çļĦ', 'æ¯ĶèµĽ'] +['失', '误'] +['åģļ', 'æĪIJ'] +['ä»Ĭ天', 'å°ıç¼ĸ'] +['é¢Ĩ', 'è¢ĸ'] +['æıIJåįĩ', 'äºĨ'] +['å¾IJ', 'å·ŀ'] +['ä»į', 'æľī'] +['è¿ĩ', '滤'] +['å¹½', 'é»ĺ'] +['çĥŃ', 'éĩı'] +['ä¸Ģ', 'é¦ĸ'] +['æ¼Ĥ亮', 'çļĦ'] +['åĩł', 'ç§į'] +['åĢ¡', 'è®®'] +['å°±åı¯ä»¥', 'äºĨ'] +['æİĴ', 'åĪĹ'] +['éĩį', 'éĩį'] +['ä¼ģä¸ļ', 'åĴĮ'] +['ä¸ĵ', 'å±ŀ'] +['çħ', 'İ'] +['亲', 'æĪļ'] +['çϾåĪĨ', 'ä¹ĭ'] +['稿', 'ä»¶'] +['è¿ĺ', 'å¾Ĺ'] +['人', 'åĵ¡'] +['äºī', '夺'] +['æĽ´', '容æĺĵ'] +['大', 'èĩªçĦ¶'] +['鼻', 'èħ¦'] +['太', '空'] +['åľ°', 'å¤Ħ'] +['å¤', '¢'] +['ä»ĸ', '对'] +['å¿ħ', 'å°Ĩ'] +['ä¸į', 'å½ĵ'] +['严', 'è°¨'] +['åĩº', 'åľº'] +['å·²ç»ı', 'æľī'] +['é¢Ĩ', 'åĨĽ'] +['é«ĺ', 'æ¡£'] +['ä¸Ģ', 'æīĢ'] +['æł', 'Ĺ'] +['让', 'åѦçĶŁ'] +['æĽ¹', 'æĵį'] +['æŁIJ', 'ä¸Ģ'] +['伸', 'åĩº'] +['èĬ±', 'åįī'] +['æ¸ħ', 'éĨĴ'] +['èģĶç³»', 'æĸ¹å¼ı'] +['åĪĨ', 'å±Ģ'] +['èħ', '³'] +['æ©¡', 'èĥ¶'] +['éķ¿', 'å¾Ĺ'] +['绿', 'åľ°'] +['è¢', 'į'] +['çļĦ', 'èīºæľ¯'] +['女', 'æľĭåıĭ'] +['ä¸Ń', 'è¶ħ'] +['离', 'åŃIJ'] +['å¤ļæł·', 'åĮĸ'] +['éĺ³', 'åı°'] +['ä½İ', '碳'] +['ä¸Ģ', 'ç±»'] +['çŃīæĸ¹éĿ¢', 'çļĦ'] +['å¾Ĺ', '好'] +['模', 'åħ·'] +['ä¸ĩ', '亿'] +['çķĻ', 'æĦı'] +['临', 'æ²Ĥ'] +['å°ij', 'éĩı'] +['çľĭ', 'åIJij'] +['ç»ıèIJ¥', 'èĢħ'] +['çķĻä¸ĭ', 'äºĨ'] +['åĿı', 'äºĨ'] +['åijĬ', 'åĪ«'] +['羣', 'çIJĨ'] +['ç¼´', 'è´¹'] +['æĬĬ', 'ä½ł'] +['çļĦ', 'ä»»åĬ¡'] +['æĪij', '对'] +['ä¹°', 'åħ¥'] +['çĻ»', 'ä¸Ĭ'] +['æľī', '两个'] +['ä¸Ģ', '头'] +['æĵį', 'æİ§'] +['åħ¨', 'è¦ĨçĽĸ'] +['çĿĢ', 'æīĭ'] +['å¢Ļ', 'éĿ¢'] +['å¤ļ', 'æĸ¹'] 
+['åı¯çα', 'çļĦ'] +['ä¹Ł', 'åı¯èĥ½'] +['æľĢ', 'æľī'] +['è¿ĻäºĽ', 'éĥ½æĺ¯'] +['æĥ', '¡'] +['å®', '®'] +['å¾Ī', 'å°ı'] +['éĹ®é¢ĺ', 'æĺ¯'] +['åĿĩ', 'æľī'] +['å¾ģ', 'éĽĨ'] +['说', 'åĩº'] +['æľī', 'æĦı'] +['é¢', 'Ĥ'] +['æī¬', 'å·ŀ'] +['åķĨä¸ļ', '模å¼ı'] +['çĶŁ', 'èĤĸ'] +['æįIJ', '款'] +['å²', 'Ĥ'] +['ç¾İ', 'æĻ¯'] +['è¿ĺ', '羣'] +['æĭ¥', 'æĬ±'] +['身ä½ĵ', 'åģ¥åº·'] +['æ·±', 'å¤Ħ'] +['çľ¼', 'ç¥ŀ'] +['çļĦ', '形象'] +['ä¼ĺ', 'è¶Ĭ'] +['å½ĵ', 'æĪIJ'] +['åĮº', 'åĪĨ'] +['åİ»', 'éϤ'] +['注', 'å®ļ'] +['å§IJ', '妹'] +['åĮº', 'åĨħ'] +['é©', 'ļ'] +['æļĹ', '示'] +['æĺİ', '亮'] +['æħ°', 'éĹ®'] +['å¸Ĥåľº', '份é¢Ŀ'] +['çĮª', 'èĤī'] +['çļĦ', 'èµĦéĩij'] +['åİĨ', 'ç»ı'] +['å§ĭç»Ī', 'åĿļæĮģ'] +['çĶŁ', 'æľº'] +['ä¸į', '顾'] +['éĩij', 'åĪļ'] +['大', '声'] +['éĻķ', '西çľģ'] +['é²', 'į'] +['åĨľä¸ļ', 'åĨľæĿij'] +['æľī', '害'] +['éŨ', 'è¯Ĭ'] +['æ¯ı', 'ä¸Ģ次'] +['çļĦ', 'åĽłç´ł'] +['é¢Ŀ', 'å¤ĸ'] +['åİ¿', '级'] +['çļĩ', 'åIJİ'] +['åĽ½', 'ä¼ģ'] +['é¦ĸ', 'éĢī'] +['ç¼ĸ', 'åĨĻ'] +['æĭ¿', 'èµ·'] +['åģ·', 'åģ·'] +['ä¸İ', 'ä¸ŃåĽ½'] +['åįĸ', 'å®¶'] +['ç»Ļ', 'ä»ĸ们'] +['ç¥ŀ', 'è¯Ŀ'] +['åѸ', 'æł¡'] +['æĪij', 'ä¸Ģ缴'] +['çŁ¥éģĵ', 'äºĨ'] +['åį', 'Ĵ'] +['åĴĮ', 'åľ°åĮº'] +['ä»Ģä¹Ī', 'éĥ½'] +['çĶ»', 'å®¶'] +['æľ¬', 'çĿĢ'] +['ä½Ļ', 'åIJį'] +['审', 'çIJĨ'] +['ä¸Ģ', 'åIJij'] +['åıijå±ķ', 'è¶ĭåĬ¿'] +['åĮº', 'éĹ´'] +['注åĨĮ', 'èµĦæľ¬'] +['çIJ', '¦'] +['ä¸į', 'åı¯ä»¥'] +['çļĦ', 'åĦ¿åŃIJ'] +['å̼', 'çıŃ'] +['ä¸¥æł¼', 'çļĦ'] +['å®ŀä½ĵ', 'ç»ıæµİ'] +['æľī', 'æĿĥ'] +['æĪij', 'åıĪ'] +['éĵ¶', 'æ²³'] +['ç«ĭ', '马'] +['æĿĢ', 'äºĨ'] +['åĮħ', '容'] +['管', 'å®¶'] +['身', 'é«Ķ'] +['éĵ', 'ħ'] +['å°ı', 'åŃIJ'] +['管çIJĨ', 'ç³»ç»Ł'] +['æľīçļĦ', '人'] +['é£İ', 'ç͵'] +['æĻºèĥ½', 'åζéĢł'] +['ç²¾', 'ç¡®'] +['æĭĽåķĨ', 'å¼ķ'] +['æĭĽåķĨå¼ķ', 'èµĦ'] +['äºĮæīĭ', '车'] +['åİ¿', 'å§Ķ'] +['èīº', '人'] +['å¥', 'ķ'] +['è¿İ', 'æĿ¥äºĨ'] +['ç»ĵæĿŁ', 'äºĨ'] +['çļĦ', 'ä¼łç»Ł'] +['æĭ¼', 'æIJı'] +['奥', '迪'] +['çĸij', 'æĥij'] +['ä¹ĭ', 'æĹ¥èµ·'] +['æłĩå¿Ĺ', 'çĿĢ'] +['åľ°', 'åįĢ'] +['è¯ł', 'éĩĬ'] +['åΰ', 'æľŁ'] +['åħ¨', 'éĥ½'] +['çŁŃ', 'æļĤ'] +['æĺ¯', 'æĪijåĽ½'] +['æĪij', 'å·²ç»ı'] 
+['æ»´', 'æ»´'] +['天', 'èµĭ'] +['对', '她'] +['åį«çĶŁ', 'éĹ´'] +['çĶŁäº§', 'åŁºåľ°'] +['æĹ¥', 'è®°'] +['çļĦ', 'æķĻåѦ'] +['åĵ', 'ĩ'] +['æ°ij', 'äºĭ'] +['è¿ĺ', 'åİŁ'] +['æīĭ', 'ä¸ŃçļĦ'] +['çļĦ', 'èī¯å¥½'] +['æ·', '«'] +['ä¸Ńåħ±', 'ä¸Ń央'] +['åĪ', 'ĥ'] +['åĵ', 'Ħ'] +['åľ¨', 'ä»ĸçļĦ'] +['å°Ī', 'æ¥Ń'] +['åľº', 'éĿ¢'] +['éĤ»', 'å±ħ'] +['çĹ', 'Ĵ'] +['å¦', 'Ħ'] +['å¤ĸ', 'ç§ij'] +['ä¸į', 'éĢĤ'] +['举åĬŀ', 'çļĦ'] +['é', 'Ĥ¹'] +['åħļçļĦ', '建设'] +['çϼ', '表'] +['è·¨', 'çķĮ'] +['æ²ī', 'æ·Ģ'] +['大', 'çīĩ'] +['è¶Ĭ', 'é«ĺ'] +['å°Ĩ', 'æĺ¯'] +['è§ī', 'éĨĴ'] +['åĤ¨', 'åŃĺ'] +['å¢ŀ', '大'] +['ä¸į', '让'] +['æķ´', 'å½¢'] +['å¹³åı°', 'ä¸Ĭ'] +['åĩł', 'ä½į'] +['è¯ī', 'æ±Ĥ'] +['好', 'ä¸į好'] +['åľ', 'į'] +['æĸĩ', 'æľ¬'] +['é̲', 'åħ¥'] +['ç´', 'į'] +['æł¹', 'æĵļ'] +['èįī', 'æ¡Ī'] +['åħŃ', '个'] +['åĭ', '¿'] +['åζ', 'æĪIJ'] +['饮', 'æ°´'] +['æ°¸', 'æģĴ'] +['èĩª', 'æĿĢ'] +['åı¸', '马'] +['éļ¾', 'çĤ¹'] +['为', 'æĪij们'] +['å¼', '§'] +['åī©', 'ä¸ĭçļĦ'] +['åĩĨå¤ĩ', '好'] +['çļĦ', 'æľĢä½³'] +['èģĶåIJĪ', 'ä¼ļ'] +['æĤ£èĢħ', 'çļĦ'] +['æĪijä¸į', 'çŁ¥éģĵ'] +['ä¸ĭ', 'ä¸Ģ个'] +['åıijå±ķ', 'æĸ¹åIJij'] +['ç¬', '¨'] +['æīĢ以', 'æĪij们'] +['åĨĻ', 'äºĨ'] +['éĢł', 'æĪIJäºĨ'] +['æ²Ļ', 'æ¼ł'] +['çŃĽ', 'éĢī'] +['çģ¾', 'åĮº'] +['ä¸Ĭ', 'çľĭ'] +['éħ', '¶'] +['æ»ļ', 'åĬ¨'] +['éļ¾', 'åħį'] +['åIJī', 'åĪ©'] +['ä¸Ģ', 'ä¸Ģ'] +['ç²¾', 'å¯Ĩ'] +['伸', 'æīĭ'] +['礼', '仪'] +['åħ¨', 'æĺ¯'] +['è¶Ĭ', '大'] +['ä¸Ń', 'æłĩ'] +['åıĸ', 'åĨ³'] +['åıĸåĨ³', 'äºİ'] +['éĢĶ', 'ä¸Ń'] +['讨', 'åİĮ'] +['æīĭ', 'åĨĮ'] +['第', 'ä¹Ŀ'] +['åŃĶ', 'åŃIJ'] +['çĦ¶', 'å¾Į'] +['ä¸Ģ', 'åħ±'] +['æµ·', 'æĬ¥'] +['款', 'å¼ı'] +['æķ´', '天'] +['è¾¹', 'çķĮ'] +['è·¯', 'è¾¹'] +['æĻĭ', '级'] +['åIJIJ', 'æ§½'] +['çļĦ', 'åħ³æ³¨'] +['æĪij', '没æľī'] +['å°±æĺ¯', 'åľ¨'] +['缮', 'çļĦæĺ¯'] +['åį³ä½¿', 'æĺ¯'] +['é¡¶', 'å°ĸ'] +['å·²ç»ı', 'åľ¨'] +['å®īåħ¨', 'éļIJæĤ£'] +['æłĩ', 'æĿĨ'] +['åįĹ', 'éĢļ'] +['ä¼ļ', '对'] +['座', 'ä½į'] +['èµ¢å¾Ĺ', 'äºĨ'] +['åİŁæĿ¥', 'çļĦ'] +['身', '为'] +['书', 'åºĹ'] +['è¢Ń', 'åĩ»'] +['ä»Ĭ', 'æĻļ'] +['以', 'èī²'] +['以èī²', 'åĪĹ'] +['æĬĸ', 'éŁ³'] +['åį´', '没æľī'] +['丧', '失'] 
+['çļĦ', 'å±ĢéĿ¢'] +['åįģåĽĽ', 'äºĶ'] +['çŃī', '缸åħ³'] +['æ±ĩ', 'æĢ»'] +['å¤ĸ', '表'] +['为', 'æ°ij'] +['éľĩ', 'æĥĬ'] +['å¥Ĺ', 'è·¯'] +['çĬ¯ç½ª', 'å«Įçĸij'] +['å°Ĩ', '以'] +['çİĩ', 'é¢Ĩ'] +['éħĴ', 'åIJ§'] +['è¡Įä¸ļ', 'åıijå±ķ'] +['å¹´', 'èĩ³'] +['åύ', 'æĿIJ'] +['åĴĮ', 'æĬĢæľ¯'] +['æľĢ', 'å°ı'] +['è¿Ļä¸Ģ', 'åĪĩ'] +['èģĮ', 'ç§°'] +['å½ĵ', 'ä½ľ'] +['æİĢ', 'èµ·'] +['åĴ', 'ĭ'] +['ä¸Ń', 'éĥ¨'] +['æīĭ', 'èĩĤ'] +['ç½¢', 'äºĨ'] +['媳', 'å¦ĩ'] +['æ´½', 'è°Ī'] +['æĹ¶ä»£', 'ä¸ŃåĽ½'] +['人çĶŁ', 'çļĦ'] +['æŀģ', 'éĻIJ'] +['ç¦', 'Ħ'] +['åĮº', 'æĶ¿åºľ'] +['æľ¬', 'éĴ±'] +['礼', 'åĵģ'] +['çļĦ', 'éĤ£ä¸ª'] +['侦', 'æŁ¥'] +['太å¤ļ', 'çļĦ'] +['å®ŀæĸ½', 'æĸ¹æ¡Ī'] +['é«ĺ', 'æłĩåĩĨ'] +['æĮĩæĮ¥', 'éĥ¨'] +['å̾', 'æĸľ'] +['çī¹èī²', '社ä¼ļ'] +['çµIJ', 'æŀľ'] +['éĴ»', 'çŁ³'] +['ç§»', 'æ¤į'] +['çī¹', 'ç§į'] +['èĩª', 'æĦ¿'] +['æĭľ', 'çĻ»'] +['åįķ', '身'] +['åį´', 'åıĪ'] +['åĪ¥', '人'] +['åIJĪ', 'è§Ħ'] +['æľº', 'ç͵'] +['çī¹', 'æĦı'] +['å½ĵåīį', 'ä½įç½®'] +['ä¹°', 'å®¶'] +['åIJĪ', '约'] +['èĤ©', 'èĨĢ'] +['为', 'åĩĨ'] +['å®¶', 'è£ħ'] +['çļĦ', 'çĥŃæĥħ'] +['éĿŀ', 'éģĹ'] +['çļĦ', 'éŃħåĬĽ'] +['åİŁ', 'åijĬ'] +['社ä¼ļ', 'åIJĦçķĮ'] +['ä¹°', 'çļĦ'] +['å¤ļ', 'åIJĥ'] +['éĽķ', 'å¡ij'] +['èµ·', 'ä¹ī'] +['åĬł', 'åī§'] +['éĤ£ä¸Ģ', 'åĪ»'] +['å°Ĩ', 'è¿Ľä¸ĢæŃ¥'] +['æ¡Ĥ', 'æŀĹ'] +['æĽ´', '强'] +['对', 'ä¼ģä¸ļ'] +['æĹł', 'æĦı'] +['ä¹łè¿ijå¹³', 'æĸ°'] +['æµģ', '失'] +['å¾®', '软'] +['缸', '对äºİ'] +['座è°Ī', 'ä¼ļ'] +['主', 'èIJ¥ä¸ļ'] +['主èIJ¥ä¸ļ', 'åĬ¡'] +['ç§ģ', 'åĭŁ'] +['å±ķ示', 'äºĨ'] +['常æĢģ', 'åĮĸ'] +['è²', '´'] +['符', 'åı·'] +['å¹´è½»', 'çļĦ'] +['å°±', 'éľĢè¦ģ'] +['ä¹Ł', 'æĽ¾'] +['çļĦæĥħ', '绪'] +['è¾¾', 'æłĩ'] +['èĩ', '¨'] +['ä½į', 'å±ħ'] +['ä»ħ', '为'] +['é¦ĸ', 'å®¶'] +['éĺ´', 'éĺ³'] +['ä¸įåĨį', 'æĺ¯'] +['åĽłä¸º', 'å®ĥ'] +['ä¼ģä¸ļ', 'åľ¨'] +['çĺ', '¾'] +['åIJ¬', 'è§ģ'] +['åİŁ', 'æľī'] +['åζ', 'è£ģ'] +['å¯Ĥ', 'å¯ŀ'] +['éĢļè¿ĩ', '对'] +['æ»ij', 'éĽª'] +['è¿Ļ', 'å¼ł'] +['çļĦ', 'çIJĨè§£'] +['æĸ°', 'ä¸ŃåĽ½'] +['è¿Ļ', 'åĦ¿'] +['ä½İ', 'ä»·'] +['æĥ³', 'è¿ĩ'] +['çļĦ', 'ä¿¡å¿ĥ'] +['建çŃij', 'çī©'] +['çļĦ', 'é¢ľèī²'] +['ä¸į', 'åºĶ该'] +['æĹłçĸij', 
'æĺ¯'] +['å¼ķèµ·', 'äºĨ'] +['åħ¨', 'åijĺ'] +['æĿ°', 'åĩº'] +['è¿Ļæĺ¯', 'æĪij'] +['èª', '°'] +['èĺ', 'ĩ'] +['éĺµ', 'åľ°'] +['åħħ', 'å̼'] +['çŁ¿', 'ä¸ļ'] +['çĿĢ', 'ä»ĸ'] +['ä¿¡', '访'] +['ä¸ĩ', 'è¾¾'] +['æij©', 'æĵ¦'] +['å¼Ģ', '端'] +['èı²', 'å¾ĭ'] +['èı²å¾ĭ', '宾'] +['车', 'åŃIJ'] +['æľ¬èº«', 'çļĦ'] +['çģ«è½¦', 'ç«Ļ'] +['常', 'å·ŀ'] +['为', '代表'] +['为代表', 'çļĦ'] +['广', 'ç͵'] +['亲', '人'] +['åı³', 'æīĭ'] +['éĽĨ', 'è£ħ'] +['éĽĨè£ħ', 'ç®±'] +['çļĦ', 'åį°è±¡'] +['æ©Ł', 'æľĥ'] +['åĮĨ', 'åĮĨ'] +['åħī', 'ç͵'] +['大', 'æĸ¹'] +['è¿ĺ', 'æľª'] +['åĪ©', '好'] +['ç»Ŀ', '大å¤ļæķ°'] +['åľ¨', 'è¿Ļç§į'] +['ä¸Ģ', 'ç»Ħ'] +['æĸ°', 'èĤ¡'] +['转', 'åıij'] +['æ³ķ', 'åºŃ'] +['æĹł', 'æīĢ'] +['éģĵ', 'è·¯ä¸Ĭ'] +['çŁ¿', 'å±±'] +['èij', 'ī'] +['æĶ¶', 'åĽŀ'] +['ç§°', 'ä¹ĭ'] +['ç§°ä¹ĭ', '为'] +['æıŃ', 'éľ²'] +['åı£', '岸'] +['åIJ', '¼'] +['å¿ĥ', 'æĥ³'] +['çļĦ', '梦æĥ³'] +['éĽ', '¯'] +['ä¹ĭ', 'åĪĿ'] +['å¥ĸ', '项'] +['订', 'éĺħ'] +['èĵĿ', '天'] +['åĿ¦', 'åħĭ'] +['ç«ĭ', 'æ¡Ī'] +['èģĶ', 'æīĭ'] +['ä½Ĩæĺ¯', 'æĪij'] +['帮', 'æĪij'] +['ä»ħ', '代表'] +['说', 'æĪij'] +['çļĦ', 'è¶ĭåĬ¿'] +['æ¯Ķè¾ĥ', '大'] +['èµ°', 'å»Ĭ'] +['éĩįçĤ¹', 'é¡¹çĽ®'] +['èµĮ', 'åľº'] +['åIJį', 'çīĩ'] +['æĦŁ', 'åı¹'] +['åľ¨', 'åľ°ä¸Ĭ'] +['åıij', 'çĥŃ'] +['èĮĥ', 'çķ´'] +['çļĦ', 'éģĵè·¯'] +['éĩij', 'èī²'] +['ä»ĸ', 'åıĪ'] +['ä¼ļ', '产çĶŁ'] +['æ°ij', 'åĽ½'] +['å®ĺæĸ¹', 'ç½ijç«Ļ'] +['æĶ¶çĽĬ', 'çİĩ'] +['çļĦ', 'åΰæĿ¥'] +['çļĦ', 'åĬŀæ³ķ'] +['æĶ¹', 'åζ'] +['ä¸ĩ', 'ç§ij'] +['ä¸į', 'äºĪ'] +['è¿ĻäºĽ', 'éĹ®é¢ĺ'] +['çα', 'ä¸Ĭ'] +['çIJĥ', 'åľº'] +['è´£', '令'] +['æİĪ', '课'] +['åľ¨', 'é¦Ļ港'] +['ç»Ĩ', 'èħ»'] +['å¤ļ', 'ä¸ĩ'] +['åIJĮ', 'å¹´'] +['大', '使'] +['æĸ', 'ĭ'] +['ä¹Ł', '为'] +['æĥł', 'å·ŀ'] +['åIJī', '祥'] +['çͰ', 'åĽŃ'] +['åĽ½å®¶', 'éĺŁ'] +['éĩį', 'çĶŁ'] +['åľ¨', 'åħ¶'] +['é¦Ļ', 'åij³'] +['è´Ł', 'èį·'] +['亲', 'åĪĩ'] +['èĩª', '豪'] +['没', 'éĶĻ'] +['åĽłä¸º', 'åľ¨'] +['æĺŁ', 'æĺŁ'] +['éĤ', 'ij'] +['è¿ĺæľī', 'å¾Īå¤ļ'] +['æij©', 'æīĺ'] +['æij©æīĺ', '车'] +['æŃ¥', 'è¡Į'] +['管çIJĨ', 'ä½ĵç³»'] +['èĦļ', 'ä¸ĭ'] +['éģİ', 'åİ»'] +['æ±ī', 'è¯Ń'] +['对', 'ä¸įèµ·'] +['çļĦ', 'ç»ıåİĨ'] 
+['åıĬ', '缸åħ³'] +['ä¸įå°ij', '人'] +['éĩį', 'ç£ħ'] +['åĬ³åĬ¨', 'èĢħ'] +['大åĬĽ', 'åıijå±ķ'] +['æĢİä¹Ī', 'åģļ'] +['çĭĹ', 'çĭĹ'] +['举åįĹ', 'äºļ'] +['åĭĩ', 'äºİ'] +['åħ¬', 'éĸĭ'] +['çĵ·', 'çłĸ'] +['åıĤ', 'çħ§'] +['广æĴŃ', 'ç͵è§Ĩ'] +['举', 'åĬ¨'] +['æ±Ł', '西çľģ'] +['æķĪ', 'èĥ½'] +['å͝', 'æľī'] +['éĿ¢', 'è²Į'] +['èĩªåĬ¨', '驾驶'] +['æ¦ľ', 'åįķ'] +['å½ĵ', 'æĪij们'] +['仲', 'è£ģ'] +['æľ¨', 'æĿIJ'] +['ç±³', 'åħ°'] +['çϽ', 'éĵ¶'] +['çļĦ', '人éĥ½'] +['å°±', 'åĥıæĺ¯'] +['æŃ¥', 'åħ¥'] +['åįł', 'ç͍'] +['åĩ»', 'è´¥'] +['让', '大家'] +['ä¼ļ', 'è®©ä½ł'] +['åİ¿', 'æĶ¿åºľ'] +['è¦ģ', 'ç͍'] +['çŃī', 'å½¢å¼ı'] +['åįĩ', 'é«ĺ'] +['责任', 'æĦŁ'] +['å¤ĩ', 'ç͍'] +['ä»ĸ', '认为'] +['æ¸ħåįİ', '大åѦ'] +['ä»ĸ', 'èĩªå·±'] +['éĸ±', 'è®Ģ'] +['太平', 'æ´ĭ'] +['éĶģ', 'å®ļ'] +['çŃ', 'Ĩ'] +['è¿Ļ', 'çīĩ'] +['æī§', 'æĶ¿'] +['è¿ĶåĽŀ', 'æIJľçĭIJ'] +['å°±', 'æŃ¤'] +['éģĩ', 'åΰäºĨ'] +['å¼Ģå¹ķ', 'å¼ı'] +['管çIJĨ', 'éĥ¨éŨ'] +['å§¿', 'åĬ¿'] +['设', 'æĥ³'] +['åĽĽ', 'åŃ£'] +['æĬĢæľ¯', '人åijĺ'] +['å·®', 'çĤ¹'] +['è¾ŀ', 'èģĮ'] +['èĢģ', '師'] +['çļĦ', 'æĦŁåıĹ'] +['ä¹Ł', 'éĿŀ常'] +['å¹´', 'ä¸ĬåįĬå¹´'] +['æĢª', 'çī©'] +['èĮĥ', 'æĸĩ'] +['æĪĺ', 'å½¹'] +['åIJ«', 'ä¹ī'] +['åħ¨', 'è¿ĩç¨ĭ'] +['èĢĮ', 'éĿŀ'] +['éĢļ讯', 'åijĺ'] +['è¿Ļæł·', 'æīįèĥ½'] +['æľº', 'ç»Ħ'] +['è£', 'ı'] +['çķ¶', 'çĦ¶'] +['èµĮ', 'åįļ'] +['åIJĦ', 'æľī'] +['å·¥ä½ľ', 'æľºåζ'] +['äºĭ', 'åIJİ'] +['åī§', 'éĻ¢'] +['å±Ĭ', 'æĹ¶'] +['åĺ´', 'éĩĮ'] +['主', '线'] +['ä¸Ģ', 'åľĪ'] +['主è¦ģ', 'åİŁåĽł'] +['å°¸', 'ä½ĵ'] +['åĮ»çĸĹ', 'åĻ¨æ¢°'] +['ä½ł', 'æĢİä¹Ī'] +['ä½Ĩ', 'çͱäºİ'] +['æĹ¶', '空'] +['çĶ·', 'æľĭåıĭ'] +['çĶľ', 'èľľ'] +['é«ĺ', 'åľ°'] +['æĻ', 'ĸ'] +['èĴIJ', 'éĽĨ'] +['åĩĿèģļ', 'åĬĽ'] +['å¤ĩ', 'åıĹ'] +['æĸĩ', 'åĪĽ'] +['马', 'æĿ¥'] +['马æĿ¥', '西äºļ'] +['æŁ´', 'æ²¹'] +['使', '人'] +['æķĻ', 'ä¼ļ'] +['ç§ĭ', '天'] +['æĺİ', 'çıł'] +['åħŃ', 'åįģ'] +['çݯå¢ĥ', 'ä¸Ń'] +['æ¸ħ', 'æĻ¨'] +['积æŀģ', 'åıĤä¸İ'] +['å·ħ', 'å³°'] +['为', 'æľŁ'] +['çѾ', 'åŃĹ'] +['æĦŁ', 'æ¿Ģ'] +['ç§ĭ', 'åŃ£'] +['æĿij', 'åŃIJ'] +['æ¢ħ', '西'] +['æļ´', '鼨'] +['çĶŁæ´»', 'åľ¨'] +['çªĹ', 'æĪ·'] +['æģ¶', 'åĬ£'] +['纯', 'ç²¹'] +['åľ¨', 
'æİ¥åıĹ'] +['没', 'èĥ½'] +['è¡Į', '人'] +['åĭ', 'º'] +['æĭ¨', 'æīĵ'] +['ä½ľ', 'åĩºäºĨ'] +['çļĦ', '主é¢ĺ'] +['æľª', 'ä¾Ĩ'] +['ä¸Ń', 'æľĢ'] +['æ¾', 'ľ'] +['é«ĺ', 'è¡Ģåİĭ'] +['åħ´', 'èµ·'] +['æŃ£', 'èĥ½éĩı'] +['åŁ¹è®Ń', 'çıŃ'] +['æİ¥', 'åħ¥'] +['çĦ¶åIJİ', 'åĨį'] +['åѦçĶŁ', '们'] +['é¢ĨåħĪ', 'çļĦ'] +['çģ«', 'çĥŃ'] +['ä¸ĵ', 'èģĮ'] +['æĪĸèĢħ', '说'] +['建', 'è¨Ń'] +['é»', 'ı'] +['对', 'åħ¬åı¸'] +['çī¹', 'æľīçļĦ'] +['åħī', 'èį£'] +['å½ĵ', 'åľº'] +['éĿ¢', 'åŃIJ'] +['èµĦ产', '管çIJĨ'] +['æĹ¶æľŁ', 'çļĦ'] +['çŀ', 'İ'] +['åįİ', '举'] +['åıĪ', 'ä¸Ģ次'] +['èĥİ', 'åĦ¿'] +['å®ļ', 'çĤ¹'] +['头', 'çĹĽ'] +['æ¶²', 'ä½ĵ'] +['æĺ¯ä¸Ģ', 'ä½į'] +['帽', 'åŃIJ'] +['å¹´', 'èµ·'] +['ä¸į', 'ä½İäºİ'] +['è¾ĥ', 'å°ij'] +['éĿ¢ä¸´', 'çĿĢ'] +['å±Ĥ', 'å±Ĥ'] +['èĿ´', 'èĿ¶'] +['èī°', 'èĭ¦'] +['éĺ¿', 'æł¹'] +['éĺ¿æł¹', 'å»·'] +['æ¦Ĥ', 'æĭ¬'] +['请', 'éĹ®'] +['èµ·', 'åºĬ'] +['å±Ģ', 'å±Ģéķ¿'] +['稳', 'åģ¥'] +['å¦Ĥæŀľ', 'æĪij们'] +['éħĴ', 'ç²¾'] +['æĪ·', 'åı£'] +['æĦŁ', 'æĤŁ'] +['æĪij们', 'éľĢè¦ģ'] +['æĬĢ', 'èīº'] +['èĩª', 'åªĴä½ĵ'] +['è¿Ľ', 'åĮĸ'] +['æ¿ĢçĥĪ', 'çļĦ'] +['ä½ĵ', '温'] +['èļ', 'ķ'] +['èĩ´', 'è¾ŀ'] +['宪', 'æ³ķ'] +['ä¸Ģ', 'çŃīå¥ĸ'] +['çĵ¶', 'é¢Ī'] +['æĥł', 'æ°ij'] +['èµ°', 'è·¯'] +['çݰ', 'ä»»'] +['åķĨ', 'éĩı'] +['ä¸ĭ', '车'] +['åĪ', 'ł'] +['責', 'ä»»'] +['èŀįåIJĪ', 'åıijå±ķ'] +['ç´ł', 'æĿIJ'] +['æ²¹', 'ä»·'] +['åģļ', '人'] +['çŀ', 'ª'] +['æĶ¹éĿ©', 'åĪĽæĸ°'] +['çļĦ', 'åĮºåĪ«'] +['è·¨å¢ĥ', 'ç͵åķĨ'] +['æ¶īåıĬ', 'åΰ'] +['æīĺ', '管'] +['æĪij', 'è¿ĺæĺ¯'] +['åĿIJ', 'æłĩ'] +['ç½ij', '讯'] +['å½ĵåľ°', 'çļĦ'] +['追', '溯'] +['åľŁ', 'è̳'] +['åľŁè̳', 'åħ¶'] +['åºķ', 'ä¸ĭ'] +['åĩł', 'åįģå¹´'] +['ç©¿', 'è¿ĩ'] +['çĶŁæĢģ', 'æĸĩæĺİ'] +['æİ¨', 'èĸ'] +['æİ¨èĸ', '¦'] +['éł', 'Ĩ'] +['åĴ³', 'åĹ½'] +['åĪĨ', 'æĪIJ'] +['çĹķ', '迹'] +['æĪ·', 'ç±į'] +['éĥ½', 'ä¸įèĥ½'] +['æĻļ', 'ä¼ļ'] +['åĢ', '©'] +['ä½ĵ', 'åĬĽ'] +['è¿Ļ个', 'èģĮä¸ļ'] +['æĹł', 'å½¢'] +['åıª', 'æĥ³'] +['è¿Ľ', 'åıĸ'] +['æĿĢ', 'æŃ»'] +['èĦ', 'Ĭ'] +['äºij', 'åįĹçľģ'] +['æľª', 'çŁ¥'] +['ç¾İ', 'èģĶ'] +['ç¾İèģĶ', 'åĤ¨'] +['å¤ĸ', 'å½¢'] +['诱', 'æĥij'] +['çĽ', '£'] +['è¡Į', '使'] +['åłĨ', '积'] 
+['çĨŁ', 'ç»ĥ'] +['éĺIJ', 'è¿°'] +['æľĢ大', 'éĻIJ度'] +['å·¡', 'æŁ¥'] +['夺', 'åĨł'] +['ä¼ģä¸ļ', 'æĸĩåĮĸ'] +['çĭ®', 'åŃIJ'] +['ä¿Ŀ', 'å®Ī'] +['ä¸ºæł¸å¿ĥ', 'çļĦ'] +['æī©', 'æķ£'] +['åζéĢł', 'åķĨ'] +['æŁĶ', '软'] +['为ä¸Ģä½ĵ', 'çļĦ'] +['游', 'çİ©'] +['çĶŁ', 'çĹħ'] +['幫', 'åĬ©'] +['åͱ', 'æŃĮ'] +['æīį', 'åı¯ä»¥'] +['宽', 'æĿ¾'] +['è¦ģ', 'æ¯Ķ'] +['æĺ¯', 'æĢİæł·'] +['çģ°', 'èī²'] +['çİĭ', 'åĽ½'] +['æIJħ', 'æĭĮ'] +['计', 'éĩı'] +['åij¨åĽ´', 'çļĦ'] +['æĻºèĥ½', 'æīĭæľº'] +['常', 'åĬ¡'] +['常åĬ¡', 'åī¯'] +['é©', '´'] +['å°Ĩ', 'è¿ij'] +['寻', '常'] +['ä¸ŃåĽ½', 'å¸Ĥåľº'] +['容', 'åύ'] +['å±±', 'ä¸Ĭ'] +['èĥĮåIJİ', 'çļĦ'] +['亲', 'å¯Ĩ'] +['æīĢ以', '说'] +['éİ', '®'] +['çļĦ', 'çIJĨçͱ'] +['大', 'åŁİå¸Ĥ'] +['常', 'å¹´'] +['æĹħ游', 'ä¸ļ'] +['å°±æĺ¯', 'è¿Ļæł·'] +['åĨį', 'æĿ¥'] +['é«ĺ', 'ä½į'] +['åĨħ', '饰'] +['æŀĦ', 'éĢł'] +['ä¸Ģ', 'èµ·æĿ¥'] +['çͳ', 'è«ĭ'] +['å·²ç»ı', 'å¼Ģå§ĭ'] +['çļĦ', 'åĬ¨ä½ľ'] +['被', 'è¿«'] +['éģį', 'å¸ĥ'] +['åīĸ', 'æŀIJ'] +['å°ı', 'äºĭ'] +['å¿ĥ', 'ä¸ŃçļĦ'] +['ä½ĵåζ', 'æĶ¹éĿ©'] +['çļĩ', 'å®¶'] +['æķĻ', 'åłĤ'] +['åIJĥ', 'å®Į'] +['åĽ½æ°ij', 'åħļ'] +['æĺİç¡®', 'äºĨ'] +['åıijå±ķ', 'è§ĦåĪĴ'] +['第ä¸Ģ', 'æŃ¥'] +['å¾Ĺ', 'èµ·'] +['åľ¨', 'åĵª'] +['çļĦ', 'è·¯ä¸Ĭ'] +['é»', 'Ķ'] +['çķ¶', 'æĻĤ'] +['大åĬĽ', 'æĶ¯æĮģ'] +['åıĮ', 'éĩį'] +['çŁ¥éģĵ', 'èĩªå·±'] +['åIJĪä½ľ', 'åįıè®®'] +['æ°Ķ', 'åĬ¿'] +['éķ¿æķĪ', 'æľºåζ'] +['ç½ķ', 'è§ģ'] +['åĽŀ', 'æĿ¥äºĨ'] +['ä»ĸ', 'ä¼ļ'] +['ä¸Ń', 'æĸ°'] +['ä¸Ńæĸ°', 'ç½ij'] +['çļĦ', 'åķĨåĵģ'] +['èµł', 'éĢģ'] +['決', 'å®ļ'] +['å¸Ĥåľº', 'çĽij管'] +['çķĻ', 'åѦçĶŁ'] +['ç͵', 'åİĭ'] +['äºļ', '马'] +['äºļ马', 'éĢĬ'] +['è¿ĺæĺ¯', 'æ¯Ķè¾ĥ'] +['ä¿ĥè¿Ľ', 'äºĨ'] +['æµģ', 'åħ¥'] +['æijĦ', 'åĥı'] +['æijĦåĥı', '头'] +['æıIJ', 'åıĬ'] +['åıij', 'æİĺ'] +['æī¾', 'åĩº'] +['æ¢Ŀ', 'ä»¶'] +['ç¹¼', 'çºĮ'] +['æĪij', 'åĸľæ¬¢'] +['å¥', 'İ'] +['æ¦ľ', 'æł·'] +['å¼Ģ', 'èĬ±'] +['æ²ī', 'éĩį'] +['åŁº', 'åĩĨ'] +['ä»ħä»ħ', 'æĺ¯'] +['轨éģĵ', '交éĢļ'] +['åĶIJ', 'å±±'] +['çŃī', 'ä¸Ģç³»åĪĹ'] +['ä¸įè¿ĩ', 'æĺ¯'] +['åŃĺåľ¨', 'çĿĢ'] +['èĬ±', 'çĶŁ'] +['å¤', '·'] +['ç»Ī', 'ç©¶'] +['ä¹Łæĺ¯', 'ä¸Ģ个'] +['åįģ', 'åŃĹ'] +['èĸª', 
'éħ¬'] +['伤', 'å¿ĥ'] +['æĺ¥', 'ç§ĭ'] +['åĨ·', 'åį´'] +['ç²¾', 'çģµ'] +['çļĦ', 'åľ°åĽ¾'] +['æ¯Ķ', 'çī¹'] +['æ¯Ķçī¹', 'å¸ģ'] +['æĢ§', 'åĪ«'] +['ä½Ļ', 'ä¸ĩåħĥ'] +['ä¸įå¿ĺ', 'åĪĿå¿ĥ'] +['å¿ĥ', 'çĸ¼'] +['æĽ²', '线'] +['é«ĺ', 'ä½İ'] +['è¦ı', 'å®ļ'] +['æĻ¯', 'èī²'] +['è¦ģ', '说'] +['åħ¬åı¸', 'å°Ĩ'] +['æ¶²', 'åİĭ'] +['è¿Ŀ', '约'] +['åİļ', '度'] +['åºŀ', '大çļĦ'] +['è¿ĺæĺ¯', 'å¾Ī'] +['é¦ĸåħĪ', 'æĺ¯'] +['çµ', '²'] +['åĬ¡', 'å®ŀ'] +['並', 'ä¸Ķ'] +['å¢ŀ', 'è¿Ľ'] +['ç»Ħç»ĩ', 'å¼Ģå±ķ'] +['èµ·æĿ¥', 'äºĨ'] +['è¾ĥ', 'å°ı'] +['导', '游'] +['两', 'åľ°'] +['ç¿', 'ĺ'] +['çģ¿', 'çĥĤ'] +['é£İ', 'éĩĩ'] +['æĶ¯', '线'] +['æĶ¯çº¿', 'ä»»åĬ¡'] +['娱ä¹IJ', 'åľĪ'] +['天津', 'å¸Ĥ'] +['åĮħ', 'åĽ´'] +['æľ¬', 'èµĽåŃ£'] +['éĩįè¦ģ', '讲è¯Ŀ'] +['åıĮ', 'åIJij'] +['åįİ', '丽'] +['éĶ', '¤'] +['åĦ¿', '女'] +['åįĸ', 'åĩº'] +['ä¾Ĩ', '說'] +['ä»ĭç»į', 'ä¸Ģä¸ĭ'] +['åIJ¦', '认'] +['åĭ', 'Ŀ'] +['æĻ®éĢļ', '人'] +['çļĦ', 'åĬ¨åĬĽ'] +['涨', 'åģľ'] +['åŁºéĩij', '管çIJĨ'] +['ä¸Ģ个', 'éĩįè¦ģ'] +['è¿IJ', 'æ²³'] +['çħ', 'ŀ'] +['è´¢æĶ¿', 'éĥ¨'] +['è¡Įä¸ļ', 'åįıä¼ļ'] +['éĥ½', 'å°Ĩ'] +['è¨Ģ', '论'] +['ä¸ĭ', 'ä¾Ĩ'] +['墨', '西'] +['墨西', 'åĵ¥'] +['åĽłä¸º', 'ä»ĸ们'] +['æĢİä¹Ī', 'åĽŀäºĭ'] +['åĬłå¤§', '对'] +['èĬ', 'Ń'] +['çīĮ', 'åŃIJ'] +['ä¼ļ', '使'] +['妹', 'åŃIJ'] +['ç«Ļ', 'éķ¿'] +['å¿ħ', 'å¤ĩ'] +['æłij', 'æľ¨'] +['æģ¶', 'æĦı'] +['æ²³', 'éģĵ'] +['å¯Į', 'è£ķ'] +['ç¹ģ', 'åįİ'] +['代表', 'åĽ¢'] +['æµij', '身'] +['é¦ĸ', 'ä½į'] +['èĪªç©º', 'åħ¬åı¸'] +['鼻', 'å½±'] +['ä¸ĵ', 'è¾ij'] +['æ°´', 'æºIJ'] +['ä¸Ń', 'æ¯Ĵ'] +['並', 'ä¸į'] +['èĢĮ', 'åİ»'] +['é', 'ĥĿ'] +['äºİ', 'æŃ¤'] +['æĸĩåĮĸ', '建设'] +['èĤ¯å®ļ', 'ä¼ļ'] +['å¸ĮæľĽ', '大家'] +['æıı', 'åĨĻ'] +['ä½İ', 'è°ĥ'] +['æĸ°åħ´', '产ä¸ļ'] +['æ·Ħ', 'åįļ'] +['æĶ¾', 'å¼Ģ'] +['çļĦ', 'æĢ§æł¼'] +['çĸ¾çĹħ', 'çļĦ'] +['æķ´', 'é¡¿'] +['线ä¸Ĭ', '线ä¸ĭ'] +['éĢī', '项'] +['çļĦ', '认åı¯'] +['æķ´', 'é½IJ'] +['çĶļ', 'ä¹Ī'] +['çľģ', 'åĨħ'] +['åı¤', '人'] +['æ°ij', 'ä¿Ĺ'] +['çī¡', '丹'] +['éŨ', 'çªĹ'] +['éĤ£', 'æł·çļĦ'] +['çĽijäºĭ', 'ä¼ļ'] +['ç¿¡', 'ç¿ł'] +['ç¦', '¹'] +['åįĥä¸ĩ', 'ä¸įè¦ģ'] +['æĶ¶', '缩'] +['çļĦ', 'æĸĩåŃĹ'] +['åĴĮ', 'å°ļ'] +['æĮĩ', 
'令'] +['åħ±äº§', 'åħļåijĺ'] +['çļĦ', 'çĪ¶äº²'] +['å®Į', 'å·¥'] +['åĬ¡', 'å·¥'] +['马', 'æĭī'] +['马æĭī', 'æĿ¾'] +['æµĭ', 'è¯Ħ'] +['å²', 'ļ'] +['ä¸į', 'åģļ'] +['ä¸ĥ', 'å¹´'] +['åĿĩ', 'ä»·'] +['主', 'è§Ĥ'] +['å¾Ī', 'ä¸įéĶĻ'] +['èĤ¡ä¸ľ', '大ä¼ļ'] +['äºĶ', 'ä¸Ģ'] +['é£İ', 'åIJ¹'] +['å¼Ģ', 'éĩĩ'] +['è¿Ļä¹Ī', '大'] +['èĥ½', 'çľĭåΰ'] +['èĢĥ', 'è¯Ħ'] +['åį³', '便æĺ¯'] +['çݰ代', 'åĨľä¸ļ'] +['æ¯Ķè¾ĥ', 'é«ĺ'] +['è¦ģ', 'çľĭ'] +['没', 'äºĨ'] +['è§£', '決'] +['çݯ', 'æ¯Ķ'] +['åĨ²', 'åĬ¨'] +['æ·±', 'å¤ľ'] +['åĩł', 'åįĥ'] +['ä¿', 'ı'] +['ç½ij', 'æ°ij'] +['å°±', '没'] +['ä»ĸ', '表示'] +['éĩı', 'åŃIJ'] +['æĹ©é¤IJ', 'åĬłçĽŁ'] +['åįĬ', 'å²Ľ'] +['æIJŀ', 'ç¬ij'] +['ä¸Ĭ', 'æĬ¥'] +['å¯', '©'] +['é¢Ħ', '订'] +['èľĤ', 'èľľ'] +['æŁ¥', 'æī¾'] +['ä¼Ĺ', 'æīĢ'] +['ä¼ĹæīĢ', 'åij¨'] +['ä¼ĹæīĢåij¨', 'çŁ¥'] +['æĹ©', 'æĹ¥'] +['åıij', 'æī¬'] +['åĴĮ', '个人'] +['åĬłåħ¥', 'äºĨ'] +['åĸ®', 'ä½į'] +['åĪĨ', 'æĺİ'] +['第ä¸Ģ', 'æī¹'] +['ç¾İ', 'åĨĽ'] +['æĿĢ', 'æīĭ'] +['éŨ', 'å¤ĸ'] +['åķĨ', 'åľĪ'] +['ä¸Ģ', 'åĪ»'] +['çļĦçľ¼', 'ç¥ŀ'] +['éľ', 'Ħ'] +['äºĽ', 'ä»Ģä¹Ī'] +['åĬł', 'æ·±'] +['æ¯ı', 'ä½į'] +['å¸Ĥ', 'éĿ¢ä¸Ĭ'] +['åıĶ', 'åıĶ'] +['çļĦ', 'éĤ£ç§į'] +['粤', '港澳'] +['è´´', 'å¿ĥ'] +['æĸĩåĮĸ', '产ä¸ļ'] +['红', 'æĹĹ'] +['åĺī', 'åħ´'] +['æĶ¶', 'çĽĺ'] +['å®ĮæĪIJ', 'åIJİ'] +['ä¼ģä¸ļ', '管çIJĨ'] +['纵', '横'] +['ä¸į', 'ä¿¡'] +['æĪIJ', 'éĥ½å¸Ĥ'] +['æ´Ĺ', '澡'] +['举è¡Į', 'çļĦ'] +['çĶ¢', 'çĶŁ'] +['ç©¿', 'ä¸Ĭ'] +['åĪļ', '好'] +['åħī', '线'] +['æīĵ', 'æŀ¶'] +['è¿Ļ', 'æľ¬ä¹¦'] +['åĶ®åIJİ', 'æľįåĬ¡'] +['åĩł', 'åĪĨ'] +['ä¸Ĭ', '次'] +['ä¸į', 'åĪĨ'] +['产', 'åIJİ'] +['éģ¿', 'å¼Ģ'] +['ç»Ī', 'æŀģ'] +['代表', '大ä¼ļ'] +['æ¼Ķ', 'æĬĢ'] +['åĽŀ', 'è´Ń'] +['åѦ', 'è´¹'] +['éĺ»', 'ç¢į'] +['ä¸Ģ大', 'æī¹'] +['ç«£', 'å·¥'] +['åĨ³', 'å®ļäºĨ'] +['ä½Ĩ', 'å¦Ĥæŀľ'] +['ç͵', 'æµģ'] +['ä¸Ŀ', '毫'] +['èĥ½å¤Ł', 'åľ¨'] +['éĶĢåĶ®', 'æĶ¶åħ¥'] +['åľ¨', 'åŃ¦æł¡'] +['æ°´', 'åĩĨ'] +['è§Ĩ', '线'] +['èĩª', 'åľ¨'] +['åķĨä¸ļ', 'éĵ¶è¡Į'] +['为äºĨ', '让'] +['çį²', 'å¾Ĺ'] +['çݩ家', 'æľĭåıĭ'] +['éĿ¢', 'èĨľ'] +['åĪĨ', 'åī²'] +['åī§', 'æľ¬'] +['ç«', 'Ń'] +['说', 'å¾Ĺ'] +['æĥ³', 'çŁ¥éģĵ'] +['çļĦ人', 'çī©'] 
+['èĮħ', 'åı°'] +['åIJĮ', 'ä¸Ģ个'] +['æķ°æį®', 'ä¸Ńå¿ĥ'] +['çĶ', 'Ħ'] +['åĸľ', 'æĤ¦'] +['ä¸ĭæĿ¥', 'çļĦ'] +['å®ļ', 'åIJij'] +['æŀģ', 'åħ·'] +['çļĦ', 'åľŁåľ°'] +['éĤ£', 'åĢĭ'] +['æijĦ', 'åħ¥'] +['äºĨ', 'æĪijçļĦ'] +['马', 'è·¯'] +['åħ¨', '社ä¼ļ'] +['è®®', 'æ¡Ī'] +['å±ĭ', 'åŃIJ'] +['åIJį', 'åı«'] +['åĮ', 'ª'] +['åľ¨', 'å¤ĸéĿ¢'] +['åįİ', 'åįĹ'] +['åıij', 'è´§'] +['å¯Ĵ', 'åĨ·'] +['é«ĺçŃī', 'æķĻèĤ²'] +['详ç»Ĩ', 'çļĦ'] +['个', 'é¡¹çĽ®'] +['çĶŁäº§', 'åĬĽ'] +['æĹ¶', '常'] +['å°±', 'æľĥ'] +['ä¸ĩ', 'èĤ¡'] +['éĻĮçĶŁ', '人'] +['æıı', 'ç»ĺ'] +['å½ĵ', 'çĦ¶æĺ¯'] +['æĭī', 'åĬ¨'] +['éĵ¾', 'æĿ¡'] +['æī£', 'éϤ'] +['ä¸Ģ缴', 'éĥ½'] +['å°ı', 'åŃ©åŃIJ'] +['伤', 'åı£'] +['第äºĮ', 'å±Ĭ'] +['è´Ń', 'ç½®'] +['çļĩ', '马'] +['æĹł', 'èģĬ'] +['表', 'åĨ³'] +['诸', 'å¦Ĥ'] +['åĵį', 'èµ·'] +['é£İ', 'æļ´'] +['ä¸Ģæµģ', 'çļĦ'] +['ç', '·¨'] +['è§£æĶ¾', 'åĨĽ'] +['室', 'å¤ĸ'] +['å°±', 'è¿Ļä¹Ī'] +['å³', '¶'] +['æīĢæľī', '人éĥ½'] +['æIJľç´¢', 'å¼ķæĵİ'] +['çļĦ', 'æĪIJæľ¬'] +['åħļ', 'æĶ¿'] +['åıijè¡Į', '人'] +['çļĦ', 'äºĭå®ŀ'] +['对', '该'] +['åıĹ', 'æįŁ'] +['ä¿Ħ', 'ä¹Į'] +['é²ľ', 'èĬ±'] +['åĨľ', 'èį¯'] +['æŀģ', 'éĢŁ'] +['æĢ¥', 'æĢ§'] +['两', 'ä¼ļ'] +['ä¸Ģèά', 'æĿ¥è¯´'] +['æµ·', 'é²ľ'] +['åĨ', 'Ī'] +['ç͍', '人'] +['çĶ¨äºº', 'åįķä½į'] +['åĢ', 'ª'] +['åĦª', 'æĥł'] +['æł¹', 'æºIJ'] +['åĽ¢', 'è´Ń'] +['ç¾İ', 'æ´²'] +['ä¸ĭ', 'è¡Į'] +['å¹´', 'æľ«'] +['èľ', '¡'] +['è¯ģ', 'ä»¶'] +['åľ¨', 'æĪijåĽ½'] +['ä¸į', 'åºĶ'] +['æĮī', 'æĹ¶'] +['åłª', 'ç§°'] +['åľº', 'ä¸Ĭ'] +['å¹²éĥ¨', 'èģĮå·¥'] +['æľī', 'å¾Ī大çļĦ'] +['æķ°åŃĹ', 'ç»ıæµİ'] +['æ¼Ķ', 'ç»ĥ'] +['æį®', 'ç»Łè®¡'] +['å¾Ģ', 'æĿ¥'] +['广åijĬ', 'æľįåĬ¡'] +['çļĦ', 'è·Ŀ离'] +['æŃ', '¸'] +['è¨Ģ', 'è¯Ń'] +['被', 'èªī'] +['被èªī', '为'] +['åĭī', '强'] +['å°Ĭ', 'æķ¬'] +['ä¸ĩ', '亿åħĥ'] +['ä¸ŃåĽ½', 'åĽ½éĻħ'] +['å¹²', 'é¢Ħ'] +['å¹´', '产'] +['èĢķ', 'åľ°'] +['èĮ', 'İ'] +['åį³', 'æĺ¯'] +['æĺ¨', 'æĻļ'] +['æĪIJ为', 'ä¸Ģ个'] +['çºł', 'æŃ£'] +['åij½', 'åIJį'] +['é¢ģ', 'å¸ĥ'] +['çĮľ', 'æµĭ'] +['ä¿ĿèŃ·', 'æĶ¿çŃĸ'] +['æĭ', '¢'] +['æ´»', 'æ³¼'] +['çŃī', 'éĥ¨éŨ'] +['åѦ', 'åΰ'] +['å¢ŀå̼', 'ç¨İ'] +['èĪª', '线'] +['åĨ', '¤'] 
+['åįģ', 'åĩłå¹´'] +['æİ§èĤ¡', 'èĤ¡ä¸ľ'] +['ä¸Ģ', 'éŨ'] +['个', 'å·¥ä½ľ'] +['ä¸ªå·¥ä½ľ', 'æĹ¥'] +['æĸ°', '西'] +['æĸ°è¥¿', 'åħ°'] +['论', 'è¯ģ'] +['ä»', 'Ĩ'] +['åı¦å¤ĸ', 'ä¸Ģ个'] +['æĶ¹', 'ç¼ĸ'] +['严', 'ç¦ģ'] +['åĸľ', '好'] +['个人', 'ä¿¡æģ¯'] +['满æĦı', '度'] +['åĵ', '¨'] +['å¸Ī', 'èµĦ'] +['æĶ¹', '为'] +['ç«ŀäºī', '对æīĭ'] +['åĩº', 'çĤī'] +['åķĨ', '人'] +['大', 'æ£ļ'] +['æĮĩ导', 'ä¸ĭ'] +['å¦ĩ', 'ç§ij'] +['è¼', 'ª'] +['æī', 'ģ'] +['åIJĮæĹ¶', 'è¿ĺ'] +['å¹¶', 'éĢļè¿ĩ'] +['æĪĺ', 'éĺŁ'] +['èĶĵ', 'å»¶'] +['ä¿', 'ŀ'] +['éĢĤå½ĵ', 'çļĦ'] +['åīį', 'è¾Ī'] +['åĵģ', 'åij³'] +['湿', 'åľ°'] +['æĪIJ', 'åŀĭ'] +['ä¸į', 'åıªæĺ¯'] +['æĥ©', 'ç½ļ'] +['åĩºåı°', 'äºĨ'] +['çİ©', '游æĪı'] +['æīį', 'åıijçݰ'] +['åºĶ', 'èģĺ'] +['å¤ĸ', 'æĿ¥'] +['åįł', 'é¢Ĩ'] +['å±ķ', 'æľĽ'] +['å«', 'Ĥ'] +['港', 'èĤ¡'] +['æ¡Į', 'ä¸Ĭ'] +['æĶ¯', 'æŁ±'] +['çļĦæĥħ', 'å½¢'] +['广éĺĶ', 'çļĦ'] +['æĶ¯', 'è¡Į'] +['å´©', 'æºĥ'] +['æľĪ', 'ä¸Ń'] +['æľĪä¸Ń', 'æĹ¬'] +['ç»į', 'åħ´'] +['临', 'è¿ij'] +['æĬ¤', 'æłı'] +['æļ', '®'] +['åįķ', 'èģĮä¸ļ'] +['è¾¹', 'å¢ĥ'] +['æĹ¥', 'çħ§'] +['ä¸Ģ', 'åłĨ'] +['缴', 'å¾Ħ'] +['åħ±åIJĮ', 'ä½ĵ'] +['æĸ°åįİ', 'ç½ij'] +['æīĵ', '好'] +['ç͵åĬ¨', '汽车'] +['ä¸į', 'æĺİçϽ'] +['éĢĻ', '裡'] +['缼', '大'] +['çİĭ', 'æľĿ'] +['åĨį', 'ä¸Ģ次'] +['åĬŀåħ¬', 'åİħ'] +['è´¨', 'æĬ¼'] +['åIJĪ', 'åĩ»'] +['人们', '对'] +['鼶', 'é£Ł'] +['éĥ½ä¸į', 'çŁ¥éģĵ'] +['çļĦ', 'è¯Ńè¨Ģ'] +['åĭŁéĽĨ', 'èµĦéĩij'] +['åĬ¨', 'èĦī'] +['å½', '¤'] +['è¿Ļ', 'åĩłå¹´'] +['çŁŃ', 'è§Ĩé¢ij'] +['太', 'é«ĺ'] +['常', 'å§Ķä¼ļ'] +['åĬł', 'çıŃ'] +['éĩį', 'å¿ĥ'] +['åªĴä½ĵ', 'æĬ¥éģĵ'] +['没', 'æ³ķ'] +['éĹ»', 'åIJį'] +['çĥŃ', '度'] +['å¹¿æ³Ľ', 'çļĦ'] +['åħŃ', '大'] +['çī©', 'ä½ĵ'] +['ä¸į', '该'] +['é¢ĺ', '主'] +['精彩', 'çļĦ'] +['为', 'è¿Ľä¸ĢæŃ¥'] +['èĻ', 'ŀ'] +['åĽº', 'çĦ¶'] +['è´µå·ŀ', 'çľģ'] +['çºł', 'ç»ĵ'] +['代çIJĨ', '人'] +['æ³ķå®ļ', '代表'] +['åı¦ä¸Ģ', 'ç§į'] +['ä¸į', 'åIJ«'] +['æĭ¯', 'æķij'] +['ä¼ļ', 'ç»Ļ'] +['è¯Ĺ', 'è¯į'] +['åIJĮ', 'ç±»'] +['å¾Ĺ', 'ä¸įåΰ'] +['æĬĵ', 'ç´§'] +['以', 'åħ¶'] +['åħ¥', 'åħļ'] +['è¿ĺ', 'åı¯'] +['æľŁ', 'åĪĬ'] +['å¾Īå¤ļ', 'æĹ¶åĢĻ'] +['æĹ¥', 'åIJİ'] +['åħ¬', '约'] 
+['ä¸Ģ', '举'] +['æ¯Ķè¾ĥ', 'å¤ļ'] +['éĩij', 'æ²Ļ'] +['æį', 'ŀ'] +['æİĴ', 'åĩº'] +['æŃ¦', 'æľ¯'] +['ä¸į', 'æĸ·'] +['ä¸Ń', 'èĢĥ'] +['ä¿¡', 'èµĸ'] +['ä»İä¸ļ', '人åijĺ'] +['çģ«', 'çĦ°'] +['éĨĴ', 'æĿ¥'] +['ä½İ', '温'] +['é̾', 'æľŁ'] +['åĬ±', 'å¿Ĺ'] +['éħ', '¥'] +['åı¯è°ĵ', 'æĺ¯'] +['è¿Ļ', 'æĦıåij³çĿĢ'] +['é¢ł', 'è¦Ĩ'] +['åĮĹ京', '大åѦ'] +['ä¸ĵ', '线'] +['åıĬ', '以ä¸Ĭ'] +['è¨', 'ª'] +['èĢĮ', 'åIJİ'] +['çŁ¥', 'ä¹İ'] +['ä¸Ģ对', 'ä¸Ģ'] +['å¨ĥ', 'å¨ĥ'] +['çģ¾', 'éļ¾'] +['åħ¨', 'å±Ģ'] +['æīĢå¾Ĺ', 'ç¨İ'] +['å®ŀ', 'æĥł'] +['èļĤ', 'èļģ'] +['ä¹Ł', 'çŁ¥éģĵ'] +['温', 'åĴĮ'] +['èIJ½', 'ä¸ĭ'] +['åŀĭ', 'ä¼ģä¸ļ'] +['åĨį', 'ä¹Ł'] +['ä¾Ľ', 'çĥŃ'] +['é«ĺ', 'æ½®'] +['çĢı覽', 'åύ'] +['çļĦ', '巨大'] +['åħĪ', '天'] +['å¹´', 'ä¸ŃåĽ½'] +['类似', 'çļĦ'] +['çIJĨäºĭ', 'ä¼ļ'] +['空', 'éĸĵ'] +['çģµ', 'æĦŁ'] +['åĬĽ', 'æ°Ķ'] +['带', 'ä¸Ĭ'] +['ä¸į好', 'æĦıæĢĿ'] +['æľī', 'ä½ķ'] +['å·²', 'åľ¨'] +['åıĸ', 'åĩº'] +['è¿Ŀæ³ķ', 'çĬ¯ç½ª'] +['åŃ¦ä¹ł', '贯彻'] +['åľ°', '带'] +['楼', '梯'] +['çŃī', 'æĥħåĨµ'] +['ä»İ', 'åīį'] +['çļĦ', 'ä¹łæĥ¯'] +['ç³Ł', 'ç³ķ'] +['å°±', 'èĥ½å¤Ł'] +['è©', 'ķ'] +['ä¸Ģ', 'å¾ĭ'] +['æĮ«', 'æĬĺ'] +['åİŁæĸĩ', 'åľ°åĿĢ'] +['å½ĵ', 'å±Ģ'] +['ä¸į', 'éĢļ'] +['æķ°', 'åįĥ'] +['éĺŁä¼į', '建设'] +['æĹ¶', 'èĬĤ'] +['åģļ', 'èµ·'] +['çļĦ', 'è®°å¿Ĩ'] +['ç½ij绾', 'å®īåħ¨'] +['åĩ¡', 'æĺ¯'] +['æ°', '¯'] +['éĽķ', 'åĪ»'] +['åŁĥ', 'åıĬ'] +['æĪij', 'åı¯ä»¥'] +['çĽij', 'çIJĨ'] +['æĽ´', 'åħ·'] +['åŁİ', '管'] +['èĭ', '¯'] +['åı¥', 'åŃIJ'] +['èĭ¥', 'æľī'] +['ä»İæĿ¥', 'ä¸į'] +['缸åħ³', 'è´Łè´£'] +['å®īåħ¨', 'æĦŁ'] +['æĽ´', 'è¦ģ'] +['çļĦæĥħ', 'æĦŁ'] +['çī¢', 'çī¢'] +['è¾ĥ', '好çļĦ'] +['æ°', '®'] +['ç¬ij', 'è¯Ŀ'] +['车', 'å±ķ'] +['ä¹ĭ', 'ç¾İ'] +['ç®Ģ', '约'] +['ç±»åŀĭ', 'çļĦ'] +['èĢģ', 'åĮĸ'] +['çľĭ', 'ä½ł'] +['è¿ĩ', 'åĪĨ'] +['éŨ', 'åīį'] +['ä¸Ģ', 'éĹ´'] +['æĥ³', 'åİ»'] +['åª', 'Ľ'] +['åľŁ', 'è±Ĩ'] +['åıĪ', 'ç§°'] +['ä¸Ń', 'ä¿¡'] +['åŃĺ', 'éĩı'] +['马', 'äºij'] +['èĩ´', '使'] +['åħĪ', 'åīį'] +['èĢģ', 'åŃIJ'] +['æīĵ', 'æī®'] +['æ¯ķä¸ļ', 'äºİ'] +['æ¯ķä¸ļ', 'åIJİ'] +['ç¾İ好', 'çĶŁæ´»'] +['å·¥ä¸ļ', 'ä¼ģä¸ļ'] +['就好', 'äºĨ'] +['èħIJ', 'èļĢ'] +['çıį', 'çıł'] 
+['åΰ', 'è¿ĻéĩĮ'] +['æīĢéľĢ', 'çļĦ'] +['è¿Ļæĺ¯', 'åĽłä¸º'] +['çIJĨæĥ³', 'çļĦ'] +['å·®å¼Ĥ', 'åĮĸ'] +['é', '®'] +['é®', '®'] +['äºļ', '太'] +['æĹł', 'ç©·'] +['æıIJ', 'çݰ'] +['ä¸ĵä¸ļ', 'æĬĢæľ¯'] +['çĶ¢', 'æ¥Ń'] +['åѦ', 'åŃIJ'] +['ç§ij', 'å¹»'] +['åįłåľ°', 'éĿ¢ç§¯'] +['ä¸į', 'åĩĨ'] +['æľªæĪIJ', '年人'] +['æĶ¶', 'å½ķ'] +['è¿ĺ', '款'] +['éĴ¢', 'çŃĭ'] +['æ¼', '¢'] +['å¾Ĺ', 'æĦı'] +['综åIJĪ', 'ä½ĵ'] +['æŀģ', 'é«ĺ'] +['åįķ', 'è¯į'] +['é«ĺæķĪ', 'çļĦ'] +['骨', '头'] +['æī§', 'çĿĢ'] +['缼', 'ä¸ĸ'] +['模', 'çī¹'] +['æĽ´', 'èĥ½'] +['ç»Ŀ', 'æľĽ'] +['对åºĶ', 'çļĦ'] +['æ¨', 'Ĭ'] +['æĸ°', 'ä¸ī'] +['æĸ°ä¸ī', 'æĿ¿'] +['æģ°', 'æģ°'] +['åIJį', 'å®¶'] +['æł¸å¿ĥ', 'æĬĢæľ¯'] +['个', 'å°ı'] +['æĢİä¹Ī', 'ä¼ļ'] +['说', 'ä¸įå®ļ'] +['西', 'çĵľ'] +['åĵ', 'İ'] +['ç¢', 'Ł'] +['å¿ħ', 'ä¸įåı¯'] +['å¿ħä¸įåı¯', 'å°ij'] +['ä¹ĭ', 'éĸĵ'] +['åĪĨ', '管'] +['交éĢļ', 'äºĭæķħ'] +['å¼Ģ', 'åĬŀ'] +['å¾ģæ±Ĥ', 'æĦıè§ģ'] +['äº', '¨'] +['鼻åŃIJ', 'éĥµ'] +['鼻åŃIJéĥµ', 'ä»¶'] +['ä¿¡æģ¯', 'æľįåĬ¡'] +['ä½ł', 'è§īå¾Ĺ'] +['缴', 'è§Ĥ'] +['å·²', 'å®ĮæĪIJ'] +['åĪĨ', 'ä¼ļ'] +['åĽŀ', 'åįĩ'] +['éļ', '»'] +['好', '人'] +['äºĨè§£', 'ä¸Ģä¸ĭ'] +['åį«', 'æµ´'] +['æľĢ', 'çα'] +['åºŀ', '大'] +['客', 'æĪ¿'] +['çijŀ', 'åħ¸'] +['éĥ½', 'ä¸įæĺ¯'] +['é¤', '¨'] +['èĹ', 'ī'] +['çļĦ', 'åIJĦ项'] +['为', '缮æłĩ'] +['çļĦ', 'è®¤çŁ¥'] +['å½±åĵįåĬĽ', 'çļĦ'] +['夸', 'å¼ł'] +['佩', 'æĪ´'] +['æ±ĩ', 'çİĩ'] +['çļĦ', 'çαæĥħ'] +['æĺ¥', 'é£İ'] +['æĺ¯', 'æĪijçļĦ'] +['æ¨', '¹'] +['åįĬ', 'å°ıæĹ¶'] +['å±±', 'åİ¿'] +['å±±', '西çľģ'] +['èĢĮ', 'è¿Ļ'] +['æĽ´å¤ļ', 'ä¿¡æģ¯'] +['è¿ĺ', 'æľīä¸ĢäºĽ'] +['ç²¾', 'ç»ĨåĮĸ'] +['ç¾İ', 'åѦ'] +['çͱ', 'æĸ¼'] +['ä»ħä¾Ľ', 'åıĤèĢĥ'] +['å¾Ī', 'é«ĺçļĦ'] +['åıł', 'åĬł'] +['è¿Ļä¹Ī', '说'] +['å±ķ', 'åĩº'] +['åĽĽ', 'å¤Ħ'] +['ä¸ĩ', 'å®¶'] +['æĭĽ', 'åĭŁ'] +['çļĦ', '强大'] +['æĤ£', 'æľī'] +['å°ı', 'äºİ'] +['ä¹Łè®¸', 'æĺ¯'] +['对', 'èĩªå·±çļĦ'] +['èģĮä¸ļ', 'æķĻèĤ²'] +['æĿ¥', 'è¿Ľè¡Į'] +['æ¡£', '次'] +['æīĵ', 'èµ¢'] +['éĥ½æľī', 'çĿĢ'] +['åº', '¸'] +['è¯Ń', 'æ°Ķ'] +['çͲ', 'éĨĽ'] +['空', 'åĨĽ'] +['车', 'åĨħ'] +['åĽłä¸º', 'ä½ł'] +['å®ŀ', 'æķĪ'] +['æĥħ', 'ä¾£'] +['åıijè¾¾', 
'åĽ½å®¶'] +['éķľ', 'åŃIJ'] +['æ¯į', 'å©´'] +['ä½Ĩæĺ¯', 'ä»ĸ'] +['积æŀģ', 'æİ¨è¿Ľ'] +['大å¹ħ', '度'] +['çļĦ', '女åĦ¿'] +['é¤IJ', 'æ¡Į'] +['åIJ¬', 'å¾Ĺ'] +['çļĦ', '积æŀģæĢ§'] +['好', 'åIJ§'] +['æĹ¥', 'æ¶Īæģ¯'] +['æľī', 'ä»»ä½ķ'] +['æ¯Ĵ', 'åĵģ'] +['æĹ©çĤ¹', 'åĬłçĽŁ'] +['第ä¸Ģ', '天'] +['å°½', 'åĬĽ'] +['æł', 'ĸ'] +['主', 'æīĵ'] +['æĺ¯ä¸Ģ', 'åIJį'] +['çĪĨ', 'æĸĻ'] +['äºĭä¸ļ', 'åıijå±ķ'] +['å¾®', 'åķĨ'] +['äºİä¸Ģä½ĵ', 'çļĦ'] +['çĶŁ', 'çĮª'] +['èĩªçĦ¶', 'èµĦæºIJ'] +['çŀĦ', 'åĩĨ'] +['è§Ħ模', 'åĮĸ'] +['å¹¶', 'ä¸İ'] +['èĤ¥', 'èĥĸ'] +['å®¶', 'ç͍'] +['大', 'çĪ·'] +['é¢Ħ', 'åijĬ'] +['æĿ¥', 'åģļ'] +['éĺ³', 'åİ¿'] +['æŀĦ', 'çŃij'] +['é¢ģ', 'å¥ĸ'] +['åİĨåı²', 'æĸĩåĮĸ'] +['æľįåĭĻ', 'æĪĸ'] +['æĢ»', 'åĨ³èµĽ'] +['åıij', 'åŀĭ'] +['æĪij', '羣çļĦ'] +['æĽ', '¦'] +['åıĤ', 'ä¼ļ'] +['èĦĨ', 'å¼±'] +['åĩĨ', 'åħ¥'] +['èħ¹', 'éĥ¨'] +['åı¸', '令'] +['æĤ²', 'åī§'] +['天', 'ä¸Ĭ'] +['åı£', 'ä¸Ń'] +['ä¸ĩ', '个'] +['åѦ', 'ä¸ļ'] +['æıIJ', 'åĢ¡'] +['两', 'è¾¹'] +['大', 'èĤ¡ä¸ľ'] +['åı¤', 'éķĩ'] +['è¡Ģ', 'ç³ĸ'] +['çļĦ', 'ç¨ĭ度'] +['æ£ī', 'èĬ±'] +['åIJİ', 'åı°'] +['å°±', 'åĮ»'] +['æķ´', 'æķ´'] +['èĴ', '²'] +['çĽĪåĪ©', 'èĥ½åĬĽ'] +['ç±', '½'] +['èĦ', '«'] +['çľĭ', 'éĩį'] +['å®¶', 'éķ·'] +['èģĺ', 'ç͍'] +['èµĽ', 'éģĵ'] +['åīį', 'èĢħ'] +['建', 'èѰ'] +['å¾ĭå¸Ī', 'äºĭåĬ¡'] +['èīºæľ¯', 'åĵģ'] +['æľī', 'èĩªå·±çļĦ'] +['åIJ¦', 'å®ļ'] +['社', 'åĽ¢'] +['åij¨', 'äºĶ'] +['带', 'åΰ'] +['å·¥ä½ľ', 'ä¼ļè®®'] +['èĤ¡', 'æľ¬'] +['å¤ĸ', 'åĮħ'] +['å®¶', 'åħ¬åı¸'] +['çĽij', 'çĭ±'] +['èĪ', 'Ĭ'] +['åIJį', 'æł¡'] +['西', 'æ¹ĸ'] +['è¶ħè¿ĩ', 'äºĨ'] +['åįĹ', 'å±±'] +['ç»Ħ', 'ä»¶'] +['å̼å¾Ĺ', '注æĦı'] +['æĮ£', 'æīİ'] +['äºĭ', '迹'] +['ç¶ĵ', 'çĩŁ'] +['ç§ij', '室'] +['好', 'åIJĹ'] +['æ¤ħ', 'åŃIJ'] +['åľĪ', 'åŃIJ'] +['ä½Ĩ', '她'] +['æµģ', 'çķħ'] +['åIJĦèĩª', 'çļĦ'] +['èģĮ', 'åijĺ'] +['è¡į', 'çĶŁ'] +['åħ¨', 'åľº'] +['æĴ¤', 'éĶĢ'] +['åį´', '被'] +['å®ģ', 'éĿĻ'] +['åīį', 'æīĢ'] +['åīįæīĢ', 'æľª'] +['åīįæīĢæľª', 'æľī'] +['主', 'ä¸ļ'] +['åĮĹ', 'ç¾İ'] +['è¯Ħ', 'å®ļ'] +['åĵģ', 'å°Ŀ'] +['大家', 'éĥ½åľ¨'] +['主', 'å¸ħ'] +['ç»Ĩ', 'å¿ĥ'] +['ä¿¡æģ¯', 'æĬ«éľ²'] +['çļĦ', 'ç«ŀäºī'] 
+['éĢĻæ¨£', 'çļĦ'] +['ç§ijåĪĽ', 'æĿ¿'] +['éĩĩ', 'æijĺ'] +['票', 'æį®'] +['éĢIJ', 'å¹´'] +['èĭ±', 'è¶ħ'] +['è¡Įä¸ļ', 'åĨħ'] +['人', '寿'] +['åIJİ', 'åĭ¤'] +['å¦Ĥ', 'æĦı'] +['ç¬Ķ', 'è¯ķ'] +['æ·¡æ·¡', 'çļĦ'] +['ä¸į', 'èĪĴæľį'] +['ä½ĵ', '积'] +['ä¹Łä¸į', 'è¦ģ'] +['éĿ¢', 'æĸĻ'] +['æł·', 'æľ¬'] +['ç¥', 'ģ'] +['æĮī', 'è§Ħå®ļ'] +['大æ¦Ĥ', 'æĺ¯'] +['æĥħåĨµ', 'è¿Ľè¡Į'] +['åIJĦ', 'åįķä½į'] +['çļĦ', 'ç¬ij容'] +['åĩºèī²', 'çļĦ'] +['代表', 'æĢ§'] +['çļĦ', 'ç¾İ好'] +['éĴ', '¦'] +['å¾®', 'çĶŁçī©'] +['è¶Ĭ', 'æĺ¯'] +['æĸ¹', 'åı¯'] +['å¹²', 'èĦĨ'] +['éģĬ', 'æĪ²'] +['çļĦ', 'åħ´è¶£'] +['éĹ®', 'è´£'] +['åĽłä¸º', 'æĪij们'] +['èĢĥ', 'éĩı'] +['çĶŁ', 'çĶŁ'] +['éĺ»', 'åĬĽ'] +['ä¸į', 'åħģ许'] +['æıIJ', 'è®®'] +['åĩı', 'æĮģ'] +['åıªæĺ¯', 'ä¸Ģ个'] +['æĪij', 'æĬĬ'] +['åıijçݰ', 'èĩªå·±'] +['å¢ŀ', 'å¹ħ'] +['å¦', 'į'] +['èĹĿ', 'è¡ĵ'] +['ä¸Ģå®¶', '人'] +['åĪĨ', '级'] +['çļĦ', 'æķ°éĩı'] +['è½®', 'èŀįèµĦ'] +['çŃī', 'åĽłç´ł'] +['大', '夫'] +['èģĺ', '请'] +['é£İ', 'æľº'] +['绽', 'æĶ¾'] +['ä»»ä½ķ', 'ä¸Ģ个'] +['éł', 'Ĥ'] +['éĺ¶', '级'] +['æĬĬ', '她'] +['è¿Ľ', 'åĨĽ'] +['èĥ½', 'åģļåΰ'] +['åŁ¹è®Ń', 'æľºæŀĦ'] +['çī©', 'æĸĻ'] +['ç«¥', 'è¯Ŀ'] +['æĮĩ导', 'æĦıè§ģ'] +['éĺ', '®'] +['æ·±åħ¥', 'æİ¨è¿Ľ'] +['主', 'æľº'] +['æ¸Ķ', 'ä¸ļ'] +['ä¸į', 'æľį'] +['æµĵ', 'éĥģ'] +['è¡Ĺ', 'ä¸Ĭ'] +['ä¾Ŀ', '次'] +['æĹ¶', '段'] +['æ¢', 'µ'] +['çļĦ', 'åĸľçα'] +['å¾Ī', 'éķ¿'] +['åĪĿ', '级'] +['æŀľ', 'æĸŃ'] +['æĬ¢', 'æķij'] +['é¼ĵ', 'èĪŀ'] +['ä¾Ľ', 'éľĢ'] +['æ·±åħ¥', 'å¼Ģå±ķ'] +['产ä¸ļ', 'éĽĨ群'] +['åĻª', 'éŁ³'] +['åIJ¬', 'çĿĢ'] +['æ·±åĪ»', 'çļĦ'] +['å¿į', 'åıĹ'] +['ç͵', 'ç£ģ'] +['强', 'èĢħ'] +['æ»ĭ', 'åij³'] +['æĽ¼', 'èģĶ'] +['åı¯ä»¥', '缴æİ¥'] +['大', 'ç±³'] +['æŃ·', 'åı²'] +['æĶ¿åĬ¡', 'æľįåĬ¡'] +['åħ¬', 'å¼ı'] +['社', '群'] +['éģĵ士', 'èģĮä¸ļ'] +['ä¹ĭ', 'æĥħ'] +['æµ·', 'æ°´'] +['æ¼Ķ', 'å¥ı'] +['åºĹ', 'éĩĮ'] +['迹', '象'] +['åıijå±ķ', 'çIJĨ念'] +['é«ĺ', '空'] +['åij¨', 'åĪĬ'] +['åĽŀ', 'åΰäºĨ'] +['ä¸į', 'éĢĤåIJĪ'] +['åłµ', 'å¡ŀ'] +['åĬ', 'Ī'] +['æ°´', 'ä¸Ĭ'] +['çĢij', 'å¸ĥ'] +['纳ç¨İ', '人'] +['çĩĥ', 'æ²¹'] +['å·¥ç¨ĭ', 'é¡¹çĽ®'] +['峡', 'è°·'] +['æľī', 'éĴĪ对æĢ§'] +['åľĨ', 
'å½¢'] +['æľ¬', 'å¸Ĥ'] +['è¿Ļ', 'è¯Ŀ'] +['管çIJĨ', 'èĢħ'] +['ç¡®è¯Ĭ', 'çĹħä¾ĭ'] +['æĬĬ', 'æīĭ'] +['彩', 'èī²'] +['ä¸Ĭ', 'åīį'] +['夯', 'å®ŀ'] +['ç¾Ĭ', 'èĤī'] +['å¾Ģ', 'å¹´'] +['æĵħ', 'èĩª'] +['è¿·', '人'] +['èĪª', 'æ¯į'] +['ç²¾', 'ç»Ĩ'] +['åľ¨', 'æĪijçļĦ'] +['åĪĽ', 'æĬķ'] +['麦', 'åħĭ'] +['æľĪ', 'ç»ı'] +['åĮĹ', 'æµ·'] +['ä¹ĭ', 'æĺŁ'] +['åı¶', 'åŃIJ'] +['å¸Ĥåľº', 'ç«ŀäºī'] +['è¿Ļ', 'äºĭ'] +['åıĥ', 'èĪĩ'] +['产', 'åľ°'] +['åĶ', 'ī'] +['åķĨåĵģ', 'æĪ¿'] +['èĪª', 'è¿IJ'] +['ä¼ĺ', 'å¼Ĥ'] +['ä»ĸ们', 'æĺ¯'] +['鼨', 'æ°´'] +['è¯į', 'æ±ĩ'] +['åĨľ', 'çͰ'] +['欧', 'éĺ³'] +['çŁŃ', '线'] +['管', 'ç½ij'] +['æł¹', 'åŁº'] +['åıªæľī', 'ä¸Ģ个'] +['éŀĭ', 'åŃIJ'] +['å¸Ĥ', 'å§Ķ书记'] +['åĪ»', 'æĦı'] +['è¡Į', '车'] +['åıĪ', '被'] +['åı¯éĿł', 'æĢ§'] +['è´', '±'] +['ä»»', 'åij½'] +['åºĶ', 'åľ¨'] +['å°±', 'å¾Ĺ'] +['æľįåĬ¡', 'ä½ĵç³»'] +['æĶ¿', 'æĿĥ'] +['åıijè¨Ģ', '人'] +['è¿ĩ', 'å¾Ģ'] +['两', 'åıª'] +['èϽ', '说'] +['éĢģ', 'ä¸Ĭ'] +['ä»Ģä¹Ī', 'äºĭ'] +['æķ£', 'æĸĩ'] +['æİĮ', 'æİ§'] +['èĸĦ', 'å¼±'] +['ä¸ĭéĿ¢', 'å°±'] +['主è¦ģ', 'åĨħ容'] +['å¾Ī', 'éĩįè¦ģçļĦ'] +['å°±', '说'] +['çϽèī²', 'çļĦ'] +['éĤ£ä¸ª', 'æĹ¶åĢĻ'] +['ç»ı纪', '人'] +['çļĦ', 'æ¯į亲'] +['ç¬Ķè®°', 'æľ¬'] +['åºķ', 'å±Ĥ'] +['è¿ij', '代'] +['è§£', '说'] +['è²ł', '責'] +['æľĢ大', 'åĮĸ'] +['åķĨ', 'éĵº'] +['æł¡', 'åıĭ'] +['æ²', 'ģ'] +['ä¸į', 'åĩºæĿ¥'] +['éĻ·', 'éĺ±'] +['ç¨', 'ħ'] +['åħ¬å¸ĥ', 'äºĨ'] +['åĩĢ', 'å̼'] +['çĽ¸å¯¹', 'è¾ĥ'] +['ç¬', 'Ľ'] +['æł¸', 'ç®Ĺ'] +['åįİ', '侨'] +['æĢ¥', 'æķij'] +['æĮº', '好'] +['åħĴ', 'ç«¥'] +['äºĮ', 'èĥİ'] +['åĩº', 'èĩª'] +['åĿ', 'Ł'] +['æīĭ', 'ä¸ĭ'] +['å±', '¡'] +['åĪĽéĢł', 'æĢ§'] +['ä¸¥æł¼', 'æĮīçħ§'] +['åĨį', 'åİ»'] +['举', '缣'] +['人', 'æµģ'] +['äºĨä¸Ģ', '声'] +['å°ıæĹ¶', 'åīį'] +['è´µ', 'æĹı'] +['éľ', 'ĸ'] +['ä¹Łæĺ¯', 'éĿŀ常'] +['éĢ', '±'] +['çľĭäºĨ', 'çľĭ'] +['ç¹ģ', 'æ®ĸ'] +['èĩ³', 'æŃ¤'] +['é¢Ħ', 'å¤ĩ'] +['å¾Ī', 'æĺİæĺ¾'] +['æ¼Ķ', 'èīº'] +['åĿIJ', 'çĿĢ'] +['ä¿Ħ', 'åĨĽ'] +['åľ¨', 'è¿ĩåİ»'] +['ä¹ĭ', 'äºĭ'] +['æĬĵ', 'èİ·'] +['åĿIJ', 'ä¸ĭ'] +['çͱ', 'ä¸ŃåĽ½'] +['ä¹Ł', 'å¼Ģå§ĭ'] +['çŃĶ', 'å¤į'] +['åŀĥåľ¾', 'åĪĨç±»'] +['éĴĵ', 'é±¼'] +['åIJĦ', 
'種'] +['缸', 'éģĩ'] +['ä¸įåģľ', 'çļĦ'] +['æī¹', 'éĩı'] +['éĩįè¦ģ', 'ä½ľç͍'] +['å§Ķ', 'å±Ī'] +['åħŃ', 'å¹´'] +['ä¸ĥ', 'åįģ'] +['ä¹ĭ', 'æĪĺ'] +['é£İéĻ©', '管çIJĨ'] +['éŁ³', 'æ¨Ĥ'] +['è¡ĮæĶ¿', 'å¤Ħç½ļ'] +['æľ¬', 'äºĭ'] +['æĴ°', 'åĨĻ'] +['èģļ', 'åIJĪ'] +['éĢĤ', 'æĹ¶'] +['æIJ¬', 'å®¶'] +['ç¢İ', 'çīĩ'] +['缼', 'å®´'] +['ç®Ģ', 'æ´ģ'] +['åı¬', 'éĽĨ'] +['ç®Ģ', 'åĮĸ'] +['åĮĹ京', 'æĹ¶éĹ´'] +['第ä¸ī', 'å±Ĭ'] +['æĿ¥', 'åĽŀ'] +['常ç͍', 'çļĦ'] +['京', 'æ´¥'] +['京津', 'åĨĢ'] +['梦', 'å¹»'] +['è¯ķ', 'è¡Į'] +['æľº', 'åºĬ'] +['åΰ', 'æľĢåIJİ'] +['åĬ©', 'æīĭ'] +['åĪĨ', '彩'] +['åĩº', 'åĵģ'] +['åι', '车'] +['åIJ¯', 'åıij'] +['ä¾§', 'éĿ¢'] +['æ¯ı', 'å½ĵ'] +['缸åħ³', 'è§Ħå®ļ'] +['ä¸ĸ', '人'] +['è´Ń', '车'] +['å¿ĥ', '缮'] +['å¿ĥ缮', 'ä¸Ń'] +['äºĶ', 'éĩij'] +['è¿ĺ', 'è®°å¾Ĺ'] +['ä¾Ŀ', 'çĦ¶æĺ¯'] +['æıIJ', 'æ¡Ī'] +['ç͵åķĨ', 'å¹³åı°'] +['åģļ', 'åΰäºĨ'] +['æĿľ', 'ç»Ŀ'] +['å®ī', 'åįĵ'] +['ä¸ĸçķĮ', 'åIJĦåľ°'] +['åīį', 'éĢĶ'] +['æ´Ĺ', 'åĩĢ'] +['å¥ĭ', 'åĬĽ'] +['åŁİå¸Ĥ', '建设'] +['å¤ļ', 'åĬŁèĥ½'] +['ä¼ļ', 'éĢłæĪIJ'] +['åıijå¸ĥ', 'ä¼ļä¸Ĭ'] +['ç©¶', '竣æĺ¯'] +['åĪĨ', '红'] +['çŁ¥', 'èŃĺ'] +['éĿ¢', 'æĿ¿'] +['æĹł', '声'] +['æĢ¥', 'éľĢ'] +['失', 'çľł'] +['çΏ', 'å¦Ī'] +['äº', 'Ĥ'] +['åħ¨', 'æĻ¯'] +['ç»ıåħ¸', 'çļĦ'] +['åī§', 'ä¸Ń'] +['é¢Ĩ导', 'ä¸ĭ'] +['åħļ', 'åĨħ'] +['åħ¥', 'ä¾µ'] +['æĭī', 'æĸ¯'] +['ä¸Ģ', 'å¹ķ'] +['åĬł', 'ä¹ĭ'] +['èĤ', 'Ĩ'] +['èĭ±', 'æł¼'] +['èĭ±æł¼', 'åħ°'] +['å·§', 'åħĭ'] +['å·§åħĭ', 'åĬĽ'] +['ä¸Ģ', 'å¿ĥ'] +['èģ', 'Ĥ'] +['å¾Ģå¾Ģ', 'æĺ¯'] +['管çIJĨ', 'å±Ĥ'] +['çĻ»', 'åħ¥'] +['建ç«ĭ', 'èµ·'] +['建', 'åĽ½'] +['åŃIJ', '宫'] +['åºĶ', 'ä»ĺ'] +['æİ¢', 'ç©¶'] +['第ä¸Ģ', 'ä½į'] +['ä½Ļ', 'å®¶'] +['çŃī', 'æ´»åĬ¨'] +['æīĢ', 'èĩ´'] +['è¾ĥ', 'å¿«'] +['æĺ¯', 'éĿŀ'] +['æıIJ', 'åIJį'] +['äºĮ', 'èĢħ'] +['åıªåī©', 'ä¸ĭ'] +['åħ¶ä¸Ń', 'åĮħæĭ¬'] +['ç¼ĸ', 'ç¨ĭ'] +['çł´', 'ç¢İ'] +['ä¸Ń', '举'] +['å·¥ä½ľ', 'æĬ¥åijĬ'] +['çѾ', 'åIJį'] +['éħĴ', 'ä¸ļ'] +['çŁ¥', 'æĻĵ'] +['çĥŃ', 'å¿ĥ'] +['éĿŀ', 'åĩ¡'] +['èIJ¥ä¸ļ', 'æī§'] +['èIJ¥ä¸ļæī§', 'çħ§'] +['人大', '代表'] +['ä¸Ģ个', 'æĸ°çļĦ'] +['å¨ģ', 'æµ·'] +['éĤ£', '人'] +['涨', 'ä»·'] +['æ¶Ī', 'çģŃ'] +['éļ¾', 
'å¿ĺ'] +['ç¶ĵ', 'é©Ĺ'] +['åı£', 'è¢ĭ'] +['ç³»', 'æķ°'] +['æĸĩ', 'ä¸Ń'] +['好', '转'] +['æĸ°', '鼶åĶ®'] +['讲述', 'äºĨ'] +['å¼Ģ', 'çĽĺ'] +['çķĻ', 'ç»Ļ'] +['æħ¢æħ¢', 'çļĦ'] +['æĤ²', '伤'] +['æľ¬', 'æľŁ'] +['äºĨ', 'å¤ļå°ij'] +['è¿Ļ', '让'] +['åIJĮ', 'çŃī'] +['æ¸ħ', 'æĺİ'] +['个', 'åŁİå¸Ĥ'] +['æºĸ', 'åĤĻ'] +['åĩłä¹İ', 'æĺ¯'] +['强', 'åĬĽ'] +['ä¿', '¯'] +['æ°´', '稻'] +['åĽºå®ļ', 'çļĦ'] +['æł¸', 'åĩĨ'] +['说', 'æľį'] +['顯', '示'] +['è¿Ļ', 'å¥Ĺ'] +['æĻºæħ§', 'åŁİå¸Ĥ'] +['å±ĭ', 'é¡¶'] +['ä¸į', 'æĿ¥'] +['çĶŁ', 'é²ľ'] +['çŁ¥', 'æĥħ'] +['æĬķ', '身'] +['åijĬè¯ī', 'æĪij们'] +['ä¸ī', 'åĽĽ'] +['ä¸ĩ', 'ä¸Ģ'] +['è¾Ĩ', '车'] +['为', 'ä¹ĭ'] +['åΰ', 'æĹ¶åĢĻ'] +['è¿Ļ', 'æīįæĺ¯'] +['åIJį', 'çīĮ'] +['åºŁ', 'æ°´'] +['åݻ年', 'åIJĮæľŁ'] +['å¹´', 'éĻIJ'] +['éģĭ', 'åĭķ'] +['åıĮ', 'çľ¼'] +['è¦ģ', 'ç´§'] +['对', 'çŃĸ'] +['åľº', 'é¦Ĩ'] +['çϾ', 'ç§ij'] +['è¶Ĭ', 'éĩİ'] +['å¯Į', 'åIJ«'] +['大å¤ļæķ°', '人'] +['æľĢ', 'å°ij'] +['åı¬', 'åͤ'] +['åħ¸', 'èĮĥ'] +['åĨľ', 'æľº'] +['æŃ£', 'æĸĩ'] +['åºĶç͍', 'äºİ'] +['æ·±', 'èĢķ'] +['ä¿', 'Ń'] +['ä»Ģä¹Ī', 'ä¸ľè¥¿'] +['å¥Ĺ', 'é¤IJ'] +['å½ĵ', 'éĢī'] +['å·¦', 'æīĭ'] +['è°ĥ', 'çIJĨ'] +['æĻļ', 'é¤IJ'] +['éļ¾', 'åħ³'] +['åĩŃ', 'è¯ģ'] +['çα', '人'] +['æĮĩ', 'è´£'] +['è´£', 'ç¼ĸ'] +['çļĦä¸Ģ', '款'] +['éĵ', '²'] +['åįģ', '个'] +['èĢ', '»'] +['æľįåĬ¡', 'åķĨ'] +['åľ°', 'çĭ±'] +['è¿ŀ', 'å¿Ļ'] +['åĽ°', 'æĥij'] +['çļ', 'ĵ'] +['ä¸į', 'åIJĥ'] +['çİ°åľ¨', 'å·²ç»ı'] +['çĽĺ', 'çĤ¹'] +['ä¸įåģľ', 'åľ°'] +['管çIJĨ', '模å¼ı'] +['è¿Ļ', '段æĹ¶éĹ´'] +['æ¤', '°'] +['礼', 'åĮħ'] +['æµģ', '转'] +['æī«', 'çłģ'] +['éĽĨä¸Ń', 'åľ¨'] +['æ±Ĥ', 'åĬ©'] +['åįĬ', '个'] +['å¿«éĢŁ', 'å¢ŀéķ¿'] +['å¾Ģ', 'ä¸ĭ'] +['è¯Ħ', 'åĪĨ'] +['å°±', 'æĥ³'] +['åķĨåĬ¡', 'éĥ¨'] +['æľī', 'éĹ®é¢ĺ'] +['èİ·', 'åĪ©'] +['æ¯Ľ', 'çĹħ'] +['æĦŁ', 'åºĶ'] +['èī¯', 'æĢ§'] +['åĪĨ', 'æŃ§'] +['åĨ', 'ī'] +['æĪij们', 'çİ°åľ¨'] +['è¦ģ', 'åĬłå¼º'] +['å·§', 'å¦Ļ'] +['èŀº', 'æĹĭ'] +['åĪĩ', 'æį¢'] +['çĭ', 'Ħ'] +['顺', 'çķħ'] +['å°¤åħ¶', 'æĺ¯åľ¨'] +['èĬĿ', '麻'] +['éļ¾', 'è¿ĩ'] +['æĹĹ', 'å¸ľ'] +['å¤į', 'åį°'] +['å¤įåį°', 'ä»¶'] +['å¿ħ', 'éľĢ'] +['对å¤ĸ', 'å¼ĢæĶ¾'] +['éļ¾', 
'åıĹ'] +['åİŁæĿ¥', 'æĺ¯'] +['ç®Ĺ', 'äºĨ'] +['é«ĺ', 'å±±'] +['离', 'èģĮ'] +['çµĦ', 'ç¹'] +['çµĦç¹', 'Ķ'] +['å±ģ', 'èĤ¡'] +['çϾ', 'å®¶'] +['éģĩ', 'ä¸Ĭ'] +['æĺĶ', 'æĹ¥'] +['ä¸į', '容'] +['çĽij管', 'éĥ¨éŨ'] +['主', 'æĦı'] +['æµģ', 'åŁŁ'] +['è·Į', 'å¹ħ'] +['èĩ³', 'ä¸Ĭ'] +['åĪ«', '说'] +['æĺ¯', 'æ¯Ķè¾ĥ'] +['å®ıè§Ĥ', 'ç»ıæµİ'] +['å¸Ĥåľº', '主ä½ĵ'] +['污æŁĵ', 'çī©'] +['æķij', 'æ²»'] +['丰', 'æĶ¶'] +['åŃĺ', 'æĶ¾'] +['åĩ', 'Ħ'] +['éĩij', 'å±±'] +['æį¢', 'äºĨ'] +['ä¸ĵ', '人'] +['éĹľ', 'æĸ¼'] +['æĹ¢', 'è¦ģ'] +['åĽ½', 'è¶³'] +['éļ', 'ĭ'] +['åıį', 'åĩ»'] +['èµ·', '身'] +['åħĪ', 'æĺ¯'] +['å¸ĮæľĽ', 'èĥ½å¤Ł'] +['åζ', '订'] +['åºĹ', 'éĿ¢'] +['åĸ', 'Ģ'] +['æķĻ', 'ä½ł'] +['éĻį', '温'] +['åĬĽ', 'æ±Ĥ'] +['ä¸ī', 'çϾ'] +['çī©', 'ä»·'] +['丢', '失'] +['å¢Ļ', 'ä¸Ĭ'] +['éĥ¨', '份'] +['æł·', 'æĿ¿'] +['ä¹ĭ', 'æĦı'] +['ç½ij', 'å°ıç¼ĸ'] +['ä¸ĸ', 'ä¸Ĭ'] +['è°ĥ', 'è¯ķ'] +['污æŁĵ', 'éĺ²æ²»'] +['å½±', 'éĻ¢'] +['å®Įåħ¨', 'åı¯ä»¥'] +['éĢļ', 'åħ³'] +['ä¹īåĬ¡', 'æķĻèĤ²'] +['没æľī', 'åĬŀæ³ķ'] +['èĢ', '¿'] +['å¦', '³'] +['æĹł', 'æĥħ'] +['å¾Ĺ', 'çĽĬ'] +['å¾ĹçĽĬ', 'äºİ'] +['æľŁ', 'çĽ¼'] +['娱ä¹IJ', 'åľº'] +['çͲ', 'æĸ¹'] +['ä¸Ģ', 'æ±½'] +['çĹ', '°'] +['çĸij', 'ä¼¼'] +['æĸ°æµª', 'å¾®åįļ'] +['强', 'è¡Į'] +['å½ĵ', 'ä»ĸ'] +['èĥ', 'º'] +['ç͍æĪ·', 'æıIJä¾Ľ'] +['åĮº', 'å§Ķ'] +['æĦ¿', 'æĻ¯'] +['æĬĺ', 'æī£'] +['失', '踪'] +['è¿«', 'åĪĩ'] +['åŃĹ', 'æ¯į'] +['åĴ', '¯'] +['èªį', 'èŃĺ'] +['ä»Ģä¹Ī', 'æĦıæĢĿ'] +['çĽĴ', 'åŃIJ'] +['å½ķ', 'éŁ³'] +['建设', 'å·¥ç¨ĭ'] +['ä¸ļ', 'ä½Ļ'] +['å®ŀè·µ', 'æ´»åĬ¨'] +['羣', '空'] +['çĤ', 'ĸ'] +['åľ¨', 'è·¯ä¸Ĭ'] +['主è¦ģ', 'åĮħæĭ¬'] +['该', 'æĢİä¹Ī'] +['æĢ»', 'æľī'] +['æĢ§', 'æĦŁ'] +['æ°ij', 'èĪª'] +['å¼Ģ', 'åºĹ'] +['欺', 'éªĹ'] +['çªģ', 'åĩ»'] +['缺', '失'] +['æī§', 'ä¸ļ'] +['åľ°', 'éģĵ'] +['å¹¶', 'æĹł'] +['æ°ij', 'åĬŀ'] +['ç»Ħç»ĩ', 'çĶŁæ´»'] +['æĪij', 'å¦Ī'] +['è¨ĺ', 'èĢħ'] +['管', 'åζ'] +['æī¾', '个'] +['èĹ', '»'] +['çĤİ', 'çĹĩ'] +['äºĴ', 'åĬ©'] +['æµıè§Ī', 'åύ'] +['çݩ家', 'æĿ¥è¯´'] +['éĻįä½İ', 'äºĨ'] +['è£', 'Ķ'] +['æĮ£', 'éĴ±'] +['åķĨ', 'æľº'] +['æĶ¹', 'è£ħ'] +['æµģ', '浪'] +['æĶ¿', 'æ³ķ'] +['èĢģ', '头'] +['çĶŁäº§', 
'åĴĮ'] +['ç©', 'Ĺ'] +['亲', 'çα'] +['亲çα', 'çļĦ'] +['å±¥', 'èģĮ'] +['åŁİ', 'éĩĮ'] +['ç»Ĩ', 'åĪĨ'] +['åĬ³åĬ¨', 'åIJĪåIJĮ'] +['åľ¨', 'æĹ¥æľ¬'] +['å¨ģ', 'å°Ķ'] +['åį«', 'è§Ĩ'] +['éĢ£', 'çµIJ'] +['çĿĢ', 'éĩį'] +['æĬĺ', '磨'] +['åĽ¾', '为'] +['çľ', '·'] +['å·¥', 'åºı'] +['æĵ', 'ģ'] +['æĵģ', 'æľī'] +['ç½ijç«Ļ', 'åľ°åĽ¾'] +['çļĦä¸Ģ', '大'] +['ç»Ħç»ĩ', 'å®ŀæĸ½'] +['æĬĽ', 'å¼ĥ'] +['åĴĮ', 'æĶ¯æĮģ'] +['æ³ķ', 'åĪĻ'] +['浪', 'æ½®'] +['çݰ', 'æľīçļĦ'] +['åĩł', 'çİĩ'] +['为', '客æĪ·'] +['åįģ', 'ä¸ĩ'] +['è', '¹Ħ'] +['çªģåĩº', 'éĹ®é¢ĺ'] +['åıĥ', 'åĬł'] +['éĥ½ä¼ļ', 'æľī'] +['çĽ', '¤'] +['è°ģ', 'éĥ½'] +['æīĭ', 'åĬ¨'] +['缴', 'è¾¾'] +['çĤ¹', 'å¤ļ'] +['éĺ¶', 'å±Ĥ'] +['ä¸į', 'ä½³'] +['éĤ£', '段'] +['滨', 'æµ·'] +['æĺ¯', 'åĽ½åĨħ'] +['æĪij', 'å¸ĮæľĽ'] +['åIJĽ', 'åŃIJ'] +['è§Ĥ', 'éŁ³'] +['åģļ', 'é¥Ń'] +['æ±½', 'è»Ĭ'] +['åħ³', 'ç¨İ'] +['çľ¼åīį', 'çļĦ'] +['æ°´', 'éĿ¢'] +['è̳', 'æľº'] +['追', '踪'] +['æİ¨', 'éĢģ'] +['éĴ±', 'åĮħ'] +['æģ¶', 'å¿ĥ'] +['æµ·', 'åŁŁ'] +['å·', 'į'] +['å¼Ģ', 'æĿ¥'] +['表', 'æĢģ'] +['仪', '表'] +['å¹³', 'åİŁ'] +['åįģ', 'å¤ļå¹´'] +['ä¹Ł', 'æĹłæ³ķ'] +['åħ¼', '顾'] +['è¡£', 'æŁľ'] +['æł½', 'åŁ¹'] +['æĪ¿', 'æºIJ'] +['设ç«ĭ', 'äºĨ'] +['ä¸ĩ', 'åIJį'] +['æķ°', 'é¢Ŀ'] +['è¦ģ', 'åĿļæĮģ'] +['åIJīæŀĹ', 'çľģ'] +['请', 'èģĶç³»'] +['ç»ıåİĨ', 'è¿ĩ'] +['çļĦ', 'æľ¬è´¨'] +['åħ¥', 'éŨ'] +['æľ¬', 'æ¡Ī'] +['çİĩ', 'è¾¾åΰ'] +['åı°', 'éĺ¶'] +['éĴ', 'ŀ'] +['æĪij', 'èĥ½'] +['èݲ', 'èĬ±'] +['éĴ', 'ł'] +['ä¸Ģ', 'äºĭ'] +['åİŁ', 'æľīçļĦ'] +['æ¯ı', 'åĢĭ'] +['æ¯Ķäºļ', '迪'] +['æ£ĭçīĮ', '游æĪı'] +['ä¸įä¼ļ', 'æľī'] +['å½Ĵ', 'æĿ¥'] +['äºĶ', 'çϾ'] +['è¿ĩ', 'é«ĺ'] +['鼷', 'è¾¾'] +['ä¸Ģèµ·', 'åİ»'] +['æķĻ', '导'] +['å°±', 'è¯Ĭ'] +['å°±', 'å¾Ī'] +['ä¸įåIJĮ', 'äºİ'] +['ä¿', 'º'] +['å¸ĸ', 'åŃIJ'] +['æĶ¿åįı', 'å§Ķåijĺ'] +['çĸ«æĥħ', 'å½±åĵį'] +['åĪĨ', 'è£Ĥ'] +['为ä»Ģä¹Ī', 'ä¼ļ'] +['äºĶ', 'æĺŁ'] +['å°ij', 'åĦ¿'] +['æĬ¢', 'éĻ©'] +['梦', 'è§ģ'] +['è®°èĢħ', 'éĩĩ访'] +['å±±', 'è·¯'] +['æĪij', '个人'] +['æ²Ļ', '滩'] +['è¹', 'Ń'] +['æĶ¹', 'è®Ĭ'] +['æĸ°åŀĭ', 'åĨł'] +['æĸ°åŀĭåĨł', 'çĬ¶'] +['åĮ»', 'æĬ¤'] +['åĮ»æĬ¤', '人åijĺ'] +['æµ·', 'å°Ķ'] 
+['åħ³äºİ', 'æĪij们'] +['éϤ', 'å¤ĸ'] +['åº', 'ļ'] +['宣', 'åijĬ'] +['ä¸ī', 'åįĥ'] +['æ¦', '¨'] +['ç§ijæĬĢ', '大åѦ'] +['ä¸ĥ', 'åħ«'] +['顺', 'åºĶ'] +['çΏçΏ', 'å¦Īå¦Ī'] +['éĢī', 'åıĸ'] +['åī§', 'çĥĪ'] +['乡æĿij', 'æĹħ游'] +['积æŀģ', 'æİ¢ç´¢'] +['表çݰ', '为'] +['å¾Ī', 'æ¸ħæ¥ļ'] +['大', 'åĨĽ'] +['æĿ¥', 'ç͵'] +['å¥Ĺ', 'æĪ¿'] +['çݰ', 'è¡Į'] +['享', 'åıĹåΰ'] +['çľĭ', 'çĤ¹'] +['åĽºå®ļ', 'èµĦ产'] +['以', '人为'] +['以人为', 'æľ¬'] +['ä¸į', 'å®Į'] +['éĻį', '鼨'] +['åģļçļĦ', 'äºĭæĥħ'] +['å¹¶', 'äºİ'] +['顽', '强'] +['èĢ', '¸'] +['åĺ´', 'å·´'] +['缸åħ³', 'ä¿¡æģ¯'] +['æĪij', '没'] +['æĪĺçķ¥', 'æĢ§'] +['æĢĿ', '念'] +['åĪĺ', 'å¤ĩ'] +['åĬ©', 'æĶ»'] +['é£İ', 'è²Į'] +['éĿ¢å¯¹', 'éĿ¢'] +['积æŀģ', 'å¼Ģå±ķ'] +['çĸĹ', 'æķĪ'] +['çľĭ', '书'] +['缺', 'åı£'] +['åĽ½æ°ij', 'ç»ıæµİ'] +['使ç͍', 'æĿĥ'] +['éģ¥', 'è¿ľ'] +['å¡«', 'è¡¥'] +['第ä¸ī', '人'] +['åįĬ', 'å¤ľ'] +['æŃ¦æ±ī', 'å¸Ĥ'] +['æĪij', 'åıijçݰ'] +['ä¼ĺæĥł', 'æĶ¿çŃĸ'] +['é£İ', 'åı£'] +['å°±', 'ä¸įèĥ½'] +['为', '主è¦ģ'] +['æµģ', 'åĩº'] +['å´ĩ', 'æĭľ'] +['å¹¶', 'ä¸įèĥ½'] +['é«ĺ', 'ä¸ī'] +['ä¸ĸçķĮä¸Ĭ', 'æľĢ'] +['æĥ³', 'å¿ħ'] +['åħ¶', 'æīĢ'] +['åĢĻ', 'éĢī'] +['åĢĻéĢī', '人'] +['ä¸į', 'çα'] +['åī¯', 'ä½ľç͍'] +['人æ°ij', 'æĹ¥æĬ¥'] +['æĪij', 'ä¸įæĺ¯'] +['å®ŀ', 'çī©'] +['ç͵', 'åİĤ'] +['ä¹Ł', 'ç®Ĺæĺ¯'] +['æľī', 'éĹľ'] +['æľī', 'èĥ½åĬĽ'] +['æĮĤ', 'åľ¨'] +['çľ¼', 'ä¸ĭ'] +['约', 'ç¿°'] +['å°ı', 'åѦçĶŁ'] +['èµ·', 'åΰäºĨ'] +['å·¥', '夫'] +['åIJĮ', 'å¿ĥ'] +['åĿ¦', 'è¨Ģ'] +['çł', 'Į'] +['åıijæĮ¥', 'äºĨ'] +['èģĮä¸ļ', 'éģĵå¾·'] +['è¿ĻäºĽ', 'å¹´'] +['念', '头'] +['èĢģ', 'é¼ł'] +['åħ¨', 'èµĦ'] +['åħ¨èµĦ', 'åŃIJ'] +['ä¸Ģ', 'åij³'] +['å¤ļ', 'ä¸ĩåħĥ'] +['æł¼', 'æľĥ'] +['éķ¿', 'éĢĶ'] +['带', 'èµ°'] +['èĭ±', '寸'] +['æĸĩ', 'ä½ĵ'] +['对', 'ä»ĸ们'] +['åĵŃ', 'äºĨ'] +['å¡«', 'æĬ¥'] +['çīĪæĿĥ', '声æĺİ'] +['ç͵', '线'] +['è´Ńçī©', 'ä¸Ńå¿ĥ'] +['饱', '满'] +['ä½İ', '头'] +['强', 'è¿«'] +['ä¿Ŀ', 'æ´ģ'] +['欧', 'åĨł'] +['缸', 'è¿ŀ'] +['认', 'è´Ń'] +['çģ«', 'æĺŁ'] +['é«ĺ', 'å°Ķ'] +['é«ĺå°Ķ', '夫'] +['èij«', 'èĬ¦'] +['æłĩ', '注'] +['çļĦ', 'çIJĨæĥ³'] +['æł¸', 'éħ¸'] +['æł¸éħ¸', 'æ£Ģæµĭ'] +['åĬ', 'ī'] +['ä¸Ģèά', 'æĺ¯'] +['æĢĿ', 
'ç´¢'] +['轨', '迹'] +['çĥŃ', '带'] +['éĻ', '£'] +['åĩĨç¡®', 'æĢ§'] +['æĪ´', 'çĿĢ'] +['åľ¨', 'çĶŁæ´»ä¸Ń'] +['æīĢ', 'èĥ½'] +['æľ¯', 'åIJİ'] +['带', 'ä½ł'] +['ç¥', 'ł'] +['æ®ĭ', 'éħ·'] +['ä¹Ł', 'åıªæĺ¯'] +['çͳ', 'è´Ń'] +['举åĬŀ', 'äºĨ'] +['æľī', 'æĦıä¹ī'] +['æĹº', '缼'] +['åľ¨', 'ç¶²'] +['åľ¨ç¶²', 'è·¯ä¸Ĭ'] +['å¾Ī大', 'ç¨ĭ度'] +['管', 'è¾ĸ'] +['çĸ«æĥħ', 'æľŁéĹ´'] +['触', 'æij¸'] +['éĺ¶æ®µ', 'æĢ§'] +['ä¼ļ', 'è§īå¾Ĺ'] +['çļĦ', 'çĶ»éĿ¢'] +['æİ¥åıĹ', 'äºĨ'] +['表达', 'äºĨ'] +['éĤĵ', 'å°ı'] +['éĤĵå°ı', 'å¹³'] +['åħļ', 'é£İ'] +['åħļé£İ', 'å»īæĶ¿'] +['åķĨ', 'åѦéĻ¢'] +['åħij', 'æį¢'] +['é£Łåĵģ', 'èį¯åĵģ'] +['éĿŀ常', '好çļĦ'] +['çľ', '¯'] +['纳', 'ç±³'] +['åĬ¨', 'æijĩ'] +['åĽŀ', 'éģ¿'] +['çľĭ', 'èijĹ'] +['款', '项'] +['åħ«', 'å¹´'] +['åģļ', '个'] +['æĸĩ', 'æ¡£'] +['éĩijèŀį', 'ç§ijæĬĢ'] +['åħ¶ä¸Ń', 'æľī'] +['äºĨä¸Ģ', 'ç³»åĪĹ'] +['æĹĹèΰ', 'åºĹ'] +['ç§°', 'èµŀ'] +['éĽ¢', 'éĸĭ'] +['åζ', 'åĨ·'] +['å®¶', 'éŨåı£'] +['åįģ', 'å¤ļ'] +['ä¼´', 'ä¾£'] +['çľĭ', 'çĹħ'] +['æĭī', 'çĿĢ'] +['æī', 'Ĵ'] +['çĸ²', 'æĥ«'] +['å°ijæķ°', 'æ°ijæĹı'] +['åĽ¾', 'å½¢'] +['è½', '§'] +['å¢ŀ', 'éĩı'] +['饲', 'åħ»'] +['çģ«', 'å±±'] +['æ¯ı', '个æľĪ'] +['ä½ľä¸º', 'ä¸ĢåIJį'] +['è½´', 'æī¿'] +['æĸĩ', '书'] +['ç¼', 'ķ'] +['åħ·ä½ĵ', 'æĥħåĨµ'] +['çĹĽ', 'çĤ¹'] +['缴', 'éĶĢ'] +['å¡', 'Ĭ'] +['ä¹Ł', 'æľĥ'] +['çĥŃ', 'æ½®'] +['å¹³', 'æ°ij'] +['æ¼Ķåͱ', 'ä¼ļ'] +['æķĻ', 'çłĶ'] +['éĢĥ', 'éģ¿'] +['ä¸Ģ', 'è´¯'] +['å°±', 'è¶Ĭ'] +['å®ŀ', 'å®ŀåľ¨'] +['å®ŀå®ŀåľ¨', 'åľ¨'] +['ä¹łè¿ijå¹³', 'æĢ»'] +['æº', 'º'] +['å¿ĥ', 'åºķ'] +['éķ¿', 'å¾ģ'] +['媽', '媽'] +['第ä¸ī', '次'] +['åĩº', 'æ¼Ķ'] +['çĭĢ', 'æ³ģ'] +['å°Ķ', 'æĸ¯'] +['代çIJĨ', 'åķĨ'] +['çĨ', 'ı'] +['çļĦ', '对象'] +['ç͵', 'éĩı'] +['è¡Į', 'åĪĹ'] +['åĽ½', '人'] +['è·ij', 'äºĨ'] +['åįĶ', 'åĬ©'] +['èIJ¥', 'è¿IJ'] +['å¸Ī', 'åħĦ'] +['æ¦', '®'] +['æĥ³', 'åĥı'] +['æĢ§', '强'] +['ç§ijåѦ', 'çłĶç©¶'] +['å»¶', 'å®ī'] +['ä¸¥æł¼', 'èIJ½å®ŀ'] +['é¢Ĩ', 'ä¼ļ'] +['缸', 'å·®'] +['è·¯', '人'] +['çĶ', '«'] +['æľī', 'ä»·å̼'] +['æľīä»·å̼', 'çļĦ'] +['ç¾İ', 'åĽ¢'] +['æ°ij主', 'çĶŁæ´»'] +['æĪij', 'æīį'] +['ç¾İåĽ½', '人'] +['æ°Ķ', 'åij³'] 
+['åıį', 'å°Ħ'] +['çļĦ', 'åĨ³å¿ĥ'] +['大', 'è±Ĩ'] +['交', '代'] +['è¿Ľ', 'åĩº'] +['åıį', 'æĬĹ'] +['æĮĩ', 'çļĦæĺ¯'] +['ä»·', 'ä½į'] +['è¿Ľ', 'é©»'] +['ä¸Ĭ', 'çϾ'] +['ä½į', 'åĪĹ'] +['ä¸ŃåĽ½', 'ä¼ģä¸ļ'] +['çļĦ好', 'å¤Ħ'] +['主', 'ç¼ĸ'] +['æ±½', 'æ²¹'] +['ä½Ĩ', 'æĪij们'] +['æĢİä¹Ī', 'çľĭ'] +['é»Ħ', 'å±±'] +['å¤ļ', 'åªĴä½ĵ'] +['åIJİ', 'åį«'] +['èİ·å¾Ĺ', 'æĽ´å¤ļ'] +['åĬ¡', 'å¿ħ'] +['为', 'å¥ijæľº'] +['é¦ĸ', '饰'] +['ä¸ĩ', 'åįļ'] +['è¶ĬæĿ¥è¶Ĭ', '大'] +['ä¸ĵ项', 'è¡ĮåĬ¨'] +['å¥ĭ', 'è¿Ľ'] +['ä»į', 'çĦ¶æĺ¯'] +['è´¨', 'æĦŁ'] +['å¦Ĥæŀľ', 'ä¸įæĺ¯'] +['ç«Ļ', 'èµ·æĿ¥'] +['ä¹¾', 'éļĨ'] +['åı¯æĢķ', 'çļĦ'] +['å¯Į', 'è´µ'] +['æ¸ħ', 'ç®Ĺ'] +['åIJij', 'ä¸ĭ'] +['åĢ', 'ļ'] +['çļĦ', 'çŃĶæ¡Ī'] +['èι', 'ä¸Ĭ'] +['çļĦ羣å®ŀ', 'æĢ§'] +['çŃī', 'åĬŁèĥ½'] +['åĸľ', 'åī§'] +['å¨ģ', 'åĬĽ'] +['æĸ°', 'é¢ĸ'] +['æł¸', 'ç͵'] +['æĬ¥', 'éĶĢ'] +['æķħ', '乡'] +['ä¼´', 'éļı'] +['éŀ', 'Ń'] +['å¦Ĭ', 'å¨ł'] +['åĪĨ', 'åĮĸ'] +['æľī', 'å¾Ī大'] +['æĢİä¹Ī', '说'] +['æĻĤ', '代'] +['产', 'åĩº'] +['ä»ĭç»į', '说'] +['å¤ĦçIJĨ', 'åύ'] +['èĨ¨', 'èĥĢ'] +['åī¯', 'å¸Ĥéķ¿'] +['çļĦ', '妻åŃIJ'] +['æł·', 'åĵģ'] +['åIJĮæ¯Ķ', 'ä¸ĭéĻį'] +['åħĥ', 'å·¦åı³'] +['ç͍', 'èĩªå·±çļĦ'] +['é«ĺ', 'éĽĦ'] +['æĺ¥', 'æĻļ'] +['ä¹Ł', 'æľīå¾Īå¤ļ'] +['çľ¼', 'çIJĥ'] +['æķ£', 'æŃ¥'] +['ä»ĸ们', 'éĥ½'] +['第ä¸Ģ', 'å®¶'] +['åĬŀ', '好'] +['å®ī', 'éĺ²'] +['ä¸Ģ', 'ä¸ĩ'] +['åľ¨', 'éĩĮéĿ¢'] +['éŁ³', 'é¢ij'] +['åı£', 'åı·'] +['ä¸Ģ', 'è¶Ł'] +['ç¦ı', 'çī¹'] +['é³', 'ŀ'] +['æĥĬ', 'èī³'] +['æĸ°', 'å¨ĺ'] +['绿èī²', 'åıijå±ķ'] +['ä¸Ń', 'å¼ı'] +['ä¹Ł', 'åıªæľī'] +['çݰ', '身'] +['åı¯', 'ä¾Ľ'] +['æ¯ı', 'ä¸Ģ个人'] +['第ä¸ī', 'èĢħ'] +['åľ°', 'å½¢'] +['éĴ¢', 'ç»ĵæŀĦ'] +['çĽijçĿ£', 'æ£ĢæŁ¥'] +['åı«', 'æĪij'] +['èĩ´', 'æķ¬'] +['æ´Ĺ', 'æīĭ'] +['ä¸ĭ', 'è°ĥ'] +['康', 'çĨĻ'] +['æĪIJ交', 'éĩı'] +['ä¹Ł', 'æĪIJ为'] +['åħī', 'æ»ij'] +['å®Įæķ´', 'æĢ§'] +['çģ', '¼'] +['ç¶²', 'éłģ'] +['éķ¿', '寿'] +['éģ©', 'ç͍'] +['çļĦä¸Ģ', '项'] +['çŀ©', '缮'] +['æĬĬ', 'èĩªå·±çļĦ'] +['éĵ¶è¡Į', 'åį¡'] +['å°±', 'å¿ħé¡»'] +['ç¾İ', 'çϽ'] +['éŀį', 'å±±'] +['æľ¬', 'é¢Ĩ'] +['ä¸Ģ', 'ç¢Ĺ'] +['æīĵ', 'æ³ķ'] +['æĤ¨', '好'] +['对', 'åŃ©åŃIJ'] 
+['æĬ¥éģĵ', 'ç§°'] +['ä¼ł', 'åĩº'] +['大', 'èĩ£'] +['ç¬', 'ĭ'] +['çĽ', 'ı'] +['é¾', 'ļ'] +['缴', '线'] +['æĻº', 'åºĵ'] +['ç§Ł', '车'] +['é£İ', 'åij³'] +['çľĭ', 'ä¸Ģä¸ĭ'] +['æİ¨', 'éĶĢ'] +['éĥ¨', 'éĥ¨éķ¿'] +['è´¨éĩı', 'åĴĮ'] +['åĪĬ', 'çĻ»'] +['å·¥ä¸ļ', 'åĮĸ'] +['çİĩ', '为'] +['鼶', 'ä»¶'] +['硬', 'åĮĸ'] +['ä¸Ĭ', 'åįĥ'] +['ç»ıéªĮ', 'å̼'] +['å¹³', 'è¡Į'] +['声', 'éģĵ'] +['æľįåĬ¡', 'è´¨éĩı'] +['çĶŁ', 'çĶ¢'] +['æľĢ', '容æĺĵ'] +['ä¸Ģ', 'æŀļ'] +['å¹´', 'æĬ¥'] +['åħ¬', 'ç½ij'] +['åħ¬ç½ij', 'å®ī'] +['åħ¬ç½ijå®ī', 'å¤ĩ'] +['çļĦ', 'èĥ½éĩı'] +['å®ŀéĻħ', 'è¡ĮåĬ¨'] +['è¦ģ', 'ä¸įè¦ģ'] +['æĹ¥æľ¬', '人'] +['è̶', '稣'] +['ç¼ĸ', 'åī§'] +['æ¶', '©'] +['åį°', 'å°¼'] +['ä¸Ĭä¸ĭ', '游'] +['åĩł', 'åı¥'] +['ä¸Ń', 'éĵģ'] +['ç°¡', 'åĸ®'] +['èĩª', '带'] +['çĶŁ', 'äºİ'] +['ä¸Ģ', 'åı£æ°Ķ'] +['åĭ¤', 'å¥ĭ'] +['éĻį', 'ä»·'] +['å±ķçݰ', 'äºĨ'] +['å¸ĥ', 'æĭī'] +['ä¼ļ', 'éĢīæĭ©'] +['çļĦ', 'ç»ıåħ¸'] +['好', 'æľĭåıĭ'] +['车', 'éģĵ'] +['æķ´', 'åĢĭ'] +['åľ', 'ĵ'] +['éķ¿æľŁ', '以æĿ¥'] +['æĬķ', 'å½±'] +['çļĩ', 'åĨł'] +['è¿ĩ', '大'] +['åijĬè¯ī', 'ä»ĸ'] +['ä¼ģä¸ļ', 'æıIJä¾Ľ'] +['æĬ½', '象'] +['éĢĤ', '度'] +['çļĦ', '女åŃ©'] +['èµ·', 'ä¼ı'] +['çļĦ', 'åĬŁæķĪ'] +['ä¸ĵ项', 'æķ´æ²»'] +['åı¯', 'éĢļè¿ĩ'] +['ä¸įåIJĮ', 'ç¨ĭ度'] +['å¼Ĥ', 'è®®'] +['åĩĢ', 'èµĦ产'] +['åij', 'Ĺ'] +['ä»Ģä¹Ī', 'åij¢'] +['å·¡', 'éĢ»'] +['è¸ı', 'ä¸Ĭ'] +['ä½Ĩ', 'å®ĥ'] +['ç²¾', '度'] +['管', 'å±Ģ'] +['第ä¸Ģ', 'åIJį'] +['åĨħ', 'åŃĺ'] +['æijĨ', 'åľ¨'] +['åī©', 'ä¸ĭ'] +['主ä½ĵ', '责任'] +['çĤ¹', 'åįĬ'] +['以', 'èĩ³äºİ'] +['åħ»èĢģ', 'ä¿ĿéĻ©'] +['æĦŁåıĹ', 'åΰäºĨ'] +['çŁ¥åIJį', 'çļĦ'] +['å¯Į', '豪'] +['妥', 'åĸĦ'] +['åŃĻ', 'åŃIJ'] +['éĵ', 'Ĥ'] +['说', 'èĩªå·±'] +['让', 'æĤ¨'] +['æķ°', 'æİ§'] +['çļĦçľ¼', 'åħī'] +['注', 'éĶĢ'] +['çļĦ', 'çģµéŃĤ'] +['è¿ĺ', 'ä¸įéĶĻ'] +['éĹ®', 'ä»ĸ'] +['èĩªä¸»', 'çłĶåıij'] +['èĵ', 'ĭ'] +['ç´«', 'èī²'] +['åĽ½å®¶', 'å®īåħ¨'] +['è¾½å®ģ', 'çľģ'] +['ä¹Ł', 'æ¯Ķè¾ĥ'] +['ç¾İ', 'èĤ¡'] +['ä¸įç¡®å®ļ', 'æĢ§'] +['å¿ĥ', '头'] +['æĪ', '³'] +['级', 'åĪ«çļĦ'] +['论', 'è¿°'] +['çļĦ', 'åĽŀçŃĶ'] +['ä¿Ŀè¯ģ', 'éĩij'] +['çŃī', 'è¡Įä¸ļ'] +['幸ç¦ı', 'æĦŁ'] +['æŃ§', 'è§Ĩ'] +['æľº', '票'] 
+['æ´¾', '人'] +['èĩ´', 'åij½'] +['åĺ´', 'è§Ĵ'] +['æĸ°éĹ»', 'ä¸Ńå¿ĥ'] +['æĶ¾å¼ĥ', 'äºĨ'] +['å®ľ', 'å±ħ'] +['åĨĻ', 'ä¸ĭ'] +['éĹ®', 'çŃĶ'] +['è¿ĻéĩĮ', 'æĺ¯'] +['å¤ļ', 'åľ°'] +['åĮºåŁŁ', 'åĨħ'] +['åīµ', 'æĸ°'] +['çľĭ', 'ä»ĸ'] +['æī§æ³ķ', '人åijĺ'] +['åĬ¨', 'æľº'] +['éŁ³', 'åĵį'] +['çļĦ', 'åij½è¿IJ'] +['é¡¶', 'éĥ¨'] +['åĵ', 'Ł'] +['éĥ½', 'æľĥ'] +['æīĵéĢł', 'æĪIJ'] +['æĦı', 'åĽ¾'] +['çļ', 'ĸ'] +['åĢĴ', 'åħ¥'] +['å·´', 'èIJ¨'] +['åĬ©', 'åѦ'] +['å¤į', 'åı¤'] +['åIJ¯', 'ç͍'] +['åĽ½éĻħ', 'å¸Ĥåľº'] +['åĤ¨', 'èĥ½'] +['é»ijé¾Ļæ±Ł', 'çľģ'] +['ä¹ĺ', '车'] +['è¿IJåĬ¨', 'ä¼ļ'] +['ä¿Ŀ', 'åĪ©'] +['çŁ³', 'æĿIJ'] +['çµ', '®'] +['çĤĴ', 'ä½ľ'] +['çļĦ', 'ä¿¡ä»»'] +['å°±', 'æĪIJäºĨ'] +['åı¯', 'è§Ĥ'] +['çļĩ', 'ä¸Ĭ'] +['è¿Ļ', 'åĩłå¤©'] +['ä¸Ģ', 'éĶ®'] +['åĨ·', 'åĨ»'] +['ä¿Ŀ', 'åį«'] +['æł¸', 'æ¡ĥ'] +['åIJĪä½ľ', 'åħ³ç³»'] +['éĢģ', 'åĩº'] +['æĹĹ', 'ä¸ĭçļĦ'] +['åľ¨', 'ä¹İ'] +['为', '广大'] +['åįĪ', 'é¤IJ'] +['ä¸ĵ', '访'] +['æĪĸ', 'å°Ĩ'] +['éĿĴå²Ľ', 'å¸Ĥ'] +['å¥Ķ', 'è·ij'] +['æĹ¥', 'æĬ¥éģĵ'] +['å¥ij', 'åIJĪ'] +['æĸ°', 'æĺ¥'] +['ä¸į', 'å°ıå¿ĥ'] +['两', 'ä¸ī'] +['æĦıæĢĿ', 'æĺ¯'] +['åĨ·', 'èĹı'] +['çļĦ', 'çĹĩçĬ¶'] +['æĢ§', 'åij½'] +['è¶ħ', 'æłĩ'] +['å¯Ĩ', '碼'] +['ç§ijæĬĢ', 'èĤ¡ä»½'] +['äºĨä¸Ģ', 'æī¹'] +['çĿ£', 'å¯Ł'] +['åªĴ', 'ä»ĭ'] +['å°Ħ', 'æīĭ'] +['ä¿®', 'åħ»'] +['çīĩ', 'åĪ»'] +['éĢĤåIJĪ', 'èĩªå·±'] +['åıªè¦ģ', 'æĺ¯'] +['åIJĥ', 'è¿ĩ'] +['éĩij', 'éĵ¶'] +['缴', 'å±ŀ'] +['åѦ', 'éĹ®'] +['åİĭ', 'åζ'] +['çªĹ', 'å¤ĸ'] +['æĶ¶', 'åΰäºĨ'] +['åħ¨åĽ½', '人大'] +['ä½Ĩæĺ¯', '对äºİ'] +['åľ¨', 'æķ´ä¸ª'] +['çļĦ', 'èĥĮåIJİ'] +['åĩıå°ij', 'äºĨ'] +['åıį', 'èħIJ'] +['åıįèħIJ', 'åĢ¡'] +['åıįèħIJåĢ¡', 'å»ī'] +['æĹ', '·'] +['åĪĨ', 'æľŁ'] +['åľ¨', 'æ·±åľ³'] +['æīĵ', 'çĿĢ'] +['æī«', 'ä¸Ģ'] +['æī«ä¸Ģ', 'æī«'] +['æĶ¿åºľ', 'éĥ¨éŨ'] +['æİ¥', 'è¿ŀ'] +['å±ŀäºİ', 'èĩªå·±'] +['åŃIJ', 'å¼¹'] +['åIJĮæł·', 'æĺ¯'] +['æĢ»', 'åħ±'] +['车', 'ä¼ģ'] +['æ¢', 'ĵ'] +['åħ¬', 'é¡·'] +['åıij', '声'] +['éĴ', 'Ľ'] +['èµ°åĬ¿', 'åĽ¾'] +['主', 'èIJ¥'] +['åĸ', 'Ķ'] +['æķ°æį®', 'åĪĨæŀIJ'] +['ä¸į', 'è¿ľ'] +['æľī', 'åIJį'] +['æľīåIJį', 'çļĦ'] +['åģ¿', 'è¿ĺ'] 
+['å¾Ī', 'ä½İ'] +['è®ĵ', '人'] +['èĿ', 'ī'] +['é«ĺ', 'è´µ'] +['å°ij', '许'] +['æ°', 'Ł'] +['å¹', '¢'] +['亲', 'æĥħ'] +['è¿Ļä»¶', 'äºĭæĥħ'] +['ç͍', 'é¤IJ'] +['缸åħ³', 'æĸ°éĹ»'] +['å°±', 'åºĶ该'] +['ç»Ī', 'çĤ¹'] +['æĺ¯', 'å¤ļå°ij'] +['çĻ»', 'åľº'] +['è¯ķ', '管'] +['è¯ķ管', 'å©´åĦ¿'] +['åģļ', '大'] +['åģļ大', 'åģļ强'] +['çļĦ', 'ä¾ĭåŃIJ'] +['åħ«', '个'] +['æĺİ', 'æĹ¥'] +['çĤ', '³'] +['èµ°', 'åİ»'] +['éģ', 'º'] +['å¢', '©'] +['ä½ĵä¼ļ', 'åΰ'] +['åĴ', 'ı'] +['ä¸ĭ', 'è¾¾'] +['å¤į', 'åıij'] +['追', 'éĢIJ'] +['æīĵ', 'åĵį'] +['çļĦ', 'éļ±ç§ģæ¬Ĭ'] +['åħ·æľī', 'ä¸Ģå®ļ'] +['è¿Ļä¹Ī', 'å¤ļå¹´'] +['æłij', 'æŀĹ'] +['æľĢ', 'éķ¿'] +['åIJĮ', 'èĥŀ'] +['åħī', 'æ³½'] +['åŁŁ', 'åIJį'] +['æĮĩ', 'åIJij'] +['åıĹ害', 'èĢħ'] +['æłij', 'èĦĤ'] +['æľīå¤ļ', '大'] +['大', 'éĿ¢ç§¯'] +['æĹł', 'ç¼Ŀ'] +['æĶ¹', 'æŃ£'] +['æĽ´å¤ļ', 'çļĦæĺ¯'] +['æľŁ', 'æľ«'] +['æŃ', '¼'] +['ä¹ī', 'ä¹Į'] +['éĤ£', 'ä½ł'] +['çļĦ', '第ä¸Ģ个'] +['èĮ', 'µ'] +['å°', '§'] +['èį', '«'] +['ä¸įä»ħ', 'åı¯ä»¥'] +['æ¶Į', 'çݰ'] +['æĢ»', 'éĿ¢ç§¯'] +['æĸ°éĹ»', 'åıijå¸ĥ'] +['æ°ij', 'ç͍'] +['å°±', '读'] +['æīĵ', 'è´¥'] +['å¤ĸ', 'è¯Ń'] +['æĪij们', 'ä¸Ģèµ·'] +['é¢Ħ', 'å®ļ'] +['çĥ¹', '饪'] +['æľĢ', '主è¦ģ'] +['æľĢ主è¦ģ', 'çļĦ'] +['çīĮ', 'çħ§'] +['åĽł', 'åħ¶'] +['ä½İ', 'ä¸ĭ'] +['ä¼ļ', 'åIJĮ'] +['è§ģ', 'è§£'] +['éĹ´', 'éļĶ'] +['æķĻ', 'ç¨ĭ'] +['å°', 'ī'] +['å¸Ĥ', 'ä¸Ńå¿ĥ'] +['åħ³éĶ®', 'æĺ¯'] +['æµ·', 'åįĹçľģ'] +['çī¹åĪ«', 'æĺ¯åľ¨'] +['ä¸ŃåĽ½', '大éĻĨ'] +['åħħè¶³', 'çļĦ'] +['æĹ¢', 'èĥ½'] +['åĤ³', 'çµ±'] +['çijľ', 'ä¼½'] +['åħ¥', 'åĽ´'] +['æħ¢æħ¢', 'åľ°'] +['æĬ¥', 'éħ¬'] +['æī¹', 'å¤į'] +['å·¥ä¸ļ', 'åĽŃåĮº'] +['ä¸İ', 'åıijå±ķ'] +['èĥ¸', 'éĥ¨'] +['åľ¨', 'ç½ij绾'] +['åľ¨ç½ij绾', 'ä¸Ĭ'] +['交', 'è°Ī'] +['æĽ´', 'æĶ¹'] +['åįłæľī', 'çİĩ'] +['ä¸Ŀ绸', 'ä¹ĭè·¯'] +['è¡', 'Ľ'] +['çłĶ', 'åΤ'] +['åĪ', 'ª'] +['åĪª', 'éϤ'] +['è¿Ļ', 'åıª'] +['çļĦ', 'æ°Ķæģ¯'] +['åĬł', 'å·ŀ'] +['éĴ', '§'] +['çIJĨäºĭ', 'éķ¿'] +['ä¸ĸ', 'å®¶'] +['æµģè¡Į', 'çļĦ'] +['å¾Ī', 'æľīåı¯èĥ½'] +['们', 'éĥ½'] +['ç»ıèIJ¥', '模å¼ı'] +['è¡Įä¸ļ', 'ä¸Ń'] +['éĢļçŁ¥', '书'] +['åij½', 'é¢ĺ'] +['æľ¬', 'ç¶²ç«Ļ'] +['æ²Ļ', 'çī¹'] +['åıij', 
'åħī'] +['é«ĺ', 'ä»·'] +['å·²', 'çĦ¶'] +['åıĮ', 'åįģä¸Ģ'] +['ä¸Ĭ', 'è¯ī'] +['ç¿ħ', 'èĨĢ'] +['è¿Ļä¸Ģ', 'å¹´'] +['大ä¼ļ', 'ä¸Ĭ'] +['éĩ', 'ī'] +['å®Įåħ¨', 'æĺ¯'] +['å¾Ĺ', '太'] +['ä¸Ģèά', '人'] +['è¿ĺ', 'ç®Ĺ'] +['æĬĺ', 'åıł'] +['æĬķ', 'æľº'] +['çĤ¹', 'çĩĥ'] +['çݰéĩij', 'æµģ'] +['åħĶ', 'åŃIJ'] +['ç½ij', 'æł¼'] +['æİ¥', 'è¿ĩ'] +['ä¾Ľ', 'è´§'] +['éĺ´', 'å½±'] +['åİŁ', 'åħĪ'] +['æį', '£'] +['å·¦', 'ä¾§'] +['åħĭ', 'æĭī'] +['æīĵ', 'åį¡'] +['ç§ij', 'æ¯Ķ'] +['æ±ĩ', 'éĽĨ'] +['åľ°çIJĨ', 'ä½įç½®'] +['è¯Ħ', 'å§Ķ'] +['ç»ĵåIJĪ', 'èµ·æĿ¥'] +['è¿Ľåħ¥', 'åΰ'] +['åı¯', 'è¡Į'] +['åı¯è¡Į', 'æĢ§'] +['让', 'å®ĥ'] +['åĪ¶åº¦', 'æĶ¹éĿ©'] +['çĶĺèĤĥ', 'çľģ'] +['åĵ', 'Ĺ'] +['åģı', 'åģı'] +['è¡£', 'çī©'] +['ç¥Ŀ', 'è´º'] +['æºIJ', 'èĩª'] +['å¹¶ä¸į', '代表'] +['åĽ½', '度'] +['好', 'åĿı'] +['æĿ', 'ĸ'] +['æĿŃ', 'å·ŀå¸Ĥ'] +['湿', '度'] +['é²', '¸'] +['åįļ', '彩'] +['æ³°', 'å±±'] +['æĿij', 'èIJ½'] +['æĸ°', 'èģŀ'] +['èĤ', 'ĭ'] +['åı¤èĢģ', 'çļĦ'] +['çļĦ', 'ç§ĺå¯Ĩ'] +['ä¸Ģ个', 'éĹ®é¢ĺ'] +['éģı', 'åζ'] +['åįĥ', '亿'] +['è¿ĩ', '硬'] +['å°Ħ', 'åĩ»'] +['èĩªçĦ¶', 'æĺ¯'] +['产', 'åĮº'] +['çĤ¹', 'çĤ¹å¤´'] +['åı¯ä»¥', '帮åĬ©'] +['说', 'å®ŀ'] +['说å®ŀ', 'è¯Ŀ'] +['æĪij', 'åıªæĺ¯'] +['ä¹ĭ', 'ä½Ļ'] +['åIJĮæĹ¶', 'ä¹Łæĺ¯'] +['ä¸ŃåĽ½', 'éĺŁ'] +['建æĪIJ', 'åIJİ'] +['ä¹IJ', 'è§Ĩ'] +['åij¨', 'å²ģ'] +['èį¯', 'åºĹ'] +['éĩij', 'åįİ'] +['严éĩį', 'å½±åĵį'] +['è´¨', 'åľ°'] +['æĹħ', 'éģĬ'] +['åħµ', 'åύ'] +['æķĻèĤ²', 'æķĻåѦ'] +['离', 'åİ»'] +['åIJĦå¼ı', 'åIJĦæł·'] +['ä»ĭ', 'ç´'] +['ä»ĭç´', '¹'] +['å¼Ģ', '头'] +['å°Ĩ', 'èĩªå·±çļĦ'] +['åIJ¬', 'åĬĽ'] +['ä¿¡æģ¯', 'ç³»ç»Ł'] +['ä»İ', 'æł¹æľ¬'] +['ä»İæł¹æľ¬', 'ä¸Ĭ'] +['æİĮ', '声'] +['欢', 'åĸľ'] +['å±ķ', 'åĮº'] +['åķ', '¸'] +['太å¤ļ', 'äºĨ'] +['éĹ²', 'ç½®'] +['èĥ¡', 'èIJĿåįľ'] +['å§Ķ', 'å®£ä¼ł'] +['å§Ķå®£ä¼ł', 'éĥ¨'] +['åįĹ', 'éĺ³'] +['å·ŀ', 'åĮº'] +['ä¸İ', 'æĹ¶'] +['ä¸İæĹ¶', '俱'] +['ä¸İæĹ¶ä¿±', 'è¿Ľ'] +['å«Įçĸij', '人'] +['èī¯', 'å¿ĥ'] +['头', 'é¡¶'] +['è´¢', 'æĬ¥'] +['ä½Ľ', 'æ³ķ'] +['å¾', 'µ'] +['åİŁ', 'ä»¶'] +['åĭ', 'ŀ'] +['çĶ·', '篮'] +['å¤ĸåĽ½', '人'] +['è¿Ŀ', '纪'] +['æī¾', 'äºĨ'] +['æįķ', 'æįī'] +['缸', 'è¯Ĩ'] 
+['æIJľ', 'éĽĨ'] +['çļĦ', 'ä¼Łå¤§'] +['ä¸ī', 'ç»´'] +['å°±è¡Į', 'äºĨ'] +['çĭIJ', 'æľĪ'] +['çĭIJæľĪ', 'å±±'] +['å¸ĮæľĽ', 'éĢļè¿ĩ'] +['èĢĮ', '对äºİ'] +['éĿ¢', 'å°į'] +['åĨĽ', 'åĽ¢'] +['è¡Ĺ', 'åĮº'] +['æĤ¬', 'æĮĤ'] +['便', 'ç§ĺ'] +['æľīä¸Ģ', 'çĤ¹'] +['ä¼ļè®®', 'ä¸Ĭ'] +['ä¸ĭ', 'æīĭ'] +['廣', 'åijĬ'] +['äºĶ', 'è¡Į'] +['çŃī', 'åĢĻ'] +['ç´§ç´§', 'åĽ´ç»ķ'] +['æĭ¿', 'äºĨ'] +['æ¡Į', 'éĿ¢'] +['ç¥ŀ', 'æĥħ'] +['éĽĦ', 'åİļ'] +['çŀ', '³'] +['楼', 'ä¸ĭ'] +['å½', 'ª'] +['äºĭ', 'åıij'] +['åĨį', 'è§ģ'] +['é¤', 'ĺ'] +['é¢Ħ', 'åĶ®'] +['åİ»', 'çľĭçľĭ'] +['æĪij们', 'åºĶ该'] +['ä¸ī', 'å®¶'] +['æµ', 'Ĭ'] +['ä¹IJ', 'éĺŁ'] +['çľĭ', 'ä¸įè§ģ'] +['èĦij', 'åŃIJ'] +['æĮģ', 'æľīçļĦ'] +['çϽ', 'èıľ'] +['éĹª', 'çĥģ'] +['åĸĿ', 'æ°´'] +['æİ§åζ', 'ç³»ç»Ł'] +['ä¸ĵ', 'åĮº'] +['æľĿ', 'å»·'] +['æĪij', 'å¿ĥéĩĮ'] +['å±ķ', 'åİħ'] +['èľĺ', 'èĽĽ'] +['åĨ»', 'ç»ĵ'] +['ç²', 'ª'] +['åº', 'IJ'] +['åIJij', '社ä¼ļ'] +['åĨ³çŃĸ', 'éĥ¨ç½²'] +['çŁŃ', 'æľŁåĨħ'] +['æĸ°', 'ä¸ļæĢģ'] +['æľ', 'Ķ'] +['æĹ¶', 'æĬ¥'] +['使', 'ä¹ĭ'] +['åĽł', 'åŃIJ'] +['åıĤä¸İ', 'èĢħ'] +['çļĦ', '年轻人'] +['æīĭ', '表'] +['å°ģ', 'éĶģ'] +['为ä»Ģä¹Ī', 'ä¸į'] +['åIJ¸', 'çĥŁ'] +['æ¯Ĵ', 'ç´ł'] +['åĪij', 'æ³ķ'] +['磫', 'æŃ£'] +['身', 'æĹģ'] +['åİŁ', 'è°ħ'] +['çĽij', 'æĬ¤'] +['æŃ¤', 'å¤Ħ'] +['éļ¨', 'æĻĤ'] +['æŀľ', 'å®ŀ'] +['åĮ»çĸĹ', 'æľįåĬ¡'] +['ä¸į', 'åIJĪçIJĨ'] +['æIJŀ', '好'] +['çļĦ', 'èĦļæŃ¥'] +['å¤ĸ', 'å¥Ĺ'] +['ç¶ĵ', 'éģİ'] +['æĶ¾', 'ç¼ĵ'] +['åģľ', 'çķĻ'] +['æĺŁ', 'çIJĥ'] +['çļĦä¸Ģ', 'éĿ¢'] +['åĩł', 'ä½ķ'] +['è½®', 'åĽŀ'] +['æ¯Ľ', 'å·¾'] +['ä¿®', 'çIJĨ'] +['ä¸įçŁ¥', 'ä¸į'] +['ä¸įçŁ¥ä¸į', 'è§ī'] +['æķ´', '个人'] +['æ¯ģ', 'çģŃ'] +['åı°', 'å·ŀ'] +['使ç͍', '寿åij½'] +['é»ij', 'çϽ'] +['æij¸', 'ç´¢'] +['é¼ł', 'æłĩ'] +['éĿ©', 'æĸ°'] +['éº', 'µ'] +['ä¸ĵéŨ', '为'] +['å¾Īå¤ļ', 'æľĭåıĭ'] +['å·¥ä½ľ', 'ç»Ħ'] +['åIJĪ', 'å½±'] +['çĤº', 'ä»Ģ麼'] +['æŀģ', '度'] +['çļĦ', 'è¿ĽæŃ¥'] +['å½ĵ', 'ä¹ĭ'] +['å½ĵä¹ĭ', 'æĹł'] +['å½ĵä¹ĭæĹł', 'æĦ§'] +['è´´', 'è¿ij'] +['å°º', '度'] +['åľ¨', 'çİ°åľº'] +['éĻį', '临'] +['åħ»èĢģ', 'éĩij'] +['ç£', 'ķ'] +['åı¯ä»¥', '使'] +['管çIJĨ', 'æ°´å¹³'] +['æľ¬æĬ¥', 'è®°èĢħ'] 
+['æ³ķ', '令'] +['åį¡', '车'] +['举', 'æµ·'] +['å¤ļ', 'éĩį'] +['åħ¶', 'éĹ´'] +['ç´', 'Ļ'] +['éĩį大', 'é¡¹çĽ®'] +['æ±Ĺ', 'æ°´'] +['ç»Ħ', 'å§Ķä¼ļ'] +['ä¿¡æģ¯', 'åħ¬å¼Ģ'] +['ä¸į论', 'æĺ¯'] +['ä¸Ģ', 'åIJ¬'] +['èĴ¸', 'æ±½'] +['æıŃ', 'ç§ĺ'] +['è¶ħ', 'éģİ'] +['触', 'åıij'] +['å©', '¦'] +['åħ³èģĶ', '交æĺĵ'] +['å°±', 'ç»Ļ大家'] +['好', 'ä¹ħ'] +['åĢŁ', 'è´·'] +['游æĪı', 'è§Ĵèī²'] +['å¼ĢåIJ¯', 'äºĨ'] +['æİ', 'ł'] +['åħļçļĦ', 'åįģä¹Ŀ'] +['ä¸ĭ', '鼨'] +['çŁŃ', 'æĹ¶éĹ´åĨħ'] +['å¯', 'ħ'] +['导', 'åħ¥'] +['å·¥ä½ľ', 'ç»ıéªĮ'] +['ä¹Ł', 'åıªèĥ½'] +['鼷', 'éľĨ'] +['è·Ł', 'è¿Ľ'] +['åį¡', 'éĢļ'] +['é¢ĩ', 'æľī'] +['æľº', 'ä½ĵ'] +['æĪĺ士', 'èģĮä¸ļ'] +['女', '主'] +['ä½ĵåζ', 'æľºåζ'] +['è¶³', 'åįı'] +['èĪĴéĢĤ', 'çļĦ'] +['åĢŁ', 'åı£'] +['æī¹', 'åΤ'] +['æķ°', 'å̼'] +['è«', '¾'] +['éĺ¿æĭī', '伯'] +['åĺ', 'İ'] +['æħ', '¶'] +['è¾¾', '人'] +['å¼Ģ', 'æ°´'] +['大', '鼨'] +['温', '室'] +['ä½İ', 'è¿·'] +['ä»į', 'æĹ§'] +['éªĹ', 'åŃIJ'] +['亲', 'å±ŀ'] +['çIJĨ', 'æĻº'] +['æľ¬', 'åŁºéĩij'] +['å¨', 'ħ'] +['åĨĻåŃĹ', '楼'] +['å¢Ļ', 'å£ģ'] +['å®', 'µ'] +['èϽ', 'çĦ¶æĺ¯'] +['顺', 'çĿĢ'] +['åħ«', 'åį¦'] +['åķĨ', 'ç͍'] +['ä¸į', '失'] +['è¿·', 'èĮ«'] +['顺', '便'] +['æļij', 'æľŁ'] +['欺', 'è´Ł'] +['é¢ij', 'é¢ij'] +['该', 'æł¡'] +['æĸĻ', 'çIJĨ'] +['æ·±', 'æĥħ'] +['åīį', 'éĶĭ'] +['ä¿Ŀ', 'èŃī'] +['èģĮä¸ļ', 'çĶŁæ¶¯'] +['åħ¬', 'å¼Ģåıij'] +['åħ¬å¼Ģåıij', 'è¡Į'] +['åħ¥', 'æĪ·'] +['éł', 'ĵ'] +['å̾', 'åIJ¬'] +['éŃ', 'ģ'] +['æĦī', 'æĤ¦'] +['åĽŀ', 'åIJĪ'] +['åħ¨åĬĽ', '以'] +['åħ¨åĬĽä»¥', 'èµ´'] +['åĥ¹', 'å̼'] +['èĥ½åĬĽ', '强'] +['ç»ı', 'å¼Ģ'] +['ç»ıå¼Ģ', 'åĮº'] +['è¿ľ', 'æĸ¹'] +['çļĦ', 'éģĵçIJĨ'] +['缴', 'åįĩ'] +['缴åįĩ', 'æľº'] +['为主é¢ĺ', 'çļĦ'] +['ç»Ļ', 'æĤ¨'] +['è¿ĺ', 'æĥ³'] +['æ¯Ķ', 'æĪij'] +['åĨľ', 'çī§'] +['æµ·', 'åºķ'] +['çŃ¾è®¢', 'äºĨ'] +['对äºİ', 'æĪij们'] +['æĹ¶', '许'] +['éĶ®', 'çĽĺ'] +['å®ŀéĻħ', 'æİ§åζ'] +['çļĦ', 'æ¨¡æł·'] +['åıįæĺł', 'äºĨ'] +['代', 'åĬŀ'] +['åĮ»', 'ç͍'] +['éĽĨ', 'ç»ĵ'] +['åıijå±ķ', 'åīįæĻ¯'] +['æĮĩ', 'çĿĢ'] +['åįİ', 'åĮĹ'] +['è¿Ļ', 'åĩłä¸ª'] +['åIJį', 'æ°Ķ'] +['åĤį', 'æĻļ'] +['èĩª', 'åıij'] +['æ³¢', 'åħ°'] +['大åĬĽ', 'æİ¨è¿Ľ'] 
+['èĩª', 'ç§°'] +['èįĨ', 'å·ŀ'] +['æIJį', '害'] +['äºĨä¸Ģ', 'åı¥'] +['æľĢåĪĿ', 'çļĦ'] +['éĩijèŀį', 'å᱿ľº'] +['æĢĢ', '念'] +['è¡Į', 'åĭķ'] +['女', 'æİĴ'] +['ä¸į', 'è§£'] +['ä¼ł', 'éĶĢ'] +['转载', '请'] +['饰', 'åĵģ'] +['åıª', '为'] +['ä¸İ', 'ä¼Ĺ'] +['ä¸İä¼Ĺ', 'ä¸įåIJĮ'] +['èĥ½', 'èĢĹ'] +['èı©', 'æıIJ'] +['è¿ij', '两年'] +['è¿Ķ', '乡'] +['马ä¸Ĭ', 'å°±'] +['äºĮ', 'çŃīå¥ĸ'] +['æ°´', '管'] +['æ³ķ', 'åѦ'] +['çģŃ', 'çģ«'] +['大', 'å§IJ'] +['åij¨', '转'] +['æľī', 'æľŁ'] +['æľīæľŁ', 'å¾Ĵ'] +['æľīæľŁå¾Ĵ', 'åĪij'] +['å°į', 'æĸ¹'] +['ç¥ŀ', 'èī²'] +['æ²¹', 'èĦĤ'] +['ä¸ī', 'çĤ¹'] +['ä¸į', 'åĪ©äºİ'] +['äºĭä¸ļ', 'éĥ¨'] +['å°±', 'è·Ł'] +['å¼Ģ', 'æĶ¯'] +['å°ı', '女åŃ©'] +['åħ±åIJĮ', 'åĬªåĬĽ'] +['çĶļèĩ³', 'è¿ĺ'] +['è¿Ļ', 'åIJį'] +['è¿Ļ', 'ç¬Ķ'] +['çݯ', 'åį«'] +['æľī', 'ç§į'] +['è§Ĩ', 'åĬĽ'] +['çĨŁ', 'çŁ¥'] +['åħ¬ç§¯', 'éĩij'] +['æ¶Īéĺ²', 'å®īåħ¨'] +['é¢ĩ', '为'] +['大', 'èħ¿'] +['éĿ', '¶'] +['çī¹', 'æķĪ'] +['æľįåĬ¡', 'åĮº'] +['å¼Ģ', 'åĩº'] +['深度', 'èŀįåIJĪ'] +['æĹł', 'å¿§'] +['æŁ¥', 'éĺħ'] +['ç»Ī', 'ç»ĵ'] +['ä¿Ŀ', 'ç¨İ'] +['è¨İ', 'è«ĸ'] +['å½ĵ', 'åģļ'] +['è·³', 'èĪŀ'] +['å¯', '§'] +['女', 'çİĭ'] +['è®°èĢħ', 'åľ¨'] +['åħ¨', '产ä¸ļéĵ¾'] +['è´¯', 'éĢļ'] +['åħ´', 'ä¸ļ'] +['éĻį', 'åΰ'] +['å°ģ', 'éĿ¢'] +['åħ¨éĿ¢', 'æİ¨è¿Ľ'] +['奶', 'èĮ¶'] +['éĢī', 'åĿĢ'] +['äºĨä¸Ģ', 'åľº'] +['åIJĮ', 'ä¼´'] +['è®®', '论'] +['æIJ', 'ĵ'] +['诸', 'èijĽ'] +['诸èijĽ', '亮'] +['å¹²', 'åĺĽ'] +['æµģ', 'æĦŁ'] +['ä¸ĵä¸ļ', 'çŁ¥è¯Ĩ'] +['ç͵', 'ç«Ļ'] +['åĩı', 'å¼±'] +['åĩº', 'åħ¥'] +['åIJĦ', 'çľģ'] +['éĿŀ常', 'é«ĺ'] +['åľ°', '毯'] +['åıij', 'æĸĩ'] +['çĦ', 'ī'] +['çĥ§', 'çĥ¤'] +['å£ģ', '纸'] +['æģ¶', 'åĮĸ'] +['èĬ', '¸'] +['èĥĸ', 'åŃIJ'] +['çĩ', 'Ĵ'] +['çľģ', 'éĴ±'] +['çϾ', '强'] +['çIJĨå·¥', '大åѦ'] +['éĴ¢', 'æĿIJ'] +['åĽ½æľī', 'èµĦ产'] +['æĪĺ', 'æľº'] +['æ³Ħ', 'éľ²'] +['åIJİéĿ¢', 'çļĦ'] +['æ°´', 'èµĦæºIJ'] +['æ¢ħ', 'èĬ±'] +['åĨĻ', 'çĿĢ'] +['ä¹ĭ', '声'] +['æĹł', 'åı¯'] +['æĺİ', 'æľĿ'] +['ç«ĭæĸ¹', 'ç±³'] +['ç·', '£'] +['æĶ¾', 'è¿ĩ'] +['ç¦ı', 'çͰ'] +['å¾Ĺ', 'ä½ı'] +['åıĹ', 'ä¼Ĺ'] +['ä¸Ń', '级'] +['çĹħ', 'åıĺ'] +['ä¸Ģ', 'çŀ¬éĹ´'] +['æĿĥ', 'éĩį'] +['人æĢ§', 
'åĮĸ'] +['åĮ»çĸĹ', 'åį«çĶŁ'] +['ä¸įåΰ', 'ä½į'] +['æĻºèĥ½', 'å®¶å±ħ'] +['饮', 'ç͍'] +['æ¼Ķ', 'åıĺ'] +['é«ĺ', 'ç´łè´¨'] +['ä¹Ļ', 'æĸ¹'] +['åģľ', 'çķĻåľ¨'] +['èİ·', 'æī¹'] +['ç©¿', 'æ¢Ń'] +['客', 'åľº'] +['æĮ½', 'åĽŀ'] +['京', 'åŁİ'] +['çĶŁåij½', 'åĬĽ'] +['實', 'éļĽ'] +['çĩ', 'Ī'] +['åĨį', 'çݰ'] +['çݰå®ŀ', 'ä¸Ń'] +['æľī', 'ä¿¡å¿ĥ'] +['çĸı', 'éĢļ'] +['åĺ´', 'åĶĩ'] +['鼷', 'éĶĭ'] +['èıľ', 'åįķ'] +['éħ', '¯'] +['è¶ħ', 'é«ĺ'] +['å¾Ī', 'é«ĺåħ´'] +['çĶŁ', 'æ®ĸ'] +['éĢł', 'ä»·'] +['误', 'åĮº'] +['æĨ', 'ĭ'] +['好', 'æ¶Īæģ¯'] +['å´', 'Ń'] +['以', 'èĩ´'] +['å¼Ģ', 'çİ©ç¬ij'] +['çĽij', 'è§Ĩ'] +['å·¡', 'å¯Ł'] +['å¾·', 'å·ŀ'] +['æĹ©', 'æĹ©'] +['éĹª', 'ç͵'] +['æĪª', 'åĽ¾'] +['åı¯ä»¥', 'æł¹æį®'] +['æīĭ', 'èīº'] +['æİ¥', '轨'] +['ç§į', 'æĹı'] +['æĢĢ', 'éĩĮ'] +['åİ»', 'åĮ»éĻ¢'] +['ä¸Ģ', 'äºĮ'] +['å¼Ģ', 'éĺĶ'] +['åĩı', 'éĢŁ'] +['ä½Ĩ', 'ä»İ'] +['éĢĻ', 'ä¸Ģ'] +['åĩı', 'åħį'] +['主é¢ĺ', 'æķĻèĤ²'] +['å¼Ģå·¥', '建设'] +['è¹', '¦'] +['æľĪ', '饼'] +['ä¸ĭ', 'æ²ī'] +['å°Ĭ', '严'] +['éĻ', 'ĩ'] +['å®ŀ', 'æľ¨'] +['å»ł', 'åķĨ'] +['声', 'ç§°'] +['èĢĥ', 'åľº'] +['å¸ĥ', 'é²ģ'] +['èĩª', 'æĿ¥'] +['èĩªæĿ¥', 'æ°´'] +['éĴ', '¾'] +['å¹´', '以ä¸Ĭ'] +['大', 'åıĶ'] +['ä»ĸ', 'å·²ç»ı'] +['åħ¨', 'æĿij'] +['èģĶç³»', 'ç͵è¯Ŀ'] +['为', '导åIJij'] +['åΤ', 'å¤Ħ'] +['对', 'éĺµ'] +['缮', 'æ¨Ļ'] +['åIJį', 'é¢Ŀ'] +['客', 'æ°Ķ'] +['横', 'åIJij'] +['çŃī', 'åĨħ容'] +['åĩł', 'çĤ¹'] +['è°Ī', '论'] +['ä¸į', 'ä¹ı'] +['å±ķ', 'çݰåĩº'] +['è¾ĥ', 'éķ¿'] +['éĢĨ', '转'] +['å°ı', 'æĻĤ'] +['æĺ¯', 'å¤ļä¹Ī'] +['æľ¬', 'æľĪ'] +['è¿ij', 'è§Ĩ'] +['æĪIJç«ĭ', '以æĿ¥'] +['代表', 'çĿĢ'] +['æĬ¥', 'å¤į'] +['æĪı', 'æĽ²'] +['è¨Ń', 'åĤĻ'] +['åħ¥', 'èĤ¡'] +['å¾ģ', 'æľį'] +['é«ĺ', 'åĩº'] +['èĪŀåı°', 'ä¸Ĭ'] +['å¿ĥ', 'åĬ¨'] +['两', 'çĤ¹'] +['缸', 'çķ¶'] +['èĻ', 'Ľ'] +['主', '页'] +['åĩł', 'å®¶'] +['æĹł', 'ä¸į'] +['åįı', 'å®ļ'] +['æĸ', 'IJ'] +['å¯ĵ', 'æĦı'] +['åħ¨', '线'] +['æįķ', 'é±¼'] +['åı¯ä»¥', 'ä»İ'] +['æľī', 'è¿Ļæł·çļĦ'] +['æģ¶', 'éŃĶ'] +['åĮħ', 'åŃIJ'] +['æģ', '¤'] +['å¼Ģå¥ĸ', 'ç»ĵæŀľ'] +['ä¸į', 'æŃ»'] +['èĹ', 'į'] +['弯', 'æĽ²'] +['æµ·', '峡'] +['éĶĢ', 'æ¯ģ'] +['çļĦ', 'çĭ¬çī¹'] +['示', 
'æĦı'] +['ä¸įèĥ½', 'åĨį'] +['èĥ½', 'æĬĬ'] +['éĺ²', '线'] +['ä¸įå°ij', 'äºİ'] +['æ±', 'Ģ'] +['çļĦ', 'éĤ£ä¸Ģ'] +['羣', 'æĥħ'] +['åŀ', '®'] +['被', 'æīĵ'] +['åĽ½', 'å®ī'] +['ç¾İ', 'å¦Ļ'] +['è¿Ļ', 'åĩł'] +['åĩº', 'éģĵ'] +['æľįåĬ¡', 'äºİ'] +['æĪIJæŀľ', '转åĮĸ'] +['æīį', 'åįİ'] +['天', 'é¹ħ'] +['åĩł', '个人'] +['åĢĺ', 'èĭ¥'] +['è̽', '误'] +['æĬĹ', 'æĪĺ'] +['è¡Į', 'éĬ·'] +['æĿ¥', 'è¢Ń'] +['åĢŁ', 'éĮ¢'] +['èįī', 'èİĵ'] +['ä¸¥æł¼', 'æī§è¡Į'] +['举è¡Į', 'äºĨ'] +['å¤ĸ', 'ç±į'] +['å·²', 'è¾¾'] +['æĿij', 'åħļæĶ¯éĥ¨'] +['è¡', 'Ŀ'] +['éĻį', 'èĩ³'] +['æµ·', 'éĩı'] +['é¤IJ', 'é¦Ĩ'] +['æĢ¥', 'å¿Ļ'] +['æ·±', 'è¿ľ'] +['å¾Ģ', 'è¿Ķ'] +['ç¨İåĬ¡', 'å±Ģ'] +['å¹¿æ³Ľ', 'åºĶç͍'] +['è®®', 'åijĺ'] +['æĹł', 'æķĮ'] +['çľ¼', 'åħī'] +['çĥŃè¡Ģ', 'ä¼łå¥ĩ'] +['æŃ', 'IJ'] +['äºĨ', 'äºĽ'] +['è¿Ŀ', 'èĥĮ'] +['è¿Ļ', 'æĺ¯ä¸Ģç§į'] +['ä¸į', '稳å®ļ'] +['大家', 'åĪĨ享'] +['表', 'çı¾'] +['åīį', 'åįģ'] +['è·¯', 'è¿ĩ'] +['æĴ', '©'] +['åIJĮ', 'æĥħ'] +['ä¹ł', 'ä¿Ĺ'] +['åıij', 'è´¢'] +['åºĶ', 'æľīçļĦ'] +['æĿİ', 'æŁIJ'] +['èĤ', 'Ľ'] +['马', 'åħĭ'] +['éĢļ', 'åijĬ'] +['å·¨', '人'] +['ä¸Ģ', 'åĽ¢'] +['éĢĻ', '次'] +['ä¸į', 'äºĨè§£'] +['æĸ½', 'è¡Į'] +['èij¡èIJĦ', 'çīĻ'] +['åıĺå¾Ĺ', 'æĽ´åĬł'] +['æı', '£'] +['åĪĽæĸ°', 'èĥ½åĬĽ'] +['çķħ', 'éĶĢ'] +['表', 'æī¬'] +['æ¯Ķ', 'åĪ©'] +['æ¯ĶåĪ©', 'æĹ¶'] +['åĮ»çĸĹ', 'ä¿ĿéĻ©'] +['æĵį', '纵'] +['伤', '亡'] +['æµİ', 'å®ģ'] +['åıĺ', 'äºĨ'] +['æľ¬æ¬¡', 'æ´»åĬ¨'] +['åľŁ', '豪'] +['æĥ³', 'åĬŀæ³ķ'] +['æĺ', 'ķ'] +['å½ĵ', 'æĻļ'] +['åĩº', 'å±Ģ'] +['çĥŃ', 'è®®'] +['è°Ī', 'è°Ī'] +['æĻĭ', 'åįĩ'] +['åĬ¿', 'å¿ħ'] +['çĻ»', 'å±±'] +['éĤ£', 'åĦ¿'] +['åIJĥ', 'åΰ'] +['ä¹ĭ', 'åŁİ'] +['å¿«', 'æĿ¥'] +['æ¹Ľ', 'æ±Ł'] +['第ä¸ī', '个'] +['åħ¨éĿ¢', 'æıIJåįĩ'] +['å¥ĸ', 'åѦ'] +['å¥ĸåѦ', 'éĩij'] +['æĬķåħ¥', '使ç͍'] +['é½IJ', 'é²ģ'] +['åı¯ä»¥', 'æĬĬ'] +['åĴĮ', 'ä»ĸçļĦ'] +['è´ŃæĪ¿', 'èĢħ'] +['æŃ£å¼ı', 'åIJ¯åĬ¨'] +['åįİ', '润'] +['ä¸įæĸŃ', 'å®ĮåĸĦ'] +['éĴ¢', 'æĿ¿'] +['ç´¯', '积'] +['满', 'èĦ¸'] +['åĽĽ', 'æĸ¹'] +['è´¢', 'çī©'] +['ä»ĸ们', 'ä¼ļ'] +['å¤ı', 'æĹ¥'] +['éĤ£', '个人'] +['éĿł', 'çĿĢ'] +['çĤ¹', 'äºĨ'] +['çĤ¹äºĨ', 'çĤ¹å¤´'] +['æ©', 'ĭ'] +['åıĪ', 
'好'] +['åıĪ好', 'åıĪ'] +['åıĪ好åıĪ', 'å¿«'] +['éĺµ', 'éĺµ'] +['å°ģ', '建'] +['æľ¬', 'çͰ'] +['çī©ä¸ļ', 'æľįåĬ¡'] +['èĩªè´¸', 'åĮº'] +['åIJ', 'ı'] +['便åĪ©', 'åºĹ'] +['åĽ½å®¶', 'æłĩåĩĨ'] +['éĿ¢', 'ç²ī'] +['èī°', 'è¾Ľ'] +['æĶ»', 'åħ³'] +['æīĵ', 'åĮħ'] +['车', 'éĺŁ'] +['人', 'éĢī'] +['åı¯', 'ä¸įæĺ¯'] +['äºĮ', 'åįģå¹´'] +['åIJį', 'å¸Ī'] +['浦', '举'] +['åħ¬', 'è¯ģ'] +['è¿IJ', 'éĢģ'] +['æĺ¯', 'æľĢ好çļĦ'] +['æŁĶ', 'åĴĮ'] +['çİĭ', 'æŁIJ'] +['çĹħ', 'æĪ¿'] +['åĨ¶', 'éĩij'] +['ä¸Ģä»¶', 'äºĭæĥħ'] +['åį', '¤'] +['åı¯', 'æİ§'] +['çī', 'Ł'] +['æĭ', 'Ĥ'] +['å·²', 'äºİ'] +['人', 'éĢł'] +['çĶŁçī©', 'åĮ»èį¯'] +['ä½ĵ', 'çݰåĩº'] +['èĤ²', 'åĦ¿'] +['èĢģ', 'å®ŀ'] +['åľĸ', 'çīĩ'] +['è«', '¸'] +['ç´¯', 'äºĨ'] +['æĦŁåħ´è¶£', 'çļĦ'] +['åĽ¾çīĩ', 'æĿ¥æºIJ'] +['ä¹Ł', 'æĺ¯ä¸Ģç§į'] +['æ¾İæ¹ĥ', 'æĸ°éĹ»'] +['æĹ¶', '表示'] +['åħī', 'è¾ī'] +['æĬ¥', 'åºŁ'] +['å²ģ', 'æĹ¶'] +['éħ', '®'] +['æ£Ģ', 'ä¿®'] +['åıĺ', 'éĢŁ'] +['åıĺéĢŁ', 'ç®±'] +['åľ¨', 'èģĮ'] +['éı', '¡'] +['æį', 'Ĥ'] +['çĿ£', 'åĬŀ'] +['æ°¸', 'ä¸į'] +['åģļ', 'ä¸ĢäºĽ'] +['åİĨ', 'æĹ¶'] +['å·¥ç¨ĭ', 'æľºæ¢°'] +['æģ°', 'å½ĵ'] +['å°±', 'åľ¨äºİ'] +['ç§°', 'åij¼'] +['éĢļ常', 'æĺ¯'] +['æł·', 'å¼ı'] +['åij¨', 'ä¸Ģ'] +['èĭ±', 'éķij'] +['åĿĩ', '线'] +['ä¼ł', 'éĹ»'] +['ç͍æĪ·', 'ä½ĵéªĮ'] +['èµŀ', 'åIJĮ'] +['骨', 'æĬĺ'] +['为主', 'ä½ĵ'] +['æ±Ł', 'å±±'] +['æ¸ħ', 'æľĿ'] +['æĶĢ', 'åįĩ'] +['ä¸į', 'çĽ¸ä¿¡'] +['éĿ', '´'] +['æŃ¦', 'åĬŁ'] +['åĭ¤', 'åĬ³'] +['æĿ¥', 'æī¾'] +['å°Ĩ', 'æĮģç»Ń'] +['丫', '头'] +['æ¨Ļ', 'æºĸ'] +['è£', '´'] +['深深', 'çļĦ'] +['åŃķ', 'èĤ²'] +['è§ĦåĪĴ', '建设'] +['æ¸ħ', 'çν'] +['ç²¾åĩĨ', 'æī¶è´«'] +['æīĵçł´', 'äºĨ'] +['è¿Ļä¸Ģ', '天'] +['å·¥ä½ľ', 'æĢ»ç»ĵ'] +['æĹħ', 'ç¨ĭ'] +['举', 'èIJ¥'] +['æĶ¾', 'å°Ħ'] +['æľī', 'åĩłä¸ª'] +['éĿŀ', 'çī©è´¨'] +['åIJĥ', 'å¾Ĺ'] +['åĹ', '¨'] +['ä¼ļ', 'åıijçĶŁ'] +['篮', 'æĿ¿'] +['å¼Ģ', 'å°ģ'] +['麻', 'å°Ĩ'] +['èıı', 'æ³½'] +['ä¸į', 'åIJĪ'] +['ç³»åĪĹ', '产åĵģ'] +['èѬ', 'å¦Ĥ'] +['ç¾İ', 'èªī'] +['èĩªå·±', 'åĸľæ¬¢'] +['交æĺĵ', 'ä¸Ńå¿ĥ'] +['åIJĪ', 'åͱ'] +['使', 'æĪij'] +['åĥı', 'ç´ł'] +['带', 'éĺŁ'] +['ä½Ĩ', '对äºİ'] +['æĬĬ', 'è¿Ļ个'] +['èĤĿ', 'èĦı'] 
+['åįķ纯', 'çļĦ'] +['æĶ»åĿļ', 'æĪĺ'] +['缼', 'ä¼ļ'] +['åijµ', 'æĬ¤'] +['æª', 'Ģ'] +['èµ¶', 'ä¸Ĭ'] +['æ¥', 'Ĭ'] +['ä¹ħ', 'äºĨ'] +['ç¡', 'Ŀ'] +['çŃĶ', 'é¢ĺ'] +['ä¿ĿæĮģ', 'çĿĢ'] +['è§ģ', 'è¯Ĩ'] +['çĤ¹', 'åĦ¿'] +['åįĬ', '个æľĪ'] +['æ»', 'ĩ'] +['浸', '泡'] +['ä¼ł', 'éĢģ'] +['åľ¨', 'å¸Ĥåľºä¸Ĭ'] +['ä¹ĭ', '乡'] +['çī¹', 'éķ¿'] +['éĽ', 'ŀ'] +['èª', 'ł'] +['身', 'å¤Ħ'] +['æŁł', '檬'] +['身', 'ç©¿'] +['çľģ', 'åħ¬å®ī'] +['çľģåħ¬å®ī', 'åİħ'] +['åıĻ', 'åĪ©äºļ'] +['åĩł', 'åĪĨéĴŁ'] +['人', 'åĢij'] +['åľ°', '段'] +['èĩª', 'åѦ'] +['ä¹Ł', 'è¶ĬæĿ¥è¶Ĭ'] +['èģĮ', 'æĿĥ'] +['æĸ', '§'] +['èĩ', '»'] +['å½Ĵ', '纳'] +['驾', 'é©Ń'] +['éĥ¨åĪĨ', 'åľ°åĮº'] +['没æľī', 'æĥ³åΰ'] +['æĴ', 'ĩ'] +['ä¹Į', 'é²ģ'] +['ä¹Įé²ģ', 'æľ¨'] +['ä¹Įé²ģæľ¨', 'é½IJ'] +['èĤ²', '人'] +['çļĦ', 'æŃ¥ä¼IJ'] +['å»¶', 'æľŁ'] +['æ²¹', 'æ°Ķ'] +['åģļ', 'å®Į'] +['åľ£', 'åľ°'] +['丰', 'åİļ'] +['宽', '带'] +['åı¯éĿł', 'çļĦ'] +['åºŃ', 'éĻ¢'] +['åŃ', 'ľ'] +['å°ı康', '社ä¼ļ'] +['å®īåħ¨', '管çIJĨ'] +['å¹´', '第'] +['æİĴ', '污'] +['èĥĮ', 'åĮħ'] +['å®¶', 'ä½ı'] +['åħ¶å®ŀ', 'å°±æĺ¯'] +['ä¼ļ', 'è§ģ'] +['帮åĬ©', 'ä¼ģä¸ļ'] +['ç½ij', 'è´Ń'] +['æĺ¯', 'ä¸įä¼ļ'] +['飯', 'åºĹ'] +['æŃ»', 'åİ»'] +['åħįçĸ«', 'åĬĽ'] +['æľ', 'ķ'] +['åĸĿ', 'äºĨ'] +['è½»', 'å¾®'] +['个æľĪ', 'åĨħ'] +['ç»Ħ', 'åĽ¢'] +['åĴĮ', 'å®ĮåĸĦ'] +['é¸', '½'] +['æıIJ', 'éĢŁ'] +['西å®ī', 'å¸Ĥ'] +['ä¸Ńå¿ĥ', '主任'] +['æĹ¶éĹ´', '为'] +['æľŁ', 'æĿĥ'] +['è¶', 'ķ'] +['ä¸įä»ħ', 'è¦ģ'] +['æľį', 'ä»İ'] +['é¡ĺ', 'æĦı'] +['ä¸į', 'å°ı'] +['ä¸įå°ı', 'çļĦ'] +['ç°', 'ĩ'] +['çª', '¦'] +['åĪĩ', 'æĪIJ'] +['åĵĪ', 'åĪ©'] +['天', '羣'] +['ä¸Ģ次', '次'] +['éĩij', 'å¸ģ'] +['æĢİä¹Ī', 'èĥ½'] +['ç½ij', 'è´·'] +['ä¼ļ计', 'å¸Ī'] +['çŁŃ', '缺'] +['对', 'æłĩ'] +['åıĺå¾Ĺ', 'æĽ´'] +['åīį', 'åĩłå¤©'] +['éĺ²', 'æ±Ľ'] +['彩', 'èϹ'] +['åĵģ', 'ä½į'] +['表', 'æł¼'] +['严', 'å¯Ĩ'] +['æ¯Ľ', 'åĪ©çİĩ'] +['çļĦ', 'åį±å®³'] +['å½ķ', 'åζ'] +['æ°´', 'åĬ¡'] +['èĥ½å¤Ł', '让'] +['å¹³', 'æĿ¿'] +['ä¹³', 'æĪ¿'] +['è¸ı', 'å®ŀ'] +['é¦ĸ', 'åĪĽ'] +['é¦Ļ', 'èķī'] +['æĬ¥', '表'] +['ä¸Ģ', 'æĬ¹'] +['åĩºçĶŁ', 'äºİ'] +['è²»', 'ç͍'] +['åĩº', '让'] +['åIJĪæ³ķ', 'æĢ§'] +['å°¼', 'åħĭ'] 
+['åĨ°', 'åĨ·'] +['é¦Ļ', 'æ°Ķ'] +['åı·', 'ç§°'] +['èµ·', 'çłģ'] +['åŁİ', 'åİ¿'] +['çİ©', 'èĢį'] +['ä¸Ĭ', 'éĻIJ'] +['ä¼ļè®®', 'ç²¾ç¥ŀ'] +['æĹģè¾¹', 'çļĦ'] +['便', 'ä¼ļ'] +['æıŃ', 'æĻĵ'] +['çİ©', 'æĦı'] +['éĽª', 'å±±'] +['åIJij', 'çĿĢ'] +['ä½ĵèĤ²', 'åľ¨çº¿'] +['说æĺİ', '书'] +['åĮĸ', 'èĤ¥'] +['åħļç»Ħ', '书记'] +['åĬ¨', '人'] +['ä¹ĭ', 'æīĢ'] +['æľĪ', 'èĩ³'] +['æľĢå¿«', 'çļĦ'] +['èĬĤ', 'åģĩæĹ¥'] +['ä¸ĵ', 'åľº'] +['èĢĥ', 'ä¸Ĭ'] +['çª', 'Ł'] +['é²ľ', 'è¡Ģ'] +['è¾ĥ强', 'çļĦ'] +['æĤĦ', 'çĦ¶'] +['å¤ļ个', 'åĽ½å®¶'] +['çªĹ', 'å¸ĺ'] +['æŀģ', 'å¤§åľ°'] +['ä¸įç͍', 'æĭħå¿ĥ'] +['è¿Ļä¹Ī', 'åģļ'] +['åĥ¹', 'æł¼'] +['ç¾İ丽', '乡æĿij'] +['å°ıæĹ¶', 'åĨħ'] +['ç´§', 'è¿«'] +['大', 'çģ«'] +['èĥ³', 'èĨĬ'] +['æĵįä½ľ', 'ç³»ç»Ł'] +['æ®ĭ', 'çķĻ'] +['åĨĻ', 'åĩº'] +['ç¦ģ', 'å¿Į'] +['åĬłçĽŁ', 'åºĹ'] +['è¿ij', 'çϾ'] +['便', 'åı¯'] +['æķ´æĶ¹', 'æİªæĸ½'] +['éĩĩ访', 'æĹ¶'] +['åĶIJ', '代'] +['æ·±åĮĸ', 'æĶ¹éĿ©'] +['çŁ', '¢'] +['éĥ½', 'åĸľæ¬¢'] +['è¶ĬæĿ¥è¶Ĭ', 'é«ĺ'] +['èĬ±', 'æľµ'] +['头', 'çĸ¼'] +['å®ī', '康'] +['å¢ŀéķ¿', 'çİĩ'] +['çľ¼', 'çľĭ'] +['å°±æĺ¯', '为äºĨ'] +['èĢĮ', '导èĩ´'] +['åĬłå¿«', '建设'] +['èĬ±', 'æł·'] +['åĨħå¿ĥ', 'çļĦ'] +['æĺĨ', 'å±±'] +['è³ĩ', 'æºIJ'] +['åĽŀåΰ', 'å®¶'] +['èıĬ', 'èĬ±'] +['æ°´', 'éĩı'] +['å¾ģ', 'ä¿¡'] +['è¡ĮæĶ¿', 'åĮº'] +['ä¹ĥ', 'æĺ¯'] +['æĬķèµĦ', 'é¡¹çĽ®'] +['å«ģ', 'ç»Ļ'] +['ç¥ŀ', 'åľ£'] +['ç¨', 'ł'] +['æľ¬æĿ¥', 'å°±'] +['éĢIJ', 'ä¸Ģ'] +['èģĮä¸ļ', 'æĬĢæľ¯'] +['ä¸įèī¯', 'ä¿¡æģ¯'] +['æīĺ', 'è¿IJ'] +['åIJ¯', '示'] +['ä¹ĭ', 'åħ§å®¹'] +['éŁ', '¶'] +['奢', 'åįİ'] +['æıŃ', '示'] +['æĪIJ为', 'ä¸ŃåĽ½'] +['æ¶Īè´¹', 'åĵģ'] +['åħ¬', 'ç͍'] +['æIJŀ', 'å®ļ'] +['请', 'ä½ł'] +['æŁ', 'ļ'] +['åĨħ', 'è¡£'] +['ä½Ĩ', 'ä»ĸ们'] +['ä¿Ŀ', '湿'] +['该', 'åİ¿'] +['饱', 'åĴĮ'] +['æİ¨', 'åIJij'] +['èµĦæĸĻ', 'æĺ¾ç¤º'] +['ä¸į', 'å½±åĵį'] +['人', '人éĥ½'] +['åıijå±ķ', '壮大'] +['åħ»èĢģ', 'æľįåĬ¡'] +['çĶŁæ´»', 'æ°´å¹³'] +['åIJĦ', 'åİ¿'] +['ä½ł', 'éľĢè¦ģ'] +['说', 'çļĦæĺ¯'] +['å¤ĸ', 'åªĴ'] +['æŃ¤', '人'] +['次', 'è¦ģ'] +['追', 'èµ¶'] +['åºĶ该', 'å¦Ĥä½ķ'] +['æĹ¥', 'åĩĮæĻ¨'] +['çķ¥', 'æľī'] +['éĥ½', 'æĥ³'] +['游', 'ä¹IJ'] +['è¿Ļ款', '游æĪı'] 
+['å¹³', 'æ·¡'] +['æĺ¯ä¸Ģ', 'åĢĭ'] +['å¤ĩ', 'èĢĥ'] +['åζ', 'æŃ¢'] +['ä¸Ģå®ļ', 'èĥ½'] +['å¾Ĵ', 'å¼Ł'] +['以', 'çĤº'] +['åįĥ', 'åħĥ'] +['äºĶ', 'åħŃ'] +['迪', '士'] +['迪士', 'å°¼'] +['éĺ³', 'æĢ§'] +['åĨ¬å¥¥', 'ä¼ļ'] +['å°±æĺ¯', 'åĽłä¸º'] +['æĮĤ', 'éĴ©'] +['æ¦Ĥ', 'åĨµ'] +['åıªè¦ģ', 'æľī'] +['æ²¹', 'çĶ»'] +['åľ°', 'æłĩ'] +['ä¸Ĭ', 'è°ĥ'] +['产ä¸ļ', 'åĽŃåĮº'] +['åħ«', 'åįģ'] +['æ£', '±'] +['æ¶²', 'æĻ¶'] +['æĿij', 'å§Ķä¼ļ'] +['çŃ¾çº¦', '仪å¼ı'] +['è¿Ļ', 'åħ¶ä¸Ń'] +['åĨĻ', 'éģĵ'] +['示èĮĥ', 'åŁºåľ°'] +['éĩİçĶŁ', 'åĬ¨çī©'] +['鼻åŃIJ', 'ä¿¡ç®±'] +['åĽ½éĻħ', 'è´¸æĺĵ'] +['人', 'æĿĥ'] +['ä¿Ŀ', '管'] +['èĭ¥', 'æĤ¨'] +['åİĭ', 'æĬij'] +['é»', 'Ľ'] +['åľ°', 'çľĭçĿĢ'] +['éĻ', '°'] +['ä¸Ģå¹´', 'å¤ļ'] +['ä»İ', '容'] +['ä¸Ń', 'æĸŃ'] +['å¯Ł', 'è§ī'] +['ç§»', '交'] +['éĶ', '¯'] +['æĪĸ许', 'æĺ¯'] +['ç¶', 'ł'] +['两', '项'] +['æľĢ', 'åĸľæ¬¢'] +['æľĢåĸľæ¬¢', 'çļĦ'] +['å¤ľ', 'éĩĮ'] +['åIJĮ', 'ä»ģ'] +['åĪĽæĸ°', '驱åĬ¨'] +['è°ģ', 'èĥ½'] +['é£', '¾'] +['åħī', 'åѦ'] +['åİ', 'Ħ'] +['èĦ±', 'é¢ĸ'] +['èĦ±é¢ĸ', 'èĢĮåĩº'] +['è¿', '¦'] +['æĺ¯', 'ä¸įåı¯èĥ½'] +['çª', '¥'] +['èĥ½', '满足'] +['宽', '度'] +['伦', 'çIJĨ'] +['åı¯ä»¥', 'èİ·å¾Ĺ'] +['转', 'ä¼ļ'] +['å±±', 'æĿij'] +['éĵº', '设'] +['åĩº', 'åĩ»'] +['æĸĩåĮĸ', 'èīºæľ¯'] +['ä¼ļè®®', '室'] +['æŃĮ', '声'] +['æ»', 'Ķ'] +['èIJİ', '缩'] +['æľįåĬ¡', 'åijĺ'] +['åıij表', 'äºĨ'] +['æĸ¼', 'æĺ¯'] +['æĺİç¡®', 'è§Ħå®ļ'] +['ç»´', 'å¥ĩ'] +['æ°´', '产'] +['æĬķ', 'ä¿Ŀ'] +['éĺ´', 'éģĵ'] +['èµ¶', 'å¿«'] +['夺', 'å¾Ĺ'] +['ä¸ĭ', 'åįķ'] +['çµģ', 'åħ¬åı¸'] +['çݯ', 'ç»ķ'] +['å½', 'Ī'] +['ä½ľé£İ', '建设'] +['æĹħ游', 'æĻ¯åĮº'] +['æľī', 'æĽ´å¤ļçļĦ'] +['丰å¯Į', 'å¤ļ彩'] +['çIJĨè´¢', '产åĵģ'] +['åĩº', 'å·®'] +['ä»İ严', 'æ²»'] +['ä»İ严治', 'åħļ'] +['缸', 'å¹²'] +['æ»ĭ', '润'] +['主åĬŀ', 'æĸ¹'] +['åī§', 'åľº'] +['æ»ļ', 'çIJĥ'] +['æ©Ħ', 'æ¦Ħ'] +['èĩªä¸»', 'åĪĽæĸ°'] +['éĢļ', 'å¾Ģ'] +['æł¼', 'å°Ķ'] +['çļĦ', 'ä¼ĺçĤ¹'] +['èĥĮ', 'ä¸Ĭ'] +['çª', 'ľ'] +['çĪĨ', 'åĩº'] +['å¹³', 'æķ´'] +['ä¸Ģ', 'èĦļ'] +['åħ¨ä½ĵ', 'åijĺå·¥'] +['éĻIJ', 'å®ļ'] +['åŁİéķĩ', 'åĮĸ'] +['æ·', '³'] +['éĢ®', 'æįķ'] +['è¡ĮåĬ¨', '计åĪĴ'] +['æīĵ', 'å¾Ĺ'] +['åİļ', 
'éĩį'] +['纪å½ķ', 'çīĩ'] +['åĿļ', 'ä¿¡'] +['央', 'ä¼ģ'] +['åĨį', 'ä¹Łä¸į'] +['天', '涯'] +['åıĤèĢĥ', 'èµĦæĸĻ'] +['æľī', 'æ¯Ĵ'] +['åIJ¸', '纳'] +['è¶Ĭ', 'åıij'] +['éĩįè¦ģ', 'æĦıä¹ī'] +['åĽ½éĺ²', 'éĥ¨'] +['è¿Ļ个', 'è¡Įä¸ļ'] +['æĻ®', 'æŁ¥'] +['å¼Ĥ', 'æĢ§'] +['å»¶', 'è¿Ł'] +['å°ı', 'å¹ħ'] +['èī²', 'æĥħ'] +['综åIJĪ', 'æ²»çIJĨ'] +['æŃ£æĺ¯', 'åĽłä¸º'] +['产ä¸ļ', 'ç»ĵæŀĦ'] +['çłĶç©¶', 'æĬ¥åijĬ'] +['åģľ', 'ä¸ĭ'] +['éķ¿', 'èĢģ'] +['éĩĿ', 'å°į'] +['åįĹ京', 'å¸Ĥ'] +['çģĮ', 'æºī'] +['转', 'è¿IJ'] +['欺', 'è¯Ī'] +['éĢł', 'åģĩ'] +['åĪĨå¸ĥ', 'å¼ı'] +['æĦŁ', '触'] +['æĪij', 'å½ĵæĹ¶'] +['åıij', 'è§ī'] +['åĽ¾', '纸'] +['æĶ¹', 'èī¯'] +['çĭł', 'çĭł'] +['åĨ²', 'åĪº'] +['æĸ°', '京'] +['æĸ°äº¬', 'æĬ¥'] +['ç¥ŀ', 'åύ'] +['秸', 'ç§Ĩ'] +['çĪ', 'º'] +['å°Ĩ', 'è¿İæĿ¥'] +['å·¥', 'ä¿¡'] +['工信', 'éĥ¨'] +['éĻIJ', 'éĩı'] +['æŃ¢', 'æįŁ'] +['åѦä¼ļ', 'äºĨ'] +['åįİ', '缼'] +['åįİ缼', 'é¡¿'] +['å¾Į', 'ä¾Ĩ'] +['ä¸ĭéĿ¢', 'æĺ¯'] +['ä¸ĭéĿ¢æĺ¯', 'å°ı'] +['æIJ¬', 'è¿IJ'] +['ç¾İæľ¯', 'é¦Ĩ'] +['æ¸ħ', 'åĩī'] +['å¤ļå¹´', 'åīį'] +['è©', 'ŀ'] +['åįĥ', 'ç±³'] +['表', 'è¿°'] +['æ±Ł', 'éŨ'] +['åĬłæ²¹', 'ç«Ļ'] +['æľ¬', 'èĥ½'] +['导', '读'] +['åĽ´', 'è§Ĥ'] +['å¹¶', 'åIJij'] +['åŁºæľ¬', 'æĥħåĨµ'] +['æīĵ', 'å¼ĢäºĨ'] +['è¿Ļ', 'ä¸ī个'] +['æ±ķ', '头'] +['强', 'æľīåĬĽ'] +['强æľīåĬĽ', 'çļĦ'] +['è¿Ľ', 'åľº'] +['ä¹Ŀ', 'æ±Ł'] +['çIJĥ', 'æĺŁ'] +['好çľĭ', 'çļĦ'] +['大', 'æĪ·'] +['æ¹', '¯'] +['å¥ĩ', 'å¦Ļ'] +['ä¹IJ', 'åύ'] +['æĪijçļĦ', 'å¿ĥ'] +['çľī', '头'] +['åĨľä¸ļ', 'çĶŁäº§'] +['ç¼ĸ', 'çłģ'] +['åŁº', 'ç¤'] +['åŁºç¤', 'İ'] +['天', 'æĸĩ'] +['åĢĭ人', 'è³ĩè¨Ĭ'] +['åİ»', 'è¿ĩ'] +['èģĨ', 'åIJ¬'] +['æĶ¾', 'åģĩ'] +['ä¸į', 'åħ·å¤ĩ'] +['æ·Ģ', 'ç²ī'] +['大', '佬'] +['åħ¨', '天'] +['åħ¨éĿ¢', '建æĪIJ'] +['éļIJ', 'å½¢'] +['ç¼ħ', 'ç͏'] +['åIJ', '³'] +['è¡ĮæĶ¿', 'æī§æ³ķ'] +['åŁİ', 'åł¡'] +['èİ«', 'æĸ¯'] +['èİ«æĸ¯', 'ç§ij'] +['æīĢæľī', 'æĿĥ'] +['éĽĨ', 'åľĺ'] +['å±Ģ', 'åī¯å±Ģéķ¿'] +['åĩłä¹İ', '没æľī'] +['æ´ģ', 'åĩĢ'] +['ç͵影', 'èĬĤ'] +['åŃ©', 'ç«¥'] +['æīĢ', 'åģļçļĦ'] +['æ¸ħ', '代'] +['æĸ°', 'çīĪ'] +['éĵĿ', 'åIJĪéĩij'] +['为', 'æĬĵ'] +['为æĬĵ', 'æīĭ'] +['åΤ', 'å®ļ'] +['çī¹', '产'] 
+['æīĭ', 'æ©Ł'] +['ä¸įåı¯', 'æĪĸ'] +['ä¸įåı¯æĪĸ', '缺'] +['å¸Ĥåľº', 'è§Ħ模'] +['åĿ', '¯'] +['åĮ»', 'åѦéĻ¢'] +['å¿«', 'è¦ģ'] +['èĮ', 'ľ'] +['æĬĺ', 'èħ¾'] +['äºĨ', 'è¿ĩæĿ¥'] +['æĬ¥åijĬ', 'æľŁåĨħ'] +['çī©', 'ç§į'] +['ç»Łè®¡', 'å±Ģ'] +['æī©', '建'] +['æ¶', 'ħ'] +['责任', '人'] +['éĺ', 'İ'] +['è¯Ħ', 'è®®'] +['å¾Ģ', 'äºĭ'] +['æīĢ', '示'] +['æķ´', 'æ´ģ'] +['éĹº', 'èľľ'] +['æĹħ', 'éĢĶ'] +['å®ŀ', 'è®Ń'] +['ä¹ĭ', 'ç§°'] +['å·´', '士'] +['éĢŁåº¦', 'å¿«'] +['ä¸įä»ħ', 'å¦ĤæŃ¤'] +['å®Ŀè´µ', 'çļĦ'] +['åºŁ', 'çī©'] +['æ²³', 'æ°´'] +['æİ¥', '纳'] +['ç²¾', 'æ¹Ľ'] +['åħ¶æ¬¡', 'æĺ¯'] +['顺', 'å¾·'] +['åħ¬åħ±', 'åį«çĶŁ'] +['è¤IJ', 'èī²'] +['ä¸į', 'æĥľ'] +['æĬĢæľ¯', 'æľįåĬ¡'] +['æİ', '·'] +['æ±Ĥ', 'èģĮ'] +['ä¸ī', '峡'] +['æĬķåħ¥', 'åΰ'] +['太', 'åIJİ'] +['åIJ¯åĬ¨', '仪å¼ı'] +['缴æİ¥', 'å½±åĵį'] +['æĸ°', '款'] +['个', '乡éķĩ'] +['çϾ', '亿'] +['åº', '«'] +['ä¹Ł', 'æŃ£æĺ¯'] +['åı¶', 'çīĩ'] +['æľĢæĹ©', 'çļĦ'] +['æĪĺ', '绩'] +['å·¥', 'æľŁ'] +['æĻļ', 'æľŁ'] +['è¿Ļæł·', '说'] +['è¯į', 'è¯Ń'] +['ä¾', 'Ħ'] +['æķ£', 'çĥŃ'] +['éĽĨæĪIJ', 'çĶµè·¯'] +['åIJį', 'è¯į'] +['æĻº', 'åķĨ'] +['æĭ¥', 'åłµ'] +['çĭĤ', '欢'] +['è¿Ļ', 'èά'] +['æµ´', '室'] +['åijķ', 'åIJIJ'] +['æľªæĿ¥', 'åıijå±ķ'] +['ä¸īä½į', 'ä¸Ģä½ĵ'] +['åªĴ', 'é«Ķ'] +['ä¸įå¾Ĺ', '转载'] +['åĽłä¸º', '她'] +['æĺ¾ç¤º', 'å±ı'] +['ä¾Ľ', 'æļĸ'] +['éĨ«', 'éĻ¢'] +['æľī', 'æĦıæĢĿ'] +['æľīæĦıæĢĿ', 'çļĦ'] +['娱ä¹IJ', 'åŁİ'] +['åįµ', 'å·¢'] +['åĪĽéĢł', 'åĬĽ'] +['竳', 'èĬĤ'] +['人大', '常å§Ķ'] +['èĢĮ', 'çİ°åľ¨'] +['å¤ĸ', 'å©Ĩ'] +['å¢ŀ', 'æĮģ'] +['äºĶ', 'åįĥ'] +['èĢģå¸Ī', '们'] +['æ´Ľ', 'æĿī'] +['æ´ĽæĿī', '磶'] +['æİĮæı¡', 'äºĨ'] +['ä¸ŃåĽ½', 'æĸĩåĮĸ'] +['æĸ°', 'æĶ¿'] +['主è¦ģ', 'ç͍äºİ'] +['åıij', 'çĥ§'] +['类似', 'äºİ'] +['åĮĹ', 'æŀģ'] +['æĪij们', '认为'] +['å¼¥', '漫'] +['åħ¨çIJĥ', 'ç»ıæµİ'] +['é¢', 'IJ'] +['ä¸Ģèµ·', 'è£ħä¿®'] +['æĶ', 'Ĵ'] +['æĭī', 'èIJ¨'] +['帶', 'ä¾Ĩ'] +['åĨ·', 'æ°´'] +['ä¸ī', 'åĨľ'] +['æĿ¿', 'æĿIJ'] +['è¿ŀ', 'è¿ŀ'] +['éĵ', '®'] +['ç»ıèIJ¥', 'çIJĨ念'] +['å±±', 'é¡¶'] +['å¾Ī', 'æĥ³'] +['çĺ', '«'] +['å§ĭç»Ī', 'ä¿ĿæĮģ'] +['åľ¨', '广å·ŀ'] +['ä¸įåIJĮ', 'æĦı'] +['åıĺ', 'åİĭ'] +['åıĺåİĭ', 
'åύ'] +['产', 'éĶĢ'] +['表', 'éĿ¢ä¸Ĭ'] +['æīĢ以', 'ä»ĸ'] +['ç»ıéªĮ', '丰å¯Į'] +['éĥ¨', 'å§Ķ'] +['åħµ', 'åĽ¢'] +['æīĢ', 'è¿°'] +['æķ¦', 'çħĮ'] +['ç»ıèIJ¥', 'èĮĥåĽ´'] +['åı£', 'è¯Ń'] +['失', 'ä¿¡'] +['æ¯ı个人', 'çļĦ'] +['æīĭ', 'æĮģ'] +['æģIJ', 'æħĮ'] +['åł¡', 'åŀĴ'] +['é¦', 'ħ'] +['éĵ¸', 'éĢł'] +['æĭ¿', 'åĩºæĿ¥'] +['æİ¢', 'æµĭ'] +['大家', 'ä¸Ģèµ·'] +['å¥', '§'] +['å®ŀè´¨', 'æĢ§'] +['å°ı', 'åĦ¿'] +['èĩº', 'åįĹ'] +['èĩºåįĹ', 'å¸Ĥ'] +['å¼Ģåıij', 'èĢħ'] +['åı¯', 'æł¹æį®'] +['ç®±', 'åŃIJ'] +['饺', 'åŃIJ'] +['å¿Ļ', 'çĿĢ'] +['æĿ¥', 'ä¸įåıĬ'] +['缸', 'ä¼ł'] +['åĽ½', 'ç½ij'] +['èħ¹', 'æ³»'] +['è¿ĻéĩĮ', 'æľī'] +['é£İ', 'æĻ¯åĮº'] +['åıĤ', 'ä¿Ŀ'] +['æŃ»', 'èĢħ'] +['æĪ´', 'ä¸Ĭ'] +['æ©Ł', 'æ§ĭ'] +['è¯ķéªĮ', 'åĮº'] +['ä¼ł', 'æİĪ'] +['æµ·', 'è¾¹'] +['泪', 'æ°´'] +['缸åħ³', 'åĨħ容'] +['éĥij', 'å·ŀå¸Ĥ'] +['åħij', 'çݰ'] +['两', 'åij¨'] +['èĬľ', 'æ¹ĸ'] +['ç͵åŃIJ', 'ä¿¡æģ¯'] +['红', 'å¤ĸ'] +['æĹħ游', 'å±Ģ'] +['å¾Ģå¾Ģ', 'ä¼ļ'] +['è¿ħ', 'çĮĽ'] +['ä¼ł', '羣'] +['æ¸ħ', 'æ¾Ī'] +['å°±', 'è¿ij'] +['微信', '群'] +['ç³»åĪĹ', 'æ´»åĬ¨'] +['ç»ı常', 'ä¼ļ'] +['è§Ĥ', 'æµĭ'] +['å¿ĥå¾Ĺ', 'ä½ĵä¼ļ'] +['éĻĪ', 'åĪĹ'] +['åĮĹ', 'æĸĹ'] +['è«', '®'] +['è«®', 'è©¢'] +['è¿ĺæĺ¯', 'ä¼ļ'] +['æµĭ', 'ç®Ĺ'] +['æĺŁ', '空'] +['宽', '容'] +['çī©ä¸ļ', 'åħ¬åı¸'] +['æĪĴ', 'æĮĩ'] +['å¸ħ', 'æ°Ķ'] +['ä¸ĢæŃ¥', 'æŃ¥'] +['åħ±', '鸣'] +['åĨ³', 'ä¸į'] +['æİ¥', '管'] +['å¦ĩ', 'èģĶ'] +['æ¯Ķ', 'åĸ»'] +['é²ģ', 'è¿ħ'] +['æĮģ', 'çºĮ'] +['缸', '亲'] +['å¨ģå°¼æĸ¯', '人'] +['ç«ĭ', '项'] +['åĪ', 'Ŀå§ĭ'] +['èĩª', 'åζ'] +['è¿Ī', 'è¿Ľ'] +['ä¸Ĭ', 'æ±½'] +['å®ı', 'ä¼Ł'] +['æł¹æľ¬', '没æľī'] +['æĸ°åĨł', 'çĹħæ¯Ĵ'] +['åĵª', 'ç§į'] +['康', 'åħ»'] +['è¡°', 'èĢģ'] +['å½ķ', 'åĥı'] +['é«Ķ', 'é©Ĺ'] +['ç»ij', 'å®ļ'] +['é¢Ŀ', '头'] +['äºĶ', 'æľĪ'] +['èĬ±', 'å¼Ģ'] +['ä¸Ģ线', 'åŁİå¸Ĥ'] +['åΰ', 'åľº'] +['æĬķ', 'éĻį'] +['çĹĺ', 'çĹĺ'] +['åıĹ', 'ä¸įäºĨ'] +['æīİ', 'æł¹'] +['æĽ´', 'ä½ķåĨµ'] +['æĬ½', 'æŁ¥'] +['åĩº', 'è·¯'] +['审议', 'éĢļè¿ĩ'] +['ä¸į', 'åĥħ'] +['èī²', 'è°ĥ'] +['çϾ', 'ä½Ļ'] +['èĤł', 'éģĵ'] +['æ·±åİļ', 'çļĦ'] +['马', 'åĬĽ'] +['æĹ©', 'æĻļ'] +['æŃĮ', 'èĪŀ'] +['éĺ²', 'æĻĴ'] +['æľĢåIJİ', 'ä¸Ģ个'] 
+['樱', 'èĬ±'] +['å°ıä¼Ļ', 'åŃIJ'] +['åľ¨', 'å½ĵåľ°'] +['å°ıä¼Ļä¼´', '们'] +['èµ·', 'æºIJ'] +['åħ¨', 'åªĴä½ĵ'] +['ç°', '½'] +['éħ±', 'æ²¹'] +['æĹłè®º', 'å¦Ĥä½ķ'] +['裤', 'åŃIJ'] +['åģľ', '产'] +['ä¸įçͱ', 'å¾Ĺ'] +['çīµ', 'å¼ķ'] +['ä¼ł', 'åĬ¨'] +['ä¹Ŀ', 'é¾Ļ'] +['åĬł', 'åĽº'] +['ä¹Łä¸į', 'æķ¢'] +['æĬĢæľ¯', 'æĶ¯æĮģ'] +['ä¸Ĭ', 'å²Ĺ'] +['ç»ıéªĮ', 'åĴĮ'] +['æł¼', 'æŀĹ'] +['åIJ¸', 'éĻĦ'] +['æľªæĪIJ', 'å¹´'] +['奢ä¾Ī', 'åĵģ'] +['追', 'æį§'] +['好', 'ä¸į容æĺĵ'] +['èķ´', 'åIJ«'] +['ä¿Ŀ', 'å®ļ'] +['æĬ¥', 'ä¸ļ'] +['æµ·', 'åĨħå¤ĸ'] +['ä½ł', 'çİ°åľ¨'] +['æ²¹', 'èĢĹ'] +['è´¨éĩı', '管çIJĨ'] +['æ½ľ', 'æ°´'] +['丽', 'æ±Ł'] +['转', 'åħ¥'] +['è¿Ļä¹Ī', 'ä¹ħ'] +['æĺİ', '代'] +['责任', 'åζ'] +['éĩį', 'å·¥'] +['大', 'å·´'] +['触', 'åıĬ'] +['èµ·', 'åĪĿ'] +['大', 'å¦Ī'] +['æĸ¯', 'å¡Ķ'] +['åĨĽ', 'å·¥'] +['书', 'éĻ¢'] +['å³', '¨'] +['æİ¨', 'çIJĨ'] +['è¿Ļç¯ĩ', 'æĸĩ竳'] +['è¿ģ', 'ç§»'] +['åľ¨', 'åIJĮä¸Ģ'] +['ç»Ĩ', 'ç»Ĩ'] +['åīĬ', 'å¼±'] +['书', 'æĪ¿'] +['ç¶ĵ', '常'] +['è¯ķ', 'é¢ĺ'] +['æĤ£', 'ä¸Ĭ'] +['çĻ«çĹ«', 'çĹħ'] +['åĨ²', 'æ´Ĺ'] +['å¤ĸ', 'æı´'] +['åħĭ', 'åζ'] +['åįģ', 'æľĪ'] +['åģļ', 'ä¸įåΰ'] +['ç¾İ', 'åĮĸ'] +['å¦Ĥ', 'æľŁ'] +['è¿ĺ', 'éľĢ'] +['天', 'åºľ'] +['å°±', 'æĦıåij³çĿĢ'] +['çļĦç¡®', 'æĺ¯'] +['éªĹ', 'å±Ģ'] +['å°ıç»Ħ', 'èµĽ'] +['è©', '©'] +['ä¹Ŀ', 'å¹´'] +['æĻĵ', 'å¾Ĺ'] +['çłĶç©¶', '人åijĺ'] +['大', 'éħĴåºĹ'] +['ç§ij', 'åѸ'] +['åħŃ', 'åIJĪ'] +['çķĮ', 'å®ļ'] +['车', 'è½½'] +['å¼Ģ', 'çĿĢ'] +['毫', 'æĹłçĸij'] +['毫æĹłçĸij', 'éĹ®'] +['è¿IJ', 'ç»´'] +['ç¦ģ', 'åĮº'] +['èĦ±', 'èIJ½'] +['讲', 'å¸Ī'] +['产ä¸ļ', 'åŁºåľ°'] +['é«ĺ', 'æĢ§èĥ½'] +['åħī', '彩'] +['çݰ', 'éĺ¶æ®µ'] +['åĩ', '¿'] +['è¾ĥ', 'å·®'] +['饮', 'çĶ¨æ°´'] +['éĸĭ', 'çϼ'] +['ç½ij', 'åIJ§'] +['çĮ´', 'åŃIJ'] +['æŃ¦', 'æŀĹ'] +['å®ī', 'åİ¿'] +['ä¸įåı¯', 'æĢĿ'] +['ä¸įåı¯æĢĿ', 'è®®'] +['éĬ·', 'åĶ®'] +['è´«', 'ç©·'] +['为', 'åķ¥'] +['éº', 'ĵ'] +['å¹¾', 'åĢĭ'] +['è§Ħ模', '以ä¸Ĭ'] +['æı', 'ļ'] +['被', 'åĽ°'] +['缺', 'å¸Ń'] +['å¿«', 'é¤IJ'] +['æĬ¢', 'åįł'] +['æĻ', 'Ł'] +['å¤į', 'æ´»'] +['æľ¬æĬ¥', '讯'] +['åĪĽ', 'ä¸ĭ'] +['æµ·', '滩'] +['éĩı', '产'] +['å¦Ĥä½ķ', 'åİ»'] +['车', 'ä½į'] 
+['å¯', 'ĩ'] +['äºĮ', 'åįģåĽĽ'] +['ç»ıæµİ', 'æįŁå¤±'] +['éħįå¥Ĺ', '设æĸ½'] +['åŁºæľ¬', 'éĿ¢'] +['äºī', '论'] +['就好', 'åĥı'] +['çłĶç©¶', 'æĪIJæŀľ'] +['éĻĪ', 'è¿°'] +['æīĵ', 'åĬ¨'] +['ä¸ĭ', 'å·´'] +['ç§Ĵ', 'éĴŁ'] +['对', '人ä½ĵ'] +['æĬĢæľ¯', 'çłĶåıij'] +['åİŁ', 'åŃIJ'] +['æĺ¯ä¸Ģ', '项'] +['äºĨä¸Ģ', '份'] +['æĮĩ', 'çͲ'] +['ç͍', 'éĩı'] +['è¿ĺä¸į', 'å¤Ł'] +['æĶ¿åºľ', 'éĩĩè´Ń'] +['çŁ¥è¯Ĩ', 'çĤ¹'] +['ä¸ŃåĽ½', '梦'] +['å¾Ī', 'å¼Ģå¿ĥ'] +['礼', 'è²Į'] +['éĿŀ常', 'å¤ļ'] +['éĿŀ常å¤ļ', 'çļĦ'] +['åĽ', 'ļ'] +['æĹħ', 'é¦Ĩ'] +['å°½', 'æĥħ'] +['æŃĮ', 'åͱ'] +['æ²Ļ', 'é¾Ļ'] +['车', 'åİ¢'] +['客', 'æµģ'] +['åģı', 'å·®'] +['积累', 'äºĨ'] +['æ¡', 'Ķ'] +['çĶ»', 'çĶ»'] +['ä¹Ł', 'åºĶ该'] +['åºĶç͍', 'ç¨ĭåºı'] +['èĥĥ', 'èĤł'] +['以', 'å¾Į'] +['豪', 'å®ħ'] +['æ·±', 'åĬłå·¥'] +['缴', 'è¨Ģ'] +['åĮĸ', 'çŁ³'] +['åĽ½', 'éģĵ'] +['ä¸ĥ', '个'] +['ä»İèĢĮ', '使'] +['èĤł', 'èĥĥ'] +['æĹ¥', 'è¶ĭ'] +['çζ', 'åŃIJ'] +['ç·', '©'] +['æĭĽ', 'çīĮ'] +['产', 'å¦ĩ'] +['çķª', 'èĮĦ'] +['æĪij', 'éĻ¢'] +['建çŃij', 'å·¥ç¨ĭ'] +['å±ķè§Ī', 'ä¼ļ'] +['å®¶éķ¿', '们'] +['åĨľ', 'ä½ľçī©'] +['æĹ¥', 'å¤ľ'] +['æĶ»', 'æĵĬ'] +['è§Ħ', 'éģ¿'] +['èĪŁ', 'å±±'] +['便', 'æ°ij'] +['åħ«', 'åŃĹ'] +['ä¸į', 'æĽ¾'] +['æĶ¯', 'éħį'] +['çĨ¬', 'å¤ľ'] +['人', 'é¡ŀ'] +['ç´Ģ', 'éĮĦ'] +['ç»ıèIJ¥', 'æ´»åĬ¨'] +['大', '涨'] +['å¸Ĥå§Ķ', '常å§Ķ'] +['åĪĨ', 'éIJĺ'] +['ä¸Ģ个', 'èģĮä¸ļ'] +['çĹħ', 'åĽł'] +['è¿Ļ', '对äºİ'] +['ä¸įå¾Ĺä¸į', '说'] +['åıijç͵', 'æľº'] +['æľīæīĢ', '帮åĬ©'] +['缮æłĩ', 'ä»»åĬ¡'] +['åĽł', 'åľ°'] +['åĽłåľ°', 'åζ'] +['åĽłåľ°åζ', 'å®ľ'] +['å°Ĩ', 'è¾¾åΰ'] +['ç²Ĺ', 'ç³Ļ'] +['稳', 'åĽº'] +['å«', '£'] +['çİ°åľ¨', 'å¾Īå¤ļ'] +['ä¸ĸçķĮ', '级'] +['å¼ł', 'æŁIJ'] +['çĤ¹', 'ç¼Ģ'] +['èij', 'µ'] +['社ä¼ļ', 'ç»Ħç»ĩ'] +['å¾Ģ', 'åIJİ'] +['åĬł', 'æģ¯'] +['åĻª', '声'] +['æľī', 'åħ´è¶£'] +['为æĤ¨', 'æıIJä¾Ľ'] +['æ²¹', 'æ¼Ĩ'] +['ç¬¬åĽĽ', 'å±Ĭ'] +['çļĩ', '宫'] +['ä¹Ĵ', 'ä¹ĵ'] +['ä¹Ĵä¹ĵ', 'çIJĥ'] +['éļ¨', 'èijĹ'] +['éģ©', 'åIJĪ'] +['åįĹ', 'éĿŀ'] +['æĵ', '´'] +['西', 'æ´ĭ'] +['åĬł', 'å¯Ĩ'] +['æĪIJåĬŁ', '举åĬŀ'] +['åı£', 'æ°´'] +['æĪIJ', '年人'] +['æīĢ', 'æıIJä¾ĽçļĦ'] +['éļĶ', 'å£ģ'] +['åľ¨', '京'] +['å½ĵåľ°', 
'æĹ¶éĹ´'] +['çŃī', 'åIJĦç§į'] +['é£İ', 'æ°Ķ'] +['å±ĭ', 'éĩĮ'] +['ä¸Ģ', 'åŃĹ'] +['çļĦæĹ¶éĹ´', 'éĩĮ'] +['åĺ¿', 'åĺ¿'] +['å¿«', '讯'] +['ä¸Ń', 'åľº'] +['ä¸Ģ', 'çĵ¶'] +['æ»', 'ķ'] +['é¢Ĩ', 'è·ij'] +['好', 'èݱ'] +['好èݱ', 'åĿŀ'] +['没', 'åħ³ç³»'] +['åĩº', 'å¢ĥ'] +['ä¸įæĺ¯', 'ä¸Ģ个'] +['éĥ½æĺ¯', 'éĿŀ常'] +['éľĩ', 'åĬ¨'] +['èİ·', 'èĥľ'] +['åįļ', 'å¼Ī'] +['æĬļ', 'åħ»'] +['对', 'ç«ĭ'] +['æľįåĬ¡', 'æľºæŀĦ'] +['è°£', 'è¨Ģ'] +['社ä¼ļ', 'ç§ijåѦ'] +['åIJ¬è¯´', 'è¿ĩ'] +['æī', '³'] +['æīĵ', '磨'] +['åı£', 'æľį'] +['好', 'åĥıæĺ¯'] +['以åıĬ', 'åħ¶ä»ĸ'] +['çī¹', 'è´¨'] +['亲', 'è¿ij'] +['ä¸Ģ', 'ç»ı'] +['æ¶', 'Ŀ'] +['éŃĶ', 'æľ¯'] +['éģĵè·¯', '交éĢļ'] +['è§Ħ模', 'æľĢ大'] +['å®ŀæĸ½', 'æĦıè§ģ'] +['ä¹', 'ŀ'] +['ä¸Ģ', 'ä¸ĸ'] +['åŁ·', 'è¡Į'] +['è±Ĩ', 'çĵ£'] +['åĪĹ', '为'] +['æķħ', '宫'] +['çĶŁ', 'åij½åij¨æľŁ'] +['ä¸īç§į', 'èģĮä¸ļ'] +['详ç»Ĩ', 'ä»ĭç»į'] +['å®Į', 'å¤ĩ'] +['岩', 'çŁ³'] +['éļı', 'æīĭ'] +['é£', '²'] +['æķĪæŀľ', 'åĽ¾'] +['ç§ĭ', 'åĨ¬'] +['åĬŁ', 'å¾·'] +['è§Ħ竳', 'åĪ¶åº¦'] +['æĹ¥', 'æ¸IJ'] +['æīĢ', 'éľĢè¦ģ'] +['æīĢéľĢè¦ģ', 'çļĦ'] +['å²Ľ', 'ä¸Ĭ'] +['åĩº', 'åľŁ'] +['åĽ¾', 'æĸĩ'] +['ç§ijæĬĢ', 'è¿ĽæŃ¥'] +['éĢļ', 'èĥĢ'] +['èĢģ', '太太'] +['èĭĹ', 'æľ¨'] +['éĵ¶', 'å·Ŀ'] +['å¸IJ', '篷'] +['éĿŀ', 'è¦ģ'] +['éħį', 'ç͵'] +['å¤Ħ', 'å¢ĥ'] +['èĤ¡æĿĥ', 'æĬķèµĦ'] +['ä¸Ģ缴', 'åΰ'] +['åĿĩ', 'çͱ'] +['æĬĹ', 'æĹ¥'] +['æį®', 'ä»ĭç»į'] +['ä½ł', 'åĸľæ¬¢'] +['åĪĽæĸ°', 'åŀĭ'] +['åıĺ', 'è¿ģ'] +['è§Ĩ', 'å¯Ł'] +['å®Įåħ¨', '没æľī'] +['åħĥ', 'æĹ¦'] +['åı¯', 'ä¿¡'] +['åı¦', 'è¡Į'] +['æĿij', '级'] +['åħ¥', 'åľº'] +['æIJŃ', 'æ¡£'] +['ä¹Ł', 'åĽłæŃ¤'] +['æį¢', 'æĪIJ'] +['ä¸į', 'è´Ł'] +['äºĨ', '大éĩıçļĦ'] +['éģĶ', 'åΰ'] +['å¸Ĥ', 'åİ¿'] +['å¹´', 'è¼ķ'] +['å¿«', 'æīĭ'] +['å¸Į', 'å°Ķ'] +['èĩª', 'èIJ¥'] +['éĽª', 'èĬ±'] +['æIJ', 'ģ'] +['çľ¼', 'ç§ij'] +['æŃ£', '確'] +['çļĦ', 'å§¿æĢģ'] +['åĿļå®ŀ', 'çļĦ'] +['æĮĩ', '纹'] +['æªĶ', 'æ¡Ī'] +['ç½®', 'äºİ'] +['佩', 'æľį'] +['豪', 'éŨ'] +['åĵ', 'Ĵ'] +['æģ°', '好'] +['檢', 'æŁ¥'] +['åĪĿ', 'è¡·'] +['大', 'åĶIJ'] +['约', 'ä¼ļ'] +['èĴ¸', 'åıij'] +['çѹ', 'åĪĴ'] +['å¹´', 'ç»Ī'] +['è¡Į', 'æ¥Ń'] +['åħ±', 'éĿĴ'] +['åħ±éĿĴ', 
'åĽ¢'] +['ä¼ļ', 'å¼ķèµ·'] +['ä¸Ń', 'ç§ij'] +['ä¸Ńç§ij', 'éĻ¢'] +['æĮ¯', 'åĬ¨'] +['åį´', 'åıijçݰ'] +['ä¸įåĬ¨', '产'] +['èĮ', '¹'] +['æĪ¿éĹ´', 'éĩĮ'] +['è´§å¸ģ', 'æĶ¿çŃĸ'] +['æ²»', 'çĻĤ'] +['æħİ', 'éĩį'] +['å¡ŀ', 'å°Ķ'] +['åĽ½', 'ç±į'] +['åĽł', 'æŀľ'] +['çŃī', 'çī¹çĤ¹'] +['å±±', 'è°·'] +['ä¸ĭ', 'è¼ī'] +['è®ĵ', 'æĪij'] +['饮', 'éħĴ'] +['è¿Ļ个', '游æĪı'] +['ç»Ŀ', '大éĥ¨åĪĨ'] +['åĴ¨è¯¢', 'æľįåĬ¡'] +['å¹²', 'æ´»'] +['è®®', 'ä¼ļ'] +['æ¦Ĥ', 'è¿°'] +['åĪĨ', 'åĮº'] +['æŃ»', 'åIJİ'] +['ç«Ļ', 'çĿĢ'] +['主è¦ģ', 'é¢Ĩ导'] +['åIJĮ', 'åŁİ'] +['大', 'æłij'] +['对', 'åѦçĶŁ'] +['社ä¼ļ', 'ä¿ĿéĻ©'] +['å¢ŀ', 'èµĦ'] +['主人', 'åħ¬'] +['å®£ä¼ł', 'æķĻèĤ²'] +['æĸĩåĮĸ', '交æµģ'] +['客', 'æĪ¶'] +['çŁ¥åIJį', 'åĵģçīĮ'] +['æ»ŀ', 'åIJİ'] +['äºĴ', 'è¡¥'] +['æĦŁ', '人'] +['åī', '¿'] +['åIJİ', '代'] +['äºī', '龸'] +['æķĻèĤ²', 'åŁ¹è®Ń'] +['éĿĻ', 'èĦī'] +['ä¹ı', 'åĬĽ'] +['说', 'åĩºæĿ¥'] +['çİĭèĢħ', 'èį£èĢĢ'] +['åĢ', '«'] +['åįĩ', 'èµ·'] +['éķ', 'ģ'] +['åĩº', '游'] +['éĢļè¡Į', 'è¯ģ'] +['å·¥ä½ľ', 'å²Ĺä½į'] +['åĮł', 'å¿ĥ'] +['æĭ¿', 'æĿ¥'] +['æ´Ĺè¡£', 'æľº'] +['æĪijä¸į', 'æĥ³'] +['é¢Ħ', 'è§ģ'] +['æ¼Ķ', '示'] +['ä¸Ģ缴', '没æľī'] +['è·Ł', '她'] +['对çħ§', 'æ£ĢæŁ¥'] +['ç°', '¿'] +['ä¸ĵ', 'å¿ĥ'] +['è®®', 'äºĭ'] +['åīį', '端'] +['åį¡', 'å°Ķ'] +['è¨Ń', 'å®ļ'] +['设置', 'äºĨ'] +['å©ļ', '纱'] +['åľ¨', 'åĽ½å¤ĸ'] +['åı³', 'ä¾§'] +['è³¼', 'çī©'] +['å¥ĩ', 'èij©'] +['å¢ŀåĬł', 'å̼'] +['好', 'è¿IJ'] +['åĽ½éĻħ', 'æľºåľº'] +['ä¸ĭ', 'ç§°'] +['缮åīį', '为æŃ¢'] +['ç¥ŀ', 'ä»Ļ'] +['å®ĥ', 'åı¯ä»¥'] +['æ¾Ħ', 'æ¸ħ'] +['èĥ½', '使'] +['游', 'åĩ»'] +['游åĩ»', 'éĺŁ'] +['åĩ', '¹'] +['ä¸įè¦ģ', 'åĨį'] +['åĨ³', 'èĥľ'] +['åĨ³', 'æĪĺ'] +['æĭ', '½'] +['缼', 'åħ¸'] +['å¾Ī好', 'åľ°'] +['æľĢ', 'ç¾İçļĦ'] +['åĥ', 'ļ'] +['å·´', 'åŁº'] +['å·´åŁº', 'æĸ¯åĿ¦'] +['æľĢ', 'éĢĤåIJĪ'] +['é«ĺ', 'èģĮ'] +['ä¿Ŀ', 'å§Ĩ'] +['æİĪ', 'æ¬Ĭ'] +['说åΰ', 'è¿ĻéĩĮ'] +['æİ¨', 'å¼Ģ'] +['çİĩ', 'è¾¾'] +['ä¸īåĪĨ', 'ä¹ĭä¸Ģ'] +['管çIJĨ', 'ä¸Ńå¿ĥ'] +['交', 'æ±ĩ'] +['森æŀĹ', 'åħ¬åĽŃ'] +['å¾Ģ', 'ä¸Ĭ'] +['éªij', 'è¡Į'] +['æį®', 'æŃ¤'] +['纽', '带'] +['ç»', 'ŀ'] +['ä¸ī', 'æĸ¹'] +['æĦıä¹ī', 'ä¸ĬçļĦ'] +['æİ¨', 'è¿Ł'] 
+['å¤ļæł·', 'æĢ§'] +['æĥ³', 'èµ·äºĨ'] +['æİĴåIJį', '第'] +['å·¨', 'é¢Ŀ'] +['æĿŁ', 'ç¼ļ'] +['å®ī', 'å®ļ'] +['äºĭ', '實'] +['çļĦ', 'æĦ¿æľĽ'] +['è£ħå¤ĩ', 'åζéĢł'] +['人', 'å±ħ'] +['人å±ħ', 'çݯå¢ĥ'] +['å¿ĺè®°', 'äºĨ'] +['该', '游æĪı'] +['楼', 'ä¸Ĭ'] +['å¼Ģ', 'ä¼ļ'] +['æģ', '³'] +['åıĭæĥħ', 'éĵ¾æİ¥'] +['ç¡', 'Ĵ'] +['ç»ĻäºĪ', 'äºĨ'] +['åģı', '好'] +['åĵ', 'ī'] +['交éĢļ', 'å®īåħ¨'] +['éĽ', 'Į'] +['æ²»', 'çĹħ'] +['è§īå¾Ĺ', 'å¾Ī'] +['衬', 'è¡«'] +['å¿ĥ', 'æĦ¿'] +['æ´ŀ', 'å¯Ł'] +['æ°ij', 'æ£Ģå¯ŁéĻ¢'] +['æıIJ', 'çĤ¼'] +['è¦ģ', 'è¿Ľä¸ĢæŃ¥'] +['驾', '车'] +['æĻ®', 'æĥł'] +['æķ', 'ĸ'] +['ç¦ı', 'éŁ³'] +['éĢģ', 'è¾¾'] +['è§ĦåĪĴ', '设计'] +['æīĭ', 'å¥Ĺ'] +['å®ī', 'ä¿Ŀ'] +['è¿ĺä¸į', 'å¦Ĥ'] +['åīį', 'è¿°'] +['æłĩ', 'è®°'] +['ç´§', 'æİ¥çĿĢ'] +['æ§', 'IJ'] +['深深', 'åľ°'] +['满满', 'çļĦ'] +['æĺ¥', 'è¿IJ'] +['æĹ¥', '产'] +['çα', 'æĬ¤'] +['åħ¨', 'æĹ¥'] +['åħ¨æĹ¥', 'åζ'] +['转', 'åĬ¨'] +['ç¥Ń', 'ç¥Ģ'] +['ä¹°', 'ä¸ľè¥¿'] +['对', 'æľªæĿ¥'] +['æ¶Ī失', 'äºĨ'] +['åļ´', 'éĩį'] +['ä¸ī', 'æĿ¡'] +['éħ¸', '奶'] +['éĽĨåĽ¢', 'èĤ¡ä»½'] +['西', 'è·¯'] +['åıª', 'å¾Ĺ'] +['éĢģ', 'åİ»'] +['çĭł', 'æĬĵ'] +['åĪ©ç͍', 'çİĩ'] +['ä¸ĭ', 'åij¨'] +['å¥ĭ', 'æĪĺ'] +['æĺ¥èĬĤ', 'æľŁéĹ´'] +['è´Ł', '责任'] +['æĺĤ', 'è´µ'] +['å°¾', 'å·´'] +['ç¯ĩ', 'æĸĩ竳'] +['åħ', '®'] +['è®Ĭ', 'æĪIJ'] +['å¹', '¹'] +['çĻ»', 'éĮĦ'] +['ä½', 'Ī'] +['å·¥', 'åĮł'] +['åĵªæĢķ', 'æĺ¯'] +['åıį', 'åĵį'] +['ç§', 'ĥ'] +['åĩº', '轨'] +['æĹ¥', 'åĨĽ'] +['åIJį', 'èªī'] +['æķı', 'éĶIJ'] +['æľįåĬ¡', 'æ°´å¹³'] +['çħ§', 'å°Ħ'] +['ä¼Ĭ', 'æĭī'] +['ä¼Ĭæĭī', 'åħĭ'] +['åĨħ', 'éĺģ'] +['èĬĴ', 'æŀľ'] +['ä¸ĩ', 'åĪĨ'] +['éĢĢ', '款'] +['缴æĴŃ', 'éĹ´'] +['æĭ¿', 'åΰäºĨ'] +['å°İ', 'èĩ´'] +['空æ°Ķ', 'ä¸Ń'] +['客æĪ·', 'æľįåĬ¡'] +['è¿IJ', 'åĬ¿'] +['ç»ĵ', 'çŁ³'] +['ä¸į', 'å¿ħè¦ģçļĦ'] +['èĥ¶', 'åĽĬ'] +['çIJĨ', 'ä¼ļ'] +['æĬ½', 'åĩº'] +['空æ°Ķ', 'è´¨éĩı'] +['æ¯ķ', '竣æĺ¯'] +['åĨ·', 'æ¼ł'] +['ä¸Ģ', 'å¦Ĥ'] +['ä¸Ģå¦Ĥ', 'æĹ¢'] +['ä¸Ģå¦ĤæĹ¢', 'å¾Ģ'] +['æĤ£', 'çĹħ'] +['åĬł', 'æĮģ'] +['èµŀ', 'åĬ©'] +['é«', '®'] +['åij½', 'ä¸Ń'] +['æĦıä¹ī', 'ä¸Ĭ'] +['ä¸į', 'èĪį'] +['åģļ', '梦'] +['æīĵ', 'æī«'] +['æĺŁ', 'åħī'] +['æĸŃ', 
'è£Ĥ'] +['åħ¨', 'å¥Ĺ'] +['è£ģ', 'å®ļ'] +['马', 'åħĭæĢĿ'] +['骨', '骼'] +['ä¸Ģ', 'è·¯ä¸Ĭ'] +['å®ļ', 'æĹ¶'] +['å·¥ç¨ĭ', 'æĬĢæľ¯'] +['å½¼', 'å¾Ĺ'] +['æ±²', 'åıĸ'] +['ä¸Ģ', 'è§Ī'] +['åIJµ', 'æŀ¶'] +['ä¿Ĺ', 'ç§°'] +['æłª', 'æ´²'] +['åºŁ', 'æĹ§'] +['è¡Į', 'æĺŁ'] +['åıijçĶŁ', 'åıĺåĮĸ'] +['é¦ĸ', 'ä»ĺ'] +['åįģåĪĨ', 'éĩįè¦ģ'] +['æĬĬ', 'è¿ĻäºĽ'] +['ç¥ŀ', 'å·ŀ'] +['æıIJä¾Ľ', 'åķĨ'] +['æ¥', '·'] +['å±', 'İ'] +['çĬ¶', 'åħĥ'] +['åŁİ', 'å¢Ļ'] +['çľĭ', 'ä¸Ģçľĭ'] +['çĶŁäº§', 'èĥ½åĬĽ'] +['åŁºæľ¬ä¸Ĭ', 'éĥ½'] +['æīĵ', 'æī°'] +['åĪĿ', '次'] +['åĩº', '示'] +['åħ¶ä¸Ń', 'ä¸Ģ个'] +['çĶŁæĢģ', 'ç³»ç»Ł'] +['æīĭ', 'æİĮ'] +['æµİåįĹ', 'å¸Ĥ'] +['åľĭ', 'åħ§'] +['æŃ£', 'å̼'] +['å¹¾', 'ä¹İ'] +['æİ¨èįIJ', 'éĺħ读'] +['è¿Ń', '代'] +['è°ĥ', 'ä¾ĥ'] +['饮', 'åĵģ'] +['å¢Ļ', 'ä½ĵ'] +['åıĺ', 'çݰ'] +['äºĨ', '好'] +['äºĨ好', 'åĩł'] +['ä¸į', 'çķĻ'] +['çĪ', '²'] +['å°½', 'æĹ©'] +['æŃ£åľ¨', 'è¿Ľè¡Į'] +['åĩº', 'éĻ¢'] +['æĿĢ', '害'] +['æıIJ', '款'] +['åıijå±ķ', '空éĹ´'] +['åīį', '身'] +['ä¸įæĸŃ', 'å¢ŀ强'] +['æ·±', 'å±Ĥ次'] +['容', '纳'] +['éĤ£', '份'] +['å·¥ä½ľ', 'æķĪçİĩ'] +['æľ¬', 'åĽ½'] +['失', 'èIJ½'] +['æŃ£', 'åĽłä¸º'] +['èĬĤ', 'æ°´'] +['ä¸ĭ', 'ä¸Ģ代'] +['çłĶåıij', 'ä¸Ńå¿ĥ'] +['ä¸į', 'çIJĨ'] +['å®Į', '好'] +['ä¿ĿæĬ¤', 'åĮº'] +['ç»ĵæŀĦ', 'è°ĥæķ´'] +['å¥ł', 'å®ļ'] +['宣', 'ç§°'] +['éĺ»', 'æĮ¡'] +['æĴ¤', '离'] +['ä¸į', 'æĸ¹ä¾¿'] +['åĴ', 'ķ'] +['ç¬ijäºĨ', 'ç¬ij'] +['çݯå¢ĥ', '污æŁĵ'] +['ä½ı', 'æĪ·'] +['ç»Ŀ', 'ç¼ĺ'] +['éϤ', 'å°ĺ'] +['é«ĺ', 'å°ļ'] +['æĢİä¹Ī', 'åı¯èĥ½'] +['éĿ¢', 'èī²'] +['åķĨ', 'æ¥Ń'] +['çĸ', '¹'] +['èµĦæºIJ', 'ä¼ĺåĬ¿'] +['è¾ĸåĮº', 'åĨħ'] +['èĢĢ', 'çľ¼'] +['æij§', 'æ¯ģ'] +['ä¸ĸçķĮ', 'ç»ıæµİ'] +['å¼ķ', 'æĿ¥'] +['ä¸Ģ', 'åĪĻ'] +['æĭĩ', 'æĮĩ'] +['æĬµ', '御'] +['éĽ', 'į'] +['åĩĨå¤ĩ', 'å·¥ä½ľ'] +['çıł', 'ä¸īè§Ĵ'] +['ç¨Ģ', 'åľŁ'] +['èİ·å¾Ĺ', 'æĦŁ'] +['æĪIJåĬŁ', 'çİĩ'] +['ç½ij', '约'] +['ç½ij约', '车'] +['èĦ', 'IJ'] +['æķ¬', 'ä¸ļ'] +['éĩij', 'ä»·'] +['ç²¾', 'é«ĵ'] +['ä¹°', '车'] +['åħ³', 'åı£'] +['åĨį', 'å¤ļ'] +['æŀģ', 'åĵģ'] +['åIJĦ', 'å®¶'] +['举æĬ¥', 'ç͵è¯Ŀ'] +['èļ', 'Ĭ'] +['æĸ¹', 'å½¢'] +['ç§ijæĬĢ', 'æĪIJæŀľ'] +['æľĢ好', 'æĺ¯'] +['éĹ®', 
'åĢĻ'] +['红', 'éħĴ'] +['åĽĽ', 'ç§į'] +['ç¿Ĵ', 'æħ'] +['ç¿Ĵæħ', '£'] +['åŀ', '¦'] +['éĤ£', 'åıª'] +['é¢Ĩ', 'æĤŁ'] +['çľ¼', 'éĥ¨'] +['æ³°', 'å®ī'] +['ä»»', 'æľŁ'] +['磨', 'æįŁ'] +['æĽ¿', 'æį¢'] +['åħ¸', '礼'] +['符åIJĪ', 'æĿ¡ä»¶'] +['è¿ĺæľī', 'ä»Ģä¹Ī'] +['åħ±äº«', 'åįķ车'] +['åı¯', 'åĪĨ为'] +['åŃ£', 'åIJİ'] +['åŃ£åIJİ', 'èµĽ'] +['举èİŀ', 'å¸Ĥ'] +['å¿ĥ', 'æĦı'] +['æīŃ', 'æĽ²'] +['ä½ľä¸º', 'ä¸Ģç§į'] +['è¿Ļ', 'éĥ¨åĪĨ'] +['åıĤä¸İ', 'åΰ'] +['ç½ij', 'çIJĥ'] +['實', 'çı¾'] +['ç»Ħ', 'è£ħ'] +['åIJij', 'å¤ĸ'] +['å·¥ä½ľ', 'æĸ¹æ¡Ī'] +['åįģ', 'æĿ¡'] +['課', 'ç¨ĭ'] +['颤', 'æĬĸ'] +['åĵ', '©'] +['éĤ®', 'å¯Ħ'] +['äº', '¢'] +['åħį', 'è²»'] +['ç§', '¤'] +['åºĶæĢ¥', '管çIJĨ'] +['åĽĽ', 'äºĶ'] +['éºĴ', 'éºŁ'] +['å¾Ĵ', 'æŃ¥'] +['è¨ĺ', 'å¾Ĺ'] +['çĴ', 'IJ'] +['æĺ¯åIJ¦', 'ä¼ļ'] +['æĦıè§ģ', 'åıįé¦Ī'] +['éļ¾', 'æĢª'] +['çª', 'į'] +['交', 'æİ¥'] +['两', 'åįĥ'] +['æĩī', 'ç͍'] +['æľŁ', 'éĸĵ'] +['æIJ¬', 'åΰ'] +['è®®', 'é¢ĺ'] +['碧', 'æ¡Ĥ'] +['碧æ¡Ĥ', 'åĽŃ'] +['åģļ', 'çĶŁæĦı'] +['éĻĽ', 'ä¸ĭ'] +['è·', 'ĭ'] +['èĢģ人', 'å®¶'] +['带', 'åĽŀ'] +['æŀ¸', 'æĿŀ'] +['è¡Į', 'éķ¿'] +['åĨħ容', 'ç®Ģä»ĭ'] +['æ¢', '¢'] +['æĮĩ', 'æİ§'] +['éĩį', 'çĹĩ'] +['ç½ijåıĭ', '们'] +['çı¾', '代'] +['ç±»', '产åĵģ'] +['å¥Ķ', 'æ³¢'] +['æ¸', 'º'] +['ç²ī', 'ç¢İ'] +['è¿Ļ', 'åıªæĺ¯'] +['æ£Ģå¯Ł', 'æľºåħ³'] +['é½', 'Ĭ'] +['æĪ¿', 'ç§Ł'] +['å¾·', 'æĭī'] +['å²ģ', '以ä¸Ĭ'] +['纯', 'åĩĢ'] +['åĪĨå¸ĥ', 'åľ¨'] +['èĥ½', 'å¾Ĺåΰ'] +['ä¸į', 'å°½'] +['ç«ŀ', 'ä»·'] +['çļĦ', '带é¢Ĩ'] +['çļĦ带é¢Ĩ', 'ä¸ĭ'] +['ä¸Ńèį¯', 'æĿIJ'] +['æĿij', 'éķĩ'] +['ä¸įåı¯', 'éģ¿åħį'] +['éľ²', '天'] +['å°ı', 'å§ijå¨ĺ'] +['çī©', 'ä»¶'] +['èijĹä½ľ', 'æĿĥ'] +['æĭĺ', 'çķĻ'] +['éĥ½', 'è§īå¾Ĺ'] +['æĽ²', 'æĬĺ'] +['æ·»åĬł', 'åīĤ'] +['åı¬', 'åĽŀ'] +['æīİå®ŀ', 'æİ¨è¿Ľ'] +['æĬĦ', 'è¢Ń'] +['åĮĸ', '身'] +['缴', 'èIJ¥'] +['ä¹Ł', 'å¸ĮæľĽ'] +['èį£èªī', 'ç§°åı·'] +['åįĸ', 'ç»Ļ'] +['æľī', 'ä¸įåIJĮçļĦ'] +['å¥ĩ', 'çī¹'] +['éĥ½', '认为'] +['å¦', 'ŀ'] +['æĪIJéķ¿', '为'] +['辩', 'æĬ¤'] +['主', 'æķĻç»ĥ'] +['æ³ķå¸Ī', 'èģĮä¸ļ'] +['æ¤į', 'åħ¥'] +['ç´¢', 'å°¼'] +['åIJ¬', 'è¿ĩ'] +['ä¹łæĥ¯', 'äºĨ'] +['夺', 'åıĸ'] +['éŁ', 'ĵ'] 
+['æľ¬è´¨', 'ä¸Ĭ'] +['æİ¥', 'åĬĽ'] +['äºij', '端'] +['è¦ģ', 'åģļ好'] +['è·¯', 'çģ¯'] +['åįıåIJĮ', 'åıijå±ķ'] +['æľī', 'å¾ħ'] +['æ°´', 'åŁŁ'] +['æIJľçĭIJ', 'é¦ĸ页'] +['è´¨éĩı', 'å®īåħ¨'] +['åįģäºĮ', 'äºĶ'] +['åĵ®', 'åĸĺ'] +['èĵ¬åĭĥ', 'åıijå±ķ'] +['åIJį', '声'] +['身', '亡'] +['çİĭ', 'åºľ'] +['åİŁåĪĻ', 'ä¸Ĭ'] +['çĥĺ', 'å¹²'] +['éģĹ', 'æ¼ı'] +['éĿ¢', '缮'] +['åĽ½', 'ä¼ļ'] +['ä¸Ģ缴', 'éĥ½æĺ¯'] +['æľīä¸Ģ', 'ä½į'] +['éħį', 'æľī'] +['éĻª', 'çĿĢ'] +['ä¼ģ', 'åĽ¾'] +['æĮī', 'ä¸ĭ'] +['èĵĿ', 'åĽ¾'] +['æ©', 'ĺ'] +['大å¤ļ', 'æĺ¯'] +['辩', '论'] +['æĹĭ', 'å¾ĭ'] +['æĬ¥', 'éĢģ'] +['æĿ¡', 'è§Ħå®ļ'] +['åĬ¨', 'éĿĻ'] +['åĮĪ', '奴'] +['æĭľ', '访'] +['ä¸Ģ', 'åĪĢ'] +['ä»ĸ', 'çŁ¥éģĵ'] +['主', 'æĿĥ'] +['ä»ĸ', 'æĽ¾'] +['æĴŃ', 'ç§į'] +['å£ģ', 'åŀĴ'] +['çī¢è®°', '使åij½'] +['åľ¨è¿Ļ', 'æĸ¹éĿ¢'] +['æīĭ', 'èħķ'] +['æĶ¯', 'æŀ¶'] +['ä¾Ĩ', 'èĩª'] +['éĩį', 'å¡ij'] +['å¤ļ', 'å±Ĥ次'] +['ä»ĭ', 'è´¨'] +['éĿ¢', 'åŃĶ'] +['æ½®', '湿'] +['åİ¿', 'åŁŁ'] +['游æĪı', 'å½ĵä¸Ń'] +['å£', 'ŀ'] +['åĪĹ', 'åĩº'] +['èµĽ', 'åĮº'] +['å¤ļ', 'åįĬ'] +['éĩįçĤ¹', 'å·¥ä½ľ'] +['æĪij们', 'å¿ħé¡»'] +['æŁı', 'æŀĹ'] +['é²ģ', 'èĥ½'] +['æĸ½', 'å±ķ'] +['åIJĦ', 'åĮº'] +['åħį', 'ç¨İ'] +['èµĽ', 'åIJİ'] +['æľĢ', 'éĩįè¦ģ'] +['ä¸Ģ个', '好çļĦ'] +['è¿Ŀæ³ķ', 'è¿Ŀè§Ħ'] +['äºĨè§£', 'æĽ´å¤ļ'] +['æķ¬', '请'] +['ç¬ijçĿĢ', '说'] +['ä¸įæĸŃ', 'åıijå±ķ'] +['æijĦå½±', 'å¸Ī'] +['以', 'éĺ²'] +['çĤ¸', 'å¼¹'] +['声', 'åĵį'] +['ç¤', 'ģ'] +['æĩ', '¿'] +['èĪĨ', 'æĥħ'] +['èĩªçͱ', 'è´¸æĺĵ'] +['æķı', 'æį·'] +['ä¸ī大', 'éĺ¶æ®µ'] +['èĭ', 'Ķ'] +['æĹº', 'åŃ£'] +['ä¸į', '满æĦı'] +['微信', 'åı·'] +['ä¿®', '为'] +['çł´', 'è£Ĥ'] +['éĢĥ', '离'] +['æ¯ı', 'èĤ¡'] +['è¾¾', 'ä¸įåΰ'] +['æ¯ıå¹´', 'éĥ½'] +['çģ¯', '笼'] +['æŃ¤', 'åŁºç¡Ģä¸Ĭ'] +['åĥı', '个'] +['åĪĨ', '娩'] +['æĻ', '¾'] +['ä¸į', 'èĩ³äºİ'] +['红', '线'] +['误', 'è§£'] +['举', 'è·¯'] +['æ·®', 'å®ī'] +['产', 'åѦ'] +['产åѦ', 'çłĶ'] +['èī¾', 'æ»ĭ'] +['è»ĭ', 'çĹħ'] +['åīįæıIJ', 'æĺ¯'] +['æ¯ı', 'ä¸Ģ天'] +['ä¸ĥ', '大'] +['æłij', 'åı¶'] +['èµ°', 'å¾Ĺ'] +['è¿Ļ', '两ç§į'] +['æİı', 'åĩº'] +['æİ', 'IJ'] +['é¢Ĩ导', 'èĢħ'] +['ä¸Ģ', 'æľµ'] +['个å¤ļ', 'æľĪ'] +['ä¸Ń', 'åħ³'] 
+['ä¸Ńåħ³', 'æĿij'] +['课åłĤ', 'æķĻåѦ'] +['大', 'åĴĸ'] +['éģĭ', 'ç͍'] +['è¯ļ', 'æĦı'] +['ç»Ħ', 'åĽ¾'] +['è¯ķ', 'çĿĢ'] +['ä¹Ķ', 'æ²»'] +['è¿ĺ', 'ä¸įæĺ¯'] +['æľī', 'æĽ´å¥½çļĦ'] +['åIJİ', 'å¤ĩ'] +['æĸ°çĶŁ', 'åĦ¿'] +['æ°Ķ', 'è¡Ģ'] +['æ²¥', 'éĿĴ'] +['å±ı', 'éļľ'] +['æ¥Ń', 'åĭĻ'] +['æĪij', '以为'] +['éķ¿', '缸'] +['èĢģ', 'çΏ'] +['éķĩ', 'æ±Ł'] +['æľºæ¢°', '设å¤ĩ'] +['ä½Ĩæĺ¯', 'å¦Ĥæŀľ'] +['åĿļå®ļ', 'ä¸į'] +['åĿļå®ļä¸į', 'ç§»'] +['åĨ²', 'éĶĭ'] +['ç®Ģ缴', 'æĺ¯'] +['åĤ¨', 'èĵĦ'] +['纯', 'ç͵åĬ¨'] +['漫', 'æŃ¥'] +['举', 'èµ·'] +['æģ¶', 'æĢ§'] +['è¨ĺ', 'éĮĦ'] +['èģĮèĥ½', 'éĥ¨éŨ'] +['åħ¨', 'éķ¿'] +['鼻', 'è¦ĸ'] +['ä¹³', 'èħº'] +['ä½ķ', 'å¤Ħ'] +['æ¶Ī', 'æŀģ'] +['æŃ£', 'å¤Ħäºİ'] +['å®ī', 'å®ģ'] +['æĪIJ', 'éķ·'] +['åıĻ', 'è¿°'] +['æºĥ', 'çĸ¡'] +['ä½Ĩ', 'çİ°åľ¨'] +['女', 'æĺŁ'] +['å©´', 'å¹¼åĦ¿'] +['æĬķ', 'èŀįèµĦ'] +['éĹ®', 'éĹ®'] +['æıŃ', 'å¼Ģ'] +['è¯', 'ı'] +['åIJį', 'å½ķ'] +['èĺij', 'èıĩ'] +['åIJĬ', 'é¡¶'] +['æ¹ĸ', 'åĮº'] +['åįĸ', 'åľº'] +['建', 'ç¯'] +['建ç¯', 'ī'] +['èİ', '½'] +['åIJ¬', 'åIJ¬'] +['ç«ŀäºī', 'ä¼ĺåĬ¿'] +['åĩº', 'ä»»'] +['æľī', '两ç§į'] +['橱', 'æŁľ'] +['è¤', 'ª'] +['è¯ķ', 'åį·'] +['ç»ıæµİ', 'æĬĢæľ¯'] +['æ·±', 'å±Ĥ'] +['éĩįè¦ģ', 'åĨħ容'] +['é£İ', 'æİ§'] +['çĬ¶æĢģ', 'ä¸ĭ'] +['éĥ¨', 'éĸĢ'] +['广', 'æ±½'] +['è§Ĥ', 'æij©'] +['éģĹ', 'çķĻ'] +['转', 'è´¦'] +['æĮģ', 'ä»ĵ'] +['æĢ»', '计'] +['åľĺ', 'éļĬ'] +['æĪ¿', '举'] +['éĺĢ', 'éŨ'] +['åħ¬', 'åħ³'] +['åħ³', 'åĪĩ'] +['èĤ', 'ĺ'] +['æķ¸', 'æĵļ'] +['ä¸ī', 'åįģå¹´'] +['è§ģè¯ģ', 'äºĨ'] +['å±', 'Ĩ'] +['çģ°', 'å°ĺ'] +['æ¦ľ', 'é¦ĸ'] +['è¦ĨçĽĸ', 'çİĩ'] +['ä»Ļ', '女'] +['çĶŁäº§', 'æĢ»'] +['çĶŁäº§æĢ»', 'å̼'] +['æĪ¿', 'è´·'] +['æ±Ł', 'åĮº'] +['åħħç͵', 'æ¡©'] +['çϾ', 'åIJĪ'] +['確', 'èªį'] +['转', 'ç§»åΰ'] +['éĥ½', 'æĹłæ³ķ'] +['纪念', 'é¦Ĩ'] +['çŃ¾ç½²', 'äºĨ'] +['å¹¶ä¸į', 'å¤ļ'] +['æĮ', 'ł'] +['ä¸į太', '好'] +['ä¸ĸ', '代'] +['误', '导'] +['é«ĺå³°', '论åĿĽ'] +['åħ¼', '容'] +['龸', 'æ°Ķ'] +['æĿ¥', '访'] +['æīĢ', '带æĿ¥çļĦ'] +['æĺ¯ä¸Ģ', 'éĥ¨'] +['æĻļ', 'é¥Ń'] +['åİĨ', '代'] +['åIJ¦', 'åīĩ'] +['ä¹ħ', 'ä¹ħ'] +['æľīæķĪ', 'æľŁ'] +['诱', 'åıij'] +['æĢ»', 'èµĦ产'] +['æľ¬èº«', 
'å°±æĺ¯'] +['çĶŁäº§', 'åİĤå®¶'] +['æĹ¶', '髦'] +['èĢIJ', 'ç͍'] +['ä»İå°ı', 'å°±'] +['æĿ¡', '约'] +['èĭ±', 'åĭĩ'] +['ä¿Ĺ', 'è¯Ŀ说'] +['寺', 'åºĻ'] +['å¿ĥçIJĨ', 'åģ¥åº·'] +['ä»Ģä¹Ī', 'äºĭæĥħ'] +['æ±ī', 'åŃĹ'] +['çķĻ', 'ä½ı'] +['åįĹ', 'è·¯'] +['ä¸ī', '项'] +['丢', 'äºĨ'] +['æĥ³', 'åΰäºĨ'] +['çѹ', 'éĽĨ'] +['éĻĦåĬł', 'å̼'] +['西', 'è£ħ'] +['ä¹ĭ', 'ä½ľ'] +['åģļçļĦ', 'äºĭ'] +['çķ¶', 'æĤ¨'] +['çķ¶æĤ¨', 'åľ¨'] +['é¦ĸ', '款'] +['ä¸įåľ¨', 'ä¹İ'] +['å·¥ç¨ĭ', 'æĸ½å·¥'] +['éļIJ', 'éļIJ'] +['åıĺ', '身'] +['沿', 'éĢĶ'] +['æĤł', 'æĤł'] +['ä¿Ŀ', 'æļĸ'] +['çĶŁæ´»', 'åŀĥåľ¾'] +['渤', 'æµ·'] +['æŃ¦', 'ä¾ł'] +['女', '主è§Ĵ'] +['举', 'ä¾ĭ'] +['æ', '·¨'] +['çϽ', 'é¢Ĩ'] +['è£Ļ', 'åŃIJ'] +['è¿Ķ', 'è¿ĺ'] +['è¿Ī', 'åĩº'] +['é¾Ļ', 'éŨ'] +['ç»ıæµİ', 'ä½ĵ'] +['æĶ¶', 'å®ĺ'] +['çķĮ', 'éĻIJ'] +['è·³', 'åĩº'] +['åįĩ', 'å̼'] +['绵', 'éĺ³'] +['çĸ¤', 'çĹķ'] +['çľĭ', 'æ¸ħ'] +['æĭĴ', 'çµķ'] +['è¥Ħ', 'éĺ³'] +['课', 'å¤ĸ'] +['åŃIJ', 'åŃĻ'] +['æŃĮ', 'è¯į'] +['æĪIJ', 'åIJį'] +['溶', 'æ¶²'] +['åĦĴ', 'å®¶'] +['åķĨä¸ļ', 'åĮĸ'] +['辨', 'åĪ«'] +['å¤ļ', 'è¾¾'] +['ç½ij', 'åºĹ'] +['ä¹Ŀ', '大'] +['ä¹Ŀ大', 'ç²¾ç¥ŀ'] +['æŃ¤', '举'] +['è¿ŀ', 'è½½'] +['ä¸Ģ', 'åĢĭ人'] +['èī²', 'æ³½'] +['æ¶µçĽĸ', 'äºĨ'] +['è¦ı', 'åĬĥ'] +['åĽ½', 'æĥħ'] +['åį«çĶŁ', 'åģ¥åº·'] +['积æŀģ', 'åĵįåºĶ'] +['æĭ', 'Ļ'] +['åζ', 'åĬ¨'] +['æĥ³è±¡', 'åĬĽ'] +['çļĦ', 'ä¹IJè¶£'] +['å¼łå®¶', 'çķĮ'] +['å´', 'İ'] +['éĩį', 'åŀĭ'] +['å¤ĸ', 'å¢Ļ'] +['æĶ¾', 'åѦ'] +['è®¤çľŁ', 'åŃ¦ä¹ł'] +['è´¬', 'å̼'] +['æ³ķ', 'æ¡Ī'] +['æĬ¤èĤ¤', 'åĵģ'] +['éĻ·åħ¥', 'äºĨ'] +['请', 'æĤ¨'] +['åŀ', '¢'] +['æķĻèĤ²', 'èµĦæºIJ'] +['交æĺĵ', 'å¹³åı°'] +['æĹ¶', 'è£ħ'] +['ä¼łæŁĵ', 'çĹħ'] +['æ¹ĸ', 'æ³Ĭ'] +['èµĦ', '管'] +['åݨ', 'å¸Ī'] +['éĹľ', 'éį'] +['éĹľéį', 'µ'] +['åĵĪåĵĪ', 'åĵĪ'] +['çĽĹ', 'çªĥ'] +['çĶľ', 'ç¾İ'] +['åºĦ', 'åĽŃ'] +['缮åīį', 'å·²ç»ı'] +['è¾¹', 'ä¸Ĭ'] +['çģ«', 'èĬ±'] +['æĬ¥', 'è®°èĢħ'] +['æģĭ', 'æĥħ'] +['ç´§', 'åĩij'] +['æ°´', 'æµģ'] +['è¿Ļæĺ¯', 'æĪij们'] +['æ³¥', 'åľŁ'] +['æĽ¾', 'ä»»'] +['æĸ¹', 'è¨Ģ'] +['åij¨', 'åħŃ'] +['åı·', '楼'] +['ä¼ij', 'åģĩ'] +['误', 'ä¼ļ'] +['åĽ½', 'åĢº'] +['åīį', 'å¤ķ'] +['两', 'å¼ł'] 
+['éĹ', '«'] +['éŃĶ', '鬼'] +['æĬĬ', 'æĮģ'] +['èĬĤèĥ½', 'çݯä¿Ŀ'] +['æ¸ħæ´ģ', 'èĥ½æºIJ'] +['èĤ¥', 'æĸĻ'] +['é«ĺ', 'é¢ij'] +['å°±', 'æľīäºĨ'] +['交', 'ä¼ļ'] +['没', 'éĴ±'] +['éĽħ', 'æĢĿ'] +['è¦ģ', 'åıĬæĹ¶'] +['åŁ¹åħ»', 'åѦçĶŁ'] +['欣', 'åĸľ'] +['çĥŃæ°´', 'åύ'] +['é¾Ļ', 'æ¹ĸ'] +['äºĮ', '楼'] +['æĸ°æµª', 'è´¢ç»ı'] +['æĸ°', 'åĬ¨èĥ½'] +['èµ£', 'å·ŀ'] +['æĭ³', '头'] +['æµģ', 'åIJij'] +['ä¹Łæĺ¯', 'å¾Ī'] +['åıij', 'åĶ®'] +['ä¸Ń', 'åIJ«æľī'] +['åIJĵ', 'å¾Ĺ'] +['å·¨', 'æĺŁ'] +['æĹł', 'æīĢè°ĵ'] +['æ¯Ľ', 'åŃĶ'] +['åħ¬åħ±', '交éĢļ'] +['çĤİ', 'çĥŃ'] +['èµ·', 'èįī'] +['åĬłçĽŁ', 'åķĨ'] +['说', 'ä¸įåĩº'] +['大åѦ', 'æ¯ķä¸ļ'] +['å·¥ä¸ļ', 'åĽŃ'] +['éłĺ', 'åŁŁ'] +['åºĨ', 'åħ¸'] +['æµģ', '产'] +['èģ²', 'éŁ³'] +['ä¼¼ä¹İ', 'æĺ¯'] +['è´§', 'æºIJ'] +['æ·±', 'åĪĩ'] +['æ²»çĸĹ', 'æĸ¹æ³ķ'] +['èµĦæºIJ', 'éħįç½®'] +['ç¶²', 'åıĭ'] +['çĶ', '£'] +['äº', '¥'] +['躲', 'åľ¨'] +['社', 'ç§ij'] +['è»Ł', 'é«Ķ'] +['女', 'è£ħ'] +['æŃ¡', 'è¿İ'] +['综åIJĪ', 'å®ŀåĬĽ'] +['æł¼', 'å°ĩ'] +['åħļåı²', 'åŃ¦ä¹ł'] +['æľĢ', 'åŁºæľ¬'] +['æľĢåŁºæľ¬', 'çļĦ'] +['çľĭ', 'æľĽ'] +['åıĹ', 'è´¿'] +['ä¸įä»ħ', 'èĥ½'] +['ä½ķ', 'å¿ħ'] +['ä¸Ģ个', 'å°ıæĹ¶'] +['ç¾', 'Į'] +['æĭĽ', 'æĶ¶'] +['çĤĴ', 'èĤ¡'] +['æĿij', 'å¹²éĥ¨'] +['缸', 'çα'] +['æ½ľ', 'èĥ½'] +['ä¹', 'į'] +['æĹ¶', 'è¾°'] +['欣', 'æħ°'] +['éĵ¶', 'è¡Įä¸ļ'] +['çĭŃ', 'çªĦ'] +['éĩįçĤ¹', 'é¢ĨåŁŁ'] +['çݰå®ŀ', 'çĶŁæ´»'] +['éĮ¯', '誤'] +['æĸ°', 'è§Ħ'] +['滥', 'ç͍'] +['æĹ¶', 'ä¸į'] +['æĹ¶ä¸į', 'æĹ¶'] +['帳', 'èĻŁ'] +['ç¨Ģ', '缺'] +['åIJij', '举'] +['ä¿Ŀåģ¥', 'åĵģ'] +['çıŃ', 'éķ¿'] +['äºĴ', 'åĭķ'] +['笼', '罩'] +['æ½', 'Ľ'] +['æļĸ', 'å¿ĥ'] +['è½°', 'çĤ¸'] +['åºĨ', '幸'] +['è²Į', 'ä¼¼'] +['æĵ', 'º'] +['èĢIJ', '磨'] +['ä¸ĵä¸ļ', '人士'] +['ä¸Ģèά', 'éĥ½æĺ¯'] +['æ¼³', 'å·ŀ'] +['åħ¨', 'èĩªåĬ¨'] +['å½ķ', 'ç͍'] +['大', 'è·Į'] +['æľīæķĪ', 'æĢ§'] +['èĩª', 'åĭķ'] +['ä¸ī个', 'æĸ¹éĿ¢'] +['港', 'åĮº'] +['ä¿¡', '貸'] +['éĢļ', 'è¯Ŀ'] +['é«ĺ', '涨'] +['æ³Ħ', 'æ¼ı'] +['éħį', 'ä¸Ĭ'] +['åħļ', 'å·¥å§Ķ'] +['被', '认为'] +['被认为', 'æĺ¯'] +['ä¸įä¼ļ', 'åĨį'] +['è°ĥ', 'åīĤ'] +['åıĤ', 'èĤ¡'] +['èĦ±', 'åıij'] +['å¿ł', 'å®ŀ'] +['åĨħ', 'åĪĨæ³Į'] +['ç¹ģ', 
'å¿Ļ'] +['åıĮ', 'åĪĽ'] +['é©»', 'æĿij'] +['åĪĴ', 'ç®Ĺ'] +['éģİ', 'ä¾Ĩ'] +['åľ£', 'ç»ı'] +['èıľ', '鸣'] +['æĭ¼', 'å¤ļå¤ļ'] +['ä¸ŃåĽ½', '汽车'] +['çĥŁ', 'èįī'] +['缴', 'æµģ'] +['äºĨä¸Ģ', 'åı£æ°Ķ'] +['ä½İ', 'æĪIJæľ¬'] +['æī¾', 'åĽŀ'] +['èĩª', 'åįij'] +['總', 'æĺ¯'] +['æĸĩåĮĸ', 'åĪĽæĦı'] +['天', 'æ²³'] +['樱', 'æ¡ĥ'] +['éªij', 'åħµ'] +['éĩĮéĿ¢', 'æľī'] +['çİ', '®'] +['èĥ½', 'æī¾åΰ'] +['éĢĥ', 'è·ij'] +['åĪĩ', 'å°Ķ'] +['åĪĩå°Ķ', '西'] +['以ä¸ĭ', 'æĺ¯'] +['å²³', 'éĺ³'] +['çļĦ', 'æ¦Ĥçİĩ'] +['æĬµ', 'åζ'] +['å¸Ī', 'äºĭåĬ¡'] +['å¸ĪäºĭåĬ¡', 'æīĢ'] +['åĩĨ', 'æĹ¶'] +['屬', 'æĸ¼'] +['订', 'è´Ń'] +['åįłæį®', 'äºĨ'] +['ä¸Ń', 'éĢĶ'] +['å°', 'ĭ'] +['é»ij', '马'] +['åİ¿', 'åħ¬å®īå±Ģ'] +['ä¸ĥ', 'æľĪ'] +['èī²', 'ç´ł'] +['å¿ĥèĦı', 'çĹħ'] +['æĹ¶', 'éĻIJ'] +['æ¯į', 'åħ¬åı¸'] +['å¹ķ', 'åIJİ'] +['ä¸Ĭ', 'æ¦ľ'] +['å̾åIJij', 'äºİ'] +['纸', 'ä¸Ĭ'] +['æ¡', 'ĵ'] +['éĽĨä½ĵ', 'ç»ıæµİ'] +['æĥħ', 'å¢ĥ'] +['è¦ģ', 'åģļåΰ'] +['ç©į', '極'] +['åıª', 'æĢķ'] +['æ¹ĺ', '西'] +['çļ±', '纹'] +['åħ¨', 'åľĭ'] +['çĦ¡', 'è«ĸ'] +['好', 'æĦŁ'] +['åįķ', 'ä»·'] +['è¿Ľç¨ĭ', 'ä¸Ń'] +['æĺĨ', 'ä»ij'] +['åĪĽ', '客'] +['åħħ', 'æĸ¥'] +['åħĪ', 'æĬĬ'] +['该', 'æĢİä¹ĪåĬŀ'] +['åĵģ', 'å¾·'] +['åħ¨éĿ¢', 'åıijå±ķ'] +['è¨Ī', 'åĬĥ'] +['æĢ»', 'å·¥ä¼ļ'] +['ä½Ľå±±', 'å¸Ĥ'] +['æĬĹ', 'è¡¡'] +['å¼Ģ', 'åľº'] +['éĴ±', 'å¸ģ'] +['åıĭ', '们'] +['å«ī', 'å¦Ĵ'] +['ç´¢', 'èµĶ'] +['è®Ĭ', 'åĮĸ'] +['æĮ¤', 'åİĭ'] +['æĮij', 'è¡ħ'] +['çŃī', 'ä¸Ģæī¹'] +['æĿ¨', '欢'] +['ä¸ĵå®¶', 'åѦèĢħ'] +['èĥ½', 'è¾¾åΰ'] +['èµ°', 'è¿ij'] +['è´«åĽ°', 'åľ°åĮº'] +['éĻIJ', 'æľŁ'] +['ä¸į', '平衡'] +['åĽ½åĨħ', 'å¸Ĥåľº'] +['èµĽ', 'åľº'] +['éħį', 'èµĦ'] +['è¦ģ', 'èĢĥèĻij'] +['ä¸ĩ', 'åı°'] +['æľĪ', 'æľ«'] +['éĶ', '¥'] +['åŃ', '«'] +['æİ¥è§¦', 'åΰ'] +['åĩº', '产'] +['æķĻ', 'åѸ'] +['ä½ľ', 'å¼Ĭ'] +['çļĦ', 'æľĢåIJİä¸Ģ'] +['ä¿ĥ', 'æĪIJ'] +['åIJ¸', 'åıĸ'] +['æ½ľ', 'èīĩ'] +['被', 'éªĹ'] +['è¾ĵ', 'äºĨ'] +['çĭIJ', 'çĭ¸'] +['åįĩ', 'éĻį'] +['è¿ĻäºĽ', 'ä¸ľè¥¿'] +['æĬķèµĦ', 'åŁºéĩij'] +['çĶŁçī©', 'åѦ'] +['ç½ij绾', 'èIJ¥éĶĢ'] +['åIJij', 'è®°èĢħ'] +['èįī', 'åľ°'] +['æĢ', '¯'] +['æľįåĬ¡', 'èĥ½åĬĽ'] +['éĥģ', 'éĹ·'] +['åįķ', 
'åĵģ'] +['å¾Ĺ', '罪'] +['æĺĵ', 'äºİ'] +['个å¤ļ', 'å°ıæĹ¶'] +['éĩį', 'ä»»'] +['ä¸Ĭ', 'å®ĺ'] +['æľ¬', 'éĩij'] +['çı¾', 'åł´'] +['溢', 'ä»·'] +['æĺŁ', 'è¾°'] +['æ´»åĬ¨', 'çİ°åľº'] +['丹', '麦'] +['å¸Ŀ', 'çİĭ'] +['æŁ¥', 'æĺİ'] +['åŃĺåľ¨', 'äºİ'] +['é¦Ļ', 'æ°´'] +['æĬ½', 'æ£Ģ'] +['å®ŀéĻħä¸Ĭ', 'æĺ¯'] +['æĸ°', 'å¾ģç¨ĭ'] +['è´¢åĬ¡', '管çIJĨ'] +['æİ', 'Ľ'] +['åĨľ', 'åİĨ'] +['éĥ½', 'èĥ½å¤Ł'] +['éĤ¯', 'éĥ¸'] +['羣', '實'] +['ç»', 'Ĭ'] +['åĨµ', 'ä¸Ķ'] +['ç½®', '身'] +['ç¥Ī', '祷'] +['çĿģ', 'å¼Ģ'] +['æĮĩ', 'çĤ¹'] +['å¼Ģ', 'æľº'] +['西', 'å®ģ'] +['åĮĹ', '约'] +['积', 'æ°´'] +['åĩº', 'åĬ¨'] +['åıijå±ķ', '模å¼ı'] +['转', 'æĬĺ'] +['èĢĥ', 'çĤ¹'] +['æľī', 'ç½ijåıĭ'] +['è´«åĽ°', 'æĿij'] +['æĪij们', 'çŁ¥éģĵ'] +['åĪĨ', 'éĶĢ'] +['å±±', 'èĦī'] +['æ¯Ķ', 'æĭŁ'] +['ä¼°', 'ç®Ĺ'] +['æĶ¹', '建'] +['壮', 'è§Ĥ'] +['ç§ī', 'æĮģ'] +['æı', 'ª'] +['ç¦', 'Ģ'] +['åĮĸåѦ', 'åĵģ'] +['ä¸ŃåĽ½', 'åζéĢł'] +['ä¸Ģ', 'æŀ¶'] +['æīį', 'è¡Į'] +['æĭĽ', 'å¾ħ'] +['åıĺ', 'æį¢'] +['åīį', '线'] +['幸', '好'] +['è¿Ļæł·', 'çļĦè¯Ŀ'] +['å¿ĥ', 'è¡Ģ管'] +['æĢ§', 'çĸ¾çĹħ'] +['åħ¨', 'èĥ½'] +['åĪij', '侦'] +['ä¿¡æģ¯', 'åıijå¸ĥ'] +['æĺ¾', 'çĦ¶æĺ¯'] +['éĿĴ', 'éĵľ'] +['åIJĥ', 'ä»Ģä¹Ī'] +['ç͵', 'ä»·'] +['æ³ķå¾ĭ', 'è§Ħå®ļ'] +['çħ', '²'] +['çĵ·', 'åύ'] +['èĤī', 'ç±»'] +['æıĴ', 'åħ¥'] +['åĹ', 'ľ'] +['è¿Ł', 'è¿Ł'] +['ä¸ĢçĤ¹', 'éĥ½ä¸į'] +['è¿ĺ', 'åĮħæĭ¬'] +['èĪį', 'ä¸įå¾Ĺ'] +['æłĩå¿Ĺ', 'æĢ§'] +['æľĪ', '以æĿ¥'] +['ç³ĸ', 'æŀľ'] +['éĥ½', 'åºĶ该'] +['çݯå¢ĥ', 'åį«çĶŁ'] +['èĪª', 'è¡Į'] +['éĥij', 'éĩį'] +['ç½ij', 'æĬķ'] +['åįģ', 'ä½³'] +['ç§ģ', 'ä¸ĭ'] +['æļ´', 'è·Į'] +['åĬłå¿«', 'åıijå±ķ'] +['产åĵģ', 'çłĶåıij'] +['åĪĽéĢł', 'åĩº'] +['æĢ»', 'è§īå¾Ĺ'] +['åºķ', 'çĽĺ'] +['èķ', 'Ĭ'] +['åĩºå¸Ń', 'ä¼ļè®®'] +['主', 'æĿ¿'] +['æĹ¥æĻļ', 'éĹ´'] +['å®ĺæĸ¹', 'å¾®åįļ'] +['å¼ķç͍', 'æĹ¥æľŁ'] +['åī¯', 'æķĻæİĪ'] +['ç͵åŃIJ', '产åĵģ'] +['è¡°', 'éĢĢ'] +['çķĻ', 'åŃĺ'] +['çģ«', 'åĬĽ'] +['çĴ', '§'] +['çļ', 'Ĥ'] +['åħ¼', 'åħ·'] +['éĩį', 'è¿Ķ'] +['é¢Ĩ', 'çķ¥'] +['åĪĩ', 'éϤ'] +['åĨįçĶŁ', 'èĥ½æºIJ'] +['å®ŀåľ¨', '太'] +['çIJĨ论', 'ä¸Ĭ'] +['ä¸ī', 'å±Ĥ'] +['ä¸ĸçķĮ', 'åIJĦåĽ½'] +['å®ľ', 'æĺĮ'] +['è̳', 'è¾¹'] 
+['宽', 'æķŀ'] +['æ±ī', 'æĹı'] +['çϽ', 'çϽ'] +['è¿ĻéĩĮ', 'éĿ¢'] +['çĶŁæ´»', 'ä¹łæĥ¯'] +['èµŀ', 'èµı'] +['çĶ·', '士'] +['ä¸Ń', 'ä¿Ħ'] +['车', '祸'] +['åīĤ', 'éĩı'] +['éϤ', 'åİ»'] +['å·¦', 'è¾¹'] +['çŃij', 'çī¢'] +['çīĽ', 'å¸Ĥ'] +['å®¶', 'åĬ¡'] +['åķ', 'ĥ'] +['ç½®', 'æį¢'] +['ç´«', 'å¤ĸ'] +['ç´«å¤ĸ', '线'] +['å¾Ģ', 'åīį'] +['åĬĽ', 'åѦ'] +['ç´§', 'è·Ł'] +['缮çļĦ', 'åľ¨äºİ'] +['ç»', '®'] +['ç¥', 'Ĥ'] +['宣', 'è¨Ģ'] +['äºĮ', 'æ°§åĮĸ'] +['äºĮæ°§åĮĸ', '碳'] +['æĹł', 'ç¼ĺ'] +['ç²¾', 'éĢļ'] +['è¨', 'º'] +['å¼ķåıij', 'äºĨ'] +['æľĢ', 'åħĪ'] +['æ´¾', 'é©»'] +['ä¸į', 'å¿į'] +['æĪij', 'çΏ'] +['å¹´', 'ä¸ĭåįĬå¹´'] +['æ·ĭ', 'å·´'] +['没', 'éĹ®é¢ĺ'] +['åºĹ', 'åĨħ'] +['è·Ł', 'æĪij说'] +['çĶŁäº§', 'çĶŁæ´»'] +['è§Ĥ', 'æľĽ'] +['æ¸', 'į'] +['被', 'æī§è¡Į'] +['被æī§è¡Į', '人'] +['èĪ', 'ľ'] +['æİ', 'º'] +['ä¸Ģ', 'ç§Ĵ'] +['èįī', 'åĿª'] +['åij¼', 'åĴĮ'] +['åij¼åĴĮ', '浩'] +['åij¼åĴĮ浩', 'çī¹'] +['人æ°ij', 'éĵ¶è¡Į'] +['çĦķ', 'åıij'] +['è¯ģåΏ', '交æĺĵ'] +['çķ', 'Ķ'] +['æľº', 'èĥ½'] +['å¦', '¾'] +['æĻļ', 'å¹´'] +['å·¥åķĨ', 'èģĶ'] +['åİŁ', 'åŀĭ'] +['è§Ĵ度', 'çľĭ'] +['æĬ¥', '社'] +['è¯į', 'æĿ¡'] +['躲', 'éģ¿'] +['éĩį', 'åIJ¯'] +['å¤ķ', 'éĺ³'] +['èĤ¡æĿĥ', '转让'] +['åľ¨', 'ä¸Ģ'] +['åľ¨ä¸Ģ', 'æĹģ'] +['社ä¼ļ', 'åĮĸ'] +['åıijå±ķ', 'åİĨç¨ĭ'] +['æĭĸ', 'æ¬ł'] +['使', 'èĢħ'] +['ä¸İ', 'åIJ¦'] +['æĸ°', 'å±ĢéĿ¢'] +['ä»Ĭ天', 'æĪij们'] +['é½IJ', 'èģļ'] +['对', 'æĪij说'] +['éĢĴ', '交'] +['æľª', 'æĽ¾'] +['èİ', 'Ĭ'] +['éĸ', 'ī'] +['亲', 'æīĭ'] +['è§Ĵ', 'éĢIJ'] +['æľī', 'é»ŀ'] +['ç¨İ', 'çİĩ'] +['ä½İ', '声'] +['é»ĺ', 'å¥ij'] +['æĻ®', 'æ³ķ'] +['大', 'ä¸ĵ'] +['第äºĮ', '大'] +['ä½ı', 'åĿĢ'] +['æĶ¾', 'è¿Ľ'] +['äºĮ', 'æĪĺ'] +['亲', '身'] +['åĽº', 'åĮĸ'] +['ä¸ĭ', '乡'] +['åħ³éĶ®', 'æĬĢæľ¯'] +['åĽŀ', 'æĥ³'] +['æĬ¥', 'åĪĬ'] +['æ¶Ĥ', 'æĬ¹'] +['èĹı', 'çĿĢ'] +['ç¥Ŀ', 'æĦ¿'] +['åįĩ', '温'] +['çĶļèĩ³', 'è¿ŀ'] +['åħ¬åħĥ', 'åīį'] +['ç¾İ', 'æĸ¹'] +['è¯ļ', 'å®ŀ'] +['æĹł', 'åģ¿'] +['åīµ', 'æ¥Ń'] +['å°ıå¿ĥ', '翼'] +['å°ıå¿ĥ翼', '翼'] +['两', 'æīĭ'] +['温馨', 'æıIJ示'] +['仿', '羣'] +['æĥ', '¶'] +['èĥ¡', 'åŃIJ'] +['å·¥ä½ľ', 'ç«Ļ'] +['硬', 'çĽĺ'] +['ç«', '¿'] +['åĤ³', 'éĢģ'] 
+['åħ¨', 'æł¡'] +['é²ľ', 'æ´»'] +['çĴĢ', 'çĴ¨'] +['ç»ĵ', 'å°¾'] +['æį¢', 'æĿ¥'] +['æĪ', 'Ģ'] +['ä½İ', 'ä½į'] +['ä¸ĩåħĥ', '以ä¸Ĭ'] +['åĬł', 'åĪĨ'] +['æİ¨ä»ĭ', 'ä¼ļ'] +['çIJĨ', 'èµĶ'] +['å¾·', 'å°Ķ'] +['æĬĹ', 'è®®'] +['æ´', '¼'] +['åĸ', '§'] +['åŁİ', 'éĻħ'] +['å¾Ī', 'æ£Ĵ'] +['人', 'æŃ»äº¡'] +['ä¼ļå±ķ', 'ä¸Ńå¿ĥ'] +['äºĴèģĶ', 'äºĴéĢļ'] +['èĸĦ', 'èĨľ'] +['éĩį', 'é»ŀ'] +['ç¦ģ', 'æ¯Ĵ'] +['åĨ·', 'ç¬ij'] +['大家', 'åı¯ä»¥'] +['é¦ĸ', '缸'] +['è¿ij', 'è·Ŀ离'] +['æµ®', 'çݰ'] +['ç§ĺ', 'è¯Ģ'] +['èµ·', 'é£ŀ'] +['æIJ', '¶'] +['羣', 'åģĩ'] +['æģ', 'ķ'] +['å°ı', 'åºĹ'] +['æ°ij', 'çľ¾'] +['åıijå¸ĥ', 'åħ¬åijĬ'] +['ä¾§', 'éĩį'] +['å¾ĺ', 'å¾Ĭ'] +['æĢ', 'Ķ'] +['æª', 'IJ'] +['æķ°', '缮'] +['åī¯', 'ç§ĺ书éķ¿'] +['两', 'åı¥'] +['éļIJ', 'çŀĴ'] +['åıĮ', 'åıĮ'] +['æīĭ', 'æĦŁ'] +['èij¡', '京'] +['éģĹ', 'å¿ĺ'] +['é¬', '¥'] +['è¿Ļ个', 'åľ°æĸ¹'] +['说', 'çļĦè¯Ŀ'] +['å·¡', 'åĽŀ'] +['è¿Ŀ', '竳'] +['æī¾', 'å·¥ä½ľ'] +['æĶ¯', 'çIJĥéĺŁ'] +['裡', 'éĿ¢'] +['æĺ¾ç¤º', 'åĩº'] +['èĩ³', 'å°Ĭ'] +['两', '级'] +['åīį', '段æĹ¶éĹ´'] +['çĺ¦', '身'] +['èĤ¢', 'ä½ĵ'] +['æ¯į', '親'] +['æīĭç»Ń', 'è´¹'] +['汽车', 'è¡Įä¸ļ'] +['æİ©', 'çĽĸ'] +['æİ§èĤ¡', 'éĽĨåĽ¢'] +['åı£', 'å¾Ħ'] +['æĶ¿çŃĸ', 'æİªæĸ½'] +['æµ·', '绵'] +['åħ¨', 'éķĩ'] +['äºĭ', 'åħ³'] +['å¸Ń', 'æī§è¡Į'] +['å¸Ńæī§è¡Į', 'å®ĺ'] +['éĤ£', '次'] +['åı¯èĥ½', 'åĩºçݰ'] +['ä¸Ńå¿ĥ', 'åŁİå¸Ĥ'] +['ç¿»', '身'] +['ä¹Ł', 'ç®Ĺ'] +['ä¾µ', 'çķ¥'] +['åĸĩ', 'åıŃ'] +['æ¯ı次', 'éĥ½'] +['è§', 'ħ'] +['éĻ¢', 'éĻ¢éķ¿'] +['å§ĭ', 'äºİ'] +['èѦ', 'åĬ¡'] +['èį¯', 'æĿIJ'] +['å±ł', 'æĿĢ'] +['æľ¬èº«', 'å°±'] +['éļıæĹ¶', 'éļı'] +['éļıæĹ¶éļı', 'åľ°'] +['åĶ®', 'åįĸ'] +['æĹłäºº', '驾驶'] +['é¢', 'ħ'] +['åĵģ', '質'] +['åĺ²', 'ç¬ij'] +['è·ij', 'åİ»'] +['åħĭ', 'éĩĮæĸ¯'] +['çķ¸', 'å½¢'] +['ä¿®', '饰'] +['磩', 'éĺµ'] +['éŁ³ä¹IJ', 'ä¼ļ'] +['æŁ³', 'å·ŀ'] +['é½', '¡'] +['ä¼ļ', 'è°Ī'] +['æŃ£', 'çīĪ'] +['ä¹Ł', 'åIJĮæł·'] +['æļ§', 'æĺ§'] +['è¡ĮæĶ¿', 'éĥ¨éŨ'] +['ä¹ĸ', 'ä¹ĸ'] +['èĤ¤', 'èī²'] +['æĹ¶', 'ä»»'] +['羣', 'åĪĩ'] +['æľĪ', 'ä¸ĭ'] +['æľĪä¸ĭ', 'æĹ¬'] +['举æĸ¹', 'è´¢å¯Į'] +['è£ħä¿®', 'åħ¬åı¸'] +['éĢĢ', 'è¿ĺ'] +['åĭĺ', 'å¯Ł'] +['åĵ¥', '伦'] 
+['åĵ¥ä¼¦', 'æ¯Ķäºļ'] +['çĭ¬', 'ä¸Ģ'] +['çĭ¬ä¸Ģ', 'æĹł'] +['çĭ¬ä¸ĢæĹł', 'äºĮ'] +['è°ĥ', 'åij³'] +['åİĭ', 'è¿«'] +['åħ¨çIJĥ', 'æľĢ大'] +['åī¯', 'æł¡éķ¿'] +['æĽ´', 'ä½İ'] +['åĪĨéĴŁ', 'åIJİ'] +['åĽŀ', 'ä¾Ĩ'] +['åζ', 'åīĤ'] +['åijĬè¯ī', '大家'] +['çĤ¹', 'éĴŁ'] +['åįģä¸ī', 'å±Ĭ'] +['åij¨', 'åĽĽ'] +['è¿Ļæł·', 'ä¸Ģ'] +['è¿Ļæł·ä¸Ģ', 'æĿ¥'] +['èĭ', 'Ł'] +['æľĽ', 'åİ»'] +['æĪIJ', 'è¯Ń'] +['å½ĵ', 'åį³'] +['ç¬ij', '声'] +['ä¹ĭ', 'åĬ¿'] +['åĪijäºĭ', 'æ¡Īä»¶'] +['æĮĤ', 'çĿĢ'] +['ä½ķ', 'ç§į'] +['å°ı', '游æĪı'] +['åĽ½å®¶', 'æĪĺçķ¥'] +['åĨ·', 'åĨ·'] +['å®ľ', '宾'] +['æIJº', 'ç¨ĭ'] +['è¶ĭ', 'äºİ'] +['åıį', 'çľģ'] +['常', '说'] +['ä¸ĩ', 'æĪ·'] +['åĥµ', 'å°¸'] +['åįĥä¸ĩ', 'åĪ«'] +['åıijçݰ', 'éĹ®é¢ĺ'] +['åı¯', 'çŁ¥'] +['éŨæĪ·', 'ç½ijç«Ļ'] +['åģ¥åº·', '产ä¸ļ'] +['åı³', 'è¾¹'] +['æµ·', 'è¿IJ'] +['è¿ij', 'ä¹İ'] +['åĮ»', 'æ²»'] +['æĢ»', 'ç®Ĺ'] +['ä¸Ģ', 'åĪĨéĴŁ'] +['æĭ', '§'] +['ä¹Ł', 'æľīä¸ĢäºĽ'] +['ä¾Ľç͵', 'åħ¬åı¸'] +['å»ī', 'ä»·'] +['帮', 'ä»ĸ'] +['æŃ¤æ¬¡', 'æ´»åĬ¨'] +['åıªèĥ½', '说'] +['èĬ', 'ĭ'] +['çīĩ', '段'] +['åŃĺåľ¨', 'éĹ®é¢ĺ'] +['ä½łä¼ļ', 'åıijçݰ'] +['è½®', 'å»ĵ'] +['ç½ij', 'éĢļ'] +['滨', 'æ±Ł'] +['æİĪ', 'ä¿¡'] +['é»İ', 'æĺİ'] +['ä¸į', 'å±ŀäºİ'] +['约', 'åįł'] +['éķ¿æ²Ļ', 'å¸Ĥ'] +['èĥļ', 'èĥİ'] +['åħĥ', 'ä»¶'] +['éĻĨ', 'åĨĽ'] +['è³¼', 'è²·'] +['æĮĩ', 'æľĽ'] +['å®ŀä¹ł', 'çĶŁ'] +['çī¹çĤ¹', 'æĺ¯'] +['çıł', 'æ±Ł'] +['çľĭ', 'ä¸įåĩº'] +['ä¸įè§ģ', 'äºĨ'] +['ç¼', 'ī'] +['éĺµ', 'èIJ¥'] +['åĶIJ', 'æľĿ'] +['没', 'å¿ħè¦ģ'] +['åĽ½åľŁ', 'èµĦæºIJ'] +['ç»ıæµİåѦ', 'å®¶'] +['åIJĪèĤ¥', 'å¸Ĥ'] +['çIJ¢', '磨'] +['ç¡®', 'åĪĩ'] +['åŁİå¸Ĥ', 'åıijå±ķ'] +['çŃ·', 'åŃIJ'] +['人æ°ij', 'æľįåĬ¡'] +['满', 'åĪĨ'] +['è¿·', 'ä¿¡'] +['ä½ľèĢħ', 'æľ¬äºº'] +['æĸĩ竳', 'æĿ¥æºIJ'] +['ç«Ļ', 'ç«ĭ'] +['æŀĦ', 'æĪIJäºĨ'] +['è¾Ľ', 'åĭ¤'] +['è¶ħ', '强'] +['éĶ', 'ļ'] +['åīįä¸ī', 'åŃ£åº¦'] +['å°±', 'è§īå¾Ĺ'] +['å´ĩ', 'é«ĺ'] +['è¶Ĭ', 'ä¾Ĩ'] +['è¶Ĭä¾Ĩ', 'è¶Ĭ'] +['å¸Ĥåľº', 'èIJ¥éĶĢ'] +['综åIJĪ', 'ç´łè´¨'] +['åŃ', 'ļ'] +['ä¾®', 'è¾±'] +['äºĮ', 'åŃĹ'] +['å·¥ä½ľ', 'ä»»åĬ¡'] +['åı²ä¸Ĭ', 'æľĢ'] +['æľĢ', 'ä¼ĺ'] +['åIJ©', 'åĴIJ'] +['表', 'çϽ'] +['èİ«', 'åIJį'] 
+['èİ«åIJį', 'åħ¶'] +['èİ«åIJįåħ¶', 'å¦Ļ'] +['å¹', '£'] +['åIJĮå¿Ĺ', '们'] +['建设', 'çĶ¨åľ°'] +['åĦ', 'Ģ'] +['éħį', 'åģ¶'] +['å¼', '©'] +['åͱ', 'çīĩ'] +['æīĭ', 'èĦļ'] +['åħ¼', 'ä»»'] +['åģľ', 'æĶ¾'] +['æŃ£', 'å®Ĺ'] +['æĸ°', 'åĨľæĿij'] +['åĤ¬', 'çĶŁ'] +['æīĢ', 'åŃ¦æł¡'] +['念', 'ä½Ľ'] +['åͤ', 'éĨĴ'] +['åħ±', 'åĪĽ'] +['æĭī', 'ä¸ģ'] +['èĥĮ', 'çĿĢ'] +['çĶŁæĢģ', 'ä¿ĿæĬ¤'] +['åı£', '头'] +['æĸ¹åIJij', 'çĽĺ'] +['調', 'æķ´'] +['æĭĽèģĺ', 'ä¿¡æģ¯'] +['åħ¶ä»ĸ', 'åĽ½å®¶'] +['ç®Ģ', 'æĺĵ'] +['åĮ¿', 'åIJį'] +['è¯Ħ', 'æµĭ'] +['æĺ¯ä¸Ģ', '座'] +['çīµ', 'æīĭ'] +['è¶³', '迹'] +['çIJĨè§£', 'åĴĮ'] +['æľĢ', 'åıĹ'] +['å¿ĥ', 'è·³'] +['çζ', '親'] +['éĿŀ常', 'åĸľæ¬¢'] +['èĭ¦', 'éļ¾'] +['æĬĢ', 'å¸Ī'] +['æ°ij', 'æĦı'] +['æĪĺ', 'åĽ½'] +['æĽ¿', 'è¡¥'] +['æ´¥', 'è´´'] +['ä¸ŃåĽ½', 'ä¼łç»Ł'] +['åIJĦ', 'è¡Į'] +['åIJĦè¡Į', 'åIJĦ'] +['åIJĦè¡ĮåIJĦ', 'ä¸ļ'] +['第äºĶ', 'å±Ĭ'] +['èį·', 'èĬ±'] +['æĦı', 'èŃĺ'] +['票', 'ä»·'] +['åĪĨ', 'æµģ'] +['æĿİ', 'çϽ'] +['æ±Ł', 'åĮĹ'] +['æİĴ', 'æĸ¥'] +['ä½ĵ', 'éĩı'] +['åĮħåIJ«', 'äºĨ'] +['åĪĺ', 'æŁIJ'] +['çݰ', 'å¦Ĥä»Ĭ'] +['å·¥èīº', 'åĵģ'] +['è¿Ļç§į', 'æĸ¹æ³ķ'] +['åĬŀåħ¬', '楼'] +['ç͵', 'å·¥'] +['çħ', 'Ļ'] +['åį¡', 'çīĩ'] +['å¹´', 'å¹´åºķ'] +['ä¸ĵ项', 'èµĦéĩij'] +['åĮ»', 'ç§ij'] +['åĮ»ç§ij', '大åѦ'] +['åĽŀ头', 'çľĭ'] +['ä¸į', 'å±ij'] +['èĩª', '驾'] +['没', 'æĶ¶'] +['æīĵ', 'çĮİ'] +['èĦ¸', 'éĥ¨'] +['åıĥ', 'èĢĥ'] +['å°Ĩ', '士'] +['è´«åĽ°', '人åı£'] +['çIJĨæĥ³', '信念'] +['é£İ', 'å°ļ'] +['人æīį', 'éĺŁä¼į'] +['çij', '¾'] +['æĿ¥', 'è¿ĻéĩĮ'] +['æ´Ĺ', '涤'] +['å¹´', 'èĸª'] +['èĭį', 'çϽ'] +['ä¸ĩ', 'äºĭ'] +['课', 'æľ¬'] +['åºĵ', 'éĩĮ'] +['çī¹', 'æ´¾'] +['ç´¾', 'åijĺ'] +['èµŀ', 'ç¾İ'] +['ç©¿', 'æĪ´'] +['製', 'ä½ľ'] +['èµŀ', 'æĪIJ'] +['ä¸Ģ', 'ä¾§'] +['å½ĵåľ°', '人'] +['æĭ', 'İ'] +['纸', 'è´¨'] +['ä½Ļ', '个'] +['éĶĤ', 'çĶµæ±ł'] +['æľº', 'åŀĭ'] +['éĻ¢', 'éϢ士'] +['åģļ', 'å·¥'] +['å¼ł', 'è´´'] +['ç¥Ľ', 'æĸij'] +['æ®ĸ', 'æ°ij'] +['å¥ij', '约'] +['æ¹ĺ', 'æ½Ń'] +['æIJ', 'ĸ'] +['åŃĺ', 'è´§'] +['交éĢļ', '大åѦ'] +['è¶ģ', 'çĿĢ'] +['æĸĩçī©', 'ä¿ĿæĬ¤'] +['å¤ĩ', 'æĪĺ'] +['éĩĩ', '纳'] +['åįĬ', 'æľĪ'] +['æľĢ', 'åħ³éĶ®'] 
+['æľĢåħ³éĶ®', 'çļĦ'] +['æİ¥', 'éĢģ'] +['æĶ¶', 'åī²'] +['åıį', 'åĢĴ'] +['çĥ', 'Ľ'] +['æ', '½Ķ'] +['ä¼Łå¤§', 'å¤įåħ´'] +['çļĦè¯Ŀ', 'è¯Ń'] +['容', 'å¿į'] +['å®ļ', 'éĩı'] +['æķ', 'Ĺ'] +['åĵģçīĮ', '形象'] +['æīŃ', '转'] +['åĽ½å®¶', 'éĩįçĤ¹'] +['èĨĿ', 'çĽĸ'] +['ä¸Ģ', '楼'] +['大', 'éϏ'] +['éĤª', 'æģ¶'] +['åĽŀ', 'åij³'] +['çĮ', '¿'] +['çĿ¡', 'åīį'] +['æĹł', 'è¾ľ'] +['çĹħæ¯Ĵ', 'æĦŁæŁĵ'] +['æľºæ¢°', 'åĮĸ'] +['çĤ¹', '亮'] +['溶', 'è§£'] +['åĩłä¹İ', 'æīĢæľī'] +['è·ij', 'éģĵ'] +['ç͵è§Ĩ', 'æľº'] +['åı', '¨'] +['æijĩ', 'äºĨ'] +['æijĩäºĨ', 'æijĩ头'] +['èĩª', 'è´Ł'] +['综åIJĪ', 'åĪ©ç͍'] +['èĩª', 'å¦Ĥ'] +['åİŁ', 'ä¾Ĩ'] +['ä¹Łä¸į', 'æĥ³'] +['èĬĤ', '课'] +['è¿ĩ', 'åī©'] +['çͲ', 'çĬ¶'] +['çͲçĬ¶', 'èħº'] +['æĸ°', 'ä¸ĸ纪'] +['èĩªä¸»', 'åĵģçīĮ'] +['é«ĺ', 'å±Ĥ次'] +['ä¸Ģ', 'è§Ĵ'] +['è¡Į', 'äºĭ'] +['ç¥ĸ', 'åħĪ'] +['å©ļ', 'åIJİ'] +['éĹ´', 'éļĻ'] +['ç¼Ŀ', 'éļĻ'] +['è¿Ļ', 'æĶ¯'] +['ä¸įæĸŃ', 'åĪĽæĸ°'] +['å¾®', 'åŀĭ'] +['æĽĻ', 'åħī'] +['享', 'ç͍'] +['ä¸ŃåĽ½', 'ç§»åĬ¨'] +['éĹŃ', 'çݯ'] +['æī§', 'æĦı'] +['åıijå±ķ', 'æł¼å±Ģ'] +['æł¸å¿ĥ', 'åĮº'] +['éªļ', 'æī°'] +['åħļåĴĮ', 'åĽ½å®¶'] +['ä¸ŃåĽ½', 'æĶ¿åºľ'] +['帶', 'èijĹ'] +['ä¸ĩåįĥ', 'çĵ¦'] +['åħ©', '人'] +['äºİæĺ¯', 'æĪij'] +['åĽº', 'ä½ĵ'] +['çªģ', 'å¦Ĥ'] +['çªģå¦Ĥ', 'åħ¶'] +['çªģå¦Ĥåħ¶', 'æĿ¥'] +['éĩĮç¨ĭ', 'ç¢ij'] +['çα', 'ç¾İ'] +['æŁ¥', 'éªĮ'] +['åıĮ', 'èµ¢'] +['éĹª', 'åħī'] +['楼', 'å®ĩ'] +['æĻ', 'ı'] +['æľī', 'è¶³å¤ŁçļĦ'] +['æŁĶ', 'æĢ§'] +['ä¿¡æģ¯', 'å®īåħ¨'] +['管', '线'] +['å¹¶', 'ä¸įä¼ļ'] +['åύ', 'ä»¶'] +['ä½ł', 'åºĶ该'] +['çĿĢ', 'å®ŀ'] +['æĺİ', 'æ¸ħ'] +['æĬĹ', 'çĶŁç´ł'] +['æīĵ', 'æŃ»'] +['å®Įåħ¨', 'ä¸įåIJĮ'] +['èĬ±', 'æ¤Ĵ'] +['æĶ¾', '宽'] +['ä½İ', '端'] +['åĽĽ', 'èĤ¢'] +['åĮĹ京', 'èµĽè½¦'] +['éĽĨ', 'å¸Ĥ'] +['æľª', 'å©ļ'] +['大å¹ħ', 'æıIJåįĩ'] +['建çŃij', '设计'] +['çĭ¬', 'æľīçļĦ'] +['æİ¢', 'éĻ©'] +['æ²³æµģ', 'åŁŁ'] +['æħķ', '容'] +['被', 'çĽĹ'] +['åĵº', 'ä¹³'] +['èı', 'ģ'] +['æĥ¬', 'æĦı'] +['è¶ĬæĿ¥è¶Ĭ', '好'] +['广大', '群ä¼Ĺ'] +['å¾·', 'èĤ²'] +['å¸Ĥåľº', 'ä»·æł¼'] +['奥', 'å·´'] +['奥巴', '马'] +['èĬĤ缮', 'ä¸Ń'] +['两', '款'] +['ä¸ĩä½Ļ', 'åħĥ'] +['ç»´', 'å°Ķ'] +['çĶŁçī©', 
'ç§ijæĬĢ'] +['åIJ¬', 'èµ·æĿ¥'] +['çł', 'ļ'] +['æĭŁ', 'å®ļ'] +['æ²¹', 'çͰ'] +['声', 'èªī'] +['建çŃij', 'ä¸ļ'] +['éĻIJ', 'è´Ń'] +['çīĩ', 'åŃIJ'] +['çķľ', '禽'] +['ç½ij', 'é¦ĸ页'] +['ä¼Ĺ', 'çѹ'] +['æĴŀ', 'åĩ»'] +['åīį', 'ä¸įä¹ħ'] +['åīį', 'ä¸ĸ'] +['åĽĽä¸ª', 'æĦıè¯Ĩ'] +['æµĭ', 'ç»ĺ'] +['éĺ²', '空'] +['漫éķ¿', 'çļĦ'] +['æ²IJ', 'æµ´'] +['æ¯Ķè¾ĥ', 'ç®Ģåįķ'] +['æµĭ', 'å®ļ'] +['åĽŀ', 'è°ĥ'] +['让', '人们'] +['èĴĭ', 'ä»ĭ'] +['èĴĭä»ĭ', 'çŁ³'] +['ç»ĵ', 'æĻ¶'] +['å¢ŀæ·»', 'äºĨ'] +['æĿ¡', 'è¯Ħ论'] +['åī¯', 'ä¼ļéķ¿'] +['ä½ı', 'æīĢ'] +['ç»Ļ', 'åĩºäºĨ'] +['è°ĥ', 'éħį'] +['æ²', 'ĸ'] +['æľī', 'ç͍'] +['æľīç͍', 'çļĦ'] +['ä¸ĢæĿ¡', 'é¾Ļ'] +['éĩİ', 'å¤ĸ'] +['ç¼ĺ', 'åĪĨ'] +['æ°¸è¿ľ', 'ä¸įä¼ļ'] +['æŀľ', 'æłij'] +['大åıij', 'å¿«ä¸ī'] +['麻', 'éĨī'] +['äºij', 'éĽĨ'] +['åİ»', 'åĵªéĩĮ'] +['åħ¥', 'å¸Ĥ'] +['ä»»', 'æĢ§'] +['建', 'æ¡£'] +['建档', 'ç«ĭ'] +['建档ç«ĭ', 'åį¡'] +['ä¸Ģ', '棵'] +['社', 'åįĢ'] +['缸', 'ä¼´'] +['åļ', '·'] +['å¡«', 'åħħ'] +['ä¸Ģ', 'æĹı'] +['ç¾', 'ģ'] +['åıĸ', 'è¯ģ'] +['èΰ', 'éĺŁ'] +['åİĤ', 'åĮº'] +['è¡·', 'å¿ĥ'] +['åıijå±ķ', 'éĺ¶æ®µ'] +['é«ĺ', '强度'] +['åĹĵ', 'åŃIJ'] +['é¢Ĩ', 'è¡Ķ'] +['楼', '主'] +['大', 'èĴľ'] +['æŀķ', '头'] +['ç²®', 'æ²¹'] +['é»Ħ', 'çĵľ'] +['æĵ', 'Ĵ'] +['å°ı', 'çĭĹ'] +['æĶ¹éĿ©', 'å§Ķ'] +['åįģ', 'åĪĨéĴŁ'] +['é²ľ', 'èī³'] +['åħ³', 'ç¾½'] +['çĭĢ', 'æħĭ'] +['å®ŀç͍', 'æĢ§'] +['å°ij', 'è§ģ'] +['é£ŀ', 'æī¬'] +['çͰ', 'éĩİ'] +['æIJ', 'Ĥ'] +['è¿Ļ个', 'è¯į'] +['åºĶæĢ¥', 'é¢Ħæ¡Ī'] +['è§Ĵ度', 'æĿ¥çľĭ'] +['æķ¬', 'çķı'] +['æ³ķ', 'å®Ŀ'] +['åĸĦ', 'æĦı'] +['æīĵ', 'æĸŃ'] +['对', 'åĨ³'] +['çµķ', 'å°į'] +['åĢŁ', 'æŃ¤'] +['å¼Ģ', 'æºIJ'] +['å°ı', '說'] +['ç¥', 'º'] +['å²ģ', '以ä¸ĭ'] +['éĢĢå½¹', 'åĨĽäºº'] +['ä¸įä¹ħ', 'åīį'] +['åĩº', 'åİĤ'] +['讽', 'åĪº'] +['æĿ¥çľĭçľĭ', 'åIJ§'] +['éŃĶ', 'åħ½'] +['çķĻ', 'ä¸ĭæĿ¥'] +['å±ħ', '室'] +['åłħ', 'æĮģ'] +['çľĭ', 'äºĨä¸Ģ'] +['çľĭäºĨä¸Ģ', 'çľ¼'] +['éĽĨåĽ¢', 'æĹĹä¸ĭ'] +['æĪĺ', 'æĪĺç»ĦåIJĪ'] +['è®¤çľŁ', 'èIJ½å®ŀ'] +['汽车', '产ä¸ļ'] +['çī©çIJĨ', 'åѦ'] +['æķ', 'µ'] +['éĴ', 'Ŀ'] +['åĽ¢', 'éķ¿'] +['ä¸įæĸŃ', 'æī©å¤§'] +['èĤ©', 'è´Ł'] +['åıijå±ķ', '缮æłĩ'] +['è³ĩ', 'éĩij'] +['åīį', 
'ç½®'] +['ä¸ŃåĽ½', 'åı¤ä»£'] +['æŃ»', 'åĪij'] +['åħħåĪĨ', 'ä½ĵçݰ'] +['åħ³', 'éŨ'] +['ç¾İ', 'æĦŁ'] +['æīĵ', 'åħ¥'] +['æĬijéĥģ', 'çĹĩ'] +['å°ij', 'çĪ·'] +['æłij', 'æŀĿ'] +['æ¶Īæģ¯', 'ç§°'] +['æ´Ľ', 'åħĭ'] +['åį', '¯'] +['è¿Ī', 'åIJij'] +['æİ¨', 'åĭķ'] +['ä»İä¸ļ', 'èĢħ'] +['åİ»', 'ä¹°'] +['欢', 'å¿«'] +['æĭ¥', 'æĮ¤'] +['马', 'æ¡¶'] +['æĬĬ', 'æİ§'] +['æĶ¿', 'åħļ'] +['å¼ł', 'æī¬'] +['客', 'æłĪ'] +['红', 'æĺŁ'] +['éĢģ', 'æĿ¥'] +['åħ¨åŁŁ', 'æĹħ游'] +['èĩª', 'ç§ģ'] +['åįģäºĮ', 'æĿ¡'] +['åı¹', 'æģ¯'] +['ä¸Ģ', 'èīĺ'] +['ä¿Ŀ', 'è´¹'] +['æĸ½å·¥', 'çİ°åľº'] +['æľī', '幸'] +['ç»Ń', 'èĪª'] +['åı¯èĥ½', 'æľĥ'] +['èĥĮ', 'åıĽ'] +['ä½£', 'éĩij'] +['ä¸ī', 'çŃīå¥ĸ'] +['å¾Ī', '满æĦı'] +['游æĪı', 'åľ¬'] +['群', 'éĩĮ'] +['æŀĦ', 'ä»¶'] +['åºı', 'å¹ķ'] +['太', 'æ¹ĸ'] +['æľ¨', 'è´¨'] +['æĻĭ', 'æ±Ł'] +['çµĤ', 'æĸ¼'] +['è·³', 'è·ĥ'] +['åĢºæĿĥ', '人'] +['çŃī', '诸å¤ļ'] +['æĶ¾', 'åĩº'] +['åħ³éĶ®', 'æĹ¶åĪ»'] +['æĦŁæŁĵ', 'èĢħ'] +['é£ŀè¡Į', 'åijĺ'] +['èĥĨ', 'åĽº'] +['èĥĨåĽº', 'éĨĩ'] +['æĬ±', 'æŃī'] +['åij¨', 'äºĮ'] +['æĸ°', 'æĹ¶æľŁ'] +['åĨ·éĵ¾', 'çµģ'] +['è¿Ļç§į', 'æĸ¹å¼ı'] +['该', 'æĿij'] +['åĽŀ', 'é¦Ī'] +['åŁºçĿ£', 'æķĻ'] +['人', 'åıĤ'] +['æŀ¯', 'çĩ¥'] +['æī¹åıij', 'å¸Ĥåľº'] +['åħħåĪĨ', 'èĤ¯å®ļ'] +['å¸Ĥ', 'æĶ¿åįı'] +['äºĭ', 'æ¥Ń'] +['龸', 'çİĭ'] +['çĥŃ', 'æIJľ'] +['åįģä¹Ŀ', '大'] +['ä¼´', 'æľī'] +['ç¾İåĽ½', 'æĢ»ç»Ł'] +['åŁİå¸Ĥ', '管çIJĨ'] +['ä¸ĭ', '令'] +['èĥ¸', 'åı£'] +['åıª', 'çŁ¥éģĵ'] +['åij¨', 'ä¸ī'] +['ç͍', 'æĪ¶'] +['éŃ', '¯'] +['å¿ĥ', 'è¡Ģ'] +['带头', '人'] +['åĮ»', 'åĬ¡'] +['åĮ»åĬ¡', '人åijĺ'] +['æİ§åζ', 'åύ'] +['ä½ľåĵģ', 'åĨħ容'] +['æĪĺ', 'åıĭ'] +['åİĨ', 'å¹´'] +['ä¸į', 'åħĭ'] +['ä¸įåħĭ', 'ä¸įåıĬ'] +['æĹ¥', 'æŃ£å¼ı'] +['è±IJ', 'å¯Į'] +['ç¨İ', 'è´¹'] +['æĹ¶', 'æķĪ'] +['å±ķ', 'ä½į'] +['è¡¡', 'éĺ³'] +['æĪ¿', '貸'] +['çĪĨ', '款'] +['ä¹IJ', 'æĦı'] +['çĶ·', '主'] +['å¯', '¬'] +['æľĥ', 'èѰ'] +['ä¹ĭ', 'å¤ľ'] +['åIJĮ', '樣'] +['ä¸įè¦ģ', '太'] +['ä¼Ĭ', 'æĸ¯'] +['ä¼Ĭæĸ¯', 'åħ°'] +['åŁºæľ¬', 'åİŁåĪĻ'] +['åİ»', 'æİī'] +['ä½İ', 'ä¿Ŀ'] +['个', '交æĺĵ'] +['个交æĺĵ', 'æĹ¥'] +['èģĬ', 'èģĬ'] +['åĽĽ', 'ä½į'] +['åħļç»Ħ', 'æĪIJåijĺ'] 
+['主è¦ģ', 'ä»İäºĭ'] +['å½±', 'éŁ³'] +['åĨĴ', 'åĩº'] +['åij¼åIJ¸', 'éģĵ'] +['è¾¾', 'å°Ķ'] +['æľ¨', 'åľ°æĿ¿'] +['诡', 'å¼Ĥ'] +['çģ¯', 'åħ·'] +['çģ«', 'çĥ§'] +['è§£', 'èĦ±'] +['æĦĪ', 'åıij'] +['æ¹ĸ', 'å·ŀ'] +['é£İ', 'ä¿Ĺ'] +['æĸ°', 'å½¢åĬ¿'] +['æĸ°å½¢åĬ¿', 'ä¸ĭ'] +['è²', 'Ŀ'] +['èĦ', 'ĵ'] +['åĬ¨åĬĽ', 'çĶµæ±ł'] +['é£ŀ', 'èι'] +['飧', 'æĢ§'] +['åĪ©', 'çī©'] +['åĪ©çī©', '浦'] +['ä¸į', '认è¯Ĩ'] +['ç¼ĸ', 'ç»ĩ'] +['ä½ľ', 'åĿĬ'] +['èģĮä¸ļ', 'æĬĢèĥ½'] +['çľĭ', 'è¦ĭ'] +['åĽ´', 'æ£ĭ'] +['æĺı', 'è¿·'] +['å½Ĵ', 'å±ŀäºİ'] +['æĤ¬', 'å´ĸ'] +['éĨ«', 'çĻĤ'] +['å®ĭ', '代'] +['åºĦ', 'æĿij'] +['èĹ', 'ķ'] +['çĮĽ', 'çĦ¶'] +['çĩĥæĸĻ', 'çĶµæ±ł'] +['å®ŀä½ĵ', 'åºĹ'] +['ä¸įè¶³', '以'] +['æĥħ', 'ç·'] +['æĥħç·', 'Ĵ'] +['å»Ĭ', 'åĿĬ'] +['ç͵', 'åı°'] +['åºĶ', 'åĬĽ'] +['ä¸Ńå°ı', 'åѦçĶŁ'] +['èĥ¡', 'åIJĮ'] +['éī´', 'åĪ«'] +['åĨħ', 'ç½®'] +['ä¹±', '象'] +['æ¬Ĭ', 'çĽĬ'] +['å¼ĢæĶ¾', 'å¼ı'] +['åįļ', 'æĸĩ'] +['讲', '课'] +['çŃī', 'åİŁåĽł'] +['ç©·', '人'] +['交', 'æĽ¿'] +['æĬ¤', 'çħ§'] +['åıijå±ķ', 'æľºéģĩ'] +['客', 'åķĨ'] +['åıį', 'ä¹ĭ'] +['ç±³', 'é¥Ń'] +['å¹¶', 'åıij'] +['å¹¶åıij', 'çĹĩ'] +['æ±ī', 'åŃIJ'] +['æŀľ', 'åĽŃ'] +['对æĪij', 'æĿ¥è¯´'] +['åģı', 'åIJij'] +['æī¹', '示'] +['读', 'åIJİ'] +['读åIJİ', 'æĦŁ'] +['æĺİ', 'æĻº'] +['åĽ´', 'çĿĢ'] +['åıį', '转'] +['æĿ¨', 'å¹Ĥ'] +['ä¸ĵ', 'åįĸ'] +['ä¸ĵåįĸ', 'åºĹ'] +['åıĹ', 'éĻIJ'] +['åºŁ', 'è¯Ŀ'] +['æŀģ', 'å°ij'] +['åįĪ', 'åIJİ'] +['è¿Ľ', 'ä¿®'] +['åīĬ', 'åĩı'] +['æľ¬ç§ij', 'çĶŁ'] +['ä¼ĺ', 'éĢī'] +['åħī', 'çħ§'] +['åıĻ', 'äºĭ'] +['åıĸ', 'æļĸ'] +['åĮĹ', 'è·¯'] +['æ¦', 'ķ'] +['èİĨ', 'çͰ'] +['楼', 'å±Ĥ'] +['天', 'èĬ±'] +['天èĬ±', 'æĿ¿'] +['çĤ', 'ľ'] +['å·²ç»ı', 'æľīäºĨ'] +['è¶', '¾'] +['çͳ', 'åįļ'] +['ç͵', 'éĺ»'] +['åĬŁ', '课'] +['æŃ¥', 'æŃ¥'] +['éĤ£ä¹Ī', '容æĺĵ'] +['æŃ¤', 'æĸĩ'] +['ä½', '°'] +['计', 'è¾ĥ'] +['çīĩ', 'éĿ¢'] +['ç͵影', 'éĻ¢'] +['ä¸į', 'åħ¬å¹³'] +['ä¸ī', 'æľŁ'] +['æĹħ游', 'èµĦæºIJ'] +['å¤ļç§į', 'å½¢å¼ı'] +['è£Ĥ', 'ç¼Ŀ'] +['åIJİ', 'æİĴ'] +['硬', '度'] +['åĽŀ', 'æļĸ'] +['éģĵ', 'æķĻ'] +['è´«', 'è¡Ģ'] +['æ¸ħ', 'é¦Ļ'] +['伤', 'çĹħ'] +['æĦı', '義'] +['çļĦ', 'ç¼ĺ'] +['çļĦç¼ĺ', 'æķħ'] 
+['åºĦ', '严'] +['åıªæĺ¯', '为äºĨ'] +['æīĵ', 'æĬĺ'] +['以', 'ä¾Ĩ'] +['滿', 'è¶³'] +['çİĽ', '丽'] +['風', 'éļª'] +['æĸĩ', 'ç§ij'] +['éħįå¤ĩ', 'äºĨ'] +['è¿Ľ', 'é£Ł'] +['æ¶', '¡'] +['è·¯', 'ç¨ĭ'] +['åı«', '声'] +['ä¸Ńå¿ĥ', 'åŁİåĮº'] +['æľīæīĢ', 'ä¸įåIJĮ'] +['å¼µ', 'è²¼'] +['é¢Ħ', 'æĬ¥'] +['æľīå¤ļ', 'ä¹Ī'] +['è¿Ľè¡Į', 'åħ¨éĿ¢'] +['æĽ¾', 'ç¶ĵ'] +['ä¸ī', '代'] +['å®ı', '大'] +['æ¸ħ', 'æī«'] +['éĢī', 'åĩº'] +['åĵª', 'ä¸Ģ个'] +['主', '義'] +['ä¾Ŀ', 'æĵļ'] +['çļ®', 'éĿ©'] +['èµ¶', 'æĿ¥'] +['çŃĽ', 'æŁ¥'] +['æ¨', 'Ł'] +['ä¿Ŀ', 'èįIJ'] +['åIJĥ', 'æĥĬ'] +['æľĭåıĭ们', '对'] +['ä»ĸ', 'æĺ¯ä¸Ģ个'] +['åºŁ', 'æ°Ķ'] +['æ»', 'ħ'] +['è´¢', 'ç¨İ'] +['æĿij', 'æĿijæ°ij'] +['èµĦ产', 'è´ŁåĢº'] +['å®ī', 'å¨ľ'] +['缮åīį', 'åĽ½åĨħ'] +['æĦŁè§ī', 'èĩªå·±'] +['çµIJ', 'åIJĪ'] +['éͦ', 'æłĩ'] +['éͦæłĩ', 'èµĽ'] +['æĽ´', 'æ·±'] +['åŁº', 'æķ°'] +['éħ¿', 'éħĴ'] +['çī¹èī²', '产ä¸ļ'] +['åİĭ', 'å®ŀ'] +['ä¾Ŀæ³ķ', '追究'] +['æ·¡', 'å®ļ'] +['ç®Ģ缴', 'å°±æĺ¯'] +['å£ĵ', 'åĬĽ'] +['æ°ij', 'å¿ĥ'] +['ä¸į', 'åIJĪéĢĤ'] +['çͱæŃ¤', 'åı¯è§ģ'] +['èµŀ', 'èªī'] +['æ¾', '¤'] +['åĩłå¹´', 'åīį'] +['åIJī', 'ä»ĸ'] +['çł´', 'æįŁ'] +['轻轻', 'åľ°'] +['å²Ľ', '屿'] +['æĦı', 'å¢ĥ'] +['ä»Ģä¹Ī', 'åı«'] +['åģĩ', 'è£ħ'] +['éĢģ', 'è´§'] +['å¹ķ', 'å¢Ļ'] +['妥', 'åįı'] +['åĽ½', 'æĹĹ'] +['äºĨ', 'å¾Īä¹ħ'] +['åĪĨ辨', 'çİĩ'] +['ç´', 'Ķ'] +['éĺ³', 'åĮº'] +['åĩŃ', 'çĿĢ'] +['åģľè½¦', 'ä½į'] +['京', 'éĥ½'] +['éĶ', '£'] +['æĵ', '¾'] +['è¿Ľ', 'éŨ'] +['åĪĺ', 'æµ·'] +['åĽĽ', '级'] +['女', 'è¶³'] +['è¡ĮæĶ¿', '审æī¹'] +['éģ¥', 'æİ§'] +['ä¸į', 'éĮ¯'] +['å¾Ĺ', 'å¾Ī好'] +['为', '缮çļĦ'] +['ä»į', 'æľª'] +['ç²¾', 'è£ħ'] +['éĢį', 'éģ¥'] +['å°½', '头'] +['çºł', 'ç¼ł'] +['éłĺ', 'å°İ'] +['æĭħ', 'è´Ł'] +['æĪĸèĢħ', 'åħ¶ä»ĸ'] +['åıªä¸įè¿ĩ', 'æĺ¯'] +['åı®', 'åĺ±'] +['åģĩ', 'åĨĴ'] +['æļĸ', 'æ°Ķ'] +['çĽIJ', 'åŁİ'] +['被', 'è§Ĩ为'] +['诺', 'è´Ŀå°Ķ'] +['ç»ĻäºĨ', 'æĪij'] +['è¿ij', 'åįĥ'] +['éĩį', 'åĽŀ'] +['éĨĴ', 'äºĨ'] +['ç͵', 'è§£'] +['忽çķ¥', 'äºĨ'] +['èĥĮ', 'éĥ¨'] +['æĸĩæĺİ', 'åŁİå¸Ĥ'] +['æº', 'ħ'] +['è²', 'ĵ'] +['æĬµ', 'æĮ¡'] +['åĸľæ¬¢', 'åIJĥ'] +['éĿĻéĿĻ', 'åľ°'] +['å¾Ī', 'æ·±'] +['åŁºç¡Ģ', 'çŁ¥è¯Ĩ'] 
+['è¿ĩ', 'éĶĻ'] +['çIJĨ', 'ç§ij'] +['交æµģ', 'åIJĪä½ľ'] +['èĪ', 'Ķ'] +['調', 'æŁ¥'] +['æħĪ', 'æĤ²'] +['éĴ', '°'] +['èĩ´', 'ç͵'] +['å®£ä¼ł', 'æ´»åĬ¨'] +['åıĺ', 'éĩı'] +['çļĦ人', 'æĿ¥è¯´'] +['æĹ¶', 'éļĶ'] +['ä¸į管', 'ä½ł'] +['缸', 'è¿ij'] +['è´µ', 'éĩijå±ŀ'] +['ä¹Łä¸į', 'åı¯èĥ½'] +['ç²ī', 'æľ«'] +['åįĹ', 'çĵľ'] +['çϽ', '马'] +['åħī', 'æºIJ'] +['éĩij', 'å¥ĸ'] +['çĭ¬', 'è§Ĵ'] +['çĭ¬è§Ĵ', 'åħ½'] +['妨', 'ç¢į'] +['ç»Ļ', 'åĬĽ'] +['ä½Ĩ', 'ä»į'] +['å¼łå®¶', 'åı£'] +['èIJ¬', 'åħĥ'] +['渲', 'æŁĵ'] +['éķ¿å¤§', 'äºĨ'] +['è®°èĢħ', 'äºĨè§£'] +['æĢĢ', 'çĿĢ'] +['è¦ģ', 'åѦä¼ļ'] +['游æĪı', '代'] +['游æĪı代', 'ç»ĥ'] +['äºĮ', 'çϾ'] +['æĦıè¯Ĩ', 'å½¢æĢģ'] +['çİ', 'º'] +['计åĪĴ', 'çĶŁèĤ²'] +['æī¾', 'åĩĨ'] +['åħ°', 'èĬ±'] +['è¿Ļ座', 'åŁİå¸Ĥ'] +['污', 'æ³¥'] +['å®ĺæĸ¹', '微信'] +['å½Ĵ', 'å±ŀ'] +['æ°§', 'æ°Ķ'] +['éģİç¨ĭ', 'ä¸Ń'] +['åį°è±¡', 'æ·±åĪ»'] +['稳', '妥'] +['çµIJ', 'æĿŁ'] +['åŃķ', 'æľŁ'] +['çī¹', 'æĿĥ'] +['åĿļ', 'åĽº'] +['顺', 'åĬ¿'] +['æŀľ', 'èͬ'] +['éĨ«', '師'] +['åİ', '®'] +['ä¹Łæĺ¯', 'å¦ĤæŃ¤'] +['é¦Ĵ', '头'] +['缸', 'åĬ©'] +['å¹²', '线'] +['ä¸Ģ', 'æľ¬ä¹¦'] +['ç»', '¥'] +['æĮ¯', 'å¥ĭ'] +['èĤ¾', 'èĦı'] +['åĭķ', 'çī©'] +['é£ŀ', 'è·ĥ'] +['èıľ', 'åĵģ'] +['å¤ļ', 'ä½Ļ'] +['å¤ļä½Ļ', 'çļĦ'] +['éĢĿ', 'ä¸ĸ'] +['æģĭ', '人'] +['å¼Ģåıij', 'åĪ©ç͍'] +['顺', '丰'] +['éĩİ', 'å¿ĥ'] +['æł¡', 'å¤ĸ'] +['æģIJ', 'é¾Ļ'] +['éĿ¢', 'åħ·'] +['éķ¿', 'è¾Ī'] +['éļı', 'å¤Ħ'] +['éļıå¤Ħ', 'åı¯è§ģ'] +['ç´§', '缺'] +['éĩį', 'ä¸Ń'] +['éĩįä¸Ń', 'ä¹ĭ'] +['éĩįä¸Ńä¹ĭ', 'éĩį'] +['奥', 'æĸ¯'] +['奥æĸ¯', 'åį¡'] +['ä¸Ģ个', 'å¤ļ'] +['ä¸Ģ个å¤ļ', 'æľĪ'] +['ä¸įåı¯', '缺å°ij'] +['æĸ°', 'æł¼å±Ģ'] +['æıIJ', 'æĮ¯'] +['è¡Į', 'è´¿'] +['æ¼Ĥ', 'æµģ'] +['èģĬ', 'åŁİ'] +['åħ´', '建'] +['è´¨', 'æ£Ģ'] +['ç§ģæľį', '游æĪı'] +['æĽ´', 'éĩįè¦ģ'] +['è´', '®'] +['çħ', 'ľ'] +['转åıĺ', '为'] +['è¿Ļ', '两年'] +['ä¿Ŀ', 'é²ľ'] +['æī§', 'æķĻ'] +['çĥ', '¨'] +['å¼Ģåıij', '建设'] +['è¿IJèIJ¥', '管çIJĨ'] +['误', 'å·®'] +['京', 'åī§'] +['å¸IJ', 'åı·'] +['å·¥ä½ľ', 'ä½ľé£İ'] +['ä¸ĸ', 'ä¿Ĺ'] +['çϽ', '宫'] +['天', 'åĽ½'] +['å¤©åĽ½', 'ç»§ç»Ń'] +['å·´', 'æĸ¯'] +['èIJ¥', 'åĪ©'] +['åĵģ', 'æł¼'] +['æĿijæ°ij', 
'们'] +['æĪ¿', '车'] +['çŃī', 'çĹĩçĬ¶'] +['å¦Ĥ', 'å®ŀ'] +['å®', '¸'] +['å±Ĥ', '级'] +['éĶĻ', 'è¿ĩäºĨ'] +['ç»ĵ', 'å®ŀ'] +['ç¬ij', 'èĦ¸'] +['羣å®ŀ', 'æĢ§'] +['éĥ½å¸Ĥ', 'æĬ¥'] +['é¥Ń', 'èıľ'] +['åºĶ', '注æĦı'] +['æĬ½', 'çĥŁ'] +['伪', 'éĢł'] +['åīį', 'ä¸Ģ天'] +['éŃĶ', 'é¾Ļ'] +['éŃĶé¾Ļ', '令çīĮ'] +['约', 'è°Ī'] +['绣çѹ', 'æİ¨è¿Ľ'] +['让', 'ç͍æĪ·'] +['åħ¨éĿ¢', 'èIJ½å®ŀ'] +['å¼Ħ', 'å¾Ĺ'] +['è°Ī', 'æģĭçα'] +['鸣', 'æĪIJéķ¿'] +['鸣æĪIJéķ¿', 'è®°'] +['æ´ĭ', 'æ´ĭ'] +['çĸı', 'æķ£'] +['éĿ¢ç§¯', '约'] +['æµĵ', '缩'] +['æĸ¯', 'é¡¿'] +['çĶŁæĢģ', 'åľĪ'] +['æī§', '导'] +['ç§»', 'éĢģ'] +['齿', 'è½®'] +['æł¹æľ¬', 'å°±ä¸į'] +['缩', 'åĩı'] +['èµ°', 'ä¸ĭåİ»'] +['çĿ«', 'æ¯Ľ'] +['ä¹Łä¸į', 'éĶĻ'] +['åıįæĺł', 'åĩº'] +['èĭ¦', 'æģ¼'] +['缸åħ³', 'æĶ¿çŃĸ'] +['é«ĺ', '楼'] +['ç²ī', 'èī²'] +['æĬķèµĦ', 'é¢Ŀ'] +['ä¸į', 'ç»ı'] +['ä¸įç»ı', 'æĦı'] +['å®ģ', 'æĦ¿'] +['èĪĮ', '头'] +['æ»ĭ', 'çĶŁ'] +['å®ģ', 'åİ¿'] +['åīįåĪĹ', 'èħº'] +['åĩ', '³'] +['é£Ł', '欲'] +['åıĸ', 'èĥľ'] +['éĻ¢', 'åŃIJ'] +['ç´łè´¨', 'æķĻèĤ²'] +['滨', 'å·ŀ'] +['æĬ¢', 'æĬĵ'] +['å¼Ĥ', 'åij³'] +['åĴ', 'ļ'] +['åĬ', 'į'] +['宽', 'éĺĶ'] +['æļ´', '涨'] +['æĥł', 'åıĬ'] +['è§Ħ', 'ç¨ĭ'] +['ä¾Ľ', 'åħ»'] +['éĢģ', 'å¾Ģ'] +['å±±', 'åºĦ'] +['举', 'äºļ'] +['å±ķ', 'é¦Ĩ'] +['è§£', 'éĶģ'] +['æĹł', 'è§Ĩ'] +['éĻį', 'èIJ½'] +['è¿ŀ', 'äºij'] +['è¿ŀäºij', '港'] +['åıĤ', 'è°ĭ'] +['çİ', 'ĸ'] +['ç¬', 'ĥ'] +['èĢĹ', 'è´¹'] +['æī¿', 'å¾·'] +['社ä¼ļ', 'æķĪçĽĬ'] +['åįĹæµ·', 'ç½ij'] +['åĪĽ', '伤'] +['èIJ', '±'] +['åħħ', 'æ²Ľ'] +['ç½ijç«Ļ', '建设'] +['大', 'åºĨ'] +['åĨį', 'éĢł'] +['åŃĹ', 'æł·'] +['åħ¨æ°ij', 'åģ¥èº«'] +['èĮ«', 'èĮ«'] +['æµ®', 'åĬ¨'] +['åīį', 'åı°'] +['å¢ŀ', '设'] +['éĢĽ', 'è¡Ĺ'] +['åĢĴ', 'éĹŃ'] +['æ³ķå¾ĭ', '顾éĹ®'] +['çĸ', '®'] +['çĹħ', 'çĹĩ'] +['空', 'åīį'] +['请', 'æķĻ'] +['èĥľ', 'ä»»'] +['æĿĢ', 'èıĮ'] +['æĪĺæĸĹ', 'æľº'] +['ç»ĺ', 'åζ'] +['å¤Ħ', 'æĸ¹'] +['çªģ', 'åĽ´'] +['çĮ«', 'åĴª'] +['æĬ¥åijĬ', 'æĺ¾ç¤º'] +['ç¿', 'Ł'] +['çķ¶', 'åľ°'] +['æľĢ', 'éļ¾'] +['纪', 'å§Ķ书记'] +['ä½İ', 'åİĭ'] +['èĻļ', '空'] +['è¿Ļéĥ¨', 'ç͵影'] +['产ä¸ļ', 'åįĩ级'] +['è°·', 'çα'] +['è°·çα', 'åĩĮ'] +['æĬ¼', 'éĩij'] +['女', 
'æĸ¹'] +['éĴ»', 'çłĶ'] +['æļĹ', 'æļĹ'] +['è¿·', 'ä½ł'] +['æīĢ', 'è¬Ĥ'] +['å¨ģ', 'å»ī'] +['å¼Ģ', 'æľĹ'] +['å²', 'Ķ'] +['çģ«', 'çĤ¬'] +['åIJĪçIJĨ', 'æĢ§'] +['åħ¬', 'åĬŀ'] +['ä¼ļ', 'ä¼ļéķ¿'] +['éĺ´', 'è°ĭ'] +['å¼Ģ', 'å±Ģ'] +['æĻ®éĢļ', 'è¯Ŀ'] +['åį¡', 'æĭī'] +['å°ij', 'åIJĥ'] +['éĹª', 'èĢĢ'] +['æŀľ', 'æ±ģ'] +['æī§è¡Į', 'åĬĽ'] +['è°', 'Ľ'] +['æĬ¢', 'åĬ«'] +['é«ĺéĢŁ', 'åıijå±ķ'] +['éŁ', '¬'] +['åįĹ', 'æ²Ļ'] +['é«ĺçŃī', 'åŃ¦æł¡'] +['æį¢', '个'] +['åı¯èĥ½', 'åŃĺåľ¨'] +['æĬ', 'Ĵ'] +['è°±', 'åĨĻ'] +['被', 'æĬĵ'] +['æĿ¯', 'åŃIJ'] +['èĬĤèĥ½', 'åĩıæİĴ'] +['æ°ĶåĢĻ', 'åıĺåĮĸ'] +['åĪĨ', 'åĪ¥'] +['ä¸Ń', 'æŀ¢'] +['欢', 'åij¼'] +['åħī', '纤'] +['è¿Ļ', '群'] +['çľ¼', 'çķĮ'] +['åħ±åIJĮ', 'åıijå±ķ'] +['çݰ', 'ä»Ĭ'] +['éĹ»', 'è¨Ģ'] +['çī¹èī²', 'å°ıéķĩ'] +['æķij', '人'] +['éĻį', 'æ°´'] +['ä¸ĸçķĮ', 'ä¸Ģæµģ'] +['å°±', 'é¤IJ'] +['çŀ', '¥'] +['å¤į', 'ä»ĩ'] +['ç¾½', 'æ¯Ľ'] +['ç¾½æ¯Ľ', 'çIJĥ'] +['è´©', 'åįĸ'] +['æºIJ', 'æ³ī'] +['æĢ»ä½ĵ', 'è§ĦåĪĴ'] +['åĬ¨', 'æĦŁ'] +['ä¸Ģ', '审'] +['åĢŁ', 'éĴ±'] +['è§ģ', 'æķĪ'] +['èĬ±', 'èįī'] +['åIJĮ', 'ä¸ļ'] +['æŁ¥', 'è©¢'] +['åĽ½éĻħ', 'åIJĪä½ľ'] +['ä¾Ľ', 'åĽ¾'] +['åģ', '´'] +['æł', 'ĵ'] +['缸', 'éĢļ'] +['è°Ī', 'åıĬ'] +['è¿ĩç¨ĭ', 'å½ĵä¸Ń'] +['é¦Ļ', 'èıĩ'] +['åįģåĽĽ', 'æĿ¡'] +['ä¸Ģå¼Ģå§ĭ', 'å°±'] +['ä¸ĵ', 'åijĺ'] +['æĺİ', '顯'] +['æīĵéĢł', 'åĩº'] +['ä¸ĭéĿ¢', 'æĪij们'] +['æľº', 'æ²¹'] +['åı°', 'è¯į'] +['åŃIJ', 'å¼Ł'] +['æľĢ', '常è§ģçļĦ'] +['æĪij', 'è®°å¾Ĺ'] +['ç»', '°'] +['æĤ¬', 'æµ®'] +['è¿ĺ', '羣æĺ¯'] +['æĮĤ', 'åı·'] +['åıĭ', 'åĸĦ'] +['éĩį', '伤'] +['çħ§', '亮'] +['æŃ¦', 'èѦ'] +['åĩºçݰ', 'éĹ®é¢ĺ'] +['è¸Ĭ', 'è·ĥ'] +['åľ°çIJĥ', 'ä¸Ĭ'] +['å¸Ĥ', '人大'] +['åıĹ害', '人'] +['å²', 'IJ'] +['åIJĮ', 'åѸ'] +['éĩijèŀį', 'å¸Ĥåľº'] +['æľīçļĦ', 'çݩ家'] +['å¸Ĥ', 'æķĻèĤ²'] +['å¸ĤæķĻèĤ²', 'å±Ģ'] +['åIJĦ', 'å¼Ĥ'] +['ç·ļ', 'ä¸Ĭ'] +['æģ', 'º'] +['æľī', '大éĩıçļĦ'] +['åķĨ', 'æĬ¥'] +['åįķ', 'åįķ'] +['åħ¨', 'é¢Ŀ'] +['ä¾ĿæĹ§', 'æĺ¯'] +['好', 'åĩłä¸ª'] +['åĸ', 'µ'] +['éĩį', 'æķ´'] +['çĶŁæ´»', 'è´¨éĩı'] +['æİ¢', '访'] +['åį°', 'èĬ±'] +['缼', 'è¡Į'] +['å¾®', 'è§Ĥ'] +['èĪį', 'å¾Ĺ'] +['åºŁå¼ĥ', 'çī©'] +['积', 
'èĵĦ'] +['å®ļ', 'å±ħ'] +['æĤ', '¼'] +['èĮ', '¸'] +['çļĦ', '帮åĬ©'] +['çļĦ帮åĬ©', 'ä¸ĭ'] +['亿', 'åIJ¨'] +['åŃĶ', 'éĽĢ'] +['è¿ĻæĿ¡', 'è·¯'] +['é¥', 'µ'] +['æĦĪ', 'åĬł'] +['éķ', 'į'] +['ä½ľ', 'æ¡Ī'] +['èįĶ', 'æŀĿ'] +['太', 'å°ij'] +['è·»', '身'] +['åħ¬çĽĬ', 'æ´»åĬ¨'] +['çϽ', 'æĸij'] +['æĬĢæľ¯', 'æ°´å¹³'] +['å¸', '§'] +['æĹł', 'çŁ¥'] +['åºĶ该', 'æĢİä¹Ī'] +['éĢĢ', 'å¸Ĥ'] +['æ¸', 'Ń'] +['åħ»', 'çĮª'] +['é©', '¼'] +['群', 'å²Ľ'] +['大', 'åį«'] +['ä¹ĺ', 'çĶ¨è½¦'] +['èı²', 'å°Ķ'] +['è´´', 'åIJ§'] +['åģľ', 'ä¸ĭæĿ¥'] +['æľīæľº', 'ç»ĵåIJĪ'] +['åĪ»', 'èĭ¦'] +['çļĦ', 'åľ°'] +['çļĦåľ°', 'æŃ¥'] +['è¯Ĭ', 'æīĢ'] +['å¼Ģ', 'æĪĺ'] +['èĢģ', 'çīĮ'] +['çѹ', 'çłģ'] +['åħ«å¤§', '以æĿ¥'] +['楼', 'æĪ¿'] +['åŃĻ', 'æĤŁ'] +['åŃĻæĤŁ', '空'] +['åħĴ', 'åŃIJ'] +['第ä¸Ģ', 'æĿ¡'] +['社交', 'åªĴä½ĵ'] +['æĥ³', 'èµ·æĿ¥'] +['大', 'æ´ĭ'] +['æĭ¼', 'éŁ³'] +['è¿Ľ', 'åįļä¼ļ'] +['è¿ĩ', 'åħ³'] +['æ²', '¼'] +['ç©¿', 'æIJŃ'] +['éĤ£', 'ä¸Ģ天'] +['çł´', 'éŨ'] +['æĬķæłĩ', '人'] +['èµ¢', 'å®¶'] +['èĻļ', 'å¼±'] +['æ¿', 'ĥ'] +['å®ī', 'æ£Ģ'] +['客', 'å®¶'] +['çĭ¬ç«ĭ', 'èij£äºĭ'] +['æīĭ', 'åĬ¿'] +['åīµ', 'éĢł'] +['åľĨ满', 'å®ĮæĪIJ'] +['为主', '线'] +['好å¥ĩ', 'å¿ĥ'] +['é¢Ĩ', 'åľŁ'] +['çª', 'ĸ'] +['åħ¸åŀĭ', 'æ¡Īä¾ĭ'] +['çªģåıij', 'äºĭä»¶'] +['åºķ', 'æ°Ķ'] +['头', 'æĻķ'] +['å®Ľ', 'å¦Ĥ'] +['è§', '¸'] +['æ¸ħ', 'æ·¡'] +['åļ', '¼'] +['åģľ', 'ç͵'] +['ç²ī', 'å°ĺ'] +['éĻįä½İ', 'æĪIJæľ¬'] +['æĶ¾', 'æīĭ'] +['è®°èĢħ', '表示'] +['æĭĸ', 'å»¶'] +['éª', 'ĩ'] +['æ®ĭ', 'å¿į'] +['çľģ', 'æķĻèĤ²'] +['çľģæķĻèĤ²', 'åİħ'] +['é«ĺ', 'é¢Ŀ'] +['éĦ', 'Ļ'] +['æ¥', 'ŀ'] +['åĨħ', 'ç§ij'] +['èIJ¥ä¸ļ', 'é¢Ŀ'] +['åŁº', 'çŁ³'] +['æµģ', 'æ·Į'] +['主', 'æĹ¨'] +['éĺIJ', 'éĩĬ'] +['建', 'åįİ'] +['æĥĬ', 'åı¹'] +['çī¢åĽº', 'æłijç«ĭ'] +['æĺ¯åIJ¦', 'åŃĺåľ¨'] +['建', 'åĨĽ'] +['éĽ¾', 'éľ¾'] +['åħ¬', '认'] +['åħ¬è®¤', 'çļĦ'] +['æ°¨', 'åŁº'] +['æ°¨åŁº', 'éħ¸'] +['åīį', 'åĩłå¹´'] +['åι', 'éĤ£'] +['æ±Ł', '举'] +['å·¥', 'æ¥Ń'] +['ä¸ĢçĤ¹', 'ä¹Łä¸į'] +['ä¿®', '士'] +['äºĨä¸Ģ', 'éģį'] +['åĪ', 'ģ'] +['æ»ļ', 'æ»ļ'] +['åĪĨ', 'æł¡'] +['羣', 'çα'] +['è¡Ģ', 'èĦī'] +['æĢ¥', 'åī§'] +['ä¸Ģ群', '人'] +['ç¾', '¯'] 
+['æĪIJ', 'é¾Ļ'] +['ç²¾ç¥ŀ', 'çĹħ'] +['缸åħ³', '人åijĺ'] +['éĿĵ', '丽'] +['ä¸ī', 'åŃ£åº¦'] +['åĪĴ', 'å®ļ'] +['ä¸ĸçķĮ', '第ä¸Ģ'] +['éĢļ', 'ä¿Ĺ'] +['åķĨä¸ļ', 'åľ°äº§'] +['åĬŁèĥ½', 'æĢ§'] +['èµĦæľ¬', '主ä¹ī'] +['详', 'è§ģ'] +['æĬĵ', 'æįķ'] +['æĸĩ', 'æĺĮ'] +['å®Ŀ', 'å®ī'] +['è£ħéħį', 'å¼ı'] +['æºIJ', 'æºIJ'] +['æºIJæºIJ', 'ä¸įæĸŃ'] +['çĶŁ', 'æĢķ'] +['纵', 'åIJij'] +['å£', '½'] +['çľ¼', 'è¢ĭ'] +['èĤī', 'ä½ĵ'] +['åı¤', 'ä»Ĭ'] +['èŀį', 'åªĴä½ĵ'] +['åģ', 'ī'] +['æł¼', 'æľĥåĵ¡'] +['çĥ', '·'] +['åĬŁ', 'ç͍'] +['æīŃ', '磩'] +['绿èī²', 'éĢļéģĵ'] +['åī§', 'ç»Ħ'] +['å¼±', 'åĬ¿'] +['è´¨éĩı', 'éĹ®é¢ĺ'] +['éĻIJ', 'é¢Ŀ'] +['éª', 'Ĩ'] +['éģµ', 'ä¹ī'] +['å¯Ŀ', '室'] +['æĥ³', '念'] +['åł±', 'åijĬ'] +['ä»ħ', '次'] +['ä»ħ次', 'äºİ'] +['èŀį', 'åĪĽ'] +['æĭĽèģĺ', 'ä¼ļ'] +['åºĬ', 'åŀ«'] +['转åŀĭ', 'åıijå±ķ'] +['ä¸ŃåĽ½', 'çĶµä¿¡'] +['åIJ¬', 'è¯Ŀ'] +['è«ĭ', 'æ±Ĥ'] +['大éĥ¨åĪĨ', '人'] +['æ´»', 'å¾Ĺ'] +['åĵŃ', 'æ³£'] +['è¶', 'Ļ'] +['åıijçĹħ', 'çİĩ'] +['ä¸į', '符'] +['åĨĽ', 'å®ĺ'] +['é¢Ī', 'æ¤İ'] +['æĸ°åĨł', 'çĸ«æĥħ'] +['æŁ¬', 'åŁĶ'] +['æŁ¬åŁĶ', '寨'] +['ä»»ä½ķ', 'å½¢å¼ı'] +['人', 'éĻħ'] +['人éĻħ', 'åħ³ç³»'] +['æĢ»', 'æī¿åĮħ'] +['å¹³åĿĩ', 'æ¯ı'] +['æģŃ', 'åĸľ'] +['åĦ', 'ĺ'] +['åħµ', '马'] +['è¿Ł', 'åΰ'] +['å·¥', '伤'] +['çīĪæĿĥ', 'å½Ĵ'] +['çīĪæĿĥå½Ĵ', 'åİŁ'] +['æĭ¥', 'æĬ¤'] +['ç³Ĭ', 'æ¶Ĥ'] +['å¹²', 'æ¶ī'] +['å°ij', 'ä¸įäºĨ'] +['æĥ³', 'æī¾'] +['è´¹', 'çİĩ'] +['该', 'éĻ¢'] +['èŀį', 'åĮĸ'] +['è¿İ', 'åIJĪ'] +['è§ĨåIJ¬', 'èĬĤ缮'] +['æł¼', 'ç¶²ç«Ļ'] +['çľī', 'æ¯Ľ'] +['欢è¿İ', '大家'] +['å®¶åºŃ', 'æķĻèĤ²'] +['ä¾µ', 'èļĢ'] +['ç»Ļ', 'ä½łä»¬'] +['è¡Ģæ¶²', '循çݯ'] +['å¯Ħ', 'æīĺ'] +['å°ĸ', 'åı«'] +['以ä¸ĭ', 'åĩłä¸ª'] +['è¿ĺ', '以为'] +['åħ¶ä»ĸ', 'çݩ家'] +['ç¬ij', 'ç¬ij'] +['æīĵ', 'åIJ¬'] +['èĩªçĦ¶', 'ç§ijåѦ'] +['åŁº', 'ç«Ļ'] +['ä¹Ŀ', 'å·ŀ'] +['ä¿Ŀ', '驾'] +['ä¿Ŀ驾', 'æĬ¤'] +['ä¿Ŀ驾æĬ¤', 'èĪª'] +['æĶ¾', 'çľ¼'] +['çŁ¥åIJį', 'ä¼ģä¸ļ'] +['ç¸', '®'] +['ç¨', '½'] +['æļ', 'ĩ'] +['使ç͍', '網路'] +['é¢Ħ', 'çķĻ'] +['大', '象'] +['åıijæĺİ', 'ä¸ĵåĪ©'] +['æĸĩ', '娱'] +['éĢł', 'ç¦ı'] +['湿', '润'] +['éĿ¢', 'æĿ¡'] +['æ¶Īè´¹', 'åįĩ级'] +['è®Ĭ', 'å¾Ĺ'] +['åĩł', 'åIJį'] 
+['ä»', 'Ħ'] +['认', 'æ¸ħ'] +['è¿ľ', 'æĻ¯'] +['æıĴ', '座'] +['诸', '侯'] +['åıĺ', 'æĢģ'] +['ç¦ı', '彩'] +['è´§', 'æŀ¶'] +['失', 'æİ§'] +['ç§»åĬ¨', '端'] +['ä¸Ĭ', 'åı¸'] +['éĢł', '纸'] +['å¸ĥ', 'æľĹ'] +['çĴ', 'ĩ'] +['åı°', 'åįĹ'] +['åĮĹ京', 'åĨ¬å¥¥'] +['èĵĿ', 'çīĻ'] +['éķ¿', 'çŁŃ'] +['æĬĺ', 'å°Ħ'] +['ç»ij', 'æŀ¶'] +['å¯Ĵ', 'åģĩ'] +['转', 'åŁºåĽł'] +['æĢ¥', 'äºİ'] +['æŃ£', 'åĵģ'] +['åħħ', '滿'] +['大', '纲'] +['æĬĹ', 'ä½ĵ'] +['è¨ĵ', 'ç·´'] +['æĶ¶', 'ç´§'] +['æ¯Ķ', 'è³½'] +['åħµ', 'åĬĽ'] +['æľ¬', 'æĽ¸'] +['äºĮ', '代'] +['æĢ¥', 'è¯Ĭ'] +['æĸĩ', 'æ¡Ī'] +['ç»ı', 'åķĨ'] +['æĻ¨', 'æĬ¥'] +['æ£', 'ĺ'] +['æĢ»ä¹¦è®°', 'åľ¨'] +['åıĹ', 'éĤĢ'] +['äºĶ', 'åĽĽ'] +['å²Ń', 'åįĹ'] +['çα', 'åIJĥ'] +['åŁĥ', 'å°Ķ'] +['å¿ĥ', 'å¢ĥ'] +['è¦ĨçĽĸ', 'éĿ¢'] +['å®ŀåľ¨æĺ¯', '太'] +['æł¹', 'åºķ'] +['纷纷', '表示'] +['åĹ', 'ħ'] +['éļıçĿĢ', 'æĹ¶éĹ´'] +['åİĨåı²', 'æĤłä¹ħ'] +['éħ', 'ī'] +['æĢ»', 'éĺŁ'] +['主é¢ĺ', 'æ´»åĬ¨'] +['éĹ®', 'åį·'] +['é©¿', 'ç«Ļ'] +['æı¡', 'ä½ı'] +['åı¯èĥ½', '导èĩ´'] +['æ°ij', 'éĸĵ'] +['éĸĭ', 'åķŁ'] +['ä½Ĩ', 'ä¸įéĻIJ'] +['ä½Ĩä¸įéĻIJ', 'äºİ'] +['åįģ', 'éĩĮ'] +['å¨', '¥'] +['æįŁ', 'èĢĹ'] +['çĸı', '导'] +['çݯ', 'æ°§'] +['ç¥ŀ', 'éĢļ'] +['çα', 'å°Ķ'] +['çαå°Ķ', 'åħ°'] +['æľ´', 'å®ŀ'] +['å¿«', 'æĬ¥'] +['æĶ¶', 'åıĹ'] +['æĪĸ', '許'] +['èĥĮ', 'éĿ¢'] +['æĸĩåĮĸ', 'ä¼łåªĴ'] +['ä¸ī', 'åĢĭ'] +['æĶ»', 'åĬ¿'] +['å®ī', '举'] +['å®ī举', 'å°¼'] +['åĿĩ', 'å·²'] +['顾', 'èĻij'] +['éĦ', 'Ń'] +['è¿Ļå®¶', 'åħ¬åı¸'] +['åħ¬åijĬ', 'ç§°'] +['æıIJä¾Ľ', 'ä¼ĺè´¨'] +['稳æŃ¥', 'æİ¨è¿Ľ'] +['å¤į', 'è¯ķ'] +['å°Ĩ', 'é¢Ĩ'] +['è°Ī', 'èµ·'] +['å¨', 'Ħ'] +['è¿ŀ', '线'] +['æ©Ł', 'éĹľ'] +['åºĶç͍', 'åľºæĻ¯'] +['çĶ»', 'åĥı'] +['è´¢', 'è¿IJ'] +['ä¿Ŀ', 'éļª'] +['çĹħ', 'çIJĨ'] +['æ¯Ľ', '主å¸Ń'] +['ä¸Ŀ', '毫ä¸į'] +['çα', 'å¥ĩ'] +['çαå¥ĩ', 'èīº'] +['ä¸ĵå®¶', 'ç»Ħ'] +['åij¼', 'åͤ'] +['éĭ', '¼'] +['çģ', '¸'] +['é¢ĨåħĪ', 'åľ°ä½į'] +['æıIJ', 'æĭĶ'] +['龸', 'éģĵ'] +['å±±', 'åĿ¡'] +['èĿ', 'İ'] +['沸', 'èħ¾'] +['该', '项'] +['ä»Ĭ', 'çĶŁ'] +['ä¸Ģç¯ĩ', 'æĸĩ竳'] +['æĸ¹å¼ı', 'è¿Ľè¡Į'] +['é»ij', '客'] +['æĶ¹', 'åĬ¨'] +['主', 'é¡Į'] +['æķ£', 'å¸ĥ'] +['ä»Ģä¹Ī', 'åľ°æĸ¹'] +['åĮĸ', 
'åIJĪ'] +['åĮĸåIJĪ', 'çī©'] +['éĿĻ', 'ç͵'] +['æĢ»', 'æĶ¶åħ¥'] +['å§Ķ', 'ç»Ħç»ĩ'] +['å§Ķç»Ħç»ĩ', 'éĥ¨'] +['éĿĻ', 'æĢģ'] +['èĢģ', 'åŃĹåı·'] +['室', 'åıĭ'] +['éĥ½ä¸į', 'æķ¢'] +['æŀ¶', 'åŃIJ'] +['çģµ', 'æķı'] +['审', 'è§Ĩ'] +['æĤ£', 'åĦ¿'] +['å±±', '寨'] +['èĸª', 'èµĦ'] +['é©°', 'æı´'] +['éĥ¨åĪĨ', 'åĨħ容'] +['好', 'ä¼¼'] +['æĪIJåijĺ', 'åĽ½'] +['åľ¨æĪij', 'çľĭæĿ¥'] +['åħ³æ³¨', '度'] +['éĻĪ', 'æŁIJ'] +['è¿Ļç§į', 'äºĭæĥħ'] +['éĢī', 'å®ļ'] +['ç²¾', 'åŃIJ'] +['å£ģ', 'çĶ»'] +['æ±Ł', 'æ·®'] +['é«ĺ', 'æĺĤ'] +['æł¼', 'åĬĽ'] +['è¼', '©'] +['åѦ', 'åłĤ'] +['æĤ¨', 'åIJĮæĦı'] +['ä¸ĢåĪĩ', 'éĥ½æĺ¯'] +['æ½', '¤'] +['éĸ', 'ĥ'] +['å¸ĮæľĽ', 'èĩªå·±'] +['ä¿', 'ĺ'] +['æ±Ł', 'åİ¿'] +['æ³', '¾'] +['ç§ij', 'æķĻ'] +['æīĵ', 'è¿Ľ'] +['ä¸į', 'æħİ'] +['å¯Ĵ', 'åĨ¬'] +['æ¸Ķ', 'æ°ij'] +['鼷', 'æĸ¯'] +['主', 'å®°'] +['æĹħ游', '度åģĩ'] +['ç͵åŃIJ', 'éĤ®ä»¶'] +['æ±Ĥ', 'å©ļ'] +['éļİ', '段'] +['åģ¥èº«', 'æĪ¿'] +['注æĺİ', 'åĩºå¤Ħ'] +['äºĭæķħ', 'åıijçĶŁ'] +['级', '以ä¸Ĭ'] +['åŃĺ', 'æ´»'] +['æĸ½', 'èĤ¥'] +['èľľ', 'èľĤ'] +['åµ', '©'] +['æĮĸæİĺ', 'æľº'] +['æĬĹ', 'æĭĴ'] +['ä¼ł', '导'] +['æĺ¯ä»Ģä¹Ī', 'åij¢'] +['ä¸Ĭå¹´', 'åIJĮæľŁ'] +['建', 'åħļ'] +['çĶŁ', 'æħĭ'] +['ä¿Ŀ', 'ä½ı'] +['款', '车åŀĭ'] +['人', 'èĦī'] +['éļIJ', 'èͽ'] +['失', 'æķĪ'] +['éģ¿', 'åŃķ'] +['ç®Ģ', '便'] +['谢谢', 'ä½ł'] +['å®Ī', 'ä½ı'] +['æĶ¾', 'æĺł'] +['è¨Ī', 'çķ«'] +['çݰ代', 'çµģ'] +['é¤IJ', '廳'] +['æķħ', 'å±ħ'] +['大', '大å°ı'] +['大大å°ı', 'å°ı'] +['çī¹åĪ«', '声æĺİ'] +['éģį', 'åıĬ'] +['å¿ĥçIJĨ', 'åĴ¨è¯¢'] +['è³', '´'] +['çĮ®', 'è¡Ģ'] +['å·²ç»ı', 'è¾¾åΰ'] +['æīĵ', 'æĭĽåij¼'] +['åıĮ', 'è¾¹'] +['ä¸Ģæĸ¹éĿ¢', 'æĺ¯'] +['å´ĩ', 'å°ļ'] +['éĺ¿', 'å¯Į'] +['éĺ¿å¯Į', 'æ±Ĺ'] +['æĮģ', 'æľī人'] +['è±', 'ģ'] +['é£İ', 'çŃĿ'] +['åĬ¨', 'èį¡'] +['äºĨä¸Ģ', 'ä¼ļ'] +['äºĨä¸Ģä¼ļ', 'åĦ¿'] +['ä¸ĩ', '象'] +['çľĭ', 'ç͵è§Ĩ'] +['åįģä¸ī', 'æĿ¡'] +['çĮĽ', 'çĥĪ'] +['è¦ģ', 'ä¸įçĦ¶'] +['太æŀģ', 'æĭ³'] +['å¼ķ', 'çĪĨ'] +['ç»ıè¿ĩ', 'å¤ļå¹´'] +['游æĪı', 'éĩĮçļĦ'] +['é¾Ļ', 'æ³ī'] +['æłĩ', 'éħį'] +['è®ĵ', 'ä»ĸåĢij'] +['éĢł', 'æŀĹ'] +['åĮºåŁŁ', 'æĢ§'] +['亿', 'ä¸ĩ'] +['æĪĺçķ¥', 'å¸ĥå±Ģ'] +['éķĩ', 'æĶ¿åºľ'] +['åĶ®', 
'票'] +['çĶŁäº§', 'å·¥èīº'] +['éķĩ', 'åħļå§Ķ'] +['ä¸Ńå°ı', 'åŀĭ'] +['æľ¨', 'è̳'] +['æ²³', 'è¾¹'] +['èĦ¾', 'èĥĥ'] +['欢è¿İ', 'æĤ¨'] +['åıĺ', 'å¼Ĥ'] +['缤', '纷'] +['åŀĥåľ¾', 'æ¡¶'] +['辩', 'è¯ģ'] +['车', 'åºĵ'] +['æ¯Ķ', 'çİĩ'] +['åħ´', 'æĹº'] +['详ç»Ĩ', 'äºĨè§£'] +['å®ī', 'å±ħ'] +['çħ§', 'æĸĻ'] +['æĸ¹', 'æīį'] +['èµ', '¦'] +['åĨ', 'ķ'] +['å¥Ķ', 'èµ´'] +['å®Ŀ', '鸡'] +['åľº', 'åĿĩ'] +['缮åīį', 'æŃ£åľ¨'] +['åIJŀ', 'åϬ'] +['è¿°', 'èģĮ'] +['æĩ', 'µ'] +['å¥ĩ', 'çijŀ'] +['ä»į', 'å°Ĩ'] +['èĪī', '辦'] +['å·¥åķĨ', 'å±Ģ'] +['å¡ij', 'èĥ¶'] +['åĬŀ', 'å®ŀäºĭ'] +['æĸ¹', 'æĸ¹éĿ¢'] +['æĸ¹æĸ¹éĿ¢', 'éĿ¢'] +['æĸĩåĮĸ', 'èĬĤ'] +['åħ¥', 'èģĮ'] +['é¸', '¥'] +['ç©¿', 'éĢı'] +['以', 'ä¹łè¿ijå¹³'] +['åį±', 'éļª'] +['æľ¦', 'èĥ§'] +['åİĨåı²', 'æĢ§'] +['æķŀ', 'å¼Ģ'] +['ä¼Ļä¼´', 'åħ³ç³»'] +['çŁ¿', 'åĮº'] +['åĽ½éĻħ', 'åľ¨çº¿'] +['ä¼łå¥ĩ', 'éĩĮéĿ¢'] +['è¿ij', 'äºĽ'] +['è¿ijäºĽ', 'å¹´'] +['åĬ£', 'åĬ¿'] +['æĶ»åĩ»', 'åĬĽ'] +['æĻº', 'éĢł'] +['ç¦', '§'] +['çİĭ', 'åħĪçĶŁ'] +['éĨ«', 'çĶŁ'] +['åĽĽ', '项'] +['å®ŀ', 'æĻ¯'] +['åĪĿ', 'åĪĽ'] +['å¿ĥ', '裡'] +['æĻ¶', 'ä½ĵ'] +['交', 'éĻħ'] +['让', 'æ¶Īè´¹èĢħ'] +['课', 'æĸĩ'] +['æİĴ', 'æ°Ķ'] +['å¹¶ä¸į', 'æĦıåij³'] +['缸', '声'] +['第ä¸Ģ', 'å±Ĭ'] +['åİŁ', 'èijĹ'] +['éĽ', 'ľ'] +['没æľī', '太大'] +['è¡¥', 'æ°´'] +['çµģ', 'ä¼ģä¸ļ'] +['第äºĮ', 'æī¹'] +['åħ¶å®ĥ', 'éĹ®é¢ĺ'] +['æİĮ', 'éŨ'] +['责任', 'å¿ĥ'] +['é¤IJ', 'åħ·'] +['ç¾Ĭ', 'æ¯Ľ'] +['没æľī', 'å¿ħè¦ģ'] +['ä¹IJ', 'åĽ¢'] +['è¿Ľ', 'åŁİ'] +['ä¸ĢçĤ¹', 'åĦ¿'] +['身', 'å½¢'] +['çļ®èĤ¤', 'çĹħ'] +['æĺ', '±'] +['å¢ŀ', 'èĩ³'] +['èģ²', 'æĺİ'] +['æıIJ', 'è´¨'] +['ä½ĵèĤ²', 'åľº'] +['çѹ', '建'] +['é¬', 'Ĩ'] +['车', 'çīĮ'] +['éļĶ', 'éŁ³'] +['è´Łè´£', 'åIJĮå¿Ĺ'] +['丰', 'ç¡ķ'] +['ä½Ľ', 'éĻĢ'] +['äºī', 'åIJµ'] +['åº', '¶'] +['æ·¡', 'æ°´'] +['å°ı', 'çĶ·åŃ©'] +['ç§ģ', 'èĩª'] +['åĮĸ', 'è¿Ľç¨ĭ'] +['æĪĺ士', 'æĿ¥è¯´'] +['æ²¹', 'èħ»'] +['èĦ±è´«', 'èĩ´å¯Į'] +['æĹ¥å¸¸', 'å·¥ä½ľ'] +['交', 'èŀį'] +['åĨľ', 'è´¸'] +['åĨľè´¸', 'å¸Ĥåľº'] +['åĵĪ', 'çĻ»'] +['ç͵', 'è´¹'] +['èµ', 'ĺ'] +['åıĮ', 'èħ¿'] +['æĵĶ', 'å¿ĥ'] +['æĿ¥', '形容'] +['使åij½', 'æĦŁ'] +['éĤ£ä¹Ī', 'ç®Ģåįķ'] +['èĬĻ', 
'èĵī'] +['åĢŁæ¬¾', '人'] +['ç§Ģ', '丽'] +['è®ĵ', 'ä»ĸ'] +['严åİī', 'æīĵåĩ»'] +['è³', 'ŀ'] +['æļ', '«'] +['çħ¤', 'æ°Ķ'] +['çά', 'ä¸Ĭ'] +['æ½ĩ', 'æ´Ĵ'] +['太', 'ä¹ħ'] +['åij½', 'åIJį为'] +['è·¯', 'çͱ'] +['è·¯çͱ', 'åύ'] +['é©', '¯'] +['æıIJ', 'æĹ©'] +['æĬĹåĩ»', 'çĸ«æĥħ'] +['åĩ', 'Ľ'] +['交', 'åıĭ'] +['éĶĢåĶ®', 'æ¸łéģĵ'] +['毫ä¸į', 'çĬ¹è±«'] +['èIJ¥', 'åľ°'] +['çłĶç©¶', '表æĺİ'] +['é±¼', 'ç±»'] +['æį¢', 'å±Ĭ'] +['æİ¡', 'åıĸ'] +['çī', 'Ĩ'] +['缼', 'å¼Ģ'] +['æ²§', 'æ¡ij'] +['åºŃ', '审'] +['ç»ı', 'æŁ¥'] +['åĬł', 'å¼·'] +['缸æ¯Ķ', 'äºİ'] +['ä¸ĵ', 'çıŃ'] +['ä½ĵ', 'åŀĭ'] +['被', '害'] +['被害', '人'] +['æĶ¶', '款'] +['åħ·æľī', 'èī¯å¥½'] +['é«ĺå³°', 'æľŁ'] +['åģı', 'ä½İ'] +['åĦ', 'Ł'] +['åĨľä¸ļ', 'ç§ijæĬĢ'] +['ç®Ĭ', 'æĥħåĨµ'] +['å¦Ĥæŀľ', 'çݩ家'] +['éķ¿', '约'] +['第åħŃ', 'å±Ĭ'] +['åħ¬å¼Ģ', 'æĭĽèģĺ'] +['åĪĩ', 'æĸŃ'] +['è¿«', '使'] +['çĸĹ', 'ç¨ĭ'] +['第äºĮ', 'ç§į'] +['ä¸į', 'åħį'] +['å¹²', 'èѦ'] +['çŁ³', '榴'] +['åĹ', '£'] +['两', 'ç±»'] +['çε', '士'] +['åŁİ乡', 'å±ħæ°ij'] +['æŃ¤', '项'] +['缴', 'è¾ĸ'] +['缴è¾ĸ', 'å¸Ĥ'] +['åij¼', 'åºĶ'] +['éĴ', '¯'] +['ç¦ı', 'å¾·'] +['æľº', '身'] +['æĵį', 'åľº'] +['æ¿Ĵ', '临'] +['人群', 'ä¸Ń'] +['èĤ¡', 'æ°ij'] +['åŃ', '½'] +['æ³ķ', 'åħ°'] +['é¨', 'İ'] +['糯', 'ç±³'] +['æĢ»', 'çļĦ'] +['æĢ»çļĦ', 'æĿ¥è¯´'] +['åħ¸', 'éĽħ'] +['æĸ°', 'éĻĪ'] +['æĸ°éĻĪ', '代谢'] +['缮', 'çĿ¹'] +['é¢Ħ', 'è¨Ģ'] +['è·Į', 'çł´'] +['æĸ°', 'ç¯ĩ竳'] +['æ¯Ĵ', 'æĢ§'] +['åĸĿ', 'èĮ¶'] +['æŁ¥', 'èİ·'] +['亮', '丽'] +['çĶŁäº§', 'åķĨ'] +['æĶ¹', 'æĪIJ'] +['为äºĨ', 'æĽ´å¥½'] +['æ·±', '交'] +['深交', 'æīĢ'] +['æİ', 'ĥ'] +['ä¹Ļ', 'èĤĿ'] +['泸', 'å·ŀ'] +['åħĪè¿Ľ', 'æĬĢæľ¯'] +['è¾ĵ', 'ç»Ļ'] +['æķ£', 'æĪ·'] +['æĢĿç»´', 'æĸ¹å¼ı'] +['åºĹ', '主'] +['è°ĭ', 'æ±Ĥ'] +['游æĪı', 'æĬĢå·§'] +['ä¸Ģå¹´', '级'] +['çľ¼', 'è§Ĵ'] +['ä¸Ńä»ĭ', 'æľºæŀĦ'] +['å·§', 'åIJĪ'] +['éĺ²', 'çĽĹ'] +['导', 'è´Ń'] +['æĪ', 'Ĭ'] +['æĽ´', 'éĢĤåIJĪ'] +['åŁºæľ¬', 'ä¿¡æģ¯'] +['马', 'ä¸ģ'] +['åħ»æ®ĸ', 'åľº'] +['åıį', 'è¿ĩæĿ¥'] +['æİ¨', 'å´ĩ'] +['å¯ĨåĪĩ', 'åħ³æ³¨'] +['åŁºéĩij', 'ç»ıçIJĨ'] +['æĮī', 'éĶ®'] +['åĨħéĥ¨', 'æİ§åζ'] +['æĪIJåijĺ', 'åįķä½į'] +['æľ¯', 'è¯Ń'] +['åζ', 
'æľį'] +['åĪļ', 'éľĢ'] +['æ£Ģ', 'ç´¢'] +['大大', 'æıIJé«ĺ'] +['åģ¥åº·', '管çIJĨ'] +['èĩª', 'æŃ¤'] +['客æĪ·', 'éľĢæ±Ĥ'] +['丰', 'èĥ¸'] +['èµ·', 'éĩį'] +['èµ·éĩį', 'æľº'] +['æ¬ł', '缺'] +['æ¡Ī', 'åŃIJ'] +['æĥħ人', 'èĬĤ'] +['åħļ', 'æł¡'] +['è¢', 'ľ'] +['该', 'åī§'] +['迷失', 'ä¼łå¥ĩ'] +['ç»ļ', '丽'] +['åķ', 'ª'] +['æĹł', 'ç§ģ'] +['é̲', 'ä¸ĢæŃ¥'] +['第ä¸Ģ', '竳'] +['åύ', 'åħ·'] +['åĨľ', 'èµĦ'] +['確', '實'] +['åºı', 'åĪĹ'] +['娱ä¹IJ', 'å¹³åı°'] +['èŀįèµĦ', 'ç§Łèµģ'] +['èµĦæºIJ', 'åħ±äº«'] +['èģ½', 'åΰ'] +['æIJŀ', 'å¾Ĺ'] +['ç»§ç»Ń', 'ä¿ĿæĮģ'] +['åIJ¯', 'èĴĻ'] +['çľ', 'º'] +['ä¸Ŀ', 'è·¯'] +['设æĸ½', '建设'] +['æİ¥', 'åľ°'] +['æİ¥åľ°', 'æ°Ķ'] +['第ä¸ī', 'åŃ£åº¦'] +['åŁº', 'è°ĥ'] +['åıij', 'éŁ³'] +['社ä¼ļ', 'èµĦæľ¬'] +['éĽĩ', '主'] +['è¿ŀ', 'èĥľ'] +['没', 'åķ¥'] +['å»', '¢'] +['èµ¶', 'èµ´'] +['æ¼Ķ', 'åĮĸ'] +['åı¤', 'æĢª'] +['çİĭ', 'çĪ·'] +['é¢Ħ', 'åħĪ'] +['å¼Ģ', 'åħ·'] +['åĽŀ', 'é¦ĸ'] +['åľ°ä¸ĭ', 'æ°´'] +['å°ıç¼ĸ', 'ä¸Ģèµ·'] +['èµİ', 'åĽŀ'] +['åľ°', 'è²Į'] +['åĪĿ', 'ä¸ī'] +['åı¯', 'ç͍äºİ'] +['éģĹ', '迹'] +['è¿Ļ', 'æī¹'] +['èĸª', 'æ°´'] +['å¿ħçĦ¶', 'ä¼ļ'] +['æ²', '½'] +['éį', 'ĭ'] +['第ä¸Ģ', 'éĥ¨'] +['åĪĬ', 'çī©'] +['å®ŀ', 'ä¾ĭ'] +['æ¸ħ', 'åĩĢ'] +['ä¸Ĭ', 'èµĽåŃ£'] +['åĽ¾', '表'] +['éĤ®', 'è½®'] +['åĵª', '裡'] +['缸', 'è§ģ'] +['æī°', 'ä¹±'] +['æ¯ı', 'æ¯ı'] +['è¿Ļ', 'è¾ĪåŃIJ'] +['ç¡«', 'éħ¸'] +['äºī', '缸'] +['溯', 'æºIJ'] +['åĩº', 'ä¼Ĺ'] +['çİī', 'çŁ³'] +['åħ±', 'çĶŁ'] +['æĹ¶éĹ´', '段'] +['éĩįè¦ģ', 'æĮĩ示'] +['æ¶Īè´¹', 'éľĢæ±Ĥ'] +['éķ¿', 'éķ¿'] +['éķ¿éķ¿', 'çļĦ'] +['å®ī', 'æĬļ'] +['å¢ŀ', 'é«ĺ'] +['æľ¬', 'è½®'] +['亲', 'çľ¼'] +['é£İ', 'æ³¢'] +['èĢģ', 'å¦Ī'] +['æĶ¶è´¹', 'æłĩåĩĨ'] +['åĨħ', 'éĻĨ'] +['æĮ¥', 'åıij'] +['åįĩ', 'åѦ'] +['èĥ¸', 'åīį'] +['åģı', 'è¿ľ'] +['纯', 'æ´ģ'] +['æĸ½å·¥', 'åįķä½į'] +['身', 'ä»·'] +['è´¢', 'åĬĽ'] +['çº', '¶'] +['è£ħ', 'çͲ'] +['æĺ¾ç¤º', 'åύ'] +['毫', 'åįĩ'] +['æ·±', 'çŁ¥'] +['è̶', 'ç©'] +['è̶ç©', 'Į'] +['è¾ĥ', 'éĩı'] +['åľ¨', 'è¿ĩ渡'] +['åľ¨è¿ĩ渡', 'æľŁ'] +['èĮ', 'Ĺ'] +['ä¸Ģ个', 'æĺŁæľŁ'] +['èĬ', '·'] +['è´¿', 'èµĤ'] +['æ¿', 'ķ'] +['æĩĤ', 'äºĭ'] +['ç§', '§'] +['åħħ', 'å½ĵ'] +['åĽ½', 
'ç«ĭ'] +['èĬ±', 'çĵ£'] +['éĤĦ', 'è¦ģ'] +['åħ¬', 'åľĴ'] +['触', 'åĬ¨'] +['æ³°', 'å·ŀ'] +['ä»Ģä¹Ī', 'æł·'] +['æ»ĭ', 'åħ»'] +['è¯Ħ', 'åΤ'] +['æĮ¥', 'æīĭ'] +['èĦ', 'Ī'] +['å§¥', 'å§¥'] +['è¿IJ', 'è´¹'] +['æ¯ħ', 'åĬĽ'] +['å¿ĥ', 'æĻº'] +['ä¸į', 'æİĴéϤ'] +['第ä¸ī', '代'] +['éĢĢ', 'è´§'] +['æĺŁ', 'éĻħ'] +['æ°¸', 'åĪ©'] +['æĬ¤', 'åį«'] +['çıŃ', '车'] +['è¨Ģ', 'è¡Į'] +['ç¹', 'ª'] +['主åĬ¨', 'æĢ§'] +['å·¥ç¨ĭ', 'è´¨éĩı'] +['éĥĬ', 'åĮº'] +['ä¸Ģ', 'æłĭ'] +['ä½Ĩ', 'å®ŀéĻħä¸Ĭ'] +['ä¸ī大', 'èģĮä¸ļ'] +['åij¼', 'åı«'] +['女', 'åħĴ'] +['è¯ģåΏ', 'æĬķèµĦ'] +['èĢĥ', 'æħ®'] +['çĤ«', 'èĢĢ'] +['æ²»', '好'] +['åĺ', '¶'] +['èĥ', '¤'] +['åħīä¼ı', 'åıijç͵'] +['åĩł', 'æŃ¥'] +['æīĢ', 'æīĢ'] +['æīĢæīĢ', 'éķ¿'] +['çħ§', 'æł·'] +['åĵ¥', '们'] +['è¯', 'Ľ'] +['è¿Ļä¸Ģ', 'åĪ»'] +['çŁ¿', 'çī©è´¨'] +['ä¸įå¾Ĺ', 'å·²'] +['åIJĮ', '缣'] +['ç»Ĩ', 'å¾®'] +['è·¯', 'èĻİ'] +['çϾ', 'èĬ±'] +['æ··', 'æ²Į'] +['ä¸Ĭæµ·', 'è¯ģåΏ'] +['éĢĢ', 'ç¨İ'] +['èµŀ', 'åı¹'] +['æī®æ¼Ķ', '游æĪı'] +['åIJį', 'åĪĹ'] +['åIJįåĪĹ', 'åīį'] +['åIJįåĪĹåīį', 'èĮħ'] +['ç±³', 'å°Ķ'] +['ä»Ģä¹Ī', 'åİŁåĽł'] +['å®īåħ¨', 'ä¿Ŀéļľ'] +['ä¸Ģåıª', 'æīĭ'] +['ä¹³', 'ä¸ļ'] +['ä¸į', 'çĶĺ'] +['æĥħ', 'åķĨ'] +['æĮ¡', 'ä½ı'] +['åİŁåĽł', 'ä¹ĭä¸Ģ'] +['è¿Ļ', '两天'] +['çĥĺ', 'çĦĻ'] +['è±', '¬'] +['ä½ł', '以为'] +['没', 'è§ģè¿ĩ'] +['åĵªå®¶', '好'] +['åīį', 'ä»»'] +['è¿Ľ', 'è´§'] +['éĢĢ', 'åĽŀ'] +['串', 'èģĶ'] +['èĩ³', 'æĸ¼'] +['åĨ°', 'æ·ĩ'] +['åĨ°æ·ĩ', 'æ·ĭ'] +['æŁ¥çľĭ', '详æĥħ'] +['çı¾', '實'] +['æİ¨', 'æµĭ'] +['æİ¥', 'æīĭ'] +['éļ¶', 'å±ŀäºİ'] +['åŁİå¸Ĥ', '群'] +['æĿİ', 'åħĪçĶŁ'] +['çŁ¿', 'æ³īæ°´'] +['çī¹', 'ä»·'] +['æĽ´å¤ļ', '精彩'] +['ç¨ĭ', 'å¼ı'] +['读', 'æĩĤ'] +['å±ı', 'èͽ'] +['奥', 'æŀĹ'] +['奥æŀĹ', 'åĮ¹'] +['奥æŀĹåĮ¹', 'åħĭ'] +['红', 'èĸ¯'] +['å¥', '®'] +['å®Ŀ', 'çİī'] +['ç¶²', '絡'] +['è²', '§'] +['欧', 'å¼ı'] +['çϽ', 'ç³ĸ'] +['èĩªçĦ¶', 'çģ¾å®³'] +['åijĬè¯ī', '她'] +['å»', 'ļ'] +['çĤ¹åĩ»', 'æŁ¥çľĭ'] +['é£İ', '湿'] +['èµĦ产', 'éĩįç»Ħ'] +['ä¹Łä¸į', 'ä¾ĭå¤ĸ'] +['åįĬ', '个å°ıæĹ¶'] +['åIJ¸å¼ķ', 'æĽ´å¤ļ'] +['æĹ¶éĹ´', 'èĬĤçĤ¹'] +['æĶ¶', '纳'] +['åIJ¸', 'æ¯Ĵ'] +['èĢģ', '乡'] +['çIJ', 'ħ'] +['æľĢ', 
'çµĤ'] +['åıį', 'æĦŁ'] +['ç͍', '微信'] +['çĶ¨å¾®ä¿¡', 'æī«'] +['éĢŁ', 'çİĩ'] +['大', 'çĨĬçĮ«'] +['åı¯', 'æĥ³'] +['åı¯æĥ³', 'èĢĮ'] +['åı¯æĥ³èĢĮ', 'çŁ¥'] +['åĴ', '§'] +['èµ°', 'åħ¥'] +['碳', 'éħ¸'] +['èĮĥ', 'åĨ°'] +['èĮĥåĨ°', 'åĨ°'] +['被', 'åΤ'] +['积æŀģ', 'æİ¨åĬ¨'] +['è¶³', 'è¶³'] +['ç²Ĵ', 'åŃIJ'] +['大', 'å®Ĺ'] +['大å®Ĺ', 'åķĨåĵģ'] +['ç½ij绾', 'ç§ijæĬĢ'] +['æĽ¼', 'åŁİ'] +['å·²', 'ä¹ħ'] +['å·²ä¹ħ', 'çļĦ'] +['秦', 'çļĩ'] +['秦çļĩ', 'å²Ľ'] +['ä»»', 'æķĻ'] +['å͝', 'ç¾İ'] +['æ·¡', 'åĮĸ'] +['æ¡Ĥ', 'èĬ±'] +['çŁ¥è¯Ĩ', 'åĪĨåŃIJ'] +['æĩĴ', 'å¾Ĺ'] +['主', 'åħ¬'] +['设计', 'çIJĨ念'] +['è³', 'º'] +['æīĢ', 'æıIJä¾Ľ'] +['æīĢæıIJä¾Ľ', 'ä¹ĭ'] +['æĶ»', 'åħĭ'] +['åĤ', '¾'] +['è¯Ń', 'æ³ķ'] +['åįĥ', 'åı¤'] +['éĸĭ', 'æĶ¾'] +['第ä¸Ģ', 'èĬĤ'] +['éĤĦ', 'æ²Ĵ'] +['éĢĥ', 'çĶŁ'] +['æ³', 'Ĺ'] +['åİ¿', 'å§Ķ书记'] +['ä½ľèĢħ', 'æīĢæľī'] +['çħ', '½'] +['ç»', 'ħ'] +['æł', 'ħ'] +['æľ´', 'ç´ł'] +['çijķ', 'çĸµ'] +['åĮħ', 'åĮħ'] +['æ°ij主', 'åħļ'] +['ä¸į', 'è¿ľå¤Ħ'] +['å¥ĩ', 'å¼Ĥ'] +['åĺ»', 'åĺ»'] +['æī', '¼'] +['ç¿»', 'å¼Ģ'] +['æĢİ', 'èĥ½'] +['éģ´', 'éĢī'] +['è§£', 'éĩĭ'] +['å¹¼', 'ç¨ļ'] +['è¦ģ', '好好'] +['è¶´', 'åľ¨'] +['ç´¢', 'åıĸ'] +['ç»Ī', 'çĶŁ'] +['åħ¨', 'æµģç¨ĭ'] +['éģ©', 'çķ¶'] +['åįıè°ĥ', 'åıijå±ķ'] +['æĬ¥', 'ä»ĩ'] +['ç§ijæĬĢ', 'åĽŃ'] +['ä»Ģä¹Ī', 'éĥ½ä¸į'] +['æľĢåIJİ', 'ä¸Ģ次'] +['ç»Ļ人', 'ä¸Ģç§į'] +['æł¸', 'å®ļ'] +['被', 'åĪĹåħ¥'] +['æĦı', 'æĥ³ä¸įåΰ'] +['èĢĥ', 'æŁ¥'] +['åľ¨æŃ¤', 'ä¹ĭåīį'] +['æīĵ', 'çIJĥ'] +['è¶ĬæĿ¥è¶Ĭ', 'å°ij'] +['å®ļ', 'å¾ĭ'] +['è¡ĮæĶ¿', 'æľºåħ³'] +['ä½ıæĪ¿', 'åħ¬ç§¯'] +['å°ıå§IJ', 'å§IJ'] +['ä¸ī', 'èı±'] +['ä¿®', 'è¡¥'] +['èŀĥ', 'èŁ¹'] +['西', 'çͲ'] +['æĢ', 'ł'] +['çŃī', 'å¤ļ项'] +['产ä¸ļ', 'éĽĨèģļ'] +['ä»·æł¼', 'ä¸Ĭ涨'] +['åħ¬åħ±', 'åľºæīĢ'] +['è¢ĭ', 'åŃIJ'] +['æĨ§', 'æĨ¬'] +['çļĦæĸ¹å¼ı', 'æĿ¥'] +['åΰ', 'è´¦'] +['çģ', '½'] +['å·´', 'èı²'] +['å·´èı²', 'çī¹'] +['æ¼Ķ', 'ä¹ł'] +['èŃ¦ç¤º', 'æķĻèĤ²'] +['çķı', 'æĥ§'] +['å¼ķ', 'æµģ'] +['æĶ¶', 'æĶ¯'] +['å±Ĥ', 'åĩº'] +['å±Ĥåĩº', 'ä¸į'] +['å±Ĥåĩºä¸į', 'ç©·'] +['æijĩ', 'æ»ļ'] +['辦', 'çIJĨ'] +['纵', 'è§Ĥ'] +['æķij', 'æµİ'] +['å®¶', 'éĥ½çŁ¥éģĵ'] +['åĮ', '¯'] 
+['å°ı', '鸣'] +['ä»»', 'åĭĻ'] +['计', 'åħ¥'] +['ç«ŀ', 'éĢī'] +['å¼ĢèįĴ', 'æĹ¶æľŁ'] +['åij¨', 'æģ©'] +['åij¨æģ©', 'æĿ¥'] +['交', 'ç»ĩ'] +['çķ¢', 'æ¥Ń'] +['æł¹æį®', 'èĩªå·±'] +['æĸ°äºº', 'çݩ家'] +['åѵåĮĸ', 'åύ'] +['éĩĩ', 'æļĸ'] +['å¹³åĿĩ', 'æ°´å¹³'] +['åħ¬å¼Ģ', '课'] +['失', 'åĪ©'] +['伺', 'æľį'] +['çĬ', 'ģ'] +['忽', 'æĤł'] +['主è¦ģ', 'éĽĨä¸Ń'] +['æ¤į', 'æłij'] +['æ¯Ĺ', 'éĤ»'] +['èĩº', 'çģ£'] +['åĩºåĽ½', 'çķĻåѦ'] +['æĬĹ', 'éľĩ'] +['æĥ©', 'æĪĴ'] +['å¹´åºķ', 'åīį'] +['åĴ¸', 'éĺ³'] +['æ°ij', 'å±ħ'] +['大çIJĨ', 'çŁ³'] +['éĿ', '³'] +['éķ', 'ĸ'] +['æ¸ħ', 'è¿ľ'] +['è£ħ', 'è½½'] +['èĩ', 'Ģ'] +['å½±', 'ä¸ļ'] +['å¼Ł', 'åħĦ'] +['æĤ²', 'è§Ĥ'] +['çĿĢçľ¼', 'äºİ'] +['æįį', 'åį«'] +['åī¥', '夺'] +['ç¯', 'Ĩ'] +['å¾Ī', 'éķ¿æĹ¶éĹ´'] +['è¥', 'Ł'] +['第ä¸Ģ', 'çϾ'] +['ä¸ĢåĪĨ', 'éĴ±'] +['æĸ°éĹ»', 'è®°èĢħ'] +['éķ·', 'æľŁ'] +['æ³ķ', 'æĪĺç»ĦåIJĪ'] +['è°ģ', 'çŁ¥éģĵ'] +['èħ°', 'éĥ¨'] +['æ±ī', 'åł¡'] +['åħ¥', 'çĿ¡'] +['åįĸ', 'æİī'] +['æ¶Īè²»', 'èĢħ'] +['æĥ¯', 'ä¾ĭ'] +['æĥ³', 'äºĨ'] +['æĥ³äºĨ', 'æĥ³'] +['èĢģæĹ§', 'å°ıåĮº'] +['ä¼ł', 'è¨Ģ'] +['åĪĨæķ°', '线'] +['æµģ', '泪'] +['ç»Ħç»ĩ', 'é¢Ĩ导'] +['äºļ', 'åĨĽ'] +['å¢ŀå̼', 'æľįåĬ¡'] +['å¾', '¹'] +['ä¼', '¶'] +['äºĽ', '许'] +['å¸ĥ', 'èݱ'] +['强', 'æĤį'] +['宫', 'å»·'] +['绿', 'èĮ¶'] +['åĮ', '¡'] +['å¾Ī', 'æŃ£å¸¸'] +['æĺ¥', 'å¤ı'] +['æ¯', 'Ļ'] +['è¯Ħ', 'æ¯Ķ'] +['åĩ¡', 'äºĭ'] +['æĬī', 'æĭ©'] +['åĢĴ', 'éľī'] +['éĩį', '度'] +['åįıä¼ļ', 'ä¼ļéķ¿'] +['å¿§', 'èĻij'] +['ä¸ĭ', 'ä¸Ģç¯ĩ'] +['沪', 'æ·±'] +['æĪ', 'İ'] +['æīĵ', 'ä»Ĺ'] +['åįĪ', 'é¥Ń'] +['å¹´é¾Ħ', '段'] +['ä¸ŃåĽ½', 'è¶³çIJĥ'] +['设计', 'æĸ¹æ¡Ī'] +['åºĶç͍', 'æŁ¥çľĭ'] +['é¢Ħ', 'æĸĻ'] +['åĹ', '¡'] +['ç¥ĸ', 'çζ'] +['çļĦä¸Ģ', 'åijĺ'] +['æ´Ĺ', 'å¹²åĩĢ'] +['åİĨåı²', 'æĸ°'] +['åİĨåı²æĸ°', 'é«ĺ'] +['çĭ¬', 'åħ·'] +['æħĭ', '度'] +['æīĵ', '交'] +['æīĵ交', 'éģĵ'] +['é»Ħ', 'çŁ³'] +['çĽ¼', 'æľĽ'] +['çī§', 'åľº'] +['转', '弯'] +['åįĩ', 'åįİ'] +['åĨį', 'ä¹Łæ²¡æľī'] +['èĭ±', 'æīį'] +['æĽ´', 'åIJį为'] +['åĢŁ', 'ç͍'] +['çºł', 'éĶĻ'] +['ç»Ŀ对', 'ä¸įä¼ļ'] +['çİĭ', 'çīĮ'] +['çĽĨ', 'åľ°'] +['失', 'è°ĥ'] +['好', '象'] +['é³', '¥'] +['ä¿Ŀ', 'ä¿®'] +['åĽĽä¸ª', 
'èĩªä¿¡'] +['头', 'çļ®'] +['åİŁ', 'åīĩ'] +['æĬ¥', 'æ¡Ī'] +['奴', 'éļ¶'] +['å³', 'Ļ'] +['è°ĥ', 'æĸĻ'] +['ä¹Ł', '許'] +['èIJ½', 'åΰ'] +['èIJ½åΰ', 'å®ŀ'] +['èIJ½åΰå®ŀ', 'å¤Ħ'] +['çĦļ', 'çĥ§'] +['çĶŁæ´»', 'çݯå¢ĥ'] +['åºĶ', 'åıĬæĹ¶'] +['è¶Ĭ', 'è¿ĩ'] +['æĦŁ', 'è¬Ŀ'] +['æĻ¯', 'å¾·'] +['æĻ¯å¾·', 'éķĩ'] +['çĬ', 'Ģ'] +['身', 'éĤĬ'] +['ç¨İåĬ¡', 'æĢ»å±Ģ'] +['åĩĢ', 'åľŁ'] +['ä¾µ', 'åįł'] +['åĬ¨', 'å·¥'] +['å¹´', 'ä¹ĭ'] +['å¹´ä¹ĭ', 'ä¹ħ'] +['第äºĮ', 'èĬĤ'] +['åĬ¨çī©', 'åĽŃ'] +['第ä¸Ģ', '书记'] +['éħ', 'ļ'] +['çĶŁäº§', '设å¤ĩ'] +['æŁIJç§į', 'ç¨ĭ度'] +['åľ', 'Ń'] +['åĩŃåĢŁ', 'çĿĢ'] +['éĺħ', 'è§Ī'] +['çϽ', 'æ²Ļ'] +['æ²¹', 'çĥŁ'] +['çªģçł´', 'åı£'] +['åıĹ', 'å½±åĵį'] +['åı¯ä»¥', 'æĽ´å¥½'] +['å³°', 'å̼'] +['æĿĤ', 'è´¨'] +['宿', 'è¿ģ'] +['çĽĺ', 'æ´»'] +['æ¿Ģ', 'èµ·'] +['åĦ¿', 'ç§ij'] +['åĿIJ', 'èIJ½åľ¨'] +['æĮª', 'å¨ģ'] +['æµ·', 'å²Ľ'] +['绣', '绣'] +['éĻ', '¨'] +['ä¼ĺ', 'äºİ'] +['å°Ī', 'å®¶'] +['ä¸Ģ', 'éĤĬ'] +['èIJ', 'Ĭ'] +['äºĨä¸Ģ', 'åı£'] +['æ²ĥå°Ķ', 'æ²ĥ'] +['æŃ£å¸¸', '使ç͍'] +['æĻ®éģį', 'åŃĺåľ¨'] +['丰', '满'] +['çĶ»', 'åį·'] +['åºĶ', 'æĶ¶'] +['åºĶæĶ¶', 'è´¦'] +['åºĶæĶ¶è´¦', '款'] +['å®Įæķ´', 'çĥŃ'] +['å®Įæķ´çĥŃ', 'æ¦ľ'] +['注', 'è§Ĩ'] +['çĨ', 'Ħ'] +['èº', '¬'] +['éĶĢåĶ®', '人åijĺ'] +['è¶ĭ', 'åIJij'] +['çĦ¦', 'æĢ¥'] +['åįģå¹´', 'åīį'] +['ä¼łç»Ł', '产ä¸ļ'] +['質', 'éĩı'] +['åĩ¤åĩ°', 'ç½ij'] +['èµĦæºIJ', 'æķ´åIJĪ'] +['æ¶Į', 'åħ¥'] +['æĸĩåĮĸ', 'ä¼łæĴŃ'] +['çķĮ', '第ä¸Ģ'] +['æ°´', 'æ³µ'] +['宫', '殿'] +['æİ¢', '寻'] +['ä¿®', 'åīª'] +['æĦı', 'è¦ĭ'] +['ç´Ĭ', 'ä¹±'] +['æĽ', 'ī'] +['çϽ', 'è¡£'] +['èĻİ', 'åį«'] +['ç´§', 'æī£'] +['å¤Ħå¤Ħ', 'éķ¿'] +['åĪĽå»º', 'å·¥ä½ľ'] +['红', 'æŀ£'] +['饼', 'å¹²'] +['äºĨ', 'åįĬ天'] +['ä¼ļå½±åĵį', 'åΰ'] +['çĽ¸ä¿¡', '大家'] +['èħ¾', 'é£ŀ'] +['å°±', 'å¦ĤåIJĮ'] +['ä¸ĭéĿ¢', 'å°ıç¼ĸ'] +['æ°ijèIJ¥', 'ç»ıæµİ'] +['æĻ', '¦'] +['è£ħ', 'æī®'] +['é»ij', 'å¤ľ'] +['常', 'å¾·'] +['å·¥ä¸ļ', '大åѦ'] +['æĺİ', 'çŁ¥'] +['éĺŁåijĺ', '们'] +['åIJ¬', '课'] +['æ¯ı', 'éļĶ'] +['羣æĺ¯', '太'] +['åIJĪä½ľ', 'åħ±èµ¢'] +['çIJĨ', 'åıij'] +['æīį', 'å¹²'] +['çľĭ', 'èµ·ä¾Ĩ'] +['殿', 'ä¸ĭ'] +['å®ī', 'éĺ³'] +['æīĢ', '产çĶŁçļĦ'] 
+['éĽĩ', 'ä½£'] +['æĬ¬èµ·', '头'] +['æį®', 'æĬ¥éģĵ'] +['éļĨéĩį', '举è¡Į'] +['交', 'éĶĻ'] +['è¶ħ', 'é¢Ŀ'] +['åĮĸ', 'çĸĹ'] +['é¡', 'Ĩ'] +['纵', 'æ·±'] +['çĪ±åĽ½', '主ä¹ī'] +['éĻ¢', 'åī¯éĻ¢éķ¿'] +['è®', '³'] +['羣æŃ£', 'åģļåΰ'] +['åѤ', 'åįķ'] +['èĩªçĦ¶', 'èĢĮ'] +['èĩªçĦ¶èĢĮ', 'çĦ¶'] +['ä¿®', '身'] +['èĬ', '¹'] +['æģ¯', 'æģ¯'] +['æģ¯æģ¯', '缸åħ³'] +['驾', 'æł¡'] +['æİ©', '饰'] +['æ³½', 'è¿ŀ'] +['æ³½è¿ŀ', 'æĸ¯åŁº'] +['举', 'æŃ¢'] +['管çIJĨ', 'ä½ĵåζ'] +['åħ¶ä¸Ń', 'ä¹ĭä¸Ģ'] +['æĿ¾', 'å¼Ľ'] +['æĭ¦', 'æĪª'] +['åį«', 'åģ¥'] +['åį«åģ¥', 'å§Ķ'] +['ä»İ', 'åݻ年'] +['åĤ', '¢'] +['è´Ń', '票'] +['åĽ¾', 'æłĩ'] +['æ²³', '西'] +['æ°ijæĶ¿', 'å±Ģ'] +['ç§ģ', 'èIJ¥'] +['å¤ĸåĽ½', 'è¯Ń'] +['å¹²', 'è´§'] +['æĵ¦', 'æĭŃ'] +['åľ°', 'ä¸Ń'] +['åľ°ä¸Ń', 'æµ·'] +['æµĵ', 'æµĵ'] +['æµĵæµĵ', 'çļĦ'] +['å§ĭ', '建'] +['å§ĭ建', 'äºİ'] +['ç¶ĵ', 'æŃ·'] +['è·¯', 'æ¼Ķ'] +['æļ´', 'é£İ'] +['åŁº', 'è¾ħ'] +['æī¶è´«', 'å·¥ä½ľ'] +['ä¸Ģ缴', 'å¤Ħäºİ'] +['æĥħ', 'è¶£'] +['äºĮ', 'åŃ£åº¦'] +['åİĮ', 'æģ¶'] +['顺åĪ©', 'å®ĮæĪIJ'] +['æŁ¥', 'å°ģ'] +['é¡¶', '端'] +['ä¸į', 'åŃķ'] +['ä¸Ģ大', 'åłĨ'] +['被', 'æ·ĺæ±°'] +['æĺ¯', 'ç͍æĿ¥'] +['æľĢ', 'åIJĪéĢĤ'] +['亮', 'çľ¼'] +['å¹¶ä¸įæĺ¯', 'å¾Ī'] +['ç§ijçłĶ', 'éĻ¢'] +['ç§ijçłĶéĻ¢', 'æīĢ'] +['ç²', 'Ł'] +['é¢Ī', 'éĥ¨'] +['é»ĺé»ĺ', 'åľ°'] +['é«ĺä¸Ń', 'çĶŁ'] +['æĹıèĩªæ²»', 'åİ¿'] +['æķĻåѦ', 'è´¨éĩı'] +['æĪĺ', 'çģ«'] +['åĿİ', 'åĿ·'] +['æIJŃ', 'ä¹ĺ'] +['è¯Ĺ', 'æĦı'] +['åĪij', 'èѦ'] +['åĩº', 'æ±Ĺ'] +['åįģåħŃ', 'æĿ¡'] +['请', 'åıĬæĹ¶'] +['åĨľä¸ļ', '大åѦ'] +['èIJ½', 'åı¶'] +['æĢ»', 'èĢĮè¨Ģ'] +['æĢ»èĢĮè¨Ģ', 'ä¹ĭ'] +['æĿľ', 'åħ°'] +['æĿľåħ°', 'çī¹'] +['éĻª', 'ä½ł'] +['åħ¬', 'æĬ¥'] +['çķĻè¨Ģ', 'æĿ¿'] +['éĺħ', 'åİĨ'] +['ç«¶', 'çĪŃ'] +['ç»Ļ', 'åĪ«äºº'] +['æĹ¥æĬ¥', '社'] +['åĿIJ', 'èIJ½'] +['åĿIJèIJ½', 'äºİ'] +['éĩij', 'åŃĹ'] +['éĩijåŃĹ', 'å¡Ķ'] +['åĽ', '¤'] +['è¯Ŀ', 'åī§'] +['æĮģç»Ń', 'æİ¨è¿Ľ'] +['æ¼ı', 'æ°´'] +['詳', 'ç´°'] +['æĢĢ', 'æĬ±'] +['åıĺ', 'å¹»'] +['饥', '饿'] +['éļIJ', '身'] +['个', 'èµĽåŃ£'] +['åĵ¡', 'å·¥'] +['æģ¢å¤į', 'æŃ£å¸¸'] +['äºĨ', '好å¤ļ'] +['æĺŁ', 'å·´'] +['æĺŁå·´', 'åħĭ'] +['åħī', 'çݯ'] +['å¸ħ', 'åĵ¥'] +['çϽ', 
'éĽª'] +['ç¨į', 'ç¨į'] +['计', 'æıIJ'] +['æĦĽ', 'æĥħ'] +['éİ', 'ĸ'] +['ä¿¡', 'éĺ³'] +['è§Ģ', 'å¯Ł'] +['å¦Ĥæŀľä½ł', 'æĥ³'] +['缸æ¯Ķ', 'ä¹ĭä¸ĭ'] +['è§£', 'å¼Ģ'] +['æīĵåį°', 'æľº'] +['身', '躯'] +['ç²¾ç¥ŀ', 'æĸĩæĺİ'] +['èĤ¡', 'æĮĩ'] +['å¾®', 'åĪĽ'] +['红', 'èĮ¶'] +['èĩ´', 'çĻĮ'] +['æģ©', 'æĸ½'] +['èħ¿', 'éĥ¨'] +['大åŀĭ', 'å¤ļ人'] +['å®ī', 'åĢį'] +['è¾ħ导', 'åijĺ'] +['èĪª', 'éģĵ'] +['å¸ĥ', 'å°Ķ'] +['åįĹå®ģ', 'å¸Ĥ'] +['ä¸ĬçıŃ', 'æĹı'] +['ä¾§', 'ç»ĵæŀĦæĢ§'] +['追', 'éļı'] +['å½ĵåľ°', 'æĶ¿åºľ'] +['èµ°', 'åĩºæĿ¥'] +['éĩijèŀį', 'ä¸ļ'] +['丼', '书'] +['é¡¹çĽ®', 'ç»ıçIJĨ'] +['è¿ĩ', 'æĪ·'] +['骨', 'æŀ¶'] +['è¡', 'Ļ'] +['ä»Ģ', '麽'] +['èħ', 'ĭ'] +['è¦ģ', '害'] +['åľ¨', 'åºĬä¸Ĭ'] +['代è¨Ģ', '人'] +['並', 'å°ĩ'] +['åIJĦ个', 'æĸ¹éĿ¢'] +['è°´', 'è´£'] +['åħ±', 'æĮ¯'] +['åį³å°Ĩ', 'åΰæĿ¥'] +['èĤº', 'çĻĮ'] +['ä¾Ľ', 'éĶĢ'] +['丼', 'æŀĹ'] +['èµ', 'ĥ'] +['åįģä½Ļ', 'å¹´'] +['åĭĺ', 'æİ¢'] +['飵', 'åij³'] +['èĭ¦', 'ç¬ij'] +['æľĢ大', 'ç¨ĭ度'] +['éĩįçĤ¹', 'åħ³æ³¨'] +['ä¹ĭ', '举'] +['满', 'æĢĢ'] +['åıĹåΰ', 'å½±åĵį'] +['æĭĽ', 'æĬķæłĩ'] +['è¡¥', 'é½IJ'] +['西', '红'] +['西红', 'æŁ¿'] +['é¬', '§'] +['è£ħ', 'åį¸'] +['éĤ»', 'éĩĮ'] +['èĤĩ', 'äºĭ'] +['æİĴ', 'æ¯Ĵ'] +['åѤ', 'åĦ¿'] +['鼶', 'è·Ŀ离'] +['å®ŀ', 'å¹²'] +['çľĭ', 'æŁ¥çľĭ'] +['æĶ¶è´¹', 'ç«Ļ'] +['ç»', '·'] +['åħ¬çĽĬ', 'æĢ§'] +['éĢĴ', 'ç»Ļ'] +['æĶ»', 'æīĵ'] +['æĺŁçº§', 'éħĴåºĹ'] +['æĺİ', 'åªļ'] +['çį¨', 'ç«ĭ'] +['è¯Ŀè¯Ń', 'æĿĥ'] +['ä¸ĢæŃ¥', 'ä¸ĢæŃ¥'] +['书æ³ķ', 'å®¶'] +['æľªç»ı', 'æİĪæĿĥ'] +['çŁ³', 'èĨı'] +['åĩŃ', 'ä»Ģä¹Ī'] +['çļĦ', 'æĹ¥'] +['çļĦæĹ¥', 'åŃIJéĩĮ'] +['诱', '人'] +['çϾåĪĨ', 'çϾ'] +['èĪĪ', 'è¶£'] +['å¼ł', 'åħĪçĶŁ'] +['èĢģçĪ·', 'åŃIJ'] +['æ³¢', 'çī¹'] +['åŁºéĩij', '份é¢Ŀ'] +['æ²Ļåıij', 'ä¸Ĭ'] +['å¥ĭæĸĹ', '缮æłĩ'] +['æ°¢', 'èĥ½'] +['æ²ĥå°Ķ', 'çİĽ'] +['義', 'åĭĻ'] +['éŁ³', 'ç®±'] +['æ²ī', '浸'] +['æ²ī浸', 'åľ¨'] +['èĭ±', 'åľĭ'] +['çģ¯', 'çģ«'] +['è¿Ľ', '项'] +['两', '端'] +['ä¹Ķ', '丹'] +['èĦ¸', 'é¢Ĭ'] +['åıijå±ķ', 'æ½ľåĬĽ'] +['åĭķ', 'ä½ľ'] +['åĵĪ', 'ä½Ľ'] +['å®´', 'ä¼ļ'] +['æ§', 'į'] +['ç«ĭ', 'å¿Ĺ'] +['ç¡ķ士', 'åѦä½į'] +['åĭĭ', '竳'] +['è¿Ļ', 'åľºæ¯ĶèµĽ'] +['æĮģ', 'å¹³'] +['éķĢ', 
'éĶĮ'] +['èĭ±', 'çī¹'] +['èĭ±çī¹', 'å°Ķ'] +['æķĻ', 'èģĮå·¥'] +['åĬŁ', 'åĬĽ'] +['该', 'æ¡Ī'] +['ä¸Ģ', 'æ¢Ŀ'] +['åĺī', 'å¹´'] +['åĺīå¹´', 'åįİ'] +['è¿«', 'ä¸įåıĬ'] +['è¿«ä¸įåıĬ', 'å¾ħ'] +['è¿Ļ个', 'æĹ¶ä»£'] +['精彩', 'æĴŃæĬ¥'] +['人', 'èĦ¸'] +['人èĦ¸', 'è¯ĨåĪ«'] +['æ£Ģå¯Ł', 'å®ĺ'] +['å°ı', 'èħ¿'] +['éĨĴ', '缮'] +['åħļ', 'æĢ»'] +['åħļæĢ»', 'æĶ¯'] +['æĪ', 'Ł'] +['èĮ«', 'çĦ¶'] +['è±Ĩ', 'æµĨ'] +['主', 'æ²»'] +['éĿĴæµ·', 'çľģ'] +['åĪijäºĭ', '责任'] +['çł', '°'] +['ä¹ĭ', 'æ¬ĬåĪ©'] +['äºĶ', 'å®ĺ'] +['è¿·', 'æĥij'] +['åħ¥', 'åºĵ'] +['å®¶', '纺'] +['å¼¹', 'ç°§'] +['åįģäºĶ', 'æĿ¡'] +['ç»Ļ', 'å®Ŀå®Ŀ'] +['èĪªç©º', 'èĪªå¤©'] +['å¾Ģ', 'å¤ĸ'] +['å¼ķ', 'åĬĽ'] +['çľ¼', 'çļ®'] +['æ¶ī', 'è¶³'] +['æĿ¥', '宾'] +['åľ¨çº¿', 'è§Ĵèī²'] +['çĥŃ', 'éĶĢ'] +['æµģ', 'éĢĿ'] +['泡', '泡'] +['éĻį', 'å¹ħ'] +['è´ŁéĿ¢', 'å½±åĵį'] +['红', '楼'] +['红楼', '梦'] +['éļĶ', 'çĿĢ'] +['ä¾¥', '幸'] +['许', 'ä¹ħ'] +['åĴĮ', 'çĿ¦'] +['èŃ', '½'] +['使ç͍èĢħ', 'æĪĸ'] +['ä¹°', 'åįķ'] +['è¿', '´'] +['é£İ', 'æīĩ'] +['æķĻ', '師'] +['æ¡ĮåŃIJ', 'ä¸Ĭ'] +['å¾Ī', 'æ¼Ĥ亮'] +['åł±', 'å°İ'] +['第ä¸Ģ', 'åŃ£åº¦'] +['ç©©', 'å®ļ'] +['æĤ²', 'åĵĢ'] +['çĿĢåĬĽ', 'æīĵéĢł'] +['æĮ', 'Ł'] +['è·¯', 'æ¡¥'] +['åij', 'IJ'] +['åľ£è¯ŀ', 'èĬĤ'] +['çļĩ', 'åŃIJ'] +['ä»ĩ', 'æģ¨'] +['éħĿ', 'éħ¿'] +['ä¸į', 'éĹ´'] +['ä¸įéĹ´', 'æĸŃ'] +['æĮĩ', 'å°ĸ'] +['ä¸ŃåĽ½', 'ç½ij游'] +['åŀ', '£'] +['æĦıè§ģ', '建议'] +['æ¯ħ', 'çĦ¶'] +['亮', '度'] +['èģĶ', 'è°Ĭ'] +['å½ķ', 'åħ¥'] +['åĦ', '²'] +['å¨ĺ', 'å®¶'] +['ç§ij', 'å°Ķ'] +['ä¹Łæ²¡', 'ä»Ģä¹Ī'] +['æł¹æį®', 'ä¸įåIJĮ'] +['åı¶', 'ä¿®'] +['å̼', 'å®Ī'] +['æľ«', '端'] +['åĪ', '¨'] +['åĤµ', 'åĭĻ'] +['èģ¯', 'åIJĪ'] +['å¥ĩ', 'å¹»'] +['èĻļ', 'æŀĦ'] +['é»Ħ', 'æĺı'] +['å¹³', 'åĿ¦'] +['æµģ', 'æ°ĵ'] +['æĸ°', 'åŁºå»º'] +['æĮ½', 'æķij'] +['åįİ', 'å°Ķ'] +['åįİå°Ķ', 'è¡Ĺ'] +['æľĢ', 'åıĹæ¬¢è¿İ'] +['ç»Ń', '约'] +['å¼Ĭ', '端'] +['éŃĶ', 'æ³ķå¸Ī'] +['éŃĶæ³ķå¸Ī', 'åĴĮ'] +['åħ·ä½ĵ', 'åĨħ容'] +['çIJī', 'çĴĥ'] +['æī©', '容'] +['èĮ¶', 'åĽŃ'] +['主ä¹ī', 'èĢħ'] +['ç«ĭ', 'éĿ¢'] +['æİ¥åıĹ', 'éĩĩ访'] +['åĩº', 'åħ¥å¢ĥ'] +['ç§ij', 'åįı'] +['éĴ', '³'] +['çµIJ', 'æ§ĭ'] +['ç»ĵæŀľ', 'æĺ¾ç¤º'] 
+['åı°', 'è´¦'] +['å°±', 'æĿ¥çľĭçľĭ'] +['èĩª', 'æķij'] +['åıį', 'æĩī'] +['åİ»', 'åĵªåĦ¿'] +['è¿Ļ', 'é¦ĸ'] +['è¿Ļé¦ĸ', 'æŃĮ'] +['åIJ¬', 'ä¼Ĺ'] +['å¤ĸ', '壳'] +['ä½ĵèĤ²', 'é¦Ĩ'] +['實', 'æĸ½'] +['èŀº', 'ä¸Ŀ'] +['æĭī', 'åįĩ'] +['çĮĽ', 'åľ°'] +['åħ¨åĽ½', '人æ°ij'] +['æĤī', 'å°¼'] +['æĹı', '群'] +['åĽ¢', 'åijĺ'] +['两个', 'å°ıæĹ¶'] +['åľ¨', 'çݩ家'] +['åľ¨çݩ家', 'ä¸Ń'] +['çĶľ', 'çĶľ'] +['æĬķ', 'è¡Į'] +['åįĶ', 'æľĥ'] +['éĻ', '¡'] +['åĬłå·¥', 'åİĤ'] +['æ¦Ĩ', 'æŀĹ'] +['æŃ»', 'è§Ĵ'] +['åĨħ', 'å¹ķ'] +['æīĢæľī', 'æĥħèĬĤ'] +['åĪ·', 'åį¡'] +['æ°´', 'èĤ¿'] +['èĥĥ', 'åı£'] +['å«Į', 'å¼ĥ'] +['æ²®', '丧'] +['ä¸īå¹´', '级'] +['æ¶Ĥ', 'å±Ĥ'] +['å¿ĥ', '仪'] +['å¿ĥ仪', 'çļĦ'] +['å¤', 'Ń'] +['é¦ĸ', 'è½®'] +['æĹłè®ºæĺ¯', 'åħ¶'] +['éĢı', 'æ°Ķ'] +['äºĮ', 'åįģäºĶ'] +['ç®', '«'] +['åĬŁ', 'åĬ³'] +['çѾ', 'ä¸ĭ'] +['æ²ī', 'è¿·'] +['æķij', 'åij½'] +['éĹª', 'éĹª'] +['åIJĥ', 'äºı'] +['å±ķ', 'åĵģ'] +['åį³æĹ¶', 'åıijçĶŁ'] +['ç¶', 'ľ'] +['ç¶ľ', 'åIJĪ'] +['æłĩ', 'æĺİ'] +['çľĭ', 'ç͵影'] +['åħ¬', '竳'] +['éĺ¿', '森'] +['éĺ¿æ£®', '纳'] +['身', 'åĪĽéĢł'] +['身åĪĽéĢł', 'çļĦ'] +['æ¸Ľ', 'å°ij'] +['å̼å¾Ĺ', 'åħ³æ³¨'] +['鼶åĶ®', 'åķĨ'] +['æįĨ', 'ç»ij'] +['è¸ı', 'åħ¥'] +['èĽ', 'Ł'] +['æŁ´', '纳'] +['èĢģ', 'åħµ'] +['绿èī²', 'çݯä¿Ŀ'] +['é¹', 'Ń'] +['麻', 'æľ¨'] +['æıŃ', 'çīĮ'] +['è¿Ļ款', '车'] +['ç¾İ', 'å¾·'] +['ç¾İå¾·', 'åħ¬åı¸'] +['æ¶', '§'] +['è°ģ', 'çŁ¥'] +['æ´ĭ', 'èij±'] +['æ¯į', 'æł¡'] +['ä¸Ģ', 'éĹª'] +['çĶ·', '主è§Ĵ'] +['æĹłçº¿', 'ç͵'] +['å±ł', 'å®°'] +['æĺ¯', 'éŁ©åĽ½'] +['æĺ¯éŁ©åĽ½', '娱'] +['容', 'è²Į'] +['åĿĩ', '使åħ¶'] +['太', 'å¿«'] +['å¹´', 'çͱ'] +['å¹´çͱ', '缼'] +['èĭ¦', 'èĭ¦'] +['åĬĽ', 'è¿ĺæĺ¯'] +['åĬĽè¿ĺæĺ¯', 'èĩª'] +['æĨ', '©'] +['èģ¯', '絡'] +['åĶ', '¾'] +['åħ·æľī', 'æĪĺ士'] +['追', 'éĹ®'] +['åłĨ', 'æĶ¾'] +['åıį', '驳'] +['å®ŀäºĭ', 'æ±Ĥ'] +['å®ŀäºĭæ±Ĥ', 'æĺ¯'] +['åѸ', 'éĻ¢'] +['åįģ', 'åĩłä¸ª'] +['æķij', 'æĬ¤'] +['æķijæĬ¤', '车'] +['ç½ij绾', 'ä¼łæĴŃ'] +['åįģåħ«', 'å±Ĭ'] +['éĥ¨', 'åī¯'] +['éĥ¨åī¯', 'éĥ¨éķ¿'] +['çĹ´', 'è¿·'] +['管çIJĨ', 'æĿ¡ä¾ĭ'] +['èŀį', '为ä¸Ģä½ĵ'] +['æĢ»', '产å̼'] +['è³', 'ĵ'] +['ä¸ĥ', 'æĺŁ'] +['çıŃ', 'ç»Ħ'] +['绣', 
'é¢Ĩ'] +['请', '大家'] +['éĩij', 'éϵ'] +['èĪħ', 'èĪħ'] +['æµ·', 'æ¹¾'] +['æĸ½', 'çŃĸ'] +['享', 'èªī'] +['éº', '¥'] +['端', 'åįĪ'] +['绿', 'åŁİ'] +['確', 'ä¿Ŀ'] +['å·´', 'æĭī'] +['åĨĴ', 'çĿĢ'] +['æħ·', 'æħ¨'] +['个人', 'è§ĤçĤ¹'] +['ä¹Ļ', 'çĥ¯'] +['ç¡ħ', 'è°·'] +['éĸĭ', 'å±ķ'] +['å°ļ', '书'] +['åĿļ', '飧'] +['åº', 'µ'] +['èĢģ', 'é¾Ħ'] +['èĢģé¾Ħ', 'åĮĸ'] +['羨', 'çľ¼'] +['绿', 'æ°´'] +['绿水', 'éĿĴå±±'] +['书', 'é¦Ļ'] +['主åĬĽ', 'åĨĽ'] +['æīįæĺ¯', '羣æŃ£'] +['æĬ¢', 'åħĪ'] +['æĪIJå°±', 'æĦŁ'] +['éĩį', 'æŀĦ'] +['éĴ¢', 'åİĤ'] +['æĪIJ', '份'] +['èĬ±', '纹'] +['ä¹ĭ', 'äºī'] +['å¹²', 'ç»Ĩèĥŀ'] +['æĹ¢', 'åı¯ä»¥'] +['ç¹ģ', 'çIJIJ'] +['æĦļ', 'èł¢'] +['éĿŀ常', 'æĺİæĺ¾'] +['ä½ĵ', '彩'] +['æĬĢ', 'æ³ķ'] +['æĿĨ', 'èıĮ'] +['å¹¿æ³Ľ', 'åħ³æ³¨'] +['åĮĹ', 'å®ĭ'] +['å§Ĭ', '妹'] +['åįı', 'åĬŀ'] +['æ·®', 'åįĹ'] +['çĥ', 'ı'] +['æ´Ĺ', 'èĦ¸'] +['åıĹ', '访'] +['åıĹ访', 'èĢħ'] +['éĩįè¦ģ', 'åĽłç´ł'] +['å½±è§Ĩ', 'åī§'] +['综èīº', 'èĬĤ缮'] +['èľķ', 'åıĺ'] +['äºĮ', '线'] +['äºĮ线', 'åŁİå¸Ĥ'] +['ä¼Ĭ', 'å§ĭ'] +['çıĬ', 'çijļ'] +['èĩª', 'æŁ¥'] +['åħ¥', 'åĽŃ'] +['åĩ¶', 'æīĭ'] +['åħ¬', 'è¯ī'] +['éģĩ', 'éļ¾'] +['éĩĩçŁ¿', 'çŃī'] +['èĩª', 'çIJĨ'] +['åĸ·', 'æ¶Ĥ'] +['æī©', 'åħħ'] +['éĢı', 'è§Ĩ'] +['é«ĺéĢŁ', 'å¢ŀéķ¿'] +['åĽ¾', 'çĶ»'] +['ç¾', '¹'] +['èĤĩ', 'åºĨ'] +['è¾ľ', 'è´Ł'] +['èµĶ', 'ä»ĺ'] +['è·', '¡'] +['åģ¥åº·', 'æĪIJéķ¿'] +['以ä¸Ĭ', 'åѦåİĨ'] +['åıĸå¾Ĺ', '以åıĬ'] +['æ²ī', '积'] +['åįģä¹Ŀ', 'å±Ĭ'] +['缸éĹľ', 'æľįåĭĻ'] +['æī§', 'åĭ¤'] +['åī¯', 'åİ¿éķ¿'] +['å¯', '°'] +['åģľ', 'æ»ŀ'] +['æ·¹', '没'] +['çŁ³', 'çģ°'] +['çį', '¸'] +['åĢ', '¦'] +['ç¾İ', 'åªĴ'] +['æķĻ', 'æ¡Ī'] +['åĬł', 'çĽĸ'] +['åħ¬å¼Ģ', 'èµĽ'] +['å¥ł', 'åŁº'] +['æĺĨ', 'èĻ«'] +['çŀ', 'ħ'] +['磷', 'éħ¸'] +['äºī', 'åĪĽ'] +['çİĭ', 'æĻĵ'] +['ç¼ĵ', 'åĨ²'] +['åİļ', 'åİļ'] +['åİļåİļ', 'çļĦ'] +['æŀ£', 'åºĦ'] +['ç²¾', 'çĽĬ'] +['ç²¾çĽĬ', 'æ±Ĥ'] +['ç²¾çĽĬæ±Ĥ', 'ç²¾'] +['åĪĨæĶ¯', 'æľºæŀĦ'] +['å®ŀæĸ½', 'ç»ĨåĪĻ'] +['æĸ°', 'èµĽåŃ£'] +['總', 'çµ±'] +['éĢł', 'è¡Ģ'] +['é¢ĩ', 'åħ·'] +['é»Ħ', 'åŁĶ'] +['è¡Ģ', 'èĦĤ'] +['交éĢļ', 'å·¥åħ·'] +['å³', '¥'] +['æĹıèĩªæ²»', 'å·ŀ'] +['寺', 'éĻ¢'] +['確', 'å®ļ'] 
+['æ¦Ĥ念', 'èĤ¡'] +['æĦŁ', 'å®ĺ'] +['æŁľ', 'åı°'] +['åĶ', 'Ķ'] +['çŀŃè§£', '並'] +['æĢ»', 'ä»·'] +['åIJ¸', 'åħ¥'] +['æĢ', '¼'] +['æĻļ', 'éĹ´'] +['å±Ĭ', 'æ¯ķä¸ļçĶŁ'] +['çĶŁ', 'å§ľ'] +['éĺħ读', 'åħ¨æĸĩ'] +['å¾Ĺåΰ', 'æľīæķĪ'] +['æIJľ', 'æķij'] +['åİĨ', 'æĿ¥'] +['èŃī', 'æĺİ'] +['åĥ', '»'] +['èĨ³', 'é£Ł'] +['åĦĦ', 'åħĥ'] +['æīĵ', 'åİĭ'] +['宾', '客'] +['åķ', '¼'] +['ä¸ĢçϾ', 'å¤ļ'] +['æ·±åħ¥', '人å¿ĥ'] +['æ¢ħ', 'å·ŀ'] +['çłĶ', 'åѦ'] +['åħ³', 'ä¹İ'] +['è¼', 'Ľ'] +['亲', 'åıĭ'] +['éħį', 'æĸĻ'] +['æĪij', 'çĪ±ä½ł'] +['è´¸æĺĵ', 'æĪĺ'] +['æľī', 'èī²'] +['æľīèī²', 'éĩijå±ŀ'] +['æįIJ', 'åĬ©'] +['为', 'é¦ĸ'] +['为é¦ĸ', 'çļĦ'] +['å¯Į', 'åĬĽ'] +['çĶ·', 'ç¥ŀ'] +['é³', '³'] +['æµĩ', 'æ°´'] +['åIJ', '±'] +['æĺİç¡®', 'æıIJåĩº'] +['åı¹', 'äºĨ'] +['åı¹äºĨ', 'åı£æ°Ķ'] +['礼', 'æĭľ'] +['è¿Ļ个', 'åIJįåŃĹ'] +['ä¿¡', 'å¾Ĵ'] +['å¿Ĺ', '强'] +['éĻIJ', 'æĹ¶'] +['æĶ¶', 'è²»'] +['åĨľå®¶', 'ä¹IJ'] +['å°ıé¾Ļ', 'èϾ'] +['èIJ½', 'å¹ķ'] +['æ§', 'Ł'] +['åѦ', '龸'] +['æĪĸ', 'å¤ļ'] +['æĪĸå¤ļ', 'æĪĸ'] +['æĪĸå¤ļæĪĸ', 'å°ij'] +['座è°Ī', 'ä¼ļä¸Ĭ'] +['æ¶', '¼'] +['éŃĶ', 'çİĭ'] +['å²', '±'] +['é¡¶', 'å±Ĥ'] +['é¡¶å±Ĥ', '设计'] +['èĦij', 'åŃIJéĩĮ'] +['éĻ¢', 'åŃIJéĩĮ'] +['轩', 'è¾ķ'] +['身å¿ĥ', 'åģ¥åº·'] +['èħ', 'ij'] +['éĹľ', '注'] +['åıĤåĬł', 'ä¼ļè®®'] +['ä¸Ńåįİ', 'æĸĩåĮĸ'] +['追', '寻'] +['å®ī', 'çĦ¶'] +['é£Ļ', 'åįĩ'] +['éŁŃ', 'èıľ'] +['é¸', '¦'] +['åĤ¨', 'éĩı'] +['çĶ·', 'æĸ¹'] +['å¤ĩ', '份'] +['æijĶ', 'åĢĴ'] +['润æ»ij', 'æ²¹'] +['é̼', 'è¿ij'] +['çͳ', 'è¯ī'] +['鸣', 'ç±»'] +['çŁ³æ²¹', 'åĮĸå·¥'] +['åĿļ', 'æŀľ'] +['è¿Ļå®¶', 'ä¼Ļ'] +['æĭĴ', 'ä¸į'] +['羣', 'çļ®'] +['è·Ŀ', 'éĽ¢'] +['è¿ĺ', 'æĮº'] +['éĽķ', 'åĥı'] +['åĪĿ', 'æģĭ'] +['æıIJä¾Ľ', 'æĽ´å¤ļ'] +['æŁ¥çľĭ', 'åħ¨æĸĩ'] +['æķ°åŃĹ', 'è´§å¸ģ'] +['åĸī', 'åĴĻ'] +['åı¦ä¸Ģ', 'ä½į'] +['åĤ¬', 'åĮĸ'] +['åĤ¬åĮĸ', 'åīĤ'] +['ä»İæĿ¥', '没'] +['å¯ĨåĪĩ', '缸åħ³'] +['éĥ¨', '主任'] +['产åĵģ', 'ç»ıçIJĨ'] +['並', 'åIJĮæĦı'] +['èIJ½', 'åħ¥'] +['å±ıå¹ķ', 'ä¸Ĭ'] +['åħ¬åı¸', '竳ç¨ĭ'] +['æį¢', 'åı¥è¯Ŀ'] +['æį¢åı¥è¯Ŀ', '说'] +['ä½į', 'æĸ¼'] +['ä½', 'Ķ'] +['åĩ»', 'æĿĢ'] +['缸', 'è¾ĥ'] +['缸è¾ĥ', 'äºİ'] +['ç²½', 'åŃIJ'] 
+['åįĹ', 'æŀģ'] +['宫', 'é¢Ī'] +['è£ģ', 'åijĺ'] +['æĺİ', 'ç»Ĩ'] +['ä»·å̼', 'éĵ¾'] +['åĽĽä¸ª', 'æĸ¹éĿ¢'] +['æĥħåĨµ', 'æĿ¥çľĭ'] +['æĮij', 'åīĶ'] +['æ®', 'ĺ'] +['æŀģ', 'åĬĽ'] +['çĸij', 'éļ¾'] +['æĬµæĬĹ', 'åĬĽ'] +['æĢ¥', 'éĢŁ'] +['æĪ', 'Į'] +['ä½İ', 'ä¼°'] +['éĹª', 'è¿ĩ'] +['æģ', '¬'] +['èµŀ', 'æī¬'] +['ä»ĸ', 'å¦Ī'] +['æĪIJ为', 'ä¸ĢåIJį'] +['æ´Ĺ', '礼'] +['é¢Ħ计', 'å°Ĩ'] +['åħĪè¿Ľ', 'åįķä½į'] +['è¼', 'Ķ'] +['éĢĥ', 'èĦ±'] +['çݰ', 'åŃĺ'] +['èĢģèĻİ', 'æľº'] +['åįģä¸ĥ', 'æĿ¡'] +['åı¦ä¸Ģ', 'åįĬ'] +['温', 'æĥħ'] +['åī¥', '离'] +['ä¸ĸ', 'è´¸'] +['å®ĺ', 'åı¸'] +['å¾Ī', 'å·®'] +['éĹ´', 'è·Ŀ'] +['请', '注æĦı'] +['åı²', 'è¯Ĺ'] +['åĪ©', 'åύ'] +['è¿IJ', 'ç®Ĺ'] +['沦', '为'] +['該', '使ç͍èĢħ'] +['èĮ', '¬'] +['éͦ', '绣'] +['åı²', 'æĸĻ'] +['çģµ', 'æ´»æĢ§'] +['èģĶ', '社'] +['æĹł', 'åĬ©'] +['æĬĹ', 'æ°§åĮĸ'] +['èıľ', 'èĤ´'] +['éĢł', 'èι'] +['æİī', 'èIJ½'] +['å¤į', 'æŁ¥'] +['åĭĥ', 'åĭĥ'] +['åij¼', '声'] +['給', 'äºĪ'] +['åIJĮäºĭ', '们'] +['ç½', '°'] +['è¯ķ', 'æİ¢'] +['åħ³éĶ®', 'åŃĹ'] +['æįIJ', 'çĮ®'] +['ç»Łè®¡', 'æķ°æį®'] +['åĪĽ', 'ä½ľèĢħ'] +['ä¸ĭ', 'åįĬ'] +['ä¸ĭåįĬ', 'åľº'] +['æī¿æĭħ', '责任'] +['端', 'æŃ£'] +['ç©¿', 'è¡£'] +['ä¼ł', 'çIJĥ'] +['åĬ©', 'éķ¿'] +['åĩ', '±'] +['éķ¶', 'åµĮ'] +['é£ŀ', 'ç¿Ķ'] +['è¾ĵ', 'åįµ'] +['è¾ĵåįµ', '管'] +['ä¸ĩ', 'åħ¬éĩĮ'] +['æİ¨å¹¿', 'åºĶç͍'] +['å¿«', 'æ¨Ĥ'] +['ç§', '½'] +['èī°', 'å·¨'] +['åIJ¬', 'å®Į'] +['åĿļ', '硬'] +['奥', 'åľ°'] +['å¥¥åľ°', 'åĪ©'] +['é¢', 'ĵ'] +['èĻIJ', 'å¾ħ'] +['ä¾Ľ', 'æ±Ĥ'] +['éľī', 'ç´ł'] +['伪', 'è£ħ'] +['乡', 'åľŁ'] +['åĩ¡', 'æľ¬ç½ij'] +['åĩ¡æľ¬ç½ij', '注'] +['ä¼Ĭ', 'åĪ©'] +['è¡¡', 'æ°´'] +['æĽ´', 'åĥıæĺ¯'] +['åĪĨéĴŁ', 'å·¦åı³'] +['è¦ı', '模'] +['äºĶ', 'åĪĨéĴŁ'] +['åºĹ', 'åĬłçĽŁ'] +['åĽ°', 'éĽ£'] +['åħ³', 'åģľ'] +['æĢĿ', '绪'] +['åĴ½', 'åĸī'] +['缸', '符'] +['çĥ¦', 'èºģ'] +['æĻĤ', 'æľŁ'] +['åijĪ', 'çı¾'] +['è§£', 'æķ£'] +['诱', '导'] +['éļĶ', 'çĥŃ'] +['çĮ', '¶'] +['åįĹ', 'å®ĭ'] +['æ·±åħ¥', 'äºĨè§£'] +['çŃĶ', 'çĸij'] +['æĺ¼', 'å¤ľ'] +['åįĥ', 'ä¼ı'] +['åĬ³åĬ¡', 'æ´¾éģ£'] +['红', 'è±Ĩ'] +['åĿı', 'äºĭ'] +['çĤ¹', 'æ»´'] +['å°±ä¸ļ', 'å²Ĺä½į'] +['约', 'åIJĪ'] +['åħį', 
'éϤ'] +['éĢĨ', 'åĬ¿'] +['éĩį', 'éĩijå±ŀ'] +['å®ĺ', '宣'] +['ä½İ', 'å»ī'] +['æģ¨', 'ä¸įå¾Ĺ'] +['å¾Ĺ', '天'] +['å¾Ĺ天', 'çĭ¬'] +['å¾Ĺ天çĭ¬', 'åİļ'] +['ä¸Ģå°ģ', 'ä¿¡'] +['æĬ½', 'å¥ĸ'] +['è¾Ĺ', '转'] +['çķĻ', 'å®Ī'] +['çķĻå®Ī', 'åĦ¿ç«¥'] +['çŃĶ', 'åį·'] +['å·¨', 'åŀĭ'] +['æľĢ好', 'ä¸įè¦ģ'] +['æµĻæ±Ł', '大åѦ'] +['æĨ', '¨'] +['æı¡', 'æīĭ'] +['éĴĪ', 'ç»ĩ'] +['æİĴ', '骨'] +['çĤ', '½'] +['å°ģ', 'è£ħ'] +['åįĢ', 'åŁŁ'] +['空æ°Ķ', 'åĩĢåĮĸ'] +['åħī', 'å½±'] +['åĢĴ', 'å¡Į'] +['å§ļ', 'æĺİ'] +['æ¤į', '被'] +['åѦ', 'åīį'] +['åѦåīį', 'æķĻèĤ²'] +['èĬĿ', 'åĬł'] +['èĬĿåĬł', 'åĵ¥'] +['缩', 'æ°´'] +['ä½', 'Ł'] +['åľ¨çº¿', 'åĴ¨è¯¢'] +['èµı', 'æŀIJ'] +['éĿĴ', 'èĽĻ'] +['æĬ±', 'ä½ı'] +['èĮĤ', 'åIJį'] +['åħ¨åĬĽ', 'æīĵéĢł'] +['åįļ士', 'åѦä½į'] +['æ²§', 'å·ŀ'] +['åĻ', '¢'] +['æĿĤ', 'çī©'] +['åĪ»', 'çĶ»'] +['æį', 'ħ'] +['å¾®', 'éĩı'] +['å¾®éĩı', 'åħĥç´ł'] +['ä¸Ģ', 'åĽŀäºĭ'] +['鸡', 'èĤī'] +['åĪ©æ¶¦', 'çİĩ'] +['æīį', 'ç®Ĺ'] +['å¾®', 'å¦Ļ'] +['棵', 'æłij'] +['è´ª', '婪'] +['åĩı', 'å̼'] +['梦', 'å¢ĥ'] +['åı¯', 'è§Ĩ'] +['åı¯è§Ĩ', 'åĮĸ'] +['广大', 'å¸Ĥæ°ij'] +['ä¸ĵä¸ļ', 'ä»İäºĭ'] +['ç»ı', '纬'] +['ç´§', 'çĽ¯'] +['çŁ¥', 'å·±'] +['è¤', 'ļ'] +['æĸĩåĮĸ', 'åºķèķ´'] +['åݦéŨ', 'å¸Ĥ'] +['临', '港'] +['对åħ¶', '羣å®ŀ'] +['岸', 'è¾¹'] +['è¦ĸ', 'çĤº'] +['æĬĹ', 'çĻĮ'] +['åĶIJ', 'å®ĩ'] +['ä¸įå¾Ĺ', 'è¶ħè¿ĩ'] +['å¨ģ', 'æħij'] +['æ¡Ĩæŀ¶', 'åįıè®®'] +['èµ°', 'ç§ģ'] +['åĽ¢', 'å§Ķ'] +['夸', '大'] +['æ¬', 'Ħ'] +['ç¥ŀç»ı', 'ç³»ç»Ł'] +['æijĦå½±', 'ä½ľåĵģ'] +['èĬ', '¥'] +['å®ī', 'åºĨ'] +['æµ·', '滨'] +['æŀĦ', 'æĢĿ'] +['çīµ', 'æĮĤ'] +['åı', '©'] +['éĺIJ', 'æĺİ'] +['éģ', 'ģ'] +['ç²¾', 'æ²¹'] +['ç©´', 'ä½į'] +['æĬ¤', '身'] +['æĬ¤èº«', '符'] +['æĮĩ', 'å°İ'] +['åŃĺåľ¨', 'ä¸Ģå®ļ'] +['å¯Ĥ', 'éĿĻ'] +['æµ·å¤ĸ', 'å¸Ĥåľº'] +['éĿ', '¡'] +['综åIJĪ', 'å¾ģ'] +['ä¿', 'IJ'] +['è¨Ī', 'ç®Ĺ'] +['æĺİ', 'æľĹ'] +['äºļ', 'è¿IJ'] +['äºļè¿IJ', 'ä¼ļ'] +['åīįçŀ»', 'æĢ§'] +['åĮ®', 'ä¹ı'] +['产ä¸ļ', 'æī¶è´«'] +['èĦij', 'æµ·'] +['èĦijæµ·', 'ä¸Ń'] +['åħļçļĦ', 'é¢Ĩ导'] +['åĪĺ', 'éĤ¦'] +['æµģ', 'æĺŁ'] +['æĵ', 'Ĥ'] +['æĶĢ', 'çĻ»'] +['åĴ', 'Ķ'] +['ä¸Ģä¸ĭåŃIJ', 'å°±'] +['è¯Ĭ', 'æ²»'] +['使', 
'åĬ²'] +['åīµ', 'ä½ľ'] +['éĵŃ', 'è®°'] +['éĴ±', 'è´¢'] +['æĹ¥æĬ¥', 'è®°èĢħ'] +['çĥŁ', 'çģ«'] +['èĥľ', 'è´Ł'] +['åįļ', '主'] +['ä¸ŃåĽ½', 'èģĶéĢļ'] +['ç½ijç«Ļ', 'é¦ĸ页'] +['å°±', 'å¤Ł'] +['å°±å¤Ł', 'äºĨ'] +['æīij', 'åħĭ'] +['å±ħ', 'å§Ķä¼ļ'] +['è°', '¬'] +['å®īåħ¨', 'äºĭæķħ'] +['åķĨ', 'çĶ¨è½¦'] +['循çݯ', 'ç»ıæµİ'] +['æ·', '¤'] +['èĢĥ', 'è¯ģ'] +['å®Ŀ', 'èĹı'] +['å®Į', 'ç»ĵ'] +['çłĶåıij', 'æĬķåħ¥'] +['å²', 'ij'] +['æģŃ', 'æķ¬'] +['离', 'éĢĢä¼ij'] +['æ°´', '墨'] +['å©', '¶'] +['è¯Ĺ', 'åı¥'] +['å®ģæ³¢', 'å¸Ĥ'] +['å¼±', 'çĤ¹'] +['åģľ', 'çīĮ'] +['奶', 'æ²¹'] +['å¥ĩ纳', 'æ²³'] +['æĨ', 'Ĥ'] +['社ä¼ļ', 'å®ŀè·µ'] +['è´Ŀ', '壳'] +['çłĤ', 'æµĨ'] +['èι', 'åıª'] +['宣', 'æī¬'] +['综åIJĪ', 'æķ´æ²»'] +['åĤ', 'ij'] +['æ°ijæĹı', 'æĸĩåĮĸ'] +['éĩį', 'çݰ'] +['积', 'æ·Ģ'] +['åħ¬', 'çĦ¶'] +['çħ', 'ī'] +['缸', 'èģļ'] +['æ±', '¾'] +['纹', 'çIJĨ'] +['çĩĥ', 'çħ¤'] +['æŃ¤', 'ç§į'] +['ç¾İ', 'å¦Ĩ'] +['åįĥ', 'çĵ¦'] +['çIJ', 'Ľ'] +['驾驶', 'è¯ģ'] +['éĺ¶', '梯'] +['ä¸Ŀ', 'ä¸Ŀ'] +['å¾Īå¤ļ', 'äºĭæĥħ'] +['åħī', 'éĺ´'] +['èijĹä½ľ', 'æ¬Ĭ'] +['åħ§', 'éĥ¨'] +['çĽ¸å¯¹', 'æĿ¥è¯´'] +['éĸ', 'Ĵ'] +['éľĩ', 'æħij'] +['說', '話'] +['æĨ', 'ij'] +['ç«¥', 'è£ħ'] +['ä½ıæĪ¿', 'åĴĮ'] +['ä½ıæĪ¿åĴĮ', 'åŁİ'] +['å·²ç»ı', 'è¶ħè¿ĩ'] +['侦', 'å¯Ł'] +['çŁ¿', 'çī©'] +['ä¾Ľ', '大家'] +['çī¹', 'éĤĢ'] +['ç¨ĭåºı', 'åijĺ'] +['çķľçī§', 'ä¸ļ'] +['æ°', 'ª'] +['çij', 'ª'] +['åĢĴ', 'åľ¨'] +['åĢĴåľ¨', 'åľ°'] +['æ¯', 'Ģ'] +['梯', 'éĺŁ'] +['æİ¥', 'èijĹ'] +['æĬĹ', 'èıĮ'] +['è¤', 'ĩ'] +['ç¬', 'Ļ'] +['æ¯Ķ', 'ä¸Ĭå¹´'] +['鸡', '汤'] +['åŃ¦ä¹ł', 'æĪIJ绩'] +['æĸij', 'æĸĵ'] +['åħĪ', '导'] +['åĪĹ', '举'] +['è°ĥæŁ¥', 'æĺ¾ç¤º'] +['æ©', '«'] +['ä¹Ŀ', 'åįģ'] +['è°¢', '飵'] +['è·¨è¶Ĭ', 'å¼ı'] +['女æĢ§', 'æľĭåıĭ'] +['èIJ¥åħ»', 'ä»·å̼'] +['å®ŀè·µ', 'ç»ıéªĮ'] +['èĭı', 'å·ŀå¸Ĥ'] +['çĵ¶', 'åŃIJ'] +['æĸ°', 'çļĦä¸Ģ'] +['æĸ°çļĦä¸Ģ', 'å¹´'] +['æĺİ', 'æĻ°'] +['å®ł', 'çα'] +['åŃĹ', '第'] +['æľĹ', '诵'] +['纳', 'æĸ¯'] +['éĢĨ', 'è¡Į'] +['è«ĭ', 'æĤ¨'] +['è«ĭæĤ¨', 'æıIJä¾Ľ'] +['èĥ¸', 'æĢĢ'] +['第ä¸ĥ', 'å±Ĭ'] +['强', '壮'] +['代', 'åŃķ'] +['æ±¶', 'å·Ŀ'] +['å®¶', 'åĸ»'] +['å®¶åĸ»', 'æĪ·'] +['å®¶åĸ»æĪ·', 
'æĻĵ'] +['èħ', '®'] +['åIJ¯', '迪'] +['æĹł', 'éļľç¢į'] +['èĻķçIJĨ', 'åıĬ'] +['æĿ¥', 'åİĨ'] +['å®ŀ', 'åĬ¡'] +['ä¹Ł', 'éļıä¹ĭ'] +['æĬĢèĥ½', 'åŁ¹è®Ń'] +['åѤ', 'ç«ĭ'] +['åī', 'ģ'] +['éĥ´', 'å·ŀ'] +['æĶ¶', 'æķĽ'] +['éł»', 'éģĵ'] +['èį£', '幸'] +['èİ«', 'è¿ĩäºİ'] +['æŃ¤', 'æĻĤ'] +['纪å§Ķ', 'çĽij'] +['纪å§ĶçĽij', 'å§Ķ'] +['缸', 'éĤ»'] +['åı¦ä¸Ģ', 'è¾¹'] +['çªĴ', 'æģ¯'] +['æľīå¾Īå¤ļ', 'ç§į'] +['æ¯ı', 'éĢ¢'] +['éĹ®', 'ä¸ĸ'] +['ç´¯', 'ç´¯'] +['éĿĴæĺ¥', 'æľŁ'] +['è·¯', 'åĨµ'] +['åħĭ', 'èݱ'] +['è¿Ħä»Ĭ', '为æŃ¢'] +['æĥĬ', 'å¥ĩ'] +['è·¨', '度'] +['éħ¿', 'éĢł'] +['åĩ', 'ĭ'] +['è¿ij', 'ä¸īå¹´'] +['åĨħ', '马'] +['åĨħ马', 'å°Ķ'] +['æı', 'į'] +['è¿Ľå±ķ', 'æĥħåĨµ'] +['èĮ', '§'] +['æľīåºı', 'æİ¨è¿Ľ'] +['æĢ»', 'åĨłåĨĽ'] +['æĪIJ绩', 'åįķ'] +['éĽ»è©±', 'åıĬ'] +['ç´§å¯Ĩ', 'ç»ĵåIJĪ'] +['åºĬ', 'ä½į'] +['é¹', 'Ĭ'] +['æķ£åıij', 'çĿĢ'] +['åĭŁ', 'èµĦ'] +['æ°¨', 'éħ¸'] +['彩', 'ç¥ŀ'] +['è®Ģ', 'åıĸ'] +['éĩį', '温'] +['ä¸Ń', 'åŃĺåľ¨çļĦ'] +['ç¾İ', 'éºĹ'] +['ä¸įæĸŃ', 'å¢ŀåĬł'] +['è½®', 'æµģ'] +['æİ¥', 'åIJ¬'] +['å¹´', '产å̼'] +['åįĥ', 'åħĭ'] +['æĪĺåľº', 'ä¸Ĭ'] +['çħ§', 'é¡§'] +['å¹²éĥ¨', 'éĺŁä¼į'] +['åį°', '竳'] +['ä¸Ģèĩ´', 'æĢ§'] +['è¿ŀ', 'å¤ľ'] +['åħħ', 'è£ķ'] +['é»ij', 'åIJįåįķ'] +['åĩĢ', 'æ°´'] +['ä¸Ģ大', 'æĹ©'] +['åĮħ', '袱'] +['çĬ¯', 'è§Ħ'] +['çIJĨ', 'è«ĸ'] +['æŀģ', 'æĺĵ'] +['éª', '¸'] +['å¨ĺ', 'å¨ĺ'] +['åĽ¢', 'åľĨ'] +['亿åħĥ', '以ä¸Ĭ'] +['åĪ©ç͍', 'æĤ¨çļĦ'] +['带æĿ¥', 'æĽ´å¤ļ'] +['ä¸Ń央', '空è°ĥ'] +['æľĪ', 'èĸª'] +['çĮľ', 'æĥ³'] +['åĪº', '客'] +['ä½ľ', 'æģ¯'] +['åįķ', 'è°ĥ'] +['äºĴ', 'åĪ©'] +['å¦Ĥæľī', 'ä¾µæĿĥ'] +['å°ı', 'å·§'] +['åįģ', 'åł°'] +['åĵĪåĵĪ', 'åĵĪåĵĪ'] +['è¾¹', 'éĻħ'] +['æłĩ', 'è¯Ń'] +['åĪĩåħ¥', 'çĤ¹'] +['éĢĨ', 'è¢Ń'] +['è¯ķ', 'åīĤ'] +['绿', 'è±Ĩ'] +['è®', 'ļ'] +['åŁºçĿ£', 'å¾Ĵ'] +['å£', '¬'] +['åħ¨', 'æĺİæĺŁ'] +['éĢī', 'ç§Ģ'] +['èĪĮ', 'å°ĸ'] +['ä¸įåIJĮ', 'ç±»åŀĭ'] +['çĥŁ', 'åĽ±'] +['çģµ', 'æ°Ķ'] +['åĮº', '管å§Ķä¼ļ'] +['åĨľ', 'åī¯'] +['åĨľåī¯', '产åĵģ'] +['èĶļ', 'æĿ¥'] +['沪', 'æĮĩ'] +['åħ»æ®ĸ', 'æĪ·'] +['æĸĹ', 'å¿Ĺ'] +['é¦ĸ', 'é¢Ĩ'] +['è¡Ģ', 'èħ¥'] +['åĬł', 'ç´§'] +['ä¸Ģèĩ´', '好è¯Ħ'] +['第ä¸ī', 'èĬĤ'] 
+['æī¬', 'å°ĺ'] +['交éĢļ', 'æŀ¢çº½'] +['鼶', 'ç¢İ'] +['é»ij', 'æ´ŀ'] +['çľĭ', 'ä¸įæĩĤ'] +['å±ŀ', 'å®ŀ'] +['主', 'åŁİåĮº'] +['å¨', 'Ľ'] +['å¨Ľ', 'æ¨Ĥ'] +['ç¬ij', 'æĦı'] +['èϹ', 'æ¡¥'] +['åIJĦ个', 'çݯèĬĤ'] +['çķ¥', 'å¾®'] +['èĢķ', 'èĢĺ'] +['æľ¬', 'åľºæ¯ĶèµĽ'] +['æĪIJ', 'è´¥'] +['éĢī', 'èĤ¡'] +['èªŀ', 'è¨Ģ'] +['çŃĶ', '辩'] +['èĩª', 'ä¹ł'] +['æ£', 'º'] +['ä¸ĩ', '欧åħĥ'] +['åģľ', 'å·¥'] +['对åħ¶', 'è¿Ľè¡Į'] +['积æŀģ', 'éħįåIJĪ'] +['ä¹¾', 'åĿ¤'] +['å¦ĸ', 'æĢª'] +['èļĮ', 'åŁł'] +['èµĦ产', 'è¯Ħä¼°'] +['è°ĥ', 'çļ®'] +['éϤ', 'å¤ķ'] +['åĽ´', 'å¢Ļ'] +['æľį', 'å½¹'] +['æ·±', 'æ¸Ĭ'] +['é¢Ħ', 'åζ'] +['ç', 'ĥ½'] +['å®ī', '稳'] +['建', 'æŀĦ'] +['çĭĻ', 'åĩ»'] +['主åĭķ', '註åĨĬ'] +['éĥ½æľī', 'èĩªå·±'] +['æİĴåIJį', '第ä¸Ģ'] +['麻', 'è¾£'] +['çĢ', 'ļ'] +['çĥŁèĬ±', 'çĪĨ'] +['çĥŁèĬ±çĪĨ', '竹'] +['èĩªçĦ¶', 'ä¿ĿæĬ¤'] +['ä»Ļ', 'å¢ĥ'] +['为äºĨ', 'éģ¿åħį'] +['åĨ·', 'åºĵ'] +['è§£æĶ¾', 'æĢĿæĥ³'] +['åĪĿ', 'äºĮ'] +['ä½ĵ', 'è´´'] +['é¦ĸ', 'å¯Į'] +['迪', 'æĭľ'] +['æļĤ', 'ç¼ĵ'] +['æĶ¯æĮģ', 'åĬĽåº¦'] +['侦', 'æİ¢'] +['马', 'åĪº'] +['åĮĹ', 'æ±½'] +['ç¹', 'ŀ'] +['è°İ', 'è¨Ģ'] +['éĢ£', 'çºĮ'] +['å·', '³'] +['ä»»ä½ķ', 'æĹ¶åĢĻ'] +['车', 'èģĶç½ij'] +['åįķ', '项'] +['å¸Ń', 'åį·'] +['建çŃij', 'æĿIJæĸĻ'] +['ä¸Ńç§ĭ', 'èĬĤ'] +['ç¡ķ士', 'çłĶç©¶'] +['ç§ģ', 'ç«ĭ'] +['åħļåĴĮ', 'æĶ¿åºľ'] +['æľ¬æ¬¡', '交æĺĵ'] +['èººåľ¨', 'åºĬä¸Ĭ'] +['ç½ijåıĭ', 'è¯Ħ论'] +['å¦', 'Ŀ'] +['害', 'ç¾ŀ'] +['åħ¬ç«ĭ', 'åĮ»éĻ¢'] +['ä¸', 'ŀ'] +['çĶŁçī©', 'è´¨'] +['åºĶ', 'éĤĢ'] +['æĬ½', 'åıĸ'] +['åĩł', 'å¼ł'] +['æijĺ', 'ç¼ĸ'] +['ç»ĺ', 'æľ¬'] +['详', 'è§£'] +['强', '硬'] +['æľĢ', 'åħĪè¿ĽçļĦ'] +['æĭĽ', 'èĤ¡'] +['æĭĽèĤ¡', '书'] +['åįĥ', 'æĸ¹'] +['åįĥæĸ¹', 'çϾ'] +['åįĥæĸ¹çϾ', '计'] +['éħį', 'éŁ³'] +['驾', 'çħ§'] +['å¾ģ', 'æĪĺ'] +['èªĵ', 'è¨Ģ'] +['æĭľ', 'å¸Ī'] +['æĭľå¸Ī', 'åѦ'] +['æĭľå¸ĪåѦ', 'èīº'] +['æĬ±', 'åĽ¢'] +['ç±³', 'ç²ī'] +['éĿŀ常', 'éĢĤåIJĪ'] +['èĪª', 'æµ·'] +['å±¥', '约'] +['åįģåħ«', 'æĿ¡'] +['éĶ»', 'éĢł'] +['éĩįè¦ģ', '举æİª'] +['åıijæĮ¥', 'ä½ľç͍'] +['æ·', 'ļ'] +['人', '社'] +['人社', 'å±Ģ'] +['è¯ķçĤ¹', 'å·¥ä½ľ'] +['éĺľ', 'éĺ³'] +['æ¡ĥ', 'åľĴ'] +['æ°ij', 'ä¼ģ'] +['æ´ģ', 'çϽ'] 
+['è´µ', '宾'] +['åħ¬', '社'] +['è§ī', 'æĤŁ'] +['è®°å¿Ĩ', 'åĬĽ'] +['æľĥåĵ¡', '註åĨĬ'] +['æŃ¤', 'æ¡Ī'] +['麻', 'çĹ¹'] +['çı', 'Ģ'] +['æĸ©', 'èİ·'] +['çĶ·', 'åŃ©åŃIJ'] +['å±ĢéĻIJ', 'äºİ'] +['åĭĺ', 'æŁ¥'] +['åIJĥ', '饱'] +['èĬ¬', 'åħ°'] +['æ£ķ', 'èī²'] +['ç¦ı', 'ç¥ī'] +['çͳ', 'èĬ±'] +['æµ·', 'çĽĹ'] +['èĶ', 'ij'] +['æĸĩ', 'åѸ'] +['æ´»æĢ§', 'çĤŃ'] +['缴', 'éĢļ车'] +['è°¢', 'éĤĢ'] +['躺', 'çĿĢ'] +['åľ', 'ĥ'] +['æ¯ıæĹ¥', 'ç»ıæµİ'] +['åħ¬åħ±', 'æĸĩåĮĸ'] +['讲', 'æķħäºĭ'] +['å¯Ł', 'çľĭ'] +['æĤł', 'éĹ²'] +['åľ°', 'åĿª'] +['æ¶Į', 'çݰåĩº'] +['é«ĺçŃī', 'éĻ¢æł¡'] +['èĮĦ', 'åŃIJ'] +['éĺ²', 'åį«'] +['ä¾ĭ', 'è¡Į'] +['æĺ¾', 'éľ²'] +['æĸ°', '常æĢģ'] +['ç»Ŀ', 'ä½³'] +['å¯Į', 'æ°ij'] +['以', '人æ°ij'] +['以人æ°ij', '为'] +['éĤ¢', 'åı°'] +['å±ķ', 'æ¼Ķ'] +['çϼ', 'å¸ĥ'] +['è´Ł', 'è½½'] +['åģı', '离'] +['æ°¸', 'éģł'] +['éĩįè¦ģ', 'åİŁåĽł'] +['åįıä¼ļ', 'ä¼ļåijĺ'] +['éļ¾', 'æ°ij'] +['çĶŁäº§', '车éĹ´'] +['çģµ', 'åĬ¨'] +['两年', 'åīį'] +['æĸ¹', 'åľĨ'] +['æ´»', 'ä¸ĭåİ»'] +['ä¸ĸçķĮ', 'è§Ĥ'] +['éªĹ', 'åıĸ'] +['ç¾İ', 'è²Į'] +['èĥ½', 'çľĭåĩº'] +['çϼ', 'æı®'] +['è§Ĥ', 'å½±'] +['åī', 'ĥ'] +['åIJĪèµĦ', 'åħ¬åı¸'] +['å©', '§'] +['å¹²', 'æĹ±'] +['åħŃ', '个æľĪ'] +['尤为', 'éĩįè¦ģ'] +['èĤ', '½'] +['秦', 'åĽ½'] +['æīĺ', 'ç¦ı'] +['建çŃij', 'å¸Ī'] +['åįĩ级', 'æĶ¹éĢł'] +['å°ı', 'é¢Ŀ'] +['å°ıé¢Ŀ', '贷款'] +['两个', 'ç»´æĬ¤'] +['æĭį', 'æĭį'] +['åı¯', 'çĸij'] +['æį¢', 'åıĸ'] +['æŃ¦', '士'] +['èµĸ', '以'] +['èµĸ以', 'çĶŁåŃĺ'] +['æĮ', 'ļ'] +['殿', 'åłĤ'] +['èĩªçĦ¶', 'çķĮ'] +['ç£ģ', 'åľº'] +['å¦Ĥä½ķ', 'çľĭå¾ħ'] +['ä»ĬæĹ¥', '头æĿ¡'] +['西', 'åŁŁ'] +['èİ·', 'è¯Ħ'] +['風', 'æł¼'] +['ä¿Ħ', 'åĽ½'] +['æīĵ', 'æĭ¼'] +['å®£ä¼ł', 'çīĩ'] +['å¾Ī', 'æĸ¹ä¾¿'] +['ä¾Ľç»Ļ', 'ä¾§'] +['纪念', 'ç¢ij'] +['毫', 'åħĭ'] +['èĬ³', 'é¦Ļ'] +['å·¥åķĨ', 'éĵ¶è¡Į'] +['请', 'çĤ¹åĩ»'] +['ç¼', 'ª'] +['æĹłæķ°', '次'] +['èį¯', 'å¸Ī'] +['èħ', '¸'] +['游', 'èīĩ'] +['åĮ', '¾'] +['å·¡', 'èĪª'] +['æ²»çIJĨ', 'ä½ĵç³»'] +['èIJ¥éĢł', 'èī¯å¥½'] +['æ··', 'æ·Ĩ'] +['éĢļ', 'çķħ'] +['åĬ³', 'ç´¯'] +['ä»ĵ', 'ä½į'] +['å¢ŀ', 'éķ·'] +['éļIJ', '约'] +['æĿĤå¿Ĺ', '社'] +['åħ»', 'èĤ²'] +['åı¯èĥ½', 'åıijçĶŁ'] +['èĢĥ', 
'試'] +['西', 'ä¾§'] +['åĬł', 'åĢį'] +['主æĮģ', 'åı¬å¼Ģ'] +['çķ¢', '竣'] +['éĹ®', '询'] +['æµ·', 'æ£ł'] +['èĹ', '©'] +['注æĺİ', 'æĿ¥æºIJ'] +['æ£Ģ', 'çĸ«'] +['请', 'åģĩ'] +['æĬļ', 'æij¸'] +['èĵĦ', 'çĶµæ±ł'] +['è·Ł', 'ä¸įä¸Ĭ'] +['çݰ代', '社ä¼ļ'] +['çѹ', 'èµĦ'] +['ä½ĵèĤ²', '彩票'] +['å»¶', '误'] +['è¾Ľ', 'è¾£'] +['éĿ¢', '容'] +['åį°', 'è®°'] +['çģŃ', '亡'] +['ç´ł', 'é£Ł'] +['åħ´', 'èĩ´'] +['éľĢè¦ģ', 'ç͍'] +['éľĢè¦ģç͍', 'åΰ'] +['å®Ŀ', 'å¦Ī'] +['ç£ĭ', 'åķĨ'] +['éļ¶', 'å±ŀ'] +['è´¡çĮ®', 'åĬĽéĩı'] +['åħ¬åħ±', 'èµĦæºIJ'] +['大', 'éĺª'] +['åĨĽ', 'è®Ń'] +['æĤ¬', '念'] +['社ä¼ļ', '稳å®ļ'] +['å¹²äºĭ', 'åĪĽä¸ļ'] +['æľī', 'æĿ¡ä»¶'] +['æľīæĿ¡ä»¶', 'çļĦ'] +['ä¸Ģå¹´', 'ä¸Ģ度'] +['åİ', '¥'] +['强', '奸'] +['豪', '车'] +['æİĮ', 'æŁľ'] +['æ°´åĪ©', 'å·¥ç¨ĭ'] +['å³', 'ª'] +['积æŀģ', 'ä½ľç͍'] +['æµ·', 'æ·Ģ'] +['æµ·æ·Ģ', 'åĮº'] +['çĥŃ', 'æĴŃ'] +['åĿļæĮģ', 'ä¸įæĩĪ'] +['åıĮ', 'èĦļ'] +['绣', 'æĪĺ'] +['ä»»ä½ķ', '人éĥ½'] +['åľ°ä¸ĭ', '室'] +['åĨ¶', 'çĤ¼'] +['è°ħ', 'è§£'] +['æ¸Ķ', 'èι'] +['太éĺ³', 'åŁİ'] +['被', 'æįķ'] +['计ç®Ĺ', 'åύ'] +['西', 'åĮ»'] +['èĪĴ', 'å¿ĥ'] +['æ¡', '¦'] +['éģ', '²'] +['åĬ', 'ij'] +['è¨', 'Ĺ'] +['èİ', 'º'] +['åĸ', '¬'] +['çĵ', '¯'] +['åĺ', 'ĺ'] +['åł', 'ķ'] +['æķ', 'Ŀ'] +['åij', '¦'] +['èĭ', 'ŀ'] +['æŃ', '¹'] +['æĵ', '¬'] +['æ£', 'Ħ'] +['èĪ', 'µ'] +['å¥', 'ª'] +['çļ', 'ĭ'] +['æĶ', '¸'] +['åľ', '©'] +['ç¤', 'Ļ'] +['ç¢', 'ĺ'] +['éı', 'Ī'] +['æĦ', 'ķ'] +['ç¹', '³'] +['èĺ', '¸'] +['è²', 'Ĥ'] +['æ¼', '²'] +['æij', '¹'] +['æĶ', 'Ŀ'] +['åŃ', '¢'] +['èķ', 'Ń'] +['é¨', '°'] +['æ½', '¼'] +['éħ', '°'] +['æĴ', '¥'] +['è¹', '¬'] +['é¨', 'Ļ'] +['è¸', '¹'] +['éģ', 'IJ'] +['çĺ', 'Ģ'] +['èĽ', '¤'] +['æĤ', 'ĸ'] +['çĴ', 'ŀ'] +['ç£', 'IJ'] +['æİ', '°'] +['è¾', 'Ĭ'] +['å¾', 'ij'] +['æİ', 'ĸ'] +['éģ', 'ŀ'] +['éĤ', '¸'] +['éĽ', 'ı'] +['æĨ', 'İ'] +['æľ', '½'] +['çį', '»'] +['ç®', 'Ķ'] +['è¤', '¶'] +['æļ', '¢'] +['æĺ', 'µ'] +['çı', 'Ĥ'] +['æĤ', '¸'] +['åģ', 'µ'] +['åĻ', 'ľ'] +['å£', '¯'] +['æĴ', '®'] +['æģ', 'į'] +['å©', 'ķ'] +['ç¯', '±'] +['éĺ', 'Ļ'] +['çī', 'ł'] +['è£', 'ĺ'] +['è³', '¢'] +['éĩ', 'ľ'] +['éĵ', 'ł'] +['èİ', 
'ĺ'] +['æ®', 'Ĩ'] +['çĻ', '¸'] +['è´', 'ı'] +['ç²', '±'] +['å«', '¡'] +['åĨ', '¢'] +['è¤', 'Ĵ'] +['æĩ', 'Ĭ'] +['éľ', 'ĵ'] +['å¡', 'µ'] +['æĭ', '£'] +['å»', 'Ł'] +['é£', '½'] +['é¢', 'Į'] +['åļ', 'İ'] +['æ·', 'º'] +['èĨ', 'ł'] +['åİ', 'Ń'] +['åļ', 'ĩ'] +['åij', 'ĥ'] +['çĴ', 'ĭ'] +['çŃ', '±'] +['æĭ', '·'] +['èį', '§'] +['éĶ', '°'] +['åŃ', '°'] +['èĵ', 'ĵ'] +['èĨ', '½'] +['æŀ', 'ī'] +['åĸ', '½'] +['çĽ', 'Ķ'] +['çŃ', 'IJ'] +['ç¾', 'ļ'] +['è', 'ħĮ'] +['è¾', '«'] +['æ³', 'ĵ'] +['çĶ', '¬'] +['èŁ', '²'] +['åĸ', 'ª'] +['å¦', 'ĵ'] +['è¬', 'Ģ'] +['çĤ', 'Ĭ'] +['æĽ', 'ľ'] +['æ±', 'IJ'] +['è´', 'Ī'] +['èį', 'Ģ'] +['æĬ', 'ł'] +['ç¢', '¾'] +['æ«', 'ĥ'] +['éŀ', 'ł'] +['èij', 'Ĩ'] +['ç¥', '¯'] +['å½', 'Ŀ'] +['é¦', 'į'] +['åĮ', '£'] +['æľ', 'Ń'] +['åĿ', 'Ĥ'] +['ä¿', 'ij'] +['èĵ', '®'] +['çij', 'Ľ'] +['æī', 'ī'] +['èĩ', 'Ł'] +['è²', '«'] +['çİ', '¥'] +['æ·', '¼'] +['åİ', '²'] +['é³', 'Į'] +['å³', 'Ń'] +['åij', 'Ľ'] +['é', '§'] +['é§', 'IJ'] +['éģ', '·'] +['ä¿', 'ª'] +['æĢ', 'Ĥ'] +['è¾', 'į'] +['å±', 'į'] +['åĭ', 'ģ'] +['å¥', 'ļ'] +['éļ', 'ħ'] +['éĴ', '´'] +['è¼', 'Ŀ'] +['å®', '¦'] +['èIJ', 'ĥ'] +['çĺ', 'ĭ'] +['æĨ', '¶'] +['æĤ', 'ħ'] +['è¾', 'Ļ'] +['åij', 'ľ'] +['çł', 'º'] +['éĢ', 'ŀ'] +['æµ', 'ļ'] +['éĸ', '£'] +['èĸ', '©'] +['éĻ', 'ĭ'] +['çĤ', 'Ļ'] +['èª', 'ķ'] +['ä¸', 'Ł'] +['é¹', '½'] +['ç±', 'Į'] +['è´', '°'] +['éĭ', 'ª'] +['çľ', '©'] +['æĴ', 'IJ'] +['èĨ', 'º'] +['éŀ', 'ĺ'] +['ç¾', '²'] +['çª', '®'] +['ç´', 'IJ'] +['æ®', '´'] +['çº', '¾'] +['èº', 'į'] +['ç´', 'ĭ'] +['çĦ', 'ĸ'] +['çĶ', 'º'] +['çī', '½'] +['çĤ', '¯'] +['ç¼', 'Ķ'] +['æ¯', 'ĵ'] +['å¬', '°'] +['æ¢', '§'] +['äº', 'Ł'] +['è¢', 'ħ'] +['çį', 'Ħ'] +['è¿', '¥'] +['æ¼', '¾'] +['çĿ', 'ij'] +['ç¸', '¾'] +['é¦', 'ĭ'] +['é¤', 'ħ'] +['æ', '¹Ħ'] +['æĺ', 'ĩ'] +['æŀ', 'Ń'] +['èĸ', '°'] +['æŁ', 'ij'] +['æ¦', '»'] +['åĻ', 'Ĺ'] +['åĻ', '´'] +['æ£', '£'] +['åĶ', '§'] +['çĨ', '¹'] +['è¼', '¯'] +['å¢', 'Ł'] +['é²', '²'] +['æĪ', 'Ľ'] +['èī', '¦'] +['èĬ', '®'] +['åĺ', 'Ł'] +['å¸', '¥'] +['å¿', '»'] +['çĮ', 'Ŀ'] +['å¯', 'µ'] +['è³', '¦'] 
+['èĽ', '¾'] +['æ»', '¾'] +['çĤ', 'ķ'] +['éĵ', '¬'] +['èĴ', '¿'] +['éĴ', '¨'] +['çĥ', 'Ļ'] +['ç²', 'ķ'] +['æĥ', '¦'] +['æº', '§'] +['é¢', 'į'] +['éħ', '£'] +['å³', '¦'] +['ç±', 'ģ'] +['çĥ', 'ĥ'] +['åĨ', 'Ĺ'] +['åı', 'ģ'] +['çĽ', '§'] +['ç½', 'µ'] +['éĴ', 'Ĺ'] +['å¬', 'ī'] +['è°', 'ı'] +['ç³', '§'] +['è¾', 'Ń'] +['æ·', '¬'] +['èŁ', 'Ĵ'] +['è¯', '©'] +['è¦', 'ĥ'] +['çĻ', 'ĸ'] +['é½', 'Ĵ'] +['çĪ', 'IJ'] +['ç®', 'į'] +['ç¼', 'İ'] +['ç£', 'º'] +['è¯', '«'] +['è¤', '²'] +['æĵ', 'ł'] +['èIJ', '¦'] +['çĿ', '¬'] +['è°', 'į'] +['éĦ', '°'] +['æł', '¾'] +['é¡', 'ı'] +['ç¸', '±'] +['æ¡', '¨'] +['éĨ', '¬'] +['è¥', '²'] +['è®', 'ª'] +['å©', 'º'] +['èį', 'Ł'] +['åĮ', 'Ŀ'] +['çĨ', 'ł'] +['èĽ', 'Ĭ'] +['æ¸', 'ļ'] +['å´', '½'] +['é²', '¤'] +['åķ', '°'] +['åĮ', 'ķ'] +['ä¸', 'IJ'] +['è®', '¥'] +['åı', '½'] +['åı', '¼'] +['çļ', '¿'] +['è¿', 'Ĥ'] +['åIJ', 'Ĩ'] +['å±', '¹'] +['èĩ', '¼'] +['è®', '¹'] +['é©', '®'] +['çº', '«'] +['æ±', 'ŀ'] +['æĬ', '¡'] +['èĭ', 'ĩ'] +['åIJ', 'ł'] +['åIJ', 'Ń'] +['åIJ', '®'] +['å²', 'ĸ'] +['ä½', 'ĥ'] +['çĭ', 'Ī'] +['åº', 'ĩ'] +['åIJ', 'Ŀ'] +['éĹ', '°'] +['æ±', '¹'] +['å¿', '±'] +['æĭ', 'Ħ'] +['æĭ', 'Ĺ'] +['èĮ', 'ī'] +['èĭ', 'Ľ'] +['èĮ', 'ģ'] +['çŁ', '¾'] +['èĻ', 'ı'] +['åij', '»'] +['åĴ', 'Ħ'] +['å¿', '¿'] +['èĤ', '®'] +['çĭ', 'ŀ'] +['çĸ', 'Ł'] +['çĸ', 'Ļ'] +['çĸ', 'ļ'] +['æ³', 'ŀ'] +['å¸', 'ļ'] +['å±', 'ī'] +['è¿', '¢'] +['é©', '¹'] +['ç', 'İ·'] +['çıĬ', 'ó'] +['çıĬó', 'ł'] +['çıĬół', 'Ħ'] +['çıĬółĦ', 'ģ'] +['æĮ', 'İ'] +['æĭ', '´'] +['åŀ', 'Ľ'] +['èį', '¤'] +['æ®', 'ĥ'] +['çĽ', '¹'] +['åĵ', 'Ĩ'] +['è´', '»'] +['æ¯', '¡'] +['çĭ', '°'] +['çĭ', '¡'] +['æŁ', 'Ĵ'] +['æģ', 'ĥ'] +['è¯', '¬'] +['è¢', 'Ħ'] +['è¯', '²'] +['èļ', '¤'] +['èĢ', 'Ļ'] +['åŁ', 'Ĥ'] +['æį', 'İ'] +['æį', 'Į'] +['æ¢', 'Ĩ'] +['é', 'ħĮ'] +['çł', '¾'] +['æ®', 'ī'] +['åĶ', 'ł'] +['æĻ', 'Į'] +['èļ', '£'] +['èļ', 'ª'] +['èļ', 'ĵ'] +['é¸', '¯'] +['åĶ', 'ģ'] +['åĶ', 'Ĩ'] +['åĢ', 'Ķ'] +['èĪ', 'Ģ'] +['è±', 'º'] +['èĥ', '°'] +['é¸', 'µ'] +['é¸', '³'] +['é¦', 'ģ'] +['ç¾', 'Ķ'] +['æ¶', '£'] +['æ¶', 'ķ'] 
+['æĤ', '¯'] +['è¯', '½'] +['è°', 'Ĩ'] +['ç¥', 'Ł'] +['ç»', '¢'] +['æį', 'º'] +['æį', '¶'] +['æį', '»'] +['æİ', 'Ĥ'] +['èı', 'ł'] +['èIJ', '¤'] +['éħ', 'Ĺ'] +['çľ', '¶'] +['åķ', 'Ħ'] +['èļ', '¯'] +['èĽ', 'Ģ'] +['åĶ', '¬'] +['å¸', '·'] +['éĵ', 'IJ'] +['éĵ', 'Ľ'] +['åģ', 'İ'] +['å¾', 'Ļ'] +['èĦ', '¯'] +['è±', 'ļ'] +['çĮ', 'ĸ'] +['çĹ', 'Ĭ'] +['æ¶', '®'] +['æĥ', 'Ń'] +['æĤ', '´'] +['æĥ', 'ĭ'] +['è°', 'ļ'] +['æı', '©'] +['æIJ', 'Ģ'] +['æIJ', 'Ķ'] +['æ¦', 'Ķ'] +['æ¤', 'Ń'] +['éĽ', '³'] +['åĸ', '³'] +['è·', 'Ľ'] +['èľ', 'ĵ'] +['èľ', 'Ĵ'] +['é¹', 'ĥ'] +['éĶ', 'Ħ'] +['çĶ', '¥'] +['çŃ', 'ı'] +['çĮ', '©'] +['çĮ', '¬'] +['çĮ', '¾'] +['çĹ', '¢'] +['çĹ', 'ª'] +['æĥ', '°'] +['çª', 'ĺ'] +['è°', '¤'] +['éļ', 'ĺ'] +['å©', '¿'] +['é¹', 'ī'] +['çij', 'Ļ'] +['æĸ', 'Ł'] +['æ¤', '¿'] +['éħ', 'ª'] +['éĽ', '¹'] +['åĹ', '¦'] +['è·', '·'] +['è·', 'º'] +['è·', '¤'] +['èľ', 'Ī'] +['èľ', 'Ĺ'] +['å¹', 'Į'] +['é¦', 'ı'] +['èª', 'Ĭ'] +['æ¼', 'ĵ'] +['è¤', 'Ĥ'] +['èĶ', 'Ĺ'] +['èĶ', '¼'] +['åħ', '¢'] +['è£', '³'] +['èľ', '»'] +['èĿ', 'ĩ'] +['åĺ', 'Ģ'] +['éĶ', '¹'] +['ç®', 'ķ'] +['ç®', '©'] +['çĺ', '©'] +['çĺ', 'Ł'] +['æ¼', '±'] +['å¯', '¥'] +['éª', '¡'] +['æĴ', 'µ'] +['æĴ', '¬'] +['è±', 'Į'] +['åĺ', '¹'] +['èĿ', 'ł'] +['èĿ', 'Į'] +['èĿ', 'Ĺ'] +['èĿ', 'Ļ'] +['éķ', 'IJ'] +['ç¨', '¼'] +['ç¯', 'ĵ'] +['èĨ', 'Ľ'] +['é²', '«'] +['çĺ', 'ª'] +['é²', '¨'] +['æĨ', 'Ķ'] +['ç¿', '©'] +['è¤', '¥'] +['ç¼', 'Ń'] +['åĻ', '©'] +['çĵ', '¢'] +['éľ', 'İ'] +['è¸', '±'] +['è¹', 'Ĥ'] +['èŁ', 'Ĩ'] +['é¹', '¦'] +['ç¯', '¡'] +['çĺ', '¸'] +['çª', '¿'] +['ç¼', '°'] +['èĹ', 'IJ'] +['è¹', 'ĭ'] +['èŁ', 'ĭ'] +['èŁ', 'Ģ'] +['èµ', '¡'] +['èĩ', 'Ĭ'] +['é³', 'Ħ'] +['ç³', 'ł'] +['æĩ', '¦'] +['åļ', '£'] +['éķ', '°'] +['é³', 'į'] +['ç°', '¸'] +['çĻ', '£'] +['é³', 'ĸ'] +['é¬', 'ĵ'] +['èł', 'ķ'] +['éľ', '¹'] +['èº', 'ı'] +['é»', '¯'] +['çĵ', '¤'] +['çŁ', 'Ĺ'] +['ä¹', 'Ĥ'] +['ä¹', 'ľ'] +['åħ', 'Ģ'] +['å¼', 'ĭ'] +['åŃ', 'ij'] +['åŃ', 'ĵ'] +['å¹', 'º'] +['äº', 'ĵ'] +['å', '»¿'] +['ä¸', 'ı'] +['åį', 'ħ'] +['ä»', 'ĥ'] +['ä»', 'ī'] +['ä»', 'Ĥ'] 
+['åĪ', 'Ī'] +['çĪ', '»'] +['åį', 'ŀ'] +['éĹ', '©'] +['è®', '£'] +['å¤', '¬'] +['çĪ', '¿'] +['æ¯', 'ĭ'] +['éĤ', 'Ĺ'] +['éĤ', 'Ľ'] +['èī', '½'] +['èī', '¿'] +['åı', 'µ'] +['ä¸', 'ķ'] +['åĮ', 'ľ'] +['åĬ', '¢'] +['åį', 'Ł'] +['åı', '±'] +['åı', '»'] +['ä»', '¨'] +['ä»', 'Ł'] +['ä»', '¡'] +['ä»', '«'] +['ä»', 'ŀ'] +['åį', '®'] +['æ°', 'IJ'] +['çĬ', '°'] +['åĪ', 'į'] +['éĤ', 'Ŀ'] +['éĤ', 'Ļ'] +['è®', '¦'] +['è®', '§'] +['è®', '«'] +['å°', '»'] +['éĺ', '¡'] +['å°', 'ķ'] +['å¼', 'ģ'] +['èĢ', 'Ĵ'] +['çİ', 'İ'] +['çİ', 'ij'] +['åľ', '¬'] +['æī', '¦'] +['åľ', 'ª'] +['åľ', '¹'] +['æī', 'ª'] +['åľ', '®'] +['åľ', '¯'] +['èĬ', 'Ĭ'] +['èĬ', 'į'] +['èĬ', 'Ħ'] +['èĬ', '¨'] +['èĬ', 'ij'] +['èĬ', 'İ'] +['èĬ', 'Ĺ'] +['äº', 'ĺ'] +['åİ', 'į'] +['å¤', '¼'] +['æĪ', 'į'] +['å°', '¥'] +['ä¹', '©'] +['æĹ', '¯'] +['æĽ', '³'] +['å²', 'Į'] +['å±', 'º'] +['åĩ', '¼'] +['åĽ', '¡'] +['éĴ', 'ĩ'] +['ç¼', '¶'] +['æ°', 'ĺ'] +['æ°', 'ĸ'] +['çī', 'Ŀ'] +['ä¼', 'İ'] +['ä¼', 'Ľ'] +['ä¼', '¢'] +['ä½', '¤'] +['ä»', 'µ'] +['ä¼', '¥'] +['ä¼', '§'] +['ä¼', 'ī'] +['ä¼', '«'] +['åĽ', 'Ł'] +['æ±', 'Ĩ'] +['åĪ', 'ĸ'] +['å¤', 'Ļ'] +['æĹ', '®'] +['åĪ', 'İ'] +['çĬ', '·'] +['çĬ', '¸'] +['èĪ', 'Ľ'] +['åĩ', '«'] +['é', 'Ĥ¬'] +['é¥', '§'] +['æ±', 'Ķ'] +['æ±', 'ľ'] +['æ±', 'Ĭ'] +['å¿', 'ĸ'] +['å¿', 'ı'] +['è®', '´'] +['è®', 'µ'] +['è®', '·'] +['èģ', '¿'] +['èī', '®'] +['åİ', '¾'] +['å¦', 'ģ'] +['çº', '¡'] +['çº', '£'] +['çº', '¥'] +['çº', '¨'] +['çİ', 'ķ'] +['çİ', 'Ļ'] +['æĬ', 'Ł'] +['æĬ', 'Ķ'] +['åľ', '»'] +['åĿ', 'į'] +['æĬ', 'ĥ'] +['ã§', 'IJ'] +['èĬ', '«'] +['èĬ', '¾'] +['èĭ', 'Ī'] +['èĭ', '£'] +['èĭ', 'ĭ'] +['èĬ', '¼'] +['èĭ', 'Į'] +['èĭ', 'ģ'] +['èĬ', '©'] +['èĬ', 'ª'] +['èĬ', '¡'] +['èĬ', 'Ł'] +['èĭ', 'Ħ'] +['èĭ', 'İ'] +['èĭ', '¡'] +['æĿ', 'Į'] +['æĿ', 'ĵ'] +['æĿ', 'Ī'] +['å¿', 'ij'] +['åŃ', 'Ľ'] +['éĤ', '´'] +['éĤ', '³'] +['å¥', 'ģ'] +['è±', 'ķ'] +['å¿', 'Ĵ'] +['æ¬', '¤'] +['è½', '«'] +['è¿', 'ĵ'] +['éĤ', '¶'] +['å¿', 'IJ'] +['åį', '£'] +['éĤ', 'º'] +['æĹ', '°'] +['åij', 'ĭ'] +['åij', 'Ĵ'] +['åij', 'ĵ'] +['åij', 'Ķ'] 
+['åij', 'ĸ'] +['æĹ', '¸'] +['åIJ', '¡'] +['èĻ', '¬'] +['åIJ', '½'] +['åIJ', '£'] +['åIJ', '²'] +['å¸', 'ı'] +['å²', 'Ī'] +['å²', 'ĺ'] +['åħ', 'ķ'] +['åĽ', 'µ'] +['åĽ', '«'] +['éĴ', 'Ĭ'] +['éĴ', 'ĭ'] +['é', 'ĴĮ'] +['è¿', 'ķ'] +['æ°', 'Ļ'] +['æ°', 'ļ'] +['çī', '¤'] +['ä½', 'ŀ'] +['ä½', 'ļ'] +['ä½', 'Ŀ'] +['ä½', 'Ĺ'] +['å½', '·'] +['ä½', 'ĺ'] +['ä½', '¥'] +['è±', '¸'] +['åĿ', 'Į'] +['èĤ', 'Ł'] +['å¥', 'Ĥ'] +['åĬ', '¬'] +['çĭ', 'ģ'] +['é¸', 'ł'] +['é¥', '¨'] +['é¥', '©'] +['é¥', '«'] +['é¥', '¬'] +['åº', 'ij'] +['åº', 'ĭ'] +['çĸ', 'Ķ'] +['çĸ', 'ĸ'] +['èĤ', 'ĵ'] +['éĹ', '±'] +['éĹ', '³'] +['çĤ', 'Ģ'] +['æ²', '£'] +['æ²', 'ħ'] +['æ²', 'Ķ'] +['æ²', '¤'] +['æ²', 'ı'] +['æ²', 'ļ'] +['æ±', '©'] +['æ±', '¨'] +['æ²', '¨'] +['æ±', '´'] +['æ²', 'Ĩ'] +['æ²', '©'] +['æ³', 'IJ'] +['æĢ', 'ĥ'] +['æĢ', 'Ħ'] +['å¿', '¡'] +['å¿', '¤'] +['å¿', '¾'] +['æĢ', 'ħ'] +['å¿', 'ª'] +['æĢ', 'Ĩ'] +['å¿', 'Ń'] +['å¿', '¸'] +['è¯', 'Ĥ'] +['è¯', 'ĥ'] +['è¯', 'ħ'] +['è¯', 'ĭ'] +['è¯', 'Į'] +['è¯', 'Ĵ'] +['éĻ', 'Ĥ'] +['éĻ', 'ī'] +['å¦', '©'] +['å¦', 'ª'] +['å¦', '£'] +['å¦', 'Ĺ'] +['å¦', '«'] +['å§', 'Ĵ'] +['å¦', '¤'] +['åĬ', 'Ń'] +['åĪ', 'Ń'] +['éĤ', '°'] +['çº', 'Ń'] +['çº', '°'] +['çº', '´'] +['çİ', '¡'] +['çİ', 'Ń'] +['çİ', 'ł'] +['çİ', '¢'] +['çİ', '¦'] +['çĽ', 'Ĥ'] +['å¿', 'Ŀ'] +['åĮ', '¦'] +['åĿ', '©'] +['æĬ', '¨'] +['æĭ', '¤'] +['åĿ', '«'] +['æĭ', 'Ī'] +['åŀ', 'Ĩ'] +['æĬ', '»'] +['åĬ', '¼'] +['æĭ', 'ĥ'] +['æĭ', 'Ĭ'] +['åĿ', '¼'] +['åĿ', '»'] +['ã§', 'Ł'] +['åĿ', '¨'] +['åĿ', 'Ń'] +['æĬ', '¿'] +['åĿ', '³'] +['èĭ', '·'] +['èĭ', '¤'] +['èĮ', 'ı'] +['èĭ', '«'] +['èĭ', 'ľ'] +['èĭ', '´'] +['èĭ', 'Ĵ'] +['èĭ', 'ĺ'] +['èĮ', 'Į'] +['èĭ', '»'] +['èĭ', 'ĵ'] +['èĮ', 'ļ'] +['èĮ', 'Ĩ'] +['èĮ', 'ij'] +['èĮ', 'ĵ'] +['èĮ', 'Ķ'] +['èĮ', 'ķ'] +['è', 'ĮĢ'] +['èĭ', 'ķ'] +['æŀ', '¥'] +['æŀ', 'ĩ'] +['æĿ', 'ª'] +['æĿ', '³'] +['æŀ', '§'] +['æĿ', 'µ'] +['æŀ', '¨'] +['æŀ', 'ŀ'] +['æŀ', 'ĭ'] +['æĿ', '»'] +['æĿ', '·'] +['æĿ', '¼'] +['çŁ', '¸'] +['ç', 'łĢ'] +['åĪ', '³'] +['å¥', 'Ħ'] +['æ®', 'ģ'] +['éĥ', 'ı'] +['è½', 'Ń'] 
+['éĥ', 'ħ'] +['é¸', '¢'] +['çĽ', '±'] +['æĺ', 'Ļ'] +['æĿ', '²'] +['æĺ', 'ĥ'] +['åĴ', 'Ĥ'] +['åij', '¸'] +['æĺ', 'Ģ'] +['æĹ', '»'] +['æĺ', 'ī'] +['çĤ', 'ħ'] +['çķ', 'Ģ'] +['èĻ', '®'] +['åĴ', 'Ģ'] +['åij', '·'] +['é»', '¾'] +['åij', '±'] +['åij', '¤'] +['åĴ', 'Ĩ'] +['åĴ', 'Ľ'] +['åij', '¶'] +['åij', '£'] +['åĴ', 'Ŀ'] +['å²', '¢'] +['å²', '¿'] +['å²', '¬'] +['å²', '«'] +['å¸', 'Ļ'] +['å²', '£'] +['å³', 'ģ'] +['åĪ', '¿'] +['å²', '·'] +['åī', 'Ģ'] +['å¸', 'Ķ'] +['å³', 'Ħ'] +['æ²', 'ĵ'] +['åĽ', '¹'] +['ç½', 'Ķ'] +['éĴ', 'į'] +['éĴ', 'İ'] +['éĴ', 'ı'] +['éĴ', 'Ĵ'] +['éĴ', 'ķ'] +['éĤ', '¾'] +['è¿', '®'] +['çī', '¦'] +['ç«', 'º'] +['è¿', '¤'] +['ä½', '¶'] +['ä¾', 'ij'] +['ä¾', 'ī'] +['èĩ', '¾'] +['ä¾', 'Ĺ'] +['ä¾', 'ı'] +['ä¾', '©'] +['ä½', '»'] +['ä½', '¾'] +['ä¾', 'ª'] +['ä½', '¼'] +['ä½', '¯'] +['ä¾', '¬'] +['å¸', 'Ľ'] +['ä¾', 'Ķ'] +['å¾', 'Ĥ'] +['åĪ', '½'] +['éĥ', 'Ħ'] +['ç±', '´'] +['çĵ', '®'] +['æĪ', 'Ĺ'] +['èĤ', '¼'] +['äı', 'Ŀ'] +['èĤ', '±'] +['èĤ', '«'] +['è¿', '©'] +['éĥ', 'ĩ'] +['çĭ', 'İ'] +['çĭ', 'į'] +['çĭ', 'Ĵ'] +['åĴ', 'İ'] +['é¥', '¯'] +['é¥', '´'] +['åĨ', '½'] +['åĨ', '¼'] +['åº', 'ĸ'] +['çĸ', 'ł'] +['çĸ', 'Ŀ'] +['åħ', 'ĸ'] +['åĬ', '¾'] +['ð¬', 'ī'] +['ð¬ī', '¼'] +['çĤ', 'ĺ'] +['çĤ', 'Ŀ'] +['çĤ', 'Ķ'] +['æ³', 'Ķ'] +['æ²', 'Ń'] +['æ³', '·'] +['æ³', '±'] +['æ³', 'ħ'] +['æ³', 'ł'] +['æ³', 'º'] +['æ³', 'ĸ'] +['æ³', '«'] +['æ³', '®'] +['æ²', '±'] +['æ³', '¯'] +['æĢ', 'Ļ'] +['æĢ', 'µ'] +['æĢ', '¦'] +['æĢ', 'Ľ'] +['æĢ', 'ı'] +['æĢ', 'į'] +['ã', '¤'] +['ã¤', 'ĺ'] +['æĢ', '©'] +['æĢ', '«'] +['æĢ', '¿'] +['å®', 'ķ'] +['ç©', '¹'] +['å®', 'ĵ'] +['è¯', 'ĵ'] +['è¯', 'Ķ'] +['è¯', 'ĸ'] +['è¯', 'ĺ'] +['æĪ', '¾'] +['è¯', 'Ļ'] +['æĪ', '½'] +['éĥ', 'ĵ'] +['è¡', '©'] +['ç¥', 'Ĩ'] +['ç¥', 'İ'] +['ç¥', 'ĩ'] +['è¯', 'ľ'] +['è¯', 'Ł'] +['è¯', '£'] +['è¯', '¤'] +['è¯', '§'] +['è¯', '¨'] +['æĪ', 'ķ'] +['éĻ', 'Ķ'] +['å¦', '²'] +['å¦', '¯'] +['å§', 'Ĺ'] +['å¸', 'ij'] +['åŃ', '¥'] +['é©', '½'] +['èĻ', '±'] +['è¿', '¨'] +['ç»', 'Ģ'] +['ç»', 'ģ'] +['ç»', 'Ĥ'] +['é©', '·'] +['é©', '¸'] 
+['ç»', 'ī'] +['ç»', 'Į'] +['éª', 'Ģ'] +['çĶ', '¾'] +['çı', 'ı'] +['çı', 'IJ'] +['çı', 'ij'] +['çİ', '³'] +['é¡', '¸'] +['çı', 'ī'] +['çı', 'Ī'] +['æĭ', '®'] +['åŀ', 'Ń'] +['æĮ', 'Ŀ'] +['æĮ', 'ŀ'] +['åŀ', '¤'] +['èµ', '³'] +['è´', '²'] +['åŀ', '±'] +['åŀ', 'Į'] +['åŀ', '§'] +['åŀ', 'ĵ'] +['æĮ', '¦'] +['åŀ', 'ł'] +['èį', 'ļ'] +['èį', 'ij'] +['è´', '³'] +['èį', 'ľ'] +['èİ', 'Ĵ'] +['èĮ', '¼'] +['èĮ', '´'] +['èĮ', '±'] +['èİ', 'Ľ'] +['èį', 'ŀ'] +['èĮ', '¯'] +['èį', 'ı'] +['èį', 'ĩ'] +['èį', 'ĥ'] +['èį', 'ł'] +['èĮ', 'Ń'] +['åŀ', '©'] +['èį', '¥'] +['èį', '¦'] +['èį', '¨'] +['èį', '©'] +['åī', 'ĭ'] +['èį', 'ª'] +['èį', '¬'] +['èį', '®'] +['æŁ', '°'] +['æł', 'ī'] +['æŁ', 'ĺ'] +['æł', 'Ĭ'] +['æŁ', '©'] +['æŀ', '°'] +['æł', 'Į'] +['æŁ', 'Ļ'] +['æŀ', 'µ'] +['æŀ', '³'] +['æŁ', 'ŀ'] +['æŁ', 'Ŀ'] +['æł', 'Ģ'] +['æŁ', '¢'] +['æł', 'İ'] +['æŁ', 'Ī'] +['æŁ', 'ģ'] +['æŀ', '·'] +['æŁ', '½'] +['åī', 'Į'] +['éħ', 'Ĭ'] +['éĥ', '¦'] +['çĶ', 'Ń'] +['çł', 'Ĺ'] +['çł', 'ĺ'] +['çł', 'Ĵ'] +['æĸ', '«'] +['çł', 'Ń'] +['çł', 'ľ'] +['èĢ', '·'] +['èĻ', 'º'] +['æ®', 'Ĥ'] +['æ®', 'ĩ'] +['æ®', 'Ħ'] +['è½', '±'] +['è½', '²'] +['è½', '³'] +['è½', '¶'] +['è½', '¸'] +['èĻ', '¿'] +['æ¯', 'ĸ'] +['è§', 'ĩ'] +['å°', 'ľ'] +['åĵ', 'IJ'] +['çľ', 'Ħ'] +['çľ', 'į'] +['ðł', '³'] +['ðł³', 'IJ'] +['éĥ', '¢'] +['çľ', 'ĩ'] +['çľ', 'Ĭ'] +['çľ', 'Ī'] +['ç¦', 'º'] +['åĵ', 'Ĥ'] +['åĴ', '´'] +['æĽ', '·'] +['æĺ', '´'] +['åĴ', '¦'] +['åĵ', 'ĵ'] +['åĵ', 'Ķ'] +['çķ', 'İ'] +['åij', '²'] +['èĥ', 'Ħ'] +['çķ', 'ĭ'] +['çķ', 'Ī'] +['èĻ', '¼'] +['èĻ', '»'] +['çĽ', 'ħ'] +['åĴ', '£'] +['åĵ', 'ķ'] +['åī', 'IJ'] +['éĥ', '§'] +['åĴ', '»'] +['åĽ', '¿'] +['åĴ', '¿'] +['åĵ', 'Į'] +['åĵ', 'Ļ'] +['åĵ', 'ļ'] +['åĴ', '©'] +['åĴ', '¤'] +['åĵ', 'Ŀ'] +['åĵ', 'ı'] +['åĵ', 'ŀ'] +['å³', '£'] +['ç½', 'ĺ'] +['å³', 'Ĵ'] +['å³', '¤'] +['å³', 'ĭ'] +['è´', '¶'] +['éĴ', 'ļ'] +['éĴ', '¡'] +['éĴ', '£'] +['éĴ', '¤'] +['éĴ', '«'] +['æ°', '¡'] +['çī', '¯'] +['éĥ', 'ľ'] +['ç§', 'ķ'] +['ç§', 'Ń'] +['ç«', '½'] +['ç¬', 'Ī'] +['ä¿', '¦'] +['ä¿', '¨'] +['ä¿', 'ħ'] 
+['åı', 'Ł'] +['åŀ', '¡'] +['çī', '®'] +['ä¿', '£'] +['ä¿', 'ļ'] +['çļ', 'Ī'] +['ä¿', 'Ł'] +['éĢ', 'ħ'] +['å¾', 'ĩ'] +['å¾', 'ī'] +['èĪ', '¢'] +['éĥ', 'Ĺ'] +['ä¿', 'İ'] +['éĥ', '¤'] +['çĪ', '°'] +['éĥ', 'Ľ'] +['çĵ', '´'] +['èĥ', '¨'] +['èĥ', 'ª'] +['èĥ', 'Ľ'] +['èĥ', 'Ĥ'] +['èĥ', 'Ļ'] +['èĥ', 'į'] +['èĥ', 'Ĺ'] +['è', 'ĥĿ'] +['æľ', 'IJ'] +['èĥ', '«'] +['é¸', '¨'] +['åĮ', 'į'] +['çĭ', '¨'] +['çĭ', '¯'] +['é£', 'ij'] +['çĭ', '©'] +['çĭ', '²'] +['è¨', 'ĩ'] +['éĢ', 'Ħ'] +['æĺ', 'Ŀ'] +['é¥', '·'] +['é¥', '¸'] +['é¥', '¹'] +['åŃ', 'ª'] +['å¨', 'Ī'] +['åº', '¥'] +['çĸ', '¬'] +['çĸ', '£'] +['çĸ', '¥'] +['çĸ', 'Ń'] +['åº', 'ł'] +['ç«', 'ij'] +['é£', 'Ĵ'] +['éĹ', '¼'] +['éĹ', '¾'] +['éĹ', '¿'] +['éĺ', 'Ĥ'] +['ç¾', 'ij'] +['è¿', '¸'] +['ç±', '¼'] +['éħ', 'ĭ'] +['çĤ', '»'] +['çĥ', 'Ģ'] +['çĤ', '·'] +['æ´', '±'] +['æ´', '¹'] +['æ´', '§'] +['æ´', 'Į'] +['æµ', 'ĥ'] +['æ´', 'ĩ'] +['æ´', 'Ħ'] +['æ´', 'Ļ'] +['æ¶', 'İ'] +['æ´', 'İ'] +['æ´', '«'] +['æµ', 'į'] +['æ´', '®'] +['æ´', 'µ'] +['æµ', 'Ĵ'] +['æµ', 'Ķ'] +['æµ', 'ķ'] +['æ´', '³'] +['æģ', '¸'] +['æģ', 'ĵ'] +['æģ', '¹'] +['æģ', '«'] +['æģ', '»'] +['æģ', 'Ĥ'] +['æģ', 'ª'] +['æģ', '½'] +['å®', '¥'] +['æī', 'ĥ'] +['è¡', '²'] +['è¡', '½'] +['è¡', '¿'] +['è¢', 'Ĥ'] +['ç¥', 'ľ'] +['ç¥', 'ĵ'] +['ç¥', 'ļ'] +['è¯', '®'] +['ç¥', 'Ĺ'] +['ç¥', '¢'] +['è¯', '°'] +['è¯', '³'] +['é¸', '©'] +['æĺ', '¶'] +['åĴ', '«'] +['å¼', 'Ń'] +['çī', 'ģ'] +['èĥ', '¥'] +['éĻ', 'Ł'] +['å§', '®'] +['å¨', 'Ĩ'] +['å§', 'Ŀ'] +['å§', '£'] +['å§', 'ĺ'] +['å§', '¹'] +['ç¾', '¿'] +['çĤ', '±'] +['çŁ', 'ľ'] +['ç»', 'Ķ'] +['éª', 'ģ'] +['éª', 'ħ'] +['ç»', 'Ĺ'] +['ç»', 'Ľ'] +['éª', 'Ī'] +['èĢ', 'ĸ'] +['æĮ', 'Ī'] +['çı', '¥'] +['çı', 'Ļ'] +['é¡', '¼'] +['çı', '°'] +['çı', '©'] +['çı', '§'] +['çı', '£'] +['çı', 'ŀ'] +['çIJ', '¤'] +['çı', '²'] +['æģ', 'ļ'] +['åŁ', 'ķ'] +['åŁ', 'ĺ'] +['åŁ', 'Ļ'] +['åŁ', 'ļ'] +['æĮ', '¹'] +['èĢ', 'Ĩ'] +['èĢ', 'Ħ'] +['åŁ', 'Ĵ'] +['æį', 'ĭ'] +['è´', '½'] +['åŀ', '¸'] +['æį', 'ĥ'] +['çĽ', 'į'] +['èį', '¸'] +['èİ', '³'] +['èİ', '´'] +['èİ', 'ª'] 
+['èİ', 'ł'] +['èİ', 'ľ'] +['èİ', 'ħ'] +['èį', '¼'] +['èİ', '©'] +['èį', '½'] +['èİ', '¸'] +['èį', '»'] +['èİ', '¨'] +['é¸', 'ª'] +['èİ', '¼'] +['æł', '²'] +['æł', '³'] +['æ¡', '¡'] +['æ¡', 'İ'] +['æ¡', '¢'] +['æ¡', '¤'] +['æ¢', 'ĥ'] +['æł', 'Ŀ'] +['æ¡', 'ķ'] +['æ¡', 'ģ'] +['æ¡', '§'] +['æ¡', 'ħ'] +['æł', 'Ł'] +['æ¡', 'ī'] +['æł', '©'] +['éĢ', 'ij'] +['éĢ', 'ĭ'] +['å½', '§'] +['é¬', '²'] +['è±', 'ĩ'] +['éħ', 'IJ'] +['éĢ', '¦'] +['åİ', 'Ŀ'] +['åŃ', '¬'] +['çł', 'Ŀ'] +['çł', '¹'] +['çł', '§'] +['çł', '·'] +['çł', 'Ł'] +['çł', '¼'] +['çł', '¥'] +['çł', '£'] +['åī', 'ŀ'] +['çł', '»'] +['è½', '¼'] +['è½', '¾'] +['è¾', 'Ĥ'] +['é¸', '«'] +['è¶', '¸'] +['é¾', 'Ģ'] +['é¸', '¬'] +['èĻ', 'Ķ'] +['çľ', '¬'] +['åĶ', 'Ľ'] +['çľ', 'Ļ'] +['åĵ', '§'] +['åĵ', '½'] +['æĻ', 'ģ'] +['é¸', '®'] +['è¶', 'µ'] +['è¶', '¿'] +['çķ', 'Ľ'] +['èļ', '¨'] +['èļ', 'ľ'] +['èļ', 'į'] +['èļ', 'ĭ'] +['èļ', '¬'] +['èļ', 'Ŀ'] +['èļ', '§'] +['åĶ', '¢'] +['åľ', 'Ħ'] +['åĶ', '£'] +['åĶ', 'ı'] +['çĽ', 'İ'] +['åĶ', 'ij'] +['å´', 'Ĥ'] +['å´', 'ĥ'] +['ç½', '¡'] +['ç½', 'Ł'] +['è§', 'Ĭ'] +['èµ', 'ħ'] +['éĴ', '²'] +['éĴ', 'µ'] +['éĴ', '¹'] +['éĴ', 'º'] +['éĴ', '½'] +['éĴ', '¼'] +['éĴ', '¿'] +['éĵ', 'Ģ'] +['éĵ', 'Ħ'] +['éĵ', 'Ĩ'] +['éĵ', 'Ī'] +['éĵ', 'ī'] +['éĵ', 'Ĭ'] +['éĵ', 'ĭ'] +['éĵ', 'Į'] +['é', 'ĵį'] +['ä', '¥'] +['ä¥', '½'] +['éĵ', 'İ'] +['æ°', '©'] +['æ°', '¤'] +['æ°', '¦'] +['æ¯', 'ª'] +['èĪ', 'IJ'] +['ç§', '£'] +['ç§', '«'] +['çĽ', 'ī'] +['ç¬', 'Ħ'] +['ç¬', 'ķ'] +['ç¬', 'Ĭ'] +['ç¬', 'ı'] +['ç¬', 'Ĩ'] +['ä¿', '¸'] +['ä¿', 'µ'] +['åģ', 'Į'] +['ä¿', '³'] +['ä¿', '¶'] +['åĢ', '¬'] +['åĢ', 'ı'] +['æģ', 'ģ'] +['åĢ', 'Ń'] +['ä¿', '¾'] +['åĢ', 'ľ'] +['éļ', '¼'] +['éļ', '½'] +['åĢ', 'Į'] +['åĢ', '¥'] +['èĩ', '¬'] +['éĥ', '«'] +['åĢ', '¨'] +['è¡', 'Ħ'] +['é¢', 'Ģ'] +['å¾', 'ķ'] +['èĪ', '«'] +['è¡', '¾'] +['èĥ', '¯'] +['èĥ', '±'] +['èĥ', '´'] +['èĥ', 'Ń'] +['èĦ', 'į'] +['èĥ', '¼'] +['èĦ', 'Ĵ'] +['é¸', '±'] +['é¸', '²'] +['çĭ', '·'] +['çĮ', 'ģ'] +['çĭ', '³'] +['çĮ', 'ĥ'] +['çĭ', 'º'] +['éĢ', 'ĸ'] +['æ¡', 'Ģ'] +['é¥', 
'½'] +['åĩ', 'ĩ'] +['æĮ', 'Ľ'] +['äº', '³'] +['çĸ', '³'] +['çĸ', '´'] +['çĸ', '¸'] +['çĸ', '½'] +['çĹ', 'Ī'] +['çĸ', '±'] +['çĹ', 'Ĥ'] +['çĹ', 'ī'] +['è¡', '®'] +['é¢', 'ĥ'] +['æģ', '£'] +['æĹ', 'Ĩ'] +['æĹ', 'Ħ'] +['æĹ', 'ĥ'] +['éĺ', 'ĥ'] +['éĺ', 'Ħ'] +['è¨', 'ļ'] +['éĺ', 'Ĩ'] +['æģ', 'Ļ'] +['ç²', 'ij'] +['çĥ', 'ľ'] +['çĥ', '©'] +['çĥ', 'Ĭ'] +['åī', '¡'] +['éĥ', '¯'] +['çĥ', '¬'] +['æ¶', 'ij'] +['æµ', '¯'] +['æ¶', 'ŀ'] +['æ¶', 'Ł'] +['å¨', 'ij'] +['æ¶', 'ł'] +['æµ', 'ŀ'] +['æ¶', 'ĵ'] +['æµ', '¥'] +['æ¶', 'Ķ'] +['æµ', 'ľ'] +['æµ', 'ł'] +['æµ', '£'] +['æĤ', 'ļ'] +['æ', 'ĤŃ'] +['æĤ', 'Ŀ'] +['æĤ', 'Ĵ'] +['æĤ', 'Į'] +['æĤ', 'Ľ'] +['çª', 'Ī'] +['åī', 'ľ'] +['è¯', '¹'] +['è¯', '¼'] +['è¢', 'Ĵ'] +['è¢', '¢'] +['è¯', '¿'] +['è°', 'Ģ'] +['è°', 'Ĥ'] +['è°', 'Ħ'] +['è°', 'ĩ'] +['å±', 'IJ'] +['å±', 'Ļ'] +['éĻ', '¬'] +['åĭ', 'IJ'] +['å¥', 'ĺ'] +['çī', 'Ĥ'] +['èļ', '©'] +['éĻ', '²'] +['å¨', 'Į'] +['å¨', 'ī'] +['å¨', '²'] +['å¨', '´'] +['å¨', '£'] +['å¨', 'ĵ'] +['å©', 'Ģ'] +['çķ', 'ļ'] +['éĢ', '¡'] +['ç»', 'ł'] +['éª', 'Ĭ'] +['ç»', '¡'] +['éª', 'ĭ'] +['ç»', '¦'] +['ç»', '¨'] +['éª', 'İ'] +['éĤ', 'ķ'] +['é¸', '¶'] +['å½', 'Ĺ'] +['èĢ', 'ľ'] +['çĦ', 'ĺ'] +['èĪ', 'Ĥ'] +['çIJ', 'ı'] +['çIJ', 'ĩ'] +['éº', '¸'] +['æı', '¶'] +['åŁ', '´'] +['åŁ', '¯'] +['æį', '¯'] +['æİ', '³'] +['æİ', '´'] +['åŁ', '¸'] +['åŁ', 'µ'] +['èµ', '§'] +['åŁ', '¤'] +['æį', 'Ń'] +['éĢ', 'µ'] +['åŁ', 'Ŀ'] +['åł', 'ĭ'] +['åł', 'į'] +['æİ', '¬'] +['é¸', '·'] +['æį', '½'] +['æİ', 'Ĭ'] +['åł', 'ī'] +['æİ', '¸'] +['æį', '©'] +['æİ', '®'] +['æĤ', '«'] +['åŁ', 'Ń'] +['åŁ', '½'] +['æİ', 'ĩ'] +['æİ', '¼'] +['èģ', 'ĥ'] +['èIJ', 'ģ'] +['èı', 'ĺ'] +['åł', 'ĩ'] +['èIJ', 'ĺ'] +['èIJ', 'ĭ'] +['èı', '½'] +['èı', 'ĸ'] +['è', 'IJľ'] +['èIJ', '¸'] +['èIJ', 'ij'] +['æ£', '»'] +['èı', 'Ķ'] +['èı', 'Ł'] +['èIJ', 'ı'] +['èı', '¹'] +['èı', 'ª'] +['èı', 'ħ'] +['èı', 'Ģ'] +['èı', '°'] +['èı', '¡'] +['æ¢', '¿'] +['æ¢', 'ı'] +['è§', 'ĭ'] +['æ¡', '´'] +['æ¡', '·'] +['æ£', 'ģ'] +['æ¡', '«'] +['æ£', 'Ĥ'] +['åķ', '¬'] +['éĥ', '¾'] +['æķ', 'ķ'] 
+['è±', 'ī'] +['éĦ', 'Ħ'] +['éħ', 'ŀ'] +['ç¡', 'İ'] +['ç¡', 'Ń'] +['ç¡', 'ĸ'] +['ç¡', 'Ĺ'] +['ç¡', 'IJ'] +['ç¡', 'ĩ'] +['ç¡', 'Į'] +['é¸', '¸'] +['çĵ', 'ł'] +['åĮ', 'ı'] +['åİ', '©'] +['æ®', 'Ĵ'] +['æ®', 'ĵ'] +['æ®', 'į'] +['èµ', 'ī'] +['éĽ', '©'] +['è¾', 'Ħ'] +['åł', 'ij'] +['çľ', 'Ń'] +['çľ', '¦'] +['åķ', '§'] +['æĻ', '¡'] +['æĻ', '¤'] +['çľ', 'µ'] +['åľ', 'Ĭ'] +['åĸ', 'ı'] +['åķ', 'ī'] +['åĭ', 'ĸ'] +['æĻ', 'ŀ'] +['åĶ', 'µ'] +['æĻ', 'Ĺ'] +['åķ', 'Ń'] +['çķ', '¦'] +['è¶', 'º'] +['åķ', '®'] +['è·', 'Ħ'] +['èļ', '¶'] +['è', 'ĽĦ'] +['èĽ', 'İ'] +['èĽ', 'Ĩ'] +['èļ', '°'] +['åľ', 'ī'] +['èļ', '±'] +['èĽ', 'ī'] +['èĽ', 'ı'] +['èļ', '´'] +['åķ', 'ģ'] +['åķ', 'ķ'] +['åĶ', '¿'] +['åķ', 'IJ'] +['åĶ', '¼'] +['åĶ', '·'] +['åķ', 'ĸ'] +['åķ', 'µ'] +['åķ', '¶'] +['åķ', '·'] +['åĶ', '³'] +['åĶ', '°'] +['åķ', 'ľ'] +['å¸', '»'] +['å´', 'ļ'] +['å´', '¦'] +['å¸', '¼'] +['å´', '®'] +['å´', '¤'] +['å´', 'Ĩ'] +['èµ', 'ĩ'] +['èµ', 'Ī'] +['èµ', 'Ĭ'] +['éĵ', 'ij'] +['éĵ', 'Ĵ'] +['éĵ', 'Ĺ'] +['éĵ', 'Ļ'] +['éĵ', 'Ł'] +['éĵ', '¡'] +['éĵ', '¢'] +['éĵ', '£'] +['éĵ', '¤'] +['éĵ', '§'] +['éĵ', '¨'] +['éĵ', '©'] +['éĵ', 'ª'] +['éĵ', '«'] +['éĵ', '¯'] +['éĵ', '°'] +['éĵ', '±'] +['éĵ', '³'] +['éĵ', 'µ'] +['éĵ', '·'] +['çī', '¾'] +['é¸', '¹'] +['ç§', '¾'] +['éĢ', '¶'] +['ç¬', 'º'] +['çŃ', 'ĩ'] +['ç¬', '¸'] +['ç¬', 'ª'] +['ç¬', '®'] +['ç¬', 'ł'] +['ç¬', '¥'] +['ç¬', '¤'] +['ç¬', '³'] +['ç¬', '¾'] +['ç¬', 'ŀ'] +['åģ', '¾'] +['åģ', 'ĥ'] +['åģ', 'ķ'] +['åģ', 'Ī'] +['åĤ', 'Ģ'] +['åģ', '¬'] +['åģ', '»'] +['çļ', 'ij'] +['çļ', 'İ'] +['é¸', '»'] +['å¾', 'ľ'] +['èĪ', '¸'] +['èĪ', '»'] +['èĪ', '´'] +['èĪ', '·'] +['é¾', 'Ľ'] +['ç¿', 'İ'] +['èĦ', '¬'] +['èĦ', 'ĺ'] +['èĦ', '²'] +['åĮ', 'IJ'] +['çĮ', 'Ĺ'] +['çĮ', '¡'] +['çĮ', 'ŀ'] +['æĸ', 'Ľ'] +['çĮ', 'ķ'] +['é¦', 'Ĺ'] +['é¦', 'ĥ'] +['é¦', 'Ħ'] +['é¸', '¾'] +['åº', '¹'] +['åº', '¾'] +['çĹ', 'Ķ'] +['çĹ', 'į'] +['ç¿', 'Ĭ'] +['æĹ', 'Į'] +['æĹ', 'İ'] +['è¢', '¤'] +['éĺ', 'ĩ'] +['éĺ', 'Ī'] +['éĺ', 'ī'] +['éĺ', 'Ĭ'] +['éĺ', 'ĭ'] +['éĺ', 'į'] +['éĺ', 'ı'] +['ç¾', 'Ł'] 
+['ç²', 'Ŀ'] +['çĦ', 'IJ'] +['çĦ', 'ĵ'] +['çĦ', 'Ĺ'] +['æ·', 'ħ'] +['æ·', 'ŀ'] +['æ¸', 'İ'] +['æ¶', '¿'] +['æ·', 'ĸ'] +['æĮ', '²'] +['æ·', 'ł'] +['æ¶', '¸'] +['æ¸', 'ij'] +['æ·', '¦'] +['æ·', 'Ŀ'] +['æ¶', 'ª'] +['æ·', 'Ļ'] +['æ¶', '«'] +['æ¸', 'Į'] +['æĤ', '»'] +['æĤ', '±'] +['æ', 'ĥĿ'] +['æĥ', 'ĺ'] +['æĥ', 'Ĩ'] +['æĥ', 'ļ'] +['æĥ', 'ĩ'] +['æĥ', '®'] +['çª', 'ķ'] +['è°', 'Į'] +['æī', 'Ī'] +['çļ', '²'] +['è°', 'ij'] +['è£', 'Ĩ'] +['è¢', '·'] +['è£', 'ī'] +['è°', 'Ĵ'] +['è°', 'Ķ'] +['è°', 'ķ'] +['è°', 'ĸ'] +['è°', 'Ĺ'] +['è°', 'Ļ'] +['è°', 'Ŀ'] +['éĢ', '¯'] +['éĥ', '¿'] +['éļ', 'Ī'] +['ç²', 'ľ'] +['éļ', 'į'] +['éļ', 'Ĺ'] +['å©', 'Ĭ'] +['å¨', '¼'] +['å©', '¢'] +['å©', 'µ'] +['èĥ', '¬'] +['è¢', 'Ī'] +['ç¿', 'Į'] +['æģ', '¿'] +['æ¬', '¸'] +['ç»', '«'] +['éª', 'IJ'] +['ç»', '¯'] +['ç»', '±'] +['éª', 'Ĵ'] +['ç»', '²'] +['éª', 'ĵ'] +['ç»', '¶'] +['ç»', 'º'] +['ç»', '»'] +['ç»', '¾'] +['éª', 'ĸ'] +['ç¼', 'ģ'] +['èĢ', 'ł'] +['çIJ', '«'] +['çIJ', 'µ'] +['çIJ', '¶'] +['çIJ', '¥'] +['çIJ', '¨'] +['çIJ', '°'] +['çIJ', '®'] +['çIJ', '¯'] +['çIJ', '¬'] +['çIJ', 'ļ'] +['è¾', 'ĩ'] +['é¼', 'ĭ'] +['æı', '³'] +['åł', 'ŀ'] +['æIJ', '½'] +['æı', '¸'] +['æı', 'ł'] +['åł', 'Ļ'] +['è¶', 'Ħ'] +['æı', 'ĸ'] +['é¢', 'ī'] +['å¡', 'Ħ'] +['æı', '¿'] +['èĢ', 'ĭ'] +['æı', 'Ħ'] +['èĽ', '©'] +['èĽ', '°'] +['å¡', 'Ĩ'] +['æij', 'Ĵ'] +['æı', 'Ĩ'] +['æİ', '¾'] +['èģ', 'Ĵ'] +['èij', 'ij'] +['èij', 'ļ'] +['éĿ', '°'] +['éĿ', '¸'] +['èij', '³'] +['èij', 'º'] +['èij', '¸'] +['èIJ', '¼'] +['èij', '¶'] +['è', 'ĴĮ'] +['èij', 'Ń'] +['æ¥', '®'] +['æ', '£¼'] +['æ¤', 'Ł'] +['æ£', '¹'] +['æ¤', '¤'] +['æ£', '°'] +['èµ', 'į'] +['æ¤', 'ĭ'] +['æ¤', 'ģ'] +['æ¤', 'ª'] +['æ¤', 'IJ'] +['é¹', 'ģ'] +['éħ', '¤'] +['éħ', '¢'] +['éħ', '¡'] +['é¹', 'Ĥ'] +['æ®', 'ļ'] +['æ®', 'Ľ'] +['éĽ', '±'] +['è¾', 'ĭ'] +['æ¤', 'ł'] +['è¾', 'İ'] +['çĿ', 'Ħ'] +['çĿ', 'ĩ'] +['çĿ', 'ĥ'] +['æĪ', '¢'] +['åĸ', 'ĭ'] +['åĹ', 'Ĵ'] +['åĸ', 'ĥ'] +['åĸ', '±'] +['åĸ', '¹'] +['æĻ', '·'] +['åĸ', 'Ī'] +['è·', 'ĸ'] +['è·', 'Ĺ'] +['è·', 'ŀ'] +['è·', 'ļ'] +['è·', 
'İ'] +['è·', 'ı'] +['è·', 'Ĩ'] +['èĽ', '±'] +['èĽ', '²'] +['èĽ', 'Ń'] +['èĽ', '³'] +['èĽ', 'IJ'] +['èĽ', 'Ķ'] +['èĽ', 'ŀ'] +['èĽ', '´'] +['èĽ', 'ĺ'] +['åĸ', 'ģ'] +['åĸ', 'Ł'] +['åķ', '¾'] +['åĹ', 'ĸ'] +['åĸ', 'ij'] +['åĹ', 'Ł'] +['åĹ', 'ŀ'] +['åĸ', 'Ļ'] +['åµ', 'ĺ'] +['åµ', 'ĸ'] +['å´', '´'] +['éģ', 'Ħ'] +['è©', 'Ī'] +['åµ', 'İ'] +['å', 'µ¬'] +['åµ', 'Ľ'] +['åµ', '¯'] +['åµ', 'Ŀ'] +['åµ', '«'] +['å¹', 'Ħ'] +['åµ', 'ĭ'] +['èµ', 'ķ'] +['éĵ', '»'] +['éĵ', '¼'] +['éĵ', '¿'] +['éĶ', 'ĥ'] +['éĶ', 'Ĩ'] +['éĶ', 'ĩ'] +['éĶ', 'ī'] +['éĶ', 'ı'] +['éĶ', 'ij'] +['éĶ', 'Ĵ'] +['éĶ', 'Ķ'] +['éĶ', 'ķ'] +['æİ', '£'] +['çŁ', '¬'] +['æ°', '°'] +['æ¯', '³'] +['æ¯', '½'] +['çĬ', 'Ĭ'] +['çĬ', 'Ħ'] +['çĬ', 'ĭ'] +['é', '¹Ħ'] +['çĬ', 'į'] +['åµ', 'ĩ'] +['é»', 'į'] +['ç¨', 'ĥ'] +['ç¨', 'Ĥ'] +['çŃ', 'ļ'] +['çŃ', 'µ'] +['çŃ', 'Į'] +['åĤ', '£'] +['åĤ', 'Ī'] +['èĪ', 'Ħ'] +['çī', 'į'] +['åĤ', '¥'] +['åĤ', '§'] +['éģ', 'ij'] +['åĤ', '©'] +['å¾', '¨'] +['åª', 'Ń'] +['çķ', '²'] +['å¼', 'ij'] +['ç¿', 'ķ'] +['é¹', 'Ĩ'] +['èħ', 'Ī'] +['èħ', 'ĵ'] +['èħ', 'Ĩ'] +['èħ', '´'] +['èħ', 'ļ'] +['èħ', '±'] +['é±', '¿'] +['é²', 'Ģ'] +['é²', 'Ĥ'] +['çĮ', '¢'] +['çĮ', '¹'] +['çĮ', '¥'] +['é£', 'ĵ'] +['è§', 'ŀ'] +['è§', 'ļ'] +['çĮ', '±'] +['é¢', 'İ'] +['é£', '§'] +['é¦', 'ĩ'] +['é¦', 'Ĭ'] +['äº', 'µ'] +['èĦ', 'Ķ'] +['è£', 'Ĵ'] +['çĹ', '£'] +['çĹ', '¨'] +['çĹ', '¦'] +['çĹ', 'ŀ'] +['çĹ', '¤'] +['çĹ', '§'] +['èµ', 'ĵ'] +['ç«', '¦'] +['çĵ', '¿'] +['åķ', '»'] +['é¢', 'ı'] +['é¹', 'ĩ'] +['éĺ', 'ij'] +['éĺ', 'Ĵ'] +['éĺ', 'ķ'] +['ç²', 'ŀ'] +['éģ', 'Ĵ'] +['åŃ', '³'] +['çĦ', '¯'] +['çĦ', 'ľ'] +['çĦ', '±'] +['é¹', 'Ī'] +['æ¸', '«'] +['æ¹', '®'] +['æ¹', 'İ'] +['æ¹', 'ľ'] +['æ¹', 'į'] +['æ¹', '«'] +['æº', '²'] +['æ¹', 'Ł'] +['æº', 'Ĩ'] +['æ¹', '²'] +['æ¹', 'Ķ'] +['æ¹', 'ī'] +['æ¸', '¥'] +['æ»', 'ģ'] +['æĦ', 'ł'] +['æĥ', 'º'] +['æĦ', '¦'] +['æĥ', '´'] +['æĦ', 'Ģ'] +['æĦ', 'İ'] +['æĦ', 'Ķ'] +['åĸ', '¾'] +['å¯', 'IJ'] +['è°', 'Ł'] +['è£', '¢'] +['è£', 'İ'] +['è£', '¥'] +['ç¥', '¾'] +['è°', 'ł'] +['è°', '¡'] +['è°', '¥'] +['è°', 
'§'] +['åŃ', '±'] +['å¼', '¼'] +['å·', '½'] +['éª', 'ĺ'] +['åª', 'ª'] +['å·', '¯'] +['ç¿', 'ļ'] +['çļ', '´'] +['éª', 'Ľ'] +['ç¼', 'Ĥ'] +['ç¼', 'ĥ'] +['ç¼', 'Ħ'] +['å½', 'ĺ'] +['ç¼', 'ĩ'] +['ç¼', 'Ī'] +['ç¼', 'Į'] +['ç¼', 'ij'] +['ç¼', 'Ĵ'] +['ç¼', 'Ĺ'] +['é£', '¨'] +['èĢ', '¢'] +['çij', 'ģ'] +['çij', 'Ĺ'] +['çij', 'Ħ'] +['éģ', '¨'] +['éª', 'ľ'] +['éŁ', '«'] +['é«', '¡'] +['å¡', '¬'] +['éĦ', '¢'] +['è¶', 'Ķ'] +['è¶', 'ij'] +['æij', 'ħ'] +['æij', 'ģ'] +['èľ', 'ĩ'] +['æIJ', 'ĭ'] +['æIJ', 'ª'] +['æIJ', 'IJ'] +['æIJ', 'Ľ'] +['æIJ', 'ł'] +['æij', 'Ī'] +['å½', 'Ģ'] +['æ¯', 'Ĥ'] +['æIJ', '¦'] +['æIJ', '¡'] +['èĵ', 'ģ'] +['æĪ', '¡'] +['è', 'ĵį'] +['éĦ', 'ŀ'] +['èĵ', 'IJ'] +['èĵ', '¦'] +['é¹', 'ĭ'] +['èĴ', '½'] +['èĵ', 'ĸ'] +['èĵ', 'Ĭ'] +['èĴ', '¯'] +['èĵ', 'Ł'] +['èĵ', 'ij'] +['èĴ', 'º'] +['èĵ', 'ł'] +['èĴ', 'Ł'] +['èĴ', '¡'] +['èĴ', '¹'] +['èĴ', '´'] +['èĴ', 'Ĺ'] +['èĵ', '¥'] +['æ¥', 'Ķ'] +['æ¥', 'Ĥ'] +['æ¥', 'Ŀ'] +['æ¥', '«'] +['æ¥', '¸'] +['æ¤', '´'] +['æ§', 'Į'] +['æ¥', '¯'] +['çļ', 'Ļ'] +['æ¦', 'Ī'] +['æ§', 'İ'] +['æ¦', 'ī'] +['æ¥', '¦'] +['æ¥', '£'] +['æ¥', '¹'] +['æ¤', '½'] +['åī', '½'] +['éħ', '©'] +['èľ', 'ĥ'] +['ç¢', 'Ľ'] +['ç¢', 'ĵ'] +['ç¡', '¼'] +['ç¢', 'ī'] +['ç¢', 'ļ'] +['ç¢', 'ĩ'] +['ç¢', 'ľ'] +['é¹', 'Į'] +['è¾', 'ı'] +['é¾', 'ĥ'] +['é¾', 'ħ'] +['è¨', '¾'] +['ç²', '²'] +['çĿ', 'ļ'] +['åĹ', 'ª'] +['éŁ', 'ª'] +['åĹ', '·'] +['åĹ', 'ī'] +['çĿ', '¨'] +['çĿ', '¢'] +['éĽ', 'İ'] +['çĿ', '¥'] +['åĹ', 'ij'] +['åĹ', '«'] +['åĹ', '¬'] +['åĹ', 'Ķ'] +['åĹ', 'Ŀ'] +['æĪ', '¥'] +['åĹ', 'Ħ'] +['çħ', '¦'] +['æļ', 'Ħ'] +['éģ', '¢'] +['æ', 'ļĮ'] +['è·', '¬'] +['è·', '¶'] +['è', '·¸'] +['è·', 'IJ'] +['è·', '£'] +['è·', '¹'] +['èĽ', '¸'] +['èľ', 'Ĭ'] +['èľ', 'į'] +['èľ', 'ī'] +['èľ', '£'] +['çķ', '¹'] +['èĽ', '¹'] +['åĹ', '¥'] +['åĹ', '²'] +['åĹ', '³'] +['åĹ', 'Į'] +['åĹ', 'į'] +['åĹ', 'IJ'] +['åĹ', '¤'] +['åĹ', 'µ'] +['ç½', '¨'] +['åµ', 'Ĭ'] +['åµ', '´'] +['éª', '°'] +['éĶ', 'Ĺ'] +['éĶ', 'Ľ'] +['éĶ', 'ľ'] +['éĶ', 'Ŀ'] +['éĶ', 'ŀ'] +['éĶ', 'Ł'] +['éĶ', '¢'] +['éĶ', '¨'] +['éĶ', 
'©'] +['éĶ', 'Ń'] +['éĶ', '±'] +['éĽ', 'ī'] +['æ°', '²'] +['çĬ', 'ı'] +['æŃ', 'ĥ'] +['ç¨', 'ŀ'] +['ç¨', 'Ĺ'] +['ç¨', 'Ķ'] +['çŃ', 'ł'] +['çŃ', '¢'] +['çŃ', '®'] +['çŃ', '²'] +['çī', 'Ĵ'] +['æķ', '«'] +['å¾', 'Ń'] +['æĦ', 'Ĩ'] +['èī', 'Ħ'] +['è§', 'İ'] +['æ¯', '¹'] +['è²', 'Ĭ'] +['è²', 'ħ'] +['è²', 'ī'] +['é¢', 'Ķ'] +['èħ', 'ł'] +['èħ', '©'] +['èħ', '¼'] +['èħ', 'Ń'] +['è', 'ħ§'] +['å¡', 'į'] +['åª', 'µ'] +['é²', 'ħ'] +['é²', 'Ĩ'] +['é²', 'ĩ'] +['é²', 'Ī'] +['é²', 'ĭ'] +['é²', 'IJ'] +['èĤ', 'Ħ'] +['é¹', 'IJ'] +['é£', 'ķ'] +['è§', '¥'] +['éģ', 'Ľ'] +['é¦', 'IJ'] +['é¹', 'ij'] +['äº', '¶'] +['çĺ', 'ĥ'] +['çĹ', '±'] +['çĹ', '¼'] +['çĹ', '¿'] +['çĺ', 'IJ'] +['çĺ', 'ģ'] +['çĺ', 'Ĩ'] +['éº', 'Ĥ'] +['æŃ', 'Ĩ'] +['æĹ', 'Ĵ'] +['éĺ', 'ĸ'] +['éĺ', 'Ĺ'] +['ç¾', '§'] +['è±', '¢'] +['ç²', '³'] +['çĮ', '·'] +['çħ', '³'] +['çħ', '¨'] +['çħ', 'ħ'] +['çħ', 'Ĭ'] +['çħ', '¸'] +['çħ', 'º'] +['æ»', 'Ł'] +['æº', '±'] +['æº', 'ĺ'] +['æ¼', 'Ń'] +['æ»', '¢'] +['æº', '¥'] +['æº', '½'] +['è£', 'Ł'] +['æº', '»'] +['æº', '·'] +['æ»', 'Ĺ'] +['æ»', '«'] +['æº', '´'] +['æ»', 'ı'] +['æ»', 'ĥ'] +['æ»', '¦'] +['æº', 'ı'] +['æ»', 'Ĥ'] +['æ»', 'ĵ'] +['æº', 'Ł'] +['æ»', 'ª'] +['æĦ', '«'] +['æħ', 'Ĭ'] +['é²', 'İ'] +['éª', 'ŀ'] +['çª', 'ł'] +['çª', '£'] +['è£', '±'] +['è£', '¨'] +['è£', '¾'] +['è£', '°'] +['ç¦', 'Ĭ'] +['è°', '©'] +['è°', 'ª'] +['åª', '¾'] +['å«', '«'] +['åª', '²'] +['å«', 'Ĵ'] +['å«', 'Ķ'] +['åª', '¸'] +['ç¼', 'Ļ'] +['ç¼', 'ľ'] +['ç¼', 'Ľ'] +['è¾', 'Ķ'] +['éª', 'Ŀ'] +['ç¼', 'Ł'] +['ç¼', '¡'] +['ç¼', '¢'] +['ç¼', '£'] +['éª', 'Ł'] +['èĢ', '¥'] +['çĴ', 'Ī'] +['çij', 'Ń'] +['çį', 'Ĵ'] +['è§', 'ı'] +['æħ', 'Ŀ'] +['å«', 'ł'] +['åı', 'Ĩ'] +['æij', '½'] +['å¢', 'ģ'] +['æĴ', 'Ĥ'] +['æij', 'ŀ'] +['æĴ', 'Ħ'] +['ç¿', '¥'] +['è¸', 'ħ'] +['æij', 'Ń'] +['å¢', 'ī'] +['å¢', 'Ĵ'] +['æ¦', 'ĸ'] +['ç¶', '¦'] +['èĶ', '«'] +['èĶ', '·'] +['éĿ', 'º'] +['éĿ', '¼'] +['éŀ', 'ħ'] +['éĿ', '¿'] +['çĶ', 'į'] +['èĶ', '¸'] +['èĶ', 'Ł'] +['èĶ', 'º'] +['æĪ', '¬'] +['èķ', 'ĸ'] +['èĶ', '»'] +['èĵ', '¿'] +['æĸ', '¡'] +['é¹', 
'ķ'] +['èĵ', '¼'] +['æ¦', 'Ľ'] +['æ¦', '§'] +['æ¦', '«'] +['æ¦', 'Ń'] +['æ§', 'Ķ'] +['æ¦', '±'] +['æ§', 'ģ'] +['æ§', 'ł'] +['æ¦', '·'] +['åĥ', '°'] +['éħ', '½'] +['éħ', '¹'] +['ç¢', '¡'] +['ç¢', '´'] +['ç¢', '£'] +['ç¢', '²'] +['èĩ', '§'] +['è±', '¨'] +['æ®', '¡'] +['éľ', 'ģ'] +['èľ', 'ļ'] +['é¾', 'ĩ'] +['é¾', 'Ī'] +['ä', 'ģ'] +['äģ', 'ĸ'] +['çĿ', '½'] +['åĺ', 'ŀ'] +['åĺ', 'Ī'] +['åĺ', 'Į'] +['åĺ', 'ģ'] +['æļ', 'Ŀ'] +['è¸', 'Į'] +['è¸', 'ī'] +['èľ', 'ŀ'] +['èľ', '¥'] +['èľ', '®'] +['èĿ', 'Ī'] +['èľ', '´'] +['èľ', '±'] +['èľ', '©'] +['èľ', '·'] +['èľ', '¿'] +['èŀ', 'Ĥ'] +['èľ', '¢'] +['åĺ', '¡'] +['é¹', 'Ĺ'] +['åĺ', '£'] +['åĺ', '¤'] +['åĺ', 'ļ'] +['åĹ', '¾'] +['åĺ', '§'] +['ç½', '´'] +['ç½', '±'] +['å¹', 'Ķ'] +['å¶', 'Ĥ'] +['å¹', 'Ľ'] +['èµ', 'Ļ'] +['ç½', 'Ĥ'] +['éª', '·'] +['éª', '¶'] +['é¹', 'ĺ'] +['éĶ', '²'] +['éĶ', '´'] +['éĶ', '¶'] +['éĶ', '·'] +['éĶ', '¸'] +['éĶ', 'µ'] +['éķ', 'Ĥ'] +['çĬ', 'Ĵ'] +['ç®', 'IJ'] +['ç®', '¦'] +['ç®', '§'] +['ç®', '¸'] +['ç®', '¬'] +['ç®', 'ħ'] +['ç®', 'ª'] +['ç®', 'ľ'] +['ç®', '¢'] +['ç®', 'ĵ'] +['åĥ', 'ĸ'] +['åĦ', 'Ĩ'] +['åĥ', '³'] +['åĥ', 'Ń'] +['åĬ', 'ģ'] +['åĥ', '®'] +['éŃ', 'ĥ'] +['éŃ', 'Ĩ'] +['çĿ', '¾'] +['èī', 'ĭ'] +['éĦ', '±'] +['èĨ', 'Ī'] +['èĨ', 'ij'] +['é²', 'ij'] +['é²', 'Ķ'] +['é²', 'ļ'] +['é²', 'Ľ'] +['é²', 'Ł'] +['çį', 'IJ'] +['è§', '«'] +['éĽ', 'Ĵ'] +['å¤', '¤'] +['é¦', 'ij'] +['éĬ', '®'] +['å¡', '¾'] +['çĺ', 'Į'] +['çĺ', 'Ĭ'] +['çĺ', 'ĺ'] +['çĺ', 'Ļ'] +['æĹ', 'ĸ'] +['èĨ', 'Ĥ'] +['éĺ', 'ļ'] +['éĦ', '¯'] +['é²', 'ŀ'] +['ç²', '¿'] +['ç²', '¼'] +['ç³', 'ģ'] +['æ§', 'Ĭ'] +['é¹', 'ļ'] +['çĨ', 'ĺ'] +['çĨ', '¥'] +['æ½', '¢'] +['æ¼', 'ķ'] +['æ»', '¹'] +['æ¼', '¯'] +['æ¼', '¶'] +['æ½', 'ĭ'] +['æ½', '´'] +['æ¼', 'ª'] +['æ¼', 'ī'] +['æ¼', '©'] +['æ¾', 'ī'] +['æħ', 'µ'] +['æIJ', '´'] +['çª', '¨'] +['å¯', '¤'] +['ç¶', '®'] +['è°', '®'] +['è¤', '¡'] +['è¤', 'Ļ'] +['è¤', 'ĵ'] +['è¤', 'Ľ'] +['è¤', 'Ĭ'] +['è°', '¯'] +['è°', '°'] +['è°', '²'] +['å±', '£'] +['é¹', 'Ľ'] +['å«', '±'] +['å«', 'ĸ'] +['å«', '¦'] +['å«', 'ļ'] +['å', '«ĺ'] 
+['é¼', 'IJ'] +['çŀ', 'Ģ'] +['é¹', 'ľ'] +['éª', 'ł'] +['ç¼', '¥'] +['ç¼', '¦'] +['ç¼', '§'] +['ç¼', '¨'] +['éª', '¢'] +['ç¼', '«'] +['èĢ', '¦'] +['èĢ', '§'] +['çĴ', 'ľ'] +['çĴ', 'İ'] +['çĴ', 'ģ'] +['å¥', 'Ń'] +['é«', '¯'] +['é«', '«'] +['æĴ', '·'] +['æĴ', 'ħ'] +['èµ', 'Ń'] +['æĴ', '¸'] +['éĭ', 'Ĩ'] +['æĴ', 'Ļ'] +['æĴ', 'º'] +['å¢', 'Ģ'] +['èģ', '©'] +['è§', 'IJ'] +['éŀ', 'ij'] +['èķ', 'Ļ'] +['éŀ', 'Ĵ'] +['èķ', 'Ī'] +['èķ', '¨'] +['èķ', '¤'] +['èķ', 'ŀ'] +['èķ', 'º'] +['çŀ', '¢'] +['èķ', 'ĥ'] +['èķ', '²'] +['èµ', 'ľ'] +['æ§', '¿'] +['æ¨', '¯'] +['æ§', 'Ń'] +['æ¨', 'Ĺ'] +['æ¨', 'ĺ'] +['æ§', '²'] +['éĨ', 'Į'] +['éĨ', 'ħ'] +['éĿ', '¥'] +['éŃ', 'ĩ'] +['é¤', 'į'] +['ç£', 'Ķ'] +['ç£', 'Ļ'] +['éľ', 'Ī'] +['è¾', 'ĺ'] +['é¾', 'ī'] +['é¾', 'Ĭ'] +['è§', 'ij'] +['çŀ', 'Į'] +['ç', 'ŀĭ'] +['çŀ', 'ij'] +['åĺ', 'Ń'] +['åĻ', 'İ'] +['åĻ', '¶'] +['é¢', 'Ļ'] +['æļ', '¹'] +['åĻ', 'ĺ'] +['è¸', 'Ķ'] +['è¸', 'Ŀ'] +['è¸', 'Ł'] +['è¸', 'Ĵ'] +['è¸', '¬'] +['è¸', '®'] +['è¸', '¯'] +['è¸', 'º'] +['è¸', 'ŀ'] +['èĿ', '½'] +['èĿ', '¾'] +['èĿ', '»'] +['èĿ', '°'] +['èĿ', '®'] +['è', 'ŀĭ'] +['èĿ', 'ĵ'] +['èĿ', '£'] +['è', 'Ŀ¼'] +['åĺ', '¬'] +['é¢', 'ļ'] +['åĻ', 'į'] +['åĻ', 'Ļ'] +['åĻ', 'Į'] +['åĻ', 'Ķ'] +['é¢', 'Ľ'] +['å¹', 'ŀ'] +['å¹', '¡'] +['å¶', 'Ļ'] +['å¶', 'Ŀ'] +['éª', 'º'] +['éķ', 'Ĭ'] +['éķ', 'ī'] +['éķ', 'Į'] +['éķ', 'ı'] +['éķ', 'Ĵ'] +['éķ', 'ĵ'] +['éķ', 'Ķ'] +['ç¨', '·'] +['ç®', '´'] +['ç¯', 'ij'] +['ç¯', 'ģ'] +['ç¯', 'Į'] +['çī', 'ĸ'] +['åĦ', 'ĭ'] +['èĻ', '¢'] +['é¹', 'ŀ'] +['èĨ', 'ĺ'] +['é²', 'ł'] +['é²', '¡'] +['é²', '¢'] +['é²', '£'] +['é²', '¥'] +['é²', '§'] +['é²', '©'] +['çį', 'Ĺ'] +['çį', 'ł'] +['è§', '¯'] +['é¦', 'ĵ'] +['é¦', 'Ķ'] +['éº', '¾'] +['å»', 'Ľ'] +['çĺ', 'Ľ'] +['çĺ', '¼'] +['çĺ', '¢'] +['çĺ', 'ł'] +['é½', 'ij'] +['ç¾', '°'] +['ð¥', '»'] +['ð¥»', 'Ĺ'] +['ç³', 'Į'] +['ç³', 'į'] +['ç³', 'ħ'] +['çĨ', 'ľ'] +['ç', 'Ĩµ'] +['æ¾', 'į'] +['æ¾', 'Į'] +['æ½', '¸'] +['æ½', '¦'] +['æ½', '²'] +['éĭ', 'Ī'] +['æ½', 'Ł'] +['æ½', 'º'] +['å¯', '®'] +['çª', '³'] +['è°', '³'] +['è¤', '´'] 
+['è¤', 'Ł'] +['è¤', '«'] +['è°', 'µ'] +['çĨ', '¨'] +['å±', '¦'] +['åĭ', '°'] +['æĪ', '®'] +['èĿ', '¥'] +['ç¼', '¬'] +['ç¼', '®'] +['ç¼', '¯'] +['éª', '£'] +['çķ', '¿'] +['èĢ', '©'] +['èĢ', '¨'] +['èĢ', 'ª'] +['çĴ', 'Ł'] +['éĿ', 'Ľ'] +['çĴ', 'ł'] +['çĴ', 'ĺ'] +['èģ', '±'] +['èŀ', '¯'] +['é«', '»'] +['é«', 'Ń'] +['é«', '¹'] +['æĵ', 'Ģ'] +['çĶ', 'ı'] +['æĵ', 'ŀ'] +['ç¸', 'ł'] +['ç£', '¬'] +['é¢', 'ŀ'] +['èķ', '»'] +['é¢', 'Ł'] +['èĸ', '¤'] +['èĸ', '¨'] +['æª', 'ł'] +['èĸ', 'ı'] +['èĸ', '®'] +['èĸ', 'ľ'] +['èĸ', 'ħ'] +['æ¨', '¾'] +['æ©', 'Ľ'] +['æ©', 'ĩ'] +['æ¨', 'µ'] +['æª', 'İ'] +['æ©', '¹'] +['æ¨', '½'] +['æ¨', '¨'] +['æ©', '¼'] +['å¢', '¼'] +['æ©', 'IJ'] +['ç¿', '®'] +['éĨ', 'IJ'] +['éĨ', 'į'] +['éĨ', 'ļ'] +['ç£', '²'] +['èµ', 'Ŀ'] +['æ®', 'ª'] +['éľ', 'ı'] +['éĮ', '¾'] +['è¾', 'ļ'] +['éģ', '½'] +['æ°', 'ħ'] +['çŀ', 'Ł'] +['çŀ', 'ł'] +['çŀ', '°'] +['åļ', 'Ħ'] +['åļ', 'Ĩ'] +['åĻ', '¤'] +['æļ', '¾'] +['è¹', 'Ģ'] +['è¸', 'µ'] +['è¸', '½'] +['è¹', 'ī'] +['è¹', 'ģ'] +['èŀ', '¨'] +['èŀ', 'Ī'] +['èŀ', 'ħ'] +['èŀ', 'Ń'] +['èŀ', 'ł'] +['èŀ', 'Ł'] +['åĻ', '±'] +['åĻ', '«'] +['åĻ', '»'] +['åĻ', '¼'] +['ç½', '¹'] +['åľ', 'ľ'] +['ä', '¦'] +['ä¦', 'ĥ'] +['éķ', 'Ĺ'] +['éķ', 'ĺ'] +['éķ', 'ļ'] +['éķ', 'Ľ'] +['éķ', 'Ŀ'] +['éķ', 'ŀ'] +['éķ', 'ł'] +['æ°', 'ĩ'] +['æ°', 'Ĩ'] +['ç©', 'ij'] +['ç¯', 'Ŀ'] +['ç¯', '¥'] +['ç¯', '¦'] +['ç¯', 'ª'] +['ç¯', 'Ļ'] +['çĽ', '¥'] +['åĬ', 'ĵ'] +['ç¿', '±'] +['éŃ', 'ī'] +['éŃ', 'Ī'] +['å¾', '¼'] +['æŃ', 'Ļ'] +['èĨ', '¦'] +['èĨ', 'Ļ'] +['é²', '®'] +['é²', '±'] +['é²', '³'] +['é²', '´'] +['é²', 'µ'] +['é²', '·'] +['é²', '»'] +['çį', '´'] +['çį', 'Ń'] +['çį', '¬'] +['éĤ', 'Ĥ'] +['é¹', '§'] +['å»', '¨'] +['èµ', 'Ł'] +['çĺ', '°'] +['å»', 'ª'] +['çĺ', '¿'] +['çĺ', 'µ'] +['çĺ', '´'] +['çĻ', 'ĥ'] +['çĺ', '³'] +['éº', 'ĩ'] +['éº', 'Ī'] +['å', '¬´'] +['å£', 'ħ'] +['ç³', 'Ĺ'] +['çĶ', 'ij'] +['çĩ', 'İ'] +['çĩ', 'ł'] +['çĩ', 'Ķ'] +['çĩ', '§'] +['æ¿', 'ij'] +['æ¿', 'ī'] +['æ½', 'ŀ'] +['æ¾', '§'] +['æ¾', '¹'] +['æ¾', '¥'] +['æ¾', '¶'] +['æ¿', 'Ĥ'] +['è¤', '°'] 
+['çª', '¸'] +['å¬', 'ĸ'] +['çĬ', 'Ł'] +['éļ', '°'] +['å¬', 'Ĺ'] +['é¢', '¡'] +['ç¼', '±'] +['ç¼', '²'] +['ç¼', '³'] +['çĴ', '©'] +['çĴ', 'ª'] +['èŀ', '«'] +['æĵ', '¤'] +['å£', 'ķ'] +['è§', '³'] +['ç½', 'Ħ'] +['æĵ', '¢'] +['èĸ', '¹'] +['éŀ', '¡'] +['éŀ', '¬'] +['èĸ', '·'] +['èĹ', 'ĵ'] +['èĹ', 'ģ'] +['æª', 'Ħ'] +['æª', '©'] +['æĩ', 'ĭ'] +['éĨ', '¢'] +['ç¿', '³'] +['ç¤', 'ħ'] +['ç£', '´'] +['é¹', '©'] +['é¾', 'ĭ'] +['é¾', 'Į'] +['è±', '³'] +['å£', 'ij'] +['é»', '»'] +['åļ', 'ı'] +['åļ', 'ħ'] +['è¹', 'ij'] +['è¹', 'Ĵ'] +['è¹', 'Ĭ'] +['è', 'Ł¥'] +['èŀ', '¬'] +['èŀ', 'µ'] +['çĸ', 'ĥ'] +['èŀ', '³'] +['èŁ', 'ij'] +['åļ', 'ĵ'] +['ç½', '½'] +['ç½', '¾'] +['å¶', '·'] +['é»', 'ľ'] +['é»', 'Ŀ'] +['é«', 'ģ'] +['é«', 'Ģ'] +['éķ', '¡'] +['éķ', '¢'] +['éķ', '£'] +['éķ', '¦'] +['éķ', '§'] +['éķ', '©'] +['éķ', 'ª'] +['éķ', '«'] +['ç½', 'ħ'] +['ç°', 'Į'] +['ç¯', '¾'] +['ç¯', '¼'] +['ç°', 'ĸ'] +['ç°', 'ĭ'] +['é¼', '¢'] +['åĦ', '¡'] +['é¹', 'ª'] +['é¼', '¾'] +['çļ', '¤'] +['éŃ', 'į'] +['é¾', 'ł'] +['ç¹', 'ĩ'] +['è²', 'ĺ'] +['éĤ', 'Ī'] +['è²', 'Ķ'] +['èĩ', 'Į'] +['èĨ', '»'] +['èĩ', 'Ĩ'] +['èĩ', 'ĥ'] +['é²', '¼'] +['é²', '½'] +['é³', 'Ģ'] +['é³', 'ĥ'] +['é³', 'ħ'] +['é³', 'ĩ'] +['é³', 'Ĭ'] +['èŀ', '½'] +['çĩ', '®'] +['é¹', '«'] +['ç³', 'ľ'] +['ç¸', '»'] +['çĻ', 'į'] +['éº', 'ĭ'] +['æĩ', 'ij'] +['æ¿', '¡'] +['æ¿', '®'] +['æ¿', 'ŀ'] +['æ¿', 'ł'] +['æ¿', '¯'] +['è¹', 'ĩ'] +['è¬', 'ĩ'] +['éĤ', 'ĥ'] +['è¥', 'ģ'] +['æª', 'Ĺ'] +['æ', 'ĵĺ'] +['åŃ', 'º'] +['éļ', '³'] +['å¬', '·'] +['èŁ', 'Ĭ'] +['é¹', '¬'] +['éį', 'ª'] +['éı', 'Ĭ'] +['é¬', 'Ī'] +['é¬', 'ĥ'] +['çŀ', '½'] +['éŀ', '¯'] +['éŀ', '¨'] +['éŀ', '«'] +['éŀ', '§'] +['éŀ', '£'] +['èĹ', 'ľ'] +['èĹ', 'ł'] +['éĨ', 'ª'] +['è¹', 'Ļ'] +['ç¤', 'ĵ'] +['çĩ', '¹'] +['é¤', '®'] +['çŀ', '¿'] +['æĽ', 'Ľ'] +['é¢', '¢'] +['èº', 'ĩ'] +['è¹', 'ļ'] +['èŁ', 'Ľ'] +['èŁ', 'ª'] +['èŁ', 'ł'] +['èŁ', '®'] +['é¹', '®'] +['é»', 'ł'] +['é»', 'Ł'] +['é«', 'ħ'] +['é«', 'Ĥ'] +['éķ', '¬'] +['éķ', 'Ń'] +['éķ', '¯'] +['é¦', '¥'] +['ç°', 'Ł'] +['ç°', 'ª'] +['é¼', '¬'] 
+['éĽ', 'ł'] +['èī', 'Ł'] +['é³', 'İ'] +['é³', 'ı'] +['é³', 'IJ'] +['çĻ', 'ŀ'] +['çĻ', 'Ķ'] +['ç³', '¨'] +['è¹', '©'] +['éİ', 'ı'] +['éĤ', 'ĭ'] +['é¬', 'ı'] +['æĶ', 'ī'] +['éŀ', '²'] +['éŀ', '´'] +['èĹ', '¿'] +['èĺ', '§'] +['èĺ', 'ħ'] +['éĨ', '®'] +['éĨ', '¯'] +['éħ', 'ĥ'] +['éľ', 'ª'] +['éľ', 'Ń'] +['éľ', '¨'] +['é»', '¼'] +['åļ', '¯'] +['è¹', '°'] +['è¹', '¶'] +['è¹', '½'] +['è¹', '¼'] +['è¹', '´'] +['è¹', '¾'] +['è¹', '¿'] +['èł', 'ĸ'] +['èł', 'ĵ'] +['èŁ', '¾'] +['èł', 'Ĭ'] +['é»', '¢'] +['é«', 'ĭ'] +['é«', 'Į'] +['éķ', '²'] +['ç±', 'Ģ'] +['é½', 'ģ'] +['éŃ', 'ij'] +['èī', '¨'] +['é³', 'ĵ'] +['é³', 'Ķ'] +['é³', 'ķ'] +['é³', 'Ĺ'] +['é³', 'Ļ'] +['éı', 'ĸ'] +['ç¾', '¸'] +['ã¸', 'Ĩ'] +['çĢ', '£'] +['çĢ', 'Ľ'] +['è¥', '¦'] +['è°', '¶'] +['è¥', 'ŀ'] +['éª', '¥'] +['ç¼', 'µ'] +['çĵ', 'Ĵ'] +['æĶ', 'ĺ'] +['èĺ', '©'] +['èĺ', 'ĸ'] +['éĨ', '´'] +['éľ', '°'] +['éħ', 'Ĩ'] +['çŁ', 'į'] +['èº', 'ħ'] +['é¼', 'į'] +['å·', 'ī'] +['é»', '©'] +['é»', '¥'] +['é»', 'ª'] +['éķ', '³'] +['éķ', '´'] +['é»', '§'] +['çº', 'Ĥ'] +['çĴ', 'º'] +['é¼', '¯'] +['èĩ', 'ľ'] +['é³', 'ľ'] +['é³', 'Ŀ'] +['é³', 'Ł'] +['çį', '¾'] +['åŃ', 'Ģ'] +['éª', '§'] +['ç', 'ĵĺ'] +['é¼', 'Ļ'] +['éĨ', 'º'] +['ç¤', '´'] +['é¢', '¦'] +['æĽ', '©'] +['é³', '¢'] +['éº', 'Ŀ'] +['å¤', 'Ķ'] +['çĪ', 'Ŀ'] +['çģ', 'ı'] +['ç¦', '³'] +['éIJ', '¾'] +['ç¾', '¼'] +['èł', '¡'] +['èĢ', '±'] +['é¹', '³'] +['æ°', 'į'] +['é¥', 'ķ'] +['èº', 'IJ'] +['é«', 'ij'] +['éķ', 'µ'] +['ç©', '°'] +['é¥', 'Ķ'] +['é¬', '»'] +['é¬', 'Ł'] +['è¶', '±'] +['æĶ', '«'] +['æĶ', '¥'] +['é¢', '§'] +['èº', 'ľ'] +['é¼', '¹'] +['çĻ', '¯'] +['èł', '²'] +['èł', '¹'] +['èº', 'ŀ'] +['è¡', '¢'] +['çģ', 'ŀ'] +['è¥', '»'] +['çº', 'Ľ'] +['é¬', '£'] +['æĶ', '®'] +['åĽ', 'Ķ'] +['é¦', 'ķ'] +['æĪ', 'Ĩ'] +['çĪ', '¨'] +['é½', 'ī'] +['äº', 'į'] +['å°', '¢'] +['å½', '³'] +['åį', '¬'] +['æ®', '³'] +['ðł', '϶'] +['æ¯', 'Į'] +['éĤ', 'ĺ'] +['æĪ', 'ĭ'] +['åľ', '¢'] +['æ°', 'ķ'] +['ä¼', 'ĭ'] +['ä»', 'Ŀ'] +['åĨ', '®'] +['æ°', '¿'] +['æ±', 'Ī'] +['æ°', '¾'] +['å¿', 'ī'] +['å®', 'Ħ'] 
+['ð¬£', 'Ļ'] +['è®', '±'] +['æī', 'ŀ'] +['åľ', '²'] +['åľ', '«'] +['èĬ', 'ı'] +['èĬ', 'ĥ'] +['æľ', '³'] +['æľ', '¸'] +['ð¨', 'Ļ'] +['ð¨Ļ', '¸'] +['éĤ', '¨'] +['åIJ', 'Ĵ'] +['åIJ', 'ĸ'] +['å±', '¼'] +['å±', '¾'] +['è¾', '¿'] +['éĴ', 'Ĩ'] +['ä»', '³'] +['ä¼', '£'] +['ä¼', 'Ī'] +['çĻ', '¿'] +['çĶ', 'ª'] +['éĤ', 'ł'] +['çĬ', '´'] +['åĨ', '±'] +['éĤ', '¡'] +['ð¬ĩ', 'ķ'] +['æ±', 'ĭ'] +['ä', 'ľ'] +['äľ', '£'] +['è®', '»'] +['ð¬£', 'ŀ'] +['åŃ', 'ĸ'] +['ð¬ĺ', 'ĵ'] +['çº', '©'] +['çİ', 'Ĵ'] +['çİ', 'ĵ'] +['çİ', 'ĺ'] +['çİ', 'ļ'] +['åĪ', '¬'] +['ð«Ń', 'Ł'] +['åĿ', 'ľ'] +['åĿ', 'ī'] +['æī', '½'] +['ð«Ń', '¢'] +['åĿ', 'ĭ'] +['æī', 'º'] +['ã§', 'ij'] +['æ¯', 'IJ'] +['èĬ', '°'] +['èĬ', '£'] +['èĭ', 'Ĭ'] +['èĭ', 'ī'] +['èĬ', 'ĺ'] +['èĬ', '´'] +['èĬ', 'ł'] +['ð«', 'ĩ'] +['ð«ĩ', 'Ń'] +['èĬ', '¤'] +['æĿ', 'ķ'] +['æĿ', 'Ļ'] +['æĿ', 'Ħ'] +['æĿ', '§'] +['æĿ', '©'] +['å°', 'ª'] +['å°', '¨'] +['è½', 'ª'] +['ð«IJ', 'Ħ'] +['åĿ', 'Ĵ'] +['èĬ', 'Ī'] +['æĹ', '´'] +['æĹ', 'µ'] +['åij', 'Ļ'] +['ã', 'ķ'] +['ãķ', '®'] +['å²', 'į'] +['ð«', 'µ'] +['ð«µ', '·'] +['å²', 'ł'] +['å²', 'ľ'] +['åij', 'ĩ'] +['åĨ', 'ı'] +['è§', 'ĥ'] +['å²', 'Ļ'] +['ä¼', '¾'] +['ãij', 'ĩ'] +['ä¼', 'Ń'] +['ä½', 'ĸ'] +['ä¼', '²'] +['ä½', 'ģ'] +['é£', 'ı'] +['çĭ', 'ĥ'] +['éĹ', '¶'] +['æ±', '§'] +['æ±', '«'] +['ð£²', 'ĺ'] +['ð£²', 'Ĺ'] +['æ²', 'Ħ'] +['æ²', 'ĺ'] +['ð¬ĩ', 'Ļ'] +['æ±', 'Ń'] +['ã³', 'ĩ'] +['æ²', 'ĩ'] +['å¿', '®'] +['å¿', '³'] +['å¿', 'º'] +['ð¬£', '¡'] +['ç¥', 'ĥ'] +['è¯', 'ĩ'] +['éĤ', '²'] +['è¯', 'İ'] +['è¯', 'IJ'] +['å±', 'ĥ'] +['ð«', '¸'] +['ð«¸', '©'] +['å²', 'Ĭ'] +['éĺ', '½'] +['ä¢', 'º'] +['éĺ', '¼'] +['å¦', '§'] +['å¦', 'ĺ'] +['ð¨', 'ļ'] +['ð¨ļ', 'ķ'] +['çº', '®'] +['é©', '²'] +['ð«ĺ', 'ľ'] +['çº', '»'] +['ð¬ĺ', 'ĺ'] +['ð«ĺ', 'Ŀ'] +['çº', '¼'] +['çİ', '¤'] +['çİ', 'ŀ'] +['çİ', '±'] +['çİ', 'Ł'] +['éĤ', '½'] +['éĤ', '¿'] +['åĿ', '¥'] +['åĿ', '°'] +['åĿ', '¬'] +['åĿ', '½'] +['å¼', 'Ĩ'] +['èĢ', 'µ'] +['ä¢', '¼'] +['ð¦', 'Ń'] +['ð¦Ń', 'ľ'] +['èĮ', 'ĭ'] +['èĭ', '§'] +['èĭ', '¾'] +['èĭ', 'ł'] +['æŀ', 'ħ'] +['ãŃ', 
'İ'] +['æŀ', 'ĺ'] +['æŀ', 'į'] +['çŁ', '¼'] +['çŁ', '»'] +['åĮ', '¼'] +['ð¬¨', 'Ĥ'] +['ð¬Ģ', '©'] +['ð¬Ģ', 'ª'] +['æĹ', '¿'] +['æĺ', 'Ħ'] +['æĺ', 'Ĵ'] +['æĺ', 'Ī'] +['åĴ', 'ī'] +['åĴ', 'ĩ'] +['åĴ', 'į'] +['å²', 'µ'] +['å²', '½'] +['å²', '¨'] +['å²', 'ŀ'] +['å³', 'Ĥ'] +['ã', 'Ł'] +['ãŁ', 'ĥ'] +['åĽ', '·'] +['ð¬¬', '©'] +['éĴ', 'IJ'] +['éĴ', 'Ķ'] +['éĴ', 'ĸ'] +['çī', '¥'] +['ä½', '´'] +['åŀ', 'Ī'] +['ä¾', 'ģ'] +['ä¾', '¹'] +['ä½', '¸'] +['ä½', 'º'] +['éļ', '¹'] +['ãij', 'Ĭ'] +['ä¾', 'Ĥ'] +['ä½', '½'] +['ä¾', 'ĺ'] +['éĥ', 'Ī'] +['èĪ', 'ł'] +['éĥ', 'IJ'] +['éĥ', 'ĥ'] +['æĶ', '½'] +['èĤ', 'Ń'] +['èĤ', '¸'] +['èĤ', '·'] +['çĭ', 'ī'] +['çĭ', 'Ŀ'] +['é¥', '³'] +['å¿', 'ŀ'] +['çĤ', 'Į'] +['çĤ', 'Ĩ'] +['æ³', 'Ļ'] +['æ²', 'º'] +['æ³', 'Ĥ'] +['æ³', 'ľ'] +['æ³', 'ĥ'] +['æ³', 'ĩ'] +['æĢ', 'Ĭ'] +['å³', 'ĥ'] +['ç©', '¸'] +['ç¥', 'ĭ'] +['ç¥', 'Ĭ'] +['ð«į', '£'] +['ð¬£', '³'] +['ð¬', '©½'] +['é¸', '¤'] +['å¼', '¢'] +['å¼', '¨'] +['éĻ', 'ij'] +['ð¬®', '¿'] +['éĻ', 'İ'] +['ð¬¯', 'Ģ'] +['åį', 'º'] +['ä¹', '¸'] +['å¦', 'Ń'] +['å§', 'Ī'] +['ð«', '°'] +['ð«°', 'Ľ'] +['è¿', '³'] +['åı', 'ķ'] +['ð¬³', 'µ'] +['é©', 'µ'] +['ð¬³', '¶'] +['ä', 'Į'] +['äĮ', '¹'] +['é©', 'º'] +['ð«ł', 'Ĭ'] +['ç»', 'ĭ'] +['ç»', 'IJ'] +['çł', 'ī'] +['èĢ', 'Ķ'] +['ãĽ', 'ĥ'] +['çİ', '¶'] +['çı', 'ĩ'] +['çı', 'ħ'] +['ð¬į', 'Ľ'] +['çı', 'ĭ'] +['çİ', '¹'] +['çı', 'Į'] +['çİ', '¿'] +['éŁ', '¨'] +['åŀ', 'ļ'] +['åŀ', '¯'] +['åŀ', 'Ļ'] +['åŀ', '²'] +['åŁ', 'ı'] +['åŀ', 'į'] +['èĢ', 'ĩ'] +['é¿', 'į'] +['åŀ', 'İ'] +['åŀ', '´'] +['åŀ', 'Ł'] +['åŀ', 'ŀ'] +['æĮ', 'ĵ'] +['åŀ', 'µ'] +['åŀ', 'ı'] +['æĭ', '¶'] +['èį', 'ĸ'] +['èį', 'ģ'] +['èį', 'Ļ'] +['èį', 'Ľ'] +['èĮ', 'Ī'] +['èĮ', '½'] +['èį', 'Ħ'] +['èĮ', 'º'] +['ð¬ľ', '¬'] +['èį', 'ĵ'] +['èĮ', '³'] +['ð¦', '°'] +['ð¦°', '¡'] +['èĮ', 'Ľ'] +['èį', 'Ń'] +['ãŃ', 'ķ'] +['æŁ', '·'] +['æŁ', 'ĥ'] +['æŁ', 'Ĭ'] +['æŀ', '¹'] +['æł', 'IJ'] +['æŁ', 'ĸ'] +['éĥ', 'ļ'] +['åī', 'ħ'] +['ä´', 'ĵ'] +['è¿', 'º'] +['åİ', 'ĸ'] +['çł', 'Ĩ'] +['çł', 'ij'] +['çł', 'Ħ'] +['èĢ', 'ı'] +['å¥', 'ĵ'] +['ä', 
'¶'] +['ä¶', '®'] +['è½', 'µ'] +['è½', '·'] +['è½', '¹'] +['è½', 'º'] +['æĺ', 'º'] +['ðª', '¾'] +['ðª¾', '¢'] +['æĺ', '½'] +['çĽ', '·'] +['åĴ', '¡'] +['åĴ', 'º'] +['æĺ', '³'] +['æĺ', '£'] +['æĺ', '¤'] +['æĺ', '«'] +['æĺ', '¡'] +['åĴ', '¥'] +['æĺ', 'ª'] +['èĻ', '·'] +['èĻ', '¸'] +['åĵ', 'ĥ'] +['å³', 'ĺ'] +['èĢ', 'ij'] +['å³', 'Ľ'] +['ðª¨', '°'] +['å³', 'Ĺ'] +['å³', '§'] +['å¸', '¡'] +['éĴ', 'ĺ'] +['ð«ĵ', '§'] +['éĴ', 'ľ'] +['ð¬¬', '®'] +['ð¬¬', '±'] +['ð¬¬', 'Ń'] +['éĴ', 'ª'] +['éĴ', '¬'] +['éĴ', 'Ń'] +['çŁ', '§'] +['ç§', '¬'] +['ä¿', '«'] +['èĪ', 'ģ'] +['ä¿', 'ľ'] +['ä¿', 'Ļ'] +['ä¿', 'į'] +['åŀ', 'ķ'] +['è¡', 'İ'] +['èĪ', '£'] +['å¼', 'ĩ'] +['ä¾', '´'] +['é¸', '§'] +['äı', '¡'] +['èĥ', 'ł'] +['ð¦', '϶'] +['èĥ', 'Ī'] +['èĥ', '©'] +['èĥ', '£'] +['æľ', 'ı'] +['é£', 'IJ'] +['è¨', 'Ħ'] +['é¥', '»'] +['åº', '¤'] +['çĸ', '¢'] +['çĤ', '£'] +['çĤ', 'Ł'] +['ã', '¶'] +['ã¶', '²'] +['æ´', 'Ń'] +['æ´', 'ĺ'] +['æ´', 'ĵ'] +['æ´', '¿'] +['ã³', 'ļ'] +['æ³', 'ļ'] +['æµ', 'Ī'] +['æµ', 'ī'] +['æ´', '¸'] +['æ´', 'ij'] +['æ´', '¢'] +['æ´', 'Ī'] +['æ´', 'ļ'] +['æ´', 'º'] +['æ´', '¨'] +['æµ', 'IJ'] +['ã³', 'ĺ'] +['æ´', '´'] +['æ´', '£'] +['æģ', 'Ķ'] +['å®', '¬'] +['çª', 'Ģ'] +['æī', 'Ĥ'] +['è¢', 'Ĩ'] +['ç¥', 'ı'] +['ç¥', 'IJ'] +['ç¥', 'ķ'] +['åı', 'ļ'] +['éĻ', '§'] +['éĻ', 'ŀ'] +['å¨', 'Ģ'] +['å§', 'ŀ'] +['å§', '±'] +['å§', '¤'] +['å§', '¶'] +['å§', '½'] +['æŀ', '²'] +['ç»', 'ĸ'] +['éª', 'ĥ'] +['ð¬ĺ', '¡'] +['ð¬³', '½'] +['ð¬ĺ', '©'] +['ð«Ħ', '§'] +['å½', 'ĸ'] +['éª', 'ī'] +['æģ', 'Ŀ'] +['çı', 'ª'] +['çı', 'Ľ'] +['çı', '¹'] +['çIJ', 'Ĭ'] +['çİ', '¼'] +['çı', 'ĸ'] +['ðª', 'Ł'] +['ðªŁ', 'Ŀ'] +['çı', '½'] +['çı', '¦'] +['çı', '«'] +['çı', 'Ĵ'] +['ð¬į', '¤'] +['çı', '¢'] +['çı', 'ķ'] +['çı', 'Ŀ'] +['ð«Ń', '¼'] +['åŁ', 'Ĺ'] +['åŀ', '¾'] +['åŀ', 'º'] +['åŁ', 'Ĩ'] +['åŀ', '¿'] +['åŁ', 'Į'] +['åŁ', 'ĩ'] +['èİ', '°'] +['èĮ', 'Ŀ'] +['ð¬ľ', '¯'] +['éĦ', 'Ģ'] +['èİ', '¶'] +['èİ', 'Ŀ'] +['äĵ', 'ĸ'] +['èİ', 'Ļ'] +['æł', '»'] +['æ¡', 'ł'] +['ð¬', 'Ĥ'] +['ð¬Ĥ', '©'] +['æ¡', 'Ħ'] +['æ¢', 'ł'] +['æł', 
'´'] +['æ¢', '´'] +['æł', 'Ĵ'] +['éħ', 'İ'] +['éħ', 'ı'] +['ð«ł', 'Ĩ'] +['çł', 'µ'] +['çł', 'ł'] +['çł', '«'] +['çł', '¬'] +['ç¡', 'ģ'] +['æģ', '§'] +['ç¿', 'ĥ'] +['éĥ', 'ª'] +['ð¨', 'IJ'] +['ð¨IJ', 'Ī'] +['è¾', 'Ģ'] +['è¾', 'ģ'] +['ð¬', 'Į'] +['ð¬Į', 'Ĺ'] +['åī', 'ķ'] +['èµ', 'Ģ'] +['åĵ', '¢'] +['æĻ', 'ħ'] +['æĻ', 'Ĭ'] +['åĶ', 'Ŀ'] +['åĵ', '³'] +['åĵ', '±'] +['åĨ', 'Ķ'] +['æĻ', 'Ķ'] +['æĻ', 'IJ'] +['çķ', 'ĸ'] +['èļ', 'Ħ'] +['èļ', 'Ĩ'] +['ð«', 'ij'] +['ð«ij', '¡'] +['å¸', '±'] +['å´', 'ģ'] +['å³', '¿'] +['ðª¨', '¶'] +['å´', 'Ħ'] +['å¸', '¨'] +['å', '´Ģ'] +['èµ', 'Ĩ'] +['ð¬', '¬¸'] +['éĴ', '·'] +['ð¬¬', '»'] +['ð¬¬', '¹'] +['ð¬¬', '¿'] +['ð¬Ń', 'ģ'] +['çľ', 'ļ'] +['çĶ', '¡'] +['ç¬', '«'] +['åĢ', '»'] +['åĢ', '´'] +['èĦ', '©'] +['åĢ', '®'] +['åĢ', 'ķ'] +['åĢ', 'ŀ'] +['ð«', '¢'] +['ð«¢', '¸'] +['åĢ', 'ĵ'] +['åĢ', '§'] +['è¡', 'ĥ'] +['èĻ', 'Ĵ'] +['èĪ', 'Ń'] +['èĪ', '¯'] +['èĪ', '¥'] +['çĵ', 'ŀ'] +['é¬', '¯'] +['é¸', '°'] +['èĦ', 'İ'] +['æľ', 'ĵ'] +['èĥ', '²'] +['èĻ', 'ĵ'] +['é±', '½'] +['çĭ', '´'] +['å³', '±'] +['çĭ', '»'] +['çľ', '¢'] +['ð«Ĺ', '§'] +['åĭ', 'į'] +['çĹ', 'Ħ'] +['çĸ', '°'] +['çĹ', 'ĥ'] +['ç«', 'ĺ'] +['ç¾', 'ĸ'] +['ç¾', 'ĵ'] +['æ¡', 'Ĭ'] +['æķ', 'ī'] +['çĥ', 'ł'] +['çĥ', 'Ķ'] +['çĥ', '¶'] +['çĥ', '»'] +['ð¬Ĭ', 'Ī'] +['æ¶', 'į'] +['æµ', '¡'] +['æµ', 'Ń'] +['æµ', '¬'] +['æ¶', 'Ħ'] +['æ¶', '¢'] +['æ¶', 'IJ'] +['æµ', '°'] +['æµ', 'Ł'] +['æµ', 'Ľ'] +['æµ', '¼'] +['æµ', '²'] +['æ¶', 'ĺ'] +['æĤ', 'Ī'] +['æĤ', 'ĥ'] +['æĤ', '¢'] +['ð¬Ĵ', 'Ī'] +['å®', '§'] +['çª', 'ħ'] +['çª', 'Ĭ'] +['çª', 'İ'] +['æī', 'ħ'] +['æī', 'Ĩ'] +['è¢', 'ª'] +['è¢', 'Ĺ'] +['è¢', '¯'] +['ç¥', '§'] +['éļ', 'º'] +['åł', '²'] +['çĸ', 'į'] +['ð¨', 'º'] +['ð¨º', 'Ļ'] +['éĻ', '´'] +['ç', 'ĥĿ'] +['çł', '®'] +['ãĽ', 'ļ'] +['åĵ', '¿'] +['ç¿', 'Ģ'] +['ç¿', 'Ĥ'] +['åī', 'Ł'] +['ð¬³', '¿'] +['ð«Ħ', '¨'] +['ç»', '¤'] +['éª', 'į'] +['ð¬ĺ', '«'] +['ä', 'Ĥ'] +['äĤ', '®'] +['çIJ', 'İ'] +['çı', '¸'] +['çı', 'µ'] +['çIJ', 'Ħ'] +['çIJ', 'Ī'] +['çIJ', 'Ģ'] +['çı', 'º'] +['æİ', 'Ń'] +['åł', 'İ'] +['åł', 'IJ'] 
+['åŁ', '¼'] +['æİ', 'İ'] +['åŁ', '«'] +['åł', 'Į'] +['æĻ', '¢'] +['ð«', '®'] +['ð«®', 'ĥ'] +['æİ', 'ŀ'] +['åŁ', 'ª'] +['å£', '¸'] +['ãĻ', 'į'] +['èģ', 'į'] +['èı', 'Ŀ'] +['èIJ', 'ļ'] +['èı', '¥'] +['èİ', '¿'] +['äĵ', '«'] +['åĭ', 'ļ'] +['äĵ', '¬'] +['èIJ', 'Ĩ'] +['èı', 'Ĥ'] +['èı', 'į'] +['èı', '¼'] +['èIJ', '£'] +['äĵ', '¨'] +['èı', 'ī'] +['äĵ', 'Ľ'] +['æ¢', '¼'] +['æ¢', '½'] +['æ¡', '²'] +['æ¢', '¾'] +['æ¡', '¯'] +['æ¢', '£'] +['æ¢', 'Į'] +['æ¡', '¹'] +['æķ', 'Ķ'] +['åİ', '£'] +['ç¡', 'Ķ'] +['é¿', 'İ'] +['ç¡', 'Ļ'] +['ç¡', 'ļ'] +['ç¡', 'Ĭ'] +['ç¡', 'į'] +['åĭ', 'Ķ'] +['ä´', 'ķ'] +['é¾', 'ģ'] +['éĢ', '´'] +['åĶ', 'ª'] +['åķ', '«'] +['ç¿', 'Ī'] +['ã', '«'] +['ã«', '°'] +['æĻ', 'Ļ'] +['çķ', '¤'] +['ð¬±', 'ĸ'] +['è¶', '¼'] +['è·', 'Ĥ'] +['èĽ', 'ĥ'] +['èļ', '²'] +['ð¬Ł', '½'] +['èļ', 'º'] +['åķ', '´'] +['äİ', 'ĥ'] +['å´', '§'] +['å´', 'Ł'] +['å´', 'ŀ'] +['å´', 'Ĵ'] +['å´', 'Į'] +['å´', '¡'] +['éĵ', 'ı'] +['ð«ĵ', '¯'] +['ð«Ł', '¹'] +['éĵ', 'ķ'] +['ð«Ł', '¼'] +['éĵ', 'ĸ'] +['éĵ', 'ĺ'] +['éĵ', 'ļ'] +['éĵ', 'ŀ'] +['éĵ', '¥'] +['éĵ', '´'] +['çī', '»'] +['çī', '¿'] +['ç¨', 'Ĩ'] +['ç¬', '±'] +['ç¬', '¯'] +['åģ', '°'] +['åģ', '¡'] +['é¸', 'º'] +['åģ', 'Ń'] +['åģ', '²'] +['åģ', 'ģ'] +['ã', '¿'] +['ã¿', 'ł'] +['éĦ', 'ħ'] +['åģ', 'ĵ'] +['å¾', 'Ľ'] +['è¡', 'Ĵ'] +['èĪ', '³'] +['èĪ', '²'] +['é¸', '¼'] +['æĤ', 'Ĩ'] +['éĦ', 'ĥ'] +['çĵ', '»'] +['ä', 'Ŀ'] +['äĿ', 'Ļ'] +['èĦ', '¶'] +['èĦ', 'ŀ'] +['èĦ', 'Ł'] +['äı', '²'] +['é±', '¾'] +['çĮ', 'ĩ'] +['çĮ', 'Ĭ'] +['çĮ', 'Ħ'] +['è§', 'ĸ'] +['ðł', 'ħ'] +['ðłħ', '¤'] +['åº', '±'] +['åº', '¼'] +['åº', '³'] +['çĹ', 'ĵ'] +['ä´', 'Ķ'] +['ç«', '«'] +['åł', 'ĥ'] +['éĺ', 'Į'] +['ç¾', 'Ŀ'] +['ç¾', 'ķ'] +['çĦ', 'Ĩ'] +['çĥ', 'º'] +['çĦ', 'Į'] +['æ·', 'ı'] +['ð¬ĩ', '¹'] +['æ·', 'Ł'] +['æ·', 'ľ'] +['æ·', '´'] +['æ·', '¯'] +['æ¹', '´'] +['æ¶', '´'] +['ð¬į', '¡'] +['ã', '¥'] +['ã¥', 'Ħ'] +['æĥ', 'Ľ'] +['æĥ', 'Ķ'] +['æĤ', '°'] +['æĥ', 'Ļ'] +['å¯', 'ģ'] +['éĢ', 'Ń'] +['ð¬¤', 'ĩ'] +['ð«į', '¯'] +['è¢', '¼'] +['è£', 'Ī'] +['ç¥', '²'] +['ð¬¤', 'Ĭ'] +['ð«į', 
'²'] +['è°', 'ŀ'] +['èī', '´'] +['å¼', '¸'] +['å¼', '¶'] +['ð¬¯', 'İ'] +['éļ', 'ĥ'] +['å©', 'ŀ'] +['å¨', 'µ'] +['å©', '¼'] +['åª', 'ĸ'] +['å©', '³'] +['å©', 'į'] +['å©', 'Į'] +['å©', '«'] +['å©', '¤'] +['å©', 'ĺ'] +['å©', 'ł'] +['ð¬ĺ', '¬'] +['ð¬ĺ', 'Ń'] +['ð¬´', 'Ĥ'] +['ð«ĺ', '¦'] +['ç»', '¹'] +['ð«Ł', 'ħ'] +['ð¬ĺ', '¯'] +['éª', 'ķ'] +['ð«ĺ', '§'] +['çµ', 'ľ'] +['çı', '·'] +['çIJ', '²'] +['çIJ', '¡'] +['çIJ', 'Ł'] +['çIJ', 'Ķ'] +['çIJ', 'Ń'] +['åł', '¾'] +['åł', '¼'] +['æı', 'ķ'] +['ãĻ', 'ĺ'] +['åł', '§'] +['åĸ', 'Ĩ'] +['åł', '¨'] +['å¡', 'ħ'] +['åł', 'ł'] +['çµ', '·'] +['ðª', '£'] +['ðª£', '»'] +['ð¡', 'İ'] +['ð¡İ', 'ļ'] +['è', 'ijľ'] +['æĥ', 'İ'] +['èIJ', '³'] +['èij', 'Ļ'] +['éĿ', '¬'] +['èij', '´'] +['èĴ', 'ĩ'] +['èĴ', 'Ī'] +['éĦ', 'ļ'] +['èĴ', 'ī'] +['èĵ', 'ĩ'] +['èIJ', '©'] +['èij', '°'] +['èij', 'İ'] +['éĦ', 'ij'] +['èĴ', 'İ'] +['èij', 'ĸ'] +['èĴ', 'Ħ'] +['èIJ', '¹'] +['æ£', '¤'] +['æ£', '½'] +['æ£', '«'] +['æ¤', 'ĵ'] +['æ¤', 'ij'] +['ð¬', 'ĥ'] +['ð¬ĥ', 'Ĭ'] +['é¹', 'Ģ'] +['æ¤', 'Ĩ'] +['æ£', 'ĵ'] +['æ£', '¬'] +['æ£', 'ª'] +['æ¤', 'Ģ'] +['æ¥', 'Ĺ'] +['ð¬', '·'] +['ð¬·', 'ķ'] +['çĶ', '¦'] +['éħ', '¦'] +['è§', 'Į'] +['å¥', '¡'] +['çļ', 'ķ'] +['ç¡', 'ª'] +['æ¬', '¹'] +['è©', 'Ł'] +['ð«IJ', 'IJ'] +['è¾', 'Į'] +['æ£', 'IJ'] +['é¾', 'Ĥ'] +['ð¬', '¹'] +['ð¬¹', '¼'] +['é»', '¹'] +['çī', 'ļ'] +['çĿ', 'İ'] +['æĻ', '«'] +['æĻ', 'ª'] +['æĻ', '±'] +['ð', '§'] +['ð§', '¿'] +['ð§¿', '¹'] +['èĽ', 'ij'] +['çķ', '¯'] +['æĸ', 'Ŀ'] +['åĸ', '¤'] +['å´', '¶'] +['åµ', 'ģ'] +['ð«', '¶'] +['ð«¶', 'ĩ'] +['å´', '¾'] +['åµ', 'ħ'] +['å´', '¿'] +['åµ', 'ļ'] +['ç¿', 'Ļ'] +['ð«ĸ', '®'] +['åľ', 'Į'] +['åľ', 'IJ'] +['èµ', 'ij'] +['èµ', 'Ĵ'] +['é¿', 'ı'] +['éĵ', '¹'] +['ð¬Ń', 'Ĭ'] +['éĵ', '½'] +['ð¨±', 'ĩ'] +['ð«ĵ', '¶'] +['éĶ', 'Ĭ'] +['éĶ', 'į'] +['éĶ', 'İ'] +['ð¬Ń', 'İ'] +['éĶ', 'ĵ'] +['çĬ', 'ĩ'] +['é¢', 'ĭ'] +['ç¨', 'Į'] +['çŃ', 'Ģ'] +['çŃ', 'ĺ'] +['çŃ', 'ľ'] +['çŃ', '¥'] +['çŃ', 'ħ'] +['åĤ', 'ĥ'] +['åĤ', 'ī'] +['ç¿', 'Ľ'] +['åĤ', 'Ĵ'] +['åĤ', 'ķ'] +['èĪ', '¾'] +['çķ', '¬'] +['ð«ĸ', '¯'] 
+['èĦ', '¿'] +['èħ', 'ĺ'] +['ä', 'IJ'] +['äIJ', 'ĥ'] +['èħ', 'Ļ'] +['èħ', 'Ĵ'] +['ð¬±', 'Ł'] +['é²', 'ĥ'] +['çĮ', '°'] +['ð«', 'Ľ'] +['ð«Ľ', 'Ń'] +['çĮ', '¯'] +['ã', 'º'] +['ãº', 'Ħ'] +['é¦', 'ī'] +['åĩ', 'ĵ'] +['éĦ', 'Ĺ'] +['ð«', '·'] +['ð«·', '·'] +['å»', 'ĭ'] +['å»', 'Ĩ'] +['éĦ', 'Į'] +['ç²', '¢'] +['éģ', 'Ĩ'] +['æĹ', 'IJ'] +['ð¬®', '±'] +['çĦ', 'ŀ'] +['ð¬Ĭ', '¤'] +['æ¬', '»'] +['ð£', '¸'] +['ð£¸', '£'] +['æº', 'ļ'] +['æº', 'ģ'] +['æ¹', 'Ŀ'] +['æ¸', '°'] +['æ¹', 'ĵ'] +['ã', '´'] +['ã´', 'Ķ'] +['æ¸', 'Ł'] +['æº', 'ł'] +['æ¸', '¼'] +['æº', 'ĩ'] +['æ¹', '£'] +['æ¹', 'ij'] +['æº', 'ŀ'] +['æĦ', 'IJ'] +['æĦ', 'ĥ'] +['æķ', '©'] +['çĶ', '¯'] +['æ£', '¨'] +['æī', 'Ĭ'] +['è£', '£'] +['ç¥', '¼'] +['å©', '»'] +['åª', 'Ĩ'] +['åª', 'ŀ'] +['ãĽ', '¹'] +['åª', 'ĵ'] +['åª', 'Ĥ'] +['åª', 'Ħ'] +['æ¯', 'µ'] +['çŁ', 'ŀ'] +['ð¬´', 'ĥ'] +['ð«ĺ', '¨'] +['ç¼', 'Ĭ'] +['ç¼', 'IJ'] +['éª', 'Ļ'] +['çij', 'ĥ'] +['çij', 'ĵ'] +['çij', 'ħ'] +['çij', 'Ĩ'] +['ä´', 'ĸ'] +['çij', 'ĸ'] +['çij', 'Ŀ'] +['çij', 'Ķ'] +['çij', 'Ģ'] +['ð¤', '§'] +['ð¤§', 'Ľ'] +['çij', '³'] +['çij', 'Ĥ'] +['å¶', 'ħ'] +['çij', 'ij'] +['éģ', 'ĺ'] +['é«', '¢'] +['å¡', '¥'] +['åł', '½'] +['èµ', 'ª'] +['æij', 'Ľ'] +['å¡', 'Ŀ'] +['æIJ', 'Ĵ'] +['æIJ', 'Į'] +['èĴ', '±'] +['èĴ', '¨'] +['èĵ', 'ı'] +['èĶ', 'Ģ'] +['èĵ', '¢'] +['èĵ', 'Ĥ'] +['èĴ', '»'] +['èĵ', '£'] +['æ¤', '¹'] +['æ¥', 'ª'] +['æ¦', 'ĥ'] +['æ¦', 'ħ'] +['æ¥', 'Ĵ'] +['æ¥', '©'] +['æ¦', 'ĩ'] +['æ¤', '¸'] +['æ¥', 'Ļ'] +['æŃ', 'ħ'] +['ð¬', 'ª'] +['ð¬ª', '©'] +['ç¢', 'ĥ'] +['ç¢', 'ı'] +['ð¬Ĵ', 'Ķ'] +['ç¢', 'Ī'] +['äĥ', 'ħ'] +['ç¡', '¿'] +['éĦ', 'ł'] +['è¾', 'Ĵ'] +['ð¬¨', 'İ'] +['ð«IJ', 'ĵ'] +['é¾', 'Ĩ'] +['è§', 'ľ'] +['ä', '£'] +['ä£', 'ĺ'] +['æļ', 'ķ'] +['é¹', 'į'] +['ð«', '«'] +['ð««', 'ĩ'] +['ã¬', 'Ĭ'] +['æļ', 'ħ'] +['è·', '±'] +['èľ', 'IJ'] +['èľ', 'İ'] +['åµ', '²'] +['èµ', 'Ĺ'] +['éª', '±'] +['éĶ', 'ĸ'] +['ð«ĵ', '¹'] +['éĶ', 'ĺ'] +['éĶ', '³'] +['éĶ', '§'] +['éĶ', 'ª'] +['ð¬Ń', 'ļ'] +['éĶ', '«'] +['éĶ', '¬'] +['ð¬Ń', 'Ľ'] +['ç¨', 'ij'] +['ç¨', 'Ļ'] +['ä', 'ħ'] +['äħ', 'Ł'] 
+['ð¬', 'ķ'] +['ð¬ķ', 'Ĥ'] +['çŃ', '»'] +['çŃ', '¼'] +['çŃ', '¶'] +['çŃ', '¦'] +['çŃ', '¤'] +['åĤ', 'º'] +['é¹', 'İ'] +['åĥ', 'ĩ'] +['èī', 'ħ'] +['èī', 'ī'] +['è°', '¼'] +['è²', 'Ĩ'] +['èħ', '½'] +['èħ', '¨'] +['èħ', '¯'] +['é²', 'ī'] +['é²', 'Ĭ'] +['é²', 'Į'] +['ä²', 'Ł'] +['ð¬¶', 'ĭ'] +['ð¬¶', 'į'] +['é²', 'ı'] +['éĽ', 'Ĭ'] +['çĮ', 'º'] +['é£', 'Ķ'] +['è§', 'Ł'] +['ð¦', 'Ŀ¼'] +['é¦', 'Į'] +['è£', 'Ľ'] +['å»', 'Ĵ'] +['çĺ', 'ħ'] +['éĦ', 'ĺ'] +['é¹', 'Ĵ'] +['éĦ', 'ľ'] +['éº', 'Ģ'] +['éĦ', '£'] +['éĺ', 'ĺ'] +['ð«Ķ', '¶'] +['çħ', 'ģ'] +['çħ', 'ĥ'] +['çħ', '´'] +['çħ', 'ĭ'] +['çħ', 'Ł'] +['çħ', 'ĵ'] +['æ»', 'ł'] +['æº', 'į'] +['æº', '¹'] +['æ»', 'Ĩ'] +['æ»', 'ī'] +['æº', '¦'] +['æº', 'µ'] +['æ¼', '·'] +['æ»', '§'] +['æ»', 'ĺ'] +['æ»', 'į'] +['æĦ', 'Ń'] +['æħ', '¥'] +['æħ', 'Ĩ'] +['å¡', '±'] +['ð«', 'ĮĢ'] +['è', '£¼'] +['ç¦', 'ĭ'] +['ç¦', 'Ķ'] +['ç¦', 'ĺ'] +['ç¦', 'Ĵ'] +['è°', '«'] +['é¹', 'Ķ'] +['ð«ĸ', '³'] +['æĦ', 'į'] +['å«', 'Ħ'] +['åª', '±'] +['æĪ', '¤'] +['åĭ', 'ł'] +['æĪ', '£'] +['ð«ĺ', 'ª'] +['ð«ĺ', '¬'] +['ç¼', 'ŀ'] +['èĢ', '¤'] +['çij', '§'] +['ð«', 'ŀ'] +['ð«ŀ', '©'] +['çij', '¨'] +['çij', '±'] +['çij', '·'] +['çij', '¢'] +['æĸ', 'ł'] +['æij', 'ı'] +['å¢', 'ķ'] +['å¢', 'Ī'] +['å¢', 'IJ'] +['å¢', 'ĺ'] +['æij', '´'] +['éĬ', 'İ'] +['ð¡', 'IJ'] +['ð¡IJ', 'ĵ'] +['å¢', 'ļ'] +['æĴ', 'ĸ'] +['ðª', '¤'] +['ðª¤', 'Ĺ'] +['éĿ', '½'] +['éŀ', 'ģ'] +['èĶ', 'Į'] +['èĶ', 'Ī'] +['èĵ', '°'] +['èĶ', '¹'] +['èĶ', 'Ĭ'] +['åĺ', 'ı'] +['æ¦', '°'] +['æ¦', 'ij'] +['æ§', 'ļ'] +['ð£', 'Ĺ'] +['ð£Ĺ', 'ĭ'] +['æ§', 'ľ'] +['æ¦', 'į'] +['çĸ', 'IJ'] +['ð¬¸', 'ĺ'] +['éħ', 'º'] +['éħ', '¾'] +['éħ', '²'] +['éħ', '´'] +['ç¢', '¶'] +['äĥ', 'İ'] +['ð¬Ĵ', 'Ĺ'] +['ç¢', '¨'] +['ð¥', 'Ķ'] +['ð¥Ķ', '²'] +['ç¢', '¹'] +['ç¢', '¥'] +['åĬ', 'Ĥ'] +['ð«ļ', 'ĸ'] +['ä´', 'Ĺ'] +['å¤', '¥'] +['çŀ', 'į'] +['é¹', 'ĸ'] +['ã¬', 'İ'] +['è·', '½'] +['èľ', '¾'] +['å¹', 'ĸ'] +['å¶', 'į'] +['åľ', 'Ļ'] +['ð¨±', 'ı'] +['éĶ', 'º'] +['éĶ', '¼'] +['éĶ', '½'] +['ð¬Ń', '¤'] +['éĶ', '¾'] +['éĶ', '¿'] +['éķ', 'ĥ'] +['éķ', 'Ħ'] 
+['éķ', 'ħ'] +['é¦', 'Ŀ'] +['é¹', 'Ļ'] +['ç®', '¨'] +['ç®', 'ĸ'] +['åĬ', 'Ħ'] +['åĥ', '¬'] +['åĥ', '¦'] +['åĥ', 'Ķ'] +['åĥ', 'İ'] +['æ§', 'ĥ'] +['ãĻ', '¦'] +['é²', 'Ĵ'] +['é²', 'ķ'] +['ð«ļ', 'ķ'] +['é²', 'ĸ'] +['é²', 'Ĺ'] +['é²', 'ĺ'] +['é²', 'Ļ'] +['ð¬¶', 'IJ'] +['ð¬¶', 'ı'] +['ð', '©½'] +['ð©½', '¾'] +['å¤', 'IJ'] +['çį', 'į'] +['é£', 'Ĺ'] +['ð¬¸', 'ļ'] +['åĩ', 'ĺ'] +['å»', 'ij'] +['å»', 'Ļ'] +['çĺ', 'Ĺ'] +['çĺ', '¥'] +['çĺ', 'ķ'] +['é²', 'Ŀ'] +['éĦ', '«'] +['çĨ', 'ĩ'] +['æ¼', '¹'] +['æ¼', 'ĸ'] +['æ½', 'Ĩ'] +['æ¼', '¤'] +['æ½', '©'] +['æ¼', '¼'] +['æ¼', '´'] +['ã', '½'] +['ã½', 'ı'] +['æ¼', 'Ī'] +['æ¼', 'ĭ'] +['æ¼', '»'] +['æħ', '¬'] +['çª', '¬'] +['çª', 'Ń'] +['ã', '®'] +['ã®', '¾'] +['ð¬¤', 'Ŀ'] +['è¤', 'ķ'] +['ç¦', 'Ľ'] +['ç¦', 'ļ'] +['éļ', '©'] +['å«', 'ķ'] +['å«', 'Ń'] +['å«', 'ľ'] +['å«', 'ª'] +['ð¬', 'ĻĤ'] +['ã', '»'] +['ã»', '¬'] +['éº', '¹'] +['çĴ', 'Ĩ'] +['æ¼', '¦'] +['åı', 'ĩ'] +['å¢', '£'] +['å¢', '¦'] +['å¢', '¡'] +['åĬ', 'IJ'] +['èĸ', 'ģ'] +['èķ', '°'] +['èĶ', 'ĥ'] +['é¼', 'Ĵ'] +['æ§', '±'] +['é¹', 'Ŀ'] +['ç£', 'ı'] +['ç£', 'ī'] +['æ®', '£'] +['æħ', 'Ń'] +['éľ', 'ħ'] +['æļ', 'µ'] +['æļ', '²'] +['æļ', '¶'] +['è¸', '¦'] +['è¸', '£'] +['äĹ', 'ĸ'] +['èĿ', 'ĺ'] +['èĿ', '²'] +['èĿ', '¤'] +['åĻ', 'ĩ'] +['å', 'ĻĤ'] +['åĻ', 'Ģ'] +['ç½', '¶'] +['å¶', '²'] +['å¶', 'ĵ'] +['ãł', 'ĩ'] +['å¶', 'Ł'] +['å¶', 'Ĵ'] +['éķ', 'Ĩ'] +['éķ', 'Ī'] +['éķ', 'ĭ'] +['éķ', 'İ'] +['ð¬Ń', '©'] +['éķ', 'ķ'] +['ç¨', '¹'] +['åĦ', 'ĩ'] +['çļ', 'ŀ'] +['çļ', 'Ľ'] +['ä´', 'ĺ'] +['èī', 'İ'] +['èī', 'ı'] +['é¹', 'Ł'] +['ð©¾', 'ĥ'] +['é²', '¦'] +['é²', 'ª'] +['é²', '¬'] +['æ©', '¥'] +['è§', 'Ń'] +['é¹', 'ł'] +['é¹', '¡'] +['ç³', 'ĩ'] +['ç³', 'Ī'] +['ç¿', '¦'] +['é¹', '¢'] +['é¹', '£'] +['çĨ', 'Ľ'] +['æ½', 'ĸ'] +['æ½', 'µ'] +['ã', 'µ'] +['ãµ', 'IJ'] +['æ¾', 'Ĥ'] +['æ¾', 'Ľ'] +['çij', '¬'] +['æ½', '½'] +['æ½', '¾'] +['æ½', 'ı'] +['æĨ', 'Ń'] +['æĨ', 'ķ'] +['ð¬¸', '£'] +['æĪ', 'Ń'] +['è¤', '¯'] +['ç¦', '¤'] +['ð«į', '½'] +['å«', '½'] +['éģ', '¹'] +['ð¬´', 'Ĭ'] +['çĴ', '¥'] +['çĴ', '²'] +['çĴ', 
'Ĵ'] +['æĨ', 'Ļ'] +['æĵ', 'IJ'] +['éĦ', '¹'] +['èĸ', '³'] +['éŀ', 'Ķ'] +['é»', 'ĩ'] +['ð¬', 'ŀ'] +['ð¬ŀ', 'Ł'] +['èķ', 'Ĺ'] +['èĸ', '¢'] +['èķ', '¹'] +['æ©', 'ŀ'] +['æ©', 'ij'] +['æ©', '¦'] +['éĨ', 'ij'] +['è§', '±'] +['ç£', '¡'] +['ð¥', 'ķ'] +['ð¥ķ', '¢'] +['ç£', 'ľ'] +['è±', '®'] +['ð«Ł', '¦'] +['ð¬º', 'Ī'] +['ð«ł', 'ľ'] +['é¹', '¾'] +['èĻ', '¤'] +['æļ', '¿'] +['æĽ', 'Į'] +['æĽ', 'Ī'] +['ã¬', 'ļ'] +['è¹', 'ħ'] +['è¸', '¶'] +['äĹ', 'Ľ'] +['èŀ', 'Ĺ'] +['çĸ', 'ģ'] +['ãł', 'ĵ'] +['å¹', 'ª'] +['ðª', '©'] +['ðª©', 'ĺ'] +['å¶', '¦'] +['ð¬Ń', '¬'] +['ð¨±', 'ij'] +['ð¬Ń', '¯'] +['é¦', 'ŀ'] +['ç©', 'Ħ'] +['ç¯', 'ļ'] +['ç¯', '¯'] +['ç°', 'ī'] +['é¼', '½'] +['è¡', 'ł'] +['çĽ', '¦'] +['èŀ', '£'] +['ç¸', '¢'] +['é²', 'Ń'] +['é²', '¯'] +['é²', '°'] +['é²', 'º'] +['é²', '¹'] +['ð«Ĺ', '´'] +['äº', '¸'] +['çĻ', 'Ģ'] +['çĺ', 'Ń'] +['ð¬¸', '¦'] +['ç¾', '±'] +['ç³', 'Ĵ'] +['çĩ', 'ĭ'] +['çĨ', '»'] +['çĩ', 'Ĭ'] +['çĩ', 'ļ'] +['çĩ', 'ı'] +['æ¿', '©'] +['æ¿', 'ĭ'] +['æ¾', 'ª'] +['æ¾', '½'] +['æ¾', '´'] +['æ¾', 'Ń'] +['æ¾', '¼'] +['æĨ', '·'] +['æĨ', 'º'] +['æĩ', 'Ķ'] +['é»', 'ī'] +['å¬', 'Ľ'] +['é¹', '¨'] +['ç¿', '¯'] +['ð«Ħ', '·'] +['çĴ', '±'] +['ð¤', '©½'] +['çĴ', '¬'] +['çĴ', '®'] +['é«', '½'] +['æĵ', '¿'] +['èĸ', '¿'] +['èĸ', '¸'] +['æª', 'ij'] +['æ«', 'Ĩ'] +['æª', 'ŀ'] +['éĨ', '¨'] +['ç', '¹Ħ'] +['ç£', '¹'] +['ç£', '»'] +['çŀ', '«'] +['çŀ', 'µ'] +['è¹', 'IJ'] +['èŁ', 'ı'] +['ã', 'ĺ'] +['ãĺ', 'İ'] +['ð¬Ń', '³'] +['éķ', '¤'] +['ð¬Ń', '¶'] +['ð«Ķ', 'į'] +['éķ', '¥'] +['éķ', '¨'] +['ð¬Ń', '¸'] +['ð¨±', 'Ķ'] +['ð¬Ń', '¼'] +['ð«Ķ', 'İ'] +['çŁ', '°'] +['ç©', 'Ļ'] +['ç©', 'ľ'] +['ç©', 'Ł'] +['ç°', 'ķ'] +['ç°', 'ĥ'] +['ç°', 'ı'] +['åĦ', '¦'] +['éŃ', 'ĭ'] +['æĸ', '¶'] +['èī', 'ļ'] +['ð¬¸', 'ª'] +['è°', '¿'] +['ä²', 'ł'] +['ð¬¶', 'Ł'] +['é²', '¾'] +['ð¬¶', 'ł'] +['é²', '¿'] +['é³', 'ģ'] +['é³', 'Ĥ'] +['é³', 'Ī'] +['é³', 'ī'] +['çį', '¯'] +['äĹ', 'ª'] +['é¦', 'ĺ'] +['è¥', 'ķ'] +['è¥', 'ļ'] +['ð¬¶', '¨'] +['èŀ', '±'] +['çĶ', 'ĵ'] +['å¬', '¬'] +['å¬', '¥'] +['ð¦', 'Ī'] +['ð¦Ī', '¡'] +['ð«Ħ', '¸'] 
+['çĵ', 'Ģ'] +['éĩ', 'IJ'] +['é¬', '¶'] +['çĪ', 'ĩ'] +['éŀ', '³'] +['éŀ', '®'] +['ð¬Ł', 'ģ'] +['èĹ', 'Ł'] +['èĹ', '¦'] +['èĹ', '¨'] +['é¹', '²'] +['æª', '«'] +['é»', '¡'] +['ç¤', 'ŀ'] +['ç¤', 'Į'] +['ð¥', 'ĸ'] +['ð¥ĸ', '¨'] +['è¹', '¢'] +['è¹', 'ľ'] +['èŁ', '«'] +['äĹ', '´'] +['åļ', 'ļ'] +['é«', 'ĥ'] +['éķ', '®'] +['éķ', '±'] +['éħ', 'Ĥ'] +['é¦', '§'] +['ç°', 'ł'] +['ç°', 'Ŀ'] +['ç°', '°'] +['é¼', '«'] +['é¼', '©'] +['çļ', '¦'] +['èĩ', 'ij'] +['ä²', '¢'] +['é³', 'ij'] +['é³', 'Ĵ'] +['é¹', '±'] +['é¹', '¯'] +['çĻ', 'Ĺ'] +['ð¦', 'Ĵ'] +['ð¦Ĵ', 'į'] +['æĹ', 'ŀ'] +['ç¿', '·'] +['åĨ', 'ģ'] +['äİ', 'ĸ'] +['çĢ', 'Ķ'] +['çĢ', 'į'] +['çĢ', 'Į'] +['è¥', 'ľ'] +['ä´', 'Ļ'] +['ð¬Ļ', 'Ĭ'] +['åļ', 'Ń'] +['ã', '°'] +['ã°', 'Ģ'] +['é¬', '·'] +['éĨ', 'Ń'] +['è¹', '¯'] +['èł', 'ĭ'] +['ç¿', '¾'] +['é³', 'ĺ'] +['åĦ', '³'] +['åĦ', '´'] +['é¼', 'Ĺ'] +['ð¬¶', 'Ń'] +['ð©¾', 'Į'] +['é³', 'ļ'] +['é³', 'Ľ'] +['éº', 'ij'] +['éº', 'ĸ'] +['èł', 'ĥ'] +['å½', 'Ł'] +['å¬', '¿'] +['é¬', 'Ĵ'] +['èĺ', 'ĺ'] +['æ¬', 'Ĥ'] +['é', 'Ĩµ'] +['é¢', '¥'] +['çĶ', 'Ĺ'] +['ð¨', 'Ł'] +['ð¨Ł', 'ł'] +['å·', 'ĩ'] +['éħ', 'ħ'] +['é«', 'İ'] +['çĬ', '¨'] +['ð¬¶', '®'] +['ð¨', 'Ń'] +['ð¨Ń', 'ī'] +['ã¸', 'Į'] +['çĪ', 'Ķ'] +['çĢ', '±'] +['çĢ', '¹'] +['çĢ', '¼'] +['çĢ', 'µ'] +['è¥', '«'] +['åŃ', 'ħ'] +['éª', '¦'] +['ð¬Ļ', 'ĭ'] +['èĢ', '°'] +['ð¤', '«'] +['ð¤«', 'ī'] +['çĵ', 'ĸ'] +['é¬', 'ĺ'] +['è¶', '¯'] +['ð¬º', 'ĵ'] +['ç½', 'į'] +['é¼', '±'] +['é³', 'ł'] +['é³', '¡'] +['é³', '£'] +['çĪ', 'Ł'] +['çĪ', 'ļ'] +['çģ', 'Ī'] +['éŁ', 'Ĥ'] +['ç³', 'µ'] +['èĺ', '¼'] +['ç¤', 'µ'] +['é¹', '´'] +['èº', 'Ķ'] +['çļ', 'Ń'] +['é¾', '¢'] +['é³', '¤'] +['äº', '¹'] +['ç±', '¥'] +['é¼', '·'] +['ð«ļ', 'Ń'] +['çİ', 'ĥ'] +['éĨ', '¾'] +['é½', 'ĩ'] +['è§', '¿'] +['èł', '¼'] +['×', '§'] +['×', '¤'] +['×', 'Ľ'] +['×ķ×', 'ª'] +['×', '¡'] +['×Ļ×', 'Ŀ'] +['×', '¦'] +['×', 'Ĵ'] +['×', 'ĺ'] +['×ķ×', '¨'] +['×', 'Ŀ'] +['×ķ×', 'ľ'] +['×', 'ĸ'] +['à¹', 'Ĥ'] +['ï', 'º'] +['ðŁ', 'į'] +['ðŁ', 'IJ'] +['×Ļ×', '¨'] +['ï', '»'] +['ðŁ', 'ij'] +['ðĿ', 'IJ'] +['ðŁ', 
'ı'] +['ðŁ', 'Ķ'] +['ðŁ', 'Į'] +['ðŁ', 'İ'] +['ðŁ', 'ĵ'] +['×', 'Ł'] +['ðĿ', 'ij'] +['×ķ×', 'ĵ'] +['ï', '¦'] +['Ġ×', 'ķ'] +['×ķ×', 'ij'] +['à¸Ń', 'à¸ĩ'] +['ðĿ', 'ĺ'] +['×Ļ×', 'ª'] +['ðĿ', 'ķ'] +['à¸Ĺ', 'ีà¹Ī'] +['اØ', '¦'] +['ðŁ', '¤'] +['×ķ×', 'Ł'] +['ر', 'ÙĬ'] +['×Ļ×', 'ľ'] +['ร', 'ะ'] +['า', 'ย'] +['ï', '¯'] +['ï', '®'] +['า', 'ม'] +['â', 'ĩ'] +['ðŁ', '¥'] +['ï', 'Ń'] +['ðĿ', 'Ļ'] +['×ķ×', 'ł'] +['á', '½'] +['Ġ×', 'Ľ'] +['ðŁ', 'ļ'] +['â', 'ļ'] +['ï', '§'] +['×ij', 'ר'] +['×Ļ×', 'ł'] +['á', '´'] +['Ġ×', 'Ĺ'] +['á', '¼'] +['ðĿ', 'Ĺ'] +['Ġ×', '¢'] +['×Ļ×', 'Ķ'] +['ãģ£', 'ãģŁ'] +['ãģĵ', 'ãģ¨'] +['á', '¸'] +['ÙĬ', 'ÙĨ'] +['ãģª', 'ãģĦ'] +['ا', 'ع'] +['à¸', '¨'] +['à¹Ī', 'à¸ĩ'] +['×Ļ×', 'ĵ'] +['×ŀ', 'ש'] +['á', 'Ī'] +['׳', '×Ļ'] +['×Ļ×', 'ij'] +['ï', '¥'] +['ðĿ', 'ĵ'] +['Ġ×', 'Ļ'] +['×', 'ļ'] +['ั', 'à¸ĩ'] +['â', 'ĵ'] +['ï', '¤'] +['ĠاÙĦ', 'Ø£'] +['า', 'à¸ģ'] +['à¹ī', 'à¸Ļ'] +['à¹Ģ', 'ร'] +['×ķ×', 'Ŀ'] +['á', '¹'] +['à¸', '¶'] +['×Ļ×', '§'] +['à¸', 'ĭ'] +['à¸Ħ', 'ร'] +['à¸', 'ĺ'] +['ั', 'à¸ģ'] +['ðŁ', 'ķ'] +['ÙĪ', 'ÙĨ'] +['à¸Ń', 'ย'] +['â', 'Ĭ'] +['ðĿ', 'Ĵ'] +['ĠاÙĦ', 'ع'] +['า', 'à¸Ļ'] +['×Ļ×', 'Ł'] +['ÙĦ', 'ÙĬ'] +['×Ļ×', '©'] +['à¸Ľ', 'ระ'] +['à¹Ģ', 'à¸Ľ'] +['Ġ×', 'ł'] +['×ķ×', '¡'] +['à¸', 'ł'] +['Ùħ', 'ÙĨ'] +['×ķ×', '¢'] +['×ķ×', 'ŀ'] +['â', 'Į'] +['ðŁ', '§'] +['à¹ĩ', 'à¸Ļ'] +['à¸', 'į'] +['ã', 'İ'] +['á', 'µ'] +['ĠاÙĦ', 'س'] +['×ķ×', '§'] +['ห', 'ล'] +['ðŁ', 'ĩ'] +['â', 'ı'] +['ðŁ', '¦'] +['Ġ×Ķ', '×ŀ'] +['ÙĪ', 'ا'] +['Ġ×', 'ª'] +['ר', '×IJ'] +['à¸Ń', 'à¸Ļ'] +['à¸', '©'] +['à¹Ī', 'ว'] +['×ķ×', '¦'] +['í', 'Ĺ'] +['ã', 'Ħ'] +['ï', '¨'] +['ï', '¹'] +['â', 'İ'] +['ï', '²'] +['ðĿ', 'ļ'] +['ð', 'IJ'] +['à¸Ħ', 'ว'] +['ห', 'à¸Ļ'] +['Ġ×', '¨'] +['ب', 'ÙĬ'] +['ร', 'à¹Į'] +['ر', 'ا'] +['Ø´', 'ر'] +['×ķ×', 'Ĺ'] +['×ķ×', '¤'] +['×ķ×', '©'] +['×ķ×', 'Ĵ'] +['í', 'Ŀ'] +['â', 'Ľ'] +['à¸ķ', 'ิ'] +['à¹Ģ', 'à¸ģ'] +['ï', '³'] +['ï', '±'] +['à¸Ķ', 'à¹ī'] +['ë', '¹'] +['ï', '¬'] +['á', '¿'] +['ðŁ', 'Ľ'] +['ðĿ', 'ĸ'] +['à¹Īา', 'à¸ĩ'] +['ู', 'à¹ī'] +['Ġ×Ķ', '×IJ'] +['ĠاÙĦ', 'ØŃ'] +['פ', 
'ר'] +['ÙĪ', 'Ùħ'] +['à¹Ģ', 'ล'] +['í', 'ĸ'] +['×Ļ×', '¢'] +['ì', 'Ī'] +['í', 'ĵ'] +['ðŁ', 'ħ'] +['á', 'ł'] +['à¸Ħว', 'าม'] +['à¸Ī', 'ะ'] +['׳', '×Ķ'] +['Ġ×', '§'] +['à¸', 'Ł'] +['à¹ī', 'à¸ĩ'] +['ห', 'ม'] +['ت', 'Ùħ'] +['׾', '×Ļ'] +['ÙĬ', 'د'] +['à¹Ī', 'à¸Ļ'] +['×Ĺ', 'ר'] +['ש', 'ר'] +['à¹Ģ', 'à¸Ĺ'] +['×ŀ', 'ר'] +['ë', 'ĸ'] +['ع', 'ÙĦ'] +['×ŀ', '×¢'] +['â', '²'] +['׾', '×Ķ'] +['Ġ×', '¤'] +['à¸Ń', 'à¸ģ'] +['س', 'ÙĦ'] +['×Ļ×', 'ŀ'] +['ÙĤ', 'ÙĬ'] +['í', 'İ'] +['ت', 'ØŃ'] +['×Ļ×', '¡'] +['×Ļ×', 'Ĺ'] +['í', 'Ľ'] +['ï', '°'] +['â', '½'] +['á', 'ī'] +['á', 'Ĭ'] +['á', '¨'] +['Ùĩ', 'ا'] +['Ġ׾', '×Ķ'] +['×ķ×', 'IJ'] +['Ùħ', 'ا'] +['à¹īà¸Ń', 'à¸ĩ'] +['ر', 'ب'] +['ĠاÙĦ', 'ج'] +['×ŀ', '×ĵ'] +['Ùħ', 'ÙĦ'] +['ت', 'ر'] +['à¹Ģ', 'à¸Ķ'] +['×§', 'ר'] +['í', 'ħ'] +['ì', '¼'] +['ê', '¿'] +['ã', 'Ī'] +['á', 'IJ'] +['ðŁ', 'Ĺ'] +['ê', '¦'] +['á', 'ĭ'] +['ðĿ', 'Ķ'] +['à¹Ģà¸Ľ', 'à¹ĩà¸Ļ'] +['à¹ĥ', 'ห'] +['ม', 'า'] +['ว', 'à¹Īา'] +['ม', 'ี'] +['ี', 'à¹ī'] +['à¹Ħม', 'à¹Ī'] +['ÙĨ', 'ÙĬ'] +['Ø', '¤'] +['ร', 'า'] +['×ķ', '×Ļ'] +['ãĤĪ', 'ãģĨ'] +['ิ', 'à¸Ķ'] +['×Ļ×', '¤'] +['×Ĺ', '׾'] +['ÙĤ', 'د'] +['à¹Ģ', 'ส'] +['×Ļ×', 'ĺ'] +['à¸ģ', 'ล'] +['ר', '׼'] +['×ķ×', 'Ľ'] +['×Ļ×', 'Ľ'] +['ë', 'Ī'] +['ë', 'ĥ'] +['ðŁ', 'ĸ'] +['á', 'ħ'] +['â', '¼'] +['ã', 'ī'] +['à¹Ħ', 'à¸Ķà¹ī'] +['ת', '×Ļ'] +['×Ļ×', 'IJ'] +['ĠاÙĦ', 'Ø¥'] +['à¸ł', 'า'] +['ร', 'ิ'] +['ÙĤ', 'Ø©'] +['ØŃ', 'د'] +['ê', '»'] +['ì', '±'] +['ת', '×Ĺ'] +['ì', 'º'] +['â', 'ĭ'] +['á', 'Ħ'] +['á', '¾'] +['â', 'µ'] +['â', '¾'] +['ĠÙĪ', 'اÙĦ'] +['׳', '×ķ'] +['Ù', 'Ģ'] +['ÙĬ', 'ا'] +['à¸ģ', 'à¹ĩ'] +['×ŀ', '×Ķ'] +['ãģĦ', 'ãĤĭ'] +['ع', 'د'] +['ĠاÙĦ', 'ÙĨ'] +['Ġ×Ķ', 'ש'] +['Ø', '¦'] +['ั', 'à¹īà¸ĩ'] +['ร', 'ัà¸ļ'] +['ÙĪ', 'ÙĤ'] +['ãģ§', 'ãģį'] +['à¹Ģ', 'à¸ŀ'] +['׼', '׾'] +['×ĺ', 'ר'] +['ั', 'à¸Ķ'] +['à¸Ń', 'า'] +['ì', '¢'] +['à¸Ń', 'à¸ļ'] +['à¸ķ', 'ร'] +['à¹Ģ', 'à¸Ĭ'] +['ì', 'Ķ'] +['ãģĹ', 'ãģ¾'] +['ë', 'ģ'] +['ë', 'ķ'] +['ðŁ', 'Ļ'] +['â', 'Ĵ'] +['á', '¶'] +['à¹ģ', 'ล'] +['ÙĨ', 'ا'] +['à¹ĥห', 'à¹ī'] +['à¹Ħ', 'à¸Ľ'] +['×', '£'] +['ั', 'ว'] +['า', 'à¸ĩ'] +['×ĵ', 
'ר'] +['×ij', '׾'] +['פ', '×Ļ'] +['Ġ×', 'ĵ'] +['ĠاÙĦ', 'Ùģ'] +['à¹Ģ', 'à¸Ĥ'] +['ש', '×Ķ'] +['×IJ', 'ר'] +['ë', '¬'] +['ãģ«', 'ãģª'] +['ÑĢ', 'о'] +['ว', 'ิ'] +['Ùħ', 'ر'] +['×IJ', 'ת'] +['Ùĥ', 'ر'] +['س', 'ب'] +['ÙĨ', 'ت'] +['ãģĹ', 'ãģĦ'] +['ا', 'ج'] +['à¸Ń', 'รà¹Į'] +['Ùĥ', 'ÙĦ'] +['س', 'Ùħ'] +['ส', 'ิ'] +['×Ļ×', '¦'] +['ë', 'Ŀ'] +['í', 'ľ'] +['ì', 'ī'] +['á', 'Ĩ'] +['Ùĩ', 'Ùħ'] +['à¸Ļ', 'ีà¹ī'] +['ãģĤ', 'ãĤĭ'] +['ãģĦ', 'ãģ¦'] +['س', 'ÙĬ'] +['׾', '×IJ'] +['د', 'ر'] +['ãģ', 'ļ'] +['ÙĪ', 'ج'] +['ĠاÙĦ', 'Ø®'] +['ص', 'ر'] +['í', 'ı'] +['à¹īา', 'à¸ĩ'] +['ุ', 'à¸Ķ'] +['×ķ×', 'ĺ'] +['×ij', '×¢'] +['í', 'Ĩ'] +['à¸Ĭ', 'า'] +['ร', 'ม'] +['ש', '×ŀ'] +['×ŀ', 'ס'] +['ê', '´'] +['ì', '´'] +['ë', 'ľ'] +['ì', '¿'] +['ì', '©'] +['ë', '»'] +['â', '¤'] +['ðŁ', 'Ĩ'] +['á', 'Į'] +['á', 'ķ'] +['ذ', 'ا'] +['à¸Ĺ', 'ำ'] +['à¸ķ', 'à¹Ī'] +['ĠاÙĦ', 'ÙĤ'] +['ÙĦ', 'Ùĥ'] +['ู', 'à¹Ī'] +['à¸Ħ', 'ุ'] +['ÙĬ', 'Ùħ'] +['׳', '×Ļ×Ŀ'] +['ืà¹Ī', 'à¸Ń'] +['ÙĪ', 'ع'] +['ãĤ', 'ĩ'] +['ا', 'ÙĤ'] +['Ġ×ij', '×¢'] +['à¹Ģ', 'ม'] +['ج', 'Ùħ'] +['á»', '«'] +['ãģĵãģ¨', 'ãģĮ'] +['ب', 'د'] +['×ķ×', 'Ķ'] +['ש', '׾'] +['Ùĩ', 'ر'] +['à¹Ģ', 'à¸Ļ'] +['ãģ', '¹'] +['í', 'ĭ'] +['ì', '»'] +['ì', '½'] +['ë', 'Ń'] +['ì', 'Į'] +['í', 'Ģ'] +['ë', 'Į'] +['ë', 'º'] +['ã', 'Ĭ'] +['à¹ĥ', 'à¸Ļ'] +['Ġ×', 'Ĵ'] +['à¹', 'Ĩ'] +['à¸Ī', 'าà¸ģ'] +['ว', 'ย'] +['à¹ĥ', 'à¸Ĭ'] +['à¸ĩ', 'าà¸Ļ'] +['ĠاÙĦ', 'Ø´'] +['ا', 'ØŃ'] +['à¹īา', 'à¸Ļ'] +['ืà¹Ī', 'à¸Ńà¸ĩ'] +['×IJ', '×Ļ'] +['ب', 'ÙĦ'] +['ãģ¨', 'æĢĿ'] +['׳', 'ס'] +['ãģ¾', 'ãģĽ'] +['Ùĥ', 'ÙĨ'] +['×¢', 'ר'] +['ĠاÙĦ', 'د'] +['ש', 'ת'] +['í', 'ŀ'] +['Ùħ', 'س'] +['ص', 'ÙĦ'] +['×ķ׳', '×Ķ'] +['ار', 'Ø©'] +['ÙĦ', 'Ùħ'] +['ส', 'ม'] +['Ø£', 'ÙĨ'] +['ת', 'ר'] +['×IJ', '×ŀ'] +['ع', 'ب'] +['Ø®', 'ت'] +['ãĤ', 'ĥ'] +['ì', '¡'] +['ì', '£'] +['ив', 'а'] +['ส', 'ั'] +['ึ', 'à¸ģ'] +['ì', '¸'] +['ë', 'Ĩ'] +['алÑĮ', 'н'] +['ì', '³'] +['ì', 'į'] +['ê', '¼'] +['ê', '½'] +['ì', 'ı'] +['ã', 'Į'] +['ã', 'ı'] +['ï', '©'] +['ê', 'ª'] +['á', 'İ'] +['Ġ×', 'ĸ'] +['à¸ģ', 'ัà¸Ļ'] +['×Ļ', '×ķ'] +['à¸Ħ', 'à¸Ļ'] +['׳', '×ķת'] 
+['à¸ľ', 'ูà¹ī'] +['à¹ĥ', 'à¸Ī'] +['ãģĦ', 'ãģŁ'] +['Ùģ', 'ر'] +['×ĺ', '×Ļ'] +['צ', '×Ļ'] +['ãĤĤ', 'ãģ®'] +['ĠاÙĦ', 'ص'] +['ãģ¾ãģĽ', 'ãĤĵ'] +['د', 'Ø©'] +['×ij', '×Ļ'] +['ĠاÙĦ', 'ر'] +['Ġ×ŀ', '×IJ'] +['ส', 'ำ'] +['à¹Ģ', 'ห'] +['ع', 'ر'] +['ãģª', 'ãģı'] +['à¸ģร', 'ะ'] +['×ij', '×ĵ'] +['à¹Ģ', 'à¸Ī'] +['×Ļ×', 'ļ'] +['×Ĺ', '×Ļ'] +['ÙĬ', 'ع'] +['ש', '×ij'] +['ÙĨ', 'Ø©'] +['ÙĪ', 'ض'] +['ÙĦ', 'Ùģ'] +['ÙĢ', 'ÙĢ'] +['פ', '×¢'] +['í', 'Ī'] +['×ŀ', '×§'] +['à¸', 'IJ'] +['ØŃ', 'Ø©'] +['ا', 'ص'] +['Ñĭв', 'а'] +['à¸Ħ', 'ม'] +['ว', 'ั'] +['à¸Ľ', 'ล'] +['ì', 'Ł'] +['í', 'ļ'] +['ë', '´'] +['ë', 'ij'] +['ë', 'ī'] +['ë', 'ĩ'] +['ì', '¨'] +['ë', '±'] +['ë', 'İ'] +['â', '¬'] +['á', '¥'] +['á', 'Ĺ'] +['á', 'Ľ'] +['á', 'į'] +['Å', '©'] +['à¸Ķ', 'ี'] +['ô', 'i'] +['Ġ×', '¡'] +['׾', '×ķ'] +['á»Ŀ', 'i'] +['à¸Ħุ', 'à¸ĵ'] +['â', 'y'] +['à¸Ļ', 'า'] +['×Ĺ', '×ĵ'] +['×ĵ', '×Ļ'] +['ห', 'า'] +['ج', 'ÙĦ'] +['à¹Ģ', 'ว'] +['ãĤĩ', 'ãģĨ'] +['Ùħ', 'Ø©'] +['ĠاÙĦ', 'Ùĥ'] +['Ġ×Ķ', '×¢'] +['ج', 'ر'] +['×ĸ', 'ר'] +['ا', 'Ø·'] +['׼', 'ת'] +['×ķ׳', '×Ļ×Ŀ'] +['ØŃ', 'Ùħ'] +['ê', '¶'] +['ر', 'Ùĥ'] +['Ġ׾', '×¢'] +['×ķ×', 'ĸ'] +['ส', 'ร'] +['צ', '׾'] +['Ø', '¢'] +['ا', 'ست'] +['à¹Ī', 'ม'] +['Ø®', 'ر'] +['צ', '×¢'] +['×Ļר', '×ķת'] +['اد', 'Ø©'] +['Ø´', 'ار'] +['×ŀ', '×Ĺ'] +['í', 'Ĵ'] +['à¹Ģร', 'ีย'] +['×Ĺ', '×§'] +['اØ', '«'] +['ร', 'à¸ĩ'] +['à¹Ģ', 'à¸ķ'] +['à¸Ī', 'ำ'] +['à¸', 'Ŀ'] +['à¹Īา', 'ย'] +['à¸Ħ', 'ล'] +['ÙĤ', 'ÙĪ'] +['иÑĩеÑģ', 'к'] +['à¸ĵ', 'à¹Į'] +['ั', 'ย'] +['Ùħ', 'ع'] +['ë', '¨'] +['ë', '¿'] +['ë', '®'] +['ï', '´'] +['ì', '¥'] +['ì', '«'] +['ë', 'µ'] +['á', '¡'] +['â', 'į'] +['ð', 'ĵ'] +['â', '°'] +['à¸Ĥ', 'à¸Ńà¸ĩ'] +['Ù', 'ĭ'] +['à¸ģ', 'ัà¸ļ'] +['ãģ®', 'ãģ§'] +['à¹ī', 'ว'] +['à¸Ńย', 'à¹Īาà¸ĩ'] +['ãģ', 'Ń'] +['á»ĩ', 't'] +['à¸ķ', 'à¹īà¸Ńà¸ĩ'] +['×ŀ', '×Ļ'] +['à¹ģ', 'à¸ļ'] +['×Ĵ', 'ר'] +['ÙĪ', 'Ùģ'] +['ÙĤ', 'ÙĦ'] +['à¸łà¸²', 'à¸ŀ'] +['ר', '×Ļ'] +['ล', 'า'] +['ÙĬ', 'س'] +['Ġ×', '¦'] +['ÙĬ', 'Ùģ'] +['Ġ×', 'ĺ'] +['à¸ľ', 'ล'] +['á', 'ng'] +['ร', 'ว'] +['Ġ×ŀ', 'ש'] +['×IJ', '×ķת'] +['×ĸ', '×Ķ'] +['ู', 'à¸ģ'] 
+['à¸Ļ', 'ัà¸ģ'] +['اÙĨ', 'ÙĬ'] +['د', 'ا'] +['ãģ', '³'] +['׼', 'ף'] +['ãĤī', 'ãĤĮ'] +['ãĤĮ', 'ãģ°'] +['ת', '×§'] +['ú', 'c'] +['ÙĪ', 'ز'] +['×Ļר', '×Ķ'] +['Ġn', 'gh'] +['án', 'h'] +['Ġ×ķ', '×IJ'] +['á»', 'ħ'] +['ส', 'ุà¸Ķ'] +['ë', 'į°'] +['ا', 'ض'] +['اÙĦ', 'ÙĬ'] +['ب', 'ار'] +['ع', 'Ùħ'] +['à¸ļ', 'า'] +['ت', 'ج'] +['à¸ŀ', 'ร'] +['×ķר', '×Ķ'] +['ả', 'ng'] +['Ø®', 'ÙĦ'] +['à¸', 'ī'] +['ắ', 'c'] +['ש', '×Ļ×Ŀ'] +['í', 'Ķ'] +['Ùģ', 'س'] +['×Ļ×', 'Ĵ'] +['п', 'ÑĢ'] +['ĠاÙĦ', 'Ø«'] +['س', 'Ø·'] +['ร', 'ูà¹ī'] +['ีà¹Ī', 'ย'] +['à¸Ń', 'à¸Ķ'] +['ãģª', 'ãĤĬ'] +['×Ĵ', '×ĵ'] +['ãģĦ', 'ãģ¾ãģĹãģŁ'] +['ס', '×§'] +['Ø®', 'ص'] +['la', 'ÅŁ'] +['ен', 'но'] +['ب', 'ØŃ'] +['ส', 'à¸Ļ'] +['à¸', '®'] +['ר×IJ', 'ש'] +['Ùħ', 'ÙĪ'] +['دÙĬ', 'د'] +['ษ', 'า'] +['×ķ×', 'ļ'] +['ãĥ§', 'ãĥ³'] +['à¸ķ', 'ุ'] +['Ġê', 'µ'] +['ĠÑģв', 'о'] +['צ', '×ij'] +['à¸Ń', 'ม'] +['à¸Ľ', 'ร'] +['ت', 'ع'] +['×Ķ', 'ת'] +['اÙħ', 'ÙĦ'] +['×ŀ', '׳'] +['ç', '¶ļ'] +['à¸', '¤'] +['í', 'į'] +['ë', 'ĺ'] +['ë', '¤'] +['ì', 'ij'] +['â', '´'] +['ã', 'ĭ'] +['Ġب', 'اÙĦ'] +['á»ģ', 'u'] +['ĠاÙĦ', 'ÙĦ'] +['à¸ķ', 'ัว'] +['ذ', 'Ùĩ'] +['ึ', 'à¸ĩ'] +['à¹ĥà¸Ĭ', 'à¹ī'] +['á»ĵ', 'ng'] +['à¸Ļ', 'ั'] +['ม', 'าà¸ģ'] +['ãĥ', 'Ł'] +['×ŀ', '×ķ'] +['à¸Ĺ', 'ย'] +['á»Ļ', 'i'] +['áº', '±'] +['ả', 'o'] +['à¹Ĥ', 'à¸Ķ'] +['×IJ', '׾'] +['ส', 'าม'] +['ÙĪ', 'ب'] +['à¸Ĺ', 'ุ'] +['ย', 'ัà¸ĩ'] +['×¢', 'ת'] +['×ķ׳', '×ķת'] +['à¸Ĥ', 'ึ'] +['à¸Ĥึ', 'à¹īà¸Ļ'] +['à¸ģ', 'à¹Ī'] +['áº', '«'] +['á»ij', 'c'] +['ãģĹ', 'ãĤĩãģĨ'] +['á»ĭ', 'ch'] +['Ġ×IJ', '×ķת'] +['Ġש', '×IJ'] +['׼', '×ķ׾'] +['á»Ļ', 'c'] +['ع', 'Ø©'] +['à¸Ĺ', 'ี'] +['à¹Ģ', 'à¸Ń'] +['Ùĥ', 'ت'] +['ãģ', '»'] +['áº', '»'] +['ìĹ', 'ħ'] +['à¸Ń', 'à¸Ńà¸ģ'] +['اÙĨ', 'ت'] +['à¹Ħ', 'ร'] +['Ġ×IJ', '×Ĺר'] +['Ø·', 'ر'] +['ÙĨ', 'د'] +['ื', 'à¹īà¸Ń'] +['Ø·', 'ÙĦ'] +['×IJ', '×Ķ'] +['uy', 'ên'] +['í', 'ĸī'] +['×ij', '×Ķ'] +['à¸Ħ', 'à¹Ī'] +['à¸Ĭ', 'à¹Īว'] +['ãģĤãĤĬ', 'ãģ¾ãģĻ'] +['ÙĬ', 'ب'] +['×§', '׾'] +['ãĥ', 'Ļ'] +['Ä', '©'] +['س', 'ر'] +['า', 'ว'] +['ãĤ', '±'] +['à¸ļ', 'ริ'] +['ר', '×Ĵ'] +['á»ĥ', 'u'] +['ØŃ', 'ت'] 
+['×ķ×ŀ', '×Ļ'] +['ب', 'ÙĨ'] +['êµ', 'IJ'] +['ÄŁ', 'u'] +['ãģª', 'ãĤĵ'] +['×ij', '×§'] +['Ġפ', 'ר'] +['ắ', 'n'] +['ØŃ', 'ÙĦ'] +['×ij', '×Ĺ'] +['ấ', 'u'] +['×ij', '×ķ×ĵ'] +['ãĥ', '¯'] +['Ġ׾', '×§'] +['ั', 'à¸į'] +['à¸ŀ', 'ิ'] +['×Ĺ', '×Ķ'] +['×ĸ', '׼'] +['ãĥ¼ãĥ', 'ł'] +['ÑĤ', 'елÑĮ'] +['×ŀ', '×Ļ×ĵ'] +['ÙĬ', 'Ø®'] +['áº', '³'] +['ت', 'ص'] +['à¸ĺ', 'ิ'] +['è¾', '¼'] +['ì', 'ĵ'] +['Ùĥ', 'Ø©'] +['ÙĤ', 'ب'] +['à¸Ħ', 'à¹Į'] +['à¹īา', 'ย'] +['à¸ĵ', 'ะ'] +['า', 'ะ'] +['ë', 'Ĵ'] +['ê', '¾'] +['ë', '·'] +['ì', 'ĩ'] +['ê', 'º'] +['ì', 'ģ'] +['ë', 'Ģ'] +['ì', '¾'] +['ë', '½'] +['ë', 'ļ'] +['ì', 'Ń'] +['ì', 'İ'] +['á', 'ij'] +['ë', 'Ĺ'] +['ê', 'Ĵ'] +['à', '¡'] +['à', '¬'] +['ðIJ', 'Į'] +['ã', 'ĩ'] +['ðĿ', 'Ħ'] +['Ġ׾', '×IJ'] +['ãģ¨', 'ãģĦãģĨ'] +['Ġn', 'hi'] +['×Ļ', '×ķת'] +['Ġש', '×Ķ'] +['à¹ģล', 'à¹īว'] +['Æ°á»Ľ', 'c'] +['à¸Ķà¹ī', 'วย'] +['à¸Ĺ', 'าà¸ĩ'] +['׳', 'ת'] +['פ', 'ת'] +['à¹ģ', 'à¸ķà¹Ī'] +['ư', 'ng'] +['à¸Ńย', 'ูà¹Ī'] +['à¹ī', 'ำ'] +['Ġ×IJ', '׾'] +['Ùĥ', 'Ùħ'] +['ấ', 'p'] +['ล', 'à¸ĩ'] +['ãģŁ', 'ãĤģ'] +['×Ĵ', '׾'] +['ห', 'ร'] +['ĠÑĢ', 'е'] +['à¹Ģà¸Ĥ', 'à¹īา'] +['ÙĤ', 'ر'] +['Ġ×Ķ', 'ס'] +['ÙĪ', 'ÙĬ'] +['สาม', 'าร'] +['สามาร', 'à¸ĸ'] +['Äĥ', 'n'] +['à¸Ń', 'ี'] +['פ', '×ķ'] +['×Ļ׳', '×ķ'] +['ว', 'ัà¸Ļ'] +['ặ', 'c'] +['íķ', 'Ļ'] +['×ŀ', 'ת'] +['ê', 'u'] +['áº', '¹'] +['Ùģ', 'ÙĬ'] +['×ŀ', 'צ'] +['à¸Ħ', 'า'] +['ãģĿ', 'ãģĨ'] +['ãĢ', 'ħ'] +['ا', 'ز'] +['ا', 'Ùĩ'] +['ר', '×Ļ×Ŀ'] +['ấ', 'n'] +['ห', 'าร'] +['ạ', 't'] +['ÙĨ', 'Ùĩ'] +['à¹Ģ', 'à¸Ħร'] +['ج', 'Ùĩ'] +['׼', '×Ļ'] +['ắ', 't'] +['à¸Ħ', 'à¹īา'] +['ر', 'Ø©'] +['ãĥ', 'ı'] +['Ùĥ', 'ÙĪÙĨ'] +['ứ', 'ng'] +['Ġìļ', '°'] +['ย', 'à¹Į'] +['à¹Īว', 'à¸Ļ'] +['à¸ģ', 'ำ'] +['Ø«', 'ر'] +['Ñģ', 'и'] +['ĠاÙĦ', 'Ø·'] +['Ġ×Ķ', 'צ'] +['ĠØ', '·'] +['ĠاÙĦ', 'ÙĪ'] +['ê¹', 'Į'] +['ØŃ', 'ÙĬ'] +['ار', 'ات'] +['à¹Ģ', 'à¸ĭ'] +['ب', 'ا'] +['г', 'ÑĢ'] +['ร', 'ี'] +['ืà¸Ń', 'à¸Ļ'] +['ع', 'ت'] +['ÙĤ', 'اÙĦ'] +['د', 'Ùħ'] +['Ø', '¡'] +['Ġ×ŀ', '×§'] +['×ĵ', '×Ļ×Ŀ'] +['×¢', '׾'] +['ãģ', 'Ĵ'] +['ëĭ', 'ĺ'] +['×¢', '×Ķ'] +['Ġìĸ', '´'] +['Ñģ', 'ÑĮ'] +['ÙĤ', 'Ø·'] 
+['ãĥ', 'Ľ'] +['èĢĥ', 'ãģĪ'] +['à¹ģ', 'à¸Ļ'] +['ÙĪ', 'ات'] +['â', 'u'] +['ĠìĤ¬', 'ëŀ'] +['ห', 'ว'] +['ĠاÙĦØ£', 'Ùħ'] +['Ġ×Ķ', '×ŀש'] +['ب', 'ÙĪ'] +['à¸Ĭ', 'à¸Ļ'] +['ãĤĵ', 'ãģ§ãģĻ'] +['ว', 'à¸Ļ'] +['à¸ģร', 'รม'] +['×ŀ', '×ķ×ĵ'] +['Ùĥ', 'اÙĨ'] +['×ķ×', '£'] +['ол', 'ог'] +['ت', 'ÙĨ'] +['à¸ķ', 'à¹Į'] +['ê²', 'ĥ'] +['ר', '×ĺ'] +['ừ', 'ng'] +['×ķ×ij', '×Ķ'] +['Ùħ', 'ØŃ'] +['ĠÐ', '§'] +['פ', '×Ĵ'] +['ส', 'à¸ĸ'] +['ãģĭ', 'ãĤĬ'] +['ını', 'z'] +['à¹Ģ', 'ย'] +['ãĥ¼', 'ãĥ³'] +['ãģĬ', 'ãĤĬ'] +['פ', 'ש'] +['ิ', 'à¸ķ'] +['Ø·', 'ÙĨ'] +['×Ļת', '×Ļ'] +['×IJ', '׳'] +['ç', 'ek'] +['ì', 'ª'] +['×ŀ', '×ij'] +['ศ', 'า'] +['ãĤ¹', 'ãĤ¿'] +['à¸ļ', 'ุ'] +['×ĵ', '×ijר'] +['ãģĦ', 'ãģı'] +['ส', 'ะ'] +['à¹Ģ', 'หล'] +['ิ', 'à¸ĩ'] +['à¸ŀ', 'ัà¸Ļ'] +['ãģĦ', 'ãģŁãģł'] +['ãĤĤ', 'ãĤī'] +['à¹ī', 'ม'] +['ãģĵãģ¨ãģĮ', 'ãģ§ãģį'] +['าร', 'à¹Į'] +['ุ', 'à¸ĩ'] +['í', 'ij'] +['ì', '¯'] +['ë', '¼'] +['í', 'Ĥ'] +['ì', '·'] +['ê', '¡'] +['á', 'ı'] +['á', 'Ĵ'] +['ðĿ', 'ľ'] +['á', '©'] +['ðŁ', 'Ħ'] +['ðIJ', '¤'] +['Ġש', '׾'] +['Ġ×ŀ', '×Ķ'] +['à¹ģล', 'ะ'] +['Ġ׼', '׾'] +['áº', '½'] +['á»Ļ', 'ng'] +['ذ', 'ÙĬ'] +['л', 'е'] +['×', '¥'] +['ãģª', 'ãģ©'] +['ĠÙĪ', 'Ø£'] +['หà¸Ļ', 'à¹īา'] +['ãģ¾', 'ãģ§'] +['à¸ķà¹Ī', 'à¸Ń'] +['à¸Ĺ', 'ัà¹īà¸ĩ'] +['ãģł', 'ãģij'] +['à¹ģà¸ļ', 'à¸ļ'] +['à¹Ģร', 'า'] +['פ', '׾'] +['ãģŁ', 'ãģĦ'] +['à¹Ģล', 'ย'] +['ãģ£ãģ¦', 'ãģĦãĤĭ'] +['ế', 'p'] +['ึ', 'à¹Īà¸ĩ'] +['ê', '´Ģ'] +['ê³', 'Ħ'] +['׼', '×ķ'] +['à¹Ģร', 'ืà¹Īà¸Ńà¸ĩ'] +['×§', '×Ļ'] +['êµ', 'Ń'] +['פ', 'ס'] +['ت', 'ÙĬ'] +['ãĥ', 'Ħ'] +['Ġ×Ķ', '×Ĺ'] +['г', 'и'] +['ר×IJ', '׾'] +['×ŀ', '׾'] +['ĠØ£', 'ÙĬ'] +['Ġع', 'ÙĦÙĬ'] +['ãģĭ', 'ãģ£ãģŁ'] +['ש', '×Ļ'] +['д', 'Ñĥ'] +['×ŀ', 'ף'] +['׳', '×ĺ'] +['׳', '×Ļת'] +['mi', 'ÅŁ'] +['׼', '×Ŀ'] +['Ġ×ij', 'ר'] +['Ġ׾', '×ij'] +['ĠÐ', 'Ľ'] +['ç', 'e'] +['×ķ׳', '×Ļ'] +['ãĤĪãģĨ', 'ãģ«'] +['פ', '×ķר'] +['ãĥ', 'į'] +['Ùĥ', 'ÙĬ'] +['×Ĺ', 'ת'] +['Ùģ', 'ÙĦ'] +['Ġ×Ķ', '×§'] +['Ġ×Ķ', '×ij'] +['Ġ×ŀ', 'ס'] +['à¹Īา', 'à¸Ļ'] +['п', 'еÑĢ'] +['à¹Īา', 'ว'] +['Ġ×ij', '×IJ'] +['ĠÙĪ', 'Ùĩ'] +['à¸Ļ', 'ำ'] +['Ġ×ij', 'ש'] +['׳', '×§'] 
+['ãģ©', 'ãģĨ'] +['ש', '×ķת'] +['×ĵ', '×Ķ'] +['à¹Ģ', 'à¸ļ'] +['ÙĨ', 'س'] +['Ġìļ°', '리'] +['ส', 'à¹Īวà¸Ļ'] +['ล', 'ัà¸ĩ'] +['ج', 'ز'] +['Ġ×Ĺ', '×Ļ'] +['Ùĥ', 'ثر'] +['ล', 'ะ'] +['Ùĩ', 'د'] +['ĠÙĪ', 'ب'] +['اÙĦ', 'Ùħ'] +['à¹ģ', 'ม'] +['Æ¡', 'i'] +['Ġ×ij', '×Ĺ'] +['ữ', 'a'] +['à¹Ģà¸Ĺ', 'ศ'] +['à¸ķ', 'ัà¹īà¸ĩ'] +['ог', 'да'] +['׾', '×§'] +['د', 'د'] +['สร', 'à¹īาà¸ĩ'] +['à¸Ĭ', 'ี'] +['Ùģ', 'ض'] +['à¹ģ', 'ห'] +['uy', 'á»ĩn'] +['ร', 'ัà¸ģ'] +['á»ĩ', 'm'] +['ส', 'า'] +['פ', '×§'] +['ีย', 'à¸ĩ'] +['à¸ķ', 'à¹Īาà¸ĩ'] +['à¸Ħร', 'ัà¹īà¸ĩ'] +['ØŃ', 'ÙĤ'] +['à¹Ģ', 'à¸Ńà¸ĩ'] +['ائ', 'ÙĬ'] +['×ĺ', '×¢'] +['اÙĦ', 'Ø©'] +['ิ', 'à¹Īม'] +['ãĤ', '½'] +['د', 'Ùī'] +['Ġר', '×IJ'] +['ãģ£', 'ãģ¨'] +['ãĥĥ', 'ãĥĹ'] +['ÙĬر', 'Ø©'] +['ê±', '´'] +['×ŀ', '×IJ'] +['×ķ', '×ķ'] +['ب', 'ع'] +['ãģ', '²'] +['ร', 'าย'] +['×ĵ', '×Ŀ'] +['ت', 'Ùģ'] +['à¸ķ', 'à¸ģ'] +['ạ', 'ng'] +['ãĤĴ', 'è¦ĭ'] +['à¸Ĭ', 'ั'] +['ưá»', 'Ł'] +['Æ°á»Ł', 'ng'] +['ج', 'ب'] +['×ķ×ŀ', 'ר'] +['ĠìĤ¬ëŀ', 'Į'] +['ó', 'ng'] +['ร', 'ั'] +['Ġ×Ķ', '×ĸ'] +['ר', 'צ'] +['Ġ×Ĺ', '×ĵ'] +['ذ', 'ÙĦÙĥ'] +['×ķר', '×Ļ'] +['ãģ¡', 'ãĤĥ'] +['Ùģ', 'ع'] +['Ġ׾', 'צ'] +['á', 'i'] +['à¹ĩ', 'à¸ļ'] +['ãģ', 'İ'] +['à¸ģ', 'ิ'] +['ạ', 'c'] +['ë©', '°'] +['ãģª', 'ãĤĭ'] +['×ķ׾', '×Ŀ'] +['à¹ģ', 'à¸Ĺ'] +['×ķ×', '¥'] +['м', 'еÑĤ'] +['ü', 'ÅŁ'] +['ÑĢ', 'Ñı'] +['à¸', 'Ĵ'] +['ÑģÑĤ', 'оÑı'] +['ع', 'ÙĪØ¯'] +['Ùħ', 'ار'] +['Ø·', 'Ø©'] +['à¸ŀ', 'ื'] +['к', 'ÑĢ'] +['à¹ģ', 'à¸ģ'] +['à¹Ĥ', 'รà¸ĩ'] +['×ij', '×Ļ×ĺ'] +['ê²', 'ł'] +['×ķ׾', '×Ķ'] +['ØŃ', 'ر'] +['ืà¹Ī', 'à¸Ńà¸Ļ'] +['×ķ×ij', 'ר'] +['×Ĺ', 'ש'] +['ãĥķãĤ', '¡'] +['×ŀ', '×ĺ'] +['ú', 't'] +['Ġd', 'ön'] +['ắ', 'ng'] +['ëł', 'ĩ'] +['ẳ', 'ng'] +['ว', 'à¸ģ'] +['ص', 'د'] +['Ø®', 'Ø·'] +['à¸Ń', 'ั'] +['ãĤı', 'ãĤĮ'] +['سÙĦ', 'اÙħ'] +['à¹Ģร', 'à¹ĩ'] +['×Ļש', '×Ļ'] +['ج', 'اÙĦ'] +['ãģij', 'ãĤĭ'] +['à¸Ĭา', 'à¸ķิ'] +['ÙĪØ§', 'ÙĤ'] +['à¹Ĥ', 'à¸Ļ'] +['ãģ¦', 'ãģĹãģ¾'] +['اع', 'Ø©'] +['ãĤŃ', 'ãĥ£'] +['à¸į', 'า'] +['ÙĦا', 'ÙĤ'] +['ิ', 'à¸ģ'] +['ĠÑģ', 'ов'] +['ÑĢаÐ', 'º'] +['×Ļ׳', '×Ļ'] +['ü', 'ÄŁ'] +['Ã¼ÄŁ', 'ü'] +['×§', '×ij'] +['à¹Ī', 
'à¸Ńà¸ĩ'] +['Ġger', 'çek'] +['à¸Ĺ', 'ั'] +['ов', 'аниÑı'] +['×ŀ', '׼'] +['س', 'Ø©'] +['×Ļ×', '£'] +['le', 'ÅŁ'] +['Ùħ', 'ؤ'] +['ĠìĿ', 'ĺ'] +['à¸IJ', 'าà¸Ļ'] +['ĠÑģ', 'об'] +['Ġêµ', 'Ń'] +['×¢', 'צ'] +['з', 'в'] +['ส', 'à¸ĩ'] +['ز', 'ÙĦ'] +['ãģı', 'ãĤĮ'] +['и', 'ÑĢÑĥ'] +['ت', 'Ø£'] +['п', 'олн'] +['ìĺ', 'Ģ'] +['ÙĨ', 'Ø´'] +['׼', '×IJ'] +['Ùħ', 'Ø´'] +['à¸Ķ', 'à¹Į'] +['ÙĪ', 'ÙĬÙĦ'] +['à¹ģ', 'à¸Ĥ'] +['ãģ£ãģ¦', 'ãģĹãģ¾'] +['но', 'ÑģÑĤ'] +['в', 'л'] +['Ùħ', 'ÙĤ'] +['را', 'ج'] +['å¤', 'ī'] +['ë', 'Ľ'] +['â', '¸'] +['ì', 'IJ'] +['à', '»'] +['á', 'ļ'] +['â', '»'] +['ê', 'Ļ'] +['â', '§'] +['ð', 'Ĵ'] +['ðĿ', 'ĩ'] +['Ġ×IJ', 'ת'] +['ĠÙĦ', 'ÙĦ'] +['ĠØ£', 'ÙĨ'] +['Ġ×ķ', '×Ķ'] +['ãģ«', 'ãģ¯'] +['Ġ×Ļ', 'ש'] +['ت', 'Ùĩ'] +['ÃŃ', 'nh'] +['ÙĬ', 'ات'] +['Ġ×ij', '×ŀ'] +['à¸Ļั', 'à¹īà¸Ļ'] +['à¸Ļ', 'à¹īำ'] +['Ãł', 'o'] +['à¸ķ', 'าม'] +['ãģ®', 'ãģ¯'] +['d', 'ır'] +['Ġn', 'ghi'] +['ặ', 't'] +['×ŀ', '×Ļ×Ŀ'] +['ãģ¦', 'ãģĦãĤĭ'] +['Ġ×ij', 'ת'] +['หร', 'ืà¸Ń'] +['Ġس', 'ÙĬ'] +['ãģª', 'ãĤī'] +['à¹Ĥà¸Ķ', 'ย'] +['ı', 'yor'] +['à¸Ńี', 'à¸ģ'] +['á»ĩ', 'nh'] +['Ñĭ', 'м'] +['à¸Ĺุ', 'à¸ģ'] +['Ġ׾', '×Ĺ'] +['Ġ×Ķ', 'ר'] +['Ġ×Ķ', '×Ļ'] +['à¸ŀ', 'ระ'] +['à¹Ģว', 'ลา'] +['ĠØ', 'º'] +['ẫ', 'n'] +['m', 'Ä±ÅŁ'] +['׼', '×Ķ'] +['á»ij', 'n'] +['ãģ§', 'ãģĹãĤĩãģĨ'] +['ãĥ', '¢'] +['à¸Ľ', 'ี'] +['ס', '×Ļ'] +['ãģĵ', 'ãĤį'] +['Ġ׾', 'פ'] +['ร', 'à¸ĸ'] +['ê¸', 'Ī'] +['à¸ģ', 'วà¹Īา'] +['ë', '¬´'] +['á»į', 'ng'] +['ãĤĵ', 'ãģ§'] +['ãĤĪãģĨ', 'ãģª'] +['á»ĵ', 'i'] +['ãĤ', '¬'] +['ส', 'à¹Īà¸ĩ'] +['×Ļ׳', '×Ķ'] +['à¸ĸ', 'ูà¸ģ'] +['à¸Ī', 'ัà¸Ķ'] +['Ġ×Ķ', '×Ĵ'] +['ãĥ', 'ľ'] +['×ŀ', '×ķת'] +['ÙĪ', 'Ùĥ'] +['ëĭ', '¨'] +['ĠØ', '«'] +['ãģ®', 'ãģĮ'] +['à¹Ģห', 'à¹ĩà¸Ļ'] +['ع', 'ا'] +['à¸Ļ', 'ิ'] +['Å', 'ŀ'] +['à¸Ń', 'ะ'] +['ãģĪ', 'ãĤĭ'] +['Ø«', 'ÙĦ'] +['ØŃÙħ', 'د'] +['à¹Ģà¸ģ', 'ิà¸Ķ'] +['פ', 'שר'] +['פ', '×Ķ'] +['ม', 'ิ'] +['ئ', 'ÙĬس'] +['à¸Ĺำ', 'à¹ĥหà¹ī'] +['×¢', '×ĵ'] +['ìĭ', '¤'] +['à¸Ĭà¹Īว', 'ย'] +['ĠاÙĦÙħ', 'ÙĨ'] +['ز', 'ÙĬ'] +['ع', 'ÙĬ'] +['Ġ׼', '×IJ'] +['ạ', 'nh'] +['á»', '¹'] +['ãĤĵ', 'ãģª'] +['ส', 'ู'] +['צ', 'ר'] +['Æ°á»Ľ', 'ng'] 
+['×ķ', '×ķ×Ķ'] +['à¹Ĥ', 'ล'] +['ĠاÙĦ', 'Ùĩ'] +['ว', 'า'] +['หล', 'าย'] +['Ñī', 'е'] +['à¸Ĥ', 'à¹īà¸Ń'] +['à¹īà¸Ń', 'ย'] +['ب', 'Ø·'] +['ка', 'Ñı'] +['ĠØ', '¢'] +['Ġи', 'Ñģ'] +['ĠاÙĦ', 'غ'] +['à¸ģ', 'า'] +['à¸Ļ', 'à¹Īา'] +['ÙĬ', 'ÙĪ'] +['×ij', '×ķר'] +['á»ħ', 'n'] +['ว', 'à¸ĩ'] +['×Ļ×', 'ĸ'] +['ì²', 'Ń'] +['н', 'им'] +['ëŁ', '°'] +['×Ĵ', '×ķר'] +['ص', 'ØŃ'] +['ÙĦ', 'ÙĪ'] +['×Ĺ', '×ķת'] +['ส', 'ุ'] +['رÙĬ', 'ÙĤ'] +['ס', '×ĺ'] +['Ġ×ŀ', '×¢'] +['ãĥĨ', 'ãĤ£'] +['à¸Ħ', 'ิà¸Ķ'] +['ãĤį', 'ãģĨ'] +['à¹Ħ', 'ล'] +['à¸Ļ', 'à¹Į'] +['á»ı', 'i'] +['ÑģÑĤÑĢ', 'о'] +['ส', 'à¸Ķ'] +['ส', 'าร'] +['ÙĪÙĦ', 'Ø©'] +['ầ', 'm'] +['ร', 'à¹Īว'] +['รà¹Īว', 'ม'] +['ร', 'ุ'] +['ĠاÙĦس', 'ÙĬ'] +['ìĺ', 'ģ'] +['Ġ×ŀ', '×ij'] +['פ', '×ĺ'] +['à¸ķิ', 'à¸Ķ'] +['×ĺ', '×Ļ×Ŀ'] +['Ġë', '¬´'] +['ÙĤد', 'Ùħ'] +['Ġdü', 'ÅŁ'] +['ائ', 'ÙĦ'] +['м', 'Ñĭ'] +['ØŃ', 'س'] +['ÙĪ', 'ص'] +['×Ļ×§', '×Ķ'] +['ãģ§ãģ¯', 'ãģªãģĦ'] +['à¹Ģ', 'หม'] +['оÑĢ', 'ÑĤ'] +['í', 'Ĩµ'] +['ãģ', 'IJ'] +['к', 'ÑĢа'] +['ีย', 'ว'] +['ع', 'ار'] +['ئ', 'Ø©'] +['íĥ', 'Ģ'] +['ãģ«ãģª', 'ãĤĬ'] +['ج', 'Ø©'] +['ÙĪÙĤ', 'ع'] +['ÑĮ', 'Ñı'] +['×ķצ', '×Ķ'] +['ש', '×Ŀ'] +['ب', 'ÙĤ'] +['Ġ×Ļ', '×Ķ'] +['ÙĬ', 'Ø·'] +['ım', 'ız'] +['д', 'еÑĢж'] +['×Ļש', 'ר×IJ׾'] +['غ', 'ÙĬر'] +['ร', 'à¸Ńà¸ĩ'] +['à¹Ģรีย', 'à¸Ļ'] +['Ġ×Ķ', '×ĺ'] +['หม', 'าย'] +['Ùħ', 'Ùĩ'] +['اÙģ', 'Ø©'] +['Ġо', 'ÑĢг'] +['ÙĪ', 'Ùī'] +['ãĥ©', 'ãĤ¤'] +['×ŀ', '׳×Ķ'] +['ĠÄij', 'o'] +['Ġг', 'оÑĢ'] +['اÙħ', 'Ø©'] +['æ¥', '½'] +['Ø«', 'ÙĬر'] +['à¸ģิ', 'à¸Ī'] +['á»ĵ', 'n'] +['ÙĨ', 'ب'] +['ÑĢÑĥ', 'д'] +['ìĹ', 'Ī'] +['Ġ×Ĺ', '×ijר'] +['ÑĢаÐ', '¶'] +['ạ', 'ch'] +['ت', 'ÙĪ'] +['à¹Ĥ', 'ม'] +['×ij', '×Ļ×ij'] +['Ġí', 'Ĩµ'] +['aca', 'ģı'] +['جÙĦ', 'س'] +['à¹Ģà¸Ľ', 'ล'] +['ว', 'à¸Ķ'] +['à¸Ń', 'ล'] +['ãģŁ', 'ãĤĬ'] +['à¸Ľ', 'ัà¸į'] +['Ġìķ', 'Į'] +['عر', 'Ùģ'] +['à¹Ħ', 'à¸Ł'] +['Ø£', 'Ø®'] +['å¤ļ', 'ãģĦ'] +['à¸Ķ', 'ัà¸ĩ'] +['Ø´', 'Ùģ'] +['ãģ£ãģ¦', 'ãģĦãģ¾ãģĻ'] +['׼', '×ł×¡'] +['ÑĨ', 'е'] +['еÑģ', 'п'] +['Ùħ', 'اÙħ'] +['à¸ŀื', 'à¹īà¸Ļ'] +['иÑĩеÑģ', 'ки'] +['Ø®', 'د'] +['Ùĥ', 'ÙĪÙħ'] +['Ġ×Ķ', 'ר×IJש'] +['ت', 'اب'] +['é£Ł', 
'ãģ¹'] +['ื', 'à¸Ļ'] +['оÑĢ', 'о'] +['Ġb', 'öl'] +['×ķ×Ĺ', '×ĵ'] +['دÙĬ', 'ر'] +['ắ', 'm'] +['د', 'ع'] +['ãģķ', 'ãģĽ'] +['à¸ĺ', 'ร'] +['à¸ĺร', 'รม'] +['ãģĭ', 'ãĤĤ'] +['å¤ļ', 'ãģı'] +['r', 'ä'] +['س', 'ع'] +['×Ļ׾', '×Ķ'] +['ض', 'ر'] +['ĠاÙĦ', 'شر'] +['×ĸ', '×ķר'] +['×¢', '×ijר'] +['ạ', 'm'] +['алÑĮ', 'но'] +['ر', 'ÙĨ'] +['اÙħ', 'ج'] +['׼', '×ļ'] +['d', 'ıģ'] +['д', 'ен'] +['ض', 'ا'] +['ÙĦÙĬ', 'Ùħ'] +['Ġê·¸', '룬'] +['تÙħ', 'اع'] +['ار', 'ÙĬØ®'] +['à¹Ĥ', 'à¸ķ'] +['ĠÑģ', 'ÑĢед'] +['Ġ׳', '×ķס'] +['ÙĤ', 'بÙĦ'] +['оÑĤ', 'ов'] +['le', 'ÅŁtir'] +['Ġм', 'еÑģÑĤ'] +['سÙĦ', 'Ùħ'] +['Ġ×¢', 'צ'] +['ĠاÙĦس', 'ÙĦ'] +['еÑĤ', 'ÑĮ'] +['اب', 'Ø©'] +['н', 'ак'] +['สà¸ĸ', 'าà¸Ļ'] +['Ġ×ij', '׳'] +['à¸ļ', 'ัà¸Ļ'] +['׼', '׳'] +['Ġö', 'ÄŁ'] +['ãģ¨', 'è¨Ģ'] +['uy', 'ến'] +['di', 'ÄŁ'] +['áºŃ', 'u'] +['ÑĢ', 'аÑģ'] +['ãĤ·', 'ãĥ§ãĥ³'] +['n', 'ız'] +['×ķ×ĵ', '×Ķ'] +['ت', 'س'] +['Ùħ', 'اÙĦ'] +['à¹Ģห', 'à¸ķุ'] +['ย', 'ว'] +['à¸ŀ', 'ัà¸ģ'] +['ãģĦ', 'ãģªãģĦ'] +['Ġк', 'аÑĩ'] +['ล', 'à¹Į'] +['ר׼', 'ת'] +['ÅŁt', 'ur'] +['×ŀ', '×ķס'] +['ãģ', '¥'] +['б', 'ол'] +['عÙħ', 'اÙĦ'] +['×ķר', 'ת'] +['ÑĨи', 'он'] +['ศ', 'ึà¸ģ'] +['à¸', 'ı'] +['ÑĢ', 'ен'] +['اس', 'ÙĬ'] +['ائ', 'ر'] +['à¹Ĥ', 'à¸Ľà¸£'] +['Ġse', 'ç'] +['غ', 'ÙĬ'] +['Ñį', 'ÑĤ'] +['ен', 'н'] +['ãģª', 'ãģ®'] +['×Ļש', '×Ķ'] +['×Ļפ', '×ķר'] +['ãģŁãĤģ', 'ãģ«'] +['ز', 'Ø©'] +['Ġç', 'oc'] +['ãĤ¯', 'ãĥª'] +['ÑĪ', 'ен'] +['ãĤı', 'ãģij'] +['رÙĬ', 'د'] +['ĠÑĢ', 'аÑģÑģ'] +['Ùĥ', 'ات'] +['ส', 'à¸Ńà¸ļ'] +['ce', 'ÄŁi'] +['ãĤ¿', 'ãĤ¤'] +['à¸ļ', 'ร'] +['ĠاÙĦ', 'بر'] +['׳', '×ķ×¢'] +['r', 'ün'] +['را', 'ض'] +['ศา', 'ส'] +['à¸ķ', 'รà¹Į'] +['ãģį', 'ãģŁ'] +['×ķ׾', '×ĵ'] +['еÑĢ', 'и'] +['íĹ', 'ĺ'] +['ắ', 'p'] +['ت', 'عÙĦ'] +['Ùĥ', 'د'] +['иÑĤелÑĮ', 'но'] +['Ø·', 'Ùģ'] +['Ġав', 'ÑĤом'] +['Ġ×ŀ', 'צ'] +['ÑĪи', 'Ñħ'] +['ات', 'Ùģ'] +['ĠÑħ', 'оÑĤ'] +['Ùİ', 'ا'] +['ãģı', 'ãĤĭ'] +['×Ķ', 'פ'] +['à¹Ĥ', 'à¸Ĺ'] +['à¹ģ', 'à¸ŀ'] +['à¹Ī', 'à¸Ńย'] +['ĠاÙĦÙħ', 'Ø´'] +['à¸ģาร', 'à¸ĵà¹Į'] +['ани', 'з'] +['×Ķ', '׾'] +['ظ', 'Ùħ'] +['ย', 'ุ'] +['li', 'ÄŁ'] +['à¹Ħ', 'à¸Ĥ'] +['à¸ĸ', 'ืà¸Ń'] +['ö', 
'z'] +['ãģij', 'ãģ¦'] +['à¹Ģ', 'à¸ľ'] +['ุ', 'ม'] +['ãĥĹ', 'ãĥ¬'] +['Ġ×Ķ×IJ', '×Ĺר'] +['خت', 'ÙĦÙģ'] +['à¸', 'İ'] +['ÙĦا', 'ØŃ'] +['Ġdü', 'zen'] +['צ', '×Ķ'] +['س', 'اء'] +['×ķר', '×ļ'] +['×ķ×ĵ', '×Ļ'] +['ÑĢа', 'ÑĦ'] +['ÅŁt', 'ır'] +['ãģ«', 'åħ¥'] +['ãģĪ', 'ãģ°'] +['ص', 'ÙĪÙĦ'] +['ĠÐľ', 'оÑģ'] +['ا', 'Ùĩر'] +['ãģ£', 'ãģ'] +['ĠлÑİ', 'б'] +['×Ļ×¢', '×Ķ'] +['Ġ×Ķ×ŀ', '×§'] +['สิ', 'à¸Ĺ'] +['สิà¸Ĺ', 'à¸ĺิ'] +['×Ļ׳', '×Ŀ'] +['ÙĦا', 'Ùģ'] +['à¸ŀัà¸Ļ', 'à¸ĺ'] +['×ķ×IJ', '×Ķ'] +['ม', 'ั'] +['à¸Ĥ', 'à¸ĵะ'] +['д', 'оÑĢ'] +['ãģ¨', 'ãģª'] +['à¸ģระ', 'à¸Ĺ'] +['ac', 'ı'] +['×ķ׾', '×ķ×Ĵ'] +['Ñĥ', 'ÑĪ'] +['ãĥ¥', 'ãĥ¼'] +['ãĥ', '¦'] +['Ùħ', 'ست'] +['Ġa', 'ÅŁ'] +['ש', '×§'] +['פ', 'ת×Ĺ'] +['าย', 'à¸Ļ'] +['í', 'ĩ'] +['ë', '¢'] +['ï', '·'] +['í', 'ī'] +['ì', 'µ'] +['ì', '¬'] +['ðĿ', 'Ľ'] +['ì', 'Ĵ'] +['ë', 'Ļ'] +['ê', '§'] +['á', 'ĸ'] +['â', '¨'] +['â', '±'] +['á', 'ĺ'] +['ð', 'ĸ'] +['à', 'ł'] +['á', 'Ķ'] +['ðIJ', 'Ń'] +['ữ', 'ng'] +['Å©', 'ng'] +['Ġ×Ķ', 'ת'] +['ĠاÙĦ', 'ا'] +['Ġ×ŀ', 'ת'] +['à¸ĸ', 'ึà¸ĩ'] +['ò', 'n'] +['á»ĭ', 'nh'] +['нÑĭ', 'м'] +['Ġc', 'ả'] +['à¸Ķ', 'ู'] +['Ġ', 'à¹ģà¸ķà¹Ī'] +['Ġ×ij', '×Ķ'] +['ó', 'i'] +['ãģ¨', 'ãģĹãģ¦'] +['ú', 'ng'] +['ĠØ', '°'] +['Ġ×Ķ', '׳'] +['Ġب', 'ÙĨ'] +['ÙĦ', 'اÙĦ'] +['à¹Ħ', 'à¸Ĺย'] +['á»ĩ', 'p'] +['t', 'ı'] +['ม', 'ัà¸Ļ'] +['ằ', 'ng'] +['á»ij', 't'] +['к', 'ом'] +['à¸ĭ', 'ึà¹Īà¸ĩ'] +['à¸Ħร', 'ัà¸ļ'] +['à¸ļ', 'à¹īาà¸Ļ'] +['ĠاÙĦ', 'ÙĬ'] +['l', 'ü'] +['ÙĪ', 'س'] +['ãģł', 'ãģ£ãģŁ'] +['à¹Ģ', 'à¸ĩ'] +['Ġê³', 'µ'] +['н', 'Ñĥ'] +['ãĤĪ', 'ãĤĬ'] +['м', 'Ñĥ'] +['à¹Ģà¸Ĥ', 'า'] +['ãĤ', 'Ģ'] +['ни', 'е'] +['ãģ«ãģª', 'ãĤĭ'] +['áºŃ', 'y'] +['ĠÙĪ', 'ا'] +['ëł', '¤'] +['ש', '×ķ'] +['á', 'p'] +['×ĵ', '×ķ'] +['ãģ§', 'ãģĹãģŁ'] +['ع', 'ض'] +['Ñģк', 'ой'] +['æĦŁ', 'ãģĺ'] +['ÑİÑĤ', 'ÑģÑı'] +['Ġ×Ļ', '׼×ķ׾'] +['ãĤĵ', 'ãģł'] +['в', 'и'] +['à¹Ģล', 'à¹Īà¸Ļ'] +['ìĿ´', 'ëĭ¤'] +['ĠÙĦ', 'Ùĩ'] +['à¸Ħ', 'ืà¸Ń'] +['ت', 'Ùĥ'] +['Ùħ', 'ÙĥÙĨ'] +['a', 'ģı'] +['׳', '×ĵ'] +['ë¯', '¼'] +['à¹Ħ', 'ว'] +['สำ', 'ห'] +['สำห', 'รัà¸ļ'] +['Ñģл', 'ед'] +['t', 'ır'] +['ĠÙĦ', 'ÙĬ'] +['ĠاÙĦع', 
'ÙħÙĦ'] +['×ij', '×ķת'] +['×ij', '×Ļ×Ŀ'] +['à¸Ħ', 'ำ'] +['à¹Ģà¸Ħร', 'ืà¹Īà¸Ńà¸ĩ'] +['lı', 'ģı'] +['ืà¸Ń', 'à¸ĩ'] +['ج', 'د'] +['íŀ', 'Ī'] +['ìĭ', '¬'] +['×¢', '×ķת'] +['ส', 'ิà¸Ļ'] +['Ñĩ', 'и'] +['ر', 'ض'] +['à¹Ģà¸Ľ', 'ิà¸Ķ'] +['à¸Ħ', 'à¹Īา'] +['ìĦ', 'ł'] +['ÙĪØ±', 'Ø©'] +['×§', '×ĺ'] +['ìľ', 'ł'] +['ع', 'ÙħÙĦ'] +['×IJ', '×Ļ×Ŀ'] +['׾', '×Ļ×Ŀ'] +['à¹ĥห', 'à¸į'] +['à¹ĥหà¸į', 'à¹Ī'] +['ừ', 'a'] +['á»į', 'i'] +['ãģ', '¶'] +['ÃŃ', 'ch'] +['ãĥĩ', 'ãĤ£'] +['×ķר', '×Ļ×Ŀ'] +['Ñģ', 'о'] +['ìķ', '½'] +['ов', 'а'] +['Ñĩ', 'аÑģÑĤ'] +['à¹Ģà¸Ī', 'à¹īา'] +['п', 'ÑĢо'] +['Ġ×ŀ', '×Ĺ'] +['ãĥ', 'İ'] +['×ķ×Ļ', '×ķת'] +['Ġд', 'е'] +['ë§', 'Ī'] +['ì§', 'ģ'] +['×Ļפ', '×Ķ'] +['ĠاÙĦع', 'اÙĦÙħ'] +['ë¥', '´'] +['ר×IJ', '×Ķ'] +['uy', 'á»ĥn'] +['×¢', '×Ļ'] +['ม', 'ืà¸Ń'] +['Ø¥', 'ÙĨ'] +['ร', 'ู'] +['ĠØ', '²'] +['×Ļ', '×ķ×Ŀ'] +['à¸ķ', 'à¹īà¸Ļ'] +['ãģ¦', 'ãģĦãģ¾ãģĻ'] +['Ùħ', 'اÙĨ'] +['ĠÐ', '¥'] +['à¸Ľà¸£à¸°', 'à¹Ģà¸Ĺศ'] +['á»', '³'] +['׾', '×ij'] +['à¹Ģà¸Ķ', 'à¹ĩ'] +['ãģŁ', 'ãģ¡'] +['à¸Ĺี', 'ม'] +['à¸Ļ', 'ะ'] +['ìĹ', '°'] +['Ġìł', 'Ģ'] +['ÙĦ', 'Ùĩ'] +['ợ', 'i'] +['ĠاÙĦ', 'ز'] +['د', 'ار'] +['ãĤ³', 'ãĥ³'] +['м', 'ин'] +['à¹ģห', 'à¹Īà¸ĩ'] +['à¸Ķ', 'ัà¸ļ'] +['׼', 'ר'] +['ж', 'а'] +['íĸ', 'Ī'] +['×ŀ', '×ĸ'] +['ợ', 'i'] +['à¸Ķ', 'า'] +['Ġع', 'بد'] +['à¹ģ', 'ร'] +['×IJת', 'ר'] +['×¢', '׳×Ļ'] +['à¹Ģ', 'à¸Ħ'] +['×ķצ', 'ר'] +['ì§Ģ', 'ë§Į'] +['ائ', 'Ùħ'] +['Ø£', 'س'] +['uy', 'á»ģn'] +['Ġ×IJ', '׳'] +['×Ĺ', '׳×ķ'] +['×ĸ', '×Ļ'] +['ร', 'à¹īาà¸Ļ'] +['ĠÐł', 'оÑģ'] +['ĠÐłÐ¾Ñģ', 'Ñģ'] +['رب', 'ÙĬØ©'] +['t', 'ür'] +['ãĤĭ', 'ãģĵãģ¨'] +['ظ', 'ر'] +['б', 'Ñĭ'] +['à¸Ĺีà¹Ī', 'สุà¸Ķ'] +['Ġצ', 'ר'] +['èĩª', 'åĪĨ'] +['л', 'аÑģ'] +['ĠÑı', 'в'] +['ĠÑıв', 'лÑı'] +['à¸ŀร', 'à¹īà¸Ńม'] +['à¸Ńา', 'à¸Ī'] +['à¸ļริ', 'à¸ģาร'] +['Ġç', 'ı'] +['ëį', 'ĺ'] +['ĠاÙĦÙħ', 'ست'] +['ت', 'Ø´'] +['ש', '×ķ×ij'] +['ãĤ', '´'] +['Ġyap', 'ıl'] +['ĠاÙĦ', 'ذ'] +['ุ', 'à¹Īม'] +['à¸ĸ', 'à¹īา'] +['ìĦ', '¤'] +['ì°', '¨'] +['в', 'аÑĢ'] +['à¹Ģà¸ŀ', 'ิà¹Īม'] +['Æ°á»Ľ', 'i'] +['Ùĥ', 'س'] +['à¸Ńย', 'าà¸ģ'] +['ãģ¦', 'ãĤĤ'] +['Ġг', 'од'] +['ÙĬ', 'ار'] +['à¸ķ', 
'à¸Ńà¸Ļ'] +['Ġиг', 'ÑĢ'] +['à¹Ħà¸Ķà¹ī', 'รัà¸ļ'] +['ĠاÙĦÙħ', 'ر'] +['ÙĤ', 'ت'] +['Ġë', 'ĺ'] +['Ġëĺ', 'IJ'] +['ẩ', 'n'] +['ãģĻãĤĭ', 'ãģĵãģ¨'] +['×Ĵ', '×Ŀ'] +['Ġ×ij', '×ij'] +['ت', 'د'] +['ÙĪ', 'ار'] +['ãĤ', '®'] +['п', 'ол'] +['Ġм', 'ог'] +['تر', 'Ùĥ'] +['ÙĪ', 'Ø«'] +['Ġç', 'ık'] +['ا', 'Ø©'] +['à¹Ģà¸Ķ', 'ียว'] +['มี', 'à¸Ħวาม'] +['Ġ×ŀ', '×Ĵ'] +['ص', 'Ùģ'] +['ĠТ', 'ак'] +['Ġ׼', 'ת'] +['×Ļ×ĵ', '×Ļ'] +['ов', 'оÑĢ'] +['ầ', 'y'] +['สิ', 'à¹Īà¸ĩ'] +['ب', 'ت'] +['ür', 'ü'] +['ÙĨ', 'ج'] +['หล', 'ัà¸ģ'] +['×Ļ×Ķ', '×Ŀ'] +['ÙĤ', 'ص'] +['з', 'Ñĭ'] +['×Ľ×ª', '×ij'] +['ư', 'u'] +['m', 'ız'] +['ĠìĦ', '¸'] +['л', 'ог'] +['Ùħ', 'ÙĬÙĦ'] +['ÙĬ', 'ج'] +['íĴ', 'Ī'] +['à¸ŀ', 'à¸ļ'] +['ห', 'ัว'] +['з', 'на'] +['ר', '×§'] +['à¹Ĥ', 'ร'] +['Ġ×ij', 'ס'] +['ĠBaÅŁ', 'kan'] +['ĠëĶ', '°'] +['à¸Ń', 'ัà¸Ļ'] +['ีà¹Īย', 'ว'] +['н', 'еÑģ'] +['à¹Ģà¸Ķ', 'ิà¸Ļ'] +['ÙĬ', 'اÙĨ'] +['×ķ׾', '×Ļ'] +['ا', 'خت'] +['צ', '×ķת'] +['ãģĵ', 'ãģĵ'] +['ĠاÙĦ', 'اÙĨ'] +['ĠпÑĢо', 'ÑĨ'] +['ãģ¾', 'ãģł'] +['׼', 'ס'] +['ĠاÙĦ', 'Ø¢'] +['ÙĬ', 'ز'] +['ĠاÙĦد', 'ÙĪÙĦ'] +['Ġíķĺ', 'ëĤĺ'] +['ض', 'ع'] +['ê»', 'ĺ'] +['ÅĽ', 'wi'] +['ย', 'ิ'] +['ãģ¡ãĤĥ', 'ãĤĵ'] +['ĠÙħ', 'Ø´'] +['à¸ĺ', 'ี'] +['ãģ¨', 'ãģį'] +['׳×Ļ', '×ķת'] +['Ġë', '¯'] +['Ġë¯', '¸'] +['Ġs', 'ı'] +['ëĭĪ', 'ê¹Į'] +['Ġп', 'л'] +['غ', 'ÙĦ'] +['à¹ģ', 'รà¸ĩ'] +['ب', 'ÙĬر'] +['ãģĤãĤĬ', 'ãģ¾ãģĽãĤĵ'] +['ê·', '¼'] +['Ġy', 'üz'] +['ĠdeÄŁ', 'er'] +['åł´', 'åIJĪ'] +['á»', '¡'] +['м', 'аÑĤ'] +['รา', 'à¸Ĭ'] +['ÙĪØ±', 'ÙĬ'] +['ж', 'ен'] +['ãģ¾', 'ãĤĬ'] +['ãģ®', 'ä¸Ń'] +['×Ļ×ĵ', '×¢'] +['à¸Ń', 'ุ'] +['à¸ļ', 'à¸Ńล'] +['à¸Ľà¸±à¸į', 'หา'] +['ز', 'Ùħ'] +['ÄŁ', 'a'] +['à¸Ń', 'ืà¹Ī'] +['à¸Ńืà¹Ī', 'à¸Ļ'] +['п', 'л'] +['Ġне', 'обÑħодим'] +['׼', '×ij'] +['à¹Ģ', 'ศ'] +['קר', '×Ķ'] +['ì²', 'ĺ'] +['ëł', '¨'] +['×ŀ×§', '×ķ×Ŀ'] +['jÄħ', 'c'] +['Ùĩ', 'ÙĦ'] +['Ġ×¢', '×ij×ķ×ĵ'] +['à¹Ħม', 'à¹ī'] +['à¸ģล', 'ัà¸ļ'] +['×ķ׼', '׾'] +['×§', '×ĵ'] +['اÙĦ', 'ÙĬØ©'] +['ر', 'Ùĩ'] +['ãģij', 'ãĤĮãģ°'] +['ĠÙĨ', 'Ù쨳'] +['ãĤ¢', 'ãĥ«'] +['ìĹ', 'Īëĭ¤'] +['×§', '×ķר'] +['н', 'еÑĢ'] +['ب', 'اب'] +['ãĤ', '¶'] +['سب', 'ب'] 
+['ÙĦ', 'ÙĬÙĦ'] +['ص', 'ÙĨ'] +['ص', 'در'] +['ế', 'm'] +['à¸Ĭà¹Īว', 'à¸ĩ'] +['ØŃ', 'ÙĨ'] +['Ġ×ij', '×Ĵ'] +['×ŀ', '×ķ×¢'] +['׾', '×Ĺ'] +['大', 'ãģį'] +['ت', 'ب'] +['н', 'еÑĤ'] +['×Ļ×ij', '×Ķ'] +['б', 'л'] +['ãĥĹ', 'ãĥª'] +['اص', 'Ø©'] +['ãģ¤', 'ãģij'] +['×Ļ×ŀ', '×ķש'] +['ãģĮ', 'ãģĤ'] +['ëĭ', '´'] +['ãģĭãĤĤ', 'ãģĹ'] +['ãģĭãĤĤãģĹ', 'ãĤĮ'] +['ãģ¡', 'ãĤī'] +['×ij', '×ĺ'] +['Ġba', 'ÄŁ'] +['×Ļ×Ĺ', 'ס'] +['×ij', '×ķ×¢'] +['ล', 'ี'] +['פע', '×Ļ׾'] +['им', 'и'] +['g', 'ÅĤ'] +['Ġим', 'е'] +['خد', 'اÙħ'] +['×IJ', '×Ļר'] +['Ġy', 'apt'] +['ãģ¨', 'ãģĦ'] +['à¸ĩ', 'à¹Īาย'] +['׾×Ļ', '×ķ'] +['ØŃد', 'Ø«'] +['را', 'ÙĤ'] +['ĠÄIJ', 'i'] +['اد', 'ر'] +['ãģĵãģ¨', 'ãĤĤ'] +['×ij', '×Ļר'] +['Ġв', 'з'] +['ض', 'اÙģ'] +['ת', '×ķ׼'] +['ÑĢ', 'ом'] +['ر', 'ات'] +['à¹Ģà¸Ĺ', 'à¹Īา'] +['ãģĺ', 'ãĤĥ'] +['ãģĿ', 'ãģĵ'] +['اج', 'تÙħاع'] +['à¹īà¸Ń', 'à¸Ļ'] +['ÙĤ', 'Ùħ'] +['ë³', '¸'] +['Ä', 'ŀ'] +['ש', '×Ļ×ķ'] +['×ij', '׳×Ļ'] +['ìľĦ', 'ìĽIJ'] +['à¹ģ', 'à¸Ī'] +['×Ĺ', '×ķר'] +['دÙĬ', 'ÙĨØ©'] +['ت', 'Ø·'] +['ằ', 'm'] +['ò', 'a'] +['ย', 'à¸Ńà¸Ķ'] +['Ġëĭ', '¹'] +['สุ', 'à¸Ĥ'] +['×ĵר', '×ļ'] +['د', 'ÙĨ'] +['س', 'ÙĬÙĨ'] +['ÙĪÙĤ', 'Ùģ'] +['ÑĨ', 'Ñĭ'] +['г', 'оÑĤов'] +['еж', 'дÑĥ'] +['à¸ŀ', 'วà¸ģ'] +['اÙĤ', 'تص'] +['اÙĤتص', 'اد'] +['cz', 'ÄĻ'] +['ni', 'ÄĻ'] +['ÑĢ', 'еб'] +['ØŃ', 'ÙĪ'] +['à¸Ĺ', 'à¹Į'] +['ãĤĪ', 'ãģŃ'] +['д', 'ж'] +['à¸ģล', 'à¹Īาว'] +['دÙĬ', 'Ø«'] +['ãĤ³', 'ãĥŁ'] +['ÙĤ', 'ÙĪÙħ'] +['Ġت', 'ØŃ'] +['à¹Ģ', 'à¸ķิ'] +['اÙģ', 'ظ'] +['à¸Ī', 'ุ'] +['رÙĬ', 'اض'] +['×ŀש', '×ļ'] +['à¹Ĥ', 'ย'] +['еÑĢ', 'е'] +['ãģ¿', 'ãģŁãģĦ'] +['ìĿ´', 'ëĿ¼'] +['ĠاÙĦÙħ', 'ÙĪ'] +['ĠÑģÑĤ', 'о'] +['à¹Ģรà¹ĩ', 'ว'] +['Ġд', 'еÑĤ'] +['ĠÑģ', 'дел'] +['à¹Ģà¸Ĭ', 'ืà¹Īà¸Ń'] +['פ', '׳×Ļ'] +['ÙĪØ¶', 'ÙĪØ¹'] +['×ij', 'ס'] +['à¹ģ', 'à¸Ķ'] +['ó', 'c'] +['ริ', 'ม'] +['ÑĢаÐ', '´'] +['ìĪ', 'ł'] +['ãĥ¼ãĤ', 'º'] +['ãģ«', 'ãģĬ'] +['и', 'но'] +['פ', '×Ļ׾'] +['à¸Ĭั', 'à¹Īà¸Ļ'] +['×Ĺ×ĵ', 'ש'] +['à¹Ģà¸Ļ', 'ืà¹Īà¸Ńà¸ĩ'] +['׳', '×Ļס'] +['غ', 'رب'] +['ãĤ¸', 'ãĥ£'] +['ส', 'ัà¸ĩ'] +['à¹Ģ', 'à¸Ĺีà¹Ī'] +['à¹Ģà¸Ĺีà¹Ī', 'ยว'] +['ëŁ', '¼'] +['à¹ģ', 'à¸Ł'] +['ãĥ¼ãĤ', '·'] 
+['ãĥ¼ãĤ·', 'ãĥ§ãĥ³'] +['Ġвоз', 'мож'] +['جÙħ', 'ÙĪØ¹'] +['×ijר', '×Ļ×Ŀ'] +['ãĥĪ', 'ãĥ©'] +['ĠкаÑĩ', 'еÑģÑĤв'] +['Ø·', 'ÙĬ'] +['ÑĤ', 'Ñı'] +['צ', '×ķ×¢'] +['ÄŁ', 'ını'] +['ع', 'ÙĦÙī'] +['ا', 'ذ'] +['ÙĪØ§ÙĤ', 'ع'] +['Ùħ', 'ÙĪØ§'] +['ائ', 'ÙĬÙĦ'] +['к', 'ол'] +['á»ģ', 'm'] +['à¸ľà¸¥', 'ิà¸ķ'] +['×Ļ׳', '×ĺר'] +['س', 'Ùĥ'] +['ש', '×Ļר'] +['ศึà¸ģ', 'ษา'] +['à¸ļ', 'ั'] +['Ñĩ', 'аÑģ'] +['×ķפ', '×Ķ'] +['×Ļפ', '×ķ׾'] +['ĠاÙĦس', 'اب'] +['رÙĬ', 'ب'] +['ĠاÙĦ', 'بÙĬ'] +['ãĤ¹', 'ãĥĨ'] +['Ñĩ', 'ен'] +['à¹ģ', 'à¸ľ'] +['Ġ׳', 'ש'] +['ز', 'ÙĬد'] +['ØŃ', 'اد'] +['ëį', 'Ķ'] +['رÙĪ', 'ع'] +['à¸Ĺุ', 'à¸Ļ'] +['ส', 'มา'] +['c', 'zeÅĦ'] +['×Ļ×ĵ', '×Ķ'] +['ãģ§', 'ãģĤ'] +['Ġçoc', 'uk'] +['Ø®', 'ب'] +['à¸ļ', 'าย'] +['à¸Ľà¸£à¸°', 'à¸Ĭา'] +['×ŀש', '׾'] +['ãģª', 'ãģĭ'] +['à¸ģ', 'าย'] +['ãĥģ', 'ãĥ£'] +['аÑĢ', 'и'] +['ĠÑĩ', 'а'] +['à¸Ķ', 'ำ'] +['à¸Ĺั', 'à¹Īว'] +['Ñĥ', 'Ñħ'] +['Ġö', 'z'] +['Ġì¢', 'ĭ'] +['ج', 'رÙĬ'] +['ائ', 'ÙĤ'] +['à¸ł', 'ัย'] +['Ø·', 'ار'] +['د', 'ارة'] +['Ä©', 'nh'] +['Ø«', 'ÙĨ'] +['zell', 'ik'] +['اÙĦ', 'ت'] +['Ġg', 'eli'] +['ãĥķãĤ', '©'] +['ол', 'од'] +['رب', 'ع'] +['שת', '×ŀש'] +['à¸ļร', 'ร'] +['íĿ', '¬'] +['Ġü', 'rün'] +['Ġê·¸', 'ëłĩ'] +['ศาส', 'à¸ķรà¹Į'] +['ãģ', 'ľ'] +['×Ļ×ij', '׾'] +['ĠпÑĢед', 'ÑģÑĤав'] +['سط', 'ÙĬÙĨ'] +['ãĤĴ', '使'] +['Ġпом', 'оÑī'] +['×ķ×§', 'ר'] +['ãĥ¯', 'ãĥ¼'] +['Ġyö', 'net'] +['×Ļ×§', 'ר'] +['à¸Ĥ', 'า'] +['еÑĢи', 'ал'] +['ØŃ', 'Ùģ'] +['Ġ×Ļ', 'צ'] +['à¸Ĺ', 'ิ'] +['å£', '²'] +['à¸Ļ', 'à¸Ńà¸ģ'] +['×ķ׼', 'ר'] +['íĻ', 'ľ'] +['á»§', 'y'] +['ĠاÙĦÙĤ', 'ر'] +['×Ļ×ij', '×ķת'] +['ÅĽ', 'ni'] +['Ùħ', 'شار'] +['ượ', 't'] +['ĠÙĦ', 'دÙĬ'] +['ÑĤ', 'ел'] +['ĠØ¥', 'ÙĦÙĬ'] +['عÙĦ', 'ÙĪÙħ'] +['ìķ', 'ĺ'] +['в', 'иÑĤ'] +['à¸Ħ', 'ะ'] +['yr', 'ı'] +['ãģ¨', 'ãģ£ãģ¦'] +['à¹Ģ', 'à¸ī'] +['à¸ĸ', 'าม'] +['ÙĤ', 'ار'] +['عÙĦ', 'اÙħ'] +['ặ', 'ng'] +['Ùħ', 'ÙĴ'] +['×Ļ×ŀ', 'ת'] +['سب', 'Ø©'] +['ãĤ¯', 'ãĥ©'] +['×ķס', '×£'] +['ĠпÑĢ', 'ин'] +['ãģĦ', 'ãĤį'] +['س', 'اس'] +['عت', 'بر'] +['วิ', 'à¸Ĺย'] +['วิà¸Ĺย', 'า'] +['س', 'Ùĥر'] +['ãĤ·', 'ãĥ§'] +['ãģ', 'ģ'] +['ัà¸ģ', 'ษ'] +['×ij', '×ķ×Ķ'] 
+['ห', 'ย'] +['ãģ¾', 'ãĤĮ'] +['ĠоÑĢг', 'аниз'] +['каз', 'ал'] +['ĠÑģв', 'Ñıз'] +['uy', 'ết'] +['ĠпÑĢо', 'из'] +['Ġ×§', '×ĺ'] +['à¹ģà¸ģ', 'à¹ī'] +['п', 'ÑĥÑģ'] +['Ġê·¸', 'ê²ĥ'] +['ëĬ', 'IJ'] +['л', 'екÑģ'] +['ãĥ¼ãĥ', 'Ĺ'] +['à¸ķ', 'ำ'] +['ת×Ĺ', '×Ļ׾'] +['à¸Ńà¸ĩ', 'à¸Ħà¹Į'] +['áº', 'µ'] +['׳', 'צ'] +['Ø£', 'Ø´'] +['Ø´', 'Ùĩ'] +['ย', 'ะ'] +['à¸ģ', 'à¸İ'] +['ĠاÙĦØ¥', 'سÙĦاÙħ'] +['ед', 'ÑĮ'] +['ãģ²', 'ãģ¨'] +['ëıĦ', 'ë¡Ŀ'] +['ãģ©', 'ãģ®'] +['Ñĥ', 'в'] +['еÑĩ', 'ение'] +['ĠاÙĦت', 'ج'] +['ãģ«', 'è¡Į'] +['Ġп', 'озв'] +['ãĤı', 'ãĤĬ'] +['ÙĦ', 'اث'] +['íķĺ', 'ìĺĢ'] +['Ġм', 'аÑĢ'] +['Ġkon', 'uÅŁ'] +['ãĥ¬', 'ãĤ¹'] +['ãĤĴ', 'æĮģ'] +['ĠоÑģ', 'нов'] +['×Ĺ', '×ij'] +['ÙĪØ¬', 'ÙĪØ¯'] +['פ', '×ķף'] +['в', 'оÑĢ'] +['Ġн', 'ик'] +['ãģĭ', 'ãĤĭ'] +['ÅŁtır', 'ma'] +['×Ļס', '×ĺ'] +['Ø£', 'ÙĦ'] +['ห', 'à¹Į'] +['и', 'она'] +['лÑĮ', 'н'] +['Ġг', 'оÑģ'] +['ĠÐľÐ¾Ñģ', 'к'] +['ÑĢ', 'об'] +['×ķ×IJ', '×Ļ'] +['ãģĬãĤĬ', 'ãģ¾ãģĻ'] +['ãģ£ãģ', '±'] +['к', 'л'] +['à¸Ļ', 'à¸Ķà¹Į'] +['رÙĬ', 'Ùģ'] +['اس', 'ب'] +['ĠÑĢ', 'еÑĪ'] +['Ġд', 'ол'] +['ãģ¹', 'ãģį'] +['×Ļ×ij', '×ķר'] +['м', 'еÑī'] +['Ġна', 'ÑĪ'] +['à¹ģ', 'à¸Ľà¸¥'] +['ÑĢ', 'иÑĤ'] +['кÑĥ', 'Ñģ'] +['и', 'ÑĢа'] +['аÑĤ', 'ÑĥÑĢ'] +['ÙĪØ§', 'صÙĦ'] +['à¹Ģà¸ľ', 'ย'] +['à¸Ń', 'ำ'] +['à¹Ģà¸ģ', 'ิà¸Ļ'] +['غ', 'Ùħ'] +['ãģĻ', 'ãģİ'] +['lı', 'kl'] +['ÅĦ', 'sk'] +['ê²', '¬'] +['×Ļ׼', '×Ķ'] +['×Ĺ', 'ש×ij'] +['ÙĪØ±', 'ÙĬØ©'] +['Ġд', 'ейÑģÑĤв'] +['×Ĺ׾', '×ĺ'] +['Ġ׾', '×ŀ×¢'] +['צ׾', '×Ļ×Ĺ'] +['еÑĩ', 'а'] +['Ùģ', 'اع'] +['×Ĵ', '×Ļ×ĵ'] +['áºŃ', 'm'] +['ÄĻ', 'b'] +['Ø´', 'ع'] +['ãģı', 'ãĤĬ'] +['à¸ŀ', 'ุ'] +['ед', 'еÑĢ'] +['à¸Ĥ', 'à¸Ļ'] +['à¸Ħ', 'าร'] +['ĠболÑĮ', 'ÑĪ'] +['ãģı', 'ãģªãĤĬ'] +['à¸ĵ', 'า'] +['×ĵ', '×ķ×Ĵ'] +['Ġм', 'н'] +['ä¸Ĭ', 'ãģĮ'] +['ç¶ļ', 'ãģį'] +['ฤ', 'ษ'] +['à¸', 'Ĩ'] +['Ø®', 'ÙĬ'] +['à¹Ģà¸Ĺ', 'à¸ŀ'] +['สั', 'ม'] +['à¹Ģส', 'à¸Ļ'] +['à¹Ģสà¸Ļ', 'à¸Ń'] +['ãĥ', '´'] +['Ġи', 'ÑģÑĤ'] +['با', 'شر'] +['ĠÑĥ', 'ÑĢов'] +['×ŀ', '×ķ×ĸ'] +['ab', 'ı'] +['wa', 'ż'] +['×ķצ', '×IJ×Ķ'] +['ÑĤ', 'веÑĢ'] +['à¸ŀัà¸Ļà¸ĺ', 'à¹Į'] +['׳', '×Ĵ×ĵ'] +['ãĤĭ', 'ãģĵãģ¨ãģĮãģ§ãģį'] 
+['ĠÑĤÑĢ', 'еб'] +['à¸ģร', 'ุà¸ĩ'] +['ØŃت', 'اج'] +['à¹Ģ', 'à¸Ħล'] +['ã', 'Ĩ'] +['ÄĻ', 'tr'] +['Ġszcz', 'eg'] +['Ġר', 'ש'] +['à¸Ĺ', 'à¸ĺ'] +['Ġн', 'ек'] +['Ġнек', 'оÑĤоÑĢ'] +['в', 'ÑĪ'] +['Ð', '¬'] +['à¹Īว', 'ย'] +['ล', 'ุ'] +['б', 'ÑĢÑı'] +['หม', 'ูà¹Ī'] +['à¹ģ', 'à¸ķà¸ģ'] +['ר׼', '×Ļ×Ŀ'] +['Ġí', 'ĸī'] +['ã', 'i'] +['Ùĥر', 'Ø©'] +['â', 'Ń'] +['í', 'IJ'] +['ã', 'į'] +['á', 'ģ'] +['â', '®'] +['â', '¥'] +['ì', '®'] +['à', '¿'] +['â', '¿'] +['á', 'Ĥ'] +['á', '¤'] +['â', 'ł'] +['í', 'Ł'] +['ðIJ', 'į'] +['ðIJ', '°'] +['ðĿ', 'Ĩ'] +['ðŁ', 'Ī'] +['Ġ×¢', '׾'] +['Ġع', 'ÙĨ'] +['ĠÙħ', 'ع'] +['Ġ×ĸ', '×Ķ'] +['ĠÙħ', 'ا'] +['Ġm', 'Ãł'] +['Ġd', 'ụ'] +['á»ĩ', 'c'] +['а', 'Ñħ'] +['s', 'ı'] +['íķĺ', 'ê³ł'] +['Ġ×ķ', '×ij'] +['ĠÐŁ', 'о'] +['×ķת', 'ר'] +['ĠÙĦ', 'Ùħ'] +['Ġ×ķ', '׾'] +['ãģĹãģ¦', 'ãģĦãĤĭ'] +['Ġ×ŀ', '×Ļ'] +['Ġب', 'ÙĬÙĨ'] +['з', 'а'] +['ĠÙĥ', 'اÙĨ'] +['Ġ×Ķ', '×Ļ×Ķ'] +['ëħ', 'Ħ'] +['×IJ', '×ķ'] +['д', 'и'] +['ĠпеÑĢ', 'е'] +['d', 'ı'] +['Ġ׾', 'ש'] +['Ġש', '×ŀ'] +['ãģĮ', 'ãģĤãĤĭ'] +['ãģĦ', 'ãģĦ'] +['ÑĢ', 'е'] +['×§', '×ķ'] +['и', 'ли'] +['м', 'е'] +['ÙĬ', 'ت'] +['ãģ§', 'ãģĤãĤĭ'] +['Ġв', 'о'] +['à¹ĥ', 'หม'] +['à¹ĥหม', 'à¹Ī'] +['Ġש', '×ij'] +['Ġ', 'à¹Ĥà¸Ķย'] +['ÙĬ', 'Ùĩ'] +['ãģ§ãģĻ', 'ãģĮ'] +['ãģ¨', 'ãģ¯'] +['ר', '×ķ'] +['Ġ', 'à¸ĭึà¹Īà¸ĩ'] +['ãģ§ãģį', 'ãĤĭ'] +['м', 'о'] +['à¹Ģà¸ŀ', 'ืà¹Īà¸Ń'] +['צ', '×ķ'] +['×ĺ', '×ķ'] +['ìķ', 'Ī'] +['Ġh', 'á»į'] +['à¹Ģà¸ĩ', 'ิà¸Ļ'] +['ĠاÙĦ', 'ب'] +['Ġ', 'มี'] +['ë¬', '¼'] +['Ñģ', 'е'] +['ëĵ¤', 'ìĿ´'] +['Ġë§', 'IJ'] +['Ġl', 'Ỽ'] +['a', 'ÅĤ'] +['×Ĺ', '×ijר'] +['Ġd', 'á»±'] +['ÙĬ', 'Ø«'] +['Ġth', 'á»ĭ'] +['à¸ģà¹Ī', 'à¸Ńà¸Ļ'] +['Ġ×ij', '׼׾'] +['ãģ', '¸'] +['ã썿ĢĿ', 'ãģĦãģ¾ãģĻ'] +['ả', 'nh'] +['ย', 'า'] +['Ùģ', 'ا'] +['ส', 'ี'] +['à¸ķ', 'า'] +['ë²', 'ķ'] +['ãĥª', 'ãĥ¼'] +['รา', 'à¸Ħา'] +['Ġ×ķ', '׾×IJ'] +['ãģ¨', 'ãģĵãĤį'] +['à¹Ģล', 'ืà¸Ń'] +['di', 'ÄŁi'] +['ÙĪ', 'اÙĨ'] +['Ġ׾×Ķ', 'ת'] +['รว', 'ม'] +['פ', '×Ļ×Ŀ'] +['à¸ľ', 'ม'] +['ж', 'и'] +['c', 'ı'] +['ÑĢ', 'од'] +['Ġkar', 'ÅŁÄ±'] +['×Ĵ', '×ķ'] +['ãģ«', 'ãģ¤'] +['ãģ«ãģ¤', 'ãģĦãģ¦'] +['r', 'Ãł'] +['×Ļ×ķת', 
'ר'] +['ĠìĨ', 'Į'] +['×§', '×Ķ'] +['ÑģÑĤв', 'о'] +['ãģij', 'ãģ©'] +['g', 'é'] +['à¸Ķ', 'à¹īาà¸Ļ'] +['çļĦ', 'ãģ«'] +['ĠÙĬ', 'ÙħÙĥÙĨ'] +['ìĨ', 'į'] +['ÙĬ', 'Ùĥ'] +['à¹Ħว', 'à¹ī'] +['Ñģки', 'й'] +['ì', 'm'] +['Ġ׾×IJ', '×Ĺר'] +['à¸Ńา', 'หาร'] +['Ġà¹Ģ', 'à¸ŀ'] +['รา', 'ะ'] +['ล', 'ูà¸ģ'] +['ÑģÑĤ', 'а'] +['Ġìľ', 'ł'] +['ÙĤ', 'ÙĪÙĦ'] +['б', 'оÑĢ'] +['Ñģк', 'ого'] +['หล', 'ัà¸ĩ'] +['à¸Ĥ', 'à¹Īาว'] +['à¹Ģม', 'ืà¸Ńà¸ĩ'] +['ê°', 'ģ'] +['t', 'Ãł'] +['ÙĬ', 'ÙĬÙĨ'] +['عر', 'ض'] +['ë°', '©'] +['Ġëı', 'Ļ'] +['Ġà¹Ģ', 'à¸Ľ'] +['Ġà¹Ģà¸Ľ', 'à¹ĩà¸Ļ'] +['ç', 'i'] +['li', 'ÄŁi'] +['ìĹIJ', 'ê²Į'] +['ãĤ¿', 'ãĥ¼'] +['Ġ׾', 'ת'] +['פ', '×ķת'] +['à¸Ĥ', 'à¸Ń'] +['ر', 'س'] +['ìł', 'IJ'] +['à¸ľ', 'à¹Īาà¸Ļ'] +['ÑĦ', 'и'] +['ج', 'ÙĨ'] +['ì¢', 'ħ'] +['Ġ×Ķ', 'פ'] +['Ġn', 'go'] +['á»ĭ', 'a'] +['Ġtá»', 'ķ'] +['Ġê·¸', '리'] +['à¹Ģม', 'ืà¹Īà¸Ń'] +['ذ', 'Ùĥر'] +['ìĸ', 'ij'] +['ìĹ', 'Ń'] +['×ĺ', '׾'] +['k', 'ı'] +['Ġع', 'ÙħÙĦ'] +['Ġع', 'ÙĨد'] +['à¸ĭ', 'ืà¹īà¸Ń'] +['Ġê±', '°'] +['в', 'е'] +['r', 'ü'] +['à¹Ģ', 'à¸Ńา'] +['ส', 'à¹Į'] +['à¸Ī', 'à¸Ļ'] +['ס', 'ת'] +['Ġgi', 'ả'] +['ãĤĭ', 'ãģ¨'] +['à¸ģำ', 'ลัà¸ĩ'] +['н', 'ей'] +['à¸Ī', 'ริ'] +['à¸Īริ', 'à¸ĩ'] +['Ġë', 'į'] +['Ġëį', 'Ķ'] +['à¸Ħà¹Ī', 'ะ'] +['ì', 'n'] +['Ġsü', 're'] +['Ġqu', 'y'] +['à¸ļ', 'าà¸ĩ'] +['åıĸ', 'ãĤĬ'] +['ר', '×Ĺ'] +['×ij', 'ת'] +['ãģĮ', 'ãģĤãĤĬãģ¾ãģĻ'] +['ר', 'ש'] +['ìĹIJ', 'ëĬĶ'] +['Ġ×IJ', 'פשר'] +['ay', 'ı'] +['ãģĮ', 'ãĤī'] +['ØŃ', 'ب'] +['ан', 'Ñģ'] +['س', 'ÙĪ'] +['ĠпÑĢ', 'е'] +['د', 'ÙĪ'] +['ãģ«', 'ãĤĪ'] +['à¹Ģà¸ģ', 'ม'] +['สู', 'à¸ĩ'] +['m', 'akt'] +['makt', 'ad'] +['maktad', 'ır'] +['Ġön', 'em'] +['×Ļ×ŀ', '×Ļ×Ŀ'] +['б', 'о'] +['ÙĪ', 'ÙĬØ©'] +['รู', 'à¸Ľ'] +['à¹Ĥล', 'à¸ģ'] +['Ùħ', 'ÙĬع'] +['ÑģÑĤ', 'Ñĥп'] +['à¹Ĥ', 'à¸Ń'] +['دÙĬ', 'ÙĨ'] +['ì¤', 'ij'] +['ãģĹãģ', 'ı'] +['à¹Ģส', 'ีย'] +['в', 'Ñĭ'] +['Ùħ', 'ت'] +['íĺ', 'Ħ'] +['ãĥIJ', 'ãĥ¼'] +['ا', 'Ø´'] +['×§', 'ס'] +['Ġtá»', '¥'] +['ล', 'à¸Ķ'] +['Ùģ', 'Ø©'] +['í', 'ijľ'] +['ر', 'ج'] +['k', 'ÅĤad'] +['ĠÅŁ', 'ey'] +['ĠØ£', 'Ùħ'] +['Ġà¹Ģ', 'ม'] +['Ġب', 'ÙĦ'] +['Ñģ', 'каÑı'] +['ãģ¨', 'ãģ®'] 
+['Ġìĭ', '¤'] +['ấ', 'm'] +['ห', 'à¹īà¸Ńà¸ĩ'] +['à¸Ĭ', 'ม'] +['d', 'ü'] +['Ġç', 'ek'] +['Ġê³', 'ł'] +['×Ĵ', '×ij'] +['à¸Ĭี', 'วิ'] +['à¸Ĭีวิ', 'à¸ķ'] +['Ù쨶', 'ÙĦ'] +['à¸', '¯'] +['ç', 'ı'] +['Ġب', 'Ø´'] +['ĠÙĩ', 'ÙĨا'] +['ãģį', 'ãģ¾ãģĹãģŁ'] +['t', 'ü'] +['Ġìĺ', 'ģ'] +['ĠTür', 'k'] +['к', 'ÑĤ'] +['פר', 'ס'] +['ãģ¨ãģĦãģĨ', 'ãģĵãģ¨'] +['í', 'ĶĦ'] +['à¹ģร', 'à¸ģ'] +['ר', '×ķף'] +['Ġar', 'as'] +['×ŀצ', '×IJ'] +['Ġtá»', 'ī'] +['س', 'ا'] +['à¸ŀ', 'à¸Ń'] +['ĠاÙĦÙħ', 'ØŃ'] +['ãĥ', '¤'] +['ĠاÙĦ', 'است'] +['Ùģ', 'ÙĨ'] +['×Ļ×ŀ', '×Ķ'] +['ر', 'ت'] +['ãģ¨', 'ãĤĤ'] +['Ġна', 'Ñģ'] +['п', 'ÑĢи'] +['Ġ×Ĺ', '×ķ'] +['и', 'ла'] +['ÙĬ', 'Ø´'] +['Ġgö', 'z'] +['Ġ×ij', '׳×Ļ'] +['ım', 'ı'] +['ĠÑĤ', 'еÑħ'] +['Ġh', 'á»Ļ'] +['غ', 'ر'] +['к', 'он'] +['اØŃ', 'ت'] +['Ġ', 'à¸ŀ'] +['à¸Ń', 'à¸Ńà¸Ļ'] +['à¸Ńà¸Ńà¸Ļ', 'à¹Ħล'] +['à¸Ńà¸Ńà¸Ļà¹Ħล', 'à¸Ļà¹Į'] +['Ñħ', 'о'] +['Ñı', 'в'] +['à¹ģ', 'สà¸Ķ'] +['à¹ģสà¸Ķ', 'à¸ĩ'] +['à¹Ģà¸ŀ', 'ียà¸ĩ'] +['ÑĤ', 'ов'] +['ا', 'ÙĬ'] +['Ġ×Ķ', '×ĵ'] +['Ġ×ķ', '׼'] +['ãĤī', 'ãģĦ'] +['×ķפ', 'ף'] +['Ġë', '¶Ī'] +['ล', 'à¸Ńà¸ĩ'] +['Ø·', 'اÙĦ'] +['Ġн', 'и'] +['ĠÙħ', 'ست'] +['ế', 'c'] +['Ġש', '׼'] +['ĠëķĮ', '문'] +['วัà¸Ļ', 'à¸Ĺีà¹Ī'] +['×Ļ׾', '×ĵ'] +['ØŃ', 'ا'] +['е', 'ÑĨ'] +['Ġc', 'ứ'] +['×ĵ', '×ķר'] +['ĠÙħ', 'ØŃ'] +['ר׼', '×ij'] +['بÙĬ', 'ع'] +['ни', 'и'] +['ĠاÙĦØ£', 'ÙĪÙĦ'] +['à¸Ħว', 'ร'] +['ã썿ĢĿ', 'ãģĨ'] +['ĠС', 'о'] +['ائ', 'ÙĬØ©'] +['ر', 'اء'] +['оÑģ', 'об'] +['Ġب', 'Ø£ÙĨ'] +['×¢', '×ķ×ĵ'] +['ĠÑĤ', 'е'] +['ãģĵ', 'ãģĨ'] +['ÑģÑĤ', 'ÑĢа'] +['ай', 'н'] +['Ġsö', 'z'] +['ت', 'ÙĨا'] +['à¸Ń', 'ิ'] +['ặ', 'p'] +['ĠìķĦ', 'ëĭĪ'] +['íķ', 'Ń'] +['Ġר×IJ', 'ש'] +['Ġ', 'à¹Ħà¸Ķà¹ī'] +['Ġ×Ĵ', '×ĵ'] +['Ġס', 'פר'] +['обÑī', 'е'] +['ĠÙĪ', 'Ø¥'] +['ada', 'ÅŁ'] +['ãģ¡', 'ãĤĩ'] +['×§', '×ķ׾'] +['ÑĢ', 'ез'] +['ĠdÃ¼ÅŁ', 'ün'] +['Ġ×ij', '×IJ×ŀ'] +['Ġìĸ´', 'ëĸ'] +['ער', '×ij'] +['н', 'ее'] +['ĠÑģÑĤÑĢ', 'ан'] +['س', 'اÙĨ'] +['yn', 'ı'] +['ĠاÙĦر', 'ئÙĬس'] +['ãģĹãģ', 'ª'] +['Ġ׳', 'ת'] +['ãģ«ãģª', 'ãģ£ãģŁ'] +['g', 'ü'] +['åıĹ', 'ãģij'] +['׾', 'ת'] +['ìł', 'Ī'] +['ëĬĶ', 'ëį°'] +['Ø®', 'ÙĬر'] +['à¸ķà¹īà¸Ńà¸ĩ', 
'à¸ģาร'] +['ĠÙĦ', 'Ø£ÙĨ'] +['Ġch', 'á»ĭ'] +['ÙĪ', 'Ø©'] +['à¹ĥ', 'ส'] +['ë¶Ģ', 'íĦ°'] +['íķĺ', 'ë©´'] +['ữ', 'u'] +['à¹Ģหม', 'ืà¸Ńà¸Ļ'] +['б', 'еÑĢ'] +['ĠìĿ´', 'ìļ©'] +['ĠÑģ', 'еб'] +['wiÄĻ', 'ks'] +['Ġ׳', '×¢'] +['ÑĤ', 'ÑĥÑĢ'] +['Ġngh', 'Ä©'] +['ש', '×ķ×ĺ'] +['ti', 'ÄŁi'] +['Ġde', 'ÄŁi'] +['×IJ', '×ij'] +['Ġ×ŀ', '×ŀ'] +['ãĥĹ', 'ãĥŃ'] +['wa', 'ÅĤ'] +['à¸Ī', 'ึà¸ĩ'] +['Ø®', 'دÙħ'] +['×IJ', '×Ŀ'] +['Ä±ÅŁ', 'ı'] +['cz', 'Äħ'] +['ר', '×ĵ'] +['ĠÑĢ', 'Ñĥб'] +['خر', 'Ùī'] +['ãģ®', 'æĸ¹'] +['Ġд', 'енÑĮ'] +['×Ĺ', '×Ļ×Ŀ'] +['еÑĤ', 'е'] +['ëĤ', 'ľ'] +['×IJ', '×Ĵ'] +['×¢', '×ķר'] +['ë³', 'Ħ'] +['åIJĮ', 'ãģĺ'] +['ãĤ', '²'] +['ר', '×ļ'] +['×ķש', '×IJ'] +['ìľ', '¡'] +['ا', 'Ø®'] +['צ', '×Ļ×Ķ'] +['á»±', 'a'] +['ãģĪ', 'ãģ¦'] +['ש×Ķ', '×ķ'] +['ан', 'ÑĤ'] +['ลา', 'à¸Ķ'] +['ин', 'г'] +['ë¡', 'ł'] +['اع', 'د'] +['ÙĪ', 'سط'] +['Ġв', 'оп'] +['Ġвоп', 'ÑĢоÑģ'] +['Ùħ', 'ÙĬÙĨ'] +['à¸Ħ', 'à¸ĩ'] +['×Ļר', '×Ļ×Ŀ'] +['c', 'ów'] +['ê²', '©'] +['Ġê·¸', '룰'] +['Ġì§', 'Ħ'] +['Ġש', '׾×Ķ'] +['à¹Ģร', 'ิà¹Īม'] +['à¸Ĭ', 'à¸Ńà¸ļ'] +['д', 'еÑĤ'] +['ÑİÑī', 'иÑħ'] +['à¸ļ', 'à¸Ńà¸ģ'] +['æĢĿ', 'ãģĦ'] +['ع', 'ÙĬد'] +['ס', '×ŀ'] +['×Ĵ', '×Ļ×¢'] +['צ', '×ĵ'] +['ب', 'ات'] +['ĠëͰ', 'ëĿ¼'] +['à¸Ī', 'ัà¸ĩ'] +['ãģłãģij', 'ãģ§'] +['×¢', '×Ļר'] +['ĠÑĩ', 'ел'] +['ĠÑĩел', 'ов'] +['ĠÑĩелов', 'ек'] +['ãĥĥ', 'ãĥģ'] +['à¹Ģà¸ģ', 'ีà¹Īยว'] +['à¸Ķ', 'ิ'] +['Ġפ', '×¢'] +['×Ļ×ŀ', '×Ļ'] +['ë°', 'ĺ'] +['Ø®', 'ار'] +['×ij', '×Ļת'] +['×¢', '×Ļ×Ŀ'] +['ü', 'yor'] +['ãĤģ', 'ãģ¦'] +['к', 'лад'] +['Ġ', 'à¸Īาà¸ģ'] +['à¹Ģà¸Ħ', 'ย'] +['ส', 'à¸Ńà¸ĩ'] +['à¹ģ', 'à¸Ħà¹Ī'] +['ẫ', 'u'] +['หà¸Ļ', 'ัà¸ĩ'] +['ש׾', '×ķ×Ŀ'] +['اÙĨ', 'ÙĬØ©'] +['åĩº', 'ä¼ļ'] +['åĩºä¼ļ', 'ãģĦ'] +['à¸ł', 'าย'] +['à¸ļา', 'à¸Ĺ'] +['à¸Ĭา', 'ว'] +['mu', 'ÅŁ'] +['Ġ׾ק', '×ij׾'] +['ãĤ·', 'ãĥ£'] +['Ġİ', 'ÅŁ'] +['×Ĵ×ĵ', '×ķ׾'] +['ج', 'عÙĦ'] +['ë³', 'Ģ'] +['ยิ', 'à¹Īà¸ĩ'] +['à¸Ļ', 'าย'] +['à¸Ļ', 'ีà¹Ī'] +['วิ', 'à¸ĺี'] +['ãĤī', 'ãģªãģĦ'] +['ëł', 'Ī'] +['Ġ문', 'ìłľ'] +['Ġ', 'à¸ģ'] +['à¸Ĺำ', 'à¸ĩาà¸Ļ'] +['à¹Ģว', 'à¹ĩà¸ļ'] +['ÑĦ', 'е'] +['楽', 'ãģĹ'] +['สำ', 'à¸Ħ'] +['สำà¸Ħ', 'ัà¸į'] 
+['ر', 'Ùħ'] +['ãģķãĤĮ', 'ãģ¦'] +['Ġоб', 'ла'] +['ר×IJ', '×Ļ'] +['หม', 'à¸Ķ'] +['ÙĨ', 'ÙĬØ©'] +['ли', 'н'] +['Ġe', 'ÄŁ'] +['it', 'im'] +['ëł', '¹'] +['ص', 'اÙĦ'] +['ÅĽ', 'l'] +['à¸ľ', 'ิà¸Ķ'] +['ãĥŀ', 'ãĥ³'] +['åħ¥', 'ãĤĮ'] +['à¹Ģà¸ķ', 'à¸Ńรà¹Į'] +['ار', 'ÙĬ'] +['ĠÐ', '¦'] +['d', 'ür'] +['ส', 'วย'] +['ë¦', '½'] +['رÙĥ', 'Ø©'] +['Ġh', 'ã'] +['×Ļת', '×Ķ'] +['à¸Ĥ', 'à¸Ļา'] +['à¸Ĥà¸Ļา', 'à¸Ķ'] +['à¸Īำ', 'à¸Ļ'] +['à¸Īำà¸Ļ', 'วà¸Ļ'] +['ש', '×ķ×§'] +['Ġд', 'ом'] +['ì±', 'ħ'] +['ãģĭ', 'ãģij'] +['פ', '×ķ׾'] +['à¸Ĭ', 'าย'] +['Ñģ', 'моÑĤÑĢ'] +['Ñģл', 'Ñĥж'] +['ש', '×IJ׾'] +['кÑĢÑĭ', 'ÑĤ'] +['Ġìŀ', 'ĺ'] +['é«ĺ', 'ãģĦ'] +['ĠÑĢ', 'Ñĥк'] +['ÙĨ', 'ص'] +['д', 'ав'] +['ưá»', '¡'] +['ưỡ', 'ng'] +['ر', 'اÙħ'] +['×Ļ׳', '×Ļ×Ŀ'] +['ãĥ©', 'ãĥ¼'] +['ëĦ', '¤'] +['Ġت', 'ع'] +['l', 'ke'] +['好', 'ãģį'] +['æĮģ', 'ãģ¡'] +['Ġë§', 'İ'] +['Ġy', 'ük'] +['ĠÑģоÑģÑĤ', 'ав'] +['енÑĤ', 'ÑĢ'] +['pe', 'ÅĤ'] +['à¹Ģà¸Ľà¸¥', 'ีà¹Īย'] +['à¹Ģà¸Ľà¸¥à¸µà¹Īย', 'à¸Ļ'] +['íı', 'ī'] +['ãĤĦ', 'ãģĻ'] +['×Ĺ', '×ĸ'] +['×ijר', '×Ķ'] +['ë£', '¨'] +['ìĶ', 'Ģ'] +['بØŃ', 'Ø«'] +['à¹Ģà¸ķ', 'à¹ĩ'] +['ów', 'i'] +['ب', 'Ùĩ'] +['ãģį', 'ãģ¾ãģĻ'] +['Ġ×¢', '×ŀ'] +['×Ĵ', '×ķ׾'] +['ез', 'д'] +['ÙĬÙģ', 'Ø©'] +['สà¸Ļ', 'à¹ĥà¸Ī'] +['Ġת', '׾'] +['Ñı', 'Ñī'] +['Ġس', 'ÙĨ'] +['ĠÙĪØ§', 'ØŃد'] +['ĠÑģ', 'м'] +['lad', 'ı'] +['ı', 'ld'] +['×Ļר', 'ת'] +['ีย', 'à¸Ļ'] +['ת×Ĺ', 'ת'] +['Ġж', 'из'] +['à¸ŀ', 'ั'] +['à¸ŀั', 'à¸Ĵ'] +['à¸ŀัà¸Ĵ', 'à¸Ļา'] +['à¸Ĭ', 'ิ'] +['ا', 'Ø®ÙĦ'] +['ãģ£ãģ¦', 'ãģĦãģŁ'] +['รั', 'à¸IJ'] +['ãĤģ', 'ãĤĭ'] +['à¹Ĥ', 'à¸ģ'] +['ĠT', 'á»ķ'] +['Ġh', 'akk'] +['ر', 'Ùģ'] +['ìł', 'Ģ'] +['Ñģ', 'об'] +['ãģª', 'ãģijãĤĮãģ°'] +['Ùĩ', 'ÙĪ'] +['Ġë²', 'ķ'] +['ãĤ', 'Ĩ'] +['ĠاÙĦس', 'عÙĪØ¯'] +['Ġ×IJ', 'תר'] +['اØ', 'º'] +['Ġ׾', '×ĵ'] +['à¹ģ', 'à¸ķ'] +['à¹ģà¸ķ', 'à¹Īà¸ĩ'] +['íĮ', 'Į'] +['Ñĥп', 'иÑĤÑĮ'] +['à¸ŀืà¹īà¸Ļ', 'à¸Ĺีà¹Ī'] +['×ij', 'ת×Ļ'] +['à¹ĩ', 'à¸ģ'] +['ÅĤ', 'at'] +['Ġê°ľ', 'ìĿ¸'] +['ìłķ', 'ë³´'] +['ÑĤ', 'ал'] +['Ġgü', 'ven'] +['Ġİ', 'l'] +['Ġê°', 'ģ'] +['Ġب', 'ت'] +['×ŀ', '×ķ׳×Ķ'] +['ĠاÙĦØŃ', 'ÙĥÙĪÙħ'] +['ÙĤ', 'ات'] +['à¹ģ', 'à¸ģà¹Ī'] 
+['ห', 'าà¸ģ'] +['н', 'ÑĮ'] +['à¸Ľ', 'รัà¸ļ'] +['มา', 'à¸ĵ'] +['Ġне', 'Ñģк'] +['ĠØ', '¶'] +['สม', 'ั'] +['สมั', 'à¸Ħร'] +['ãģĮ', 'ãģĤãĤĬ'] +['м', 'еÑģÑĤ'] +['Ġ×IJ', 'צ׾'] +['Ġкомп', 'ани'] +['ס', 'ר'] +['ÙĬÙħ', 'Ø©'] +['ĠÑħ', 'оÑĢо'] +['ĠÑħоÑĢо', 'ÑĪ'] +['Ġ×Ļ', '×ķ×ĵ'] +['ü', 's'] +['×Ĵ', '×Ļש'] +['à¸ļ', 'à¸Ĺ'] +['تÙĨ', 'ظ'] +['ว', 'าà¸ĩ'] +['ม', 'หา'] +['Ġ׼', '×ķ׾'] +['à¸Ĥ', 'à¹īาà¸ĩ'] +['ë°', 'ľ'] +['г', 'од'] +['д', 'ан'] +['ãģĭãĤĤãģĹãĤĮ', 'ãģ¾ãģĽãĤĵ'] +['ãģĵ', 'ãģ¡ãĤī'] +['ãĥIJ', 'ãĤ¤'] +['ece', 'ÄŁi'] +['دÙĬ', 'دة'] +['ÙĨ', 'Ùī'] +['Ġëĭ¤', 'ìĿĮ'] +['ว', 'ี'] +['غ', 'ا'] +['ли', 'з'] +['à¹Ģà¸Ķ', 'ิ'] +['à¹Ģà¸Ķิ', 'ม'] +['ĠÙĬ', 'ست'] +['Ġy', 'ılı'] +['ko', 'ÅĦ'] +['ãģ§ãģĹãĤĩãģĨ', 'ãģĭ'] +['ãģĤ', 'ãģª'] +['ãģĤãģª', 'ãģŁ'] +['ÑĨ', 'ен'] +['ĠÙĪ', 'ز'] +['×IJ', '×Ļש'] +['à¹Ī', 'à¸Ń'] +['ر', 'ØŃ'] +['ê´', 'ij'] +['ÑĢа', 'ÑģÑĤ'] +['Ġ×Ķ', '׾'] +['ãģĹãģ¦', 'ãĤĤ'] +['×ŀר', '׼'] +['×ŀר׼', '×ĸ'] +['éģķ', 'ãģĦ'] +['ãģŁ', 'ãģı'] +['ĠÑģ', 'Ñĥд'] +['в', 'еÑģÑĤи'] +['ĠíķĦ', 'ìļĶ'] +['ãĥķ', 'ãĤ§'] +['ÑĤелÑĮ', 'но'] +['à¹Ģà¸ŀ', 'ืà¹Īà¸Ńà¸Ļ'] +['ÅĤu', 'ż'] +['à¹Ģà¸Ķิà¸Ļ', 'à¸Ĺาà¸ĩ'] +['ש', '×ķר'] +['Ġ×ŀ', '×ĵ'] +['×ķ×¢', '׾'] +['ÙĦ', 'اÙħ'] +['à¹Ħ', 'à¸ĭ'] +['л', 'ей'] +['кÑĥ', 'ÑĢ'] +['áº', '¢'] +['à¸Ĺ', 'าà¸Ļ'] +['ì§', 'ij'] +['ĠгоÑĢ', 'од'] +['ר', 'ס'] +['׾', '×ķ×Ĵ'] +['mas', 'ını'] +['Ġл', 'ÑĥÑĩ'] +['ล', 'à¹Īา'] +['ìļ', '¸'] +['ש', '×ĺ'] +['ĠÐĺ', 'н'] +['í', 'Ĥ¤'] +['ÙĪÙĦ', 'ا'] +['ìķ', 'ł'] +['ĠØ£ÙĬ', 'ضا'] +['Ùĥ', 'ار'] +['ĠاÙĦت', 'ع'] +['ส', 'ูà¹Ī'] +['ãĤ', '¼'] +['×ij', '×Ļ×IJ'] +['ย', 'à¸ģ'] +['ĠØŃ', 'ÙĤ'] +['ر', 'بÙĬ'] +['ãģĺãĤĥ', 'ãģªãģĦ'] +['รัà¸ģ', 'ษา'] +['Ñħод', 'иÑĤ'] +['à¸ķ', 'à¸Ńà¸ļ'] +['׳', '×ĺ×Ļ'] +['ĠاÙĦÙħ', 'ج'] +['تÙħ', 'ع'] +['ов', 'аÑĤÑĮ'] +['ÙĦ', 'ÙĬÙĨ'] +['×Ļ×ŀ', '×ķת'] +['Ġm', 'ù'] +['n', 'ÄĻ'] +['Ġد', 'ÙĬ'] +['׼', 'ש×Ļ×ķ'] +['Ġhi', 'ç'] +['ë', 'ijIJ'] +['ÙĪ', 'اء'] +['ÙĪ', 'Ø·'] +['ĠاÙĦ', 'بÙĦ'] +['à¹ģม', 'à¹ī'] +['×§', '×ķת'] +['ÙĪØ¬', 'د'] +['å§ĭ', 'ãĤģ'] +['ÙĬ', 'ئة'] +['Ġë§', '¤'] +['ص', 'بØŃ'] +['פ', '×IJ'] +['г', 'оÑĢ'] +['ס', '×Ķ'] +['بÙĬ', 
'ÙĤ'] +['ย', 'าà¸ģ'] +['Ġн', 'ад'] +['ÙĬ', 'Ùij'] +['Ġب', 'ÙĪ'] +['ס', '×ķר'] +['Ùħ', 'ÙĥاÙĨ'] +['ר', '×ij'] +['×Ĵ', '×ĸ'] +['צ', 'ת'] +['b', 'ilit'] +['л', 'аг'] +['ĠN', 'go'] +['×IJ', '×ķר'] +['à¸ķ', 'à¸Ļ'] +['íĬ', '¹'] +['à¸Ĺีà¹Ī', 'à¸Ķี'] +['à¸Ľà¸£à¸°', 'à¸Īำ'] +['ов', 'ание'] +['ãģĦ', 'ãģ¤'] +['ãĥĥãĤ¯', 'ãĤ¹'] +['åIJĪ', 'ãĤı'] +['åIJĪãĤı', 'ãģĽ'] +['×Ļ׳', '×ķ×Ļ'] +['ạ', 'y'] +['Ø«', 'ÙĤ'] +['ĠпÑĢ', 'об'] +['ĠпÑĢоб', 'лем'] +['ÅŁ', 'eh'] +['ÅŁeh', 'ir'] +['ع', 'ادة'] +['اÙĨ', 'ÙĪÙĨ'] +['à¸ķัว', 'à¹Ģà¸Ńà¸ĩ'] +['ì¶', 'ķ'] +['ı', 'lan'] +['б', 'ан'] +['ãĥ³', 'ãĥī'] +['à¸Ī', 'ี'] +['Ġ×Ķש', '׳×Ļ'] +['п', 'оÑĤ'] +['×ķ׾', '×Ļ×Ŀ'] +['ล', 'ัà¸ļ'] +['ĠÑį', 'ÑĤи'] +['×ij×§', 'ש'] +['ë¹Ħ', 'ìĬ¤'] +['à¸Ńยà¹Īาà¸ĩ', 'à¹Ħร'] +['×Ļ׾', '×Ļ'] +['à¹ĥà¸Ĭ', 'à¹Ī'] +['ĠاÙĦ', 'ÙĥÙĦ'] +['ãĥļ', 'ãĥ¼ãĤ¸'] +['ص', 'Ø©'] +['ÑĤи', 'ÑĢ'] +['ãĤĵ', 'ãģ©'] +['зÑĭ', 'к'] +['wy', 'ż'] +['Ùĩ', 'ÙĬ'] +['ĠÙħ', 'ÙĦÙĬ'] +['Ġвид', 'е'] +['ظ', 'اÙħ'] +['دا', 'ÙĪÙĦ'] +['×ŀ', 'ת×Ļ'] +['Ġs', 'ık'] +['à¹Ģà¸ķิ', 'ม'] +['ãĤ¢', 'ãĤ¤'] +['ка', 'Ñħ'] +['צ', '×Ļ׾'] +['à¹Ģà¸Ĭ', 'à¹Īà¸Ļ'] +['м', 'аг'] +['маг', 'аз'] +['магаз', 'ин'] +['à¸Ľ', 'ั'] +['à¸Ľà¸±', 'à¸Ī'] +['Ġש', '×Ļר×ķת'] +['ีย', 'ม'] +['ãĥĸ', 'ãĥ«'] +['Ġد', 'ÙĪÙĦ'] +['קר', '×Ļ×Ŀ'] +['Ùĩ', 'Ùı'] +['ов', 'о'] +['Ġü', 'ret'] +['د', 'ÙĪÙĨ'] +['à¹ģà¸Ļ', 'ว'] +['à¹Ģà¸Ļ', 'ืà¹īà¸Ń'] +['ĠÑĦ', 'оÑĤ'] +['ãĥ', 'ĺ'] +['ãģ¤', 'ãģĭ'] +['Ñı', 'Ñģ'] +['ĠíķĺëĤĺ', 'ëĭĺ'] +['ائ', 'ع'] +['Ġп', 'лаÑĤ'] +['ìĺ', 'Ī'] +['Ġdost', 'ÄĻp'] +['ÙĪØ¬', 'Ùĩ'] +['Ġ×Ķ', '×Ĺ×Ļ'] +['׳', '×Ļ×§'] +['д', 'ей'] +['í', 'ĽĦ'] +['ı', 'y'] +['بØŃ', 'ر'] +['à¹Ģส', 'ริม'] +['Ġ׾', '×Ĵ'] +['ذÙĩ', 'ب'] +['ج', 'ÙĬÙĦ'] +['رÙĥ', 'ز'] +['Ġë', 'ħ'] +['Ġëħ', '¸'] +['פ×Ļ׾', '×ķ'] +['ãģ¾', 'ãģļ'] +['iri', 'ÅŁ'] +['ĠÙĥ', 'ÙĬÙģ'] +['Ġ×ij', 'צ'] +['Ġêµ', 'IJ'] +['ÑĢоÑģ', 'Ñģ'] +['ĠØ´', 'ÙĬ'] +['Ġiç', 'er'] +['×Ĵ', '×ķ×ij×Ķ'] +['мен', 'но'] +['×¢', '×ij×Ļר'] +['×ķ×ŀ', '×Ķ'] +['ãĤī', 'ãģĹãģĦ'] +['ãģ', '¼'] +['Ñī', 'ин'] +['è²·', 'ãģĦ'] +['جÙħÙĪØ¹', 'Ø©'] +['Ġdön', 'em'] +['Ġ×ij', '×IJר'] +['в', 'еÑģÑĤ'] +['×ķר', 
'×ķת'] +['س', 'Ùģ'] +['à¹ģà¸Ĺ', 'à¸Ļ'] +['Ġд', 'окÑĥменÑĤ'] +['Ġا', 'ÙĬ'] +['ج', 'اÙĨ'] +['צ×ķ×¢', '×Ļ'] +['ĠоÑģ', 'об'] +['ĠاÙĦÙħ', 'س'] +['ÑĢаÐ', '±'] +['à¸ł', 'ู'] +['à¸Ķ', 'าว'] +['л', 'екÑĤ'] +['ع', 'ÙĤ'] +['×ķ×ĵ', '×ķת'] +['Ġol', 'u'] +['Ġolu', 'ÅŁtur'] +['ãģ¾', 'ãģ¾'] +['ед', 'ин'] +['à¹Ģ', 'à¸Ńà¸ģ'] +['ãĤµ', 'ãĤ¤'] +['ëĦ', 'Ī'] +['Ø·', 'ÙĨÙĬ'] +['Ø·', 'ÙĤØ©'] +['ĠÐł', 'аз'] +['ÙĦ', 'Ùij'] +['Ñĩ', 'ем'] +['Ġ׾', '×ĺ'] +['สั', 'à¹Īà¸ĩ'] +['سر', 'ائÙĬÙĦ'] +['Ġפר', '×ĺ×Ļ'] +['д', 'еÑģÑĮ'] +['Ġ׳', '׼'] +['اÙĨ', 'ب'] +['ÙĬا', 'Ø©'] +['Ùħ', 'بر'] +['Ġk', 'ı'] +['à¸Ľ', 'à¸ı'] +['à¸Ľà¸ı', 'ิ'] +['à¸ļั', 'à¸ķิ'] +['׳', 'ת×Ļ'] +['ìĨ', '¡'] +['ر', 'اب'] +['à¹ĥ', 'à¸ķ'] +['à¹ĥà¸ķ', 'à¹ī'] +['×Ļ׳', 'ת'] +['ÙĪ', 'ÙĬر'] +['Ġ×Ķ×ŀ', '×Ļ'] +['ей', 'ÑĩаÑģ'] +['×§', '×ķ×ij'] +['در', 'اس'] +['ĠÙħ', 'ÙĤ'] +['رÙĬ', 'ÙĨ'] +['Ø®', 'اص'] +['ãģĬ', 'éĩij'] +['Ġج', 'دا'] +['ãģĨ', 'ãģ¡'] +['ëħ', '¸'] +['ır', 'ım'] +['æ§', 'ĺ'] +['ãģ«', 'å¯'] +['ãģ«å¯', '¾'] +['ÑĨ', 'ев'] +['Ġv', 'ard'] +['ĠÐIJ', 'н'] +['e', 'ÄŁ'] +['ÑģÑĤв', 'енно'] +['Ð', '¨'] +['س', 'د'] +['à¸ģ', 'ุ'] +['à¹ģà¸ľ', 'à¸Ļ'] +['รูà¹ī', 'ส'] +['รูà¹īส', 'ึà¸ģ'] +['ات', 'ØŃاد'] +['Ñij', 'ÑĤ'] +['×Ĺ', '×ķ×§'] +['ãģĻ', 'ãģIJ'] +['Ø·', 'ÙĦاÙĤ'] +['Ġ×§', '×ķ×ĵ'] +['à¹ĥà¸Ĭ', 'à¹īà¸ĩ'] +['à¹ĥà¸Ĭà¹īà¸ĩ', 'าà¸Ļ'] +['ãĥ¼ãĤ', '¿'] +['Ġs', 'ür'] +['ÑĢ', 'ок'] +['ë³', 'ij'] +['สมา', 'à¸Ĭ'] +['สมาà¸Ĭ', 'ิà¸ģ'] +['ãĥķ', 'ãĥ¬'] +['è¾¼', 'ãģ¿'] +['ãĤ»', 'ãĥ³'] +['Ġê°Ģ', 'ì§Ģ'] +['à¸ľ', 'à¹īา'] +['ÑįÑĤ', 'омÑĥ'] +['иÑĤ', 'ел'] +['à¸ł', 'ั'] +['à¸', 'ij'] +['ãĥĸ', 'ãĥ©'] +['×Ľ×ª', '×ķ×ij'] +['׳', '×Ŀ'] +['ен', 'нÑĭе'] +['×¢', '×¨×Ľ×ª'] +['Ġì', 'Ĥ'] +['ĠìĤ', '´'] +['à¸Ĥ', 'à¹īา'] +['׳', '×ķס'] +['ãĥ¬', 'ãĥĵ'] +['ÑĢ', 'еÑģ'] +['à¹Ģล', 'à¸Ĥ'] +['Ø«', 'اÙĦ'] +['ìĹ', 'Ĩ'] +['ĠÑĩ', 'аÑģÑĤ'] +['า', 'ศ'] +['ãĥª', 'ãĤ¢'] +['u', 'ç'] +['×Ļ׼', '×ķת'] +['ล', 'à¹īาà¸Ļ'] +['i', 'ë'] +['ãĤ¸', 'ãĤ§'] +['à¸Ī', 'à¸Ń'] +['ÙĪ', 'ØŃد'] +['×Ļצ', '×ķ×ij'] +['Ġ×ij', 'ש׾'] +['ок', 'о'] +['ض', 'Ø©'] +['ذ', 'ر'] +['ĠÑĥ', 'д'] +['İ', 'L'] +['×ķצ', '×Ļ×Ŀ'] +['×ĸ', '×ŀף'] 
+['à¸Ľ', 'à¸ģ'] +['íķĻ', 'êµIJ'] +['س', 'اÙħ'] +['à¹Ħ', 'à¸Ķ'] +['ละ', 'à¹Ģà¸Ń'] +['ละà¹Ģà¸Ń', 'ีย'] +['ละà¹Ģà¸Ńีย', 'à¸Ķ'] +['ả', 'y'] +['аÑĨи', 'он'] +['ãĤ¹', 'ãĤ¯'] +['פ', '×ķס'] +['ร', 'à¹Īาà¸ĩ'] +['ен', 'нÑĭй'] +['ع', 'ÙĨ'] +['عÙĦ', 'ÙĨ'] +['ائ', 'Ùģ'] +['d', 'ÄĻ'] +['ؤ', 'ÙĪÙĦ'] +['׾×ķ', '×ķ'] +['Ġ×ij', 'ש×ij'] +['ä»Ĭ', 'åĽŀ'] +['ĠاÙĦج', 'ÙĨ'] +['د', 'اد'] +['wa', 'Äĩ'] +['ãĥª', 'ãĥ³'] +['ĠìŀIJ', 'ìĭł'] +['اÙĨ', 'ÙĬا'] +['ãĥ¡', 'ãĥª'] +['ÙĦ', 'ÙĪÙĨ'] +['à¸Ĺ', 'à¹Īà¸Ńà¸ĩ'] +['à¸Ĺà¹Īà¸Ńà¸ĩ', 'à¹Ģà¸Ĺีà¹Īยว'] +['اÙģ', 'ÙĬ'] +['Ġли', 'ÑĪ'] +['Ùħ', 'ÙĬØ©'] +['оÑĤ', 'веÑĤ'] +['Ñĩ', 'ин'] +['Ã', 'Ĭ'] +['ãĥ¡', 'ãĥ³'] +['å®', 'Ł'] +['éļĽ', 'ãģ«'] +['ĠÑĢаÐ', '¹'] +['ãĤ¦', 'ãĥ³'] +['×Ļר', '×ķש'] +['×Ļר×ķש', '׾×Ļ×Ŀ'] +['ม', 'ะ'] +['Ġar', 'a'] +['каз', 'аÑĤÑĮ'] +['à¸ķ', 'ัà¸Ķ'] +['ÑĥÑİ', 'ÑĤ'] +['Ġü', 'st'] +['×Ĵ', '×ķ×ij'] +['×Ĵ×ķ×ij', '×ķת'] +['mal', 'ı'] +['ег', 'од'] +['егод', 'нÑı'] +['اÙģ', 'ÙĤ'] +['à¸Ĭ', 'à¹Īà¸Ńà¸ĩ'] +['Ġö', 'zellik'] +['×Ļצ', '×ķר'] +['Ġmi', 'ÄĻd'] +['Ġili', 'ÅŁ'] +['Ġна', 'Ñħод'] +['×¢', '×ĸר'] +['׾', '×Ľ×ª'] +['ÙĨت', 'اج'] +['ĠÑģ', 'ем'] +['à¸Ī', 'à¹Īาย'] +['à¸ķร', 'ว'] +['à¸ķรว', 'à¸Ī'] +['פר', '×ķ'] +['à¸Ĥ', 'ัà¸ļ'] +['ãģ', 'ŀ'] +['Ġп', 'ло'] +['к', 'олÑĮ'] +['×ŀ×¢', '×ĺ'] +['íķĺ', 'ìĭľ'] +['jÄħ', 'ce'] +['ÙĨ', 'اÙĨ'] +['ลี', 'à¸ģ'] +['н', 'ÑĥÑĤ'] +['Ġоб', 'ÑĢаз'] +['Ùĥ', 'بر'] +['ĠاÙĦÙĪ', 'Ø·ÙĨ'] +['ãģķãģĽ', 'ãģ¦'] +['ÙĤ', 'اء'] +['×ŀ×ĵ', '×Ļ׳'] +['y', 'ü'] +['פ', '×Ļת'] +['׳', '×ķף'] +['ÙħÙĨ', 'ظ'] +['หà¸Ļ', 'ัà¸ģ'] +['ìŀ', 'Ī'] +['ãĤ«', 'ãĥ¼ãĥī'] +['ع', 'ÙĨÙĬ'] +['п', 'од'] +['ض', 'اء'] +['à¸Ļ', 'à¸ķà¹Į'] +['×ŀש', 'פ'] +['ว', 'à¹Į'] +['ר', '×ķ×§'] +['ส', 'ืà¹Īà¸Ń'] +['פק', '×Ļ×ĵ'] +['ãģªãĤī', 'ãģªãģĦ'] +['ĠìŬ', '룬'] +['ÙĦ', 'ج'] +['Ñī', 'иÑĤ'] +['ãĥĥ', 'ãĤ·'] +['ÙĦÙĬ', 'س'] +['ĠÙĦ', 'Ùħا'] +['ìł', 'ij'] +['×ij', '×Ļף'] +['ãĥģ', 'ãĤ§'] +['Ġgü', 'ç'] +['Ġch', 'ứ'] +['×ķצ', '×IJ'] +['קר', '×ij'] +['à¹Ĥ', 'à¸ŀ'] +['оÑĩ', 'но'] +['סק', '×Ļ'] +['ש׾', '×Ŀ'] +['صر', 'Ùģ'] +['ĠL', 'Ãł'] +['×¢', '×Ļת'] +['á»', '·'] +['à¹Ĥ', 'à¸Ńà¸ģ'] +['à¹Ĥà¸Ńà¸ģ', 
'า'] +['à¹Ĥà¸Ńà¸ģา', 'ส'] +['Ġ×Ķ', '×ĵ×ijר'] +['à¸Ļั', 'à¹Īà¸Ļ'] +['ز', 'ر'] +['нак', 'о'] +['íļ', 'į'] +['ãĤĤ', 'ãģ¡'] +['ãĤĤãģ¡', 'ãĤį'] +['ãĤĤãģ¡ãĤį', 'ãĤĵ'] +['اÙħ', 'ت'] +['عد', 'اد'] +['и', 'нÑĭ'] +['ÅĤy', 'w'] +['à¸Ħ', 'à¸ĵะ'] +['à¸Ĺ', 'ะ'] +['kt', 'ör'] +['×Ļ×Ĺ', '×Ķ'] +['Ġм', 'е'] +['Ġме', 'ÑģÑı'] +['׳×Ķ', '×Ĵ'] +['ĠÑģ', 'ÑĥÑīеÑģÑĤв'] +['à¸Ļ', 'ัà¸Ļ'] +['ÑĦ', 'ÑĦ'] +['ек', 'ÑĤив'] +['عÙĦÙĪÙħ', 'ات'] +['б', 'Ñĥд'] +['à¸Ļัà¸ģ', 'à¸ĩาà¸Ļ'] +['หà¸Ļà¹īา', 'à¸Ĺีà¹Ī'] +['ÙĤÙĬ', 'ÙĤ'] +['ãĤ·', 'ãĥ³'] +['ãģ«', 'éĸ¢'] +['×IJר', '×Ĵ'] +['ĠпÑĢ', 'оÑĤ'] +['ĠпÑĢоÑĤ', 'ив'] +['ĠìŀĪ', 'ìĸ´'] +['ÙĤÙĬ', 'ÙĤØ©'] +['ìĹ', 'ĩ'] +['k', 'ür'] +['ãģ«ãģªãĤĬ', 'ãģ¾ãģĹãģŁ'] +['Ġде', 'ÑıÑĤ'] +['ĠдеÑıÑĤ', 'елÑĮ'] +['פ×ķר', '×ĺ'] +['à¸Ł', 'à¹īา'] +['à¹Ģ', 'à¸ł'] +['ĠавÑĤом', 'аÑĤ'] +['×ĸ', '×Ļ×§'] +['Ġold', 'uk'] +['ع', 'اÙħ'] +['ĠÑĤ', 'оÑĢ'] +['yrı', 'ca'] +['ê', 'Ì'] +['ãĤŃ', 'ãĥ³ãĤ°'] +['ãģ«', 'ãģ¨ãģ£ãģ¦'] +['à¹Ģà¸ī', 'à¸ŀ'] +['à¹Ģà¸īà¸ŀ', 'าะ'] +['ãģ¯', 'ãģļ'] +['×ŀ', '×IJ×Ļ'] +['สะ', 'à¸Ķ'] +['สะà¸Ķ', 'วà¸ģ'] +['ìľ¼', 'ë©°'] +['à¸ģ', 'ี'] +['à¸', '¬'] +['Ġ×¢', '×ķש'] +['à¸łà¸²', 'ษา'] +['à¸Ĺ', 'ัà¸Ļ'] +['ac', 'akt'] +['acakt', 'ır'] +['اع', 'دة'] +['ĠÑĥÑģл', 'Ñĥг'] +['ס', 'ר×ĺ'] +['×ķ×ŀ', '×ķת'] +['×Ķ', '×ķר'] +['×ŀ', '×ķ×ij'] +['×ŀ×ķ×ij', 'ף'] +['سÙĬ', 'اس'] +['اتÙģ', 'اÙĤ'] +['×Ķ', 'צ׾'] +['Ùħؤ', 'س'] +['Ġp', 'ó'] +['Ġк', 'ни'] +['×Ļ׼', '×ķ׾'] +['à¹Ģหล', 'ืà¸Ń'] +['׼׾', '׼'] +['׳', '×ĸ'] +['ÑĪи', 'е'] +['r', 'ès'] +['ĠاÙĦØŃ', 'ÙĤ'] +['лÑı', 'ÑĢ'] +['ห', 'à¸į'] +['หà¸į', 'ิà¸ĩ'] +['ר×Ĵ', '×Ļש'] +['à¹Ģส', 'à¹īà¸Ļ'] +['ש×ij', '×ķף'] +['ô', 'tel'] +['ап', 'ÑĢ'] +['апÑĢ', 'имеÑĢ'] +['اب', 'ÙĦ'] +['ĠÑĢаз', 'виÑĤ'] +['Ġп', 'олÑĮз'] +['ĠС', 'еÑĢ'] +['×ķ×ij', '×Ļ'] +['r', 'óż'] +['ìĭ', 'Ń'] +['ãĤ¯', 'ãĥĪ'] +['ãģĹ', 'ãĤĪãģĨ'] +['à¸ģร', 'ม'] +['ØŃ', 'ÙĥÙĪÙħ'] +['à¹Ĥ', 'à¸ļ'] +['à¸Ĺ', 'à¹īาย'] +['ĠM', 'á'] +['ĠÑĤ', 'Ñĭ'] +['à¸Ħร', 'ัว'] +['ÑĢÑĥ', 'б'] +['ạ', 'p'] +['Ġm', 'ÅĤ'] +['ĠmÅĤ', 'od'] +['Ġgör', 'Ã¼ÅŁ'] +['Ġgeli', 'ÅŁ'] +['ươ', 'i'] +['×ŀש', '×§'] +['ÙĢÙĢ', 'ÙĢÙĢ'] +['รา', 'ว'] 
+['ãģĹãģ', '£'] +['ãģĹãģ£', 'ãģĭãĤĬ'] +['ĠÐļ', 'он'] +['Ġk', 'ê'] +['à¹Ĥà¸Ĺ', 'ร'] +['èIJ½', 'ãģ¡'] +['åĩº', 'ãģ¦'] +['ล', 'ัà¸ģษ'] +['Ġ×Ĵ', '×ij×ķ×Ķ'] +['ãĥĻ', 'ãĥ«'] +['ê±°', 'ëĤĺ'] +['ë§', 'IJ'] +['×Ļ׾', '×ĵ×Ļ×Ŀ'] +['ĠëĦ', 'Ī'] +['×ŀר', '×Ļ'] +['ร', 'ส'] +['ãĥŃ', 'ãĥ³'] +['и', 'ло'] +['ноÑģÑĤÑĮ', 'Ñİ'] +['×ĸר', '×Ĺ'] +['п', 'он'] +['Ġ×Ķש', '׾'] +['ê²ł', 'ìĬµëĭĪëĭ¤'] +['Ġki', 'ÅŁ'] +['ĠÐļ', 'и'] +['ว', 'ร'] +['د', 'اع'] +['ÅŁ', 'im'] +['ÙĨ', 'Ùij'] +['в', 'аÑĤ'] +['را', 'Ùĥ'] +['ب', 'اÙĦ'] +['ид', 'е'] +['Ġ×Ķ×ŀ', '×Ĺ'] +['ìĸ', 'µ'] +['تÙģ', 'اع'] +['Ø£', 'ت'] +['ëĬ', 'ĺ'] +['ש', '×Ļת'] +['ست', 'Ùħر'] +['ĠÑĦ', 'ак'] +['ĠاÙĦØ£Ùħ', 'رÙĬ'] +['ëŀ', '¨'] +['اس', 'Ùħ'] +['Ġa', 'ÄŁ'] +['Ġç', 'ev'] +['Ùĥ', 'ÙĪØ±'] +['ãģķ', 'ãģ¾'] +['Ġç', 'öz'] +['Ġر', 'س'] +['Äħ', 'da'] +['สà¸Ļ', 'ุ'] +['ãģĹãģ¦', 'ãģıãĤĮ'] +['н', 'Ñİ'] +['leÅŁ', 'me'] +['ãĤª', 'ãĥ³'] +['ãģ¨', 'ãģªãĤĬ'] +['ava', 'ÅŁ'] +['×ĺ', '×Ļ×ij'] +['ØŃ', 'ض'] +['×ķצ', '×IJ×ķת'] +['ÙĨ', 'ÙħÙĪ'] +['ı', 't'] +['ĠÑħ', 'а'] +['ĠÑħа', 'ÑĢак'] +['ĠÑħаÑĢак', 'ÑĤеÑĢ'] +['Ġd', 'ÅĤ'] +['ãĥĹ', 'ãĥ©'] +['à¸Ĭ', 'ุม'] +['à¹Ī', 'à¸Ńà¸Ļ'] +['×ķ×ij', '׾'] +['Ñģ', 'ол'] +['×ĵ', '×Ĵ'] +['аÑĢ', 'аÑĤ'] +['n', 'ivers'] +['Ġgerçek', 'leÅŁtir'] +['ĠاÙĦ', 'ÙĦÙĬ'] +['ระ', 'ยะ'] +['ĠÙħ', 'ختÙĦÙģ'] +['Ġgö', 'nder'] +['Ùģ', 'ار'] +['do', 'ÄŁ'] +['doÄŁ', 'an'] +['ص', 'ÙĦاØŃ'] +['Ġyay', 'ın'] +['ãĥĨ', 'ãĥ³'] +['รว', 'à¸Ī'] +['×Ļ×Ĺ', '×Ļ×ĵ'] +['ünk', 'ü'] +['ÑĨи', 'алÑĮн'] +['à¸ļ', 'ู'] +['ม', 'ุ'] +['h', 'ä'] +['Ø®', 'Ùģ'] +['å¢', 'Ĺ'] +['å¢Ĺ', 'ãģĪ'] +['еÑĩ', 'но'] +['ĠاÙĦس', 'ÙĨ'] +['à¸Ĥ', 'าว'] +['im', 'di'] +['Ð', '«'] +['à¸Ļà¸Ńà¸ģ', 'à¸Īาà¸ģ'] +['à¸ļา', 'ล'] +['ת', 'ש'] +['Ġdüzen', 'le'] +['мÑĭ', 'Ñģл'] +['ãģı', 'ãģª'] +['ż', 'u'] +['Ġwsp', 'óÅĤ'] +['Ġн', 'аз'] +['ınd', 'aki'] +['تر', 'Ø©'] +['ÅŁ', 'ek'] +['Ġö', 'd'] +['ĠÙĪ', 'Ùĥ'] +['Ġпозв', 'олÑı'] +['Ġת', '×ķ׼'] +['ÙħÙĨ', 'تج'] +['ë§', 'ī'] +['ĠاÙĦØ«', 'ÙĦاث'] +['аÑĨи', 'Ñİ'] +['ÙĪØ±', 'ÙĪ'] +['Ñĭв', 'аеÑĤ'] +['خص', 'ص'] +['ĠاÙĦÙģ', 'ÙĦ'] +['ĠاÙĦÙģÙĦ', 'سطÙĬÙĨ'] +['Ø¥', 'جر'] +['إجر', 'اء'] 
+['اÙĨت', 'Ø®'] +['اÙĨتخ', 'اب'] +['ار', 'ÙĬØ©'] +['×ķ', 'Ö'] +['Ø¢', 'ÙĨ'] +['×ŀ×¢', '×ķת'] +['Ġм', 'ал'] +['Ġ×IJ', '×Ĺ'] +['à¸Ĺ', 'à¹īà¸Ńà¸ĩ'] +['ze', 'ÅĽ'] +['Ġë§Į', 'ëĵ¤'] +['رÙĬ', 'ع'] +['äºĭ', 'ãĤĴ'] +['à¸ļริ', 'หาร'] +['׾', '×ŀ×Ļ×ĵ'] +['Ġм', 'Ñĥж'] +['ت', 'رÙĪ'] +['ĠباÙĦ', 'Ø¥'] +['פ', '×Ļ×§'] +['ز', 'ÙħØ©'] +['ĠÃ¶ÄŁ', 'renc'] +['ãĥ', '¶'] +['اÙħ', 'عة'] +['×§×ij', '×ķצ'] +['×ŀ', '׳×ķת'] +['رÙĬ', 'Ùħ'] +['Ġо', 'каз'] +['ãģłãģij', 'ãģ©'] +['Ġh', 'ız'] +['Ġש', '×IJת'] +['ãĤ¢', 'ãĥ¼'] +['Ġmożli', 'wo'] +['ìĦ', '¼'] +['ÙĪ', 'اب'] +['ог', 'ÑĢаÑĦ'] +['Ġعبد', 'اÙĦ'] +['ãĤĴ', 'è¡Į'] +['ب', 'ÙĬÙĦ'] +['Ġİ', 'ç'] +['ย', 'าย'] +['ĠÑĥ', 'ÑĩаÑģÑĤ'] +['ÑĦ', 'еÑģÑģ'] +['ÑĦеÑģÑģ', 'иона'] +['áº', '¤'] +['ÙĨ', 'ÙĬÙĨ'] +['عد', 'ÙĦ'] +['สร', 'ร'] +['دÙĬ', 'ÙĦ'] +['×ij', '×Ļ×§'] +['czy', 'ÅĤ'] +['ÑĢом', 'е'] +['Ġм', 'ед'] +['ìĻ', 'Ķ'] +['ãĥ©', 'ãĤ¤ãĥ³'] +['ĠÑĤ', 'еп'] +['еÑĢ', 'ÑĮ'] +['i', 'ÄŁi'] +['в', 'ели'] +['ÑĢи', 'ÑģÑĤ'] +['ס', '×ķפ'] +['×ŀ׾', '×Ĺ'] +['ĠاÙĦØ¥', 'ÙĨ'] +['Ġ׾×Ķ', 'ש'] +['è¶Ĭ', 'ãģĹ'] +['ĠÑĢ', 'Ñĭ'] +['×ķ×IJ', 'ר'] +['رÙĩ', 'اب'] +['פ', '×ķ×IJ×Ļ'] +['ĠгоÑģ', 'Ñĥд'] +['ĠгоÑģÑĥд', 'аÑĢ'] +['ĠгоÑģÑĥдаÑĢ', 'ÑģÑĤв'] +['ĠاÙĦØ£Ùħ', 'ÙĬر'] +['Ùħ', 'ج'] +['à¹Ģหม', 'าะ'] +['ÑĢ', 'ев'] +['à¸Ĭี', 'à¸ŀ'] +['ãĥķ', 'ãĥĪ'] +['иÑĩ', 'но'] +['ĠاÙĦÙħ', 'ؤ'] +['Ġi', 'ht'] +['íħ', 'ľ'] +['د', 'ÙĨÙĬ'] +['ر', 'ص'] +['ла', 'ÑģÑĤ'] +['à¹Ģหล', 'à¹Īา'] +['ılı', 'r'] +['ร', 'à¸ĵà¹Į'] +['×ŀש', '×Ļ×ļ'] +['Ġd', 'á»ĭ'] +['Ø·Ùģ', 'اÙĦ'] +['×ĺ', '×ķף'] +['Ġ×ij', '×Ļ׳'] +['ãģ¾', 'ãģ£ãģŁ'] +['лож', 'ениÑı'] +['تØŃ', 'ر'] +['ب', 'اØŃ'] +['à¹Ģส', 'ืà¹īà¸Ń'] +['ãģĻ', 'ãģĶ'] +['lt', 'ür'] +['à¸ĩ', 'าม'] +['Ġt', 'ü'] +['ĠпÑĢ', 'им'] +['ĠпÑĢим', 'ен'] +['Ġhay', 'at'] +['ëĥ', 'IJ'] +['ëĭ', 'Į'] +['׳×Ļ', '×ķ'] +['вед', 'ен'] +['ìħ', '¨'] +['à¸Ī', 'ัย'] +['à¸ģà¹Ī', 'à¸Ń'] +['Ġв', 'од'] +['оÑģÑĤ', 'оÑı'] +['н', 'аÑĤ'] +['à¹ģ', 'หล'] +['سÙħ', 'ÙĬ'] +['à¸Ķำ', 'à¹Ģà¸Ļ'] +['à¸Ķำà¹Ģà¸Ļ', 'ิà¸Ļ'] +['w', 'ód'] +['ö', 'yle'] +['ãĥĢ', 'ãĤ¤'] +['ÑĪи', 'й'] +['меÑī', 'ен'] +['ãģĹãģ¾', 'ãģĨ'] +['ãĥī', 'ãĥ©'] 
+['ÙĪØ¶', 'ØŃ'] +['à¸Ńà¸Ļ', 'ุ'] +['ĠاÙĦ', 'اجتÙħاع'] +['laÅŁ', 'ma'] +['à¸Ħ', 'à¸Ńà¸Ļ'] +['×ŀר', '×Ļ×Ŀ'] +['ÙĨ', 'اÙħج'] +['שר', '×ķת'] +['اÙĦ', 'Ø£'] +['Ġksi', 'Äħż'] +['Ġа', 'н'] +['ÑĢаÐ', '¹'] +['اÙĩر', 'Ø©'] +['×ŀ×ĵ', '×Ķ'] +['ä¸Ģ', 'ç·'] +['ä¸Ģç·', 'Ĵ'] +['ä¸Ģç·Ĵ', 'ãģ«'] +['ÑĢиÑĤ', 'оÑĢ'] +['d', 'ıkl'] +['à¹ģ', 'à¸ĸ'] +['à¹ģà¸Ĥ', 'à¹Īà¸ĩ'] +['екÑĤ', 'оÑĢ'] +['×ŀס', '×¢'] +['ÑĢак', 'ÑĤи'] +['u', 'ÄŁu'] +['×ķ×ij', 'ת'] +['สู', 'à¸ķร'] +['ĠçalÄ±ÅŁ', 'm'] +['ĠçalÄ±ÅŁm', 'alar'] +['Ġа', 'на'] +['ãĥĽ', 'ãĥ¼ãĥł'] +['Ġböl', 'üm'] +['Ġب', 'ص'] +['ол', 'оÑģ'] +['ĠìķĬ', 'ëĬĶ'] +['à¹Ī', 'ะ'] +['ÙĪ', 'تر'] +['ä¹', 'Ĺ'] +['ست', 'خداÙħ'] +['פ×Ļ', '×Ļס'] +['פ×Ļ×Ļס', '×ij'] +['פ×Ļ×Ļס×ij', '×ķ×§'] +['Ġк', 'ÑĢаÑģ'] +['ли', 'к'] +['رÙĬ', 'ØŃ'] +['×ŀש', '׾×Ķ'] +['à¹Ģย', 'ีà¹Īย'] +['à¹Ģยีà¹Īย', 'ม'] +['в', 'иÑģ'] +['ом', 'н'] +['ÄŁ', 'un'] +['ãĥŃ', 'ãĥ¼ãĥ³'] +['Ø£', 'تÙĬ'] +['à¸ķร', 'ี'] +['çͳ', 'ãģĹ'] +['تÙħ', 'ر'] +['ìĹ', 'ĪìĬµëĭĪëĭ¤'] +['ĠÙĪ', 'غÙĬر'] +['red', 'ni'] +['ĠاÙĦص', 'Ùģ'] +['Ġна', 'ÑģÑĤоÑı'] +['ĠнаÑģÑĤоÑı', 'Ñī'] +['à¸ķ', 'รา'] +['ĠÑĥÑģл', 'ов'] +['ĠÑĥÑģлов', 'иÑı'] +['ÑĨ', 'еп'] +['×Ķ', '×Ĺ׾×ĺ'] +['Ø·', 'ÙĬع'] +['ĠB', 'akan'] +['ĠاÙĦ', 'رÙĪ'] +['илÑĮ', 'но'] +['Ġм', 'еÑĤ'] +['à¸Ķ', 'à¸Ńà¸ģ'] +['ãģĭãĤī', 'ãģªãģĦ'] +['Ġпо', 'ÑģÑĤоÑı'] +['ĠпоÑģÑĤоÑı', 'н'] +['ĠÑĩ', 'аÑģ'] +['ü', 'c'] +['wr', 'ó'] +['б', 'ÑĥÑĢ'] +['ãĥIJ', 'ãĥĥãĤ¯'] +['ãĥ©ãĥ³', 'ãĥī'] +['Ġо', 'гÑĢ'] +['สั', 'à¸į'] +['สัà¸į', 'à¸įา'] +['มั', 'à¹Īà¸Ļ'] +['à¸Ħ', 'à¸Ńม'] +['al', 'ık'] +['Ġн', 'ед'] +['üm', 'üz'] +['ĠÅĽ', 'wie'] +['é', 'rio'] +['×Ļ×IJ', '×Ķ'] +['دÙħ', 'ات'] +['ı', 'rl'] +['ĠоÑĤ', 'з'] +['ĠоÑĤз', 'Ñĭв'] +['ä»ĺ', 'ãģį'] +['Ġkaż', 'de'] +['мин', 'иÑģÑĤ'] +['ãĤ°', 'ãĥ«'] +['ë°', 'ĸ'] +['ез', 'н'] +['اÙĦ', 'Ùģ'] +['Ġש', 'ק׾'] +['Ùħ', 'ض'] +['ãĥĿ', 'ãĥ¼ãĥĪ'] +['ÙħÙĨ', 'ت'] +['ÙĤÙĬ', 'اÙħ'] +['Ø´', 'ÙĨ'] +['×Ļר', '×ķ×¢'] +['ãĤŃãĥ£', 'ãĥ³'] +['доÑĢ', 'ов'] +['×ŀ', '×Ļת×Ļ'] +['ÙĪÙĦ', 'ÙĪØ¬'] +['Ùĥ', 'اÙģ'] +['ĠÑĢаз', 'лиÑĩ'] +['иÑĤ', 'еÑĤ'] +['н', 'олог'] +['ลà¸ĩ', 'à¸Ĺุà¸Ļ'] +['Ġyak', 'laÅŁ'] 
+['ãĥ¬', 'ãĤ¤'] +['ê²ł', 'ëĭ¤'] +['æ±Ĥ', 'ãĤģ'] +['رÙĪ', 'Ùģ'] +['Ġí', 'Ĭ'] +['ĠíĬ', '¹'] +['ãģ£', 'ãģıãĤĬ'] +['à¸Ħวาม', 'à¸Ħิà¸Ķ'] +['×Ķ', '×Ļס×ĺ'] +['Ø¥', 'ÙĤ'] +['ãģ¦', 'ãģĦ'] +['à¹Ĥ', 'à¸Ĭ'] +['ĠBü', 'yük'] +['ĠФ', 'едеÑĢ'] +['ÑĨи', 'н'] +['ÑĢов', 'а'] +['ĠاÙĦ', 'اÙĤتصاد'] +['Ġch', 'á'] +['à¸ĺ', 'าà¸Ļ'] +['ë¥', 'ł'] +['à¹Ħ', 'à¸ķ'] +['ÃŃ', 'pio'] +['Ùĭ', 'ا'] +['Ġоб', 'Ñıз'] +['Ùĩ', 'ج'] +['Ġì¤ij', 'ìļĶ'] +['ãģ®', 'ãģ§ãģ¯ãģªãģĦ'] +['بار', 'اة'] +['ãĤ¤', 'ãĥ«'] +['Ġн', 'оÑĢм'] +['á»ī', 'nh'] +['m', 'ö'] +['mö', 'glich'] +['ÑĨи', 'п'] +['ãĤ¢', 'ãĤ¯'] +['×Ķ', '×Ļ'] +['ÑĨи', 'алÑĮно'] +['ĠÅĽ', 'wi'] +['ت', 'ÙĤ'] +['ĠÑģÑĤо', 'им'] +['بÙĬ', 'عÙĬ'] +['Ġ׾', 'ש×ŀ'] +['г', 'лÑı'] +['глÑı', 'д'] +['ãģ¦', 'ãģıãĤĮ'] +['ÄĻd', 'zi'] +['à¸Ĥ', 'ั'] +['à¸Ĥั', 'à¹īà¸Ļ'] +['Ø·', 'ÙĤ'] +['ĠìĹ', 'Ń'] +['ãģ£ãģ¦ãģĹãģ¾', 'ãģĨ'] +['ĠdeÄŁer', 'l'] +['ĠdeÄŁerl', 'endir'] +['Ġü', 'lk'] +['Ġмн', 'ог'] +['à¹', 'ĭ'] +['ë¿', 'IJ'] +['ĠУ', 'кÑĢа'] +['ÄŁ', 'ini'] +['Ġбез', 'оп'] +['Ġбезоп', 'аÑģ'] +['à¸Ńà¸Ńà¸ģ', 'à¹ģà¸ļà¸ļ'] +['اØ', '¸'] +['ØŃد', 'اث'] +['л', 'еÑĢ'] +['×Ļ×', '¥'] +['×Ļ׳×ĺר', '׳×ĺ'] +['lar', 'ınız'] +['ØŃÙĬ', 'ØŃ'] +['ż', 'eli'] +['à¸Ń', 'ัà¸ĩ'] +['à¸Ńัà¸ĩ', 'à¸ģ'] +['à¸Ńัà¸ĩà¸ģ', 'ฤษ'] +['ĠоÑĤ', 'лиÑĩ'] +['ั', 'ส'] +['ëŀ', 'į'] +['ож', 'но'] +['ãĤ¹', 'ãĥĿ'] +['ĠÑħ', 'оÑĩ'] +['Ġк', 'ап'] +['еÑĩ', 'ен'] +['ØŃÙĦ', 'Ø©'] +['ÙĬا', 'Ùĩ'] +['на', 'л'] +['×ķצ', 'ר×Ļ×Ŀ'] +['Ġk', 'ald'] +['åĥ', 'į'] +['ĠاÙĦØ´', 'خص'] +['Ġз', 'на'] +['Ġwz', 'gl'] +['ż', 'ycz'] +['ê°', 'Ŀ'] +['à¸ŀ', 'ลัà¸ĩ'] +['íģ', '¼'] +['Ġö', 'l'] +['Ġb', 'ụ'] +['Ø´', 'Ùĩر'] +['Ġз', 'ам'] +['Ġд', 'ев'] +['×Ļ×ĺ', 'ת'] +['تعÙĦ', 'ÙĤ'] +['ÙĪÙħ', 'Ø©'] +['ãĤĴ', 'ä½ľ'] +['ãģį', 'ãģ¦'] +['í', 'ĥĿ'] +['ras', 'ında'] +['ãĤĴ', 'æİ¢'] +['ĠÙħ', 'باشر'] +['راج', 'ع'] +['Ġв', 'озд'] +['ÙħØŃ', 'ا'] +['×ķש', 'ר'] +['ĠиÑģÑĤ', 'оÑĢ'] +['ม', 'ัà¸ģ'] +['t', 'ıģ'] +['Ø«', 'ار'] +['تر', 'ÙĨت'] +['à¹ģà¸Ĥ', 'à¹ĩ'] +['à¹ģà¸Ĥà¹ĩ', 'à¸ĩ'] +['п', 'оÑĩ'] +['Ġ×ij', '×IJ×ķת'] +['ë¯', 'Ģ'] +['ëĿ¼', 'ëıĦ'] +['à¸Ĭ', 'ัà¸Ķ'] +['ส', 'à¸ķà¹Į'] +['ãĥĭ', 
'ãĥĥãĤ¯'] +['ид', 'енÑĤ'] +['Ġг', 'ÑĢÑĥпп'] +['ت', 'Ø®'] +['áº', 'ł'] +['ย', 'ืà¸Ļ'] +['ย', 'ัà¸Ļ'] +['ó', 'ry'] +['T', 'Ãľ'] +['ãģĹ', 'ãĤĥ'] +['ĠпÑĢов', 'ед'] +['лÑı', 'еÑĤ'] +['Ùħ', 'Ø®'] +['ย', 'à¸Ńม'] +['×Ľ×ł×¡', 'ת'] +['ĠاÙĦÙħ', 'ÙĨت'] +['Ġol', 'mad'] +['ר׼', '×ĸ×Ļ'] +['Ġв', 'ÑģÑĤÑĢ'] +['ĠиÑģ', 'Ñģлед'] +['ÑĤвеÑĢ', 'ж'] +['بد', 'ÙĪ'] +['еÑĢ', 'ÑĤ'] +['ï»', '·'] +['±', 'ħ'] +['สัม', 'à¸ŀัà¸Ļà¸ĺà¹Į'] +['ิ', 'à¹Īà¸Ļ'] +['צ', '×Ļ×ij'] +['wiÄĻ', 't'] +['Ġì°', '¸'] +['Ġz', 'wiÄħz'] +['سب', 'ÙĪØ¹'] +['ãĥĥ', 'ãĤ°'] +['à¸Ľà¸¥', 'à¸Ńà¸Ķ'] +['à¸Ľà¸¥à¸Ńà¸Ķ', 'à¸łà¸±à¸¢'] +['ãĤĤ', 'ãĤĬ'] +['ÙĤد', 'س'] +['Ġspr', 'z'] +['Ġsprz', 'eda'] +['Ġist', 'edi'] +['Ġk', 'hu'] +['Ġд', 'ен'] +['Ġko', 'ÅĦ'] +['Ġ×ij', '×Ĺ×Ļ'] +['à¹Ģà¸Ĺ', 'à¹īา'] +['×ķס', '×Ļ×£'] +['ãĥĭ', 'ãĥ¥ãĥ¼'] +['ĠпÑĢед', 'оÑģÑĤ'] +['ĠпÑĢедоÑģÑĤ', 'ав'] +['à¹Ĥ', 'à¸Ł'] +['é', 'v'] +['ĠاÙĦص', 'ØŃ'] +['صØŃ', 'اب'] +['à¹Ģà¸Ī', 'à¹ĩà¸ļ'] +['вл', 'ек'] +['วั', 'à¸ķ'] +['à¸ĸ', 'ุ'] +['ãģĵãģ¨ãģĮãģ§ãģį', 'ãģ¾ãģĻ'] +['ÙĤÙĬ', 'ÙĤÙĬ'] +['×ķ×Ĺ', 'ר'] +['Ñĭ', 'ÑĪ'] +['ĠоÑĤ', 'но'] +['ĠоÑĤно', 'ÑĪ'] +['об', 'илÑĮ'] +['Ùģ', 'ØŃ'] +['ı', 'nt'] +['ınt', 'ı'] +['Ġ׾', '×ij×ĵ'] +['í', 'İĺìĿ´ì§Ģ'] +['ãĥĬ', 'ãĥ«'] +['ĠÙħ', 'ساء'] +['×Ļ×ĺ', '×ij'] +['ÑĮ', 'еÑĢ'] +['ëĦ', '·'] +['Ñĭ', 'ÑĤа'] +['ĠоÑĩ', 'еÑĢ'] +['à¸Ķ', 'ืà¹Ī'] +['à¸Ķืà¹Ī', 'ม'] +['ĠN', 'gh'] +['ت', 'عب'] +['ÙĦاÙĤ', 'ات'] +['×ķ׾×ķ×Ĵ', '×Ļ×Ķ'] +['ĠìĿ´', 'ê²ĥ'] +['Ġ×Ķ', '×ijר'] +['ìľ', 'µ'] +['à¹Ģà¸Ħล', 'ืà¹Īà¸Ńà¸Ļ'] +['Ùĩ', 'Ø©'] +['à¸Īำ', 'à¹Ģà¸Ľà¹ĩà¸Ļ'] +['å¤ī', 'ãģĪ'] +['wi', 'ÅĽcie'] +['ch', 'od'] +['chod', 'zÄħ'] +['в', 'ÑĢо'] +['×ŀ×Ĺ', '×Ļר'] +['Ġy', 'ı'] +['Ġyı', 'll'] +['ì¡', 'Į'] +['à¹Ħ', 'หว'] +['ãģªãģı', 'ãģª'] +['Ġзав', 'иÑģ'] +['ĠìĺĪ', 'ìĪĺ'] +['Ùģ', 'ذ'] +['á»§', 'ng'] +['à¸ŀุ', 'à¸Ĺà¸ĺ'] +['з', 'н'] +['lay', 'an'] +['ãĤ', '¡'] +['à¸ģà¹ĩ', 'à¸ķาม'] +['ĠsaÄŁ', 'lam'] +['ร', 'à¸ĵ'] +['ĠÑģ', 'иÑĤ'] +['ĠÑģиÑĤ', 'Ñĥ'] +['ĠاÙĦت', 'ÙĨ'] +['×Ķ', '×ĸ'] +['ĠØ·', 'ÙĪÙĬÙĦ'] +['ta', 'ÅĤ'] +['Ġgö', 'rd'] +['å¤ī', 'ãĤı'] +['ëĥ', '¥'] +['à¸Ħà¹Ī', 'à¸Ńย'] +['×IJ', '×ķ×ĺ'] +['ëħ', 
'IJ'] +['ãĥ©ãĥ³', 'ãĤ¹'] +['วั', 'à¸Ĵ'] +['วัà¸Ĵ', 'à¸Ļ'] +['Ġol', 'uÅŁ'] +['פע', '×ķ׾'] +['Ġszczeg', 'óÅĤ'] +['à¸Ħา', 'สิ'] +['à¸Ħาสิ', 'à¹Ĥà¸Ļ'] +['pow', 'ied'] +['ĠÑĤ', 'еб'] +['หà¸Ļ', 'à¹Īวย'] +['Ġм', 'ил'] +['ØŃ', 'Ùĥ'] +['à¸Ĺ', 'à¸Ķ'] +['ĠмаÑĤ', 'еÑĢиал'] +['ÅĤ', 'ow'] +['à¹Ģà¸ģ', 'ีย'] +['ĠÑģов', 'еÑĢ'] +['ãĤ', '©'] +['à¸Ľ', 'ริ'] +['Ġи', 'Ñİ'] +['наÑĩ', 'ен'] +['ÑĢен', 'д'] +['mu', 'ÅŁtur'] +['ĠпÑĢод', 'Ñĥк'] +['з', 'д'] +['Ñı', 'ÑĤи'] +['ÑıÑĤи', 'Ñı'] +['à¹Ģม', 'ีย'] +['رات', 'ÙĬج'] +['Ġam', 'acı'] +['ש', '×ķ׾'] +['ש×ķ׾', '×Ĺ'] +['สะ', 'à¸Ńา'] +['สะà¸Ńา', 'à¸Ķ'] +['פ×Ĵ', '×¢'] +['عب', 'Ø©'] +['d', 'ın'] +['íħ', 'Ķ'] +['Ġ×ŀש', '×Ĺ×§'] +['Ġfi', 'yat'] +['Ġз', 'аÑı'] +['ĠзаÑı', 'в'] +['à¹Ĥ', 'หล'] +['à¹Ĥหล', 'à¸Ķ'] +['à¸ģรุà¸ĩ', 'à¹Ģà¸Ĺà¸ŀ'] +['צ×Ļ', '×Ļף'] +['ìļ', '±'] +['Ùħ', 'ب'] +['Ùħب', 'اد'] +['land', 'ır'] +['Ġв', 'еÑģÑĮ'] +['Ġh', 'ük'] +['ĠÐĴ', 'оз'] +['ÑĩиÑĤ', 'Ñĭва'] +['ว', 'ล'] +['×ķצ', '×¢'] +['à¸Ĥà¸ĵะ', 'à¸Ĺีà¹Ī'] +['ĠaÅŁ', 'aģı'] +['׾×IJ', '×ķ×ŀ×Ļ'] +['tr', 'zym'] +['Ã¤ÃŁ', 'ig'] +['owo', 'ÅĽci'] +['ãģĿ', 'ãĤĤ'] +['Ġroz', 'wiÄħz'] +['ĠgÅĤ', 'ówn'] +['м', 'онÑĤ'] +['×ŀ', '×ķ×ŀ'] +['ĠÑģÑĤ', 'ан'] +['ÙĦا', 'ÙĤØ©'] +['p', 'rowad'] +['prowad', 'zi'] +['ĠÑģоÑģÑĤ', 'оÑı'] +['×Ļ×IJ', '×ķת'] +['r', 'ı'] +['g', 'ı'] +['ãĥij', 'ãĥij'] +['Ġна', 'лиÑĩ'] +['×Ķ', 'צע'] +['Ġ׳', '×Ķ'] +['à¸Ħ', 'ัà¸ļ'] +['ع', 'راض'] +['и', 'ж'] +['Ùĩ', 'ائÙĬ'] +['ãĤī', 'ãģı'] +['ож', 'еÑĤ'] +['Ġоб', 'оÑĢ'] +['ĠобоÑĢ', 'Ñĥд'] +['Ø£', 'سÙĦ'] +['à¹ĩ', 'à¸Ķ'] +['ÑĢÑĥ', 'ÑĤ'] +['دÙĬ', 'ÙħÙĤ'] +['دÙĬÙħÙĤ', 'را'] +['Ġjest', 'e'] +['×ķ×ķ', '×Ļר'] +['×ij×ĵ', '×Ļ×§'] +['деÑĢж', 'ива'] +['ãģĬ', 'ãģı'] +['ewn', 'ÄĻtr'] +['ewnÄĻtr', 'zn'] +['à¸ŀ', 'ฤ'] +['Ġ×IJ', '×ķ×Ķ'] +['ת×Ĺ', '×ķש'] +['Ġz', 'ob'] +['д', 'Ñĥм'] +['ĠÑģ', 'Ñĭ'] +['ÙĬر', 'ا'] +['ĠwiÄĻ', 'ks'] +['à¹ģà¸ķà¸ģ', 'à¸ķà¹Īาà¸ĩ'] +['lar', 'aras'] +['lararas', 'ı'] +['íĺ', 'Ģ'] +['ëī', '´'] +['×ķ×Ĵ', '׾'] +['ĠоÑĤ', 'меÑĤ'] +['ĠÑĢ', 'ан'] +['ت', 'ÙĥÙĦ'] +['иÑĤелÑĮ', 'н'] +['à¸Ľà¸£à¸°', 'วั'] +['à¸Ľà¸£à¸°à¸§à¸±', 'à¸ķิ'] +['ìŀ', 'ĸ'] 
+['мож', 'но'] +['pie', 'czeÅĦ'] +['pieczeÅĦ', 'st'] +['ëª', '»'] +['ìĬ', '¨'] +['×ŀס', '×ŀ'] +['á»', '¦'] +['ศ', 'ิ'] +['ศิ', 'ล'] +['ศิล', 'à¸Ľ'] +['ĠÅļ', 'w'] +['ãĥĥ', 'ãĤ·ãĥ§ãĥ³'] +['unit', 'Ãł'] +['Ġmiesz', 'ka'] +['Ġmieszka', 'ÅĦ'] +['pr', 'zed'] +['przed', 'si'] +['przedsi', 'ÄĻb'] +['przedsiÄĻb', 'ior'] +['à¸Ľà¸£à¸°', 'สิà¸Ĺà¸ĺิ'] +['à¸Ľà¸£à¸°à¸ªà¸´à¸Ĺà¸ĺิ', 'à¸łà¸²à¸ŀ'] +['ย', 'à¹Ī'] +['ìķ', 'Ļ'] +['รว', 'à¸Ķ'] +['รวà¸Ķ', 'à¹Ģรà¹ĩว'] +['å½ĵ', 'ãģŁãĤĬ'] +['äl', 'le'] +['Ñĥ', 'еÑĤÑģÑı'] +['ã', 'n'] +['ëł', 'µ'] +['th', 'è'] +['ãĤĴ', 'åĪ©ç͍'] +['ì', 'µľ'] +['íĵ', '¨'] +['à¸Ĺ', 'ัà¸ļ'] +['า', 'à¸Ħม'] +['ãģ', 'ĩ'] +['ëĤ', 'Į'] +['à¹Ģà¸Ľà¸¥', 'à¹Īา'] +['â', '¦'] +['ë', '¾'] +['ê', 'Ģ'] +['ê', 'ĩ'] +['â', '¡'] +['ðŁ', 'Ł'] +['ã', 'IJ'] +['â', 'º'] +['á', 'Ń'] +['á', 'Ļ'] +['á', 'ĵ'] +['á', '²'] +['ðĵ', 'ı'] +['á', '¬'] +['â', '¯'] +['ä', '¨'] +['ê', 'Ŀ'] +['ê', '«'] +['ð', 'ij'] +['ðĵ', 'ĥ'] +['ðĿ', 'ħ'] +['<', 'unk'] +[''] +[''] +[''] +['Ġع', 'ÙĦÙī'] +['Ġm', 'á»Ļt'] +['Ġv', 'Ỽi'] +['Ġng', 'ưá»Ŀi'] +['ĠØ¥', 'ÙĦÙī'] +['Ġnh', 'ững'] +['Ġth', 'á»ĥ'] +['Ġ×IJ', '×ķ'] +['Ġ×¢', '×Ŀ'] +['ا', 'Ùĭ'] +['Ġ', 'à¹ģละ'] +['ĠÙĦ', 'ا'] +['Ġnh', 'ư'] +['ĠاÙĦت', 'ÙĬ'] +['Ġ×Ķ', '×ķ×IJ'] +['ĠÄij', 'ến'] +['ĠØ£', 'ÙĪ'] +['Ġv', 'á»ģ'] +['ĠlÃł', 'm'] +['Ġs', 'ẽ'] +['Ġc', 'Å©ng'] +['Ġ', 'ợ'] +['ĠÄij', 'ó'] +['Ġnhi', 'á»ģu'] +['Ġt', 'ại'] +['Ġtr', 'ên'] +['Ġ×Ĵ', '×Ŀ'] +['Ġnh', 'Ãł'] +['Ġ׼', '×Ļ'] +['Ġs', 'á»±'] +['ĠÄij', 'ầu'] +['Ġb', 'á»ĭ'] +['ĠÙĩ', 'ذا'] +['Ġnh', 'ất'] +['Ġph', 'ải'] +['Ġhi', 'á»ĩn'] +['Ġdụ', 'ng'] +['ĠÄij', 'á»Ļng'] +['ĠاÙĦÙĦ', 'Ùĩ'] +['ĠØ', 'Į'] +['ĠÙĥ', 'ÙĦ'] +['Ġvi', 'á»ĩc'] +['Ġn', 'Äĥm'] +['Ġth', 'ì'] +['Ġh', 'á»įc'] +['ĠÙĪ', 'ت'] +['t', 'é'] +['Ġا', 'ÙĨ'] +['Ġt', 'ôi'] +['Ġ×IJ', '׳×Ļ'] +['Ġ׾', '×Ļ'] +['Ġ×ŀ', '×ķ'] +['Ġng', 'Ãły'] +['Ġn', 'Æ°á»Ľc'] +['Ġ×Ķ', '×Ļ×IJ'] +['Ġ×IJ', '×Ļ'] +['Ġh', 'Æ¡n'] +['ĠÙĩ', 'ذÙĩ'] +['ĠÙĪ', 'ÙĬ'] +['ĠاÙĦ', 'ذÙĬ'] +['Ġ×ķ', '×ŀ'] +['Ġgi', 'á'] +['Ġnh', 'ân'] +['Ġch', 'ÃŃnh'] +['Ġm', 'ình'] +['ĠÐĿ', 'а'] +['Ġth', 'ế'] +['Ġ×Ļ', '×ķתר'] 
+['Ġ×IJ', '×Ŀ'] +['Ġn', 'ên'] +['Ġh', 'ợ'] +['Ġhợ', 'p'] +['Ġc', 'òn'] +['ĠÙĩ', 'ÙĪ'] +['Ġc', 'Æ¡'] +['Ġr', 'ất'] +['ĠVi', 'á»ĩt'] +['Ġب', 'عد'] +['Ġש', '×Ļ'] +['Ġth', 'á»Ŀi'] +['Ġc', 'ách'] +['ĠÄij', 'á»ĵng'] +['Ġн', 'о'] +['Ġtr', 'ưá»Ŀng'] +['Ø', 'Ł'] +['ĠÄij', 'á»ĭnh'] +['ĠÄiji', 'á»ģu'] +['×Ļ', '×Ļ×Ŀ'] +['Ġth', 'á»±c'] +['n', 'ın'] +['Ġh', 'ình'] +['Ġn', 'ói'] +['Ġc', 'ùng'] +['Ġ×Ķ', '×Ķ'] +['ĠØ¥', 'ÙĨ'] +['Ġ×IJ', '×ij׾'] +['Ġnh', 'ưng'] +['Ġbi', 'ết'] +['Ġж', 'е'] +['Ġch', 'úng'] +['ĠÄij', 'ang'] +['Ġذ', 'ÙĦÙĥ'] +['Ġl', 'ên'] +['Ġkh', 'ách'] +['Ġn', 'Ãło'] +['Ġs', 'á»Ń'] +['Ġkh', 'ác'] +['Ġë°', 'ı'] +['Ġl', 'ý'] +['×Ļ', '×Ļ'] +['ĠÄij', 'ây'] +['Ġ׾', '×ŀ'] +['Ġc', 'ần'] +['Ġtr', 'ình'] +['Ġph', 'át'] +['ãģ«', 'ãĤĤ'] +['п', 'о'] +['Ġn', 'Äĥng'] +['Ġb', 'á»Ļ'] +['Ġv', 'ụ'] +['ĠÄij', 'á»Ļ'] +['Ñĩ', 'е'] +['Ġnh', 'áºŃn'] +['Ġtr', 'Æ°á»Ľc'] +['Ġ×¢', '×ĵ'] +['Ġh', 'Ãłnh'] +['ĠØ®', 'ÙĦاÙĦ'] +['Ġl', 'ượng'] +['Ġc', 'ấp'] +['Ġtá»', '±'] +['Ġv', 'ì'] +['Ġt', 'ư'] +['Ġch', 'ất'] +['Ġ׼', '×ŀ×ķ'] +['Ġg', 'ì'] +['Ġש', '׳'] +['Ġt', 'ế'] +['ת', '×ķ'] +['Ġnghi', 'á»ĩp'] +['Ġm', 'ặt'] +['ĠÙĥ', 'Ùħا'] +['Ġ×ij', '×Ļף'] +['Ġר', '×§'] +['Ġth', 'ấy'] +['Ġmá', 'y'] +['ĠÙģ', 'Ùī'] +['Ġd', 'ân'] +['Ġ×IJ', '×Ĺ×ĵ'] +['Ġt', 'âm'] +['Ġ׼', '×ļ'] +['Ġ׾', '×ķ'] +['в', 'о'] +['Ġt', 'ác'] +['Ġto', 'Ãłn'] +['ĠÙĪ', 'Ùħ'] +['Ġk', 'ết'] +['Ġ', 'หรืà¸Ń'] +['ĠÙĪØ§ÙĦ', 'Ùħ'] +['ĠÄiji', 'á»ĥm'] +['Ġ×ĸ', '×ķ'] +['Ġ×ij', '×ķ'] +['׼', '×ķת'] +['Ġh', 'á»Ļi'] +['Ġb', 'ằng'] +['ت', 'Ùĩا'] +['Ġ׼', '×ĵ×Ļ'] +['Ġ×Ķ', '×Ŀ'] +['Ġxu', 'ất'] +['ĠÙĤ', 'د'] +['Ġb', 'ảo'] +['Ġt', 'á»ijt'] +['Ġt', 'ình'] +['ĠÙĩ', 'ÙĬ'] +['ĠÄij', 'á»iji'] +['Ġthi', 'ết'] +['Ġhi', 'á»ĩu'] +['Ġti', 'ếp'] +['Ġt', 'ạo'] +['ת', '×Ķ'] +['Ġch', 'á»§'] +['o', 'ÅĽÄĩ'] +['Ġgi', 'ú'] +['Ġgiú', 'p'] +['ĠÃ', '½'] +['Ġqu', 'ả'] +['Ġlo', 'ại'] +['Ġc', 'ô'] +['ĠÃ', '´'] +['Ġô', 'ng'] +['Ġ×Ķ', '×ķ'] +['ĠاÙĦÙĬ', 'ÙĪÙħ'] +['ĠtÃŃ', 'nh'] +['г', 'а'] +['Ġph', 'òng'] +['Ġ', 'Äĥn'] +['Ġع', 'اÙħ'] +['Ġv', 'á»ĭ'] +['lar', 'ını'] +['r', 'ÃŃa'] +['Ġt', 'Ỽi'] 
+['ĠÄij', 'ưá»Ŀng'] +['Ġgi', 'Ỽi'] +['Ġb', 'ản'] +['Ġc', 'ầu'] +['Ġnhi', 'ên'] +['Ġb', 'á»ĩnh'] +['Ġth', 'ưá»Ŀng'] +['Ġ×IJ', '×Ļף'] +['ĠÄij', 'á»ģ'] +['Ġh', 'á»ĩ'] +['Ġ×Ļש', 'ר×IJ׾'] +['Ġqu', 'á'] +['ĠÐĹ', 'а'] +['ãģ®', 'ãģ§ãģĻãģĮ'] +['ĠÐŁ', 'ÑĢи'] +['Ġph', 'ần'] +['ĠÙĪ', 'ÙĦا'] +['ĠlỼ', 'n'] +['Ġtr', 'á»ĭ'] +['Ġcả', 'm'] +['Ġм', 'о'] +['Ġd', 'ùng'] +['ĠاÙĦ', 'Ùī'] +['ĠعÙĦÙĬ', 'Ùĩ'] +['ĠìŀĪ', 'ìĬµëĭĪëĭ¤'] +['ÙĬ', 'ÙĤ'] +['ĠÙĤ', 'بÙĦ'] +['Ġho', 'ặc'] +['ĠØŃ', 'ÙĬØ«'] +['Ġ', 'à¸Ĺีà¹Ī'] +['Ġغ', 'ÙĬر'] +['ĠÄij', 'ại'] +['Ġsá»ij', 'ng'] +['нÑĭ', 'ми'] +['Ġth', 'ức'] +['Ġפ', '×Ļ'] +['ĠÄiji', 'á»ĩn'] +['ãģª', 'ãģĭãģ£ãģŁ'] +['Ġgi', 'ải'] +['Ġv', 'ẫn'] +['Ġи', 'Ñħ'] +['Ġö', 'nce'] +['Ġv', 'áºŃy'] +['Ġmu', 'á»ijn'] +['Ġ', 'ảnh'] +['à¹ĥà¸Ļ', 'à¸ģาร'] +['ĠQu', 'á»ijc'] +['Ġk', 'ế'] +['׳', '×IJ'] +['Ġס', '×Ļ'] +['Ġy', 'êu'] +['ãģ®', 'ãģĭ'] +['ĠÄij', 'ẹ'] +['ĠÄijẹ', 'p'] +['Ġch', 'ức'] +['Ġy', 'ıl'] +['ĠTür', 'kiye'] +['d', 'é'] +['ĠÙĤ', 'اÙĦ'] +['Ġd', 'á»ĭch'] +['ĠolduÄŁ', 'u'] +['Ġch', 'á»įn'] +['Ġت', 'Ùħ'] +['หà¸Ļ', 'ึà¹Īà¸ĩ'] +['ãģķãĤĮ', 'ãģŁ'] +['Ġph', 'áp'] +['ìĽ', 'Ķ'] +['Ġti', 'á»ģn'] +['ãģĹ', 'ãģ¾ãģĹãģŁ'] +['Ġש', '׾×IJ'] +['ÙĦ', 'Ø©'] +['Ġ׾פ', '׳×Ļ'] +['Ġ×ij', '×Ļת'] +['ĠH', 'Ãł'] +['ĠØŃ', 'ت'] +['ĠØŃت', 'Ùī'] +['Ġ×¢', '×ķ×ĵ'] +['Ġn', 'ó'] +['Ġth', 'áng'] +['à¹Ģลืà¸Ń', 'à¸ģ'] +['ר', '×Ķ'] +['Ġt', 'Äĥng'] +['Ġcá', 'i'] +['Ġtri', 'á»ĥn'] +['Ġ×IJ×ķת', '×ķ'] +['ìłģ', 'ìĿ¸'] +['ĠC', 'ông'] +['Ġ׾×Ķ', '×Ļ×ķת'] +['Ġг', 'ода'] +['и', 'Ñİ'] +['Ġب', 'عض'] +['Ġ', 'à¸ģาร'] +['èī¯', 'ãģĦ'] +['ÙĪ', 'ت'] +['Ġli', 'ên'] +['ĠÐĿ', 'о'] +['ĠÐĿ', 'е'] +['çļĦ', 'ãģª'] +['ĠÙħ', 'ت'] +['ĠÑĤак', 'же'] +['ĠкоÑĤоÑĢ', 'Ñĭе'] +['Ġ×Ļ', '×ĵ×Ļ'] +['Ġtr', 'á»įng'] +['ãĤµ', 'ãĤ¤ãĥĪ'] +['ìłģ', 'ìľ¼ë¡ľ'] +['Ġt', 'áºŃp'] +['Ġש', '׾×Ļ'] +['íķĺ', 'ê²Į'] +['Ġt', 'Ãłi'] +['ĠÐ', '¯'] +['Ġr', 'á»ĵi'] +['ا', 'Ùĥ'] +['Ġth', 'ương'] +['Ġ×Ķ', '×ĸ×Ķ'] +['ĠÙĪ', 'ÙħÙĨ'] +['à¸Ĺีà¹Ī', 'มี'] +['Ġcu', 'á»Ļc'] +['Ġbü', 'yük'] +['ãģ¨', 'ãģĭ'] +['Ġ×ij', '×Ļ×ķתר'] +['Ġl', 'ần'] +['Ġgö', 're'] +['Ġtr', 'ợ'] +['Ġ×ĺ', '×ķ×ij'] 
+['ÑĤÑĮ', 'ÑģÑı'] +['Ġth', 'á»ijng'] +['Ġ׼', 'ש'] +['Ġti', 'êu'] +['Ġ×ŀ×IJ', '×ķ×ĵ'] +['Ø', 'Ľ'] +['k', 'Äħ'] +['Ġ', 'à¹ĥà¸Ļ'] +['Ġv', 'ấn'] +['Ġש', '׾×ķ'] +['ĠÄij', 'á»ģu'] +['Ùģ', 'ت'] +['Ġê²ĥ', 'ìĿ´'] +['Ġh', 'óa'] +['ĠاÙĦع', 'اÙħ'] +['ĠÙĬ', 'ÙĪÙħ'] +['к', 'ой'] +['Ġbi', 'á»ĩt'] +['ÑģÑĤ', 'о'] +['Ġ×Ķ', '×Ļ×ķ'] +['à¸Ĺีà¹Ī', 'à¸Īะ'] +['Ġ×ĵ', '×Ļ'] +['Ġ×IJ', '×ļ'] +['Ġá', 'n'] +['ص', 'ÙĪØ±'] +['Ġtr', 'ÃŃ'] +['ĠÐŁÑĢ', 'о'] +['Ġl', 'á»±c'] +['ãģĹãģ¦', 'ãģĦãģ¾ãģĻ'] +['Ġb', 'Ãłi'] +['Ġ×ĸ', '×IJת'] +['Ġb', 'áo'] +['à¸ļ', 'à¸Ļ'] +['ĠëĮĢ', 'íķľ'] +['Ġti', 'ế'] +['Ġtiế', 'ng'] +['Ġb', 'ên'] +['ãģķãĤĮ', 'ãĤĭ'] +['s', 'ión'] +['Ġt', 'ìm'] +['×¢', '×ķ'] +['m', 'é'] +['ни', 'Ñı'] +['ãģ»', 'ãģ©'] +['Ġà¹Ģà¸ŀ', 'ราะ'] +['ب', 'Ø©'] +['Ġë¶', 'Ħ'] +['Ġ×IJ', '×ĸ'] +['à¸Ĺ', 'à¹Īาà¸Ļ'] +['ת', '×Ŀ'] +['Ġth', 'êm'] +['Ġho', 'ạt'] +['y', 'ı'] +['×ĸ', '×ķ'] +['Ġgi', 'á»Ŀ'] +['Ġb', 'án'] +['à¸Ĥ', 'าย'] +['Ñĩ', 'а'] +['Ġ', 'à¹Ĩ'] +['ĠاÙĦÙħ', 'ت'] +['ĠоÑĩ', 'енÑĮ'] +['Ġb', 'ất'] +['Ġtr', 'ẻ'] +['ÑĤ', 'ÑĢ'] +['ĠØ£', 'ÙĨÙĩ'] +['ĠØ«', 'Ùħ'] +['Ġ׼', '×ŀ×Ķ'] +['Ġkh', 'ó'] +['Ġr', 'ằng'] +['ĠÙĪ', 'ÙģÙĬ'] +['ни', 'й'] +['Ġho', 'Ãłn'] +['t', 'ó'] +['Ġ×IJ', 'שר'] +['ĠìĥĿ', 'ê°ģ'] +['Ñģ', 'а'] +['Ġ׼', '×ijר'] +['ĠÑįÑĤ', 'ом'] +['lar', 'ının'] +['Ġch', 'ưa'] +['з', 'и'] +['Ġd', 'ẫn'] +['ĠÐļ', 'ак'] +['ج', 'ÙĪ'] +['ĠбÑĭ', 'ло'] +['ĠÙĬ', 'ت'] +['n', 'ı'] +['ÅĤ', 'am'] +['ĠÙĪÙĩ', 'ÙĪ'] +['×ij', '×ķ'] +['п', 'и'] +['ר', 'ת'] +['Ġqu', 'á»ijc'] +['ж', 'д'] +['ĠÄij', 'Æ¡n'] +['Ùĥت', 'ب'] +['Ġm', 'ắt'] +['ระ', 'à¸ļ'] +['ระà¸ļ', 'à¸ļ'] +['ĠÙĥ', 'اÙĨت'] +['Ġth', 'ân'] +['สิà¸Ļ', 'à¸Ħà¹īา'] +['×Ĵ', '×Ļ'] +['Ġph', 'ương'] +['à¹Ħมà¹Ī', 'à¹Ħà¸Ķà¹ī'] +['ĠìĦ', '±'] +['ĠC', 'ác'] +['Ġ×Ķ×ŀ', '×ķ'] +['ĠÑĤ', 'ем'] +['Ġ×ĵ', '×ķ'] +['à¸Ńะ', 'à¹Ħร'] +['Ġv', 'Äĥn'] +['ãģª', 'ãģ®ãģ§'] +['ĠN', 'á»Ļi'] +['Ġ×¢', '×ķ'] +['ãĤīãĤĮ', 'ãĤĭ'] +['Ġs', 'áng'] +['Ġgö', 'ster'] +['ãģĵãģ¨', 'ãĤĴ'] +['Ġtaraf', 'ından'] +['Ġм', 'а'] +['ĠпоÑģл', 'е'] +['Ġ׳', '×Ļת'] +['Ġ׳×Ļת', 'ף'] +['Ġл', 'еÑĤ'] +['Ġ׾', '׳×ķ'] +['Ñģ', 'Ñģ'] +['Ġ×Ļ', '×ķ'] 
+['п', 'е'] +['ĠÙĪ', 'ÙĦÙĥ'] +['ĠÙĪÙĦÙĥ', 'ÙĨ'] +['Ġngo', 'Ãłi'] +['ĠÄij', 'á»ĭa'] +['r', 'zÄħd'] +['dz', 'iaÅĤ'] +['ĠÙħ', 'ر'] +['иÑĤÑĮ', 'ÑģÑı'] +['Ġ×IJ×Ĺר', '×Ļ'] +['Ġ׾', '׼׾'] +['à¸Ĥ', 'à¹īà¸Ńม'] +['à¸Ĥà¹īà¸Ńม', 'ูล'] +['Ġб', 'ол'] +['Ġбол', 'ее'] +['جÙħ', 'ع'] +['л', 'еÑĤ'] +['Ġl', 'á»ĭch'] +['ĠÙħ', 'Ø«ÙĦ'] +['Ġ그리', 'ê³ł'] +['Ġth', 'ứ'] +['ĠdeÄŁ', 'il'] +['ÙĪ', 'ØŃ'] +['Ġש׾', '×ļ'] +['ĠÙħ', 'ØŃÙħد'] +['Ġn', 'ếu'] +['ĠÄij', 'á»ķi'] +['Ġv', 'ừa'] +['Ġm', 'á»įi'] +['Ġо', 'ни'] +['Ġl', 'úc'] +['ĠÙĬ', 'ÙĥÙĪÙĨ'] +['ì§', 'Ī'] +['Ġש׾', '׳×ķ'] +['ĠÐĶ', 'о'] +['Ġש', '׳×Ļ'] +['ล', 'ิ'] +['×IJ', 'פשר'] +['Ġs', 'ức'] +['ê¶', 'Į'] +['Ġ', 'ứng'] +['à¹Ħมà¹Ī', 'มี'] +['Ø·ÙĦ', 'ب'] +['ĠÑĩ', 'ем'] +['Ġch', 'uyên'] +['Ġth', 'ÃŃch'] +['Ġ×ķ', '×Ļ'] +['íķ', '©'] +['ĠÙħ', 'صر'] +['д', 'о'] +['ĠÄij', 'ất'] +['Ġch', 'ế'] +['à¸Ĭ', 'ืà¹Īà¸Ń'] +['Ġìĭ', 'ł'] +['ĠØ¥', 'ذا'] +['Ġر', 'ئÙĬس'] +['Ġש', '×Ļש'] +['Ġgiả', 'm'] +['Ñģ', 'ка'] +['lar', 'ında'] +['Ġs', 'ợ'] +['ĠtÃŃ', 'ch'] +['ĠÙĦ', 'ÙĥÙĨ'] +['Ġب', 'Ùħ'] +['×¢', '×ķ×ij'] +['×¢×ķ×ij', '×ĵ'] +['ÅĤÄħ', 'cz'] +['ları', 'na'] +['Ġש', '×Ŀ'] +['ĠÙĦ', 'ت'] +['Ġש×Ķ', '×ķ×IJ'] +['t', 'ów'] +['Ġëĭ¤', '른'] +['ĠØ£', 'Ùĥثر'] +['ãģ®', 'ãģ§ãģĻ'] +['׼', '×Ļ×Ŀ'] +['ĠolduÄŁ', 'unu'] +['ãģĭ', 'ãģª'] +['ãĤĤ', 'ãģĨ'] +['ÙĬ', 'ØŃ'] +['Ġnh', 'ìn'] +['Ġngh', 'á»ĩ'] +['ãģ«ãģª', 'ãģ£ãģ¦'] +['п', 'а'] +['Ġquy', 'ết'] +['ÙĦ', 'ÙĤ'] +['t', 'á'] +['Ġlu', 'ôn'] +['ĠÄij', 'ặc'] +['Ġ×IJ', 'ר'] +['Ġtu', 'á»ķi'] +['s', 'ão'] +['ìĻ', '¸'] +['ر', 'د'] +['ĠبÙĩ', 'ا'] +['Ġ×Ķ×Ļ', '×ķ×Ŀ'] +['×ķ', '×ķ×Ļ'] +['ãģ§ãģĻ', 'ãģŃ'] +['ĠÑĤ', 'ого'] +['Ġth', 'á»§'] +['ãģĹãģŁ', 'ãģĦ'] +['ر', 'ÙĤ'] +['Ġb', 'ắt'] +['г', 'Ñĥ'] +['Ġtá»', 'Ń'] +['ÑĪ', 'а'] +['Ġ', 'à¸Ľà¸µ'] +['Ġ×Ķ×IJ', '×Ŀ'] +['íı', '¬'] +['ż', 'a'] +['Ġ×IJת', '×Ķ'] +['Ġn', 'á»Ļi'] +['Ġph', 'ÃŃ'] +['ĠÅŁek', 'ilde'] +['Ġl', 'á»Ŀi'] +['d', 'ıģı'] +['Ġ׼×IJ', 'ף'] +['Ġt', 'üm'] +['Ġm', 'ạnh'] +['ĠM', 'ỹ'] +['ãģĿ', 'ãĤĵãģª'] +['Ġnh', 'á»ı'] +['ãģª', 'ãģĮãĤī'] +['Ġb', 'ình'] +['ı', 'p'] +['à¸ŀ', 'า'] +['ĠÄij', 'ánh'] +['ĠÙĪ', 'ÙĦ'] 
+['ר', '×ķת'] +['Ġ×IJ', '×Ļ×ļ'] +['Ġch', 'uyá»ĥn'] +['Ùĥ', 'ا'] +['ãĤĮ', 'ãĤĭ'] +['à¹ģม', 'à¹Ī'] +['ãĤĪ', 'ãģı'] +['ĠÙĪ', 'ÙĤد'] +['íĸ', 'Īëĭ¤'] +['Ġn', 'Æ¡i'] +['ãģ«ãĤĪ', 'ãģ£ãģ¦'] +['Ġvi', 'ết'] +['Ġà¹Ģà¸ŀ', 'ืà¹Īà¸Ń'] +['ëIJĺ', 'ëĬĶ'] +['اد', 'ÙĬ'] +['ĠÙģ', 'Ø¥ÙĨ'] +['ì¦', 'Ŀ'] +['ĠÄij', 'ặt'] +['Ġh', 'Æ°á»Ľng'] +['Ġx', 'ã'] +['Ġönem', 'li'] +['ãģł', 'ãģ¨'] +['Ġm', 'ẹ'] +['Ġ×ij', '×Ļ'] +['Ġ×ĵ', '×ijר'] +['Ġv', 'áºŃt'] +['ĠÄij', 'ạo'] +['Ġdá»±', 'ng'] +['ĠÑĤ', 'ом'] +['ĠÙģÙĬ', 'Ùĩا'] +['Ġج', 'ÙħÙĬع'] +['Ġthu', 'áºŃt'] +['st', 'ÄĻp'] +['Ġti', 'ết'] +['Ø´', 'ÙĬ'] +['Ġе', 'Ñīе'] +['ãģĻãĤĭ', 'ãģ¨'] +['ĠmÃł', 'u'] +['ĠÑįÑĤ', 'ого'] +['Ġv', 'ô'] +['ĠÐŃ', 'ÑĤо'] +['Ġth', 'áºŃt'] +['Ġn', 'ữa'] +['Ġbi', 'ến'] +['Ġn', 'ữ'] +['Ġ׾', '׼×Ŀ'] +['×Ļ', '×Ļף'] +['Ġس', 'ت'] +['ĠÐŀ', 'ÑĤ'] +['Ġph', 'ụ'] +['ê¹Į', 'ì§Ģ'] +['Ġ׾', '×ļ'] +['Ġk', 'ỳ'] +['à¹ĥ', 'à¸Ħร'] +['Ġg', 'ây'] +['ĠÙĦ', 'ÙĦÙħ'] +['Ġtụ', 'c'] +['ت', 'ÙĬÙĨ'] +['Ġtr', 'ợ'] +['Ġ׾', 'פ×Ļ'] +['Ġb', 'á»ij'] +['ĠÐļ', 'а'] +['ĠÄij', 'ình'] +['ow', 'Äħ'] +['s', 'ında'] +['Ġkhi', 'ến'] +['s', 'ız'] +['Ġк', 'огда'] +['ס', '׾'] +['ĠбÑĭ', 'л'] +['à¸Ļ', 'à¹īà¸Ńย'] +['обÑĢаÐ', '·'] +['Ġê²ĥ', 'ìĿ´ëĭ¤'] +['ëĵ¤', 'ìĿĢ'] +['ãģ¸', 'ãģ®'] +['Ġà¹Ģม', 'ืà¹Īà¸Ń'] +['Ġph', 'ục'] +['Ġ×Ĺ', '׾ק'] +['Ġh', 'ết'] +['ĠÄij', 'a'] +['à¹Ģà¸Ķà¹ĩ', 'à¸ģ'] +['íĺ', 'ķ'] +['l', 'ÃŃ'] +['ê¸', 'ī'] +['Ġع', 'دد'] +['ĠÄij', 'á»ĵ'] +['Ġg', 'ần'] +['Ġ×Ļ', '×ķ×Ŀ'] +['Ġs', 'Ä©'] +['ÑĢ', 'Ñıд'] +['Ġquy', 'á»ģn'] +['Ġ×IJ', '׾×IJ'] +['Ùĩ', 'Ùħا'] +['׳', '×Ļ×Ķ'] +['׾', '×ķת'] +['Ġ×Ķר', '×ij×Ķ'] +['Ġti', 'ên'] +['Ġal', 'ın'] +['Ġd', 'á»ħ'] +['人', 'ãģĮ'] +['но', 'Ñģ'] +['л', 'ÑģÑı'] +['ĠÄij', 'ưa'] +['ส', 'าว'] +['иÑĢов', 'ан'] +['Ġ×ŀס', 'פר'] +['×Ĵ', 'ף'] +['Ġki', 'ến'] +['ĠÐ', '¨'] +['p', 'é'] +['б', 'Ñĥ'] +['ов', 'ой'] +['б', 'а'] +['ĠØ¥', 'ÙĦا'] +['×IJ', '׾×Ļ'] +['Ġx', 'ây'] +['Ġb', 'ợi'] +['Ġש', '×ķ'] +['人', 'ãģ®'] +['×§', '×Ļ×Ŀ'] +['à¹Ģà¸Ķ', 'ืà¸Ńà¸Ļ'] +['Ġkh', 'á'] +['Ġ×ķ', '׾×Ķ'] +['×ĵ', '×ķת'] +['Ġ×¢', '×ij×ķר'] +['Ġبش', 'ÙĥÙĦ'] +['ĠÙĩÙĨا', 'Ùĥ'] +['ÑĤ', 'ÑĢа'] 
+['Ġ', 'íķĺëĬĶ'] +['ร', 'à¸Ńà¸ļ'] +['owa', 'ÅĤ'] +['h', 'é'] +['Ġdi', 'á»ħn'] +['Ġ×Ķ', '׼׾'] +['ĠØ£', 'س'] +['Ġch', 'uyá»ĩn'] +['ระ', 'à¸Ķัà¸ļ'] +['ĠNh', 'ững'] +['Ġ×IJ', '×Ĺת'] +['ĠØŃ', 'ÙĪÙĦ'] +['л', 'ов'] +['׳', 'ר'] +['Ġ×ķ', '׳'] +['Ġch', 'Æ¡i'] +['Ġiç', 'inde'] +['ÑģÑĤв', 'Ñĥ'] +['Ġph', 'á»ij'] +['ĠÑģ', 'Ñĥ'] +['ç§ģ', 'ãģ¯'] +['Ġch', 'ứng'] +['Ġv', 'á»±c'] +['à¹ģ', 'à¸Ń'] +['Ġl', 'áºŃp'] +['Ġtừ', 'ng'] +['å°ij', 'ãģĹ'] +['ĠNg', 'uy'] +['ĠNguy', 'á»ħn'] +['ĠÙģÙĬ', 'Ùĩ'] +['Ġб', 'а'] +['×Ļ', '×Ļת'] +['Ġ×ľ×¢', 'ש×ķת'] +['Ġ×ŀ', '׼'] +['Ġnghi', 'á»ĩm'] +['Ġм', 'ного'] +['Ġе', 'е'] +['ëIJĺ', 'ìĸ´'] +['Ġl', 'ợi'] +['Ġ׾', '׾×IJ'] +['Ġ׼', 'ף'] +['Ġch', 'ÃŃ'] +['ãģ§', 'ãģ®'] +['×Ĺ', '×ķ'] +['ש', '×ķ×Ŀ'] +['Ġ×ŀ', 'ר'] +['ĠÐĶ', 'лÑı'] +['Å', 'ģ'] +['Ġ׼×IJ', 'שר'] +['ĠM', 'á»Ļt'] +['ĠÙĪØ§ÙĦ', 'ت'] +['ĠìĿ´', '룰'] +['ÅŁ', 'a'] +['Ġchi', 'ến'] +['Ġaras', 'ında'] +['Ġ×ij', '×IJתר'] +['ãģķãĤĮ', 'ãģ¦ãģĦãĤĭ'] +['Ø´', 'ÙĥÙĦ'] +['Ġt', 'ượng'] +['Ġت', 'ت'] +['ĠC', 'ó'] +['Ġb', 'á»ı'] +['Ġtá»ī', 'nh'] +['Ġkh', 'ÃŃ'] +['ĠпÑĢ', 'оÑģÑĤ'] +['ĠпÑĢоÑģÑĤ', 'о'] +['ĠÙĪ', 'ÙĤاÙĦ'] +['Ġgi', 'áo'] +['ĠN', 'ếu'] +['×IJ', '×ŀר'] +['×¢×ł×Ļ', '×Ļף'] +['íİ', '¸'] +['Ùĩد', 'Ùģ'] +['ĠB', 'á»Ļ'] +['Ġb', 'Ãłn'] +['Ġng', 'uyên'] +['Ġgü', 'zel'] +['ส', 'าย'] +['ì²', 'ľ'] +['×ŀ', '×ķר'] +['Ġph', 'ân'] +['ס', 'פק'] +['×§', '×ij׾'] +['ĠاÙĦÙħ', 'تØŃ'] +['ĠاÙĦÙħتØŃ', 'دة'] +['ائ', 'د'] +['Ġ×IJ', '×ŀר'] +['Ġki', 'ÅŁi'] +['ì¤', 'Ģ'] +['Ġtr', 'uyá»ģn'] +['ĠÙĦ', 'Ùĩا'] +['ĠÐľ', 'а'] +['à¸ļริ', 'ษ'] +['à¸ļริษ', 'ั'] +['à¸ļริษั', 'à¸Ĺ'] +['Ġש', '׳×Ļ×Ŀ'] +['Ġмен', 'Ñı'] +['ÅŁ', 'e'] +['Ġdi', 'á»ĩn'] +['Ġ×IJ׳', '×Ĺ׳×ķ'] +['k', 'ü'] +['Ġc', 'á»ķ'] +['Ġm', 'á»Ĺi'] +['w', 'ä'] +['Ùħ', 'ÙĬ'] +['Ġhi', 'á»ĥu'] +['ëĭ', '¬'] +['Ġ×Ķ', '×Ĺ׾'] +['Ġt', 'ên'] +['Ġki', 'á»ĩn'] +['ÙĨ', 'ÙĤÙĦ'] +['Ġv', 'á»ĩ'] +['×ĵ', 'ת'] +['ĠÐłÐ¾ÑģÑģ', 'ии'] +['л', 'Ñĥ'] +['ĠاÙĦع', 'ربÙĬØ©'] +['ĠØ·', 'رÙĬÙĤ'] +['Ġ×Ķ×ij', '×Ļת'] +['Ñģ', 'еÑĢ'] +['Ġм', 'не'] +['ä', 'u'] +['Ġtri', 'á»ĩu'] +['ĠÄij', 'á»§'] +['Ġר', '×ij'] +['ت', 'ÙĩÙħ'] +['à¸ĭ', 'ี'] 
+['Ġì§Ģ', 'ê¸Ī'] +['li', 'ÅĽmy'] +['د', 'عÙħ'] +['ãģł', 'ãĤįãģĨ'] +['Ñģки', 'е'] +['Ġh', 'á»ıi'] +['Ġ×§', '×ķ'] +['ÑĢÑĥ', 'Ñģ'] +['ÙĨ', 'ظر'] +['ãģ®', 'ãĤĤ'] +['Ġ×Ķ', '׼×Ļ'] +['ĠìĽ', 'IJ'] +['ÙĪ', 'Ùĩ'] +['ĠÙĪ', 'Ùİ'] +['ĠB', 'ạn'] +['п', 'лаÑĤ'] +['Ġ×ŀ', '×ŀש'] +['лÑİ', 'б'] +['ĠнÑĥж', 'но'] +['Ġth', 'ư'] +['ãģ', 'µ'] +['ãģı', 'ãĤīãģĦ'] +['ر', 'Ø´'] +['ר', '×ķ×Ĺ'] +['ĠÙĬ', 'تÙħ'] +['Ġצר', '×Ļ×ļ'] +['Ġph', 'á'] +['ม', 'à¸Ńà¸ĩ'] +['Ġ×ij×IJ', '×ķפף'] +['Ġcả', 'nh'] +['Ġíķľ', 'ëĭ¤'] +['Ġ×Ķ×ŀ', 'ת'] +['à¸ķà¹Īาà¸ĩ', 'à¹Ĩ'] +['มี', 'à¸ģาร'] +['Ñģки', 'Ñħ'] +['ĠÐĴ', 'Ñģе'] +['Ġا', 'ÙĪ'] +['ج', 'ÙĬ'] +['ãģĵãģ¨', 'ãģ¯'] +['Ġd', 'Ãłi'] +['Ġh', 'á»ĵ'] +['èĩªåĪĨ', 'ãģ®'] +['à¹Ħ', 'หà¸Ļ'] +['ëĵ¤', 'ìĿĦ'] +['ĠV', 'Äĥn'] +['Ġд', 'аж'] +['Ġдаж', 'е'] +['Ñĭ', 'ми'] +['лаÑģ', 'ÑĮ'] +['ÙĬ', 'ÙĪÙĨ'] +['ÙĨ', 'ÙĪ'] +['c', 'ó'] +['ãģĹãģ¦', 'ãģĦãģŁ'] +['ãģł', 'ãģĭãĤī'] +['طاÙĦ', 'ب'] +['Ġc', 'á»Ńa'] +['п', 'ÑĢоÑģ'] +['ãģªãģ©', 'ãģ®'] +['รุ', 'à¹Īà¸Ļ'] +['Ġchi', 'ếc'] +['л', 'Ñĭ'] +['ĠÑıвлÑı', 'еÑĤÑģÑı'] +['Ġn', 'á»ķi'] +['ãģ®', 'ãģĬ'] +['Ġ×IJת', '×Ŀ'] +['ĠëķĮ문', 'ìĹIJ'] +['à¸ģล', 'าà¸ĩ'] +['ĠbaÅŁ', 'ka'] +['ìĦ', 'Ŀ'] +['ĠÑĨ', 'ел'] +['Ùģ', 'ÙĤ'] +['ãģ«ãĤĪ', 'ãĤĭ'] +['ÙĤ', 'ا'] +['Ġçı', 'kar'] +['Ġcứ', 'u'] +['Ø·', 'ا'] +['Ġש', 'ת'] +['à¹Ĥ', 'à¸Ħ'] +['Ġ×ŀ', '׾'] +['Ġ×Ķ', 'פר'] +['Ġг', 'де'] +['ĠØ®', 'Ø·'] +['åīį', 'ãģ«'] +['c', 'jÄĻ'] +['Ġ×Ĺ', 'ש×ķ×ij'] +['ר×Ĵ', '×¢'] +['Ġkho', 'ảng'] +['ĠÄij', 'á»Ŀi'] +['ĠÐł', 'е'] +['Ġо', 'на'] +['Ġ×IJ', '׳×ķ'] +['ãģ®', 'ãģ«'] +['ĠاÙĦذ', 'ÙĬÙĨ'] +['кÑĥ', 'п'] +['ãĤµ', 'ãĥ¼ãĥ'] +['ãĤµãĥ¼ãĥ', 'ĵ'] +['ãĤµãĥ¼ãĥĵ', 'ãĤ¹'] +['в', 'ал'] +['г', 'е'] +['Ġgi', 'ữa'] +['ĠKh', 'ông'] +['ĠâĹ', 'ĭ'] +['à¸ģล', 'ุà¹Īม'] +['ĠÙħÙĨ', 'ذ'] +['à¸Ń', 'à¹Īาà¸Ļ'] +['ĠÑģп', 'оÑģоб'] +['ĠÄij', 'á»Ļi'] +['Ġdi', 'ÄŁer'] +['Ġ', 'à¸ĸà¹īา'] +['Ùħ', 'Ø«ÙĦ'] +['Ġ×Ķ×IJ', '×Ļ'] +['Ġد', 'ÙĪÙĨ'] +['ÙĬر', 'اÙĨ'] +['Ñī', 'и'] +['بÙĨ', 'اء'] +['ĠØ¢', 'خر'] +['ظ', 'Ùĩر'] +['Ġ×ij', '׼'] +['ĠاÙĦÙħ', 'ع'] +['ãĥ', 'Ĵ'] +['Ġt', 'ất'] +['Ġm', 'ục'] +['ĠdoÄŁ', 'ru'] +['ãģŁ', 'ãĤī'] +['Ġס', '×ķ'] 
+['Ġx', 'ác'] +['ร', 'à¸Ń'] +['ĠcÄĥ', 'n'] +['Ġон', 'л'] +['Ġонл', 'айн'] +['Ġk', 'ý'] +['Ġch', 'ân'] +['Ġ', 'à¹Ħมà¹Ī'] +['اØŃ', 'Ø©'] +['r', 'án'] +['׳×Ļ', '×Ļ×Ŀ'] +['Ġ×ij', 'ף'] +['ĠÐ', 'ĸ'] +['à¸ķร', 'à¸ĩ'] +['д', 'Ñĭ'] +['Ġs', 'ắc'] +['ÙĦ', 'ت'] +['ãĥŃ', 'ãĥ¼'] +['ĠÙĦ', 'ÙĨ'] +['Ġר', '×ķ'] +['Ġd', 'Æ°á»Ľi'] +['à¹Ģ', 'à¸ĺ'] +['à¹Ģà¸ĺ', 'à¸Ń'] +['e', 'ÄŁi'] +['Ġ×ķ', 'ש'] +['ĠÙĦ', 'Ø£'] +['Ġg', 'ặp'] +['Ġc', 'á»ij'] +['ãģ¨', 'ãģ¦ãĤĤ'] +['رÙĪ', 'س'] +['Ġ׾×Ķ', '×Ļ'] +['Ġë³', '¸'] +['ä¸Ĭ', 'ãģĴ'] +['Ġm', 'ức'] +['Ñħ', 'а'] +['Ġìŀ', '¬'] +['à¸ī', 'ัà¸Ļ'] +['ÑĢÑĥ', 'ж'] +['Ġaç', 'ık'] +['ÙĪ', 'اÙĦ'] +['Ġ×ĸ', '×ŀף'] +['人', 'ãģ¯'] +['ع', 'ÙĬÙĨ'] +['Ñı', 'Ñħ'] +['Ġ×Ĵ×ĵ', '×ķ׾'] +['ר', '×ķ×ij'] +['g', 'ó'] +['ëĿ¼', 'ê³ł'] +['Ġark', 'adaÅŁ'] +['ÙĨ', 'شر'] +['Ġгод', 'Ñĥ'] +['ĠболÑĮ', 'ÑĪе'] +['ãģ¡ãĤĩ', 'ãģ£ãģ¨'] +['Ġcâ', 'u'] +['Ġs', 'át'] +['íĶ', '¼'] +['Ġti', 'ến'] +['íķ´', 'ìķ¼'] +['ĠÙĪ', 'Ø£ÙĨ'] +['à¸Ļ', 'าà¸Ļ'] +['Ġ×ij×IJ×ŀ', 'צע'] +['Ġ×ij×IJ×ŀצע', '×ķת'] +['Ġ׾', 'ר'] +['Ġqu', 'ản'] +['ĠÙĪØ§ÙĦ', 'Ø£'] +['Ġ×IJ×ķת', '×Ķ'] +['Ġìĸ´ëĸ', '¤'] +['Ġê²ĥ', 'ìĿĢ'] +['ØŃس', 'ÙĨ'] +['Ġm', 'ất'] +['à¸Ħ', 'ูà¹Ī'] +['ãĥ¬', 'ãĥ¼'] +['ĠÐĶ', 'а'] +['Ġol', 'ması'] +['Ġthu', 'á»Ļc'] +['׳', '×Ĺ'] +['íĨ', 'ł'] +['Ġsö', 'yle'] +['ãģĿãģĨ', 'ãģ§ãģĻ'] +['Ġت', 'ÙĥÙĪÙĨ'] +['л', 'ÑĥÑĩ'] +['׾', '×Ļ×ļ'] +['ĠØ£', 'ØŃد'] +['ли', 'ÑģÑĮ'] +['ĠвÑģ', 'его'] +['Ġ×Ķר', '×ij'] +['Ġëª', '»'] +['o', 'ÄŁ'] +['oÄŁ', 'lu'] +['ĠìĦ', 'ł'] +['Ġк', 'аÑĢ'] +['à¸łà¸²', 'à¸Ħ'] +['e', 'ÅĦ'] +['Ġ', 'à¸ģà¹ĩ'] +['Ġa', 'ynı'] +['Ġb', 'Ãł'] +['ãģªãĤĵ', 'ãģ¦'] +['Ġ모', 'ëĵł'] +['ÙĤر', 'ار'] +['ãģĹãģª', 'ãģĦ'] +['ĠÐĴ', 'о'] +['ĠÙĪÙĩ', 'ÙĬ'] +['ни', 'ки'] +['ãĤĮ', 'ãģŁ'] +['Ġchu', 'ẩn'] +['ר', '×¢'] +['Ùģ', 'رÙĬÙĤ'] +['ãĤĴ', 'åıĹãģij'] +['ĠÄij', 'úng'] +['б', 'е'] +['׼', '×ķ×Ĺ'] +['п', 'Ñĥ'] +['Ġ×ķ', '×Ĵ×Ŀ'] +['×ŀ', '׳×Ļ'] +['íĸ', '¥'] +['צ', '×Ļ×Ŀ'] +['à¸ĭ', 'ิ'] +['Ùĩ', 'ÙĨ'] +['н', 'ем'] +['Ġ×ij×ij', '×Ļת'] +['ر', 'ع'] +['Ġ', 'ส'] +['ĠÄIJ', 'Ãł'] +['íķĺ', 'ëĭ¤'] +['Ġ', 'ấy'] +['×Ĺ', '×ķ×ĵ'] +['×Ĺ×ķ×ĵ', 'ש'] +['ĠÑĩеÑĢ', 'ез'] 
+['Ñĥ', 'л'] +['ĠB', 'ình'] +['Ġê²ĥ', 'ìĿĦ'] +['Ġ×Ĵ', 'ר'] +['ä»ĺ', 'ãģij'] +['×Ĺ׾', '×§'] +['Ġت', 'ÙĦÙĥ'] +['à¹ĥส', 'à¹Ī'] +['sz', 'Äħ'] +['ÙĤ', 'اÙħ'] +['د', 'ÙĪØ±'] +['ĠÙģ', 'ÙĤØ·'] +['Ġh', 'ữu'] +['Ġмог', 'ÑĥÑĤ'] +['Ġg', 'á»įi'] +['Ġ×§', 'ר'] +['à¸Īะ', 'มี'] +['ت', 'ÙĤدÙħ'] +['Ġع', 'بر'] +['Ġ׾×Ķ', '×Ŀ'] +['ĠÑģам', 'о'] +['ס', '×ĵר'] +['Ġc', 'Ãłng'] +['r', 'ÃŃ'] +['Ġìŀ', '¥'] +['ëĵ¤', 'ìĿĺ'] +['ĠÙĦ', 'Ùĥ'] +['п', 'оÑĢÑĤ'] +['Ġkh', 'ả'] +['ĠÑģеб', 'Ñı'] +['׳', 'ף'] +['Ġد', 'ÙĪØ±'] +['Ġm', 'ợ'] +['Ġcâ', 'y'] +['Ġf', 'ark'] +['Ġfark', 'lı'] +['а', 'ÑİÑĤ'] +['Ġtr', 'á»±c'] +['wiÄĻks', 'z'] +['Ġthu', 'á»ijc'] +['Ġت', 'ØŃت'] +['ت', 'ÙĦ'] +['ов', 'Ñĭе'] +['ëĤ', 'ł'] +['Ġв', 'ам'] +['بÙĦ', 'غ'] +['Ġê°Ļ', 'ìĿĢ'] +['íĮ', 'IJ'] +['ÙĦ', 'ب'] +['Ġnas', 'ıl'] +['Ġод', 'ин'] +['м', 'ан'] +['ĠعÙĦÙĬ', 'Ùĩا'] +['б', 'и'] +['Ġפ', 'ש×ķ×ĺ'] +['×ijר', '×Ļ'] +['Ġש', '׳×Ķ'] +['Ġëı', 'Ħ'] +['ĠÄIJ', 'ại'] +['Ġ×IJ×ķת', '×Ŀ'] +['ĠاÙĦØŃ', 'ر'] +['Ġб', 'о'] +['à¸Ī', 'ุà¸Ķ'] +['Ġr', 'õ'] +['ĠdeÄŁi', 'ÅŁ'] +['Ġëĭ', '¨'] +['ĠÑģлÑĥÑĩ', 'а'] +['ĠÑģлÑĥÑĩа', 'е'] +['Ġ×IJ׳', 'ש×Ļ×Ŀ'] +['×ĵ', '×£'] +['ש×ij', 'ת'] +['Ġש׾', '׼×Ŀ'] +['Ġch', 'ú'] +['nik', 'ów'] +['Ġtan', 'ı'] +['Ġcá', 'o'] +['ĠÄij', 'á'] +['Ġ×IJ', '×ĵ×Ŀ'] +['Ġê°', 'ķ'] +['Ġnhi', 'á»ĩm'] +['Ġ׾', 'ס'] +['Ġ×Ľ×ª', '×ij'] +['Ġ×Ķס', 'פר'] +['ĠÄij', 'Äĥng'] +['Ġë', 'ijIJ'] +['à¸ľ', 'ิ'] +['à¸ľà¸´', 'ว'] +['ج', 'ا'] +['Ġê°', 'IJ'] +['ر', 'Ø£'] +['ست', 'خدÙħ'] +['ãģ«ãģªãĤĬ', 'ãģ¾ãģĻ'] +['Ġtá»', '·'] +['×ĺ', '×ķר'] +['г', 'овоÑĢ'] +['Ġв', 'оÑģ'] +['ĠÙħÙĨ', 'Ùĩا'] +['иÑĢов', 'аÑĤÑĮ'] +['ĠÄij', 'ầy'] +['׳', '×Ĵ'] +['ĠÙħ', 'ÙĪ'] +['ĠÙħ', 'ÙĪÙĤع'] +['ר׼', '×Ļ'] +['ت', 'Ùı'] +['ëª', '¨'] +['Ġת', '×ķ'] +['ÙĬا', 'Ùĭ'] +['à¹ĥ', 'à¸Ķ'] +['ãĤĬ', 'ãģ¾ãģĻ'] +['à¸Ńยูà¹Ī', 'à¹ĥà¸Ļ'] +['ĠØ£', 'ÙĪÙĦ'] +['ĠØ£', 'خرÙī'] +['Ġc', 'ư'] +['ص', 'ار'] +['×ŀ×Ĺ', 'ש×ij'] +['б', 'ÑĢа'] +['ÅĦ', 'ski'] +['б', 'ÑĢ'] +['ĠÙĬ', 'Ùı'] +['à¸ģ', 'ิà¸Ļ'] +['Ġch', 'á»ijng'] +['Ùħ', 'Ùı'] +['Ġ', 'à¸Ħืà¸Ń'] +['Ġت', 'ÙĨ'] +['t', 'ÃŃ'] +['y', 'Äĩ'] +['Ġm', 'ạng'] +['Ùģ', 'ÙĪ'] +['Ġdü', 
'nya'] +['×§', 'ר×IJ'] +['Ġ×§', '׾'] +['ĠØŃ', 'اÙĦ'] +['c', 'ÃŃa'] +['Ġà¹Ģ', 'รา'] +['Ġר', '×ķצ×Ķ'] +['Ġá', 'p'] +['ë°', 'ķ'] +['ا', 'ÙĤØ©'] +['ни', 'Ñİ'] +['Ġ×IJ', '׾×ķ'] +['Ġ×ŀס', '×ķ'] +['ãģ§ãģ¯', 'ãģªãģı'] +['Ġtr', 'ả'] +['Ġ×§', 'שר'] +['mi', 'ÅŁtir'] +['Ġl', 'ưu'] +['Ġh', 'á»Ĺ'] +['ĠбÑĭ', 'ли'] +['Ġl', 'ấy'] +['عÙĦ', 'Ùħ'] +['Ġö', 'zel'] +['æ°Ĺ', 'ãģĮ'] +['Ġ×ĵ', 'ר×ļ'] +['Ùħ', 'د'] +['s', 'ını'] +['׳', '×ķש×IJ'] +['r', 'ów'] +['Ñĩ', 'еÑĢ'] +['êµIJ', 'ìľ¡'] +['ĠÐľ', 'о'] +['л', 'ег'] +['ĠV', 'Ỽi'] +['วัà¸Ļ', 'à¸Ļีà¹ī'] +['ÑİÑī', 'ие'] +['ãģĬ', 'ãģĻ'] +['ãģĬãģĻ', 'ãģĻ'] +['ãģĬãģĻãģĻ', 'ãĤģ'] +['ëı', 'ħ'] +['Ġ×Ļ×Ķ', '×Ļ×Ķ'] +['×ŀ', '×ĺר'] +['Ñı', 'ми'] +['Ġl', 'á»±a'] +['ĠÄij', 'ấu'] +['à¹Ģส', 'ียà¸ĩ'] +['Ġt', 'ương'] +['ëĵ', '±'] +['ĠÑģÑĤ', 'аÑĢ'] +['à¹ĥ', 'à¸ļ'] +['ว', 'ัà¸Ķ'] +['Ġİ', 'stanbul'] +['Ġ', 'à¸Īะ'] +['à¸ķ', 'ลาà¸Ķ'] +['Ġب', 'ÙĬ'] +['à¹ģà¸Ļ', 'ะ'] +['à¹ģà¸Ļะ', 'à¸Ļำ'] +['س', 'اعد'] +['Ġب', 'Ø£'] +['Ġki', 'á»ĥm'] +['ØŃ', 'سب'] +['à¸Ĭั', 'à¹īà¸Ļ'] +['Ġ×ķ', '×¢×ķ×ĵ'] +['ов', 'ÑĭÑħ'] +['оÑģ', 'нов'] +['Ġtr', 'Æ°á»Łng'] +['צ', '×ij×¢'] +['ĠÃŃ', 't'] +['Ġk', 'ỹ'] +['cr', 'é'] +['Ñı', 'м'] +['êµ', '°'] +['ãģĮ', 'ãģªãģĦ'] +['ÙĬÙĦ', 'Ø©'] +['ãĥķ', 'ãĤ£'] +['ر', 'Ùī'] +['ĠÙĬ', 'جب'] +['Ġ×IJ', '×£'] +['Ġc', 'á»±c'] +['ãĤīãĤĮ', 'ãģŁ'] +['Ġ', 'à¸ľà¸¹à¹ī'] +['Ġ', 'à¸Ń'] +['lar', 'ımız'] +['Ġkad', 'ın'] +['Ġê·¸', 'ëŀĺ'] +['Ġê·¸ëŀĺ', 'ìĦľ'] +['ĠëĺIJ', 'ëĬĶ'] +['ĠÄij', 'ả'] +['ĠÄijả', 'm'] +['Ġ×IJ', '×ķ×ŀר'] +['Ġy', 'ếu'] +['ci', 'Äħ'] +['ciÄħ', 'g'] +['Ġt', 'á»ij'] +['Ġש×IJ', '׳×Ļ'] +['Ġdz', 'iaÅĤa'] +['Ñī', 'а'] +['ĠÄij', 'Ãłn'] +['s', 'ına'] +['ãģĵãĤĮ', 'ãģ¯'] +['Ġ×ij', '׾×Ļ'] +['Ġ×ij', '×Ļשר×IJ׾'] +['л', 'оÑģÑĮ'] +['Ġgi', 'ữ'] +['ê°', 'IJ'] +['ÑĢ', 'он'] +['تج', 'ار'] +['г', 'лав'] +['в', 'ин'] +['Ġh', 'ạn'] +['Ġyapı', 'lan'] +['ب', 'س'] +['Ġ', 'à¸ŀรà¹īà¸Ńม'] +['ê´Ģ', '리'] +['mÄ±ÅŁ', 'tır'] +['b', 'ü'] +['r', 'ück'] +['ĠBaÅŁkan', 'ı'] +['ĠÙĦ', 'ÙĬس'] +['Ġs', 'Æ¡'] +['à¸Īัà¸ĩ', 'หว'] +['à¸Īัà¸ĩหว', 'ัà¸Ķ'] +['د', 'اء'] +['Ġ×Ķ', '׼'] +['v', 'ÃŃ'] +['ש', '×IJר'] 
+['Ġh', 'Æ°á»Łng'] +['Ġb', 'óng'] +['ĠCh', 'ÃŃnh'] +['Äħ', 'c'] +['à¹Ģà¸ģีà¹Īยว', 'à¸ģัà¸ļ'] +['Ġtá»', '©'] +['Ġtứ', 'c'] +['ĠÑĨ', 'веÑĤ'] +['Ġt', 'á»iji'] +['ĠnghÄ©', 'a'] +['ÙĦا', 'عب'] +['د', 'ÙĦ'] +['Ġפע', '×Ŀ'] +['h', 'ör'] +['à¸Ĭ', 'ุà¸Ķ'] +['à¸ŀ', 'ู'] +['à¸ŀู', 'à¸Ķ'] +['п', 'аÑģ'] +['ĠÅŁ', 'u'] +['Ġt', 'Æ°á»Łng'] +['خار', 'ج'] +['Ġâ', 'm'] +['ĠинÑĤеÑĢ', 'еÑģ'] +['ен', 'нÑĭÑħ'] +['×IJ', '׳×Ļ'] +['بد', 'Ø£'] +['ëĿ¼', 'ëĬĶ'] +['ì¹', '´'] +['æĸ¹', 'ãģĮ'] +['ли', 'в'] +['Ġ', 'à¸Ħà¸Ļ'] +['ער', '×ļ'] +['à¸Ĥà¸Ńà¸ĩ', 'à¸Ħุà¸ĵ'] +['п', 'ад'] +['Ġc', 'ạnh'] +['ĠëĤ', '¨'] +['ĠÄij', 'âu'] +['Ġbi', 'á»ĥu'] +['ãĤĤ', 'ãģĤãĤĭ'] +['׾', '×Ĵ'] +['Ġ', 'สำหรัà¸ļ'] +['Ġxu', 'á»ijng'] +['ס', '×ķ'] +['Ġذ', 'ات'] +['ĠÐľ', 'е'] +['ع', 'اÙĦÙħ'] +['×IJ', 'ס'] +['ب', 'ÙĬØ©'] +['Ø´', 'ا'] +['и', 'ем'] +['ĠNg', 'ưá»Ŀi'] +['íĺ', 'ij'] +['Ñģл', 'ов'] +['Ġп', 'а'] +['Ġm', 'ẫu'] +['ĠпÑĢоÑĨ', 'еÑģÑģ'] +['ĠNh', 'Ãł'] +['пÑĢо', 'из'] +['пÑĢоиз', 'вод'] +['à¸łà¸²à¸¢', 'à¹ĥà¸Ļ'] +['Ġ', 'à¸ļาà¸Ĺ'] +['×ŀ', '׳×ķ'] +['ĠоÑĢг', 'ан'] +['רצ', '×ķ'] +['×ķ×ŀ', '×Ļ×Ŀ'] +['Ġyaz', 'ı'] +['Ġd', 'ù'] +['ãĥ¬', 'ãĥ³'] +['ÙĪÙĦ', 'ÙĬ'] +['ย', 'ู'] +['Ġtr', 'ò'] +['à¹Ģà¸ŀ', 'ลà¸ĩ'] +['Ġ×ŀ', '׾×IJ'] +['à¸ķ', 'ล'] +['à¸ķล', 'à¸Ńà¸Ķ'] +['ĠÄij', 'ạt'] +['Ġ×Ĺ×ĵ', 'ש'] +['p', 'óÅĤ'] +['Ġ×ŀ', '×ĵ×Ļ'] +['ujÄħ', 'c'] +['×ŀ׳×Ķ', '׾'] +['Ġש×ij', '×ķ'] +['Ġ×Ķ×ŀש', 'פ×ĺ'] +['Ġ×IJ', '׾×Ķ'] +['ĠÙĪ', 'ذÙĦÙĥ'] +['à¹Ģà¸ŀ', 'ราะ'] +['ĠÄijo', 'Ãłn'] +['Ġíķ¨', 'ê»ĺ'] +['Ġd', 'ục'] +['Ø´', 'ت'] +['Ġ', 'ula'] +['Ġula', 'ÅŁ'] +['Ġqu', 'ý'] +['Ġ×Ķ', '×Ĵ×ĵ×ķ׾'] +['à¸ķัà¹īà¸ĩ', 'à¹ģà¸ķà¹Ī'] +['Ġש', 'ר'] +['Ø´', 'Ùĩد'] +['׳', 'ש×Ļ×Ŀ'] +['à¸ŀ', 'ล'] +['رÙĪ', 'ا'] +['ãĤĮ', 'ãģ¦'] +['Ġн', 'иÑħ'] +['Ġдел', 'а'] +['ãģ§ãģį', 'ãģªãģĦ'] +['ÅĤo', 'ż'] +['×IJ', '×Ĺר'] +['ì', '½Ķ'] +['ãĤ¢', 'ãĥĥãĥĹ'] +['د', 'Ù쨹'] +['Ġti', 'á»ĩn'] +['Ġkh', 'á»ı'] +['Ġkhá»ı', 'e'] +['ĠاÙĦع', 'اÙħØ©'] +['ãģ«', 'ãģĤãĤĭ'] +['ĠÄij', 'á»Ļc'] +['ì¡', '±'] +['Ġc', 'ụ'] +['й', 'ÑĤе'] +['Ġзак', 'он'] +['ĠпÑĢо', 'екÑĤ'] +['ìĸ', '¸'] +['ÙĦ', 'ØŃ'] +['ĠçalÄ±ÅŁ', 'ma'] +['ãĤĴ', 
'ãģĻãĤĭ'] +['Ñħ', 'и'] +['ع', 'اد'] +['Ġ׳', '×ŀצ×IJ'] +['Ġר', '×Ļ'] +['à¸Ńà¸Ńà¸ģ', 'มา'] +['ĠT', 'ôi'] +['Ġth', 'ần'] +['ĠÙĬ', 'ا'] +['ล', 'าย'] +['Ġав', 'ÑĤо'] +['Ġsı', 'ra'] +['ĠÙĥ', 'Ø«ÙĬر'] +['Ùħ', 'ÙĬز'] +['ĠاÙĦع', 'ÙĦÙħ'] +['æĸ¹', 'ãģ¯'] +['×ķ×¢', '×ĵ'] +['Ġобла', 'ÑģÑĤи'] +['×Ļ׾', '×Ļ×Ŀ'] +['ãģĮ', 'åĩº'] +['à¸ĺ', 'ุ'] +['à¸ĺุ', 'ร'] +['à¸ĺุร', 'à¸ģิà¸Ī'] +['ÙĤت', 'ÙĦ'] +['ר×IJ', '×ķ'] +['Ġng', 'u'] +['Ġngu', 'á»ĵn'] +['Ġ', 'มา'] +['Ġпл', 'ан'] +['t', 'ório'] +['Ġcu', 'á»iji'] +['Ñģк', 'ом'] +['ĠاÙĦÙħ', 'اض'] +['ĠاÙĦÙħاض', 'ÙĬ'] +['Ġ×ij×¢', '׾'] +['Ġר', '×ij×Ļ×Ŀ'] +['Ġlu', 'áºŃn'] +['Ùĥ', 'ÙĪ'] +['à¸Ĺัà¹īà¸ĩ', 'หมà¸Ķ'] +['в', 'ан'] +['Ġtho', 'ại'] +['à¹Ħ', 'à¸Ń'] +['б', 'иÑĢ'] +['ĠاÙĦ', 'ض'] +['ت', 'ا'] +['ĠÑĢ', 'од'] +['ĠV', 'Ãł'] +['×ŀ', '×Ļף'] +['ĠбÑĭ', 'ла'] +['к', 'ами'] +['ĠÐĶ', 'е'] +['t', 'ık'] +['קר', '×Ļ'] +['ĠeÄŁ', 'itim'] +['ĠÙĥ', 'بÙĬر'] +['ب', 'Ùĥ'] +['ĠÙĦ', 'ÙĪ'] +['в', 'ой'] +['Ġ', 'ãģĵãģ®'] +['ĠÑĤ', 'ÑĢÑĥд'] +['my', 'ÅĽl'] +['Ġs', 'ư'] +['à¸ŀ', 'ีà¹Ī'] +['Ġ', 'à¹ģลà¹īว'] +['×¢', '×§'] +['Ġ×Ĺ×ijר', 'ת'] +['ระ', 'หว'] +['ระหว', 'à¹Īาà¸ĩ'] +['×Ļ', '×Ļ×Ķ'] +['ĠاÙĦÙĨ', 'اس'] +['ün', 'ü'] +['Ġ׾', '×ŀ×Ķ'] +['Ġch', 'ương'] +['ĠH', 'á»ĵ'] +['ار', 'ت'] +['ãĤĪãģĨ', 'ãģ§ãģĻ'] +['l', 'á'] +['×§×Ļ', '×Ļ×Ŀ'] +['æľ¬', 'å½ĵ'] +['æľ¬å½ĵ', 'ãģ«'] +['ãģĵãĤĵ', 'ãģª'] +['Ñģ', 'ов'] +['Ġ×ķ', '×Ĺ'] +['à¹Ģà¸ģ', 'à¹ĩà¸ļ'] +['Ġк', 'ÑĤо'] +['à¹Ĥร', 'à¸Ħ'] +['ĠØ´', 'رÙĥØ©'] +['ع', 'زÙĬ'] +['عزÙĬ', 'ز'] +['Ø·ÙĦ', 'ÙĤ'] +['п', 'ÑĥÑģÑĤ'] +['Ùģ', 'تØŃ'] +['ëŀ', 'Ģ'] +['Ġhã', 'y'] +['ض', 'Ùħ'] +['ë¦', '°'] +['åł´åIJĪ', 'ãģ¯'] +['ãĤª', 'ãĥ¼'] +['Ġh', 'ắn'] +['Ġ×IJ', '×ij×Ļ×ij'] +['Ġש׾×Ķ', '×Ŀ'] +['Ġ×Ķ×Ļ', '×Ļת×Ķ'] +['ĠاÙĦد', 'ÙĪÙĦØ©'] +['ĠاÙĦ', 'ÙĪÙĤ'] +['ĠاÙĦÙĪÙĤ', 'ت'] +['ãģĤ', 'ãģ¾ãĤĬ'] +['Ġta', 'ÅŁÄ±'] +['İ', 'N'] +['×¢', 'סק'] +['ãģ¦', 'ãģĦãģŁ'] +['Ġtá»ķ', 'ng'] +['ĠاÙĦØ¥', 'ÙĨس'] +['ĠاÙĦØ¥ÙĨس', 'اÙĨ'] +['ÑĢ', 'еÑĪ'] +['Ġg', 'ái'] +['ĠÑĨ', 'ен'] +['ĠÙģ', 'ÙĤد'] +['Ùħ', 'ات'] +['ãģķãĤĵ', 'ãģ®'] +['Ġph', 'ù'] +['×ĺ', '×Ķ'] +['ĠÙĪØ§ÙĦ', 'تÙĬ'] +['Ġب', 'Ùĥ'] +['ìĿ´', 'ëĤĺ'] 
+['к', 'Ñģ'] +['Ùħ', 'ÙĬر'] +['Ġv', 'ùng'] +['ĠاÙĦØ´', 'عب'] +['ĠNh', 'ưng'] +['ãĥĢ', 'ãĥ¼'] +['Ġ×Ĺ×Ļ', '×Ļ×Ŀ'] +['ĠØ´', 'خص'] +['×§', '×ķ×ĵ'] +['ê²', 'Ģ'] +['×¢', 'ש'] +['×¢', '×ķ׾×Ŀ'] +['צ', '×ķר'] +['ع', 'ÙĤد'] +['ĠiÅŁ', 'lem'] +['Ġ×Ķ×ij', '×IJ'] +['Ġd', 'ưỡng'] +['à¸Ł', 'รี'] +['Ġph', 'ÃŃa'] +['ãģ®ä¸Ń', 'ãģ§'] +['Ġп', 'и'] +['Ġng', 'Ãłnh'] +['ним', 'а'] +['ĠÙĩ', 'ÙĦ'] +['Ġ×ķ', '×IJת'] +['ĠÄij', 'áng'] +['é', 'quipe'] +['ĠÑįÑĤ', 'оÑĤ'] +['Ġgö', 'rev'] +['ë§', '¤'] +['Ġqu', 'ân'] +['å¼ķ', 'ãģį'] +['æĻĤ', 'ãģ«'] +['Ġب', 'Ùħا'] +['×ŀ', '×Ļת'] +['Ġü', 'lke'] +['Ġ×ŀ×§', '×ķ×Ŀ'] +['×ij', 'ף'] +['æ°Ĺ', 'æĮģãģ¡'] +['Ġë§İ', 'ìĿĢ'] +['Ġyük', 'sek'] +['ÑĨ', 'енÑĤÑĢ'] +['ĠÙħ', 'جÙĦس'] +['ç§ģ', 'ãģ®'] +['ÙĤد', 'ر'] +['Ġë¶Ģ', 'ë¶Ħ'] +['Ġì°', '¨'] +['خر', 'ج'] +['ãģĭ', 'ãģªãĤĬ'] +['ë³´', 'ëĭ¤'] +['Ġ×ŀ', '×Ļ×ĵ×¢'] +['peÅĤ', 'ni'] +['Ġx', 'á»Ń'] +['ìĹIJìĦľ', 'ëĬĶ'] +['ĠباÙĦ', 'Ùħ'] +['ĠÙĪ', 'Ùħا'] +['ĠÑįÑĤ', 'ой'] +['ب', 'ÙĬÙĨ'] +['n', 'ü'] +['ØŃ', 'ز'] +['ØŃز', 'ب'] +['ĠÑĢабоÑĤ', 'а'] +['ĠNh', 'áºŃt'] +['ÙĦ', 'اء'] +['Ġëĵ', '¤'] +['Ġëĵ¤', 'ìĸ´'] +['ãĤĦãģĻ', 'ãģĦ'] +['×Ĺ×ĸ', '×§'] +['Ġ×Ķ×Ĺ', '×ijר×Ķ'] +['п', 'иÑĤ'] +['ãģĭãĤī', 'ãģ®'] +['Ġë§IJ', 'ìĶĢ'] +['Ġפ', '×ķ'] +['ÙĦ', 'Ùİ'] +['à¹Ģà¸ķà¹ĩ', 'ม'] +['ĠÐļ', 'о'] +['Ġm', 'ówi'] +['Ġt', 'ÃŃn'] +['ר×Ĵ', 'ש'] +['פר', '×§'] +['Ġtr', 'ạng'] +['ĠÐŀ', 'н'] +['×Ĺ', '×ķ×¥'] +['ĠعÙĨد', 'Ùħا'] +['Ġب', 'ر'] +['使', 'ãģĦ'] +['Ġr', 'á»Ļng'] +['ëĮĢ', 'ë¡ľ'] +['íĪ', '¬'] +['Ġktóry', 'ch'] +['в', 'ид'] +['ลูà¸ģ', 'à¸Ħà¹īา'] +['Ġmog', 'Äħ'] +['Ġש', '×Ĺ'] +['×ij', '×Ĺר'] +['ãĥĸ', 'ãĥŃãĤ°'] +['ĠTh', 'Ãłnh'] +['Ġ×Ķ', 'ר×Ļ'] +['ĠÑģÑĤ', 'аÑĤÑĮ'] +['ĠH', 'á»Ļi'] +['à¸ļ', 'à¹īาà¸ĩ'] +['çī¹', 'ãģ«'] +['ĠÄIJ', 'ức'] +['èĢħ', 'ãģ®'] +['×¢', '×ŀ×ķ×ĵ'] +['×ĺר', '×Ķ'] +['Ð', '¥'] +['ĠÙħ', 'Ùħا'] +['Ġe', 'ÅŁ'] +['ĠнеобÑħодим', 'о'] +['ник', 'ов'] +['Ġüzer', 'inde'] +['a', 'ÅĤa'] +['Ġchá»ĭ', 'u'] +['ĠاÙĦ', 'دÙĬÙĨ'] +['أخ', 'بار'] +['ĠÄij', 'au'] +['ãģĮ', 'å¤ļãģĦ'] +['jÄħ', 'cych'] +['د', 'Ø®ÙĦ'] +['ları', 'nd'] +['larınd', 'an'] +['Ġs', 'ẻ'] +['à¸ŀิ', 'à¹Ģศ'] 
+['à¸ŀิà¹Ģศ', 'ษ'] +['ת', 'ף'] +['t', 'ıģı'] +['Ġlu', 'áºŃt'] +['ĠÅŀ', 'e'] +['ãĤ«', 'ãĥ¼'] +['ãģ®', 'ãģĤãĤĭ'] +['Ġ×Ķ×IJ', 'תר'] +['ĠاÙĦØ¢', 'ÙĨ'] +['ıld', 'ı'] +['Ġá', 'o'] +['ĠнаÑĩ', 'ал'] +['Ġvi', 'á»ĩn'] +['Ġ×ij×¢', '×ķ׾×Ŀ'] +['з', 'наÑĩ'] +['×Ļ×ĺ', '×Ķ'] +['к', 'ам'] +['ĠÐĺ', 'з'] +['à¹Ģà¸Ĥ', 'ียà¸Ļ'] +['à¸Ļ', 'à¹īà¸Ńà¸ĩ'] +['ÑĤ', 'ÑĢо'] +['à¹Ģ', 'à¸Ł'] +['Ġжиз', 'ни'] +['Ġ', 'สà¹Īวà¸Ļ'] +['Ġv', 'áºŃn'] +['Ġê´Ģ', '볨'] +['Ġl', 'âu'] +['ס', '×ĺר'] +['×§', 'ש'] +['س', 'ÙĬر'] +['Ġ×IJ×ķת', '×Ļ'] +['Ġm', 'ôi'] +['ائ', 'ب'] +['Ġо', 'ÑģÑĤа'] +['Ġm', 'ón'] +['Ġ×ij', '×ŀ×§×ķ×Ŀ'] +['Ġد', 'اخÙĦ'] +['Ġ×IJ', '×ķר'] +['Ġв', 'аÑģ'] +['Ùĥ', 'Ø´Ùģ'] +['ìĺ', '¨'] +['à¸ĸ', 'à¹Īาย'] +['Ġkullan', 'ıl'] +['Ġt', 'ô'] +['ãģ«', 'ãĤĪãĤĬ'] +['ĠëĺIJ', 'íķľ'] +['Ġ×¢×ij×ķ×ĵ', '×Ķ'] +['Ġri', 'ê'] +['Ġriê', 'ng'] +['Ġyak', 'ın'] +['ز', 'ا'] +['Å', '»'] +['×IJ', '×ķ׼׾'] +['شار', 'Ùĥ'] +['Ġб', 'еÑģ'] +['×', '´'] +['Ġا', 'بÙĨ'] +['ĠTá»ķ', 'ng'] +['ÙĨ', 'ظ'] +['ÅĽwi', 'ad'] +['ãĤµ', 'ãĥ¼'] +['ห', 'าย'] +['ĠG', 'ün'] +['Ġhakk', 'ında'] +['à¹Ģà¸Ĥà¹īา', 'มา'] +['ز', 'ÙĨ'] +['ĠÐł', 'о'] +['Ġbi', 'á»ĥn'] +['ãģ©', 'ãģĵ'] +['Ùģ', 'عÙĦ'] +['ز', 'ع'] +['פר', '×ĺ'] +['Ġ×Ķ', 'ף'] +['Ø£', 'ÙĩÙĦ'] +['Ġth', 'ất'] +['ØŃ', 'ÙħÙĦ'] +['Ñĩ', 'Ñĥ'] +['ĠìĤ¬', 'ìĭ¤'] +['ì°', '¸'] +['ĠìľĦ', 'íķ´'] +['ÙĪ', 'ظ'] +['ĠÐŁ', 'од'] +['Ġkho', 'ản'] +['ÑĤ', 'ен'] +['ĠÙģ', 'اÙĦ'] +['Ñģ', 'ад'] +['à¸Ļ', 'à¸Ńà¸Ļ'] +['ĠاÙĦسعÙĪØ¯', 'ÙĬØ©'] +['"', 'ØĮ'] +['ĠاÙĦ', 'ÙĴ'] +['ãĤī', 'ãģļ'] +['Ġto', 'án'] +['Ġch', 'ắc'] +['׼', '×Ļר'] +['m', 'éd'] +['méd', 'ia'] +['ز', 'ÙĪ'] +['Ġyan', 'ı'] +['פ', '׳×Ļ×Ŀ'] +['ØŃ', 'ظ'] +['Ġб', 'еÑģп'] +['ĠбеÑģп', 'лаÑĤ'] +['ĠбеÑģплаÑĤ', 'но'] +['ĠØ£', 'ÙħاÙħ'] +['à¸Ń', 'าย'] +['à¸Ńาย', 'ุ'] +['ר', 'שת'] +['Ġg', 'á»ĵ'] +['Ġgá»ĵ', 'm'] +['Ġu', 'á»ijng'] +['ص', 'ب'] +['k', 'ır'] +['ãĥij', 'ãĥ¼'] +['Ġ׾×ĵ', 'עת'] +['Ġк', 'ÑĥпиÑĤÑĮ'] +['׾', '×ķ×Ĺ'] +['ÙĪØ¶', 'ع'] +['ÙĤÙĬ', 'Ùħ'] +['à¸Ľ', 'า'] +['ж', 'ив'] +['à¸Ķ', 'ิà¸Ļ'] +['×IJ', '×ķפ'] +['à¹Ģล', 'à¹ĩà¸ģ'] +['ãĥĥ', 'ãĥī'] +['иÑĩеÑģки', 'Ñħ'] +['ĠCh', 'á»§'] 
+['кÑĢ', 'аÑģ'] +['ÙĪ', 'صÙĦ'] +['p', 'ÅĤat'] +['м', 'оÑĢ'] +['Ġ×Ķ×IJ', '×ķ'] +['à¸Ń', 'ิà¸Ļ'] +['Ġíķľ', 'êµŃ'] +['гÑĢ', 'е'] +['Ġìłľ', 'ê³µ'] +['ì°', '½'] +['Ġê°ľìĿ¸', 'ìłķë³´'] +['Ġngh', 'á»ĭ'] +['à¸ĭ', 'า'] +['ØŃس', 'اب'] +['Ġby', 'ÅĤa'] +['ÙħÙĦ', 'Ùĥ'] +['иÑĩеÑģки', 'е'] +['Ġb', 'ác'] +['ض', 'ØŃ'] +['ê¸', '¸'] +['ש', '×ŀ×¢'] +['Ġìĸ´ëĸ', '»'] +['Ġìĸ´ëĸ»', 'ê²Į'] +['ìĽ', 'Į'] +['ات', 'Ùĩ'] +['à¹Ĥรà¸ĩ', 'à¹ģ'] +['à¹Ĥรà¸ĩà¹ģ', 'รม'] +['خد', 'ÙħØ©'] +['ĠÐł', 'а'] +['׼×ķ׾', '×Ŀ'] +['×ŀש', '×Ĺ×§'] +['ĠÙĪ', 'ÙĥاÙĨ'] +['ס', '×ķ×£'] +['ĠاÙĦØŃÙĥÙĪÙħ', 'Ø©'] +['Ġ×ij', '×ĺ'] +['Ġtr', 'áºŃn'] +['Ġ×Ķ×¢', '×ķ׾×Ŀ'] +['ĠÃŃ', 'ch'] +['t', 'Äħ'] +['ש×ŀ', '×ķ'] +['Ġ×Ķר×IJש', '×ķף'] +['Ġíķĺ', 'ê³ł'] +['ãģķ', 'ãĤī'] +['ãģķãĤī', 'ãģ«'] +['ãģ«', 'ãģĹãģ¦'] +['Ġ', 'à¸ľà¸¡'] +['ãģ®', 'ãĤĪãģĨãģª'] +['ĠÙĪ', 'ÙĤت'] +['ãĥį', 'ãĥĥãĥĪ'] +['ÙĦ', 'عب'] +['ÙĪ', 'Ø´'] +['ìĺ', '¬'] +['Ġ', 'หาà¸ģ'] +['Ġm', 'iaÅĤ'] +['à¸Ĺ', 'à¸Ńà¸ĩ'] +['иÑĤ', 'а'] +['ا', 'صر'] +['ил', 'ÑģÑı'] +['з', 'е'] +['à¸Ľà¸£à¸°', 'มาà¸ĵ'] +['ãģĿãĤĮ', 'ãģ¯'] +['Ġb', 'ır'] +['Ġbır', 'ak'] +['صÙĨ', 'اع'] +['Ð', '®'] +['Ø´', 'عر'] +['Ġ׳', '×Ĵ×ĵ'] +['Ġب', 'سبب'] +['ãĥĿ', 'ãĤ¤'] +['ãĥĿãĤ¤', 'ãĥ³ãĥĪ'] +['ĠاÙĦج', 'ÙĪ'] +['ĠнеÑģк', 'олÑĮко'] +['Ġki', 'ếm'] +['Ùģ', 'Ùİ'] +['Ġض', 'د'] +['×ij×Ļ×ĺ', '×ķ×Ĺ'] +['تاب', 'ع'] +['ÙĨ', 'ز'] +['ĠB', 'ản'] +['Ġaç', 'ıkl'] +['Ġaçıkl', 'ama'] +['Ġ', 'à¸Ħุà¸ĵ'] +['à¸Ĺ', 'า'] +['ÅĤ', 'ów'] +['Ø·', 'ب'] +['ÙĨ', 'ØŃÙĨ'] +['Ġ×ŀ×§', '×ķר'] +['Ġİ', 's'] +['Ġдом', 'а'] +['Ġ', 'วัà¸Ļ'] +['Ġd', 'Ãłnh'] +['Ñı', 'н'] +['ми', 'ÑĢ'] +['Ġm', 'ô'] +['ĠvÃł', 'ng'] +['ص', 'اب'] +['s', 'ının'] +['à¸Ħ', 'ืà¸Ļ'] +['Ø®', 'بر'] +['×ĸ׼', '×ķ'] +['Ġ×ŀ', 'ש×Ķ×ķ'] +['m', 'ü'] +['Ġкомпани', 'и'] +['Ġ×Ķ×¢', '×Ļר'] +['ĠÙĥ', 'ÙĪ'] +['ÙĤÙĦ', 'ب'] +['ĠlỼ', 'p'] +['и', 'ки'] +['׳', '×ij'] +['à¹Ĥ', 'à¸Ħร'] +['à¹Ĥà¸Ħร', 'à¸ĩ'] +['à¹Ĥà¸Ħรà¸ĩ', 'à¸ģาร'] +['×ŀ×ķ×¢', '×ĵ'] +['ÑıÑĤ', 'ÑģÑı'] +['หลัà¸ĩ', 'à¸Īาà¸ģ'] +['ени', 'Ñİ'] +['Ġש', '×¢'] +['Ġb', 'Æ°á»Ľc'] +['ãĥ¡', 'ãĥ¼ãĥ«'] +['ãĤĦ', 'ãĤĬ'] +['Ġ×Ļ×ķ×ĵ', '×¢'] +['Ġê´Ģ', 'íķľ'] +['ĠاÙĦØ£', 
'Ùħر'] +['Ġböl', 'ge'] +['ĠÑģв', 'ой'] +['ÙĦ', 'س'] +['Ġ×ŀ×Ļ', '×ķ×Ĺ×ĵ'] +['ĠëĤ´', 'ìļ©'] +['ĠØ£', 'جÙĦ'] +['ĠÄIJ', 'ông'] +['Ġ×ŀ', '×ł×ª'] +['Ġìĭľ', 'ê°Ħ'] +['Ùĥ', 'Ùİ'] +['ãģ¨ãģĦãģĨ', 'ãģ®ãģ¯'] +['Ġnale', 'ży'] +['تÙĨظ', 'ÙĬÙħ'] +['ĠÑģозд', 'а'] +['Ġph', 'é'] +['Ġphé', 'p'] +['ãģ§ãģį', 'ãģ¾ãģĻ'] +['Ġع', 'ÙĦÙħ'] +['大ãģį', 'ãģª'] +['ãĤ²', 'ãĥ¼ãĥł'] +['í', 'ħĮ'] +['Ġ׼×ķ׾', '׾'] +['ĠинÑĤеÑĢ', 'неÑĤ'] +['ĠT', 'ừ'] +['ãģ¨', 'ãģªãĤĭ'] +['ز', 'اÙĦ'] +['Ġktóry', 'm'] +['Ġnh', 'é'] +['ìĪ', 'ľ'] +['н', 'ев'] +['д', 'еÑĢ'] +['ãĤ¢', 'ãĥĹãĥª'] +['i', 'á»ĩu'] +['×ij', '×Ļ׾'] +['Ġت', 'س'] +['ĠÄIJ', 'ây'] +['ĠاÙĦØ®', 'اصة'] +['Ġà¹Ģ', 'à¸Ĭ'] +['Ġà¹Ģà¸Ĭ', 'à¹Īà¸Ļ'] +['ص', 'اد'] +['Ġd', 'ạng'] +['س', 'عر'] +['Ġש', '×Ļ×ŀ×ķש'] +['×Ĵ', '×Ļ×Ŀ'] +['ãģĮãģĤ', 'ãģ£ãģŁ'] +['п', 'ÑĢов'] +['пÑĢов', 'од'] +['Ġ×IJ', '×Ļ׳×ķ'] +['Ġ׾', 'ר×IJ'] +['Ġ׾ר×IJ', '×ķת'] +['ĠØ£', 'Ù쨶ÙĦ'] +['ĠØŃ', 'ÙĦ'] +['ĠØ£', 'بÙĪ'] +['ê°', 'ķ'] +['Ġì§', 'ij'] +['ãģ®', 'ãĤĪãģĨãģ«'] +['Ġפ', '׳×Ļ'] +['ס', '×Ļ×Ŀ'] +['ĠÙĪÙĩ', 'ذا'] +['Ġka', 'ç'] +['Ġé', 'én'] +['Ġê±', '´'] +['ë°', 'Ķ'] +['Ñĥ', 'з'] +['à¸Ĥà¸Ńà¸ĩ', 'à¹Ģรา'] +['i', 'ÅĤ'] +['ĠÐľ', 'Ñĭ'] +['Ġch', 'ết'] +['ĠاÙĦØ«', 'اÙĨÙĬ'] +['×IJ', '×§'] +['Ġ×ķ', '×¢×ľ'] +['ĠاÙĦØ·', 'ب'] +['×ij×ĺ', '×Ĺ'] +['Ġج', 'دÙĬدة'] +['Ġع', 'دÙħ'] +['ع', 'ز'] +['สิà¹Īà¸ĩ', 'à¸Ĺีà¹Ī'] +['ãģĻ', 'ãĤĮãģ°'] +['ĠÄij', 'ô'] +['ì£', 'ł'] +['د', 'ÙĤ'] +['н', 'омÑĥ'] +['Ġk', 'á»ĥ'] +['ãĤ¢', 'ãĥ³'] +['å¤ļãģı', 'ãģ®'] +['à¸Ľà¸£à¸°', 'à¸ģ'] +['à¸Ľà¸£à¸°à¸ģ', 'à¸Ńà¸ļ'] +['פע×Ļ׾', '×ķת'] +['ĠÑģÑĤ', 'ол'] +['may', 'ı'] +['ãģ¤', 'ãģĦ'] +['Ġyılı', 'nda'] +['Ġ', 'à¸Īึà¸ĩ'] +['koÅĦ', 'cz'] +['ĠTh', 'ông'] +['Ġак', 'ÑĤив'] +['н', 'ÑģÑĤ'] +['нÑģÑĤ', 'ÑĢÑĥ'] +['ĠÃĸ', 'z'] +['Ġת', '×ŀ×Ļ×ĵ'] +['ĠÙĥ', 'ÙĨت'] +['Ñģ', 'иÑģÑĤем'] +['pr', 'és'] +['prés', 'ent'] +['Ġn', 'â'] +['Ġnâ', 'ng'] +['gÅĤ', 'os'] +['ĠÙĪØ²', 'ÙĬر'] +['ØŃ', 'صÙĦ'] +['Ġиме', 'еÑĤ'] +['ØŃ', 'رÙĥØ©'] +['à¸ŀ', 'à¹Īà¸Ń'] +['ãĤĴ', 'ãģĬ'] +['Ġاست', 'خداÙħ'] +['×IJ×Ļר', '×ķ×¢'] +['ä»ĸ', 'ãģ®'] +['Ġש×Ķ', '×Ŀ'] +['ãģĹãģŁ', 'ãĤī'] +['ש×ŀ', '×Ļ'] +['Ñģ', 'ла'] 
+['m', 'ı'] +['Ġbaz', 'ı'] +['Ġíķĺ', 'ì§Ģë§Į'] +['×ĵ', '׾'] +['Ġyapt', 'ıģı'] +['ãĥĬ', 'ãĥ¼'] +['׾', '×Ļ׾×Ķ'] +['ãģ¨ãģĦ', 'ãģ£ãģŁ'] +['änd', 'ig'] +['ĠÅŁ', 'a'] +['ĠÙģÙĬ', 'Ùħا'] +['иÑĤ', 'елÑı'] +['×ŀ', '×ķש'] +['à¸Ĥ', 'à¸Ńà¸ļ'] +['l', 'ük'] +['Ġh', 'á»ĵi'] +['Ġëª', 'ħ'] +['ĠاÙĦÙĥ', 'Ø«ÙĬر'] +['צ', '×IJ'] +['Ġhaz', 'ır'] +['طر', 'Ùģ'] +['ا', 'ÙĬا'] +['ĠÄij', 'ôi'] +['ен', 'д'] +['ÙĦ', 'غ'] +['×Ĺ', '×ĸ×ķר'] +['ĠвÑģ', 'ег'] +['ĠвÑģег', 'да'] +['ëIJĺ', 'ê³ł'] +['×ĵ', '×ķ×ĵ'] +['ан', 'а'] +['د', 'ÙĪÙĦØ©'] +['Ġho', 'ạch'] +['ع', 'ÙĦا'] +['عÙĦا', 'ج'] +['Ġ×ķ', '×¢×ĵ'] +['×Ķ', '×Ŀ'] +['ки', 'й'] +['ÙĦ', 'ÙIJ'] +['Ġ×¢', '׾×Ļ×ķ'] +['ÑİÑī', 'ий'] +['Ġng', 'á»§'] +['صÙĨ', 'ع'] +['ĠاÙĦع', 'راÙĤ'] +['à¸ķà¹Īà¸Ń', 'à¹Ħà¸Ľ'] +['ãģŁãģı', 'ãģķãĤĵ'] +['Ġph', 'ạm'] +['ÙĦ', 'اÙĨ'] +['ات', 'Ùĩا'] +['Ġbö', 'yle'] +['تÙĨ', 'ÙģÙĬ'] +['تÙĨÙģÙĬ', 'ذ'] +['Ġש×Ķ', '×Ļ×IJ'] +['Ñģ', 'Ñĥ'] +['ย', 'าว'] +['Ġש', '×ķ׳×Ļ×Ŀ'] +['Ġ×ŀ', '×ķ׾'] +['ĠÑģ', 'ил'] +['Ġ×IJ×Ĺר', '×Ļ×Ŀ'] +['Ġph', 'á»§'] +['ÙĤØ·', 'ع'] +['ĠTh', 'á»§'] +['à¸Ľà¸£à¸°à¹Ģà¸Ĺศ', 'à¹Ħà¸Ĺย'] +['ÙĨ', 'ÙĤ'] +['ĠÄijo', 'ạn'] +['Ġب', 'Ø¥'] +['п', 'ÑĢедел'] +['×ķת', '×ķ'] +['Ġy', 'arı'] +['пÑĢ', 'е'] +['ĠczÄĻ', 'ÅĽci'] +['ØŃ', 'ÙĥÙħ'] +['×ķ׳', '×Ļת'] +['פע', '׾'] +['ãĤĴ', 'ãģĹãģ¦'] +['Ġktó', 'rzy'] +['׾', '×Ŀ'] +['ĠÄIJi', 'á»ģu'] +['ĠкоÑĤоÑĢ', 'аÑı'] +['ĠìĿ´', 'ìĥģ'] +['ãģĤ', 'ãģ£ãģŁ'] +['Ġ×ŀ×ĵ', '×ķ×ijר'] +['פ', '×ķ×¢×ľ'] +['d', 'ım'] +['éĢļ', 'ãĤĬ'] +['ĠбÑĥд', 'ÑĥÑĤ'] +['à¹Ģวà¹ĩà¸ļ', 'à¹Ħà¸ĭ'] +['à¹Ģวà¹ĩà¸ļà¹Ħà¸ĭ', 'à¸ķà¹Į'] +['ا', 'خر'] +['×Ĺ', '×Ļ׾'] +['Ġ×Ļ', '׾'] +['Ġ×Ļ׾', '×ĵ×Ļ×Ŀ'] +['×Ĺ', '×Ļפ'] +['×Ĺ×Ļפ', '×ķש'] +['Ġd', 'òng'] +['Ġש', '×ĸ×Ķ'] +['ÑĮ', 'е'] +['ãģĤ', 'ãģ¨'] +['ìŀIJ', 'ê°Ģ'] +['×IJ', '×ĵ'] +['Ġü', 'z'] +['Ġüz', 'ere'] +['ظ', 'ÙĦ'] +['Ġ×IJ', '×ķ׾×Ļ'] +['Ġ×ij', '×Ļ×ķ×Ŀ'] +['ÙĦ', 'ات'] +['Ġm', 'ê'] +['ì¹', '¨'] +['تØŃ', 'د'] +['تØŃد', 'Ø«'] +['ĠØ®', 'اصة'] +['Ġب', 'رÙĨ'] +['ĠبرÙĨ', 'اÙħج'] +['ĠH', 'Ãłn'] +['×Ĺ', 'ס'] +['ĠÙĪ', 'ÙĦÙħ'] +['×¢', '×Ŀ'] +['Ġm', 'ı'] +['à¸Ł', 'ัà¸ĩ'] +['ש', '×¢×Ķ'] +['ÙĪÙģ', 'ÙĤ'] +['ס', '×ij×Ļר'] 
+['алÑĮ', 'нÑĭй'] +['×Ĺש', '×ķ×ij'] +['Ġn', 'Ãłng'] +['ë³', '¼'] +['ĠкоÑĤоÑĢ', 'ÑĭÑħ'] +['Ġ×Ĺ', '×ķ×§'] +['t', 'ör'] +['ĠлÑĥÑĩ', 'ÑĪе'] +['ãĥij', 'ãĥ³'] +['ลà¹Īา', 'สุà¸Ķ'] +['Ġج', 'دÙĬد'] +['ÙĬد', 'Ø©'] +['à¸Ĺ', 'รà¸ĩ'] +['ãĤĪãĤĬ', 'ãĤĤ'] +['ÙĦ', 'ÙĦ'] +['ãĤĤ', 'ãģ£ãģ¨'] +['ש×ĺ', '×Ĺ'] +['Ġ×ķ', '×IJ×Ļ'] +['Ġgi', 'á»ijng'] +['Ø¥', 'ضاÙģ'] +['×§', 'ת'] +['ë§', 'Ŀ'] +['Ġzosta', 'ÅĤ'] +['ÑĢ', 'оз'] +['×Ļפ', '×Ļ×Ŀ'] +['Ġ׼׾', '׾'] +['ת×ķ׼', 'ף'] +['dıģ', 'ını'] +['ÙĤ', 'سÙħ'] +['ĠÑģ', 'ÑĩиÑĤ'] +['ĠÑģÑĩиÑĤ', 'а'] +['×ĺ', '×ķת'] +['Ġ', 'ưu'] +['ĠØ¢', 'ÙĦ'] +['Ġм', 'ом'] +['Ġмом', 'енÑĤ'] +['ĠاÙĦتع', 'ÙĦÙĬÙħ'] +['×¢×ľ', '×ķת'] +['Ġch', 'ữa'] +['Ġy', 'ön'] +['Ġtr', 'Ãł'] +['ĠØŃ', 'ÙĬÙĨ'] +['à¸ĭ', 'ั'] +['ĠC', 'á'] +['×¢', '×ĸ'] +['ĠاÙĦØ£', 'ÙħÙĨ'] +['c', 'ÃŃ'] +['Ġv', 'á»ijn'] +['Ġ', 'à¸Ļาย'] +['об', 'ÑĢа'] +['×§', '×IJ'] +['Ġthi', 'ếu'] +['ãĥŀ', 'ãĥ¼'] +['ส', 'วà¸Ļ'] +['Ġg', 'á»Ń'] +['Ġgá»Ń', 'i'] +['Ġê', '¹'] +['Ġê¹', 'Ģ'] +['Ġthi', 'á»ĩn'] +['ÙĤ', 'ع'] +['w', 'ÄĻ'] +['Ġн', 'ам'] +['ÑĤ', 'ол'] +['Ġs', 'ân'] +['ס', '×ķ×Ĵ'] +['Ġgeç', 'ir'] +['ÑĤ', 'он'] +['ев', 'а'] +['ĠÙĪ', 'ضع'] +['Ġع', 'شر'] +['Ñģ', 'ло'] +['à¸Ī', 'ัà¸ļ'] +['ãĤ·', 'ãĥ¼'] +['ãĤĤ', 'ãģĤãĤĬãģ¾ãģĻ'] +['Ġv', 'ẻ'] +['ĠÄIJ', 'á»ĥ'] +['ر', 'Ù쨹'] +['ĠاÙĦØ£ÙĪÙĦ', 'Ùī'] +['ÑĤ', 'аÑĢ'] +['ãģªãģı', 'ãģ¦'] +['Ùħ', 'Ùİ'] +['qu', 'ÃŃ'] +['×¢×ł×Ļ', '×Ļ׳'] +['г', 'ен'] +['Ġh', 'ôm'] +['à¸Ī', 'า'] +['Ġnh', 'Ỽ'] +['ĠاÙĦع', 'ربÙĬ'] +['×IJ', 'ף'] +['Ġl', 'á»Ļ'] +['Ġje', 'ÅĽli'] +['à¹Ģà¸Ĺà¹Īา', 'à¸Ļัà¹īà¸Ļ'] +['ĠØ£ÙĨ', 'Ùĩا'] +['Ġt', 'uy'] +['Ġtuy', 'á»ĩt'] +['Ġت', 'ص'] +['Ġتص', 'ÙĨÙĬ'] +['ĠتصÙĨÙĬ', 'Ùģ'] +['Ġê·¸ëŁ¬', 'ëĤĺ'] +['о', 'ÑĨен'] +['à¸ģิà¸Ī', 'à¸ģรรม'] +['ãĤĦ', 'ãģ£ãģ¦'] +['Ġkh', 'á»ıi'] +['Ġl', 'á»ĩ'] +['ĠاÙĦÙħج', 'تÙħع'] +['à¸Ńาà¸Ī', 'à¸Īะ'] +['à¸Īะ', 'à¹Ģà¸Ľà¹ĩà¸Ļ'] +['ов', 'Ñĭй'] +['ר', '×Ŀ'] +['ร', 'à¹īà¸Ńà¸Ļ'] +['ש', '×ŀש'] +['人', 'ãģ«'] +['Ġüzer', 'ine'] +['פר', '×Ļ'] +['du', 'ÄŁu'] +['Ñĩ', 'ик'] +['Ġmù', 'a'] +['Ġ×ŀת', '×ķ×ļ'] +['Ġc', 'áºŃp'] +['Ġت', 'ارÙĬØ®'] +['×ij׾', 'ת×Ļ'] +['Ġì¢', 'Ģ'] +['ÙĦ', 'ع'] 
+['ب', 'اÙĨ'] +['Ġch', 'út'] +['Ġ×Ķ×ĸ', '×ŀף'] +['n', 'ée'] +['ĠLi', 'ên'] +['ĠÙĦÙĦ', 'Ø£'] +['ØŃد', 'ÙĪØ¯'] +['Ġ×¢', '׼ש×Ļ×ķ'] +['в', 'оз'] +['Ġyapt', 'ı'] +['Ġоб', 'о'] +['à¹ĥหà¹ī', 'à¸ģัà¸ļ'] +['Ġ×ij×Ķ', '×Ŀ'] +['ãģı', 'ãģ¦'] +['ر', 'أس'] +['ĠÑģÑĢед', 'ÑģÑĤв'] +['ĠB', 'Ãłi'] +['ãģĵãģ¨', 'ãģ«'] +['ĠìĤ¬', 'íļĮ'] +['Ġ모', 'ëijIJ'] +['×ij', '×IJ'] +['Ġtr', 'ắng'] +['ĠاÙĦبÙĦ', 'د'] +['ĠHo', 'Ãłng'] +['ли', 'бо'] +['ĠдÑĢÑĥг', 'иÑħ'] +['İ', 'R'] +['Ñĥм', 'а'] +['ĠJe', 'ÅĽli'] +['ãĤĤ', 'ãģĹ'] +['Ġv', 'òng'] +['Ġ×IJתר', '×Ļ×Ŀ'] +['ĠÄij', 'á»įc'] +['Ġв', 'оÑĤ'] +['ãģł', 'ãģĮ'] +['ë°', '°'] +['à¸Ķู', 'à¹ģล'] +['Ġ×ŀ', '׼׾'] +['ìĹIJ', 'ëıĦ'] +['г', 'аз'] +['Ġ׳×ķס', 'פ×Ļ×Ŀ'] +['ãģĵãģ¨', 'ãģ§'] +['Ġت', 'ÙĪ'] +['ãģ§', 'ãģĤãĤĬ'] +['à¸Ļั', 'à¹Īà¸ĩ'] +['ĠможеÑĤ', 'е'] +['sz', 'ÄĻ'] +['ãģ®', 'ãģł'] +['ĠÙħÙĨ', 'Ùĩ'] +['Ġb', 'á»ķ'] +['Ġb', 'üt'] +['Ġbüt', 'ün'] +['ë³´', 'ê³ł'] +['Ġch', 'á»ĵng'] +['à¹ģà¸Ī', 'à¹īà¸ĩ'] +['ĠV', 'ì'] +['ĠØŃ', 'ر'] +['Ġgi', 'ản'] +['ĠÙħ', 'دÙĬÙĨØ©'] +['تط', 'بÙĬÙĤ'] +['à¸Ī', 'ิ'] +['æĹ¥', 'ãģ®'] +['б', 'ил'] +['à¸ģ', 'à¸Ńà¸ĩ'] +['ê³', '³'] +['ĠØ£', 'Ùħا'] +['ìĨ', 'IJ'] +['Ġtr', 'ái'] +['ĠвÑģ', 'ем'] +['Ġس', 'ÙĨØ©'] +['ĠÑģай', 'ÑĤ'] +['Ġг', 'оÑĤов'] +['п', 'Ñĭ'] +['ĠëIJ', 'ł'] +['ĠاÙĦØ®', 'Ø·'] +['ĠاÙĦرئÙĬس', 'ÙĬØ©'] +['Ġíķ', '©ëĭĪëĭ¤'] +['ĠìķĦëĭĪ', 'ëĿ¼'] +['ĠìĿ´', 'ëłĩ'] +['ĠìĿ´ëłĩ', 'ê²Į'] +[')', 'ØĮ'] +['h', 'ält'] +['ĠØ£', 'Ùħر'] +['Ġع', 'Ùħر'] +['à¸ģà¹ĩ', 'à¸Īะ'] +['Ġ', 'à¸Ĺำà¹ĥหà¹ī'] +['Ġc', 'ân'] +['Ġ×ij', '׾'] +['Ġ×ij׾', '×ij×ĵ'] +['פ', 'סק'] +['ĠÙĬ', 'ÙĤÙĪÙĦ'] +['н', 'ÑĥÑĤÑĮ'] +['à¹ģ', 'à¸Ħ'] +['Ġ×§', 'צת'] +['Ġn', 'ằm'] +['Ġh', 'òa'] +['bilit', 'Ãł'] +['ĠìĹĨ', 'ëĭ¤'] +['Ġ׼', 'פ×Ļ'] +['ÑĢ', 'ож'] +['лаг', 'а'] +['Ġ×Ķש', '×Ļ'] +['ĠNgo', 'Ãłi'] +['ĠÙĪ', 'ج'] +['ĠÙĪØ¬', 'ÙĪØ¯'] +['ĠìľĦ', 'íķľ'] +['Ġus', 'ÅĤug'] +['Ġtu', 'ần'] +['d', 'ź'] +['×ŀ', '×ķף'] +['ĠاÙĦع', 'دÙĬد'] +['Ġch', 'ẳng'] +['สุà¸Ĥ', 'à¸łà¸²à¸ŀ'] +['Ġ×ij', '×ĵר×ļ'] +['ĠÑģеб', 'е'] +['ĠìŀĪ', 'ìĿĦ'] +['ĠاÙĦØŃ', 'اÙĦ'] +['Ġd', 'á'] +['Ġc', 'ưá»Ŀi'] +['Ġnghi', 'ên'] +['ie', 'ÅĦ'] +['ĠD', 
'ương'] +['ï¼', 'ħ'] +['Ø´', 'د'] +['ãģĦãģ¤', 'ãĤĤ'] +['ĠвÑĭб', 'оÑĢ'] +['Ġc', 'á»Ļng'] +['ש', '×Ļ׳×ķ×Ļ'] +['Ġch', 'ạy'] +['Ġ×ij×¢', '׾×Ļ'] +['اخ', 'بار'] +['íķĺ', 'ë©°'] +['ż', 'Äħ'] +['ج', 'از'] +['Ġ׳', 'ר×IJ×Ķ'] +['ศ', 'ู'] +['ศู', 'à¸Ļ'] +['ศูà¸Ļ', 'ยà¹Į'] +['×Ĵ', '×¢'] +['Ġ×¢', '×ĵ×Ļ'] +['Ġ×¢×ĵ×Ļ', '×Ļף'] +['بر', 'ا'] +['ÑĨи', 'й'] +['ĠÄIJ', 'á»ĵng'] +['ÙĤ', 'اÙĨÙĪÙĨ'] +['ĠÄij', 'ứng'] +['ãģĹãģŁ', 'ãĤĬ'] +['Ġ×Ĺ×Ļ', '×Ļ'] +['Ġë', 'IJľ'] +['ĠëIJľ', 'ëĭ¤'] +['Ġм', 'еждÑĥ'] +['à¸ŀวà¸ģ', 'à¹Ģà¸Ĥา'] +['ĠB', 'ắc'] +['ล', 'ำ'] +['ë°', '±'] +['ĠíĻ', 'ķ'] +['มาà¸ģ', 'ม'] +['มาà¸ģม', 'าย'] +['бан', 'к'] +['à¸Ńา', 'à¸ģาร'] +['Ġh', 'Ãł'] +['Ġ׾', '׳'] +['à¸Ń', 'à¸Ń'] +['Ġë°Ķ', 'ë¡ľ'] +['л', 'ом'] +['m', 'ática'] +['ĠØŃ', 'د'] +['اب', 'ت'] +['à¸Ĺีà¹Ī', 'à¸Ļีà¹Ī'] +['Ġco', 'ÅĽ'] +['ÙģÙĬ', 'دÙĬ'] +['ÙģÙĬدÙĬ', 'ÙĪ'] +['ĠмеÑģÑĤ', 'о'] +['Ġph', 'út'] +['มาà¸ģ', 'à¸ģวà¹Īา'] +['×IJ', 'פ'] +['ب', 'ÙIJ'] +['ĠPh', 'ú'] +['ì±', 'Ħ'] +['ĠÙĪ', 'سÙĦÙħ'] +['à¸Īี', 'à¸Ļ'] +['поÑĤ', 'ÑĢеб'] +['Ġ×Ĺ×ĵ', 'ש×ķת'] +['Ø´', 'ÙĪ'] +['Ġעצ', '×ŀ×ķ'] +['ĠعÙħÙĦ', 'ÙĬØ©'] +['à¸Ħุà¸ĵ', 'à¸łà¸²à¸ŀ'] +['ãģ¾ãģĻ', 'ãģĮ'] +['دع', 'ÙĪ'] +['طر', 'ÙĤ'] +['à¹Ħมà¹Ī', 'à¸ķà¹īà¸Ńà¸ĩ'] +['ë²', 'Ķ'] +['ìĬ', '¹'] +['Ġk', 'ÃŃch'] +['ĠìĹĨ', 'ëĬĶ'] +['ĠÑĤ', 'ам'] +['ĠÙĨ', 'ØŃÙĪ'] +['ĠاÙĦÙĤ', 'اÙĨÙĪÙĨ'] +['×Ĺ', '×ķ×Ŀ'] +['Ġk', 'ız'] +['Ġ×ĵ', '×Ļף'] +['ĠвÑĢем', 'ени'] +['ãģ£ãģŁ', 'ãĤĬ'] +['ĠØ´', 'Ùĩر'] +['ĠìĦľ', 'ë¹ĦìĬ¤'] +['×¢', 'ש×Ķ'] +['Ġgi', 'ác'] +['ĠاÙĦسÙĦ', 'اÙħ'] +['Ġ×IJ', 'ש'] +['ĠполÑĥÑĩ', 'а'] +['à¸Īัà¸Ķ', 'à¸ģาร'] +['к', 'оÑĢ'] +['Ġ×Ķ×ĺ', '×ķ×ij'] +['ราย', 'à¸ģาร'] +['주', 'ìĿĺ'] +['à¹ģà¸ķà¹Ī', 'ละ'] +['Ġê·¸ëŁ°', 'ëį°'] +['à¸Ĺีà¹Ī', 'à¹Ģà¸Ľà¹ĩà¸Ļ'] +['Ġת', '×ķ×ļ'] +['بÙĬ', 'اÙĨ'] +['Ð', 'Ļ'] +['oÅĽci', 'Äħ'] +['ÑĤ', 'ок'] +['ĠÃ', 'Ķ'] +['ĠÃĶ', 'ng'] +['à¹Ħมà¹Ī', 'à¹ĥà¸Ĭà¹Ī'] +['ãģ¿', 'ãģ¦'] +['ÐŁ', 'о'] +['ĠЧ', 'ÑĤо'] +['íĻ', '©'] +['×ĺ', '×ij×¢'] +['меÑĤ', 'ÑĢ'] +['Ġ×ij', '×ŀ×Ķ'] +['Ġ×ij×ŀ×Ķ', '׾'] +['Ġ×ij×ŀ×Ķ׾', '×ļ'] +['Ñĩ', 'ÑĮ'] +['×§', 'ש×Ķ'] +['з', 'нак'] +['знак', 'ом'] +['uj', 'ÄĻ'] +['×Ļצ', 'ר'] 
+['ĠاÙĦÙħ', 'ÙĦÙĥ'] +['ı', 'yla'] +['×IJ×ŀ', 'ת'] +['à¸Ľ', 'ิà¸Ķ'] +['×IJ', '×Ĺ×ĵ'] +['ر', 'اد'] +['Ġm', 'áºŃt'] +['ëĭ¤', 'ëĬĶ'] +['Ġl', 'ạnh'] +['ש׾', '×ķש'] +['ØŃ', 'دÙĬØ«'] +['ت', 'ز'] +['å¹´', 'ãģ®'] +['Ġк', 'ваÑĢ'] +['ĠкваÑĢ', 'ÑĤиÑĢ'] +['ä½ľ', 'ãĤĬ'] +['رÙĪ', 'ب'] +['ов', 'ан'] +['ĠТ', 'е'] +['à¸Īำ', 'à¸ģ'] +['à¸Īำà¸ģ', 'ัà¸Ķ'] +['ب', 'اط'] +['×Ĵ', 'ת'] +['Ġм', 'аÑĪ'] +['ĠмаÑĪ', 'ин'] +['×Ļצ', '×Ķ'] +['ãģ»', 'ãģ¨'] +['ãģ»ãģ¨', 'ãĤĵãģ©'] +['ÃŃ', 'do'] +['ĠÑı', 'зÑĭк'] +['à¸ļ', 'ิà¸Ļ'] +['สà¸ĸาà¸Ļ', 'à¸Ĺีà¹Ī'] +['ĠìĹ', '´'] +['ãĤ¦', 'ãĤ§'] +['Ġc', 'Ãł'] +['п', 'ан'] +['åı£', 'ãĤ³ãĥŁ'] +['Ġر', 'د'] +['اÙĤ', 'ت'] +['ĠÙĥ', 'ب'] +['ĠÙĥب', 'ÙĬرة'] +['ÑģÑĤ', 'ал'] +['ש×ŀ', '×Ĺ'] +['pos', 'ición'] +['ĠÙħÙĦÙĬ', 'ÙĪÙĨ'] +['ĠìĿ´', 'ìķ¼'] +['ĠìĿ´ìķ¼', '기'] +['Ġh', 'út'] +['ĠÅĽw', 'iat'] +['Ġë°©', 'ë²ķ'] +['ĠÑģв', 'еÑĤ'] +['Ġвиде', 'о'] +['ĠاÙĦÙĨ', 'ظاÙħ'] +['Ġtr', 'á»Ŀi'] +['ĠëĮĢ', 'íķ´ìĦľ'] +['ר', '×ŀת'] +['ت', 'داÙĪÙĦ'] +['×ķר', '×ĵ'] +['ת', '×ŀ'] +['ת×ŀ', '×ķ׳×ķת'] +['Ġ×ŀ', 'ף'] +['Ġдв', 'а'] +['Ġ×Ķ×§', '×ķ'] +['æĹ¥', 'ãģ«'] +['Ġ×Ķ×Ĵ', '×Ļ×¢'] +['à¹Ģà¸ŀิà¹Īม', 'à¹Ģà¸ķิม'] +['Ùħار', 'س'] +['Ġê²ĥ', 'ìŀħëĭĪëĭ¤'] +['ãģªãģĦ', 'ãģ¨'] +['Ġnhi', 'á»ĩt'] +['ëIJ', '©ëĭĪëĭ¤'] +['Ġ×ij׳', '×ķש×IJ'] +['Ġê°Ģ', 'ìŀ¥'] +['Ġv', 'ợ'] +['ĠÄij', 'óng'] +['צ×Ļ׾', '×ķ×Ŀ'] +['ê´Ģ', 'ê³Ħ'] +['в', 'аÑı'] +['×IJ', '×Ļ×ĸ'] +['×IJ×Ļ×ĸ', '×Ķ'] +['ĠÙĨ', 'ظاÙħ'] +['ÙħØŃ', 'اÙ쨏'] +['Ġt', 'ải'] +['기', 'ëıĦ'] +['à¸Ľà¸±à¸Ī', 'à¸Īุ'] +['à¸Ľà¸±à¸Īà¸Īุ', 'à¸ļัà¸Ļ'] +['׼', '×ĵ×ķר'] +['ĠìķĦ', 'ìĿ´'] +['׼׳', '×Ļס'] +['à¹Ģ', 'à¸ķร'] +['à¹Ģà¸ķร', 'ียม'] +['Ġngo', 'ại'] +['ĠدÙĪÙĦ', 'ار'] +['Ġr', 'ẻ'] +['Ġkh', 'Äĥn'] +['عد', 'د'] +['Ø´', 'عب'] +['czy', 'Äĩ'] +['ĠاÙĦ', 'Ùĥر'] +['ĠÑĩеловек', 'а'] +['ĠÙĪ', 'Ø¥ÙĨ'] +['×IJ', '×ĺ'] +['Ġth', 'Æ¡'] +['ĠاÙĦ', 'رÙĬاض'] +['оп', 'ÑĢедел'] +['опÑĢедел', 'ен'] +['×Ķ', '×ŀש×ļ'] +['ĠÐĿ', 'ово'] +['з', 'Ñĭва'] +['ĠاÙĦدÙĪÙĦ', 'ÙĬ'] +['ĠÄij', 'áp'] +['Ġк', 'ÑĢед'] +['ĠкÑĢед', 'иÑĤ'] +['ов', 'ого'] +['Ġm', 'ôn'] +['à¸Ľà¸£à¸°', 'à¹Ĥย'] +['à¸Ľà¸£à¸°à¹Ĥย', 'à¸Ĭà¸Ļ'] 
+['à¸Ľà¸£à¸°à¹Ĥยà¸Ĭà¸Ļ', 'à¹Į'] +['ÑģÑĤ', 'е'] +['ĠTh', 'á»ĭ'] +['د', 'ÙĬØ©'] +['×ŀצ', '×ķ'] +['Ùģ', 'ات'] +['×§', '×ĵ×Ŀ'] +['ìĿ´ëĿ¼', 'ê³ł'] +['ÙĪ', 'Ø®'] +['Ġ×Ĺ', '×ĸ'] +['ĠÑĦоÑĤ', 'о'] +['׾', '×Ļת'] +['ت', 'Ùİ'] +['ÙĪ', 'بر'] +['й', 'ÑĤи'] +['ĠÃ¶ÄŁ', 'ren'] +['Ġ×Ķ×ĸ', '×ķ'] +['Ġv', 'á»įng'] +['ÙĤÙĪ', 'Ø©'] +['ĠT', 'ây'] +['ĠÐĿ', 'и'] +['Ġש', '×ķ×ij'] +['ãģ¨è¨Ģ', 'ãĤıãĤĮ'] +['ãģ©', 'ãĤĵãģª'] +['×Ĺ', 'צ×Ļ'] +['ï½', 'ľ'] +['Ġ×ķ×Ķ', '×ķ×IJ'] +['ä¸Ģ', 'ãģ¤'] +['ĠÑģÑĤо', 'иÑĤ'] +['ni', 'Äħ'] +['×ĺר', '×Ļ'] +['ĠдеÑĤ', 'ей'] +['нÑı', 'ÑĤÑĮ'] +['ĠÑģдел', 'аÑĤÑĮ'] +['Ġë§İ', 'ìĿ´'] +['ä½ķ', 'ãģĭ'] +['ãģĽ', 'ãĤĭ'] +['à¹Ħ', 'หม'] +['à¸ķิà¸Ķ', 'à¸ķà¹Īà¸Ń'] +['Ġ×ij', 'ת×Ĺ'] +['Ġ×ijת×Ĺ', '×ķ×Ŀ'] +['ìĻ', 'Ħ'] +['ì§Ģ', 'ëĬĶ'] +['ÑģÑĤ', 'аÑĤ'] +['ÑıÑģ', 'н'] +['ü', 'b'] +['Ġth', 'ả'] +['Ġ×ij×IJ×ŀ', 'ת'] +['Ġt', 'uyến'] +['×ĵ', '×Ļר×Ķ'] +['Ġ×IJ', '×Ļש×Ļ'] +['×ĸ׼', 'ר'] +['ãģ°', 'ãģĭãĤĬ'] +['Ġx', 'ét'] +['׼', '×Ļ×ķ'] +['׼×Ļ×ķ', '×ķף'] +['diÄŁ', 'ini'] +['ĠاÙĦÙħ', 'ÙĪØ¶ÙĪØ¹'] +['Ġh', 'áºŃu'] +['à¸Īาà¸ģ', 'à¸ģาร'] +['×ijס', '×Ļס'] +['Ġ×ŀ×Ĵ', '×Ļ×¢'] +['×ij', '×Ļ×¢'] +['ĠÙĪ', 'جÙĩ'] +['à¹ģà¸Ķ', 'à¸ĩ'] +['à¸Ļ', 'าà¸ĩ'] +['ĠÅŀ', 'a'] +['ì', '¡´'] +['ë¡', 'Ģ'] +['à¸ķ', 'ะ'] +['Ġ×Ķ×Ĺ×Ļ', '×Ļ×Ŀ'] +['Ùģ', 'ÙĬد'] +['ãģ§ãģĻ', 'ãģĭãĤī'] +['ê·', 'ľ'] +['ź', 'ni'] +['ĠлÑİ', 'дей'] +['Ġyüz', 'de'] +['ıy', 'orum'] +['ĠاÙĦ', 'بØŃر'] +['e', 'ño'] +['п', 'аÑĢ'] +['ÙĬ', 'ÙĤØ©'] +['об', 'ÑĢ'] +['ר', '×ķ×ļ'] +['ت', 'ÙĪÙĤع'] +['ĠاÙĦØ´', 'ÙĬØ®'] +['åĪĿ', 'ãĤģãģ¦'] +['ĠÑĤ', 'елеÑĦ'] +['ĠÑĤелеÑĦ', 'он'] +['Ġth', 'ôi'] +['Ġ×Ļ׼×ķ׾', '×Ļ×Ŀ'] +['ĠÅŁ', 'irk'] +['ĠÅŁirk', 'et'] +['Ġìļ°ë¦¬', 'ê°Ģ'] +['ĠÄij', 'ông'] +['Ġת', '×ķ×ĵ×Ķ'] +['ÑģмоÑĤÑĢ', 'еÑĤÑĮ'] +['ĠÙĦ', 'ÙĩÙħ'] +['Ġ׾', '׼'] +['ĠN', 'ó'] +['ĠØŃ', 'اÙĦØ©'] +['ãģĦ', 'ãģij'] +['קר', '×ķ'] +['az', 'ı'] +['ãĤ³', 'ãĥ¼'] +['ĠÙĦÙĦ', 'ت'] +['s', 'ınız'] +['ĠH', 'ải'] +['기', 'ìĪł'] +['ยัà¸ĩ', 'à¹Ħมà¹Ī'] +['ëĭ¤', 'ê³ł'] +['פ', '×Ĺ'] +['Ġ׾×Ĵ', '×ij×Ļ'] +['Ġع', 'ÙĨÙĩ'] +['Ġк', 'аз'] +['Ġказ', 'ино'] +['ب', 'ÙĪØ±'] +['ÑĦ', 'еÑĢ'] +['Ġê°Ļ', 'ìĿ´'] +['تس', 'جÙĬÙĦ'] 
+['ĠاÙĦÙħ', 'رÙĥز'] +['ĠTh', 'ái'] +['д', 'аÑĤÑĮ'] +['×ŀ×Ļ', '×Ļ׾'] +['Ġpay', 'laÅŁ'] +['ãģ¤', 'ãģ®'] +['à¹Ģร', 'ืà¸Ń'] +['n', 'ça'] +['׳', '×ķ×Ĺ'] +['Ġ×IJ', 'פ×Ļ׾×ķ'] +['ãģ¨', 'èĢĥãģĪ'] +['ãģ¨ãģĹãģ¦', 'ãģ¯'] +['à¹Ģà¸Ī', 'à¸Ń'] +['×ŀ', 'פ'] +['Ġg', 'iriÅŁ'] +['л', 'иÑĤ'] +['ÑĤ', 'елÑı'] +['Ñij', 'н'] +['æ°Ĺ', 'ãģ«'] +['Ġg', 'ó'] +['Ġgó', 'p'] +['åĪĩ', 'ãĤĬ'] +['Ġ×Ķ', '×Ĺ×ĵש'] +['ж', 'ал'] +['Ġ×ĵ', 'עת'] +['éģķ', 'ãģĨ'] +['à¹Ģà¸Ĥà¹īา', 'à¹Ħà¸Ľ'] +['Ġס', 'ר×ĺ'] +['e', 'ña'] +['æĸ°', 'ãģĹãģĦ'] +['ر', 'Ùİ'] +['ĠÐIJ', 'ÑĢ'] +['Ġph', 'ản'] +['à¸Īะ', 'à¹Ħà¸Ķà¹ī'] +['Ġ×ijצ', '×ķר×Ķ'] +['Ø´', 'اÙĩ'] +['شاÙĩ', 'د'] +['ÙĪØ±', 'د'] +['à¹Ģà¸Ļืà¹Īà¸Ńà¸ĩ', 'à¸Īาà¸ģ'] +['или', 'ÑģÑĮ'] +['à¹ģละ', 'à¸ģาร'] +['Ġ×Ķ', '×ĸ׼'] +['Ġ×Ķ×ĸ׼', '×ķ×Ļ×ķת'] +['ei', 'ÃŁ'] +['ãĥ', '¨'] +['ìĥ', 'Ī'] +['ĠÃĩ', 'a'] +['Æ', '¯'] +['ש', '×Ĵ'] +['ÙĬÙĨ', 'Ø©'] +['ร', 'à¹īà¸Ńà¸ĩ'] +['ãĤµ', 'ãĥ³'] +['ÑĢоÑģÑģ', 'ий'] +['ÑĢоÑģÑģий', 'Ñģк'] +['a', 'ÄŁa'] +['ĠнаÑĩ', 'ина'] +['Ġص', 'ÙĦÙī'] +['à¸Ĺุà¸ģ', 'à¸Ħà¸Ļ'] +['íļĮ', 'ìĤ¬'] +['Ġли', 'ÑĨ'] +['Ø´', 'ÙĬر'] +['ĠØ´ÙĬ', 'Ø¡'] +['ÙĬÙĨ', 'ا'] +['Ġפ', '×Ĺ×ķת'] +['Ġiçer', 'is'] +['Ġiçeris', 'inde'] +['ĠØ£', 'ØŃÙħد'] +['Ġże', 'by'] +['ì´', 'Ŀ'] +['Ġп', 'оказ'] +['Ġи', 'менно'] +['หà¸Ļัà¸ĩ', 'ส'] +['หà¸Ļัà¸ĩส', 'ืà¸Ń'] +['ĠÑĤÑĢ', 'е'] +['สัà¸ĩ', 'à¸Ħม'] +['Ø¥', 'ÙIJ'] +['ãģĮ', 'å¿ħè¦ģ'] +['ÙĬÙij', 'Ø©'] +['פ', 'צ'] +['íĭ', '°'] +['ĠÙħ', 'جاÙĦ'] +['׳', 'פש'] +['к', 'ан'] +['×Ĺ', '×ķפ'] +['×Ĺ×ķפ', 'ש'] +['ì²ĺ', 'ëŁ¼'] +['ов', 'аÑı'] +['з', 'ов'] +['Ġh', 'ạ'] +['Ġdzi', 'ÄĻki'] +['×Ļר', '×ķ'] +['Ġ׾', '×ŀצ'] +['Ġ׾×ŀצ', '×ķ×IJ'] +['×Ļ×ĵ', '×ķ'] +['Ġs', 'ợ'] +['Ġ׾×Ķ', '×Ĵ×Ļ×¢'] +['×§', '×ij×¢'] +['Ġchi', 'á»ģu'] +['ãĥŀ', 'ãĤ¤'] +['Ġd', 'Ãłng'] +['à¹ģà¸Ł', 'à¸Ļ'] +['Ġü', 'ye'] +['×Ļ׳', '×Ĵ'] +['à¹Ģรีย', 'à¸ģ'] +['ç§ģ', 'ãģĮ'] +['th', 'é'] +['ĠÑĦ', 'илÑĮ'] +['ĠÑĦилÑĮ', 'м'] +['ĠNg', 'Ãły'] +['Ġж', 'ен'] +['Ġжен', 'Ñīин'] +['ج', 'ÙĬد'] +['n', 'ç'] +['à¸Ľ', 'รา'] +['×Ļ×ŀ', '×ķ'] +['Ġn', 'á»ģn'] +['×IJ', '×ķ׾×Ŀ'] +['Ġвозмож', 'ноÑģÑĤÑĮ'] +['Ġëĭ¤', 'ìĭľ'] +['è¦ĭ', 'ãģŁ'] +['à¸ĸ', 
'à¸Ļ'] +['à¸ĸà¸Ļ', 'à¸Ļ'] +['mız', 'ı'] +['ĠÙħ', 'جÙħÙĪØ¹Ø©'] +['c', 'jÄħ'] +['ĠÐł', 'Ф'] +['à¸ģำ', 'หà¸Ļ'] +['à¸ģำหà¸Ļ', 'à¸Ķ'] +['ĠìŬ', '기'] +['land', 'ı'] +['ни', 'ÑĨ'] +['ÑģÑĤв', 'е'] +['Ġ×ĵ', '×ijר×Ļ×Ŀ'] +['Ġsk', 'ÅĤad'] +['ãĤĬ', 'ãģ¾ãģĹãģŁ'] +['ĠоÑĤ', 'кÑĢÑĭÑĤ'] +['нÑı', 'ÑĤ'] +['ĠÑģво', 'ей'] +['à¸Ī', 'ิà¸ķ'] +['ĠкаÑĩеÑģÑĤв', 'е'] +['Ġet', 'tiÄŁi'] +['ìĤ¬', 'íķŃ'] +['ĠاÙĦÙĬ', 'ÙħÙĨ'] +['иÑĩеÑģки', 'й'] +['ë¸', 'Į'] +['Ġ×ij×IJר', '×¥'] +['Ġا', 'سÙħ'] +['Ġиз', 'веÑģÑĤ'] +['r', 'ão'] +['Ġatt', 'ivitÃł'] +['à¹Ģà¸Ľà¹ĩà¸Ļ', 'à¸ģาร'] +['ĠاÙĦد', 'Ùĥت'] +['ĠاÙĦدÙĥت', 'ÙĪØ±'] +['ĠÙĪØ§ØŃد', 'Ø©'] +['ĠÑģ', 'ÑĩеÑĤ'] +['ĠпÑĢ', 'иÑĩ'] +['ĠпÑĢиÑĩ', 'ин'] +['ĠÙĪØ²', 'ارة'] +['Ġh', 'uyá»ĩn'] +['ĠÙĥ', 'تاب'] +['à¹ģà¸Ļ', 'à¹Īà¸Ļ'] +['à¹ģà¸Ļà¹Īà¸Ļ', 'à¸Ńà¸Ļ'] +['Ġgün', 'ü'] +['г', 'ÑĢÑĥз'] +['ĠاÙĦØ®', 'اص'] +['Ġgör', 'ül'] +['׾', '×ŀ×ĵ'] +['Ġìłķ', 'ëıĦ'] +['×ķ×ij', '×Ļ׾'] +['Ġ×ŀ×§', 'צ×ķ×¢×Ļ'] +['ĠоÑģоб', 'енно'] +['à¸Ľà¸£à¸°', 'à¸ģา'] +['à¸Ľà¸£à¸°à¸ģา', 'ศ'] +['aca', 'ģını'] +['ë¶', 'ģ'] +['à¸łà¸¹', 'มิ'] +['ĠÑį', 'лекÑĤ'] +['ĠÑįлекÑĤ', 'ÑĢо'] +['Ġ×§', 'ש×Ķ'] +['سÙĦ', 'Ø·'] +['à¸Ĭà¸Ļ', 'ะ'] +['×¢', '×Ļ׾'] +['ĠЧ', 'е'] +['à¹ģà¸Ļ', 'à¹Ī'] +['lı', 'ÄŁ'] +['lıģ', 'ın'] +['Ġ×ŀ×¢', '×¨×Ľ×ª'] +['好ãģį', 'ãģª'] +['มาà¸ģ', 'à¸Ĥึà¹īà¸Ļ'] +['×ŀ×¢', '×ijר'] +['ĠاÙĦÙħ', 'غرب'] +['ĠпеÑĢ', 'и'] +['ĠпеÑĢи', 'од'] +['Ġnh', 'ạc'] +['ا', 'ÙĪÙĬ'] +['ĠÙĪ', 'عÙĦÙī'] +['أخ', 'ذ'] +['ĠC', 'ô'] +['תר', '×ij×ķת'] +['×Ĵ', '×Ķ'] +['Ġktóre', 'j'] +['×IJ', '×Ļת'] +['×ij', '×ķ×IJ'] +['д', 'елÑĮ'] +['รี', 'วิ'] +['รีวิ', 'ว'] +['ж', 'Ñĥ'] +['Ġ×ij×Ĺ', '×ķ'] +['еÑĪ', 'ÑĮ'] +['ĠØ£', 'ÙĦÙģ'] +['ĠاÙĦÙĪ', 'Ø·ÙĨÙĬ'] +['ĠاÙĦÙħÙĨ', 'Ø·ÙĤØ©'] +['nÄħ', 'Äĩ'] +['Ġthi', 'ên'] +['иÑĩеÑģк', 'ой'] +['ĠاÙĦÙħ', 'ÙĦ'] +['Ġع', 'Ùħ'] +['ס', 'פר'] +['Ġnh', 'óm'] +['ÙĪØµ', 'Ùģ'] +['ĠCh', 'úng'] +['Ġر', 'ÙĤÙħ'] +['ãģ¾ãģĹãģŁ', 'ãģĮ'] +['al', 'ité'] +['ล', 'ม'] +['ĠëĤ´', 'ê°Ģ'] +['׾ק', '×ķ×Ĺ'] +['ĠS', 'Æ¡n'] +['pos', 'ição'] +['mi', 'ÄĻ'] +['Ġtr', 'ánh'] +['ĠÄIJ', 'á»Ļ'] +['׼', '×Ĺ'] +['ãģĤ', 'ãģ£ãģ¦'] +['à¸Ńย', 'à¹Īา'] +['Ġ×ŀ×Ĺ', 
'×Ļר'] +['Ġ×Ķ', '×Ļת×Ķ'] +['à¸Ľ', 'à¹Īา'] +['à¸Ńืà¹Īà¸Ļ', 'à¹Ĩ'] +['Ø´', 'ÙĤ'] +['×ł×¡', '×Ļ'] +['ë¦', '¼'] +['ãģ¦ãģĹãģ¾', 'ãģĨ'] +['Ġ×ŀ', 'צ×ij'] +['ãģ«', 'åĩº'] +['ÙħÙĪØ§', 'Ø·ÙĨ'] +['ยัà¸ĩ', 'มี'] +['алÑĮ', 'нÑĭе'] +['san', 'ız'] +['Ø¥', 'سرائÙĬÙĦ'] +['ĠvÃł', 'i'] +['ì¤', 'Ħ'] +['ã썿ĢĿ', 'ãģ£ãģ¦'] +['×Ļ', '×ķ׳×Ļ'] +['çĶŁ', 'ãģį'] +['Ġs', 'âu'] +['Ñĩ', 'иÑģÑĤ'] +['Ġl', 'á»ħ'] +['ĠGi', 'á'] +['à¸Ńุ', 'à¸Ľ'] +['à¸Ńà¸¸à¸Ľ', 'à¸ģร'] +['à¸Ńà¸¸à¸Ľà¸ģร', 'à¸ĵà¹Į'] +['Ġnh', 'ẹ'] +['r', 'ö'] +['ס', '×ĺ×Ļ'] +['ãģķãĤĵ', 'ãģĮ'] +['Ġd', 'ầu'] +['ع', 'Ùİ'] +['ت', 'را'] +['×Ĵ×ĵ', '׾'] +['Ġtécn', 'ica'] +['׼', '׳×Ļ×Ŀ'] +['תק', 'ש'] +['תקש', '×ķרת'] +['Ġн', 'его'] +['ét', 'ait'] +['Ġm', 'á»ģm'] +['Ñģ', 'еÑĤ'] +['Ġnh', 'áºŃt'] +['Ġ×ŀ', '×¢×ľ'] +['Ġ×Ķ×¢', '×ij×ķ×ĵ'] +['Ġ×Ķ×¢×ij×ķ×ĵ', '×Ķ'] +['Ġ×Ĵ', '×Ļ׾'] +['ãģ¯', 'ãģªãģĦ'] +['ائ', 'ØŃ'] +['Ġз', 'деÑģÑĮ'] +['×IJ', '×Ļ׳×ĺר'] +['Ùħ', 'ÙIJ'] +['Ġ×Ļ', '×Ĺ×ĵ'] +['ر', 'اÙģ'] +['ì²ĺ', '리'] +['×ĵ', '×¢×ķת'] +['ì¹', 'ľ'] +['ĠТ', 'о'] +['ĠTh', 'ế'] +['ì¶', '©'] +['Ġ׳׼', '×ķף'] +['عÙĬ', 'Ø´'] +['ни', 'з'] +['Ġج', 'اÙĨب'] +['×ŀ×§', 'צ×ķ×¢'] +['à¹Ĥ', 'à¸ĭ'] +['Ñģ', 'ÑĥÑĤ'] +['ìĸ´', 'ìļĶ'] +['ãĤĴè¦ĭ', 'ãģ¦'] +['ار', 'د'] +['Ġaç', 'ıl'] +['ĠاÙĦØŃ', 'ÙĬاة'] +['à¸ģà¹ĩ', 'à¹Ħà¸Ķà¹ī'] +['ãģĿãĤĮ', 'ãĤĴ'] +['عض', 'ÙĪ'] +['Ġг', 'ÑĢаж'] +['ĠгÑĢаж', 'дан'] +['à¸Īะ', 'à¸ķà¹īà¸Ńà¸ĩ'] +['ĠìĿ´', '룬'] +['ĠìĿ´ë٬', 'íķľ'] +['Ġtr', 'ách'] +['ÙĨ', 'Ùİ'] +['Ġkı', 'sa'] +['Ã', 'Ķ'] +['ÑĪ', 'ка'] +['ãģ®', '人'] +['ĠÐŁ', 'оÑģ'] +['ĠÐŁÐ¾Ñģ', 'ле'] +['Ñĥ', 'лÑĮ'] +['ÙĪØ§', 'جÙĩ'] +['ÙĤ', 'رب'] +['à¸Ľà¸ıิ', 'à¸ļัà¸ķิ'] +['ê°', 'Ļ'] +['Ġ×ŀ', '׳'] +['ĠÑģво', 'и'] +['بر', 'اÙħج'] +['Ġر', 'ÙĪ'] +['пÑĢ', 'од'] +['пÑĢод', 'аж'] +['Ġby', 'ÅĤy'] +['วั', 'ย'] +['Ġgör', 'ün'] +['ĠÃ', 'Ī'] +['ÑİÑī', 'им'] +['ĠÑĤак', 'ой'] +['Ùģ', 'ÙĪØ±'] +['ĠÙģ', 'عÙĦ'] +['Ġб', 'ел'] +['ëIJ', 'ł'] +['er', 'ÃŃa'] +['ĠÑģво', 'Ñİ'] +['Ġl', 'ã'] +['Ġlã', 'nh'] +['à¹Ģà¸ŀืà¹Īà¸Ń', 'à¹ĥหà¹ī'] +['ÙĤ', 'ÙĨ'] +['تط', 'ÙĪÙĬر'] +['Ġsay', 'ı'] +['ĠÑģ', 'ейÑĩаÑģ'] +['Ġ×IJ×Ĺר', 'ת'] +['×§', '×ķפ×Ķ'] +['×§×ķר', 'ס'] 
+['Ġس', 'Ùħ'] +['Ġ×ĺ', '×Ļפ×ķ׾'] +['ìĿ´ëĿ¼', 'ëĬĶ'] +['دراس', 'Ø©'] +['èµ·', 'ãģĵ'] +['×Ĺ', '×Ļ׳'] +['×Ĺ×Ļ׳', '×ķ×ļ'] +['×ĵ', '×§'] +['Ġë§', 'ŀ'] +['Ġком', 'анд'] +['ĠÐij', 'о'] +['Ġиг', 'ÑĢÑĭ'] +['à¸ļ', 'ี'] +['ĠØ£', 'Ùİ'] +['в', 'ен'] +['ĠاÙĦج', 'دÙĬد'] +['ĠÙĦ', 'Ø¥'] +['Ġ×ķ×IJ', '׳×Ļ'] +['Ġ×Ķס', '×Ļ'] +['иÑĩеÑģк', 'ого'] +['رÙĪ', 'ØŃ'] +['à¸ģาร', 'ศึà¸ģษา'] +['ĠTr', 'ưá»Ŀng'] +['иг', 'ÑĢа'] +['ıl', 'ması'] +['Ġм', 'аÑģÑģ'] +['ãģ¨ãģį', 'ãģ«'] +['à¸Ĺีà¹Ī', 'à¸ľà¹Īาà¸Ļ'] +['à¸Ĺีà¹Īà¸ľà¹Īาà¸Ļ', 'มา'] +['ĠاÙĦساب', 'ÙĤ'] +['Ġ×ŀ×¢', '×ĺ'] +['в', 'аÑĤÑĮ'] +['m', 'Ã¼ÅŁ'] +['Ġ׾', '׼×ļ'] +['Ġt', 'á»ĭch'] +['Ùģ', 'ÙĩÙħ'] +['تد', 'رÙĬب'] +['Ø´', 'Ùĥ'] +['Ġ×ij', '×ŀ×Ļ'] +['Ġ×ij×ŀ×Ļ', '×ķ×Ĺ×ĵ'] +['ÙĤØ·', 'اع'] +['ãģª', 'ãģĹ'] +['×ķצ', '×Ļ×IJ'] +['ĠÙĪ', 'سÙĬ'] +['з', 'Ñĥ'] +['Ġy', 'at'] +['Ġyat', 'ırım'] +['ë§', 'İ'] +['Ġth', 'ắng'] +['ãģĬ', '客'] +['ãģĬ客', 'æ§ĺ'] +['ĠThi', 'ên'] +['ãģ«å¯¾', 'ãģĹãģ¦'] +['ÑĢ', 'иÑģ'] +['ÙĨت', 'ائ'] +['ÙĨتائ', 'ج'] +['Ġ×ŀ', 'שר'] +['Ġ×ŀשר', '×ĵ'] +['Ġتع', 'اÙĦ'] +['ĠتعاÙĦ', 'Ùī'] +['ש', '׳×Ļ'] +['Ùĩ', 'اÙħ'] +['×IJ׳', 'ש×Ļ×Ŀ'] +['Ġżyc', 'ia'] +['ĠÑĢÑĥб', 'лей'] +['ÙĬ', 'ض'] +['Ġkat', 'ıl'] +['ĠÙħ', 'ÙĪØ¶ÙĪØ¹'] +['Ġvard', 'ır'] +['ĠÙħÙĨ', 'Ø·ÙĤØ©'] +['ĠTr', 'ần'] +['Ġв', 'еÑģ'] +['ü', 'p'] +['Ùħ', 'ÙĪÙĨ'] +['ÑĪ', 'ли'] +['Ġn', 'óng'] +['Ø®', 'ÙĦÙģ'] +['ĠС', 'ÑĤа'] +['Ġд', 'оÑĢ'] +['ĠдоÑĢ', 'ог'] +['ĠwÅĤa', 'ÅĽnie'] +['eÄŁ', 'in'] +['Ġhi', 'á»ĥm'] +['ĠС', 'ам'] +['ê»ĺ', 'ìĦľ'] +['ĠÑĦ', 'а'] +['ãģ»', 'ãģĨ'] +['ãģ»ãģĨ', 'ãģĮ'] +['×ķפ', '×Ļ×¢'] +['ê°', 'Ī'] +['د', 'ÙĪÙĦ'] +['Ġthu', 'ê'] +['Ġch', 'á»Ĺ'] +['Ġëĭ¹', 'ìĭł'] +['ãģij', 'ãĤĮ'] +['ãģijãĤĮ', 'ãģ©'] +['ë³´', 'íĺ¸'] +['ãģķãĤĮ', 'ãģ¦ãģĦãģ¾ãģĻ'] +['Ġнад', 'о'] +['ĠìĤ¬ëŀĮ', 'ëĵ¤'] +['à¹Ģà¸Ĥ', 'à¸ķ'] +['สม', 'ัย'] +['z', 'ÅĤ'] +['ت', 'ÙĪØ±'] +['Ġש', 'ת×Ļ'] +['v', 'ê'] +['Ġ×ijת', '×ķ×ļ'] +['à¸Ĭ', 'ัย'] +['ãģĦ', 'ãģ£ãģŁ'] +['ìĿ', 'ij'] +['Ġt', 'ầ'] +['Ġtầ', 'ng'] +['ש', '׼ר'] +['Ġê¸', 'Ģ'] +['Ġ×Ķש', '׳×Ķ'] +['Ġا', 'ÙĨÙĩ'] +['ç«ĭ', 'ãģ¡'] +['r', 'és'] +['füh', 'ren'] +['ر', 'ØŃÙħ'] +['ê·', '¹'] +['ĠâĢ', 
'«'] +['Ġsu', 'ất'] +['à¸Ł', 'ิ'] +['ÙĬ', 'Ùĩا'] +['ĠاÙĦ', 'اتØŃاد'] +['Ġt', 'uyá»ĥn'] +['ãģ¾', 'ãĤĭ'] +['Ġm', 'ại'] +['Ġng', 'ân'] +['ãĤ°', 'ãĥ©'] +['欲', 'ãģĹãģĦ'] +['س', 'ار'] +['ãĤĤãģ®', 'ãģ§ãģĻ'] +['ки', 'е'] +['Ġseç', 'im'] +['åħ¥', 'ãĤĬ'] +['ãģªãģ©', 'ãĤĴ'] +['ÑĤ', 'ÑĢи'] +['ĠÑģп', 'еÑĨ'] +['ĠØ£', 'د'] +['Ġод', 'но'] +['ÑĪ', 'ел'] +['ãĥĩ', 'ãĥ¼ãĤ¿'] +['ãĤ·', 'ãĤ¹ãĥĨ'] +['ãĤ·ãĤ¹ãĥĨ', 'ãĥł'] +['è¡Į', 'ãģį'] +['ã썿ĢĿ', 'ãģ£ãģŁ'] +['à¹Ģà¸ģิà¸Ķ', 'à¸Ĥึà¹īà¸Ļ'] +['ĠÑĤ', 'ож'] +['ĠÑĤож', 'е'] +['Ġs', 'ạch'] +['ĠÑģ', 'ÑĢок'] +['Ġкли', 'енÑĤ'] +['ĠÙħØ´', 'رÙĪØ¹'] +['Ġalt', 'ında'] +['Ġì', '·¨'] +['ä¸Ń', 'ãģ®'] +['ãģķãģĽ', 'ãĤĭ'] +['ãģĻ', 'ãģ¹'] +['ãģĻãģ¹', 'ãģ¦'] +['ê°ľ', 'ë°ľ'] +['ĠÄij', 'êm'] +['ãģªãģĦ', 'ãģ®ãģ§'] +['ì²', 'ł'] +['×¢', '×ij×ĵ'] +['Ġd', 'ấu'] +['à¸Ħà¸Ļ', 'à¸Ĺีà¹Ī'] +['ĠC', 'ách'] +['تع', 'ÙĦÙĬÙħ'] +['Ġh', 'ại'] +['ãĤ»', 'ãĥķãĥ¬'] +['ĠÙĨÙ쨳', 'Ùĩ'] +['ĠíĨµ', 'íķ´'] +['ÑĪ', 'ло'] +['Ġнап', 'ÑĢав'] +['ĠнапÑĢав', 'лен'] +['ÑĢÑĥ', 'Ñĩ'] +['íĶ', 'Į'] +['Ġ×ijר', '×Ļ×IJ'] +['ãģ®', 'ãģ¿'] +['ãģ«ãģĬ', 'ãģĦãģ¦'] +['×ij', '׳ק'] +['ãĤ¨', 'ãĥ³'] +['Ø«ÙĦ', 'اث'] +['Ġm', 'ỹ'] +['ĠÑģай', 'ÑĤе'] +['Ġе', 'мÑĥ'] +['ت', 'غÙĬ'] +['تغÙĬ', 'ÙĬر'] +['خص', 'ÙĪØµ'] +['ÑĤе', 'ли'] +['Ġ×ķ׾', '׼ף'] +['פע', '×Ŀ'] +['Ġпо', 'ÑįÑĤомÑĥ'] +['ر', 'اÙĨ'] +['иÑĤел', 'ей'] +['пиÑģ', 'ан'] +['×¢', '×¥'] +['ĠìĤ¬', 'ìĹħ'] +['Ùħ', 'ز'] +['جÙħ', 'ÙĬع'] +['ë©´', 'ìĦľ'] +['à¸ľà¸¥à¸´à¸ķ', 'à¸łà¸±'] +['à¸ľà¸¥à¸´à¸ķà¸łà¸±', 'à¸ĵ'] +['à¸ľà¸¥à¸´à¸ķà¸łà¸±à¸ĵ', 'à¸ij'] +['à¸ľà¸¥à¸´à¸ķà¸łà¸±à¸ĵà¸ij', 'à¹Į'] +['ĠпÑĢ', 'имеÑĢ'] +['ãĤŃ', 'ãĥ¼'] +['l', 'â'] +['Ġch', 'Äĥm'] +['缮', 'ãģ®'] +['ãģĦ', 'ãģĭ'] +['ãģ¨è¨Ģ', 'ãģĨ'] +['×ĸ', '×ķ×Ĵ'] +['Ġ×ij', '×ĵ×Ļ'] +['Ġ×ij×ĵ×Ļ', '×ķ×§'] +['ãģĬ', 'åºĹ'] +['à¸ķà¸Ńà¸Ļ', 'à¸Ļีà¹ī'] +['Ġph', 'á»iji'] +['п', 'ÑĤ'] +['สà¸Ļ', 'าม'] +['Ø·', 'ÙĪ'] +['ص', 'اØŃ'] +['صاØŃ', 'ب'] +['ĠD', 'ü'] +['ĠDü', 'nya'] +['Ġп', 'ока'] +['п', 'ал'] +['ĠÄij', 'ảo'] +['ĠاÙĦÙģ', 'ÙĪØ±'] +['ĠاÙĦÙģÙĪØ±', 'Ùĥس'] +['Ġmá', 'u'] +['кÑĢ', 'еп'] +['ĠاÙĦس', 'اعة'] +['ĠгоÑĢ', 'ода'] +['Ùģ', 'صÙĦ'] +['ай', 
'ÑĤе'] +['Ġд', 'ог'] +['Ġдог', 'овоÑĢ'] +['ĠØ¥', 'ذ'] +['Ġ×ij׼׾', '׾'] +['ÙĬ', 'تÙĩ'] +['×Ĵ', '×ijר'] +['Ġbir', 'ç'] +['Ġbirç', 'ok'] +['문', 'íĻĶ'] +['ãģĿãģĨ', 'ãģª'] +['را', 'ØŃ'] +['ĠÙħ', 'رة'] +['ĠденÑĮ', 'ги'] +['f', 'ä'] +['à¸Ĥà¹īา', 'ว'] +['ĠÑģов', 'ÑĢем'] +['ĠÑģовÑĢем', 'енн'] +['׾×Ĺ', '×¥'] +['èī¯', 'ãģı'] +['ĠÙģ', 'Ø£'] +['Ġ×ķ', '×ĸ×Ķ'] +['Ġз', 'ани'] +['Ġзани', 'ма'] +['Ġê°Ģì§Ģ', 'ê³ł'] +['Ġh', 'Æ¡i'] +['ãģªãģ®', 'ãģĭ'] +['ãĥĨ', 'ãĥ¬ãĥĵ'] +['Ġר', '×ij×ķת'] +['à¸ķ', 'ี'] +['Ġ×ijש', '×ł×ª'] +['ĠT', 'ại'] +['Ġthu', 'áºŃn'] +['Ñģ', 'ел'] +['Ñij', 'м'] +['dzi', 'Äĩ'] +['ĠÑģ', 'ка'] +['ĠÑģка', 'Ñĩ'] +['ĠÑģкаÑĩ', 'аÑĤÑĮ'] +['×ķ×ŀ', '×ķ'] +['г', 'ла'] +['Ġмин', 'ÑĥÑĤ'] +['åĩº', 'ãģĻ'] +['Ġ×Ĺ×Ļ', '×Ļ×ij'] +['Ġת', '×Ĵ×ķ×ij×Ķ'] +['à¸£à¸¹à¸Ľ', 'à¹ģà¸ļà¸ļ'] +['ни', 'ÑĨа'] +['Ġİ', 'n'] +['ĠØ£', 'ع'] +['Ġض', 'ÙħÙĨ'] +['Ùħ', 'ثاÙĦ'] +['ĠyaÅŁ', 'an'] +['ĠìŰ', '구'] +['ĠL', 'ê'] +['ש׾', '×Ĺ'] +['ãģı', 'ãģªãĤĭ'] +['ìĹĨ', 'ìĿ´'] +['ĠÑĤ', 'ÑĢи'] +['ĠÑĩаÑģÑĤ', 'о'] +['Ġоб', 'ÑĢаÑĤ'] +['п', 'ло'] +['د', 'Ø®'] +['دخ', 'ÙĪÙĦ'] +['س', 'Ùĩ'] +['à¸Ń', 'าà¸ģ'] +['à¸Ńาà¸ģ', 'าศ'] +['Ġ׼', '×ĸ×Ķ'] +['Ġ×Ķ×¢', 'סק'] +['ĠاÙĦØ£', 'ÙĨ'] +['å¹´', 'ãģ«'] +['×¢', 'ש×ķ'] +['Ġש', '×¢×ķת'] +['Ġm', 'Ãłn'] +['×IJר', '×Ļ'] +['sı', 'yla'] +['Ù쨱', 'ÙĤ'] +['ни', 'Ñħ'] +['Ġت', 'ست'] +['è¦ĭ', 'ãģ¦'] +['ØŃا', 'ÙĪÙĦ'] +['×IJ', '×Ļ׼×ķת'] +['ĠbaÅŁ', 'ladı'] +['st', 'Äħ'] +['stÄħ', 'pi'] +['à¸Ĺีà¹Ī', 'à¹Ģรา'] +['ÙĤر', 'ر'] +['ج', 'اب'] +['Ġ×ijר', '×ķר'] +['à¹Ģà¸Ĥà¹īา', 'à¹ĥà¸Ī'] +['×ŀ×Ĺ', 'קר'] +['al', 'ım'] +['Ġס', '×Ļפ×ķר'] +['ãģ§ãģĤ', 'ãĤĮãģ°'] +['Ġש×ŀ', '×ķר×ķת'] +['Ġ×ķ', '×ŀ×Ķ'] +['ãģĵ', 'ãģĿ'] +['id', 'ée'] +['ä¸ĭ', 'ãģķãģĦ'] +['تÙĨا', 'ÙĪÙĦ'] +['Ġ', 'ลà¹īาà¸Ļ'] +['Ġìļ°ë¦¬', 'ëĬĶ'] +['اÙĨ', 'ا'] +['ÑģÑĤ', 'ой'] +['б', 'оÑĤ'] +['ĠyaÅŁ', 'am'] +['kö', 'y'] +['Ø¥', 'ÙĦ'] +['ÑĢ', 'Ñĭв'] +['기', 'ìĹħ'] +['Ġ×Ķ×ŀ', '×ĵ'] +['Ġ×Ķ×ŀ×ĵ', '×Ļ׳×Ķ'] +['د', 'ب'] +['×¢', '×Ļ׳×Ļ'] +['×ŀ', 'ת×Ĺ'] +['Ġפ', 'ר×Ļ'] +['ãĥĭ', 'ãĥ¼'] +['اÙħ', 'ÙĬ'] +['Ġnh', 'ằm'] +['ãĤĮ', 'ãģªãģĦ'] +['ت', 'عرÙģ'] +['Ġë§Ī', 'ìĿĮ'] +['ìĵ', '°'] 
+['Ġh', 'ấp'] +['ר×Ĵ', '×Ļ׾'] +['ب', 'Ùİ'] +['Ġr', 'Äĥng'] +['gl', 'Äħd'] +['ĠÑģиÑģÑĤем', 'Ñĭ'] +['Ġkh', 'óa'] +['ãģ§ãģĻ', 'ãĤĪãģŃ'] +['大ãģį', 'ãģı'] +['기', '를'] +['Ġké', 'o'] +['ÙĪ', 'Ø¡'] +['ج', 'اÙħ'] +['جاÙħ', 'ع'] +['Ġ×¢', '×Ļצ×ķ×ij'] +['t', 'éri'] +['Ġת', 'ש'] +['Ġ×IJ', '×ij×Ļ'] +['ĠCh', 'ương'] +['à¸ļริ', 'à¹Ģว'] +['à¸ļริà¹Ģว', 'à¸ĵ'] +['ãģ¤', 'ãģı'] +['Ġ×Ĺ', '×ķ׾'] +['עת', '×Ļ×ĵ'] +['ש', '×Ļ×ŀ×Ķ'] +['ëĤ', '¨'] +['Ġש×IJ', '×Ļף'] +['ĠÙĪØ§ÙĦ', 'Ø¥'] +['ÑĦ', 'а'] +['Ġkh', 'ám'] +['Ġ×ĺ', '×ķ×ij×Ķ'] +['ĠвÑĭ', 'Ñģ'] +['ĠвÑĭÑģ', 'око'] +['ĠاÙĦØŃ', 'دÙĬØ«'] +['人', 'ãĤĤ'] +['d', 'Ã¼ÄŁÃ¼'] +['×Ļ×Ĺ', '×ķ×ĵ'] +['تع', 'ÙĦÙĬ'] +['تعÙĦÙĬ', 'ÙĤ'] +['l', 'ö'] +['تØŃ', 'دÙĬد'] +['н', 'его'] +['ĠÑĥд', 'об'] +['Ġ׾', '×ŀ×Ļ'] +['Ġר', '×ķצ×Ļ×Ŀ'] +['Ġج', 'اء'] +['Ġ×ij', '×ĸ×ŀף'] +['à¸Ľà¸ģ', 'à¸ķิ'] +['é«ĺ', 'ãģı'] +['à¸Ľà¸¥', 'า'] +['Ġart', 'ık'] +['Ġbug', 'ün'] +['×§', '׳×Ļ'] +['Ġkho', 'á'] +['ĠÙħ', 'رÙĥز'] +['ĠìŀIJ', '기'] +['در', 'جة'] +['×ŀש', 'ר×ĵ'] +['Ġgi', 'ấy'] +['Ġch', 'óng'] +['×§', 'פ'] +['ÙĬب', 'Ø©'] +['ĠczÄĻ', 'sto'] +['в', 'али'] +['Ùĥ', 'ب'] +['ìŁ', 'ģ'] +['ส', 'à¸ļาย'] +['à¸Ľà¸£à¸°à¸Ĭา', 'à¸Ĭà¸Ļ'] +['×Ĵ', '×ķ×£'] +['ëŁ', 'ī'] +['ãģ®', 'ãģĵãģ¨'] +['ล', 'à¸Ń'] +['Ġngh', 'á»ī'] +['åŃIJ', 'ãģ©'] +['åŃIJãģ©', 'ãĤĤ'] +['à¹Ħà¸Ķ', 'à¹īà¸Ńย'] +['à¹Ħà¸Ķà¹īà¸Ńย', 'à¹Īาà¸ĩ'] +['×ĵ', '×¢'] +['ĠاÙĦت', 'Ùī'] +['ĠÑģов', 'еÑĤ'] +['Ġqual', 'itÃł'] +['åĩº', 'ãģĹ'] +['ĠÑĢÑĥк', 'ов'] +['ĠÑĢÑĥков', 'од'] +['ราย', 'ละà¹Ģà¸Ńียà¸Ķ'] +['ãģªãģĭ', 'ãģªãģĭ'] +['기', 'ê´Ģ'] +['Ġ×Ĺ', '×ķש'] +['Ġ×Ĺ×ķש', '×ij'] +['л', 'оÑĤ'] +['à¸Ļะ', 'à¸Ħรัà¸ļ'] +['×§×ij', '×ķצ×Ķ'] +['Ġth', 'ái'] +['Ġש', '×ij×Ķ'] +['ĠÑĪ', 'кол'] +['ĠÙĦ', 'ÙĥÙĦ'] +['à¹ĥà¸Ļ', 'à¸Ĭà¹Īวà¸ĩ'] +['ĠÙħ', 'ÙĥاÙĨ'] +['ë', 'ķĮ'] +['Ġc', 'ải'] +['ĠCh', 'ÃŃ'] +['ÑĥÑĩ', 'а'] +['ìĿ', 'µ'] +['Ġx', 'ảy'] +['à¸Ĭà¸Ļ', 'ิà¸Ķ'] +['Ġc', 'áºŃu'] +['к', 'ÑĢов'] +['ss', 'é'] +['ĠÙĨ', 'ÙĪØ¹'] +['ĠТ', 'а'] +['Ø®', 'Ùħس'] +['פ×ķס', '×ĺ'] +['Ġm', 'ắc'] +['ĠÄij', 'em'] +['à¸ģาร', 'à¹ĥà¸Ĭà¹ī'] +['ר', '×ķס'] +['ĠÐĽ', 'е'] +['Ġth', 'á»Ń'] +['รà¹Īาà¸ĩ', 'à¸ģาย'] 
+['üz', 'ü'] +['æĹ¥æľ¬', 'ãģ®'] +['ê³¼', 'ìłķ'] +['ש', '×Ļ×IJ'] +['ĠìŀĪ', 'ê³ł'] +['×ij', '×ķ׾'] +['ìķ', 'ħ'] +['ĠÙĪØ§ÙĦ', 'ا'] +['ĠÐĽ', 'и'] +['ĠвÑģ', 'Ñij'] +['Ġużytk', 'ow'] +['×Ĺ', '×ķ׾'] +['ر', 'Ù쨶'] +['Ġson', 'uç'] +['ãģĦ', 'ãģ¾ãģĽãĤĵ'] +['ìĤ¬', 'ìĹħ'] +['ëĪ', 'Ħ'] +['ÑĤ', 'ек'] +['Ġud', 'ziaÅĤ'] +['л', 'ез'] +['Ġ×Ķ×Ļ', '×Ļת×Ļ'] +['ãĤīãĤĮ', 'ãģ¦'] +['Ùħس', 'ؤÙĪÙĦ'] +['ر', 'ار'] +['ÑĤ', 'ан'] +['ĠÄij', 'Ãło'] +['Ġר', '×ķ×ij'] +['Ġ×ijש×ij', '×Ļ׾'] +['ä»ĬåĽŀ', 'ãģ¯'] +['ãĤ¸', 'ãĥ¥'] +['Ġ×¢', '×ijר'] +['ãģĽ', 'ãģ¦'] +['п', 'олÑĮ'] +['ak', 'lı'] +['Ġk', 'ÃŃnh'] +['د', 'ت'] +['лож', 'ение'] +['ĠاÙĦÙħ', 'ص'] +['ĠاÙĦÙħص', 'رÙĬ'] +['à¸Īริà¸ĩ', 'à¹Ĩ'] +['ĠاÙĦشر', 'ÙĥØ©'] +['ĠÄij', 'á»ı'] +['ãĥĽ', 'ãĥĨ'] +['ãĥĽãĥĨ', 'ãĥ«'] +['Ñį', 'кон'] +['Ñįкон', 'ом'] +['ĠÙĪ', 'عÙĨ'] +['Ġת', '׳'] +['Ġ×ª×ł', '×IJ×Ļ'] +['ĠاÙĦدÙĪÙĦ', 'ÙĬØ©'] +['Ġì§Ģ', 'ìĹŃ'] +['ãģ§ãģĻ', 'ãģĭ'] +['Ġв', 'аÑĢи'] +['ĠваÑĢи', 'анÑĤ'] +['ĠاÙĦع', 'رب'] +['ел', 'а'] +['Ġt', 'Æ°á»Ľng'] +['sk', 'Äħ'] +['Ġm', 'ặc'] +['ส', 'ัà¸ģ'] +['ãĥĵ', 'ãĥ¼'] +['Ġ×ij', '×Ĵ׾'] +['Ġ×ij×Ĵ׾', '׾'] +['ãĥķãĤ¡', 'ãĥ³'] +['×ij', '×Ļצ'] +['×ij×Ļצ', '×ķ×¢'] +['ли', 'ÑģÑĤ'] +['à¸Ł', 'ุ'] +['à¸Łà¸¸', 'à¸ķ'] +['à¸Łà¸¸à¸ķ', 'à¸ļà¸Ńล'] +['à¸Ŀ', 'à¹Īาย'] +['ìŀIJ', 'ìĿĺ'] +['Ġس', 'ÙĪÙģ'] +['Ġש', '×Ķת'] +['Ġê±', '¸'] +['×¢', '×ij×ķ×ĵ'] +['ãģĻãĤĭ', 'ãģĵãģ¨ãģĮ'] +['ĠÑĩа', 'ÑģÑĤÑĮ'] +['ãĤ¢', 'ãĥ¡ãĥª'] +['ãĤ¢ãĥ¡ãĥª', 'ãĤ«'] +['Ġtak', 'ım'] +['Ġs', 'Ỽ'] +['ĠsỼ', 'm'] +['שר', '×Ķ'] +['è¨Ģ', 'ãģĨ'] +['л', 'ан'] +['ì»', '¤'] +['׼', '׳×Ķ'] +['ÙĪÙģ', 'ÙĬ'] +['íĹ', 'Ī'] +['lu', 'ÄŁu'] +['ĠëĮĢ', 'íķ´'] +['Ġ׾×ij', '×Ļת'] +['Ġ×Ķר×IJש', '×ķ׳×Ķ'] +['ص', 'Ùħ'] +['Ġsö', 'yled'] +['Ġsöyled', 'i'] +['à¸Ľ', 'าà¸ģ'] +['Ġard', 'ından'] +['ãģĪ', 'ãģŁ'] +['à¸Ĺัà¹Īว', 'à¹Ħà¸Ľ'] +['Ġ׳×ķס', '×£'] +['б', 'олÑĮ'] +['ãĤĵãģ§ãģĻ', 'ãģijãģ©'] +['ĠлиÑĪ', 'ÑĮ'] +['Ġ×ij', '×IJ×Ļ'] +['ĠбÑĭ', 'ÑģÑĤÑĢо'] +['ส', 'ัà¸Ļ'] +['Ġ×ij', 'פ׳×Ļ'] +['л', 'еÑĩ'] +['ĠاÙĦØ®', 'بر'] +['Ġsó', 'c'] +['Ġth', 'ú'] +['Ġп', 'ÑıÑĤ'] +['ãģĬ', 'é¡ĺ'] +['ãģĬé¡ĺ', 'ãģĦ'] +['ÑĤ', 'ин'] +['ãģ«ãģ¤ãģĦãģ¦', 'ãģ¯'] 
+['פ', 'ף'] +['Ġдв', 'ÑĥÑħ'] +['à¸į', 'ีà¹Ī'] +['à¸įีà¹Ī', 'à¸Ľ'] +['à¸įีà¹Īà¸Ľ', 'ุ'] +['à¸įีà¹Īà¸Ľà¸¸', 'à¹Īà¸Ļ'] +['оп', 'еÑĢ'] +['ĠاÙĦب', 'شر'] +['ĠاÙĦÙħ', 'اÙĦ'] +['ıyor', 'uz'] +['تØŃ', 'ÙħÙĬÙĦ'] +['à¸ģ', 'ะ'] +['éĸĵ', 'ãģ«'] +['×Ĺ', '×ķש'] +['ĠNg', 'uyên'] +['ãģĦãģ¦', 'ãģĦãĤĭ'] +['дÑĥ', 'ÑĪ'] +['ש', 'פע'] +['ÑĪ', 'Ñĥ'] +['å®Ł', 'éļĽãģ«'] +['ĠÑĢай', 'он'] +['ĠCh', 'á»ī'] +['ÙĨ', 'صر'] +['Ġìļ', '´'] +['Ġìļ´', 'ìĺģ'] +['Ġ×Ķ×ĵ', '×Ļף'] +['ØŃد', 'د'] +['ر', 'ز'] +['ĠاÙĦد', 'Ùħ'] +['ĠPh', 'áp'] +['ÑĤ', 'ÑģÑı'] +['è¦ĭ', 'ãģĪ'] +['Ġti', 'á»ĥu'] +['Ġs', 'á»Ńa'] +['а', 'ÑİÑĤÑģÑı'] +['ĠB', 'á'] +['Ġ×ķ', '׼׾'] +['Ð', 'ĸ'] +['ÑĪ', 'им'] +['ìĿ´', 'ëĬĶ'] +['л', 'ев'] +['d', 'ık'] +['Ġprés', 'ente'] +['Ġara', 'ç'] +['صد', 'ÙĤ'] +['Ġпом', 'ог'] +['ĠاÙĦشر', 'ÙĤ'] +['ĠÙĪØ§ÙĦ', 'ذÙĬ'] +['رÙĬ', 'ا'] +['×ij', '׳×ķת'] +['Ġng', 'á»ĵi'] +['ר', '×ķפ'] +['ר×ķפ', '×IJ'] +['Ġth', 'ấp'] +['ãĤĦ', 'ãģ¯'] +['ãĤĦãģ¯', 'ãĤĬ'] +['ĠاÙĦج', 'دÙĬدة'] +['éĿŀ常', 'ãģ«'] +['ÙĬÙĦ', 'ÙĬ'] +['ìª', '½'] +['تع', 'اÙħÙĦ'] +['ãģł', 'ã썿ĢĿãģĦãģ¾ãģĻ'] +['Ùħ', 'Ùħ'] +['иÑĤе', 'ли'] +['ãĤµãĤ¤', 'ãĤº'] +['اد', 'ات'] +['ĠاÙĦÙħ', 'اÙĦÙĬØ©'] +['Ùĥات', 'ب'] +['к', 'ли'] +['веÑĢ', 'Ñħ'] +['ни', 'Ñĩ'] +['Ġ×ľ×¢', '×ij×ķ×ĵ'] +['׾', '×Ļ×Ķ'] +['ØŃ', 'Ùİ'] +['ãĤ¤', 'ãĥĻ'] +['ãĤ¤ãĥĻ', 'ãĥ³ãĥĪ'] +['Ġת', '×Ĵ×ķ×ij×ķת'] +['ÑĦ', 'он'] +['ĠдÑĢÑĥг', 'ие'] +['×IJ', '×ĸ×ķר'] +['Ġper', 'ò'] +['ìķ', 'ŀ'] +['åĢŁ', 'ãĤĬ'] +['ר', 'צ×Ļ'] +['×IJ', '×ĸ'] +['алÑĮ', 'нÑĭÑħ'] +['Ġê²ĥ', 'ìľ¼ë¡ľ'] +['ĠпÑĢав', 'о'] +['ĠاÙĦØ£', 'رض'] +['à¹Ģà¸Ĺ', 'à¸Ħ'] +['à¹Ģà¸Ĺà¸Ħ', 'à¹Ĥà¸Ļ'] +['à¹Ģà¸Ĺà¸Ħà¹Ĥà¸Ļ', 'à¹Ĥล'] +['à¹Ģà¸Ĺà¸Ħà¹Ĥà¸Ļà¹Ĥล', 'ย'] +['à¹Ģà¸Ĺà¸Ħà¹Ĥà¸Ļà¹Ĥลย', 'ี'] +['צ', 'ר×Ļ'] +['ĠÐļ', 'Ñĥ'] +['ıl', 'ma'] +['決', 'ãĤģ'] +['ا', 'ÙĪ'] +['Ġ×ĵ', '×§×ķת'] +['à¸Ħร', 'ู'] +['ĠÙħست', 'ÙĪÙī'] +['à¸Ľ', 'à¹īà¸Ńà¸ĩ'] +['à¸Ľà¹īà¸Ńà¸ĩ', 'à¸ģัà¸Ļ'] +['×ĵ', '×ķ×ŀ×Ķ'] +['ĠÑģ', 'егоднÑı'] +['س', 'ÙĪÙĤ'] +['ר×Ĺ', '×ķ×ij'] +['ĠØ¥', 'دارة'] +['Ñħ', 'ож'] +['éģİ', 'ãģİ'] +['à¸Ħ', 'à¸Ń'] +['нÑĥ', 'л'] +['×ķ׼', '×Ķ'] +['ÙĪ', 'اÙģÙĤ'] +['׼׾', '׾'] +['Ġ×Ķ', '×ĵ×ķ'] +['Ġl', 
'Ä©nh'] +['Ġkh', 'ảo'] +['×IJ×ŀ', 'צע'] +['ë¨', '¸'] +['Ġ׼', '×Ļצ'] +['Ġ׼×Ļצ', '×ĵ'] +['Ġдолж', 'нÑĭ'] +['หว', 'ัà¸ĩ'] +['ãĥĩ', 'ãĤ¶'] +['ãĥĩãĤ¶', 'ãĤ¤ãĥ³'] +['Ġng', 'á»Ŀ'] +['ä¸Ń', 'ãģ«'] +['à¸ģลัà¸ļ', 'มา'] +['جÙħ', 'اÙĦ'] +['à¸Ķัà¸ĩ', 'à¸ģลà¹Īาว'] +['س', 'ÙĥÙĨ'] +['س', 'ÙĨ'] +['Ġözellik', 'le'] +['з', 'еÑĢ'] +['rz', 'ÄĻ'] +['×ŀ', '×ķר×Ķ'] +['Ġl', 'ạ'] +['×ŀ', '×Ļ׳×Ļ'] +['ר', '×Ļת'] +['ãģĿãĤĮ', 'ãģĮ'] +['ãģĭ', 'ãĤĮ'] +['ĠÙĬÙħÙĥÙĨ', 'Ùĥ'] +['öff', 'entlich'] +['г', 'ан'] +['ĠاÙĦØŃ', 'ÙĦ'] +['ĠmiÄĻd', 'zy'] +['ĠÑĩа', 'ÑģÑĤи'] +['ujÄħ', 'cy'] +['ĠbaÄŁ', 'lı'] +['ĠiliÅŁ', 'ki'] +['Ùģ', 'اء'] +['ãĥª', 'ãĥ³ãĤ°'] +['Ġhã', 'ng'] +['ĠконÑĤ', 'ÑĢ'] +['ĠконÑĤÑĢ', 'ол'] +['к', 'оп'] +['ש', '×Ļ×¢'] +['ש×Ļ×¢', '×ķר'] +['ĠÐĴ', 'аÑĪ'] +['Ġ×Ķ', 'תק'] +['ÙħÙĨ', 'ع'] +['ĠpolÃŃt', 'ico'] +['Ġг', 'олов'] +['ĠØ¥', 'ÙĬ'] +['Ø¥', 'ÙĨتاج'] +['à¸ļ', 'ิ'] +['Ġг', 'овоÑĢ'] +['ĠговоÑĢ', 'иÑĤ'] +['Ġph', 'á»ķ'] +['ĠÑģем', 'ÑĮ'] +['ãģ¯', 'ãģĤãĤĬãģ¾ãģĽãĤĵ'] +['ĠÙĪ', 'است'] +['×ŀש', 'פ×ĺ'] +['з', 'ем'] +['×ŀ×ĵ', '×ijר'] +['Ġíģ', '°'] +['ĠìĿ´', 'ë²Ī'] +['ê°Ģ', 'ëĬĶ'] +['Ġì§Ģ', 'ìĽIJ'] +['Ġca', 'ÅĤy'] +['Ġgeli', 'ÅŁtir'] +['Ñģк', 'ое'] +['pos', 'é'] +['Ġkh', 'ô'] +['à¸ķิà¸Ķ', 'à¸ķาม'] +['miss', 'ão'] +['Ġ׾', '×ŀר'] +['Ġ׾×ŀר', '×ķת'] +['Ġb', 'ó'] +['à¸ķรวà¸Ī', 'สà¸Ńà¸ļ'] +['Ġngh', 'á»ģ'] +['Ġб', 'из'] +['Ġбиз', 'неÑģ'] +['ÑģÑĤ', 'еÑĢ'] +['ÙĪ', 'Ùİ'] +['楽', 'ãģĹãģ'] +['楽ãģĹãģ', '¿'] +['ãģĵãĤĮ', 'ãģĭãĤī'] +['wiÄħ', 'zan'] +['ส', 'à¸Ńà¸Ļ'] +['Ùħ', 'ÙĪØ±'] +['׳×ĵ', '׾'] +['Ġ×Ķ×IJ', '×ĵ×Ŀ'] +['Ġм', 'олод'] +['ØŃ', 'Ùħا'] +['ØŃÙħا', 'ÙĬØ©'] +['ÑģÑĤ', 'ÑĢан'] +['Ġbu', 'á»ķi'] +['ת×Ļ', '×Ļ×Ŀ'] +['abile', 'ceÄŁi'] +['L', 'İ'] +['à¹Ģย', 'à¸Ńะ'] +['à¸Ī', 'ร'] +['س', 'ÙĥاÙĨ'] +['à¸Ļ', 'ัà¸Ķ'] +['Ġm', 'ấy'] +['ĠÐij', 'а'] +['s', 'ÅĤaw'] +['ĠÙģ', 'ÙĦا'] +['ĠкоÑĤоÑĢ', 'ой'] +['Ġпло', 'Ñī'] +['ĠплоÑī', 'ад'] +['ãĤĤ', 'ãģĤãĤĬ'] +['sz', 'czÄĻ'] +['×Ļפ', '×ķ'] +['ש×ŀ', 'ת'] +['owa', 'ÅĤa'] +['Ġn', 'ông'] +['צ×ij', '×IJ'] +['ĠìŀĪ', 'ìĹĪ'] +['ãģ¾', 'ãģ¨'] +['ãģ¾ãģ¨', 'ãĤģ'] +['ÙĤÙĪ', 'ات'] +['ãģ¿', 'ãĤĵãģª'] +['Ġ׼', 
'×ŀ×¢×ĺ'] +['Ġx', 'úc'] +['ï¼', 'Ĩ'] +['r', 'ÄĻ'] +['rÄĻ', 'cz'] +['×ĵ', '×ŀ×Ļ'] +['Ġt', 'áºŃn'] +['à¸Ķ', 'วà¸ĩ'] +['ê²½', 'ìłľ'] +['п', 'ÑĥÑĤ'] +['Ø£', 'ربع'] +['Ġ×ŀ', 'שת×ŀש'] +['ãĤ¿ãĤ¤', 'ãĥĹ'] +['Ġìłľ', 'ê°Ģ'] +['Ġ׾', '׼ף'] +['ĠобÑĢаз', 'ом'] +['ÙĬÙĥ', 'ا'] +['w', 'ÅĤ'] +['wÅĤ', 'asn'] +['ĠاÙĦÙĪØ·ÙĨ', 'ÙĬØ©'] +['بÙĬ', 'ب'] +['×ŀ', '׾×Ļ'] +['к', 'ÑĢаÑĤ'] +['기', 'ìĹIJ'] +['ÙĤ', 'اد'] +['ĠÙĦ', 'دÙī'] +['à¸Ħวาม', 'รูà¹ī'] +['×ŀ×ĵ×Ļ׳', '×Ļ×ķת'] +['ê²', '¨'] +['Ġíĺ', 'Ħìŀ¬'] +['ש', 'ת×Ļ'] +['м', 'ол'] +['Ġmá', 'i'] +['à¸ŀิ', 'ม'] +['à¸ŀิม', 'à¸ŀ'] +['à¸ŀิมà¸ŀ', 'à¹Į'] +['หล', 'วà¸ĩ'] +['Ġx', 'uyên'] +['×Ĺ', 'סר'] +['رÙĪ', 'ÙĨ'] +['ãģĿãģĨ', 'ãģĦãģĨ'] +['ãģĿãĤĮ', 'ãģŀ'] +['ãģĿãĤĮãģŀ', 'ãĤĮ'] +['Ġ׼', 'ש×Ķ'] +['ÐŁ', 'ÑĢав'] +['×ŀ×ij', 'צע'] +['ع', 'رب'] +['Ġbü', 'yü'] +['פ×Ļת', '×ķ×Ĺ'] +['à¸Ī', 'à¸ļ'] +['ĠØ£', 'Ùĥبر'] +['שר', 'ת'] +['×ŀ׼', 'ש×Ļר'] +['ĠÙĪ', 'Ùħع'] +['ãģ®', 'ãģŁãĤģãģ«'] +['à¸Ļ', 'ัà¸ļ'] +['ì°', '°'] +['ãĥª', 'ãĥķãĤ©'] +['ãĥªãĥķãĤ©', 'ãĥ¼ãĥł'] +['Ġc', 'ưá»Ŀng'] +['ĠìłĢ', 'íĿ¬'] +['ÙħÙĨظ', 'ÙħØ©'] +['Ġhiç', 'bir'] +['ãģ§ãģ¯', 'ãģĤãĤĬãģ¾ãģĽãĤĵ'] +['ร', 'à¸Ńย'] +['ëIJľ', 'ëĭ¤'] +['ãģĻãģIJ', 'ãģ«'] +['к', 'ла'] +['Ġürün', 'ler'] +['Ġki', 'á»ĥu'] +['ĠëĤĺ', 'ëĬĶ'] +['ÑĤ', 'ки'] +['Ñģ', 'им'] +['Ġchá»ī', 'nh'] +['ãĤĤ', 'ãģªãģĦ'] +['ศ', 'รี'] +['æĽ¿', 'ãģĪ'] +['ta', 'ÅŁ'] +['Ġب', 'ÙĥÙĦ'] +['Ġ×ķ', '×Ļש'] +['vis', 'ão'] +['ä¼', 'Ŀ'] +['ä¼Ŀ', 'ãģĪ'] +['ÙĦ', 'د'] +['׾', '×Ļ×ŀ'] +['׾×Ļ×ŀ', '×ķ×ĵ'] +['t', 'ória'] +['د', 'Ùij'] +['اÙħ', 'ر'] +['Ġê·¸ëłĩ', 'ê²Į'] +['Ġmateria', 'ÅĤ'] +['à¸Ĺ', 'รา'] +['à¸Ĺรา', 'à¸ļ'] +['ã쮿ĸ¹', 'ãģĮ'] +['ãģ¦', 'ãģįãģŁ'] +['ض', 'غ'] +['ضغ', 'Ø·'] +['ĠÙĬ', 'عÙĨÙĬ'] +['ел', 'о'] +['×IJ×Ķ', '×ij×Ķ'] +['×¢', '×ŀ'] +['ÅŁ', 'ık'] +['ìŀIJ', 'ëĬĶ'] +['ãĤ¿', 'ãĥ³'] +['Ġb', 'áºŃt'] +['×ŀשפ', '×Ĺ×Ķ'] +['к', 'ÑĢи'] +['б', 'ли'] +['สั', 'à¸ķ'] +['สัà¸ķ', 'วà¹Į'] +['ĠسÙĨ', 'ÙĪØ§Øª'] +['ĠPh', 'ương'] +['ãģ¦ãģĹãģ¾', 'ãģ£ãģŁ'] +['ãģª', 'ãģľ'] +['Ġ×ij×IJ', '×ķ'] +['Ġc', 'án'] +['س', 'جÙĦ'] +['Ġl', 'ẽ'] +['ãĤ±', 'ãĥ¼ãĤ¹'] +['Ġ×§', '×Ļ×ij׾'] +['à¸ļà¸Ĺ', 'à¸Ħวาม'] 
+['Ġ×ķ', '׼ף'] +['ĠпÑĢедÑģÑĤав', 'лен'] +['Ġn', 'á»iji'] +['Ġcoment', 'ário'] +['ени', 'ем'] +['Ġtá»', 'ı'] +['l', 'Ãł'] +['Ġש×Ķ', '×Ļ×Ķ'] +['Ñģл', 'ав'] +['ĠاÙĦ', 'ÙĪÙĦا'] +['ĠاÙĦÙĪÙĦا', 'ÙĬات'] +['ÙĦج', 'ÙĨØ©'] +['×§×ķר', '×IJ'] +['бÑĭ', 'ÑĤ'] +['Ġì', '¦'] +['Ġì¦', 'ī'] +['ãģ§ãģĻ', 'ãģĹ'] +['หรืà¸Ń', 'à¹Ħมà¹Ī'] +['за', 'ÑīиÑĤ'] +['ÙģÙĦ', 'سطÙĬÙĨ'] +['Ġmi', 'á»ħn'] +['à¹Ģย', 'à¹ĩà¸Ļ'] +['ĠçalÄ±ÅŁ', 'an'] +['×Ļ×Ĵ', '×Ķ'] +['ĠE', 'ÄŁ'] +['ĠEÄŁ', 'itim'] +['ãĥĥãĤ·', 'ãĥ¥'] +['Ġоп', 'Ñĭ'] +['ĠопÑĭ', 'ÑĤ'] +['ر', 'غ'] +['رغ', 'ب'] +['ĠÑģво', 'иÑħ'] +['à¸Ľà¸£à¸°', 'à¸ķ'] +['à¸Ľà¸£à¸°à¸ķ', 'ู'] +['Ġ×ŀ×IJ', '×ĵ'] +['׼', '×ķ׳×Ļ×Ŀ'] +['à¸Ļ', 'ี'] +['ĠвÑĭ', 'Ñħод'] +['ãģ®ä¸Ń', 'ãģ«'] +['פ', '׾×IJ'] +['ĠÙĪ', 'ÙĦÙĬس'] +['פ×ķר', 'ס'] +['פ×ķרס', '×Ŀ'] +['Ùħ', 'سÙĦÙħ'] +['Ġng', 'ôi'] +['×ĵ', '×ŀ×ķת'] +['ãĤĴ使', 'ãģ£ãģ¦'] +['ĠпомоÑī', 'ÑĮÑİ'] +['Ø£', 'سر'] +['бл', 'ок'] +['ÙĤ', 'Ùĩ'] +['ãģĹãģ¾', 'ãģĦ'] +['ãģ¨', 'ãģĹãģŁ'] +['Ġп', 'еÑģ'] +['ãĥī', 'ãĥ«'] +['×Ĺ', '×Ŀ'] +['ãģĹãģª', 'ãģĮãĤī'] +['ĠÐŁ', 'ÑĢед'] +['ãĥģãĤ§', 'ãĥĥãĤ¯'] +['å¼·', 'ãģĦ'] +['ש', '×Ļר×ķת'] +['д', 'аеÑĤ'] +['×Ļ×ij', '×ķ'] +['Ġgen', 'ç'] +['ил', 'аÑģ'] +['илаÑģ', 'ÑĮ'] +['ĠبÙĦ', 'د'] +['æĤ', 'ª'] +['æĤª', 'ãģĦ'] +['Ġ×ŀ', 'שת'] +['æ§ĺ', 'ãĢħ'] +['æ§ĺãĢħ', 'ãģª'] +['à¸ĺรรม', 'à¸Ĭาà¸ķิ'] +['ĠÙĥ', 'اÙħÙĦ'] +['ĠاÙĦس', 'Ùħ'] +['×ij×ĺ', '×Ļ×Ĺ'] +['c', 'á'] +['g', 'ência'] +['ãĤ¹ãĤ¿', 'ãĥ¼'] +['à¸Ĺำ', 'à¸ģาร'] +['×Ļ׾', 'ת'] +['Ġ×Ļ', '×ķצ×IJ'] +['w', 'ój'] +['à¸ļุ', 'à¸Ħ'] +['à¸ļุà¸Ħ', 'à¸Ħล'] +['ع', 'تÙħ'] +['عتÙħ', 'د'] +['ãģĿãĤĮ', 'ãģ«'] +['ĠاÙĦت', 'ارÙĬØ®'] +['ÙĤر', 'اء'] +['Ġyönet', 'im'] +['×§', 'שר'] +['ĠÑģп', 'оÑĢÑĤ'] +['Ġר×IJש', '×ķף'] +['Ġseñ', 'al'] +['Ġch', 'ắn'] +['çĦ¡', 'ãģĦ'] +['ĠдоÑģÑĤ', 'аÑĤ'] +['ĠдоÑģÑĤаÑĤ', 'оÑĩно'] +['Ġá', 'gua'] +['à¸ģร', 'à¸ĵ'] +['à¸ģรà¸ĵ', 'ี'] +['Ġ×ŀש', '×ķ'] +['Ġtr', 'ải'] +['ë²', 'Į'] +['ujÄħ', 'cych'] +['Ù쨱', 'د'] +['à¹ĥ', 'à¸ģล'] +['à¹ĥà¸ģล', 'à¹ī'] +['ãĤĭ', 'ãģ®ãģ¯'] +['ר×ķ', '×ķ×Ĺ'] +['ÙĨ', 'Ùĥ'] +['ĠاÙĦÙĨ', 'ÙĤ'] +['ãģ®ãģ§', 'ãģĹãĤĩãģĨ'] +['ãģ®ãģ§ãģĹãĤĩãģĨ', 'ãģĭ'] +['Ùħ', 
'عرÙģ'] +['ÙħعرÙģ', 'Ø©'] +['ÑĥÑī', 'е'] +['Ġ×ij×¢', '×Ļקר'] +['ت', 'صÙĦ'] +['Ġ×Ķ×IJ', 'ר'] +['Ġ×Ķ×IJר', '×¥'] +['ĠÅŀ', 'i'] +['à¸Ĥา', 'à¸Ķ'] +['íŀ', 'ĺ'] +['ãģªãĤĵ', 'ãģ¨'] +['ĠìĤ¬ëŀ', 'ij'] +['l', 'Ã¼ÄŁÃ¼'] +['ب', 'اء'] +['ĠاÙĦØ¢', 'خر'] +['Ġfam', 'ÃŃlia'] +['ĠTh', 'áng'] +['Ñī', 'ениÑı'] +['ãĤ¯', 'ãĥŃ'] +['ĠTh', 'ứ'] +['æĽ¸', 'ãģį'] +['ен', 'ной'] +['ìŀ', '¡'] +['бл', 'аг'] +['благ', 'о'] +['п', 'ов'] +['à¹ģ', 'ว'] +['à¸ĩ', 'à¸Ħà¹Į'] +['à¸Ńัà¸Ļ', 'à¸Ķัà¸ļ'] +['ãģĤ', 'ãģĴ'] +['ร', 'à¹īาย'] +['ün', 'ün'] +['Ġ×Ļ׼×ķ׾', '×Ķ'] +['з', 'он'] +['ĠÐľ', 'и'] +['маÑĤ', 'еÑĢиал'] +['Ġë³´', 'ë©´'] +['ØŃÙģ', 'ظ'] +['ê', 'Ìģ'] +['ãģ«', 'ãģĻãĤĭ'] +['Ġת', '×IJ'] +['Ġ×Ķס', '×ķ'] +['ĠÑģÑĤ', 'оÑĢ'] +['ĠÑģÑĤоÑĢ', 'он'] +['ãĥĪ', 'ãĥĥãĥĹ'] +['ÅĤo', 'ÅĽÄĩ'] +['ëħ', '¼'] +['ëĵ', 'Ŀ'] +['ĠÙĪØ§ÙĦ', 'ع'] +['ì¶', 'Ķ'] +['Ġ×Ļצ', '×IJ'] +['ĠÑĢаз', 'дел'] +['алÑĮ', 'наÑı'] +['×IJ׳', 'ש×Ļ'] +['spo', 'ÅĤ'] +['spoÅĤ', 'ec'] +['spoÅĤec', 'zn'] +['Ø¥', 'عÙĦ'] +['إعÙĦ', 'اÙĨ'] +['ÙĤÙĪ', 'Ùī'] +['íķĺë©´', 'ìĦľ'] +['تط', 'ÙĪØ±'] +['Ġsi', 'êu'] +['Ỽ', 't'] +['д', 'ви'] +['дви', 'ж'] +['Ġqu', 'ần'] +['k', 'ıl'] +['ĠпÑĢи', 'зна'] +['ĠH', 'ã'] +['ĠHã', 'y'] +['ĠباÙĦ', 'ت'] +['man', 'ın'] +['ãĤ«', 'ãĥ«'] +['Ġk', 'á»·'] +['×§', '׾×Ļ'] +['ëIJĺ', 'ì§Ģ'] +['تعÙĦ', 'Ùħ'] +['ìĭľ', 'ìĦ¤'] +['ìĭ', '¶'] +['íĺ', '¼'] +['Ùĥ', 'ÙĬÙģ'] +['売', 'ãĤĬ'] +['วิ', 'à¸Ĭา'] +['б', 'ал'] +['ĠØ£', 'ØŃ'] +['Ġдолж', 'ен'] +['รา', 'à¸ĩ'] +['ราà¸ĩ', 'วั'] +['ราà¸ĩวั', 'ล'] +['Ùħ', 'اء'] +['ج', 'ار'] +['Å', 'ļ'] +['Ġ×ŀ×IJ', '×ĸ'] +['ר', '×ŀ×Ķ'] +['ãģĭãĤĤãģĹãĤĮ', 'ãģªãģĦ'] +['ét', 'ude'] +['czÄħ', 'c'] +['Ġg', 'ór'] +['×ł×¡', '×Ķ'] +['Ùħ', 'ÙĬد'] +['ĠÐŁ', 'еÑĢе'] +['Ø£', 'خر'] +['ãģĿãģ®', 'å¾Į'] +['à¹Ģà¸Ķียว', 'à¸ģัà¸Ļ'] +['×ŀ', '×Ĵ×ķ'] +['×ŀ×Ĵ×ķ', '×ķף'] +['д', 'ов'] +['mas', 'ına'] +['×¢', '׳×Ķ'] +['ãĤ±', 'ãĥĥãĥĪ'] +['ס', '×¢'] +['סע', '×Ļ×£'] +['ĠT', 'ư'] +['Ġt', 'óc'] +['íĻľ', 'ëıĻ'] +['ĠÐŀ', 'д'] +['ĠÐŀд', 'нако'] +['Ġdol', 'ayı'] +['ؤ', 'Ùĥد'] +['ê³Ħ', 'íļį'] +['׾', 'ר'] +['в', 'еÑĩ'] +['Ġkh', 'ợi'] +['Ġth', 'á»§y'] +['×ĵ', 'ף'] 
+['ร', 'à¸ģ'] +['à¸ļั', 'à¸ķร'] +['à¹Ģà¸ģ', 'à¹Īา'] +['ĠاÙĦØ«', 'اÙĦ'] +['ĠاÙĦثاÙĦ', 'Ø«'] +['Ġpod', 'rá'] +['ער', '×Ļ'] +['ÙĨج', 'اØŃ'] +['Ġkh', 'ắc'] +['ì¸', '¡'] +['İ', 'M'] +['ãĤ»', 'ãĥĥãĥĪ'] +['ż', 'enia'] +['Ġ׾×Ĺ', '×ijר'] +['er', 'Ãł'] +['ì', '´Ī'] +['Ġkü', 'ç'] +['Ġküç', 'ük'] +['ات', 'ÙĩÙħ'] +['à¸ĭ', 'à¹Į'] +['Ùħشار', 'ÙĥØ©'] +['ĠاÙĦ', 'بط'] +['Ġd', 'ây'] +['ен', 'нÑĭм'] +['à¸Ĺีà¹Ī', 'à¹Ħมà¹Ī'] +['ÙĤ', 'Ùİ'] +['Ġv', 'ượt'] +['Ġtr', 'ì'] +['Ġwp', 'ÅĤyw'] +['A', 'Åŀ'] +['з', 'о'] +['ĠاÙĦس', 'ÙĬد'] +['à¸Ĺะ', 'à¹Ģล'] +['ĠÑģодеÑĢж', 'а'] +['ع', 'Ø·ÙĬ'] +['ĠاÙĦع', 'ÙĨ'] +['èĢħ', 'ãģĮ'] +['à¹Ģ', 'หà¸Ļ'] +['à¹Ģหà¸Ļ', 'ืà¸Ń'] +['Ġb', 'ÃŃ'] +['Ġüzer', 'inden'] +['ĠV', 'Å©'] +['Ġnu', 'ôi'] +['ÙĨ', 'Ùħ'] +['алÑĮ', 'ного'] +['×¢', '×Ļף'] +['ØŃ', 'ضر'] +['ĠоÑĤ', 'дел'] +['ëª', 'ĩ'] +['ìķ', '¡'] +['ĠÙĦدÙĬ', 'Ùĩ'] +['ìĻ', 'ľ'] +['Ġse', 'ktör'] +['Ġвозмож', 'но'] +['ĠÐĶ', 'ж'] +['Ġh', 'ô'] +['äºĭ', 'ãģĮ'] +['иÑĢов', 'ание'] +['алÑĮ', 'ной'] +['Ġ미', 'êµŃ'] +['ر', 'ØŃÙĦ'] +['ĠÑįк', 'Ñģ'] +['пÑĢав', 'лÑı'] +['Ġnh', 'á»Ŀ'] +['ĠÄij', 'ẩ'] +['ĠÄijẩ', 'y'] +['Ùģ', 'Ùĥر'] +['ĠÙĪØ£', 'ضاÙģ'] +['ãĥIJ', 'ãĤ¹'] +['ת×ķ׼', '׳×Ļת'] +['ÑĤел', 'ей'] +['ĠØ¥ÙĦÙĬ', 'Ùĩ'] +['ãģ¨è¨Ģ', 'ãģ£ãģ¦'] +['Ġдв', 'е'] +['Ġch', 'ấp'] +['ĠL', 'ö'] +['à¸Ħล', 'ิ'] +['à¸Ħลิ', 'à¸Ľ'] +['Ġس', 'ÙĪØ±'] +['ĠسÙĪØ±', 'ÙĬا'] +['×ŀ×Ĺ', '×ķ'] +['st', 'ä'] +['д', 'об'] +['Ġni', 'á»ĩm'] +['ãģ®', '大'] +['פר×ķ', '×Ļ×§'] +['פר×ķ×Ļ×§', '×ĺ'] +['ĠCh', 'âu'] +['Ġ×ŀ×Ķ', '×Ŀ'] +['Ñģк', 'им'] +['ĠполÑĥÑĩ', 'иÑĤÑĮ'] +['ÙĬ', 'ÙĪÙħ'] +['Ø«', 'ÙĪØ±'] +['פ×ķ׾', '×Ļ×ĺ'] +['פ×ķ׾×Ļ×ĺ', '×Ļ'] +['ĠмеÑģÑı', 'ÑĨ'] +['åħ¨', 'ãģ¦'] +['ĠاÙĦÙħ', 'جÙĦس'] +['ĠاÙĦت', 'اÙĦÙĬ'] +['Ġ×Ĺ', 'ר'] +['åIJij', 'ãģij'] +['׼', '×ŀ×Ķ'] +['б', 'ед'] +['Ø£', 'عض'] +['أعض', 'اء'] +['ÙĪÙĦ', 'د'] +['วà¹Īา', 'à¸Īะ'] +['Ġb', 'ánh'] +['à¸Ļิ', 'ย'] +['à¸Ļิย', 'ม'] +['à¸Ľà¸£à¸°', 'à¸ģัà¸Ļ'] +['ÑģÑĤав', 'иÑĤÑĮ'] +['à¸ŀ', 'à¸Ļัà¸Ļ'] +['ĠÑį', 'ÑĦÑĦ'] +['ĠÑįÑĦÑĦ', 'екÑĤив'] +['Ġав', 'ÑĤоÑĢ'] +['ĠÄIJ', 'Äĥng'] +['Ġth', 'Æ°á»Łng'] +['ãĤĴ', 'æĦŁãģĺ'] +['à¸ģัà¸ļ', 'à¸ģาร'] 
+['å¾Į', 'ãģ«'] +['Ġya', 'ÄŁ'] +['ست', 'اÙĨ'] +['Ġli', 'á»ģn'] +['ãģĦ', 'ãģ¾'] +['i', 'êu'] +['à¹Ĥà¸Ķ', 'à¸Ļ'] +['ĠÙĦ', 'ذÙĦÙĥ'] +['à¹Ĥรà¸ĩ', 'à¹Ģรียà¸Ļ'] +['צ', '×Ļ×Ĵ'] +['ĠاÙĦÙħ', 'عÙĦÙĪÙħات'] +['ç§ģ', 'ãģŁãģ¡'] +['à¸Ĺีà¹Ī', 'à¸Ħุà¸ĵ'] +['ãģ«ãģª', 'ãģ£ãģ¦ãģĦãĤĭ'] +['×ŀ×ĵ', '×Ļ׳×Ķ'] +['ס', '׼×Ŀ'] +['Ġв', 'не'] +['à¸ŀ', 'à¸Ļัà¸ģà¸ĩาà¸Ļ'] +['ÑĢ', 'ей'] +['à¹Ģà¸Īà¹īา', 'หà¸Ļà¹īาà¸Ĺีà¹Ī'] +['ĠHi', 'á»ĩn'] +['Ġméd', 'ico'] +['ĠتØŃ', 'ÙĤÙĬÙĤ'] +['ÑĮ', 'ÑĤе'] +['miÅŁ', 'ti'] +['ÙĤÙĬ', 'ادة'] +['ãĤı', 'ãģĭãĤĬ'] +['มา', 'à¸Īาà¸ģ'] +['ëħ', 'Ģ'] +['ãģ«éĸ¢', 'ãģĻãĤĭ'] +['×IJר×Ĵ', '×ķף'] +['m', 'ètre'] +['Ġעצ', '×ŀ×Ļ'] +['ĠCh', 'úa'] +['รูà¹ī', 'à¸Ī'] +['รูà¹īà¸Ī', 'ัà¸ģ'] +['ì£', 'Ħ'] +['ëĭ', 'µ'] +['à¹ģà¸Ĺ', 'à¹ī'] +['Ġgeç', 'en'] +['Ġlan', 'ça'] +['ĠاÙĦ', 'بØŃØ«'] +['×ĵ', '×ŀ×ķ'] +['ãģ¯', 'ãģĺ'] +['ãģ¯ãģĺ', 'ãĤģ'] +['Ġdön', 'Ã¼ÅŁ'] +['è¿ij', 'ãģı'] +['à¹Ģส', 'ม'] +['à¹Ģสม', 'à¸Ń'] +['ëĿ', '½'] +['Ġü', 'ç'] +['á»', 'ŀ'] +['ÑĪ', 'аÑı'] +['à¸Ĺ', 'ร'] +['ØŃ', 'ÙĤÙĬÙĤØ©'] +['à¸Ĥà¸Ńà¸ĩ', 'à¸ģาร'] +['Ġ무', 'ìĹĩ'] +['Ġ×Ķ', '׼ר'] +['ĠاÙĦص', 'ÙĬÙĨ'] +['ĠлÑİ', 'ди'] +['à¸ķ', 'าย'] +['ب', 'ÙĪÙĦ'] +['Ġvi', 'êm'] +['Ġthi', 'á»ĩu'] +['à¸ģ', 'à¸Ķ'] +['Ġ׾', '×ĵ×ijר'] +['פ', '׳×Ķ'] +['×IJר', '×ij×¢'] +['س', 'Ùī'] +['ĠاÙĦسÙĬ', 'اس'] +['ĠاÙĦسÙĬاس', 'ÙĬØ©'] +['yd', 'ı'] +['ÙĪØŃØ¯', 'Ø©'] +['ĠдеÑıÑĤелÑĮ', 'ноÑģÑĤи'] +['Ġ×ķ×Ķ', '×ŀ'] +['п', 'еÑĩ'] +['пеÑĩ', 'аÑĤ'] +['иÑĢов', 'аниÑı'] +['ĠÑģ', 'ог'] +['ĠÑģог', 'лаÑģ'] +['Ġ׼', '×ĵ'] +['Ġ׼×ĵ', '×IJ×Ļ'] +['ĠиÑģполÑĮзов', 'аÑĤÑĮ'] +['ס', 'פ×ķר×ĺ'] +['Ġil', 'çe'] +['exp', 'érience'] +['ĠTh', 'á»Ŀi'] +['İ', 'K'] +['à¹Ħà¸Ł', 'à¸Łà¹īา'] +['ëĵ¤', 'ìĹIJê²Į'] +['à¸Ľà¸£à¸°', 'à¹Ģà¸ł'] +['à¸Ľà¸£à¸°à¹Ģà¸ł', 'à¸Ĺ'] +['Ġmü', 'mk'] +['Ġmümk', 'ün'] +['Ġ×IJ×ķת', '׳×ķ'] +['ìĦ±', 'ìĿĦ'] +['ĠìĿ´', 'ìľł'] +['زÙĬ', 'ارة'] +['Ġolduk', 'ça'] +['r', 'ób'] +['ĠØ£', 'ÙĨا'] +['Ġ×Ķ', '×ij×Ļ'] +['Ñģ', 'ен'] +['×¢', '×Ļקר'] +['×Ļ×ĵ', '×ķ×¢'] +['d', 'zÄħ'] +['Ùħ', 'عÙĦÙĪÙħات'] +['Ø´', 'اب'] +['Ġpar', 'ça'] +['à¸Ļะ', 'à¸Ħะ'] +['ب', 'اس'] +['ĠÑĤоÑĢ', 'г'] +['ĠÑĤоÑĢг', 'ов'] +['Ġ×Ĺ', 
'×ĵר'] +['׼', 'ר×ĺ'] +['׼ר×ĺ', '×Ļס'] +['ĠA', 'yrıca'] +['êÌ', '£'] +['ìľ', '¨'] +['ĠÑĤак', 'ие'] +['Ġ×ŀצ', '×ķ×Ļ'] +['ãĥ©ãĥ³', 'ãĤŃãĥ³ãĤ°'] +['ש×Ļ×ķ', '×ķ×§'] +['åīį', 'ãģ®'] +['ĠB', 'ảo'] +['Ñī', 'Ñĥ'] +['æĹ©', 'ãģı'] +['ĠPh', 'òng'] +['à¸ŀระ', 'ราà¸Ĭ'] +['פ', '×Ĺ×ķת'] +['Ġг', 'л'] +['Ġгл', 'аз'] +['à¸Ĺ', 'à¹Īา'] +['Ġd', 'ạy'] +['ÑĢ', 'оÑģÑĤ'] +['à¹Ĥà¸Ķย', 'à¹Ģà¸īà¸ŀาะ'] +['Ġqu', 'áºŃn'] +['Ġ×Ĺ×ijר', '×ķת'] +['m', 'ême'] +['mÄ±ÅŁ', 'tı'] +['ĠاÙĦت', 'داÙĪÙĦ'] +['Ġn', 'ạn'] +['Ġ×Ķ', '×ĵ×Ļ'] +['ĠاÙĦØ·', 'رÙĬÙĤ'] +['×Ĵ', '×ķת'] +['Ġ×Ķ', '×ĵר×ļ'] +['ujÄħ', 'ce'] +['Ġch', 'ữ'] +['ãĤĤãģ®', 'ãģ®'] +['ë°', 'Ľ'] +['ãģķãĤĵ', 'ãģ¯'] +['Ġyard', 'ım'] +['ĠاÙĦع', 'Ùħ'] +['Ġì§Ħ', 'íĸī'] +['Ġ×Ļ', '×Ĺ'] +['Ġ×Ļ×Ĺ', 'ס×Ļ'] +['ĠاÙĦÙħ', 'دÙĬÙĨØ©'] +['Ġc', 'ú'] +['à¸ģี', 'ฬ'] +['à¸ģีฬ', 'า'] +['Ġni', 'ên'] +['mis', 'ión'] +['׳×Ļס', '×Ļ'] +['׳×Ļס×Ļ', '×ķף'] +['Ġвоз', 'ÑĢаÑģÑĤ'] +['Ġ×¢×ķש', '×Ķ'] +['ĠÙħ', 'دÙĬر'] +['Ñı', 'ÑģÑĮ'] +['ØŃ', 'جÙħ'] +['íĻĺ', 'ê²½'] +['ĠاÙĦØ£', 'خرÙī'] +['u', 'ÃŁer'] +['ĠاÙĦعاÙĦÙħ', 'ÙĬØ©'] +['ĠNg', 'á»įc'] +['êµIJ', 'íļĮ'] +['ä¸Ĭ', 'ãģ§'] +['×Ļ×Ķ', '×ķ×ĵ'] +['×Ļ×Ķ×ķ×ĵ', '×Ļ×Ŀ'] +['Ùħس', 'اعدة'] +['Ġжиз', 'нÑĮ'] +['ĠпоÑĤ', 'омÑĥ'] +['ĠاÙĦÙħ', 'ÙħÙĦ'] +['ĠاÙĦÙħÙħÙĦ', 'ÙĥØ©'] +['ĠG', 'ör'] +['ر', 'ÙIJ'] +['×ŀ×§', '×ķ×ŀ×ķת'] +['åĩºæĿ¥', 'ãĤĭ'] +['ÑĦ', 'ÑĤ'] +['ĠìĿ´', 'ìłľ'] +['ĠÑĢ', 'ем'] +['ĠÑĢем', 'онÑĤ'] +['ת', '×ķ×ļ'] +['æĻĤ', 'ãģ¯'] +['ãĤīãĤĮ', 'ãģªãģĦ'] +['alt', 'ı'] +['å®¶', 'ãģ®'] +['ĠاÙĦØ¥', 'عÙĦاÙħ'] +['리', 'ëĬĶ'] +['ãģĭãĤī', 'ãģ¯'] +['ĠH', 'ạ'] +['ãģĤ', 'ãģ®'] +['×ĵ×Ļ', '×ķף'] +['رÙĬ', 'س'] +['Ġsoci', 'etÃł'] +['ĠاÙĦÙĥ', 'بÙĬر'] +['Ġ×ij', '×ŀס'] +['Ġ×ij×ŀס', '×Ĵר'] +['Ġ×ij×ŀס×Ĵר', 'ת'] +['ĠìŀĪ', 'ìľ¼ë©°'] +['Ġn', 'ặng'] +['Ùĩ', 'Ùī'] +['ĠB', 'Ãł'] +['×ŀר', '×ķ'] +['Ġj', 'ÄĻ'] +['ĠjÄĻ', 'zy'] +['ĠjÄĻzy', 'k'] +['Ġ׼', '×ŀ×ķ×ijף'] +['×¢', '׾×Ķ'] +['à¸Ĺีà¹Ī', 'à¹Ħà¸Ķà¹ī'] +['ãģ¾', 'ãģĹãĤĩãģĨ'] +['×ŀס', 'פר'] +['Т', 'Ðŀ'] +['سÙĬاس', 'Ø©'] +['Ġкажд', 'Ñĭй'] +['ë²', 'ł'] +['t', 'ım'] +['y', 'á»ĩn'] +['ร', 'ีà¹Ī'] +['ĠдеÑĤ', 'Ñģк'] +['วิà¸ĺี', 'à¸ģาร'] +['m', 
'ówi'] +['×ĺ×¢', '×Ŀ'] +['×Ķצ׾', '×Ĺ×Ķ'] +['ض', 'ÙĬÙģ'] +['ĠÑħоÑĤ', 'Ñı'] +['ãĤĵãģ§', 'ãģĦãĤĭ'] +['à¸Ħา', 'à¸Ķ'] +['à¸Ħร', 'à¸ļ'] +['Ġк', 'ÑĥÑĢÑģ'] +['ĠbaÅŁ', 'arı'] +['×ijר', '×ķ'] +['ÙĬع', 'Ø©'] +['ĠÐĿ', 'Ñĥ'] +['à¸Ħวาม', 'à¹Ģà¸Ľà¹ĩà¸Ļ'] +['Ġ׾', '×ŀש׾'] +['Ġì¢ĭ', 'ìĿĢ'] +['Ùħؤس', 'س'] +['Ùħؤسس', 'ات'] +['Ġpréc', 'is'] +['Ġth', 'ảo'] +['à¸ģà¹ĩ', 'à¸Ħืà¸Ń'] +['Ġש', '׼׾'] +['führ', 'ung'] +['ãģĦ', 'ãģ§'] +['à¹ģละ', 'มี'] +['à¸ģà¹ĩ', 'มี'] +['Ġש', 'ש'] +['м', 'ел'] +['Ġкни', 'г'] +['ĠباÙĦ', 'ÙĨ'] +['ĠباÙĦÙĨ', 'سبة'] +['Ġald', 'ı'] +['ÑĤ', 'ай'] +['Ġ×Ĺ×ĵ', 'ש×Ļ×Ŀ'] +['å®Ł', 'ãģ¯'] +['ع', 'ÙĪØ§'] +['ĠìĿĺ', '미'] +['из', 'м'] +['ÑĢабоÑĤ', 'аÑĤÑĮ'] +['Ùģ', 'ص'] +['Ġ×ij׳', '×ķסף'] +['ãģ¨ãģĹãģ¦', 'ãĤĤ'] +['à¹Ģà¸Ľà¹ĩà¸Ļ', 'à¸Ĺีà¹Ī'] +['ĠÑģлед', 'ÑĥеÑĤ'] +['èĢĥãģĪ', 'ãģ¦'] +['Ġ׼', '×Ļ×ķ×Ŀ'] +['ÑģÑĤ', 'Ñĭ'] +['׼׾׼', '׾×Ļ'] +['æµģ', 'ãĤĮ'] +['ãĤĴ', 'ãģ¤ãģij'] +['Ñĩ', 'аÑĤ'] +['×Ļ׼', '×ķף'] +['×Ļר', '×Ļ'] +['ları', 'yla'] +['ãĤ¤', 'ãĥ¡'] +['ãĤ¤ãĥ¡', 'ãĥ¼ãĤ¸'] +['׳×ĸ', '×§'] +['Ġci', 'ò'] +['Ġs', 'ın'] +['Ġsın', 'ır'] +['à¸Ļ', 'à¸Ħร'] +['к', 'аÑĤ'] +['Ġl', 'á»Ĺi'] +['ëŀ', 'Į'] +['تÙģ', 'اص'] +['تÙģØ§Øµ', 'ÙĬÙĦ'] +['ëĨ', 'ĵ'] +['ĠÙħ', 'ض'] +['il', 'miÅŁ'] +['بار', 'Ùĥ'] +['ÐĿ', 'Ðĺ'] +['Ġth', 'ẩm'] +['Ġ×IJ×ķת', '×ļ'] +['ĠпÑĢин', 'им'] +['ĠпÑĢиним', 'а'] +['Ġyö', 'nt'] +['Ġyönt', 'em'] +['Ġ×ŀ×§', '×ij׾'] +['Ġktó', 'rego'] +['ê·', 'Ģ'] +['شر', 'Ùģ'] +['د', 'اÙħ'] +['ãģĦãĤį', 'ãģĦãĤį'] +['ĠAl', 'ém'] +['Ġgör', 'ü'] +['Ġgörü', 'nt'] +['Ġgörünt', 'ü'] +['د', 'س'] +['ÑĪ', 'ки'] +['г', 'ÑĢад'] +['Ġl', 'ạc'] +['Ġs', 'ữa'] +['ãĤīãĤĮ', 'ãģ¾ãģĻ'] +['o', 'Ãłi'] +['Ñī', 'ен'] +['ãģĭ', 'ãģªãģĦ'] +['Ġп', 'оп'] +['Ġпоп', 'Ñĥ'] +['ĠпопÑĥ', 'лÑıÑĢ'] +['ĠاÙĦÙħ', 'ÙĪÙĤع'] +['rä', 'g'] +['ï¼', '¡'] +['íķ', 'Ħ'] +['ãĤĴè¦ĭ', 'ãĤĭ'] +['اÙħ', 'ا'] +['ĠاÙĦØŃ', 'رب'] +['ĠÐŁ', 'а'] +['Ġ׾', '×IJתר'] +['Ġt', 'á»ijc'] +['×ij', '׾×Ķ'] +['ر', 'ئÙĬس'] +['в', 'Ñĥ'] +['ÙĬ', 'دÙĬ'] +['каз', 'ан'] +['Ġ×Ĺ', 'ש×ij×ķף'] +['h', 'ôtel'] +['×¢', '×ķ׳×Ķ'] +['ب', 'ÙĨÙĬ'] +['×ŀ', '×ķ׾'] +['Ġд', 'нÑı'] +['éĽ£', 'ãģĹãģĦ'] 
+['вед', 'ениÑı'] +['Ġ×ķ', '×ŀת'] +['н', 'апÑĢимеÑĢ'] +['ÙĤ', 'ابÙĦ'] +['Ġrésult', 'at'] +['ĠÑĢазвиÑĤ', 'иÑı'] +['ر', 'Ùij'] +['ìłĦ', '문'] +['ĠاÙĦÙħ', 'زÙĬد'] +['ĠìľĦ', 'íķ´ìĦľ'] +['ëĨ', 'į'] +['íĻ', 'ķ'] +['ĠThi', 'ết'] +['íĮ', '¨'] +['malı', 'dır'] +['Ġcz', 'ÅĤ'] +['ĠczÅĤ', 'owie'] +['ĠczÅĤowie', 'k'] +['ĠÙĦ', 'بÙĨ'] +['ĠÙĦبÙĨ', 'اÙĨ'] +['üs', 'ü'] +['ãģªãĤĵ', 'ãģł'] +['Ġżyc', 'ie'] +['ĠÑħоÑĢоÑĪ', 'о'] +['æĸ¹', 'ãģ«'] +['ëĭ¤', 'ë©´'] +['иÑĩеÑģ', 'каÑı'] +['ער', '×Ļ׼'] +['ער×Ļ׼', 'ת'] +['ãģ¾ãģĽãĤĵ', 'ãģ§ãģĹãģŁ'] +['ĠÑģоб', 'ой'] +['Ġg', 'á»Ĺ'] +['Ġдел', 'аÑĤÑĮ'] +['da', 'Äĩ'] +['аÑĢ', 'а'] +['róż', 'ni'] +['à¹Ģล', 'ีà¹ī'] +['à¹Ģลีà¹ī', 'ย'] +['à¹Ģลีà¹īย', 'à¸ĩ'] +['à¸Ŀ', 'าà¸ģ'] +['Ġت', 'ÙĤ'] +['ĠتÙĤ', 'دÙĬ'] +['ĠتÙĤدÙĬ', 'Ùħ'] +['หà¸Ļ', 'ุà¹Īม'] +['Ġmü', 'cade'] +['Ġmücade', 'le'] +['ì§Ģ', '를'] +['ãĤ¤', 'ãĤ¹'] +['ĠØ£', 'ساس'] +['jÄħce', 'go'] +['ĠÅŁ', 'eh'] +['н', 'ÑĤеÑĢ'] +['ÑĨи', 'Ñİ'] +['ï»', '»'] +['ÑİÑī', 'его'] +['à¹Ĥà¸Ľà¸£', 'à¹ģ'] +['à¹Ĥà¸Ľà¸£à¹ģ', 'à¸ģรม'] +['Ġmie', 'Äĩ'] +['ØŃÙĥÙĪÙħ', 'Ø©'] +['ãģ§ãģĹãģŁ', 'ãģĮ'] +['×Ļס', '×Ķ'] +['ãĤĤãģ®', 'ãĤĴ'] +['Ġ×ŀ', '×IJת'] +['สุà¸Ķ', 'à¸Ĺà¹īาย'] +['Ġc', 'Å©'] +['ÙĨ', 'سب'] +['ĠпÑĢ', 'оÑĩ'] +['Ġд', 'ней'] +['ĠÑįÑĤи', 'Ñħ'] +['׾', '×ŀת'] +['нÑı', 'Ñı'] +['Ñį', 'к'] +['Ġì§Ģ', 'ëĤľ'] +['มหา', 'วิà¸Ĺยา'] +['มหาวิà¸Ĺยา', 'ล'] +['มหาวิà¸Ĺยาล', 'ัย'] +['d', 'ão'] +['ĠMá', 'y'] +['ĠêµŃ', 'ê°Ģ'] +['à¸ļุ', 'รี'] +['×Ĵ', '×Ļ׾'] +['ĠÑĤÑĭ', 'ÑģÑı'] +['ĠÑĤÑĭÑģÑı', 'Ñĩ'] +['Ùģ', 'Ùĥ'] +['ĠÐĺ', 'Ñģ'] +['è¡Į', 'ãĤıãĤĮ'] +['פר', '×ĵ'] +['ãģ¤', 'ãģį'] +['à¸Ħร', 'à¸Ńà¸ļ'] +['à¸Ħรà¸Ńà¸ļ', 'à¸Ħรัว'] +['à¸Ĥึà¹īà¸Ļ', 'มา'] +['ä»ĬæĹ¥', 'ãģ¯'] +['ĠìĤ¬ëŀĮ', 'ìĿ´'] +['עצ', '×ŀ×Ķ'] +['п', 'оÑĢ'] +['ĠK', 'ỳ'] +['Ġ', 'Æ¡n'] +['Ġth', 'Äĥm'] +['Ùģ', 'اÙĤ'] +['ãģļ', 'ãģ«'] +['Ġ׾', 'קר'] +['Ġ׾קר', '×ķ×IJ'] +['اÙģ', 'ÙĬØ©'] +['Ùħ', 'ÙİØ§'] +['г', 'аÑĢ'] +['ص', 'ÙĦا'] +['صÙĦا', 'Ø©'] +['Ġ×ŀ', '×ĸ×Ķ'] +['lı', 'ģını'] +['Ġ×IJ', '×Ļ׳×Ķ'] +['к', 'ÑĢо'] +['Ġng', 'ươi'] +['Ġв', 'ним'] +['Ġвним', 'ание'] +['jÄħ', 'cy'] +['ÙĢÙĢÙĢÙĢ', 'ÙĢ'] +['Ñģ', 'Ñħод'] 
+['ãģªãĤĵ', 'ãģĭ'] +['×ŀ', '×Ļ׾'] +['Ġ×Ķ×IJ', '×Ĺ'] +['ãĤı', 'ãģªãģĦ'] +['ع', 'سÙĥر'] +['ĠìĦ¸', 'ê³Ħ'] +['ĠÑĩ', 'его'] +['ĠÑģÑĢед', 'ÑģÑĤва'] +['ĠÐł', 'аÑģ'] +['ãģª', 'ãģģ'] +['ÙĨ', 'Ù쨳'] +['ר×Ļ', '×ķף'] +['Ñģ', 'Ñĥд'] +['ĠìĿ¸', 'ê°Ħ'] +['ĠاÙĦÙħ', 'ÙĤبÙĦ'] +['ÙĨ', 'عÙħ'] +['تÙĪ', 'Ù쨱'] +['ש', '×ij×¢'] +['ı', 'lm'] +['ılm', 'Ä±ÅŁ'] +['Ġ×ľ×ª', 'ת'] +['تص', 'Ùģ'] +['×Ķפ', '×ķ×ļ'] +['à¹ĥà¸Ļ', 'à¸Ľà¸µ'] +['ìĿ´', 'ê³ł'] +['Ùģ', 'ÙĪØ²'] +['à¸ľà¸¥', 'à¸ĩาà¸Ļ'] +['ĠGi', 'áo'] +['à¸ļà¸Ńà¸ģ', 'วà¹Īา'] +['Ġd', 'Ä±ÅŁ'] +['ĠdÄ±ÅŁ', 'ında'] +['ì£', '½'] +['Ġdzie', 'ÅĦ'] +['к', 'ÑĨии'] +['и', 'ÑĨе'] +['ãģ®', 'ä¸Ģ'] +['ع', 'Ø´'] +['пÑĢ', 'еÑģÑģ'] +['หà¸Ļ', 'à¹Īà¸Ńย'] +['ลัà¸ģษ', 'à¸ĵะ'] +['Ġpossibilit', 'Ãł'] +['à¹Ħà¸Ķà¹īรัà¸ļ', 'à¸ģาร'] +['หย', 'ุà¸Ķ'] +['Ġphi', 'ên'] +['çĶŁ', 'ãģ¾ãĤĮ'] +['Ø·', 'ÙĪÙĦ'] +['ÑĦ', 'ин'] +['f', 'ür'] +['ØŃ', 'ÙĬاة'] +['íĸ', 'ĪìĬµëĭĪëĭ¤'] +['׼', '׳×ķת'] +['à¸Ľà¸£à¸°', 'ส'] +['à¸Ľà¸£à¸°à¸ª', 'à¸ļ'] +['à¸Ľà¸£à¸°à¸ªà¸ļ', 'à¸ģารà¸ĵà¹Į'] +['ëIJĺ', 'ìĹĪ'] +['Ġkaż', 'dy'] +['Ġl', 'uyá»ĩn'] +['ĠоÑĢганиз', 'аÑĨии'] +['å°ij', 'ãģªãģı'] +['ÑģÑĤÑĢо', 'ен'] +['Ġtécn', 'ico'] +['×§', '×Ķ׾'] +['Ġ×ķ×IJ', '×Ĺ'] +['ĠعÙĦÙĬ', 'Ùĥ'] +['Ñī', 'ение'] +['Ġ×Ķ', '×Ļ׾×ĵ×Ļ×Ŀ'] +['ÙĪØ³', 'ائÙĦ'] +['Ġ×ķ', '×Ķת'] +['تÙħ', 'ÙĬز'] +['ĠÑģ', 'казал'] +['Ġпол', 'и'] +['Ġ×Ķ×ŀ', 'ס'] +['ÙĦÙij', 'Ùİ'] +['Ùħؤس', 'سة'] +['Ġ×ŀ', '×Ļ×ĵ'] +['ãģ£', 'ãģ¡'] +['ĠëĦĪ', '무'] +['à¸ŀ', 'ี'] +['Ġt', 'ặng'] +['Ġt', 'ấn'] +['ר', 'ש×Ŀ'] +['Ġméd', 'ica'] +['Ġ×¢', '×ķ×ŀ'] +['Ġ×¢×ķ×ŀ', '×ĵ'] +['ÑĦ', 'оÑĢ'] +['Ùħر', 'Ø©'] +['Ġvat', 'anda'] +['Ġvatanda', 'ÅŁ'] +['Ġдел', 'о'] +['à¸Ļ', 'ม'] +['ãģ¨', 'åIJĮãģĺ'] +['Ùģ', 'Ùī'] +['Ñģ', 'оÑĢ'] +['Ġ×Ķס', 'ר×ĺ'] +['Ġép', 'oca'] +['ìłķ', 'ì±ħ'] +['ĠÑģвÑıз', 'ан'] +['ض', 'رب'] +['ĠÙĦ', 'ÙĨا'] +['Ġuży', 'wa'] +['ĠاÙĦج', 'ÙĬØ´'] +['Ñİ', 'ÑĢ'] +['×ijס', '×ķ×£'] +['Ġм', 'Ñĥ'] +['ĠмÑĥ', 'зÑĭк'] +['bilit', 'é'] +['Ġma', 'ç'] +['س', 'Ùİ'] +['ت', 'ÙĦÙĥ'] +['ãģ', '¬'] +['ÙĬ', 'ÙĦا'] +['ÑĪ', 'ла'] +['ÙĢÙĢ', 'ÙĢ'] +['Ġод', 'ной'] +['зв', 'ан'] +['ĠÑģ', 'ÑĢаз'] +['ĠÑģÑĢаз', 'Ñĥ'] 
+['ÙĨ', 'ظÙħ'] +['را', 'Ùĩ'] +['ĠÙĦÙĩ', 'ذا'] +['׼', '×ķר'] +['Ġ×Ķש', '×ij×ķ×¢'] +['Ġ×Ķש', 'ת'] +['ĠQu', 'ảng'] +['ãĥ«', 'ãĥ¼'] +['ãģĪ', 'ãģªãģĦ'] +['×ĺ', '×IJ'] +['Ġmi', 'á»ģn'] +['ĠPh', 'áºŃt'] +['ĠاÙĦس', 'ÙĪÙĤ'] +['Ä', 'Ĥ'] +['ĠاÙĦج', 'Ùħع'] +['ĠاÙĦجÙħع', 'Ø©'] +['ÑİÑī', 'ей'] +['a', 'ÅĤem'] +['عت', 'ÙĤد'] +['Ø£', 'ÙĦÙħ'] +['Ñģ', 'ке'] +['ĠìĿ´', 'íķ´'] +['ÙĨس', 'Ø®'] +['è¨Ģ', 'ãģĦ'] +['д', 'обав'] +['سب', 'ÙĤ'] +['×¢×ķר', 'ר'] +['ÑĤи', 'п'] +['ãģĿãģĵ', 'ãģ§'] +['vis', 'ión'] +['عÙĪØ¯', 'Ø©'] +['ë¨', '¹'] +['×ŀ', '×ĸר×Ĺ'] +['ĠØ¥', 'ØŃ'] +['Ġ׾×ij', '×Ļף'] +['Ġ׾צ', '×IJת'] +['Ġyard', 'ı'] +['Ġyardı', 'mc'] +['Ġyardımc', 'ı'] +['İ', 'Z'] +['×§', 'פ×Ķ'] +['tr', 'é'] +['liÄŁ', 'ini'] +['клÑİÑĩ', 'а'] +['Ġüret', 'im'] +['Ġa', 'yrı'] +['ĠkiÅŁ', 'iler'] +['à¸Ħ', 'à¹īà¸Ļ'] +['à¸Ħà¹īà¸Ļ', 'หา'] +['ĠS', 'á»±'] +['Ġ׼', 'ס'] +['Ġ×Ľ×¡', '×£'] +['ĠÑĤак', 'иÑħ'] +['ĠXu', 'ân'] +['Ġл', 'ег'] +['Ġлег', 'ко'] +['Ø«ÙĤ', 'اÙ쨩'] +['ÐĿ', 'Ðŀ'] +['ãĤ¹ãĤ¿', 'ãĥĥ'] +['ãĤ¹ãĤ¿ãĥĥ', 'ãĥķ'] +['åIJĪ', 'ãģĦ'] +['Ġ×Ķש', '×Ļ×ŀ×ķש'] +['man', 'ız'] +['ĠÐĴ', 'аÑģ'] +['g', 'ün'] +['ìľĦìĽIJ', 'íļĮ'] +['Ġwsp', 'óln'] +['ĠÑģв', 'ое'] +['í', 'ĥģ'] +['à¹Ģà¸Ļ', 'ีย'] +['ÙĪØ¨', 'Ø©'] +['в', 'Ñıз'] +['ı', 'dır'] +['ëIJĺ', 'ìĹĪëĭ¤'] +['ĠdeÄŁi', 'ÅŁtir'] +['ãĤĭ', 'ãģĵãģ¨ãģĮ'] +['Ġ×Ĺ×ĵ', 'ש×Ķ'] +['ãĤīãĤĮ', 'ãģ¦ãģĦãĤĭ'] +['×Ĺ×Ļ', '×Ļ×ij'] +['ĠÐļ', 'аÑĢ'] +['׳×Ļת', '×ķ×Ĺ'] +['Ġ×§×ĺ', 'ף'] +['ר', '×ĸ'] +['ÙĪ', 'غ'] +['èªŃ', 'ãģ¿'] +['Ġت', 'ÙĤÙĪÙħ'] +['ĠÙĥ', 'اÙĦ'] +['à¸Ŀ', 'ึà¸ģ'] +['Ġë°ľ', 'ìĥĿ'] +['ológ', 'ico'] +['ر', 'اع'] +['à¹ģà¸ģà¹ī', 'à¹Ħà¸Ĥ'] +['ĠÑĢабоÑĤ', 'Ñĥ'] +['ÙĨÙij', 'Ùİ'] +['à¸Ńยูà¹Ī', 'à¸Ĺีà¹Ī'] +['ĠاÙĦØ«', 'اÙĨÙĬØ©'] +['ĠNh', 'ân'] +['Ñħ', 'ваÑĤ'] +['ö', 'ne'] +['Ġع', 'دة'] +['à¹ģ', 'สà¸ĩ'] +['ÑĤ', 'оп'] +['пÑĥÑģ', 'ка'] +['شر', 'اء'] +['ĠÐļ', 'ом'] +['Ġפע', '×ķ׾×Ķ'] +['ìĤ¬', 'ìĿ´'] +['ìĤ¬ìĿ´', 'íĬ¸'] +['è¡Į', 'ãģ£ãģ¦'] +['Ġ×Ķ', '×Ķת'] +['ĠÑģÑĤ', 'оÑĢо'] +['ĠÑģÑĤоÑĢо', 'нÑĭ'] +['در', 'س'] +['à¸ĭ', 'ู'] +['à¸ķà¹Ī', 'ำ'] +['ĠØ£', 'بÙĬ'] +['под', 'об'] +['ãģ«', 'ãģ¦'] +['ار', 'تÙģØ§Ø¹'] +['ĠÙħ', 
'ؤ'] +['ик', 'ов'] +['ge', 'führt'] +['มืà¸Ń', 'à¸ĸืà¸Ń'] +['ĠÙĦ', 'ÙĤد'] +['ĠØ£ÙĨ', 'Ùij'] +['سÙĬ', 'طر'] +['ãģ¾ãģļ', 'ãģ¯'] +['ס', '×ĵ'] +['Ñģк', 'олÑĮко'] +['ãģ¿ãģŁãģĦ', 'ãģª'] +['×ĵר', '×Ĵ'] +['×¢', '×Ļ×ĵ'] +['à¹ĥหà¹ī', 'à¸ļริà¸ģาร'] +['ĠÐĶ', 'и'] +['×ij×¢', '×Ļ×ķת'] +['Ġ×Ķ×Ĺ', '×ķ'] +['пиÑģ', 'ÑĮ'] +['ĠاÙĦØ®', 'ÙĦ'] +['б', 'ав'] +['Ġİ', 'lk'] +['ĠاÙĦØ®', 'Ùħ'] +['ĠاÙĦØ®Ùħ', 'ÙĬس'] +['ĠÙĬ', 'ÙĤÙĪÙħ'] +['æĻĤ', 'ãģ®'] +['ĠsÅĤ', 'ow'] +['ĠØ£', 'ÙĩÙħ'] +['Ø®ÙĦ', 'ÙĤ'] +['ĠØ£', 'صبØŃ'] +['Ġchứ', 'a'] +['Ġth', 'ác'] +['Ùģ', 'اÙĦ'] +['Ġch', 'á»Ŀ'] +['ĠاÙĦØ®', 'ار'] +['ĠاÙĦخار', 'ج'] +['ĠاÙĦخارج', 'ÙĬØ©'] +['Ø·', 'ائر'] +['Ġt', 'Ãł'] +['ĠtÃł', 'u'] +['à¸ģล', 'à¹īà¸Ńà¸ĩ'] +['ĠاÙĦÙħر', 'Ø£'] +['ĠاÙĦÙħرأ', 'Ø©'] +['åħ¨', 'ãģı'] +['ĠÃĸ', 'n'] +['çļĦ', 'ãģ«ãģ¯'] +['Ġpiè', 'ce'] +['×Ĵ', '×Ļ×ij'] +['ĠاÙĦ', 'ÙĪØ§ÙĤع'] +['ä»Ĭ', 'ãģ®'] +['ĠاÙĦÙħ', 'ÙĤ'] +['cz', 'nÄħ'] +['Ù쨹', 'اÙĦ'] +['ен', 'ного'] +['ĠÑĦак', 'ÑĤ'] +['ìĭł', 'ì²Ń'] +['ĠÐŀ', 'ни'] +['ĠاÙĦبÙĦ', 'اد'] +['ов', 'иÑĩ'] +['ëı', 'Į'] +['ÑĦ', 'ÑĥнкÑĨи'] +['Ġìĸ´', 'ëĬIJ'] +['ãĥķãĤ©', 'ãĥ¼'] +['d', 'ÃŃ'] +['ил', 'оÑģÑĮ'] +['Ùħ', 'Ùī'] +['ĠاÙĦØ£ÙħرÙĬ', 'Ùĥ'] +['ĠاÙĦØ£ÙħرÙĬÙĥ', 'ÙĬØ©'] +['×ĺ', '×Ļפ×ķ׾'] +['íĶĦ', 'ë¡ľê·¸'] +['íĶĦë¡ľê·¸', 'ëŀ¨'] +['Ġש', '×ķ׳×ķת'] +['Ø´', 'ÙħÙĦ'] +['ĠпаÑĢ', 'а'] +['Ġ×Ķ×Ĺ', '×ķ×§'] +['ÙĪØ²', 'ارة'] +['ãģ¨', 'ãģĻãĤĭ'] +['Ġqu', 'ảng'] +['ĠaÄŁ', 'ır'] +['ĠاÙĦÙĦ', 'ج'] +['ĠاÙĦÙĦج', 'ÙĨØ©'] +['ê¸', '´'] +['ĠT', 'ân'] +['ج', 'ÙħÙĦ'] +['д', 'ол'] +['à¹ģà¸ŀ', 'à¸Ĺย'] +['à¹ģà¸ŀà¸Ĺย', 'à¹Į'] +['Ġר×IJ', 'ש×Ļ'] +['Ñī', 'ей'] +['Ġçev', 're'] +['Ġкомп', 'лекÑģ'] +['Ġ×ij', '×ŀש×ļ'] +['Ġalt', 'ın'] +['ĠØ£', 'عÙħاÙĦ'] +['ĠÑģво', 'его'] +['ãĤĪ', 'ãģĦ'] +['×Ĺ׾', '×Ļ×ĺ'] +['×ŀ׳', '×¢'] +['Ġר', '×ij×Ķ'] +['ĠØ£ÙĬضا', 'Ùĭ'] +['×ĸ', '׾'] +['ĠاÙĦسÙĬ', 'اسÙĬ'] +['æĢĿ', 'ãģĨ'] +['קר', '×§'] +['קרק', '×¢'] +['ĠاÙĦÙģ', 'رÙĬÙĤ'] +['б', 'иÑĤ'] +['×§', '׳×Ķ'] +['ĠØ¥', 'ÙĨÙĩ'] +['ĠÐĴ', 'ам'] +['Ðł', 'Ðŀ'] +['ãĥĪ', 'ãĥª'] +['å¿ħè¦ģ', 'ãģª'] +['Ġch', 'âu'] +['ç¶ļ', 'ãģij'] +['Ġçöz', 'üm'] +['gÅĤ', 'ow'] +['ع', 'ÙĤÙĦ'] +['売', 
'ãĤĭ'] +['i', 'ết'] +['à¸Ĭิ', 'à¹īà¸Ļ'] +['ĠØŃÙĤ', 'ÙĪÙĤ'] +['Ø·ÙĦ', 'ع'] +['ĠÄij', 'en'] +['ĠÙĥ', 'اÙ쨩'] +['ãģ®', 'ãģĶ'] +['Ġë', '¬'] +['Ġë¬', '¼'] +['Ġ물', 'ë¡ł'] +['Ġرس', 'ÙĪÙĦ'] +['з', 'ам'] +['зам', 'ен'] +['Ġkullan', 'ıcı'] +['×¢', '×ķ׾'] +['èī²', 'ãĢħ'] +['ÑĪи', 'ÑĢ'] +['Ġ×Ĺ', 'ש'] +['Ġwy', 'gl'] +['Ġwygl', 'Äħda'] +['ש', '×Ļ×ŀ×ķש'] +['å¿ĺ', 'ãĤĮ'] +['×¢', '×Ļצ×ķ×ij'] +['ĠاÙĦس', 'ÙĪØ±ÙĬ'] +['å°ij', 'ãģªãģĦ'] +['Ġпо', 'иÑģк'] +['สำ', 'à¸Ļัà¸ģà¸ĩาà¸Ļ'] +['Ġ×ŀצ', '×ĵ'] +['Ġmü', 'ÅŁ'] +['ĠmÃ¼ÅŁ', 'ter'] +['ĠmÃ¼ÅŁter', 'i'] +['ĠÙħÙĨ', 'ÙĩÙħ'] +['à¸ķำ', 'à¹ģ'] +['à¸ķำà¹ģ', 'หà¸Ļ'] +['à¸ķำà¹ģหà¸Ļ', 'à¹Īà¸ĩ'] +['ÅĽ', 'mie'] +['Ġש', '×ł×ª'] +['Ġ×Ķ', 'פ×Ļ'] +['פר', 'ש'] +['×¢×ijר', '×Ļת'] +['สà¸Ļ', 'ัà¸ļ'] +['สà¸Ļัà¸ļ', 'สà¸Ļุ'] +['สà¸Ļัà¸ļสà¸Ļุ', 'à¸Ļ'] +['è¨Ģ', 'ãģ£ãģ¦'] +['à¸ģาร', 'à¸Īัà¸Ķ'] +['ĠMo', 'że'] +['из', 'аÑĨии'] +['ứ', 't'] +['ĠÙĪØ¨', 'عد'] +['ĠdeÄŁ', 'ild'] +['ĠdeÄŁild', 'ir'] +['Ġת', '×ŀ'] +['Ġ×ŀ×ŀ', '׳×ķ'] +['話', 'ãĤĴ'] +['ĠÑĨ', 'ена'] +['Ġth', 'úc'] +['×Ļ×ŀ', '×ķף'] +['ĠB', 'áo'] +['ãĤĴ', 'åıĸãĤĬ'] +['å®ī', 'ãģĦ'] +['Ġ×¢×ķש', '×Ļ×Ŀ'] +['èĩªåĪĨ', 'ãģĮ'] +['l', 'ée'] +['ãĤĭ', 'ãģ®ãģ§'] +['иÑĢÑĥ', 'еÑĤ'] +['ãģ¦', 'ãĤĭ'] +['ست', 'ر'] +['ĠاÙĦØŃ', 'ÙĬ'] +['×Ļ׾', '×ķת'] +['Ġ×Ĺ', '×ij'] +['ÙĤر', 'Ø£'] +['تÙħ', 'ÙĥÙĨ'] +['س', 'ائÙĦ'] +['prü', 'f'] +['ãģĭ', 'ãģijãģ¦'] +['ĠÑģоб', 'ÑģÑĤвенно'] +['ĠìľĦ', 'íķĺìŬ'] +['׾', '×Ļ×ĺ'] +['ãģĮ', 'å¤ļãģı'] +['ÙĬت', 'Ùĩا'] +['ç«ĭ', 'ãģ¦'] +['ม', 'à¸Ńà¸ļ'] +['ìĭľ', 'ìŀ¥'] +['оÑĢ', 'а'] +['Ġs', 'avaÅŁ'] +['×ĺ×Ļ×ij', '×Ļ'] +['×ij', '׳×ķ'] +['Ùħا', 'ذا'] +['기', 'ê°Ħ'] +['ãģªãģ©', 'ãģ§'] +['Ġ×ŀ', 'ת×Ĺ×Ļ׾'] +['Ġnhi', 'á»ħ'] +['Ġnhiá»ħ', 'm'] +['ка', 'ÑĢ'] +['каÑĢ', 'ÑĤ'] +['Ġ׾×Ķ', 'שת×ŀש'] +['׳', '×Ļ×Ĺ'] +['اد', 'ÙĬØ©'] +['ราย', 'à¸ĩาà¸Ļ'] +['Ġprzy', 'kÅĤad'] +['Ñī', 'ий'] +['ØŃض', 'ÙĪØ±'] +['Ġh', 'ôn'] +['Ã', 'Ŀ'] +['ת', '×ķצ×IJ×ķת'] +['راب', 'Ø·'] +['Ġb', 'ếp'] +['ĠполÑĥÑĩ', 'и'] +['åĩºä¼ļãģĦ', 'ç³»'] +['à¸Ľà¸¥', 'à¹Īà¸Ńย'] +['ĠاÙĦØ´', 'باب'] +['اÙĩ', 'ÙĦ'] +['ä»Ĭ', 'ãģ¾ãģ§'] +['رج', 'ع'] +['ãĤ¶', 'ãĥ¼'] +['ÙĤ', 'Ùģ'] +['ĠGro', 
'ÃŁ'] +['ĠíļĮ', 'ìĽIJ'] +['اج', 'ر'] +['Ġ×ij×ŀ', 'קר×Ķ'] +['Ġseg', 'urança'] +['fü', 'hl'] +['ãģ¦', 'ãģĦãģı'] +['หม', 'à¸Ń'] +['ĠкоÑĤоÑĢ', 'ом'] +['ĠN', 'Äĥm'] +['ĠdÅĤ', 'ugo'] +['ÙħÙĨ', 'ØŃ'] +['ש×ķ', '×ķ×Ļ'] +['ĠØ£ÙĬ', 'اÙħ'] +['ส', 'à¸łà¸²à¸ŀ'] +['r', 'zÄħ'] +['شر', 'Ùĥات'] +['ãĤĴ', 'èĢĥãģĪ'] +['д', 'аÑĢ'] +['à¸Ľà¸£à¸°', 'à¸Ĭุม'] +['Ġ×ķ×IJ', '×ĸ'] +['i', 'á»ĩn'] +['Ġt', 'ươi'] +['ש', '×Ļ×Ĺ'] +['à¸Ń', 'à¹Īà¸Ńà¸Ļ'] +['æĽ¸', 'ãģĦãģ¦'] +['Ġng', 'ữ'] +['×ij×Ļ×ĺ', '×Ĺ'] +['×ij×Ļ×ĺ×Ĺ', '×ķף'] +['Ġs', 'ẵ'] +['Ġsẵ', 'n'] +['ì§Ģ', 'ëıĦ'] +['ĠпÑĢ', 'еп'] +['ĠпÑĢеп', 'аÑĢаÑĤ'] +['Ġна', 'ÑĥÑĩ'] +['ĠÃľ', 'nivers'] +['ĠÃľnivers', 'ites'] +['ĠÃľniversites', 'i'] +['Ġ×Ĵ×ĵ', '×ķ׾×Ķ'] +['Ġ×Ķ', '×ł×ª'] +['Ġ×Ķ×ł×ª', '×ij×¢'] +['ãģ§ãģĤ', 'ãģ£ãģŁ'] +['Ġmies', 'iÄħ'] +['ĠmiesiÄħ', 'c'] +['г', 'ÑĢам'] +['гÑĢам', 'м'] +['Ġبش', 'Ø£ÙĨ'] +['ĠÑħ', 'ÑĢ'] +['×§', '×Ļ×ĵ'] +['×§×Ļ×ĵ', '×ķ×Ŀ'] +['Ø´', 'Ùĥر'] +['Ġ', 'á»ķ'] +['Ġá»ķ', 'n'] +['ãģĮãģĤ', 'ãģ£ãģ¦'] +['ãģķãĤĮ', 'ãģ¾ãģĻ'] +['Ġ×Ĺ', '×ķ×ĵ'] +['Ġ×Ĺ×ķ×ĵ', 'ש×Ļ×Ŀ'] +['ÙħÙĪØ§', 'جÙĩ'] +['ÙħÙĪØ§Ø¬Ùĩ', 'Ø©'] +['أش', 'خاص'] +['ب', 'غ'] +['à¹Ģรียà¸Ļ', 'รูà¹ī'] +['ãģĹãģ¦', 'ãģĦãģı'] +['Ġs', 'ạn'] +['å¿ħ', 'ãģļ'] +['׳', '×Ļ×Ĵ'] +['׳×Ļ×Ĵ', '×ķ×ĵ'] +['باÙĦ', 'غ'] +['×Ĺ', 'ש×ŀ'] +['×Ĺש×ŀ', '׾'] +['Ġnap', 'raw'] +['Ġnapraw', 'dÄĻ'] +['Ø´Ùĩ', 'اد'] +['×IJ', '×ķ×Ķ'] +['×IJ×ķ×Ķ', '×ij'] +['и', 'ÑĨÑĭ'] +['Ġ×Ķ', 'ר׼×ij'] +['ëŀ', 'ij'] +['Ġת', '×¢'] +['Ġ×Ķ', '×Ļש'] +['Ġ×Ķ×Ļש', 'ר×IJ'] +['Ġ×Ķ×Ļשר×IJ', '׾×Ļ'] +['Ø£', 'ÙħÙĨ'] +['ÑİÑī', 'аÑı'] +['sk', 'ór'] +['LER', 'İ'] +['Ġ×Ķ×IJ×Ĺר', '×ķף'] +['×¢', '׳ק'] +['ĠÙĪ', 'ÙĥÙĦ'] +['ãģĵãģĵ', 'ãģ§'] +['Ġqu', 'án'] +['liÄŁ', 'in'] +['à¸ģà¸İ', 'หมาย'] +['Ø·', 'Ùħ'] +['Ø£', 'جÙĩ'] +['أجÙĩ', 'زة'] +['ĠEr', 'doÄŁan'] +['ãģ§', 'ãģĬ'] +['Ġв', 'ÑĢа'] +['ĠвÑĢа', 'Ñĩ'] +['ĠPh', 'ó'] +['à¸Ĭั', 'à¹Īว'] +['à¸Ĭัà¹Īว', 'à¹Ĥม'] +['à¸Ĭัà¹Īวà¹Ĥม', 'à¸ĩ'] +['Ġph', 'úc'] +['×Ļפ', '×ķת'] +['×¢×Ļ', '×ķף'] +['Ġduż', 'o'] +['ãĥģ', 'ãĥ¼ãĥł'] +['ĠÙĬ', 'Ùİ'] +['Ġзад', 'аÑĩ'] +['Ġ×Ĵ×ij×ķ×Ķ', '×Ķ'] +['Ġ׼', '׼׾'] +['лож', 'ен'] +['ét', 'at'] +['Ġng', 
'Äĥn'] +['èµ·', 'ãģį'] +['ĠTi', 'ến'] +['ص', 'عب'] +['Ġexperi', 'ência'] +['Ø®', 'Ùħ'] +['à¸ģาร', 'à¸Ĺำà¸ĩาà¸Ļ'] +['س', 'ÙĬد'] +['ĠD', 'á»±'] +['ĠкоÑĤоÑĢ', 'ого'] +['lad', 'ıģı'] +['Ġkh', 'á»ķ'] +['Ġê³Ħ', 'ìĨį'] +['Ñī', 'ик'] +['สà¹Īวà¸Ļ', 'à¸ķัว'] +['з', 'оÑĢ'] +['ÙĨ', 'Ùı'] +['Ġ', 'à¸Ķัà¸ĩ'] +['Ġà¸Ķัà¸ĩ', 'à¸Ļัà¹īà¸Ļ'] +['Ġc', 'ấu'] +['ĠÄij', 'á»ijc'] +['о', 'ÑĦ'] +['ĠاÙĦØ£', 'عÙħاÙĦ'] +['ãģªãģı', 'ãģ¦ãĤĤ'] +['×ķ׼', '×Ļ×Ŀ'] +['à¹ģ', 'à¸Ľ'] +['ĠB', 'ên'] +['ãĥ¯', 'ãĥ³'] +['Ġgi', 'ám'] +['ĠÅŀ', 'u'] +['Ġd', 'áng'] +['ع', 'ÙĦÙĬ'] +['à¹Ģà¸ģ', 'ษ'] +['à¹Ģà¸ģษ', 'à¸ķร'] +['ÙĪØ¬', 'ب'] +['н', 'нÑĭе'] +['ÙĤ', 'ضاء'] +['à¸Ħว', 'à¸ļ'] +['à¸Ħวà¸ļ', 'à¸Ħุ'] +['à¸Ħวà¸ļà¸Ħุ', 'ม'] +['ãģ¤', 'ãģ¤'] +['ĠVi', 'á»ĩc'] +['×ŀ×ij', '×ĺ'] +['ש×Ļת', '×ķ×£'] +['Ġв', 'едÑĮ'] +['k', 'aza'] +['kaza', 'ÅĤ'] +['à¸ķำ', 'รวà¸Ī'] +['ãĤ¿', 'ãĥ«'] +['Ġпов', 'Ñĭ'] +['ĠповÑĭ', 'ÑĪен'] +['ĠS', 'ợ'] +['ĠìĦ¤', 'ëªħ'] +['ĠÃĩ', 'ünkü'] +['ìĥĿ', 'íĻľ'] +['Ö', '¾'] +['ãĤĮ', 'ãģ¦ãģĦãĤĭ'] +['Ġ×ij', 'ר×IJש'] +['ר', '×ķ×Ĵ'] +['Ġо', 'ÑĦи'] +['ĠоÑĦи', 'ÑĨиалÑĮн'] +['ĠÑĥ', 'ÑģÑĤанов'] +['ĠÑĥÑģÑĤанов', 'лен'] +['ĠاÙĦÙħ', 'صر'] +['ĠاÙĦÙħصر', 'ÙĬØ©'] +['ĠÐŁÐ¾', 'ÑįÑĤомÑĥ'] +['ÙĨ', 'صÙģ'] +['ĠÙĪØ§ÙĦ', 'ÙĨ'] +['Ġh', 'Ãłi'] +['à¸Ħ', 'ิ'] +['ĠApr', 'ès'] +['ì³', 'IJ'] +['à¹Ģà¸ĭ', 'ีย'] +['×ĵ', '×ŀ×Ķ'] +['activ', 'ité'] +['à¸Ħิà¸Ķ', 'วà¹Īา'] +['ÑĤ', 'ÑĢен'] +['à¹Ģ', 'ฮ'] +['ãĥı', 'ãĤ¤'] +['ãģĮ', 'å¢ĹãģĪ'] +['ен', 'наÑı'] +['Ġìĺ¤', 'ëĬĺ'] +['ãĥ¢', 'ãĥ³'] +['Ġкон', 'еÑĩно'] +['ĠÙħÙĤ', 'ابÙĦ'] +['cl', 'é'] +['Ġh', 'ü'] +['Ġth', 'ẳng'] +['ìłģ', 'ìĿ´'] +['ĠÐIJ', 'лекÑģ'] +['ĠÐIJлекÑģ', 'ан'] +['ĠÐIJлекÑģан', 'дÑĢ'] +['ãĥŀãĥ³', 'ãĤ·ãĥ§ãĥ³'] +['ãģ²ãģ¨', 'ãģ¤'] +['ãģª', 'ãģĬ'] +['à¹Ģà¸Īà¹īา', 'à¸Ĥà¸Ńà¸ĩ'] +['ëĵľ', '리'] +['Ø´', 'اء'] +['ĠsaÄŁ', 'lık'] +['ĠÅŁ', 'imdi'] +['×Ļ×IJ', '׾'] +['تأ', 'Ø«ÙĬر'] +['Ø£', 'سب'] +['أسب', 'اب'] +['ĠвÑĭполн', 'ен'] +['л', 'ок'] +['ש', '×Ļ×ij×Ķ'] +['Ġl', 'ắm'] +['ĠTr', 'Æ°á»Ľc'] +['Ġ×Ķ×¢', '׾'] +['리', '를'] +['ĠÑĢ', 'еж'] +['ĠÑĢеж', 'им'] +['int', 'é'] +['inté', 'gr'] +['×Ĵ', '׳×Ļ'] +['ĠاÙĦØ´', 
'عر'] +['Ġmil', 'hões'] +['Ġpeque', 'ño'] +['ãĤ³', 'ãĥ¼ãĤ¹'] +['×ķ׼', '×Ĺ'] +['à¹Ģà¸Ĭ', 'à¹īา'] +['شر', 'ÙĤ'] +['Ġh', 'ương'] +['รัà¸IJ', 'à¸ļาล'] +['à¸ģล', 'าย'] +['à¸ģลาย', 'à¹Ģà¸Ľà¹ĩà¸Ļ'] +['Ġпод', 'Ñħод'] +['תש', '×ķ×ij×Ķ'] +['ãģıãģª', 'ãģ£ãģ¦'] +['ĠاÙĦØ£Ùħ', 'Ùħ'] +['ĠH', 'á»įc'] +['ĠwspóÅĤ', 'pr'] +['ĠwspóÅĤpr', 'ac'] +['Ñĩ', 'Ñĥв'] +['ÑĩÑĥв', 'ÑģÑĤв'] +['ÃŃst', 'ico'] +['à¹Ģà¸ģ', 'าะ'] +['ìĽ', 'Ģ'] +['Ġназ', 'ад'] +['ãĤĭ', 'ãĤĪãģĨãģ«'] +['ĠС', 'Ш'] +['ĠСШ', 'ÐIJ'] +['м', 'он'] +['ĠAs', 'ÃŃ'] +['×ķר', '×Ĵ'] +['полн', 'ен'] +['×ŀס', '׾'] +['×ŀ×¡×ľ', '×ķ׾'] +['à¹Ģลืà¸Ń', 'à¸Ķ'] +['à¹Ģริà¹Īม', 'à¸ķà¹īà¸Ļ'] +['ĠاÙĦØ¥', 'Ùħ'] +['ĠاÙĦØ¥Ùħ', 'ارات'] +['צ×Ķ', 'ר'] +['ãĥ¡ãĥª', 'ãĥĥãĥĪ'] +['ĠпоÑĤ', 'ом'] +['в', 'из'] +['ĠÙģ', 'ترة'] +['å¾Į', 'ãģ®'] +['ÐĿ', 'ÐIJ'] +['×ŀס', 'ר'] +['ÙĬر', 'ÙĬ'] +['pr', 'é'] +['Ġte', 'ÅŁek'] +['ĠteÅŁek', 'kür'] +['Ġöd', 'eme'] +['د', 'اÙĨ'] +['ãģ¾', 'ãģĹãģ¦'] +['缮', 'ãģ«'] +['ĠÑĤ', 'еÑĩение'] +['l', 'ard'] +['lard', 'ır'] +['à¹Ģรา', 'à¸Īะ'] +['ס', 'פ×Ļ'] +['ĠÙĪÙĥ', 'ذÙĦÙĥ'] +['Ġh', 'át'] +['Ġt', 'á»Ļc'] +['à¸Ħุ', 'ย'] +['Ġb', 'ức'] +['ØŃ', 'ÙĬÙĨ'] +['èģŀ', 'ãģĦãģ¦'] +['Ùħؤ', 'شر'] +['ĠNh', 'ư'] +['Ġмен', 'ее'] +['ละ', 'à¸Ħร'] +['Ñģ', 'ин'] +['ĠÑĢ', 'ек'] +['ĠÑĢек', 'л'] +['ĠÑĢекл', 'ам'] +['ĠÙģ', 'ÙĩÙĪ'] +['Ġ׾', '×ĸ'] +['×Ļ׳', '×ķת'] +['ĠÅŁ', 'art'] +['ÑģÑĤав', 'ка'] +['Ġíı¬', 'íķ¨'] +['ãģ«è¡Į', 'ãģı'] +['ï¼', 'Ŀ'] +['ĠпозволÑı', 'еÑĤ'] +['Ġת×ķ׼', '׾×ķ'] +['ов', 'ал'] +['صÙĦ', 'Ø©'] +['Ġ׾ש', '׳×ķת'] +['ĠÐĺ', 'гÑĢ'] +['ÙħÙĨتج', 'ات'] +['Ġsat', 'Ä±ÅŁ'] +['Ñģ', 'ко'] +['ĠاÙĦØ«ÙĦاث', 'اء'] +['Ġ×Ķ×ĵ×ijר', '×Ļ×Ŀ'] +['ãģĹãģ¾', 'ãģĹãĤĩãģĨ'] +['بÙĤ', 'Ùī'] +['åĬĽ', 'ãĤĴ'] +['ĠÃĩ', 'ok'] +['ãĥģ', 'ãĥ¥'] +['à¹Ģà¸Ĭ', 'ืà¹īà¸Ń'] +['ยุ', 'à¸Ħ'] +['ศา', 'ล'] +['Ġ×§×ķ×ĵ', '×Ŀ'] +['×ĸר', '×Ļ×Ŀ'] +['ãģ®', 'åł´åIJĪ'] +['ĠìķĬ', 'ìķĺ'] +['ãģĤãĤĬãģ¾ãģĻ', 'ãģĮ'] +['×IJ', 'שר'] +['è¡Į', 'ãģı'] +['ãģ»', 'ãģĭ'] +['æ°Ĺ', 'ãģ«ãģªãĤĭ'] +['й', 'деÑĤ'] +['íķĺìĺĢ', 'ëĭ¤'] +['ستÙħر', 'ار'] +['ĠÐŁÑĢ', 'е'] +['ĠÑģ', 'боÑĢ'] +['ĠìķĦ', '무'] +['ç§ģ', 'ãĤĤ'] +['ع', 'ص'] +['Ġн', 
'иÑĩ'] +['ĠниÑĩ', 'его'] +['ĠпÑĢи', 'ем'] +['×§', '×ķ×ŀ'] +['ĠìĪĺ', 'ëıĦ'] +['Ġì', '¡´'] +['Ġì¡´', 'ìŀ¬'] +['ĠØ£', 'Ø«ÙĨ'] +['ĠأثÙĨ', 'اء'] +['ĠÙĪØ§ÙĦ', 'ØŃ'] +['ãģĮ', 'ãģ§ãģįãĤĭ'] +['Ġת', '×Ķ'] +['Ġת×Ķ', '×Ļ×Ķ'] +['ר', 'ף'] +['ĠÑģвÑıз', 'и'] +['×Ĵ', 'שת'] +['Ñģп', 'екÑĤ'] +['ס', '×ij×Ļ×ij'] +['ס×ij×Ļ×ij', '×Ķ'] +['ĠíķĦìļĶ', 'íķľ'] +['ت', 'خصص'] +['Ġж', 'ив'] +['Ġжив', 'оÑĤ'] +['ĠMay', 'ıs'] +['تع', 'ا'] +['تعا', 'ÙĪÙĨ'] +['ĠعÙĨ', 'Ùĩا'] +['ów', 'ki'] +['ĠاÙĦÙģÙĦسطÙĬÙĨ', 'ÙĬ'] +['ãģłãģijãģ§', 'ãģªãģı'] +['ìĿ¸', 'ì§Ģ'] +['ĠاÙĦس', 'ÙĪØ¯'] +['ĠاÙĦسÙĪØ¯', 'اÙĨ'] +['إجراء', 'ات'] +['Ġkö', 'tü'] +['Ġ×Ļ', 'תר'] +['×Ĵ', '×Ļש×Ķ'] +['Ġצ', '×ķר×ļ'] +['รà¸ĸ', 'ย'] +['รà¸ĸย', 'à¸Ļà¸ķà¹Į'] +['Ñħ', 'оÑĤ'] +['Ðł', 'ÐIJ'] +['ÙĪ', 'Ø·ÙĨ'] +['Ġsay', 'ısı'] +['ס', '×Ĺר'] +['Ùħ', 'ÙĪÙĦ'] +['ãĤĴæĮģ', 'ãģ£ãģ¦'] +['ع', 'اÙĨ'] +['Ġt', 'á»Ļi'] +['ĠвÑĭ', 'ÑĪе'] +['Ġt', 'ầm'] +['ãĥĪ', 'ãĥ¬'] +['×Ļצ', '×ķ'] +['ม', 'ุม'] +['س', 'ÙĪØ¯'] +['ìłĦ', 'ìŀIJ'] +['ãĤµ', 'ãĥŃãĥ³'] +['ìĤ°', 'ìĹħ'] +['ĠоÑģнов', 'ан'] +['Ø®', 'Ù쨶'] +['רצ', '×Ķ'] +['بÙĬ', 'ض'] +['×ķÖ', '¹'] +['ס×Ļ', '×Ļ×¢'] +['Ġש', '×IJ×Ļ'] +['ĠاÙĦÙĤر', 'Ø¢ÙĨ'] +['ĠТак', 'же'] +['×ŀש', '×ŀ×¢×ķת'] +['س', 'ÙĩÙĦ'] +['Ġ×Ķ', '׳×Ķ'] +['ãĤĴ', 'ãģĹãģ¦ãģĦãĤĭ'] +['×Ļ', '×Ļס'] +['×Ķ', '×ķ×IJ'] +['ĠB', 'ÃŃ'] +['Ġмал', 'о'] +['ĠëͰëĿ¼', 'ìĦľ'] +['Ġר', '×Ĺ×ij'] +['ãģĮ', 'é«ĺãģĦ'] +['ÙĪ', 'اس'] +['ìĤ', '¼'] +['׳', '×¢'] +['ãģ£', 'ãģ¡ãĤĥ'] +['ĠT', 'üm'] +['à¸Ńีà¸ģ', 'à¸Ķà¹īวย'] +['ãģĹãģ¦', 'ãģıãģłãģķãģĦ'] +['ÙĨØ´', 'اط'] +['ãĥĹ', 'ãĥ©ãĥ³'] +['али', 'ÑģÑĮ'] +['×ĵ', '×ľ×ª'] +['Ġwc', 'zeÅĽ'] +['ĠwczeÅĽ', 'niej'] +['ĠÑįÑĤ', 'им'] +['Ġthá»ĭ', 't'] +['à¸ļ', 'ัà¸į'] +['à¸ļัà¸į', 'à¸Ĭี'] +['ãģļ', 'ãģ£ãģ¨'] +['ÑĢ', 'ин'] +['Ġswo', 'jÄħ'] +['íķĺëĬĶ', 'ëį°'] +['Ġë§Įëĵ¤', 'ìĸ´'] +['تش', 'Ùĥ'] +['تشÙĥ', 'ÙĬÙĦ'] +['ائ', 'Ùĩ'] +['Ġ׾פ', '×Ĺ×ķת'] +['ãĥĭ', 'ãĥ¥'] +['ãĥĭãĥ¥', 'ãĥ¼ãĤ¹'] +['׼×IJ', 'ף'] +['ãģ§ãģį', 'ãģŁ'] +['зв', 'он'] +['Ġsta', 'ÅĤ'] +['×Ĺ×ijר', 'ת×Ļ'] +['ĠØ£', 'عÙĦÙĨ'] +['à¹ģà¸ļà¸ļ', 'à¸Ļีà¹ī'] +['بد', 'Ø¡'] +['ãĤģ', 'ãģŁ'] +['Ġ×ŀש', '×ŀ×¢×ķת'] +['Ġ×ŀש×ŀ×¢×ķת', 
'×Ļ'] +['ör', 'ü'] +['Ġh', 'ạnh'] +['z', 'ähl'] +['ĠL', 'ý'] +['Ġ×ij', '×Ķת'] +['Ġ×ij×Ķת', '×IJ×Ŀ'] +['б', 'аÑĢ'] +['ì¦', 'Ī'] +['ä»ĬåĽŀ', 'ãģ®'] +['Ġy', 'ü'] +['Ġyü', 'ks'] +['Ġyüks', 'el'] +['ãĤ½', 'ãĥ¼'] +['ãģĤ', 'ãĤĮ'] +['ת', '׾×ŀ×Ļ×ĵ'] +['ãģ¤', 'ãģª'] +['×ij', '׳×Ļ×Ŀ'] +['Ġx', 'ếp'] +['ĠмÑĥж', 'Ñĩин'] +['ĠاÙĦÙĥ', 'تاب'] +['׼', '×ŀ×ķת'] +['Ġç', 'e'] +['Ġçe', 'ÅŁ'] +['ĠçeÅŁ', 'it'] +['ĠçeÅŁit', 'li'] +['×ĵ', '×Ļר×ķת'] +['à¸ļุ', 'à¸į'] +['ĠاÙĦØ¥', 'ÙĦÙĥ'] +['ĠاÙĦØ¥ÙĦÙĥ', 'ترÙĪ'] +['ĠاÙĦØ¥ÙĦÙĥترÙĪ', 'ÙĨÙĬ'] +['ĠباÙĦØ¥', 'ض'] +['ĠباÙĦإض', 'اÙ쨩'] +['Ġyö', 'nel'] +['Ġyönel', 'ik'] +['mys', 'ÅĤ'] +['à¸Ķà¹īวย', 'à¸ģาร'] +['à¸ģาร', 'à¸Ĺำ'] +['ов', 'Ñĭм'] +['Ø£', 'زÙħØ©'] +['æİ¢', 'ãģĹ'] +['íļ', '¨'] +['Ġ×ķ×IJ', '×Ŀ'] +['Ġnghi', 'êm'] +['ÑĪ', 'ин'] +['ка', 'л'] +['Ġcrian', 'ças'] +['èĩªåĪĨ', 'ãģ§'] +['Ġн', 'ай'] +['Ġнай', 'ÑĤи'] +['ĠS', 'á»ij'] +['ĠÃ¶ÄŁrenc', 'iler'] +['ãĥ¶', 'æľĪ'] +['Ñģ', 'ан'] +['ĠJ', 'á'] +['ĠkonuÅŁ', 'ma'] +['شر', 'Ø·'] +['ëĪ', 'Ī'] +['ar', 'rière'] +['ضر', 'ÙĪØ±Ø©'] +['ãĥĶ', 'ãĥ³'] +['×¢', 'שר'] +['аÑĢ', 'ÑĮ'] +['جÙħ', 'اع'] +['Ġdé', 'co'] +['Ġ×Ļ×Ķ', '×ķ×ĵ×Ļ'] +['à¸ŀ', 'ลาà¸Ķ'] +['ĠÙĬ', 'ÙĥÙĨ'] +['Ġج', 'اÙħعة'] +['Ø·', 'بÙĤ'] +['Ġbo', 'ÅŁ'] +['×ķ', '×ķ×IJ'] +['×ŀ×ĵ', '×¢'] +['×§×ij×ķצ', 'ת'] +['פ', '×Ļר'] +['jÄħc', 'ym'] +['ÙħØ´', 'ا'] +['Ùħشا', 'ÙĥÙĦ'] +['צ', 'פ×ķף'] +['Ø¥', 'ست'] +['×ŀ׼', 'ר'] +['سÙħ', 'ع'] +['Ġкак', 'ой'] +['ÑĤ', 'воÑĢ'] +['ØŃ', 'ج'] +['Ù쨱', 'ض'] +['пÑĢав', 'лен'] +['Ġник', 'ак'] +['Ġmi', 'á»ĩ'] +['Ġmiá»ĩ', 'ng'] +['ü', 'ÃŁ'] +['иÑĢов', 'ал'] +['׾', '×ŀ×ķת'] +['次', 'ãģ®'] +['ÙĦ', 'Ø·'] +['à¸ķ', 'ัà¸Ļ'] +['×Ķ', 'ת×Ĺ×Ļ׾'] +['Ġfoto', 'ÄŁ'] +['ĠfotoÄŁ', 'raf'] +['طر', 'ØŃ'] +['à¸Ńà¸Ńà¸ģ', 'à¹Ħà¸Ľ'] +['Ġy', 'ên'] +['Ġп', 'ок'] +['Ġпок', 'Ñĥп'] +['ĠпокÑĥп', 'а'] +['ÑĨ', 'Ñĥ'] +['Ġкомп', 'ÑĮÑİ'] +['ĠкомпÑĮÑİ', 'ÑĤеÑĢ'] +['ĠاÙĦÙĥ', 'رÙĬÙħ'] +['تص', 'Ùħ'] +['تصÙħ', 'ÙĬÙħ'] +['Ġоказ', 'а'] +['Ġzar', 'ówn'] +['Ġzarówn', 'o'] +['ëĮĢ', 'ì¶ľ'] +['ãĤ»ãĥ³', 'ãĤ¿ãĥ¼'] +['Ġjako', 'ÅĽci'] +['æĤ', '©'] +['æĤ©', 'ãģ¿'] +['Ø£ÙĨ', 'ÙĪ'] +['Ø£ÙĨÙĪ', 
'اع'] +['ë¹', 'ł'] +['Ġìłķ', 'ë§IJ'] +['Ġk', 'ẻ'] +['ĠÑģай', 'ÑĤа'] +['Ġ×Ķ', 'ער×ij'] +['Ùĩ', 'ز'] +['pres', 'ión'] +['ĠÑģÑĤ', 'ен'] +['ãģ£ãģ¦', 'ãĤĭ'] +['Ġhız', 'lı'] +['Ðļ', 'ÐIJ'] +['×ŀשפ', '×Ĺת'] +['ĠÙĨ', 'Ùĩا'] +['ĠÙĨÙĩا', 'ÙĬØ©'] +['ãģ¾', 'ãģĦ'] +['о', 'ÑħÑĢан'] +['ร', 'à¹īà¸Ńย'] +['ล', 'ึà¸ģ'] +['ĠÙĪØ¨', 'اÙĦ'] +['ãĤĤãģ®', 'ãģĮ'] +['ר׼', '×Ļ×ij'] +['ãĤ¤', 'ãĥ¤'] +['س', 'ؤ'] +['سؤ', 'اÙĦ'] +['ĠÙĦØ£ÙĨ', 'Ùĩ'] +['ĠkonuÅŁ', 'tu'] +['Ðļ', 'ÑĥпиÑĤÑĮ'] +['Ġש×IJת', '×Ķ'] +['ĠÙĪØ§ÙĦ', 'س'] +['Ġmożliwo', 'ÅĽci'] +['Ġpró', 'b'] +['ëĶ', '°'] +['ãģ©', 'ãĤĮ'] +['ĠÐľ', 'ин'] +['ĠоÑĢганиз', 'м'] +['ãģ«å¯¾', 'ãģĻãĤĭ'] +['ĠPr', 'é'] +['Ġpriv', 'é'] +['ch', 'è'] +['ãģĦãģŁãģł', 'ãģį'] +['สà¸Ļุ', 'à¸ģ'] +['ajÄħ', 'ce'] +['ĠD', 'zi'] +['ĠDzi', 'ÄĻki'] +['ÅĤat', 'w'] +['r', 'än'] +['rän', 'k'] +['æĿ¥', 'ãģŁ'] +['Ġ×Ķ×Ļ×Ķ', '×ķ×ĵ×Ļ'] +['ãĤ¬', 'ãĥ¼'] +['ĠÑĢаÐ', '´'] +['ĠÑĢад', 'и'] +['к', 'ÑĤив'] +['Ø£', 'Ùĩد'] +['Ø£Ùĩد', 'اÙģ'] +['ש', '×IJ×Ļר'] +['ãģ¦', 'ãģĦãģªãģĦ'] +['Ġfr', 'üh'] +['Ġок', 'ол'] +['Ġокол', 'о'] +['Ġreg', 'ião'] +['ĠÑĩиÑģ', 'ле'] +['Ġpon', 'iew'] +['Ġponiew', 'aż'] +['ìĦ¼', 'íĦ°'] +['Ġb', 'ầu'] +['Ġê', '·'] +['Ġê·', 'ľ'] +['Ġê·ľ', 'ìłķ'] +['ĠH', 'òa'] +['ĠÑĤ', 'оÑĤ'] +['ãĤĤ', 'å¤ļãģĦ'] +['ĠاÙĦإسÙĦاÙħ', 'ÙĬØ©'] +['ãģĭ', 'ãģĦ'] +['Ñį', 'н'] +['ĠÑĥказ', 'ан'] +['ĠÑĤак', 'ое'] +['ï¼', '³'] +['ëĮĢ', 'íķĻ'] +['Ġgen', 'iÅŁ'] +['ĠاÙĦØ®', 'ÙĬ'] +['ĠاÙĦØ®ÙĬ', 'ارات'] +['ãĤĴè¡Į', 'ãģĨ'] +['ש', '×ŀ×Ķ'] +['ĠLÃł', 'm'] +['ÙĪÙĨ', 'ÙĬ'] +['Ġ×IJ', '׾×Ļ×ķ'] +['Ä', 'ĺ'] +['à¹Ħมà¹Ī', 'สามารà¸ĸ'] +['人', 'ãģ¨'] +['بر', 'ز'] +['×Ļס', '×ķ×ĵ'] +['×Ĵ', '׾×Ļ'] +['ĠÙĬ', 'ÙĨا'] +['ĠÙĬÙĨا', 'ÙĬر'] +['ĠкаÑĢÑĤ', 'ин'] +['Ġt', 'ôn'] +['à¹Ģ', 'à¸ģร'] +['à¸Ħ', 'à¸Ķี'] +['Ġ׾×IJ', '×ķר×ļ'] +['ãĤĤãĤī', 'ãģĨ'] +['ãģĭ', 'ãģĭãĤĭ'] +['ани', 'и'] +['Ġara', 'ÅŁtırma'] +['ÙĦاØŃ', 'ظ'] +['ãģĦ', 'ãĤĦ'] +['ĠT', 'Ãłi'] +['Ġ', 'à¸Ļà¸Ńà¸ģà¸Īาà¸ģ'] +['Ġà¸Ļà¸Ńà¸ģà¸Īาà¸ģ', 'à¸Ļีà¹ī'] +['ĠÄIJ', 'ảng'] +['ãģ£ãģ¦', 'ãģįãģŁ'] +['Ġà¸ĭึà¹Īà¸ĩ', 'à¹Ģà¸Ľà¹ĩà¸Ļ'] +['Ġt', 'ả'] +['Ġmożliwo', 'ÅĽÄĩ'] +['ĠS', 'ản'] +['Ġİ', 'ki'] 
+['Ġc', 'ắt'] +['س', 'Ø£ÙĦ'] +['Ġbak', 'ım'] +['Ø´', 'ب'] +['à¸ķ', 'ีà¹ī'] +['à¸ŀ', 'ยาย'] +['à¸ŀยาย', 'าม'] +['สั', 'à¸Ľ'] +['à¸ªà¸±à¸Ľ', 'à¸Ķา'] +['à¸ªà¸±à¸Ľà¸Ķา', 'หà¹Į'] +['ë°', 'Ģ'] +['еÑĢ', 'Ñĭ'] +['Ġc', 'ánh'] +['Ġthu', 'ế'] +['ت', 'بع'] +['ãģ«åħ¥', 'ãĤĮ'] +['Ñİ', 'ÑģÑĮ'] +['íļĮ', 'ìĿĺ'] +['ç°¡', 'åį'] +['ç°¡åį', 'ĺ'] +['ç°¡åįĺ', 'ãģ«'] +['Ġtr', 'úc'] +['ĠاÙĦÙĥ', 'ÙĪÙĬ'] +['ĠاÙĦÙĥÙĪÙĬ', 'ت'] +['ãĤıãģij', 'ãģ§ãģĻ'] +['ĠÑģв', 'об'] +['ĠÑģвоб', 'од'] +['ĠÑĥÑĩаÑģÑĤ', 'ник'] +['สิ', 'à¹īà¸Ļ'] +['ĠпÑĢо', 'ÑĦеÑģÑģиона'] +['ĠпÑĢоÑĦеÑģÑģиона', 'лÑĮн'] +['Ñģп', 'оÑĢ'] +['×Ĺ', '×ķ×ij×Ķ'] +['Ùħع', 'ÙĨÙī'] +['ĠاÙĦÙģ', 'ترة'] +['สูà¸ĩ', 'สุà¸Ķ'] +['ãĤı', 'ãģļ'] +['ĠÄij', 'è'] +['ĠÄijè', 'n'] +['æ¯Ķ', 'ãģ¹'] +['า', 'à¸ĺิ'] +['Ġmoż', 'emy'] +['à¹ģ', 'à¸ĭ'] +['à¸Īะ', 'à¹Ħมà¹Ī'] +['Ġs', 'ắp'] +['Ðļ', 'Ðŀ'] +['Ġprá', 'ctica'] +['ÙĪÙĥ', 'اÙĦØ©'] +['è¾¼', 'ãĤĵãģ§'] +['ológ', 'ica'] +['Ġе', 'Ñī'] +['ĠеÑī', 'Ñij'] +['تع', 'دÙĬÙĦ'] +['ĠØ£', 'Ùĥد'] +['Ġצר', '×Ļ׼'] +['Ġצר×Ļ׼', '×Ļ×Ŀ'] +['Ø«', 'Ùħ'] +['Ġк', 'ÑĢÑĥ'] +['ĠкÑĢÑĥ', 'п'] +['×ij×Ļ×§', '×ķרת'] +['Ġì¡°', 'ê¸Ī'] +['ãģ¨ãģį', 'ãģ¯'] +['Ġb', 'ạc'] +['ĠÑĢаÑģ', 'пол'] +['ĠÑĢаÑģпол', 'ож'] +['ĠÑĢаÑģполож', 'ен'] +['ز', 'ÙĬÙĨ'] +['ĠÐļ', 'ÑĢоме'] +['ĠاÙĦÙĨ', 'ظر'] +['×Ķ', '×ķ×ĵ'] +['ĠاÙĦس', 'بت'] +['ã썿ĢĿ', 'ãģĦ'] +['Ġpa', 'ÅĦst'] +['ĠpaÅĦst', 'w'] +['ĠÙĦÙĬ', 'ست'] +['ĠбÑĥд', 'Ñĥ'] +['à¸Ĺัà¸Ļ', 'à¸Ĺี'] +['ร', 'าม'] +['ØŃ', 'صÙĪÙĦ'] +['ãģĹãģ¦ãģıãĤĮ', 'ãĤĭ'] +['ĠاÙĦØ¥', 'سرائÙĬÙĦ'] +['ĠاÙĦإسرائÙĬÙĦ', 'ÙĬ'] +['ãģĵãĤĮ', 'ãģ¾ãģ§'] +['ìĤ¬', '를'] +['Ġs', 'ürü'] +['à¹Ģว', 'à¸Ńรà¹Į'] +['à¹Ģà¸ĭ', 'à¸Ńรà¹Į'] +['Ġutilis', 'é'] +['ĠÑģиÑģÑĤем', 'а'] +['Ġdw', 'ó'] +['Ġdwó', 'ch'] +['Ġpróp', 'rio'] +['Ġëĵ±', 'ìĿĦ'] +['arr', 'êt'] +['ĠЧ', 'а'] +['×IJ×ŀ', '׳×ķת'] +['عار', 'ض'] +['à¹Ģà¸ģม', 'สà¹Į'] +['Ġ׾×Ķ', '×ij×Ļף'] +['Ġ׾', '×ij×Ĺ'] +['Ġ׾×ij×Ĺ', '×ķר'] +['สา', 'à¸Ĥา'] +['ĠÐľÐ¾Ñģк', 'ве'] +['ب', 'عد'] +['ĠاÙĦÙĤر', 'ار'] +['ĠÄIJ', 'á»ĭa'] +['Ġ×Ĺ', '×Ĵ'] +['Ùģ', 'تر'] +['ÙĪÙĨ', 'Ø©'] +['Ġ×Ķ×ĸ', '×IJת'] +['å¸Ĥ', 'ãģ®'] +['ãģ»', 'ãģĹãģĦ'] +['Ġ×ij×¢', '×Ļר'] 
+['ĠÑĤеп', 'еÑĢÑĮ'] +['ìĬµ', 'ëĭĪê¹Į'] +['à¹Ħม', 'à¹Īว'] +['à¹Ħมà¹Īว', 'à¹Īา'] +['à¹Ħมà¹Īวà¹Īา', 'à¸Īะ'] +['×ŀ', '×IJ×Ķ'] +['æĥħ', 'åł±'] +['æĥħåł±', 'ãĤĴ'] +['غ', 'ÙĨ'] +['Ġпо', 'Ñı'] +['ĠпоÑı', 'ви'] +['éģİ', 'ãģĶ'] +['تش', 'غ'] +['تشغ', 'ÙĬÙĦ'] +['в', 'ел'] +['Ġ×Ĺ', '×ŀ'] +['ãģ¨ãģªãĤĬ', 'ãģ¾ãģĻ'] +['Ġra', 'ÄŁ'] +['ĠraÄŁ', 'men'] +['ãģĭ', 'ãģ©ãģĨ'] +['ãģĭãģ©ãģĨ', 'ãģĭ'] +['ен', 'ко'] +['ì§Ģ', 'ê³ł'] +['Ġ×IJ׾', '×Ļ×Ķ'] +['ĠØ£', 'ÙĦ'] +['à¸Īำ', 'หà¸Ļ'] +['à¸Īำหà¸Ļ', 'à¹Īาย'] +['nız', 'ı'] +['Ġ׾ק', '×Ĺת'] +['Ø£', 'ÙĩÙħ'] +['Ø£ÙĩÙħ', 'ÙĬØ©'] +['ت', 'غÙĬر'] +['ש', '×Ĺר'] +['ס×ķפ', 'ר'] +['×ĵ', '×Ļר'] +['èī¯', 'ãģĭãģ£ãģŁ'] +['×ŀ׾×Ĺ', '×ŀ×Ķ'] +['ÑģÑĤв', 'ие'] +['ÑĤ', 'ÑĢаÑĤ'] +['ĠاÙĦØ£', 'Ø®'] +['ĠاÙĦأخ', 'ÙĬرة'] +['ĠاÙĦØŃ', 'صÙĪÙĦ'] +['Ġcréd', 'ito'] +['צ', '×Ļ×¢'] +['ãĥ¬', 'ãĥĻãĥ«'] +['بر', 'ÙĬ'] +['ëIJ', 'IJ'] +['ãģł', 'ãģ£ãģ¦'] +['Ġreal', 'tÃł'] +['س', 'Ù쨱'] +['×ķ׳', '×ķ'] +['×Ĵ', '×ķ×ĵ'] +['×Ĵ×ķ×ĵ', '׾'] +['ฮ', 'า'] +['ãģĹãģ¦', 'ãģĬãĤĬãģ¾ãģĻ'] +['Ġg', 'Ãł'] +['Ġ׾×ij', 'צע'] +['å¼ķ', 'è¶ĬãģĹ'] +['Ġ×ŀ', '×Ļ׾×Ļ'] +['Ġ×ŀ×Ļ׾×Ļ', '×ķף'] +['Ùħ', 'در'] +['Ùħدر', 'سة'] +['פ', '×ķ×ĺ'] +['à¸Ļà¹īำ', 'มัà¸Ļ'] +['ëģ', 'Ŀ'] +['ع', 'Ùĥس'] +['ĠÙĤ', 'ض'] +['ĠÑĢÑĭ', 'б'] +['خط', 'Ø·'] +['×ŀ×ķס', '×ĵ'] +['Ġ׼׾', '׾×Ļ'] +['ĠкоÑĤоÑĢ', 'ое'] +['צ×Ļ', '×ķף'] +['ĠмеÑģÑĤ', 'а'] +['ãģĭ', 'ãģ¤'] +['г', 'ÑĢÑĥпп'] +['׾', '×Ļ׾'] +['ת', '×ķ×IJר'] +['ë³µ', 'ì§Ģ'] +['à¹ģà¸ľ', 'à¹Īà¸Ļ'] +['Ġ×ij×¢', 'ת'] +['æĻĤéĸĵ', 'ãĤĴ'] +['ï¼', '£'] +['ãģ¨ãģĦãģĨãģĵãģ¨', 'ãģ§'] +['Ġ׾×Ķ', '×§'] +['Ġ׾', '×ĸ×Ķ'] +['ĠìłĢ', 'ëĬĶ'] +['ĠاÙĦØ¥', 'رÙĩاب'] +['ĠìŀĪëĬĶ', 'ëį°'] +['ĠÑĤ', 'огда'] +['Ġ×Ķ', 'צ×Ļ'] +['×ķ׾', '×ĺ'] +['Ġר', 'פ×ķ×IJ×Ļ'] +['ãģĵãģ¨', 'ãģ§ãģĻ'] +['ĠÄij', 'ÃŃch'] +['ØŃ', 'ÙĬا'] +['Ġ×Ķ×ŀש', '×Ĺ×§'] +['ãģľ', 'ãģ²'] +['Ġ×ŀ×IJ', 'פשר'] +['ãģ¿', 'ãģ¾ãģĹãģŁ'] +['ĠاÙĦØ£ÙħÙĬر', 'ÙĥÙĬ'] +['Ùħج', 'تÙħع'] +['Ġس', 'اب'] +['Ġساب', 'ÙĤ'] +['׼', '×Ļ׾'] +['áº', '¾'] +['ãĥª', 'ãĤ¹ãĥĪ'] +['Ġì', 'ĥ'] +['Ġìĥ', 'Ī'] +['ĠìĥĪ', 'ë¡ľ'] +['ĠìĥĪë¡ľ', 'ìļ´'] +['ĠD', 'á»ĭch'] +['à¹Ģหมาะ', 'สม'] +['ĠاÙĦÙĨ', 'بÙĬ'] +['׾', '׾'] +['ÙĨ', 
'ع'] +['Ðĵ', 'лав'] +['Ðĵлав', 'наÑı'] +['Ùħر', 'ض'] +['Ġ×ķ', '×ĵ'] +['ت', 'ÙĤÙĬ'] +['تÙĤÙĬ', 'ÙĬÙħ'] +['Ġb', 'ảng'] +['ĠÙģ', 'ÙĤاÙĦ'] +['×¢', '×ŀ×Ļ'] +['д', 'ÑĢа'] +['Ġsu', 'á»ijt'] +['سر', 'عة'] +['Ġc', 'á»Ń'] +['Ġ×Ķ', '×Ļ×Ĺ×Ļ×ĵ'] +['سع', 'ÙĬد'] +['à¸Ńา', 'à¸Ĭีà¸ŀ'] +['Ġس', 'ÙĪØ§Ø¡'] +['ãĤ½', 'ãĥķãĥĪ'] +['Ġл', 'иÑĩно'] +['ĠÐļ', 'оÑĢ'] +['اÙĩ', 'تÙħ'] +['اÙĩتÙħ', 'اÙħ'] +['à¸Ń', 'à¸Ķี'] +['à¸Ńà¸Ķี', 'à¸ķ'] +['ãģIJ', 'ãĤīãģĦ'] +['Ġiht', 'iya'] +['Ġihtiya', 'ç'] +['ãģ¾ãģ§', 'ãģ®'] +['ìĭľ', 'ìĬ¤'] +['ìĭľìĬ¤', 'íħľ'] +['ÑĢÑĥ', 'ÑĪ'] +['ãĤĦ', 'ãģ£ãģ±'] +['ãĤĦãģ£ãģ±', 'ãĤĬ'] +['к', 'еÑĢ'] +['Ġ', 'ży'] +['Ġży', 'w'] +['кл', 'он'] +['Ġl', 'ượt'] +['Ã', '¾'] +['да', 'Ñĩи'] +['tür', 'k'] +['غ', 'ÙĪ'] +['ĠигÑĢ', 'ок'] +['Ġph', 'ê'] +['Ġש', '×¢×ľ'] +['ĠاÙĦÙħ', 'دÙĨÙĬ'] +['ĠìŬ룬', 'ë¶Ħ'] +['ער', '×Ļ×Ŀ'] +['Ñħод', 'ÑıÑĤ'] +['Ġx', 'ứ'] +['ÐĹ', 'а'] +['ĠÙģ', 'رص'] +['à¸Īะ', 'à¸Ĺำà¹ĥหà¹ī'] +['íģ', '´'] +['×¢', '×ij×ķר'] +['à¹Ģหลà¹Īา', 'à¸Ļีà¹ī'] +['èĢĥãģĪ', 'ãĤĭ'] +['ÑĢ', 'еÑģÑĤ'] +['н', 'нÑĭй'] +['Ġc', 'ầm'] +['دا', 'Ø®ÙĦ'] +['ĠÙħÙĦÙĬ', 'ار'] +['ĠÐIJ', 'л'] +['ĠвÑĢем', 'ен'] +['à¸Ĭà¹Īวย', 'à¹ĥหà¹ī'] +['ר×Ļ', '×ķת'] +['ëĵ', '¯'] +['飲', 'ãģ¿'] +['׳', '׾'] +['שת', '×£'] +['ĠاÙĦسعÙĪØ¯', 'ÙĬ'] +['u', 'ÃŁ'] +['ìĿ¸', 'ëį°'] +['ĠìĿ¼', 'ë°ĺ'] +['ÅĤ', 'ÄĻ'] +['Ġm', 'á»iji'] +['×ŀ', '×Ļ׳'] +['ĠاÙĦØ£', 'Ø·Ù쨧ÙĦ'] +['Ġçı', 'kan'] +['é', 'cole'] +['×§', '×Ļש'] +['×§×Ļש', '×ķר'] +['ĠоÑģ', 'ÑĥÑīеÑģÑĤв'] +['ĠоÑģÑĥÑīеÑģÑĤв', 'лÑı'] +['×ij', '×IJר'] +['à¹Ħà¸Ľ', 'à¸Ķà¹īวย'] +['Ġ×¢', '×ķ׾×Ķ'] +['à¸ģà¹ĩ', 'à¹Ħมà¹Ī'] +['ãĥ¢', 'ãĥĩ'] +['ãĥ¢ãĥĩ', 'ãĥ«'] +['تØŃ', 'ÙĪÙĦ'] +['Ġод', 'ного'] +['ת×Ĺ×Ļ׾', 'ת'] +['Ġت', 'Ø®'] +['Ġch', 'cia'] +['Ġchcia', 'ÅĤ'] +['ãĥIJ', 'ãĥ³'] +['èĢħ', 'ãģ¯'] +['ĠÙħ', 'ØŃÙĦ'] +['Ñģл', 'ож'] +['Ñģлож', 'н'] +['Ġt', 'ÄĻ'] +['Ġçı', 'kt'] +['Ġçıkt', 'ı'] +['ĠC', 'Æ¡'] +['à¹Ħà¸Ķà¹ī', 'à¹Ģลย'] +['ır', 'ken'] +['à¹Ģà¸Ĥà¹īา', 'สูà¹Ī'] +['ÙħØŃ', 'Ùĥ'] +['ÙħØŃÙĥ', 'ÙħØ©'] +['à¸Ħุ', 'à¹īม'] +['à¸Ļà¹Īา', 'à¸Īะ'] +['лÑİ', 'д'] +['де', 'ÑģÑı'] +['деÑģÑı', 'ÑĤ'] +['ĠлÑİб', 'ой'] +['تØŃر', 'ÙĬر'] 
+['צע', '×ĵ'] +['Ġе', 'Ñij'] +['ĠاÙĦØŃ', 'ÙĥÙħ'] +['Ġص', 'باØŃ'] +['à¹Ģà¸ļ', 'à¸Ńรà¹Į'] +['Ġróż', 'nych'] +['ги', 'б'] +['ĠÑģ', 'оÑĤ'] +['ĠÑģоÑĤ', 'ÑĢÑĥд'] +['ĠÑģоÑĤÑĢÑĥд', 'ник'] +['ĠобÑĬ', 'ем'] +['פ', '×ĺר'] +['ãģĻãģĶ', 'ãģı'] +['ãģ«éĸ¢', 'ãģĹãģ¦'] +['в', 'ол'] +['Ø«', 'ÙħاÙĨ'] +['Ġd', 'ần'] +['æĬ', 'ľ'] +['æĬľ', 'ãģij'] +['Ġ×¢', 'ש'] +['Ġעש', '×ķ×Ļ'] +['ס', '×ķף'] +['ãģªãģ®', 'ãģ§ãģĻ'] +['ãģ¯', 'ãģ©ãģĨ'] +['×ŀ×¢', 'ר×ij'] +['ï¼', '°'] +['Ùħ', 'صر'] +['ÙħÙĨ', 'اسب'] +['ÙħÙĨاسب', 'Ø©'] +['ä¸Ĭ', 'ãģ®'] +['×IJ×Ļש', '×ķר'] +['ĠìĦ¤', 'ì¹ĺ'] +['×ŀ×ĵ×Ļ׳', '×ķת'] +['×ŀר', 'ת'] +['ãĤĭ', 'ãģ®ãģĮ'] +['د', 'Ùİ'] +['ĠاÙĦشر', 'Ùĥات'] +['ìĭľ', 'ê°Ħ'] +['ĠÑĢеÑĪ', 'ение'] +['ãģĻãĤĭ', 'ãģ®ãģ¯'] +['ĠìŀIJìĭł', 'ìĿĺ'] +['׾', '×ŀ×ķ'] +['ãģ¨ãģĵãĤį', 'ãģ§'] +['Ġ×§', 'צר'] +['Ġmã', 'i'] +['Ġkü', 'ltür'] +['ãĥ©ãĤ¤', 'ãĥĸ'] +['à¸ľà¸¹à¹ī', 'หà¸įิà¸ĩ'] +['æĻĤéĸĵ', 'ãģĮ'] +['клÑİÑĩ', 'и'] +['diÄŁ', 'iniz'] +['มาà¸ģ', 'à¹Ĩ'] +['تØŃ', 'ÙħÙĦ'] +['Ġh', 'ạt'] +['ãĤ¦', 'ãĤ£'] +['п', 'ле'] +['×ŀ', '׾×IJ'] +['ÅĤ', 'ó'] +['Ġg', 'á»ijc'] +['Ġ×IJ', '×ķ×ĵ×ķת'] +['หว', 'าà¸Ļ'] +['ĠاÙĦ', 'ÙĪØ²'] +['ĠاÙĦÙĪØ²', 'راء'] +['ëĵ¤', 'ê³¼'] +['Ġص', 'ØŃ'] +['ĠصØŃ', 'ÙĬÙ쨩'] +['Ġм', 'м'] +['تد', 'Ø®ÙĦ'] +['Ġpersön', 'lich'] +['Ġز', 'ÙĬ'] +['ĠزÙĬ', 'ادة'] +['ãĤ·', 'ãĤ¢'] +['Ġng', 'ắn'] +['à¸Ħล', 'ิà¸ģ'] +['Ġs', 'ông'] +['Ġtü', 'ket'] +['Ñį', 'ÑĦÑĦ'] +['ÑįÑĦÑĦ', 'екÑĤ'] +['ש', '×Ļ×ij'] +['Ġا', 'عت'] +['ت', 'ض'] +['تض', 'ÙħÙĨ'] +['ĠاÙĦÙħØ´', 'رÙĪØ¹'] +['Ġprodu', 'ção'] +['ĠпÑĢимен', 'Ñı'] +['ни', 'ÑĨÑĭ'] +['주', 'ëĬĶ'] +['ر', 'Ùı'] +['Ġm', 'Æ¡'] +['Ġhayat', 'ı'] +['ëŁ', '½'] +['Ġü', 'cret'] +['Ġyan', 'ında'] +['Ġpr', 'ática'] +['×ij×Ļ×§', '×ķר'] +['Ãľ', 'N'] +['Ñģ', 'оÑĤ'] +['ãĤıãģij', 'ãģ§'] +['Ġдол', 'го'] +['ת', '׼×ķ'] +['ĠìķĦ', 'ëĭĮ'] +['ë', 'į°ìĿ´'] +['Ġç', 'iz'] +['Ġcho', 'Äĩ'] +['Ġ×Ķ', '×Ļת'] +['Ġ×Ķ×Ļת', 'ר'] +['Ġso', 'át'] +['׼', '×ij×ĵ'] +['à¹Ģล', 'à¹Īา'] +['Ġд', 'еÑĢ'] +['ĠдеÑĢ', 'ев'] +['ãĤĴ', 'åħ¥ãĤĮ'] +['×Ĺ', '×ķס'] +['×Ĺ×ķס', 'ר'] +['ج', 'ÙĬÙĨ'] +['t', 'ón'] +['onn', 'é'] +['Ġпол', 'ноÑģÑĤÑĮÑİ'] +['人', 
'ãģŁãģ¡'] +['Ġpr', 'êt'] +['ëł', '¸'] +['Ġdéc', 'embre'] +['cı', 'lar'] +['Ġת', 'ת'] +['Ġê²½ìļ°', 'ìĹIJëĬĶ'] +['ÙĪ', 'عد'] +['è¦ĭ', 'ãĤĭ'] +['วิ', 'à¸Īัย'] +['ë', '¶Ī'] +['ز', 'ÙĪØ§'] +['زÙĪØ§', 'ج'] +['d', 'ì'] +['ãģ§ãģĻ', 'ãĤĪ'] +['Ġвод', 'о'] +['ĠÙĬ', 'ÙĪØ¬Ø¯'] +['Ñģ', 'оÑģÑĤоÑı'] +['Ðŀ', 'С'] +['ĠÄIJ', 'ó'] +['×Ĺ', 'פש'] +['Ġצ', '×Ļ×ij×ķר'] +['ĠاÙĦÙĤ', 'Ø·'] +['ĠاÙĦÙĤØ·', 'اع'] +['Ġиме', 'ÑİÑĤ'] +['Ġph', 'áºŃn'] +['×Ľ×¡', 'פ×Ļ'] +['полн', 'иÑĤелÑĮ'] +['éĻIJ', 'ãĤĬ'] +['ĠÑģ', 'ÑĢав'] +['ĠÑģÑĢав', 'н'] +['ÙħاÙĦ', 'Ùĥ'] +['×ĵר', '×ķ×Ŀ'] +['çļĨ', 'ãģķãĤĵ'] +['ØŃÙĤ', 'ÙĤ'] +['à¹ģหล', 'à¹Īà¸ĩ'] +['ĠاÙĦر', 'سÙħÙĬ'] +['оÑĩ', 'ки'] +['×ĺ', '×ij×Ĺ'] +['Ġcan', 'lı'] +['Ġ׾', '׾'] +['Ġ׾׾', '×ŀ×ķ×ĵ'] +['×ŀ×ij', '×ķ'] +['ת', '׼'] +['×ª×Ľ', '׳×Ļת'] +['ĠاÙĦÙħ', 'شار'] +['ĠاÙĦÙħشار', 'ÙĥØ©'] +['İ', 'Åŀ'] +['ĠسÙĬ', 'اسÙĬ'] +['в', 'олÑĮ'] +['ĠÑģ', 'пÑĢав'] +['æĿ¥', 'ãģ¦'] +['פ×ķר', '×ķ×Ŀ'] +['สำ', 'à¹Ģรà¹ĩ'] +['สำà¹Ģรà¹ĩ', 'à¸Ī'] +['ĠÅŁ', 'öyle'] +['Ġzosta', 'ÅĤa'] +['ĠH', 'ü'] +['ר', '×ķש'] +['د', 'ÙĦÙĬÙĦ'] +['ÑĢи', 'д'] +['ש', 'ף'] +['×ŀ×§', '×ķר'] +['ĠÑĥ', 'Ñĩ'] +['ĠÑĥÑĩ', 'еб'] +['ĠÑį', 'ÑĤа'] +['ков', 'а'] +['à¸ķà¸Ļ', 'à¹Ģà¸Ńà¸ĩ'] +['ÙĨ', 'ÙIJ'] +['à¸Ńีà¸ģ', 'à¸Ħรัà¹īà¸ĩ'] +['ระ', 'à¸ļุ'] +['Ġd', 'ữ'] +['ĠاÙĦØŃ', 'اÙĦÙĬ'] +['׼', '×ķ׼'] +['׼×ķ׼', '×ij'] +['Ġ×ŀ×IJ', 'שר'] +['Ġtr', 'ụ'] +['ÑĤел', 'ем'] +['Ġв', 'ли'] +['Ġвли', 'Ñı'] +['Ġש×IJת', '×Ŀ'] +['Ġuw', 'ag'] +['Ġuwag', 'ÄĻ'] +['×ĺ', '×Ļת'] +['×IJ', '×ĵ×Ŀ'] +['à¸Ķ', 'ุ'] +['Ġ×Ķ×IJ', '׾×Ķ'] +['Ġkar', 'Ä±ÅŁ'] +['ĠÄIJ', 'á»iji'] +['да', 'ÑİÑĤ'] +['ãģªãģ®', 'ãģ«'] +['Äħ', 'cych'] +['à¹Ģà¸Ļ', 'à¹īà¸Ļ'] +['ãģĹãģ¦', 'ãģĹãģ¾ãģĨ'] +['int', 'érieur'] +['ĠfÃŃs', 'ica'] +['ĠÐŁ', 'ол'] +['ãģĹãģ', 'ķ'] +['à¸Ĺำ', 'à¹Ħม'] +['ĠL', 'âm'] +['ĠاÙĦÙħ', 'سÙĦÙħ'] +['ĠاÙĦÙħسÙĦÙħ', 'ÙĬÙĨ'] +['ص', 'ØŃØ©'] +['ìĹ', 'Ħ'] +['à¹Ģà¸Ķà¹ĩ', 'à¸Ķ'] +['ĠÑĥ', 'ÑĩеÑĤ'] +['â', 'Ìģ'] +['Ġب', 'ÙĦا'] +['ĠاÙĦاجتÙħاع', 'ÙĬ'] +['פרס', '×Ŀ'] +['ãĥķ', 'ãĥ©'] +['ĠÐļ', 'огда'] +['mie', 'ÅĽci'] +['ĠبÙĬÙĨ', 'Ùħا'] +['Ġ×ŀ×IJ', '×ŀר×Ļ×Ŀ'] +['Ġ×ij×IJ', '×ĸ×ķר'] +['×ķש', 
'×Ļ×Ŀ'] +['ĠÑģдел', 'а'] +['entr', 'ée'] +['à¹Ģ', 'à¸Ħà¹īา'] +['Ñĥг', 'л'] +['ĠاÙĦÙģ', 'ÙĨÙĬ'] +['ĠÐĴ', 'оÑĤ'] +['à¸Ĺีà¹Ī', 'มา'] +['×ķצ', '×Ĵ'] +['ÙĤد', 'رة'] +['Ġëª', '©'] +['Ġ목', 'ìłģ'] +['íıī', 'ê°Ģ'] +['ĠاÙĦØ£', 'ربع'] +['ĠاÙĦأربع', 'اء'] +['פס', '×Ļ×§'] +['ĠÑıвлÑı', 'ÑİÑĤÑģÑı'] +['ب', 'ÙĪÙĨ'] +['ì°', '¾'] +['×ŀ×¢', 'ר׼'] +['×ŀ×¢×¨×Ľ', '×ķת'] +['ãĤ·', 'ãĤ§'] +['ĠباÙĦ', 'Ø£'] +['íĸĪ', 'ëįĺ'] +['ĠاÙĦبر', 'ÙĨاÙħج'] +['ĠاÙĦØ£', 'ØŃد'] +['Ġm', 'Å©'] +['ĠmÅ©', 'i'] +['п', 'аÑĤ'] +['ب', 'Ø«'] +['ĠÑĨ', 'енÑĭ'] +['Ġ×ijת', '׾'] +['è¨Ģ', 'ãĤıãĤĮ'] +['ĠاÙĦÙħ', 'جاÙĦ'] +['ĠìĦ¸', 'ìĥģ'] +['Ġ×Ĵ', '×ķפ'] +['ĠнаÑĪ', 'ей'] +['Ġкомп', 'аниÑı'] +['б', 'ин'] +['öl', 'ü'] +['×Ļ', '×Ļ×ĺ'] +['Ġ×ŀס', 'פ×Ļ×§'] +['ยัà¸ĩ', 'à¸Ħà¸ĩ'] +['ĠЧ', 'и'] +['Ġан', 'ÑĤи'] +['ĠÑģÑĢед', 'и'] +['สà¹Īวà¸Ļ', 'à¹ĥหà¸įà¹Ī'] +['оÑĩ', 'ка'] +['íĬ¹', 'ë³Ħ'] +['ว', 'à¹Īาà¸ĩ'] +['гоÑĢ', 'од'] +['با', 'Ùĥ'] +['à¹Ģส', 'ีà¹Īย'] +['à¹Ģสีà¹Īย', 'à¸ĩ'] +['ãĤĤãĤī', 'ãģĦ'] +['×§', '×ķ×Ŀ'] +['ãģĽ', 'ãģļ'] +['ĠاÙĦÙĤ', 'اÙĩرة'] +['Ġ×ij', '׼×ļ'] +['Ùħشار', 'ÙĬع'] +['باØŃ', 'Ø«'] +['Ġпо', 'Ñĩ'] +['ĠпоÑĩ', 'ÑĤи'] +['ĠÑĦоÑĢм', 'а'] +['S', 'İ'] +['Ġ×ŀצ', '×Ļ×¢'] +['ล', 'ื'] +['ลื', 'ม'] +['ĠÑĤ', 'еÑĢ'] +['ĠÑĤеÑĢ', 'ÑĢиÑĤоÑĢ'] +['ĠÑĤеÑĢÑĢиÑĤоÑĢ', 'ии'] +['Ġв', 'меÑģÑĤ'] +['ĠвмеÑģÑĤ', 'е'] +['dıkl', 'arı'] +['op', 'ération'] +['à¹Ĥ', 'ห'] +['ص', 'دÙĬ'] +['صدÙĬ', 'ÙĤ'] +['íĸī', 'ìłķ'] +['تج', 'ا'] +['تجا', 'ÙĪØ²'] +['Ġsu', 'ç'] +['Ġar', 'ty'] +['Ġarty', 'ku'] +['Ġartyku', 'ÅĤ'] +['ãĤ·ãĥ§', 'ãĥĥãĥĹ'] +['ש', 'פ'] +['שפ', '×Ļ×¢'] +['Ġ×Ķש', '×Ļר×ķת'] +['à¹ģà¸ĸ', 'ม'] +['ë¸', 'Ķ'] +['Ġuk', 'ÅĤad'] +['Ġ×ķ', '׼×Ļ'] +['หล', 'าà¸ģ'] +['หลาà¸ģ', 'หลาย'] +['æĸ¹', 'ãĤĤ'] +['Ġpodr', 'óż'] +['ĠE', 'ÄŁer'] +['Ġком', 'наÑĤ'] +['ĠÑģам', 'ÑĭÑħ'] +['Ġв', 'кÑĥÑģ'] +['б', 'еж'] +['Ġ×ij', '×§×ķ'] +['æİĽ', 'ãģij'] +['ãģ¿', 'ãĤĭãģ¨'] +['ĠiliÅŁ', 'kin'] +['ĠÙĬ', 'عÙħÙĦ'] +['Ġпод', 'аÑĢ'] +['Ġyaz', 'ılı'] +['ãĤĴ', 'å¾Ĺ'] +['Ġwyst', 'ÄĻp'] +['à¸Ĺีà¹Ī', 'à¹ĥà¸Ĭà¹ī'] +['ØŃاد', 'Ø«'] +['ÙĪ', 'ÙĬد'] +['кÑĥ', 'лÑĮÑĤ'] +['кÑĥлÑĮÑĤ', 'ÑĥÑĢ'] +['à¸ģาร', 
'à¹ģà¸Ĥà¹Īà¸ĩ'] +['à¸ģารà¹ģà¸Ĥà¹Īà¸ĩ', 'à¸Ĥ'] +['à¸ģารà¹ģà¸Ĥà¹Īà¸ĩà¸Ĥ', 'ัà¸Ļ'] +['ÙħÙĪ', 'ظ'] +['ÙħÙĪØ¸', 'Ùģ'] +['ÙĬÙħ', 'ÙĬ'] +['ãĤĵãģ§ãģĻ', 'ãģĮ'] +['diÄŁ', 'im'] +['diÄŁim', 'iz'] +['ĠÐŁ', 'еÑĢ'] +['ĠÐŁÐµÑĢ', 'в'] +['Ġm', 'ão'] +['ĠÑģ', 'ез'] +['ĠÑģез', 'он'] +['Ġ×Ķ×ŀ', '×¢'] +['Ùħ', 'جÙħÙĪØ¹Ø©'] +['ĠинÑĦоÑĢм', 'аÑĨии'] +['i', 'ếc'] +['ã', 'ng'] +['ĠÄij', 'ấy'] +['ãģĶ', 'ç´'] +['ãģĶç´', '¹'] +['ãģĶç´¹', 'ä»ĭ'] +['Ġad', 'ım'] +['à¹Ħ', 'หล'] +['Ġп', 'ÑĢакÑĤи'] +['ĠпÑĢакÑĤи', 'Ñĩ'] +['ĠпÑĢакÑĤиÑĩ', 'еÑģ'] +['ĠпÑĢакÑĤиÑĩеÑģ', 'ки'] +['ĠاÙĦÙĨ', 'Ù쨳'] +['ĠÑĢабоÑĤ', 'е'] +['ÙĦÙĬ', 'Ùģ'] +['ĠاÙĦجÙĨ', 'ÙĪØ¨'] +['Ġвод', 'Ñĭ'] +['ì¹', 'Ļ'] +['Ġм', 'иÑĢа'] +['ĠÄij', 'ừng'] +['ĠпÑĢоÑĤив', 'о'] +['ĠÑģÑĤÑĢан', 'Ñĭ'] +['ล', 'ู'] +['ìĤ', '¶'] +['kre', 'ÅĽl'] +['Ġbul', 'und'] +['Ġbulund', 'uÄŁu'] +['à¹ģ', 'สà¸Ļ'] +['ãĤ±', 'ãĤ¢'] +['ת×Ĺ', '×ķ×ŀ×Ļ'] +['ר׼', '×Ķ'] +['Ġ׾ק', '×ķ×Ĺ'] +['Ġ׾ק×ķ×Ĺ', '×ķת'] +['Ġ×Ľ×ª', '×ķ×ijת'] +['ĠÙĦ', 'ÙĥÙħ'] +['ب', 'شر'] +['Ġr', 'Ãłng'] +['Ġ×ŀ×Ķ', '×ŀ'] +['Ġ×IJ×Ĺר', '×ķת'] +['Ġб', 'он'] +['Ġбон', 'ÑĥÑģ'] +['ï½', 'Ĺ'] +['à¹ģ', 'ยà¸ģ'] +['ãģĤãģªãģŁ', 'ãģ®'] +['ĠÑĥÑĩаÑģÑĤ', 'ие'] +['ĠE', 'yl'] +['ĠEyl', 'ül'] +['ĠçalÄ±ÅŁmalar', 'ı'] +['Ø®', 'طر'] +['ìĿ', '½'] +['à¸ģาร', 'à¹ĥà¸Ĭà¹īà¸ĩาà¸Ļ'] +['Ġана', 'лиз'] +['תק', '×ij׾'] +['ни', 'ем'] +['Ġİ', 'ns'] +['Ġİns', 'an'] +['ĠبÙĪ', 'اس'] +['ĠبÙĪØ§Ø³', 'طة'] +['Ġ׳', '×Ľ×ł×¡'] +['Ġ×Ķ×ŀ', '×Ļ×ĵ×¢'] +['Ġç', 'o'] +['Ġço', 'ÄŁu'] +['á»', 'ĺ'] +['ĠêµŃ', '민'] +['ãĤĤ', 'ãģĦãģĦ'] +['Ġ׼', '׾×Ļ'] +['ĠÑģÑĢед', 'не'] +['g', 'ÅĤo'] +['gÅĤo', 'ÅĽ'] +['Ġneg', 'ó'] +['Ġnegó', 'cio'] +['ĠÑĢ', 'егиÑģÑĤ'] +['ĠÑĢегиÑģÑĤ', 'ÑĢа'] +['ĠÑĢегиÑģÑĤÑĢа', 'ÑĨии'] +['Ġtr', 'á»ĵng'] +['ĠпÑĢ', 'Ñı'] +['ĠпÑĢÑı', 'мо'] +['ëłĪ', 'ìĿ´'] +['Ġk', 'ém'] +['к', 'ле'] +['à¸Ļำ', 'มา'] +['ĠÑĦ', 'ин'] +['ĠÑĦин', 'анÑģ'] +['ĠÑĦинанÑģ', 'ов'] +['Ġki', 'á»ĩm'] +['ยัà¸ĩ', 'à¹Ħ'] +['ยัà¸ĩà¹Ħ', 'à¸ĩ'] +['ย', 'ิà¸ĩ'] +['à¹Ĥ', 'à¸Ľ'] +['ĠполÑĥÑĩ', 'ил'] +['×Ļ×ĸ', '×Ŀ'] +['à¹ģละ', 'à¸Ħวาม'] +['Ġво', 'обÑīе'] +['ص', 'ÙĬر'] +['ãĥı', 'ãĥ³'] +['ĠاÙĦÙĤ', 'اد'] 
+['ĠاÙĦÙĤاد', 'Ùħ'] +['Ġب', 'دÙĪÙĨ'] +['ع', 'ظÙħ'] +['ת', '׳×ķ×¢'] +['×ª×ł×ķ×¢', '×Ķ'] +['Ø£', 'ÙħÙĦ'] +['ãģķ', 'ãģĪ'] +['ÑĤ', 'ем'] +['ÑĤем', 'пеÑĢ'] +['ÑĤемпеÑĢ', 'аÑĤÑĥÑĢ'] +['Ġ׾', '×Ļצ×ķר'] +['Ġr', 'ÄĻk'] +['ر', 'سÙĦ'] +['ìŀIJ', '를'] +['Ġ×Ļצ', '×Ļרת'] +['ÙĨ', 'بÙĬ'] +['Ñĩ', 'наÑı'] +['تØŃ', 'ÙĦÙĬÙĦ'] +['Ġм', 'ик'] +['Ġмик', 'ÑĢо'] +['ĠS', 'öz'] +['Ġfor', 'ça'] +['Ñģ', 'он'] +['ĠاÙĦع', 'را'] +['ĠاÙĦعرا', 'ÙĤÙĬ'] +['ĠH', 'á»ĵng'] +['ãģĻãĤĭ', 'ãģŁãĤģãģ«'] +['à¸Ĺีà¹Ī', 'à¸Ńยูà¹Ī'] +['Ġ×ķ×IJ', '×£'] +['ص', 'ÙĬد'] +['ĠìķĬ', 'ê³ł'] +['ร', 'ัà¸ĩ'] +['ĠاÙĦت', 'ÙĪØ§ØµÙĦ'] +['à¹Ģม', 'à¸ķร'] +['Ñĥ', 'ÑģÑĤÑĢой'] +['ÑĥÑģÑĤÑĢой', 'ÑģÑĤв'] +['m', 'ıyor'] +['Ġبا', 'سÙħ'] +['Ġ×ķ', '׼×ķ'] +['ĠG', 'ül'] +['á»', 'IJ'] +['Ãī', 'tat'] +['غ', 'اÙĦ'] +['Ø¥', 'ÙĨØ´'] +['Ø¥ÙĨØ´', 'اء'] +['T', 'İ'] +['à¸Ĥà¹īา', 'ม'] +['Ġtro', 'ch'] +['Ġtroch', 'ÄĻ'] +['Ø¥', 'ص'] +['إص', 'ابة'] +['ĠØ«', 'اÙĨÙĬ'] +['ĠاÙĦص', 'ØŃØ©'] +['Ġ×ĸ×Ķ', '×ķ'] +['jÄħ', 'cej'] +['ãĥĢ', 'ãĥ³'] +['ìĿ¸', 'ìĿ´'] +['Ġв', 'олоÑģ'] +['ëIJĺ', 'ë©´'] +['Ġzak', 'ÅĤad'] +['ãģĻ', 'ãģĵãģ¨'] +['以ä¸Ĭ', 'ãģ®'] +['Ġ×Ķ×ŀ×§', '×ķ×Ŀ'] +['ÙħØ´', 'اÙĩ'] +['ÙħشاÙĩ', 'دة'] +['Ñĩ', 'ив'] +['ب', 'Ø´'] +['ย', 'à¹īาย'] +['Ġsür', 'dür'] +['ĠN', 'ẵ'] +['ĠNẵ', 'ng'] +['ĠигÑĢ', 'аÑĤÑĮ'] +['Ġê·¸ëŁ¬', 'ë©´'] +['ãĥķ', 'ãĥ«'] +['ล', 'à¹Īะ'] +['Ġtend', 'rá'] +['Ġb', 'Ãły'] +['à¹Ģà¸Ľà¹ĩà¸Ļ', 'à¸ľà¸¹à¹ī'] +['Ġok', 'o'] +['Ġoko', 'ÅĤo'] +['w', 'ÅĤa'] +['wÅĤa', 'ÅĽci'] +['wÅĤaÅĽci', 'w'] +['æĢĿ', 'ãĤı'] +['ĠYa', 'ÅŁ'] +['ĠB', 'á»ĩnh'] +['íı', 'Ń'] +['بÙĬ', 'د'] +['קר', 'ף'] +['à¹Ģศ', 'ร'] +['à¹Ģศร', 'ษ'] +['à¹Ģศรษ', 'à¸IJ'] +['à¹Ģศรษà¸IJ', 'à¸ģิà¸Ī'] +['ĠاÙĦØ£', 'ÙĪØ±ÙĪ'] +['ĠاÙĦØ£ÙĪØ±ÙĪ', 'بÙĬ'] +['fl', 'äche'] +['ä¹Ĺ', 'ãĤĬ'] +['Ġb', 'á»ģn'] +['Ùĩ', 'ب'] +['æľĢ', 'ãĤĤ'] +['Ġsa', 'ç'] +['à¸Ńำ', 'à¹Ģà¸ł'] +['à¸Ńำà¹Ģà¸ł', 'à¸Ń'] +['ĠØ£', 'ج'] +['ĠاÙĦد', 'اخÙĦ'] +['ĠاÙĦداخÙĦ', 'ÙĬØ©'] +['×ĺ', '×ķ×ij'] +['ãĤĤ', 'ãģªãģı'] +['Ġли', 'ÑĨа'] +['à¹ģลà¹īว', 'à¸ģà¹ĩ'] +['×ĸ׼', '×Ļר'] +['Ġqu', 'Ãł'] +['ĠÙĥ', 'ذÙĦÙĥ'] +['صØŃ', 'Ùģ'] +['ĠÃĤ', 'u'] +['ÙĪØ¨', 'ا'] 
+['à¹Ģà¸Ľà¸¥à¸µà¹Īยà¸Ļ', 'à¹ģà¸Ľà¸¥'] +['à¹Ģà¸Ľà¸¥à¸µà¹Īยà¸Ļà¹ģà¸Ľà¸¥', 'à¸ĩ'] +['à¸ķัว', 'à¸Ńยà¹Īาà¸ĩ'] +['Ġráp', 'ida'] +['Ġtas', 'ar'] +['Ġtasar', 'ım'] +['ĠعÙĦÙĬ', 'ÙĩÙħ'] +['ס', '×ķ׾'] +['c', 'ılı'] +['cılı', 'k'] +['Ġر', 'غÙħ'] +['ìĭľ', 'íĤ¤'] +['Ġ×IJ׾', '×§'] +['Ġ×IJ׾ק', '×ĺר'] +['Ġ×IJ׾ק×ĺר', '×ķ׳×Ļ'] +['à¹ģà¸ļ', 'à¹Īà¸ĩ'] +['Ġh', 'ạng'] +['ãģ£ãģ¦', 'ãģıãĤĮ'] +['ĠÙĨ', 'تÙĬ'] +['ĠÙĨتÙĬ', 'جة'] +['ıkl', 'ı'] +['غ', 'اÙĨ'] +['à¸Ĥà¹īà¸Ń', 'à¸Ħวาม'] +['à¸Ľà¸¥', 'าย'] +['ĠØ£', 'Ùħس'] +['à¸Ĺีà¹Ī', 'à¹Ģà¸ģีà¹Īยว'] +['à¸Ĺีà¹Īà¹Ģà¸ģีà¹Īยว', 'à¸Ĥ'] +['à¸Ĺีà¹Īà¹Ģà¸ģีà¹Īยวà¸Ĥ', 'à¹īà¸Ńà¸ĩ'] +['Ġdé', 'fin'] +['Ġdéfin', 'i'] +['ÙģÙĨ', 'اد'] +['ÙģÙĨاد', 'ÙĤ'] +['à¹Ħà¸Ķà¹ī', 'วà¹Īา'] +['ãģªãģĦ', 'ãĤĪãģĨãģ«'] +['Ġpróp', 'ria'] +['ĠPh', 'át'] +['ãĤĦãģĻ', 'ãģı'] +['สวย', 'à¸ĩาม'] +['ê³ł', 'ìļĶ'] +['Ñı', 'еÑĤ'] +['ãģĭãĤĤãģĹãĤĮãģ¾ãģĽãĤĵ', 'ãģĮ'] +['تر', 'جÙħ'] +['ĠкÑĢаÑģ', 'ив'] +['Ġ×ŀ', 'ר×IJש'] +['д', 'еж'] +['ĠÙĬ', 'ÙĪÙĨ'] +['ĠÙĬÙĪÙĨ', 'ÙĬÙĪ'] +['Ñģк', 'оÑĢ'] +['ĠKas', 'ım'] +['ê³Ħ', 'ìķ½'] +['к', 'оÑģ'] +['Ġна', 'ÑĢÑĥ'] +['ĠнаÑĢÑĥ', 'ÑĪен'] +['Ġdu', 'że'] +['acc', 'ès'] +['Ġh', 'á»ĵng'] +['Ġv', 'Å©'] +['ãģĦãģŁ', 'ãģĹãģ¾ãģĻ'] +['Ġ×ĺ', '×Ļ'] +['Ġ×ĺ×Ļ', '×ķ׾'] +['lıkl', 'arı'] +['Ġqu', 'ê'] +['ëħ¸', 'ëıĻ'] +['ìķ', 'Ķ'] +['CI', 'ÃĵN'] +['Ġt', 'ắc'] +['press', 'ão'] +['ĠìŀĪ', 'ìľ¼'] +['สิà¸Ĺà¸ĺิ', 'à¹Į'] +['íĥ', 'Ħ'] +['Ġ×Ķ×ŀ', '×ŀש׾×Ķ'] +['å¬ī', 'ãģĹãģĦ'] +['ĠÄIJ', 'ặc'] +['ÙĨ', 'زÙĦ'] +['ĠдÑĢÑĥг', 'ой'] +['д', 'ÑĥÑĤ'] +['ìĪ', 'Ļ'] +['Ġth', 'ụ'] +['à¹Ģส', 'ร'] +['à¹Ģสร', 'à¹ĩ'] +['à¹Ģสรà¹ĩ', 'à¸Ī'] +['Ġto', 'plant'] +['Ġtoplant', 'ı'] +['×IJ×ŀ', 'ף'] +['×ķ׾', 'ת'] +['п', 'омн'] +['Ġyo', 'ÄŁun'] +['ÅĦsk', 'iego'] +['ì°', '©'] +['ĠØ«', 'ÙĦاث'] +['ĠØ«ÙĦاث', 'Ø©'] +['Ġl', 'ắng'] +['ë¦', '´'] +['ราà¸Ĭ', 'à¸ģาร'] +['ĠÑģлов', 'а'] +['á»', 'Ĩ'] +['à¸Ķี', 'à¸ģวà¹Īา'] +['ãģĶãģĸ', 'ãģĦãģ¾ãģĻ'] +['Ġд', 'из'] +['Ġдиз', 'айн'] +['fé', 'rence'] +['lıkl', 'ar'] +['ãģªãĤĵ', 'ãģ§ãģĻ'] +['ajÄħ', 'cy'] +['Ġëĭ¤', 'ìĸij'] +['Ġëĭ¤ìĸij', 'íķľ'] +['×§', '×Ļר'] +['ØŃ', 'ار'] +['ส', 'ูà¹ī'] +['Ġz', 'ro'] 
+['Ġzro', 'bi'] +['Ġzrobi', 'Äĩ'] +['×ŀ', '×Ļ׼×Ķ'] +['à¸Ĭà¹Īวย', 'à¹Ģหลืà¸Ń'] +['ĠÑįÑĤ', 'Ñĥ'] +['ë´', 'ī'] +['楽', 'ãģĹãģĦ'] +['س', 'ÙĪØ±'] +['íķĺ', 'ê±°ëĤĺ'] +['Ùħؤ', 'تÙħر'] +['Ġpoc', 'zÄħ'] +['ĠpoczÄħ', 'tk'] +['ĠpoczÄħtk', 'u'] +['Ġع', 'ربÙĬ'] +['اÙĦØ£', 'ر'] +['اÙĦأر', 'دÙĨ'] +['à¸Ķ', 'ร'] +['Åĵ', 'uvre'] +['ĠÙĪÙĥ', 'اÙĨت'] +['ĠÅĽ', 'redni'] +['Ø®', 'ضر'] +['Ġch', 'uyến'] +['н', 'ÑĤ'] +['ĠìķĮ', 'ê³ł'] +['Ġv', 'á»Ŀi'] +['Ġ×ij', '×Ļ×ĵ×Ļ'] +['×ŀ×ĵ', '×ķ×ijר'] +['ÙĪ', 'Ù쨱'] +['ÙĬ', 'Ø¡'] +['׳', '×Ľ×¡'] +['ĠÐĽ', 'а'] +['л', 'он'] +['Ġx', 'ấu'] +['Ùģ', 'ÙĬÙĨ'] +['Ġfé', 'vrier'] +['ĠÐŀ', 'на'] +['ĠV', 'á»ģ'] +['ĠÅŁey', 'ler'] +['ĠполÑĥÑĩ', 'ен'] +['з', 'ад'] +['Ġn', 'ét'] +['à¹Ħà¸Ľ', 'ยัà¸ĩ'] +['×Ĺש×ij', '×ķ'] +['à¸ļัà¸Ļ', 'à¸Ĺ'] +['à¸ļัà¸Ļà¸Ĺ', 'ึà¸ģ'] +['Ġgerçek', 'leÅŁ'] +['иÑĩеÑģк', 'ое'] +['ìĪĺ', 'ê°Ģ'] +['Ø«', 'بت'] +['ãģ¤', 'ãģ¾ãĤĬ'] +['ĠÑĥÑģловиÑı', 'Ñħ'] +['ëĭ¤', 'ê°Ģ'] +['ราย', 'à¹Ħà¸Ķà¹ī'] +['׼×IJ', '×ij'] +['à¹Ĥà¸Ľà¸£', 'à¹Ĥม'] +['à¹Ĥà¸Ľà¸£à¹Ĥม', 'à¸Ĭัà¹Īà¸Ļ'] +['j', 'ähr'] +['jähr', 'ige'] +['×§', '׳×Ļ×Ŀ'] +['×ŀ', '×ķ×§'] +['×ŀ×ķ×§', '×ĵ'] +['ãģ«è¡Į', 'ãģ£ãģ¦'] +['Ø¢', 'ÙĦ'] +['вед', 'ение'] +['Ġ׾', '×Ľ×ª×ķ×ij'] +['جÙħ', 'Ùĩ'] +['جÙħÙĩ', 'ÙĪØ±ÙĬØ©'] +['à¸ī', 'à¸ļ'] +['à¸īà¸ļ', 'ัà¸ļ'] +['ĠC', 'òn'] +['à¸ľ', 'สม'] +['ãģªãģ©', 'ãģĮ'] +['×IJ×Ķ', '×ij'] +['ĠдейÑģÑĤв', 'иÑı'] +['y', 'ız'] +['à¹Ħมà¹Ī', 'à¹Ģà¸Ħย'] +['ج', 'ÙĪØ²'] +['×Ķ×Ĺ׾×ĺ', '×Ķ'] +['f', 'ällt'] +['ãĥĵ', 'ãĤ¸'] +['ãĥĵãĤ¸', 'ãĥį'] +['ãĥĵãĤ¸ãĥį', 'ãĤ¹'] +['Ġ×IJ', '×Ļ׳×Ŀ'] +['ĠнаÑħод', 'иÑĤÑģÑı'] +['Ġdzi', 'ÅĽ'] +['ست', 'Ø·ÙĬع'] +['׾', '×Ļף'] +['Ø®', 'ÙĦاÙģ'] +['Ùĩ', 'ÙIJ'] +['Ġatr', 'ás'] +['íĺ', 'ģ'] +['ãĤĴ', 'ãģĶ'] +['Ġ×Ķ×ŀ', '×ķצר'] +['ĠBakan', 'lıģı'] +['ÑİÑī', 'ее'] +['ÙħÙĨ', 'اط'] +['ÙħÙĨاط', 'ÙĤ'] +['Ùģ', 'د'] +['à¸Ļำ', 'à¹Ħà¸Ľ'] +['Ġв', 'аж'] +['Ġваж', 'но'] +['Ġm', 'ạch'] +['׼', '׳×ķ'] +['بع', 'Ø«'] +['lan', 'ması'] +['Ġa', 'yr'] +['Ġayr', 'ıl'] +['ìĤ¬', 'íļĮ'] +['d', 'ÃŃa'] +['p', 'ÅĤyw'] +['اÙħ', 'ÙĬØ©'] +['íĺ', 'ľ'] +['×IJ׳', '×Ĵ׾'] +['×IJ׳×Ĵ׾', '×Ļת'] +['ĠìŀĪëĭ¤', 'ëĬĶ'] +['Ġس', 
'اعة'] +['ĠëĤĺ', 'íĥĢ'] +['b', 'ö'] +['à¸Ħ', 'ัà¸Ļ'] +['ĠdziaÅĤ', 'ania'] +['Ø©', 'Ùĭ'] +['Ġng', 'Å©'] +['׳צ', '×Ĺ'] +['ãģ¯', 'ãģĤãĤĭ'] +['ĠyaÅŁ', 'ında'] +['st', 'ück'] +['car', 'acter'] +['caracter', 'ÃŃsticas'] +['Ġr', 'á»Ńa'] +['ĠÙħختÙĦÙģ', 'Ø©'] +['ãģ«ãģĬ', 'ãģijãĤĭ'] +['à¹ģà¸ŀ', 'à¸ĩ'] +['วิ', 'à¹Īà¸ĩ'] +['ת', 'פ×ķ'] +['سا', 'ÙĩÙħ'] +['使', 'ãģĨ'] +['Ùĥ', 'رÙĬ'] +['×IJ', 'פ×Ļ'] +['........', '.......'] +['ĠÑĤак', 'им'] +['×Ļ׼', '×ķ×Ļ'] +['Ø´', 'بÙĩ'] +['ج', 'ÙĬر'] +['ãģĿãģ®', 'ãģ¾ãģ¾'] +['ac', 'jÄĻ'] +['ĠاÙĦت', 'رÙĥ'] +['ĠاÙĦترÙĥ', 'ÙĬ'] +['ĠпÑĢав', 'илÑĮно'] +['Ġت', 'عÙħÙĦ'] +['à¸ģล', 'à¹īา'] +['Ġbi', 'ên'] +['Ġ×ij׳×Ļ', '×Ļת'] +['Ġкл', 'Ñĥб'] +['Ġ×ŀ', 'ש×Ķ'] +['в', 'ÑĪий'] +['ãģĵãģ¨ãģĮãģ§ãģį', 'ãĤĭ'] +['à¸ŀัà¸Ļà¸ĺ', 'ุ'] +['à¸ŀัà¸Ļà¸ĺุ', 'à¹Į'] +['ר', '×ķ×Ŀ'] +['ĠاÙĦÙģ', 'رÙĨ'] +['ĠاÙĦÙ쨱ÙĨ', 'سÙĬ'] +['à¹Ģà¸Ľà¹ĩà¸Ļ', 'à¸Ħà¸Ļ'] +['ãģĹãģ¦', 'ãģĬãĤĬ'] +['Ġth', 'ầy'] +['ãĤĵ', 'ãģłãģijãģ©'] +['ìĶ', '¨'] +['Ùħ', 'دÙĨ'] +['ت', 'ÙĪÙĨ'] +['ĠмеÑĤ', 'ал'] +['ĠмеÑĤал', 'л'] +['Ġin', 'ÃŃcio'] +['à¸Ńà¸Ńà¸ģ', 'à¸Īาà¸ģ'] +['ëĴ', '¤'] +['Ġcu', 'á»ijn'] +['Ġbu', 'á»Ļc'] +['ÙĨ', 'سÙĬ'] +['ä', 'cht'] +['×ŀ', '×Ļ׳×Ļ×Ŀ'] +['ãģķ', 'ãģ¦'] +['ãģĮ', 'ãģ§ãģį'] +['ÑĬ', 'ем'] +['Ġtá', 'i'] +['ĠЧ', 'ÑĤ'] +['ĠЧÑĤ', 'обÑĭ'] +['à¸Ľà¸¥', 'ูà¸ģ'] +['à¸Ĭุม', 'à¸Ĭà¸Ļ'] +['н', 'Ñģкий'] +['Ġv', 'ững'] +['Ġ×Ķ', '׾×ij'] +['ë', 'le'] +['Ġש', '×¢×ijר'] +['в', 'аÑĤÑĮÑģÑı'] +['б', 'ой'] +['ع', 'ÙĪÙĨ'] +['à¹ģà¸Ķ', 'à¸Ļ'] +['Ġספר', '×Ļ×Ŀ'] +['Ġt', 'uyên'] +['Ġnhi', 'êu'] +['ĠQu', 'ý'] +['Ġh', 'uyết'] +['ãĤı', 'ãģĭãĤīãģªãģĦ'] +['Ġ×ŀ', '׼ף'] +['Ġ×Ķ', 'ק׾'] +['Ġ׾×IJ', '×ķר'] +['ĠÄIJi', 'á»ĩn'] +['Ø´', 'ؤ'] +['شؤ', 'ÙĪÙĨ'] +['Ġ×ŀ×Ĺ', 'פש'] +['ĠпоÑģÑĤоÑıн', 'но'] +['×ŀ', '×Ļר'] +['ìħ', 'Ķ'] +['Ðŀ', 'Ñģ'] +['ÐŀÑģ', 'нов'] +['×ĸ', '×Ļת'] +['ĠH', 'á'] +['ĠÑĩаÑģ', 'ов'] +['×IJ', '×ķ׾×Ļ'] +['Ġm', 'át'] +['Ø®', 'رÙĪ'] +['خرÙĪ', 'ج'] +['ÙĤ', 'ضا'] +['ÙĤضا', 'ÙĬا'] +['à¹Ģà¸Ľ', 'à¸Ńรà¹Į'] +['ĠÙĬ', 'ÙĪÙĦ'] +['ĠÙĬÙĪÙĦ', 'ÙĬÙĪ'] +['à¹Ĥà¸Ĺ', 'ษ'] +['׳', 'פ׾'] +['ת', '×ķש'] +['ת×ķש', '×ij×Ļ'] +['Ġv', 'ários'] +['×ŀ', 
'ר×IJ×Ķ'] +['ëĿ¼', 'ìĿ´'] +['ÙĨ', 'غ'] +['×ij', 'צע'] +['г', 'он'] +['ĠÄIJ', 'ược'] +['ع', 'Ùı'] +['пÑĥÑģ', 'к'] +['ĠÙĪØ§ÙĦ', 'Ùģ'] +['üc', 'ü'] +['×Ļ×§', '×Ļ×Ŀ'] +['Ġس', 'بÙĬÙĦ'] +['׾×ij', 'ף'] +['ĠاÙĦÙĤ', 'رÙĨ'] +['ס', '×ķת'] +['ĠQu', 'áºŃn'] +['ãģĵãĤĮ', 'ãģĮ'] +['ãĥĸ', 'ãĥ©ãĥ³ãĥī'] +['×Ĵ', '×ŀר'] +['Ġwarto', 'ÅĽci'] +['ĠÙĪØ¨', 'ÙĬÙĨ'] +['Ġd', 'ạ'] +['ÐIJ', 'в'] +['ÐIJв', 'ÑĤо'] +['Ġol', 'acaktır'] +['à¸Ļ', 'à¸Ĺà¹Į'] +['Ùħ', 'طار'] +['Ġ×¢', '×§×ij'] +['Ġת', 'פ'] +['ãģĹãģ¦', 'ãģĦãģ¦'] +['צ', '×ŀ×Ĺ'] +['à¸Ī', 'à¸Ńà¸ĩ'] +['Ġö', 'de'] +['ìį', '¨'] +['ÙĨ', 'اس'] +['調', 'ãģ¹'] +['ĠогÑĢ', 'омн'] +['ë³´', 'íĹĺ'] +['×ĺ', '×§'] +['×ĺ×§', 'ס×ĺ'] +['ĠbaÅŁ', 'v'] +['ĠbaÅŁv', 'uru'] +['Ġpom', 'ys'] +['Ġpomys', 'ÅĤ'] +['ãģ«', 'ä¹Ĺ'] +['Ġש', '׼ף'] +['ĠاÙĦÙħس', 'ؤÙĪÙĦ'] +['Ġз', 'ан'] +['Ġзан', 'ÑıÑĤ'] +['Ġd', 'ương'] +['ãĥĹãĥ¬', 'ãĤ¤'] +['ล', 'à¸ļ'] +['ÑĤи', 'ка'] +['ĠAr', 'alık'] +['Ġнед', 'о'] +['Ġm', 'á»Ļ'] +['Ġor', 'an'] +['Ġoran', 'ı'] +['Ġktó', 'r'] +['Ġktór', 'Äħ'] +['Ġ×Ķ×IJ×Ĺר', '×ķ׳×ķת'] +['ائ', 'ÙĨ'] +['ÅĦ', 's'] +['ÅĦs', 'ka'] +['åĽ½', 'ãģ®'] +['×ŀ', '×ĺ×Ļ'] +['ĠвопÑĢоÑģ', 'Ñĭ'] +['à¸Ńà¸ĩà¸Ħà¹Į', 'à¸ģร'] +['×ŀ', '×ķצ×IJ'] +['Ġpó', 'ź'] +['Ġpóź', 'niej'] +['ש×ŀ', '×IJ׾'] +['Ġk', 'aps'] +['Ġkaps', 'am'] +['Ġkapsam', 'ında'] +['Ġmá', 'quina'] +['ĠÅĽwie', 'cie'] +['Ġho', 'Ãłng'] +['Ġöz', 'gü'] +['×Ĵ×ķר', '×Ŀ'] +['ãģĤ', 'ãģŁãĤĬ'] +['à¸ķัà¸Ķ', 'สิà¸Ļ'] +['à¸ķัà¸Ķสิà¸Ļ', 'à¹ĥà¸Ī'] +['б', 'ÑĢи'] +['ãģ«ãģªãĤĭ', 'ãģ¨'] +['ت', 'ÙĥÙĪÙĨ'] +['Ġ×ķ×Ķ', '×Ļ×IJ'] +['Ġchi', 'ếu'] +['ÑģÑĤан', 'ав'] +['ÑģÑĤанав', 'ли'] +['ÑģÑĤанавли', 'ва'] +['×ŀ', '×ķ×Ĵ'] +['c', 'ité'] +['ĠK', 'örper'] +['Ġש', '×Ĵ×Ŀ'] +['ع', 'ظ'] +['عظ', 'ÙĬÙħ'] +['Ġ×Ķ×IJ', '×Ļש×Ļ'] +['Ġmat', 'ière'] +['ĠÙģ', 'ÙĪÙĤ'] +['Ġk', 'to'] +['Ġkto', 'ÅĽ'] +['à¸Ļ', 'à¹Ĥย'] +['à¸Ļà¹Ĥย', 'à¸ļาย'] +['å¾ħ', 'ãģ¡'] +['à¹Ģม', 'à¸Ļ'] +['à¹Ģมà¸Ļ', 'ู'] +['A', 'ÃĩÃĥO'] +['Ġt', 'ù'] +['Ġtù', 'y'] +['ãĥĪ', 'ãĥ³'] +['ĠоÑĤ', 'каз'] +['Ġ×ŀ', '×ķצר'] +['ül', 'ü'] +['ãģķãĤĵ', 'ãģ«'] +['Ġ×Ĺ', '×ķ×ij'] +['קר', '×Ļ×IJ×Ķ'] +['ĠاÙĦØ®', 'دÙħات'] 
+['ĠÙĦÙħ', 'دة'] +['ر', 'ؤ'] +['رؤ', 'ÙĬØ©'] +['ãĤĴè¦ĭ', 'ãģ¤ãģij'] +['à¸Ł', 'า'] +['Ġréuss', 'i'] +['à¸Ļัà¸ģ', 'à¹Ģรียà¸Ļ'] +['ĠÑĩиÑģ', 'л'] +['à¸ģาร', 'à¹Ģลà¹Īà¸Ļ'] +['Ġhaz', 'ırl'] +['Ġhazırl', 'an'] +['ĠпеÑĢв', 'Ñĭй'] +['ли', 'м'] +['ĠоÑĤзÑĭв', 'Ñĭ'] +['Ġwy', 'jÄħ'] +['ĠwyjÄħ', 'tk'] +['ĠØ£', 'ÙĤÙĦ'] +['ס', '×ļ'] +['Ġê²°', 'ìłķ'] +['Ġ׾×ŀ×¢', 'ש×Ķ'] +['Ġl', 'ắp'] +['à¹ģà¸ļ', 'ร'] +['à¹ģà¸ļร', 'à¸Ļà¸Ķà¹Į'] +['วà¹Īา', 'à¹Ģà¸Ľà¹ĩà¸Ļ'] +['Ġب', 'دا'] +['Ġبدا', 'ÙĬØ©'] +['ãģ¨ãģĦãģĨ', 'ãģ®ãģĮ'] +['иÑĩеÑģк', 'им'] +['à¸ģาร', 'à¸ŀัà¸Ĵà¸Ļา'] +['Ġb', 'Ãło'] +['Ġmia', 'ÅĤa'] +['y', 'waÄĩ'] +['ĠMär', 'z'] +['ĠÙĨ', 'سبة'] +['Ġéconom', 'ique'] +['×ĸ', '×ŀ'] +['×ĸ×ŀ', '׳×Ļ×Ŀ'] +['æŃ¢', 'ãĤģ'] +['Ġt', 'á»§'] +['íķĺ', 'ìĭł'] +['Ġkażde', 'go'] +['stra', 'ÃŁe'] +['à¸Ĭ', 'ีà¹ī'] +['à¹Ģ', 'à¸ļา'] +['ÑĢеÑģ', 'ÑĥÑĢÑģ'] +['ев', 'ой'] +['Ø´', 'باب'] +['à¸ķà¹Īาà¸ĩ', 'à¸Ľà¸£à¸°à¹Ģà¸Ĺศ'] +['Ġ×IJ', '×Ļש'] +['Ġ×IJ×Ļש', '×Ļת'] +['×Ļ', '×ķפ'] +['×Ļ×ķפ', '×Ļ'] +['ĠìļĶ', '구'] +['ì¡°', 'ìĤ¬'] +['ãģ£ãģŁ', 'ãĤī'] +['׾', '×Ļ×§'] +['миниÑģÑĤ', 'ÑĢ'] +['ãĤĤãģ®', 'ãģ¯'] +['Ġl', 'ương'] +['Ġна', 'и'] +['Ġнаи', 'бол'] +['Ġнаибол', 'ее'] +['íİ', 'ĺ'] +['à¹ģà¸ŀ', 'à¹ī'] +['ãĤŃ', 'ãĥ¥'] +['ĠкоÑĤоÑĢ', 'Ñĭм'] +['à¹ģà¸Ĺ', 'à¸ĩ'] +['à¹ģà¸Ĺà¸ĩ', 'à¸ļà¸Ńล'] +['Ġ׳', '×Ļ×Ķ'] +['Ġ׳×Ļ×Ķ', '×ķ׾'] +['âĤ', 'ª'] +['ĠGi', 'ải'] +['ĠиÑģполÑĮзов', 'а'] +['ëł¥', 'ìĿĦ'] +['ãģĹãģĭ', 'ãĤĤ'] +['à¸ģà¹ĩ', 'à¸ķà¹īà¸Ńà¸ĩ'] +['ĠÑĢ', 'еб'] +['ĠÑĢеб', 'ен'] +['ĠÑĢебен', 'ка'] +['ت', 'ÙĪØ§ØµÙĦ'] +['ãĤ°ãĥ«', 'ãĥ¼ãĥĹ'] +['ãĤĦ', 'ãĤī'] +['à¹Ģà¸Ľà¸´à¸Ķ', 'à¸ķัว'] +['б', 'ÑĢо'] +['ë°ĸ', 'ìĹIJ'] +['ÙĨ', 'ÙİØ§'] +['×Ķ', '×Ĵ'] +['×Ķ×Ĵ', '׳×Ķ'] +['à¸Ĺ', 'รั'] +['à¸Ĺรั', 'à¸ŀ'] +['à¸Ĺรัà¸ŀ', 'ยà¹Į'] +['Ġkh', 'á»iji'] +['עצ', '×ŀ×ķ'] +['бол', 'езн'] +['Ġë°Ľ', 'ìķĦ'] +['ม', 'à¸Ļ'] +['มà¸Ļ', 'ุ'] +['มà¸Ļุ', 'ษ'] +['มà¸Ļุษ', 'ยà¹Į'] +['âĹ', 'Ĩ'] +['×ŀ', 'צ׾×Ļ×Ĺ'] +['Ñıв', 'ление'] +['Ùħ', 'Ø·ÙĦ'] +['ÙħØ·ÙĦ', 'ÙĪØ¨'] +['Ø®', 'اÙĦÙģ'] +['ت', 'ÙĪÙĤÙģ'] +['ãģ§ãģį', 'ãģ¾ãģĽãĤĵ'] +['оÑģÑĤ', 'ей'] +['м', 'еÑĩа'] +['기', 'ëĬĶ'] +['תש', '×¢'] +['ص', 'ÙĬب'] 
+['Ġ×ij×¢', '×ķ×ĵ'] +['à¸Ĥà¸Ńà¸ĩ', 'à¹Ģà¸Ĥา'] +['ÑĤÑı', 'ж'] +['ĠÑĥ', 'пÑĢав'] +['ĠÑĥпÑĢав', 'лениÑı'] +['Ġgén', 'ér'] +['Ġth', 'ÃŃ'] +['פ', '×ļ'] +['Ġر', 'Ùħض'] +['ĠرÙħض', 'اÙĨ'] +['Ġtr', 'uyá»ĩn'] +['Ø¥', 'عداد'] +['ãĤµ', 'ãĥĿãĥ¼ãĥĪ'] +['Ġпол', 'но'] +['Ø®', 'اÙħ'] +['ÐŁ', 'еÑĤ'] +['ÐŁÐµÑĤ', 'еÑĢ'] +['ÐŁÐµÑĤеÑĢ', 'бÑĥÑĢ'] +['ÐŁÐµÑĤеÑĢбÑĥÑĢ', 'г'] +['ÙħÙĨت', 'دÙī'] +['ãģķãĤĮ', 'ãģ¾ãģĹãģŁ'] +['ĠëĮĢ', 'íķĺìŬ'] +['à¸ľà¸¹à¹ī', 'à¸Ĺีà¹Ī'] +['Ġ×ŀ×IJ', '×ķ'] +['׾', '׳×ĵ'] +['оÑĩ', 'нÑĭе'] +['ĠнаÑĩ', 'ала'] +['Ġ׾', '×Ļ׾×ĵ×Ļ×Ŀ'] +['ов', 'ое'] +['ãģĻãĤĭãģĵãģ¨', 'ãģ§'] +['ĠاÙĦÙĨ', 'Ùģ'] +['ĠاÙĦÙĨÙģ', 'Ø·'] +['ìŀĪ', 'ëĬĶ'] +['غ', 'ÙĨÙĬ'] +['פ', '×ĵ'] +['ãĤ', '¾'] +['ĠCr', 'é'] +['ãģ©', 'ãģ¡ãĤī'] +['Ø«', 'اÙĨ'] +['ÑĢаб', 'аÑĤ'] +['ÑĢабаÑĤ', 'Ñĭва'] +['Ġê°Ļ', 'ëĭ¤'] +['à¸Ī', 'ั'] +['à¸Īั', 'à¸ģร'] +['Ġch', 'ụ'] +['Ġchụ', 'p'] +['Ġм', 'аÑģÑĤ'] +['ĠмаÑģÑĤ', 'еÑĢ'] +['Ġn', 'ắm'] +['ĠÑģÑĤ', 'али'] +['Ġ×Ķ×IJ', '×Ļר×ķ×¢'] +['ãĤ½', 'ãĥ³'] +['åĪĨ', 'ãģĭãĤĬ'] +['Ø·', 'بع'] +['بد', 'ا'] +['gr', 'áfico'] +['г', 'еÑĢ'] +['à¸Ķำà¹Ģà¸Ļิà¸Ļ', 'à¸ģาร'] +['Ġsal', 'dır'] +['Ġsaldır', 'ı'] +['в', 'ÑĪиÑħ'] +['ãģĭãģ£ãģŁ', 'ãģ§ãģĻ'] +['Ġyapı', 'yor'] +['ĠاÙĦÙģ', 'ت'] +['צר', 'פת'] +['з', 'доÑĢов'] +['×ij×¢', '׾'] +['Ġ×IJ', '×ŀ×Ļת×Ļ'] +['Ġоб', 'Ñĭ'] +['ĠобÑĭ', 'Ñĩ'] +['ĠобÑĭÑĩ', 'но'] +['Ġ׾', '×ķ×ŀר'] +['ت', 'ÙĥÙĨ'] +['تÙĥÙĨ', 'ÙĪÙĦÙĪØ¬'] +['تÙĥÙĨÙĪÙĦÙĪØ¬', 'ÙĬا'] +['Ġhakk', 'ı'] +['ĠÑĢаÐ', '²'] +['ĠÑĢав', 'но'] +['رÙĬ', 'Ùĥ'] +['Ġ×ij', '×ŀ×Ļ×ĵ'] +['Ġ×ij×ŀ×Ļ×ĵ', '×Ķ'] +['à¹ģà¸ģ', 'à¹īว'] +['Ġìĸ', 'ĺ'] +['Ġìĸĺ', '기'] +['ãģĹãģ¦', 'ãģĦãģ¾ãģĹãģŁ'] +['Ġkı', 'sm'] +['Ġkısm', 'ı'] +['ê±', '¸'] +['åĨħ', 'ãģ®'] +['ì§', 'ķ'] +['à¹Ģหมืà¸Ńà¸Ļ', 'à¸ģัà¸Ļ'] +['ĠÙģ', 'ÙIJ'] +['ĠÙģÙIJ', 'ÙĬ'] +['ÙĤ', 'اعدة'] +['Ġmoż', 'esz'] +['Ùħ', 'صاÙĦ'] +['ÙħصاÙĦ', 'ØŃ'] +['ãģ¾ãģŁ', 'ãģ¯'] +['б', 'ег'] +['Ġs', 'ıc'] +['Ġsıc', 'ak'] +['Ñĩ', 'иÑģ'] +['ÑĩиÑģ', 'лен'] +['Ġн', 'ог'] +['ãĥģãĥ£', 'ãĥ³'] +['ãĥ«', 'ãĥī'] +['Ġgi', 'ó'] +['Ġs', 'ını'] +['Ġsını', 'f'] +['ив', 'аÑĤÑĮ'] +['Ġqu', 'ên'] +['Ġì', 'łģ'] +['Ġìłģ', 'ìļ©'] +['ĠJo', 
'ão'] +['Ùģ', 'اد'] +['ĠGl', 'ück'] +['à¸Ĺ', 'à¸Ńà¸Ķ'] +['Ġg', 'ói'] +['ï¼', 'Ĭ'] +['Ġdé', 'tail'] +['ĠدÙĬ', 'سÙħ'] +['ĠدÙĬسÙħ', 'بر'] +['ë¡ľ', 'ìĦľ'] +['×ŀ', '×ķ×Ĺ'] +['à¹Ħ', 'ฮ'] +['ĠоÑĤ', 'д'] +['ĠоÑĤд', 'ÑĭÑħ'] +['Ġkh', 'uyến'] +['à¸Ħ', 'à¸Ńย'] +['Ġج', 'ÙĨÙĬ'] +['ĠجÙĨÙĬ', 'Ùĩ'] +['ĠاÙĦد', 'ÙģØ§Ø¹'] +['à¸Ļà¹īำ', 'หà¸Ļัà¸ģ'] +['ĠìĤ¬ëŀĮ', 'ëĵ¤ìĿ´'] +['Ġth', 'ừa'] +['ĠÃ¶ÄŁrenc', 'i'] +['ĠпомоÑī', 'и'] +['ĠczÄĻ', 'ÅĽÄĩ'] +['ש', '×ĺר'] +['ĠN', 'hi'] +['ĠNhi', 'á»ģu'] +['׳', 'צ×Ļ'] +['ĠнаÑĪ', 'ем'] +['ĠkarÅŁÄ±', 'laÅŁ'] +['Ġ×Ķש', '׳×Ļ×Ŀ'] +['ĠÄIJ', 'ưá»Ŀng'] +['Ġtr', 'ú'] +['ĠÑĢазлиÑĩ', 'нÑĭÑħ'] +['ĠاÙĦØ´', 'Ùĩر'] +['Ġ×ľ×¢', '×ķ׾×Ŀ'] +['ØŃ', 'جر'] +['ĠÄij', 'á»ķ'] +['ĠìĿĺ', 'íķ´'] +['à¸ļ', 'à¹Īà¸Ńย'] +['Ġ×Ķ', '×Ļ׾×ĵ'] +['ãģ¨ãģª', 'ãģ£ãģŁ'] +['Ġ×Ĺ×ķ', '×ķת'] +['Ġש×Ļר×ķת', '×Ļ'] +['Äħ', 'cy'] +['س', 'رÙĬ'] +['K', 'İ'] +['פ', '׳×ķ'] +['ÑģÑĤÑĢÑĥк', 'ÑĤÑĥÑĢ'] +['ÑĤ', 'ÑĢÑĥд'] +['Ġ×Ķ', 'קר'] +['Ġ×Ķקר', '×ķ×ij'] +['Ġth', 'áºŃm'] +['èģŀ', 'ãģį'] +['ÙĤÙĪ', 'ÙĬ'] +['клÑİÑĩ', 'ен'] +['ÑĤе', 'Ñħ'] +['ÑĤеÑħ', 'нолог'] +['è¡Į', 'ãģ£ãģŁ'] +['Ġ×ķ×IJ', '×Ļף'] +['ĠÅŁek', 'lin'] +['ĠÅŁeklin', 'de'] +['r', 'ô'] +['ÑĢ', 'ог'] +['Ġнов', 'Ñĭе'] +['Ġס', '×ij×Ļ×ij'] +['Ġtecn', 'ologÃŃa'] +['ס', '׼'] +['×¡×Ľ', '×ķ×Ŀ'] +['ĠÅŀ', 'ub'] +['ĠÅŀub', 'at'] +['Ġ×Ķ×ŀ', '׾×IJ'] +['Ġwy', 'pos'] +['Ġwypos', 'aż'] +['ãģ¯', 'ä½ķ'] +['ãĤ¬', 'ãĥ³'] +['ê°', 'ĸ'] +['Ġкак', 'ие'] +['Ġçocuk', 'lar'] +['Ġ׾צ', '×ĵ'] +['Ġkay', 'ıt'] +['ĠмеÑģÑĤ', 'е'] +['Ùħ', 'دÙĬÙĨØ©'] +['Ġ׼', '×Ĵ'] +['Ġ׼×Ĵ', '×ķף'] +['ãģĹãģ¦', 'ãĤĭ'] +['ĠÙħا', 'ÙĬÙĪ'] +['ãģ£ãģ¦ãģĹãģ¾', 'ãģ£ãģŁ'] +['ĠпÑĢогÑĢамм', 'Ñĭ'] +['à¹ģล', 'à¸Ļà¸Ķà¹Į'] +['ãĥ¯', 'ãĤ¤'] +['ער', '×ķ×¥'] +['Ñģ', 'ид'] +['ĠB', 'öyle'] +['Ġì²ĺ', 'ìĿĮ'] +['Ġת', 'פק×Ļ×ĵ'] +['ĠTr', 'ên'] +['íĥ', 'Ī'] +['ĠÐłÐ¾ÑģÑģ', 'ий'] +['ĠÐłÐ¾ÑģÑģий', 'Ñģкой'] +['Ġs', 'Ãłn'] +['Ġrè', 'gle'] +['ĠyaklaÅŁ', 'ık'] +['à¹Ģล', 'ิà¸ģ'] +['Ġد', 'ائÙħ'] +['Ġ×ķ', '×Ĵ'] +['اب', 'ر'] +['Ġb', 'è'] +['ĠاÙĦ', 'ÙĤدÙħ'] +['ĠÑĢеÑĪ', 'ениÑı'] +['hi', 'ên'] +['ÑĤи', 'к'] +['Ä', 'Ħ'] +['à¸ļรร', 'ยาà¸ģ'] +['à¸ļรรยาà¸ģ', 
'าศ'] +['רצ', '×ķף'] +['åĭķ', 'ãģį'] +['ĠGä', 'ste'] +['Ġ기', '본'] +['ĠÙĬ', 'عرÙģ'] +['ĠS', 'á»Ń'] +['gÅĤ', 'ÄĻb'] +['à¹Ģà¸Ń', 'ส'] +['×IJ×ŀ', '×Ļף'] +['Ġп', 'Ñĥнк'] +['ĠпÑĥнк', 'ÑĤ'] +['Ġ×Ļ×ķ×ĵ', '×¢×Ļ×Ŀ'] +['ãĤ«', 'ãĥ©ãĥ¼'] +['Ġ×ijס', '×ĵר'] +['Ġbu', 'á»ĵn'] +['й', 'ÑĤ'] +['йÑĤ', 'еÑģÑĮ'] +['ãĤĴ', 'æ±ĤãĤģ'] +['Ġ×IJת', '׼×Ŀ'] +['Ġ모', '르'] +['ظ', 'رÙĪÙģ'] +['Ñĩ', 'еÑģÑĤво'] +['ìĸ´', 'ìĦľ'] +['Ġод', 'на'] +['Ġkap', 'ı'] +['Ġëħ¸', 'ëł¥'] +['ĠKü', 'che'] +['ĠاÙĦت', 'Ø´'] +['Ø·', 'ÙĬب'] +['ĠíĬ¹', 'íŀĪ'] +['ĠвÑĭп', 'ÑĥÑģ'] +['ĠвÑĭпÑĥÑģ', 'к'] +['×ĵ', 'ת×Ļ'] +['Ġu', 'ÄŁ'] +['ĠuÄŁ', 'ra'] +['ائ', 'Ùĩا'] +['Ġtho', 'át'] +['ãģª', 'ãĤĤãģ®'] +['Ñij', 'ÑĢ'] +['기', 'ê°Ģ'] +['ĠgeliÅŁ', 'me'] +['تØŃ', 'ÙĤ'] +['تØŃÙĤ', 'ÙĤ'] +['Ġоп', 'аÑģ'] +['б', 'ÑĢоÑģ'] +['ห', 'ุ'] +['หุ', 'à¹īà¸Ļ'] +['ì¼', 'Ģ'] +['ãĤ¹', 'ãĥŀ'] +['ãĤ¹ãĥŀ', 'ãĥĽ'] +['Ø£', 'Ù쨱'] +['Ø£Ù쨱', 'اد'] +['ĠTh', 'á»±c'] +['Ġth', 'ắ'] +['ãĥªãĥ³', 'ãĤ¯'] +['Ġni', 'á»ģm'] +['ĠHö', 'he'] +['عÙħ', 'ار'] +['ÙĥÙĪØ±', 'ÙĪÙĨ'] +['ÙĥÙĪØ±ÙĪÙĨ', 'ا'] +['ĠÄIJ', 'ến'] +['ĠÑģам', 'ом'] +['ĠÑĤ', 'еле'] +['ĠÄijo', 'án'] +['à¸Ħวามà¸Ħิà¸Ķ', 'à¹Ģหà¹ĩà¸Ļ'] +['Ġд', 'иÑģк'] +['Ø£', 'Ø·Ù쨧ÙĦ'] +['ม', 'ารà¹Į'] +['à¸Ĺ', 'หาร'] +['à¸Ĺ', 'à¸Ļ'] +['Ġب', 'عÙĬد'] +['ĠاÙĦÙĩ', 'ÙĨد'] +['åĩº', 'ãģĹãģ¦'] +['Ġkar', 'de'] +['Ġkarde', 'ÅŁ'] +['×Ķ×Ļס×ĺ', '×ķר'] +['×Ķ×Ļס×ĺ×ķר', '×Ļ×Ķ'] +['éģ¸', 'ãģ³'] +['ع', 'اÙħÙĦ'] +['à¸Ĥ', 'ยาย'] +['Ġtü', 'rl'] +['Ġtürl', 'ü'] +['ĠìĿ¼', 'ìĿ´'] +['Ġmaté', 'ria'] +['Ġ׼׾', '×ķ×ŀר'] +['ãĥģãĥ£', 'ãĥ¼'] +['جÙħ', 'اعة'] +['ĠÑģво', 'им'] +['Ø¥ÙĤ', 'اÙħØ©'] +['ä¾ĭ', 'ãģĪãģ°'] +['س', 'اب'] +['Ø¢', 'خر'] +['ÙĤ', 'دÙĬر'] +['×IJ×ŀ', '×Ļ'] +['ìĸ', '»'] +['Ġ׳×ķס', 'פת'] +['ĠÐĴ', 'лад'] +['ĠÐĴлад', 'им'] +['ĠÐĴладим', 'иÑĢ'] +['Ġest', 'ará'] +['ãģĵãģĨ', 'ãģĦãģĨ'] +['ãĤĴ', '使ç͍'] +['มา', 'à¸ķร'] +['มาà¸ķร', 'à¸IJาà¸Ļ'] +['ãģ£ãģ', '½'] +['Ġn', 'ú'] +['Ġnú', 'i'] +['ย', 'าà¸ĩ'] +['ĠاÙĦج', 'ÙĨس'] +['Ġüst', 'ün'] +['ëľ', '»'] +['ãĤ»', 'ãĥ«'] +['ãģ¦ãģĦ', 'ãģįãģ¾ãģĻ'] +['Ġ×Ĺ', '×ķ×ĸ'] +['Ġ×Ĺ×ķ×ĸ', 'ר'] +['ĠÐĵ', 'лав'] +['à¹Ĥà¸Ĭ', 'à¸Ħ'] +['íı', 
'IJ'] +['ÙĨت', 'ظر'] +['Ġ×Ĵ', '×ij×Ļ'] +['ع', 'ÙĤب'] +['int', 'ér'] +['intér', 'êt'] +['×ŀ', 'פ×Ĵ'] +['×ŀפ×Ĵ', 'ש'] +['Ġth', 'ù'] +['اÙģ', 'ت'] +['Ġ×ŀש', 'פ'] +['Ġ×ŀשפ', '×ĺ×Ļ'] +['ĠÙħ', 'ÙĪØ§ÙĤع'] +['è¦', 'ļ'] +['è¦ļ', 'ãģĪ'] +['×ĵ', '×Ļף'] +['à¹Ģรืà¹Īà¸Ńà¸ĩ', 'ราว'] +['ãģ¾', 'ãģĤ'] +['Ġgh', 'ế'] +['иÑĢÑĥ', 'ÑİÑĤ'] +['à¸ģ', 'ว'] +['à¸ģว', 'à¹īาà¸ĩ'] +['Ġпов', 'еÑĢ'] +['ĠповеÑĢ', 'Ñħ'] +['ĠповеÑĢÑħ', 'ноÑģÑĤ'] +['׳', '×ĵר'] +['Ġкон', 'ÑĨе'] +['Ġдолж', 'на'] +['Ġ×Ļש', '×Ļר'] +['acaģı', 'z'] +['ìĹ', 'Ķ'] +['Ġn', 'ÃŃvel'] +['Ġö', 'r'] +['Ġör', 'nek'] +['Ùĥ', 'Ùģ'] +['ĠФедеÑĢ', 'аÑĨии'] +['Ġ구', 'ìĦ±'] +['หัว', 'à¹ĥà¸Ī'] +['ĠV', 'áºŃy'] +['м', 'ед'] +['мед', 'и'] +['меди', 'ÑĨин'] +['медиÑĨин', 'Ñģк'] +['از', 'ÙĬ'] +['×Ĵ×ij', '×ķ׾'] +['ÑĦ', 'ÑĢ'] +['Ġzus', 'ätzlich'] +['à¸ģ', 'à¸ģ'] +['ĠاÙĦاÙĤتصاد', 'ÙĬØ©'] +['Ġh', 'è'] +['lu', 'ÄŁun'] +['ج', 'Ùİ'] +['à¹Ħà¸Ł', 'ลà¹Į'] +['ÄIJ', 'T'] +['ãģĿãģ®', 'ä»ĸ'] +['à¸Ĺิ', 'à¹īà¸ĩ'] +['ĠاÙĦØ£', 'ÙĪ'] +['ر', 'سÙħ'] +['æ°Ĺ', 'ãģ¥'] +['ìĿ´', 'ë©°'] +['ÑĮ', 'ев'] +['ص', 'Ø·'] +['ĠاÙĦاست', 'Ø«'] +['ĠاÙĦاستث', 'Ùħار'] +['à¸Ńา', 'à¸Ħาร'] +['ĠÑĤоÑĩ', 'но'] +['ĠV', 'ân'] +['à¸Ń', 'ร'] +['à¸Ńร', 'à¹Īà¸Ńย'] +['ĠاÙĦس', 'ÙĨØ©'] +['Ġc', 'Æ°á»Ľi'] +['×Ļ×Ķ', 'ף'] +['íį', '¼'] +['話', 'ãģĹ'] +['âĹ', 'ĭ'] +['ĠìķĬ', 'ìĿĢ'] +['ãĥ¡', 'ãĥ¼ãĤ'] +['ãĥ¡ãĥ¼ãĤ', '«'] +['ãĥ¡ãĥ¼ãĤ«', 'ãĥ¼'] +['ĠÑĤеп', 'ло'] +['å½¼', 'ãĤī'] +['Ġİ', 'z'] +['Ġİz', 'mir'] +['íĻ', 'į'] +['Ġr', 'ượ'] +['Ġrượ', 'u'] +['æĢĿãģĦ', 'åĩº'] +['ĠPh', 'ạm'] +['Ġchá', 'u'] +['צ×Ļ', '×ķת'] +['ĠìĿ¼', '본'] +['ìĤ¬', 'ëĬĶ'] +['ĠÑģозд', 'ан'] +['Ġar', 'acı'] +['Ġ×¢', 'ר'] +['Ġער', '×Ļ׼×Ķ'] +['ĠíķĺëĤĺëĭĺ', 'ìĿĺ'] +['dzi', 'ÅĤ'] +['à¸Ľà¸£à¸°', 'à¸ĺาà¸Ļ'] +['Ġser', 'ÃŃa'] +['ĠìŀĪ', 'ëıĦë¡Ŀ'] +['در', 'ج'] +['íķľëĭ¤', 'ëĬĶ'] +['à¸Ńา', 'à¸Ĺ'] +['à¸Ńาà¸Ĺ', 'ิà¸ķ'] +['à¸Ńาà¸Ĺิà¸ķ', 'ยà¹Į'] +['ÑĤелÑĮ', 'нÑĭй'] +['ĠØ®', 'دÙħات'] +['×ŀ׳', '×ĺ'] +['Ġl', 'ược'] +['ĠS', 'Ãłi'] +['ĠÙĪ', 'اض'] +['ĠÙĪØ§Ø¶', 'ØŃ'] +['غ', 'از'] +['ĠdoÄŁ', 'al'] +['Ġ×ijש', '×Ŀ'] +['Ġд', 'лин'] +['ĠØ¥', 'طار'] +['Ġ×ijס', 'פר'] +['ãĤĴ', 
'ä¸İ'] +['ãĤĴä¸İ', 'ãģĪ'] +['Ġë²ķ', 'ë¥ł'] +['ĠÑĥ', 'вели'] +['ĠÑĥвели', 'Ñĩи'] +['ส', 'à¹Ħà¸ķ'] +['สà¹Ħà¸ķ', 'ลà¹Į'] +['à¹Ħ', 'à¸ģล'] +['×ij×Ĺ', 'ף'] +['ĠìĿ´', 'íĽĦ'] +['Ġm', 'unic'] +['Ġmunic', 'ÃŃpio'] +['تÙħ', 'Ø«ÙĦ'] +['ĠÄij', 'áo'] +['H', 'ôtel'] +['Ġl', 'á»Ńa'] +['ĠÄij', 'ẳng'] +['Ñĩ', 'ки'] +['Ø´', 'رÙĪ'] +['شرÙĪ', 'Ø·'] +['ĠìĿ´', '를'] +['ÙĬ', 'Ùĭا'] +['×ŀ׾', '×ļ'] +['×ŀ×Ķ', '×Ļר×ķת'] +['ĠобÑıз', 'аÑĤелÑĮ'] +['ĠобÑıзаÑĤелÑĮ', 'но'] +['é', 'nergie'] +['Ġmud', 'ança'] +['Ġm', 'ụ'] +['Ġmụ', 'n'] +['Ġn', 'º'] +['ĠاÙĦت', 'عا'] +['ĠاÙĦتعا', 'ÙĪÙĨ'] +['ĠاÙĦاجتÙħاع', 'ÙĬØ©'] +['Ġп', 'лаÑģÑĤ'] +['Ġëĵ±', 'ìĿĺ'] +['ãĥIJãĤ¤', 'ãĤ¯'] +['Ùĩج', 'ÙĪÙħ'] +['ĠSa', 'úde'] +['Ġì¤ijìļĶ', 'íķľ'] +['Ġ×Ķצ', '×Ļ×ij×ķר'] +['תק', 'ף'] +['ĠاÙĦعاÙĦÙħ', 'ÙĬ'] +['ĠболÑĮÑĪ', 'ой'] +['ĠÙĥ', 'ÙĦÙħ'] +['ĠÙĥÙĦÙħ', 'Ø©'] +['ãģ®ãģ§ãģ¯ãģªãģĦ', 'ãģ§ãģĹãĤĩãģĨãģĭ'] +['ĠÙħ', 'باراة'] +['Ġש×IJ', '׳'] +['Ġש×IJ׳', '×Ĺ׳×ķ'] +['ãĤ¹ãĤ¿', 'ãĤ¤ãĥ«'] +['ĠSa', 'ÄŁ'] +['ĠSaÄŁ', 'lık'] +['Ġh', 'ư'] +['׳', '×Ĺ×Ķ'] +['Ġ×ij', 'קר×ij'] +['Ø·', 'عÙħ'] +['ห', 'ิà¸Ļ'] +['à¸Ĺุà¸ģ', 'วัà¸Ļ'] +['à¸Ħรัà¹īà¸ĩ', 'à¸Ĺีà¹Ī'] +['ĠlÃł', 'nh'] +['Ġdonn', 'é'] +['ãģĽ', 'ãģĦ'] +['جز', 'ÙĬرة'] +['доÑĢ', 'ож'] +['ì¼', 'ľ'] +['تÙĨظ', 'ÙĬÙģ'] +['ãĥģ', 'ãĥ§'] +['Ġald', 'ıģı'] +['ج', 'اج'] +['ĠÑĤ', 'омÑĥ'] +['à¸Ľ', 'ิ'] +['Ġ×ijר', 'שת'] +['ãģıãģªãĤĬ', 'ãģ¾ãģĻ'] +['ĠпÑĢин', 'ÑĨип'] +['Ġ×Ĺ', '׾×ķ'] +['ëı', '¼'] +['×ķ×Ĵ', 'ש'] +['س', 'س'] +['à¸Ľ', 'ู'] +['Ġh', 'ầu'] +['æĦŁãģĺ', 'ãĤĭ'] +['ï¼', '´'] +['د', 'ÙĪØ§'] +['ĠÑģм', 'ог'] +['scri', 'ção'] +['Ġth', 'áºŃn'] +['Ġר', '×ķ×IJ×Ķ'] +['обÑĢаж', 'ен'] +['ĠاÙĦتج', 'ارÙĬØ©'] +['Ø·', 'بÙĬع'] +['jÄħc', 'Äħ'] +['íĸī', 'ìľĦ'] +['Ġнов', 'Ñĭй'] +['Ġ×ŀ', '×Ĺ×ĵש'] +['æĮ¯', 'ãĤĬ'] +['gu', 'é'] +['Ġ×IJ', '×Ļר×ķ×¢'] +['Ġ×IJ×Ļר×ķ×¢', '×Ļ×Ŀ'] +['ĠاÙĦ', 'ذÙĩب'] +['×ĵ', '×IJ'] +['ت', 'اÙĨ'] +['ãģł', 'ãģĹ'] +['à¸Ńั', 'à¸ķรา'] +['à¹Ĥ', 'à¸Ī'] +['بÙĦ', 'اد'] +['×Ķ×Ļ', '×Ļ׳×ķ'] +['ĠÑģп', 'е'] +['ĠÑģпе', 'ÑĨиалÑĮно'] +['ĠÅĽwi', 'ata'] +['ãĤĵãģ§ãģĻ', 'ãĤĪ'] +['شر', 'ÙĥØ©'] +['ĠpÅĤ', 'yt'] +['Ġsitu', 'é'] +['Ġ׼×IJ', 
'׾×Ķ'] +['ס', '×ijר'] +['Ġkaż', 'd'] +['Ġkażd', 'ym'] +['ãĤĴæĮģ', 'ãģ¤'] +['׾×Ķ', '׾'] +['׾×Ķ׾', 'ף'] +['ĠwÅĤ', 'as'] +['ĠwÅĤas', 'ne'] +['ĠsaÄŁ', 'lan'] +['×ŀ×¢', '׾×Ķ'] +['ĠاÙĦا', 'ÙĪÙĦ'] +['ìĹIJìĦľ', 'ëıĦ'] +['×IJ×Ļר', '×ķפ×Ķ'] +['تÙĤ', 'ÙĨÙĬØ©'] +['Ùħ', 'ائ'] +['Ùħائ', 'Ø©'] +['Ġcompañ', 'ÃŃa'] +['Ġsü', 'rek'] +['Ġsürek', 'li'] +['ĠиÑģ', 'кÑĥÑģ'] +['ĠиÑģкÑĥÑģ', 'ÑģÑĤв'] +['ĠB', 'ürger'] +['ת', '×Ĺר'] +['ת×Ĺר', '×ķת'] +['à¸ŀรà¹īà¸Ńม', 'à¸ģัà¸ļ'] +['Ø´', 'Ùħ'] +['à¸ĸืà¸Ń', 'วà¹Īา'] +['è¾¼', 'ãĤĢ'] +['ä¼ij', 'ãģ¿'] +['ĠاÙĦØ£', 'ب'] +['ĠÑģÑĤоим', 'оÑģÑĤÑĮ'] +['ĠпÑĢав', 'а'] +['may', 'ın'] +['ห', 'วย'] +['ĠاÙĦØ·', 'بÙĬعÙĬ'] +['à¸Ĺีà¹Ī', 'à¸ŀัà¸ģ'] +['ĠEst', 'á'] +['Ñĭва', 'ÑİÑĤ'] +['ب', 'سÙĬ'] +['بسÙĬ', 'Ø·'] +['Ġ×ij×¢', '×ijר'] +['åı¯èĥ½', 'ãģ§ãģĻ'] +['Ġ×ĵ', '×ķ׾'] +['Ġ×ĵ×ķ׾', 'ר'] +['Ùĩ', 'ÙİØ§'] +['воÑĢ', 'оÑĤ'] +['ãģ¦', 'ãģĦãģ¾ãģĹãģŁ'] +['à¹Ĥà¸Ĺร', 'ศ'] +['à¹Ĥà¸Ĺรศ', 'ั'] +['à¹Ĥà¸Ĺรศั', 'à¸ŀ'] +['à¹Ĥà¸Ĺรศัà¸ŀ', 'à¸Ĺà¹Į'] +['Ġ×§', '׳'] +['ĠاÙĦØ«', 'ÙĨ'] +['ĠاÙĦØ«ÙĨ', 'ائÙĬØ©'] +['Ġco', 'ût'] +['à¸ķิà¸Ķ', 'à¸ķัà¹īà¸ĩ'] +['Ġö', 'rg'] +['Ġörg', 'üt'] +['ĠاÙĦØ®', 'ÙĦÙĬ'] +['ĠاÙĦØ®ÙĦÙĬ', 'ج'] +['Ġb', 'á»įn'] +['×ķ׾×ķ×Ĵ', '×Ļ'] +['ëŀ', 'ľ'] +['ĠÐij', 'олÑĮ'] +['ĠÐijолÑĮ', 'ÑĪ'] +['×Ĵ', '×ijר×Ļ×Ŀ'] +['ÙĤ', 'ÙĬد'] +['×ij×Ļ×ĺ', '×ķ×Ļ'] +['æīĵ', 'ãģ¡'] +['Ġol', 'muÅŁ'] +['f', 'äh'] +['fäh', 'ig'] +['ล', 'าà¸Ļ'] +['ĠÙĤ', 'طر'] +['ש', 'פ×Ķ'] +['èªŃ', 'ãĤĵãģ§'] +['à¸Ĥ', 'วา'] +['Ġchi', 'ếm'] +['ãĤ¤ãĥ³', 'ãĤ¿'] +['ãĤ¤ãĥ³ãĤ¿', 'ãĥ¼ãĥ'] +['ãĤ¤ãĥ³ãĤ¿ãĥ¼ãĥ', 'į'] +['ãĤ¤ãĥ³ãĤ¿ãĥ¼ãĥį', 'ãĥĥãĥĪ'] +['Ġ׾ש×ŀ', '×ķר'] +['Ġت', 'رÙĥ'] +['ĠترÙĥ', 'ÙĬا'] +['ר', '×ķ×ĺ'] +['ã썿ĢĿ', 'ãģĦãģ¾ãģĹãģŁ'] +['ĠاÙĦت', 'ÙĤ'] +['Ġd', 'ư'] +['ãģ¦ãģıãĤĮ', 'ãĤĭ'] +['ãģĹãģŁ', 'ãģĵãģ¨'] +['Ġróż', 'ne'] +['ĠاÙĦØ·', 'ÙģÙĦ'] +['ĠPost', 'é'] +['Ġ×ŀש', '×ķ×Ŀ'] +['Ñį', 'ÑĢ'] +['ĠÑĢабоÑĤ', 'аеÑĤ'] +['ãĤ·', 'ãĥª'] +['ãĤ·ãĥª', 'ãĥ¼ãĤº'] +['Ġ×ij×Ķ', '×Ĺ׾×ĺ'] +['×§×Ķ', '×Ļ׾×Ķ'] +['ãĤ«', 'ãĥ¡'] +['ãĤ«ãĥ¡', 'ãĥ©'] +['ï¼', '¯'] +['ĠìĤ¬', 'ìĿ´'] +['Ġk', 'ì'] +['Ġth', 'Æ°á»Ľc'] +['ض', 'بط'] +['ÙĤب', 'ÙĪÙĦ'] +['åĪ¥', 'ãģ®'] 
+['Ġparticul', 'ière'] +['ĠÑģво', 'ем'] +['Ġ×¢', 'סק'] +['Ġעסק', '×Ļ×Ŀ'] +['×ij×Ĺ', '×Ļר×ķת'] +['×ij', '×Ļ׳×ķ'] +['à¸ĭ', 'à¸Ń'] +['Ġ×¢', '×ķ×ijר'] +['ãģłãģ£ãģŁ', 'ãģ®ãģ§'] +['ıld', 'ıģı'] +['Ùħ', 'دار'] +['Ùħدار', 'س'] +['주', 'ìĭľ'] +['à¸Ńา', 'ศ'] +['à¸Ńาศ', 'ัย'] +['Ġt', 'ấm'] +['à¸ŀิ', 'à¸Ī'] +['à¸ŀิà¸Ī', 'าร'] +['à¸ŀิà¸Īาร', 'à¸ĵา'] +['ÑĤелÑĮ', 'нÑĭе'] +['Ñģк', 'ÑĥÑİ'] +['Ðľ', 'Ðĺ'] +['à¹Ģà¸ģ', 'า'] +['à¹Ģà¸ģา', 'หล'] +['à¹Ģà¸ģาหล', 'ี'] +['×ĵ', '×Ĺ'] +['à¹Ģà¸Ĭ', 'ิà¸ĩ'] +['Ġد', 'ÙĤÙĬÙĤØ©'] +['íķĻ', 'ìĥĿ'] +['Ġש×IJ', '׾×Ķ'] +['Ġcontr', 'ôle'] +['Ġsit', 'uação'] +['à¸Ĥà¸Ńà¸ĩ', 'à¸ľà¸¹à¹ī'] +['ÙĨ', 'Ø·ÙĤ'] +['ê³¼', 'íķĻ'] +['หลาย', 'à¸Ħà¸Ļ'] +['Ġn', 'ắng'] +['ÙĤ', 'Ùı'] +['ì¡°', 'ê±´'] +['Ñ', 'ķ'] +['ãĥĥ', 'ãģ¨'] +['×ŀ', '×Ļ׾×Ķ'] +['Gr', 'ün'] +['×Ļ', '×Ļ×¢'] +['×Ļ×Ļ×¢', '×ķ×¥'] +['×ŀ׳', '׼'] +['ë', 'ŃIJ'] +['×ŀ×¢', '×ŀ×ĵ'] +['สำ', 'à¸Ļัà¸ģ'] +['ج', 'دد'] +['à¸Ħ', 'ัà¸Ķ'] +['Ġ×Ķ×ŀש', 'פ'] +['Ġ×Ķ×ŀשפ', '×Ĺ×Ķ'] +['×ŀש', 'ק׾'] +['ÙĦ', 'Ùı'] +['Ġty', 'tu'] +['Ġtytu', 'ÅĤ'] +['ÑĪ', 'ей'] +['ĠìĿ¼', 'ë¶Ģ'] +['ÑĪ', 'ение'] +['Ġph', 'óng'] +['ĠìĹŃ', 'ìĤ¬'] +['ãĤ«', 'ãĥ³'] +['Ġtú', 'i'] +['ĠÙĨ', 'ÙĪÙģ'] +['ĠÙĨÙĪÙģ', 'Ùħبر'] +['gr', 'ün'] +['ĠاÙĦØ´', 'ÙħاÙĦ'] +['ÅĽwi', 'adc'] +['ÅĽwiadc', 'zenie'] +['ער', '×Ķ'] +['Ġ×¢', '×ķ×ij'] +['Ġ×¢×ķ×ij', '×ĵ×Ļ×Ŀ'] +['×ĵ×ķ×Ĵ', '×ŀ×IJ'] +['ä»Ĭ', 'ãģ¯'] +['Ġv', 'ão'] +['ĠТ', 'ем'] +['Ñģ', 'илÑĮ'] +['Ġch', 'ợ'] +['Ùħ', 'را'] +['Ùħرا', 'ÙĤب'] +['à¹Ħมà¹Ī', 'รูà¹ī'] +['Ġر', 'ائع'] +['×IJ׳', '×Ĺ׳×ķ'] +['สà¹Īà¸ĩ', 'à¹Ģสริม'] +['צ', '×Ĺ'] +['ĠìŀĪìĸ´', 'ìĦľ'] +['Ġkur', 'ulu'] +['Ġkurulu', 'ÅŁ'] +['ĠÃĸ', 'zellik'] +['ĠÃĸzellik', 'le'] +['Ġת', '×Ļ×§'] +['Ġgh', 'é'] +['Ġspr', 'zÄĻ'] +['ĠsprzÄĻ', 't'] +['ער', '×ķת'] +['را', 'ØŃØ©'] +['ãģ£', 'ãģį'] +['ãģ£ãģį', 'ãĤĬ'] +['ĠìķĦ', 'ëŀĺ'] +['stit', 'uição'] +['Ġдолж', 'но'] +['×Ķ', 'רש'] +['×Ķרש', '×ŀ×Ķ'] +['×Ķ׾', '×ļ'] +['ãģ¡', 'ãģª'] +['ãģ¡ãģª', 'ãģ¿'] +['ãģ¡ãģªãģ¿', 'ãģ«'] +['פ', '×Ĺ×ĵ'] +['ĠاÙĦج', 'ÙħÙĬع'] +['×ij×¢', '׾×Ļ'] +['Ġtr', 'ùng'] +['Ġפ', 'ת×Ĺ'] +['×ŀ׾×Ĺ', '×ŀת'] +['ãĥĨ', 'ãĥ¼ãĥ'] +['ãĥĨãĥ¼ãĥ', 
'ŀ'] +['Ùħ', 'تاب'] +['Ùħتاب', 'عة'] +['Ġ모', 'ìĬµ'] +['ÙĬ', 'ص'] +['åIJĪ', 'ãģĨ'] +['ĠY', 'ap'] +['ĠYap', 'ı'] +['ĠÑģ', 'казаÑĤÑĮ'] +['ëª', '°'] +['à¸Ĺีà¹Ī', 'สำà¸Ħัà¸į'] +['ĠìĹĨ', 'ìĬµëĭĪëĭ¤'] +['Ġnh', 'ắc'] +['Ġülk', 'eler'] +['Ġмног', 'ие'] +['íķĺ', 'ìħ¨'] +['มาà¸ģ', 'à¸Ĺีà¹Īสุà¸Ķ'] +['à¸ģ', 'à¹īา'] +['à¸ģà¹īา', 'ว'] +['Ġİ', 'yi'] +['л', 'еж'] +['леж', 'а'] +['ãĤ¸', 'ãĥ§'] +['à¸Ĺั', 'à¸ŀ'] +['ا', 'ÙĪØ±'] +['Ġ×Ĺ×ijר', '×Ļ'] +['Ġ׾', 'ש×Ŀ'] +['ì²', '«'] +['ĠT', 'á»Ń'] +['×ŀ', '×ķ׳×Ļ'] +['ÙĤ', 'ÙĪØ¯'] +['à¸ģระ', 'à¹Ģà¸Ľ'] +['à¸ģระà¹Ģà¸Ľ', 'à¹ĭ'] +['à¸ģระà¹Ģà¸Ľà¹ĭ', 'า'] +['ĠпÑĢоблем', 'Ñĭ'] +['Ġaç', 'ıs'] +['Ġaçıs', 'ından'] +['Ġ×Ķ×ŀ', '׼'] +['ĠÙħع', 'ظÙħ'] +['ÙĤÙĬ', 'اس'] +['ĠпÑĢод', 'олж'] +['ĠпÑĢодолж', 'а'] +['Ġver', 'diÄŁi'] +['ĠпÑĢед', 'меÑĤ'] +['ãģĦãģ¾ãģĻ', 'ãģĮ'] +['ĠëͰ', '른'] +['ĠاÙĦ', 'ÙĤÙĬاÙħ'] +['ĠØ¥ÙĦÙĬ', 'Ùĩا'] +['Т', 'ÐIJ'] +['п', 'оз'] +['ãĤ·', 'ãĥ¥'] +['ä¸ĬãģĮ', 'ãĤĬ'] +['à¹Ģà¸Ķิม', 'à¸ŀัà¸Ļ'] +['à¸ģุ', 'ล'] +['ØŃر', 'ÙĬØ©'] +['×§×ij×ķצ', '×ķת'] +['ë¯', '¿'] +['ĠاÙĦÙħ', 'ÙĨا'] +['ĠاÙĦÙħÙĨا', 'Ø·ÙĤ'] +['ĠвÑĭп', 'ол'] +['ĠвÑĭпол', 'нÑı'] +['ãĥĭ', 'ãĤ¢'] +['Ġê²°', 'êµŃ'] +['×Ĺ', '×ķ×ŀ'] +['×Ĺ×ķ×ŀ', 'ר×Ļ×Ŀ'] +['ĠУкÑĢа', 'инÑĭ'] +['ห', 'à¸Ńม'] +['ר', '×Ļס'] +['ĠÑħоÑĤ', 'ел'] +['ĠобÑĢаз', 'ованиÑı'] +['Ġkh', 'ẳng'] +['Ġm', 'ưa'] +['Ġgör', 'me'] +['Ġgüç', 'lü'] +['سع', 'Ùī'] +['มัà¹Īà¸Ļ', 'à¹ĥà¸Ī'] +['íķĺ', 'ê²łìĬµëĭĪëĭ¤'] +['Ġпол', 'Ñĥ'] +['Ġfün', 'f'] +['ã썿ĢĿ', 'ãģ£ãģ¦ãģĦãģ¾ãģĻ'] +['Ġê·¸ê²ĥ', 'ìĿĢ'] +['ĠdÃ¼ÅŁÃ¼n', 'ce'] +['ìŀ', 'ł'] +['ĠH', 'Æ°á»Ľng'] +['ĠTi', 'á»ĥu'] +['Ġç', 'ift'] +['ãģij', 'ãģ°'] +['à¸Īà¸Ļ', 'à¸ĸึà¸ĩ'] +['à¸Ĺำ', 'à¹Ħà¸Ķà¹ī'] +['ĠìŀIJ', 'ì²´'] +['Ġd', 'õ'] +['Ġdõ', 'i'] +['à¸Ī', 'ัà¸Ļ'] +['à¸Īัà¸Ļ', 'à¸Ĺ'] +['à¸Īัà¸Ļà¸Ĺ', 'รà¹Į'] +['ece', 'ÄŁini'] +['׳×ķ×¢', 'ר'] +['غ', 'ار'] +['ĠاÙĦØ£ÙħرÙĬ', 'ÙĥÙĬ'] +['داع', 'Ø´'] +['ĠбезопаÑģ', 'ноÑģÑĤи'] +['Ġб', 'Ñİ'] +['ĠбÑİ', 'дж'] +['ĠбÑİдж', 'еÑĤ'] +['ãĥĬ', 'ãĤ¤'] +['à¸ŀà¸ļ', 'วà¹Īา'] +['da', 'ÄŁ'] +['×IJ', '×ķפף'] +['íĹ', 'Į'] +['ãĥĢãĤ¤', 'ãĤ¨'] +['ãĥĢãĤ¤ãĤ¨', 'ãĥĥãĥĪ'] +['ĠëĮĢ', 'íĨµ'] 
+['ĠëĮĢíĨµ', 'ëł¹'] +['D', 'İ'] +['Ø£', 'ØŃداث'] +['ĠA', 'ÄŁ'] +['ĠAÄŁ', 'ust'] +['ĠAÄŁust', 'os'] +['ØŃÙĦ', 'ÙĪÙĦ'] +['Ġw', 'ÅĽ'] +['ĠwÅĽ', 'ród'] +['ĠÑģо', 'оÑĤвеÑĤ'] +['ĠÑģооÑĤвеÑĤ', 'ÑģÑĤв'] +['ĠÑģооÑĤвеÑĤÑģÑĤв', 'ии'] +['ĠLu', 'áºŃt'] +['Ġ׼׾', 'פ×Ļ'] +['Ġв', 'еÑī'] +['ĠвеÑī', 'еÑģÑĤв'] +['×§', '×Ļ×¥'] +['ĠبÙĩ', 'ذا'] +['عا', 'Ø´'] +['à¹Ģà¸Ľà¹ĩà¸Ļ', 'à¹Ģรืà¹Īà¸Ńà¸ĩ'] +['Т', 'Ðķ'] +['Ġ×ij×IJ', '×Ļ׳×ĺר׳×ĺ'] +['س', 'عد'] +['Ġ×Ķ×ĺ', '×Ļפ×ķ׾'] +['פ', '×Ļס'] +['à¸ĩà¹Īาย', 'à¹Ĩ'] +['ĠGer', 'ät'] +['׾', '×Ļ×ĵ×Ķ'] +['ĠÑĢ', 'иÑģк'] +['׾ק', '×Ĺ'] +['н', 'наÑı'] +['ר', '×Ļ×ĵ'] +['п', 'ÑĢакÑĤи'] +['пÑĢакÑĤи', 'к'] +['à¸Ĥัà¹īà¸Ļ', 'à¸ķà¸Ńà¸Ļ'] +['à¸Ļà¹Īา', 'รัà¸ģ'] +['larınız', 'ı'] +['à¸Ńà¸Ļุ', 'à¸įา'] +['à¸Ńà¸Ļุà¸įา', 'à¸ķ'] +['ĠzdjÄĻ', 'cia'] +['Ġb', 'ây'] +['Ñģ', 'ÑĢ'] +['ÑģÑĢ', 'оÑĩ'] +['ãĥĭ', 'ãĥ³ãĤ°'] +['Ġö', 'ner'] +['Ġöner', 'i'] +['Ġнов', 'ÑĭÑħ'] +['دع', 'ÙĪØ©'] +['Ġg', 'ắn'] +['ĠاÙĦÙĦ', 'بÙĨ'] +['ĠاÙĦÙĦبÙĨ', 'اÙĨÙĬ'] +['ãĥĨãĤ£', 'ãĥ¼'] +['Ġص', 'ØŃÙĬØŃ'] +['ем', 'ÑĭÑħ'] +['çĸ²', 'ãĤĮ'] +['ĠпÑĢо', 'иÑģ'] +['ĠпÑĢоиÑģ', 'ÑħодиÑĤ'] +['ส', 'à¸ķิ'] +['ĠT', 'ết'] +['Ġ×Ķ׾', '׾×ķ'] +['à¹Ģรืà¹Īà¸Ńà¸ĩ', 'à¸Ļีà¹ī'] +['×ŀ×ij', '׳×Ķ'] +['Ġconte', 'údo'] +['Ġا', 'خت'] +['Ġاخت', 'ÙĬار'] +['Ùħ', 'سÙĦ'] +['ÙħسÙĦ', 'سÙĦ'] +['ëı', 'Ī'] +['Ġ׾', '×Ļ×ĵ'] +['à¸ŀิ', 'à¸ĺี'] +['ĠÑģов', 'Ñģ'] +['ĠÑģовÑģ', 'ем'] +['ãģĮãģĤãĤĬ', 'ãģ¾ãģĹãģŁ'] +['Ġsó', 'ng'] +['Ø¥', 'صÙĦاØŃ'] +['ë§', 'ģ'] +['Ùģ', 'ÙĬر'] +['ĠJe', 'żeli'] +['ìłľ', 'ëıĦ'] +['d', 'ÅĤug'] +['ìĥģ', 'ìĿĦ'] +['Ġc', 'áºŃn'] +['Ġhá»į', 'p'] +['Ø£', 'ست'] +['أست', 'اذ'] +['Ġ×ŀ', '×Ļש×Ķ'] +['Ġ×ŀ×Ļש×Ķ', '×ķ'] +['Ġd', 'Ãły'] +['Ġch', 'Ãłng'] +['ãģ¡ãĤĥãĤĵ', 'ãģ¨'] +['ĠÄij', 'ám'] +['Ġsw', 'ój'] +['Ġpoder', 'á'] +['ĠоÑĤлиÑĩ', 'а'] +['Ġpéri', 'ode'] +['ünd', 'ig'] +['×ĺ×¢', 'ף'] +['ÑģÑĤÑĢо', 'иÑĤелÑĮ'] +['ר', 'ת×Ļ'] +['Ġ×Ļ×Ķ', '×Ļ×ķ'] +['׾', 'ס'] +['ĠاÙĦÙħÙĨ', 'زÙĦ'] +['à¸Ļิ', 'à¹īว'] +['иÑĦ', 'ика'] +['иÑĦика', 'ÑĨи'] +['ðŁĺ', 'ī'] +['Ġad', 'ına'] +['ãĢĤãĢĤ', 'ãĢĤ'] +['×IJ', '×Ļף'] +['ס', '×Ļר'] +['ĠÙĬ', 'عد'] +['çŃĶ', 'ãģĪ'] +['اÙĦ', 'جز'] 
+['اÙĦجز', 'ائر'] +['енÑĮ', 'к'] +['ร', 'ห'] +['รห', 'ัส'] +['ĠTürk', 'çe'] +['ê¾', '¸'] +['Ġ×Ļ', '×ķ׼׾'] +['Ġש', '×ķ׳×Ķ'] +['Ġ×ij×ŀ', 'צ×ij'] +['ĠдейÑģÑĤв', 'иÑĤелÑĮно'] +['ĠبأÙĨ', 'Ùĩ'] +['×ŀ×§', '×ĵ'] +['Ġ×Ķש', '×§'] +['Ø®ÙĬ', 'ارات'] +['Ġf', 'ı'] +['Ġfı', 'rs'] +['Ġfırs', 'at'] +['ëij', 'ĺ'] +['ĠìĦľ', 'ìļ¸'] +['Ġ×Ķ×Ĵ', '×ķ×£'] +['ر', 'عا'] +['رعا', 'ÙĬØ©'] +['ĠK', 'ết'] +['к', 'Ñģи'] +['ĠÑĥÑģлÑĥг', 'и'] +['ноÑģÑĤ', 'ей'] +['ìļ´', 'ëıĻ'] +['ĠобÑĬ', 'Ñı'] +['ĠобÑĬÑı', 'вл'] +['н', 'еж'] +['×Ķפ', '×ļ'] +['Ġ×ij×¢', '×Ļ׳×Ļ'] +['ëĨ', 'Ĵ'] +['ĠпÑĢоÑĨ', 'ед'] +['ĠпÑĢоÑĨед', 'ÑĥÑĢ'] +['Ġiht', 'iy'] +['Ġihtiy', 'acı'] +['Ġë°Ķ', 'ëŀį'] +['Ġë°Ķëŀį', 'ëĭĪëĭ¤'] +['à¸ģล', 'ัว'] +['ĠÑģл', 'ожно'] +['×§×Ļ', '×Ļ×ŀת'] +['ĠÄIJ', 'ình'] +['ĠÙħ', 'ÙĦÙģ'] +['Ġà¹Ĥà¸Ķย', 'มี'] +['Ġkat', 'kı'] +['تØŃ', 'ÙĪÙĬÙĦ'] +['à¹Ħ', 'à¸ŀ'] +['ĠH', 'á»į'] +['ñ', 'e'] +['Ġдо', 'Ñħод'] +['Ġtho', 'ải'] +['íķĺìŬ', 'ìķ¼'] +['ãĤ¹ãĥĿ', 'ãĥ¼ãĥ'] +['ãĤ¹ãĥĿãĥ¼ãĥ', 'Ħ'] +['ĠG', 'òn'] +['Ġk', 'è'] +['Ġkè', 'm'] +['é̲', 'ãĤģ'] +['ãĤ¹', 'ãĥ¼ãĥ'] +['ãĤ¹ãĥ¼ãĥ', 'ij'] +['ãĤ¹ãĥ¼ãĥij', 'ãĥ¼'] +['ĠgiÃł', 'u'] +['ĠØ¥', 'عادة'] +['Ġ׾', '×ķ×§'] +['Ġ׾×ķ×§', '×Ĺ'] +['ĠÑħоÑĩ', 'еÑĤ'] +['×ĺ', '׾×ķ×ķ'] +['×ĺ׾×ķ×ķ', '×Ļ×ĸ'] +['×ĺ׾×ķ×ķ×Ļ×ĸ', '×Ļ×Ķ'] +['Ġth', 'uyết'] +['ãģĿãĤĮ', 'ãģ§'] +['Ġvard', 'ı'] +['à¹Ħร', 'à¹ī'] +['ع', 'بد'] +['ĠRep', 'ública'] +['ãĥ¼ãĤ¿', 'ãĥ¼'] +['Ġ×ŀ×IJ', '×ķת'] +['à¹Ħà¸Ľ', 'à¹ģลà¹īว'] +['Ġyapıl', 'acak'] +['ãĤ¹ãĤ¿', 'ãĥ¼ãĥĪ'] +['ãģ»', 'ãģ¼'] +['Ġko', 'ÅŁ'] +['ĠмаÑĤ', 'еÑĢи'] +['Ġsiè', 'cle'] +['ĠاÙĦÙħ', 'ختÙĦÙģ'] +['ĠاÙĦÙħختÙĦÙģ', 'Ø©'] +['Ġ׾ק', 'ר×IJ'] +['Ġ׾קר×IJ', 'ת'] +['Ġ×Ķפ', '×ķ×¢×ľ'] +['Ġt', 'òa'] +['Ġr', 'Æ¡i'] +['åij¨', 'ãĤĬ'] +['à¸Ŀ', 'à¸Ļ'] +['j', 'ÅĽÄĩ'] +['ĠìķĬ', 'ìĿĦ'] +['اÙĨت', 'ÙĤاÙĦ'] +['ëĸ', 'ł'] +['ив', 'аеÑĤ'] +['ãĥĪ', 'ãĥ«'] +['ĠاÙĦÙģÙĦسطÙĬÙĨ', 'ÙĬØ©'] +['à¸ģลà¹Īาว', 'วà¹Īา'] +['ا', 'Ùĥت'] +['ĠÃĸ', 'l'] +['ĠÑĢе', 'ÑĪи'] +['ĠÑĢеÑĪи', 'л'] +['Ġ׳×ķס', 'פ×ķת'] +['Ġìłķ', 'ì¹ĺ'] +['вл', 'еÑĩен'] +['Ùħر', 'ØŃÙĦØ©'] +['Ġcome', 'ça'] +['Ġy', 'ık'] +['ìĤ', '´'] +['à¸ĺ', 'à¸Ļา'] +['à¸ĺà¸Ļา', 
'à¸Ħาร'] +['à¸Ńà¸Ļ', 'า'] +['à¸Ńà¸Ļา', 'à¸Ħ'] +['à¸Ńà¸Ļาà¸Ħ', 'à¸ķ'] +['Ġpeque', 'ña'] +['ä»ķ', 'äºĭãĤĴ'] +['Ġب', 'ذÙĦÙĥ'] +['Ġнов', 'ого'] +['ãģĹãģ¦', 'ãģĦãģªãģĦ'] +['ĠاÙĦÙħ', 'ÙĬاÙĩ'] +['à¸ģà¹ĩ', 'à¹Ģà¸Ľà¹ĩà¸Ļ'] +['Ġж', 'ÑĥÑĢ'] +['ĠжÑĥÑĢ', 'нал'] +['в', 'еÑģ'] +['خت', 'ار'] +['Ġ매', 'ìļ°'] +['ĠM', 'ã'] +['ĠавÑĤомаÑĤ', 'Ñĭ'] +['ضع', 'Ùģ'] +['ĠاÙĦÙģ', 'Ùĥر'] +['ãģ§ãģĻ', 'ãģ®ãģ§'] +['ãĥ¡ãĥ³', 'ãĥIJãĥ¼'] +['Ġк', 'ÑĢÑĥг'] +['ĠاÙĦسÙĦ', 'طة'] +['à¸Ħรัà¹īà¸ĩ', 'à¹ģรà¸ģ'] +['à¸ģระà¸Ĺ', 'รว'] +['à¸ģระà¸Ĺรว', 'à¸ĩ'] +['ÑĨ', 'ов'] +['éķ·', 'ãģĦ'] +['大ãģį', 'ãģĦ'] +['Ġgeç', 'miÅŁ'] +['ìĦ±', 'ìĿ´'] +['Ġצר', '×Ļ׼×Ķ'] +['Ġм', 'оÑī'] +['ĠмоÑī', 'н'] +['Ġ×§', '×Ļש'] +['Ġ×§×Ļש', '×ķר×Ļ×Ŀ'] +['ĠNas', 'ıl'] +['г', 'ÑĢан'] +['Ġ×ŀ', '×ķצר×Ļ×Ŀ'] +['Ġ×ŀס', '×ķ×Ĵ'] +['Ġy', 'ür'] +['Ġyür', 'üt'] +['Ġ׾×Ĺ', 'צ×ķ'] +['×ķÖ', '¼'] +['ĠìŀĪ', 'ìĹĪëĭ¤'] +['Ġter', 'ör'] +['ĠTh', 'ương'] +['ĠÙĪ', 'ÙĬÙħ'] +['ĠÙĪÙĬÙħ', 'ÙĥÙĨ'] +['ج', 'ÙĪÙĨ'] +['ĠÙĪØºÙĬر', 'Ùĩا'] +['×ŀ', 'פ×ķ'] +['×Ĵ×ķר', '×ŀ×Ļ×Ŀ'] +['׼×ij', '×Ļש'] +['ĠاÙĦÙĦ', 'غ'] +['ĠاÙĦÙĦغ', 'Ø©'] +['شر', 'Ùĥ'] +['ĠاÙĦر', 'اب'] +['ĠاÙĦراب', 'ع'] +['ĠпÑĢ', 'ек'] +['ĠпÑĢек', 'ÑĢаÑģ'] +['ĠпÑĢекÑĢаÑģ', 'н'] +['Ġenerg', 'ÃŃa'] +['×§×ĵ', '×ŀ×Ļ'] +['ãģıãģª', 'ãģ£ãģŁ'] +['ĠÄij', 'ứ'] +['ĠÄijứ', 'a'] +['Serv', 'i'] +['Servi', 'ço'] +['Ġkald', 'ır'] +['åĥį', 'ãģį'] +['Ġод', 'еж'] +['Ġодеж', 'д'] +['물', 'ìĿĦ'] +['ãģĿãģĨ', 'ãģ§'] +['ãģĮãģĤ', 'ãĤĮãģ°'] +['ìĻ', 'ķ'] +['צ×ĵ', '×§'] +['Ġart', 'ır'] +['Ġile', 'ti'] +['Ġileti', 'ÅŁim'] +['ãĤĪãģĨ', 'ãģ§'] +['ãĥĪ', 'ãĥ¼'] +['ãĤ¢', 'ãĥĭ'] +['ãĤ¢ãĥĭ', 'ãĥ¡'] +['×ĺ×Ļ', '×Ļ׾'] +['ãĥķ', 'ãĥªãĥ¼'] +['ãĥĿ', 'ãĥ³'] +['ÐŁÑĢ', 'о'] +['Ġع', 'اÙĦÙĬØ©'] +['ĠÃ¶ÄŁ', 'ret'] +['ĠÃ¶ÄŁret', 'men'] +['ĠкаÑĩеÑģÑĤв', 'а'] +['Ġ×Ķ×ĺ', '×ij×¢'] +['Ġзна', 'Ñİ'] +['ãģ¦', 'ãģıãĤĭ'] +['Ġm', 'ừng'] +['ÙħÙĪ', 'ت'] +['ש', '×ķ×ŀר'] +['×Ĺ׾', '×ij'] +['Ġwzgl', 'ÄĻ'] +['ĠwzglÄĻ', 'du'] +['ë²Ī', '째'] +['Ġtá»', 'ĵ'] +['Ġtá»ĵ', 'n'] +['ãĥ¯ãĥ¼', 'ãĤ¯'] +['Ġpo', 'życz'] +['Ġpożycz', 'k'] +['×Ļ', '×ķצר×Ļ×Ŀ'] +['Ùĥر', 'Ùħ'] +['Ġг', 'аÑĢ'] +['ĠгаÑĢ', 'ан'] 
+['ĠгаÑĢан', 'ÑĤи'] +['ล', 'à¹īาà¸ĩ'] +['Ġìĺģ', 'íĻĶ'] +['×ĺ', '×Ļס'] +['Ġth', 'ẻ'] +['ĠìŀĪëĭ¤', 'ê³ł'] +['اÙĦت', 'ز'] +['اÙĦتز', 'اÙħ'] +['Ġна', 'ÑĪи'] +['is', 'ée'] +['ãģĵãĤĮ', 'ãĤĴ'] +['Ġm', 'ẽ'] +['ض', 'ÙĦ'] +['بÙĪ', 'ت'] +['Ġ׼', '׼×Ķ'] +['h', 'ợ'] +['ĠاÙĦس', 'ÙĪØ±ÙĬØ©'] +['Ġ×ľ×¢', '×ķ×ŀ'] +['Ġ×ľ×¢×ķ×ŀ', 'ת'] +['ĠbaÅŁ', 'ar'] +['ĠbaÅŁar', 'ılı'] +['е', 'ÑģÑĤÑĮ'] +['à¸Ħร', 'ี'] +['à¸Ħรี', 'ม'] +['ĠìłĦ', 'ì²´'] +['ĠسÙĬ', 'ÙĥÙĪÙĨ'] +['Ġ×ŀ×ĵ', '×ķ×¢'] +['ĠëķĮ문', 'ìĿ´ëĭ¤'] +['Ġc', 'ứng'] +['ger', 'ät'] +['Ġм', 'иÑĢ'] +['ĠмиÑĢ', 'е'] +['ĠÙĥÙĬÙģ', 'ÙĬØ©'] +['Ġפר', '×ĺ×Ļ×Ŀ'] +['Ġgo', 'ÅĽci'] +['иÑĤ', 'еÑģÑĮ'] +['ÑĥÑĪ', 'ки'] +['ؤ', 'ÙħÙĨ'] +['Ġ×IJ', '׼ף'] +['ĠاÙĦر', 'جÙĦ'] +['Ġl', 'á»įc'] +['à¹Ģรีย', 'à¸ģวà¹Īา'] +['ãģĵãģ®', 'ãĤĪãģĨãģª'] +['ë§Į', 'íģ¼'] +['Ġп', 'еÑĩ'] +['ÙĪÙĦ', 'ات'] +['ĠÃľ', 'ye'] +['liÄŁ', 'inde'] +['à¸Ħะ', 'à¹ģà¸Ļ'] +['à¸Ħะà¹ģà¸Ļ', 'à¸Ļ'] +['ãĤĭãģĵãģ¨', 'ãģ¯'] +['วิ', 'à¹Ģà¸Ħร'] +['วิà¹Ģà¸Ħร', 'าะ'] +['วิà¹Ģà¸Ħราะ', 'หà¹Į'] +['Ġвозмож', 'ноÑģÑĤи'] +['ĠاÙĦÙĨ', 'ساء'] +['ãĥīãĥ©', 'ãĥŀ'] +['Ġgü', 'c'] +['Ġgüc', 'ü'] +['Ġt', 'ưá»Ŀng'] +['Ġacomp', 'aña'] +['ãĤ¤', 'ãĥ©'] +['×§', 'צ×ij'] +['ĠY', 'ö'] +['ĠYö', 'net'] +['ĠYönet', 'im'] +['สัม', 'à¸ľ'] +['à¸ªà¸±à¸¡à¸ľ', 'ัส'] +['à¸Ļ', 'าม'] +['ĠÄij', 'ợi'] +['à¹ģหà¹Īà¸ĩ', 'à¸Ĭาà¸ķิ'] +['ãģĿãĤĮ', 'ãģ§ãĤĤ'] +['ät', 'ig'] +['ת', '×ķ×Ŀ'] +['ĠbaÅŁ', 'lat'] +['ĠвÑģ', 'ей'] +['ת', '×Ļ×§'] +['ת×Ļ×§', '×ķף'] +['ĠNg', 'ô'] +['ĠGesch', 'ä'] +['ĠGeschä', 'fts'] +['Ø£', 'Ùħ'] +['Ø£Ùħ', 'راض'] +['à¹Ģà¸Ĺ', 'à¸Ħà¸Ļ'] +['à¹Ģà¸Ĺà¸Ħà¸Ļ', 'ิ'] +['à¹Ģà¸Ĺà¸Ħà¸Ļิ', 'à¸Ħ'] +['Ġм', 'енÑĮ'] +['ĠменÑĮ', 'ÑĪе'] +['Ġöl', 'ç'] +['Ġölç', 'ü'] +['ĠÙĬ', 'جعÙĦ'] +['ĠÄij', 'ỡ'] +['ש', '×Ļ׾'] +['ש×Ļ׾', '×ķ×ij'] +['ĠGr', 'Ã¶ÃŁe'] +['ĠÙĩ', 'اتÙģ'] +['รà¹īาà¸Ļ', 'à¸Ńาหาร'] +['×Ķ׾', '×Ļ׼'] +['×Ķ׾×Ļ׼', '×Ļ'] +['иÑĢÑĥ', 'ÑİÑī'] +['èĭ¥', 'ãģĦ'] +['ĠÃĸ', 'zel'] +['ãģĦãģŁ', 'ãĤī'] +['à¸Ħำ', 'à¸ĸาม'] +['Ġzosta', 'ÅĤy'] +['Ġ×Ķס', '×Ļפ×ķר'] +['×Ķ', '×ķ׾'] +['×Ķ×ķ׾', '×ļ'] +['à¹Ģà¸Ĭà¹Īà¸Ļ', 'à¸ģัà¸Ļ'] +['à¹Ĥ', 'à¸Ĩ'] +['à¹Ĥà¸Ĩ', 'ษ'] +['à¹Ĥà¸Ĩษ', 'à¸ĵา'] 
+['×IJר', 'צ×ķת'] +['×Ĵר', 'פ×Ļ'] +['Ġao', 'ût'] +['ĠÙĬ', 'رÙĬد'] +['ت', 'ÙĪØ¬'] +['تÙĪØ¬', 'ÙĬÙĩ'] +['ĠÑįÑĤ', 'ап'] +['ãĤ¹ãĤ¿', 'ãĥ³'] +['Ġkr', 'ó'] +['Ġkró', 'tk'] +['ãĤĴ使', 'ãģĨ'] +['ì', '·¨'] +['éĸ¢', 'ãĤı'] +['à¸Ķà¹īวย', 'à¸Ħวาม'] +['à¸Ļำ', 'à¹Ģสà¸Ļà¸Ń'] +['Ġa', 'yrıca'] +['à¸Ī', 'à¹īาà¸ĩ'] +['ĠÑĦоÑĤ', 'огÑĢаÑĦ'] +['Ġв', 'еÑĩ'] +['ĠвеÑĩ', 'еÑĢ'] +['åĩº', 'ãģĹãģŁ'] +['ĠÐ¥', 'о'] +['Ġ×ŀ', 'ר×Ĵ×Ļש'] +['à¹ĥหà¹ī', 'à¹Ģà¸Ľà¹ĩà¸Ļ'] +['ãĤĴ', '缮'] +['ãĤĴ缮', 'æĮĩ'] +['׾', '×ŀ×Ļ×Ŀ'] +['nÄħ', 'ÅĤ'] +['ĠÑģÑĤ', 'анд'] +['ĠÑģÑĤанд', 'аÑĢÑĤ'] +['ĠSü', 'd'] +['ĠT', 'âm'] +['اخت', 'بار'] +['à¹Ģà¸ģ', 'à¸Ńรà¹Į'] +['Ùħس', 'رØŃ'] +['Ġbi', 'á»ĩn'] +['ب', 'Ùı'] +['Ġص', 'اÙĦ'] +['ĠصاÙĦ', 'ØŃ'] +['ĠPh', 'ụ'] +['íľ', '´'] +['ãĥ¬ãĥĵ', 'ãĥ¥ãĥ¼'] +['Ġbụ', 'ng'] +['Ġrég', 'ime'] +['ĠØ£', 'Ø´Ùĩر'] +['ĠÑĢабоÑĤ', 'ник'] +['à¸Ŀ', 'ัà¸Ļ'] +['اع', 'تÙħ'] +['اعتÙħ', 'اد'] +['Ġзам', 'еÑĤ'] +['ãģ¾', 'ãģ£ãģ¦'] +['Ġch', 'ặt'] +['æĿ¥', 'ãĤĭ'] +['ĠاÙĦÙĤ', 'ÙĪØ§Øª'] +['ãģ«åħ¥', 'ãģ£ãģ¦'] +['تØŃ', 'اÙĦÙģ'] +['Ùħ', 'زÙĬد'] +['ĠÙĬ', 'صÙĦ'] +['ìĹ', '¼'] +['à¹Ģà¸Ĭ', 'à¹ĩ'] +['à¹Ģà¸Ĭà¹ĩ', 'à¸Ħ'] +['Ġk', 'á»ĭ'] +['Ġká»ĭ', 'p'] +['ĠìķĦ', 'ì§ģ'] +['×IJ׳', '×Ĵ'] +['Ġобла', 'ÑģÑĤÑĮ'] +['Ġpomoc', 'Äħ'] +['Ġ×ķ', 'ש׾'] +['ëĵł', 'ì§Ģ'] +['ĠGi', 'ám'] +['ĠSt', 'ück'] +['Ġchá', 'y'] +['ĠëĤĺ', 'ìĺ¤'] +['ש', '×Ļ×ĺת'] +['×ŀ×ĵ', 'ר'] +['×ŀ×ĵר', '×Ļ×ļ'] +['Ġsüre', 'ç'] +['к', 'ва'] +['×ij׾', '×Ļ×Ŀ'] +['×Ķ', 'ת×Ļ'] +['×Ķת×Ļ', '×Ļ×Ĺס'] +['ÙĤب', 'اÙĦ'] +['Ġס', '×ķ×Ĵ'] +['Ġס×ķ×Ĵ', '×Ļ'] +['ÑģÑĤ', 'олÑĮ'] +['ä½ķ', 'ãĤĤ'] +['×ĸ׼', '×ķר'] +['è²·', 'ãģĨ'] +['å®ī', 'ãģı'] +['à¸Ħรัà¹īà¸ĩ', 'à¸Ļีà¹ī'] +['kö', 'p'] +['ĠÑģеÑĢ', 'виÑģ'] +['оÑĩ', 'нÑĭÑħ'] +['ê±°', 'ëŀĺ'] +['تأ', 'Ùĥ'] +['تأÙĥ', 'ÙĬد'] +['×ĵ', '׾ק'] +['Ġпо', 'Ñĩем'] +['ĠпоÑĩем', 'Ñĥ'] +['пиÑģ', 'аÑĤÑĮ'] +['×ij', 'שר'] +['ĠH', 'Ãłng'] +['ĠT', 'ìm'] +['Ġtr', 'ừ'] +['ãĤ»', 'ãĥĥãĤ¯ãĤ¹'] +['×ķ׳', '×Ĵ'] +['mız', 'da'] +['п', 'Ñģи'] +['ĠìŀĪ', '기'] +['Ġr', 'út'] +['ز', 'اÙĨ'] +['تÙĨ', 'ÙĪØ¹'] +['ÙħÙĤ', 'ا'] +['ÙħÙĤا', 'ÙĪÙħØ©'] +['Ġ׾צ', '×ķר×ļ'] +['Ġ×ij', '×Ļר×ķש׾×Ļ×Ŀ'] +['ãĥ´', 
'ãĤ£'] +['eb', 'ile'] +['ebile', 'ceÄŁi'] +['ãĥ¦', 'ãĥ¼ãĤ'] +['ãĥ¦ãĥ¼ãĤ', '¶'] +['ãĥ¦ãĥ¼ãĤ¶', 'ãĥ¼'] +['ãĤĴä½ľ', 'ãĤĭ'] +['Ñģ', 'меÑĢ'] +['ÑģмеÑĢ', 'ÑĤ'] +['Ġì§', 'ģ'] +['Ġì§ģ', 'ìłij'] +['ĠÐŁ', 'аÑĢ'] +['ØŃ', 'اض'] +['ØŃاض', 'ر'] +['Ùħ', 'ÙĥاÙģ'] +['ÙħÙĥاÙģ', 'ØŃØ©'] +['ล', 'ิà¸Ļ'] +['ãģ¦', 'ãģįãģ¦'] +['ÑĢоÑģ', 'л'] +['ĠÄ°ÅŁ', 'te'] +['ÙĤص', 'ÙĬر'] +['Ġ×ij×Ĵ', '×Ļ׾'] +['Ġ×ŀת', '×IJ×Ļ×Ŀ'] +['Ġ×Ķ', '×Ĺ×ĵ'] +['Ġ×Ķ×Ĺ×ĵ', 'ש×Ķ'] +['ר', '×ķ×¢'] +['Ġprodukt', 'ów'] +['ĠÙħ', 'صدر'] +['не', 'ÑĨ'] +['ĠاÙĦعÙħÙĦ', 'ات'] +['Ġçık', 'ma'] +['Ġد', 'بÙĬ'] +['×§', '×Ļף'] +['ת', '×IJר'] +['ת×IJר', '×Ļ×ļ'] +['׳×Ļ', '×Ļ×ĵ'] +['صر', 'اع'] +['l', 'ève'] +['צ', '×Ļר'] +['à¸Ķ', 'ัà¸Ļ'] +['à¹ĥหà¹ī', 'à¹Ħà¸Ķà¹ī'] +['ãĤ¿ãĤ¤', 'ãĥł'] +['Ġgi', 'ảng'] +['С', 'ÐŁ'] +['ĠاÙĦÙħ', 'ØŃÙĦ'] +['ĠاÙĦÙħØŃÙĦ', 'ÙĬØ©'] +['ĠT', 'ất'] +['׾', '×ķ×ĺ'] +['h', 'á»ķ'] +['Ġam', 'éric'] +['Ġaméric', 'ain'] +['Ġ×ijש׾', '×ij'] +['Ġ׾×IJ', '×ķ×ŀ×Ļ'] +['Ġpe', 'ça'] +['ĠÑĢаз', 'нÑĭÑħ'] +['ãģĦãĤĭ', 'ãģ¨'] +['ãĥĩ', 'ãĥ³'] +['ס', 'קר'] +['Ġ×Ķ×ŀ×Ĺ', '×Ļר'] +['ãģ¨ãģĦãģĨ', 'ãĤĤãģ®'] +['رت', 'بط'] +['ĠиÑģÑĤ', 'оÑĩ'] +['ĠиÑģÑĤоÑĩ', 'ник'] +['สมัà¸Ħร', 'สมาà¸Ĭิà¸ģ'] +['Ġ', 'à¸Ĺัà¹īà¸ĩ'] +['Ġà¸Ĺัà¹īà¸ĩ', 'à¸Ļีà¹ī'] +['ĠT', 'áºŃp'] +['ãģ£ãģ¦', 'ãģĦãģĨ'] +['ĠاÙĦÙĪ', 'صÙĪÙĦ'] +['Ġdéc', 'ada'] +['Ġо', 'ÑĦоÑĢм'] +['ĠоÑĦоÑĢм', 'лен'] +['สำหรัà¸ļ', 'à¸ģาร'] +['Ġog', 'óln'] +['ãģĨãģ¡', 'ãģ«'] +['Ġvá', 'rias'] +['ãģĻãģİ', 'ãĤĭ'] +['ÙĪ', 'Ùĩا'] +['à¹Ĥà¸Ľà¸£', 'à¸Ķ'] +['ĠÐłÐ¾ÑģÑģ', 'иÑı'] +['人', 'ãĢħ'] +['ãģĹãģ¦', 'ãģįãģŁ'] +['Ġsı', 'rasında'] +['Ġng', 'ôn'] +['س', 'ÙĨØ©'] +['تÙħ', 'تع'] +['×ŀ׼', '×ij×Ļ'] +['Ġnh', 'ấn'] +['×¢', '×ŀ×Ļ×ĵ'] +['á»', '¨'] +['ж', 'иÑĤÑĮ'] +['ãĤī', 'ãģĽ'] +['gr', 'áf'] +['gráf', 'ica'] +['ĠÙĤ', 'ÙĪÙĦ'] +['ĠÙĤÙĪÙĦ', 'Ùĩ'] +['ëĭ¨', 'ì²´'] +['ห', 'à¹īา'] +['หà¹īา', 'ม'] +['使', 'ãģ£ãģ¦'] +['ת', '×Ļ×ij'] +['ת×Ļ×ij', 'ת'] +['i', 'á»ĥu'] +['à¹ģ', 'à¸Ĭม'] +['à¹ģà¸Ĭม', 'à¸Ľ'] +['à¹ģà¸Ĭà¸¡à¸Ľ', 'à¹Į'] +['áº', '¬'] +['ĠëĤĺ', 'ëĿ¼'] +['ĠÙħباشر', 'Ø©'] +['Ġtr', 'Äĥm'] +['سÙĥ', 'ÙĪ'] +['ĠاÙĦذ', 'Ùī'] +['Ġbi', 'ç'] +['Ġbiç', 
'im'] +['ت', 'راجع'] +['Ġоб', 'еÑģп'] +['ĠобеÑģп', 'еÑĩ'] +['ĠобеÑģпеÑĩ', 'ива'] +['Ġвозд', 'ÑĥÑħ'] +['Ñĭв', 'аÑĤÑĮ'] +['ÙĦ', 'ØŃÙĤ'] +['ĠMü', 'dü'] +['ĠMüdü', 'rl'] +['ĠMüdürl', 'Ã¼ÄŁÃ¼'] +['Ġyapt', 'ır'] +['Ġפר', 'ס'] +['Ġפרס', '×ķ×Ŀ'] +['Ø·', 'ÙĪØ±'] +['ÑģÑĤв', 'оваÑĤÑĮ'] +['ìŀ¥', 'ìĿĦ'] +['à¸Ĺีà¹Īà¸Ķี', 'à¸Ĺีà¹Īสุà¸Ķ'] +['à¸Ńั', 'ล'] +['ÑĢ', 'Ñİ'] +['Ùħست', 'ÙĤبÙĦ'] +['Ñģл', 'ÑĥÑĪ'] +['ÑģлÑĥÑĪ', 'а'] +['èªį', 'ãĤģ'] +['Ġ׾', '×Ļ×ŀ'] +['Ġ׾×Ļ×ŀ', '×ķ×ĵ×Ļ'] +['ת', 'ש×ķ×ij'] +['תש×ķ×ij', '×ķת'] +['ĠgerçekleÅŁtir', 'il'] +['ĠاÙĦ', 'اتÙ쨧ÙĤ'] +['ĠÑĥÑĢов', 'не'] +['ĠÑĤ', 'ÑĢав'] +['Ġ×Ķ×ŀ', '×ķף'] +['ØŃÙģ', 'اظ'] +['ĠÙħ', 'ÙIJ'] +['ĠÙħÙIJ', 'ÙĨ'] +['ĠÙħÙIJÙĨ', 'ÙĴ'] +['Ġdem', 'ás'] +['×ŀ×ķ×ĸ', '×Ļ×§×Ķ'] +['ש', '×Ļ×Ĺ×Ķ'] +['Ġb', 'ú'] +['алÑĮ', 'нÑĭм'] +['ãĤı', 'ãģŁ'] +['ãĤıãģŁ', 'ãģĹ'] +['ĠاÙĦÙħÙĪ', 'اد'] +['ת', '׼׳'] +['×ª×Ľ×ł', '×ķף'] +['ãĥŃ', 'ãĥĥãĤ¯'] +['hi', 'ếu'] +['ĠÑĥ', 'ме'] +['ÙħØŃا', 'ÙĪÙĦØ©'] +['×IJ', '×ķשר'] +['Ġкон', 'кÑĥÑĢ'] +['ĠконкÑĥÑĢ', 'Ñģ'] +['Ġ×ŀ', '×ij×Ĺ'] +['Ġ×ŀ×ij×Ĺ', '×Ļ×ł×ª'] +['Ġan', 'lam'] +['Ġanlam', 'ı'] +['Ġli', 'á»ĩt'] +['Ġв', 'Ñħод'] +['ĠH', 'ình'] +['ĠÙĨ', 'ÙĬ'] +['ĠÙĨÙĬ', 'ÙĪØ²'] +['ãĤ¸ãĥ£', 'ãĥ¼'] +['×ij', '×Ļ×¥'] +['ÑĤелÑĮ', 'нÑĭÑħ'] +['à¸Ĺุà¸ģ', 'à¸Ńยà¹Īาà¸ĩ'] +['ĠkiÅŁ', 'inin'] +['Ø£', 'Ùĥثر'] +['ĠиÑģÑĤоÑĢ', 'ии'] +['Ġë³Ģ', 'íĻĶ'] +['פ׾', 'ס×ĺ'] +['×¤×ľ×¡×ĺ', '×Ļ׳×Ļ'] +['ĠÑģ', 'еÑĤ'] +['ĠÑģеÑĤ', 'и'] +['dıģ', 'ımız'] +['íķĺ', 'ëıĦë¡Ŀ'] +['×Ķ', 'ר'] +['×Ķר', '×ij×Ķ'] +['ãģĻãĤĭãģĵãģ¨', 'ãģ¯'] +['Ġphi', 'ếu'] +['تØŃ', 'سÙĬÙĨ'] +['ĠÅĽ', 'rod'] +['ĠÅĽrod', 'ow'] +['ĠÅĽrodow', 'isk'] +['ĠÑĢаÑģ', 'Ñħод'] +['بر', 'ÙĬد'] +['Ġر', 'ÙĬ'] +['ĠرÙĬ', 'اÙĦ'] +['Ġ×ķ', '׼×ļ'] +['ì§Ģ', 'ìļĶ'] +['׼', '×ŀ×ķ'] +['Ġ×¢×ľ', '×Ļ×Ķ×Ŀ'] +['f', 'ÃŃcio'] +['Ġkar', 'arı'] +['tıģ', 'ını'] +['ĠС', 'ов'] +['ĠСов', 'еÑĤ'] +['ãģĬéĩij', 'ãĤĴ'] +['м', 'еждÑĥ'] +['междÑĥ', 'на'] +['междÑĥна', 'ÑĢод'] +['междÑĥнаÑĢод', 'н'] +['Ġm', 'á»Ŀi'] +['ĠاÙĦØ¥', 'ÙĬر'] +['ĠاÙĦØ¥ÙĬر', 'اÙĨÙĬ'] +['ĠاÙĦرÙĪ', 'سÙĬ'] +['ص', 'ÙĨد'] +['صÙĨد', 'ÙĪÙĤ'] +['ĠاÙĦØ¥ÙĨ', 'ترÙĨت'] +['Ġt', 'ắm'] 
+['ĠÑĤак', 'ого'] +['Ġ×ij', '׾×ķ×Ĵ'] +['Ġü', 'crets'] +['Ġücrets', 'iz'] +['×Ĺ×ĸ', '×Ļר'] +['ìĸ´', 'ìķ¼'] +['ĠPh', 'ần'] +['ï¼', 'ľ'] +['Ġ×ĺ', '×ij×¢'] +['Ġ×ĺ×ij×¢', '×Ļ'] +['×IJ×ŀ', '×IJ'] +['اÙĤ', 'ÙĦ'] +['Ġcondi', 'ções'] +['ÙĤات', 'ÙĦ'] +['ĠÑĢезÑĥлÑĮÑĤаÑĤ', 'е'] +['ĠÑģво', 'ими'] +['צ×ij', '×Ļ×¢'] +['gé', 'ni'] +['Ġz', 'es'] +['Ġzes', 'po'] +['Ġzespo', 'ÅĤ'] +['ÑĪ', 'ив'] +['Ġפר×ĺ×Ļ', '×ķת'] +['Ùħست', 'Ø´Ùģ'] +['ÙħستشÙģ', 'Ùī'] +['شر', 'ع'] +['Ġko', 'ÅĽci'] +['Ġ×Ķ×IJ', '×Ļ׳×ĺר׳×ĺ'] +['ĠЧ', 'еÑĢ'] +['поÑĩ', 'ÑĤ'] +['Ġactiv', 'ités'] +['çŁ¥', 'ãģ£ãģ¦'] +['Ġ×ij', '×ĸ×Ķ'] +['Ġyüz', 'den'] +['ãģªãĤĬ', 'ãģ¾ãģĽãĤĵ'] +['Ġíĺ', '¹'] +['Ġíĺ¹', 'ìĿĢ'] +['Ġ×ŀש', '׳×Ķ'] +['ĠÐĴ', 'еÑĢ'] +['Ġ×ij×IJ×ķת', '×ķ'] +['éĿ¢', 'çϽ'] +['éĿ¢çϽ', 'ãģĦ'] +['شر', 'ØŃ'] +['gr', 'ünde'] +['Ùģ', 'Ø´'] +['Ù쨴', 'ÙĦ'] +['Ġsé', 'jour'] +['ë´', 'IJ'] +['Ġr', 'ôle'] +['Ø´', 'عار'] +['ем', 'Ñĭе'] +['ĠاÙĦج', 'سÙħ'] +['алÑĮ', 'ное'] +['Ġìĥģ', 'íĥľ'] +['ï¼', '¤'] +['ë¯Ģ', 'ë¡ľ'] +['ĠÙĨ', 'ÙĤØ·'] +['ĠÙĨÙĤØ·', 'Ø©'] +['ãģĿãģĨ', 'ãģł'] +['ãģĻãĤĭ', 'ãģ®ãģĮ'] +['ห', 'ู'] +['Ġnh', 'á»ĭ'] +['Ġeconóm', 'ica'] +['ס×ĺ', '×ķ×ĵ'] +['ס×ĺ×ķ×ĵ', '׳×ĺ'] +['มี', 'à¹Ĥà¸Ńà¸ģาส'] +['Ġgest', 'ão'] +['รูà¹ī', 'วà¹Īา'] +['Ġlo', 'ạt'] +['ĠاÙĦÙħ', 'Ùı'] +['ĠاÙĦØŃ', 'ÙħÙĦ'] +['ĠاÙĦعÙħÙĦ', 'ÙĬØ©'] +['Ġê²ĥ', 'ëıĦ'] +['ĠÐľÐ¾Ñģк', 'ва'] +['×§×ĺ', '×ķר'] +['Ġпод', 'ÑĢоб'] +['ĠподÑĢоб', 'н'] +['Ġl', 'ưng'] +['ت', 'Ù쨳'] +['تÙ쨳', 'ÙĬر'] +['ĠاÙĦ', 'بع'] +['ĠاÙĦبع', 'ض'] +['ئ', 'ت'] +['Ðķ', 'ÐĿ'] +['ìŰ', '구'] +['à¹ĥหà¹ī', 'à¸Ħุà¸ĵ'] +['ãģĤãĤĬ', 'ãģ¾ãģĹãģŁ'] +['Ġbir', 'ka'] +['Ġbirka', 'ç'] +['Ġİ', 'sl'] +['Ġİsl', 'am'] +['çĹĽ', 'ãģ¿'] +['Ġh', 'ảo'] +['Ġм', 'аÑı'] +['ĠiÅŁ', 'çi'] +['ש', '×'] +['ש×', 'ģ'] +['à¸ģาร', 'à¹Ģมืà¸Ńà¸ĩ'] +['×ķ×Ķ', 'ר'] +['Ġch', 'ó'] +['ëĨ', 'Ģ'] +['Ġyan', 'lı'] +['Ġyanlı', 'ÅŁ'] +['幸', 'ãģĽ'] +['×IJר×Ĵ', '×ķ׳×Ļ'] +['à¸Ńาà¸Ī', 'าร'] +['à¸Ńาà¸Īาร', 'ยà¹Į'] +['ĠинÑĦоÑĢм', 'аÑĨиÑİ'] +['Ðĵ', 'Ðŀ'] +['׳', '×Ĺש'] +['ĠìķĮ', 'ìķĦ'] +['ĠÑħаÑĢакÑĤеÑĢ', 'иÑģÑĤ'] +['ĠÑħаÑĢакÑĤеÑĢиÑģÑĤ', 'ик'] +['à¸Ħุà¸ĵ', 'สามารà¸ĸ'] +['è¦ĭ', 
'ãģĪãĤĭ'] +['à¸Ĭัà¸Ķ', 'à¹Ģà¸Ī'] +['à¸Ĭัà¸Ķà¹Ģà¸Ī', 'à¸Ļ'] +['ĠdziaÅĤ', 'al'] +['ĠdziaÅĤal', 'noÅĽci'] +['à¹Ĥà¸ŀ', 'สà¸ķà¹Į'] +['ĠÐļ', 'ол'] +['ĠÙģ', 'ÙĩÙĬ'] +['Ġ×ŀ', 'פ׳×Ļ'] +['Ġ×Ķ×§', 'שר'] +['Ùħر', 'Ùĥ'] +['ÙħرÙĥ', 'ز'] +['Ġho', 'á'] +['Ġа', 'пп'] +['Ġапп', 'аÑĢаÑĤ'] +['Ġp', 'ami'] +['Ġpami', 'ÄĻ'] +['ĠpamiÄĻ', 'ta'] +['Ġç', 'ünkü'] +['×ĵ', '×ķף'] +['ãģ¯', 'ãģĵãģ¡ãĤī'] +['ĠM', 'Ãł'] +['ĠÙĬ', 'ÙĤدÙħ'] +['ĠпÑĢ', 'ез'] +['ĠпÑĢез', 'иденÑĤ'] +['à¸Ńุ', 'à¸ķ'] +['à¸Ńุà¸ķ', 'สา'] +['à¸Ńุà¸ķสา', 'ห'] +['à¸Ńุà¸ķสาห', 'à¸ģรรม'] +['ì§Ģ', 'ìĽIJ'] +['Ġ×IJפשר', '×ķת'] +['sch', 'üt'] +['schüt', 'z'] +['ĠTi', 'ên'] +['Ġsay', 'ılı'] +['ĠгÑĢÑĥпп', 'Ñĭ'] +['оÑĩ', 'нÑĭй'] +['Ġ×ľ×¢', '×ŀ×ķ×ĵ'] +['Ġwr', 'zeÅĽ'] +['ĠwrzeÅĽ', 'nia'] +['ĠÄIJ', 'ầu'] +['à¹Ģà¸Ĥà¹īา', 'รà¹Īวม'] +['nız', 'da'] +['Ø®ÙĬ', 'ص'] +['Ġgü', 'nc'] +['Ġgünc', 'el'] +['ĠÙĦÙĩ', 'ذÙĩ'] +['ĠÙĬ', 'عتبر'] +['lé', 'gi'] +['ãĤı', 'ãģĭãĤĭ'] +['Ġr', 'ừng'] +['ظ', 'Ùĩ'] +['ظÙĩ', 'ÙĪØ±'] +['Ġ×ŀ×ij', '×Ļף'] +['Ġ기', 'íĥĢ'] +['åĪĩ', 'ãĤĮ'] +['lan', 'mÄ±ÅŁ'] +['à¸Ĺีà¹Ī', 'มีà¸Ħวาม'] +['Ġh', 'á»ģ'] +['ت', 'ÙĪØ¬Ùĩ'] +['ĠاÙĦØ¥', 'دارة'] +['Ġú', 'til'] +['ס', 'פ×ķ'] +['à¸Ħวาม', 'รัà¸ģ'] +['à¹Ĥ', 'ฮ'] +['Ġпол', 'иÑĤ'] +['ĠполиÑĤ', 'ик'] +['Ġsat', 'ın'] +['ĠÅŀ', 'imdi'] +['×ŀ', '×ķר×Ļ×Ŀ'] +['ìķĺ', 'ëĭ¤'] +['×Ĺ', '×ķ×ķ'] +['×Ĺ×ķ×ķ', '×Ļ×Ķ'] +['à¸Ħà¸Ńม', 'à¸ŀิ'] +['à¸Ħà¸Ńมà¸ŀิ', 'ว'] +['à¸Ħà¸Ńมà¸ŀิว', 'à¹Ģà¸ķà¸Ńรà¹Į'] +['Ġا', 'ذا'] +['تخ', 'اذ'] +['ãĤ¨', 'ãĥ«'] +['Ġpossibilit', 'é'] +['ยืà¸Ļ', 'ยัà¸Ļ'] +['Ġü', 'nivers'] +['Ġünivers', 'ite'] +['ĠاÙĦد', 'ÙĪØ±ÙĬ'] +['ĠìķĬëĬĶ', 'ëĭ¤'] +['ĠìĦľ', 'ë¡ľ'] +['ØŃ', 'اÙĦ'] +['Ġë', '¨'] +['Ġë¨', '¼'] +['Ġ먼', 'ìłĢ'] +['à¸Ĺีà¹Ī', 'à¸ĸูà¸ģ'] +['ì§', 'ľ'] +['Ġsk', 'óry'] +['лÑĮ', 'ÑĨ'] +['à¹ĥà¸Ĭà¹ī', 'à¹Ģวลา'] +['×ij×§', 'שת'] +['Ġذ', 'ÙĪ'] +['æĹ¥', 'ãĢħ'] +['ĠкоÑĤоÑĢ', 'ÑĥÑİ'] +['ĠÑĥÑĢов', 'енÑĮ'] +['ê¹', '¨'] +['à¹Ħ', 'à¸Ĺ'] +['ãĤµ', 'ãĥĹãĥª'] +['ãĤ¸', 'ãĥ§ãĥ³'] +['ãģĻ', 'ãģ¹ãģį'] +['ĠG', 'ór'] +['ãĥĪ', 'ãĤ¤'] +['ãĥĪãĤ¤', 'ãĥ¬'] +['ĠyaÅŁ', 'ama'] +['Ġdá»ĭ', 'p'] +['Ġb', 'ữa'] +['à¸ĭ', 'ุ'] +['Ġöl', 
'üm'] +['ãģ£ãģ¦', 'ãģıãĤĭ'] +['à¸ģาร', 'à¸Ħà¹īา'] +['ש', 'ער'] +['ĠÑĤип', 'а'] +['Ġг', 'еÑĢ'] +['ĠгеÑĢ', 'о'] +['רק', '×¢'] +['Ġu', 'waż'] +['Ġuważ', 'a'] +['ש×ŀ', 'ף'] +['Ġhast', 'alık'] +['ãĤıãĤĮ', 'ãĤĭ'] +['ba', 'ÅŁÄ±'] +['Ñĩ', 'ÑĤо'] +['Ġ×ij', '×ŀר׼×ĸ'] +['Ġìļ°ë¦¬', 'ìĿĺ'] +['ĠÙĥاÙĨ', 'ÙĪØ§'] +['ĠØ£', 'بر'] +['Ġأبر', 'ÙĬÙĦ'] +['ì¸', 'µ'] +['à¹Ħà¸Ĥ', 'à¹Ī'] +['ĠÙĪ', 'ÙĦÙĪ'] +['à¸Ĺ', 'ัว'] +['à¸Ĺัว', 'รà¹Į'] +['ĠÙĪØ£', 'Ùĥد'] +['à¸Ĭ', 'วà¸Ļ'] +['׾', '×ķ×§'] +['æį', '¨'] +['æį¨', 'ãģ¦'] +['Ġİç', 'in'] +['p', 'éri'] +['Ġy', 'al'] +['Ġyal', 'nız'] +['ÑĮÑı', 'н'] +['Ġg', 'ắng'] +['à¸ģà¹ĩ', 'ยัà¸ĩ'] +['ĠУкÑĢа', 'ин'] +['ĠÑģ', 'ами'] +['ĠпÑĢовед', 'ен'] +['à¸ķà¸ģ', 'à¹ģà¸ķà¹Īà¸ĩ'] +['ĠQu', 'ân'] +['é', 'paration'] +['ĠbaÅŁ', 'ında'] +['Ġzn', 'ale'] +['Ġznale', 'ź'] +['Ġznaleź', 'Äĩ'] +['ãĤ±', 'ãĥ¼'] +['ãĥİ', 'ãĥ¼'] +['à¸ĸูà¸ģ', 'à¸ķà¹īà¸Ńà¸ĩ'] +['ëª', '¸'] +['Ġëı', 'Į'] +['ĠëıĮ', 'ìķĦ'] +['ĠSch', 'üler'] +['Ġпод', 'гоÑĤов'] +['ĠподгоÑĤов', 'к'] +['ع', 'رÙĪ'] +['عرÙĪ', 'ض'] +['la', 'ÅŁtır'] +['ĠÑģоÑģÑĤав', 'лÑıеÑĤ'] +['ĠпÑĢоиз', 'вод'] +['ĠпÑĢоизвод', 'ÑģÑĤва'] +['ĠоÑģнов', 'е'] +['ĠØ´', 'ÙħاÙĦ'] +['à¸ģร', 'ี'] +['ĠgörÃ¼ÅŁ', 'me'] +['оÑĩ', 'ек'] +['Ġ×Ĺ×ijר', '×Ļ×Ŀ'] +['ÙħØ®', 'اط'] +['Ùħخاط', 'ر'] +['ï¼', 'Ń'] +['ר', 'פ×IJ'] +['ĠM', 'ẹ'] +['ยà¸Ńม', 'รัà¸ļ'] +['Ġv', 'ết'] +['Ø®', 'ذ'] +['ĠاÙĦت', 'Ø·'] +['ĠاÙĦتط', 'بÙĬÙĤ'] +['à¸Ļ', 'ึà¸ģ'] +['Ġ×Ķ', '×Ľ×ł×¡×ª'] +['ĠогÑĢ', 'ани'] +['ĠогÑĢани', 'Ñĩен'] +['ĠÃĩ', 'alÄ±ÅŁ'] +['ĠاÙĦÙħÙĨت', 'دÙī'] +['à¸Īำà¸Ļวà¸Ļ', 'มาà¸ģ'] +['ĠÑĤоÑĢ', 'ÑĢ'] +['ĠÑĤоÑĢÑĢ', 'енÑĤ'] +['ĠìĤ´', 'ìķĦ'] +['à¸ŀลัà¸ĩ', 'à¸ĩาà¸Ļ'] +['à¸Ĭ', 'ัà¸Ļ'] +['ĠÐIJн', 'дÑĢ'] +['Ġréalis', 'é'] +['×ŀש', '×IJ'] +['à¹ģ', 'à¸Ĭ'] +['à¹ģà¸Ĭ', 'รà¹Į'] +['Ġб', 'ог'] +['มา', 'à¹ģลà¹īว'] +['ĠاÙĦÙĨ', 'ار'] +['Ġolmad', 'ıģı'] +['×ĵ', '×¢×Ķ'] +['ĠÑĥ', 'веÑĢ'] +['ĠÑĥвеÑĢ', 'ен'] +['ãĤĭ', 'ãĤĤãģ®'] +['Ø£', 'د'] +['أد', 'ÙĪØ§Øª'] +['Ġ×Ķ×ĸ', '×ķ×Ĵ'] +['Ø¥', 'عÙĦاÙħ'] +['h', 'á»ı'] +['ĠNä', 'he'] +['ĠÑĤ', 'еÑģÑĤ'] +['Ġ×ŀ', '×ķ׼ר'] +['Ġë¬¸ìłľ', 'ê°Ģ'] +['ת', '×ķצ×IJ×Ķ'] +['m', 'ó'] 
+['mó', 'vel'] +['ĠاÙĦتج', 'ارة'] +['Ġмног', 'иÑħ'] +['обÑī', 'а'] +['Ġ×¢', 'סק×Ļ'] +['ĠEdu', 'cação'] +['×§', 'ש×Ļ×Ŀ'] +['é', 'tabl'] +['établ', 'issement'] +['Ġд', 'еле'] +['иÑĢÑĥ', 'еÑĤÑģÑı'] +['Ø¢', 'ثار'] +['Ġ×Ķ×ŀ', 'ר׼×ĸ×Ļ'] +['ãĥIJ', 'ãĥ«'] +['ĠвÑģÑĤÑĢ', 'еÑĩ'] +['ãģĴ', 'ãĤĭ'] +['Ġci', 'Äħ'] +['ĠciÄħ', 'gu'] +['ÙĬ', 'ست'] +['à¸łà¸²', 'ว'] +['à¸łà¸²à¸§', 'ะ'] +['Ø£', 'Ùħر'] +['Ġо', 'жи'] +['Ġожи', 'да'] +['Ġ', 'á»§y'] +['ãĥŀ', 'ãĥ«'] +['ر', 'اس'] +['оÑĩ', 'ной'] +['ת', '×Ĵ×ķ×ij×ķת'] +['تع', 'رÙĬÙģ'] +['ĠÑģо', 'ÑĨиалÑĮно'] +['ãĤĴ', 'éĸĭ'] +['ĠиÑģÑģлед', 'ова'] +['Ġd', 'ú'] +['Ġdú', 'vida'] +['Ġsk', 'ÅĤ'] +['ĠskÅĤ', 'ada'] +['Ġhä', 'ufig'] +['ĠвÑĭб', 'ÑĢ'] +['ĠвÑĭбÑĢ', 'аÑĤÑĮ'] +['ãģ®ãģ§ãģ¯ãģªãģĦ', 'ãģĭ'] +['ĠÑģ', 'илÑĮно'] +['ÑĤвеÑĢж', 'ден'] +['ר', 'פ'] +['רפ', '×ķ×IJ×Ķ'] +['æĢĿ', 'ãģĦãģ¾ãģĻ'] +['ØŃر', 'ص'] +['ש×ķת', '×£'] +['Ùħس', 'جد'] +['à¹Ĥà¸Ĭ', 'วà¹Į'] +['ем', 'ÑģÑı'] +['в', 'ÑĪие'] +['Ġм', 'л'] +['Ġмл', 'н'] +['Ġ׾×Ķ', '×ij×Ļ×IJ'] +['ĠÙĬ', 'تعÙĦÙĤ'] +['à¸ķ', 'ูà¹ī'] +['Ġп', 'ÑĢаз'] +['ĠпÑĢаз', 'д'] +['ĠпÑĢазд', 'ник'] +['Ġн', 'ем'] +['Ġнем', 'ного'] +['Ġs', 'Ãłng'] +['تÙĨ', 'سÙĬ'] +['تÙĨسÙĬ', 'ÙĤ'] +['Ġtá»', 'Ŀ'] +['Ġмед', 'и'] +['ãģ«', 'æĪ'] +['ã쫿Ī', '»'] +['à¸Ħว', 'à¹īา'] +['ãģĭ', 'ãģijãĤĭ'] +['×ij׾', '×ķת'] +['ĠÑįк', 'Ñģп'] +['ĠÑįкÑģп', 'еÑĢÑĤ'] +['Ġдев', 'ÑĥÑĪ'] +['ĠдевÑĥÑĪ', 'к'] +['ĠØŃ', 'ص'] +['ÙĨØ´', 'Ø£'] +['ãģĮãģĤãĤĭ', 'ãģ®ãģ§'] +['Ġت', 'راÙħ'] +['ĠتراÙħ', 'ب'] +['أس', 'ÙĪØ§ÙĤ'] +['Ġ׾פ', '׳×ķת'] +['Ġا', 'ï»·'] +['ãģ«', 'ãģı'] +['ãģ«ãģı', 'ãģĦ'] +['ĠØ£', 'عÙĦÙī'] +['Ġ׾×Ķ', '×ŀש×Ļ×ļ'] +['rä', 'u'] +['ש×ŀ', '×Ļ×Ŀ'] +['åĪĨ', 'ãģij'] +['ãģĻ', 'ãģ§'] +['ãģĻãģ§', 'ãģ«'] +['×Ķ׾', '׼×Ķ'] +['×Ĺ׾', '×Ļ×£'] +['Ġì', '±ħ'] +['Ġì±ħ', 'ìŀĦ'] +['à¹Ģà¸Ī', 'ริ'] +['à¹Ģà¸Īริ', 'à¸į'] +['éģĬ', 'ãģ³'] +['ج', 'سد'] +['สา', 'à¸ĺ'] +['สาà¸ĺ', 'าร'] +['สาà¸ĺาร', 'à¸ĵ'] +['Ġbas', 'ın'] +['ÑĢаÐ', '³'] +['г', 'ад'] +['Ġho', 'ÅŁ'] +['íķ', 'µ'] +['×ij×Ĺ', '×Ļר×Ķ'] +['×ŀס', '×ļ'] +['Ġìłľ', 'íĴĪ'] +['تÙħ', 'ÙĪÙĬÙĦ'] +['ĠL', 'ưu'] +['ë¡ľ', 'ë¶ĢíĦ°'] +['Ġп', 'об'] +['Ġпоб', 
'ед'] +['ÙħÙĨ', 'ذ'] +['常', 'ãģ«'] +['ÙĤ', 'س'] +['ĠاÙĦÙħ', 'صدر'] +['ĠÙĪØ§ÙĦ', 'است'] +['Ġkh', 'ắp'] +['ĠاÙĦج', 'اÙĨب'] +['Ġng', 'uyá»ĩn'] +['éĸĵ', 'éģķãģĦ'] +['ĠÑģÑĤ', 'ÑĢа'] +['ĠÑģÑĤÑĢа', 'Ñħ'] +['ĠÑģÑĤÑĢаÑħ', 'ов'] +['รี', 'à¸ļ'] +['Ġx', 'ương'] +['Ġì°', '¾'] +['Ġì°¾', 'ìķĦ'] +['Ġng', 'ại'] +['г', 'ал'] +['à¸ĭ', 'ีà¹Ī'] +['Ġ×ij', 'פ×Ļ×Ļס×ij×ķ×§'] +['Ц', 'енÑĤÑĢ'] +['Ġaval', 'iação'] +['Ġeconóm', 'ico'] +['×ĸ', 'ף'] +['ĠÐľ', 'ак'] +['Ġinter', 'és'] +['à¸ģล', 'ิà¹Īà¸Ļ'] +['ÑģÑĤÑĮ', 'Ñİ'] +['ĠÄij', 'ương'] +['å¼·', 'ãģı'] +['ĠKh', 'ách'] +['à¹Ģà¸Ļืà¹īà¸Ń', 'หา'] +['ĠYaz', 'ı'] +['è²·', 'ãģ£ãģ¦'] +['Ðł', 'Ðķ'] +['à¹Ģà¸ŀิà¹Īม', 'à¸Ĥึà¹īà¸Ļ'] +['สม', 'à¸ļู'] +['สมà¸ļู', 'รà¸ĵà¹Į'] +['Ġм', 'иÑĢов'] +['×Ĵ', '׳×Ļ×Ŀ'] +['ĠÄij', 'ức'] +['à¸Ń', 'ารà¹Į'] +['ص', 'اص'] +['ãģĬ', 'ãĤĪ'] +['ãģĬãĤĪ', 'ãģ³'] +['êÌ', 'ī'] +['ĠاÙĦÙħؤ', 'تÙħر'] +['ĠاÙĦÙħر', 'ØŃÙĦØ©'] +['สà¸Ńà¸ļ', 'à¸ĸาม'] +['Ġà¸Īาà¸ģ', 'à¸Ļัà¹īà¸Ļ'] +['Ġت', 'عد'] +['ãģĿãģ®', 'ãģŁãĤģ'] +['Ġkh', 'áng'] +['à¸Ļ', 'ิà¸Ķ'] +['ãĥĬ', 'ãĥ³'] +['ëĦ¤', 'ìļĶ'] +['ĠاÙĦ', 'اØŃت'] +['ĠاÙĦاØŃت', 'ÙĦاÙĦ'] +['ìļ', 'ķ'] +['Ġмод', 'ели'] +['ĠпÑĢоÑĨ', 'енÑĤ'] +['à¸ŀวà¸ģ', 'à¹Ģรา'] +['Ġ×Ķצ', '×ĵ'] +['Ġ×Ķצ×ĵ', '×ĵ×Ļ×Ŀ'] +['ständ', 'e'] +['׳', '×Ĵר'] +['Ġdot', 'yc'] +['Ġdotyc', 'zÄħ'] +['ĠdotyczÄħ', 'ce'] +['ĠÅĽ', 'wiÄĻt'] +['×ŀר', '×Ķ'] +['ãģĻãģĶ', 'ãģĦ'] +['ãĥĩãĤ£', 'ãĥ³ãĤ°'] +['à¸ģาร', 'สรà¹īาà¸ĩ'] +['ë', 'Ĥ¬'] +['Ġì°¸', 'ìŬ'] +['Ñģ', 'Ñħ'] +['ÑģÑħ', 'ем'] +['ÙħÙĪ', 'س'] +['Ġn', 'ấu'] +['Ġ׾×ŀ×¢', '׾×Ķ'] +['à¹Ģà¸Ľ', 'à¹īา'] +['à¹Ģà¸Ľà¹īา', 'หมาย'] +['Ġmù', 'i'] +['ائ', 'ز'] +['íĽ', 'Ī'] +['×Ĺ×ij', '×ķר×Ķ'] +['à¸ľà¸¹à¹ī', 'à¹ĥà¸Ĭà¹ī'] +['Ġpa', 'ź'] +['Ġpaź', 'dzi'] +['Ġpaździ', 'ern'] +['Ġpaździern', 'ika'] +['ลà¸ĩ', 'à¹Ħà¸Ľ'] +['ÙĤ', 'اع'] +['Ġch', 'áºŃm'] +['Ġözellik', 'leri'] +['ĠÄIJ', 'o'] +['ĠÄIJo', 'Ãłn'] +['ж', 'ение'] +['Ġh', 'ẳ'] +['Ġhẳ', 'n'] +['ĠaÅŁ', 'k'] +['ï½', 'į'] +['ãĥij', 'ãĤ¹'] +['×Ķ×ķר', '×IJ×ķת'] +['ĠÅ', '»'] +['ĠÅ»', 'y'] +['×ŀ×ĸ', '׾'] +['ĠÑĥ', 'кÑĢа'] +['ĠÑĥкÑĢа', 'ин'] +['à¹Ģà¸Ĭ', 'ิ'] +['à¹Ģà¸Ĭิ', 'à¸į'] +['Ðł', 
'Ðĺ'] +['ĠzwiÄħz', 'ku'] +['×Ķ×Ĺ׾×ĺ', 'ת'] +['ãĤĵãģ§ãģĻ', 'ãĤĪãģŃ'] +['ãģ¦', 'ãģĬãĤĬ'] +['лож', 'иÑĤÑĮ'] +['×ŀ', '×ķ׳×Ļ×Ŀ'] +['ฮ', 'ิ'] +['ì°', '¬'] +['ĠاÙĦÙħØ´', 'ترÙĥ'] +['ĠdÃ¼ÅŁ', 'ük'] +['аг', 'енÑĤ'] +['ĠاÙĦØ£', 'سبÙĪØ¹'] +['ĠÙĤ', 'رÙĬب'] +['ин', 'д'] +['инд', 'ив'] +['индив', 'ид'] +['индивид', 'Ñĥ'] +['индивидÑĥ', 'алÑĮн'] +['för', 'der'] +['Ġseç', 'en'] +['Ġseçen', 'ek'] +['Ġét', 'ant'] +['ĠлÑİб', 'им'] +['каз', 'ÑĭваеÑĤ'] +['ว', 'ิà¸Ļ'] +['Ġ×Ķ×ij', '×IJ×Ļ×Ŀ'] +['Ġд', 'ов'] +['Ġдов', 'олÑĮ'] +['ĠдоволÑĮ', 'но'] +['×¢×ĵ', '×Ļ×£'] +['Ġok', 're'] +['Ġokre', 'ÅĽ'] +['ĠokreÅĽ', 'lon'] +['Ġت', 'رÙĬد'] +['à¹Ģมืà¹Īà¸Ń', 'วัà¸Ļà¸Ĺีà¹Ī'] +['ãĤĪ', 'ãģĭãģ£ãģŁ'] +['Cum', 'h'] +['Cumh', 'ur'] +['Cumhur', 'ba'] +['Cumhurba', 'ÅŁ'] +['CumhurbaÅŁ', 'kan'] +['CumhurbaÅŁkan', 'ı'] +['Ġn', 'ợ'] +['à¸ľà¸¹à¹ī', 'à¹Ģลà¹Īà¸Ļ'] +['Ġcompl', 'ète'] +['à¹Ģà¸ŀ', 'ศ'] +['د', 'ÙIJ'] +['Ġdü', 'z'] +['Ġdüz', 'ey'] +['ãģ§ãģĤãĤĭ', 'ãģĵãģ¨'] +['ext', 'érieur'] +['×', '³'] +['Ġinform', 'ação'] +['ãĤ¯ãĥª', 'ãĥĭãĥĥãĤ¯'] +['ĠPub', 'li'] +['ĠPubli', 'é'] +['ר', '×ķ×ĵ'] +['à¸Ħวาม', 'à¸Ľà¸¥à¸Ńà¸Ķà¸łà¸±à¸¢'] +['ĠØ£ÙĬ', 'ض'] +['ĠØ£ÙĬض', 'Ùĭا'] +['ت', 'سبب'] +['ãģ¤', 'ãĤĤãĤĬ'] +['из', 'ма'] +['à¸Ĥึà¹īà¸Ļ', 'à¹Ħà¸Ľ'] +['Ùĥ', 'ÙIJ'] +['ÙĦ', 'ÙĪÙħ'] +['Ġש', 'צר'] +['Ġשצר', '×Ļ×ļ'] +['ãģ¯', 'ãĤĤãģ¡ãĤįãĤĵ'] +['Ġк', 'ан'] +['Ġкан', 'ал'] +['ãģ«ãģª', 'ãģ£ãģ¦ãģĦãģ¾ãģĻ'] +['ĠاÙĦØ£', 'Ùĥثر'] +['ت', 'اØŃ'] +['ÙĨت', 'Ùĩ'] +['ÙĨتÙĩ', 'اء'] +['ا', 'ÙĪÙĬØ©'] +['ĠBug', 'ün'] +['н', 'Ñģкого'] +['à¸Ķ', 'à¹Īวà¸Ļ'] +['é', 'volution'] +['ãģ£ãģ¦', 'ãģĦãģ¾ãģĹãģŁ'] +['ãĤ', 'ħ'] +['ĠV', 'ương'] +['à¸łà¸²à¸ŀ', 'ย'] +['à¸łà¸²à¸ŀย', 'à¸Ļ'] +['à¸łà¸²à¸ŀยà¸Ļ', 'à¸ķรà¹Į'] +['Ġ×Ķ', 'צ׾×Ļ×Ĺ'] +['ĠاÙĦإسÙĦاÙħ', 'ÙĬ'] +['ÙĦÙĬ', 'ب'] +['Ġed', 'ição'] +['ÑģÑĤÑĢ', 'ел'] +['Ġkh', 'úc'] +['ÙĨÙħÙĪ', 'ذ'] +['ÙĨÙħÙĪØ°', 'ج'] +['׾', 'צ×Ķ'] +['ÑģÑĤав', 'ил'] +['à¸ĸ', 'า'] +['สรà¹īาà¸ĩ', 'à¸Ħวาม'] +['ãģĦ', 'ãģ£ãģ±'] +['ãģĦãģ£ãģ±', 'ãģĦ'] +['ÑģÑĤав', 'лен'] +['ĠاÙĦ', 'ÙĤدس'] +['Ġng', 'ược'] +['ب', 'Ø®'] +['ส', 'หร'] +['สหร', 'ั'] +['สหรั', 'à¸IJ'] 
+['ĠØ£', 'غ'] +['Ġأغ', 'سط'] +['Ġأغسط', 'س'] +['ãģĨ', 'ãģ¾'] +['ãģĨãģ¾', 'ãģı'] +['ĠêµŃ', 'ìłľ'] +['ØŃض', 'ار'] +['Ġd', 'ừng'] +['æĬ¼', 'ãģĹ'] +['ت', 'ÙĪØ§'] +['تÙĪØ§', 'جد'] +['ש×ŀ', '×Ĺ×Ķ'] +['ãģı', 'ãĤĵ'] +['Ġ×ij×¢', 'צ'] +['Ġ×ijעצ', '×Ŀ'] +['×ŀ', '׳×Ļ×ķת'] +['×ķ', '×Ļ×ĵ'] +['×ķ×Ļ×ĵ', '×IJ×ķ'] +['à¸Ĭ', 'ิà¸ĩ'] +['Ġprac', 'ÄĻ'] +['Ġз', 'аÑĤ'] +['ĠзаÑĤ', 'ем'] +['ĠìŀIJ', 'ìľł'] +['Ġì¤', 'Ģ'] +['Ġì¤Ģ', 'ë¹Ħ'] +['Ġb', 'áºŃ'] +['ĠbáºŃ', 'c'] +['Ġ×Ķ×ŀ', 'צ×ij'] +['ĠÙĤ', 'ÙĬÙħØ©'] +['à¹Ģà¸Ń', 'à¹Ģà¸Ĭ'] +['à¹Ģà¸Ńà¹Ģà¸Ĭ', 'ีย'] +['Ġperch', 'è'] +['ĠاÙĦع', 'سÙĥر'] +['ĠاÙĦعسÙĥر', 'ÙĬØ©'] +['ج', 'ÙĬب'] +['ëŀ', 'µ'] +['Ùħ', 'Ùĩر'] +['ÙħÙĩر', 'جاÙĨ'] +['Ùħ', 'راÙĥ'] +['ÙħراÙĥ', 'ز'] +['Ġод', 'нако'] +['à¸Ķี', 'à¹Ĩ'] +['Ġצ', 'פ×ķ'] +['Ġkullan', 'ılan'] +['Ġк', 'ино'] +['ãĥĨãĤ£', 'ãĥ³ãĤ°'] +['ĠGi', 'Ỽi'] +['ت', 'ÙĪØ²'] +['تÙĪØ²', 'ÙĬع'] +['ย', 'ิà¸Ļ'] +['ยิà¸Ļ', 'à¸Ķี'] +['Ġc', 'Åĵur'] +['ĠiÅŁ', 'aret'] +['Ġ×ij×¢', '×ĸר'] +['Ġ×ij×¢×ĸר', 'ת'] +['Ġп', 'аÑĨи'] +['ĠпаÑĨи', 'енÑĤ'] +['ãģ¿ãģŁãģĦ', 'ãģ§ãģĻ'] +['в', 'ез'] +['ли', 'на'] +['од', 'е'] +['Ġ×IJ×ķת', 'ף'] +['dıģ', 'ınız'] +['ĠÐIJ', 'в'] +['ĠÐIJв', 'ÑĤоÑĢ'] +['ï¼', '®'] +['ĠC', 'ần'] +['ĠاÙĦا', 'Ø®'] +['ĠاÙĦاخ', 'بار'] +['Ġê±°', 'ìĿĺ'] +['Ġat', 'enção'] +['Ġgeld', 'iÄŁi'] +['ãĤª', 'ãĤ¹'] +['ãĤªãĤ¹', 'ãĤ¹'] +['ãĤªãĤ¹ãĤ¹', 'ãĥ¡'] +['ев', 'Ñĭе'] +['кÑĢÑĭ', 'л'] +['à¹Ģà¸Ĭ', 'ียà¸ĩ'] +['à¹Ģà¸Ĭียà¸ĩ', 'à¹ĥหมà¹Ī'] +['Ġmar', 'ço'] +['ĠاÙĦÙħ', 'ادة'] +['Ġг', 'ол'] +['Ġsprzeda', 'ży'] +['Ġíķ´', 'ê²°'] +['ĠÐķ', 'го'] +['ê¹', 'Ģ'] +['Ġ׾ק×ij׾', 'ת'] +['ĠاÙĦÙģ', 'ÙĨاÙĨ'] +['Ġcomunic', 'ación'] +['à¹Ģสà¹īà¸Ļ', 'à¸Ĺาà¸ĩ'] +['íĺ', '¹'] +['à¸Ĭ', 'ำ'] +['à¸Ĭำ', 'ระ'] +['Ġ׼', '×IJ×ŀ'] +['Ġ׼×IJ×ŀ', '×ķר'] +['à¸Ĭ', 'à¹Īาà¸ĩ'] +['ز', 'Ùĩر'] +['Ġklient', 'ów'] +['ива', 'ÑİÑĤ'] +['ан', 'г'] +['׳', '×ļ'] +['Ġg', 'á»įn'] +['Ãľ', 'R'] +['ìĺģ', 'ìĥģ'] +['Ġغ', 'زة'] +['ìĿĮ', 'ìĿĦ'] +['Ġbez', 'po'] +['Ġbezpo', 'ÅĽ'] +['ĠbezpoÅĽ', 'redni'] +['ĠاÙĦÙħ', 'ÙĪØ§'] +['ĠاÙĦÙħÙĪØ§', 'Ø·ÙĨ'] +['ĠاÙĦÙħÙĪØ§Ø·ÙĨ', 'ÙĬÙĨ'] +['ãĤĮ', 'ãģ¾ãģĻ'] +['ĠмаÑĤ', 'Ñĩ'] +['×IJ', 
'×ķף'] +['Ġر', 'سÙħÙĬ'] +['ĠÑįк', 'он'] +['ĠÑįкон', 'ом'] +['ĠÑįконом', 'иÑĩеÑģк'] +['ãĥľ', 'ãĥ¼'] +['Ġд', 'иÑĢ'] +['ĠдиÑĢ', 'екÑĤоÑĢ'] +['ĠÑģк', 'оÑĢо'] +['à¸ļ', 'ำ'] +['à¸ļำ', 'ร'] +['à¸ļำร', 'ุà¸ĩ'] +['ĠÑĦ', 'ÑĥÑĤ'] +['ĠÑĦÑĥÑĤ', 'бол'] +['Ġ×IJ', '×Ļ׾'] +['Ġì¤ij', 'êµŃ'] +['ìľ', '¤'] +['eÄŁ', 'e'] +['à¹Ħ', 'à¸ģà¹Ī'] +['tra', 'î'] +['traî', 'n'] +['ĠÑĤ', 'ÑĢÑĥб'] +['à¹Ģà¸ļ', 'ื'] +['à¹Ģà¸ļื', 'à¹īà¸Ńà¸ĩ'] +['à¹ģม', 'à¸Ļ'] +['ĠتØŃ', 'دÙĬØ«'] +['Ġ׼', 'עת'] +['ØŃ', 'اسب'] +['lı', 'ÄŁa'] +['×§×Ļ', '×Ļ×ŀ×Ļ×Ŀ'] +['оÑģÑĤ', 'ÑĮÑİ'] +['à¸Ŀ', 'ั'] +['à¸Ŀั', 'à¹Īà¸ĩ'] +['Ø´', 'غÙĦ'] +['ìĽ', '¹'] +['Ġкажд', 'ого'] +['Ġbölüm', 'ü'] +['หà¸Ļ', 'ี'] +['Ġistedi', 'ÄŁi'] +['Ġtr', 'ưng'] +['ãĥ', 'Į'] +['ฮ', 'à¸Ń'] +['Ø£ÙĨ', 'Ø´'] +['Ø£ÙĨØ´', 'طة'] +['ĠاÙĦÙħ', 'سÙĬ'] +['ĠاÙĦÙħسÙĬ', 'ØŃ'] +['ลัà¸ģษ', 'à¸ĵà¹Į'] +['Ġn', 'á»Ńa'] +['à¸Ĺีà¹Ī', 'à¸ķà¹īà¸Ńà¸ĩà¸ģาร'] +['ÑĪ', 'ек'] +['л', 'Ñij'] +['Ġש', '×Ļ×Ķ'] +['Ġש×Ļ×Ķ', '×Ļ×Ķ'] +['Ġkhu', 'ôn'] +['ĠÑĤÑĢеб', 'ованиÑı'] +['Ġ×ľ×¢', '×ĸ×ķר'] +['ĠاÙĦع', 'Ùħر'] +['ราà¸Ħา', 'à¸ĸูà¸ģ'] +['ÙĩÙı', 'ÙħÙĴ'] +['ü', 'st'] +['üst', 'ü'] +['Ġден', 'ег'] +['Ġn', 'ạ'] +['à¸Ĥà¸Ļ', 'ม'] +['Ġбл', 'аг'] +['Ġблаг', 'од'] +['Ġблагод', 'аÑĢ'] +['ĠблагодаÑĢ', 'Ñı'] +['Ø¥', 'سÙĦاÙħ'] +['à¸Ļิ', 'ว'] +['çŁ¥', 'ãĤīãģªãģĦ'] +['Ø«', 'ÙĤØ©'] +['Ġг', 'олоÑģ'] +['×IJ×ķר', '×Ĺ'] +['Ġtr', 'ứng'] +['Ġод', 'ном'] +['ĠkoÅĦ', 'cu'] +['Ġ×ķ', 'רק'] +['Wi', 'ÄĻ'] +['WiÄĻ', 'cej'] +['Ġ×IJ', '×Ļ׼×ķת'] +['Ġ×IJ×Ļ׼×ķת', '×Ļ'] +['Ñģ', 'оÑģ'] +['Ġje', 'żeli'] +['以ä¸ĭ', 'ãģ®'] +['å°ı', 'ãģķ'] +['å°ıãģķ', 'ãģª'] +['олог', 'ии'] +['Ġоб', 'ÑģлÑĥж'] +['ĠобÑģлÑĥж', 'ива'] +['Ùĥت', 'ابة'] +['Ġê´Ģ', 'ìĭ¬'] +['×¢', 'ש×Ļר'] +['Ġaras', 'ındaki'] +['ĠÑĢай', 'она'] +['ÙĪØ§', 'جب'] +['Ġ×ij×Ĺ×Ļ', '×Ļ'] +['íķ´', '주'] +['Ġg', 'óc'] +['ай', 'л'] +['ĠT', 'ình'] +['æļ®', 'ãĤī'] +['æļ®ãĤī', 'ãģĹ'] +['æĻĤ', 'ãģ«ãģ¯'] +['ĠгоÑĢод', 'е'] +['Ġ׼×IJ', '×Ļ׾'] +['Ġ׼×IJ×Ļ׾', '×ķ'] +['ĠC', 'á»Ļng'] +['ãģ©ãģĨ', 'ãģĹãģ¦ãĤĤ'] +['×Ĺ', '×ķ×£'] +['تØŃ', 'رÙĥ'] +['ĠÑģлов', 'ам'] +['à¸Īะ', 'à¸Ĭà¹Īวย'] +['ĠاÙĦÙħست', 'ÙĤبÙĦ'] 
+['ÙĤ', 'ض'] +['ÙĤض', 'ÙĬ'] +['×ijס', '×ķפ'] +['×ijס×ķפ', '×ķ'] +['iÄĻ', 'Äĩ'] +['ĠY', 'ıl'] +['Ø´', 'ÙĬØ®'] +['à¸Ħุà¸ĵ', 'à¸Īะ'] +['ש×ŀ', '×ķת'] +['Ġت', 'عرض'] +['Ġanál', 'ise'] +['ĠÑģоб', 'иÑĢа'] +['à¹Ģà¸ŀ', 'à¸Ĭ'] +['à¹Ģà¸ŀà¸Ĭ', 'ร'] +['Ġв', 'ели'] +['Ġвели', 'к'] +['สั', 'à¹īà¸Ļ'] +['Ġpop', 'ulação'] +['รà¹Īวม', 'à¸ģัà¸Ļ'] +['×Ĺ', '×ŀ'] +['×Ĺ×ŀ', '×Ļש×Ļ'] +['ס', '×Ļס'] +['åĨħ', 'ãģ§'] +['Ġsob', 'Äħ'] +['ĠY', 'ay'] +['ĠYay', 'ın'] +['ãĥ¡', 'ãĥĭãĥ¥ãĥ¼'] +['ĠпÑĢедоÑģÑĤав', 'лÑı'] +['ãģł', 'ã썿ĢĿãģĨ'] +['Ġê³ł', 'ê°Ŀ'] +['Ġод', 'ним'] +['à¹ĥà¸Ļ', 'à¹Ģรืà¹Īà¸Ńà¸ĩ'] +['Ġs', 'á»ķ'] +['ĠÐĹ', 'деÑģÑĮ'] +['Ġизмен', 'ениÑı'] +['ĠìĿ¼', 'ìĿĦ'] +['ãģªãģ®', 'ãģł'] +['клад', 'Ñĭва'] +['ÑĢ', 'ма'] +['Ġ×ķ×ij', '׼׾'] +['تأ', 'ÙħÙĬÙĨ'] +['ĠпÑĢи', 'ÑıÑĤ'] +['ĠпÑĢиÑıÑĤ', 'н'] +['Ùħ', 'Ùħار'] +['ÙħÙħار', 'سة'] +['ãģ¨ãģª', 'ãģ£ãģ¦'] +['Ġج', 'ÙħÙĬÙĦ'] +['Ġì§', 'Ī'] +['Ġì§Ī', '문'] +['Ġquest', 'ão'] +['i', 'é'] +['ié', 'ndo'] +['หà¹īà¸Ńà¸ĩ', 'à¸ŀัà¸ģ'] +['ãĥij', 'ãĥ¼ãĥĪ'] +['ÑĤвеÑĢж', 'да'] +['н', 'Ñģкой'] +['з', 'ал'] +['มุ', 'à¹Īà¸ĩ'] +['á»', 'Ĭ'] +['Ġ×Ķ×IJ×Ĺר', '×ķ׳×Ķ'] +['ĠTh', 'ư'] +['주', '민'] +['ĠاÙĦع', 'ب'] +['év', 'én'] +['évén', 'ement'] +['ÙĤÙĪ', 'اعد'] +['د', 'Ùı'] +['ĠìķĬ', 'ìĬµëĭĪëĭ¤'] +['Ġë³´', '기'] +['Ġyapıl', 'ması'] +['à¹Ģร', 'าà¸ģ'] +['à¹Ģราà¸ģ', 'à¹ĩ'] +['ØŃ', 'ذر'] +['ÙĤ', 'صر'] +['ãģ¦ãģĹãģ¾', 'ãģĦãģ¾ãģĹãģŁ'] +['Ġà¹Ģà¸Ľà¹ĩà¸Ļ', 'à¸ķà¹īà¸Ļ'] +['ãģ¨', 'ãģ«'] +['ãģ¨ãģ«', 'ãģĭ'] +['ãģ¨ãģ«ãģĭ', 'ãģı'] +['н', 'ÑĨе'] +['зв', 'Ñĥк'] +['ãģĹãĤĪãģĨ', 'ãģ¨'] +['ĠاÙĦصØŃ', 'ÙĬØ©'] +['Ġש×Ķ', '×Ļ×ķ'] +['ĠDi', 'ÄŁer'] +['ÙĤÙĦ', 'ÙĤ'] +['ãĤ¸ãĥ£', 'ãĥ³'] +['Ġr', 'á»Ŀi'] +['Ġл', 'еÑĩ'] +['ĠлеÑĩ', 'ениÑı'] +['تب', 'اد'] +['تباد', 'ÙĦ'] +['צ', 'פ×Ķ'] +['à¸Ħวาม', 'à¹Ģหà¹ĩà¸Ļ'] +['ĠØ´', 'ب'] +['Ġشب', 'ÙĥØ©'] +['ר', '×Ļ×§'] +['Ùħ', 'عد'] +['Ùħعد', 'ات'] +['dıģ', 'ında'] +['Ġ×ijש', '׳×Ļ×Ŀ'] +['Ġ×Ķ', '×Ļשר×IJ׾'] +['Ġ×Ķ×Ļשר×IJ׾', '×Ļת'] +['Ġsı', 'nav'] +['׳צ', '×Ļ×Ĵ'] +['วัà¸ķ', 'à¸ĸุ'] +['ĠاÙĦبر', 'ÙĦÙħ'] +['ĠاÙĦبرÙĦÙħ', 'اÙĨ'] +['t', 'ivitÃł'] +['ãĤĵãģł', 'ãĤįãģĨ'] +['×§×Ļ', '×Ļ×ŀ'] +['ÙĦÙĬ', 
'Ùĥ'] +['ĠÄij', 'ò'] +['ĠÄijò', 'i'] +['ĠÐĺн', 'ÑĤеÑĢ'] +['ĠÐĺнÑĤеÑĢ', 'неÑĤ'] +['ãģ«ãģ¨ãģ£ãģ¦', 'ãģ¯'] +['ãģ£', 'ãģĵ'] +['×§', '×ķס'] +['ست', 'ØŃÙĤ'] +['æķĻ', 'ãģĪãģ¦'] +['ãĥĢ', 'ãĥ¡'] +['ĠÙħÙĨ', 'زÙĦ'] +['à¹Ģà¸ĭ', 'à¹ĩà¸Ļ'] +['使', 'ãģĪãĤĭ'] +['è¦ĭ', 'ç©į'] +['è¦ĭç©į', 'ãĤĤãĤĬ'] +['Ø£', 'Ùģ'] +['Ø£Ùģ', 'Ùĥار'] +['Ġиг', 'ÑĢов'] +['ĠигÑĢов', 'Ñĭе'] +['Ġm', 'ÄĻż'] +['ĠmÄĻż', 'czy'] +['ĠmÄĻżczy', 'zn'] +['ĠاÙĦØŃ', 'ÙĤÙĬÙĤÙĬ'] +['ع', 'بر'] +['׼×ķ׾', '׳×ķ'] +['íĿ', '¥'] +['×ŀ×IJ', '×ķ×Ĺר'] +['خت', 'ص'] +['ãĥŀ', 'ãĥŀ'] +['Ġ×IJ×Ĺ', '×ķ×ĸ'] +['í', 'ĮĢ'] +['Ġr', 'á»iji'] +['Ġв', 'ÑĤоÑĢ'] +['ĠвÑĤоÑĢ', 'ой'] +['Ġl', 'ẫn'] +['пÑĢ', 'ом'] +['пÑĢом', 'ÑĭÑĪ'] +['пÑĢомÑĭÑĪ', 'лен'] +['пÑĢомÑĭÑĪлен', 'н'] +['ĠоÑĤноÑĪ', 'ениÑı'] +['Ġs', 'ứ'] +['Ġм', 'обилÑĮ'] +['ĠмобилÑĮ', 'н'] +['ĠÑįÑĤ', 'омÑĥ'] +['Ġt', 'ạp'] +['ĠìĤ¬', 'ê±´'] +['ĠìķĮ', '볤'] +['Ùĥ', 'Ùı'] +['ÙĥÙı', 'ÙħÙĴ'] +['Ġ×§', '×ķר×Ķ'] +['ĠÑĦ', 'иÑĢ'] +['ĠÑĦиÑĢ', 'м'] +['Ġsık', 'ıntı'] +['׳', '׼'] +['׳׼', '×ķף'] +['ÙĪÙĦÙĪØ¬', 'ÙĬ'] +['ØŃ', 'اÙĨ'] +['Ġlo', 'ạn'] +['Ġ×IJ׾', '×£'] +['Ġm', 'ắn'] +['abh', 'äng'] +['abhäng', 'ig'] +['ĠÑĥÑĢов', 'нÑı'] +['Ġ׾×ij×ĵ', '×ķ×§'] +['ÙĬ', 'ÙħÙĨ'] +['lay', 'ın'] +['Ġh', 'ải'] +['Ġзав', 'од'] +['ĠìķĦ', '주'] +['สà¸ĸ', 'า'] +['สà¸ĸา', 'à¸ļัà¸Ļ'] +['Ġgüven', 'lik'] +['à¹Ģà¸Ķ', 'à¹Īà¸Ļ'] +['×ij×ĵ', '×§'] +['Ġë', 'Ī'] +['ĠëĪ', 'Ħ'] +['ĠëĪĦ', '구'] +['éĩįè¦ģ', 'ãģª'] +['รà¸Ńà¸ĩ', 'รัà¸ļ'] +['sch', 'lie'] +['schlie', 'ÃŁen'] +['Ġìĸ', '¼'] +['Ġìĸ¼', 'ë§Ī'] +['Ġìĸ¼ë§Ī', 'ëĤĺ'] +['ÑĤи', 'ки'] +['íķľëĭ¤', 'ê³ł'] +['ãģłãģ£ãģŁ', 'ãĤī'] +['Ġ×Ķ', '×Ļ×ĺ×ij'] +['ãģªãģijãĤĮãģ°', 'ãģªãĤīãģªãģĦ'] +['â', 'Ì'] +['âÌ', '£'] +['Ġph', 'ạt'] +['ak', 'Ä±ÅŁ'] +['ãģ¦ãģĹãģ¾', 'ãģĦãģ¾ãģĻ'] +['à¹Ģà¸ĭ', 'à¹ĩ'] +['ĠС', 'егоднÑı'] +['Ġinsan', 'ların'] +['Ġdévelop', 'pe'] +['ת', 'פר'] +['תפר', '×Ļ×ĺ'] +['اÙĨت', 'شار'] +['ê°', 'ij'] +['Fran', 'çois'] +['Ø£ÙĦ', 'ع'] +['Ø£ÙĦع', 'اب'] +['ãĤĴ', 'è¶ħ'] +['ãĤĴè¶ħ', 'ãģĪ'] +['Ġê°Ļ', 'ìĬµëĭĪëĭ¤'] +['ãĤ³', 'ãĥ¬'] +['ĠмеÑģÑı', 'ÑĨев'] +['íĮ', 'ħ'] +['ĠاÙĦج', 'اÙħعة'] +['ìĿ¸', 'íĦ°'] 
+['ìĿ¸íĦ°', 'ëĦ·'] +['×ĵר', '×ķש'] +['ĠÙĪØ£', 'شار'] +['ĠпÑĢав', 'ила'] +['ãģĿãģĵ', 'ãģ«'] +['×Ĺ', '×ŀ×ĵ'] +['à¹Ģหà¸ķุ', 'à¸ģารà¸ĵà¹Į'] +['Ġê²½', 'íĹĺ'] +['ãģ¶', 'ãĤĬ'] +['׾', 'ש'] +['׾ש', '×ķף'] +['à¹Ģ', 'à¸ĸ'] +['ĠDo', 'ÄŁu'] +['ĠиÑģполÑĮзов', 'ание'] +['Ġçoc', 'uÄŁu'] +['магазин', 'е'] +['ĠÄiji', 'á»ĥn'] +['Ġas', 'lı'] +['Ġaslı', 'nda'] +['Ġdoen', 'ça'] +['Ġس', 'اع'] +['Ġساع', 'ات'] +['ĠиÑģполÑĮзов', 'аниÑı'] +['ר', '×ķצ×Ļ×Ŀ'] +['ĠзнаÑĩ', 'иÑĤ'] +['ĠÑĢаÐ', '¼'] +['ĠÑĢам', 'каÑħ'] +['ê±°', '리'] +['Ġп', 'ÑĭÑĤа'] +['ãĥģ', 'ãĥ³'] +['Ġпо', 'Ñģк'] +['ĠпоÑģк', 'олÑĮ'] +['ĠпоÑģколÑĮ', 'кÑĥ'] +['Ø¥', 'بر'] +['إبر', 'اÙĩ'] +['إبراÙĩ', 'ÙĬÙħ'] +['ĠÑĤÑĢ', 'еÑħ'] +['ĠGen', 'ç'] +['س', 'ÙĪÙģ'] +['Ġve', 'ÃŃculo'] +['ĠNg', 'ân'] +['ĠоÑĩеÑĢ', 'едÑĮ'] +['à¸Ħร', 'ึà¹Īà¸ĩ'] +['×IJ', '×ij×Ļ'] +['à¸ķ', 'à¹īม'] +['ãĤĴè¡Į', 'ãģĦ'] +['ĠاÙĦساب', 'ÙĤØ©'] +['на', 'ÑĨи'] +['наÑĨи', 'она'] +['наÑĨиона', 'лÑĮн'] +['Ġgest', 'ión'] +['ت', 'ÙĤد'] +['ĠاÙĦبÙĬ', 'اÙĨ'] +['ĠاÙĦبÙĬاÙĨ', 'ات'] +['ĠاÙĦ', 'اÙĨتخاب'] +['ĠاÙĦاÙĨتخاب', 'ات'] +['à¹Ģà¸Ĭ', 'à¹Īา'] +['×ĵ', '×IJ×Ĵ'] +['Ġ׾×Ĵ', '×ŀר×Ļ'] +['Ġت', 'ØŃتاج'] +['Ġth', 'ôn'] +['à¸ķ', 'à¹īà¸Ńà¸Ļ'] +['à¸ķà¹īà¸Ńà¸Ļ', 'รัà¸ļ'] +['女', 'ãģ®'] +['女ãģ®', 'åŃIJ'] +['Ġth', 'ợ'] +['Ø·', 'ØŃÙĨ'] +['ารà¹Į', 'à¸Ķ'] +['ת', '×ŀ×Ļ×ĵ'] +['ĠÑģам', 'Ñĭм'] +['Ġìĭľ', 'íĸī'] +['Ø¥', 'صد'] +['إصد', 'ار'] +['ĠNgh', 'á»ĩ'] +['ìķ', 'ķ'] +['س', 'ئ'] +['سئ', 'ÙĦ'] +['à¸Ń', 'าร'] +['à¸Ńาร', 'ม'] +['à¸Ńารม', 'à¸ĵà¹Į'] +['à¹ģ', 'ฮ'] +['׳×ĺ', '׾'] +['Ġì¢ĭ', 'ìķĦ'] +['×ķ׾', '׾'] +['Ġ×ij', '×Ľ×ª×ij'] +['ãĤ«', 'ãĥ©'] +['צע', '×Ļר×Ļ×Ŀ'] +['تعب', 'ÙĬر'] +['Ġ×ŀ', 'קר×Ķ'] +['ĠÑĦак', 'ÑĤоÑĢ'] +['Ġت', 'ÙħاÙħ'] +['ĠتÙħاÙħ', 'ا'] +['ëį', 'ķ'] +['Ġv', 'ưá»Ŀ'] +['Ġvưá»Ŀ', 'n'] +['Ġd', 'Ä±ÅŁÄ±'] +['ãģĦ', 'ãģ¡'] +['Ġ׾ק', '׳×ķת'] +['ĠاÙĦع', 'ÙĦاÙĤات'] +['п', 'Ñĥб'] +['пÑĥб', 'ли'] +['Ø¥', 'ÙĬÙħ'] +['Ø¥ÙĬÙħ', 'اÙĨ'] +['à¸Ńำ', 'à¸Ļา'] +['à¸Ńำà¸Ļา', 'à¸Ī'] +['åIJ«', 'ãģ¾ãĤĮ'] +['ãĤĭ', 'ãģŁãĤģãģ«'] +['ס', '×Ĵ'] +['ס×Ĵ', '׳×ķף'] +['تØŃ', 'دÙĬ'] +['Ġaup', 'rès'] +['ĠاÙĦج', 'Ùĩا'] +['ĠاÙĦجÙĩا', 'ز'] +['Ġ×ŀ', 
'ת×Ĺת'] +['ен', 'нÑĥÑİ'] +['Ġз', 'им'] +['à¸ģา', 'à¹ģà¸Ł'] +['Ġ×ijת', '×ķר'] +['Ġngh', 'è'] +['Ġnghè', 'o'] +['ĠÐĽ', 'Ñİ'] +['ĠÐĽÑİ', 'б'] +['תק', 'צ×Ļ×ij'] +['×ŀ×¢', 'ש×Ķ'] +['ĠاÙĦبÙĬ', 'ت'] +['צ', '×Ļפ'] +['ĠобÑıз', 'ан'] +['ĠM', 'á»Ĺi'] +['ĠТ', 'ÑĥÑĢ'] +['ĠÙĪØ¨', 'اÙĦت'] +['ĠÙĪØ¨Ø§ÙĦت', 'اÙĦÙĬ'] +['Ġdéc', 'ision'] +['Ġب', 'د'] +['Ġبد', 'أت'] +['Ġc', 'ục'] +['Ġb', 'ask'] +['Ġbask', 'ı'] +['Ġhat', 'ırl'] +['Ġhatırl', 'a'] +['å°ı', 'ãģķãģĦ'] +['Ġgerçek', 'ten'] +['à¸ľ', 'ัà¸ģ'] +['åı¯èĥ½', 'ãģª'] +['×ŀ×IJ', 'ס'] +['Ġcr', 'ÃŃtica'] +['ĠìĿĺ', 'ìĽIJ'] +['عÙĤ', 'ÙĪØ¯'] +['×ĺ', '׼׳'] +['×ĺ׼׳', '×ķ׾×ķ×Ĵ×Ļ×Ķ'] +['è¨Ģ', 'ãģĪãģ°'] +['ĠÙĤ', 'ÙĨا'] +['ĠÙĤÙĨا', 'Ø©'] +['ĠìĿ´ê²ĥ', 'ìĿĢ'] +['ت', 'صر'] +['à¸Ł', 'ัà¸Ļ'] +['ĠÑĢе', 'ÑĨеп'] +['ĠÑĢеÑĨеп', 'ÑĤ'] +['ĠبÙĨ', 'Ù쨳'] +['ÑĢо', 'ÑĪ'] +['ĠмаÑĢ', 'ÑĤа'] +['Ġson', 'ras'] +['Ġsonras', 'ı'] +['×ķ×ij', 'ש'] +['ãĥª', 'ãĤ¹ãĤ¯'] +['ĠFranç', 'ais'] +['á»', 'ļ'] +['ê°', 'Ķ'] +['Ġ×Ķ×ijר', '×Ļת'] +['פ', '×Ļצ'] +['פ×Ļצ', '×ķ×Ļ'] +['ĠÙĦÙħا', 'ذا'] +['ĠÐļи', 'ев'] +['ĠÑģ', 'мÑĭÑģл'] +['ê¸Ī', 'ìľµ'] +['ãĤ·ãĥ£', 'ãĥ«'] +['ãĥ©', 'ãĤ¤ãĥĪ'] +['ìĽ', 'ĥ'] +['×ŀ', '×Ĺר'] +['ãĨ', 'į'] +['Ġkullan', 'ım'] +['Ġ×IJצ׾', '׳×ķ'] +['Ġt', 'Ãłn'] +['ãĥı', 'ãĥ¼'] +['ãģ¨', 'ãģ¨ãĤĤ'] +['ãģ¨ãģ¨ãĤĤ', 'ãģ«'] +['ÑĢ', 'ег'] +['ÑĢег', 'и'] +['ÑĢеги', 'он'] +['ãģªãģı', 'ãģªãĤĭ'] +['Ġch', 'ảy'] +['Ġج', 'ÙĩØ©'] +['ÅĦsk', 'iej'] +['à¸Ńี', 'à¹Ģม'] +['à¸Ńีà¹Ģม', 'ล'] +['ãģį', 'ãģ£ãģ¨'] +['ĠìĺĪ', 'ìĤ°'] +['Ġkit', 'abı'] +['Ġedu', 'cação'] +['Ġbul', 'uÅŁ'] +['олог', 'иÑı'] +['Ġкон', 'кÑĢ'] +['ĠконкÑĢ', 'еÑĤ'] +['×Ĵ', '×Ļר'] +['ĠпÑĢед', 'лаг'] +['ĠпÑĢедлаг', 'аеÑĤ'] +['ĠY', 'ên'] +['Ġíķľ', 'ë²Ī'] +['Ġ×ŀ', 'ר׼×ĸ×Ļ'] +['à¹Ģà¸Ľà¸´à¸Ķ', 'à¹Ģà¸ľà¸¢'] +['ÑĤвеÑĢ', 'д'] +['ĠH', 'á»ĩ'] +['ĠÐĵ', 'ÑĢ'] +['à¸Ŀ', 'à¹īา'] +['×Ķ', 'שק'] +['×Ķשק', '×¢×Ķ'] +['Ġна', 'Ñĥк'] +['ìłIJ', 'ìĿĦ'] +['Ġн', 'елÑĮ'] +['ĠнелÑĮ', 'з'] +['ĠнелÑĮз', 'Ñı'] +['г', 'ин'] +['ĠB', 'öl'] +['ĠBöl', 'ge'] +['Ġв', 'ла'] +['Ġвла', 'ÑģÑĤи'] +['à¹Ģà¸Ļ', 'à¹ĩ'] +['à¹Ģà¸Ļà¹ĩ', 'à¸ķ'] +['ê³', '¨'] +['Ġö', 'ld'] +['Ġöld', 
'ür'] +['׼׳', '×¢'] +['ĠاÙĦÙĩ', 'ÙĬئة'] +['ت', 'ارÙĬØ®'] +['ĠÐij', 'ÑĢ'] +['ĠÑģ', 'мож'] +['ĠÑģмож', 'еÑĤе'] +['ĠL', 'úc'] +['à¹Ħà¸Ľ', 'à¸ĸึà¸ĩ'] +['ĠBakan', 'ı'] +['Ġerklä', 'rt'] +['ĠÐIJ', 'на'] +['Ġsc', 'ène'] +['åķı', 'ãģĦ'] +['åķıãģĦ', 'åIJĪãĤıãģĽ'] +['ÙħÙĩ', 'ÙĨد'] +['ÙħÙĩÙĨد', 'س'] +['Ġн', 'азвание'] +['ив', 'аниÑı'] +['ãĤĴ', 'å¤īãģĪ'] +['ä»ĺãģį', 'åIJĪ'] +['ãĥij', 'ãĤ½'] +['ãĥijãĤ½', 'ãĤ³ãĥ³'] +['æĺİ', 'ãĤī'] +['æĺİãĤī', 'ãģĭ'] +['à¹Ģà¸Ńà¸ģ', 'สาร'] +['à¹Ģà¸ģิà¸Ļ', 'à¹Ħà¸Ľ'] +['л', 'еп'] +['ãģĹãģŁ', 'ãĤĤãģ®'] +['ĠC', 'âm'] +['ĠCâm', 'ara'] +['×§×ķ׾', '׳×ķ×¢'] +['Ġ×ij×Ĵ', '×Ļף'] +['Ġoc', 'zy'] +['Ġoczy', 'wiÅĽcie'] +['att', 'ivitÃł'] +['ãĥĵ', 'ãĥ¥ãĥ¼'] +['Ġeduc', 'ación'] +['İ', 'YE'] +['ê¹Į', 'ìļĶ'] +['ãĤ¨', 'ãĥªãĤ¢'] +['н', 'еÑģÑĤи'] +['Ġm', 'óg'] +['Ġmóg', 'ÅĤ'] +['Ġ×§×ĺ', '׳×Ļ×Ŀ'] +['ĠPr', 'ä'] +['Ġ×ľ×¢', '×ij×ķר'] +['بÙĨ', 'Ùī'] +['з', 'ол'] +['зол', 'оÑĤ'] +['Ġwn', 'ÄĻtr'] +['ĠwnÄĻtr', 'z'] +['Ġconstr', 'ução'] +['รัà¸ļ', 'รà¸Ńà¸ĩ'] +['س', 'جÙĨ'] +['Ġ×§', '×ķ׳'] +['ס', '×Ļפ×ķר'] +['ĠÙħ', 'دÙī'] +['رض', 'Ùī'] +['п', 'лав'] +['ï¼', '¥'] +['Ġil', 'a'] +['Ġila', 'ç'] +['ãĤĭ', 'ãģ¹ãģį'] +['ĠÙħ', 'ÙĪÙĤÙģ'] +['à¸ģร', 'ุ'] +['à¸ģรุ', 'à¸ĵา'] +['chodzÄħ', 'c'] +['ĠÑĤÑĭ', 'Ñģ'] +['Ðķ', 'вÑĢо'] +['ĠÙĬ', 'ØŃدث'] +['ãĥ¡', 'ãĤ¤ãĥ³'] +['ĠاÙĦص', 'ØŃÙĬ'] +['ĠÐĶ', 'ан'] +['دع', 'اء'] +['ãĤ´', 'ãĥ¼ãĥ«'] +['ש', '×ł×ª×Ļ'] +['×©×ł×ª×Ļ', '×Ļ×Ŀ'] +['à¸Ķà¹īวย', 'à¸ģัà¸Ļ'] +['Ġol', 'acaģı'] +['Ġ×ij', '×ŀ×Ĺ×Ļר'] +['×Ķ', '×§'] +['×Ķ×§', '×ŀת'] +['ãĥ¢', 'ãĥİ'] +['ĠçalÄ±ÅŁ', 'tı'] +['Ġjó', 'venes'] +['ãģĦãģı', 'ãĤī'] +['ĠÙħ', 'عدÙĦ'] +['ĠC', 'Å©ng'] +['ĠSeg', 'ún'] +['Ġdönem', 'de'] +['Ġ׾', '×Ļ×ĵ×Ļ'] +['ãģį', 'ãģ¡'] +['ãģįãģ¡', 'ãĤĵ'] +['ãģįãģ¡ãĤĵ', 'ãģ¨'] +['Ù쨱', 'ÙĨس'] +['Ù쨱ÙĨس', 'ا'] +['åIJij', 'ãģį'] +['Ġcamp', 'aña'] +['ĠÑģам', 'оÑģÑĤоÑı'] +['ĠÑģамоÑģÑĤоÑı', 'ÑĤелÑĮно'] +['á»', 'Ģ'] +['ÙĤ', 'ÙĪØ§'] +['س', 'ÙĦاØŃ'] +['à¸ģระ', 'à¹ģ'] +['à¸ģระà¹ģ', 'ส'] +['ĠполÑĮз', 'Ñĥ'] +['n', 'qu'] +['nqu', 'ête'] +['รà¹Īวม', 'à¸ģัà¸ļ'] +['ëĬIJ', 'ëĥIJ'] +['à¸Ĺีม', 'à¸Ĭาà¸ķิ'] +['Ġyıll', 'ık'] 
+['ìĬ', '¬'] +['ĠØ£', 'صØŃاب'] +['ill', 'é'] +['Ġdó', 'la'] +['Ġdóla', 'res'] +['Ġк', 'ож'] +['Ġкож', 'и'] +['ล', 'à¹īà¸Ń'] +['à¹Ģรีย', 'à¸ļร'] +['à¹Ģรียà¸ļร', 'à¹īà¸Ńย'] +['à¹Ģà¸ŀ', 'ิ'] +['à¹Ģà¸ŀิ', 'à¹Īà¸ĩ'] +['ÑĢиÑĤоÑĢ', 'и'] +['Ġí', 'ijľ'] +['Ġíijľ', 'íĺĦ'] +['ĠпеÑĢ', 'ев'] +['ĠпеÑĢев', 'од'] +['פ×Ĵ', '×Ļ×¢×Ķ'] +['ĠdeÄŁerlendir', 'me'] +['Ùģ', 'ائ'] +['ĠвÑĭ', 'год'] +['ınız', 'ı'] +['×ķ׼', '×Ļ×Ĺ'] +['ĠдоÑģÑĤ', 'иг'] +['Ġng', 'Ãłn'] +['æĢĿ', 'ãģ£ãģŁ'] +['ĠÐķ', 'ÑģÑĤÑĮ'] +['ĠاÙĦر', 'غÙħ'] +['ĠzwiÄħz', 'ane'] +['رب', 'Ø·'] +['à¸Ļ', 'ึà¸ĩ'] +['Ġ׾×Ĺ', '×ķ×§'] +['Ġszczeg', 'óln'] +['Ġszczególn', 'ie'] +['Ġبا', 'ستخداÙħ'] +['ĠfÃŃs', 'ico'] +['×¢', 'ס'] +['עס', '×ķ×§'] +['سÙĦ', 'ÙĪÙĥ'] +['Ġا', 'ØŃد'] +['Ñĩ', 'ÑijÑĤ'] +['×ĸ׼', '×Ķ'] +['Ġl', 'á»ĩnh'] +['ĠÙĪ', 'ØŃت'] +['ĠÙĪØŃØª', 'Ùī'] +['à¸Ħวาม', 'สามารà¸ĸ'] +['à¸Ńยูà¹Ī', 'à¹ģลà¹īว'] +['à¸ģาร', 'à¹Ģà¸Ķิà¸Ļà¸Ĺาà¸ĩ'] +['تخ', 'ذ'] +['צ×Ļ', '×ķ×ĵ'] +['ĠاÙĦØ£', 'س'] +['ĠاÙĦأس', 'ÙĩÙħ'] +['Ġt', 'á»ĩ'] +['ãģ£ãģ¦', 'ãģĦãģ¦'] +['สร', 'ุ'] +['สรุ', 'à¸Ľ'] +['Ġком', 'ÑĦ'] +['ĠкомÑĦ', 'оÑĢÑĤ'] +['ìĺ¤', 'ëĬĶ'] +['ĠÑĢаз', 'в'] +['ĠÑĢазв', 'ива'] +['л', 'анд'] +['h', 'änge'] +['ĠبÙĨ', 'سبة'] +['à¹Ģà¸Ĥ', 'ียว'] +['עצ', '×Ŀ'] +['Ġ׾', '×ľ×Ľ×ª'] +['Ñģо', 'ÑĨиалÑĮн'] +['Ġëĭ¤ìĿĮ', 'ê³¼'] +['Ġרש', '×ķ×ŀ'] +['×ŀר', '×Ĺ×ij'] +['س', 'ÙĤØ·'] +['Ġalan', 'ı'] +['ĠÄij', 'á»ĩ'] +['é£Łãģ¹', 'ãĤĭ'] +['à¸Ķ', 'ึà¸ĩ'] +['Ġgegen', 'über'] +['ĠبÙĩ', 'ذÙĩ'] +['à¸ĸืà¸Ń', 'à¹Ģà¸Ľà¹ĩà¸Ļ'] +['ëķ', 'ħ'] +['à¸Ħà¸Ļ', 'à¹Ħà¸Ĺย'] +['ãĤ¢', 'ãĤ¦'] +['ãĤ¢ãĤ¦', 'ãĥĪ'] +['ศ', 'ัà¸ģ'] +['ศัà¸ģ', 'à¸Ķิ'] +['ศัà¸ģà¸Ķิ', 'à¹Į'] +['ÙĤÙĪ', 'اÙĨ'] +['ÙĤÙĪØ§ÙĨ', 'ÙĬÙĨ'] +['Ġhá»Ļ', 'p'] +['ãģªãģıãģª', 'ãģ£ãģ¦'] +['Ġ×IJ', '×ŀ׳'] +['Ġ×IJ×ŀ׳', '×Ŀ'] +['à¹Ģà¸ķ', 'ืà¸Ńà¸Ļ'] +['ĠзавиÑģ', 'им'] +['ĠзавиÑģим', 'оÑģÑĤи'] +['ת', '×Ļ×IJ'] +['ת×Ļ×IJ', '×ķר'] +['å§ĭãĤģ', 'ãģŁ'] +['Ġng', 'á»į'] +['Ġngá»į', 't'] +['íĴ', 'į'] +['ê³¼', 'ìŀ¥'] +['Ġb', 'ại'] +['ãģ§ãģį', 'ãģ¦'] +['Ġcomeç', 'ar'] +['à¸Ľà¸£', 'าà¸ģ'] +['à¸Ľà¸£à¸²à¸ģ', 'à¸ı'] +['Ġгод', 'Ñĭ'] +['м', 'еÑģ'] +['ĠاÙĦÙħست', 'ÙĪÙī'] 
+['ĠÑģам', 'Ñĭе'] +['л', 'леÑĢ'] +['ãģ£ãģ¦ãģĹãģ¾', 'ãģĦãģ¾ãģĻ'] +['ãģ¨ãģ®', 'ãģĵãģ¨'] +['bi', 'ó'] +['à¸ģล', 'à¹Īà¸Ńà¸ĩ'] +['ĠاÙĦز', 'ÙĪØ¬'] +['ãģ«è¡Į', 'ãģ£ãģŁ'] +['à¸Ħà¹Ī', 'à¸Ńà¸Ļ'] +['à¸Ħà¹Īà¸Ńà¸Ļ', 'à¸Ĥà¹īาà¸ĩ'] +['ĠbaÄŁ', 'l'] +['ĠbaÄŁl', 'ant'] +['ĠbaÄŁlant', 'ı'] +['確', 'ãģĭ'] +['確ãģĭ', 'ãģ«'] +['ãĥľ', 'ãĥ¼ãĥ«'] +['çµĤ', 'ãĤıãĤĬ'] +['ש', '×ŀר'] +['à¸Ĺีà¹Ī', 'สามารà¸ĸ'] +['ÙĦ', 'زÙħ'] +['д', 'аеÑĤÑģÑı'] +['รัà¸ļ', 'à¸Ľà¸£à¸°'] +['รัà¸ļà¸Ľà¸£à¸°', 'à¸Ĺาà¸Ļ'] +['å¤ī', 'ãĤıãĤĬ'] +['ï¼', '¢'] +['ĠìĺĪìĪĺ', 'ëĭĺ'] +['ãĤĪãģĨ', 'ãģ¨'] +['มัà¸ģ', 'à¸Īะ'] +['ĠH', 'ương'] +['ÙĨ', 'Ù쨰'] +['×ŀ×ĵ', '×ĵ'] +['ĠìĿ¸', 'ìłķ'] +['Ñħод', 'иÑĤÑĮ'] +['ĠзавиÑģ', 'иÑĤ'] +['×ķ×ĵ', '×Ļ×¢'] +['ãģĵãģ¨ãģĮ', 'ãģĤãĤĬãģ¾ãģĻ'] +['ع', 'راÙĤ'] +['سط', 'ØŃ'] +['à¸ģำ', 'à¹Ħร'] +['ëĵ¤', 'ëıĦ'] +['×Ļצ', '×Ļר×Ķ'] +['ãģĨ', 'ãģĵãģ¨'] +['ÙĦا', 'ØŃÙĤ'] +['ãģĦ', 'ãĤĮãģ°'] +['ĠиÑģполÑĮз', 'ÑĥÑİÑĤ'] +['ĠB', 'ợi'] +['Ġשק׾', '×Ļ×Ŀ'] +['ÑĨи', 'кл'] +['ÐIJ', 'Ðŀ'] +['Ġ×ijש', '׳×Ķ'] +['ÙĨØ´', 'Ø·'] +['Ġש', '×Ļ׳×ķ×Ļ'] +['Ġש×Ļ׳×ķ×Ļ', '×Ļ×Ŀ'] +['Ġpobl', 'ación'] +['ĠH', 'ưng'] +['ระ', 'ว'] +['ระว', 'ัà¸ĩ'] +['رÙĬاض', 'Ø©'] +['ر', 'صد'] +['تÙĤ', 'ÙĦÙĬ'] +['تÙĤÙĦÙĬ', 'د'] +['Ġülk', 'em'] +['Ġülkem', 'iz'] +['à¸Ĭ', 'ะ'] +['ãĤ¯ãĥª', 'ãĥ¼ãĥł'] +['èģŀ', 'ãģĦãģŁ'] +['Ġwa', 'ż'] +['Ġważ', 'ne'] +['ê±°', 'ëĵł'] +['ê±°ëĵł', 'ìļĶ'] +['×ŀ×IJ', '×ij×§'] +['×Ĺ×ĵ', 'ש×ķת'] +['ĠW', 'roc'] +['ĠWroc', 'ÅĤaw'] +['ĠKü', 'ltür'] +['s', 'ist'] +['sist', 'ência'] +['×¢×ĸר', '×Ķ'] +['Ġg', 'ương'] +['รà¹īาà¸Ļ', 'à¸Ħà¹īา'] +['ĠÙĪØ£', 'ÙĪØ¶ØŃ'] +['ánd', 'ose'] +['ãĤ·', 'ãĥ¼ãĥ³'] +['×IJ׳', 'ר×Ĵ'] +['×IJ׳ר×Ĵ', '×Ļ×Ķ'] +['ãģªãģĦ', 'ãģ§ãģĻ'] +['Ġkh', 'á»§ng'] +['Ġ문', 'ìĦľ'] +['Ġ×ij', '×ĵ×ijר'] +['×ĵ', '×Ļ×ķ'] +['×ĵ×Ļ×ķ', '×ķ×Ĺ'] +['Ġré', 'gl'] +['ÙħÙĪ', 'اد'] +['об', 'оÑĢ'] +['обоÑĢ', 'оÑĤ'] +['Ġ×Ķ', '×ij׾'] +['Ġ×Ķ×ij׾', '×ķ×Ĵ'] +['ØŃ', 'اÙħ'] +['ĠاÙĦع', 'اص'] +['ĠاÙĦعاص', 'ÙħØ©'] +['пеÑĢ', 'аÑĤоÑĢ'] +['ت', 'Ø®ÙĦ'] +['تخÙĦ', 'ص'] +['ãģŁãģł', 'ãģĹ'] +['ت', 'سÙħ'] +['à¹Ĥรà¸ĩ', 'à¸ŀ'] +['à¹Ĥรà¸ĩà¸ŀ', 'ยา'] +['à¹Ĥรà¸ĩà¸ŀยา', 'à¸ļาล'] +['ĠY', 'ük'] 
+['ĠYük', 'sek'] +['Ġש', '׳×Ļת'] +['Ġש׳×Ļת', 'ף'] +['liÄŁ', 'e'] +['Ġפ', 'ת'] +['Ġפת', '×ķ×Ĺ'] +['Ġbe', 'ÄŁ'] +['ĠbeÄŁ', 'en'] +['Ġ×ŀ', '×ķר'] +['Ġ×ŀ×ķר', '׼×ij'] +['Ġرس', 'اÙĦØ©'] +['íĨµ', 'ìĭł'] +['Ġaval', 'ia'] +['Ġavalia', 'ções'] +['Ġman', 'h'] +['Ġmanh', 'ã'] +['Ġìķ', 'ŀ'] +['Ġìķŀ', 'ìľ¼ë¡ľ'] +['ÙĤ', 'تر'] +['ÙĤتر', 'ØŃ'] +['à¹Ģà¸ģ', 'ืà¸Ń'] +['à¹Ģà¸ģืà¸Ń', 'à¸ļ'] +['Ġpropos', 'é'] +['Ø£', 'Ùħا'] +['Ø£Ùħا', 'ÙĥÙĨ'] +['ĠÐŀ', 'Ðŀ'] +['ĠÐŀÐŀ', 'Ðŀ'] +['ÙħÙĤ', 'ار'] +['ÙħÙĤار', 'ÙĨØ©'] +['ëĦ', 'IJ'] +['ãģĦãģŁãģł', 'ãģı'] +['ÙĤ', 'ÙĬÙĦ'] +['Ġна', 'ÑĪиÑħ'] +['ãĤ«', 'ãĥĥãĥĹ'] +['×Ĺ׾', 'ת'] +['Ġëĭ¤', 'ë§Į'] +['à¸Ĺัà¹Īว', 'à¹Ĥลà¸ģ'] +['ãĥį', 'ãĤ¿'] +['ØŃس', 'اس'] +['ãģ«ãģª', 'ãĤĮ'] +['ج', 'ائ'] +['جائ', 'زة'] +['é', 'change'] +['é', 'conom'] +['économ', 'ie'] +['Т', 'Ðĺ'] +['סת', '׼׾'] +['à¸Ĺัà¹īà¸ĩ', 'สà¸Ńà¸ĩ'] +['ĠاÙĦØ®', 'اÙħ'] +['ĠاÙĦخاÙħ', 'س'] +['×§', '×ĺ×¢'] +['au', 'waż'] +['à¸ľà¸¹à¹ī', 'à¸Ĭาย'] +['à¹ģà¸Ľà¸¥', 'à¸ģ'] +['åIJĮæĻĤ', 'ãģ«'] +['зн', 'аниÑı'] +['ãģĦãģŁãģł', 'ãģįãģ¾ãģĹãģŁ'] +['Ġ×ŀ×ij', '׾×Ļ'] +['à¸Ĥà¸Ń', 'à¹ĥหà¹ī'] +['ĠاÙĦت', 'ربÙĬØ©'] +['Ġdécou', 'vert'] +['Ġżyc', 'iu'] +['apr', 'ès'] +['Ġy', 'ab'] +['Ġyab', 'anc'] +['Ġyabanc', 'ı'] +['ĠbaÅŁ', 'layan'] +['ìĹĪ', 'ëįĺ'] +['Ġhes', 'abı'] +['Ġë§Į', 'ìķ½'] +['ë§', 'Īëĭ¤'] +['ĠTh', 'ánh'] +['ãĥ´', 'ãĤ¡'] +['à¸Ľà¸£à¸±à¸ļ', 'à¸Ľà¸£'] +['à¸Ľà¸£à¸±à¸ļà¸Ľà¸£', 'ุà¸ĩ'] +['ĠM', 'ặc'] +['à¹Ģหà¸ķุ', 'à¸ľà¸¥'] +['ĠÐij', 'ез'] +['Ġcapac', 'itÃł'] +['ÅĤe', 'ÅĽ'] +['ĠпÑĢе', 'им'] +['ĠпÑĢеим', 'ÑĥÑīеÑģÑĤв'] +['ĠÅļ', 'wiÄĻt'] +['Ġpubli', 'é'] +['×ŀ×¢', 'צ×ij'] +['Ùħشار', 'Ùĥات'] +['à¸łà¸²', 'ษ'] +['à¸łà¸²à¸©', 'ี'] +['Ġdeux', 'ième'] +['ĠÙħØŃ', 'اÙ쨏'] +['ĠÙħØŃاÙ쨏', 'Ø©'] +['ĠSch', 'ön'] +['ï½', '¤'] +['Ġ×Ķ', '×ij×¢'] +['Ġ×Ķ×ij×¢', '×Ļ×Ķ'] +['ĠÙĪØ§ÙĦ', 'ÙĦÙĩ'] +['è¨Ģ', 'ãģ£ãģŁ'] +['à¸ķ', 'à¹īาà¸Ļ'] +['วร', 'รà¸ĵ'] +['à¸Ĺิ', 'ศ'] +['ĠbaÅŁ', 'ına'] +['Ġmog', 'ÄĻ'] +['ש', '×Ļפ×ķר'] +['ĠÙĪ', 'عد'] +['ĠÙĪØ¹Ø¯', 'Ùħ'] +['Ġhistó', 'rico'] +['Ġk', 'ısı'] +['ĠìĿ´', 'ê²Į'] +['ĠPol', 'ÃŃtica'] +['ĠÑģиÑĤÑĥ', 'аÑĨии'] +['ĠkoÅĦ', 
'ca'] +['×ij×ĵ', '×Ļ×§×Ķ'] +['ĠاÙĦسÙĬ', 'ارات'] +['ãģªãĤī', 'ãģ°'] +['ãĤµ', 'ãĥ©'] +['ãĤĭãģĵãģ¨ãģĮãģ§ãģį', 'ãĤĭ'] +['Ġdecis', 'ão'] +['×ķ', '×ķ×ĵ'] +['lä', 'ss'] +['läss', 'ig'] +['Ġ׾', '×Ļשר×IJ׾'] +['ĠÙĬ', 'أتÙĬ'] +['ר', '×ķ×ĸ'] +['ö', 'ÄŁ'] +['Ã¶ÄŁ', 'ret'] +['Ã¶ÄŁret', 'im'] +['Ġд', 'ек'] +['Ġдек', 'аб'] +['Ġдекаб', 'ÑĢÑı'] +['Ġש', '×Ĺ×ķר'] +['ãģ¦ãģıãĤĮ', 'ãģŁ'] +['عب', 'ارة'] +['Ġélect', 'rique'] +['ĠاÙĦتÙĨ', 'ÙħÙĬØ©'] +['جر', 'Ùī'] +['ĠìĪĺ', 'íĸī'] +['à¸Ĺ', 'ู'] +['ĠÑĢе', 'алÑĮно'] +['Ñģп', 'оÑģоб'] +['à¸Ħล', 'à¹īาย'] +['Ġس', 'عÙĪØ¯'] +['ön', 'ü'] +['ĠÙģ', 'ÙħÙĨ'] +['تÙĥ', 'ÙĪ'] +['تÙĥÙĪ', 'ÙĬÙĨ'] +['ĠкаÑĩ', 'еÑģÑĤво'] +['ĠконÑĤ', 'ак'] +['ĠконÑĤак', 'ÑĤ'] +['Ġsöz', 'leÅŁme'] +['à¸Ń', 'à¹īาà¸ĩ'] +['Ġت', 'ÙĪÙģ'] +['ĠتÙĪÙģ', 'ÙĬر'] +['×Ķ×ĸ', '×ĵ'] +['×Ķ×ĸ×ĵ', '×ŀ׳×ķת'] +['ĠØ·ÙĪÙĬÙĦ', 'Ø©'] +['Ġtér', 'mino'] +['Ġ×IJ', '×Ļפ×Ķ'] +['ãĥĵ', 'ãĥ«'] +['ส', 'à¹Ĥม'] +['สà¹Ĥม', 'สร'] +['ĠاÙĦ', 'اث'] +['ĠاÙĦاث', 'ÙĨÙĬÙĨ'] +['ев', 'иÑĩ'] +['Ġopin', 'ión'] +['à¸Ľ', 'วà¸Ķ'] +['åı¤', 'ãģĦ'] +['ร', 'à¹Īา'] +['ĠB', 'iaÅĤ'] +['ĠÑģÑĤ', 'ал'] +['ĠÑģÑĤал', 'о'] +['ó', 'logo'] +['ĠìķĦ', 'ëĭĪëĭ¤'] +['Ġ×IJ', '×Ļת'] +['Ġ×IJ×Ļת', '×ķ'] +['à¹Ģหà¹ĩà¸Ļ', 'วà¹Īา'] +['à¸ļ', 'ารà¹Į'] +['çĦ', '¼'] +['çĦ¼', 'ãģį'] +['ĠìĿ´ìļ©', 'ìŀIJ'] +['ĠнекоÑĤоÑĢ', 'Ñĭе'] +['ks', 'z'] +['ksz', 'taÅĤ'] +['ksztaÅĤ', 'c'] +['ãĤŃãĥ£', 'ãĥĥãĤ·'] +['ãĤŃãĥ£ãĥĥãĤ·', 'ãĥ³ãĤ°'] +['Ġro', 'ÅĽ'] +['ĠroÅĽ', 'lin'] +['ÑĢаж', 'а'] +['×ij׳×Ļ', '×Ļ×Ķ'] +['à¸Ľà¸£', 'สิ'] +['à¸Ľà¸£à¸ªà¸´', 'à¸ķ'] +['Ġgörd', 'ü'] +['×ŀ׳×Ķ', '×Ļ×Ĵ'] +['å¤īãĤı', 'ãģ£ãģ¦'] +['Ġ×IJ', '×Ķ'] +['Ġ×IJ×Ķ', '×ijת×Ļ'] +['à¹Ģร', 'à¹Īà¸ĩ'] +['Ġön', 'ünde'] +['Ġê·¸', 'ëĥ¥'] +['пол', 'иÑĤ'] +['полиÑĤ', 'иÑĩеÑģк'] +['ãĥ¡', 'ãĥĩãĤ£'] +['ãĥ¡ãĥĩãĤ£', 'ãĤ¢'] +['ĠDet', 'ay'] +['ĠDetay', 'lı'] +['ĠاÙĦصÙģ', 'ØŃØ©'] +['à¸ģาร', 'à¹Ģà¸ĩิà¸Ļ'] +['Ġìµľ', 'ê·¼'] +['׼', 'ש׾'] +['ï¼', '©'] +['вÑĪ', 'его'] +['íķĺ', 'ìĭ¤'] +['ĠÐŃ', 'ÑĤ'] +['ĠÐŃÑĤ', 'оÑĤ'] +['ส', 'ื'] +['สื', 'à¸ļ'] +['Ġng', 'ừng'] +['ĠдокÑĥменÑĤ', 'ов'] +['дав', 'аÑĤÑĮ'] +['ĠاÙĦشخص', 'ÙĬØ©'] +['Ġצ', '×¢×Ļר'] 
+['در', 'Ùĥ'] +['س', 'ØŃب'] +['à¹Ħมà¹Ī', 'à¸Ħà¹Īà¸Ńย'] +['Ġ×Ķ×ŀ×§', '×ķ×ŀ×Ļ'] +['สัà¹Īà¸ĩ', 'à¸ĭืà¹īà¸Ń'] +['Ġê·¸ê²ĥ', 'ìĿĦ'] +['ãģĤãĤĭ', 'ãģĦ'] +['ãģĤãĤĭãģĦ', 'ãģ¯'] +['×IJ×ķ×ĺ', '×ķ×ij'] +['×IJ×ķ×ĺ×ķ×ij', '×ķס'] +['к', 'ÑĨион'] +['ĠÐľ', 'ожно'] +['ãģı', 'ãģł'] +['ãģıãģł', 'ãģķ'] +['ĠинÑĦоÑĢм', 'аÑĨиÑı'] +['ï»', 'Ł'] +['Ġìŀij', 'ìĹħ'] +['Ġ×Ļ', '×ķסף'] +['Ø¥', 'دارة'] +['ĠاÙĦØŃ', 'اج'] +['×ł×¡', '×Ļ×¢×Ķ'] +['из', 'аÑĨиÑı'] +['×IJ׾', '×ij'] +['×IJ׾×ij', '×ķ×Ŀ'] +['п', 'ед'] +['Ġ×§×ĺ', '׳×Ķ'] +['ĠÙĨÙ쨳', 'Ùĩا'] +['ĠMinist', 'ério'] +['Ġп', 'ен'] +['Ġпен', 'Ñģи'] +['ãĥIJ', 'ãĥ©ãĥ³ãĤ¹'] +['Ġ×Ķת', '×ķר×Ķ'] +['Ġt', 'ạm'] +['ĠìĹŃ', 'ìĭľ'] +['ï½', '¡'] +['Ġth', 'á»±'] +['Ġ', 'ısı'] +['ì»', '¨'] +['ãģĹãģ£ãģĭãĤĬ', 'ãģ¨'] +['Ġx', 'ưa'] +['Ġc', 'ặp'] +['×Ĺ', '×Ļ×ij×ķר'] +['วัà¸Ĵà¸Ļ', 'à¸ĺรรม'] +['st', 'är'] +['stär', 'ke'] +['ĠÑģам', 'Ñĭй'] +['p', 'isa'] +['pisa', 'Äĩ'] +['ĠoluÅŁ', 'an'] +['ĠاÙĦØ¥', 'ÙħاÙħ'] +['ĠcÄĥ', 'ng'] +['Ġgü', 'nl'] +['Ġgünl', 'ük'] +['Ġ׳ש', '×IJר'] +['Ġkhi', 'á»ĥn'] +['ç¶ļ', 'ãģijãĤĭ'] +['stit', 'ución'] +['Ġcapac', 'ité'] +['Ġj', 'aki'] +['Ġjaki', 'ÅĽ'] +['вÑĪ', 'иÑģ'] +['вÑĪиÑģ', 'ÑĮ'] +['פע×ķ׾', '×ķת'] +['ĠØŃ', 'ÙĬات'] +['ĠØŃÙĬات', 'Ùĩ'] +['Ġник', 'огда'] +['ÐĽ', 'Ь'] +['Ġ×Ķ×¢', '×ķ×ij'] +['Ġ×Ķ×¢×ķ×ij', '×ĵ×Ķ'] +['Ġch', 'Ãło'] +['หลาย', 'à¹Ĩ'] +['ĠÑı', 'н'] +['ĠÑıн', 'ваÑĢ'] +['ĠÑıнваÑĢ', 'Ñı'] +['à¸Īำà¹Ģà¸Ľà¹ĩà¸Ļ', 'à¸ķà¹īà¸Ńà¸ĩ'] +['Ġhö', 'her'] +['ãģķãĤĮãģ¦', 'ãģĦãģŁ'] +['สà¸ĩ', 'สั'] +['สà¸ĩสั', 'ย'] +['ĠاÙĦ', 'اس'] +['ĠاÙĦاس', 'ÙĦاÙħ'] +['ĠاÙĦØ´', 'Ùħس'] +['สà¸ĸาà¸Ļ', 'ี'] +['ãĤ¯ãĥ©', 'ãĤ¹'] +['à¸ŀร', 'ร'] +['à¸ŀรร', 'à¸Ħ'] +['p', 'õ'] +['põ', 'e'] +['Ġpor', 'ém'] +['à¸Ľà¸£à¸°', 'สà¸ĩ'] +['à¸Ľà¸£à¸°à¸ªà¸ĩ', 'à¸Ħà¹Į'] +['powied', 'zie'] +['powiedzie', 'Äĩ'] +['Ġмог', 'Ñĥ'] +['Ġж', 'ел'] +['Ġжел', 'ез'] +['ĠاÙĦØ«', 'ÙĤ'] +['ĠاÙĦØ«ÙĤ', 'اÙģÙĬ'] +['ĠпÑĢав', 'ило'] +['Ġgdy', 'ż'] +['פש', '×ķ×ĺ'] +['ÑĢабоÑĤ', 'ка'] +['ĠÙĥ', 'رة'] +['Ø´', 'دد'] +['Ùħار', 'Ùĥ'] +['Ùħ', 'ÙĥØ©'] +['Ġпод', 'пиÑģ'] +['×ĺ×ķ', '×ķ×Ĺ'] +['ĠÅĽ', 'c'] +['ĠÅĽc', 'ian'] +['Ġر', 
'جاÙĦ'] +['Ġ×ª×ľ', '×ķ×Ļ'] +['и', 'ÑĪ'] +['иÑĪ', 'ÑĮ'] +['Ġmé', 'dec'] +['Ġmédec', 'in'] +['ëįĶ', 'ëĿ¼ëıĦ'] +['ĠÑĤеб', 'Ñı'] +['Ġ׾×Ķ', '×ķס×Ļ×£'] +['ãģĬ', '話'] +['Ġà¹ģà¸ķà¹Ī', 'à¸ģà¹ĩ'] +['د', 'اÙģ'] +['داÙģ', 'ع'] +['ĠC', 'ùng'] +['ãĥ»ãĥ»', 'ãĥ»ãĥ»'] +['ê¶', 'ģ'] +['Ġdeber', 'ÃŃa'] +['หà¸Ļà¹Īวย', 'à¸ĩาà¸Ļ'] +['Ġva', 'ÌĢ'] +['Ġעצ', '×ŀ'] +['Ġעצ×ŀ', '×Ŀ'] +['à¹Ģà¸Ĭืà¹Īà¸Ń', 'วà¹Īา'] +['שק', '×¢'] +['Ġ×Ķ', '׼×ķ׾'] +['Ġ×Ķ׼×ķ׾', '׾'] +['ни', 'бÑĥд'] +['нибÑĥд', 'ÑĮ'] +['ĠëĦĪ', 'íĿ¬'] +['Ġоб', 'ÑĢаÑī'] +['ĠобÑĢаÑī', 'а'] +['Ġ×¢×ij×ķ×ĵ', 'ת'] +['ĠاÙĦÙħÙĨت', 'خب'] +['ıy', 'ord'] +['ıyord', 'u'] +['ÙĪ', 'ذ'] +['×Ĺש', '×Ļ×ij×ķת'] +['Ġ×Ķ×¢', '×Ļ×§'] +['Ġ×Ķ×¢×Ļ×§', 'ר×Ļ'] +['ì¢', 'Į'] +['ยุ', 'à¹Ĥร'] +['ยุà¹Ĥร', 'à¸Ľ'] +['Ġа', 'пÑĢ'] +['ĠапÑĢ', 'елÑı'] +['sz', 'ed'] +['szed', 'ÅĤ'] +['д', 'он'] +['à¹Ģà¸ķิ', 'à¸ļ'] +['à¹Ģà¸ķิà¸ļ', 'à¹Ĥà¸ķ'] +['кол', 'о'] +['Ġkażde', 'j'] +['å¸', '°'] +['帰', 'ãĤĬ'] +['Ġмил', 'ли'] +['Ġмилли', 'он'] +['ç¾İåij³', 'ãģĹãģĦ'] +['ت', 'ÙĤار'] +['تÙĤار', 'ÙĬر'] +['ĠìĿ´', '루'] +['ĠìĿ´ë£¨', 'ìĸ´'] +['Ġsprzeda', 'ż'] +['×Ķ', '×ķצ×IJ×ķת'] +['ãĤ¢ãĤ¯', 'ãĤ»'] +['ãĤ¢ãĤ¯ãĤ»', 'ãĤ¹'] +['ר', '×ķ×¥'] +['ĠгоÑģÑĥдаÑĢÑģÑĤв', 'енн'] +['Ø£', 'ØŃÙĥ'] +['Ø£ØŃÙĥ', 'اÙħ'] +['ĠoluÅŁ', 'u'] +['ĠA', 'ç'] +['ĠAç', 'ık'] +['ãĤ¸', 'ãĥ¼'] +['ç´ł', 'æĻ´'] +['ç´łæĻ´', 'ãĤīãģĹãģĦ'] +['Ġ×ijש×ij', '×ķ×¢'] +['ب', 'ذ'] +['بذ', 'ÙĦ'] +['สา', 'à¹Ģหà¸ķุ'] +['Ġpoz', 'osta'] +['Ġpozosta', 'ÅĤ'] +['ØŃر', 'Ùħ'] +['Ġimport', 'ância'] +['leÅŁtir', 'me'] +['Ġд', 'ÑĢев'] +['Ġmó', 'vil'] +['ĠA', 'ynı'] +['Ġна', 'лог'] +['Ġналог', 'ов'] +['Ġ×Ĺ', '×Ļפ×Ķ'] +['ĠÑĦоÑĢм', 'Ñĥ'] +['à¸Ĺà¸Ķ', 'สà¸Ńà¸ļ'] +['ĠksiÄħż', 'ki'] +['Ġma', 'ÅĤe'] +['Ùħس', 'Ø£ÙĦ'] +['ÙħسأÙĦ', 'Ø©'] +['ï¼¾', 'ï¼¾'] +['ç', 'ãeste'] +['év', 'iter'] +['Ġкон', 'ÑģÑĤÑĢÑĥк'] +['ĠконÑģÑĤÑĢÑĥк', 'ÑĨи'] +['ï¾', 'ŀ'] +['Ġת×ķ׼', '׳'] +['ãĤ¹ãĥĪ', 'ãĥ¬ãĤ¹'] +['ĠاÙĦاÙĤتصاد', 'ÙĬ'] +['×ŀ×ĵ', '×Ļ'] +['Ġw', 'ÅĤad'] +['ĠwÅĤad', 'z'] +['Ø®', 'ÙĪÙģ'] +['ĠмаÑĤеÑĢиал', 'ов'] +['ãģ¨ãģ£ãģ¦', 'ãĤĤ'] +['Ġznaj', 'du'] +['Ġznajdu', 'jÄħ'] +['Ùģ', 'ئة'] 
+['ãģ©ãģ®', 'ãĤĪãģĨãģª'] +['æĬij', 'ãģĪ'] +['׳', '×Ĺ׾'] +['Ġdü', 'ny'] +['Ġdüny', 'an'] +['Ġdünyan', 'ın'] +['гÑĢ', 'ани'] +['гÑĢани', 'Ñĩ'] +['Ġ×Ķש׾', '×Ļש×Ļ'] +['Ġ×Ķ×IJ', 'ש'] +['åıĬ', 'ãģ³'] +['ìĭŃ', 'ìĭľ'] +['ìĭŃìĭľ', 'ìĺ¤'] +['Ġдол', 'л'] +['Ġдолл', 'аÑĢ'] +['Ġпов', 'ÑĤоÑĢ'] +['Ġ×Ĺ', '×Ļ׳×Ŀ'] +['ת', 'פת×Ĺ'] +['Ñĥв', 'ели'] +['Ñĥвели', 'Ñĩен'] +['ãĤ«', 'ãĥª'] +['raw', 'id'] +['rawid', 'ÅĤow'] +['×ķ', '×ķ׾'] +['ãĥŁ', 'ãĥ¥'] +['ì½', 'ĺ'] +['ĠBy', 'ÅĤ'] +['Ðľ', 'ÐIJ'] +['ع', 'ÙIJ'] +['ĠÑģовеÑĢ', 'ÑĪ'] +['ĠÑģовеÑĢÑĪ', 'енно'] +['Ġм', 'ой'] +['Ġ×ķ׾×IJ', '×Ĺר'] +['æħ', '£'] +['æħ£', 'ãĤĮ'] +['ØŃ', 'اÙ쨏'] +['Ġ무', 'ë£Į'] +['à¸Ħà¸ĵะ', 'à¸ģรรม'] +['à¸Ħà¸ĵะà¸ģรรม', 'à¸ģาร'] +['Ġìĸ´', 'ëĶĶ'] +['Ġdif', 'eren'] +['Ġdiferen', 'ça'] +['ĠاÙĦØ£', 'ساس'] +['ĠاÙĦأساس', 'ÙĬØ©'] +['Ġ׾×IJ×Ĺר', '×ķ׳×Ķ'] +['ê·', 'ł'] +['Ġ×Ķש׳×Ļ', '×Ļ×Ķ'] +['ìľĦìĽIJ', 'ìŀ¥'] +['ลุ', 'à¸ģ'] +['ç', 'iler'] +['Ġ×Ķ×IJ', '׾×ķ'] +['èģŀ', 'ãģı'] +['Ġ×ķ×IJ', 'פ×Ļ׾×ķ'] +['ĠÑĢе', 'ализ'] +['ĠÑĢеализ', 'аÑĨи'] +['ระยะ', 'à¹Ģวลา'] +['Ġجدا', 'Ùĭ'] +['تب', 'اع'] +['Ġveh', 'ÃŃculo'] +['Ġдол', 'г'] +['à¸Ľà¸£à¸´', 'มาà¸ĵ'] +['ì¦', 'IJ'] +['Ġ׾', '×ŀ×§×ķ×Ŀ'] +['ĠìĤ¬', 'ì§Ħ'] +['à¸Ĭ', 'à¹īา'] +['Ġ×ŀ×¢', '×ķ׾×Ķ'] +['Ġgö', 'rm'] +['Ġgörm', 'ek'] +['ĠÙĪÙĩ', 'ذÙĩ'] +['пеÑĢ', 'в'] +['пеÑĢв', 'ÑĭÑħ'] +['ê·¸', 'ëŀĺ'] +['ĠاÙĦبر', 'ÙĬØ·'] +['ĠاÙĦبرÙĬØ·', 'اÙĨÙĬ'] +['ĠиÑİ', 'нÑı'] +['ĠÐĵ', 'оÑĢ'] +['Ġ׾', 'ש׾×Ŀ'] +['ÐIJ', 'ÐĿ'] +['Ġназ', 'наÑĩен'] +['о', 'оÑĢ'] +['ооÑĢ', 'Ñĥж'] +['Ġöz', 'elli'] +['Ġözelli', 'ÄŁi'] +['Ġни', 'же'] +['ç¶ļ', 'ãģijãģ¦'] +['Ġа', 'ÑĢенд'] +['Ġkat', 'ılı'] +['Ġkatılı', 'm'] +['ĠØ¥', 'Ø·ÙĦاÙĤ'] +['ĠÙĪØ¥', 'ذا'] +['Ġок', 'ÑĤÑı'] +['ĠокÑĤÑı', 'бÑĢÑı'] +['à¹Ĥà¸ķ', 'à¹'] +['à¹Ĥà¸ķà¹', 'Ĭ'] +['à¹Ĥà¸ķà¹Ĭ', 'ะ'] +['Ġolduk', 'ları'] +['Ùħ', 'ÙĪÙĤع'] +['ëĤ', '©'] +['ã썿ĢĿ', 'ãģ£ãģ¦ãģĦãĤĭ'] +['Ġש', '×Ļ׼×ķ׾'] +['วา', 'à¸Ķ'] +['س', 'ÙĬÙĦ'] +['à¸Ĥ', 'วั'] +['à¸Ĥวั', 'à¸į'] +['تØŃ', 'ÙĥÙħ'] +['ì', 'ĤŃ'] +['Ġconna', 'ît'] +['׳', 'פת×Ĺ'] +['Ġch', 'ặ'] +['Ġchặ', 'n'] +['ĠÙħ', 'ØŃÙħ'] +['ĠÙħØŃÙħ', 'ÙĪØ¯'] +['ãģ', '´'] 
+['ĠпÑĢодÑĥк', 'ÑĨии'] +['зд', 'ÑĢав'] +['ãģĶ', 'è¦'] +['ãģĶè¦', '§'] +['×IJ×ij', '×IJ'] +['Ġvé', 'ritable'] +['ĠØ·', 'ÙģÙĦ'] +['ãĥĪãĥ©', 'ãĥĸãĥ«'] +['ê³', '¡'] +['Ġת', '×ŀ×ķ׳×Ķ'] +['Ġki', 'ên'] +['ĠÙĤ', 'ادر'] +['Ø¥ÙĤ', 'ÙĦÙĬÙħ'] +['ĠпÑĢед', 'пÑĢи'] +['ĠпÑĢедпÑĢи', 'ÑıÑĤиÑı'] +['Ġb', 'Äĥng'] +['Ġay', 'ında'] +['Ġg', 'ấp'] +['еÑħ', 'ал'] +['Ġgi', 'Ãłnh'] +['Ġд', 'ав'] +['Ġдав', 'но'] +['ìĺĢ', 'ëĭ¤'] +['à¸Ļัà¸ģ', 'à¹Ģà¸ķ'] +['à¸Ļัà¸ģà¹Ģà¸ķ', 'ะ'] +['Ùħست', 'شار'] +['ست', 'راتÙĬج'] +['ستراتÙĬج', 'ÙĬ'] +['رÙħ', 'ز'] +['Ġt', 'Ä©nh'] +['ë¡', 'Ń'] +['ĠÑĩ', 'еÑĤ'] +['ĠÑĩеÑĤ', 'Ñĭ'] +['ĠÑĩеÑĤÑĭ', 'ÑĢе'] +['ĠEnt', 'ão'] +['Ġص', 'غ'] +['Ġصغ', 'ÙĬرة'] +['×ij×Ļ×ĺ', '×ķ׾'] +['خط', 'ÙĪØ·'] +['ĠÑĢазвиÑĤ', 'ие'] +['Ġamacı', 'yla'] +['à¸Ĺี', 'วี'] +['Ġо', 'ÑģÑĤ'] +['ĠоÑģÑĤ', 'алÑĮн'] +['ש×ķ׾×Ĺ', 'ף'] +['Ġ׼', '׳×Ļס'] +['Ġ׼׳×Ļס', '×Ķ'] +['Ġd', 'áºŃy'] +['ĠyaÅŁ', 'ayan'] +['Ġ×ŀ×Ķ', '×ķ×ķ×Ķ'] +['ĠÑĥ', 'Ñģи'] +['ĠÑĥÑģи', 'ли'] +['×ŀ', 'פ×Ļ'] +['ĠпÑĢовед', 'ениÑı'] +['Ġر', 'ب'] +['Ġرب', 'Ùħا'] +['ĠاÙĦØ£', 'ÙĪØ³Ø·'] +['Ġìľł', 'ì§Ģ'] +['Ġprac', 'ownik'] +['Ġpracownik', 'ów'] +['×ŀס', '×ķרת'] +['ÙĤار', 'ب'] +['à¸Ħวาม', 'รูà¹īสึà¸ģ'] +['à¹ģหล', 'ะ'] +['ĠاÙĦÙĨ', 'ÙĤد'] +['Ġ×IJ׾', 'פ×Ļ'] +['Ùħس', 'ئ'] +['Ùħسئ', 'ÙĪÙĦ'] +['ев', 'ÑĭÑħ'] +['клÑİÑĩ', 'ениÑı'] +['×ij', '×Ļ׳'] +['×ij×Ļ׳', '×Ļ×Ķ×Ŀ'] +['ש', '×ķ×IJ×Ķ'] +['ĠÅŁ', 'ark'] +['ĠÅŁark', 'ı'] +['Ġsü', 'rec'] +['Ġsürec', 'in'] +['à¹Ģà¸Ħร', 'à¸Ķ'] +['à¹Ģà¸Ħรà¸Ķ', 'ิà¸ķ'] +['ãĥIJ', 'ãĥ¬'] +['ĠØ´', 'Ø£ÙĨ'] +['à¹Ģà¸Ńา', 'à¹Ħวà¹ī'] +['niÄĻ', 'cie'] +['רצ', '×Ĺ'] +['ĠaÅŁ', 'ama'] +['׳', 'פ×Ĵ×¢'] +['Ġth', 'á»Ŀ'] +['Ġkhu', 'ẩn'] +['diÄŁ', 'inde'] +['ÑıÑī', 'иÑħ'] +['ãĥĺ', 'ãĥ«'] +['Ġüber', 'h'] +['Ġüberh', 'aupt'] +['ĠÑĤÑĢеб', 'ова'] +['ĠdÅĤ', 'ugi'] +['×ĺ', '×Ļף'] +['à¸Ĥà¸Ļาà¸Ķ', 'à¹ĥหà¸įà¹Ī'] +['ĠاÙĦØ£', 'Ùĩ'] +['ĠاÙĦØ£Ùĩ', 'ÙĦÙĬ'] +['ĠMü', 'd'] +['ĠMüd', 'ürü'] +['Ġ×Ļ×Ķ', '×ķ×ĵ×Ķ'] +['Ñĭв', 'аеÑĤÑģÑı'] +['س', 'اط'] +['×Ķת', '׳×Ķ×Ĵ'] +['×Ķ×ª×ł×Ķ×Ĵ', '×ķת'] +['à¸ģาร', 'à¸ľà¸¥à¸´à¸ķ'] +['íĴ', 'Ģ'] +['สà¸ĸาà¸Ļ', 'à¸ģารà¸ĵà¹Į'] +['Ġо', 'ÑĦ'] +['ĠоÑĦ', 
'иÑģ'] +['ĠÙĦ', 'عبة'] +['Ġstron', 'ÄĻ'] +['Ġר×IJ', '×ķ×Ļ'] +['×Ĺ', '×ij׾'] +['ĠÑĢÑĭ', 'н'] +['ĠÑĢÑĭн', 'ке'] +['Ġ׾×ŀ×¢', 'ף'] +['اس', 'ÙĦ'] +['ห', 'ัà¸Ļ'] +['Ġ×IJ', '×Ĺ×Ļ'] +['ĠпÑĢод', 'ол'] +['ê°Ģ', 'ìŀħ'] +['Ġ×ijר', '×Ĺ'] +['Ġ×ijר×Ĺ', '×ij×Ļ'] +['дж', 'еÑĢ'] +['Ġ׾', '×Ĺ׾'] +['Ġ׾×Ĺ׾', '×ķ×ĺ'] +['Ġ׾×Ĺ׾×ķ×ĺ', '×Ļף'] +['ศาส', 'à¸Ļา'] +['ãĤ¢ãĤ¤', 'ãĥĨ'] +['ãĤ¢ãĤ¤ãĥĨ', 'ãĥł'] +['Ġפר', '×ķפ'] +['جز', 'اء'] +['ล', 'à¸Ńย'] +['Ġc', 'iaÅĤa'] +['Ġgi', 'ết'] +['ĠзнаÑĩ', 'иÑĤелÑĮно'] +['Ġolmad', 'ıģ'] +['Ġolmadıģ', 'ını'] +['н', 'д'] +['нд', 'екÑģ'] +['تأ', 'Ùĥد'] +['Ġìĸ', '¸'] +['Ġìĸ¸', 'ìłľ'] +['ay', 'dın'] +['ãĥī', 'ãĥ¬ãĤ¹'] +['Ġs', 'ắt'] +['Ġíĺ¸', 'íħĶ'] +['Ġë¶', 'ģ'] +['Ġë¶ģ', 'íķľ'] +['ãĥij', 'ãĤ¤'] +['Ġ×ŀש×Ĺ×§', '×Ļ'] +['à¸Ħà¸Ļ', 'à¸Ńืà¹Īà¸Ļ'] +['Ġиз', 'гоÑĤов'] +['ĠизгоÑĤов', 'лен'] +['à¹Ģà¸ģีย', 'ร'] +['à¹Ģà¸ģียร', 'à¸ķิ'] +['תק', 'שר'] +['ĠÑĢаÑģ', 'ÑĩеÑĤ'] +['ส', 'à¹Ģà¸ķ'] +['Ġl', 'änger'] +['ĠiÅŁ', 'let'] +['ĠiÅŁlet', 'me'] +['Ġع', 'ÙĦÙĬÙĨ'] +['ĠعÙĦÙĬÙĨ', 'ا'] +['é', 'lection'] +['ĠاÙĦغ', 'ربÙĬØ©'] +['íĭ', 'Ģ'] +['ãĤĤãĤī', 'ãģĪ'] +['Ġкни', 'ги'] +['Ø£', 'سÙħ'] +['أسÙħ', 'اء'] +['Ġth', 'á»ı'] +['Ġthá»ı', 'a'] +['หà¸Ļ', 'ู'] +['Ġ×ł×¢', 'ש×Ķ'] +['à¸łà¸²à¸¢', 'à¹ĥà¸ķà¹ī'] +['à¸ŀื', 'à¸Ĭ'] +['رÙĬ', 'Ø·'] +['Ùģ', 'ÙĪØ¶'] +['ãģĤãĤĬãģĮãģ¨ãģĨãģĶãģĸ', 'ãģĦãģ¾ãģĹãģŁ'] +['ש', '×ĵ×Ķ'] +['Ġng', 'á»±c'] +['ĠÑģеÑĢ', 'ÑĮ'] +['ĠÑģеÑĢÑĮ', 'езн'] +['T', 'ôi'] +['Ġfiyat', 'ları'] +['ĠвÑģ', 'Ñİ'] +['ĠC', 'ódigo'] +['Ġ×Ķש', '×IJ'] +['Ġ×Ķש×IJ', '׾×Ķ'] +['ĠP', 'ública'] +['Ø¥', 'Ø®'] +['إخ', 'ÙĪØ§ÙĨ'] +['ĠзаÑıв', 'ил'] +['ãĥ¦', 'ãĥ¼'] +['ר×IJ', '×Ļת'] +['vol', 'ución'] +['Ġsz', 'ko'] +['Ġszko', 'ÅĤy'] +['جرÙĬ', 'دة'] +['Ġpens', 'é'] +['ìī', '¬'] +['ĠBüyük', 'ÅŁehir'] +['ĠØ£Ùħ', 'رÙĬ'] +['ĠØ£ÙħرÙĬ', 'ÙĥÙĬ'] +['à¸Ļัà¸ģ', 'ศึà¸ģษา'] +['Ġtod', 'av'] +['Ġtodav', 'ÃŃa'] +['ĠС', 'ан'] +['ĠСан', 'кÑĤ'] +['íķĺ', 'ìŀIJ'] +['ØŃÙĪ', 'اÙĦ'] +['׼', '×ķשר'] +['à¹Ģลย', 'à¸Ħรัà¸ļ'] +['Ġal', 'gu'] +['Ġalgu', 'ém'] +['Ùģ', 'ز'] +['Ġçek', 'il'] +['Ġ×ĵ', 'ר׼×Ļ×Ŀ'] +['ãĥIJ', 'ãĥ©'] +['à¸ģà¹ĩ', 'สามารà¸ĸ'] 
+['สà¹Īวà¸Ļ', 'ลà¸Ķ'] +['íı', '°'] +['ĠP', 'úb'] +['ĠPúb', 'lico'] +['à¹ģà¸Ļว', 'à¸Ĺาà¸ĩ'] +['×IJת', '×Ĵר'] +['Ø´', 'اش'] +['شاش', 'Ø©'] +['ci', 'ÅĽni'] +['ĠÃľ', 'rün'] +['ÙĦÙĪ', 'ØŃ'] +['ĠاÙĦ', 'بÙĨ'] +['ĠاÙĦبÙĨ', 'Ùĥ'] +['ì¡°', 'ì¹ĺ'] +['Ġorganiz', 'ación'] +['ãģĤãĤĬãģĮãģ¨ãģĨãģĶãģĸ', 'ãģĦãģ¾ãģĻ'] +['s', 'ätze'] +['ĠÑģем', 'ей'] +['ÙĤ', 'صد'] +['ÑģÑĤв', 'еннÑĭе'] +['Ġpréc', 'éd'] +['Ġprécéd', 'ent'] +['à¸ģรุà¸ĩà¹Ģà¸Ĺà¸ŀ', 'ฯ'] +['ãģ¨è¨Ģ', 'ãģĦ'] +['×ij׳×Ļ', '×Ļף'] +['ĠØŃ', 'ÙĪ'] +['ĠØŃÙĪ', 'اÙĦÙĬ'] +['סק', 'ס'] +['ĠsaÄŁlam', 'ak'] +['Ġ׾', 'צ×Ļ×Ļף'] +['×§×ĵ', 'ש'] +['Ġ×Ķ×ŀ', '×¢×¨×Ľ×ª'] +['Ġ׾×Ķ', '×¢×ij×Ļר'] +['Ġg', 'ünd'] +['Ġgünd', 'em'] +['ĠнаÑĪ', 'его'] +['à¹ĥà¸Ļ', 'à¸ŀืà¹īà¸Ļà¸Ĺีà¹Ī'] +['à¹Ģà¸Ħร', 'ืà¸Ń'] +['à¹Ģà¸Ħรืà¸Ń', 'à¸Ĥ'] +['à¹Ģà¸Ħรืà¸Ńà¸Ĥ', 'à¹Īาย'] +['ظ', 'اÙĩرة'] +['ÙħÙĨ', 'ظÙħ'] +['ÙħÙĨظÙħ', 'ات'] +['Ùħت', 'از'] +['追', 'ãģĦ'] +['dı', 'kt'] +['dıkt', 'an'] +['ĠëįĶ', 'ìļ±'] +['ĠÐĿ', 'апÑĢимеÑĢ'] +['tw', 'ór'] +['×ŀ×ķ×¢', 'צ×Ķ'] +['Ùĥ', 'ÙĪÙĥ'] +['Ð', '©'] +['×ŀ×ĺ', 'פ׾'] +['ó', 'lica'] +['訪', 'ãĤĮ'] +['ĠëĮĢ', 'ë¶Ģ'] +['ĠëĮĢë¶Ģ', 'ë¶Ħ'] +['ãĤ¯ãĥª', 'ãĥĥãĤ¯'] +['ãĤĴ', 'éģ¸'] +['ãĤĴéģ¸', 'ãģ¶'] +['Ġpow', 'sta'] +['Ġpowsta', 'ÅĤ'] +['Ġraz', 'ón'] +['×ij', '×ķ×Ĺר'] +['ĠÑģообÑī', 'ил'] +['Ġ×§', '×ij×ķ×¢'] +['r', 'êt'] +['à¸Ķี', 'à¸Ĥึà¹īà¸Ļ'] +['×ŀס', '×¢×ĵ'] +['×ŀסע×ĵ', '×ķת'] +['ĠÃĸ', 'sterreich'] +['Ġ׳', '×Ĺש×ij'] +['Ùħباد', 'رة'] +['ì´', 'ī'] +['×Ĵ', '׳×ĺ×Ļ'] +['ä¿¡', 'ãģĺ'] +['du', 'ÄŁ'] +['duÄŁ', 'unu'] +['Ġph', 'ú'] +['ĠاÙĦØ£', 'Ø®ÙĬر'] +['Ġت', 'عتبر'] +['landır', 'ıl'] +['ãģ¨ãģ¯', 'ãģĦ'] +['ãģ¨ãģ¯ãģĦ', 'ãģĪ'] +['ĠاÙĦ', 'Ø·ÙĦ'] +['ĠاÙĦØ·ÙĦ', 'اب'] +['ĠN', 'º'] +['éģ¿', 'ãģij'] +['اÙĦ', 'Ùħع'] +['اÙĦÙħع', 'رÙĪÙģ'] +['ส', 'à¸łà¸²'] +['éĽ¢', 'ãĤĮ'] +['ĠпомоÑī', 'ÑĮ'] +['Ġзна', 'еÑĤ'] +['ãĥĹãĥ¬', 'ãĤ¼'] +['ãĥĹãĥ¬ãĤ¼', 'ãĥ³ãĥĪ'] +['Ġsup', 'érieur'] +['Ġש׾', '×Ļש×Ļ'] +['ĠاÙĦÙĨ', 'ÙĪØ¹'] +['ãĤĵãģ§ãģĻ', 'ãģŃ'] +['à¸Ńà¸ļ', 'รม'] +['Ġgi', 'á»įng'] +['Ġwzgl', 'ÄĻd'] +['ĠاÙĦÙģ', 'ÙĤر'] +['è', 'rent'] +['Ġ×ŀ×IJ', '×Ĺ'] +['Ġ×ŀ×IJ×Ĺ', '×ķר×Ļ'] +['×Ĵ', '×Ĵ'] +['×Ļ', 
'×Ļ×ij'] +['ÙħÙĦ', 'اب'] +['ÙħÙĦاب', 'س'] +['Ġhük', 'ü'] +['Ġhükü', 'met'] +['Ġ×ŀ×Ĵ', '×Ļ×ij'] +['ĠÐŀ', 'Ñĩ'] +['ĠÐŀÑĩ', 'енÑĮ'] +['æĹ©', 'ãģĦ'] +['Ġconstr', 'ucción'] +['Ġth', 'ượng'] +['ï¼', 'ĭ'] +['Ġcor', 'ação'] +['à¹Ģหล', 'à¹ĩà¸ģ'] +['ĠBaÅŁ', 'b'] +['ĠBaÅŁb', 'akan'] +['éĢ£', 'ãĤĮ'] +['ãģĻãĤĭ', 'ãģĵãģ¨ãģĮãģ§ãģįãģ¾ãģĻ'] +['ĠÙĤ', 'اÙħت'] +['Ġا', 'Ùĥثر'] +['ÙģØ§Ø¹', 'ÙĦ'] +['ĠÑĦ', 'оÑĢ'] +['ĠÑĦоÑĢ', 'Ñĥм'] +['غ', 'ذÙĬ'] +['ĠiÅŁ', 'le'] +['ĠiÅŁle', 'ml'] +['ĠiÅŁleml', 'eri'] +['ĠìĤ¬ëŀĮ', 'ìĿĢ'] +['Ġìŀij', 'ìĦ±'] +['Ġë§Ī', '볨'] +['Ùħ', 'جÙĦس'] +['หม', 'ู'] +['д', 'в'] +['дв', 'иг'] +['двиг', 'а'] +['à¹Ģสีย', 'à¸Ĭีวิà¸ķ'] +['×Ķת', 'פת×Ĺ'] +['×Ķתפת×Ĺ', '×ķת'] +['ĠмеÑĤ', 'ÑĢо'] +['ĠÑģ', 'енÑĤ'] +['ĠÑģенÑĤ', 'Ñı'] +['ĠÑģенÑĤÑı', 'бÑĢÑı'] +['ê³', '§'] +['Ġ׾', 'פע'] +['Ġ×ľ×¤×¢', '×ŀ×Ļ×Ŀ'] +['à¹Ģà¸ļ', 'ีย'] +['詳', 'ãģĹãģı'] +['çķ°', 'ãģªãĤĭ'] +['Ġİl', 'çe'] +['ĠAt', 'at'] +['ĠAtat', 'ür'] +['ĠAtatür', 'k'] +['รุ', 'à¹Īà¸ĩ'] +['Ġkald', 'ı'] +['Ġ주', 'ìŀ¥'] +['Ġprés', 'ence'] +['Ġн', 'аб'] +['Ġнаб', 'лÑİ'] +['ĠнаблÑİ', 'да'] +['ĠÑģам', 'ого'] +['×Ĵ', '×ķש'] +['×ŀ×ĺ', '×ķפ'] +['×ŀ×ĺ×ķפ', '׾'] +['ĠвÑĭб', 'иÑĢа'] +['ĠìŀIJ', '리'] +['åĪĨ', 'ãģĭãĤīãģªãģĦ'] +['Ġз', 'Ñĥб'] +['Ġש׼', '×ijר'] +['Ġد', 'ائ'] +['Ġدائ', 'Ùħا'] +['ĠпаÑĢ', 'ÑĤи'] +['ï¼', '²'] +['ĠاÙĬ', 'ضا'] +['ĠÑħ', 'оз'] +['ĠÑħоз', 'Ñı'] +['ĠÑħозÑı', 'й'] +['ĠÑħозÑıй', 'ÑģÑĤв'] +['ĠاÙĦØ£', 'ج'] +['ĠاÙĦأج', 'ÙĨب'] +['ĠاÙĦأجÙĨب', 'ÙĬØ©'] +['ĠÐĹ', 'на'] +['ĠAp', 'ós'] +['ĠÑį', 'неÑĢ'] +['ĠÑįнеÑĢ', 'ги'] +['Ġy', 'ans'] +['Ġyans', 'ı'] +['ĠJust', 'i'] +['ĠJusti', 'ça'] +['Ġpré', 'vu'] +['ม', 'วล'] +['ìŀ¥', 'ëĭĺ'] +['à¸ģระ', 'à¸ļ'] +['à¸ģระà¸ļ', 'วà¸Ļ'] +['à¸ģระà¸ļวà¸Ļ', 'à¸ģาร'] +['×ŀ', '×ŀ'] +['×ŀ×ŀ', '×ķצע'] +['Ġh', 'ẹ'] +['Ġhẹ', 'n'] +['зд', 'ание'] +['Ġak', 'ÅŁ'] +['ĠakÅŁ', 'am'] +['×ĺ', '×ķפ'] +['Ġgere', 'kt'] +['Ġgerekt', 'i'] +['Ġgerekti', 'ÄŁini'] +['Ġnar', 'z'] +['Ġnarz', 'ÄĻdzi'] +['é', 'po'] +['épo', 'que'] +['ĠTh', 'ần'] +['Ġwys', 'oko'] +['Ġwysoko', 'ÅĽci'] +['à¸ľà¸¹à¹ī', 'à¸Ľ'] +['à¸ľà¸¹à¹īà¸Ľ', 'à¹Īวย'] +['ĠÙĬ', 
'بدÙĪ'] +['ÑĤелÑĮ', 'ного'] +['Ġвз', 'глÑıд'] +['Ġjed', 'nÄħ'] +['ĠìĿĺ', '견'] +['Ġ', 'à¸Ĥà¸ĵะà¸Ĺีà¹Ī'] +['פ', '×Ļ×ĵ'] +['ìĥģ', 'ëĭ´'] +['Ġm', 'ỡ'] +['×Ķ', '×ŀ׾'] +['×Ķ×ŀ׾', 'צ×ķת'] +['ĠÑģоÑģÑĤ', 'о'] +['ĠÑģоÑģÑĤо', 'иÑĤ'] +['Ġав', 'и'] +['Ġави', 'а'] +['ĠL', 'änder'] +['تص', 'ÙĪÙĬر'] +['×ŀ×ĵ', '×Ļ×Ķ'] +['ìłĪ', 'ì°¨'] +['ãģ¨', 'ãĤĬ'] +['ãģ¨ãĤĬ', 'ãģĤ'] +['ãģ¨ãĤĬãģĤ', 'ãģĪ'] +['ãģ¨ãĤĬãģĤãģĪ', 'ãģļ'] +['ĠÑĢ', 'Ñıд'] +['ĠÑĢÑıд', 'ом'] +['ĠNh', 'ất'] +['ĠاÙĦÙĥ', 'اÙħÙĦ'] +['×Ĺ׾', '׾'] +['ĠGi', 'ấy'] +['צ', '×ĺר'] +['צ×ĺר', '×£'] +['Ġ׾×ij', '×ĺ׾'] +['Ġим', 'еÑĤÑĮ'] +['ס×ŀ', '×ķ×ļ'] +['Ġparticip', 'ação'] +['íķľëĭ¤', 'ë©´'] +['ÙħÙĨت', 'دÙĬ'] +['ÙħÙĨتدÙĬ', 'ات'] +['ĠeÄŁ', 'len'] +['g', 'änge'] +['رب', 'ØŃ'] +['ãĤ®', 'ãĥ£'] +['ĠاÙĦر', 'ÙĤÙħ'] +['à¸ĭ', 'à¹īำ'] +['ĠH', 'óa'] +['×ŀר', '×Ĺ×§'] +['ØŃÙħ', 'اÙħ'] +['بÙĪ', 'Ùĥ'] +['ĠArt', 'ÃŃculo'] +['ãĥĦ', 'ãĤ¢ãĥ¼'] +['×Ķפ', '׼×Ķ'] +['×Ĺ׾', '×ķף'] +['ĠпеÑĢе', 'Ñħод'] +['len', 'miÅŁ'] +['زر', 'اعة'] +['Ġseñ', 'or'] +['ãģ£ãģ¦', 'ãģįãģ¦'] +['Ø¥', 'Ø´'] +['إش', 'ارة'] +['Ġpod', 'ÃŃa'] +['ĠÃľ', 'lke'] +['н', 'ÑģкаÑı'] +['Ġadapt', 'é'] +['Ġdüzen', 'len'] +['Ġdüzenlen', 'en'] +['ĠÑģÑĤ', 'ала'] +['ĠÙĬ', 'ØŃتاج'] +['Ġn', 'ier'] +['Ġnier', 'uch'] +['Ġnieruch', 'omo'] +['Ġnieruchomo', 'ÅĽci'] +['ãģĵãģ¨ãģĮ', 'ãģĤãĤĭ'] +['ยà¸Ńà¸Ķ', 'à¹Ģยีà¹Īยม'] +['ĠÙħ', 'ج'] +['ĠÙħج', 'اÙĨÙĬ'] +['Ġз', 'аб'] +['Ġзаб', 'ол'] +['Ġзабол', 'ев'] +['Ġзаболев', 'аниÑı'] +['ĠÅĽ', 'ro'] +['ĠÅĽro', 'dk'] +['ĠÅĽrodk', 'ów'] +['Ġ×Ķ', '׾×IJ×ķ×ŀ×Ļ'] +['Ġdok', 'ÅĤad'] +['ĠdokÅĤad', 'nie'] +['ãģŁãģı', 'ãģªãģĦ'] +['ãģ¯ãģļ', 'ãģ§ãģĻ'] +['ã썿ĢĿ', 'ãģ£ãģ¦ãģĦãģŁ'] +['é', 'cran'] +['ìĹħ', 'ì²´'] +['trzym', 'aÅĤ'] +['ÑģÑĤв', 'еннÑĭй'] +['ĠNot', 'ÃŃc'] +['ĠNotÃŃc', 'ias'] +['Ùħ', 'رÙĬ'] +['ÙħرÙĬ', 'ض'] +['æ°Ĺ', 'è»'] +['æ°Ĺè»', '½'] +['æ°Ĺ軽', 'ãģ«'] +['ëĵ', '£'] +['Ġ×ĵ', '×ķ×IJר'] +['Ġ׾', '×ŀ׳'] +['Ġ׾×ŀ׳', '×ķ×¢'] +['ĠçalÄ±ÅŁ', 'ıyor'] +['ĠÅŁ', 'idd'] +['ĠÅŁidd', 'et'] +['ĠM', 'ặt'] +['Ġate', 'ÅŁ'] +['ĠполÑĥÑĩ', 'ениÑı'] +['à¹Ģà¸Ħรืà¹Īà¸Ńà¸ĩ', 'มืà¸Ń'] +['Ġgrö', 'ÃŁer'] +['د', 'ائ'] 
+['دائ', 'رة'] +['Ġbul', 'un'] +['Ġbulun', 'maktadır'] +['à¹Ģห', 'ร'] +['à¹Ģหร', 'ีย'] +['à¹Ģหรีย', 'à¸į'] +['à¸Ļัà¸ģ', 'à¸Ĺà¹Īà¸Ńà¸ĩà¹Ģà¸Ĺีà¹Īยว'] +['Ġalan', 'ında'] +['ĠÑĥ', 'зна'] +['Ġл', 'еÑĩение'] +['売', 'ãĤĮ'] +['Ġçev', 'ir'] +['Ġdeste', 'ÄŁi'] +['ĠheiÃŁ', 't'] +['âĸ', '²'] +['ØŃ', 'Ø·'] +['à¸Ħำ', 'à¸ķà¸Ńà¸ļ'] +['ãĤªãĥ³', 'ãĥ©ãĤ¤ãĥ³'] +['Ġ×ij×Ĺ×Ļ', '×Ļ×Ŀ'] +['ãĥ¦', 'ãĥĭ'] +['Ġdüzenle', 'me'] +['Ġmodal', 'itÃł'] +['سر', 'Ø·'] +['سرط', 'اÙĨ'] +['×ŀ׼', '×ķף'] +['ĠданнÑĭ', 'й'] +['تر', 'ت'] +['ترت', 'ÙĬب'] +['à¸ļาà¸ĩ', 'à¸Ħà¸Ļ'] +['ĠÄIJ', 'á»ĭnh'] +['ม', 'ูล'] +['มูล', 'à¸Ħà¹Īา'] +['ÙĨ', 'ÙĤص'] +['à¸ģาร', 'รัà¸ģษา'] +['ĠÑĦ', 'он'] +['ĠÑĦон', 'д'] +['ãĤĪãģĨ', 'ãģ«ãģªãģ£ãģŁ'] +['Ùħع', 'اÙĦ'] +['ÙħعاÙĦ', 'جة'] +['ĠOs', 'man'] +['ĠOsman', 'lı'] +['иÑĩеÑģк', 'ом'] +['à¸Ńยาà¸ģ', 'à¸Īะ'] +['ãģķãģ¾', 'ãģĸ'] +['ãģķãģ¾ãģĸ', 'ãģ¾'] +['ãģķãģ¾ãģĸãģ¾', 'ãģª'] +['Ġת', '×ķ׼׾'] +['×¢', 'צ×ij'] +['ĠاÙĦع', 'سÙĥ'] +['ĠاÙĦعسÙĥ', 'رÙĬ'] +['Ġvé', 'hic'] +['Ġvéhic', 'ule'] +['Ġ×Ļצ', '×Ĺ×§'] +['ĠاÙĦÙĪ', 'ØŃ'] +['ĠاÙĦÙĪØŃ', 'ÙĬد'] +['ĠاÙĦع', 'دÙĪ'] +['ĠQu', 'ản'] +['Ġê³µ', 'ëıĻ'] +['بد', 'ÙĦ'] +['ĠÄij', 'ảng'] +['Ġm', 'á»ĩnh'] +['Ġnie', 'zb'] +['Ġniezb', 'ÄĻ'] +['ĠniezbÄĻ', 'dn'] +['Ġyayın', 'lan'] +['обÑī', 'и'] +['Ġgö', 'tür'] +['צ', 'פ'] +['צפ', '×ķ×Ļ'] +['ĠÙĦÙĬ', 'بÙĬ'] +['ĠÙĦÙĬبÙĬ', 'ا'] +['ØŃ', 'ÙĪØ§'] +['Ġд', 'об'] +['Ġдоб', 'ÑĢо'] +['иÑĢÑĥ', 'ем'] +['ĠاÙĦØŃÙĥÙĪÙħ', 'ÙĬØ©'] +['m', 'Ã¤ÃŁig'] +['Ġed', 'ición'] +['влек', 'аÑĤелÑĮ'] +['влекаÑĤелÑĮ', 'н'] +['Ġת', 'ש׾×ķ×Ŀ'] +['Ġ×Ķש', '×ķ׳×Ļ×Ŀ'] +['มิ', 'à¸ĸุ'] +['มิà¸ĸุ', 'à¸Ļ'] +['มิà¸ĸุà¸Ļ', 'ายà¸Ļ'] +['é£Łãģ¹', 'ãģ¦'] +['ĠìĪĺ', 'ì§ij'] +['ס', '×ij×Ļ'] +['ĠиÑİ', 'лÑı'] +['Ġà¹Ħà¸Ķà¹ī', 'à¹ģà¸ģà¹Ī'] +['׾×Ĺ', '×Ŀ'] +['tr', 'ä'] +['trä', 'gt'] +['ãģĿãĤĤ', 'ãģĿãĤĤ'] +['ÐĿ', 'Ðķ'] +['Ġв', 'нÑĥÑĤ'] +['ĠвнÑĥÑĤ', 'ÑĢи'] +['ãģ¨', 'ä¸Ģç·Ĵãģ«'] +['ãĤ«', 'ãĥķãĤ§'] +['Ġ×ij×Ĺ', '×ĵר'] +['×Ĺ', '×ŀש'] +['ãĤ¨', 'ãĥį'] +['ãĤ¨ãĥį', 'ãĥ«'] +['ãĤ¨ãĥįãĥ«', 'ãĤ®'] +['ãĤ¨ãĥįãĥ«ãĤ®', 'ãĥ¼'] +['à¸Ĥà¸Ńà¸ĩ', 'à¸ķัวà¹Ģà¸Ńà¸ĩ'] +['بÙĤ', 'اء'] +['פס', '×Ļ׼'] 
+['פס×Ļ׼', '×ķ׾×ķ×Ĵ'] +['ãĥ¡', 'ãĥĥ'] +['ãĥ¡ãĥĥ', 'ãĤ»'] +['ãĥ¡ãĥĥãĤ»', 'ãĥ¼ãĤ¸'] +['ÙĦ', 'ÙĤب'] +['A', 'Äŀ'] +['שק', '×Ļ×¢'] +['ÙĤ', 'ساÙħ'] +['×ĵ×ķ×Ĵ', '×ŀ×Ķ'] +['æ·±', 'ãģĦ'] +['íĸĪ', 'ëĬĶëį°'] +['ĠrozwiÄħz', 'anie'] +['à¸Ļัà¹Īà¸Ļ', 'à¹Ģà¸Ńà¸ĩ'] +['×Ļצ', '×ij'] +['Ġtr', 'ông'] +['à¹ĥà¸Ĭà¹ī', 'à¸ļริà¸ģาร'] +['ĠاÙĦÙħÙĪ', 'سÙħ'] +['ĠдеÑĤ', 'и'] +['ãģĹãģĭ', 'ãģªãģĦ'] +['ס', '×Ļף'] +['Ġréfé', 'rence'] +['à¹ģห', 'à¹īà¸ĩ'] +['ãĤĤãĤī', 'ãģ£ãģŁ'] +['Ġ׾', 'ר׼'] +['Ġ׾ר׼', '×ķש'] +['شع', 'ÙĪØ±'] +['ĠÐij', 'ог'] +['Ġlaz', 'ım'] +['Ġ×Ļש', '׳×Ŀ'] +['Ġп', 'аÑĢÑĤ'] +['ĠпаÑĢÑĤ', 'неÑĢ'] +['ĠÑĥ', 'ника'] +['ĠÑĥника', 'лÑĮн'] +['Ġmaté', 'riel'] +['×ŀר', '×§'] +['Ġph', 'ưá»Ŀng'] +['Ġз', 'ай'] +['Ġзай', 'м'] +['Ùģ', 'ÙĤد'] +['Univers', 'itÃł'] +['×¢', 'ר׼×Ļ×Ŀ'] +['Ġba', 'ño'] +['Ġн', 'оÑı'] +['ĠноÑı', 'бÑĢÑı'] +['à¸Ľ', 'à¹īาย'] +['Ġt', 'ats'] +['Ġtats', 'äch'] +['Ġtatsäch', 'lich'] +['ĠÑĤÑĢ', 'еÑĤÑĮ'] +['Ñį', 'м'] +['ãĥĻ', 'ãĥ¼ãĤ¹'] +['Ġnh', 'á»±a'] +['ìĬ¤', 'íģ¬'] +['ĠعبداÙĦ', 'ÙĦÙĩ'] +['Ġת', '×ķר×Ķ'] +['أش', 'ÙĬ'] +['أشÙĬ', 'اء'] +['ĠÙĦÙĦ', 'غا'] +['ĠÙĦÙĦغا', 'ÙĬØ©'] +['Ùħ', 'ÙĪØ§ÙĤ'] +['ÙħÙĪØ§ÙĤ', 'Ùģ'] +['ĠgÅĤówn', 'a'] +['Ġart', 'Ä±ÅŁ'] +['Ġ×ŀ×§', '×ķ×ŀ×Ļ'] +['ãĤ¯ãĥ©', 'ãĥĸ'] +['Ġس', 'ÙĪÙī'] +['ĠìŬ', 'ìĦ±'] +['اس', 'ر'] +['اسر', 'ائÙĬÙĦ'] +['Ġ׳', '×Ľ×ª×ij'] +['ย', 'à¹īà¸Ńà¸Ļ'] +['Ġdeber', 'á'] +['Ġph', 'ẫu'] +['ÑİÑī', 'ем'] +['ĠÙĦدÙĬ', 'ÙĨا'] +['×ŀ×ĺ', '×Ķ'] +['Ġ׳', '×ķ׾×ĵ'] +['ĠвÑģÑĤÑĢ', 'еÑĩа'] +['ãĤīãĤĮ', 'ãģ¦ãģĦãģ¾ãģĻ'] +['ĠcaÅĤ', 'ej'] +['ย', 'ึ'] +['ยึ', 'à¸Ķ'] +['поÑĤ', 'ен'] +['поÑĤен', 'ÑĨи'] +['Ġл', 'иÑĤ'] +['ĠлиÑĤ', 'еÑĢ'] +['ĠлиÑĤеÑĢ', 'аÑĤÑĥÑĢ'] +['Ġкажд', 'ом'] +['ĠíĮ', 'IJ'] +['ĠíĮIJ', 'ëĭ¨'] +['à¸Ī', 'ู'] +['Ġpres', 'ença'] +['ãģªãĤĵ', 'ãģ§'] +['Ùħ', 'ÙĬاÙĩ'] +['ин', 'ÑĦоÑĢм'] +['инÑĦоÑĢм', 'аÑĨион'] +['инÑĦоÑĢмаÑĨион', 'н'] +['ĠìŀIJ', 'ìŰ'] +['ר׼', 'ש'] +['Ġöd', 'ül'] +['ç¶ļ', 'ãģı'] +['Ġп', 'Ñģ'] +['ĠпÑģ', 'иÑħ'] +['ĠпÑģиÑħ', 'олог'] +['ت', 'ذÙĥر'] +['Ġìŀħ', 'ìŀ¥'] +['ล', 'à¸Ķà¹Į'] +['ìĦł', 'ê±°'] +['ãģ£ãģ¦', 'ãģĬãĤĬãģ¾ãģĻ'] +['Ġ×Ļ', '×¢'] +['Ġ×Ļ×¢', '×§×ij'] 
+['ĠاÙĦØ·', 'عاÙħ'] +['ãĥĨ', 'ãĤ¹ãĥĪ'] +['ĠTu', 'ấn'] +['Ġparticip', 'ación'] +['×ŀ×ķ×ŀ', '×Ĺ×Ķ'] +['×Ĵר', 'ס×Ķ'] +['ĠاÙĦتÙĨ', 'ÙģÙĬ'] +['ĠاÙĦتÙĨÙģÙĬ', 'ذÙĬ'] +['ĠбезопаÑģ', 'н'] +['ge', 'f'] +['gef', 'ähr'] +['Ø´', 'ÙĪØ±'] +['Ġmy', 'ÅĽli'] +['ÙĪØ§', 'Ø´ÙĨ'] +['ÙĪØ§Ø´ÙĨ', 'Ø·ÙĨ'] +['׳×ķס', '×¢'] +['Ùĥ', 'Ùĩ'] +['ÙĥÙĩ', 'رب'] +['ÙĥÙĩرب', 'اء'] +['Ġmus', 'iaÅĤ'] +['ìĭ', '¸'] +['ãĥĸãĥ©', 'ãĥĥãĤ¯'] +['Ġcré', 'é'] +['ÙĨÙĩ', 'ار'] +['owo', 'ÅĽÄĩ'] +['ÙħØŃا', 'ÙĥÙħ'] +['ĠwÅĤa', 'ÅĽ'] +['ĠwÅĤaÅĽ', 'c'] +['ĠwÅĤaÅĽc', 'iciel'] +['ĠÙĬ', 'ؤ'] +['ĠÙĬؤ', 'دÙĬ'] +['×ŀ×¢', '×ķ׳'] +['×IJ', '×ij׾'] +['خط', 'Ø£'] +['ĠÑħ', 'олод'] +['×ĸ', '×ķ׾'] +['ãģĵãĤĮ', 'ãĤī'] +['ãģĵãĤĮãĤī', 'ãģ®'] +['Ġbás', 'ica'] +['ฤ', 'à¸Ķ'] +['ฤà¸Ķ', 'ูà¸ģ'] +['ฤà¸Ķูà¸ģ', 'า'] +['ฤà¸Ķูà¸ģา', 'ล'] +['èIJ½ãģ¡', 'çĿĢ'] +['ãģªãģĦ', 'ãģĵãģ¨'] +['ص', 'ÙĪÙħ'] +['ÙĨج', 'ØŃ'] +['׳ק', '×ķ×ĵ'] +['׳ק×ķ×ĵ', 'ת'] +['кл', 'аÑģÑģ'] +['íķĺìĭľ', 'ëĬĶ'] +['ëĦ', 'ĺ'] +['Ġש×IJ', '×Ļ׳×ķ'] +['ĠС', 'ейÑĩаÑģ'] +['may', 'acaģı'] +['Ġyap', 'ılır'] +['Ġcategor', 'ÃŃa'] +['عب', 'اد'] +['ĠТ', 'еп'] +['ĠТеп', 'еÑĢÑĮ'] +['×Ķ×Ļס×ĺ', '×ķר×Ļ'] +['h', 'ế'] +['ãĤ³', 'ãĥ¼ãĥī'] +['Ġcabe', 'ça'] +['ج', 'Ùħا'] +['جÙħا', 'Ùĩ'] +['جÙħاÙĩ', 'ÙĬر'] +['ä½İ', 'ãģĦ'] +['ĠÑĤоваÑĢ', 'ов'] +['à¸Ĭาว', 'à¸ļà¹īาà¸Ļ'] +['ĠÑģÑĤан', 'ов'] +['ĠÑģÑĤанов', 'иÑĤÑģÑı'] +['ĠавÑĤом', 'обилÑĮ'] +['ĠÑģлÑĥÑĩ', 'ай'] +['à¸Ńั', 'à¸ŀ'] +['ĠG', 'iriÅŁ'] +['ĠìĿ¼', 'ëĭ¨'] +['ĠпÑĢ', 'оÑģ'] +['ĠпÑĢоÑģ', 'моÑĤÑĢ'] +['ãģªãģıãģª', 'ãģ£ãģŁ'] +['มี', 'à¸Ľà¸±à¸įหา'] +['ïº', 'İ'] +['éc', 'oute'] +['ĠÙħ', 'ÙĪØ¬ÙĪØ¯'] +['Ġس', 'رÙĬع'] +['ĠÙĪÙĩ', 'ÙĨا'] +['ĠÙĪÙĩÙĨا', 'Ùĥ'] +['à¸Ħุà¸ĵ', 'สม'] +['à¸Ħุà¸ĵสม', 'à¸ļัà¸ķิ'] +['Ġìļ°', 'ìĦł'] +['à¸ŀระ', 'à¸ŀุà¸Ĺà¸ĺ'] +['好', 'ãģ¿'] +['ظ', 'ÙĦÙħ'] +['Ġм', 'акÑģ'] +['ĠмакÑģ', 'ималÑĮ'] +['ĠмакÑģималÑĮ', 'но'] +['ãĥª', 'ãĤ¢ãĥ«'] +['à¹ģมà¹ī', 'วà¹Īา'] +['ĠاÙĦØŃ', 'ÙĪØ§Ø±'] +['ãĥĹãĥ©', 'ãĤ¹'] +['Ġع', 'ÙĦاÙĤØ©'] +['Ġíĸī', 'ëıĻ'] +['Ġgönder', 'il'] +['Ġl', 'ãi'] +['ĠsaÄŁ', 'lıkl'] +['ĠsaÄŁlıkl', 'ı'] +['ĠÑĪ', 'аг'] +['Ġ×ij×IJר', '×Ķ'] +['prowadzi', 'Äĩ'] 
+['ãģĦãģı', 'ãģ¤ãģĭ'] +['Ġبت', 'ارÙĬØ®'] +['Ġ×ij×IJ×ķת', '×Ķ'] +['Ġmó', 'c'] +['ĠÐľ', 'не'] +['ãĥĹãĥ¬', 'ãĥ¼'] +['×IJ', '×ĸר×Ĺ'] +['åł´åIJĪ', 'ãģ«ãģ¯'] +['使', 'ãģĪ'] +['à¹Ģร', 'ืà¸Ńà¸Ļ'] +['ĠÐŁ', 'еÑĤ'] +['ĠÐŁÐµÑĤ', 'ÑĢ'] +['ãģ«åħ¥', 'ãĤĭ'] +['Ùħ', 'ادة'] +['à¹Ģà¸ĩ', 'ืà¹Īà¸Ńà¸Ļ'] +['à¹Ģà¸ĩืà¹Īà¸Ńà¸Ļ', 'à¹Ħà¸Ĥ'] +['ĠÑģоÑģÑĤоÑı', 'ние'] +['ôn', 'ica'] +['ĠÑĦ', 'ев'] +['ĠÑĦев', 'ÑĢа'] +['ĠÑĦевÑĢа', 'лÑı'] +['Ġ×ķ', '×ĸ'] +['Ġ×ķ×ĸ', '×IJת'] +['à¸Ħร', 'ิ'] +['à¸Ħริ', 'ส'] +['ĠÐķ', 'Ñīе'] +['ãģ£ãģ¦ãģĹãģ¾', 'ãģĦãģ¾ãģĹãģŁ'] +['ĠпÑĢав', 'иÑĤелÑĮ'] +['ĠпÑĢавиÑĤелÑĮ', 'ÑģÑĤв'] +['Ġtä', 'glich'] +['Ġëĭ¹', 'ìĭľ'] +['×ŀ×ķ×¢', '×ŀ×ĵ'] +['Ġдв', 'оÑĢ'] +['æī', 'ķ'] +['æīķ', 'ãģĦ'] +['ĠÑģÑĤан', 'еÑĤ'] +['Ġвозд', 'ейÑģÑĤв'] +['ĠвоздейÑģÑĤв', 'и'] +['Ġf', 'ête'] +['à¹Ģส', 'า'] +['תק', '×ķ×ķ×Ķ'] +['Ġu', 'yar'] +['Ġuyar', 'ı'] +['à¸ģลัà¸ļ', 'à¹Ħà¸Ľ'] +['Ġgi', 'ưá»Ŀng'] +['Ġв', 'а'] +['Ġва', 'ÑĪи'] +['ĠÄij', 'áºŃu'] +['ĠSpa', 'ÃŁ'] +['ĠìķĦ', 'ë§Ī'] +['à¹Ħà¸Ķà¹ī', 'à¸ĩà¹Īาย'] +['Ġ×Ķ×ŀ', '×ijקש'] +['æĸ°', 'ãģŁ'] +['æĸ°ãģŁ', 'ãģª'] +['ılı', 'yor'] +['пл', 'ан'] +['Ġ×Ķ×ijר', '×Ļ×IJ×ķת'] +['ĠaÄŁ', 'rı'] +['Ġsay', 'gı'] +['建', 'ãģ¦'] +['Ġnaj', 'wyż'] +['Ġnajwyż', 'sz'] +['سÙĬاس', 'ات'] +['ãģĬ', 'å¾Ĺ'] +['ĠاÙĦع', 'ÙĦÙĬ'] +['ĠاÙĦعÙĦÙĬ', 'ا'] +['Ġcoraz', 'ón'] +['ì¹ĺ', 'ë£Į'] +['หัว', 'à¸Ĥà¹īà¸Ń'] +['Ġب', 'ØŃÙĬ'] +['ĠبØŃÙĬ', 'Ø«'] +['зв', 'езд'] +['بÙĪ', 'ابة'] +['ÐĽ', 'Ðĺ'] +['ÙĦا', 'زÙħ'] +['Ġroz', 'p'] +['Ġrozp', 'oc'] +['Ġrozpoc', 'zÄĻ'] +['触', 'ãĤĮ'] +['ĠاÙĦج', 'ÙħÙĩ'] +['ĠاÙĦجÙħÙĩ', 'ÙĪØ±'] +['Ġsp', 'ÄĻd'] +['ĠspÄĻd', 'z'] +['วิà¸Ĺยา', 'ศาสà¸ķรà¹Į'] +['ив', 'аеÑĤÑģÑı'] +['Ġдан', 'ной'] +['Ġreprés', 'ente'] +['ĠÄij', 'á»ĭch'] +['Ġ×¢×ŀ', '×ķ×§'] +['à¸Ńัà¸Ļ', 'à¸ķร'] +['à¸Ńัà¸Ļà¸ķร', 'าย'] +['Ġestr', 'atég'] +['Ġestratég', 'ia'] +['pad', 'ÅĤ'] +['Ġв', 'полн'] +['Ġвполн', 'е'] +['ĠпÑĢедоÑģÑĤав', 'лен'] +['×Ĺ׾', '×ķ×§'] +['×Ĺ׾×ķ×§', 'ת'] +['ãĤ¢', 'ãĥĬ'] +['ĠاÙĦغ', 'ذ'] +['ĠاÙĦغذ', 'ائÙĬ'] +['ĠÑĥ', 'зн'] +['ĠÑĥзн', 'аÑĤÑĮ'] +['à¸ĭ', 'à¹īาย'] +['å½ĵ', 'ãģ¦'] +['ØŃÙĬ', 'اء'] +['Ġbás', 'ico'] 
+['×§×ķ×ij', '×¢'] +['ĠاÙĦÙħ', 'باراة'] +['ĠاÙĦÙĩ', 'اتÙģ'] +['Ġ׼', '׳×Ĵ×ĵ'] +['à¸Ľà¸£à¸°', 'หย'] +['à¸Ľà¸£à¸°à¸«à¸¢', 'ัà¸Ķ'] +['Ðļ', 'ак'] +['à¸Ĺีà¹Ī', 'à¸Ļà¹Īา'] +['à¸Ĺีà¹Īà¸Ļà¹Īา', 'สà¸Ļà¹ĥà¸Ī'] +['ãģ¾', 'ãģģ'] +['ï½', '¢'] +['Ñģк', 'оп'] +['Ġson', 'rasında'] +['Ġur', 'zÄħd'] +['ĠurzÄħd', 'zenia'] +['׼×ķ', '×ķ׳'] +['׼×ķ×ķ׳', 'ת'] +['Ġ׾×Ķת', '×ŀ×ķ×ĵ'] +['Ġ׾×Ķת×ŀ×ķ×ĵ', '×ĵ'] +['ĠÑģ', 'ли'] +['ĠÑģли', 'ÑĪ'] +['ĠÑģлиÑĪ', 'ком'] +['ĠÑģÑĤ', 'Ñĥд'] +['ĠÑģÑĤÑĥд', 'енÑĤ'] +['Ġ×Ķ', '×ķ×ĵ'] +['Ġ×Ķ×ķ×ĵ', '×¢×Ķ'] +['ë¹Ħ', 'ìļ©'] +['à¸Ńยาà¸ģ', 'à¹ĥหà¹ī'] +['Ġb', 'á»ģ'] +['ยุ', 'à¸Ĺà¸ĺ'] +['Ðĺ', 'ÐĿ'] +['س', 'ائر'] +['Ø£', 'صÙĪÙĦ'] +['ĠاÙĦغ', 'رÙģ'] +['ãģĵãģ¨ãĤĤ', 'ãģĤãĤĬãģ¾ãģĻ'] +['è¾¼', 'ãģ¾ãĤĮ'] +['ĠاÙĦساب', 'ع'] +['Ġc', 'á»§'] +['ãģĦãģŁãģł', 'ãģĦãģŁ'] +['ì§', 'ĵ'] +['ìĤ¬', '무'] +['powied', 'ź'] +['تÙģ', 'Ùĥ'] +['تÙģÙĥ', 'ÙĬر'] +['иÑĢов', 'ки'] +['ĠíĨµ', 'íķ´ìĦľ'] +['ãĤ¨', 'ãĤ¹ãĥĨ'] +['ĠдеÑıÑĤелÑĮ', 'ноÑģÑĤÑĮ'] +['ĠданнÑĭ', 'м'] +['Ġ×¢', '×ķר'] +['Ġ×¢×ķר', '׼×Ļ'] +['×ķ×ĵ', 'עת'] +['Ġhayat', 'ını'] +['Ġb', 'Äħd'] +['ĠbÄħd', 'ź'] +['obs', 'ÅĤug'] +['à¹Ģà¸ŀียà¸ĩ', 'à¹ģà¸Ħà¹Ī'] +['à¸ĭ', 'à¹Īา'] +['è²ł', 'ãģij'] +['ĠÑģÑĤÑĢ', 'ем'] +['ĠÄij', 'á»īnh'] +['ĠÐł', 'ÑĥÑģ'] +['ĠN', 'ữ'] +['Ġ׾×Ķש', '×Ļ×Ĵ'] +['Ġjed', 'noc'] +['Ġjednoc', 'ze'] +['Ġjednocze', 'ÅĽnie'] +['Ġ×Ķ×Ĵ', '×ij×ķ×Ķ'] +['أخ', 'ÙĦاÙĤ'] +['ĠнаÑģ', 'ел'] +['ĠнаÑģел', 'ениÑı'] +['ĠÙĬ', 'ÙĨب'] +['ĠÙĬÙĨب', 'غÙĬ'] +['ãģĮ', 'ãģĭ'] +['ãģĮãģĭ', 'ãģĭ'] +['×Ĵ', 'עת'] +['Ðŀ', 'Ðł'] +['ĠналиÑĩ', 'ии'] +['Ġë§Ī', 'ì§Ģ'] +['Ġë§Īì§Ģ', 'ë§ī'] +['Ġíĸī', 'ìĤ¬'] +['Ġtre', 'ÅĽci'] +['Ġê°Ģ', 'ì¹ĺ'] +['ì¦', 'ĺ'] +['Ġана', 'лог'] +['×Ķצע', 'ת'] +['в', 'лад'] +['влад', 'е'] +['ĠÑģдел', 'ал'] +['Ġ׳', '×Ĵ×Ļש'] +['Ġ׳×Ĵ×Ļש', '×ķת'] +['полн', 'ение'] +['à¸Ĩ', 'à¹Īา'] +['ĠD', 'ön'] +['׼׾׼', '׾×Ķ'] +['×ŀ×ĸ', '×Ĵ'] +['Ùħ', 'Ùģ'] +['ÙħÙģ', 'Ùĩ'] +['ÙħÙģÙĩ', 'ÙĪÙħ'] +['×Ķ', '×ĵ'] +['×Ķ×ĵ', 'פס'] +['×Ķ×ĵפס', '×Ķ'] +['ãģĻãģİ', 'ãģ¦'] +['Ġг', 'ÑĢ'] +['ĠгÑĢ', 'н'] +['×ŀ×ĺ', '×ķס'] +['Ġ기', 'ìĸµ'] +['ï¾', 'Ł'] +['ĠpÅĤ', 'yn'] +['ĠGr', 'ünde'] +['ĠBü', 'cher'] 
+['Ġwed', 'ÅĤug'] +['ãģ¾ãģł', 'ãģ¾ãģł'] +['Ġ׳×Ķ', '×ĵר'] +['ĠÙĬست', 'Ø·ÙĬع'] +['ĠHi', 'á»ĩp'] +['ãĤŃãĥ£ãĥ³', 'ãĥļ'] +['ãĤŃãĥ£ãĥ³ãĥļ', 'ãĥ¼ãĥ³'] +['Ġth', 'á»ķ'] +['Ġeuropé', 'enne'] +['à¸ļ', 'ัà¸ĩ'] +['à¸ļัà¸ĩ', 'à¸Ħัà¸ļ'] +['ĠszczegóÅĤ', 'owo'] +['׳', 'שק'] +['ãĥķ', 'ãĥ©ãĥ³ãĤ¹'] +['×ŀ×ķ×ŀ', '×Ĺ×Ļ'] +['Ġcom', 'ún'] +['Ġç', 'arp'] +['ØŃت', 'ÙĬا'] +['ØŃتÙĬا', 'ج'] +['ØŃتÙĬاج', 'ات'] +['ëĭ´', 'ëĭ¹'] +['ä½ķ', '度'] +['ä½ķ度', 'ãĤĤ'] +['×ĵ', '×ij×§'] +['ãģį', 'ãĤĮ'] +['ãģįãĤĮ', 'ãģĦ'] +['Ġк', 'ам'] +['Ġкам', 'еÑĢ'] +['ĠespecÃŃf', 'ico'] +['Ġtel', 'éfono'] +['à¸ķัà¹īà¸ĩ', 'à¸Ńยูà¹Ī'] +['I', 'Åŀ'] +['ãģ©', 'ãĤĵãģ©'] +['ãģ©ãĤĵãģ©', 'ãĤĵ'] +['עצ', '×ŀ×IJ×Ļ'] +['à¸Ķัà¸ĩ', 'à¸Ļีà¹ī'] +['ĠÑĦоÑĢм', 'иÑĢов'] +['ĠÑĦоÑĢмиÑĢов', 'а'] +['×ķ×ŀ', '×ij'] +['Ġkullan', 'ımı'] +['Ðľ', 'Ðŀ'] +['×¢', 'ש×Ļ'] +['עש×Ļ', '×Ļ×Ķ'] +['Ġön', 'lem'] +['à¹Ģà¸Ń', 'à¹ĩ'] +['à¹Ģà¸Ńà¹ĩ', 'ม'] +['×ŀשק', '×Ļ×¢'] +['ר', '×Ļ×Ĺ'] +['à¸Ĥ', 'ัà¸Ķ'] +['ĠíĻ', 'ľ'] +['ĠíĻľ', 'ìļ©'] +['à¸ĭ', 'ะ'] +['ãĤĪãģĨ', 'ãģ«ãģªãĤĬãģ¾ãģĹãģŁ'] +['ĠÑĢаÑģ', 'пÑĢ'] +['ĠÑĢаÑģпÑĢ', 'оÑģÑĤ'] +['ĠÑĢаÑģпÑĢоÑģÑĤ', 'ÑĢан'] +['ĠÑĢаÑģпÑĢоÑģÑĤÑĢан', 'ен'] +['׼×Ļ', '×ķף'] +['ÙĤب', 'ض'] +['تص', 'رÙĬØŃ'] +['تصرÙĬØŃ', 'ات'] +['Ġо', 'ÑĢи'] +['ĠоÑĢи', 'г'] +['ĠоÑĢиг', 'ина'] +['ĠоÑĢигина', 'л'] +['ĠاÙĦع', 'اÙĦÙĬ'] +['à¹ģหà¹Īà¸ĩ', 'à¸Ļีà¹ī'] +['ãĥķãĤ¡', 'ãĥ¼'] +['ãģ¦ãģĦ', 'ãģį'] +['ãģ¦ãģĦãģį', 'ãģŁãģĦ'] +['פ', 'תר'] +['פתר', '×ķ׳×ķת'] +['Ġ×ij', '×Ļ×Ĺ'] +['Ġ×ij×Ļ×Ĺ', '×ĵ'] +['Ġod', 'by'] +['Ġodby', 'ÅĤ'] +['ĠоÑĩеÑĢ', 'ед'] +['Ġtr', 'ương'] +['ãĤŃ', 'ãĥ³'] +['×ŀ', '×ķפ'] +['×ŀ×ķפ', '×¢'] +['ëĵľ', '립'] +['ëĵľë¦½', 'ëĭĪëĭ¤'] +['à¸ŀืà¹īà¸Ļ', 'à¸IJาà¸Ļ'] +['ìŀIJ', '격'] +['ĠVi', 'á»ĩn'] +['ĠDes', 'pués'] +['Ġ×IJ׾', '×Ļ׳×ķ'] +['Ġdur', 'ée'] +['íĩ', '´'] +['Ġmü', 'zik'] +['i', 'ếu'] +['ĠÑĢаз', 'меÑīен'] +['Ġк', 'Ñĥд'] +['ĠкÑĥд', 'а'] +['غ', 'ض'] +['غض', 'ب'] +['ĠTamb', 'ém'] +['à¸Īัà¸Ķ', 'สà¹Īà¸ĩ'] +['à¸ģาร', 'à¹ģสà¸Ķà¸ĩ'] +['onom', 'ÃŃa'] +['Ġан', 'г'] +['Ġанг', 'ли'] +['Ġангли', 'й'] +['Ġанглий', 'Ñģк'] +['Ġzn', 'al'] +['Ġznal', 'az'] +['Ġznalaz', 'ÅĤ'] 
+['תר', '×Ĵ'] +['תר×Ĵ', '×ķ×Ŀ'] +['ĠÑģ', 'нов'] +['ĠÑģнов', 'а'] +['ĠÑĩаÑģ', 'а'] +['Ġcommun', 'auté'] +['ĠespecÃŃf', 'ica'] +['ĠL', 'á»ĭch'] +['Ġli', 'é'] +['Ùģ', 'جر'] +['à¹Ģà¸ģ', 'à¹Īà¸ĩ'] +['ع', 'اÙĦ'] +['عاÙĦ', 'ج'] +['Ø£ÙĨ', 'ظ'] +['Ø£ÙĨظ', 'ÙħØ©'] +['ES', 'İ'] +['ĠاÙĦØŃ', 'دÙĬد'] +['à¸ŀระ', 'à¸Ńà¸ĩà¸Ħà¹Į'] +['Ġפר', 'שת'] +['Ġдв', 'иж'] +['Ġдвиж', 'ениÑı'] +['ĠاÙĦج', 'ارÙĬ'] +['à¸ĺาà¸Ļ', 'ี'] +['неÑģ', 'ен'] +['ĠاÙĦÙĨ', 'ÙĩائÙĬ'] +['Ġб', 'еÑĢ'] +['ĠбеÑĢ', 'ем'] +['ĠбеÑĢем', 'енн'] +['Ġdépart', 'ement'] +['à¹Ģà¸Ĺ', 'ีย'] +['à¹Ģà¸Ĺีย', 'à¸ļ'] +['ĠÐľ', 'аÑĢи'] +['ĠнекоÑĤоÑĢ', 'ÑĭÑħ'] +['об', 'еÑģп'] +['обеÑģп', 'еÑĩен'] +['×Ĺ', '×ķ×ĸ'] +['×Ĺ×ķ×ĸ', '×Ķ'] +['ÙĨت', 'ج'] +['à¸Īะ', 'à¹Ħà¸Ķà¹īรัà¸ļ'] +['á»', '°'] +['Ġél', 'éments'] +['ع', 'Ø·'] +['عط', 'اء'] +['Ġt', 'ắt'] +['i', 'á»ĩm'] +['ÑİÑīиÑħ', 'ÑģÑı'] +['ãģĹãģ', '°'] +['ãģĹãģ°', 'ãĤīãģı'] +['Ġпом', 'ожеÑĤ'] +['à¸Ĥà¸ĵะ', 'à¸Ļีà¹ī'] +['Ġ×¢', 'שר×ķת'] +['éģķ', 'ãģ£ãģ¦'] +['ĠпÑĢ', 'ог'] +['ĠпÑĢог', 'н'] +['ĠпÑĢогн', 'оз'] +['Ġt', 'ÅĤ'] +['ĠtÅĤ', 'um'] +['ĠtÅĤum', 'acz'] +['T', 'ür'] +['Tür', 'kiye'] +['ãģį', 'ãģ£'] +['ãģįãģ£', 'ãģĭãģij'] +['Ġ×Ķ׳', '×ķ׼'] +['Ġ×Ķ׳×ķ׼', '×Ĺ×Ļ'] +['ĠìĥĿ', 'ìĤ°'] +['ĠÑĦоÑĢм', 'Ñĭ'] +['ç¾İ', 'ãģĹãģĦ'] +['à¸Ľà¸£', 'ึà¸ģ'] +['à¸Ľà¸£à¸¶à¸ģ', 'ษา'] +['Ġlum', 'ière'] +['ãĤª', 'ãĥ¼ãĥĹ'] +['ãĤªãĥ¼ãĥĹ', 'ãĥ³'] +['à¸Ľ', 'ืà¸Ļ'] +['วั', 'สà¸Ķ'] +['วัสà¸Ķ', 'ุ'] +['еÑĢÑĤ', 'в'] +['ÙĥÙĦ', 'Ùģ'] +['ï½', '£'] +['à¸ĺรรม', 'à¸Ķา'] +['׳', '×ĺר'] +['ĠпÑĢедÑģÑĤав', 'лÑıеÑĤ'] +['Ġanál', 'isis'] +['Ġb', 'ãi'] +['با', 'ÙĤÙĬ'] +['à¸Ľà¸£à¸°', 'à¹Ģà¸Ķ'] +['à¸Ľà¸£à¸°à¹Ģà¸Ķ', 'à¹ĩà¸Ļ'] +['ĠÑģлÑĥÑĩ', 'аÑı'] +['ĠÑģлÑĥÑĩаÑı', 'Ñħ'] +['ÐĽ', 'ÐIJ'] +['สัà¸ĩ', 'à¹Ģà¸ģ'] +['สัà¸ĩà¹Ģà¸ģ', 'à¸ķ'] +['Ġprz', 'ec'] +['Ġprzec', 'ież'] +['Ùħ', 'صÙĦ'] +['ÙħصÙĦ', 'ØŃØ©'] +['ש×ķ×§', '×ķ׾×ĵ'] +['ĠобоÑĢÑĥд', 'ованиÑı'] +['Ġtr', 'waÅĤ'] +['رÙĪ', 'Ùħ'] +['ìķĪ', 'ëĤ´'] +['ĠNgh', 'á»ĭ'] +['Ø®', 'Ø´'] +['à¸ļา', 'à¸Ħาร'] +['à¸ļาà¸Ħาร', 'à¹Īา'] +['Ġоп', 'ÑĨион'] +['ĠÑģозд', 'аниÑı'] +['ãĤ³', 'ãĤ¹ãĥĪ'] +['Ġ×Ķ×¢', '׾×Ļ'] +['Ġ×Ķ×¢×ľ×Ļ', '×ķף'] 
+['lä', 'uft'] +['ãĥĻ', 'ãĤ¹ãĥĪ'] +['Ġr', 'ê'] +['Ġrê', 've'] +['×IJ', '×ij×Ļ×ij'] +['×Ļ', '×Ļ×ļ'] +['ë¶', 'Ļ'] +['ãĤ¤ãĥ³', 'ãĥī'] +['ÅĤo', 'ży'] +['ÅĤoży', 'Äĩ'] +['ع', 'ائÙĦ'] +['عائÙĦ', 'Ø©'] +['Ø£', 'ÙĪØ±'] +['Ø£ÙĪØ±', 'اÙĤ'] +['à¸Ĺà¹īà¸Ńà¸ĩ', 'à¸ĸ'] +['à¸Ĺà¹īà¸Ńà¸ĩà¸ĸ', 'ิà¹Īà¸Ļ'] +['Ġä', 'hn'] +['Ġähn', 'lich'] +['ãĥŁ', 'ãĥĭ'] +['à¸ľ', 'ู'] +['à¸ľà¸¹', 'à¹īà¸Ļ'] +['à¸ľà¸¹à¹īà¸Ļ', 'ำ'] +['ĠмаÑĤеÑĢиал', 'Ñĭ'] +['Ġкап', 'иÑĤ'] +['ĠкапиÑĤ', 'ал'] +['ï¼', '¦'] +['Ġseç', 'il'] +['Ġh', 'ứng'] +['Ġintéress', 'ant'] +['ãģ£ãģ¦', 'ãģĦãģı'] +['Ġe', 'ÄŁer'] +['ëIJĺ', 'ìĹĪìĬµëĭĪëĭ¤'] +['Ġan', 'laÅŁma'] +['ãģĶ', 'åĪ©ç͍'] +['Ġ×ij', '×ĸ׼'] +['Ġ×ij×ĸ׼', '×ķת'] +['ëĿ¼', 'ë©´'] +['ĠÙĬ', 'ÙĪØ³'] +['ĠÙĬÙĪØ³', 'Ùģ'] +['أسÙĦ', 'ØŃØ©'] +['ĠGef', 'ühl'] +['ĠноÑĢм', 'алÑĮн'] +['ãĥĻ', 'ãĥ³'] +['ãģķãĤĮ', 'ãĤĭãģĵãģ¨'] +['ĠÐij', 'еÑģ'] +['ãģ¨ãģĦ', 'ãģĪãģ°'] +['ĠÙħ', 'ÙĩÙħ'] +['ĠÙħÙĩÙħ', 'Ø©'] +['ãģ§ãģĹãĤĩãģĨ', 'ãģŃ'] +['ĠêµŃ', 'ëĤ´'] +['à¹Ģม', 'à¹ĩà¸Ķ'] +['×ŀ×ij', 'קר'] +['ĠاÙĦد', 'ÙĨÙĬ'] +['ĠاÙĦدÙĨÙĬ', 'ا'] +['à¸Ĭ', 'ู'] +['к', 'ÑĢÑĥÑĤ'] +['Ġtho', 'áng'] +['Ġ׳', '×ĵר'] +['Ġ׳×ĵר', 'ש'] +['ĠÑĢаÑģÑģ', 'казал'] +['ĠAu', 'ÃŁerdem'] +['פ', '×IJר'] +['פ×IJר', '×§'] +['Ġ×ŀש×Ĺ×§', '×Ļ×Ŀ'] +['צ', 'ר׼×Ļ×Ŀ'] +['×ŀ×ĵ', '×ķ'] +['×ŀ×ĵ×ķ', '×Ļ×§'] +['èĭ¦', 'ãģĹ'] +['ĠÑģ', 'иг'] +['ĠÑģиг', 'нал'] +['ĠM', 'á»įi'] +['Ġtr', 'ữ'] +['Ġnast', 'ÄĻp'] +['ĠnastÄĻp', 'nie'] +['Ġì¶Ķ', 'ì§Ħ'] +['ĠاÙĦÙģ', 'ÙĨد'] +['ĠاÙĦÙģÙĨد', 'ÙĤ'] +['koÅĦ', 'czyÅĤ'] +['ส', 'ีà¹Ī'] +['×§', '×Ļ×ij'] +['×§×Ļ×ij', '×ķ×¥'] +['ĠнÑĥж', 'нÑĭ'] +['大', 'åĪĩ'] +['大åĪĩ', 'ãģª'] +['æıĽ', 'ãģĪ'] +['ת', '×ķס'] +['ת×ķס', 'פת'] +['ãģ£ãģ¦', 'ãģĦãģªãģĦ'] +['Ġм', 'Ñı'] +['ĠмÑı', 'г'] +['ĠмÑıг', 'к'] +['Ġjak', 'ie'] +['Ġjakie', 'ÅĽ'] +['à¸ķำ', 'à¸ļ'] +['à¸ķำà¸ļ', 'ล'] +['ĠìŀĪ', 'ì§Ģ'] +['×ij×ĺ', '×IJ'] +['ĠоÑĤлиÑĩ', 'но'] +['ÙĤ', 'ÙIJ'] +['ĠавÑĤом', 'об'] +['ĠавÑĤомоб', 'и'] +['ĠавÑĤомоби', 'лÑı'] +['دÙĬÙħÙĤرا', 'Ø·ÙĬ'] +['ĠاÙĦ', 'ÙĪØ§'] +['ĠاÙĦÙĪØ§', 'ØŃد'] +['Ġس', 'ÙĪØ±ÙĬØ©'] +['Ø£', 'غÙĦ'] +['أغÙĦ', 'ب'] +['ĠÑįк', 'ÑĢан'] +['ãĥĹ', 'ãĥ©ãĤ¤'] 
+['Ġjeste', 'ÅĽ'] +['ãĥIJ', 'ãĥª'] +['Ġ×Ķ×IJ', '×ķ×ķ×Ļר'] +['ائ', 'Ùĥ'] +['à¸Ńยà¹Īาà¸ĩ', 'ยิà¹Īà¸ĩ'] +['ÑĢ', 'екÑĤ'] +['Ġum', 'o'] +['Ġumo', 'ż'] +['Ġumoż', 'li'] +['Ġumożli', 'w'] +['Ġumożliw', 'ia'] +['Ġnäch', 'ste'] +['ĠìŀĪ', 'ì§Ģë§Į'] +['ĠпÑĢед', 'н'] +['ĠпÑĢедн', 'аз'] +['ĠпÑĢедназ', 'наÑĩен'] +['Ġma', 'çı'] +['Ġp', 'omi'] +['Ġpomi', 'ÄĻd'] +['ĠpomiÄĻd', 'zy'] +['ĠاÙĦÙĦ', 'ÙĤاء'] +['à¹Ģà¸Ķ', 'à¸Ńะ'] +['Ġнов', 'оÑģÑĤи'] +['×ŀ×Ĺ', '׾×Ķ'] +['رÙĬاض', 'ÙĬ'] +['à¸Ķ', 'à¸Ļ'] +['à¸Ķà¸Ļ', 'à¸ķรี'] +['ب', 'صر'] +['ìĬ¤', 'íĥĢ'] +['scri', 'pción'] +['Ġnap', 'isa'] +['Ġnapisa', 'ÅĤ'] +['Ġ׳ש', '×ŀ×¢'] +['ĠاÙĦÙħØŃ', 'ÙĦÙĬ'] +['Ġhi', 'á»ĥn'] +['×IJ', '×Ĺ'] +['×IJ×Ĺ', 'ר×IJ×Ļ'] +['Ġг', 'ÑĢаниÑĨ'] +['æīĭ', 'ç¶ļãģį'] +['Ùĥ', 'سب'] +['Ġà¹ģà¸ķà¹Ī', 'à¸ĸà¹īา'] +['à¸Ķาว', 'à¸Ļà¹Į'] +['à¸Ķาวà¸Ļà¹Į', 'à¹Ĥหลà¸Ķ'] +['ãĤĭãģĵãģ¨ãģĮãģ§ãģį', 'ãģ¾ãģĻ'] +['åŁºæľ¬', 'çļĦãģ«'] +['ÙĪÙĦ', 'اد'] +['rä', 'ume'] +['د', 'ÙģØ§Ø¹'] +['×Ļצ', '×¢'] +['ĠO', 'czy'] +['ĠOczy', 'wiÅĽcie'] +['ĠÅ', 'ģ'] +['ĠÅģ', 'a'] +['اÙĦÙĬ', 'اب'] +['اÙĦÙĬاب', 'اÙĨ'] +['áºł', 'I'] +['ĠBir', 'liÄŁi'] +['×Ķ', '×ķצ'] +['×Ķ×ķצ', '×IJת'] +['ĠÄij', 'ua'] +['Ġê·¸ëŁ¬', 'ëĭĪê¹Į'] +['Ġréal', 'ité'] +['ع', 'ÙĦاÙĤات'] +['J', 'este'] +['Jeste', 'ÅĽ'] +['Ġмн', 'ож'] +['Ġмнож', 'еÑģÑĤво'] +['ï¼', '«'] +['ãĥĹãĥŃ', 'ãĤ¸ãĤ§'] +['ãĥĹãĥŃãĤ¸ãĤ§', 'ãĤ¯ãĥĪ'] +['ĠÑĦ', 'л'] +['ظ', 'ÙĨ'] +['×Ĵ׾', '×Ĵ׾'] +['ĠmÅĤod', 'zie'] +['ĠmÅĤodzie', 'ż'] +['à¸Ļà¹īำ', 'à¸ķา'] +['à¸Ļà¹īำà¸ķา', 'ล'] +['ÐĽ', 'Ðķ'] +['×ij', '×ķ×ĺ'] +['Ġ׾×Ķ', '×Ĵ×Ļ×ĵ'] +['ãģĵãģ¨ãĤĤ', 'ãģĤãĤĭ'] +['ز', 'اد'] +['×ŀ×Ļ×ĵ', '×¢'] +['ĠgÅĤówn', 'ie'] +['ãĥı', 'ãĤ¦'] +['ãĥıãĤ¦', 'ãĤ¹'] +['б', 'ел'] +['Ġét', 'ape'] +['ðŁĺ', 'Ģ'] +['Ġмод', 'елÑĮ'] +['a', 'ģını'] +['ש', '×Ĺ×§'] +['ש×Ĺ×§', 'ף'] +['Ġni', 'ño'] +['à¸Ĭ', 'à¹īาà¸ĩ'] +['à¹Ģล', 'ีย'] +['ĠÑĦоÑĢм', 'е'] +['ĠاÙĦØ´', 'رÙĬÙģ'] +['ĠÑĥд', 'аÑĢ'] +['arr', 'iv'] +['arriv', 'ée'] +['Ġmies', 'iÄĻ'] +['ĠmiesiÄĻ', 'cy'] +['ØŃ', 'رÙĥ'] +['ØŃرÙĥ', 'ات'] +['ĠDi', 'á»ħn'] +['ÐĿ', 'Ы'] +['ãģ¾ãģ£ãģŁ', 'ãģı'] +['Ġ×Ļ', 'ר×ķ×§'] +['еÑģÑĤ', 'еÑģÑĤв'] 
+['еÑģÑĤеÑģÑĤв', 'енн'] +['Ġê·¸', 'ëŁ¼'] +['ĠاÙĦÙħ', 'تÙĪ'] +['ĠاÙĦÙħتÙĪ', 'سط'] +['Ġbéné', 'fic'] +['Ġbénéfic', 'ie'] +['Ġwy', 'bra'] +['Ġwybra', 'Äĩ'] +['ĠاÙĦز', 'ÙħÙĨ'] +['ĠпÑĢин', 'Ñı'] +['ĠпÑĢинÑı', 'л'] +['Ù쨱', 'ØŃ'] +['Ġk', 'sz'] +['Ġksz', 'taÅĤ'] +['ĠksztaÅĤ', 't'] +['ק׾', '×ĺ'] +['×ij×ĵ×Ļ×§', 'ת'] +['Ġgi', 'ấ'] +['Ġgiấ', 'c'] +['Ġpropriet', 'Ãł'] +['деÑĢж', 'ан'] +['ĠKö', 'ln'] +['ĠGü', 'zel'] +['×Ļפ', '×ķ×Ļ'] +['ĠCu', 'á»Ļc'] +['ÑįÑĤ', 'аж'] +['تر', 'ÙĥÙĬ'] +['ترÙĥÙĬ', 'ز'] +['лож', 'ений'] +['Ġп', 'Ñĥ'] +['ĠпÑĥ', 'ÑĤи'] +['اخت', 'ÙĦاÙģ'] +['åĩºãģ¦', 'ãģıãĤĭ'] +['à¸ļุ', 'à¸ģ'] +['âĿ', '¤'] +['ÑĦ', 'ан'] +['פש', '×ĺ'] +['à¸ļัà¸Ļ', 'à¹Ģà¸Ĺ'] +['à¸ļัà¸Ļà¹Ģà¸Ĺ', 'ิà¸ĩ'] +['ĠاÙĦس', 'اد'] +['ĠاÙĦساد', 'س'] +['ĠاÙĦÙĤ', 'ÙĪÙħ'] +['ĠاÙĦÙĤÙĪÙħ', 'ÙĬ'] +['Ġyönet', 'ici'] +['Ùĩ', 'ÙĪØ§Øª'] +['ÙĩÙĪØ§Øª', 'Ùģ'] +['Ġrespons', 'ável'] +['Ġпод', 'деÑĢжива'] +['ĠاÙĦسÙĦ', 'Ø·'] +['ĠاÙĦسÙĦØ·', 'ات'] +['ãģĹãģ¦', 'ãģĬãģı'] +['ãĥļ', 'ãĥĥãĥĪ'] +['à¸Ľ', 'ุà¹Īม'] +['Ġogl', 'Äħda'] +['ÙĨا', 'ÙĤ'] +['ÙĨاÙĤ', 'Ø´'] +['à¸Ħà¸Ńà¸Ļ', 'à¹Ĥà¸Ķ'] +['ĠMü', 'sl'] +['ĠMüsl', 'ü'] +['ĠMüslü', 'man'] +['ĠMo', 'ż'] +['ĠMoż', 'na'] +['Ġnum', 'érique'] +['Ġv', 'á»ı'] +['ĠسÙĬ', 'تÙħ'] +['Ġyer', 'leÅŁ'] +['монÑĤ', 'аж'] +['Ġgo', 'ût'] +['ãģ¦', 'ãģĬãĤĬãģ¾ãģĻ'] +['ĠKh', 'ánh'] +['Ġе', 'дин'] +['Ġедин', 'ÑģÑĤв'] +['اÙĨ', 'Ø®Ùģ'] +['اÙĨØ®Ùģ', 'اض'] +['ìĭľ', 'íĹĺ'] +['Ġl', 'ặng'] +['ĠÑĢ', 'олÑĮ'] +['à¸ķัว', 'à¹ģà¸Ĺà¸Ļ'] +['à¸Ħà¹Īา', 'à¹ĥà¸Ĭà¹ī'] +['à¸Ħà¹Īาà¹ĥà¸Ĭà¹ī', 'à¸Īà¹Īาย'] +['Ġver', 'füg'] +['Ġverfüg', 'bar'] +['ìĻĶ', 'ëĭ¤'] +['ãģĦ', 'ãģļ'] +['ãģĦãģļ', 'ãĤĮ'] +['ĠиÑģÑģлед', 'ованиÑı'] +['меÑī', 'а'] +['×Ķ', '×Ĺ'] +['×Ķ×Ĺ', '×ĸר'] +['à¹ģà¸Ł', 'à¸Ĭัà¹Īà¸Ļ'] +['ت', 'صرÙģ'] +['Ø¥', 'رÙĩاب'] +['Ġexerc', 'ÃŃcio'] +['Ġé', 'lev'] +['Ġélev', 'é'] +['สัà¸įà¸įา', 'à¸ĵ'] +['Ãĸ', 'Z'] +['ãĥĹ', 'ãĥŃãĤ°'] +['ãĥĹãĥŃãĤ°', 'ãĥ©'] +['ãĥĹãĥŃãĤ°ãĥ©', 'ãĥł'] +['Ġw', 'ewnÄĻtrzn'] +['Ġhen', 'üz'] +['é£Ľ', 'ãģ³'] +['à¹Ģà¸Ķ', 'à¸Ńรà¹Į'] +['Ñģ', 'Ñĥж'] +['ÑģÑĥж', 'ден'] +['شع', 'ÙĪØ¨'] +['ãģ²ãģ¨', 'ãĤĬ'] +['Ġwy', 'ÅĤÄħ'] 
+['ĠwyÅĤÄħ', 'cznie'] +['Ġпло', 'Ñħо'] +['ÐĶ', 'Ðķ'] +['áº', '¦'] +['Ù쨹', 'اÙĦÙĬ'] +['ÙģØ¹Ø§ÙĦÙĬ', 'ات'] +['ĠاÙĦع', 'شر'] +['ÑģÑĤÑĥп', 'ил'] +['Ġy', 'arg'] +['Ġyarg', 'ı'] +['нÑİ', 'Ñİ'] +['×ķ×IJ', '×ij'] +['Ġu', 'ç'] +['Ġuç', 'ak'] +['ë²', '½'] +['تÙĪ', 'ÙĤÙĬ'] +['تÙĪÙĤÙĬ', 'ع'] +['Ġì¤ij', 'ìĭ¬'] +['׳×Ļ×ķ', '×ķ×ĺ'] +['Ø£', 'ÙĥÙĦ'] +['ç½®', 'ãģĦãģ¦'] +['éłĤ', 'ãģį'] +['Ġ×Ķת', '×ij'] +['Ġ×Ķת×ij', '×Ļ×¢×Ķ'] +['Ġdür', 'fen'] +['Ùħ', 'ÙĤاÙĦ'] +['ÙħÙĤاÙĦ', 'ات'] +['Ġز', 'ÙħÙĨ'] +['à¸ŀฤ', 'ศ'] +['à¸ŀฤศ', 'à¸Ī'] +['à¸ŀฤศà¸Ī', 'ิà¸ģ'] +['à¸ŀฤศà¸Īิà¸ģ', 'ายà¸Ļ'] +['ĠнеÑģк', 'олÑĮ'] +['ĠнеÑģколÑĮ', 'ки'] +['ĠнеÑģколÑĮки', 'Ñħ'] +['Ġcrian', 'ça'] +['มิ', 'à¸ķร'] +['×ŀ׼', '×Ļר×ķת'] +['à¸ģาร', 'à¸ļริหาร'] +['Ġtélé', 'charg'] +['Ġ×IJ×ķ×Ķ', '×ijת'] +['ĠBü', 'ro'] +['ä½ľ', 'ãģ£ãģŁ'] +['ĠKi', 'ÅŁi'] +['ç¾İåij³', 'ãģĹ'] +['à¹Ģลย', 'à¸Ħà¹Īะ'] +['à¸ŀà¸ļ', 'à¸ģัà¸ļ'] +['à¸Ī', 'à¹īา'] +['Ġç', 'er'] +['Ġçer', 'ç'] +['Ġçerç', 'eve'] +['ãĤĴä½ľ', 'ãģ£ãģ¦'] +['ĠпеÑĢв', 'ÑĥÑİ'] +['×ŀצ', 'ר×Ļ×Ŀ'] +['×IJ׾', '×ķ×Ķ'] +['×IJ׾×ķ×Ķ', '×Ļ×Ŀ'] +['Ġagr', 'é'] +['Ġagré', 'able'] +['Ġay', 'ır'] +['İL', 'İ'] +['ãĤ', '¥'] +['Ġíĺ', 'Ħ'] +['ĠíĺĦ', 'ìĭ¤'] +['ثاÙĦ', 'Ø«'] +['ת', '×ĸ'] +['ת×ĸ', '×ķ׳×Ķ'] +['ãģ¨ãģĦ', 'ãģ£ãģ¦'] +['ãģ¨ãģĦãģ£ãģ¦', 'ãĤĤ'] +['Ġا', 'بÙĪ'] +['ĠÑģоб', 'ак'] +['é£Łãģ¹', 'ãģŁ'] +['Ġдан', 'ном'] +['à¹Ģล', 'ิ'] +['à¹Ģลิ', 'ศ'] +['Ġí', 'ļ'] +['Ġíļ', '¨'] +['Ġíļ¨', 'ê³¼'] +['ãĤĤãĤī', 'ãģĪãĤĭ'] +['׳', 'צ׾'] +['ÑĦ', 'ик'] +['ÑĦик', 'Ñģ'] +['Ġjeste', 'ÅĽmy'] +['ת×Ĺ×ķש', '×Ķ'] +['à¹Ħมà¹Ī', 'à¸Ħวร'] +['ĠØŃ', 'سÙĬÙĨ'] +['à¸ģาร', 'ลà¸ĩà¸Ĺุà¸Ļ'] +['ë´', '¤'] +['ĠÐĺ', 'менно'] +['à¸ļ', 'à¸Ńรà¹Į'] +['à¸ļà¸Ńรà¹Į', 'à¸Ķ'] +['ĠC', 'ảnh'] +['ìĦľ', 'ë¹ĦìĬ¤'] +['Ġпол', 'ов'] +['Ġполов', 'ин'] +['Ġзам', 'еÑĩа'] +['ãģĦãĤį', 'ãĤĵãģª'] +['Ġ×ij', '×Ļ×§'] +['Ġ×ij×Ļ×§', 'ש'] +['л', 'ÑĥÑĪ'] +['ãĤĴ', 'è¿İ'] +['ãĤĴè¿İ', 'ãģĪ'] +['جرÙĬ', 'ÙħØ©'] +['Ġt', 'ây'] +['ĠاÙĦÙĨ', 'ÙĪ'] +['ĠاÙĦÙĨÙĪ', 'ÙĪÙĬ'] +['ÃĤ', 'N'] +['ì¿', 'ł'] +['หà¸Ļ', 'าว'] +['Ġ×ij×Ĺ', 'ש×ij×ķף'] +['ز', 'ار'] +['à¸Ķ', 'าร'] +['à¸Ķาร', 'า'] +['ĠÅĽ', 'l'] 
+['ĠÅĽl', 'ub'] +['มีà¸Ħวาม', 'สุà¸Ĥ'] +['Ġn', 'hu'] +['Ġnhu', 'áºŃn'] +['ÙħØŃ', 'طة'] +['à¹Ģสืà¹īà¸Ń', 'à¸ľà¹īา'] +['ĠТ', 'олÑĮко'] +['ĠÙĥ', 'س'] +['ĠÙĥس', 'ارة'] +['ÙħØ´', 'رÙĪØ¹'] +['niÄĻ', 'cia'] +['×¢', '׼ש×Ļ×ķ'] +['ت', 'ÙĦÙģ'] +['تÙĦÙģ', 'زÙĬ'] +['تÙĦÙ쨲ÙĬ', 'ÙĪÙĨ'] +['Ġl', 'Æ°á»Ľi'] +['ĠÐľÐ¾Ñģк', 'вÑĭ'] +['Ġré', 'serve'] +['Ġan', 'laÅŁ'] +['ĠanlaÅŁ', 'ıl'] +['Ġed', 'eceÄŁi'] +['รà¸Ńà¸ĩ', 'à¹Ģà¸Ĺà¹īา'] +['Ġب', 'Ø·'] +['Ġبط', 'رÙĬ'] +['ĠبطرÙĬ', 'ÙĤØ©'] +['ãģ¦ãģĹãģ¾', 'ãģ£ãģ¦'] +['ãĤĤãĤī', 'ãģ£ãģ¦'] +['بر', 'ج'] +['æ±', 'ļ'] +['æ±ļ', 'ãĤĮ'] +['Ġch', 'oc'] +['Ġchoc', 'ia'] +['Ġchocia', 'ż'] +['Ġzob', 'ac'] +['Ġzobac', 'zyÄĩ'] +['пÑĢ', 'Ñı'] +['пÑĢÑı', 'жен'] +['ĠÑĨ', 'иÑĦ'] +['ĠÑĨиÑĦ', 'ÑĢ'] +['Ġм', 'ам'] +['Ġвз', 'ÑıÑĤÑĮ'] +['Ġch', 'ạm'] +['ج', 'سÙħ'] +['ØŃÙħ', 'اس'] +['à¹Ģล', 'à¹Īม'] +['à¸ŀิ', 'ษ'] +['×Ķפ', '׼×ķ'] +['à¸Ĭà¹Īà¸Ńà¸ĩ', 'à¸Ĺาà¸ĩ'] +['Ġв', 'ек'] +['Ġвек', 'а'] +['Æ¡', 'Ìģ'] +['Æ¡Ìģ', 'i'] +['ĠTi', 'á»ģn'] +['Ġtr', 'ầm'] +['мÑĭ', 'ÑĪ'] +['мÑĭÑĪ', 'л'] +['ĠÑĤ', 'Ñĥ'] +['ĠÑĤÑĥ', 'ÑĢиÑģÑĤ'] +['Ġch', 'c'] +['Ġchc', 'Äħ'] +['Ġав', 'г'] +['Ġавг', 'ÑĥÑģÑĤ'] +['ĠавгÑĥÑģÑĤ', 'а'] +['ס', '×IJ×ķת'] +['Ġר', '×Ĵ׾'] +['à¸ľà¸¥', 'à¸ģระà¸Ĺ'] +['à¸ľà¸¥à¸ģระà¸Ĺ', 'à¸ļ'] +['å¤īãĤı', 'ãĤĭ'] +['Ġ×Ķ×IJ×Ĺר', '×ķ׳×Ļ×Ŀ'] +['سÙģ', 'ÙĬر'] +['ĠÑĩа', 'Ñīе'] +['ãģĦ', 'ãĤī'] +['ãģĦãĤī', 'ãģ£'] +['ãģĦãĤīãģ£', 'ãģĹãĤĥ'] +['×ķ×ŀ', '׳×Ļ×Ŀ'] +['Ġart', 'tır'] +['ĠCh', 'á»ĭ'] +['Ġì¡°', 'ì§ģ'] +['ĠÑĥÑģп', 'еÑħ'] +['Ġ×¢', '×ķס'] +['Ġ×¢×ķס', '×§'] +['ĠìĥĿ', 'ëªħ'] +['ÑĨ', 'иÑĤ'] +['Ġreg', 'ión'] +['Ðŀ', 'ÐĿ'] +['ĠdoÄŁ', 'um'] +['ĠyaÅŁ', 'ad'] +['ĠyaÅŁad', 'ıģı'] +['à¸Ĺà¸Ķ', 'ลà¸Ńà¸ĩ'] +['Ġgöz', 'ü'] +['ש', '×Ļר×Ķ'] +['дÑĥм', 'ал'] +['Ġda', 'ģı'] +['Ġdaģı', 't'] +['à¸Ĺีม', 'à¸ĩาà¸Ļ'] +['Ġti', 'á»ģm'] +['ĠاÙĦÙĥ', 'بر'] +['ĠاÙĦÙĥبر', 'Ùī'] +['ì¹', 'Ń'] +['ĠGü', 'nc'] +['ĠGünc', 'elle'] +['ĠGüncelle', 'me'] +['ê¹', 'Ĭ'] +['ĠобоÑĢÑĥд', 'ование'] +['ĠÑĢеÑĪ', 'а'] +['á»', '¤'] +['Ġп', 'иÑĤ'] +['ĠпиÑĤ', 'аниÑı'] +['à¹Ģรีย', 'à¸ļ'] +['×Ľ×ª', '×Ļ×ij×Ķ'] +['Ġп', 'он'] +['Ġпон', 'ÑĢав'] +['ĠпонÑĢав', 
'и'] +['Ġ×Ķ', '×ķ׾×ĵ'] +['Ġ×Ķ×ķ׾×ĵ', 'ת'] +['Ġê²', 'ģ'] +['Ġê²ģ', 'ëĭĪëĭ¤'] +['ĠпеÑĢв', 'ой'] +['ãĥ©ãĤ¤', 'ãĥķ'] +['ĠÅŁi', 'ir'] +['kr', 'ÄĻ'] +['krÄĻ', 'c'] +['Ġthi', 'á»ĥu'] +['à¹Ģลย', 'à¸Ĺี'] +['à¹Ģลยà¸Ĺี', 'à¹Ģà¸Ķียว'] +['×ĺ×¢', '׳×ķת'] +['ائ', 'ÙĩÙħ'] +['Ġ×IJ', 'ס×ķר'] +['ĠплаÑĤ', 'еж'] +['تر', 'دد'] +['Ġmożli', 'we'] +['Ġkh', 'Ỽ'] +['ĠkhỼ', 'p'] +['تÙģØ§Ø¹', 'ÙĦ'] +['ĠÑĪ', 'колÑĮ'] +['ĠÑĪколÑĮ', 'н'] +['ĠÙĤ', 'صة'] +['Ġmét', 'ier'] +['nÄĻ', 'ÅĤa'] +['หล', 'à¹Īà¸Ń'] +['Ġ', 'á»§ng'] +['Ġprz', 'egl'] +['Ġprzegl', 'Äħd'] +['ĠاÙĦÙħ', 'تعÙĦ'] +['ĠاÙĦÙħتعÙĦ', 'ÙĤØ©'] +['ĠÑģÑĭ', 'н'] +['Ġв', 'олн'] +['ãĥĩ', 'ãĥ¼ãĥĪ'] +['ĠÐŃ', 'ÑĤи'] +['Ġк', 'ÑĢоме'] +['à¸Ħ', 'ารà¹Į'] +['׳ק', '×ķ×ĵ×Ķ'] +['Ġ׾ש×ŀ', '×ķ×¢'] +['Ġ×ĸ', '×ķ׼ר'] +['ï¼', '§'] +['ÙĬ', 'ÙİØ§'] +['Ġgi', 'á»ıi'] +['åĥį', 'ãģı'] +['ĠÑģ', 'ни'] +['ĠÑģни', 'жен'] +['à¹ģà¸Ķ', 'à¸Ķ'] +['รุ', 'à¸Ļ'] +['รุà¸Ļ', 'à¹ģรà¸ĩ'] +['Ġhi', 'á»ĩp'] +['ograf', 'ÃŃa'] +['à¹Ģà¸Ī', 'à¸Ńรà¹Į'] +['Ġдв', 'иг'] +['Ġдвиг', 'аÑĤ'] +['ĠдвигаÑĤ', 'ел'] +['Ġü', 'y'] +['Ġüy', 'eler'] +['Ġüyeler', 'i'] +['Ġб', 'Ñĥк'] +['ĠбÑĥк', 'в'] +['ãĤĤ', 'å¤ļãģı'] +['Ġthi', 'á»ĩt'] +['ĠPa', 'ÃŃs'] +['ĠØ·', 'بÙĬعÙĬ'] +['à¹ģà¸Ī', 'à¸ģ'] +['ĠاÙĦص', 'ØŃÙĬØŃ'] +['Ġapp', 'ré'] +['Ġappré', 'ci'] +['Ġdecis', 'ión'] +['Ġë°ĺ', 'ëĵľ'] +['Ġë°ĺëĵľ', 'ìĭľ'] +['ĠÑĤеб', 'е'] +['ãĤ·', 'ãĥ¼ãĤº'] +['ãĤ·ãĥ¼ãĤº', 'ãĥ³'] +['Ġд', 'алÑĮн'] +['ĠìĬ', '¤'] +['ĠìĬ¤', 'ìĬ¤'] +['ĠìĬ¤ìĬ¤', 'ë¡ľ'] +['ĠTh', 'á»ĥ'] +['Ġkar', 'ÅŁ'] +['ĠkarÅŁ', 'ıs'] +['ĠkarÅŁÄ±s', 'ında'] +['ĠK', 'ön'] +['ĠKön', 'ig'] +['ив', 'ание'] +['×ij', '×ķצע'] +['г', 'лаÑģ'] +['Ġtw', 'ó'] +['Ġtwó', 'rc'] +['à¸Ľà¸ģ', 'à¸Ħร'] +['à¸Ľà¸ģà¸Ħร', 'à¸Ńà¸ĩ'] +['ĠG', 'ÅĤ'] +['ĠGÅĤ', 'ówn'] +['ĠUnter', 'stüt'] +['ĠUnterstüt', 'zung'] +['Ġд', 'ÑĥÑħ'] +['ĠдÑĥÑħ', 'ов'] +['Ø£', 'ÙħاÙĨ'] +['×Ĺש', 'ש'] +['ت', 'ظ'] +['تظ', 'اÙĩر'] +['ĠлÑİб', 'ом'] +['à¸ķ', 'าร'] +['à¸ķาร', 'าà¸ĩ'] +['Ġkr', 'ól'] +['Ø£', 'ØŃدث'] +['ì¡Į', 'ëĭ¤'] +['Ðļ', 'ÑĥÑĢÑģ'] +['ãĥĥ', 'ãĥĦ'] +['×ŀ×§', '×ķ×ij׾'] +['ĠÑģимв', 'ол'] +['Ġdés', 'orm'] +['Ġdésorm', 'ais'] 
+['w', 'üns'] +['wüns', 'che'] +['Ñĥ', 'ни'] +['Ñĥни', 'ÑĨип'] +['ÑĥниÑĨип', 'алÑĮн'] +['หลัà¸ģ', 'สูà¸ķร'] +['ÙĨت', 'شر'] +['Ġа', 'л'] +['Ġал', 'к'] +['Ġалк', 'ог'] +['Ġалког', 'ол'] +['ĠÑĥ', 'ÑĩиÑĤÑĭва'] +['à¸ģำ', 'à¸ģัà¸ļ'] +['Ġ׾', 'פע×ķ׾'] +['ĠìŰ', 'ê²°'] +['s', 'Äħd'] +['ĠاÙĦØ£', 'ÙĬ'] +['ĠاÙĦØ£ÙĬ', 'اÙħ'] +['غÙĬ', 'اب'] +['Ġна', 'ÑĢ'] +['ĠнаÑĢ', 'ко'] +['×ŀ×ķ×ĵ', '×¢'] +['ĠÑģеÑĢ', 'ии'] +['пиÑģ', 'Ñĭва'] +['สิ', 'ว'] +['ç¶ļ', 'ãģĦãģ¦'] +['çͳãģĹ', 'è¾¼ãģ¿'] +['Ġ׾', '×Ĵר'] +['Ġ׾×Ĵר', '×ķ×Ŀ'] +['Ġд', 'ем'] +['Ġдем', 'о'] +['Ġë³´', 'ëĤ´'] +['تÙĩ', 'دÙĬد'] +['ĠÙħØ´', 'ÙĬرا'] +['Ġdu', 'y'] +['Ġduy', 'á»ĩt'] +['ĠwiÄĻks', 'ze'] +['Ùħع', 'اÙĬ'] +['ÙħعاÙĬ', 'ÙĬر'] +['ĠG', 'da'] +['ĠGda', 'ÅĦsk'] +['Ġr', 'ah'] +['Ġrah', 'ats'] +['Ġrahats', 'ız'] +['ר', '×ķצ×Ķ'] +['l', 'ös'] +['lös', 'ung'] +['ĠТак', 'им'] +['ÑĪ', 'ед'] +['ÑĪед', 'ÑĪ'] +['ع', 'زÙĦ'] +['Ġרש', '×Ļ×ŀת'] +['Ġ׾×Ķ', '×Ļ׼'] +['Ġ׾×Ķ×Ļ׼', '×ł×¡'] +['Ġп', 'ÑĥÑĤ'] +['ĠпÑĥÑĤ', 'еÑĪ'] +['ĠпÑĥÑĤеÑĪ', 'еÑģÑĤв'] +['Ġnot', 'ÃŃcia'] +['Ġal', 'Ä±ÅŁ'] +['ĠalÄ±ÅŁ', 'ver'] +['ĠalÄ±ÅŁver', 'iÅŁ'] +['ĠwÅĤ', 'os'] +['ĠwÅĤos', 'ów'] +['Ġب', 'غ'] +['Ġبغ', 'داد'] +['Ġver', 'öffent'] +['Ġveröffent', 'licht'] +['ĠKh', 'á'] +['Ġt', 'án'] +['ëIJĺ', '기'] +['Ġë°©', '문'] +['Ùģ', 'ÙĬÙĦ'] +['à¹Ģà¸ģิà¸Ķ', 'à¸Īาà¸ģ'] +['åı¯', 'æĦĽ'] +['åı¯æĦĽ', 'ãģĦ'] +['à¸ĸ', 'ุà¸ĩ'] +['Ġz', 'ewnÄĻtrzn'] +['à¸łà¸²à¸©à¸²', 'à¸Ńัà¸ĩà¸ģฤษ'] +['Ġmá', 'xima'] +['Ġul', 'us'] +['Ġulus', 'lararası'] +['Ġ׳×Ķ', '׳'] +['à¸Ĥà¹Īาว', 'สาร'] +['ĠìĿĺ', 'ìĤ¬'] +['à¹Ģหล', 'ืà¸Ńà¸ĩ'] +['Ġد', 'ÙĤ'] +['ĠدÙĤ', 'ائÙĤ'] +['สืà¹Īà¸Ń', 'สาร'] +['ë¨', '¼'] +['ĠÑģоÑģÑĤоÑı', 'нии'] +['สมา', 'à¸Ħม'] +['á»', 'Ĥ'] +['ĠÐľÐ¾Ñģ', 'ков'] +['ĠÐľÐ¾Ñģков', 'Ñģк'] +['×ŀס', '×ķ×Ĵ׾'] +['ãģĭ', 'ãģĭãĤĬ'] +['ĠTr', 'uyá»ģn'] +['à¹ģà¸Ĥà¹ĩà¸ĩ', 'à¹ģรà¸ĩ'] +['×ŀ×Ĺ', '×ĸ×Ļ×§'] +['à¹Ĥà¸ģ', 'à¹ī'] +['ÙĬس', 'ر'] +['ìĶ', '©'] +['×IJ', '×ķ×§'] +['×IJ×ķ×§', '×ĺ'] +['×IJ×ķ×§×ĺ', '×ķ×ijר'] +['Ġprox', 'imité'] +['ÙħÙĨ', 'Ùĩج'] +['ĠاÙĦج', 'ز'] +['ĠاÙĦجز', 'ائ'] +['ĠاÙĦجزائ', 'رÙĬ'] +['ĠÄIJi', 'á»ĥm'] +['Ġден', 'еж'] 
+['Ġденеж', 'н'] +['ÙģØŃ', 'ص'] +['Ùģ', 'ئ'] +['ĠÐij', 'Ñĥд'] +['×Ĵ×Ļ×ĵ', '×ķ׾'] +['ĠÐĴ', 'едÑĮ'] +['عÙĦ', 'اÙħØ©'] +['Ġ×IJ×Ĺר', '×ķ׳×ķת'] +['ãģĦãģŁãģł', 'ãģĦãģ¦'] +['سÙĦ', 'ØŃ'] +['ØŃ', 'ÙĦÙħ'] +['ز', 'ÙĪØ§Ø±'] +['Ùĥ', 'سر'] +['×ĺ', 'קס'] +['Ġб', 'ан'] +['Ġбан', 'ков'] +['ĠпÑĢ', 'ож'] +['ĠпÑĢож', 'ива'] +['li', 'wo'] +['liwo', 'ÅĽci'] +['ĠTi', 'ếp'] +['ĠاÙĦÙħÙĨ', 'اسب'] +['ĠاÙĦØ®', 'ÙĬار'] +['ãģĬ', 'ãģĭ'] +['ãģĬãģĭ', 'ãģĴ'] +['à¸Ķà¸Ńà¸ģ', 'à¹Ħมà¹ī'] +['ä', 'mp'] +['ämp', 'fe'] +['à¸ķัà¹īà¸ĩ', 'à¹ĥà¸Ī'] +['Ġза', 'ÑīиÑĤ'] +['ĠзаÑīиÑĤ', 'Ñĭ'] +['ĠTh', 'ưá»Ŀng'] +['Ġص', 'Ùģ'] +['ĠصÙģ', 'ØŃØ©'] +['×Ĺ×ķר', '×£'] +['ãĥIJ', 'ãĥĥãĤ°'] +['Ġ×ĵ', '×Ļ×Ĵ'] +['Ġ×ĵ×Ļ×Ĵ', '×Ļ×ĺ'] +['Ġ×ĵ×Ļ×Ĵ×Ļ×ĺ', '׾×Ļ'] +['Ġ×Ķ×Ĺ', '×ķ׾×Ļ×Ŀ'] +['в', 'еÑī'] +['веÑī', 'а'] +['Ġк', 'ÑĥлÑĮÑĤ'] +['ĠкÑĥлÑĮÑĤ', 'Ñĥ'] +['ĠкÑĥлÑĮÑĤÑĥ', 'ÑĢÑĭ'] +['ĠاÙĦاÙĨ', 'ترÙĨت'] +['Ġhö', 'ch'] +['Ġhöch', 'st'] +['Ġíĺ', 'ķ'] +['Ġíĺķ', 'íĥľ'] +['Ġв', 'ой'] +['Ġвой', 'нÑĭ'] +['ÐĽ', 'Ðŀ'] +['ìĭł', 'ìļ©'] +['Ġ×ŀ×ij', '×ķס'] +['Ġ×ŀ×ij×ķס', 'ס'] +['×ŀ׳', '×Ļ×¢'] +['Ġfiyat', 'ı'] +['ĠÑģл', 'Ñĥж'] +['ĠÑģлÑĥж', 'бÑĭ'] +['à¸Ĺั', 'ศ'] +['à¸Ĺัศ', 'à¸Ļ'] +['ãģĵãģ¨ãģĮ', 'å¤ļãģĦ'] +['Ġ×Ķ×ŀש', 'ת'] +['Ġ×Ķ×ŀשת', '×ŀש'] +['å¯Ħ', 'ãģĽ'] +['×ŀש׾', '×ķ×Ĺ'] +['æĻĤ', 'çĤ¹'] +['æĻĤçĤ¹', 'ãģ§'] +['à¸ŀร', 'ี'] +['à¸ŀรี', 'à¹Ģมีย'] +['à¸ŀรีà¹Ģมีย', 'รà¹Į'] +['à¸ŀรีà¹Ģมียรà¹Į', 'ลีà¸ģ'] +['Ġdiffic', 'olt'] +['Ġdifficolt', 'Ãł'] +['ãĥ¬', 'ãĤ¹ãĥĪ'] +['ãĥ¬ãĤ¹ãĥĪ', 'ãĥ©ãĥ³'] +['สม', 'à¹Ģà¸Ķà¹ĩ'] +['สมà¹Ģà¸Ķà¹ĩ', 'à¸Ī'] +['Ġж', 'ид'] +['Ġжид', 'к'] +['Ġzu', 'peÅĤ'] +['ĠzupeÅĤ', 'nie'] +['ĠÙħ', 'جر'] +['ĠÙħجر', 'د'] +['ãģĮ', 'å§ĭ'] +['ãģĮå§ĭ', 'ãģ¾'] +['ãĤŃãĥ£', 'ãĥ©'] +['Ġ×IJ', '×ķ×ķ×Ļר'] +['ãģĬ', 'äºĴ'] +['ãģĬäºĴ', 'ãģĦ'] +['Ġpot', 'rÃł'] +['ĠPa', 'ÅĦst'] +['ĠPaÅĦst', 'wo'] +['Ġب', 'ÙĬاÙĨ'] +['ĠبÙĬاÙĨ', 'ات'] +['Ġин', 'огда'] +['ĠÑĢ', 'а'] +['ĠÑĢа', 'ÑģÑĤв'] +['ĠÑĢаÑģÑĤв', 'оÑĢ'] +['Ġ×ĸ', '×ŀ׳'] +['ยิ', 'à¹īม'] +['Ä', 'Ĩ'] +['ãģ¾', 'ãģķ'] +['ãģ¾ãģķ', 'ãģ«'] +['ãĥķãĤ¡', 'ãĤ¤ãĥ«'] +['Ġgörd', 'Ã¼ÄŁÃ¼'] +['สà¸ĩ', 'à¸Ħร'] +['สà¸ĩà¸Ħร', 'าม'] +['ĠArk', 
'adaÅŁ'] +['ĠrozwiÄħz', 'ania'] +['×ŀ', '×ķ×ĺ'] +['pi', 'ÄĻ'] +['piÄĻ', 't'] +['ص', 'غر'] +['ส', 'ย'] +['สย', 'าม'] +['ãĤĨ', 'ãģ£ãģıãĤĬ'] +['Ġtr', 'ần'] +['Ġeconom', 'ÃŃa'] +['Ġgeh', 'ören'] +['ãĤ·ãĥ§', 'ãĥ¼'] +['ĠsÅĤ', 'ucha'] +['à¸ŀà¸Ń', 'à¹ĥà¸Ī'] +['ĠоÑĤмеÑĤ', 'ил'] +['ÙĨت', 'ÙĤÙĦ'] +['Ġprop', 'ósito'] +['ĠваÑĪ', 'его'] +['Ġnh', 'ắn'] +['à¹ģà¸ĸ', 'ว'] +['Ġком', 'иÑģ'] +['ĠкомиÑģ', 'Ñģи'] +['waż', 'nie'] +['Ġy', 'avaÅŁ'] +['×ŀ', '×Ļ×§'] +['×ŀ×Ļ×§', '×ķ×Ŀ'] +['ש×IJ׾', 'ת'] +['Ġyıll', 'arda'] +['ĠÐ', '®'] +['ĠЮ', 'ÑĢ'] +['×ł×¡', '×Ļ×ij×ķת'] +['ת', 'צ'] +['תצ', '×ķ×Ĵ'] +['Ġод', 'нÑĥ'] +['Ġ', 'à¸Ńยà¹Īาà¸ĩà¹Ħร'] +['Ġà¸Ńยà¹Īาà¸ĩà¹Ħร', 'à¸ģà¹ĩà¸ķาม'] +['ëģ', '¼'] +['à¹Ħล', 'à¹Ī'] +['تس', 'ÙĦÙĬÙħ'] +['بÙĦ', 'اغ'] +['Ġì', 'ī'] +['Ġìī', '½'] +['Ġìī½', 'ê²Į'] +['ãĥļ', 'ãĥ³'] +['зв', 'ÑĥÑĩ'] +['ĠW', 'äh'] +['ĠWäh', 'rend'] +['Ġ×Ļ', '×Ļת'] +['Ġ×Ļ×Ļת', '׼ף'] +['Ġkh', 'uyên'] +['Ġv', 'ẽ'] +['Ġа', 'меÑĢ'] +['ĠамеÑĢ', 'ик'] +['ĠамеÑĢик', 'ан'] +['ĠамеÑĢикан', 'Ñģк'] +['ع', 'جب'] +['ãĥĽãĥ¼ãĥł', 'ãĥļãĥ¼ãĤ¸'] +['Ġник', 'ÑĤо'] +['ĠÙĤ', 'Ùİ'] +['ĠÙĤÙİ', 'اÙĦ'] +['ĠÙĤÙİØ§ÙĦ', 'Ùİ'] +['ÐIJ', 'ÐĹ'] +['Ùħ', 'جÙħÙĪØ¹'] +['ÙħجÙħÙĪØ¹', 'ات'] +['Ġnecess', 'itÃł'] +['Ġpob', 'li'] +['Ġpobli', 'żu'] +['Ġph', 'ấn'] +['ĠСо', 'обÑī'] +['ÙħÙĤ', 'اط'] +['ÙħÙĤاط', 'ع'] +['Ġ×Ķצ', '×ķר×ļ'] +['la', 'ÅŁtırma'] +['ว', 'ิà¸Ķ'] +['วิà¸Ķ', 'ี'] +['วิà¸Ķี', 'à¹Ĥà¸Ń'] +['Ġ그리', 'ìĬ¤'] +['Ġ그리ìĬ¤', 'ëıĦ'] +['ãĤ¿ãĤ¤', 'ãĥŁ'] +['ãĤ¿ãĤ¤ãĥŁ', 'ãĥ³ãĤ°'] +['×§×ĺ', '×Ĵ×ķר'] +['×§×ĺ×Ĵ×ķר', '×Ļ×Ķ'] +['Ġ×Ĺ', '×ķפ'] +['Ġ×Ĺ×ķפ', 'ש×Ļ'] +['Ø£', 'جر'] +['Ġим', 'ени'] +['ĠÑĢан', 'ее'] +['à¹Ģà¸ŀืà¹Īà¸Ńà¸Ļ', 'à¹Ĩ'] +['ĠJes', 'ús'] +['Ñģо', 'един'] +['Ñģоедин', 'ен'] +['Ġר', '×Ĺ×ķ×§'] +['à¹Ĥà¸ļ', 'รา'] +['à¹Ĥà¸ļรา', 'à¸ĵ'] +['ĠH', 'Æ¡n'] +['Ġth', 'áºŃp'] +['تع', 'ÙĬÙĬÙĨ'] +['Ġtart', 'Ä±ÅŁ'] +['ĠtartÄ±ÅŁ', 'ma'] +['ĠGes', 'pr'] +['ĠGespr', 'äch'] +['תר', '×ķפ'] +['תר×ķפ', '×ķת'] +['Ġcat', 'égorie'] +['Ġоказ', 'Ñĭва'] +['ĠналиÑĩ', 'ие'] +['Ġprésent', 'é'] +['Ġk', 'ull'] +['Ġkull', 'and'] +['Ġkulland', 'ı'] +['Ġü', 'nl'] +['Ġünl', 
'ü'] +['ĠÙģ', 'Ùĥرة'] +['из', 'аÑĤоÑĢ'] +['×IJ', '×ķ׳'] +['×IJ×ķ׳', '×Ļ×ij'] +['×IJ×ķ׳×Ļ×ij', 'רס'] +['×IJ×ķ׳×Ļ×ijרס', '×Ļ×ĺת'] +['ĠÑĢаÑģÑģ', 'маÑĤ'] +['ĠÑĢаÑģÑģмаÑĤ', 'ÑĢ'] +['ĠÑĢаÑģÑģмаÑĤÑĢ', 'ива'] +['تÙĥÙĦ', 'Ùħ'] +['Ùĥت', 'رÙĪ'] +['ÙĥترÙĪ', 'ÙĨÙĬ'] +['ĠÑģо', 'ÑĩеÑĤ'] +['ĠÑģоÑĩеÑĤ', 'а'] +['ãĤĴè¦ĭ', 'ãģĽ'] +['Ġng', 'ừa'] +['ĠÐł', 'еÑģп'] +['ĠÐłÐµÑģп', 'Ñĥб'] +['ĠÐłÐµÑģпÑĥб', 'лик'] +['ãĤ¦', 'ãĤ©'] +['ãĤ¦ãĤ©', 'ãĥ¼'] +['ĠÐľ', 'еждÑĥ'] +['ĠìŀĪ', 'ê²Į'] +['Ġm', 'â'] +['ĠìļĶ', 'ì²Ń'] +['ض', 'ار'] +['ลุ', 'à¹īà¸Ļ'] +['ëĮĢ', 'íķĻêµIJ'] +['×ĸ', '×Ļ׼'] +['×ĸ×Ļ׼', 'ר×ķף'] +['ãĤ¹', 'ãĥļ'] +['ãĤ¹ãĥļ', 'ãĥ¼ãĤ¹'] +['ĠкÑĢаÑģ', 'оÑĤ'] +['ï¼', '¨'] +['ê¼', 'Ń'] +['ãĤĴ', 'éĽĨ'] +['ãĤĴéĽĨ', 'ãĤģ'] +['ë°', 'Ŀ'] +['Ġ×Ķ׳', '×IJ'] +['Ġ×Ķ׳×IJ', 'ש×Ŀ'] +['Ġê°Ģ', 'ìļ´'] +['Ġê°Ģìļ´', 'ëį°'] +['تÙĥÙĦ', 'Ù쨩'] +['ĠØŃ', 'ÙĤÙĬÙĤÙĬ'] +['Ġh', 'alk'] +['Ġhalk', 'ın'] +['ÑİÑī', 'ÑĥÑİ'] +['ĠÑģп', 'ин'] +['סר×ĺ', 'ף'] +['ĠпеÑĢв', 'ого'] +['Ġпол', 'ож'] +['Ġполож', 'иÑĤелÑĮн'] +['Ġд', 'л'] +['Ġдл', 'иÑĤелÑĮн'] +['ĠV', 'Ä©nh'] +['ê´', '´'] +['ĠÑģÑĭ', 'ÑĢ'] +['ĠíĨµ', 'íķĺìŬ'] +['ë³ij', 'ìĽIJ'] +['à¹Ĥรà¸ĩ', 'à¸ĩาà¸Ļ'] +['รัà¸ļ', 'à¸ľà¸´à¸Ķ'] +['รัà¸ļà¸ľà¸´à¸Ķ', 'à¸Ĭà¸Ńà¸ļ'] +['تج', 'ÙĨب'] +['s', 'ÅĤ'] +['sÅĤ', 'uch'] +['ãĤ¢ãĥ«', 'ãĥIJ'] +['ãĤ¢ãĥ«ãĥIJ', 'ãĥł'] +['ëī´', 'ìĬ¤'] +['Ġpat', 'ië'] +['Ġpatië', 'nt'] +['Ġìĺ', '¤í'] +['Ġìĺ¤í', 'ŀ'] +['Ġìĺ¤íŀ', 'Ī'] +['Ġìĺ¤íŀĪ', '볤'] +['ĠDer', 'ne'] +['ĠDerne', 'ÄŁi'] +['wró', 'ci'] +['wróci', 'Äĩ'] +['Ġоб', 'Ñī'] +['ĠобÑī', 'еÑģÑĤв'] +['ĠобÑīеÑģÑĤв', 'енно'] +['ĠêµIJ', 'ìĪĺ'] +['tıģ', 'ımız'] +['Ġ×Ķ×ŀש', '×Ļ×ij'] +['k', 'örper'] +['Ġпозв', 'ол'] +['Ġпозвол', 'иÑĤ'] +['ĠChi', 'ến'] +['أخ', 'ÙĪ'] +['ĠAy', 'dın'] +['à¸Ķà¹īาà¸Ļ', 'ล'] +['à¸Ķà¹īาà¸Ļล', 'à¹Īาà¸ĩ'] +['Ġdr', 'u'] +['Ġdru', 'ż'] +['Ġdruż', 'yn'] +['Ġë°ľ', 'íijľ'] +['ĠTh', 'ảo'] +['جÙĩ', 'اد'] +['à¸ģระà¸Ĺ', 'ูà¹ī'] +['Ġк', 'ÑĢов'] +['ĠкÑĢов', 'и'] +['Ġiçer', 'ik'] +['Ġnad', 'zie'] +['Ġnadzie', 'jÄĻ'] +['ĠС', 'моÑĤÑĢ'] +['Ġph', 'ức'] +['ج', 'تÙħاع'] +['جتÙħاع', 'ÙĬØ©'] +['ком', 'пон'] +['компон', 'енÑĤ'] 
+['Ġб', 'ил'] +['Ġбил', 'еÑĤ'] +['ãĥIJ', 'ãĥ³ãĥī'] +['ĠPol', 'ÃŃcia'] +['اÙĦ', 'تÙĩ'] +['اÙĦتÙĩ', 'اب'] +['ØŃر', 'Ùģ'] +['ت', 'خط'] +['تخط', 'ÙĬØ·'] +['ãĤ³', 'ãĥ¼ãĥ'] +['ãĤ³ãĥ¼ãĥ', 'Ĵ'] +['ãĤ³ãĥ¼ãĥĴ', 'ãĥ¼'] +['・・', 'ï½¥'] +['à¸ĭ', 'à¸Ńย'] +['Ġcréd', 'it'] +['è²·', 'ãģ£ãģŁ'] +['ĠпоÑĢ', 'Ñıд'] +['ĠпоÑĢÑıд', 'ке'] +['Ġph', 'ó'] +['Ġw', 'ida'] +['Ġwida', 'Äĩ'] +['جر', 'ائÙħ'] +['à¸ľ', 'ี'] +['ĠbÄĻd', 'ÄĻ'] +['Ġ×ŀ', 'פת×Ĺ'] +['ãĥij', 'ãĥ¼ãĥ'] +['ãĥijãĥ¼ãĥ', 'Ĩ'] +['ãĥijãĥ¼ãĥĨ', 'ãĤ£'] +['ãĥijãĥ¼ãĥĨãĤ£', 'ãĥ¼'] +['ĠKa', 'ż'] +['ĠKaż', 'dy'] +['ĠнеобÑħодим', 'оÑģÑĤи'] +['à¸Ł', 'à¸Ńรà¹Į'] +['à¸Łà¸Ńรà¹Į', 'ม'] +['Ġмал', 'ÑĭÑĪ'] +['Ġпл', 'оÑĤ'] +['ĠÑĥ', 'ÑģÑĤÑĢой'] +['ĠÑĥÑģÑĤÑĢой', 'ÑģÑĤва'] +['à¸ĸ', 'à¸Ńà¸Ļ'] +['ĠoluÅŁtur', 'ul'] +['ĠÅĽwi', 'ad'] +['ĠÅĽwiad', 'om'] +['Ùħع', 'Ùĩد'] +['ĠпÑĢоиз', 'веден'] +['Æ', 'ł'] +['ר', '×Ļש'] +['Ùħست', 'Ø«'] +['Ùħستث', 'Ùħر'] +['׳×Ļ', '×Ļר'] +['pa', 'ñ'] +['Ġ;', '-)'] +['Ġë°ľ', '견'] +['Ġgör', 'üyor'] +['Ùħؤ', 'ÙĦÙģ'] +['ĠÄIJ', 'á»ģ'] +['ĠاÙĦÙĨ', 'ÙĪØ§Ø¨'] +['×Ĺ×§', '×Ļר×Ķ'] +['Ġm', 'á»ıi'] +['è¿°', 'ãģ¹'] +['ÐĿ', 'ик'] +['ìŀĸ', 'ìķĦ'] +['ìŀĸìķĦ', 'ìļĶ'] +['prowadzi', 'ÅĤ'] +['l', 'óg'] +['lóg', 'ica'] +['פס', '×ĺ'] +['פס×ĺ', '×Ļ×ij׾'] +['Ġ×ŀ', '×ĵ×Ķ'] +['Ġ×ŀ×ĵ×Ķ', '×Ļ×Ŀ'] +['ãģĵãģĵ', 'ãģ¾ãģ§'] +['×Ķ', 'ת×Ĺ'] +['×Ķת×Ĺ', '׾×Ķ'] +['Ġפ', '×ķס'] +['Ġפ×ķס', '×ĺ×Ļ×Ŀ'] +['Ġн', 'ев'] +['Ġнев', 'оз'] +['Ġневоз', 'можно'] +['ĠdostÄĻp', 'ny'] +['Ġغ', 'اÙĦ'] +['ĠغاÙĦ', 'ب'] +['Ġbez', 'pieczeÅĦst'] +['ĠbezpieczeÅĦst', 'wa'] +['åĪĨ', 'ãģĭãĤĭ'] +['ĠF', 'ührung'] +['à¸ģ', 'ีà¹ī'] +['gem', 'Ã¤ÃŁ'] +['à¸Ĭà¹Īวà¸ĩ', 'à¹Ģวลา'] +['Ġìļ°ë¦¬', 'ëĤĺ'] +['Ġìļ°ë¦¬ëĤĺ', 'ëĿ¼'] +['ãģ¥', 'ãģıãĤĬ'] +['ĠاÙĦÙħ', 'سÙĦ'] +['ĠاÙĦÙħسÙĦ', 'ØŃØ©'] +['Ġlibert', 'é'] +['клÑİÑĩ', 'ение'] +['Ġzam', 'ów'] +['Ġzamów', 'ienia'] +['รà¸ĸ', 'à¹Ħà¸Ł'] +['Ø£', 'ÙģÙĦ'] +['Ø£ÙģÙĦ', 'اÙħ'] +['Ùħ', 'راج'] +['Ùħراج', 'عة'] +['Ġë¹Ħ', 'êµIJ'] +['ĠاÙĦت', 'اب'] +['ĠاÙĦتاب', 'عة'] +['Ġë§Į', 'ëĤĺ'] +['Ġб', 'Ñĥм'] +['ĠбÑĥм', 'аг'] +['Ġgé', 'nero'] +['Ġìŀĺ', '못'] +['×ŀ', 'פ×ķר×ĺ'] +['è²·ãģĦ', 'çī©'] 
+['ĠÙĦدÙĬ', 'Ùĥ'] +['Ġ×ľ×¢', '×Ļת'] +['Ġ×ľ×¢×Ļת', '×Ļ×Ŀ'] +['ĠsÅĤ', 'ab'] +['ĠпÑĢедÑģÑĤав', 'лÑı'] +['ãĤ¿', 'ãĤ¤ãĥĪ'] +['ãĤ¿ãĤ¤ãĥĪ', 'ãĥ«'] +['Ùħ', 'ص'] +['Ùħص', 'Ø·Ùģ'] +['ÙħصطÙģ', 'Ùī'] +['Ġdifficult', 'é'] +['ãĥĨãĤ£', 'ãĥĸ'] +['Ġpew', 'noÅĽci'] +['ĠpewnoÅĽci', 'Äħ'] +['Ġ무', 'ìĬ¨'] +['Ø¥', 'رس'] +['إرس', 'اÙĦ'] +['Ġд', 'алÑĮ'] +['ĠдалÑĮ', 'ÑĪе'] +['Ġ׾', '×ł×¡'] +['Ġ×ľ×ł×¡', '×ķת'] +['หมูà¹Ī', 'à¸ļà¹īาà¸Ļ'] +['×ŀס×ŀ', '׼×Ļ'] +['أسÙĦ', 'ÙĪØ¨'] +['Ġzw', 'ÅĤ'] +['ĠzwÅĤ', 'as'] +['ĠzwÅĤas', 'zc'] +['ĠzwÅĤaszc', 'za'] +['ĠпÑĢ', 'еж'] +['ĠпÑĢеж', 'де'] +['ĠоÑĢганиз', 'аÑĨиÑı'] +['Ġdön', 'emin'] +['Ġdönemin', 'de'] +['Ġ', 'Ủ'] +['ĠỦ', 'y'] +['ä¸ĭ', 'ãģĴ'] +['ĠпоÑģлед', 'ние'] +['Ġgü', 'ne'] +['Ġgüne', 'ÅŁ'] +['Ġ×IJ', '×ĸר'] +['Ġ×IJ×ĸר', '×Ĺ×Ļ'] +['ãģ§ãģĤ', 'ãĤįãģĨ'] +['ĠÙĨ', 'ÙĤ'] +['ĠÙĨÙĤ', 'اط'] +['æŃ£', 'ãģĹãģĦ'] +['ĠÑĢ', 'ег'] +['ĠÑĢег', 'иона'] +['ĠFör', 'der'] +['ê²½', 'ìĺģ'] +['dıkl', 'ar'] +['dıklar', 'ını'] +['trzym', 'aÄĩ'] +['أش', 'Ùĥ'] +['أشÙĥ', 'اÙĦ'] +['×Ķת', '×IJ'] +['×Ķת×IJ', '×ŀ×Ķ'] +['à¸Ĺำà¹ĥหà¹ī', 'à¹Ģà¸ģิà¸Ķ'] +['ĠGeb', 'ä'] +['ĠGebä', 'ude'] +['ĠСеÑĢ', 'г'] +['ĠСеÑĢг', 'ей'] +['Ġз', 'доÑĢов'] +['ĠздоÑĢов', 'ÑĮÑı'] +['Ġr', 'ãi'] +['ĠпÑĢед', 'ÑĥÑģ'] +['ĠпÑĢедÑĥÑģ', 'моÑĤÑĢ'] +['ĠпÑĢедÑĥÑģмоÑĤÑĢ', 'ен'] +['Ġ×Ķצ', '×Ļ×ij'] +['Ġ×Ķצ×Ļ×ij', '×ķר×Ļ'] +['Ġdés', 'ir'] +['Ġн', 'оÑĩ'] +['ĠноÑĩ', 'ÑĮ'] +['möglich', 'keiten'] +['Ġ×IJ×Ĺר', '×ķ׳×Ļ×Ŀ'] +['Ġsoir', 'ée'] +['ĠNh', 'áºŃn'] +['Ù', 'ª'] +['à¸Ľà¸£à¸°à¸§à¸±à¸ķิ', 'ศาสà¸ķรà¹Į'] +['êµIJ', 'íĨµ'] +['ĠØ£', 'Ø®ÙĬ'] +['Ġdé', 'cid'] +['Ġdécid', 'é'] +['Ġwy', 'ja'] +['Ġwyja', 'ÅĽni'] +['Ġ', 'สิ'] +['Ġสิ', 'à¸ĩ'] +['Ġสิà¸ĩ', 'หา'] +['Ġสิà¸ĩหา', 'à¸Ħม'] +['à¹ģ', 'à¸Ńรà¹Į'] +['หà¸Ļà¹īา', 'à¸Īà¸Ń'] +['ס', 'תר'] +['Ġê', '¶'] +['Ġê¶', 'Į'] +['Ġê¶Į', '리'] +['pl', 'ätze'] +['ب', 'Ø·ÙĦ'] +['ê±´', 'ìĦ¤'] +['Ġ×IJ', '×Ļ×ŀ×Ļ'] +['Ġ×IJ×Ļ×ŀ×Ļ', '×Ļ׾'] +['ãģ', '½'] +['تر', 'اث'] +['×IJ׾', '×Ļ×ŀ×ķת'] +['Ġdispon', 'ÃŃveis'] +['Ġz', 'ale'] +['Ġzale', 'ży'] +['à¸Ľà¸£à¸°à¸Ĭา', 'สัมà¸ŀัà¸Ļà¸ĺà¹Į'] +['ĠÅļw', 'iat'] +['Ġpor', 'ówn'] 
+['Ġporówn', 'a'] +['Ġ׾×ĺ', '×ķ×ijת'] +['×Ķ×ĸ', '×ŀ׳×Ķ'] +['Ġ×Ľ×ª', '×ķצ×IJ×Ķ'] +['Ġ×ij', 'ק׾'] +['Ġ×ijק׾', '×ķת'] +['ĠоÑĤ', 'кÑĢ'] +['ĠоÑĤкÑĢ', 'Ñĭва'] +['ãĥij', 'ãĥ¯ãĥ¼'] +['ë¿IJ', 'ë§Į'] +['Ġв', 'ÑģÑı'] +['ĠвÑģÑı', 'к'] +['ãģ¨ãģª', 'ãģ£ãģ¦ãģĦãĤĭ'] +['Ġgi', 'áºŃn'] +['Ġок', 'ÑĢÑĥ'] +['ĠокÑĢÑĥ', 'жа'] +['ĠокÑĢÑĥжа', 'ÑİÑī'] +['ĠUnivers', 'ität'] +['ĠÑĢ', 'ож'] +['ĠÑĢож', 'д'] +['ĠÑĢожд', 'ениÑı'] +['Ø®', 'ÙĬÙĦ'] +['Ġкомпани', 'й'] +['ĠÑĢазлиÑĩ', 'нÑĭе'] +['ĠЦ', 'ена'] +['׳×Ļ', '×ķ×ĸ'] +['׳×Ļ×ķ×ĸ', '׾'] +['׳×Ļ×ķ×ĸ׾', '×ĺר'] +['Ġê³µ', 'ê°Ħ'] +['Ġê°ľ', 'ëħIJ'] +['landır', 'ma'] +['ĠÑĥдал', 'ен'] +['à¸ŀัà¸ģ', 'à¸ľ'] +['à¸ŀัà¸ģà¸ľ', 'à¹Īà¸Ńà¸Ļ'] +['Ġprote', 'cción'] +['Ġb', 'ÅĤ'] +['ĠbÅĤ', 'ÄĻd'] +['Ã', 'Ī'] +['Ġíĸī', 'ë³µ'] +['ĠÅŁ', 'ü'] +['ĠÅŁÃ¼', 'phe'] +['Ġí', 'Ķ'] +['ĠíĶ', '¼'] +['Ġíͼ', 'íķ´'] +['Ġëĭ¤', '르'] +['à¹Ħมà¹Ī', 'à¹Ģà¸ģิà¸Ļ'] +['ãģ¿', 'ãģª'] +['ãģ¿ãģª', 'ãģķãĤĵ'] +['ĠпоÑĤ', 'ÑĢеб'] +['ĠпоÑĤÑĢеб', 'иÑĤел'] +['ĠاÙĦÙĥÙĦ', 'اÙħ'] +['ìķĦ', 'ë²Ħ'] +['ìķĦë²Ħ', 'ì§Ģ'] +['ãĤĴ使', 'ãģ£ãģŁ'] +['Ġbụ', 'i'] +['ĠпоÑĤ', 'еÑĢ'] +['ĠпоÑĤеÑĢ', 'Ñı'] +['ĠØ¢', 'ÙĦاÙģ'] +['ĠнаÑģÑĤоÑıÑī', 'ее'] +['ãģıãģªãĤĬ', 'ãģ¾ãģĹãģŁ'] +['clus', 'ão'] +['ãĤ³', 'ãĥĶãĥ¼'] +['צ', 'פ×Ļ'] +['צפ×Ļ', '×Ļ×Ķ'] +['Ø®', 'ÙĦا'] +['Ø®ÙĦا', 'ص'] +['ล', 'à¹īำ'] +['ãĥ¯', 'ãĤ¤ãĥ³'] +['Ġมี', 'à¸Ļา'] +['Ġมีà¸Ļา', 'à¸Ħม'] +['Ø´', 'خص'] +['شخص', 'ÙĬات'] +['Ġ×ĸ', '×§'] +['Ġ×ĸ×§', '×ķ×§'] +['×Ļ', '×Ļצ'] +['×Ļ×Ļצ', '×Ĵ'] +['èĢĥãģĪ', 'æĸ¹'] +['Ġürün', 'ü'] +['ĠиÑģп', 'ол'] +['ĠиÑģпол', 'ни'] +['Ġcompañ', 'ero'] +['×§', 'צ×Ķ'] +['×ŀ×¢', '׳×Ļ×§'] +['Ùħ', 'ØŃÙħد'] +['Ġc', 'ámara'] +['Ġп', 'ед'] +['Ġпед', 'аг'] +['Ġпедаг', 'ог'] +['м', 'аÑĢ'] +['маÑĢ', 'к'] +['×Ķת', '׳×Ĵ×ĵ'] +['ĠìĨĮ', 'ê°ľ'] +['Ġcom', 'unitÃł'] +['ê³', '¤'] +['ĠNg', 'Ãłi'] +['สà¸ĩ', 'à¸ļ'] +['ĠmieszkaÅĦ', 'ców'] +['ĠÙĨ', 'ÙĩائÙĬ'] +['iv', 'ité'] +['Ġи', 'де'] +['Ġиде', 'алÑĮн'] +['ĠØ£', 'سبÙĪØ¹'] +['Ġ×Ļ', '×¢×ľ'] +['Ġ׾', 'ר×IJש'] +['Ġ׾ר×IJש', '×ķ׳×Ķ'] +['ĠзапиÑģ', 'и'] +['ĠкоÑĢ', 'пÑĥÑģ'] +['วà¸ĩ', 'ศ'] +['วà¸ĩศ', 'à¹Į'] +['ĠÐĶ', 'м'] +['ĠÐĶм', 'иÑĤ'] 
+['ĠÐĶмиÑĤ', 'ÑĢ'] +['Ġkön', 'nt'] +['Ġböl', 'ges'] +['Ġbölges', 'inde'] +['׼', '×Ļ׼'] +['׼×Ļ׼', 'ר'] +['ĠاÙĦØ¥', 'Ø«ÙĨ'] +['ĠاÙĦإثÙĨ', 'ÙĬÙĨ'] +['Ġng', 'á»Ļ'] +['ì¹', 'ł'] +['د', 'راج'] +['Ġu', 'da'] +['Ġuda', 'ÅĤo'] +['ìº', 'IJ'] +['بر', 'ÙĨاÙħج'] +['ĠÑģÑĥд', 'еб'] +['ĠÑģÑĥдеб', 'н'] +['Ġzun', 'ächst'] +['ĠEduc', 'ación'] +['ãģ¨ãģª', 'ãģ£ãģ¦ãģĦãģ¾ãģĻ'] +['Ġ×Ķ×IJ', '×ŀ×Ļת×Ļ'] +['Ġİ', 'nt'] +['Ġİnt', 'ernet'] +['ĠcaÅĤ', 'ego'] +['ãĥĹãĥª', 'ãĥ³'] +['Ø¥', 'بد'] +['إبد', 'اع'] +['ĠпоÑĢ', 'ÑĤал'] +['à¹Ĥà¸ķ', 'à¹ī'] +['Ġ×Ķ×§', 'ש×ķר'] +['пл', 'од'] +['ĠÙħ', 'د'] +['ĠÙħد', 'رÙĬد'] +['×ŀסע', '×ĵ×Ķ'] +['ĠØ´ÙĬ', 'ئ'] +['ĠØ´ÙĬئ', 'ا'] +['à¸ģà¹Īà¸Ń', 'สรà¹īาà¸ĩ'] +['Ġì°¸', 'ê³ł'] +['à¹Ģà¸Ĺ', 'ร'] +['à¹Ģà¸Ĺร', 'à¸Ķ'] +['Ġ×ij×ŀ', 'קר×Ļ×Ŀ'] +['Ġb', 'ât'] +['Ġbât', 'iment'] +['åij¼', 'ãģ³'] +['ç´ł', 'æķµ'] +['ç´łæķµ', 'ãģª'] +['przedsiÄĻbior', 'st'] +['przedsiÄĻbiorst', 'w'] +['Ġ×ł×ª', '×ķ׳×Ļ×Ŀ'] +['×Ĺ׾', '×ķ×Ŀ'] +['ร', 'วย'] +['Ùħ', 'ÙĪØ¶ÙĪØ¹'] +['ĠÑģоб', 'ÑĢан'] +['вед', 'ÑĥÑī'] +['ĠÑĤе', 'аÑĤ'] +['ĠÑĤеаÑĤ', 'ÑĢ'] +['m', 'eye'] +['meye', 'ceÄŁi'] +['Ġpien', 'iÄħ'] +['ĠpieniÄħ', 'd'] +['ĠpieniÄħd', 'ze'] +['ÑĢез', 'иденÑĤ'] +['ØŃ', 'صر'] +['ìĺ', '¥'] +['à¹Ģย', 'ืà¸Ńà¸Ļ'] +['ĠÑĥ', 'ни'] +['ĠÑĥни', 'веÑĢ'] +['ĠÑĥнивеÑĢ', 'Ñģ'] +['ĠÑĥнивеÑĢÑģ', 'иÑĤеÑĤ'] +['ĠاÙĦر', 'ØŃ'] +['ĠاÙĦرØŃ', 'ÙħÙĨ'] +['ĠÑĤеÑħ', 'нолог'] +['ĠÑĤеÑħнолог', 'ии'] +['ìĹIJ', 'ëĦĪ'] +['ìĹIJëĦĪ', 'ì§Ģ'] +['Ġíķ', 'Ń'] +['ĠíķŃ', 'ìĥģ'] +['à¸ĺ', 'า'] +['à¸ĺา', 'à¸ķุ'] +['ĠEspañ', 'ol'] +['×ĵ×Ĵ', 'ש'] +['Ġêµ', 'ī'] +['Ġêµī', 'ìŀ¥'] +['Ġêµīìŀ¥', 'íŀĪ'] +['ĠÅĤ', 'at'] +['ĠÅĤat', 'wo'] +['Ġk', 'á»ĭch'] +['Ø¥', 'ز'] +['إز', 'اÙĦØ©'] +['ĠдейÑģÑĤв', 'ие'] +['ĠsaÄŁ', 'layan'] +['สุà¸Ķ', 'ยà¸Ńà¸Ķ'] +['Ġzosta', 'Äĩ'] +['Ġdispon', 'ÃŃvel'] +['ïº', 'į'] +['ver', 'ständ'] +['verständ', 'lich'] +['tw', 'or'] +['twor', 'zyÄĩ'] +['ع', 'جز'] +['à¹Ģà¸Ĥ', 'à¹īม'] +['ยà¹Ī', 'à¸Ńม'] +['Ġstrat', 'ég'] +['Ġstratég', 'ie'] +['à¸ľà¸¥', 'à¹Ħมà¹ī'] +['Ġê°ģ', 'ì¢ħ'] +['ĠÙħ', 'ÙĪØ§'] +['ĠÙħÙĪØ§', 'ض'] +['ĠÙħÙĪØ§Ø¶', 'ÙĬع'] +['اØŃ', 'تج'] 
+['اØŃتج', 'اج'] +['Ġ', 'Ấ'] +['ĠẤ', 'n'] +['×ŀ', '×ŀש׾×Ķ'] +['ĠÅŁek', 'il'] +['×ŀ', '×Ĺ׾'] +['×ŀ×Ĺ׾', '×ķת'] +['Ġ', 'à¸ĺ'] +['Ġà¸ĺ', 'ัà¸Ļ'] +['Ġà¸ĺัà¸Ļ', 'วา'] +['Ġà¸ĺัà¸Ļวา', 'à¸Ħม'] +['Ġìĭ¤', 'ìłľ'] +['Ġìĭ¤ìłľ', 'ë¡ľ'] +['ì¤ij', 'ìķĻ'] +['ëįĶ', 'ëĿ¼'] +['ĠÑĪ', 'иÑĢ'] +['ĠÑĪиÑĢ', 'око'] +['Ġsol', 'ución'] +['วาà¸ĩ', 'à¹ģà¸ľà¸Ļ'] +['×IJ×ķ×ĺ', '×ķ×ŀ'] +['×IJ×ķ×ĺ×ķ×ŀ', '×ĺ×Ļ'] +['ĠÑĢ', 'еÑģÑĤ'] +['ĠÑĢеÑģÑĤ', 'оÑĢ'] +['ĠÑĢеÑģÑĤоÑĢ', 'ан'] +['ëį', '¸'] +['ÑĤ', 'ÑĢад'] +['ÑĤÑĢад', 'и'] +['ÑĤÑĢади', 'ÑĨион'] +['ÑĤÑĢадиÑĨион', 'н'] +['มะ', 'à¹Ģรà¹ĩ'] +['มะà¹Ģรà¹ĩ', 'à¸ĩ'] +['à¹Ĥ', 'ส'] +['Ġol', 'masını'] +['×ŀ×ķס', 'ר'] +['ĠоÑĤноÑĪ', 'ении'] +['Ġê°ĢëĬ¥', 'ìĦ±'] +['Ġy', 'uk'] +['Ġyuk', 'arı'] +['ìĨ', 'Ķ'] +['ĠÑģ', 'ÑĦ'] +['ĠÑģÑĦ', 'еÑĢе'] +['Ġ×§', '×ķפ'] +['ãĤ±', 'ãĥ¼ãĤ'] +['ãĤ±ãĥ¼ãĤ', 'Ń'] +['âĢķ', 'âĢķ'] +['ĠاÙĦØ£', 'ÙĦÙħ'] +['ĠاÙĦØ£ÙĦÙħ', 'اÙĨÙĬ'] +['Ả', 'N'] +['ת×ķ׼', '׳×Ļ×ķת'] +['ĠÑģÑĥÑīеÑģÑĤв', 'ÑĥеÑĤ'] +['æĪij', 'ãĢħ'] +['ĠاÙĦص', 'ادر'] +['ĠTr', 'á»įng'] +['Ġа', 'д'] +['Ġад', 'миниÑģÑĤ'] +['ĠадминиÑģÑĤ', 'ÑĢа'] +['ĠадминиÑģÑĤÑĢа', 'ÑĨи'] +['ĠдÑĢÑĥг', 'ими'] +['Ñģп', 'еÑĪ'] +['عÙĦاÙħ', 'ات'] +['Ġа', 'б'] +['Ġаб', 'Ñģол'] +['ĠабÑģол', 'ÑİÑĤ'] +['ĠабÑģолÑİÑĤ', 'но'] +['ฤ', 'à¸Ķู'] +['é', 'tr'] +['étr', 'anger'] +['нÑı', 'ÑĤи'] +['нÑıÑĤи', 'е'] +['×¢', '×ķ׳'] +['×¢×ķ׳', 'ש'] +['ĠÙĤ', 'ائ'] +['ĠÙĤائ', 'ÙĦا'] +['Ġм', 'аÑģ'] +['ĠмаÑģ', 'ло'] +['ãĥī', 'ãĤ¤'] +['ãĥīãĤ¤', 'ãĥĦ'] +['å¿ħè¦ģ', 'ãģĮãģĤãĤĬãģ¾ãģĻ'] +['×ŀ×ķ×ĸ', '×Ļ×IJ'] +['×ŀ×ķ×ĸ×Ļ×IJ', '×ķף'] +['ĠNgo', 'ại'] +['Ġkê', 'nh'] +['à¸ģาร', 'à¸Ńà¸Ńà¸ģà¹ģà¸ļà¸ļ'] +['×ŀ', 'פק'] +['×ŀפק', '×ĵ'] +['ÙħÙĨ', 'از'] +['ÙħÙĨاز', 'ÙĦ'] +['ë·', '°'] +['íĹ', '¤'] +['ÙħÙĩ', 'ارات'] +['Ġpropri', 'été'] +['פ×Ĵ', '×Ļש×Ķ'] +['Ñĩ', 'ÑĢ'] +['ÑĩÑĢ', 'еж'] +['ÑĩÑĢеж', 'ден'] +['×Ķ', '×ķצ×IJ×Ķ'] +['ØŃÙĥ', 'ÙĬÙħ'] +['ĠíĻ', 'Ī'] +['ĠíĻĪ', 'íİĺìĿ´ì§Ģ'] +['åİ', '³'] +['åݳ', 'ãģĹãģĦ'] +['×¢', '×ŀ×ĵ×Ķ'] +['ĠAu', 'ÃŁen'] +['سÙĪ', 'Ø¡'] +['ë¹', 'Ī'] +['ĠÙĪ', 'Ø®'] +['ĠÙĪØ®', 'اصة'] +['ин', 'ÑĤеÑĢ'] +['инÑĤеÑĢ', 'еÑģ'] +['èĩ´', 'ãģĹãģ¾ãģĻ'] +['Ġhük', 
'üm'] +['à¹Ħà¸Ĥ', 'มัà¸Ļ'] +['Ġdav', 'ran'] +['Ġdavran', 'Ä±ÅŁ'] +['à¹Ģà¸ķ', 'ียà¸ĩ'] +['в', 'ÑĢем'] +['вÑĢем', 'енно'] +['à¹Ģà¸Ĺศ', 'à¸ģา'] +['à¹Ģà¸Ĺศà¸ģา', 'ล'] +['å¼ķ', 'ãģ£'] +['å¼ķãģ£', 'è¶ĬãģĹ'] +['×IJר', '×ķ×Ĺ'] +['×IJר×ķ×Ĺ', 'ת'] +['à¹Ģ', 'วิ'] +['à¹Ģวิ', 'รà¹Į'] +['à¸Ńยà¹Īาà¸ĩ', 'รวà¸Ķà¹Ģรà¹ĩว'] +['ĠìŬ', 'íĸī'] +['ĠÑĢан', 'ÑĮ'] +['ĠÑĢанÑĮ', 'ÑĪе'] +['Ġzob', 'ow'] +['Ġzobow', 'iÄħ'] +['ĠzobowiÄħ', 'z'] +['Ġ×ķ׼', '×ŀ×ķ×ijף'] +['ĠاÙĦÙħ', 'Ùĩ'] +['ĠاÙĦÙħÙĩ', 'ÙĨÙĬ'] +['ãĤ¢', 'ãĤ¸'] +['ãĤ¢ãĤ¸', 'ãĤ¢'] +['ë°©', 'ìĨ¡'] +['à¸Ńà¸Ńà¸ģ', 'à¸ģำลัà¸ĩ'] +['à¸Ńà¸Ńà¸ģà¸ģำลัà¸ĩ', 'à¸ģาย'] +['am', 'éli'] +['améli', 'orer'] +['å½ĵãģŁãĤĬ', 'åīį'] +['Ġreg', 'elm'] +['Ġregelm', 'Ã¤ÃŁig'] +['ãģĬ', 'åĭ'] +['ãģĬåĭ', '§'] +['ãģĬåĭ§', 'ãĤģ'] +['Ġm', 'ưá»Ŀi'] +['بر', 'Ùħج'] +['ĠNat', 'ürlich'] +['ĠD', 'Å©ng'] +['ĠاÙĦر', 'جاÙĦ'] +['Ġthé', 'p'] +['Ġol', 'muÅŁtur'] +['×ŀ×ķס', '×Ļ×§×Ķ'] +['f', 'älle'] +['주', 'íĥĿ'] +['ĠاÙĦÙģ', 'رص'] +['Ġnaj', 'wiÄĻks'] +['ĠnajwiÄĻks', 'zy'] +['Ġça', 'ÄŁ'] +['ĠçaÄŁ', 'rı'] +['ì¸', 'ł'] +['ĠvÃŃ', 'ct'] +['ĠvÃŃct', 'ima'] +['ĠÑģовеÑĢ', 'ÑĪен'] +['×Ķ×Ļ', '×Ļת×Ļ'] +['à¹Ģà¸Ķ', 'ี'] +['à¹Ģà¸Ķี', 'à¹ĭ'] +['à¹Ģà¸Ķีà¹ĭ', 'ยว'] +['ü', 'yü'] +['Ġд', 'оп'] +['Ġдоп', 'олн'] +['Ġдополн', 'иÑĤелÑĮно'] +['à¹ģà¸ķà¸ģà¸ķà¹Īาà¸ĩ', 'à¸ģัà¸Ļ'] +['Ġá', 'l'] +['Ġál', 'bum'] +['à¸Ľà¸£à¸°à¸Īำ', 'à¸Ľà¸µ'] +['ĠÑĦ', 'едеÑĢ'] +['ĠÑĦедеÑĢ', 'алÑĮн'] +['Ġobs', 'ÅĤ'] +['ĠobsÅĤ', 'ugi'] +['à¹Ģร', 'ืà¹Ī'] +['à¹Ģรืà¹Ī', 'à¸Ńย'] +['à¹Ģรืà¹Īà¸Ńย', 'à¹Ĩ'] +['ëģ', 'Į'] +['Ġngh', 'ìn'] +['ĠBaÅŁkan', 'lıģı'] +['تأ', 'سÙĬ'] +['تأسÙĬ', 'س'] +['Ġ×ij×ij', '×ķקר'] +['Ġ×¢×ij×ķ×ĵ', '×ķת'] +['Ġبص', 'ÙĪØ±Ø©'] +['ãĤıãģij', 'ãģ§ãģ¯ãģªãģĦ'] +['führ', 'er'] +['ãĤ¹', 'ãĤŃ'] +['ãĤ¹ãĤŃ', 'ãĥ«'] +['ĠاÙĦÙĤ', 'ض'] +['ĠاÙĦÙĤض', 'ÙĬØ©'] +['Ġдолж', 'ноÑģÑĤ'] +['ÙģØ§Ø±', 'ÙĤ'] +['Ġcomeç', 'ou'] +['Ġorganis', 'é'] +['Ġxu', 'ân'] +['ĠÑģообÑī', 'аеÑĤ'] +['ĠпÑĢи', 'д'] +['ĠпÑĢид', 'еÑĤÑģÑı'] +['TÃľ', 'RK'] +['ãĥ¬', 'ãĥ¼ãĤ·ãĥ§ãĥ³'] +['Kh', 'ông'] +['است', 'Ùģ'] +['استÙģ', 'ادة'] +['ä¸ĬãģĮ', 'ãģ£ãģ¦'] +['Ġum', 'ie'] 
+['Ġumie', 'jÄĻ'] +['ĠumiejÄĻ', 'tn'] +['ĠumiejÄĻtn', 'oÅĽci'] +['ëĤ', '¸'] +['à¹Ģà¸Ļ', 'à¸Ńรà¹Į'] +['×ĵ×ķ', '×ķ×Ĺ'] +['ÃŃs', 'imo'] +['I', 'ÃĬ'] +['IÃĬ', 'N'] +['Ġalcan', 'ç'] +['Ġ', 'à¸ķุ'] +['Ġà¸ķุ', 'ลา'] +['Ġà¸ķุลา', 'à¸Ħม'] +['ש׾', '×ĺ×ķף'] +['Ġél', 'è'] +['Ġélè', 'ves'] +['ĠÄij', 'u'] +['ĠÄiju', 'á»ķi'] +['ĠØ£', 'Ùģ'] +['ĠØ£Ùģ', 'رÙĬ'] +['ĠØ£Ù쨱ÙĬ', 'ÙĤÙĬ'] +['ĠØ£Ù쨱ÙĬÙĤÙĬ', 'ا'] +['ãĤĴæİ¢', 'ãģĻ'] +['ĠпÑĢед', 'ложениÑı'] +['ج', 'اد'] +['ĠÑħоÑĤ', 'ÑĮ'] +['Ñģ', 'ал'] +['Ñģал', 'он'] +['à¸Ľà¸£à¸°', 'à¹Ģม'] +['à¸Ľà¸£à¸°à¹Ģม', 'ิà¸Ļ'] +['ãĤŃ', 'ãĥĥãĥģ'] +['ãĤŃãĥĥãĥģ', 'ãĥ³'] +['×ij×ĵ×Ļ×§', '×ķת'] +['Ġch', 'ù'] +['Ġchù', 'a'] +['ÐĴ', 'иде'] +['ÐĴиде', 'о'] +['иÑĢов', 'ка'] +['ĠÑħоÑĤ', 'иÑĤе'] +['Ġspéc', 'ifique'] +['รส', 'à¸Ĭาà¸ķิ'] +['è¾¼', 'ãĤĵãģł'] +['伸', 'ãģ³'] +['×Ķצ׾', '×Ĺת'] +['ãģ©ãģ®', 'ãĤĪãģĨãģ«'] +['سع', 'ادة'] +['Ġл', 'ид'] +['Ġлид', 'еÑĢ'] +['ม', 'à¸ĩ'] +['มà¸ĩ', 'à¸Ħล'] +['ØŃ', 'اÙħÙĦ'] +['หล', 'ุà¸Ķ'] +['à¸Ńยà¹Īาà¸ĩ', 'à¸ķà¹Īà¸Ń'] +['à¸Ńยà¹Īาà¸ĩà¸ķà¹Īà¸Ń', 'à¹Ģà¸Ļืà¹Īà¸Ńà¸ĩ'] +['ãģķãģĽãģ¦', 'éłĤ'] +['تس', 'ÙĪÙĬ'] +['تسÙĪÙĬ', 'ÙĤ'] +['ĠaÅŁaģı', 'd'] +['ĠaÅŁaģıd', 'aki'] +['ĠÑĨ', 'елÑĮ'] +['ĠÑĨелÑĮ', 'Ñİ'] +['ĠAra', 'ÅŁtırma'] +['à¸Ĥัà¸ļ', 'รà¸ĸ'] +['Ùĩ', 'ذÙĩ'] +['ลà¸ĩ', 'à¸Ĺะ'] +['ลà¸ĩà¸Ĺะ', 'à¹Ģà¸ļ'] +['ลà¸ĩà¸Ĺะà¹Ģà¸ļ', 'ียà¸Ļ'] +['تÙĥ', 'اÙħÙĦ'] +['Ġc', 'io'] +['Ġcio', 'è'] +['ãģ¦', 'ãģĬãģı'] +['ĠاÙĦصØŃ', 'ÙģÙĬ'] +['ĠíĬ¹', 'ìłķ'] +['полн', 'иÑĤÑĮ'] +['ãĤĵ', 'ãģĺãĤĥãģªãģĦ'] +['ãĤĵãģĺãĤĥãģªãģĦ', 'ãģĭ'] +['ĠاÙĦج', 'Ùĩ'] +['ĠاÙĦجÙĩ', 'ات'] +['ĠÑĥÑģпеÑĪ', 'но'] +['Ġв', 'ок'] +['Ġвок', 'ÑĢÑĥг'] +['ĠÑģиÑĤÑĥ', 'аÑĨиÑı'] +['Ġ×Ķ×IJ', '×ŀר'] +['Ġ×Ķ×IJ×ŀר', '×Ļ×§'] +['Ġ×Ķ×IJ×ŀר×Ļ×§', '×IJ×Ļ'] +['×ŀ', '×Ĵ×ĸ'] +['×ŀ×Ĵ×ĸ', '×Ļף'] +['Ġак', 'ÑĤÑĥ'] +['ĠакÑĤÑĥ', 'алÑĮн'] +['é', 'ta'] +['éta', 'is'] +['Ġmog', 'ÅĤa'] +['ĠÑĤоÑĩ', 'ки'] +['Ġ×ŀ×Ķ', '×ŀ×¢'] +['Ġ×ŀ×Ķ×ŀ×¢', '×¨×Ľ×ª'] +['มี', 'à¸Ľà¸£à¸°à¸ªà¸´à¸Ĺà¸ĺà¸´à¸łà¸²à¸ŀ'] +['×Ļר', '×Ļ×ĵ×Ķ'] +['×Ĵר', '×ŀ׳'] +['×Ĵר×ŀ׳', '×Ļ×Ķ'] +['Ġг', 'лав'] +['Ġглав', 'ное'] +['Ġ미', 'ëŀĺ'] +['Ġ׳׼', '×ķ׳×Ķ'] +['ĠÙĪ', 'Ø·ÙĨÙĬ'] +['op', 
'port'] +['opport', 'unitÃł'] +['Ġh', 'á»§y'] +['ĠÙĦ', 'تØŃ'] +['ĠÙĦتØŃ', 'ÙĤÙĬÙĤ'] +['Ġó', 'rg'] +['Ġórg', 'ão'] +['ãĤ¹', 'ãĥĶ'] +['ãĤ¹ãĥĶ', 'ãĥ¼ãĥī'] +['Ġön', 'ü'] +['Ġönü', 'ne'] +['Ùħع', 'اÙħÙĦ'] +['ש×ŀ', '×Ļר×Ķ'] +['ĠвеÑģÑĮ', 'ма'] +['ĠwiÄĻks', 'zo'] +['ĠwiÄĻkszo', 'ÅĽÄĩ'] +['Ġاست', 'راتÙĬج'] +['ĠاستراتÙĬج', 'ÙĬØ©'] +['ĠÙģ', 'Ø¥'] +['ĠÙ쨥', 'ذا'] +['à¹Ģà¸Ĭืà¹Īà¸Ń', 'ม'] +['à¹Ģà¸Ĭืà¹Īà¸Ńม', 'à¸ķà¹Īà¸Ń'] +['Ġ׾', 'פר'] +['Ġ׾פר', '×ĺ×Ļ×Ŀ'] +['Ùħض', 'ÙĬ'] +['ĠGer', 'çek'] +['Ġçocuk', 'ların'] +['ÙĪØ«', 'ائÙĤ'] +['ĠÙħساء', 'Ùĭ'] +['Ġunterstüt', 'zt'] +['Ġpré', 'st'] +['Ġprést', 'amo'] +['ĠÐłÐ°Ð·', 'меÑĢ'] +['ĠÅŁ', 'eker'] +['Ġsé', 'culo'] +['×ij×Ķ', '×Ļר'] +['Ø´Ùĩ', 'ÙĪØ±'] +['Ġ', 'à¸Ńีà¸ģ'] +['Ġà¸Ńีà¸ģ', 'à¸Ĺัà¹īà¸ĩ'] +['Ġlleg', 'ó'] +['à¸¨à¸´à¸¥à¸Ľ', 'ะ'] +['æĪij', 'ãģĮ'] +['æĪijãģĮ', 'å®¶'] +['ع', 'ÙĤÙĪ'] +['عÙĤÙĪ', 'بات'] +['ĠF', 'älle'] +['Ġs', 'ÅĤuż'] +['ĠsÅĤuż', 'b'] +['ĠاÙĦØŃÙĤ', 'ÙĪÙĤ'] +['Ġпл', 'иÑĤ'] +['Ġи', 'ноÑģÑĤ'] +['ĠиноÑģÑĤ', 'ÑĢан'] +['ĠиноÑģÑĤÑĢан', 'н'] +['à¹ĥà¸Ļ', 'à¸Ĥà¸ĵะà¸Ĺีà¹Ī'] +['ãĤ«', 'ãĥĨ'] +['ãĤ«ãĥĨ', 'ãĤ´'] +['ãĤ«ãĥĨãĤ´', 'ãĥª'] +['à¸Ńิ', 'ส'] +['à¸Ńิส', 'ระ'] +['à¹Ģà¸ľà¸¢', 'à¹ģ'] +['à¹Ģà¸ľà¸¢à¹ģ', 'à¸ŀร'] +['à¹Ģà¸ľà¸¢à¹ģà¸ŀร', 'à¹Ī'] +['ãģĬ', 'ãģĦ'] +['ãģĬãģĦ', 'ãģĹãģĦ'] +['است', 'ÙĤÙĦ'] +['استÙĤÙĦ', 'اÙĦ'] +['تØŃ', 'ض'] +['تØŃض', 'ÙĬر'] +['åĬ©', 'ãģij'] +['Ùħر', 'اÙģÙĤ'] +['Ġ×ĵ', '×ķר'] +['Ġ×ĵ×ķר', 'ש'] +['×ŀת×Ļ', '×Ļ×Ĺס'] +['ס', '×Ļ׼'] +['ס×Ļ׼', '×ķ×Ŀ'] +['íĮĮ', 'íĬ¸'] +['Ġwy', 'ÅĽ'] +['ĠwyÅĽ', 'w'] +['ĠwyÅĽw', 'iet'] +['ĠwyÅĽwiet', 'l'] +['ĠاÙĦاÙĨ', 'ساÙĨ'] +['ĠStra', 'ÃŁen'] +['ï¼', '¬'] +['ãģ«', 'åŁº'] +['ãģ«åŁº', 'ãģ¥'] +['Ġcap', 'ÃŃtulo'] +['ลุ', 'ย'] +['Ġ×Ķ×ŀ×§', 'צ×ķ×¢×Ļ'] +['ãģĤãĤĭ', 'ç¨ĭ度'] +['á»', '¢'] +['ĠاÙĦ', 'ÙĦا'] +['ĠاÙĦÙĦا', 'زÙħØ©'] +['æķĻ', 'ãģĪ'] +['Ġרש', '×IJ×Ļ'] +['з', 'ав'] +['зав', 'иÑģ'] +['завиÑģ', 'им'] +['à¸Ľà¸±à¸Ī', 'à¸Īัย'] +['à¹Ģà¸ĭ', 'ล'] +['à¹Ģà¸ĭล', 'ลà¹Į'] +['Ġdiffé', 'rence'] +['ĠAlt', 'ın'] +['Ġк', 'ÑĢай'] +['ĠкÑĢай', 'не'] +['Ġз', 'ло'] +['Ġgün', 'ümüz'] +['Ġн', 'аÑĤÑĥÑĢ'] +['ĠнаÑĤÑĥÑĢ', 'алÑĮн'] 
+['×Ĵ×ķ׾', 'ש×Ļ×Ŀ'] +['Ġк', 'аÑĤегоÑĢ'] +['ĠкаÑĤегоÑĢ', 'ии'] +['Ġз', 'нак'] +['à¸ģà¹Īà¸Ńà¸Ļ', 'หà¸Ļà¹īา'] +['à¸ģà¹Īà¸Ńà¸Ļหà¸Ļà¹īา', 'à¸Ļีà¹ī'] +['ĠÙħÙĨ', 'ت'] +['ĠÙħÙĨت', 'خب'] +['ãĥĽ', 'ãĥ¼ãĥ«'] +['Ġе', 'вÑĢо'] +['ส', 'ว'] +['สว', 'ม'] +['ĠìľĦ', 'ìĽIJ'] +['ĠìľĦìĽIJ', 'ëĭĺ'] +['ĠاÙĦØŃ', 'ÙĪØ«'] +['ĠاÙĦØŃÙĪØ«', 'ÙĬ'] +['ĠÑģодеÑĢж', 'иÑĤ'] +['ãĥķãĤ¡', 'ãĥĥãĤ·ãĥ§ãĥ³'] +['Ġ', 'à¸ģัà¸Ļ'] +['Ġà¸ģัà¸Ļ', 'ย'] +['Ġà¸ģัà¸Ļย', 'ายà¸Ļ'] +['ãĤª', 'ãĥª'] +['ãĤªãĥª', 'ãĤ¸'] +['ãĤªãĥªãĤ¸', 'ãĥĬãĥ«'] +['Ġб', 'ÑĢенд'] +['ãĤĴæĮģ', 'ãģ£ãģ¦ãģĦãĤĭ'] +['Ġinvers', 'ión'] +['Ġê°', 'ĸ'] +['Ġê°ĸ', 'ê³ł'] +['Ġnov', 'itÃł'] +['ê´Ģ', 'ê´ij'] +['Ġà¸ŀ', 'ฤษ'] +['Ġà¸ŀฤษ', 'à¸łà¸²'] +['Ġà¸ŀà¸¤à¸©à¸łà¸²', 'à¸Ħม'] +['×ķר', '×Ĺ×Ļ×Ŀ'] +['׼׾', '×ķ׾'] +['Ġng', 'ạc'] +['×Ļ', '×Ļש'] +['×Ļ×Ļש', '×ķ×ij'] +['f', 'äll'] +['fäll', 'ig'] +['ĠÑĤÑĢеб', 'ÑĥеÑĤÑģÑı'] +['Ġcar', 'á'] +['Ġcará', 'cter'] +['Ġprinc', 'ÃŃpio'] +['ĠÅĤ', 'az'] +['ĠÅĤaz', 'ien'] +['ĠÅĤazien', 'k'] +['Ġgi', 'ãn'] +['ÑģÑĤÑĢа', 'ива'] +['Ùħس', 'اب'] +['Ùħساب', 'ÙĤØ©'] +['à¹Ģà¸Ħรืà¹Īà¸Ńà¸ĩ', 'à¸Ķืà¹Īม'] +['ترÙĥ', 'ÙĬب'] +['vol', 'ução'] +['ĠÐŁ', 'оÑĩ'] +['ĠÐŁÐ¾Ñĩ', 'ем'] +['ĠÐŁÐ¾Ñĩем', 'Ñĥ'] +['казал', 'оÑģÑĮ'] +['ĠпÑĢимен', 'ениÑı'] +['à¹Ģà¸Ĺ', 'ียม'] +['íĮ', 'Ķ'] +['à¸Ĥà¹īà¸Ń', 'à¹Ģสà¸Ļà¸Ń'] +['à¸Ľà¸±à¸į', 'à¸įา'] +['Ġоб', 'ÑĥÑĩ'] +['ĠобÑĥÑĩ', 'ениÑı'] +['ĠÑģеÑĢ', 'и'] +['ĠÑģеÑĢи', 'ал'] +['Ġingl', 'és'] +['ĠÙĦ', 'Ùĥرة'] +['Ġ×ĺ', '׾'] +['Ġ×ĺ׾', 'פ×ķף'] +['Ġìł', 'ij'] +['Ġìłij', 'ê·¼'] +['×IJ', '×ķ×Ĵ'] +['×IJ×ķ×Ĵ', '×ķס'] +['×IJ×ķ×Ĵ×ķס', '×ĺ'] +['ĠболÑĮÑĪ', 'ое'] +['ĠÐļон', 'еÑĩно'] +['×¢×Ļת', '×ķ׳'] +['×¢×Ļת×ķ׳', '×IJ×Ļ'] +['Ġкноп', 'к'] +['Ġз', 'н'] +['Ġзн', 'аÑĤÑĮ'] +['ĠÄij', 'á»±'] +['ĠÄijá»±', 'ng'] +['вл', 'аж'] +['влаж', 'н'] +['×ŀ', '×Ļ×ĺ×ij'] +['ãĤ¬', 'ãĤ¤'] +['ãĤ¬ãĤ¤', 'ãĥī'] +['........', '..'] +['Ġà¸ģ', 'ุม'] +['Ġà¸ģุม', 'à¸łà¸²à¸ŀ'] +['Ġà¸ģà¸¸à¸¡à¸łà¸²à¸ŀ', 'ัà¸Ļ'] +['Ġà¸ģà¸¸à¸¡à¸łà¸²à¸ŀัà¸Ļ', 'à¸ĺ'] +['Ġà¸ģà¸¸à¸¡à¸łà¸²à¸ŀัà¸Ļà¸ĺ', 'à¹Į'] +['be', 'z'] +['bez', 'pieczeÅĦst'] +['bezpieczeÅĦst', 'w'] +['ãĥijãĥij', 'æ´»'] +['ع', 'اط'] 
+['عاط', 'Ùģ'] +['ĠÄij', 'áºŃm'] +['Ġз', 'ÑĢ'] +['ĠзÑĢ', 'ениÑı'] +['Ġbor', 'ç'] +['Ġнед', 'ел'] +['Ġнедел', 'Ñİ'] +['Ġh', 'á»ı'] +['Ġhá»ı', 'ng'] +['ìŀ¥', 'ìķł'] +['ìŀ¥ìķł', 'ìĿ¸'] +['ĠاÙĦع', 'ÙĦاÙĤØ©'] +['Ġíģ', '¬'] +['Ġíģ¬', 'ê²Į'] +['à¹Ħร', 'à¹Ī'] +['à¸ļา', 'à¸Ķ'] +['à¸ļาà¸Ķ', 'à¹Ģà¸Īà¹ĩà¸ļ'] +['à¸Ŀ', 'รั'] +['à¸Ŀรั', 'à¹Īà¸ĩ'] +['à¸Ŀรัà¹Īà¸ĩ', 'à¹Ģศ'] +['à¸Ŀรัà¹Īà¸ĩà¹Ģศ', 'ส'] +['ר', '×¢×Ļ'] +['רע×Ļ', '×ķ׳×ķת'] +['Ġë', 'Į'] +['ĠëĮ', 'ĵ'] +['ĠëĮĵ', 'ê¸Ģ'] +['Ġnaj', 'b'] +['Ġnajb', 'li'] +['Ġnajbli', 'ż'] +['Ġnajbliż', 'sz'] +['ĠиÑģполÑĮз', 'ÑĥеÑĤÑģÑı'] +['Ġcient', 'ÃŃf'] +['ĠcientÃŃf', 'ico'] +['×¢', '×ŀ×§'] +['Ġg', 'ợi'] +['Ø´', 'ØŃÙĨ'] +['ĠÅĽ', 'm'] +['ĠÅĽm', 'ier'] +['ĠÅĽmier', 'ci'] +['à¸Ħาสิà¹Ĥà¸Ļ', 'à¸Ńà¸Ńà¸Ļà¹Ħลà¸Ļà¹Į'] +['×Ĺש×ij', 'ת×Ļ'] +['Ġn', 'ingu'] +['Ġningu', 'ém'] +['è¾¼', 'ãĤģ'] +['ãģ', '·'] +['ĠÑĥ', 'г'] +['ĠÑĥг', 'ол'] +['ï½', '°'] +['פת', '×Ļ×Ĺ'] +['פת×Ļ×Ĺ', 'ת'] +['Ġ×Ķר×IJש', '×ķ׳×Ļ×Ŀ'] +['p', 'ósito'] +['ãĤŃ', 'ãĥ¬ãĤ¤'] +['ãģ©', 'ãģĵãĤį'] +['à¹Ģà¸Ĺà¹Īา', 'à¹Ħ'] +['à¹Ģà¸Ĺà¹Īาà¹Ħ', 'หร'] +['à¹Ģà¸Ĺà¹Īาà¹Ħหร', 'à¹Ī'] +['ĠинÑĤеÑĢ', 'ÑĮеÑĢ'] +['ĠØŃ', 'اج'] +['ĠØŃاج', 'Ø©'] +['สี', 'à¸Ĥาว'] +['ìĸ', '¼'] +['Ġn', 'á»Ļ'] +['Ġná»Ļ', 'p'] +['ĠÃŃ', 'nd'] +['ĠÃŃnd', 'ice'] +['สำ', 'รวà¸Ī'] +['Ġкажд', 'ой'] +['Ġhot', 'éis'] +['Ġnast', 'ÄĻ'] +['ĠnastÄĻ', 'pn'] +['Ġ×Ķ×§', '×ķ×ĵ'] +['Ġ×Ķ×§×ķ×ĵ', '×Ŀ'] +['פ', '×ķפ'] +['פ×ķפ', '×ķ׾'] +['פ×ķפ×ķ׾', 'ר×Ļ'] +['вÑĪ', 'ей'] +['ãĤ·ãĥ³', 'ãĥĹ'] +['ãĤ·ãĥ³ãĥĹ', 'ãĥ«'] +['ĠzdjÄĻ', 'Äĩ'] +['ĠгÑĢÑĥпп', 'а'] +['Ġпом', 'еÑī'] +['ĠпомеÑī', 'ениÑı'] +['ãģ©ãģĨ', 'ãģĦãģĨ'] +['ĠиÑģп', 'ÑĭÑĤа'] +['Ġog', 'ÅĤ'] +['ĠogÅĤ', 'os'] +['ĠogÅĤos', 'zen'] +['ĠogÅĤoszen', 'i'] +['สรà¹īาà¸ĩ', 'สรร'] +['สรà¹īาà¸ĩสรร', 'à¸Ħà¹Į'] +['à¸ŀร', 'รà¸ĵ'] +['Ġçık', 'Ä±ÅŁ'] +['ĠÑĩаÑģÑĤ', 'ноÑģÑĤи'] +['Ġ×ķ', '×Ļ×ķתר'] +['ç¶ļãģį', 'ãĤĴ'] +['ç¶ļãģįãĤĴ', 'èªŃ'] +['ç¶ļãģįãĤĴèªŃ', 'ãĤĢ'] +['à¸ģร', 'ั'] +['à¸ģรั', 'ม'] +['г', 'ÑĢаÑĦ'] +['Ġв', 'лад'] +['Ġвлад', 'елÑĮ'] +['ĠвладелÑĮ', 'ÑĨ'] +['Ġistedi', 'ÄŁ'] +['ĠistediÄŁ', 'iniz'] +['×ij׾', '×¢'] +['×ij×ľ×¢', 
'×ĵ×Ļ'] +['ÙħÙĪ', 'اÙģ'] +['ÙħÙĪØ§Ùģ', 'ÙĤØ©'] +['Ġ×Ļ', '×ķר'] +['Ġ×Ļ×ķר', '×§'] +['ãĤ«ãĥ¼ãĥī', 'ãĥŃãĥ¼ãĥ³'] +['ĠاÙĦÙħØ´', 'ÙĥÙĦ'] +['ĠاÙĦÙħØ´ÙĥÙĦ', 'Ø©'] +['ĠêµŃ', 'íļĮ'] +['ס', 'פ×ĺ'] +['ספ×ĺ', '×ŀ'] +['ספ×ĺ×ŀ', '×ijר'] +['Ġìĸ´', 'ëłµ'] +['Ùĥ', 'اÙħ'] +['ÙĥاÙħ', 'ÙĬرا'] +['sch', 'lü'] +['schlü', 'sse'] +['ĠØ«', 'ÙĨ'] +['ĠØ«ÙĨ', 'ائÙĬ'] +['ìī', '½'] +['ĠÐŀ', 'Ñģоб'] +['ĠÐŀÑģоб', 'енно'] +['Ġин', 'веÑģÑĤи'] +['ĠинвеÑģÑĤи', 'ÑĨи'] +['اØŃ', 'تÙħ'] +['اØŃتÙħ', 'اÙĦ'] +['E', 'Äŀ'] +['EÄŀ', 'İ'] +['íķĺ', 'ê²łëĭ¤'] +['Ġ×IJ', '×ijר×Ķ'] +['Ġ×IJ×ijר×Ķ', '×Ŀ'] +['Ġ×ij×Ĺ', '×Ļ׳×Ŀ'] +['Ø£', 'ÙĪØ¶'] +['Ø£ÙĪØ¶', 'اع'] +['Ġdé', 'l'] +['Ġdél', 'ai'] +['Ġ×IJ×ķ×Ķ', '×ij×Ļ×Ŀ'] +['ĠÑģо', 'Ñħ'] +['ĠÑģоÑħ', 'ÑĢ'] +['ĠÑģоÑħÑĢ', 'ани'] +['ĠдоÑģÑĤ', 'иж'] +['ĠдоÑģÑĤиж', 'ени'] +['สิà¹Īà¸ĩ', 'à¹ģ'] +['สิà¹Īà¸ĩà¹ģ', 'วà¸Ķ'] +['สิà¹Īà¸ĩà¹ģวà¸Ķ', 'ล'] +['สิà¹Īà¸ĩà¹ģวà¸Ķล', 'à¹īà¸Ńม'] +['ĠاÙĦÙħ', 'باشر'] +['ĠÑĦ', 'иг'] +['ĠÑĦиг', 'ÑĥÑĢ'] +['мож', 'ем'] +['׾×ŀ×Ļ×ĵ', '×Ķ'] +['Ġcin', 'é'] +['Ġciné', 'ma'] +['Ġb', 'ada'] +['Ġbada', 'ÅĦ'] +['جب', 'ÙĩØ©'] +['Ġд', 'еп'] +['Ġдеп', 'ÑĥÑĤ'] +['ĠдепÑĥÑĤ', 'аÑĤ'] +['Ġdist', 'ância'] +['ĠاÙĦÙħ', 'عار'] +['ĠاÙĦÙħعار', 'ضة'] +['thè', 'se'] +['ü', 'nc'] +['ünc', 'ü'] +['Ġдан', 'ного'] +['ĠBel', 'gi'] +['ĠBelgi', 'ë'] +['Ġ×ij', '×ij×§'] +['Ġ×ij×ij×§', 'ש×Ķ'] +['ย', 'à¹Īาà¸Ļ'] +['Ġsol', 'ução'] +['Ġ×Ķצ', '×ĺר'] +['Ġ×Ķצ×ĺר', 'פ×ķ'] +['ĠØ£ÙĨ', 'ØŃ'] +['ĠØ£ÙĨØŃ', 'اء'] +['Ġد', 'ÙħØ´'] +['ĠدÙħØ´', 'ÙĤ'] +['มั', 'à¹ī'] +['มัà¹ī', 'ย'] +['Ùħ', 'غرب'] +['است', 'عÙħاÙĦ'] +['ĠS', 'ÅĤow'] +['ĠëıĻ', 'ìĭľ'] +['ĠëıĻìĭľ', 'ìĹIJ'] +['ĠÑģ', 'оÑģ'] +['ĠÑģоÑģ', 'ед'] +['ì²Ń', 'ìĨĮ'] +['ì²ŃìĨĮ', 'ëħĦ'] +['Ġг', 'ÑĢаÑĦ'] +['ĠгÑĢаÑĦ', 'ик'] +['Ġìŀij', 'ìĿĢ'] +['Ġyet', 'i'] +['Ġyeti', 'ÅŁtir'] +['ĠìĿ´ê²ĥ', 'ìĿ´'] +['ห', 'à¹Īาà¸ĩ'] +['Ø¥', 'ÙħÙĥاÙĨ'] +['Ø¥ÙħÙĥاÙĨ', 'ÙĬØ©'] +['است', 'عراض'] +['ÙħØ®', 'در'] +['ĠÑĩ', 'ÑĥÑĤÑĮ'] +['Ùħ', 'دÙĬر'] +['ÙħدÙĬر', 'ÙĬØ©'] +['Ġà¹Ģม', 'ษ'] +['Ġà¹Ģมษ', 'ายà¸Ļ'] +['Ġм', 'еÑħ'] +['ĠмеÑħ', 'аниз'] +['ĠмеÑħаниз', 'м'] +['ĠÑģ', 'Ñĥм'] +['ĠÑģÑĥм', 'мÑĥ'] 
+['Ġv', 'ö'] +['Ġvö', 'll'] +['Ġvöll', 'ig'] +['Ġд', 'ÑĢÑĥз'] +['ĠдÑĢÑĥз', 'ÑĮÑı'] +['ãĤĴåĪ©ç͍', 'ãģĹãģ¦'] +['à¸ļรร', 'à¸Īุ'] +['po', 'życz'] +['×ŀש', '׼'] +['×ŀש׼', '×ł×ª'] +['×ŀ×©×Ľ×ł×ª', '×IJ'] +['Ġeuropé', 'en'] +['Ġpropri', 'é'] +['Ġproprié', 'taire'] +['Ġkh', 'ấu'] +['ãģĦãģŁãģł', 'ãģijãĤĭ'] +['Ġtec', 'rü'] +['Ġtecrü', 'be'] +['×Ķ', '×ij'] +['×Ķ×ij', '׳×Ķ'] +['Ġcu', 'Ì'] +['ĠcuÌ', 'ī'] +['ĠcuÌī', 'a'] +['×IJ', '×ķ×ķ'] +['×IJ×ķ×ķ', '×Ļר×Ķ'] +['Ġ׼×ķ׾', '×ķ'] +['U', 'lus'] +['Ulus', 'lararası'] +['Ġ׳', '×ķת'] +['Ġ׳×ķת', 'ף'] +['ãģ«', 'åIJij'] +['ãģ«åIJij', 'ãģijãģ¦'] +['ë¹', 'Ľ'] +['à¸Ĺ', 'ัà¸ģษ'] +['à¸Ĺัà¸ģษ', 'ะ'] +['س', 'ÙĤÙĪ'] +['سÙĤÙĪ', 'Ø·'] +['Ġв', 'н'] +['Ġвн', 'еÑĪ'] +['ĠвнеÑĪ', 'не'] +['Ġur', 'z'] +['Ġurz', 'ÄĻd'] +['Ġá', 'mb'] +['Ġámb', 'ito'] +['à¸Ń', 'à¸ĺิ'] +['à¸Ńà¸ĺิ', 'à¸ļาย'] +['Ġ', 'ÅĤad'] +['ĠÅĤad', 'n'] +['ê±´', 'ì¶ķ'] +['wód', 'zt'] +['wództ', 'w'] +['Ġquest', 'ões'] +['Ġש', '×§'] +['Ġשק', '×Ļ×ij׾'] +['Ġmiejsc', 'owoÅĽci'] +['Ġв', 'ал'] +['Ġвал', 'ÑİÑĤ'] +['hä', 'user'] +['หà¸Ļ', 'à¸Ńà¸ĩ'] +['ãģ¨', 'åħ±'] +['ãģ¨åħ±', 'ãģ«'] +['ãĥı', 'ãĥ¼ãĥī'] +['Ġê°ľ', 'ìµľ'] +['ĠоÑģнов', 'ном'] +['Ġм', 'ÑıÑģ'] +['اع', 'ت'] +['اعت', 'ÙĤاÙĦ'] +['สà¸ĸ', 'ิ'] +['สà¸ĸิ', 'à¸ķิ'] +['N', 'gu'] +['Ngu', 'á»ĵn'] +['ĠÙħ', 'جÙĦ'] +['ĠÙħجÙĦ', 'Ø©'] +['à¹ģà¸Ĥ', 'à¸Ļ'] +['ĠاÙĦÙĦÙĬ', 'بÙĬ'] +['פע×Ļ׾', '×ķ×Ļ×ķת'] +['Ġ×Ķר', 'פ×ķ×IJ×Ļ'] +['פר', '×ķפ'] +['פר×ķפ', '×Ļ׾'] +['×§', '׾×IJ'] +['ק׾×IJ', 'ס×Ļ'] +['Ùĥت', 'Ø´Ùģ'] +['ãģ«ãģª', 'ãģ£ãģ¦ãģĹãģ¾ãģĨ'] +['à¹Ģà¸Ħล', 'à¹ĩà¸Ķ'] +['à¹Ģà¸Ħลà¹ĩà¸Ķ', 'ลัà¸ļ'] +['Ġì»', '´'] +['Ġì»´', 'íĵ¨'] +['Ġì»´íĵ¨', 'íĦ°'] +['Ġ×Ĺ×Ļ', '×ķ×ij×Ļ'] +['Ġnä', 'm'] +['Ġnäm', 'lich'] +['åij¼', 'ãģ°'] +['åij¼ãģ°', 'ãĤĮ'] +['ĠÑĢ', 'ол'] +['ĠÑĢол', 'и'] +['Ġspécial', 'isé'] +['à¸Ļ', 'วัà¸ķ'] +['à¸Ļวัà¸ķ', 'à¸ģรรม'] +['ÙĨص', 'ÙĪØµ'] +['пеÑĢ', 'ед'] +['пеÑĢед', 'аÑĩ'] +['thè', 'que'] +['Ġר×IJ', '×Ļת×Ļ'] +['ãĥĢ', 'ãĤ¦ãĥ³'] +['ãĤı', 'ãģĭ'] +['ãĤıãģĭ', 'ãģ£ãģ¦'] +['беÑĢ', 'еж'] +['ĠÑģ', 'ек'] +['ĠÑģек', 'ÑĢ'] +['ĠÑģекÑĢ', 'еÑĤ'] +['ĠпоÑģÑĤоÑıн', 'н'] +['à¸Ĥà¸Ļ', 
'สà¹Īà¸ĩ'] +['Ġm', 'ük'] +['Ġmük', 'em'] +['Ġmükem', 'mel'] +['еÑĤ', 'еÑģÑĮ'] +['ĠاÙĦسÙĨ', 'ÙĪØ§Øª'] +['ĠìłĦ', 'íĺĢ'] +['Ġ×Ķ×ŀ×§', '×ķר×Ļ'] +['Ġmü', 'd'] +['Ġmüd', 'ah'] +['Ġmüdah', 'ale'] +['Ġwy', 'b'] +['Ġwyb', 'ór'] +['Ġtend', 'ência'] +['Ø¥', 'دار'] +['إدار', 'ÙĬØ©'] +['Ġunterstüt', 'zen'] +['ת', '×ijר'] +['ת×ijר', 'ר'] +['Ġdi', 'á'] +['Ġdiá', 'logo'] +['ĠÃĸ', 'nce'] +['ĠÃĸnce', 'ki'] +['ãĤ¹ãĥĿ', 'ãĥĥãĥĪ'] +['ëĦ', '£'] +['ĠG', 'eli'] +['ĠGeli', 'ÅŁ'] +['ãĤĴ', 'éĢļ'] +['ãĤĴéĢļ', 'ãģĹãģ¦'] +['ĠFuÃŁ', 'ball'] +['Ġsal', 'ari'] +['Ġsalari', 'é'] +['ĠпÑĢодÑĥк', 'ÑĤов'] +['صÙģ', 'ÙĤØ©'] +['รว', 'à¸ļ'] +['รวà¸ļ', 'รวม'] +['à¹ĥà¸Ļ', 'à¸IJาà¸Ļ'] +['à¹ĥà¸Ļà¸IJาà¸Ļ', 'ะ'] +['Ġkay', 'na'] +['Ġkayna', 'ģı'] +['Ġìŀij', 'íĴĪ'] +['ĠвÑĭ', 'ÑĢаж'] +['ĠвÑĭÑĢаж', 'ен'] +['ĠÑģÑĤ', 'еп'] +['ĠÑģÑĤеп', 'ени'] +['ĠاÙĦÙħ', 'ÙĪØ¬ÙĪØ¯'] +['ĠاÙĦÙħÙĪØ¬ÙĪØ¯', 'Ø©'] +['ล', 'à¹īม'] +['Ġnaj', 'czÄĻ'] +['ĠnajczÄĻ', 'ÅĽcie'] +['ĠnajczÄĻÅĽcie', 'j'] +['Ġz', 'wy'] +['Ġzwy', 'k'] +['Ġzwyk', 'ÅĤ'] +['Ġê·¸ëłĩ', 'ì§Ģ'] +['à¸ģระ', 'à¸Ī'] +['à¸ģระà¸Ī', 'าย'] +['Ġëĭ', 'µ'] +['Ġëĭµ', 'ë³Ģ'] +['ĠÑĢе', 'ак'] +['ĠÑĢеак', 'ÑĨи'] +['ĠÅĽwie', 'ż'] +['ĠÑģÑĤоим', 'оÑģÑĤи'] +['ÙħÙĨ', 'اÙĤ'] +['ÙħÙĨاÙĤ', 'Ø´'] +['ÙħÙĨاÙĤØ´', 'Ø©'] +['ĠÑħоÑĩ', 'Ñĥ'] +['ãĥľ', 'ãĥ¼ãĥī'] +['Ġróż', 'nic'] +['Ġк', 'ÑĢÑĭ'] +['ĠкÑĢÑĭ', 'ÑĪ'] +['âľ', 'ĵ'] +['ãĤ³ãĥ³', 'ãĥĨãĥ³'] +['ãĤ³ãĥ³ãĥĨãĥ³', 'ãĥĦ'] +['ĠпÑĢед', 'поÑĩ'] +['×ŀר', '×ij×Ļת'] +['ĠØ´', 'Ùĥ'] +['ĠØ´Ùĥ', 'را'] +['Ġд', 'ал'] +['Ġдал', 'ек'] +['Ġдалек', 'о'] +['بر', 'ÙĬØ·'] +['برÙĬØ·', 'اÙĨÙĬا'] +['ع', 'ÙĨا'] +['عÙĨا', 'ÙĬØ©'] +['ĠÑĢаÑģÑģ', 'каз'] +['ĠÑĢаÑģÑģказ', 'Ñĭва'] +['Ø£', 'ÙĦÙĪ'] +['Ø£ÙĦÙĪ', 'اÙĨ'] +['æĮģ', 'ãģ£ãģ¦'] +['æĮģãģ£ãģ¦', 'ãģĦ'] +['Ùħباد', 'ئ'] +['×Ķ', '×¢×ijר'] +['×Ķ×¢×ijר', 'ת'] +['Ġyay', 'ı'] +['Ġyayı', 'ml'] +['Ġyayıml', 'a'] +['m', 'át'] +['mát', 'icos'] +['à¸ģ', 'ัà¸ĩ'] +['à¸ģัà¸ĩ', 'วล'] +['Ġ׾', 'פת'] +['Ġ×ľ×¤×ª', '×ķ×Ĺ'] +['à¸ŀฤ', 'à¸ķิ'] +['à¸ŀฤà¸ķิ', 'à¸ģรรม'] +['í', 'Ĥ¬'] +['Ġок', 'ÑĢÑĥг'] +['Ġ×ŀצ', '×ķ×ķ×Ķ'] +['ÐĽ', 'ени'] +['ÐĽÐµÐ½Ð¸', 'н'] +['ĠTri', 
'á»ģu'] +['ãĤ³ãĥŁ', 'ãĥ¥'] +['ãĤ³ãĥŁãĥ¥', 'ãĥĭ'] +['ãĤ³ãĥŁãĥ¥ãĥĭ', 'ãĤ±'] +['ãĤ³ãĥŁãĥ¥ãĥĭãĤ±', 'ãĥ¼ãĤ·ãĥ§ãĥ³'] +['Ùĥ', 'ÙĨÙĬ'] +['ÙĥÙĨÙĬ', 'سة'] +['ãĤĴ', 'ä¸Ńå¿ĥ'] +['ãĤĴä¸Ńå¿ĥ', 'ãģ«'] +['ĠmiÄĻd', 'z'] +['ĠmiÄĻdz', 'yn'] +['ĠmiÄĻdzyn', 'ar'] +['ĠmiÄĻdzynar', 'od'] +['ĠmiÄĻdzynarod', 'ow'] +['ÙĦ', 'ÙĨ'] +['ÙĦÙĨ', 'دا'] +['بر', 'Ø´'] +['برش', 'ÙĦÙĪÙĨ'] +['برشÙĦÙĪÙĨ', 'Ø©'] +['à¸ģระ', 'à¸ķุ'] +['à¸ģระà¸ķุ', 'à¹īà¸Ļ'] +['Ġg', 'ı'] +['Ġgı', 'da'] +['à¸Ľà¸£à¸°', 'à¸Ĺัà¸ļ'] +['à¸Ľà¸£à¸°à¸Ĺัà¸ļ', 'à¹ĥà¸Ī'] +['Ġë¶Ī', '구'] +['Ġë¶Ī구', 'íķĺê³ł'] +['ĠÙĨ', 'Ø·'] +['ĠÙĨØ·', 'اÙĤ'] +['ĠÐľ', 'ожеÑĤ'] +['Pr', 'äs'] +['Präs', 'ident'] +['ĠÑģк', 'оÑĢ'] +['ĠÑģкоÑĢ', 'оÑģÑĤÑĮ'] +['Ġ×Ķ×ij', '×ķקר'] +['еÑħ', 'аÑĤÑĮ'] +['Ġg', 'ạo'] +['Ġש×IJ', '×Ļ׳×Ŀ'] +['Ġ×ij׳', '×ķ×Ĵ'] +['Ġ×ij׳×ķ×Ĵ', '×¢'] +['Ġо', 'пиÑģание'] +['Ġucz', 'ni'] +['Ġuczni', 'ów'] +['à¹Ģà¸Ń', 'à¹ĩà¸Ļ'] +['Ġت', 'Ø´'] +['Ġتش', 'رÙĬÙĨ'] +['Ġnh', 'ãn'] +['ë¹', '¨'] +['Ġcaract', 'ère'] +['×¢', '׾×Ļ'] +['×¢×ľ×Ļ', '×Ļ×Ķ'] +['楽ãģĹ', 'ãĤģãĤĭ'] +['ĠÑģ', 'аÑħ'] +['ĠÑģаÑħ', 'аÑĢ'] +['дÑĥм', 'аÑĤÑĮ'] +['ĠÐĴоз', 'можно'] +['ص', 'ÙĬاÙĨ'] +['صÙĬاÙĨ', 'Ø©'] +['öm', 'ür'] +['ส', 'ล'] +['สล', 'à¹ĩ'] +['สลà¹ĩ', 'à¸Ń'] +['สลà¹ĩà¸Ń', 'à¸ķ'] +['ë¡', '¯'] +['Ġth', 'ói'] +['gr', 'Ã¶ÃŁe'] +['Ġksi', 'ÄĻ'] +['ĠksiÄĻ', 'g'] +['ĠÑĢ', 'ом'] +['ĠÑĢом', 'ан'] +['ÙĤ', 'اسÙħ'] +['×ŀ×ij', '×ķ×Ĵ'] +['×ŀ×ij×ķ×Ĵ', 'ר×Ļ×Ŀ'] +['bes', 'ch'] +['besch', 'äft'] +['beschäft', 'ig'] +['×Ķצע', '×Ķ'] +['ĠÃģ', 'rea'] +['ĠзаÑıв', 'к'] +['Ä', '¹'] +['ĠлÑİб', 'ого'] +['Ġ', 'ม'] +['Ġม', 'à¸ģร'] +['Ġมà¸ģร', 'าà¸Ħม'] +['ÑĦ', 'из'] +['ÑĦиз', 'иÑĩеÑģк'] +['ин', 'ÑĦ'] +['инÑĦ', 'ек'] +['инÑĦек', 'ÑĨи'] +['اÙĦ', 'Ø·'] +['اÙĦØ·', 'ائÙģ'] +['Ġкол', 'л'] +['Ġколл', 'екÑĤив'] +['ез', 'жа'] +['Ġس', 'بØŃ'] +['ĠسبØŃ', 'اÙĨ'] +['ĠسبØŃاÙĨ', 'Ùĩ'] +['sch', 'lä'] +['schlä', 'ge'] +['Ġд', 'и'] +['Ġди', 'аг'] +['Ġдиаг', 'ноÑģÑĤ'] +['ĠоÑĤмеÑĤ', 'иÑĤÑĮ'] +['Т', 'Ь'] +['ĠاÙĦ', 'در'] +['ĠاÙĦدر', 'اسÙĬ'] +['עצ', '×ŀ'] +['עצ×ŀ', '×IJ×ķת'] +['Ġdém', 'arch'] +['Ġdémarch', 'e'] +['Ġ×ĺ', '×ķ×¢'] 
+['Ġ×ĺ×ķ×¢', 'ף'] +['Ġfuncion', 'ários'] +['á»', 'µ'] +['׾', '׼×IJ'] +['׾׼×IJ', '×ķר×Ķ'] +['à¸ĭ', 'à¹Ī'] +['à¸ĭà¹Ī', 'à¸Ńม'] +['ĠÑĩ', 'Ñĥв'] +['ĠÑĩÑĥв', 'ÑģÑĤво'] +['âĸ', '¼'] +['п', 'ÑĥÑī'] +['пÑĥÑī', 'ен'] +['Ġм', 'еÑĢ'] +['ĠмеÑĢ', 'оп'] +['ĠмеÑĢоп', 'ÑĢи'] +['ĠмеÑĢопÑĢи', 'ÑıÑĤиÑı'] +['Ġu', 'çu'] +['Ġuçu', 'ÅŁ'] +['ãĤĴåĪ©ç͍', 'ãģĻãĤĭ'] +['a', 'ÄŁ'] +['aÄŁ', 'lı'] +['ìĺĪ', 'ìĪł'] +['à¹ģ', 'ยà¹Ī'] +['ĠاÙĦÙĥ', 'Ùħ'] +['ĠاÙĦÙĥÙħ', 'بÙĬ'] +['ĠاÙĦÙĥÙħبÙĬ', 'ÙĪØªØ±'] +['ت', 'ÙĪÙĬ'] +['تÙĪÙĬ', 'تر'] +['à¹Ģà¸Ĭ', 'ีà¹Īยว'] +['à¹Ģà¸Ĭีà¹Īยว', 'à¸Ĭา'] +['à¹Ģà¸Ĭีà¹Īยวà¸Ĭา', 'à¸į'] +['á»', 'Ķ'] +['Ġhi', 'ếm'] +['ذا', 'Ùĥرة'] +['Ġ×Ķ×ŀ×Ļ', '×ķ×Ĺ×ĵ'] +['ĠìĪ', 'ľ'] +['ĠìĪľ', 'ê°Ħ'] +['ĠK', 'ı'] +['ĠKı', 'sa'] +['Ġgele', 'ceÄŁi'] +['пÑĢо', 'ÑĦеÑģÑģиона'] +['пÑĢоÑĦеÑģÑģиона', 'л'] +['Ġog', 'ó'] +['Ġogó', 'le'] +['ĠgÅĤ', 'ów'] +['ĠgÅĤów', 'ne'] +['ĠÑģÑĤ', 'илÑĮ'] +['×IJ', 'פ׾'] +['×IJפ׾', '×Ļ×§'] +['×IJפ׾×Ļ×§', 'צ×Ļ×Ķ'] +['สม', 'ารà¹Į'] +['สมารà¹Į', 'à¸Ĺ'] +['สมารà¹Įà¸Ĺ', 'à¹Ĥà¸Ł'] +['สมารà¹Įà¸Ĺà¹Ĥà¸Ł', 'à¸Ļ'] +['Ġth', 'ánh'] +['ÐŁ', 'од'] +['ÐŁÐ¾Ð´', 'ÑĢоб'] +['ÐŁÐ¾Ð´ÑĢоб', 'нее'] +['ĠاÙĦت', 'ÙĪÙĨ'] +['ĠاÙĦتÙĪÙĨ', 'سÙĬ'] +['Ġbah', 'çe'] +['à¹ģà¸ģà¹ī', 'à¸Ľà¸±à¸įหา'] +['é', 'ducation'] +['eu', 'rop'] +['europ', 'ä'] +['europä', 'ische'] +['ĠK', 'si'] +['ĠKsi', 'ÄĻ'] +['ĠëĦ', 'ĺ'] +['ĠëĦĺ', 'ìĸ´'] +['Ġv', 'üc'] +['Ġvüc', 'ud'] +['Ġyay', 'g'] +['Ġyayg', 'ın'] +['Ġnie', 'kt'] +['Ġniekt', 'óry'] +['Ġniektóry', 'ch'] +['ãģŃ', 'ãģĩ'] +['Ġк', 'аж'] +['Ġкаж', 'еÑĤÑģÑı'] +['к', 'аж'] +['каж', 'еÑĤ'] +['ĠاÙĦ', 'دÙĬÙħÙĤرا'] +['ĠاÙĦدÙĬÙħÙĤرا', 'Ø·'] +['ĠاÙĦدÙĬÙħÙĤراط', 'ÙĬØ©'] +['æŃ', '©'] +['æŃ©', 'ãģĦãģ¦'] +['Ġv', 'az'] +['Ġvaz', 'ge'] +['Ġvazge', 'ç'] +['Ġмин', 'ималÑĮ'] +['ĠминималÑĮ', 'н'] +['ãĥij', 'ãĤ¿'] +['ãĥijãĤ¿', 'ãĥ¼ãĥ³'] +['Ġë', 'Ĭ'] +['ĠëĬ', 'IJ'] +['ĠëĬIJ', 'ëĤĮ'] +['ãģ¡', 'ãĤĩãģĨ'] +['ãģ¡ãĤĩãģĨ', 'ãģ©'] +['Ġ', 'à¸ģร'] +['Ġà¸ģร', 'à¸ģà¸İ'] +['Ġà¸ģรà¸ģà¸İ', 'าà¸Ħม'] +['تج', 'دÙĬد'] +['ĠØ´', 'اÙħÙĦ'] +['หลัà¸ģ', 'à¸IJาà¸Ļ'] +['ĠмаÑĢ', 'ÑĪ'] +['ĠмаÑĢÑĪ', 'ÑĢÑĥÑĤ'] +['Ġv', 'ÃŃt'] 
+['ĠvÃŃt', 'ima'] +['Ġquiz', 'á'] +['ay', 'gı'] +['×ĵ×ijר', '×Ļ×ķ'] +['Ġиз', 'д'] +['Ġизд', 'ели'] +['Ġиздели', 'Ñı'] +['п', 'ла'] +['пла', 'Ñĩ'] +['плаÑĩ', 'ива'] +['ä»»', 'ãģĽ'] +['Ġéquip', 'é'] +['ä¹ħ', 'ãģĹãģ'] +['ä¹ħãģĹãģ', '¶'] +['ä¹ħãģĹãģ¶', 'ãĤĬ'] +['Ġк', 'аÑĤ'] +['ĠкаÑĤ', 'ал'] +['ĠкаÑĤал', 'ог'] +['ส', 'à¹īม'] +['ĠÑĢ', 'ей'] +['ĠÑĢей', 'ÑĤ'] +['ĠÑĢейÑĤ', 'инг'] +['Ġth', 'uyá»ģn'] +['ĠاÙĦÙħ', 'ÙĤدس'] +['esp', 'ère'] +['ãģ«åħ¥', 'ãģ£ãģŁ'] +['หมาย', 'à¹Ģลà¸Ĥ'] +['ת×Ĺ×ķש', 'ת'] +['à¸Ļ', 'à¹Īะ'] +['Ġpe', 'ÅĤ'] +['ĠpeÅĤ', 'ne'] +['Ġpé', 'rd'] +['Ġpérd', 'ida'] +['หม', 'วà¸Ķ'] +['หมวà¸Ķ', 'หมูà¹Ī'] +['иÑĩеÑģк', 'ÑĥÑİ'] +['çµĤ', 'ãĤı'] +['çµĤãĤı', 'ãģ£ãģŁ'] +['Ġ×Ĵ', '×ķ×Ĵ׾'] +['à¸Ĺำ', 'à¸Ħวาม'] +['à¸Ĺำà¸Ħวาม', 'สะà¸Ńาà¸Ķ'] +['Hot', 'éis'] +['Ġз', 'аÑĢ'] +['ĠзаÑĢ', 'егиÑģÑĤ'] +['ĠзаÑĢегиÑģÑĤ', 'ÑĢи'] +['ĠзаÑĢегиÑģÑĤÑĢи', 'ÑĢова'] +['ĠÑģ', 'обÑĭÑĤи'] +['ĠÑģобÑĭÑĤи', 'Ñı'] +['Ġ×ĸ', '׼×IJ'] +['ÙħÙĨظ', 'ÙĪÙħØ©'] +['Ġ×Ķ×ŀ', 'צ'] +['Ġ×Ķ×ŀצ', '×Ļ×IJ×ķת'] +['Ùħ', 'ÙĥÙĪÙĨ'] +['ÙħÙĥÙĪÙĨ', 'ات'] +['ä¸ĬãģĮ', 'ãĤĭ'] +['Ġm', 'ÄĻ'] +['ĠmÄĻ', 'sk'] +['หรืà¸Ń', 'à¹Ģà¸Ľà¸¥à¹Īา'] +['ëĤ', '®'] +['Ġnok', 'tas'] +['Ġnoktas', 'ı'] +['ĠболÑĮÑĪ', 'им'] +['ĠлÑĥÑĩ', 'ÑĪиÑħ'] +['Ø´Ùĩ', 'ÙĬد'] +['à¸Ńำ', 'à¸Ļ'] +['à¸Ńำà¸Ļ', 'วย'] +['à¸Ńำà¸Ļวย', 'à¸Ħวาม'] +['à¸Ńำà¸Ļวยà¸Ħวาม', 'สะà¸Ķวà¸ģ'] +['Ġе', 'в'] +['Ġев', 'ÑĢ'] +['ĠевÑĢ', 'оп'] +['ĠевÑĢоп', 'ей'] +['à¸ī', 'าย'] +['ìĦ', 'Ń'] +['Ùħ', 'Ù쨧'] +['ÙħÙ쨧', 'ÙĪØ¶'] +['ÙħÙ쨧ÙĪØ¶', 'ات'] +['ë¹', 'Į'] +['赤', 'ãģ¡ãĤĥãĤĵ'] +['ĠÑĥдал', 'оÑģÑĮ'] +['ĠÐ¥', 'оÑĤ'] +['ĠХоÑĤ', 'Ñı'] +['przedsiÄĻbior', 'c'] +['ĠH', 'ôm'] +['íķĺìĺĢ', 'ìĬµëĭĪëĭ¤'] +['Ġн', 'аг'] +['Ġнаг', 'ÑĢÑĥз'] +['ĠнагÑĢÑĥз', 'к'] +['Ġ×ij×Ļ׳', '׾×IJ×ķ×ŀ×Ļ'] +['Ġê°ĢëĬ¥', 'íķľ'] +['ĠH', 'ữu'] +['à¸Ń', 'ุà¸Ķ'] +['à¸Ńุà¸Ķ', 'ม'] +['ת', '×ķפ'] +['ת×ķפ', '×¢×Ķ'] +['Ġmi', 'ÅĤo'] +['ĠmiÅĤo', 'ÅĽci'] +['ksi', 'Äħż'] +['ksiÄħż', 'ka'] +['ĠاÙĦÙĦ', 'عبة'] +['à¸ī', 'าà¸ģ'] +['สะ', 'สม'] +['×ŀ', 'תר'] +['×ŀתר', '×Ĺש'] +['Ġlég', 'ère'] +['Ġ׾צ', 'פ'] +['Ġ׾צפ', '×Ļ×Ķ'] +['ĠиÑģÑĤоÑĢ', 'иÑı'] +['Ġ', 'ãĥĪãĥ©'] 
+['ĠãĥĪãĥ©', 'ãĥĥãĤ¯'] +['ĠãĥĪãĥ©ãĥĥãĤ¯', 'ãĥIJãĥĥãĤ¯'] +['Ġк', 'а'] +['Ġка', 'ÑĦе'] +['×ŀס×ŀ', '×ļ'] +['Ġc', 'üm'] +['Ġcüm', 'le'] +['à¹Ģà¸Ħลืà¹Īà¸Ńà¸Ļ', 'à¹Ħหว'] +['ãģĬ', 'ãģĿ'] +['ãģĬãģĿ', 'ãĤīãģı'] +['ìŀIJ', 'ëıĻ'] +['ìŀIJëıĻ', 'ì°¨'] +['à¸Ńั', 'à¸ķ'] +['à¸Ńัà¸ķ', 'à¹Ĥà¸Ļ'] +['à¸Ńัà¸ķà¹Ĥà¸Ļ', 'มั'] +['à¸Ńัà¸ķà¹Ĥà¸Ļมั', 'à¸ķิ'] +['ĠÅŁ', 'ik'] +['ĠÅŁik', 'ay'] +['ĠÅŁikay', 'et'] +['extr', 'ême'] +['kr', 'ä'] +['krä', 'fte'] +['ëĤ', 'Ļ'] +['íķ', 'ij'] +['ì²', 'Ļ'] +['íĺ', 'Ī'] +['ì°', 'į'] +['âĻ', '¡'] +['ìŀ', 'Ķ'] +['ë¢', '°'] +['íĿ', 'Ķ'] +['íĿ', 'IJ'] +['âĩ', 'Ĵ'] +['ë§', 'Ľ'] +['ìĬ', 'Ī'] +['á»', 'Ĵ'] +['ìĺ', 'µ'] +['âĹ', 'İ'] +['í', 'Ĥ¨'] +['ê¿', 'Ī'] +['ìĪ', '¨'] +['ìĽ', '¨'] +['ë§', '¥'] +['ï½', 'Ģ'] +['ï¼', 'ª'] +['áº', '¨'] +['ãħ', 'İ'] +['Ñ', 'Ĺ'] +['ìĦ', '¬'] +['ì¹', '¼'] +['ï¼', '¶'] +['ìĽ', 'ł'] +['ëŁ', '´'] +['Å', 'ĥ'] +['ëĤ', '¼'] +['ëĭ', 'IJ'] +['âĢ', '¹'] +['ë¦', 'Ń'] +['ì§', 'IJ'] +['âĢ', '¤'] +['Ã', 'ħ'] +['ëľ', '¨'] +['íĦ', '¸'] +['íľ', 'ĺ'] +['ê²', 'ģ'] +['ë´', 'ħ'] +['Ã', 'ĺ'] +['ëŃ', 'Ķ'] +['ëĺ', 'ij'] +['âĹ', 'ĩ'] +['ìĹ', 'ĺ'] +['ï»', '´'] +['ë§', '¹'] +['ï¾', 'Ŀ'] +['ìĬ', '·'] +['íĥ', 'ķ'] +['ï¼', 'ł'] +['ì»', '´'] +['ëł', 'Į'] +['ì½', 'ľ'] +['ï»', '¹'] +['ãħ', 'ł'] +['ì¡', '¸'] +['ëħ', '¹'] +['âĤ', 'º'] +['âĸ', '¶'] +['íĥ', 'IJ'] +['êµ', '´'] +['íij', '¸'] +['Ñ', 'Ķ'] +['íĶ', '½'] +['Ð', 'ħ'] +['ë°', '¤'] +['Ô', 'ģ'] +['ì²', '¨'] +['ì¶', 'ĺ'] +['ë²', 'Ĺ'] +['ë©', '¸'] +['ï¼', '»'] +['ï¼', '½'] +['ï¼', '·'] +['ì°', 'Į'] +['Ã', 'Ĵ'] +['íı', '´'] +['ìĵ', '¸'] +['ì´', 'Į'] +['ëģ', 'Ķ'] +['ëĶ', '©'] +['ëĩ', 'Į'] +['ë©', 'Ģ'] +['ë²', '¨'] +['ï¼', 'µ'] +['ë§', '¡'] +['ëĭ', '«'] +['à¸', '¿'] +['ãģ', '±'] +['ìĩ', '¼'] +['ìº', 'ł'] +['ë®', '¤'] +['ê±', '±'] +['ì»', '¬'] +['âĦ', 'ĥ'] +['ëĶ', '±'] +['ëĥ', 'Ī'] +['ìĭ', '±'] +['íĻ', 'Ī'] +['ëŀ', 'IJ'] +['ìħ', 'Ģ'] +['ìł', 'ł'] +['Ð', 'Ĩ'] +['ëł', 'ī'] +['ï½', 'ħ'] +['ï½', 'ı'] +['íĻ', 'Ģ'] +['ëĽ', '°'] +['á»', '®'] +['í', 'Ĥ¹'] +['ê½', 'ĥ'] +['ï»', '¤'] +['ïº', 'Ķ'] +['êº', '¼'] +['ìķ', 'ī'] +['âĻ', 
'¦'] +['ï½', 'ģ'] +['ìĵ', '´'] +['ãĢ', 'ī'] +['ì°', '®'] +['ì¤', 'ĺ'] +['á»', 'ª'] +['ëģ', 'Ħ'] +['ëIJ', '¨'] +['ìķ', 'Į'] +['íĿ', 'ĺ'] +['íħ', 'IJ'] +['ãĢ', 'Ī'] +['ê²', 'ª'] +['ëĭ', '¥'] +['ê²', '¼'] +['á»', 'Į'] +['ë§', '¨'] +['ëģ', 'Ĭ'] +['ë²', '¤'] +['ëij', 'Ķ'] +['íĿ', '¡'] +['á»', '¬'] +['ë¬', 'ĺ'] +['ãģ', 'ī'] +['ëŀ', '«'] +['íĶ', 'Ī'] +['í', 'ħį'] +['ìŀ', 'ĥ'] +['ï½', 'ī'] +['ìģ', 'ľ'] +['âĸ', '½'] +['ë¬', '»'] +['âĸ', '³'] +['ï¼', '¸'] +['ìģ', 'ĺ'] +['ì¶', '°'] +['ìĬ', '´'] +['ìķ', '±'] +['ìĩ', 'Ħ'] +['áº', '®'] +['ï´', '¿'] +['ï´', '¾'] +['âĤ', '½'] +['ëĦ', 'ĵ'] +['ë£', '©'] +['ì³', '¤'] +['ê´', 'ľ'] +['Ã', 'Ļ'] +['á»', 'ľ'] +['ï¿', '£'] +['ëĵ', 'Ń'] +['ë©', 'ĺ'] +['ê»', '´'] +['ëł', '´'] +['Ð', 'ĥ'] +['ë¬', 'µ'] +['ì§', 'Ŀ'] +['ãģ', 'º'] +['ðŁĺ', 'Ĥ'] +['ëŀ', '¬'] +['ìł', 'Ĭ'] +['ê´', 'Ħ'] +['ìŀ', 'Ĭ'] +['íŀ', 'Į'] +['ìĦ', '¯'] +['âĪ', 'Ģ'] +['âĸ', '¡'] +['ëĢ', 'Į'] +['ëŀ', 'Ļ'] +['ï½', 'ĥ'] +['áº', '¶'] +['ï¾', 'Ħ'] +['ïº', 'ĺ'] +['ë¹', '¼'] +['Ã', 'Į'] +['âĸ', '·'] +['ê¸', 'į'] +['ë©', 'ĭ'] +['ãģ', 'ĥ'] +['ìĺ', 'Ĩ'] +['ìĺ', '®'] +['ëª', '¬'] +['ë¡', '¤'] +['ëł', '¬'] +['ëĬ', '¦'] +['âĸ', 'ª'] +['ì¼', 'ĵ'] +['ìľ', 'Ī'] +['ì§', '§'] +['ï½', '½'] +['ëĥ', 'ī'] +['ï¾', 'Į'] +['ëĺ', 'IJ'] +['ï¼', 'ĥ'] +['á»', 'Ħ'] +['ì´', '¬'] +['ì¶', '¤'] +['ï¼', '¹'] +['ï»', 'Ń'] +['âĤ', '«'] +['ï½', 'ĩ'] +['ìĺ', '·'] +['ëĸ', '¨'] +['âī', '«'] +['ë¦', '¿'] +['âľ', '¨'] +['Ù', '±'] +['ì¯', '¤'] +['ê¹', 'Ķ'] +['ðŁĺ', 'Ĭ'] +['ìĪ', '«'] +['ê³', '±'] +['êµ', '³'] +['ï½', 'ĭ'] +['à¸', 'Į'] +['Ä', 'ł'] +['ëĶ', '¸'] +['ë°', 'ij'] +['ìħ', 'ĭ'] +['íİ', '´'] +['âľ', 'ħ'] +['íĥ', 'ij'] +['ëĪ', 'ĩ'] +['íı', '¼'] +['ðŁĺ', 'į'] +['ìĺ', 'Ľ'] +['ï»', '£'] +['Ñ', 'ĺ'] +['ì©', 'Į'] +['ë¦', 'ħ'] +['ìĿ', 'į'] +['ï½', '¸'] +['ëį', 'ľ'] +['ãģ', 'ħ'] +['íİ', '¼'] +['ëĭ', 'Ŀ'] +['ë¿', 'Į'] +['ì¼', '°'] +['ìĭ', '«'] +['ë°', '¥'] +['íĽ', 'Į'] +['ì¨', 'Į'] +['ë¹', 'Ļ'] +['ï½', 'İ'] +['ë´', 'Ħ'] +['ìĦ', '¹'] +['ï½', '²'] +['ìĮ', 'ĵ'] +['Ò', 'ij'] +['ë°', 'į'] +['ëł', 'Ģ'] +['íĨ', '¤'] +['ï½', '¯'] 
+['ë¤', 'Ħ'] +['ê½', '¤'] +['ï½', 'Ĵ'] +['ìķ', '¨'] +['ï½', '¼'] +['ê¹', 'IJ'] +['íģ', 'IJ'] +['âĦ', 'ĸ'] +['ë§', 'º'] +['ïº', '®'] +['ëħ', 'ģ'] +['ê²', '¸'] +['ï»', 'ł'] +['íĬ', 'ľ'] +['Å', '¹'] +['ë¥', 'Ń'] +['ëĪ', 'ī'] +['ï½', 'Ķ'] +['íĮ', '¬'] +['ìŀ', 'ĩ'] +['ï', '¬ģ'] +['ï»', '¨'] +['ëij', '¥'] +['ëŀ', 'Ħ'] +['Ù', '¬'] +['íĭ', '´'] +['ìŀ', 'ī'] +['Ú', '¾'] +['ìĽ', 'ħ'] +['ï»', '®'] +['ëĭ', 'ī'] +['âī', 'ª'] +['âĹ', 'Ħ'] +['ëĪ', 'Į'] +['íĽ', '¼'] +['ì¤', 'į'] +['Å', '¸'] +['ì¤', '¬'] +['ì¾', 'Į'] +['ï½', 'ĵ'] +['ï¾', 'Ĭ'] +['ðŁı', '»'] +['ï¾', 'ī'] +['Ð', 'ģ'] +['íĺ', 'IJ'] +['ï¾', 'Ļ'] +['ê¼', '¬'] +['íŀ', 'IJ'] +['âĢ', '¥'] +['ëŁ', 'Ń'] +['ë§', 'ŀ'] +['ìĥ', '¤'] +['ïº', 'Ĵ'] +['íĭ', '±'] +['ë½', 'ij'] +['Ã', 'ķ'] +['âĪ', 'ļ'] +['ëĤ', 'Ħ'] +['ê¹', 'Ŀ'] +['ëĨ', 'Ī'] +['áº', 'º'] +['ìħ', 'Ī'] +['ìĮ', 'į'] +['âĢ', '¡'] +['ï¼', '±'] +['ìģ', '¨'] +['âĺ', 'º'] +['ëĴ', '·'] +['ìĺ', '³'] +['ðŁij', 'į'] +['ëª', '½'] +['ëĤ', 'Ń'] +['ïº', 'Ń'] +['ë©', 'Ī'] +['á»', 'Ī'] +['íķ', 'Ģ'] +['ëĭ', 'Ļ'] +['ë¦', 'ĩ'] +['ìķ', '¤'] +['ìį', '¼'] +['ãĥ', 'µ'] +['Ñ', '£'] +['ìľ', 'Ĺ'] +['â', 'ŃIJ'] +['ï¾', 'ĺ'] +['íĹ', '¬'] +['ê¾', '¼'] +['ìķ', 'Ĺ'] +['ï»', 'Į'] +['ê±', '·'] +['ëħ', 'ķ'] +['ë¡', '±'] +['ìķ', 'Ĭ'] +['ï¾', 'Ģ'] +['ìĩ', 'ł'] +['íĮ', '©'] +['ïº', 'ª'] +['ë§', 'Ļ'] +['ï¼', '¿'] +['ê¿', 'Ķ'] +['íİ', 'ľ'] +['ë£', '¸'] +['íĶ', 'Ķ'] +['ï»', '³'] +['ëı', 'ķ'] +['ìĭ', '¼'] +['á»', 'İ'] +['ë§', 'ĺ'] +['ì¢', 'ĭ'] +['íĨ', '¡'] +['ï½', '±'] +['íĿ', 'ij'] +['á»', '¸'] +['ì¦', 'Į'] +['ì¹', '¸'] +['ëŃ', 'ĺ'] +['ï¾', 'Ĺ'] +['ï»', 'ĭ'] +['íĬ', 'Ģ'] +['ë¥', 'Ļ'] +['ì½', '©'] +['ëģ', 'Ĺ'] +['ëį', '´'] +['ìħ', 'ľ'] +['Â', '¸'] +['ë»', 'IJ'] +['ìĥ', 'µ'] +['ê²', 'IJ'] +['ëĵ', '¬'] +['ë£', '°'] +['ãħ', 'ĭ'] +['ìĹ', 'ī'] +['á»', 'ĸ'] +['ëĦ', 'Į'] +['ï½', '¶'] +['ë´', 'ĩ'] +['ëĤ', '³'] +['ãĤ', 'ľ'] +['ëĸ', '»'] +['íİ', 'Ģ'] +['ëį', '©'] +['íķ', '¸'] +['Ã', '·'] +['ê¼', '¼'] +['ëĶ', 'ľ'] +['ë°', '´'] +['ë©', 'į'] +['âĹ', '¯'] +['ìĹ', 'ij'] +['ìĻ', '¼'] +['ïº', 'ij'] +['ë¶', 'ķ'] +['ë¡', '¬'] 
+['ï½', 'Į'] +['íĨ', '¨'] +['ïº', '´'] +['ëł', 'ĺ'] +['ê°', '¤'] +['ìĪ', '²'] +['Ñ', 'ĵ'] +['ìħ', 'ī'] +['ï»', 'ĵ'] +['ëĪ', 'Ķ'] +['ëį', '§'] +['âĢ', '¼'] +['ï»', '²'] +['ê°', '±'] +['ê¿', 'Ģ'] +['ëĭ', '·'] +['áº', '¸'] +['áº', 'ª'] +['Æ', 'Ĵ'] +['ëį', '¤'] +['ìĪ', 'Ń'] +['ï½', 'Ĥ'] +['ï½', 'Ī'] +['Å', 'ł'] +['ë£', '¬'] +['Ñ', 'µ'] +['ëĸ', '¡'] +['ëĥ', 'Ħ'] +['ìĦ', '°'] +['ëĵ', 'Ī'] +['ï¾', 'ĥ'] +['ëĩ', '¨'] +['ï½', 'IJ'] +['êµ', '½'] +['ìĹ', '½'] +['ëĤ', 'Ģ'] +['ë¬', '¶'] +['ï½', '·'] +['ìı', 'Ł'] +['íĺ', 'Ķ'] +['ê¼', 'Ī'] +['ëģ', 'Ī'] +['ì¥', 'IJ'] +['ïº', 'Ĺ'] +['Ä', 'Į'] +['ëĪ', 'ł'] +['ëĸ', '¼'] +['íĢ', '´'] +['âī', '¥'] +['ëĭ', 'Ń'] +['ì±', 'Ļ'] +['ê»', 'ı'] +['ë©', '¤'] +['ìĥ', 'ĺ'] +['ëį', '®'] +['ë£', '¡'] +['ìĤ', '½'] +['ãĪ', 'ľ'] +['Ä', '¨'] +['âĢ', '§'] +['ï½', 'º'] +['Ä', '£'] +['ì¦', 'ī'] +['ï¼', '¼'] +['Û', '©'] +['âĪ', 'Ļ'] +['ë°', 'ı'] +['ë¹', 'ħ'] +['ðŁĺ', 'Ľ'] +['íĪ', '´'] +['ðŁĴ', 'ķ'] +['ãĢ', 'Ĵ'] +['ìŀ', 'ĺ'] +['ïº', '¤'] +['ï½', 'ĸ'] +['ë©', 'ľ'] +['ë²', '¼'] +['ëĿ', 'Ħ'] +['ëļ', 'ľ'] +['ï»', 'ĺ'] +['ìĥ', 'Į'] +['ï½', 'Ħ'] +['ì©', 'Ķ'] +['ï½', 'Ļ'] +['ïº', '©'] +['Û', 'ŀ'] +['âĺ', 'İ'] +['ìł', '¤'] +['ëIJ', '©'] +['Å', 'Ŀ'] +['âŀ', '¡'] +['ï»', '§'] +['Ð', 'ı'] +['ì«', 'ĵ'] +['ê³', '½'] +['É', 'ij'] +['ãĥ', '²'] +['ëĤ', '«'] +['ë¦', 'ī'] +['ì¢', 'ģ'] +['ë°', 'Ń'] +['ðŁĺ', 'ģ'] +['ë¹', 'µ'] +['ì²', '©'] +['ì»', 'µ'] +['ðŁĺ', 'ĺ'] +['ë±', 'ħ'] +['âī', 'Ī'] +['ë¹', 'ļ'] +['ï»', 'ľ'] +['ðŁĻ', 'ı'] +['íģ', '°'] +['ìĦ', 'ŀ'] +['ï¾', 'ļ'] +['ìĺ', '¹'] +['ë¼', 'Ī'] +['ëĤ', '¯'] +['ëŀ', '©'] +['íļ', '¡'] +['ï½', 'ķ'] +['íĥ', 'ĵ'] +['ëĿ', 'ł'] +['ê³', 'ģ'] +['ëĵ', 'Ģ'] +['ìĹ', 'ł'] +['ï¼', 'º'] +['ë§', 'ij'] +['ëĭ', '¿'] +['ì¿', '¨'] +['ãİ', '¡'] +['Ð', 'Ĭ'] +['íĦ', '±'] +['Å', '¨'] +['ïº', '³'] +['ï¾', 'ı'] +['âĭ', 'ħ'] +['ê¼', '´'] +['âī', '¤'] +['íĮ', 'ģ'] +['Î', '©'] +['ê¶', '¤'] +['ìĪ', 'į'] +['âľ', '¿'] +['ì½', '¤'] +['ëĪ', 'ħ'] +['íĨ', '±'] +['ãħ', 'ľ'] +['áIJ', 'ħ'] +['Å', 'Ĵ'] +['ðŁij', 'ī'] +['ï»', '¦'] +['Ð', 'ª'] +['ë¥', 'ľ'] +['íķ', '«'] 
+['ï¾', 'ĭ'] +['âĻ', '«'] +['ê¹', 'ľ'] +['ë°', '¸'] +['ëĶ', 'ĺ'] +['íĿ', 'ī'] +['ï¾', 'ģ'] +['ï¾', 'Ľ'] +['ëł', 'Ľ'] +['ê²', '¹'] +['ì¿', '¼'] +['ï»', '¬'] +['âŀ', '¤'] +['ðŁĻ', 'ģ'] +['ïº', 'ł'] +['ëĨ', '¨'] +['ë¯', '¹'] +['ê¸', 'ĭ'] +['ë»', 'Ķ'] +['ê¹', 'ĥ'] +['ëij', 'ij'] +['íĭ', '¸'] +['íİ', 'Ļ'] +['âŀ', 'ĸ'] +['ãĥ', '½'] +['ì§', 'ļ'] +['ï½', '¬'] +['ï»', '¥'] +['íĮ', '½'] +['âĢ', 'Ĵ'] +['ì', 'ĮĢ'] +['ìŃ', 'ī'] +['ëļ', '±'] +['ãĤ', 'ŀ'] +['íĭ', 'Ī'] +['ãĤ', 'IJ'] +['ëī', 'ĺ'] +['Î', '£'] +['ê³', '°'] +['ë¹', 'Ĺ'] +['ï¾', 'İ'] +['ðŁĺ', 'Ń'] +['íĿ', 'ł'] +['ìĹ', '¿'] +['ê°', 'ļ'] +['ì¤', 'Į'] +['ë§', 'µ'] +['ï½', '³'] +['ãģ', '¢'] +['ï»', 'Ĺ'] +['âī', '¦'] +['Ú', '¤'] +['ë', 'łģ'] +['ê¼', '½'] +['ï»', '«'] +['âī', '§'] +['ì´', 'Ľ'] +['ìł', 'Ŀ'] +['áº', '°'] +['âĻ', '£'] +['ìº', 'ĺ'] +['âĪ', 'ĩ'] +['ê²', 'ī'] +['ë°', 'Ł'] +['ï»', 'Ķ'] +['íĸ', 'ĩ'] +['âĸ', 'Ĵ'] +['ðŁij', 'ı'] +['Ã', 'ŀ'] +['ðŁĺ', 'Ĩ'] +['ïº', '¼'] +['âĿ', 'Ĺ'] +['ìº', 'Ķ'] +['ì¹', '©'] +['ëĸ', '¤'] +['ëĥ', 'ħ'] +['âĶ', 'ľ'] +['ï½', '»'] +['Î', 'Ķ'] +['áĥ', '¦'] +['ìŀ', 'İ'] +['âĺ', 'Ģ'] +['âĪ', '¼'] +['ðŁĶ', '¥'] +['ë°', 'Į'] +['ìł', 'ĸ'] +['íĹ', 'Ľ'] +['Î', 'ķ'] +['ïº', 'ĥ'] +['ë¶', 'ī'] +['âĪ', 'ŀ'] +['íĥ', 'Ń'] +['Ã', 'ĭ'] +['âģ', 'Ħ'] +['ãħ', 'ĩ'] +['ëĦ', '¥'] +['ëĭ', '®'] +['ëł', '·'] +['íĮ', 'Ŀ'] +['ìº', '¡'] +['ë·', 'Ķ'] +['ì©', 'į'] +['íĤ', '´'] +['ëļ', '«'] +['âĵ', 'Ĵ'] +['íķ', 'į'] +['âĻ', 'Ĥ'] +['ï¾', 'Ĩ'] +['âĨ', '©'] +['ìį', '©'] +['ïº', 'ķ'] +['íĿ', 'Ļ'] +['Ñ', 'ľ'] +['íĤ', '·'] +['íĿ', '°'] +['íĥ', '±'] +['ëķ', 'IJ'] +['ï¾', 'Ĵ'] +['×', 'ĥ'] +['ëĮ', 'Ħ'] +['ìĺ', '´'] +['ìķ', 'µ'] +['ê¹', '¥'] +['ëŀ', 'Ń'] +['ìª', '¼'] +['ãİ', 'Ŀ'] +['ðŁĺ', 'ħ'] +['ëı', 'ĭ'] +['ëª', '«'] +['ïº', '¸'] +['ë®', '¬'] +['ë²', 'ħ'] +['ëij', 'ł'] +['ìħ', '°'] +['ì»', '·'] +['ëĶ', 'ª'] +['ëħ', 'Ķ'] +['ãħ', '¡'] +['ìĶ', '»'] +['íķ', 'ı'] +['ëį', '±'] +['ïº', '¨'] +['ï¾', 'į'] +['ï½', 'µ'] +['ì¢', 'Ģ'] +['íİ', 'Į'] +['ï»', '°'] +['ïº', '£'] +['Æ', '£'] +['ð٤', '£'] +['ï·', 'º'] +['ëĤ', 'ļ'] +['âĭ', 'Ĩ'] +['ë³', 
'į'] +['ðŁĺ', 'Ħ'] +['ìĸ', 'Ģ'] +['ìĻ', 'ł'] +['ëĨ', 'Ķ'] +['íĹ', '¨'] +['ï»', 'Ľ'] +['ï»', 'Ŀ'] +['á»', '¶'] +['ìĸ', 'ĺ'] +['ìİ', 'Ħ'] +['Ú', 'Ĩ'] +['ï»', 'ŀ'] +['ëĢ', 'IJ'] +['ê²', 'Ķ'] +['ï»', 'µ'] +['âĹ', '¦'] +['íļ', 'Ł'] +['ê¹', 'ģ'] +['ê°', 'ĵ'] +['ëĶ', '´'] +['ìı', 'ĺ'] +['ëļ', 'Ŀ'] +['á»', 'ł'] +['ëŀ', '´'] +['ëĦ', 'ī'] +['âĺ', 'ŀ'] +['ï½', 'ĺ'] +['Å', '½'] +['ë¦', 'İ'] +['âĸ', '¬'] +['ëŃ', 'ī'] +['âĩ', 'Ľ'] +['ìį', '¬'] +['ïº', 'Ł'] +['Ë', 'ľ'] +['ë¶', 'ĵ'] +['ìĽ', '°'] +['Å', 'ľ'] +['ëŃ', 'ĩ'] +['á»', '²'] +['Ë', 'ļ'] +['ëķ', 'Ģ'] +['âĺ', 'ij'] +['ðŁı', '¼'] +['ìĸ', '½'] +['âĮ', 'Ĵ'] +['Ð', 'İ'] +['É', '¾'] +['íĮ', '¡'] +['ï¾', 'ħ'] +['ìŀ', 'Ń'] +['ï½', '¨'] +['ì¹', '«'] +['ìľ', 'Į'] +['Ò', 'Ľ'] +['êµ', '¿'] +['ëĭ', '¦'] +['âĶ', 'Ķ'] +['ï¾', 'ij'] +['ì§', 'ĸ'] +['ìº', 'Ħ'] +['ãĢ', 'ĥ'] +['Ê', '¼'] +['ê²', 'Ł'] +['ï½', '§'] +['Ä', '¢'] +['íİ', 'ł'] +['ë§', '·'] +['ê°', 'ĩ'] +['ìĭ', '¹'] +['ðŁĴ', '¦'] +['ï¾', 'ľ'] +['ëĬ', 'Ļ'] +['ë²', '¡'] +['Å', '¿'] +['ðŁĺ', 'ĭ'] +['ðŁĴ', 'ª'] +['ì¿', 'Ħ'] +['ë©', 'ķ'] +['ìŃ', '¤'] +['ëĬ', 'Ħ'] +['ðŁĮ', '¸'] +['ãĤ', 'Ŀ'] +['Ç', 'İ'] +['ï½', 'ļ'] +['Ä', 'Ĺ'] +['ëģ', 'ĵ'] +['ê¶', 'IJ'] +['áµ', 'ī'] +['ãĥ', 'Ĥ'] +['ê»', 'į'] +['ðŁĺ', '¦'] +['ãĢ', 'Ŀ'] +['ð٤', 'Ĺ'] +['Ñ', 'Ł'] +['ìĹ', 'İ'] +['âľ', 'Į'] +['ìī', 'IJ'] +['Ã', 'Ĩ'] +['íĹ', 'IJ'] +['ðŁİ', 'ī'] +['Î', 'ij'] +['ï½', 'Ń'] +['ðŁĴ', 'Ļ'] +['ìĽ', '¬'] +['íĢ', 'ĺ'] +['ï»', '¢'] +['ðŁĺ', 'İ'] +['íij', '¼'] +['íĿ', '©'] +['ï»', 'Ħ'] +['íħ', 'Ģ'] +['ëł', 'IJ'] +['ì¥', '¬'] +['Ð', 'ĭ'] +['ìĥ', '·'] +['ëľ', '¬'] +['ðŁĺ', 'ĥ'] +['ëĦ', '¬'] +['ë¥', '¨'] +['ìĽ', 'į'] +['ï½', 'Ĩ'] +['ï½', '´'] +['ãĥ', 'ħ'] +['Ã', 'ı'] +['ï»', 'ª'] +['âĻ', 'ł'] +['ëĬ', '¬'] +['ë±', 'Ģ'] +['ë°', 'ĭ'] +['ìĥ', 'Ģ'] +['ï½', '¾'] +['ëĤ', '±'] +['ì»', '¸'] +['ðŁĴ', 'ĸ'] +['ðŁij', 'Į'] +['Ñ', 'ŀ'] +['ì§', '±'] +['Ë', 'Ĩ'] +['ðŁĵ', 'ļ'] +['âŃ', 'ķ'] +['ï¬', 'Ĥ'] +['ï»', '¡'] +['ëij', '¬'] +['íĪ', '¼'] +['âĸ', '¸'] +['ê°', '¯'] +['ê¹', 'ħ'] +['ï½', '®'] +['ëĺ', '¥'] +['Ä', '¡'] +['íĮ', 'Ł'] +['Ð', 'Į'] 
+['ìĨ', 'Ł'] +['ïº', 'ĵ'] +['ï»', '¼'] +['Ã', 'Ľ'] +['ãĥ', '¾'] +['ëĮ', 'ĵ'] +['íĴ', 'ĭ'] +['ìķ', 'ĵ'] +['ï½', '¹'] +['ëĤ', '¡'] +['ðŁij', 'ĩ'] +['áº', '¼'] +['ãĢ', 'Ł'] +['ðŁĮ', 'Ł'] +['íĥ', 'ł'] +['ãĢ', 'Ĩ'] +['âĢ', 'Ł'] +['ë¸', 'IJ'] +['ðŁĮ', '¹'] +['ìł', '¼'] +['ðŁĵ', 'Į'] +['ìĶ', '¬'] +['âĹ', 'Ģ'] +['ðŁĴ', 'ĵ'] +['ê¹', 'İ'] +['ìĤ', 'IJ'] +['ìĶ', 'Į'] +['Ñ', 'Ľ'] +['âĶ', 'Ī'] +['ë²', '³'] +['ãİ', 'ŀ'] +['Õ', '¡'] +['íĤ', 'µ'] +['ð٤', 'Ķ'] +['ëĢ', 'Ķ'] +['ìĬ', 'IJ'] +['íĻ', 'ī'] +['âľ', '¦'] +['ëľ', '¯'] +['ìł', '¯'] +['ëĶ', '§'] +['Î', '¦'] +['Ë', 'Ī'] +['ìī', '¼'] +['âĹ', 'Ĭ'] +['ëľ', '©'] +['ëľ', '°'] +['ï¾', 'IJ'] +['ë¿', 'Ķ'] +['ìĹ', '®'] +['ì·', 'Į'] +['ïº', '§'] +['Î', 'Ĵ'] +['ëµ', 'Ļ'] +['ï»', 'Ĭ'] +['ì°', 'Ķ'] +['íİ', 'Ħ'] +['ðŁĴ', 'Ĺ'] +['áº', '´'] +['ì°', '¢'] +['íľ', '¼'] +['ê½', 'Ĥ'] +['ì±', 'Ķ'] +['ìī', '´'] +['âĸ', '¾'] +['íĪ', '°'] +['ëĭ', 'Ľ'] +['âĿ', '£'] +['ï½', 'ª'] +['ðŁĴ', 'ľ'] +['Ë', 'ĺ'] +['ãħ', '¤'] +['âĨ', 'Ĺ'] +['íĸ', 'Ħ'] +['âĻ', '¬'] +['ìķ', '°'] +['ïº', 'ľ'] +['âī', '¡'] +['ãĢ', 'ĵ'] +['ìij', '¥'] +['íĮ', 'į'] +['íī', 'ģ'] +['ë»', 'Ĺ'] +['íľ', 'ł'] +['íľ', '©'] +['âľ', 'Ī'] +['íĢ', 'Ħ'] +['ìĸ', 'ĩ'] +['ì¢', 'ĩ'] +['íŀ', 'Ļ'] +['ëª', '¹'] +['ãĤ', 'Ľ'] +['ðŁĺ', '±'] +['ëį', 'Ł'] +['à¹', 'ħ'] +['êµ', '¶'] +['Ù', '«'] +['ìĶ', 'ģ'] +['âľ', 'ª'] +['ï¾', 'Ī'] +['ðŁĻ', 'Į'] +['âļ', '¡'] +['Î', 'ļ'] +['ì¼', 'Ī'] +['ï¾', 'Ķ'] +['ï¾', 'Ĥ'] +['êµ', 'ī'] +['ïº', '»'] +['ðŁĴ', 'ĭ'] +['á¹', '£'] +['Ó', 'Ļ'] +['ìĨ', 'ľ'] +['ìĹ', '£'] +['âľ', '©'] +['ìľ', 'Ļ'] +['ïº', '°'] +['áº', '²'] +['ìŀ', '£'] +['âĿ', 'Į'] +['âĺ', 'ģ'] +['ìķ', 'İ'] +['Ä', '½'] +['Û', 'ģ'] +['ãĦ', '±'] +['ëŁ', '¿'] +['íĮ', '¸'] +['ê½', 'ī'] +['ìı', 'ł'] +['ðŁį', 'Ģ'] +['âĨ', 'Ķ'] +['ëŃ', '¡'] +['ï»', 'ģ'] +['ï¼', 'Ħ'] +['ðŁĴ', '¥'] +['âĺ', 'Ľ'] +['íĹ', '·'] +['ëij', '¡'] +['Î', 'ł'] +['Î', '¤'] +['âĦ', 'ĵ'] +['ïº', '·'] +['Î', 'Ļ'] +['ëı', 'Ķ'] +['ì§', '¤'] +['âĶ', 'ĥ'] +['ãĦ', '·'] +['Ç', 'Ĵ'] +['ðŁ¥', '°'] +['ëĶ', 'ķ'] +['ìļ', '¥'] +['ì¸', 'Ħ'] +['íĽ', 'Ķ'] +['ïº', 'ĩ'] 
+['ïº', '¬'] +['ðŁĺ', '¢'] +['ë¹', '¡'] +['ìĶ', '¹'] +['Å', '³'] +['Ë', 'Ŀ'] +['íİ', 'ij'] +['ï¾', 'ĵ'] +['ðŁĴ', 'ļ'] +['ëĬ', 'ij'] +['êº', '¾'] +['íĨ', '°'] +['Ã', '¿'] +['Ð', 'Ħ'] +['ëĮ', 'IJ'] +['ë½', 'Ģ'] +['ì·', 'Ħ'] +['ðŁ', 'ĵį'] +['ðŁĻ', 'Ī'] +['âĹ', 'Ī'] +['ê¿', 'ĩ'] +['ì¼', 'Ħ'] +['íİ', '«'] +['ðŁĩ', '·'] +['âĶ', 'ĭ'] +['âļ', 'ł'] +['ë±', 'ī'] +['ì', 'į°'] +['ìĻ', 'Ī'] +['É', 'ª'] +['ïº', 'ĭ'] +['ðŁĺ', 'ľ'] +['Î', 'Ł'] +['ðŁ', 'ĻĤ'] +['âļ', '½'] +['Å', 'Ī'] +['ë¹', 'Ķ'] +['íĮ', 'ľ'] +['à¹', 'ı'] +['ìĸ', '¹'] +['íĪ', 'Ń'] +['ðŁ¥', 'ĩ'] +['ãĦ', '´'] +['ëĶ', '¥'] +['ìŃ', 'Ī'] +['âĪ', 'Ĩ'] +['ëĸ', '³'] +['ë±', 'ĥ'] +['ìŀ', '¦'] +['ï»', 'IJ'] +['Î', 'ľ'] +['âľ', '§'] +['Ï', 'į'] +['ìł', 'ĵ'] +['âĹ', 'ķ'] +['ëĴ', 'Ģ'] +['ï»', 'Ģ'] +['ðŁĶ', '´'] +['ê½', 'ģ'] +['ëĮ', 'Ī'] +['ëİ', 'Į'] +['ãĤ', 'İ'] +['â¦', 'ģ'] +['ì½', '§'] +['ï¯', '¾'] +['âĿ', '¯'] +['à¸', 'ħ'] +['ðŁĻ', 'Ħ'] +['âĿ', 'Ģ'] +['ðŁĶ', '¹'] +['âĩ', 'IJ'] +['êµ', 'µ'] +['âĩ', 'Ķ'] +['ë¶', 'IJ'] +['ðŁĴ', 'Ľ'] +['Î', '¾'] +['íĥ', '¬'] +['âĿ', 'Ħ'] +['Ò', '£'] +['ãĢ', '°'] +['âĪ', 'ij'] +['âĺ', '¼'] +['âī', 'ł'] +['Ò', '¯'] +['ïº', '¯'] +['ê¿', '¨'] +['âľ', 'ĸ'] +['Ê', 'ĸ'] +['íĢ', 'Ģ'] +['ê¾', 'Ģ'] +['íĹ', 'Ŀ'] +['âĶ', '£'] +['ãİ', 'ľ'] +['ëĶ', 'Ľ'] +['ëľ', '¸'] +['ï', 'º«'] +['ê¿', '°'] +['ðŁĩ', '¹'] +['Ç', 'IJ'] +['Û', 'Ĵ'] +['ë£', '»'] +['ïº', 'ĸ'] +['Ñ', 'ļ'] +['ëĬ', 'ł'] +['Û', 'ķ'] +['ê¹', '¡'] +['ë¿', 'ľ'] +['ì²', '¼'] +['ï¨', 'ij'] +['ë¥', 'µ'] +['ìį', '¸'] +['íħ', 'ħ'] +['íij', '¹'] +['Ö', 'Ģ'] +['ï³', 'Į'] +['ãħ', '£'] +['ìij', '¤'] +['ì½', 'ķ'] +['ëķ', 'ł'] +['ðŁĮ', '¿'] +['íĥ', 'Ķ'] +['ìĽ', 'ģ'] +['Î', '¶'] +['âŀ', 'ľ'] +['ìĬ', 'ĺ'] +['íĽ', 'Ĺ'] +['ë©', '§'] +['ìī', 'ĺ'] +['Õ', '¶'] +['á¹', 'ĩ'] +['ðŁİ', 'ģ'] +['ï½', '¿'] +['ï¼', 'Ĥ'] +['á¼', 'IJ'] +['âľ', 'ķ'] +['âŀ', '¢'] +['ëĦ', '¨'] +['ì»', '«'] +['ì¯', 'Ķ'] +['ì°', 'ľ'] +['ðŁĴ', '°'] +['íħ', 'Ŀ'] +['ãİ', 'ı'] +['ë³', '¶'] +['Ò', 'ĵ'] +['âĨ', '³'] +['ìĥ', '´'] +['íģ', 'ĺ'] +['âĸ', 'Ģ'] +['ë²', 'Ļ'] +['à¸', 'ĥ'] +['á½', '¶'] +['Ä', 'ķ'] 
+['â¬', 'ĩ'] +['ë¤', 'ĺ'] +['ðŁİ', 'µ'] +['âľ', 'ļ'] +['ïº', 'ı'] +['Î', '¡'] +['âĹ', 'ī'] +['ðŁĴ', '«'] +['Ð', 'Ī'] +['ìĸ', 'Ħ'] +['ì§', 'Ļ'] +['ï»', 'ĥ'] +['ðĿij', 'Ĵ'] +['ëŃ', 'Ħ'] +['âĿ', '¥'] +['âĿ', 'ĸ'] +['âĺ', 'Ŀ'] +['Ê', '¹'] +['á¸', '¥'] +['âĢ', '¿'] +['ãħ', 'ħ'] +['ê¸', 'ģ'] +['ëķ', '¡'] +['ëį', '¥'] +['âĪ', '©'] +['ê»', 'Ħ'] +['ë®', 'Į'] +['Ò', '±'] +['âĪ', 'Ĺ'] +['ëł', 'Ļ'] +['ïº', 'Į'] +['Ë', 'IJ'] +['ðŁĺ', '³'] +['ðŁij', '©'] +['ðŁİ', '¶'] +['ì¿', 'µ'] +['ð٤', '©'] +['ê·', '¤'] +['ëĮ', 'Ķ'] +['ïº', 'IJ'] +['Ï', 'İ'] +['ì¶', '¥'] +['ï½', 'Ĭ'] +['á¹', 'Ń'] +['ë¤', '¼'] +['âĸ', '«'] +['ì§', 'ł'] +['á¼', 'Ģ'] +['ê»', 'ij'] +['ëĮ', 'ģ'] +['íĢ', '¸'] +['âĻ', 'Ľ'] +['ðŁĴ', 'ŀ'] +['âĸ', '°'] +['ðĿij', 'ĸ'] +['ëĿ', '¤'] +['à¤', '¦'] +['ì´', 'ĺ'] +['ðŁĺ', 'ĩ'] +['ëĶ', '¤'] +['Î', 'Ĺ'] +['ðŁĻ', 'ĩ'] +['Ë', 'Ľ'] +['ì©', '¡'] +['âĪ', '§'] +['Õ', '¥'] +['Ñ', 'Ļ'] +['ëIJ', '¬'] +['ëĸ', 'Ħ'] +['ðŁĮ', '·'] +['ìĹ', 'Į'] +['ðŁĺ', '¥'] +['ëĪ', '´'] +['ï»', 'ļ'] +['É', 'Ľ'] +['ïº', 'Ħ'] +['ï»', 'ı'] +['Å', 'Į'] +['ë²', 'ļ'] +['ìĭ', '£'] +['ïº', 'Ģ'] +['Î', 'ĵ'] +['ðŁĺ', 'Į'] +['Ë', 'Ļ'] +['ëŀ', 'ı'] +['ðŁĶ', '¸'] +['ðŁĵ', '·'] +['ëģ', '½'] +['íģ', '½'] +['ðŁĴ', '¡'] +['ðŁĮ', '±'] +['ëº', 'ı'] +['ìģ', 'ł'] +['ìĥ', 'IJ'] +['ëı', 'Ĺ'] +['ì¸', '°'] +['ëĪ', 'ķ'] +['Î', 'Ŀ'] +['âģ', 'ī'] +['ðŁĮ', '¼'] +['íĮ', 'ł'] +['âĭ', '¯'] +['áĥ', 'ĺ'] +['âľ', '¤'] +['ê±', 'Ķ'] +['íĮ', 'İ'] +['ðŁĴ', '¯'] +['ìı', 'Ļ'] +['íĹ', 'ī'] +['Ù', 'Ń'] +['ì½', '°'] +['ïº', '¿'] +['ï»', '±'] +['ì±', 'Į'] +['âĺ', 'ķ'] +['ðŁİ', 'Ģ'] +['Ä', 'Ŀ'] +['ë°', '§'] +['ìĤ', '¿'] +['áij', 'ķ'] +['ðŁį', 'ĥ'] +['âĩ', '¨'] +['Î', 'Ľ'] +['ë§', '´'] +['ë³', 'ķ'] +['á', 'ijIJ'] +['âĸ', 'ĵ'] +['ðĿ', 'ijľ'] +['âĻ', '»'] +['íĤ', '¥'] +['Õ', '¸'] +['ãĪ', '±'] +['ëº', 'Ģ'] +['ì²', '¸'] +['ïº', 'Ľ'] +['ðŁı', 'Ĩ'] +['ðŁĩ', 'ª'] +['âĿ', 'ĵ'] +['Ä', 'Ģ'] +['ì½', '¥'] +['ðŁĩ', '§'] +['á½', '·'] +['âľ', 'Ĥ'] +['ìŀ', '¼'] +['ï§', '¡'] +['ðŁĵ', '¸'] +['âĻ', '¯'] +['É', 'Ķ'] +['á½', '¸'] +['âĮ', 'ª'] +['ï»', 'ĸ'] +['ï¥', '§'] 
+['âļ', '«'] +['âĶ', 'Ĺ'] +['ðŁĮ', 'Ī'] +['ï»', '©'] +['ðŁĵ', '²'] +['Ï', 'Ī'] +['ðŁĺ', '¡'] +['ðĿij', 'İ'] +['ìľ', '½'] +['ì§', '¬'] +['ì§', 'Ĭ'] +['á½', '³'] +['ìĮ', '¤'] +['ëĤ', 'į'] +['âī', 'Ĵ'] +['ðŁij', '¨'] +['âĺ', 'ĺ'] +['Ó', '©'] +['âĤ', 'ĵ'] +['âĪ', 'Ĥ'] +['ï¹', 'ģ'] +['ðŁĴ', 'IJ'] +['íħ', 'ĥ'] +['ðŁı', '½'] +['ê·', 'Ħ'] +['ðŁĺ', 'ı'] +['ðŁĮ', 'º'] +['ðŁĺ', 'Ķ'] +['ï½', '«'] +['âľ', 'İ'] +['ëµ', 'Ī'] +['ðŁĩ', '¸'] +['âĢ', '£'] +['âŀ', 'Ķ'] +['ëĺ', 'ĺ'] +['ìĥ', '¬'] +['Ê', 'ĥ'] +['â¬', 'ħ'] +['ì©', 'IJ'] +['ðŁĻ', 'Ĩ'] +['ðŁİ', 'Ħ'] +['Ä', '¾'] +['âŁ', '¶'] +['áĥ', 'IJ'] +['âĺ', '»'] +['ì±', 'ķ'] +['ìģ', '©'] +['ë½', 'ķ'] +['ìº', '£'] +['ðŁij', 'Ī'] +['ðŁĻ', 'ĭ'] +['ï¾', 'ĸ'] +['Ò', 'ļ'] +['Õ', '«'] +['ìĮ', 'Ī'] +['ë²', '§'] +['ðŁĩ', '®'] +['ï½', 'Ŀ'] +['ðŁį', 'ģ'] +['ìĹ', '¥'] +['Ä', '³'] +['ë½', 'IJ'] +['íį', '½'] +['íĽ', 'ij'] +['âĤ', '¹'] +['ãħ', 'ģ'] +['ìĶ', '½'] +['ðŁĶ', 'ģ'] +['à¤', '¯'] +['ê¾', '¹'] +['ëī', 'ľ'] +['âĹ', '¡'] +['íķ', 'Į'] +['Î', 'ĺ'] +['ë£', '¹'] +['ìĻ', 'ĵ'] +['ðŁĩ', '¦'] +['ðŁij', 'Ģ'] +['âĶ', 'Į'] +['á¿', '¦'] +['ëĦ', 'Ľ'] +['ìĦ', '£'] +['ìŃ', 'Ļ'] +['ï±', 'ł'] +['Î', 'ŀ'] +['Ê', '»'] +['á¿', '¶'] +['âĿ', 'Ŀ'] +['ê±', 'Ģ'] +['ëĸ', '´'] +['ãĦ', '¹'] +['ðŁĴ', 'İ'] +['Ï', '¹'] +['âĽ', 'ħ'] +['ï»', 'ķ'] +['ãĥ', '±'] +['ï½', 'Ľ'] +['ëĮ', 'ķ'] +['ë¹', '½'] +['ì¥', 'Ķ'] +['ì¿', '¤'] +['ðŁĸ', '¤'] +['Ñ', 'Ĵ'] +['ê¹', 'į'] +['ëİ', 'Ģ'] +['ìĭ', '¯'] +['ë»', '¤'] +['ðŁĵ', 'ŀ'] +['ðŁĵ', '£'] +['ðŁĺ', 'Ŀ'] +['ìį', '¹'] +['ìĹ', '¡'] +['ì°', 'IJ'] +['á½', 'IJ'] +['ï»', 'Ī'] +['âľ', 'į'] +['Ä', 'ı'] +['ðŁĮ', 'ŀ'] +['âĦ', '¦'] +['ê½', 'Ŀ'] +['ë»', 'ĺ'] +['ìĪ', '±'] +['âĶ', 'ĺ'] +['ðŁĮ', '»'] +['âĤ', '´'] +['âŀ', '¨'] +['íIJ', 'ģ'] +['ê', '¶Ī'] +['âĺ', '¢'] +['ðŁĺ', 'Ī'] +['ï½', '©'] +['âĦ', 'Ĺ'] +['ê°', 'Ń'] +['ê°', '¸'] +['ë»', 'ij'] +['ì¥', '´'] +['ì»', '¥'] +['ï¤', 'Ĭ'] +['ï»', 'Ĵ'] +['ðŁĺ', 'ķ'] +['âĺ', 'Ķ'] +['ìĺ', 'IJ'] +['ðŁļ', 'Ĺ'] +['ëĹ', 'Ħ'] +['ë§', 'ı'] +['Õ', '½'] +['âĸ', '»'] +['âŁ', 'µ'] +['ìī', '°'] +['ï»', 'ij'] +['âĻ', '©'] 
+['Î', '¥'] +['ðŁĺ', '£'] +['âĬ', 'Ĥ'] +['ãħ', 'Ĥ'] +['ìħ', '¸'] +['íı', 'Ħ'] +['âľ', '½'] +['ì¦', 'Ļ'] +['âĸ', '£'] +['ê±', 'į'] +['ê¿', 'ĭ'] +['ì«', 'Ħ'] +['ìº', 'ĩ'] +['ðŁĩ', 'µ'] +['ðŁij', 'ij'] +['âľ', 'ĺ'] +['ðĿij', 'Ľ'] +['ìį', '½'] +['ìº', 'ī'] +['ï¬', 'µ'] +['ðŁĶ', 'º'] +['âĦ', '®'] +['íĥ', '¤'] +['ðŁĩ', 'º'] +['ðŁĴ', 'µ'] +['íħ', '¨'] +['ï½', 'ij'] +['Î', '¨'] +['ìĥ', '¹'] +['ìĸ', 'ķ'] +['ì¹', 'µ'] +['ðŁĵ', '±'] +['à¤', 'µ'] +['ðŁij', 'Ĭ'] +['ðŁĴ', 'Ħ'] +['ðŁĴ', 'Ŀ'] +['ãĮ', 'Ķ'] +['ìĻ', 'ģ'] +['Ð', 'ĩ'] +['à®', 'IJ'] +['âĸ', '¹'] +['á´', 'Ľ'] +['âĹ', 'ĺ'] +['ëº', '¨'] +['íĥ', 'ī'] +['ìĸ', 'Į'] +['ðŁIJ', '¶'] +['ãĤ', 'ij'] +['Ë', 'ĩ'] +['Å', 'ı'] +['á½', '¹'] +['ìħ', '§'] +['ï¹', '°'] +['ðĿij', '¡'] +['ðŁĶ', 'Ŀ'] +['ðŁĺ', '»'] +['ðŁĴ', 'ĥ'] +['ð٤', '¦'] +['ðŁį', 'Ĵ'] +['íĢ', 'µ'] +['âľ', 'Ĩ'] +['ë¹', '´'] +['ï§', '¤'] +['ï»', 'Ļ'] +['á´', 'Ĺ'] +['ðŁĮ', '´'] +['Í', '¾'] +['ëĮ', 'ij'] +['ì¨', 'ĭ'] +['ìµ', '¸'] +['ðŁİ', 'Ī'] +['ðŁı', 'ł'] +['á½', '±'] +['Û', 'Ĩ'] +['á¿', 'ĸ'] +['âĢ', 'Ľ'] +['ì°', '¼'] +['íķ', '¥'] +['íĹ', '´'] +['ðŁĩ', '¬'] +['ì°', 'Ŀ'] +['âĪ', 'ł'] +['ï¼', 'ĩ'] +['âĬ', 'Ļ'] +['âĿ', 'ij'] +['ëĦ', 'ĭ'] +['ëŀ', 'Ĺ'] +['ë°', 'ī'] +['ìĹ', 'Ĭ'] +['ì¢', 'Ĩ'] +['íĮ', '¥'] +['ï°', '²'] +['ðŁĵ', 'ĸ'] +['ðŁĺ', '®'] +['âļ', 'ª'] +['ðŁĺ', 'ļ'] +['âĿ', 'ŀ'] +['ðĿij', 'Ł'] +['ðŁİ', 'Ĥ'] +['Å', 'ķ'] +['áIJ', 'Ī'] +['êº', '½'] +['ì±', 'ł'] +['ïº', 'Ŀ'] +['ê¿', 'ī'] +['áĥ', 'ł'] +['ðŁı', 'ĥ'] +['ðŁĴ', '¸'] +['âĿ', 'ģ'] +['âĹ', '¾'] +['Ú', 'ª'] +['á¹', 'ĥ'] +['íĬ', '¬'] +['ðŁĩ', '±'] +['íİ', 'Ń'] +['ðŁĺ', 'ŀ'] +['ë¾', '°'] +['á¹', 'Ľ'] +['ëĽ', '¸'] +['âĿ', 'Ĥ'] +['êĴ', '³'] +['âĶ', 'IJ'] +['íĵ', '°'] +['âŀ', 'ł'] +['ê´', 'ĺ'] +['ëħ', 'ĺ'] +['ë»', '¥'] +['ì¾', 'ħ'] +['ðŁĺ', 'IJ'] +['âĪ', 'ª'] +['ðŁij', 'ģ'] +['âĪ', '´'] +['âĹ', 'ģ'] +['ëº', 'IJ'] +['ìŀ', '¤'] +['ì±', 'Ĺ'] +['ðŁı', '¾'] +['Î', '§'] +['á½', '»'] +['âŀ', '¥'] +['ìŁ', 'Ī'] +['ï»', 'ī'] +['âĸ', 'Į'] +['ãĥ', '®'] +['ð٤', '¤'] +['âĩ', 'ĵ'] +['ì¼', 'ł'] +['á´', 'ı'] +['ë§', '¬'] +['ë»', '£'] +['ðŁĴ', 
'¬'] +['ðŁį', 'ĵ'] +['Ä', '¸'] +['Ù', '¹'] +['Ê', '¿'] +['á½', '°'] +['ëķ', 'ľ'] +['ì°', '¡'] +['ì°', '»'] +['íİ', 'į'] +['ðŁİ', '¯'] +['ðŁį', 'Ĥ'] +['ðŁij', '§'] +['âĻ', '¢'] +['áĨ', 'ŀ'] +['âĻ', '§'] +['âļ', 'ľ'] +['âľ', 'ī'] +['ëĵ', '¦'] +['ëŃ', '£'] +['ìĪ', 'ı'] +['ìĵ', '±'] +['Å', 'Ń'] +['Ê', 'Ĭ'] +['âĴ', '¸'] +['âĩ', '©'] +['ðŁĴ', 'Ķ'] +['Õ', 'µ'] +['Ð', 'ī'] +['Ò', '»'] +['ë§', '£'] +['ìĽ', 'ľ'] +['ì¿', '¡'] +['íĽ', 'ħ'] +['íĽ', '¤'] +['ïº', '¢'] +['âľ', 'ĭ'] +['âĪ', 'Ī'] +['ðŁĮ', 'į'] +['Ê', 'ľ'] +['ëĬ', 'ª'] +['ëĴ', '¹'] +['ïº', '²'] +['âĸ', 'Ħ'] +['ãħ', 'Ī'] +['ëļ', '¤'] +['íİ', '©'] +['âĪ', '¨'] +['ð٤', 'ª'] +['áĥ', 'ļ'] +['ê³', '¶'] +['íĬ', 'ķ'] +['ðŁĺ', '¬'] +['âĪ', '«'] +['ðŁij', 'ĭ'] +['Ò', 'IJ'] +['íĬ', '¿'] +['ðŁĶ', 'µ'] +['ðŁĴ', '¨'] +['ðŁĮ', 'Ļ'] +['ëĩ', '©'] +['âľ', '³'] +['ë¨', 'ģ'] +['ëº', 'Ħ'] +['ìĻ', 'ij'] +['ìº', 'ħ'] +['íı', 'Ī'] +['ðĿij', 'Ļ'] +['ðŁĴ', 'ĺ'] +['ãİ', '¥'] +['âĿ', 'ı'] +['âľ', '°'] +['ï¯', '¿'] +['ëµ', 'IJ'] +['ì¼', 'IJ'] +['ïº', '±'] +['Õ', '´'] +['ï¬', 'Ģ'] +['âľ', '´'] +['ð٤', 'Ń'] +['ðŁij', 'Ĩ'] +['âĽ', 'Ķ'] +['ê·', 'ĵ'] +['ìĮ', 'Į'] +['ð٤', '·'] +['Û', 'Ķ'] +['ð٧', '¡'] +['ðŁĺ', 'ĵ'] +['Î', 'ĸ'] +['âı', '°'] +['ê²', 'ľ'] +['ëĭ', '³'] +['ëİ', 'ħ'] +['ë°', 'Ī'] +['ï®', 'IJ'] +['ðŁı', '¡'] +['âĨ', 'ª'] +['âĵ', 'Ķ'] +['âľ', 'Ĭ'] +['Ï', '²'] +['Ü', 'IJ'] +['ðŁĩ', '³'] +['Ö', 'Ĥ'] +['âľ', 'ı'] +['ìĸ', 'Ĺ'] +['ì«', 'Ļ'] +['ðŁĺ', '²'] +['Ä', 'Ń'] +['âĻ', 'Ń'] +['âĶ', 'ı'] +['âĹ', 'Į'] +['ðŁĺ', '¯'] +['áµ', 'Ĵ'] +['íĬ', 'ł'] +['Ä', '·'] +['Ê', 'ģ'] +['à¤', 'Ł'] +['á¹', 'ģ'] +['á¼', '°'] +['á¿', 'Ĩ'] +['â', '«'] +['â«', '¸'] +['ëį', '«'] +['ì³', 'ĩ'] +['ì¼', '¤'] +['íĽ', '¨'] +['ðŁĴ', 'Ł'] +['Ê', 'Ģ'] +['Ê', '³'] +['ëĵ', 'IJ'] +['âķ', '°'] +['âĿ', 'ĩ'] +['Ç', 'Ģ'] +['Ç', 'Ķ'] +['É', '´'] +['âĺ', 'ļ'] +['âĺ', 'ľ'] +['ê¶', 'Ĥ'] +['ì«', 'Ĵ'] +['ì±', 'Ī'] +['ðŁĩ', '¨'] +['ðŁİ', '¥'] +['ðŁĵ', 'Ŀ'] +['Ä', '§'] +['ðĿ', 'ijIJ'] +['Û', 'Ī'] +['à¤', '¬'] +['ì¬', 'IJ'] +['íĹ', '¥'] +['âĻ', '¨'] +['ðŁį', '´'] +['ï¹', 'ı'] +['Ë', 'ĭ'] 
+['ðŁ¥', 'º'] +['âĸ', '¨'] +['íĻ', 'ĭ'] +['âĪ', 'ħ'] +['ëģ', 'Ļ'] +['ëŀ', 'ł'] +['ìĨ', '¥'] +['âĢ', 'ĸ'] +['ð٤', 'ĺ'] +['ðŁIJ', '»'] +['áµ', 'ķ'] +['Ç', 'Ŀ'] +['âĺ', 'ı'] +['ïº', 'ļ'] +['ï»', 'Ĥ'] +['ðŁļ', '©'] +['ìĪ', 'Ł'] +['Ë', 'Ĭ'] +['â¤', 'µ'] +['ðŁĴ', '§'] +['ã', 'ħį'] +['ë©', '©'] +['Æ', '¬'] +['Î', 'ĩ'] +['âĩ', '§'] +['âĵ', 'ļ'] +['ìĤ', '¯'] +['ìĪ', '¯'] +['ëĨ', 'ĭ'] +['âľ', '¯'] +['ðŁļ', 'Ģ'] +['Ú', 'ĺ'] +['Ú', '¨'] +['âľ', 'Ń'] +['ê²', 'ħ'] +['íĮ', '°'] +['íľ', 'Ļ'] +['ðŁĮ', 'Ĭ'] +['ðŁİ', 'ĵ'] +['ðŁĺ', 'Ļ'] +['Ë', 'ĥ'] +['ðŁĴ', 'ģ'] +['ðŁij', 'İ'] +['âĺ', '¹'] +['ðŁĺ', '«'] +['ðŁĴ', '»'] +['ëĤ', 'µ'] +['ìĿ', 'Ĭ'] +['íĮ', '»'] +['Ò', '³'] +['á½', '²'] +['âŀ', 'ŀ'] +['ëĤ', 'ij'] +['ëĿ', 'Ī'] +['ì£', '¤'] +['ï»', '¯'] +['ðŁĩ', '©'] +['ðŁ¥', '³'] +['âĴ', '¼'] +['ð٦', 'ĭ'] +['âĺ', 'Ĥ'] +['ðŁĺ', '°'] +['ðŁĻ', 'ĥ'] +['ðŁĺ', 'Ĵ'] +['Û', 'İ'] +['Ï', 'ķ'] +['á¸', '¤'] +['ë£', '½'] +['ìĬ', '¥'] +['ðĿij', 'ī'] +['É', 'IJ'] +['ðŁį', 'İ'] +['âķ', '¯'] +['âķ', '¹'] +['àº', '²'] +['ï¾', 'ł'] +['ë¹', 'ķ'] +['ïº', 'Ĩ'] +['Ê', 'º'] +['Ó', '§'] +['âĨ', 'ł'] +['ëĥ', 'ĩ'] +['ìİ', 'Ī'] +['ìŁ', '¤'] +['ï±', '¢'] +['âķ', '¬'] +['âĺ', 'ł'] +['ðŁİ', 'Ĭ'] +['ãį', 'į'] +['ãİ', 'İ'] +['âĺ', '°'] +['âľ', 'ĥ'] +['ãħ', 'ī'] +['ë¯', 'Ī'] +['ë¹', '¤'] +['ìı', 'Ń'] +['ðĿij', '¢'] +['ðŁIJ', '¾'] +['Å', 'ĭ'] +['ðŁij', '¶'] +['âĶ', 'Ľ'] +['ï¿', '¢'] +['áĥ', '¡'] +['Ä', '¼'] +['Å', 'Ĩ'] +['Ñ', 'IJ'] +['ìĥ', 'Ľ'] +['ìĺ', 'Į'] +['ì±', '¤'] +['íħ', 'ģ'] +['íļ', 'ĥ'] +['ï³', 'Ĭ'] +['ðĿij', 'Ķ'] +['ðŁĩ', '«'] +['âĭ', '°'] +['ðŁĺ', '¨'] +['âĤ', '©'] +['Õ', '¬'] +['á¸', 'į'] +['á»', '´'] +['âĨ', 'ĺ'] +['âĺ', '¯'] +['ãħ', 'ı'] +['ìł', '¬'] +['âĻ', 'Ķ'] +['ðŁĶ', 'Ķ'] +['ðŁĺ', 'ł'] +['ðŁĻ', 'Ĭ'] +['à®', 'ľ'] +['á¹', 'ħ'] +['âĹ', 'IJ'] +['âĿ', 'Ī'] +['âŀ', '½'] +['ìĥ', 'ħ'] +['ðĿij', 'ł'] +['Æ', '¢'] +['âĭ', 'Ļ'] +['ê°', 'Ľ'] +['ëĿ', 'µ'] +['ë£', 'Ł'] +['ìı', 'ľ'] +['ïº', 'ģ'] +['ðŁĴ', 'Ń'] +['âĬ', 'ĥ'] +['ðŁIJ', '°'] +['ãħ', 'Į'] +['Ü', 'ĵ'] +['âŀ', 'ķ'] +['á½', 'ģ'] +['ìķ', '³'] +['ðĿij', 'Ŀ'] +['ðŁİ', 
'¬'] +['É', '¡'] +['à¤', 'Ĺ'] +['áIJ', 'ī'] +['ì©', 'ľ'] +['ì¶', '§'] +['ï³', 'ī'] +['ï»', 'ħ'] +['ðĿIJ', 'ŀ'] +['à¤', '¶'] +['ðŁĵ', '¢'] +['ðŁį', 'ĭ'] +['ðŁĴ', 'ħ'] +['ï¾', 'ķ'] +['â¬', 'Ĩ'] +['âĪ', 'µ'] +['ð٤', 'ij'] +['áĥ', '£'] +['Æ', 'Ħ'] +['Ñ', '¹'] +['á¼', 'Ķ'] +['ê°', 'ł'] +['ê´', 'Į'] +['ê·', 'IJ'] +['ëĽ', '´'] +['ì±', 'ĺ'] +['ï®', 'Ń'] +['ïº', '¹'] +['ïº', '¾'] +['âľ', 'Ĺ'] +['âĿ', '¦'] +['ðŁij', '¦'] +['áĥ', 'Ĺ'] +['Ù', '²'] +['á½', '´'] +['âĪ', 'ı'] +['âľ', '®'] +['ê¹', '°'] +['ë²', 'µ'] +['ìĦ', 'Ģ'] +['ì©', 'Ŀ'] +['ïº', 'ŀ'] +['ïº', '½'] +['ðŁĩ', 'Ń'] +['Ë', 'Ĥ'] +['ðŁį', 'ij'] +['ðŁį', 'Į'] +['ðŁĶ', '»'] +['ê¹', '¬'] +['ìĬ', 'Ń'] +['ìľ', '·'] +['ðŁĽ', 'ij'] +['Ç', '§'] +['ë¼', 'Ľ'] +['ïº', '¡'] +['ïº', 'º'] +['ðĿij', 'ļ'] +['ðŁĵ', '¦'] +['ðŁĶ', 'İ'] +['ðŁĹ', 'ĵ'] +['áĥ', 'Ķ'] +['âľ', 'Ĵ'] +['âľ', '¡'] +['ðŁĮ', 'µ'] +['âĶ', 'ķ'] +['ëĢ', 'Ŀ'] +['ðŁį', 'Ĭ'] +['âĺ', 'ĥ'] +['ìĺ', 'ħ'] +['à¦', '¬'] +['ð٦', 'ģ'] +['âİ', '¯'] +['ðŁIJ', 'ķ'] +['Ñ', '¿'] +['à¥', '¤'] +['à¼', 'ĭ'] +['ê·', 'Ī'] +['ì«', 'Į'] +['ðŁĩ', '°'] +['âĿ', 'ī'] +['ì«', 'Ģ'] +['íĿ', 'Ħ'] +['ðĿIJ', '¢'] +['ðŁļ', '¨'] +['âĻ', '¤'] +['ðŁĺ', '©'] +['ðŁį', 'į'] +['ðŁĺ', 'ij'] +['ðŁļ', 'ļ'] +['Ö', 'Ħ'] +['ë', '«'] +['ë«', '¼'] +['à¤', 'ı'] +['á¿', '·'] +['âĮ', '©'] +['âĺ', 'IJ'] +['âŀ', '£'] +['ê¸', '±'] +['ê¼', '¿'] +['ëĦ', 'Ŀ'] +['ìı', '´'] +['ìļ', '¤'] +['ì¿', '±'] +['íİ', 'IJ'] +['ðŁĴ', '¢'] +['ì´', 'IJ'] +['âĩ', 'ij'] +['âĶ', 'ĵ'] +['âģ', '¾'] +['Ü', 'Ŀ'] +['ðŁ', 'į°'] +['â´', '°'] +['Æ', 'ı'] +['Ï', 'Ł'] +['Ú', 'º'] +['Û', 'ĥ'] +['áĦ', 'Ĵ'] +['âĪ', 'Ł'] +['âĿ', 'į'] +['ãĦ', '²'] +['ìľ', 'ħ'] +['ì¤', 'ı'] +['ðŁĩ', '²'] +['êº', 'Ħ'] +['ðŁİ', '¤'] +['âľ', '£'] +['â¸', 'Ŀ'] +['ï¸', 'µ'] +['àº', '§'] +['áĢ', 'Ļ'] +['âķ', 'ł'] +['Õ', '¯'] +['âı', '©'] +['ðĿij', '£'] +['ðŁĴ', '£'] +['Å', 'ĺ'] +['à¥', 'IJ'] +['âģ', 'ĥ'] +['âĮ', 'ĺ'] +['ê»', 'Į'] +['ìĮ', 'Ķ'] +['ðĿij', 'ĺ'] +['ð٤', 'ĵ'] +['Õ', '¿'] +['à¤', 'Ń'] +['âĮ', 'ļ'] +['âľ', 'Ŀ'] +['ðŁIJ', '¼'] +['Ë', 'Į'] +['âķ', 'ļ'] +['ï¦', 'Ĺ'] +['âĿ', 
'ķ'] +['âķ', '£'] +['ðŁIJ', '±'] +['à®', '¤'] +['Ñ', '¾'] +['à¤', 'ļ'] +['à¤', 'ľ'] +['ìĪ', 'Ħ'] +['ìļ', 'ľ'] +['ðŁİ', '®'] +['É', 'Ĵ'] +['Ú', '·'] +['àº', 'į'] +['âĨ', 'µ'] +['â', 'Īĺ'] +['âĿ', 'Ĭ'] +['ë¿', 'į'] +['ìIJ', 'Ī'] +['ìļ', 'ĺ'] +['ì¯', '§'] +['íĥ', '¯'] +['ìĸ', 'ı'] +['ï¸', '°'] +['ðŁĩ', '¯'] +['ð٧', 'ļ'] +['ðŁĺ', 'µ'] +['ðŁĺ', '·'] +['ðŁĮ', '³'] +['àº', '¥'] +['Ä', 'ī'] +['Ä', '¥'] +['âľ', '¶'] +['á¿', '¾'] +['âĬ', '±'] +['âĺ', '¾'] +['ê°', 'ī'] +['ê¼', '°'] +['ëº', 'ij'] +['ðŁĶ', 'Ĭ'] +['ðŁĸ', 'IJ'] +['Å', '¤'] +['Ò', '«'] +['à®', '®'] +['âĮ', 'Ī'] +['âĹ', 'Ĺ'] +['ëĦ', 'µ'] +['ëħ', 'ľ'] +['ëľ', '¹'] +['ðĿij', '¥'] +['ðŁĴ', '¿'] +['ðŁĽ', 'Ĵ'] +['Ê', 'Ĵ'] +['áŀ', 'ĵ'] +['ðŁIJ', 'Ŀ'] +['ð٦', 'Ħ'] +['ðŁį', '·'] +['âĺ', 'Ł'] +['ï¸', '¶'] +['ð٤', 'Ł'] +['Ô', '±'] +['âĨ', '²'] +['âĪ', 'İ'] +['âľ', '«'] +['ëĩ', '½'] +['ëı', 'IJ'] +['ëķ', 'Ħ'] +['ï¦', '³'] +['ï§', 'Ŀ'] +['ïº', 'Ļ'] +['ðŁij', '»'] +['ðŁĵ', 'º'] +['êµ', '¼'] +['ìĮ', '©'] +['ðŁĮ', '²'] +['È', '±'] +['íĶ', 'ķ'] +['ðŁĺ', '¤'] +['ãĮ', '¢'] +['Ê', 'Ķ'] +['à¤', '¡'] +['á¼', 'Ī'] +['ëİ', 'ĥ'] +['ë©', '±'] +['ë®', 'Ī'] +['ðĿIJ', '«'] +['âĬ', 'ķ'] +['ëĥ', 'ł'] +['ë»', '¬'] +['íĭ', 'Ķ'] +['Õ', '¤'] +['á¼', '±'] +['âľ', '¥'] +['âĺ', 'Ħ'] +['âĪ', '¥'] +['âļ', 'ķ'] +['ðŁij', 'Ħ'] +['ðŁİ', 'ħ'] +['àº', 'Ļ'] +['âĶ', '¬'] +['á½', 'µ'] +['Õ', '¾'] +['Ö', 'ģ'] +['âĹ', 'Ķ'] +['ê¿', 'į'] +['ëĸ', 'µ'] +['ë©', 'İ'] +['ë®', '´'] +['ìķ', '´'] +['áĥ', 'ľ'] +['á¼', '¡'] +['âĶ', 'Ĭ'] +['âķ', '®'] +['âĹ', '¼'] +['ðŁį', '¾'] +['ðŁĽ', 'į'] +['ðŁij', 'Ĺ'] +['ð٤', 'ŀ'] +['âľ', 'Ħ'] +['Õ', 'Ģ'] +['à¦', '²'] +['Ë', 'ī'] +['âŁ', '¨'] +['Ä', '¯'] +['Ï', 'Ĭ'] +['á´', 'ľ'] +['ë¹', '³'] +['ï³', 'ĭ'] +['ï¿', 'ł'] +['Ä', 'ª'] +['âĤ', '¸'] +['âľ', '±'] +['ê»', 'IJ'] +['ëĭ', '»'] +['ë§', '¸'] +['ìŀ', '¿'] +['ì©', '¨'] +['ì', 'ŃIJ'] +['ì°', '¿'] +['íħ', 'Ł'] +['ðĿIJ', '§'] +['ðĿij', 'ij'] +['ðŁĮ', 'İ'] +['ðŁĵ', '®'] +['ðŁķ', 'Ķ'] +['âĹ', 'Ļ'] +['âĹ', '»'] +['âŀ', '§'] +['ìŁ', 'Ŀ'] +['âľ', '¬'] +['ãĥ', '°'] +['âģ', 'Ī'] +['â', 'ĵĺ'] 
+['ðŁ', 'ĴĮ'] +['ï¬', 'ĥ'] +['àº', 'Ķ'] +['ìĶ', '°'] +['ðŁĺ', 'ª'] +['×', 'Ģ'] +['ìĥ', '¨'] +['ïŃ', 'ĭ'] +['ðŁį', 'ķ'] +['ðŁĺ', '´'] +['Ï', '³'] +['á¼', 'Ħ'] +['á½', 'ħ'] +['âĩ', '¢'] +['âķ', 'Ń'] +['ìĺ', '»'] +['íĬ', '¤'] +['Ü', 'ĺ'] +['â¤', '´'] +['âĹ', 'į'] +['áŀ', 'Ł'] +['ðŁį', 'º'] +['áŀ', 'ļ'] +['ðŁı', 'Ĭ'] +['ðŁIJ', '·'] +['Ê', 'Į'] +['á½', 'º'] +['âģ', '»'] +['ê½', 'Į'] +['ëĪ', 'Ĺ'] +['ë', 'Ĺı'] +['ì¿', '°'] +['íĢ', '¼'] +['íį', 'ħ'] +['ï·', '²'] +['ðŁĮ', 'ı'] +['ðŁį', '«'] +['ðŁį', '³'] +['ðŁİ', '°'] +['ðŁij', '°'] +['ðŁĴ', '²'] +['á¥', 'Ļ'] +['ðŁIJ', 'Ł'] +['ï¿', '¡'] +['ðŁĹ', '£'] +['ðŁį', 'ľ'] +['âľ', '²'] +['ãİ', '¢'] +['ðŁĶ', '°'] +['á¼', '¸'] +['á½', 'ij'] +['Ä', 'İ'] +['áĦ', 'Ģ'] +['âĻ', 'ķ'] +['ëł', 'Ŀ'] +['ìĪ', '´'] +['ïŃ', 'Ń'] +['Ó', 'ľ'] +['Ô', 'Ģ'] +['ëĢ', 'ľ'] +['ëĥ', 'Ķ'] +['ìĬ', 'Ľ'] +['ì«', 'ij'] +['ìº', '¥'] +['ìº', '¬'] +['ðĿij', '¦'] +['ðŁĶ', '¶'] +['ì¾', '¨'] +['ðĿIJ', 'ļ'] +['ðŁį', '»'] +['ðŁĴ', 'į'] +['ð٤', '¡'] +['ðŁķ', 'Ĭ'] +['â½', 'ĩ'] +['âĵ', 'IJ'] +['ðŁį', 'Ń'] +['ðŁį', 'ª'] +['ðŁĶ', 'Ĩ'] +['Ò', '¡'] +['á´', 'ĩ'] +['É', 'Ĺ'] +['Ü', 'Ķ'] +['âĦ', 'İ'] +['âĿ', 'ĥ'] +['ëĹ', 'Ģ'] +['ï²', 'Ķ'] +['ïº', 'Ī'] +['ðĿIJ', '»'] +['ðŁĴ', 'Ĭ'] +['ðŁļ', '«'] +['Ñ', '°'] +['Ñ', '³'] +['à¤', '·'] +['âĹ', 'ł'] +['ðŁij', '¤'] +['ï¾', 'ĩ'] +['âĺ', 'ĵ'] +['ðŁį', 'µ'] +['ð٤', '¨'] +['âĸ', 'Ń'] +['à®', '´'] +['Ü', '¢'] +['Ü', '¬'] +['à´', '®'] +['ðŁķ', 'º'] +['Ô', '¹'] +['Õ', '£'] +['à´', '¯'] +['á', '´Ģ'] +['âĮ', 'ī'] +['âľ', 'IJ'] +['âŀ', '¦'] +['ê¹', '½'] +['ëĮ', 'ľ'] +['ðŁı', '¥'] +['ðŁĵ', '©'] +['Ò', '¹'] +['Ó', 'ĺ'] +['à¤', 'ħ'] +['âĿ', '§'] +['Æ', 'Ĺ'] +['âĹ', '½'] +['ðŁij', '«'] +['ðŁİ', '§'] +['ðŁij', '£'] +['âľ', '»'] +['ðŁĻ', 'ħ'] +['ðŁĺ', 'ĸ'] +['ðŁĴ', '®'] +['àº', '°'] +['ðŁĶ', 'ľ'] +['ðŁį', 'Ħ'] +['ð٤', 'Ŀ'] +['á', 'ĥĿ'] +['áŀ', 'Ģ'] +['âĩ', '¦'] +['Ê', '¾'] +['Ò', '®'] +['Õ', '¼'] +['à¤', 'Ĩ'] +['âĹ', 'ħ'] +['âļ', 'ĵ'] +['âļ', 'ĸ'] +['ê¿', '©'] +['ë¯', 'Ħ'] +['ìIJ', 'IJ'] +['ìŀ', '°'] +['ì§', 'Ń'] +['íĭ', 'ĭ'] +['íİ', '¨'] +['íĻ', '§'] 
+['ï²', 'ij'] +['ðŁİ', 'Ĺ'] +['Ù', '³'] +['ðŁij', '¸'] +['à¦', '®'] +['ðŁij', 'ķ'] +['Ú', 'µ'] +['âĢ', '¾'] +['âŀ', '°'] +['ðŁij', '¯'] +['ðŁİ', '¼'] +['ðŁı', 'ģ'] +['Ä', 'º'] +['Ê', 'ı'] +['Ú', '³'] +['âı', '±'] +['ê½', 'Ī'] +['ëĿ', 'Į'] +['ìĮ', 'ī'] +['ìĹ', '·'] +['ìŀ', '´'] +['íĹ', '¹'] +['íľ', '¨'] +['ðĿĹ', '²'] +['ðŁĮ', 'IJ'] +['ðŁİ', 'Ļ'] +['ðŁı', 'µ'] +['íĽ', 'Ļ'] +['ðĿij', 'ħ'] +['ðŁĺ', '¶'] +['âĵ', 'ħ'] +['âķ', '¥'] +['ðŁį', 'ı'] +['ï¦', 'İ'] +['Õ', '©'] +['ðĿIJ', 'Ħ'] +['Ó', '£'] +['Ú', '¿'] +['âĻ', 'ļ'] +['ðŁĶ', 'Ĺ'] +['á¸', '«'] +['âĭ', '®'] +['âĸ', '¦'] +['âĽ', '½'] +['âľ', 'µ'] +['ãħ', 'Ĩ'] +['ãħ', 'Ĭ'] +['ëĦ', 'Ļ'] +['ëĿ', '¨'] +['ë¥', 'Ħ'] +['ìĦ', '¦'] +['ì§', '°'] +['ì§', '¹'] +['íī', 'Ī'] +['ï§', 'ij'] +['ï»', 'ĩ'] +['ðŁĮ', '¾'] +['ðŁı', 'ĸ'] +['ðŁIJ', 'ij'] +['ðŁĴ', '³'] +['ðŁĵ', 'Ĩ'] +['Û', 'ĩ'] +['Ü', 'ķ'] +['á½', '½'] +['ëĦ', 'ľ'] +['à´', '²'] +['à´', '³'] +['àº', 'Ń'] +['áĥ', 'Ľ'] +['âĿ', 'Ķ'] +['âij', 'ħ'] +['áĥ', '¥'] +['ðŁĵ', 'ħ'] +['âŀ', '³'] +['á´', 'µ'] +['ï¹', '¡'] +['ï¹', '¶'] +['Î', 'Ĩ'] +['à¤', '¥'] +['áī', 'µ'] +['âĿ', 'Ļ'] +['âĿ', '±'] +['ëī', 'ł'] +['ëİ', 'ł'] +['ëı', 'Ľ'] +['ë¿', 'ħ'] +['ìĶ', '¸'] +['íij', '¯'] +['íŀ', 'ī'] +['íŀ', 'Ľ'] +['ï§', 'Ħ'] +['ïŃ', 'ĺ'] +['ïº', '¦'] +['ï»', '¸'] +['ðĿij', 'Ĥ'] +['ðĿij', 'ı'] +['Ï', 'ij'] +['Ú', 'ł'] +['áĢ', 'Ķ'] +['áŀ', 'Ķ'] +['á¹', '¢'] +['ëĦ', '¸'] +['ðĿIJ', '¨'] +['ðŁĩ', '´'] +['Õ', '°'] +['ðŁij', 'ł'] +['ðŁį', 'Ĩ'] +['ðŁı', 'Ģ'] +['ðŁ', 'ijIJ'] +['ðŁį', 'ĩ'] +['ðŁIJ', '£'] +['áĪ', 'Ń'] +['Ü', 'ª'] +['ðŁ', 'ĮĢ'] +['áŀ', 'ĺ'] +['âĩ', 'Ħ'] +['ðĿIJ', 'Ģ'] +['Ê', 'Ļ'] +['âĶ', '¼'] +['ðŁı', '¿'] +['Æ', '·'] +['È', 'ł'] +['Ñ', '½'] +['âĤ', '¨'] +['ê´', 'Ń'] +['ê¹', '»'] +['ëĶ', '¨'] +['ìĪ', 'Ģ'] +['ì¾', '°'] +['íĨ', 'Ī'] +['ï®', '§'] +['ï¯', '½'] +['ðŁĶ', 'ħ'] +['ðŁĶ', '®'] +['Å', '¢'] +['Ê', '°'] +['Ñ', '¸'] +['à¤', '£'] +['âĬ', 'Ĺ'] +['ëª', 'Ħ'] +['ï¹', '·'] +['ïº', 'ħ'] +['ðĿIJ', 'µ'] +['ðŁĮ', '¶'] +['ðŁĵ', '°'] +['ðŁĶ', '·'] +['ðŁĸ', 'Ĵ'] +['ð٤', '²'] +['ëī', '©'] +['ðŁİ', 'Ĩ'] +['ð٧', 
'IJ'] +['ðŁį', '®'] +['âĨ', 'º'] +['âĿ', '¢'] +['ðŁij', 'ª'] +['ðŁij', '±'] +['âĨ', '¡'] +['áŀ', 'ı'] +['Ú', 'ķ'] +['ðŁį', '¹'] +['ðŁĴ', 'Ģ'] +['Ë', '®'] +['Ó', '¨'] +['Ö', 'ħ'] +['à¤', 'ĩ'] +['âĤ', '¡'] +['âĪ', 'ķ'] +['âĺ', 'ī'] +['ê¹', '¼'] +['ê¼', 'IJ'] +['ì½', '¸'] +['ðĿIJ', '¬'] +['ðŁı', 'ħ'] +['ðŁij', 'Ļ'] +['ðŁĴ', 'ī'] +['ð٤', 'Ļ'] +['È', 'ĺ'] +['É', '³'] +['É', '¹'] +['Ù', 'º'] +['áĢ', 'Ħ'] +['á¿', '³'] +['âļ', 'ĺ'] +['âĿ', 'Ĩ'] +['ëĨ', 'ī'] +['ìĸ', 'į'] +['ìĺ', 'ĩ'] +['ì¥', 'ĺ'] +['íĸ', 'ħ'] +['íĻ', 'ij'] +['ï®', 'Ĭ'] +['ï¿', 'Ń'] +['ðĿĴ', 'IJ'] +['ðĿĹ', '¢'] +['ðŁĶ', 'ĸ'] +['ðŁĶ', '¨'] +['ðŁļ', 'ij'] +['ðŁļ', '²'] +['Æ', '¸'] +['âĹ', '¥'] +['ðĿIJ', 'Ń'] +['ðŁį', '½'] +['âĹ', 'ij'] +['âĵ', 'ĩ'] +['ðŁĶ', '±'] +['âľ', '¼'] +['ï¹', 'ĥ'] +['âķ', '±'] +['ãĢ', 'Ĺ'] +['ðŁı', 'ĭ'] +['ðŁļ', '´'] +['ðĿIJ', '®'] +['Ä', 'ļ'] +['Õ', 'ı'] +['Ä', '¶'] +['áĥ', 'ij'] +['á¹', '¬'] +['Ä', 'Ī'] +['Ä', 'Ĵ'] +['Ò', '°'] +['Ó', 'ķ'] +['â', 'IJ'] +['âIJ', '£'] +['âĹ', '¢'] +['âļ', 'Ļ'] +['ãħ', 'Ĺ'] +['ê°', '¬'] +['ê³', 'ª'] +['ê»', 'Ģ'] +['ëĦ', '´'] +['ëİ', 'ģ'] +['ëĿ', 'Ķ'] +['ë¬', '½'] +['ëŃ', 'į'] +['ìĩ', '³'] +['ì°', '¹'] +['íĮ', '¹'] +['íŀ', 'Ŀ'] +['ï®', 'ĭ'] +['ï', '¶Ī'] +['ðĿĴ', 'Ĥ'] +['ðŁ¥', 'Ģ'] +['ð٦', 'ħ'] +['Ê', 'ĺ'] +['á¼', 'ij'] +['âģ', 'İ'] +['ðŁį', 'ŀ'] +['âĨ', 'ĸ'] +['âĨ', 'Ļ'] +['ðŁİ', 'ĥ'] +['âĦ', '¡'] +['âĭ', '±'] +['ðŁĶ', 'į'] +['à²', '¨'] +['áµ', 'ĥ'] +['âĶ', '«'] +['â¦', '¿'] +['ðŁĩ', '»'] +['Æ', '¤'] +['Ò', 'ı'] +['Ò', '·'] +['Û', 'ī'] +['à®', 'ķ'] +['á¸', '³'] +['ï¬', '±'] +['ðŁĨ', 'Ķ'] +['Ú', 'Ń'] +['Û', '¦'] +['áħ', '¡'] +['âĦ', '¹'] +['ê¿', 'İ'] +['ëķ', 'Ķ'] +['ë¼', 'ī'] +['ìļ', '§'] +['ì²', 'µ'] +['ì´', '¨'] +['íĬ', 'Ī'] +['íĸ', 'IJ'] +['ðĿĹ', 'ĺ'] +['ðŁĩ', '¿'] +['ðŁİ', 'ĸ'] +['ðŁij', 'ħ'] +['ðŁ', 'ĵĺ'] +['ðŁļ', 'Ļ'] +['ðŁĽ', 'µ'] +['à¶', '½'] +['âĽ', 'µ'] +['ðĿIJ', '³'] +['ðĿIJ', '¸'] +['âļ', 'Ķ'] +['ðŁij', 'Ń'] +['Ó', 'ij'] +['âĶ', '¯'] +['ðŁħ', '¿'] +['ðŁĺ', '¹'] +['ï¿', '«'] +['â¼', '¤'] +['ðŁĴ', 'ĩ'] +['ðŁĵ', 'İ'] +['ðŁĸ', 'ĭ'] +['à¦', '¸'] 
+['ðĿIJ', 'į'] +['Ä', '²'] +['Ï', 'ĭ'] +['Ñ', '¬'] +['Ú', '¬'] +['Ü', 'Ĵ'] +['á´', '¬'] +['ï¨', 'Ħ'] +['É', '£'] +['Ë', 'ij'] +['Ï', 'µ'] +['Ò', 'Ŀ'] +['Û', '¥'] +['Ü', 'ł'] +['à¹', 'Ľ'] +['áĥ', 'ķ'] +['áĬ', 'ķ'] +['á¾', '¶'] +['âĤ', '·'] +['âĩ', '¾'] +['âķ', '©'] +['âĸ', 'IJ'] +['âĺ', 'ª'] +['âĺ', '®'] +['âĿ', 'ļ'] +['âĿ', 'Ń'] +['âŀ', '±'] +['âµ', 'İ'] +['ãı', 'Ĭ'] +['ë©', 'ĵ'] +['ìĹ', '¾'] +['ìª', 'Ħ'] +['íĵ', 'Į'] +['íķ', '¼'] +['ïŃ', '¬'] +['ðĿij', 'Ĩ'] +['ðĿij', 'ŀ'] +['ðĿĸ', 'Ĭ'] +['ðŁİ', '¸'] +['ðŁı', 'Ħ'] +['ðŁij', 'µ'] +['ðŁĴ', 'ł'] +['ðŁĶ', 'ĺ'] +['ðŁ¥', 'Ĥ'] +['Å', 'ª'] +['à·', 'ĥ'] +['á´', '¼'] +['âĬ', '°'] +['ë³', 'ı'] +['ë´', '£'] +['ï¥', 'ľ'] +['ðŁĵ', 'Ī'] +['ðŁķ', '¯'] +['ð٧', 'Ģ'] +['âĻ', 'IJ'] +['ðŁĨ', 'Ĺ'] +['ðŁĵ', 'ķ'] +['ð٧', 'ģ'] +['Ü', '«'] +['âĿ', 'IJ'] +['Õ', 'ķ'] +['à½', 'ķ'] +['âŀ', 'Ŀ'] +['à¦', 'ķ'] +['ðĿIJ', '¶'] +['É', '¢'] +['Î', 'Ħ'] +['áĨ', '¢'] +['âĤ', '±'] +['Õ', 'į'] +['à¡', 'ķ'] +['á´', '°'] +['á¸', '©'] +['âĽ', '·'] +['âĿ', '®'] +['ê¡', 'ĵ'] +['ëı', '¤'] +['ëĹ', 'IJ'] +['ëµ', 'Į'] +['ìij', 'Ī'] +['íı', '¿'] +['íĹ', 'µ'] +['ðĿIJ', 'İ'] +['ðŁĨ', 'ĺ'] +['ðŁı', 'Ł'] +['É', '¥'] +['Õ', '»'] +['à¡', 'Ķ'] +['à¤', 'ĸ'] +['á´', '¸'] +['âİ', 'Ļ'] +['âİ', '¥'] +['âı', '³'] +['ëģ', 'ķ'] +['ëĬ', 'ī'] +['ì¡', 'į'] +['ì¹', '¡'] +['ï¦', '¶'] +['ï¬', 'Ł'] +['ï®', '«'] +['ï®', '¯'] +['ï±', 'ĥ'] +['ï', '·»'] +['ïº', 'µ'] +['ðĿĹ', 'Ķ'] +['ðĿĹ', '¡'] +['ðŁİ', '¨'] +['ðŁĶ', 'Ĵ'] +['Ú', 'Ľ'] +['à¤', '§'] +['âŀ', '¹'] +['áĢ', 'Ģ'] +['ðŁį', 'ħ'] +['âĹ', '¤'] +['à¤', 'ł'] +['ðŁIJ', '¥'] +['áĥ', 'Ĵ'] +['ðŁı', 'Ŀ'] +['ðŁį', '¼'] +['ãĮ', '§'] +['âĿ', 'Ľ'] +['ðŁIJ', 'Ī'] +['à¦', '¯'] +['áĢ', 'ŀ'] +['ãĢ', 'ĸ'] +['áŀ', 'Ļ'] +['à¦', 'ª'] +['Õ', 'Ĩ'] +['âĬ', 'Ĩ'] +['âľ', '¾'] +['ðŁIJ', 'Ĺ'] +['ï¹', '¿'] +['Ä', '¦'] +['Ü', 'Ł'] +['à²', 'ł'] +['à²', '¥'] +['áŀ', 'ī'] +['á´', '¥'] +['á´', '©'] +['á½', 'Ģ'] +['á½', '¡'] +['âĨ', 'ķ'] +['âŀ', '¯'] +['ê¡', 'ij'] +['ëij', '£'] +['ë±', 'Į'] +['ìĪ', 'ij'] +['ìľ', 'Ķ'] +['ìŀ', '½'] +['ì¨', 'į'] +['ðĿij', 'Ģ'] +['ðŁĮ', 
'Į'] +['ðŁį', '¦'] +['ðŁį', '©'] +['ðŁIJ', 'ļ'] +['ðŁĵ', 'Ĵ'] +['ðŁĵ', '¹'] +['ðŁ¥', 'ij'] +['Ä', 'ĭ'] +['Ë', 'Ĺ'] +['Ñ', '«'] +['Õ', '¢'] +['Ú', '°'] +['â', 'ĮĢ'] +['âĹ', 'Ĥ'] +['âĹ', '£'] +['âľ', 'Ľ'] +['âĿ', 'Ĵ'] +['âĿ', 'ĺ'] +['âŀ', 'Ļ'] +['âŀ', '²'] +['ãİ', 'į'] +['ê¡', 'IJ'] +['ëŀ', 'ĸ'] +['ìĬ', 'Ŀ'] +['ìĽ', '¤'] +['ì¡', 'ĭ'] +['ì¨', '°'] +['íĹ', 'Ļ'] +['ï¥', '¸'] +['ï³', 'į'] +['ï»', 'İ'] +['ðĿij', 'ĵ'] +['ðŁĵ', 'Ĭ'] +['ðŁļ', '¼'] +['ï¦', 'ģ'] +['ðĿķ', 'Ĵ'] +['ðŁ', 'ijľ'] +['ðŁij', '¿'] +['ðŁĩ', '½'] +['à·', 'Ħ'] +['âĸ', '´'] +['ãį', 'ī'] +['âĬ', 'ĩ'] +['ð٧', '¸'] +['Ú', '¡'] +['â¾', 'ĥ'] +['ðŁĹ', '»'] +['âĵ', 'ij'] +['ð٤', '¸'] +['ð٤', '¯'] +['êĴ', '°'] +['ðĿIJ', 'ĵ'] +['âĶ', '´'] +['êĴ', '±'] +['áĢ', 'ĺ'] +['â', 'ĽĦ'] +['ï¹', '¹'] +['Ó', 'Ķ'] +['áĥ', '±'] +['Ü', '¡'] +['ß', 'ŀ'] +['âĻ', 'ı'] +['âľ', '¸'] +['ìij', '¨'] +['ðĿIJ', 'Ŀ'] +['ðĿIJ', '¥'] +['ðŁį', 'ī'] +['ðŁij', '¼'] +['ðŁ¥', 'Ŀ'] +['Æ', 'Ķ'] +['Ý', '¬'] +['à¤', '«'] +['àº', 'ļ'] +['á´', '´'] +['á½', 'ĸ'] +['âĤ', '¶'] +['âİ', '¢'] +['âĿ', 'ħ'] +['âŁ', '«'] +['ãİ', 'Ľ'] +['ë®', '¨'] +['ëº', 'Į'] +['ë¼', 'ĺ'] +['ìĨ', 'Ŀ'] +['ìľ', '³'] +['ìŀ', 'Į'] +['ì£', 'Ĺ'] +['ìª', 'ĺ'] +['ì»', '¹'] +['ï·', '¼'] +['ïº', 'Ĥ'] +['ðĿIJ', '´'] +['ðĿIJ', '¼'] +['ðŁĮ', 'ļ'] +['ðŁı', '«'] +['ðŁĴ', '¤'] +['ðŁĴ', '¶'] +['ðŁĴ', '¼'] +['Ê', 'ķ'] +['Ê', '½'] +['â²', 'Ł'] +['ãī', 'ł'] +['ê¡', 'Ĵ'] +['ëľ', 'Ģ'] +['ìĥ', '¾'] +['ì¸', '¤'] +['ï¥', 'ģ'] +['ðĿļ', 'Ĭ'] +['ðŁļ', 'ĥ'] +['âŀ', 'Ľ'] +['ìħ', '´'] +['áĦ', 'ĭ'] +['âĩ', 'Ĺ'] +['ï§', '·'] +['âĺ', 'ĸ'] +['ðŁIJ', '¦'] +['â¸', 'ľ'] +['ðŁĴ', '´'] +['ð٤', 'ļ'] +['ãĬ', 'Ĺ'] +['âĮ', 'Ľ'] +['áĪ', 'Ľ'] +['à¼', 'º'] +['â½', 'ī'] +['ðŁı', '¢'] +['âĵ', 'ŀ'] +['âĺ', '½'] +['ãĢ', 'Ļ'] +['ð٤', '®'] +['Å', 'IJ'] +['áĥ', '¬'] +['ðĿĹ', '»'] +['ðŁį', 'ĸ'] +['Æ', 'Ĭ'] +['Ê', 'Ł'] +['ß', 'ĭ'] +['à¤', 'ĭ'] +['áµ', 'Ķ'] +['á¿', 'ĥ'] +['âĦ', 'ī'] +['âĮ', 'ĭ'] +['âı', '²'] +['âĵ', 'Ī'] +['âĵ', '¢'] +['âķ', 'Ķ'] +['âļ', 'ij'] +['âĿ', 'ĭ'] +['âĿ', 'İ'] +['â', 'µľ'] +['âµ', '£'] +['ëĴ', 'Ī'] +['ëľ', 
'ģ'] +['ë¶', 'ĩ'] +['ìį', '»'] +['ìĺ', 'Ń'] +['ì§', '¢'] +['íĹ', 'Ģ'] +['ï§', 'Ĭ'] +['ï', '¬¸'] +['ï±', '¡'] +['ðĿIJ', 'º'] +['ðĿij', '§'] +['ðĿĺ', '¦'] +['ðŁĵ', '¥'] +['ðŁĺ', 'Ł'] +['ðŁ¥', 'IJ'] +['Ä', 'ĸ'] +['É', '¨'] +['áĢ', 'IJ'] +['áĥ', 'ĵ'] +['áº', 'ĵ'] +['á¼', '¶'] +['á½', 'Ħ'] +['âĤ', '¤'] +['âĮ', 'ľ'] +['âĮ', 'Ł'] +['âİ', 'ł'] +['âĽ', '¸'] +['âµ', 'į'] +['âµ', 'ı'] +['âµ', 'ĵ'] +['ãĢ', 'ĺ'] +['ë', '·¸'] +['íħ', '¼'] +['ï¦', 'Į'] +['ïŃ', 'Ħ'] +['ïŃ', 'İ'] +['ðĿĻ', 'ļ'] +['ðĿļ', 'ĺ'] +['à¼', 'ĵ'] +['ëŃ', 'ħ'] +['áIJ', 'Ľ'] +['ãİ', '¾'] +['ï¨', 'Ģ'] +['ðŁĹ', '½'] +['âĻ', 'ŀ'] +['Ë', 'ĸ'] +['âĹ', 'ŀ'] +['ð٤', '«'] +['ðŁĺ', 'Ĺ'] +['ï½', '¦'] +['ð٤', '¢'] +['âģ', 'ĩ'] +['ãĢ', 'µ'] +['ðŁį', 'Ķ'] +['áĬ', 'ł'] +['ðŁĺ', '¼'] +['ðĿĹ', '®'] +['ðŁIJ', '³'] +['ðĿIJ', 'ĭ'] +['ðŁĨ', 'ļ'] +['ðŁĶ', 'Ľ'] +['Ñ', '»'] +['Ü', '¨'] +['à®', '²'] +['âľ', 'ŀ'] +['âµ', 'Ļ'] +['êµ', '£'] +['ì¸', '¨'] +['ðĿ', 'IJľ'] +['ðĿĺ', '°'] +['ðŁĶ', '½'] +['Ç', '»'] +['Ç', '¿'] +['Ê', 'ĩ'] +['Î', 'IJ'] +['Ð', 'Ģ'] +['Ñ', '¡'] +['Ñ', '²'] +['Ò', 'Ĵ'] +['Ù', '¶'] +['ß', 'ķ'] +['à¶', '±'] +['áIJ', 'ģ'] +['âģ', 'ŀ'] +['âĸ', '§'] +['âĽ', 'Ī'] +['âľ', 'ľ'] +['âľ', '¹'] +['âŁ', '¹'] +['â¤', 'ĩ'] +['ê²', 'Ĭ'] +['ê¾', 'ľ'] +['ë¯', 'IJ'] +['ë³', 'IJ'] +['ìħ', '©'] +['ìIJ', '¬'] +['ìij', '¹'] +['ï¤', 'Ķ'] +['ï¦', 'ļ'] +['ï¬', 'ł'] +['ïŃ', 'Ķ'] +['ïº', '¶'] +['ðĿĴ', 'ı'] +['ðĿĸ', 'Ĩ'] +['ðĿĹ', '¶'] +['ðŁı', 'Ĥ'] +['ðŁIJ', '½'] +['ðŁĴ', '©'] +['ðŁĵ', '½'] +['ðŁĹ', '¨'] +['ðŁĹ', 'º'] +['ðŁĺ', '¸'] +['ðŁ¥', '§'] +['Å', 'Ĺ'] +['Ê', 'İ'] +['Ò', 'Ļ'] +['×', '²'] +['à¤', 'Ī'] +['á¼', '´'] +['á¿', 'ij'] +['âµ', 'ī'] +['ãħ', 'ĵ'] +['ì½', '´'] +['ðĿĸ', 'ĵ'] +['ðŁĵ', 'Ĺ'] +['ðŁĶ', 'ª'] +['ðŁĸ', 'į'] +['Ï', 'Ĵ'] +['ðŁij', '¬'] +['áĥ', 'Ļ'] +['âĨ', '¬'] +['âĶ', '¤'] +['âĽ', '¹'] +['âĻ', 'Ł'] +['ðŁļ', '¶'] +['ðŁij', '¾'] +['âĪ', 'ĭ'] +['ðŁIJ', '¯'] +['à¼', 'İ'] +['âľ', '·'] +['ï¨', 'Ļ'] +['âĶ', '»'] +['ðŁij', '¹'] +['áĦ', 'ī'] +['àº', 'ª'] +['â¾', 'ı'] +['â½', 'ħ'] +['ãİ', 'ĸ'] +['Ñ', '´'] +['Õ', '®'] +['Ú', '¼'] +['áĢ', 
'ķ'] +['áĨ', '¼'] +['ëŃ', 'ı'] +['ðŁIJ', '¸'] +['ðŁļ', '£'] +['Æ', 'Ŀ'] +['Ô', '»'] +['áĥ', '¢'] +['ðŁį', '¯'] +['É', '¦'] +['Õ', '¦'] +['âĻ', 'ĭ'] +['ï¬', '«'] +['ðĿĹ', '¦'] +['Ç', 'ļ'] +['É', '±'] +['à¤', 'ī'] +['á´', 'Ħ'] +['âĻ', 'ĵ'] +['âĽ', '°'] +['âŁ', 'ª'] +['ëĥ', 'ĺ'] +['ë¢', '¸'] +['ìĤ', 'ij'] +['ï®', 'Ķ'] +['ðĿķ', 'ĸ'] +['ðĿĹ', '§'] +['ðŁĩ', '¼'] +['ðŁĵ', 'ĭ'] +['ðŁļ', 'ľ'] +['ðŁ¥', '¤'] +['Ä', '®'] +['Å', '·'] +['ß', 'Ĭ'] +['à¥', '¥'] +['à®', 'ª'] +['áŀ', 'Ħ'] +['áµ', 'Ģ'] +['á¸', 'ħ'] +['á¼', '¢'] +['âĪ', 'Ŀ'] +['âĬ', '¹'] +['âĴ', '¶'] +['âķ', '´'] +['âĽ', '±'] +['âĽ', '³'] +['âĽ', 'º'] +['âŀ', 'Ł'] +['ãı', 'Ħ'] +['ê¸', 'Ķ'] +['ê¹', 'Ł'] +['ëĩ', '°'] +['ë¹', '»'] +['ìĤ', '¥'] +['ìĽ', '»'] +['ì°', 'Ł'] +['íĥ', '°'] +['íĨ', 'º'] +['íļ', '½'] +['ï¤', '´'] +['ï¥', '¾'] +['ï³', 'Ŀ'] +['ðĿIJ', '¦'] +['ðĿĴ', 'ľ'] +['ðĿĴ', 'Ł'] +['ðĿļ', 'Ĺ'] +['ðŁİ', 'Ń'] +['ðŁı', 'ĵ'] +['ðŁı', '³'] +['ðŁı', 'º'] +['ðŁIJ', 'į'] +['ðŁij', 'ĥ'] +['ðŁĴ', 'ı'] +['ð٤', 'ĸ'] +['ð٤', 'µ'] +['Õ', '²'] +['âµ', 'Ķ'] +['ëĺ', '¬'] +['ï¦', '£'] +['Ê', 'Ĥ'] +['áĨ', '«'] +['áŀ', 'ij'] +['ðĿĸ', 'İ'] +['ðĿĹ', 'ĸ'] +['áĦ', 'ĥ'] +['âĩ', 'ł'] +['áĢ', '¡'] +['à½', 'Ħ'] +['âŀ', '¸'] +['ï¦', 'Ļ'] +['âĩ', 'ļ'] +['ðŁIJ', '¬'] +['ðŁIJ', '¢'] +['â¾', 'Ĵ'] +['ðŁIJ', '¤'] +['ðŁĶ', '«'] +['ãĢ', 'ŀ'] +['ï¸', 'º'] +['ðŁĺ', 'º'] +['â½', '´'] +['ðŁĨ', 'ķ'] +['âģ', '¿'] +['ðŁį', '¨'] +['à²', 'ķ'] +['ðŁļ', 'ĺ'] +['áŀ', 'ħ'] +['à¦', 'ħ'] +['áŀ', '¢'] +['à¨', 'ľ'] +['â', 'ļĮ'] +['ãĢ', '½'] +['à·', '´'] +['âĵ', 'Ľ'] +['áĢ', 'ľ'] +['ìĨ', '¨'] +['Ë', '©'] +['Ü', 'Ĺ'] +['âĭ', '¼'] +['ðŁĻ', 'ī'] +['Å', 'Ĭ'] +['É', 'ĵ'] +['Ê', '²'] +['Î', '°'] +['Ñ', '¼'] +['Ô', '¿'] +['à¡', 'IJ'] +['à¼', 'ľ'] +['à½', '¦'] +['á¶', 'ľ'] +['âĤ', '²'] +['âĨ', '¨'] +['âĬ', '¥'] +['âķ', '§'] +['âĻ', 'ľ'] +['ãĭ', '¡'] +['ë´', '¬'] +['ë¶', 'ij'] +['ìī', '¿'] +['ìİ', 'ħ'] +['ìł', '±'] +['ì°', '§'] +['ï²', '¡'] +['ðĿĴ', 'Ľ'] +['ðĿķ', '£'] +['ðĿĹ', 'ľ'] +['ðŁį', '²'] +['ðŁİ', '©'] +['ðŁIJ', 'IJ'] +['ðŁIJ', 'ł'] +['ðŁij', '½'] +['ðŁĴ', 'ij'] 
+['ðŁĵ', 'ľ'] +['ðŁķ', 'µ'] +['ðŁ', 'ļĮ'] +['ðŁĽ', '£'] +['Ê', 'ĭ'] +['Ó', '¯'] +['Ù', '¸'] +['ß', 'Ķ'] +['ß', 'Ļ'] +['à¡', 'ĵ'] +['á´', 'į'] +['á¸', '¿'] +['âı', 'º'] +['âĸ', '¥'] +['ë¤', '½'] +['íľ', 'ij'] +['ðĿIJ', '¹'] +['ðĿĸ', 'Ķ'] +['ðĿļ', 'İ'] +['ðŁĵ', 'Ħ'] +['ð٦', '·'] +['Æ', 'ĥ'] +['à¦', 'Ł'] +['âĮ', 'Ĥ'] +['âĺ', 'Ń'] +['â²', 'ļ'] +['ëĿ', 'ķ'] +['ðŁİ', '£'] +['à®', 'ĩ'] +['à½', 'Ĩ'] +['áħ', 'µ'] +['áĹ', 'ľ'] +['âĢ', '½'] +['âĮ', '£'] +['âģ', '½'] +['ðŁĵ', '¬'] +['ð٤', '§'] +['âĩ', 'ª'] +['â½', '£'] +['âĹ', 'Ł'] +['ï¨', 'Ĺ'] +['êĴ', 'ª'] +['ðŁĽ', 'Ģ'] +['Ç', 'Ĥ'] +['ðŁ¥', '¶'] +['ðŁİ', 'į'] +['ï¿', '©'] +['ðŁij', 'Ĵ'] +['áµ', 'Ī'] +['ï¸', '¿'] +['áħ', '©'] +['â¾', '¦'] +['à°', '¤'] +['á´', 'ĸ'] +['à¨', '¬'] +['àº', 'Ĺ'] +['à¼', '»'] +['Ñ', 'º'] +['à¨', 'ª'] +['á´', '³'] +['ðĿIJ', 'Ī'] +['à»', 'Ģ'] +['á´', '¿'] +['âĤ', 'į'] +['âĩ', '¡'] +['âĽ', 'ª'] +['ðĿIJ', 'Ĥ'] +['ðĿĴ', 'ķ'] +['ðŁ', 'IJľ'] +['Ê', 'į'] +['Ñ', '±'] +['à½', 'ĥ'] +['ë®', 'IJ'] +['ìĽ', '¡'] +['ìľ', 'ģ'] +['ðĿIJ', '¿'] +['ðĿķ', 'ł'] +['ðŁij', 'Ľ'] +['Æ', 'ª'] +['Ï', 'º'] +['Ó', '¬'] +['Ù', '¿'] +['Ý', '£'] +['àª', 'ī'] +['à®', '¹'] +['à½', 'ij'] +['áĨ', '¯'] +['áµ', 'ĩ'] +['âĩ', '¥'] +['âı', 'ª'] +['âĻ', '°'] +['âļ', 'Ń'] +['âļ', '¾'] +['ãħ', 'Ħ'] +['êĢ', '°'] +['ê°', 'Ĺ'] +['ê²', 'ĭ'] +['ê²', '»'] +['ê¶', 'ľ'] +['ê¼', 'ĩ'] +['ê½', '¹'] +['ëĤ', 'Ł'] +['ëħ', 'Ī'] +['ëĭ', '¢'] +['ë§', 'Ł'] +['ëª', 'Ĩ'] +['ëµ', 'Ģ'] +['ì½', '±'] +['íĩ', 'ĺ'] +['íľ', 'ľ'] +['ï§', '¾'] +['ï±', 'µ'] +['ï²', '¢'] +['ï²', '¤'] +['ðĿĴ', 'Ĭ'] +['ðĿĺ', '¯'] +['ðŁį', 'Ĺ'] +['ðŁı', 'į'] +['ðŁIJ', 'ĺ'] +['ðŁĵ', '¡'] +['ðŁĶ', 'ŀ'] +['ð٤', '³'] +['ðŁ¥', 'ģ'] +['ðŁ¥', 'Ĺ'] +['ð٦', 'Ĭ'] +['Ä', 'µ'] +['Æ', '¦'] +['Ç', 'µ'] +['É', '¯'] +['Î', 'ı'] +['Õ', 'Ħ'] +['Ü', '¥'] +['à½', 'ģ'] +['á¨', 'ł'] +['âķ', '«'] +['ãİ', 'ī'] +['ë·', '´'] +['ìĨ', 'İ'] +['ìİ', 'Į'] +['ì£', 'µ'] +['íĽ', 'ł'] +['ï§', 'ª'] +['ï³', 'ı'] +['ï»', 'º'] +['ðĿij', 'ģ'] +['ðĿij', 'ĩ'] +['ðĿĴ', 'Ĩ'] +['ðŁİ', 'ł'] +['ðŁIJ', 'Ķ'] +['ðŁij', 'Ł'] +['Å', 'ĸ'] +['à¤', 
'Į'] +['á¾', '½'] +['ê¦', 'Ĵ'] +['à®', 'Ł'] +['á´', '±'] +['ðŁı', '°'] +['ðŁIJ', 'ŀ'] +['à½', 'Ģ'] +['áĢ', 'ħ'] +['âĬ', '¿'] +['ðŁIJ', '§'] +['áĽ', 'ģ'] +['â¼', 'Ī'] +['âĶ', '¿'] +['ðŁ¥', '´'] +['â¼', '¿'] +['ð٧', 'ľ'] +['ãħ', '¿'] +['âĦ', '«'] +['ãĢ', '³'] +['ãĬ', 'Ļ'] +['â¼', 'Ģ'] +['ï', '¦¬'] +['ðŁı', '¬'] +['ðŁĵ', '»'] +['áĬ', 'Ľ'] +['áĦ', 'ħ'] +['àº', 'Ĭ'] +['àº', 'Ľ'] +['áħ', '³'] +['ðŁij', '®'] +['à®', '±'] +['âĺ', 'ĩ'] +['ðĿIJ', 'ı'] +['à´', 'µ'] +['à»', 'ģ'] +['à½', 'ı'] +['à½', '¢'] +['á¥', '±'] +['âĤ', '£'] +['ï¥', '¦'] +['ïŃ', 'Ļ'] +['ï´', '©'] +['ï¹', 'Ĥ'] +['ðŁį', '£'] +['ðŁķ', '¹'] +['Ï', 'ĸ'] +['à¶', '¸'] +['àº', '¢'] +['áĭ', 'Ń'] +['âİ', 'Ŀ'] +['âĹ', 'Ŀ'] +['âĻ', 'Ī'] +['âĻ', 'İ'] +['ê½', '¥'] +['ì³', 'Ķ'] +['ì¼', 'ij'] +['ï±', '°'] +['ðĿij', 'ĥ'] +['ðŁĮ', 'ª'] +['ðŁį', '¡'] +['Å', 'İ'] +['Ê', '¦'] +['Ñ', '§'] +['Ó', 'İ'] +['Ô', '´'] +['Ú', 'Ī'] +['ß', 'ĵ'] +['ß', '§'] +['à¤', 'Ķ'] +['áĪ', '«'] +['áĪ', 'µ'] +['áĹ', '©'] +['á´', 'ł'] +['á¼', 'ł'] +['âĢ', 'Ĺ'] +['âģ', 'ij'] +['âĦ', 'ı'] +['âĸ', 'ĩ'] +['â²', '£'] +['ãĦ', '³'] +['ãī', '®'] +['ê³', 'Ĺ'] +['ëĦ', 'Ĵ'] +['ëĸ', '«'] +['ë¡', 'Ħ'] +['ë¹', '°'] +['ë½', 'ģ'] +['ìĦ', 'ģ'] +['ìĮ', 'ĺ'] +['ìŁ', 'Į'] +['ì³', 'ī'] +['ì¼', 'ķ'] +['ï¬', '»'] +['ï³', 'İ'] +['ï¹', '¸'] +['ï¹', '¾'] +['ðĿIJ', 'Ĩ'] +['ðĿij', '·'] +['ðĿĽ', '¼'] +['ðŁİ', 'ı'] +['ðŁİ', 'ŀ'] +['ðŁIJ', 'Ļ'] +['ðŁij', 'Ĥ'] +['ðŁĵ', 'ģ'] +['ðŁĸ', '±'] +['ðŁļ', 'į'] +['ðŁļ', '§'] +['ðŁĽ', '¡'] +['ð٤', 'Ĵ'] +['ðŁ¥', 'ŀ'] +['ðŁ¥', '©'] +['ð٦', 'Ģ'] +['ð٦', 'ĸ'] +['Ë', '¢'] +['Ü', 'ļ'] +['à®', 'µ'] +['áĢ', 'ģ'] +['áī', '°'] +['âı', 'Ń'] +['âĻ', '¿'] +['ê³', 'ĺ'] +['ëı', 'Ŀ'] +['ëķ', 'ĥ'] +['ìħ', 'Į'] +['ìĴ', '¸'] +['ìĽ', 'Ł'] +['íħ', 'Ħ'] +['íľ', '«'] +['ï§', 'ĺ'] +['ï¿', '¬'] +['ðŁı', '·'] +['ðŁĶ', '§'] +['ðŁ¥', 'Ī'] +['Æ', 'ĸ'] +['áŀ', 'ĩ'] +['áŀ', 'ĸ'] +['âģ', 'º'] +['âĹ', 'ľ'] +['âŀ', '©'] +['ê¦', 'Ń'] +['ëĻ', '¤'] +['ïŃ', '¼'] +['ðĿĻ', 'ĸ'] +['ðĿĻ', '£'] +['ðĿĻ', '¤'] +['ðŁĮ', 'Ŀ'] +['ðŁĶ', 'ij'] +['ðŁĽ', 'ł'] +['àº', 'ĩ'] +['âĺ', '£'] +['ãĦ', 
'¨'] +['ðĿĸ', 'Ĺ'] +['Ó', 'ĵ'] +['âĨ', '£'] +['ðŁ¥', 'ī'] +['ðŁĮ', 'ł'] +['ðŁĺ', '½'] +['ãİ', 'ł'] +['Å', '§'] +['ðŁIJ', 'Ĵ'] +['ï§', 'IJ'] +['ðŁĺ', '¿'] +['âĪ', '¬'] +['ðŁIJ', '®'] +['âŁ', '±'] +['à²', '¡'] +['â¾', '¼'] +['à°', '²'] +['Ë', '¶'] +['âĸ', '¿'] +['Õ', 'Ī'] +['áŀ', 'İ'] +['áħ', '¥'] +['áŀ', 'Ĺ'] +['Õ', '§'] +['ð٤', 'IJ'] +['ðŁį', 'ł'] +['à¦', '¤'] +['à¶', 'º'] +['âĻ', 'į'] +['ìĺ', 'Ļ'] +['íĺ', 'ĵ'] +['ï¹', 'º'] +['ðŁĽ', '³'] +['Å', 'ī'] +['á´', 'İ'] +['âı', 'ľ'] +['âĶ', '³'] +['ê¸', '·'] +['ì¡', 'Ķ'] +['ðĿĴ', 'Ī'] +['ðĿĴ', 'į'] +['ðĿĴ', '¹'] +['ðĿĵ', 'ĩ'] +['ðĿķ', 'Ł'] +['ðĿĹ', '¹'] +['ðŁĮ', 'ħ'] +['ðŁı', '´'] +['Ä', 'Ķ'] +['Ä', '¤'] +['Å', 'µ'] +['Ç', '¾'] +['Ï', 'ŀ'] +['Ï', '¶'] +['Ô', '³'] +['Ü', 'Ĩ'] +['ß', '©'] +['à¡', 'Ĵ'] +['à¤', 'ĺ'] +['à¶', 'ļ'] +['à½', 'ĸ'] +['áģ', 'Ĭ'] +['áĥ', 'ŀ'] +['áĦ', 'Ĥ'] +['áĭ', '«'] +['á´', 'º'] +['á¸', '£'] +['á¸', 'ª'] +['á¹', 'Ĥ'] +['á¼', '·'] +['á¿', 'ĩ'] +['âĩ', 'Į'] +['âı', '¬'] +['âĻ', 'Į'] +['â®', 'Ł'] +['â´', '»'] +['âµ', 'Ł'] +['ê¦', 'ķ'] +['ê¦', 'ª'] +['ê¦', '®'] +['ê²', 'Ħ'] +['ê¾', 'IJ'] +['ëĥ', 'ij'] +['ëķ', 'ĭ'] +['ë¡', '¸'] +['ë¬', 'Ģ'] +['ìĩ', '¤'] +['ìĪ', '©'] +['ìľ', 'ķ'] +['ìŃ', 'ĺ'] +['ì·', '°'] +['ì', '·¸'] +['íľ', 'Ģ'] +['ï¤', '£'] +['ï§', 'į'] +['ï±', 'Ħ'] +['ï³', 'ij'] +['ðĿIJ', '¤'] +['ðĿĴ', 'ĵ'] +['ðĿĴ', '¶'] +['ðĿĹ', '¼'] +['ðĿĻ', 'Ĭ'] +['ðŁĩ', '¾'] +['ðŁĮ', 'Ľ'] +['ðŁĮ', '®'] +['ðŁİ', 'ĩ'] +['ðŁİ', '²'] +['ðŁı', 'Ľ'] +['ðŁij', '¥'] +['ðŁij', '´'] +['ðŁĴ', 'Ĩ'] +['ðŁĵ', 'Ĥ'] +['ðŁĵ', '§'] +['ðŁķ', 'IJ'] +['ðŁĸ', 'ķ'] +['ðŁĺ', '§'] +['ðŁĻ', 'Ģ'] +['ðŁļ', 'Ĵ'] +['ðŁĽ', '«'] +['ð٤', 'ł'] +['ðŁ¥', 'ļ'] +['ðŁ¥', 'Ľ'] +['ðŁ¥', '£'] +['Ç', '¯'] +['È', '§'] +['Î', 'Ĭ'] +['Ò', '²'] +['×', '°'] +['Û', 'ij'] +['áĥ', '©'] +['áĦ', 'Į'] +['áĪ', 'į'] +['áī', '¥'] +['áı', 'Ĥ'] +['âģ', '±'] +['âĬ', '¢'] +['âĹ', 'ĵ'] +['âĿ', '°'] +['ë¿', '¡'] +['ìĽ', '©'] +['íģ', 'Ń'] +['íĨ', '³'] +['íĬ', 'Ħ'] +['íĵ', '¸'] +['ï¥', '£'] +['ï¥', '´'] +['ï±', 'IJ'] +['ï±', '¯'] +['ï³', 'ļ'] +['ðĿĸ', 'ĺ'] +['ðĿĺ', 'Ģ'] 
+['ðŁIJ', 'Ĭ'] +['ðŁIJ', 'Į'] +['ðŁij', 'ļ'] +['ðŁĵ', 'ĥ'] +['ðŁļ', 'Ľ'] +['ðŁļ', 'ª'] +['ð٤', '°'] +['Ä', '´'] +['áĥ', '®'] +['áĹ', '¨'] +['âĻ', '®'] +['â²', 'ŀ'] +['ãĪ', 'Ķ'] +['ì', 'ħį'] +['ãħ', 'ĥ'] +['ï¥', '¡'] +['àº', '¡'] +['Õ', 'İ'] +['Õ', 'º'] +['â¬', 'Ľ'] +['â½', '¤'] +['ðĿIJ', '²'] +['âŀ', 'µ'] +['áĢ', 'Ľ'] +['âĶ', 'ħ'] +['âĨ', 'Ł'] +['â¼', 'Ĭ'] +['ðŁĮ', '½'] +['ðŁļ', '¿'] +['ï¦', 'Ĭ'] +['ãĦ', '£'] +['âĽ', '©'] +['ï©', 'Ľ'] +['ðŁį', '±'] +['â¾', '¨'] +['à´', '¤'] +['áŀ', 'ģ'] +['àº', 'ŀ'] +['Ê', 'ļ'] +['ðĿIJ', 'Ĵ'] +['à´', '±'] +['áŀ', 'ľ'] +['à®', '©'] +['à°', 'Ĺ'] +['à´', 'ļ'] +['âĩ', '£'] +['ï¦', 'ķ'] +['Õ', 'ħ'] +['Æ', 'ĺ'] +['âĤ', '¦'] +['âĶ', 'Ħ'] +['ï¦', 'Ł'] +['ï¦', '«'] +['ðĿIJ', 'ģ'] +['ðĿIJ', 'ĥ'] +['ðŁį', '¸'] +['ðŁIJ', '²'] +['Å', '¶'] +['É', 'ĸ'] +['ß', 'ĺ'] +['à¸', '¦'] +['à½', 'Ķ'] +['áĨ', '·'] +['âģ', 'ķ'] +['âĵ', 'Ĥ'] +['âĿ', 'ľ'] +['ï¥', '¥'] +['ï¬', '®'] +['ðĿĹ', 'Ŀ'] +['ðĿĹ', '¿'] +['ðŁİ', '¾'] +['ðŁĹ', 'Ŀ'] +['ð٦', 'Į'] +['Æ', 'ħ'] +['Ç', 'ª'] +['Ò', 'Ĺ'] +['Ü', 'Ľ'] +['ß', 'ł'] +['à¡', 'ij'] +['áī', '£'] +['áĬ', 'Ń'] +['á¹', '¡'] +['âŀ', '¼'] +['âŀ', '¾'] +['â´', '±'] +['ãī', '¡'] +['ê³', '¯'] +['ë½', 'Ī'] +['ìĤ', 'ĺ'] +['ìī', 'ij'] +['ì', '«ĺ'] +['íĮ', 'ĥ'] +['íĻ', '°'] +['ï¤', 'Ĺ'] +['ðŁĮ', '¬'] +['ðŁĮ', '°'] +['ðŁį', '¤'] +['Ä', '»'] +['Å', 'ĩ'] +['Æ', '¨'] +['É', 'ķ'] +['Ò', '¢'] +['Ò', 'º'] +['Ö', 'į'] +['×', '±'] +['Ú', '±'] +['Ú', '½'] +['Û', 'IJ'] +['à¤', 'Ľ'] +['à·', 'Ģ'] +['à¹', 'ļ'] +['àº', '«'] +['á´', '¹'] +['á', '½Ķ'] +['á¾', '³'] +['âĤ', 'Ĵ'] +['âĨ', '´'] +['âĩ', 'Ŀ'] +['âī', 'ħ'] +['â', 'Į¨'] +['âĵ', 'ĵ'] +['âĸ', '¢'] +['âļ', '¬'] +['âŀ', 'Ń'] +['â²', 'Ĵ'] +['ãİ', '¿'] +['ê¿', '´'] +['ëĪ', '±'] +['ëį', '¬'] +['ëİ', 'IJ'] +['ëIJ', '«'] +['ëĶ', '«'] +['ë±', 'ģ'] +['ìĥ', '¥'] +['íĮ', '¼'] +['ïŃ', 'ĵ'] +['ï®', '¥'] +['ï²', '°'] +['ðĿIJ', 'ĩ'] +['ðĿIJ', 'ij'] +['ðĿij', 'Į'] +['ðĿĵ', 'ª'] +['ðĿķ', 'ļ'] +['ðĿĺ', 'ª'] +['ðĿĺ', '¼'] +['ðĿļ', 'Ľ'] +['ðŁĩ', '¶'] +['ðŁĮ', 'Ħ'] +['ðŁĮ', 'ķ'] +['ðŁĮ', '¤'] +['ðŁĮ', '§'] +['ðŁį', 
'¬'] +['ðŁİ', 'ĭ'] +['ðŁİ', '»'] +['ðŁı', '¨'] +['ðŁIJ', 'ĩ'] +['ðŁij', 'ĵ'] +['ðŁĵ', 'IJ'] +['ðŁĵ', 'Ļ'] +['ðŁĶ', '¼'] +['ðŁķ', 'Ĵ'] +['ðŁĸ', 'ı'] +['ðŁĸ', '¥'] +['ð٤', '¬'] +['ðŁ¥', 'Ĭ'] +['ðŁ¥', 'Ĵ'] +['ß', 'Į'] +['àº', 'Ħ'] +['á¼', 'µ'] +['âķ', '¡'] +['â²', '¤'] +['â´', '¼'] +['âµ', '¢'] +['ãĪ', '¯'] +['ëĵ', '¸'] +['ëŁ', 'ĩ'] +['ëº', 'į'] +['ðĿĻ', '§'] +['ðŁį', 'Ī'] +['ðŁĶ', '¬'] +['ðŁĸ', 'Ĭ'] +['ð٤', '¾'] +['Ë', '¡'] +['Ü', '©'] +['âĮ', '¡'] +['âŃ', 'ij'] +['â²', '¦'] +['ë©', 'ī'] +['ì¼', 'Ń'] +['ï¿', '¤'] +['ðĿĴ', 'İ'] +['ðĿĹ', '¥'] +['ðŁIJ', 'µ'] +['ðŁķ', '¶'] +['ðŁķ', '¸'] +['ð٤', 'ľ'] +['Õ', 'ª'] +['áĪ', 'ĭ'] +['ðŁ¥', 'µ'] +['ï°', 'ģ'] +['áµ', 'IJ'] +['âķ', 'ĵ'] +['áĢ', 'ĸ'] +['âĭ', 'Ī'] +['É', 'ŀ'] +['âŀ', '®'] +['à¥', '°'] +['ãĨ', 'ģ'] +['ðŁĴ', '±'] +['ðŁı', 'Ń'] +['áĨ', '¨'] +['ðŁį', 'ļ'] +['ð٦', 'IJ'] +['á´', '»'] +['âĺ', 'Į'] +['à´', 'ķ'] +['Õ', '±'] +['áħ', '®'] +['ðĿIJ', 'Į'] +['Å', '¦'] +['àº', 'ķ'] +['âľ', 'Ļ'] +['Ë', '³'] +['Ô', 'µ'] +['âķ', 'Ĵ'] +['ðĿĹ', 'Ĺ'] +['ðĿĹ', 'ł'] +['Ú', 'ļ'] +['à¦', '§'] +['âĨ', 'Ŀ'] +['âĻ', 'ī'] +['ãĮ', '»'] +['ì¹', 'Ĭ'] +['ðĿĹ', 'º'] +['ð٧', 'ĺ'] +['ì³', '£'] +['ï¬', 'Ŀ'] +['ðŁij', 'º'] +['Ç', 'Ł'] +['Î', 'Ī'] +['Î', '«'] +['Ñ', '¥'] +['Ô', '²'] +['Õ', '¨'] +['Ü', '¦'] +['à¦', 'Ĩ'] +['à¦', '¥'] +['áIJ', '¢'] +['á¼', 'ģ'] +['á¼', 'ĺ'] +['á¼', '¦'] +['âĵ', 'Ŀ'] +['ãĪ', '°'] +['ãİ', 'Ĺ'] +['ê²', '¡'] +['ë¨', 'Ģ'] +['ì£', 'Ķ'] +['ì´', '¤'] +['ìµ', 'Ŀ'] +['ï§', '´'] +['ïŃ', 'Ĭ'] +['ï²', 'Ł'] +['ðĿIJ', '·'] +['ðĿij', 'ĭ'] +['ðĿĵ', 'ī'] +['ðĿĺ', 'µ'] +['ðŁĴ', '·'] +['ðŁĽ', '©'] +['ð٧', '¹'] +['Å', 'Ķ'] +['Ê', 'ŀ'] +['Ë', '¥'] +['Î', 'Į'] +['Ñ', '©'] +['Ó', 'IJ'] +['Ó', 'ł'] +['Ú', 'ij'] +['Ú', 'Ĵ'] +['ß', '¨'] +['àª', 'Ī'] +['áIJ', 'ĥ'] +['á¹', '¯'] +['âĤ', 'ĭ'] +['âĤ', 'µ'] +['âĦ', 'ħ'] +['âĦ', 'ł'] +['âĪ', '£'] +['âī', 'º'] +['âī', '»'] +['âĬ', 'Ľ'] +['âĮ', 'IJ'] +['âİ', 'ĵ'] +['âĺ', '¸'] +['âĻ', 'Ĵ'] +['âļ', 'Ĵ'] +['âľ', 'ĩ'] +['âľ', 'ł'] +['â´', '·'] +['âµ', 'ĸ'] +['ãĦ', '¸'] +['ãī', '¢'] +['ãī', '°'] +['êĩ', '´'] 
+['ê´', '¸'] +['êº', 'ł'] +['ëĤ', 'ı'] +['ëĤ', '¢'] +['ëIJ', 'Ģ'] +['ëº', '´'] +['ìĥ', 'ľ'] +['ìį', 'ħ'] +['ì¤', '«'] +['ì±', '¦'] +['ìº', 'ij'] +['ì¼', 'ģ'] +['ì¿', '³'] +['íĤ', 'ģ'] +['íħ', '¡'] +['íĴ', 'Ĥ'] +['íĴ', 'ī'] +['íľ', 'Ħ'] +['ïŃ', 'ª'] +['ï®', '¬'] +['ï¯', '¦'] +['ï±', 'ª'] +['ï²', 'ı'] +['ï', '´Ģ'] +['ï»', 'Ĩ'] +['ï¿', '¦'] +['ðĿij', 'Ĺ'] +['ðĿĸ', 'Ļ'] +['ðŁĮ', '¡'] +['ðŁį', 'Ŀ'] +['ðŁį', '§'] +['ðŁİ', '«'] +['ðŁı', 'ĺ'] +['ðŁı', 'ª'] +['ðŁIJ', 'ĭ'] +['ðŁIJ', 'Ľ'] +['ðŁIJ', 'º'] +['ðŁij', 'ĸ'] +['ðŁij', 'ŀ'] +['ðŁij', '·'] +['ðŁĵ', 'Ģ'] +['ðŁ', 'ĶĦ'] +['ðŁĶ', 'Į'] +['ðŁķ', 'Ļ'] +['ðŁĻ', 'į'] +['ðŁĻ', 'İ'] +['ð٦', 'į'] +['Ç', '°'] +['É', 'Ł'] +['Ê', 'Ĩ'] +['Ô', '¼'] +['Ú', 'ľ'] +['à¦', '¡'] +['à¦', '¶'] +['áĴ', 'ĥ'] +['á¼', '©'] +['âĵ', 'ķ'] +['â²', 'Ī'] +['ê°', '°'] +['ê¹', 'ł'] +['êº', 'ħ'] +['ëĦ', '¹'] +['ë¯', 'ĵ'] +['íIJ', 'Ī'] +['ï§', '¶'] +['ï®', 'ij'] +['ï²', '¨'] +['ðĿĴ', 'ī'] +['ðĿĴ', 'Ķ'] +['ðĿĹ', '¨'] +['ðĿĻ', 'ŀ'] +['ðĿļ', 'Ĵ'] +['ðĿļ', 'ķ'] +['ðŁIJ', 'İ'] +['ð٤', 'ķ'] +['ð٧', 'Ķ'] +['Ï', '°'] +['Ô', 'Ŀ'] +['âĮ', 'Ĭ'] +['âĴ', '¾'] +['ãī', '£'] +['ïŃ', '©'] +['ðĿļ', 'ŀ'] +['Ê', 'ij'] +['à¦', '¦'] +['áĦ', 'ĩ'] +['âī', 'ĥ'] +['â²', 'Ģ'] +['ìŁ', 'İ'] +['ðĿij', '¶'] +['ðĿĵ', '²'] +['ðŁ', 'İ·'] +['ðŁļ', '¹'] +['àº', 'ģ'] +['áł', 'ł'] +['ãĦ', 'ļ'] +['ðŁIJ', '¿'] +['áĽ', 'ļ'] +['âķ', '³'] +['ðŁIJ', 'Ń'] +['âĴ', '¹'] +['ðĿĸ', 'ļ'] +['âĻ', 'ĸ'] +['ãĪ', '²'] +['âĨ', '¾'] +['áĦ', 'Ĩ'] +['âķ', 'Ľ'] +['ð٤', 'į'] +['â½', '¥'] +['ðŁ', 'Į¨'] +['âĪ', '®'] +['ãĮ', 'ĺ'] +['ãį', 'ij'] +['ï¹', 'Ģ'] +['âĵ', 'Ĺ'] +['âĬ', 'Ħ'] +['ðŁı', '¹'] +['Ë', 'Ĵ'] +['ð٤', '±'] +['ãı', 'ľ'] +['ðŁİ', 'Į'] +['ï¥', 'Ń'] +['à¦', '£'] +['ðŁİ', '¹'] +['ãĬ', 'Ł'] +['à´', '°'] +['ðĿIJ', 'Ķ'] +['à´', '¨'] +['à½', 'ļ'] +['âľ', 'º'] +['Õ', '·'] +['ðŁij', '³'] +['à¦', 'ľ'] +['âĺ', 'ĭ'] +['âĻ', 'Ĭ'] +['ãĢ', 'Ľ'] +['È', 'ĭ'] +['à®', '°'] +['áĥ', '¨'] +['âĦ', 'ķ'] +['íij', 'Ģ'] +['ðĿĵ', 'ĥ'] +['ð٦', 'Ķ'] +['Ä', '¿'] +['Å', 'Ģ'] +['Æ', '³'] +['É', 'ļ'] +['Ö', 'ĥ'] +['Ü', '£'] +['ß', 'Ł'] 
+['à¦', 'Ń'] +['à§', '¡'] +['à¶', '»'] +['àº', '£'] +['à½', 'ĩ'] +['á¸', '¨'] +['á½', 'Ī'] +['â½', '¬'] +['ê¡', 'Ķ'] +['ì³', 'Ħ'] +['ï¨', 'ī'] +['ðĿIJ', '¡'] +['ðĿĺ', '¢'] +['ðŁį', '¿'] +['ðŁİ', 'Ł'] +['ðŁı', 'ī'] +['ðŁĶ', 'IJ'] +['ðŁļ', 'ħ'] +['ð٤', '½'] +['Æ', 'į'] +['Ç', '«'] +['Ç', '½'] +['È', 'ļ'] +['Î', 'ī'] +['Ó', '¤'] +['Ó', 'ª'] +['Õ', 'Ĭ'] +['Ù', '¼'] +['Ú', '´'] +['ß', 'Ŀ'] +['à¶', 'ľ'] +['á¼', 'ķ'] +['á¿', '¥'] +['âİ', 'ŀ'] +['ãĢ', 'ļ'] +['ãī', '¤'] +['ê³', '¸'] +['ê·', 'ģ'] +['ëĵ', 'Ħ'] +['ëĵ', 'ķ'] +['ì¨', 'Ķ'] +['ì±', '¨'] +['ðĿIJ', '¾'] +['ðĿij', '»'] +['ðĿĶ', '¼'] +['ðĿķ', 'Ŀ'] +['ðĿĺ', 'Ń'] +['ðŁĨ', 'Ļ'] +['ðŁĵ', '¤'] +['ðŁĶ', 'Ł'] +['ðŁĹ', '¼'] +['Ä', 'ľ'] +['Æ', 'ģ'] +['Æ', '¿'] +['Ç', '³'] +['Ç', '·'] +['É', 'ĥ'] +['É', 'ł'] +['Ê', 'ī'] +['Ê', '§'] +['Ë', '²'] +['Ï', '´'] +['Õ', 'ģ'] +['Õ', 'ŀ'] +['Ö', 'ĩ'] +['Û', 'Ĥ'] +['Û', 'ĵ'] +['ß', 'Ĺ'] +['ß', '¦'] +['à¦', '¹'] +['à®', '³'] +['à´', '¸'] +['à»', 'Ĥ'] +['áĪ', 'Ŀ'] +['áĪ', 'ª'] +['áĭ', 'µ'] +['áIJ', 'Ĭ'] +['áĴ', 'ª'] +['áļ', 'ĸ'] +['áŀ', 'Ľ'] +['á´', '¢'] +['áµ', 'ı'] +['áµ', 'Ń'] +['á¶', '«'] +['á¸', 'ı'] +['áº', 'Ĵ'] +['á¼', '¥'] +['á½', 'ķ'] +['á½', '¼'] +['âĤ', 'Ĭ'] +['âĦ', 'Ĥ'] +['âĦ', '©'] +['âĩ', 'ī'] +['âī', '£'] +['âĮ', 'ł'] +['âİ', 'Ł'] +['âı', '®'] +['âķ', 'ĺ'] +['âĹ', 'ĸ'] +['âĺ', '©'] +['âĻ', 'ij'] +['âĻ', '²'] +['âļ', 'Ľ'] +['ãĦ', 'Ł'] +['ãī', '±'] +['ãİ', 'ļ'] +['ê¡', 'ķ'] +['êª', 'ĸ'] +['ê°', '¹'] +['ê²', 'Ĩ'] +['êµ', 'Ħ'] +['ëĩ', '¬'] +['ëĭ', '¯'] +['ëı', 'ł'] +['ëĴ', '¬'] +['ëĸ', 'Ī'] +['ëĸ', '½'] +['ëĺ', 'Ķ'] +['ëŀ', '¸'] +['ë¸', 'ħ'] +['ë»', 'ł'] +['ë¿', 'Ł'] +['ìĤ', 'µ'] +['ìĬ', 'ī'] +['ìľ', '°'] +['ìł', 'ĭ'] +['ìł', 'Ķ'] +['ì¥', '¡'] +['ìŃ', 'Ŀ'] +['ì¼', '¬'] +['íĪ', 'ĩ'] +['íī', 'ľ'] +['íį', 'Ħ'] +['íĽ', '¾'] +['íĿ', '£'] +['ï¤', '©'] +['ï¤', '¯'] +['ï¦', 'ľ'] +['ï¦', '§'] +['ï§', 'ľ'] +['ï¨', 'Ī'] +['ï¬', 'ª'] +['ï', '¬´'] +['ïŃ', '½'] +['ï®', 'ī'] +['ï¯', 'ŀ'] +['ï°', 'Ĵ'] +['ï±', 'ĩ'] +['ï¿', 'Ħ'] +['ðĿIJ', 'ħ'] +['ðĿij', 'Ħ'] +['ðĿij', 'º'] +['ðĿĴ', 'Ĺ'] +['ðĿĵ', 
'®'] +['ðĿķ', 'Ľ'] +['ðĿķ', 'ŀ'] +['ðĿĸ', 'ij'] +['ðĿĺ', 'ģ'] +['ðĿĺ', 'Ĩ'] +['ðĿĺ', '¶'] +['ðĿĻ', '¢'] +['ðĿļ', 'ľ'] +['ðŁĮ', 'ĥ'] +['ðŁĮ', '¦'] +['ðŁį', 'Ł'] +['ðŁİ', 'İ'] +['ðŁı', 'Ļ'] +['ðŁIJ', '©'] +['ðŁIJ', '«'] +['ðŁIJ', '´'] +['ðŁij', 'Ķ'] +['ðŁĵ', 'ī'] +['ðŁĵ', 'Ľ'] +['ðŁĶ', 'ī'] +['ðŁĸ', '¼'] +['ðŁĹ', 'ĥ'] +['ðŁĹ', '¯'] +['ðŁļ', 'ĩ'] +['ðŁļ', 'IJ'] +['ðŁļ', 'µ'] +['ð٤', '¶'] +['ðŁ¥', 'ĭ'] +['ðŁ¥', 'ĵ'] +['ðŁ¥', '®'] +['ð٦', 'İ'] +['ð٦', 'ł'] +['ð٧', 'Ĵ'] +['ð٧', '¨'] +['Æ', 'IJ'] +['Ç', 'į'] +['Ó', 'Ģ'] +['Ô', 'Ľ'] +['à²', '°'] +['à´', 'Ļ'] +['áĢ', 'Ĵ'] +['ê²', 'Ŀ'] +['ê¹', '¹'] +['ë©', '¥'] +['ìĸ', 'Ķ'] +['ï¤', 'ģ'] +['ï¤', 'ı'] +['ï¦', 'ī'] +['ï¦', 'ĵ'] +['ï§', 'ī'] +['ï²', 'Ŀ'] +['ðĿĹ', 'ŀ'] +['ðĿĹ', '±'] +['ðŁĮ', 'ĭ'] +['ðŁį', '¶'] +['à¦', 'ļ'] +['ìķ', 'ľ'] +['ðĿIJ', '¯'] +['ðĿļ', 'Ŀ'] +['à°', '¨'] +['à½', 'ĺ'] +['à½', 'ł'] +['á¡', '¥'] +['á¾', '°'] +['âģ', 'į'] +['âĶ', '°'] +['â¬', 'ľ'] +['ðĿIJ', 'ł'] +['ðĿij', '¯'] +['ðĿĹ', 'Ľ'] +['ðĿĵ', '»'] +['ðĿĸ', 'Ī'] +['âŀ', '»'] +['áŀ', 'ł'] +['â¡', '±'] +['â»', 'ij'] +['ð٧', 'µ'] +['ï¦', '¢'] +['ðŁij', 'ĺ'] +['ãĤ', 'Ķ'] +['â¼', 'Ł'] +['ãĬ', '¤'] +['ï¦', 'Ŀ'] +['ãĮ', '¦'] +['âĢ', '¸'] +['ðŁĶ', 'Ļ'] +['ã', '¹'] +['ã¹', '¦'] +['ï¹', 'ħ'] +['ï©', 'Į'] +['ãī', '¨'] +['ï¸', '½'] +['âį', '¥'] +['ðŁļ', 'ī'] +['ðŁ¥', 'ľ'] +['âĵ', 'ľ'] +['â»', 'Ŀ'] +['ï¨', 'ľ'] +['ðŁĴ', 'Ĵ'] +['áĦ', 'ij'] +['â¾', 'ŀ'] +['ï¨', 'ģ'] +['à´', 'ª'] +['áĦ', 'İ'] +['âŀ', '´'] +['à¦', '·'] +['áħ', '¬'] +['áŀ', '§'] +['âĨ', '¢'] +['âķ', '¦'] +['âľ', 'ij'] +['Ë', '¬'] +['Õ', 'IJ'] +['à¼', 'Ķ'] +['Ê', '¤'] +['Ë', '¨'] +['à¤', 'ŀ'] +['à»', 'ĥ'] +['à¼', 'ļ'] +['âĵ', '¥'] +['âķ', 'ľ'] +['ðŁIJ', 'ĸ'] +['á¼', 'Ļ'] +['á¼', '¤'] +['ìĨ', '°'] +['È', 'Ĥ'] +['Ê', '±'] +['à®', 'ļ'] +['áĥ', '§'] +['á´', 'ĭ'] +['á´', '®'] +['âĿ', '¡'] +['âŀ', '·'] +['ëĿ', '¡'] +['ï§', '¢'] +['ï¯', '¡'] +['ðĿķ', 'ķ'] +['ðŁħ', '°'] +['ð٦', '¸'] +['Ç', '¸'] +['Ó', 'ŀ'] +['Ô', '¶'] +['Ö', 'Ĩ'] +['Ú', 'ģ'] +['Û', 'ĭ'] +['áİ', '¥'] +['á¾', '¿'] +['âĶ', 'Ń'] +['âĶ', '®'] +['êĢ', 
'Ģ'] +['ê±', 'ĺ'] +['ëIJ', 'Ń'] +['ë½', 'Ħ'] +['ìĶ', 'IJ'] +['ì¸', 'Į'] +['íģ', 'ł'] +['íĻ', '±'] +['ï¥', 'ī'] +['ï¨', 'ĸ'] +['ðĿij', '´'] +['ðĿĸ', 'Ĵ'] +['ðĿĺ', '¨'] +['ðĿ', 'ļĮ'] +['ðŁIJ', '¡'] +['ðŁij', '¢'] +['ðŁĵ', 'Ķ'] +['Å', 'ħ'] +['Æ', 'İ'] +['È', '©'] +['Ò', 'ª'] +['Ô', 'ĥ'] +['áĥ', '«'] +['á¸', 'ĩ'] +['âĽ', 'Ł'] +['ê»', 'Ń'] +['ë¨', 'Ħ'] +['ìŁ', 'Ģ'] +['ì¤', '´'] +['íļ', 'IJ'] +['ï¤', '³'] +['ðŁŁ', '¢'] +['Æ', '§'] +['È', '¼'] +['Ê', 'Ŀ'] +['Ë', 'Ħ'] +['Ë', 'ħ'] +['Ë', 'į'] +['Ë', '§'] +['Ò', '¥'] +['Õ', 'Ķ'] +['Ø', 'ı'] +['Ø', '¼'] +['ß', 'IJ'] +['ß', 'ľ'] +['à¤', 'ĵ'] +['à¦', 'Ļ'] +['à®', 'ĵ'] +['à¶', '´'] +['à¼', 'į'] +['à¼', 'Ĵ'] +['à½', '£'] +['áĢ', 'Ĥ'] +['áĢ', 'Ĭ'] +['áĦ', 'Ħ'] +['á', 'Īĺ'] +['áĭ', 'Ĭ'] +['áĮ', 'į'] +['áij', 'ĭ'] +['áŀ', 'Ĥ'] +['áł', '¢'] +['á¡', 'Ŀ'] +['á´', '¦'] +['áµ', 'į'] +['áµ', '¨'] +['á¸', '¡'] +['á¸', '¯'] +['á¼', '£'] +['âģ', 'Ĥ'] +['âĦ', 'ĺ'] +['âĦ', 'ľ'] +['âĦ', '³'] +['âĦ', 'µ'] +['âĨ', '¦'] +['âĩ', 'Ĩ'] +['âĪ', '·'] +['âĬ', 'ļ'] +['âĮ', '«'] +['âĮ', '¯'] +['âİ', 'Ľ'] +['âİ', 'ľ'] +['âİ', '¤'] +['âİ', '¦'] +['âİ', '®'] +['âij', 'ī'] +['âĶ', 'ī'] +['âķ', 'Ļ'] +['âĸ', 'Ĥ'] +['âĹ', 'Ń'] +['âĺ', 'Ĭ'] +['âĺ', 'į'] +['âĺ', 'Ĵ'] +['âļ', 'Ĩ'] +['âĽ', '§'] +['âĽ', '²'] +['âŀ', 'ĺ'] +['â¥', 'Ħ'] +['â´', '³'] +['â´', '½'] +['âµ', 'Ī'] +['ãī', '¯'] +['ãİ', 'ij'] +['ã§', '¬'] +['êĻ', '¬'] +['ê§', 'ģ'] +['ê³', '¬'] +['ê´', 'ŀ'] +['ê»', 'ľ'] +['ëħ', 'ĵ'] +['ëĭ', '¼'] +['ëį', 'ĸ'] +['ëĸ', '±'] +['ëĿ', '°'] +['ë¡', '¹'] +['ë¢', '´'] +['ë£', 'Ģ'] +['ë¤', 'ł'] +['ë¨', 'ķ'] +['ëŃ', '¥'] +['ìĦ', '¶'] +['ìħ', '¤'] +['ìĮ', 'ķ'] +['ìį', 'ª'] +['ìı', '©'] +['ìĴ', 'Ģ'] +['ìĶ', '¯'] +['ìĿ', 'Ķ'] +['ìĿ', 'ľ'] +['ìł', 'Ń'] +['ì§', '¦'] +['ì¨', '©'] +['ì²', '¬'] +['ì³', '¥'] +['ì¼', '¯'] +['íĢ', '«'] +['íĢ', 'Ń'] +['íĥ', '¸'] +['íĵ', 'ģ'] +['íķ', '¬'] +['íĹ', '¸'] +['íĽ', 'ķ'] +['íľ', 'Ń'] +['íĿ', 'Ĺ'] +['ï¤', 'Į'] +['ï¤', 'ª'] +['ï§', '¿'] +['ï¬', 'Ħ'] +['ï¬', 'ħ'] +['ïŃ', 'ij'] +['ïŃ', '«'] +['ïŃ', 'º'] +['ï®', 'Ĥ'] +['ï®', '¢'] +['ï®', '¨'] 
+['ï°', 'İ'] +['ï°', 'ł'] +['ï²', '£'] +['ï³', 'IJ'] +['ï³', 'Ĵ'] +['ï³', 'ĺ'] +['ï³', 'ľ'] +['ï¹', '¼'] +['ï¿', '¨'] +['ðĿIJ', '©'] +['ðĿĴ', 'ļ'] +['ðĿķ', 'Ķ'] +['ðĿķ', '¤'] +['ðĿĸ', 'Į'] +['ðĿĹ', '£'] +['ðĿĹ', '°'] +['ðĿĹ', '´'] +['ðĿĺ', 'Ĥ'] +['ðĿĺ', '¥'] +['ðĿĺ', '®'] +['ðĿĺ', '¸'] +['ðĿĻ', 'Ģ'] +['ðĿĽ', '¾'] +['ðĿľ', 'ı'] +['ðŁĮ', 'ģ'] +['ðŁĮ', 'ľ'] +['ðŁĮ', '¥'] +['ðŁĮ', '¯'] +['ðŁį', 'IJ'] +['ðŁİ', 'Ĵ'] +['ðŁı', 'Ķ'] +['ðŁı', 'ķ'] +['ðŁı', '®'] +['ðŁIJ', 'Ĥ'] +['ðŁIJ', 'ī'] +['ðŁIJ', '¹'] +['ðŁĶ', 'ķ'] +['ðŁĶ', 'ļ'] +['ðŁķ', 'ij'] +['ðŁķ', '£'] +['ðŁĹ', 'ŀ'] +['ðŁĹ', '¡'] +['ðŁĹ', '¿'] +['ðŁļ', 'Ĩ'] +['ðŁļ', 'Ĭ'] +['ðŁļ', 'ĵ'] +['ðŁļ', 'ķ'] +['ðŁļ', '¾'] +['ðŁĽ', 'ģ'] +['ðŁĽ', 'İ'] +['ðŁĽ', 'ı'] +['ð٤', '´'] +['ðŁ¥', 'ķ'] +['ðŁ¥', 'ĸ'] +['ðŁ¥', 'ł'] +['ðŁ¥', '¥'] +['ð٦', 'Ĩ'] +['ð٦', 'ī'] +['ð٦', 'ļ'] +['ð٧', 'ij'] +['ð٧', '¥'] +['ð٧', '¿'] +['Å', '°'] +['Æ', 'º'] +['É', '§'] +['àª', 'ĩ'] +['à®', '£'] +['áĪ', 'Ī'] +['áĬ', '¤'] +['áĭ', '®'] +['áĮ', 'Ī'] +['áĮ', 'µ'] +['á¥', '²'] +['âĵ', 'Ł'] +['êĻ', '³'] +['ê°', 'Ĭ'] +['ëķ', 'ģ'] +['ëķ', '¨'] +['ìĬ', 'ģ'] +['ï¦', 'µ'] +['ï¬', '²'] +['ðĿĸ', 'į'] +['ðĿĺ', 'Į'] +['ðĿĺ', '³'] +['ðĿĻ', '©'] +['ðŁį', 'Ļ'] +['ðŁĸ', 'ĸ'] +['áī', '³'] +['áĭ', '¨'] +['áĸ', 'ĩ'] +['áŀ', 'Į'] +['á¹', '§'] +['âķ', 'ª'] +['âŀ', 'ļ'] +['â²', 'ĺ'] +['ê', 'ķ'] +['êķ', '¥'] +['ï¤', '·'] +['ï®', '£'] +['ï¯', 'ł'] +['ðĿĴ', 'ĸ'] +['ðĿķ', 'ĺ'] +['ðĿĸ', 'ĩ'] +['ðĿĹ', 'Ł'] +['ðĿĹ', 'ª'] +['ðĿĹ', '¯'] +['ðĿĻ', 'ł'] +['ðŁĵ', 'ı'] +['à¦', 'Ĺ'] +['âĴ', '»'] +['â²', 'ł'] +['ðĿĵ', 'µ'] +['Ê', '£'] +['à°', 'ľ'] +['áĬ', '¢'] +['áŀ', 'IJ'] +['á¸', '·'] +['âĦ', 'Ľ'] +['âĩ', 'Ģ'] +['âĩ', 'Ĭ'] +['êĴ', '¦'] +['ê¦', 'ł'] +['ï®', '¤'] +['ðŁį', 'Ľ'] +['ð٤', 'Ľ'] +['á¨', '¾'] +['âŀ', 'º'] +['áķ', '¯'] +['áĽ', 'ı'] +['âĩ', 'Ĥ'] +['âĶ', '¹'] +['âĻ', 'Ĺ'] +['ðŁĸ', '¨'] +['ê¦', 'ı'] +['àª', '°'] +['áļ', '¨'] +['ð٤', '¥'] +['ð٧', '¢'] +['ãIJ', 'Ĥ'] +['ãĦ', '¥'] +['ðŁĸ', 'Į'] +['â¼', 'Ĵ'] +['ãĬ', '§'] +['âį', '©'] +['ð٦', 'ij'] +['âĶ', '·'] +['ï©', 'IJ'] +['ï©', '¡'] 
+['ðĵ', 'Ī'] +['ðĵĪ', 'Ĵ'] +['â»', 'Ħ'] +['ï¨', 'Ĵ'] +['âĦ', 'ª'] +['Ò', '§'] +['Ú', 'Į'] +['âĢ', '¶'] +['âº', 'ł'] +['â»', 'ģ'] +['âĨ', '¸'] +['áĦ', 'IJ'] +['ãħ', 'IJ'] +['à»', 'Ħ'] +['áĹ', 'ª'] +['âĨ', '¼'] +['âĩ', 'ĭ'] +['âĩ', 'ĺ'] +['âĮ', 'ij'] +['âĸ', '©'] +['ðĿIJ', 'Ĺ'] +['Ä', 'Ĭ'] +['à¦', 'ī'] +['ìī', 'ł'] +['É', '¤'] +['ß', 'į'] +['ß', 'ı'] +['áµ', 'Ĺ'] +['âĤ', '¥'] +['âĵ', 'ī'] +['âĶ', 'ł'] +['âĶ', '¨'] +['âķ', 'Ħ'] +['ä', '¤'] +['ä¤', 'Ģ'] +['ê»', '¸'] +['ï®', 'ģ'] +['ðĵ', 'Ĥ'] +['ðĵĤ', 'ĥ'] +['ð٦', 'ķ'] +['Æ', 'Ľ'] +['à¦', 'ĩ'] +['ãı', 'ĺ'] +['ï®', '¼'] +['Ú', 'ĵ'] +['Ú', 'Ŀ'] +['à¦', 'ĵ'] +['à¶', '¯'] +['á´', 'ħ'] +['á½', 'Ļ'] +['âģ', '¼'] +['âĸ', 'İ'] +['â¼', '©'] +['ä', 'Ķ'] +['äĶ', 'Ģ'] +['ë»', '¡'] +['ìĽ', '½'] +['íģ', 'Ħ'] +['ï¥', '¼'] +['ï±', 'ī'] +['ï¹', '»'] +['ðĿĸ', 'ĭ'] +['ðĿĻ', 'Ī'] +['ðĿĻ', 'ª'] +['ðĿ', '϶'] +['ðŁIJ', 'Ħ'] +['ðŁIJ', 'Ĩ'] +['áİ', '¢'] +['á¸', 'Į'] +['âĿ', '´'] +['ðŁı', '¸'] +['È', 'Ŀ'] +['É', '¸'] +['Î', 'ħ'] +['Ï', 'ľ'] +['Ó', '¢'] +['Õ', '¹'] +['à´', 'ħ'] +['àº', 'Ī'] +['áĭ', '°'] +['áij', 'İ'] +['áł', 'µ'] +['á¡', 'ł'] +['á´', 'ī'] +['á¸', 'µ'] +['á¿', '´'] +['âĵ', '£'] +['âĶ', '¶'] +['â½', '¯'] +['ê²', '¥'] +['ê¿', 'ĺ'] +['ëģ', 'İ'] +['ëİ', 'Ī'] +['ëĶ', '¯'] +['ë²', '°'] +['ìĺ', '¯'] +['ìĽ', '¸'] +['ìŀ', 'Ĺ'] +['ì§', 'ĺ'] +['ì¬', '¬'] +['ì·', '¬'] +['íģ', 'ħ'] +['íĵ', 'Ķ'] +['íĽ', 'Ŀ'] +['ï¤', '®'] +['ï¤', '¹'] +['ï¥', '²'] +['ï¯', 'ĸ'] +['ðĿĵ', 'ħ'] +['ðĿĻ', 'Ħ'] +['ðŁĵ', '¶'] +['ðŁĹ', 'Ĵ'] +['ðŁ¥', 'Ķ'] +['ðŁ¥', 'Ń'] +['Å', '®'] +['Å', '´'] +['Æ', 'ī'] +['Æ', '«'] +['Ç', 'ģ'] +['Ç', '£'] +['Ç', 'º'] +['Ç', '¼'] +['È', 'į'] +['È', '¯'] +['É', 'ľ'] +['Ê', '¬'] +['Ë', 'ģ'] +['Ë', '¤'] +['Ë', 'µ'] +['Ï', 'Ľ'] +['Ò', '¤'] +['Ò', '¬'] +['Ó', 'ı'] +['Ó', 'Ľ'] +['Ó', '¡'] +['Ó', '³'] +['Ô', 'Į'] +['Ô', '¬'] +['Õ', '³'] +['Ù', '»'] +['Ú', 'ī'] +['Ú', '§'] +['Ü', 'ľ'] +['ß', 'ª'] +['à¤', 'Ŀ'] +['à¦', 'Ľ'] +['à¨', 'Ĩ'] +['àª', 'ķ'] +['àª', '¡'] +['à®', 'İ'] +['à°', '¬'] +['àµ', '»'] +['àµ', '¼'] +['à¶', 'ł'] +['à¶', 'Ń'] +['à¶', 
'¶'] +['à·', 'Ĩ'] +['à¼', '½'] +['áĢ', 'ļ'] +['áħ', '¢'] +['áĨ', '¸'] +['áĪ', 'Ģ'] +['áĪ', 'ķ'] +['áĪ', '°'] +['áī', '¡'] +['áī', '¤'] +['áĬ', '¦'] +['áĬ', '«'] +['áĭ', 'ĭ'] +['áĭ', 'į'] +['áİ', '¯'] +['áij', 'Ń'] +['áķ', 'Ĺ'] +['áŁ', 'Ľ'] +['á¥', 'Ĵ'] +['á©', 'ī'] +['áŃ', 'º'] +['á´', '¡'] +['áµ', 'ĺ'] +['áµ', 'Ľ'] +['á¶', 'ł'] +['á¸', 'ģ'] +['á¸', 'ĭ'] +['á¹', 'Ļ'] +['á¹', 'Ŀ'] +['á¹', '¦'] +['áº', 'ħ'] +['á¼', 'Ĥ'] +['á½', 'ĥ'] +['á½', 'į'] +['á½', '§'] +['á¾', '·'] +['âĢ', 'µ'] +['âĤ', 'İ'] +['âĦ', 'Ŀ'] +['âħ', 'Ģ'] +['âĨ', 'ŀ'] +['âĨ', '§'] +['âĩ', 'ħ'] +['âĪ', 'ĥ'] +['âī', 'ı'] +['âī', '½'] +['âĬ', 'ŀ'] +['âĬ', '¡'] +['âĬ', '§'] +['â', 'Ĭ¶'] +['âĭ', 'Ħ'] +['âİ', 'Ĵ'] +['âİ', '¡'] +['âİ', '£'] +['âİ', 'ª'] +['âı', 'İ'] +['âĵ', 'ĥ'] +['âĵ', 'ĸ'] +['âĵ', '¨'] +['âķ', 'ĭ'] +['âķ', 'ĸ'] +['âķ', '¢'] +['âķ', '²'] +['âĸ', 'Ĩ'] +['âĸ', 'Ĭ'] +['âĸ', 'į'] +['âĸ', '®'] +['âĺ', '¡'] +['âĺ', '¦'] +['âĺ', '±'] +['âĺ', '¿'] +['âĻ', 'ĺ'] +['âĻ', 'Ŀ'] +['âļ', '°'] +['âĽ', 'ij'] +['âŀ', 'ª'] +['â¤', 'Ŀ'] +['â¤', '¢'] +['â¤', '·'] +['â§', '«'] +['â¨', 'Ń'] +['â¨', '¯'] +['â±', '£'] +['â²', 'İ'] +['âµ', 'Ľ'] +['ãħ', 'Ķ'] +['ãĪ', 'ı'] +['ãī', '²'] +['ãī', '³'] +['ãĬ', 'ij'] +['ãĭ', 'Ľ'] +['ãİ', 'IJ'] +['ê²', '¤'] +['ê·', '¿'] +['ê¹', 'ŀ'] +['ê»', '¨'] +['ê¼', 'į'] +['ê¿', '¸'] +['ëĥ', '¬'] +['ëĩ', 'IJ'] +['ëĭ', 'ł'] +['ëį', '¯'] +['ëĹ', 'Į'] +['ëĹ', 'ij'] +['ë¥', 'Ģ'] +['ëª', 'ĥ'] +['ëª', '¯'] +['ë±', '¡'] +['ë³', 'ĵ'] +['ë³', '½'] +['ë', 'µľ'] +['ìĤ', '³'] +['ìħ', '¥'] +['ìĩ', '½'] +['ìı', '¨'] +['ìı', '¸'] +['ìķ', 'į'] +['ìĸ', 'ĸ'] +['ìŁ', '¨'] +['ì¢', 'ĥ'] +['ì¢', 'į'] +['ì¥', 'ij'] +['ì§', '¼'] +['ì©', 'ĥ'] +['ì®', 'ľ'] +['ì®', '¸'] +['ì³', 'ij'] +['ì´', '¥'] +['ì¾', 'ĥ'] +['íħ', '¦'] +['íĪ', '¿'] +['íĵ', '½'] +['íķ', '³'] +['íĸ', 'ı'] +['íĹ', 'ł'] +['íĿ', '«'] +['ï¤', 'ĵ'] +['ï¤', 'ĺ'] +['ï¥', 'İ'] +['ï¥', '¶'] +['ï¦', 'ħ'] +['ï¦', '½'] +['ï§', 'ĩ'] +['ï¬', 'Ĩ'] +['ï¬', '³'] +['ï®', 'ĩ'] +['ï®', 'Ī'] +['ï®', 'Ŀ'] +['ï®', '©'] +['ï®', '±'] +['ï¯', 'ĺ'] +['ï¯', 'Ļ'] +['ï¯', 
'¢'] +['ï¯', '£'] +['ï¯', '¤'] +['ï¯', '¥'] +['ï±', 'Ĥ'] +['ï²', 'Ĩ'] +['ï²', 'ª'] +['ï´', '¼'] +['ïº', 'ī'] +['ïº', 'Ĭ'] +['ïº', '¥'] +['ðĿij', '¨'] +['ðĿij', '©'] +['ðĿij', '²'] +['ðĿ', 'ĴĮ'] +['ðĿĴ', 'ª'] +['ðĿĴ', '®'] +['ðĿĵ', 'Ĥ'] +['ðĿĵ', 'Ī'] +['ðĿĵ', '¯'] +['ðĿĶ', '¨'] +['ðĿķ', 'Ģ'] +['ðĿķ', 'Ĩ'] +['ðĿķ', '¦'] +['ðĿķ', '§'] +['ðĿķ', '«'] +['ðĿķ', '·'] +['ðĿĹ', 'µ'] +['ðĿĹ', '¸'] +['ðĿĺ', 'Ħ'] +['ðĿĺ', 'Ļ'] +['ðĿĺ', 'ł'] +['ðĿĺ', '¬'] +['ðĿĻ', 'į'] +['ðĿĻ', 'ij'] +['ðĿĻ', '¡'] +['ðĿ', 'ύ'] +['ðĿĻ', '·'] +['ðĿļ', 'į'] +['ðĿĽ', '¿'] +['ðŁ', 'ĥ'] +['ðŁĥ', 'ı'] +['ðŁħ', 'ĺ'] +['ðŁ', 'ī'] +['ðŁī', 'ij'] +['ðŁİ', '¡'] +['ðŁİ', 'ª'] +['ðŁİ', '±'] +['ðŁİ', '³'] +['ðŁİ', 'º'] +['ðŁı', 'İ'] +['ðŁı', 'Ĺ'] +['ðŁı', 'ļ'] +['ðŁı', 'ŀ'] +['ðŁı', '¦'] +['ðŁı', '§'] +['ðŁIJ', 'ģ'] +['ðŁIJ', 'ħ'] +['ðŁIJ', 'ĵ'] +['ðŁĴ', 'Ĥ'] +['ðŁĵ', 'ij'] +['ðŁĵ', 'ĵ'] +['ðŁĵ', '¨'] +['ðŁĵ', '«'] +['ðŁĶ', 'ĭ'] +['ðŁĶ', 'Ń'] +['ðŁĶ', '¯'] +['ðŁķ', 'Ĺ'] +['ðŁļ', 'Ĥ'] +['ðŁļ', '¢'] +['ðŁļ', '¦'] +['ðŁļ', '¬'] +['ðŁĽ', 'ĭ'] +['ðŁĽ', 'Į'] +['ðŁĽ', '¬'] +['ðŁĽ', '¶'] +['ðŁŁ', '¡'] +['ðŁ¥', 'ĺ'] +['ðŁ¥', 'Ł'] +['ðŁ¥', '¦'] +['ð٦', 'ĩ'] +['ð٦', 'Ī'] +['ð٧', 'Ĭ'] +['ð٧', 'Ĺ'] +['ð٧', '¤'] +['Ê', '·'] +['Ë', '¹'] +['á¹', 'ļ'] +['á½', '¥'] +['âĦ', 'Ł'] +['ê²', '¯'] +['ê»', '«'] +['ë°', '·'] +['ìĥ', 'Ĩ'] +['ìĽ', 'Ŀ'] +['ì¨', 'ī'] +['ì«', 'ı'] +['ï¯', 'ķ'] +['ðĿľ', 'ĭ'] +['É', '²'] +['Ò', 'Ń'] +['Ó', 'Ī'] +['à½', 'Ľ'] +['áĭ', 'ĵ'] +['áĻ', 'Ń'] +['áł', '©'] +['á¹', '®'] +['âĦ', 'Ĵ'] +['âĨ', '»'] +['âµ', 'ĥ'] +['ëĢ', '¨'] +['ëł', '§'] +['ìī', '¥'] +['ìĮ', 'ľ'] +['ìĹ', '¶'] +['ì¨', 'Ī'] +['ìª', '¾'] +['íı', '½'] +['íļ', 'Ķ'] +['íĽ', 'µ'] +['ï¤', '¸'] +['ï¦', 'IJ'] +['ï§', 'Ĺ'] +['ï§', 'ļ'] +['ï¬', '¯'] +['ðĿIJ', 'Ĭ'] +['ðĿķ', 'Ĺ'] +['ðĿĹ', 'ļ'] +['ðĿļ', 'ĸ'] +['ðŁħ', '´'] +['È', 'ĥ'] +['É', 'Ŀ'] +['Ï', '±'] +['Ó', 'Ĺ'] +['à¤', '¢'] +['áħ', 'ł'] +['áī', '¦'] +['áij', 'Į'] +['áĴ', '¼'] +['áŀ', '¡'] +['áł', '¨'] +['áł', 'Ń'] +['á¨', 'ħ'] +['á¨', 'Ķ'] +['á´', 'ĺ'] +['á¶', '¦'] +['á¸', 'İ'] +['á¼', 'ħ'] +['á¼', 
'¹'] +['âĨ', '¯'] +['âĵ', 'İ'] +['ãı', 'Į'] +['ê', 'ī'] +['êī', 'Ĥ'] +['ëĨ', '§'] +['ëĿ', '±'] +['ì¢', '¡'] +['íĪ', '½'] +['ï¤', 'ĩ'] +['ï¤', 'Ľ'] +['ðĿIJ', 'ķ'] +['ðĿĵ', '¸'] +['ðĿĵ', '¼'] +['ðĿĹ', 'ķ'] +['ðĿĺ', 'Ī'] +['ðŁı', '£'] +['ðŁı', '¤'] +['ðŁĹ', 'Ħ'] +['Ñ', '·'] +['Ò', 'ł'] +['áµ', 'ĸ'] +['á¼', '¨'] +['ë¬', 'Ħ'] +['ï°', '´'] +['âĪ', '½'] +['Õ', 'Ń'] +['Ú', '¹'] +['à¥', 'Ł'] +['áĢ', 'Ĩ'] +['áŀ', 'Ĵ'] +['ãĢ', '¶'] +['ê¦', '«'] +['ï¸', 'ĵ'] +['ðĿIJ', 'Ľ'] +['ðĿĺ', 'Ĺ'] +['ðŁı', 'ľ'] +['ì«', 'Ń'] +['ð٧', 'ŀ'] +['à½', 'Ĥ'] +['âĨ', '¿'] +['âĩ', 'ı'] +['âĵ', 'ģ'] +['âĶ', '§'] +['âķ', 'ģ'] +['âķ', '¤'] +['ê¦', 'Ĺ'] +['ê¦', '¤'] +['ðŁı', 'Ī'] +['áŀ', 'ķ'] +['Ô', '½'] +['àª', 'Ĺ'] +['à¬', 'Ĩ'] +['âķ', 'ķ'] +['ï½', 'ł'] +['â¼', '¦'] +['â¼', '¯'] +['â¾', '·'] +['âĶ', 'ĸ'] +['à¬', 'ĵ'] +['âĺ', 'Ĺ'] +['âį', 'ĭ'] +['ï¨', 'Ŀ'] +['â¼', '¥'] +['ï¦', 'ª'] +['âĦ', 'Ĭ'] +['ãĢ', '´'] +['âį', '¢'] +['ð¡', 'Ī'] +['ð¡Ī', '½'] +['ï©', '¨'] +['ãĢ', '»'] +['ãı', 'ĥ'] +['ï¦', '¡'] +['ï¨', 'ĺ'] +['ðŁIJ', 'ĥ'] +['ðŁĨ', 'ĸ'] +['ðŁĹ', '¾'] +['ãĦ', 'ĩ'] +['Þ', 'ĭ'] +['â¼', '¼'] +['ï¨', 'Ń'] +['Þ', 'Ģ'] +['Þ', 'Ħ'] +['Þ', 'Ī'] +['Þ', 'IJ'] +['âĮ', 'Ħ'] +['â»', 'ĺ'] +['ãŁ', '¢'] +['á', 'ħ§'] +['ðIJĮ', '¿'] +['Ë', '»'] +['à²', 'Ĺ'] +['áĢ', 'ĩ'] +['áŀ', 'Ĭ'] +['âķ', 'ĩ'] +['ãĩ', '¼'] +['ãİ', '°'] +['Õ', 'Ĵ'] +['Ü', 'Ī'] +['ß', '¥'] +['à¿', 'IJ'] +['áĢ', 'Ł'] +['âĨ', '¥'] +['âķ', 'Į'] +['â½', 'Ģ'] +['â½', '°'] +['â¾', 'Ĭ'] +['ä', 'Ħ'] +['äĦ', 'Ģ'] +['ðĵ', 'IJ'] +['ðĵIJ', 'į'] +['ðŁİ', '¦'] +['âĤ', '¯'] +['âĬ', 'ĺ'] +['âĦ', 'į'] +['Ê', 'µ'] +['Ñ', '¶'] +['Ú', 'ĥ'] +['à¦', 'Ķ'] +['à´', '¦'] +['áİ', '¶'] +['áĵ', 'ķ'] +['á¹', '¨'] +['âĤ', 'ł'] +['âĩ', '°'] +['âĹ', 'Ĵ'] +['â¿', 'Ĭ'] +['ê·', '±'] +['ì¹', 'ķ'] +['íĪ', '©'] +['ïŃ', 'Ģ'] +['ðĿĴ', '¸'] +['ðĿĵ', 'Ĭ'] +['ðĿĺ', '©'] +['Ç', '¦'] +['É', '«'] +['áĬ', '¨'] +['È', '¹'] +['Ê', '¯'] +['Î', 'ª'] +['Ú', 'Ģ'] +['áĮ', '¸'] +['áİ', '»'] +['áı', 'ķ'] +['áı', '´'] +['á²', 'Ĥ'] +['á½', '¨'] +['âı', 'Ŀ'] +['âĺ', 'Ļ'] +['ëĥ', '¨'] +['ëĦ', '¼'] +['ëĪ', 'Ļ'] 
+['ë£', 'ħ'] +['ìĶ', '¼'] +['ìķ', 'Ŀ'] +['ìļ', '¬'] +['ìľ', '±'] +['ï¥', 'Ĥ'] +['ï¦', '¹'] +['ï¬', '¹'] +['ïŃ', 'ģ'] +['ï³', 'Ī'] +['ðĿĶ', 'ħ'] +['ðĿĺ', '¤'] +['ðĿĻ', 'ı'] +['ðĿĻ', 'Ļ'] +['ðŁķ', 'ī'] +['ð٧', 'Ļ'] +['á¸', 'ij'] +['ê´', '¼'] +['ëģ', 'į'] +['ëĹ', '´'] +['ëĿ', '³'] +['ë°', 'ŀ'] +['ë°', '¢'] +['ëµ', 'ĺ'] +['ìĤ', 'Ķ'] +['ìĦ', 'Ħ'] +['ì¼', 'ļ'] +['íĢ', 'ł'] +['íĬ', '±'] +['íĮ', 'ĸ'] +['ï¤', 'ij'] +['ï¦', '´'] +['ï¦', '¸'] +['ï´', 'į'] +['ðĿĺ', '·'] +['Ä', '¬'] +['Å', '¬'] +['Æ', 'Ģ'] +['Æ', 'ĭ'] +['Æ', 'ľ'] +['Ç', 'ij'] +['Ç', 'ĺ'] +['Ç', 'ŀ'] +['Ç', '¥'] +['Ç', '®'] +['É', '°'] +['É', '¶'] +['É', '·'] +['É', '½'] +['Ê', 'Ī'] +['Ê', 'IJ'] +['Ë', 'İ'] +['Ë', 'Ł'] +['Ë', '¦'] +['Ë', '¯'] +['Ï', 'IJ'] +['Ï', 'ĵ'] +['Ï', '¢'] +['Ï', '¤'] +['Ï', 'ª'] +['Ï', 'Ń'] +['Ï', '®'] +['Ï', '»'] +['Ñ', 'ł'] +['Ñ', 'Ń'] +['Ò', '¨'] +['Ó', 'Ŀ'] +['Ô', '¡'] +['Ô', '·'] +['Õ', 'ī'] +['Õ', 'ĵ'] +['Õ', 'ĸ'] +['Õ', 'ļ'] +['Õ', 'Ŀ'] +['Ö', 'İ'] +['Ø', '¿'] +['Ú', 'ħ'] +['Ú', 'į'] +['Ú', 'Ķ'] +['Û', 'Ĭ'] +['Û', '¾'] +['Ü', 'Ļ'] +['Ý', 'Ĵ'] +['Ý', 'ĺ'] +['ß', 'Ĵ'] +['ß', 'ĸ'] +['à¤', 'Ĭ'] +['à¤', 'IJ'] +['à¦', 'ı'] +['à¦', 'ĸ'] +['à§', 'Ł'] +['àª', '®'] +['àª', '¹'] +['à®', 'ħ'] +['à®', 'Ĩ'] +['à°', '¡'] +['à°', '°'] +['à²', 'ļ'] +['à²', '®'] +['à²', '¯'] +['à´', 'Ł'] +['à´', '·'] +['àµ', '¾'] +['à¶', 'ij'] +['à¶', 'ŀ'] +['à¼', '¼'] +['à½', 'ĵ'] +['áĢ', 'ĵ'] +['áĤ', '¦'] +['áĥ', 'ĸ'] +['áĥ', 'Ń'] +['áĥ', '¯'] +['áħ', '¨'] +['áħ', 'ª'] +['áĨ', '°'] +['áĪ', 'ģ'] +['áĪ', 'İ'] +['áĪ', 'ĵ'] +['áĪ', '¥'] +['áĪ', '²'] +['áĪ', '´'] +['áĪ', '»'] +['áī', 'ł'] +['áī', '²'] +['áī', '¶'] +['áĬ', '£'] +['áĬ', '¥'] +['áĬ', 'ª'] +['áĭ', 'ĺ'] +['áĭ', '²'] +['áĭ', '¶'] +['áĮ', '£'] +['áį', '¡'] +['áį', '£'] +['áİ', '¬'] +['áİ', '¾'] +['áIJ', '¡'] +['áķ', 'ķ'] +['áĸ', '±'] +['áĹ', 'IJ'] +['áĹ', 'Ń'] +['áĺ', 'ī'] +['áļ', '±'] +['áĽ', 'Ł'] +['áŀ', '¥'] +['áŁ', 'Ķ'] +['áł', '£'] +['áł', 'ª'] +['áł', '°'] +['áł', '´'] +['á¤', 'ĸ'] +['á¥', '£'] +['á', '®'] +['á®', 'ł'] +['á', '¯'] +['á¯', 'Ļ'] +['á', 
'°'] +['á°', 'į'] +['á´', 'Ĭ'] +['á´', '¾'] +['áµ', 'ģ'] +['áµ', 'İ'] +['áµ', 'ŀ'] +['áµ', '¤'] +['á¶', 'ħ'] +['á¶', 'ĺ'] +['á¶', 'Ł'] +['á¶', '¢'] +['á¶', '¤'] +['á¶', '±'] +['á¶', '»'] +['á¸', 'ī'] +['á¸', 'ŀ'] +['á¸', 'º'] +['á¹', 'ĵ'] +['á¹', 'Ĺ'] +['á¹', 'ª'] +['áº', 'Ĭ'] +['áº', 'ı'] +['áº', 'Ľ'] +['á¼', 'ĥ'] +['á¼', 'Į'] +['á¼', '¿'] +['á½', 'Ĥ'] +['á½', 'ĵ'] +['á½', 'Ĺ'] +['á½', '¦'] +['á¾', '±'] +['á¾', '´'] +['á¿', 'ĺ'] +['á¿', 'Ł'] +['á¿', '¸'] +['âģ', 'ĺ'] +['âĤ', 'ij'] +['âĤ', 'Ľ'] +['âĤ', '¿'] +['âĦ', 'ĩ'] +['âĦ', 'ŀ'] +['âĦ', '±'] +['âĩ', 'Ł'] +['âĩ', '²'] +['âĪ', '¤'] +['âĪ', '¶'] +['âī', 'Ĥ'] +['âī', '¾'] +['âĬ', '¨'] +['âĬ', '³'] +['âĬ', '·'] +['âĭ', 'Į'] +['âĭ', 'ĺ'] +['âĮ', 'ķ'] +['âĮ', '¥'] +['âĮ', 'µ'] +['âĮ', 'º'] +['âį', '£'] +['âį', '²'] +['âį', 'µ'] +['âİ', 'ĩ'] +['âı', 'ĥ'] +['âı', 'IJ'] +['âı', 'ł'] +['âı', '¤'] +['âı', '¶'] +['âı', '¸'] +['âı', '¹'] +['âij', 'Ĥ'] +['âĴ', '·'] +['âĴ', 'º'] +['âĵ', '¡'] +['âĵ', '¤'] +['âĶ', '¾'] +['âĸ', 'ĺ'] +['âĸ', 'µ'] +['âĹ', 'ª'] +['âĹ', '·'] +['âĺ', '¨'] +['âĺ', '«'] +['âĺ', '²'] +['âĺ', '³'] +['âĻ', 'Ĩ'] +['âļ', '¤'] +['âļ', '¥'] +['âĽ', 'ĵ'] +['âĽ', '´'] +['âĽ', '¾'] +['âŀ', '«'] +['âŀ', '¿'] +['âŁ', '·'] +['â¤', 'ij'] +['â¤', '«'] +['â¤', '¶'] +['â¤', '½'] +['â§', 'ª'] +['â¨', 'Ģ'] +['â', '©½'] +['â¬', '¡'] +['â¬', '¢'] +['â¬', '¤'] +['â²', 'ĸ'] +['â²', 'ª'] +['âµ', 'Ģ'] +['â¸', '®'] +['â¸', '½'] +['ãĢ', 'ł'] +['ãĢ', '·'] +['ãĦ', 'Į'] +['ãĦ', 'ĺ'] +['ãħ', 'ij'] +['ãĪ', 'İ'] +['ãĪ', 'IJ'] +['ãĬ', 'ľ'] +['ãĮ', 'ĵ'] +['ãĮ', 'ł'] +['ãİ', 'Ł'] +['ãİ', '¤'] +['ãİ', '§'] +['ã¬', '®'] +['ä', 'Ī'] +['äĪ', 'Ģ'] +['ä', '°'] +['ä°', 'Ģ'] +['ê', 'ħ'] +['êħ', 'ī'] +['êĩ', 'Ĺ'] +['ê', 'Ī'] +['êĪ', 'į'] +['ê§', 'Ĥ'] +['ê§', 'Ĭ'] +['êª', 'Ģ'] +['ê²', 'Ī'] +['ê²', 'į'] +['ê³', 'Ģ'] +['êµ', 'ł'] +['ê½', 'IJ'] +['ê¾', 'Ī'] +['ê¿', '±'] +['ëĥ', 'ı'] +['ëĦ', 'ij'] +['ëħ', '¤'] +['ëĩ', '¸'] +['ëĪ', '¼'] +['ëī', 'ħ'] +['ëĬ', '£'] +['ëĭ', 'º'] +['ëį', 'ŀ'] +['ëIJ', 'Į'] +['ëķ', '¸'] +['ëĺ', 'ł'] +['ëĻ', 'ĩ'] +['ëĻ', 'Ī'] 
+['ëľ', '½'] +['ëŀ', 'Ķ'] +['ëł', 'ľ'] +['ë£', 'IJ'] +['ë§', 'Ģ'] +['ë§', 'Ĭ'] +['ëª', 'Ģ'] +['ë¬', 'Ń'] +['ë¯', '¾'] +['ë³', 'ľ'] +['ë´', 'Ĭ'] +['ëµ', 'ī'] +['ë·', 'ľ'] +['ë¸', 'Ģ'] +['ë¹', 'ĭ'] +['ìģ', 'Ħ'] +['ìĤ', '£'] +['ìĤ', '»'] +['ìĦ', 'µ'] +['ìħ', 'Ĵ'] +['ìī', 'Ī'] +['ìī', 'Ķ'] +['ìĬ', 'Į'] +['ìĬ', 'Ļ'] +['ìIJ', '´'] +['ìĵ', 'º'] +['ìķ', 'ļ'] +['ìķ', 'º'] +['ìĸ', 'ľ'] +['ìĹ', 'ª'] +['ìĺ', 'ľ'] +['ìĻ', '¤'] +['ìļ', 'Ľ'] +['ìļ', 'º'] +['ìĿ', 'ħ'] +['ìĿ', 'ı'] +['ìĿ', 'Ń'] +['ìĿ', '¶'] +['ìł', 'Ľ'] +['ì¡', 'Ī'] +['ì¢', 'ī'] +['ì¢', 'Ķ'] +['ì©', 'ł'] +['ìŃ', 'Į'] +['ì¯', '©'] +['ì´', '£'] +['ì¸', 'ķ'] +['ì¹', 'Ł'] +['ì¾', '¡'] +['ì¿', 'Ļ'] +['íģ', 'ĩ'] +['íģ', 'ī'] +['íĩ', 'Ģ'] +['íĪ', '¶'] +['íĸ', 'ij'] +['íĸ', '¤'] +['íĹ', 'ħ'] +['íľ', 'ı'] +['íĿ', 'Ŀ'] +['ï¤', 'Ĵ'] +['ï¤', 'ķ'] +['ï¤', '¬'] +['ï¥', 'ħ'] +['ï¥', 'ĩ'] +['ï¥', 'ı'] +['ï¥', 'ļ'] +['ï¥', 'Ł'] +['ï¦', 'Ħ'] +['ï¦', 'Ī'] +['ï¦', '¨'] +['ï¦', '©'] +['ï¦', '²'] +['ï§', 'ģ'] +['ï§', 'ĥ'] +['ï§', 'Ķ'] +['ï§', 'ł'] +['ï§', '£'] +['ï§', '®'] +['ï', 'ŃIJ'] +['ïŃ', 'ĸ'] +['ïŃ', '¦'] +['ïŃ', '´'] +['ïŃ', 'µ'] +['ïŃ', '¶'] +['ïŃ', '¸'] +['ï®', 'Į'] +['ï®', 'İ'] +['ï®', 'ŀ'] +['ï®', 'Ł'] +['ï®', '¡'] +['ï®', 'ª'] +['ï¯', 'Ķ'] +['ï¯', 'Ĺ'] +['ï¯', 'ļ'] +['ï¯', 'Ľ'] +['ï¯', 'Ŀ'] +['ï¯', 'Ł'] +['ï¯', '§'] +['ï¯', '¨'] +['ï¯', '«'] +['ï¯', '¯'] +['ï¯', '°'] +['ï¯', '±'] +['ï¯', '²'] +['ï¯', '³'] +['ï¯', '´'] +['ï¯', 'µ'] +['ï¯', '¶'] +['ï°', 'Ģ'] +['ï±', 'ħ'] +['ï±', 'Ķ'] +['ï±', '´'] +['ï²', 'ģ'] +['ï³', 'ķ'] +['ï·', '½'] +['ï¸', 'ķ'] +['ï¸', '±'] +['ï¹', '£'] +['ï¹', '½'] +['ï»', 'į'] +['ï¾', '±'] +['ðĿIJ', 'Ļ'] +['ðĿIJ', '½'] +['ðĿij', '¤'] +['ðĿij', '®'] +['ðĿij', 'µ'] +['ðĿĴ', 'ĥ'] +['ðĿĴ', 'Ħ'] +['ðĿĵ', 'Ń'] +['ðĿĵ', '·'] +['ðĿĶ', 'ĸ'] +['ðĿĶ', 'ŀ'] +['ðĿĶ', '¢'] +['ðĿĶ', '¦'] +['ðĿĶ', '¬'] +['ðĿķ', 'Ħ'] +['ðĿķ', 'Ĭ'] +['ðĿķ', 'İ'] +['ðĿķ', 'Ļ'] +['ðĿķ', 'ľ'] +['ðĿķ', 'Ń'] +['ðĿķ', '³'] +['ðĿķ', '¸'] +['ðĿķ', '¾'] +['ðĿ', 'ĸī'] +['ðĿĸ', 'ı'] +['ðĿĺ', 'ĩ'] +['ðĿĺ', 'ī'] +['ðĿĺ', 'ĸ'] +['ðĿĺ', 'Ľ'] +['ðĿĺ', 
'ŀ'] +['ðĿĺ', '«'] +['ðĿĺ', '¾'] +['ðĿĻ', 'ĩ'] +['ðĿĻ', 'ī'] +['ðĿĻ', 'ĭ'] +['ðĿĻ', 'İ'] +['ðĿĻ', 'ĺ'] +['ðĿĻ', '¥'] +['ðĿļ', 'ĥ'] +['ðĿļ', 'IJ'] +['ðĿļ', 'Ķ'] +['ðĿľ', 'ĥ'] +['ðŁĦ', '·'] +['ðŁħ', 'Ŀ'] +['ðŁħ', '¾'] +['ðŁĨ', 'Ĥ'] +['ðŁĨ', 'ĵ'] +['ðŁĮ', 'Ĥ'] +['ðŁĮ', 'Ĩ'] +['ðŁĮ', 'ī'] +['ðŁĮ', 'ij'] +['ðŁĮ', 'ĺ'] +['ðŁĮ', '©'] +['ðŁĮ', '«'] +['ðŁį', '¢'] +['ðŁį', '¥'] +['ðŁİ', 'Ľ'] +['ðŁİ', '¢'] +['ðŁİ', '´'] +['ðŁij', '¡'] +['ðŁĴ', '¾'] +['ðŁĵ', 'Ń'] +['ðŁĶ', 'Ī'] +['ðŁĶ', '¦'] +['ðŁĶ', '²'] +['ðŁĶ', '³'] +['ðŁķ', 'ĵ'] +['ðŁķ', 'ķ'] +['ðŁķ', 'ĺ'] +['ðŁķ', 'Ł'] +['ðŁķ', '·'] +['ðŁĹ', '³'] +['ðŁļ', 'Ħ'] +['ðŁļ', 'Ķ'] +['ðŁļ', 'ĸ'] +['ðŁĽ', 'IJ'] +['ðŁĽ', '¤'] +['ðŁĽ', '¸'] +['ðŁ', 'ł'] +['ðŁł', '³'] +['ð٤', '¹'] +['ðŁ¥', 'ĥ'] +['ðŁ¥', '¨'] +['ðŁ¥', 'ª'] +['ðŁ¥', '¾'] +['ð٦', 'ĥ'] +['ð٦', 'Ĵ'] +['ð٦', 'Ļ'] +['ð٦', '¶'] +['ð٧', 'ł'] +['ð٧', 'ª'] +['ð٧', 'Ń'] +['ð٧', '²'] +['ð£', '·'] +['ð£·', 'Ń'] +['ð¦', 'ĺ'] +['ð¦ĺ', 'Ĵ'] +['Æ', 'ij'] +['Ç', 'Ļ'] +['È', '®'] +['Ø', 'ł'] +['Ú', 'Ħ'] +['Ü', 'Ģ'] +['ß', '¢'] +['áī', 'Ģ'] +['áĬ', 'IJ'] +['áİ', 'ł'] +['áº', 'ŀ'] +['ëĪ', 'ŀ'] +['ëķ', 'Ł'] +['ë£', 'ģ'] +['ë¤', 'Ĺ'] +['ìĦ', '¥'] +['ìħ', 'ij'] +['ìĸ', 'IJ'] +['ìĽ', 'Ľ'] +['ì£', 'ķ'] +['íİ', 'ı'] +['íĽ', 'ĵ'] +['ï¥', 'º'] +['ï³', 'Ľ'] +['ï´', '«'] +['ðĸ', '§'] +['ðĸ§', '·'] +['ðĿķ', 'ģ'] +['ðŁIJ', 'ª'] +['ðŁĴ', 'Ī'] +['ðŁĵ', 'ł'] +['ðŁķ', 'Ľ'] +['ðŁķ', '´'] +['Ñ', 'Ŀ'] +['Ó', 'Ĭ'] +['à¥', '²'] +['àª', 'ª'] +['áĥ', '¤'] +['áį', 'IJ'] +['á¶', '°'] +['á¼', 'Ŀ'] +['á½', '©'] +['âĭ', 'ĭ'] +['âĴ', '½'] +['âĻ', '¾'] +['â', '½Ķ'] +['â¾', '¯'] +['ãĦ', 'Ĵ'] +['ãħ', 'ļ'] +['ëIJ', 'į'] +['ë·', 'ģ'] +['ìĭ', 'Ģ'] +['ìļ', 'Ŀ'] +['ì¥', '°'] +['ìº', '´'] +['íĭ', 'ī'] +['íĿ', '½'] +['ï¦', 'Ģ'] +['ï¦', '¿'] +['ï§', 'ħ'] +['ï§', 'ĵ'] +['ïŃ', '¯'] +['ï®', 'Ĩ'] +['ðIJ¤', 'ķ'] +['ðĿIJ', 'Ł'] +['ðĿĴ', 'ħ'] +['ðĿĵ', 'ľ'] +['ðĿĶ', '°'] +['ðĿĶ', '»'] +['ðĿĺ', 'į'] +['ðĿĻ', '¯'] +['ðŁĦ', '½'] +['ðŁħ', 'Ĥ'] +['ðŁħ', 'Ķ'] +['ðŁħ', '½'] +['ðŁĵ', '´'] +['ð٧', 'ĸ'] +['Ó', 'Ĵ'] +['á¸', '²'] +['ëī', '¼'] 
+['Ç', 'ı'] +['È', 'ĵ'] +['Ê', '¸'] +['Õ', 'Ĥ'] +['Û', 'ħ'] +['ß', '¡'] +['ß', '£'] +['à®', '¯'] +['à°', 'Ī'] +['à²', '¸'] +['àº', '®'] +['à¼', 'ķ'] +['áĢ', 'İ'] +['áĨ', '¡'] +['áIJ', 'ĭ'] +['áIJ', 'ķ'] +['áij', '¯'] +['áŀ', 'Ĩ'] +['á¨', 'ķ'] +['á©', 'Ī'] +['âģ', 'ħ'] +['âĨ', 'ļ'] +['âĶ', 'İ'] +['âł', '©'] +['â²', 'Ĥ'] +['â²', 'Ķ'] +['â²', '¨'] +['ãĬ', 'ļ'] +['íĵ', '²'] +['ðĿij', 'Ī'] +['ðĿij', '¬'] +['ðĿij', '¹'] +['ðĿĴ', '¾'] +['ðĿĵ', '±'] +['ðĿĵ', '½'] +['ðĿķ', '¯'] +['ðĿķ', '»'] +['ðĿĺ', '½'] +['ðĿļ', 'Ĩ'] +['ðŁĦ', '°'] +['ðŁIJ', '¨'] +['Ò', 'ķ'] +['à²', 'ħ'] +['ï¨', 'Ĩ'] +['ðĿij', '°'] +['ðŁĦ', '¸'] +['Ô', 'İ'] +['Ø', 'į'] +['Ù', 'µ'] +['à²', '¶'] +['áĢ', 'Ī'] +['áĺ', 'Ĺ'] +['áł', '¸'] +['á¡', '¡'] +['á¨', '²'] +['á©', 'ģ'] +['á´', '·'] +['áµ', '§'] +['âķ', '¨'] +['âļ', 'ģ'] +['â¾', 'Ŀ'] +['ãĢ', '¼'] +['ãĦ', 'ı'] +['êĴ', '«'] +['ê¦', '¥'] +['ê¦', '©'] +['ê¦', '²'] +['ìĺ', '¼'] +['íĵ', 'IJ'] +['ðĵ', 'ĩ'] +['ðĵĩ', '¼'] +['ðĿķ', '¿'] +['ðŁĽ', '´'] +['ë¨', 'ľ'] +['à²', 'µ'] +['à´', 'İ'] +['à¼', 'Ģ'] +['âĩ', 'ĸ'] +['ãĪ', '«'] +['âĵ', 'Ģ'] +['áħ', '´'] +['áļ', '¾'] +['áĽ', 'ŀ'] +['áĽ', '«'] +['á¥', '´'] +['âĨ', 'Ľ'] +['âĨ', '¶'] +['âĩ', '¤'] +['âķ', 'Ł'] +['âĺ', '·'] +['âļ', 'IJ'] +['ð٧', '´'] +['á¹', '³'] +['âĶ', 'į'] +['âĶ', 'Ĵ'] +['âĶ', '©'] +['âĶ', '¦'] +['â¾', 'µ'] +['àª', 'ľ'] +['àª', '¤'] +['âĩ', 'Ļ'] +['âĶ', '±'] +['âķ', 'Ģ'] +['â½', 'Ĭ'] +['ï½', 'Ł'] +['à¬', '¡'] +['ðł', '®'] +['ðł®', '·'] +['âķ', 'ĥ'] +['â°', 'Ķ'] +['ãĬ', '¦'] +['ðŁİ', 'IJ'] +['ãĩ', '°'] +['â¼', 'Ŀ'] +['â¾', 'Ķ'] +['â½', 'Ĵ'] +['âł', 'Ĵ'] +['ï¨', '¦'] +['ï©', 'Ĵ'] +['ï¨', '²'] +['ï©', 'ĸ'] +['ðĵı', '¸'] +['ãĮ', 'ĥ'] +['ðĸ', '¤'] +['ðĸ¤', 'IJ'] +['ï¦', 'Ń'] +['âĬ', 'ħ'] +['â¾', '³'] +['ä´', '¥'] +['ï©', 'ķ'] +['ðŁĮ', 'Ķ'] +['áŀ', 'ĭ'] +['âļ', 'į'] +['â¼', 'ĭ'] +['ãİ', 'ĺ'] +['ðIJĮ', '²'] +['É', '©'] +['áİ', 'ij'] +['âĨ', '®'] +['âĩ', 'ĥ'] +['âļ', 'İ'] +['ãĩ', '±'] +['ãĭ', '©'] +['ãĮ', '¶'] +['êĻ', 'ª'] +['ëİ', '¬'] +['ï¨', 'IJ'] +['ï¨', 'Ľ'] +['ï©', 'Ĭ'] +['ï©', 'į'] +['ðĵ', 'ħ'] +['ðĵħ', 
'º'] +['Ï', '¡'] +['È', 'ij'] +['É', 'Ĥ'] +['Ô', 'ĵ'] +['ß', 'İ'] +['à´', '§'] +['áĢ', 'ī'] +['áĢ', 'ĭ'] +['áĢ', 'ij'] +['áĢ', 'ł'] +['áļ', 'Ļ'] +['á¨', 'Ħ'] +['á¨', '©'] +['á¨', '¹'] +['á©', 'ĵ'] +['á¬', 'ľ'] +['á´', 'Ļ'] +['áµ', 'ij'] +['âĤ', 'Ń'] +['âĨ', '°'] +['âľ', 'ģ'] +['â½', 'IJ'] +['ãĭ', '¯'] +['ãĮ', '½'] +['íĨ', '¢'] +['ï¤', '¿'] +['ðŁ', 'Ĥ'] +['ðŁĤ', '»'] +['È', 'Ĵ'] +['Í', 'º'] +['Ô', '¥'] +['Õ', 'ij'] +['Ú', '¶'] +['à§', 'İ'] +['à¶', '®'] +['àº', 'ĸ'] +['àº', 'ľ'] +['àº', '½'] +['áĥ', '»'] +['áħ', '¯'] +['áĭ', 'ŀ'] +['áĸ', 'ķ'] +['á', '´Ī'] +['á¶', 'Ĩ'] +['á¸', 'ľ'] +['á¹', '¼'] +['á¿', '¨'] +['âĦ', 'ĭ'] +['âĦ', 'Ń'] +['âĪ', '±'] +['âĮ', 'ĵ'] +['âĶ', 'ĩ'] +['âĶ', '¢'] +['â±', '®'] +['â²', 'Ħ'] +['ãĩ', '¾'] +['ãĪ', '¬'] +['ë¸', '¡'] +['ìIJ', 'ī'] +['íĻ', 'Ľ'] +['ðĿķ', 'ª'] +['Æ', '¹'] +['Í', '²'] +['Ó', 'ģ'] +['Û', '¼'] +['à¦', '«'] +['áħ', 'Ł'] +['áī', 'Ĩ'] +['áį', 'Ī'] +['áº', 'ĸ'] +['á½', 'ī'] +['âĶ', '¸'] +['â½', '©'] +['ê', 'ľ'] +['êľ', '¥'] +['êµ', 'ħ'] +['ëĤ', 'Ķ'] +['ëĦ', 'ł'] +['ëĩ', 'Ĺ'] +['ëĻ', 'Ŀ'] +['ìļ', '¯'] +['ìļ', '·'] +['ìŁ', 'Ľ'] +['ì·', 'IJ'] +['íŁ', '¬'] +['íŁ', '®'] +['íŁ', '°'] +['ï¦', 'Ĩ'] +['ï¦', '±'] +['ï²', 'ŀ'] +['ï³', '¤'] +['ï³', '¥'] +['ðIJĮ', '¸'] +['ðĿĶ', 'ı'] +['ðĿķ', '®'] +['ðĿĺ', '£'] +['à¦', 'Ī'] +['âı', 'ı'] +['ãĦ', 'ĸ'] +['ê²', 'ĩ'] +['ëĸ', 'ĺ'] +['ëľ', '·'] +['ëŀ', 'Ĵ'] +['ë¡', 'ĵ'] +['ë¢', 'ī'] +['ë£', 'ĥ'] +['ë§', 'ĭ'] +['ë²', 'ĭ'] +['ìĤ', '·'] +['ìĪ', 'ķ'] +['ì', 'Į¨'] +['ìĵ', '»'] +['ìĸ', 'Ĭ'] +['ìĻ', '¬'] +['ìĿ', '»'] +['ì¦', 'ģ'] +['ìµ', '¤'] +['ì·', 'ĥ'] +['íĢ', 'ľ'] +['íħ', 'ī'] +['íį', 'ł'] +['íı', 'ħ'] +['íij', '±'] +['íķ', 'ķ'] +['íĸ', 'ł'] +['íĿ', 'ķ'] +['Æ', 'Ļ'] +['Æ', 'ļ'] +['Æ', 'ŀ'] +['Ç', 'ĥ'] +['Ç', 'Ĭ'] +['Ç', 'ľ'] +['Ç', '¤'] +['Ç', 'Ń'] +['Ç', '¹'] +['È', 'Ģ'] +['È', 'ģ'] +['È', 'ħ'] +['È', 'ī'] +['È', 'Ĺ'] +['È', 'Ł'] +['È', '¤'] +['È', '¥'] +['È', '¨'] +['È', 'µ'] +['È', 'º'] +['È', '»'] +['É', 'Į'] +['É', '®'] +['Ê', 'ħ'] +['Ê', '¥'] +['Ê', '¨'] +['Ë', 'ĵ'] +['Ë', 'Ķ'] +['Ë', 'ł'] +['Ë', 
'£'] +['Ë', '¸'] +['Í', '´'] +['Ï', 'Ĺ'] +['Ï', 'ĺ'] +['Ï', 'Ļ'] +['Ï', 'ļ'] +['Ï', 'Ŀ'] +['Ï', '¨'] +['Ï', '¬'] +['Ï', '¾'] +['Ï', '¿'] +['Ñ', 'ª'] +['Ò', 'Ģ'] +['Ò', 'ľ'] +['Ò', '¼'] +['Ò', '½'] +['Ó', 'Ĥ'] +['Ó', 'ħ'] +['Ó', 'ĩ'] +['Ó', 'į'] +['Ó', 'ĸ'] +['Ó', 'Ł'] +['Ó', '«'] +['Ó', '±'] +['Ô', 'Ĩ'] +['Ô', 'ĩ'] +['Ô', 'º'] +['Õ', 'ĭ'] +['Ö', 'ī'] +['Ø', 'Ī'] +['Ø', 'Ĭ'] +['Ø', '½'] +['Ø', '¾'] +['Ù', '·'] +['Ú', 'Ĥ'] +['Ú', 'Ĭ'] +['Ú', 'ĸ'] +['Ú', 'Ĺ'] +['Ú', '£'] +['Ú', '«'] +['Ú', '¸'] +['Û', 'Ģ'] +['Û', 'į'] +['Û', '½'] +['Ü', 'ī'] +['Ü', '¤'] +['Ý', '§'] +['Ý', '´'] +['Þ', 'ĥ'] +['Þ', '¤'] +['Þ', '¥'] +['ß', 'ļ'] +['ß', 'Ľ'] +['ß', '¤'] +['àł', 'į'] +['àł', 'ĵ'] +['àł', '³'] +['à¡', '¢'] +['à¥', 'ł'] +['à§', 'ł'] +['à§', 'º'] +['à¨', 'Ĭ'] +['à¨', 'IJ'] +['à¨', '®'] +['à¨', '¯'] +['à¨', '°'] +['à¨', '¸'] +['àª', 'Ĩ'] +['àª', '³'] +['àª', 'µ'] +['àª', '½'] +['à¬', 'Į'] +['à¬', 'ĺ'] +['à¬', '½'] +['à®', 'ĥ'] +['à®', '¸'] +['à°', 'Ĩ'] +['à°', 'ķ'] +['à°', '¦'] +['à²', 'Ĩ'] +['à²', 'Ĭ'] +['à²', 'Į'] +['à²', 'IJ'] +['à²', 'Ľ'] +['à²', '¤'] +['à²', '¦'] +['à²', 'ª'] +['à²', '²'] +['à²', '¹'] +['à´', 'Ĩ'] +['à´', 'ı'] +['à´', 'Ĺ'] +['à´', '«'] +['à´', '¹'] +['àµ', 'º'] +['àµ', '½'] +['à¶', 'ħ'] +['à¶', 'Ĭ'] +['à¶', 'Ķ'] +['à¶', '§'] +['à¶', '«'] +['à¶', '°'] +['à¼', 'Ħ'] +['à¼', 'ħ'] +['à¼', 'Ĭ'] +['à½', 'Ļ'] +['à½', '¡'] +['à½', '§'] +['à¿', 'Ģ'] +['à¿', 'Ļ'] +['áĢ', 'Ŀ'] +['áĢ', '§'] +['áĢ', '©'] +['áĢ', '¿'] +['áģ', 'µ'] +['áĤ', 'ģ'] +['áĤ', '½'] +['áĥ', 'Ĥ'] +['áĥ', 'ª'] +['áĦ', 'Ĭ'] +['áĦ', '¢'] +['áħ', '¦'] +['áħ', 'Ń'] +['áĨ', '®'] +['áĨ', '±'] +['áĨ', '»'] +['á', 'ĩ'] +['áĩ', 'Ĥ'] +['áĪ', 'ħ'] +['áĪ', 'ī'] +['áĪ', 'Į'] +['áĪ', 'IJ'] +['áĪ', 'Ĵ'] +['áĪ', 'Ļ'] +['áĪ', 'ļ'] +['áĪ', 'ľ'] +['áĪ', 'ŀ'] +['áĪ', '©'] +['áĪ', '³'] +['áĪ', 'º'] +['áĪ', '½'] +['áī', 'ħ'] +['áī', '¢'] +['áī', '±'] +['áī', '´'] +['áĬ', 'ĥ'] +['áĬ', 'į'] +['áĬ', 'ĸ'] +['áĬ', '®'] +['áĬ', '¸'] +['áĭ', 'Ľ'] +['áĭ', 'Ŀ'] +['áĭ', '³'] +['áĮ', 'ģ'] +['áĮ', 'ħ'] +['áĮ', '¥'] +['áĮ', '¦'] 
+['á', 'Į¨'] +['áį', 'Ĭ'] +['áį', 'į'] +['áį', 'ķ'] +['áį', 'ĸ'] +['áį', '¢'] +['áį', '¤'] +['áİ', 'Ĵ'] +['áİ', 'ª'] +['áı', 'ģ'] +['áı', 'IJ'] +['áı', 'Ł'] +['áIJ', 'Ĥ'] +['áIJ', 'ĸ'] +['áIJ', 'Ŀ'] +['áIJ', 'ŀ'] +['áIJ', 'Ł'] +['áIJ', 'ł'] +['áij', 'ĸ'] +['áĴ', 'ĭ'] +['áĴ', 'į'] +['áĴ', '¡'] +['áĵ', '«'] +['áĶ', 'ķ'] +['áķ', 'ĭ'] +['áķ', 'ij'] +['áķ', 'Ļ'] +['áķ', 'ļ'] +['áķ', 'Ľ'] +['áķ', '¤'] +['áķ', '¦'] +['áķ', '®'] +['áķ', '¼'] +['áĸ', 'ĵ'] +['áĹ', 'Ĺ'] +['áĹ', '¢'] +['áĹ', '¯'] +['áĹ', '·'] +['áĺ', 'Ħ'] +['áĺ', 'ij'] +['áĽ', 'Ĥ'] +['áĽ', 'Ļ'] +['áŀ', 'į'] +['áł', 'Ĩ'] +['áł', '¡'] +['áł', '¦'] +['áł', '®'] +['áł', '¯'] +['áł', '²'] +['áł', '·'] +['á¡', 'į'] +['á¡', 'ŀ'] +['á¡', '¤'] +['á', '¡´'] +['á¡', 'µ'] +['á¤', 'ĵ'] +['á¥', 'ĸ'] +['á¥', '°'] +['á¨', '¦'] +['á¨', '§'] +['á¨', '¨'] +['á¨', 'ª'] +['á¨', '¬'] +['á¨', '¯'] +['á¨', '³'] +['á¨', 'µ'] +['á©', 'ĥ'] +['á¬', 'ķ'] +['áŃ', '£'] +['á', '±'] +['á±', 'ļ'] +['á²', 'ł'] +['á´', 'ĵ'] +['á´', '¶'] +['áµ', 'Ĥ'] +['áµ', 'Į'] +['áµ', '¥'] +['áµ', '´'] +['á¶', 'ĩ'] +['á¸', 'Ī'] +['á¸', 'ł'] +['á¸', '§'] +['á¸', '´'] +['á¸', '¾'] +['á¹', 'Ģ'] +['á¹', 'ĸ'] +['á¹', 'Ł'] +['á¹', 'ł'] +['á¹', '«'] +['á¹', '±'] +['á¹', '·'] +['á¹', '¿'] +['áº', 'Ħ'] +['áº', 'į'] +['áº', 'ij'] +['áº', 'Ĺ'] +['á¼', 'ī'] +['á¼', 'ĵ'] +['á¼', 'Ń'] +['á½', 'ĭ'] +['á½', 'Ĵ'] +['á½', 'ł'] +['á½', '£'] +['á¾', 'Ħ'] +['á¾', 'ı'] +['á¾', 'ij'] +['á¾', 'Ĺ'] +['á¾', '¦'] +['á¾', '§'] +['á¾', '¾'] +['á¿', 'Ħ'] +['á¿', 'ĵ'] +['á¿', '¡'] +['á¿', '¬'] +['âģ', 'ļ'] +['âĤ', 'Į'] +['âĦ', 'ģ'] +['âĦ', 'Ķ'] +['âĦ', '£'] +['âĦ', '§'] +['âĦ', '¯'] +['âĦ', '°'] +['âĦ', '´'] +['âħ', 'ħ'] +['âĨ', 'ľ'] +['âĨ', '«'] +['âĨ', 'Ń'] +['âĨ', '±'] +['âĨ', '¹'] +['âĨ', '½'] +['âĩ', 'ĩ'] +['âĩ', 'ľ'] +['âĩ', 'µ'] +['âĪ', 'ī'] +['âĪ', 'Ĭ'] +['âĪ', 'ĸ'] +['âĪ', 'ľ'] +['âĪ', '¾'] +['âī', 'Ģ'] +['âī', 'ĭ'] +['âī', 'Į'] +['âī', 'ĵ'] +['âī', 'ľ'] +['âī', '´'] +['âī', '¿'] +['âĬ', 'Ĭ'] +['âĬ', 'ĭ'] +['âĬ', 'Ķ'] +['âĬ', 'ĸ'] +['âĬ', '£'] +['âĬ', '¦'] +['âĭ', 'İ'] +['âĭ', 'ª'] 
+['âĭ', '²'] +['âĮ', '¦'] +['âĮ', '§'] +['âį', 'º'] +['âİ', 'Ī'] +['âİ', '¨'] +['âİ', '¬'] +['âİ', '³'] +['âİ', '¼'] +['âİ', '¾'] +['âı', 'Į'] +['âı', 'ļ'] +['âı', '«'] +['âı', '¯'] +['âı', 'µ'] +['âĴ', 'ľ'] +['âĴ', 'Ŀ'] +['âĴ', '«'] +['âĵ', 'Ħ'] +['âĵ', 'Ĭ'] +['âĵ', 'Ļ'] +['âĵ', '©'] +['âĶ', 'ij'] +['âĶ', 'Ļ'] +['âĶ', 'ļ'] +['âĶ', '¥'] +['âķ', 'ħ'] +['âķ', 'ī'] +['âķ', 'į'] +['âķ', 'ı'] +['âķ', 'ŀ'] +['âĸ', 'ļ'] +['âĸ', '¯'] +['âĹ', 'ĥ'] +['âĹ', 'ļ'] +['âĹ', '¬'] +['âĹ', '´'] +['âĺ', 'Ī'] +['âĺ', '¤'] +['âĺ', '¥'] +['âĺ', '§'] +['âĺ', '¬'] +['âĻ', 'ģ'] +['âĻ', '±'] +['âļ', 'ĥ'] +['âļ', 'Ħ'] +['âļ', 'ħ'] +['âļ', 'ı'] +['âļ', 'ļ'] +['âļ', 'ŀ'] +['âļ', 'Ł'] +['âļ', '±'] +['âļ', '²'] +['âľ', 'Ģ'] +['âľ', 'Ł'] +['âľ', '¢'] +['âĿ', 'µ'] +['âŁ', '¡'] +['âŁ', '¦'] +['âŁ', '§'] +['âŁ', '³'] +['âŁ', '¾'] +['âŁ', '¿'] +['âł', 'ĩ'] +['â¤', 'Ħ'] +['â¤', 'º'] +['â¥', 'Ĥ'] +['â¥', '¹'] +['â§', 'ī'] +['â§', '¼'] +['â§', '½'] +['â¨', 'į'] +['â¬', 'Ĭ'] +['â¬', 'Ł'] +['âŃ', 'ŀ'] +['â®', 'ŀ'] +['â®', '³'] +['â¯', 'Ī'] +['â¯', 'ij'] +['â±', 'ł'] +['â±', '±'] +['â²', 'Ń'] +['â´', '¹'] +['âµ', 'ķ'] +['â¸', '¾'] +['â', 'º«'] +['â¼', 'Ĩ'] +['â¼', 'ł'] +['â½', 'Ł'] +['â½', '¼'] +['â¾', 'Ľ'] +['â¾', '§'] +['â¿', 'ĥ'] +['â¿', '»'] +['ãĤ', 'ķ'] +['ãĤ', 'Ł'] +['ãĦ', 'Ľ'] +['ãĦ', '¡'] +['ãĦ', '¶'] +['ãĦ', 'º'] +['ãħ', 'Ĵ'] +['ãħ', 'Ł'] +['ãĨ', 'Ģ'] +['ãĩ', '»'] +['ãĪ', 'ij'] +['ãĪ', 'Ń'] +['ãĪ', '®'] +['ãĪ', '³'] +['ãĪ', '¹'] +['ãī', '¥'] +['ãī', '¦'] +['ãī', '¹'] +['ãī', '¿'] +['ãĬ', 'ŀ'] +['ãĬ', '¨'] +['ãĭ', 'ij'] +['ãĭ', '¥'] +['ãĭ', '´'] +['ãĭ', 'º'] +['ãİ', 'Ħ'] +['ãİ', 'ķ'] +['ãİ', '¯'] +['ãı', 'Ĥ'] +['ãı', 'Ī'] +['ãı', 'ĵ'] +['ãı', 'ĸ'] +['ãı', '±'] +['ãIJ', '±'] +['ãŁ', 'ģ'] +['ã', '¢'] +['ã¢', '¨'] +['ã', '¨'] +['ã¨', '³'] +['ã«', 'ª'] +['ã«', '´'] +['ã¶', '³'] +['ãº', '¾'] +['ä', 'Ģ'] +['äĢ', 'Ģ'] +['ä', 'ĭ'] +['äĭ', 'Į'] +['ä', 'ĮĢ'] +['äIJ', 'Ģ'] +['ä', 'łĢ'] +['ä', 'ł'] +['äł', '¼'] +['ä', '§'] +['ä§', 'ŀ'] +['ä¨', '°'] +['ä¨', 'º'] +['ä', '´Ģ'] +['ä', '·'] +['ä·', 'ħ'] +['ä', 
'·¸'] +['ê', 'Ĥ'] +['êĤ', '«'] +['ê', 'Į'] +['êĮ', '¼'] +['ê', 'į'] +['êį', '²'] +['êĴ', 'µ'] +['ê', 'ĵ'] +['êĵ', '½'] +['êĻ', 'Ń'] +['êĿ', 'Ľ'] +['êĿ', '¥'] +['ê', 'ŀ'] +['êŀ', 'Ĭ'] +['ê¦', 'Ĩ'] +['ê¦', 'ĩ'] +['ê¦', 'Ł'] +['ê¦', '¨'] +['ê§', 'Ī'] +['ê', '©'] +['ê©', 'Ł'] +['êª', 'ĭ'] +['êª', 'ij'] +['êª', 'ķ'] +['êª', 'Ĺ'] +['êª', 'ľ'] +['êª', '®'] +['êª', '±'] +['êª', '»'] +['êª', '¼'] +['ê«', 'Ģ'] +['ê«', 'Ŀ'] +['ê°', 'ĥ'] +['ê°', 'ĺ'] +['ê±', 'ľ'] +['ê²', 'ĵ'] +['ê²', 'ļ'] +['ê³', 'Ļ'] +['ê³', '¾'] +['ê´', 'Ĺ'] +['ê´', 'Ļ'] +['êµ', 'Ľ'] +['ê¶', 'ĥ'] +['ê¶', 'ķ'] +['ê¶', '¨'] +['ê¸', '©'] +['ê¸', '¿'] +['ê', '¹Ħ'] +['ê¹', 'Ĩ'] +['ê¹', 'ī'] +['ê¹', 'ĵ'] +['ê¹', '¢'] +['ê¹', '£'] +['ê¹', '¸'] +['êº', '³'] +['ê¿', 'ı'] +['ê¿', 'ķ'] +['ê¿', '§'] +['ëĢ', '©'] +['ëģ', 'ħ'] +['ëĥ', 'µ'] +['ëĦ', 'ĸ'] +['ëĦ', 'Ĺ'] +['ëĦ', '¢'] +['ëħ', 'Ĥ'] +['ëĨ', 'IJ'] +['ëĩ', 'ľ'] +['ëĪ', 'ĭ'] +['ëĪ', 'ļ'] +['ëī', 'į'] +['ëī', '¨'] +['ëĬ', 'ļ'] +['ëĬ', '¡'] +['ëĭ', 'ľ'] +['ëĭ', 'ª'] +['ëĮ', 'ĺ'] +['ëĮ', '¤'] +['ëĮ', '¸'] +['ëİ', 'Ł'] +['ëı', '¨'] +['ëIJ', 'Ħ'] +['ëIJ', 'ı'] +['ëIJ', '´'] +['ëIJ', '¸'] +['ëij', 'ģ'] +['ëij', '¿'] +['ëĴ', '¨'] +['ëĵ', '·'] +['ëĶ', '®'] +['ëĶ', '²'] +['ëķ', '§'] +['ëĸ', 'Ķ'] +['ëĸ', 'ª'] +['ëĺ', 'Ń'] +['ëļ', 'Ģ'] +['ëļ', 'ł'] +['ëĽ', 'Ķ'] +['ëĽ', '©'] +['ëľ', 'ħ'] +['ëŀ', 'ķ'] +['ëŀ', '°'] +['ëŁ', 'IJ'] +['ëł', '¡'] +['ë¡', 'ŀ'] +['ë¡', '£'] +['ë¡', 'µ'] +['ë£', 'Ħ'] +['ë£', 'į'] +['ë¤', '³'] +['ë¦', 'į'] +['ë¦', 'ı'] +['ë¦', '³'] +['ë§', 'Ħ'] +['ë§', 'Ĩ'] +['ë§', 'į'] +['ë§', 'ľ'] +['ë§', '«'] +['ë§', '»'] +['ë¨', '®'] +['ë©', 'Ĥ'] +['ë©', 'Ń'] +['ëª', '´'] +['ë¬', 'ľ'] +['ë¬', 'ł'] +['ë¬', '«'] +['ë¬', '¾'] +['ëŃ', '¬'] +['ë®', 'ĺ'] +['ë®', '¹'] +['ë¯', 'ķ'] +['ë¯', 'ľ'] +['ë°', '¨'] +['ë°', 'ª'] +['ë±', 'Ķ'] +['ë²', 'ĺ'] +['ë²', 'Ľ'] +['ë²', '±'] +['ë²', '´'] +['ë´', '½'] +['ëµ', '¤'] +['ëµ', '¨'] +['ë·', 'Ĺ'] +['ë·', 'ĺ'] +['ë¸', 'ĵ'] +['ë¸', 'ľ'] +['ë¹', 'ª'] +['ëº', 'ĥ'] +['ëº', 'ĺ'] +['ëº', 'µ'] +['ë»', '´'] +['ë¼', 'IJ'] +['ë¾', 'Ķ'] +['ìģ', 'Ń'] 
+['ìĤ', 'ł'] +['ìĤ', '®'] +['ìĥ', 'ı'] +['ìĥ', 'Ļ'] +['ìĦ', 'º'] +['ìħ', '¢'] +['ìĨ', 'Ģ'] +['ìĨ', 'ħ'] +['ìĨ', '¤'] +['ìĨ', '¦'] +['ìĨ', '¬'] +['ìĩ', '±'] +['ìĪ', 'µ'] +['ìĭ', '¨'] +['ìĭ', '´'] +['ìĮ', '°'] +['ìį', 'ľ'] +['ìİ', 'Ĺ'] +['ìİ', 'ĺ'] +['ìİ', '¼'] +['ìij', 'ī'] +['ìij', 'Ŀ'] +['ìij', '»'] +['ìĴ', 'Ķ'] +['ìĴ', '¯'] +['ìĵ', '©'] +['ìķ', 'IJ'] +['ìķ', 'ĸ'] +['ìĸ', 'ł'] +['ìĸ', '¾'] +['ìĹ', 'ĥ'] +['ìĹ', 'Ĺ'] +['ìĹ', 'ľ'] +['ìĹ', '¨'] +['ìĺ', 'Ĥ'] +['ìĺ', 'Ħ'] +['ìĺ', 'ı'] +['ìĺ', '¾'] +['ìĺ', '¿'] +['ìľ', '§'] +['ìĿ', 'IJ'] +['ìĿ', 'ĸ'] +['ìĿ', '·'] +['ìŀ', 'į'] +['ìŀ', 'ı'] +['ìŀ', '¨'] +['ìŀ', 'ª'] +['ìŀ', '³'] +['ìł', '¡'] +['ìł', '´'] +['ìł', '¹'] +['ì¡', 'Ģ'] +['ì¡', 'ª'] +['ì¡', 'µ'] +['ì¢', 'IJ'] +['ì¢', '¨'] +['ì£', 'Į'] +['ì£', 'Ļ'] +['ì£', '³'] +['ì¦', 'ij'] +['ì§', '¥'] +['ì§', '´'] +['ì§', '¾'] +['ì¨', 'ĵ'] +['ì¨', 'ķ'] +['ì©', '°'] +['ì©', '»'] +['ì©', '¼'] +['ìª', 'Ĺ'] +['ì¬', 'Ķ'] +['ì¬', 'ĺ'] +['ì®', '®'] +['ì¯', 'ķ'] +['ì¯', 'ĺ'] +['ì°', 'İ'] +['ì°', '¯'] +['ì±', 'ĥ'] +['ì±', 'µ'] +['ì²', '§'] +['ì²', '®'] +['ì²', '¯'] +['ì³', '¬'] +['ì´', 'ĭ'] +['ì´', '¢'] +['ìµ', '¥'] +['ì¶', '£'] +['ì¸', 'Ī'] +['ì¸', 'Ļ'] +['ìº', '¤'] +['ìº', 'Ń'] +['ì»', '½'] +['ì¼', 'Ļ'] +['ì½', '¬'] +['ì¾', 'Ģ'] +['ì¿', 'ħ'] +['ì¿', '½'] +['íĢ', 'ħ'] +['íģ', '¦'] +['íĤ', 'ħ'] +['íĥ', '¶'] +['íĥ', '¹'] +['íĦ', 'Ķ'] +['íħ', '£'] +['íĨ', 'Ħ'] +['íĨ', '§'] +['íĨ', '¹'] +['íĩ', '¼'] +['íī', '¤'] +['íĬ', '½'] +['íĭ', 'Ĥ'] +['íĭ', 'ij'] +['íį', 'Ī'] +['íį', 'Ļ'] +['íį', '¿'] +['íİ', '¶'] +['íIJ', 'Ŀ'] +['íĴ', 'ľ'] +['íĵ', 'Ŀ'] +['íĵ', 'ª'] +['íĵ', '±'] +['íĵ', '·'] +['íĵ', '¼'] +['íĶ', 'Ļ'] +['íĶ', 'ł'] +['íķ', 'ļ'] +['íķ', 'Ľ'] +['íķ', 'ŀ'] +['íķ', 'Ł'] +['íķ', '§'] +['íķ', '¶'] +['íĸ', 'Ĭ'] +['íĸ', 'ĭ'] +['íĸ', 'į'] +['íĸ', 'Ķ'] +['íĸ', 'ĺ'] +['íĸ', '¡'] +['íĸ', '¬'] +['íĹ', '£'] +['íĹ', '¿'] +['íĺ', 'ĸ'] +['íĺ', 'Ń'] +['íļ', '°'] +['íĽ', 'į'] +['íĽ', '½'] +['íĿ', 'Ł'] +['íĿ', 'Ń'] +['íĿ', '´'] +['íŀ', 'ľ'] +['ï¤', 'ī'] +['ï¤', 'Ń'] +['ï¤', '²'] +['ï¤', 'µ'] +['ï¤', '¼'] 
+['ï¥', 'Ģ'] +['ï¥', 'ij'] +['ï¥', 'Ĵ'] +['ï¥', 'ķ'] +['ï¥', 'ĺ'] +['ï¥', 'Ļ'] +['ï¥', '«'] +['ï¥', '¬'] +['ï¥', '°'] +['ï', '¥¿'] +['ï¦', 'ĭ'] +['ï¦', 'ı'] +['ï¦', 'Ķ'] +['ï¦', 'ĸ'] +['ï¦', 'ĺ'] +['ï¦', 'Ľ'] +['ï¦', 'ł'] +['ï¦', '®'] +['ï¦', '¯'] +['ï¦', 'º'] +['ï¦', '»'] +['ï¦', '¾'] +['ï§', 'Ĩ'] +['ï§', 'ĸ'] +['ï§', 'Ľ'] +['ï§', 'ŀ'] +['ï§', 'Ł'] +['ï§', '§'] +['ï§', '³'] +['ï§', 'º'] +['ï§', '½'] +['ï¨', 'ĥ'] +['ï¨', 'ļ'] +['ï¨', '¢'] +['ï©', 'Ł'] +['ï¬', '¤'] +['ï¬', '¬'] +['ï¬', '¼'] +['ïŃ', 'Ĵ'] +['ïŃ', 'ķ'] +['ïŃ', 'Ľ'] +['ïŃ', 'Ŀ'] +['ïŃ', 'ŀ'] +['ïŃ', 'Ł'] +['ïŃ', '¤'] +['ïŃ', '§'] +['ïŃ', '¨'] +['ïŃ', '®'] +['ïŃ', '°'] +['ïŃ', '±'] +['ïŃ', '·'] +['ïŃ', '¹'] +['ïŃ', '»'] +['ï®', 'Ģ'] +['ï®', 'ĥ'] +['ï®', 'Ħ'] +['ï®', 'ħ'] +['ï®', 'į'] +['ï®', 'Ĵ'] +['ï®', 'ĵ'] +['ï®', 'ķ'] +['ï®', '¦'] +['ï®', '®'] +['ï®', '°'] +['ï¯', 'ĵ'] +['ï¯', 'ľ'] +['ï¯', '©'] +['ï¯', 'ª'] +['ï¯', '¬'] +['ï¯', 'Ń'] +['ï¯', '®'] +['ï¯', '·'] +['ï¯', '¹'] +['ï¯', '»'] +['ï¯', '¼'] +['ï°', 'ĥ'] +['ï°', 'Į'] +['ï°', 'IJ'] +['ï°', 'ĺ'] +['ï°', 'Ļ'] +['ï°', 'ľ'] +['ï°', 'ŀ'] +['ï°', '¢'] +['ï°', '®'] +['ï°', '°'] +['ï°', '¼'] +['ï°', '¿'] +['ï±', 'Ģ'] +['ï±', 'ģ'] +['ï±', 'Ī'] +['ï±', 'ĭ'] +['ï±', 'ı'] +['ï±', 'Ń'] +['ï²', 'Ģ'] +['ï²', 'ĩ'] +['ï²', 'Ī'] +['ï²', 'ĭ'] +['ï²', 'İ'] +['ï²', 'Ĵ'] +['ï²', 'ľ'] +['ï²', 'ł'] +['ï²', '¬'] +['ï²', '»'] +['ï³', 'ĩ'] +['ï³', 'Ķ'] +['ï³', '£'] +['ï³', '«'] +['ï´', 'ĺ'] +['ï´', '°'] +['ï´', '½'] +['ï', '¶'] +['ï¶', '°'] +['ï¸', 'ĸ'] +['ï¸', '´'] +['ï¸', '¹'] +['ï¹', 'į'] +['ï¹', 'Ĺ'] +['ï¹', '¢'] +['ï¹', '¤'] +['ï¹', '©'] +['ï¹', '±'] +['ï¾', '°'] +['ï¿', 'Ĥ'] +['ï¿', '®'] +['ðIJĮ', '°'] +['ðIJĮ', '¹'] +['ðIJĮ', 'º'] +['ðIJĮ', '½'] +['ðIJį', 'Ĥ'] +['ðIJį', 'ĥ'] +['ðIJį', 'Ħ'] +['ðIJ', 'İ'] +['ðIJİ', '¹'] +['ðIJ¤', 'Ĥ'] +['ðIJ¤', 'į'] +['ðIJ¤', 'ı'] +['ðIJ¤', 'ĵ'] +['ðIJŃ', 'ī'] +['ðIJŃ', 'į'] +['ðIJ°', 'ĩ'] +['ðIJ°', '°'] +['ðij', 'Ĥ'] +['ðijĤ', 'Ħ'] +['ðij', 'ĺ'] +['ðijĺ', 'ģ'] +['ðĴ', 'Ģ'] +['ðĴĢ', '¸'] +['ðĴ', 'ģ'] +['ðĴģ', 'º'] +['ðĴ', 'Ħ'] 
+['ðĴĦ', '·'] +['ðĴ', 'Ĭ'] +['ðĴĬ', 'ij'] +['ðĴ', 'ĭ'] +['ðĴĭ', 'Ĺ'] +['ð', 'ĴĮ'] +['ðĴĮ', '¨'] +['ðĵĥ', '¢'] +['ðĵĥ', '°'] +['ðĸ', 'ł'] +['ðĸł', 'ļ'] +['ðĿĦ', 'ĥ'] +['ðĿĦ', 'ħ'] +['ðĿĦ', 'ķ'] +['ðĿĦ', 'Ļ'] +['ðĿĦ', '±'] +['ðĿĦ', '´'] +['ðĿĦ', '¹'] +['ðĿħ', 'İ'] +['ðĿħ', 'ª'] +['ðĿĨ', '£'] +['ðĿĨ', '³'] +['ðĿĨ', '¹'] +['ðĿĩ', 'Ĭ'] +['ðĿĩ', 'Ĺ'] +['ðĿĩ', 'ļ'] +['ðĿĩ', 'ľ'] +['ðĿĩ', 'ł'] +['ðĿIJ', 'ī'] +['ðĿIJ', 'ĸ'] +['ðĿIJ', 'ĺ'] +['ðĿIJ', '£'] +['ðĿIJ', '±'] +['ðĿij', 'Ĭ'] +['ðĿij', 'Ń'] +['ðĿij', '¼'] +['ðĿij', '½'] +['ðĿĴ', '°'] +['ðĿĴ', '·'] +['ðĿĴ', '¿'] +['ðĿĵ', 'ģ'] +['ðĿĵ', 'ĭ'] +['ðĿĵ', 'İ'] +['ðĿĵ', 'Ĵ'] +['ðĿ', 'ĵĺ'] +['ðĿĵ', '¢'] +['ðĿĵ', '¦'] +['ðĿĵ', '«'] +['ðĿĵ', '¿'] +['ðĿĶ', 'İ'] +['ðĿĶ', '±'] +['ðĿĶ', '´'] +['ðĿĶ', '·'] +['ðĿĶ', '¸'] +['ðĿĶ', '½'] +['ðĿķ', 'Ĥ'] +['ðĿķ', 'ĥ'] +['ðĿķ', 'ĭ'] +['ðĿķ', 'ı'] +['ðĿķ', 'IJ'] +['ðĿķ', '¥'] +['ðĿķ', '´'] +['ðĿķ', 'º'] +['ðĿĸ', 'IJ'] +['ðĿĸ', 'Ľ'] +['ðĿĸ', 'Ŀ'] +['ðĿĸ', 'ŀ'] +['ðĿĹ', '©'] +['ðĿĹ', '³'] +['ðĿĹ', '½'] +['ðĿĺ', 'Ĭ'] +['ðĿĺ', 'ĭ'] +['ðĿĺ', 'Ķ'] +['ðĿĺ', '±'] +['ðĿĺ', '´'] +['ðĿĺ', '¿'] +['ðĿĻ', 'Ĵ'] +['ðĿĻ', 'Ŀ'] +['ðĿĻ', 'Ł'] +['ðĿĻ', '¬'] +['ðĿĻ', 'Ń'] +['ðĿĻ', '»'] +['ðĿĻ', '¾'] +['ðĿļ', 'Ī'] +['ðĿļ', 'ĭ'] +['ðĿļ', 'ij'] +['ðĿļ', 'Ł'] +['ðĿļ', 'ł'] +['ðĿļ', '£'] +['ðĿĽ', '½'] +['ðĿľ', 'Ĥ'] +['ðĿľ', 'Ķ'] +['ðĿľ', 'Ļ'] +['ðŁ', 'Ģ'] +['ðŁĢ', 'Ħ'] +['ðŁĦ', '²'] +['ðŁĦ', '¶'] +['ðŁħ', 'IJ'] +['ðŁħ', 'ĸ'] +['ðŁħ', 'ļ'] +['ðŁħ', 'Ľ'] +['ðŁħ', '¦'] +['ðŁħ', '¶'] +['ðŁħ', '»'] +['ðŁħ', '¼'] +['ðŁĨ', 'ĥ'] +['ðŁĨ', 'Ĩ'] +['ðŁĨ', 'İ'] +['ðŁĪ', '¯'] +['ðŁĪ', '²'] +['ðŁĪ', '¹'] +['ðŁĮ', 'ĩ'] +['ðŁĮ', 'ĵ'] +['ðŁį', 'ĺ'] +['ðŁİ', 'ij'] +['ðŁİ', '¿'] +['ðŁı', 'ı'] +['ðŁı', 'Ĵ'] +['ðŁı', '©'] +['ðŁı', '¯'] +['ðŁIJ', 'Ģ'] +['ðŁij', 'Ŀ'] +['ðŁĴ', '¹'] +['ðŁĴ', 'º'] +['ðŁĵ', 'Ł'] +['ðŁĵ', 'ª'] +['ðŁĵ', '¼'] +['ðŁĶ', 'Ģ'] +['ðŁĶ', 'Ĥ'] +['ðŁĶ', 'ĥ'] +['ðŁĶ', 'ĩ'] +['ðŁĶ', 'ĵ'] +['ðŁĶ', '¢'] +['ðŁĶ', '¤'] +['ðŁĶ', '©'] +['ðŁķ', 'ĸ'] +['ðŁķ', 'ļ'] +['ðŁķ', 'ľ'] +['ðŁķ', 'Ŀ'] +['ðŁķ', 'ŀ'] +['ðŁķ', 'ł'] +['ðŁķ', '¢'] 
+['ðŁķ', '³'] +['ðŁĸ', 'ĩ'] +['ðŁĸ', 'ij'] +['ðŁĸ', '¶'] +['ðŁĹ', 'ģ'] +['Ñ', '¨'] +['Ú', 'İ'] +['á¡', 'Į'] +['á¸', '°'] +['áº', 'Ģ'] +['á¼', '®'] +['á½', 'Ŀ'] +['âĦ', '¬'] +['âļ', '§'] +['âĽ', '¤'] +['ã³', '¬'] +['êĻ', 'ĭ'] +['ê¸', 'ij'] +['ëĶ', 'ī'] +['ëĹ', 'į'] +['ë¡', 'ij'] +['ë¯', 'ij'] +['ë»', 'ħ'] +['ë¼', 'Ŀ'] +['ìĦ', 'IJ'] +['ìī', '¡'] +['ìĭ', '²'] +['ìı', '±'] +['ìĹ', '¤'] +['ìĿ', '©'] +['ìĿ', '¿'] +['ìŁ', 'Ļ'] +['ìł', '°'] +['ì¥', 'ī'] +['íĬ', 'Ń'] +['íķ', '®'] +['ï®', 'ı'] +['ðŁħ', '±'] +['ðŁĨ', 'Ĵ'] +['ðŁķ', 'ĭ'] +['É', 'ĺ'] +['Ê', 'ĵ'] +['Õ', 'ĥ'] +['à´', '´'] +['à½', 'ħ'] +['áĨ', 'º'] +['áĪ', 'Ĭ'] +['áĪ', '¨'] +['áĪ', '¾'] +['áī', 'IJ'] +['áĮ', 'ĥ'] +['áĮ', '½'] +['áĶ', 'Ń'] +['áł', 'Ĥ'] +['áł', '¬'] +['á¨', '¸'] +['á©', 'ĭ'] +['á¶', 'ı'] +['á¾', 'Ķ'] +['á¿', 'IJ'] +['á¿', 'ļ'] +['âĻ', 'Ļ'] +['âļ', 'Ĥ'] +['âļ', 'Ĺ'] +['â¡', '¢'] +['â¤', '¦'] +['ëĸ', '°'] +['ë¤', 'Ĥ'] +['ë§', 'ł'] +['ë±', 'ĭ'] +['ë±', 'IJ'] +['ìĽ', '¢'] +['ìľ', '¾'] +['ì³', 'ħ'] +['ì»', 'ģ'] +['íģ', '»'] +['íĥ', 'Ļ'] +['íĵ', 'ĸ'] +['íĵ', 'Ń'] +['íķ', '±'] +['íĽ', 'ľ'] +['ï¤', 'ħ'] +['ï¤', 'Ĩ'] +['ï¦', 'ĥ'] +['ï§', '©'] +['ï¨', 'Ĥ'] +['ðIJ¤', 'Ķ'] +['ðIJŃ', 'ĵ'] +['ðIJ°', '¼'] +['ðĿĵ', 'ŀ'] +['ðĿĵ', '°'] +['ðĿĻ', 'ľ'] +['ðĿļ', 'ģ'] +['ðŁħ', '¢'] +['ðŁı', 'ĩ'] +['È', '²'] +['Ê', '¶'] +['Ô', 'Ī'] +['Ô', 'ij'] +['Ý', 'ĵ'] +['Ý', '¥'] +['à¤', 'ij'] +['à¥', '±'] +['à¬', 'ī'] +['à°', '³'] +['à°', 'µ'] +['à²', 'Ł'] +['áĢ', 'ı'] +['áģ', '¼'] +['áī', '¨'] +['áĬ', 'Ĵ'] +['áĭ', '©'] +['áĮ', 'Ħ'] +['áĮ', 'Ķ'] +['áIJ', '§'] +['á', 'ĴĮ'] +['áĶ', 'ħ'] +['áĶ', 'Ĭ'] +['áł', 'Ħ'] +['á¨', 'ģ'] +['á¸', 'ĥ'] +['á¸', '»'] +['âĶ', 'ŀ'] +['âĺ', 'µ'] +['âļ', '£'] +['â²', '¢'] +['ãĪ', 'ª'] +['ä¶', 'µ'] +['ê²', 'Ļ'] +['ê²', '´'] +['ê³', 'Ĥ'] +['ë¡', '¼'] +['ìĨ', 'Ĭ'] +['ì¼', 'ĩ'] +['íĭ', 'į'] +['íĵ', '¬'] +['íĵ', '®'] +['íĵ', '¶'] +['íĵ', '»'] +['ï¤', '¦'] +['ï¥', 'ł'] +['ï¥', '±'] +['ïŃ', '²'] +['ðIJŃ', 'Ĭ'] +['ðIJ', '±ħ'] +['ðĸ', '¥'] +['ðĸ¥', '¨'] +['ðĿij', '³'] +['ðĿĵ', 'ķ'] +['ðĿĵ', '¬'] +['ðĿĵ', '¹'] 
+['ðĿĵ', '¾'] +['ðĿĶ', 'ĵ'] +['ðĿķ', 'į'] +['ðĿķ', '¡'] +['ðĿķ', '±'] +['ðĿĸ', 'ĸ'] +['ðĿĺ', 'ı'] +['ðĿĺ', 'IJ'] +['ðĿĺ', 'ļ'] +['ðĿĻ', '®'] +['ðĿĻ', '°'] +['ðĿĻ', '¸'] +['ðĿĻ', 'º'] +['ðĿĻ', '¼'] +['ðĿĻ', '½'] +['ðĿĻ', '¿'] +['ðĿļ', 'Ħ'] +['ðĿļ', 'ı'] +['ðŁħ', 'ħ'] +['ðŁħ', 'ĵ'] +['Æ', 'Ī'] +['àł', 'Į'] +['áĻ', '³'] +['á', 'ļĮ'] +['áĽ', 'ħ'] +['áĽ', 'IJ'] +['á¤', 'Ĭ'] +['á¸', 'Ĭ'] +['âĶ', '½'] +['âķ', 'Ĭ'] +['âĽ', 'ĩ'] +['âĽ', 'ı'] +['âĿ', 'ª'] +['âĿ', '«'] +['âŁ', '°'] +['ãĦ', 'į'] +['ãĦ', 'ĵ'] +['ãĦ', '§'] +['ãħ', 'ĸ'] +['ãī', '«'] +['ê¦', 'Ķ'] +['ï±', 'Ĭ'] +['àº', 'Ĥ'] +['áħ', '£'] +['á¥', 'Ķ'] +['á¥', '¤'] +['âĨ', '¤'] +['âĨ', '·'] +['âĩ', 'ŀ'] +['âĸ', '¤'] +['âŀ', '¶'] +['ãĪ', '¼'] +['ï¨', '·'] +['ðĵı', '§'] +['âĶ', '²'] +['âĢ', '´'] +['âĴ', 'Ł'] +['âĴ', '¡'] +['â°', 'Ĥ'] +['â°', 'į'] +['â°', 'İ'] +['â°', 'IJ'] +['â°', 'ij'] +['â°', 'Ł'] +['â°', 'ł'] +['â°', '¡'] +['â¼', 'Ń'] +['ãĬ', '¥'] +['âĴ', 'ł'] +['â½', 'º'] +['ãĩ', 'º'] +['ãĩ', '½'] +['ï¨', 'Ĭ'] +['áķ', '·'] +['âį', '¨'] +['âº', 'Ł'] +['â½', 'Ĺ'] diff --git a/vocab/smallthinker_vocab.mllm b/vocab/smallthinker_vocab.mllm new file mode 100644 index 000000000..adfa9853c Binary files /dev/null and b/vocab/smallthinker_vocab.mllm differ